#! /bin/sh
# Script to apply kernel patches.
#   usage: patch-kernel [ sourcedir [ patchdir [ stopversion ] [ -acxx ] ] ]
#     The source directory defaults to /usr/src/linux, and the patch
#     directory defaults to the current directory.
# e.g.
#   scripts/patch-kernel . ..
#	Update the kernel tree in the current directory using patches in the
#	directory above to the latest Linus kernel
#   scripts/patch-kernel . .. -ac
#	Get the latest Linux kernel and patch it with the latest ac patch
#   scripts/patch-kernel . .. 2.4.9
#	Gets standard kernel 2.4.9
#   scripts/patch-kernel . .. 2.4.9 -ac
#	Gets 2.4.9 with latest ac patches
#   scripts/patch-kernel . .. 2.4.9 -ac11
#	Gets 2.4.9 with ac patch ac11
# Note: It uses the patches relative to the Linus kernels, not the
#	ac to ac relative patches
#
# It determines the current kernel version from the top-level Makefile.
# It then looks for patches for the next sublevel in the patch directory.
# This is applied using "patch -p1 -s" from within the kernel directory.
# A check is then made for "*.rej" files to see if the patch was
# successful. If it is, then all of the "*.orig" files are removed.
#
#	Nick Holloway, 2nd January 1995.
#
# Added support for handling multiple types of compression. This includes
# gzip, bzip, bzip2, zip, compress, and plaintext.
#
#	Adam Sulmicki, 1st January 1997.
#
# Added ability to stop at a given version number.
# Put the full version number (i.e. 2.3.31) as the last parameter.
#	Dave Gilbert, 11th December 1999.
#
# Fixed previous patch so that if we are already at the correct version,
# we do not patch up.
#
# Added -ac option, use -ac or -ac9 (say) to stop at a particular version.
#	Dave Gilbert, 29th September 2001.
#
# Add support for (use of) EXTRAVERSION (to support 2.6.8.x, e.g.);
# update usage message;
# fix some whitespace damage;
# be smarter about stopping when current version is larger than requested;
#	Randy Dunlap, 2004-AUG-18.
#
# Add better support for (non-incremental) 2.6.x.y patches;
# If an ending version number is not specified, the script automatically
# increments the SUBLEVEL (x in 2.6.x.y) until no more patch files are found;
# however, EXTRAVERSION (y in 2.6.x.y) is never automatically incremented
# but must be specified fully.
#
# patch-kernel does not normally support reverse patching, but does so when
# applying EXTRAVERSION (x.y) patches, so that moving from 2.6.11.y to 2.6.11.z
# is easy and handled by the script (reverse 2.6.11.y and apply 2.6.11.z).
#	Randy Dunlap, 2005-APR-08.

PNAME=patch-kernel

# Set directories from arguments, or use defaults.
sourcedir=${1-/usr/src/linux}
patchdir=${2-.}
stopvers=${3-default}

if [ "$1" = -h -o "$1" = --help -o ! -r "$sourcedir/Makefile" ]; then
cat << USAGE
usage: $PNAME [-h] [ sourcedir [ patchdir [ stopversion ] [ -acxx ] ] ]
  source directory defaults to /usr/src/linux,
  patch directory defaults to the current directory,
  stopversion defaults to <all in patchdir>.
USAGE
exit 1
fi
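# As a worked example of the argument defaulting above (2.6.12 is just an
# illustrative version, not a requirement): "scripts/patch-kernel . .. 2.6.12"
# proceeds with
#   sourcedir=.  patchdir=..  stopvers=2.6.12
# while invoking the script with no arguments proceeds with
#   sourcedir=/usr/src/linux  patchdir=.  stopvers=default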
# See if we have any -ac options
for PARM in $*
do
	case $PARM in
	-ac*)
		gotac=$PARM;
	esac;
done

# ---------------------------------------------------------------------------
# arg1 is filename
noFile () {
	echo "cannot find patch file: ${patch}"
	exit 1
}

# ---------------------------------------------------------------------------
backwards () {
	echo "$PNAME does not support reverse patching"
	exit 1
}

# ---------------------------------------------------------------------------
# Find a file, first parameter is basename of file;
# it tries many compression mechanisms and sets variables to say how to get it
findFile () {
	filebase=$1;

	if [ -r ${filebase}.gz ]; then
		ext=".gz"
		name="gzip"
		uncomp="gunzip -dc"
	elif [ -r ${filebase}.bz ]; then
		ext=".bz"
		name="bzip"
		uncomp="bunzip -dc"
	elif [ -r ${filebase}.bz2 ]; then
		ext=".bz2"
		name="bzip2"
		uncomp="bunzip2 -dc"
	elif [ -r ${filebase}.xz ]; then
		ext=".xz"
		name="xz"
		uncomp="xz -dc"
	elif [ -r ${filebase}.zip ]; then
		ext=".zip"
		name="zip"
		uncomp="unzip -d"
	elif [ -r ${filebase}.Z ]; then
		ext=".Z"
		name="uncompress"
		uncomp="uncompress -c"
	elif [ -r ${filebase} ]; then
		ext=""
		name="plaintext"
		uncomp="cat"
	else
		return 1;
	fi

	return 0;
}

# ---------------------------------------------------------------------------
# Apply a patch and check it goes in cleanly
# First param is patch name (e.g. patch-2.4.9-ac5) - without path or extension
applyPatch () {
	echo -n "Applying $1 (${name})... "
	if $uncomp ${patchdir}/$1${ext} | patch -p1 -s -N -E -d $sourcedir
	then
		echo "done."
	else
		echo "failed. Clean up yourself."
		return 1;
	fi
	if [ "`find $sourcedir/ '(' -name '*.rej' -o -name '.*.rej' ')' -print`" ]
	then
		echo "Aborting. Reject files found."
		return 1;
	fi
	# Remove backup files
	find $sourcedir/ '(' -name '*.orig' -o -name '.*.orig' ')' -exec rm -f {} \;

	return 0;
}

# ---------------------------------------------------------------------------
# arg1 is patch filename
reversePatch () {
	echo -n "Reversing $1 (${name}) ... "
	if $uncomp ${patchdir}/"$1"${ext} | patch -p1 -Rs -N -E -d $sourcedir
	then
		echo "done."
	else
		echo "failed. Clean it up."
		exit 1
	fi
	if [ "`find $sourcedir/ '(' -name '*.rej' -o -name '.*.rej' ')' -print`" ]
	then
		echo "Aborting. Reject files found."
		return 1
	fi
	# Remove backup files
	find $sourcedir/ '(' -name '*.orig' -o -name '.*.orig' ')' -exec rm -f {} \;

	return 0
}

# set current VERSION, PATCHLEVEL, SUBLEVEL, EXTRAVERSION
# force $TMPFILEs below to be in local directory: a slash character prevents
# the dot command from using the search path.
TMPFILE=`mktemp ./.tmpver.XXXXXX` || { echo "cannot make temp file" ; exit 1; }
grep -E "^(VERSION|PATCHLEVEL|SUBLEVEL|EXTRAVERSION)" $sourcedir/Makefile > $TMPFILE
tr -d '[:blank:]' < $TMPFILE > $TMPFILE.1
. $TMPFILE.1
rm -f $TMPFILE*
if [ -z "$VERSION" -o -z "$PATCHLEVEL" -o -z "$SUBLEVEL" ]
then
	echo "unable to determine current kernel version" >&2
	exit 1
fi

NAME=`grep ^NAME $sourcedir/Makefile`
NAME=${NAME##*=}

echo "Current kernel version is $VERSION.$PATCHLEVEL.$SUBLEVEL${EXTRAVERSION} ($NAME)"

# strip EXTRAVERSION to just a number (drop leading '.' and trailing additions)
EXTRAVER=
if [ x$EXTRAVERSION != "x" ]
then
	EXTRAVER=${EXTRAVERSION#.}
	EXTRAVER=${EXTRAVER%%[[:punct:]]*}
	#echo "$PNAME: changing EXTRAVERSION from $EXTRAVERSION to $EXTRAVER"
fi
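# For illustration (hypothetical tree): a top-level Makefile beginning with
#	VERSION = 2
#	PATCHLEVEL = 6
#	SUBLEVEL = 11
#	EXTRAVERSION = .7
# is dot-sourced above as VERSION=2 PATCHLEVEL=6 SUBLEVEL=11 EXTRAVERSION=.7,
# and the stripping here leaves EXTRAVER=7 (an EXTRAVERSION such as "-rc3"
# starts with punctuation and so strips to an empty EXTRAVER).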
#echo "stopvers=$stopvers"
if [ $stopvers != "default" ]; then
	STOPSUBLEVEL=`echo $stopvers | cut -d. -f3`
	STOPEXTRA=`echo $stopvers | cut -d. -f4`
	STOPFULLVERSION=${stopvers%%.$STOPEXTRA}
	#echo "#___STOPSUBLEVEL=/$STOPSUBLEVEL/, STOPEXTRA=/$STOPEXTRA/"
else
	STOPSUBLEVEL=9999
	STOPEXTRA=9999
fi

# This all assumes a 2.6.x[.y] kernel tree.
# Don't allow backwards/reverse patching.
if [ $STOPSUBLEVEL -lt $SUBLEVEL ]; then
	backwards
fi

if [ x$EXTRAVER != "x" ]; then
	CURRENTFULLVERSION="$VERSION.$PATCHLEVEL.$SUBLEVEL.$EXTRAVER"
else
	CURRENTFULLVERSION="$VERSION.$PATCHLEVEL.$SUBLEVEL"
fi

if [ x$EXTRAVER != "x" ]; then
	echo "backing up to: $VERSION.$PATCHLEVEL.$SUBLEVEL"
	patch="patch-${CURRENTFULLVERSION}"
	findFile $patchdir/${patch} || noFile ${patch}
	reversePatch ${patch} || exit 1
fi

# now current is 2.6.x, with no EXTRA applied,
# so update to target SUBLEVEL (2.6.SUBLEVEL)
# and then to target EXTRAVER (2.6.SUB.EXTRAVER) if requested.
# If no ending sublevel is specified, it is incremented until
# no further sublevels are found.

if [ $STOPSUBLEVEL -gt $SUBLEVEL ]; then
	while :				# incrementing SUBLEVEL (s in v.p.s)
	do
		CURRENTFULLVERSION="$VERSION.$PATCHLEVEL.$SUBLEVEL"
		EXTRAVER=
		if [ x$STOPFULLVERSION = x$CURRENTFULLVERSION ]; then
			echo "Stopping at $CURRENTFULLVERSION base as requested."
			break
		fi

		SUBLEVEL=$(($SUBLEVEL + 1))
		FULLVERSION="$VERSION.$PATCHLEVEL.$SUBLEVEL"
		#echo "#___ trying $FULLVERSION ___"

		if [ $(($SUBLEVEL)) -gt $(($STOPSUBLEVEL)) ]; then
			echo "Stopping since sublevel ($SUBLEVEL) is beyond stop-sublevel ($STOPSUBLEVEL)"
			exit 1
		fi

		patch=patch-$FULLVERSION
		# See if the file exists and find extension
		findFile $patchdir/${patch} || noFile ${patch}

		# Apply the patch and check all is OK
		applyPatch $patch || break
	done
	#echo "#___sublevel all done"
fi

# There is no incremental searching for extraversion...
if [ "$STOPEXTRA" != "" ]; then
	while :				# just to allow break
	do
		# apply STOPEXTRA directly (not incrementally) (x in v.p.s.x)
		FULLVERSION="$VERSION.$PATCHLEVEL.$SUBLEVEL.$STOPEXTRA"
		#echo "#... trying $FULLVERSION ..."
		patch=patch-$FULLVERSION

		# See if the file exists and find extension
		findFile $patchdir/${patch} || noFile ${patch}

		# Apply the patch and check all is OK
		applyPatch $patch || break
		#echo "#___extraver all done"
		break
	done
fi

if [ x$gotac != x ]; then
	# Our great user wants the -ac patches
	# They could have done -ac (get latest) or -acxx where xx=version they want
	if [ $gotac = "-ac" ]; then
		# They want the latest version
		HIGHESTPATCH=0
		for PATCHNAMES in $patchdir/patch-${CURRENTFULLVERSION}-ac*\.*
		do
			ACVALUE=`echo $PATCHNAMES | sed -e 's/^.*patch-[0-9.]*-ac\([0-9]*\).*/\1/'`
			# Check it is actually a recognised patch type
			findFile $patchdir/patch-${CURRENTFULLVERSION}-ac${ACVALUE} || break

			if [ $ACVALUE -gt $HIGHESTPATCH ]; then
				HIGHESTPATCH=$ACVALUE
			fi
		done

		if [ $HIGHESTPATCH -ne 0 ]; then
			findFile $patchdir/patch-${CURRENTFULLVERSION}-ac${HIGHESTPATCH} || exit 1
			applyPatch patch-${CURRENTFULLVERSION}-ac${HIGHESTPATCH}
		else
			echo "No -ac patches found"
		fi
	else
		# They want an exact version
		findFile $patchdir/patch-${CURRENTFULLVERSION}${gotac} || {
			echo "Sorry, I couldn't find the $gotac patch for $CURRENTFULLVERSION. Hohum."
			exit 1
		}
		applyPatch patch-${CURRENTFULLVERSION}${gotac}
	fi
fi
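# For example (hypothetical patch names): with patch-2.4.9-ac5.gz and
# patch-2.4.9-ac11.gz in patchdir, a bare -ac scans both, keeps the highest
# ac value (11), and applies patch-2.4.9-ac11; passing -ac5 instead applies
# patch-2.4.9-ac5 directly.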
-rw-r--r--arch/loongarch/kernel/setup.c20
-rw-r--r--arch/loongarch/kernel/unwind_orc.c2
-rw-r--r--arch/loongarch/kvm/exit.c33
-rw-r--r--arch/loongarch/kvm/intc/eiointc.c553
-rw-r--r--arch/loongarch/kvm/intc/ipi.c28
-rw-r--r--arch/loongarch/kvm/intc/pch_pic.c4
-rw-r--r--arch/loongarch/kvm/interrupt.c25
-rw-r--r--arch/loongarch/kvm/trace.h14
-rw-r--r--arch/loongarch/kvm/vcpu.c8
-rw-r--r--arch/loongarch/mm/pageattr.c2
-rw-r--r--arch/loongarch/net/bpf_jit.c701
-rw-r--r--arch/loongarch/net/bpf_jit.h6
-rw-r--r--arch/loongarch/vdso/Makefile2
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/Kconfig.debug2
-rw-r--r--arch/m68k/coldfire/gpio.c2
-rw-r--r--arch/m68k/configs/amiga_defconfig10
-rw-r--r--arch/m68k/configs/apollo_defconfig10
-rw-r--r--arch/m68k/configs/atari_defconfig10
-rw-r--r--arch/m68k/configs/bvme6000_defconfig10
-rw-r--r--arch/m68k/configs/hp300_defconfig10
-rw-r--r--arch/m68k/configs/mac_defconfig10
-rw-r--r--arch/m68k/configs/multi_defconfig10
-rw-r--r--arch/m68k/configs/mvme147_defconfig10
-rw-r--r--arch/m68k/configs/mvme16x_defconfig10
-rw-r--r--arch/m68k/configs/q40_defconfig10
-rw-r--r--arch/m68k/configs/sun3_defconfig10
-rw-r--r--arch/m68k/configs/sun3x_defconfig10
-rw-r--r--arch/m68k/include/asm/adb_iop.h4
-rw-r--r--arch/m68k/include/asm/bootinfo.h4
-rw-r--r--arch/m68k/include/asm/entry.h4
-rw-r--r--arch/m68k/include/asm/kexec.h4
-rw-r--r--arch/m68k/include/asm/mac_baboon.h4
-rw-r--r--arch/m68k/include/asm/mac_iop.h4
-rw-r--r--arch/m68k/include/asm/mac_oss.h4
-rw-r--r--arch/m68k/include/asm/mac_psc.h4
-rw-r--r--arch/m68k/include/asm/mac_via.h4
-rw-r--r--arch/m68k/include/asm/math-emu.h6
-rw-r--r--arch/m68k/include/asm/mcf_pgtable.h4
-rw-r--r--arch/m68k/include/asm/mcfmmu.h2
-rw-r--r--arch/m68k/include/asm/motorola_pgtable.h4
-rw-r--r--arch/m68k/include/asm/nettel.h4
-rw-r--r--arch/m68k/include/asm/openprom.h4
-rw-r--r--arch/m68k/include/asm/page.h4
-rw-r--r--arch/m68k/include/asm/page_mm.h4
-rw-r--r--arch/m68k/include/asm/page_no.h4
-rw-r--r--arch/m68k/include/asm/pgtable.h2
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h8
-rw-r--r--arch/m68k/include/asm/ptrace.h4
-rw-r--r--arch/m68k/include/asm/setup.h10
-rw-r--r--arch/m68k/include/asm/sun3_pgtable.h8
-rw-r--r--arch/m68k/include/asm/sun3mmu.h4
-rw-r--r--arch/m68k/include/asm/thread_info.h6
-rw-r--r--arch/m68k/include/asm/traps.h6
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo-vme.h4
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo.h8
-rw-r--r--arch/m68k/include/uapi/asm/ptrace.h4
-rw-r--r--arch/m68k/kernel/early_printk.c42
-rw-r--r--arch/m68k/kernel/head.S81
-rw-r--r--arch/m68k/mac/via.c16
-rw-r--r--arch/m68k/math-emu/fp_emu.h8
-rw-r--r--arch/m68k/mm/motorola.c56
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/mips/Kconfig3
-rw-r--r--arch/mips/alchemy/common/gpiolib.c6
-rw-r--r--arch/mips/bcm63xx/gpio.c2
-rw-r--r--arch/mips/boot/Makefile8
-rw-r--r--arch/mips/boot/dts/mobileye/eyeq5-epm5.dts8
-rw-r--r--arch/mips/boot/dts/mobileye/eyeq5.dtsi127
-rw-r--r--arch/mips/boot/dts/mobileye/eyeq6h.dtsi22
-rw-r--r--arch/mips/boot/dts/qca/ar9132.dtsi9
-rw-r--r--arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts4
-rw-r--r--arch/mips/boot/dts/qca/ar9331.dtsi9
-rw-r--r--arch/mips/boot/dts/qca/ar9331_dpt_module.dts4
-rw-r--r--arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts4
-rw-r--r--arch/mips/boot/dts/qca/ar9331_omega.dts4
-rw-r--r--arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts4
-rw-r--r--arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts4
-rw-r--r--arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts2
-rw-r--r--arch/mips/boot/dts/ralink/mt7620a.dtsi10
-rw-r--r--arch/mips/boot/dts/ralink/mt7628a.dtsi11
-rw-r--r--arch/mips/boot/dts/realtek/cameo-rtl9302c-2x-rtl8224-2xge.dts96
-rw-r--r--arch/mips/boot/dts/realtek/rtl930x.dtsi31
-rw-r--r--arch/mips/configs/eyeq5_defconfig12
-rw-r--r--arch/mips/configs/eyeq6_defconfig2
-rw-r--r--arch/mips/configs/fuloong2e_defconfig1
-rw-r--r--arch/mips/configs/ip22_defconfig1
-rw-r--r--arch/mips/configs/loongson2k_defconfig12
-rw-r--r--arch/mips/configs/loongson3_defconfig16
-rw-r--r--arch/mips/configs/malta_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_defconfig1
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig1
-rw-r--r--arch/mips/configs/rb532_defconfig1
-rw-r--r--arch/mips/configs/rm200_defconfig1
-rw-r--r--arch/mips/include/asm/cpu-info.h1
-rw-r--r--arch/mips/include/asm/hugetlb.h14
-rw-r--r--arch/mips/include/asm/mach-generic/mc146818rtc.h4
-rw-r--r--arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-ip30/spaces.h2
-rw-r--r--arch/mips/include/asm/mach-jazz/mc146818rtc.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h3
-rw-r--r--arch/mips/include/asm/mach-malta/mc146818rtc.h2
-rw-r--r--arch/mips/include/asm/mach-rm/mc146818rtc.h21
-rw-r--r--arch/mips/include/asm/mc146818-time.h105
-rw-r--r--arch/mips/include/asm/mips-cps.h4
-rw-r--r--arch/mips/include/asm/sgi/heart.h2
-rw-r--r--arch/mips/include/asm/smp-cps.h1
-rw-r--r--arch/mips/include/asm/vpe.h8
-rw-r--r--arch/mips/include/uapi/asm/socket.h3
-rw-r--r--arch/mips/kernel/cpu-probe.c42
-rw-r--r--arch/mips/kernel/gpio_txx9.c2
-rw-r--r--arch/mips/kernel/mips-cm.c52
-rw-r--r--arch/mips/kernel/process.c16
-rw-r--r--arch/mips/kernel/relocate.c10
-rw-r--r--arch/mips/kernel/setup.c2
-rw-r--r--arch/mips/kernel/smp-cps.c16
-rw-r--r--arch/mips/kvm/mips.c2
-rw-r--r--arch/mips/lantiq/falcon/prom.c4
-rw-r--r--arch/mips/lantiq/falcon/sysctrl.c29
-rw-r--r--arch/mips/lantiq/irq.c4
-rw-r--r--arch/mips/lantiq/xway/clk.c2
-rw-r--r--arch/mips/lantiq/xway/dcdc.c2
-rw-r--r--arch/mips/lantiq/xway/dma.c2
-rw-r--r--arch/mips/lantiq/xway/gptu.c2
-rw-r--r--arch/mips/loongson64/setup.c1
-rw-r--r--arch/mips/mm/physaddr.c2
-rw-r--r--arch/mips/mm/tlb-r4k.c56
-rw-r--r--arch/mips/pci/pci-lantiq.c2
-rw-r--r--arch/mips/pci/pci-rt2880.c2
-rw-r--r--arch/mips/ralink/irq.c1
-rw-r--r--arch/mips/rb532/gpio.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c2
-rw-r--r--arch/mips/sgi-ip30/ip30-power.c2
-rw-r--r--arch/mips/sgi-ip30/ip30-setup.c2
-rw-r--r--arch/mips/sgi-ip30/ip30-smp.c2
-rw-r--r--arch/mips/sgi-ip30/ip30-timer.c2
-rw-r--r--arch/mips/sgi-ip30/ip30-xtalk.c2
-rw-r--r--arch/mips/txx9/generic/setup.c6
-rw-r--r--arch/openrisc/include/asm/mmu.h2
-rw-r--r--arch/openrisc/include/asm/page.h8
-rw-r--r--arch/openrisc/include/asm/pgtable.h4
-rw-r--r--arch/openrisc/include/asm/processor.h4
-rw-r--r--arch/openrisc/include/asm/ptrace.h4
-rw-r--r--arch/openrisc/include/asm/setup.h2
-rw-r--r--arch/openrisc/include/asm/thread_info.h8
-rw-r--r--arch/openrisc/include/uapi/asm/ptrace.h2
-rw-r--r--arch/openrisc/kernel/dma.c4
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/Makefile6
-rw-r--r--arch/parisc/include/asm/pgtable.h7
-rw-r--r--arch/parisc/include/asm/special_insns.h28
-rw-r--r--arch/parisc/include/asm/uaccess.h21
-rw-r--r--arch/parisc/include/uapi/asm/socket.h3
-rw-r--r--arch/parisc/kernel/cache.c6
-rw-r--r--arch/parisc/kernel/entry.S17
-rw-r--r--arch/parisc/kernel/syscall.S30
-rw-r--r--arch/parisc/lib/memcpy.c19
-rw-r--r--arch/parisc/mm/fault.c4
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/Makefile2
-rw-r--r--arch/powerpc/boot/dts/microwatt.dts2
-rw-r--r--arch/powerpc/configs/cell_defconfig1
-rw-r--r--arch/powerpc/configs/powernv_defconfig3
-rw-r--r--arch/powerpc/configs/ppc64_defconfig3
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-64k.h7
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h53
-rw-r--r--arch/powerpc/include/asm/book3s/64/pkeys.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h14
-rw-r--r--arch/powerpc/include/asm/floppy.h5
-rw-r--r--arch/powerpc/include/asm/hvcall.h1
-rw-r--r--arch/powerpc/include/asm/mman.h2
-rw-r--r--arch/powerpc/include/asm/pkeys.h4
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h1
-rw-r--r--arch/powerpc/include/uapi/asm/eeh.h13
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h13
-rw-r--r--arch/powerpc/include/uapi/asm/kvm_para.h13
-rw-r--r--arch/powerpc/include/uapi/asm/ps3fb.h13
-rw-r--r--arch/powerpc/kernel/eeh.c21
-rw-r--r--arch/powerpc/kernel/eeh_driver.c50
-rw-r--r--arch/powerpc/kernel/eeh_pe.c10
-rw-r--r--arch/powerpc/kernel/fadump.c13
-rw-r--r--arch/powerpc/kernel/pci-hotplug.c3
-rw-r--r--arch/powerpc/kernel/rtas_flash.c64
-rw-r--r--arch/powerpc/kexec/core.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c2
-rw-r--r--arch/powerpc/kvm/trace_book3s.h1
-rw-r--r--arch/powerpc/mm/book3s64/hash_hugepage.c2
-rw-r--r--arch/powerpc/mm/book3s64/hash_pgtable.c3
-rw-r--r--arch/powerpc/mm/book3s64/hugetlbpage.c2
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c12
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c36
-rw-r--r--arch/powerpc/mm/nohash/kaslr_booke.c2
-rw-r--r--arch/powerpc/mm/pgtable.c2
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c161
-rw-r--r--arch/powerpc/perf/hv-24x7.c8
-rw-r--r--arch/powerpc/platforms/44x/gpio.c2
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_lpbfifo.c6
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c2
-rw-r--r--arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c2
-rw-r--r--arch/powerpc/platforms/8xx/cpm1.c4
-rw-r--r--arch/powerpc/platforms/book3s/vas-api.c32
-rw-r--r--arch/powerpc/platforms/powernv/ocxl.c12
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c2
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c52
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c2
-rw-r--r--arch/powerpc/platforms/pseries/plpks-secvar.c104
-rw-r--r--arch/powerpc/sysdev/cpm_common.c2
-rw-r--r--arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c14
-rw-r--r--arch/riscv/Kconfig3
-rw-r--r--arch/riscv/boot/dts/thead/th1520.dtsi10
-rw-r--r--arch/riscv/include/asm/bug.h35
-rw-r--r--arch/riscv/include/asm/cfi.h16
-rw-r--r--arch/riscv/include/asm/kvm_aia.h2
-rw-r--r--arch/riscv/include/asm/kvm_gstage.h72
-rw-r--r--arch/riscv/include/asm/kvm_host.h105
-rw-r--r--arch/riscv/include/asm/kvm_mmu.h21
-rw-r--r--arch/riscv/include/asm/kvm_tlb.h84
-rw-r--r--arch/riscv/include/asm/kvm_vcpu_sbi.h12
-rw-r--r--arch/riscv/include/asm/kvm_vmid.h27
-rw-r--r--arch/riscv/include/asm/pgtable-64.h16
-rw-r--r--arch/riscv/include/asm/pgtable-bits.h1
-rw-r--r--arch/riscv/include/asm/pgtable.h22
-rw-r--r--arch/riscv/include/asm/tlbflush.h1
-rw-r--r--arch/riscv/include/uapi/asm/kvm.h1
-rw-r--r--arch/riscv/kernel/cfi.c53
-rw-r--r--arch/riscv/kernel/kexec_elf.c1
-rw-r--r--arch/riscv/kernel/setup.c5
-rw-r--r--arch/riscv/kvm/Kconfig1
-rw-r--r--arch/riscv/kvm/Makefile1
-rw-r--r--arch/riscv/kvm/aia_device.c6
-rw-r--r--arch/riscv/kvm/aia_imsic.c12
-rw-r--r--arch/riscv/kvm/gstage.c338
-rw-r--r--arch/riscv/kvm/main.c3
-rw-r--r--arch/riscv/kvm/mmu.c509
-rw-r--r--arch/riscv/kvm/tlb.c110
-rw-r--r--arch/riscv/kvm/vcpu.c48
-rw-r--r--arch/riscv/kvm/vcpu_exit.c20
-rw-r--r--arch/riscv/kvm/vcpu_onereg.c83
-rw-r--r--arch/riscv/kvm/vcpu_sbi.c49
-rw-r--r--arch/riscv/kvm/vcpu_sbi_replace.c17
-rw-r--r--arch/riscv/kvm/vcpu_sbi_sta.c3
-rw-r--r--arch/riscv/kvm/vcpu_sbi_v01.c25
-rw-r--r--arch/riscv/kvm/vm.c7
-rw-r--r--arch/riscv/kvm/vmid.c25
-rw-r--r--arch/riscv/mm/fault.c8
-rw-r--r--arch/riscv/mm/init.c2
-rw-r--r--arch/riscv/mm/pageattr.c8
-rw-r--r--arch/riscv/mm/ptdump.c3
-rw-r--r--arch/riscv/mm/tlbflush.c5
-rw-r--r--arch/s390/Kconfig5
-rw-r--r--arch/s390/appldata/appldata_base.c1
-rw-r--r--arch/s390/boot/Makefile6
-rw-r--r--arch/s390/boot/als.c2
-rw-r--r--arch/s390/boot/boot.h5
-rw-r--r--arch/s390/boot/ipl_data.c9
-rw-r--r--arch/s390/boot/startup.c4
-rw-r--r--arch/s390/boot/trampoline.S9
-rw-r--r--arch/s390/configs/debug_defconfig2
-rw-r--r--arch/s390/configs/defconfig2
-rw-r--r--arch/s390/crypto/Makefile1
-rw-r--r--arch/s390/crypto/arch_random.c1
-rw-r--r--arch/s390/crypto/hmac_s390.c12
-rw-r--r--arch/s390/crypto/paes_s390.c2
-rw-r--r--arch/s390/crypto/phmac_s390.c1048
-rw-r--r--arch/s390/crypto/sha.h3
-rw-r--r--arch/s390/crypto/sha3_256_s390.c24
-rw-r--r--arch/s390/crypto/sha3_512_s390.c25
-rw-r--r--arch/s390/crypto/sha_common.c1
-rw-r--r--arch/s390/include/asm/alternative.h6
-rw-r--r--arch/s390/include/asm/ap.h2
-rw-r--r--arch/s390/include/asm/asm-const.h2
-rw-r--r--arch/s390/include/asm/cpacf.h4
-rw-r--r--arch/s390/include/asm/cpu.h4
-rw-r--r--arch/s390/include/asm/cpu_mf-insn.h4
-rw-r--r--arch/s390/include/asm/ctlreg.h4
-rw-r--r--arch/s390/include/asm/dwarf.h4
-rw-r--r--arch/s390/include/asm/entry-common.h10
-rw-r--r--arch/s390/include/asm/extmem.h2
-rw-r--r--arch/s390/include/asm/fpu-insn-asm.h4
-rw-r--r--arch/s390/include/asm/fpu-insn.h4
-rw-r--r--arch/s390/include/asm/ftrace.h4
-rw-r--r--arch/s390/include/asm/irq.h4
-rw-r--r--arch/s390/include/asm/jump_label.h4
-rw-r--r--arch/s390/include/asm/kvm_host.h3
-rw-r--r--arch/s390/include/asm/lowcore.h6
-rw-r--r--arch/s390/include/asm/machine.h4
-rw-r--r--arch/s390/include/asm/mem_encrypt.h4
-rw-r--r--arch/s390/include/asm/nmi.h4
-rw-r--r--arch/s390/include/asm/nospec-branch.h4
-rw-r--r--arch/s390/include/asm/nospec-insn.h5
-rw-r--r--arch/s390/include/asm/page.h22
-rw-r--r--arch/s390/include/asm/percpu.h5
-rw-r--r--arch/s390/include/asm/pgtable.h45
-rw-r--r--arch/s390/include/asm/processor.h4
-rw-r--r--arch/s390/include/asm/ptrace.h4
-rw-r--r--arch/s390/include/asm/purgatory.h4
-rw-r--r--arch/s390/include/asm/sclp.h4
-rw-r--r--arch/s390/include/asm/setup.h6
-rw-r--r--arch/s390/include/asm/sigp.h4
-rw-r--r--arch/s390/include/asm/skey.h32
-rw-r--r--arch/s390/include/asm/thread_info.h2
-rw-r--r--arch/s390/include/asm/timex.h13
-rw-r--r--arch/s390/include/asm/tpi.h4
-rw-r--r--arch/s390/include/asm/types.h4
-rw-r--r--arch/s390/include/asm/uaccess.h204
-rw-r--r--arch/s390/include/asm/vdso.h4
-rw-r--r--arch/s390/include/asm/vdso/getrandom.h4
-rw-r--r--arch/s390/include/asm/vdso/gettimeofday.h8
-rw-r--r--arch/s390/include/asm/vdso/time_data.h3
-rw-r--r--arch/s390/include/asm/vdso/vsyscall.h4
-rw-r--r--arch/s390/include/uapi/asm/ptrace.h5
-rw-r--r--arch/s390/include/uapi/asm/schid.h4
-rw-r--r--arch/s390/include/uapi/asm/types.h4
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/cpufeature.c1
-rw-r--r--arch/s390/kernel/crash_dump.c1
-rw-r--r--arch/s390/kernel/ctlreg.c1
-rw-r--r--arch/s390/kernel/debug.c2
-rw-r--r--arch/s390/kernel/dis.c1
-rw-r--r--arch/s390/kernel/early.c4
-rw-r--r--arch/s390/kernel/facility.c1
-rw-r--r--arch/s390/kernel/fpu.c2
-rw-r--r--arch/s390/kernel/nmi.c76
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c1
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c1
-rw-r--r--arch/s390/kernel/perf_event.c1
-rw-r--r--arch/s390/kernel/perf_pai_crypto.c3
-rw-r--r--arch/s390/kernel/perf_pai_ext.c1
-rw-r--r--arch/s390/kernel/process.c1
-rw-r--r--arch/s390/kernel/setup.c8
-rw-r--r--arch/s390/kernel/skey.c48
-rw-r--r--arch/s390/kernel/smp.c11
-rw-r--r--arch/s390/kernel/sthyi.c2
-rw-r--r--arch/s390/kernel/time.c121
-rw-r--r--arch/s390/kernel/unwind_bc.c2
-rw-r--r--arch/s390/kernel/uv.c1
-rw-r--r--arch/s390/kernel/vmlinux.lds.S7
-rw-r--r--arch/s390/kvm/interrupt.c1
-rw-r--r--arch/s390/kvm/kvm-s390.c52
-rw-r--r--arch/s390/kvm/pv.c2
-rw-r--r--arch/s390/kvm/vsie.c17
-rw-r--r--arch/s390/lib/delay.c1
-rw-r--r--arch/s390/lib/uaccess.c188
-rw-r--r--arch/s390/mm/dump_pagetables.c2
-rw-r--r--arch/s390/mm/gmap.c1
-rw-r--r--arch/s390/mm/gmap_helpers.c2
-rw-r--r--arch/s390/mm/pgalloc.c5
-rw-r--r--arch/s390/mm/pgtable.c1
-rw-r--r--arch/s390/mm/vmem.c5
-rw-r--r--arch/s390/net/bpf_jit.h55
-rw-r--r--arch/s390/net/bpf_jit_comp.c113
-rw-r--r--arch/s390/net/pnet.c1
-rw-r--r--arch/s390/pci/pci_bus.c1
-rw-r--r--arch/s390/pci/pci_kvm_hook.c2
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/Makefile10
-rw-r--r--arch/sh/boot/compressed/Makefile4
-rw-r--r--arch/sh/boot/romimage/Makefile4
-rw-r--r--arch/sh/configs/titan_defconfig1
-rw-r--r--arch/sh/kernel/machine_kexec.c2
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/include/asm/hugetlb.h5
-rw-r--r--arch/sparc/include/asm/mman.h4
-rw-r--r--arch/sparc/include/uapi/asm/socket.h3
-rw-r--r--arch/sparc/mm/hugetlbpage.c119
-rw-r--r--arch/sparc/mm/init_64.c2
-rw-r--r--arch/um/Kconfig5
-rw-r--r--arch/um/drivers/Kconfig1
-rw-r--r--arch/um/drivers/rtc_user.c2
-rw-r--r--arch/um/drivers/vfio_kern.c62
-rw-r--r--arch/um/drivers/virt-pci.c45
-rw-r--r--arch/um/drivers/virtio_pcidev.c8
-rw-r--r--arch/um/include/asm/cpufeature.h4
-rw-r--r--arch/um/include/asm/current.h4
-rw-r--r--arch/um/include/asm/mmu_context.h9
-rw-r--r--arch/um/include/asm/page.h4
-rw-r--r--arch/um/include/asm/ptrace-generic.h2
-rw-r--r--arch/um/include/asm/thread_info.h8
-rw-r--r--arch/um/include/shared/as-layout.h2
-rw-r--r--arch/um/include/shared/skas/mm_id.h2
-rw-r--r--arch/um/include/shared/skas/skas.h1
-rw-r--r--arch/um/kernel/exec.c2
-rw-r--r--arch/um/kernel/process.c20
-rw-r--r--arch/um/kernel/ptrace.c9
-rw-r--r--arch/um/kernel/skas/mmu.c4
-rw-r--r--arch/um/kernel/skas/process.c2
-rw-r--r--arch/um/kernel/skas/syscall.c11
-rw-r--r--arch/um/os-Linux/skas/process.c35
-rw-r--r--arch/x86/Kconfig3
-rw-r--r--arch/x86/boot/cpuflags.c13
-rw-r--r--arch/x86/boot/startup/sev-shared.c7
-rw-r--r--arch/x86/coco/sev/core.c21
-rw-r--r--arch/x86/crypto/aegis128-aesni-glue.c40
-rw-r--r--arch/x86/crypto/aria_aesni_avx2_glue.c1
-rw-r--r--arch/x86/crypto/aria_aesni_avx_glue.c1
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c1
-rw-r--r--arch/x86/crypto/camellia_glue.c1
-rw-r--r--arch/x86/crypto/curve25519-x86_64.c1
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c1
-rw-r--r--arch/x86/crypto/sm4_aesni_avx_glue.c1
-rw-r--r--arch/x86/crypto/twofish_glue.c1
-rw-r--r--arch/x86/crypto/twofish_glue_3way.c1
-rw-r--r--arch/x86/include/asm/apic.h66
-rw-r--r--arch/x86/include/asm/bug.h56
-rw-r--r--arch/x86/include/asm/cfi.h10
-rw-r--r--arch/x86/include/asm/cpufeatures.h1
-rw-r--r--arch/x86/include/asm/hw_irq.h12
-rw-r--r--arch/x86/include/asm/init.h2
-rw-r--r--arch/x86/include/asm/intel-family.h5
-rw-r--r--arch/x86/include/asm/irq_remapping.h17
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h5
-rw-r--r--arch/x86/include/asm/kvm_host.h76
-rw-r--r--arch/x86/include/asm/msr-index.h1
-rw-r--r--arch/x86/include/asm/pgtable.h51
-rw-r--r--arch/x86/include/asm/pgtable_types.h5
-rw-r--r--arch/x86/include/asm/sev.h19
-rw-r--r--arch/x86/include/asm/svm.h13
-rw-r--r--arch/x86/include/asm/tlbflush.h5
-rw-r--r--arch/x86/kernel/alternative.c40
-rw-r--r--arch/x86/kernel/cpu/scattered.c1
-rw-r--r--arch/x86/kernel/cpu/sgx/encl.c8
-rw-r--r--arch/x86/kernel/cpu/sgx/encl.h2
-rw-r--r--arch/x86/kernel/crash.c26
-rw-r--r--arch/x86/kernel/ftrace.c2
-rw-r--r--arch/x86/kernel/irq.c63
-rw-r--r--arch/x86/kernel/kprobes/core.c18
-rw-r--r--arch/x86/kernel/setup.c9
-rw-r--r--arch/x86/kvm/Kconfig10
-rw-r--r--arch/x86/kvm/Makefile7
-rw-r--r--arch/x86/kvm/cpuid.c1
-rw-r--r--arch/x86/kvm/hyperv.c10
-rw-r--r--arch/x86/kvm/hyperv.h3
-rw-r--r--arch/x86/kvm/i8254.c94
-rw-r--r--arch/x86/kvm/i8254.h17
-rw-r--r--arch/x86/kvm/i8259.c17
-rw-r--r--arch/x86/kvm/ioapic.c55
-rw-r--r--arch/x86/kvm/ioapic.h24
-rw-r--r--arch/x86/kvm/irq.c560
-rw-r--r--arch/x86/kvm/irq.h35
-rw-r--r--arch/x86/kvm/irq_comm.c469
-rw-r--r--arch/x86/kvm/lapic.c104
-rw-r--r--arch/x86/kvm/lapic.h26
-rw-r--r--arch/x86/kvm/mmu/mmu.c75
-rw-r--r--arch/x86/kvm/mmu/mmu_internal.h3
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h8
-rw-r--r--arch/x86/kvm/mmu/spte.c43
-rw-r--r--arch/x86/kvm/mmu/spte.h10
-rw-r--r--arch/x86/kvm/svm/avic.c688
-rw-r--r--arch/x86/kvm/svm/nested.c128
-rw-r--r--arch/x86/kvm/svm/sev.c149
-rw-r--r--arch/x86/kvm/svm/svm.c506
-rw-r--r--arch/x86/kvm/svm/svm.h137
-rw-r--r--arch/x86/kvm/trace.h99
-rw-r--r--arch/x86/kvm/vmx/capabilities.h1
-rw-r--r--arch/x86/kvm/vmx/common.h2
-rw-r--r--arch/x86/kvm/vmx/main.c61
-rw-r--r--arch/x86/kvm/vmx/nested.c27
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c8
-rw-r--r--arch/x86/kvm/vmx/posted_intr.c140
-rw-r--r--arch/x86/kvm/vmx/posted_intr.h10
-rw-r--r--arch/x86/kvm/vmx/run_flags.h10
-rw-r--r--arch/x86/kvm/vmx/tdx.c71
-rw-r--r--arch/x86/kvm/vmx/tdx.h1
-rw-r--r--arch/x86/kvm/vmx/vmx.c296
-rw-r--r--arch/x86/kvm/vmx/vmx.h57
-rw-r--r--arch/x86/kvm/vmx/x86_ops.h16
-rw-r--r--arch/x86/kvm/x86.c389
-rw-r--r--arch/x86/kvm/x86.h40
-rw-r--r--arch/x86/mm/init.c24
-rw-r--r--arch/x86/mm/pat/memtype.c1
-rw-r--r--arch/x86/mm/pgprot.c2
-rw-r--r--arch/x86/net/bpf_jit_comp.c10
-rw-r--r--arch/x86/um/asm/syscall.h2
-rw-r--r--arch/x86/um/shared/sysdep/ptrace.h12
-rw-r--r--arch/x86/um/shared/sysdep/syscalls.h6
-rw-r--r--arch/x86/um/shared/sysdep/syscalls_32.h14
-rw-r--r--arch/x86/um/shared/sysdep/syscalls_64.h28
-rw-r--r--arch/x86/um/tls_32.c2
-rw-r--r--arch/xtensa/include/asm/bootparam.h2
-rw-r--r--arch/xtensa/include/asm/cmpxchg.h4
-rw-r--r--arch/xtensa/include/asm/coprocessor.h8
-rw-r--r--arch/xtensa/include/asm/current.h2
-rw-r--r--arch/xtensa/include/asm/ftrace.h8
-rw-r--r--arch/xtensa/include/asm/initialize_mmu.h4
-rw-r--r--arch/xtensa/include/asm/jump_label.h4
-rw-r--r--arch/xtensa/include/asm/kasan.h2
-rw-r--r--arch/xtensa/include/asm/kmem_layout.h2
-rw-r--r--arch/xtensa/include/asm/page.h4
-rw-r--r--arch/xtensa/include/asm/pgtable.h8
-rw-r--r--arch/xtensa/include/asm/processor.h4
-rw-r--r--arch/xtensa/include/asm/ptrace.h6
-rw-r--r--arch/xtensa/include/asm/signal.h4
-rw-r--r--arch/xtensa/include/asm/thread_info.h8
-rw-r--r--arch/xtensa/include/asm/tlbflush.h4
-rw-r--r--arch/xtensa/include/uapi/asm/ptrace.h2
-rw-r--r--arch/xtensa/include/uapi/asm/signal.h6
-rw-r--r--arch/xtensa/include/uapi/asm/types.h4
-rw-r--r--block/bfq-iosched.c69
-rw-r--r--block/bfq-iosched.h13
-rw-r--r--block/bio.c4
-rw-r--r--block/blk-cgroup.c6
-rw-r--r--block/blk-ioc.c16
-rw-r--r--block/blk-mq-sched.c223
-rw-r--r--block/blk-mq-sched.h12
-rw-r--r--block/blk-mq.c16
-rw-r--r--block/blk-settings.c33
-rw-r--r--block/blk-sysfs.c14
-rw-r--r--block/blk-wbt.c15
-rw-r--r--block/blk.h5
-rw-r--r--block/elevator.c38
-rw-r--r--block/elevator.h16
-rw-r--r--block/genhd.c2
-rw-r--r--block/kyber-iosched.c20
-rw-r--r--block/mq-deadline.c30
-rw-r--r--crypto/ahash.c39
-rw-r--r--crypto/async_tx/async_pq.c2
-rw-r--r--crypto/async_tx/async_raid6_recov.c4
-rw-r--r--crypto/cryptd.c6
-rw-r--r--crypto/crypto_engine.c55
-rw-r--r--crypto/deflate.c7
-rw-r--r--crypto/jitterentropy-kcapi.c9
-rw-r--r--crypto/jitterentropy.c2
-rw-r--r--crypto/krb5/selftest.c1
-rw-r--r--crypto/pcrypt.c7
-rw-r--r--crypto/testmgr.c39
-rw-r--r--crypto/zstd.c356
-rw-r--r--drivers/Kconfig6
-rw-r--r--drivers/accel/amdxdna/aie2_ctx.c7
-rw-r--r--drivers/accel/amdxdna/amdxdna_gem.c193
-rw-r--r--drivers/accel/amdxdna/amdxdna_gem.h3
-rw-r--r--drivers/accel/drm_accel.c16
-rw-r--r--drivers/accel/habanalabs/common/device.c25
-rw-r--r--drivers/accel/habanalabs/common/memory.c23
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c1
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h15
-rw-r--r--drivers/accel/ivpu/ivpu_hw.c4
-rw-r--r--drivers/accel/ivpu/ivpu_hw_ip.c1
-rw-r--r--drivers/accel/ivpu/ivpu_job.c81
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c15
-rw-r--r--drivers/accel/qaic/Makefile1
-rw-r--r--drivers/accel/qaic/qaic.h10
-rw-r--r--drivers/accel/qaic/qaic_data.c1
-rw-r--r--drivers/accel/qaic/qaic_drv.c6
-rw-r--r--drivers/accel/qaic/qaic_ras.c642
-rw-r--r--drivers/accel/qaic/qaic_ras.h10
-rw-r--r--drivers/acpi/ec.c10
-rw-r--r--drivers/acpi/numa/hmat.c8
-rw-r--r--drivers/acpi/processor_perflib.c5
-rw-r--r--drivers/ata/ata_piix.c1
-rw-r--r--drivers/ata/libahci.c1
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/libata-eh.c9
-rw-r--r--drivers/ata/libata-sata.c53
-rw-r--r--drivers/ata/libata-scsi.c49
-rw-r--r--drivers/ata/pata_macio.c2
-rw-r--r--drivers/ata/pata_pdc2027x.c12
-rw-r--r--drivers/atm/lanai.c2
-rw-r--r--drivers/base/memory.c21
-rw-r--r--drivers/base/node.c121
-rw-r--r--drivers/base/power/main.c14
-rw-r--r--drivers/base/regmap/regmap-irq.c30
-rw-r--r--drivers/bcma/driver_gpio.c2
-rw-r--r--drivers/block/drbd/drbd_int.h39
-rw-r--r--drivers/block/drbd/drbd_main.c59
-rw-r--r--drivers/block/drbd/drbd_receiver.c264
-rw-r--r--drivers/block/drbd/drbd_worker.c56
-rw-r--r--drivers/block/null_blk/main.c2
-rw-r--r--drivers/block/ublk_drv.c28
-rw-r--r--drivers/block/zloop.c3
-rw-r--r--drivers/bluetooth/btintel.c6
-rw-r--r--drivers/bluetooth/btintel.h2
-rw-r--r--drivers/bluetooth/btintel_pcie.c347
-rw-r--r--drivers/bluetooth/btintel_pcie.h4
-rw-r--r--drivers/bluetooth/btmtkuart.c2
-rw-r--r--drivers/bluetooth/btnxpuart.c131
-rw-r--r--drivers/bluetooth/btrtl.c2
-rw-r--r--drivers/bluetooth/btusb.c145
-rw-r--r--drivers/bluetooth/hci_bcm4377.c2
-rw-r--r--drivers/bluetooth/hci_intel.c10
-rw-r--r--drivers/bluetooth/hci_qca.c1
-rw-r--r--drivers/char/hw_random/atmel-rng.c1
-rw-r--r--drivers/char/hw_random/cctrng.c1
-rw-r--r--drivers/char/hw_random/mtk-rng.c5
-rw-r--r--drivers/char/hw_random/npcm-rng.c1
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c1
-rw-r--r--drivers/char/hw_random/rockchip-rng.c3
-rw-r--r--drivers/char/hw_random/stm32-rng.c1
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c8
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c4
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c59
-rw-r--r--drivers/clk/Kconfig1
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/at91/sam9x7.c20
-rw-r--r--drivers/clk/baikal-t1/clk-ccu-div.c2
-rw-r--r--drivers/clk/baikal-t1/clk-ccu-pll.c2
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c19
-rw-r--r--drivers/clk/bcm/clk-bcm53573-ilp.c2
-rw-r--r--drivers/clk/berlin/berlin2-avpll.c2
-rw-r--r--drivers/clk/clk-asm9260.c4
-rw-r--r--drivers/clk/clk-ast2600.c2
-rw-r--r--drivers/clk/clk-axi-clkgen.c159
-rw-r--r--drivers/clk/clk-clps711x.c2
-rw-r--r--drivers/clk/clk-eyeq.c2
-rw-r--r--drivers/clk/clk-gate.c2
-rw-r--r--drivers/clk/clk-hsdk-pll.c2
-rw-r--r--drivers/clk/clk-pwm.c49
-rw-r--r--drivers/clk/clk-s2mps11.c2
-rw-r--r--drivers/clk/clk-scmi.c2
-rw-r--r--drivers/clk/clk-si5351.c6
-rw-r--r--drivers/clk/clk-si544.c2
-rw-r--r--drivers/clk/clk-si570.c4
-rw-r--r--drivers/clk/clk-sp7021.c2
-rw-r--r--drivers/clk/clk-stm32f4.c2
-rw-r--r--drivers/clk/clk-versaclock5.c2
-rw-r--r--drivers/clk/clk-versaclock7.c2
-rw-r--r--drivers/clk/clk.c12
-rw-r--r--drivers/clk/clk_test.c226
-rw-r--r--drivers/clk/davinci/pll.h2
-rw-r--r--drivers/clk/davinci/psc.c5
-rw-r--r--drivers/clk/hisilicon/clkgate-separated.c16
-rw-r--r--drivers/clk/imx/clk-busy.c8
-rw-r--r--drivers/clk/imx/clk-composite-8m.c16
-rw-r--r--drivers/clk/imx/clk-composite-93.c7
-rw-r--r--drivers/clk/imx/clk-cpu.c10
-rw-r--r--drivers/clk/imx/clk-fixup-div.c10
-rw-r--r--drivers/clk/imx/clk-fixup-mux.c2
-rw-r--r--drivers/clk/imx/clk-frac-pll.c20
-rw-r--r--drivers/clk/imx/clk-fracn-gppll.c17
-rw-r--r--drivers/clk/imx/clk-gate-exclusive.c2
-rw-r--r--drivers/clk/imx/clk-imx5.c2
-rw-r--r--drivers/clk/imx/clk-imx8-acm.c2
-rw-r--r--drivers/clk/imx/clk-imx8qxp-lpcg.c1
-rw-r--r--drivers/clk/imx/clk-imx95-blk-ctl.c93
-rw-r--r--drivers/clk/imx/clk-pfd.c18
-rw-r--r--drivers/clk/imx/clk-pll14xx.c29
-rw-r--r--drivers/clk/imx/clk-pllv2.c23
-rw-r--r--drivers/clk/imx/clk-pllv3.c72
-rw-r--r--drivers/clk/imx/clk-pllv4.c29
-rw-r--r--drivers/clk/imx/clk-scu.c39
-rw-r--r--drivers/clk/ingenic/cgu.h2
-rw-r--r--drivers/clk/kunit_clk_hw_get_dev_of_node.dtso10
-rw-r--r--drivers/clk/meson/Kconfig4
-rw-r--r--drivers/clk/meson/a1-peripherals.c194
-rw-r--r--drivers/clk/meson/a1-peripherals.h46
-rw-r--r--drivers/clk/meson/a1-pll.c28
-rw-r--r--drivers/clk/meson/a1-pll.h28
-rw-r--r--drivers/clk/meson/axg-aoclk.c22
-rw-r--r--drivers/clk/meson/axg-audio.c603
-rw-r--r--drivers/clk/meson/axg-audio.h70
-rw-r--r--drivers/clk/meson/axg.c220
-rw-r--r--drivers/clk/meson/axg.h105
-rw-r--r--drivers/clk/meson/c3-peripherals.c210
-rw-r--r--drivers/clk/meson/c3-pll.c32
-rw-r--r--drivers/clk/meson/clk-cpu-dyndiv.c1
-rw-r--r--drivers/clk/meson/clk-dualdiv.c2
-rw-r--r--drivers/clk/meson/clk-mpll.c6
-rw-r--r--drivers/clk/meson/clk-phase.c11
-rw-r--r--drivers/clk/meson/clk-pll.c7
-rw-r--r--drivers/clk/meson/clk-regmap.c49
-rw-r--r--drivers/clk/meson/clk-regmap.h4
-rw-r--r--drivers/clk/meson/g12a-aoclk.c34
-rw-r--r--drivers/clk/meson/g12a.c378
-rw-r--r--drivers/clk/meson/g12a.h130
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c19
-rw-r--r--drivers/clk/meson/gxbb.c494
-rw-r--r--drivers/clk/meson/gxbb.h115
-rw-r--r--drivers/clk/meson/meson-aoclk.c5
-rw-r--r--drivers/clk/meson/meson-aoclk.h2
-rw-r--r--drivers/clk/meson/meson-eeclk.c4
-rw-r--r--drivers/clk/meson/meson-eeclk.h2
-rw-r--r--drivers/clk/meson/meson8-ddr.c9
-rw-r--r--drivers/clk/meson/meson8b.c267
-rw-r--r--drivers/clk/meson/meson8b.h80
-rw-r--r--drivers/clk/meson/s4-peripherals.c388
-rw-r--r--drivers/clk/meson/s4-peripherals.h56
-rw-r--r--drivers/clk/meson/s4-pll.c60
-rw-r--r--drivers/clk/meson/s4-pll.h38
-rw-r--r--drivers/clk/meson/sclk-div.c5
-rw-r--r--drivers/clk/meson/vclk.c2
-rw-r--r--drivers/clk/meson/vid-pll-div.c1
-rw-r--r--drivers/clk/microchip/clk-core.c2
-rw-r--r--drivers/clk/mmp/clk-gate.c2
-rw-r--r--drivers/clk/mvebu/armada-xp.c5
-rw-r--r--drivers/clk/mxs/clk-div.c2
-rw-r--r--drivers/clk/nuvoton/Kconfig4
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-ccu.c2
-rw-r--r--drivers/clk/qcom/Kconfig104
-rw-r--r--drivers/clk/qcom/Makefile11
-rw-r--r--drivers/clk/qcom/camcc-milos.c2161
-rw-r--r--drivers/clk/qcom/camcc-qcs615.c1597
-rw-r--r--drivers/clk/qcom/camcc-sc8180x.c2889
-rw-r--r--drivers/clk/qcom/camcc-sm8450.c89
-rw-r--r--drivers/clk/qcom/camcc-sm8550.c85
-rw-r--r--drivers/clk/qcom/camcc-sm8650.c83
-rw-r--r--drivers/clk/qcom/camcc-x1e80100.c67
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c249
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h4
-rw-r--r--drivers/clk/qcom/clk-rpm.c10
-rw-r--r--drivers/clk/qcom/clk-rpmh.c34
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c8
-rw-r--r--drivers/clk/qcom/clk-spmi-pmic-div.c12
-rw-r--r--drivers/clk/qcom/common.c91
-rw-r--r--drivers/clk/qcom/common.h12
-rw-r--r--drivers/clk/qcom/dispcc-milos.c974
-rw-r--r--drivers/clk/qcom/dispcc-qcs615.c792
-rw-r--r--drivers/clk/qcom/dispcc-sm8750.c10
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c14
-rw-r--r--drivers/clk/qcom/gcc-ipq5018.c4
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c6
-rw-r--r--drivers/clk/qcom/gcc-milos.c3225
-rw-r--r--drivers/clk/qcom/gcc-qcm2290.c1
-rw-r--r--drivers/clk/qcom/gcc-sm8150.c6
-rw-r--r--drivers/clk/qcom/gcc-x1e80100.c2
-rw-r--r--drivers/clk/qcom/gpucc-milos.c562
-rw-r--r--drivers/clk/qcom/gpucc-qcs615.c531
-rw-r--r--drivers/clk/qcom/ipq-cmn-pll.c50
-rw-r--r--drivers/clk/qcom/lpassaudiocc-sc7280.c2
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7180.c2
-rw-r--r--drivers/clk/qcom/tcsrcc-sm8650.c8
-rw-r--r--drivers/clk/qcom/videocc-milos.c403
-rw-r--r--drivers/clk/qcom/videocc-qcs615.c338
-rw-r--r--drivers/clk/qcom/videocc-sc7180.c2
-rw-r--r--drivers/clk/qcom/videocc-sdm845.c4
-rw-r--r--drivers/clk/qcom/videocc-sm6350.c355
-rw-r--r--drivers/clk/qcom/videocc-sm7150.c4
-rw-r--r--drivers/clk/qcom/videocc-sm8150.c4
-rw-r--r--drivers/clk/qcom/videocc-sm8450.c62
-rw-r--r--drivers/clk/qcom/videocc-sm8550.c91
-rw-r--r--drivers/clk/renesas/Kconfig10
-rw-r--r--drivers/clk/renesas/Makefile2
-rw-r--r--drivers/clk/renesas/r7s9210-cpg-mssr.c7
-rw-r--r--drivers/clk/renesas/r8a77970-cpg-mssr.c8
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c132
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c168
-rw-r--r--drivers/clk/renesas/r9a08g045-cpg.c229
-rw-r--r--drivers/clk/renesas/r9a09g011-cpg.c116
-rw-r--r--drivers/clk/renesas/r9a09g047-cpg.c72
-rw-r--r--drivers/clk/renesas/r9a09g056-cpg.c202
-rw-r--r--drivers/clk/renesas/r9a09g057-cpg.c127
-rw-r--r--drivers/clk/renesas/r9a09g077-cpg.c256
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.c5
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.h3
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c6
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h3
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.c6
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.h3
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c193
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h32
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c509
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.h66
-rw-r--r--drivers/clk/renesas/rzv2h-cpg.c130
-rw-r--r--drivers/clk/renesas/rzv2h-cpg.h49
-rw-r--r--drivers/clk/rockchip/clk-cpu.c6
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c4
-rw-r--r--drivers/clk/rockchip/clk-pll.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3568.c1
-rw-r--r--drivers/clk/rockchip/clk.h2
-rw-r--r--drivers/clk/samsung/clk-cpu.c2
-rw-r--r--drivers/clk/samsung/clk-exynos850.c2
-rw-r--r--drivers/clk/samsung/clk-exynosautov920.c72
-rw-r--r--drivers/clk/samsung/clk-gs101.c4
-rw-r--r--drivers/clk/samsung/clk-pll.c2
-rw-r--r--drivers/clk/sophgo/clk-sg2042-clkgen.c2
-rw-r--r--drivers/clk/sophgo/clk-sg2042-pll.c4
-rw-r--r--drivers/clk/spacemit/Kconfig1
-rw-r--r--drivers/clk/spacemit/ccu-k1.c242
-rw-r--r--drivers/clk/spacemit/ccu_mix.h11
-rw-r--r--drivers/clk/spacemit/ccu_pll.c2
-rw-r--r--drivers/clk/spear/spear1340_clock.c2
-rw-r--r--drivers/clk/sprd/gate.h2
-rw-r--r--drivers/clk/sprd/ums512-clk.c4
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-sys.c2
-rw-r--r--drivers/clk/stm32/Kconfig8
-rw-r--r--drivers/clk/stm32/clk-stm32mp1.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c22
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_gate.c14
-rw-r--r--drivers/clk/sunxi-ng/ccu_nk.c14
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c23
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c43
-rw-r--r--drivers/clk/tegra/clk-periph.c6
-rw-r--r--drivers/clk/tegra/clk-tegra210.c2
-rw-r--r--drivers/clk/tegra/clk.h1
-rw-r--r--drivers/clk/thead/clk-th1520-ap.c109
-rw-r--r--drivers/clk/ti/autoidle.c2
-rw-r--r--drivers/clk/ti/clk-43xx.c2
-rw-r--r--drivers/clk/ti/clk.c27
-rw-r--r--drivers/clk/ti/mux.c2
-rw-r--r--drivers/clk/versatile/clk-icst.c2
-rw-r--r--drivers/clk/visconti/pll.c2
-rw-r--r--drivers/clk/xilinx/clk-xlnx-clock-wizard.c2
-rw-r--r--drivers/clk/xilinx/xlnx_vcu.c33
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/intel_pstate.c1
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c4
-rw-r--r--drivers/cpufreq/powernv-trace.h44
-rw-r--r--drivers/cpufreq/rcpufreq_dt.rs5
-rw-r--r--drivers/cpuidle/governors/menu.c21
-rw-r--r--drivers/crypto/Kconfig13
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c15
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c6
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h2
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-hash.c800
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.h28
-rw-r--r--drivers/crypto/atmel-aes.c1
-rw-r--r--drivers/crypto/atmel-sha.c1
-rw-r--r--drivers/crypto/caam/Makefile4
-rw-r--r--drivers/crypto/caam/ctrl.c13
-rw-r--r--drivers/crypto/caam/debugfs.c2
-rw-r--r--drivers/crypto/caam/debugfs.h2
-rw-r--r--drivers/crypto/caam/intern.h5
-rw-r--r--drivers/crypto/caam/jr.c3
-rw-r--r--drivers/crypto/caam/qi.c5
-rw-r--r--drivers/crypto/ccp/ccp-debugfs.c3
-rw-r--r--drivers/crypto/ccp/ccp-ops.c163
-rw-r--r--drivers/crypto/ccp/sev-dev.c26
-rw-r--r--drivers/crypto/ccp/sp-pci.c1
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c54
-rw-r--r--drivers/crypto/ccree/cc_cipher.c4
-rw-r--r--drivers/crypto/ccree/cc_hash.c30
-rw-r--r--drivers/crypto/ccree/cc_pm.c1
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c8
-rw-r--r--drivers/crypto/hisilicon/qm.c1
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h63
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c587
-rw-r--r--drivers/crypto/hisilicon/sgl.c15
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c13
-rw-r--r--drivers/crypto/img-hash.c2
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c8
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c8
-rw-r--r--drivers/crypto/intel/keembay/ocs-aes.c4
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c18
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c14
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c129
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h22
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile4
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_accel_devices.h40
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_aer.c2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_bank_state.c238
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_bank_state.h49
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_common.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.c45
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.h13
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_common_drv.h2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c229
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c105
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h24
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c124
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c146
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h198
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_init.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_isr.c5
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c52
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h36
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.c86
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.h11
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl_admin.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sriov.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sysfs.c2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c3
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_transport_debug.c21
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_algs.c12
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_bl.c6
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_compression.c8
-rw-r--r--drivers/crypto/marvell/cesa/cipher.c4
-rw-r--r--drivers/crypto/marvell/cesa/hash.c10
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h5
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h128
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.h3
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c13
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c55
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c6
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c26
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c13
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c14
-rw-r--r--drivers/crypto/omap-aes-gcm.c1
-rw-r--r--drivers/crypto/omap-aes.c1
-rw-r--r--drivers/crypto/omap-des.c1
-rw-r--r--drivers/crypto/omap-sham.c1
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c1
-rw-r--r--drivers/crypto/stm32/stm32-hash.c1
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h2
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c2
-rw-r--r--drivers/crypto/virtio/virtio_crypto_mgr.c36
-rw-r--r--drivers/cxl/acpi.c59
-rw-r--r--drivers/cxl/core/Makefile1
-rw-r--r--drivers/cxl/core/acpi.c11
-rw-r--r--drivers/cxl/core/cdat.c6
-rw-r--r--drivers/cxl/core/core.h36
-rw-r--r--drivers/cxl/core/edac.c55
-rw-r--r--drivers/cxl/core/hdm.c125
-rw-r--r--drivers/cxl/core/mbox.c37
-rw-r--r--drivers/cxl/core/mce.h2
-rw-r--r--drivers/cxl/core/memdev.c52
-rw-r--r--drivers/cxl/core/port.c29
-rw-r--r--drivers/cxl/core/region.c502
-rw-r--r--drivers/cxl/core/trace.h133
-rw-r--r--drivers/cxl/cxl.h22
-rw-r--r--drivers/cxl/cxlmem.h12
-rw-r--r--drivers/cxl/pci.c2
-rw-r--r--drivers/dax/device.c23
-rw-r--r--drivers/dax/hmem/hmem.c1
-rw-r--r--drivers/dax/kmem.c1
-rw-r--r--drivers/dax/pmem.c1
-rw-r--r--drivers/dax/super.c3
-rw-r--r--drivers/dma-buf/Kconfig1
-rw-r--r--drivers/dma-buf/dma-fence-chain.c7
-rw-r--r--drivers/dma-buf/dma-fence.c167
-rw-r--r--drivers/dma-buf/heaps/Kconfig10
-rw-r--r--drivers/dma-buf/heaps/cma_heap.c36
-rw-r--r--drivers/dma-buf/heaps/system_heap.c43
-rw-r--r--drivers/dma-buf/sw_sync.c2
-rw-r--r--drivers/dma-buf/sync_file.c24
-rw-r--r--drivers/dma-buf/udmabuf.c23
-rw-r--r--drivers/dma/Kconfig12
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/cv1800b-dmamux.c259
-rw-r--r--drivers/dma/dma-axi-dmac.c2
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c12
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.c5
-rw-r--r--drivers/dma/fsl-qdma.c3
-rw-r--r--drivers/dma/idxd/init.c1
-rw-r--r--drivers/dma/idxd/registers.h60
-rw-r--r--drivers/dma/mmp_tdma.c2
-rw-r--r--drivers/dma/mv_xor.c21
-rw-r--r--drivers/dma/nbpfaxi.c13
-rw-r--r--drivers/dma/qcom/gpi.c11
-rw-r--r--drivers/dma/sh/Kconfig2
-rw-r--r--drivers/dma/stm32/stm32-dma.c12
-rw-r--r--drivers/dma/stm32/stm32-dma3.c10
-rw-r--r--drivers/dma/stm32/stm32-mdma.c8
-rw-r--r--drivers/dma/sun4i-dma.c46
-rw-r--r--drivers/dma/ti/Kconfig4
-rw-r--r--drivers/dpll/Kconfig6
-rw-r--r--drivers/dpll/Makefile2
-rw-r--r--drivers/dpll/dpll_core.c45
-rw-r--r--drivers/dpll/dpll_core.h3
-rw-r--r--drivers/dpll/dpll_netlink.c259
-rw-r--r--drivers/dpll/dpll_netlink.h2
-rw-r--r--drivers/dpll/dpll_nl.c15
-rw-r--r--drivers/dpll/dpll_nl.h1
-rw-r--r--drivers/dpll/zl3073x/Kconfig39
-rw-r--r--drivers/dpll/zl3073x/Makefile10
-rw-r--r--drivers/dpll/zl3073x/core.c1030
-rw-r--r--drivers/dpll/zl3073x/core.h383
-rw-r--r--drivers/dpll/zl3073x/devlink.c259
-rw-r--r--drivers/dpll/zl3073x/devlink.h12
-rw-r--r--drivers/dpll/zl3073x/dpll.c2318
-rw-r--r--drivers/dpll/zl3073x/dpll.h46
-rw-r--r--drivers/dpll/zl3073x/i2c.c76
-rw-r--r--drivers/dpll/zl3073x/prop.c358
-rw-r--r--drivers/dpll/zl3073x/prop.h34
-rw-r--r--drivers/dpll/zl3073x/regs.h263
-rw-r--r--drivers/dpll/zl3073x/spi.c76
-rw-r--r--drivers/firewire/core-card.c59
-rw-r--r--drivers/firewire/core-cdev.c3
-rw-r--r--drivers/firewire/core-device.c15
-rw-r--r--drivers/firewire/core-transaction.c98
-rw-r--r--drivers/firewire/net.c4
-rw-r--r--drivers/firewire/ohci.c162
-rw-r--r--drivers/firmware/efi/Kconfig8
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/efi.c8
-rw-r--r--drivers/firmware/efi/libstub/Makefile.zboot2
-rw-r--r--drivers/firmware/efi/libstub/printk.c4
-rw-r--r--drivers/firmware/efi/ovmf-debug-log.c111
-rw-r--r--drivers/gpio/gpio-74x164.c4
-rw-r--r--drivers/gpio/gpio-adnp.c2
-rw-r--r--drivers/gpio/gpio-adp5520.c2
-rw-r--r--drivers/gpio/gpio-adp5585.c2
-rw-r--r--drivers/gpio/gpio-aggregator.c4
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c2
-rw-r--r--drivers/gpio/gpio-altera.c2
-rw-r--r--drivers/gpio/gpio-amd-fch.c2
-rw-r--r--drivers/gpio/gpio-amd8111.c2
-rw-r--r--drivers/gpio/gpio-arizona.c2
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c2
-rw-r--r--drivers/gpio/gpio-aspeed.c2
-rw-r--r--drivers/gpio/gpio-bcm-kona.c2
-rw-r--r--drivers/gpio/gpio-bd71815.c2
-rw-r--r--drivers/gpio/gpio-bd71828.c2
-rw-r--r--drivers/gpio/gpio-bd9571mwv.c2
-rw-r--r--drivers/gpio/gpio-bt8xx.c2
-rw-r--r--drivers/gpio/gpio-cgbc.c2
-rw-r--r--drivers/gpio/gpio-creg-snps.c2
-rw-r--r--drivers/gpio/gpio-cros-ec.c2
-rw-r--r--drivers/gpio/gpio-crystalcove.c2
-rw-r--r--drivers/gpio/gpio-cs5535.c2
-rw-r--r--drivers/gpio/gpio-da9052.c2
-rw-r--r--drivers/gpio/gpio-da9055.c2
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-dln2.c2
-rw-r--r--drivers/gpio/gpio-eic-sprd.c2
-rw-r--r--drivers/gpio/gpio-em.c2
-rw-r--r--drivers/gpio/gpio-exar.c2
-rw-r--r--drivers/gpio/gpio-f7188x.c2
-rw-r--r--drivers/gpio/gpio-graniterapids.c2
-rw-r--r--drivers/gpio/gpio-gw-pld.c2
-rw-r--r--drivers/gpio/gpio-htc-egpio.c2
-rw-r--r--drivers/gpio/gpio-ich.c2
-rw-r--r--drivers/gpio/gpio-imx-scu.c2
-rw-r--r--drivers/gpio/gpio-it87.c2
-rw-r--r--drivers/gpio/gpio-janz-ttl.c2
-rw-r--r--drivers/gpio/gpio-kempld.c2
-rw-r--r--drivers/gpio/gpio-latch.c4
-rw-r--r--drivers/gpio/gpio-ljca.c2
-rw-r--r--drivers/gpio/gpio-logicvc.c2
-rw-r--r--drivers/gpio/gpio-loongson-64bit.c2
-rw-r--r--drivers/gpio/gpio-loongson.c2
-rw-r--r--drivers/gpio/gpio-lp3943.c2
-rw-r--r--drivers/gpio/gpio-lp873x.c2
-rw-r--r--drivers/gpio/gpio-lp87565.c2
-rw-r--r--drivers/gpio/gpio-lpc18xx.c2
-rw-r--r--drivers/gpio/gpio-lpc32xx.c10
-rw-r--r--drivers/gpio/gpio-macsmc.c2
-rw-r--r--drivers/gpio/gpio-madera.c2
-rw-r--r--drivers/gpio/gpio-max730x.c2
-rw-r--r--drivers/gpio/gpio-max732x.c4
-rw-r--r--drivers/gpio/gpio-max77620.c2
-rw-r--r--drivers/gpio/gpio-max77650.c2
-rw-r--r--drivers/gpio/gpio-max77759.c2
-rw-r--r--drivers/gpio/gpio-mb86s7x.c2
-rw-r--r--drivers/gpio/gpio-mc33880.c2
-rw-r--r--drivers/gpio/gpio-ml-ioh.c2
-rw-r--r--drivers/gpio/gpio-mlxbf2.c2
-rw-r--r--drivers/gpio/gpio-mlxbf3.c54
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c2
-rw-r--r--drivers/gpio/gpio-mmio.c24
-rw-r--r--drivers/gpio/gpio-mockup.c4
-rw-r--r--drivers/gpio/gpio-moxtet.c2
-rw-r--r--drivers/gpio/gpio-mpc5200.c4
-rw-r--r--drivers/gpio/gpio-mpfs.c2
-rw-r--r--drivers/gpio/gpio-mpsse.c4
-rw-r--r--drivers/gpio/gpio-msc313.c2
-rw-r--r--drivers/gpio/gpio-mvebu.c2
-rw-r--r--drivers/gpio/gpio-nomadik.c2
-rw-r--r--drivers/gpio/gpio-npcm-sgpio.c4
-rw-r--r--drivers/gpio/gpio-octeon.c2
-rw-r--r--drivers/gpio/gpio-omap.c4
-rw-r--r--drivers/gpio/gpio-palmas.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpio/gpio-pca9570.c2
-rw-r--r--drivers/gpio/gpio-pcf857x.c4
-rw-r--r--drivers/gpio/gpio-pch.c2
-rw-r--r--drivers/gpio/gpio-pl061.c2
-rw-r--r--drivers/gpio/gpio-pxa.c10
-rw-r--r--drivers/gpio/gpio-raspberrypi-exp.c2
-rw-r--r--drivers/gpio/gpio-rc5t583.c2
-rw-r--r--drivers/gpio/gpio-rcar.c4
-rw-r--r--drivers/gpio/gpio-rdc321x.c2
-rw-r--r--drivers/gpio/gpio-reg.c6
-rw-r--r--drivers/gpio/gpio-regmap.c4
-rw-r--r--drivers/gpio/gpio-rockchip.c2
-rw-r--r--drivers/gpio/gpio-rtd.c2
-rw-r--r--drivers/gpio/gpio-sa1100.c2
-rw-r--r--drivers/gpio/gpio-sama5d2-piobu.c2
-rw-r--r--drivers/gpio/gpio-sch.c2
-rw-r--r--drivers/gpio/gpio-sch311x.c2
-rw-r--r--drivers/gpio/gpio-sim.c4
-rw-r--r--drivers/gpio/gpio-siox.c2
-rw-r--r--drivers/gpio/gpio-spear-spics.c2
-rw-r--r--drivers/gpio/gpio-sprd.c2
-rw-r--r--drivers/gpio/gpio-stmpe.c2
-rw-r--r--drivers/gpio/gpio-stp-xway.c2
-rw-r--r--drivers/gpio/gpio-syscon.c4
-rw-r--r--drivers/gpio/gpio-tangier.c2
-rw-r--r--drivers/gpio/gpio-tc3589x.c2
-rw-r--r--drivers/gpio/gpio-tegra.c2
-rw-r--r--drivers/gpio/gpio-tegra186.c2
-rw-r--r--drivers/gpio/gpio-thunderx.c4
-rw-r--r--drivers/gpio/gpio-timberdale.c2
-rw-r--r--drivers/gpio/gpio-tpic2810.c4
-rw-r--r--drivers/gpio/gpio-tps65086.c2
-rw-r--r--drivers/gpio/gpio-tps65218.c2
-rw-r--r--drivers/gpio/gpio-tps65219.c4
-rw-r--r--drivers/gpio/gpio-tps6586x.c2
-rw-r--r--drivers/gpio/gpio-tps65910.c2
-rw-r--r--drivers/gpio/gpio-tps65912.c2
-rw-r--r--drivers/gpio/gpio-tps68470.c2
-rw-r--r--drivers/gpio/gpio-tqmx86.c2
-rw-r--r--drivers/gpio/gpio-ts4900.c2
-rw-r--r--drivers/gpio/gpio-ts5500.c2
-rw-r--r--drivers/gpio/gpio-twl4030.c2
-rw-r--r--drivers/gpio/gpio-twl6040.c2
-rw-r--r--drivers/gpio/gpio-uniphier.c4
-rw-r--r--drivers/gpio/gpio-viperboard.c4
-rw-r--r--drivers/gpio/gpio-virtio.c2
-rw-r--r--drivers/gpio/gpio-vx855.c2
-rw-r--r--drivers/gpio/gpio-wcd934x.c2
-rw-r--r--drivers/gpio/gpio-wcove.c2
-rw-r--r--drivers/gpio/gpio-winbond.c2
-rw-r--r--drivers/gpio/gpio-wm831x.c2
-rw-r--r--drivers/gpio/gpio-wm8350.c2
-rw-r--r--drivers/gpio/gpio-wm8994.c2
-rw-r--r--drivers/gpio/gpio-xgene.c2
-rw-r--r--drivers/gpio/gpio-xilinx.c4
-rw-r--r--drivers/gpio/gpio-xlp.c2
-rw-r--r--drivers/gpio/gpio-xra1403.c2
-rw-r--r--drivers/gpio/gpio-xtensa.c2
-rw-r--r--drivers/gpio/gpio-zevio.c2
-rw-r--r--drivers/gpio/gpio-zynq.c2
-rw-r--r--drivers/gpio/gpio-zynqmp-modepin.c2
-rw-r--r--drivers/gpio/gpiolib.c31
-rw-r--r--drivers/gpu/drm/Kconfig.debug1
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/adp/adp-mipi.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c497
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c151
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c262
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c175
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c35
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h28
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h31
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h12
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h42
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h (renamed from drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h)2
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h132
-rw-r--r--drivers/gpu/drm/xe/display/intel_bo.c91
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c7
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c101
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h4
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.c4
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c2
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c61
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c2
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c13
-rw-r--r--drivers/gpu/drm/xe/display/xe_tdf.c4
-rw-r--r--drivers/gpu/drm/xe/regs/xe_bars.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gsc_regs.h4
-rw-r--r--drivers/gpu/drm/xe/regs/xe_i2c_regs.h20
-rw-r--r--drivers/gpu/drm/xe/regs/xe_irq_regs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h7
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h3
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pmt.h7
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c6
-rw-r--r--drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c232
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c52
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c90
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c28
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h12
-rw-r--r--drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c227
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c2
-rw-r--r--drivers/gpu/drm/xe/xe_bb.h2
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c121
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h20
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c4
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c163
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.h11
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c62
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c12
-rw-r--r--drivers/gpu/drm/xe/xe_device.c82
-rw-r--r--drivers/gpu/drm/xe/xe_device.h55
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c148
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h85
-rw-r--r--drivers/gpu/drm/xe/xe_device_wa_oob.rules2
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c2
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c6
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c2
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c45
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c253
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h24
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c266
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c96
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c47
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c106
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c57
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c131
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c17
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c168
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c318
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h33
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c58
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c49
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c166
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c34
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c400
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h15
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_exec_queue_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c10
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c64
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c5
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c2
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c40
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c47
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.c5
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c259
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.c332
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.h62
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c9
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c60
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.h1
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c316
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_map.h18
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c72
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c24
-rw-r--r--drivers/gpu/drm/xe/xe_module.c48
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.c167
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.h15
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c224
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c44
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c78
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c29
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h41
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c30
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h12
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h15
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c9
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c7
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c135
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c147
-rw-r--r--drivers/gpu/drm/xe/xe_query.c33
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c47
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h14
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c17
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.c96
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.h4
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.c88
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.h7
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.c216
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.h23
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_types.h45
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c163
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_types.h41
-rw-r--r--drivers/gpu/drm/xe/xe_step.c2
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c19
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c403
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h136
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c13
-rw-r--r--drivers/gpu/drm/xe/xe_tile.h17
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.c254
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.h18
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h4
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c5
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c78
-rw-r--r--drivers/gpu/drm/xe/xe_uc.h5
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c58
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c385
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h24
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h28
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.c4
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.h4
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c120
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h22
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules12
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c3
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c34
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c3
-rw-r--r--drivers/gpu/nova-core/dma.rs58
-rw-r--r--drivers/gpu/nova-core/driver.rs8
-rw-r--r--drivers/gpu/nova-core/falcon.rs588
-rw-r--r--drivers/gpu/nova-core/falcon/gsp.rs24
-rw-r--r--drivers/gpu/nova-core/falcon/hal.rs54
-rw-r--r--drivers/gpu/nova-core/falcon/hal/ga102.rs119
-rw-r--r--drivers/gpu/nova-core/falcon/sec2.rs10
-rw-r--r--drivers/gpu/nova-core/fb.rs147
-rw-r--r--drivers/gpu/nova-core/fb/hal.rs39
-rw-r--r--drivers/gpu/nova-core/fb/hal/ga100.rs57
-rw-r--r--drivers/gpu/nova-core/fb/hal/ga102.rs36
-rw-r--r--drivers/gpu/nova-core/fb/hal/tu102.rs58
-rw-r--r--drivers/gpu/nova-core/firmware.rs113
-rw-r--r--drivers/gpu/nova-core/firmware/fwsec.rs423
-rw-r--r--drivers/gpu/nova-core/gfw.rs71
-rw-r--r--drivers/gpu/nova-core/gpu.rs127
-rw-r--r--drivers/gpu/nova-core/nova_core.rs7
-rw-r--r--drivers/gpu/nova-core/regs.rs305
-rw-r--r--drivers/gpu/nova-core/regs/macros.rs67
-rw-r--r--drivers/gpu/nova-core/util.rs26
-rw-r--r--drivers/gpu/nova-core/vbios.rs1166
-rw-r--r--drivers/gpu/trace/Kconfig11
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c2
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c23
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c4
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h1
-rw-r--r--drivers/hid/hid-apple.c244
-rw-r--r--drivers/hid/hid-core.c12
-rw-r--r--drivers/hid/hid-cp2112.c2
-rw-r--r--drivers/hid/hid-debug.c4
-rw-r--r--drivers/hid/hid-ids.h30
-rw-r--r--drivers/hid/hid-lg4ff.c6
-rw-r--r--drivers/hid/hid-magicmouse.c66
-rw-r--r--drivers/hid/hid-mcp2200.c4
-rw-r--r--drivers/hid/hid-mcp2221.c110
-rw-r--r--drivers/hid/hid-multitouch.c62
-rw-r--r--drivers/hid/hid-quirks.c9
-rw-r--r--drivers/hid/hid-steam.c35
-rw-r--r--drivers/hid/hid-uclogic-core.c66
-rw-r--r--drivers/hid/hid-uclogic-params.c134
-rw-r--r--drivers/hid/hid-uclogic-params.h5
-rw-r--r--drivers/hid/hid-uclogic-rdesc.c44
-rw-r--r--drivers/hid/hid-uclogic-rdesc.h4
-rw-r--r--drivers/hid/hid-universal-pidff.c5
-rw-r--r--drivers/hid/intel-thc-hid/Makefile1
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c218
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h55
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c15
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c140
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h33
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c40
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h38
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h5
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.c94
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.h26
-rw-r--r--drivers/hid/usbhid/hid-pidff.c46
-rw-r--r--drivers/hid/usbhid/hid-pidff.h3
-rw-r--r--drivers/hv/mshv_eventfd.c8
-rw-r--r--drivers/hwmon/adt7475.c20
-rw-r--r--drivers/hwmon/amc6821.c127
-rw-r--r--drivers/hwmon/asus-ec-sensors.c60
-rw-r--r--drivers/hwmon/axi-fan-control.c2
-rw-r--r--drivers/hwmon/corsair-psu.c1
-rw-r--r--drivers/hwmon/emc2305.c181
-rw-r--r--drivers/hwmon/gsc-hwmon.c4
-rw-r--r--drivers/hwmon/ibmaem.c27
-rw-r--r--drivers/hwmon/ina238.c134
-rw-r--r--drivers/hwmon/ltc2992.c4
-rw-r--r--drivers/hwmon/ltc4282.c16
-rw-r--r--drivers/hwmon/max31827.c2
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/adp1050.c72
-rw-r--r--drivers/hwmon/pmbus/isl68137.c3
-rw-r--r--drivers/hwmon/pmbus/tps53679.c37
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c2
-rw-r--r--drivers/hwmon/w83627ehf.c9
-rw-r--r--drivers/hwtracing/intel_th/msu.c3
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c18
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c6
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c32
-rw-r--r--drivers/i2c/busses/i2c-tegra.c64
-rw-r--r--drivers/i2c/i2c-core-acpi.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-mule.c3
-rw-r--r--drivers/i3c/device.c11
-rw-r--r--drivers/i3c/internals.h38
-rw-r--r--drivers/i3c/master.c38
-rw-r--r--drivers/i3c/master/Kconfig10
-rw-r--r--drivers/i3c/master/Makefile1
-rw-r--r--drivers/i3c/master/dw-i3c-master.c47
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c90
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c2
-rw-r--r--drivers/i3c/master/renesas-i3c.c1404
-rw-r--r--drivers/i3c/master/svc-i3c-master.c32
-rw-r--r--drivers/idle/intel_idle.c2
-rw-r--r--drivers/iio/adc/ad4130.c2
-rw-r--r--drivers/iio/adc/ad4170-4.c2
-rw-r--r--drivers/iio/adc/ad7768-1.c2
-rw-r--r--drivers/iio/adc/adi-axi-adc.c3
-rw-r--r--drivers/iio/adc/rohm-bd79124.c4
-rw-r--r--drivers/iio/adc/ti-ads7950.c2
-rw-r--r--drivers/iio/addac/ad74115.c2
-rw-r--r--drivers/iio/addac/ad74413r.c4
-rw-r--r--drivers/iio/dac/ad5592r-base.c2
-rw-r--r--drivers/iio/dac/adi-axi-dac.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile1
-rw-r--r--drivers/infiniband/core/cm.c47
-rw-r--r--drivers/infiniband/core/counters.c2
-rw-r--r--drivers/infiniband/core/cq.c12
-rw-r--r--drivers/infiniband/core/device.c47
-rw-r--r--drivers/infiniband/core/mad.c468
-rw-r--r--drivers/infiniband/core/mad_priv.h76
-rw-r--r--drivers/infiniband/core/mad_rmpp.c41
-rw-r--r--drivers/infiniband/core/nldev.c24
-rw-r--r--drivers/infiniband/core/rdma_core.c29
-rw-r--r--drivers/infiniband/core/rdma_core.h1
-rw-r--r--drivers/infiniband/core/restrack.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c13
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c87
-rw-r--r--drivers/infiniband/core/uverbs_std_types_dmah.c145
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c172
-rw-r--r--drivers/infiniband/core/uverbs_std_types_qp.c2
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c1
-rw-r--r--drivers/infiniband/core/verbs.c5
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c28
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h4
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h1
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c6
-rw-r--r--drivers/infiniband/hw/efa/efa.h5
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h17
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c53
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h11
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c1
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c91
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c9
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h3
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c96
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h21
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c18
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c134
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c32
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c120
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c4
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c9
-rw-r--r--drivers/infiniband/hw/mana/counters.c78
-rw-r--r--drivers/infiniband/hw/mana/counters.h18
-rw-r--r--drivers/infiniband/hw/mana/device.c120
-rw-r--r--drivers/infiniband/hw/mana/main.c13
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h30
-rw-r--r--drivers/infiniband/hw/mana/mr.c8
-rw-r--r--drivers/infiniband/hw/mana/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c4
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile1
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c30
-rw-r--r--drivers/infiniband/hw/mlx5/counters.h13
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c19
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c6
-rw-r--r--drivers/infiniband/hw/mlx5/dm.c2
-rw-r--r--drivers/infiniband/hw/mlx5/dmah.c54
-rw-r--r--drivers/infiniband/hw/mlx5/dmah.h23
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c121
-rw-r--r--drivers/infiniband/hw/mlx5/fs.h8
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c13
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h99
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c116
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c32
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c307
-rw-r--r--drivers/infiniband/hw/mlx5/umr.h13
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h3
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c6
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h3
-rw-r--r--drivers/infiniband/hw/qib/Kconfig17
-rw-r--r--drivers/infiniband/hw/qib/Makefile17
-rw-r--r--drivers/infiniband/hw/qib/qib.h1492
-rw-r--r--drivers/infiniband/hw/qib/qib_6120_regs.h977
-rw-r--r--drivers/infiniband/hw/qib/qib_7220.h149
-rw-r--r--drivers/infiniband/hw/qib/qib_7220_regs.h1496
-rw-r--r--drivers/infiniband/hw/qib/qib_7322_regs.h3163
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h798
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.c274
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.h45
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c906
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c798
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c271
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2401
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c549
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c3533
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c4596
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c8475
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c1782
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c241
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c2450
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h300
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c598
-rw-r--r--drivers/infiniband/hw/qib/qib_pio_copy.c64
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c454
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c549
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.h188
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c2131
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c314
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c1445
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c999
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c731
-rw-r--r--drivers/infiniband/hw/qib/qib_twsi.c502
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c566
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c521
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c583
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c137
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c1470
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.h52
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c1705
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h398
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_ppc64.c62
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_x86_64.c150
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c4
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h1
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h1
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h1
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_odp.c192
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c6
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c27
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c7
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.h3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c6
-rw-r--r--drivers/input/Makefile2
-rw-r--r--drivers/input/evdev.c8
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/joystick/xpad.c64
-rw-r--r--drivers/input/keyboard/adp5588-keys.c7
-rw-r--r--drivers/input/keyboard/atkbd.c12
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c17
-rw-r--r--drivers/input/keyboard/samsung-keypad.c137
-rw-r--r--drivers/input/misc/Kconfig7
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/cs40l50-vibra.c1
-rw-r--r--drivers/input/misc/max77693-haptic.c41
-rw-r--r--drivers/input/misc/max8997_haptic.c96
-rw-r--r--drivers/input/misc/pcf50633-input.c113
-rw-r--r--drivers/input/rmi4/Kconfig15
-rw-r--r--drivers/input/rmi4/Makefile2
-rw-r--r--drivers/input/rmi4/rmi_bus.c6
-rw-r--r--drivers/input/rmi4/rmi_driver.h2
-rw-r--r--drivers/input/rmi4/rmi_f1a.c143
-rw-r--r--drivers/input/rmi4/rmi_f21.c179
-rw-r--r--drivers/input/touch-overlay.c277
-rw-r--r--drivers/input/touchscreen/ad7879.c9
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c26
-rw-r--r--drivers/input/touchscreen/goodix.c50
-rw-r--r--drivers/input/touchscreen/st1232.c35
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/amd/Kconfig1
-rw-r--r--drivers/iommu/amd/amd_iommu.h6
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h17
-rw-r--r--drivers/iommu/amd/debugfs.c378
-rw-r--r--drivers/iommu/amd/init.c52
-rw-r--r--drivers/iommu/amd/io_pgtable.c4
-rw-r--r--drivers/iommu/amd/iommu.c192
-rw-r--r--drivers/iommu/apple-dart.c1
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c70
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c3
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c37
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h35
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c493
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c4
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c12
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c6
-rw-r--r--drivers/iommu/exynos-iommu.c5
-rw-r--r--drivers/iommu/intel/cache.c55
-rw-r--r--drivers/iommu/intel/dmar.c3
-rw-r--r--drivers/iommu/intel/iommu.c363
-rw-r--r--drivers/iommu/intel/iommu.h22
-rw-r--r--drivers/iommu/intel/irq_remapping.c38
-rw-r--r--drivers/iommu/intel/nested.c4
-rw-r--r--drivers/iommu/intel/pasid.c17
-rw-r--r--drivers/iommu/intel/pasid.h11
-rw-r--r--drivers/iommu/intel/svm.c3
-rw-r--r--drivers/iommu/intel/trace.h5
-rw-r--r--drivers/iommu/io-pgtable-arm.c7
-rw-r--r--drivers/iommu/iommu.c7
-rw-r--r--drivers/iommu/iommufd/device.c143
-rw-r--r--drivers/iommu/iommufd/driver.c113
-rw-r--r--drivers/iommu/iommufd/eventq.c14
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c10
-rw-r--r--drivers/iommu/iommufd/io_pagetable.c57
-rw-r--r--drivers/iommu/iommufd/io_pagetable.h5
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h135
-rw-r--r--drivers/iommu/iommufd/iommufd_test.h20
-rw-r--r--drivers/iommu/iommufd/iova_bitmap.c1
-rw-r--r--drivers/iommu/iommufd/main.c206
-rw-r--r--drivers/iommu/iommufd/pages.c21
-rw-r--r--drivers/iommu/iommufd/selftest.c208
-rw-r--r--drivers/iommu/iommufd/viommu.c309
-rw-r--r--drivers/iommu/ipmmu-vmsa.c4
-rw-r--r--drivers/iommu/msm_iommu.c7
-rw-r--r--drivers/iommu/mtk_iommu.c9
-rw-r--r--drivers/iommu/mtk_iommu_v1.c11
-rw-r--r--drivers/iommu/omap-iommu.c27
-rw-r--r--drivers/iommu/riscv/iommu.c1
-rw-r--r--drivers/iommu/rockchip-iommu.c3
-rw-r--r--drivers/iommu/s390-iommu.c2
-rw-r--r--drivers/iommu/sprd-iommu.c3
-rw-r--r--drivers/iommu/sun50i-iommu.c3
-rw-r--r--drivers/iommu/tegra-smmu.c3
-rw-r--r--drivers/iommu/virtio-iommu.c6
-rw-r--r--drivers/irqchip/Kconfig13
-rw-r--r--drivers/irqchip/Makefile5
-rw-r--r--drivers/irqchip/irq-gic-common.h2
-rw-r--r--drivers/irqchip/irq-gic-its-msi-parent.c (renamed from drivers/irqchip/irq-gic-v3-its-msi-parent.c)168
-rw-r--r--drivers/irqchip/irq-gic-its-msi-parent.h12
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c1
-rw-r--r--drivers/irqchip/irq-gic-v4.c4
-rw-r--r--drivers/irqchip/irq-gic-v5-irs.c822
-rw-r--r--drivers/irqchip/irq-gic-v5-its.c1227
-rw-r--r--drivers/irqchip/irq-gic-v5-iwb.c277
-rw-r--r--drivers/irqchip/irq-gic-v5.c1137
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-msi-lib.c5
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c10
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c2
-rw-r--r--drivers/leds/blink/leds-lgm-sso.c2
-rw-r--r--drivers/leds/flash/Kconfig1
-rw-r--r--drivers/leds/flash/leds-qcom-flash.c15
-rw-r--r--drivers/leds/led-class.c3
-rw-r--r--drivers/leds/leds-lp50xx.c11
-rw-r--r--drivers/leds/leds-pca9532.c2
-rw-r--r--drivers/leds/leds-pca955x.c6
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c16
-rw-r--r--drivers/mailbox/Kconfig19
-rw-r--r--drivers/mailbox/Makefile4
-rw-r--r--drivers/mailbox/ast2700-mailbox.c235
-rw-r--r--drivers/mailbox/bcm74110-mailbox.c656
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c10
-rw-r--r--drivers/mailbox/pcc.c102
-rw-r--r--drivers/mailbox/qcom-ipcc.c3
-rw-r--r--drivers/md/dm-flakey.c9
-rw-r--r--drivers/md/dm-ima.c42
-rw-r--r--drivers/md/dm-linear.c2
-rw-r--r--drivers/md/dm-log-writes.c2
-rw-r--r--drivers/md/dm-path-selector.c8
-rw-r--r--drivers/md/dm-path-selector.h2
-rw-r--r--drivers/md/dm-ps-historical-service-time.c9
-rw-r--r--drivers/md/dm-ps-io-affinity.c5
-rw-r--r--drivers/md/dm-ps-queue-length.c9
-rw-r--r--drivers/md/dm-ps-round-robin.c9
-rw-r--r--drivers/md/dm-ps-service-time.c9
-rw-r--r--drivers/md/dm-raid.c49
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-table.c10
-rw-r--r--drivers/md/dm-target.c2
-rw-r--r--drivers/md/dm-thin.c7
-rw-r--r--drivers/md/dm-vdo/funnel-workqueue.c3
-rw-r--r--drivers/md/dm-verity-fec.c4
-rw-r--r--drivers/md/dm-verity-target.c185
-rw-r--r--drivers/md/dm-verity.h22
-rw-r--r--drivers/md/dm-writecache.c11
-rw-r--r--drivers/md/dm-zone.c2
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c13
-rw-r--r--drivers/md/md-bitmap.c8
-rw-r--r--drivers/md/md-cluster.c16
-rw-r--r--drivers/md/md.c73
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid0.c6
-rw-r--r--drivers/md/raid1-10.c2
-rw-r--r--drivers/md/raid1.c94
-rw-r--r--drivers/md/raid1.h22
-rw-r--r--drivers/md/raid10.c16
-rw-r--r--drivers/md/raid5-ppl.c6
-rw-r--r--drivers/md/raid5.c30
-rw-r--r--drivers/media/cec/core/cec-pin-error-inj.c59
-rw-r--r--drivers/media/cec/core/cec-pin-priv.h8
-rw-r--r--drivers/media/cec/core/cec-pin.c31
-rw-r--r--drivers/media/cec/platform/cec-gpio/cec-gpio.c58
-rw-r--r--drivers/media/cec/usb/rainshadow/rainshadow-cec.c3
-rw-r--r--drivers/media/common/b2c2/flexcop-i2c.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c4
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c8
-rw-r--r--drivers/media/i2c/Kconfig41
-rw-r--r--drivers/media/i2c/adv7180.c16
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c17
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c10
-rw-r--r--drivers/media/i2c/adv7604.c4
-rw-r--r--drivers/media/i2c/ds90ub913.c12
-rw-r--r--drivers/media/i2c/ds90ub953.c17
-rw-r--r--drivers/media/i2c/ds90ub960.c8
-rw-r--r--drivers/media/i2c/dw9714.c62
-rw-r--r--drivers/media/i2c/hi556.c73
-rw-r--r--drivers/media/i2c/imx214.c263
-rw-r--r--drivers/media/i2c/imx290.c1
-rw-r--r--drivers/media/i2c/imx415.c2
-rw-r--r--drivers/media/i2c/lt6911uxe.c2
-rw-r--r--drivers/media/i2c/max9286.c6
-rw-r--r--drivers/media/i2c/max96714.c7
-rw-r--r--drivers/media/i2c/max96717.c9
-rw-r--r--drivers/media/i2c/mt9m114.c171
-rw-r--r--drivers/media/i2c/ov2659.c3
-rw-r--r--drivers/media/i2c/ov2740.c18
-rw-r--r--drivers/media/i2c/ov5670.c9
-rw-r--r--drivers/media/i2c/ov5693.c7
-rw-r--r--drivers/media/i2c/ov7251.c7
-rw-r--r--drivers/media/i2c/ov8865.c3
-rw-r--r--drivers/media/i2c/saa7115.c12
-rw-r--r--drivers/media/i2c/tc358743.c138
-rw-r--r--drivers/media/i2c/tda1997x.c4
-rw-r--r--drivers/media/i2c/vd55g1.c32
-rw-r--r--drivers/media/pci/cx18/cx18-av-vbi.c12
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h12
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c2
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c2
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c13
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c82
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c12
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.h2
-rw-r--r--drivers/media/pci/intel/ivsc/mei_ace.c4
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c4
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.c9
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c20
-rw-r--r--drivers/media/pci/saa7164/saa7164-cmd.c28
-rw-r--r--drivers/media/pci/saa7164/saa7164.h2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-gpio.c20
-rw-r--r--drivers/media/platform/amphion/vdec.c294
-rw-r--r--drivers/media/platform/amphion/vpu.h7
-rw-r--r--drivers/media/platform/amphion/vpu_color.c73
-rw-r--r--drivers/media/platform/amphion/vpu_dbg.c15
-rw-r--r--drivers/media/platform/amphion/vpu_defs.h12
-rw-r--r--drivers/media/platform/amphion/vpu_helpers.c123
-rw-r--r--drivers/media/platform/amphion/vpu_helpers.h12
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c5
-rw-r--r--drivers/media/platform/amphion/vpu_mbox.c4
-rw-r--r--drivers/media/platform/amphion/vpu_mbox.h1
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.c11
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c131
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c47
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h1
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c68
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c135
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h6
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c18
-rw-r--r--drivers/media/platform/nxp/imx8mq-mipi-csi2.c169
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c3
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c5
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h1
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c39
-rw-r--r--drivers/media/platform/qcom/camss/camss.c107
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.c35
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.h3
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.c35
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_common.h1
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c48
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h5
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c37
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c143
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h5
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c56
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_queue.c2
-rw-r--r--drivers/media/platform/qcom/iris/iris_instance.h6
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_common.h28
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_gen2.c198
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_qcs8300.h126
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8250.c15
-rw-r--r--drivers/media/platform/qcom/iris/iris_probe.c2
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.c2
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.h1
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.c18
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.c116
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.h11
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.c36
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.c397
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.h46
-rw-r--r--drivers/media/platform/qcom/venus/core.c18
-rw-r--r--drivers/media/platform/qcom/venus/core.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c83
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c5
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c62
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c8
-rw-r--r--drivers/media/platform/qcom/venus/venc.c8
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/Kconfig1
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/pisp_be.c207
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe.c4
-rw-r--r--drivers/media/platform/renesas/rcar-csi2.c336
-rw-r--r--drivers/media/platform/renesas/rcar-fcp.c36
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c694
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c77
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c492
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-vin.h16
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c6
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h14
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c45
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c108
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c42
-rw-r--r--drivers/media/platform/renesas/vsp1/Makefile1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1.h1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_dl.c25
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.c1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c22
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.c3
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_regs.h1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_vspx.c633
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_vspx.h16
-rw-r--r--drivers/media/platform/rockchip/Kconfig1
-rw-r--r--drivers/media/platform/rockchip/Makefile1
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h2
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c150
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h99
-rw-r--r--drivers/media/platform/rockchip/rkvdec/Kconfig (renamed from drivers/staging/media/rkvdec/Kconfig)0
-rw-r--r--drivers/media/platform/rockchip/rkvdec/Makefile (renamed from drivers/staging/media/rkvdec/Makefile)0
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-h264.c (renamed from drivers/staging/media/rkvdec/rkvdec-h264.c)0
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-regs.h (renamed from drivers/staging/media/rkvdec/rkvdec-regs.h)0
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c (renamed from drivers/staging/media/rkvdec/rkvdec-vp9.c)0
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.c (renamed from drivers/staging/media/rkvdec/rkvdec.c)43
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.h (renamed from drivers/staging/media/rkvdec/rkvdec.h)1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.h2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.c27
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c34
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c2
-rw-r--r--drivers/media/platform/ti/vpe/vpdma.c32
-rw-r--r--drivers/media/platform/ti/vpe/vpdma.h3
-rw-r--r--drivers/media/platform/verisilicon/hantro.h2
-rw-r--r--drivers/media/platform/verisilicon/hantro_g1_regs.h2
-rw-r--r--drivers/media/platform/verisilicon/hantro_h264.c6
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c6
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu_hw.c9
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c7
-rw-r--r--drivers/media/rc/ir-spi.c40
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.c3
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-gen.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c4
-rw-r--r--drivers/media/usb/gspca/vicam.c10
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c30
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c4
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c131
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c74
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c122
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c199
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c366
-rw-r--r--drivers/media/usb/uvc/uvc_video.c21
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h46
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c40
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c20
-rw-r--r--drivers/media/v4l2-core/v4l2-i2c.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-jpeg.c80
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c31
-rw-r--r--drivers/mfd/Kconfig96
-rw-r--r--drivers/mfd/ab8500-core.c3
-rw-r--r--drivers/mfd/arizona-irq.c1
-rw-r--r--drivers/mfd/atmel-smc.c9
-rw-r--r--drivers/mfd/axp20x.c8
-rw-r--r--drivers/mfd/cros_ec_dev.c10
-rw-r--r--drivers/mfd/cs40l50-core.c3
-rw-r--r--drivers/mfd/cs42l43.c1
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c4
-rw-r--r--drivers/mfd/ioc3.c2
-rw-r--r--drivers/mfd/lp8788-irq.c2
-rw-r--r--drivers/mfd/mt6358-irq.c3
-rw-r--r--drivers/mfd/mt6370.c2
-rw-r--r--drivers/mfd/mt6370.h2
-rw-r--r--drivers/mfd/mt6397-core.c12
-rw-r--r--drivers/mfd/mt6397-irq.c4
-rw-r--r--drivers/mfd/qcom-pm8xxx.c4
-rw-r--r--drivers/mfd/rk8xx-core.c12
-rw-r--r--drivers/mfd/rohm-bd71828.c12
-rw-r--r--drivers/mfd/sm501.c2
-rw-r--r--drivers/mfd/stm32-timers.c1
-rw-r--r--drivers/mfd/stmfx.c5
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps65217.c4
-rw-r--r--drivers/mfd/tps65219.c17
-rw-r--r--drivers/mfd/tps6586x.c6
-rw-r--r--drivers/mfd/twl6030-irq.c79
-rw-r--r--drivers/mfd/twl6040.c2
-rw-r--r--drivers/mfd/ucb1x00-core.c2
-rw-r--r--drivers/mfd/wm831x-irq.c10
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c2
-rw-r--r--drivers/misc/pci_endpoint_test.c83
-rw-r--r--drivers/misc/ti_fpc202.c2
-rw-r--r--drivers/misc/vmw_balloon.c3
-rw-r--r--drivers/mtd/devices/Kconfig11
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/mtd_intel_dg.c830
-rw-r--r--drivers/mtd/ftl.c2
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.c6
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c62
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c4
-rw-r--r--drivers/mtd/nand/raw/renesas-nand-controller.c6
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c15
-rw-r--r--drivers/mtd/nand/spi/alliancememory.c12
-rw-r--r--drivers/mtd/nand/spi/ato.c6
-rw-r--r--drivers/mtd/nand/spi/core.c27
-rw-r--r--drivers/mtd/nand/spi/esmt.c8
-rw-r--r--drivers/mtd/nand/spi/foresee.c8
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c68
-rw-r--r--drivers/mtd/nand/spi/macronix.c8
-rw-r--r--drivers/mtd/nand/spi/micron.c20
-rw-r--r--drivers/mtd/nand/spi/paragon.c12
-rw-r--r--drivers/mtd/nand/spi/skyhigh.c12
-rw-r--r--drivers/mtd/nand/spi/toshiba.c8
-rw-r--r--drivers/mtd/nand/spi/winbond.c163
-rw-r--r--drivers/mtd/nand/spi/xtx.c12
-rw-r--r--drivers/mtd/nftlcore.c43
-rw-r--r--drivers/mtd/spi-nor/micron-st.c8
-rw-r--r--drivers/mtd/spi-nor/spansion.c35
-rw-r--r--drivers/mtd/spi-nor/swp.c19
-rw-r--r--drivers/mtd/ubi/kapi.c27
-rw-r--r--drivers/net/amt.c11
-rw-r--r--drivers/net/bareudp.c7
-rw-r--r--drivers/net/bonding/bond_3ad.c24
-rw-r--r--drivers/net/bonding/bond_main.c96
-rw-r--r--drivers/net/bonding/bond_netlink.c16
-rw-r--r--drivers/net/bonding/bond_options.c42
-rw-r--r--drivers/net/can/Kconfig3
-rw-r--r--drivers/net/can/Makefile2
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c11
-rw-r--r--drivers/net/can/dev/calc_bittiming.c2
-rw-r--r--drivers/net/can/dev/netlink.c26
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/kvaser_pciefd/Makefile3
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd.h96
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c (renamed from drivers/net/can/kvaser_pciefd.c)144
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c60
-rw-r--r--drivers/net/can/rcar/rcar_can.c9
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c309
-rw-r--r--drivers/net/can/sja1000/Kconfig2
-rw-r--r--drivers/net/can/spi/mcp251x.c33
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/Kconfig1
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/Makefile2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h33
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c139
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c87
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c65
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c75
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c17
-rw-r--r--drivers/net/can/xilinx_can.c2
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/b53/Kconfig1
-rw-r--r--drivers/net/dsa/b53/b53_common.c311
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c107
-rw-r--r--drivers/net/dsa/b53/b53_priv.h63
-rw-r--r--drivers/net/dsa/b53/b53_regs.h27
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c20
-rw-r--r--drivers/net/dsa/microchip/ksz8.c207
-rw-r--r--drivers/net/dsa/microchip/ksz8.h4
-rw-r--r--drivers/net/dsa/microchip/ksz8_reg.h53
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c164
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h37
-rw-r--r--drivers/net/dsa/microchip/ksz_dcb.c10
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c4
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c104
-rw-r--r--drivers/net/dsa/mt7530-mdio.c21
-rw-r--r--drivers/net/dsa/mt7530-mmio.c21
-rw-r--r--drivers/net/dsa/mt7530.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.c31
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c6
-rw-r--r--drivers/net/dsa/qca/ar9331.c4
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c22
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c8
-rw-r--r--drivers/net/ethernet/agere/et131x.c36
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c5
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c31
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c57
-rw-r--r--drivers/net/ethernet/amazon/Kconfig2
-rw-r--r--drivers/net/ethernet/amazon/ena/Makefile2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h76
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c267
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h84
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_debugfs.c62
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_debugfs.h27
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_devlink.c210
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_devlink.h21
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c55
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c62
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h14
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_phc.c233
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_phc.h37
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h8
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h18
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c142
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c204
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c401
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c75
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h49
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c39
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c9
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig9
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c1
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c6
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnge/Makefile12
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge.h218
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_core.c388
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_devlink.c306
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_devlink.h18
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c508
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h110
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c703
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h27
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.c268
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.h206
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.c605
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.h94
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.c438
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.h188
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c37
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c119
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c26
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c7
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c26
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c39
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.h4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c37
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c37
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c105
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c8
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c57
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c56
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c11
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c35
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c48
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c36
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c12
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h22
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c110
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c14
-rw-r--r--drivers/net/ethernet/freescale/fec.h15
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c216
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c40
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c28
-rw-r--r--drivers/net/ethernet/google/Kconfig1
-rw-r--r--drivers/net/ethernet/google/gve/Makefile4
-rw-r--r--drivers/net/ethernet/google/gve/gve.h83
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c101
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h30
-rw-r--r--drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c25
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc_dqo.h3
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h3
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c34
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c362
-rw-r--r--drivers/net/ethernet/google/gve/gve_ptp.c139
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c14
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c201
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c384
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c72
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c38
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c1048
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 48
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 107
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 1367
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 44
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c | 27
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_ethtool.c | 47
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_tx.c | 23
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 220
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h | 21
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 27
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 77
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 34
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 68
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 12
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 155
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 730
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 46
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 165
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 293
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 18
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 45
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 25
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 47
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 38
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 73
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 12
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq.c | 62
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq.h | 12
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h | 83
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_common.c | 110
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 80
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_prototype.h | 3
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 17
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.h | 42
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_type.h | 34
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 35
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/health.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/port.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/port.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 297
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_cgu_regs.h | 181
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 717
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 58
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 53
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 36
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.c | 47
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devids.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.c | 1424
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.h | 33
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 112
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 78
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 49
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 68
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.c | 38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fwlog.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 46
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 52
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 100
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 313
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 177
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 582
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 55
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 23
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 55
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tspll.c | 626
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tspll.h | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 87
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.h | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_mbx.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 132
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.h | 23
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vlan_mode.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c | 24
-rw-r--r--  drivers/net/ethernet/intel/idpf/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h | 170
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq.c | 14
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq.h | 18
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_dev.c | 49
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 334
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_idc.c | 503
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c | 127
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_main.c | 33
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_mem.h | 8
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ptp.c | 136
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ptp.h | 17
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 51
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_vf_dev.c | 45
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 315
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.h | 9
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c | 55
-rw-r--r--  drivers/net/ethernet/intel/idpf/virtchnl2.h | 278
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 9
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 20
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 37
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_xsk.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h | 27
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 57
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.h | 8
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 48
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_mac.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 100
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 36
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.c | 118
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/devlink/devlink.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/devlink/region.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c | 276
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 32
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 239
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 42
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 53
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 46
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h | 226
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 150
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/libeth/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/intel/libeth/Makefile | 8
-rw-r--r--  drivers/net/ethernet/intel/libeth/priv.h | 37
-rw-r--r--  drivers/net/ethernet/intel/libeth/rx.c | 42
-rw-r--r--  drivers/net/ethernet/intel/libeth/tx.c | 41
-rw-r--r--  drivers/net/ethernet/intel/libeth/xdp.c | 451
-rw-r--r--  drivers/net/ethernet/intel/libeth/xsk.c | 271
-rw-r--r--  drivers/net/ethernet/intel/libie/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/intel/libie/Makefile | 4
-rw-r--r--  drivers/net/ethernet/intel/libie/adminq.c | 52
-rw-r--r--  drivers/net/ethernet/intel/libie/rx.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta_bm.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 31
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 78
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h | 33
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h | 32
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c | 424
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h | 81
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h | 40
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c | 106
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 24
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 243
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 81
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 98
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 184
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 56
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c | 252
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 44
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 54
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 232
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 177
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h | 49
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 44
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/rep.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 6
-rw-r--r--  drivers/net/ethernet/mediatek/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 229
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 18
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.c | 25
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c | 315
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c | 43
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 188
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 362
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 141
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 1038
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/st.c | 164
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c | 529
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c | 166
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wc.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 2
-rw-r--r--  drivers/net/ethernet/meta/fbnic/Makefile | 1
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic.h | 3
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_csr.h | 161
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c | 29
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_devlink.c | 4
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c | 239
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 230
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw.h | 52
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c | 123
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h | 45
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h | 19
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_mac.c | 169
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_mac.h | 27
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.c | 16
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.h | 16
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_pci.c | 21
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_phylink.c | 126
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_txrx.h | 9
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ethtool.c | 31
-rw-r--r--  drivers/net/ethernet/microsoft/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c | 544
-rw-r--r--  drivers/net/ethernet/microsoft/mana/hw_channel.c | 14
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 327
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 82
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 1
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c | 4
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_if.h | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 1
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 3
-rw-r--r--  drivers/net/ethernet/realtek/rtase/rtase.h | 1
-rw-r--r--  drivers/net/ethernet/realtek/rtase/rtase_main.c | 39
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 2
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 45
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.c | 99
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/falcon/ethtool.c | 55
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 6
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/siena/ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/siena/ethtool_common.c | 77
-rw-r--r--  drivers/net/ethernet/sfc/siena/ethtool_common.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/siena/farch.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/siena/mcdi_pcol.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/siena/net_driver.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/siena/rx_common.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/tc_encap_actions.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c | 73
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 718
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c | 129
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 49
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h | 32
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 5
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 19
-rw-r--r--  drivers/net/ethernet/sun/niu.h | 4
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sunqe.h | 2
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 27
-rw-r--r--  drivers/net/ethernet/ti/icssg/icss_iep.c | 26
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_common.c | 15
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth.c | 153
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth.h | 17
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c | 4
-rw-r--r--  drivers/net/ethernet/wangxun/Kconfig | 35
-rw-r--r--  drivers/net/ethernet/wangxun/Makefile | 2
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/Makefile | 1
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.c | 14
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.h | 2
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c | 9
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_mbx.c | 243
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_mbx.h | 22
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_type.h | 11
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_vf.c | 599
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_vf.h | 127
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_vf_common.c | 414
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_vf_common.h | 22
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c | 280
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h | 14
-rw-r--r--  drivers/net/ethernet/wangxun/ngbevf/Makefile | 9
-rw-r--r--  drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c | 261
-rw-r--r--  drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h | 29
-rw-r--r--  drivers/net/ethernet/wangxun/txgbevf/Makefile | 9
-rw-r--r--  drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c | 314
-rw-r--r--  drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h | 26
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r--  drivers/net/geneve.c | 14
-rw-r--r--  drivers/net/gtp.c | 12
-rw-r--r--  drivers/net/hamradio/bpqether.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 3
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 59
-rw-r--r--  drivers/net/ifb.c | 1
-rw-r--r--  drivers/net/ipa/Kconfig | 2
-rw-r--r--  drivers/net/ipa/ipa_main.c | 12
-rw-r--r--  drivers/net/ipa/ipa_sysfs.c | 6
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 7
-rw-r--r--  drivers/net/macsec.c | 2
-rw-r--r--  drivers/net/mdio/Kconfig | 7
-rw-r--r--  drivers/net/mdio/Makefile | 1
-rw-r--r--  drivers/net/mdio/fwnode_mdio.c | 26
-rw-r--r--  drivers/net/mdio/mdio-airoha.c | 276
-rw-r--r--  drivers/net/mdio/mdio-bcm-unimac.c | 5
-rw-r--r--  drivers/net/mdio/mdio-mux-gpio.c | 3
-rw-r--r--  drivers/net/netconsole.c | 270
-rw-r--r--  drivers/net/netdevsim/bus.c | 29
-rw-r--r--  drivers/net/netdevsim/dev.c | 66
-rw-r--r--  drivers/net/netdevsim/ethtool.c | 21
-rw-r--r--  drivers/net/netdevsim/hwstats.c | 5
-rw-r--r--  drivers/net/netdevsim/netdev.c | 164
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 18
-rw-r--r--  drivers/net/netdevsim/udp_tunnels.c | 12
-rw-r--r--  drivers/net/netkit.c | 10
-rw-r--r--  drivers/net/ovpn/udp.c | 4
-rw-r--r--  drivers/net/pcs/pcs-xpcs-plat.c | 6
-rw-r--r--  drivers/net/phy/Kconfig | 6
-rw-r--r--  drivers/net/phy/Makefile | 3
-rw-r--r--  drivers/net/phy/air_en8811h.c | 45
-rw-r--r--  drivers/net/phy/broadcom.c | 39
-rw-r--r--  drivers/net/phy/dp83822.c | 7
-rw-r--r--  drivers/net/phy/dp83869.c | 7
-rw-r--r--  drivers/net/phy/dp83tg720.c | 185
-rw-r--r--  drivers/net/phy/intel-xway.c | 7
-rw-r--r--  drivers/net/phy/mdio-boardinfo.c | 29
-rw-r--r--  drivers/net/phy/mdio-boardinfo.h | 9
-rw-r--r--  drivers/net/phy/mdio_bus.c | 1
-rw-r--r--  drivers/net/phy/mdio_bus_provider.c | 4
-rw-r--r--  drivers/net/phy/mdio_device.c | 5
-rw-r--r--  drivers/net/phy/mediatek/Kconfig | 1
-rw-r--r--  drivers/net/phy/micrel.c | 255
-rw-r--r--  drivers/net/phy/mscc/mscc_main.c | 5
-rw-r--r--  drivers/net/phy/mscc/mscc_ptp.c | 1
-rw-r--r--  drivers/net/phy/mscc/mscc_ptp.h | 1
-rw-r--r--  drivers/net/phy/nxp-c45-tja11xx.c | 23
-rw-r--r--  drivers/net/phy/phy-c45.c | 7
-rw-r--r--  drivers/net/phy/phy-core.c | 79
-rw-r--r--  drivers/net/phy/phy_caps.c | 13
-rw-r--r--  drivers/net/phy/phy_device.c | 174
-rw-r--r--  drivers/net/phy/phy_package.c | 71
-rw-r--r--  drivers/net/phy/phylib-internal.h | 6
-rw-r--r--  drivers/net/phy/phylink.c | 74
-rw-r--r--  drivers/net/phy/qcom/Kconfig | 3
-rw-r--r--  drivers/net/phy/qcom/at803x.c | 167
-rw-r--r--  drivers/net/phy/qcom/qca807x.c | 40
-rw-r--r--  drivers/net/phy/qcom/qca808x.c | 23
-rw-r--r--  drivers/net/phy/qcom/qcom-phy-lib.c | 75
-rw-r--r--  drivers/net/phy/qcom/qcom.h | 23
-rw-r--r--  drivers/net/phy/realtek/realtek_main.c | 10
-rw-r--r--  drivers/net/phy/sfp.c | 21
-rw-r--r--  drivers/net/phy/smsc.c | 1
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 86
-rw-r--r--  drivers/net/ppp/pppoe.c | 6
-rw-r--r--  drivers/net/ppp/pptp.c | 18
-rw-r--r--  drivers/net/pse-pd/pd692x0.c | 233
-rw-r--r--  drivers/net/pse-pd/pse_core.c | 1066
-rw-r--r--  drivers/net/pse-pd/tps23881.c | 403
-rw-r--r--  drivers/net/tap.c | 10
-rw-r--r--  drivers/net/team/team_core.c | 96
-rw-r--r--  drivers/net/team/team_mode_activebackup.c | 3
-rw-r--r--  drivers/net/team/team_mode_loadbalance.c | 13
-rw-r--r--  drivers/net/thunderbolt/main.c | 21
-rw-r--r--  drivers/net/tun.c | 72
-rw-r--r--  drivers/net/tun_vnet.h | 101
-rw-r--r--  drivers/net/usb/Kconfig | 3
-rw-r--r--  drivers/net/usb/asix_devices.c | 1
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 20
-rw-r--r--  drivers/net/usb/lan78xx.c | 740
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/usb/smsc95xx.c | 72
-rw-r--r--  drivers/net/usb/usbnet.c | 55
-rw-r--r--  drivers/net/virtio_net.c | 190
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 74
-rw-r--r--  drivers/net/vrf.c | 2
-rw-r--r--  drivers/net/vxlan/vxlan_core.c | 60
-rw-r--r--  drivers/net/vxlan/vxlan_private.h | 2
-rw-r--r--  drivers/net/vxlan/vxlan_vnifilter.c | 31
-rw-r--r--  drivers/net/wan/lapbether.c | 2
-rw-r--r--  drivers/net/wireguard/device.c | 2
-rw-r--r--  drivers/net/wireguard/peer.h | 2
-rw-r--r--  drivers/net/wireguard/socket.c | 4
-rw-r--r--  drivers/net/wireless/admtek/adm8211.c | 2
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 54
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 13
-rw-r--r--  drivers/net/wireless/ath/ath10k/coredump.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath10k/debugfs_sta.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_tx.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 10
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 35
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/trace.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath11k/ahb.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/ce.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.h | 13
-rw-r--r--  drivers/net/wireless/ath/ath11k/coredump.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/dbring.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath11k/debug.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs.c | 40
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c | 15
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs_sta.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.c | 45
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_tx.c | 15
-rw-r--r--  drivers/net/wireless/ath/ath11k/fw.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal.c | 41
-rw-r--r--  drivers/net/wireless/ath/ath11k/htc.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c | 42
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath11k/pcic.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/qmi.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/spectral.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath11k/trace.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath12k/ahb.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath12k/ce.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath12k/core.c | 85
-rw-r--r--  drivers/net/wireless/ath/ath12k/core.h | 57
-rw-r--r--  drivers/net/wireless/ath/ath12k/dbring.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c | 564
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h | 207
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp.c | 137
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp.h | 43
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_mon.c | 30
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_rx.c | 90
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_tx.c | 160
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal.c | 40
-rw-r--r--  drivers/net/wireless/ath/ath12k/hw.c | 57
-rw-r--r--  drivers/net/wireless/ath/ath12k/hw.h | 32
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.c | 2086
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.h | 24
-rw-r--r--  drivers/net/wireless/ath/ath12k/p2p.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath12k/pci.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath12k/peer.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath12k/peer.h | 28
-rw-r--r--  drivers/net/wireless/ath/ath12k/qmi.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath12k/qmi.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath12k/reg.c | 148
-rw-r--r--  drivers/net/wireless/ath/ath12k/reg.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.c | 383
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.h | 167
-rw-r--r--  drivers/net/wireless/ath/ath5k/mac80211-ops.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath5k/phy.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath5k/reg.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath6kl/core.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/hif.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/htc.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath6kl/htc_mbox.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/htc_pipe.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/init.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath6kl/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/sdio.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/usb.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath6kl/wmi.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/wmi.h | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/ahb.c | 60
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-beacon.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-debug.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-init.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-spectral.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/dynack.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 9
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/main.c | 1
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 5
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 3
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.c | 2
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.h | 4
-rw-r--r--  drivers/net/wireless/atmel/at76c50x-usb.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/b43/main.c | 6
-rw-r--r--  drivers/net/wireless/broadcom/b43legacy/main.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 1
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 71
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 1
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 5
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c | 26
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 45
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 5
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 5
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c | 22
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c | 443
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h | 27
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h | 11
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c | 25
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h | 2
-rw-r--r--  drivers/net/wireless/intel/ipw2x00/libipw_module.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/4965-mac.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/commands.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/common.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/common.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/Kconfig | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/Makefile | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/ax210.c | 34
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/bz.c | 13
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/dr.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c | 31
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c | 49
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c | 29
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/sc.c | 21
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/agn.h | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/commands.h | 16
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/dev.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/devices.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c | 33
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/lib.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/main.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/power.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/rs.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/rx.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/rxon.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/tx.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/alive.h | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/commands.h | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/d3.h | 133
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h | 20
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/offload.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/power.h | 61
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/rx.h | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 35
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 40
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/debugfs.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dump.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/file.h | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/img.h | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/pnvm.c | 34
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/pnvm.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/regulatory.c | 36
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/regulatory.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 33
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-io.c | 16
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-io.h | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 150
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-trans.c | 80
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-utils.c | 113
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-utils.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mei/sap.h | 30
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/Makefile | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/agg.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/ap.c | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/coex.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/constants.h | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/d3.c | 204
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/debugfs.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/fw.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/iface.c | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/iface.h | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/key.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/link.c | 383
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/link.h | 36
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/low_latency.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/mac80211.c | 136
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/mcc.c | 66
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/mld.c | 27
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/mld.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/mlo.c | 110
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/mlo.h | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/notif.c | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/phy.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/power.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/ptp.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/regulatory.c | 100
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/rx.c | 82
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/scan.c | 178
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/scan.h | 39
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/stats.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c | 140
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 501
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 69
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/link.c | 62
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 52
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 59
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 60
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 68
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/power.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ptp.c | 14
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 96
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 121
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 25
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.h | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 93
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 366
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h (renamed from drivers/net/wireless/intel/iwlwifi/pcie/internal.h) | 58
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/rx.c) | 34
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c) | 29
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/trans.c) | 607
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c) | 0
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/tx.c) | 53
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info-v2.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h) | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info.h) | 0
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/utils.c | 104
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/utils.h | 40
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/tests/Makefile | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/tests/devinfo.c | 73
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/tests/utils.c (renamed from drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c) | 43
-rw-r--r--  drivers/net/wireless/intersil/p54/main.c | 3
-rw-r--r--  drivers/net/wireless/intersil/p54/p54spi.c | 4
-rw-r--r--  drivers/net/wireless/marvell/libertas/cfg.c | 4
-rw-r--r--  drivers/net/wireless/marvell/libertas_tf/main.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cfg80211.c | 18
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/fw.h | 4
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sdio.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwl8k.c | 16
-rw-r--r--  drivers/net/wireless/mediatek/mt76/channel.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.c | 11
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mac80211.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mcu.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76.h | 13
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/main.c | 5
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/main.c | 11
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/main.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02.h | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/main.c | 13
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 30
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/mac.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/main.c | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/sdio.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c | 58
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7925/main.c | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt792x.h | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt792x_core.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 48
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/main.c | 91
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/mcu.c | 17
-rw-r--r--  drivers/net/wireless/mediatek/mt76/sdio_txrx.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/wed.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/main.c | 5
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/cfg80211.c | 7
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/wlan.c | 5
-rw-r--r--  drivers/net/wireless/purelifi/plfxlc/mac.c | 16
-rw-r--r--  drivers/net/wireless/purelifi/plfxlc/mac.h | 2
-rw-r--r--  drivers/net/wireless/purelifi/plfxlc/usb.c | 29
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 8
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/Kconfig | 7
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/Makefile | 1
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.h | 3
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800soc.c | 110
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00.h | 8
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 8
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | 2
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00soc.c | 151
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00soc.h | 29
-rw-r--r--  drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/core.c | 12
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/core.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/pci.c | 23
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c | 25
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 23
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 21
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 38
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtw88/coex.c | 22
-rw-r--r--  drivers/net/wireless/realtek/rtw88/fw.c | 8
-rw-r--r--  drivers/net/wireless/realtek/rtw88/mac.c | 32
-rw-r--r--  drivers/net/wireless/realtek/rtw88/mac.h | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw88/mac80211.c | 9
-rw-r--r--  drivers/net/wireless/realtek/rtw88/main.c | 25
-rw-r--r--  drivers/net/wireless/realtek/rtw88/main.h | 15
-rw-r--r--  drivers/net/wireless/realtek/rtw88/pci.c | 49
-rw-r--r--  drivers/net/wireless/realtek/rtw88/pci.h | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8703b.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8723d.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8723de.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8723x.c | 9
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8723x.h | 6
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8812a.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8814a.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8821a.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8821c.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8821ce.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822b.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822be.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822c.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822ce.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw88/sdio.c | 8
-rw-r--r--  drivers/net/wireless/realtek/rtw89/Kconfig | 26
-rw-r--r--  drivers/net/wireless/realtek/rtw89/Makefile | 9
-rw-r--r--  drivers/net/wireless/realtek/rtw89/acpi.c | 95
-rw-r--r--  drivers/net/wireless/realtek/rtw89/acpi.h | 33
-rw-r--r--  drivers/net/wireless/realtek/rtw89/chan.c | 557
-rw-r--r--  drivers/net/wireless/realtek/rtw89/chan.h | 73
-rw-r--r--  drivers/net/wireless/realtek/rtw89/coex.c | 1287
-rw-r--r--  drivers/net/wireless/realtek/rtw89/coex.h | 7
-rw-r--r--  drivers/net/wireless/realtek/rtw89/core.c | 275
-rw-r--r--  drivers/net/wireless/realtek/rtw89/core.h | 220
-rw-r--r--  drivers/net/wireless/realtek/rtw89/debug.c | 15
-rw-r--r--  drivers/net/wireless/realtek/rtw89/fw.c | 622
-rw-r--r--  drivers/net/wireless/realtek/rtw89/fw.h | 108
-rw-r--r--  drivers/net/wireless/realtek/rtw89/mac.c | 180
-rw-r--r--  drivers/net/wireless/realtek/rtw89/mac.h | 39
-rw-r--r--  drivers/net/wireless/realtek/rtw89/mac80211.c | 96
-rw-r--r--  drivers/net/wireless/realtek/rtw89/mac_be.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw89/pci.c | 42
-rw-r--r--  drivers/net/wireless/realtek/rtw89/pci.h | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw89/phy.c | 321
-rw-r--r--  drivers/net/wireless/realtek/rtw89/phy.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw89/ps.c | 54
-rw-r--r--  drivers/net/wireless/realtek/rtw89/ps.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw89/reg.h | 36
-rw-r--r--  drivers/net/wireless/realtek/rtw89/regd.c | 149
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8851b.c | 171
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c | 156
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c | 77
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h | 2
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8851b_table.c | 501
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8851be.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8851bu.c | 39
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852a.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852ae.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852b.c | 100
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852b_common.c | 16
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c | 77
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852be.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852bt.c | 19
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c | 69
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852bte.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852bu.c | 55
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852c.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852ce.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8922a.c | 51
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c | 52
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8922ae.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw89/sar.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtw89/ser.c | 14
-rw-r--r--  drivers/net/wireless/realtek/rtw89/txrx.h | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw89/usb.c | 1042
-rw-r--r--  drivers/net/wireless/realtek/rtw89/usb.h | 65
-rw-r--r--  drivers/net/wireless/realtek/rtw89/wow.c | 18
-rw-r--r--  drivers/net/wireless/realtek/rtw89/wow.h | 14
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mac80211.c | 9
-rw-r--r--  drivers/net/wireless/silabs/wfx/sta.c | 4
-rw-r--r--  drivers/net/wireless/silabs/wfx/sta.h | 4
-rw-r--r--  drivers/net/wireless/st/cw1200/sta.c | 5
-rw-r--r--  drivers/net/wireless/st/cw1200/sta.h | 5
-rw-r--r--  drivers/net/wireless/ti/wl1251/acx.c | 35
-rw-r--r--  drivers/net/wireless/ti/wl1251/acx.h | 1
-rw-r--r--  drivers/net/wireless/ti/wl1251/cmd.c | 79
-rw-r--r--  drivers/net/wireless/ti/wl1251/cmd.h | 3
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c | 5
-rw-r--r--  drivers/net/wireless/ti/wl1251/reg.h | 6
-rw-r--r--  drivers/net/wireless/ti/wl12xx/reg.h | 6
-rw-r--r--  drivers/net/wireless/ti/wlcore/cmd.c | 26
-rw-r--r--  drivers/net/wireless/ti/wlcore/cmd.h | 1
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 8
-rw-r--r--  drivers/net/wireless/virtual/mac80211_hwsim.c | 9
-rw-r--r--  drivers/net/wireless/virtual/mac80211_hwsim.h | 14
-rw-r--r--  drivers/net/wireless/zydas/zd1211rw/zd_mac.c | 2
-rw-r--r--  drivers/net/wireless/zydas/zd1211rw/zd_usb.c | 2
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_trace.c | 3
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_port_trace.c | 2
-rw-r--r--  drivers/net/xen-netfront.c | 5
-rw-r--r--  drivers/nfc/trf7970a.c | 91
-rw-r--r--  drivers/nvdimm/pmem.c | 8
-rw-r--r--  drivers/nvdimm/pmem.h | 4
-rw-r--r--  drivers/nvme/host/auth.c | 4
-rw-r--r--  drivers/nvme/host/core.c | 16
-rw-r--r--  drivers/nvme/host/fc.c | 4
-rw-r--r--  drivers/nvme/host/pci.c | 2
-rw-r--r--  drivers/nvme/host/tcp.c | 2
-rw-r--r--  drivers/nvme/target/core.c | 14
-rw-r--r--  drivers/nvme/target/fc.c | 6
-rw-r--r--  drivers/nvme/target/passthru.c | 2
-rw-r--r--  drivers/nvme/target/rdma.c | 6
-rw-r--r--  drivers/of/irq.c | 22
-rw-r--r--  drivers/parisc/power.c | 20
-rw-r--r--  drivers/pci/Kconfig | 1
-rw-r--r--  drivers/pci/bus.c | 5
-rw-r--r--  drivers/pci/controller/Kconfig | 11
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-ep.c | 2
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.h | 20
-rw-r--r--  drivers/pci/controller/dwc/Kconfig | 12
-rw-r--r--  drivers/pci/controller/dwc/Makefile | 1
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 40
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-debugfs.c | 16
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 107
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 14
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h | 19
-rw-r--r--  drivers/pci/controller/dwc/pcie-dw-rockchip.c | 16
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 327
-rw-r--r--  drivers/pci/controller/dwc/pcie-sophgo.c | 257
-rw-r--r--  drivers/pci/controller/mobiveil/Kconfig | 1
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 48
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil.h | 1
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 59
-rw-r--r--  drivers/pci/controller/pci-host-common.c | 5
-rw-r--r--  drivers/pci/controller/pci-host-common.h | 2
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 110
-rw-r--r--  drivers/pci/controller/pci-mvebu.c | 6
-rw-r--r--  drivers/pci/controller/pci-xgene-msi.c | 428
-rw-r--r--  drivers/pci/controller/pci-xgene.c | 33
-rw-r--r--  drivers/pci/controller/pcie-altera-msi.c | 45
-rw-r--r--  drivers/pci/controller/pcie-altera.c | 3
-rw-r--r--  drivers/pci/controller/pcie-brcmstb.c | 80
-rw-r--r--  drivers/pci/controller/pcie-iproc-msi.c | 46
-rw-r--r--  drivers/pci/controller/pcie-mediatek-gen3.c | 68
-rw-r--r--  drivers/pci/controller/pcie-mediatek.c | 48
-rw-r--r--  drivers/pci/controller/pcie-rcar-host.c | 70
-rw-r--r--  drivers/pci/controller/pcie-rockchip-ep.c | 4
-rw-r--r--  drivers/pci/controller/pcie-rockchip-host.c | 64
-rw-r--r--  drivers/pci/controller/pcie-rockchip.h | 26
-rw-r--r--  drivers/pci/controller/pcie-xilinx-dma-pl.c | 49
-rw-r--r--  drivers/pci/controller/pcie-xilinx-nwl.c | 46
-rw-r--r--  drivers/pci/controller/pcie-xilinx.c | 56
-rw-r--r--  drivers/pci/controller/plda/Kconfig | 1
-rw-r--r--  drivers/pci/controller/plda/pcie-plda-host.c | 45
-rw-r--r--  drivers/pci/controller/plda/pcie-plda.h | 1
-rw-r--r--  drivers/pci/controller/plda/pcie-starfive.c | 2
-rw-r--r--  drivers/pci/controller/vmd.c | 243
-rw-r--r--  drivers/pci/endpoint/Kconfig | 8
-rw-r--r--  drivers/pci/endpoint/Makefile | 1
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c | 130
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-vntb.c | 144
-rw-r--r--  drivers/pci/endpoint/pci-ep-cfs.c | 1
-rw-r--r--  drivers/pci/endpoint/pci-ep-msi.c | 100
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c | 40
-rw-r--r--  drivers/pci/hotplug/TODO | 4
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 2
-rw-r--r--  drivers/pci/hotplug/pnv_php.c | 248
-rw-r--r--  drivers/pci/iov.c | 153
-rw-r--r--  drivers/pci/msi/irqdomain.c | 25
-rw-r--r--  drivers/pci/msi/msi.c | 2
-rw-r--r--  drivers/pci/pci-acpi.c | 7
-rw-r--r--  drivers/pci/pci-driver.c | 6
-rw-r--r--  drivers/pci/pci.c | 30
-rw-r--r--  drivers/pci/pci.h | 84
-rw-r--r--  drivers/pci/pcie/aer.c | 7
-rw-r--r--  drivers/pci/pcie/aspm.c | 11
-rw-r--r--  drivers/pci/pcie/portdrv.c | 2
-rw-r--r--  drivers/pci/pcie/ptm.c | 2
-rw-r--r--  drivers/pci/probe.c | 12
-rw-r--r--  drivers/pci/quirks.c | 6
-rw-r--r--  drivers/pci/setup-bus.c | 3
-rw-r--r--  drivers/pci/setup-res.c | 35
-rw-r--r--  drivers/pci/tph.c | 11
-rw-r--r--  drivers/perf/Kconfig | 11
-rw-r--r--  drivers/perf/Makefile | 1
-rw-r--r--  drivers/perf/arm-cmn.c | 20
-rw-r--r--  drivers/perf/arm-ni.c | 153
-rw-r--r--  drivers/perf/arm_brbe.c | 805
-rw-r--r--  drivers/perf/arm_brbe.h | 47
-rw-r--r--  drivers/perf/arm_pmu.c | 16
-rw-r--r--  drivers/perf/arm_pmuv3.c | 107
-rw-r--r--  drivers/perf/arm_spe_pmu.c | 18
-rw-r--r--  drivers/perf/cxl_pmu.c | 12
-rw-r--r--  drivers/perf/fsl_imx9_ddr_perf.c | 8
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c | 354
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_hha_pmu.c | 6
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pa_pmu.c | 2
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.c | 11
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.h | 2
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c | 220
-rw-r--r--  drivers/phy/allwinner/phy-sun4i-usb.c | 2
-rw-r--r--  drivers/phy/broadcom/phy-bcm-ns2-pcie.c | 2
-rw-r--r--  drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c | 1
-rw-r--r--  drivers/phy/broadcom/phy-bcm-sr-pcie.c | 2
-rw-r--r--  drivers/phy/broadcom/phy-brcm-sata.c | 2
-rw-r--r--  drivers/phy/cadence/phy-cadence-sierra.c | 180
-rw-r--r--  drivers/phy/cadence/phy-cadence-torrent.c | 288
-rw-r--r--  drivers/phy/marvell/phy-pxa-usb.c | 1
-rw-r--r--  drivers/phy/mediatek/phy-mtk-tphy.c | 65
-rw-r--r--  drivers/phy/phy-snps-eusb2.c | 46
-rw-r--r--  drivers/phy/qualcomm/Kconfig | 16
-rw-r--r--  drivers/phy/qualcomm/Makefile | 1
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c | 89
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-m31-eusb2.c | 324
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-m31.c | 16
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-combo.c | 224
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-pcie.c | 89
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h | 2
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v8.h | 38
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h | 4
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8.h | 32
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v8.h | 64
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v5.h | 11
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v8.h | 68
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-ufs.c | 141
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp.h | 6
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qusb2.c | 4
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-pcie.c | 15
-rw-r--r--  drivers/phy/samsung/phy-exynos-mipi-video.c | 52
-rw-r--r--  drivers/phy/samsung/phy-exynos5-usbdrd.c | 32
-rw-r--r--  drivers/phy/st/phy-stih407-usb.c | 2
-rw-r--r--  drivers/phy/st/phy-stm32-usbphyc.c | 4
-rw-r--r--  drivers/phy/ti/phy-twl4030-usb.c | 1
-rw-r--r--  drivers/pinctrl/Kconfig | 21
-rw-r--r--  drivers/pinctrl/Makefile | 3
-rw-r--r--  drivers/pinctrl/actions/pinctrl-owl.c | 2
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c | 2
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 2
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c | 14
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed.c | 2
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed.h | 2
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-bcm2835.c | 4
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-bcm4908.c | 2
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-cygnus-mux.c | 8
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-iproc-gpio.c | 2
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-ns.c | 2
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-ns2-mux.c | 8
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-nsp-gpio.c | 2
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-nsp-mux.c | 8
-rw-r--r--  drivers/pinctrl/berlin/berlin.c | 10
-rw-r--r--  drivers/pinctrl/cirrus/pinctrl-cs42l43.c | 21
-rw-r--r--  drivers/pinctrl/cirrus/pinctrl-lochnagar.c | 23
-rw-r--r--  drivers/pinctrl/cirrus/pinctrl-madera-core.c | 14
-rw-r--r--  drivers/pinctrl/core.c | 13
-rw-r--r--  drivers/pinctrl/core.h | 2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 6
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 4
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 18
-rw-r--r--  drivers/pinctrl/intel/pinctrl-lynxpoint.c | 4
-rw-r--r--  drivers/pinctrl/mediatek/Kconfig | 12
-rw-r--r--  drivers/pinctrl/mediatek/Makefile | 1
-rw-r--r--  drivers/pinctrl/mediatek/mtk-eint.c | 4
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-airoha.c | 12
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-moore.c | 7
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8189.c | 1700
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 2
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-mt8189.h | 2452
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-paris.c | 2
-rw-r--r--  drivers/pinctrl/meson/pinctrl-amlogic-a4.c | 120
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-g12a.c | 22
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson.c | 2
-rw-r--r--  drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 2
-rw-r--r--  drivers/pinctrl/nomadik/pinctrl-abx500.c | 2
-rw-r--r--  drivers/pinctrl/nuvoton/pinctrl-ma35.c | 4
-rw-r--r--  drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c | 2
-rw-r--r--  drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c | 2
-rw-r--r--  drivers/pinctrl/nuvoton/pinctrl-wpcm450.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 10
-rw-r--r--  drivers/pinctrl/pinctrl-amdisp.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-apple-gpio.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-artpec6.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-as3722.c | 21
-rw-r--r--  drivers/pinctrl/pinctrl-at91-pio4.c | 10
-rw-r--r--  drivers/pinctrl/pinctrl-at91.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-aw9523.c | 22
-rw-r--r--  drivers/pinctrl/pinctrl-axp209.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-bm1880.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-cy8c95x0.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-da9062.c | 12
-rw-r--r--  drivers/pinctrl/pinctrl-digicolor.c | 6
-rw-r--r--  drivers/pinctrl/pinctrl-eic7700.c | 704
-rw-r--r--  drivers/pinctrl/pinctrl-equilibrium.c | 9
-rw-r--r--  drivers/pinctrl/pinctrl-falcon.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-ingenic.c | 7
-rw-r--r--  drivers/pinctrl/pinctrl-k210.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-k230.c | 13
-rw-r--r--  drivers/pinctrl/pinctrl-keembay.c | 17
-rw-r--r--  drivers/pinctrl/pinctrl-lpc18xx.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-max77620.c | 9
-rw-r--r--  drivers/pinctrl/pinctrl-mcp23s08.c | 16
-rw-r--r--  drivers/pinctrl/pinctrl-microchip-sgpio.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-mlxbf3.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-ocelot.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-palmas.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-pic32.c | 6
-rw-r--r--  drivers/pinctrl/pinctrl-pistachio.c | 10
-rw-r--r--  drivers/pinctrl/pinctrl-rk805.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-rp1.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-st.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-stmfx.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-sx150x.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-tb10x.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-xway.c | 16
-rw-r--r--  drivers/pinctrl/pinctrl-zynq.c | 2
-rw-r--r--  drivers/pinctrl/pinmux.c | 45
-rw-r--r--  drivers/pinctrl/pinmux.h | 10
-rw-r--r--  drivers/pinctrl/qcom/Kconfig.msm | 8
-rw-r--r--  drivers/pinctrl/qcom/Makefile | 1
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-lpass-lpi.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-milos.c | 1339
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-spmi-gpio.c | 4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c2
-rw-r--r--drivers/pinctrl/qcom/tlmm-test.c47
-rw-r--r--drivers/pinctrl/renesas/Kconfig249
-rw-r--r--drivers/pinctrl/renesas/gpio.c4
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c5
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c5
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c51
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzn1.c4
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c103
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h4
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c9
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c8
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c5
-rw-r--r--drivers/pinctrl/stm32/Kconfig20
-rw-r--r--drivers/pinctrl/stm32/Makefile1
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32-hdp.c720
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c146
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h22
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp257.c15
-rw-r--r--drivers/pinctrl/sunplus/sppctl.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c19
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c15
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-gpio.c4
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c2
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c2
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c128
-rw-r--r--drivers/platform/x86/portwell-ec.c4
-rw-r--r--drivers/platform/x86/silicom-platform.c2
-rw-r--r--drivers/power/reset/Kconfig1
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c2
-rw-r--r--drivers/power/reset/qcom-pon.c30
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/bq2415x_charger.c2
-rw-r--r--drivers/power/supply/bq24190_charger.c16
-rw-r--r--drivers/power/supply/bq256xx_charger.c6
-rw-r--r--drivers/power/supply/bq25980_charger.c6
-rw-r--r--drivers/power/supply/cpcap-charger.c5
-rw-r--r--drivers/power/supply/max14577_charger.c4
-rw-r--r--drivers/power/supply/max1720x_battery.c13
-rw-r--r--drivers/power/supply/power_supply_core.c185
-rw-r--r--drivers/power/supply/qcom_battmgr.c25
-rw-r--r--drivers/power/supply/qcom_smbx.c (renamed from drivers/power/supply/qcom_pmi8998_charger.c)152
-rw-r--r--drivers/power/supply/twl4030_charger.c1
-rw-r--r--drivers/power/supply/ug3105_battery.c81
-rw-r--r--drivers/ptp/ptp_chardev.c748
-rw-r--r--drivers/ptp/ptp_clock.c2
-rw-r--r--drivers/ptp/ptp_private.h5
-rw-r--r--drivers/ptp/ptp_vclock.c7
-rw-r--r--drivers/pwm/pwm-axi-pwmgen.c2
-rw-r--r--drivers/pwm/pwm-imx-tpm.c9
-rw-r--r--drivers/pwm/pwm-mediatek.c71
-rw-r--r--drivers/pwm/pwm-pca9685.c2
-rw-r--r--drivers/regulator/act8865-regulator.c2
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig11
-rw-r--r--drivers/remoteproc/omap_remoteproc.c2
-rw-r--r--drivers/remoteproc/pru_rproc.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c621
-rw-r--r--drivers/remoteproc/remoteproc_core.c2
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c2
-rw-r--r--drivers/remoteproc/st_slim_rproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_common.c4
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c2
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c74
-rw-r--r--drivers/reset/Kconfig9
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-spacemit.c304
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c2
-rw-r--r--drivers/rtc/Kconfig21
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/lib.c40
-rw-r--r--drivers/rtc/rtc-ds1307.c30
-rw-r--r--drivers/rtc/rtc-ds1685.c4
-rw-r--r--drivers/rtc/rtc-hym8563.c15
-rw-r--r--drivers/rtc/rtc-m41t80.c25
-rw-r--r--drivers/rtc/rtc-max31335.c12
-rw-r--r--drivers/rtc/rtc-nct3018y.c15
-rw-r--r--drivers/rtc/rtc-pcf85063.c267
-rw-r--r--drivers/rtc/rtc-pcf8563.c15
-rw-r--r--drivers/rtc/rtc-rv3028.c15
-rw-r--r--drivers/rtc/rtc-rv3032.c21
-rw-r--r--drivers/rtc/rtc-s3c.c8
-rw-r--r--drivers/rtc/rtc-sh.c8
-rw-r--r--drivers/rtc/rtc-stm32.c2
-rw-r--r--drivers/rtc/sysfs.c64
-rw-r--r--drivers/rtc/test_rtc_lib.c (renamed from drivers/rtc/lib_test.c)0
-rw-r--r--drivers/s390/block/dasd.c1
-rw-r--r--drivers/s390/block/dasd_devmap.c1
-rw-r--r--drivers/s390/block/dasd_eer.c1
-rw-r--r--drivers/s390/block/dasd_erp.c1
-rw-r--r--drivers/s390/block/dasd_ioctl.c1
-rw-r--r--drivers/s390/block/dcssblk.c10
-rw-r--r--drivers/s390/char/keyboard.c1
-rw-r--r--drivers/s390/char/raw3270.c1
-rw-r--r--drivers/s390/char/sclp.c5
-rw-r--r--drivers/s390/char/sclp_cmd.c1
-rw-r--r--drivers/s390/char/sclp_early.c1
-rw-r--r--drivers/s390/char/sclp_ocf.c1
-rw-r--r--drivers/s390/char/sclp_sd.c1
-rw-r--r--drivers/s390/char/tape_34xx.c1
-rw-r--r--drivers/s390/char/tape_3590.c1
-rw-r--r--drivers/s390/char/tape_class.c1
-rw-r--r--drivers/s390/char/tape_core.c1
-rw-r--r--drivers/s390/char/tape_std.c1
-rw-r--r--drivers/s390/char/vmcp.c1
-rw-r--r--drivers/s390/cio/airq.c1
-rw-r--r--drivers/s390/cio/ccwgroup.c2
-rw-r--r--drivers/s390/cio/chsc.c1
-rw-r--r--drivers/s390/cio/cio.c1
-rw-r--r--drivers/s390/cio/device_fsm.c1
-rw-r--r--drivers/s390/cio/eadm_sch.c1
-rw-r--r--drivers/s390/cio/fcx.c1
-rw-r--r--drivers/s390/cio/isc.c1
-rw-r--r--drivers/s390/cio/itcw.c1
-rw-r--r--drivers/s390/cio/qdio_debug.c1
-rw-r--r--drivers/s390/cio/qdio_main.c2
-rw-r--r--drivers/s390/cio/scm.c1
-rw-r--r--drivers/s390/crypto/ap_bus.c1
-rw-r--r--drivers/s390/crypto/ap_bus.h2
-rw-r--r--drivers/s390/crypto/ap_queue.c1
-rw-r--r--drivers/s390/crypto/pkey_api.c1
-rw-r--r--drivers/s390/crypto/pkey_base.c1
-rw-r--r--drivers/s390/crypto/zcrypt_api.c1
-rw-r--r--drivers/s390/crypto/zcrypt_card.c1
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c1
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c1
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c1
-rw-r--r--drivers/s390/net/Kconfig12
-rw-r--r--drivers/s390/net/Makefile1
-rw-r--r--drivers/s390/net/ctcm_mpc.c1
-rw-r--r--drivers/s390/net/fsm.c1
-rw-r--r--drivers/s390/net/ism_drv.c1
-rw-r--r--drivers/s390/net/netiucv.c2083
-rw-r--r--drivers/s390/net/qeth_core_main.c1
-rw-r--r--drivers/s390/net/qeth_core_sys.c22
-rw-r--r--drivers/s390/net/qeth_l2_main.c1
-rw-r--r--drivers/s390/net/qeth_l3_main.c1
-rw-r--r--drivers/s390/net/smsgiucv.c1
-rw-r--r--drivers/scsi/aacraid/comminit.c3
-rw-r--r--drivers/scsi/bfa/bfad_im.c1
-rw-r--r--drivers/scsi/elx/efct/efct_lio.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c22
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c6
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/libsrp.c6
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/libiscsi.c3
-rw-r--r--drivers/scsi/libsas/sas_ata.c10
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_internal.h78
-rw-r--r--drivers/scsi/libsas/sas_phy.c6
-rw-r--r--drivers/scsi/libsas/sas_port.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h20
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c84
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h10
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c10
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c17
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c22
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c22
-rw-r--r--drivers/scsi/mvsas/mv_sas.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h10
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c60
-rw-r--r--drivers/scsi/qla2xxx/Kconfig6
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c20
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c48
-rw-r--r--drivers/scsi/scsi.c15
-rw-r--r--drivers/scsi/scsi_debug.c93
-rw-r--r--drivers/scsi/scsi_devinfo.c11
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_scan.c5
-rw-r--r--drivers/scsi/scsi_sysfs.c4
-rw-r--r--drivers/scsi/scsi_transport_fc.c72
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c60
-rw-r--r--drivers/scsi/sd.c17
-rw-r--r--drivers/soc/fsl/qe/gpio.c4
-rw-r--r--drivers/soc/qcom/Kconfig8
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/ubwc_config.c282
-rw-r--r--drivers/soc/renesas/pwc-rzv2m.c2
-rw-r--r--drivers/soc/tegra/pmc.c51
-rw-r--r--drivers/soundwire/amd_manager.c14
-rw-r--r--drivers/soundwire/bus.c6
-rw-r--r--drivers/soundwire/debugfs.c6
-rw-r--r--drivers/soundwire/intel_ace2x.c11
-rw-r--r--drivers/soundwire/intel_auxdevice.c1
-rw-r--r--drivers/soundwire/mipi_disco.c4
-rw-r--r--drivers/soundwire/qcom.c6
-rw-r--r--drivers/soundwire/stream.c2
-rw-r--r--drivers/spi/spi-axi-spi-engine.c2
-rw-r--r--drivers/spi/spi-cs42l43.c2
-rw-r--r--drivers/spi/spi-mem.c27
-rw-r--r--drivers/spi/spi-xcomm.c2
-rw-r--r--drivers/ssb/driver_gpio.c12
-rw-r--r--drivers/staging/greybus/gpio.c2
-rw-r--r--drivers/staging/media/Kconfig4
-rw-r--r--drivers/staging/media/Makefile2
-rw-r--r--drivers/staging/media/atomisp/Kconfig1
-rw-r--r--drivers/staging/media/atomisp/Makefile1
-rw-r--r--drivers/staging/media/atomisp/TODO2
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig1
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c611
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h16
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.h17
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c233
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.c155
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.h15
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c129
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c5
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h5
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c91
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_bo.c5
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_pipe.h2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h4
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h6
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c4
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c6
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c3
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h6
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h157
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c1
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c29
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c11
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h1
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c2
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c27
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_defs.h12
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_internal.h8
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c11
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.h2
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c12
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c2
-rw-r--r--drivers/staging/media/ipu7/Kconfig19
-rw-r--r--drivers/staging/media/ipu7/Makefile23
-rw-r--r--drivers/staging/media/ipu7/TODO28
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_boot_abi.h163
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_common_abi.h175
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_config_abi.h19
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_insys_config_abi.h19
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_isys_abi.h412
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_msg_abi.h465
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_psys_config_abi.h24
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_syscom_abi.h49
-rw-r--r--drivers/staging/media/ipu7/ipu7-boot.c430
-rw-r--r--drivers/staging/media/ipu7/ipu7-boot.h25
-rw-r--r--drivers/staging/media/ipu7/ipu7-bus.c158
-rw-r--r--drivers/staging/media/ipu7/ipu7-bus.h69
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress-regs.h461
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress.c1192
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress.h77
-rw-r--r--drivers/staging/media/ipu7/ipu7-cpd.c276
-rw-r--r--drivers/staging/media/ipu7/ipu7-cpd.h16
-rw-r--r--drivers/staging/media/ipu7/ipu7-dma.c477
-rw-r--r--drivers/staging/media/ipu7/ipu7-dma.h46
-rw-r--r--drivers/staging/media/ipu7/ipu7-fw-isys.c301
-rw-r--r--drivers/staging/media/ipu7/ipu7-fw-isys.h39
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi-phy.c1034
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi-phy.h16
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2-regs.h1197
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2.c543
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2.h64
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-queue.c829
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-queue.h72
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.c348
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.h53
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-video.c1112
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-video.h117
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys.c1166
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys.h140
-rw-r--r--drivers/staging/media/ipu7/ipu7-mmu.c853
-rw-r--r--drivers/staging/media/ipu7/ipu7-mmu.h414
-rw-r--r--drivers/staging/media/ipu7/ipu7-platform-regs.h82
-rw-r--r--drivers/staging/media/ipu7/ipu7-syscom.c78
-rw-r--r--drivers/staging/media/ipu7/ipu7-syscom.h35
-rw-r--r--drivers/staging/media/ipu7/ipu7.c2783
-rw-r--r--drivers/staging/media/ipu7/ipu7.h242
-rw-r--r--drivers/staging/media/rkvdec/TODO11
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c19
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c18
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c6
-rw-r--r--drivers/target/target_core_fabric_lib.c63
-rw-r--r--drivers/target/target_core_iblock.c33
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_internal.h4
-rw-r--r--drivers/target/target_core_pr.c18
-rw-r--r--drivers/tty/serial/8250/8250_ioc3.c2
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c2
-rw-r--r--drivers/tty/sysrq.c41
-rw-r--r--drivers/ufs/core/ufs-sysfs.c193
-rw-r--r--drivers/ufs/core/ufshcd.c188
-rw-r--r--drivers/ufs/host/ufs-exynos.c4
-rw-r--r--drivers/ufs/host/ufs-mediatek.c330
-rw-r--r--drivers/ufs/host/ufs-mediatek.h32
-rw-r--r--drivers/ufs/host/ufs-qcom.c98
-rw-r--r--drivers/ufs/host/ufs-qcom.h9
-rw-r--r--drivers/ufs/host/ufshcd-pci.c33
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/vdpa/mlx5/core/mr.c3
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c12
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c1
-rw-r--r--drivers/vfio/device_cdev.c38
-rw-r--r--drivers/vfio/group.c7
-rw-r--r--drivers/vfio/iommufd.c4
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c1
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c4
-rw-r--r--drivers/vfio/pci/mlx5/main.c1
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/main.c2
-rw-r--r--drivers/vfio/pci/pds/vfio_dev.c2
-rw-r--r--drivers/vfio/pci/qat/main.c5
-rw-r--r--drivers/vfio/pci/vfio_pci.c1
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c31
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c3
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c10
-rw-r--r--drivers/vfio/pci/virtio/main.c3
-rw-r--r--drivers/vfio/vfio_iommu_type1.c7
-rw-r--r--drivers/vfio/vfio_main.c3
-rw-r--r--drivers/vhost/Kconfig18
-rw-r--r--drivers/vhost/net.c201
-rw-r--r--drivers/vhost/scsi.c24
-rw-r--r--drivers/vhost/vdpa.c10
-rw-r--r--drivers/vhost/vhost.c380
-rw-r--r--drivers/vhost/vhost.h34
-rw-r--r--drivers/vhost/vringh.c118
-rw-r--r--drivers/vhost/vsock.c15
-rw-r--r--drivers/video/Kconfig18
-rw-r--r--drivers/video/console/vgacon.c2
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/c2p_iplan2.c1
-rw-r--r--drivers/video/fbdev/c2p_planar.c1
-rw-r--r--drivers/video/fbdev/core/Kconfig17
-rw-r--r--drivers/video/fbdev/core/cfbcopyarea.c2
-rw-r--r--drivers/video/fbdev/core/cfbfillrect.c2
-rw-r--r--drivers/video/fbdev/core/cfbimgblt.c2
-rw-r--r--drivers/video/fbdev/core/fb_ddc.c1
-rw-r--r--drivers/video/fbdev/core/fb_defio.c1
-rw-r--r--drivers/video/fbdev/core/fb_io_fops.c1
-rw-r--r--drivers/video/fbdev/core/fb_sys_fops.c2
-rw-r--r--drivers/video/fbdev/core/fbcmap.c1
-rw-r--r--drivers/video/fbdev/core/fbcon.c91
-rw-r--r--drivers/video/fbdev/core/fbmem.c3
-rw-r--r--drivers/video/fbdev/core/fbmon.c5
-rw-r--r--drivers/video/fbdev/core/modedb.c1
-rw-r--r--drivers/video/fbdev/core/svgalib.c96
-rw-r--r--drivers/video/fbdev/core/syscopyarea.c2
-rw-r--r--drivers/video/fbdev/core/sysfillrect.c2
-rw-r--r--drivers/video/fbdev/core/sysimgblt.c2
-rw-r--r--drivers/video/fbdev/cyber2000fb.c36
-rw-r--r--drivers/video/fbdev/cyber2000fb.h2
-rw-r--r--drivers/video/fbdev/imxfb.c9
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c24
-rw-r--r--drivers/video/fbdev/macmodes.c3
-rw-r--r--drivers/video/fbdev/matrox/g450_pll.c26
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_DAC1064.c47
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_Ti3026.c1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_accel.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_g450.c62
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_misc.c21
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xx-i2c.c1
-rw-r--r--drivers/video/fbdev/nvidia/nv_local.h2
-rw-r--r--drivers/video/fbdev/omap/lcd_dma.c1
-rw-r--r--drivers/video/fbdev/omap/lcdc.c2
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/apply.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dpi.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss_features.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/output.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/sdi.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/vrfb.c1
-rw-r--r--drivers/video/fbdev/pxafb.c17
-rw-r--r--drivers/video/fbdev/sbuslib.c1
-rw-r--r--drivers/video/fbdev/simplefb.c17
-rw-r--r--drivers/video/fbdev/sis/sis.h2
-rw-r--r--drivers/video/fbdev/sis/sis_main.c25
-rw-r--r--drivers/video/fbdev/via/via-core.c1
-rw-r--r--drivers/video/fbdev/via/via-gpio.c3
-rw-r--r--drivers/video/fbdev/via/via_i2c.c1
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c1
-rw-r--r--drivers/virtio/virtio.c50
-rw-r--r--drivers/virtio/virtio_balloon.c4
-rw-r--r--drivers/virtio/virtio_debug.c27
-rw-r--r--drivers/virtio/virtio_dma_buf.c2
-rw-r--r--drivers/virtio/virtio_mem.c2
-rw-r--r--drivers/virtio/virtio_mmio.c52
-rw-r--r--drivers/virtio/virtio_pci_modern.c10
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c69
-rw-r--r--drivers/virtio/virtio_ring.c4
-rw-r--r--drivers/virtio/virtio_vdpa.c44
-rw-r--r--drivers/watchdog/dw_wdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c6
-rw-r--r--drivers/watchdog/it87_wdt.c4
-rw-r--r--drivers/watchdog/renesas_wdt.c8
-rw-r--r--drivers/watchdog/rti_wdt.c14
-rw-r--r--drivers/watchdog/sbsa_gwdt.c50
-rw-r--r--drivers/watchdog/watchdog_core.h8
-rw-r--r--drivers/watchdog/watchdog_pretimeout.c2
-rw-r--r--drivers/watchdog/ziirave_wdt.c3
-rw-r--r--drivers/xen/gntdev-common.h4
-rw-r--r--drivers/xen/gntdev-dmabuf.c28
-rw-r--r--drivers/xen/gntdev.c71
-rw-r--r--drivers/xen/manage.c6
-rw-r--r--drivers/xen/time.c8
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c12
-rw-r--r--drivers/xen/xenbus/xenbus_client.c2
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c17
-rw-r--r--fs/Kconfig11
-rw-r--r--fs/btrfs/extent_io.c11
-rw-r--r--fs/btrfs/inode.c8
-rw-r--r--fs/btrfs/qgroup.c3
-rw-r--r--fs/btrfs/relocation.c19
-rw-r--r--fs/btrfs/tree-log.c67
-rw-r--r--fs/btrfs/zoned.c2
-rw-r--r--fs/ceph/file.c22
-rw-r--r--fs/cramfs/inode.c5
-rw-r--r--fs/dax.c70
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/erofs/Kconfig20
-rw-r--r--fs/erofs/super.c28
-rw-r--r--fs/erofs/zdata.c13
-rw-r--r--fs/exec.c2
-rw-r--r--fs/exfat/dir.c12
-rw-r--r--fs/exfat/fatent.c10
-rw-r--r--fs/exfat/file.c5
-rw-r--r--fs/exfat/namei.c5
-rw-r--r--fs/exfat/super.c32
-rw-r--r--fs/ext4/balloc.c2
-rw-r--r--fs/ext4/ext4.h74
-rw-r--r--fs/ext4/ext4_extents.h7
-rw-r--r--fs/ext4/extents.c6
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/ialloc.c3
-rw-r--r--fs/ext4/inline.c91
-rw-r--r--fs/ext4/inode.c358
-rw-r--r--fs/ext4/mballoc-test.c5
-rw-r--r--fs/ext4/mballoc.c895
-rw-r--r--fs/ext4/mballoc.h9
-rw-r--r--fs/ext4/move_extent.c3
-rw-r--r--fs/ext4/namei.c69
-rw-r--r--fs/ext4/page-io.c16
-rw-r--r--fs/ext4/xattr.c4
-rw-r--r--fs/f2fs/checkpoint.c8
-rw-r--r--fs/f2fs/compress.c120
-rw-r--r--fs/f2fs/data.c183
-rw-r--r--fs/f2fs/debug.c21
-rw-r--r--fs/f2fs/dir.c4
-rw-r--r--fs/f2fs/extent_cache.c10
-rw-r--r--fs/f2fs/f2fs.h151
-rw-r--r--fs/f2fs/file.c107
-rw-r--r--fs/f2fs/gc.c54
-rw-r--r--fs/f2fs/gc.h5
-rw-r--r--fs/f2fs/inline.c20
-rw-r--r--fs/f2fs/inode.c84
-rw-r--r--fs/f2fs/namei.c12
-rw-r--r--fs/f2fs/node.c261
-rw-r--r--fs/f2fs/node.h77
-rw-r--r--fs/f2fs/recovery.c116
-rw-r--r--fs/f2fs/segment.c62
-rw-r--r--fs/f2fs/segment.h59
-rw-r--r--fs/f2fs/super.c2111
-rw-r--r--fs/f2fs/sysfs.c48
-rw-r--r--fs/fat/fatent.c2
-rw-r--r--fs/fat/misc.c6
-rw-r--r--fs/file_table.c2
-rw-r--r--fs/fuse/dax.c3
-rw-r--r--fs/fuse/file.c11
-rw-r--r--fs/fuse/virtio_fs.c5
-rw-r--r--fs/hugetlbfs/inode.c16
-rw-r--r--fs/jfs/file.c3
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/jfs/jfs_dmap.c10
-rw-r--r--fs/jfs/jfs_metapage.c8
-rw-r--r--fs/jfs/jfs_xtree.c142
-rw-r--r--fs/kernfs/inode.c4
-rw-r--r--fs/nfs/blocklayout/blocklayout.c4
-rw-r--r--fs/nfs/blocklayout/dev.c5
-rw-r--r--fs/nfs/blocklayout/extent_tree.c104
-rw-r--r--fs/nfs/client.c47
-rw-r--r--fs/nfs/delegation.c114
-rw-r--r--fs/nfs/delegation.h3
-rw-r--r--fs/nfs/dir.c4
-rw-r--r--fs/nfs/export.c11
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c26
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c6
-rw-r--r--fs/nfs/fs_context.c42
-rw-r--r--fs/nfs/inode.c69
-rw-r--r--fs/nfs/internal.h12
-rw-r--r--fs/nfs/localio.c7
-rw-r--r--fs/nfs/mount_clnt.c68
-rw-r--r--fs/nfs/nfs4_fs.h5
-rw-r--r--fs/nfs/nfs4client.c185
-rw-r--r--fs/nfs/nfs4file.c25
-rw-r--r--fs/nfs/nfs4getroot.c14
-rw-r--r--fs/nfs/nfs4proc.c139
-rw-r--r--fs/nfs/nfs4trace.c2
-rw-r--r--fs/nfs/nfs4trace.h168
-rw-r--r--fs/nfs/nfs4xdr.c24
-rw-r--r--fs/nfs/nfstrace.h11
-rw-r--r--fs/nfs/pnfs.c39
-rw-r--r--fs/nfs/pnfs_nfs.c14
-rw-r--r--fs/nfs/write.c10
-rw-r--r--fs/nfs_common/nfslocalio.c28
-rw-r--r--fs/nfsd/localio.c5
-rw-r--r--fs/nfsd/nfsctl.c36
-rw-r--r--fs/nfsd/vfs.c10
-rw-r--r--fs/notify/fanotify/fanotify.c8
-rw-r--r--fs/notify/fsnotify.c87
-rw-r--r--fs/ocfs2/aops.c1
-rw-r--r--fs/ocfs2/dir.c8
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c2
-rw-r--r--fs/ocfs2/inode.c70
-rw-r--r--fs/ocfs2/move_extents.c19
-rw-r--r--fs/ocfs2/namei.c11
-rw-r--r--fs/ocfs2/stack_user.c15
-rw-r--r--fs/open.c6
-rw-r--r--fs/orangefs/orangefs-debugfs.c8
-rw-r--r--fs/orangefs/orangefs-sysfs.c28
-rw-r--r--fs/proc/generic.c2
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/internal.h10
-rw-r--r--fs/proc/meminfo.c3
-rw-r--r--fs/proc/page.c54
-rw-r--r--fs/proc/task_mmu.c182
-rw-r--r--fs/proc/vmcore.c29
-rw-r--r--fs/smb/client/Makefile2
-rw-r--r--fs/smb/client/cached_dir.c8
-rw-r--r--fs/smb/client/cached_dir.h4
-rw-r--r--fs/smb/client/cifs_debug.c77
-rw-r--r--fs/smb/client/cifs_spnego.c47
-rw-r--r--fs/smb/client/cifsencrypt.c83
-rw-r--r--fs/smb/client/cifsfs.c8
-rw-r--r--fs/smb/client/cifsfs.h4
-rw-r--r--fs/smb/client/cifsglob.h58
-rw-r--r--fs/smb/client/cifsproto.h23
-rw-r--r--fs/smb/client/cifssmb.c142
-rw-r--r--fs/smb/client/cifstransport.c565
-rw-r--r--fs/smb/client/compress.c71
-rw-r--r--fs/smb/client/connect.c53
-rw-r--r--fs/smb/client/fs_context.c51
-rw-r--r--fs/smb/client/fs_context.h18
-rw-r--r--fs/smb/client/inode.c34
-rw-r--r--fs/smb/client/link.c24
-rw-r--r--fs/smb/client/reparse.c18
-rw-r--r--fs/smb/client/reparse.h4
-rw-r--r--fs/smb/client/sess.c9
-rw-r--r--fs/smb/client/smb1ops.c48
-rw-r--r--fs/smb/client/smb2inode.c14
-rw-r--r--fs/smb/client/smb2ops.c88
-rw-r--r--fs/smb/client/smb2proto.h5
-rw-r--r--fs/smb/client/smb2transport.c5
-rw-r--r--fs/smb/client/smbdirect.c475
-rw-r--r--fs/smb/client/smbdirect.h92
-rw-r--r--fs/smb/client/transport.c605
-rw-r--r--fs/smb/common/smbdirect/smbdirect_socket.h118
-rw-r--r--fs/smb/server/connection.h1
-rw-r--r--fs/smb/server/smb2pdu.c2
-rw-r--r--fs/smb/server/smb_common.c2
-rw-r--r--fs/smb/server/transport_rdma.c97
-rw-r--r--fs/smb/server/transport_tcp.c23
-rw-r--r--fs/squashfs/block.c47
-rw-r--r--fs/squashfs/file.c7
-rw-r--r--fs/ubifs/file.c10
-rw-r--r--fs/userfaultfd.c97
-rw-r--r--fs/xfs/scrub/trace.h2
-rw-r--r--fs/xfs/xfs_file.c8
-rw-r--r--fs/xfs/xfs_inode.h11
-rw-r--r--fs/xfs/xfs_ioctl.c2
-rw-r--r--fs/xfs/xfs_iops.c5
-rw-r--r--fs/xfs/xfs_itable.c8
-rw-r--r--fs/xfs/xfs_itable.h10
-rw-r--r--fs/xfs/xfs_mount.c19
-rw-r--r--fs/xfs/xfs_trace.h1
-rw-r--r--fs/xfs/xfs_trans.c2
-rw-r--r--fs/xfs/xfs_zone_alloc.c42
-rw-r--r--include/acpi/pcc.h29
-rw-r--r--include/asm-generic/Kbuild1
-rw-r--r--include/asm-generic/codetag.lds.h16
-rw-r--r--include/asm-generic/hugetlb.h17
-rw-r--r--include/asm-generic/msi.h1
-rw-r--r--include/asm-generic/unwind_user.h5
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/crypto/algapi.h4
-rw-r--r--include/crypto/engine.h1
-rw-r--r--include/crypto/hash.h2
-rw-r--r--include/crypto/internal/acompress.h5
-rw-r--r--include/crypto/internal/engine.h15
-rw-r--r--include/crypto/internal/hash.h36
-rw-r--r--include/cxl/event.h37
-rw-r--r--include/drm/amd/isp.h51
-rw-r--r--include/drm/display/drm_dp.h24
-rw-r--r--include/drm/display/drm_dp_helper.h17
-rw-r--r--include/drm/display/drm_hdmi_audio_helper.h1
-rw-r--r--include/drm/display/drm_hdmi_cec_helper.h72
-rw-r--r--include/drm/drm_accel.h5
-rw-r--r--include/drm/drm_bridge.h423
-rw-r--r--include/drm/drm_color_mgmt.h27
-rw-r--r--include/drm/drm_connector.h60
-rw-r--r--include/drm/drm_debugfs.h11
-rw-r--r--include/drm/drm_device.h11
-rw-r--r--include/drm/drm_drv.h22
-rw-r--r--include/drm/drm_edid.h8
-rw-r--r--include/drm/drm_file.h7
-rw-r--r--include/drm/drm_format_helper.h19
-rw-r--r--include/drm/drm_fourcc.h3
-rw-r--r--include/drm/drm_gem.h13
-rw-r--r--include/drm/drm_gem_framebuffer_helper.h6
-rw-r--r--include/drm/drm_gem_shmem_helper.h11
-rw-r--r--include/drm/drm_gem_vram_helper.h2
-rw-r--r--include/drm/drm_gpusvm.h101
-rw-r--r--include/drm/drm_gpuvm.h8
-rw-r--r--include/drm/drm_managed.h15
-rw-r--r--include/drm/drm_mipi_dsi.h2
-rw-r--r--include/drm/drm_mode_config.h3
-rw-r--r--include/drm/drm_modeset_helper.h2
-rw-r--r--include/drm/drm_pagemap.h135
-rw-r--r--include/drm/drm_panic.h6
-rw-r--r--include/drm/drm_prime.h3
-rw-r--r--include/drm/gpu_scheduler.h40
-rw-r--r--include/drm/intel/pciids.h12
-rw-r--r--include/drm/ttm/ttm_bo.h73
-rw-r--r--include/drm/ttm/ttm_device.h1
-rw-r--r--include/dt-bindings/clock/ast2600-clock.h2
-rw-r--r--include/dt-bindings/clock/nxp,imx94-clock.h13
-rw-r--r--include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h16
-rw-r--r--include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h22
-rw-r--r--include/dt-bindings/clock/qcom,milos-camcc.h131
-rw-r--r--include/dt-bindings/clock/qcom,milos-dispcc.h61
-rw-r--r--include/dt-bindings/clock/qcom,milos-gcc.h210
-rw-r--r--include/dt-bindings/clock/qcom,milos-gpucc.h56
-rw-r--r--include/dt-bindings/clock/qcom,milos-videocc.h36
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-camcc.h110
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-dispcc.h52
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-gpucc.h39
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-videocc.h30
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-gcc.h2
-rw-r--r--include/dt-bindings/clock/r9a07g043-cpg.h53
-rw-r--r--include/dt-bindings/clock/r9a07g044-cpg.h58
-rw-r--r--include/dt-bindings/clock/r9a07g054-cpg.h58
-rw-r--r--include/dt-bindings/clock/r9a08g045-cpg.h71
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h29
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h29
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov920.h9
-rw-r--r--include/dt-bindings/pinctrl/stm32-pinfunc.h1
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h1
-rw-r--r--include/kvm/arm_vgic.h11
-rw-r--r--include/linux/acpi.h2
-rw-r--r--include/linux/adi-axi-common.h56
-rw-r--r--include/linux/alloc_tag.h6
-rw-r--r--include/linux/amd-iommu.h25
-rw-r--r--include/linux/avf/virtchnl.h23
-rw-r--r--include/linux/balloon_compaction.h90
-rw-r--r--include/linux/bcm47xx_nvram.h1
-rw-r--r--include/linux/bcm47xx_sprom.h2
-rw-r--r--include/linux/bitfield.h8
-rw-r--r--include/linux/bits.h29
-rw-r--r--include/linux/bnxt/hsi.h (renamed from drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h)0
-rw-r--r--include/linux/bpf-cgroup-defs.h1
-rw-r--r--include/linux/bpf-cgroup.h6
-rw-r--r--include/linux/bpf.h185
-rw-r--r--include/linux/bpf_verifier.h81
-rw-r--r--include/linux/brcmphy.h6
-rw-r--r--include/linux/btf.h3
-rw-r--r--include/linux/can/bittiming.h2
-rw-r--r--include/linux/can/dev.h4
-rw-r--r--include/linux/cfi.h47
-rw-r--r--include/linux/cfi_types.h23
-rw-r--r--include/linux/cgroup-defs.h21
-rw-r--r--include/linux/cleanup.h94
-rw-r--r--include/linux/clk-provider.h26
-rw-r--r--include/linux/codetag.h1
-rw-r--r--include/linux/compiler-clang.h3
-rw-r--r--include/linux/compiler-gcc.h2
-rw-r--r--include/linux/compiler_types.h4
-rw-r--r--include/linux/coredump.h2
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpumask.h40
-rw-r--r--include/linux/crash_reserve.h15
-rw-r--r--include/linux/crypto.h3
-rw-r--r--include/linux/damon.h80
-rw-r--r--include/linux/dax.h9
-rw-r--r--include/linux/device-mapper.h2
-rw-r--r--include/linux/dma-fence.h45
-rw-r--r--include/linux/dmapool.h8
-rw-r--r--include/linux/dpll.h21
-rw-r--r--include/linux/efi.h4
-rw-r--r--include/linux/ethtool.h29
-rw-r--r--include/linux/ethtool_netlink.h7
-rw-r--r--include/linux/execmem.h54
-rw-r--r--include/linux/f2fs_fs.h2
-rw-r--r--include/linux/filter.h18
-rw-r--r--include/linux/find.h29
-rw-r--r--include/linux/firewire.h16
-rw-r--r--include/linux/fortify-string.h2
-rw-r--r--include/linux/fpga/adi-axi-common.h23
-rw-r--r--include/linux/fprobe.h5
-rw-r--r--include/linux/fs.h25
-rw-r--r--include/linux/fscrypt.h10
-rw-r--r--include/linux/fsnotify.h35
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/gcd.h3
-rw-r--r--include/linux/gfp.h7
-rw-r--r--include/linux/gpio/driver.h22
-rw-r--r--include/linux/gpio/generic.h4
-rw-r--r--include/linux/hid.h8
-rw-r--r--include/linux/highmem-internal.h2
-rw-r--r--include/linux/highmem.h12
-rw-r--r--include/linux/hisi_acc_qm.h4
-rw-r--r--include/linux/huge_mm.h52
-rw-r--r--include/linux/hugetlb.h20
-rw-r--r--include/linux/hung_task.h18
-rw-r--r--include/linux/hypervisor.h3
-rw-r--r--include/linux/i3c/device.h4
-rw-r--r--include/linux/i3c/master.h13
-rw-r--r--include/linux/ieee80211.h53
-rw-r--r--include/linux/if_team.h3
-rw-r--r--include/linux/if_tun.h5
-rw-r--r--include/linux/if_vlan.h23
-rw-r--r--include/linux/in6.h7
-rw-r--r--include/linux/init.h2
-rw-r--r--include/linux/input/touch-overlay.h25
-rw-r--r--include/linux/intel_dg_nvm_aux.h32
-rw-r--r--include/linux/io-mapping.h3
-rw-r--r--include/linux/iommu.h76
-rw-r--r--include/linux/iommufd.h196
-rw-r--r--include/linux/ioprio.h3
-rw-r--r--include/linux/ipv6.h2
-rw-r--r--include/linux/irq-entry-common.h18
-rw-r--r--include/linux/irqbypass.h46
-rw-r--r--include/linux/irqchip/arm-gic-v4.h2
-rw-r--r--include/linux/irqchip/arm-gic-v5.h394
-rw-r--r--include/linux/irqchip/arm-vgic-info.h4
-rw-r--r--include/linux/irqdomain.h3
-rw-r--r--include/linux/jhash.h8
-rw-r--r--include/linux/kernel.h6
-rw-r--r--include/linux/kexec.h10
-rw-r--r--include/linux/khugepaged.h4
-rw-r--r--include/linux/kmod.h3
-rw-r--r--include/linux/ksm.h12
-rw-r--r--include/linux/kvm_dirty_ring.h18
-rw-r--r--include/linux/kvm_host.h36
-rw-r--r--include/linux/kvm_irqfd.h5
-rw-r--r--include/linux/led-class-flash.h2
-rw-r--r--include/linux/leds.h1
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/libnvdimm.h15
-rw-r--r--include/linux/llist.h6
-rw-r--r--include/linux/maple_tree.h4
-rw-r--r--include/linux/mdio.h1
-rw-r--r--include/linux/memcontrol.h44
-rw-r--r--include/linux/memfd.h4
-rw-r--r--include/linux/memory-tiers.h2
-rw-r--r--include/linux/memory.h20
-rw-r--r--include/linux/memory_hotplug.h3
-rw-r--r--include/linux/mfd/davinci_voicecodec.h8
-rw-r--r--include/linux/mfd/madera/pdata.h3
-rw-r--r--include/linux/mfd/pcf50633/core.h229
-rw-r--r--include/linux/mfd/rk808.h2
-rw-r--r--include/linux/mfd/syscon/atmel-smc.h8
-rw-r--r--include/linux/mfd/tps65219.h5
-rw-r--r--include/linux/mfd/twl.h21
-rw-r--r--include/linux/mfd/wm8350/core.h10
-rw-r--r--include/linux/migrate.h46
-rw-r--r--include/linux/mlx5/device.h1
-rw-r--r--include/linux/mlx5/driver.h25
-rw-r--r--include/linux/mlx5/fs.h2
-rw-r--r--include/linux/mlx5/mlx5_ifc.h150
-rw-r--r--include/linux/mm.h91
-rw-r--r--include/linux/mm_types.h3
-rw-r--r--include/linux/mman.h4
-rw-r--r--include/linux/mmap_lock.h41
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mmdebug.h12
-rw-r--r--include/linux/mmzone.h36
-rw-r--r--include/linux/module.h39
-rw-r--r--include/linux/moduleparam.h24
-rw-r--r--include/linux/mroute6.h7
-rw-r--r--include/linux/msi.h3
-rw-r--r--include/linux/mtd/map.h13
-rw-r--r--include/linux/mtd/spinand.h70
-rw-r--r--include/linux/mtd/ubi.h1
-rw-r--r--include/linux/mutex.h2
-rw-r--r--include/linux/net/intel/iidc_rdma_idpf.h55
-rw-r--r--include/linux/net/intel/libie/adminq.h308
-rw-r--r--include/linux/net/intel/libie/pctype.h41
-rw-r--r--include/linux/netdevice.h59
-rw-r--r--include/linux/netfilter.h1
-rw-r--r--include/linux/netfilter/nf_conntrack_dccp.h38
-rw-r--r--include/linux/netfilter/x_tables.h10
-rw-r--r--include/linux/netpoll.h13
-rw-r--r--include/linux/nfs_fs.h8
-rw-r--r--include/linux/nfs_fs_sb.h8
-rw-r--r--include/linux/nfs_xdr.h57
-rw-r--r--include/linux/nmi.h2
-rw-r--r--include/linux/node.h77
-rw-r--r--include/linux/nodemask.h18
-rw-r--r--include/linux/of_irq.h5
-rw-r--r--include/linux/packing.h6
-rw-r--r--include/linux/padata.h4
-rw-r--r--include/linux/page-flags.h108
-rw-r--r--include/linux/page-isolation.h47
-rw-r--r--include/linux/page_owner.h8
-rw-r--r--include/linux/pageblock-flags.h56
-rw-r--r--include/linux/pagemap.h14
-rw-r--r--include/linux/pagewalk.h9
-rw-r--r--include/linux/panic.h5
-rw-r--r--include/linux/pci-ep-msi.h28
-rw-r--r--include/linux/pci-epf.h18
-rw-r--r--include/linux/pci-pwrctrl.h2
-rw-r--r--include/linux/pci-tph.h1
-rw-r--r--include/linux/pci.h27
-rw-r--r--include/linux/pci_hotplug.h3
-rw-r--r--include/linux/percpu-defs.h7
-rw-r--r--include/linux/perf/arm_pmu.h8
-rw-r--r--include/linux/pfn.h9
-rw-r--r--include/linux/pfn_t.h131
-rw-r--r--include/linux/pgtable.h163
-rw-r--r--include/linux/phy.h49
-rw-r--r--include/linux/pinctrl/pinctrl.h8
-rw-r--r--include/linux/platform_data/emc2305.h6
-rw-r--r--include/linux/platform_data/microchip-ksz.h1
-rw-r--r--include/linux/platform_data/video-pxafb.h1
-rw-r--r--include/linux/power_supply.h16
-rw-r--r--include/linux/printk.h7
-rw-r--r--include/linux/proc_fs.h1
-rw-r--r--include/linux/pse-pd/pse.h114
-rw-r--r--include/linux/ptp_clock_kernel.h34
-rw-r--r--include/linux/raid/pq.h12
-rw-r--r--include/linux/ref_tracker.h50
-rw-r--r--include/linux/relay.h24
-rw-r--r--include/linux/ring_buffer.h7
-rw-r--r--include/linux/rmap.h26
-rw-r--r--include/linux/rtc/ds1685.h2
-rw-r--r--include/linux/rtmutex.h2
-rw-r--r--include/linux/rv.h86
-rw-r--r--include/linux/rwsem.h15
-rw-r--r--include/linux/sbitmap.h19
-rw-r--r--include/linux/sched.h20
-rw-r--r--include/linux/sched/ext.h23
-rw-r--r--include/linux/shmem_fs.h5
-rw-r--r--include/linux/skbuff.h45
-rw-r--r--include/linux/soc/marvell/silicons.h25
-rw-r--r--include/linux/soc/qcom/ubwc.h75
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h9
-rw-r--r--include/linux/soundwire/sdw_amd.h1
-rw-r--r--include/linux/spi/spi-mem.h2
-rw-r--r--include/linux/sprintf.h2
-rw-r--r--include/linux/srcu.h54
-rw-r--r--include/linux/srcutiny.h3
-rw-r--r--include/linux/srcutree.h38
-rw-r--r--include/linux/sunrpc/xdr.h9
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swap.h23
-rw-r--r--include/linux/sys_info.h28
-rw-r--r--include/linux/sysctl.h5
-rw-r--r--include/linux/sysfb.h6
-rw-r--r--include/linux/tcp.h4
-rw-r--r--include/linux/tnum.h2
-rw-r--r--include/linux/trace_events.h3
-rw-r--r--include/linux/uaccess.h2
-rw-r--r--include/linux/unwind_deferred.h81
-rw-r--r--include/linux/unwind_deferred_types.h39
-rw-r--r--include/linux/unwind_user.h14
-rw-r--r--include/linux/unwind_user_types.h44
-rw-r--r--include/linux/usb/cdc_ncm.h1
-rw-r--r--include/linux/usb/usbnet.h3
-rw-r--r--include/linux/usb/uvc.h3
-rw-r--r--include/linux/userfaultfd_k.h15
-rw-r--r--include/linux/usermode_driver.h19
-rw-r--r--include/linux/vfio.h4
-rw-r--r--include/linux/vfio_pci_core.h2
-rw-r--r--include/linux/virtio.h11
-rw-r--r--include/linux/virtio_config.h43
-rw-r--r--include/linux/virtio_features.h88
-rw-r--r--include/linux/virtio_net.h197
-rw-r--r--include/linux/virtio_pci_modern.h43
-rw-r--r--include/linux/virtio_vsock.h46
-rw-r--r--include/linux/vmstat.h4
-rw-r--r--include/linux/vringh.h12
-rw-r--r--include/linux/wait.h2
-rw-r--r--include/linux/watchdog.h12
-rw-r--r--include/linux/workqueue.h34
-rw-r--r--include/linux/writeback.h11
-rw-r--r--include/linux/xxhash.h26
-rw-r--r--include/linux/zsmalloc.h2
-rw-r--r--include/media/rcar-fcp.h5
-rw-r--r--include/media/v4l2-ctrls.h4
-rw-r--r--include/media/v4l2-dev.h14
-rw-r--r--include/media/v4l2-ioctl.h1
-rw-r--r--include/media/v4l2-jpeg.h9
-rw-r--r--include/media/v4l2-subdev.h3
-rw-r--r--include/media/vsp1.h89
-rw-r--r--include/net/act_api.h25
-rw-r--r--include/net/af_unix.h2
-rw-r--r--include/net/aligned_data.h22
-rw-r--r--include/net/bluetooth/bluetooth.h11
-rw-r--r--include/net/bluetooth/hci.h10
-rw-r--r--include/net/bluetooth/hci_core.h41
-rw-r--r--include/net/bond_options.h1
-rw-r--r--include/net/bonding.h3
-rw-r--r--include/net/cfg80211.h221
-rw-r--r--include/net/devlink.h24
-rw-r--r--include/net/dropreason-core.h39
-rw-r--r--include/net/dsa.h2
-rw-r--r--include/net/dst.h50
-rw-r--r--include/net/gro.h6
-rw-r--r--include/net/inet6_hashtables.h2
-rw-r--r--include/net/inet_hashtables.h8
-rw-r--r--include/net/ip.h15
-rw-r--r--include/net/ip6_route.h4
-rw-r--r--include/net/ip6_tunnel.h5
-rw-r--r--include/net/ip_tunnels.h2
-rw-r--r--include/net/ip_vs.h13
-rw-r--r--include/net/kcm.h1
-rw-r--r--include/net/libeth/rx.h28
-rw-r--r--include/net/libeth/tx.h36
-rw-r--r--include/net/libeth/types.h106
-rw-r--r--include/net/libeth/xdp.h1879
-rw-r--r--include/net/libeth/xsk.h685
-rw-r--r--include/net/lwtunnel.h8
-rw-r--r--include/net/mac80211.h69
-rw-r--r--include/net/mana/gdma.h27
-rw-r--r--include/net/mana/mana.h173
-rw-r--r--include/net/mctp.h57
-rw-r--r--include/net/ndisc.h9
-rw-r--r--include/net/neighbour.h22
-rw-r--r--include/net/netdev_queues.h9
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h13
-rw-r--r--include/net/netfilter/nf_log.h3
-rw-r--r--include/net/netfilter/nf_reject.h1
-rw-r--r--include/net/netfilter/nf_tables.h19
-rw-r--r--include/net/netfilter/nf_tables_core.h50
-rw-r--r--include/net/netlink.h14
-rw-r--r--include/net/netmem.h181
-rw-r--r--include/net/netns/conntrack.h13
-rw-r--r--include/net/netns/mctp.h20
-rw-r--r--include/net/page_pool/helpers.h14
-rw-r--r--include/net/page_pool/types.h2
-rw-r--r--include/net/pfcp.h2
-rw-r--r--include/net/request_sock.h4
-rw-r--r--include/net/route.h6
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/net/sock.h19
-rw-r--r--include/net/tc_act/tc_connmark.h1
-rw-r--r--include/net/tc_act/tc_csum.h10
-rw-r--r--include/net/tc_act/tc_ct.h11
-rw-r--r--include/net/tc_act/tc_ctinfo.h7
-rw-r--r--include/net/tc_act/tc_gate.h9
-rw-r--r--include/net/tc_act/tc_mpls.h10
-rw-r--r--include/net/tc_act/tc_nat.h1
-rw-r--r--include/net/tc_act/tc_pedit.h1
-rw-r--r--include/net/tc_act/tc_police.h12
-rw-r--r--include/net/tc_act/tc_sample.h9
-rw-r--r--include/net/tc_act/tc_skbedit.h1
-rw-r--r--include/net/tc_act/tc_vlan.h9
-rw-r--r--include/net/tcp.h15
-rw-r--r--include/net/tcx.h1
-rw-r--r--include/net/udp.h25
-rw-r--r--include/net/udp_tunnel.h103
-rw-r--r--include/net/vxlan.h5
-rw-r--r--include/net/x25.h1
-rw-r--r--include/net/xdp_sock.h1
-rw-r--r--include/ras/ras_event.h2
-rw-r--r--include/rdma/ib_umem.h25
-rw-r--r--include/rdma/ib_verbs.h65
-rw-r--r--include/rdma/restrack.h4
-rw-r--r--include/rv/da_monitor.h172
-rw-r--r--include/rv/ltl_monitor.h186
-rw-r--r--include/scsi/sas_ata.h91
-rw-r--r--include/scsi/scsi_device.h5
-rw-r--r--include/scsi/scsi_transport_fc.h5
-rw-r--r--include/soc/spacemit/k1-syscon.h160
-rw-r--r--include/sound/sdca_function.h14
-rw-r--r--include/sound/tas2770-tlv.h4
-rw-r--r--include/sound/tas2781-tlv.h2
-rw-r--r--include/trace/events/alarmtimer.h2
-rw-r--r--include/trace/events/btrfs.h7
-rw-r--r--include/trace/events/cgroup.h47
-rw-r--r--include/trace/events/damon.h41
-rw-r--r--include/trace/events/dma_fence.h38
-rw-r--r--include/trace/events/ext4.h50
-rw-r--r--include/trace/events/fs_dax.h6
-rw-r--r--include/trace/events/ipi.h58
-rw-r--r--include/trace/events/kmem.h38
-rw-r--r--include/trace/events/kvm.h111
-rw-r--r--include/trace/events/mmap.h52
-rw-r--r--include/trace/events/power.h28
-rw-r--r--include/trace/events/sched.h16
-rw-r--r--include/trace/events/scsi.h13
-rw-r--r--include/trace/events/tcp.h29
-rw-r--r--include/trace/events/thp.h2
-rw-r--r--include/trace/events/writeback.h8
-rw-r--r--include/trace/events/xdp.h21
-rw-r--r--include/uapi/asm-generic/socket.h3
-rw-r--r--include/uapi/drm/amdgpu_drm.h2
-rw-r--r--include/uapi/drm/drm_fourcc.h56
-rw-r--r--include/uapi/drm/ivpu_accel.h14
-rw-r--r--include/uapi/drm/msm_drm.h149
-rw-r--r--include/uapi/drm/panfrost_drm.h21
-rw-r--r--include/uapi/drm/panthor_drm.h41
-rw-r--r--include/uapi/drm/xe_drm.h12
-rw-r--r--include/uapi/linux/bpf.h45
-rw-r--r--include/uapi/linux/capability.h5
-rw-r--r--include/uapi/linux/devlink.h16
-rw-r--r--include/uapi/linux/dpll.h13
-rw-r--r--include/uapi/linux/ethtool.h4
-rw-r--r--include/uapi/linux/ethtool_netlink.h2
-rw-r--r--include/uapi/linux/ethtool_netlink_generated.h83
-rw-r--r--include/uapi/linux/handshake.h1
-rw-r--r--include/uapi/linux/if_link.h2
-rw-r--r--include/uapi/linux/if_tun.h9
-rw-r--r--include/uapi/linux/if_xdp.h1
-rw-r--r--include/uapi/linux/in6.h4
-rw-r--r--include/uapi/linux/input-event-codes.h8
-rw-r--r--include/uapi/linux/io_uring.h4
-rw-r--r--include/uapi/linux/iommufd.h154
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/kexec.h1
-rw-r--r--include/uapi/linux/kvm.h2
-rw-r--r--include/uapi/linux/mctp.h8
-rw-r--r--include/uapi/linux/media/raspberrypi/pisp_be_config.h9
-rw-r--r--include/uapi/linux/neighbour.h5
-rw-r--r--include/uapi/linux/net_dropmon.h7
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/netdev.h6
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_hook.h2
-rw-r--r--include/uapi/linux/nl80211.h61
-rw-r--r--include/uapi/linux/openvswitch.h6
-rw-r--r--include/uapi/linux/pci_regs.h9
-rw-r--r--include/uapi/linux/pcitest.h1
-rw-r--r--include/uapi/linux/pkt_sched.h68
-rw-r--r--include/uapi/linux/prctl.h2
-rw-r--r--include/uapi/linux/raid/md_p.h2
-rw-r--r--include/uapi/linux/rkisp1-config.h106
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/sysctl.h1
-rw-r--r--include/uapi/linux/v4l2-controls.h6
-rw-r--r--include/uapi/linux/vfio.h12
-rw-r--r--include/uapi/linux/vhost.h35
-rw-r--r--include/uapi/linux/vhost_types.h5
-rw-r--r--include/uapi/linux/videodev2.h9
-rw-r--r--include/uapi/linux/virtio_net.h33
-rw-r--r--include/uapi/linux/vt.h34
-rw-r--r--include/uapi/rdma/efa-abi.h3
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h36
-rw-r--r--include/ufs/ufs.h26
-rw-r--r--include/ufs/ufshcd.h1
-rw-r--r--include/video/edid.h3
-rw-r--r--include/video/sisfb.h6
-rw-r--r--include/xen/xen-ops.h2
-rw-r--r--include/xen/xenbus.h4
-rw-r--r--init/Kconfig10
-rw-r--r--init/main.c6
-rw-r--r--io_uring/io-wq.c8
-rw-r--r--io_uring/memmap.c2
-rw-r--r--io_uring/net.c36
-rw-r--r--kernel/.gitignore2
-rw-r--r--kernel/Makefile49
-rw-r--r--kernel/bpf/Makefile2
-rw-r--r--kernel/bpf/arena.c43
-rw-r--r--kernel/bpf/arraymap.c11
-rw-r--r--kernel/bpf/bpf_iter.c14
-rw-r--r--kernel/bpf/bpf_local_storage.c8
-rw-r--r--kernel/bpf/bpf_struct_ops.c5
-rw-r--r--kernel/bpf/btf.c116
-rw-r--r--kernel/bpf/cgroup.c203
-rw-r--r--kernel/bpf/core.c201
-rw-r--r--kernel/bpf/cpumap.c3
-rw-r--r--kernel/bpf/helpers.c469
-rw-r--r--kernel/bpf/link_iter.c3
-rw-r--r--kernel/bpf/local_storage.c9
-rw-r--r--kernel/bpf/net_namespace.c10
-rw-r--r--kernel/bpf/preload/Kconfig5
-rw-r--r--kernel/bpf/preload/iterators/iterators.lskel-big-endian.h492
-rw-r--r--kernel/bpf/prog_iter.c3
-rw-r--r--kernel/bpf/rqspinlock.c23
-rw-r--r--kernel/bpf/stream.c526
-rw-r--r--kernel/bpf/syscall.c308
-rw-r--r--kernel/bpf/tcx.c16
-rw-r--r--kernel/bpf/tnum.c5
-rw-r--r--kernel/bpf/token.c25
-rw-r--r--kernel/bpf/trampoline.c51
-rw-r--r--kernel/bpf/verifier.c2348
-rw-r--r--kernel/cfi.c15
-rw-r--r--kernel/cgroup/cgroup-v1.c14
-rw-r--r--kernel/cgroup/cgroup.c5
-rw-r--r--kernel/cgroup/cpuset.c2
-rw-r--r--kernel/cgroup/rstat.c195
-rw-r--r--kernel/cpu.c3
-rw-r--r--kernel/crash_core.c15
-rw-r--r--kernel/crash_reserve.c68
-rw-r--r--kernel/entry/common.c3
-rw-r--r--kernel/events/core.c36
-rw-r--r--kernel/events/uprobes.c9
-rw-r--r--kernel/exit.c9
-rw-r--r--kernel/fork.c133
-rwxr-xr-xkernel/gen_kheaders.sh99
-rw-r--r--kernel/hung_task.c29
-rw-r--r--kernel/irq/chip.c8
-rw-r--r--kernel/irq/irq_test.c4
-rw-r--r--kernel/irq/irqdomain.c1
-rw-r--r--kernel/kallsyms.c3
-rw-r--r--kernel/kcov.c2
-rw-r--r--kernel/kcsan/kcsan_test.c2
-rw-r--r--kernel/kexec.c2
-rw-r--r--kernel/kexec_core.c100
-rw-r--r--kernel/kexec_file.c51
-rw-r--r--kernel/kexec_handover.c4
-rw-r--r--kernel/kexec_internal.h2
-rw-r--r--kernel/kprobes.c8
-rw-r--r--kernel/kthread.c12
-rw-r--r--kernel/locking/rtmutex_api.c18
-rw-r--r--kernel/locking/rwsem.c31
-rw-r--r--kernel/module/internal.h10
-rw-r--r--kernel/module/main.c57
-rw-r--r--kernel/padata.c154
-rw-r--r--kernel/panic.c149
-rw-r--r--kernel/pid.c31
-rw-r--r--kernel/power/hibernate.c26
-rw-r--r--kernel/printk/internal.h3
-rw-r--r--kernel/printk/nbcon.c89
-rw-r--r--kernel/printk/printk.c20
-rw-r--r--kernel/rcu/rcutorture.c356
-rw-r--r--kernel/rcu/refscale.c42
-rw-r--r--kernel/rcu/srcutree.c2
-rw-r--r--kernel/rcu/tree.c82
-rw-r--r--kernel/rcu/tree.h14
-rw-r--r--kernel/rcu/tree_exp.h59
-rw-r--r--kernel/rcu/tree_nocb.h10
-rw-r--r--kernel/rcu/tree_plugin.h126
-rw-r--r--kernel/rcu/tree_stall.h43
-rw-r--r--kernel/relay.c69
-rw-r--r--kernel/sched/core.c42
-rw-r--r--kernel/sched/ext.c250
-rw-r--r--kernel/sched/ext.h20
-rw-r--r--kernel/sched/ext_idle.c45
-rw-r--r--kernel/sched/ext_idle.h12
-rw-r--r--kernel/sched/psi.c6
-rw-r--r--kernel/sched/sched.h9
-rw-r--r--kernel/sched/wait.c22
-rw-r--r--kernel/smp.c2
-rw-r--r--kernel/sys.c79
-rw-r--r--kernel/sysctl.c270
-rw-r--r--kernel/time/clocksource.c5
-rw-r--r--kernel/trace/Kconfig53
-rw-r--r--kernel/trace/Makefile2
-rw-r--r--kernel/trace/blktrace.c22
-rw-r--r--kernel/trace/bpf_trace.c90
-rw-r--r--kernel/trace/fgraph.c16
-rw-r--r--kernel/trace/fprobe.c9
-rw-r--r--kernel/trace/ftrace.c4
-rw-r--r--kernel/trace/power-traces.c1
-rw-r--r--kernel/trace/preemptirq_delay_test.c13
-rw-r--r--kernel/trace/ring_buffer.c184
-rw-r--r--kernel/trace/rv/Kconfig43
-rw-r--r--kernel/trace/rv/Makefile9
-rw-r--r--kernel/trace/rv/monitors/nrp/Kconfig (renamed from kernel/trace/rv/monitors/tss/Kconfig)12
-rw-r--r--kernel/trace/rv/monitors/nrp/nrp.c138
-rw-r--r--kernel/trace/rv/monitors/nrp/nrp.h75
-rw-r--r--kernel/trace/rv/monitors/nrp/nrp_trace.h15
-rw-r--r--kernel/trace/rv/monitors/opid/Kconfig19
-rw-r--r--kernel/trace/rv/monitors/opid/opid.c168
-rw-r--r--kernel/trace/rv/monitors/opid/opid.h104
-rw-r--r--kernel/trace/rv/monitors/opid/opid_trace.h (renamed from kernel/trace/rv/monitors/sncid/sncid_trace.h)8
-rw-r--r--kernel/trace/rv/monitors/pagefault/Kconfig20
-rw-r--r--kernel/trace/rv/monitors/pagefault/pagefault.c88
-rw-r--r--kernel/trace/rv/monitors/pagefault/pagefault.h64
-rw-r--r--kernel/trace/rv/monitors/pagefault/pagefault_trace.h14
-rw-r--r--kernel/trace/rv/monitors/rtapp/Kconfig11
-rw-r--r--kernel/trace/rv/monitors/rtapp/rtapp.c33
-rw-r--r--kernel/trace/rv/monitors/rtapp/rtapp.h3
-rw-r--r--kernel/trace/rv/monitors/sched/Kconfig1
-rw-r--r--kernel/trace/rv/monitors/sched/sched.c3
-rw-r--r--kernel/trace/rv/monitors/sco/sco.c7
-rw-r--r--kernel/trace/rv/monitors/scpd/Kconfig2
-rw-r--r--kernel/trace/rv/monitors/scpd/scpd.c7
-rw-r--r--kernel/trace/rv/monitors/sleep/Kconfig22
-rw-r--r--kernel/trace/rv/monitors/sleep/sleep.c237
-rw-r--r--kernel/trace/rv/monitors/sleep/sleep.h257
-rw-r--r--kernel/trace/rv/monitors/sleep/sleep_trace.h14
-rw-r--r--kernel/trace/rv/monitors/sncid/sncid.c96
-rw-r--r--kernel/trace/rv/monitors/sncid/sncid.h49
-rw-r--r--kernel/trace/rv/monitors/snep/Kconfig2
-rw-r--r--kernel/trace/rv/monitors/snep/snep.c7
-rw-r--r--kernel/trace/rv/monitors/snep/snep.h14
-rw-r--r--kernel/trace/rv/monitors/snroc/snroc.c3
-rw-r--r--kernel/trace/rv/monitors/sssw/Kconfig (renamed from kernel/trace/rv/monitors/sncid/Kconfig)10
-rw-r--r--kernel/trace/rv/monitors/sssw/sssw.c116
-rw-r--r--kernel/trace/rv/monitors/sssw/sssw.h105
-rw-r--r--kernel/trace/rv/monitors/sssw/sssw_trace.h15
-rw-r--r--kernel/trace/rv/monitors/sts/Kconfig19
-rw-r--r--kernel/trace/rv/monitors/sts/sts.c156
-rw-r--r--kernel/trace/rv/monitors/sts/sts.h117
-rw-r--r--kernel/trace/rv/monitors/sts/sts_trace.h (renamed from kernel/trace/rv/monitors/tss/tss_trace.h)8
-rw-r--r--kernel/trace/rv/monitors/tss/tss.c91
-rw-r--r--kernel/trace/rv/monitors/tss/tss.h47
-rw-r--r--kernel/trace/rv/monitors/wip/Kconfig2
-rw-r--r--kernel/trace/rv/monitors/wip/wip.c3
-rw-r--r--kernel/trace/rv/monitors/wwnr/wwnr.c3
-rw-r--r--kernel/trace/rv/reactor_panic.c8
-rw-r--r--kernel/trace/rv/reactor_printk.c8
-rw-r--r--kernel/trace/rv/rv.c226
-rw-r--r--kernel/trace/rv/rv.h39
-rw-r--r--kernel/trace/rv/rv_reactors.c138
-rw-r--r--kernel/trace/rv/rv_trace.h166
-rw-r--r--kernel/trace/trace.c352
-rw-r--r--kernel/trace/trace.h4
-rw-r--r--kernel/trace/trace_eprobe.c53
-rw-r--r--kernel/trace/trace_events.c154
-rw-r--r--kernel/trace/trace_events_filter.c32
-rw-r--r--kernel/trace/trace_events_synth.c6
-rw-r--r--kernel/trace/trace_fprobe.c614
-rw-r--r--kernel/trace/trace_functions_graph.c2
-rw-r--r--kernel/trace/trace_hwlat.c5
-rw-r--r--kernel/trace/trace_kdb.c8
-rw-r--r--kernel/trace/trace_kprobe.c57
-rw-r--r--kernel/trace/trace_output.c8
-rw-r--r--kernel/trace/trace_probe.c150
-rw-r--r--kernel/trace/trace_probe.h26
-rw-r--r--kernel/trace/trace_uprobe.c53
-rw-r--r--kernel/ucount.c16
-rw-r--r--kernel/unwind/Makefile1
-rw-r--r--kernel/unwind/deferred.c362
-rw-r--r--kernel/unwind/user.c128
-rw-r--r--kernel/usermode_driver.c191
-rw-r--r--kernel/vhost_task.c2
-rw-r--r--kernel/watchdog_buddy.c5
-rw-r--r--kernel/watchdog_perf.c22
-rw-r--r--kernel/workqueue.c74
-rw-r--r--  lib/Kconfig.debug | 20
-rw-r--r--  lib/Makefile | 3
-rw-r--r--  lib/alloc_tag.c | 31
-rw-r--r--  lib/codetag.c | 17
-rw-r--r--  lib/crypto/s390/chacha-glue.c | 1
-rw-r--r--  lib/find_bit.c | 24
-rw-r--r--  lib/kobject_uevent.c | 20
-rw-r--r--  lib/kunit/test.c | 8
-rw-r--r--  lib/maple_tree.c | 40
-rw-r--r--  lib/math/div64.c | 13
-rw-r--r--  lib/math/gcd.c | 27
-rw-r--r--  lib/raid6/algos.c | 3
-rw-r--r--  lib/raid6/recov.c | 6
-rw-r--r--  lib/raid6/recov_avx2.c | 6
-rw-r--r--  lib/raid6/recov_avx512.c | 6
-rw-r--r--  lib/raid6/recov_loongarch_simd.c | 12
-rw-r--r--  lib/raid6/recov_neon.c | 6
-rw-r--r--  lib/raid6/recov_rvv.c | 6
-rw-r--r--  lib/raid6/recov_s390xc.c | 7
-rw-r--r--  lib/raid6/recov_ssse3.c | 6
-rw-r--r--  lib/ref_tracker.c | 289
-rw-r--r--  lib/sbitmap.c | 74
-rw-r--r--  lib/stackdepot.c | 67
-rw-r--r--  lib/sys_info.c | 122
-rw-r--r--  lib/test_hmm.c | 14
-rw-r--r--  lib/test_kho.c | 305
-rw-r--r--  lib/test_maple_tree.c | 32
-rw-r--r--  lib/test_objagg.c | 77
-rw-r--r--  lib/test_vmalloc.c | 42
-rw-r--r--  lib/tests/test_bits.c | 19
-rw-r--r--  lib/vsprintf.c | 70
-rw-r--r--  lib/xarray.c | 3
-rw-r--r--  lib/xxhash.c | 107
-rw-r--r--  mm/Kconfig | 32
-rw-r--r--  mm/Makefile | 1
-rw-r--r--  mm/balloon_compaction.c | 21
-rw-r--r--  mm/cma.c | 341
-rw-r--r--  mm/cma_debug.c | 10
-rw-r--r--  mm/compaction.c | 44
-rw-r--r--  mm/damon/Kconfig | 16
-rw-r--r--  mm/damon/Makefile | 1
-rw-r--r--  mm/damon/core.c | 189
-rw-r--r--  mm/damon/lru_sort.c | 75
-rw-r--r--  mm/damon/ops-common.c | 274
-rw-r--r--  mm/damon/ops-common.h | 5
-rw-r--r--  mm/damon/paddr.c | 277
-rw-r--r--  mm/damon/reclaim.c | 71
-rw-r--r--  mm/damon/stat.c | 264
-rw-r--r--  mm/damon/sysfs-schemes.c | 504
-rw-r--r--  mm/damon/sysfs.c | 171
-rw-r--r--  mm/damon/tests/core-kunit.h | 4
-rw-r--r--  mm/damon/tests/vaddr-kunit.h | 2
-rw-r--r--  mm/damon/vaddr.c | 245
-rw-r--r--  mm/debug.c | 44
-rw-r--r--  mm/debug_vm_pgtable.c | 113
-rw-r--r--  mm/dmapool.c | 6
-rw-r--r--  mm/execmem.c | 198
-rw-r--r--  mm/filemap.c | 68
-rw-r--r--  mm/gup.c | 245
-rw-r--r--  mm/hmm.c | 14
-rw-r--r--  mm/huge_memory.c | 489
-rw-r--r--  mm/hugetlb.c | 173
-rw-r--r--  mm/hugetlb_vmemmap.c | 2
-rw-r--r--  mm/internal.h | 150
-rw-r--r--  mm/io-mapping.c | 30
-rw-r--r--  mm/kasan/common.c | 25
-rw-r--r--  mm/kasan/kasan_test_c.c | 10
-rw-r--r--  mm/kfence/core.c | 4
-rw-r--r--  mm/khugepaged.c | 83
-rw-r--r--  mm/kmemleak.c | 10
-rw-r--r--  mm/ksm.c | 55
-rw-r--r--  mm/list_lru.c | 34
-rw-r--r--  mm/maccess.c | 1
-rw-r--r--  mm/madvise.c | 893
-rw-r--r--  mm/mapping_dirty_helpers.c | 6
-rw-r--r--  mm/memcontrol.c | 92
-rw-r--r--  mm/memfd.c | 38
-rw-r--r--  mm/memory-failure.c | 16
-rw-r--r--  mm/memory-tiers.c | 19
-rw-r--r--  mm/memory.c | 171
-rw-r--r--  mm/memory_hotplug.c | 197
-rw-r--r--  mm/mempolicy.c | 31
-rw-r--r--  mm/mempool.c | 40
-rw-r--r--  mm/memremap.c | 34
-rw-r--r--  mm/migrate.c | 283
-rw-r--r--  mm/migrate_device.c | 2
-rw-r--r--  mm/mincore.c | 3
-rw-r--r--  mm/mlock.c | 4
-rw-r--r--  mm/mm_init.c | 26
-rw-r--r--  mm/mmap.c | 28
-rw-r--r--  mm/mmap_lock.c | 93
-rw-r--r--  mm/mprotect.c | 308
-rw-r--r--  mm/mremap.c | 629
-rw-r--r--  mm/mseal.c | 166
-rw-r--r--  mm/nommu.c | 10
-rw-r--r--  mm/page-writeback.c | 4
-rw-r--r--  mm/page_alloc.c | 394
-rw-r--r--  mm/page_ext.c | 17
-rw-r--r--  mm/page_io.c | 71
-rw-r--r--  mm/page_isolation.c | 112
-rw-r--r--  mm/page_owner.c | 4
-rw-r--r--  mm/page_vma_mapped.c | 5
-rw-r--r--  mm/pagewalk.c | 90
-rw-r--r--  mm/percpu-stats.c | 1
-rw-r--r--  mm/percpu.c | 2
-rw-r--r--  mm/pgtable-generic.c | 7
-rw-r--r--  mm/ptdump.c | 5
-rw-r--r--  mm/readahead.c | 36
-rw-r--r--  mm/rmap.c | 28
-rw-r--r--  mm/secretmem.c | 16
-rw-r--r--  mm/shmem.c | 391
-rw-r--r--  mm/show_mem.c | 2
-rw-r--r--  mm/slab.h | 28
-rw-r--r--  mm/slub.c | 146
-rw-r--r--  mm/swap.c | 33
-rw-r--r--  mm/swap.h | 9
-rw-r--r--  mm/swapfile.c | 70
-rw-r--r--  mm/userfaultfd.c | 109
-rw-r--r--  mm/util.c | 116
-rw-r--r--  mm/vma.c | 163
-rw-r--r--  mm/vma.h | 56
-rw-r--r--  mm/vma_exec.c | 2
-rw-r--r--  mm/vmpressure.c | 2
-rw-r--r--  mm/vmscan.c | 466
-rw-r--r--  mm/vmstat.c | 454
-rw-r--r--  mm/zpdesc.h | 15
-rw-r--r--  mm/zsmalloc.c | 33
-rw-r--r--  mm/zswap.c | 5
-rw-r--r--  net/6lowpan/ndisc.c | 16
-rw-r--r--  net/8021q/vlan.c | 5
-rw-r--r--  net/Kconfig | 6
-rw-r--r--  net/appletalk/atalk_proc.c | 2
-rw-r--r--  net/batman-adv/bat_algo.c | 1
-rw-r--r--  net/batman-adv/bat_algo.h | 2
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 25
-rw-r--r--  net/batman-adv/bat_v.c | 6
-rw-r--r--  net/batman-adv/bat_v_elp.c | 8
-rw-r--r--  net/batman-adv/bat_v_ogm.c | 14
-rw-r--r--  net/batman-adv/hard-interface.c | 39
-rw-r--r--  net/batman-adv/main.c | 7
-rw-r--r--  net/batman-adv/main.h | 2
-rw-r--r--  net/batman-adv/mesh-interface.c | 6
-rw-r--r--  net/batman-adv/multicast.c | 6
-rw-r--r--  net/batman-adv/netlink.c | 7
-rw-r--r--  net/batman-adv/originator.c | 7
-rw-r--r--  net/batman-adv/send.c | 7
-rw-r--r--  net/bluetooth/af_bluetooth.c | 9
-rw-r--r--  net/bluetooth/aosp.c | 2
-rw-r--r--  net/bluetooth/coredump.c | 6
-rw-r--r--  net/bluetooth/hci_conn.c | 19
-rw-r--r--  net/bluetooth/hci_core.c | 31
-rw-r--r--  net/bluetooth/hci_event.c | 76
-rw-r--r--  net/bluetooth/hci_sock.c | 2
-rw-r--r--  net/bluetooth/hci_sync.c | 14
-rw-r--r--  net/bluetooth/iso.c | 52
-rw-r--r--  net/bluetooth/l2cap_sock.c | 4
-rw-r--r--  net/bluetooth/lib.c | 2
-rw-r--r--  net/bluetooth/mgmt.c | 1
-rw-r--r--  net/bluetooth/rfcomm/core.c | 3
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 2
-rw-r--r--  net/bluetooth/sco.c | 4
-rw-r--r--  net/bluetooth/smp.c | 2
-rw-r--r--  net/bpf/bpf_dummy_struct_ops.c | 3
-rw-r--r--  net/bpf/test_run.c | 2
-rw-r--r--  net/bridge/br.c | 7
-rw-r--r--  net/bridge/br_if.c | 3
-rw-r--r--  net/bridge/br_netlink.c | 2
-rw-r--r--  net/bridge/br_switchdev.c | 2
-rw-r--r--  net/bridge/netfilter/Kconfig | 11
-rw-r--r--  net/caif/cfctrl.c | 294
-rw-r--r--  net/can/af_can.c | 6
-rw-r--r--  net/can/bcm.c | 5
-rw-r--r--  net/can/isotp.c | 5
-rw-r--r--  net/can/j1939/socket.c | 5
-rw-r--r--  net/can/raw.c | 5
-rw-r--r--  net/core/dev.c | 301
-rw-r--r--  net/core/dev.h | 22
-rw-r--r--  net/core/dev_addr_lists.c | 2
-rw-r--r--  net/core/dev_api.c | 13
-rw-r--r--  net/core/dev_ioctl.c | 5
-rw-r--r--  net/core/devmem.c | 6
-rw-r--r--  net/core/devmem.h | 7
-rw-r--r--  net/core/dst.c | 10
-rw-r--r--  net/core/dst_cache.c | 2
-rw-r--r--  net/core/filter.c | 38
-rw-r--r--  net/core/hotdata.c | 5
-rw-r--r--  net/core/ieee8021q_helpers.c | 44
-rw-r--r--  net/core/neighbour.c | 558
-rw-r--r--  net/core/net-sysfs.c | 80
-rw-r--r--  net/core/net-sysfs.h | 2
-rw-r--r--  net/core/net_namespace.c | 68
-rw-r--r--  net/core/netclassid_cgroup.c | 4
-rw-r--r--  net/core/netdev-genl-gen.c | 5
-rw-r--r--  net/core/netdev-genl.c | 14
-rw-r--r--  net/core/netdev_rx_queue.c | 6
-rw-r--r--  net/core/netpoll.c | 487
-rw-r--r--  net/core/page_pool.c | 65
-rw-r--r--  net/core/rtnetlink.c | 10
-rw-r--r--  net/core/selftests.c | 67
-rw-r--r--  net/core/skbuff.c | 38
-rw-r--r--  net/core/skmsg.c | 7
-rw-r--r--  net/core/sock.c | 73
-rw-r--r--  net/core/sock_map.c | 13
-rw-r--r--  net/core/stream.c | 8
-rw-r--r--  net/core/sysctl_net_core.c | 37
-rw-r--r--  net/devlink/netlink_gen.c | 15
-rw-r--r--  net/devlink/netlink_gen.h | 1
-rw-r--r--  net/devlink/param.c | 20
-rw-r--r--  net/devlink/port.c | 2
-rw-r--r--  net/devlink/rate.c | 127
-rw-r--r--  net/dsa/Kconfig | 16
-rw-r--r--  net/dsa/dsa.c | 3
-rw-r--r--  net/dsa/tag_brcm.c | 119
-rw-r--r--  net/dsa/user.c | 2
-rw-r--r--  net/ethtool/common.c | 58
-rw-r--r--  net/ethtool/common.h | 13
-rw-r--r--  net/ethtool/ioctl.c | 327
-rw-r--r--  net/ethtool/netlink.c | 95
-rw-r--r--  net/ethtool/netlink.h | 12
-rw-r--r--  net/ethtool/pause.c | 1
-rw-r--r--  net/ethtool/pse-pd.c | 65
-rw-r--r--  net/ethtool/rss.c | 942
-rw-r--r--  net/handshake/tlshd.c | 6
-rw-r--r--  net/ipv4/arp.c | 16
-rw-r--r--  net/ipv4/datagram.c | 2
-rw-r--r--  net/ipv4/fib_frontend.c | 2
-rw-r--r--  net/ipv4/fib_semantics.c | 10
-rw-r--r--  net/ipv4/icmp.c | 24
-rw-r--r--  net/ipv4/igmp.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 42
-rw-r--r--  net/ipv4/inet_diag.c | 2
-rw-r--r--  net/ipv4/inet_hashtables.c | 4
-rw-r--r--  net/ipv4/ip_fragment.c | 2
-rw-r--r--  net/ipv4/ip_input.c | 6
-rw-r--r--  net/ipv4/ip_output.c | 22
-rw-r--r--  net/ipv4/ip_tunnel.c | 4
-rw-r--r--  net/ipv4/ip_tunnel_core.c | 4
-rw-r--r--  net/ipv4/ip_vti.c | 4
-rw-r--r--  net/ipv4/ipconfig.c | 6
-rw-r--r--  net/ipv4/ipmr.c | 171
-rw-r--r--  net/ipv4/netfilter.c | 4
-rw-r--r--  net/ipv4/netfilter/Kconfig | 23
-rw-r--r--  net/ipv4/nexthop.c | 5
-rw-r--r--  net/ipv4/ping.c | 4
-rw-r--r--  net/ipv4/proc.c | 1
-rw-r--r--  net/ipv4/raw.c | 4
-rw-r--r--  net/ipv4/route.c | 43
-rw-r--r--  net/ipv4/syncookies.c | 3
-rw-r--r--  net/ipv4/tcp.c | 33
-rw-r--r--  net/ipv4/tcp_fastopen.c | 4
-rw-r--r--  net/ipv4/tcp_input.c | 227
-rw-r--r--  net/ipv4/tcp_ipv4.c | 309
-rw-r--r--  net/ipv4/tcp_metrics.c | 8
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 89
-rw-r--r--  net/ipv4/tcp_recovery.c | 2
-rw-r--r--  net/ipv4/tcp_timer.c | 2
-rw-r--r--  net/ipv4/udp.c | 29
-rw-r--r--  net/ipv4/udp_impl.h | 1
-rw-r--r--  net/ipv4/udp_offload.c | 12
-rw-r--r--  net/ipv4/udp_tunnel_core.c | 21
-rw-r--r--  net/ipv4/udp_tunnel_nic.c | 78
-rw-r--r--  net/ipv4/udplite.c | 2
-rw-r--r--  net/ipv4/xfrm4_output.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 106
-rw-r--r--  net/ipv6/addrlabel.c | 32
-rw-r--r--  net/ipv6/af_inet6.c | 2
-rw-r--r--  net/ipv6/anycast.c | 101
-rw-r--r--  net/ipv6/calipso.c | 6
-rw-r--r--  net/ipv6/datagram.c | 6
-rw-r--r--  net/ipv6/exthdrs.c | 10
-rw-r--r--  net/ipv6/icmp.c | 4
-rw-r--r--  net/ipv6/ila/ila_lwt.c | 2
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 4
-rw-r--r--  net/ipv6/ioam6.c | 17
-rw-r--r--  net/ipv6/ioam6_iptunnel.c | 4
-rw-r--r--  net/ipv6/ip6_fib.c | 50
-rw-r--r--  net/ipv6/ip6_gre.c | 108
-rw-r--r--  net/ipv6/ip6_input.c | 40
-rw-r--r--  net/ipv6/ip6_offload.c | 4
-rw-r--r--  net/ipv6/ip6_output.c | 32
-rw-r--r--  net/ipv6/ip6_tunnel.c | 49
-rw-r--r--  net/ipv6/ip6_udp_tunnel.c | 20
-rw-r--r--  net/ipv6/ip6_vti.c | 4
-rw-r--r--  net/ipv6/ip6mr.c | 157
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 28
-rw-r--r--  net/ipv6/mcast.c | 395
-rw-r--r--  net/ipv6/ndisc.c | 184
-rw-r--r--  net/ipv6/netfilter.c | 4
-rw-r--r--  net/ipv6/netfilter/Kconfig | 20
-rw-r--r--  net/ipv6/netfilter/nf_dup_ipv6.c | 2
-rw-r--r--  net/ipv6/netfilter/nf_reject_ipv6.c | 2
-rw-r--r--  net/ipv6/output_core.c | 4
-rw-r--r--  net/ipv6/ping.c | 2
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/ipv6/reassembly.c | 10
-rw-r--r--  net/ipv6/route.c | 131
-rw-r--r--  net/ipv6/rpl_iptunnel.c | 4
-rw-r--r--  net/ipv6/seg6_iptunnel.c | 26
-rw-r--r--  net/ipv6/seg6_local.c | 26
-rw-r--r--  net/ipv6/sit.c | 2
-rw-r--r--  net/ipv6/syncookies.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 23
-rw-r--r--  net/ipv6/udp.c | 11
-rw-r--r--  net/ipv6/udp_impl.h | 1
-rw-r--r--  net/ipv6/udplite.c | 2
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 2
-rw-r--r--  net/iucv/iucv.c | 1
-rw-r--r--  net/kcm/kcmsock.c | 19
-rw-r--r--  net/key/af_key.c | 2
-rw-r--r--  net/l2tp/l2tp_ip6.c | 2
-rw-r--r--  net/llc/af_llc.c | 6
-rw-r--r--  net/llc/llc_proc.c | 2
-rw-r--r--  net/mac80211/agg-rx.c | 6
-rw-r--r--  net/mac80211/agg-tx.c | 3
-rw-r--r--  net/mac80211/cfg.c | 207
-rw-r--r--  net/mac80211/chan.c | 51
-rw-r--r--  net/mac80211/debugfs.c | 3
-rw-r--r--  net/mac80211/debugfs_netdev.c | 2
-rw-r--r--  net/mac80211/driver-ops.c | 5
-rw-r--r--  net/mac80211/driver-ops.h | 59
-rw-r--r--  net/mac80211/ht.c | 40
-rw-r--r--  net/mac80211/ibss.c | 4
-rw-r--r--  net/mac80211/ieee80211_i.h | 73
-rw-r--r--  net/mac80211/iface.c | 35
-rw-r--r--  net/mac80211/key.c | 66
-rw-r--r--  net/mac80211/link.c | 9
-rw-r--r--  net/mac80211/main.c | 92
-rw-r--r--  net/mac80211/mesh.c | 2
-rw-r--r--  net/mac80211/mlme.c | 318
-rw-r--r--  net/mac80211/offchannel.c | 7
-rw-r--r--  net/mac80211/pm.c | 2
-rw-r--r--  net/mac80211/rx.c | 113
-rw-r--r--  net/mac80211/s1g.c | 26
-rw-r--r--  net/mac80211/scan.c | 23
-rw-r--r--  net/mac80211/sta_info.c | 418
-rw-r--r--  net/mac80211/sta_info.h | 59
-rw-r--r--  net/mac80211/tdls.c | 2
-rw-r--r--  net/mac80211/trace.h | 105
-rw-r--r--  net/mac80211/tx.c | 116
-rw-r--r--  net/mac80211/util.c | 113
-rw-r--r--  net/mac80211/vht.c | 5
-rw-r--r--  net/mctp/af_mctp.c | 214
-rw-r--r--  net/mctp/route.c | 653
-rw-r--r--  net/mctp/test/route-test.c | 797
-rw-r--r--  net/mctp/test/sock-test.c | 396
-rw-r--r--  net/mctp/test/utils.c | 232
-rw-r--r--  net/mctp/test/utils.h | 61
-rw-r--r--  net/mpls/af_mpls.c | 6
-rw-r--r--  net/mptcp/ctrl.c | 4
-rw-r--r--  net/mptcp/mib.c | 5
-rw-r--r--  net/mptcp/mib.h | 7
-rw-r--r--  net/mptcp/options.c | 5
-rw-r--r--  net/mptcp/protocol.c | 52
-rw-r--r--  net/mptcp/protocol.h | 35
-rw-r--r--  net/mptcp/sockopt.c | 33
-rw-r--r--  net/mptcp/subflow.c | 16
-rw-r--r--  net/ncsi/internal.h | 2
-rw-r--r--  net/ncsi/ncsi-rsp.c | 1
-rw-r--r--  net/netfilter/Kconfig | 30
-rw-r--r--  net/netfilter/Makefile | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_est.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 2
-rw-r--r--  net/netfilter/nf_bpf_link.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 24
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 66
-rw-r--r--  net/netfilter/nf_conntrack_proto.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 826
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 118
-rw-r--r--  net/netfilter/nf_log.c | 26
-rw-r--r--  net/netfilter/nf_nat_core.c | 6
-rw-r--r--  net/netfilter/nf_nat_proto.c | 43
-rw-r--r--  net/netfilter/nf_tables_api.c | 110
-rw-r--r--  net/netfilter/nfnetlink_cttimeout.c | 5
-rw-r--r--  net/netfilter/nfnetlink_hook.c | 76
-rw-r--r--  net/netfilter/nft_dynset.c | 10
-rw-r--r--  net/netfilter/nft_exthdr.c | 8
-rw-r--r--  net/netfilter/nft_lookup.c | 27
-rw-r--r--  net/netfilter/nft_objref.c | 5
-rw-r--r--  net/netfilter/nft_set_bitmap.c | 11
-rw-r--r--  net/netfilter/nft_set_hash.c | 54
-rw-r--r--  net/netfilter/nft_set_pipapo.c | 207
-rw-r--r--  net/netfilter/nft_set_pipapo_avx2.c | 28
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 40
-rw-r--r--  net/netfilter/nft_socket.c | 2
-rw-r--r--  net/netfilter/x_tables.c | 16
-rw-r--r--  net/netfilter/xt_nfacct.c | 4
-rw-r--r--  net/netlink/af_netlink.c | 7
-rw-r--r--  net/nfc/netlink.c | 6
-rw-r--r--  net/openvswitch/actions.c | 6
-rw-r--r--  net/openvswitch/datapath.c | 8
-rw-r--r--  net/openvswitch/datapath.h | 3
-rw-r--r--  net/openvswitch/vport.c | 1
-rw-r--r--  net/packet/af_packet.c | 14
-rw-r--r--  net/packet/diag.c | 2
-rw-r--r--  net/phonet/socket.c | 4
-rw-r--r--  net/rds/af_rds.c | 2
-rw-r--r--  net/rds/send.c | 2
-rw-r--r--  net/rds/tcp_listen.c | 30
-rw-r--r--  net/rose/rose_in.c | 3
-rw-r--r--  net/sched/Kconfig | 12
-rw-r--r--  net/sched/Makefile | 1
-rw-r--r--  net/sched/act_api.c | 9
-rw-r--r--  net/sched/act_connmark.c | 18
-rw-r--r--  net/sched/act_csum.c | 18
-rw-r--r--  net/sched/act_ct.c | 30
-rw-r--r--  net/sched/act_ctinfo.c | 42
-rw-r--r--  net/sched/act_mpls.c | 21
-rw-r--r--  net/sched/act_nat.c | 25
-rw-r--r--  net/sched/act_pedit.c | 20
-rw-r--r--  net/sched/act_police.c | 18
-rw-r--r--  net/sched/act_skbedit.c | 20
-rw-r--r--  net/sched/bpf_qdisc.c | 9
-rw-r--r--  net/sched/em_text.c | 2
-rw-r--r--  net/sched/sch_cake.c | 5
-rw-r--r--  net/sched/sch_dualpi2.c | 1175
-rw-r--r--  net/sched/sch_ets.c | 11
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sched/sch_mqprio.c | 2
-rw-r--r--  net/sched/sch_netem.c | 40
-rw-r--r--  net/sched/sch_taprio.c | 33
-rw-r--r--  net/sctp/input.c | 4
-rw-r--r--  net/sctp/ipv6.c | 7
-rw-r--r--  net/sctp/proc.c | 4
-rw-r--r--  net/sctp/protocol.c | 3
-rw-r--r--  net/sctp/socket.c | 8
-rw-r--r--  net/sctp/transport.c | 2
-rw-r--r--  net/smc/af_smc.c | 9
-rw-r--r--  net/smc/smc_clc.c | 6
-rw-r--r--  net/smc/smc_core.c | 5
-rw-r--r--  net/smc/smc_diag.c | 2
-rw-r--r--  net/smc/smc_loopback.c | 6
-rw-r--r--  net/smc/smc_pnet.c | 2
-rw-r--r--  net/socket.c | 8
-rw-r--r--  net/strparser/strparser.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c | 4
-rw-r--r--  net/sunrpc/svcsock.c | 43
-rw-r--r--  net/sunrpc/xdr.c | 110
-rw-r--r--  net/sunrpc/xprtsock.c | 40
-rw-r--r--  net/tipc/socket.c | 2
-rw-r--r--  net/tipc/udp_media.c | 12
-rw-r--r--  net/tls/tls.h | 2
-rw-r--r--  net/tls/tls_strp.c | 11
-rw-r--r--  net/tls/tls_sw.c | 16
-rw-r--r--  net/unix/af_unix.c | 185
-rw-r--r--  net/unix/diag.c | 2
-rw-r--r--  net/vmw_vsock/af_vsock.c | 30
-rw-r--r--  net/vmw_vsock/hyperv_transport.c | 17
-rw-r--r--  net/vmw_vsock/virtio_transport.c | 20
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 3
-rw-r--r--  net/wireless/core.c | 23
-rw-r--r--  net/wireless/core.h | 11
-rw-r--r--  net/wireless/mlme.c | 34
-rw-r--r--  net/wireless/nl80211.c | 826
-rw-r--r--  net/wireless/rdev-ops.h | 45
-rw-r--r--  net/wireless/reg.c | 30
-rw-r--r--  net/wireless/scan.c | 204
-rw-r--r--  net/wireless/sme.c | 39
-rw-r--r--  net/wireless/trace.h | 129
-rw-r--r--  net/wireless/util.c | 36
-rw-r--r--  net/wireless/wext-compat.c | 10
-rw-r--r--  net/wireless/wext-core.c | 2
-rw-r--r--  net/x25/af_x25.c | 2
-rw-r--r--  net/x25/x25_dev.c | 22
-rw-r--r--  net/xdp/xsk.c | 38
-rw-r--r--  net/xdp/xsk_diag.c | 2
-rw-r--r--  net/xfrm/xfrm_device.c | 12
-rw-r--r--  net/xfrm/xfrm_input.c | 17
-rw-r--r--  net/xfrm/xfrm_policy.c | 4
-rw-r--r--  net/xfrm/xfrm_state.c | 81
-rw-r--r--  rust/Makefile | 24
-rw-r--r--  rust/bindings/lib.rs | 3
-rw-r--r--  rust/helpers/bug.c | 5
-rw-r--r--  rust/helpers/helpers.c | 5
-rw-r--r--  rust/helpers/time.c | 35
-rw-r--r--  rust/kernel/.gitignore | 2
-rw-r--r--  rust/kernel/alloc/allocator_test.rs | 2
-rw-r--r--  rust/kernel/alloc/kbox.rs | 98
-rw-r--r--  rust/kernel/alloc/kvec.rs | 59
-rw-r--r--  rust/kernel/bits.rs | 203
-rw-r--r--  rust/kernel/block/mq.rs | 2
-rw-r--r--  rust/kernel/block/mq/operations.rs | 2
-rw-r--r--  rust/kernel/block/mq/request.rs | 11
-rw-r--r--  rust/kernel/bug.rs | 126
-rw-r--r--  rust/kernel/clk.rs | 48
-rw-r--r--  rust/kernel/configfs.rs | 30
-rw-r--r--  rust/kernel/cpufreq.rs | 10
-rw-r--r--  rust/kernel/cpumask.rs | 4
-rw-r--r--  rust/kernel/device.rs | 4
-rw-r--r--  rust/kernel/device_id.rs | 4
-rw-r--r--  rust/kernel/devres.rs | 10
-rw-r--r--  rust/kernel/dma.rs | 205
-rw-r--r--  rust/kernel/drm/device.rs | 12
-rw-r--r--  rust/kernel/drm/file.rs | 8
-rw-r--r--  rust/kernel/drm/gem/mod.rs | 20
-rw-r--r--  rust/kernel/drm/ioctl.rs | 4
-rw-r--r--  rust/kernel/error.rs | 11
-rw-r--r--  rust/kernel/firmware.rs | 9
-rw-r--r--  rust/kernel/fmt.rs | 7
-rw-r--r--  rust/kernel/fs/file.rs | 2
-rw-r--r--  rust/kernel/generated_arch_reachable_asm.rs.S | 7
-rw-r--r--  rust/kernel/generated_arch_warn_asm.rs.S | 7
-rw-r--r--  rust/kernel/init.rs | 34
-rw-r--r--  rust/kernel/io.rs | 20
-rw-r--r--  rust/kernel/kunit.rs | 13
-rw-r--r--  rust/kernel/lib.rs | 10
-rw-r--r--  rust/kernel/list.rs | 63
-rw-r--r--  rust/kernel/list/impl_list_item_mod.rs | 239
-rw-r--r--  rust/kernel/miscdevice.rs | 12
-rw-r--r--  rust/kernel/mm/virt.rs | 52
-rw-r--r--  rust/kernel/net/phy.rs | 38
-rw-r--r--  rust/kernel/of.rs | 6
-rw-r--r--  rust/kernel/opp.rs | 20
-rw-r--r--  rust/kernel/pci.rs | 13
-rw-r--r--  rust/kernel/platform.rs | 2
-rw-r--r--  rust/kernel/prelude.rs | 4
-rw-r--r--  rust/kernel/print.rs | 12
-rw-r--r--  rust/kernel/rbtree.rs | 29
-rw-r--r--  rust/kernel/revocable.rs | 4
-rw-r--r--  rust/kernel/seq_file.rs | 2
-rw-r--r--  rust/kernel/sizes.rs | 24
-rw-r--r--  rust/kernel/str.rs | 111
-rw-r--r--  rust/kernel/sync.rs | 10
-rw-r--r--  rust/kernel/sync/arc.rs | 102
-rw-r--r--  rust/kernel/sync/aref.rs | 154
-rw-r--r--  rust/kernel/time.rs | 233
-rw-r--r--  rust/kernel/time/delay.rs | 49
-rw-r--r--  rust/kernel/time/hrtimer.rs | 304
-rw-r--r--  rust/kernel/time/hrtimer/arc.rs | 8
-rw-r--r--  rust/kernel/time/hrtimer/pin.rs | 10
-rw-r--r--  rust/kernel/time/hrtimer/pin_mut.rs | 10
-rw-r--r--  rust/kernel/time/hrtimer/tbox.rs | 8
-rw-r--r--  rust/kernel/types.rs | 229
-rw-r--r--  rust/kernel/uaccess.rs | 167
-rw-r--r--  rust/kernel/workqueue.rs | 342
-rw-r--r--  rust/kernel/xarray.rs | 9
-rw-r--r--  rust/macros/module.rs | 6
-rw-r--r--  rust/pin-init/README.md | 2
-rw-r--r--  rust/pin-init/examples/big_struct_in_place.rs | 28
-rw-r--r--  rust/pin-init/examples/linked_list.rs | 10
-rw-r--r--  rust/pin-init/examples/mutex.rs | 97
-rw-r--r--  rust/pin-init/examples/pthread_mutex.rs | 4
-rw-r--r--  rust/pin-init/examples/static_init.rs | 75
-rw-r--r--  rust/pin-init/src/__internal.rs | 1
-rw-r--r--  rust/pin-init/src/lib.rs | 120
-rw-r--r--  rust/pin-init/src/macros.rs | 16
-rw-r--r--  rust/uapi/lib.rs | 3
-rw-r--r--  samples/Kconfig | 9
-rw-r--r--  samples/damon/mtier.c | 78
-rw-r--r--  samples/damon/prcl.c | 53
-rw-r--r--  samples/damon/wsse.c | 53
-rw-r--r--  samples/fanotify/fs-monitor.c | 7
-rw-r--r--  samples/hung_task/hung_task_tests.c | 81
-rw-r--r--  samples/rust/rust_configfs.rs | 2
-rw-r--r--  samples/rust/rust_dma.rs | 28
-rw-r--r--  samples/rust/rust_driver_auxiliary.rs | 2
-rw-r--r--  samples/rust/rust_misc_device.rs | 2
-rw-r--r--  samples/rust/rust_print_main.rs | 2
-rw-r--r--  scripts/Makefile.build | 5
-rw-r--r--  scripts/Makefile.kstack_erase | 2
-rwxr-xr-x  scripts/check-sysctl-docs | 184
-rwxr-xr-x  scripts/checkpatch.pl | 47
-rwxr-xr-x  scripts/checktransupdate.py | 38
-rw-r--r--  scripts/coccinelle/misc/secs_to_jiffies.cocci | 49
-rwxr-xr-x  scripts/extract-vmlinux | 13
-rw-r--r--  scripts/gdb/linux/constants.py.in | 12
-rw-r--r--  scripts/gdb/linux/symbols.py | 26
-rw-r--r--  scripts/gendwarfksyms/cache.c | 2
-rw-r--r--  scripts/gendwarfksyms/die.c | 4
-rw-r--r--  scripts/gendwarfksyms/dwarf.c | 2
-rw-r--r--  scripts/gendwarfksyms/kabi.c | 2
-rw-r--r--  scripts/gendwarfksyms/symbols.c | 2
-rw-r--r--  scripts/gendwarfksyms/types.c | 33
-rw-r--r--  scripts/kconfig/conf.c | 2
-rw-r--r--  scripts/kconfig/confdata.c | 2
-rwxr-xr-x  scripts/kconfig/gconf-cfg.sh | 11
-rw-r--r--  scripts/kconfig/gconf.c | 1687
-rw-r--r--  scripts/kconfig/gconf.ui (renamed from scripts/kconfig/gconf.glade) | 361
-rw-r--r--  scripts/kconfig/lkc.h | 2
-rw-r--r--  scripts/kconfig/lxdialog/inputbox.c | 6
-rw-r--r--  scripts/kconfig/lxdialog/menubox.c | 2
-rw-r--r--  scripts/kconfig/lxdialog/util.c | 3
-rw-r--r--  scripts/kconfig/menu.c | 94
-rw-r--r--  scripts/kconfig/nconf.c | 2
-rw-r--r--  scripts/kconfig/nconf.gui.c | 1
-rw-r--r--  scripts/kconfig/qconf.cc | 36
-rw-r--r--  scripts/kconfig/qconf.h | 1
-rw-r--r--  scripts/kconfig/symbol.c | 4
-rwxr-xr-x  scripts/kernel-doc.py | 10
-rw-r--r--  scripts/lib/kdoc/kdoc_files.py | 4
-rw-r--r--  scripts/lib/kdoc/kdoc_item.py | 42
-rw-r--r--  scripts/lib/kdoc/kdoc_output.py | 172
-rw-r--r--  scripts/lib/kdoc/kdoc_parser.py | 858
-rw-r--r--  scripts/lib/kdoc/kdoc_re.py | 7
-rw-r--r--  scripts/module.lds.S | 5
-rwxr-xr-x  scripts/recordmcount.pl | 2
-rw-r--r--  scripts/rustdoc_test_gen.rs | 33
-rw-r--r--  scripts/spelling.txt | 1
-rwxr-xr-x  scripts/sphinx-pre-install | 6
-rwxr-xr-x  scripts/test_doc_build.py | 513
-rwxr-xr-x  scripts/ver_linux | 2
-rw-r--r--  security/apparmor/Makefile | 6
-rw-r--r--  security/apparmor/af_unix.c | 799
-rw-r--r--  security/apparmor/apparmorfs.c | 39
-rw-r--r--  security/apparmor/audit.c | 2
-rw-r--r--  security/apparmor/capability.c | 61
-rw-r--r--  security/apparmor/domain.c | 203
-rw-r--r--  security/apparmor/file.c | 92
-rw-r--r--  security/apparmor/include/af_unix.h | 55
-rw-r--r--  security/apparmor/include/apparmor.h | 4
-rw-r--r--  security/apparmor/include/audit.h | 5
-rw-r--r--  security/apparmor/include/capability.h | 1
-rw-r--r--  security/apparmor/include/cred.h | 31
-rw-r--r--  security/apparmor/include/file.h | 11
-rw-r--r--  security/apparmor/include/ipc.h | 3
-rw-r--r--  security/apparmor/include/label.h | 51
-rw-r--r--  security/apparmor/include/lib.h | 46
-rw-r--r--  security/apparmor/include/match.h | 10
-rw-r--r--  security/apparmor/include/net.h | 38
-rw-r--r--  security/apparmor/include/path.h | 1
-rw-r--r--  security/apparmor/include/perms.h | 8
-rw-r--r--  security/apparmor/include/policy.h | 59
-rw-r--r--  security/apparmor/include/sig_names.h | 6
-rw-r--r--  security/apparmor/include/signal.h | 19
-rw-r--r--  security/apparmor/ipc.c | 13
-rw-r--r--  security/apparmor/label.c | 37
-rw-r--r--  security/apparmor/lib.c | 114
-rw-r--r--  security/apparmor/lsm.c | 468
-rw-r--r--  security/apparmor/match.c | 23
-rw-r--r--  security/apparmor/mount.c | 12
-rw-r--r--  security/apparmor/net.c | 189
-rw-r--r--  security/apparmor/policy.c | 93
-rw-r--r--  security/apparmor/policy_compat.c | 6
-rw-r--r--  security/apparmor/policy_ns.c | 2
-rw-r--r--  security/apparmor/policy_unpack.c | 67
-rw-r--r--  security/apparmor/policy_unpack_test.c | 6
-rw-r--r--  security/apparmor/procattr.c | 6
-rw-r--r--  security/apparmor/resource.c | 11
-rw-r--r--  security/apparmor/task.c | 11
-rw-r--r--  security/commoncap.c | 20
-rw-r--r--  security/integrity/ima/ima_main.c | 26
-rw-r--r--  security/integrity/platform_certs/load_powerpc.c | 5
-rw-r--r--  security/ipe/Kconfig | 1
-rw-r--r--  security/ipe/audit.c | 33
-rw-r--r--  sound/hda/codecs/ca0132.c | 5
-rw-r--r--  sound/hda/codecs/cirrus/Kconfig | 29
-rw-r--r--  sound/hda/codecs/hdmi/Kconfig | 44
-rw-r--r--  sound/hda/codecs/hdmi/Makefile | 2
-rw-r--r--  sound/hda/codecs/realtek/Kconfig | 40
-rw-r--r--  sound/hda/codecs/realtek/alc269.c | 6
-rw-r--r--  sound/hda/codecs/side-codecs/cirrus_scodec_test.c | 2
-rw-r--r--  sound/hda/codecs/side-codecs/tas2781_hda.c | 47
-rw-r--r--  sound/hda/codecs/side-codecs/tas2781_hda.h | 2
-rw-r--r--  sound/hda/codecs/side-codecs/tas2781_hda_i2c.c | 4
-rw-r--r--  sound/hda/controllers/intel.c | 5
-rw-r--r--  sound/hda/core/i915.c | 2
-rw-r--r--  sound/pci/azt3328.c | 8
-rw-r--r--  sound/soc/Kconfig | 4
-rw-r--r--  sound/soc/amd/acp/acp-sdw-legacy-mach.c | 3
-rw-r--r--  sound/soc/amd/acp/acp-sdw-sof-mach.c | 1
-rw-r--r--  sound/soc/amd/acp/soc_amd_sdw_common.h | 2
-rw-r--r--  sound/soc/amd/ps/acp63.h | 1
-rw-r--r--  sound/soc/amd/ps/pci-ps.c | 4
-rw-r--r--  sound/soc/amd/ps/ps-sdw-dma.c | 5
-rw-r--r--  sound/soc/codecs/aw87390.c | 8
-rw-r--r--  sound/soc/codecs/aw88081.c | 5
-rw-r--r--  sound/soc/codecs/aw88166.c | 8
-rw-r--r--  sound/soc/codecs/aw88261.c | 8
-rw-r--r--  sound/soc/codecs/aw88395/aw88395.c | 8
-rw-r--r--  sound/soc/codecs/aw88399.c | 17
-rw-r--r--  sound/soc/codecs/cs42l43-jack.c | 46
-rw-r--r--  sound/soc/codecs/cs42l43.c | 24
-rw-r--r--  sound/soc/codecs/cs42l43.h | 5
-rw-r--r--  sound/soc/codecs/idt821034.c | 2
-rw-r--r--  sound/soc/codecs/lpass-tx-macro.c | 2
-rw-r--r--  sound/soc/codecs/peb2466.c | 2
-rw-r--r--  sound/soc/codecs/rt1320-sdw.c | 3
-rw-r--r--  sound/soc/codecs/rt5677.c | 2
-rw-r--r--  sound/soc/codecs/rt721-sdca.c | 2
-rw-r--r--  sound/soc/codecs/rt721-sdca.h | 4
-rw-r--r--  sound/soc/codecs/sma1307.c | 2
-rw-r--r--  sound/soc/codecs/tas2781-i2c.c | 6
-rw-r--r--  sound/soc/codecs/tlv320adc3xxx.c | 2
-rw-r--r--  sound/soc/codecs/wm5100.c | 2
-rw-r--r--  sound/soc/codecs/wm8903.c | 2
-rw-r--r--  sound/soc/codecs/wm8962.c | 13
-rw-r--r--  sound/soc/codecs/wm8996.c | 2
-rw-r--r--  sound/soc/codecs/zl38060.c | 2
-rw-r--r--  sound/soc/fsl/fsl_sai.c | 20
-rw-r--r--  sound/soc/fsl/fsl_xcvr.c | 25
-rw-r--r--  sound/soc/fsl/imx-card.c | 40
-rw-r--r--  sound/soc/intel/avs/core.c | 3
-rw-r--r--  sound/soc/intel/boards/sof_sdw.c | 8
-rw-r--r--  sound/soc/sdca/sdca_functions.c | 99
-rw-r--r--  sound/soc/sdca/sdca_regmap.c | 29
-rw-r--r--  sound/soc/soc-ac97.c | 2
-rw-r--r--  sound/soc/sof/amd/acp-loader.c | 6
-rw-r--r--  sound/soc/sof/amd/acp.c | 8
-rw-r--r--  sound/soc/sof/amd/acp.h | 1
-rw-r--r--  sound/soc/sof/amd/pci-acp70.c | 1
-rw-r--r--  sound/soc/sof/intel/Kconfig | 3
-rw-r--r--  sound/soc/stm/stm32_i2s.c | 7
-rw-r--r--  sound/soc/ti/davinci-mcasp.c | 2
-rw-r--r--  sound/usb/mixer_scarlett2.c | 14
-rw-r--r--  sound/usb/pcm.c | 9
-rw-r--r--  sound/usb/quirks.c | 2
-rw-r--r--  sound/usb/stream.c | 25
-rw-r--r--  sound/usb/validate.c | 12
-rw-r--r--  sound/x86/intel_hdmi_audio.c | 2
-rw-r--r--  tools/accounting/Makefile | 2
-rw-r--r--  tools/accounting/delaytop.c | 862
-rw-r--r--  tools/accounting/getdelays.c | 167
-rw-r--r--  tools/bootconfig/main.c | 43
-rw-r--r--  tools/bootconfig/scripts/ftrace.sh | 1
-rwxr-xr-x  tools/bootconfig/test-bootconfig.sh | 37
-rw-r--r--  tools/bpf/bpf_jit_disasm.c | 2
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-prog.rst | 7
-rw-r--r--  tools/bpf/bpftool/bash-completion/bpftool | 16
-rw-r--r--  tools/bpf/bpftool/btf.c | 8
-rw-r--r--  tools/bpf/bpftool/common.c | 59
-rw-r--r--  tools/bpf/bpftool/iter.c | 2
-rw-r--r--  tools/bpf/bpftool/link.c | 8
-rw-r--r--  tools/bpf/bpftool/main.c | 6
-rw-r--r--  tools/bpf/bpftool/main.h | 13
-rw-r--r--  tools/bpf/bpftool/map.c | 56
-rw-r--r--  tools/bpf/bpftool/map_perf_ring.c | 3
-rw-r--r--  tools/bpf/bpftool/net.c | 15
-rw-r--r--  tools/bpf/bpftool/prog.c | 53
-rw-r--r--  tools/build/Makefile.feature | 9
-rw-r--r--  tools/build/feature/Makefile | 27
-rw-r--r--  tools/build/feature/test-all.c | 24
-rw-r--r--  tools/build/feature/test-libbpf-strings.c | 10
-rw-r--r--  tools/build/feature/test-libcrypto.c | 25
-rw-r--r--  tools/cgroup/memcg_slabinfo.py | 4
-rw-r--r--  tools/include/uapi/linux/bpf.h | 45
-rw-r--r--  tools/include/uapi/linux/if_xdp.h | 1
-rw-r--r--  tools/include/uapi/linux/kvm.h | 1
-rw-r--r--  tools/include/uapi/linux/netdev.h | 6
-rw-r--r--  tools/lib/bpf/bpf.c | 64
-rw-r--r--  tools/lib/bpf/bpf.h | 26
-rw-r--r--  tools/lib/bpf/bpf_helpers.h | 17
-rw-r--r--  tools/lib/bpf/btf.h | 3
-rw-r--r--  tools/lib/bpf/btf_dump.c | 55
-rw-r--r--  tools/lib/bpf/libbpf.c | 50
-rw-r--r--  tools/lib/bpf/libbpf.h | 19
-rw-r--r--  tools/lib/bpf/libbpf.map | 5
-rw-r--r--  tools/lib/bpf/libbpf_version.h | 2
-rw-r--r--  tools/lib/bpf/usdt.c | 10
-rw-r--r--  tools/lib/perf/evlist.c | 119
-rw-r--r--  tools/lib/perf/evsel.c | 11
-rw-r--r--  tools/lib/perf/include/internal/evsel.h | 3
-rw-r--r--  tools/lib/perf/include/perf/event.h | 18
-rw-r--r--  tools/lib/subcmd/help.c | 12
-rw-r--r--  tools/lib/subcmd/run-command.c | 15
-rw-r--r--  tools/mm/show_page_info.py | 169
-rwxr-xr-x  tools/net/ynl/pyynl/cli.py | 2
-rw-r--r--  tools/net/ynl/pyynl/lib/ynl.py | 23
-rwxr-xr-x  tools/net/ynl/pyynl/ynl_gen_c.py | 49
-rw-r--r--  tools/objtool/noreturns.h | 1
-rw-r--r--  tools/perf/.gitignore | 2
-rw-r--r--  tools/perf/Build | 2
-rw-r--r--  tools/perf/Documentation/perf-check.txt | 2
-rw-r--r--  tools/perf/Documentation/perf-ftrace.txt | 10
-rw-r--r--  tools/perf/Documentation/perf-list.txt | 25
-rw-r--r--  tools/perf/Documentation/perf-record.txt | 4
-rw-r--r--  tools/perf/Documentation/perf-stat.txt | 6
-rw-r--r--  tools/perf/Documentation/perf-trace.txt | 8
-rw-r--r--  tools/perf/Makefile.config | 34
-rw-r--r--  tools/perf/Makefile.perf | 12
-rw-r--r--  tools/perf/arch/powerpc/util/Build | 1
-rw-r--r--  tools/perf/arch/powerpc/util/event.c | 60
-rw-r--r--  tools/perf/arch/riscv/util/kvm-stat.c | 6
-rw-r--r--  tools/perf/arch/riscv/util/riscv_exception_types.h | 35
-rw-r--r--  tools/perf/arch/riscv/util/riscv_trap_types.h | 57
-rw-r--r--  tools/perf/arch/x86/Build | 2
-rw-r--r--  tools/perf/arch/x86/include/arch-tests.h | 5
-rw-r--r--  tools/perf/arch/x86/tests/Build | 4
-rw-r--r--  tools/perf/arch/x86/tests/arch-tests.c | 3
-rw-r--r--  tools/perf/arch/x86/tests/sample-parsing.c | 125
-rw-r--r--  tools/perf/arch/x86/tests/topdown.c | 76
-rw-r--r--  tools/perf/arch/x86/util/event.c | 46
-rw-r--r--  tools/perf/arch/x86/util/evlist.c | 24
-rw-r--r--  tools/perf/arch/x86/util/evsel.c | 46
-rw-r--r--  tools/perf/arch/x86/util/topdown.c | 59
-rw-r--r--  tools/perf/arch/x86/util/topdown.h | 6
-rw-r--r--  tools/perf/bench/evlist-open-close.c | 36
-rw-r--r--  tools/perf/bench/inject-buildid.c | 2
-rw-r--r--  tools/perf/bench/synthesize.c | 27
-rw-r--r--  tools/perf/builtin-annotate.c | 6
-rw-r--r--  tools/perf/builtin-buildid-cache.c | 22
-rw-r--r--  tools/perf/builtin-buildid-list.c | 11
-rw-r--r--  tools/perf/builtin-c2c.c | 69
-rw-r--r--  tools/perf/builtin-check.c | 2
-rw-r--r--  tools/perf/builtin-diff.c | 2
-rw-r--r--  tools/perf/builtin-ftrace.c | 111
-rw-r--r--  tools/perf/builtin-inject.c | 42
-rw-r--r--  tools/perf/builtin-kallsyms.c | 21
-rw-r--r--  tools/perf/builtin-kmem.c | 2
-rw-r--r--  tools/perf/builtin-kvm.c | 6
-rw-r--r--  tools/perf/builtin-kwork.c | 2
-rw-r--r--  tools/perf/builtin-list.c | 65
-rw-r--r--  tools/perf/builtin-lock.c | 4
-rw-r--r--  tools/perf/builtin-mem.c | 2
-rw-r--r--  tools/perf/builtin-record.c | 101
-rw-r--r--  tools/perf/builtin-report.c | 27
-rw-r--r--  tools/perf/builtin-sched.c | 160
-rw-r--r--  tools/perf/builtin-script.c | 36
-rw-r--r--  tools/perf/builtin-stat.c | 61
-rw-r--r--  tools/perf/builtin-timechart.c | 2
-rw-r--r--  tools/perf/builtin-top.c | 88
-rw-r--r--  tools/perf/builtin-trace.c | 268
-rwxr-xr-x  tools/perf/check-headers.sh | 9
-rw-r--r--  tools/perf/include/perf/perf_dlfilter.h | 2
-rw-r--r--  tools/perf/jvmti/libjvmti.c | 4
-rwxr-xr-x  tools/perf/perf-archive.sh | 35
-rw-r--r--  tools/perf/perf.c | 3
-rw-r--r--  tools/perf/pmu-events/arch/arm64/common-and-microarch.json | 70
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/core-imp-def.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/cycle_accounting.json | 4
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/exception.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/fp_operation.json | 98
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1d_cache.json | 10
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1i_cache.json | 8
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l2_cache.json | 28
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l3_cache.json | 63
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/ll_cache.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pipeline.json | 6
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/spec_operation.json | 12
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/stall.json | 4
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/sve.json | 44
-rw-r--r--  tools/perf/pmu-events/arch/arm64/fujitsu/monaka/tlb.json | 56
-rw-r--r--  tools/perf/pmu-events/arch/common/common/software.json | 92
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z17/basic.json | 58
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z17/crypto6.json | 142
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z17/extended.json | 541
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z17/pai_crypto.json | 1213
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z17/pai_ext.json | 261
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z17/transaction.json | 72
-rw-r--r--  tools/perf/pmu-events/arch/s390/mapfile.csv | 1
-rw-r--r--  tools/perf/pmu-events/arch/x86/alderlake/cache.json | 56
-rw-r--r--  tools/perf/pmu-events/arch/x86/alderlake/floating-point.json | 1
-rw-r--r--  tools/perf/pmu-events/arch/x86/alderlake/other.json | 1
-rw-r--r--  tools/perf/pmu-events/arch/x86/alderlake/pipeline.json | 44
-rw-r--r--  tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json | 3
-rw-r--r--  tools/perf/pmu-events/arch/x86/alderlaken/cache.json | 52
-rw-r--r--  tools/perf/pmu-events/arch/x86/alderlaken/floating-point.json | 1
-rw-r--r--  tools/perf/pmu-events/arch/x86/alderlaken/other.json | 1
-rw-r--r--  tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json | 42
-rw-r--r--  tools/perf/pmu-events/arch/x86/alderlaken/virtual-memory.json | 3
-rw-r--r--  tools/perf/pmu-events/arch/x86/arrowlake/cache.json | 13
-rw-r--r--  tools/perf/pmu-events/arch/x86/arrowlake/frontend.json | 135
-rw-r--r--  tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json | 6
-rw-r--r--  tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json | 12
-rw-r--r--  tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json | 20
-rw-r--r--  tools/perf/pmu-events/arch/x86/grandridge/grr-metrics.json | 30
-rw-r--r--  tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json | 10
-rw-r--r--  tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json | 12
-rw-r--r--  tools/perf/pmu-events/arch/x86/graniterapids/cache.json | 9
-rw-r--r--  tools/perf/pmu-events/arch/x86/graniterapids/counter.json | 10
-rw-r--r--  tools/perf/pmu-events/arch/x86/graniterapids/gnr-metrics.json | 36
-rw-r--r--  tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/x86/graniterapids/uncore-interconnect.json | 19
-rw-r--r--  tools/perf/pmu-events/arch/x86/graniterapids/uncore-io.json | 27
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/pipeline.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/x86/lunarlake/cache.json | 11
-rw-r--r--  tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json | 18
-rw-r--r--  tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json | 18
-rw-r--r--  tools/perf/pmu-events/arch/x86/mapfile.csv | 29
-rw-r--r--  tools/perf/pmu-events/arch/x86/meteorlake/cache.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/x86/meteorlake/frontend.json | 72
-rw-r--r--  tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/x86/pantherlake/cache.json | 278
-rw-r--r--  tools/perf/pmu-events/arch/x86/pantherlake/counter.json | 12
-rw-r--r--  tools/perf/pmu-events/arch/x86/pantherlake/frontend.json | 30
-rw-r--r--  tools/perf/pmu-events/arch/x86/pantherlake/memory.json | 215
-rw-r--r--  tools/perf/pmu-events/arch/x86/pantherlake/pipeline.json | 325
-rw-r--r--  tools/perf/pmu-events/arch/x86/pantherlake/virtual-memory.json | 62
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json | 12
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json | 20
-rw-r--r--  tools/perf/pmu-events/arch/x86/sierraforest/frontend.json | 64
-rw-r--r--  tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json | 8
-rw-r--r--  tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json | 48
-rw-r--r--  tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json | 6
-rw-r--r--  tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json | 53
-rw-r--r--  tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json | 27
-rw-r--r--  tools/perf/pmu-events/arch/x86/skylakex/pipeline.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json | 2
-rw-r--r--  tools/perf/pmu-events/empty-pmu-events.c | 266
-rwxr-xr-x  tools/perf/pmu-events/jevents.py | 19
-rw-r--r--  tools/perf/pmu-events/pmu-events.h | 14
-rwxr-xr-x  tools/perf/scripts/python/flamegraph.py | 82
-rw-r--r--  tools/perf/tests/Build | 3
-rw-r--r--  tools/perf/tests/backward-ring-buffer.c | 1
-rw-r--r--  tools/perf/tests/bp_account.c | 1
-rw-r--r--  tools/perf/tests/builtin-test.c | 92
-rw-r--r--  tools/perf/tests/code-reading.c | 13
-rw-r--r--  tools/perf/tests/dlfilter-test.c | 51
-rw-r--r--  tools/perf/tests/dwarf-unwind.c | 10
-rw-r--r--  tools/perf/tests/event-times.c | 8
-rw-r--r--  tools/perf/tests/event_update.c | 4
-rw-r--r--  tools/perf/tests/expand-cgroup.c | 24
-rw-r--r--  tools/perf/tests/hists_cumulate.c | 8
-rw-r--r--  tools/perf/tests/hists_filter.c | 8
-rw-r--r--  tools/perf/tests/hists_link.c | 8
-rw-r--r--  tools/perf/tests/hists_output.c | 10
-rw-r--r--  tools/perf/tests/hwmon_pmu.c | 11
-rw-r--r--  tools/perf/tests/keep-tracking.c | 2
-rw-r--r--  tools/perf/tests/make | 8
-rw-r--r--  tools/perf/tests/mmap-basic.c | 291
-rw-r--r--  tools/perf/tests/mmap-thread-lookup.c | 6
-rw-r--r--  tools/perf/tests/openat-syscall-all-cpus.c | 2
-rw-r--r--  tools/perf/tests/openat-syscall-tp-fields.c | 1
-rw-r--r--  tools/perf/tests/openat-syscall.c | 2
-rw-r--r--  tools/perf/tests/parse-events.c | 24
-rw-r--r--  tools/perf/tests/parse-metric.c | 16
-rw-r--r--  tools/perf/tests/pe-file-parsing.c | 2
-rw-r--r--  tools/perf/tests/perf-record.c | 1
-rwxr-xr-x  tools/perf/tests/perf-targz-src-pkg | 2
-rw-r--r--  tools/perf/tests/perf-time-to-tsc.c | 2
-rw-r--r--  tools/perf/tests/pmu-events.c | 30
-rw-r--r--  tools/perf/tests/sample-parsing.c | 14
-rw-r--r--  tools/perf/tests/sdt.c | 4
-rwxr-xr-x  tools/perf/tests/shell/amd-ibs-swfilt.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/annotate.sh | 15
-rwxr-xr-x  tools/perf/tests/shell/buildid.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/coresight/asm_pure_loop.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/diff.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/drm_pmu.sh | 78
-rwxr-xr-x  tools/perf/tests/shell/ftrace.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/header.sh | 74
-rw-r--r--  tools/perf/tests/shell/lib/perf_has_symbol.sh | 2
-rw-r--r--  tools/perf/tests/shell/lib/perf_json_output_lint.py | 4
-rw-r--r--  tools/perf/tests/shell/lib/probe_vfs_getname.sh | 2
-rw-r--r--  tools/perf/tests/shell/lib/setup_python.sh | 2
-rw-r--r--  tools/perf/tests/shell/lib/waiting.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/list.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/lock_contention.sh | 28
-rwxr-xr-x  tools/perf/tests/shell/perf-report-hierarchy.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/probe_vfs_getname.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/record+probe_libc_inet_pton.sh | 7
-rwxr-xr-x  tools/perf/tests/shell/record+script_probe_vfs_getname.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/record+zstd_comp_decomp.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/record.sh | 56
-rwxr-xr-x  tools/perf/tests/shell/record_bpf_filter.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/record_offcpu.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/record_sideband.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/sched.sh | 116
-rwxr-xr-x  tools/perf/tests/shell/script.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_summary.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat+shadow_stat.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat_all_pfm.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters_cgrp.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_arm_callgraph_fp.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_arm_coresight.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_arm_coresight_disasm.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe_fork.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_bpf_metadata.sh | 76
-rwxr-xr-x  tools/perf/tests/shell/test_intel_pt.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/trace+probe_vfs_getname.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/trace_btf_enum.sh | 19
-rwxr-xr-x  tools/perf/tests/shell/trace_btf_general.sh | 19
-rwxr-xr-x  tools/perf/tests/shell/trace_exit_race.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/trace_record_replay.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/trace_summary.sh | 2
-rw-r--r--  tools/perf/tests/subcmd-help.c | 108
-rw-r--r--  tools/perf/tests/switch-tracking.c | 2
-rw-r--r--  tools/perf/tests/symbols.c | 12
-rw-r--r--  tools/perf/tests/task-exit.c | 1
-rw-r--r--  tools/perf/tests/tests-scripts.c | 2
-rw-r--r--  tools/perf/tests/tests.h | 11
-rw-r--r--  tools/perf/tests/thread-map.c | 2
-rw-r--r--  tools/perf/tests/topology.c | 39
-rw-r--r--  tools/perf/tests/util.c | 45
-rw-r--r--  tools/perf/tests/workloads/noploop.c | 2
-rw-r--r--  tools/perf/trace/beauty/Build | 2
-rw-r--r--  tools/perf/ui/browser.h | 4
-rw-r--r--  tools/perf/ui/browsers/annotate.c | 86
-rw-r--r--  tools/perf/ui/browsers/header.c | 4
-rw-r--r--  tools/perf/ui/browsers/hists.c | 2
-rw-r--r--  tools/perf/ui/browsers/scripts.c | 2
-rw-r--r--  tools/perf/ui/tui/setup.c | 2
-rw-r--r--  tools/perf/util/Build | 6
-rw-r--r--  tools/perf/util/affinity.c | 18
-rw-r--r--  tools/perf/util/affinity.h | 2
-rw-r--r--  tools/perf/util/amd-sample-raw.c | 2
-rw-r--r--  tools/perf/util/annotate.c | 2
-rw-r--r--  tools/perf/util/annotate.h | 1
-rw-r--r--  tools/perf/util/arm-spe.c | 2
-rw-r--r--  tools/perf/util/auxtrace.c | 13
-rw-r--r--  tools/perf/util/auxtrace.h | 6
-rw-r--r--  tools/perf/util/bpf-event.c | 380
-rw-r--r--  tools/perf/util/bpf-event.h | 13
-rw-r--r--  tools/perf/util/bpf-filter.c | 35
-rw-r--r--  tools/perf/util/bpf-filter.h | 3
-rw-r--r--  tools/perf/util/bpf_ftrace.c | 75
-rw-r--r--  tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c | 3
-rw-r--r--  tools/perf/util/bpf_skel/func_latency.bpf.c | 148
-rw-r--r--  tools/perf/util/bpf_skel/perf_version.h | 17
-rw-r--r--  tools/perf/util/bpf_trace_augment.c | 143
-rw-r--r--  tools/perf/util/branch.c | 2
-rw-r--r--  tools/perf/util/build-id.c | 65
-rw-r--r--  tools/perf/util/build-id.h | 8
-rw-r--r--  tools/perf/util/cap.c | 1
-rw-r--r--  tools/perf/util/cap.h | 5
-rw-r--r--  tools/perf/util/cgroup.c | 23
-rw-r--r--  tools/perf/util/cgroup.h | 3
-rw-r--r--  tools/perf/util/comm.c | 2
-rw-r--r--  tools/perf/util/data-convert-bt.c | 16
-rw-r--r--  tools/perf/util/data-convert-json.c | 36
-rw-r--r--  tools/perf/util/db-export.c | 11
-rw-r--r--  tools/perf/util/debug.c | 75
-rw-r--r--  tools/perf/util/debug.h | 1
-rw-r--r--  tools/perf/util/debuginfo.c | 2
-rw-r--r--  tools/perf/util/disasm.c | 9
-rw-r--r--  tools/perf/util/dlfilter.c | 2
-rw-r--r--  tools/perf/util/drm_pmu.c | 686
-rw-r--r--  tools/perf/util/drm_pmu.h | 39
-rw-r--r--  tools/perf/util/dso.c | 115
-rw-r--r--  tools/perf/util/dso.h | 75
-rw-r--r--  tools/perf/util/dsos.c | 20
-rw-r--r--  tools/perf/util/env.c | 132
-rw-r--r--  tools/perf/util/env.h | 9
-rw-r--r--  tools/perf/util/event.c | 23
-rw-r--r--  tools/perf/util/event.h | 6
-rw-r--r--  tools/perf/util/evlist.c | 21
-rw-r--r--  tools/perf/util/evlist.h | 9
-rw-r--r--  tools/perf/util/evsel.c | 125
-rw-r--r--  tools/perf/util/evsel.h | 8
-rw-r--r--  tools/perf/util/expr.c | 8
-rw-r--r--  tools/perf/util/ftrace.h | 5
-rw-r--r--  tools/perf/util/genelf.c | 87
-rw-r--r--  tools/perf/util/header.c | 256
-rw-r--r--  tools/perf/util/header.h | 1
-rw-r--r--  tools/perf/util/hist.c | 4
-rw-r--r--  tools/perf/util/hist.h | 3
-rw-r--r--  tools/perf/util/hwmon_pmu.c | 40
-rw-r--r--  tools/perf/util/hwmon_pmu.h | 4
-rw-r--r--  tools/perf/util/intel-tpebs.c | 4
-rw-r--r--  tools/perf/util/jitdump.c | 21
-rw-r--r--  tools/perf/util/machine.c | 44
-rw-r--r--  tools/perf/util/machine.h | 6
-rw-r--r--  tools/perf/util/map.c | 15
-rw-r--r--  tools/perf/util/map.h | 5
-rw-r--r--  tools/perf/util/metricgroup.c | 277
-rw-r--r--  tools/perf/util/metricgroup.h | 10
-rw-r--r--  tools/perf/util/parse-events.c | 438
-rw-r--r--  tools/perf/util/parse-events.h | 5
-rw-r--r--  tools/perf/util/parse-events.l | 38
-rw-r--r--  tools/perf/util/parse-events.y | 29
-rw-r--r--  tools/perf/util/pfm.c | 6
-rw-r--r--  tools/perf/util/pmu.c | 63
-rw-r--r--  tools/perf/util/pmu.h | 5
-rw-r--r--  tools/perf/util/pmus.c | 134
-rw-r--r--  tools/perf/util/pmus.h | 7
-rw-r--r--  tools/perf/util/print-events.c | 233
-rw-r--r--  tools/perf/util/print-events.h | 4
-rw-r--r--  tools/perf/util/probe-event.c | 12
-rw-r--r--  tools/perf/util/probe-file.c | 4
-rw-r--r--  tools/perf/util/probe-finder.c | 5
-rw-r--r--  tools/perf/util/python.c | 145
-rw-r--r--  tools/perf/util/s390-cpumsf.c | 2
-rw-r--r--  tools/perf/util/sample-raw.c | 7
-rw-r--r--  tools/perf/util/sample-raw.h | 2
-rw-r--r--  tools/perf/util/sample.h | 6
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 9
-rw-r--r--  tools/perf/util/session.c | 23
-rw-r--r--  tools/perf/util/session.h | 7
-rw-r--r--  tools/perf/util/sha1.c | 97
-rw-r--r--  tools/perf/util/sha1.h | 6
-rw-r--r--  tools/perf/util/sort.c | 95
-rw-r--r--  tools/perf/util/sort.h | 5
-rw-r--r--  tools/perf/util/spark.c | 8
-rw-r--r--  tools/perf/util/spark.h | 1
-rw-r--r--  tools/perf/util/srcline.c | 10
-rw-r--r--  tools/perf/util/stat-display.c | 50
-rw-r--r--  tools/perf/util/stat-shadow.c | 12
-rw-r--r--  tools/perf/util/stat.c | 8
-rw-r--r--  tools/perf/util/stat.h | 12
-rw-r--r--  tools/perf/util/symbol-minimal.c | 2
-rw-r--r--  tools/perf/util/symbol.c | 10
-rw-r--r--  tools/perf/util/symbol_conf.h | 2
-rw-r--r--  tools/perf/util/synthetic-events.c | 58
-rw-r--r--  tools/perf/util/synthetic-events.h | 2
-rw-r--r--  tools/perf/util/target.c | 54
-rw-r--r--  tools/perf/util/target.h | 15
-rw-r--r--  tools/perf/util/thread.c | 26
-rw-r--r--  tools/perf/util/thread.h | 9
-rw-r--r--  tools/perf/util/thread_map.c | 32
-rw-r--r--  tools/perf/util/thread_map.h | 6
-rw-r--r--  tools/perf/util/tool.c | 16
-rw-r--r--  tools/perf/util/tool.h | 3
-rw-r--r--  tools/perf/util/tool_pmu.c | 56
-rw-r--r--  tools/perf/util/tool_pmu.h | 2
-rw-r--r--  tools/perf/util/top.c | 4
-rw-r--r--  tools/perf/util/top.h | 1
-rw-r--r--  tools/perf/util/tp_pmu.c | 210
-rw-r--r--  tools/perf/util/tp_pmu.h | 19
-rw-r--r--  tools/perf/util/trace_augment.h | 62
-rw-r--r--  tools/perf/util/unwind-libdw.c | 7
-rw-r--r--  tools/power/x86/turbostat/turbostat.8 | 11
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 768
-rw-r--r--  tools/sched_ext/scx_qmap.bpf.c | 23
-rw-r--r--  tools/testing/cxl/Kbuild | 1
-rw-r--r--  tools/testing/cxl/config_check.c | 1
-rw-r--r--  tools/testing/cxl/test/cxl.c | 7
-rwxr-xr-x  tools/testing/ktest/ktest.pl | 116
-rw-r--r--  tools/testing/ktest/sample.conf | 2
-rw-r--r--  tools/testing/nvdimm/pmem-dax.c | 6
-rw-r--r--  tools/testing/nvdimm/test/iomap.c | 11
-rw-r--r--  tools/testing/nvdimm/test/nfit_test.h | 1
-rw-r--r--  tools/testing/radix-tree/maple.c | 19
-rw-r--r--  tools/testing/selftests/alsa/utimer-test.c | 1
-rw-r--r--  tools/testing/selftests/arm64/abi/Makefile | 2
-rw-r--r--  tools/testing/selftests/arm64/abi/hwcap.c | 16
-rw-r--r--  tools/testing/selftests/arm64/abi/tpidr2.c | 140
-rw-r--r--  tools/testing/selftests/arm64/fp/fp-ptrace.c | 77
-rw-r--r--  tools/testing/selftests/arm64/fp/sve-ptrace.c | 12
-rw-r--r--  tools/testing/selftests/arm64/mte/check_buffer_fill.c | 12
-rw-r--r--  tools/testing/selftests/arm64/mte/check_child_memory.c | 8
-rw-r--r--  tools/testing/selftests/arm64/mte/check_hugetlb_options.c | 10
-rw-r--r--  tools/testing/selftests/arm64/mte/check_ksm_options.c | 6
-rw-r--r--  tools/testing/selftests/arm64/mte/check_mmap_options.c | 896
-rw-r--r--  tools/testing/selftests/arm64/mte/check_prctl.c | 29
-rw-r--r--  tools/testing/selftests/arm64/mte/check_tags_inclusion.c | 10
-rw-r--r--  tools/testing/selftests/arm64/mte/check_user_mem.c | 4
-rw-r--r--  tools/testing/selftests/arm64/mte/mte_common_util.c | 84
-rw-r--r--  tools/testing/selftests/arm64/mte/mte_common_util.h | 9
-rw-r--r--  tools/testing/selftests/arm64/mte/mte_def.h | 8
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST | 1
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST.aarch64 | 1
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 6
-rw-r--r--  tools/testing/selftests/bpf/bpf_arena_common.h | 3
-rw-r--r--  tools/testing/selftests/bpf/bpf_atomic.h | 2
-rw-r--r--  tools/testing/selftests/bpf/bpf_kfuncs.h | 2
-rw-r--r--  tools/testing/selftests/bpf/cgroup_helpers.c | 21
-rw-r--r--  tools/testing/selftests/bpf/cgroup_helpers.h | 4
-rw-r--r--  tools/testing/selftests/bpf/config | 4
-rw-r--r--  tools/testing/selftests/bpf/config.ppc64el | 93
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_cookie.c | 50
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_dump.c | 118
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c | 617
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c | 77
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c | 117
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/dynptr.c | 18
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fd_array.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_noreturns.c | 9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/linked_list.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/log_buf.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c | 9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/reg_bounds.c | 14
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/ringbuf.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c | 458
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c | 91
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockmap_listen.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stream.c | 141
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/string_kfuncs.c | 65
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tailcalls.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tc_helpers.h | 28
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_veristat.c | 127
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/token.c | 85
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tracing_failure.c | 52
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/usdt.c | 14
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/user_ringbuf.c | 10
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/verifier.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c | 114
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c | 13
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c | 22
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_misc.h | 25
-rw-r--r--  tools/testing/selftests/bpf/progs/cgroup_mprog.c | 30
-rw-r--r--  tools/testing/selftests/bpf/progs/compute_live_registers.c | 16
-rw-r--r--  tools/testing/selftests/bpf/progs/dynptr_success.c | 174
-rw-r--r--  tools/testing/selftests/bpf/progs/fexit_noreturns.c | 15
-rw-r--r--  tools/testing/selftests/bpf/progs/iters.c | 277
-rw-r--r--  tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c | 229
-rw-r--r--  tools/testing/selftests/bpf/progs/rbtree.c | 14
-rw-r--r--  tools/testing/selftests/bpf/progs/rcu_read_lock.c | 5
-rw-r--r--  tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/security_bpf_map.c | 69
-rw-r--r--  tools/testing/selftests/bpf/progs/set_global_vars.c | 56
-rw-r--r--  tools/testing/selftests/bpf/progs/sock_iter_batch.c | 36
-rw-r--r--  tools/testing/selftests/bpf/progs/stream.c | 79
-rw-r--r--  tools/testing/selftests/bpf/progs/stream_fail.c | 33
-rw-r--r--  tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c | 87
-rw-r--r--  tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c | 23
-rw-r--r--  tools/testing/selftests/bpf/progs/string_kfuncs_success.c | 37
-rw-r--r--  tools/testing/selftests/bpf/progs/struct_ops_private_stack.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_lookup_key.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_ringbuf_write.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sig_in_xattr.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c | 9
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sockmap_ktls.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tc_change_tail.c | 14
-rw-r--r--  tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c | 8
-rw-r--r--  tools/testing/selftests/bpf/progs/tracing_failure.c | 12
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_and.c | 8
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_arena.c | 106
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_arena_large.c | 98
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_bounds.c | 360
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c | 11
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_ctx.c | 48
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_div_overflow.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c | 128
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_map_in_map.c | 118
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_movsx.c | 16
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_precision.c | 70
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_private_stack.c | 89
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_ref_tracking.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_tailcall.c | 31
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_unpriv.c | 233
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c | 38
-rwxr-xr-x  tools/testing/selftests/bpf/test_bpftool_map.sh | 398
-rw-r--r--  tools/testing/selftests/bpf/test_loader.c | 30
-rw-r--r--  tools/testing/selftests/bpf/test_maps.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_progs.h | 28
-rw-r--r--  tools/testing/selftests/bpf/unpriv_helpers.c | 94
-rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c | 24
-rw-r--r--  tools/testing/selftests/bpf/verifier/dead_code.c | 3
-rw-r--r--  tools/testing/selftests/bpf/verifier/jmp32.c | 33
-rw-r--r--  tools/testing/selftests/bpf/verifier/jset.c | 10
-rw-r--r--  tools/testing/selftests/bpf/veristat.c | 610
-rwxr-xr-x  tools/testing/selftests/bpf/vmtest.sh | 9
-rw-r--r--  tools/testing/selftests/bpf/xskxceiver.c | 56
-rw-r--r--  tools/testing/selftests/bpf/xskxceiver.h | 1
-rw-r--r--  tools/testing/selftests/cachestat/test_cachestat.c | 62
-rw-r--r--  tools/testing/selftests/cgroup/lib/cgroup_util.c | 4
-rw-r--r--  tools/testing/selftests/cgroup/lib/include/cgroup_util.h | 5
-rw-r--r--  tools/testing/selftests/cgroup/test_core.c | 84
-rw-r--r--  tools/testing/selftests/cgroup/test_cpu.c | 63
-rw-r--r--  tools/testing/selftests/cgroup/test_kmem.c | 5
-rw-r--r--  tools/testing/selftests/cgroup/test_zswap.c | 2
-rw-r--r--  tools/testing/selftests/damon/Makefile | 2
-rw-r--r--  tools/testing/selftests/damon/_common.sh | 11
-rw-r--r--  tools/testing/selftests/damon/_damon_sysfs.py | 304
-rwxr-xr-x  tools/testing/selftests/damon/drgn_dump_damon_status.py | 222
-rwxr-xr-x  tools/testing/selftests/damon/lru_sort.sh | 8
-rwxr-xr-x  tools/testing/selftests/damon/reclaim.sh | 8
-rwxr-xr-x  tools/testing/selftests/damon/sysfs.py | 272
-rwxr-xr-x  tools/testing/selftests/damon/sysfs.sh | 11
-rwxr-xr-x  tools/testing/selftests/damon/sysfs_memcg_path_leak.sh | 43
-rwxr-xr-x  tools/testing/selftests/damon/sysfs_update_removed_scheme_dir.sh | 8
-rw-r--r--  tools/testing/selftests/drivers/dma-buf/udmabuf.c | 20
-rw-r--r--  tools/testing/selftests/drivers/net/Makefile | 4
-rw-r--r--  tools/testing/selftests/drivers/net/hw/Makefile | 1
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/devlink_rate_tc_bw.py | 465
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/devmem.py | 5
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/iou-zcrx.py | 98
-rw-r--r--  tools/testing/selftests/drivers/net/hw/lib/py/__init__.py | 17
-rw-r--r--  tools/testing/selftests/drivers/net/hw/ncdevmem.c | 9
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/rss_api.py | 476
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/rss_input_xfrm.py | 8
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/tso.py | 101
-rw-r--r--  tools/testing/selftests/drivers/net/lib/py/__init__.py | 14
-rw-r--r--  tools/testing/selftests/drivers/net/lib/py/env.py | 2
-rw-r--r--  tools/testing/selftests/drivers/net/lib/py/load.py | 2
-rw-r--r--  tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh | 165
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh | 2
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh | 2
-rwxr-xr-x  tools/testing/selftests/drivers/net/napi_id.py | 4
-rw-r--r--  tools/testing/selftests/drivers/net/napi_id_helper.c | 35
-rwxr-xr-x  tools/testing/selftests/drivers/net/napi_threaded.py | 113
-rwxr-xr-x  tools/testing/selftests/drivers/net/netcons_basic.sh | 57
-rwxr-xr-x  tools/testing/selftests/drivers/net/netcons_cmdline.sh | 52
-rwxr-xr-x  tools/testing/selftests/drivers/net/netcons_sysdata.sh | 30
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/devlink.sh | 55
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/nexthop.sh | 2
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh | 23
-rwxr-xr-x  tools/testing/selftests/drivers/net/netpoll_basic.py | 396
-rwxr-xr-x  tools/testing/selftests/drivers/net/ping.py | 2
-rwxr-xr-x  tools/testing/selftests/drivers/net/stats.py | 45
-rwxr-xr-x  tools/testing/selftests/drivers/net/xdp.py | 658
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc | 30
-rw-r--r--  tools/testing/selftests/hid/config.common | 1
-rw-r--r--  tools/testing/selftests/hid/tests/base.py | 46
-rw-r--r--  tools/testing/selftests/hid/tests/base_device.py | 49
-rw-r--r--  tools/testing/selftests/hid/tests/test_apple_keyboard.py | 3
-rw-r--r--  tools/testing/selftests/hid/tests/test_gamepad.py | 3
-rw-r--r--  tools/testing/selftests/hid/tests/test_ite_keyboard.py | 3
-rw-r--r--  tools/testing/selftests/hid/tests/test_multitouch.py | 2
-rw-r--r--  tools/testing/selftests/hid/tests/test_sony.py | 7
-rw-r--r--  tools/testing/selftests/hid/tests/test_tablet.py | 11
-rw-r--r--  tools/testing/selftests/hid/tests/test_wacom_generic.py | 445
-rw-r--r--  tools/testing/selftests/iommu/iommufd.c | 541
-rw-r--r--  tools/testing/selftests/iommu/iommufd_fail_nth.c | 15
-rw-r--r--  tools/testing/selftests/iommu/iommufd_utils.h | 89
-rw-r--r--  tools/testing/selftests/kho/arm64.conf | 9
-rw-r--r--  tools/testing/selftests/kho/init.c | 100
-rwxr-xr-x  tools/testing/selftests/kho/vmtest.sh | 183
-rw-r--r--  tools/testing/selftests/kho/x86.conf | 7
-rw-r--r--  tools/testing/selftests/kvm/Makefile.kvm | 4
-rw-r--r--  tools/testing/selftests/kvm/access_tracking_perf_test.c | 7
-rw-r--r--  tools/testing/selftests/kvm/arch_timer.c | 7
-rw-r--r--  tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c | 23
-rw-r--r--  tools/testing/selftests/kvm/arm64/debug-exceptions.c | 4
-rw-r--r--  tools/testing/selftests/kvm/arm64/external_aborts.c | 330
-rw-r--r--  tools/testing/selftests/kvm/arm64/get-reg-list.c | 203
-rw-r--r--  tools/testing/selftests/kvm/arm64/mmio_abort.c | 159
-rw-r--r--  tools/testing/selftests/kvm/arm64/set_id_regs.c | 14
-rw-r--r--  tools/testing/selftests/kvm/arm64/vgic_init.c | 259
-rw-r--r--  tools/testing/selftests/kvm/arm64/vgic_irq.c | 12
-rw-r--r--  tools/testing/selftests/kvm/config | 1
-rw-r--r--  tools/testing/selftests/kvm/include/arm64/processor.h | 10
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util.h | 72
-rw-r--r--  tools/testing/selftests/kvm/include/x86/processor.h | 6
-rw-r--r--  tools/testing/selftests/kvm/irqfd_test.c | 135
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c | 51
-rw-r--r--  tools/testing/selftests/kvm/lib/memstress.c | 2
-rw-r--r--  tools/testing/selftests/kvm/lib/sparsebit.c | 4
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/processor.c | 10
-rw-r--r--  tools/testing/selftests/kvm/x86/aperfmperf_test.c | 213
-rw-r--r--  tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c | 8
-rw-r--r--  tools/testing/selftests/kvm/x86/vmx_exception_with_invalid_guest_state.c | 2
-rw-r--r--  tools/testing/selftests/kvm/x86/xen_shinfo_test.c | 21
-rw-r--r--  tools/testing/selftests/mm/.gitignore | 4
-rw-r--r--  tools/testing/selftests/mm/Makefile | 1
-rw-r--r--  tools/testing/selftests/mm/cow.c | 101
-rw-r--r--  tools/testing/selftests/mm/guard-regions.c | 9
-rw-r--r--  tools/testing/selftests/mm/gup_longterm.c | 10
-rw-r--r--  tools/testing/selftests/mm/hugetlb-madvise.c | 5
-rw-r--r--  tools/testing/selftests/mm/khugepaged.c | 7
-rw-r--r--  tools/testing/selftests/mm/ksm_functional_tests.c | 28
-rw-r--r--  tools/testing/selftests/mm/ksm_tests.c | 6
-rw-r--r--  tools/testing/selftests/mm/merge.c | 677
-rw-r--r--  tools/testing/selftests/mm/migration.c | 21
-rw-r--r--  tools/testing/selftests/mm/mremap_test.c | 361
-rw-r--r--  tools/testing/selftests/mm/pagemap_ioctl.c | 70
-rw-r--r--  tools/testing/selftests/mm/process_madv.c | 344
-rwxr-xr-x  tools/testing/selftests/mm/run_vmtests.sh | 13
-rw-r--r--  tools/testing/selftests/mm/soft-dirty.c | 9
-rw-r--r--  tools/testing/selftests/mm/split_huge_page_test.c | 4
-rw-r--r--  tools/testing/selftests/mm/thp_settings.c | 11
-rw-r--r--  tools/testing/selftests/mm/thp_settings.h | 2
-rw-r--r--  tools/testing/selftests/mm/thuge-gen.c | 38
-rw-r--r--  tools/testing/selftests/mm/uffd-unit-tests.c | 20
-rw-r--r--  tools/testing/selftests/mm/vm_util.c | 31
-rw-r--r--  tools/testing/selftests/mm/vm_util.h | 12
-rw-r--r--  tools/testing/selftests/net/.gitignore | 1
-rw-r--r--  tools/testing/selftests/net/Makefile | 5
-rw-r--r--  tools/testing/selftests/net/af_unix/Makefile | 2
-rw-r--r--  tools/testing/selftests/net/af_unix/scm_inq.c | 125
-rw-r--r--  tools/testing/selftests/net/bench/Makefile | 7
-rw-r--r--  tools/testing/selftests/net/bench/page_pool/Makefile | 17
-rw-r--r--  tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c | 267
-rw-r--r--  tools/testing/selftests/net/bench/page_pool/time_bench.c | 394
-rw-r--r--  tools/testing/selftests/net/bench/page_pool/time_bench.h | 238
-rwxr-xr-x  tools/testing/selftests/net/bench/test_bench_page_pool.sh | 32
-rwxr-xr-x  tools/testing/selftests/net/broadcast_pmtu.sh | 47
-rw-r--r--  tools/testing/selftests/net/config | 11
-rw-r--r--  tools/testing/selftests/net/forwarding/Makefile | 1
-rw-r--r--  tools/testing/selftests/net/forwarding/lib.sh | 69
-rwxr-xr-x  tools/testing/selftests/net/forwarding/router_multicast.sh | 35
-rwxr-xr-x  tools/testing/selftests/net/forwarding/sch_ets.sh | 1
-rw-r--r--  tools/testing/selftests/net/forwarding/sch_ets_tests.sh | 8
-rwxr-xr-x  tools/testing/selftests/net/forwarding/tc_flower.sh | 52
-rwxr-xr-x  tools/testing/selftests/net/forwarding/vxlan_bridge_1q_mc_ul.sh | 771
-rwxr-xr-x  tools/testing/selftests/net/ipv6_force_forwarding.sh | 105
-rw-r--r--  tools/testing/selftests/net/lib.sh | 35
-rw-r--r--  tools/testing/selftests/net/lib/py/__init__.py | 2
-rw-r--r--  tools/testing/selftests/net/lib/py/ksft.py | 7
-rw-r--r--  tools/testing/selftests/net/lib/py/utils.py | 39
-rw-r--r--  tools/testing/selftests/net/lib/py/ynl.py | 5
-rw-r--r--  tools/testing/selftests/net/lib/xdp_native.bpf.c | 621
-rw-r--r--  tools/testing/selftests/net/mptcp/config | 2
-rw-r--r--  tools/testing/selftests/net/msg_zerocopy.c | 24
-rwxr-xr-x  tools/testing/selftests/net/msg_zerocopy.sh | 84
-rwxr-xr-x  tools/testing/selftests/net/netdev-l2addr.sh | 59
-rw-r--r--  tools/testing/selftests/net/netfilter/config | 7
-rwxr-xr-x  tools/testing/selftests/net/netfilter/ipvs.sh | 4
-rwxr-xr-x  tools/testing/selftests/net/netfilter/nft_interface_stress.sh | 5
-rw-r--r--  tools/testing/selftests/net/nettest.c | 12
-rwxr-xr-x  tools/testing/selftests/net/nl_netdev.py | 127
-rwxr-xr-x  tools/testing/selftests/net/packetdrill/ksft_runner.sh | 15
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_blocking_blocking-read.pkt | 2
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_dsack_mult.pkt | 45
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_inq_client.pkt | 3
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_inq_server.pkt | 3
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_ooo_rcv_mss.pkt | 27
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_rcv_big_endseq.pkt | 44
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_rcv_toobig.pkt | 33
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh92
-rwxr-xr-xtools/testing/selftests/net/rtnetlink_notification.sh112
-rwxr-xr-xtools/testing/selftests/net/srv6_end_next_csid_l3vpn_test.sh2
-rwxr-xr-xtools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh50
-rwxr-xr-xtools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh2
-rwxr-xr-xtools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh2
-rw-r--r--tools/testing/selftests/net/tcp_ao/seq-ext.c2
-rwxr-xr-xtools/testing/selftests/net/test_neigh.sh366
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_vnifiltering.sh9
-rw-r--r--tools/testing/selftests/net/tls.c63
-rwxr-xr-xtools/testing/selftests/net/vlan_hw_filter.sh16
-rwxr-xr-xtools/testing/selftests/net/vrf_route_leaking.sh4
-rw-r--r--tools/testing/selftests/pci_endpoint/pci_endpoint_test.c28
-rw-r--r--tools/testing/selftests/perf_events/.gitignore1
-rw-r--r--tools/testing/selftests/perf_events/Makefile2
-rw-r--r--tools/testing/selftests/perf_events/mmap.c236
-rw-r--r--tools/testing/selftests/proc/.gitignore1
-rw-r--r--tools/testing/selftests/proc/Makefile1
-rw-r--r--tools/testing/selftests/proc/proc-maps-race.c741
-rw-r--r--tools/testing/selftests/ptp/testptp.c11
-rw-r--r--tools/testing/selftests/ptrace/.gitignore1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh15
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mktestid.sh29
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/torture.sh78
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/BUSTED3
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFLIST1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-L10
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-L.boot3
-rw-r--r--tools/testing/selftests/sched_ext/maximal.bpf.c5
-rwxr-xr-xtools/testing/selftests/sysctl/sysctl.sh2
-rw-r--r--tools/testing/selftests/tc-testing/config2
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json5
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/dualpi2.json254
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json81
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json36
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.sh6
-rw-r--r--tools/testing/selftests/thermal/intel/workload_hint/workload_hint_test.c16
-rw-r--r--tools/testing/selftests/vsock/.gitignore2
-rw-r--r--tools/testing/selftests/vsock/Makefile17
-rw-r--r--tools/testing/selftests/vsock/config111
-rw-r--r--tools/testing/selftests/vsock/settings1
-rwxr-xr-xtools/testing/selftests/vsock/vmtest.sh487
-rw-r--r--tools/testing/selftests/wireguard/qemu/kernel.config4
-rw-r--r--tools/testing/vma/vma.c272
-rw-r--r--tools/testing/vma/vma_internal.h29
-rw-r--r--tools/testing/vsock/Makefile1
-rw-r--r--tools/testing/vsock/util.c112
-rw-r--r--tools/testing/vsock/util.h35
-rw-r--r--tools/testing/vsock/vsock_test.c353
-rw-r--r--tools/tracing/rtla/src/Build1
-rw-r--r--tools/tracing/rtla/src/actions.c260
-rw-r--r--tools/tracing/rtla/src/actions.h52
-rw-r--r--tools/tracing/rtla/src/timerlat.bpf.c13
-rw-r--r--tools/tracing/rtla/src/timerlat.c24
-rw-r--r--tools/tracing/rtla/src/timerlat.h24
-rw-r--r--tools/tracing/rtla/src/timerlat_bpf.c13
-rw-r--r--tools/tracing/rtla/src/timerlat_bpf.h3
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c140
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c165
-rw-r--r--tools/tracing/rtla/tests/engine.sh21
-rw-r--r--tools/tracing/rtla/tests/hwnoise.t13
-rw-r--r--tools/tracing/rtla/tests/osnoise.t10
-rwxr-xr-xtools/tracing/rtla/tests/scripts/check-priority.sh8
-rw-r--r--tools/tracing/rtla/tests/timerlat.t45
-rw-r--r--tools/verification/dot2/Makefile26
-rw-r--r--tools/verification/dot2/dot2k53
-rw-r--r--tools/verification/models/rtapp/pagefault.ltl1
-rw-r--r--tools/verification/models/rtapp/sleep.ltl22
-rw-r--r--tools/verification/models/sched/nrp.dot29
-rw-r--r--tools/verification/models/sched/opid.dot35
-rw-r--r--tools/verification/models/sched/sncid.dot18
-rw-r--r--tools/verification/models/sched/sssw.dot30
-rw-r--r--tools/verification/models/sched/sts.dot38
-rw-r--r--tools/verification/models/sched/tss.dot18
-rw-r--r--tools/verification/rv/src/in_kernel.c4
-rw-r--r--tools/verification/rv/src/rv.c1
-rw-r--r--tools/verification/rvgen/.gitignore3
-rw-r--r--tools/verification/rvgen/Makefile27
-rw-r--r--tools/verification/rvgen/__main__.py67
-rw-r--r--tools/verification/rvgen/dot2c (renamed from tools/verification/dot2/dot2c)2
-rw-r--r--tools/verification/rvgen/rvgen/automata.py (renamed from tools/verification/dot2/automata.py)0
-rw-r--r--tools/verification/rvgen/rvgen/container.py32
-rw-r--r--tools/verification/rvgen/rvgen/dot2c.py (renamed from tools/verification/dot2/dot2c.py)22
-rw-r--r--tools/verification/rvgen/rvgen/dot2k.py129
-rw-r--r--tools/verification/rvgen/rvgen/generator.py (renamed from tools/verification/dot2/dot2k.py)265
-rw-r--r--tools/verification/rvgen/rvgen/ltl2ba.py566
-rw-r--r--tools/verification/rvgen/rvgen/ltl2k.py271
-rw-r--r--tools/verification/rvgen/rvgen/templates/Kconfig (renamed from tools/verification/dot2/dot2k_templates/Kconfig)0
-rw-r--r--tools/verification/rvgen/rvgen/templates/container/Kconfig5
-rw-r--r--tools/verification/rvgen/rvgen/templates/container/main.c (renamed from tools/verification/dot2/dot2k_templates/main_container.c)3
-rw-r--r--tools/verification/rvgen/rvgen/templates/container/main.h (renamed from tools/verification/dot2/dot2k_templates/main_container.h)0
-rw-r--r--tools/verification/rvgen/rvgen/templates/dot2k/main.c (renamed from tools/verification/dot2/dot2k_templates/main.c)3
-rw-r--r--tools/verification/rvgen/rvgen/templates/dot2k/trace.h (renamed from tools/verification/dot2/dot2k_templates/trace.h)0
-rw-r--r--tools/verification/rvgen/rvgen/templates/ltl2k/main.c102
-rw-r--r--tools/verification/rvgen/rvgen/templates/ltl2k/trace.h14
-rw-r--r--virt/kvm/dirty_ring.c109
-rw-r--r--virt/kvm/eventfd.c159
-rw-r--r--virt/kvm/guest_memfd.c11
-rw-r--r--virt/kvm/irqchip.c2
-rw-r--r--virt/kvm/kvm_main.c13
-rw-r--r--virt/kvm/vfio.c3
-rw-r--r--virt/lib/irqbypass.c190
8084 files changed, 315815 insertions, 166729 deletions
diff --git a/.gitignore b/.gitignore
index bf5ee6e01cd4..929054df5212 100644
--- a/.gitignore
+++ b/.gitignore
@@ -114,6 +114,7 @@ modules.order
!.gitignore
!.kunitconfig
!.mailmap
+!.pylintrc
!.rustfmt.toml
#
diff --git a/.mailmap b/.mailmap
index 4bb3a7f253b9..d9fa1b555116 100644
--- a/.mailmap
+++ b/.mailmap
@@ -138,6 +138,7 @@ Benjamin Poirier <benjamin.poirier@gmail.com> <bpoirier@suse.de>
Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@gmail.com>
Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@redhat.com>
Benno Lossin <lossin@kernel.org> <benno.lossin@proton.me>
+Bernard Metzler <bernard.metzler@linux.dev> <bmt@zurich.ibm.com>
Bingwu Zhang <xtex@aosc.io> <xtexchooser@duck.com>
Bingwu Zhang <xtex@aosc.io> <xtex@xtexx.eu.org>
Bjorn Andersson <andersson@kernel.org> <bjorn@kryo.se>
@@ -286,8 +287,9 @@ Gustavo Padovan <padovan@profusion.mobi>
Hamza Mahfooz <hamzamahfooz@linux.microsoft.com> <hamza.mahfooz@amd.com>
Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
Hans de Goede <hansg@kernel.org> <hdegoede@redhat.com>
-Hans Verkuil <hverkuil@xs4all.nl> <hansverk@cisco.com>
-Hans Verkuil <hverkuil@xs4all.nl> <hverkuil-cisco@xs4all.nl>
+Hans Verkuil <hverkuil@kernel.org> <hverkuil@xs4all.nl>
+Hans Verkuil <hverkuil@kernel.org> <hverkuil-cisco@xs4all.nl>
+Hans Verkuil <hverkuil@kernel.org> <hansverk@cisco.com>
Harry Yoo <harry.yoo@oracle.com> <42.hyeyoo@gmail.com>
Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com>
@@ -671,6 +673,7 @@ Muchun Song <muchun.song@linux.dev> <smuchun@gmail.com>
Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
Rudolf Marek <R.Marek@sh.cvut.cz>
Rui Saraiva <rmps@joel.ist.utl.pt>
+Sachin Mokashi <sachin.mokashi@intel.com> <sachinx.mokashi@intel.com>
Sachin P Sant <ssant@in.ibm.com>
Sai Prakash Ranjan <quic_saipraka@quicinc.com> <saiprakash.ranjan@codeaurora.org>
Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
diff --git a/CREDITS b/CREDITS
index cf2a53efaf4c..a357f9cbb05d 100644
--- a/CREDITS
+++ b/CREDITS
@@ -4378,6 +4378,12 @@ S: 542 West 112th Street, 5N
S: New York, New York 10025
S: USA
+N: Masahiro Yamada
+E: masahiroy@kernel.org
+D: Kbuild Maintainer 2017-2025
+D: Kconfig Maintainer 2018-2025
+S: Japan
+
N: Li Yang
E: leoli@freescale.com
D: Freescale Highspeed USB device driver
diff --git a/Documentation/ABI/README b/Documentation/ABI/README
index ef0e6d11e919..315fffe1f831 100644
--- a/Documentation/ABI/README
+++ b/Documentation/ABI/README
@@ -46,7 +46,9 @@ Every file in these directories will contain the following information:
What: Short description of the interface
Date: Date created
-KernelVersion: Kernel version this feature first showed up in.
+KernelVersion: (Optional) Kernel version this feature first showed up in.
+ Note: git history often provides more accurate version
+ info, so this field may be omitted.
Contact: Primary contact for this interface (may be a mailing list)
Description: Long description of the interface and how to use it.
Users: All users of this interface who wish to be notified when
diff --git a/Documentation/ABI/obsolete/automount-tracefs-debugfs b/Documentation/ABI/obsolete/automount-tracefs-debugfs
new file mode 100644
index 000000000000..a5196ec78cb5
--- /dev/null
+++ b/Documentation/ABI/obsolete/automount-tracefs-debugfs
@@ -0,0 +1,20 @@
+What: /sys/kernel/debug/tracing
+Date: May 2008
+KernelVersion: 2.6.27
+Contact: linux-trace-kernel@vger.kernel.org
+Description:
+
+ When ftrace was first added to the kernel, its interface was placed
+ into the debugfs file system under the "tracing" directory, so the
+ files were accessed via /sys/kernel/debug/tracing. As systems wanted
+ access to the tracing interface without having to enable debugfs, a
+ new interface called "tracefs" was created. This is a stand-alone
+ file system and is usually mounted at /sys/kernel/tracing.
+
+ To allow older tooling to continue to operate, when mounting
+ debugfs, the tracefs file system would automatically get mounted in
+ the "tracing" directory of debugfs. The tracefs interface was added
+ in January 2015 in the v4.1 kernel.
+
+ All tooling should now be using tracefs directly and the "tracing"
+ directory in debugfs should be removed by January 2030.
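For tooling being migrated off the automount, using tracefs directly is a one-liner; a minimal sketch (the mount source name "nodev" is arbitrary)::

    # mount -t tracefs nodev /sys/kernel/tracing
    # cat /sys/kernel/tracing/available_tracers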
diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
index 803f578dc023..0ddffc9133d0 100644
--- a/Documentation/ABI/stable/sysfs-block
+++ b/Documentation/ABI/stable/sysfs-block
@@ -731,7 +731,7 @@ Contact: linux-block@vger.kernel.org
Description:
[RW] If the device is registered for writeback throttling, then
this file shows the target minimum read latency. If this latency
- is exceeded in a given window of time (see wb_window_usec), then
+ is exceeded in a given window of time (see curr_win_nsec), then
the writeback throttling will start scaling back writes. Writing
a value of '0' to this file disables the feature. Writing a
value of '-1' to this file resets the value to the default
diff --git a/Documentation/ABI/stable/sysfs-devices-node b/Documentation/ABI/stable/sysfs-devices-node
index a02707cb7cbc..2d0e023f22a7 100644
--- a/Documentation/ABI/stable/sysfs-devices-node
+++ b/Documentation/ABI/stable/sysfs-devices-node
@@ -227,3 +227,12 @@ Contact: Jiaqi Yan <jiaqiyan@google.com>
Description:
Of the raw poisoned pages on a NUMA node, how many pages are
recovered by memory error recovery attempt.
+
+What: /sys/devices/system/node/nodeX/reclaim
+Date: June 2025
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+ Perform user-triggered proactive reclaim on a NUMA node.
+ This interface is equivalent to the memcg variant.
+
+ See Documentation/admin-guide/cgroup-v2.rst
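To illustrate the node reclaim file documented above, a hedged sketch, assuming it accepts the same byte-count syntax as the memcg memory.reclaim interface it is described as equivalent to::

    ## ask the kernel to proactively reclaim about 512MiB from node 0
    # echo "512M" > /sys/devices/system/node/node0/reclaim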
diff --git a/Documentation/ABI/testing/debugfs-amd-iommu b/Documentation/ABI/testing/debugfs-amd-iommu
new file mode 100644
index 000000000000..5621a66aa693
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-amd-iommu
@@ -0,0 +1,131 @@
+What: /sys/kernel/debug/iommu/amd/iommu<x>/mmio
+Date: January 2025
+Contact: Dheeraj Kumar Srivastava <dheerajkumar.srivastava@amd.com>
+Description:
+ This file provides read/write access for user input. Users specify the
+ MMIO register offset for iommu<x>, and the file outputs the corresponding
+ MMIO register value of iommu<x>.
+
+ Example::
+
+ $ echo "0x18" > /sys/kernel/debug/iommu/amd/iommu00/mmio
+ $ cat /sys/kernel/debug/iommu/amd/iommu00/mmio
+
+ Output::
+
+ Offset:0x18 Value:0x000c22000003f48d
+
+What: /sys/kernel/debug/iommu/amd/iommu<x>/capability
+Date: January 2025
+Contact: Dheeraj Kumar Srivastava <dheerajkumar.srivastava@amd.com>
+Description:
+ This file provides read/write access for user input. Users specify the
+ capability register offset for iommu<x>, and the file outputs the
+ corresponding capability register value of iommu<x>.
+
+ Example::
+
+ $ echo "0x10" > /sys/kernel/debug/iommu/amd/iommu00/capability
+ $ cat /sys/kernel/debug/iommu/amd/iommu00/capability
+
+ Output::
+
+ Offset:0x10 Value:0x00203040
+
+What: /sys/kernel/debug/iommu/amd/iommu<x>/cmdbuf
+Date: January 2025
+Contact: Dheeraj Kumar Srivastava <dheerajkumar.srivastava@amd.com>
+Description:
+ This file is a read-only output file containing iommu<x> command
+ buffer entries.
+
+ Examples::
+
+ $ cat /sys/kernel/debug/iommu/amd/iommu<x>/cmdbuf
+
+ Output::
+
+ CMD Buffer Head Offset:339 Tail Offset:339
+ 0: 00835001 10000001 00003c00 00000000
+ 1: 00000000 30000005 fffff003 7fffffff
+ 2: 00835001 10000001 00003c01 00000000
+ 3: 00000000 30000005 fffff003 7fffffff
+ 4: 00835001 10000001 00003c02 00000000
+ 5: 00000000 30000005 fffff003 7fffffff
+ 6: 00835001 10000001 00003c03 00000000
+ 7: 00000000 30000005 fffff003 7fffffff
+ 8: 00835001 10000001 00003c04 00000000
+ 9: 00000000 30000005 fffff003 7fffffff
+ 10: 00835001 10000001 00003c05 00000000
+ 11: 00000000 30000005 fffff003 7fffffff
+ [...]
+
+What: /sys/kernel/debug/iommu/amd/devid
+Date: January 2025
+Contact: Dheeraj Kumar Srivastava <dheerajkumar.srivastava@amd.com>
+Description:
+ This file provides read/write access for user input. Users specify the
+ device ID, which can be used to dump IOMMU data structures such as the
+ interrupt remapping table and device table.
+
+ Example:
+
+ 1.
+ ::
+
+ $ echo 0000:01:00.0 > /sys/kernel/debug/iommu/amd/devid
+ $ cat /sys/kernel/debug/iommu/amd/devid
+
+ Output::
+
+ 0000:01:00.0
+
+ 2.
+ ::
+
+ $ echo 01:00.0 > /sys/kernel/debug/iommu/amd/devid
+ $ cat /sys/kernel/debug/iommu/amd/devid
+
+ Output::
+
+ 0000:01:00.0
+
+What: /sys/kernel/debug/iommu/amd/devtbl
+Date: January 2025
+Contact: Dheeraj Kumar Srivastava <dheerajkumar.srivastava@amd.com>
+Description:
+ This file is a read-only output file containing the device table entry
+ for the device ID provided in /sys/kernel/debug/iommu/amd/devid.
+
+ Example::
+
+ $ cat /sys/kernel/debug/iommu/amd/devtbl
+
+ Output::
+
+ DeviceId QWORD[3] QWORD[2] QWORD[1] QWORD[0] iommu
+ 0000:01:00.0 0000000000000000 20000001373b8013 0000000000000038 6000000114d7b603 iommu3
+
+What: /sys/kernel/debug/iommu/amd/irqtbl
+Date: January 2025
+Contact: Dheeraj Kumar Srivastava <dheerajkumar.srivastava@amd.com>
+Description:
+ This file is a read-only output file containing valid IRT table entries
+ for the device ID provided in /sys/kernel/debug/iommu/amd/devid.
+
+ Example::
+
+ $ cat /sys/kernel/debug/iommu/amd/irqtbl
+
+ Output::
+
+ DeviceId 0000:01:00.0
+ IRT[0000] 0000000000000020 0000000000000241
+ IRT[0001] 0000000000000020 0000000000000841
+ IRT[0002] 0000000000000020 0000000000002041
+ IRT[0003] 0000000000000020 0000000000008041
+ IRT[0004] 0000000000000020 0000000000020041
+ IRT[0005] 0000000000000020 0000000000080041
+ IRT[0006] 0000000000000020 0000000000200041
+ IRT[0007] 0000000000000020 0000000000800041
+ [...]
diff --git a/Documentation/ABI/testing/debugfs-cxl b/Documentation/ABI/testing/debugfs-cxl
index 12488c14be64..e95e21f131e9 100644
--- a/Documentation/ABI/testing/debugfs-cxl
+++ b/Documentation/ABI/testing/debugfs-cxl
@@ -20,7 +20,7 @@ Description:
visible for devices supporting the capability.
-What: /sys/kernel/debug/memX/clear_poison
+What: /sys/kernel/debug/cxl/memX/clear_poison
Date: April, 2023
KernelVersion: v6.4
Contact: linux-cxl@vger.kernel.org
diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat
index bd6793760f29..3f1efbbad6ca 100644
--- a/Documentation/ABI/testing/debugfs-driver-qat
+++ b/Documentation/ABI/testing/debugfs-driver-qat
@@ -67,7 +67,7 @@ Contact: qat-linux@intel.com
Description: (RO) Read returns power management information specific to the
QAT device.
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
What: /sys/kernel/debug/qat_<device>_<BDF>/cnv_errors
Date: January 2024
diff --git a/Documentation/ABI/testing/debugfs-driver-qat_telemetry b/Documentation/ABI/testing/debugfs-driver-qat_telemetry
index eacee2072088..0dfd8d97e169 100644
--- a/Documentation/ABI/testing/debugfs-driver-qat_telemetry
+++ b/Documentation/ABI/testing/debugfs-driver-qat_telemetry
@@ -32,7 +32,7 @@ Description: (RW) Enables/disables the reporting of telemetry metrics.
echo 0 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/control
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
What: /sys/kernel/debug/qat_<device>_<BDF>/telemetry/device_data
Date: March 2024
@@ -67,6 +67,10 @@ Description: (RO) Reports device telemetry counters.
exec_xlt<N> execution count of Translator slice N
util_dcpr<N> utilization of Decompression slice N [%]
exec_dcpr<N> execution count of Decompression slice N
+ util_cnv<N> utilization of Compression and verify slice N [%]
+ exec_cnv<N> execution count of Compression and verify slice N
+ util_dcprz<N> utilization of Decompression slice N [%]
+ exec_dcprz<N> execution count of Decompression slice N
util_pke<N> utilization of PKE N [%]
exec_pke<N> execution count of PKE N
util_ucs<N> utilization of UCS slice N [%]
@@ -100,7 +104,7 @@ Description: (RO) Reports device telemetry counters.
If a device lacks of a specific accelerator, the corresponding
attribute is not reported.
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
What: /sys/kernel/debug/qat_<device>_<BDF>/telemetry/rp_<A/B/C/D>_data
Date: March 2024
@@ -225,4 +229,4 @@ Description: (RW) Selects up to 4 Ring Pairs (RP) to monitor, one per file,
``rp2srv`` from sysfs.
See Documentation/ABI/testing/sysfs-driver-qat for details.
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
diff --git a/Documentation/ABI/testing/sysfs-class-net-phydev b/Documentation/ABI/testing/sysfs-class-net-phydev
index ac722dd5e694..31615c59bff9 100644
--- a/Documentation/ABI/testing/sysfs-class-net-phydev
+++ b/Documentation/ABI/testing/sysfs-class-net-phydev
@@ -26,6 +26,16 @@ Description:
This ID is used to match the device with the appropriate
driver.
+What: /sys/class/mdio_bus/<bus>/<device>/c45_phy_ids/mmd<n>_device_id
+Date: June 2025
+KernelVersion: 6.17
+Contact: netdev@vger.kernel.org
+Description:
+ This attribute contains the 32-bit PHY Identifier as reported
+ by the device during bus enumeration, encoded in hexadecimal.
+ These C45 IDs are used to match the device with the appropriate
+ driver. These files are not present for C22-only devices.
+
What: /sys/class/mdio_bus/<bus>/<device>/phy_interface
Date: February 2014
KernelVersion: 3.15
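To inspect the new per-MMD identifiers from the shell, a small sketch; the bus name "stmmac-0" and the device address ":01" are placeholders for the example::

    # grep . /sys/class/mdio_bus/stmmac-0/stmmac-0:01/c45_phy_ids/mmd*_device_id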
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
index 5a91dcccd3ac..d9e2b17c6872 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
+++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
@@ -148,3 +148,51 @@ Contact: intel-xe@lists.freedesktop.org
Description: RO. Fan 3 speed in RPM.
Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_cap
+Date: May 2025
+KernelVersion: 6.15
+Contact: intel-xe@lists.freedesktop.org
+Description: RW. Card burst (PL2) power limit in microwatts.
+
+ The power controller will throttle the operating frequency
+ if the power averaged over a window (typically milli seconds)
+ exceeds this limit. A read value of 0 means that the PL2
+ power limit is disabled, writing 0 disables the limit.
+ PL2 is greater than PL1 and its time window is lesser
+ compared to PL1.
+
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power2_cap
+Date: May 2025
+KernelVersion: 6.15
+Contact: intel-xe@lists.freedesktop.org
+Description: RW. Package burst (PL2) power limit in microwatts.
+
+ The power controller will throttle the operating frequency
+ if the power averaged over a window (typically milliseconds)
+ exceeds this limit. A read value of 0 means that the PL2
+ power limit is disabled; writing 0 disables the limit.
+ PL2 is greater than PL1, and its time window is shorter
+ than PL1's.
+
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_cap_interval
+Date: May 2025
+KernelVersion: 6.15
+Contact: intel-xe@lists.freedesktop.org
+Description: RW. Card burst power limit interval (Tau in PL2/Tau) in
+ milliseconds over which sustained power is averaged.
+
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power2_cap_interval
+Date: May 2025
+KernelVersion: 6.15
+Contact: intel-xe@lists.freedesktop.org
+Description: RW. Package burst power limit interval (Tau in PL2/Tau) in
+ milliseconds over which sustained power is averaged.
+
+ Only supported for particular Intel Xe graphics platforms.
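Tying the four new attributes together, a hedged sketch of capping card burst power at 300 W over a 10 ms window; the PCI address and hwmon index are placeholders::

    # cd /sys/bus/pci/drivers/xe/0000:03:00.0/hwmon/hwmon2
    ## PL2 limit in microwatts, interval in milliseconds
    # echo 300000000 > power1_cap
    # echo 10 > power1_cap_interval
    ## writing 0 disables the burst limit again
    # echo 0 > power1_cap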
diff --git a/Documentation/ABI/testing/sysfs-driver-qaic b/Documentation/ABI/testing/sysfs-driver-qaic
new file mode 100644
index 000000000000..f794fd734163
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-qaic
@@ -0,0 +1,18 @@
+What: /sys/bus/pci/drivers/qaic/XXXX:XX:XX.X/ce_count
+Date: May 2025
+KernelVersion: 6.17
+Contact: dri-devel@lists.freedesktop.org
+Description: Number of correctable errors received from the device since the driver was loaded.
+
+What: /sys/bus/pci/drivers/qaic/XXXX:XX:XX.X/ue_count
+Date: May 2025
+KernelVersion: 6.17
+Contact: dri-devel@lists.freedesktop.org
+Description: Number of uncorrectable errors received from the device since the driver was loaded.
+
+What: /sys/bus/pci/drivers/qaic/XXXX:XX:XX.X/ue_nonfatal_count
+Date: May 2025
+KernelVersion: 6.17
+Contact: dri-devel@lists.freedesktop.org
+Description: Number of uncorrectable non-fatal errors received from the device since
+ the driver was loaded.
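The counters are plain read-only sysfs files; for example, on a hypothetical device at 0000:34:00.0::

    # cd /sys/bus/pci/drivers/qaic/0000:34:00.0
    # grep . ce_count ue_count ue_nonfatal_count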
diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat
index f290e77cd590..b0561b9fc4eb 100644
--- a/Documentation/ABI/testing/sysfs-driver-qat
+++ b/Documentation/ABI/testing/sysfs-driver-qat
@@ -14,7 +14,7 @@ Description: (RW) Reports the current state of the QAT device. Write to
It is possible to transition the device from up to down only
if the device is up and vice versa.
- This attribute is only available for qat_4xxx devices.
+ This attribute is available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat/cfg_services
Date: June 2022
@@ -23,24 +23,28 @@ Contact: qat-linux@intel.com
Description: (RW) Reports the current configuration of the QAT device.
Write to the file to change the configured services.
- The values are:
-
- * sym;asym: the device is configured for running crypto
- services
- * asym;sym: identical to sym;asym
- * dc: the device is configured for running compression services
- * dcc: identical to dc but enables the dc chaining feature,
- hash then compression. If this is not required chose dc
- * sym: the device is configured for running symmetric crypto
- services
- * asym: the device is configured for running asymmetric crypto
- services
- * asym;dc: the device is configured for running asymmetric
- crypto services and compression services
- * dc;asym: identical to asym;dc
- * sym;dc: the device is configured for running symmetric crypto
- services and compression services
- * dc;sym: identical to sym;dc
+ One or more services can be enabled per device.
+ Certain configurations are restricted to specific device types;
+ where applicable this is explicitly indicated: for example,
+ (qat_6xxx) means the option applies only to that device series.
+
+ The available services include:
+
+ * sym: Configures the device for symmetric cryptographic operations.
+ * asym: Configures the device for asymmetric cryptographic operations.
+ * dc: Configures the device for compression and decompression
+ operations.
+ * dcc: Similar to dc, but with the additional dc chaining feature
+ enabled: cipher then compress (qat_6xxx), or hash then compress.
+ If this is not required, choose dc.
+ * decomp: Configures the device for decompression operations (qat_6xxx).
+
+ Service combinations are permitted for all services except dcc.
+ On QAT GEN4 devices (qat_4xxx driver) a maximum of two services can be
+ combined, and on QAT GEN6 devices (qat_6xxx driver) a maximum of three
+ services can be combined.
+ The order of services is not significant. For instance, sym;asym is
+ functionally equivalent to asym;sym.
It is possible to set the configuration only if the device
is in the `down` state (see /sys/bus/pci/devices/<BDF>/qat/state)
@@ -59,7 +63,7 @@ Description: (RW) Reports the current configuration of the QAT device.
# cat /sys/bus/pci/devices/<BDF>/qat/cfg_services
dc
- This attribute is only available for qat_4xxx devices.
+ This attribute is available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat/pm_idle_enabled
Date: June 2023
@@ -94,7 +98,7 @@ Description: (RW) This configuration option provides a way to force the device i
# cat /sys/bus/pci/devices/<BDF>/qat/pm_idle_enabled
0
- This attribute is only available for qat_4xxx devices.
+ This attribute is available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat/rp2srv
Date: January 2024
@@ -126,7 +130,7 @@ Description:
# cat /sys/bus/pci/devices/<BDF>/qat/rp2srv
sym
- This attribute is only available for qat_4xxx devices.
+ This attribute is available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat/num_rps
Date: January 2024
@@ -140,7 +144,7 @@ Description:
# cat /sys/bus/pci/devices/<BDF>/qat/num_rps
64
- This attribute is only available for qat_4xxx devices.
+ This attribute is available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat/auto_reset
Date: May 2024
@@ -160,4 +164,4 @@ Description: (RW) Reports the current state of the autoreset feature
* 0/Nn/off: auto reset disabled. If the device encounters an
unrecoverable error, it will not be reset.
- This attribute is only available for qat_4xxx devices.
+ This attribute is available for qat_4xxx and qat_6xxx devices.
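Since cfg_services can only be written while the device is down, a typical reconfiguration sequence looks roughly like this, with <BDF> standing for the device's PCI address::

    # echo down > /sys/bus/pci/devices/<BDF>/qat/state
    # echo "sym;dc" > /sys/bus/pci/devices/<BDF>/qat/cfg_services
    # echo up > /sys/bus/pci/devices/<BDF>/qat/state
    # cat /sys/bus/pci/devices/<BDF>/qat/cfg_services
    sym;dc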
diff --git a/Documentation/ABI/testing/sysfs-driver-qat_rl b/Documentation/ABI/testing/sysfs-driver-qat_rl
index 8c282ae3155d..d534f89b4971 100644
--- a/Documentation/ABI/testing/sysfs-driver-qat_rl
+++ b/Documentation/ABI/testing/sysfs-driver-qat_rl
@@ -31,7 +31,7 @@ Description:
* rm_all: Removes all the configured SLAs.
* Inputs: None
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat_rl/rp
Date: January 2024
@@ -68,7 +68,7 @@ Description:
## Write
# echo 0x5 > /sys/bus/pci/devices/<BDF>/qat_rl/rp
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat_rl/id
Date: January 2024
@@ -101,7 +101,7 @@ Description:
# cat /sys/bus/pci/devices/<BDF>/qat_rl/rp
0x5 ## ring pair ID 0 and ring pair ID 2
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat_rl/cir
Date: January 2024
@@ -135,7 +135,7 @@ Description:
# cat /sys/bus/pci/devices/<BDF>/qat_rl/cir
500
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat_rl/pir
Date: January 2024
@@ -169,7 +169,7 @@ Description:
# cat /sys/bus/pci/devices/<BDF>/qat_rl/pir
750
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat_rl/srv
Date: January 2024
@@ -202,7 +202,7 @@ Description:
# cat /sys/bus/pci/devices/<BDF>/qat_rl/srv
dc
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat_rl/cap_rem
Date: January 2024
@@ -223,4 +223,4 @@ Description:
# cat /sys/bus/pci/devices/<BDF>/qat_rl/cap_rem
0
- This attribute is only available for qat_4xxx devices.
+ This attribute is only available for qat_4xxx and qat_6xxx devices.
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index 615453fcc9ff..a90612ab5780 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1685,3 +1685,86 @@ Description:
================ ========================================
The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/hid/analysis_trigger
+What: /sys/bus/platform/devices/*.ufs/hid/analysis_trigger
+Date: May 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ The host can enable or disable HID analysis operation.
+
+ ======= =========================================
+ disable disable HID analysis operation
+ enable enable HID analysis operation
+ ======= =========================================
+
+ The file is write only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/hid/defrag_trigger
+What: /sys/bus/platform/devices/*.ufs/hid/defrag_trigger
+Date: May 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ The host can enable or disable HID defragmentation operation.
+
+ ======= =========================================
+ disable disable HID defragmentation operation
+ enable enable HID defragmentation operation
+ ======= =========================================
+
+ The attribute is write only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/hid/fragmented_size
+What: /sys/bus/platform/devices/*.ufs/hid/fragmented_size
+Date: May 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ The total fragmented size in the device is reported through
+ this attribute.
+
+ The attribute is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/hid/defrag_size
+What: /sys/bus/platform/devices/*.ufs/hid/defrag_size
+Date: May 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ The host sets the size to be defragmented by an HID
+ defragmentation operation.
+
+ The attribute is read/write.
+
+What: /sys/bus/platform/drivers/ufshcd/*/hid/progress_ratio
+What: /sys/bus/platform/devices/*.ufs/hid/progress_ratio
+Date: May 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ Defragmentation progress is reported by this attribute,
+ indicating the ratio of the completed defragmentation size
+ to the requested defragmentation size.
+
+ ==== ============================================
+ 1 1%
+ ...
+ 100 100%
+ ==== ============================================
+
+ The attribute is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/hid/state
+What: /sys/bus/platform/devices/*.ufs/hid/state
+Date: May 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ The HID state is reported by this attribute.
+
+ ==================== ===========================
+ idle Idle (analysis required)
+ analysis_in_progress Analysis in progress
+ defrag_required Defrag required
+ defrag_in_progress Defrag in progress
+ defrag_completed Defrag completed
+ defrag_not_required Defrag is not required
+ ==================== ===========================
+
+ The attribute is read only.
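The six HID attributes above combine into a simple analyze-then-defrag flow; a sketch, assuming a platform device named 1d84000.ufs (the device name and the size value are placeholders)::

    # cd /sys/bus/platform/devices/1d84000.ufs/hid
    # echo enable > analysis_trigger
    # cat state                    ## poll until it reports defrag_required
    # cat fragmented_size
    # echo <size> > defrag_size
    # echo enable > defrag_trigger
    # cat progress_ratio           ## 1..100; state becomes defrag_completed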
diff --git a/Documentation/ABI/testing/sysfs-firmware-efi b/Documentation/ABI/testing/sysfs-firmware-efi
index 5e4d0b27cdfe..927e362d4974 100644
--- a/Documentation/ABI/testing/sysfs-firmware-efi
+++ b/Documentation/ABI/testing/sysfs-firmware-efi
@@ -36,3 +36,10 @@ Description: Displays the content of the Runtime Configuration Interface
Table version 2 on Dell EMC PowerEdge systems in binary format
Users: It is used by Dell EMC OpenManage Server Administrator tool to
populate BIOS setup page.
+
+What: /sys/firmware/efi/ovmf_debug_log
+Date: July 2025
+Contact: Gerd Hoffmann <kraxel@redhat.com>, linux-efi@vger.kernel.org
+Description: Displays the content of the OVMF debug log buffer. The file is
+ only present if the firmware supports logging to a memory
+ buffer.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index bf03263b9f46..bc0e7fefc39d 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -861,3 +861,25 @@ Description: This is a read-only entry to show the value of sb.s_encoding_flags,
SB_ENC_STRICT_MODE_FL 0x00000001
SB_ENC_NO_COMPAT_FALLBACK_FL 0x00000002
============================ ==========
+
+What: /sys/fs/f2fs/<disk>/reserved_pin_section
+Date: June 2025
+Contact: "Chao Yu" <chao@kernel.org>
+Description: This threshold controls triggering garbage collection while
+ fallocating on a pinned file, so it can guarantee there are enough
+ free reserved sections before preallocating on the pinned file.
+ By default, the value is ovp_sections; for zoned UFS in particular,
+ the value is 1.
+
+What: /sys/fs/f2fs/<disk>/gc_boost_gc_multiple
+Date: June 2025
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: Set a multiplier for the background GC migration window when F2FS GC is
+ boosted. The range should be from 1 to the segment count in a section.
+ Default: 5
+
+What: /sys/fs/f2fs/<disk>/gc_boost_gc_greedy
+Date: June 2025
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: Control GC algorithm for boost GC. 0: cost benefit, 1: greedy
+ Default: 1
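A hedged example of tuning the new f2fs knobs for a filesystem on a hypothetical disk "sda"; the values are illustrative only::

    # echo 10 > /sys/fs/f2fs/sda/gc_boost_gc_multiple   ## widen the boost GC window
    # echo 0 > /sys/fs/f2fs/sda/gc_boost_gc_greedy      ## cost-benefit instead of greedy
    # cat /sys/fs/f2fs/sda/reserved_pin_section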
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon
index 5697ab154c1f..6791d879759e 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-damon
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon
@@ -44,6 +44,13 @@ Contact: SeongJae Park <sj@kernel.org>
Description: Reading this file returns the pid of the kdamond if it is
running.
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/refresh_ms
+Date: Jul 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: Writing a value to this file sets the time interval, in
+ milliseconds, for automatic updates of the DAMON status file
+ contents. Writing '0' disables the updates. Reading this file
+ returns the value.
+
What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/nr_contexts
Date: Mar 2022
Contact: SeongJae Park <sj@kernel.org>
@@ -431,6 +438,28 @@ Description: Directory for DAMON operations set layer-handled DAMOS filters.
/sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters
directory.
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/dests/nr_dests
+Date: Jul 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: Writing a number 'N' to this file creates 'N' directories,
+ named '0' to 'N-1', under the dests/ directory, for setting the
+ action destinations of the scheme.
+
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/dests/<D>/id
+Date: Jul 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: Writing to and reading from this file sets and gets the id of
+ the DAMOS action destination. For DAMOS_MIGRATE_{HOT,COLD}
+ actions, the destination node's node id can be written and
+ read.
+
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/dests/<D>/weight
+Date: Jul 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: Writing to and reading from this file sets and gets the weight
+ given to this DAMOS action destination when selecting the
+ destination of each action from among the destinations.
+
What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/stats/nr_tried
Date: Mar 2022
Contact: SeongJae Park <sj@kernel.org>
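Wiring the dests/ files together, a sketch of weighting a DAMOS_MIGRATE_COLD scheme between two destination nodes; kdamond 0, context 0, and scheme 0 are assumed to exist already::

    # cd /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/dests
    # echo 2 > nr_dests
    # echo 1 > 0/id; echo 7 > 0/weight   ## prefer node 1
    # echo 2 > 1/id; echo 3 > 1/weight   ## spill some work to node 2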
diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab
index 658999be5164..b26e4299f822 100644
--- a/Documentation/ABI/testing/sysfs-kernel-slab
+++ b/Documentation/ABI/testing/sysfs-kernel-slab
@@ -37,7 +37,8 @@ Description:
The alloc_calls file is read-only and lists the kernel code
locations from which allocations for this cache were performed.
The alloc_calls file only contains information if debugging is
- enabled for that cache (see Documentation/mm/slub.rst).
+ enabled for that cache (see
+ Documentation/admin-guide/mm/slab.rst).
What: /sys/kernel/slab/<cache>/alloc_fastpath
Date: February 2008
@@ -219,7 +220,7 @@ Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Description:
The free_calls file is read-only and lists the locations of
object frees if slab debugging is enabled (see
- Documentation/mm/slub.rst).
+ Documentation/admin-guide/mm/slab.rst).
What: /sys/kernel/slab/<cache>/free_fastpath
Date: February 2008
diff --git a/Documentation/ABI/testing/sysfs-secvar b/Documentation/ABI/testing/sysfs-secvar
index 857cf12b0904..1016967a730f 100644
--- a/Documentation/ABI/testing/sysfs-secvar
+++ b/Documentation/ABI/testing/sysfs-secvar
@@ -22,9 +22,13 @@ Description: A string indicating which backend is in use by the firmware.
and is expected to be "ibm,edk2-compat-v1".
On pseries/PLPKS, this is generated by the kernel based on the
- version number in the SB_VERSION variable in the keystore, and
- has the form "ibm,plpks-sb-v<version>", or
- "ibm,plpks-sb-unknown" if there is no SB_VERSION variable.
+ version number in the SB_VERSION variable in the keystore. The
+ version numbering in the SB_VERSION variable starts from 1. The
+ format string takes the form "ibm,plpks-sb-v<version>" in the
+ case of dynamic key management mode. If the SB_VERSION variable
+ does not exist (or there is an error while reading it), it takes
+ the form "ibm,plpks-sb-v0", indicating that the key management
+ mode is static.
What: /sys/firmware/secvar/vars/<variable name>
Date: August 2019
@@ -34,6 +38,13 @@ Description: Each secure variable is represented as a directory named as
representation. The data and size can be determined by reading
their respective attribute files.
+ Only secvars relevant to the key management mode are exposed.
+ Only in the dynamic key management mode should the user have
+ access (read and write) to the secure boot secvars db, dbx,
+ grubdb, grubdbx, and sbat. These secvars are not consumed in the
+ static key management mode. PK, trustedcadb and moduledb are the
+ secvars common to both static and dynamic key management modes.
+
What: /sys/firmware/secvar/vars/<variable_name>/size
Date: August 2019
Contact: Nayna Jain <nayna@linux.ibm.com>
diff --git a/Documentation/Makefile b/Documentation/Makefile
index d30d66ddf1ad..b98477df5ddf 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -5,6 +5,7 @@
# for cleaning
subdir- := devicetree/bindings
+ifneq ($(MAKECMDGOALS),cleandocs)
# Check for broken documentation file references
ifeq ($(CONFIG_WARN_MISSING_DOCUMENTS),y)
$(shell $(srctree)/scripts/documentation-file-ref-check --warn)
@@ -14,6 +15,7 @@ endif
ifeq ($(CONFIG_WARN_ABI_ERRORS),y)
$(shell $(srctree)/scripts/get_abi.py --dir $(srctree)/Documentation/ABI validate)
endif
+endif
# You can set these variables from the command line.
SPHINXBUILD = sphinx-build
diff --git a/Documentation/PCI/endpoint/pci-test-howto.rst b/Documentation/PCI/endpoint/pci-test-howto.rst
index aafc17ef3fd3..dd66858cde46 100644
--- a/Documentation/PCI/endpoint/pci-test-howto.rst
+++ b/Documentation/PCI/endpoint/pci-test-howto.rst
@@ -203,3 +203,18 @@ controllers, it is advisable to skip this testcase using this
command::
# pci_endpoint_test -f pci_ep_bar -f pci_ep_basic -v memcpy -T COPY_TEST -v dma
+
+Kselftest EP Doorbell
+~~~~~~~~~~~~~~~~~~~~~
+
+If the Endpoint MSI controller is used for the doorbell use case, run the
+command below to test it::
+
+ # pci_endpoint_test -f pcie_ep_doorbell
+
+ # Starting 1 tests from 1 test cases.
+ # RUN pcie_ep_doorbell.DOORBELL_TEST ...
+ # OK pcie_ep_doorbell.DOORBELL_TEST
+ ok 1 pcie_ep_doorbell.DOORBELL_TEST
+ # PASSED: 1 / 1 tests passed.
+ # Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.rst b/Documentation/RCU/Design/Data-Structures/Data-Structures.rst
index 04e16775c752..1b0aad184dd7 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.rst
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.rst
@@ -286,6 +286,39 @@ in order to detect the beginnings and ends of grace periods in a
distributed fashion. The values flow from ``rcu_state`` to ``rcu_node``
(down the tree from the root to the leaves) to ``rcu_data``.
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Given that the root rcu_node structure has a gp_seq field, |
+| why does RCU maintain a separate gp_seq in the rcu_state structure? |
+| Why not just use the root rcu_node's gp_seq as the official record |
+| and update it directly when starting a new grace period? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| On single-node RCU trees (where the root node is also a leaf), |
+| updating the root node's gp_seq immediately would create unnecessary |
+| lock contention. Here's why: |
+| |
+| If we did rcu_seq_start() directly on the root node's gp_seq: |
+| |
+| 1. All CPUs would immediately see their node's gp_seq from their rdp's|
+| gp_seq, in rcu_pending(). They would all then invoke the RCU-core. |
+| 2. Which calls note_gp_changes() and try to acquire the node lock. |
+| 3. But rnp->qsmask isn't initialized yet (happens later in |
+| rcu_gp_init()) |
+| 4. So each CPU would acquire the lock, find it can't determine if it |
+| needs to report quiescent state (no qsmask), update rdp->gp_seq, |
+| and release the lock. |
+| 5. Result: Lots of lock acquisitions with no grace period progress |
+| |
+| By having a separate rcu_state.gp_seq, we can increment the official |
+| grace period counter without immediately affecting what CPUs see in |
+| their nodes. The hierarchical propagation in rcu_gp_init() then |
+| updates the root node's gp_seq and qsmask together under the same lock|
+| acquisition, avoiding this useless contention. |
++-----------------------------------------------------------------------+
+
Miscellaneous
'''''''''''''
diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst
index 6125e7068d2c..b0395540296b 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.rst
+++ b/Documentation/RCU/Design/Requirements/Requirements.rst
@@ -1970,6 +1970,134 @@ corresponding CPU's leaf node lock is held. This avoids race conditions
between RCU's hotplug notifier hooks, the grace period initialization
code, and the FQS loop, all of which refer to or modify this bookkeeping.
+Note that grace period initialization (rcu_gp_init()) must carefully sequence
+CPU hotplug scanning with grace period state changes. For example, the
+following race could occur in rcu_gp_init() if rcu_seq_start() were to happen
+after the CPU hotplug scanning.
+
+.. code-block:: none
+
+ CPU0 (rcu_gp_init) CPU1 CPU2
+ --------------------- ---- ----
+ // Hotplug scan first (WRONG ORDER)
+ rcu_for_each_leaf_node(rnp) {
+ rnp->qsmaskinit = rnp->qsmaskinitnext;
+ }
+ rcutree_report_cpu_starting()
+ rnp->qsmaskinitnext |= mask;
+ rcu_read_lock()
+ r0 = *X;
+ r1 = *X;
+ X = NULL;
+ cookie = get_state_synchronize_rcu();
+ // cookie = 8 (future GP)
+ rcu_seq_start(&rcu_state.gp_seq);
+ // gp_seq = 5
+
+ // CPU1 now invisible to this GP!
+ rcu_for_each_node_breadth_first() {
+ rnp->qsmask = rnp->qsmaskinit;
+ // CPU1 not included!
+ }
+
+ // GP completes without CPU1
+ rcu_seq_end(&rcu_state.gp_seq);
+ // gp_seq = 8
+ poll_state_synchronize_rcu(cookie);
+ // Returns true!
+ kfree(r1);
+ r2 = *r0; // USE-AFTER-FREE!
+
+By incrementing gp_seq first, CPU1's RCU read-side critical section
+is guaranteed to not be missed by CPU2.
+
+**Concurrent Quiescent State Reporting for Offline CPUs**
+
+RCU must ensure that CPUs going offline report quiescent states to avoid
+blocking grace periods. This requires careful synchronization to handle
+race conditions.
+
+**Race condition causing Offline CPU to hang GP**
+
+A race between CPU offlining and new GP initialization (gp_init) may occur
+because `rcu_report_qs_rnp()` in `rcutree_report_cpu_dead()` must temporarily
+release the `rcu_node` lock to wake the RCU grace-period kthread:
+
+.. code-block:: none
+
+ CPU1 (going offline) CPU0 (GP kthread)
+ -------------------- -----------------
+ rcutree_report_cpu_dead()
+ rcu_report_qs_rnp()
+ // Must release rnp->lock to wake GP kthread
+ raw_spin_unlock_irqrestore_rcu_node()
+ // Wakes up and starts new GP
+ rcu_gp_init()
+ // First loop:
+ copies qsmaskinitnext->qsmaskinit
+ // CPU1 still in qsmaskinitnext!
+
+ // Second loop:
+ rnp->qsmask = rnp->qsmaskinit
+ mask = rnp->qsmask & ~rnp->qsmaskinitnext
+ // mask is 0! CPU1 still in both masks
+ // Reacquire lock (but too late)
+ rnp->qsmaskinitnext &= ~mask // Finally clears bit
+
+Without `ofl_lock`, the new grace period includes the offline CPU and waits
+forever for its quiescent state, causing a GP hang.
+
+**A solution with ofl_lock**
+
+The `ofl_lock` (offline lock) prevents `rcu_gp_init()` from running during
+the vulnerable window when `rcu_report_qs_rnp()` has released `rnp->lock`:
+
+.. code-block:: none
+
+ CPU0 (rcu_gp_init) CPU1 (rcutree_report_cpu_dead)
+ ------------------ ------------------------------
+ rcu_for_each_leaf_node(rnp) {
+ arch_spin_lock(&ofl_lock) -----> arch_spin_lock(&ofl_lock) [BLOCKED]
+
+ // Safe: CPU1 can't interfere
+ rnp->qsmaskinit = rnp->qsmaskinitnext
+
+ arch_spin_unlock(&ofl_lock) ---> // Now CPU1 can proceed
+ } // But snapshot already taken
+
+**Another race causing GP hangs in rcu_gp_init(): Reporting QS for Now-offline CPUs**
+
+After the first loop takes an atomic snapshot of online CPUs, as shown above,
+the second loop in `rcu_gp_init()` detects CPUs that went offline between
+releasing `ofl_lock` and acquiring the per-node `rnp->lock`. This detection is
+crucial because:
+
+1. The CPU might have gone offline after the snapshot but before the second loop.
+2. The offline CPU cannot report its own QS if it's already dead.
+3. Without this detection, the grace period would wait forever for CPUs that
+ are now offline.
+
+The second loop performs this detection safely:
+
+.. code-block:: none
+
+ rcu_for_each_node_breadth_first(rnp) {
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ rnp->qsmask = rnp->qsmaskinit; // Apply the snapshot
+
+ // Detect CPUs offline after snapshot
+ mask = rnp->qsmask & ~rnp->qsmaskinitnext;
+
+ if (mask && rcu_is_leaf_node(rnp))
+ rcu_report_qs_rnp(mask, ...) // Report QS for offline CPUs
+ }
+
+This approach ensures atomicity: quiescent state reporting for offline CPUs
+happens either in `rcu_gp_init()` (second loop) or in `rcutree_report_cpu_dead()`,
+never both and never neither. The `rnp->lock` held throughout the sequence
+prevents races - `rcutree_report_cpu_dead()` also acquires this lock when
+clearing `qsmaskinitnext`, ensuring mutual exclusion.
+
Scheduler and RCU
~~~~~~~~~~~~~~~~~
diff --git a/Documentation/accounting/delay-accounting.rst b/Documentation/accounting/delay-accounting.rst
index 210c194d4a7b..8ccc5af5ea1e 100644
--- a/Documentation/accounting/delay-accounting.rst
+++ b/Documentation/accounting/delay-accounting.rst
@@ -131,3 +131,59 @@ Get IO accounting for pid 1, it works only with -p::
linuxrc: read=65536, write=0, cancelled_write=0
The above command can be used with -v to get more debug information.
+
+After the system starts, use `delaytop` to get the system-wide delay information,
+which includes system-wide PSI information and Top-N high-latency tasks.
+
+By default, `delaytop` sorts by CPU latency in descending order, displays the
+top 20 high-latency tasks, and refreshes the latency data every 2 seconds.
+
+Get PSI information and Top-N tasks delay, since system boot::
+
+ bash# ./delaytop
+ System Pressure Information: (avg10/avg60/avg300/total)
+ CPU some: 0.0%/ 0.0%/ 0.0%/ 345(ms)
+ CPU full: 0.0%/ 0.0%/ 0.0%/ 0(ms)
+ Memory full: 0.0%/ 0.0%/ 0.0%/ 0(ms)
+ Memory some: 0.0%/ 0.0%/ 0.0%/ 0(ms)
+ IO full: 0.0%/ 0.0%/ 0.0%/ 65(ms)
+ IO some: 0.0%/ 0.0%/ 0.0%/ 79(ms)
+ IRQ full: 0.0%/ 0.0%/ 0.0%/ 0(ms)
+ Top 20 processes (sorted by CPU delay):
+ PID TGID COMMAND CPU(ms) IO(ms) SWAP(ms) RCL(ms) THR(ms) CMP(ms) WP(ms) IRQ(ms)
+ ----------------------------------------------------------------------------------------------
+ 161 161 zombie_memcg_re 1.40 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 130 130 blkcg_punt_bio 1.37 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 444 444 scsi_tmf_0 0.73 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 1280 1280 rsyslogd 0.53 0.04 0.00 0.00 0.00 0.00 0.00 0.00
+ 12 12 ksoftirqd/0 0.47 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 1277 1277 nbd-server 0.44 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 308 308 kworker/2:2-sys 0.41 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 55 55 netns 0.36 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 1187 1187 acpid 0.31 0.03 0.00 0.00 0.00 0.00 0.00 0.00
+ 6184 6184 kworker/1:2-sys 0.24 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 186 186 kaluad 0.24 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 18 18 ksoftirqd/1 0.24 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 185 185 kmpath_rdacd 0.23 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 190 190 kstrp 0.23 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 2759 2759 agetty 0.20 0.03 0.00 0.00 0.00 0.00 0.00 0.00
+ 1190 1190 kworker/0:3-sys 0.19 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 1272 1272 sshd 0.15 0.04 0.00 0.00 0.00 0.00 0.00 0.00
+ 1156 1156 license 0.15 0.11 0.00 0.00 0.00 0.00 0.00 0.00
+ 134 134 md 0.13 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+ 6142 6142 kworker/3:2-xfs 0.13 0.00 0.00 0.00 0.00 0.00 0.00 0.00
+
+Command-line options of delaytop::
+
+    # ./delaytop -p pid
+    Print delayacct stats of the given pid
+
+    # ./delaytop -P num
+    Display the top num tasks
+
+    # ./delaytop -n num
+    Refresh the display num times, then exit
+
+    # ./delaytop -d secs
+    Set the refresh interval to secs seconds
diff --git a/Documentation/admin-guide/blockdev/zoned_loop.rst b/Documentation/admin-guide/blockdev/zoned_loop.rst
index 9c7aa3b482f3..64dcfde7450a 100644
--- a/Documentation/admin-guide/blockdev/zoned_loop.rst
+++ b/Documentation/admin-guide/blockdev/zoned_loop.rst
@@ -79,7 +79,7 @@ zone_capacity_mb Device zone capacity (must always be equal to or lower than
the zone size. Default: zone size.
conv_zones Total number of conventioanl zones starting from sector 0.
Default: 8.
-base_dir Path to the base directoy where to create the directory
+base_dir Path to the base directory where to create the directory
containing the zone files of the device.
Default=/var/local/zloop.
The device directory containing the zone files is always
diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst
index 91339efdcb54..7a86042c9b6d 100644
--- a/Documentation/admin-guide/bootconfig.rst
+++ b/Documentation/admin-guide/bootconfig.rst
@@ -265,7 +265,7 @@ The final kernel cmdline will be the following::
Config File Limitation
======================
-Currently the maximum config size size is 32KB and the total key-words (not
+Currently the maximum config size is 32KB and the total key-words (not
key-value entries) must be under 1024 nodes.
Note: this is not the number of entries but nodes, an entry must consume
more than 2 nodes (a key-word and a value). So theoretically, it will be
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index bd98ea3175ec..d9d3cc7df348 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -435,6 +435,15 @@ both cgroups.
Controlling Controllers
-----------------------
+Availability
+~~~~~~~~~~~~
+
+A controller is available in a cgroup when it is supported by the kernel (i.e.,
+compiled in, not disabled and not attached to a v1 hierarchy) and listed in the
+"cgroup.controllers" file. Availability means the controller's interface files
+are exposed in the cgroup's directory, allowing the distribution of the target
+resource to be observed or controlled within that cgroup.
+
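+For example, assuming cgroup2 is mounted at the conventional /sys/fs/cgroup,
+the controllers available in the root cgroup can be listed with (the exact
+output depends on the kernel configuration)::
+
+  # cat /sys/fs/cgroup/cgroup.controllers
+  cpuset cpu io memory hugetlb pids
+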
Enabling and Disabling
~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/admin-guide/device-mapper/thin-provisioning.rst b/Documentation/admin-guide/device-mapper/thin-provisioning.rst
index bafebf79da4b..b2fa49a5608a 100644
--- a/Documentation/admin-guide/device-mapper/thin-provisioning.rst
+++ b/Documentation/admin-guide/device-mapper/thin-provisioning.rst
@@ -80,11 +80,11 @@ less sharing than average you'll need a larger-than-average metadata device.
As a guide, we suggest you calculate the number of bytes to use in the
metadata device as 48 * $data_dev_size / $data_block_size but round it up
-to 2MB if the answer is smaller. If you're creating large numbers of
+to 2MiB if the answer is smaller. If you're creating large numbers of
snapshots which are recording large amounts of change, you may find you
need to increase this.
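+As a worked example with both sizes in bytes, a 1 TiB data device with a
+64 KiB block size holds 2^24 data blocks, so the guideline above gives
+48 * 2^24 = 805306368 bytes, i.e. roughly 768 MiB of metadata.
+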
-The largest size supported is 16GB: If the device is larger,
+The largest size supported is 16GiB: If the device is larger,
a warning will be issued and the excess space will not be used.
Reloading a pool table
@@ -107,13 +107,13 @@ Using an existing pool device
$data_block_size gives the smallest unit of disk space that can be
allocated at a time expressed in units of 512-byte sectors.
-$data_block_size must be between 128 (64KB) and 2097152 (1GB) and a
-multiple of 128 (64KB). $data_block_size cannot be changed after the
+$data_block_size must be between 128 (64KiB) and 2097152 (1GiB) and a
+multiple of 128 (64KiB). $data_block_size cannot be changed after the
thin-pool is created. People primarily interested in thin provisioning
-may want to use a value such as 1024 (512KB). People doing lots of
-snapshotting may want a smaller value such as 128 (64KB). If you are
+may want to use a value such as 1024 (512KiB). People doing lots of
+snapshotting may want a smaller value such as 128 (64KiB). If you are
not zeroing newly-allocated data, a larger $data_block_size in the
-region of 256000 (128MB) is suggested.
+region of 262144 (128MiB) is suggested.
$low_water_mark is expressed in blocks of size $data_block_size. If
free space on the data device drops below this level then a dm event
@@ -291,7 +291,7 @@ i) Constructor
error_if_no_space:
Error IOs, instead of queueing, if no space.
- Data block size must be between 64KB (128 sectors) and 1GB
+ Data block size must be between 64KiB (128 sectors) and 1GiB
(2097152 sectors) inclusive.
diff --git a/Documentation/admin-guide/kdump/kdump.rst b/Documentation/admin-guide/kdump/kdump.rst
index 20fabdf6567e..9c6cd52f69cf 100644
--- a/Documentation/admin-guide/kdump/kdump.rst
+++ b/Documentation/admin-guide/kdump/kdump.rst
@@ -311,6 +311,27 @@ crashkernel syntax
crashkernel=0,low
+4) crashkernel=size,cma
+
+ Reserve additional crash kernel memory from CMA. This reservation is
+ usable by the first system's userspace memory and kernel movable
+ allocations (memory balloon, zswap). Pages allocated from this memory
+ range will not be included in the vmcore, so this should not be used if
+ dumping of userspace memory is intended, and it has to be expected that
+ some movable kernel pages may be missing from the dump.
+
+ A standard crashkernel reservation, as described above, is still needed
+ to hold the crash kernel and initrd.
+
+ This option increases the risk of a kdump failure: DMA transfers
+ configured by the first kernel may end up corrupting the second
+ kernel's memory.
+
+ This reservation method is intended for systems that can't afford to
+ sacrifice enough memory for standard crashkernel reservation and where
+ less reliable and possibly incomplete kdump is preferable to no kdump at
+ all.
+
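+ For example, a hypothetical command line could pair a modest standard
+ reservation with a larger CMA-backed one::
+
+     crashkernel=256M crashkernel=1G,cma
+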
Boot into System Kernel
-----------------------
1) Update the boot loader (such as grub, yaboot, or lilo) configuration
diff --git a/Documentation/admin-guide/kdump/vmcoreinfo.rst b/Documentation/admin-guide/kdump/vmcoreinfo.rst
index 8cf4614385b7..404a15f6782c 100644
--- a/Documentation/admin-guide/kdump/vmcoreinfo.rst
+++ b/Documentation/admin-guide/kdump/vmcoreinfo.rst
@@ -325,14 +325,14 @@ NR_FREE_PAGES
On linux-2.6.21 or later, the number of free pages is in
vm_stat[NR_FREE_PAGES]. Used to get the number of free pages.
-PG_lru|PG_private|PG_swapcache|PG_swapbacked|PG_slab|PG_hwpoision|PG_head_mask|PG_hugetlb
------------------------------------------------------------------------------------------
+PG_lru|PG_private|PG_swapcache|PG_swapbacked|PG_hwpoison|PG_head_mask
+--------------------------------------------------------------------------
Page attributes. These flags are used to filter out various pages that are
unnecessary for dumping.
-PAGE_BUDDY_MAPCOUNT_VALUE(~PG_buddy)|PAGE_OFFLINE_MAPCOUNT_VALUE(~PG_offline)|PAGE_OFFLINE_MAPCOUNT_VALUE(~PG_unaccepted)
--------------------------------------------------------------------------------------------------------------------------
+PAGE_SLAB_MAPCOUNT_VALUE|PAGE_BUDDY_MAPCOUNT_VALUE|PAGE_OFFLINE_MAPCOUNT_VALUE|PAGE_HUGETLB_MAPCOUNT_VALUE|PAGE_UNACCEPTED_MAPCOUNT_VALUE
+------------------------------------------------------------------------------------------------------------------------------------------
More page attributes. These flags are used to filter out various pages that
are unnecessary for dumping.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 0d2ea9a60145..747a55abf494 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -633,6 +633,14 @@
named mounts. Specifying both "all" and "named" disables
all v1 hierarchies.
+ cgroup_v1_proc= [KNL] Also show missing controllers in /proc/cgroups
+ Format: { "true" | "false" }
+ /proc/cgroups lists only v1 controllers by default.
+ This compatibility option also enables listing v2
+ controllers (whose v1 code is not compiled!), so that
+ semi-legacy software can check this file to decide
+ about usage of v2 (sic) controllers.
+
cgroup_favordynmods= [KNL] Enable or Disable favordynmods.
Format: { "true" | "false" }
Defaults to the value of CONFIG_CGROUP_FAVOR_DYNMODS.
@@ -986,6 +994,28 @@
0: to disable low allocation.
It will be ignored when crashkernel=X,high is not used
or memory reserved is below 4G.
+ crashkernel=size[KMG],cma
+ [KNL, X86] Reserve additional crash kernel memory from
+ CMA. This reservation is usable by the first system's
+ userspace memory and kernel movable allocations (memory
+ balloon, zswap). Pages allocated from this memory range
+ will not be included in the vmcore, so this should not
+ be used if dumping of userspace memory is intended, and
+ it has to be expected that some movable kernel pages
+ may be missing from the dump.
+
+ A standard crashkernel reservation, as described above,
+ is still needed to hold the crash kernel and initrd.
+
+ This option increases the risk of a kdump failure: DMA
+ transfers configured by the first kernel may end up
+ corrupting the second kernel's memory.
+
+ This reservation method is intended for systems that
+ can't afford to sacrifice enough memory for standard
+ crashkernel reservation and where less reliable and
+ possibly incomplete kdump is preferable to no kdump at
+ all.
cryptomgr.notests
[KNL] Disable crypto self-tests
@@ -1798,6 +1828,27 @@
backtraces on all cpus.
Format: 0 | 1
+ hash_pointers=
+ [KNL,EARLY]
+ By default, when pointers are printed to the console
+ or buffers via the %p format string, that pointer is
+ "hashed", i.e. obscured by hashing the pointer value.
+ This is a security feature that hides actual kernel
+ addresses from unprivileged users, but it also makes
+ debugging the kernel more difficult since unequal
+ pointers can no longer be compared. The choices are:
+ Format: { auto | always | never }
+ Default: auto
+
+ auto - Hash pointers unless slab_debug is enabled.
+ always - Always hash pointers (even if slab_debug is
+ enabled).
+ never - Never hash pointers. This option should only
+ be specified when debugging the kernel. Do
+ not use on production kernels. The boot
+ param "no_hash_pointers" is an alias for
+ this mode.
+
hashdist= [KNL,NUMA] Large hashes allocated during boot
are distributed across NUMA nodes. Defaults on
for 64-bit NUMA, off otherwise.
@@ -2212,6 +2263,11 @@
different crypto accelerators. This option can be used
to achieve best performance for particular HW.
+ ima= [IMA] Enable or disable IMA
+ Format: { "off" | "on" }
+ Default: "on"
+ Note that disabling IMA is limited to the kdump kernel.
+
indirect_target_selection= [X86,Intel] Mitigation control for Indirect
Target Selection(ITS) bug in Intel CPUs. Updated
microcode is also required for a fix in IBPB.
@@ -4181,18 +4237,7 @@
no_hash_pointers
[KNL,EARLY]
- Force pointers printed to the console or buffers to be
- unhashed. By default, when a pointer is printed via %p
- format string, that pointer is "hashed", i.e. obscured
- by hashing the pointer value. This is a security feature
- that hides actual kernel addresses from unprivileged
- users, but it also makes debugging the kernel more
- difficult since unequal pointers can no longer be
- compared. However, if this command-line option is
- specified, then all normal pointers will have their true
- value printed. This option should only be specified when
- debugging the kernel. Please do not use on production
- kernels.
+ Alias for "hash_pointers=never".
nohibernate [HIBERNATION] Disable hibernation and resume.
@@ -4544,7 +4589,7 @@
bit 2: print timer info
bit 3: print locks info if CONFIG_LOCKDEP is on
bit 4: print ftrace buffer
- bit 5: print all printk messages in buffer
+ bit 5: replay all messages on consoles at the end of panic
bit 6: print all CPUs backtrace (if available in the arch)
bit 7: print only tasks in uninterruptible (blocked) state
*Be aware* that this option may print a _lot_ of lines,
@@ -4552,6 +4597,25 @@
Use this option carefully, maybe worth to setup a
bigger log buffer with "log_buf_len" along with this.
+ panic_sys_info= A comma-separated list of extra information to be dumped
+ on panic.
+ Format: val[,val...]
+ Where @val can be any of the following:
+
+ tasks: print all tasks info
+ mem: print system memory info
+ timers: print timers info
+ locks: print locks info if CONFIG_LOCKDEP is on
+ ftrace: print ftrace buffer
+ all_bt: print all CPUs backtrace (if available in the arch)
+ blocked_tasks: print only tasks in uninterruptible (blocked) state
+
+ This is a human-readable alternative to the 'panic_print' option.
+
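+ For example, a boot command line could include
+ "panic_sys_info=tasks,mem,ftrace" to dump task, memory
+ and ftrace information on panic.
+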
+ panic_console_replay
+ When panic happens, replay all kernel messages on
+ consoles at the end of panic.
+
parkbd.port= [HW] Parallel port number the keyboard adapter is
connected to, default is 0.
Format: <parport#>
@@ -5508,7 +5572,8 @@
echo 1 > /sys/module/rcutree/parameters/rcu_normal_wake_from_gp
or pass a boot parameter "rcutree.rcu_normal_wake_from_gp=1"
- Default is 0.
+ Default is 1 if num_possible_cpus() <= 16, unless it is explicitly
+ disabled by passing 0 via this boot parameter.
rcuscale.gp_async= [KNL]
Measure performance of asynchronous
@@ -6586,14 +6651,18 @@
slab_debug can create guard zones around objects and
may poison objects when not in use. Also tracks the
last alloc / free. For more information see
- Documentation/mm/slub.rst.
+ Documentation/admin-guide/mm/slab.rst.
(slub_debug legacy name also accepted for now)
+ Using this option implies the "no_hash_pointers"
+ option, which can be undone by adding the
+ "hash_pointers=always" option.
+
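+ For example, booting with "slab_debug=FZ hash_pointers=always"
+ would enable sanity checks and red zoning while keeping
+ pointer output hashed.
+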
slab_max_order= [MM]
Determines the maximum allowed order for slabs.
A high setting may cause OOMs due to memory
fragmentation. For more information see
- Documentation/mm/slub.rst.
+ Documentation/admin-guide/mm/slab.rst.
(slub_max_order legacy name also accepted for now)
slab_merge [MM]
@@ -6608,13 +6677,14 @@
the number of objects indicated. The higher the number
of objects the smaller the overhead of tracking slabs
and the less frequently locks need to be acquired.
- For more information see Documentation/mm/slub.rst.
+ For more information see
+ Documentation/admin-guide/mm/slab.rst.
(slub_min_objects legacy name also accepted for now)
slab_min_order= [MM]
Determines the minimum page order for slabs. Must be
lower or equal to slab_max_order. For more information see
- Documentation/mm/slub.rst.
+ Documentation/admin-guide/mm/slab.rst.
(slub_min_order legacy name also accepted for now)
slab_nomerge [MM]
@@ -6628,7 +6698,8 @@
cache (risks via metadata attacks are mostly
unchanged). Debug options disable merging on their
own.
- For more information see Documentation/mm/slub.rst.
+ For more information see
+ Documentation/admin-guide/mm/slab.rst.
(slub_nomerge legacy name also accepted for now)
slab_strict_numa [MM]
@@ -7016,6 +7087,11 @@
consumed by the stack hash table. By default this is set
to false.
+ stack_depot_max_pools= [KNL,EARLY]
+ Specify the maximum number of pools to use for storing
+ stack traces. Pools are allocated on-demand up to this
+ limit. Default value is 8191 pools.
+
stacktrace [FTRACE]
Enabled the stack tracer on boot up.
diff --git a/Documentation/admin-guide/mm/damon/index.rst b/Documentation/admin-guide/mm/damon/index.rst
index bc7e976120e0..3ce3164480c7 100644
--- a/Documentation/admin-guide/mm/damon/index.rst
+++ b/Documentation/admin-guide/mm/damon/index.rst
@@ -14,3 +14,4 @@ access monitoring and access-aware system operations.
usage
reclaim
lru_sort
+ stat
diff --git a/Documentation/admin-guide/mm/damon/stat.rst b/Documentation/admin-guide/mm/damon/stat.rst
new file mode 100644
index 000000000000..4c517c2c219a
--- /dev/null
+++ b/Documentation/admin-guide/mm/damon/stat.rst
@@ -0,0 +1,69 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================
+Data Access Monitoring Results Stat
+===================================
+
+Data Access Monitoring Results Stat (DAMON_STAT) is a static kernel module
+aimed at simple access pattern monitoring. It monitors accesses
+on the system's entire physical memory using DAMON, and provides simplified
+access monitoring results statistics, namely idle time percentiles and
+estimated memory bandwidth.
+
+Monitoring Accuracy and Overhead
+================================
+
+DAMON_STAT uses monitoring intervals :ref:`auto-tuning
+<damon_design_monitoring_intervals_autotuning>` to make its accuracy high and
+overhead minimal. It auto-tunes the intervals, aiming for 4 % of observable
+access events to be captured in each snapshot, while limiting the resulting
+sampling interval to at least 5 milliseconds and at most 10 seconds. On a few
+production server systems, it consumed only 0.x % of a single CPU's time,
+while capturing access patterns of reasonable quality.
+
+Interface: Module Parameters
+============================
+
+To use this feature, you should first ensure your system is running on a kernel
+that is built with ``CONFIG_DAMON_STAT=y``. The feature can be enabled by
+default at build time, by setting ``CONFIG_DAMON_STAT_ENABLED_DEFAULT`` true.
+
+To let sysadmins enable or disable it at boot and/or runtime, and read the
+monitoring results, DAMON_STAT provides module parameters. The following
+sections describe the parameters.
+
+enabled
+-------
+
+Enable or disable DAMON_STAT.
+
+You can enable DAMON_STAT by setting the value of this parameter to ``Y``.
+Setting it to ``N`` disables DAMON_STAT. The default value is set by the
+``CONFIG_DAMON_STAT_ENABLED_DEFAULT`` build config option.
+
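+For example, assuming the module is named ``damon_stat``, it can be enabled at
+runtime with::
+
+    echo Y > /sys/module/damon_stat/parameters/enabled
+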
+estimated_memory_bandwidth
+--------------------------
+
+Estimated memory bandwidth consumption (bytes per second) of the system.
+
+DAMON_STAT reads the observed access events on the current DAMON results
+snapshot and converts them to an estimate of memory bandwidth consumption in
+bytes per second. The resulting metric is exposed to the user via this
+read-only parameter. Because DAMON uses sampling, this is only an estimate of
+the access intensity rather than an accurate memory bandwidth measurement.
+
+memory_idle_ms_percentiles
+--------------------------
+
+Per-byte idle time (milliseconds) percentiles of the system.
+
+DAMON_STAT calculates how long each byte of the memory was not accessed until
+now (idle time), based on the current DAMON results snapshot. If DAMON found a
+region of access frequency (nr_accesses) larger than zero, every byte of the
+region gets zero idle time. If a region has zero access frequency
+(nr_accesses), how long the region was keeping the zero access frequency (age)
+becomes the idle time of every byte of the region. Then, DAMON_STAT exposes
+the percentiles of the idle time values via this read-only parameter. Reading
+the parameter returns 101 idle time values in milliseconds, separated by
+commas. The values represent the 0th, 1st, 2nd, 3rd, ..., 99th and 100th
+percentile idle times.
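+
+For example, assuming the same module name as above, the percentiles can be
+read with::
+
+    cat /sys/module/damon_stat/parameters/memory_idle_ms_percentiles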
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index d960aba72b82..ff3a2dda1f02 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -59,7 +59,7 @@ comma (",").
:ref:`/sys/kernel/mm/damon <sysfs_root>`/admin
│ :ref:`kdamonds <sysfs_kdamonds>`/nr_kdamonds
- │ │ :ref:`0 <sysfs_kdamond>`/state,pid
+ │ │ :ref:`0 <sysfs_kdamond>`/state,pid,refresh_ms
│ │ │ :ref:`contexts <sysfs_contexts>`/nr_contexts
│ │ │ │ :ref:`0 <sysfs_context>`/avail_operations,operations
│ │ │ │ │ :ref:`monitoring_attrs <sysfs_monitoring_attrs>`/
@@ -85,6 +85,8 @@ comma (",").
│ │ │ │ │ │ │ :ref:`watermarks <sysfs_watermarks>`/metric,interval_us,high,mid,low
│ │ │ │ │ │ │ :ref:`{core_,ops_,}filters <sysfs_filters>`/nr_filters
│ │ │ │ │ │ │ │ 0/type,matching,allow,memcg_path,addr_start,addr_end,target_idx,min,max
+ │ │ │ │ │ │ │ :ref:`dests <damon_sysfs_dests>`/nr_dests
+ │ │ │ │ │ │ │ │ 0/id,weight
│ │ │ │ │ │ │ :ref:`stats <sysfs_schemes_stats>`/nr_tried,sz_tried,nr_applied,sz_applied,sz_ops_filter_passed,qt_exceeds
│ │ │ │ │ │ │ :ref:`tried_regions <sysfs_schemes_tried_regions>`/total_bytes
│ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age,sz_filter_passed
@@ -121,8 +123,8 @@ kdamond.
kdamonds/<N>/
-------------
-In each kdamond directory, two files (``state`` and ``pid``) and one directory
-(``contexts``) exist.
+In each kdamond directory, three files (``state``, ``pid`` and ``refresh_ms``)
+and one directory (``contexts``) exist.
Reading ``state`` returns ``on`` if the kdamond is currently running, or
``off`` if it is not running.
@@ -159,6 +161,13 @@ Users can write below commands for the kdamond to the ``state`` file.
If the state is ``on``, reading ``pid`` shows the pid of the kdamond thread.
+Users can ask the kernel to periodically update files showing auto-tuned
+parameters and DAMOS stats, instead of manually writing keywords such as
+``update_tuned_intervals`` to the ``state`` file. For this, users should
+write the desired update time interval in milliseconds to the ``refresh_ms``
+file. If the interval is zero, the periodic update is disabled. Reading the
+file shows the currently set time interval.
+
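+For example, the following asks for updates every second, using the sysfs
+layout shown above::
+
+    # echo 1000 > /sys/kernel/mm/damon/admin/kdamonds/0/refresh_ms
+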
``contexts`` directory contains files for controlling the monitoring contexts
that this kdamond will execute.
@@ -307,10 +316,10 @@ to ``N-1``. Each directory represents each DAMON-based operation scheme.
schemes/<N>/
------------
-In each scheme directory, seven directories (``access_pattern``, ``quotas``,
-``watermarks``, ``core_filters``, ``ops_filters``, ``filters``, ``stats``, and
-``tried_regions``) and three files (``action``, ``target_nid`` and
-``apply_interval``) exist.
+In each scheme directory, nine directories (``access_pattern``, ``quotas``,
+``watermarks``, ``core_filters``, ``ops_filters``, ``filters``, ``dests``,
+``stats``, and ``tried_regions``) and three files (``action``, ``target_nid``
+and ``apply_interval``) exist.
The ``action`` file is for setting and getting the scheme's :ref:`action
<damon_design_damos_action>`. The keywords that can be written to and read
@@ -484,6 +493,29 @@ Refer to the :ref:`DAMOS filters design documentation
of different ``allow`` works, when each of the filters are supported, and
differences on stats.
+.. _damon_sysfs_dests:
+
+schemes/<N>/dests/
+------------------
+
+Directory for specifying the destinations of the given DAMON-based operation
+scheme's action. This directory is ignored if the action of the given scheme
+does not support multiple destinations. Only ``DAMOS_MIGRATE_{HOT,COLD}``
+actions support multiple destinations.
+
+In the beginning, the directory has only one file, ``nr_dests``. Writing a
+number (``N``) to the file creates the number of child directories named ``0``
+to ``N-1``. Each directory represents each action destination.
+
+Each destination directory contains two files, namely ``id`` and ``weight``.
+Users can write and read the identifier of the destination to and from the
+``id`` file. For ``DAMOS_MIGRATE_{HOT,COLD}`` actions, the node id of the
+migration destination node should be written to the ``id`` file. Users can
+write and read the weight of the destination among the given destinations to
+and from the ``weight`` file. The weight can be an arbitrary integer. When
+DAMOS applies the action to each entity of the memory region, it will select
+the destination of the action based on the relative weights of the
+destinations.
+
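+For example, the following sketch, using the sysfs layout shown above, sets up
+two migration destinations, nodes 0 and 1, weighted 3:1::
+
+    # cd /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/dests
+    # echo 2 > nr_dests
+    # echo 0 > 0/id
+    # echo 3 > 0/weight
+    # echo 1 > 1/id
+    # echo 1 > 1/weight
+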
.. _sysfs_schemes_stats:
schemes/<N>/stats/
diff --git a/Documentation/admin-guide/mm/index.rst b/Documentation/admin-guide/mm/index.rst
index 2d2f6c222308..ebc83ca20fdc 100644
--- a/Documentation/admin-guide/mm/index.rst
+++ b/Documentation/admin-guide/mm/index.rst
@@ -37,6 +37,7 @@ the Linux memory management.
numaperf
pagemap
shrinker_debugfs
+ slab
soft-dirty
swap_numa
transhuge
diff --git a/Documentation/mm/slub.rst b/Documentation/admin-guide/mm/slab.rst
index 84ca1dc94e5e..14429ab90611 100644
--- a/Documentation/mm/slub.rst
+++ b/Documentation/admin-guide/mm/slab.rst
@@ -1,13 +1,12 @@
-==========================
-Short users guide for SLUB
-==========================
-
-The basic philosophy of SLUB is very different from SLAB. SLAB
-requires rebuilding the kernel to activate debug options for all
-slab caches. SLUB always includes full debugging but it is off by default.
-SLUB can enable debugging only for selected slabs in order to avoid
-an impact on overall system performance which may make a bug more
-difficult to find.
+========================================
+Short users guide for the slab allocator
+========================================
+
+The slab allocator includes full debugging support (when built with
+CONFIG_SLUB_DEBUG=y) but it is off by default (unless built with
+CONFIG_SLUB_DEBUG_ON=y). You can enable debugging only for selected
+slabs in order to avoid an impact on overall system performance which
+may make a bug more difficult to find.
In order to switch debugging on one can add an option ``slab_debug``
to the kernel command line. That will enable full debugging for
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index dff8d5985f0f..370fba113460 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -107,7 +107,7 @@ sysfs
Global THP controls
-------------------
-Transparent Hugepage Support for anonymous memory can be entirely disabled
+Transparent Hugepage Support for anonymous memory can be disabled
(mostly for debugging purposes) or only enabled inside MADV_HUGEPAGE
regions (to avoid the risk of consuming more memory resources) or enabled
system wide. This can be achieved per-supported-THP-size with one of::
@@ -119,6 +119,11 @@ system wide. This can be achieved per-supported-THP-size with one of::
where <size> is the hugepage size being addressed, the available sizes
for which vary by system.
+.. note:: Setting "never" in all sysfs THP controls does **not** disable
+ Transparent Huge Pages globally. This is because ``madvise(...,
+ MADV_COLLAPSE)`` ignores these settings and collapses ranges to
+ PMD-sized huge pages unconditionally.
+
For example::
echo always >/sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled
@@ -187,7 +192,9 @@ madvise
behaviour.
never
- should be self-explanatory.
+ should be self-explanatory. Note that ``madvise(...,
+ MADV_COLLAPSE)`` can still cause transparent huge pages to be
+ obtained even if this mode is specified everywhere.
By default kernel tries to use huge, PMD-mappable zero page on read
page fault to anonymous mapping. It's possible to disable huge zero
@@ -378,7 +385,9 @@ always
Attempt to allocate huge pages every time we need a new page;
never
- Do not allocate huge pages;
+ Do not allocate huge pages. Note that ``madvise(..., MADV_COLLAPSE)``
+ can still cause transparent huge pages to be obtained even if this mode
+ is specified everywhere;
within_size
Only allocate huge page if it will be fully within i_size.
@@ -434,7 +443,9 @@ inherit
have enabled="inherit" and all other hugepage sizes have enabled="never";
never
- Do not allocate <size> huge pages;
+ Do not allocate <size> huge pages. Note that ``madvise(...,
+ MADV_COLLAPSE)`` can still cause transparent huge pages to be obtained
+ even if this mode is specified everywhere;
within_size
Only allocate <size> huge page if it will be fully within i_size.
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 19224eeac1c2..8b49eab937d0 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -177,6 +177,7 @@ core_pattern
%E executable path
%c maximum size of core file by resource limit RLIMIT_CORE
%C CPU the task ran on
+ %F pidfd number
%<OTHER> both are dropped
======== ==========================================
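+
+For example, a pipe handler (the helper path here is hypothetical) can receive
+the crashing process's pidfd number via %F::
+
+    echo '|/usr/local/bin/core-helper %F' > /proc/sys/kernel/core_pattern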
@@ -889,7 +890,7 @@ bit 1 print system memory info
bit 2 print timer info
bit 3 print locks info if ``CONFIG_LOCKDEP`` is on
bit 4 print ftrace buffer
-bit 5 print all printk messages in buffer
+bit 5 replay all messages on consoles at the end of panic
bit 6 print all CPUs backtrace (if available in the arch)
bit 7 print only tasks in uninterruptible (blocked) state
===== ============================================
@@ -899,6 +900,24 @@ So for example to print tasks and memory info on panic, user can::
echo 3 > /proc/sys/kernel/panic_print
+panic_sys_info
+==============
+
+A comma-separated list of extra information to be dumped on panic,
+for example, "tasks,mem,timers,...". It is a human-readable alternative
+to 'panic_print'. Possible values are:
+
+============= ===================================================
+tasks print all tasks info
+mem print system memory info
+timers        print timers info
+locks         print locks info if CONFIG_LOCKDEP is on
+ftrace print ftrace buffer
+all_bt print all CPUs backtrace (if available in the arch)
+blocked_tasks print only tasks in uninterruptible (blocked) state
+============= ===================================================
+
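+For example, to dump task and memory info on panic::
+
+    echo tasks,mem > /proc/sys/kernel/panic_sys_info
+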
+
panic_on_rcu_stall
==================
@@ -1014,30 +1033,26 @@ perf_user_access (arm64 and riscv only)
Controls user space access for reading perf event counters.
-arm64
-=====
-
-The default value is 0 (access disabled).
+* for arm64
+ The default value is 0 (access disabled).
-When set to 1, user space can read performance monitor counter registers
-directly.
+ When set to 1, user space can read performance monitor counter registers
+ directly.
-See Documentation/arch/arm64/perf.rst for more information.
-
-riscv
-=====
+ See Documentation/arch/arm64/perf.rst for more information.
-When set to 0, user space access is disabled.
+* for riscv
+ When set to 0, user space access is disabled.
-The default value is 1, user space can read performance monitor counter
-registers through perf, any direct access without perf intervention will trigger
-an illegal instruction.
+ The default value is 1, user space can read performance monitor counter
+ registers through perf, any direct access without perf intervention will trigger
+ an illegal instruction.
-When set to 2, which enables legacy mode (user space has direct access to cycle
-and insret CSRs only). Note that this legacy value is deprecated and will be
-removed once all user space applications are fixed.
+ When set to 2, legacy mode is enabled (user space has direct access to the
+ cycle and instret CSRs only). Note that this legacy value is deprecated and
+ will be removed once all user space applications are fixed.
-Note that the time CSR is always directly accessible to all modes.
+ Note that the time CSR is always directly accessible to all modes.
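+
+For example, direct user space counter access can be enabled on either
+architecture with::
+
+    echo 1 > /proc/sys/kernel/perf_user_access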
pid_max
=======
@@ -1110,7 +1125,8 @@ printk_ratelimit_burst
While long term we enforce one message per `printk_ratelimit`_
seconds, we do allow a burst of messages to pass through.
``printk_ratelimit_burst`` specifies the number of messages we can
-send before ratelimiting kicks in.
+send before ratelimiting kicks in. After `printk_ratelimit`_ seconds
+have elapsed, another burst of messages may be sent.
The default value is 10 messages.
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index 9bef46151d53..4d71211fdad8 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -465,8 +465,8 @@ The minimum value is 1 (1/1 -> 100%). The value less than 1 completely
disables protection of the pages.
-max_map_count:
-==============
+max_map_count
+=============
This file contains the maximum number of memory map areas a process
may have. Memory map areas are used as a side-effect of calling
@@ -495,8 +495,8 @@ memory allocations.
The default value depends on CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT.
-memory_failure_early_kill:
-==========================
+memory_failure_early_kill
+=========================
Control how to kill processes when uncorrected memory error (typically
a 2bit error in a memory module) is detected in the background by hardware
diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst
index ee9b790c0d72..2f666a7c303c 100644
--- a/Documentation/arch/arm64/booting.rst
+++ b/Documentation/arch/arm64/booting.rst
@@ -223,6 +223,47 @@ Before jumping into the kernel, the following conditions must be met:
- SCR_EL3.HCE (bit 8) must be initialised to 0b1.
+ For systems with a GICv5 interrupt controller to be used in v5 mode:
+
+ - If the kernel is entered at EL1 and EL2 is present:
+
+ - ICH_HFGRTR_EL2.ICC_PPI_ACTIVERn_EL1 (bit 20) must be initialised to 0b1.
+ - ICH_HFGRTR_EL2.ICC_PPI_PRIORITYRn_EL1 (bit 19) must be initialised to 0b1.
+ - ICH_HFGRTR_EL2.ICC_PPI_PENDRn_EL1 (bit 18) must be initialised to 0b1.
+ - ICH_HFGRTR_EL2.ICC_PPI_ENABLERn_EL1 (bit 17) must be initialised to 0b1.
+ - ICH_HFGRTR_EL2.ICC_PPI_HMRn_EL1 (bit 16) must be initialised to 0b1.
+ - ICH_HFGRTR_EL2.ICC_IAFFIDR_EL1 (bit 7) must be initialised to 0b1.
+ - ICH_HFGRTR_EL2.ICC_ICSR_EL1 (bit 6) must be initialised to 0b1.
+ - ICH_HFGRTR_EL2.ICC_PCR_EL1 (bit 5) must be initialised to 0b1.
+ - ICH_HFGRTR_EL2.ICC_HPPIR_EL1 (bit 4) must be initialised to 0b1.
+ - ICH_HFGRTR_EL2.ICC_HAPR_EL1 (bit 3) must be initialised to 0b1.
+ - ICH_HFGRTR_EL2.ICC_CR0_EL1 (bit 2) must be initialised to 0b1.
+ - ICH_HFGRTR_EL2.ICC_IDRn_EL1 (bit 1) must be initialised to 0b1.
+ - ICH_HFGRTR_EL2.ICC_APR_EL1 (bit 0) must be initialised to 0b1.
+
+ - ICH_HFGWTR_EL2.ICC_PPI_ACTIVERn_EL1 (bit 20) must be initialised to 0b1.
+ - ICH_HFGWTR_EL2.ICC_PPI_PRIORITYRn_EL1 (bit 19) must be initialised to 0b1.
+ - ICH_HFGWTR_EL2.ICC_PPI_PENDRn_EL1 (bit 18) must be initialised to 0b1.
+ - ICH_HFGWTR_EL2.ICC_PPI_ENABLERn_EL1 (bit 17) must be initialised to 0b1.
+ - ICH_HFGWTR_EL2.ICC_ICSR_EL1 (bit 6) must be initialised to 0b1.
+ - ICH_HFGWTR_EL2.ICC_PCR_EL1 (bit 5) must be initialised to 0b1.
+ - ICH_HFGWTR_EL2.ICC_CR0_EL1 (bit 2) must be initialised to 0b1.
+ - ICH_HFGWTR_EL2.ICC_APR_EL1 (bit 0) must be initialised to 0b1.
+
+ - ICH_HFGITR_EL2.GICRCDNMIA (bit 10) must be initialised to 0b1.
+ - ICH_HFGITR_EL2.GICRCDIA (bit 9) must be initialised to 0b1.
+ - ICH_HFGITR_EL2.GICCDDI (bit 8) must be initialised to 0b1.
+ - ICH_HFGITR_EL2.GICCDEOI (bit 7) must be initialised to 0b1.
+ - ICH_HFGITR_EL2.GICCDHM (bit 6) must be initialised to 0b1.
+ - ICH_HFGITR_EL2.GICCDRCFG (bit 5) must be initialised to 0b1.
+ - ICH_HFGITR_EL2.GICCDPEND (bit 4) must be initialised to 0b1.
+ - ICH_HFGITR_EL2.GICCDAFF (bit 3) must be initialised to 0b1.
+ - ICH_HFGITR_EL2.GICCDPRI (bit 2) must be initialised to 0b1.
+ - ICH_HFGITR_EL2.GICCDDIS (bit 1) must be initialised to 0b1.
+ - ICH_HFGITR_EL2.GICCDEN (bit 0) must be initialised to 0b1.
+
+ - The DT or ACPI tables must describe a GICv5 interrupt controller.
+
For systems with a GICv3 interrupt controller to be used in v3 mode:
- If EL3 is present:
@@ -388,6 +429,27 @@ Before jumping into the kernel, the following conditions must be met:
- SMCR_EL2.EZT0 (bit 30) must be initialised to 0b1.
+ For CPUs with the Branch Record Buffer Extension (FEAT_BRBE):
+
+ - If EL3 is present:
+
+ - MDCR_EL3.SBRBE (bits 33:32) must be initialised to 0b01 or 0b11.
+
+ - If the kernel is entered at EL1 and EL2 is present:
+
+ - BRBCR_EL2.CC (bit 3) must be initialised to 0b1.
+ - BRBCR_EL2.MPRED (bit 4) must be initialised to 0b1.
+
+ - HDFGRTR_EL2.nBRBDATA (bit 61) must be initialised to 0b1.
+ - HDFGRTR_EL2.nBRBCTL (bit 60) must be initialised to 0b1.
+ - HDFGRTR_EL2.nBRBIDR (bit 59) must be initialised to 0b1.
+
+ - HDFGWTR_EL2.nBRBDATA (bit 61) must be initialised to 0b1.
+ - HDFGWTR_EL2.nBRBCTL (bit 60) must be initialised to 0b1.
+
+ - HFGITR_EL2.nBRBIALL (bit 56) must be initialised to 0b1.
+ - HFGITR_EL2.nBRBINJ (bit 55) must be initialised to 0b1.
+
For CPUs with the Performance Monitors Extension (FEAT_PMUv3p9):
- If EL3 is present:
diff --git a/Documentation/arch/arm64/elf_hwcaps.rst b/Documentation/arch/arm64/elf_hwcaps.rst
index 69d7afe56853..f58ada4d6cb2 100644
--- a/Documentation/arch/arm64/elf_hwcaps.rst
+++ b/Documentation/arch/arm64/elf_hwcaps.rst
@@ -435,6 +435,12 @@ HWCAP2_SME_SF8DP4
HWCAP2_POE
Functionality implied by ID_AA64MMFR3_EL1.S1POE == 0b0001.
+HWCAP3_MTE_FAR
+ Functionality implied by ID_AA64PFR2_EL1.MTEFAR == 0b0001.
+
+HWCAP3_MTE_STORE_ONLY
+ Functionality implied by ID_AA64PFR2_EL1.MTESTOREONLY == 0b0001.
+
4. Unused AT_HWCAP bits
-----------------------
diff --git a/Documentation/arch/arm64/tagged-pointers.rst b/Documentation/arch/arm64/tagged-pointers.rst
index 81b6c2a770dd..f87a925ca9a5 100644
--- a/Documentation/arch/arm64/tagged-pointers.rst
+++ b/Documentation/arch/arm64/tagged-pointers.rst
@@ -60,11 +60,12 @@ that signal handlers in applications making use of tags cannot rely
on the tag information for user virtual addresses being maintained
in these fields unless the flag was set.
-Due to architecture limitations, bits 63:60 of the fault address
-are not preserved in response to synchronous tag check faults
-(SEGV_MTESERR) even if SA_EXPOSE_TAGBITS was set. Applications should
-treat the values of these bits as undefined in order to accommodate
-future architecture revisions which may preserve the bits.
+If FEAT_MTE_TAGGED_FAR (Armv8.9) is supported, bits 63:60 of the fault address
+are preserved in response to synchronous tag check faults (SEGV_MTESERR);
+otherwise they are not preserved even if SA_EXPOSE_TAGBITS was set.
+Applications should interpret the values of these bits based on the presence
+of HWCAP3_MTE_FAR: if it is present, the values of these bits are valid;
+otherwise they should be considered undefined.
For signals raised in response to watchpoint debug exceptions, the
tag information will be preserved regardless of the SA_EXPOSE_TAGBITS
diff --git a/Documentation/arch/powerpc/index.rst b/Documentation/arch/powerpc/index.rst
index 0560cbae5fa1..53fc9f89f3e4 100644
--- a/Documentation/arch/powerpc/index.rst
+++ b/Documentation/arch/powerpc/index.rst
@@ -19,6 +19,7 @@ powerpc
elf_hwcaps
elfnote
firmware-assisted-dump
+ htm
hvcs
imc
isa-versions
diff --git a/Documentation/arch/s390/driver-model.rst b/Documentation/arch/s390/driver-model.rst
index ad18f129fb0b..e7488f02bb78 100644
--- a/Documentation/arch/s390/driver-model.rst
+++ b/Documentation/arch/s390/driver-model.rst
@@ -305,24 +305,3 @@ xpram shows up under devices/system/ as 'xpram'.
For each cpu, a directory is created under devices/system/cpu/. Each cpu has an
attribute 'online' which can be 0 or 1.
-
-
-4. Other devices
-----------------
-
-4.1 Netiucv
------------
-
-The netiucv driver creates an attribute 'connection' under
-bus/iucv/drivers/netiucv. Piping to this attribute creates a new netiucv
-connection to the specified host.
-
-Netiucv connections show up under devices/iucv/ as "netiucv<ifnum>". The interface
-number is assigned sequentially to the connections defined via the 'connection'
-attribute.
-
-user
- - shows the connection partner.
-
-buffer
- - maximum buffer size. Pipe to it to change buffer size.
diff --git a/Documentation/bpf/bpf_devel_QA.rst b/Documentation/bpf/bpf_devel_QA.rst
index 0acb4c9b8d90..45bc5c5cd793 100644
--- a/Documentation/bpf/bpf_devel_QA.rst
+++ b/Documentation/bpf/bpf_devel_QA.rst
@@ -611,9 +611,10 @@ Q: I have added a new BPF instruction to the kernel, how can I integrate
it into LLVM?
A: LLVM has a ``-mcpu`` selector for the BPF back end in order to allow
-the selection of BPF instruction set extensions. By default the
-``generic`` processor target is used, which is the base instruction set
-(v1) of BPF.
+the selection of BPF instruction set extensions. Before LLVM version 20,
+the ``generic`` processor target was used by default, which is the base
+instruction set (v1) of BPF. Since LLVM 20, the default processor target
+has changed to instruction set v3.
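+
+For example, a particular instruction set can be selected explicitly when
+compiling a BPF program with clang (a usage sketch)::
+
+    clang --target=bpf -mcpu=v1 -O2 -c prog.c -o prog.o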
LLVM has an option to select ``-mcpu=probe`` where it will probe the host
kernel for supported BPF instruction set extensions and selects the
diff --git a/Documentation/bpf/standardization/instruction-set.rst b/Documentation/bpf/standardization/instruction-set.rst
index fbe975585236..39c74611752b 100644
--- a/Documentation/bpf/standardization/instruction-set.rst
+++ b/Documentation/bpf/standardization/instruction-set.rst
@@ -350,9 +350,9 @@ Underflow and overflow are allowed during arithmetic operations, meaning
the 64-bit or 32-bit value will wrap. If BPF program execution would
result in division by zero, the destination register is instead set to zero.
Otherwise, for ``ALU64``, if execution would result in ``LLONG_MIN``
-dividing -1, the desination register is instead set to ``LLONG_MIN``. For
-``ALU``, if execution would result in ``INT_MIN`` dividing -1, the
-desination register is instead set to ``INT_MIN``.
+divided by -1, the destination register is instead set to ``LLONG_MIN``. For
+``ALU``, if execution would result in ``INT_MIN`` divided by -1, the
+destination register is instead set to ``INT_MIN``.
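+
+For example, with ``ALU64``, dividing ``LLONG_MIN`` by -1 (which would
+otherwise overflow) leaves the destination register set to ``LLONG_MIN``,
+while division by zero sets it to zero.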
If execution would result in modulo by zero, for ``ALU64`` the value of
the destination register is unchanged whereas for ``ALU`` the upper
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 12de52a2b17e..700516238d3f 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -1,25 +1,87 @@
-# -*- coding: utf-8 -*-
-#
-# The Linux Kernel documentation build configuration file, created by
-# sphinx-quickstart on Fri Feb 12 13:51:46 2016.
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
+# SPDX-License-Identifier: GPL-2.0-only
+# pylint: disable=C0103,C0209
+
+"""
+The Linux Kernel documentation build configuration file.
+"""
-import sys
import os
-import sphinx
import shutil
+import sys
+
+import sphinx
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath("sphinx"))
+
+from load_config import loadConfig # pylint: disable=C0413,E0401
+
+# Minimum supported version
+needs_sphinx = "3.4.3"
+
+# Get Sphinx version
+major, minor, patch = sphinx.version_info[:3] # pylint: disable=I1101
+
+# include_patterns was added in Sphinx 5.1
+if (major < 5) or (major == 5 and minor < 1):
+ has_include_patterns = False
+else:
+ has_include_patterns = True
+ # Include patterns that don't contain directory names, in glob format
+ include_patterns = ["**.rst"]
+
+# Location of Documentation/ directory
+doctree = os.path.abspath(".")
+
+# Exclude patterns that don't contain directory names, in glob format.
+exclude_patterns = []
+
+# List of patterns that contain directory names in glob format.
+dyn_include_patterns = []
+dyn_exclude_patterns = ["output"]
+
+# Properly handle include/exclude patterns
+# ----------------------------------------
+
+def update_patterns(app, config):
+ """
+ In Sphinx, all directories are relative to what is passed as the
+ SOURCEDIR parameter to sphinx-build. Due to that, all patterns
+ that contain directory names need to be set dynamically, after
+ converting them to a relative path.
+
+ As Sphinx doesn't include any patterns outside SOURCEDIR, we should
+ exclude relative patterns that start with "../".
+ """
+
+ # setup include_patterns dynamically
+ if has_include_patterns:
+ for p in dyn_include_patterns:
+ full = os.path.join(doctree, p)
+
+ rel_path = os.path.relpath(full, start=app.srcdir)
+ if rel_path.startswith("../"):
+ continue
+
+ config.include_patterns.append(rel_path)
+
+ # setup exclude_patterns dynamically
+ for p in dyn_exclude_patterns:
+ full = os.path.join(doctree, p)
+
+ rel_path = os.path.relpath(full, start=app.srcdir)
+ if rel_path.startswith("../"):
+ continue
+
+ config.exclude_patterns.append(rel_path)
+
# helper
# ------
+
def have_command(cmd):
"""Search ``cmd`` in the ``PATH`` environment.
@@ -28,24 +90,23 @@ def have_command(cmd):
"""
return shutil.which(cmd) is not None
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath('sphinx'))
-from load_config import loadConfig
# -- General configuration ------------------------------------------------
-# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '3.4.3'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include',
- 'kfigure', 'sphinx.ext.ifconfig', 'automarkup',
- 'maintainers_include', 'sphinx.ext.autosectionlabel',
- 'kernel_abi', 'kernel_feat', 'translations']
+# Add any Sphinx extensions in alphabetic order
+extensions = [
+ "automarkup",
+ "kernel_abi",
+ "kerneldoc",
+ "kernel_feat",
+ "kernel_include",
+ "kfigure",
+ "maintainers_include",
+ "rstFlatTable",
+ "sphinx.ext.autosectionlabel",
+ "sphinx.ext.ifconfig",
+ "translations",
+]
# Since Sphinx version 3, the C function parser is more pedantic with regards
# to type checking. Due to that, having macros at c:function cause problems.
@@ -120,28 +181,28 @@ autosectionlabel_maxdepth = 2
# Load math renderer:
# For html builder, load imgmath only when its dependencies are met.
# mathjax is the default math renderer since Sphinx 1.8.
-have_latex = have_command('latex')
-have_dvipng = have_command('dvipng')
+have_latex = have_command("latex")
+have_dvipng = have_command("dvipng")
load_imgmath = have_latex and have_dvipng
# Respect SPHINX_IMGMATH (for html docs only)
-if 'SPHINX_IMGMATH' in os.environ:
- env_sphinx_imgmath = os.environ['SPHINX_IMGMATH']
- if 'yes' in env_sphinx_imgmath:
+if "SPHINX_IMGMATH" in os.environ:
+ env_sphinx_imgmath = os.environ["SPHINX_IMGMATH"]
+ if "yes" in env_sphinx_imgmath:
load_imgmath = True
- elif 'no' in env_sphinx_imgmath:
+ elif "no" in env_sphinx_imgmath:
load_imgmath = False
else:
sys.stderr.write("Unknown env SPHINX_IMGMATH=%s ignored.\n" % env_sphinx_imgmath)
if load_imgmath:
extensions.append("sphinx.ext.imgmath")
- math_renderer = 'imgmath'
+ math_renderer = "imgmath"
else:
- math_renderer = 'mathjax'
+ math_renderer = "mathjax"
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['sphinx/templates']
+templates_path = ["sphinx/templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
@@ -149,15 +210,15 @@ templates_path = ['sphinx/templates']
source_suffix = '.rst'
# The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# General information about the project.
-project = 'The Linux Kernel'
-copyright = 'The kernel development community'
-author = 'The kernel development community'
+project = "The Linux Kernel"
+copyright = "The kernel development community" # pylint: disable=W0622
+author = "The kernel development community"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -172,86 +233,86 @@ author = 'The kernel development community'
try:
makefile_version = None
makefile_patchlevel = None
- for line in open('../Makefile'):
- key, val = [x.strip() for x in line.split('=', 2)]
- if key == 'VERSION':
- makefile_version = val
- elif key == 'PATCHLEVEL':
- makefile_patchlevel = val
- if makefile_version and makefile_patchlevel:
- break
-except:
+ with open("../Makefile", encoding="utf-8") as fp:
+ for line in fp:
+ key, val = [x.strip() for x in line.split("=", 2)]
+ if key == "VERSION":
+ makefile_version = val
+ elif key == "PATCHLEVEL":
+ makefile_patchlevel = val
+ if makefile_version and makefile_patchlevel:
+ break
+except Exception:
pass
finally:
if makefile_version and makefile_patchlevel:
- version = release = makefile_version + '.' + makefile_patchlevel
+ version = release = makefile_version + "." + makefile_patchlevel
else:
version = release = "unknown version"
-#
-# HACK: there seems to be no easy way for us to get at the version and
-# release information passed in from the makefile...so go pawing through the
-# command-line options and find it for ourselves.
-#
+
def get_cline_version():
- c_version = c_release = ''
+ """
+ HACK: There seems to be no easy way for us to get at the version and
+ release information passed in from the makefile...so go pawing through the
+ command-line options and find it for ourselves.
+ """
+
+ c_version = c_release = ""
for arg in sys.argv:
- if arg.startswith('version='):
+ if arg.startswith("version="):
c_version = arg[8:]
- elif arg.startswith('release='):
+ elif arg.startswith("release="):
c_release = arg[8:]
if c_version:
if c_release:
- return c_version + '-' + c_release
+ return c_version + "-" + c_release
return c_version
- return version # Whatever we came up with before
+ return version # Whatever we came up with before
+
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
-language = 'en'
+language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
-#today = ''
+# today = ''
# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = ['output']
+# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use for all
# documents.
-#default_role = None
+# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
-#show_authors = False
+# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
+# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
-primary_domain = 'c'
-highlight_language = 'none'
+primary_domain = "c"
+highlight_language = "none"
# -- Options for HTML output ----------------------------------------------
@@ -259,43 +320,45 @@ highlight_language = 'none'
# a list of builtin themes.
# Default theme
-html_theme = 'alabaster'
+html_theme = "alabaster"
html_css_files = []
if "DOCS_THEME" in os.environ:
html_theme = os.environ["DOCS_THEME"]
-if html_theme == 'sphinx_rtd_theme' or html_theme == 'sphinx_rtd_dark_mode':
+if html_theme in ["sphinx_rtd_theme", "sphinx_rtd_dark_mode"]:
# Read the Docs theme
try:
import sphinx_rtd_theme
+
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_css_files = [
- 'theme_overrides.css',
+ "theme_overrides.css",
]
# Read the Docs dark mode override theme
- if html_theme == 'sphinx_rtd_dark_mode':
+ if html_theme == "sphinx_rtd_dark_mode":
try:
- import sphinx_rtd_dark_mode
- extensions.append('sphinx_rtd_dark_mode')
+ import sphinx_rtd_dark_mode # pylint: disable=W0611
+
+ extensions.append("sphinx_rtd_dark_mode")
except ImportError:
- html_theme == 'sphinx_rtd_theme'
+ html_theme = "sphinx_rtd_theme"
- if html_theme == 'sphinx_rtd_theme':
- # Add color-specific RTD normal mode
- html_css_files.append('theme_rtd_colors.css')
+ if html_theme == "sphinx_rtd_theme":
+ # Add color-specific RTD normal mode
+ html_css_files.append("theme_rtd_colors.css")
html_theme_options = {
- 'navigation_depth': -1,
+ "navigation_depth": -1,
}
except ImportError:
- html_theme = 'alabaster'
+ html_theme = "alabaster"
if "DOCS_CSS" in os.environ:
css = os.environ["DOCS_CSS"].split(" ")
@@ -303,14 +366,14 @@ if "DOCS_CSS" in os.environ:
for l in css:
html_css_files.append(l)
-if html_theme == 'alabaster':
+if html_theme == "alabaster":
html_theme_options = {
- 'description': get_cline_version(),
- 'page_width': '65em',
- 'sidebar_width': '15em',
- 'fixed_sidebar': 'true',
- 'font_size': 'inherit',
- 'font_family': 'serif',
+ "description": get_cline_version(),
+ "page_width": "65em",
+ "sidebar_width": "15em",
+ "fixed_sidebar": "true",
+ "font_size": "inherit",
+ "font_family": "serif",
}
sys.stderr.write("Using %s theme\n" % html_theme)
@@ -318,104 +381,79 @@ sys.stderr.write("Using %s theme\n" % html_theme)
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['sphinx-static']
+html_static_path = ["sphinx-static"]
# If true, Docutils "smart quotes" will be used to convert quotes and dashes
# to typographically correct entities. However, conversion of "--" to "—"
# is not always what we want, so enable only quotes.
-smartquotes_action = 'q'
+smartquotes_action = "q"
# Custom sidebar templates, maps document names to template names.
# Note that the RTD theme ignores this
-html_sidebars = { '**': ['searchbox.html', 'kernel-toc.html', 'sourcelink.html']}
+html_sidebars = {"**": ["searchbox.html",
+ "kernel-toc.html",
+ "sourcelink.html"]}
# about.html is available for alabaster theme. Add it at the front.
-if html_theme == 'alabaster':
- html_sidebars['**'].insert(0, 'about.html')
+if html_theme == "alabaster":
+ html_sidebars["**"].insert(0, "about.html")
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-html_logo = 'images/logo.svg'
+html_logo = "images/logo.svg"
# Output file base name for HTML help builder.
-htmlhelp_basename = 'TheLinuxKerneldoc'
+htmlhelp_basename = "TheLinuxKerneldoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
- 'papersize': 'a4paper',
-
+ "papersize": "a4paper",
# The font size ('10pt', '11pt' or '12pt').
- 'pointsize': '11pt',
-
+ "pointsize": "11pt",
# Latex figure (float) alignment
- #'figure_align': 'htbp',
-
+ # 'figure_align': 'htbp',
# Don't mangle with UTF-8 chars
- 'inputenc': '',
- 'utf8extra': '',
-
+ "inputenc": "",
+ "utf8extra": "",
# Set document margins
- 'sphinxsetup': '''
+ "sphinxsetup": """
hmargin=0.5in, vmargin=1in,
parsedliteralwraps=true,
verbatimhintsturnover=false,
- ''',
-
+ """,
#
# Some of our authors are fond of deep nesting; tell latex to
# cope.
#
- 'maxlistdepth': '10',
-
+ "maxlistdepth": "10",
# For CJK One-half spacing, need to be in front of hyperref
- 'extrapackages': r'\usepackage{setspace}',
-
+ "extrapackages": r"\usepackage{setspace}",
# Additional stuff for the LaTeX preamble.
- 'preamble': '''
+ "preamble": """
% Use some font with UTF-8 support with XeLaTeX
\\usepackage{fontspec}
\\setsansfont{DejaVu Sans}
\\setromanfont{DejaVu Serif}
\\setmonofont{DejaVu Sans Mono}
- ''',
+ """,
}
# Load kerneldoc specific LaTeX settings
-latex_elements['preamble'] += '''
+latex_elements["preamble"] += """
% Load kerneldoc specific LaTeX settings
- \\input{kerneldoc-preamble.sty}
-'''
-
-# With Sphinx 1.6, it is possible to change the Bg color directly
-# by using:
-# \definecolor{sphinxnoteBgColor}{RGB}{204,255,255}
-# \definecolor{sphinxwarningBgColor}{RGB}{255,204,204}
-# \definecolor{sphinxattentionBgColor}{RGB}{255,255,204}
-# \definecolor{sphinximportantBgColor}{RGB}{192,255,204}
-#
-# However, it require to use sphinx heavy box with:
-#
-# \renewenvironment{sphinxlightbox} {%
-# \\begin{sphinxheavybox}
-# }
-# \\end{sphinxheavybox}
-# }
-#
-# Unfortunately, the implementation is buggy: if a note is inside a
-# table, it isn't displayed well. So, for now, let's use boring
-# black and white notes.
+ \\input{kerneldoc-preamble.sty}
+"""
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# Sorted in alphabetical order
-latex_documents = [
-]
+latex_documents = []
# Add all other index files from Documentation/ subdirectories
-for fn in os.listdir('.'):
+for fn in os.listdir("."):
doc = os.path.join(fn, "index")
if os.path.exists(doc + ".rst"):
has = False
@@ -424,34 +462,39 @@ for fn in os.listdir('.'):
has = True
break
if not has:
- latex_documents.append((doc, fn + '.tex',
- 'Linux %s Documentation' % fn.capitalize(),
- 'The kernel development community',
- 'manual'))
+ latex_documents.append(
+ (
+ doc,
+ fn + ".tex",
+ "Linux %s Documentation" % fn.capitalize(),
+ "The kernel development community",
+ "manual",
+ )
+ )
# The name of an image file (relative to this directory) to place at the top of
# the title page.
-#latex_logo = None
+# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
# If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
# If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
# Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
# If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
# Additional LaTeX stuff to be copied to build directory
latex_additional_files = [
- 'sphinx/kerneldoc-preamble.sty',
+ "sphinx/kerneldoc-preamble.sty",
]
@@ -460,12 +503,11 @@ latex_additional_files = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
- (master_doc, 'thelinuxkernel', 'The Linux Kernel Documentation',
- [author], 1)
+ (master_doc, "thelinuxkernel", "The Linux Kernel Documentation", [author], 1)
]
# If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
@@ -473,11 +515,15 @@ man_pages = [
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
-texinfo_documents = [
- (master_doc, 'TheLinuxKernel', 'The Linux Kernel Documentation',
- author, 'TheLinuxKernel', 'One line description of project.',
- 'Miscellaneous'),
-]
-texinfo_documents = [
-    (master_doc, 'TheLinuxKernel', 'The Linux Kernel Documentation',
-     author, 'TheLinuxKernel', 'One line description of project.',
-     'Miscellaneous'),
-]
+texinfo_documents = [
+    (
+        master_doc,
+        "TheLinuxKernel",
+        "The Linux Kernel Documentation",
+        author,
+        "TheLinuxKernel",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
+]
# -- Options for Epub output ----------------------------------------------
@@ -488,9 +534,9 @@ epub_publisher = author
epub_copyright = copyright
# A list of files that should not be packed into the epub file.
-epub_exclude_files = ['search.html']
+epub_exclude_files = ["search.html"]
-#=======
+# =======
# rst2pdf
#
# Grouping the document tree into PDF files. List of tuples
@@ -502,17 +548,23 @@ epub_exclude_files = ['search.html']
# multiple PDF files here actually tries to get the cross-referencing right
# *between* PDF files.
pdf_documents = [
- ('kernel-documentation', u'Kernel', u'Kernel', u'J. Random Bozo'),
+ ("kernel-documentation", "Kernel", "Kernel", "J. Random Bozo"),
]
# kernel-doc extension configuration for running Sphinx directly (e.g. by Read
# the Docs). In a normal build, these are supplied from the Makefile via command
# line arguments.
-kerneldoc_bin = '../scripts/kernel-doc.py'
-kerneldoc_srctree = '..'
+kerneldoc_bin = "../scripts/kernel-doc.py"
+kerneldoc_srctree = ".."
# ------------------------------------------------------------------------------
# Since loadConfig overwrites settings from the global namespace, it has to be
# the last statement in the conf.py file
# ------------------------------------------------------------------------------
loadConfig(globals())
+
+
+def setup(app):
+ """Patterns need to be updated at init time on older Sphinx versions"""
+
+    app.connect("config-inited", update_patterns)
diff --git a/Documentation/core-api/dma-api-howto.rst b/Documentation/core-api/dma-api-howto.rst
index 0bf31b6c4383..96fce2a9aa90 100644
--- a/Documentation/core-api/dma-api-howto.rst
+++ b/Documentation/core-api/dma-api-howto.rst
@@ -155,7 +155,7 @@ a device with limitations, it needs to be decreased.
Special note about PCI: PCI-X specification requires PCI-X devices to support
64-bit addressing (DAC) for all transactions. And at least one platform (SGI
-SN2) requires 64-bit consistent allocations to operate correctly when the IO
+SN2) requires 64-bit coherent allocations to operate correctly when the IO
bus is in PCI-X mode.
For correct operation, you must set the DMA mask to inform the kernel about
@@ -174,7 +174,7 @@ used instead:
int dma_set_mask(struct device *dev, u64 mask);
- The setup for consistent allocations is performed via a call
+ The setup for coherent allocations is performed via a call
to dma_set_coherent_mask()::
int dma_set_coherent_mask(struct device *dev, u64 mask);
@@ -241,7 +241,7 @@ it would look like this::
The coherent mask will always be able to set the same or a smaller mask as
the streaming mask. However for the rare case that a device driver only
-uses consistent allocations, one would have to check the return value from
+uses coherent allocations, one would have to check the return value from
dma_set_coherent_mask().
Finally, if your device can only drive the low 24-bits of
@@ -298,20 +298,20 @@ Types of DMA mappings
There are two types of DMA mappings:
-- Consistent DMA mappings which are usually mapped at driver
+- Coherent DMA mappings which are usually mapped at driver
initialization, unmapped at the end and for which the hardware should
guarantee that the device and the CPU can access the data
in parallel and will see updates made by each other without any
explicit software flushing.
- Think of "consistent" as "synchronous" or "coherent".
+ Think of "coherent" as "synchronous".
- The current default is to return consistent memory in the low 32
+ The current default is to return coherent memory in the low 32
bits of the DMA space. However, for future compatibility you should
- set the consistent mask even if this default is fine for your
+ set the coherent mask even if this default is fine for your
driver.
- Good examples of what to use consistent mappings for are:
+ Good examples of what to use coherent mappings for are:
- Network card DMA ring descriptors.
- SCSI adapter mailbox command data structures.
@@ -320,13 +320,13 @@ There are two types of DMA mappings:
The invariant these examples all require is that any CPU store
to memory is immediately visible to the device, and vice
- versa. Consistent mappings guarantee this.
+ versa. Coherent mappings guarantee this.
.. important::
- Consistent DMA memory does not preclude the usage of
+ Coherent DMA memory does not preclude the usage of
proper memory barriers. The CPU may reorder stores to
- consistent memory just as it may normal memory. Example:
+ coherent memory just as it may normal memory. Example:
if it is important for the device to see the first word
of a descriptor updated before the second, you must do
something like::
@@ -365,10 +365,10 @@ Also, systems with caches that aren't DMA-coherent will work better
when the underlying buffers don't share cache lines with other data.
-Using Consistent DMA mappings
-=============================
+Using Coherent DMA mappings
+===========================
-To allocate and map large (PAGE_SIZE or so) consistent DMA regions,
+To allocate and map large (PAGE_SIZE or so) coherent DMA regions,
you should do::
dma_addr_t dma_handle;
@@ -385,10 +385,10 @@ __get_free_pages() (but takes size instead of a page order). If your
driver needs regions sized smaller than a page, you may prefer using
the dma_pool interface, described below.
-The consistent DMA mapping interfaces, will by default return a DMA address
+The coherent DMA mapping interfaces will, by default, return a DMA address
which is 32-bit addressable. Even if the device indicates (via the DMA mask)
-that it may address the upper 32-bits, consistent allocation will only
-return > 32-bit addresses for DMA if the consistent DMA mask has been
+that it may address the upper 32-bits, coherent allocation will only
+return > 32-bit addresses for DMA if the coherent DMA mask has been
explicitly changed via dma_set_coherent_mask(). This is true of the
dma_pool interface as well.
@@ -497,7 +497,7 @@ program address space. Such platforms can and do report errors in the
kernel logs when the DMA controller hardware detects violation of the
permission setting.
-Only streaming mappings specify a direction, consistent mappings
+Only streaming mappings specify a direction, coherent mappings
implicitly have a direction attribute setting of
DMA_BIDIRECTIONAL.
diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst
index 2ad08517e626..3087bea715ed 100644
--- a/Documentation/core-api/dma-api.rst
+++ b/Documentation/core-api/dma-api.rst
@@ -8,15 +8,15 @@ This document describes the DMA API. For a more gentle introduction
of the API (and actual examples), see Documentation/core-api/dma-api-howto.rst.
This API is split into two pieces. Part I describes the basic API.
-Part II describes extensions for supporting non-consistent memory
+Part II describes extensions for supporting non-coherent memory
machines. Unless you know that your driver absolutely has to support
-non-consistent platforms (this is usually only legacy platforms) you
+non-coherent platforms (this is usually only legacy platforms) you
should only use the API described in part I.
-Part I - dma_API
+Part I - DMA API
----------------
-To get the dma_API, you must #include <linux/dma-mapping.h>. This
+To get the DMA API, you must #include <linux/dma-mapping.h>. This
provides dma_addr_t and the interfaces described below.
A dma_addr_t can hold any valid DMA address for the platform. It can be
@@ -33,13 +33,13 @@ Part Ia - Using large DMA-coherent buffers
dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
-Consistent memory is memory for which a write by either the device or
+Coherent memory is memory for which a write by either the device or
the processor can immediately be read by the processor or device
without having to worry about caching effects. (You may however need
to make sure to flush the processor's write buffers before telling
devices to read that memory.)
-This routine allocates a region of <size> bytes of consistent memory.
+This routine allocates a region of <size> bytes of coherent memory.
It returns a pointer to the allocated region (in the processor's virtual
address space) or NULL if the allocation failed.
@@ -48,15 +48,14 @@ It also returns a <dma_handle> which may be cast to an unsigned integer the
same width as the bus and given to the device as the DMA address base of
the region.
-Note: consistent memory can be expensive on some platforms, and the
+Note: coherent memory can be expensive on some platforms, and the
minimum allocation length may be as big as a page, so you should
-consolidate your requests for consistent memory as much as possible.
+consolidate your requests for coherent memory as much as possible.
The simplest way to do that is to use the dma_pool calls (see below).
-The flag parameter (dma_alloc_coherent() only) allows the caller to
-specify the ``GFP_`` flags (see kmalloc()) for the allocation (the
-implementation may choose to ignore flags that affect the location of
-the returned memory, like GFP_DMA).
+The flag parameter allows the caller to specify the ``GFP_`` flags (see
+kmalloc()) for the allocation (the implementation may ignore flags that affect
+the location of the returned memory, like GFP_DMA).
::
@@ -64,19 +63,18 @@ the returned memory, like GFP_DMA).
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
-Free a region of consistent memory you previously allocated. dev,
-size and dma_handle must all be the same as those passed into
-dma_alloc_coherent(). cpu_addr must be the virtual address returned by
-the dma_alloc_coherent().
+Free a previously allocated region of coherent memory. dev, size and dma_handle
+must all be the same as those passed into dma_alloc_coherent(). cpu_addr must
+be the virtual address returned by dma_alloc_coherent().
-Note that unlike their sibling allocation calls, these routines
-may only be called with IRQs enabled.
+Note that unlike the sibling allocation call, this routine may only be called
+with IRQs enabled.
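+
+A sketch of the allocate/use/free pairing (``dev`` and ``size`` here are
+illustrative)::
+
+	void *cpu_addr;
+	dma_addr_t dma;
+
+	cpu_addr = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+	if (!cpu_addr)
+		return -ENOMEM;
+
+	/* cpu_addr is used by the CPU, dma is handed to the device */
+
+	dma_free_coherent(dev, size, cpu_addr, dma);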
Part Ib - Using small DMA-coherent buffers
------------------------------------------
-To get this part of the dma_API, you must #include <linux/dmapool.h>
+To get this part of the DMA API, you must #include <linux/dmapool.h>
Many drivers need lots of small DMA-coherent memory regions for DMA
descriptors or I/O buffers. Rather than allocating in units of a page
@@ -85,78 +83,29 @@ much like a struct kmem_cache, except that they use the DMA-coherent allocator,
not __get_free_pages(). Also, they understand common hardware constraints
for alignment, like queue heads needing to be aligned on N-byte boundaries.
+.. kernel-doc:: mm/dmapool.c
+ :export:
-::
-
- struct dma_pool *
- dma_pool_create(const char *name, struct device *dev,
- size_t size, size_t align, size_t alloc);
-
-dma_pool_create() initializes a pool of DMA-coherent buffers
-for use with a given device. It must be called in a context which
-can sleep.
-
-The "name" is for diagnostics (like a struct kmem_cache name); dev and size
-are like what you'd pass to dma_alloc_coherent(). The device's hardware
-alignment requirement for this type of data is "align" (which is expressed
-in bytes, and must be a power of two). If your device has no boundary
-crossing restrictions, pass 0 for alloc; passing 4096 says memory allocated
-from this pool must not cross 4KByte boundaries.
-
-::
-
- void *
- dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
- dma_addr_t *handle)
-
-Wraps dma_pool_alloc() and also zeroes the returned memory if the
-allocation attempt succeeded.
-
-
-::
-
- void *
- dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags,
- dma_addr_t *dma_handle);
-
-This allocates memory from the pool; the returned memory will meet the
-size and alignment requirements specified at creation time. Pass
-GFP_ATOMIC to prevent blocking, or if it's permitted (not
-in_interrupt, not holding SMP locks), pass GFP_KERNEL to allow
-blocking. Like dma_alloc_coherent(), this returns two values: an
-address usable by the CPU, and the DMA address usable by the pool's
-device.
-
-::
-
- void
- dma_pool_free(struct dma_pool *pool, void *vaddr,
- dma_addr_t addr);
-
-This puts memory back into the pool. The pool is what was passed to
-dma_pool_alloc(); the CPU (vaddr) and DMA addresses are what
-were returned when that routine allocated the memory being freed.
-
-::
-
- void
- dma_pool_destroy(struct dma_pool *pool);
-
-dma_pool_destroy() frees the resources of the pool. It must be
-called in a context which can sleep. Make sure you've freed all allocated
-memory back to the pool before you destroy it.
+.. kernel-doc:: include/linux/dmapool.h
Part Ic - DMA addressing limitations
------------------------------------
+The DMA mask is a bit mask of the addressable region for the device. In other
+words, if applying the DMA mask (a bitwise AND operation) to the DMA address of
+a memory region does not clear any bits in the address, then the device can
+perform DMA to that memory region.
+
+All of the mask-setting functions below may fail if the requested mask cannot
+be used with the device, or if the device is not capable of doing DMA.
+
::
int
dma_set_mask_and_coherent(struct device *dev, u64 mask)
-Checks to see if the mask is possible and updates the device
-streaming and coherent DMA mask parameters if it is.
+Updates both streaming and coherent DMA masks.
Returns: 0 if successful and a negative error if not.
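+
+A typical probe-time sequence (a sketch: prefer 64-bit addressing, fall back
+to 32-bit)::
+
+	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
+	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
+		return -EIO;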
@@ -165,8 +114,7 @@ Returns: 0 if successful and a negative error if not.
int
dma_set_mask(struct device *dev, u64 mask)
-Checks to see if the mask is possible and updates the device
-parameters if it is.
+Updates only the streaming DMA mask.
Returns: 0 if successful and a negative error if not.
@@ -175,8 +123,7 @@ Returns: 0 if successful and a negative error if not.
int
dma_set_coherent_mask(struct device *dev, u64 mask)
-Checks to see if the mask is possible and updates the device
-parameters if it is.
+Updates only the coherent DMA mask.
Returns: 0 if successful and a negative error if not.
@@ -231,12 +178,32 @@ transfer memory ownership. Returns %false if those calls can be skipped.
unsigned long
dma_get_merge_boundary(struct device *dev);
-Returns the DMA merge boundary. If the device cannot merge any the DMA address
+Returns the DMA merge boundary. If the device cannot merge any DMA address
segments, the function returns 0.
Part Id - Streaming DMA mappings
--------------------------------
+Streaming DMA allows mapping an existing buffer for DMA transfers and then
+unmapping it when finished. Map functions are not guaranteed to succeed, so
+the return value must be checked.
+
+.. note::
+
+ In particular, mapping may fail for memory not addressable by the
+ device, e.g. if it is not within the DMA mask of the device and/or a
+ connecting bus bridge. Streaming DMA functions try to overcome such
+ addressing constraints, either by using an IOMMU (a device which maps
+ I/O DMA addresses to physical memory addresses), or by copying the
+ data to/from a bounce buffer if the kernel is configured with a
+ :doc:`SWIOTLB <swiotlb>`. However, these methods are not always
+ available, and even if they are, they may still fail for a number of
+ reasons.
+
+ In short, a device driver may need to be wary of where buffers are
+ located in physical memory, especially if the DMA mask is less than 32
+ bits.
+
::
dma_addr_t
@@ -246,9 +213,7 @@ Part Id - Streaming DMA mappings
Maps a piece of processor virtual memory so it can be accessed by the
device and returns the DMA address of the memory.
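+
+A sketch of the map-and-check pattern, using dma_mapping_error() (described
+further below) to test the returned address; ``buf`` and ``size`` are
+illustrative::
+
+	dma_addr_t dma;
+
+	dma = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma))
+		return -ENOMEM;	/* recovery policy is up to the driver */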
-The direction for both APIs may be converted freely by casting.
-However the dma_API uses a strongly typed enumerator for its
-direction:
+The DMA API uses a strongly typed enumerator for its direction:
======================= =============================================
DMA_NONE no direction (used for debugging)
@@ -259,31 +224,13 @@ DMA_BIDIRECTIONAL direction isn't known
.. note::
- Not all memory regions in a machine can be mapped by this API.
- Further, contiguous kernel virtual space may not be contiguous as
+ Contiguous kernel virtual space may not be contiguous in
physical memory. Since this API does not provide any scatter/gather
capability, it will fail if the user tries to map a non-physically
contiguous piece of memory. For this reason, memory to be mapped by
this API should be obtained from sources which guarantee it to be
physically contiguous (like kmalloc).
- Further, the DMA address of the memory must be within the
- dma_mask of the device (the dma_mask is a bit mask of the
- addressable region for the device, i.e., if the DMA address of
- the memory ANDed with the dma_mask is still equal to the DMA
- address, then the device can perform DMA to the memory). To
- ensure that the memory allocated by kmalloc is within the dma_mask,
- the driver may specify various platform-dependent flags to restrict
- the DMA address range of the allocation (e.g., on x86, GFP_DMA
- guarantees to be within the first 16MB of available DMA addresses,
- as required by ISA devices).
-
- Note also that the above constraints on physical contiguity and
- dma_mask may not apply if the platform has an IOMMU (a device which
- maps an I/O DMA address to a physical memory address). However, to be
- portable, device driver writers may *not* assume that such an IOMMU
- exists.
-
.. warning::
Memory coherency operates at a granularity called the cache
@@ -325,8 +272,7 @@ DMA_BIDIRECTIONAL direction isn't known
enum dma_data_direction direction)
Unmaps the region previously mapped. All the parameters passed in
-must be identical to those passed in (and returned) by the mapping
-API.
+must be identical to those passed to (and returned by) dma_map_single().
::
@@ -376,10 +322,10 @@ action (e.g. reduce current DMA mapping usage or delay and try again later).
dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
-Returns: the number of DMA address segments mapped (this may be shorter
-than <nents> passed in if some elements of the scatter/gather list are
-physically or virtually adjacent and an IOMMU maps them with a single
-entry).
+Maps a scatter/gather list for DMA. Returns the number of DMA address segments
+mapped, which may be smaller than <nents> passed in if several consecutive
+sglist entries are merged (e.g. with an IOMMU, or if some adjacent segments
+just happen to be physically contiguous).
Please note that the sg cannot be mapped again if it has been mapped once.
The mapping process is allowed to destroy information in the sg.
@@ -403,9 +349,8 @@ With scatterlists, you use the resulting mapping like this::
where nents is the number of entries in the sglist.
The implementation is free to merge several consecutive sglist entries
-into one (e.g. with an IOMMU, or if several pages just happen to be
-physically contiguous) and returns the actual number of sg entries it
-mapped them to. On failure 0, is returned.
+into one. The returned number is the actual number of sg entries it
+mapped them to. On failure, 0 is returned.
Then you should loop count times (note: this can be less than nents times)
and use sg_dma_address() and sg_dma_len() macros where you previously
@@ -775,19 +720,19 @@ memory or doing partial flushes.
of two for easy alignment.
-Part III - Debug drivers use of the DMA-API
+Part III - Debug drivers use of the DMA API
-------------------------------------------
-The DMA-API as described above has some constraints. DMA addresses must be
+The DMA API as described above has some constraints. DMA addresses must be
released with the corresponding function with the same size for example. With
the advent of hardware IOMMUs it becomes more and more important that drivers
do not violate those constraints. In the worst case such a violation can
result in data corruption up to destroyed filesystems.
-To debug drivers and find bugs in the usage of the DMA-API checking code can
+To debug drivers and find bugs in the usage of the DMA API, checking code can
be compiled into the kernel which will tell the developer about those
violations. If your architecture supports it you can select the "Enable
-debugging of DMA-API usage" option in your kernel configuration. Enabling this
+debugging of DMA API usage" option in your kernel configuration. Enabling this
option has a performance impact. Do not enable it in production kernels.
If you boot, the resulting kernel will contain code which does some bookkeeping
@@ -826,7 +771,7 @@ example warning message may look like this::
<EOI> <4>---[ end trace f6435a98e2a38c0e ]---
The driver developer can find the driver and the device including a stacktrace
-of the DMA-API call which caused this warning.
+of the DMA API call which caused this warning.
By default only the first error will result in a warning message. All other
errors will only be counted silently. This limitation exists to prevent the code
@@ -834,7 +779,7 @@ from flooding your kernel log. To support debugging a device driver this can
be disabled via debugfs. See the debugfs interface documentation below for
details.
-The debugfs directory for the DMA-API debugging code is called dma-api/. In
+The debugfs directory for the DMA API debugging code is called dma-api/. In
this directory the following files can currently be found:
=============================== ===============================================
@@ -882,7 +827,7 @@ dma-api/driver_filter You can write a name of a driver into this file
If you have this code compiled into your kernel it will be enabled by default.
If you want to boot without the bookkeeping anyway you can provide
-'dma_debug=off' as a boot parameter. This will disable DMA-API debugging.
+'dma_debug=off' as a boot parameter. This will disable DMA API debugging.
Notice that you can not enable it again at runtime. You have to reboot to do
so.
@@ -915,3 +860,9 @@ the driver. When driver does unmap, debug_dma_unmap() checks the flag and if
this flag is still set, prints warning message that includes call trace that
leads up to the unmap. This interface can be called from dma_mapping_error()
routines to enable DMA mapping error check debugging.
+
+Functions and structures
+========================
+
+.. kernel-doc:: include/linux/scatterlist.h
+.. kernel-doc:: lib/scatterlist.c
diff --git a/Documentation/core-api/entry.rst b/Documentation/core-api/entry.rst
index a15f9b1767a2..71d8eedc0549 100644
--- a/Documentation/core-api/entry.rst
+++ b/Documentation/core-api/entry.rst
@@ -105,7 +105,7 @@ has to do extra work between the various steps. In such cases it has to
ensure that enter_from_user_mode() is called first on entry and
exit_to_user_mode() is called last on exit.
-Do not nest syscalls. Nested systcalls will cause RCU and/or context tracking
+Do not nest syscalls. Nested syscalls will cause RCU and/or context tracking
to print a warning.
KVM
@@ -115,8 +115,8 @@ Entering or exiting guest mode is very similar to syscalls. From the host
kernel point of view the CPU goes off into user space when entering the
guest and returns to the kernel on exit.
-kvm_guest_enter_irqoff() is a KVM-specific variant of exit_to_user_mode()
-and kvm_guest_exit_irqoff() is the KVM variant of enter_from_user_mode().
+guest_state_enter_irqoff() is a KVM-specific variant of exit_to_user_mode()
+and guest_state_exit_irqoff() is the KVM variant of enter_from_user_mode().
The state operations have the same ordering.
Task work handling is done separately for guest at the boundary of the
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 7a4ca18ca6e2..a03a99c2cac5 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -54,6 +54,7 @@ Library functionality that is used throughout the kernel.
union_find
min_heap
parser
+ list
Low level entry and exit
========================
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 111f6a595e48..e8211c4ca662 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -3,12 +3,6 @@ The Linux Kernel API
====================
-List Management Functions
-=========================
-
-.. kernel-doc:: include/linux/list.h
- :internal:
-
Basic C Library Functions
=========================
diff --git a/Documentation/core-api/list.rst b/Documentation/core-api/list.rst
new file mode 100644
index 000000000000..86873ce9adbf
--- /dev/null
+++ b/Documentation/core-api/list.rst
@@ -0,0 +1,776 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+=====================
+Linked Lists in Linux
+=====================
+
+:Author: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+
+.. contents::
+
+Introduction
+============
+
+Linked lists are one of the most basic data structures used in many programs.
+The Linux kernel implements several different flavours of linked lists. The
+purpose of this document is not to explain linked lists in general, but to show
+new kernel developers how to use the Linux kernel implementations of linked
+lists.
+
+Please note that while linked lists certainly are ubiquitous, they are rarely
+the best data structure to use in cases where a simple array doesn't already
+suffice. In particular, due to their poor data locality, linked lists are a bad
+choice in situations where performance is a concern. Familiarizing
+oneself with other in-kernel generic data structures, especially for concurrent
+accesses, is highly encouraged.
+
+Linux implementation of doubly linked lists
+===========================================
+
+Linux's linked list implementations can be used by including the header file
+``<linux/list.h>``.
+
+The doubly-linked list will likely be the most familiar to many readers. It's a
+list that can efficiently be traversed forwards and backwards.
+
+The Linux kernel's doubly-linked list is circular in nature. This means that to
+get from the head node to the tail, we can just travel one edge backwards.
+Similarly, to get from the tail node to the head, we can simply travel forwards
+"beyond" the tail and arrive back at the head.
+
+Declaring a node
+----------------
+
+A node in a doubly-linked list is declared by adding a struct list_head
+member to the data structure you wish to be contained in the list:
+
+.. code-block:: c
+
+ struct clown {
+ unsigned long long shoe_size;
+ const char *name;
+ struct list_head node; /* the aforementioned member */
+ };
+
+This may be an unfamiliar approach to some, as the classical explanation of a
+linked list is a list node data structure with pointers to the previous and next
+list node, as well the payload data. Linux chooses this approach because it
+allows for generic list modification code regardless of what data structure is
+contained within the list. Since the struct list_head member is not a pointer
+but part of the data structure proper, the container_of() pattern can be used by
+the list implementation to access the payload data regardless of its type, while
+staying oblivious to what said type actually is.
+
+Declaring and initializing a list
+---------------------------------
+
+A doubly-linked list can then be declared as just another struct list_head,
+and initialized with the LIST_HEAD_INIT() macro during initial assignment, or
+with the INIT_LIST_HEAD() function later:
+
+.. code-block:: c
+
+ struct clown_car {
+ int tyre_pressure[4];
+ struct list_head clowns; /* Looks like a node! */
+ };
+
+ /* ... Somewhere later in our driver ... */
+
+ static int circus_init(struct circus_priv *circus)
+ {
+ struct clown_car other_car = {
+ .tyre_pressure = {10, 12, 11, 9},
+ .clowns = LIST_HEAD_INIT(other_car.clowns)
+ };
+
+ INIT_LIST_HEAD(&circus->car.clowns);
+
+ return 0;
+ }
+
+A further point of confusion to some may be that the list itself doesn't really
+have its own type. The concept of the entire linked list and a
+struct list_head member that points to other entries in the list are one and
+the same.
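+
+As an aside, a list that lives at file scope can be declared and initialized
+in one step with the LIST_HEAD() macro:
+
+.. code-block:: c
+
+   static LIST_HEAD(spare_clowns); /* an empty, ready-to-use list */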
+
+Adding nodes to the list
+------------------------
+
+Adding a node to the linked list is done through the list_add() macro.
+
+We'll return to our clown car example to illustrate how nodes get added to the
+list:
+
+.. code-block:: c
+
+ static int circus_fill_car(struct circus_priv *circus)
+ {
+ struct clown_car *car = &circus->car;
+ struct clown *grock;
+ struct clown *dimitri;
+
+ /* State 1 */
+
+ grock = kzalloc(sizeof(*grock), GFP_KERNEL);
+ if (!grock)
+ return -ENOMEM;
+ grock->name = "Grock";
+ grock->shoe_size = 1000;
+
+ /* Note that we're adding the "node" member */
+ list_add(&grock->node, &car->clowns);
+
+ /* State 2 */
+
+ dimitri = kzalloc(sizeof(*dimitri), GFP_KERNEL);
+ if (!dimitri)
+ return -ENOMEM;
+ dimitri->name = "Dimitri";
+ dimitri->shoe_size = 50;
+
+ list_add(&dimitri->node, &car->clowns);
+
+ /* State 3 */
+
+ return 0;
+ }
+
+In State 1, our list of clowns is still empty::
+
+ .------.
+ v |
+ .--------. |
+ | clowns |--'
+ '--------'
+
+This diagram shows the singular "clowns" node pointing at itself. In this
+diagram, and all following diagrams, only the forward edges are shown, to aid in
+clarity.
+
+In State 2, we've added Grock after the list head::
+
+ .--------------------.
+ v |
+ .--------. .-------. |
+ | clowns |---->| Grock |--'
+ '--------' '-------'
+
+This diagram shows the "clowns" node pointing at a new node labeled "Grock".
+The Grock node is pointing back at the "clowns" node.
+
+In State 3, we've added Dimitri after the list head, resulting in the following::
+
+ .------------------------------------.
+ v |
+ .--------. .---------. .-------. |
+ | clowns |---->| Dimitri |---->| Grock |--'
+ '--------' '---------' '-------'
+
+This diagram shows the "clowns" node pointing at a new node labeled "Dimitri",
+which then points at the node labeled "Grock". The "Grock" node still points
+back at the "clowns" node.
+
+If we wanted to have Dimitri inserted at the end of the list instead, we'd use
+list_add_tail(). Our code would then look like this:
+
+.. code-block:: c
+
+ static int circus_fill_car(struct circus_priv *circus)
+ {
+ /* ... */
+
+ list_add_tail(&dimitri->node, &car->clowns);
+
+ /* State 3b */
+
+ return 0;
+ }
+
+This results in the following list::
+
+ .------------------------------------.
+ v |
+ .--------. .-------. .---------. |
+ | clowns |---->| Grock |---->| Dimitri |--'
+ '--------' '-------' '---------'
+
+This diagram shows the "clowns" node pointing at the node labeled "Grock",
+which points at the new node labeled "Dimitri". The node labeled "Dimitri"
+points back at the "clowns" node.
+
+Traversing the list
+-------------------
+
+To iterate the list, we can loop through all nodes within the list with
+list_for_each().
+
+In our clown example, this results in the following somewhat awkward code:
+
+.. code-block:: c
+
+ static unsigned long long circus_get_max_shoe_size(struct circus_priv *circus)
+ {
+ unsigned long long res = 0;
+ struct clown *e;
+ struct list_head *cur;
+
+ list_for_each(cur, &circus->car.clowns) {
+ e = list_entry(cur, struct clown, node);
+ if (e->shoe_size > res)
+ res = e->shoe_size;
+ }
+
+ return res;
+ }
+
+The list_entry() macro internally uses the aforementioned container_of() to
+retrieve the data structure instance that ``node`` is a member of.
+
+Note how the additional list_entry() call is a little awkward here. It's only
+there because we're iterating through the ``node`` members, but we really want
+to iterate through the payload, i.e. the ``struct clown`` that contains each
+node's struct list_head. For this reason, there is a second macro:
+list_for_each_entry().
+
+Using it would change our code to something like this:
+
+.. code-block:: c
+
+ static unsigned long long circus_get_max_shoe_size(struct circus_priv *circus)
+ {
+ unsigned long long res = 0;
+ struct clown *e;
+
+ list_for_each_entry(e, &circus->car.clowns, node) {
+ if (e->shoe_size > res)
+ res = e->shoe_size;
+ }
+
+ return res;
+ }
+
+This eliminates the need for the list_entry() step, and our loop cursor is now
+of the type of our payload. The macro is given the member name that corresponds
+to the list's struct list_head within the clown data structure so that it can
+still walk the list.
+
+Removing nodes from the list
+----------------------------
+
+The list_del() function can be used to remove entries from the list. It not only
+removes the given entry from the list, but poisons the entry's ``prev`` and
+``next`` pointers, so that unintended use of the entry after removal does not
+go unnoticed.
+
+We can extend our previous example to remove one of the entries:
+
+.. code-block:: c
+
+ static int circus_fill_car(struct circus_priv *circus)
+ {
+ /* ... */
+
+ list_add(&dimitri->node, &car->clowns);
+
+ /* State 3 */
+
+ list_del(&dimitri->node);
+
+ /* State 4 */
+
+ return 0;
+ }
+
+The result of this would be this::
+
+ .--------------------.
+ v |
+ .--------. .-------. | .---------.
+ | clowns |---->| Grock |--' | Dimitri |
+ '--------' '-------' '---------'
+
+This diagram shows the "clowns" node pointing at the node labeled "Grock",
+which points back at the "clowns" node. Off to the side is a lone node labeled
+"Dimitri", which has no arrows pointing anywhere.
+
+Note how the Dimitri node does not point to itself; its pointers are
+intentionally set to a "poison" value that the list code refuses to traverse.
+
+If we instead want the removed node to be reinitialized, so that it points at
+itself again like an empty list head, we can use list_del_init():
+
+.. code-block:: c
+
+ static int circus_fill_car(struct circus_priv *circus)
+ {
+ /* ... */
+
+ list_add(&dimitri->node, &car->clowns);
+
+ /* State 3 */
+
+ list_del_init(&dimitri->node);
+
+ /* State 4b */
+
+ return 0;
+ }
+
+This results in the deleted node pointing to itself again::
+
+ .--------------------. .-------.
+ v | v |
+ .--------. .-------. | .---------. |
+ | clowns |---->| Grock |--' | Dimitri |--'
+ '--------' '-------' '---------'
+
+This diagram shows the "clowns" node pointing at the node labeled "Grock",
+which points back at the "clowns" node. Off to the side is a lone node labeled
+"Dimitri", which points to itself.
+
+Traversing whilst removing nodes
+--------------------------------
+
+Deleting entries while we're traversing the list will cause problems if we use
+list_for_each() or list_for_each_entry(), as deleting the current entry would
+modify its ``next`` pointer, which means the traversal can't properly advance
+to the next list entry.
+
+There is a solution to this however: list_for_each_safe() and
+list_for_each_entry_safe(). These take an additional cursor parameter (a
+struct list_head pointer for the former, a pointer to the containing type for
+the latter) used as temporary storage for the next entry during iteration,
+solving the issue.
+
+An example of how to use it:
+
+.. code-block:: c
+
+ static void circus_eject_insufficient_clowns(struct circus_priv *circus)
+ {
+ struct clown *e;
+ struct clown *n; /* temporary storage for safe iteration */
+
+ list_for_each_entry_safe(e, n, &circus->car.clowns, node) {
+ if (e->shoe_size < 500)
+ list_del(&e->node);
+ }
+ }
+
+Proper memory management (i.e. freeing the deleted node while making sure
+nothing still references it) in this case is left as an exercise to the reader.
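+
+If the clowns were heap-allocated and nothing else references them, one
+common pattern (a sketch) is to free each entry right after unlinking it:
+
+.. code-block:: c
+
+   list_for_each_entry_safe(e, n, &circus->car.clowns, node) {
+           if (e->shoe_size < 500) {
+                   list_del(&e->node);
+                   kfree(e); /* safe: the next entry was saved in 'n' */
+           }
+   }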
+
+Cutting a list
+--------------
+
+There are two helper functions to cut lists with. Both take elements from the
+list ``head``, and replace the contents of the list ``list``.
+
+The first such function is list_cut_position(). It removes all list entries from
+``head`` up to and including ``entry``, placing them in ``list`` instead.
+
+In this example, it's assumed we start with the following list::
+
+ .----------------------------------------------------------------.
+ v |
+ .--------. .-------. .---------. .-----. .---------. |
+ | clowns |---->| Grock |---->| Dimitri |---->| Pic |---->| Alfredo |--'
+ '--------' '-------' '---------' '-----' '---------'
+
+With the following code, every clown up to and including "Pic" is moved from
+the "clowns" list head to a separate struct list_head, the local stack
+variable ``retirement``:
+
+.. code-block:: c
+
+ static void circus_retire_clowns(struct circus_priv *circus)
+ {
+ struct list_head retirement = LIST_HEAD_INIT(retirement);
+ struct clown *grock, *dimitri, *pic, *alfredo;
+ struct clown_car *car = &circus->car;
+
+ /* ... clown initialization, list adding ... */
+
+ list_cut_position(&retirement, &car->clowns, &pic->node);
+
+ /* State 1 */
+ }
+
+The resulting ``car->clowns`` list would be this::
+
+ .----------------------.
+ v |
+ .--------. .---------. |
+ | clowns |---->| Alfredo |--'
+ '--------' '---------'
+
+Meanwhile, the ``retirement`` list is transformed to the following::
+
+ .--------------------------------------------------.
+ v |
+ .------------. .-------. .---------. .-----. |
+ | retirement |---->| Grock |---->| Dimitri |---->| Pic |--'
+ '------------' '-------' '---------' '-----'
+
+The second function, list_cut_before(), is much the same, except it cuts before
+the ``entry`` node, i.e. it removes all list entries from ``head`` up to but
+excluding ``entry``, placing them in ``list`` instead. This example assumes the
+same initial starting list as the previous example:
+
+.. code-block:: c
+
+ static void circus_retire_clowns(struct circus_priv *circus)
+ {
+ struct list_head retirement = LIST_HEAD_INIT(retirement);
+ struct clown *grock, *dimitri, *pic, *alfredo;
+ struct clown_car *car = &circus->car;
+
+ /* ... clown initialization, list adding ... */
+
+ list_cut_before(&retirement, &car->clowns, &pic->node);
+
+ /* State 1b */
+ }
+
+The resulting ``car->clowns`` list would be this::
+
+ .----------------------------------.
+ v |
+ .--------. .-----. .---------. |
+ | clowns |---->| Pic |---->| Alfredo |--'
+ '--------' '-----' '---------'
+
+Meanwhile, the ``retirement`` list is transformed to the following::
+
+ .--------------------------------------.
+ v |
+ .------------. .-------. .---------. |
+ | retirement |---->| Grock |---->| Dimitri |--'
+ '------------' '-------' '---------'
+
+It should be noted that both functions will destroy links to any existing nodes
+in the destination ``struct list_head *list``.
+
+Moving entries and partial lists
+--------------------------------
+
+The list_move() and list_move_tail() functions can be used to move an entry
+from one list to another, to either the start or end respectively.
+
+In the following example, we'll assume we start with two lists, "clowns" and
+"sidewalk", in the following initial state "State 0"::
+
+ .----------------------------------------------------------------.
+ v |
+ .--------. .-------. .---------. .-----. .---------. |
+ | clowns |---->| Grock |---->| Dimitri |---->| Pic |---->| Alfredo |--'
+ '--------' '-------' '---------' '-----' '---------'
+
+ .-------------------.
+ v |
+ .----------. .-----. |
+ | sidewalk |---->| Pio |--'
+ '----------' '-----'
+
+We apply the following example code to the two lists:
+
+.. code-block:: c
+
+ static void circus_clowns_exit_car(struct circus_priv *circus)
+ {
+ struct list_head sidewalk = LIST_HEAD_INIT(sidewalk);
+ struct clown *grock, *dimitri, *pic, *alfredo, *pio;
+ struct clown_car *car = &circus->car;
+
+ /* ... clown initialization, list adding ... */
+
+ /* State 0 */
+
+ list_move(&pic->node, &sidewalk);
+
+ /* State 1 */
+
+ list_move_tail(&dimitri->node, &sidewalk);
+
+ /* State 2 */
+ }
+
+In State 1, we arrive at the following situation::
+
+ .-----------------------------------------------------.
+ | |
+ v |
+ .--------. .-------. .---------. .---------. |
+ | clowns |---->| Grock |---->| Dimitri |---->| Alfredo |--'
+ '--------' '-------' '---------' '---------'
+
+ .-------------------------------.
+ v |
+ .----------. .-----. .-----. |
+ | sidewalk |---->| Pic |---->| Pio |--'
+ '----------' '-----' '-----'
+
+In State 2, after we've moved Dimitri to the tail of sidewalk, the situation
+changes as follows::
+
+ .-------------------------------------.
+ | |
+ v |
+ .--------. .-------. .---------. |
+ | clowns |---->| Grock |---->| Alfredo |--'
+ '--------' '-------' '---------'
+
+ .-----------------------------------------------.
+ v |
+ .----------. .-----. .-----. .---------. |
+ | sidewalk |---->| Pic |---->| Pio |---->| Dimitri |--'
+ '----------' '-----' '-----' '---------'
+
+As long as the source and destination list head are part of the same list, we
+can also efficiently bulk move a segment of the list to the tail end of the
+list. We continue the previous example by adding a list_bulk_move_tail() after
+State 2, moving Pic and Pio to the tail end of the sidewalk list.
+
+.. code-block:: c
+
+ static void circus_clowns_exit_car(struct circus_priv *circus)
+ {
+ struct list_head sidewalk = LIST_HEAD_INIT(sidewalk);
+ struct clown *grock, *dimitri, *pic, *alfredo, *pio;
+ struct clown_car *car = &circus->car;
+
+ /* ... clown initialization, list adding ... */
+
+ /* State 0 */
+
+ list_move(&pic->node, &sidewalk);
+
+ /* State 1 */
+
+ list_move_tail(&dimitri->node, &sidewalk);
+
+ /* State 2 */
+
+ list_bulk_move_tail(&sidewalk, &pic->node, &pio->node);
+
+ /* State 3 */
+ }
+
+For the sake of brevity, only the altered "sidewalk" list at State 3 is depicted
+in the following diagram::
+
+ .-----------------------------------------------.
+ v |
+ .----------. .---------. .-----. .-----. |
+ | sidewalk |---->| Dimitri |---->| Pic |---->| Pio |--'
+ '----------' '---------' '-----' '-----'
+
+Do note that list_bulk_move_tail() does not do any checking as to whether all
+three supplied ``struct list_head *`` parameters really do belong to the same
+list. If you use it outside the constraints the documentation gives, then the
+result is a matter between you and the implementation.
+
+Rotating entries
+----------------
+
+A common write operation on lists, especially when using them as queues, is
+to rotate them. A list rotation means entries at the front are sent to the back.
+
+For rotation, Linux provides us with two functions: list_rotate_left() and
+list_rotate_to_front(). The former can be pictured like a bicycle chain, taking
+the entry after the supplied ``struct list_head *`` and moving it to the tail,
+which in essence means the entire list, due to its circular nature, rotates by
+one position.
+
+The latter, list_rotate_to_front(), takes the same concept one step further:
+instead of advancing the list by one entry, it advances it *until* the specified
+entry is the new front.
+
+In the following example, our starting state, State 0, is the following::
+
+ .-----------------------------------------------------------------.
+ v |
+ .--------. .-------. .---------. .-----. .---------. .-----. |
+ | clowns |-->| Grock |-->| Dimitri |-->| Pic |-->| Alfredo |-->| Pio |-'
+ '--------' '-------' '---------' '-----' '---------' '-----'
+
+The example code being used to demonstrate list rotations is the following:
+
+.. code-block:: c
+
+ static void circus_clowns_rotate(struct circus_priv *circus)
+ {
+ struct clown *grock, *dimitri, *pic, *alfredo, *pio;
+ struct clown_car *car = &circus->car;
+
+ /* ... clown initialization, list adding ... */
+
+ /* State 0 */
+
+ list_rotate_left(&car->clowns);
+
+ /* State 1 */
+
+ list_rotate_to_front(&alfredo->node, &car->clowns);
+
+ /* State 2 */
+
+ }
+
+In State 1, we arrive at the following situation::
+
+ .-----------------------------------------------------------------.
+ v |
+ .--------. .---------. .-----. .---------. .-----. .-------. |
+ | clowns |-->| Dimitri |-->| Pic |-->| Alfredo |-->| Pio |-->| Grock |-'
+ '--------' '---------' '-----' '---------' '-----' '-------'
+
+Next, after the list_rotate_to_front() call, we arrive in the following
+State 2::
+
+ .-----------------------------------------------------------------.
+ v |
+ .--------. .---------. .-----. .-------. .---------. .-----. |
+ | clowns |-->| Alfredo |-->| Pio |-->| Grock |-->| Dimitri |-->| Pic |-'
+ '--------' '---------' '-----' '-------' '---------' '-----'
+
+As is hopefully evident from the diagrams, the entries in front of "Alfredo"
+were cycled to the tail end of the list.
+
+Swapping entries
+----------------
+
+Another common operation is that two entries need to be swapped with each other.
+
+For this, Linux provides us with list_swap().
+
+In the following example, we have a list with three entries, and swap two of
+them. This is our starting state in "State 0"::
+
+ .-----------------------------------------.
+ v |
+ .--------. .-------. .---------. .-----. |
+ | clowns |-->| Grock |-->| Dimitri |-->| Pic |-'
+ '--------' '-------' '---------' '-----'
+
+.. code-block:: c
+
+ static void circus_clowns_swap(struct circus_priv *circus)
+ {
+ struct clown *grock, *dimitri, *pic;
+ struct clown_car *car = &circus->car;
+
+ /* ... clown initialization, list adding ... */
+
+ /* State 0 */
+
+ list_swap(&dimitri->node, &pic->node);
+
+ /* State 1 */
+ }
+
+The resulting list at State 1 is the following::
+
+ .-----------------------------------------.
+ v |
+ .--------. .-------. .-----. .---------. |
+ | clowns |-->| Grock |-->| Pic |-->| Dimitri |-'
+ '--------' '-------' '-----' '---------'
+
+As is evident by comparing the diagrams, the "Pic" and "Dimitri" nodes have
+traded places.
+
+Splicing two lists together
+---------------------------
+
+Say we have two lists, in the following example one represented by a list head
+we call "knie" and one we call "stey". In a hypothetical circus acquisition,
+the two lists of clowns should be spliced together. The following is our
+situation in "State 0"::
+
+ .-----------------------------------------.
+ | |
+ v |
+ .------. .-------. .---------. .-----. |
+ | knie |-->| Grock |-->| Dimitri |-->| Pic |--'
+ '------' '-------' '---------' '-----'
+
+ .-----------------------------.
+ v |
+ .------. .---------. .-----. |
+ | stey |-->| Alfredo |-->| Pio |--'
+ '------' '---------' '-----'
+
+The function to splice these two lists together is list_splice(). Our example
+code is as follows:
+
+.. code-block:: c
+
+ static void circus_clowns_splice(void)
+ {
+ struct clown *grock, *dimitri, *pic, *alfredo, *pio;
+ struct list_head knie = LIST_HEAD_INIT(knie);
+ struct list_head stey = LIST_HEAD_INIT(stey);
+
+ /* ... Clown allocation and initialization here ... */
+
+ list_add_tail(&grock->node, &knie);
+ list_add_tail(&dimitri->node, &knie);
+ list_add_tail(&pic->node, &knie);
+ list_add_tail(&alfredo->node, &stey);
+ list_add_tail(&pio->node, &stey);
+
+ /* State 0 */
+
+ list_splice(&stey, &dimitri->node);
+
+ /* State 1 */
+ }
+
+The list_splice() call here adds all the entries in ``stey`` to the list that
+``dimitri``'s ``node`` list_head is part of, inserting them right after
+``dimitri``'s ``node``. A somewhat surprising diagram of the resulting
+"State 1" follows::
+
+ .-----------------------------------------------------------------.
+ | |
+ v |
+ .------. .-------. .---------. .---------. .-----. .-----. |
+ | knie |-->| Grock |-->| Dimitri |-->| Alfredo |-->| Pio |-->| Pic |--'
+ '------' '-------' '---------' '---------' '-----' '-----'
+ ^
+ .-------------------------------'
+ |
+ .------. |
+ | stey |--'
+ '------'
+
+Traversing the ``stey`` list no longer results in correct behavior. A call to
+list_for_each() on ``stey`` results in an infinite loop, as the traversal
+never arrives back at the ``stey`` list head.
+
+This is because list_splice() did not reinitialize the list_head it took
+entries from, leaving its pointer pointing into what is now a different list.
+
+If we want to avoid this situation, list_splice_init() can be used. It does
+the same thing as list_splice(), except it reinitializes the donor list_head
+after the transplant.
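+
+A sketch of this safer variant; afterwards ``stey`` is empty but remains a
+valid list head:
+
+.. code-block:: c
+
+   list_splice_init(&stey, &dimitri->node);
+
+   /* stey now points at itself again and can safely be reused */
+   WARN_ON(!list_empty(&stey));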
+
+Concurrency considerations
+--------------------------
+
+Concurrent access and modification of a list needs to be protected with a lock
+in most cases. Alternatively and preferably, one may use the RCU primitives for
+lists in read-mostly use-cases, where read accesses to the list are common but
+modifications to the list less so. See Documentation/RCU/listRCU.rst for more
+details.
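+
+A minimal sketch of lock-protected modification, assuming a hypothetical
+``lock`` spinlock member in our driver structure:
+
+.. code-block:: c
+
+   spin_lock(&circus->lock);
+   list_add(&grock->node, &circus->car.clowns);
+   spin_unlock(&circus->lock);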
+
+Further reading
+---------------
+
+* `How does the kernel implement Linked Lists? - KernelNewbies <https://kernelnewbies.org/FAQ/LinkedLists>`_
+
+Full List API
+=============
+
+.. kernel-doc:: include/linux/list.h
+ :internal:
diff --git a/Documentation/core-api/memory-hotplug.rst b/Documentation/core-api/memory-hotplug.rst
index 682259ee633a..8fc97c2379de 100644
--- a/Documentation/core-api/memory-hotplug.rst
+++ b/Documentation/core-api/memory-hotplug.rst
@@ -9,6 +9,9 @@ Memory hotplug event notifier
Hotplugging events are sent to a notification queue.
+Memory notifier
+----------------
+
There are six types of notification defined in ``include/linux/memory.h``:
MEM_GOING_ONLINE
@@ -56,20 +59,18 @@ The third argument (arg) passes a pointer of struct memory_notify::
struct memory_notify {
unsigned long start_pfn;
unsigned long nr_pages;
- int status_change_nid_normal;
- int status_change_nid;
}
- start_pfn is start_pfn of online/offline memory.
- nr_pages is # of pages of online/offline memory.
-- status_change_nid_normal is set node id when N_NORMAL_MEMORY of nodemask
- is (will be) set/clear, if this is -1, then nodemask status is not changed.
-- status_change_nid is set node id when N_MEMORY of nodemask is (will be)
- set/clear. It means a new(memoryless) node gets new memory by online and a
- node loses all memory. If this is -1, then nodemask status is not changed.
- If status_changed_nid* >= 0, callback should create/discard structures for the
- node if necessary.
+It is possible to get notified for MEM_CANCEL_ONLINE without having been notified
+for MEM_GOING_ONLINE, and the same applies to MEM_CANCEL_OFFLINE and
+MEM_GOING_OFFLINE.
+This can happen when a consumer fails, in which case the callchain is broken
+and the remaining consumers of the notifier are no longer called.
+Users of memory_notify should therefore make no assumptions and be prepared
+to handle such cases.
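+
+A sketch of a defensively written callback; the function and helper names are
+hypothetical, and the NOTIFY_* return values are described just below::
+
+    static int my_mem_callback(struct notifier_block *self,
+                               unsigned long action, void *arg)
+    {
+        struct memory_notify *mn = arg;
+
+        switch (action) {
+        case MEM_GOING_ONLINE:
+            /* returning NOTIFY_BAD here vetoes the online operation */
+            if (!my_driver_can_handle(mn->start_pfn, mn->nr_pages))
+                return NOTIFY_BAD;
+            break;
+        case MEM_CANCEL_ONLINE:
+            /* may arrive without a prior MEM_GOING_ONLINE */
+            break;
+        }
+        return NOTIFY_OK;
+    }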
The callback routine shall return one of the values
NOTIFY_DONE, NOTIFY_OK, NOTIFY_BAD, NOTIFY_STOP
@@ -83,6 +84,78 @@ further processing of the notification queue.
NOTIFY_STOP stops further processing of the notification queue.
+NUMA node notifier
+------------------
+
+There are six types of notification defined in ``include/linux/node.h``:
+
+NODE_ADDING_FIRST_MEMORY
+ Generated before memory becomes available to this node for the first time.
+
+NODE_CANCEL_ADDING_FIRST_MEMORY
+ Generated if NODE_ADDING_FIRST_MEMORY fails.
+
+NODE_ADDED_FIRST_MEMORY
+ Generated when memory has become available for this node for the first time.
+
+NODE_REMOVING_LAST_MEMORY
+ Generated when the last memory available to this node is about to be offlined.
+
+NODE_CANCEL_REMOVING_LAST_MEMORY
+ Generated if NODE_REMOVING_LAST_MEMORY fails.
+
+NODE_REMOVED_LAST_MEMORY
+ Generated when the last memory available to this node has been offlined.
+
+A callback routine can be registered by calling::
+
+ hotplug_node_notifier(callback_func, priority)
+
+Callback functions with higher values of priority are called before callback
+functions with lower values.
+
+A callback function must have the following prototype::
+
+ int callback_func(struct notifier_block *self, unsigned long action,
+                   void *arg);
+
+The first argument of the callback function (self) is a pointer to the block
+of the notifier chain that points to the callback function itself.
+The second argument (action) is one of the event types described above.
+The third argument (arg) passes a pointer of struct node_notify::
+
+ struct node_notify {
+ int nid;
+ }
+
+- nid is the node that memory is being added to or removed from.
+
+It is possible to get notified for NODE_CANCEL_ADDING_FIRST_MEMORY without
+having been notified for NODE_ADDING_FIRST_MEMORY, and the same applies to
+NODE_CANCEL_REMOVING_LAST_MEMORY and NODE_REMOVING_LAST_MEMORY.
+This can happen when a consumer fails, in which case the callchain is broken
+and the remaining consumers of the notifier are no longer called.
+Users of node_notify should therefore make no assumptions and be prepared to
+handle such cases.
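+
+A sketch of what a callback might look like (the function name is
+hypothetical)::
+
+    static int my_node_callback(struct notifier_block *self,
+                                unsigned long action, void *arg)
+    {
+        struct node_notify *nn = arg;
+
+        switch (action) {
+        case NODE_ADDING_FIRST_MEMORY:
+            /* create per-node structures for nn->nid here */
+            break;
+        case NODE_CANCEL_ADDING_FIRST_MEMORY:
+        case NODE_REMOVED_LAST_MEMORY:
+            /* discard per-node structures for nn->nid here */
+            break;
+        }
+        return NOTIFY_OK;
+    }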
+
+The callback routine shall return one of the values
+NOTIFY_DONE, NOTIFY_OK, NOTIFY_BAD, NOTIFY_STOP
+defined in ``include/linux/notifier.h``
+
+NOTIFY_DONE and NOTIFY_OK have no effect on the further processing.
+
+NOTIFY_BAD is used as response to the NODE_ADDING_FIRST_MEMORY,
+NODE_REMOVING_LAST_MEMORY, NODE_ADDED_FIRST_MEMORY or
+NODE_REMOVED_LAST_MEMORY action to cancel hotplugging.
+It stops further processing of the notification queue.
+
+NOTIFY_STOP stops further processing of the notification queue.
+
+Please note that the callback should not fail for NODE_ADDED_FIRST_MEMORY /
+NODE_REMOVED_LAST_MEMORY, as the memory_hotplug code can no longer roll back
+at that point.
+
Locking Internals
=================
diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst
index af8151db88b2..5063179cfc70 100644
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -91,12 +91,6 @@ Memory pools
.. kernel-doc:: mm/mempool.c
:export:
-DMA pools
-=========
-
-.. kernel-doc:: mm/dmapool.c
- :export:
-
More Memory Management Functions
================================
@@ -139,4 +133,3 @@ More Memory Management Functions
.. kernel-doc:: mm/mmu_notifier.c
.. kernel-doc:: mm/balloon_compaction.c
.. kernel-doc:: mm/huge_memory.c
-.. kernel-doc:: mm/io-mapping.c
diff --git a/Documentation/core-api/packing.rst b/Documentation/core-api/packing.rst
index 0ce2078c8e13..f68f1e08fef9 100644
--- a/Documentation/core-api/packing.rst
+++ b/Documentation/core-api/packing.rst
@@ -319,7 +319,7 @@ Here is an example of how to use the fields APIs:
#define SIZE 13
- typdef struct __packed { u8 buf[SIZE]; } packed_buf_t;
+ typedef struct __packed { u8 buf[SIZE]; } packed_buf_t;
static const struct packed_field_u8 fields[] = {
PACKED_FIELD(100, 90, struct data, field1),
diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst
index e295835fc116..165ca73e8351 100644
--- a/Documentation/core-api/workqueue.rst
+++ b/Documentation/core-api/workqueue.rst
@@ -183,6 +183,12 @@ resources, scheduled and executed.
BH work items cannot sleep. All other features such as delayed queueing,
flushing and canceling are supported.
+``WQ_PERCPU``
+ Work items queued to a per-cpu wq are bound to a specific CPU.
+ This flag is the right choice when cpu locality is important.
+
+ This flag is the complement of ``WQ_UNBOUND``.
+
``WQ_UNBOUND``
Work items queued to an unbound wq are served by the special
worker-pools which host workers which are not bound to any
diff --git a/Documentation/crypto/crypto_engine.rst b/Documentation/crypto/crypto_engine.rst
index d562ea17d994..7ef850e28016 100644
--- a/Documentation/crypto/crypto_engine.rst
+++ b/Documentation/crypto/crypto_engine.rst
@@ -36,12 +36,6 @@ engine using ``crypto_engine_stop()`` and destroy the engine with
Before transferring any request, you have to fill the context enginectx by
providing functions for the following:
-* ``prepare_crypt_hardware``: Called once before any prepare functions are
- called.
-
-* ``unprepare_crypt_hardware``: Called once after all unprepare functions have
- been called.
-
* ``prepare_cipher_request``/``prepare_hash_request``: Called before each
corresponding request is performed. If some processing or other preparatory
work is required, do it here.
diff --git a/Documentation/dev-tools/checkpatch.rst b/Documentation/dev-tools/checkpatch.rst
index 76bd0ddb0041..d5c47e560324 100644
--- a/Documentation/dev-tools/checkpatch.rst
+++ b/Documentation/dev-tools/checkpatch.rst
@@ -495,6 +495,15 @@ Comments
See: https://lore.kernel.org/lkml/20131006222342.GT19510@leaf/
+ **UNCOMMENTED_RGMII_MODE**
+ Historically, the RGMII PHY modes specified in Device Trees have been
+ used inconsistently, often referring to the usage of delays on the PHY
+ side rather than describing the board.
+
+ PHY modes "rgmii", "rgmii-rxid" and "rgmii-txid" modes require the clock
+ signal to be delayed on the PCB; this unusual configuration should be
+ described in a comment. If they are not (meaning that the delay is realized
+ internally in the MAC or PHY), "rgmii-id" is the correct PHY mode.
Commit message
--------------
diff --git a/Documentation/devicetree/bindings/bus/fsl,imx8qxp-pixel-link-msi-bus.yaml b/Documentation/devicetree/bindings/bus/fsl,imx8qxp-pixel-link-msi-bus.yaml
index 7e1ffc551046..4adbb7afa889 100644
--- a/Documentation/devicetree/bindings/bus/fsl,imx8qxp-pixel-link-msi-bus.yaml
+++ b/Documentation/devicetree/bindings/bus/fsl,imx8qxp-pixel-link-msi-bus.yaml
@@ -103,11 +103,14 @@ examples:
clock-names = "msi", "ahb";
power-domains = <&pd IMX_SC_R_DC_0>;
- syscon@56221000 {
- compatible = "fsl,imx8qxp-mipi-lvds-csr", "syscon", "simple-mfd";
+ bus@56221000 {
+ compatible = "simple-pm-bus", "syscon";
reg = <0x56221000 0x1000>;
clocks = <&mipi_lvds_0_di_mipi_lvds_regs_lpcg IMX_LPCG_CLK_4>;
clock-names = "ipg";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
pxl2dpi {
compatible = "fsl,imx8qxp-pxl2dpi";
diff --git a/Documentation/devicetree/bindings/clock/alphascale,acc.txt b/Documentation/devicetree/bindings/clock/alphascale,acc.txt
deleted file mode 100644
index c9fb9324c634..000000000000
--- a/Documentation/devicetree/bindings/clock/alphascale,acc.txt
+++ /dev/null
@@ -1,114 +0,0 @@
-Alphascale Clock Controller
-
-The ACC (Alphascale Clock Controller) is responsible for choosing proper
-clock source, setting dividers and clock gates.
-
-Required properties for the ACC node:
- - compatible: must be "alphascale,asm9260-clock-controller"
- - reg: must contain the ACC register base and size
- - #clock-cells : shall be set to 1.
-
-Simple one-cell clock specifier format is used, where the only cell is used
-as an index of the clock inside the provider.
-It is encouraged to use dt-binding for clock index definitions. SoC specific
-dt-binding should be included to the device tree descriptor. For example
-Alphascale ASM9260:
-#include <dt-bindings/clock/alphascale,asm9260.h>
-
-This binding contains two types of clock providers:
- _AHB_ - AHB gate;
- _SYS_ - adjustable clock source. Not all peripheral have _SYS_ clock provider.
-All clock specific details can be found in the SoC documentation.
-CLKID_AHB_ROM 0
-CLKID_AHB_RAM 1
-CLKID_AHB_GPIO 2
-CLKID_AHB_MAC 3
-CLKID_AHB_EMI 4
-CLKID_AHB_USB0 5
-CLKID_AHB_USB1 6
-CLKID_AHB_DMA0 7
-CLKID_AHB_DMA1 8
-CLKID_AHB_UART0 9
-CLKID_AHB_UART1 10
-CLKID_AHB_UART2 11
-CLKID_AHB_UART3 12
-CLKID_AHB_UART4 13
-CLKID_AHB_UART5 14
-CLKID_AHB_UART6 15
-CLKID_AHB_UART7 16
-CLKID_AHB_UART8 17
-CLKID_AHB_UART9 18
-CLKID_AHB_I2S0 19
-CLKID_AHB_I2C0 20
-CLKID_AHB_I2C1 21
-CLKID_AHB_SSP0 22
-CLKID_AHB_IOCONFIG 23
-CLKID_AHB_WDT 24
-CLKID_AHB_CAN0 25
-CLKID_AHB_CAN1 26
-CLKID_AHB_MPWM 27
-CLKID_AHB_SPI0 28
-CLKID_AHB_SPI1 29
-CLKID_AHB_QEI 30
-CLKID_AHB_QUADSPI0 31
-CLKID_AHB_CAMIF 32
-CLKID_AHB_LCDIF 33
-CLKID_AHB_TIMER0 34
-CLKID_AHB_TIMER1 35
-CLKID_AHB_TIMER2 36
-CLKID_AHB_TIMER3 37
-CLKID_AHB_IRQ 38
-CLKID_AHB_RTC 39
-CLKID_AHB_NAND 40
-CLKID_AHB_ADC0 41
-CLKID_AHB_LED 42
-CLKID_AHB_DAC0 43
-CLKID_AHB_LCD 44
-CLKID_AHB_I2S1 45
-CLKID_AHB_MAC1 46
-
-CLKID_SYS_CPU 47
-CLKID_SYS_AHB 48
-CLKID_SYS_I2S0M 49
-CLKID_SYS_I2S0S 50
-CLKID_SYS_I2S1M 51
-CLKID_SYS_I2S1S 52
-CLKID_SYS_UART0 53
-CLKID_SYS_UART1 54
-CLKID_SYS_UART2 55
-CLKID_SYS_UART3 56
-CLKID_SYS_UART4 56
-CLKID_SYS_UART5 57
-CLKID_SYS_UART6 58
-CLKID_SYS_UART7 59
-CLKID_SYS_UART8 60
-CLKID_SYS_UART9 61
-CLKID_SYS_SPI0 62
-CLKID_SYS_SPI1 63
-CLKID_SYS_QUADSPI 64
-CLKID_SYS_SSP0 65
-CLKID_SYS_NAND 66
-CLKID_SYS_TRACE 67
-CLKID_SYS_CAMM 68
-CLKID_SYS_WDT 69
-CLKID_SYS_CLKOUT 70
-CLKID_SYS_MAC 71
-CLKID_SYS_LCD 72
-CLKID_SYS_ADCANA 73
-
-Example of clock consumer with _SYS_ and _AHB_ sinks.
-uart4: serial@80010000 {
- compatible = "alphascale,asm9260-uart";
- reg = <0x80010000 0x4000>;
- clocks = <&acc CLKID_SYS_UART4>, <&acc CLKID_AHB_UART4>;
- interrupts = <19>;
-};
-
-Clock consumer with only one, _AHB_ sink.
-timer0: timer@80088000 {
- compatible = "alphascale,asm9260-timer";
- reg = <0x80088000 0x4000>;
- clocks = <&acc CLKID_AHB_TIMER0>;
- interrupts = <29>;
-};
-
diff --git a/Documentation/devicetree/bindings/clock/alphascale,asm9260-clock-controller.yaml b/Documentation/devicetree/bindings/clock/alphascale,asm9260-clock-controller.yaml
new file mode 100644
index 000000000000..1caad419ce9d
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/alphascale,asm9260-clock-controller.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/alphascale,asm9260-clock-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Alphascale Clock Controller
+
+maintainers:
+ - Oleksij Rempel <linux@rempel-privat.de>
+
+description: |
+ The ACC (Alphascale Clock Controller) is responsible for choosing the proper
+ clock source and for setting dividers and clock gates.
+
+ A simple one-cell clock specifier format is used, where the only cell is the
+ index of the clock inside the provider.
+ Using a dt-binding header for clock index definitions is encouraged. The
+ SoC-specific dt-binding header should be included in the device tree source.
+ For example, for the Alphascale ASM9260:
+
+ #include <dt-bindings/clock/alphascale,asm9260.h>
+
+ This binding contains two types of clock providers:
+
+ _AHB_ - AHB gate;
+ _SYS_ - adjustable clock source. Not all peripherals have a _SYS_ clock provider.
+
+ All clock specific details can be found in the SoC documentation.
+
+properties:
+ compatible:
+ const: alphascale,asm9260-clock-controller
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
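+
+# Minimal sketch of a provider node; the unit address and register size are
+# illustrative for an ASM9260-class SoC.
+examples:
+ - |
+ clock-controller@80040000 {
+ compatible = "alphascale,asm9260-clock-controller";
+ reg = <0x80040000 0x204>;
+ #clock-cells = <1>;
+ };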
diff --git a/Documentation/devicetree/bindings/clock/apm,xgene-device-clock.yaml b/Documentation/devicetree/bindings/clock/apm,xgene-device-clock.yaml
new file mode 100644
index 000000000000..b27bcb2a9ee0
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/apm,xgene-device-clock.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/apm,xgene-device-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: APM X-Gene SoC device clocks
+
+maintainers:
+ - Khuong Dinh <khuong@os.amperecomputing.com>
+
+properties:
+ compatible:
+ const: apm,xgene-device-clock
+
+ reg:
+ minItems: 1
+ maxItems: 2
+
+ reg-names:
+ items:
+ - enum: [ csr-reg, div-reg ]
+ - const: div-reg
+ minItems: 1
+
+ clocks:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ clock-output-names:
+ maxItems: 1
+
+ clock-names:
+ maxItems: 1
+
+ csr-offset:
+ description: Offset to the CSR reset register
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
+
+ csr-mask:
+ description: CSR reset mask bit
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0xf
+
+ enable-offset:
+ description: Offset to the enable register
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 8
+
+ enable-mask:
+ description: CSR enable mask bit
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0xf
+
+ divider-offset:
+ description: Offset to the divider register
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
+
+ divider-width:
+ description: Width of the divider register
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
+
+ divider-shift:
+ description: Bit shift of the divider register
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+ - clock-output-names
+
+additionalProperties: false
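+
+# Minimal sketch; the unit address, the parent clock phandle (&socplldiv2)
+# and the output name are illustrative.
+examples:
+ - |
+ clock-controller@1703c000 {
+ compatible = "apm,xgene-device-clock";
+ reg = <0x1703c000 0x1000>;
+ reg-names = "csr-reg";
+ clocks = <&socplldiv2 0>;
+ #clock-cells = <1>;
+ clock-output-names = "qmlclk";
+ };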
diff --git a/Documentation/devicetree/bindings/clock/apm,xgene-socpll-clock.yaml b/Documentation/devicetree/bindings/clock/apm,xgene-socpll-clock.yaml
new file mode 100644
index 000000000000..bdd4a6b92bbd
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/apm,xgene-socpll-clock.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/apm,xgene-socpll-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: APM X-Gene SoC PLL, PCPPLL, and PMD clocks
+
+maintainers:
+ - Khuong Dinh <khuong@os.amperecomputing.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - apm,xgene-pcppll-clock
+ - apm,xgene-pcppll-v2-clock
+ - apm,xgene-pmd-clock
+ - apm,xgene-socpll-clock
+ - apm,xgene-socpll-v2-clock
+
+ reg:
+ maxItems: 1
+
+ reg-names:
+ items:
+ - enum: [ csr-reg, div-reg ]
+ - const: div-reg
+ minItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ enum: [ pcppll, socpll ]
+
+ "#clock-cells":
+ const: 1
+
+ clock-output-names:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+ - clock-output-names
+
+additionalProperties: false
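+
+# Minimal sketch; the unit address and the parent clock phandle (&refclk)
+# are illustrative.
+examples:
+ - |
+ clock-controller@17000120 {
+ compatible = "apm,xgene-socpll-clock";
+ reg = <0x17000120 0x1000>;
+ clocks = <&refclk 0>;
+ #clock-cells = <1>;
+ clock-output-names = "socpll";
+ };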
diff --git a/Documentation/devicetree/bindings/clock/armada3700-periph-clock.txt b/Documentation/devicetree/bindings/clock/armada3700-periph-clock.txt
deleted file mode 100644
index fbf58c443c04..000000000000
--- a/Documentation/devicetree/bindings/clock/armada3700-periph-clock.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-* Peripheral Clock bindings for Marvell Armada 37xx SoCs
-
-Marvell Armada 37xx SoCs provide peripheral clocks which are
-used as clock source for the peripheral of the SoC.
-
-There are two different blocks associated to north bridge and south
-bridge.
-
-The peripheral clock consumer should specify the desired clock by
-having the clock ID in its "clocks" phandle cell.
-
-The following is a list of provided IDs for Armada 3700 North bridge clocks:
-ID Clock name Description
------------------------------------
-0 mmc MMC controller
-1 sata_host Sata Host
-2 sec_at Security AT
-3 sac_dap Security DAP
-4 tsecm Security Engine
-5 setm_tmx Serial Embedded Trace Module
-6 avs Adaptive Voltage Scaling
-7 sqf SPI
-8 pwm PWM
-9 i2c_2 I2C 2
-10 i2c_1 I2C 1
-11 ddr_phy DDR PHY
-12 ddr_fclk DDR F clock
-13 trace Trace
-14 counter Counter
-15 eip97 EIP 97
-16 cpu CPU
-
-The following is a list of provided IDs for Armada 3700 South bridge clocks:
-ID Clock name Description
------------------------------------
-0 gbe-50 50 MHz parent clock for Gigabit Ethernet
-1 gbe-core parent clock for Gigabit Ethernet core
-2 gbe-125 125 MHz parent clock for Gigabit Ethernet
-3 gbe1-50 50 MHz clock for Gigabit Ethernet port 1
-4 gbe0-50 50 MHz clock for Gigabit Ethernet port 0
-5 gbe1-125 125 MHz clock for Gigabit Ethernet port 1
-6 gbe0-125 125 MHz clock for Gigabit Ethernet port 0
-7 gbe1-core Gigabit Ethernet core port 1
-8 gbe0-core Gigabit Ethernet core port 0
-9 gbe-bm Gigabit Ethernet Buffer Manager
-10 sdio SDIO
-11 usb32-sub2-sys USB 2 clock
-12 usb32-ss-sys USB 3 clock
-13 pcie PCIe controller
-
-Required properties:
-
-- compatible : shall be "marvell,armada-3700-periph-clock-nb" for the
- north bridge block, or
- "marvell,armada-3700-periph-clock-sb" for the south bridge block
-- reg : must be the register address of North/South Bridge Clock register
-- #clock-cells : from common clock binding; shall be set to 1
-
-- clocks : list of the parent clock phandle in the following order:
- TBG-A P, TBG-B P, TBG-A S, TBG-B S and finally the xtal clock.
-
-
-Example:
-
-nb_perih_clk: nb-periph-clk@13000{
- compatible = "marvell,armada-3700-periph-clock-nb";
- reg = <0x13000 0x1000>;
- clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
- <&tbg 3>, <&xtalclk>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/clock/armada3700-tbg-clock.txt b/Documentation/devicetree/bindings/clock/armada3700-tbg-clock.txt
deleted file mode 100644
index ed1df32c577a..000000000000
--- a/Documentation/devicetree/bindings/clock/armada3700-tbg-clock.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* Time Base Generator Clock bindings for Marvell Armada 37xx SoCs
-
-Marvell Armada 37xx SoCs provide Time Base Generator clocks which are
-used as parent clocks for the peripheral clocks.
-
-The TBG clock consumer should specify the desired clock by having the
-clock ID in its "clocks" phandle cell.
-
-The following is a list of provided IDs and clock names on Armada 3700:
- 0 = TBG A P
- 1 = TBG B P
- 2 = TBG A S
- 3 = TBG B S
-
-Required properties:
-- compatible : shall be "marvell,armada-3700-tbg-clock"
-- reg : must be the register address of North Bridge PLL register
-- #clock-cells : from common clock binding; shall be set to 1
-
-Example:
-
-tbg: tbg@13200 {
- compatible = "marvell,armada-3700-tbg-clock";
- reg = <0x13200 0x1000>;
- clocks = <&xtalclk>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/clock/artpec6.txt b/Documentation/devicetree/bindings/clock/artpec6.txt
deleted file mode 100644
index dff9cdf0009c..000000000000
--- a/Documentation/devicetree/bindings/clock/artpec6.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-* Clock bindings for Axis ARTPEC-6 chip
-
-The bindings are based on the clock provider binding in
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-External clocks:
-----------------
-
-There are two external inputs to the main clock controller which should be
-provided using the common clock bindings.
-- "sys_refclk": External 50 Mhz oscillator (required)
-- "i2s_refclk": Alternate audio reference clock (optional).
-
-Main clock controller
----------------------
-
-Required properties:
-- #clock-cells: Should be <1>
- See dt-bindings/clock/axis,artpec6-clkctrl.h for the list of valid identifiers.
-- compatible: Should be "axis,artpec6-clkctrl"
-- reg: Must contain the base address and length of the system controller
-- clocks: Must contain a phandle entry for each clock in clock-names
-- clock-names: Must include the external oscillator ("sys_refclk"). Optional
- ones are the audio reference clock ("i2s_refclk") and the audio fractional
- dividers ("frac_clk0" and "frac_clk1").
-
-Examples:
-
-ext_clk: ext_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <50000000>;
-};
-
-clkctrl: clkctrl@f8000000 {
- #clock-cells = <1>;
- compatible = "axis,artpec6-clkctrl";
- reg = <0xf8000000 0x48>;
- clocks = <&ext_clk>;
- clock-names = "sys_refclk";
-};
diff --git a/Documentation/devicetree/bindings/clock/axis,artpec6-clkctrl.yaml b/Documentation/devicetree/bindings/clock/axis,artpec6-clkctrl.yaml
new file mode 100644
index 000000000000..a78269369df8
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/axis,artpec6-clkctrl.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/axis,artpec6-clkctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Axis ARTPEC-6 clock controller
+
+maintainers:
+ - Lars Persson <lars.persson@axis.com>
+
+properties:
+ compatible:
+ const: axis,artpec6-clkctrl
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ clocks:
+ minItems: 1
+ items:
+ - description: external 50 MHz oscillator.
+ - description: optional audio reference clock.
+ - description: fractional audio clock divider 0.
+ - description: fractional audio clock divider 1.
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: sys_refclk
+ - const: i2s_refclk
+ - const: frac_clk0
+ - const: frac_clk1
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@f8000000 {
+ compatible = "axis,artpec6-clkctrl";
+ reg = <0xf8000000 0x48>;
+ #clock-cells = <1>;
+ clocks = <&ext_clk>;
+ clock-names = "sys_refclk";
+ };
diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt b/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt
deleted file mode 100644
index 9e0b03a6519b..000000000000
--- a/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-Broadcom BCM2835 CPRMAN clocks
-
-This binding uses the common clock binding:
- Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-The CPRMAN clock controller generates clocks in the audio power domain
-of the BCM2835. There is a level of PLLs deriving from an external
-oscillator, a level of PLL dividers that produce channels off of the
-few PLLs, and a level of mostly-generic clock generators sourcing from
-the PLL channels. Most other hardware components source from the
-clock generators, but a few (like the ARM or HDMI) will source from
-the PLL dividers directly.
-
-Required properties:
-- compatible: should be one of the following,
- "brcm,bcm2711-cprman"
- "brcm,bcm2835-cprman"
-- #clock-cells: Should be <1>. The permitted clock-specifier values can be
- found in include/dt-bindings/clock/bcm2835.h
-- reg: Specifies base physical address and size of the registers
-- clocks: phandles to the parent clocks used as input to the module, in
- the following order:
-
- - External oscillator
- - DSI0 byte clock
- - DSI0 DDR2 clock
- - DSI0 DDR clock
- - DSI1 byte clock
- - DSI1 DDR2 clock
- - DSI1 DDR clock
-
- Only external oscillator is required. The DSI clocks may
- not be present, in which case their children will be
- unusable.
-
-Example:
-
- clk_osc: clock@3 {
- compatible = "fixed-clock";
- reg = <3>;
- #clock-cells = <0>;
- clock-output-names = "osc";
- clock-frequency = <19200000>;
- };
-
- clocks: cprman@7e101000 {
- compatible = "brcm,bcm2835-cprman";
- #clock-cells = <1>;
- reg = <0x7e101000 0x2000>;
- clocks = <&clk_osc>;
- };
-
- i2c0: i2c@7e205000 {
- compatible = "brcm,bcm2835-i2c";
- reg = <0x7e205000 0x1000>;
- interrupts = <2 21>;
- clocks = <&clocks BCM2835_CLOCK_VPU>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.yaml b/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.yaml
new file mode 100644
index 000000000000..b0cf76c74bc7
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/brcm,bcm2835-cprman.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM2835 CPRMAN clocks
+
+maintainers:
+ - Stefan Wahren <wahrenst@gmx.net>
+ - Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com>
+
+description:
+ The CPRMAN clock controller generates clocks in the audio power domain of the
+ BCM2835. There is a level of PLLs deriving from an external oscillator, a
+ level of PLL dividers that produce channels off of the few PLLs, and a level
+ of mostly-generic clock generators sourcing from the PLL channels. Most other
+ hardware components source from the clock generators, but a few (like the ARM
+ or HDMI) will source from the PLL dividers directly.
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm2711-cprman
+ - brcm,bcm2835-cprman
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ minItems: 1
+ items:
+ - description: External oscillator clock.
+ - description: DSI0 byte clock.
+ - description: DSI0 DDR2 clock.
+ - description: DSI0 DDR clock.
+ - description: DSI1 byte clock.
+ - description: DSI1 DDR2 clock.
+ - description: DSI1 DDR clock.
+
+additionalProperties: false
+
+required:
+ - compatible
+ - '#clock-cells'
+ - reg
+ - clocks
+
+examples:
+ - |
+ clock-controller@7e101000 {
+ compatible = "brcm,bcm2835-cprman";
+ reg = <0x7e101000 0x2000>;
+ #clock-cells = <1>;
+ clocks = <&clk_osc>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm53573-ilp.txt b/Documentation/devicetree/bindings/clock/brcm,bcm53573-ilp.txt
deleted file mode 100644
index 2ebb107331dd..000000000000
--- a/Documentation/devicetree/bindings/clock/brcm,bcm53573-ilp.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Broadcom BCM53573 ILP clock
-===========================
-
-This binding uses the common clock binding:
- Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-This binding is used for ILP clock (sometimes referred as "slow clock")
-on Broadcom BCM53573 devices using Cortex-A7 CPU.
-
-ILP's rate has to be calculated on runtime and it depends on ALP clock
-which has to be referenced.
-
-This clock is part of PMU (Power Management Unit), a Broadcom's device
-handing power-related aspects. Its node must be sub-node of the PMU
-device.
-
-Required properties:
-- compatible: "brcm,bcm53573-ilp"
-- clocks: has to reference an ALP clock
-- #clock-cells: should be <0>
-- clock-output-names: from common clock bindings, should contain clock
- name
-
-Example:
-
-pmu@18012000 {
- compatible = "simple-mfd", "syscon";
- reg = <0x18012000 0x00001000>;
-
- ilp {
- compatible = "brcm,bcm53573-ilp";
- clocks = <&alp>;
- #clock-cells = <0>;
- clock-output-names = "ilp";
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm53573-ilp.yaml b/Documentation/devicetree/bindings/clock/brcm,bcm53573-ilp.yaml
new file mode 100644
index 000000000000..cd291f428a8d
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/brcm,bcm53573-ilp.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/brcm,bcm53573-ilp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM53573 ILP clock
+
+maintainers:
+ - Rafał Miłecki <rafal@milecki.pl>
+
+description: >
+ ILP clock (sometimes referred to as the "slow clock") on Broadcom BCM53573
+ devices using a Cortex-A7 CPU.
+
+ The ILP rate has to be calculated at runtime and depends on the ALP clock,
+ which has to be referenced.
+
+ This clock is part of the PMU (Power Management Unit), a Broadcom device
+ handling power-related aspects. Its node must be a sub-node of the PMU device.
+
+properties:
+ compatible:
+ items:
+ - const: brcm,bcm53573-ilp
+
+ clocks:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 0
+
+ clock-output-names:
+ items:
+ - const: ilp
+
+additionalProperties: false
+
+examples:
+ - |
+ ilp {
+ compatible = "brcm,bcm53573-ilp";
+ clocks = <&alp>;
+ #clock-cells = <0>;
+ clock-output-names = "ilp";
+ };
diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm63xx-clocks.txt b/Documentation/devicetree/bindings/clock/brcm,bcm63xx-clocks.txt
deleted file mode 100644
index 3e7ca5530775..000000000000
--- a/Documentation/devicetree/bindings/clock/brcm,bcm63xx-clocks.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Gated Clock Controller Bindings for MIPS based BCM63XX SoCs
-
-Required properties:
-- compatible: must be one of:
- "brcm,bcm3368-clocks"
- "brcm,bcm6318-clocks"
- "brcm,bcm6318-ubus-clocks"
- "brcm,bcm6328-clocks"
- "brcm,bcm6358-clocks"
- "brcm,bcm6362-clocks"
- "brcm,bcm6368-clocks"
- "brcm,bcm63268-clocks"
-
-- reg: Address and length of the register set
-- #clock-cells: must be <1>
-
-
-Example:
-
-clkctl: clock-controller@10000004 {
- compatible = "brcm,bcm6328-clocks";
- reg = <0x10000004 0x4>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm63xx-clocks.yaml b/Documentation/devicetree/bindings/clock/brcm,bcm63xx-clocks.yaml
new file mode 100644
index 000000000000..56909ea499a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/brcm,bcm63xx-clocks.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/brcm,bcm63xx-clocks.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MIPS based BCM63XX SoCs Gated Clock Controller
+
+maintainers:
+ - Álvaro Fernández Rojas <noltari@gmail.com>
+ - Jonas Gorski <jonas.gorski@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm3368-clocks
+ - brcm,bcm6318-clocks
+ - brcm,bcm6318-ubus-clocks
+ - brcm,bcm6328-clocks
+ - brcm,bcm6358-clocks
+ - brcm,bcm6362-clocks
+ - brcm,bcm6368-clocks
+ - brcm,bcm63268-clocks
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@10000004 {
+ compatible = "brcm,bcm6328-clocks";
+ reg = <0x10000004 0x4>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/cirrus,ep7209-clk.yaml b/Documentation/devicetree/bindings/clock/cirrus,ep7209-clk.yaml
new file mode 100644
index 000000000000..fbd0d50d46a8
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/cirrus,ep7209-clk.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/cirrus,ep7209-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic CLPS711X Clock Controller
+
+maintainers:
+ - Alexander Shiyan <shc_work@mail.ru>
+
+description:
+ See include/dt-bindings/clock/clps711x-clock.h for the full list of CLPS711X
+ clock IDs.
+
+properties:
+ compatible:
+ items:
+ - const: cirrus,ep7312-clk
+ - const: cirrus,ep7209-clk
+
+ reg:
+ maxItems: 1
+
+ startup-frequency:
+ description: Factory-set CPU startup frequency in Hz.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ "#clock-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - startup-frequency
+ - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@80000000 {
+ compatible = "cirrus,ep7312-clk", "cirrus,ep7209-clk";
+ reg = <0x80000000 0xc000>;
+ #clock-cells = <1>;
+ startup-frequency = <73728000>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/clps711x-clock.txt b/Documentation/devicetree/bindings/clock/clps711x-clock.txt
deleted file mode 100644
index f1bd53f79d91..000000000000
--- a/Documentation/devicetree/bindings/clock/clps711x-clock.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-* Clock bindings for the Cirrus Logic CLPS711X CPUs
-
-Required properties:
-- compatible : Shall contain "cirrus,ep7209-clk".
-- reg : Address of the internal register set.
-- startup-frequency: Factory set CPU startup frequency in HZ.
-- #clock-cells : Should be <1>.
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/clps711x-clock.h
-for the full list of CLPS711X clock IDs.
-
-Example:
- clks: clks@80000000 {
- #clock-cells = <1>;
- compatible = "cirrus,ep7312-clk", "cirrus,ep7209-clk";
- reg = <0x80000000 0xc000>;
- startup-frequency = <73728000>;
- };
diff --git a/Documentation/devicetree/bindings/clock/dove-divider-clock.txt b/Documentation/devicetree/bindings/clock/dove-divider-clock.txt
deleted file mode 100644
index 217871f483c0..000000000000
--- a/Documentation/devicetree/bindings/clock/dove-divider-clock.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-PLL divider based Dove clocks
-
-Marvell Dove has a 2GHz PLL, which feeds into a set of dividers to provide
-high speed clocks for a number of peripherals. These dividers are part of
-the PMU, and thus this node should be a child of the PMU node.
-
-The following clocks are provided:
-
-ID Clock
--------------
-0 AXI bus clock
-1 GPU clock
-2 VMeta clock
-3 LCD clock
-
-Required properties:
-- compatible : shall be "marvell,dove-divider-clock"
-- reg : shall be the register address of the Core PLL and Clock Divider
- Control 0 register. This will cover that register, as well as the
- Core PLL and Clock Divider Control 1 register. Thus, it will have
- a size of 8.
-- #clock-cells : from common clock binding; shall be set to 1
-
-divider_clk: core-clock@64 {
- compatible = "marvell,dove-divider-clock";
- reg = <0x0064 0x8>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/clock/img,pistachio-clk.yaml b/Documentation/devicetree/bindings/clock/img,pistachio-clk.yaml
new file mode 100644
index 000000000000..e70feee8e894
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/img,pistachio-clk.yaml
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/img,pistachio-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Imagination Technologies Pistachio SoC clock controllers
+
+maintainers:
+ - Andrew Bresticker <abrestic@chromium.org>
+
+description: |
+ Pistachio has four clock controllers (core clock, peripheral clock, peripheral
+ general control, and top general control), which are instantiated individually
+ in the device tree.
+
+ Core clock controller:
+
+ The core clock controller generates clocks for the CPU, RPU (WiFi + BT
+ co-processor), audio, and several peripherals.
+
+ Peripheral clock controller:
+
+ The peripheral clock controller generates clocks for the DDR, ROM, and other
+ peripherals. The peripheral system clock ("periph_sys") generated by the core
+ clock controller is the input clock to the peripheral clock controller.
+
+ Peripheral general control:
+
+ The peripheral general control block generates system interface clocks and
+ resets for various peripherals. It also contains miscellaneous peripheral
+ control registers.
+
+ Top-level general control:
+
+ The top-level general control block contains miscellaneous control registers
+ and gates for the external clocks "audio_clk_in" and "enet_clk_in".
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - img,pistachio-clk
+ - img,pistachio-clk-periph
+ - img,pistachio-cr-periph
+ - img,pistachio-cr-top
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+
+ clock-names:
+ minItems: 1
+ maxItems: 3
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+ - clocks
+ - clock-names
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: img,pistachio-clk
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External 52 MHz oscillator
+ - description: Alternate audio reference clock
+ - description: Alternate ethernet PHY clock
+
+ clock-names:
+ items:
+ - const: xtal
+ - const: audio_refclk_ext_gate
+ - const: ext_enet_in_gate
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: img,pistachio-clk-periph
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Peripheral system clock
+
+ clock-names:
+ items:
+ - const: periph_sys_core
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: img,pistachio-cr-periph
+ then:
+ properties:
+ clocks:
+ items:
+ - description: System interface clock
+
+ clock-names:
+ items:
+ - const: sys
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: img,pistachio-cr-top
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External audio reference clock
+ - description: External ethernet PHY clock
+
+ clock-names:
+ items:
+ - const: audio_clk_in
+ - const: enet_clk_in
+
+additionalProperties: false
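+
+# Minimal sketch of the core clock controller; the unit address and the
+# cr_top clock specifiers are illustrative.
+examples:
+ - |
+ clock-controller@18144000 {
+ compatible = "img,pistachio-clk";
+ reg = <0x18144000 0x800>;
+ #clock-cells = <1>;
+ clocks = <&xtal>, <&cr_top 0>, <&cr_top 1>;
+ clock-names = "xtal", "audio_refclk_ext_gate", "ext_enet_in_gate";
+ };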
diff --git a/Documentation/devicetree/bindings/clock/lpc1850-ccu.txt b/Documentation/devicetree/bindings/clock/lpc1850-ccu.txt
deleted file mode 100644
index 8cf8f0ecdd16..000000000000
--- a/Documentation/devicetree/bindings/clock/lpc1850-ccu.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-* NXP LPC1850 Clock Control Unit (CCU)
-
-Each CGU base clock has several clock branches which can be turned on
-or off independently by the Clock Control Units CCU1 or CCU2. The
-branch clocks are distributed between CCU1 and CCU2.
-
- - Above text taken from NXP LPC1850 User Manual.
-
-This binding uses the common clock binding:
- Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- compatible:
- Should be "nxp,lpc1850-ccu"
-- reg:
- Shall define the base and range of the address space
- containing clock control registers
-- #clock-cells:
- Shall have value <1>. The permitted clock-specifier values
- are the branch clock names defined in table below.
-- clocks:
- Shall contain a list of phandles for the base clocks routed
- from the CGU to the specific CCU. See mapping of base clocks
- and CCU in table below.
-- clock-names:
- Shall contain a list of names for the base clock routed
- from the CGU to the specific CCU. Valid CCU clock names:
- "base_usb0_clk", "base_periph_clk", "base_usb1_clk",
- "base_cpu_clk", "base_spifi_clk", "base_spi_clk",
- "base_apb1_clk", "base_apb3_clk", "base_adchs_clk",
- "base_sdio_clk", "base_ssp0_clk", "base_ssp1_clk",
- "base_uart0_clk", "base_uart1_clk", "base_uart2_clk",
- "base_uart3_clk", "base_audio_clk"
-
-Which branch clocks that are available on the CCU depends on the
-specific LPC part. Check the user manual for your specific part.
-
-A list of CCU clocks can be found in dt-bindings/clock/lpc18xx-ccu.h.
-
-Example board file:
-
-soc {
- ccu1: clock-controller@40051000 {
- compatible = "nxp,lpc1850-ccu";
- reg = <0x40051000 0x1000>;
- #clock-cells = <1>;
- clocks = <&cgu BASE_APB3_CLK>, <&cgu BASE_APB1_CLK>,
- <&cgu BASE_SPIFI_CLK>, <&cgu BASE_CPU_CLK>,
- <&cgu BASE_PERIPH_CLK>, <&cgu BASE_USB0_CLK>,
- <&cgu BASE_USB1_CLK>, <&cgu BASE_SPI_CLK>;
- clock-names = "base_apb3_clk", "base_apb1_clk",
- "base_spifi_clk", "base_cpu_clk",
- "base_periph_clk", "base_usb0_clk",
- "base_usb1_clk", "base_spi_clk";
- };
-
- ccu2: clock-controller@40052000 {
- compatible = "nxp,lpc1850-ccu";
- reg = <0x40052000 0x1000>;
- #clock-cells = <1>;
- clocks = <&cgu BASE_AUDIO_CLK>, <&cgu BASE_UART3_CLK>,
- <&cgu BASE_UART2_CLK>, <&cgu BASE_UART1_CLK>,
- <&cgu BASE_UART0_CLK>, <&cgu BASE_SSP1_CLK>,
- <&cgu BASE_SSP0_CLK>, <&cgu BASE_SDIO_CLK>;
- clock-names = "base_audio_clk", "base_uart3_clk",
- "base_uart2_clk", "base_uart1_clk",
- "base_uart0_clk", "base_ssp1_clk",
- "base_ssp0_clk", "base_sdio_clk";
- };
-
- /* A user of CCU branch clocks */
- uart1: serial@40082000 {
- ...
- clocks = <&ccu2 CLK_APB0_UART1>, <&ccu1 CLK_CPU_UART1>;
- ...
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/lpc1850-cgu.txt b/Documentation/devicetree/bindings/clock/lpc1850-cgu.txt
deleted file mode 100644
index 2cc32a9a945a..000000000000
--- a/Documentation/devicetree/bindings/clock/lpc1850-cgu.txt
+++ /dev/null
@@ -1,131 +0,0 @@
-* NXP LPC1850 Clock Generation Unit (CGU)
-
-The CGU generates multiple independent clocks for the core and the
-peripheral blocks of the LPC18xx. Each independent clock is called
-a base clock and itself is one of the inputs to the two Clock
-Control Units (CCUs) which control the branch clocks to the
-individual peripherals.
-
-The CGU selects the inputs to the clock generators from multiple
-clock sources, controls the clock generation, and routes the outputs
-of the clock generators through the clock source bus to the output
-stages. Each output stage provides an independent clock source and
-corresponds to one of the base clocks for the LPC18xx.
-
- - Above text taken from NXP LPC1850 User Manual.
-
-
-This binding uses the common clock binding:
- Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- compatible:
- Should be "nxp,lpc1850-cgu"
-- reg:
- Shall define the base and range of the address space
- containing clock control registers
-- #clock-cells:
- Shall have value <1>. The permitted clock-specifier values
- are the base clock numbers defined below.
-- clocks:
- Shall contain a list of phandles for the external input
- sources to the CGU. The list shall be in the following
- order: xtal, 32khz, enet_rx_clk, enet_tx_clk, gp_clkin.
-- clock-indices:
- Shall be an ordered list of numbers defining the base clock
- number provided by the CGU.
-- clock-output-names:
- Shall be an ordered list of strings defining the names of
- the clocks provided by the CGU.
-
-Which base clocks that are available on the CGU depends on the
-specific LPC part. Base clocks are numbered from 0 to 27.
-
-Number: Name: Description:
- 0 BASE_SAFE_CLK Base safe clock (always on) for WWDT
- 1 BASE_USB0_CLK Base clock for USB0
- 2 BASE_PERIPH_CLK Base clock for Cortex-M0SUB subsystem,
- SPI, and SGPIO
- 3 BASE_USB1_CLK Base clock for USB1
- 4 BASE_CPU_CLK System base clock for ARM Cortex-M core
- and APB peripheral blocks #0 and #2
- 5 BASE_SPIFI_CLK Base clock for SPIFI
- 6 BASE_SPI_CLK Base clock for SPI
- 7 BASE_PHY_RX_CLK Base clock for Ethernet PHY Receive clock
- 8 BASE_PHY_TX_CLK Base clock for Ethernet PHY Transmit clock
- 9 BASE_APB1_CLK Base clock for APB peripheral block # 1
-10 BASE_APB3_CLK Base clock for APB peripheral block # 3
-11 BASE_LCD_CLK Base clock for LCD
-12 BASE_ADCHS_CLK Base clock for ADCHS
-13 BASE_SDIO_CLK Base clock for SD/MMC
-14 BASE_SSP0_CLK Base clock for SSP0
-15 BASE_SSP1_CLK Base clock for SSP1
-16 BASE_UART0_CLK Base clock for UART0
-17 BASE_UART1_CLK Base clock for UART1
-18 BASE_UART2_CLK Base clock for UART2
-19 BASE_UART3_CLK Base clock for UART3
-20 BASE_OUT_CLK Base clock for CLKOUT pin
-24-21 - Reserved
-25 BASE_AUDIO_CLK Base clock for audio system (I2S)
-26 BASE_CGU_OUT0_CLK Base clock for CGU_OUT0 clock output
-27 BASE_CGU_OUT1_CLK Base clock for CGU_OUT1 clock output
-
-BASE_PERIPH_CLK and BASE_SPI_CLK is only available on LPC43xx.
-BASE_ADCHS_CLK is only available on LPC4370.
-
-
-Example board file:
-
-/ {
- clocks {
- xtal: xtal {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <12000000>;
- };
-
- xtal32: xtal32 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- };
-
- enet_rx_clk: enet_rx_clk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <0>;
- clock-output-names = "enet_rx_clk";
- };
-
- enet_tx_clk: enet_tx_clk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <0>;
- clock-output-names = "enet_tx_clk";
- };
-
- gp_clkin: gp_clkin {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <0>;
- clock-output-names = "gp_clkin";
- };
- };
-
- soc {
- cgu: clock-controller@40050000 {
- compatible = "nxp,lpc1850-cgu";
- reg = <0x40050000 0x1000>;
- #clock-cells = <1>;
- clocks = <&xtal>, <&creg_clk 1>, <&enet_rx_clk>, <&enet_tx_clk>, <&gp_clkin>;
- };
-
- /* A CGU and CCU clock consumer */
- lcdc: lcdc@40008000 {
- ...
- clocks = <&cgu BASE_LCD_CLK>, <&ccu1 CLK_CPU_LCD>;
- clock-names = "clcdclk", "apb_pclk";
- ...
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/lpc1850-creg-clk.txt b/Documentation/devicetree/bindings/clock/lpc1850-creg-clk.txt
deleted file mode 100644
index b6b2547a3d17..000000000000
--- a/Documentation/devicetree/bindings/clock/lpc1850-creg-clk.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-* NXP LPC1850 CREG clocks
-
-The NXP LPC18xx/43xx CREG (Configuration Registers) block contains
-control registers for two low speed clocks. One of the clocks is a
-32 kHz oscillator driver with power up/down and clock gating. Next
-is a fixed divider that creates a 1 kHz clock from the 32 kHz osc.
-
-These clocks are used by the RTC and the Event Router peripherals.
-The 32 kHz can also be routed to other peripherals to enable low
-power modes.
-
-This binding uses the common clock binding:
- Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- compatible:
- Should be "nxp,lpc1850-creg-clk"
-- #clock-cells:
- Shall have value <1>.
-- clocks:
- Shall contain a phandle to the fixed 32 kHz crystal.
-
-The creg-clk node must be a child of the creg syscon node.
-
-The following clocks are available from the clock node.
-
-Clock ID Name
- 0 1 kHz clock
- 1 32 kHz Oscillator
-
-Example:
-soc {
- creg: syscon@40043000 {
- compatible = "nxp,lpc1850-creg", "syscon", "simple-mfd";
- reg = <0x40043000 0x1000>;
-
- creg_clk: clock-controller {
- compatible = "nxp,lpc1850-creg-clk";
- clocks = <&xtal32>;
- #clock-cells = <1>;
- };
-
- ...
- };
-
- rtc: rtc@40046000 {
- ...
- clocks = <&creg_clk 0>, <&ccu1 CLK_CPU_BUS>;
- clock-names = "rtc", "reg";
- ...
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/lsi,axm5516-clks.txt b/Documentation/devicetree/bindings/clock/lsi,axm5516-clks.txt
deleted file mode 100644
index 3ce97cfe999b..000000000000
--- a/Documentation/devicetree/bindings/clock/lsi,axm5516-clks.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-AXM5516 clock driver bindings
------------------------------
-
-Required properties :
-- compatible : shall contain "lsi,axm5516-clks"
-- reg : shall contain base register location and length
-- #clock-cells : shall contain 1
-
-The consumer specifies the desired clock by having the clock ID in its "clocks"
-phandle cell. See <dt-bindings/clock/lsi,axxia-clock.h> for the list of
-supported clock IDs.
-
-Example:
-
- clks: clock-controller@2010020000 {
- compatible = "lsi,axm5516-clks";
- #clock-cells = <1>;
- reg = <0x20 0x10020000 0 0x20000>;
- };
-
- serial0: uart@2010080000 {
- compatible = "arm,pl011", "arm,primecell";
- reg = <0x20 0x10080000 0 0x1000>;
- interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks AXXIA_CLK_PER>;
- clock-names = "apb_pclk";
- };
- };
-
diff --git a/Documentation/devicetree/bindings/clock/lsi,axm5516-clks.yaml b/Documentation/devicetree/bindings/clock/lsi,axm5516-clks.yaml
new file mode 100644
index 000000000000..7a792dbeffb3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/lsi,axm5516-clks.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2025 LSI
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/lsi,axm5516-clks.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LSI AXM5516 Clock Controller
+
+maintainers:
+ - Anders Berg <anders.berg@lsi.com>
+
+description:
+ See <dt-bindings/clock/lsi,axxia-clock.h> for the list of supported clock IDs.
+
+properties:
+ compatible:
+ const: lsi,axm5516-clks
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ clock-controller@2010020000 {
+ compatible = "lsi,axm5516-clks";
+ #clock-cells = <1>;
+ reg = <0x20 0x10020000 0x20000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/lsi,nspire-cx-clock.yaml b/Documentation/devicetree/bindings/clock/lsi,nspire-cx-clock.yaml
new file mode 100644
index 000000000000..52c217d210d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/lsi,nspire-cx-clock.yaml
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/lsi,nspire-cx-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI-NSPIRE Clocks
+
+maintainers:
+ - Daniel Tang <dt.tangr@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - lsi,nspire-cx-ahb-divider
+ - lsi,nspire-classic-ahb-divider
+ - lsi,nspire-cx-clock
+ - lsi,nspire-classic-clock
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 0
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
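+
+# Minimal sketch; the unit address and the parent clock phandle (&base_clk)
+# are illustrative for a TI-Nspire CX AHB divider.
+examples:
+ - |
+ clock-controller@900b0000 {
+ compatible = "lsi,nspire-cx-ahb-divider";
+ reg = <0x900b0000 0x4>;
+ clocks = <&base_clk>;
+ #clock-cells = <0>;
+ };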
diff --git a/Documentation/devicetree/bindings/clock/marvell,armada-370-corediv-clock.yaml b/Documentation/devicetree/bindings/clock/marvell,armada-370-corediv-clock.yaml
new file mode 100644
index 000000000000..9d766558cdb9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell,armada-370-corediv-clock.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/marvell,armada-370-corediv-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell MVEBU Core Divider Clock
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - marvell,armada-370-corediv-clock
+ - marvell,armada-375-corediv-clock
+ - marvell,armada-380-corediv-clock
+ - marvell,mv98dx3236-corediv-clock
+ - items:
+ - const: marvell,armada-390-corediv-clock
+ - const: marvell,armada-380-corediv-clock
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-output-names:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@18740 {
+ compatible = "marvell,armada-370-corediv-clock";
+ reg = <0x18740 0xc>;
+ #clock-cells = <1>;
+ clocks = <&pll>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/marvell,armada-3700-periph-clock.yaml b/Documentation/devicetree/bindings/clock/marvell,armada-3700-periph-clock.yaml
new file mode 100644
index 000000000000..87e8e4ca111a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell,armada-3700-periph-clock.yaml
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/marvell,armada-3700-periph-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Armada 37xx SoCs Peripheral Clocks
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+description: >
+ Marvell Armada 37xx SoCs provide peripheral clocks which are used as clock
+ source for the peripheral of the SoC.
+
+ There are two different blocks associated to north bridge and south bridge.
+
+ The following is a list of provided IDs for Armada 3700 North bridge clocks:
+
+ ID Clock name Description
+ -----------------------------------
+ 0 mmc MMC controller
+ 1 sata_host Sata Host
+ 2 sec_at Security AT
+ 3 sac_dap Security DAP
+ 4 tsecm Security Engine
+ 5 setm_tmx Serial Embedded Trace Module
+ 6 avs Adaptive Voltage Scaling
+ 7 sqf SPI
+ 8 pwm PWM
+ 9 i2c_2 I2C 2
+ 10 i2c_1 I2C 1
+ 11 ddr_phy DDR PHY
+ 12 ddr_fclk DDR F clock
+ 13 trace Trace
+ 14 counter Counter
+ 15 eip97 EIP 97
+ 16 cpu CPU
+
+ The following is a list of provided IDs for Armada 3700 South bridge clocks:
+
+ ID Clock name Description
+ -----------------------------------
+ 0 gbe-50 50 MHz parent clock for Gigabit Ethernet
+ 1 gbe-core parent clock for Gigabit Ethernet core
+ 2 gbe-125 125 MHz parent clock for Gigabit Ethernet
+ 3 gbe1-50 50 MHz clock for Gigabit Ethernet port 1
+ 4 gbe0-50 50 MHz clock for Gigabit Ethernet port 0
+ 5 gbe1-125 125 MHz clock for Gigabit Ethernet port 1
+ 6 gbe0-125 125 MHz clock for Gigabit Ethernet port 0
+ 7 gbe1-core Gigabit Ethernet core port 1
+ 8 gbe0-core Gigabit Ethernet core port 0
+ 9 gbe-bm Gigabit Ethernet Buffer Manager
+ 10 sdio SDIO
+ 11 usb32-sub2-sys USB 2 clock
+ 12 usb32-ss-sys USB 3 clock
+ 13 pcie PCIe controller
+
+properties:
+ compatible:
+ oneOf:
+ - const: marvell,armada-3700-periph-clock-sb
+ - items:
+ - const: marvell,armada-3700-periph-clock-nb
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: TBG-A P clock and specifier
+ - description: TBG-B P clock and specifier
+ - description: TBG-A S clock and specifier
+ - description: TBG-B S clock and specifier
+ - description: Xtal clock and specifier
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@13000{
+ compatible = "marvell,armada-3700-periph-clock-sb";
+ reg = <0x13000 0x1000>;
+ clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, <&tbg 3>, <&xtalclk>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/marvell,armada-3700-tbg-clock.yaml b/Documentation/devicetree/bindings/clock/marvell,armada-3700-tbg-clock.yaml
new file mode 100644
index 000000000000..7fd1d758f794
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell,armada-3700-tbg-clock.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/marvell,armada-3700-tbg-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Armada 3700 Time Base Generator Clock
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+description: >
+ Marvell Armada 37xx SoCs provide Time Base Generator clocks which are used as
+ parent clocks for the peripheral clocks.
+
+ The TBG clock consumer should specify the desired clock by having the clock ID
+ in its "clocks" phandle cell.
+
+ The following is a list of provided IDs and clock names on Armada 3700:
+
+ 0 = TBG A P
+ 1 = TBG B P
+ 2 = TBG A S
+ 3 = TBG B S
+
+properties:
+ compatible:
+ const: marvell,armada-3700-tbg-clock
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@13200 {
+ compatible = "marvell,armada-3700-tbg-clock";
+ reg = <0x13200 0x1000>;
+ clocks = <&xtalclk>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/marvell,armada-xp-cpu-clock.yaml b/Documentation/devicetree/bindings/clock/marvell,armada-xp-cpu-clock.yaml
new file mode 100644
index 000000000000..f2ac6741da9a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell,armada-xp-cpu-clock.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/marvell,armada-xp-cpu-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell EBU CPU Clock
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+properties:
+ compatible:
+ enum:
+ - marvell,armada-xp-cpu-clock
+ - marvell,mv98dx3236-cpu-clock
+
+ reg:
+ items:
+ - description: Clock complex registers
+ - description: PMU DFS registers
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@d0018700 {
+ #clock-cells = <1>;
+ compatible = "marvell,armada-xp-cpu-clock";
+ reg = <0xd0018700 0xa0>, <0x1c054 0x10>;
+ clocks = <&coreclk 1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/marvell,berlin.txt b/Documentation/devicetree/bindings/clock/marvell,berlin.txt
deleted file mode 100644
index c611c495f3ff..000000000000
--- a/Documentation/devicetree/bindings/clock/marvell,berlin.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Device Tree Clock bindings for Marvell Berlin
-
-This binding uses the common clock binding[1].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Clock related registers are spread among the chip control registers. Berlin
-clock node should be a sub-node of the chip controller node. Marvell Berlin2
-(BG2, BG2CD, BG2Q) SoCs share the same IP for PLLs and clocks, with some
-minor differences in features and register layout.
-
-Required properties:
-- compatible: must be "marvell,berlin2-clk" or "marvell,berlin2q-clk"
-- #clock-cells: must be 1
-- clocks: must be the input parent clock phandle
-- clock-names: name of the input parent clock
- Allowed clock-names for the reference clocks are
- "refclk" for the SoCs oscillator input on all SoCs,
- and SoC-specific input clocks for
- BG2/BG2CD: "video_ext0" for the external video clock input
-
-
-Example:
-
-chip_clk: clock {
- compatible = "marvell,berlin2q-clk";
-
- #clock-cells = <1>;
- clocks = <&refclk>;
- clock-names = "refclk";
-};
diff --git a/Documentation/devicetree/bindings/clock/marvell,berlin2-clk.yaml b/Documentation/devicetree/bindings/clock/marvell,berlin2-clk.yaml
new file mode 100644
index 000000000000..8d48a2c7e381
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell,berlin2-clk.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/marvell,berlin2-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Berlin Clock Controller
+
+maintainers:
+ - Jisheng Zhang <jszhang@kernel.org>
+
+description:
+ Clock related registers are spread among the chip control registers. Berlin
+ clock node should be a sub-node of the chip controller node. Marvell Berlin2
+ (BG2, BG2CD, BG2Q) SoCs share the same IP for PLLs and clocks, with some minor
+ differences in features and register layout.
+
+properties:
+ compatible:
+ enum:
+ - marvell,berlin2-clk
+ - marvell,berlin2q-clk
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - enum:
+ - refclk
+ - video_ext0
+
+required:
+ - compatible
+ - '#clock-cells'
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller {
+ compatible = "marvell,berlin2q-clk";
+ #clock-cells = <1>;
+ clocks = <&refclk>;
+ clock-names = "refclk";
+ };
diff --git a/Documentation/devicetree/bindings/clock/marvell,dove-divider-clock.yaml b/Documentation/devicetree/bindings/clock/marvell,dove-divider-clock.yaml
new file mode 100644
index 000000000000..7a8e0e281b63
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell,dove-divider-clock.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/marvell,dove-divider-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Dove PLL Divider Clock
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+description: >
+ Marvell Dove has a 2GHz PLL, which feeds into a set of dividers to provide
+ high speed clocks for a number of peripherals. These dividers are part of the
+ PMU, and thus this node should be a child of the PMU node.
+
+ The following clocks are provided:
+
+ ID Clock
+ -------------
+ 0 AXI bus clock
+ 1 GPU clock
+ 2 VMeta clock
+ 3 LCD clock
+
+properties:
+ compatible:
+ const: marvell,dove-divider-clock
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@64 {
+ compatible = "marvell,dove-divider-clock";
+ reg = <0x0064 0x8>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/marvell,mvebu-core-clock.yaml b/Documentation/devicetree/bindings/clock/marvell,mvebu-core-clock.yaml
new file mode 100644
index 000000000000..215bcd9080c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell,mvebu-core-clock.yaml
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/marvell,mvebu-core-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell MVEBU SoC core clock
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+description: >
+ Marvell MVEBU SoCs usually allow the core clock frequencies to be determined
+ by reading the Sample-At-Reset (SAR) register. The core clock consumer should
+ specify the desired clock by having the clock ID in its "clocks" phandle cell.
+
+ The following is a list of provided IDs and clock names on Armada 370/XP:
+ 0 = tclk (Internal Bus clock)
+ 1 = cpuclk (CPU clock)
+ 2 = nbclk (L2 Cache clock)
+ 3 = hclk (DRAM control clock)
+ 4 = dramclk (DDR clock)
+
+ The following is a list of provided IDs and clock names on Armada 375:
+ 0 = tclk (Internal Bus clock)
+ 1 = cpuclk (CPU clock)
+ 2 = l2clk (L2 Cache clock)
+ 3 = ddrclk (DDR clock)
+
+ The following is a list of provided IDs and clock names on Armada 380/385:
+ 0 = tclk (Internal Bus clock)
+ 1 = cpuclk (CPU clock)
+ 2 = l2clk (L2 Cache clock)
+ 3 = ddrclk (DDR clock)
+
+ The following is a list of provided IDs and clock names on Armada 39x:
+ 0 = tclk (Internal Bus clock)
+ 1 = cpuclk (CPU clock)
+ 2 = nbclk (Coherent Fabric clock)
+ 3 = hclk (SDRAM Controller Internal Clock)
+ 4 = dclk (SDRAM Interface Clock)
+ 5 = refclk (Reference Clock)
+
+ The following is a list of provided IDs and clock names on 98dx3236:
+ 0 = tclk (Internal Bus clock)
+ 1 = cpuclk (CPU clock)
+ 2 = ddrclk (DDR clock)
+ 3 = mpll (MPLL Clock)
+
+ The following is a list of provided IDs and clock names on Kirkwood and Dove:
+ 0 = tclk (Internal Bus clock)
+ 1 = cpuclk (CPU0 clock)
+ 2 = l2clk (L2 Cache clock derived from CPU0 clock)
+ 3 = ddrclk (DDR controller clock derived from CPU0 clock)
+
+ The following is a list of provided IDs and clock names on Orion5x:
+ 0 = tclk (Internal Bus clock)
+ 1 = cpuclk (CPU0 clock)
+ 2 = ddrclk (DDR controller clock derived from CPU0 clock)
+
+properties:
+ compatible:
+ enum:
+ - marvell,armada-370-core-clock
+ - marvell,armada-375-core-clock
+ - marvell,armada-380-core-clock
+ - marvell,armada-390-core-clock
+ - marvell,armada-xp-core-clock
+ - marvell,dove-core-clock
+ - marvell,kirkwood-core-clock
+ - marvell,mv88f5181-core-clock
+ - marvell,mv88f5182-core-clock
+ - marvell,mv88f5281-core-clock
+ - marvell,mv88f6180-core-clock
+ - marvell,mv88f6183-core-clock
+ - marvell,mv98dx1135-core-clock
+ - marvell,mv98dx3236-core-clock
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clock-output-names:
+ description: Overwrite default clock output names.
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
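+
+# Minimal sketch for an Armada 370; the unit address is board-specific and
+# illustrative.
+examples:
+ - |
+ clock-controller@d0018230 {
+ compatible = "marvell,armada-370-core-clock";
+ reg = <0xd0018230 0x08>;
+ #clock-cells = <1>;
+ };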
diff --git a/Documentation/devicetree/bindings/clock/marvell-armada-370-gating-clock.yaml b/Documentation/devicetree/bindings/clock/marvell-armada-370-gating-clock.yaml
new file mode 100644
index 000000000000..0475360d2b6a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell-armada-370-gating-clock.yaml
@@ -0,0 +1,227 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/marvell-armada-370-gating-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell EBU SoC gating-clock
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+description: >
+ Marvell Armada 370/375/380/385/39x/XP, Dove and Kirkwood allow some peripheral
+ clocks to be gated to save some power. The clock ID is directly mapped to the
+ corresponding clock gating control bit in HW to ease manual clock lookup in
+ datasheet.
+
+ The following is a list of provided IDs for Armada 370:
+
+ ID Clock Peripheral
+ -----------------------------------
+ 0 Audio AC97 Cntrl
+ 1 pex0_en PCIe 0 Clock out
+ 2 pex1_en PCIe 1 Clock out
+ 3 ge1 Gigabit Ethernet 1
+ 4 ge0 Gigabit Ethernet 0
+ 5 pex0 PCIe Cntrl 0
+ 9 pex1 PCIe Cntrl 1
+ 15 sata0 SATA Host 0
+ 17 sdio SDHCI Host
+ 23 crypto CESA (crypto engine)
+ 25 tdm Time Division Mplx
+ 28 ddr DDR Cntrl
+ 30 sata1 SATA Host 1
+
+ The following is a list of provided IDs for Armada 375:
+
+ ID Clock Peripheral
+ -----------------------------------
+ 2 mu Management Unit
+ 3 pp Packet Processor
+ 4 ptp PTP
+ 5 pex0 PCIe 0 Clock out
+ 6 pex1 PCIe 1 Clock out
+ 8 audio Audio Cntrl
+ 11 nd_clk Nand Flash Cntrl
+ 14 sata0_link SATA 0 Link
+ 15 sata0_core SATA 0 Core
+ 16 usb3 USB3 Host
+ 17 sdio SDHCI Host
+ 18 usb USB Host
+ 19 gop Gigabit Ethernet MAC
+ 20 sata1_link SATA 1 Link
+ 21 sata1_core SATA 1 Core
+ 22 xor0 XOR DMA 0
+ 23 xor1 XOR DMA 1
+ 24 copro Coprocessor
+ 25 tdm Time Division Mplx
+ 28 crypto0_enc Cryptographic Unit Port 0 Encryption
+ 29 crypto0_core Cryptographic Unit Port 0 Core
+ 30 crypto1_enc Cryptographic Unit Port 1 Encryption
+ 31 crypto1_core Cryptographic Unit Port 1 Core
+
+ The following is a list of provided IDs for Armada 380/385:
+
+ ID Clock Peripheral
+ -----------------------------------
+ 0 audio Audio
+ 2 ge2 Gigabit Ethernet 2
+ 3 ge1 Gigabit Ethernet 1
+ 4 ge0 Gigabit Ethernet 0
+ 5 pex1 PCIe 1
+ 6 pex2 PCIe 2
+ 7 pex3 PCIe 3
+ 8 pex0 PCIe 0
+ 9 usb3h0 USB3 Host 0
+ 10 usb3h1 USB3 Host 1
+ 11 usb3d USB3 Device
+ 13 bm Buffer Management
+ 14 crypto0z Cryptographic 0 Z
+ 15 sata0 SATA 0
+ 16 crypto1z Cryptographic 1 Z
+ 17 sdio SDIO
+ 18 usb2 USB 2
+ 21 crypto1 Cryptographic 1
+ 22 xor0 XOR 0
+ 23 crypto0 Cryptographic 0
+ 25 tdm Time Division Multiplexing
+ 28 xor1 XOR 1
+ 30 sata1 SATA 1
+
+ The following is a list of provided IDs for Armada 39x:
+
+ ID Clock Peripheral
+ -----------------------------------
+ 5 pex1 PCIe 1
+ 6 pex2 PCIe 2
+ 7 pex3 PCIe 3
+ 8 pex0 PCIe 0
+ 9 usb3h0 USB3 Host 0
+ 10 usb3h1 USB3 Host 1
+ 15 sata0 SATA 0
+ 17 sdio SDIO
+ 22 xor0 XOR 0
+ 28 xor1 XOR 1
+
+ The following is a list of provided IDs for Armada XP:
+
+ ID Clock Peripheral
+ -----------------------------------
+ 0 audio Audio Cntrl
+ 1 ge3 Gigabit Ethernet 3
+ 2 ge2 Gigabit Ethernet 2
+ 3 ge1 Gigabit Ethernet 1
+ 4 ge0 Gigabit Ethernet 0
+ 5 pex0 PCIe Cntrl 0
+ 6 pex1 PCIe Cntrl 1
+ 7 pex2 PCIe Cntrl 2
+ 8 pex3 PCIe Cntrl 3
+ 13 bp
+ 14 sata0lnk
+ 15 sata0 SATA Host 0
+ 16 lcd LCD Cntrl
+ 17 sdio SDHCI Host
+ 18 usb0 USB Host 0
+ 19 usb1 USB Host 1
+ 20 usb2 USB Host 2
+ 22 xor0 XOR DMA 0
+ 23 crypto CESA engine
+ 25 tdm Time Division Mplx
+ 28 xor1 XOR DMA 1
+ 29 sata1lnk
+ 30 sata1 SATA Host 1
+
+ The following is a list of provided IDs for 98dx3236:
+
+ ID Clock Peripheral
+ -----------------------------------
+ 3 ge1 Gigabit Ethernet 1
+ 4 ge0 Gigabit Ethernet 0
+ 5 pex0 PCIe Cntrl 0
+ 17 sdio SDHCI Host
+ 18 usb0 USB Host 0
+ 22 xor0 XOR DMA 0
+
+ The following is a list of provided IDs for Dove:
+
+ ID Clock Peripheral
+ -----------------------------------
+ 0 usb0 USB Host 0
+ 1 usb1 USB Host 1
+ 2 ge Gigabit Ethernet
+ 3 sata SATA Host
+ 4 pex0 PCIe Cntrl 0
+ 5 pex1 PCIe Cntrl 1
+ 8 sdio0 SDHCI Host 0
+ 9 sdio1 SDHCI Host 1
+ 10 nand NAND Cntrl
+ 11 camera Camera Cntrl
+ 12 i2s0 I2S Cntrl 0
+ 13 i2s1 I2S Cntrl 1
+ 15 crypto CESA engine
+ 21 ac97 AC97 Cntrl
+ 22 pdma Peripheral DMA
+ 23 xor0 XOR DMA 0
+ 24 xor1 XOR DMA 1
+ 30 gephy Gigabit Ethernet PHY
+ Note: gephy(30) is implemented as a parent clock of ge(2)
+
+ The following is a list of provided IDs for Kirkwood:
+
+ ID Clock Peripheral
+ -----------------------------------
+ 0 ge0 Gigabit Ethernet 0
+ 2 pex0 PCIe Cntrl 0
+ 3 usb0 USB Host 0
+ 4 sdio SDIO Cntrl
+ 5 tsu Transp. Stream Unit
+ 6 dunit SDRAM Cntrl
+ 7 runit Runit
+ 8 xor0 XOR DMA 0
+ 9 audio I2S Cntrl 0
+ 14 sata0 SATA Host 0
+ 15 sata1 SATA Host 1
+ 16 xor1 XOR DMA 1
+ 17 crypto CESA engine
+ 18 pex1 PCIe Cntrl 1
+ 19 ge1 Gigabit Ethernet 1
+ 20 tdm Time Division Mplx
+
+properties:
+ compatible:
+ enum:
+ - marvell,armada-370-gating-clock
+ - marvell,armada-375-gating-clock
+ - marvell,armada-380-gating-clock
+ - marvell,armada-390-gating-clock
+ - marvell,armada-xp-gating-clock
+ - marvell,mv98dx3236-gating-clock
+ - marvell,dove-gating-clock
+ - marvell,kirkwood-gating-clock
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@d0038 {
+ compatible = "marvell,dove-gating-clock";
+ reg = <0xd0038 0x4>;
+ /* default parent clock is tclk */
+ clocks = <&core_clk 0>;
+ #clock-cells = <1>;
+ };
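+  # Consumer sketch adapted from the former mvebu-gated-clock.txt
+  # (sdio0 takes clock gate bit 8); the reg length is an assumption.
+  - |
+    mmc@92000 {
+      compatible = "marvell,dove-sdhci";
+      reg = <0x92000 0x100>;
+      /* get clk gate bit 8 (sdio0) */
+      clocks = <&gate_clk 8>;
+    };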
diff --git a/Documentation/devicetree/bindings/clock/maxim,max9485.txt b/Documentation/devicetree/bindings/clock/maxim,max9485.txt
deleted file mode 100644
index b8f5c3bbf12b..000000000000
--- a/Documentation/devicetree/bindings/clock/maxim,max9485.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-Devicetree bindings for Maxim MAX9485 Programmable Audio Clock Generator
-
-This device exposes 4 clocks in total:
-
-- MAX9485_MCLKOUT: A gated, buffered output of the input clock of 27 MHz
-- MAX9485_CLKOUT: A PLL that can be configured to 16 different discrete
- frequencies
-- MAX9485_CLKOUT[1,2]: Two gated outputs for MAX9485_CLKOUT
-
-MAX9485_CLKOUT[1,2] are children of MAX9485_CLKOUT which upchain all rate set
-requests.
-
-Required properties:
-- compatible: "maxim,max9485"
-- clocks: Input clock, must provide 27.000 MHz
-- clock-names: Must be set to "xclk"
-- #clock-cells: From common clock binding; shall be set to 1
-
-Optional properties:
-- reset-gpios: GPIO descriptor connected to the #RESET input pin
-- vdd-supply: A regulator node for Vdd
-- clock-output-names: Name of output clocks, as defined in common clock
- bindings
-
-If not explicitly set, the output names are "mclkout", "clkout", "clkout1"
-and "clkout2".
-
-Clocks are defined as preprocessor macros in the dt-binding header.
-
-Example:
-
- #include <dt-bindings/clock/maxim,max9485.h>
-
- xo-27mhz: xo-27mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <27000000>;
- };
-
- &i2c0 {
- max9485: audio-clock@63 {
- reg = <0x63>;
- compatible = "maxim,max9485";
- clock-names = "xclk";
- clocks = <&xo-27mhz>;
- reset-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
- vdd-supply = <&3v3-reg>;
- #clock-cells = <1>;
- };
- };
-
- // Clock consumer node
-
- foo@0 {
- compatible = "bar,foo";
- /* ... */
- clock-names = "foo-input-clk";
- clocks = <&max9485 MAX9485_CLKOUT1>;
- };
diff --git a/Documentation/devicetree/bindings/clock/maxim,max9485.yaml b/Documentation/devicetree/bindings/clock/maxim,max9485.yaml
new file mode 100644
index 000000000000..f9d8941c7235
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/maxim,max9485.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/maxim,max9485.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim MAX9485 Programmable Audio Clock Generator
+
+maintainers:
+ - Daniel Mack <daniel@zonque.org>
+
+description: >
+ Maxim MAX9485 Programmable Audio Clock Generator exposes 4 clocks in total:
+
+ - MAX9485_MCLKOUT: A gated, buffered output of the input clock of 27 MHz
+ - MAX9485_CLKOUT: A PLL that can be configured to 16 different discrete
+ frequencies
+ - MAX9485_CLKOUT[1,2]: Two gated outputs for MAX9485_CLKOUT
+
+ MAX9485_CLKOUT[1,2] are children of MAX9485_CLKOUT which upchain all rate set
+ requests.
+
+properties:
+ compatible:
+ const: maxim,max9485
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description: Input clock. Must provide 27 MHz
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: xclk
+
+ '#clock-cells':
+ const: 1
+
+ reset-gpios:
+ description: >
+ GPIO descriptor connected to the #RESET input pin
+
+ vdd-supply:
+ description: A regulator node for Vdd
+
+ clock-output-names:
+ description: Name of output clocks, as defined in common clock bindings
+ items:
+ - const: mclkout
+ - const: clkout
+ - const: clkout1
+ - const: clkout2
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clock-controller@63 {
+ compatible = "maxim,max9485";
+ reg = <0x63>;
+ #clock-cells = <1>;
+ clock-names = "xclk";
+ clocks = <&xo_27mhz>;
+ reset-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
+ vdd-supply = <&reg_3v3>;
+ };
+ };
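+  - |
+    #include <dt-bindings/clock/maxim,max9485.h>
+
+    // Consumer sketch from the former maxim,max9485.txt; "bar,foo" is
+    // a placeholder compatible and the unit address is illustrative.
+    foo@0 {
+      compatible = "bar,foo";
+      reg = <0>;
+      clock-names = "foo-input-clk";
+      clocks = <&max9485 MAX9485_CLKOUT1>;
+    };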
diff --git a/Documentation/devicetree/bindings/clock/mediatek,mtmips-sysc.yaml b/Documentation/devicetree/bindings/clock/mediatek,mtmips-sysc.yaml
index 83c1803ffd16..56bbd69b16d9 100644
--- a/Documentation/devicetree/bindings/clock/mediatek,mtmips-sysc.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mtmips-sysc.yaml
@@ -26,18 +26,22 @@ description: |
properties:
compatible:
- items:
- - enum:
- - ralink,mt7620-sysc
- - ralink,mt7628-sysc
- - ralink,mt7688-sysc
- - ralink,rt2880-sysc
- - ralink,rt3050-sysc
- - ralink,rt3052-sysc
- - ralink,rt3352-sysc
- - ralink,rt3883-sysc
- - ralink,rt5350-sysc
- - const: syscon
+ oneOf:
+ - items:
+ - enum:
+ - ralink,mt7620-sysc
+ - ralink,mt7688-sysc
+ - ralink,rt2880-sysc
+ - ralink,rt3050-sysc
+ - ralink,rt3052-sysc
+ - ralink,rt3352-sysc
+ - ralink,rt3883-sysc
+ - ralink,rt5350-sysc
+ - const: syscon
+ - items:
+ - const: ralink,mt7628-sysc
+ - const: ralink,mt7688-sysc
+ - const: syscon
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/clock/microchip,pic32.txt b/Documentation/devicetree/bindings/clock/microchip,pic32.txt
deleted file mode 100644
index c93d88fdd858..000000000000
--- a/Documentation/devicetree/bindings/clock/microchip,pic32.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Microchip PIC32 Clock Controller Binding
-----------------------------------------
-Microchip clock controller is consists of few oscillators, PLL, multiplexer
-and few divider modules.
-
-This binding uses common clock bindings.
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- compatible: shall be "microchip,pic32mzda-clk".
-- reg: shall contain base address and length of clock registers.
-- #clock-cells: shall be 1.
-
-Optional properties:
-- microchip,pic32mzda-sosc: shall be added only if platform has
- secondary oscillator connected.
-
-Example:
- rootclk: clock-controller@1f801200 {
- compatible = "microchip,pic32mzda-clk";
- reg = <0x1f801200 0x200>;
- #clock-cells = <1>;
- /* optional */
- microchip,pic32mzda-sosc;
- };
-
-
-The clock consumer shall specify the desired clock-output of the clock
-controller (as defined in [2]) by specifying output-id in its "clock"
-phandle cell.
-[2] include/dt-bindings/clock/microchip,pic32-clock.h
-
-For example for UART2:
-uart2: serial@2 {
- compatible = "microchip,pic32mzda-uart";
- reg = <>;
- interrupts = <>;
- clocks = <&rootclk PB2CLK>;
-};
diff --git a/Documentation/devicetree/bindings/clock/microchip,pic32mzda-clk.yaml b/Documentation/devicetree/bindings/clock/microchip,pic32mzda-clk.yaml
new file mode 100644
index 000000000000..a14a838140f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/microchip,pic32mzda-clk.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/microchip,pic32mzda-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip PIC32MZDA Clock Controller
+
+maintainers:
+ - Purna Chandra Mandal <purna.mandal@microchip.com>
+
+description:
+ The Microchip clock controller consists of a few oscillators, a PLL,
+ multiplexer and divider modules.
+
+properties:
+ compatible:
+ const: microchip,pic32mzda-clk
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ microchip,pic32mzda-sosc:
+ description: Presence of secondary oscillator.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@1f801200 {
+ compatible = "microchip,pic32mzda-clk";
+ reg = <0x1f801200 0x200>;
+ #clock-cells = <1>;
+ /* optional */
+ microchip,pic32mzda-sosc;
+ };
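+  - |
+    #include <dt-bindings/clock/microchip,pic32-clock.h>
+
+    // Consumer sketch based on the UART2 example in the former
+    // microchip,pic32.txt; reg and interrupts were elided there and
+    // are omitted here as well.
+    serial {
+      compatible = "microchip,pic32mzda-uart";
+      clocks = <&rootclk PB2CLK>;
+    };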
diff --git a/Documentation/devicetree/bindings/clock/moxa,moxart-clock.txt b/Documentation/devicetree/bindings/clock/moxa,moxart-clock.txt
deleted file mode 100644
index fedea84314a1..000000000000
--- a/Documentation/devicetree/bindings/clock/moxa,moxart-clock.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Device Tree Clock bindings for arch-moxart
-
-This binding uses the common clock binding[1].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-MOXA ART SoCs allow to determine PLL output and APB frequencies
-by reading registers holding multiplier and divisor information.
-
-
-PLL:
-
-Required properties:
-- compatible : Must be "moxa,moxart-pll-clock"
-- #clock-cells : Should be 0
-- reg : Should contain registers location and length
-- clocks : Should contain phandle + clock-specifier for the parent clock
-
-Optional properties:
-- clock-output-names : Should contain clock name
-
-
-APB:
-
-Required properties:
-- compatible : Must be "moxa,moxart-apb-clock"
-- #clock-cells : Should be 0
-- reg : Should contain registers location and length
-- clocks : Should contain phandle + clock-specifier for the parent clock
-
-Optional properties:
-- clock-output-names : Should contain clock name
-
-
-For example:
-
- clk_pll: clk_pll@98100000 {
- compatible = "moxa,moxart-pll-clock";
- #clock-cells = <0>;
- reg = <0x98100000 0x34>;
- };
-
- clk_apb: clk_apb@98100000 {
- compatible = "moxa,moxart-apb-clock";
- #clock-cells = <0>;
- reg = <0x98100000 0x34>;
- clocks = <&clk_pll>;
- };
diff --git a/Documentation/devicetree/bindings/clock/moxa,moxart-clock.yaml b/Documentation/devicetree/bindings/clock/moxa,moxart-clock.yaml
new file mode 100644
index 000000000000..bcf7cc240eba
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/moxa,moxart-clock.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/moxa,moxart-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MOXA ART Clock Controllers
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+description:
+ MOXA ART SoCs allow the PLL output and APB frequencies to be determined by
+ reading registers holding multiplier and divisor information.
+
+properties:
+ compatible:
+ enum:
+ - moxa,moxart-apb-clock
+ - moxa,moxart-pll-clock
+
+ "#clock-cells":
+ const: 0
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-output-names: true
+
+additionalProperties: false
+
+required:
+ - compatible
+ - "#clock-cells"
+ - reg
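+
+examples:
+  # Sketch based on the examples in the former moxa,moxart-clock.txt:
+  # the APB clock divides the rate from its parent PLL clock.
+  - |
+    clock-controller@98100000 {
+      compatible = "moxa,moxart-apb-clock";
+      #clock-cells = <0>;
+      reg = <0x98100000 0x34>;
+      clocks = <&clk_pll>;
+    };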
diff --git a/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
deleted file mode 100644
index d8f5c490f893..000000000000
--- a/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-* Core Clock bindings for Marvell MVEBU SoCs
-
-Marvell MVEBU SoCs usually allow to determine core clock frequencies by
-reading the Sample-At-Reset (SAR) register. The core clock consumer should
-specify the desired clock by having the clock ID in its "clocks" phandle cell.
-
-The following is a list of provided IDs and clock names on Armada 370/XP:
- 0 = tclk (Internal Bus clock)
- 1 = cpuclk (CPU clock)
- 2 = nbclk (L2 Cache clock)
- 3 = hclk (DRAM control clock)
- 4 = dramclk (DDR clock)
-
-The following is a list of provided IDs and clock names on Armada 375:
- 0 = tclk (Internal Bus clock)
- 1 = cpuclk (CPU clock)
- 2 = l2clk (L2 Cache clock)
- 3 = ddrclk (DDR clock)
-
-The following is a list of provided IDs and clock names on Armada 380/385:
- 0 = tclk (Internal Bus clock)
- 1 = cpuclk (CPU clock)
- 2 = l2clk (L2 Cache clock)
- 3 = ddrclk (DDR clock)
-
-The following is a list of provided IDs and clock names on Armada 39x:
- 0 = tclk (Internal Bus clock)
- 1 = cpuclk (CPU clock)
- 2 = nbclk (Coherent Fabric clock)
- 3 = hclk (SDRAM Controller Internal Clock)
- 4 = dclk (SDRAM Interface Clock)
- 5 = refclk (Reference Clock)
-
-The following is a list of provided IDs and clock names on 98dx3236:
- 0 = tclk (Internal Bus clock)
- 1 = cpuclk (CPU clock)
- 2 = ddrclk (DDR clock)
- 3 = mpll (MPLL Clock)
-
-The following is a list of provided IDs and clock names on Kirkwood and Dove:
- 0 = tclk (Internal Bus clock)
- 1 = cpuclk (CPU0 clock)
- 2 = l2clk (L2 Cache clock derived from CPU0 clock)
- 3 = ddrclk (DDR controller clock derived from CPU0 clock)
-
-The following is a list of provided IDs and clock names on Orion5x:
- 0 = tclk (Internal Bus clock)
- 1 = cpuclk (CPU0 clock)
- 2 = ddrclk (DDR controller clock derived from CPU0 clock)
-
-Required properties:
-- compatible : shall be one of the following:
- "marvell,armada-370-core-clock" - For Armada 370 SoC core clocks
- "marvell,armada-375-core-clock" - For Armada 375 SoC core clocks
- "marvell,armada-380-core-clock" - For Armada 380/385 SoC core clocks
- "marvell,armada-390-core-clock" - For Armada 39x SoC core clocks
- "marvell,armada-xp-core-clock" - For Armada XP SoC core clocks
- "marvell,mv98dx3236-core-clock" - For 98dx3236 family SoC core clocks
- "marvell,dove-core-clock" - for Dove SoC core clocks
- "marvell,kirkwood-core-clock" - for Kirkwood SoC (except mv88f6180)
- "marvell,mv88f6180-core-clock" - for Kirkwood MV88f6180 SoC
- "marvell,mv98dx1135-core-clock" - for Kirkwood 98dx1135 SoC
- "marvell,mv88f5181-core-clock" - for Orion MV88F5181 SoC
- "marvell,mv88f5182-core-clock" - for Orion MV88F5182 SoC
- "marvell,mv88f5281-core-clock" - for Orion MV88F5281 SoC
- "marvell,mv88f6183-core-clock" - for Orion MV88F6183 SoC
-- reg : shall be the register address of the Sample-At-Reset (SAR) register
-- #clock-cells : from common clock binding; shall be set to 1
-
-Optional properties:
-- clock-output-names : from common clock binding; allows overwrite default clock
- output names ("tclk", "cpuclk", "l2clk", "ddrclk")
-
-Example:
-
-core_clk: core-clocks@d0214 {
- compatible = "marvell,dove-core-clock";
- reg = <0xd0214 0x4>;
- #clock-cells = <1>;
-};
-
-spi0: spi@10600 {
- compatible = "marvell,orion-spi";
- /* ... */
- /* get tclk from core clock provider */
- clocks = <&core_clk 0>;
-};
diff --git a/Documentation/devicetree/bindings/clock/mvebu-corediv-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-corediv-clock.txt
deleted file mode 100644
index c7b4e3a6b2c6..000000000000
--- a/Documentation/devicetree/bindings/clock/mvebu-corediv-clock.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-* Core Divider Clock bindings for Marvell MVEBU SoCs
-
-The following is a list of provided IDs and clock names on Armada 370/XP:
- 0 = nand (NAND clock)
-
-Required properties:
-- compatible : must be "marvell,armada-370-corediv-clock",
- "marvell,armada-375-corediv-clock",
- "marvell,armada-380-corediv-clock",
- "marvell,mv98dx3236-corediv-clock",
-
-- reg : must be the register address of Core Divider control register
-- #clock-cells : from common clock binding; shall be set to 1
-- clocks : must be set to the parent's phandle
-
-Example:
-
-corediv_clk: corediv-clocks@18740 {
- compatible = "marvell,armada-370-corediv-clock";
- reg = <0x18740 0xc>;
- #clock-cells = <1>;
- clocks = <&pll>;
-};
diff --git a/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt
deleted file mode 100644
index 7f28506eaee7..000000000000
--- a/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Device Tree Clock bindings for cpu clock of Marvell EBU platforms
-
-Required properties:
-- compatible : shall be one of the following:
- "marvell,armada-xp-cpu-clock" - cpu clocks for Armada XP
- "marvell,mv98dx3236-cpu-clock" - cpu clocks for 98DX3236 SoC
-- reg : Address and length of the clock complex register set, followed
- by address and length of the PMU DFS registers
-- #clock-cells : should be set to 1.
-- clocks : shall be the input parent clock phandle for the clock.
-
-cpuclk: clock-complex@d0018700 {
- #clock-cells = <1>;
- compatible = "marvell,armada-xp-cpu-clock";
- reg = <0xd0018700 0xA0>, <0x1c054 0x10>;
- clocks = <&coreclk 1>;
-}
-
-cpu@0 {
- compatible = "marvell,sheeva-v7";
- reg = <0>;
- clocks = <&cpuclk 0>;
-};
diff --git a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
deleted file mode 100644
index de562da2ae77..000000000000
--- a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
+++ /dev/null
@@ -1,205 +0,0 @@
-* Gated Clock bindings for Marvell EBU SoCs
-
-Marvell Armada 370/375/380/385/39x/XP, Dove and Kirkwood allow some
-peripheral clocks to be gated to save some power. The clock consumer
-should specify the desired clock by having the clock ID in its
-"clocks" phandle cell. The clock ID is directly mapped to the
-corresponding clock gating control bit in HW to ease manual clock
-lookup in datasheet.
-
-The following is a list of provided IDs for Armada 370:
-ID Clock Peripheral
------------------------------------
-0 Audio AC97 Cntrl
-1 pex0_en PCIe 0 Clock out
-2 pex1_en PCIe 1 Clock out
-3 ge1 Gigabit Ethernet 1
-4 ge0 Gigabit Ethernet 0
-5 pex0 PCIe Cntrl 0
-9 pex1 PCIe Cntrl 1
-15 sata0 SATA Host 0
-17 sdio SDHCI Host
-23 crypto CESA (crypto engine)
-25 tdm Time Division Mplx
-28 ddr DDR Cntrl
-30 sata1 SATA Host 0
-
-The following is a list of provided IDs for Armada 375:
-ID Clock Peripheral
------------------------------------
-2 mu Management Unit
-3 pp Packet Processor
-4 ptp PTP
-5 pex0 PCIe 0 Clock out
-6 pex1 PCIe 1 Clock out
-8 audio Audio Cntrl
-11 nd_clk Nand Flash Cntrl
-14 sata0_link SATA 0 Link
-15 sata0_core SATA 0 Core
-16 usb3 USB3 Host
-17 sdio SDHCI Host
-18 usb USB Host
-19 gop Gigabit Ethernet MAC
-20 sata1_link SATA 1 Link
-21 sata1_core SATA 1 Core
-22 xor0 XOR DMA 0
-23 xor1 XOR DMA 0
-24 copro Coprocessor
-25 tdm Time Division Mplx
-28 crypto0_enc Cryptographic Unit Port 0 Encryption
-29 crypto0_core Cryptographic Unit Port 0 Core
-30 crypto1_enc Cryptographic Unit Port 1 Encryption
-31 crypto1_core Cryptographic Unit Port 1 Core
-
-The following is a list of provided IDs for Armada 380/385:
-ID Clock Peripheral
------------------------------------
-0 audio Audio
-2 ge2 Gigabit Ethernet 2
-3 ge1 Gigabit Ethernet 1
-4 ge0 Gigabit Ethernet 0
-5 pex1 PCIe 1
-6 pex2 PCIe 2
-7 pex3 PCIe 3
-8 pex0 PCIe 0
-9 usb3h0 USB3 Host 0
-10 usb3h1 USB3 Host 1
-11 usb3d USB3 Device
-13 bm Buffer Management
-14 crypto0z Cryptographic 0 Z
-15 sata0 SATA 0
-16 crypto1z Cryptographic 1 Z
-17 sdio SDIO
-18 usb2 USB 2
-21 crypto1 Cryptographic 1
-22 xor0 XOR 0
-23 crypto0 Cryptographic 0
-25 tdm Time Division Multiplexing
-28 xor1 XOR 1
-30 sata1 SATA 1
-
-The following is a list of provided IDs for Armada 39x:
-ID Clock Peripheral
------------------------------------
-5 pex1 PCIe 1
-6 pex2 PCIe 2
-7 pex3 PCIe 3
-8 pex0 PCIe 0
-9 usb3h0 USB3 Host 0
-10 usb3h1 USB3 Host 1
-15 sata0 SATA 0
-17 sdio SDIO
-22 xor0 XOR 0
-28 xor1 XOR 1
-
-The following is a list of provided IDs for Armada XP:
-ID Clock Peripheral
------------------------------------
-0 audio Audio Cntrl
-1 ge3 Gigabit Ethernet 3
-2 ge2 Gigabit Ethernet 2
-3 ge1 Gigabit Ethernet 1
-4 ge0 Gigabit Ethernet 0
-5 pex0 PCIe Cntrl 0
-6 pex1 PCIe Cntrl 1
-7 pex2 PCIe Cntrl 2
-8 pex3 PCIe Cntrl 3
-13 bp
-14 sata0lnk
-15 sata0 SATA Host 0
-16 lcd LCD Cntrl
-17 sdio SDHCI Host
-18 usb0 USB Host 0
-19 usb1 USB Host 1
-20 usb2 USB Host 2
-22 xor0 XOR DMA 0
-23 crypto CESA engine
-25 tdm Time Division Mplx
-28 xor1 XOR DMA 1
-29 sata1lnk
-30 sata1 SATA Host 1
-
-The following is a list of provided IDs for 98dx3236:
-ID Clock Peripheral
------------------------------------
-3 ge1 Gigabit Ethernet 1
-4 ge0 Gigabit Ethernet 0
-5 pex0 PCIe Cntrl 0
-17 sdio SDHCI Host
-18 usb0 USB Host 0
-22 xor0 XOR DMA 0
-
-The following is a list of provided IDs for Dove:
-ID Clock Peripheral
------------------------------------
-0 usb0 USB Host 0
-1 usb1 USB Host 1
-2 ge Gigabit Ethernet
-3 sata SATA Host
-4 pex0 PCIe Cntrl 0
-5 pex1 PCIe Cntrl 1
-8 sdio0 SDHCI Host 0
-9 sdio1 SDHCI Host 1
-10 nand NAND Cntrl
-11 camera Camera Cntrl
-12 i2s0 I2S Cntrl 0
-13 i2s1 I2S Cntrl 1
-15 crypto CESA engine
-21 ac97 AC97 Cntrl
-22 pdma Peripheral DMA
-23 xor0 XOR DMA 0
-24 xor1 XOR DMA 1
-30 gephy Gigabit Ethernel PHY
-Note: gephy(30) is implemented as a parent clock of ge(2)
-
-The following is a list of provided IDs for Kirkwood:
-ID Clock Peripheral
------------------------------------
-0 ge0 Gigabit Ethernet 0
-2 pex0 PCIe Cntrl 0
-3 usb0 USB Host 0
-4 sdio SDIO Cntrl
-5 tsu Transp. Stream Unit
-6 dunit SDRAM Cntrl
-7 runit Runit
-8 xor0 XOR DMA 0
-9 audio I2S Cntrl 0
-14 sata0 SATA Host 0
-15 sata1 SATA Host 1
-16 xor1 XOR DMA 1
-17 crypto CESA engine
-18 pex1 PCIe Cntrl 1
-19 ge1 Gigabit Ethernet 1
-20 tdm Time Division Mplx
-
-Required properties:
-- compatible : shall be one of the following:
- "marvell,armada-370-gating-clock" - for Armada 370 SoC clock gating
- "marvell,armada-375-gating-clock" - for Armada 375 SoC clock gating
- "marvell,armada-380-gating-clock" - for Armada 380/385 SoC clock gating
- "marvell,armada-390-gating-clock" - for Armada 39x SoC clock gating
- "marvell,armada-xp-gating-clock" - for Armada XP SoC clock gating
- "marvell,mv98dx3236-gating-clock" - for 98dx3236 SoC clock gating
- "marvell,dove-gating-clock" - for Dove SoC clock gating
- "marvell,kirkwood-gating-clock" - for Kirkwood SoC clock gating
-- reg : shall be the register address of the Clock Gating Control register
-- #clock-cells : from common clock binding; shall be set to 1
-
-Optional properties:
-- clocks : default parent clock phandle (e.g. tclk)
-
-Example:
-
-gate_clk: clock-gating-control@d0038 {
- compatible = "marvell,dove-gating-clock";
- reg = <0xd0038 0x4>;
- /* default parent clock is tclk */
- clocks = <&core_clk 0>;
- #clock-cells = <1>;
-};
-
-sdio0: sdio@92000 {
- compatible = "marvell,dove-sdhci";
- /* get clk gate bit 8 (sdio0) */
- clocks = <&gate_clk 8>;
-};
diff --git a/Documentation/devicetree/bindings/clock/nspire-clock.txt b/Documentation/devicetree/bindings/clock/nspire-clock.txt
deleted file mode 100644
index 7c3bc8bb5b9f..000000000000
--- a/Documentation/devicetree/bindings/clock/nspire-clock.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-TI-NSPIRE Clocks
-
-Required properties:
-- compatible: Valid compatible properties include:
- "lsi,nspire-cx-ahb-divider" for the AHB divider in the CX model
- "lsi,nspire-classic-ahb-divider" for the AHB divider in the older model
- "lsi,nspire-cx-clock" for the base clock in the CX model
- "lsi,nspire-classic-clock" for the base clock in the older model
-
-- reg: Physical base address of the controller and length of memory mapped
- region.
-
-Optional:
-- clocks: For the "nspire-*-ahb-divider" compatible clocks, this is the parent
- clock where it divides the rate from.
-
-Example:
-
-ahb_clk {
- #clock-cells = <0>;
- compatible = "lsi,nspire-cx-clock";
- reg = <0x900B0000 0x4>;
- clocks = <&base_clk>;
-};
diff --git a/Documentation/devicetree/bindings/clock/nuvoton,npcm750-clk.txt b/Documentation/devicetree/bindings/clock/nuvoton,npcm750-clk.txt
deleted file mode 100644
index f82064546d11..000000000000
--- a/Documentation/devicetree/bindings/clock/nuvoton,npcm750-clk.txt
+++ /dev/null
@@ -1,100 +0,0 @@
-* Nuvoton NPCM7XX Clock Controller
-
-Nuvoton Poleg BMC NPCM7XX contains an integrated clock controller, which
-generates and supplies clocks to all modules within the BMC.
-
-External clocks:
-
-There are six fixed clocks that are generated outside the BMC. All clocks are of
-a known fixed value that cannot be changed. clk_refclk, clk_mcbypck and
-clk_sysbypck are inputs to the clock controller.
-clk_rg1refck, clk_rg2refck and clk_xin are external clocks suppling the
-network. They are set on the device tree, but not used by the clock module. The
-network devices use them directly.
-Example can be found below.
-
-All available clocks are defined as preprocessor macros in:
-dt-bindings/clock/nuvoton,npcm7xx-clock.h
-and can be reused as DT sources.
-
-Required Properties of clock controller:
-
- - compatible: "nuvoton,npcm750-clk" : for clock controller of Nuvoton
- Poleg BMC NPCM750
-
- - reg: physical base address of the clock controller and length of
- memory mapped region.
-
- - #clock-cells: should be 1.
-
-Example: Clock controller node:
-
- clk: clock-controller@f0801000 {
- compatible = "nuvoton,npcm750-clk";
- #clock-cells = <1>;
- reg = <0xf0801000 0x1000>;
- clock-names = "refclk", "sysbypck", "mcbypck";
- clocks = <&clk_refclk>, <&clk_sysbypck>, <&clk_mcbypck>;
- };
-
-Example: Required external clocks for network:
-
- /* external reference clock */
- clk_refclk: clk-refclk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <25000000>;
- clock-output-names = "refclk";
- };
-
- /* external reference clock for cpu. float in normal operation */
- clk_sysbypck: clk-sysbypck {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <800000000>;
- clock-output-names = "sysbypck";
- };
-
- /* external reference clock for MC. float in normal operation */
- clk_mcbypck: clk-mcbypck {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <800000000>;
- clock-output-names = "mcbypck";
- };
-
- /* external clock signal rg1refck, supplied by the phy */
- clk_rg1refck: clk-rg1refck {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <125000000>;
- clock-output-names = "clk_rg1refck";
- };
-
- /* external clock signal rg2refck, supplied by the phy */
- clk_rg2refck: clk-rg2refck {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <125000000>;
- clock-output-names = "clk_rg2refck";
- };
-
- clk_xin: clk-xin {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <50000000>;
- clock-output-names = "clk_xin";
- };
-
-
-Example: GMAC controller node that consumes two clocks: a generated clk by the
-clock controller and a fixed clock from DT (clk_rg1refck).
-
- ethernet0: ethernet@f0802000 {
- compatible = "snps,dwmac";
- reg = <0xf0802000 0x2000>;
- interrupts = <0 14 4>;
- interrupt-names = "macirq";
- clocks = <&clk_rg1refck>, <&clk NPCM7XX_CLK_AHB>;
- clock-names = "stmmaceth", "clk_gmac";
- };
diff --git a/Documentation/devicetree/bindings/clock/nuvoton,npcm750-clk.yaml b/Documentation/devicetree/bindings/clock/nuvoton,npcm750-clk.yaml
new file mode 100644
index 000000000000..694dac68619c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nuvoton,npcm750-clk.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/nuvoton,npcm750-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton NPCM7XX Clock Controller
+
+maintainers:
+ - Tali Perry <tali.perry1@gmail.com>
+
+description: >
+ Nuvoton Poleg BMC NPCM7XX contains an integrated clock controller, which
+ generates and supplies clocks to all modules within the BMC.
+
+ External clocks:
+
+ There are six fixed clocks that are generated outside the BMC. All clocks are of
+ a known fixed value that cannot be changed. clk_refclk, clk_mcbypck and
+ clk_sysbypck are inputs to the clock controller.
+ clk_rg1refck, clk_rg2refck and clk_xin are external clocks supplying the
+ network. They are set in the device tree, but not used by the clock module.
+ The network devices use them directly.
+
+ All available clocks are defined as preprocessor macros in
+ dt-bindings/clock/nuvoton,npcm7xx-clock.h and can be used in
+ device tree sources.
+
+properties:
+ compatible:
+ const: nuvoton,npcm750-clk
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clock-names:
+ items:
+ - const: refclk
+ - const: sysbypck
+ - const: mcbypck
+
+ clocks:
+ items:
+ - description: refclk
+ - description: sysbypck
+ - description: mcbypck
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@f0801000 {
+ compatible = "nuvoton,npcm750-clk";
+ #clock-cells = <1>;
+ reg = <0xf0801000 0x1000>;
+ clock-names = "refclk", "sysbypck", "mcbypck";
+ clocks = <&clk_refclk>, <&clk_sysbypck>, <&clk_mcbypck>;
+ };
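+  - |
+    #include <dt-bindings/clock/nuvoton,npcm7xx-clock.h>
+
+    // Consumer sketch adapted from the former nuvoton,npcm750-clk.txt:
+    // a GMAC fed by the external clk_rg1refck fixed clock and by the
+    // AHB clock from this controller.
+    ethernet@f0802000 {
+      compatible = "snps,dwmac";
+      reg = <0xf0802000 0x2000>;
+      interrupts = <0 14 4>;
+      interrupt-names = "macirq";
+      clocks = <&clk_rg1refck>, <&clk NPCM7XX_CLK_AHB>;
+      clock-names = "stmmaceth", "clk_gmac";
+    };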
diff --git a/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml b/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml
index d0291bfff23a..27403b4c52d6 100644
--- a/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml
+++ b/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml
@@ -13,6 +13,8 @@ properties:
compatible:
items:
- enum:
+ - nxp,imx94-display-csr
+ - nxp,imx94-lvds-csr
- nxp,imx95-camera-csr
- nxp,imx95-display-csr
- nxp,imx95-hsio-blk-ctl
diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc1850-ccu.yaml b/Documentation/devicetree/bindings/clock/nxp,lpc1850-ccu.yaml
new file mode 100644
index 000000000000..5459038cc954
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nxp,lpc1850-ccu.yaml
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/nxp,lpc1850-ccu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC1850 Clock Control Unit (CCU)
+
+description:
+ Each CGU base clock has several clock branches which can be turned on
+ or off independently by the Clock Control Units CCU1 or CCU2. The
+ branch clocks are distributed between CCU1 and CCU2.
+
+ Above text taken from NXP LPC1850 User Manual.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: nxp,lpc1850-ccu
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 8
+
+ clock-names:
+ minItems: 1
+ maxItems: 8
+ items:
+ enum:
+ - base_usb0_clk
+ - base_periph_clk
+ - base_usb1_clk
+ - base_cpu_clk
+ - base_spifi_clk
+ - base_spi_clk
+ - base_apb1_clk
+ - base_apb3_clk
+ - base_adchs_clk
+ - base_sdio_clk
+ - base_ssp0_clk
+ - base_ssp1_clk
+ - base_uart0_clk
+ - base_uart1_clk
+ - base_uart2_clk
+ - base_uart3_clk
+ - base_audio_clk
+ description:
+ Which branch clocks are available on the CCU depends on the
+ specific LPC part. Check the user manual for your specific part.
+
+ A list of CCU clocks can be found in dt-bindings/clock/lpc18xx-ccu.h.
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/lpc18xx-cgu.h>
+
+ clock-controller@40051000 {
+ compatible = "nxp,lpc1850-ccu";
+ reg = <0x40051000 0x1000>;
+ #clock-cells = <1>;
+ clocks = <&cgu BASE_APB3_CLK>, <&cgu BASE_APB1_CLK>,
+ <&cgu BASE_SPIFI_CLK>, <&cgu BASE_CPU_CLK>,
+ <&cgu BASE_PERIPH_CLK>, <&cgu BASE_USB0_CLK>,
+ <&cgu BASE_USB1_CLK>, <&cgu BASE_SPI_CLK>;
+ clock-names = "base_apb3_clk", "base_apb1_clk",
+ "base_spifi_clk", "base_cpu_clk",
+ "base_periph_clk", "base_usb0_clk",
+ "base_usb1_clk", "base_spi_clk";
+ };
+
+ - |
+ #include <dt-bindings/clock/lpc18xx-cgu.h>
+
+ clock-controller@40052000 {
+ compatible = "nxp,lpc1850-ccu";
+ reg = <0x40052000 0x1000>;
+ #clock-cells = <1>;
+ clocks = <&cgu BASE_AUDIO_CLK>, <&cgu BASE_UART3_CLK>,
+ <&cgu BASE_UART2_CLK>, <&cgu BASE_UART1_CLK>,
+ <&cgu BASE_UART0_CLK>, <&cgu BASE_SSP1_CLK>,
+ <&cgu BASE_SSP0_CLK>, <&cgu BASE_SDIO_CLK>;
+ clock-names = "base_audio_clk", "base_uart3_clk",
+ "base_uart2_clk", "base_uart1_clk",
+ "base_uart0_clk", "base_ssp1_clk",
+ "base_ssp0_clk", "base_sdio_clk";
+ };
+
diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc1850-cgu.yaml b/Documentation/devicetree/bindings/clock/nxp,lpc1850-cgu.yaml
new file mode 100644
index 000000000000..ed178c7df00c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nxp,lpc1850-cgu.yaml
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/nxp,lpc1850-cgu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC1850 Clock Generation Unit (CGU)
+
+description: >
+ The CGU generates multiple independent clocks for the core and the
+ peripheral blocks of the LPC18xx. Each independent clock is called
+ a base clock and itself is one of the inputs to the two Clock
+ Control Units (CCUs) which control the branch clocks to the
+ individual peripherals.
+
+ The CGU selects the inputs to the clock generators from multiple
+ clock sources, controls the clock generation, and routes the outputs
+ of the clock generators through the clock source bus to the output
+ stages. Each output stage provides an independent clock source and
+ corresponds to one of the base clocks for the LPC18xx.
+
+ Above text taken from NXP LPC1850 User Manual.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: nxp,lpc1850-cgu
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+ description: |
+ Which base clocks are available on the CGU depends on the
+ specific LPC part. Base clocks are numbered from 0 to 27.
+
+ Number: Name: Description:
+ 0 BASE_SAFE_CLK Base safe clock (always on) for WWDT
+ 1 BASE_USB0_CLK Base clock for USB0
+ 2 BASE_PERIPH_CLK Base clock for Cortex-M0SUB subsystem,
+ SPI, and SGPIO
+ 3 BASE_USB1_CLK Base clock for USB1
+ 4 BASE_CPU_CLK System base clock for ARM Cortex-M core
+ and APB peripheral blocks #0 and #2
+ 5 BASE_SPIFI_CLK Base clock for SPIFI
+ 6 BASE_SPI_CLK Base clock for SPI
+ 7 BASE_PHY_RX_CLK Base clock for Ethernet PHY Receive clock
+ 8 BASE_PHY_TX_CLK Base clock for Ethernet PHY Transmit clock
+ 9 BASE_APB1_CLK Base clock for APB peripheral block # 1
+ 10 BASE_APB3_CLK Base clock for APB peripheral block # 3
+ 11 BASE_LCD_CLK Base clock for LCD
+ 12 BASE_ADCHS_CLK Base clock for ADCHS
+ 13 BASE_SDIO_CLK Base clock for SD/MMC
+ 14 BASE_SSP0_CLK Base clock for SSP0
+ 15 BASE_SSP1_CLK Base clock for SSP1
+ 16 BASE_UART0_CLK Base clock for UART0
+ 17 BASE_UART1_CLK Base clock for UART1
+ 18 BASE_UART2_CLK Base clock for UART2
+ 19 BASE_UART3_CLK Base clock for UART3
+ 20 BASE_OUT_CLK Base clock for CLKOUT pin
+ 21-24 - Reserved
+ 25 BASE_AUDIO_CLK Base clock for audio system (I2S)
+ 26 BASE_CGU_OUT0_CLK Base clock for CGU_OUT0 clock output
+ 27 BASE_CGU_OUT1_CLK Base clock for CGU_OUT1 clock output
+
+ BASE_PERIPH_CLK and BASE_SPI_CLK are only available on LPC43xx.
+ BASE_ADCHS_CLK is only available on LPC4370.
+
+ clocks:
+ maxItems: 5
+
+ clock-indices:
+ minItems: 1
+ maxItems: 28
+
+ clock-output-names:
+ minItems: 1
+ maxItems: 28
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@40050000 {
+ compatible = "nxp,lpc1850-cgu";
+ reg = <0x40050000 0x1000>;
+ #clock-cells = <1>;
+ clocks = <&xtal>, <&creg_clk 1>, <&enet_rx_clk>, <&enet_tx_clk>, <&gp_clkin>;
+ };
+
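+  - |
+    #include <dt-bindings/clock/lpc18xx-cgu.h>
+
+    // Consumer sketch: a CCU branch controller fed from CGU base
+    // clocks, mirroring the nxp,lpc1850-ccu binding example; the
+    // BASE_* indices are the base clock numbers listed above.
+    clock-controller@40051000 {
+      compatible = "nxp,lpc1850-ccu";
+      reg = <0x40051000 0x1000>;
+      #clock-cells = <1>;
+      clocks = <&cgu BASE_APB3_CLK>, <&cgu BASE_APB1_CLK>;
+      clock-names = "base_apb3_clk", "base_apb1_clk";
+    };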
diff --git a/Documentation/devicetree/bindings/clock/pistachio-clock.txt b/Documentation/devicetree/bindings/clock/pistachio-clock.txt
deleted file mode 100644
index 868db499eed2..000000000000
--- a/Documentation/devicetree/bindings/clock/pistachio-clock.txt
+++ /dev/null
@@ -1,123 +0,0 @@
-Imagination Technologies Pistachio SoC clock controllers
-========================================================
-
-Pistachio has four clock controllers (core clock, peripheral clock, peripheral
-general control, and top general control) which are instantiated individually
-from the device-tree.
-
-External clocks:
-----------------
-
-There are three external inputs to the clock controllers which should be
-defined with the following clock-output-names:
-- "xtal": External 52Mhz oscillator (required)
-- "audio_clk_in": Alternate audio reference clock (optional)
-- "enet_clk_in": Alternate ethernet PHY clock (optional)
-
-Core clock controller:
-----------------------
-
-The core clock controller generates clocks for the CPU, RPU (WiFi + BT
-co-processor), audio, and several peripherals.
-
-Required properties:
-- compatible: Must be "img,pistachio-clk".
-- reg: Must contain the base address and length of the core clock controller.
-- #clock-cells: Must be 1. The single cell is the clock identifier.
- See dt-bindings/clock/pistachio-clk.h for the list of valid identifiers.
-- clocks: Must contain an entry for each clock in clock-names.
-- clock-names: Must include "xtal" (see "External clocks") and
- "audio_clk_in_gate", "enet_clk_in_gate" which are generated by the
- top-level general control.
-
-Example:
- clk_core: clock-controller@18144000 {
- compatible = "img,pistachio-clk";
- reg = <0x18144000 0x800>;
- clocks = <&xtal>, <&cr_top EXT_CLK_AUDIO_IN>,
- <&cr_top EXT_CLK_ENET_IN>;
- clock-names = "xtal", "audio_clk_in_gate", "enet_clk_in_gate";
-
- #clock-cells = <1>;
- };
-
-Peripheral clock controller:
-----------------------------
-
-The peripheral clock controller generates clocks for the DDR, ROM, and other
-peripherals. The peripheral system clock ("periph_sys") generated by the core
-clock controller is the input clock to the peripheral clock controller.
-
-Required properties:
-- compatible: Must be "img,pistachio-periph-clk".
-- reg: Must contain the base address and length of the peripheral clock
- controller.
-- #clock-cells: Must be 1. The single cell is the clock identifier.
- See dt-bindings/clock/pistachio-clk.h for the list of valid identifiers.
-- clocks: Must contain an entry for each clock in clock-names.
-- clock-names: Must include "periph_sys", the peripheral system clock generated
- by the core clock controller.
-
-Example:
- clk_periph: clock-controller@18144800 {
- compatible = "img,pistachio-clk-periph";
- reg = <0x18144800 0x800>;
- clocks = <&clk_core CLK_PERIPH_SYS>;
- clock-names = "periph_sys";
-
- #clock-cells = <1>;
- };
-
-Peripheral general control:
----------------------------
-
-The peripheral general control block generates system interface clocks and
-resets for various peripherals. It also contains miscellaneous peripheral
-control registers. The system clock ("sys") generated by the peripheral clock
-controller is the input clock to the system clock controller.
-
-Required properties:
-- compatible: Must include "img,pistachio-periph-cr" and "syscon".
-- reg: Must contain the base address and length of the peripheral general
- control registers.
-- #clock-cells: Must be 1. The single cell is the clock identifier.
- See dt-bindings/clock/pistachio-clk.h for the list of valid identifiers.
-- clocks: Must contain an entry for each clock in clock-names.
-- clock-names: Must include "sys", the system clock generated by the peripheral
- clock controller.
-
-Example:
- cr_periph: syscon@18144800 {
- compatible = "img,pistachio-cr-periph", "syscon";
- reg = <0x18148000 0x1000>;
- clocks = <&clock_periph PERIPH_CLK_PERIPH_SYS>;
- clock-names = "sys";
-
- #clock-cells = <1>;
- };
-
-Top-level general control:
---------------------------
-
-The top-level general control block contains miscellaneous control registers and
-gates for the external clocks "audio_clk_in" and "enet_clk_in".
-
-Required properties:
-- compatible: Must include "img,pistachio-cr-top" and "syscon".
-- reg: Must contain the base address and length of the top-level
- control registers.
-- clocks: Must contain an entry for each clock in clock-names.
-- clock-names: Two optional clocks, "audio_clk_in" and "enet_clk_in" (see
- "External clocks").
-- #clock-cells: Must be 1. The single cell is the clock identifier.
- See dt-bindings/clock/pistachio-clk.h for the list of valid identifiers.
-
-Example:
- cr_top: syscon@18144800 {
- compatible = "img,pistachio-cr-top", "syscon";
- reg = <0x18149000 0x200>;
- clocks = <&audio_refclk>, <&ext_enet_in>;
- clock-names = "audio_clk_in", "enet_clk_in";
-
- #clock-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt b/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
deleted file mode 100644
index 241fb0545b9e..000000000000
--- a/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-Binding for Qualcomm Atheros AR7xxx/AR9XXX PLL controller
-
-The PPL controller provides the 3 main clocks of the SoC: CPU, DDR and AHB.
-
-Required Properties:
-- compatible: has to be "qca,<soctype>-pll" and one of the following
- fallbacks:
- - "qca,ar7100-pll"
- - "qca,ar7240-pll"
- - "qca,ar9130-pll"
- - "qca,ar9330-pll"
- - "qca,ar9340-pll"
- - "qca,qca9550-pll"
-- reg: Base address and size of the controllers memory area
-- clock-names: Name of the input clock, has to be "ref"
-- clocks: phandle of the external reference clock
-- #clock-cells: has to be one
-
-Optional properties:
-- clock-output-names: should be "cpu", "ddr", "ahb"
-
-Example:
-
- pll-controller@18050000 {
- compatible = "qca,ar9132-pll", "qca,ar9130-pll";
- reg = <0x18050000 0x20>;
-
- clock-names = "ref";
- clocks = <&extosc>;
-
- #clock-cells = <1>;
- clock-output-names = "cpu", "ddr", "ahb";
- };
diff --git a/Documentation/devicetree/bindings/clock/qca,ath79-pll.yaml b/Documentation/devicetree/bindings/clock/qca,ath79-pll.yaml
new file mode 100644
index 000000000000..69863e8a4648
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qca,ath79-pll.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qca,ath79-pll.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Atheros ATH79 PLL controller
+
+maintainers:
+ - Alban Bedel <albeu@free.fr>
+ - Antony Pavlov <antonynpavlov@gmail.com>
+
+description: >
+ The PLL controller provides the 3 main clocks of the SoC: CPU, DDR and AHB.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: qca,ar9132-pll
+ - const: qca,ar9130-pll
+ - items:
+ - enum:
+ - qca,ar7100-pll
+ - qca,ar7240-pll
+ - qca,ar9130-pll
+ - qca,ar9330-pll
+ - qca,ar9340-pll
+ - qca,qca9530-pll
+ - qca,qca9550-pll
+ - qca,qca9560-pll
+
+ reg:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ref
+
+ clocks:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clock-output-names:
+ items:
+ - const: cpu
+ - const: ddr
+ - const: ahb
+
+required:
+ - compatible
+ - reg
+ - clock-names
+ - clocks
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@18050000 {
+ compatible = "qca,ar9132-pll", "qca,ar9130-pll";
+ reg = <0x18050000 0x20>;
+ clock-names = "ref";
+ clocks = <&extosc>;
+ #clock-cells = <1>;
+ clock-output-names = "cpu", "ddr", "ahb";
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,camcc-sm8250.yaml b/Documentation/devicetree/bindings/clock/qcom,camcc-sm8250.yaml
index 3fd3dc1069fb..5c3ff37ec0d7 100644
--- a/Documentation/devicetree/bindings/clock/qcom,camcc-sm8250.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,camcc-sm8250.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm camera clock control module provides the clocks, resets and
power domains on SM8250.
- See also:: include/dt-bindings/clock/qcom,camcc-sm8250.h
+ See also: include/dt-bindings/clock/qcom,camcc-sm8250.h
allOf:
- $ref: qcom,gcc.yaml#
diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6125.yaml b/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6125.yaml
index 0a3ef7fd03fa..ef2b1e204430 100644
--- a/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6125.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6125.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm display clock control module provides the clocks and power domains
on SM6125.
- See also:: include/dt-bindings/clock/qcom,dispcc-sm6125.h
+ See also: include/dt-bindings/clock/qcom,dispcc-sm6125.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6350.yaml b/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6350.yaml
index 46403b98411f..a602e882e964 100644
--- a/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6350.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6350.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm display clock control module provides the clocks, resets and power
domains on SM6350.
- See also:: include/dt-bindings/clock/qcom,dispcc-sm6350.h
+ See also: include/dt-bindings/clock/qcom,dispcc-sm6350.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-ipq4019.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-ipq4019.yaml
index 012048921f92..c91039dc100e 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-ipq4019.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-ipq4019.yaml
@@ -15,7 +15,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on IPQ4019.
- See also:: include/dt-bindings/clock/qcom,gcc-ipq4019.h
+ See also: include/dt-bindings/clock/qcom,gcc-ipq4019.h
allOf:
- $ref: qcom,gcc.yaml#
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml
index 38b9e4283900..00d7df75b3d6 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on IPQ8074.
- See also:: include/dt-bindings/clock/qcom,gcc-ipq8074.h
+ See also: include/dt-bindings/clock/qcom,gcc-ipq8074.h
allOf:
- $ref: qcom,gcc.yaml#
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8976.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8976.yaml
index cd49704dcb95..92195091a919 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8976.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8976.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on MSM8976.
- See also:: include/dt-bindings/clock/qcom,gcc-msm8976.h
+ See also: include/dt-bindings/clock/qcom,gcc-msm8976.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8994.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8994.yaml
index 10afe984e2fb..93bcd61461e7 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8994.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8994.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on MSM8994 and MSM8992.
- See also:: include/dt-bindings/clock/qcom,gcc-msm8994.h
+ See also: include/dt-bindings/clock/qcom,gcc-msm8994.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
index 013fd074a8d5..64796f45f294 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module which provides the clocks, resets and
power domains on MSM8996.
- See also:: include/dt-bindings/clock/qcom,gcc-msm8996.h
+ See also: include/dt-bindings/clock/qcom,gcc-msm8996.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8998.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8998.yaml
index abae658c0ed9..d882f2b6620e 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8998.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8998.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on MSM8998.
- See also:: include/dt-bindings/clock/qcom,gcc-msm8998.h
+ See also: include/dt-bindings/clock/qcom,gcc-msm8998.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-qcm2290.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-qcm2290.yaml
index 38c4c8c61b3a..b9194fa11e47 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-qcm2290.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-qcm2290.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on QCM2290.
- See also:: include/dt-bindings/clock/qcom,gcc-qcm2290.h
+ See also: include/dt-bindings/clock/qcom,gcc-qcm2290.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-qcs404.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-qcs404.yaml
index 94755465c1fb..6b35a3c080a2 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-qcs404.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-qcs404.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on QCS404.
- See also:: include/dt-bindings/clock/qcom,gcc-qcs404.h
+ See also: include/dt-bindings/clock/qcom,gcc-qcs404.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
index 1847bbeaa9d1..e30d1df3eeb5 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SC7180.
- See also:: include/dt-bindings/clock/qcom,gcc-sc7180.h
+ See also: include/dt-bindings/clock/qcom,gcc-sc7180.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml
index 4e4f68b9f6d2..5ddaf27bb1f4 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SC7280.
- See also:: include/dt-bindings/clock/qcom,gcc-sc7280.h
+ See also: include/dt-bindings/clock/qcom,gcc-sc7280.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sc8180x.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sc8180x.yaml
index b4784ecaf58d..82c2ef39934d 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sc8180x.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sc8180x.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SC8180x.
- See also:: include/dt-bindings/clock/qcom,gcc-sc8180x.h
+ See also: include/dt-bindings/clock/qcom,gcc-sc8180x.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sc8280xp.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sc8280xp.yaml
index 5cfde8a4de4e..c1eeccef66b4 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sc8280xp.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sc8280xp.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and
power domains on SC8280xp.
- See also:: include/dt-bindings/clock/qcom,gcc-sc8280xp.h
+ See also: include/dt-bindings/clock/qcom,gcc-sc8280xp.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sdm845.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sdm845.yaml
index ef0a20456e8a..a7523a414341 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sdm845.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sdm845.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SDM670 and SDM845
- See also:: include/dt-bindings/clock/qcom,gcc-sdm845.h
+ See also: include/dt-bindings/clock/qcom,gcc-sdm845.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sdx55.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sdx55.yaml
index 30819f3d85c6..320e4f5b2b18 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sdx55.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sdx55.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and
power domains on SDX55
- See also:: include/dt-bindings/clock/qcom,gcc-sdx55.h
+ See also: include/dt-bindings/clock/qcom,gcc-sdx55.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sdx65.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sdx65.yaml
index 915449228668..9242e6e19139 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sdx65.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sdx65.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SDX65
- See also:: include/dt-bindings/clock/qcom,gcc-sdx65.h
+ See also: include/dt-bindings/clock/qcom,gcc-sdx65.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm6115.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm6115.yaml
index ecb69c707f09..c926630907c5 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sm6115.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm6115.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM4250/6115.
- See also:: include/dt-bindings/clock/qcom,gcc-sm6115.h
+ See also: include/dt-bindings/clock/qcom,gcc-sm6115.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm6125.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm6125.yaml
index 1fe68e07a2b2..5bd422e94a38 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sm6125.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm6125.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM6125.
- See also:: include/dt-bindings/clock/qcom,gcc-sm6125.h
+ See also: include/dt-bindings/clock/qcom,gcc-sm6125.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm6350.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm6350.yaml
index 78e232fa95dc..819e855eaf9a 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sm6350.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm6350.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM6350.
- See also:: include/dt-bindings/clock/qcom,gcc-sm6350.h
+ See also: include/dt-bindings/clock/qcom,gcc-sm6350.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml
index 1dcf97c0c064..5f3f69fe9ddb 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM8150.
- See also:: include/dt-bindings/clock/qcom,gcc-sm8150.h
+ See also: include/dt-bindings/clock/qcom,gcc-sm8150.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml
index 979ff0a8bf68..f4cd5a509c60 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM8250.
- See also:: include/dt-bindings/clock/qcom,gcc-sm8250.h
+ See also: include/dt-bindings/clock/qcom,gcc-sm8250.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml
index 594e87f5ba09..97ffae3b5522 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM8350.
- See also:: include/dt-bindings/clock/qcom,gcc-sm8350.h
+ See also: include/dt-bindings/clock/qcom,gcc-sm8350.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8450.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8450.yaml
index 77273aee5d52..3169ac05e1d8 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8450.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8450.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM8450
- See also:: include/dt-bindings/clock/qcom,gcc-sm8450.h
+ See also: include/dt-bindings/clock/qcom,gcc-sm8450.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml b/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
index f869b3739be8..817d51135fbf 100644
--- a/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
@@ -24,6 +24,8 @@ description:
properties:
compatible:
enum:
+ - qcom,ipq5018-cmn-pll
+ - qcom,ipq5424-cmn-pll
- qcom,ipq9574-cmn-pll
reg:
diff --git a/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt b/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt
deleted file mode 100644
index 030ba60dab08..000000000000
--- a/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-Krait Clock Controller
-
-PROPERTIES
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: must be one of:
- "qcom,krait-cc-v1"
- "qcom,krait-cc-v2"
-
-- #clock-cells:
- Usage: required
- Value type: <u32>
- Definition: must be 1
-
-- clocks:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: reference to the clock parents of hfpll, secondary muxes.
-
-- clock-names:
- Usage: required
- Value type: <stringlist>
- Definition: must be "hfpll0", "hfpll1", "acpu0_aux", "acpu1_aux", "qsb".
-
-Example:
-
- kraitcc: clock-controller {
- compatible = "qcom,krait-cc-v1";
- clocks = <&hfpll0>, <&hfpll1>, <&acpu0_aux>, <&acpu1_aux>, <qsb>;
- clock-names = "hfpll0", "hfpll1", "acpu0_aux", "acpu1_aux", "qsb";
- #clock-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/clock/qcom,krait-cc.yaml b/Documentation/devicetree/bindings/clock/qcom,krait-cc.yaml
new file mode 100644
index 000000000000..d6a019371fcf
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,krait-cc.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,krait-cc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Krait Clock Controller
+
+maintainers:
+ - Stephen Boyd <sboyd@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - qcom,krait-cc-v1
+ - qcom,krait-cc-v2
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ items:
+ - description: Parent clock phandle for hfpll0
+ - description: Parent clock phandle for hfpll1
+ - description: Parent clock phandle for acpu0_aux
+ - description: Parent clock phandle for acpu1_aux
+ - description: Parent clock phandle for qsb
+
+ clock-names:
+ items:
+ - const: hfpll0
+ - const: hfpll1
+ - const: acpu0_aux
+ - const: acpu1_aux
+ - const: qsb
+
+required:
+ - compatible
+ - '#clock-cells'
+ - clocks
+ - clock-names
+
+additionalProperties: false
diff --git a/Documentation/devicetree/bindings/clock/qcom,milos-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,milos-camcc.yaml
new file mode 100644
index 000000000000..f63149ecf3e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,milos-camcc.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,milos-camcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Camera Clock & Reset Controller on Milos
+
+maintainers:
+ - Luca Weiss <luca.weiss@fairphone.com>
+
+description: |
+ Qualcomm camera clock control module provides the clocks, resets and power
+ domains on Milos.
+
+ See also: include/dt-bindings/clock/qcom,milos-camcc.h
+
+properties:
+ compatible:
+ const: qcom,milos-camcc
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Sleep clock source
+ - description: Camera AHB clock from GCC
+
+required:
+ - compatible
+ - clocks
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,milos-gcc.h>
+ clock-controller@adb0000 {
+ compatible = "qcom,milos-camcc";
+ reg = <0x0adb0000 0x40000>;
+ clocks = <&bi_tcxo_div2>,
+ <&sleep_clk>,
+ <&gcc GCC_CAMERA_AHB_CLK>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,milos-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,milos-dispcc.yaml
new file mode 100644
index 000000000000..94908804756b
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,milos-dispcc.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,milos-dispcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Display Clock & Reset Controller on Milos
+
+maintainers:
+ - Luca Weiss <luca.weiss@fairphone.com>
+
+description: |
+ Qualcomm display clock control module provides the clocks, resets and power
+ domains on Milos.
+
+ See also: include/dt-bindings/clock/qcom,milos-dispcc.h
+
+properties:
+ compatible:
+ const: qcom,milos-dispcc
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Sleep clock source
+ - description: Display's AHB clock
+ - description: GPLL0 source from GCC
+ - description: Byte clock from DSI PHY0
+ - description: Pixel clock from DSI PHY0
+ - description: Link clock from DP PHY0
+ - description: VCO DIV clock from DP PHY0
+
+required:
+ - compatible
+ - clocks
+ - '#power-domain-cells'
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,milos-gcc.h>
+ #include <dt-bindings/phy/phy-qcom-qmp.h>
+ clock-controller@af00000 {
+ compatible = "qcom,milos-dispcc";
+ reg = <0x0af00000 0x20000>;
+ clocks = <&bi_tcxo_div2>,
+ <&sleep_clk>,
+ <&gcc GCC_DISP_AHB_CLK>,
+ <&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>,
+ <&mdss_dsi0_phy 0>,
+ <&mdss_dsi0_phy 1>,
+ <&usb_dp_qmpphy QMP_USB43DP_DP_LINK_CLK>,
+ <&usb_dp_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,milos-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,milos-gcc.yaml
new file mode 100644
index 000000000000..cf244c155f9a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,milos-gcc.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,milos-gcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller on Milos
+
+maintainers:
+ - Luca Weiss <luca.weiss@fairphone.com>
+
+description: |
+ Qualcomm global clock control module provides the clocks, resets and power
+ domains on Milos.
+
+ See also: include/dt-bindings/clock/qcom,milos-gcc.h
+
+properties:
+ compatible:
+ const: qcom,milos-gcc
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Sleep clock source
+ - description: PCIE 0 Pipe clock source
+ - description: PCIE 1 Pipe clock source
+ - description: UFS Phy Rx symbol 0 clock source
+ - description: UFS Phy Rx symbol 1 clock source
+ - description: UFS Phy Tx symbol 0 clock source
+ - description: USB3 Phy wrapper pipe clock source
+
+required:
+ - compatible
+ - clocks
+ - '#power-domain-cells'
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ clock-controller@100000 {
+ compatible = "qcom,milos-gcc";
+ reg = <0x00100000 0x1f4200>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&sleep_clk>,
+ <&pcie0_phy>,
+ <&pcie1_phy>,
+ <&ufs_mem_phy 0>,
+ <&ufs_mem_phy 1>,
+ <&ufs_mem_phy 2>,
+ <&usb_1_qmpphy>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,milos-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,milos-videocc.yaml
new file mode 100644
index 000000000000..14c31efe1308
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,milos-videocc.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,milos-videocc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Video Clock & Reset Controller on Milos
+
+maintainers:
+ - Luca Weiss <luca.weiss@fairphone.com>
+
+description: |
+ Qualcomm video clock control module provides the clocks, resets and power
+ domains on Milos.
+
+ See also: include/dt-bindings/clock/qcom,milos-videocc.h
+
+properties:
+ compatible:
+ const: qcom,milos-videocc
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Board active XO source
+ - description: Sleep clock source
+ - description: Video AHB clock from GCC
+
+required:
+ - compatible
+ - clocks
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,milos-gcc.h>
+ clock-controller@aaf0000 {
+ compatible = "qcom,milos-videocc";
+ reg = <0x0aaf0000 0x10000>;
+ clocks = <&bi_tcxo_div2>,
+ <&bi_tcxo_ao_div2>,
+ <&sleep_clk>,
+ <&gcc GCC_VIDEO_AHB_CLK>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
index 59ac288ca5f1..53ceec9673a8 100644
--- a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
@@ -38,36 +38,16 @@ properties:
minItems: 7
maxItems: 13
- '#clock-cells':
- const: 1
-
- '#reset-cells':
- const: 1
-
- '#power-domain-cells':
- const: 1
-
- reg:
- maxItems: 1
-
- protected-clocks:
- description:
- Protected clock specifier list as per common clock binding
-
vdd-gfx-supply:
description:
Regulator supply for the GPU_GX GDSC
required:
- compatible
- - reg
- - '#clock-cells'
- - '#reset-cells'
- '#power-domain-cells'
-additionalProperties: false
-
allOf:
+ - $ref: qcom,gcc.yaml#
- if:
properties:
compatible:
@@ -351,6 +331,8 @@ allOf:
- const: dp_link_2x_clk_divsel_five
- const: dp_vco_divided_clk_src_mux
+unevaluatedProperties: false
+
examples:
# Example for MMCC for MSM8960:
- |
diff --git a/Documentation/devicetree/bindings/clock/qcom,msm8998-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,msm8998-gpucc.yaml
index b9b218ef9b68..374de7a6f8d9 100644
--- a/Documentation/devicetree/bindings/clock/qcom,msm8998-gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,msm8998-gpucc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm graphics clock control module provides the clocks, resets and power
domains on MSM8998.
- See also:: include/dt-bindings/clock/qcom,gpucc-msm8998.h
+ See also: include/dt-bindings/clock/qcom,gpucc-msm8998.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,qcm2290-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,qcm2290-dispcc.yaml
index 243be4f76db3..4a533b45eec2 100644
--- a/Documentation/devicetree/bindings/clock/qcom,qcm2290-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,qcm2290-dispcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm display clock control module provides the clocks, resets and power
domains on qcm2290.
- See also:: include/dt-bindings/clock/qcom,dispcc-qcm2290.h
+ See also: include/dt-bindings/clock/qcom,dispcc-qcm2290.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,qcs615-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,qcs615-dispcc.yaml
new file mode 100644
index 000000000000..d566f19beb0d
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,qcs615-dispcc.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,qcs615-dispcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Display Clock & Reset Controller on QCS615
+
+maintainers:
+ - Taniya Das <quic_tdas@quicinc.com>
+
+description: |
+ Qualcomm display clock control module provides the clocks, resets and power
+ domains on QCS615.
+
+ See also: include/dt-bindings/clock/qcom,qcs615-dispcc.h
+
+properties:
+ compatible:
+ const: qcom,qcs615-dispcc
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: GPLL0 clock source from GCC
+ - description: Byte clock from DSI PHY0
+ - description: Pixel clock from DSI PHY0
+ - description: Pixel clock from DSI PHY1
+ - description: Display port PLL link clock
+ - description: Display port PLL VCO DIV clock
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/clock/qcom,qcs615-gcc.h>
+ clock-controller@af00000 {
+ compatible = "qcom,qcs615-dispcc";
+ reg = <0x0af00000 0x20000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>,
+ <&mdss_dsi0_phy 0>,
+ <&mdss_dsi0_phy 1>,
+ <&mdss_dsi1_phy 0>,
+ <&mdss_dp_phy 0>,
+ <&mdss_dp_vco 0>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,qcs615-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,qcs615-gpucc.yaml
new file mode 100644
index 000000000000..5f7d83d1a7be
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,qcs615-gpucc.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,qcs615-gpucc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Graphics Clock & Reset Controller on QCS615
+
+maintainers:
+ - Taniya Das <quic_tdas@quicinc.com>
+
+description: |
+ Qualcomm graphics clock control module provides clocks, resets and power
+ domains on QCS615.
+
+ See also: include/dt-bindings/clock/qcom,qcs615-gpucc.h
+
+properties:
+ compatible:
+ const: qcom,qcs615-gpucc
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: GPLL0 main branch source
+ - description: GPLL0 GPUCC div branch source
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/clock/qcom,qcs615-gcc.h>
+
+ clock-controller@5090000 {
+ compatible = "qcom,qcs615-gpucc";
+ reg = <0x5090000 0x9000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GPLL0>,
+ <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,qcs615-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,qcs615-videocc.yaml
new file mode 100644
index 000000000000..f51b69de1047
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,qcs615-videocc.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,qcs615-videocc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Video Clock & Reset Controller on QCS615
+
+maintainers:
+ - Taniya Das <quic_tdas@quicinc.com>
+
+description: |
+ Qualcomm video clock control module provides clocks, resets and power
+ domains on QCS615.
+
+ See also: include/dt-bindings/clock/qcom,qcs615-videocc.h
+
+properties:
+ compatible:
+ const: qcom,qcs615-videocc
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Sleep clock source
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/clock/qcom,qcs615-gcc.h>
+
+ clock-controller@ab00000 {
+ compatible = "qcom,qcs615-videocc";
+ reg = <0xab00000 0x10000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&sleep_clk>;
+
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,qdu1000-ecpricc.yaml b/Documentation/devicetree/bindings/clock/qcom,qdu1000-ecpricc.yaml
index fd21df0e7697..3038307ff2c5 100644
--- a/Documentation/devicetree/bindings/clock/qcom,qdu1000-ecpricc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,qdu1000-ecpricc.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm ECPRI Specification V2.0 Common Public Radio Interface clock control
module which supports the clocks, resets on QDU1000 and QRU1000
- See also:: include/dt-bindings/clock/qcom,qdu1000-ecpricc.h
+ See also: include/dt-bindings/clock/qcom,qdu1000-ecpricc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,qdu1000-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,qdu1000-gcc.yaml
index 86befef02650..2c5a9ef4fe4d 100644
--- a/Documentation/devicetree/bindings/clock/qcom,qdu1000-gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,qdu1000-gcc.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module which supports the clocks, resets and
power domains on QDU1000 and QRU1000
- See also:: include/dt-bindings/clock/qcom,qdu1000-gcc.h
+ See also: include/dt-bindings/clock/qcom,qdu1000-gcc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
index dcb872b9cf3e..a4414ba0b287 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
@@ -17,6 +17,7 @@ description: |
properties:
compatible:
enum:
+ - qcom,milos-rpmh-clk
- qcom,qcs615-rpmh-clk
- qcom,qdu1000-rpmh-clk
- qcom,sa8775p-rpmh-clk
diff --git a/Documentation/devicetree/bindings/clock/qcom,sa8775p-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sa8775p-camcc.yaml
index 81623f59d11d..f42ccb6627a3 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sa8775p-camcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sa8775p-camcc.yaml
@@ -17,12 +17,14 @@ description: |
See also:
include/dt-bindings/clock/qcom,qcs8300-camcc.h
include/dt-bindings/clock/qcom,sa8775p-camcc.h
+ include/dt-bindings/clock/qcom,sc8280xp-camcc.h
properties:
compatible:
enum:
- qcom,qcs8300-camcc
- qcom,sa8775p-camcc
+ - qcom,sc8280xp-camcc
clocks:
items:
@@ -35,6 +37,11 @@ properties:
maxItems: 1
description: MMCX power domain
+ required-opps:
+ description:
+ OPP node describing the required MMCX performance point.
+ maxItems: 1
+
required:
- compatible
- clocks
@@ -43,6 +50,14 @@ required:
allOf:
- $ref: qcom,gcc.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: qcom,sc8280xp-camcc
+ then:
+ required:
+ - required-opps
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/clock/qcom,sa8775p-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sa8775p-gcc.yaml
index addbd323fa6d..c641aac8c451 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sa8775p-gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sa8775p-gcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and
power domains on sa8775p.
- See also:: include/dt-bindings/clock/qcom,sa8775p-gcc.h
+ See also: include/dt-bindings/clock/qcom,sa8775p-gcc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-camcc.yaml
index c7fe6400ea13..98ee9be84794 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7180-camcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-camcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm camera clock control module provides the clocks, resets and power
domains on SC7180.
- See also:: include/dt-bindings/clock/qcom,camcc-sc7180.h
+ See also: include/dt-bindings/clock/qcom,camcc-sc7180.h
allOf:
- $ref: qcom,gcc.yaml#
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml
index 0d8ea44d8141..f147d06ad2ef 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm display clock control module provides the clocks, resets and power
domains on SC7180.
- See also:: include/dt-bindings/clock/qcom,dispcc-sc7180.h
+ See also: include/dt-bindings/clock/qcom,dispcc-sc7180.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-lpasscorecc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-lpasscorecc.yaml
index fdfb389083c1..ad360debef7c 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7180-lpasscorecc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-lpasscorecc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm LPASS core clock control module provides the clocks and power
domains on SC7180.
- See also:: include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h
+ See also: include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7280-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7280-camcc.yaml
index dcef8de3a905..2f28be58e82e 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7280-camcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7280-camcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm camera clock control module provides the clocks, resets and
power domains on SC7280.
- See also:: include/dt-bindings/clock/qcom,camcc-sc7280.h
+ See also: include/dt-bindings/clock/qcom,camcc-sc7280.h
allOf:
- $ref: qcom,gcc.yaml#
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7280-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7280-dispcc.yaml
index 23177661be40..95b1e4f48c4f 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7280-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7280-dispcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm display clock control module provides the clocks, resets and power
domains on SC7280.
- See also:: include/dt-bindings/clock/qcom,dispcc-sc7280.h
+ See also: include/dt-bindings/clock/qcom,dispcc-sc7280.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7280-lpasscc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7280-lpasscc.yaml
index f44c5c130d2d..a90961d8656c 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7280-lpasscc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7280-lpasscc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm LPASS core clock control module provides the clocks and power
domains on SC7280.
- See also:: include/dt-bindings/clock/qcom,lpass-sc7280.h
+ See also: include/dt-bindings/clock/qcom,lpass-sc7280.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-camcc.yaml
index fa95c3a1ba3a..6214e41eec1f 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sdm845-camcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sdm845-camcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm camera clock control module provides the clocks, resets and power
domains on SDM845.
- See also:: include/dt-bindings/clock/qcom,camcc-sm845.h
+ See also: include/dt-bindings/clock/qcom,camcc-sm845.h
allOf:
- $ref: qcom,gcc.yaml#
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml
index 220f4004f7fd..854c391c8307 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm display clock control module provides the clocks, resets and power
domains on SDM845.
- See also:: include/dt-bindings/clock/qcom,dispcc-sdm845.h
+ See also: include/dt-bindings/clock/qcom,dispcc-sdm845.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-lpasscc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-lpasscc.yaml
index a96fd837c70a..f9feb7049b21 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sdm845-lpasscc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sdm845-lpasscc.yaml
@@ -12,7 +12,7 @@ maintainers:
description: |
Qualcomm SDM845 LPASS (Low Power Audio SubSystem) Clock Controller.
- See also:: include/dt-bindings/clock/qcom,lpass-sdm845.h
+ See also: include/dt-bindings/clock/qcom,lpass-sdm845.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdx75-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdx75-gcc.yaml
index 567182aba300..29a0b29bcb81 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sdx75-gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sdx75-gcc.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SDX75
- See also:: include/dt-bindings/clock/qcom,sdx75-gcc.h
+ See also: include/dt-bindings/clock/qcom,sdx75-gcc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm4450-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm4450-camcc.yaml
index f54ce865880d..70f025b26736 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm4450-camcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm4450-camcc.yaml
@@ -14,38 +14,26 @@ description: |
Qualcomm camera clock control module provides the clocks, resets and power
domains on SM4450
- See also:: include/dt-bindings/clock/qcom,sm4450-camcc.h
+ See also: include/dt-bindings/clock/qcom,sm4450-camcc.h
properties:
compatible:
const: qcom,sm4450-camcc
- reg:
- maxItems: 1
-
clocks:
items:
- description: Board XO source
- description: Camera AHB clock source from GCC
- '#clock-cells':
- const: 1
-
- '#reset-cells':
- const: 1
-
- '#power-domain-cells':
- const: 1
-
required:
- compatible
- - reg
- clocks
- - '#clock-cells'
- - '#reset-cells'
- '#power-domain-cells'
-additionalProperties: false
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm4450-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm4450-dispcc.yaml
index 2aa05353eff1..d977788bdc8a 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm4450-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm4450-dispcc.yaml
@@ -14,15 +14,12 @@ description: |
Qualcomm display clock control module provides the clocks, resets and power
domains on SM4450
- See also:: include/dt-bindings/clock/qcom,sm4450-dispcc.h
+ See also: include/dt-bindings/clock/qcom,sm4450-dispcc.h
properties:
compatible:
const: qcom,sm4450-dispcc
- reg:
- maxItems: 1
-
clocks:
items:
- description: Board XO source
@@ -32,24 +29,15 @@ properties:
- description: Byte clock from DSI PHY0
- description: Pixel clock from DSI PHY0
- '#clock-cells':
- const: 1
-
- '#reset-cells':
- const: 1
-
- '#power-domain-cells':
- const: 1
-
required:
- compatible
- - reg
- clocks
- - '#clock-cells'
- - '#reset-cells'
- '#power-domain-cells'
-additionalProperties: false
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm4450-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm4450-gcc.yaml
index 0ac92d7871e1..9cfe859bacc9 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm4450-gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm4450-gcc.yaml
@@ -14,7 +14,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM4450
- See also:: include/dt-bindings/clock/qcom,sm4450-gcc.h
+ See also: include/dt-bindings/clock/qcom,sm4450-gcc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm6115-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm6115-dispcc.yaml
index 00be36683eb5..b31424306f49 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm6115-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm6115-dispcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm display clock control module provides the clocks and power domains
on SM6115.
- See also:: include/dt-bindings/clock/qcom,sm6115-dispcc.h
+ See also: include/dt-bindings/clock/qcom,sm6115-dispcc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm6115-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm6115-gpucc.yaml
index 4ff17a91344b..104ba10ca573 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm6115-gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm6115-gpucc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm graphics clock control module provides clocks, resets and power
domains on Qualcomm SoCs.
- See also:: include/dt-bindings/clock/qcom,sm6115-gpucc.h
+ See also: include/dt-bindings/clock/qcom,sm6115-gpucc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm6125-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm6125-gpucc.yaml
index 10a9c96a97b6..12d6f0cdbcd8 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm6125-gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm6125-gpucc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm graphics clock control module provides clocks and power domains on
Qualcomm SoCs.
- See also:: include/dt-bindings/clock/qcom,sm6125-gpucc.h
+ See also: include/dt-bindings/clock/qcom,sm6125-gpucc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm6350-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm6350-camcc.yaml
index c03b30f64f35..e31cd4300f7d 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm6350-camcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm6350-camcc.yaml
@@ -8,16 +8,21 @@ title: Qualcomm Camera Clock & Reset Controller on SM6350
maintainers:
- Konrad Dybcio <konradybcio@kernel.org>
+ - Taniya Das <quic_tdas@quicinc.com>
description: |
Qualcomm camera clock control module provides the clocks, resets and power
- domains on SM6350.
+ domains on SM6350 and QCS615 SoCs.
- See also:: include/dt-bindings/clock/qcom,sm6350-camcc.h
+ See also:
+ include/dt-bindings/clock/qcom,qcs615-camcc.h
+ include/dt-bindings/clock/qcom,sm6350-camcc.h
properties:
compatible:
- const: qcom,sm6350-camcc
+ enum:
+ - qcom,qcs615-camcc
+ - qcom,sm6350-camcc
clocks:
items:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm6375-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm6375-dispcc.yaml
index 3cd422a645fd..519ea76cb052 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm6375-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm6375-dispcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm display clock control module provides the clocks, resets and power
domains on SM6375.
- See also:: include/dt-bindings/clock/qcom,dispcc-sm6375.h
+ See also: include/dt-bindings/clock/qcom,dispcc-sm6375.h
allOf:
- $ref: qcom,gcc.yaml#
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm6375-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm6375-gcc.yaml
index de4e9066eeb8..66dfa72fa975 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm6375-gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm6375-gcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM6375
- See also:: include/dt-bindings/clock/qcom,sm6375-gcc.h
+ See also: include/dt-bindings/clock/qcom,sm6375-gcc.h
allOf:
- $ref: qcom,gcc.yaml#
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm6375-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm6375-gpucc.yaml
index d9dd479c17bd..3aad6b5bb1c5 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm6375-gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm6375-gpucc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm graphics clock control module provides clocks, resets and power
domains on Qualcomm SoCs.
- See also:: include/dt-bindings/clock/qcom,sm6375-gpucc.h
+ See also: include/dt-bindings/clock/qcom,sm6375-gpucc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm7150-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm7150-camcc.yaml
index 7be4b10c430c..b96091c28c5a 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm7150-camcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm7150-camcc.yaml
@@ -15,7 +15,7 @@ description: |
Qualcomm camera clock control module provides the clocks, resets and power
domains on SM7150.
- See also:: include/dt-bindings/clock/qcom,sm7150-camcc.h
+ See also: include/dt-bindings/clock/qcom,sm7150-camcc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm7150-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm7150-dispcc.yaml
index b8d6e1d05ce2..13ab3359b592 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm7150-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm7150-dispcc.yaml
@@ -15,7 +15,7 @@ description: |
Qualcomm display clock control module provides the clocks, resets and power
domains on SM7150.
- See also:: include/dt-bindings/clock/qcom,sm7150-dispcc.h
+ See also: include/dt-bindings/clock/qcom,sm7150-dispcc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm7150-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm7150-gcc.yaml
index 4d7bbbf4ce8a..3878808f811e 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm7150-gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm7150-gcc.yaml
@@ -15,7 +15,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM7150
- See also:: include/dt-bindings/clock/qcom,sm7150-gcc.h
+ See also: include/dt-bindings/clock/qcom,sm7150-gcc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm7150-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm7150-videocc.yaml
index 037ffc71e70e..9f7928730386 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm7150-videocc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm7150-videocc.yaml
@@ -15,7 +15,7 @@ description: |
Qualcomm video clock control module provides the clocks, resets and power
domains on SM7150.
- See also:: include/dt-bindings/clock/qcom,videocc-sm7150.h
+ See also: include/dt-bindings/clock/qcom,videocc-sm7150.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8150-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8150-camcc.yaml
index 5e9f62d7866c..a55e30a4975e 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8150-camcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8150-camcc.yaml
@@ -13,15 +13,12 @@ description: |
Qualcomm camera clock control module provides the clocks, resets and
power domains on SM8150.
- See also:: include/dt-bindings/clock/qcom,sm8150-camcc.h
+ See also: include/dt-bindings/clock/qcom,sm8150-camcc.h
properties:
compatible:
const: qcom,sm8150-camcc
- reg:
- maxItems: 1
-
clocks:
items:
- description: Board XO source
@@ -37,26 +34,17 @@ properties:
description:
A phandle to an OPP node describing required MMCX performance point.
- '#clock-cells':
- const: 1
-
- '#reset-cells':
- const: 1
-
- '#power-domain-cells':
- const: 1
-
required:
- compatible
- - reg
- clocks
- power-domains
- required-opps
- - '#clock-cells'
- - '#reset-cells'
- '#power-domain-cells'
-additionalProperties: false
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml
index 9e79f8fec437..c1e06f39431e 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml
@@ -15,7 +15,6 @@ description: |
domains on SM8450.
See also:
- include/dt-bindings/clock/qcom,sc8280xp-camcc.h
include/dt-bindings/clock/qcom,sm8450-camcc.h
include/dt-bindings/clock/qcom,sm8550-camcc.h
include/dt-bindings/clock/qcom,sm8650-camcc.h
@@ -23,7 +22,6 @@ description: |
properties:
compatible:
enum:
- - qcom,sc8280xp-camcc
- qcom,sm8450-camcc
- qcom,sm8475-camcc
- qcom,sm8550-camcc
@@ -37,14 +35,18 @@ properties:
- description: Sleep clock source
power-domains:
- maxItems: 1
description:
- A phandle and PM domain specifier for the MMCX power domain.
+ Power domains required for the clock controller to operate
+ items:
+ - description: MMCX power domain
+ - description: MXC power domain
required-opps:
- maxItems: 1
description:
- A phandle to an OPP node describing required MMCX performance point.
+ OPP nodes that describe required performance points on power domains
+ items:
+ - description: MMCX performance point
+ - description: MXC performance point
reg:
maxItems: 1
@@ -82,8 +84,10 @@ examples:
<&rpmhcc RPMH_CXO_CLK>,
<&rpmhcc RPMH_CXO_CLK_A>,
<&sleep_clk>;
- power-domains = <&rpmhpd RPMHPD_MMCX>;
- required-opps = <&rpmhpd_opp_low_svs>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>,
+ <&rpmhpd RPMHPD_MXC>;
+ required-opps = <&rpmhpd_opp_low_svs>,
+ <&rpmhpd_opp_low_svs>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-dispcc.yaml
index e9123bbfd491..bd131a1ff165 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8450-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-dispcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm display clock control module provides the clocks, resets and power
domains on SM8450.
- See also:: include/dt-bindings/clock/qcom,sm8450-dispcc.h
+ See also: include/dt-bindings/clock/qcom,sm8450-dispcc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-gpucc.yaml
index 02968632fb3a..44380f6f8136 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8450-gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-gpucc.yaml
@@ -14,6 +14,7 @@ description: |
domains on Qualcomm SoCs.
See also::
+ include/dt-bindings/clock/qcom,milos-gpucc.h
include/dt-bindings/clock/qcom,sar2130p-gpucc.h
include/dt-bindings/clock/qcom,sm4450-gpucc.h
include/dt-bindings/clock/qcom,sm8450-gpucc.h
@@ -25,6 +26,7 @@ description: |
properties:
compatible:
enum:
+ - qcom,milos-gpucc
- qcom,sar2130p-gpucc
- qcom,sm4450-gpucc
- qcom,sm8450-gpucc
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
index 62714fa54db8..fcd2727dae46 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
@@ -25,6 +25,7 @@ properties:
- qcom,sm8475-videocc
- qcom,sm8550-videocc
- qcom,sm8650-videocc
+ - qcom,x1e80100-videocc
clocks:
items:
@@ -32,14 +33,18 @@ properties:
- description: Video AHB clock from GCC
power-domains:
- maxItems: 1
description:
- MMCX power domain.
+ Power domains required for the clock controller to operate
+ items:
+ - description: MMCX power domain
+ - description: MXC power domain
required-opps:
- maxItems: 1
description:
- A phandle to an OPP node describing required MMCX performance point.
+ OPP nodes that describe required performance points on power domains
+ items:
+ - description: MMCX performance point
+ - description: MXC performance point
required:
- compatible
@@ -72,8 +77,10 @@ examples:
reg = <0x0aaf0000 0x10000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_VIDEO_AHB_CLK>;
- power-domains = <&rpmhpd RPMHPD_MMCX>;
- required-opps = <&rpmhpd_opp_low_svs>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>,
+ <&rpmhpd RPMHPD_MXC>;
+ required-opps = <&rpmhpd_opp_low_svs>,
+ <&rpmhpd_opp_low_svs>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8550-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8550-gcc.yaml
index d83b64dcce4f..c4e9b9bb63f5 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8550-gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8550-gcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM8550
- See also:: include/dt-bindings/clock/qcom,sm8550-gcc.h
+ See also: include/dt-bindings/clock/qcom,sm8550-gcc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8550-tcsr.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8550-tcsr.yaml
index f3afbb25e868..2ed7d59722fc 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8550-tcsr.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8550-tcsr.yaml
@@ -22,6 +22,7 @@ properties:
compatible:
items:
- enum:
+ - qcom,milos-tcsr
- qcom,sar2130p-tcsr
- qcom,sm8550-tcsr
- qcom,sm8650-tcsr
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8650-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8650-gcc.yaml
index 976f29cce809..c7143e2abc80 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8650-gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8650-gcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM8650
- See also:: include/dt-bindings/clock/qcom,sm8650-gcc.h
+ See also: include/dt-bindings/clock/qcom,sm8650-gcc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,x1e80100-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,x1e80100-gcc.yaml
index 28797d0c5d8d..68dde0720c71 100644
--- a/Documentation/devicetree/bindings/clock/qcom,x1e80100-gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,x1e80100-gcc.yaml
@@ -13,7 +13,7 @@ description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on X1E80100
- See also:: include/dt-bindings/clock/qcom,x1e80100-gcc.h
+ See also: include/dt-bindings/clock/qcom,x1e80100-gcc.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
index 77ce3615c65a..bc2fd3761328 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
@@ -52,9 +52,16 @@ properties:
- renesas,r8a779f0-cpg-mssr # R-Car S4-8
- renesas,r8a779g0-cpg-mssr # R-Car V4H
- renesas,r8a779h0-cpg-mssr # R-Car V4M
+ - renesas,r9a09g077-cpg-mssr # RZ/T2H
+ - renesas,r9a09g087-cpg-mssr # RZ/N2H
reg:
- maxItems: 1
+ minItems: 1
+ items:
+ - description: base address of register block 0
+ - description: base address of register block 1
+ description: base addresses of the clock controller. Some controllers
+ (like r9a09g077) use two register blocks instead of a single one.
clocks:
minItems: 1
@@ -92,16 +99,6 @@ properties:
the datasheet.
const: 1
-if:
- not:
- properties:
- compatible:
- items:
- enum:
- - renesas,r7s9210-cpg-mssr
-then:
- required:
- - '#reset-cells'
required:
- compatible
@@ -111,6 +108,36 @@ required:
- '#clock-cells'
- '#power-domain-cells'
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,r9a09g077-cpg-mssr
+ - renesas,r9a09g087-cpg-mssr
+ then:
+ properties:
+ reg:
+ minItems: 2
+ clock-names:
+ items:
+ - const: extal
+ else:
+ properties:
+ reg:
+ maxItems: 1
+ - if:
+ not:
+ properties:
+ compatible:
+ items:
+ enum:
+ - renesas,r7s9210-cpg-mssr
+ then:
+ required:
+ - '#reset-cells'
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml b/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
index 0440f23da059..8c18616e5c4d 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
@@ -57,8 +57,7 @@ properties:
can be power-managed through Module Standby should refer to the CPG device
node in their "power-domains" property, as documented by the generic PM
Domain bindings in Documentation/devicetree/bindings/power/power-domain.yaml.
- The power domain specifiers defined in <dt-bindings/clock/r9a0*-cpg.h> could
- be used to reference individual CPG power domains.
+ const: 0
'#reset-cells':
description:
@@ -77,21 +76,6 @@ required:
additionalProperties: false
-allOf:
- - if:
- properties:
- compatible:
- contains:
- const: renesas,r9a08g045-cpg
- then:
- properties:
- '#power-domain-cells':
- const: 1
- else:
- properties:
- '#power-domain-cells':
- const: 0
-
examples:
- |
cpg: clock-controller@11010000 {
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml
index 6961a68098f4..72f59db73f76 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml
@@ -32,23 +32,24 @@ description: |
properties:
compatible:
enum:
- - samsung,exynosautov920-cmu-top
- samsung,exynosautov920-cmu-cpucl0
- samsung,exynosautov920-cmu-cpucl1
- samsung,exynosautov920-cmu-cpucl2
- - samsung,exynosautov920-cmu-peric0
- - samsung,exynosautov920-cmu-peric1
- - samsung,exynosautov920-cmu-misc
- samsung,exynosautov920-cmu-hsi0
- samsung,exynosautov920-cmu-hsi1
+ - samsung,exynosautov920-cmu-hsi2
+ - samsung,exynosautov920-cmu-misc
+ - samsung,exynosautov920-cmu-peric0
+ - samsung,exynosautov920-cmu-peric1
+ - samsung,exynosautov920-cmu-top
clocks:
minItems: 1
- maxItems: 4
+ maxItems: 5
clock-names:
minItems: 1
- maxItems: 4
+ maxItems: 5
"#clock-cells":
const: 1
@@ -201,6 +202,30 @@ allOf:
- const: usbdrd
- const: mmc_card
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynosautov920-cmu-hsi2
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (38.4 MHz)
+ - description: CMU_HSI2 NOC clock (from CMU_TOP)
+ - description: CMU_HSI2 NOC UFS clock (from CMU_TOP)
+ - description: CMU_HSI2 UFS EMBD clock (from CMU_TOP)
+ - description: CMU_HSI2 ETHERNET clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: noc
+ - const: ufs
+ - const: embd
+ - const: ethernet
+
required:
- compatible
- "#clock-cells"
diff --git a/Documentation/devicetree/bindings/clock/ti/autoidle.txt b/Documentation/devicetree/bindings/clock/ti/autoidle.txt
deleted file mode 100644
index 05645a10a9e3..000000000000
--- a/Documentation/devicetree/bindings/clock/ti/autoidle.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-Binding for Texas Instruments autoidle clock.
-
-This binding uses the common clock binding[1]. It assumes a register mapped
-clock which can be put to idle automatically by hardware based on the usage
-and a configuration bit setting. Autoidle clock is never an individual
-clock, it is always a derivative of some basic clock like a gate, divider,
-or fixed-factor.
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- reg : offset for the register controlling the autoidle
-- ti,autoidle-shift : bit shift of the autoidle enable bit
-- ti,invert-autoidle-bit : autoidle is enabled by setting the bit to 0
-
-Examples:
- dpll_core_m4_ck: dpll_core_m4_ck {
- #clock-cells = <0>;
- compatible = "ti,divider-clock";
- clocks = <&dpll_core_x2_ck>;
- ti,max-div = <31>;
- ti,autoidle-shift = <8>;
- reg = <0x2d38>;
- ti,index-starts-at-one;
- ti,invert-autoidle-bit;
- };
-
- dpll_usb_clkdcoldo_ck: dpll_usb_clkdcoldo_ck {
- #clock-cells = <0>;
- compatible = "ti,fixed-factor-clock";
- clocks = <&dpll_usb_ck>;
- ti,clock-div = <1>;
- ti,autoidle-shift = <8>;
- reg = <0x01b4>;
- ti,clock-mult = <1>;
- ti,invert-autoidle-bit;
- };
diff --git a/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt b/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt
deleted file mode 100644
index dc69477b6e98..000000000000
--- a/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-Binding for TI fixed factor rate clock sources.
-
-This binding uses the common clock binding[1], and also uses the autoidle
-support from TI autoidle clock [2].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/clock/ti/autoidle.txt
-
-Required properties:
-- compatible : shall be "ti,fixed-factor-clock".
-- #clock-cells : from common clock binding; shall be set to 0.
-- ti,clock-div: fixed divider.
-- ti,clock-mult: fixed multiplier.
-- clocks: parent clock.
-
-Optional properties:
-- clock-output-names : from common clock binding.
-- ti,autoidle-shift: bit shift of the autoidle enable bit for the clock,
- see [2]
-- reg: offset for the autoidle register of this clock, see [2]
-- ti,invert-autoidle-bit: autoidle is enabled by setting the bit to 0, see [2]
-- ti,set-rate-parent: clk_set_rate is propagated to parent
-
-Example:
- clock {
- compatible = "ti,fixed-factor-clock";
- clocks = <&parentclk>;
- #clock-cells = <0>;
- ti,clock-div = <2>;
- ti,clock-mult = <1>;
- };
-
- dpll_usb_clkdcoldo_ck: dpll_usb_clkdcoldo_ck {
- #clock-cells = <0>;
- compatible = "ti,fixed-factor-clock";
- clocks = <&dpll_usb_ck>;
- ti,clock-div = <1>;
- ti,autoidle-shift = <8>;
- reg = <0x01b4>;
- ti,clock-mult = <1>;
- ti,invert-autoidle-bit;
- };
diff --git a/Documentation/devicetree/bindings/clock/ti/ti,autoidle.yaml b/Documentation/devicetree/bindings/clock/ti/ti,autoidle.yaml
new file mode 100644
index 000000000000..ed1bf182b64d
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti/ti,autoidle.yaml
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/ti/ti,autoidle.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI autoidle clock
+
+maintainers:
+ - Tero Kristo <kristo@kernel.org>
+ - Sukrut Bellary <sbellary@baylibre.com>
+
+description:
+ Some clocks in TI SoCs support the autoidle feature. These properties are
+ applicable only if the clock supports the autoidle feature. It assumes a
+ register-mapped clock which can be put to idle automatically by hardware
+ based on usage and a configuration bit setting. An autoidle clock is never
+ an individual clock; it is always a derivative of some basic clock like a
+ gate, divider, or fixed-factor.
+
+properties:
+ ti,autoidle-shift:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ bit shift of the autoidle enable bit for the clock
+ maximum: 31
+ default: 0
+
+ ti,invert-autoidle-bit:
+ type: boolean
+ description:
+ autoidle is enabled by setting the bit to 0
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/clock/ti/ti,divider-clock.yaml b/Documentation/devicetree/bindings/clock/ti/ti,divider-clock.yaml
index 3fbe236eb565..6729fcb839d2 100644
--- a/Documentation/devicetree/bindings/clock/ti/ti,divider-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/ti/ti,divider-clock.yaml
@@ -55,9 +55,10 @@ description: |
is missing it is the same as supplying a zero shift.
This binding can also optionally provide support to the hardware autoidle
- feature, see [1].
+ feature.
- [1] Documentation/devicetree/bindings/clock/ti/autoidle.txt
+allOf:
+ - $ref: ti,autoidle.yaml#
properties:
compatible:
@@ -97,7 +98,6 @@ properties:
minimum: 1
default: 1
-
ti,max-div:
$ref: /schemas/types.yaml#/definitions/uint32
description:
@@ -116,20 +116,6 @@ properties:
valid divisor programming must be a power of two,
only valid if ti,dividers is not defined.
- ti,autoidle-shift:
- $ref: /schemas/types.yaml#/definitions/uint32
- description:
- bit shift of the autoidle enable bit for the clock,
- see [1].
- maximum: 31
- default: 0
-
- ti,invert-autoidle-bit:
- type: boolean
- description:
- autoidle is enabled by setting the bit to 0,
- see [1]
-
ti,set-rate-parent:
type: boolean
description:
@@ -156,7 +142,7 @@ required:
- clocks
- reg
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/clock/ti/ti,fixed-factor-clock.yaml b/Documentation/devicetree/bindings/clock/ti/ti,fixed-factor-clock.yaml
new file mode 100644
index 000000000000..7a63b0992976
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti/ti,fixed-factor-clock.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/ti/ti,fixed-factor-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI fixed factor rate clock sources
+
+maintainers:
+ - Tero Kristo <kristo@kernel.org>
+ - Sukrut Bellary <sbellary@baylibre.com>
+
+description:
+  This consists of a divider and a multiplier used to generate a fixed-rate
+  clock. It also uses the autoidle support from the TI autoidle clock binding.
+
+allOf:
+ - $ref: ti,autoidle.yaml#
+
+properties:
+ compatible:
+ const: ti,fixed-factor-clock
+
+ "#clock-cells":
+ const: 0
+
+ reg:
+ maxItems: 1
+
+ ti,clock-div:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Fixed divider
+ minimum: 1
+
+ ti,clock-mult:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Fixed multiplier
+ minimum: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-output-names:
+ maxItems: 1
+
+ ti,set-rate-parent:
+    description:
+      Propagate rate changes (clk_set_rate) to the parent clock
+ type: boolean
+
+required:
+ - compatible
+ - clocks
+ - "#clock-cells"
+ - ti,clock-mult
+ - ti,clock-div
+
+unevaluatedProperties: false
+
+examples:
+ - |
+    bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clock@1b4 {
+ compatible = "ti,fixed-factor-clock";
+ reg = <0x1b4>;
+ clocks = <&dpll_usb_ck>;
+ #clock-cells = <0>;
+ ti,clock-mult = <1>;
+ ti,clock-div = <1>;
+ ti,autoidle-shift = <8>;
+ ti,invert-autoidle-bit;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/xgene.txt b/Documentation/devicetree/bindings/clock/xgene.txt
deleted file mode 100644
index 8233e771711b..000000000000
--- a/Documentation/devicetree/bindings/clock/xgene.txt
+++ /dev/null
@@ -1,131 +0,0 @@
-Device Tree Clock bindings for APM X-Gene
-
-This binding uses the common clock binding[1].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- compatible : shall be one of the following:
- "apm,xgene-socpll-clock" - for a X-Gene SoC PLL clock
- "apm,xgene-pcppll-clock" - for a X-Gene PCP PLL clock
- "apm,xgene-pmd-clock" - for a X-Gene PMD clock
- "apm,xgene-device-clock" - for a X-Gene device clock
- "apm,xgene-socpll-v2-clock" - for a X-Gene SoC PLL v2 clock
- "apm,xgene-pcppll-v2-clock" - for a X-Gene PCP PLL v2 clock
-
-Required properties for SoC or PCP PLL clocks:
-- reg : shall be the physical PLL register address for the pll clock.
-- clocks : shall be the input parent clock phandle for the clock. This should
- be the reference clock.
-- #clock-cells : shall be set to 1.
-- clock-output-names : shall be the name of the PLL referenced by derive
- clock.
-Optional properties for PLL clocks:
-- clock-names : shall be the name of the PLL. If missing, use the device name.
-
-Required properties for PMD clocks:
-- reg : shall be the physical register address for the pmd clock.
-- clocks : shall be the input parent clock phandle for the clock.
-- #clock-cells : shall be set to 1.
-- clock-output-names : shall be the name of the clock referenced by derive
- clock.
-Optional properties for PLL clocks:
-- clock-names : shall be the name of the clock. If missing, use the device name.
-
-Required properties for device clocks:
-- reg : shall be a list of address and length pairs describing the CSR
- reset and/or the divider. Either may be omitted, but at least
- one must be present.
- - reg-names : shall be a string list describing the reg resource. This
- may include "csr-reg" and/or "div-reg". If this property
- is not present, the reg property is assumed to describe
- only "csr-reg".
-- clocks : shall be the input parent clock phandle for the clock.
-- #clock-cells : shall be set to 1.
-- clock-output-names : shall be the name of the device referenced.
-Optional properties for device clocks:
-- clock-names : shall be the name of the device clock. If missing, use the
- device name.
-- csr-offset : Offset to the CSR reset register from the reset address base.
- Default is 0.
-- csr-mask : CSR reset mask bit. Default is 0xF.
-- enable-offset : Offset to the enable register from the reset address base.
- Default is 0x8.
-- enable-mask : CSR enable mask bit. Default is 0xF.
-- divider-offset : Offset to the divider CSR register from the divider base.
- Default is 0x0.
-- divider-width : Width of the divider register. Default is 0.
-- divider-shift : Bit shift of the divider register. Default is 0.
-
-For example:
-
- pcppll: pcppll@17000100 {
- compatible = "apm,xgene-pcppll-clock";
- #clock-cells = <1>;
- clocks = <&refclk 0>;
- clock-names = "pcppll";
- reg = <0x0 0x17000100 0x0 0x1000>;
- clock-output-names = "pcppll";
- type = <0>;
- };
-
- pmd0clk: pmd0clk@7e200200 {
- compatible = "apm,xgene-pmd-clock";
- #clock-cells = <1>;
- clocks = <&pmdpll 0>;
- reg = <0x0 0x7e200200 0x0 0x10>;
- clock-output-names = "pmd0clk";
- };
-
- socpll: socpll@17000120 {
- compatible = "apm,xgene-socpll-clock";
- #clock-cells = <1>;
- clocks = <&refclk 0>;
- clock-names = "socpll";
- reg = <0x0 0x17000120 0x0 0x1000>;
- clock-output-names = "socpll";
- type = <1>;
- };
-
- qmlclk: qmlclk {
- compatible = "apm,xgene-device-clock";
- #clock-cells = <1>;
- clocks = <&socplldiv2 0>;
- clock-names = "qmlclk";
- reg = <0x0 0x1703C000 0x0 0x1000>;
- reg-name = "csr-reg";
- clock-output-names = "qmlclk";
- };
-
- ethclk: ethclk {
- compatible = "apm,xgene-device-clock";
- #clock-cells = <1>;
- clocks = <&socplldiv2 0>;
- clock-names = "ethclk";
- reg = <0x0 0x17000000 0x0 0x1000>;
- reg-names = "div-reg";
- divider-offset = <0x238>;
- divider-width = <0x9>;
- divider-shift = <0x0>;
- clock-output-names = "ethclk";
- };
-
- apbclk: apbclk {
- compatible = "apm,xgene-device-clock";
- #clock-cells = <1>;
- clocks = <&ahbclk 0>;
- clock-names = "apbclk";
- reg = <0x0 0x1F2AC000 0x0 0x1000
- 0x0 0x1F2AC000 0x0 0x1000>;
- reg-names = "csr-reg", "div-reg";
- csr-offset = <0x0>;
- csr-mask = <0x200>;
- enable-offset = <0x8>;
- enable-mask = <0x200>;
- divider-offset = <0x10>;
- divider-width = <0x2>;
- divider-shift = <0x0>;
- flags = <0x8>;
- clock-output-names = "apbclk";
- };
-
diff --git a/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-aes.yaml b/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-aes.yaml
index 7dc0748444fd..19010f90198a 100644
--- a/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-aes.yaml
+++ b/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-aes.yaml
@@ -15,7 +15,9 @@ properties:
oneOf:
- const: atmel,at91sam9g46-aes
- items:
- - const: microchip,sam9x7-aes
+ - enum:
+ - microchip,sam9x7-aes
+ - microchip,sama7d65-aes
- const: atmel,at91sam9g46-aes
reg:
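A hypothetical sketch of a node using the newly added compatible (the unit
address and register size are illustrative, and other required properties are
omitted); the SoC-specific string falls back to the at91sam9g46 match:

    crypto@e1810000 {
        compatible = "microchip,sama7d65-aes", "atmel,at91sam9g46-aes";
        reg = <0xe1810000 0x100>;
    };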
diff --git a/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-sha.yaml b/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-sha.yaml
index d378c53314dd..39e076b275b3 100644
--- a/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-sha.yaml
+++ b/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-sha.yaml
@@ -15,7 +15,9 @@ properties:
oneOf:
- const: atmel,at91sam9g46-sha
- items:
- - const: microchip,sam9x7-sha
+ - enum:
+ - microchip,sam9x7-sha
+ - microchip,sama7d65-sha
- const: atmel,at91sam9g46-sha
reg:
diff --git a/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-tdes.yaml b/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-tdes.yaml
index 6a441f79efea..6f16008c4251 100644
--- a/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-tdes.yaml
+++ b/Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-tdes.yaml
@@ -15,7 +15,9 @@ properties:
oneOf:
- const: atmel,at91sam9g46-tdes
- items:
- - const: microchip,sam9x7-tdes
+ - enum:
+ - microchip,sam9x7-tdes
+ - microchip,sama7d65-tdes
- const: atmel,at91sam9g46-tdes
reg:
diff --git a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml
index 75afa441e019..dcc755d2709a 100644
--- a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml
+++ b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml
@@ -46,6 +46,8 @@ properties:
- items:
- enum:
- fsl,imx6ul-caam
+ - fsl,imx8qm-caam
+ - fsl,imx8qxp-caam
- fsl,sec-v5.0
- const: fsl,sec-v4.0
- const: fsl,sec-v4.0
@@ -77,6 +79,9 @@ properties:
interrupts:
maxItems: 1
+ power-domains:
+ maxItems: 1
+
fsl,sec-era:
description: Defines the 'ERA' of the SEC device.
$ref: /schemas/types.yaml#/definitions/uint32
@@ -106,7 +111,10 @@ patternProperties:
- const: fsl,sec-v5.0-job-ring
- const: fsl,sec-v4.0-job-ring
- items:
- - const: fsl,sec-v5.0-job-ring
+ - enum:
+ - fsl,imx8qm-job-ring
+ - fsl,imx8qxp-job-ring
+ - fsl,sec-v5.0-job-ring
- const: fsl,sec-v4.0-job-ring
- const: fsl,sec-v4.0-job-ring
@@ -116,6 +124,9 @@ patternProperties:
interrupts:
maxItems: 1
+ power-domains:
+ maxItems: 1
+
fsl,liodn:
description:
Specifies the LIODN to be used in conjunction with the ppid-to-liodn
@@ -125,6 +136,20 @@ patternProperties:
$ref: /schemas/types.yaml#/definitions/uint32-array
items:
- maximum: 0xfff
+ allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx8qm-job-ring
+ - fsl,imx8qxp-job-ring
+ then:
+ required:
+ - power-domains
+ else:
+ properties:
+ power-domains: false
'^rtic@[0-9a-f]+$':
type: object
@@ -212,6 +237,20 @@ required:
- reg
- ranges
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx8qm-caam
+ - fsl,imx8qxp-caam
+then:
+ required:
+ - power-domains
+else:
+ properties:
+ power-domains: false
+
additionalProperties: false
examples:
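Under the new conditional, an i.MX8QM/QXP instantiation must carry a power
domain, while other SoCs must not. A sketch under those assumptions (unit
address, ranges and power-domain specifier are illustrative):

    crypto@31400000 {
        compatible = "fsl,imx8qm-caam", "fsl,sec-v4.0";
        reg = <0x31400000 0x10000>;
        ranges = <0 0x31400000 0x10000>;
        #address-cells = <1>;
        #size-cells = <1>;
        power-domains = <&pd IMX_SC_R_CAAM_JR1>;
    };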
diff --git a/Documentation/devicetree/bindings/crypto/omap-aes.txt b/Documentation/devicetree/bindings/crypto/omap-aes.txt
deleted file mode 100644
index fd9717653cbb..000000000000
--- a/Documentation/devicetree/bindings/crypto/omap-aes.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-OMAP SoC AES crypto Module
-
-Required properties:
-
-- compatible : Should contain entries for this and backward compatible
- AES versions:
- - "ti,omap2-aes" for OMAP2.
- - "ti,omap3-aes" for OMAP3.
- - "ti,omap4-aes" for OMAP4 and AM33XX.
- Note that the OMAP2 and 3 versions are compatible (OMAP3 supports
- more algorithms) but they are incompatible with OMAP4.
-- ti,hwmods: Name of the hwmod associated with the AES module
-- reg : Offset and length of the register set for the module
-- interrupts : the interrupt-specifier for the AES module.
-
-Optional properties:
-- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
- Documentation/devicetree/bindings/dma/dma.txt
-- dma-names: DMA request names should include "tx" and "rx" if present.
-
-Example:
- /* AM335x */
- aes: aes@53500000 {
- compatible = "ti,omap4-aes";
- ti,hwmods = "aes";
- reg = <0x53500000 0xa0>;
- interrupts = <102>;
- dmas = <&edma 6>,
- <&edma 5>;
- dma-names = "tx", "rx";
- };
diff --git a/Documentation/devicetree/bindings/crypto/omap-des.txt b/Documentation/devicetree/bindings/crypto/omap-des.txt
deleted file mode 100644
index e8c63bf2e16d..000000000000
--- a/Documentation/devicetree/bindings/crypto/omap-des.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-OMAP SoC DES crypto Module
-
-Required properties:
-
-- compatible : Should contain "ti,omap4-des"
-- ti,hwmods: Name of the hwmod associated with the DES module
-- reg : Offset and length of the register set for the module
-- interrupts : the interrupt-specifier for the DES module
-- clocks : A phandle to the functional clock node of the DES module
- corresponding to each entry in clock-names
-- clock-names : Name of the functional clock, should be "fck"
-
-Optional properties:
-- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
- Documentation/devicetree/bindings/dma/dma.txt
- Each entry corresponds to an entry in dma-names
-- dma-names: DMA request names should include "tx" and "rx" if present
-
-Example:
- /* DRA7xx SoC */
- des: des@480a5000 {
- compatible = "ti,omap4-des";
- ti,hwmods = "des";
- reg = <0x480a5000 0xa0>;
- interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&sdma 117>, <&sdma 116>;
- dma-names = "tx", "rx";
- clocks = <&l3_iclk_div>;
- clock-names = "fck";
- };
diff --git a/Documentation/devicetree/bindings/crypto/ti,omap2-aes.yaml b/Documentation/devicetree/bindings/crypto/ti,omap2-aes.yaml
new file mode 100644
index 000000000000..90e92050ad2e
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/ti,omap2-aes.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/ti,omap2-aes.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OMAP SoC AES crypto Module
+
+maintainers:
+ - Aaro Koskinen <aaro.koskinen@iki.fi>
+ - Andreas Kemnade <andreas@kemnade.info>
+ - Kevin Hilman <khilman@baylibre.com>
+ - Roger Quadros <rogerq@kernel.org>
+ - Tony Lindgren <tony@atomide.com>
+
+properties:
+ compatible:
+ enum:
+ - ti,omap2-aes
+ - ti,omap3-aes
+ - ti,omap4-aes
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ dmas:
+ maxItems: 2
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ ti,hwmods:
+ description: Name of the hwmod associated with the AES module
+ const: aes
+ deprecated: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ aes@53500000 {
+ compatible = "ti,omap4-aes";
+ reg = <0x53500000 0xa0>;
+ interrupts = <102>;
+ dmas = <&edma 6>,
+ <&edma 5>;
+ dma-names = "tx", "rx";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/ti,omap4-des.yaml b/Documentation/devicetree/bindings/crypto/ti,omap4-des.yaml
new file mode 100644
index 000000000000..f02f1e141218
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/ti,omap4-des.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/ti,omap4-des.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OMAP4 DES crypto Module
+
+maintainers:
+ - Aaro Koskinen <aaro.koskinen@iki.fi>
+ - Andreas Kemnade <andreas@kemnade.info>
+ - Kevin Hilman <khilman@baylibre.com>
+ - Roger Quadros <rogerq@kernel.org>
+ - Tony Lindgren <tony@atomide.com>
+
+properties:
+ compatible:
+ const: ti,omap4-des
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ dmas:
+ maxItems: 2
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: fck
+
+dependencies:
+ dmas: [ dma-names ]
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ des@480a5000 {
+ compatible = "ti,omap4-des";
+ reg = <0x480a5000 0xa0>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&l3_iclk_div>;
+ clock-names = "fck";
+ dmas = <&sdma 117>, <&sdma 116>;
+ dma-names = "tx", "rx";
+ };
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
index b75c1ec686ad..cbd18fd83e52 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
@@ -24,9 +24,11 @@ properties:
- allwinner,sun50i-a64-de2-mixer-0
- allwinner,sun50i-a64-de2-mixer-1
- allwinner,sun50i-h6-de3-mixer-0
+ - allwinner,sun50i-h616-de33-mixer-0
- reg:
- maxItems: 1
+ reg: true
+
+ reg-names: true
clocks:
items:
@@ -61,6 +63,34 @@ properties:
required:
- port@1
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - allwinner,sun50i-h616-de33-mixer-0
+ then:
+ properties:
+ reg:
+ description: |
+ Registers for controlling individual layers of the display
+ engine (layers), global control (top), and display blending
+            control (display). Names are from the Allwinner BSP kernel.
+ maxItems: 3
+ reg-names:
+ items:
+ - const: layers
+ - const: top
+ - const: display
+ required:
+ - reg-names
+
+ else:
+ properties:
+ reg:
+ maxItems: 1
+
required:
- compatible
- reg
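A sketch of the DE33 case (register addresses and sizes are illustrative;
the clocks, resets and ports required by the schema are omitted):

    mixer@1100000 {
        compatible = "allwinner,sun50i-h616-de33-mixer-0";
        reg = <0x1100000 0x100000>,
              <0x8100000 0x40000>,
              <0x8200000 0x40000>;
        reg-names = "layers", "top", "display";
    };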
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-axi-performance-counter.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-axi-performance-counter.yaml
new file mode 100644
index 000000000000..1d6501afc7f2
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-axi-performance-counter.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-axi-performance-counter.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller AXI Performance Counter
+
+description: |
+ Performance counters are provided to allow measurement of average bandwidth
+ and latency during operation. The following features are supported:
+
+ * Manual and timer controlled measurement mode.
+
+ * Measurement counters:
+ - GLOBAL_COUNTER for overall measurement time
+ - BUSY_COUNTER for number of data bus busy cycles
+ - DATA_COUNTER for number of data transfer cycles
+ - TRANSFER_COUNTER for number of transfers
+ - ADDRBUSY_COUNTER for number of address bus busy cycles
+ - LATENCY_COUNTER for average latency
+
+ * Counter overflow detection.
+
+  * Outstanding Transfer Counters (OTC), which are used for latency
+    measurement, have to run immediately after reset, but can be disabled by
+    software when there is no need for latency measurement.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-axi-performance-counter
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8-lpcg.h>
+
+ pmu@5618f000 {
+ compatible = "fsl,imx8qxp-dc-axi-performance-counter";
+ reg = <0x5618f000 0x90>;
+ clocks = <&dc0_lpcg IMX_LPCG_CLK_5>;
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-blit-engine.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-blit-engine.yaml
new file mode 100644
index 000000000000..45db6da39e20
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-blit-engine.yaml
@@ -0,0 +1,204 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-blit-engine.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Blit Engine
+
+description: |
+  A blit operation (block-based image transfer) reads up to 3 source images
+  from memory and computes one destination image from them, which is written
+  back to memory. The following basic operations are supported:
+
+ * Buffer Fill
+ Fills a buffer with constant color
+
+ * Buffer Copy
+ Copies one source to a destination buffer.
+
+ * Image Blend
+    Combines two source images by a blending equation and writes the result
+    to the destination (which can be one of the sources).
+
+ * Image Rop2/3
+    Combines up to three source images by a logical equation (raster operation)
+    and writes the result to the destination (which can be one of the sources).
+
+ * Image Flip
+ Mirrors the source image in horizontal and/or vertical direction.
+
+ * Format Convert
+    Converts between the supported color and buffer formats.
+
+ * Color Transform
+    Modifies colors by linear or non-linear transformations.
+
+ * Image Scale
+    Changes the size of the source image.
+
+ * Image Rotate
+ Rotates the source image by any angle.
+
+ * Image Filter
+ Performs an FIR filter operation on the source image.
+
+ * Image Warp
+ Performs a re-sampling of the source image with any pattern. The sample
+ point positions are read from a compressed coordinate buffer.
+
+ * Buffer Pack
+ Writes an image with color components stored in up to three different
+ buffers (planar formats) into a single buffer (packed format).
+
+ * Chroma Resample
+ Converts between different YUV formats that differ in chroma sampling rate
+ (4:4:4, 4:2:2, 4:2:0).
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-blit-engine
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: pec
+ - const: cfg
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+patternProperties:
+ "^blitblend@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-blitblend
+
+ "^clut@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-clut
+
+ "^fetchdecode@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-fetchdecode
+
+ "^fetcheco@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-fetcheco
+
+ "^fetchwarp@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-fetchwarp
+
+ "^filter@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-filter
+
+ "^hscaler@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-hscaler
+
+ "^matrix@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-matrix
+
+ "^rop@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-rop
+
+ "^store@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-store
+
+ "^vscaler@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-vscaler
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ blit-engine@56180820 {
+ compatible = "fsl,imx8qxp-dc-blit-engine";
+ reg = <0x56180820 0x13c>, <0x56181000 0x3400>;
+ reg-names = "pec", "cfg";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ fetchdecode@56180820 {
+ compatible = "fsl,imx8qxp-dc-fetchdecode";
+ reg = <0x56180820 0x10>, <0x56181000 0x404>;
+ reg-names = "pec", "cfg";
+ };
+
+ store@56180940 {
+ compatible = "fsl,imx8qxp-dc-store";
+ reg = <0x56180940 0x1c>, <0x56184000 0x5c>;
+ reg-names = "pec", "cfg";
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <0>, <1>, <2>;
+ interrupt-names = "shdload", "framecomplete", "seqcomplete";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-blitblend.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-blitblend.yaml
new file mode 100644
index 000000000000..095e65939fba
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-blitblend.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-blitblend.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Blit Blend Unit
+
+description:
+  Combines two input frames into a single output frame, all frames having the
+  same dimensions.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-blitblend
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: pec
+ - const: cfg
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+additionalProperties: false
+
+examples:
+ - |
+ blitblend@56180920 {
+ compatible = "fsl,imx8qxp-dc-blitblend";
+ reg = <0x56180920 0x10>, <0x56183c00 0x3c>;
+ reg-names = "pec", "cfg";
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-clut.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-clut.yaml
new file mode 100644
index 000000000000..21d42aa11b52
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-clut.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-clut.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Color Lookup Table
+
+description: |
+  The unit implements 3 look-up tables with 256 x 10-bit entries each. These
+  can be used for different kinds of applications. Of the 10-bit input values,
+  only the upper 8 bits are used.
+
+ The unit supports color lookup, index lookup, dithering and alpha masking.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-clut
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: pec
+ - const: cfg
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+additionalProperties: false
+
+examples:
+ - |
+ clut@56180880 {
+ compatible = "fsl,imx8qxp-dc-clut";
+ reg = <0x56180880 0x10>, <0x56182400 0x404>;
+ reg-names = "pec", "cfg";
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-command-sequencer.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-command-sequencer.yaml
new file mode 100644
index 000000000000..27118f4c0d28
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-command-sequencer.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-command-sequencer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Command Sequencer
+
+description: |
+ The Command Sequencer is designed to autonomously process command lists.
+  This allows it to load setups into the DC configuration and synchronize to
+  hardware events, relieving the system's CPU of workload because the CPU does
+  not need to wait for those events. It also simplifies the SW architecture,
+  because no interrupt handlers are required. Setups are read via the AXI bus,
+  while write access to configuration registers occurs directly via an internal
+  bus. This saves bandwidth on the AXI interconnect and improves the system
+  architecture in terms of safety.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-command-sequencer
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 5
+
+ interrupt-names:
+ items:
+ - const: error
+ - const: sw0
+ - const: sw1
+ - const: sw2
+ - const: sw3
+
+ sram:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle pointing to the mmio-sram device node
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+ - interrupt-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8-lpcg.h>
+
+ command-sequencer@56180400 {
+ compatible = "fsl,imx8qxp-dc-command-sequencer";
+ reg = <0x56180400 0x1a4>;
+ clocks = <&dc0_lpcg IMX_LPCG_CLK_5>;
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <36>, <37>, <38>, <39>, <40>;
+ interrupt-names = "error", "sw0", "sw1", "sw2", "sw3";
+ };
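The optional sram property would reference an mmio-sram node; a sketch based on
the example above (the &dc0_sram label is illustrative):

    command-sequencer@56180400 {
        compatible = "fsl,imx8qxp-dc-command-sequencer";
        reg = <0x56180400 0x1a4>;
        clocks = <&dc0_lpcg IMX_LPCG_CLK_5>;
        interrupt-parent = <&dc0_intc>;
        interrupts = <36>, <37>, <38>, <39>, <40>;
        interrupt-names = "error", "sw0", "sw1", "sw2", "sw3";
        sram = <&dc0_sram>;
    };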
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-constframe.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-constframe.yaml
new file mode 100644
index 000000000000..94f678563608
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-constframe.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-constframe.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Constant Frame
+
+description: |
+  The Constant Frame unit is used instead of a Fetch unit where generating
+  constant color frames is sufficient. This is the case for the background
+ planes of content and safety streams in a Display Controller.
+
+  The color can be set up to any RGBA value.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-constframe
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: pec
+ - const: cfg
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+additionalProperties: false
+
+examples:
+ - |
+ constframe@56180960 {
+ compatible = "fsl,imx8qxp-dc-constframe";
+ reg = <0x56180960 0xc>, <0x56184400 0x20>;
+ reg-names = "pec", "cfg";
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-display-engine.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-display-engine.yaml
new file mode 100644
index 000000000000..91f3bb77d8d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-display-engine.yaml
@@ -0,0 +1,152 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-display-engine.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Display Engine
+
+description:
+  All Processing Units that operate in a display clock domain. The pixel
+  pipeline is driven by a video timing and cannot be stalled. It implements
+  all display-specific processing.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-display-engine
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: top
+ - const: cfg
+
+ resets:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 3
+
+ interrupt-names:
+ items:
+ - const: shdload
+ - const: framecomplete
+ - const: seqcomplete
+
+ power-domains:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+patternProperties:
+ "^dither@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-dither
+
+ "^framegen@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-framegen
+
+ "^gammacor@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-gammacor
+
+ "^matrix@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-matrix
+
+ "^signature@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-signature
+
+ "^tcon@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-tcon
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-names
+ - power-domains
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8-lpcg.h>
+ #include <dt-bindings/firmware/imx/rsrc.h>
+
+ display-engine@5618b400 {
+ compatible = "fsl,imx8qxp-dc-display-engine";
+ reg = <0x5618b400 0x14>, <0x5618b800 0x1c00>;
+ reg-names = "top", "cfg";
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <15>, <16>, <17>;
+ interrupt-names = "shdload", "framecomplete", "seqcomplete";
+ power-domains = <&pd IMX_SC_R_DC_0_PLL_0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ framegen@5618b800 {
+ compatible = "fsl,imx8qxp-dc-framegen";
+ reg = <0x5618b800 0x98>;
+ clocks = <&dc0_disp_lpcg IMX_LPCG_CLK_0>;
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <18>, <19>, <20>, <21>, <41>, <42>, <43>, <44>;
+ interrupt-names = "int0", "int1", "int2", "int3",
+ "primsync_on", "primsync_off",
+ "secsync_on", "secsync_off";
+ };
+
+ tcon@5618c800 {
+ compatible = "fsl,imx8qxp-dc-tcon";
+ reg = <0x5618c800 0x588>;
+
+ port {
+ dc0_disp0_dc0_pixel_combiner_ch0: endpoint {
+ remote-endpoint = <&dc0_pixel_combiner_ch0_dc0_disp0>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-dither.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-dither.yaml
new file mode 100644
index 000000000000..8e4468d91836
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-dither.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-dither.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Dither Unit
+
+description: |
+ The unit can increase the physical color resolution of a display from 5, 6, 7
+ or 8 bits per RGB channel to a virtual resolution of 10 bits. The physical
+ resolution can be set individually for each channel.
+
+ The resolution is increased by mixing the two physical colors that are nearest
+ to the virtual color code in a variable ratio either by time (temporal
+ dithering) or by position (spatial dithering).
+
+ An optimized algorithm for temporal dithering minimizes noise artifacts on the
+ output image.
+
+ The dither operation can be individually enabled or disabled for each pixel
+ using the alpha input bit.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-dither
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ dither@5618c400 {
+ compatible = "fsl,imx8qxp-dc-dither";
+ reg = <0x5618c400 0x14>;
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-extdst.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-extdst.yaml
new file mode 100644
index 000000000000..dfc2d4f94f8e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-extdst.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-extdst.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller External Destination Interface
+
+description: |
+ The External Destination unit is the interface between the internal pixel
+ processing pipeline of the Pixel Engine, which is 30-bit RGB plus 8-bit Alpha,
+ and a Display Engine.
+
+ It comprises the following built-in Gamma apply function.
+
+ +------X-----------------------+
+ | | ExtDst Unit |
+ | V |
+ | +-------+ |
+ | | Gamma | |
+ | +-------+ |
+ | | |
+      |      V                       |
+ +------X-----------------------+
+
+  The output format is 24-bit RGB plus 1-bit Alpha. Conversion from 10 to 8
+  bits is done by LSBit truncation. The Alpha output bit is 1 for an input of
+  255, and 0 otherwise.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-extdst
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: pec
+ - const: cfg
+
+ interrupts:
+ maxItems: 3
+
+ interrupt-names:
+ items:
+ - const: shdload
+ - const: framecomplete
+ - const: seqcomplete
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-names
+
+additionalProperties: false
+
+examples:
+ - |
+ extdst@56180980 {
+ compatible = "fsl,imx8qxp-dc-extdst";
+ reg = <0x56180980 0x1c>, <0x56184800 0x28>;
+ reg-names = "pec", "cfg";
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <3>, <4>, <5>;
+ interrupt-names = "shdload", "framecomplete", "seqcomplete";
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-fetchunit.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-fetchunit.yaml
new file mode 100644
index 000000000000..97fb6a4598d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-fetchunit.yaml
@@ -0,0 +1,141 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-fetchunit.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Fetch Unit
+
+description: |
+ The Fetch Unit is the interface between the AXI bus for source buffer access
+ and the internal pixel processing pipeline, which is 30-bit RGB plus 8-bit
+ Alpha.
+
+ It is used to generate foreground planes in Display Controllers and source
+ planes in Blit Engines, and comprises the following built-in functions to
+ convert a wide range of frame buffer types.
+
+ +---------X-----------------------------------------+
+ | | Fetch Unit |
+ | V |
+ | +---------+ |
+ | | | |
+ | | Decode | Decompression [Decode] |
+ | | | |
+ | +---------+ |
+ | | |
+ | V |
+ | +---------+ |
+ | | Clip & | Clip Window [All] |
+ | | Overlay | Plane composition [Layer, Warp] |
+ | | | |
+ | +---------+ |
+ | | |
+ | V |
+ | +---------+ |
+ | | Re- | Flip/Rotate/Repl./Drop [All] |
+  X--> | sample  | Perspective/Affine warping [Persp]  |
+ | | | | Arbitrary warping [Warp, Persp] |
+ | | +---------+ |
+ | | | |
+ | | V |
+ | | +---------+ |
+ | | | | |
+ | | | Palette | Color Palette [Layer, Decode] |
+ | | | | |
+ | | +---------+ |
+ | | | |
+ | | V |
+ | | +---------+ |
+ | | | Extract | Raw to RGBA/YUV [All] |
+ | | | & | Bit width expansion [All] |
+ | | | Expand | |
+ | | +---------+ |
+ | | | |
+ | | V |
+ | | +---------+ |
+ | | | | Planar to packed |
+ | |->| Combine | [Decode, Warp, Persp] |
+ | | | | |
+ | | +---------+ |
+ | | | |
+ | | V |
+ | | +---------+ |
+ | | | | YUV422 to YUV444 |
+ | | | Chroma | [Decode, Persp] |
+ | | | | |
+ | | +---------+ |
+ | | | |
+ | | V |
+ | | +---------+ |
+ | | | | YUV to RGB |
+ | | | Color | [Warp, Persp, Decode, Layer] |
+ | | | | |
+ | | +---------+ |
+ | | | |
+ | | V |
+ | | +---------+ |
+ | | | | Gamma removal |
+ | | | Gamma | [Warp, Persp, Decode, Layer] |
+ | | | | |
+ | | +---------+ |
+ | | | |
+ | | V |
+ | | +---------+ |
+  | |     | |  Alpha multiply, RGB pre-multiply  |
+ | ->| Multiply| [Warp, Persp, Decode, Layer] |
+ | | | |
+ | --------- |
+ | | |
+ | V |
+ | +---------+ |
+ | | | Bilinear filter |
+ | | Filter | [Warp, Persp] |
+ | | | |
+ | +---------+ |
+ | | |
+ | V |
+ +---------X-----------------------------------------+
+
+  Note that different derivatives of the Fetch Unit exist. Each implements
+  only a specific subset of the pipeline stages shown above. Restrictions for
+  the units are specified in [square brackets].
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx8qxp-dc-fetchdecode
+ - fsl,imx8qxp-dc-fetcheco
+ - fsl,imx8qxp-dc-fetchlayer
+ - fsl,imx8qxp-dc-fetchwarp
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: pec
+ - const: cfg
+
+ fsl,prg:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Optional Prefetch Resolve Gasket associated with the Fetch Unit.
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+additionalProperties: false
+
+examples:
+ - |
+ fetchlayer@56180ac0 {
+ compatible = "fsl,imx8qxp-dc-fetchlayer";
+ reg = <0x56180ac0 0xc>, <0x56188400 0x404>;
+ reg-names = "pec", "cfg";
+ };
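A fetchdecode variant carrying the optional fsl,prg link might look like this
sketch (register values taken from the blit-engine example earlier in this
series; the &prg0 label is illustrative):

    fetchdecode@56180820 {
        compatible = "fsl,imx8qxp-dc-fetchdecode";
        reg = <0x56180820 0x10>, <0x56181000 0x404>;
        reg-names = "pec", "cfg";
        fsl,prg = <&prg0>;
    };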
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-filter.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-filter.yaml
new file mode 100644
index 000000000000..5c54d5179ee3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-filter.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-filter.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Filter Unit
+
+description: |
+ 5x5 FIR filter with 25 programmable coefficients.
+
+ Typical applications are image blurring, sharpening or support for edge
+ detection algorithms.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-filter
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: pec
+ - const: cfg
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+additionalProperties: false
+
+examples:
+ - |
+ filter@56180900 {
+ compatible = "fsl,imx8qxp-dc-filter";
+ reg = <0x56180900 0x10>, <0x56183800 0x30>;
+ reg-names = "pec", "cfg";
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-framegen.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-framegen.yaml
new file mode 100644
index 000000000000..9d1dc3a9de90
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-framegen.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-framegen.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Frame Generator
+
+description:
+  The Frame Generator (FrameGen) module generates a programmable video timing
+  and optionally allows synchronizing the generated video timing to external
+  synchronization signals.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-framegen
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 8
+
+ interrupt-names:
+ items:
+ - const: int0
+ - const: int1
+ - const: int2
+ - const: int3
+ - const: primsync_on
+ - const: primsync_off
+ - const: secsync_on
+ - const: secsync_off
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+ - interrupt-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8-lpcg.h>
+ #include <dt-bindings/firmware/imx/rsrc.h>
+
+ framegen@5618b800 {
+ compatible = "fsl,imx8qxp-dc-framegen";
+ reg = <0x5618b800 0x98>;
+ clocks = <&dc0_disp_lpcg IMX_LPCG_CLK_0>;
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <18>, <19>, <20>, <21>, <41>, <42>, <43>, <44>;
+ interrupt-names = "int0", "int1", "int2", "int3",
+ "primsync_on", "primsync_off",
+ "secsync_on", "secsync_off";
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-gammacor.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-gammacor.yaml
new file mode 100644
index 000000000000..25ad85742912
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-gammacor.yaml
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-gammacor.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Gamma Correction Unit
+
+description: The unit supports non-linear color transformation.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-gammacor
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ gammacor@5618c000 {
+ compatible = "fsl,imx8qxp-dc-gammacor";
+ reg = <0x5618c000 0x20>;
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-layerblend.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-layerblend.yaml
new file mode 100644
index 000000000000..2a6ab8a0ed7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-layerblend.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-layerblend.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Layer Blend Unit
+
+description: Combines two input frames into a single output frame.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-layerblend
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: pec
+ - const: cfg
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+additionalProperties: false
+
+examples:
+ - |
+ layerblend@56180ba0 {
+ compatible = "fsl,imx8qxp-dc-layerblend";
+ reg = <0x56180ba0 0x10>, <0x5618a400 0x20>;
+ reg-names = "pec", "cfg";
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-matrix.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-matrix.yaml
new file mode 100644
index 000000000000..d773389dd0dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-matrix.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-matrix.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Color Matrix
+
+description:
+ The unit supports linear color transformation, alpha pre-multiply and
+ alpha masking.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-matrix
+
+ reg:
+ minItems: 1
+ maxItems: 2
+
+ reg-names:
+ oneOf:
+ - const: cfg # matrix in display engine
+ - items: # matrix in pixel engine
+ - const: pec
+ - const: cfg
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+additionalProperties: false
+
+examples:
+ - |
+ matrix@5618bc00 {
+ compatible = "fsl,imx8qxp-dc-matrix";
+ reg = <0x5618bc00 0x3c>;
+ reg-names = "cfg";
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-pixel-engine.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-pixel-engine.yaml
new file mode 100644
index 000000000000..633443a6cc38
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-pixel-engine.yaml
@@ -0,0 +1,250 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-pixel-engine.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Pixel Engine
+
+description:
+ All Processing Units that operate in the AXI bus clock domain. Pixel
+ pipelines have the ability to stall when a destination is busy. Implements
+ all communication to memory resources and most of the image processing
+  functions. The interconnection of Processing Units is re-configurable.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-pixel-engine
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+patternProperties:
+ "^blit-engine@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-blit-engine
+
+ "^constframe@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-constframe
+
+ "^extdst@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-extdst
+
+ "^fetchdecode@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-fetchdecode
+
+ "^fetcheco@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-fetcheco
+
+ "^fetchlayer@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-fetchlayer
+
+ "^fetchwarp@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-fetchwarp
+
+ "^hscaler@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-hscaler
+
+ "^layerblend@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-layerblend
+
+ "^matrix@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-matrix
+
+ "^safety@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-safety
+
+ "^vscaler@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-vscaler
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8-lpcg.h>
+
+ pixel-engine@56180800 {
+ compatible = "fsl,imx8qxp-dc-pixel-engine";
+ reg = <0x56180800 0xac00>;
+ clocks = <&dc0_lpcg IMX_LPCG_CLK_5>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ constframe@56180960 {
+ compatible = "fsl,imx8qxp-dc-constframe";
+ reg = <0x56180960 0xc>, <0x56184400 0x20>;
+ reg-names = "pec", "cfg";
+ };
+
+ extdst@56180980 {
+ compatible = "fsl,imx8qxp-dc-extdst";
+ reg = <0x56180980 0x1c>, <0x56184800 0x28>;
+ reg-names = "pec", "cfg";
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <3>, <4>, <5>;
+ interrupt-names = "shdload", "framecomplete", "seqcomplete";
+ };
+
+ constframe@561809a0 {
+ compatible = "fsl,imx8qxp-dc-constframe";
+ reg = <0x561809a0 0xc>, <0x56184c00 0x20>;
+ reg-names = "pec", "cfg";
+ };
+
+ extdst@561809c0 {
+ compatible = "fsl,imx8qxp-dc-extdst";
+ reg = <0x561809c0 0x1c>, <0x56185000 0x28>;
+ reg-names = "pec", "cfg";
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <6>, <7>, <8>;
+ interrupt-names = "shdload", "framecomplete", "seqcomplete";
+ };
+
+ constframe@561809e0 {
+ compatible = "fsl,imx8qxp-dc-constframe";
+ reg = <0x561809e0 0xc>, <0x56185400 0x20>;
+ reg-names = "pec", "cfg";
+ };
+
+ extdst@56180a00 {
+ compatible = "fsl,imx8qxp-dc-extdst";
+ reg = <0x56180a00 0x1c>, <0x56185800 0x28>;
+ reg-names = "pec", "cfg";
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <9>, <10>, <11>;
+ interrupt-names = "shdload", "framecomplete", "seqcomplete";
+ };
+
+ constframe@56180a20 {
+ compatible = "fsl,imx8qxp-dc-constframe";
+ reg = <0x56180a20 0xc>, <0x56185c00 0x20>;
+ reg-names = "pec", "cfg";
+ };
+
+ extdst@56180a40 {
+ compatible = "fsl,imx8qxp-dc-extdst";
+ reg = <0x56180a40 0x1c>, <0x56186000 0x28>;
+ reg-names = "pec", "cfg";
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <12>, <13>, <14>;
+ interrupt-names = "shdload", "framecomplete", "seqcomplete";
+ };
+
+ fetchwarp@56180a60 {
+ compatible = "fsl,imx8qxp-dc-fetchwarp";
+ reg = <0x56180a60 0x10>, <0x56186400 0x190>;
+ reg-names = "pec", "cfg";
+ };
+
+ fetchlayer@56180ac0 {
+ compatible = "fsl,imx8qxp-dc-fetchlayer";
+ reg = <0x56180ac0 0xc>, <0x56188400 0x404>;
+ reg-names = "pec", "cfg";
+ };
+
+ layerblend@56180ba0 {
+ compatible = "fsl,imx8qxp-dc-layerblend";
+ reg = <0x56180ba0 0x10>, <0x5618a400 0x20>;
+ reg-names = "pec", "cfg";
+ };
+
+ layerblend@56180bc0 {
+ compatible = "fsl,imx8qxp-dc-layerblend";
+ reg = <0x56180bc0 0x10>, <0x5618a800 0x20>;
+ reg-names = "pec", "cfg";
+ };
+
+ layerblend@56180be0 {
+ compatible = "fsl,imx8qxp-dc-layerblend";
+ reg = <0x56180be0 0x10>, <0x5618ac00 0x20>;
+ reg-names = "pec", "cfg";
+ };
+
+ layerblend@56180c00 {
+ compatible = "fsl,imx8qxp-dc-layerblend";
+ reg = <0x56180c00 0x10>, <0x5618b000 0x20>;
+ reg-names = "pec", "cfg";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-rop.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-rop.yaml
new file mode 100644
index 000000000000..7115950ecae0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-rop.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-rop.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Raster Operation Unit
+
+description: |
+  The unit can combine up to three input frames into a single output frame,
+  all having the same dimensions.
+
+ The unit supports logic operations, arithmetic operations and packing.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-rop
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: pec
+ - const: cfg
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+additionalProperties: false
+
+examples:
+ - |
+ rop@56180860 {
+ compatible = "fsl,imx8qxp-dc-rop";
+ reg = <0x56180860 0x10>, <0x56182000 0x20>;
+ reg-names = "pec", "cfg";
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-safety.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-safety.yaml
new file mode 100644
index 000000000000..66c12948ab09
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-safety.yaml
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-safety.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Safety Unit
+
+description:
+ The unit allows corresponding processing units to be configured in a path
+ leading to multiple endpoints.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-safety
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ safety@56180800 {
+ compatible = "fsl,imx8qxp-dc-safety";
+ reg = <0x56180800 0x1c>;
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-scaling-engine.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-scaling-engine.yaml
new file mode 100644
index 000000000000..76cbe11a6364
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-scaling-engine.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-scaling-engine.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Scaling Engine
+
+description: |
+  The unit can change the dimensions of the input frame by nearest-neighbor
+  or linear re-sampling with 1/32 sub-pixel precision.
+
+  Internally it consists of two independent blocks for horizontal and vertical
+  scaling. The order of the two operations is arbitrary.
+
+  Any frame dimensions between 1 and 16384 pixels in width and height are
+  supported, except that the vertical scaler has a maximum frame width that
+  depends on the system's functional limitations.
+
+  In general, all scale factors are supported inside the supported frame
+  dimensions. In the range of scale factors 1/16..16, the filtered output
+  colors are LSBit precise (i.e. DC ripple free).
+
+ +-----------+
+ | Line |
+ | Buffer |
+ +-----------+
+ ^
+ |
+ V
+ |\ +-----------+
+ ------+ | | |
+ | | +-->| Vertical |----
+ | ----+ | | Scaler | |
+ | | |/ +-----------+ |
+ | | |
+ | | |
+ | | | |\
+ | ------------- -------------+-----+ |
+  Input --+ X | +--> Output
+ | ------------- -------------+-----+ |
+ | | | |/
+ | | |
+ | | |\ +-----------+ |
+ | ----+ | | | |
+ | | +-->| Horizontal|----
+ ------+ | | Scaler |
+ |/ +-----------+
+
+  The unit supports downscaling, upscaling, sub-pixel translation and bob
+  de-interlacing.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx8qxp-dc-hscaler
+ - fsl,imx8qxp-dc-vscaler
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: pec
+ - const: cfg
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+additionalProperties: false
+
+examples:
+ - |
+ hscaler@561808c0 {
+ compatible = "fsl,imx8qxp-dc-hscaler";
+ reg = <0x561808c0 0x10>, <0x56183000 0x18>;
+ reg-names = "pec", "cfg";
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-signature.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-signature.yaml
new file mode 100644
index 000000000000..c495822fdc80
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-signature.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-signature.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Signature Unit
+
+description: |
+ In order to control the correctness of display output, signature values can
+ be computed for each frame and compared against reference values. In case of
+  a mismatch (signature violation), a HW event can be triggered, for example
+  a SW interrupt.
+
+ This unit supports signature computation, reference check, evaluation windows,
+ alpha masking and panic modes.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-signature
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 3
+
+ interrupt-names:
+ items:
+ - const: shdload
+ - const: valid
+ - const: error
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+
+additionalProperties: false
+
+examples:
+ - |
+ signature@5618d000 {
+ compatible = "fsl,imx8qxp-dc-signature";
+ reg = <0x5618d000 0x140>;
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <22>, <23>, <24>;
+ interrupt-names = "shdload", "valid", "error";
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-store.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-store.yaml
new file mode 100644
index 000000000000..42d1b10906be
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-store.yaml
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-store.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Store Unit
+
+description: |
+ The Store unit is the interface between the internal pixel processing
+ pipeline, which is 30-bit RGB plus 8-bit Alpha, and the AXI bus for
+ destination buffer access. It is used for the destination of Blit Engines.
+ It comprises a set of built-in functions to generate a wide range of buffer
+ formats. Note that these are exactly inverse to the corresponding functions in
+ the Fetch Unit.
+
+ +------X-------------------------+
+ | | Store Unit |
+ | V |
+ | +-------+ |
+ | | Gamma | Gamma apply |
+ | +-------+ |
+ | | |
+ | V |
+ | +-------+ |
+ | | Color | RGB to YUV |
+ | +-------+ |
+ | | |
+ | V |
+ | +-------+ |
+ | | Chroma| YUV444 to 422 |
+ | +-------+ |
+ | | |
+ | V |
+ | +-------+ |
+ | | Reduce| Bit width reduction |
+ | | | dithering |
+ | +-------+ |
+ | | |
+ | V |
+ | +-------+ |
+ | | Pack | RGBA/YUV to RAW |
+ | | Encode| or Compression |
+ | +-------+ |
+ | | |
+ | V |
+ +------X-------------------------+
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-store
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: pec
+ - const: cfg
+
+ interrupts:
+ maxItems: 3
+
+ interrupt-names:
+ items:
+ - const: shdload
+ - const: framecomplete
+ - const: seqcomplete
+
+ fsl,lts:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Optional Linear Tile Store associated with the Store Unit.
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-names
+
+additionalProperties: false
+
+examples:
+ - |
+ store@56180940 {
+ compatible = "fsl,imx8qxp-dc-store";
+ reg = <0x56180940 0x1c>, <0x56184000 0x5c>;
+ reg-names = "pec", "cfg";
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <0>, <1>, <2>;
+ interrupt-names = "shdload", "framecomplete", "seqcomplete";
+ };
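
When the Store Unit is paired with a Linear Tile Store, the optional fsl,lts phandle defined above comes into play; the same example node, extended under the assumption of a hypothetical &dc0_lts label:

    store@56180940 {
        compatible = "fsl,imx8qxp-dc-store";
        reg = <0x56180940 0x1c>, <0x56184000 0x5c>;
        reg-names = "pec", "cfg";
        interrupt-parent = <&dc0_intc>;
        interrupts = <0>, <1>, <2>;
        interrupt-names = "shdload", "framecomplete", "seqcomplete";
        fsl,lts = <&dc0_lts>; /* hypothetical Linear Tile Store node */
    };
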
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-tcon.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-tcon.yaml
new file mode 100644
index 000000000000..7a3b77ea92c7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc-tcon.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc-tcon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller Timing Controller
+
+description:
+ The TCon can generate a wide range of customized synchronization signals and
+ does the mapping of the color bits to the output.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-tcon
+
+ reg:
+ maxItems: 1
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: video output
+
+required:
+ - compatible
+ - reg
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ tcon@5618c800 {
+ compatible = "fsl,imx8qxp-dc-tcon";
+ reg = <0x5618c800 0x588>;
+
+ port {
+ dc0_disp0_dc0_pixel_combiner_ch0: endpoint {
+ remote-endpoint = <&dc0_pixel_combiner_ch0_dc0_disp0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc.yaml
new file mode 100644
index 000000000000..0a72f9f0b5fd
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc.yaml
@@ -0,0 +1,236 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8qxp-dc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller
+
+description: |
+ The Freescale i.MX8qxp Display Controller (DC) comprises three main
+ components: a blit engine for 2D graphics acceleration, a display
+ controller for display output processing, and a command sequencer.
+
+ Display buffers Source buffers
+ (AXI read master) (AXI read master)
+ | .......... | | | |
+ +---------------------------+------------+------------------+-+-+------+
+ | Display Controller (DC) | .......... | | | | |
+ | | | | | | |
+ | @@@@@@@@@@@ +----------+------------+------------+ | | | |
+ A | | Command | | V V | | | | |
+ X <-+->| Sequencer | | @@@@@@@@@@@@@@@@@@@@@@@@@@@@ | V V V |
+ I | | (AXI CLK) | | | | | @@@@@@@@@@ |
+ | @@@@@@@@@@@ | | Pixel Engine | | | | |
+ | | | | (AXI CLK) | | | | |
+ | V | @@@@@@@@@@@@@@@@@@@@@@@@@@@@ | | | |
+ A | *********** | | | | | | | Blit | |
+ H <-+->| Configure | | V V V V | | Engine | |
+ B | | (CFG CLK) | | 00000000000 11111111111 | | (AXI CLK)| |
+ | *********** | | Display | | Display | | | | |
+ | | | Engine | | Engine | | | | |
+ | | | (Disp CLK)| | (Disp CLK)| | | | |
+ | @@@@@@@@@@@ | 00000000000 11111111111 | @@@@@@@@@@ |
+ I | | Common | | | | | | |
+ R <-+--| Control | | | Display | | | |
+ Q | | (AXI CLK) | | | Controller | | | |
+ | @@@@@@@@@@@ +------------------------------------+ | |
+ | | | ^ | |
+ +--------------------------+----------------+-------+---------+--------+
+ ^ | | | |
+ | V V | V
+ Clocks & Resets Display Display Panic Destination
+ Output0 Output1 Control buffer
+ (AXI write master)
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 2
+
+ reset-names:
+ items:
+ - const: axi
+ - const: cfg
+
+ power-domains:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+patternProperties:
+ "^command-sequencer@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-command-sequencer
+
+ "^display-engine@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-display-engine
+
+ "^interrupt-controller@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-intc
+
+ "^pixel-engine@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-pixel-engine
+
+ "^pmu@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: fsl,imx8qxp-dc-axi-performance-counter
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - power-domains
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8-lpcg.h>
+ #include <dt-bindings/firmware/imx/rsrc.h>
+
+ display-controller@56180000 {
+ compatible = "fsl,imx8qxp-dc";
+ reg = <0x56180000 0x40000>;
+ clocks = <&dc0_lpcg IMX_LPCG_CLK_4>;
+ power-domains = <&pd IMX_SC_R_DC_0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ interrupt-controller@56180040 {
+ compatible = "fsl,imx8qxp-dc-intc";
+ reg = <0x56180040 0x60>;
+ clocks = <&dc0_lpcg IMX_LPCG_CLK_5>;
+ interrupt-controller;
+ interrupt-parent = <&dc0_irqsteer>;
+ #interrupt-cells = <1>;
+ interrupts = <448>, <449>, <450>, <64>,
+ <65>, <66>, <67>, <68>,
+ <69>, <70>, <193>, <194>,
+ <195>, <196>, <197>, <72>,
+ <73>, <74>, <75>, <76>,
+ <77>, <78>, <79>, <80>,
+ <81>, <199>, <200>, <201>,
+ <202>, <203>, <204>, <205>,
+ <206>, <207>, <208>, <5>,
+ <0>, <1>, <2>, <3>,
+ <4>, <82>, <83>, <84>,
+ <85>, <209>, <210>, <211>,
+ <212>;
+ interrupt-names = "store9_shdload",
+ "store9_framecomplete",
+ "store9_seqcomplete",
+ "extdst0_shdload",
+ "extdst0_framecomplete",
+ "extdst0_seqcomplete",
+ "extdst4_shdload",
+ "extdst4_framecomplete",
+ "extdst4_seqcomplete",
+ "extdst1_shdload",
+ "extdst1_framecomplete",
+ "extdst1_seqcomplete",
+ "extdst5_shdload",
+ "extdst5_framecomplete",
+ "extdst5_seqcomplete",
+ "disengcfg_shdload0",
+ "disengcfg_framecomplete0",
+ "disengcfg_seqcomplete0",
+ "framegen0_int0",
+ "framegen0_int1",
+ "framegen0_int2",
+ "framegen0_int3",
+ "sig0_shdload",
+ "sig0_valid",
+ "sig0_error",
+ "disengcfg_shdload1",
+ "disengcfg_framecomplete1",
+ "disengcfg_seqcomplete1",
+ "framegen1_int0",
+ "framegen1_int1",
+ "framegen1_int2",
+ "framegen1_int3",
+ "sig1_shdload",
+ "sig1_valid",
+ "sig1_error",
+ "reserved",
+ "cmdseq_error",
+ "comctrl_sw0",
+ "comctrl_sw1",
+ "comctrl_sw2",
+ "comctrl_sw3",
+ "framegen0_primsync_on",
+ "framegen0_primsync_off",
+ "framegen0_secsync_on",
+ "framegen0_secsync_off",
+ "framegen1_primsync_on",
+ "framegen1_primsync_off",
+ "framegen1_secsync_on",
+ "framegen1_secsync_off";
+ };
+
+ pixel-engine@56180800 {
+ compatible = "fsl,imx8qxp-dc-pixel-engine";
+ reg = <0x56180800 0xac00>;
+ clocks = <&dc0_lpcg IMX_LPCG_CLK_5>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ };
+
+ display-engine@5618b400 {
+ compatible = "fsl,imx8qxp-dc-display-engine";
+ reg = <0x5618b400 0x14>, <0x5618b800 0x1c00>;
+ reg-names = "top", "cfg";
+ interrupt-parent = <&dc0_intc>;
+ interrupts = <15>, <16>, <17>;
+ interrupt-names = "shdload", "framecomplete", "seqcomplete";
+ power-domains = <&pd IMX_SC_R_DC_0_PLL_0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
index 246bbb509bea..9923b065323b 100644
--- a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
@@ -38,6 +38,10 @@ properties:
- qcom,sm8450-dp
- qcom,sm8550-dp
- const: qcom,sm8350-dp
+ - items:
+ - enum:
+ - qcom,sm8750-dp
+ - const: qcom,sm8650-dp
reg:
minItems: 4
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
index 82fe95a6d959..d4bb65c660af 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
@@ -42,6 +42,7 @@ properties:
- qcom,sm8450-dsi-ctrl
- qcom,sm8550-dsi-ctrl
- qcom,sm8650-dsi-ctrl
+ - qcom,sm8750-dsi-ctrl
- const: qcom,mdss-dsi-ctrl
- enum:
- qcom,dsi-ctrl-6g-qcm2290
@@ -70,11 +71,11 @@ properties:
- mnoc:: MNOC clock
- pixel:: Display pixel clock.
minItems: 3
- maxItems: 9
+ maxItems: 12
clock-names:
minItems: 3
- maxItems: 9
+ maxItems: 12
phys:
maxItems: 1
@@ -109,7 +110,8 @@ properties:
minItems: 2
maxItems: 4
description: |
- Parents of "byte" and "pixel" for the given platform.
+ For DSI on SM8650 and older: parents of "byte" and "pixel" for the given
+ platform.
For DSIv2 platforms this should contain "byte", "esc", "src" and
"pixel_src" clocks.
@@ -218,8 +220,6 @@ required:
- clocks
- clock-names
- phys
- - assigned-clocks
- - assigned-clock-parents
- ports
allOf:
@@ -244,6 +244,9 @@ allOf:
- const: byte
- const: pixel
- const: core
+ required:
+ - assigned-clocks
+ - assigned-clock-parents
- if:
properties:
@@ -266,6 +269,9 @@ allOf:
- const: byte
- const: pixel
- const: core
+ required:
+ - assigned-clocks
+ - assigned-clock-parents
- if:
properties:
@@ -288,6 +294,9 @@ allOf:
- const: pixel
- const: core
- const: core_mmss
+ required:
+ - assigned-clocks
+ - assigned-clock-parents
- if:
properties:
@@ -309,6 +318,9 @@ allOf:
- const: core_mmss
- const: pixel
- const: core
+ required:
+ - assigned-clocks
+ - assigned-clock-parents
- if:
properties:
@@ -346,6 +358,35 @@ allOf:
- const: core
- const: iface
- const: bus
+ required:
+ - assigned-clocks
+ - assigned-clock-parents
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8750-dsi-ctrl
+ then:
+ properties:
+ clocks:
+ minItems: 12
+ maxItems: 12
+ clock-names:
+ items:
+ - const: byte
+ - const: byte_intf
+ - const: pixel
+ - const: core
+ - const: iface
+ - const: bus
+ - const: dsi_pll_pixel
+ - const: dsi_pll_byte
+ - const: esync
+ - const: osc
+ - const: byte_src
+ - const: pixel_src
- if:
properties:
@@ -369,6 +410,9 @@ allOf:
- const: core_mmss
- const: pixel
- const: core
+ required:
+ - assigned-clocks
+ - assigned-clock-parents
unevaluatedProperties: false
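
For the pre-SM8750 variants, where assigned-clocks/assigned-clock-parents are now required per variant instead of globally, the usual idiom reparents the byte and pixel clock sources to the two clocks provided by the DSI PHY; a sketch, assuming SM8250-style dispcc macro names and the conventional PHY clock cell indices:

    dsi@ae94000 {
        /* ... */
        assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
                          <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
        /* cell 0: byte clock, cell 1: pixel clock from the PHY */
        assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
    };
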
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
index 3c75ff42999a..1ca820a500b7 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
@@ -25,6 +25,7 @@ properties:
- qcom,sm8450-dsi-phy-5nm
- qcom,sm8550-dsi-phy-4nm
- qcom,sm8650-dsi-phy-4nm
+ - qcom,sm8750-dsi-phy-3nm
reg:
items:
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml
index 01cf79bd754b..0a46120dd868 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml
@@ -16,6 +16,7 @@ properties:
enum:
- qcom,sa8775p-dpu
- qcom,sm8650-dpu
+ - qcom,sm8750-dpu
- qcom,x1e80100-dpu
reg:
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8750-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8750-mdss.yaml
new file mode 100644
index 000000000000..72c70edc1fb0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8750-mdss.yaml
@@ -0,0 +1,470 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/msm/qcom,sm8750-mdss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SM8750 Display MDSS
+
+maintainers:
+ - Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+description:
+ SM8750 MSM Mobile Display Subsystem (MDSS), which encapsulates sub-blocks like
+ the DPU display controller, DSI and DP interfaces, etc.
+
+$ref: /schemas/display/msm/mdss-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,sm8750-mdss
+
+ clocks:
+ items:
+ - description: Display AHB
+ - description: Display hf AXI
+ - description: Display core
+
+ iommus:
+ maxItems: 1
+
+ interconnects:
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
+
+ interconnect-names:
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
+
+patternProperties:
+ "^display-controller@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ const: qcom,sm8750-dpu
+
+ "^displayport-controller@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ contains:
+ const: qcom,sm8750-dp
+
+ "^dsi@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ contains:
+ const: qcom,sm8750-dsi-ctrl
+
+ "^phy@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ const: qcom,sm8750-dsi-phy-3nm
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/interconnect/qcom,icc.h>
+ #include <dt-bindings/interconnect/qcom,sm8750-rpmh.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/phy/phy-qcom-qmp.h>
+ #include <dt-bindings/power/qcom,rpmhpd.h>
+
+ display-subsystem@ae00000 {
+ compatible = "qcom,sm8750-mdss";
+ reg = <0x0ae00000 0x1000>;
+ reg-names = "mdss";
+
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&disp_cc_mdss_ahb_clk>,
+ <&gcc_disp_hf_axi_clk>,
+ <&disp_cc_mdss_mdp_clk>;
+
+ interconnects = <&mmss_noc MASTER_MDP QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "mdp0-mem",
+ "cpu-cfg";
+
+ resets = <&disp_cc_mdss_core_bcr>;
+
+ power-domains = <&mdss_gdsc>;
+
+ iommus = <&apps_smmu 0x800 0x2>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ display-controller@ae01000 {
+ compatible = "qcom,sm8750-dpu";
+ reg = <0x0ae01000 0x93000>,
+ <0x0aeb0000 0x2008>;
+ reg-names = "mdp",
+ "vbif";
+
+ interrupts-extended = <&mdss 0>;
+
+ clocks = <&gcc_disp_hf_axi_clk>,
+ <&disp_cc_mdss_ahb_clk>,
+ <&disp_cc_mdss_mdp_lut_clk>,
+ <&disp_cc_mdss_mdp_clk>,
+ <&disp_cc_mdss_vsync_clk>;
+ clock-names = "nrt_bus",
+ "iface",
+ "lut",
+ "core",
+ "vsync";
+
+ assigned-clocks = <&disp_cc_mdss_vsync_clk>;
+ assigned-clock-rates = <19200000>;
+
+ operating-points-v2 = <&mdp_opp_table>;
+
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dpu_intf1_out: endpoint {
+ remote-endpoint = <&mdss_dsi0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ dpu_intf2_out: endpoint {
+ remote-endpoint = <&mdss_dsi1_in>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ dpu_intf0_out: endpoint {
+ remote-endpoint = <&mdss_dp0_in>;
+ };
+ };
+ };
+
+ mdp_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-207000000 {
+ opp-hz = /bits/ 64 <207000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-337000000 {
+ opp-hz = /bits/ 64 <337000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-417000000 {
+ opp-hz = /bits/ 64 <417000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-532000000 {
+ opp-hz = /bits/ 64 <532000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+
+ opp-575000000 {
+ opp-hz = /bits/ 64 <575000000>;
+ required-opps = <&rpmhpd_opp_nom_l1>;
+ };
+ };
+ };
+
+ dsi@ae94000 {
+ compatible = "qcom,sm8750-dsi-ctrl", "qcom,mdss-dsi-ctrl";
+ reg = <0x0ae94000 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupts-extended = <&mdss 4>;
+
+ clocks = <&disp_cc_mdss_byte0_clk>,
+ <&disp_cc_mdss_byte0_intf_clk>,
+ <&disp_cc_mdss_pclk0_clk>,
+ <&disp_cc_mdss_esc0_clk>,
+ <&disp_cc_mdss_ahb_clk>,
+ <&gcc_disp_hf_axi_clk>,
+ <&mdss_dsi0_phy 1>,
+ <&mdss_dsi0_phy 0>,
+ <&disp_cc_esync0_clk>,
+ <&disp_cc_osc_clk>,
+ <&disp_cc_mdss_byte0_clk_src>,
+ <&disp_cc_mdss_pclk0_clk_src>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus",
+ "dsi_pll_pixel",
+ "dsi_pll_byte",
+ "esync",
+ "osc",
+ "byte_src",
+ "pixel_src";
+
+ operating-points-v2 = <&mdss_dsi_opp_table>;
+
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ phys = <&mdss_dsi0_phy>;
+ phy-names = "dsi";
+
+ vdda-supply = <&vreg_l3g_1p2>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss_dsi0_in: endpoint {
+ remote-endpoint = <&dpu_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dsi0_out: endpoint {
+ remote-endpoint = <&panel0_in>;
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
+
+ mdss_dsi_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-187500000 {
+ opp-hz = /bits/ 64 <187500000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-358000000 {
+ opp-hz = /bits/ 64 <358000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+ };
+ };
+
+ mdss_dsi0_phy: phy@ae95000 {
+ compatible = "qcom,sm8750-dsi-phy-3nm";
+ reg = <0x0ae95000 0x200>,
+ <0x0ae95200 0x280>,
+ <0x0ae95500 0x400>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ clocks = <&disp_cc_mdss_ahb_clk>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface",
+ "ref";
+
+ vdds-supply = <&vreg_l3i_0p88>;
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+ };
+
+ dsi@ae96000 {
+ compatible = "qcom,sm8750-dsi-ctrl", "qcom,mdss-dsi-ctrl";
+ reg = <0x0ae96000 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupts-extended = <&mdss 5>;
+
+ clocks = <&disp_cc_mdss_byte1_clk>,
+ <&disp_cc_mdss_byte1_intf_clk>,
+ <&disp_cc_mdss_pclk1_clk>,
+ <&disp_cc_mdss_esc1_clk>,
+ <&disp_cc_mdss_ahb_clk>,
+ <&gcc_disp_hf_axi_clk>,
+ <&mdss_dsi1_phy 1>,
+ <&mdss_dsi1_phy 0>,
+ <&disp_cc_esync1_clk>,
+ <&disp_cc_osc_clk>,
+ <&disp_cc_mdss_byte1_clk_src>,
+ <&disp_cc_mdss_pclk1_clk_src>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus",
+ "dsi_pll_pixel",
+ "dsi_pll_byte",
+ "esync",
+ "osc",
+ "byte_src",
+ "pixel_src";
+
+ operating-points-v2 = <&mdss_dsi_opp_table>;
+
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ phys = <&mdss_dsi1_phy>;
+ phy-names = "dsi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss_dsi1_in: endpoint {
+ remote-endpoint = <&dpu_intf2_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dsi1_out: endpoint {
+ };
+ };
+ };
+ };
+
+ mdss_dsi1_phy: phy@ae97000 {
+ compatible = "qcom,sm8750-dsi-phy-3nm";
+ reg = <0x0ae97000 0x200>,
+ <0x0ae97200 0x280>,
+ <0x0ae97500 0x400>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ clocks = <&disp_cc_mdss_ahb_clk>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface",
+ "ref";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+ };
+
+ displayport-controller@af54000 {
+ compatible = "qcom,sm8750-dp", "qcom,sm8650-dp";
+ reg = <0xaf54000 0x104>,
+ <0xaf54200 0xc0>,
+ <0xaf55000 0x770>,
+ <0xaf56000 0x9c>,
+ <0xaf57000 0x9c>;
+
+ interrupts-extended = <&mdss 12>;
+
+ clocks = <&disp_cc_mdss_ahb_clk>,
+ <&disp_cc_mdss_dptx0_aux_clk>,
+ <&disp_cc_mdss_dptx0_link_clk>,
+ <&disp_cc_mdss_dptx0_link_intf_clk>,
+ <&disp_cc_mdss_dptx0_pixel0_clk>;
+ clock-names = "core_iface",
+ "core_aux",
+ "ctrl_link",
+ "ctrl_link_iface",
+ "stream_pixel";
+
+ assigned-clocks = <&disp_cc_mdss_dptx0_link_clk_src>,
+ <&disp_cc_mdss_dptx0_pixel0_clk_src>;
+ assigned-clock-parents = <&usb_dp_qmpphy QMP_USB43DP_DP_LINK_CLK>,
+ <&usb_dp_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
+
+ operating-points-v2 = <&dp_opp_table>;
+
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ phys = <&usb_dp_qmpphy QMP_USB43DP_DP_PHY>;
+ phy-names = "dp";
+
+ #sound-dai-cells = <0>;
+
+ dp_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-192000000 {
+ opp-hz = /bits/ 64 <192000000>;
+ required-opps = <&rpmhpd_opp_low_svs_d1>;
+ };
+
+ opp-270000000 {
+ opp-hz = /bits/ 64 <270000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-540000000 {
+ opp-hz = /bits/ 64 <540000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-810000000 {
+ opp-hz = /bits/ 64 <810000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss_dp0_in: endpoint {
+ remote-endpoint = <&dpu_intf0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dp0_out: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_dp_in>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx83112b.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx83112b.yaml
new file mode 100644
index 000000000000..e58bb3d45331
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx83112b.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/himax,hx83112b.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Himax HX83112B-based DSI display panels
+
+maintainers:
+ - Luca Weiss <luca@lucaweiss.eu>
+
+description:
+ The Himax HX83112B is a generic DSI Panel IC used to control
+ LCD panels.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ contains:
+ const: djn,98-03057-6598b-i
+
+ reg:
+ maxItems: 1
+
+ iovcc-supply:
+ description: I/O voltage rail
+
+ vsn-supply:
+ description: Positive source voltage rail
+
+ vsp-supply:
+ description: Negative source voltage rail
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - iovcc-supply
+ - vsn-supply
+ - vsp-supply
+ - port
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "djn,98-03057-6598b-i";
+ reg = <0>;
+
+ reset-gpios = <&tlmm 61 GPIO_ACTIVE_LOW>;
+
+ iovcc-supply = <&pm8953_l6>;
+ vsn-supply = <&pmi632_lcdb_ncp>;
+ vsp-supply = <&pmi632_lcdb_ldo>;
+
+ port {
+ panel_in_0: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
index 75ccabff308b..5725a587e35c 100644
--- a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
@@ -17,12 +17,17 @@ description:
properties:
compatible:
- items:
- - enum:
- - hannstar,hsd060bhw4
- - microchip,ac40t08a-mipi-panel
- - powkiddy,x55-panel
- - const: himax,hx8394
+ oneOf:
+ - items:
+ - enum:
+ - hannstar,hsd060bhw4
+ - microchip,ac40t08a-mipi-panel
+ - powkiddy,x55-panel
+ - const: himax,hx8394
+ - items:
+ - enum:
+ - huiling,hl055fhav028c
+ - const: himax,hx8399c
reg:
maxItems: 1
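
With the oneOf split above, a panel built on the HX8399C variant pairs its specific compatible with the new fallback; a minimal sketch:

    panel@0 {
        compatible = "huiling,hl055fhav028c", "himax,hx8399c";
        reg = <0>;
    };
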
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
index baf5dfe5f5eb..a51af61d4846 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
@@ -19,6 +19,7 @@ properties:
- ampire,am8001280g
- bananapi,lhr050h41
- feixin,k101-im2byl02
+ - raspberrypi,dsi-7inch
- startek,kd050hdfia020
- tdo,tl050hdv35
- wanchanglong,w552946aba
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 5542c9229d54..1ac1f0219079 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -57,6 +57,8 @@ properties:
- auo,g121ean01
# AU Optronics Corporation 15.6" (1366x768) TFT LCD panel
- auo,g156xtn01
+ # AU Optronics Corporation 23.8" FHD (1920x1080) TFT LCD panel
+ - auo,p238han01
# AU Optronics Corporation 31.5" FHD (1920x1080) TFT LCD panel
- auo,p320hvn03
# AU Optronics Corporation 21.5" FHD (1920x1080) color TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm67200.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm67200.yaml
index 54c9c0ef45ec..97b7fbe05c07 100644
--- a/Documentation/devicetree/bindings/display/panel/raydium,rm67200.yaml
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm67200.yaml
@@ -42,7 +42,6 @@ required:
- compatible
- port
- reg
- - reset-gpios
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/display/panel/renesas,r61307.yaml b/Documentation/devicetree/bindings/display/panel/renesas,r61307.yaml
new file mode 100644
index 000000000000..90cce221c0d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/renesas,r61307.yaml
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/renesas,r61307.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R61307 based DSI Display Panel
+
+maintainers:
+ - Svyatoslav Ryhel <clamor95@gmail.com>
+
+description:
+ The Renesas R61307 is a generic DSI Panel IC used to control LCD panels.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ # KOE/HITACHI TX13D100VM0EAA 5.0" XGA TFT LCD panel
+ - hit,tx13d100vm0eaa
+ - koe,tx13d100vm0eaa
+ - const: renesas,r61307
+
+ reg:
+ maxItems: 1
+
+ vcc-supply:
+ description: Regulator for main power supply.
+
+ iovcc-supply:
+ description: Regulator for 1.8V IO power supply.
+
+ backlight: true
+
+ renesas,gamma:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ 0 - disabled
+ 1-3 - gamma setting A presets
+ enum: [0, 1, 2, 3]
+
+ renesas,column-inversion:
+ type: boolean
+ description: switch between line and column inversion. The line
+ inversion is set by default.
+
+ renesas,contrast:
+ type: boolean
+ description: digital contrast adjustment
+
+ reset-gpios: true
+ port: true
+
+required:
+ - compatible
+ - port
+ - backlight
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@1 {
+ compatible = "koe,tx13d100vm0eaa", "renesas,r61307";
+ reg = <1>;
+
+ reset-gpios = <&gpio 176 GPIO_ACTIVE_LOW>;
+
+ renesas,gamma = <3>;
+ renesas,column-inversion;
+ renesas,contrast;
+
+ vcc-supply = <&vcc_3v0_lcd>;
+ iovcc-supply = <&iovcc_1v8_lcd>;
+
+ backlight = <&backlight>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/renesas,r69328.yaml b/Documentation/devicetree/bindings/display/panel/renesas,r69328.yaml
new file mode 100644
index 000000000000..1cd219b510ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/renesas,r69328.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/renesas,r69328.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R69328 based DSI Display Panel
+
+maintainers:
+ - Svyatoslav Ryhel <clamor95@gmail.com>
+
+description:
+ The Renesas R69328 is a generic DSI Panel IC used to control LCD panels.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ # JDI DX12D100VM0EAA 4.7" WXGA TFT LCD panel
+ - jdi,dx12d100vm0eaa
+ - const: renesas,r69328
+
+ reg:
+ maxItems: 1
+
+ vdd-supply:
+ description: Regulator for main power supply.
+
+ vddio-supply:
+ description: Regulator for 1.8V IO power supply.
+
+ backlight: true
+
+ reset-gpios: true
+ port: true
+
+required:
+ - compatible
+ - port
+ - backlight
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@1 {
+ compatible = "jdi,dx12d100vm0eaa", "renesas,r69328";
+ reg = <1>;
+
+ reset-gpios = <&gpio 176 GPIO_ACTIVE_LOW>;
+
+ vdd-supply = <&vdd_3v0_lcd>;
+ vddio-supply = <&vdd_1v8_io>;
+
+ backlight = <&backlight>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
index 31f0c0f038e4..e36659340ef3 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
@@ -19,6 +19,8 @@ properties:
- const: samsung,atna33xc20
- items:
- enum:
+ # Samsung 13" 3K (2880×1920 pixels) eDP AMOLED panel
+ - samsung,atna30dw01
# Samsung 14" WQXGA+ (2880×1800 pixels) eDP AMOLED panel
- samsung,atna40yk20
# Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
index b07f3eca669b..1e434240ea3f 100644
--- a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
@@ -29,6 +29,7 @@ properties:
- densitron,dmt028vghmcmi-1a
- elida,kd50t048a
- techstar,ts8550b
+ - winstar,wf40eswaa6mnn0
- const: sitronix,st7701
reg:
diff --git a/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml b/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
index 30047a62fc11..f0a82f0ff790 100644
--- a/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
+++ b/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
@@ -18,7 +18,9 @@ allOf:
properties:
compatible:
- const: visionox,rm69299-1080p-display
+ enum:
+ - visionox,rm69299-1080p-display
+ - visionox,rm69299-shift
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml b/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
index 95e3d5e74b87..1e32d14b6edb 100644
--- a/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
+++ b/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
@@ -20,6 +20,7 @@ properties:
- enum:
- renesas,r9a07g043u-du # RZ/G2UL
- renesas,r9a07g044-du # RZ/G2{L,LC}
+ - renesas,r9a09g057-du # RZ/V2H(P)
- items:
- enum:
- renesas,r9a07g054-du # RZ/V2L
@@ -101,7 +102,12 @@ allOf:
required:
- port@0
- else:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a07g044-du
+ then:
properties:
ports:
properties:
@@ -113,6 +119,21 @@ allOf:
required:
- port@0
- port@1
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a09g057-du
+ then:
+ properties:
+ ports:
+ properties:
+ port@0:
+ description: DSI
+ port@1: false
+
+ required:
+ - port@0
examples:
# RZ/G2L DU
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml
index ccd71c5324af..0881e82deb11 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml
@@ -58,12 +58,6 @@ properties:
power-domains:
maxItems: 1
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
required:
- compatible
- clocks
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml
index f546d481b7e5..93da1fb9adc4 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml
@@ -64,10 +64,10 @@ properties:
- description: Pixel clock for video port 0.
- description: Pixel clock for video port 1.
- description: Pixel clock for video port 2.
- - description: Pixel clock for video port 3.
- - description: Peripheral(vop grf/dsi) clock.
- - description: Alternative pixel clock provided by HDMI0 PHY PLL.
- - description: Alternative pixel clock provided by HDMI1 PHY PLL.
+ - {}
+ - {}
+ - {}
+ - {}
clock-names:
minItems: 5
@@ -77,10 +77,10 @@ properties:
- const: dclk_vp0
- const: dclk_vp1
- const: dclk_vp2
- - const: dclk_vp3
- - const: pclk_vop
- - const: pll_hdmiphy0
- - const: pll_hdmiphy1
+ - {}
+ - {}
+ - {}
+ - {}
rockchip,grf:
$ref: /schemas/types.yaml#/definitions/phandle
@@ -175,10 +175,24 @@ allOf:
then:
properties:
clocks:
- maxItems: 5
+ minItems: 5
+ items:
+ - {}
+ - {}
+ - {}
+ - {}
+ - {}
+ - description: Alternative pixel clock provided by HDMI PHY PLL.
clock-names:
- maxItems: 5
+ minItems: 5
+ items:
+ - {}
+ - {}
+ - {}
+ - {}
+ - {}
+ - const: pll_hdmiphy0
interrupts:
minItems: 4
@@ -208,11 +222,29 @@ allOf:
properties:
clocks:
minItems: 7
- maxItems: 9
+ items:
+ - {}
+ - {}
+ - {}
+ - {}
+ - {}
+ - description: Pixel clock for video port 3.
+ - description: Peripheral(vop grf/dsi) clock.
+ - description: Alternative pixel clock provided by HDMI0 PHY PLL.
+ - description: Alternative pixel clock provided by HDMI1 PHY PLL.
clock-names:
minItems: 7
- maxItems: 9
+ items:
+ - {}
+ - {}
+ - {}
+ - {}
+ - {}
+ - const: dclk_vp3
+ - const: pclk_vop
+ - const: pll_hdmiphy0
+ - const: pll_hdmiphy1
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7567.yaml b/Documentation/devicetree/bindings/display/sitronix,st7567.yaml
new file mode 100644
index 000000000000..e8a5b8ad18fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sitronix,st7567.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/sitronix,st7567.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sitronix ST7567 Display Controller
+
+maintainers:
+ - Javier Martinez Canillas <javierm@redhat.com>
+
+description:
+ Sitronix ST7567 is a driver and controller for monochrome
+ dot matrix LCD panels.
+
+allOf:
+ - $ref: panel/panel-common.yaml#
+
+properties:
+ compatible:
+ const: sitronix,st7567
+
+ reg:
+ maxItems: 1
+
+ width-mm: true
+ height-mm: true
+ panel-timing: true
+
+required:
+ - compatible
+ - reg
+ - width-mm
+ - height-mm
+ - panel-timing
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ display@3f {
+ compatible = "sitronix,st7567";
+ reg = <0x3f>;
+ width-mm = <37>;
+ height-mm = <27>;
+
+ panel-timing {
+ hactive = <128>;
+ vactive = <64>;
+ hback-porch = <0>;
+ vback-porch = <0>;
+ clock-frequency = <0>;
+ hfront-porch = <0>;
+ hsync-len = <0>;
+ vfront-porch = <0>;
+ vsync-len = <0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am625-oldi.yaml b/Documentation/devicetree/bindings/display/ti/ti,am625-oldi.yaml
new file mode 100644
index 000000000000..8203ec5e5bb3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ti/ti,am625-oldi.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/ti/ti,am625-oldi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments AM625 OLDI Transmitter
+
+maintainers:
+ - Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ - Aradhya Bhatia <aradhya.bhatia@linux.dev>
+
+description:
+ The AM625 TI Keystone OpenLDI transmitter (OLDI TX) supports serialized RGB
+ pixel data transmission between host and flat panel display over LVDS (Low
+ Voltage Differential Sampling) interface. The OLDI TX consists of 7-to-1 data
+ serializers, and 4-data and 1-clock LVDS outputs. It supports the LVDS output
+ formats "jeida-18", "jeida-24" and "vesa-18", and can accept 24-bit RGB or
+ padded and un-padded 18-bit RGB bus formats as input.
+
+properties:
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+ description: serial clock input for the OLDI transmitters
+
+ clock-names:
+ const: serial
+
+ ti,companion-oldi:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle to the companion OLDI transmitter. This property is required for
+ both OLDI TXes if they are expected to work either in dual-lvds mode or in
+ clone mode. It should point to the other OLDI TX's phandle.
+
+ ti,secondary-oldi:
+ type: boolean
+ description:
+ Boolean property to mark the OLDI transmitter as the secondary one when the
+ OLDI hardware is expected to run as companion hardware, in dual-lvds or
+ clone mode. The primary OLDI hardware is responsible for all the
+ hardware configuration.
+
+ ti,oldi-io-ctrl:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle to syscon device node mapping OLDI IO_CTRL registers found in the
+ control MMR region. These registers are required to toggle the I/O lane
+ power and control its electrical characteristics.
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Parallel RGB input port
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: LVDS output port
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - reg
+ - clocks
+ - clock-names
+ - ti,oldi-io-ctrl
+ - ports
+
+additionalProperties: false
+
+...
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
index 31c4ffcb599c..361e9cae6896 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
@@ -12,18 +12,25 @@ maintainers:
- Tomi Valkeinen <tomi.valkeinen@ti.com>
description: |
- The AM625 and AM65x TI Keystone Display SubSystem with two output
+ The AM625 and AM65x TI Keystone Display SubSystem has two output
ports and two video planes. In AM65x DSS, the first video port
supports 1 OLDI TX and in AM625 DSS, the first video port output is
internally routed to 2 OLDI TXes. The second video port supports DPI
format. The first plane is full video plane with all features and the
second is a "lite plane" without scaling support.
+ The AM62L display subsystem has a single output port which supports the DPI
+ format, but it only supports a single video "lite plane" which does not
+ support scaling. The output port is routed to the SoC boundary via the DPI
+ interface, and the same DPI signals are also routed internally to the DSI Tx
+ controller present within the SoC. Due to clocking limitations, only one of
+ the interfaces, i.e. either DSI or DPI, can be used at a time.
properties:
compatible:
enum:
- ti,am625-dss
- ti,am62a7-dss
+ - ti,am62l-dss
- ti,am65x-dss
reg:
@@ -91,6 +98,26 @@ properties:
For AM625 DSS, the internal DPI output port node from video
port 1.
For AM62A7 DSS, the port is tied off inside the SoC.
+ For AM62L DSS, the DSS DPI output port node from video port 1
+ or DSI Tx controller node connected to video port 1.
+ properties:
+ endpoint@0:
+ $ref: /schemas/graph.yaml#/properties/endpoint
+ description:
+ For AM625 DSS, VP Connection to OLDI0.
+ For AM65X DSS, OLDI output from the SoC.
+
+ endpoint@1:
+ $ref: /schemas/graph.yaml#/properties/endpoint
+ description:
+ For AM625 DSS, VP Connection to OLDI1.
+
+ anyOf:
+ - required:
+ - endpoint
+ - required:
+ - endpoint@0
+ - endpoint@1
port@1:
$ref: /schemas/graph.yaml#/properties/port
@@ -112,6 +139,25 @@ properties:
Input memory (from main memory to dispc) bandwidth limit in
bytes per second
+ oldi-transmitters:
+ description:
+ Child node under the DSS, to describe all the OLDI transmitters connected
+ to the DSS videoports.
+ type: object
+ additionalProperties: false
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ patternProperties:
+ '^oldi@[0-1]$':
+ $ref: ti,am625-oldi.yaml#
+ description: OLDI transmitters connected to the DSS VPs
+
allOf:
- if:
properties:
@@ -120,9 +166,36 @@ allOf:
const: ti,am62a7-dss
then:
properties:
+ oldi-transmitters: false
ports:
properties:
port@0: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,am62l-dss
+ then:
+ properties:
+ ports:
+ properties:
+ port@1: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ti,am62l-dss
+ - ti,am65x-dss
+ then:
+ properties:
+ oldi-transmitters: false
+ ports:
+ properties:
+ port@0:
+ properties:
+ endpoint@1: false
required:
- compatible
@@ -142,32 +215,135 @@ examples:
#include <dt-bindings/soc/ti,sci_pm_domain.h>
dss: dss@4a00000 {
- compatible = "ti,am65x-dss";
- reg = <0x04a00000 0x1000>, /* common */
- <0x04a02000 0x1000>, /* vidl1 */
- <0x04a06000 0x1000>, /* vid */
- <0x04a07000 0x1000>, /* ovr1 */
- <0x04a08000 0x1000>, /* ovr2 */
- <0x04a0a000 0x1000>, /* vp1 */
- <0x04a0b000 0x1000>, /* vp2 */
- <0x04a01000 0x1000>; /* common1 */
+ compatible = "ti,am65x-dss";
+ reg = <0x04a00000 0x1000>, /* common */
+ <0x04a02000 0x1000>, /* vidl1 */
+ <0x04a06000 0x1000>, /* vid */
+ <0x04a07000 0x1000>, /* ovr1 */
+ <0x04a08000 0x1000>, /* ovr2 */
+ <0x04a0a000 0x1000>, /* vp1 */
+ <0x04a0b000 0x1000>, /* vp2 */
+ <0x04a01000 0x1000>; /* common1 */
+ reg-names = "common", "vidl1", "vid",
+ "ovr1", "ovr2", "vp1", "vp2", "common1";
+ ti,am65x-oldi-io-ctrl = <&dss_oldi_io_ctrl>;
+ power-domains = <&k3_pds 67 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 67 1>,
+ <&k3_clks 216 1>,
+ <&k3_clks 67 2>;
+ clock-names = "fck", "vp1", "vp2";
+ interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ oldi_out0: endpoint {
+ remote-endpoint = <&lcd_in0>;
+ };
+ };
+ };
+ };
+
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dss1: dss@30200000 {
+ compatible = "ti,am625-dss";
+ reg = <0x00 0x30200000 0x00 0x1000>, /* common */
+ <0x00 0x30202000 0x00 0x1000>, /* vidl1 */
+ <0x00 0x30206000 0x00 0x1000>, /* vid */
+ <0x00 0x30207000 0x00 0x1000>, /* ovr1 */
+ <0x00 0x30208000 0x00 0x1000>, /* ovr2 */
+ <0x00 0x3020a000 0x00 0x1000>, /* vp1 */
+ <0x00 0x3020b000 0x00 0x1000>, /* vp2 */
+ <0x00 0x30201000 0x00 0x1000>; /* common1 */
reg-names = "common", "vidl1", "vid",
- "ovr1", "ovr2", "vp1", "vp2", "common1";
- ti,am65x-oldi-io-ctrl = <&dss_oldi_io_ctrl>;
- power-domains = <&k3_pds 67 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 67 1>,
- <&k3_clks 216 1>,
- <&k3_clks 67 2>;
+ "ovr1", "ovr2", "vp1", "vp2", "common1";
+ power-domains = <&k3_pds 186 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 186 6>,
+ <&vp1_clock>,
+ <&k3_clks 186 2>;
clock-names = "fck", "vp1", "vp2";
- interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ oldi-transmitters {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ oldi0: oldi@0 {
+ reg = <0>;
+ clocks = <&k3_clks 186 0>;
+ clock-names = "serial";
+ ti,companion-oldi = <&oldi1>;
+ ti,oldi-io-ctrl = <&dss_oldi_io_ctrl>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ oldi0_in: endpoint {
+ remote-endpoint = <&dpi0_out0>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ oldi0_out: endpoint {
+ remote-endpoint = <&panel_in0>;
+ };
+ };
+ };
+ };
+ oldi1: oldi@1 {
+ reg = <1>;
+ clocks = <&k3_clks 186 0>;
+ clock-names = "serial";
+ ti,secondary-oldi;
+ ti,companion-oldi = <&oldi0>;
+ ti,oldi-io-ctrl = <&dss_oldi_io_ctrl>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ oldi1_in: endpoint {
+ remote-endpoint = <&dpi0_out1>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ oldi1_out: endpoint {
+ remote-endpoint = <&panel_in1>;
+ };
+ };
+ };
+ };
+ };
ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
#address-cells = <1>;
#size-cells = <0>;
- port@0 {
- reg = <0>;
- oldi_out0: endpoint {
- remote-endpoint = <&lcd_in0>;
- };
+ reg = <0>;
+ dpi0_out0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&oldi0_in>;
+ };
+ dpi0_out1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&oldi1_in>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ dpi1_out: endpoint {
+ remote-endpoint = <&hdmi_bridge>;
};
+ };
};
+ };
};
diff --git a/Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt b/Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt
deleted file mode 100644
index 092913a28457..000000000000
--- a/Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-* Broadcom SBA RAID engine
-
-Required properties:
-- compatible: Should be one of the following
- "brcm,iproc-sba"
- "brcm,iproc-sba-v2"
- The "brcm,iproc-sba" has support for only 6 PQ coefficients
- The "brcm,iproc-sba-v2" has support for only 30 PQ coefficients
-- mboxes: List of phandle and mailbox channel specifiers
-
-Example:
-
-raid_mbox: mbox@67400000 {
- ...
- #mbox-cells = <3>;
- ...
-};
-
-raid0 {
- compatible = "brcm,iproc-sba-v2";
- mboxes = <&raid_mbox 0 0x1 0xffff>,
- <&raid_mbox 1 0x1 0xffff>,
- <&raid_mbox 2 0x1 0xffff>,
- <&raid_mbox 3 0x1 0xffff>,
- <&raid_mbox 4 0x1 0xffff>,
- <&raid_mbox 5 0x1 0xffff>,
- <&raid_mbox 6 0x1 0xffff>,
- <&raid_mbox 7 0x1 0xffff>;
-};
diff --git a/Documentation/devicetree/bindings/dma/brcm,iproc-sba.yaml b/Documentation/devicetree/bindings/dma/brcm,iproc-sba.yaml
new file mode 100644
index 000000000000..f3fed576cacf
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/brcm,iproc-sba.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/brcm,iproc-sba.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom SBA RAID engine
+
+maintainers:
+ - Ray Jui <rjui@broadcom.com>
+ - Scott Branden <sbranden@broadcom.com>
+
+properties:
+ compatible:
+ enum:
+ - brcm,iproc-sba
+ - brcm,iproc-sba-v2
+
+ mboxes:
+ minItems: 1
+ maxItems: 8
+
+required:
+ - compatible
+ - mboxes
+
+additionalProperties: false
+
+examples:
+ - |
+ raid0 {
+ compatible = "brcm,iproc-sba-v2";
+ mboxes = <&raid_mbox 0 0x1 0xffff>,
+ <&raid_mbox 1 0x1 0xffff>,
+ <&raid_mbox 2 0x1 0xffff>,
+ <&raid_mbox 3 0x1 0xffff>,
+ <&raid_mbox 4 0x1 0xffff>,
+ <&raid_mbox 5 0x1 0xffff>,
+ <&raid_mbox 6 0x1 0xffff>,
+ <&raid_mbox 7 0x1 0xffff>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml b/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml
index 75a7d9556699..9102b615dbd6 100644
--- a/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml
@@ -23,6 +23,35 @@ allOf:
properties:
power-domains: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,imx23-dma-apbx
+ then:
+ properties:
+ interrupt-names:
+ items:
+ - const: audio-adc
+ - const: audio-dac
+ - const: spdif-tx
+ - const: i2c
+ - const: saif0
+ - const: empty0
+ - const: auart0-rx
+ - const: auart0-tx
+ - const: auart1-rx
+ - const: auart1-tx
+ - const: saif1
+ - const: empty1
+ - const: empty2
+ - const: empty3
+ - const: empty4
+ - const: empty5
+ else:
+ properties:
+ interrupt-names: false
+
properties:
compatible:
oneOf:
@@ -54,6 +83,10 @@ properties:
minItems: 4
maxItems: 16
+ interrupt-names:
+ minItems: 4
+ maxItems: 16
+
"#dma-cells":
const: 1
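
A node conforming to the new per-channel interrupt names for the i.MX23 APBX controller would look roughly like this; the base address and interrupt numbers are illustrative, only the name list is fixed by the schema:

    dma-controller@80024000 {
        compatible = "fsl,imx23-dma-apbx";
        reg = <0x80024000 0x2000>;
        /* one interrupt per channel; numbers are illustrative */
        interrupts = <0>, <1>, <2>, <3>, <4>, <5>, <6>, <7>,
                     <8>, <9>, <10>, <11>, <12>, <13>, <14>, <15>;
        interrupt-names = "audio-adc", "audio-dac", "spdif-tx", "i2c",
                          "saif0", "empty0", "auart0-rx", "auart0-tx",
                          "auart1-rx", "auart1-tx", "saif1", "empty1",
                          "empty2", "empty3", "empty4", "empty5";
        #dma-cells = <1>;
        dma-channels = <16>;
    };
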
diff --git a/Documentation/devicetree/bindings/dma/lpc1850-dmamux.txt b/Documentation/devicetree/bindings/dma/lpc1850-dmamux.txt
deleted file mode 100644
index 87740adb2995..000000000000
--- a/Documentation/devicetree/bindings/dma/lpc1850-dmamux.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-NXP LPC18xx/43xx DMA MUX (DMA request router)
-
-Required properties:
-- compatible: "nxp,lpc1850-dmamux"
-- reg: Memory map for accessing module
-- #dma-cells: Should be set to <3>.
- * 1st cell contain the master dma request signal
- * 2nd cell contain the mux value (0-3) for the peripheral
- * 3rd cell contain either 1 or 2 depending on the AHB
- master used.
-- dma-requests: Number of DMA requests for the mux
-- dma-masters: phandle pointing to the DMA controller
-
-The DMA controller node need to have the following poroperties:
-- dma-requests: Number of DMA requests the controller can handle
-
-Example:
-
-dmac: dma@40002000 {
- compatible = "nxp,lpc1850-gpdma", "arm,pl080", "arm,primecell";
- arm,primecell-periphid = <0x00041080>;
- reg = <0x40002000 0x1000>;
- interrupts = <2>;
- clocks = <&ccu1 CLK_CPU_DMA>;
- clock-names = "apb_pclk";
- #dma-cells = <2>;
- dma-channels = <8>;
- dma-requests = <16>;
- lli-bus-interface-ahb1;
- lli-bus-interface-ahb2;
- mem-bus-interface-ahb1;
- mem-bus-interface-ahb2;
- memcpy-burst-size = <256>;
- memcpy-bus-width = <32>;
-};
-
-dmamux: dma-mux {
- compatible = "nxp,lpc1850-dmamux";
- #dma-cells = <3>;
- dma-requests = <64>;
- dma-masters = <&dmac>;
-};
-
-uart0: serial@40081000 {
- compatible = "nxp,lpc1850-uart", "ns16550a";
- reg = <0x40081000 0x1000>;
- reg-shift = <2>;
- interrupts = <24>;
- clocks = <&ccu2 CLK_APB0_UART0>, <&ccu1 CLK_CPU_UART0>;
- clock-names = "uartclk", "reg";
- dmas = <&dmamux 1 1 2
- &dmamux 2 1 2>;
- dma-names = "tx", "rx";
-};
diff --git a/Documentation/devicetree/bindings/dma/marvell,orion-xor.yaml b/Documentation/devicetree/bindings/dma/marvell,orion-xor.yaml
new file mode 100644
index 000000000000..add08257ec59
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/marvell,orion-xor.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/marvell,orion-xor.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell XOR engine
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: marvell,armada-380-xor
+ - const: marvell,orion-xor
+ - enum:
+ - marvell,armada-3700-xor
+ - marvell,orion-xor
+
+ reg:
+ items:
+ - description: Low registers for the XOR engine
+ - description: High registers for the XOR engine
+
+ clocks:
+ maxItems: 1
+
+patternProperties:
+ "^(channel|xor)[0-9]+$":
+ description: XOR channel sub-node
+ type: object
+ additionalProperties: false
+
+ properties:
+ interrupts:
+ description: Interrupt specifier for the XOR channel
+ items:
+ - description: Interrupt for this channel
+
+ dmacap,memcpy:
+ type: boolean
+ deprecated: true
+ description:
+ Indicates that the XOR channel is capable of memcpy operations
+
+ dmacap,memset:
+ type: boolean
+ deprecated: true
+ description:
+ Indicates that the XOR channel is capable of memset operations
+
+ dmacap,xor:
+ type: boolean
+ deprecated: true
+ description:
+ Indicates that the XOR channel is capable of xor operations
+
+ required:
+ - interrupts
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ xor@d0060900 {
+ compatible = "marvell,orion-xor";
+ reg = <0xd0060900 0x100>,
+ <0xd0060b00 0x100>;
+ clocks = <&coreclk 0>;
+
+ xor00 {
+ interrupts = <51>;
+ };
+ xor01 {
+ interrupts = <52>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/dma/mv-xor.txt b/Documentation/devicetree/bindings/dma/mv-xor.txt
deleted file mode 100644
index 0ffb4d8766a8..000000000000
--- a/Documentation/devicetree/bindings/dma/mv-xor.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-* Marvell XOR engines
-
-Required properties:
-- compatible: Should be one of the following:
- - "marvell,orion-xor"
- - "marvell,armada-380-xor"
- - "marvell,armada-3700-xor".
-- reg: Should contain registers location and length (two sets)
- the first set is the low registers, the second set the high
- registers for the XOR engine.
-- clocks: pointer to the reference clock
-
-The DT node must also contains sub-nodes for each XOR channel that the
-XOR engine has. Those sub-nodes have the following required
-properties:
-- interrupts: interrupt of the XOR channel
-
-The sub-nodes used to contain one or several of the following
-properties, but they are now deprecated:
-- dmacap,memcpy to indicate that the XOR channel is capable of memcpy operations
-- dmacap,memset to indicate that the XOR channel is capable of memset operations
-- dmacap,xor to indicate that the XOR channel is capable of xor operations
-- dmacap,interrupt to indicate that the XOR channel is capable of
- generating interrupts
-
-Example:
-
-xor@d0060900 {
- compatible = "marvell,orion-xor";
- reg = <0xd0060900 0x100
- 0xd0060b00 0x100>;
- clocks = <&coreclk 0>;
-
- xor00 {
- interrupts = <51>;
- };
- xor01 {
- interrupts = <52>;
- };
-};
diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
index 7052468b15c8..bbe4da2a1105 100644
--- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
+++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
@@ -24,12 +24,14 @@ properties:
- qcom,sm6350-gpi-dma
- items:
- enum:
+ - qcom,milos-gpi-dma
- qcom,qcm2290-gpi-dma
- qcom,qcs8300-gpi-dma
- qcom,qdu1000-gpi-dma
- qcom,sa8775p-gpi-dma
- qcom,sar2130p-gpi-dma
- qcom,sc7280-gpi-dma
+ - qcom,sc8280xp-gpi-dma
- qcom,sdx75-gpi-dma
- qcom,sm6115-gpi-dma
- qcom,sm6375-gpi-dma
diff --git a/Documentation/devicetree/bindings/dma/sophgo,cv1800b-dmamux.yaml b/Documentation/devicetree/bindings/dma/sophgo,cv1800b-dmamux.yaml
new file mode 100644
index 000000000000..011002942235
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sophgo,cv1800b-dmamux.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/sophgo,cv1800b-dmamux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo CV1800/SG200 Series DMA multiplexer
+
+maintainers:
+ - Inochi Amaoto <inochiama@gmail.com>
+
+description:
+ The DMA multiplexer of CV1800 is a subdevice of the system
+ controller. It supports mapping 8 channels, but each channel
+ can be mapped only once.
+
+allOf:
+ - $ref: dma-router.yaml#
+
+properties:
+ compatible:
+ const: sophgo,cv1800b-dmamux
+
+ reg:
+ items:
+ - description: DMA channel remapping register
+ - description: DMA channel interrupt mapping register
+
+ '#dma-cells':
+ const: 2
+ description:
+ The first cell is the device id. The second one is the cpu id.
+
+ dma-masters:
+ maxItems: 1
+
+required:
+ - reg
+ - '#dma-cells'
+ - dma-masters
+
+additionalProperties: false
+
+examples:
+ - |
+ dma-router@154 {
+ compatible = "sophgo,cv1800b-dmamux";
+ reg = <0x154 0x8>, <0x298 0x4>;
+ #dma-cells = <2>;
+ dma-masters = <&dmac>;
+ };
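
As a usage sketch for the two dma-cells described above (first cell: device
id, second cell: cpu id), a hypothetical client could look like the node
below; the serial node, device ids, and channel names are illustrative
assumptions, not taken from the binding:

    uart0: serial@4140000 {
        /* request two routed channels: device id + cpu id per specifier */
        dmas = <&dmamux 4 0>, <&dmamux 5 0>;
        dma-names = "rx", "tx";
    };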
diff --git a/Documentation/devicetree/bindings/dpll/dpll-device.yaml b/Documentation/devicetree/bindings/dpll/dpll-device.yaml
new file mode 100644
index 000000000000..fb8d7a9a3693
--- /dev/null
+++ b/Documentation/devicetree/bindings/dpll/dpll-device.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dpll/dpll-device.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Digital Phase-Locked Loop (DPLL) Device
+
+maintainers:
+ - Ivan Vecera <ivecera@redhat.com>
+
+description:
+ A Digital Phase-Locked Loop (DPLL) device is used for precise clock
+ synchronization in networking and telecom hardware. The device can
+ have one or more channels (DPLLs) and one or more physical input and
+ output pins. Each DPLL channel can either produce a pulse-per-second
+ signal or drive an ethernet equipment clock. The type of each channel
+ can be indicated by the dpll-types property.
+
+properties:
+ $nodename:
+ pattern: "^dpll(@.*)?$"
+
+ "#address-cells":
+ const: 0
+
+ "#size-cells":
+ const: 0
+
+ dpll-types:
+ description: List of DPLL channel types, one per DPLL instance.
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ items:
+ enum: [pps, eec]
+
+ input-pins:
+ type: object
+ description: DPLL input pins
+ unevaluatedProperties: false
+
+ properties:
+ "#address-cells":
+ const: 1
+ "#size-cells":
+ const: 0
+
+ patternProperties:
+ "^pin@[0-9a-f]+$":
+ $ref: /schemas/dpll/dpll-pin.yaml
+ unevaluatedProperties: false
+
+ required:
+ - "#address-cells"
+ - "#size-cells"
+
+ output-pins:
+ type: object
+ description: DPLL output pins
+ unevaluatedProperties: false
+
+ properties:
+ "#address-cells":
+ const: 1
+ "#size-cells":
+ const: 0
+
+ patternProperties:
+ "^pin@[0-9]+$":
+ $ref: /schemas/dpll/dpll-pin.yaml
+ unevaluatedProperties: false
+
+ required:
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/dpll/dpll-pin.yaml b/Documentation/devicetree/bindings/dpll/dpll-pin.yaml
new file mode 100644
index 000000000000..51db93b77306
--- /dev/null
+++ b/Documentation/devicetree/bindings/dpll/dpll-pin.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dpll/dpll-pin.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: DPLL Pin
+
+maintainers:
+ - Ivan Vecera <ivecera@redhat.com>
+
+description: |
+ The DPLL pin is either a physical input or output pin provided by a
+ DPLL (Digital Phase-Locked Loop) device. The pin is identified by its
+ physical order number, which is stored in the reg property, and can have
+ an additional set of properties such as supported (allowed) frequencies,
+ label, and type, and may support embedded sync.
+
+ Note that the pin in this context has nothing to do with pinctrl.
+
+properties:
+ reg:
+ description: Hardware index of the DPLL pin.
+ maxItems: 1
+
+ connection-type:
+ description: Connection type of the pin
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [ext, gnss, int, mux, synce]
+
+ esync-control:
+ description: Indicates whether the pin supports embedded sync functionality.
+ type: boolean
+
+ label:
+ description: String exposed as the pin board label
+ $ref: /schemas/types.yaml#/definitions/string
+
+ supported-frequencies-hz:
+ description: List of supported frequencies for this pin, expressed in Hz.
+
+required:
+ - reg
+
+additionalProperties: false
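
As a sketch of a pin node that this schema would validate, placed under an
input-pins or output-pins container (the reg value, label, and frequency
range below are illustrative assumptions):

    pin@1 {
        reg = <1>;                 /* hardware index of the pin */
        connection-type = "synce";
        label = "SMA1";
        /* frequencies are 64-bit values, hence /bits/ 64 */
        supported-frequencies-hz = /bits/ 64 <1 10000000>;
    };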
diff --git a/Documentation/devicetree/bindings/dpll/microchip,zl30731.yaml b/Documentation/devicetree/bindings/dpll/microchip,zl30731.yaml
new file mode 100644
index 000000000000..17747f754b84
--- /dev/null
+++ b/Documentation/devicetree/bindings/dpll/microchip,zl30731.yaml
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dpll/microchip,zl30731.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip Azurite DPLL device
+
+maintainers:
+ - Ivan Vecera <ivecera@redhat.com>
+
+description:
+ Microchip Azurite (ZL3073x) is a family of DPLL devices that provides
+ up to 5 independent DPLL channels, up to 10 differential or single-ended
+ inputs, and 10 differential or 20 single-ended outputs.
+ These devices support both I2C and SPI interfaces.
+
+properties:
+ compatible:
+ enum:
+ - microchip,zl30731
+ - microchip,zl30732
+ - microchip,zl30733
+ - microchip,zl30734
+ - microchip,zl30735
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: /schemas/dpll/dpll-device.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpll@70 {
+ compatible = "microchip,zl30732";
+ reg = <0x70>;
+ dpll-types = "pps", "eec";
+
+ input-pins {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pin@0 { /* REF0P */
+ reg = <0>;
+ connection-type = "ext";
+ label = "Input 0";
+ supported-frequencies-hz = /bits/ 64 <1 1000>;
+ };
+ };
+
+ output-pins {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pin@3 { /* OUT1N */
+ reg = <3>;
+ connection-type = "gnss";
+ esync-control;
+ label = "Output 1";
+ supported-frequencies-hz = /bits/ 64 <1 10000>;
+ };
+ };
+ };
+ };
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpll@70 {
+ compatible = "microchip,zl30731";
+ reg = <0x70>;
+ spi-max-frequency = <12500000>;
+
+ dpll-types = "pps";
+
+ input-pins {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pin@0 { /* REF0P */
+ reg = <0>;
+ connection-type = "ext";
+ label = "Input 0";
+ supported-frequencies-hz = /bits/ 64 <1 1000>;
+ };
+ };
+
+ output-pins {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pin@3 { /* OUT1N */
+ reg = <3>;
+ connection-type = "gnss";
+ esync-control;
+ label = "Output 1";
+ supported-frequencies-hz = /bits/ 64 <1 10000>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/gnss/u-blox,neo-6m.yaml b/Documentation/devicetree/bindings/gnss/u-blox,neo-6m.yaml
index 7d4b6d49e5ee..c0c2bfaa606f 100644
--- a/Documentation/devicetree/bindings/gnss/u-blox,neo-6m.yaml
+++ b/Documentation/devicetree/bindings/gnss/u-blox,neo-6m.yaml
@@ -18,10 +18,14 @@ description: >
properties:
compatible:
- enum:
- - u-blox,neo-6m
- - u-blox,neo-8
- - u-blox,neo-m8
+ oneOf:
+ - enum:
+ - u-blox,neo-6m
+ - u-blox,neo-8
+ - u-blox,neo-m8
+ - items:
+ - const: u-blox,neo-m9
+ - const: u-blox,neo-m8
reg:
description: >
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index aa19f8819231..be198182dbfe 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -43,6 +43,7 @@ properties:
- allwinner,sun55i-a523-mali
- mediatek,mt8188-mali
- mediatek,mt8192-mali
+ - mediatek,mt8370-mali
- const: arm,mali-valhall-jm # Mali Valhall GPU model/revision is fully discoverable
reg:
@@ -226,7 +227,9 @@ allOf:
properties:
compatible:
contains:
- const: mediatek,mt8186-mali
+ enum:
+ - mediatek,mt8186-mali
+ - mediatek,mt8370-mali
then:
properties:
power-domains:
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
index 9318817ea135..c8d0d9192d92 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
@@ -47,6 +47,7 @@ properties:
- hisilicon,hi6220-mali
- mediatek,mt7623-mali
- rockchip,rk3328-mali
+ - rockchip,rk3528-mali
- const: arm,mali-450
# "arm,mali-300"
@@ -148,6 +149,7 @@ allOf:
- rockchip,rk3188-mali
- rockchip,rk3228-mali
- rockchip,rk3328-mali
+ - rockchip,rk3528-mali
then:
required:
- resets
diff --git a/Documentation/devicetree/bindings/hwmon/adi,adm1266.yaml b/Documentation/devicetree/bindings/hwmon/adi,adm1266.yaml
index 4f8e11bd5142..fe87a592de45 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,adm1266.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,adm1266.yaml
@@ -8,7 +8,7 @@ title: Analog Devices ADM1266 Cascadable Super Sequencer with Margin
Control and Fault Recording
maintainers:
- - Alexandru Tachici <alexandru.tachici@analog.com>
+ - Cedric Encarnacion <cedricjustine.encarnacion@analog.com>
description: |
Analog Devices ADM1266 Cascadable Super Sequencer with Margin
diff --git a/Documentation/devicetree/bindings/hwmon/adi,ltc2992.yaml b/Documentation/devicetree/bindings/hwmon/adi,ltc2992.yaml
index 0ad12d245656..38a8f3a14c02 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,ltc2992.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,ltc2992.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Linear Technology 2992 Power Monitor
maintainers:
- - Alexandru Tachici <alexandru.tachici@analog.com>
+ - Cedric Encarnacion <cedricjustine.encarnacion@analog.com>
description: |
Linear Technology 2992 Dual Wide Range Power Monitor
diff --git a/Documentation/devicetree/bindings/hwmon/adt7475.yaml b/Documentation/devicetree/bindings/hwmon/adt7475.yaml
index 79e8d62fa3b3..43e9fe225870 100644
--- a/Documentation/devicetree/bindings/hwmon/adt7475.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adt7475.yaml
@@ -53,7 +53,10 @@ properties:
default: 1
"#pwm-cells":
- const: 4
+ oneOf:
+ - const: 3
+ - const: 4
+ deprecated: true
description: |
Number of cells in a PWM specifier.
- 0: The PWM channel
@@ -68,7 +71,7 @@ properties:
- 11363636 (88 Hz)
- 44444 (22 kHz)
- 2: PWM flags 0 or PWM_POLARITY_INVERTED
- - 3: The default PWM duty cycle in nanoseconds
+ - 3: The default PWM duty cycle in nanoseconds, defaults to the period.
patternProperties:
"^adi,bypass-attenuator-in[0-4]$":
@@ -124,15 +127,15 @@ examples:
adi,bypass-attenuator-in1 = <0>;
adi,pin10-function = "smbalert#";
adi,pin14-function = "tach4";
- #pwm-cells = <4>;
+ #pwm-cells = <3>;
- /* PWMs at 22.5 kHz frequency, 50% duty*/
+ /* PWMs at 22.5 kHz frequency */
fan-0 {
- pwms = <&pwm 0 44444 0 22222>;
+ pwms = <&pwm 0 44444 0>;
};
fan-1 {
- pwms = <&pwm 2 44444 0 22222>;
+ pwms = <&pwm 2 44444 0>;
};
};
};
diff --git a/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml b/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml
index aa801ef1640b..ea8b1553a3e9 100644
--- a/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml
+++ b/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml
@@ -28,6 +28,7 @@ properties:
- lltc,ltc3886
- lltc,ltc3887
- lltc,ltc3889
+ - lltc,ltc7132
- lltc,ltc7841
- lltc,ltc7880
- lltc,ltm2987
@@ -55,6 +56,7 @@ properties:
* ltc2977, ltc2979, ltc2980, ltm2987 : vout0 - vout7
* ltc2978 : vout0 - vout7
* ltc3880, ltc3882, ltc3884, ltc3886, ltc3887, ltc3889 : vout0 - vout1
+ * ltc7132 : vout0 - vout1
* ltc7841 : vout0
* ltc7880 : vout0 - vout1
* ltc3883 : vout0
diff --git a/Documentation/devicetree/bindings/hwmon/maxim,max20730.yaml b/Documentation/devicetree/bindings/hwmon/maxim,max20730.yaml
index 8af0d7458e62..8588d97ba6ec 100644
--- a/Documentation/devicetree/bindings/hwmon/maxim,max20730.yaml
+++ b/Documentation/devicetree/bindings/hwmon/maxim,max20730.yaml
@@ -25,6 +25,7 @@ description: |
properties:
compatible:
enum:
+ - maxim,max20710
- maxim,max20730
- maxim,max20734
- maxim,max20743
diff --git a/Documentation/devicetree/bindings/hwmon/national,lm90.yaml b/Documentation/devicetree/bindings/hwmon/national,lm90.yaml
index 4feb76919404..1b871f166e79 100644
--- a/Documentation/devicetree/bindings/hwmon/national,lm90.yaml
+++ b/Documentation/devicetree/bindings/hwmon/national,lm90.yaml
@@ -20,6 +20,7 @@ properties:
- dallas,max6646
- dallas,max6647
- dallas,max6649
+ - dallas,max6654
- dallas,max6657
- dallas,max6658
- dallas,max6659
@@ -36,6 +37,9 @@ properties:
- nuvoton,nct7717
- nuvoton,nct7718
- nxp,sa56004
+ - onnn,nct72
+ - onnn,nct214
+ - onnn,nct218
- onnn,nct1008
- ti,tmp451
- ti,tmp461
@@ -118,6 +122,7 @@ allOf:
- dallas,max6646
- dallas,max6647
- dallas,max6649
+ - dallas,max6654
- dallas,max6657
- dallas,max6658
- dallas,max6659
@@ -139,6 +144,9 @@ allOf:
- adi,adt7461
- adi,adt7461a
- adi,adt7481
+ - onnn,nct72
+ - onnn,nct214
+ - onnn,nct218
- onnn,nct1008
then:
patternProperties:
diff --git a/Documentation/devicetree/bindings/hwmon/pmbus/adi,adp1050.yaml b/Documentation/devicetree/bindings/hwmon/pmbus/adi,adp1050.yaml
index 10c2204bc3df..af7530093942 100644
--- a/Documentation/devicetree/bindings/hwmon/pmbus/adi,adp1050.yaml
+++ b/Documentation/devicetree/bindings/hwmon/pmbus/adi,adp1050.yaml
@@ -10,16 +10,27 @@ maintainers:
- Radu Sabau <radu.sabau@analog.com>
description: |
- The ADP1050 is used to monitor system voltages, currents and temperatures.
+ The ADP1050 and similar devices are used to monitor system voltages,
+ currents, power, and temperatures.
+
Through the PMBus interface, the ADP1050 targets isolated power supplies
and has four individual monitors for input/output voltage, input current
and temperature.
Datasheet:
https://www.analog.com/en/products/adp1050.html
+ https://www.analog.com/en/products/adp1051.html
+ https://www.analog.com/en/products/adp1055.html
+ https://www.analog.com/en/products/ltp8800-1a.html
+ https://www.analog.com/en/products/ltp8800-2.html
+ https://www.analog.com/en/products/ltp8800-4a.html
properties:
compatible:
- const: adi,adp1050
+ enum:
+ - adi,adp1050
+ - adi,adp1051
+ - adi,adp1055
+ - adi,ltp8800
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/hwmon/pmbus/isil,isl68137.yaml b/Documentation/devicetree/bindings/hwmon/pmbus/isil,isl68137.yaml
index bac5f8e352aa..3dc7f15484d2 100644
--- a/Documentation/devicetree/bindings/hwmon/pmbus/isil,isl68137.yaml
+++ b/Documentation/devicetree/bindings/hwmon/pmbus/isil,isl68137.yaml
@@ -56,6 +56,7 @@ properties:
- renesas,raa228228
- renesas,raa229001
- renesas,raa229004
+ - renesas,raa229621
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml b/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml
index f8bea1c0e94a..8f9ce00079df 100644
--- a/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml
+++ b/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml
@@ -23,7 +23,13 @@ description: |
properties:
compatible:
enum:
+ - ti,ucd9000
+ - ti,ucd9090
+ - ti,ucd90120
+ - ti,ucd90124
+ - ti,ucd90160
- ti,ucd90320
+ - ti,ucd90910
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/hwmon/ti,amc6821.yaml b/Documentation/devicetree/bindings/hwmon/ti,amc6821.yaml
index 9ca7356760a7..eb00756988be 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,amc6821.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,amc6821.yaml
@@ -32,6 +32,12 @@ properties:
$ref: fan-common.yaml#
unevaluatedProperties: false
+ properties:
+ cooling-levels:
+ description: PWM duty cycle values corresponding to thermal cooling states.
+ items:
+ maximum: 255
+
"#pwm-cells":
const: 2
description: |
diff --git a/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml b/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml
index d1fb7b9abda0..fa68b99ef2e2 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml
@@ -25,6 +25,7 @@ properties:
- ti,ina219
- ti,ina220
- ti,ina226
+ - ti,ina228
- ti,ina230
- ti,ina231
- ti,ina233
@@ -107,6 +108,7 @@ allOf:
- ti,ina219
- ti,ina220
- ti,ina226
+ - ti,ina228
- ti,ina230
- ti,ina231
- ti,ina237
diff --git a/Documentation/devicetree/bindings/hwmon/ti,lm87.yaml b/Documentation/devicetree/bindings/hwmon/ti,lm87.yaml
index 63d8cf467806..5c0cdc0091b5 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,lm87.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,lm87.yaml
@@ -18,7 +18,9 @@ description: |
properties:
compatible:
- const: ti,lm87
+ enum:
+ - adi,adm1024
+ - ti,lm87
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/i2c/apple,i2c.yaml b/Documentation/devicetree/bindings/i2c/apple,i2c.yaml
index 077d2a539c83..fed3e1b8c43f 100644
--- a/Documentation/devicetree/bindings/i2c/apple,i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/apple,i2c.yaml
@@ -22,6 +22,11 @@ properties:
compatible:
items:
- enum:
+ - apple,s5l8960x-i2c
+ - apple,t7000-i2c
+ - apple,s8000-i2c
+ - apple,t8010-i2c
+ - apple,t8015-i2c
- apple,t8103-i2c
- apple,t8112-i2c
- apple,t6000-i2c
diff --git a/Documentation/devicetree/bindings/i3c/renesas,i3c.yaml b/Documentation/devicetree/bindings/i3c/renesas,i3c.yaml
new file mode 100644
index 000000000000..fe2e9633c46f
--- /dev/null
+++ b/Documentation/devicetree/bindings/i3c/renesas,i3c.yaml
@@ -0,0 +1,179 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i3c/renesas,i3c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/G3S and RZ/G3E I3C Bus Interface
+
+maintainers:
+ - Wolfram Sang <wsa+renesas@sang-engineering.com>
+ - Tommaso Merciai <tommaso.merciai.xr@bp.renesas.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r9a08g045-i3c # RZ/G3S
+ - renesas,r9a09g047-i3c # RZ/G3E
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Non-recoverable internal error interrupt
+ - description: Normal transfer error interrupt
+ - description: Normal transfer abort interrupt
+ - description: Normal response status buffer full interrupt
+ - description: Normal command buffer empty interrupt
+ - description: Normal IBI status buffer full interrupt
+ - description: Normal Rx data buffer full interrupt
+ - description: Normal Tx data buffer empty interrupt
+ - description: Normal receive status buffer full interrupt
+ - description: START condition detection interrupt
+ - description: STOP condition detection interrupt
+ - description: Transmit end interrupt
+ - description: NACK detection interrupt
+ - description: Arbitration lost interrupt
+ - description: Timeout detection interrupt
+ - description: Wake-up condition detection interrupt
+ - description: HDR Exit Pattern detection interrupt
+ minItems: 16
+
+ interrupt-names:
+ items:
+ - const: ierr
+ - const: terr
+ - const: abort
+ - const: resp
+ - const: cmd
+ - const: ibi
+ - const: rx
+ - const: tx
+ - const: rcv
+ - const: st
+ - const: sp
+ - const: tend
+ - const: nack
+ - const: al
+ - const: tmo
+ - const: wu
+ - const: exit
+ minItems: 16
+
+ clocks:
+ items:
+ - description: APB bus clock
+ - description: transfer clock
+ - description: SFRs clock
+ minItems: 2
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: tclk
+ - const: pclkrw
+ minItems: 2
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ items:
+ - description: Reset signal
+ - description: APB interface reset signal/SCAN reset signal
+
+ reset-names:
+ items:
+ - const: presetn
+ - const: tresetn
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clock-names
+ - clocks
+ - power-domains
+ - resets
+ - reset-names
+
+allOf:
+ - $ref: i3c.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a08g045-i3c
+ then:
+ properties:
+ clocks:
+ maxItems: 2
+ clock-names:
+ maxItems: 2
+ interrupts:
+ minItems: 17
+ interrupt-names:
+ minItems: 17
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a09g047-i3c
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ clock-names:
+ minItems: 3
+ interrupts:
+ maxItems: 16
+ interrupt-names:
+ maxItems: 16
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r9a08g045-cpg.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ i3c@1005b000 {
+ compatible = "renesas,r9a08g045-i3c";
+ reg = <0x1005b000 0x1000>;
+ clocks = <&cpg CPG_MOD R9A08G045_I3C_PCLK>,
+ <&cpg CPG_MOD R9A08G045_I3C_TCLK>;
+ clock-names = "pclk", "tclk";
+ interrupts = <GIC_SPI 289 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 294 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 295 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 296 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 297 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 298 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 299 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ierr", "terr", "abort", "resp",
+ "cmd", "ibi", "rx", "tx", "rcv",
+ "st", "sp", "tend", "nack",
+ "al", "tmo", "wu", "exit";
+ resets = <&cpg R9A08G045_I3C_PRESETN>,
+ <&cpg R9A08G045_I3C_TRESETN>;
+ reset-names = "presetn", "tresetn";
+ power-domains = <&cpg>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/input/syna,rmi4.yaml b/Documentation/devicetree/bindings/input/syna,rmi4.yaml
index b522c8d3ce0d..f369385ffaf0 100644
--- a/Documentation/devicetree/bindings/input/syna,rmi4.yaml
+++ b/Documentation/devicetree/bindings/input/syna,rmi4.yaml
@@ -89,6 +89,24 @@ properties:
required:
- reg
+ rmi4-f1a@1a:
+ type: object
+ additionalProperties: false
+ $ref: input.yaml#
+ description:
+ RMI4 Function 1A is for capacitive keys.
+
+ properties:
+ reg:
+ maxItems: 1
+
+ linux,keycodes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - reg
+
patternProperties:
"^rmi4-f1[12]@1[12]$":
type: object
@@ -201,6 +219,7 @@ allOf:
examples:
- |
+ #include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
@@ -234,6 +253,7 @@ examples:
rmi4-f1a@1a {
reg = <0x1a>;
+ linux,keycodes = <KEY_BACK KEY_HOME KEY_MENU>;
};
};
};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
index ab821490284a..7d3edb58f72d 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
@@ -43,6 +43,7 @@ properties:
- focaltech,ft5452
- focaltech,ft6236
- focaltech,ft8201
+ - focaltech,ft8716
- focaltech,ft8719
reg:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/lpc32xx-tsc.txt b/Documentation/devicetree/bindings/input/touchscreen/lpc32xx-tsc.txt
deleted file mode 100644
index 41cbf4b7a670..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/lpc32xx-tsc.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-* NXP LPC32xx SoC Touchscreen Controller (TSC)
-
-Required properties:
-- compatible: must be "nxp,lpc3220-tsc"
-- reg: physical base address of the controller and length of memory mapped
- region.
-- interrupts: The TSC/ADC interrupt
-
-Example:
-
- tsc@40048000 {
- compatible = "nxp,lpc3220-tsc";
- reg = <0x40048000 0x1000>;
- interrupt-parent = <&mic>;
- interrupts = <39 0>;
- };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/nxp,lpc3220-tsc.yaml b/Documentation/devicetree/bindings/input/touchscreen/nxp,lpc3220-tsc.yaml
new file mode 100644
index 000000000000..b6feda127c7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/nxp,lpc3220-tsc.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/nxp,lpc3220-tsc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC32xx SoC Touchscreen Controller (TSC)
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: nxp,lpc3220-tsc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/lpc32xx-clock.h>
+
+ touchscreen@40048000 {
+ compatible = "nxp,lpc3220-tsc";
+ reg = <0x40048000 0x1000>;
+ interrupt-parent = <&mic>;
+ interrupts = <39 0>;
+ clocks = <&clk LPC32XX_CLK_ADC>;
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sitronix,st1232.yaml b/Documentation/devicetree/bindings/input/touchscreen/sitronix,st1232.yaml
index 1d8ca19fd37a..e7ee7a0d74c4 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/sitronix,st1232.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/sitronix,st1232.yaml
@@ -37,6 +37,7 @@ unevaluatedProperties: false
examples:
- |
+ #include <dt-bindings/input/linux-event-codes.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
@@ -46,5 +47,33 @@ examples:
reg = <0x55>;
interrupts = <2 0>;
gpios = <&gpio1 166 0>;
+
+ touch-overlay {
+ segment-0 {
+ label = "Touchscreen";
+ x-origin = <0>;
+ x-size = <240>;
+ y-origin = <40>;
+ y-size = <280>;
+ };
+
+ segment-1a {
+ label = "Camera light";
+ linux,code = <KEY_LIGHTS_TOGGLE>;
+ x-origin = <40>;
+ x-size = <40>;
+ y-origin = <0>;
+ y-size = <40>;
+ };
+
+ segment-2a {
+ label = "Power";
+ linux,code = <KEY_POWER>;
+ x-origin = <160>;
+ x-size = <40>;
+ y-origin = <0>;
+ y-size = <40>;
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ti.tsc2007.yaml b/Documentation/devicetree/bindings/input/touchscreen/ti.tsc2007.yaml
new file mode 100644
index 000000000000..8bb4bc7df4fa
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/ti.tsc2007.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/ti.tsc2007.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments tsc2007 touchscreen controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: ti,tsc2007
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ ti,x-plate-ohms:
+ description: X-plate resistance in ohms.
+
+ gpios: true
+
+ pendown-gpio: true
+
+ ti,max-rt:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: maximum pressure.
+
+ ti,fuzzx:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ specifies the absolute input fuzz x value.
+ If set, it will permit noise in the data up to +- the value given to the fuzz
+ parameter, which is used to filter noise from the event stream.
+
+ ti,fuzzy:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: specifies the absolute input fuzz y value.
+
+ ti,fuzzz:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: specifies the absolute input fuzz z value.
+
+ ti,poll-period:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ how long to wait (in milliseconds) before reading the values from the
+ tsc2007 again.
+
+required:
+ - compatible
+ - reg
+ - ti,x-plate-ohms
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touch@49 {
+ compatible = "ti,tsc2007";
+ reg = <0x49>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <0x0 0x8>;
+ gpios = <&gpio4 0 0>;
+ ti,x-plate-ohms = <180>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
index 431c13335c40..3e3572aa483a 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
@@ -87,6 +87,125 @@ properties:
touchscreen-y-plate-ohms:
description: Resistance of the Y-plate in Ohms
+ touch-overlay:
+ description: |
+ List of nodes defining segments (touch areas) on the touchscreen.
+
+ This object can be used to describe a series of segments that restrict
+ the region within which touch events are reported, or that define
+ buttons with a specific functionality.
+
+ This is of special interest if the touchscreen is shipped with a physical
+ overlay on top of it with a frame that hides some part of the original
+ touchscreen area. Printed buttons on that overlay are also a typical
+ use case.
+
+ A new touchscreen area is defined as a sub-node without a key code. If a
+ key code is defined in the sub-node, it will be interpreted as a button.
+
+ The x-origin and y-origin properties of a touchscreen area define the
+ offset of a new origin from where the touchscreen events are referenced.
+ This offset is applied to the events accordingly. The x-size and y-size
+ properties define the size of the touchscreen effective area.
+
+ The following example shows a new touchscreen area with the new origin
+ (0',0') for the touch events generated by the device.
+
+      Touchscreen (full area)
+     ┌────────────────────────────────────────┐
+     │      ┌───────────────────────────────┐ │
+     │      │                               │ │
+     │      ├ y-size                        │ │
+     │      │                               │ │
+     │      │       touchscreen area        │ │
+     │      │        (no key code)          │ │
+     │      │                               │ │
+     │      │            x-size             │ │
+     │     ┌└──────────────┴────────────────┘ │
+     │(0',0')                                 │
+    ┌└────────────────────────────────────────┘
+  (0,0)
+
+ where (0',0') = (0+x-origin,0+y-origin)
+
+ Sub-nodes with key codes report the touch events on their surface as key
+ events instead.
+
+ The following example shows a touchscreen with a single button on it.
+
+      Touchscreen (full area)
+     ┌───────────────────────────────────┐
+     │                                   │
+     │                                   │
+     │      ┌─────────┐                  │
+     │      │button 0 │                  │
+     │      │KEY_POWER│                  │
+     │      └─────────┘                  │
+     │                                   │
+     │                                   │
+    ┌└───────────────────────────────────┘
+  (0,0)
+
+ Segments defining buttons and clipped touchscreen areas can be combined
+ as shown in the following example.
+ In that case only the events within the touchscreen area are reported
+ as touch events. Events within the button areas report their associated
+ key code. Any events outside the defined areas are ignored.
+
+      Touchscreen (full area)
+     ┌─────────┬──────────────────────────────┐
+     │         │                              │
+     │         │  ┌───────────────────────┐   │
+     │ button 0│  │                       │   │
+     │KEY_POWER│  │                       │   │
+     │         │  │                       │   │
+     ├─────────┤  │   touchscreen area    │   │
+     │         │  │    (no key code)      │   │
+     │         │  │                       │   │
+     │ button 1│  │                       │   │
+     │ KEY_INFO│ ┌└───────────────────────┘   │
+     │         │(0',0')                       │
+    ┌└─────────┴──────────────────────────────┘
+  (0,0)
+
+ type: object
+
+ patternProperties:
+ '^segment-':
+ type: object
+ description:
+ Each segment is represented as a sub-node.
+ properties:
+ x-origin:
+ description: horizontal origin of the node area
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ y-origin:
+ description: vertical origin of the node area
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ x-size:
+ description: horizontal resolution of the node area
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ y-size:
+ description: vertical resolution of the node area
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ label:
+ description: descriptive name of the segment
+ $ref: /schemas/types.yaml#/definitions/string
+
+ linux,code: true
+
+ required:
+ - x-origin
+ - y-origin
+ - x-size
+ - y-size
+
+ unevaluatedProperties: false
+
dependencies:
touchscreen-size-x: [ touchscreen-size-y ]
touchscreen-size-y: [ touchscreen-size-x ]
diff --git a/Documentation/devicetree/bindings/input/touchscreen/tsc2007.txt b/Documentation/devicetree/bindings/input/touchscreen/tsc2007.txt
deleted file mode 100644
index 210486a3fb11..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/tsc2007.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-* Texas Instruments tsc2007 touchscreen controller
-
-Required properties:
-- compatible: must be "ti,tsc2007".
-- reg: I2C address of the chip.
-- ti,x-plate-ohms: X-plate resistance in ohms.
-
-Optional properties:
-- gpios: the interrupt gpio the chip is connected to (through the penirq pin).
- The penirq pin goes to low when the panel is touched.
- (see GPIO binding[1] for more details).
-- interrupts: (gpio) interrupt to which the chip is connected
- (see interrupt binding[0]).
-- ti,max-rt: maximum pressure.
-- ti,fuzzx: specifies the absolute input fuzz x value.
- If set, it will permit noise in the data up to +- the value given to the fuzz
- parameter, that is used to filter noise from the event stream.
-- ti,fuzzy: specifies the absolute input fuzz y value.
-- ti,fuzzz: specifies the absolute input fuzz z value.
-- ti,poll-period: how much time to wait (in milliseconds) before reading again the
- values from the tsc2007.
-
-[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-[1]: Documentation/devicetree/bindings/gpio/gpio.txt
-
-Example:
- &i2c1 {
- /* ... */
- tsc2007@49 {
- compatible = "ti,tsc2007";
- reg = <0x49>;
- interrupt-parent = <&gpio4>;
- interrupts = <0x0 0x8>;
- gpios = <&gpio4 0 0>;
- ti,x-plate-ohms = <180>;
- };
-
- /* ... */
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v5-iwb.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v5-iwb.yaml
new file mode 100644
index 000000000000..99a266a62385
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v5-iwb.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/arm,gic-v5-iwb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Generic Interrupt Controller, version 5 Interrupt Wire Bridge (IWB)
+
+maintainers:
+ - Lorenzo Pieralisi <lpieralisi@kernel.org>
+ - Marc Zyngier <maz@kernel.org>
+
+description: |
+ The GICv5 architecture defines the guidelines to implement GICv5
+ compliant interrupt controllers for AArch64 systems.
+
+ The GICv5 specification can be found at
+ https://developer.arm.com/documentation/aes0070
+
+ GICv5 has zero or more Interrupt Wire Bridges (IWB) that are responsible
+ for translating wire signals into interrupt messages to the GICv5 ITS.
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ const: arm,gic-v5-iwb
+
+ reg:
+ items:
+ - description: IWB control frame
+
+ "#address-cells":
+ const: 0
+
+ "#interrupt-cells":
+ description: |
+ The 1st cell corresponds to the IWB wire.
+
+ The 2nd cell is the flags, encoded as follows:
+ bits[3:0] trigger type and level flags.
+
+ 1 = low-to-high edge triggered
+ 2 = high-to-low edge triggered
+ 4 = active high level-sensitive
+ 8 = active low level-sensitive
+
+ const: 2
+
+ interrupt-controller: true
+
+ msi-parent:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - "#interrupt-cells"
+ - interrupt-controller
+ - msi-parent
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@2f000000 {
+ compatible = "arm,gic-v5-iwb";
+ reg = <0x2f000000 0x10000>;
+
+ #address-cells = <0>;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ msi-parent = <&its0 64>;
+ };
+...
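
As a consumer-side sketch, a device wired to the IWB encodes its wire number
in the first interrupt cell and the trigger flags in the second. The device
node, compatible, and wire number below are hypothetical, and the IWB node
is assumed to be labelled iwb:

    device@2f100000 {
        compatible = "vendor,example-dev"; /* hypothetical */
        reg = <0x2f100000 0x1000>;
        interrupt-parent = <&iwb>;
        interrupts = <5 4>;    /* wire 5, active high level-sensitive */
    };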
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v5.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v5.yaml
new file mode 100644
index 000000000000..86ca7f3ac281
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v5.yaml
@@ -0,0 +1,267 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/arm,gic-v5.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Generic Interrupt Controller, version 5
+
+maintainers:
+ - Lorenzo Pieralisi <lpieralisi@kernel.org>
+ - Marc Zyngier <maz@kernel.org>
+
+description: |
+ The GICv5 architecture defines the guidelines to implement GICv5
+ compliant interrupt controllers for AArch64 systems.
+
+ The GICv5 specification can be found at
+ https://developer.arm.com/documentation/aes0070
+
+ The GICv5 architecture is composed of multiple components:
+ - one or more IRS (Interrupt Routing Service)
+ - zero or more ITS (Interrupt Translation Service)
+
+ The architecture defines:
+ - PE-Private Peripheral Interrupts (PPI)
+ - Shared Peripheral Interrupts (SPI)
+ - Logical Peripheral Interrupts (LPI)
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ const: arm,gic-v5
+
+ "#address-cells":
+ enum: [ 1, 2 ]
+
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+ ranges: true
+
+ "#interrupt-cells":
+ description: |
+ The 1st cell corresponds to the INTID.Type field in the INTID; 1 for PPI,
+ 3 for SPI. LPI interrupts must not be described in the bindings since
+ they are allocated dynamically by the software component managing them.
+
+ The 2nd cell contains the interrupt INTID.ID field.
+
+ The 3rd cell is the flags, encoded as follows:
+ bits[3:0] trigger type and level flags.
+
+ 1 = low-to-high edge triggered
+ 2 = high-to-low edge triggered
+ 4 = active high level-sensitive
+ 8 = active low level-sensitive
+
+ const: 3
+
+ interrupt-controller: true
+
+ interrupts:
+ description:
+ The VGIC maintenance interrupt.
+ maxItems: 1
+
+required:
+ - compatible
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+ - "#interrupt-cells"
+ - interrupt-controller
+
+patternProperties:
+ "^irs@[0-9a-f]+$":
+ type: object
+ description:
+ GICv5 has one or more Interrupt Routing Services (IRS) that are
+ responsible for handling IRQ state and routing.
+
+ additionalProperties: false
+
+ properties:
+ compatible:
+ const: arm,gic-v5-irs
+
+ reg:
+ minItems: 1
+ items:
+ - description: IRS config frames
+ - description: IRS setlpi frames
+
+ reg-names:
+ description:
+ Describe config and setlpi frames that are present.
+ "ns-" stands for non-secure, "s-" for secure, "realm-" for realm
+ and "el3-" for EL3.
+ minItems: 1
+ maxItems: 8
+ items:
+ enum: [ ns-config, s-config, realm-config, el3-config, ns-setlpi,
+ s-setlpi, realm-setlpi, el3-setlpi ]
+
+ "#address-cells":
+ enum: [ 1, 2 ]
+
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+ ranges: true
+
+ dma-noncoherent:
+ description:
+ Present if the GIC IRS permits programming shareability and
+ cacheability attributes but is connected to a non-coherent
+ downstream interconnect.
+
+ cpus:
+ description:
+ CPUs managed by the IRS.
+
+ arm,iaffids:
+ $ref: /schemas/types.yaml#/definitions/uint16-array
+ description:
+ Interrupt AFFinity ID (IAFFID) associated with the CPU whose
+ CPU node phandle is at the same index in the cpus array.
+
+ patternProperties:
+ "^its@[0-9a-f]+$":
+ type: object
+ description:
+ GICv5 has zero or more Interrupt Translation Services (ITS) that are
+ used to route Message Signalled Interrupts (MSI) to the CPUs. Each
+ ITS is connected to an IRS.
+ additionalProperties: false
+
+ properties:
+ compatible:
+ const: arm,gic-v5-its
+
+ reg:
+ items:
+ - description: ITS config frames
+
+ reg-names:
+ description:
+ Describe config frames that are present.
+ "ns-" stands for non-secure, "s-" for secure, "realm-" for realm
+ and "el3-" for EL3.
+ minItems: 1
+ maxItems: 4
+ items:
+ enum: [ ns-config, s-config, realm-config, el3-config ]
+
+ "#address-cells":
+ enum: [ 1, 2 ]
+
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+ ranges: true
+
+ dma-noncoherent:
+ description:
+ Present if the GIC ITS permits programming shareability and
+ cacheability attributes but is connected to a non-coherent
+ downstream interconnect.
+
+ patternProperties:
+ "^msi-controller@[0-9a-f]+$":
+ type: object
+ description:
+ GICv5 ITS has one or more translate register frames.
+ additionalProperties: false
+
+ properties:
+ reg:
+ items:
+ - description: ITS translate frames
+
+ reg-names:
+ description:
+ Describe translate frames that are present.
+ "ns-" stands for non-secure, "s-" for secure, "realm-" for realm
+ and "el3-" for EL3.
+ minItems: 1
+ maxItems: 4
+ items:
+ enum: [ ns-translate, s-translate, realm-translate, el3-translate ]
+
+ "#msi-cells":
+ description:
+ The single msi-cell is the DeviceID of the device which will
+ generate the MSI.
+ const: 1
+
+ msi-controller: true
+
+ required:
+ - reg
+ - reg-names
+ - "#msi-cells"
+ - msi-controller
+
+ required:
+ - compatible
+ - reg
+ - reg-names
+
+ required:
+ - compatible
+ - reg
+ - reg-names
+ - cpus
+ - arm,iaffids
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller {
+ compatible = "arm,gic-v5";
+
+ #interrupt-cells = <3>;
+ interrupt-controller;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ interrupts = <1 25 4>;
+
+ irs@2f1a0000 {
+ compatible = "arm,gic-v5-irs";
+ reg = <0x2f1a0000 0x10000>; // IRS_CONFIG_FRAME
+ reg-names = "ns-config";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>, <&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
+ arm,iaffids = /bits/ 16 <0 1 2 3 4 5 6 7>;
+
+ its@2f120000 {
+ compatible = "arm,gic-v5-its";
+ reg = <0x2f120000 0x10000>; // ITS_CONFIG_FRAME
+ reg-names = "ns-config";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ msi-controller@2f130000 {
+ reg = <0x2f130000 0x10000>; // ITS_TRANSLATE_FRAME
+ reg-names = "ns-translate";
+
+ #msi-cells = <1>;
+ msi-controller;
+ };
+ };
+ };
+ };
+...
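
As a consumer-side sketch of the three-cell encoding (INTID.Type, then
INTID.ID, then trigger flags), a hypothetical peripheral raising SPI 42 as
low-to-high edge-triggered would be described as follows; the node and
compatible are illustrative:

    device@2f200000 {
        compatible = "vendor,example-dev"; /* hypothetical */
        reg = <0x2f200000 0x1000>;
        interrupts = <3 42 1>;  /* SPI (type 3), INTID.ID 42, rising edge */
    };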
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,imx8qxp-dc-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,imx8qxp-dc-intc.yaml
new file mode 100644
index 000000000000..6985ee644a25
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,imx8qxp-dc-intc.yaml
@@ -0,0 +1,318 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/fsl,imx8qxp-dc-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8qxp Display Controller interrupt controller
+
+description: |
+ The Display Controller has a built-in interrupt controller with the following
+ features for all relevant HW events:
+
+ * Enable bit (mask)
+ * Status bit (set by an HW event)
+ * Preset bit (can be used by SW to set status)
+ * Clear bit (used by SW to reset the status)
+
+ Each interrupt can be connected as IRQ (maskable) and/or NMI (non-maskable).
+ Alternatively, the unmasked trigger signals for all HW events are provided,
+ allowing a global interrupt controller to be used instead.
+
+ Each interrupt can be protected against SW running in user mode. In that case,
+ only privileged AHB access can control the interrupt status.
+
+maintainers:
+ - Liu Ying <victor.liu@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qxp-dc-intc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 1
+
+ interrupts:
+ items:
+ - description: store9 shadow load interrupt (blit engine)
+ - description: store9 frame complete interrupt (blit engine)
+ - description: store9 sequence complete interrupt (blit engine)
+ - description:
+ extdst0 shadow load interrupt
+ (display controller, content stream 0)
+ - description:
+ extdst0 frame complete interrupt
+ (display controller, content stream 0)
+ - description:
+ extdst0 sequence complete interrupt
+ (display controller, content stream 0)
+ - description:
+ extdst4 shadow load interrupt
+ (display controller, safety stream 0)
+ - description:
+ extdst4 frame complete interrupt
+ (display controller, safety stream 0)
+ - description:
+ extdst4 sequence complete interrupt
+ (display controller, safety stream 0)
+ - description:
+ extdst1 shadow load interrupt
+ (display controller, content stream 1)
+ - description:
+ extdst1 frame complete interrupt
+ (display controller, content stream 1)
+ - description:
+ extdst1 sequence complete interrupt
+ (display controller, content stream 1)
+ - description:
+ extdst5 shadow load interrupt
+ (display controller, safety stream 1)
+ - description:
+ extdst5 frame complete interrupt
+ (display controller, safety stream 1)
+ - description:
+ extdst5 sequence complete interrupt
+ (display controller, safety stream 1)
+ - description:
+ disengcfg0 shadow load interrupt
+ (display controller, display stream 0)
+ - description:
+ disengcfg0 frame complete interrupt
+ (display controller, display stream 0)
+ - description:
+ disengcfg0 sequence complete interrupt
+ (display controller, display stream 0)
+ - description:
+ framegen0 programmable interrupt0
+ (display controller, display stream 0)
+ - description:
+ framegen0 programmable interrupt1
+ (display controller, display stream 0)
+ - description:
+ framegen0 programmable interrupt2
+ (display controller, display stream 0)
+ - description:
+ framegen0 programmable interrupt3
+ (display controller, display stream 0)
+ - description:
+ signature0 shadow load interrupt
+ (display controller, display stream 0)
+ - description:
+ signature0 measurement valid interrupt
+ (display controller, display stream 0)
+ - description:
+ signature0 error condition interrupt
+ (display controller, display stream 0)
+ - description:
+ disengcfg1 shadow load interrupt
+ (display controller, display stream 1)
+ - description:
+ disengcfg1 frame complete interrupt
+ (display controller, display stream 1)
+ - description:
+ disengcfg1 sequence complete interrupt
+ (display controller, display stream 1)
+ - description:
+ framegen1 programmable interrupt0
+ (display controller, display stream 1)
+ - description:
+ framegen1 programmable interrupt1
+ (display controller, display stream 1)
+ - description:
+ framegen1 programmable interrupt2
+ (display controller, display stream 1)
+ - description:
+ framegen1 programmable interrupt3
+ (display controller, display stream 1)
+ - description:
+ signature1 shadow load interrupt
+ (display controller, display stream 1)
+ - description:
+ signature1 measurement valid interrupt
+ (display controller, display stream 1)
+ - description:
+ signature1 error condition interrupt
+ (display controller, display stream 1)
+ - description: reserved
+ - description:
+ command sequencer error condition interrupt (command sequencer)
+ - description:
+ common control software interrupt0 (common control)
+ - description:
+ common control software interrupt1 (common control)
+ - description:
+ common control software interrupt2 (common control)
+ - description:
+ common control software interrupt3 (common control)
+ - description:
+ framegen0 synchronization status activated interrupt
+ (display controller, safety stream 0)
+ - description:
+ framegen0 synchronization status deactivated interrupt
+ (display controller, safety stream 0)
+ - description:
+ framegen0 synchronization status activated interrupt
+ (display controller, content stream 0)
+ - description:
+ framegen0 synchronization status deactivated interrupt
+ (display controller, content stream 0)
+ - description:
+ framegen1 synchronization status activated interrupt
+ (display controller, safety stream 1)
+ - description:
+ framegen1 synchronization status deactivated interrupt
+ (display controller, safety stream 1)
+ - description:
+ framegen1 synchronization status activated interrupt
+ (display controller, content stream 1)
+ - description:
+ framegen1 synchronization status deactivated interrupt
+ (display controller, content stream 1)
+ minItems: 49
+
+ interrupt-names:
+ items:
+ - const: store9_shdload
+ - const: store9_framecomplete
+ - const: store9_seqcomplete
+ - const: extdst0_shdload
+ - const: extdst0_framecomplete
+ - const: extdst0_seqcomplete
+ - const: extdst4_shdload
+ - const: extdst4_framecomplete
+ - const: extdst4_seqcomplete
+ - const: extdst1_shdload
+ - const: extdst1_framecomplete
+ - const: extdst1_seqcomplete
+ - const: extdst5_shdload
+ - const: extdst5_framecomplete
+ - const: extdst5_seqcomplete
+ - const: disengcfg_shdload0
+ - const: disengcfg_framecomplete0
+ - const: disengcfg_seqcomplete0
+ - const: framegen0_int0
+ - const: framegen0_int1
+ - const: framegen0_int2
+ - const: framegen0_int3
+ - const: sig0_shdload
+ - const: sig0_valid
+ - const: sig0_error
+ - const: disengcfg_shdload1
+ - const: disengcfg_framecomplete1
+ - const: disengcfg_seqcomplete1
+ - const: framegen1_int0
+ - const: framegen1_int1
+ - const: framegen1_int2
+ - const: framegen1_int3
+ - const: sig1_shdload
+ - const: sig1_valid
+ - const: sig1_error
+ - const: reserved
+ - const: cmdseq_error
+ - const: comctrl_sw0
+ - const: comctrl_sw1
+ - const: comctrl_sw2
+ - const: comctrl_sw3
+ - const: framegen0_primsync_on
+ - const: framegen0_primsync_off
+ - const: framegen0_secsync_on
+ - const: framegen0_secsync_off
+ - const: framegen1_primsync_on
+ - const: framegen1_primsync_off
+ - const: framegen1_secsync_on
+ - const: framegen1_secsync_off
+ minItems: 49
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupt-controller
+ - "#interrupt-cells"
+ - interrupts
+ - interrupt-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8-lpcg.h>
+
+ interrupt-controller@56180040 {
+ compatible = "fsl,imx8qxp-dc-intc";
+ reg = <0x56180040 0x60>;
+ clocks = <&dc0_lpcg IMX_LPCG_CLK_5>;
+ interrupt-controller;
+ interrupt-parent = <&dc0_irqsteer>;
+ #interrupt-cells = <1>;
+ interrupts = <448>, <449>, <450>, <64>,
+ <65>, <66>, <67>, <68>,
+ <69>, <70>, <193>, <194>,
+ <195>, <196>, <197>, <72>,
+ <73>, <74>, <75>, <76>,
+ <77>, <78>, <79>, <80>,
+ <81>, <199>, <200>, <201>,
+ <202>, <203>, <204>, <205>,
+ <206>, <207>, <208>, <5>,
+ <0>, <1>, <2>, <3>,
+ <4>, <82>, <83>, <84>,
+ <85>, <209>, <210>, <211>,
+ <212>;
+ interrupt-names = "store9_shdload",
+ "store9_framecomplete",
+ "store9_seqcomplete",
+ "extdst0_shdload",
+ "extdst0_framecomplete",
+ "extdst0_seqcomplete",
+ "extdst4_shdload",
+ "extdst4_framecomplete",
+ "extdst4_seqcomplete",
+ "extdst1_shdload",
+ "extdst1_framecomplete",
+ "extdst1_seqcomplete",
+ "extdst5_shdload",
+ "extdst5_framecomplete",
+ "extdst5_seqcomplete",
+ "disengcfg_shdload0",
+ "disengcfg_framecomplete0",
+ "disengcfg_seqcomplete0",
+ "framegen0_int0",
+ "framegen0_int1",
+ "framegen0_int2",
+ "framegen0_int3",
+ "sig0_shdload",
+ "sig0_valid",
+ "sig0_error",
+ "disengcfg_shdload1",
+ "disengcfg_framecomplete1",
+ "disengcfg_seqcomplete1",
+ "framegen1_int0",
+ "framegen1_int1",
+ "framegen1_int2",
+ "framegen1_int3",
+ "sig1_shdload",
+ "sig1_valid",
+ "sig1_error",
+ "reserved",
+ "cmdseq_error",
+ "comctrl_sw0",
+ "comctrl_sw1",
+ "comctrl_sw2",
+ "comctrl_sw3",
+ "framegen0_primsync_on",
+ "framegen0_primsync_off",
+ "framegen0_secsync_on",
+ "framegen0_secsync_off",
+ "framegen1_primsync_on",
+ "framegen1_primsync_off",
+ "framegen1_secsync_on",
+ "framegen1_secsync_off";
+ };
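
A consumer-side sketch, assuming the single interrupt cell selects one of
the controller's event lines in the order listed above (so 4 would be
extdst0_framecomplete); the client node, the cell value, and the dc0_intc
label are hypothetical:

    dc0-client {
        /* hypothetical consumer of the DC interrupt controller */
        interrupt-parent = <&dc0_intc>;
        interrupts = <4>;
    };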
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index 7b9d5507d6cc..89495f094d52 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -35,6 +35,7 @@ properties:
- description: Qcom SoCs implementing "qcom,smmu-500" and "arm,mmu-500"
items:
- enum:
+ - qcom,milos-smmu-500
- qcom,qcm2290-smmu-500
- qcom,qcs615-smmu-500
- qcom,qcs8300-smmu-500
@@ -88,6 +89,7 @@ properties:
- description: Qcom Adreno GPUs implementing "qcom,smmu-500" and "arm,mmu-500"
items:
- enum:
+ - qcom,milos-smmu-500
- qcom,qcm2290-smmu-500
- qcom,qcs615-smmu-500
- qcom,qcs8300-smmu-500
@@ -132,10 +134,6 @@ properties:
- qcom,sm7150-smmu-v2
- const: qcom,adreno-smmu
- const: qcom,smmu-v2
- - description: Qcom Adreno GPUs on Google Cheza platform
- items:
- - const: qcom,sdm845-smmu-v2
- - const: qcom,smmu-v2
- description: Marvell SoCs implementing "arm,mmu-500"
items:
- const: marvell,ap806-smmu-500
@@ -534,6 +532,7 @@ allOf:
compatible:
items:
- enum:
+ - qcom,milos-smmu-500
- qcom,sar2130p-smmu-500
- qcom,sm8550-smmu-500
- qcom,sm8650-smmu-500
diff --git a/Documentation/devicetree/bindings/leds/leds-lp50xx.yaml b/Documentation/devicetree/bindings/leds/leds-lp50xx.yaml
index 402c25424525..23f809906ba7 100644
--- a/Documentation/devicetree/bindings/leds/leds-lp50xx.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-lp50xx.yaml
@@ -81,7 +81,12 @@ patternProperties:
properties:
reg:
- maxItems: 1
+ items:
+ - minimum: 0
+ maximum: 2
+
+ description:
+ This property denotes the index within the LED bank.
required:
- reg
@@ -138,18 +143,18 @@ examples:
color = <LED_COLOR_ID_RGB>;
function = LED_FUNCTION_STANDBY;
- led@3 {
- reg = <0x3>;
+ led@0 {
+ reg = <0x0>;
color = <LED_COLOR_ID_RED>;
};
- led@4 {
- reg = <0x4>;
+ led@1 {
+ reg = <0x1>;
color = <LED_COLOR_ID_GREEN>;
};
- led@5 {
- reg = <0x5>;
+ led@2 {
+ reg = <0x2>;
color = <LED_COLOR_ID_BLUE>;
};
};
diff --git a/Documentation/devicetree/bindings/leds/onnn,ncp5623.yaml b/Documentation/devicetree/bindings/leds/onnn,ncp5623.yaml
index 9c9f3a682ba2..11d45c7f741d 100644
--- a/Documentation/devicetree/bindings/leds/onnn,ncp5623.yaml
+++ b/Documentation/devicetree/bindings/leds/onnn,ncp5623.yaml
@@ -19,7 +19,9 @@ properties:
- onnn,ncp5623
reg:
- const: 0x38
+ enum:
+ - 0x38
+ - 0x39
multi-led:
type: object
diff --git a/Documentation/devicetree/bindings/mailbox/allwinner,sun6i-a31-msgbox.yaml b/Documentation/devicetree/bindings/mailbox/allwinner,sun6i-a31-msgbox.yaml
index 75d5d97305e1..87d31963c1b7 100644
--- a/Documentation/devicetree/bindings/mailbox/allwinner,sun6i-a31-msgbox.yaml
+++ b/Documentation/devicetree/bindings/mailbox/allwinner,sun6i-a31-msgbox.yaml
@@ -68,13 +68,13 @@ examples:
#include <dt-bindings/reset/sun8i-h3-ccu.h>
msgbox: mailbox@1c17000 {
- compatible = "allwinner,sun8i-h3-msgbox",
- "allwinner,sun6i-a31-msgbox";
- reg = <0x01c17000 0x1000>;
- clocks = <&ccu CLK_BUS_MSGBOX>;
- resets = <&ccu RST_BUS_MSGBOX>;
- interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <1>;
+ compatible = "allwinner,sun8i-h3-msgbox",
+ "allwinner,sun6i-a31-msgbox";
+ reg = <0x01c17000 0x1000>;
+ clocks = <&ccu CLK_BUS_MSGBOX>;
+ resets = <&ccu RST_BUS_MSGBOX>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
};
...
diff --git a/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml b/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
index 385809ed1569..79963c9878ba 100644
--- a/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
+++ b/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
@@ -27,7 +27,7 @@ properties:
maxItems: 1
interrupts:
- minItems: 3
+ maxItems: 3
description:
Contains the interrupt information corresponding to each of the 3 links
of MHU.
@@ -46,8 +46,8 @@ additionalProperties: false
examples:
- |
mailbox@c883c404 {
- compatible = "amlogic,meson-gxbb-mhu";
- reg = <0xc883c404 0x4c>;
- interrupts = <208>, <209>, <210>;
- #mbox-cells = <1>;
+ compatible = "amlogic,meson-gxbb-mhu";
+ reg = <0xc883c404 0x4c>;
+ interrupts = <208>, <209>, <210>;
+ #mbox-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml b/Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
index 4c0668e5f0bd..474c1a0f99f3 100644
--- a/Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
+++ b/Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
@@ -78,11 +78,11 @@ additionalProperties: false
examples:
- |
- mailbox@77408000 {
- compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
- reg = <0x77408000 0x4000>;
- interrupts = <1 583 4>, <1 584 4>, <1 585 4>, <1 586 4>;
- interrupt-names = "send-empty", "send-not-empty",
- "recv-empty", "recv-not-empty";
- #mbox-cells = <0>;
- };
+ mailbox@77408000 {
+ compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
+ reg = <0x77408000 0x4000>;
+ interrupts = <1 583 4>, <1 584 4>, <1 585 4>, <1 586 4>;
+ interrupt-names = "send-empty", "send-not-empty",
+ "recv-empty", "recv-not-empty";
+ #mbox-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/mailbox/aspeed,ast2700-mailbox.yaml b/Documentation/devicetree/bindings/mailbox/aspeed,ast2700-mailbox.yaml
new file mode 100644
index 000000000000..600e2d63fccd
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/aspeed,ast2700-mailbox.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/aspeed,ast2700-mailbox.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ASPEED AST2700 mailbox controller
+
+maintainers:
+ - Jammy Huang <jammy_huang@aspeedtech.com>
+
+description: >
+ ASPEED AST2700 has multiple processors that need to communicate with each
+ other. The mailbox controller provides a way for these processors to send
+ messages to each other. It is a hardware-based inter-processor communication
+ mechanism that allows processors to send and receive messages through
+ dedicated channels.
+
+ The mailbox's TX and RX paths are independent, so one processor can send a
+ message while another processor is receiving one at the same time. There are
+ 4 channels available for both TX and RX operations. Each channel has a FIFO
+ buffer that holds messages of a fixed size (32 bytes).
+
+ The mailbox controller also supports interrupt generation, allowing
+ processors to notify each other when a message is available or when an event
+ occurs.
+
+properties:
+ compatible:
+ const: aspeed,ast2700-mailbox
+
+ reg:
+ items:
+ - description: TX control register
+ - description: RX control register
+
+ reg-names:
+ items:
+ - const: tx
+ - const: rx
+
+ interrupts:
+ maxItems: 1
+
+ "#mbox-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - "#mbox-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ mailbox@12c1c200 {
+ compatible = "aspeed,ast2700-mailbox";
+ reg = <0x12c1c200 0x100>, <0x12c1c300 0x100>;
+ reg-names = "tx", "rx";
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ };
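A consumer of this controller selects one of the four channels through the single mbox cell; in a sketch like the following, the client node name, the channel index, and the mbox label on the controller node are illustrative assumptions rather than part of the binding:

    client {
        /* request channel 0 of the AST2700 mailbox */
        mboxes = <&mbox 0>;
    };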
diff --git a/Documentation/devicetree/bindings/mailbox/brcm,bcm74110-mbox.yaml b/Documentation/devicetree/bindings/mailbox/brcm,bcm74110-mbox.yaml
new file mode 100644
index 000000000000..750cc96edb46
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/brcm,bcm74110-mbox.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/brcm,bcm74110-mbox.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM74110 Mailbox
+
+maintainers:
+ - Justin Chen <justin.chen@broadcom.com>
+ - Florian Fainelli <florian.fainelli@broadcom.com>
+
+description: Broadcom mailbox hardware first introduced with the BCM74110
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm74110-mbox
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: RX doorbell and watermark interrupts
+ - description: TX doorbell and watermark interrupts
+
+ "#mbox-cells":
+ const: 2
+ description:
+ The first cell is the channel type and the second cell is the shared memory slot
+
+ brcm,rx:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: RX Mailbox number
+
+ brcm,tx:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: TX Mailbox number
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#mbox-cells"
+ - brcm,rx
+ - brcm,tx
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ mailbox@a552000 {
+ compatible = "brcm,bcm74110-mbox";
+ reg = <0xa552000 0x1104>;
+ interrupts = <GIC_SPI 0x67 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 0x66 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <0x2>;
+ brcm,rx = <0x7>;
+ brcm,tx = <0x6>;
+ };
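The two-cell specifier documented above would appear in a client node roughly as follows; the mbox label and the cell values are illustrative assumptions:

    client {
        /* first cell: channel type, second cell: shared memory slot */
        mboxes = <&mbox 0 1>;
    };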
diff --git a/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml b/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml
index d97050e40fbf..f833b845de0d 100644
--- a/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml
+++ b/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml
@@ -59,9 +59,6 @@ description: |
<dt-bindings/mailbox/tegra186-hsp.h>
properties:
- $nodename:
- pattern: "^hsp@[0-9a-f]+$"
-
compatible:
oneOf:
- enum:
@@ -131,14 +128,10 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/mailbox/tegra186-hsp.h>
- hsp_top0: hsp@3c00000 {
+ mailbox@3c00000 {
compatible = "nvidia,tegra186-hsp";
reg = <0x03c00000 0xa0000>;
interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "doorbell";
#mbox-cells = <2>;
};
-
- client {
- mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_CCPLEX>;
- };
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
index ac726136f7e5..615ed103b7e6 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
@@ -251,7 +251,7 @@ examples:
# Example apcs with msm8996
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
- apcs_glb: mailbox@9820000 {
+ mailbox@9820000 {
compatible = "qcom,msm8996-apcs-hmss-global";
reg = <0x9820000 0x1000>;
@@ -259,13 +259,6 @@ examples:
#clock-cells = <0>;
};
- rpm-glink {
- compatible = "qcom,glink-rpm";
- interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
- qcom,rpm-msg-ram = <&rpm_msg_ram>;
- mboxes = <&apcs_glb 0>;
- };
-
# Example apcs with qcs404
- |
#define GCC_APSS_AHB_CLK_SRC 1
diff --git a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
index f69c0ec5d19d..e5c423130db6 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
@@ -24,6 +24,7 @@ properties:
compatible:
items:
- enum:
+ - qcom,milos-ipcc
- qcom,qcs8300-ipcc
- qcom,qdu1000-ipcc
- qcom,sa8255p-ipcc
diff --git a/Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml b/Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml
index 1a2001e58880..8504ceb64806 100644
--- a/Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml
+++ b/Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml
@@ -242,7 +242,7 @@ examples:
- |
/* OMAP4 */
#include <dt-bindings/interrupt-controller/arm-gic.h>
- mailbox: mailbox@4a0f4000 {
+ mailbox@4a0f4000 {
compatible = "ti,omap4-mailbox";
reg = <0x4a0f4000 0x200>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
@@ -260,13 +260,9 @@ examples:
};
};
- dsp {
- mboxes = <&mailbox &mbox_dsp>;
- };
-
- |
/* AM33xx */
- mailbox1: mailbox@480c8000 {
+ mailbox@480c8000 {
compatible = "ti,omap4-mailbox";
reg = <0x480c8000 0x200>;
interrupts = <77>;
@@ -283,7 +279,7 @@ examples:
- |
/* AM65x */
- mailbox0_cluster0: mailbox@31f80000 {
+ mailbox@31f80000 {
compatible = "ti,am654-mailbox";
reg = <0x31f80000 0x200>;
#mbox-cells = <1>;
diff --git a/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.yaml b/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.yaml
index eea822861804..c321b69f0ccd 100644
--- a/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.yaml
+++ b/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.yaml
@@ -36,7 +36,7 @@ properties:
- const: scfg
reg:
- minItems: 3
+ maxItems: 3
interrupt-names:
minItems: 1
@@ -68,12 +68,12 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
secure_proxy: mailbox@32c00000 {
- compatible = "ti,am654-secure-proxy";
- #mbox-cells = <1>;
- reg-names = "target_data", "rt", "scfg";
- reg = <0x32c00000 0x100000>,
- <0x32400000 0x100000>,
- <0x32800000 0x100000>;
- interrupt-names = "rx_011";
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ compatible = "ti,am654-secure-proxy";
+ #mbox-cells = <1>;
+ reg-names = "target_data", "rt", "scfg";
+ reg = <0x32c00000 0x100000>,
+ <0x32400000 0x100000>,
+ <0x32800000 0x100000>;
+ interrupt-names = "rx_011";
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/Documentation/devicetree/bindings/media/cdns,csi2rx.yaml b/Documentation/devicetree/bindings/media/cdns,csi2rx.yaml
index 2008a47c0580..6ed9a5621064 100644
--- a/Documentation/devicetree/bindings/media/cdns,csi2rx.yaml
+++ b/Documentation/devicetree/bindings/media/cdns,csi2rx.yaml
@@ -24,6 +24,14 @@ properties:
reg:
maxItems: 1
+ interrupts:
+ maxItems: 2
+
+ interrupt-names:
+ items:
+ - const: error_irq
+ - const: irq
+
clocks:
items:
- description: CSI2Rx system clock
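With the two interrupts added above, a csi2rx node carries them paired with the fixed names; a minimal sketch, assuming GIC wiring and hypothetical interrupt numbers:

    interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
                 <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
    interrupt-names = "error_irq", "irq";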
diff --git a/Documentation/devicetree/bindings/media/fsl,imx6q-vdoa.yaml b/Documentation/devicetree/bindings/media/fsl,imx6q-vdoa.yaml
new file mode 100644
index 000000000000..511ac0d67a7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/fsl,imx6q-vdoa.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/fsl,imx6q-vdoa.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Video Data Order Adapter
+
+description:
+ The Video Data Order Adapter (VDOA) is present on the i.MX6q. Its sole purpose
+ is to reorder video data from the macroblock tiled order produced by the CODA
+ 960 VPU to the conventional raster-scan order for scanout.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: "fsl,imx6q-vdoa"
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/imx6qdl-clock.h>
+
+ vdoa@21e4000 {
+ compatible = "fsl,imx6q-vdoa";
+ reg = <0x021e4000 0x4000>;
+ interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6QDL_CLK_VDOA>;
+ };
diff --git a/Documentation/devicetree/bindings/media/fsl,imx8qm-isi.yaml b/Documentation/devicetree/bindings/media/fsl,imx8qm-isi.yaml
new file mode 100644
index 000000000000..93f527e223af
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/fsl,imx8qm-isi.yaml
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/fsl,imx8qm-isi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: i.MX8QM Image Sensing Interface
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ The Image Sensing Interface (ISI) combines image processing pipelines with
+ DMA engines to process and capture frames originating from a variety of
+ sources. The inputs to the ISI go through Pixel Link interfaces, and their
+ number and nature are SoC-dependent. They cover both capture interfaces (MIPI
+ CSI-2 RX, HDMI RX, ...) and display engine outputs for writeback support.
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx8qm-isi
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 8
+
+ clock-names:
+ items:
+ - const: per0
+ - const: per1
+ - const: per2
+ - const: per3
+ - const: per4
+ - const: per5
+ - const: per6
+ - const: per7
+
+ interrupts:
+ maxItems: 8
+
+ power-domains:
+ maxItems: 8
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: MIPI CSI-2 RX 0
+ port@3:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: MIPI CSI-2 RX 1
+ port@4:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: HDMI RX
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - power-domains
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/imx8-clock.h>
+ #include <dt-bindings/clock/imx8-lpcg.h>
+ #include <dt-bindings/firmware/imx/rsrc.h>
+
+ image-controller@58100000 {
+ compatible = "fsl,imx8qm-isi";
+ reg = <0x58100000 0x80000>;
+ interrupts = <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 301 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 302 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pdma0_lpcg IMX_LPCG_CLK_0>,
+ <&pdma1_lpcg IMX_LPCG_CLK_0>,
+ <&pdma2_lpcg IMX_LPCG_CLK_0>,
+ <&pdma3_lpcg IMX_LPCG_CLK_0>,
+ <&pdma4_lpcg IMX_LPCG_CLK_0>,
+ <&pdma5_lpcg IMX_LPCG_CLK_0>,
+ <&pdma6_lpcg IMX_LPCG_CLK_0>,
+ <&pdma7_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "per0", "per1", "per2", "per3",
+ "per4", "per5", "per6", "per7";
+ power-domains = <&pd IMX_SC_R_ISI_CH0>, <&pd IMX_SC_R_ISI_CH1>,
+ <&pd IMX_SC_R_ISI_CH2>, <&pd IMX_SC_R_ISI_CH3>,
+ <&pd IMX_SC_R_ISI_CH4>, <&pd IMX_SC_R_ISI_CH5>,
+ <&pd IMX_SC_R_ISI_CH6>, <&pd IMX_SC_R_ISI_CH7>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ reg = <2>;
+ endpoint {
+ remote-endpoint = <&mipi_csi0_out>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/fsl,imx8qxp-isi.yaml b/Documentation/devicetree/bindings/media/fsl,imx8qxp-isi.yaml
new file mode 100644
index 000000000000..bb41996bd2e3
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/fsl,imx8qxp-isi.yaml
@@ -0,0 +1,106 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/fsl,imx8qxp-isi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: i.MX8QXP Image Sensing Interface
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ The Image Sensing Interface (ISI) combines image processing pipelines with
+ DMA engines to process and capture frames originating from a variety of
+ sources. The inputs to the ISI go through Pixel Link interfaces, and their
+ number and nature are SoC-dependent. They cover both capture interfaces (MIPI
+ CSI-2 RX, HDMI RX, ...) and display engine outputs for writeback support.
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx8qxp-isi
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 6
+
+ clock-names:
+ items:
+ - const: per0
+ - const: per1
+ - const: per2
+ - const: per3
+ - const: per4
+ - const: per5
+
+ interrupts:
+ maxItems: 6
+
+ power-domains:
+ maxItems: 6
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: MIPI CSI-2 RX 0
+ port@6:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: CSI-2 Parallel RX
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - power-domains
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/imx8-clock.h>
+ #include <dt-bindings/clock/imx8-lpcg.h>
+ #include <dt-bindings/firmware/imx/rsrc.h>
+
+ image-controller@58100000 {
+ compatible = "fsl,imx8qxp-isi";
+ reg = <0x58100000 0x60000>;
+ interrupts = <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 301 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 302 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pdma0_lpcg IMX_LPCG_CLK_0>,
+ <&pdma1_lpcg IMX_LPCG_CLK_0>,
+ <&pdma2_lpcg IMX_LPCG_CLK_0>,
+ <&pdma3_lpcg IMX_LPCG_CLK_0>,
+ <&pdma4_lpcg IMX_LPCG_CLK_0>,
+ <&pdma5_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "per0", "per1", "per2", "per3", "per4", "per5";
+ power-domains = <&pd IMX_SC_R_ISI_CH0>, <&pd IMX_SC_R_ISI_CH1>,
+ <&pd IMX_SC_R_ISI_CH2>, <&pd IMX_SC_R_ISI_CH3>,
+ <&pd IMX_SC_R_ISI_CH4>, <&pd IMX_SC_R_ISI_CH5>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ reg = <2>;
+ endpoint {
+ remote-endpoint = <&mipi_csi0_out>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/fsl-vdoa.txt b/Documentation/devicetree/bindings/media/fsl-vdoa.txt
deleted file mode 100644
index 6c5628530bb7..000000000000
--- a/Documentation/devicetree/bindings/media/fsl-vdoa.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Freescale Video Data Order Adapter
-==================================
-
-The Video Data Order Adapter (VDOA) is present on the i.MX6q. Its sole purpose
-is to reorder video data from the macroblock tiled order produced by the CODA
-960 VPU to the conventional raster-scan order for scanout.
-
-Required properties:
-- compatible: must be "fsl,imx6q-vdoa"
-- reg: the register base and size for the device registers
-- interrupts: the VDOA interrupt
-- clocks: the vdoa clock
-
-Example:
-
-vdoa@21e4000 {
- compatible = "fsl,imx6q-vdoa";
- reg = <0x021e4000 0x4000>;
- interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6QDL_CLK_VDOA>;
-};
diff --git a/Documentation/devicetree/bindings/media/i2c/mipi-ccs.yaml b/Documentation/devicetree/bindings/media/i2c/mipi-ccs.yaml
index f8ace8cbccdb..bc664a016396 100644
--- a/Documentation/devicetree/bindings/media/i2c/mipi-ccs.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/mipi-ccs.yaml
@@ -23,6 +23,9 @@ description:
More detailed documentation can be found in
Documentation/devicetree/bindings/media/video-interfaces.txt .
+allOf:
+ - $ref: /schemas/media/video-interface-devices.yaml#
+
properties:
compatible:
oneOf:
@@ -58,16 +61,10 @@ properties:
documentation.
maxItems: 1
- flash-leds:
- description: Flash LED phandles. See ../video-interfaces.txt for details.
-
- lens-focus:
- description: Lens focus controller phandles. See ../video-interfaces.txt
- for details.
+ flash-leds: true
+ lens-focus: true
rotation:
- description: Rotation of the sensor. See ../video-interfaces.txt for
- details.
enum: [ 0, 180 ]
port:
diff --git a/Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml b/Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml
index f6b87892068a..a89f740214f7 100644
--- a/Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml
@@ -70,6 +70,15 @@ properties:
- bus-type
- link-frequencies
+ slew-rate:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Slew rate of the output pads DOUT[7:0], LINE_VALID, FRAME_VALID and
+ PIXCLK. Higher values imply steeper voltage edges on the pads.
+ minimum: 0
+ maximum: 7
+ default: 7
+
required:
- compatible
- reg
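As a sketch, the new property sits alongside the sensor node's other properties; the value here is an arbitrary pick from the allowed 0-7 range:

    /* steeper edges than the slowest setting, below the default of 7 */
    slew-rate = <5>;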
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov8858.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov8858.yaml
index a65f921ec0fd..491f2931e6bc 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov8858.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov8858.yaml
@@ -15,6 +15,8 @@ description: |
controlled through an I2C-compatible SCCB bus. The sensor transmits images
on a MIPI CSI-2 output interface with up to 4 data lanes.
+$ref: /schemas/media/video-interface-devices.yaml#
+
properties:
compatible:
const: ovti,ov8858
@@ -69,7 +71,7 @@ required:
- clocks
- port
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
index 0162eec8ca99..aea99ebf8e9e 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
@@ -33,20 +33,21 @@ properties:
clock-frequency:
description: Frequency of the xclk clock in Hz.
+ deprecated: true
enable-gpios:
description: GPIO descriptor for the enable pin.
maxItems: 1
- vdddo-supply:
- description: Chip digital IO regulator (1.8V).
-
vdda-supply:
description: Chip analog regulator (2.7V).
vddd-supply:
description: Chip digital core regulator (1.12V).
+ vdddo-supply:
+ description: Chip digital IO regulator (1.8V).
+
flash-leds: true
lens-focus: true
@@ -84,11 +85,10 @@ required:
- compatible
- reg
- clocks
- - clock-frequency
- enable-gpios
- - vdddo-supply
- vdda-supply
- vddd-supply
+ - vdddo-supply
- port
unevaluatedProperties: false
@@ -104,22 +104,25 @@ examples:
camera-sensor@1a {
compatible = "sony,imx214";
reg = <0x1a>;
- vdddo-supply = <&pm8994_lvs1>;
- vddd-supply = <&camera_vddd_1v12>;
+
+ clocks = <&camera_clk>;
+ assigned-clocks = <&camera_clk>;
+ assigned-clock-rates = <24000000>;
+
+ enable-gpios = <&msmgpio 25 GPIO_ACTIVE_HIGH>;
+
vdda-supply = <&pm8994_l17>;
+ vddd-supply = <&camera_vddd_1v12>;
+ vdddo-supply = <&pm8994_lvs1>;
+
lens-focus = <&ad5820>;
- enable-gpios = <&msmgpio 25 GPIO_ACTIVE_HIGH>;
- clocks = <&camera_clk>;
- clock-frequency = <24000000>;
port {
imx214_ep: endpoint {
data-lanes = <1 2 3 4>;
- link-frequencies = /bits/ 64 <480000000>;
+ link-frequencies = /bits/ 64 <600000000>;
remote-endpoint = <&csiphy0_ep>;
};
};
};
};
-
-...
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx258.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx258.yaml
index 975c1d77c8e5..421b935b52bc 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx258.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx258.yaml
@@ -18,6 +18,8 @@ description: |-
The camera module does not expose the model through registers, so the
exact model needs to be specified.
+$ref: /schemas/media/video-interface-devices.yaml#
+
properties:
compatible:
enum:
@@ -81,7 +83,7 @@ required:
- reg
- port
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/media/nxp,imx8-jpeg.yaml b/Documentation/devicetree/bindings/media/nxp,imx8-jpeg.yaml
index 2be30c5fdc83..4cba42ba7cf7 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx8-jpeg.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx8-jpeg.yaml
@@ -22,10 +22,14 @@ properties:
- nxp,imx8qxp-jpgdec
- nxp,imx8qxp-jpgenc
- items:
- - const: nxp,imx8qm-jpgdec
+ - enum:
+ - nxp,imx8qm-jpgdec
+ - nxp,imx95-jpgdec
- const: nxp,imx8qxp-jpgdec
- items:
- - const: nxp,imx8qm-jpgenc
+ - enum:
+ - nxp,imx8qm-jpgenc
+ - nxp,imx95-jpgenc
- const: nxp,imx8qxp-jpgenc
reg:
@@ -48,7 +52,7 @@ properties:
description:
List of phandle and PM domain specifier as documented in
Documentation/devicetree/bindings/power/power_domain.txt
- minItems: 2 # Wrapper and 1 slot
+ minItems: 1 # Wrapper and all slots
maxItems: 5 # Wrapper and 4 slots
required:
@@ -58,6 +62,24 @@ required:
- interrupts
- power-domains
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nxp,imx95-jpgenc
+ - nxp,imx95-jpgdec
+ then:
+ properties:
+ power-domains:
+ maxItems: 1
+ else:
+ properties:
+ power-domains:
+ minItems: 2 # Wrapper and 1 slot
+
additionalProperties: false
examples:
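Under the new allOf rule, an i.MX95 decoder takes exactly one power domain while the i.MX8 parts keep the wrapper-plus-slots domains. A hypothetical i.MX95 node, in which the address, interrupt and power-domain specifiers are invented for illustration:

    jpegdec@4c500000 {
        compatible = "nxp,imx95-jpgdec", "nxp,imx8qxp-jpgdec";
        reg = <0x4c500000 0x10000>;
        interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
        power-domains = <&scmi_devpd 10>;	/* single domain on i.MX95 */
    };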
diff --git a/Documentation/devicetree/bindings/media/nxp,imx8mq-mipi-csi2.yaml b/Documentation/devicetree/bindings/media/nxp,imx8mq-mipi-csi2.yaml
index 2a14e3b0e004..3389bab266a9 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx8mq-mipi-csi2.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx8mq-mipi-csi2.yaml
@@ -16,11 +16,19 @@ description: |-
properties:
compatible:
- enum:
- - fsl,imx8mq-mipi-csi2
+ oneOf:
+ - enum:
+ - fsl,imx8mq-mipi-csi2
+ - fsl,imx8qxp-mipi-csi2
+ - items:
+ - const: fsl,imx8qm-mipi-csi2
+ - const: fsl,imx8qxp-mipi-csi2
reg:
- maxItems: 1
+ items:
+ - description: MIPI CSI-2 RX host controller register.
+ - description: MIPI CSI-2 control and status register (csr).
+ minItems: 1
clocks:
items:
@@ -46,6 +54,7 @@ properties:
- description: CORE_RESET reset register bit definition
- description: PHY_REF_RESET reset register bit definition
- description: ESC_RESET reset register bit definition
+ minItems: 1
fsl,mipi-phy-gpr:
description: |
@@ -113,9 +122,30 @@ required:
- clock-names
- power-domains
- resets
- - fsl,mipi-phy-gpr
- ports
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx8qxp-mipi-csi2
+ then:
+ properties:
+ reg:
+ minItems: 2
+ resets:
+ maxItems: 1
+ else:
+ properties:
+ reg:
+ maxItems: 1
+ resets:
+ minItems: 3
+ required:
+ - fsl,mipi-phy-gpr
+
additionalProperties: false
examples:
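For the i.MX8QXP variant, the conditional above flips reg to two regions and resets to a single entry; a fragment sketch with invented addresses and reset phandle:

    reg = <0x58227000 0x1000>,	/* MIPI CSI-2 host controller */
          <0x58221000 0x1000>;	/* control and status registers (csr) */
    resets = <&pcc2 0>;		/* one reset line instead of three */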
diff --git a/Documentation/devicetree/bindings/media/qcom,x1e80100-camss.yaml b/Documentation/devicetree/bindings/media/qcom,x1e80100-camss.yaml
index 113565cf2a99..b075341caafc 100644
--- a/Documentation/devicetree/bindings/media/qcom,x1e80100-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,x1e80100-camss.yaml
@@ -133,7 +133,7 @@ properties:
CSI input ports.
patternProperties:
- "^port@[0-3]+$":
+ "^port@[0-3]$":
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
@@ -146,15 +146,16 @@ properties:
unevaluatedProperties: false
properties:
- clock-lanes:
- maxItems: 1
-
data-lanes:
minItems: 1
maxItems: 4
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
required:
- - clock-lanes
- data-lanes
required:
@@ -189,7 +190,7 @@ examples:
#address-cells = <2>;
#size-cells = <2>;
- camss: isp@acb6000 {
+ camss: isp@acb7000 {
compatible = "qcom,x1e80100-camss";
reg = <0 0x0acb7000 0 0x2000>,
@@ -357,7 +358,6 @@ examples:
port@0 {
reg = <0>;
csiphy_ep0: endpoint {
- clock-lanes = <7>;
data-lanes = <0 1>;
remote-endpoint = <&sensor_ep>;
};
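The endpoint now describes the PHY through bus-type instead of clock-lanes; a sketch of the D-PHY case, assuming <dt-bindings/media/video-interfaces.h> is included for the constant:

    csiphy_ep0: endpoint {
        bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;	/* 4 */
        data-lanes = <0 1>;
        remote-endpoint = <&sensor_ep>;
    };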
diff --git a/Documentation/devicetree/bindings/media/renesas,fcp.yaml b/Documentation/devicetree/bindings/media/renesas,fcp.yaml
index 7bf1266223e8..cf92dfe69637 100644
--- a/Documentation/devicetree/bindings/media/renesas,fcp.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,fcp.yaml
@@ -30,6 +30,7 @@ properties:
- renesas,r9a07g043u-fcpvd # RZ/G2UL
- renesas,r9a07g044-fcpvd # RZ/G2{L,LC}
- renesas,r9a07g054-fcpvd # RZ/V2L
+ - renesas,r9a09g056-fcpvd # RZ/V2N
- renesas,r9a09g057-fcpvd # RZ/V2H(P)
- const: renesas,fcpv # Generic FCP for VSP fallback
diff --git a/Documentation/devicetree/bindings/media/renesas,vsp1.yaml b/Documentation/devicetree/bindings/media/renesas,vsp1.yaml
index fcf7219b1f40..07a97dd87a5b 100644
--- a/Documentation/devicetree/bindings/media/renesas,vsp1.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,vsp1.yaml
@@ -25,6 +25,7 @@ properties:
- enum:
- renesas,r9a07g043u-vsp2 # RZ/G2UL
- renesas,r9a07g054-vsp2 # RZ/V2L
+ - renesas,r9a09g056-vsp2 # RZ/V2N
- renesas,r9a09g057-vsp2 # RZ/V2H(P)
- const: renesas,r9a07g044-vsp2 # RZ/G2L fallback
diff --git a/Documentation/devicetree/bindings/media/rockchip,vdec.yaml b/Documentation/devicetree/bindings/media/rockchip,vdec.yaml
index 08b02ec16755..96b6c8938768 100644
--- a/Documentation/devicetree/bindings/media/rockchip,vdec.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip,vdec.yaml
@@ -10,13 +10,15 @@ maintainers:
- Heiko Stuebner <heiko@sntech.de>
description: |-
- The Rockchip rk3399 has a stateless Video Decoder that can decodes H.264,
- HEVC an VP9 streams.
+ Rockchip SoCs have variants of the same stateless Video Decoder that can
+ decode H.264, HEVC, VP9 and AV1 streams, depending on the variant.
properties:
compatible:
oneOf:
- const: rockchip,rk3399-vdec
+ - const: rockchip,rk3576-vdec
+ - const: rockchip,rk3588-vdec
- items:
- enum:
- rockchip,rk3228-vdec
@@ -24,35 +26,72 @@ properties:
- const: rockchip,rk3399-vdec
reg:
- maxItems: 1
+ minItems: 1
+ items:
+ - description: The function configuration registers base
+ - description: The link table configuration registers base
+ - description: The cache configuration registers base
+
+ reg-names:
+ items:
+ - const: function
+ - const: link
+ - const: cache
interrupts:
maxItems: 1
clocks:
+ minItems: 4
items:
- description: The Video Decoder AXI interface clock
- description: The Video Decoder AHB interface clock
- description: The Video Decoded CABAC clock
- description: The Video Decoder core clock
+ - description: The Video Decoder HEVC CABAC clock
clock-names:
+ minItems: 4
items:
- const: axi
- const: ahb
- const: cabac
- const: core
+ - const: hevc_cabac
assigned-clocks: true
assigned-clock-rates: true
+ resets:
+ items:
+ - description: The Video Decoder AXI interface reset
+ - description: The Video Decoder AHB interface reset
+ - description: The Video Decoder CABAC reset
+ - description: The Video Decoder core reset
+ - description: The Video Decoder HEVC CABAC reset
+
+ reset-names:
+ items:
+ - const: axi
+ - const: ahb
+ - const: cabac
+ - const: core
+ - const: hevc_cabac
+
power-domains:
maxItems: 1
iommus:
maxItems: 1
+ sram:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: |
+ Phandle to a reserved on-chip SRAM region.
+ Some SoCs, such as the rk3588, provide on-chip SRAM to store
+ temporary buffers during decoding.
+
required:
- compatible
- reg
@@ -61,6 +100,41 @@ required:
- clock-names
- power-domains
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,rk3576-vdec
+ - rockchip,rk3588-vdec
+ then:
+ properties:
+ reg:
+ minItems: 3
+ reg-names:
+ minItems: 3
+ clocks:
+ minItems: 5
+ clock-names:
+ minItems: 5
+ resets:
+ minItems: 5
+ reset-names:
+ minItems: 5
+ else:
+ properties:
+ reg:
+ maxItems: 1
+ reg-names: false
+ clocks:
+ maxItems: 4
+ clock-names:
+ maxItems: 4
+ resets: false
+ reset-names: false
+ sram: false
+
additionalProperties: false
examples:
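For the new variants, the conditional requires the three named register regions plus the hevc_cabac clock and reset; a hypothetical rk3588 node in which every address and specifier is invented for illustration:

    vdec@fdc30000 {
        compatible = "rockchip,rk3588-vdec";
        reg = <0xfdc30000 0x100>, <0xfdc30100 0x100>, <0xfdc30200 0x100>;
        reg-names = "function", "link", "cache";
        interrupts = <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&cru 0>, <&cru 1>, <&cru 2>, <&cru 3>, <&cru 4>;
        clock-names = "axi", "ahb", "cabac", "core", "hevc_cabac";
        resets = <&cru 10>, <&cru 11>, <&cru 12>, <&cru 13>, <&cru 14>;
        reset-names = "axi", "ahb", "cabac", "core", "hevc_cabac";
        power-domains = <&power 0>;
        sram = <&vdec_sram>;
    };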
diff --git a/Documentation/devicetree/bindings/mfd/fsl,imx8qxp-csr.yaml b/Documentation/devicetree/bindings/mfd/fsl,imx8qxp-csr.yaml
deleted file mode 100644
index 20067002cc4a..000000000000
--- a/Documentation/devicetree/bindings/mfd/fsl,imx8qxp-csr.yaml
+++ /dev/null
@@ -1,192 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/mfd/fsl,imx8qxp-csr.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Freescale i.MX8qm/qxp Control and Status Registers Module
-
-maintainers:
- - Liu Ying <victor.liu@nxp.com>
-
-description: |
- As a system controller, the Freescale i.MX8qm/qxp Control and Status
- Registers(CSR) module represents a set of miscellaneous registers of a
- specific subsystem. It may provide control and/or status report interfaces
- to a mix of standalone hardware devices within that subsystem. One typical
- use-case is for some other nodes to acquire a reference to the syscon node
- by phandle, and the other typical use-case is that the operating system
- should consider all subnodes of the CSR module as separate child devices.
-
-properties:
- $nodename:
- pattern: "^syscon@[0-9a-f]+$"
-
- compatible:
- items:
- - enum:
- - fsl,imx8qxp-mipi-lvds-csr
- - fsl,imx8qm-lvds-csr
- - const: syscon
- - const: simple-mfd
-
- reg:
- maxItems: 1
-
- clocks:
- maxItems: 1
-
- clock-names:
- const: ipg
-
-patternProperties:
- "^(ldb|phy|pxl2dpi)$":
- type: object
- description: The possible child devices of the CSR module.
-
-required:
- - compatible
- - reg
- - clocks
- - clock-names
-
-allOf:
- - if:
- properties:
- compatible:
- contains:
- const: fsl,imx8qxp-mipi-lvds-csr
- then:
- required:
- - pxl2dpi
- - ldb
-
- - if:
- properties:
- compatible:
- contains:
- const: fsl,imx8qm-lvds-csr
- then:
- required:
- - phy
- - ldb
-
-additionalProperties: false
-
-examples:
- - |
- #include <dt-bindings/clock/imx8-lpcg.h>
- #include <dt-bindings/firmware/imx/rsrc.h>
- mipi_lvds_0_csr: syscon@56221000 {
- compatible = "fsl,imx8qxp-mipi-lvds-csr", "syscon", "simple-mfd";
- reg = <0x56221000 0x1000>;
- clocks = <&mipi_lvds_0_di_mipi_lvds_regs_lpcg IMX_LPCG_CLK_4>;
- clock-names = "ipg";
-
- mipi_lvds_0_pxl2dpi: pxl2dpi {
- compatible = "fsl,imx8qxp-pxl2dpi";
- fsl,sc-resource = <IMX_SC_R_MIPI_0>;
- power-domains = <&pd IMX_SC_R_MIPI_0>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- mipi_lvds_0_pxl2dpi_dc0_pixel_link0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&dc0_pixel_link0_mipi_lvds_0_pxl2dpi>;
- };
-
- mipi_lvds_0_pxl2dpi_dc0_pixel_link1: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&dc0_pixel_link1_mipi_lvds_0_pxl2dpi>;
- };
- };
-
- port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- mipi_lvds_0_pxl2dpi_mipi_lvds_0_ldb_ch0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&mipi_lvds_0_ldb_ch0_mipi_lvds_0_pxl2dpi>;
- };
-
- mipi_lvds_0_pxl2dpi_mipi_lvds_0_ldb_ch1: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&mipi_lvds_0_ldb_ch1_mipi_lvds_0_pxl2dpi>;
- };
- };
- };
- };
-
- mipi_lvds_0_ldb: ldb {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx8qxp-ldb";
- clocks = <&clk IMX_SC_R_LVDS_0 IMX_SC_PM_CLK_MISC2>,
- <&clk IMX_SC_R_LVDS_0 IMX_SC_PM_CLK_BYPASS>;
- clock-names = "pixel", "bypass";
- power-domains = <&pd IMX_SC_R_LVDS_0>;
-
- channel@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- phys = <&mipi_lvds_0_phy>;
- phy-names = "lvds_phy";
-
- port@0 {
- reg = <0>;
-
- mipi_lvds_0_ldb_ch0_mipi_lvds_0_pxl2dpi: endpoint {
- remote-endpoint = <&mipi_lvds_0_pxl2dpi_mipi_lvds_0_ldb_ch0>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- /* ... */
- };
- };
-
- channel@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- phys = <&mipi_lvds_0_phy>;
- phy-names = "lvds_phy";
-
- port@0 {
- reg = <0>;
-
- mipi_lvds_0_ldb_ch1_mipi_lvds_0_pxl2dpi: endpoint {
- remote-endpoint = <&mipi_lvds_0_pxl2dpi_mipi_lvds_0_ldb_ch1>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- /* ... */
- };
- };
- };
- };
-
- mipi_lvds_0_phy: phy@56228300 {
- compatible = "fsl,imx8qxp-mipi-dphy";
- reg = <0x56228300 0x100>;
- clocks = <&clk IMX_SC_R_LVDS_0 IMX_SC_PM_CLK_PHY>;
- clock-names = "phy_ref";
- #phy-cells = <0>;
- fsl,syscon = <&mipi_lvds_0_csr>;
- power-domains = <&pd IMX_SC_R_MIPI_0>;
- };
diff --git a/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt b/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
index f00827c9b67f..18c3fc26ca93 100644
--- a/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
+++ b/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
@@ -19,7 +19,7 @@ which are described in the following files:
- Documentation/devicetree/bindings/power/supply/cpcap-battery.yaml
- Documentation/devicetree/bindings/power/supply/cpcap-charger.yaml
- Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
-- Documentation/devicetree/bindings/phy/phy-cpcap-usb.txt
+- Documentation/devicetree/bindings/phy/motorola,cpcap-usb-phy.yaml
- Documentation/devicetree/bindings/input/cpcap-pwrbutton.txt
- Documentation/devicetree/bindings/rtc/cpcap-rtc.txt
- Documentation/devicetree/bindings/leds/leds-cpcap.txt
diff --git a/Documentation/devicetree/bindings/mfd/mxs-lradc.txt b/Documentation/devicetree/bindings/mfd/mxs-lradc.txt
deleted file mode 100644
index 755cbef0647d..000000000000
--- a/Documentation/devicetree/bindings/mfd/mxs-lradc.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-* Freescale MXS LRADC device driver
-
-Required properties:
-- compatible: Should be "fsl,imx23-lradc" for i.MX23 SoC and "fsl,imx28-lradc"
- for i.MX28 SoC
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the LRADC interrupts
-
-Optional properties:
-- fsl,lradc-touchscreen-wires: Number of wires used to connect the touchscreen
- to LRADC. Valid value is either 4 or 5. If this
- property is not present, then the touchscreen is
- disabled. 5 wires is valid for i.MX28 SoC only.
-- fsl,ave-ctrl: number of samples per direction to calculate an average value.
- Allowed value is 1 ... 32, default is 4
-- fsl,ave-delay: delay between consecutive samples. Allowed value is
- 2 ... 2048. It is used if 'fsl,ave-ctrl' > 1, counts at
- 2 kHz and its default is 2 (= 1 ms)
-- fsl,settling: delay between plate switch to next sample. Allowed value is
- 1 ... 2047. It counts at 2 kHz and its default is
- 10 (= 5 ms)
-
-Example for i.MX23 SoC:
-
- lradc@80050000 {
- compatible = "fsl,imx23-lradc";
- reg = <0x80050000 0x2000>;
- interrupts = <36 37 38 39 40 41 42 43 44>;
- fsl,lradc-touchscreen-wires = <4>;
- fsl,ave-ctrl = <4>;
- fsl,ave-delay = <2>;
- fsl,settling = <10>;
- };
-
-Example for i.MX28 SoC:
-
- lradc@80050000 {
- compatible = "fsl,imx28-lradc";
- reg = <0x80050000 0x2000>;
- interrupts = <10 14 15 16 17 18 19 20 21 22 23 24 25>;
- fsl,lradc-touchscreen-wires = <5>;
- fsl,ave-ctrl = <4>;
- fsl,ave-delay = <2>;
- fsl,settling = <10>;
- };
diff --git a/Documentation/devicetree/bindings/mfd/mxs-lradc.yaml b/Documentation/devicetree/bindings/mfd/mxs-lradc.yaml
new file mode 100644
index 000000000000..782b2f4005a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/mxs-lradc.yaml
@@ -0,0 +1,134 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/mxs-lradc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale MXS Low-Resolution ADC (LRADC)
+
+maintainers:
+ - Dario Binacchi <dario.binacchi@amarulasolutions.com>
+
+description:
+ The LRADC provides 16 physical channels of 12-bit resolution for
+ analog-to-digital conversion and includes an integrated 4-wire/5-wire
+ touchscreen controller.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - fsl,imx23-lradc
+ - fsl,imx28-lradc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+
+ interrupts:
+ minItems: 9
+ maxItems: 13
+
+ fsl,lradc-touchscreen-wires:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [4, 5]
+ description: >
+ Number of wires used to connect the touchscreen to LRADC.
+
+ If this property is not present, then the touchscreen is disabled.
+
+ fsl,ave-ctrl:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 32
+ default: 4
+ description:
+ Number of samples per direction to calculate an average value.
+
+ fsl,ave-delay:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 2
+ maximum: 2048
+ default: 2
+ description: >
+ Delay between consecutive samples.
+
+ It is used if 'fsl,ave-ctrl' > 1, counts at 2 kHz and its default value (2)
+ is 1 ms.
+
+ fsl,settling:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 2047
+ default: 10
+ description: >
+ Delay between plate switch to next sample.
+
+ It counts at 2 kHz and its default (10) is 5 ms.
+
+ "#io-channel-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx23-lradc
+then:
+ properties:
+ interrupts:
+ items:
+ - description: channel 0
+ - description: channel 1
+ - description: channel 2
+ - description: channel 3
+ - description: channel 4
+ - description: channel 5
+ - description: touchscreen
+ - description: channel 6
+ - description: channel 7
+ fsl,lradc-touchscreen-wires:
+ const: 4
+else:
+ properties:
+ interrupts:
+ items:
+ - description: threshold 0
+ - description: threshold 1
+ - description: channel 0
+ - description: channel 1
+ - description: channel 2
+ - description: channel 3
+ - description: channel 4
+ - description: channel 5
+ - description: button 0
+ - description: button 1
+ - description: touchscreen
+ - description: channel 6
+ - description: channel 7
+
+additionalProperties: false
+
+examples:
+ - |
+ lradc@80050000 {
+ compatible = "fsl,imx23-lradc";
+ reg = <0x80050000 0x2000>;
+ interrupts = <36>, <37>, <38>, <39>, <40>,
+ <41>, <42>, <43>, <44>;
+ clocks = <&clks 26>;
+ #io-channel-cells = <1>;
+ fsl,lradc-touchscreen-wires = <4>;
+ fsl,ave-ctrl = <4>;
+ fsl,ave-delay = <2>;
+ fsl,settling = <10>;
+ };
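With #io-channel-cells = <1>, an IIO consumer selects an LRADC channel with a single cell; a sketch in which the lradc label, node name and channel number are illustrative:

    battery-monitor {
        io-channels = <&lradc 7>;
        io-channel-names = "battery";
    };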
diff --git a/Documentation/devicetree/bindings/mfd/nxp,lpc1850-creg.yaml b/Documentation/devicetree/bindings/mfd/nxp,lpc1850-creg.yaml
new file mode 100644
index 000000000000..89b4892e9ca7
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/nxp,lpc1850-creg.yaml
@@ -0,0 +1,148 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/nxp,lpc1850-creg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: The NXP LPC18xx/43xx CREG (Configuration Registers) block
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - nxp,lpc1850-creg
+ - const: syscon
+ - const: simple-mfd
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ clock-controller:
+ type: object
+ description:
+ The NXP LPC18xx/43xx CREG (Configuration Registers) block contains
+ control registers for two low speed clocks. One of the clocks is a
+ 32 kHz oscillator driver with power up/down and clock gating. The
+ other is a fixed divider that derives a 1 kHz clock from the 32 kHz
+ oscillator.
+
+ These clocks are used by the RTC and the Event Router peripherals.
+ The 32 kHz can also be routed to other peripherals to enable low
+ power modes.
+
+ properties:
+ compatible:
+ const: nxp,lpc1850-creg-clk
+
+ clocks:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+ description: |
+ 0 1 kHz clock
+ 1 32 kHz Oscillator
+
+ required:
+ - compatible
+ - clocks
+ - '#clock-cells'
+
+ additionalProperties: false
+
+ phy:
+ type: object
+ description: The internal USB OTG PHY in NXP LPC18xx and LPC43xx SoCs
+ properties:
+ compatible:
+ const: nxp,lpc1850-usb-otg-phy
+
+ clocks:
+ maxItems: 1
+
+ '#phy-cells':
+ const: 0
+
+ required:
+ - compatible
+ - clocks
+ - '#phy-cells'
+
+ additionalProperties: false
+
+ dma-mux:
+ type: object
+ description: NXP LPC18xx/43xx DMA MUX (DMA request router)
+ properties:
+ compatible:
+ const: nxp,lpc1850-dmamux
+
+ '#dma-cells':
+ const: 3
+ description: |
+ Should be set to <3>.
+ * 1st cell contains the master DMA request signal
+ * 2nd cell contains the mux value (0-3) for the peripheral
+ * 3rd cell contains either 1 or 2 depending on the AHB master used.
+
+ dma-requests:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 64
+ description: Number of DMA requests the controller can handle
+
+ dma-masters:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle pointing to the DMA controller
+
+ required:
+ - compatible
+ - '#dma-cells'
+ - dma-masters
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/lpc18xx-ccu.h>
+
+ syscon@40043000 {
+ compatible = "nxp,lpc1850-creg", "syscon", "simple-mfd";
+ reg = <0x40043000 0x1000>;
+ clocks = <&ccu1 CLK_CPU_CREG>;
+ resets = <&rgu 5>;
+
+ clock-controller {
+ compatible = "nxp,lpc1850-creg-clk";
+ clocks = <&xtal32>;
+ #clock-cells = <1>;
+ };
+
+ phy {
+ compatible = "nxp,lpc1850-usb-otg-phy";
+ clocks = <&ccu1 CLK_USB0>;
+ #phy-cells = <0>;
+ };
+
+ dma-mux {
+ compatible = "nxp,lpc1850-dmamux";
+ #dma-cells = <3>;
+ dma-requests = <64>;
+ dma-masters = <&dmac>;
+ };
+ };
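A consumer of the dma-mux encodes the three cells described above; a fragment sketch, assuming the dma-mux node carries a dmamux label and using invented values:

    dmas = <&dmamux 1 2 1>;	/* request 1, mux value 2, AHB master 1 */
    dma-names = "tx";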
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk806.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk806.yaml
index 3c2b06629b75..eb5bca31948e 100644
--- a/Documentation/devicetree/bindings/mfd/rockchip,rk806.yaml
+++ b/Documentation/devicetree/bindings/mfd/rockchip,rk806.yaml
@@ -31,6 +31,27 @@ properties:
system-power-controller: true
+ rockchip,reset-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2]
+ description:
+ Mode to use when a reset of the PMIC is triggered.
+
+ The reset can be triggered programmatically, via one of the
+ PWRCTRL pins (given additional configuration), or by asserting
+ the RESETB pin low.
+
+ The following modes are supported:
+
+ - 0: restart the PMU,
+ - 1: reset all power-off reset registers and force the state to
+ switch to ACTIVE mode,
+ - 2: same as mode 1, and also pull the RESETB pin down for 5 ms.
+
+ For example, some hardware may require a full restart (mode 0) to
+ function properly, as the regulators are briefly interrupted in
+ this mode.
+
vcc1-supply:
description:
The input supply for dcdc-reg1.
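Inside a board's rk806 node the new property is a single cell; a sketch picking mode 1 (reset the power-off registers and force ACTIVE):

    rockchip,reset-mode = <1>;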
diff --git a/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml b/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
index d6b9e2914796..31d544a9c05c 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
+++ b/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
@@ -81,6 +81,9 @@ allOf:
samsung,s2mps11-acokb-ground: false
samsung,s2mps11-wrstbi-ground: false
+ # oneOf is required, because dtschema's fixups.py doesn't handle this
+ # nesting here. Its special treatment, which allows either interrupt
+ # property when only one is specified in the binding, works at the top
+ # level only.
oneOf:
- required: [interrupts]
- required: [interrupts-extended]
diff --git a/Documentation/devicetree/bindings/mfd/ti,tps65910.yaml b/Documentation/devicetree/bindings/mfd/ti,tps65910.yaml
new file mode 100644
index 000000000000..a2668fc30a7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/ti,tps65910.yaml
@@ -0,0 +1,318 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/ti,tps65910.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI TPS65910 Power Management Integrated Circuit
+
+maintainers:
+ - Shree Ramamoorthy <s-ramamoorthy@ti.com>
+
+description:
+ The TPS65910 device is a Power Management IC that provides 3 step-down
+ converters, 1 step-up converter, and 8 LDOs. The device contains an
+ embedded power controller (EPC), 1 GPIO, and an RTC.
+
+properties:
+ compatible:
+ enum:
+ - ti,tps65910
+ - ti,tps65911
+
+ reg:
+ description: I2C slave address
+ maxItems: 1
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+ description: |
+ The first cell is the GPIO number.
+ The second cell is used to specify additional options <unused>.
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ description: Specifies the IRQ number and flags
+ const: 2
+
+ ti,vmbch-threshold:
+ description: |
+ (TPS65911) Main battery charged threshold comparator.
+ See VMBCH_VSEL in TPS65910 datasheet.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+
+ ti,vmbch2-threshold:
+ description: |
+ (TPS65911) Main battery discharged threshold comparator.
+ See VMBCH_VSEL in TPS65910 datasheet.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+
+ ti,en-ck32k-xtal:
+ type: boolean
+ description: Enable external 32-kHz crystal oscillator.
+
+ ti,en-gpio-sleep:
+ description: |
+ Enable sleep control for the GPIOs; one entry per GPIO.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 9
+ maxItems: 9
+ items:
+ minimum: 0
+ maximum: 1
+
+ ti,system-power-controller:
+ type: boolean
+ description: Identifies whether this PMIC controls the system power
+
+ ti,sleep-enable:
+ type: boolean
+ description: Enable SLEEP state.
+
+ ti,sleep-keep-therm:
+ type: boolean
+ description: Keep thermal monitoring on in sleep state.
+
+ ti,sleep-keep-ck32k:
+ type: boolean
+ description: Keep the 32 kHz clock output on in sleep state.
+
+ ti,sleep-keep-hsclk:
+ type: boolean
+ description: Keep high speed internal clock on in sleep state.
+
+ regulators:
+ type: object
+ additionalProperties: false
+ description: List of regulators provided by this controller.
+
+ patternProperties:
+ "^(vrtc|vio|vpll|vdac|vmmc|vbb|vddctrl)$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ properties:
+ ti,regulator-ext-sleep-control:
+ description: |
+ Enable external sleep control through external inputs:
+ [0 (not enabled), 1 (EN1), 2 (EN2) or 4 (EN3)].
+ If this property is not defined, it defaults to 0 (not enabled).
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 4, 8]
+ unevaluatedProperties: false
+
+ "^(vdd[1-3]|vaux([1-2]|33)|vdig[1-2])$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ properties:
+ ti,regulator-ext-sleep-control:
+ description: |
+ Enable external sleep control through external inputs:
+ [0 (not enabled), 1 (EN1), 2 (EN2) or 4 (EN3)].
+ If this property is not defined, it defaults to 0 (not enabled).
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 4, 8]
+ unevaluatedProperties: false
+
+ "^ldo[1-8]$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ properties:
+ ti,regulator-ext-sleep-control:
+ description: |
+ Enable external sleep control through external inputs:
+ [0 (not enabled), 1 (EN1), 2 (EN2) or 4 (EN3)].
+ If this property is not defined, it defaults to 0 (not enabled).
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 4, 8]
+ unevaluatedProperties: false
+
+patternProperties:
+ "^(vcc(io|[1-7])-supply)$":
+ description: |
+ Input voltage supply phandle for regulators.
+ These entries are required if the PMIC regulators are enabled;
+ missing them can cause regulator registration to fail.
+
+ If an input supply is powered from a battery or an always-on supply,
+ these properties must still be present and reference the node of
+ that always-on supply.
+ tps65910:
+ vcc1-supply: VDD1 input.
+ vcc2-supply: VDD2 input.
+ vcc3-supply: VAUX33 and VMMC input.
+ vcc4-supply: VAUX1 and VAUX2 input.
+ vcc5-supply: VPLL and VDAC input.
+ vcc6-supply: VDIG1 and VDIG2 input.
+ vcc7-supply: VRTC and VBB input.
+ vccio-supply: VIO input.
+ tps65911:
+ vcc1-supply: VDD1 input.
+ vcc2-supply: VDD2 input.
+ vcc3-supply: LDO6, LDO7 and LDO8 input.
+ vcc4-supply: LDO5 input.
+ vcc5-supply: LDO3 and LDO4 input.
+ vcc6-supply: LDO1 and LDO2 input.
+ vcc7-supply: VRTC input.
+ vccio-supply: VIO input.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+ - gpio-controller
+ - '#gpio-cells'
+ - regulators
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ti,tps65910
+ then:
+ properties:
+ regulators:
+ patternProperties:
+ "^(ldo[1-8]|vddctrl)$": false
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ti,tps65911
+ then:
+ properties:
+ regulators:
+ patternProperties:
+ "^(vdd3|vaux([1-2]|33)|vdig[1-2])$": false
+ "^(vpll|vdac|vmmc|vbb)$": false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic: tps65910@2d {
+ compatible = "ti,tps65910";
+ reg = <0x2d>;
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ ti,system-power-controller;
+
+ ti,vmbch-threshold = <0>;
+ ti,vmbch2-threshold = <0>;
+ ti,en-ck32k-xtal;
+ ti,en-gpio-sleep = <0 0 1 0 0 0 0 0 0>;
+
+ vcc1-supply = <&reg_parent>;
+ vcc2-supply = <&some_reg>;
+ vcc3-supply = <&vbat>;
+ vcc4-supply = <&vbat>;
+ vcc5-supply = <&vbat>;
+ vcc6-supply = <&vbat>;
+ vcc7-supply = <&vbat>;
+ vccio-supply = <&vbat>;
+
+ regulators {
+ vio_reg: vio {
+ regulator-name = "vio";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ vdd1_reg: vdd1 {
+ regulator-name = "vdd1";
+ regulator-min-microvolt = < 600000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ vdd2_reg: vdd2 {
+ regulator-name = "vdd2";
+ regulator-min-microvolt = < 600000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ vdd3_reg: vdd3 {
+ regulator-name = "vdd3";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+ vdig1_reg: vdig1 {
+ regulator-name = "vdig1";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <2700000>;
+ regulator-always-on;
+ };
+ vdig2_reg: vdig2 {
+ regulator-name = "vdig2";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ vpll_reg: vpll {
+ regulator-name = "vpll";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+ vdac_reg: vdac {
+ regulator-name = "vdac";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-always-on;
+ };
+ vaux1_reg: vaux1 {
+ regulator-name = "vaux1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-always-on;
+ };
+ vaux2_reg: vaux2 {
+ regulator-name = "vaux2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ vaux33_reg: vaux33 {
+ regulator-name = "vaux33";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ vmmc_reg: vmmc {
+ regulator-name = "vmmc";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt
deleted file mode 100644
index a5ced46bbde9..000000000000
--- a/Documentation/devicetree/bindings/mfd/tps65910.txt
+++ /dev/null
@@ -1,205 +0,0 @@
-TPS65910 Power Management Integrated Circuit
-
-Required properties:
-- compatible: "ti,tps65910" or "ti,tps65911"
-- reg: I2C slave address
-- interrupts: the interrupt outputs of the controller
-- #gpio-cells: number of cells to describe a GPIO, this should be 2.
- The first cell is the GPIO number.
- The second cell is used to specify additional options <unused>.
-- gpio-controller: mark the device as a GPIO controller
-- #interrupt-cells: the number of cells to describe an IRQ, this should be 2.
- The first cell is the IRQ number.
- The second cell is the flags, encoded as the trigger masks from
- Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-- regulators: This is the list of child nodes that specify the regulator
- initialization data for defined regulators. Not all regulators for the given
- device need to be present. The definition for each of these nodes is defined
- using the standard binding for regulators found at
- Documentation/devicetree/bindings/regulator/regulator.txt.
- The regulator is matched with the regulator-compatible.
-
- The valid regulator-compatible values are:
- tps65910: vrtc, vio, vdd1, vdd2, vdd3, vdig1, vdig2, vpll, vdac, vaux1,
- vaux2, vaux33, vmmc, vbb
- tps65911: vrtc, vio, vdd1, vdd2, vddctrl, ldo1, ldo2, ldo3, ldo4, ldo5,
- ldo6, ldo7, ldo8
-
-- xxx-supply: Input voltage supply regulator.
- These entries are required if regulators are enabled for a device. Missing these
- properties can cause the regulator registration to fail.
- If some of input supply is powered through battery or always-on supply then
- also it is require to have these parameters with proper node handle of always
- on power supply.
- tps65910:
- vcc1-supply: VDD1 input.
- vcc2-supply: VDD2 input.
- vcc3-supply: VAUX33 and VMMC input.
- vcc4-supply: VAUX1 and VAUX2 input.
- vcc5-supply: VPLL and VDAC input.
- vcc6-supply: VDIG1 and VDIG2 input.
- vcc7-supply: VRTC and VBB input.
- vccio-supply: VIO input.
- tps65911:
- vcc1-supply: VDD1 input.
- vcc2-supply: VDD2 input.
- vcc3-supply: LDO6, LDO7 and LDO8 input.
- vcc4-supply: LDO5 input.
- vcc5-supply: LDO3 and LDO4 input.
- vcc6-supply: LDO1 and LDO2 input.
- vcc7-supply: VRTC input.
- vccio-supply: VIO input.
-
-Optional properties:
-- ti,vmbch-threshold: (tps65911) main battery charged threshold
- comparator. (see VMBCH_VSEL in TPS65910 datasheet)
-- ti,vmbch2-threshold: (tps65911) main battery discharged threshold
- comparator. (see VMBCH_VSEL in TPS65910 datasheet)
-- ti,en-ck32k-xtal: enable external 32-kHz crystal oscillator (see CK32K_CTRL
- in TPS6591X datasheet)
-- ti,en-gpio-sleep: enable sleep control for gpios
- There should be 9 entries here, one for each gpio.
-- ti,system-power-controller: Telling whether or not this pmic is controlling
- the system power.
-- ti,sleep-enable: Enable SLEEP state.
-- ti,sleep-keep-therm: Keep thermal monitoring on in sleep state.
-- ti,sleep-keep-ck32k: Keep the 32KHz clock output on in sleep state.
-- ti,sleep-keep-hsclk: Keep high speed internal clock on in sleep state.
-
-Regulator Optional properties:
-- ti,regulator-ext-sleep-control: enable external sleep
- control through external inputs [0 (not enabled), 1 (EN1), 2 (EN2) or 4(EN3)]
- If this property is not defined, it defaults to 0 (not enabled).
-
-Example:
-
- pmu: tps65910@d2 {
- compatible = "ti,tps65910";
- reg = <0xd2>;
- interrupt-parent = <&intc>;
- interrupts = < 0 118 0x04 >;
-
- #gpio-cells = <2>;
- gpio-controller;
-
- #interrupt-cells = <2>;
- interrupt-controller;
-
- ti,system-power-controller;
-
- ti,vmbch-threshold = 0;
- ti,vmbch2-threshold = 0;
- ti,en-ck32k-xtal;
- ti,en-gpio-sleep = <0 0 1 0 0 0 0 0 0>;
-
- vcc1-supply = <&reg_parent>;
- vcc2-supply = <&some_reg>;
- vcc3-supply = <...>;
- vcc4-supply = <...>;
- vcc5-supply = <...>;
- vcc6-supply = <...>;
- vcc7-supply = <...>;
- vccio-supply = <...>;
-
- regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
- vdd1_reg: regulator@0 {
- regulator-compatible = "vdd1";
- reg = <0>;
- regulator-min-microvolt = < 600000>;
- regulator-max-microvolt = <1500000>;
- regulator-always-on;
- regulator-boot-on;
- ti,regulator-ext-sleep-control = <0>;
- };
- vdd2_reg: regulator@1 {
- regulator-compatible = "vdd2";
- reg = <1>;
- regulator-min-microvolt = < 600000>;
- regulator-max-microvolt = <1500000>;
- regulator-always-on;
- regulator-boot-on;
- ti,regulator-ext-sleep-control = <4>;
- };
- vddctrl_reg: regulator@2 {
- regulator-compatible = "vddctrl";
- reg = <2>;
- regulator-min-microvolt = < 600000>;
- regulator-max-microvolt = <1400000>;
- regulator-always-on;
- regulator-boot-on;
- ti,regulator-ext-sleep-control = <0>;
- };
- vio_reg: regulator@3 {
- regulator-compatible = "vio";
- reg = <3>;
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- ti,regulator-ext-sleep-control = <1>;
- };
- ldo1_reg: regulator@4 {
- regulator-compatible = "ldo1";
- reg = <4>;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <3300000>;
- ti,regulator-ext-sleep-control = <0>;
- };
- ldo2_reg: regulator@5 {
- regulator-compatible = "ldo2";
- reg = <5>;
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1050000>;
- ti,regulator-ext-sleep-control = <0>;
- };
- ldo3_reg: regulator@6 {
- regulator-compatible = "ldo3";
- reg = <6>;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <3300000>;
- ti,regulator-ext-sleep-control = <0>;
- };
- ldo4_reg: regulator@7 {
- regulator-compatible = "ldo4";
- reg = <7>;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- ti,regulator-ext-sleep-control = <0>;
- };
- ldo5_reg: regulator@8 {
- regulator-compatible = "ldo5";
- reg = <8>;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <3300000>;
- ti,regulator-ext-sleep-control = <0>;
- };
- ldo6_reg: regulator@9 {
- regulator-compatible = "ldo6";
- reg = <9>;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- ti,regulator-ext-sleep-control = <0>;
- };
- ldo7_reg: regulator@10 {
- regulator-compatible = "ldo7";
- reg = <10>;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-always-on;
- regulator-boot-on;
- ti,regulator-ext-sleep-control = <1>;
- };
- ldo8_reg: regulator@11 {
- regulator-compatible = "ldo8";
- reg = <11>;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- ti,regulator-ext-sleep-control = <1>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
index 335f8204aa1e..587af4968255 100644
--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
@@ -20,7 +20,7 @@ properties:
- pattern: "^((((micron|spansion|st),)?\
(m25p(40|80|16|32|64|128)|\
n25q(32b|064|128a11|128a13|256a|512a|164k)))|\
- atmel,at25df(321a|641|081a)|\
+ atmel,at(25|26)df(321a|641|081a)|\
everspin,mr25h(10|40|128|256)|\
(mxicy|macronix),mx25l(4005a|1606e|6405d|8005|12805d|25635e)|\
(mxicy|macronix),mx25u(4033|4035)|\
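For illustration, a flash node matched by the newly admitted AT26DF pattern might look
like the sketch below; the SPI controller context, chip-select and frequency are
assumptions, not taken from the patch:

    flash@0 {
        compatible = "atmel,at26df321a", "jedec,spi-nor";
        reg = <0>;
        spi-max-frequency = <25000000>;
    };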
diff --git a/Documentation/devicetree/bindings/mtd/nxp,lpc1773-spifi.yaml b/Documentation/devicetree/bindings/mtd/nxp,lpc1773-spifi.yaml
new file mode 100644
index 000000000000..d6efb9417b7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/nxp,lpc1773-spifi.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/nxp,lpc1773-spifi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP SPI Flash Interface (SPIFI)
+
+description:
+ NXP SPIFI is a specialized SPI interface for serial Flash devices.
+ It supports one Flash device with 1-, 2- and 4-bit width in SPI
+ mode 0 or 3. The controller operates in either command or memory
+ mode. In memory mode the Flash is accessible from the CPU as
+ normal memory.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: nxp,lpc1773-spifi
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: spifi
+ - const: flash
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: spifi
+ - const: reg
+
+ resets:
+ maxItems: 1
+
+ spi-cpol:
+ enum: [0, 3]
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - clocks
+ - clock-names
+
+allOf:
+ - $ref: /schemas/spi/spi-controller.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/lpc18xx-ccu.h>
+
+ spi@40003000 {
+ compatible = "nxp,lpc1773-spifi";
+ reg = <0x40003000 0x1000>, <0x14000000 0x4000000>;
+ reg-names = "spifi", "flash";
+ interrupts = <30>;
+ clocks = <&ccu1 CLK_SPIFI>, <&ccu1 CLK_CPU_SPIFI>;
+ clock-names = "spifi", "reg";
+ resets = <&rgu 53>;
+ };
+
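The deleted text binding below kept a SPI flash child node in its example; under the new
schema a comparable child of the example spi@40003000 node could be sketched as follows,
reconstructed from that old example (the partition layout is illustrative only):

    flash@0 {
        compatible = "jedec,spi-nor";
        reg = <0>;
        spi-cpol;
        spi-cpha;
        spi-rx-bus-width = <4>;
        #address-cells = <1>;
        #size-cells = <1>;

        partition@0 {
            label = "data";
            reg = <0 0x200000>;
        };
    };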
diff --git a/Documentation/devicetree/bindings/mtd/nxp-spifi.txt b/Documentation/devicetree/bindings/mtd/nxp-spifi.txt
deleted file mode 100644
index f8b6b250654e..000000000000
--- a/Documentation/devicetree/bindings/mtd/nxp-spifi.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-* NXP SPI Flash Interface (SPIFI)
-
-NXP SPIFI is a specialized SPI interface for serial Flash devices.
-It supports one Flash device with 1-, 2- and 4-bits width in SPI
-mode 0 or 3. The controller operates in either command or memory
-mode. In memory mode the Flash is accessible from the CPU as
-normal memory.
-
-Required properties:
- - compatible : Should be "nxp,lpc1773-spifi"
- - reg : the first contains the register location and length,
- the second contains the memory mapping address and length
- - reg-names: Should contain the reg names "spifi" and "flash"
- - interrupts : Should contain the interrupt for the device
- - clocks : The clocks needed by the SPIFI controller
- - clock-names : Should contain the clock names "spifi" and "reg"
-
-Optional properties:
- - resets : phandle + reset specifier
-
-The SPI Flash must be a child of the SPIFI node and must have a
-compatible property as specified in bindings/mtd/jedec,spi-nor.txt
-
-Optionally it can also contain the following properties.
- - spi-cpol : Controller only supports mode 0 and 3 so either
- both spi-cpol and spi-cpha should be present or
- none of them
- - spi-cpha : See above
- - spi-rx-bus-width : Used to select how many pins that are used
- for input on the controller
-
-See bindings/spi/spi-bus.txt for more information.
-
-Example:
-spifi: spifi@40003000 {
- compatible = "nxp,lpc1773-spifi";
- reg = <0x40003000 0x1000>, <0x14000000 0x4000000>;
- reg-names = "spifi", "flash";
- interrupts = <30>;
- clocks = <&ccu1 CLK_SPIFI>, <&ccu1 CLK_CPU_SPIFI>;
- clock-names = "spifi", "reg";
- resets = <&rgu 53>;
-
- flash@0 {
- compatible = "jedec,spi-nor";
- spi-cpol;
- spi-cpha;
- spi-rx-bus-width = <4>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- partition@0 {
- label = "data";
- reg = <0 0x200000>;
- };
- };
-};
-
diff --git a/Documentation/devicetree/bindings/net/adi,adin.yaml b/Documentation/devicetree/bindings/net/adi,adin.yaml
index 929cf8c0b0fd..c425a9f1886d 100644
--- a/Documentation/devicetree/bindings/net/adi,adin.yaml
+++ b/Documentation/devicetree/bindings/net/adi,adin.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices ADIN1200/ADIN1300 PHY
maintainers:
- - Alexandru Tachici <alexandru.tachici@analog.com>
+ - Marcelo Schmitt <marcelo.schmitt@analog.com>
description: |
Bindings for Analog Devices Industrial Ethernet PHYs
diff --git a/Documentation/devicetree/bindings/net/adi,adin1110.yaml b/Documentation/devicetree/bindings/net/adi,adin1110.yaml
index 9de865295d7a..0a73e01d7f97 100644
--- a/Documentation/devicetree/bindings/net/adi,adin1110.yaml
+++ b/Documentation/devicetree/bindings/net/adi,adin1110.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: ADI ADIN1110 MAC-PHY
maintainers:
- - Alexandru Tachici <alexandru.tachici@analog.com>
+ - Marcelo Schmitt <marcelo.schmitt@analog.com>
description: |
The ADIN1110 is a low power single port 10BASE-T1L MAC-
diff --git a/Documentation/devicetree/bindings/net/airoha,an7583-mdio.yaml b/Documentation/devicetree/bindings/net/airoha,an7583-mdio.yaml
new file mode 100644
index 000000000000..3e7e68ec1560
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/airoha,an7583-mdio.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/airoha,an7583-mdio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Airoha AN7583 Dedicated MDIO Controller
+
+maintainers:
+ - Christian Marangi <ansuelsmth@gmail.com>
+
+description:
+ The Airoha AN7583 SoC has 3 different MDIO controllers.
+
+ One comes from the integrated switch based on MT7530.
+
+ The other 2 (which this schema describes) live under the SCU
+ registers and support both C22 and C45 PHYs.
+
+$ref: mdio.yaml#
+
+properties:
+ compatible:
+ const: airoha,an7583-mdio
+
+ reg:
+ enum: [0xc8, 0xcc]
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ clock-frequency:
+ default: 2500000
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ system-controller {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio-bus@c8 {
+ compatible = "airoha,an7583-mdio";
+ reg = <0xc8>;
+
+ clocks = <&scu>;
+ resets = <&scu>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
index 19934d5c24e5..2ac709a4c472 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
@@ -21,6 +21,7 @@ properties:
- items:
- enum:
- allwinner,sun20i-d1-emac
+ - allwinner,sun50i-a100-emac
- allwinner,sun50i-h6-emac
- allwinner,sun50i-h616-emac0
- allwinner,sun55i-a523-gmac0
diff --git a/Documentation/devicetree/bindings/net/altr,gmii-to-sgmii-2.0.yaml b/Documentation/devicetree/bindings/net/altr,gmii-to-sgmii-2.0.yaml
new file mode 100644
index 000000000000..aafb6447b6c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/altr,gmii-to-sgmii-2.0.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+# Copyright (C) 2025 Altera Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/altr,gmii-to-sgmii-2.0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera GMII to SGMII Converter
+
+maintainers:
+ - Matthew Gerlach <matthew.gerlach@altera.com>
+
+description:
+ This binding describes the Altera GMII to SGMII converter.
+
+properties:
+ compatible:
+ const: altr,gmii-to-sgmii-2.0
+
+ reg:
+ items:
+ - description: Registers for the emac splitter IP.
+ - description: Registers for the GMII to SGMII converter.
+ - description: Registers for TSE control.
+
+ reg-names:
+ items:
+ - const: hps_emac_interface_splitter_avalon_slave
+ - const: gmii_to_sgmii_adapter_avalon_slave
+ - const: eth_tse_control_port
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ phy@ff000240 {
+ compatible = "altr,gmii-to-sgmii-2.0";
+ reg = <0xff000240 0x00000008>,
+ <0xff000200 0x00000040>,
+ <0xff000250 0x00000008>;
+ reg-names = "hps_emac_interface_splitter_avalon_slave",
+ "gmii_to_sgmii_adapter_avalon_slave",
+ "eth_tse_control_port";
+ };
diff --git a/Documentation/devicetree/bindings/net/altr,socfpga-stmmac.yaml b/Documentation/devicetree/bindings/net/altr,socfpga-stmmac.yaml
new file mode 100644
index 000000000000..3a22d35db778
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/altr,socfpga-stmmac.yaml
@@ -0,0 +1,171 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/altr,socfpga-stmmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera SOCFPGA SoC DWMAC controller
+
+maintainers:
+ - Matthew Gerlach <matthew.gerlach@altera.com>
+
+description:
+ This binding describes the Altera SOCFPGA SoC implementation of the
+ Synopsys DWMAC for the Cyclone5, Arria5, Stratix10, Agilex5 and Agilex7
+ families of chips.
+ # TODO: Determine how to handle the Arria10 reset-name, stmmaceth-ocp, that
+ # does not validate against net/snps,dwmac.yaml.
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - altr,socfpga-stmmac
+ - altr,socfpga-stmmac-a10-s10
+ - altr,socfpga-stmmac-agilex5
+
+ required:
+ - compatible
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: altr,socfpga-stmmac
+ - const: snps,dwmac-3.70a
+ - const: snps,dwmac
+ - items:
+ - const: altr,socfpga-stmmac-a10-s10
+ - const: snps,dwmac-3.72a
+ - const: snps,dwmac
+ - items:
+ - const: altr,socfpga-stmmac-a10-s10
+ - const: snps,dwmac-3.74a
+ - const: snps,dwmac
+ - items:
+ - const: altr,socfpga-stmmac-agilex5
+ - const: snps,dwxgmac-2.10
+
+ clocks:
+ minItems: 1
+ items:
+ - description: GMAC main clock
+ - description:
+ PTP reference clock. This clock is used for programming the
+ Timestamp Addend Register. If not passed then the system
+ clock will be used and this is fine on some platforms.
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: stmmaceth
+ - const: ptp_ref
+
+ iommus:
+ minItems: 1
+ maxItems: 2
+
+ phy-mode:
+ enum:
+ - gmii
+ - mii
+ - rgmii
+ - rgmii-id
+ - rgmii-rxid
+ - rgmii-txid
+ - sgmii
+ - 1000base-x
+
+ rxc-skew-ps:
+ description: Skew control of RXC pad
+
+ rxd0-skew-ps:
+ description: Skew control of RX data 0 pad
+
+ rxd1-skew-ps:
+ description: Skew control of RX data 1 pad
+
+ rxd2-skew-ps:
+ description: Skew control of RX data 2 pad
+
+ rxd3-skew-ps:
+ description: Skew control of RX data 3 pad
+
+ rxdv-skew-ps:
+ description: Skew control of RX CTL pad
+
+ txc-skew-ps:
+ description: Skew control of TXC pad
+
+ txen-skew-ps:
+ description: Skew control of TX CTL pad
+
+ altr,emac-splitter:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Should be the phandle to the emac splitter soft IP node if the DWMAC
+ controller is connected to an emac splitter.
+
+ altr,f2h_ptp_ref_clk:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to Precision Time Protocol reference clock. This clock is
+ common to gmac instances and defaults to osc1.
+
+ altr,gmii-to-sgmii-converter:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Should be the phandle to the gmii to sgmii converter soft IP.
+
+ altr,sysmgr-syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Should be the phandle to the system manager node that encompasses
+ the glue register, the register offset, and the register shift.
+ On Cyclone5/Arria5, the register shift represents the PHY mode
+ bits, while on the Arria10/Stratix10/Agilex platforms, the
+ register shift represents a bit for each emac to enable/disable
+ signals from the FPGA fabric to the EMAC modules.
+ items:
+ - items:
+ - description: phandle to the system manager node
+ - description: offset of the control register
+ - description: shift within the control register
+
+patternProperties:
+ "^mdio[0-9]$":
+ type: object
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - altr,sysmgr-syscon
+
+allOf:
+ - $ref: snps,dwmac.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ethernet@ff700000 {
+ compatible = "altr,socfpga-stmmac", "snps,dwmac-3.70a",
+ "snps,dwmac";
+ altr,sysmgr-syscon = <&sysmgr 0x60 0>;
+ reg = <0xff700000 0x2000>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
+ clocks = <&emac_0_clk>;
+ clock-names = "stmmaceth";
+ phy-mode = "sgmii";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
index 3ab60c70286f..857c6234ba9b 100644
--- a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
@@ -34,6 +34,13 @@ properties:
This property depends on the module vendor's
configuration.
+ max-speed:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 3000000
+ - 4000000
+ default: 3000000
+
firmware-name:
maxItems: 1
@@ -65,6 +72,14 @@ properties:
description:
The GPIO number of the NXP chipset used for BT_WAKE_OUT.
+ vcc-supply:
+ description:
+ phandle of the regulator that provides the supply voltage.
+
+ reset-gpios:
+ description:
+ Chip powerdown/reset signal (PDn).
+
required:
- compatible
@@ -78,10 +93,13 @@ examples:
bluetooth {
compatible = "nxp,88w8987-bt";
fw-init-baudrate = <3000000>;
+ max-speed = <4000000>;
firmware-name = "uartuart8987_bt_v0.bin";
device-wakeup-gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
nxp,wakein-pin = /bits/ 8 <18>;
nxp,wakeout-pin = /bits/ 8 <19>;
+ vcc-supply = <&nxp_iw612_supply>;
+ reset-gpios = <&gpioctrl 2 GPIO_ACTIVE_LOW>;
local-bd-address = [66 55 44 33 22 11];
interrupt-parent = <&gpio>;
interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml
index 8d69846b2e09..559d0f733e7e 100644
--- a/Documentation/devicetree/bindings/net/cdns,macb.yaml
+++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml
@@ -62,6 +62,7 @@ properties:
- items:
- enum:
- microchip,sam9x7-gem # Microchip SAM9X7 gigabit ethernet interface
+ - microchip,sama7d65-gem # Microchip SAMA7D65 gigabit ethernet interface
- const: microchip,sama7g5-gem # Microchip SAMA7G5 gigabit ethernet interface
reg:
@@ -114,6 +115,13 @@ properties:
power-domains:
maxItems: 1
+ cdns,refclk-ext:
+ type: boolean
+ description:
+ This selects whether the REFCLK for RMII is provided by an external
+ source. For RGMII mode this selects whether the 125 MHz REF clock is
+ provided by an external source.
+
cdns,rx-watermark:
$ref: /schemas/types.yaml#/definitions/uint32
description:
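A minimal sketch of how the new cdns,refclk-ext flag sits on a macb node; the register
address, interrupt, and clock specifiers here are assumptions:

    ethernet@e000b000 {
        compatible = "cdns,macb";
        reg = <0xe000b000 0x1000>;
        interrupts = <22>;
        clocks = <&clkc 30>, <&clkc 30>, <&clkc 13>;
        clock-names = "pclk", "hclk", "tx_clk";
        phy-mode = "rmii";
        cdns,refclk-ext;    /* RMII REFCLK driven by an external source */
    };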
diff --git a/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml b/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
index d6c957a33b48..fbab3a1a8d3e 100644
--- a/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
@@ -66,6 +66,12 @@ properties:
- brcm,bcm63268-switch
- const: brcm,bcm63xx-switch
+ brcm,gpio-ctrl:
+ description:
+ A phandle to the syscon node of the bcm63xx gpio controller
+ which contains phy control registers
+ $ref: /schemas/types.yaml#/definitions/phandle
+
required:
- compatible
- reg
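As a sketch of the new property in use (the syscon label and switch address are
assumptions):

    switch@10700000 {
        compatible = "brcm,bcm63268-switch", "brcm,bcm63xx-switch";
        reg = <0x10700000 0x8000>;
        /* syscon exposing the bcm63xx PHY control registers */
        brcm,gpio-ctrl = <&gpio_ctrl>;
    };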
diff --git a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
index 51205f9f2985..815a90808901 100644
--- a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
@@ -136,6 +136,16 @@ properties:
See Documentation/devicetree/bindings/regulator/mt6323-regulator.txt for
details for the regulator setup on these boards.
+ mdio:
+ $ref: /schemas/net/mdio.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ mediatek,pio:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle pointing to the mediatek pinctrl node.
+
mediatek,mcm:
type: boolean
description:
@@ -190,6 +200,18 @@ required:
- reg
$defs:
+ builtin-dsa-port:
+ patternProperties:
+ "^(ethernet-)?ports$":
+ patternProperties:
+ "^(ethernet-)?port@[0-6]$":
+ if:
+ required: [ ethernet ]
+ then:
+ properties:
+ phy-mode:
+ const: internal
+
mt7530-dsa-port:
patternProperties:
"^(ethernet-)?ports$":
@@ -297,7 +319,7 @@ allOf:
- airoha,en7581-switch
- airoha,an7583-switch
then:
- $ref: "#/$defs/mt7530-dsa-port"
+ $ref: "#/$defs/builtin-dsa-port"
properties:
gpio-controller: false
mediatek,mcm: false
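Taken together, the new mdio child schema and the builtin-dsa-port definition would
admit a node along these lines; the pinctrl phandle, addresses, and port layout are
assumptions:

    switch@0 {
        compatible = "airoha,an7583-switch";
        reg = <0>;

        mdio {
            mediatek,pio = <&pio>;
        };

        ethernet-ports {
            #address-cells = <1>;
            #size-cells = <0>;

            ethernet-port@6 {
                reg = <6>;
                ethernet = <&gmac0>;
                phy-mode = "internal";  /* enforced by builtin-dsa-port */

                fixed-link {
                    speed = <1000>;
                    full-duplex;
                };
            };
        };
    };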
diff --git a/Documentation/devicetree/bindings/net/dsa/micrel,ks8995.yaml b/Documentation/devicetree/bindings/net/dsa/micrel,ks8995.yaml
new file mode 100644
index 000000000000..854808ff5ad5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/micrel,ks8995.yaml
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/micrel,ks8995.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Micrel KS8995 Family DSA Switches
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description:
+ The Micrel KS8995 DSA Switches are 100 Mbit switches that were produced in
+ the early-to-mid 2000s. The chip features a CPU port and four outgoing ports,
+ each with an internal PHY. The chip itself is managed over SPI, but all the
+ PHYs need to be accessed from an external MDIO channel.
+
+ Further, a fifth PHY is available and can be used separately from the switch
+ fabric, connected to an external MII interface named MII-P5. This is
+ unrelated to the CPU-facing port 5, which is used for DSA MII traffic.
+
+properties:
+ compatible:
+ enum:
+ - micrel,ks8995
+ - micrel,ksz8795
+ - micrel,ksz8864
+
+ reg:
+ maxItems: 1
+
+ reset-gpios:
+ description: GPIO to be used to reset the whole device
+ maxItems: 1
+
+allOf:
+ - $ref: dsa.yaml#/$defs/ethernet-ports
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-switch@0 {
+ compatible = "micrel,ks8995";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+ ethernet-port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+ ethernet-port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+ ethernet-port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+ ethernet-port@4 {
+ reg = <4>;
+ ethernet = <&mac2>;
+ phy-mode = "mii";
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* The WAN port connected on MII-P5 */
+ ethernet-port@1000 {
+ reg = <0x00001000 0x1000>;
+ label = "wan";
+ phy-mode = "mii";
+ phy-handle = <&phy5>;
+ };
+
+ mac2: ethernet-port@2000 {
+ reg = <0x00002000 0x1000>;
+ phy-mode = "mii";
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* LAN PHYs 1-4 accessible over external MDIO */
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+ phy3: ethernet-phy@3 {
+ reg = <3>;
+ };
+ phy4: ethernet-phy@4 {
+ reg = <4>;
+ };
+ /* WAN PHY accessible over external MDIO */
+ phy5: ethernet-phy@5 {
+ reg = <5>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
index 62ca63e8a26f..eb4607460db7 100644
--- a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
@@ -18,6 +18,7 @@ properties:
# required and optional properties.
compatible:
enum:
+ - microchip,ksz8463
- microchip,ksz8765
- microchip,ksz8794
- microchip,ksz8795
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index 7cbf11bbe99c..66b1cfbbfe22 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -39,6 +39,7 @@ properties:
# MAC.
- internal
- mii
+ - mii-lite
- gmii
- sgmii
- psgmii
diff --git a/Documentation/devicetree/bindings/net/faraday,ftgmac100.yaml b/Documentation/devicetree/bindings/net/faraday,ftgmac100.yaml
index 55d6a8379025..d14410018bcf 100644
--- a/Documentation/devicetree/bindings/net/faraday,ftgmac100.yaml
+++ b/Documentation/devicetree/bindings/net/faraday,ftgmac100.yaml
@@ -6,9 +6,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Faraday Technology FTGMAC100 gigabit ethernet controller
-allOf:
- - $ref: ethernet-controller.yaml#
-
maintainers:
- Po-Yu Chuang <ratbert@faraday-tech.com>
@@ -35,6 +32,9 @@ properties:
- description: MAC IP clock
- description: RMII RCLK gate for AST2500/2600
+ resets:
+ maxItems: 1
+
clock-names:
minItems: 1
items:
@@ -74,6 +74,21 @@ required:
- reg
- interrupts
+allOf:
+ - $ref: ethernet-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - aspeed,ast2600-mac
+ then:
+ properties:
+ resets: true
+ else:
+ properties:
+ resets: false
+
unevaluatedProperties: false
examples:
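Under the new conditional, only the AST2600 variant may carry a resets entry; a sketch,
with the register address, interrupt, clock, and reset specifiers assumed:

    ethernet@1e670000 {
        compatible = "aspeed,ast2600-mac", "faraday,ftgmac100";
        reg = <0x1e670000 0x180>;
        interrupts = <2>;
        clocks = <&syscon 11>;
        resets = <&syscon 12>;  /* allowed only for aspeed,ast2600-mac */
    };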
diff --git a/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt b/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt
deleted file mode 100644
index 168f1be50912..000000000000
--- a/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* AT86RF230 IEEE 802.15.4 *
-
-Required properties:
- - compatible: should be "atmel,at86rf230", "atmel,at86rf231",
- "atmel,at86rf233" or "atmel,at86rf212"
- - spi-max-frequency: maximal bus speed, should be set to 7500000 depends
- sync or async operation mode
- - reg: the chipselect index
- - interrupts: the interrupt generated by the device. Non high-level
- can occur deadlocks while handling isr.
-
-Optional properties:
- - reset-gpio: GPIO spec for the rstn pin
- - sleep-gpio: GPIO spec for the slp_tr pin
- - xtal-trim: u8 value for fine tuning the internal capacitance
- arrays of xtal pins: 0 = +0 pF, 0xf = +4.5 pF
-
-Example:
-
- at86rf231@0 {
- compatible = "atmel,at86rf231";
- spi-max-frequency = <7500000>;
- reg = <0>;
- interrupts = <19 4>;
- interrupt-parent = <&gpio3>;
- xtal-trim = /bits/ 8 <0x06>;
- };
diff --git a/Documentation/devicetree/bindings/net/ieee802154/atmel,at86rf233.yaml b/Documentation/devicetree/bindings/net/ieee802154/atmel,at86rf233.yaml
new file mode 100644
index 000000000000..32cdc30009cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ieee802154/atmel,at86rf233.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/ieee802154/atmel,at86rf233.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AT86RF230 IEEE 802.15.4
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - atmel,at86rf212
+ - atmel,at86rf230
+ - atmel,at86rf231
+ - atmel,at86rf233
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ reset-gpio:
+ maxItems: 1
+
+ sleep-gpio:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 7500000
+
+ xtal-trim:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ maximum: 0xf
+ description: |
+ Fine tuning the internal capacitance arrays of xtal pins:
+ 0 = +0 pF, 0xf = +4.5 pF
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ zigbee@0 {
+ compatible = "atmel,at86rf231";
+ reg = <0>;
+ spi-max-frequency = <7500000>;
+ interrupts = <19 4>;
+ interrupt-parent = <&gpio3>;
+ xtal-trim = /bits/ 8 <0x06>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/intel,ixp4xx-ethernet.yaml b/Documentation/devicetree/bindings/net/intel,ixp4xx-ethernet.yaml
index 4fdc5328826c..8689de1aaea1 100644
--- a/Documentation/devicetree/bindings/net/intel,ixp4xx-ethernet.yaml
+++ b/Documentation/devicetree/bindings/net/intel,ixp4xx-ethernet.yaml
@@ -47,6 +47,8 @@ properties:
phy-handle: true
+ fixed-link: true
+
intel,npe-handle:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
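With fixed-link now permitted, a PHY-less port could be sketched like this (the
addresses and NPE specifier are assumptions):

    ethernet@c8009000 {
        compatible = "intel,ixp4xx-ethernet";
        reg = <0xc8009000 0x1000>;
        intel,npe-handle = <&npe 1>;

        fixed-link {
            speed = <100>;
            full-duplex;
        };
    };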
diff --git a/Documentation/devicetree/bindings/net/lpc-eth.txt b/Documentation/devicetree/bindings/net/lpc-eth.txt
deleted file mode 100644
index cfe0e5991d46..000000000000
--- a/Documentation/devicetree/bindings/net/lpc-eth.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* NXP LPC32xx SoC Ethernet Controller
-
-Required properties:
-- compatible: Should be "nxp,lpc-eth"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain ethernet controller interrupt
-
-Optional properties:
-- phy-mode: See ethernet.txt file in the same directory. If the property is
- absent, "rmii" is assumed.
-- use-iram: Use LPC32xx internal SRAM (IRAM) for DMA buffering
-
-Optional subnodes:
-- mdio : specifies the mdio bus, used as a container for phy nodes according to
- phy.txt in the same directory
-
-
-Example:
-
- mac: ethernet@31060000 {
- compatible = "nxp,lpc-eth";
- reg = <0x31060000 0x1000>;
- interrupt-parent = <&mic>;
- interrupts = <29 0>;
-
- phy-mode = "rmii";
- use-iram;
- };
diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml
index 9e02fd80af83..b45f67f92e80 100644
--- a/Documentation/devicetree/bindings/net/mediatek,net.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml
@@ -40,7 +40,19 @@ properties:
interrupts:
minItems: 1
- maxItems: 4
+ maxItems: 8
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: fe0
+ - const: fe1
+ - const: fe2
+ - const: fe3
+ - const: pdma0
+ - const: pdma1
+ - const: pdma2
+ - const: pdma3
power-domains:
maxItems: 1
@@ -54,6 +66,10 @@ properties:
- const: gmac
- const: ppe
+ sram:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle to mmio SRAM
+
mediatek,ethsys:
$ref: /schemas/types.yaml#/definitions/phandle
description:
@@ -135,6 +151,10 @@ allOf:
minItems: 3
maxItems: 3
+ interrupt-names:
+ minItems: 3
+ maxItems: 3
+
clocks:
minItems: 4
maxItems: 4
@@ -146,6 +166,8 @@ allOf:
- const: gp1
- const: gp2
+ sram: false
+
mediatek,infracfg: false
mediatek,wed: false
@@ -166,6 +188,9 @@ allOf:
interrupts:
maxItems: 1
+ interrupt-names:
+ maxItems: 1
+
clocks:
minItems: 2
maxItems: 2
@@ -175,6 +200,8 @@ allOf:
- const: ethif
- const: fe
+ sram: false
+
mediatek,infracfg: false
mediatek,wed: false
@@ -192,6 +219,10 @@ allOf:
minItems: 3
maxItems: 3
+ interrupt-names:
+ minItems: 3
+ maxItems: 3
+
clocks:
minItems: 11
maxItems: 11
@@ -210,6 +241,8 @@ allOf:
- const: sgmii_ck
- const: eth2pll
+ sram: false
+
mediatek,infracfg: false
mediatek,sgmiisys:
@@ -232,6 +265,10 @@ allOf:
minItems: 3
maxItems: 3
+ interrupt-names:
+ minItems: 3
+ maxItems: 3
+
clocks:
minItems: 17
maxItems: 17
@@ -256,6 +293,8 @@ allOf:
- const: sgmii_ck
- const: eth2pll
+ sram: false
+
mediatek,sgmiisys:
minItems: 2
maxItems: 2
@@ -272,7 +311,10 @@ allOf:
then:
properties:
interrupts:
- minItems: 4
+ minItems: 8
+
+ interrupt-names:
+ minItems: 8
clocks:
minItems: 15
@@ -310,7 +352,10 @@ allOf:
then:
properties:
interrupts:
- minItems: 4
+ minItems: 8
+
+ interrupt-names:
+ minItems: 8
clocks:
minItems: 15
@@ -348,7 +393,10 @@ allOf:
then:
properties:
interrupts:
- minItems: 4
+ minItems: 8
+
+ interrupt-names:
+ minItems: 8
clocks:
minItems: 24
@@ -382,7 +430,7 @@ allOf:
- const: xgp3
patternProperties:
- "^mac@[0-1]$":
+ "^mac@[0-2]$":
type: object
unevaluatedProperties: false
allOf:
@@ -507,7 +555,11 @@ examples:
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ethsys CLK_ETH_FE_EN>,
<&ethsys CLK_ETH_GP2_EN>,
<&ethsys CLK_ETH_GP1_EN>,
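If the example also carried the new interrupt-names property, the eight entries would
pair with the interrupts above as in this sketch (the ordering follows the schema's
item list):

    interrupt-names = "fe0", "fe1", "fe2", "fe3",
                      "pdma0", "pdma1", "pdma2", "pdma3";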
diff --git a/Documentation/devicetree/bindings/net/micrel-ks8995.txt b/Documentation/devicetree/bindings/net/micrel-ks8995.txt
deleted file mode 100644
index 281bc2498d12..000000000000
--- a/Documentation/devicetree/bindings/net/micrel-ks8995.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Micrel KS8995 SPI controlled Ethernet Switch families
-
-Required properties (according to spi-bus.txt):
-- compatible: either "micrel,ks8995", "micrel,ksz8864" or "micrel,ksz8795"
-
-Optional properties:
-- reset-gpios : phandle of gpio that will be used to reset chip during probe
-
-Example:
-
-spi-master {
- ...
- switch@0 {
- compatible = "micrel,ksz8795";
-
- reg = <0>;
- spi-max-frequency = <50000000>;
- reset-gpios = <&gpio0 46 GPIO_ACTIVE_LOW>;
- };
-};
diff --git a/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml b/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
index d0332eb76ad2..5f49bd9ac5e6 100644
--- a/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
@@ -55,6 +55,12 @@ properties:
description: |
Regulator for supply voltage to VIN pin
+ ti,rx-gain-reduction-db:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Specify an RX gain reduction to reduce antenna sensitivity in 5 dB
+ increments, up to a maximum of 15 dB. Supported values: [0, 5, 10, 15].
+
required:
- compatible
- interrupts
@@ -95,5 +101,6 @@ examples:
irq-status-read-quirk;
en2-rf-quirk;
clock-frequency = <27120000>;
+ ti,rx-gain-reduction-db = <15>;
};
};
diff --git a/Documentation/devicetree/bindings/net/nxp,lpc-eth.yaml b/Documentation/devicetree/bindings/net/nxp,lpc-eth.yaml
new file mode 100644
index 000000000000..dfe9446a5375
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nxp,lpc-eth.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/nxp,lpc-eth.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC32xx SoC Ethernet Controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: nxp,lpc-eth
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ use-iram:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Use LPC32xx internal SRAM (IRAM) for DMA buffering
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ ethernet@31060000 {
+ compatible = "nxp,lpc-eth";
+ reg = <0x31060000 0x1000>;
+ interrupt-parent = <&mic>;
+ interrupts = <29 0>;
+ phy-mode = "rmii";
+ use-iram;
+ };
diff --git a/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt b/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt
deleted file mode 100644
index 7edba1264f6f..000000000000
--- a/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-* NXP LPC1850 GMAC ethernet controller
-
-This device is a platform glue layer for stmmac.
-Please see stmmac.txt for the other unchanged properties.
-
-Required properties:
- - compatible: Should contain "nxp,lpc1850-dwmac"
-
-Examples:
-
-mac: ethernet@40010000 {
- compatible = "nxp,lpc1850-dwmac", "snps,dwmac-3.611", "snps,dwmac";
- reg = <0x40010000 0x2000>;
- interrupts = <5>;
- interrupt-names = "macirq";
- clocks = <&ccu1 CLK_CPU_ETHERNET>;
- clock-names = "stmmaceth";
- resets = <&rgu 22>;
- reset-names = "stmmaceth";
-}
diff --git a/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.yaml b/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.yaml
new file mode 100644
index 000000000000..05acd9bc7616
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/nxp,lpc1850-dwmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC1850 GMAC ethernet controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+# We need a select here so we don't match all nodes with 'snps,dwmac'
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nxp,lpc1850-dwmac
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - nxp,lpc1850-dwmac
+ - const: snps,dwmac-3.611
+ - const: snps,dwmac
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: stmmaceth
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ items:
+ - const: macirq
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ items:
+ - const: stmmaceth
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+
+allOf:
+ - $ref: snps,dwmac.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/lpc18xx-ccu.h>
+
+ ethernet@40010000 {
+ compatible = "nxp,lpc1850-dwmac", "snps,dwmac-3.611", "snps,dwmac";
+ reg = <0x40010000 0x2000>;
+ interrupts = <5>;
+ interrupt-names = "macirq";
+ clocks = <&ccu1 CLK_CPU_ETHERNET>;
+ clock-names = "stmmaceth";
+ resets = <&rgu 22>;
+ reset-names = "stmmaceth";
+ rx-fifo-depth = <256>;
+ tx-fifo-depth = <256>;
+ snps,pbl = <4>;
+ snps,force_thresh_dma_mode;
+ phy-mode = "rgmii-id";
+ };
diff --git a/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml b/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
index fd4244fceced..ca61cc37a790 100644
--- a/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
+++ b/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
@@ -22,6 +22,12 @@ properties:
reg:
maxItems: 1
+ vdd-supply:
+ description: Regulator that provides 3.3V VDD power supply.
+
+ vdda-supply:
+ description: Regulator that provides 3.3V VDDA power supply.
+
managers:
type: object
additionalProperties: false
@@ -68,6 +74,15 @@ properties:
"#size-cells":
const: 0
+ vmain-supply:
+ description: Regulator that provides 44-57V VMAIN power supply.
+
+ vaux5-supply:
+ description: Regulator that provides 5V VAUX5 power supply.
+
+ vaux3p3-supply:
+ description: Regulator that provides 3.3V VAUX3P3 power supply.
+
patternProperties:
'^port@[0-7]$':
type: object
@@ -106,10 +121,11 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- manager@0 {
+ manager0: manager@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
+ vmain-supply = <&pse1_supply>;
phys0: port@0 {
reg = <0>;
@@ -161,7 +177,7 @@ examples:
pairset-names = "alternative-a", "alternative-b";
pairsets = <&phys0>, <&phys1>;
polarity-supported = "MDI", "S";
- vpwr-supply = <&vpwr1>;
+ vpwr-supply = <&manager0>;
};
pse_pi1: pse-pi@1 {
reg = <1>;
@@ -169,7 +185,7 @@ examples:
pairset-names = "alternative-a";
pairsets = <&phys2>;
polarity-supported = "MDI";
- vpwr-supply = <&vpwr2>;
+ vpwr-supply = <&manager0>;
};
};
};
diff --git a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
index d08abcb01211..bb1ee3398655 100644
--- a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
+++ b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
@@ -20,6 +20,9 @@ properties:
reg:
maxItems: 1
+ interrupts:
+ maxItems: 1
+
'#pse-cells':
const: 1
@@ -27,10 +30,12 @@ properties:
maxItems: 1
channels:
- description: each set of 8 ports can be assigned to one physical
- channels or two for PoE4. This parameter describes the configuration
- of the ports conversion matrix that establishes relationship between
- the logical ports and the physical channels.
+ description: |
+ Defines the 8 physical delivery channels on the controller that can
+ be referenced by PSE PIs through their "pairsets" property. The actual
+ port matrix mapping is created when PSE PIs reference these channels in
+ their pairsets. For 4-pair operation, two channels from the same group
+ (0-3 or 4-7) must be referenced by a single PSE PI.
type: object
additionalProperties: false
@@ -62,9 +67,12 @@ unevaluatedProperties: false
required:
- compatible
- reg
+ - interrupts
examples:
- |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
i2c {
#address-cells = <1>;
#size-cells = <0>;
@@ -72,6 +80,8 @@ examples:
ethernet-pse@20 {
compatible = "ti,tps23881";
reg = <0x20>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gpiog>;
channels {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/net/qca,ar803x.yaml b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
index 3acd09f0da86..7ae5110e7aa2 100644
--- a/Documentation/devicetree/bindings/net/qca,ar803x.yaml
+++ b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
@@ -16,8 +16,37 @@ description: |
allOf:
- $ref: ethernet-phy.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ethernet-phy-id004d.d0c0
+
+ then:
+ properties:
+ reg:
+ const: 7 # This PHY is always at MDIO address 7 in the IPQ5018 SoC
+
+ resets:
+ items:
+ - description:
+ GE PHY MISC reset which triggers a reset across MDC, DSP, RX, and TX lines.
+
+ qcom,dac-preset-short-cable:
+ description:
+ Set if this PHY is connected to another PHY, to adjust the MDAC and
+ EDAC values for amplitude, bias current settings, and the error
+ detection and correction algorithm to accommodate a short cable length.
+ If not set, the DAC values are not modified and the MDI output pins
+ of this PHY are assumed to be directly connected to an RJ45 connector.
+ type: boolean
properties:
+ compatible:
+ enum:
+ - ethernet-phy-id004d.d0c0
+
qca,clk-out-frequency:
description: Clock output frequency in Hertz.
$ref: /schemas/types.yaml#/definitions/uint32
@@ -132,3 +161,17 @@ examples:
};
};
};
+ - |
+ #include <dt-bindings/reset/qcom,gcc-ipq5018.h>
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ge_phy: ethernet-phy@7 {
+ compatible = "ethernet-phy-id004d.d0c0";
+ reg = <7>;
+
+ resets = <&gcc GCC_GEPHY_MISC_ARES>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/qca,qca7000.txt b/Documentation/devicetree/bindings/net/qca,qca7000.txt
deleted file mode 100644
index 8f5ae0b84eec..000000000000
--- a/Documentation/devicetree/bindings/net/qca,qca7000.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-* Qualcomm QCA7000
-
-The QCA7000 is a serial-to-powerline bridge with a host interface which could
-be configured either as SPI or UART slave. This configuration is done by
-the QCA7000 firmware.
-
-(a) Ethernet over SPI
-
-In order to use the QCA7000 as SPI device it must be defined as a child of a
-SPI master in the device tree.
-
-Required properties:
-- compatible : Should be "qca,qca7000"
-- reg : Should specify the SPI chip select
-- interrupts : The first cell should specify the index of the source
- interrupt and the second cell should specify the trigger
- type as rising edge
-- spi-cpha : Must be set
-- spi-cpol : Must be set
-
-Optional properties:
-- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at.
- Numbers smaller than 1000000 or greater than 16000000
- are invalid. Missing the property will set the SPI
- frequency to 8000000 Hertz.
-- qca,legacy-mode : Set the SPI data transfer of the QCA7000 to legacy mode.
- In this mode the SPI master must toggle the chip select
- between each data word. In burst mode these gaps aren't
- necessary, which is faster. This setting depends on how
- the QCA7000 is setup via GPIO pin strapping. If the
- property is missing the driver defaults to burst mode.
-
-The MAC address will be determined using the optional properties
-defined in ethernet.txt.
-
-SPI Example:
-
-/* Freescale i.MX28 SPI master*/
-ssp2: spi@80014000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx28-spi";
- pinctrl-names = "default";
- pinctrl-0 = <&spi2_pins_a>;
-
- qca7000: ethernet@0 {
- compatible = "qca,qca7000";
- reg = <0x0>;
- interrupt-parent = <&gpio3>; /* GPIO Bank 3 */
- interrupts = <25 0x1>; /* Index: 25, rising edge */
- spi-cpha; /* SPI mode: CPHA=1 */
- spi-cpol; /* SPI mode: CPOL=1 */
- spi-max-frequency = <8000000>; /* freq: 8 MHz */
- local-mac-address = [ A0 B0 C0 D0 E0 F0 ];
- };
-};
-
-(b) Ethernet over UART
-
-In order to use the QCA7000 as UART slave it must be defined as a child of a
-UART master in the device tree. It is possible to preconfigure the UART
-settings of the QCA7000 firmware, but it's not possible to change them during
-runtime.
-
-Required properties:
-- compatible : Should be "qca,qca7000"
-
-Optional properties:
-- local-mac-address : see ./ethernet.txt
-- current-speed : current baud rate of QCA7000 which defaults to 115200
- if absent, see also ../serial/serial.yaml
-
-UART Example:
-
-/* Freescale i.MX28 UART */
-auart0: serial@8006a000 {
- compatible = "fsl,imx28-auart", "fsl,imx23-auart";
- reg = <0x8006a000 0x2000>;
- pinctrl-names = "default";
- pinctrl-0 = <&auart0_2pins_a>;
-
- qca7000: ethernet {
- compatible = "qca,qca7000";
- local-mac-address = [ A0 B0 C0 D0 E0 F0 ];
- current-speed = <38400>;
- };
-};
diff --git a/Documentation/devicetree/bindings/net/qca,qca7000.yaml b/Documentation/devicetree/bindings/net/qca,qca7000.yaml
new file mode 100644
index 000000000000..b503c3aa3616
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qca,qca7000.yaml
@@ -0,0 +1,109 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qca,qca7000.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm QCA7000
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description: |
+ The QCA7000 is a serial-to-powerline bridge with a host interface which could
+ be configured either as SPI or UART slave. This configuration is done by
+ the QCA7000 firmware.
+
+ (a) Ethernet over SPI
+
+ In order to use the QCA7000 as SPI device it must be defined as a child of a
+ SPI master in the device tree.
+
+ (b) Ethernet over UART
+
+ In order to use the QCA7000 as UART slave it must be defined as a child of a
+ UART master in the device tree. It is possible to preconfigure the UART
+ settings of the QCA7000 firmware, but it's not possible to change them during
+ runtime.
+
+properties:
+ compatible:
+ const: qca,qca7000
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ qca,legacy-mode:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set the SPI data transfer of the QCA7000 to legacy mode.
+ In this mode the SPI master must toggle the chip select
+ between each data word. In burst mode these gaps aren't
+ necessary, which is faster. This setting depends on how
+ the QCA7000 is setup via GPIO pin strapping. If the
+ property is missing the driver defaults to burst mode.
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+ - if:
+ required:
+ - reg
+
+ then:
+ properties:
+ spi-cpha: true
+
+ spi-cpol: true
+
+ spi-max-frequency:
+ default: 8000000
+ maximum: 16000000
+ minimum: 1000000
+
+ allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+ else:
+ properties:
+ current-speed:
+ default: 115200
+
+ qca,legacy-mode: false
+
+ allOf:
+ - $ref: /schemas/serial/serial-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet@0 {
+ compatible = "qca,qca7000";
+ reg = <0x0>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <25 IRQ_TYPE_EDGE_RISING>;
+ spi-cpha;
+ spi-cpol;
+ spi-max-frequency = <8000000>;
+ local-mac-address = [ a0 b0 c0 d0 e0 f0 ];
+ };
+ };
+
+ - |
+ serial {
+ ethernet {
+ compatible = "qca,qca7000";
+ local-mac-address = [ a0 b0 c0 d0 e0 f0 ];
+ current-speed = <38400>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml b/Documentation/devicetree/bindings/net/renesas,rzv2h-gbeth.yaml
index c498a9999289..23e39bcea96b 100644
--- a/Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,rzv2h-gbeth.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/net/renesas,r9a09g057-gbeth.yaml#
+$id: http://devicetree.org/schemas/net/renesas,rzv2h-gbeth.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: GBETH glue layer for Renesas RZ/V2H(P) (and similar SoCs)
@@ -14,6 +14,7 @@ select:
compatible:
contains:
enum:
+ - renesas,r9a09g047-gbeth
- renesas,r9a09g056-gbeth
- renesas,r9a09g057-gbeth
- renesas,rzv2h-gbeth
@@ -24,6 +25,7 @@ properties:
compatible:
items:
- enum:
+ - renesas,r9a09g047-gbeth # RZ/G3E
- renesas,r9a09g056-gbeth # RZ/V2N
- renesas,r9a09g057-gbeth # RZ/V2H(P)
- const: renesas,rzv2h-gbeth
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 90b79283e228..4e3cbaa06229 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -30,6 +30,7 @@ select:
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
+ - snps,dwmac-5.00a
- snps,dwmac-5.10a
- snps,dwmac-5.20
- snps,dwmac-5.30a
@@ -98,11 +99,13 @@ properties:
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
+ - snps,dwmac-5.00a
- snps,dwmac-5.10a
- snps,dwmac-5.20
- snps,dwmac-5.30a
- snps,dwxgmac
- snps,dwxgmac-2.10
+ - sophgo,sg2042-dwmac
- sophgo,sg2044-dwmac
- starfive,jh7100-dwmac
- starfive,jh7110-dwmac
@@ -641,6 +644,7 @@ allOf:
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
+ - snps,dwmac-5.00a
- snps,dwmac-5.10a
- snps,dwmac-5.20
- snps,dwmac-5.30a
diff --git a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
deleted file mode 100644
index 612a8e8abc88..000000000000
--- a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-Altera SOCFPGA SoC DWMAC controller
-
-This is a variant of the dwmac/stmmac driver an inherits all descriptions
-present in Documentation/devicetree/bindings/net/stmmac.txt.
-
-The device node has additional properties:
-
-Required properties:
- - compatible : For Cyclone5/Arria5 SoCs it should contain
- "altr,socfpga-stmmac". For Arria10/Agilex/Stratix10 SoCs
- "altr,socfpga-stmmac-a10-s10".
- Along with "snps,dwmac" and any applicable more detailed
- designware version numbers documented in stmmac.txt
- - altr,sysmgr-syscon : Should be the phandle to the system manager node that
- encompasses the glue register, the register offset, and the register shift.
- On Cyclone5/Arria5, the register shift represents the PHY mode bits, while
- on the Arria10/Stratix10/Agilex platforms, the register shift represents
- bit for each emac to enable/disable signals from the FPGA fabric to the
- EMAC modules.
- - altr,f2h_ptp_ref_clk use f2h_ptp_ref_clk instead of default eosc1 clock
- for ptp ref clk. This affects all emacs as the clock is common.
-
-Optional properties:
-altr,emac-splitter: Should be the phandle to the emac splitter soft IP node if
- DWMAC controller is connected emac splitter.
-phy-mode: The phy mode the ethernet operates in
-altr,sgmii-to-sgmii-converter: phandle to the TSE SGMII converter
-
-This device node has additional phandle dependency, the sgmii converter:
-
-Required properties:
- - compatible : Should be altr,gmii-to-sgmii-2.0
- - reg-names : Should be "eth_tse_control_port"
-
-Example:
-
-gmii_to_sgmii_converter: phy@100000240 {
- compatible = "altr,gmii-to-sgmii-2.0";
- reg = <0x00000001 0x00000240 0x00000008>,
- <0x00000001 0x00000200 0x00000040>;
- reg-names = "eth_tse_control_port";
- clocks = <&sgmii_1_clk_0 &emac1 1 &sgmii_clk_125 &sgmii_clk_125>;
- clock-names = "tse_pcs_ref_clk_clock_connection", "tse_rx_cdr_refclk";
-};
-
-gmac0: ethernet@ff700000 {
- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.70a", "snps,dwmac";
- altr,sysmgr-syscon = <&sysmgr 0x60 0>;
- reg = <0xff700000 0x2000>;
- interrupts = <0 115 4>;
- interrupt-names = "macirq";
- mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */
- clocks = <&emac_0_clk>;
- clock-names = "stmmaceth";
- phy-mode = "sgmii";
- altr,gmii-to-sgmii-converter = <&gmii_to_sgmii_converter>;
-};
diff --git a/Documentation/devicetree/bindings/net/sophgo,cv1800b-dwmac.yaml b/Documentation/devicetree/bindings/net/sophgo,cv1800b-dwmac.yaml
new file mode 100644
index 000000000000..b89456f0ef83
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/sophgo,cv1800b-dwmac.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/sophgo,cv1800b-dwmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo CV1800B DWMAC glue layer
+
+maintainers:
+ - Inochi Amaoto <inochiama@gmail.com>
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - sophgo,cv1800b-dwmac
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - const: sophgo,cv1800b-dwmac
+ - const: snps,dwmac-3.70a
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: GMAC main clock
+ - description: PTP clock
+
+ clock-names:
+ items:
+ - const: stmmaceth
+ - const: ptp_ref
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: stmmaceth
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - resets
+ - reset-names
+
+allOf:
+ - $ref: snps,dwmac.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ ethernet@4070000 {
+ compatible = "sophgo,cv1800b-dwmac", "snps,dwmac-3.70a";
+ reg = <0x04070000 0x10000>;
+ clocks = <&clk 35>, <&clk 36>;
+ clock-names = "stmmaceth", "ptp_ref";
+ interrupts = <31 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ phy-handle = <&internal_ephy>;
+ phy-mode = "internal";
+ resets = <&rst 12>;
+ reset-names = "stmmaceth";
+ rx-fifo-depth = <8192>;
+ tx-fifo-depth = <8192>;
+ snps,multicast-filter-bins = <0>;
+ snps,perfect-filter-entries = <1>;
+ snps,aal;
+ snps,txpbl = <8>;
+ snps,rxpbl = <8>;
+ snps,mtl-rx-config = <&gmac0_mtl_rx_setup>;
+ snps,mtl-tx-config = <&gmac0_mtl_tx_setup>;
+ snps,axi-config = <&gmac0_stmmac_axi_setup>;
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ gmac0_mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <1>;
+ queue0 {};
+ };
+
+ gmac0_mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <1>;
+ queue0 {};
+ };
+
+ gmac0_stmmac_axi_setup: stmmac-axi-config {
+ snps,blen = <16 8 4 0 0 0 0>;
+ snps,rd_osr_lmt = <2>;
+ snps,wr_osr_lmt = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml b/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml
index 8afbd9ebd73f..ce21979a2d9a 100644
--- a/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml
@@ -15,14 +15,19 @@ select:
contains:
enum:
- sophgo,sg2044-dwmac
+ - sophgo,sg2042-dwmac
required:
- compatible
properties:
compatible:
- items:
- - const: sophgo,sg2044-dwmac
- - const: snps,dwmac-5.30a
+ oneOf:
+ - items:
+ - const: sophgo,sg2042-dwmac
+ - const: snps,dwmac-5.00a
+ - items:
+ - const: sophgo,sg2044-dwmac
+ - const: snps,dwmac-5.30a
reg:
maxItems: 1
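A sketch of a node matching the newly added SG2042 entry; the unit address and the
clock and interrupt specifiers are assumptions:

    ethernet@30006000 {
        compatible = "sophgo,sg2042-dwmac", "snps,dwmac-5.00a";
        reg = <0x30006000 0x4000>;
        clocks = <&clk 1>, <&clk 2>;
        clock-names = "stmmaceth", "ptp_ref";
        interrupts = <132>;
        interrupt-names = "macirq";
        phy-mode = "rgmii-id";
    };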
diff --git a/Documentation/devicetree/bindings/net/thead,th1520-gmac.yaml b/Documentation/devicetree/bindings/net/thead,th1520-gmac.yaml
index 6d9de3303762..b3492a9aa4ef 100644
--- a/Documentation/devicetree/bindings/net/thead,th1520-gmac.yaml
+++ b/Documentation/devicetree/bindings/net/thead,th1520-gmac.yaml
@@ -62,11 +62,13 @@ properties:
items:
- description: GMAC main clock
- description: Peripheral registers interface clock
+ - description: APB glue registers interface clock
clock-names:
items:
- const: stmmaceth
- const: pclk
+ - const: apb
interrupts:
items:
@@ -88,8 +90,8 @@ examples:
compatible = "thead,th1520-gmac", "snps,dwmac-3.70a";
reg = <0xe7070000 0x2000>, <0xec003000 0x1000>;
reg-names = "dwmac", "apb";
- clocks = <&clk 1>, <&clk 2>;
- clock-names = "stmmaceth", "pclk";
+ clocks = <&clk 1>, <&clk 2>, <&clk 3>;
+ clock-names = "stmmaceth", "pclk", "apb";
interrupts = <66>;
interrupt-names = "macirq";
phy-mode = "rgmii-id";
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
index 7b3d948f187d..a959c1d7e643 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
@@ -284,7 +284,7 @@ examples:
ti,syscon-efuse = <&mcu_conf 0x200>;
phys = <&phy_gmii_sel 1>;
- phy-mode = "rgmii-rxid";
+ phy-mode = "rgmii-id";
phy-handle = <&phy0>;
};
};
diff --git a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
index 0e5412cff2bc..d16ca8e0a25d 100644
--- a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
@@ -12,7 +12,7 @@ maintainers:
description: |
This node provides properties for configuring the ath9k wireless device.
The node is expected to be specified as a child node of the PCI controller
- to which the wireless chip is connected.
+ or AHB bus to which the wireless chip is connected.
allOf:
- $ref: ieee80211.yaml#
@@ -35,6 +35,12 @@ properties:
- pci168c,0034 # AR9462
- pci168c,0036 # AR9565
- pci168c,0037 # AR1111 and AR9485
+ - qca,ar9130-wifi
+ - qca,ar9330-wifi
+ - qca,ar9340-wifi
+ - qca,qca9530-wifi
+ - qca,qca9550-wifi
+ - qca,qca9560-wifi
reg:
maxItems: 1
@@ -88,3 +94,13 @@ examples:
nvmem-cell-names = "mac-address", "calibration";
};
};
+ - |
+ ahb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ wifi@180c0000 {
+ compatible = "qca,ar9130-wifi";
+ reg = <0x180c0000 0x230000>;
+ interrupts = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
index 653b319fee88..e34d42a30192 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
@@ -35,6 +35,12 @@ properties:
string to uniquely identify variant of the calibration data for designs
with colliding bus and device ids
+ firmware-name:
+ maxItems: 1
+ description:
+ If present, a board or platform specific string used to lookup
+ usecase-specific firmware files for the device.
+
vddrfacmn-supply:
description: VDD_RFA_CMN supply regulator handle
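A sketch of the new property on an ath11k PCI function node, which sits under a PCIe
bridge; the vendor/device compatible shown and the firmware-name value are assumptions:

    wifi@0 {
        compatible = "pci17cb,1103";
        reg = <0x0 0x0 0x0 0x0 0x0>;
        firmware-name = "board-variant-a";  /* hypothetical value */
    };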
diff --git a/Documentation/devicetree/bindings/net/wireless/ralink,rt2880.yaml b/Documentation/devicetree/bindings/net/wireless/ralink,rt2880.yaml
new file mode 100644
index 000000000000..04dc5bb2edcc
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/ralink,rt2880.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/wireless/ralink,rt2880.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ralink RT2880 wireless device
+
+maintainers:
+ - Stanislaw Gruszka <stf_xl@wp.pl>
+
+description: |
+ This node provides properties for configuring RT2880 SoC wifi devices.
+ The node is expected to be specified as a root node of the device.
+
+allOf:
+ - $ref: ieee80211.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ralink,rt2880-wifi
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ wifi@10180000 {
+ compatible = "ralink,rt2880-wifi";
+ reg = <0x10180000 0x40000>;
+ clocks = <&sysc 16>;
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+ };
diff --git a/Documentation/devicetree/bindings/opp/opp-v2-qcom-adreno.yaml b/Documentation/devicetree/bindings/opp/opp-v2-qcom-adreno.yaml
index a27ba7b663d4..0bd7d6b69755 100644
--- a/Documentation/devicetree/bindings/opp/opp-v2-qcom-adreno.yaml
+++ b/Documentation/devicetree/bindings/opp/opp-v2-qcom-adreno.yaml
@@ -23,7 +23,7 @@ properties:
const: operating-points-v2-adreno
patternProperties:
- '^opp-[0-9]+$':
+ '^opp(-[0-9]+){1,2}$':
type: object
additionalProperties: false
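The widened pattern accepts an optional second numeric suffix, so both of the following node names now validate; the frequency and the suffix value here are illustrative only:

    opp-800000000 {
        opp-hz = /bits/ 64 <800000000>;
    };

    opp-800000000-3 {
        opp-hz = /bits/ 64 <800000000>;
    };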
diff --git a/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt b/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt
deleted file mode 100644
index 3abeecf4983f..000000000000
--- a/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-* Freescale 83xx and 512x PCI bridges
-
-Freescale 83xx and 512x SOCs include the same PCI bridge core.
-
-83xx/512x specific notes:
-- reg: should contain two address length tuples
- The first is for the internal PCI bridge registers
- The second is for the PCI config space access registers
-
-Example (MPC8313ERDB)
- pci0: pci@e0008500 {
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0E -mini PCI */
- 0x7000 0x0 0x0 0x1 &ipic 18 0x8
- 0x7000 0x0 0x0 0x2 &ipic 18 0x8
- 0x7000 0x0 0x0 0x3 &ipic 18 0x8
- 0x7000 0x0 0x0 0x4 &ipic 18 0x8
-
- /* IDSEL 0x0F - PCI slot */
- 0x7800 0x0 0x0 0x1 &ipic 17 0x8
- 0x7800 0x0 0x0 0x2 &ipic 18 0x8
- 0x7800 0x0 0x0 0x3 &ipic 17 0x8
- 0x7800 0x0 0x0 0x4 &ipic 18 0x8>;
- interrupt-parent = <&ipic>;
- interrupts = <66 0x8>;
- bus-range = <0x0 0x0>;
- ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
- 0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
- 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
- clock-frequency = <66666666>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe0008500 0x100 /* internal registers */
- 0xe0008300 0x8>; /* config space access registers */
- compatible = "fsl,mpc8349-pci";
- device_type = "pci";
- };
diff --git a/Documentation/devicetree/bindings/pci/aardvark-pci.txt b/Documentation/devicetree/bindings/pci/aardvark-pci.txt
deleted file mode 100644
index 2b8ca920a7fa..000000000000
--- a/Documentation/devicetree/bindings/pci/aardvark-pci.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-Aardvark PCIe controller
-
-This PCIe controller is used on the Marvell Armada 3700 ARM64 SoC.
-
-The Device Tree node describing an Aardvark PCIe controller must
-contain the following properties:
-
- - compatible: Should be "marvell,armada-3700-pcie"
- - reg: range of registers for the PCIe controller
- - interrupts: the interrupt line of the PCIe controller
- - #address-cells: set to <3>
- - #size-cells: set to <2>
- - device_type: set to "pci"
- - ranges: ranges for the PCI memory and I/O regions
- - #interrupt-cells: set to <1>
- - msi-controller: indicates that the PCIe controller can itself
- handle MSI interrupts
- - msi-parent: pointer to the MSI controller to be used
- - interrupt-map-mask and interrupt-map: standard PCI properties to
- define the mapping of the PCIe interface to interrupt numbers.
- - bus-range: PCI bus numbers covered
- - phys: the PCIe PHY handle
- - max-link-speed: see pci.txt
- - reset-gpios: see pci.txt
-
-In addition, the Device Tree describing an Aardvark PCIe controller
-must include a sub-node that describes the legacy interrupt controller
-built into the PCIe controller. This sub-node must have the following
-properties:
-
- - interrupt-controller
- - #interrupt-cells: set to <1>
-
-Example:
-
- pcie0: pcie@d0070000 {
- compatible = "marvell,armada-3700-pcie";
- device_type = "pci";
- reg = <0 0xd0070000 0 0x20000>;
- #address-cells = <3>;
- #size-cells = <2>;
- bus-range = <0x00 0xff>;
- interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
- #interrupt-cells = <1>;
- msi-controller;
- msi-parent = <&pcie0>;
- ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000 /* Port 0 MEM */
- 0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>; /* Port 0 IO*/
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie_intc 0>,
- <0 0 0 2 &pcie_intc 1>,
- <0 0 0 3 &pcie_intc 2>,
- <0 0 0 4 &pcie_intc 3>;
- phys = <&comphy1 0>;
- pcie_intc: interrupt-controller {
- interrupt-controller;
- #interrupt-cells = <1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/pci/amazon,al-alpine-v3-pcie.yaml b/Documentation/devicetree/bindings/pci/amazon,al-alpine-v3-pcie.yaml
new file mode 100644
index 000000000000..45244cad5f30
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/amazon,al-alpine-v3-pcie.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/amazon,al-alpine-v3-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amazon Annapurna Labs Alpine v3 PCIe Host Bridge
+
+maintainers:
+ - Jonathan Chocron <jonnyc@amazon.com>
+
+description:
+ Amazon's Annapurna Labs PCIe Host Controller is based on the Synopsys
+ DesignWare PCI controller.
+
+allOf:
+ - $ref: snps,dw-pcie.yaml#
+
+properties:
+ compatible:
+ enum:
+ - amazon,al-alpine-v2-pcie
+ - amazon,al-alpine-v3-pcie
+
+ reg:
+ items:
+ - description: PCIe ECAM space
+ - description: AL proprietary registers
+ - description: Designware PCIe registers
+
+ reg-names:
+ items:
+ - const: config
+ - const: controller
+ - const: dbi
+
+ interrupts:
+ maxItems: 1
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pcie@fb600000 {
+ compatible = "amazon,al-alpine-v3-pcie";
+ reg = <0x0 0xfb600000 0x0 0x00100000
+ 0x0 0xfd800000 0x0 0x00010000
+ 0x0 0xfd810000 0x0 0x00001000>;
+ reg-names = "config", "controller", "dbi";
+ bus-range = <0 255>;
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map-mask = <0x00 0 0 7>;
+ interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; /* INTa */
+ ranges = <0x02000000 0x0 0xc0010000 0x0 0xc0010000 0x0 0x07ff0000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/apm,xgene-pcie.yaml b/Documentation/devicetree/bindings/pci/apm,xgene-pcie.yaml
new file mode 100644
index 000000000000..2504b8235889
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/apm,xgene-pcie.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/apm,xgene-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AppliedMicro X-Gene PCIe interface
+
+maintainers:
+ - Toan Le <toan@os.amperecomputing.com>
+
+allOf:
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: apm,xgene-storm-pcie
+ - const: apm,xgene-pcie
+ - items:
+ - const: apm,xgene-pcie
+
+ reg:
+ items:
+ - description: Controller configuration registers
+ - description: PCI configuration space registers
+
+ reg-names:
+ items:
+ - const: csr
+ - const: cfg
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: pcie
+
+ dma-coherent: true
+
+ msi-parent:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - '#interrupt-cells'
+ - interrupt-map-mask
+ - interrupt-map
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pcie@1f2b0000 {
+ compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0x00 0x1f2b0000 0x0 0x00010000>, /* Controller registers */
+ <0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */
+ reg-names = "csr", "cfg";
+ ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000>, /* io */
+ <0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */
+ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000>,
+ <0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1>,
+ <0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1>,
+ <0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1>,
+ <0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
+ dma-coherent;
+ clocks = <&pcie0clk 0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt b/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt
deleted file mode 100644
index cc6dcdb676b9..000000000000
--- a/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-* Axis ARTPEC-6 PCIe interface
-
-This PCIe host controller is based on the Synopsys DesignWare PCIe IP
-and thus inherits all the common properties defined in snps,dw-pcie.yaml.
-
-Required properties:
-- compatible: "axis,artpec6-pcie", "snps,dw-pcie" for ARTPEC-6 in RC mode;
- "axis,artpec6-pcie-ep", "snps,dw-pcie" for ARTPEC-6 in EP mode;
- "axis,artpec7-pcie", "snps,dw-pcie" for ARTPEC-7 in RC mode;
- "axis,artpec7-pcie-ep", "snps,dw-pcie" for ARTPEC-7 in EP mode;
-- reg: base addresses and lengths of the PCIe controller (DBI),
- the PHY controller, and configuration address space.
-- reg-names: Must include the following entries:
- - "dbi"
- - "phy"
- - "config"
-- interrupts: A list of interrupt outputs of the controller. Must contain an
- entry for each entry in the interrupt-names property.
-- interrupt-names: Must include the following entries:
- - "msi": The interrupt that is asserted when an MSI is received
-- axis,syscon-pcie: A phandle pointing to the ARTPEC-6 system controller,
- used to enable and control the Synopsys IP.
-
-Example:
-
- pcie@f8050000 {
- compatible = "axis,artpec6-pcie", "snps,dw-pcie";
- reg = <0xf8050000 0x2000
- 0xf8040000 0x1000
- 0xc0000000 0x2000>;
- reg-names = "dbi", "phy", "config";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
- /* downstream I/O */
- ranges = <0x81000000 0 0 0xc0002000 0 0x00010000
- /* non-prefetchable memory */
- 0x82000000 0 0xc0012000 0xc0012000 0 0x1ffee000>;
- num-lanes = <2>;
- bus-range = <0x00 0xff>;
- interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "msi";
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &intc GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &intc GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &intc GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
- axis,syscon-pcie = <&syscon>;
- };
diff --git a/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.yaml b/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.yaml
new file mode 100644
index 000000000000..dcc5661aa004
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.yaml
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2025 Axis AB
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/axis,artpec6-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Axis ARTPEC-6 PCIe host controller
+
+maintainers:
+ - Jesper Nilsson <jesper.nilsson@axis.com>
+
+description:
+ This PCIe host controller is based on the Synopsys DesignWare PCIe IP.
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - axis,artpec6-pcie
+ - axis,artpec6-pcie-ep
+ - axis,artpec7-pcie
+ - axis,artpec7-pcie-ep
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - axis,artpec6-pcie
+ - axis,artpec6-pcie-ep
+ - axis,artpec7-pcie
+ - axis,artpec7-pcie-ep
+ - const: snps,dw-pcie
+
+ reg:
+ minItems: 3
+ maxItems: 4
+
+ reg-names:
+ minItems: 3
+ maxItems: 4
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ items:
+ - const: msi
+
+ axis,syscon-pcie:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ System controller phandle used to enable and control the Synopsys IP.
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-names
+ - axis,syscon-pcie
+
+oneOf:
+ - $ref: snps,dw-pcie.yaml#
+ properties:
+ reg:
+ maxItems: 3
+
+ reg-names:
+ items:
+ - const: dbi
+ - const: phy
+ - const: config
+
+ - $ref: snps,dw-pcie-ep.yaml#
+ properties:
+ reg:
+ minItems: 4
+
+ reg-names:
+ items:
+ - const: dbi
+ - const: dbi2
+ - const: phy
+ - const: addr_space
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ pcie@f8050000 {
+ compatible = "axis,artpec6-pcie", "snps,dw-pcie";
+ device_type = "pci";
+ reg = <0xf8050000 0x2000
+ 0xf8040000 0x1000
+ 0xc0000000 0x2000>;
+ reg-names = "dbi", "phy", "config";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x81000000 0 0 0xc0002000 0 0x00010000>,
+ <0x82000000 0 0xc0012000 0xc0012000 0 0x1ffee000>;
+ num-lanes = <2>;
+ bus-range = <0x00 0xff>;
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ axis,syscon-pcie = <&syscon>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
index c4f9674e8695..812ef5957cfc 100644
--- a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -107,6 +107,10 @@ properties:
- const: bridge
- const: swinit
+ num-lanes:
+ default: 1
+ maximum: 4
+
required:
- compatible
- reg
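A minimal sketch of the new property in use, assuming a BCM7445-style node (the address and the omission of the other required properties are simplifications for the sketch):

    pcie@8b20000 {
        compatible = "brcm,bcm7445-pcie";
        reg = <0x8b20000 0x9310>;
        /* optional; defaults to 1, maximum is 4 */
        num-lanes = <4>;
    };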
diff --git a/Documentation/devicetree/bindings/pci/marvell,armada-3700-pcie.yaml b/Documentation/devicetree/bindings/pci/marvell,armada-3700-pcie.yaml
new file mode 100644
index 000000000000..68090b3ca419
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/marvell,armada-3700-pcie.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/marvell,armada-3700-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Armada 3700 (Aardvark) PCIe Controller
+
+maintainers:
+ - Thomas Petazzoni <thomas.petazzoni@bootlin.com>
+ - Pali Rohár <pali@kernel.org>
+
+allOf:
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
+
+properties:
+ compatible:
+ const: marvell,armada-3700-pcie
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ msi-controller: true
+
+ msi-parent:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+
+ reset-gpios:
+ description: PCIe reset GPIO signals.
+
+ interrupt-controller:
+ type: object
+ additionalProperties: false
+
+ properties:
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ required:
+ - interrupt-controller
+ - '#interrupt-cells'
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#interrupt-cells'
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pcie@d0070000 {
+ compatible = "marvell,armada-3700-pcie";
+ device_type = "pci";
+ reg = <0 0xd0070000 0 0x20000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x00 0xff>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ msi-controller;
+ msi-parent = <&pcie0>;
+ ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000>,
+ <0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc 0>,
+ <0 0 0 2 &pcie_intc 1>,
+ <0 0 0 3 &pcie_intc 2>,
+ <0 0 0 4 &pcie_intc 3>;
+ phys = <&comphy1 0>;
+ max-link-speed = <2>;
+ reset-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
+
+ pcie_intc: interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/pci-ep.yaml b/Documentation/devicetree/bindings/pci/pci-ep.yaml
index 214caa4ec3d5..1868a10d5b10 100644
--- a/Documentation/devicetree/bindings/pci/pci-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/pci-ep.yaml
@@ -51,7 +51,7 @@ properties:
max-link-speed:
$ref: /schemas/types.yaml#/definitions/uint32
- enum: [ 1, 2, 3, 4 ]
+ enum: [ 1, 2, 3, 4, 5, 6 ]
msi-map:
description: |
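The enum extension simply lets endpoint controllers advertise PCIe Gen5/Gen6 rates; a hedged fragment (the controller node and its address are hypothetical, and required properties are elided):

    pcie-ep@fd000000 {
        compatible = "snps,dw-pcie-ep";
        max-link-speed = <5>;   /* newly permitted value */
    };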
diff --git a/Documentation/devicetree/bindings/pci/pcie-al.txt b/Documentation/devicetree/bindings/pci/pcie-al.txt
deleted file mode 100644
index 2ad1fe466eab..000000000000
--- a/Documentation/devicetree/bindings/pci/pcie-al.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-* Amazon Annapurna Labs PCIe host bridge
-
-Amazon's Annapurna Labs PCIe Host Controller is based on the Synopsys DesignWare
-PCI core. It inherits common properties defined in
-Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml.
-
-Properties of the host controller node that differ from it are:
-
-- compatible:
- Usage: required
- Value type: <stringlist>
- Definition: Value should contain
- - "amazon,al-alpine-v2-pcie" for alpine_v2
- - "amazon,al-alpine-v3-pcie" for alpine_v3
-
-- reg:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: Register ranges as listed in the reg-names property
-
-- reg-names:
- Usage: required
- Value type: <stringlist>
- Definition: Must include the following entries
- - "config" PCIe ECAM space
- - "controller" AL proprietary registers
- - "dbi" Designware PCIe registers
-
-Example:
-
- pcie-external0: pcie@fb600000 {
- compatible = "amazon,al-alpine-v3-pcie";
- reg = <0x0 0xfb600000 0x0 0x00100000
- 0x0 0xfd800000 0x0 0x00010000
- 0x0 0xfd810000 0x0 0x00001000>;
- reg-names = "config", "controller", "dbi";
- bus-range = <0 255>;
- device_type = "pci";
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-map-mask = <0x00 0 0 7>;
- interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; /* INTa */
- ranges = <0x02000000 0x0 0xc0010000 0x0 0xc0010000 0x0 0x07ff0000>;
- };
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml
index 0480c58f7d99..ab2509ec1c4b 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml
@@ -51,10 +51,18 @@ properties:
phys:
maxItems: 1
+ deprecated: true
+ description:
+ This property is deprecated. Instead of referencing it from the host
+ bridge node, use the phys property from the PCIe root port node.
phy-names:
items:
- const: pciephy
+ deprecated: true
+ description:
+ This property is deprecated. It is not required in the PCIe root port
+ node either, as the root port has only one PHY.
power-domains:
maxItems: 1
@@ -71,12 +79,18 @@ properties:
maxItems: 12
perst-gpios:
- description: GPIO controlled connection to PERST# signal
+ description: GPIO controlled connection to PERST# signal. This property is
+ deprecated. Instead of referencing it from the host bridge node, use the
+ reset-gpios property from the PCIe root port node.
maxItems: 1
+ deprecated: true
wake-gpios:
- description: GPIO controlled connection to WAKE# signal
+ description: GPIO controlled connection to WAKE# signal. This property is
+ deprecated. Instead of referencing it from the host bridge node, use the
+ wake-gpios property from the PCIe root port node.
maxItems: 1
+ deprecated: true
vddpe-3v3-supply:
description: PCIe endpoint power supply
@@ -85,6 +99,20 @@ properties:
opp-table:
type: object
+patternProperties:
+ "^pcie@":
+ type: object
+ $ref: /schemas/pci/pci-pci-bridge.yaml#
+
+ properties:
+ reg:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+
+ unevaluatedProperties: false
+
required:
- reg
- reg-names
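The intended replacement pattern, mirroring the sc7280 example updated later in this series, moves the PHY and PERST# lines into a child root-port node; the phandles and GPIO number below are illustrative:

    pcie@0 {
        device_type = "pci";
        reg = <0x0 0x0 0x0 0x0 0x0>;
        bus-range = <0x01 0xff>;

        phys = <&pcie1_phy>;
        reset-gpios = <&tlmm 2 GPIO_ACTIVE_LOW>;
    };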
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sa8255p.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sa8255p.yaml
new file mode 100644
index 000000000000..ef705a02fcd9
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sa8255p.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/qcom,pcie-sa8255p.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SA8255p firmware-managed, ECAM-compliant PCIe Root Complex
+
+maintainers:
+ - Bjorn Andersson <andersson@kernel.org>
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+description:
+ The Qualcomm SA8255p SoC PCIe root complex controller is based on the
+ Synopsys DesignWare PCIe IP; it is managed by firmware and configured in
+ ECAM mode.
+
+properties:
+ compatible:
+ const: qcom,pcie-sa8255p
+
+ reg:
+ description:
+ The base address and size of the ECAM area for accessing PCI
+ Configuration Space, as accessed from the parent bus. The base
+ address corresponds to the first bus in the "bus-range" property. If
+ no "bus-range" is specified, this will be bus 0 (the default).
+ maxItems: 1
+
+ ranges:
+ description:
+ As described in IEEE Std 1275-1994, but must provide at least a
+ definition of non-prefetchable memory. A prefetchable memory range may
+ also be provided.
+ minItems: 1
+ maxItems: 2
+
+ interrupts:
+ minItems: 8
+ maxItems: 8
+
+ interrupt-names:
+ items:
+ - const: msi0
+ - const: msi1
+ - const: msi2
+ - const: msi3
+ - const: msi4
+ - const: msi5
+ - const: msi6
+ - const: msi7
+
+ power-domains:
+ maxItems: 1
+
+ dma-coherent: true
+ iommu-map: true
+
+required:
+ - compatible
+ - reg
+ - ranges
+ - power-domains
+ - interrupts
+ - interrupt-names
+
+allOf:
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pci@1c00000 {
+ compatible = "qcom,pcie-sa8255p";
+ reg = <0x4 0x00000000 0 0x10000000>;
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x02000000 0x0 0x40100000 0x0 0x40100000 0x0 0x1ff00000>,
+ <0x43000000 0x4 0x10100000 0x4 0x10100000 0x0 0x40000000>;
+ bus-range = <0x00 0xff>;
+ dma-coherent;
+ linux,pci-domain = <0>;
+ power-domains = <&scmi5_pd 0>;
+ iommu-map = <0x0 &pcie_smmu 0x0000 0x1>,
+ <0x100 &pcie_smmu 0x0001 0x1>;
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0", "msi1", "msi2", "msi3",
+ "msi4", "msi5", "msi6", "msi7";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sa8775p.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sa8775p.yaml
index e3fa232da2ca..19afe2a03409 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sa8775p.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sa8775p.yaml
@@ -16,7 +16,12 @@ description:
properties:
compatible:
- const: qcom,pcie-sa8775p
+ oneOf:
+ - const: qcom,pcie-sa8775p
+ - items:
+ - enum:
+ - qcom,pcie-qcs8300
+ - const: qcom,pcie-sa8775p
reg:
minItems: 6
@@ -61,11 +66,14 @@ properties:
- const: global
resets:
- maxItems: 1
+ items:
+ - description: PCIe controller reset
+ - description: PCIe link down reset
reset-names:
items:
- const: pci
+ - const: link_down
required:
- interconnects
@@ -161,8 +169,10 @@ examples:
power-domains = <&gcc PCIE_0_GDSC>;
- resets = <&gcc GCC_PCIE_0_BCR>;
- reset-names = "pci";
+ resets = <&gcc GCC_PCIE_0_BCR>,
+ <&gcc GCC_PCIE_0_LINK_DOWN_BCR>;
+ reset-names = "pci",
+ "link_down";
perst-gpios = <&tlmm 2 GPIO_ACTIVE_LOW>;
wake-gpios = <&tlmm 0 GPIO_ACTIVE_HIGH>;
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml
index ff508f592a1a..4d0a91556603 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml
@@ -165,9 +165,6 @@ examples:
iommu-map = <0x0 &apps_smmu 0x1c80 0x1>,
<0x100 &apps_smmu 0x1c81 0x1>;
- phys = <&pcie1_phy>;
- phy-names = "pciephy";
-
pinctrl-names = "default";
pinctrl-0 = <&pcie1_clkreq_n>;
@@ -176,7 +173,18 @@ examples:
resets = <&gcc GCC_PCIE_1_BCR>;
reset-names = "pci";
- perst-gpios = <&tlmm 2 GPIO_ACTIVE_LOW>;
vddpe-3v3-supply = <&pp3300_ssd>;
+ pcie1_port0: pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ phys = <&pcie1_phy>;
+
+ reset-gpios = <&tlmm 2 GPIO_ACTIVE_LOW>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sc8180x.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sc8180x.yaml
index 331fc25d7a17..34a4d7b2c845 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sc8180x.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sc8180x.yaml
@@ -33,8 +33,8 @@ properties:
- const: mhi # MHI registers
clocks:
- minItems: 8
- maxItems: 8
+ minItems: 6
+ maxItems: 6
clock-names:
items:
@@ -44,8 +44,6 @@ properties:
- const: bus_master # Master AXI clock
- const: bus_slave # Slave AXI clock
- const: slave_q2a # Slave Q2A clock
- - const: ref # REFERENCE clock
- - const: tbu # PCIe TBU clock
interrupts:
minItems: 8
@@ -117,17 +115,13 @@ examples:
<&gcc GCC_PCIE_0_CFG_AHB_CLK>,
<&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
<&gcc GCC_PCIE_0_SLV_AXI_CLK>,
- <&gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>,
- <&gcc GCC_PCIE_0_CLKREF_CLK>,
- <&gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>;
+ <&gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>;
clock-names = "pipe",
"aux",
"cfg",
"bus_master",
"bus_slave",
- "slave_q2a",
- "ref",
- "tbu";
+ "slave_q2a";
dma-coherent;
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml
index a604f2a79de3..26b247a41785 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml
@@ -16,7 +16,12 @@ description:
properties:
compatible:
- const: qcom,pcie-sm8150
+ oneOf:
+ - const: qcom,pcie-sm8150
+ - items:
+ - enum:
+ - qcom,pcie-qcs615
+ - const: qcom,pcie-sm8150
reg:
minItems: 5
@@ -33,8 +38,8 @@ properties:
- const: mhi # MHI registers
clocks:
- minItems: 8
- maxItems: 8
+ minItems: 6
+ maxItems: 6
clock-names:
items:
@@ -44,8 +49,6 @@ properties:
- const: bus_master # Master AXI clock
- const: bus_slave # Slave AXI clock
- const: slave_q2a # Slave Q2A clock
- - const: tbu # PCIe TBU clock
- - const: ref # REFERENCE clock
interrupts:
minItems: 8
@@ -111,17 +114,13 @@ examples:
<&gcc GCC_PCIE_0_CFG_AHB_CLK>,
<&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
<&gcc GCC_PCIE_0_SLV_AXI_CLK>,
- <&gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>,
- <&gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>,
- <&rpmhcc RPMH_CXO_CLK>;
+ <&gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>;
clock-names = "pipe",
"aux",
"cfg",
"bus_master",
"bus_slave",
- "slave_q2a",
- "tbu",
- "ref";
+ "slave_q2a";
interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
index 69e82f438f58..b3216141881c 100644
--- a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
@@ -108,7 +108,7 @@ properties:
- description: See native 'dbi' CSR region for details.
enum: [ ctrl ]
- description: See native 'elbi/app' CSR region for details.
- enum: [ apb, mgmt, link, ulreg, appl ]
+ enum: [ apb, mgmt, link, ulreg, appl, controller ]
- description: See native 'atu' CSR region for details.
enum: [ atu_dma ]
- description: Syscon-related CSR regions.
diff --git a/Documentation/devicetree/bindings/pci/sophgo,sg2044-pcie.yaml b/Documentation/devicetree/bindings/pci/sophgo,sg2044-pcie.yaml
new file mode 100644
index 000000000000..ff1133bae3ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/sophgo,sg2044-pcie.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/sophgo,sg2044-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: DesignWare based PCIe Root Complex controller on Sophgo SoCs
+
+maintainers:
+ - Inochi Amaoto <inochiama@gmail.com>
+
+description:
+ The SG2044 SoC PCIe Root Complex controller is based on the Synopsys
+ DesignWare PCIe IP and thus inherits all the common properties defined in
+ snps,dw-pcie.yaml.
+
+allOf:
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
+ - $ref: /schemas/pci/snps,dw-pcie.yaml#
+
+properties:
+ compatible:
+ const: sophgo,sg2044-pcie
+
+ reg:
+ items:
+ - description: Data Bus Interface (DBI) registers
+ - description: iATU registers
+ - description: Config registers
+ - description: Sophgo designed configuration registers
+
+ reg-names:
+ items:
+ - const: dbi
+ - const: atu
+ - const: config
+ - const: app
+
+ clocks:
+ items:
+ - description: core clk
+
+ clock-names:
+ items:
+ - const: core
+
+ interrupt-controller:
+ description: Interrupt controller node for handling legacy PCI interrupts.
+ type: object
+
+ properties:
+ "#address-cells":
+ const: 0
+
+ "#interrupt-cells":
+ const: 1
+
+ interrupt-controller: true
+
+ interrupts:
+ items:
+ - description: combined legacy interrupt
+
+ required:
+ - "#address-cells"
+ - "#interrupt-cells"
+ - interrupt-controller
+ - interrupts
+
+ additionalProperties: false
+
+ msi-parent: true
+
+ ranges:
+ maxItems: 5
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pcie@6c00400000 {
+ compatible = "sophgo,sg2044-pcie";
+ reg = <0x6c 0x00400000 0x0 0x00001000>,
+ <0x6c 0x00700000 0x0 0x00004000>,
+ <0x40 0x00000000 0x0 0x00001000>,
+ <0x6c 0x00780c00 0x0 0x00000400>;
+ reg-names = "dbi", "atu", "config", "app";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x00 0xff>;
+ clocks = <&clk 0>;
+ clock-names = "core";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ msi-parent = <&msi>;
+ ranges = <0x01000000 0x0 0x00000000 0x40 0x10000000 0x0 0x00200000>,
+ <0x42000000 0x0 0x00000000 0x0 0x00000000 0x0 0x04000000>,
+ <0x02000000 0x0 0x04000000 0x0 0x04000000 0x0 0x04000000>,
+ <0x43000000 0x42 0x00000000 0x42 0x00000000 0x2 0x00000000>,
+ <0x03000000 0x41 0x00000000 0x41 0x00000000 0x1 0x00000000>;
+
+ interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ interrupt-parent = <&intc>;
+ interrupts = <64 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/pci/spear13xx-pcie.txt b/Documentation/devicetree/bindings/pci/spear13xx-pcie.txt
deleted file mode 100644
index d5a14f5dad46..000000000000
--- a/Documentation/devicetree/bindings/pci/spear13xx-pcie.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-SPEAr13XX PCIe DT detail:
-================================
-
-SPEAr13XX uses the Synopsys DesignWare PCIe controller and ST MiPHY as PHY
-controller.
-
-Required properties:
-- compatible : should be "st,spear1340-pcie", "snps,dw-pcie".
-- phys : phandle to PHY node associated with PCIe controller
-- phy-names : must be "pcie-phy"
-- All other definitions as per generic PCI bindings
-
- Optional properties:
-- st,pcie-is-gen1 indicates that forced gen1 initialization is needed.
diff --git a/Documentation/devicetree/bindings/pci/st,spear1340-pcie.yaml b/Documentation/devicetree/bindings/pci/st,spear1340-pcie.yaml
new file mode 100644
index 000000000000..784f97b3cb7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/st,spear1340-pcie.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/st,spear1340-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ST SPEAr1340 PCIe controller
+
+maintainers:
+ - Pratyush Anand <pratyush.anand@gmail.com>
+
+description:
+ SPEAr13XX SoCs use the Synopsys DesignWare PCIe controller, with the ST
+ MiPHY as the PHY controller.
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: st,spear1340-pcie
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - const: st,spear1340-pcie
+ - const: snps,dw-pcie
+
+ phys:
+ maxItems: 1
+
+ st,pcie-is-gen1:
+ type: boolean
+ description: Indicates that forced Gen1 initialization is needed.
+
+required:
+ - compatible
+ - phys
+ - phy-names
+
+allOf:
+ - $ref: snps,dw-pcie.yaml#
+
+unevaluatedProperties: false
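The converted schema carries no example; a minimal node satisfying it might look like the following sketch, where the register address and MiPHY phandle are assumptions based on typical SPEAr1340 DTS layouts, not taken from the binding:

    pcie@b1000000 {
        compatible = "st,spear1340-pcie", "snps,dw-pcie";
        reg = <0xb1000000 0x4000>;
        phys = <&miphy0 1>;
        phy-names = "pcie-phy";
    };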
diff --git a/Documentation/devicetree/bindings/pci/xgene-pci.txt b/Documentation/devicetree/bindings/pci/xgene-pci.txt
deleted file mode 100644
index 92490330dc1c..000000000000
--- a/Documentation/devicetree/bindings/pci/xgene-pci.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-* AppliedMicro X-Gene PCIe interface
-
-Required properties:
-- device_type: set to "pci"
-- compatible: should contain "apm,xgene-pcie" to identify the core.
-- reg: A list of physical base address and length for each set of controller
- registers. Must contain an entry for each entry in the reg-names
- property.
-- reg-names: Must include the following entries:
- "csr": controller configuration registers.
- "cfg": PCIe configuration space registers.
-- #address-cells: set to <3>
-- #size-cells: set to <2>
-- ranges: ranges for the outbound memory, I/O regions.
-- dma-ranges: ranges for the inbound memory regions.
-- #interrupt-cells: set to <1>
-- interrupt-map-mask and interrupt-map: standard PCI properties
- to define the mapping of the PCIe interface to interrupt
- numbers.
-- clocks: from common clock binding: handle to pci clock.
-
-Optional properties:
-- status: Either "ok" or "disabled".
-- dma-coherent: Present if DMA operations are coherent
-
-Example:
-
- pcie0: pcie@1f2b0000 {
- status = "disabled";
- device_type = "pci";
- compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = < 0x00 0x1f2b0000 0x0 0x00010000 /* Controller registers */
- 0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */
- reg-names = "csr", "cfg";
- ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000 /* io */
- 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */
- dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
- 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
- interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1
- 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1
- 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1
- 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
- dma-coherent;
- clocks = <&pcie0clk 0>;
- };
-
diff --git a/Documentation/devicetree/bindings/phy/apm,xgene-phy.yaml b/Documentation/devicetree/bindings/phy/apm,xgene-phy.yaml
new file mode 100644
index 000000000000..0674391feeae
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/apm,xgene-phy.yaml
@@ -0,0 +1,169 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/apm,xgene-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: APM X-Gene 15Gbps Multi-purpose PHY
+
+maintainers:
+ - Khuong Dinh <khuong@os.amperecomputing.com>
+
+description:
+ PHY nodes are defined to describe on-chip 15Gbps Multi-purpose PHY. Each
+ PHY (pair of lanes) has its own node.
+
+properties:
+ compatible:
+ items:
+ - const: apm,xgene-phy
+
+ reg:
+ maxItems: 1
+
+ '#phy-cells':
+ description:
+ The single cell selects the PHY operating mode. Possible values are
+ 0 (SATA), 1 (SGMII), 2 (PCIe), 3 (USB), and 4 (XFI).
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ apm,tx-eye-tuning:
+ description:
+ Manual control to fine-tune the capture of the serial bit lines away from
+ the automatically calibrated position. Two sets of 3-tuple settings, one
+ for each supported link speed on the host. Range is 0 to 127 in units of
+ one bit period.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ minItems: 2
+ maxItems: 2
+ items:
+ minItems: 3
+ maxItems: 3
+ items:
+ minimum: 0
+ maximum: 127
+ default: 10
+
+ apm,tx-eye-direction:
+ description:
+ Eye tuning manual control direction. 0 means sample data earlier than the
+ nominal sampling point. 1 means sample data later than the nominal
+ sampling point. Two sets of 3-tuple settings, one for each supported link
+ speed on the host.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ minItems: 2
+ maxItems: 2
+ items:
+ minItems: 3
+ maxItems: 3
+ items:
+ enum: [0, 1]
+ default: 0
+
+ apm,tx-boost-gain:
+ description:
+ Frequency boost AC (LSB 3-bit) and DC (2-bit) gain control. Two sets of
+ 3-tuple settings, one for each supported link speed on the host. Range is
+ 0 to 31 in units of dB. Default is 3.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ minItems: 2
+ maxItems: 2
+ items:
+ minItems: 3
+ maxItems: 3
+ items:
+ minimum: 0
+ maximum: 31
+
+ apm,tx-amplitude:
+ description:
+ Amplitude control. Two sets of 3-tuple settings, one for each supported
+ link speed on the host. Range is 0 to 199500 in units of uV.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ minItems: 2
+ maxItems: 2
+ items:
+ minItems: 3
+ maxItems: 3
+ items:
+ minimum: 0
+ maximum: 199500
+ default: 199500
+
+ apm,tx-pre-cursor1:
+ description:
+ 1st pre-cursor emphasis taps control. Two sets of 3-tuple settings, one
+ for each supported link speed on the host. Range is 0 to 273000 in units
+ of uV.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ minItems: 2
+ maxItems: 2
+ items:
+ minItems: 3
+ maxItems: 3
+ items:
+ minimum: 0
+ maximum: 273000
+ default: 0
+
+ apm,tx-pre-cursor2:
+ description:
+ 2nd pre-cursor emphasis taps control. Two sets of 3-tuple settings, one
+ for each supported link speed on the host. Range is 0 to 127400 in units
+ of uV.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ minItems: 2
+ maxItems: 2
+ items:
+ minItems: 3
+ maxItems: 3
+ items:
+ minimum: 0
+ maximum: 127400
+ default: 0
+
+ apm,tx-post-cursor:
+ description: |
+ Post-cursor emphasis taps control. Two sets of 3-tuple settings for Gen1,
+ Gen2, and Gen3 link speeds. Range is 0 to 31 in units of 18.2 mV.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ minItems: 2
+ maxItems: 2
+ items:
+ minItems: 3
+ maxItems: 3
+ items:
+ minimum: 0
+ maximum: 31
+ default: 0xf
+
+ apm,tx-speed:
+ description: >
+ Tx operating speed. One 3-tuple, with one entry for each supported link
+ speed on the host:
+
+ 0 = 1-2Gbps
+ 1 = 2-4Gbps (1st tuple default)
+ 2 = 4-8Gbps
+ 3 = 8-15Gbps (2nd tuple default)
+ 4 = 2.5-4Gbps
+ 5 = 4-5Gbps
+ 6 = 5-6Gbps
+ 7 = 6-16Gbps (3rd tuple default).
+
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 3
+ maxItems: 3
+ items:
+ maximum: 7
+
+additionalProperties: false
+
+examples:
+ - |
+ phy@1f21a000 {
+ compatible = "apm,xgene-phy";
+ reg = <0x1f21a000 0x100>;
+ #phy-cells = <1>;
+ };
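Since the single PHY cell selects the operating mode, a consumer passes the mode as the specifier argument. A hedged sketch follows; the SATA controller node, its address, and the phy label are hypothetical (the "apm,xgene-ahci" compatible is the usual X-Gene SATA controller):

    sata@1a400000 {
        compatible = "apm,xgene-ahci";
        phys = <&phy2 0>;   /* argument 0 selects SATA mode */
        phy-names = "sata-phy";
    };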
diff --git a/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt b/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt
deleted file mode 100644
index 602cf952b92b..000000000000
--- a/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-* APM X-Gene 15Gbps Multi-purpose PHY nodes
-
-PHY nodes are defined to describe on-chip 15Gbps Multi-purpose PHY. Each
-PHY (pair of lanes) has its own node.
-
-Required properties:
-- compatible : Shall be "apm,xgene-phy".
-- reg : PHY memory resource is the SDS PHY access resource.
-- #phy-cells : Shall be 1 as it expects one argument for setting
- the mode of the PHY. Possible values are 0 (SATA),
- 1 (SGMII), 2 (PCIe), 3 (USB), and 4 (XFI).
-
-Optional properties:
-- status : Shall be "ok" if enabled or "disabled" if disabled.
- Default is "ok".
-- clocks : Reference to the clock entry.
-- apm,tx-eye-tuning : Manual control to fine tune the capture of the serial
- bit lines from the automatic calibrated position.
- Two set of 3-tuple setting for each (up to 3)
- supported link speed on the host. Range from 0 to
- 127 in unit of one bit period. Default is 10.
-- apm,tx-eye-direction : Eye tuning manual control direction. 0 means sample
- data earlier than the nominal sampling point. 1 means
- sample data later than the nominal sampling point.
- Two set of 3-tuple setting for each (up to 3)
- supported link speed on the host. Default is 0.
-- apm,tx-boost-gain : Frequency boost AC (LSB 3-bit) and DC (2-bit)
- gain control. Two set of 3-tuple setting for each
- (up to 3) supported link speed on the host. Range is
- between 0 to 31 in unit of dB. Default is 3.
-- apm,tx-amplitude : Amplitude control. Two set of 3-tuple setting for
- each (up to 3) supported link speed on the host.
- Range is between 0 to 199500 in unit of uV.
- Default is 199500 uV.
-- apm,tx-pre-cursor1 : 1st pre-cursor emphasis taps control. Two set of
- 3-tuple setting for each (up to 3) supported link
- speed on the host. Range is 0 to 273000 in unit of
- uV. Default is 0.
-- apm,tx-pre-cursor2 : 2nd pre-cursor emphasis taps control. Two set of
- 3-tuple setting for each (up to 3) supported link
- speed on the host. Range is 0 to 127400 in unit uV.
- Default is 0x0.
-- apm,tx-post-cursor : Post-cursor emphasis taps control. Two set of
- 3-tuple setting for Gen1, Gen2, and Gen3. Range is
- between 0 to 0x1f in unit of 18.2mV. Default is 0xf.
-- apm,tx-speed : Tx operating speed. One set of 3-tuple for each
- supported link speed on the host.
- 0 = 1-2Gbps
- 1 = 2-4Gbps (1st tuple default)
- 2 = 4-8Gbps
- 3 = 8-15Gbps (2nd tuple default)
- 4 = 2.5-4Gbps
- 5 = 4-5Gbps
- 6 = 5-6Gbps
- 7 = 6-16Gbps (3rd tuple default)
-
-NOTE: PHY override parameters are board specific setting.
-
-Example:
- phy1: phy@1f21a000 {
- compatible = "apm,xgene-phy";
- reg = <0x0 0x1f21a000 0x0 0x100>;
- #phy-cells = <1>;
- };
-
- phy2: phy@1f22a000 {
- compatible = "apm,xgene-phy";
- reg = <0x0 0x1f22a000 0x0 0x100>;
- #phy-cells = <1>;
- };
-
- phy3: phy@1f23a000 {
- compatible = "apm,xgene-phy";
- reg = <0x0 0x1f23a000 0x0 0x100>;
- #phy-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/phy/berlin-sata-phy.txt b/Documentation/devicetree/bindings/phy/berlin-sata-phy.txt
deleted file mode 100644
index c0155f842f62..000000000000
--- a/Documentation/devicetree/bindings/phy/berlin-sata-phy.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Berlin SATA PHY
----------------
-
-Required properties:
-- compatible: should be one of
- "marvell,berlin2-sata-phy"
- "marvell,berlin2q-sata-phy"
-- address-cells: should be 1
-- size-cells: should be 0
-- phy-cells: from the generic PHY bindings, must be 1
-- reg: address and length of the register
-- clocks: reference to the clock entry
-
-Sub-nodes:
-Each PHY should be represented as a sub-node.
-
-Sub-nodes required properties:
-- reg: the PHY number
-
-Example:
- sata_phy: phy@f7e900a0 {
- compatible = "marvell,berlin2q-sata-phy";
- reg = <0xf7e900a0 0x200>;
- clocks = <&chip CLKID_SATA>;
- #address-cells = <1>;
- #size-cells = <0>;
- #phy-cells = <1>;
-
- sata-phy@0 {
- reg = <0>;
- };
-
- sata-phy@1 {
- reg = <1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/phy/berlin-usb-phy.txt b/Documentation/devicetree/bindings/phy/berlin-usb-phy.txt
deleted file mode 100644
index be33780f668e..000000000000
--- a/Documentation/devicetree/bindings/phy/berlin-usb-phy.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-* Marvell Berlin USB PHY
-
-Required properties:
-- compatible: "marvell,berlin2-usb-phy" or "marvell,berlin2cd-usb-phy"
-- reg: base address and length of the registers
-- #phys-cells: should be 0
-- resets: reference to the reset controller
-
-Example:
-
- usb-phy@f774000 {
- compatible = "marvell,berlin2-usb-phy";
- reg = <0xf774000 0x128>;
- #phy-cells = <0>;
- resets = <&chip 0x104 14>;
- };
diff --git a/Documentation/devicetree/bindings/phy/brcm,ns2-drd-phy.txt b/Documentation/devicetree/bindings/phy/brcm,ns2-drd-phy.txt
deleted file mode 100644
index 04f063aa7883..000000000000
--- a/Documentation/devicetree/bindings/phy/brcm,ns2-drd-phy.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-BROADCOM NORTHSTAR2 USB2 (DUAL ROLE DEVICE) PHY
-
-Required properties:
- - compatible: brcm,ns2-drd-phy
- - reg: offset and length of the NS2 PHY related registers.
- - reg-names
- The below registers must be provided.
- icfg - for DRD ICFG configurations
- rst-ctrl - for DRD IDM reset
- crmu-ctrl - for CRMU core vdd, PHY and PHY PLL reset
- usb2-strap - for port over current polarity reversal
- - #phy-cells: Must be 0. No args required.
- - vbus-gpios: vbus gpio binding
- - id-gpios: id gpio binding
-
-Refer to phy/phy-bindings.txt for the generic PHY binding properties
-
-Example:
- usbdrd_phy: phy@66000960 {
- #phy-cells = <0>;
- compatible = "brcm,ns2-drd-phy";
- reg = <0x66000960 0x24>,
- <0x67012800 0x4>,
- <0x6501d148 0x4>,
- <0x664d0700 0x4>;
- reg-names = "icfg", "rst-ctrl",
- "crmu-ctrl", "usb2-strap";
- id-gpios = <&gpio_g 30 0>;
- vbus-gpios = <&gpio_g 31 0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/brcm,ns2-drd-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,ns2-drd-phy.yaml
new file mode 100644
index 000000000000..1fab97de5c0d
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,ns2-drd-phy.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/brcm,ns2-drd-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Northstar2 USB2 Dual Role Device PHY
+
+maintainers:
+ - Florian Fainelli <florian.fainelli@broadcom.com>
+ - Hauke Mehrtens <hauke@hauke-m.de>
+ - Rafał Miłecki <zajec5@gmail.com>
+
+properties:
+ compatible:
+ const: brcm,ns2-drd-phy
+
+ reg:
+ items:
+ - description: DRD ICFG configurations
+ - description: DRD IDM reset
+ - description: CRMU core vdd, PHY and PHY PLL reset
+ - description: Port over current polarity reversal
+
+ reg-names:
+ items:
+ - const: icfg
+ - const: rst-ctrl
+ - const: crmu-ctrl
+ - const: usb2-strap
+
+ '#phy-cells':
+ const: 0
+
+ id-gpios:
+ maxItems: 1
+ description: ID GPIO line
+
+ vbus-gpios:
+ maxItems: 1
+ description: VBUS GPIO line
+
+required:
+ - '#phy-cells'
+ - compatible
+ - reg
+ - reg-names
+ - id-gpios
+ - vbus-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ phy@66000960 {
+ #phy-cells = <0>;
+ compatible = "brcm,ns2-drd-phy";
+ reg = <0x66000960 0x24>, <0x67012800 0x4>, <0x6501d148 0x4>, <0x664d0700 0x4>;
+ reg-names = "icfg", "rst-ctrl", "crmu-ctrl", "usb2-strap";
+ id-gpios = <&gpio_g 30 0>;
+ vbus-gpios = <&gpio_g 31 0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/brcm,sr-pcie-phy.txt b/Documentation/devicetree/bindings/phy/brcm,sr-pcie-phy.txt
deleted file mode 100644
index e8d82286beb9..000000000000
--- a/Documentation/devicetree/bindings/phy/brcm,sr-pcie-phy.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Broadcom Stingray PCIe PHY
-
-Required properties:
-- compatible: must be "brcm,sr-pcie-phy"
-- reg: base address and length of the PCIe SS register space
-- brcm,sr-cdru: phandle to the CDRU syscon node
-- brcm,sr-mhb: phandle to the MHB syscon node
-- #phy-cells: Must be 1, denotes the PHY index
-
-For PAXB based root complex, one can have a configuration of up to 8 PHYs
-PHY index goes from 0 to 7
-
-For the internal PAXC based root complex, PHY index is always 8
-
-Example:
- mhb: syscon@60401000 {
- compatible = "brcm,sr-mhb", "syscon";
- reg = <0 0x60401000 0 0x38c>;
- };
-
- cdru: syscon@6641d000 {
- compatible = "brcm,sr-cdru", "syscon";
- reg = <0 0x6641d000 0 0x400>;
- };
-
- pcie_phy: phy@40000000 {
- compatible = "brcm,sr-pcie-phy";
- reg = <0 0x40000000 0 0x800>;
- brcm,sr-cdru = <&cdru>;
- brcm,sr-mhb = <&mhb>;
- #phy-cells = <1>;
- };
-
- /* users of the PCIe PHY */
-
- pcie0: pcie@48000000 {
- ...
- ...
- phys = <&pcie_phy 0>;
- phy-names = "pcie-phy";
- };
diff --git a/Documentation/devicetree/bindings/phy/brcm,sr-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,sr-pcie-phy.yaml
new file mode 100644
index 000000000000..60ccc0813ed5
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,sr-pcie-phy.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/brcm,sr-pcie-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Stingray PCIe PHY
+
+maintainers:
+ - Ray Jui <ray.jui@broadcom.com>
+
+description: >
+ For a PAXB-based root complex, up to 8 PHYs can be configured; the PHY
+ index goes from 0 to 7.
+
+ For the internal PAXC-based root complex, the PHY index is always 8.
+
+properties:
+ compatible:
+ const: brcm,sr-pcie-phy
+
+ reg:
+ maxItems: 1
+
+ '#phy-cells':
+ const: 1
+
+ brcm,sr-cdru:
+ description: phandle to the CDRU syscon node
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ brcm,sr-mhb:
+ description: phandle to the MHB syscon node
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+additionalProperties: false
+
+examples:
+ - |
+ phy@40000000 {
+ compatible = "brcm,sr-pcie-phy";
+ reg = <0x40000000 0x800>;
+ brcm,sr-cdru = <&cdru>;
+ brcm,sr-mhb = <&mhb>;
+ #phy-cells = <1>;
+ };
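The consumer snippet from the removed text file is still the clearest way to see how the PHY cell carries the index; reproduced here as a sketch with the root complex node abbreviated:

    pcie@48000000 {
        phys = <&pcie_phy 0>;   /* PHY index 0, first PAXB port */
        phy-names = "pcie-phy";
    };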
diff --git a/Documentation/devicetree/bindings/phy/brcm,sr-usb-combo-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,sr-usb-combo-phy.yaml
new file mode 100644
index 000000000000..6224ba0f2990
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,sr-usb-combo-phy.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/brcm,sr-usb-combo-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Stingray USB PHY
+
+maintainers:
+ - Ray Jui <rjui@broadcom.com>
+ - Scott Branden <sbranden@broadcom.com>
+
+properties:
+ compatible:
+ enum:
+ - brcm,sr-usb-combo-phy
+ - brcm,sr-usb-hs-phy
+
+ reg:
+ maxItems: 1
+
+ '#phy-cells':
+ description: PHY specifier cell count. The combo PHY expects one argument
+ selecting the PHY (0 for HS, 1 for SS); the HS-only PHY expects none.
+ enum: [ 0, 1 ]
+
+required:
+ - compatible
+ - reg
+ - '#phy-cells'
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,sr-usb-combo-phy
+ then:
+ properties:
+ '#phy-cells':
+ const: 1
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,sr-usb-hs-phy
+ then:
+ properties:
+ '#phy-cells':
+ const: 0
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-phy@0 {
+ compatible = "brcm,sr-usb-combo-phy";
+ reg = <0x00000000 0x100>;
+ #phy-cells = <1>;
+ };
+ - |
+ usb-phy@20000 {
+ compatible = "brcm,sr-usb-hs-phy";
+ reg = <0x00020000 0x100>;
+ #phy-cells = <0>;
+ };
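A hedged consumer sketch of the two specifier conventions, with hypothetical controller nodes and the phy labels borrowed from the removed text file's example: the combo PHY takes one argument selecting HS (0) or SS (1), while the HS-only PHY takes none:

    usb@68500000 {
        phys = <&usbphy0 1>;    /* SS PHY of the combo pair */
        phy-names = "usb-phy";
    };

    usb@68510000 {
        phys = <&usbphy2>;      /* HS-only PHY, no argument */
        phy-names = "usb-phy";
    };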
diff --git a/Documentation/devicetree/bindings/phy/brcm,stingray-usb-phy.txt b/Documentation/devicetree/bindings/phy/brcm,stingray-usb-phy.txt
deleted file mode 100644
index 4ba298966af9..000000000000
--- a/Documentation/devicetree/bindings/phy/brcm,stingray-usb-phy.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Broadcom Stingray USB PHY
-
-Required properties:
- - compatible : should be one of the listed compatibles
- - "brcm,sr-usb-combo-phy" is combo PHY has two PHYs, one SS and one HS.
- - "brcm,sr-usb-hs-phy" is a single HS PHY.
- - reg: offset and length of the PHY blocks registers
- - #phy-cells:
- - Must be 1 for brcm,sr-usb-combo-phy as it expects one argument to indicate
- the PHY number of two PHYs. 0 for HS PHY and 1 for SS PHY.
- - Must be 0 for brcm,sr-usb-hs-phy.
-
-Refer to phy/phy-bindings.txt for the generic PHY binding properties
-
-Example:
- usbphy0: usb-phy@0 {
- compatible = "brcm,sr-usb-combo-phy";
- reg = <0x00000000 0x100>;
- #phy-cells = <1>;
- };
-
- usbphy1: usb-phy@10000 {
- compatible = "brcm,sr-usb-combo-phy";
- reg = <0x00010000 0x100>,
- #phy-cells = <1>;
- };
-
- usbphy2: usb-phy@20000 {
- compatible = "brcm,sr-usb-hs-phy";
- reg = <0x00020000 0x100>,
- #phy-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/dm816x-phy.txt b/Documentation/devicetree/bindings/phy/dm816x-phy.txt
deleted file mode 100644
index 2fe3d11d063d..000000000000
--- a/Documentation/devicetree/bindings/phy/dm816x-phy.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Device tree binding documentation for am816x USB PHY
-=========================
-
-Required properties:
-- compatible : should be "ti,dm816x-usb-phy"
-- reg : offset and length of the PHY register set.
-- reg-names : name for the phy registers
-- clocks : phandle to the clock
-- clock-names : name of the clock
-- syscon: phandle for the syscon node to access misc registers
-- #phy-cells : from the generic PHY bindings, must be 1
-- syscon: phandle for the syscon node to access misc registers
-
-Example:
-
-usb_phy0: usb-phy@20 {
- compatible = "ti,dm8168-usb-phy";
- reg = <0x20 0x8>;
- reg-names = "phy";
- clocks = <&main_fapll 6>;
- clock-names = "refclk";
- #phy-cells = <0>;
- syscon = <&scm_conf>;
-};
diff --git a/Documentation/devicetree/bindings/phy/hisilicon,hi6220-usb-phy.yaml b/Documentation/devicetree/bindings/phy/hisilicon,hi6220-usb-phy.yaml
new file mode 100644
index 000000000000..376586a666e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/hisilicon,hi6220-usb-phy.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/hisilicon,hi6220-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Hisilicon hi6220 USB PHY
+
+maintainers:
+ - Zhangfei Gao <zhangfei.gao@linaro.org>
+
+properties:
+ compatible:
+ const: hisilicon,hi6220-usb-phy
+
+ '#phy-cells':
+ const: 0
+
+ phy-supply:
+ description: PHY power supply.
+
+ hisilicon,peripheral-syscon:
+ description: Phandle to the system controller for PHY control.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+additionalProperties: false
+
+examples:
+ - |
+ usbphy {
+ compatible = "hisilicon,hi6220-usb-phy";
+ #phy-cells = <0>;
+ phy-supply = <&fixed_5v_hub>;
+ hisilicon,peripheral-syscon = <&sys_ctrl>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/hisilicon,hix5hd2-sata-phy.yaml b/Documentation/devicetree/bindings/phy/hisilicon,hix5hd2-sata-phy.yaml
new file mode 100644
index 000000000000..2993dd6b40a8
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/hisilicon,hix5hd2-sata-phy.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/hisilicon,hix5hd2-sata-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HiSilicon hix5hd2 SATA PHY
+
+maintainers:
+ - Jiancheng Xue <xuejiancheng@huawei.com>
+
+properties:
+ compatible:
+ const: hisilicon,hix5hd2-sata-phy
+
+ reg:
+ maxItems: 1
+
+ '#phy-cells':
+ const: 0
+
+ hisilicon,peripheral-syscon:
+ description: Phandle of syscon used to control peripheral
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ hisilicon,power-reg:
+ description: Offset and bit number within peripheral-syscon register controlling SATA power supply
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - description: Offset within peripheral-syscon register
+ - description: Bit number controlling SATA power supply
+
+required:
+ - compatible
+ - reg
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ phy@f9900000 {
+ compatible = "hisilicon,hix5hd2-sata-phy";
+ reg = <0xf9900000 0x10000>;
+ #phy-cells = <0>;
+ hisilicon,peripheral-syscon = <&peripheral_ctrl>;
+ hisilicon,power-reg = <0x8 10>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/hisilicon,inno-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/hisilicon,inno-usb2-phy.yaml
new file mode 100644
index 000000000000..51ea0e54ce35
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/hisilicon,inno-usb2-phy.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/hisilicon,inno-usb2-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HiSilicon INNO USB2 PHY
+
+maintainers:
+ - Pengcheng Li <lpc.li@hisilicon.com>
+
+description:
+ The INNO USB2 PHY device should be a child node of the peripheral controller
+ that contains the PHY configuration register. Each device supports up to two
+ PHY ports, which are represented as child nodes of the INNO USB2 PHY device.
+
+properties:
+ compatible:
+ enum:
+ - hisilicon,hi3798cv200-usb2-phy
+ - hisilicon,hi3798mv100-usb2-phy
+ - hisilicon,inno-usb2-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^phy@[0-1]$":
+ description: PHY port subnode
+ type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ maximum: 1
+
+ "#phy-cells":
+ const: 0
+
+ resets:
+ maxItems: 1
+
+ required:
+ - reg
+ - "#phy-cells"
+ - resets
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/histb-clock.h>
+
+ usb2-phy@120 {
+ compatible = "hisilicon,hi3798cv200-usb2-phy";
+ reg = <0x120 0x4>;
+ clocks = <&crg HISTB_USB2_PHY1_REF_CLK>;
+ resets = <&crg 0xbc 4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ resets = <&crg 0xbc 8>;
+ };
+
+ phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ resets = <&crg 0xbc 9>;
+ };
+ };
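The example above shows the PHY node on its own; per the description it must sit
under the peripheral controller that holds the PHY configuration register. A
minimal parent-container sketch, adapted from the example in the txt binding
removed later in this patch (node names and the ranges value are illustrative):

    perictrl: peripheral-controller@8a20000 {
        compatible = "hisilicon,hi3798cv200-perictrl", "simple-mfd";
        reg = <0x8a20000 0x1000>;
        #address-cells = <1>;
        #size-cells = <1>;
        ranges = <0x0 0x8a20000 0x1000>;

        usb2-phy@120 {
            compatible = "hisilicon,hi3798cv200-usb2-phy";
            reg = <0x120 0x4>;
            /* clocks, resets and the phy@0/phy@1 ports as in the example above */
        };
    };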
diff --git a/Documentation/devicetree/bindings/phy/hix5hd2-phy.txt b/Documentation/devicetree/bindings/phy/hix5hd2-phy.txt
deleted file mode 100644
index 296168b74d24..000000000000
--- a/Documentation/devicetree/bindings/phy/hix5hd2-phy.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Hisilicon hix5hd2 SATA PHY
------------------------
-
-Required properties:
-- compatible: should be "hisilicon,hix5hd2-sata-phy"
-- reg: offset and length of the PHY registers
-- #phy-cells: must be 0
-Refer to phy/phy-bindings.txt for the generic PHY binding properties
-
-Optional Properties:
-- hisilicon,peripheral-syscon: phandle of syscon used to control peripheral.
-- hisilicon,power-reg: offset and bit number within peripheral-syscon,
- register of controlling sata power supply.
-
-Example:
- sata_phy: phy@f9900000 {
- compatible = "hisilicon,hix5hd2-sata-phy";
- reg = <0xf9900000 0x10000>;
- #phy-cells = <0>;
- hisilicon,peripheral-syscon = <&peripheral_ctrl>;
- hisilicon,power-reg = <0x8 10>;
- };
diff --git a/Documentation/devicetree/bindings/phy/img,pistachio-usb-phy.yaml b/Documentation/devicetree/bindings/phy/img,pistachio-usb-phy.yaml
new file mode 100644
index 000000000000..bcc19bc68297
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/img,pistachio-usb-phy.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/img,pistachio-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Imagination Pistachio USB PHY
+
+maintainers:
+ - Andrew Bresticker <abrestic@chromium.org>
+
+properties:
+ compatible:
+ const: img,pistachio-usb-phy
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: usb_phy
+
+ '#phy-cells':
+ const: 0
+
+ phy-supply:
+ description: USB VBUS supply. Must supply 5.0V.
+
+ img,refclk:
+ description:
+ Reference clock source for the USB PHY. See
+ <dt-bindings/phy/phy-pistachio-usb.h> for valid values.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ img,cr-top:
+ description: CR_TOP syscon phandle.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - '#phy-cells'
+ - img,refclk
+ - img,cr-top
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/phy/phy-pistachio-usb.h>
+ #include <dt-bindings/clock/pistachio-clk.h>
+
+ usb-phy {
+ compatible = "img,pistachio-usb-phy";
+ clocks = <&clk_core CLK_USB_PHY>;
+ clock-names = "usb_phy";
+ #phy-cells = <0>;
+ phy-supply = <&usb_vbus>;
+ img,refclk = <REFCLK_CLK_CORE>;
+ img,cr-top = <&cr_top>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/keystone-usb-phy.txt b/Documentation/devicetree/bindings/phy/keystone-usb-phy.txt
deleted file mode 100644
index 300830dda0bf..000000000000
--- a/Documentation/devicetree/bindings/phy/keystone-usb-phy.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-TI Keystone USB PHY
-
-Required properties:
- - compatible: should be "ti,keystone-usbphy".
- - #address-cells, #size-cells : should be '1' if the device has sub-nodes
- with 'reg' property.
- - reg : Address and length of the usb phy control register set.
-
-The main purpose of this PHY driver is to enable the USB PHY reference clock
-gate on the Keystone SOC for both the USB2 and USB3 PHY. Otherwise it is just
-an NOP PHY driver. Hence this node is referenced as both the usb2 and usb3
-phy node in the USB Glue layer driver node.
-
-usb_phy: usb_phy@2620738 {
- compatible = "ti,keystone-usbphy";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x2620738 32>;
-};
diff --git a/Documentation/devicetree/bindings/phy/lantiq,ase-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/lantiq,ase-usb2-phy.yaml
new file mode 100644
index 000000000000..99b5da705ca4
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/lantiq,ase-usb2-phy.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/lantiq,ase-usb2-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lantiq XWAY SoC RCU USB 1.1/2.0 PHY
+
+maintainers:
+ - Hauke Mehrtens <hauke@hauke-m.de>
+
+description:
+ This node has to be a sub node of the Lantiq RCU block.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - lantiq,ase-usb2-phy
+ - lantiq,danube-usb2-phy
+ - lantiq,xrx100-usb2-phy
+ - lantiq,xrx200-usb2-phy
+ - lantiq,xrx300-usb2-phy
+
+ reg:
+ items:
+ - description: Offset of the USB PHY configuration register
+ - description: Offset of the USB Analog configuration register
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: phy
+
+ resets:
+ minItems: 1
+ maxItems: 2
+
+ reset-names:
+ minItems: 1
+ items:
+ - enum: [ phy, ctrl ]
+ - const: ctrl
+
+ '#phy-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ usb2-phy@18 {
+ compatible = "lantiq,xrx200-usb2-phy";
+ reg = <0x18 4>, <0x38 4>;
+ clocks = <&pmu 1>;
+ clock-names = "phy";
+ resets = <&reset1 4 4>, <&reset0 4 4>;
+ reset-names = "phy", "ctrl";
+ #phy-cells = <0>;
+ };
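Because the PHY has to be a sub node of the Lantiq RCU block, the node above is
expected inside an RCU container; a sketch of that nesting follows. The RCU
node's compatible and most properties are elided as assumptions — see the
Lantiq RCU binding for its real shape:

    rcu@203000 {
        /* compatible and other properties per the Lantiq RCU binding */
        #address-cells = <1>;
        #size-cells = <1>;
        ranges = <0x0 0x203000 0x100>;

        usb_phy0: usb2-phy@18 {
            compatible = "lantiq,xrx200-usb2-phy";
            reg = <0x18 4>, <0x38 4>;
            /* clocks, resets, reset-names, #phy-cells as in the example above */
        };
    };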
diff --git a/Documentation/devicetree/bindings/phy/marvell,armada-375-usb-cluster.yaml b/Documentation/devicetree/bindings/phy/marvell,armada-375-usb-cluster.yaml
new file mode 100644
index 000000000000..1706c31644e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/marvell,armada-375-usb-cluster.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/marvell,armada-375-usb-cluster.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Armada 375 USB Cluster
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+description:
+ Control register for the Armada 375 USB cluster, managing USB2 and USB3 features.
+
+properties:
+ compatible:
+ const: marvell,armada-375-usb-cluster
+
+ reg:
+ maxItems: 1
+
+ '#phy-cells':
+ description: The single cell in the PHY specifier selects the controller; possible values are 1 (USB2) and 2 (USB3).
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ usbcluster: usb-cluster@18400 {
+ compatible = "marvell,armada-375-usb-cluster";
+ reg = <0x18400 0x4>;
+ #phy-cells = <1>;
+ };
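Since #phy-cells is 1 and the single specifier cell selects the controller
(1 for USB2, 2 for USB3), a USB2 host controller would reference the cluster
as below; the consumer node is a hypothetical sketch, not part of this binding:

    usb@54000 {
        compatible = "marvell,orion-ehci";
        reg = <0x54000 0x500>;
        phys = <&usbcluster 1>;    /* 1 selects the USB2 controller */
        phy-names = "usb";
    };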
diff --git a/Documentation/devicetree/bindings/phy/marvell,armada-380-comphy.yaml b/Documentation/devicetree/bindings/phy/marvell,armada-380-comphy.yaml
new file mode 100644
index 000000000000..dcb4c0007832
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/marvell,armada-380-comphy.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/marvell,armada-380-comphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Armada 38x COMPHY controller
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+description:
+ This comphy controller can be found on Marvell Armada 38x. It provides a
+ number of shared PHYs used by various interfaces (network, SATA, USB,
+ PCIe...).
+
+properties:
+ compatible:
+ items:
+ - const: marvell,armada-380-comphy
+
+ reg:
+ items:
+ - description: COMPHY register location and length
+ - description: Configuration register location and length
+
+ reg-names:
+ items:
+ - const: comphy
+ - const: conf
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ '^phy@[0-5]$':
+ description: A COMPHY lane
+ type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ maximum: 1
+
+ '#phy-cells':
+ description: Input port index for the PHY lane
+ const: 1
+
+ required:
+ - reg
+ - '#phy-cells'
+
+required:
+ - compatible
+ - reg
+ - '#address-cells'
+ - '#size-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ comphy: phy@18300 {
+ compatible = "marvell,armada-380-comphy";
+ reg = <0x18300 0x100>, <0x18460 4>;
+ reg-names = "comphy", "conf";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpm_comphy0: phy@0 {
+ reg = <0>;
+ #phy-cells = <1>;
+ };
+
+ cpm_comphy1: phy@1 {
+ reg = <1>;
+ #phy-cells = <1>;
+ };
+ };
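Each lane's #phy-cells is 1, and the cell gives the input port to use on that
lane, so a consumer passes both the lane phandle and a port index. A
hypothetical consumer reference (the controller node and port number are
illustrative):

    ethernet@70000 {
        /* other controller properties elided */
        phys = <&cpm_comphy0 1>;    /* lane 0, input port 1 */
    };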
diff --git a/Documentation/devicetree/bindings/phy/marvell,berlin2-sata-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,berlin2-sata-phy.yaml
new file mode 100644
index 000000000000..6fc9ff96e682
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/marvell,berlin2-sata-phy.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/marvell,berlin2-sata-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Berlin SATA PHY
+
+maintainers:
+ - Antoine Tenart <atenart@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - marvell,berlin2-sata-phy
+ - marvell,berlin2q-sata-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ '#phy-cells':
+ const: 1
+
+patternProperties:
+ '^sata-phy@[0-1]$':
+ description: A SATA PHY sub-node.
+ type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ maximum: 1
+ description: PHY index number.
+
+ required:
+ - reg
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#address-cells'
+ - '#size-cells'
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/berlin2q.h>
+
+ phy@f7e900a0 {
+ compatible = "marvell,berlin2q-sata-phy";
+ reg = <0xf7e900a0 0x200>;
+ clocks = <&chip CLKID_SATA>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #phy-cells = <1>;
+
+ sata-phy@0 {
+ reg = <0>;
+ };
+
+ sata-phy@1 {
+ reg = <1>;
+ };
+ };
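With #phy-cells = 1 on the controller node, consumers pick one of the sub-node
lanes by index; a hypothetical AHCI consumer might use the following (the
&sata_phy label is assumed on the PHY node above):

    sata@e90000 {
        /* other controller properties elided */
        phys = <&sata_phy 0>;    /* first SATA PHY lane */
    };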
diff --git a/Documentation/devicetree/bindings/phy/marvell,berlin2-usb-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,berlin2-usb-phy.yaml
new file mode 100644
index 000000000000..b401e12a600c
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/marvell,berlin2-usb-phy.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/marvell,berlin2-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Berlin USB PHY
+
+maintainers:
+ - Antoine Tenart <atenart@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - marvell,berlin2-usb-phy
+ - marvell,berlin2cd-usb-phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-phy@f774000 {
+ compatible = "marvell,berlin2-usb-phy";
+ reg = <0xf774000 0x128>;
+ #phy-cells = <0>;
+ resets = <&chip 0x104 14>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/marvell,comphy-cp110.yaml b/Documentation/devicetree/bindings/phy/marvell,comphy-cp110.yaml
new file mode 100644
index 000000000000..d9501df42886
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/marvell,comphy-cp110.yaml
@@ -0,0 +1,154 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/marvell,comphy-cp110.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell MVEBU COMPHY Controller
+
+maintainers:
+ - Miquel Raynal <miquel.raynal@bootlin.com>
+
+description: >
+ COMPHY controllers can be found on the following Marvell MVEBU SoCs:
+
+ * Armada 7k/8k (on the CP110)
+ * Armada 3700
+
+ It provides a number of shared PHYs used by various interfaces (network, SATA,
+ USB, PCIe...).
+
+properties:
+ compatible:
+ enum:
+ - marvell,comphy-cp110
+ - marvell,comphy-a3700
+
+ reg:
+ minItems: 1
+ items:
+ - description: Generic COMPHY registers
+ - description: Lane 1 (PCIe/GbE) registers (Armada 3700)
+ - description: Lane 0 (USB3/GbE) registers (Armada 3700)
+ - description: Lane 2 (SATA/USB3) registers (Armada 3700)
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: comphy
+ - const: lane1_pcie_gbe
+ - const: lane0_usb3_gbe
+ - const: lane2_sata_usb3
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ clocks:
+ maxItems: 3
+ description: Reference clocks for CP110; MG clock, MG Core clock, AXI clock
+
+ clock-names:
+ items:
+ - const: mg_clk
+ - const: mg_core_clk
+ - const: axi_clk
+
+ marvell,system-controller:
+ description: Phandle to the Marvell system controller (CP110 only)
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+patternProperties:
+ '^phy@[0-2]$':
+ description: A COMPHY lane child node
+ type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ description: COMPHY lane number
+
+ '#phy-cells':
+ const: 1
+
+ required:
+ - reg
+ - '#phy-cells'
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ const: marvell,comphy-a3700
+
+ then:
+ properties:
+ clocks: false
+ clock-names: false
+
+ required:
+ - reg-names
+
+ else:
+ required:
+ - marvell,system-controller
+
+examples:
+ - |
+ phy@120000 {
+ compatible = "marvell,comphy-cp110";
+ reg = <0x120000 0x6000>;
+ clocks = <&clk 1 5>, <&clk 1 6>, <&clk 1 18>;
+ clock-names = "mg_clk", "mg_core_clk", "axi_clk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ marvell,system-controller = <&syscon0>;
+
+ phy@0 {
+ reg = <0>;
+ #phy-cells = <1>;
+ };
+
+ phy@1 {
+ reg = <1>;
+ #phy-cells = <1>;
+ };
+ };
+
+ - |
+ phy@18300 {
+ compatible = "marvell,comphy-a3700";
+ reg = <0x18300 0x300>,
+ <0x1F000 0x400>,
+ <0x5C000 0x400>,
+ <0xe0178 0x8>;
+ reg-names = "comphy",
+ "lane1_pcie_gbe",
+ "lane0_usb3_gbe",
+ "lane2_sata_usb3";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ comphy0: phy@0 {
+ reg = <0>;
+ #phy-cells = <1>;
+ };
+
+ comphy1: phy@1 {
+ reg = <1>;
+ #phy-cells = <1>;
+ };
+
+ comphy2: phy@2 {
+ reg = <2>;
+ #phy-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/marvell,mmp2-usb-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,mmp2-usb-phy.yaml
new file mode 100644
index 000000000000..af1ae2406f65
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/marvell,mmp2-usb-phy.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/marvell,mmp2-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell MMP2/PXA USB PHY
+
+maintainers:
+ - Lubomir Rintel <lkundrak@v3.sk>
+
+properties:
+ compatible:
+ enum:
+ - marvell,mmp2-usb-phy
+ - marvell,pxa910-usb-phy
+ - marvell,pxa168-usb-phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ usbphy@d4207000 {
+ compatible = "marvell,mmp2-usb-phy";
+ reg = <0xd4207000 0x40>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/marvell,mvebu-sata-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,mvebu-sata-phy.yaml
new file mode 100644
index 000000000000..81e942428911
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/marvell,mvebu-sata-phy.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/marvell,mvebu-sata-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell MVEBU SATA PHY
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
+
+properties:
+ compatible:
+ const: marvell,mvebu-sata-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: sata
+
+ '#phy-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ sata-phy@84000 {
+ compatible = "marvell,mvebu-sata-phy";
+ reg = <0x84000 0x0334>;
+ clocks = <&gate_clk 15>;
+ clock-names = "sata";
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml b/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml
index 3c28ec50f097..286a4fcc977d 100644
--- a/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml
@@ -72,11 +72,6 @@ allOf:
contains:
const: fsl,imx8qxp-mipi-dphy
then:
- properties:
- assigned-clocks: false
- assigned-clock-parents: false
- assigned-clock-rates: false
-
required:
- fsl,syscon
diff --git a/Documentation/devicetree/bindings/phy/motorola,cpcap-usb-phy.yaml b/Documentation/devicetree/bindings/phy/motorola,cpcap-usb-phy.yaml
new file mode 100644
index 000000000000..0febd04a61f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/motorola,cpcap-usb-phy.yaml
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/motorola,cpcap-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Motorola CPCAP PMIC USB PHY
+
+maintainers:
+ - Tony Lindgren <tony@atomide.com>
+
+properties:
+ compatible:
+ enum:
+ - motorola,cpcap-usb-phy
+ - motorola,mapphone-cpcap-usb-phy
+
+ '#phy-cells':
+ const: 0
+
+ interrupts:
+ description: CPCAP PMIC interrupts used by the USB PHY
+ items:
+ - description: id_ground interrupt
+ - description: id_float interrupt
+ - description: se0conn interrupt
+ - description: vbusvld interrupt
+ - description: sessvld interrupt
+ - description: sessend interrupt
+ - description: se1 interrupt
+ - description: dm interrupt
+ - description: dp interrupt
+
+ interrupt-names:
+ description: Interrupt names
+ items:
+ - const: id_ground
+ - const: id_float
+ - const: se0conn
+ - const: vbusvld
+ - const: sessvld
+ - const: sessend
+ - const: se1
+ - const: dm
+ - const: dp
+
+ io-channels:
+ description: IIO ADC channels used by the USB PHY
+ items:
+ - description: vbus channel
+ - description: id channel
+
+ io-channel-names:
+ items:
+ - const: vbus
+ - const: id
+
+ vusb-supply: true
+
+ pinctrl-names:
+ items:
+ - const: default
+ - const: ulpi
+ - const: utmi
+ - const: uart
+
+ mode-gpios:
+ description: Optional GPIOs for configuring alternate modes
+ items:
+ - description: "mode selection GPIO #0"
+ - description: "mode selection GPIO #1"
+
+required:
+ - compatible
+ - '#phy-cells'
+ - interrupts-extended
+ - interrupt-names
+ - io-channels
+ - io-channel-names
+ - vusb-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ phy {
+ compatible = "motorola,mapphone-cpcap-usb-phy";
+ #phy-cells = <0>;
+ interrupts-extended = <
+ &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
+ &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
+ &cpcap 48 1
+ >;
+ interrupt-names = "id_ground", "id_float", "se0conn", "vbusvld",
+ "sessvld", "sessend", "se1", "dm", "dp";
+ io-channels = <&cpcap_adc 2>, <&cpcap_adc 7>;
+ io-channel-names = "vbus", "id";
+ vusb-supply = <&vusb>;
+ pinctrl-0 = <&usb_gpio_mux_sel1 &usb_gpio_mux_sel2>;
+ pinctrl-1 = <&usb_ulpi_pins>;
+ pinctrl-2 = <&usb_utmi_pins>;
+ pinctrl-3 = <&uart3_pins>;
+ pinctrl-names = "default", "ulpi", "utmi", "uart";
+ mode-gpios = <&gpio2 28 GPIO_ACTIVE_HIGH>, <&gpio1 0 GPIO_ACTIVE_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/motorola,mapphone-mdm6600.yaml b/Documentation/devicetree/bindings/phy/motorola,mapphone-mdm6600.yaml
new file mode 100644
index 000000000000..cb6544b3478d
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/motorola,mapphone-mdm6600.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/motorola,mapphone-mdm6600.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Motorola Mapphone MDM6600 USB PHY
+
+maintainers:
+ - Tony Lindgren <tony@atomide.com>
+
+properties:
+ compatible:
+ items:
+ - const: motorola,mapphone-mdm6600
+
+ enable-gpios:
+ description: GPIO to enable the USB PHY
+ maxItems: 1
+
+ power-gpios:
+ description: GPIO to power on the device
+ maxItems: 1
+
+ reset-gpios:
+ description: GPIO to reset the device
+ maxItems: 1
+
+ motorola,mode-gpios:
+ description: Two GPIOs to configure MDM6600 USB start-up mode for normal mode versus USB flashing mode
+ items:
+ - description: normal mode select GPIO
+ - description: USB flashing mode select GPIO
+
+ motorola,cmd-gpios:
+ description: Three GPIOs to control the power state of the MDM6600
+ items:
+ - description: power state control GPIO 0
+ - description: power state control GPIO 1
+ - description: power state control GPIO 2
+
+ motorola,status-gpios:
+ description: Three GPIOs to read the power state of the MDM6600
+ items:
+ - description: power state read GPIO 0
+ - description: power state read GPIO 1
+ - description: power state read GPIO 2
+
+ '#phy-cells':
+ const: 0
+
+required:
+ - compatible
+ - enable-gpios
+ - power-gpios
+ - reset-gpios
+ - motorola,mode-gpios
+ - motorola,cmd-gpios
+ - motorola,status-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ usb-phy {
+ compatible = "motorola,mapphone-mdm6600";
+ enable-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
+ power-gpios = <&gpio2 22 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio2 17 GPIO_ACTIVE_HIGH>;
+ motorola,mode-gpios = <&gpio5 20 GPIO_ACTIVE_HIGH>,
+ <&gpio5 21 GPIO_ACTIVE_HIGH>;
+ motorola,cmd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>,
+ <&gpio4 8 GPIO_ACTIVE_HIGH>,
+ <&gpio5 14 GPIO_ACTIVE_HIGH>;
+ motorola,status-gpios = <&gpio2 20 GPIO_ACTIVE_HIGH>,
+ <&gpio2 21 GPIO_ACTIVE_HIGH>,
+ <&gpio2 23 GPIO_ACTIVE_HIGH>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-armada38x-comphy.txt b/Documentation/devicetree/bindings/phy/phy-armada38x-comphy.txt
deleted file mode 100644
index 8b5a7a28a35b..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-armada38x-comphy.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-mvebu armada 38x comphy driver
-------------------------------
-
-This comphy controller can be found on Marvell Armada 38x. It provides a
-number of shared PHYs used by various interfaces (network, sata, usb,
-PCIe...).
-
-Required properties:
-
-- compatible: should be "marvell,armada-380-comphy"
-- reg: should contain the comphy register location and length.
-- #address-cells: should be 1.
-- #size-cells: should be 0.
-
-Optional properties:
-
-- reg-names: must be "comphy" as the first name, and "conf".
-- reg: must contain the comphy register location and length as the first
- pair, followed by an optional configuration register address and
- length pair.
-
-A sub-node is required for each comphy lane provided by the comphy.
-
-Required properties (child nodes):
-
-- reg: comphy lane number.
-- #phy-cells : from the generic phy bindings, must be 1. Defines the
- input port to use for a given comphy lane.
-
-Example:
-
- comphy: phy@18300 {
- compatible = "marvell,armada-380-comphy";
- reg-names = "comphy", "conf";
- reg = <0x18300 0x100>, <0x18460 4>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpm_comphy0: phy@0 {
- reg = <0>;
- #phy-cells = <1>;
- };
-
- cpm_comphy1: phy@1 {
- reg = <1>;
- #phy-cells = <1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt b/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
deleted file mode 100644
index c3a29c5feea3..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-* Atheros AR71XX/9XXX USB PHY
-
-Required properties:
-- compatible: "qca,ar7100-usb-phy"
-- #phys-cells: should be 0
-- reset-names: "phy"[, "suspend-override"]
-- resets: references to the reset controllers
-
-Example:
-
- usb-phy {
- compatible = "qca,ar7100-usb-phy";
-
- reset-names = "phy", "suspend-override";
- resets = <&rst 4>, <&rst 3>;
-
- #phy-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/phy-cpcap-usb.txt b/Documentation/devicetree/bindings/phy/phy-cpcap-usb.txt
deleted file mode 100644
index 2eb9b2b69037..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-cpcap-usb.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-Motorola CPCAP PMIC USB PHY binding
-
-Required properties:
-compatible: Shall be either "motorola,cpcap-usb-phy" or
- "motorola,mapphone-cpcap-usb-phy"
-#phy-cells: Shall be 0
-interrupts: CPCAP PMIC interrupts used by the USB PHY
-interrupt-names: Interrupt names
-io-channels: IIO ADC channels used by the USB PHY
-io-channel-names: IIO ADC channel names
-vusb-supply: Regulator for the PHY
-
-Optional properties:
-pinctrl: Optional alternate pin modes for the PHY
-pinctrl-names: Names for optional pin modes
-mode-gpios: Optional GPIOs for configuring alternate modes
-
-Example:
-cpcap_usb2_phy: phy {
- compatible = "motorola,mapphone-cpcap-usb-phy";
- pinctrl-0 = <&usb_gpio_mux_sel1 &usb_gpio_mux_sel2>;
- pinctrl-1 = <&usb_ulpi_pins>;
- pinctrl-2 = <&usb_utmi_pins>;
- pinctrl-3 = <&uart3_pins>;
- pinctrl-names = "default", "ulpi", "utmi", "uart";
- #phy-cells = <0>;
- interrupts-extended = <
- &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
- &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
- &cpcap 48 1
- >;
- interrupt-names =
- "id_ground", "id_float", "se0conn", "vbusvld",
- "sessvld", "sessend", "se1", "dm", "dp";
- mode-gpios = <&gpio2 28 GPIO_ACTIVE_HIGH
- &gpio1 0 GPIO_ACTIVE_HIGH>;
- io-channels = <&cpcap_adc 2>, <&cpcap_adc 7>;
- io-channel-names = "vbus", "id";
- vusb-supply = <&vusb>;
-};
diff --git a/Documentation/devicetree/bindings/phy/phy-da8xx-usb.txt b/Documentation/devicetree/bindings/phy/phy-da8xx-usb.txt
deleted file mode 100644
index c26478be391b..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-da8xx-usb.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-TI DA8xx/OMAP-L1xx/AM18xx USB PHY
-
-Required properties:
- - compatible: must be "ti,da830-usb-phy".
- - #phy-cells: must be 1.
-
-This device controls the PHY for both the USB 1.1 OHCI and USB 2.0 OTG
-controllers on DA8xx SoCs. Consumers of this device should use index 0 for
-the USB 2.0 phy device and index 1 for the USB 1.1 phy device.
-
-It also requires a "syscon" node with compatible = "ti,da830-cfgchip", "syscon"
-to access the CFGCHIP2 register.
-
-Example:
-
- cfgchip: cfgchip@1417c {
- compatible = "ti,da830-cfgchip", "syscon";
- reg = <0x1417c 0x14>;
- };
-
- usb_phy: usb-phy {
- compatible = "ti,da830-usb-phy";
- #phy-cells = <1>;
- };
-
- usb20: usb@200000 {
- compatible = "ti,da830-musb";
- reg = <0x200000 0x1000>;
- interrupts = <58>;
- phys = <&usb_phy 0>;
- phy-names = "usb-phy";
- };
-
- usb11: usb@225000 {
- compatible = "ti,da830-ohci";
- reg = <0x225000 0x1000>;
- interrupts = <59>;
- phys = <&usb_phy 1>;
- phy-names = "usb-phy";
- };
diff --git a/Documentation/devicetree/bindings/phy/phy-hi6220-usb.txt b/Documentation/devicetree/bindings/phy/phy-hi6220-usb.txt
deleted file mode 100644
index f17a56e2152f..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-hi6220-usb.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-Hisilicon hi6220 usb PHY
------------------------
-
-Required properties:
-- compatible: should be "hisilicon,hi6220-usb-phy"
-- #phy-cells: must be 0
-- hisilicon,peripheral-syscon: phandle of syscon used to control phy.
-Refer to phy/phy-bindings.txt for the generic PHY binding properties
-
-Example:
- usb_phy: usbphy {
- compatible = "hisilicon,hi6220-usb-phy";
- #phy-cells = <0>;
- phy-supply = <&fixed_5v_hub>;
- hisilicon,peripheral-syscon = <&sys_ctrl>;
- };
diff --git a/Documentation/devicetree/bindings/phy/phy-hisi-inno-usb2.txt b/Documentation/devicetree/bindings/phy/phy-hisi-inno-usb2.txt
deleted file mode 100644
index 104953e849e7..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-hisi-inno-usb2.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-Device tree bindings for HiSilicon INNO USB2 PHY
-
-Required properties:
-- compatible: Should be one of the following strings:
- "hisilicon,inno-usb2-phy",
- "hisilicon,hi3798cv200-usb2-phy".
-- reg: Should be the address space for PHY configuration register in peripheral
- controller, e.g. PERI_USB0 for USB 2.0 PHY01 on Hi3798CV200 SoC.
-- clocks: The phandle and clock specifier pair for INNO USB2 PHY device
- reference clock.
-- resets: The phandle and reset specifier pair for INNO USB2 PHY device reset
- signal.
-- #address-cells: Must be 1.
-- #size-cells: Must be 0.
-
-The INNO USB2 PHY device should be a child node of peripheral controller that
-contains the PHY configuration register, and each device supports up to 2 PHY
-ports which are represented as child nodes of INNO USB2 PHY device.
-
-Required properties for PHY port node:
-- reg: The PHY port instance number.
-- #phy-cells: Defined by generic PHY bindings. Must be 0.
-- resets: The phandle and reset specifier pair for PHY port reset signal.
-
-Refer to phy/phy-bindings.txt for the generic PHY binding properties
-
-Example:
-
-perictrl: peripheral-controller@8a20000 {
- compatible = "hisilicon,hi3798cv200-perictrl", "simple-mfd";
- reg = <0x8a20000 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x8a20000 0x1000>;
-
- usb2_phy1: usb2-phy@120 {
- compatible = "hisilicon,hi3798cv200-usb2-phy";
- reg = <0x120 0x4>;
- clocks = <&crg HISTB_USB2_PHY1_REF_CLK>;
- resets = <&crg 0xbc 4>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- usb2_phy1_port0: phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- resets = <&crg 0xbc 8>;
- };
-
- usb2_phy1_port1: phy@1 {
- reg = <1>;
- #phy-cells = <0>;
- resets = <&crg 0xbc 9>;
- };
- };
-
- usb2_phy2: usb2-phy@124 {
- compatible = "hisilicon,hi3798cv200-usb2-phy";
- reg = <0x124 0x4>;
- clocks = <&crg HISTB_USB2_PHY2_REF_CLK>;
- resets = <&crg 0xbc 6>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- usb2_phy2_port0: phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- resets = <&crg 0xbc 10>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/phy/phy-lantiq-rcu-usb2.txt b/Documentation/devicetree/bindings/phy/phy-lantiq-rcu-usb2.txt
deleted file mode 100644
index 643948b6b576..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-lantiq-rcu-usb2.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-Lantiq XWAY SoC RCU USB 1.1/2.0 PHY binding
-===========================================
-
-This binding describes the USB PHY hardware provided by the RCU module on the
-Lantiq XWAY SoCs.
-
-This node has to be a sub node of the Lantiq RCU block.
-
--------------------------------------------------------------------------------
-Required properties (controller (parent) node):
-- compatible : Should be one of
- "lantiq,ase-usb2-phy"
- "lantiq,danube-usb2-phy"
- "lantiq,xrx100-usb2-phy"
- "lantiq,xrx200-usb2-phy"
- "lantiq,xrx300-usb2-phy"
-- reg : Defines the following sets of registers in the parent
- syscon device
- - Offset of the USB PHY configuration register
- - Offset of the USB Analog configuration
- register (only for xrx200 and xrx200)
-- clocks : References to the (PMU) "phy" clk gate.
-- clock-names : Must be "phy"
-- resets : References to the RCU USB configuration reset bits.
-- reset-names : Must be one of the following:
- "phy" (optional)
- "ctrl" (shared)
-
--------------------------------------------------------------------------------
-Example for the USB PHYs on an xRX200 SoC:
- usb_phy0: usb2-phy@18 {
- compatible = "lantiq,xrx200-usb2-phy";
- reg = <0x18 4>, <0x38 4>;
-
- clocks = <&pmu PMU_GATE_USB0_PHY>;
- clock-names = "phy";
- resets = <&reset1 4 4>, <&reset0 4 4>;
- reset-names = "phy", "ctrl";
- #phy-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/phy-lpc18xx-usb-otg.txt b/Documentation/devicetree/bindings/phy/phy-lpc18xx-usb-otg.txt
deleted file mode 100644
index 3bb821cd6a7f..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-lpc18xx-usb-otg.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-NXP LPC18xx/43xx internal USB OTG PHY binding
----------------------------------------------
-
-This file contains documentation for the internal USB OTG PHY found
-in NXP LPC18xx and LPC43xx SoCs.
-
-Required properties:
-- compatible : must be "nxp,lpc1850-usb-otg-phy"
-- clocks : must be exactly one entry
-See: Documentation/devicetree/bindings/clock/clock-bindings.txt
-- #phy-cells : must be 0 for this phy
-See: Documentation/devicetree/bindings/phy/phy-bindings.txt
-
-The phy node must be a child of the creg syscon node.
-
-Example:
-creg: syscon@40043000 {
- compatible = "nxp,lpc1850-creg", "syscon", "simple-mfd";
- reg = <0x40043000 0x1000>;
-
- usb0_otg_phy: phy {
- compatible = "nxp,lpc1850-usb-otg-phy";
- clocks = <&ccu1 CLK_USB0>;
- #phy-cells = <0>;
- };
-};
diff --git a/Documentation/devicetree/bindings/phy/phy-mapphone-mdm6600.txt b/Documentation/devicetree/bindings/phy/phy-mapphone-mdm6600.txt
deleted file mode 100644
index 29427d4f047a..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-mapphone-mdm6600.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Device tree binding documentation for Motorola Mapphone MDM6600 USB PHY
-
-Required properties:
-- compatible Must be "motorola,mapphone-mdm6600"
-- enable-gpios GPIO to enable the USB PHY
-- power-gpios GPIO to power on the device
-- reset-gpios GPIO to reset the device
-- motorola,mode-gpios Two GPIOs to configure MDM6600 USB start-up mode for
- normal mode versus USB flashing mode
-- motorola,cmd-gpios Three GPIOs to control the power state of the MDM6600
-- motorola,status-gpios Three GPIOs to read the power state of the MDM6600
-
-Example:
-
-usb-phy {
- compatible = "motorola,mapphone-mdm6600";
- enable-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
- power-gpios = <&gpio2 22 GPIO_ACTIVE_HIGH>;
- reset-gpios = <&gpio2 17 GPIO_ACTIVE_HIGH>;
- motorola,mode-gpios = <&gpio5 20 GPIO_ACTIVE_HIGH>,
- <&gpio5 21 GPIO_ACTIVE_HIGH>;
- motorola,cmd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>,
- <&gpio4 8 GPIO_ACTIVE_HIGH>,
- <&gpio5 14 GPIO_ACTIVE_HIGH>;
- motorola,status-gpios = <&gpio2 20 GPIO_ACTIVE_HIGH>,
- <&gpio2 21 GPIO_ACTIVE_HIGH>,
- <&gpio2 23 GPIO_ACTIVE_HIGH>;
- #phy-cells = <0>;
-};
diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
deleted file mode 100644
index 5ffd0f55d010..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-MVEBU comphy drivers
---------------------
-
-COMPHY controllers can be found on the following Marvell MVEBU SoCs:
-* Armada 7k/8k (on the CP110)
-* Armada 3700
-It provides a number of shared PHYs used by various interfaces (network, SATA,
-USB, PCIe...).
-
-Required properties:
-
-- compatible: should be one of:
- * "marvell,comphy-cp110" for Armada 7k/8k
- * "marvell,comphy-a3700" for Armada 3700
-- reg: should contain the COMPHY register(s) location(s) and length(s).
- * 1 entry for Armada 7k/8k
- * 4 entries for Armada 3700 along with the corresponding reg-names
- properties, memory areas are:
- * Generic COMPHY registers
- * Lane 1 (PCIe/GbE)
- * Lane 0 (USB3/GbE)
- * Lane 2 (SATA/USB3)
-- marvell,system-controller: should contain a phandle to the system
- controller node (only for Armada 7k/8k)
-- #address-cells: should be 1.
-- #size-cells: should be 0.
-
-Optional properlties:
-
-- clocks: pointers to the reference clocks for this device (CP110 only),
- consequently: MG clock, MG Core clock, AXI clock.
-- clock-names: names of used clocks for CP110 only, must be :
- "mg_clk", "mg_core_clk" and "axi_clk".
-
-A sub-node is required for each comphy lane provided by the comphy.
-
-Required properties (child nodes):
-
-- reg: COMPHY lane number.
-- #phy-cells : from the generic PHY bindings, must be 1. Defines the
- input port to use for a given comphy lane.
-
-Examples:
-
- CP11X_LABEL(comphy): phy@120000 {
- compatible = "marvell,comphy-cp110";
- reg = <0x120000 0x6000>;
- marvell,system-controller = <&CP11X_LABEL(syscon0)>;
- clocks = <&CP11X_LABEL(clk) 1 5>, <&CP11X_LABEL(clk) 1 6>,
- <&CP11X_LABEL(clk) 1 18>;
- clock-names = "mg_clk", "mg_core_clk", "axi_clk";
- #address-cells = <1>;
- #size-cells = <0>;
-
- CP11X_LABEL(comphy0): phy@0 {
- reg = <0>;
- #phy-cells = <1>;
- };
-
- CP11X_LABEL(comphy1): phy@1 {
- reg = <1>;
- #phy-cells = <1>;
- };
- };
-
- comphy: phy@18300 {
- compatible = "marvell,comphy-a3700";
- reg = <0x18300 0x300>,
- <0x1F000 0x400>,
- <0x5C000 0x400>,
- <0xe0178 0x8>;
- reg-names = "comphy",
- "lane1_pcie_gbe",
- "lane0_usb3_gbe",
- "lane2_sata_usb3";
- #address-cells = <1>;
- #size-cells = <0>;
-
-
- comphy0: phy@0 {
- reg = <0>;
- #phy-cells = <1>;
- };
-
- comphy1: phy@1 {
- reg = <1>;
- #phy-cells = <1>;
- };
-
- comphy2: phy@2 {
- reg = <2>;
- #phy-cells = <1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu.txt b/Documentation/devicetree/bindings/phy/phy-mvebu.txt
deleted file mode 100644
index 64afdd13d91d..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-mvebu.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-* Marvell MVEBU SATA PHY
-
-Power control for the SATA phy found on Marvell MVEBU SoCs.
-
-This document extends the binding described in phy-bindings.txt
-
-Required properties :
-
- - reg : Offset and length of the register set for the SATA device
- - compatible : Should be "marvell,mvebu-sata-phy"
- - clocks : phandle of clock and specifier that supplies the device
- - clock-names : Should be "sata"
-
-Example:
- sata-phy@84000 {
- compatible = "marvell,mvebu-sata-phy";
- reg = <0x84000 0x0334>;
- clocks = <&gate_clk 15>;
- clock-names = "sata";
- #phy-cells = <0>;
- };
-
-Armada 375 USB cluster
-----------------------
-
-Armada 375 comes with an USB2 host and device controller and an USB3
-controller. The USB cluster control register allows to manage common
-features of both USB controllers.
-
-Required properties:
-
-- compatible: "marvell,armada-375-usb-cluster"
-- reg: Should contain usb cluster register location and length.
-- #phy-cells : from the generic phy bindings, must be 1. Possible
-values are 1 (USB2), 2 (USB3).
-
-Example:
- usbcluster: usb-cluster@18400 {
- compatible = "marvell,armada-375-usb-cluster";
- reg = <0x18400 0x4>;
- #phy-cells = <1>
- };
diff --git a/Documentation/devicetree/bindings/phy/phy-pxa-usb.txt b/Documentation/devicetree/bindings/phy/phy-pxa-usb.txt
deleted file mode 100644
index d80e36a77ec5..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-pxa-usb.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Marvell PXA USB PHY
--------------------
-
-Required properties:
-- compatible: one of: "marvell,mmp2-usb-phy", "marvell,pxa910-usb-phy",
- "marvell,pxa168-usb-phy",
-- #phy-cells: must be 0
-
-Example:
- usb-phy: usbphy@d4207000 {
- compatible = "marvell,mmp2-usb-phy";
- reg = <0xd4207000 0x40>;
- #phy-cells = <0>;
- status = "okay";
- };
-
-This document explains the device tree binding. For general
-information about PHY subsystem refer to Documentation/driver-api/phy/phy.rst
diff --git a/Documentation/devicetree/bindings/phy/pistachio-usb-phy.txt b/Documentation/devicetree/bindings/phy/pistachio-usb-phy.txt
deleted file mode 100644
index c7970c07ee32..000000000000
--- a/Documentation/devicetree/bindings/phy/pistachio-usb-phy.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-IMG Pistachio USB PHY
-=====================
-
-Required properties:
---------------------
- - compatible: Must be "img,pistachio-usb-phy".
- - #phy-cells: Must be 0. See ./phy-bindings.txt for details.
- - clocks: Must contain an entry for each entry in clock-names.
- See ../clock/clock-bindings.txt for details.
- - clock-names: Must include "usb_phy".
- - img,cr-top: Must contain a phandle to the CR_TOP syscon node.
- - img,refclk: Indicates the reference clock source for the USB PHY.
- See <dt-bindings/phy/phy-pistachio-usb.h> for a list of valid values.
-
-Optional properties:
---------------------
- - phy-supply: USB VBUS supply. Must supply 5.0V.
-
-Example:
---------
-usb_phy: usb-phy {
- compatible = "img,pistachio-usb-phy";
- clocks = <&clk_core CLK_USB_PHY>;
- clock-names = "usb_phy";
- phy-supply = <&usb_vbus>;
- img,refclk = <REFCLK_CLK_CORE>;
- img,cr-top = <&cr_top>;
- #phy-cells = <0>;
-};
diff --git a/Documentation/devicetree/bindings/phy/qca,ar7100-usb-phy.yaml b/Documentation/devicetree/bindings/phy/qca,ar7100-usb-phy.yaml
new file mode 100644
index 000000000000..029665530829
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qca,ar7100-usb-phy.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/qca,ar7100-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atheros AR71XX/9XXX USB PHY
+
+maintainers:
+ - Alban Bedel <albeu@free.fr>
+
+properties:
+ compatible:
+ items:
+ - const: qca,ar7100-usb-phy
+
+ reset-names:
+ description: Names of reset lines in order.
+ minItems: 1
+ items:
+ - const: phy
+ - const: suspend-override
+
+ resets:
+ description: References to the reset controllers.
+ minItems: 1
+ items:
+ - description: Reset controller for phy
+ - description: Reset controller for suspend-override
+
+ '#phy-cells':
+ const: 0
+
+required:
+ - compatible
+ - reset-names
+ - resets
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-phy {
+ compatible = "qca,ar7100-usb-phy";
+ reset-names = "phy", "suspend-override";
+ resets = <&rst 4>, <&rst 3>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/qcom,m31-eusb2-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,m31-eusb2-phy.yaml
new file mode 100644
index 000000000000..c84c62d0e8cb
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom,m31-eusb2-phy.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/qcom,m31-eusb2-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm M31 eUSB2 phy
+
+maintainers:
+ - Wesley Cheng <quic_wcheng@quicinc.com>
+
+description:
+ M31-based eUSB2 controller, which supports LS/FS/HS USB connectivity
+ on Qualcomm chipsets. It is paired with an eUSB2 repeater.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - qcom,sm8750-m31-eusb2-phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+ clocks:
+ items:
+ - description: reference clock
+
+ clock-names:
+ items:
+ - const: ref
+
+ resets:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+ description:
+ Phandle to eUSB2 repeater
+
+ vdd-supply:
+ description:
+ Phandle to 0.88V regulator supply to PHY digital circuit.
+
+ vdda12-supply:
+ description:
+ Phandle to 1.2V regulator supply to PHY refclk pll block.
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+ - clocks
+ - clock-names
+ - resets
+ - vdd-supply
+ - vdda12-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ usb_1_hsphy: phy@88e3000 {
+ compatible = "qcom,sm8750-m31-eusb2-phy";
+ reg = <0x88e3000 0x29c>;
+
+ clocks = <&tcsrcc_usb2_clkref_en>;
+ clock-names = "ref";
+
+ resets = <&gcc_qusb2phy_prim_bcr>;
+
+ #phy-cells = <0>;
+
+ vdd-supply = <&vreg_l2d_0p88>;
+ vdda12-supply = <&vreg_l3g_1p2>;
+ };
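The optional phys property is how the PHY is paired with its eUSB2 repeater;
when a repeater is present, the node above would additionally carry a reference
like the one below (the repeater label is an assumption):

    usb_1_hsphy: phy@88e3000 {
        /* properties as in the example above */
        phys = <&eusb2_repeater>;
    };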
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
index 2c6c9296e4c0..a1ae8c7988c8 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
@@ -145,6 +145,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,qcs615-qmp-gen3x1-pcie-phy
- qcom,sar2130p-qmp-gen3x2-pcie-phy
- qcom,sc8180x-qmp-pcie-phy
- qcom,sdm845-qhp-pcie-phy
@@ -175,7 +176,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,qcs615-qmp-gen3x1-pcie-phy
- qcom,sc8280xp-qmp-gen3x1-pcie-phy
- qcom,sc8280xp-qmp-gen3x2-pcie-phy
- qcom,sc8280xp-qmp-gen3x4-pcie-phy
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
index 358a6736a951..38ce04c35d94 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
@@ -29,6 +29,7 @@ properties:
- qcom,sm8450-qmp-usb3-dp-phy
- qcom,sm8550-qmp-usb3-dp-phy
- qcom,sm8650-qmp-usb3-dp-phy
+ - qcom,sm8750-qmp-usb3-dp-phy
- qcom,x1e80100-qmp-usb3-dp-phy
reg:
@@ -133,6 +134,7 @@ allOf:
- qcom,sm6350-qmp-usb3-dp-phy
- qcom,sm8550-qmp-usb3-dp-phy
- qcom,sm8650-qmp-usb3-dp-phy
+ - qcom,sm8750-qmp-usb3-dp-phy
- qcom,x1e80100-qmp-usb3-dp-phy
then:
required:
diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml
index 142b3c8839d6..854f70af0a6c 100644
--- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml
@@ -17,6 +17,7 @@ properties:
oneOf:
- items:
- enum:
+ - qcom,milos-snps-eusb2-phy
- qcom,sar2130p-snps-eusb2-phy
- qcom,sdx75-snps-eusb2-phy
- qcom,sm8650-snps-eusb2-phy
diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
index d16a543a7848..27f064a71c9f 100644
--- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
@@ -39,21 +39,18 @@ properties:
description: High-Speed disconnect threshold
minimum: 0
maximum: 7
- default: 0
qcom,tune-usb2-amplitude:
$ref: /schemas/types.yaml#/definitions/uint8
description: High-Speed transmit amplitude
minimum: 0
maximum: 15
- default: 8
qcom,tune-usb2-preem:
$ref: /schemas/types.yaml#/definitions/uint8
description: High-Speed TX pre-emphasis tuning
minimum: 0
maximum: 7
- default: 5
required:
- compatible
diff --git a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
index 2822dce8d9f4..f45c5f039ae8 100644
--- a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
@@ -40,6 +40,10 @@ properties:
- renesas,usb2-phy-r9a07g054 # RZ/V2L
- const: renesas,rzg2l-usb2-phy
+ - items:
+ - const: renesas,usb2-phy-r9a09g056 # RZ/V2N
+ - const: renesas,usb2-phy-r9a09g057
+
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml
index b2250e4a6b1b..16967ef8e9ec 100644
--- a/Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml
@@ -29,6 +29,7 @@ properties:
- samsung,s5pv210-mipi-video-phy
- samsung,exynos5420-mipi-video-phy
- samsung,exynos5433-mipi-video-phy
+ - samsung,exynos7870-mipi-video-phy
"#phy-cells":
const: 1
@@ -46,19 +47,20 @@ properties:
deprecated: true
description:
Phandle to PMU system controller interface, valid for
- samsung,exynos5433-mipi-video-phy (if not a child of PMU).
+ samsung,exynos5433-mipi-video-phy and samsung,exynos7870-mipi-video-phy
+ (if not a child of PMU).
samsung,disp-sysreg:
$ref: /schemas/types.yaml#/definitions/phandle
description:
Phandle to DISP system controller interface, valid for
- samsung,exynos5433-mipi-video-phy.
+ samsung,exynos5433-mipi-video-phy and samsung,exynos7870-mipi-video-phy.
samsung,cam0-sysreg:
$ref: /schemas/types.yaml#/definitions/phandle
description:
Phandle to CAM0 system controller interface, valid for
- samsung,exynos5433-mipi-video-phy.
+ samsung,exynos5433-mipi-video-phy and samsung,exynos7870-mipi-video-phy.
samsung,cam1-sysreg:
$ref: /schemas/types.yaml#/definitions/phandle
@@ -84,7 +86,13 @@ allOf:
samsung,disp-sysreg: false
samsung,cam0-sysreg: false
samsung,cam1-sysreg: false
- else:
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-mipi-video-phy
+ then:
properties:
syscon: false
required:
@@ -92,6 +100,19 @@ allOf:
- samsung,cam0-sysreg
- samsung,cam1-sysreg
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7870-mipi-video-phy
+ then:
+ properties:
+ syscon: false
+ samsung,cam1-sysreg: false
+ required:
+ - samsung,disp-sysreg
+ - samsung,cam0-sysreg
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
index cc60d2f6f70e..e906403208c0 100644
--- a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
@@ -33,6 +33,7 @@ properties:
- samsung,exynos7-usbdrd-phy
- samsung,exynos7870-usbdrd-phy
- samsung,exynos850-usbdrd-phy
+ - samsung,exynos990-usbdrd-phy
clocks:
minItems: 1
@@ -217,6 +218,7 @@ allOf:
- samsung,exynos5420-usbdrd-phy
- samsung,exynos7870-usbdrd-phy
- samsung,exynos850-usbdrd-phy
+ - samsung,exynos990-usbdrd-phy
then:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/phy/st,spear1310-miphy.yaml b/Documentation/devicetree/bindings/phy/st,spear1310-miphy.yaml
new file mode 100644
index 000000000000..32f81615ddad
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/st,spear1310-miphy.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/st,spear1310-miphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ST SPEAr miphy
+
+maintainers:
+ - Pratyush Anand <pratyush.anand@gmail.com>
+
+description:
+ ST Microelectronics SPEAr miphy is a phy controller supporting PCIe and SATA.
+
+properties:
+ compatible:
+ enum:
+ - st,spear1310-miphy
+ - st,spear1340-miphy
+
+ reg:
+ maxItems: 1
+
+ misc:
+ description: Phandle for the syscon node to access misc registers.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ '#phy-cells':
+ description: >
+ Cell[0] indicates interface type: 0 = SATA, 1 = PCIe.
+ const: 1
+
+ phy-id:
+ description: Instance ID of the PHY. Required when multiple PHYs are present.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+required:
+ - compatible
+ - reg
+ - misc
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ miphy@1000 {
+ compatible = "st,spear1310-miphy";
+ reg = <0x1000 0x100>;
+ misc = <&syscon>;
+ #phy-cells = <1>;
+ phy-id = <0>;
+ };
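Cell[0] of the PHY specifier picks the interface, so a SATA consumer passes 0
and a PCIe consumer passes 1. A hypothetical SATA consumer (node name, address,
and the &miphy label are illustrative):

    sata@b1000000 {
        /* other controller properties elided */
        phys = <&miphy 0>;    /* 0 = SATA */
        phy-names = "sata-phy";
    };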
diff --git a/Documentation/devicetree/bindings/phy/st-spear-miphy.txt b/Documentation/devicetree/bindings/phy/st-spear-miphy.txt
deleted file mode 100644
index 2a6bfdcc09b3..000000000000
--- a/Documentation/devicetree/bindings/phy/st-spear-miphy.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-ST SPEAr miphy DT details
-=========================
-
-ST Microelectronics SPEAr miphy is a phy controller supporting PCIe and SATA.
-
-Required properties:
-- compatible : should be "st,spear1310-miphy" or "st,spear1340-miphy"
-- reg : offset and length of the PHY register set.
-- misc: phandle for the syscon node to access misc registers
-- #phy-cells : from the generic PHY bindings, must be 1.
- - cell[1]: 0 if phy used for SATA, 1 for PCIe.
-
-Optional properties:
-- phy-id: Instance id of the phy. Only required when there are multiple phys
- present on a implementation.
diff --git a/Documentation/devicetree/bindings/phy/ti,da830-usb-phy.yaml b/Documentation/devicetree/bindings/phy/ti,da830-usb-phy.yaml
new file mode 100644
index 000000000000..e168cbce8fd1
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/ti,da830-usb-phy.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/ti,da830-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI DA8xx/OMAP-L1xx/AM18xx USB PHY
+
+maintainers:
+ - David Lechner <david@lechnology.com>
+
+description: >
+ This device controls the PHY for both the USB 1.1 OHCI and USB 2.0 OTG
+ controllers on DA8xx SoCs.
+
+ It also requires a "syscon" node with compatible = "ti,da830-cfgchip", "syscon"
+ to access the CFGCHIP2 register.
+
+properties:
+ compatible:
+ items:
+ - const: ti,da830-usb-phy
+
+ '#phy-cells':
+ const: 1
+ description:
+ Consumers of this device should use index 0 for the USB 2.0 phy device and
+ index 1 for the USB 1.1 phy device.
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: usb0_clk48
+ - const: usb1_clk48
+
+required:
+ - compatible
+ - '#phy-cells'
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-phy {
+ compatible = "ti,da830-usb-phy";
+ #phy-cells = <1>;
+ clocks = <&usb_phy_clk 0>, <&usb_phy_clk 1>;
+ clock-names = "usb0_clk48", "usb1_clk48";
+ };
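The txt binding removed earlier in this patch carried consumer examples showing
the two indices in use; for reference, a sketch along the same lines:

    usb20: usb@200000 {
        compatible = "ti,da830-musb";
        reg = <0x200000 0x1000>;
        interrupts = <58>;
        phys = <&usb_phy 0>;    /* index 0: USB 2.0 PHY */
        phy-names = "usb-phy";
    };

    usb11: usb@225000 {
        compatible = "ti,da830-ohci";
        reg = <0x225000 0x1000>;
        interrupts = <59>;
        phys = <&usb_phy 1>;    /* index 1: USB 1.1 PHY */
        phy-names = "usb-phy";
    };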
diff --git a/Documentation/devicetree/bindings/phy/ti,dm8168-usb-phy.yaml b/Documentation/devicetree/bindings/phy/ti,dm8168-usb-phy.yaml
new file mode 100644
index 000000000000..673dc1d37dcb
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/ti,dm8168-usb-phy.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/ti,dm8168-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI DM8168 USB PHY
+
+maintainers:
+ - Tony Lindgren <tony@atomide.com>
+
+properties:
+ compatible:
+ const: ti,dm8168-usb-phy
+
+ reg:
+ maxItems: 1
+
+ reg-names:
+ items:
+ - const: phy
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: refclk
+
+ '#phy-cells':
+ const: 0
+
+ syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Phandle for the syscon node to access misc registers.
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - '#phy-cells'
+ - syscon
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-phy@20 {
+ compatible = "ti,dm8168-usb-phy";
+ reg = <0x20 0x8>;
+ reg-names = "phy";
+ clocks = <&main_fapll 6>;
+ clock-names = "refclk";
+ #phy-cells = <0>;
+ syscon = <&scm_conf>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/ti,keystone-usbphy.yaml b/Documentation/devicetree/bindings/phy/ti,keystone-usbphy.yaml
new file mode 100644
index 000000000000..08dc18e7feea
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/ti,keystone-usbphy.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/ti,keystone-usbphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI Keystone USB PHY
+
+maintainers:
+ - Nishanth Menon <nm@ti.com>
+ - Santosh Shilimkar <ssantosh@kernel.org>
+
+description:
+ The main purpose of this PHY driver is to enable the USB PHY reference clock
+ gate on the Keystone SoC for both the USB2 and USB3 PHYs. Otherwise it is
+ just a no-op PHY driver. Hence this node is referenced as both the USB2 and
+ USB3 PHY node in the USB glue layer driver node.
+
+properties:
+ compatible:
+ const: ti,keystone-usbphy
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-phy@2620738 {
+ compatible = "ti,keystone-usbphy";
+ reg = <0x2620738 32>;
+ };
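Because the node stands in for both the USB2 and USB3 PHYs, the USB glue layer
node references the same PHY twice; a hedged sketch of that double reference
(the glue node shape, the &usb_phy label, and the property names are
assumptions):

    usb: usb@2680000 {
        /* other glue-layer properties elided */
        phys = <&usb_phy>, <&usb_phy>;
        phy-names = "usb2-phy", "usb3-phy";
    };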
diff --git a/Documentation/devicetree/bindings/pinctrl/amlogic,pinctrl-a4.yaml b/Documentation/devicetree/bindings/pinctrl/amlogic,pinctrl-a4.yaml
index a6ef4797e5c5..6ba66c2033b4 100644
--- a/Documentation/devicetree/bindings/pinctrl/amlogic,pinctrl-a4.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/amlogic,pinctrl-a4.yaml
@@ -15,11 +15,18 @@ allOf:
properties:
compatible:
oneOf:
- - const: amlogic,pinctrl-a4
+ - enum:
+ - amlogic,pinctrl-a4
+ - amlogic,pinctrl-s6
+ - amlogic,pinctrl-s7
- items:
- enum:
- amlogic,pinctrl-a5
- const: amlogic,pinctrl-a4
+ - items:
+ - enum:
+ - amlogic,pinctrl-s7d
+ - const: amlogic,pinctrl-s7
"#address-cells":
const: 2
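With the added entries, an S7D pin controller declares the S7 compatible as
its fallback; a minimal sketch (node name assumed):

    pinctrl {
        compatible = "amlogic,pinctrl-s7d", "amlogic,pinctrl-s7";
        #address-cells = <2>;
        #size-cells = <2>;
    };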
diff --git a/Documentation/devicetree/bindings/pinctrl/eswin,eic7700-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/eswin,eic7700-pinctrl.yaml
new file mode 100644
index 000000000000..d46e7ee6372d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/eswin,eic7700-pinctrl.yaml
@@ -0,0 +1,156 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/eswin,eic7700-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Eswin EIC7700 Pinctrl
+
+maintainers:
+ - Yulin Lu <luyulin@eswincomputing.com>
+
+allOf:
+ - $ref: pinctrl.yaml#
+
+description: |
+ EIC7700 pin configuration nodes act as a container for an arbitrary number
+ of subnodes. Each of these subnodes represents some desired configuration for
+ one or more pins. This configuration can include the mux function selected for
+ those pin(s) and various pin configuration parameters, such as input-enable,
+ pull-up, etc.
+
+properties:
+ compatible:
+ const: eswin,eic7700-pinctrl
+
+ reg:
+ maxItems: 1
+
+ vrgmii-supply:
+ description:
+ Regulator supply for the RGMII interface IO power domain.
+ This property should reference a regulator that provides either 1.8V or 3.3V,
+ depending on the board-level voltage configuration required by the RGMII interface.
+
+patternProperties:
+ '-grp$':
+ type: object
+ additionalProperties: false
+
+ patternProperties:
+ '-pins$':
+ type: object
+
+ properties:
+ pins:
+ description:
+ For eic7700, specifies the name(s) of one or more pins to be configured by
+ this node.
+ items:
+ enum: [ chip_mode, mode_set0, mode_set1, mode_set2, mode_set3, xin,
+ rst_out_n, key_reset_n, gpio0, por_sel, jtag0_tck, jtag0_tms,
+ jtag0_tdi, jtag0_tdo, gpio5, spi2_cs0_n, jtag1_tck, jtag1_tms,
+ jtag1_tdi, jtag1_tdo, gpio11, spi2_cs1_n, pcie_clkreq_n,
+ pcie_wake_n, pcie_perst_n, hdmi_scl, hdmi_sda, hdmi_cec,
+ jtag2_trst, rgmii0_clk_125, rgmii0_txen, rgmii0_txclk,
+ rgmii0_txd0, rgmii0_txd1, rgmii0_txd2, rgmii0_txd3, i2s0_bclk,
+ i2s0_wclk, i2s0_sdi, i2s0_sdo, i2s_mclk, rgmii0_rxclk,
+ rgmii0_rxdv, rgmii0_rxd0, rgmii0_rxd1, rgmii0_rxd2, rgmii0_rxd3,
+ i2s2_bclk, i2s2_wclk, i2s2_sdi, i2s2_sdo, gpio27, gpio28, gpio29,
+ rgmii0_mdc, rgmii0_mdio, rgmii0_intb, rgmii1_clk_125, rgmii1_txen,
+ rgmii1_txclk, rgmii1_txd0, rgmii1_txd1, rgmii1_txd2, rgmii1_txd3,
+ i2s1_bclk, i2s1_wclk, i2s1_sdi, i2s1_sdo, gpio34, rgmii1_rxclk,
+ rgmii1_rxdv, rgmii1_rxd0, rgmii1_rxd1, rgmii1_rxd2, rgmii1_rxd3,
+ spi1_cs0_n, spi1_clk, spi1_d0, spi1_d1, spi1_d2, spi1_d3, spi1_cs1_n,
+ rgmii1_mdc, rgmii1_mdio, rgmii1_intb, usb0_pwren, usb1_pwren,
+ i2c0_scl, i2c0_sda, i2c1_scl, i2c1_sda, i2c2_scl, i2c2_sda,
+ i2c3_scl, i2c3_sda, i2c4_scl, i2c4_sda, i2c5_scl, i2c5_sda,
+ uart0_tx, uart0_rx, uart1_tx, uart1_rx, uart1_cts, uart1_rts,
+ uart2_tx, uart2_rx, jtag2_tck, jtag2_tms, jtag2_tdi, jtag2_tdo,
+ fan_pwm, fan_tach, mipi_csi0_xvs, mipi_csi0_xhs, mipi_csi0_mclk,
+ mipi_csi1_xvs, mipi_csi1_xhs, mipi_csi1_mclk, mipi_csi2_xvs,
+ mipi_csi2_xhs, mipi_csi2_mclk, mipi_csi3_xvs, mipi_csi3_xhs,
+ mipi_csi3_mclk, mipi_csi4_xvs, mipi_csi4_xhs, mipi_csi4_mclk,
+ mipi_csi5_xvs, mipi_csi5_xhs, mipi_csi5_mclk, spi3_cs_n, spi3_clk,
+ spi3_di, spi3_do, gpio92, gpio93, s_mode, gpio95, spi0_cs_n,
+ spi0_clk, spi0_d0, spi0_d1, spi0_d2, spi0_d3, i2c10_scl,
+ i2c10_sda, i2c11_scl, i2c11_sda, gpio106, boot_sel0, boot_sel1,
+ boot_sel2, boot_sel3, gpio111, lpddr_ref_clk ]
+
+ function:
+ description:
+ Specify the alternative function to be configured for the
+ given pins.
+ enum: [ disabled, boot_sel, chip_mode, emmc, fan_tach,
+ gpio, hdmi, i2c, i2s, jtag, ddr_ref_clk_sel,
+ lpddr_ref_clk, mipi_csi, osc, pcie, pwm,
+ rgmii, reset, sata, sdio, spi, s_mode, uart, usb ]
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ bias-disable: true
+
+ bias-pull-down: true
+
+ bias-pull-up: true
+
+ input-enable: true
+
+ input-disable: true
+
+ drive-strength-microamp: true
+
+ required:
+ - pins
+
+ additionalProperties: false
+
+ allOf:
+ - $ref: pincfg-node.yaml#
+ - $ref: pinmux-node.yaml#
+
+ - if:
+ properties:
+ pins:
+ anyOf:
+ - pattern: '^rgmii'
+ - const: lpddr_ref_clk
+ then:
+ properties:
+ drive-strength-microamp:
+ enum: [3000, 6000, 9000, 12000, 15000, 18000, 21000, 24000]
+ else:
+ properties:
+ drive-strength-microamp:
+ enum: [6000, 9000, 12000, 15000, 18000, 21000, 24000, 27000]
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ pinctrl@51600080 {
+ compatible = "eswin,eic7700-pinctrl";
+ reg = <0x51600080 0x1fff80>;
+ vrgmii-supply = <&vcc_1v8>;
+
+ dev-active-grp {
+ /* group node defining 1 standard pin */
+ gpio10-pins {
+ pins = "jtag1_tdo";
+ function = "gpio";
+ input-enable;
+ bias-pull-up;
+ };
+
+ /* group node defining 2 I2C pins */
+ i2c6-pins {
+ pins = "uart1_cts", "uart1_rts";
+ function = "i2c";
+ };
+ };
+ };
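To illustrate the drive-strength split above, a hedged sketch of an RGMII
group (group and subnode names assumed) using a level that is only valid on
rgmii pins and lpddr_ref_clk:

    rgmii0-active-grp {
        rgmii0-tx-pins {
            pins = "rgmii0_txd0", "rgmii0_txd1", "rgmii0_txd2", "rgmii0_txd3";
            function = "rgmii";
            /* 3000 uA is only in the rgmii/lpddr_ref_clk enum */
            drive-strength-microamp = <3000>;
        };
    };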
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt8189-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt8189-pinctrl.yaml
new file mode 100644
index 000000000000..32e4653da5db
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt8189-pinctrl.yaml
@@ -0,0 +1,213 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/mediatek,mt8189-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT8189 Pin Controller
+
+maintainers:
+ - Lei Xue <lei.xue@mediatek.com>
+ - Cathy Xu <ot_cathy.xu@mediatek.com>
+
+description:
+ The MediaTek MT8189 pin controller is used to control SoC pins.
+
+properties:
+ compatible:
+ const: mediatek,mt8189-pinctrl
+
+ reg:
+ items:
+ - description: gpio base
+ - description: lm group IO
+ - description: rb0 group IO
+ - description: rb1 group IO
+ - description: bm0 group IO
+ - description: bm1 group IO
+ - description: bm2 group IO
+ - description: lt0 group IO
+ - description: lt1 group IO
+ - description: rt group IO
+ - description: eint0 group IO
+ - description: eint1 group IO
+ - description: eint2 group IO
+ - description: eint3 group IO
+ - description: eint4 group IO
+
+ reg-names:
+ items:
+ - const: base
+ - const: lm
+ - const: rb0
+ - const: rb1
+ - const: bm0
+ - const: bm1
+ - const: bm2
+ - const: lt0
+ - const: lt1
+ - const: rt
+ - const: eint0
+ - const: eint1
+ - const: eint2
+ - const: eint3
+ - const: eint4
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ gpio-ranges:
+ maxItems: 1
+
+ gpio-line-names: true
+
+# PIN CONFIGURATION NODES
+patternProperties:
+ '-pins$':
+ type: object
+ additionalProperties: false
+
+ patternProperties:
+ '^pins':
+ type: object
+ $ref: /schemas/pinctrl/pincfg-node.yaml
+ additionalProperties: false
+ description:
+ A pinctrl node should contain at least one subnode representing the
+ pinctrl groups available on the machine. Each subnode will list the
+ pins it needs, and how they should be configured, with regard to muxer
+ configuration, pullups, drive strength, input enable/disable and input
+ schmitt.
+
+ properties:
+ pinmux:
+ description:
+ Integer array representing a GPIO pin number and a mux setting.
+ The supported pin numbers and mux settings vary between SoCs; for
+ this SoC they are defined as macros in
+ arch/arm64/boot/dts/mediatek/mt8189-pinfunc.h.
+
+ drive-strength:
+ enum: [2, 4, 6, 8, 10, 12, 14, 16]
+
+ bias-pull-down:
+ oneOf:
+ - type: boolean
+ - enum: [100, 101, 102, 103]
+ description: mt8189 pull-down PUPD/R0/R1 type define value.
+ - enum: [75000, 5000]
+ description: mt8189 pull-down RSEL type SI unit value (ohm).
+ description: |
+ For the normal pull-down type, no R1R0 define or resistance value
+ is needed.
+
+ For the PUPD/R0/R1 pull-down type, an R1R0 define can be added to
+ select a different resistance. mt8189 supports the
+ "MTK_PUPD_SET_R1R0_00", "MTK_PUPD_SET_R1R0_01",
+ "MTK_PUPD_SET_R1R0_10" and "MTK_PUPD_SET_R1R0_11" defines.
+
+ For the PD/RSEL pull-down type, a resistance value (ohm) can be
+ given to select a different resistance when the property
+ "mediatek,rsel-resistance-in-si-unit" is present.
+
+ bias-pull-up:
+ oneOf:
+ - type: boolean
+ - enum: [100, 101, 102, 103]
+ description: mt8189 pull-up PUPD/R0/R1 type define value.
+ - enum: [1000, 1500, 2000, 3000, 4000, 5000, 75000]
+ description: mt8189 pull-up RSEL type SI unit value (ohm).
+ description: |
+ For the normal pull-up type, no R1R0 define or resistance value
+ is needed.
+
+ For the PUPD/R0/R1 pull-up type, an R1R0 define can be added to
+ select a different resistance. mt8189 supports the
+ "MTK_PUPD_SET_R1R0_00", "MTK_PUPD_SET_R1R0_01",
+ "MTK_PUPD_SET_R1R0_10" and "MTK_PUPD_SET_R1R0_11" defines.
+
+ For the PU/RSEL pull-up type, a resistance value (ohm) can be
+ given to select a different resistance when the property
+ "mediatek,rsel-resistance-in-si-unit" is present.
+
+ bias-disable: true
+
+ output-high: true
+
+ output-low: true
+
+ input-enable: true
+
+ input-disable: true
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ required:
+ - pinmux
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/pinctrl/mt65xx.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #define PINMUX_GPIO51__FUNC_SCL0 (MTK_PIN_NO(51) | 2)
+ #define PINMUX_GPIO52__FUNC_SDA0 (MTK_PIN_NO(52) | 2)
+
+ pio: pinctrl@10005000 {
+ compatible = "mediatek,mt8189-pinctrl";
+ reg = <0x10005000 0x1000>,
+ <0x11b50000 0x1000>,
+ <0x11c50000 0x1000>,
+ <0x11c60000 0x1000>,
+ <0x11d20000 0x1000>,
+ <0x11d30000 0x1000>,
+ <0x11d40000 0x1000>,
+ <0x11e20000 0x1000>,
+ <0x11e30000 0x1000>,
+ <0x11f20000 0x1000>,
+ <0x11ce0000 0x1000>,
+ <0x11de0000 0x1000>,
+ <0x11e60000 0x1000>,
+ <0x1c01e000 0x1000>,
+ <0x11f00000 0x1000>;
+ reg-names = "base", "lm", "rb0", "rb1", "bm0" , "bm1",
+ "bm2", "lt0", "lt1", "rt", "eint0", "eint1",
+ "eint2", "eint3", "eint4";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pio 0 0 182>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH 0>;
+ #interrupt-cells = <2>;
+
+ i2c0-pins {
+ pins {
+ pinmux = <PINMUX_GPIO51__FUNC_SCL0>,
+ <PINMUX_GPIO52__FUNC_SDA0>;
+ bias-disable;
+ };
+ };
+ };
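A hedged sketch of the two pull flavors described in the bias-pull-up
property above, reusing the pin macros from the example; MTK_PUPD_SET_R1R0_01
comes from the dt-bindings/pinctrl/mt65xx.h header already included there:

    pupd-pins {
        pins {
            pinmux = <PINMUX_GPIO51__FUNC_SCL0>;
            bias-pull-up = <MTK_PUPD_SET_R1R0_01>; /* PUPD/R0/R1 type */
        };
    };

    rsel-pins {
        pins {
            pinmux = <PINMUX_GPIO52__FUNC_SDA0>;
            bias-pull-up = <1000>; /* RSEL type, value in ohm */
        };
    };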
diff --git a/Documentation/devicetree/bindings/pinctrl/nxp,lpc1850-scu.txt b/Documentation/devicetree/bindings/pinctrl/nxp,lpc1850-scu.txt
deleted file mode 100644
index bd8b0c69fa44..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/nxp,lpc1850-scu.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-NXP LPC18xx/43xx SCU pin controller Device Tree Bindings
---------------------------------------------------------
-
-Required properties:
-- compatible : Should be "nxp,lpc1850-scu"
-- reg : Address and length of the register set for the device
-- clocks : Clock specifier (see clock bindings for details)
-
-The lpc1850-scu driver uses the generic pin multiplexing and generic pin
-configuration documented in pinctrl-bindings.txt.
-
-The following generic nodes are supported:
- - function
- - pins
- - bias-disable
- - bias-pull-up
- - bias-pull-down
- - drive-strength
- - input-enable
- - input-disable
- - input-schmitt-enable
- - input-schmitt-disable
- - slew-rate
-
-NXP specific properties:
- - nxp,gpio-pin-interrupt : Assign pin to gpio pin interrupt controller
- irq number 0 to 7. See example below.
-
-Not all pins support all properties so either refer to the NXP 1850/4350
-user manual or the pin table in the pinctrl-lpc18xx driver for supported
-pin properties.
-
-Example:
-pinctrl: pinctrl@40086000 {
- compatible = "nxp,lpc1850-scu";
- reg = <0x40086000 0x1000>;
- clocks = <&ccu1 CLK_CPU_SCU>;
-
- i2c0_pins: i2c0-pins {
- i2c0_pins_cfg {
- pins = "i2c0_scl", "i2c0_sda";
- function = "i2c0";
- input-enable;
- };
- };
-
- uart0_pins: uart0-pins {
- uart0_rx_cfg {
- pins = "pf_11";
- function = "uart0";
- bias-disable;
- input-enable;
- };
-
- uart0_tx_cfg {
- pins = "pf_10";
- function = "uart0";
- bias-disable;
- };
- };
-
- gpio_joystick_pins: gpio-joystick-pins {
- gpio_joystick_1_cfg {
- pins = "p9_0";
- function = "gpio";
- nxp,gpio-pin-interrupt = <0>;
- input-enable;
- bias-disable;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/pinctrl/nxp,lpc1850-scu.yaml b/Documentation/devicetree/bindings/pinctrl/nxp,lpc1850-scu.yaml
new file mode 100644
index 000000000000..11f41359b5c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/nxp,lpc1850-scu.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/nxp,lpc1850-scu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC18xx/43xx SCU pin controller
+
+description:
+ Not all pins support all generic pin node properties, so either refer to
+ the NXP 1850/4350 user manual or the pin table in the pinctrl-lpc18xx
+ driver for the supported pin properties.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: nxp,lpc1850-scu
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+patternProperties:
+ '-pins$':
+ type: object
+ additionalProperties: false
+
+ patternProperties:
+ '_cfg$':
+ type: object
+
+ allOf:
+ - $ref: pincfg-node.yaml#
+ - $ref: pinmux-node.yaml#
+
+ unevaluatedProperties: false
+
+ properties:
+ nxp,gpio-pin-interrupt:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 7
+ description:
+ Assign the pin to a GPIO pin interrupt controller IRQ number,
+ 0 to 7. See the example below.
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+allOf:
+ - $ref: pinctrl.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/lpc18xx-ccu.h>
+
+ pinctrl@40086000 {
+ compatible = "nxp,lpc1850-scu";
+ reg = <0x40086000 0x1000>;
+ clocks = <&ccu1 CLK_CPU_SCU>;
+
+ gpio-joystick-pins {
+ gpio-joystick-1_cfg {
+ pins = "p9_0";
+ function = "gpio";
+ nxp,gpio-pin-interrupt = <0>;
+ input-enable;
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,milos-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,milos-tlmm.yaml
new file mode 100644
index 000000000000..0091204df20a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,milos-tlmm.yaml
@@ -0,0 +1,133 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,milos-tlmm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. Milos TLMM block
+
+maintainers:
+ - Luca Weiss <luca.weiss@fairphone.com>
+
+description:
+ Top Level Mode Multiplexer pin controller in Qualcomm Milos SoC.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,milos-tlmm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 84
+
+ gpio-line-names:
+ maxItems: 167
+
+patternProperties:
+ "-state$":
+ oneOf:
+ - $ref: "#/$defs/qcom-milos-tlmm-state"
+ - patternProperties:
+ "-pins$":
+ $ref: "#/$defs/qcom-milos-tlmm-state"
+ additionalProperties: false
+
+$defs:
+ qcom-milos-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties listed below.
+ $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+ unevaluatedProperties: false
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-5][0-9]|16[0-7])$"
+ - enum: [ ufs_reset, sdc2_clk, sdc2_cmd, sdc2_data ]
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+ enum: [ gpio, aoss_cti, atest_char, atest_usb, audio_ext_mclk0,
+ audio_ext_mclk1, audio_ref_clk, cam_mclk, cci_async_in0,
+ cci_i2c_scl, cci_i2c_sda, cci_timer, coex_uart1_rx,
+ coex_uart1_tx, dbg_out_clk, ddr_bist_complete, ddr_bist_fail,
+ ddr_bist_start, ddr_bist_stop, ddr_pxi0, ddr_pxi1, dp0_hot,
+ egpio, gcc_gp1, gcc_gp2, gcc_gp3, host2wlan_sol, i2s0_data0,
+ i2s0_data1, i2s0_sck, i2s0_ws, ibi_i3c, jitter_bist, mdp_vsync,
+ mdp_vsync0_out, mdp_vsync1_out, mdp_vsync2_out, mdp_vsync3_out,
+ mdp_vsync_e, nav_gpio0, nav_gpio1, nav_gpio2, pcie0_clk_req_n,
+ pcie1_clk_req_n, phase_flag, pll_bist_sync, pll_clk_aux,
+ prng_rosc0, prng_rosc1, prng_rosc2, prng_rosc3, qdss_cti,
+ qdss_gpio, qlink0_enable, qlink0_request, qlink0_wmss,
+ qlink1_enable, qlink1_request, qlink1_wmss, qspi0, qup0_se0,
+ qup0_se1, qup0_se2, qup0_se3, qup0_se4, qup0_se5, qup0_se6,
+ qup1_se0, qup1_se1, qup1_se2, qup1_se3, qup1_se4, qup1_se5,
+ qup1_se6, resout_gpio_n, sd_write_protect, sdc1_clk, sdc1_cmd,
+ sdc1_data, sdc1_rclk, sdc2_clk, sdc2_cmd, sdc2_data,
+ sdc2_fb_clk, tb_trig_sdc1, tb_trig_sdc2, tgu_ch0_trigout,
+ tgu_ch1_trigout, tmess_prng0, tmess_prng1, tmess_prng2,
+ tmess_prng3, tsense_pwm1, tsense_pwm2, uim0_clk, uim0_data,
+ uim0_present, uim0_reset, uim1_clk_mira, uim1_clk_mirb,
+ uim1_data_mira, uim1_data_mirb, uim1_present_mira,
+ uim1_present_mirb, uim1_reset_mira, uim1_reset_mirb, usb0_hs,
+ usb0_phy_ps, vfr_0, vfr_1, vsense_trigger_mirnat, wcn_sw,
+ wcn_sw_ctrl ]
+
+ required:
+ - pins
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ tlmm: pinctrl@f100000 {
+ compatible = "qcom,milos-tlmm";
+ reg = <0x0f100000 0x300000>;
+
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ gpio-ranges = <&tlmm 0 0 168>;
+
+ gpio-wo-state {
+ pins = "gpio1";
+ function = "gpio";
+ };
+
+ qup-uart5-default-state {
+ pins = "gpio25", "gpio26";
+ function = "qup0_se5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+...
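Besides the flat form shown in the example, the '-state' pattern also admits
'-pins' subnodes; a hedged sketch reusing the pins from the example above:

    qup-uart5-sleep-state {
        tx-pins {
            pins = "gpio25";
            function = "gpio";
            bias-pull-up;
        };

        rx-pins {
            pins = "gpio26";
            function = "gpio";
            bias-pull-down;
        };
    };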
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
index 055cea5452eb..5e6dfcc3fe9b 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
@@ -27,6 +27,7 @@ properties:
- qcom,pm6450-gpio
- qcom,pm7250b-gpio
- qcom,pm7325-gpio
+ - qcom,pm7550-gpio
- qcom,pm7550ba-gpio
- qcom,pm8005-gpio
- qcom,pm8018-gpio
@@ -64,6 +65,7 @@ properties:
- qcom,pmi8994-gpio
- qcom,pmi8998-gpio
- qcom,pmih0108-gpio
+ - qcom,pmiv0104-gpio
- qcom,pmk8350-gpio
- qcom,pmk8550-gpio
- qcom,pmm8155au-gpio
@@ -228,6 +230,7 @@ allOf:
- qcom,pmc8180-gpio
- qcom,pmc8380-gpio
- qcom,pmi8994-gpio
+ - qcom,pmiv0104-gpio
- qcom,pmm8155au-gpio
then:
properties:
@@ -261,6 +264,7 @@ allOf:
- qcom,pm660l-gpio
- qcom,pm6150l-gpio
- qcom,pm7250b-gpio
+ - qcom,pm7550-gpio
- qcom,pm8038-gpio
- qcom,pm8150b-gpio
- qcom,pm8150l-gpio
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
index 960758dc417f..125af766b992 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
@@ -135,7 +135,7 @@ additionalProperties:
description:
Pin bank index.
- minimum: 0
- maximum: 13
+ maximum: 14
description:
Mux 0 means GPIO and mux 1 to N means
the specific device function.
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-hdp.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-hdp.yaml
new file mode 100644
index 000000000000..845b6b7b7552
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-hdp.yaml
@@ -0,0 +1,187 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) STMicroelectronics 2025.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/st,stm32-hdp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STM32 Hardware Debug Port Mux/Config
+
+maintainers:
+ - Clément LE GOFFIC <legoffic.clement@gmail.com>
+
+description:
+ STMicroelectronics STM32 MPUs integrate a Hardware Debug Port (HDP), which
+ allows internal signals to be output on the SoC's GPIOs.
+
+properties:
+ compatible:
+ enum:
+ - st,stm32mp131-hdp
+ - st,stm32mp151-hdp
+ - st,stm32mp251-hdp
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+patternProperties:
+ "^hdp[0-7]-pins$":
+ type: object
+ $ref: pinmux-node.yaml#
+ additionalProperties: false
+
+ properties:
+ pins:
+ pattern: '^HDP[0-7]$'
+
+ function: true
+
+ required:
+ - function
+ - pins
+
+allOf:
+ - $ref: pinctrl.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: st,stm32mp131-hdp
+ then:
+ patternProperties:
+ "^hdp[0-7]-pins$":
+ properties:
+ function:
+ enum: [ pwr_pwrwake_sys, pwr_stop_forbidden, pwr_stdby_wakeup, pwr_encomp_vddcore,
+ bsec_out_sec_niden, aiec_sys_wakeup, none, ddrctrl_lp_req,
+ pwr_ddr_ret_enable_n, dts_clk_ptat, sram3ctrl_tamp_erase_act, gpoval0,
+ pwr_sel_vth_vddcpu, pwr_mpu_ram_lowspeed, ca7_naxierrirq, pwr_okin_mr,
+ bsec_out_sec_dbgen, aiec_c1_wakeup, rcc_pwrds_mpu, ddrctrl_dfi_ctrlupd_req,
+ ddrctrl_cactive_ddrc_asr, sram3ctrl_hw_erase_act, nic400_s0_bready, gpoval1,
+ pwr_pwrwake_mpu, pwr_mpu_clock_disable_ack, ca7_ndbgreset_i,
+ bsec_in_rstcore_n, bsec_out_sec_bsc_dis, ddrctrl_dfi_init_complete,
+ ddrctrl_perf_op_is_refresh, ddrctrl_gskp_dfi_lp_req, sram3ctrl_sw_erase_act,
+ nic400_s0_bvalid, gpoval2, pwr_sel_vth_vddcore, pwr_mpu_clock_disable_req,
+ ca7_npmuirq0, ca7_nfiqout0, bsec_out_sec_dftlock, bsec_out_sec_jtag_dis,
+ rcc_pwrds_sys, sram3ctrl_tamp_erase_req, ddrctrl_stat_ddrc_reg_selfref_type0,
+ dts_valobus1_0, dts_valobus2_0, tamp_potential_tamp_erfcfg, nic400_s0_wready,
+ nic400_s0_rready, gpoval3, pwr_stop2_active, ca7_nl2reset_i,
+ ca7_npreset_varm_i, bsec_out_sec_dften, bsec_out_sec_dbgswenable,
+ eth1_out_pmt_intr_o, eth2_out_pmt_intr_o, ddrctrl_stat_ddrc_reg_selfref_type1,
+ ddrctrl_cactive_0, dts_valobus1_1, dts_valobus2_1, tamp_nreset_sram_ercfg,
+ nic400_s0_wlast, nic400_s0_rlast, gpoval4, ca7_standbywfil2,
+ pwr_vth_vddcore_ack, ca7_ncorereset_i, ca7_nirqout0, bsec_in_pwrok,
+ bsec_out_sec_deviceen, eth1_out_lpi_intr_o, eth2_out_lpi_intr_o,
+ ddrctrl_cactive_ddrc, ddrctrl_wr_credit_cnt, dts_valobus1_2, dts_valobus2_2,
+ pka_pka_itamp_out, nic400_s0_wvalid, nic400_s0_rvalid, gpoval5,
+ ca7_standbywfe0, pwr_vth_vddcpu_ack, ca7_evento, bsec_in_tamper_det,
+ bsec_out_sec_spniden, eth1_out_mac_speed_o1, eth2_out_mac_speed_o1,
+ ddrctrl_csysack_ddrc, ddrctrl_lpr_credit_cnt, dts_valobus1_3, dts_valobus2_3,
+ saes_tamper_out, nic400_s0_awready, nic400_s0_arready, gpoval6,
+ ca7_standbywfi0, pwr_rcc_vcpu_rdy, ca7_eventi, ca7_dbgack0, bsec_out_fuse_ok,
+ bsec_out_sec_spiden, eth1_out_mac_speed_o0, eth2_out_mac_speed_o0,
+ ddrctrl_csysreq_ddrc, ddrctrl_hpr_credit_cnt, dts_valobus1_4, dts_valobus2_4,
+ rng_tamper_out, nic400_s0_awavalid, nic400_s0_aravalid, gpoval7 ]
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: st,stm32mp151-hdp
+ then:
+ patternProperties:
+ "^hdp[0-7]-pins$":
+ properties:
+ function:
+ enum: [ pwr_pwrwake_sys, cm4_sleepdeep, pwr_stdby_wkup, pwr_encomp_vddcore,
+ bsec_out_sec_niden, none, rcc_cm4_sleepdeep, gpu_dbg7, ddrctrl_lp_req,
+ pwr_ddr_ret_enable_n, dts_clk_ptat, gpoval0, pwr_pwrwake_mcu, cm4_halted,
+ ca7_naxierrirq, pwr_okin_mr, bsec_out_sec_dbgen, exti_sys_wakeup,
+ rcc_pwrds_mpu, gpu_dbg6, ddrctrl_dfi_ctrlupd_req, ddrctrl_cactive_ddrc_asr,
+ gpoval1, pwr_pwrwake_mpu, cm4_rxev, ca7_npmuirq1, ca7_nfiqout1,
+ bsec_in_rstcore_n, exti_c2_wakeup, rcc_pwrds_mcu, gpu_dbg5,
+ ddrctrl_dfi_init_complete, ddrctrl_perf_op_is_refresh,
+ ddrctrl_gskp_dfi_lp_req, gpoval2, pwr_sel_vth_vddcore, cm4_txev, ca7_npmuirq0,
+ ca7_nfiqout0, bsec_out_sec_dftlock, exti_c1_wakeup, rcc_pwrds_sys, gpu_dbg4,
+ ddrctrl_stat_ddrc_reg_selfref_type0, ddrctrl_cactive_1, dts_valobus1_0,
+ dts_valobus2_0, gpoval3, pwr_mpu_pdds_not_cstbydis, cm4_sleeping, ca7_nreset1,
+ ca7_nirqout1, bsec_out_sec_dften, bsec_out_sec_dbgswenable,
+ eth_out_pmt_intr_o, gpu_dbg3, ddrctrl_stat_ddrc_reg_selfref_type1,
+ ddrctrl_cactive_0, dts_valobus1_1, dts_valobus2_1, gpoval4, ca7_standbywfil2,
+ pwr_vth_vddcore_ack, ca7_nreset0, ca7_nirqout0, bsec_in_pwrok,
+ bsec_out_sec_deviceen, eth_out_lpi_intr_o, gpu_dbg2, ddrctrl_cactive_ddrc,
+ ddrctrl_wr_credit_cnt, dts_valobus1_2, dts_valobus2_2, gpoval5,
+ ca7_standbywfi1, ca7_standbywfe1, ca7_evento, ca7_dbgack1,
+ bsec_out_sec_spniden, eth_out_mac_speed_o1, gpu_dbg1, ddrctrl_csysack_ddrc,
+ ddrctrl_lpr_credit_cnt, dts_valobus1_3, dts_valobus2_3, gpoval6,
+ ca7_standbywfi0, ca7_standbywfe0, ca7_dbgack0, bsec_out_fuse_ok,
+ bsec_out_sec_spiden, eth_out_mac_speed_o0, gpu_dbg0, ddrctrl_csysreq_ddrc,
+ ddrctrl_hpr_credit_cnt, dts_valobus1_4, dts_valobus2_4, gpoval7 ]
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: st,stm32mp251-hdp
+ then:
+ patternProperties:
+ "^hdp[0-7]-pins$":
+ properties:
+ function:
+ enum: [ pwr_pwrwake_sys, cpu2_sleep_deep, bsec_out_tst_sdr_unlock_or_disable_scan,
+ bsec_out_nidenm, bsec_out_nidena, cpu2_state_0, rcc_pwrds_sys, gpu_dbg7,
+ ddrss_csysreq_ddrc, ddrss_dfi_phyupd_req, cpu3_sleep_deep,
+ d2_gbl_per_clk_bus_req, pcie_usb_cxpl_debug_info_ei_0,
+ pcie_usb_cxpl_debug_info_ei_8, d3_state_0, gpoval0, pwr_pwrwake_cpu2,
+ cpu2_halted, cpu2_state_1, bsec_out_dbgenm, bsec_out_dbgena, exti1_sys_wakeup,
+ rcc_pwrds_cpu2, gpu_dbg6, ddrss_csysack_ddrc, ddrss_dfi_phymstr_req,
+ cpu3_halted, d2_gbl_per_dma_req, pcie_usb_cxpl_debug_info_ei_1,
+ pcie_usb_cxpl_debug_info_ei_9, d3_state_1, gpoval1, pwr_pwrwake_cpu1,
+ cpu2_rxev, cpu1_npumirq1, cpu1_nfiqout1, bsec_out_shdbgen, exti1_cpu2_wakeup,
+ rcc_pwrds_cpu1, gpu_dbg5, ddrss_cactive_ddrc, ddrss_dfi_lp_req, cpu3_rxev,
+ hpdma1_clk_bus_req, pcie_usb_cxpl_debug_info_ei_2,
+ pcie_usb_cxpl_debug_info_ei_10, d3_state_2, gpoval2, pwr_sel_vth_vddcpu,
+ cpu2_txev, cpu1_npumirq0, cpu1_nfiqout0, bsec_out_ddbgen, exti1_cpu1_wakeup,
+ cpu3_state_0, gpu_dbg4, ddrss_mcdcg_en, ddrss_dfi_freq_0, cpu3_txev,
+ hpdma2_clk_bus_req, pcie_usb_cxpl_debug_info_ei_3,
+ pcie_usb_cxpl_debug_info_ei_11, d1_state_0, gpoval3, pwr_sel_vth_vddcore,
+ cpu2_sleeping, cpu1_evento, cpu1_nirqout1, bsec_out_spnidena, exti2_d3_wakeup,
+ eth1_out_pmt_intr_o, gpu_dbg3, ddrss_dphycg_en, ddrss_obsp0, cpu3_sleeping,
+ hpdma3_clk_bus_req, pcie_usb_cxpl_debug_info_ei_4,
+ pcie_usb_cxpl_debug_info_ei_12, d1_state_1, gpoval4, cpu1_standby_wfil2,
+ none, cpu1_nirqout0, bsec_out_spidena, exti2_cpu3_wakeup, eth1_out_lpi_intr_o,
+ gpu_dbg2, ddrctrl_dfi_init_start, ddrss_obsp1, cpu3_state_1,
+ d3_gbl_per_clk_bus_req, pcie_usb_cxpl_debug_info_ei_5,
+ pcie_usb_cxpl_debug_info_ei_13, d1_state_2, gpoval5, cpu1_standby_wfi1,
+ cpu1_standby_wfe1, cpu1_halted1, cpu1_naxierrirq, bsec_out_spnidenm,
+ exti2_cpu2_wakeup, eth2_out_pmt_intr_o, gpu_dbg1, ddrss_dfi_init_complete,
+ ddrss_obsp2, d2_state_0, d3_gbl_per_dma_req, pcie_usb_cxpl_debug_info_ei_6,
+ pcie_usb_cxpl_debug_info_ei_14, cpu1_state_0, gpoval6, cpu1_standby_wfi0,
+ cpu1_standby_wfe0, cpu1_halted0, bsec_out_spidenm, exti2_cpu1__wakeup,
+ eth2_out_lpi_intr_o, gpu_dbg0, ddrss_dfi_ctrlupd_req, ddrss_obsp3, d2_state_1,
+ lpdma1_clk_bus_req, pcie_usb_cxpl_debug_info_ei_7,
+ pcie_usb_cxpl_debug_info_ei_15, cpu1_state_1, gpoval7 ]
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+
+ pinctrl@54090000 {
+ compatible = "st,stm32mp151-hdp";
+ reg = <0x54090000 0x400>;
+ clocks = <&rcc HDP>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdp2_gpo>;
+ hdp2_gpo: hdp2-pins {
+ function = "gpoval2";
+ pins = "HDP2";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index a28d77748095..961161c2ab62 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -32,13 +32,16 @@ properties:
'#address-cells':
const: 1
+
'#size-cells':
const: 1
ranges: true
+
pins-are-numbered:
$ref: /schemas/types.yaml#/definitions/flag
deprecated: true
+
hwlocks: true
interrupts:
@@ -67,22 +70,29 @@ patternProperties:
additionalProperties: false
properties:
gpio-controller: true
+
'#gpio-cells':
const: 2
+
interrupt-controller: true
'#interrupt-cells':
const: 2
reg:
maxItems: 1
+
clocks:
maxItems: 1
+
resets:
maxItems: 1
+
gpio-line-names: true
+
gpio-ranges:
minItems: 1
maxItems: 16
+
ngpios:
description:
Number of available gpios in a bank.
@@ -160,9 +170,13 @@ patternProperties:
* ...
* 16 : Alternate Function 15
* 17 : Analog
+ * 18 : Reserved
To simplify the usage, macro is available to generate "pinmux" field.
This macro is available here:
- include/dt-bindings/pinctrl/stm32-pinfunc.h
+ Setting the pinmux function to the Reserved (RSVD) value informs the
+ driver that it shall not apply the mux setting. This can be used to
+ reserve some pins, for example for a co-processor not running Linux.
Some examples of using macro:
/* GPIO A9 set as alternate function 2 */
... {
@@ -176,21 +190,32 @@ patternProperties:
... {
pinmux = <STM32_PINMUX('A', 9, ANALOG)>;
};
+ /* GPIO A9 reserved for co-processor */
+ ... {
+ pinmux = <STM32_PINMUX('A', 9, RSVD)>;
+ };
bias-disable:
type: boolean
+
bias-pull-down:
type: boolean
+
bias-pull-up:
type: boolean
+
drive-push-pull:
type: boolean
+
drive-open-drain:
type: boolean
+
output-low:
type: boolean
+
output-high:
type: boolean
+
slew-rate:
description: |
0: Low speed
diff --git a/Documentation/devicetree/bindings/power/supply/bq24190.yaml b/Documentation/devicetree/bindings/power/supply/bq24190.yaml
index 307c99c07721..ac9a76fc5876 100644
--- a/Documentation/devicetree/bindings/power/supply/bq24190.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq24190.yaml
@@ -48,7 +48,6 @@ properties:
battery device.
monitored-battery:
- $ref: /schemas/types.yaml#/definitions/phandle
description: |
phandle to a "simple-battery" compatible node.
diff --git a/Documentation/devicetree/bindings/power/supply/bq2515x.yaml b/Documentation/devicetree/bindings/power/supply/bq2515x.yaml
index 845822c87f2a..0e99a218e662 100644
--- a/Documentation/devicetree/bindings/power/supply/bq2515x.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq2515x.yaml
@@ -53,15 +53,16 @@ properties:
minimum: 50000
maximum: 500000
- monitored-battery:
- $ref: /schemas/types.yaml#/definitions/phandle
- description: phandle to the battery node being monitored
+ monitored-battery: true
required:
- compatible
- reg
- monitored-battery
+allOf:
+ - $ref: power-supply.yaml#
+
additionalProperties: false
examples:
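With monitored-battery now inheriting its type and description from
power-supply.yaml, the DTS usage is unchanged; a minimal sketch (labels and
the precharge value are assumptions):

    battery: battery {
        compatible = "simple-battery";
        precharge-current-microamp = <10000>;
    };

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        charger@6b {
            compatible = "ti,bq25150";
            reg = <0x6b>;
            monitored-battery = <&battery>;
        };
    };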
diff --git a/Documentation/devicetree/bindings/power/supply/bq256xx.yaml b/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
index a76afe3ca299..8cee37b9879e 100644
--- a/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
@@ -58,9 +58,7 @@ properties:
minimum: 100000
maximum: 3200000
- monitored-battery:
- $ref: /schemas/types.yaml#/definitions/phandle
- description: phandle to the battery node being monitored
+ monitored-battery: true
interrupts:
maxItems: 1
@@ -78,6 +76,7 @@ required:
- monitored-battery
allOf:
+ - $ref: power-supply.yaml#
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/power/supply/bq25980.yaml b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
index 256adbef55eb..0b5d005dc780 100644
--- a/Documentation/devicetree/bindings/power/supply/bq25980.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
@@ -73,9 +73,7 @@ properties:
description: |
Indicates that the device state has changed.
- monitored-battery:
- $ref: /schemas/types.yaml#/definitions/phandle
- description: phandle to the battery node being monitored
+ monitored-battery: true
required:
- compatible
diff --git a/Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml b/Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml
index dc697b6147b2..f7bde324153d 100644
--- a/Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml
+++ b/Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml
@@ -43,10 +43,7 @@ properties:
minItems: 1
maxItems: 8 # Should be enough
- monitored-battery:
- description:
- Specifies the phandle of a simple-battery connected to this gauge
- $ref: /schemas/types.yaml#/definitions/phandle
+ monitored-battery: true
required:
- compatible
diff --git a/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml b/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml
index 90c7dc7632c5..70f5cd6eaeab 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml
@@ -38,9 +38,7 @@ properties:
- const: usbin_i
- const: usbin_v
- monitored-battery:
- description: phandle to the simple-battery node
- $ref: /schemas/types.yaml#/definitions/phandle
+ monitored-battery: true
required:
- compatible
@@ -51,6 +49,9 @@ required:
- io-channel-names
- monitored-battery
+allOf:
+ - $ref: power-supply.yaml#
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/power/supply/richtek,rt5033-charger.yaml b/Documentation/devicetree/bindings/power/supply/richtek,rt5033-charger.yaml
index 5b3edd79a523..d91eced9f5fb 100644
--- a/Documentation/devicetree/bindings/power/supply/richtek,rt5033-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/richtek,rt5033-charger.yaml
@@ -18,7 +18,6 @@ properties:
const: richtek,rt5033-charger
monitored-battery:
- $ref: /schemas/types.yaml#/definitions/phandle
description: |
Phandle to the monitored battery according to battery.yaml. The battery
node needs to contain five parameters.
@@ -54,6 +53,9 @@ properties:
required:
- monitored-battery
+allOf:
+ - $ref: power-supply.yaml#
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-btemp.yaml b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-btemp.yaml
index 525abdfb3e2d..c464aa82255a 100644
--- a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-btemp.yaml
+++ b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-btemp.yaml
@@ -17,9 +17,7 @@ properties:
compatible:
const: stericsson,ab8500-btemp
- monitored-battery:
- $ref: /schemas/types.yaml#/definitions/phandle
- description: phandle to battery node
+ monitored-battery: true
battery:
$ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-chargalg.yaml b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-chargalg.yaml
index 10bbdcfc87b6..39914b9e0cf5 100644
--- a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-chargalg.yaml
+++ b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-chargalg.yaml
@@ -17,9 +17,7 @@ properties:
compatible:
const: stericsson,ab8500-chargalg
- monitored-battery:
- $ref: /schemas/types.yaml#/definitions/phandle
- description: phandle to battery node
+ monitored-battery: true
battery:
$ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-charger.yaml b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-charger.yaml
index e33329b3af61..994fac12c8da 100644
--- a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-charger.yaml
@@ -17,9 +17,7 @@ properties:
compatible:
const: stericsson,ab8500-charger
- monitored-battery:
- $ref: /schemas/types.yaml#/definitions/phandle
- description: phandle to battery node
+ monitored-battery: true
battery:
$ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-fg.yaml b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-fg.yaml
index 6a724ca90e99..92e4eb08fd61 100644
--- a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-fg.yaml
+++ b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-fg.yaml
@@ -17,9 +17,7 @@ properties:
compatible:
const: stericsson,ab8500-fg
- monitored-battery:
- $ref: /schemas/types.yaml#/definitions/phandle
- description: phandle to battery node
+ monitored-battery: true
battery:
$ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/power/supply/summit,smb347-charger.yaml b/Documentation/devicetree/bindings/power/supply/summit,smb347-charger.yaml
index 2d552becbfe6..65ed92bb05f3 100644
--- a/Documentation/devicetree/bindings/power/supply/summit,smb347-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/summit,smb347-charger.yaml
@@ -23,9 +23,7 @@ properties:
interrupts:
maxItems: 1
- monitored-battery:
- description: phandle to the battery node
- $ref: /schemas/types.yaml#/definitions/phandle
+ monitored-battery: true
summit,enable-usb-charging:
type: boolean
@@ -94,6 +92,7 @@ properties:
unevaluatedProperties: false
allOf:
+ - $ref: power-supply.yaml#
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml
index 3504c76a01d8..a90d558e7f86 100644
--- a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml
+++ b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml
@@ -26,11 +26,7 @@ properties:
- const: x-powers,axp813-battery-power-supply
- const: x-powers,axp813-battery-power-supply
- monitored-battery:
- description:
- Specifies the phandle of an optional simple-battery connected to
- this gauge.
- $ref: /schemas/types.yaml#/definitions/phandle
+ monitored-battery: true
x-powers,no-thermistor:
type: boolean
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml
index a66007951d58..188a25194000 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml
@@ -144,8 +144,8 @@ examples:
interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack";
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml
index 5dcc2a32c080..a8cddf7e2fe1 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml
@@ -15,17 +15,26 @@ description:
properties:
compatible:
- enum:
- - qcom,sc8180x-adsp-pas
- - qcom,sc8180x-cdsp-pas
- - qcom,sc8180x-slpi-pas
- - qcom,sm8150-adsp-pas
- - qcom,sm8150-cdsp-pas
- - qcom,sm8150-mpss-pas
- - qcom,sm8150-slpi-pas
- - qcom,sm8250-adsp-pas
- - qcom,sm8250-cdsp-pas
- - qcom,sm8250-slpi-pas
+ oneOf:
+ - items:
+ - enum:
+ - qcom,qcs615-adsp-pas
+ - const: qcom,sm8150-adsp-pas
+ - items:
+ - enum:
+ - qcom,qcs615-cdsp-pas
+ - const: qcom,sm8150-cdsp-pas
+ - enum:
+ - qcom,sc8180x-adsp-pas
+ - qcom,sc8180x-cdsp-pas
+ - qcom,sc8180x-slpi-pas
+ - qcom,sm8150-adsp-pas
+ - qcom,sm8150-cdsp-pas
+ - qcom,sm8150-mpss-pas
+ - qcom,sm8150-slpi-pas
+ - qcom,sm8250-adsp-pas
+ - qcom,sm8250-cdsp-pas
+ - qcom,sm8250-slpi-pas
reg:
maxItems: 1
@@ -62,16 +71,17 @@ allOf:
- if:
properties:
compatible:
- enum:
- - qcom,sc8180x-adsp-pas
- - qcom,sc8180x-cdsp-pas
- - qcom,sc8180x-slpi-pas
- - qcom,sm8150-adsp-pas
- - qcom,sm8150-cdsp-pas
- - qcom,sm8150-slpi-pas
- - qcom,sm8250-adsp-pas
- - qcom,sm8250-cdsp-pas
- - qcom,sm8250-slpi-pas
+ contains:
+ enum:
+ - qcom,sc8180x-adsp-pas
+ - qcom,sc8180x-cdsp-pas
+ - qcom,sc8180x-slpi-pas
+ - qcom,sm8150-adsp-pas
+ - qcom,sm8150-cdsp-pas
+ - qcom,sm8150-slpi-pas
+ - qcom,sm8250-adsp-pas
+ - qcom,sm8250-cdsp-pas
+ - qcom,sm8250-slpi-pas
then:
properties:
interrupts:
@@ -88,12 +98,13 @@ allOf:
- if:
properties:
compatible:
- enum:
- - qcom,sc8180x-adsp-pas
- - qcom,sc8180x-cdsp-pas
- - qcom,sm8150-adsp-pas
- - qcom,sm8150-cdsp-pas
- - qcom,sm8250-cdsp-pas
+ contains:
+ enum:
+ - qcom,sc8180x-adsp-pas
+ - qcom,sc8180x-cdsp-pas
+ - qcom,sm8150-adsp-pas
+ - qcom,sm8150-cdsp-pas
+ - qcom,sm8250-cdsp-pas
then:
properties:
power-domains:
diff --git a/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml b/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml
index b38f8252342e..f78614100ea8 100644
--- a/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml
+++ b/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml
@@ -24,6 +24,7 @@ properties:
- items:
- enum:
- microchip,sam9x7-trng
+ - microchip,sama7d65-trng
- const: microchip,sam9x60-trng
clocks:
diff --git a/Documentation/devicetree/bindings/rtc/amlogic,a4-rtc.yaml b/Documentation/devicetree/bindings/rtc/amlogic,a4-rtc.yaml
index 5d3ac737abcb..e61f22eca85b 100644
--- a/Documentation/devicetree/bindings/rtc/amlogic,a4-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/amlogic,a4-rtc.yaml
@@ -16,9 +16,14 @@ allOf:
properties:
compatible:
- enum:
- - amlogic,a4-rtc
- - amlogic,a5-rtc
+ oneOf:
+ - enum:
+ - amlogic,a4-rtc
+ - amlogic,a5-rtc
+ - items:
+ - enum:
+ - amlogic,c3-rtc
+ - const: amlogic,a5-rtc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/rtc/nxp,lpc1788-rtc.yaml b/Documentation/devicetree/bindings/rtc/nxp,lpc1788-rtc.yaml
index e88b847a1cc5..e896ba59302a 100644
--- a/Documentation/devicetree/bindings/rtc/nxp,lpc1788-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/nxp,lpc1788-rtc.yaml
@@ -18,7 +18,12 @@ allOf:
properties:
compatible:
- const: nxp,lpc1788-rtc
+ oneOf:
+ - items:
+ - enum:
+ - nxp,lpc1850-rtc
+ - const: nxp,lpc1788-rtc
+ - const: nxp,lpc1788-rtc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/rtc/nxp,lpc3220-rtc.yaml b/Documentation/devicetree/bindings/rtc/nxp,lpc3220-rtc.yaml
new file mode 100644
index 000000000000..53353de4cb37
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/nxp,lpc3220-rtc.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/nxp,lpc3220-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC32xx SoC Real-time Clock
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - nxp,lpc3220-rtc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ start-year: true
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: rtc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/lpc32xx-clock.h>
+
+ rtc@40024000 {
+ compatible = "nxp,lpc3220-rtc";
+ reg = <0x40024000 0x1000>;
+ interrupt-parent = <&sic1>;
+ interrupts = <20 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LPC32XX_CLK_RTC>;
+ };
+
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf85063.yaml b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.yaml
index 2f892f8640d1..1e6277e524c2 100644
--- a/Documentation/devicetree/bindings/rtc/nxp,pcf85063.yaml
+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.yaml
@@ -12,6 +12,7 @@ maintainers:
properties:
compatible:
enum:
+ - microcrystal,rv8063
- microcrystal,rv8263
- nxp,pcf85063
- nxp,pcf85063a
@@ -44,13 +45,19 @@ properties:
wakeup-source: true
+ spi-cs-high: true
+
+ spi-3wire: true
+
allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
- $ref: rtc.yaml#
- if:
properties:
compatible:
contains:
enum:
+ - microcrystal,rv8063
- microcrystal,rv8263
then:
properties:
@@ -65,12 +72,23 @@ allOf:
properties:
quartz-load-femtofarads:
const: 7000
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ enum:
+ - microcrystal,rv8063
+ then:
+ properties:
+ spi-cs-high: false
+ spi-3wire: false
required:
- compatible
- reg
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
@@ -90,3 +108,16 @@ examples:
};
};
};
+
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@0 {
+ compatible = "microcrystal,rv8063";
+ reg = <0>;
+ spi-cs-high;
+ spi-3wire;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/sophgo/sophgo,cv1800b-rtc.yaml b/Documentation/devicetree/bindings/rtc/sophgo,cv1800b-rtc.yaml
index 5cf186c396c9..c695d2ff9fcc 100644
--- a/Documentation/devicetree/bindings/soc/sophgo/sophgo,cv1800b-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/sophgo,cv1800b-rtc.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/sophgo/sophgo,cv1800b-rtc.yaml#
+$id: http://devicetree.org/schemas/rtc/sophgo,cv1800b-rtc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Real Time Clock of the Sophgo CV1800 SoC
diff --git a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
index 7330a7200831..5e0c7cd25cc6 100644
--- a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
@@ -63,8 +63,6 @@ properties:
- microcrystal,rv3029
# Real Time Clock
- microcrystal,rv8523
- # NXP LPC32xx SoC Real-time Clock
- - nxp,lpc3220-rtc
# I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
- ricoh,r2025sd
# I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
diff --git a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
index f34b4986b336..5f9d541d177a 100644
--- a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
+++ b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
@@ -609,6 +609,16 @@ properties:
- renesas,r9a09g077m44 # RZ/T2H with Quad Cortex-A55 + Dual Cortex-R52 - no security
- const: renesas,r9a09g077
+ - description: RZ/N2H (R9A09G087)
+ items:
+ - enum:
+ - renesas,rzn2h-evk # RZ/N2H Evaluation Board (RTK9RZN2H0S00000BJ)
+ - enum:
+ - renesas,r9a09g087m04 # RZ/N2H with Single Cortex-A55 + Dual Cortex-R52 - no security
+ - renesas,r9a09g087m24 # RZ/N2H with Dual Cortex-A55 + Dual Cortex-R52 - no security
+ - renesas,r9a09g087m44 # RZ/N2H with Quad Cortex-A55 + Dual Cortex-R52 - no security
+ - const: renesas,r9a09g087
+
additionalProperties: true
...
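The three item levels compose board, SoC-variant, and family compatibles;
e.g. for the dual Cortex-A55 EVK, a sketch matching the enum entries above:

    / {
        compatible = "renesas,rzn2h-evk", "renesas,r9a09g087m24",
                     "renesas,r9a09g087";
    };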
diff --git a/Documentation/devicetree/bindings/sound/atmel,at91-ssc.yaml b/Documentation/devicetree/bindings/sound/atmel,at91-ssc.yaml
index a05e61431824..ce99c2d8c35d 100644
--- a/Documentation/devicetree/bindings/sound/atmel,at91-ssc.yaml
+++ b/Documentation/devicetree/bindings/sound/atmel,at91-ssc.yaml
@@ -16,9 +16,14 @@ description:
properties:
compatible:
- enum:
- - atmel,at91rm9200-ssc
- - atmel,at91sam9g45-ssc
+ oneOf:
+ - enum:
+ - atmel,at91rm9200-ssc
+ - atmel,at91sam9g45-ssc
+ - items:
+ - enum:
+ - microchip,sam9x7-ssc
+ - const: atmel,at91sam9g45-ssc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 08654ed4021b..f3dd18681aa6 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -450,6 +450,8 @@ properties:
- ti,tps53679
# TI Dual channel DCAP+ multiphase controller TPS53681
- ti,tps53681
+ # TI Dual channel DCAP+ multiphase controller TPS53685 with AMD-SVI3
+ - ti,tps53685
# TI Dual channel DCAP+ multiphase controller TPS53688
- ti,tps53688
# TI DC-DC converters on PMBus
diff --git a/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml b/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml
index 32fd535a514a..1dec54fb00f3 100644
--- a/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml
@@ -9,21 +9,20 @@ title: Mediatek Universal Flash Storage (UFS) Controller
maintainers:
- Stanley Chu <stanley.chu@mediatek.com>
-allOf:
- - $ref: ufs-common.yaml
-
properties:
compatible:
enum:
- mediatek,mt8183-ufshci
- mediatek,mt8192-ufshci
+ - mediatek,mt8195-ufshci
clocks:
- maxItems: 1
+ minItems: 1
+ maxItems: 8
clock-names:
- items:
- - const: ufs
+ minItems: 1
+ maxItems: 8
phys:
maxItems: 1
@@ -33,6 +32,10 @@ properties:
vcc-supply: true
+ mediatek,ufs-disable-mcq:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Disable MCQ (Multi-Circular Queue) support for the UFS host.
+
required:
- compatible
- clocks
@@ -43,6 +46,37 @@ required:
unevaluatedProperties: false
+allOf:
+ - $ref: ufs-common.yaml
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - mediatek,mt8195-ufshci
+ then:
+ properties:
+ clocks:
+ minItems: 8
+ clock-names:
+ items:
+ - const: ufs
+ - const: ufs_aes
+ - const: ufs_tick
+ - const: unipro_sysclk
+ - const: unipro_tick
+ - const: unipro_mp_bclk
+ - const: ufs_tx_symbol
+ - const: ufs_mem_sub
+ else:
+ properties:
+ clocks:
+ maxItems: 1
+ clock-names:
+ items:
+ - const: ufs
+
examples:
- |
#include <dt-bindings/clock/mt8183-clk.h>
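Under the new conditional, an mt8195 node must supply all eight clocks in the
listed order; a hedged sketch (the unit address, clock specifiers, and labels
are assumptions, not taken from a real DTS):

    ufshci@11270000 {
        compatible = "mediatek,mt8195-ufshci";
        reg = <0x11270000 0x2300>;
        phys = <&ufsphy>;
        clocks = <&infracfg_ao 0>, <&infracfg_ao 1>, <&infracfg_ao 2>,
                 <&infracfg_ao 3>, <&infracfg_ao 4>, <&infracfg_ao 5>,
                 <&infracfg_ao 6>, <&infracfg_ao 7>;
        clock-names = "ufs", "ufs_aes", "ufs_tick", "unipro_sysclk",
                      "unipro_tick", "unipro_mp_bclk", "ufs_tx_symbol",
                      "ufs_mem_sub";
        vcc-supply = <&vcc_reg>;
    };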
diff --git a/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
index 8dac5eba61b4..dfd084ed9024 100644
--- a/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
@@ -32,6 +32,7 @@ properties:
- qcom,ipq8064-dwc3
- qcom,ipq8074-dwc3
- qcom,ipq9574-dwc3
+ - qcom,milos-dwc3
- qcom,msm8953-dwc3
- qcom,msm8994-dwc3
- qcom,msm8996-dwc3
@@ -338,6 +339,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,milos-dwc3
- qcom,qcm2290-dwc3
- qcom,qcs615-dwc3
- qcom,sar2130p-dwc3
@@ -453,6 +455,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,milos-dwc3
- qcom,x1e80100-dwc3
then:
properties:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 8af7622fcb59..77160cd47f54 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -405,6 +405,8 @@ patternProperties:
description: Diodes, Inc.
"^dioo,.*":
description: Dioo Microcircuit Co., Ltd
+ "^djn,.*":
+ description: Shenzhen DJN Optronics Technology Co., Ltd
"^dlc,.*":
description: DLC Display Co., Ltd.
"^dlg,.*":
@@ -679,6 +681,8 @@ patternProperties:
description: Huawei Technologies Co., Ltd.
"^hugsun,.*":
description: Shenzhen Hugsun Technology Co. Ltd.
+ "^huiling,.*":
+ description: Shenzhen Huiling Information Technology Co., Ltd.
"^hwacom,.*":
description: HwaCom Systems Inc.
"^hxt,.*":
diff --git a/Documentation/devicetree/bindings/watchdog/nxp,pnx4008-wdt.yaml b/Documentation/devicetree/bindings/watchdog/nxp,pnx4008-wdt.yaml
index 35ef940cbabe..8964c1c5d522 100644
--- a/Documentation/devicetree/bindings/watchdog/nxp,pnx4008-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/nxp,pnx4008-wdt.yaml
@@ -19,6 +19,9 @@ properties:
reg:
maxItems: 1
+ clocks:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index 5a91df105141..607589592bfb 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -131,6 +131,29 @@ It supports two optional parameters:
``--no-virtualenv``
Use OS packaging for Sphinx instead of Python virtual environment.
+Installing Sphinx Minimal Version
+---------------------------------
+
+When changing the Sphinx build system, it is important to ensure that
+the minimal version is still supported. Nowadays it is becoming harder
+to verify this on modern distributions, as the minimal Sphinx version
+cannot be installed with Python 3.13 and above.
+
+Testing with the lowest supported Python version, as defined in
+Documentation/process/changes.rst, can be done by creating a venv with
+that version and installing the minimal requirements with::
+
+ /usr/bin/python3.9 -m venv sphinx_min
+ . sphinx_min/bin/activate
+ pip install -r Documentation/sphinx/min_requirements.txt
+
+A more comprehensive test can be done by using::
+
+ scripts/test_doc_build.py
+
+This script creates one Python venv per supported Sphinx version,
+optionally building the documentation with each of them.
+
Sphinx Build
============
diff --git a/Documentation/driver-api/cxl/conventions.rst b/Documentation/driver-api/cxl/conventions.rst
new file mode 100644
index 000000000000..da347a81a237
--- /dev/null
+++ b/Documentation/driver-api/cxl/conventions.rst
@@ -0,0 +1,47 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+=======================================
+Compute Express Link: Linux Conventions
+=======================================
+
+There exist shipping platforms that bend or break CXL specification
+expectations. Record the details and the rationale for those deviations.
+Borrow the ACPI Code First template format to capture the assumptions
+and tradeoffs such that multiple platform implementations can follow the
+same convention.
+
+<(template) Title>
+==================
+
+Document
+--------
+CXL Revision <rev>, Version <ver>
+
+License
+-------
+SPDX-License-Identifier: CC-BY-4.0
+
+Creator/Contributors
+--------------------
+
+Summary of the Change
+---------------------
+
+<Detail the conflict with the specification and, where available, the
+assumptions and tradeoffs taken by the hardware platform.>
+
+
+Benefits of the Change
+----------------------
+
+<Detail what happens if platforms and Linux do not adopt this
+convention.>
+
+References
+----------
+
+Detailed Description of the Change
+----------------------------------
+
+<Propose spec language that corrects the conflict.>
diff --git a/Documentation/driver-api/cxl/devices/device-types.rst b/Documentation/driver-api/cxl/devices/device-types.rst
index f5e4330c1cfe..923f5d89bc04 100644
--- a/Documentation/driver-api/cxl/devices/device-types.rst
+++ b/Documentation/driver-api/cxl/devices/device-types.rst
@@ -63,13 +63,13 @@ A Type-2 CXL Device:
* Supports cxl.io, cxl.cache, and cxl.mem protocols
* Optionally implements coherent cache and Host-Managed Device Memory
-* Is typically an accelerator device w/ high bandwidth memory.
+* Is typically an accelerator device with high bandwidth memory.
The primary difference between a type-1 and type-2 device is the presence
of host-managed device memory, which allows the device to operate on a
-local memory bank - while the CPU sill has coherent DMA to the same memory.
+local memory bank - while the CPU still has coherent DMA to the same memory.
-The allows things like GPUs to expose their memory via DAX devices or file
+This allows things like GPUs to expose their memory via DAX devices or file
descriptors, allows drivers and programs direct access to device memory
rather than use block-transfer semantics.
@@ -89,7 +89,7 @@ basic coherent DMA.
Switch
------
-A CXL switch is a device capacity of routing any CXL (and by extension, PCIe)
+A CXL switch is a device capable of routing any CXL (and by extension, PCIe)
protocol between an upstream, downstream, or peer devices. Many devices, such
as Multi-Logical Devices, imply the presence of switching in some manner.
@@ -103,7 +103,7 @@ A Single-Logical Device (SLD) is a device which presents a single device to
one or more heads.
A Multi-Logical Device (MLD) is a device which may present multiple devices
-to one or more devices.
+to one or more upstream devices.
A Single-Headed Device exposes only a single physical connection.
diff --git a/Documentation/driver-api/cxl/index.rst b/Documentation/driver-api/cxl/index.rst
index 9e1414ad3357..c1106a68b67c 100644
--- a/Documentation/driver-api/cxl/index.rst
+++ b/Documentation/driver-api/cxl/index.rst
@@ -14,6 +14,7 @@ that have impacts on each other. The docs here break up configurations steps.
theory-of-operation
maturity-map
+ conventions
.. toctree::
:maxdepth: 2
diff --git a/Documentation/driver-api/cxl/linux/cxl-driver.rst b/Documentation/driver-api/cxl/linux/cxl-driver.rst
index 9759e90c3cf1..dd6dd17dc536 100644
--- a/Documentation/driver-api/cxl/linux/cxl-driver.rst
+++ b/Documentation/driver-api/cxl/linux/cxl-driver.rst
@@ -20,7 +20,7 @@ The CXL driver is split into a number of drivers.
* cxl_port - initializes root and provides port enumeration interface.
* cxl_acpi - initializes root decoders and interacts with ACPI data.
* cxl_p/mem - initializes memory devices
-* cxl_pci - uses cxl_port to enumates the actual fabric hierarchy.
+* cxl_pci - uses cxl_port to enumerate the actual fabric hierarchy.
Driver Devices
==============
diff --git a/Documentation/driver-api/cxl/theory-of-operation.rst b/Documentation/driver-api/cxl/theory-of-operation.rst
index 40793dad3630..257f513e320c 100644
--- a/Documentation/driver-api/cxl/theory-of-operation.rst
+++ b/Documentation/driver-api/cxl/theory-of-operation.rst
@@ -29,8 +29,8 @@ Platform firmware enumerates a menu of interleave options at the "CXL root port"
(Linux term for the top of the CXL decode topology). From there, PCIe topology
dictates which endpoints can participate in which Host Bridge decode regimes.
Each PCIe Switch in the path between the root and an endpoint introduces a point
-at which the interleave can be split. For example platform firmware may say at a
-given range only decodes to 1 one Host Bridge, but that Host Bridge may in turn
+at which the interleave can be split. For example, platform firmware may say a
+given range only decodes to one Host Bridge, but that Host Bridge may in turn
interleave cycles across multiple Root Ports. An intervening Switch between a
port and an endpoint may interleave cycles across multiple Downstream Switch
Ports, etc.
@@ -187,7 +187,7 @@ decodes them to "ports", "ports" decode to "endpoints", and "endpoints"
represent the decode from SPA (System Physical Address) to DPA (Device Physical
Address).
-Continuing the RAID analogy, disks have both topology metadata and on device
+Continuing the RAID analogy, disks have both topology metadata and on-device
metadata that determine RAID set assembly. CXL Port topology and CXL Port link
status is metadata for CXL.mem set assembly. The CXL Port topology is enumerated
by the arrival of a CXL.mem device. I.e. unless and until the PCIe core attaches
@@ -197,7 +197,7 @@ the Linux PCI core to tear down switch-level CXL resources because the endpoint
->remove() event cleans up the port data that was established to support that
Memory Expander.
-The port metadata and potential decode schemes that a give memory device may
+The port metadata and potential decode schemes that a given memory device may
participate can be determined via a command like::
# cxl list -BDMu -d root -m mem3
@@ -249,8 +249,8 @@ participate can be determined via a command like::
...which queries the CXL topology to ask "given CXL Memory Expander with a kernel
device name of 'mem3' which platform level decode ranges may this device
participate". A given expander can participate in multiple CXL.mem interleave
-sets simultaneously depending on how many decoder resource it has. In this
-example mem3 can participate in one or more of a PMEM interleave that spans to
+sets simultaneously depending on how many decoder resources it has. In this
+example mem3 can participate in one or more of a PMEM interleave that spans two
Host Bridges, a PMEM interleave that targets a single Host Bridge, a Volatile
memory interleave that spans 2 Host Bridges, and a Volatile memory interleave
that only targets a single Host Bridge.
diff --git a/Documentation/driver-api/dpll.rst b/Documentation/driver-api/dpll.rst
index e6855cd37e85..eca72d9b9ed8 100644
--- a/Documentation/driver-api/dpll.rst
+++ b/Documentation/driver-api/dpll.rst
@@ -214,6 +214,24 @@ offset values are fractional with 3-digit decimal places and shell be
divided with ``DPLL_PIN_PHASE_OFFSET_DIVIDER`` to get integer part and
modulo divided to get fractional part.
+Phase offset monitor
+====================
+
+Phase offset measurement is typically performed against the current active
+source. However, some DPLL (Digital Phase-Locked Loop) devices may offer
+the capability to monitor phase offsets across all available inputs.
+The attribute and current feature state shall be included in the response
+message of the ``DPLL_CMD_DEVICE_GET`` command for supported DPLL devices.
+In such cases, users can also control the feature using the
+``DPLL_CMD_DEVICE_SET`` command by setting the ``enum dpll_feature_state``
+values for the attribute.
+Once enabled, the phase offset measurements for the input shall be returned
+in the ``DPLL_A_PIN_PHASE_OFFSET`` attribute.
+
+ =============================== ========================
+ ``DPLL_A_PHASE_OFFSET_MONITOR`` attr state of a feature
+ =============================== ========================
+
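+A minimal sketch using the YNL CLI (the JSON attribute and value names below
+follow the dpll netlink spec naming convention and are assumptions here;
+device id 0 is illustrative)::
+
+  $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dpll.yaml \
+        --do device-set --json '{"id": 0, "phase-offset-monitor": "enable"}'
+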
Embedded SYNC
=============
@@ -235,6 +253,31 @@ the pin.
``DPLL_A_PIN_ESYNC_PULSE`` pulse type of Embedded SYNC
========================================= =================================
+Reference SYNC
+==============
+
+The device may support the Reference SYNC feature, which allows the combination
+of two inputs into an input pair. In this configuration, clock signals
+from both inputs are used to synchronize the DPLL device. The higher frequency
+signal is utilized for the loop bandwidth of the DPLL, while the lower frequency
+signal is used to syntonize the output signal of the DPLL device. This feature
+enables the provision of a high-quality loop bandwidth signal from an external
+source.
+
+A capable input provides a list of inputs that it can be bound with to create
+Reference SYNC. To control this feature, the user must request a desired
+state for a target pin: use ``DPLL_PIN_STATE_CONNECTED`` to enable or
+``DPLL_PIN_STATE_DISCONNECTED`` to disable the feature. An input pin can be
+bound to only one other pin at any given time.
+
+ ============================== ==========================================
+ ``DPLL_A_PIN_REFERENCE_SYNC`` nested attribute for providing info or
+ requesting configuration of the Reference
+ SYNC feature
+ ``DPLL_A_PIN_ID`` target pin id for Reference SYNC feature
+ ``DPLL_A_PIN_STATE`` state of Reference SYNC connection
+ ============================== ==========================================
+
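+As a sketch (again assuming the spec's attribute naming; the pin ids are
+illustrative), binding input pin 1 with input pin 2 via the YNL CLI could
+look like::
+
+  $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dpll.yaml \
+        --do pin-set \
+        --json '{"id": 1, "reference-sync": {"id": 2, "state": "connected"}}'
+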
Configuration commands group
============================
diff --git a/Documentation/driver-api/gpio/driver.rst b/Documentation/driver-api/gpio/driver.rst
index ae433261e11a..85d86f92c41b 100644
--- a/Documentation/driver-api/gpio/driver.rst
+++ b/Documentation/driver-api/gpio/driver.rst
@@ -750,7 +750,7 @@ compliance:
- Test your driver with the appropriate in-kernel real-time test cases for both
level and edge IRQs
-* [1] http://www.spinics.net/lists/linux-omap/msg120425.html
+* [1] https://lore.kernel.org/r/1437496011-11486-1-git-send-email-bigeasy@linutronix.de/
* [2] https://lore.kernel.org/r/1443209283-20781-2-git-send-email-grygorii.strashko@ti.com
* [3] https://lore.kernel.org/r/1443209283-20781-3-git-send-email-grygorii.strashko@ti.com
diff --git a/Documentation/driver-api/media/v4l2-controls.rst b/Documentation/driver-api/media/v4l2-controls.rst
index b2e91804829b..fc04907589ab 100644
--- a/Documentation/driver-api/media/v4l2-controls.rst
+++ b/Documentation/driver-api/media/v4l2-controls.rst
@@ -110,6 +110,7 @@ For sub-device drivers:
v4l2_ctrl_handler_free(&foo->ctrl_handler);
+:c:func:`v4l2_ctrl_handler_free` does not touch the handler's ``error`` field.
2) Add controls:
@@ -191,12 +192,8 @@ These functions are typically called right after the
V4L2_CID_TEST_PATTERN, ARRAY_SIZE(test_pattern) - 1, 0,
0, test_pattern);
...
- if (foo->ctrl_handler.error) {
- int err = foo->ctrl_handler.error;
-
- v4l2_ctrl_handler_free(&foo->ctrl_handler);
- return err;
- }
+ if (foo->ctrl_handler.error)
+ return v4l2_ctrl_handler_free(&foo->ctrl_handler);
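+
+This shorter form relies on :c:func:`v4l2_ctrl_handler_free` returning the
+handler's ``error`` value, so the temporary variable is no longer needed.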
The :c:func:`v4l2_ctrl_new_std` function returns the v4l2_ctrl pointer to
the new control, but if you do not need to access the pointer outside the
diff --git a/Documentation/driver-api/soundwire/bra.rst b/Documentation/driver-api/soundwire/bra.rst
index 8500253fa3e8..c08ab2591496 100644
--- a/Documentation/driver-api/soundwire/bra.rst
+++ b/Documentation/driver-api/soundwire/bra.rst
@@ -333,4 +333,4 @@ FIFO sizes to avoid xruns.
Alignment requirements are currently not enforced at the core level
but at the platform-level, e.g. for Intel the data sizes must be
-multiples of 32 bytes.
+equal to or larger than 16 bytes.
diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst
index 1c14ba08fbfc..c2d3996b5b40 100644
--- a/Documentation/fault-injection/fault-injection.rst
+++ b/Documentation/fault-injection/fault-injection.rst
@@ -2,7 +2,7 @@
Fault injection capabilities infrastructure
===========================================
-See also drivers/md/md-faulty.c and "every_nth" module option for scsi_debug.
+See also "every_nth" module option for scsi_debug.
Available fault injection capabilities
diff --git a/Documentation/filesystems/dax.rst b/Documentation/filesystems/dax.rst
index 08dd5e254cc5..5b283f3d1eb1 100644
--- a/Documentation/filesystems/dax.rst
+++ b/Documentation/filesystems/dax.rst
@@ -206,7 +206,6 @@ stall the CPU for an extended period, you should also not attempt to
implement direct_access.
These block devices may be used for inspiration:
-- brd: RAM backed block device driver
- pmem: NVDIMM persistent memory driver
diff --git a/Documentation/filesystems/ext4/atomic_writes.rst b/Documentation/filesystems/ext4/atomic_writes.rst
index f65767df3620..aeb47ace738d 100644
--- a/Documentation/filesystems/ext4/atomic_writes.rst
+++ b/Documentation/filesystems/ext4/atomic_writes.rst
@@ -148,10 +148,10 @@ reserved during:
only required to handle a split extent across leaf blocks.
How to
-------
+~~~~~~
Creating Filesystems with Atomic Write Support
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
First check the atomic write units supported by block device.
See :ref:`atomic_write_bdev_support` for more details.
@@ -176,7 +176,7 @@ Where ``-b`` specifies the block size, ``-C`` specifies the cluster size in byte
and ``-O bigalloc`` enables the bigalloc feature.
Application Interface
-~~~~~~~~~~~~~~~~~~~~~
+^^^^^^^^^^^^^^^^^^^^^
Applications can use the ``pwritev2()`` system call with the ``RWF_ATOMIC`` flag
to perform atomic writes:
@@ -204,7 +204,7 @@ writes are supported.
.. _atomic_write_bdev_support:
Hardware Support
-----------------
+~~~~~~~~~~~~~~~~
The underlying storage device must support atomic write operations.
Modern NVMe and SCSI devices often provide this capability.
@@ -217,7 +217,7 @@ Nonzero values for these attributes indicate that the device supports
atomic writes.
See Also
---------
+~~~~~~~~
* :doc:`bigalloc` - Documentation on the bigalloc feature
* :doc:`allocators` - Documentation on block allocation in ext4
diff --git a/Documentation/filesystems/ext4/bitmaps.rst b/Documentation/filesystems/ext4/bitmaps.rst
index 91c45d86e9bb..9d7d7b083a25 100644
--- a/Documentation/filesystems/ext4/bitmaps.rst
+++ b/Documentation/filesystems/ext4/bitmaps.rst
@@ -19,10 +19,3 @@ necessarily the case that no blocks are in use -- if ``meta_bg`` is set,
the bitmaps and group descriptor live inside the group. Unfortunately,
ext2fs_test_block_bitmap2() will return '0' for those locations,
which produces confusing debugfs output.
-
-Inode Table
------------
-Inode tables are statically allocated at mkfs time. Each block group
-descriptor points to the start of the table, and the superblock records
-the number of inodes per group. See the section on inodes for more
-information.
diff --git a/Documentation/filesystems/ext4/blockgroup.rst b/Documentation/filesystems/ext4/blockgroup.rst
index ed5a5cac6d40..7cbf0b2b778e 100644
--- a/Documentation/filesystems/ext4/blockgroup.rst
+++ b/Documentation/filesystems/ext4/blockgroup.rst
@@ -1,7 +1,10 @@
.. SPDX-License-Identifier: GPL-2.0
+Block Groups
+------------
+
Layout
-------
+~~~~~~
The layout of a standard block group is approximately as follows (each
of these fields is discussed in a separate section below):
@@ -60,7 +63,7 @@ groups (flex_bg). Leftover space is used for file data blocks, indirect
block maps, extent tree blocks, and extended attributes.
Flexible Block Groups
----------------------
+~~~~~~~~~~~~~~~~~~~~~
Starting in ext4, there is a new feature called flexible block groups
(flex_bg). In a flex_bg, several block groups are tied together as one
@@ -78,7 +81,7 @@ if flex_bg is enabled. The number of block groups that make up a
flex_bg is given by 2 ^ ``sb.s_log_groups_per_flex``.
Meta Block Groups
------------------
+~~~~~~~~~~~~~~~~~
Without the option META_BG, for safety concerns, all block group
descriptors copies are kept in the first block group. Given the default
@@ -117,7 +120,7 @@ Please see an important note about ``BLOCK_UNINIT`` in the section about
block and inode bitmaps.
Lazy Block Group Initialization
--------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A new feature for ext4 are three block group descriptor flags that
enable mkfs to skip initializing other parts of the block group
diff --git a/Documentation/filesystems/ext4/dynamic.rst b/Documentation/filesystems/ext4/dynamic.rst
index bb0c84333341..bbad439aada2 100644
--- a/Documentation/filesystems/ext4/dynamic.rst
+++ b/Documentation/filesystems/ext4/dynamic.rst
@@ -6,7 +6,9 @@ Dynamic Structures
Dynamic metadata are created on the fly when files and blocks are
allocated to files.
-.. include:: inodes.rst
-.. include:: ifork.rst
-.. include:: directory.rst
-.. include:: attributes.rst
+.. toctree::
+
+ inodes
+ ifork
+ directory
+ attributes
diff --git a/Documentation/filesystems/ext4/globals.rst b/Documentation/filesystems/ext4/globals.rst
index b17418974fd3..c6a6abce818a 100644
--- a/Documentation/filesystems/ext4/globals.rst
+++ b/Documentation/filesystems/ext4/globals.rst
@@ -6,9 +6,12 @@ Global Structures
The filesystem is sharded into a number of block groups, each of which
have static metadata at fixed locations.
-.. include:: super.rst
-.. include:: group_descr.rst
-.. include:: bitmaps.rst
-.. include:: mmp.rst
-.. include:: journal.rst
-.. include:: orphan.rst
+.. toctree::
+
+ super
+ group_descr
+ bitmaps
+ inode_table
+ mmp
+ journal
+ orphan
diff --git a/Documentation/filesystems/ext4/index.rst b/Documentation/filesystems/ext4/index.rst
index 705d813d558f..1ff8150c50e9 100644
--- a/Documentation/filesystems/ext4/index.rst
+++ b/Documentation/filesystems/ext4/index.rst
@@ -5,7 +5,7 @@ ext4 Data Structures and Algorithms
===================================
.. toctree::
- :maxdepth: 6
+ :maxdepth: 2
:numbered:
about
diff --git a/Documentation/filesystems/ext4/inode_table.rst b/Documentation/filesystems/ext4/inode_table.rst
new file mode 100644
index 000000000000..f7900a52c0d5
--- /dev/null
+++ b/Documentation/filesystems/ext4/inode_table.rst
@@ -0,0 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Inode Table
+-----------
+
+Inode tables are statically allocated at mkfs time. Each block group
+descriptor points to the start of the table, and the superblock records
+the number of inodes per group. See :doc:`inode documentation <inodes>`
+for more information on inode table layout.
diff --git a/Documentation/filesystems/ext4/overview.rst b/Documentation/filesystems/ext4/overview.rst
index 9d4054c17ecb..171c3963d7f6 100644
--- a/Documentation/filesystems/ext4/overview.rst
+++ b/Documentation/filesystems/ext4/overview.rst
@@ -16,13 +16,15 @@ All fields in ext4 are written to disk in little-endian order. HOWEVER,
all fields in jbd2 (the journal) are written to disk in big-endian
order.
-.. include:: blocks.rst
-.. include:: blockgroup.rst
-.. include:: special_inodes.rst
-.. include:: allocators.rst
-.. include:: checksums.rst
-.. include:: bigalloc.rst
-.. include:: inlinedata.rst
-.. include:: eainode.rst
-.. include:: verity.rst
-.. include:: atomic_writes.rst
+.. toctree::
+
+ blocks
+ blockgroup
+ special_inodes
+ allocators
+ checksums
+ bigalloc
+ inlinedata
+ eainode
+ verity
+ atomic_writes
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index 440e4ae74e44..e5bb89452aff 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -218,7 +218,7 @@ mode=%s Control block allocation mode which supports "adaptive"
fragmentation/after-GC situation itself. The developers use these
modes to understand filesystem fragmentation/after-GC condition well,
and eventually get some insights to handle them better.
- In "fragment:segment", f2fs allocates a new segment in ramdom
+ In "fragment:segment", f2fs allocates a new segment in random
position. With this, we can simulate the after-GC condition.
In "fragment:block", we can scatter block allocation with
"max_fragment_chunk" and "max_fragment_hole" sysfs nodes.
@@ -238,9 +238,9 @@ usrjquota=<file> Appoint specified file and type during mount, so that quota
grpjquota=<file> information can be properly updated during recovery flow,
prjjquota=<file> <quota file>: must be in root directory;
jqfmt=<quota type> <quota type>: [vfsold,vfsv0,vfsv1].
-offusrjquota Turn off user journalled quota.
-offgrpjquota Turn off group journalled quota.
-offprjjquota Turn off project journalled quota.
+usrjquota= Turn off user journalled quota.
+grpjquota= Turn off group journalled quota.
+prjjquota= Turn off project journalled quota.
quota Enable plain user disk quota accounting.
noquota Disable all plain disk quota option.
alloc_mode=%s Adjust block allocation policy, which supports "reuse"
@@ -261,7 +261,7 @@ test_dummy_encryption=%s
The argument may be either "v1" or "v2", in order to
select the corresponding fscrypt policy version.
checkpoint=%s[:%u[%]] Set to "disable" to turn off checkpointing. Set to "enable"
- to reenable checkpointing. Is enabled by default. While
+ to re-enable checkpointing. Is enabled by default. While
disabled, any unmounting or unexpected shutdowns will cause
the filesystem contents to appear as they did when the
filesystem was mounted with that option.
diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
index 4133a336486d..ab989807a2cb 100644
--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -9,7 +9,7 @@ Overlay Filesystem
This document describes a prototype for a new approach to providing
overlay-filesystem functionality in Linux (sometimes referred to as
union-filesystems). An overlay-filesystem tries to present a
-filesystem which is the result over overlaying one filesystem on top
+filesystem which is the result of overlaying one filesystem on top
of the other.
@@ -61,7 +61,7 @@ Inode properties
|Configuration | Persistent | Uniform | st_ino == d_ino | d_ino == i_ino |
| | st_ino | st_dev | | [*] |
+==============+=====+======+=====+======+========+========+========+=======+
-| | dir | !dir | dir | !dir | dir + !dir | dir | !dir |
+| | dir | !dir | dir | !dir | dir | !dir | dir | !dir |
+--------------+-----+------+-----+------+--------+--------+--------+-------+
| All layers | Y | Y | Y | Y | Y | Y | Y | Y |
| on same fs | | | | | | | | |
@@ -425,7 +425,7 @@ of information from up to three different layers:
The "lower data" file can be on any lower layer, except from the top most
lower layer.
-Below the top most lower layer, any number of lower most layers may be defined
+Below the topmost lower layer, any number of lowermost layers may be defined
as "data-only" lower layers, using double colon ("::") separators.
A normal lower layer is not allowed to be below a data-only layer, so single
colon separators are not allowed to the right of double colon ("::") separators.
@@ -445,8 +445,8 @@ to the absolute path of the "lower data" file in the "data-only" lower layer.
Instead of explicitly enabling "metacopy=on" it is sufficient to specify at
least one data-only layer to enable redirection of data to a data-only layer.
-In this case other forms of metacopy are rejected. Note: this way data-only
-layers may be used toghether with "userxattr", in which case careful attention
+In this case other forms of metacopy are rejected. Note: this way, data-only
+layers may be used together with "userxattr", in which case careful attention
must be given to privileges needed to change the "user.overlay.redirect" xattr
to prevent misuse.
@@ -515,7 +515,7 @@ supports these values:
The metacopy digest is never generated or used. This is the
default if verity option is not specified.
- "on":
- Whenever a metacopy files specifies an expected digest, the
+ Whenever a metacopy file specifies an expected digest, the
corresponding data file must match the specified digest. When
generating a metacopy file the verity digest will be set in it
based on the source file (if it has one).
@@ -537,7 +537,7 @@ Using an upper layer path and/or a workdir path that are already used by
another overlay mount is not allowed and may fail with EBUSY. Using
partially overlapping paths is not allowed and may fail with EBUSY.
If files are accessed from two overlayfs mounts which share or overlap the
-upper layer and/or workdir path the behavior of the overlay is undefined,
+upper layer and/or workdir path, the behavior of the overlay is undefined,
though it will not result in a crash or deadlock.
Mounting an overlay using an upper layer path, where the upper layer path
@@ -778,7 +778,7 @@ controlled by the "uuid" mount option, which supports these values:
- "auto": (default)
UUID is taken from xattr "trusted.overlay.uuid" if it exists.
Upgrade to "uuid=on" on first time mount of new overlay filesystem that
- meets the prerequites.
+ meets the prerequisites.
Downgrade to "uuid=null" for existing overlay filesystems that were never
mounted with "uuid=on".
@@ -794,20 +794,20 @@ without significant effort.
The advantage of mounting with the "volatile" option is that all forms of
sync calls to the upper filesystem are omitted.
-In order to avoid a giving a false sense of safety, the syncfs (and fsync)
+In order to avoid giving a false sense of safety, the syncfs (and fsync)
semantics of volatile mounts are slightly different than that of the rest of
VFS. If any writeback error occurs on the upperdir's filesystem after a
volatile mount takes place, all sync functions will return an error. Once this
condition is reached, the filesystem will not recover, and every subsequent sync
-call will return an error, even if the upperdir has not experience a new error
+call will return an error, even if the upperdir has not experienced a new error
since the last sync call.
When overlay is mounted with "volatile" option, the directory
"$workdir/work/incompat/volatile" is created. During next mount, overlay
checks for this directory and refuses to mount if present. This is a strong
-indicator that user should throw away upper and work directories and create
-fresh one. In very limited cases where the user knows that the system has
-not crashed and contents of upperdir are intact, The "volatile" directory
+indicator that the user should discard upper and work directories and create
+fresh ones. In very limited cases where the user knows that the system has
+not crashed and contents of upperdir are intact, the "volatile" directory
can be removed.
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 5236cb52e357..2971551b7235 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -1196,12 +1196,14 @@ SecPageTables
Memory consumed by secondary page tables, this currently includes
KVM mmu and IOMMU allocations on x86 and arm64.
NFS_Unstable
- Always zero. Previous counted pages which had been written to
+ Always zero. Previously counted pages which had been written to
the server, but has not been committed to stable storage.
Bounce
- Memory used for block device "bounce buffers"
+ Always zero. Previously memory used for block device
+ "bounce buffers".
WritebackTmp
- Memory used by FUSE for temporary writeback buffers
+ Always zero. Previously memory used by FUSE for temporary
+ writeback buffers.
CommitLimit
Based on the overcommit ratio ('vm.overcommit_ratio'),
this is the total amount of memory currently available to
diff --git a/Documentation/filesystems/ubifs-authentication.rst b/Documentation/filesystems/ubifs-authentication.rst
index 3d85ee88719a..106bb9c056f6 100644
--- a/Documentation/filesystems/ubifs-authentication.rst
+++ b/Documentation/filesystems/ubifs-authentication.rst
@@ -443,6 +443,6 @@ References
[DM-VERITY] https://www.kernel.org/doc/Documentation/device-mapper/verity.rst
-[FSCRYPT-POLICY2] https://www.spinics.net/lists/linux-ext4/msg58710.html
+[FSCRYPT-POLICY2] https://lore.kernel.org/r/20171023214058.128121-1-ebiggers3@gmail.com/
[UBIFS-WP] http://www.linux-mtd.infradead.org/doc/ubifs_whitepaper.pdf
diff --git a/Documentation/gpu/amdgpu/debugging.rst b/Documentation/gpu/amdgpu/debugging.rst
index 7cbfea0606e1..ac914d524741 100644
--- a/Documentation/gpu/amdgpu/debugging.rst
+++ b/Documentation/gpu/amdgpu/debugging.rst
@@ -85,3 +85,21 @@ UMR
GPU debugging and diagnostics tool. Please see the umr
`documentation <https://umr.readthedocs.io/en/main/>`_ for more information
about its capabilities.
+
+Debugging backlight brightness
+==============================
+Default backlight brightness is intended to be set via the policy advertised
+by the firmware. Firmware will often provide different defaults for AC or DC.
+Furthermore, some userspace software will save backlight brightness during
+the previous boot and attempt to restore it.
+
+Some firmware also has support for a feature called "Custom Backlight Curves"
+where an input value for brightness is mapped along a linearly interpolated
+curve of brightness values that better match display characteristics.
+
+In the event of problems with the backlight, there is a trace event
+that can be enabled at bootup to log every brightness change request.
+This can help isolate where the problem is. To enable the trace event, add
+the following to the kernel command line::
+
+ tp_printk trace_event=amdgpu_dm:amdgpu_dm_brightness:mod:amdgpu trace_buf_size=1M
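+
+Because ``tp_printk`` routes trace events through printk, the logged
+brightness requests can then be read back with ``dmesg``. As a sketch, the
+same event can also be enabled at runtime (assuming tracefs is mounted in the
+usual location)::
+
+  echo 1 > /sys/kernel/tracing/events/amdgpu_dm/amdgpu_dm_brightness/enable
+  cat /sys/kernel/tracing/trace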
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 69f72e71a96e..843facf01b2d 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -446,6 +446,23 @@ telemetry information (devcoredump, syslog). This is useful because the first
hang is usually the most critical one which can result in consequential hangs or
complete wedging.
+Task information
+----------------
+
+The information about which application (if any) was involved in the device
+wedging is useful for userspace if it wants to notify the user about what
+happened (e.g. the compositor displays a message to the user "The <task name>
+caused a graphical error and the system recovered") or to implement policies
+(e.g. the daemon may "ban" a task that keeps resetting the device). If the task
+information is available, the uevent will include ``PID=<pid>`` and
+``TASK=<task name>``. Otherwise, ``PID`` and ``TASK`` will not appear in the
+event string.
+
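+For example, a hang caused by a task named ``app-name`` with pid 1234 would
+add ``PID=1234`` and ``TASK=app-name`` to the event string (the task name and
+pid here are illustrative).
+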
+The reliability of this information is driver and hardware specific, and it
+should be treated with caution regarding its precision. To get the big picture
+of what really happened, the devcoredump file provides much more detailed
+information about the device state and about the event.
+
Consumer prerequisites
----------------------
@@ -693,3 +710,22 @@ dma-buf interoperability
Please see Documentation/userspace-api/dma-buf-alloc-exchange.rst for
information on how dma-buf is integrated and exposed within DRM.
+
+
+Trace events
+============
+
+See Documentation/trace/tracepoints.rst for information about using
+Linux Kernel Tracepoints.
+In the DRM subsystem, some events are considered stable uAPI to avoid
+breaking tools (e.g.: GPUVis, umr) relying on them. Stable means that fields
+cannot be removed, nor their formatting updated. Adding new fields is
+possible, under the normal uAPI requirements.
+
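+As a sketch, assuming tracefs is mounted in the usual location and that the
+scheduler events live under the ``gpu_scheduler`` trace system, they can be
+enabled at runtime with::
+
+  echo 1 > /sys/kernel/tracing/events/gpu_scheduler/enable
+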
+Stable uAPI events
+------------------
+
+From ``drivers/gpu/drm/scheduler/gpu_scheduler_trace.h``
+
+.. kernel-doc:: drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+ :doc: uAPI trace events
\ No newline at end of file
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 7a469df675d8..72932fa31b8d 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -112,10 +112,10 @@ panel self refresh.
Atomic Plane Helpers
--------------------
-.. kernel-doc:: drivers/gpu/drm/i915/display/intel_atomic_plane.c
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_plane.c
:doc: atomic plane helpers
-.. kernel-doc:: drivers/gpu/drm/i915/display/intel_atomic_plane.c
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_plane.c
:internal:
Asynchronous Page Flip
@@ -204,6 +204,12 @@ DMC Firmware Support
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc.c
:internal:
+DMC Flip Queue
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_flipq.c
+ :doc: DMC Flip Queue
+
DMC wakelock support
--------------------
diff --git a/Documentation/gpu/nova/core/devinit.rst b/Documentation/gpu/nova/core/devinit.rst
new file mode 100644
index 000000000000..70c819a96a00
--- /dev/null
+++ b/Documentation/gpu/nova/core/devinit.rst
@@ -0,0 +1,61 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================
+Device Initialization (devinit)
+==================================
+The devinit process is complex and subject to change. This document provides a high-level
+overview using the Ampere GPU family as an example. The goal is to provide a conceptual
+overview of the process to aid in understanding the corresponding kernel code.
+
+Device initialization (devinit) is a crucial sequence of register read/write operations
+that occur after a GPU reset. The devinit sequence is essential for properly configuring
+the GPU hardware before it can be used.
+
+The devinit engine is an interpreter program that typically runs on the PMU (Power Management
+Unit) microcontroller of the GPU. This interpreter executes a "script" of initialization
+commands. The devinit engine itself is part of the VBIOS ROM in the same ROM image as the
+FWSEC (Firmware Security) image (see fwsec.rst and vbios.rst) and it runs before the
+nova-core driver is even loaded. On an Ampere GPU, the devinit ucode is separate from the
+FWSEC ucode. It is launched by FWSEC, which runs on the GSP in 'heavy-secure' mode, while
+devinit runs on the PMU in 'light-secure' mode.
+
+Key Functions of devinit
+------------------------
+devinit performs several critical tasks:
+
+1. Programming VRAM memory controller timings
+2. Power sequencing
+3. Clock and PLL (Phase-Locked Loop) configuration
+4. Thermal management
+
+Low-level Firmware Initialization Flow
+--------------------------------------
+Upon reset, several microcontrollers on the GPU (such as PMU, SEC2, GSP, etc.) run GPU
+firmware (gfw) code to set up the GPU and its core parameters. Most of the GPU is
+considered unusable until this initialization process completes.
+
+These low-level GPU firmware components are typically:
+
+1. Located in the VBIOS ROM in the same ROM partition (see vbios.rst and fwsec.rst).
+2. Executed in sequence on different microcontrollers:
+
+ - The devinit engine typically but not necessarily runs on the PMU.
+ - On an Ampere GPU, the FWSEC typically runs on the GSP (GPU System Processor) in
+ heavy-secure mode.
+
+Before the driver can proceed with further initialization, it must wait for a signal
+indicating that core initialization is complete (known as GFW_BOOT). This signal is
+asserted by the FWSEC running on the GSP in heavy-secure mode.
+
+Runtime Considerations
+----------------------
+It's important to note that the devinit sequence also needs to run during suspend/resume
+operations at runtime, not just during initial boot, as it is critical to power management.
+
+Security and Access Control
+---------------------------
+The initialization process involves careful privilege management. For example, before
+accessing certain completion status registers, the driver must check privilege level
+masks. Some registers are only accessible after secure firmware (FWSEC) lowers the
+privilege level to allow CPU (LS/low-secure) access. This is the case, for example,
+when receiving the GFW_BOOT signal.
\ No newline at end of file
diff --git a/Documentation/gpu/nova/core/falcon.rst b/Documentation/gpu/nova/core/falcon.rst
new file mode 100644
index 000000000000..33137082eb6c
--- /dev/null
+++ b/Documentation/gpu/nova/core/falcon.rst
@@ -0,0 +1,158 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================
+Falcon (FAst Logic CONtroller)
+==============================
+The following sections describe the Falcon core and the ucode running on it.
+The descriptions are based on the Ampere GPU or earlier designs; however, they
+should mostly apply to future designs as well, though everything is subject to
+change. The overview provided here is mainly tailored towards understanding the
+interactions of the nova-core driver with the Falcon.
+
+NVIDIA GPUs embed small RISC-like microcontrollers called Falcon cores, which
+handle secure firmware tasks, initialization, and power management. Modern
+NVIDIA GPUs may have multiple such Falcon instances (e.g., GSP (the GPU system
+processor) and SEC2 (the security engine)) and also may integrate a RISC-V core.
+This core is capable of running both RISC-V and Falcon code.
+
+The code running on the Falcon cores is also called 'ucode', and will be
+referred to as such in the following sections.
+
+Falcons have separate instruction and data memories (IMEM/DMEM) and provide a
+small DMA engine (via the FBIF - "Frame Buffer Interface") to load code from
+system memory. The nova-core driver must reset and configure the Falcon, load
+its firmware via DMA, and start its CPU.
+
+Falcon security levels
+======================
+Falcons can run in Non-secure (NS), Light Secure (LS), or Heavy Secure (HS)
+modes.
+
+Heavy Secured (HS) also known as Privilege Level 3 (PL3)
+--------------------------------------------------------
+HS ucode is the most trusted code and has access to pretty much everything on
+the chip. The HS binary includes a signature in it which is verified at boot.
+This signature verification is done by the hardware itself, thus establishing a
+root of trust. For example, the FWSEC-FRTS command (see fwsec.rst) runs on the
+GSP in HS mode. FRTS, which involves setting up and loading content into the WPR
+(Write Protect Region), has to be done by the HS ucode and cannot be done by the
+host CPU or LS ucode.
+
+Light Secured (LS or PL2) and Non Secured (NS or PL0)
+-----------------------------------------------------
+These modes are less secure than HS. Like HS, the LS or NS ucode binary also
+typically includes a signature in it. To load firmware in LS or NS mode onto a
+Falcon, another Falcon needs to be running in HS mode, which also establishes the
+root of trust. For example, in the case of an Ampere GPU, the CPU runs the "Booter"
+ucode in HS mode on the SEC2 Falcon, which then authenticates and runs the
+run-time GSP binary (GSP-RM) in LS mode on the GSP Falcon. Similarly, as an
+example, after reset on an Ampere, FWSEC runs on the GSP which then loads the
+devinit engine onto the PMU in LS mode.
+
+Root of trust establishment
+---------------------------
+To establish a root of trust, the code running on a Falcon must be immutable and
+hardwired into a read-only memory (ROM). This follows industry norms for
+verification of firmware. This code is called the Boot ROM (BROM). The nova-core
+driver on the CPU communicates with Falcon's Boot ROM through various Falcon
+registers prefixed with "BROM" (see regs.rs).
+
+After the nova-core driver reads the necessary ucode from VBIOS, it programs the
+BROM and DMA registers to trigger the Falcon to load the HS ucode from the system
+memory into the Falcon's IMEM/DMEM. Once the HS ucode is loaded, it is verified
+by the Falcon's Boot ROM.
+
+Once the verified HS code is running on a Falcon, it can verify and load other
+LS/NS ucode binaries onto other Falcons and start them. The process of signature
+verification is the same as HS; just in this case, the hardware (BROM) doesn't
+compute the signature, but the HS ucode does.
+
+The root of trust is therefore established as follows:
+ Hardware (Boot ROM running on the Falcon) -> HS ucode -> LS/NS ucode.
+
+On an Ampere GPU, for example, the boot verification flow is:
+ Hardware (Boot ROM running on the SEC2) ->
+ HS ucode (Booter running on the SEC2) ->
+ LS ucode (GSP-RM running on the GSP)
+
+.. note::
+ While the CPU can load HS ucode onto a Falcon microcontroller and have it
+ verified by the hardware and run, the CPU itself typically does not load
+ LS or NS ucode and run it. Loading of LS or NS ucode is done mainly by the
+ HS ucode. For example, on an Ampere GPU, after the Booter ucode runs on the
+ SEC2 in HS mode and loads the GSP-RM binary onto the GSP, it needs to run
+ the "SEC2-RTOS" ucode at runtime. This presents a problem: there is no
+ component to load the SEC2-RTOS ucode onto the SEC2. The CPU cannot load
+ LS code, and GSP-RM must run in LS mode. To overcome this, the GSP is
+ temporarily made to run HS ucode (which is itself loaded by the CPU via
+ the nova-core driver using a "GSP-provided sequencer") which then loads
+ the SEC2-RTOS ucode onto the SEC2 in LS mode. The GSP then resumes
+ running its own GSP-RM LS ucode.
+
+Falcon memory subsystem and DMA engine
+======================================
+Falcons have separate instruction and data memories (IMEM/DMEM)
+and contain a small DMA engine called FBDMA (Framebuffer DMA) which performs
+DMA transfers between the IMEM/DMEM memory inside the Falcon and external
+memory via the FBIF (Framebuffer Interface).
+
+DMA transfers are possible from the Falcon's memory to both the system memory
+and the framebuffer memory (VRAM).
+
+To perform a DMA via the FBDMA, the FBIF is configured to decide how the memory
+is accessed (also known as aperture type). In the nova-core driver, this is
+determined by the `FalconFbifTarget` enum.
+
+The IO-PMP (Input/Output Physical Memory Protection) unit in the Falcon
+controls access by the FBDMA to the external memory.
+
+A conceptual diagram (not exact) of the Falcon and its memory subsystem is as follows::
+
+ External Memory (Framebuffer / System DRAM)
+ ^ |
+ | |
+ | v
+ +-----------------------------------------------------+
+ | | |
+ | +---------------+ | |
+ | | FBIF |-------+ | FALCON
+ | | (FrameBuffer | Memory Interface | PROCESSOR
+ | | InterFace) | |
+ | | Apertures | |
+ | | Configures | |
+ | | mem access | |
+ | +-------^-------+ |
+ | | |
+ | | FBDMA uses configured FBIF apertures |
+ | | to access External Memory
+ | |
+ | +-------v--------+ +---------------+
+ | | FBDMA | cfg | RISC |
+ | | (FrameBuffer |<---->| CORE |----->. Direct Core Access
+ | | DMA Engine) | | | |
+ | | - Master dev. | | (can run both | |
+ | +-------^--------+ | Falcon and | |
+ | | cfg--->| RISC-V code) | |
+ | | / | | |
+ | | | +---------------+ | +------------+
+ | | | | | BROM |
+ | | | <--->| (Boot ROM) |
+ | | / | +------------+
+ | | v |
+ | +---------------+ |
+ | | IO-PMP | Controls access by FBDMA |
+ | | (IO Physical | and other IO Masters |
+ | | Memory Protect) |
+ | +-------^-------+ |
+ | | |
+ | | Protected Access Path for FBDMA |
+ | v |
+ | +---------------------------------------+ |
+ | | Memory | |
+ | | +---------------+ +------------+ | |
+ | | | IMEM | | DMEM | |<-----+
+ | | | (Instruction | | (Data | |
+ | | | Memory) | | Memory) | |
+ | | +---------------+ +------------+ |
+ | +---------------------------------------+
+ +-----------------------------------------------------+
diff --git a/Documentation/gpu/nova/core/fwsec.rst b/Documentation/gpu/nova/core/fwsec.rst
new file mode 100644
index 000000000000..c440edbe420c
--- /dev/null
+++ b/Documentation/gpu/nova/core/fwsec.rst
@@ -0,0 +1,181 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+=========================
+FWSEC (Firmware Security)
+=========================
+This document briefly/conceptually describes the FWSEC (Firmware Security) image
+and its role in the GPU boot sequence. As such, this information is subject to
+change in the future and is only current as of the Ampere GPU family. However,
+hopefully the concepts described will be useful for understanding the kernel code
+that deals with it. All the information is derived from publicly available
+sources such as public drivers and documentation.
+
+The role of FWSEC is to provide a secure boot process. It runs in
+'Heavy-secure' mode, and performs firmware verification after a GPU reset
+before loading various ucode images onto other microcontrollers on the GPU,
+such as the PMU and GSP.
+
+FWSEC itself is an application stored in the VBIOS ROM in the FWSEC partition of
+ROM (see vbios.rst for more details). It contains different commands like FRTS
+(Firmware Runtime Services) and SB (Secure Booting other microcontrollers after
+reset and loading them with other non-FWSEC ucode). The kernel driver only needs
+to perform FRTS, since Secure Boot (SB) has already completed by the time the driver
+is loaded.
+
+The FRTS command carves out the WPR2 region (Write protected region) which contains
+data required for power management. Once set up, only HS mode ucode can access it
+(see falcon.rst for privilege levels).
+
+The FWSEC image is located in the VBIOS ROM in the partition of the ROM that contains
+various ucode images (also known as applications) -- one of them being FWSEC. For how
+it is extracted, see vbios.rst and the vbios.rs source code.
+
+The Falcon data for each ucode image (including the FWSEC image) is a combination
+of headers, data sections (DMEM) and instruction code sections (IMEM). All these
+ucode images are stored in the same ROM partition and the PMU table is used to look
+up the application to load based on its application ID (see vbios.rs).
+
+For the nova-core driver, the FWSEC contains an 'application interface' called
+DMEMMAPPER. This interface is used to execute the 'FWSEC-FRTS' command, among others.
+For Ampere, FWSEC is running on the GSP in Heavy-secure mode and runs FRTS.
+
+FWSEC Memory Layout
+-------------------
+The memory layout of the FWSEC image is as follows::
+
+ +---------------------------------------------------------------+
+ | FWSEC ROM image (type 0xE0) |
+ | |
+ | +---------------------------------+ |
+ | | PMU Falcon Ucode Table | |
+ | | (PmuLookupTable) | |
+ | | +-------------------------+ | |
+ | | | Table Header | | |
+ | | | - version: 0x01 | | |
+ | | | - header_size: 6 | | |
+ | | | - entry_size: 6 | | |
+ | | | - entry_count: N | | |
+ | | | - desc_version:3(unused)| | |
+ | | +-------------------------+ | |
+ | | ... | |
+ | | +-------------------------+ | |
+ | | | Entry for FWSEC (0x85) | | |
+ | | | (PmuLookupTableEntry) | | |
+ | | | - app_id: 0x85 (FWSEC) |----|----+ |
+ | | | - target_id: 0x01 (PMU) | | | |
+ | | | - data: offset ---------|----|----|---+ look up FWSEC |
+ | | +-------------------------+ | | | |
+ | +---------------------------------+ | | |
+ | | | |
+ | | | |
+ | +---------------------------------+ | | |
+ | | FWSEC Ucode Component |<---+ | |
+ | | (aka Falcon data) | | |
+ | | +-------------------------+ | | |
+ | | | FalconUCodeDescV3 |<---|--------+ |
+ | | | - hdr | | |
+ | | | - stored_size | | |
+ | | | - pkc_data_offset | | |
+ | | | - interface_offset -----|----|----------------+ |
+ | | | - imem_phys_base | | | |
+ | | | - imem_load_size | | | |
+ | | | - imem_virt_base | | | |
+ | | | - dmem_phys_base | | | |
+ | | | - dmem_load_size | | | |
+ | | | - engine_id_mask | | | |
+ | | | - ucode_id | | | |
+ | | | - signature_count | | look up sig | |
+ | | | - signature_versions --------------+ | |
+ | | +-------------------------+ | | | |
+ | | (no gap) | | | |
+ | | +-------------------------+ | | | |
+ | | | Signatures Section |<---|-----+ | |
+ | | | (384 bytes per sig) | | | |
+ | | | - RSA-3K Signature 1 | | | |
+ | | | - RSA-3K Signature 2 | | | |
+ | | | ... | | | |
+ | | +-------------------------+ | | |
+ | | | | |
+ | | +-------------------------+ | | |
+ | | | IMEM Section (Code) | | | |
+ | | | | | | |
+ | | | Contains instruction | | | |
+ | | | code etc. | | | |
+ | | +-------------------------+ | | |
+ | | | | |
+ | | +-------------------------+ | | |
+ | | | DMEM Section (Data) | | | |
+ | | | | | | |
+ | | | +---------------------+ | | | |
+ | | | | Application | |<---|----------------+ |
+ | | | | Interface Table | | | |
+ | | | | (FalconAppifHdrV1) | | | |
+ | | | | Header: | | | |
+ | | | | - version: 0x01 | | | |
+ | | | | - header_size: 4 | | | |
+ | | | | - entry_size: 8 | | | |
+ | | | | - entry_count: N | | | |
+ | | | | | | | |
+ | | | | Entries: | | | |
+ | | | | +-----------------+ | | | |
+ | | | | | DEVINIT (ID 1) | | | | |
+ | | | | | - id: 0x01 | | | | |
+ | | | | | - dmemOffset X -|-|-|----+ |
+ | | | | +-----------------+ | | | |
+ | | | | +-----------------+ | | | |
+ | | | | | DMEMMAPPER(ID 4)| | | | |
+ | | | | | - id: 0x04 | | | | Used only for DevInit |
+ | | | | | (NVFW_FALCON_ | | | | application (not FWSEC) |
+ | | | | | APPIF_ID_DMEMMAPPER) | |
+ | | | | | - dmemOffset Y -|-|-|----|-----+ |
+ | | | | +-----------------+ | | | | |
+ | | | +---------------------+ | | | |
+ | | | | | | |
+ | | | +---------------------+ | | | |
+ | | | | DEVINIT Engine |<|----+ | Used by FWSEC |
+ | | | | Interface | | | | app. |
+ | | | +---------------------+ | | | |
+ | | | | | | |
+ | | | +---------------------+ | | | |
+ | | | | DMEM Mapper (ID 4) |<|----+-----+ |
+ | | | | (FalconAppifDmemmapperV3) | |
+ | | | | - signature: "DMAP" | | | |
+ | | | | - version: 0x0003 | | | |
+ | | | | - Size: 64 bytes | | | |
+ | | | | - cmd_in_buffer_off | |----|------------+ |
+ | | | | - cmd_in_buffer_size| | | | |
+ | | | | - cmd_out_buffer_off| |----|------------|-----+ |
+ | | | | - cmd_out_buffer_sz | | | | | |
+ | | | | - init_cmd | | | | | |
+ | | | | - features | | | | | |
+ | | | | - cmd_mask0/1 | | | | | |
+ | | | +---------------------+ | | | | |
+ | | | | | | | |
+ | | | +---------------------+ | | | | |
+ | | | | Command Input Buffer|<|----|------------+ | |
+ | | | | - Command data | | | | |
+ | | | | - Arguments | | | | |
+ | | | +---------------------+ | | | |
+ | | | | | | |
+ | | | +---------------------+ | | | |
+ | | | | Command Output |<|----|------------------+ |
+ | | | | Buffer | | | |
+ | | | | - Results | | | |
+ | | | | - Status | | | |
+ | | | +---------------------+ | | |
+ | | +-------------------------+ | |
+ | +---------------------------------+ |
+ | |
+ +---------------------------------------------------------------+
+
+.. note::
+ This uses a GA-102 Ampere GPU as an example and could vary for future GPUs.
+
+.. note::
+ The FWSEC image also plays a role in memory scrubbing (ECC initialization) and VPR
+ (Video Protected Region) initialization. Before the nova-core driver is even
+ loaded, the FWSEC image is running on the GSP in heavy-secure mode. After the devinit
+ sequence completes, it does VRAM memory scrubbing (ECC initialization). On consumer
+ GPUs, it scrubs only part of memory and then initiates 'async scrubbing'. Before this
+ async scrubbing completes, the unscrubbed VRAM cannot be used for allocation (thus DRM
+ memory allocators need to wait for this scrubbing to complete).
diff --git a/Documentation/gpu/nova/core/todo.rst b/Documentation/gpu/nova/core/todo.rst
index 8a459fc08812..894a1e9c3741 100644
--- a/Documentation/gpu/nova/core/todo.rst
+++ b/Documentation/gpu/nova/core/todo.rst
@@ -14,14 +14,17 @@ Tasks may have the following fields:
- ``Contact``: The person that can be contacted for further information about
the task.
+A task might have a `[ABCD]` code after its name. This code can be used to
+grep the code for `TODO` entries related to it.
+
Enablement (Rust)
=================
Tasks that are not directly related to nova-core, but are preconditions in terms
of required APIs.
-FromPrimitive API
------------------
+FromPrimitive API [FPRI]
+------------------------
Sometimes the need arises to convert a number to a value of an enum or a
structure.
@@ -41,8 +44,27 @@ automatically generates the corresponding mappings between a value and a number.
| Complexity: Beginner
| Link: https://docs.rs/num/latest/num/trait.FromPrimitive.html
-Generic register abstraction
-----------------------------
+Conversion from byte slices for types implementing FromBytes [TRSM]
+-------------------------------------------------------------------
+
+We retrieve several structures from byte streams coming from the BIOS or loaded
+firmware. At the moment, converting the byte slice into the proper type requires
+an inelegant `unsafe` operation; this will go away once `FromBytes` implements
+a proper `from_bytes` method.
+
+| Complexity: Beginner
+
+CoherentAllocation improvements [COHA]
+--------------------------------------
+
+`CoherentAllocation` needs a safe way to write into the allocation, and to
+obtain slices within the allocation.
+
+| Complexity: Beginner
+| Contact: Abdiel Janulgue
+
+Generic register abstraction [REGA]
+-----------------------------------
Work out how register constants and structures can be automatically generated
through generalized macros.
@@ -102,16 +124,40 @@ Usage:
let boot0 = Boot0::read(&bar);
pr_info!("Revision: {}\n", boot0.revision());
-Note: a work-in-progress implementation currently resides in
+A work-in-progress implementation currently resides in
`drivers/gpu/nova-core/regs/macros.rs` and is used in nova-core. It would be
nice to improve it (possibly using proc macros) and move it to the `kernel`
crate so it can be used by other components as well.
+Features desired before this happens:
+
+* Relative register with build-time base address validation,
+* Arrays of registers with build-time index validation,
+* Make I/O optional (for field values that are not registers),
+* Support other sizes than `u32`,
+* Allow visibility control for registers and individual fields,
+* Use Rust slice syntax to express field ranges.
+
| Complexity: Advanced
| Contact: Alexandre Courbot
-Delay / Sleep abstractions
---------------------------
+Numerical operations [NUMM]
+---------------------------
+
+Nova uses integer operations that are not part of the standard library (or not
+implemented in an optimized way for the kernel). These include:
+
+- Aligning up and down to a power of two,
+- The "Find Last Set Bit" (`fls` function of the C part of the kernel)
+ operation.
+
+A `num` core kernel module is being designed to provide these operations.
+
+| Complexity: Intermediate
+| Contact: Alexandre Courbot
+
+Delay / Sleep abstractions [DLAY]
+---------------------------------
Rust abstractions for the kernel's delay() and sleep() functions.
@@ -159,18 +205,6 @@ mailing list yet.
| Complexity: Intermediate
| Contact: Abdiel Janulgue
-ELF utils
----------
-
-Rust implementation of ELF header representation to retrieve section header
-tables, names, and data from an ELF-formatted images.
-
-There is preceding work from Abdiel Janulgue, which hasn't made it to the
-mailing list yet.
-
-| Complexity: Beginner
-| Contact: Abdiel Janulgue
-
PCI MISC APIs
-------------
@@ -179,12 +213,11 @@ capability, MSI API abstractions.
| Complexity: Beginner
-Auxiliary bus abstractions
---------------------------
-
-Rust abstraction for the auxiliary bus APIs.
+XArray bindings [XARR]
+----------------------
-This is needed to connect nova-core to the nova-drm driver.
+We need bindings for `xa_alloc`/`xa_alloc_cyclic` in order to generate the
+auxiliary device IDs.
| Complexity: Intermediate
@@ -216,15 +249,6 @@ Build the radix3 page table to map the firmware.
| Complexity: Intermediate
| Contact: Abdiel Janulgue
-vBIOS support
--------------
-
-Parse the vBIOS and probe the structures required for driver initialization.
-
-| Contact: Dave Airlie
-| Reference: Vec extensions
-| Complexity: Intermediate
-
Initial Devinit support
-----------------------
@@ -234,23 +258,6 @@ configuration.
| Contact: Dave Airlie
| Complexity: Beginner
-Boot Falcon controller
-----------------------
-
-Infrastructure to load and execute falcon (sec2) firmware images; handle the
-GSP falcon processor and fwsec loading.
-
-| Complexity: Advanced
-| Contact: Dave Airlie
-
-GPU Timer support
------------------
-
-Support for the GPU's internal timer peripheral.
-
-| Complexity: Beginner
-| Contact: Dave Airlie
-
MMU / PT management
-------------------
diff --git a/Documentation/gpu/nova/core/vbios.rst b/Documentation/gpu/nova/core/vbios.rst
new file mode 100644
index 000000000000..efd40087480c
--- /dev/null
+++ b/Documentation/gpu/nova/core/vbios.rst
@@ -0,0 +1,181 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+==========
+VBIOS
+==========
+This document describes the layout of the VBIOS image which is a series of concatenated
+images in the ROM of the GPU. The VBIOS is mirrored onto the BAR 0 space and is read
+both by the Boot ROM firmware (also known as IFR or init-from-rom firmware) on the
+GPU, to bootstrap various microcontrollers (PMU, SEC, GSP) with critical
+initialization before the driver loads, and by the nova-core driver in the kernel
+to boot the GSP.
+
+The format of the images in the ROM follows the "BIOS Specification" part of the
+PCI specification, with Nvidia-specific extensions. The ROM images of type FwSec
+are the ones that contain Falcon ucode and are what we are mainly looking for.
+
+As an example, the following are the different image types that can be found in the
+VBIOS of an Ampere GA102 GPU which is supported by the nova-core driver.
+
+- PciAt Image (Type 0x00) - This is the standard PCI BIOS image, whose name
+ likely comes from the "IBM PC/AT" architecture.
+
+- EFI Image (Type 0x03) - This is the EFI BIOS image. It contains the UEFI GOP
+ driver that is used to display UEFI graphics output.
+
+- First FwSec Image (Type 0xE0) - The first FwSec image (Secure Firmware)
+
+- Second FwSec Image (Type 0xE0) - The second FwSec image (Secure Firmware)
+ contains various microcodes (also known as applications) that do a range
+ of different functions. The FWSEC ucode is run in heavy-secure mode and
+ typically runs directly on the GSP (it could be running on a different
+ designated processor in future generations but as of Ampere, it is the GSP).
+ This firmware then loads other firmware ucodes onto the PMU and SEC2
+ microcontrollers for gfw initialization after GPU reset and before the driver
+ loads (see devinit.rst). The DEVINIT ucode is itself another ucode that is
+ stored in this ROM partition.
+
+Once located, the Falcon ucodes have "Application Interfaces" in their data
+memory (DMEM). For FWSEC, the application interface we use is the
+"DMEM mapper" interface, which is configured to run the "FRTS" command. This
+command carves out the WPR2 (Write-Protected Region) in VRAM. It then places
+important power-management data, called 'FRTS', into this region. The WPR2
+region is only accessible to heavy-secure ucode.
+
+.. note::
+ It is not clear why FwSec has 2 different partitions in the ROM, but they both
+ are of type 0xE0 and can be identified as such. This could be subject to change
+ in future generations.
+
+VBIOS ROM Layout
+----------------
+The VBIOS layout is roughly a series of concatenated images laid out as follows::
+
+ +----------------------------------------------------------------------------+
+ | VBIOS (Starting at ROM_OFFSET: 0x300000) |
+ +----------------------------------------------------------------------------+
+ | +-----------------------------------------------+ |
+ | | PciAt Image (Type 0x00) | |
+ | +-----------------------------------------------+ |
+ | | +-------------------+ | |
+ | | | ROM Header | | |
+ | | | (Signature 0xAA55)| | |
+ | | +-------------------+ | |
+ | | | rom header's pci_data_struct_offset | |
+ | | | points to the PCIR structure | |
+ | | V | |
+ | | +-------------------+ | |
+ | | | PCIR Structure | | |
+ | | | (Signature "PCIR")| | |
+ | | | last_image: 0x80 | | |
+ | | | image_len: size | | |
+ | | | in 512-byte units | | |
+ | | +-------------------+ | |
+ | | | | |
+ | | | NPDE immediately follows PCIR | |
+ | | V | |
+ | | +-------------------+ | |
+ | | | NPDE Structure | | |
+ | | | (Signature "NPDE")| | |
+ | | | last_image: 0x00 | | |
+ | | +-------------------+ | |
+ | | | |
+ | | +-------------------+ | |
+ | | | BIT Header | (Signature scanning | |
+ | | | (Signature "BIT") | provides the location | |
+ | | +-------------------+ of the BIT table) | |
+ | | | header is | |
+ | | | followed by a table of tokens | |
+ | | V one of which is for falcon data. | |
+ | | +-------------------+ | |
+ | | | BIT Tokens | | |
+ | | | ______________ | | |
+ | | | | Falcon Data | | | |
+ | | | | Token (0x70)|---+------------>------------+--+ |
+ | | | +-------------+ | falcon_data_ptr() | | |
+ | | +-------------------+ | V |
+ | +-----------------------------------------------+ | |
+ | (no gap between images) | |
+ | +-----------------------------------------------+ | |
+ | | EFI Image (Type 0x03) | | |
+ | +-----------------------------------------------+ | |
+ | | Contains the UEFI GOP driver (Graphics Output)| | |
+ | | +-------------------+ | | |
+ | | | ROM Header | | | |
+ | | +-------------------+ | | |
+ | | | PCIR Structure | | | |
+ | | +-------------------+ | | |
+ | | | NPDE Structure | | | |
+ | | +-------------------+ | | |
+ | | | Image data | | | |
+ | | +-------------------+ | | |
+ | +-----------------------------------------------+ | |
+ | (no gap between images) | |
+ | +-----------------------------------------------+ | |
+ | | First FwSec Image (Type 0xE0) | | |
+ | +-----------------------------------------------+ | |
+ | | +-------------------+ | | |
+ | | | ROM Header | | | |
+ | | +-------------------+ | | |
+ | | | PCIR Structure | | | |
+ | | +-------------------+ | | |
+ | | | NPDE Structure | | | |
+ | | +-------------------+ | | |
+ | | | Image data | | | |
+ | | +-------------------+ | | |
+ | +-----------------------------------------------+ | |
+ | (no gap between images) | |
+ | +-----------------------------------------------+ | |
+ | | Second FwSec Image (Type 0xE0) | | |
+ | +-----------------------------------------------+ | |
+ | | +-------------------+ | | |
+ | | | ROM Header | | | |
+ | | +-------------------+ | | |
+ | | | PCIR Structure | | | |
+ | | +-------------------+ | | |
+ | | | NPDE Structure | | | |
+ | | +-------------------+ | | |
+ | | | | |
+ | | +-------------------+ | | |
+ | | | PMU Lookup Table | <- falcon_data_offset <----+ |
+ | | | +-------------+ | pmu_lookup_table | |
+ | | | | Entry 0x85 | | | |
+ | | | | FWSEC_PROD | | | |
+ | | | +-------------+ | | |
+ | | +-------------------+ | |
+ | | | | |
+ | | | points to | |
+ | | V | |
+ | | +-------------------+ | |
+ | | | FalconUCodeDescV3 | <- falcon_ucode_offset | |
+ | | | (FWSEC Firmware) | fwsec_header() | |
+ | | +-------------------+ | |
+ | | | immediately followed by... | |
+ | | V | |
+ | | +----------------------------+ | |
+ | | | Signatures + FWSEC Ucode | | |
+ | | | fwsec_sigs(), fwsec_ucode()| | |
+ | | +----------------------------+ | |
+ | +-----------------------------------------------+ |
+ | |
+ +----------------------------------------------------------------------------+
+
+.. note::
+   This diagram is based on a GA102 Ampere GPU as an example and could vary
+   for other or future GPUs.
+
+.. note::
+ For more explanations of acronyms, see the detailed descriptions in `vbios.rs`.
+
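+As a rough illustration of how these concatenated images are walked, consider
+the following C sketch. It only uses fields shown in the diagram above; the
+structure layouts and helper names are hypothetical, and the authoritative
+implementation lives in `vbios.rs`::
+
+    /* Hypothetical sketch: walk the concatenated ROM images. */
+    struct pcir {
+        u8  signature[4];   /* "PCIR" */
+        /* ... */
+        u16 image_len;      /* image length in 512-byte units */
+        u8  code_type;      /* 0x00 PciAt, 0x03 EFI, 0xE0 FwSec */
+        u8  last_image;     /* bit 0x80 set on the last image */
+    };
+
+    static void walk_rom_images(u8 *rom_base)
+    {
+        u32 offset = 0;
+
+        for (;;) {
+            /* struct rom_header is hypothetical; signature 0xAA55 */
+            struct rom_header *hdr = (void *)(rom_base + offset);
+            struct pcir *p = (void *)hdr + hdr->pci_data_struct_offset;
+
+            if (p->code_type == 0xE0)
+                handle_fwsec_image(hdr, p);     /* hypothetical helper */
+            /* NPDE, when present, can override the last-image flag
+             * (note the PciAt image above: PCIR says last, NPDE does not). */
+            if (p->last_image & 0x80)
+                break;
+            offset += p->image_len * 512;       /* images are contiguous */
+        }
+    }
+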
+Falcon data Lookup
+------------------
+A key part of the VBIOS extraction code (vbios.rs) is to find the location of the
+Falcon data in the VBIOS, which contains the PMU lookup table. This lookup table is
+used to find the required Falcon ucode based on an application ID.
+
+The location of the PMU lookup table is found by scanning the BIT (`BIOS Information Table`_)
+tokens for a token with the id `BIT_TOKEN_ID_FALCON_DATA` (0x70), which gives the
+offset of the Falcon data from the start of the VBIOS image. Unfortunately, the offset
+does not account for the EFI image located between the PciAt and FwSec images.
+The `vbios.rs` code compensates for this with appropriate arithmetic.
+
+.. _`BIOS Information Table`: https://download.nvidia.com/open-gpu-doc/BIOS-Information-Table/1/BIOS-Information-Table.html
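+
+As a sketch of that compensation (the names and the sign of the adjustment here
+are illustrative assumptions only; see `vbios.rs` for the exact arithmetic)::
+
+    /* Hypothetical: the token's offset was computed without the EFI image,
+     * so the EFI image length is accounted for when resolving the real
+     * position of the Falcon data in the ROM (assumption). */
+    static u32 falcon_data_offset(u32 bit_token_offset, u32 efi_image_len)
+    {
+        return bit_token_offset + efi_image_len;
+    }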
diff --git a/Documentation/gpu/nova/index.rst b/Documentation/gpu/nova/index.rst
index 2701b3f4af35..e39cb3163581 100644
--- a/Documentation/gpu/nova/index.rst
+++ b/Documentation/gpu/nova/index.rst
@@ -28,3 +28,7 @@ vGPU manager VFIO driver and the nova-drm driver.
core/guidelines
core/todo
+ core/vbios
+ core/devinit
+ core/fwsec
+ core/falcon
diff --git a/Documentation/gpu/rfc/gpusvm.rst b/Documentation/gpu/rfc/gpusvm.rst
index bcf66a8137a6..469db1372f16 100644
--- a/Documentation/gpu/rfc/gpusvm.rst
+++ b/Documentation/gpu/rfc/gpusvm.rst
@@ -74,14 +74,20 @@ Overview of baseline design
:doc: Locking
.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c
- :doc: Migration
-
-.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c
:doc: Partial Unmapping of Ranges
.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c
:doc: Examples
+Overview of drm_pagemap design
+==============================
+
+.. kernel-doc:: drivers/gpu/drm/drm_pagemap.c
+ :doc: Overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_pagemap.c
+ :doc: Migration
+
Possible future design features
===============================
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index c57777a24e03..be8637da3fe9 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -515,6 +515,21 @@ Contact: Douglas Anderson <dianders@chromium.org>
Level: Starter
+Remove devm_drm_put_bridge()
+----------------------------
+
+Due to how the panel bridge handles the drm_bridge object lifetime, special
+care must be taken to dispose of the drm_bridge object when the
+panel_bridge is removed. This is currently managed using
+devm_drm_put_bridge(), but that is an unsafe, temporary workaround. To fix
+that, the DRM panel lifetime needs to be reworked. After the rework is
+done, remove devm_drm_put_bridge() and the TODO in
+drm_panel_bridge_remove().
+
+Contact: Maxime Ripard <mripard@kernel.org>,
+ Luca Ceresoli <luca.ceresoli@bootlin.com>
+
+Level: Intermediate
Core refactorings
=================
diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst
index ba04ac7c2167..8a8b1002931f 100644
--- a/Documentation/gpu/vkms.rst
+++ b/Documentation/gpu/vkms.rst
@@ -89,6 +89,17 @@ You can also run subtests if you do not want to run the entire test::
sudo ./build/tests/kms_flip --run-subtest basic-plain-flip --device "sys:/sys/devices/platform/vkms"
sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/kms_flip --run-subtest basic-plain-flip
+Testing With KUnit
+==================
+
+KUnit (the kernel unit testing framework) provides a common framework for unit
+tests within the Linux kernel.
+More information is available in ../dev-tools/kunit/index.rst.
+
+To run the VKMS KUnit tests::
+
+ tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/vkms/tests
+
TODO
====
@@ -122,8 +133,8 @@ There's lots of plane features we could add support for:
- Scaling.
-- Additional buffer formats, especially YUV formats for video like NV12.
- Low/high bpp RGB formats would also be interesting.
+- Additional buffer formats. Low/high bpp RGB formats would be interesting
+ [Good to get started].
- Async updates (currently only possible on cursor plane using the legacy
cursor api).
diff --git a/Documentation/gpu/xe/xe_configfs.rst b/Documentation/gpu/xe/xe_configfs.rst
index 9b9d941eb20e..7f8ec39dc6dd 100644
--- a/Documentation/gpu/xe/xe_configfs.rst
+++ b/Documentation/gpu/xe/xe_configfs.rst
@@ -2,9 +2,15 @@
.. _xe_configfs:
-============
+===========
Xe Configfs
-============
+===========
.. kernel-doc:: drivers/gpu/drm/xe/xe_configfs.c
:doc: Xe Configfs
+
+Internal API
+============
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_configfs.c
+ :internal:
diff --git a/Documentation/hid/intel-thc-hid.rst b/Documentation/hid/intel-thc-hid.rst
index dc9250787fc5..8b378c57b5aa 100644
--- a/Documentation/hid/intel-thc-hid.rst
+++ b/Documentation/hid/intel-thc-hid.rst
@@ -188,6 +188,34 @@ Control register.
Reset line is controlled by BIOS (or EFI) through ACPI _RST method, driver needs to call this
device ACPI _RST method to reset touch IC during initialization.
+2.3 Max input size control
+--------------------------
+
+This is a new feature introduced in the Panther Lake platform: THC hardware allows the
+driver to set a max input size for RxDMA. After this max size is set and enabled, for
+every input report packet read, the THC hardware sequencer first reads the incoming
+input packet size, then compares it with the given max size:
+
+- if input packet size <= max size, THC continues using the input packet size to
+  finish the read
+- if input packet size > max size, there is a potential risk of input data corruption
+  during the transfer, so THC uses the max size instead of the input packet size for
+  the read
+
+This feature avoids the data corruption that an RxDMA buffer overrun would cause on
+the I2C bus, and enhances overall system stability.
+
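+A minimal sketch of the size selection (the names here are illustrative, not the
+driver's actual API)::
+
+    /* Hypothetical: pick the RxDMA read length for one input packet. */
+    static u32 thc_rx_read_len(u32 packet_len, u32 max_input_size)
+    {
+        /* Oversized packets are truncated so they cannot overrun the
+         * RxDMA buffer; in-range packets are read in full. */
+        return min(packet_len, max_input_size);
+    }
+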
+2.4 Interrupt delay
+-------------------
+
+Because of MCU performance limitations, some touch devices cannot de-assert the
+interrupt pin immediately after input data is transferred, which causes an interrupt
+toggle delay. But THC always detects the next interrupt immediately after the last
+input interrupt is handled. In this case, the delayed interrupt de-assertion is
+recognized as a new interrupt signal by THC, and causes THC to spuriously start an
+input report read.
+
+In order to avoid this situation, THC introduced a new interrupt delay feature in the
+Panther Lake platform, which allows the driver to set an interrupt delay. After this
+feature is enabled, THC waits for the given time before detecting the next interrupt.
+
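+The delay is applied by the THC hardware itself; as a software analogy only, the
+effect is similar to the following interrupt-handler fragment (all names
+hypothetical)::
+
+    /* Ignore re-assertions within the configured delay window, so a slow
+     * de-assertion is not mistaken for a new input interrupt. */
+    if (time_before(jiffies, last_input_irq + msecs_to_jiffies(irq_delay_ms)))
+        return IRQ_HANDLED;
+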
3. High level concept
=====================
diff --git a/Documentation/hwmon/adp1050.rst b/Documentation/hwmon/adp1050.rst
index 8fa937064886..32514084fbdc 100644
--- a/Documentation/hwmon/adp1050.rst
+++ b/Documentation/hwmon/adp1050.rst
@@ -13,6 +13,32 @@ Supported chips:
Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADP1050.pdf
+ * Analog Devices ADP1051
+
+ Prefix: 'adp1051'
+
+ Addresses scanned: I2C 0x70 - 0x77
+
+ Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADP1051.pdf
+
+ * Analog Devices ADP1055
+
+ Prefix: 'adp1055'
+
+ Addresses scanned: I2C 0x4B - 0x77
+
+ Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADP1055.pdf
+
+ * Analog Devices LTP8800-1A/-2/-4A
+
+ Prefix: 'ltp8800'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/LTP8800-1A.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/LTP8800-2.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/LTP8800-4A.pdf
+
Authors:
- Radu Sabau <radu.sabau@analog.com>
@@ -21,14 +47,17 @@ Authors:
Description
-----------
-This driver supprts hardware monitoring for Analog Devices ADP1050 Digital
-Controller for Isolated Power Supply with PMBus interface.
+This driver supports hardware monitoring for the Analog Devices ADP1050, ADP1051,
+and ADP1055 Digital Controllers for Isolated Power Supply with PMBus interface,
+and the LTP8800 step-down μModule regulators.
-The ADP1050 is an advanced digital controller with a PMBus™
+The ADP1050, ADP1051, and ADP1055 are advanced digital controllers with PMBus™
interface targeting high density, high efficiency dc-to-dc power
-conversion used to monitor system temperatures, voltages and currents.
-Through the PMBus interface, the device can monitor input/output voltages,
-input current and temperature.
+conversion used to monitor system temperatures, voltages and currents. The
+LTP8800 is a family of step-down μModule regulators that provides microprocessor
+core voltage from a 54V power distribution architecture. Through the PMBus
+interface, the device can monitor input/output voltages, input current and
+temperature.
Usage Notes
-----------
@@ -49,16 +78,46 @@ Sysfs Attributes
in1_label "vin"
in1_input Measured input voltage
in1_alarm Input voltage alarm
+in1_crit Critical maximum input voltage
+in1_crit_alarm Input voltage high alarm
+in1_lcrit Critical minimum input voltage
+in1_lcrit_alarm Input voltage critical low alarm
in2_label "vout1"
in2_input Measured output voltage
in2_crit Critical maximum output voltage
in2_crit_alarm Output voltage high alarm
in2_lcrit Critical minimum output voltage
in2_lcrit_alarm Output voltage critical low alarm
+in2_max Maximum output voltage
+in2_max_alarm Output voltage max alarm
+in2_min Minimum output voltage
+in2_min_alarm Output voltage min alarm
curr1_label "iin"
curr1_input Measured input current.
curr1_alarm Input current alarm
+curr1_crit Critical maximum input current
+curr1_crit_alarm Input current high alarm
+curr2_label "iout1"
+curr2_input Measured output current
+curr2_alarm Output current alarm
+curr2_crit Critical maximum output current
+curr2_crit_alarm Output current high alarm
+curr2_lcrit Critical minimum output current
+curr2_lcrit_alarm Output current critical low alarm
+curr2_max Maximum output current
+curr2_max_alarm Output current max alarm
+power1_label "pout1"
+power1_input Measured output power
+power1_crit Critical maximum output power
+power1_crit_alarm Output power high alarm
temp1_input Measured temperature
temp1_crit Critical high temperature
temp1_crit_alarm Chip temperature critical high alarm
+temp1_max Maximum temperature
+temp1_max_alarm Temperature max alarm
+temp2_input Measured temperature
+temp2_crit Critical high temperature
+temp2_crit_alarm Chip temperature critical high alarm
+temp2_max Maximum temperature
+temp2_max_alarm Temperature max alarm
================= ========================================
diff --git a/Documentation/hwmon/asus_ec_sensors.rst b/Documentation/hwmon/asus_ec_sensors.rst
index 816d1f9947ea..de2f2985f06f 100644
--- a/Documentation/hwmon/asus_ec_sensors.rst
+++ b/Documentation/hwmon/asus_ec_sensors.rst
@@ -11,6 +11,7 @@ Supported boards:
* Pro WS X570-ACE
* ProArt X570-CREATOR WIFI
* ProArt X670E-CREATOR WIFI
+ * ProArt X870E-CREATOR WIFI
* ProArt B550-CREATOR
* ROG CROSSHAIR VIII DARK HERO
* ROG CROSSHAIR VIII HERO (WI-FI)
@@ -29,6 +30,7 @@ Supported boards:
* ROG STRIX X570-F GAMING
* ROG STRIX X570-I GAMING
* ROG STRIX Z390-F GAMING
+ * ROG STRIX Z490-F GAMING
* ROG STRIX Z690-A GAMING WIFI D4
* ROG ZENITH II EXTREME
* ROG ZENITH II EXTREME ALPHA
diff --git a/Documentation/hwmon/corsair-psu.rst b/Documentation/hwmon/corsair-psu.rst
index 7ed794087f84..2e99cfd556a0 100644
--- a/Documentation/hwmon/corsair-psu.rst
+++ b/Documentation/hwmon/corsair-psu.rst
@@ -17,7 +17,7 @@ Supported devices:
Corsair HX1000i (Legacy and Series 2023)
- Corsair HX1200i (Legacy and Series 2023)
+ Corsair HX1200i (Legacy, Series 2023 and Series 2025)
Corsair HX1500i (Legacy and Series 2023)
diff --git a/Documentation/hwmon/tps53679.rst b/Documentation/hwmon/tps53679.rst
index 3b9561648c24..dd5e4a37375d 100644
--- a/Documentation/hwmon/tps53679.rst
+++ b/Documentation/hwmon/tps53679.rst
@@ -43,6 +43,14 @@ Supported chips:
Datasheet: https://www.ti.com/lit/gpn/TPS53681
+ * Texas Instruments TPS53685
+
+ Prefix: 'tps53685'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.ti.com/lit/gpn/TPS53685
+
* Texas Instruments TPS53688
Prefix: 'tps53688'
diff --git a/Documentation/input/devices/edt-ft5x06.rst b/Documentation/input/devices/edt-ft5x06.rst
index 1ccc94b192b7..e410d73d4841 100644
--- a/Documentation/input/devices/edt-ft5x06.rst
+++ b/Documentation/input/devices/edt-ft5x06.rst
@@ -29,8 +29,25 @@ The driver allows configuration of the touch screen via a set of sysfs files:
For debugging purposes the driver provides a few files in the debug
-filesystem (if available in the kernel). In /sys/kernel/debug/edt_ft5x06
-you'll find the following files:
+filesystem (if available in the kernel). They are located in:
+
+ /sys/kernel/debug/i2c/<i2c-bus>/<i2c-device>/
+
+If you don't know the bus and device numbers, you can look them up with this
+command:
+
+ $ ls -l /sys/bus/i2c/drivers/edt_ft5x06
+
+The symlink target contains the needed information; you will need the last two
+elements of its path:
+
+ 0-0038 -> ../../../../devices/platform/soc/fcfee800.i2c/i2c-0/0-0038
+
+So in this case, the location for the debug files is:
+
+ /sys/kernel/debug/i2c/i2c-0/0-0038/
+
+There, you'll find the following files:
num_x, num_y:
(readonly) contains the number of sensor fields in X- and
diff --git a/Documentation/input/gamepad.rst b/Documentation/input/gamepad.rst
index eca17a7f5258..0c918b6f288b 100644
--- a/Documentation/input/gamepad.rst
+++ b/Documentation/input/gamepad.rst
@@ -190,8 +190,21 @@ Gamepads report the following events:
Rumble is advertised as FF_RUMBLE.
+- Grip buttons:
+
+ Many pads include buttons on the rear, usually referred to as either grip or
+ rear buttons, or paddles. These are often reprogrammable by the firmware to
+ appear as "normal" buttons, but are sometimes exposed to software too. Some
+ notable examples of this are the Steam Deck, which has R4, R5, L4, and L5 on
+ the back; the Xbox Elite pads, which have P1-P4; and the Switch 2 Pro
+ Controller, which has GL and GR.
+
+ For these controllers, BTN_GRIPR and BTN_GRIPR2 should be used for the top
+ and bottom (if present) right grip button(s), and BTN_GRIPL and BTN_GRIPL2
+ should be used for the top and bottom (if present) left grip button(s).
+
- Profile:
- Some pads provide a multi-value profile selection switch. An example is the
- XBox Adaptive and the XBox Elite 2 controllers. When the active profile is
- switched, its newly selected value is emitted as an ABS_PROFILE event.
+ Some pads provide a multi-value profile selection switch. Examples include
+ the Xbox Adaptive and the Xbox Elite 2 controllers. When the active profile
+ is switched, its newly selected value is emitted as an ABS_PROFILE event.
diff --git a/Documentation/kbuild/kconfig.rst b/Documentation/kbuild/kconfig.rst
index fc4e845bc249..d213c4f599a4 100644
--- a/Documentation/kbuild/kconfig.rst
+++ b/Documentation/kbuild/kconfig.rst
@@ -67,12 +67,12 @@ Environment variables for ``*config``:
with its value when saving the configuration, instead of using the
default, ``CONFIG_``.
-Environment variables for ``{allyes/allmod/allno/rand}config``:
+Environment variables for ``{allyes/allmod/allno/alldef/rand}config``:
``KCONFIG_ALLCONFIG``
- The allyesconfig/allmodconfig/allnoconfig/randconfig variants can also
- use the environment variable KCONFIG_ALLCONFIG as a flag or a filename
- that contains config symbols that the user requires to be set to a
+ The allyesconfig/allmodconfig/alldefconfig/allnoconfig/randconfig variants
+ can also use the environment variable KCONFIG_ALLCONFIG as a flag or a
+ filename that contains config symbols that the user requires to be set to a
specific value. If KCONFIG_ALLCONFIG is used without a filename where
KCONFIG_ALLCONFIG == "" or KCONFIG_ALLCONFIG == "1", ``make *config``
checks for a file named "all{yes/mod/no/def/random}.config"
diff --git a/Documentation/mm/arch_pgtable_helpers.rst b/Documentation/mm/arch_pgtable_helpers.rst
index af245161d8e7..ba2f658bc241 100644
--- a/Documentation/mm/arch_pgtable_helpers.rst
+++ b/Documentation/mm/arch_pgtable_helpers.rst
@@ -30,8 +30,6 @@ PTE Page Table Helpers
+---------------------------+--------------------------------------------------+
| pte_protnone | Tests a PROT_NONE PTE |
+---------------------------+--------------------------------------------------+
-| pte_devmap | Tests a ZONE_DEVICE mapped PTE |
-+---------------------------+--------------------------------------------------+
| pte_soft_dirty | Tests a soft dirty PTE |
+---------------------------+--------------------------------------------------+
| pte_swp_soft_dirty | Tests a soft dirty swapped PTE |
@@ -104,8 +102,6 @@ PMD Page Table Helpers
+---------------------------+--------------------------------------------------+
| pmd_protnone | Tests a PROT_NONE PMD |
+---------------------------+--------------------------------------------------+
-| pmd_devmap | Tests a ZONE_DEVICE mapped PMD |
-+---------------------------+--------------------------------------------------+
| pmd_soft_dirty | Tests a soft dirty PMD |
+---------------------------+--------------------------------------------------+
| pmd_swp_soft_dirty | Tests a soft dirty swapped PMD |
@@ -177,8 +173,6 @@ PUD Page Table Helpers
+---------------------------+--------------------------------------------------+
| pud_write | Tests a writable PUD |
+---------------------------+--------------------------------------------------+
-| pud_devmap | Tests a ZONE_DEVICE mapped PUD |
-+---------------------------+--------------------------------------------------+
| pud_mkyoung | Creates a young PUD |
+---------------------------+--------------------------------------------------+
| pud_mkold | Creates an old PUD |
@@ -242,13 +236,13 @@ SWAP Page Table Helpers
========================
+---------------------------+--------------------------------------------------+
-| __pte_to_swp_entry | Creates a swapped entry (arch) from a mapped PTE |
+| __pte_to_swp_entry | Creates a swp_entry_t (arch) from a swap PTE |
+---------------------------+--------------------------------------------------+
-| __swp_to_pte_entry | Creates a mapped PTE from a swapped entry (arch) |
+| __swp_entry_to_pte | Creates a swap PTE from a swp_entry_t (arch) |
+---------------------------+--------------------------------------------------+
-| __pmd_to_swp_entry | Creates a swapped entry (arch) from a mapped PMD |
+| __pmd_to_swp_entry | Creates a swp_entry_t (arch) from a swap PMD |
+---------------------------+--------------------------------------------------+
-| __swp_to_pmd_entry | Creates a mapped PMD from a swapped entry (arch) |
+| __swp_entry_to_pmd | Creates a swap PMD from a swp_entry_t (arch) |
+---------------------------+--------------------------------------------------+
| is_migration_entry | Tests a migration (read or write) swapped entry |
+-------------------------------+----------------------------------------------+
diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst
index ddc50db3afa4..03f8137256f5 100644
--- a/Documentation/mm/damon/design.rst
+++ b/Documentation/mm/damon/design.rst
@@ -452,9 +452,9 @@ that supports each action are as below.
- ``lru_deprio``: Deprioritize the region on its LRU lists.
Supported by ``paddr`` operations set.
- ``migrate_hot``: Migrate the regions prioritizing warmer regions.
- Supported by ``paddr`` operations set.
+ Supported by ``vaddr``, ``fvaddr`` and ``paddr`` operations set.
- ``migrate_cold``: Migrate the regions prioritizing colder regions.
- Supported by ``paddr`` operations set.
+ Supported by ``vaddr``, ``fvaddr`` and ``paddr`` operations set.
- ``stat``: Do nothing but count the statistics.
Supported by all operations sets.
diff --git a/Documentation/mm/damon/maintainer-profile.rst b/Documentation/mm/damon/maintainer-profile.rst
index ce3e98458339..5cd07905a193 100644
--- a/Documentation/mm/damon/maintainer-profile.rst
+++ b/Documentation/mm/damon/maintainer-profile.rst
@@ -7,9 +7,9 @@ The DAMON subsystem covers the files that are listed in 'DATA ACCESS MONITOR'
section of 'MAINTAINERS' file.
The mailing lists for the subsystem are damon@lists.linux.dev and
-linux-mm@kvack.org. Patches should be made against the `mm-unstable tree
-<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ whenever possible and posted
-to the mailing lists.
+linux-mm@kvack.org. Patches should be made against the `mm-new tree
+<https://git.kernel.org/akpm/mm/h/mm-new>`_ whenever possible and posted to the
+mailing lists.
SCM Trees
---------
@@ -17,17 +17,19 @@ SCM Trees
There are multiple Linux trees for DAMON development. Patches under
development or testing are queued in `damon/next
<https://git.kernel.org/sj/h/damon/next>`_ by the DAMON maintainer.
-Sufficiently reviewed patches will be queued in `mm-unstable
-<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ by the memory management
-subsystem maintainer. After more sufficient tests, the patches will be queued
-in `mm-stable <https://git.kernel.org/akpm/mm/h/mm-stable>`_, and finally
-pull-requested to the mainline by the memory management subsystem maintainer.
-
-Note again the patches for `mm-unstable tree
-<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ are queued by the memory
-management subsystem maintainer. If the patches requires some patches in
-`damon/next tree <https://git.kernel.org/sj/h/damon/next>`_ which not yet merged
-in mm-unstable, please make sure the requirement is clearly specified.
+Sufficiently reviewed patches will be queued in `mm-new
+<https://git.kernel.org/akpm/mm/h/mm-new>`_ by the memory management subsystem
+maintainer. As the patches receive further testing, they will move to
+`mm-unstable <https://git.kernel.org/akpm/mm/h/mm-unstable>`_ and then to
+`mm-stable <https://git.kernel.org/akpm/mm/h/mm-stable>`_, and will finally be
+pull-requested to the mainline by the memory management subsystem
+maintainer.
+
+Note again that the patches for the `mm-new tree
+<https://git.kernel.org/akpm/mm/h/mm-new>`_ are queued by the memory management
+subsystem maintainer. If the patches require other patches in the `damon/next
+tree <https://git.kernel.org/sj/h/damon/next>`_ that are not yet merged in
+mm-new, please make sure the requirement is clearly specified.
Submit checklist addendum
-------------------------
@@ -53,8 +55,9 @@ Further doing below and putting the results will be helpful.
Key cycle dates
---------------
-Patches can be sent anytime. Key cycle dates of the `mm-unstable
-<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ and `mm-stable
+Patches can be sent anytime. Key cycle dates of the `mm-new
+<https://git.kernel.org/akpm/mm/h/mm-new>`_, `mm-unstable
+<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ and `mm-stable
<https://git.kernel.org/akpm/mm/h/mm-stable>`_ trees depend on the memory
management subsystem maintainer.
diff --git a/Documentation/mm/index.rst b/Documentation/mm/index.rst
index d3ada3e45e10..fb45acba16ac 100644
--- a/Documentation/mm/index.rst
+++ b/Documentation/mm/index.rst
@@ -56,7 +56,6 @@ documentation, or deleted if it has served its purpose.
page_owner
page_table_check
remap_file_pages
- slub
split_page_table_lock
transhuge
unevictable-lru
diff --git a/Documentation/mm/page_migration.rst b/Documentation/mm/page_migration.rst
index 519b35a4caf5..34602b254aa6 100644
--- a/Documentation/mm/page_migration.rst
+++ b/Documentation/mm/page_migration.rst
@@ -146,18 +146,33 @@ Steps:
18. The new page is moved to the LRU and can be scanned by the swapper,
etc. again.
-Non-LRU page migration
-======================
-
-Although migration originally aimed for reducing the latency of memory
-accesses for NUMA, compaction also uses migration to create high-order
-pages. For compaction purposes, it is also useful to be able to move
-non-LRU pages, such as zsmalloc and virtio-balloon pages.
-
-If a driver wants to make its pages movable, it should define a struct
-movable_operations. It then needs to call __SetPageMovable() on each
-page that it may be able to move. This uses the ``page->mapping`` field,
-so this field is not available for the driver to use for other purposes.
+movable_ops page migration
+==========================
+
+Selected typed, non-folio pages (e.g., pages inflated in a memory balloon,
+zsmalloc pages) can be migrated using the movable_ops migration framework.
+
+The "struct movable_operations" provide callbacks specific to a page type
+for isolating, migrating and un-isolating (putback) these pages.
+
+Once a page is indicated as having movable_ops, that condition must not
+change until the page is freed back to the buddy allocator. This includes not
+changing/clearing the page type and not changing/clearing the
+PG_movable_ops page flag.
+
+Arbitrary drivers cannot currently make use of this framework, as it
+requires:
+
+(a) a page type
+(b) indicating them as possibly having movable_ops in page_has_movable_ops()
+ based on the page type
+(c) returning the movable_ops from page_movable_ops() based on the page
+ type
+(d) not reusing the PG_movable_ops and PG_movable_ops_isolated page flags
+ for other purposes
+
+For example, balloon drivers can make use of this framework through the
+balloon-compaction infrastructure residing in the core kernel.
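+
+Although arbitrary drivers cannot plug into the framework (see the list above),
+the callback set itself is small. A minimal sketch of a driver-side
+implementation, with the callback bodies elided::
+
+    static bool my_isolate(struct page *page, isolate_mode_t mode)
+    {
+        /* take the page off the driver's internal lists */
+        return true;
+    }
+
+    static int my_migrate(struct page *dst, struct page *src,
+                          enum migrate_mode mode)
+    {
+        /* copy the contents and update driver metadata */
+        return 0;
+    }
+
+    static void my_putback(struct page *page)
+    {
+        /* re-add the page to the driver's internal lists */
+    }
+
+    static const struct movable_operations my_movable_ops = {
+        .isolate_page = my_isolate,
+        .migrate_page = my_migrate,
+        .putback_page = my_putback,
+    };
+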
Monitoring Migration
=====================
diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
index d3ac106e6b14..9af11b5bd145 100644
--- a/Documentation/mm/physical_memory.rst
+++ b/Documentation/mm/physical_memory.rst
@@ -584,7 +584,7 @@ Compaction control
``compact_blockskip_flush``
Set to true when compaction migration scanner and free scanner meet, which
- means the ``PB_migrate_skip`` bits should be cleared.
+ means the ``PB_compact_skip`` bits should be cleared.
``contiguous``
Set to true when the zone is contiguous (in other words, no hole).
diff --git a/Documentation/mm/process_addrs.rst b/Documentation/mm/process_addrs.rst
index e6756e78b476..be49e2a269e4 100644
--- a/Documentation/mm/process_addrs.rst
+++ b/Documentation/mm/process_addrs.rst
@@ -303,7 +303,9 @@ There are four key operations typically performed on page tables:
1. **Traversing** page tables - Simply reading page tables in order to traverse
them. This only requires that the VMA is kept stable, so a lock which
establishes this suffices for traversal (there are also lockless variants
- which eliminate even this requirement, such as :c:func:`!gup_fast`).
+ which eliminate even this requirement, such as :c:func:`!gup_fast`). There is
+ also a special case of page table traversal for non-VMA regions which we
+ consider separately below.
2. **Installing** page table mappings - Whether creating a new mapping or
modifying an existing one in such a way as to change its identity. This
requires that the VMA is kept stable via an mmap or VMA lock (explicitly not
@@ -335,15 +337,13 @@ ahead and perform these operations on page tables (though internally, kernel
operations that perform writes also acquire internal page table locks to
serialise - see the page table implementation detail section for more details).
+.. note:: We free empty PTE tables on zap under the RCU lock - this does not
+ change the aforementioned locking requirements around zapping.
+
When **installing** page table entries, the mmap or VMA lock must be held to
keep the VMA stable. We explore why this is in the page table locking details
section below.
-.. warning:: Page tables are normally only traversed in regions covered by VMAs.
- If you want to traverse page tables in areas that might not be
- covered by VMAs, heavier locking is required.
- See :c:func:`!walk_page_range_novma` for details.
-
**Freeing** page tables is an entirely internal memory management operation and
has special requirements (see the page freeing section below for more details).
@@ -355,6 +355,44 @@ has special requirements (see the page freeing section below for more details).
from the reverse mappings, but no other VMAs can be permitted to be
accessible and span the specified range.
+Traversing non-VMA page tables
+------------------------------
+
+We've focused above on traversal of page tables belonging to VMAs. It is also
+possible to traverse page tables which are not represented by VMAs.
+
+Kernel page table mappings themselves are generally managed by whatever part of
+the kernel established them, and the aforementioned locking rules do not apply -
+for instance, vmalloc has its own set of locks which are utilised for
+establishing and tearing down its page tables.
+
+However, for convenience we provide the :c:func:`!walk_kernel_page_table_range`
+function which is synchronised via the mmap lock on the :c:macro:`!init_mm`
+kernel instantiation of the :c:struct:`!struct mm_struct` metadata object.
+
+If an operation requires exclusive access, a write lock is used, but if not, a
+read lock suffices - we assert only that at least a read lock has been acquired.
+
+Since, aside from vmalloc and memory hotplug, kernel page tables are not torn
+down all that often, this usually suffices; however, any caller of this
+functionality must ensure that any additionally required locks are acquired in
+advance.
+
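+For orientation, a sketch of such a traversal (check the exact signature against
+the kernel sources; a walk_page_range-style calling convention is assumed here)::
+
+    /* Hypothetical: count populated PTEs in a kernel VA range. */
+    static int count_pte(pte_t *pte, unsigned long addr,
+                         unsigned long next, struct mm_walk *walk)
+    {
+        if (!pte_none(ptep_get(pte)))
+            (*(unsigned long *)walk->private)++;
+        return 0;
+    }
+
+    static const struct mm_walk_ops ops = { .pte_entry = count_pte };
+    unsigned long count = 0;
+
+    mmap_read_lock(&init_mm);       /* a read lock suffices here */
+    walk_kernel_page_table_range(start, end, &ops, NULL, &count);
+    mmap_read_unlock(&init_mm);
+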
+We also permit a truly unusual case: the traversal of non-VMA page tables in
+**userland** address ranges, as provided for by :c:func:`!walk_page_range_debug`.
+
+This has only one user - the general page table dumping logic (implemented in
+:c:macro:`!mm/ptdump.c`) - which seeks to expose all mappings for debug purposes
+even if they are highly unusual (possibly architecture-specific) and are not
+backed by a VMA.
+
+We must take great care in this case, as the :c:func:`!munmap` implementation
+detaches VMAs under an mmap write lock before tearing down page tables under a
+downgraded mmap read lock.
+
+This means such a traversal could race with munmap, and thus an mmap **write**
+lock is required.
+
Lock ordering
-------------
@@ -461,6 +499,10 @@ Locking Implementation Details
Page table locking details
--------------------------
+.. note:: This section explores page table locking requirements for page tables
+ encompassed by a VMA. See the above section on non-VMA page table
+ traversal for details on how we handle that case.
+
In addition to the locks described in the terminology section above, we have
additional locks dedicated to page tables:
diff --git a/Documentation/mm/slab.rst b/Documentation/mm/slab.rst
index 87d5a5bb172f..2bcc58ada302 100644
--- a/Documentation/mm/slab.rst
+++ b/Documentation/mm/slab.rst
@@ -3,3 +3,10 @@
===============
Slab Allocation
===============
+
+Functions and structures
+========================
+
+.. kernel-doc:: mm/slab.h
+.. kernel-doc:: mm/slub.c
+ :internal:
diff --git a/Documentation/netlink/specs/conntrack.yaml b/Documentation/netlink/specs/conntrack.yaml
index 840dc4504216..c6832633ab7b 100644
--- a/Documentation/netlink/specs/conntrack.yaml
+++ b/Documentation/netlink/specs/conntrack.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: conntrack
protocol: netlink-raw
protonum: 12
@@ -195,17 +195,17 @@ attribute-sets:
-
name: tuple-attrs
attributes:
- -
+ -
name: tuple-ip
type: nest
nested-attributes: tuple-ip-attrs
doc: conntrack l3 information
- -
+ -
name: tuple-proto
type: nest
nested-attributes: tuple-proto-attrs
doc: conntrack l4 information
- -
+ -
name: tuple-zone
type: u16
byte-order: big-endian
@@ -213,74 +213,74 @@ attribute-sets:
-
name: protoinfo-tcp-attrs
attributes:
- -
+ -
name: tcp-state
type: u8
enum: nf-ct-tcp-state
doc: tcp connection state
- -
+ -
name: tcp-wscale-original
type: u8
doc: window scaling factor in original direction
- -
+ -
name: tcp-wscale-reply
type: u8
doc: window scaling factor in reply direction
- -
+ -
name: tcp-flags-original
type: binary
struct: nf-ct-tcp-flags-mask
- -
+ -
name: tcp-flags-reply
type: binary
struct: nf-ct-tcp-flags-mask
-
name: protoinfo-dccp-attrs
attributes:
- -
+ -
name: dccp-state
type: u8
doc: dccp connection state
- -
+ -
name: dccp-role
type: u8
- -
+ -
name: dccp-handshake-seq
type: u64
byte-order: big-endian
- -
+ -
name: dccp-pad
type: pad
-
name: protoinfo-sctp-attrs
attributes:
- -
+ -
name: sctp-state
type: u8
doc: sctp connection state
enum: nf-ct-sctp-state
- -
+ -
name: vtag-original
type: u32
byte-order: big-endian
- -
+ -
name: vtag-reply
type: u32
byte-order: big-endian
-
name: protoinfo-attrs
attributes:
- -
+ -
name: protoinfo-tcp
type: nest
nested-attributes: protoinfo-tcp-attrs
doc: conntrack tcp state information
- -
+ -
name: protoinfo-dccp
type: nest
nested-attributes: protoinfo-dccp-attrs
doc: conntrack dccp state information
- -
+ -
name: protoinfo-sctp
type: nest
nested-attributes: protoinfo-sctp-attrs
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index 38ddc04f9e6d..bb87111d5e16 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: devlink
protocol: genetlink-legacy
@@ -224,6 +224,10 @@ definitions:
value: 10
-
name: binary
+ -
+ name: rate-tc-index-max
+ type: const
+ value: 7
attribute-sets:
-
@@ -744,7 +748,7 @@ attribute-sets:
name: flash-update-overwrite-mask
type: bitfield32
enum: flash-overwrite
- enum-as-flags: True
+ enum-as-flags: true
-
name: reload-action
type: u8
@@ -753,12 +757,12 @@ attribute-sets:
name: reload-actions-performed
type: bitfield32
enum: reload-action
- enum-as-flags: True
+ enum-as-flags: true
-
name: reload-limits
type: bitfield32
enum: reload-action
- enum-as-flags: True
+ enum-as-flags: true
-
name: dev-stats
type: nest
@@ -812,14 +816,14 @@ attribute-sets:
name: rate-parent-node-name
type: string
-
- name: region-max-snapshots
- type: u32
+ name: region-max-snapshots
+ type: u32
-
name: linecard-index
type: u32
-
- name: linecard-state
- type: u8
+ name: linecard-state
+ type: u8
-
name: linecard-type
type: string
@@ -844,7 +848,11 @@ attribute-sets:
-
name: region-direct
type: flag
-
+ -
+ name: rate-tc-bws
+ type: nest
+ multi-attr: true
+ nested-attributes: dl-rate-tc-bws
-
name: dl-dev-stats
subset-of: devlink
@@ -917,7 +925,7 @@ attribute-sets:
name: caps
type: bitfield32
enum: port-fn-attr-cap
- enum-as-flags: True
+ enum-as-flags: true
-
name: dl-dpipe-tables
@@ -1139,7 +1147,7 @@ attribute-sets:
-
name: param-type
- # TODO: fill in the attribute param-value-list
+ # TODO: fill in the attribute param-value-list
-
name: dl-region-snapshots
@@ -1249,6 +1257,22 @@ attribute-sets:
-
name: flash
type: flag
+ -
+ name: dl-rate-tc-bws
+ name-prefix: devlink-rate-tc-attr-
+ attributes:
+ -
+ name: index
+ type: u8
+ checks:
+ max: rate-tc-index-max
+ -
+ name: bw
+ type: u32
+ doc: |
+ Specifies the bandwidth share assigned to the Traffic Class.
+ The bandwidth allocated to the traffic class is proportional to
+ its share relative to the sum of the shares of all configured
+ classes.
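+ For example, three classes configured with shares 2, 3 and 5
+ would receive 20%, 30% and 50% of the bandwidth respectively.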
operations:
enum-model: directional
@@ -1257,7 +1281,7 @@ operations:
name: get
doc: Get devlink instances.
attribute-set: devlink
- dont-validate: [ strict, dump ]
+ dont-validate: [strict, dump]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1266,7 +1290,7 @@ operations:
attributes: &dev-id-attrs
- bus-name
- dev-name
- reply: &get-reply
+ reply: &get-reply
value: 3
attributes:
- bus-name
@@ -1280,7 +1304,7 @@ operations:
name: port-get
doc: Get devlink port instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1304,8 +1328,8 @@ operations:
name: port-set
doc: Set devlink port instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1321,8 +1345,8 @@ operations:
name: port-new
doc: Create devlink port instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1343,8 +1367,8 @@ operations:
name: port-del
doc: Delete devlink port instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1355,8 +1379,8 @@ operations:
name: port-split
doc: Split devlink port instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1371,8 +1395,8 @@ operations:
name: port-unsplit
doc: Unplit devlink port instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1383,7 +1407,7 @@ operations:
name: sb-get
doc: Get shared buffer instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1405,7 +1429,7 @@ operations:
name: sb-pool-get
doc: Get shared buffer pool instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1428,8 +1452,8 @@ operations:
name: sb-pool-set
doc: Set shared buffer pool instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1446,7 +1470,7 @@ operations:
name: sb-port-pool-get
doc: Get shared buffer port-pool combinations and threshold.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1470,8 +1494,8 @@ operations:
name: sb-port-pool-set
doc: Set shared buffer port-pool combinations and threshold.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1488,7 +1512,7 @@ operations:
name: sb-tc-pool-bind-get
doc: Get shared buffer port-TC to pool bindings and threshold.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1513,8 +1537,8 @@ operations:
name: sb-tc-pool-bind-set
doc: Set shared buffer port-TC to pool bindings and threshold.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1533,8 +1557,8 @@ operations:
name: sb-occ-snapshot
doc: Take occupancy snapshot of shared buffer.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1549,8 +1573,8 @@ operations:
name: sb-occ-max-clear
doc: Clear occupancy watermarks of shared buffer.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1564,8 +1588,8 @@ operations:
name: eswitch-get
doc: Get eswitch attributes.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1584,8 +1608,8 @@ operations:
name: eswitch-set
doc: Set eswitch attributes.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1596,7 +1620,7 @@ operations:
name: dpipe-table-get
doc: Get dpipe table attributes.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1616,7 +1640,7 @@ operations:
name: dpipe-entries-get
doc: Get dpipe entries attributes.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1635,7 +1659,7 @@ operations:
name: dpipe-headers-get
doc: Get dpipe headers attributes.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1653,8 +1677,8 @@ operations:
name: dpipe-table-counters-set
doc: Set dpipe counter attributes.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1669,8 +1693,8 @@ operations:
name: resource-set
doc: Set resource attributes.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1685,7 +1709,7 @@ operations:
name: resource-dump
doc: Get resource attributes.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1704,8 +1728,8 @@ operations:
name: reload
doc: Reload devlink.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-dev-lock
post: devlink-nl-post-doit-dev-lock
@@ -1728,7 +1752,7 @@ operations:
name: param-get
doc: Get param instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1748,8 +1772,8 @@ operations:
name: param-set
doc: Set param instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1766,7 +1790,7 @@ operations:
name: region-get
doc: Get region instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1789,8 +1813,8 @@ operations:
name: region-new
doc: Create region snapshot.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1810,8 +1834,8 @@ operations:
name: region-del
doc: Delete region snapshot.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1822,8 +1846,8 @@ operations:
name: region-read
doc: Read region data.
attribute-set: devlink
- dont-validate: [ dump-strict ]
- flags: [ admin-perm ]
+ dont-validate: [dump-strict]
+ flags: [admin-perm]
dump:
request:
attributes:
@@ -1847,7 +1871,7 @@ operations:
name: port-param-get
doc: Get port param instances.
attribute-set: devlink
- dont-validate: [ strict, dump-strict ]
+ dont-validate: [strict, dump-strict]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1863,8 +1887,8 @@ operations:
name: port-param-set
doc: Set port param instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1873,9 +1897,11 @@ operations:
-
name: info-get
- doc: Get device information, like driver name, hardware and firmware versions etc.
+ doc: |
+ Get device information, like driver name, hardware and firmware versions
+ etc.
attribute-set: devlink
- dont-validate: [ strict, dump ]
+ dont-validate: [strict, dump]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1900,7 +1926,7 @@ operations:
name: health-reporter-get
doc: Get health reporter instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1921,8 +1947,8 @@ operations:
name: health-reporter-set
doc: Set health reporter instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1940,8 +1966,8 @@ operations:
name: health-reporter-recover
doc: Recover health reporter instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1952,8 +1978,8 @@ operations:
name: health-reporter-diagnose
doc: Diagnose health reporter instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1964,8 +1990,8 @@ operations:
name: health-reporter-dump-get
doc: Dump health reporter instances.
attribute-set: devlink
- dont-validate: [ dump-strict ]
- flags: [ admin-perm ]
+ dont-validate: [dump-strict]
+ flags: [admin-perm]
dump:
request:
attributes: *health-reporter-id-attrs
@@ -1978,8 +2004,8 @@ operations:
name: health-reporter-dump-clear
doc: Clear dump of health reporter instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1990,8 +2016,8 @@ operations:
name: flash-update
doc: Flash update devlink instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2007,7 +2033,7 @@ operations:
name: trap-get
doc: Get trap instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2029,8 +2055,8 @@ operations:
name: trap-set
doc: Set trap instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2045,7 +2071,7 @@ operations:
name: trap-group-get
doc: Get trap group instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2067,8 +2093,8 @@ operations:
name: trap-group-set
doc: Set trap group instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2084,7 +2110,7 @@ operations:
name: trap-policer-get
doc: Get trap policer instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2106,8 +2132,8 @@ operations:
name: trap-policer-set
doc: Get trap policer instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2123,8 +2149,8 @@ operations:
name: health-reporter-test
doc: Test health reporter instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -2136,7 +2162,7 @@ operations:
name: rate-get
doc: Get rate instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2159,8 +2185,8 @@ operations:
name: rate-set
doc: Set rate instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2174,13 +2200,14 @@ operations:
- rate-tx-priority
- rate-tx-weight
- rate-parent-node-name
+ - rate-tc-bws
-
name: rate-new
doc: Create rate instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2194,13 +2221,14 @@ operations:
- rate-tx-priority
- rate-tx-weight
- rate-parent-node-name
+ - rate-tc-bws
-
name: rate-del
doc: Delete rate instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2214,7 +2242,7 @@ operations:
name: linecard-get
doc: Get line card instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2236,8 +2264,8 @@ operations:
name: linecard-set
doc: Set line card instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2252,7 +2280,7 @@ operations:
name: selftests-get
doc: Get device selftest instances.
attribute-set: devlink
- dont-validate: [ strict, dump ]
+ dont-validate: [strict, dump]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2269,8 +2297,8 @@ operations:
name: selftests-run
doc: Run device selftest instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml
index f434140b538e..5decee61a2c4 100644
--- a/Documentation/netlink/specs/dpll.yaml
+++ b/Documentation/netlink/specs/dpll.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: dpll
doc: DPLL subsystem.
@@ -240,6 +240,20 @@ definitions:
integer part of a measured phase offset value.
Value of (DPLL_A_PHASE_OFFSET % DPLL_PHASE_OFFSET_DIVIDER) is a
fractional part of a measured phase offset value.
+ -
+ type: enum
+ name: feature-state
+ doc: |
+ Allow control (enable/disable) and status checking over features.
+ entries:
+ -
+ name: disable
+ doc: |
+ feature shall be disabled
+ -
+ name: enable
+ doc: |
+ feature shall be enabled
attribute-sets:
-
@@ -293,6 +307,14 @@ attribute-sets:
be put to message multiple times to indicate possible parallel
quality levels (e.g. one specified by ITU option 1 and another
one specified by option 2).
+ -
+ name: phase-offset-monitor
+ type: u32
+ enum: feature-state
+ doc: Receive or request state of the phase offset monitor feature.
+ If enabled, the dpll device shall monitor all currently available
+ inputs and notify about changes of their phase offset against the
+ dpll device.
-
name: pin
enum-name: dpll_a_pin
@@ -406,6 +428,15 @@ attribute-sets:
doc: |
A ratio of high to low state of a SYNC signal pulse embedded
into base clock frequency. Value is in percents.
+ -
+ name: reference-sync
+ type: nest
+ multi-attr: true
+ nested-attributes: reference-sync
+ doc: |
+ A capable pin provides a list of pins that can be bound to create a
+ reference-sync pin pair.
+
-
name: pin-parent-device
subset-of: pin
@@ -436,6 +467,14 @@ attribute-sets:
name: frequency-min
-
name: frequency-max
+ -
+ name: reference-sync
+ subset-of: pin
+ attributes:
+ -
+ name: id
+ -
+ name: state
operations:
enum-name: dpll_cmd
@@ -445,7 +484,7 @@ operations:
doc: |
Get id of dpll device that matches given attributes
attribute-set: dpll
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: dpll-lock-doit
@@ -464,7 +503,7 @@ operations:
doc: |
Get list of DPLL devices (dump) or attributes of a single dpll device
attribute-set: dpll
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: dpll-pre-doit
@@ -483,6 +522,7 @@ operations:
- temp
- clock-id
- type
+ - phase-offset-monitor
dump:
reply: *dev-attrs
@@ -491,7 +531,7 @@ operations:
name: device-set
doc: Set attributes for a DPLL device
attribute-set: dpll
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: dpll-pre-doit
@@ -499,6 +539,7 @@ operations:
request:
attributes:
- id
+ - phase-offset-monitor
-
name: device-create-ntf
doc: Notification about device appearing
@@ -519,7 +560,7 @@ operations:
doc: |
Get id of a pin that matches given attributes
attribute-set: pin
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: dpll-lock-doit
@@ -547,7 +588,7 @@ operations:
a given dpll device
- do request with target dpll and target pin - single pin attributes
attribute-set: pin
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: dpll-pin-pre-doit
@@ -574,6 +615,7 @@ operations:
- esync-frequency
- esync-frequency-supported
- esync-pulse
+ - reference-sync
dump:
request:
@@ -585,7 +627,7 @@ operations:
name: pin-set
doc: Set attributes of a target pin
attribute-set: pin
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: dpll-pin-pre-doit
@@ -601,6 +643,7 @@ operations:
- parent-pin
- phase-adjust
- esync-frequency
+ - reference-sync
-
name: pin-create-ntf
doc: Notification about pin appearing
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
index 348c6ad548f5..1bc1bd7d33c2 100644
--- a/Documentation/netlink/specs/ethtool.yaml
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: ethtool
protocol: genetlink-legacy
@@ -15,14 +15,14 @@ definitions:
name: udp-tunnel-type
enum-name:
type: enum
- entries: [ vxlan, geneve, vxlan-gpe ]
+ entries: [vxlan, geneve, vxlan-gpe]
enum-cnt-name: __ethtool-udp-tunnel-type-cnt
render-max: true
-
name: stringset
type: enum
entries: []
- header: linux/ethtool.h # skip rendering, no actual definition
+ header: linux/ethtool.h # skip rendering, no actual definition
-
name: header-flags
type: flags
@@ -58,49 +58,51 @@ definitions:
doc: The firmware flashing process was stopped due to an error.
-
name: c33-pse-ext-state
- doc: "groups of PSE extended states functions. IEEE 802.3-2022 33.2.4.4 Variables"
+ doc: |
+ "groups of PSE extended states functions. IEEE 802.3-2022 33.2.4.4
+ Variables"
type: enum
name-prefix: ethtool-c33-pse-ext-state-
header: linux/ethtool.h
entries:
- -
- name: none
- doc: none
- -
- name: error-condition
- doc: Group of error_condition states
- -
- name: mr-mps-valid
- doc: Group of mr_mps_valid states
- -
- name: mr-pse-enable
- doc: Group of mr_pse_enable states
- -
- name: option-detect-ted
- doc: Group of option_detect_ted states
- -
- name: option-vport-lim
- doc: Group of option_vport_lim states
- -
- name: ovld-detected
- doc: Group of ovld_detected states
- -
- name: power-not-available
- doc: Group of power_not_available states
- -
- name: short-detected
- doc: Group of short_detected states
+ -
+ name: none
+ doc: none
+ -
+ name: error-condition
+ doc: Group of error_condition states
+ -
+ name: mr-mps-valid
+ doc: Group of mr_mps_valid states
+ -
+ name: mr-pse-enable
+ doc: Group of mr_pse_enable states
+ -
+ name: option-detect-ted
+ doc: Group of option_detect_ted states
+ -
+ name: option-vport-lim
+ doc: Group of option_vport_lim states
+ -
+ name: ovld-detected
+ doc: Group of ovld_detected states
+ -
+ name: power-not-available
+ doc: Group of power_not_available states
+ -
+ name: short-detected
+ doc: Group of short_detected states
-
name: phy-upstream-type
enum-name: phy-upstream
header: linux/ethtool.h
type: enum
name-prefix: phy-upstream
- entries: [ mac, phy ]
+ entries: [mac, phy]
-
name: tcp-data-split
type: enum
- entries: [ unknown, disabled, enabled ]
+ entries: [unknown, disabled, enabled]
-
name: hwtstamp-source
doc: Source of the hardware timestamp
@@ -119,6 +121,92 @@ definitions:
doc: |
Hardware timestamp comes from one PHY device
of the network topology
+ -
+ name: pse-event
+ doc: PSE event list for the PSE controller
+ type: flags
+ name-prefix: ethtool-
+ entries:
+ -
+ name: pse-event-over-current
+ doc: PSE output current is too high
+ -
+ name: pse-event-over-temp
+ doc: PSE in over temperature state
+ -
+ name: c33-pse-event-detection
+ doc: |
+ Detection process occurred on the PSE. IEEE 802.3-2022 33.2.5 and
+ 145.2.6 PSE detection of PDs. IEEE 802.3-2022 30.9.1.1.5
+ aPSEPowerDetectionStatus
+ -
+ name: c33-pse-event-classification
+ doc: |
+ Classification process occurred on the PSE. IEEE 802.3-2022 33.2.6
+ and 145.2.8 classification of PDs mutual identification.
+ IEEE 802.3-2022 30.9.1.1.8 aPSEPowerClassification.
+ -
+ name: c33-pse-event-disconnection
+ doc: |
+ PD has been disconnected on the PSE. IEEE 802.3-2022 33.3.8
+ and 145.3.9 PD Maintain Power Signature. IEEE 802.3-2022
+ 33.5.1.2.9 MPS Absent. IEEE 802.3-2022 30.9.1.1.20
+ aPSEMPSAbsentCounter.
+ -
+ name: pse-event-over-budget
+ doc: PSE turned off due to over budget situation
+ -
+ name: pse-event-sw-pw-control-error
+ doc: PSE faced an error managing the power control from software
+ -
+ name: input-xfrm
+ doc: RSS hash function transformations.
+ type: flags
+ enum-name:
+ name-prefix: rxh-xfrm-
+ header: linux/ethtool.h
+ entries:
+ -
+ name: sym-xor
+ doc: >-
+ XOR the corresponding source and destination fields of each specified
+ protocol. Both copies of the XOR'ed fields are fed into the RSS and
+ RXHASH calculation. Note that this XORing reduces the input set
+ entropy and could be exploited to reduce the RSS queue spread.
+ -
+ name: sym-or-xor
+ doc: >-
+ Similar to SYM_XOR, except that one copy of the XOR'ed fields is
+ replaced by an OR of the same fields.
+ -
+ name: rxfh-fields
+ name-prefix: rxh-
+ enum-name:
+ header: linux/ethtool.h
+ type: flags
+ entries:
+ -
+ name: l2da
+ value: 1
+ -
+ name: vlan
+ -
+ name: l3-proto
+ -
+ name: ip-src
+ -
+ name: ip-dst
+ -
+ name: l4-b-0-1
+ doc: src port in case of TCP/UDP/SCTP
+ -
+ name: l4-b-2-3
+ doc: dst port in case of TCP/UDP/SCTP
+ -
+ name: gtp-teid
+ -
+ name: discard
+ value: 31
attribute-sets:
-
@@ -1227,7 +1315,7 @@ attribute-sets:
-
name: stat
type: u64
- type-value: [ id ]
+ type-value: [id]
-
name: hist-rx
type: nest
@@ -1396,6 +1484,135 @@ attribute-sets:
type: nest
multi-attr: true
nested-attributes: c33-pse-pw-limit
+ -
+ name: pse-pw-d-id
+ type: u32
+ name-prefix: ethtool-a-
+ -
+ name: pse-prio-max
+ type: u32
+ name-prefix: ethtool-a-
+ -
+ name: pse-prio
+ type: u32
+ name-prefix: ethtool-a-
+ -
+ name: flow
+ attr-cnt-name: __ethtool-a-flow-cnt
+ doc: |
+ Flow types, corresponding to those defined in the old
+ ethtool header for RXFH and RXNFC as ${PROTO}_FLOW.
+ The values are not matching the old ones to avoid carrying
+ into Netlink the IP_USER_FLOW vs IPV4_FLOW vs IPV4_USER_FLOW confusion.
+ attributes:
+ -
+ name: ether
+ type: uint
+ enum: rxfh-fields
+ -
+ name: ip4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: ip6
+ type: uint
+ enum: rxfh-fields
+ -
+ name: tcp4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: tcp6
+ type: uint
+ enum: rxfh-fields
+ -
+ name: udp4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: udp6
+ type: uint
+ enum: rxfh-fields
+ -
+ name: sctp4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: sctp6
+ type: uint
+ enum: rxfh-fields
+ -
+ name: ah4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: ah6
+ type: uint
+ enum: rxfh-fields
+ -
+ name: esp4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: esp6
+ type: uint
+ enum: rxfh-fields
+ -
+ name: ah-esp4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: ah-esp6
+ type: uint
+ enum: rxfh-fields
+ -
+ name: gtpu4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: gtpu6
+ type: uint
+ enum: rxfh-fields
+ -
+ name: gtpc4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: gtpc6
+ type: uint
+ enum: rxfh-fields
+ -
+ name: gtpc-teid4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: gtpc-teid6
+ type: uint
+ enum: rxfh-fields
+ -
+ name: gtpu-eh4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: gtpu-eh6
+ type: uint
+ enum: rxfh-fields
+ -
+ name: gtpu-ul4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: gtpu-ul6
+ type: uint
+ enum: rxfh-fields
+ -
+ name: gtpu-dl4
+ type: uint
+ enum: rxfh-fields
+ -
+ name: gtpu-dl6
+ type: uint
+ enum: rxfh-fields
-
name: rss
attr-cnt-name: __ethtool-a-rss-cnt
@@ -1424,9 +1641,14 @@ attribute-sets:
-
name: input-xfrm
type: u32
+ enum: input-xfrm
-
name: start-context
type: u32
+ -
+ name: flow-hash
+ type: nest
+ nested-attributes: flow
-
name: plca
attr-cnt-name: __ethtool-a-plca-cnt
@@ -1556,6 +1778,19 @@ attribute-sets:
name: hwtstamp-flags
type: nest
nested-attributes: bitset
+ -
+ name: pse-ntf
+ attr-cnt-name: __ethtool-a-pse-ntf-cnt
+ attributes:
+ -
+ name: header
+ type: nest
+ nested-attributes: header
+ -
+ name: events
+ type: uint
+ enum: pse-event
+ doc: List of events reported by the PSE controller
operations:
enum-model: directional
@@ -2109,14 +2344,14 @@ operations:
request:
attributes:
- header
- reply:
- attributes:
- - header
- offset
- length
- page
- bank
- i2c-address
+ reply:
+ attributes:
+ - header
- data
dump: *module-eeprom-get-op
-
@@ -2206,6 +2441,9 @@ operations:
- c33-pse-ext-substate
- c33-pse-avail-pw-limit
- c33-pse-pw-limit-ranges
+ - pse-pw-d-id
+ - pse-prio-max
+ - pse-prio
dump: *pse-get-op
-
name: pse-set
@@ -2220,6 +2458,7 @@ operations:
- podl-pse-admin-control
- c33-pse-admin-control
- c33-pse-avail-pw-limit
+ - pse-prio
-
name: rss-get
doc: Get RSS params.
@@ -2239,6 +2478,7 @@ operations:
- indir
- hkey
- input-xfrm
+ - flow-hash
dump:
request:
attributes:
@@ -2414,3 +2654,79 @@ operations:
attributes: *tsconfig
reply:
attributes: *tsconfig
+ -
+ name: pse-ntf
+ doc: Notification for PSE events.
+
+ attribute-set: pse-ntf
+
+ event:
+ attributes:
+ - header
+ - events
+ -
+ name: rss-set
+ doc: Set RSS params.
+
+ attribute-set: rss
+
+ do:
+ request:
+ attributes:
+ - header
+ - context
+ - hfunc
+ - indir
+ - hkey
+ - input-xfrm
+ - flow-hash
+ -
+ name: rss-ntf
+ doc: |
+ Notification for change in RSS configuration.
+ For additional contexts only modifications use this notification,
+ creation and deletion have dedicated messages.
+ notify: rss-get
+ -
+ name: rss-create-act
+ doc: Create an RSS context.
+ attribute-set: rss
+ do:
+ request: &rss-create-attrs
+ attributes:
+ - header
+ - context
+ - hfunc
+ - indir
+ - hkey
+ - input-xfrm
+ reply: *rss-create-attrs
+ -
+ name: rss-create-ntf
+ doc: |
+ Notification for creation of an additional RSS context.
+ notify: rss-create-act
+ -
+ name: rss-delete-act
+ doc: Delete an RSS context.
+ attribute-set: rss
+ do:
+ request:
+ attributes:
+ - header
+ - context
+ -
+ name: rss-delete-ntf
+ doc: |
+ Notification for deletion of an additional RSS context.
+ attribute-set: rss
+ event:
+ attributes:
+ - header
+ - context
+
+mcast-groups:
+ list:
+ -
+ name: monitor
+ c-define-name: ethtool-mcgrp-monitor-name
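Two minimal sketches of how the additions above could be exercised with the in-tree ynl CLI; the ifindex (2) is a placeholder, and the subscribe flags assume the CLI's multicast support:

.. code-block:: shell

   # Listen on the new "monitor" multicast group for notifications
   # such as pse-ntf.
   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/ethtool.yaml \
       --subscribe monitor --sleep 60

   # Read per-flow-type RSS hash fields via the new flow-hash nest.
   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/ethtool.yaml \
       --do rss-get --json '{"header": {"dev-index": 2}}'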
diff --git a/Documentation/netlink/specs/fou.yaml b/Documentation/netlink/specs/fou.yaml
index b02ab19817d3..57735726262e 100644
--- a/Documentation/netlink/specs/fou.yaml
+++ b/Documentation/netlink/specs/fou.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: fou
protocol: genetlink-legacy
@@ -18,7 +18,7 @@ definitions:
name: encap-type
name-prefix: fou-encap-
enum-name:
- entries: [ unspec, direct, gue ]
+ entries: [unspec, direct, gue]
attribute-sets:
-
@@ -81,8 +81,8 @@ operations:
doc: Add port.
attribute-set: fou
- dont-validate: [ strict, dump ]
- flags: [ admin-perm ]
+ dont-validate: [strict, dump]
+ flags: [admin-perm]
do:
request: &all_attrs
@@ -103,8 +103,8 @@ operations:
doc: Delete port.
attribute-set: fou
- dont-validate: [ strict, dump ]
- flags: [ admin-perm ]
+ dont-validate: [strict, dump]
+ flags: [admin-perm]
do:
request: &select_attrs
@@ -122,7 +122,7 @@ operations:
name: get
doc: Get tunnel info.
attribute-set: fou
- dont-validate: [ strict, dump ]
+ dont-validate: [strict, dump]
do:
request: *select_attrs
diff --git a/Documentation/netlink/specs/handshake.yaml b/Documentation/netlink/specs/handshake.yaml
index b934cc513e3d..95c3fade7a8d 100644
--- a/Documentation/netlink/specs/handshake.yaml
+++ b/Documentation/netlink/specs/handshake.yaml
@@ -4,7 +4,7 @@
#
# Copyright (c) 2023, Oracle and/or its affiliates.
#
-
+---
name: handshake
protocol: genetlink
@@ -16,17 +16,17 @@ definitions:
type: enum
name: handler-class
value-start: 0
- entries: [ none, tlshd, max ]
+ entries: [none, tlshd, max]
-
type: enum
name: msg-type
value-start: 0
- entries: [ unspec, clienthello, serverhello ]
+ entries: [unspec, clienthello, serverhello]
-
type: enum
name: auth
value-start: 0
- entries: [ unspec, unauth, psk, x509 ]
+ entries: [unspec, unauth, psk, x509]
attribute-sets:
-
@@ -71,6 +71,9 @@ attribute-sets:
-
name: peername
type: string
+ -
+ name: keyring
+ type: u32
-
name: done
attributes:
@@ -95,7 +98,7 @@ operations:
name: accept
doc: Handler retrieves next queued handshake request
attribute-set: accept
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
@@ -109,6 +112,7 @@ operations:
- peer-identity
- certificate
- peername
+ - keyring
-
name: done
doc: Handler reports handshake completion
diff --git a/Documentation/netlink/specs/lockd.yaml b/Documentation/netlink/specs/lockd.yaml
index bbd4da5fe54b..3dc4ac1a051b 100644
--- a/Documentation/netlink/specs/lockd.yaml
+++ b/Documentation/netlink/specs/lockd.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: lockd
protocol: genetlink
uapi-header: linux/lockd_netlink.h
@@ -26,7 +26,7 @@ operations:
name: server-set
doc: set the lockd server parameters
attribute-set: server
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
diff --git a/Documentation/netlink/specs/mptcp_pm.yaml b/Documentation/netlink/specs/mptcp_pm.yaml
index fb57860fe778..02f1ddcfbf1c 100644
--- a/Documentation/netlink/specs/mptcp_pm.yaml
+++ b/Documentation/netlink/specs/mptcp_pm.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: mptcp_pm
protocol: genetlink-legacy
doc: Multipath TCP.
@@ -17,72 +17,72 @@ definitions:
enum-name: mptcp-event-type
name-prefix: mptcp-event-
entries:
- -
- name: unspec
- doc: unused event
- -
- name: created
- doc: >-
- A new MPTCP connection has been created. It is the good time to
- allocate memory and send ADD_ADDR if needed. Depending on the
- traffic-patterns it can take a long time until the
- MPTCP_EVENT_ESTABLISHED is sent.
- Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
- dport, server-side.
- -
- name: established
- doc: >-
- A MPTCP connection is established (can start new subflows).
- Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
- dport, server-side.
- -
- name: closed
- doc: >-
- A MPTCP connection has stopped.
- Attribute: token.
- -
- name: announced
- value: 6
- doc: >-
- A new address has been announced by the peer.
- Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
- -
- name: removed
- doc: >-
- An address has been lost by the peer.
- Attributes: token, rem_id.
- -
- name: sub-established
- value: 10
- doc: >-
- A new subflow has been established. 'error' should not be set.
- Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
- daddr6, sport, dport, backup, if-idx [, error].
- -
- name: sub-closed
- doc: >-
- A subflow has been closed. An error (copy of sk_err) could be set if an
- error has been detected for this subflow.
- Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
- daddr6, sport, dport, backup, if-idx [, error].
- -
- name: sub-priority
- value: 13
- doc: >-
- The priority of a subflow has changed. 'error' should not be set.
- Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
- daddr6, sport, dport, backup, if-idx [, error].
- -
- name: listener-created
- value: 15
- doc: >-
- A new PM listener is created.
- Attributes: family, sport, saddr4 | saddr6.
- -
- name: listener-closed
- doc: >-
- A PM listener is closed.
- Attributes: family, sport, saddr4 | saddr6.
+ -
+ name: unspec
+ doc: unused event
+ -
+ name: created
+ doc: >-
+ A new MPTCP connection has been created. It is a good time to
+ allocate memory and send ADD_ADDR if needed. Depending on the
+ traffic-patterns it can take a long time until the
+ MPTCP_EVENT_ESTABLISHED is sent.
+ Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+ dport, server-side.
+ -
+ name: established
+ doc: >-
+ A MPTCP connection is established (can start new subflows).
+ Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+ dport, server-side.
+ -
+ name: closed
+ doc: >-
+ A MPTCP connection has stopped.
+ Attribute: token.
+ -
+ name: announced
+ value: 6
+ doc: >-
+ A new address has been announced by the peer.
+ Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
+ -
+ name: removed
+ doc: >-
+ An address has been lost by the peer.
+ Attributes: token, rem_id.
+ -
+ name: sub-established
+ value: 10
+ doc: >-
+ A new subflow has been established. 'error' should not be set.
+ Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+ daddr6, sport, dport, backup, if-idx [, error].
+ -
+ name: sub-closed
+ doc: >-
+ A subflow has been closed. An error (copy of sk_err) could be set if
+ an error has been detected for this subflow.
+ Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+ daddr6, sport, dport, backup, if-idx [, error].
+ -
+ name: sub-priority
+ value: 13
+ doc: >-
+ The priority of a subflow has changed. 'error' should not be set.
+ Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+ daddr6, sport, dport, backup, if-idx [, error].
+ -
+ name: listener-created
+ value: 15
+ doc: >-
+ A new PM listener is created.
+ Attributes: family, sport, saddr4 | saddr6.
+ -
+ name: listener-closed
+ doc: >-
+ A PM listener is closed.
+ Attributes: family, sport, saddr4 | saddr6.
attribute-sets:
-
@@ -277,8 +277,8 @@ operations:
name: add-addr
doc: Add endpoint
attribute-set: endpoint
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: &add-addr-attrs
request:
attributes:
@@ -287,39 +287,39 @@ operations:
name: del-addr
doc: Delete endpoint
attribute-set: endpoint
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: *add-addr-attrs
-
name: get-addr
doc: Get endpoint information
attribute-set: attr
- dont-validate: [ strict ]
+ dont-validate: [strict]
do: &get-addr-attrs
request:
attributes:
- - addr
- - token
+ - addr
+ - token
reply:
attributes:
- - addr
+ - addr
dump:
reply:
- attributes:
- - addr
+ attributes:
+ - addr
-
name: flush-addrs
doc: Flush addresses
attribute-set: endpoint
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: *add-addr-attrs
-
name: set-limits
doc: Set protocol limits
attribute-set: attr
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: &mptcp-limits
request:
attributes:
@@ -329,10 +329,10 @@ operations:
name: get-limits
doc: Get protocol limits
attribute-set: attr
- dont-validate: [ strict ]
+ dont-validate: [strict]
do: &mptcp-get-limits
request:
- attributes:
+ attributes:
- rcv-add-addrs
- subflows
reply:
@@ -343,8 +343,8 @@ operations:
name: set-flags
doc: Change endpoint flags
attribute-set: attr
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: &mptcp-set-flags
request:
attributes:
@@ -355,8 +355,8 @@ operations:
name: announce
doc: Announce new address
attribute-set: attr
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: &announce-add
request:
attributes:
@@ -366,19 +366,19 @@ operations:
name: remove
doc: Announce removal
attribute-set: attr
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do:
request:
- attributes:
- - token
- - loc-id
+ attributes:
+ - token
+ - loc-id
-
name: subflow-create
doc: Create subflow
attribute-set: attr
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: &sf-create
request:
attributes:
@@ -389,6 +389,6 @@ operations:
name: subflow-destroy
doc: Destroy subflow
attribute-set: attr
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: *sf-create
diff --git a/Documentation/netlink/specs/net_shaper.yaml b/Documentation/netlink/specs/net_shaper.yaml
index 8ebad0d02904..0b1b54be48f9 100644
--- a/Documentation/netlink/specs/net_shaper.yaml
+++ b/Documentation/netlink/specs/net_shaper.yaml
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+---
name: net-shaper
doc: |
@@ -243,7 +244,7 @@ operations:
The set operation can't be used to create a @node scope shaper,
use the @group operation instead.
attribute-set: net-shaper
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: net-shaper-nl-pre-doit
@@ -274,7 +275,7 @@ operations:
node with infinite bandwidth. The queue's implicit node
feeds an implicit RR node at the root of the hierarchy.
attribute-set: net-shaper
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: net-shaper-nl-pre-doit
@@ -305,7 +306,7 @@ operations:
full identifier, comprising @binding and @handle, is provided
as the reply.
attribute-set: net-shaper
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: net-shaper-nl-pre-doit
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index c0ef6d0d7786..c035dc0f64fd 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: netdev
doc:
@@ -31,7 +31,7 @@ definitions:
-
name: hw-offload
doc:
- This feature informs if netdev supports XDP hw offloading.
+ This feature informs if netdev supports XDP hw offloading.
-
name: rx-sg
doc:
@@ -48,16 +48,19 @@ definitions:
entries:
-
name: timestamp
- doc:
- Device is capable of exposing receive HW timestamp via bpf_xdp_metadata_rx_timestamp().
+ doc: |
+ Device is capable of exposing receive HW timestamp via
+ bpf_xdp_metadata_rx_timestamp().
-
name: hash
- doc:
- Device is capable of exposing receive packet hash via bpf_xdp_metadata_rx_hash().
+ doc: |
+ Device is capable of exposing receive packet hash via
+ bpf_xdp_metadata_rx_hash().
-
name: vlan-tag
- doc:
- Device is capable of exposing receive packet VLAN tag via bpf_xdp_metadata_rx_vlan_tag().
+ doc: |
+ Device is capable of exposing receive packet VLAN tag via
+ bpf_xdp_metadata_rx_vlan_tag().
-
type: flags
name: xsk-flags
@@ -77,11 +80,15 @@ definitions:
-
name: queue-type
type: enum
- entries: [ rx, tx ]
+ entries: [rx, tx]
-
name: qstats-scope
type: flags
- entries: [ queue ]
+ entries: [queue]
+ -
+ name: napi-threaded
+ type: enum
+ entries: [disabled, enabled]
attribute-sets:
-
@@ -205,7 +212,7 @@ attribute-sets:
-
name: alloc-fast
type: uint
- value: 8 # reserve some attr ids in case we need more metadata later
+ value: 8 # reserve some attr ids in case we need more metadata later
-
name: alloc-slow
type: uint
@@ -280,6 +287,13 @@ attribute-sets:
doc: The timeout, in nanoseconds, of how long to suspend irq
processing, if event polling finds events
type: uint
+ -
+ name: threaded
+ doc: Whether the NAPI is configured to operate in threaded polling
+ mode. If set to enabled, the NAPI context is polled by a dedicated
+ kernel thread.
+ type: u32
+ enum: napi-threaded
-
name: xsk-info
attributes: []
@@ -367,7 +381,7 @@ attribute-sets:
For drivers supporting XDP, XDP is considered the first layer
of the stack, so packets consumed by XDP are still counted here.
type: uint
- value: 8 # reserve some attr ids in case we need more metadata later
+ value: 8 # reserve some attr ids in case we need more metadata later
-
name: rx-bytes
doc: Successfully received bytes, see `rx-packets`.
@@ -425,9 +439,9 @@ attribute-sets:
-
name: rx-hw-gro-packets
doc: |
- Number of packets that were coalesced from smaller packets by the device.
- Counts only packets coalesced with the HW-GRO netdevice feature,
- LRO-coalesced packets are not counted.
+ Number of packets that were coalesced from smaller packets by the
+ device. Counts only packets coalesced with the HW-GRO netdevice
+ feature, LRO-coalesced packets are not counted.
type: uint
-
name: rx-hw-gro-bytes
@@ -436,8 +450,8 @@ attribute-sets:
-
name: rx-hw-gro-wire-packets
doc: |
- Number of packets that were coalesced to bigger packetss with the HW-GRO
- netdevice feature. LRO-coalesced packets are not counted.
+ Number of packets that were coalesced to bigger packets with the
+ HW-GRO netdevice feature. LRO-coalesced packets are not counted.
type: uint
-
name: rx-hw-gro-wire-bytes
@@ -691,6 +705,7 @@ operations:
- defer-hard-irqs
- gro-flush-timeout
- irq-suspend-timeout
+ - threaded
dump:
request:
attributes:
@@ -721,7 +736,7 @@ operations:
name: bind-rx
doc: Bind dmabuf to netdev
attribute-set: dmabuf
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
@@ -735,7 +750,7 @@ operations:
name: napi-set
doc: Set configurable NAPI instance settings.
attribute-set: napi
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
@@ -743,6 +758,7 @@ operations:
- defer-hard-irqs
- gro-flush-timeout
- irq-suspend-timeout
+ - threaded
-
name: bind-tx
doc: Bind dmabuf to netdev for TX
@@ -757,7 +773,7 @@ operations:
- id
kernel-family:
- headers: [ "net/netdev_netlink.h"]
+ headers: ["net/netdev_netlink.h"]
sock-priv: struct netdev_nl_sock
mcast-groups:
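For illustration, toggling the new threaded NAPI setting could look like this with the ynl CLI (the NAPI id 8193 is a placeholder; real ids come from napi-get):

.. code-block:: shell

   # Switch one NAPI instance to threaded polling mode
   # (value name per the napi-threaded enum).
   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
       --do napi-set --json '{"id": 8193, "threaded": "enabled"}'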
diff --git a/Documentation/netlink/specs/nfsd.yaml b/Documentation/netlink/specs/nfsd.yaml
index 8d1a3c01708f..100363029e82 100644
--- a/Documentation/netlink/specs/nfsd.yaml
+++ b/Documentation/netlink/specs/nfsd.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: nfsd
protocol: genetlink
uapi-header: linux/nfsd_netlink.h
@@ -151,7 +151,7 @@ operations:
name: threads-set
doc: set the number of running threads
attribute-set: server
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
@@ -174,7 +174,7 @@ operations:
name: version-set
doc: set nfs enabled versions
attribute-set: server-proto
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
@@ -191,7 +191,7 @@ operations:
name: listener-set
doc: set nfs running sockets
attribute-set: server-sock
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
@@ -208,7 +208,7 @@ operations:
name: pool-mode-set
doc: set the current server pool-mode
attribute-set: pool-mode
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
diff --git a/Documentation/netlink/specs/nftables.yaml b/Documentation/netlink/specs/nftables.yaml
index bd938bd01b6b..2ee10d92d644 100644
--- a/Documentation/netlink/specs/nftables.yaml
+++ b/Documentation/netlink/specs/nftables.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: nftables
protocol: netlink-raw
protonum: 12
@@ -1205,7 +1205,9 @@ operations:
- name
-
name: destroytable
- doc: Delete an existing table with destroy semantics (ignoring ENOENT errors).
+ doc: |
+ Delete an existing table with destroy semantics (ignoring ENOENT
+ errors).
attribute-set: table-attrs
fixed-header: nfgenmsg
do:
@@ -1249,7 +1251,9 @@ operations:
- name
-
name: destroychain
- doc: Delete an existing chain with destroy semantics (ignoring ENOENT errors).
+ doc: |
+ Delete an existing chain with destroy semantics (ignoring ENOENT
+ errors).
attribute-set: chain-attrs
fixed-header: nfgenmsg
do:
@@ -1307,7 +1311,8 @@ operations:
- name
-
name: destroyrule
- doc: Delete an existing rule with destroy semantics (ignoring ENOENT errors).
+ doc: |
+ Delete an existing rule with destroy semantics (ignoring ENOENT errors).
attribute-set: rule-attrs
fixed-header: nfgenmsg
do:
@@ -1351,7 +1356,8 @@ operations:
- name
-
name: destroyset
- doc: Delete an existing set with destroy semantics (ignoring ENOENT errors).
+ doc: |
+ Delete an existing set with destroy semantics (ignoring ENOENT errors).
attribute-set: set-attrs
fixed-header: nfgenmsg
do:
diff --git a/Documentation/netlink/specs/nl80211.yaml b/Documentation/netlink/specs/nl80211.yaml
index 3611b11a7d8f..610fdd5e000e 100644
--- a/Documentation/netlink/specs/nl80211.yaml
+++ b/Documentation/netlink/specs/nl80211.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: nl80211
protocol: genetlink-legacy
@@ -285,7 +285,7 @@ attribute-sets:
type: u16
-
name: sta-flags
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: sta-listen-interval
type: u16
@@ -297,14 +297,14 @@ attribute-sets:
type: u32
-
name: sta-info
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: wiphy-bands
type: nest
nested-attributes: wiphy-bands
-
name: mntr-flags
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mesh-id
type: binary
@@ -317,7 +317,7 @@ attribute-sets:
display-hint: mac
-
name: mpath-info
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: bss-cts-prot
type: u8
@@ -339,16 +339,16 @@ attribute-sets:
type: binary
-
name: reg-rules
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mesh-config
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: bss-basic-rates
type: binary
-
name: wiphy-txq-params
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: wiphy-freq
type: u32
@@ -370,16 +370,16 @@ attribute-sets:
type: u8
-
name: scan-frequencies
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: scan-ssids
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: generation
type: u32
-
name: bss
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: reg-initiator
type: u8
@@ -416,10 +416,10 @@ attribute-sets:
display-hint: hex
-
name: freq-before
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: freq-after
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: freq-fixed
type: flag
@@ -483,10 +483,10 @@ attribute-sets:
type: binary
-
name: key
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: keys
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: pid
type: u32
@@ -495,7 +495,7 @@ attribute-sets:
type: u8
-
name: survey-info
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: pmkid
type: binary
@@ -513,7 +513,7 @@ attribute-sets:
type: u8
-
name: tx-rates
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: frame-match
type: binary
@@ -525,7 +525,7 @@ attribute-sets:
type: u32
-
name: cqm
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: local-state-change
type: flag
@@ -575,13 +575,13 @@ attribute-sets:
type: u16
-
name: key-default-types
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: max-remain-on-channel-duration
type: u32
-
name: mesh-setup
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: wiphy-antenna-avail-tx
type: u32
@@ -596,7 +596,7 @@ attribute-sets:
type: u8
-
name: wowlan-triggers
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: wowlan-triggers-supported
type: nest
@@ -615,7 +615,7 @@ attribute-sets:
nested-attributes: supported-iftypes
-
name: rekey-data
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: max-num-sched-scan-ssids
type: u8
@@ -624,7 +624,7 @@ attribute-sets:
type: u16
-
name: scan-supp-rates
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: hidden-ssid
type: u32
@@ -636,7 +636,7 @@ attribute-sets:
type: binary
-
name: sta-wme
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: support-ap-uapsd
type: flag
@@ -645,13 +645,13 @@ attribute-sets:
type: flag
-
name: sched-scan-match
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: max-match-sets
type: u8
-
name: pmksa-candidate
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: tx-no-cck-rate
type: flag
@@ -680,7 +680,7 @@ attribute-sets:
name: feature-flags
type: u32
enum: feature-flags
- enum-as-flags: True
+ enum-as-flags: true
-
name: probe-resp-offload
type: u32
@@ -749,7 +749,7 @@ attribute-sets:
type: u32
-
name: mac-addrs
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mac-acl-max
type: u32
@@ -798,7 +798,7 @@ attribute-sets:
type: u16
-
name: coalesce-rule
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: ch-switch-count
type: u32
@@ -807,7 +807,7 @@ attribute-sets:
type: flag
-
name: csa-ies
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: cntdwn-offs-beacon
type: binary
@@ -929,13 +929,13 @@ attribute-sets:
type: u32
-
name: sched-scan-plans
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: pbss
type: flag
-
name: bss-select
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: sta-support-p2p-ps
type: u8
@@ -944,7 +944,7 @@ attribute-sets:
type: binary
-
name: iftype-ext-capa
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mu-mimo-group-data
type: binary
@@ -975,10 +975,10 @@ attribute-sets:
type: u32
-
name: nan-func
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: nan-match
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: fils-kek
type: binary
@@ -1067,16 +1067,16 @@ attribute-sets:
type: binary
-
name: ftm-responder
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: ftm-responder-stats
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: timeout
type: u32
-
name: peer-measurements
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: airtime-weight
type: u16
@@ -1094,7 +1094,7 @@ attribute-sets:
type: flag
-
name: he-obss-pd
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: wiphy-edmg-channels
type: u8
@@ -1106,13 +1106,13 @@ attribute-sets:
type: u16
-
name: he-bss-color
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: iftype-akm-suites
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: tid-config
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: control-port-no-preauth
type: flag
@@ -1133,16 +1133,16 @@ attribute-sets:
type: u32
-
name: scan-freq-khz
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: he-6ghz-capability
type: binary
-
name: fils-discovery
- type: binary # TOOD: nest
+ type: binary # TODO: nest
-
name: unsol-bcast-probe-resp
- type: binary # TOOD: nest
+ type: binary # TODO: nest
-
name: s1g-capability
type: binary
@@ -1173,13 +1173,13 @@ attribute-sets:
type: u8
-
name: color-change-elems
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mbssid-config
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mbssid-elems
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: radar-background
type: flag
@@ -1194,7 +1194,7 @@ attribute-sets:
type: flag
-
name: mlo-links
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mlo-link-id
type: u8
@@ -1234,7 +1234,7 @@ attribute-sets:
type: flag
-
name: ema-rnr-elems
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mlo-link-disabled
type: flag
@@ -1252,10 +1252,10 @@ attribute-sets:
type: flag
-
name: wiphy-radios
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: wiphy-interface-combinations
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: vif-radio-mask
type: u32
@@ -1799,8 +1799,9 @@ operations:
-
name: get-wiphy
doc: |
- Get information about a wiphy or dump a list of all wiphys. Requests to dump get-wiphy
- should unconditionally include the split-wiphy-dump flag in the request.
+ Get information about a wiphy or dump a list of all wiphys. Requests to
+ dump get-wiphy should unconditionally include the split-wiphy-dump flag
+ in the request.
attribute-set: nl80211-attrs
do:
request:
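A sketch of the dump guidance above, using the ynl CLI (the flag name is the one defined in the nl80211 spec; passing it as a boolean is an assumption about the CLI's flag encoding):

.. code-block:: shell

   # Dump all wiphys, unconditionally setting split-wiphy-dump.
   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/nl80211.yaml \
       --dump get-wiphy --json '{"split-wiphy-dump": true}'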
diff --git a/Documentation/netlink/specs/nlctrl.yaml b/Documentation/netlink/specs/nlctrl.yaml
index a36535350bdb..8b4472a6aa36 100644
--- a/Documentation/netlink/specs/nlctrl.yaml
+++ b/Documentation/netlink/specs/nlctrl.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: nlctrl
protocol: genetlink-legacy
uapi-header: linux/genetlink.h
@@ -76,12 +76,12 @@ attribute-sets:
-
name: policy
type: nest-type-value
- type-value: [ policy-id, attr-id ]
+ type-value: [policy-id, attr-id]
nested-attributes: policy-attrs
-
name: op-policy
type: nest-type-value
- type-value: [ op-id ]
+ type-value: [op-id]
nested-attributes: op-policy-attrs
-
name: op
diff --git a/Documentation/netlink/specs/ovpn.yaml b/Documentation/netlink/specs/ovpn.yaml
index ba76426a542d..1b91045cee2e 100644
--- a/Documentation/netlink/specs/ovpn.yaml
+++ b/Documentation/netlink/specs/ovpn.yaml
@@ -4,7 +4,7 @@
#
# Copyright (c) 2024-2025, OpenVPN Inc.
#
-
+---
name: ovpn
protocol: genetlink
@@ -19,7 +19,7 @@ definitions:
-
type: enum
name: cipher-alg
- entries: [ none, aes-gcm, chacha20-poly1305 ]
+ entries: [none, aes-gcm, chacha20-poly1305]
-
type: enum
name: del-peer-reason
@@ -32,7 +32,7 @@ definitions:
-
type: enum
name: key-slot
- entries: [ primary, secondary ]
+ entries: [primary, secondary]
attribute-sets:
-
@@ -42,8 +42,8 @@ attribute-sets:
name: id
type: u32
doc: >-
- The unique ID of the peer in the device context. To be used to identify
- peers during operations for a specific device
+ The unique ID of the peer in the device context. To be used to
+ identify peers during operations for a specific device
checks:
max: 0xFFFFFF
-
@@ -382,7 +382,7 @@ operations:
-
name: peer-new
attribute-set: ovpn-peer-new-input
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Add a remote peer
do:
pre: ovpn-nl-pre-doit
@@ -394,7 +394,7 @@ operations:
-
name: peer-set
attribute-set: ovpn-peer-set-input
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: modify a remote peer
do:
pre: ovpn-nl-pre-doit
@@ -406,7 +406,7 @@ operations:
-
name: peer-get
attribute-set: ovpn
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Retrieve data about existing remote peers (or a specific one)
do:
pre: ovpn-nl-pre-doit
@@ -428,7 +428,7 @@ operations:
-
name: peer-del
attribute-set: ovpn-peer-del-input
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Delete existing remote peer
do:
pre: ovpn-nl-pre-doit
@@ -446,7 +446,7 @@ operations:
-
name: key-new
attribute-set: ovpn
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Add a cipher key for a specific peer
do:
pre: ovpn-nl-pre-doit
@@ -458,7 +458,7 @@ operations:
-
name: key-get
attribute-set: ovpn-keyconf-get
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Retrieve non-sensitive data about peer key and cipher
do:
pre: ovpn-nl-pre-doit
@@ -473,7 +473,7 @@ operations:
-
name: key-swap
attribute-set: ovpn-keyconf-swap-input
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Swap primary and secondary session keys for a specific peer
do:
pre: ovpn-nl-pre-doit
@@ -492,7 +492,7 @@ operations:
-
name: key-del
attribute-set: ovpn-keyconf-del-input
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Delete cipher key for a specific peer
do:
pre: ovpn-nl-pre-doit
diff --git a/Documentation/netlink/specs/ovs_datapath.yaml b/Documentation/netlink/specs/ovs_datapath.yaml
index df6a8f94975e..0c0abf3f9f05 100644
--- a/Documentation/netlink/specs/ovs_datapath.yaml
+++ b/Documentation/netlink/specs/ovs_datapath.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: ovs_datapath
version: 2
protocol: genetlink-legacy
diff --git a/Documentation/netlink/specs/ovs_flow.yaml b/Documentation/netlink/specs/ovs_flow.yaml
index 7974aa7d8905..2dac9c8add57 100644
--- a/Documentation/netlink/specs/ovs_flow.yaml
+++ b/Documentation/netlink/specs/ovs_flow.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: ovs_flow
version: 1
protocol: genetlink-legacy
@@ -293,9 +293,10 @@ definitions:
enum-name: ovs-hash-alg
type: enum
doc: |
- Data path hash algorithm for computing Datapath hash. The algorithm type only specifies
- the fields in a flow will be used as part of the hash. Each datapath is free to use its
- own hash algorithm. The hash value will be opaque to the user space daemon.
+ Data path hash algorithm for computing Datapath hash. The algorithm type
+ only specifies which fields in a flow will be used as part of the hash. Each
+ datapath is free to use its own hash algorithm. The hash value will be
+ opaque to the user space daemon.
entries:
- ovs-hash-alg-l4
@@ -615,7 +616,9 @@ attribute-sets:
name: set
type: nest
nested-attributes: key-attrs
- doc: Replaces the contents of an existing header. The single nested attribute specifies a header to modify and its value.
+ doc: |
+ Replaces the contents of an existing header. The single nested
+ attribute specifies a header to modify and its value.
-
name: push-vlan
type: binary
@@ -630,7 +633,8 @@ attribute-sets:
type: nest
nested-attributes: sample-attrs
doc: |
- Probabilistically executes actions, as specified in the nested attributes.
+ Probabilistically executes actions, as specified in the nested
+ attributes.
-
name: recirc
type: u32
diff --git a/Documentation/netlink/specs/ovs_vport.yaml b/Documentation/netlink/specs/ovs_vport.yaml
index 306da6bb842d..da47e65fd574 100644
--- a/Documentation/netlink/specs/ovs_vport.yaml
+++ b/Documentation/netlink/specs/ovs_vport.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: ovs_vport
version: 2
protocol: genetlink-legacy
@@ -21,7 +21,7 @@ definitions:
type: enum
enum-name: ovs-vport-type
name-prefix: ovs-vport-type-
- entries: [ unspec, netdev, internal, gre, vxlan, geneve ]
+ entries: [unspec, netdev, internal, gre, vxlan, geneve]
-
name: ovs-vport-stats
type: struct
diff --git a/Documentation/netlink/specs/rt-addr.yaml b/Documentation/netlink/specs/rt-addr.yaml
index 4f86aa1075da..bafe3bfeabfb 100644
--- a/Documentation/netlink/specs/rt-addr.yaml
+++ b/Documentation/netlink/specs/rt-addr.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: rt-addr
protocol: netlink-raw
uapi-header: linux/rtnetlink.h
diff --git a/Documentation/netlink/specs/rt-link.yaml b/Documentation/netlink/specs/rt-link.yaml
index 28c4cf66517c..210394c188a3 100644
--- a/Documentation/netlink/specs/rt-link.yaml
+++ b/Documentation/netlink/specs/rt-link.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: rt-link
protocol: netlink-raw
uapi-header: linux/rtnetlink.h
diff --git a/Documentation/netlink/specs/rt-neigh.yaml b/Documentation/netlink/specs/rt-neigh.yaml
index e9cba164e3d1..30a9ee16f128 100644
--- a/Documentation/netlink/specs/rt-neigh.yaml
+++ b/Documentation/netlink/specs/rt-neigh.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: rt-neigh
protocol: netlink-raw
uapi-header: linux/rtnetlink.h
@@ -79,6 +79,7 @@ definitions:
entries:
- managed
- locked
+ - ext-validated
-
name: rtm-type
type: enum
diff --git a/Documentation/netlink/specs/rt-route.yaml b/Documentation/netlink/specs/rt-route.yaml
index 800f3a823d47..5b514ddeff1d 100644
--- a/Documentation/netlink/specs/rt-route.yaml
+++ b/Documentation/netlink/specs/rt-route.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: rt-route
protocol: netlink-raw
uapi-header: linux/rtnetlink.h
@@ -117,7 +117,7 @@ attribute-sets:
name: multipath
type: binary
-
- name: protoinfo # not used
+ name: protoinfo # not used
type: binary
-
name: flow
@@ -127,10 +127,10 @@ attribute-sets:
type: binary
struct: rta-cacheinfo
-
- name: session # not used
+ name: session # not used
type: binary
-
- name: mp-algo # not used
+ name: mp-algo # not used
type: binary
-
name: table
@@ -155,7 +155,7 @@ attribute-sets:
type: u16
-
name: encap
- type: binary # tunnel specific nest
+ type: binary # tunnel specific nest
-
name: expires
type: u32
diff --git a/Documentation/netlink/specs/rt-rule.yaml b/Documentation/netlink/specs/rt-rule.yaml
index 003707ca4a3e..46b1d426e7e8 100644
--- a/Documentation/netlink/specs/rt-rule.yaml
+++ b/Documentation/netlink/specs/rt-rule.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: rt-rule
protocol: netlink-raw
uapi-header: linux/fib_rules.h
diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml
index 42d74c9aeb54..b1afc7ab3539 100644
--- a/Documentation/netlink/specs/tc.yaml
+++ b/Documentation/netlink/specs/tc.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: tc
protocol: netlink-raw
uapi-header: linux/pkt_cls.h
@@ -57,6 +57,23 @@ definitions:
- tunoam
- tuncrit
-
+ name: dualpi2-drop-overload
+ type: enum
+ entries: [overflow, drop]
+ -
+ name: dualpi2-drop-early
+ type: enum
+ entries: [drop-dequeue, drop-enqueue]
+ -
+ name: dualpi2-ecn-mask
+ type: enum
+ value-start: 1
+ entries: [l4s-ect, cla-ect, any-ect]
+ -
+ name: dualpi2-split-gso
+ type: enum
+ entries: [no-split-gso, split-gso]
+ -
name: tc-stats
type: struct
members:
@@ -76,7 +93,8 @@ definitions:
name: overlimits
type: u32
doc: |
- Number of throttle events when this flow goes out of allocated bandwidth
+ Number of throttle events when this flow goes out of allocated
+ bandwidth
-
name: bps
type: u32
@@ -751,7 +769,9 @@ definitions:
-
name: count
type: u32
- doc: How many drops we've done since the last time we entered dropping state
+ doc: |
+ How many drops we've done since the last time we entered dropping
+ state
-
name: lastcount
type: u32
@@ -823,6 +843,58 @@ definitions:
name: drop-overmemory
type: u32
-
+ name: tc-dualpi2-xstats
+ type: struct
+ members:
+ -
+ name: prob
+ type: u32
+ doc: Current base PI probability
+ -
+ name: delay-c
+ type: u32
+ doc: Current C-queue delay in microseconds
+ -
+ name: delay-l
+ type: u32
+ doc: Current L-queue delay in microseconds
+ -
+ name: pkts-in-c
+ type: u32
+ doc: Number of packets enqueued in the C-queue
+ -
+ name: pkts-in-l
+ type: u32
+ doc: Number of packets enqueued in the L-queue
+ -
+ name: maxq
+ type: u32
+ doc: Maximum number of packets seen by the DualPI2
+ -
+ name: ecn-mark
+ type: u32
+ doc: All packets marked with ECN
+ -
+ name: step-mark
+ type: u32
+ doc: Only packets marked with ECN due to L-queue step AQM
+ -
+ name: credit
+ type: s32
+ doc: Current credit value for WRR
+ -
+ name: memory-used
+ type: u32
+ doc: Memory used in bytes by the DualPI2
+ -
+ name: max-memory-used
+ type: u32
+ doc: Maximum memory used in bytes by the DualPI2
+ -
+ name: memory-limit
+ type: u32
+ doc: Memory limit in bytes
+ -
name: tc-fq-pie-xstats
type: struct
members:
@@ -845,7 +917,7 @@ definitions:
-
name: ecn-mark
type: u32
- doc: Packets marked with ecn
+ doc: Packets marked with ECN
-
name: new-flow-count
type: u32
@@ -988,7 +1060,7 @@ definitions:
-
name: ecn-mark
type: u32
- doc: Packets marked with ecn
+ doc: Packets marked with ECN
-
name: tc-red-xstats
type: struct
@@ -1161,7 +1233,7 @@ definitions:
-
name: keys
type: binary
- struct: tc-u32-key # TODO: array
+ struct: tc-u32-key # TODO: array
-
name: tc-u32-pcnt
type: struct
@@ -1174,7 +1246,7 @@ definitions:
type: u64
-
name: kcnts
- type: u64 # TODO: array
+ type: u64 # TODO: array
-
name: tcf-t
type: struct
@@ -1336,7 +1408,7 @@ definitions:
-
name: keys
type: binary
- struct: tc-pedit-key # TODO: array
+ struct: tc-pedit-key # TODO: array
-
name: tc-pedit-key
type: struct
@@ -2282,6 +2354,78 @@ attribute-sets:
name: quantum
type: u32
-
+ name: dualpi2-attrs
+ name-prefix: tca-dualpi2-
+ attributes:
+ -
+ name: limit
+ type: u32
+ doc: Limit of total number of packets in queue
+ -
+ name: memory-limit
+ type: u32
+ doc: Memory limit of total number of packets in queue
+ -
+ name: target
+ type: u32
+ doc: Classic target delay in microseconds
+ -
+ name: tupdate
+ type: u32
+ doc: Drop probability update interval time in microseconds
+ -
+ name: alpha
+ type: u32
+ doc: Integral gain factor in Hz for PI controller
+ -
+ name: beta
+ type: u32
+ doc: Proportional gain factor in Hz for PI controller
+ -
+ name: step-thresh-pkts
+ type: u32
+ doc: L4S step marking threshold in packets
+ -
+ name: step-thresh-us
+ type: u32
+ doc: L4S Step marking threshold in microseconds
+ -
+ name: min-qlen-step
+ type: u32
+ doc: Packets enqueued to the L-queue apply the step threshold only
+ when the L-queue length is larger than this value
+ (0 is recommended)
+ -
+ name: coupling
+ type: u8
+ doc: Probability coupling factor between Classic and L4S
+ (2 is recommended)
+ -
+ name: drop-overload
+ type: u8
+ doc: Control the overload strategy (drop to preserve latency or
+ let the queue overflow)
+ enum: dualpi2-drop-overload
+ -
+ name: drop-early
+ type: u8
+ doc: Decide whether the Classic packets are PI-based dropped or marked
+ enum: dualpi2-drop-early
+ -
+ name: c-protection
+ type: u8
+ doc: Classic WRR weight in percentage (from 0 to 100)
+ -
+ name: ecn-mask
+ type: u8
+ doc: Configure the L-queue ECN classifier
+ enum: dualpi2-ecn-mask
+ -
+ name: split-gso
+ type: u8
+ doc: Whether to split aggregated (GSO) skbs
+ enum: dualpi2-split-gso
+ -
name: ematch-attrs
name-prefix: tca-ematch-
attr-max-name: tca-ematch-tree-max
@@ -2885,7 +3029,7 @@ attribute-sets:
attributes:
-
name: parms
- type: binary # array of struct: tc-gred-qopt
+ type: binary # array of struct: tc-gred-qopt
-
name: stab
type: binary
@@ -3335,10 +3479,10 @@ attribute-sets:
struct: tc-police
-
name: rate
- type: binary # TODO
+ type: binary # TODO
-
name: peakrate
- type: binary # TODO
+ type: binary # TODO
-
name: avrate
type: u32
@@ -3698,7 +3842,7 @@ sub-messages:
value: choke
attribute-set: choke-attrs
-
- value: clsact # no content
+ value: clsact # no content
-
value: codel
attribute-set: codel-attrs
@@ -3706,6 +3850,9 @@ sub-messages:
value: drr
attribute-set: drr-attrs
-
+ value: dualpi2
+ attribute-set: dualpi2-attrs
+ -
value: etf
attribute-set: etf-attrs
-
@@ -3742,12 +3889,12 @@ sub-messages:
value: htb
attribute-set: htb-attrs
-
- value: ingress # no content
+ value: ingress # no content
-
value: matchall
attribute-set: matchall-attrs
-
- value: mq # no content
+ value: mq # no content
-
value: mqprio
fixed-header: tc-mqprio-qopt
@@ -3873,6 +4020,9 @@ sub-messages:
value: codel
fixed-header: tc-codel-xstats
-
+ value: dualpi2
+ fixed-header: tc-dualpi2-xstats
+ -
value: fq
fixed-header: tc-fq-qd-stats
-
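For orientation, a DualPI2 qdisc configured from user space might look like the sketch below; the iproute2 option names are assumptions inferred from the attribute names above, not confirmed syntax:

.. code-block:: shell

   # Attach DualPI2 as root qdisc with a 15ms classic target and the
   # recommended coupling factor of 2 (option names are assumed).
   tc qdisc add dev eth0 root dualpi2 target 15ms coupling_factor 2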
diff --git a/Documentation/netlink/specs/tcp_metrics.yaml b/Documentation/netlink/specs/tcp_metrics.yaml
index 1bd94f43e526..13144aeed31a 100644
--- a/Documentation/netlink/specs/tcp_metrics.yaml
+++ b/Documentation/netlink/specs/tcp_metrics.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: tcp_metrics
protocol: genetlink-legacy
@@ -133,7 +133,7 @@ operations:
doc: Retrieve metrics.
attribute-set: tcp-metrics
- dont-validate: [ strict, dump ]
+ dont-validate: [strict, dump]
do:
request: &sel_attrs
@@ -162,8 +162,8 @@ operations:
doc: Delete metrics.
attribute-set: tcp-metrics
- dont-validate: [ strict, dump ]
- flags: [ admin-perm ]
+ dont-validate: [strict, dump]
+ flags: [admin-perm]
do:
request: *sel_attrs
diff --git a/Documentation/netlink/specs/team.yaml b/Documentation/netlink/specs/team.yaml
index c13529e011c9..cf02d47d12a4 100644
--- a/Documentation/netlink/specs/team.yaml
+++ b/Documentation/netlink/specs/team.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: team
protocol: genetlink-legacy
@@ -152,7 +152,7 @@ operations:
doc: No operation
value: 0
attribute-set: team
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
# Actually it only reply the team netlink family
@@ -164,8 +164,8 @@ operations:
name: options-set
doc: Set team options
attribute-set: team
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
request: &option_attrs
@@ -178,8 +178,8 @@ operations:
name: options-get
doc: Get team options info
attribute-set: team
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
request:
@@ -191,8 +191,8 @@ operations:
name: port-list-get
doc: Get team ports info
attribute-set: team
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
request:
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index dceeb0d763aa..50d92084a49c 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -209,13 +209,10 @@ Libbpf
Libbpf is a helper library for eBPF and XDP that makes using these
technologies a lot simpler. It also contains specific helper functions
-in tools/lib/bpf/xsk.h for facilitating the use of AF_XDP. It
-contains two types of functions: those that can be used to make the
-setup of AF_XDP socket easier and ones that can be used in the data
-plane to access the rings safely and quickly. To see an example on how
-to use this API, please take a look at the sample application in
-samples/bpf/xdpsock_usr.c which uses libbpf for both setup and data
-plane operations.
+in tools/testing/selftests/bpf/xsk.h for facilitating the use of
+AF_XDP. It contains two types of functions: those that can be used to
+make the setup of AF_XDP socket easier and ones that can be used in the
+data plane to access the rings safely and quickly.
We recommend that you use this library unless you have become a power
user. It will make your program a lot simpler.
@@ -372,9 +369,8 @@ needs to explicitly notify the kernel to send any packets put on the
TX ring. This can be accomplished either by a poll() call, as in the
RX path, or by calling sendto().
-An example of how to use this flag can be found in
-samples/bpf/xdpsock_user.c. An example with the use of libbpf helpers
-would look like this for the TX path:
+An example with the use of libbpf helpers would look like this for the
+TX path:
.. code-block:: c
@@ -442,6 +438,15 @@ is created by a privileged process and passed to a non-privileged one.
Once the option is set, kernel will refuse attempts to bind that socket
to a different interface. Updating the value requires CAP_NET_RAW.
+XDP_MAX_TX_SKB_BUDGET setsockopt
+--------------------------------
+
+This setsockopt sets the maximum number of descriptors that can be handled
+and passed to the driver in one send syscall. It applies only in copy
+mode and allows the application to tune the per-socket iteration limit for
+better throughput and fewer send syscalls.
+The allowed range is [32, xs->tx->nentries].
+
XDP_STATISTICS getsockopt
-------------------------
@@ -549,12 +554,12 @@ later in this document.
Usage
-----
-In order to use AF_XDP sockets two parts are needed. The
-user-space application and the XDP program. For a complete setup and
-usage example, please refer to the sample application. The user-space
-side is xdpsock_user.c and the XDP side is part of libbpf.
+In order to use AF_XDP sockets two parts are needed. The user-space
+application and the XDP program. For a complete setup and usage example,
+please refer to the xdp-project at
+https://github.com/xdp-project/bpf-examples/tree/main/AF_XDP-example.
-The XDP code sample included in tools/lib/bpf/xsk.c is the following:
+The XDP code sample is the following:
.. code-block:: c
@@ -752,11 +757,12 @@ to facilitate extending a zero-copy driver with multi-buffer support.
Sample application
==================
-
-There is a xdpsock benchmarking/test application included that
-demonstrates how to use AF_XDP sockets with private UMEMs. Say that
-you would like your UDP traffic from port 4242 to end up in queue 16,
-that we will enable AF_XDP on. Here, we use ethtool for this::
+There is an xdpsock benchmarking/test application that can be found at
+https://github.com/xdp-project/bpf-examples/tree/main/AF_XDP-example
+that demonstrates how to use AF_XDP sockets with private
+UMEMs. Say that you would like your UDP traffic from port 4242 to end
+up in queue 16, that we will enable AF_XDP on. Here, we use ethtool
+for this::
ethtool -N p3p2 rx-flow-hash udp4 fn
ethtool -N p3p2 flow-type udp4 src-port 4242 dst-port 4242 \
@@ -773,7 +779,7 @@ can be displayed with "-h", as usual.
This sample application uses libbpf to make the setup and usage of
AF_XDP simpler. If you want to know how the raw uapi of AF_XDP is
really used to make something more advanced, take a look at the libbpf
-code in tools/lib/bpf/xsk.[ch].
+code in tools/testing/selftests/bpf/xsk.[ch].
FAQ
=======
diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst
index a4c1291d2561..f8f5766703d4 100644
--- a/Documentation/networking/bonding.rst
+++ b/Documentation/networking/bonding.rst
@@ -562,6 +562,12 @@ lacp_rate
The default is slow.
+broadcast_neighbor
+
+ Option specifying whether to broadcast ARP/ND packets to all
+ active slaves. This option has no effect in modes other than
+ 802.3ad mode. The default is off (0).
+
max_bonds
Specifies the number of bonding devices to create for this
@@ -767,8 +773,9 @@ num_unsol_na
greater than 1.
The valid range is 0 - 255; the default value is 1. These options
- affect only the active-backup mode. These options were added for
- bonding versions 3.3.0 and 3.4.0 respectively.
+ affect the active-backup or 802.3ad (broadcast_neighbor enabled) mode.
+ These options were added for bonding versions 3.3.0 and 3.4.0
+ respectively.
From Linux 3.0 and bonding version 3.7.1, these notifications
are generated by the ipv4 and ipv6 code and the numbers of
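As a sketch, the new option should be reachable through the usual bonding sysfs interface (the knob name is assumed to match the option name):

.. code-block:: shell

   # Enable broadcasting of ARP/ND packets on an 802.3ad bond
   # (sysfs path assumed from the standard bonding option layout).
   echo 1 > /sys/class/net/bond0/bonding/broadcast_neighbor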
diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
index b018ce346392..bc1b585355f7 100644
--- a/Documentation/networking/can.rst
+++ b/Documentation/networking/can.rst
@@ -1104,15 +1104,12 @@ for writing CAN network device driver are described below:
General Settings
----------------
-.. code-block:: C
-
- dev->type = ARPHRD_CAN; /* the netdevice hardware type */
- dev->flags = IFF_NOARP; /* CAN has no arp */
+CAN network device drivers can use alloc_candev_mqs() and friends instead of
+alloc_netdev_mqs(), to automatically take care of CAN-specific setup:
- dev->mtu = CAN_MTU; /* sizeof(struct can_frame) -> Classical CAN interface */
+.. code-block:: C
- or alternative, when the controller supports CAN with flexible data rate:
- dev->mtu = CANFD_MTU; /* sizeof(struct canfd_frame) -> CAN FD interface */
+ dev = alloc_candev_mqs(...);
The struct can_frame or struct canfd_frame is the payload of each socket
buffer (skbuff) in the protocol family PF_CAN.
diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
index 4561e8ab9e08..14784a0a6a8a 100644
--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -56,6 +56,9 @@ ena_netdev.[ch] Main Linux kernel driver.
ena_ethtool.c ethtool callbacks.
ena_xdp.[ch] XDP files
ena_pci_id_tbl.h Supported device IDs.
+ena_phc.[ch] PTP hardware clock infrastructure (see `PHC`_ for more info)
+ena_devlink.[ch] devlink files.
+ena_debugfs.[ch] debugfs files.
================= ======================================================
Management Interface:
@@ -221,6 +224,99 @@ descriptor it was received on would be recycled. When a packet smaller
than RX copybreak bytes is received, it is copied into a new memory
buffer and the RX descriptor is returned to HW.
+.. _`PHC`:
+
+PTP Hardware Clock (PHC)
+========================
+.. _`ptp-userspace-api`: https://docs.kernel.org/driver-api/ptp.html#ptp-hardware-clock-user-space-api
+.. _`testptp`: https://elixir.bootlin.com/linux/latest/source/tools/testing/selftests/ptp/testptp.c
+
+The ENA Linux driver supports a PTP hardware clock (PHC), providing a timestamp reference with nanosecond resolution.
+
+**PHC support**
+
+PHC depends on the PTP module, which needs to be either loaded as a module or compiled into the kernel.
+
+Verify that the PTP module is present:
+
+.. code-block:: shell
+
+ grep -w '^CONFIG_PTP_1588_CLOCK=[ym]' /boot/config-`uname -r`
+
+- If the command produces no output, the ENA driver cannot be loaded with PHC support.
+
+**PHC activation**
+
+The feature is turned off by default. In order to turn it on, the ENA driver
+can be loaded in the following way:
+
+- devlink:
+
+.. code-block:: shell
+
+ sudo devlink dev param set pci/<domain:bus:slot.function> name enable_phc value true cmode driverinit
+ sudo devlink dev reload pci/<domain:bus:slot.function>
+ # for example:
+ sudo devlink dev param set pci/0000:00:06.0 name enable_phc value true cmode driverinit
+ sudo devlink dev reload pci/0000:00:06.0
+
+All available PTP clock sources can be tracked here:
+
+.. code-block:: shell
+
+ ls /sys/class/ptp
+
+PHC support and capabilities can be verified using ethtool:
+
+.. code-block:: shell
+
+ ethtool -T <interface>
+
+**PHC timestamp**
+
+To retrieve a PHC timestamp, use the `ptp-userspace-api`_; for example, using `testptp`_:
+
+.. code-block:: shell
+
+ testptp -d /dev/ptp$(ethtool -T <interface> | awk '/PTP Hardware Clock:/ {print $NF}') -k 1
+
+PHC get time requests should be kept within reasonable bounds;
+avoid excessive utilization to ensure optimal performance and efficiency.
+The ENA device restricts the frequency of PHC get time requests to a maximum
+of 125 requests per second. If this limit is surpassed, the get time request
+will fail, leading to an increment in the phc_err_ts statistic.
+
+**PHC statistics**
+
+PHC can be monitored using debugfs (if mounted):
+
+.. code-block:: shell
+
+ sudo cat /sys/kernel/debug/<domain:bus:slot.function>/phc_stats
+
+ # for example:
+ sudo cat /sys/kernel/debug/0000:00:06.0/phc_stats
+
+PHC errors must remain below 1% of all PHC requests to maintain the desired level of accuracy and reliability.
+
+================= ======================================================
+**phc_cnt** | Number of successfully retrieved timestamps (below expire timeout).
+**phc_exp** | Number of retrieved timestamps that expired (above expire timeout).
+**phc_skp** | Number of skipped get time attempts (during block period).
+**phc_err_dv** | Number of failed get time attempts due to device errors (entering into block state).
+**phc_err_ts** | Number of failed get time attempts due to timestamp errors (entering into block state).
+ | This occurs if the driver exceeded the request limit or the device received an invalid timestamp.
+================= ======================================================
+
+PHC timeouts:
+
+================= ======================================================
+**expire** | Max time for a valid timestamp retrieval; passing this threshold fails
+ | the get time request and blocks new requests until the block timeout.
+**block** | The blocking period starts once a get time request expires or fails;
+ | all get time requests during the block period will be skipped.
+================= ======================================================
+
Statistics
==========
@@ -268,6 +364,18 @@ RSS
- The user can provide a hash key, hash function, and configure the
indirection table through `ethtool(8)`.
+DEVLINK SUPPORT
+===============
+.. _`devlink`: https://www.kernel.org/doc/html/latest/networking/devlink/index.html
+
+`devlink`_ supports reloading the driver and initiating re-negotiation with the ENA device:
+
+.. code-block:: shell
+
+ sudo devlink dev reload pci/<domain:bus:slot.function>
+ # for example:
+ sudo devlink dev reload pci/0000:00:06.0
+
DATA PATH
=========
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 139b4c75a191..40ac552641a3 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -58,7 +58,9 @@ Contents:
ti/tlan
ti/icssg_prueth
wangxun/txgbe
+ wangxun/txgbevf
wangxun/ngbe
+ wangxun/ngbevf
.. only:: subproject and html
diff --git a/Documentation/networking/device_drivers/ethernet/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
index 3c46a48d99ba..0bca293cf9cb 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/ice.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
@@ -927,6 +927,19 @@ To enable/disable UDP Segmentation Offload, issue the following command::
# ethtool -K <ethX> tx-udp-segmentation [off|on]
+PTP pin interface
+-----------------
+All adapters support the standard PTP pin interface. SDPs (Software
+Definable Pins) are single-ended pins that support both periodic output
+and external timestamping. There are also specific differential
+input/output pins (TIME_SYNC, 1PPS) that support only one of the two
+functions.
+
+There are adapters with a DPLL, where pins are connected to the DPLL
+instead of being exposed on the board. Be aware that in those
+configurations, only SDP pins are exposed and each pin has its own fixed
+direction. To see an input signal on those PTP pins, you need to
+configure the DPLL properly. The output signal is only visible on the
+DPLL; to send it to the board SMA/U.FL pins, the DPLL output pins have
+to be configured manually.
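+
+As a sketch of the standard PTP pin interface (the clock index and the
+``SDP0`` pin name are illustrative), an SDP can be assigned the periodic
+output function and driven with testptp::
+
+  # function 2 = periodic output, channel 0
+  echo "2 0" > /sys/class/ptp/ptp0/pins/SDP0
+  testptp -d /dev/ptp0 -p 1000000000   # 1 second period
+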
GNSS module
-----------
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
index 43d72c8b713b..754c81436408 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
@@ -1341,3 +1341,35 @@ Device Counters
- The number of times the device owned queue had not enough buffers
allocated.
- Error
+
+ * - `pci_bw_inbound_high`
+ - The number of times the device crossed the high inbound PCIe bandwidth
+ threshold. To be compared to pci_bw_inbound_low to check if the device
+ is in a congested state.
+ If pci_bw_inbound_high == pci_bw_inbound_low then the device is not congested.
+ If pci_bw_inbound_high > pci_bw_inbound_low then the device is congested.
+ - Informative
+
+ * - `pci_bw_inbound_low`
+ - The number of times the device crossed the low inbound PCIe bandwidth
+ threshold. To be compared to pci_bw_inbound_high to check if the device
+ is in a congested state.
+ If pci_bw_inbound_high == pci_bw_inbound_low then the device is not congested.
+ If pci_bw_inbound_high > pci_bw_inbound_low then the device is congested.
+ - Informative
+
+ * - `pci_bw_outbound_high`
+ - The number of times the device crossed the high outbound PCIe bandwidth
+ threshold. To be compared to pci_bw_outbound_low to check if the device
+ is in a congested state.
+ If pci_bw_outbound_high == pci_bw_outbound_low then the device is not congested.
+ If pci_bw_outbound_high > pci_bw_outbound_low then the device is congested.
+ - Informative
+
+ * - `pci_bw_outbound_low`
+ - The number of times the device crossed the low outbound PCIe bandwidth
+ threshold. To be compared to pci_bw_outbound_high to check if the device
+ is in a congested state.
+ If pci_bw_outbound_high == pci_bw_outbound_low then the device is not congested.
+ If pci_bw_outbound_high > pci_bw_outbound_low then the device is congested.
+ - Informative
diff --git a/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst b/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst
index f8592dec8851..afb8353daefd 100644
--- a/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst
+++ b/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst
@@ -28,6 +28,36 @@ devlink dev info provides version information for all three components. In
addition to the version the hg commit hash of the build is included as a
separate entry.
+Configuration
+-------------
+
+Ringparams (ethtool -g / -G)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+fbnic has two submission (host -> device) rings for every completion
+(device -> host) ring. The three ring objects together form a single
+"queue" as used by higher layer software (a Rx, or a Tx queue).
+
+For Rx the two submission rings are used to pass empty pages to the NIC.
+Ring 0 is the Header Page Queue (HPQ); the NIC will use its pages to place
+L2-L4 headers (or full frames if the frame is not header-data split).
+Ring 1 is the Payload Page Queue (PPQ) and is used for packet payloads.
+The completion ring is used to receive packet notifications / metadata.
+ethtool ``rx`` ringparam maps to the size of the completion ring,
+``rx-mini`` to the HPQ, and ``rx-jumbo`` to the PPQ.
+
+For Tx both submission rings can be used to submit packets; the completion
+ring carries notifications for both. fbnic uses one of the submission
+rings for normal traffic from the stack and the second one for XDP frames.
+ethtool ``tx`` ringparam controls both the size of the submission rings
+and the completion ring.
+
+Every single entry on the HPQ and PPQ (``rx-mini``, ``rx-jumbo``)
+corresponds to 4kB of allocated memory, while entries on the remaining
+rings are in units of descriptors (8B). The ideal ratio of submission
+and completion ring sizes will depend on the workload, as multiple small
+packets will fit into a single page.
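+
+For example, the rings could be sized through the standard ethtool
+ringparam interface (interface name and sizes illustrative)::
+
+  ethtool -G eth0 rx 2048 rx-mini 512 rx-jumbo 512 tx 1024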
+
Upgrading Firmware
------------------
diff --git a/Documentation/networking/device_drivers/ethernet/ti/cpsw.rst b/Documentation/networking/device_drivers/ethernet/ti/cpsw.rst
index a88946bd188b..d3e130455043 100644
--- a/Documentation/networking/device_drivers/ethernet/ti/cpsw.rst
+++ b/Documentation/networking/device_drivers/ethernet/ti/cpsw.rst
@@ -268,14 +268,14 @@ Example 1: One port tx AVB configuration scheme for target board
// Run your appropriate tools with socket option "SO_PRIORITY"
// to 3 for class A and/or to 2 for class B
- // (I took at https://www.spinics.net/lists/netdev/msg460869.html)
+ // (I took at https://lore.kernel.org/r/20171017010128.22141-1-vinicius.gomes@intel.com/)
./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p3 -s 1500&
./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p2 -s 1500&
13) ::
// run your listener on workstation (should be in same vlan)
- // (I took at https://www.spinics.net/lists/netdev/msg460869.html)
+ // (I took at https://lore.kernel.org/r/20171017010128.22141-1-vinicius.gomes@intel.com/)
./tsn_listener -d 18:03:73:66:87:42 -i enp5s0 -s 1500
Receiving data rate: 39012 kbps
Receiving data rate: 39012 kbps
@@ -555,7 +555,7 @@ Example 2: Two port tx AVB configuration scheme for target board
20) ::
// run your listener on workstation (should be in same vlan)
- // (I took at https://www.spinics.net/lists/netdev/msg460869.html)
+ // (I took at https://lore.kernel.org/r/20171017010128.22141-1-vinicius.gomes@intel.com/)
./tsn_listener -d 18:03:73:66:87:42 -i enp5s0 -s 1500
Receiving data rate: 39012 kbps
Receiving data rate: 39012 kbps
diff --git a/Documentation/networking/device_drivers/ethernet/wangxun/ngbevf.rst b/Documentation/networking/device_drivers/ethernet/wangxun/ngbevf.rst
new file mode 100644
index 000000000000..a39e3d5a1038
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/wangxun/ngbevf.rst
@@ -0,0 +1,16 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+==================================================================
+Linux Base Virtual Function Driver for Wangxun(R) Gigabit Ethernet
+==================================================================
+
+WangXun Gigabit Virtual Function Linux driver.
+Copyright(c) 2015 - 2025 Beijing WangXun Technology Co., Ltd.
+
+Support
+=======
+For general information, go to the website at:
+https://www.net-swift.com
+
+If you have any problems, contact the Wangxun support team via
+nic-support@net-swift.com and Cc: netdev.
diff --git a/Documentation/networking/device_drivers/ethernet/wangxun/txgbevf.rst b/Documentation/networking/device_drivers/ethernet/wangxun/txgbevf.rst
new file mode 100644
index 000000000000..b2f759b7b518
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/wangxun/txgbevf.rst
@@ -0,0 +1,16 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+===========================================================================
+Linux Base Virtual Function Driver for Wangxun(R) 10/25/40 Gigabit Ethernet
+===========================================================================
+
+WangXun 10/25/40 Gigabit Virtual Function Linux driver.
+Copyright(c) 2015 - 2025 Beijing WangXun Technology Co., Ltd.
+
+Support
+=======
+For general information, go to the website at:
+https://www.net-swift.com
+
+If you have any problems, contact the Wangxun support team via
+nic-support@net-swift.com and Cc: netdev.
diff --git a/Documentation/networking/devlink/devlink-params.rst b/Documentation/networking/devlink/devlink-params.rst
index 4e01dc32bc08..211b58177e12 100644
--- a/Documentation/networking/devlink/devlink-params.rst
+++ b/Documentation/networking/devlink/devlink-params.rst
@@ -137,3 +137,9 @@ own name.
* - ``event_eq_size``
- u32
- Control the size of asynchronous control events EQ.
+ * - ``enable_phc``
+ - Boolean
+ - Enable PHC (PTP Hardware Clock) functionality in the device.
+ * - ``clock_id``
+ - u64
+ - Clock ID used by the device for registering DPLL devices and pins.
diff --git a/Documentation/networking/devlink/devlink-port.rst b/Documentation/networking/devlink/devlink-port.rst
index 9d22d41a7cd1..5e397798a402 100644
--- a/Documentation/networking/devlink/devlink-port.rst
+++ b/Documentation/networking/devlink/devlink-port.rst
@@ -418,6 +418,14 @@ API allows to configure following rate object's parameters:
to all node children limits. ``tx_max`` is an upper limit for children.
``tx_share`` is a total bandwidth distributed among children.
+``tc_bw``
+ Allow users to set the bandwidth allocation per traffic class on rate
+ objects. This enables fine-grained QoS configurations by assigning a relative
+ share value to each traffic class. The bandwidth is distributed in proportion
+ to the share value for each class, relative to the sum of all shares.
+ When applied to a non-leaf node, tc_bw determines how bandwidth is shared
+ among its child elements.
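+
+  As a sketch (assuming a recent iproute2; the exact syntax may vary),
+  relative shares could be assigned to two traffic classes on a node,
+  with the device and node names illustrative::
+
+    devlink port function rate set pci/0000:03:00.0/node_1 tc-bw 0:20 3:80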
+
``tx_priority`` and ``tx_weight`` can be used simultaneously. In that case
nodes with the same priority form a WFQ subgroup in the sibling group
and arbitration among them is based on assigned weights.
diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst
index 8319f43b5933..270a65a01411 100644
--- a/Documentation/networking/devlink/index.rst
+++ b/Documentation/networking/devlink/index.rst
@@ -85,6 +85,8 @@ parameters, info versions, and other features it supports.
ionic
ice
ixgbe
+ kvaser_pciefd
+ kvaser_usb
mlx4
mlx5
mlxsw
@@ -98,3 +100,4 @@ parameters, info versions, and other features it supports.
iosm
octeontx2
sfc
+ zl3073x
diff --git a/Documentation/networking/devlink/kvaser_pciefd.rst b/Documentation/networking/devlink/kvaser_pciefd.rst
new file mode 100644
index 000000000000..075edd2a508a
--- /dev/null
+++ b/Documentation/networking/devlink/kvaser_pciefd.rst
@@ -0,0 +1,24 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============================
+kvaser_pciefd devlink support
+=============================
+
+This document describes the devlink features implemented by the
+``kvaser_pciefd`` device driver.
+
+Info versions
+=============
+
+The ``kvaser_pciefd`` driver reports the following versions:
+
+.. list-table:: devlink info versions implemented
+ :widths: 5 5 90
+
+ * - Name
+ - Type
+ - Description
+ * - ``fw``
+ - running
+ - Version of the firmware running on the device. Also available
+ through ``ethtool -i`` as ``firmware-version``.
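+
+For example, the reported versions can be queried with (device handle
+illustrative)::
+
+  devlink dev info pci/0000:04:00.0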
diff --git a/Documentation/networking/devlink/kvaser_usb.rst b/Documentation/networking/devlink/kvaser_usb.rst
new file mode 100644
index 000000000000..403db3766cb4
--- /dev/null
+++ b/Documentation/networking/devlink/kvaser_usb.rst
@@ -0,0 +1,33 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+kvaser_usb devlink support
+==========================
+
+This document describes the devlink features implemented by the
+``kvaser_usb`` device driver.
+
+Info versions
+=============
+
+The ``kvaser_usb`` driver reports the following versions:
+
+.. list-table:: devlink info versions implemented
+ :widths: 5 5 90
+
+ * - Name
+ - Type
+ - Description
+ * - ``fw``
+ - running
+ - Version of the firmware running on the device. Also available
+ through ``ethtool -i`` as ``firmware-version``.
+ * - ``board.rev``
+ - fixed
+ - The device hardware revision.
+ * - ``board.id``
+ - fixed
+ - The device EAN (product number).
+ * - ``serial_number``
+ - fixed
+ - The device serial number.
diff --git a/Documentation/networking/devlink/netdevsim.rst b/Documentation/networking/devlink/netdevsim.rst
index 88482725422c..3932004eae82 100644
--- a/Documentation/networking/devlink/netdevsim.rst
+++ b/Documentation/networking/devlink/netdevsim.rst
@@ -62,7 +62,7 @@ Rate objects
The ``netdevsim`` driver supports rate objects management, which includes:
-- registerging/unregistering leaf rate objects per VF devlink port;
+- registering/unregistering leaf rate objects per VF devlink port;
- creation/deletion node rate objects;
- setting tx_share and tx_max rate values for any rate object type;
- setting parent node for any rate object type.
diff --git a/Documentation/networking/devlink/zl3073x.rst b/Documentation/networking/devlink/zl3073x.rst
new file mode 100644
index 000000000000..4b6cfaf38643
--- /dev/null
+++ b/Documentation/networking/devlink/zl3073x.rst
@@ -0,0 +1,51 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+zl3073x devlink support
+=======================
+
+This document describes the devlink features implemented by the ``zl3073x``
+device driver.
+
+Parameters
+==========
+
+.. list-table:: Generic parameters implemented
+ :widths: 5 5 90
+
+ * - Name
+ - Mode
+ - Notes
+ * - ``clock_id``
+ - driverinit
+ - Set the clock ID that is used by the driver for registering DPLL devices
+ and pins.
+
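+For example, the clock ID could be set and applied with a driver reload
+(device handle and value illustrative)::
+
+  devlink dev param set <bus_name>/<dev_name> name clock_id value 0x1122334455667788 cmode driverinit
+  devlink dev reload <bus_name>/<dev_name>
+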
+Info versions
+=============
+
+The ``zl3073x`` driver reports the following versions:
+
+.. list-table:: devlink info versions implemented
+ :widths: 5 5 5 90
+
+ * - Name
+ - Type
+ - Example
+ - Description
+ * - ``asic.id``
+ - fixed
+ - 1E94
+ - Chip identification number
+ * - ``asic.rev``
+ - fixed
+ - 300
+ - Chip revision number
+ * - ``fw``
+ - running
+ - 7006
+ - Firmware version number
+ * - ``custom_cfg``
+ - running
+ - 1.3.0.1
+ - Device configuration version customized by OEM
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index b6e9af4d0f1b..ab20c644af24 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -239,6 +239,9 @@ Userspace to kernel:
``ETHTOOL_MSG_PHY_GET`` get Ethernet PHY information
``ETHTOOL_MSG_TSCONFIG_GET`` get hw timestamping configuration
``ETHTOOL_MSG_TSCONFIG_SET`` set hw timestamping configuration
+ ``ETHTOOL_MSG_RSS_SET`` set RSS settings
+ ``ETHTOOL_MSG_RSS_CREATE_ACT`` create an additional RSS context
+ ``ETHTOOL_MSG_RSS_DELETE_ACT`` delete an additional RSS context
===================================== =================================
Kernel to userspace:
@@ -281,6 +284,7 @@ Kernel to userspace:
``ETHTOOL_MSG_MODULE_GET_REPLY`` transceiver module parameters
``ETHTOOL_MSG_PSE_GET_REPLY`` PSE parameters
``ETHTOOL_MSG_RSS_GET_REPLY`` RSS settings
+ ``ETHTOOL_MSG_RSS_NTF`` RSS settings
``ETHTOOL_MSG_PLCA_GET_CFG_REPLY`` PLCA RS parameters
``ETHTOOL_MSG_PLCA_GET_STATUS_REPLY`` PLCA RS status
``ETHTOOL_MSG_PLCA_NTF`` PLCA RS parameters
@@ -290,6 +294,11 @@ Kernel to userspace:
``ETHTOOL_MSG_PHY_NTF`` Ethernet PHY information change
``ETHTOOL_MSG_TSCONFIG_GET_REPLY`` hw timestamping configuration
``ETHTOOL_MSG_TSCONFIG_SET_REPLY`` new hw timestamping configuration
+ ``ETHTOOL_MSG_PSE_NTF`` PSE events notification
+ ``ETHTOOL_MSG_RSS_NTF`` RSS settings notification
+ ``ETHTOOL_MSG_RSS_CREATE_ACT_REPLY`` create an additional RSS context
+ ``ETHTOOL_MSG_RSS_CREATE_NTF`` additional RSS context created
+ ``ETHTOOL_MSG_RSS_DELETE_NTF`` additional RSS context deleted
======================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@@ -1788,6 +1797,11 @@ Kernel response contents:
limit of the PoE PSE.
``ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES`` nested Supported power limit
configuration ranges.
+ ``ETHTOOL_A_PSE_PW_D_ID`` u32 Index of the PSE power domain
+ ``ETHTOOL_A_PSE_PRIO_MAX`` u32 Maximum configurable priority
+ of the PoE PSE
+ ``ETHTOOL_A_PSE_PRIO`` u32 Currently configured priority
+ of the PoE PSE
========================================== ====== =============================
When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` attribute identifies
@@ -1861,6 +1875,15 @@ identifies the C33 PSE power limit ranges through
If the controller works with fixed classes, the min and max values will be
equal.
+The ``ETHTOOL_A_PSE_PW_D_ID`` attribute identifies the index of the PSE
+power domain.
+
+When set, the optional ``ETHTOOL_A_PSE_PRIO_MAX`` attribute identifies
+the maximum PSE priority value.
+When set, the optional ``ETHTOOL_A_PSE_PRIO`` attribute identifies
+the currently configured PSE priority.
+For a description of PSE priority attributes, see ``PSE_SET``.
+
PSE_SET
=======
@@ -1874,6 +1897,8 @@ Request contents:
``ETHTOOL_A_C33_PSE_ADMIN_CONTROL`` u32 Control PSE Admin state
``ETHTOOL_A_C33_PSE_AVAIL_PWR_LIMIT`` u32 Control PoE PSE available
power limit
+ ``ETHTOOL_A_PSE_PRIO`` u32 Control priority of the
+ PoE PSE
====================================== ====== =============================
When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` attribute is used
@@ -1896,6 +1921,38 @@ various existing products that document power consumption in watts rather than
classes. If power limit configuration based on classes is needed, the
conversion can be done in user space, for example by ethtool.
+When set, the optional ``ETHTOOL_A_PSE_PRIO`` attribute is used to
+control the PSE priority. Allowed priority values are between zero and
+the value of the ``ETHTOOL_A_PSE_PRIO_MAX`` attribute.
+
+A lower value indicates a higher priority, meaning that a priority value
+of 0 corresponds to the highest port priority.
+Port priority serves two functions:
+
+ - Power-up Order: After a reset, ports are powered up in order of their
+ priority from highest to lowest. Ports with higher priority
+ (lower values) power up first.
+ - Shutdown Order: When the power budget is exceeded, ports with lower
+ priority (higher values) are turned off first.
+
+PSE_NTF
+=======
+
+Notify PSE events.
+
+Notification contents:
+
+ =============================== ====== ========================
+ ``ETHTOOL_A_PSE_HEADER`` nested request header
+ ``ETHTOOL_A_PSE_EVENTS`` bitset PSE events
+ =============================== ====== ========================
+
+When set, the optional ``ETHTOOL_A_PSE_EVENTS`` attribute identifies the
+PSE events.
+
+.. kernel-doc:: include/uapi/linux/ethtool_netlink_generated.h
+ :identifiers: ethtool_pse_event
+
RSS_GET
=======
@@ -1919,14 +1976,15 @@ used to ignore context 0s and only dump additional contexts).
Kernel response contents:
-===================================== ====== ==========================
+===================================== ====== ===============================
``ETHTOOL_A_RSS_HEADER`` nested reply header
``ETHTOOL_A_RSS_CONTEXT`` u32 context number
``ETHTOOL_A_RSS_HFUNC`` u32 RSS hash func
``ETHTOOL_A_RSS_INDIR`` binary Indir table bytes
``ETHTOOL_A_RSS_HKEY`` binary Hash key bytes
``ETHTOOL_A_RSS_INPUT_XFRM`` u32 RSS input data transformation
-===================================== ====== ==========================
+ ``ETHTOOL_A_RSS_FLOW_HASH`` nested Header fields included in hash
+===================================== ====== ===============================
ETHTOOL_A_RSS_HFUNC attribute is bitmap indicating the hash function
being used. Current supported options are toeplitz, xor or crc32.
@@ -1935,6 +1993,67 @@ indicates queue number.
ETHTOOL_A_RSS_INPUT_XFRM attribute is a bitmap indicating the type of
transformation applied to the input protocol fields before given to the RSS
hfunc. Current supported options are symmetric-xor and symmetric-or-xor.
+ETHTOOL_A_RSS_FLOW_HASH carries a per-flow-type bitmask of which header
+fields are included in the hash calculation.
+
+RSS_SET
+=======
+
+Request contents:
+
+===================================== ====== ==============================
+ ``ETHTOOL_A_RSS_HEADER`` nested request header
+ ``ETHTOOL_A_RSS_CONTEXT`` u32 context number
+ ``ETHTOOL_A_RSS_HFUNC`` u32 RSS hash func
+ ``ETHTOOL_A_RSS_INDIR`` binary Indir table bytes
+ ``ETHTOOL_A_RSS_HKEY`` binary Hash key bytes
+ ``ETHTOOL_A_RSS_INPUT_XFRM`` u32 RSS input data transformation
+ ``ETHTOOL_A_RSS_FLOW_HASH`` nested Header fields included in hash
+===================================== ====== ==============================
+
+``ETHTOOL_A_RSS_INDIR`` is the minimal RSS table the user expects. The kernel
+and the device driver may replicate the table if it is smaller than the
+smallest table size supported by the device. For example, if the user requests
+``[0, 1]`` but the device needs at least 8 entries, the real table in use will
+end up being ``[0, 1, 0, 1, 0, 1, 0, 1]``. Most devices require the table size
+to be a power of 2, so tables whose size is not a power of 2 will likely be
+rejected. Using a table of size 0 will reset the indirection table to the
+default.
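+
+As a sketch, the classic ``ethtool -X`` command maps onto this message
+(interface name illustrative)::
+
+  ethtool -X eth0 equal 8      # spread traffic evenly over queues 0-7
+  ethtool -X eth0 default      # reset the indirection table to default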
+
+RSS_CREATE_ACT
+==============
+
+Request contents:
+
+===================================== ====== ==============================
+ ``ETHTOOL_A_RSS_HEADER`` nested request header
+ ``ETHTOOL_A_RSS_CONTEXT`` u32 context number
+ ``ETHTOOL_A_RSS_HFUNC`` u32 RSS hash func
+ ``ETHTOOL_A_RSS_INDIR`` binary Indir table bytes
+ ``ETHTOOL_A_RSS_HKEY`` binary Hash key bytes
+ ``ETHTOOL_A_RSS_INPUT_XFRM`` u32 RSS input data transformation
+===================================== ====== ==============================
+
+Kernel response contents:
+
+===================================== ====== ==============================
+ ``ETHTOOL_A_RSS_HEADER`` nested request header
+ ``ETHTOOL_A_RSS_CONTEXT`` u32 context number
+===================================== ====== ==============================
+
+Create an additional RSS context. If ``ETHTOOL_A_RSS_CONTEXT`` is not
+specified, the kernel will allocate one automatically.
+
+RSS_DELETE_ACT
+==============
+
+Request contents:
+
+===================================== ====== ==============================
+ ``ETHTOOL_A_RSS_HEADER`` nested request header
+ ``ETHTOOL_A_RSS_CONTEXT`` u32 context number
+===================================== ====== ==============================
+
+Delete an additional RSS context.
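+
+As a sketch, additional contexts can also be managed with the classic
+``ethtool -X`` command (interface and context number illustrative)::
+
+  ethtool -X eth0 context new
+  ethtool -X eth0 context 1 delete
+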
PLCA_GET_CFG
============
@@ -2386,8 +2505,8 @@ are netlink only.
``ETHTOOL_SFLAGS`` ``ETHTOOL_MSG_FEATURES_SET``
``ETHTOOL_GPFLAGS`` ``ETHTOOL_MSG_PRIVFLAGS_GET``
``ETHTOOL_SPFLAGS`` ``ETHTOOL_MSG_PRIVFLAGS_SET``
- ``ETHTOOL_GRXFH`` n/a
- ``ETHTOOL_SRXFH`` n/a
+ ``ETHTOOL_GRXFH`` ``ETHTOOL_MSG_RSS_GET``
+ ``ETHTOOL_SRXFH`` ``ETHTOOL_MSG_RSS_SET``
``ETHTOOL_GGRO`` ``ETHTOOL_MSG_FEATURES_GET``
``ETHTOOL_SGRO`` ``ETHTOOL_MSG_FEATURES_SET``
``ETHTOOL_GRXRINGS`` n/a
@@ -2401,8 +2520,8 @@ are netlink only.
``ETHTOOL_SRXNTUPLE`` n/a
``ETHTOOL_GRXNTUPLE`` n/a
``ETHTOOL_GSSET_INFO`` ``ETHTOOL_MSG_STRSET_GET``
- ``ETHTOOL_GRXFHINDIR`` n/a
- ``ETHTOOL_SRXFHINDIR`` n/a
+ ``ETHTOOL_GRXFHINDIR`` ``ETHTOOL_MSG_RSS_GET``
+ ``ETHTOOL_SRXFHINDIR`` ``ETHTOOL_MSG_RSS_SET``
``ETHTOOL_GFEATURES`` ``ETHTOOL_MSG_FEATURES_GET``
``ETHTOOL_SFEATURES`` ``ETHTOOL_MSG_FEATURES_SET``
``ETHTOOL_GCHANNELS`` ``ETHTOOL_MSG_CHANNELS_GET``
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 0f1251cce314..9756d16e3df1 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -8,15 +8,19 @@ IP Sysctl
==============================
ip_forward - BOOLEAN
- - 0 - disabled (default)
- - not 0 - enabled
-
Forward Packets between interfaces.
This variable is special, its change resets all configuration
parameters to their default state (RFC1122 for hosts, RFC1812
for routers)
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
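+
+	For example, forwarding can be toggled at runtime with::
+
+		sysctl -w net.ipv4.ip_forward=1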
+
ip_default_ttl - INTEGER
Default value of TTL field (Time To Live) for outgoing (but not
forwarded) IP packets. Should be between 1 and 255 inclusive.
@@ -62,20 +66,25 @@ ip_forward_use_pmtu - BOOLEAN
kernel honoring this information. This is normally not the
case.
- Default: 0 (disabled)
-
Possible values:
- - 0 - disabled
- - 1 - enabled
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
fwmark_reflect - BOOLEAN
Controls the fwmark of kernel-generated IPv4 reply packets that are not
associated with a socket for example, TCP RSTs or ICMP echo replies).
- If unset, these packets have a fwmark of zero. If set, they have the
+ If disabled, these packets have a fwmark of zero. If enabled, they have the
fwmark of the packet they are replying to.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
fib_multipath_use_neigh - BOOLEAN
Use status of existing neighbor entry when determining nexthop for
@@ -83,12 +92,12 @@ fib_multipath_use_neigh - BOOLEAN
packets could be directed to a failed nexthop. Only valid for kernels
built with CONFIG_IP_ROUTE_MULTIPATH enabled.
- Default: 0 (disabled)
-
Possible values:
- - 0 - disabled
- - 1 - enabled
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
fib_multipath_hash_policy - INTEGER
Controls which hash policy to use for multipath routes. Only valid
@@ -368,7 +377,12 @@ tcp_autocorking - BOOLEAN
queue. Applications can still use TCP_CORK for optimal behavior
when they know how/when to uncork their sockets.
- Default : 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
tcp_available_congestion_control - STRING
Shows the available congestion control choices that are registered.
@@ -408,9 +422,16 @@ tcp_congestion_control - STRING
tcp_dsack - BOOLEAN
Allows TCP to send "duplicate" SACKs.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
+
tcp_early_retrans - INTEGER
Tail loss probe (TLP) converts RTOs occurring due to tail
- losses into fast recovery (draft-ietf-tcpm-rack). Note that
+ losses into fast recovery (RFC8985). Note that
TLP requires RACK to function properly (see tcp_recovery below)
Possible values:
@@ -447,7 +468,12 @@ tcp_ecn_fallback - BOOLEAN
knob. The value is not used, if tcp_ecn or per route (or congestion
control) ECN settings are disabled.
- Default: 1 (fallback enabled)
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
tcp_fack - BOOLEAN
This is a legacy option, it has no effect anymore.
@@ -474,7 +500,7 @@ tcp_frto - INTEGER
By default it's enabled with a non-zero value. 0 disables F-RTO.
tcp_fwmark_accept - BOOLEAN
- If set, incoming connections to listening sockets that do not have a
+ If enabled, incoming connections to listening sockets that do not have a
socket mark will set the mark of the accepting socket to the fwmark of
the incoming SYN packet. This will cause all packets on that connection
(starting from the first SYNACK) to be sent with that fwmark. The
@@ -482,7 +508,12 @@ tcp_fwmark_accept - BOOLEAN
have a fwmark set via setsockopt(SOL_SOCKET, SO_MARK, ...) are
unaffected.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_invalid_ratelimit - INTEGER
Limit the maximal rate for sending duplicate acknowledgments
@@ -528,6 +559,11 @@ tcp_l3mdev_accept - BOOLEAN
which the packets originated. Only valid when the kernel was
compiled with CONFIG_NET_L3_MASTER_DEV.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
Default: 0 (disabled)
tcp_low_latency - BOOLEAN
@@ -593,10 +629,16 @@ tcp_min_rtt_wlen - INTEGER
Default: 300
tcp_moderate_rcvbuf - BOOLEAN
- If set, TCP performs receive buffer auto-tuning, attempting to
+ If enabled, TCP performs receive buffer auto-tuning, attempting to
automatically size the buffer (no greater than tcp_rmem[2]) to
- match the size required by the path for full throughput. Enabled by
- default.
+ match the size required by the path for full throughput.
+
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
tcp_mtu_probing - INTEGER
Controls TCP Packetization-Layer Path MTU Discovery. Takes three
@@ -621,13 +663,26 @@ tcp_no_metrics_save - BOOLEAN
when the connection closes, so that connections established in the
near future can use these to set initial conditions. Usually, this
increases overall performance, but may sometimes cause performance
- degradation. If set, TCP will not cache metrics on closing
+ degradation. If enabled, TCP will not cache metrics on closing
connections.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
tcp_no_ssthresh_metrics_save - BOOLEAN
Controls whether TCP saves ssthresh metrics in the route cache.
+ If enabled, ssthresh metrics are not saved.
+
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
- Default is 1, which disables ssthresh metrics.
+ Default: 1 (enabled)
tcp_orphan_retries - INTEGER
This value influences the timeout of a locally closed TCP connection,
@@ -645,9 +700,11 @@ tcp_recovery - INTEGER
features.
========= =============================================================
- RACK: 0x1 enables the RACK loss detection for fast detection of lost
- retransmissions and tail drops. It also subsumes and disables
- RFC6675 recovery for SACK connections.
+ RACK: 0x1 enables RACK loss detection, for fast detection of lost
+ retransmissions and tail drops, and resilience to
+ reordering. Currently, setting this bit to 0 has no
+ effect, since RACK is the only supported loss detection
+ algorithm.
RACK: 0x2 makes RACK's reordering window static (min_rtt/4).
@@ -664,6 +721,11 @@ tcp_reflect_tos - BOOLEAN
This options affects both IPv4 and IPv6.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
Default: 0 (disabled)
tcp_reordering - INTEGER
@@ -685,6 +747,13 @@ tcp_retrans_collapse - BOOLEAN
On retransmit try to send bigger packets to work around bugs in
certain TCP stacks.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
+
tcp_retries1 - INTEGER
This value influences the time, after which TCP decides, that
something is wrong due to unacknowledged RTO retransmissions,
@@ -712,11 +781,16 @@ tcp_retries2 - INTEGER
which corresponds to a value of at least 8.
tcp_rfc1337 - BOOLEAN
- If set, the TCP stack behaves conforming to RFC1337. If unset,
+ If enabled, the TCP stack conforms to RFC1337. If disabled,
we are not conforming to RFC, but prevent TCP TIME_WAIT
assassination.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_rmem - vector of 3 INTEGERs: min, default, max
min: Minimal size of receive buffer used by TCP sockets.
@@ -740,6 +814,13 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
tcp_sack - BOOLEAN
Enable select acknowledgments (SACKS).
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
+
tcp_comp_sack_delay_ns - LONG INTEGER
TCP tries to reduce number of SACK sent, using a timer
based on 5% of SRTT, capped by this sysctl, in nano seconds.
@@ -762,26 +843,41 @@ tcp_comp_sack_nr - INTEGER
Default : 44
tcp_backlog_ack_defer - BOOLEAN
- If set, user thread processing socket backlog tries sending
+ If enabled, user thread processing socket backlog tries sending
one ACK for the whole queue. This helps to avoid potential
long latencies at end of a TCP socket syscall.
- Default : true
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
tcp_slow_start_after_idle - BOOLEAN
- If set, provide RFC2861 behavior and time out the congestion
+ If enabled, provide RFC2861 behavior and time out the congestion
window after an idle period. An idle period is defined at
the current RTO. If unset, the congestion window will not
be timed out after an idle period.
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
tcp_stdurg - BOOLEAN
Use the Host requirements interpretation of the TCP urgent pointer field.
- Most hosts use the older BSD interpretation, so if you turn this on
+ Most hosts use the older BSD interpretation, so if enabled,
Linux might not communicate correctly with them.
- Default: FALSE
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_synack_retries - INTEGER
Number of times SYNACKs for a passive TCP connection attempt will
@@ -838,7 +934,12 @@ tcp_migrate_req - BOOLEAN
migration by returning SK_DROP in the type of eBPF program, or
disable this option.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_fastopen - INTEGER
Enable TCP Fast Open (RFC7413) to send and accept data in the opening
@@ -1019,6 +1120,13 @@ tcp_tw_reuse_delay - UNSIGNED INTEGER
tcp_window_scaling - BOOLEAN
Enable window scaling as defined in RFC1323.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
+
tcp_shrink_window - BOOLEAN
This changes how the TCP receive window is calculated.
@@ -1026,13 +1134,15 @@ tcp_shrink_window - BOOLEAN
window can be offered, and that TCP implementations MUST ensure
that they handle a shrinking window, as specified in RFC 1122.
- - 0 - Disabled. The window is never shrunk.
- - 1 - Enabled. The window is shrunk when necessary to remain within
- the memory limit set by autotuning (sk_rcvbuf).
- This only occurs if a non-zero receive window
- scaling factor is also in effect.
+ Possible values:
+
+ - 0 (disabled) - The window is never shrunk.
+ - 1 (enabled) - The window is shrunk when necessary to remain within
+ the memory limit set by autotuning (sk_rcvbuf).
+ This only occurs if a non-zero receive window
+ scaling factor is also in effect.
- Default: 0
+ Default: 0 (disabled)
tcp_wmem - vector of 3 INTEGERs: min, default, max
min: Amount of memory reserved for send buffers for TCP sockets.
@@ -1069,16 +1179,21 @@ tcp_notsent_lowat - UNSIGNED INTEGER
Default: UINT_MAX (0xFFFFFFFF)
tcp_workaround_signed_windows - BOOLEAN
- If set, assume no receipt of a window scaling option means the
+ If enabled, assume no receipt of a window scaling option means the
remote TCP is broken and treats the window as a signed quantity.
- If unset, assume the remote TCP is not broken even if we do
+ If disabled, assume the remote TCP is not broken even if we do
not receive a window scaling option from them.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_thin_linear_timeouts - BOOLEAN
Enable dynamic triggering of linear timeouts for thin streams.
- If set, a check is performed upon retransmission by timeout to
+ If enabled, a check is performed upon retransmission by timeout to
determine if the stream is thin (less than 4 packets in flight).
As long as the stream is found to be thin, up to 6 linear
timeouts may be performed before exponential backoff mode is
@@ -1087,7 +1202,12 @@ tcp_thin_linear_timeouts - BOOLEAN
For more information on thin streams, see
Documentation/networking/tcp-thin.rst
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_limit_output_bytes - INTEGER
Controls TCP Small Queue limit per tcp socket.
@@ -1139,7 +1259,7 @@ tcp_child_ehash_entries - INTEGER
Default: 0
tcp_plb_enabled - BOOLEAN
- If set and the underlying congestion control (e.g. DCTCP) supports
+ If enabled and the underlying congestion control (e.g. DCTCP) supports
and enables PLB feature, TCP PLB (Protective Load Balancing) is
enabled. PLB is described in the following paper:
https://doi.org/10.1145/3544216.3544226. Based on PLB parameters,
@@ -1155,12 +1275,17 @@ tcp_plb_enabled - BOOLEAN
by switches to determine next hop. In either case, further host
and switch side changes will be needed.
- When set, PLB assumes that congestion signal (e.g. ECN) is made
+ If enabled, PLB assumes that congestion signal (e.g. ECN) is made
available and used by congestion control module to estimate a
congestion measure (e.g. ce_ratio). PLB needs a congestion measure to
make repathing decisions.
- Default: FALSE
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_plb_idle_rehash_rounds - INTEGER
Number of consecutive congested rounds (RTT) seen after which
@@ -1260,6 +1385,11 @@ udp_l3mdev_accept - BOOLEAN
originated. Only valid when the kernel was compiled with
CONFIG_NET_L3_MASTER_DEV.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
Default: 0 (disabled)
udp_mem - vector of 3 INTEGERs: min, pressure, max
@@ -1290,7 +1420,7 @@ udp_hash_entries - INTEGER
A negative value means the networking namespace does not own its
hash buckets and shares the initial networking namespace's one.
-udp_child_ehash_entries - INTEGER
+udp_child_hash_entries - INTEGER
Control the number of hash buckets for UDP sockets in the child
networking namespace, which must be set before clone() or unshare().
@@ -1320,19 +1450,29 @@ raw_l3mdev_accept - BOOLEAN
originated. Only valid when the kernel was compiled with
CONFIG_NET_L3_MASTER_DEV.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
Default: 1 (enabled)
CIPSOv4 Variables
=================
cipso_cache_enable - BOOLEAN
- If set, enable additions to and lookups from the CIPSO label mapping
- cache. If unset, additions are ignored and lookups always result in a
+ If enabled, allow additions to and lookups from the CIPSO label mapping
+ cache. If disabled, additions are ignored and lookups always result in a
miss. However, regardless of the setting the cache is still
invalidated when required, which means you can safely toggle this on and
off and the cache will always be "safe".
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
cipso_cache_bucket_size - INTEGER
The CIPSO label cache consists of a fixed size hash table with each
@@ -1350,17 +1490,27 @@ cipso_rbm_optfmt - BOOLEAN
This means that when set the CIPSO tag will be padded with empty
categories in order to make the packet data 32-bit aligned.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
-cipso_rbm_structvalid - BOOLEAN
- If set, do a very strict check of the CIPSO option when
- ip_options_compile() is called. If unset, relax the checks done during
+ Default: 0 (disabled)
+
+cipso_rbm_strictvalid - BOOLEAN
+ If enabled, do a very strict check of the CIPSO option when
+ ip_options_compile() is called. If disabled, relax the checks done during
ip_options_compile(). Either way is "safe" as errors are caught
elsewhere in the CIPSO processing code, but setting this to 0 (False) should
result in less work (i.e. it should be faster) but could cause problems
with other implementations that require strict checking.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
IP Variables
============
@@ -1417,10 +1567,15 @@ ip_unprivileged_port_start - INTEGER
Default: 1024
ip_nonlocal_bind - BOOLEAN
- If set, allows processes to bind() to non-local IP addresses,
+ If enabled, allows processes to bind() to non-local IP addresses,
which can be quite useful - but may break some applications.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
ip_autobind_reuse - BOOLEAN
By default, bind() does not select the ports automatically even if
@@ -1429,7 +1584,13 @@ ip_autobind_reuse - BOOLEAN
when you use bind()+connect(), but may break some applications.
The preferred solution is to use IP_BIND_ADDRESS_NO_PORT and this
option should only be set by experts.
- Default: 0
+
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
ip_dynaddr - INTEGER
If set non-zero, enables support for dynamic addresses.
@@ -1447,7 +1608,12 @@ ip_early_demux - BOOLEAN
It may add an additional cost for pure routing workloads that
reduces overall throughput, in such case you should disable it.
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
ping_group_range - 2 INTEGERS
Restrict ICMP_PROTO datagram sockets to users in the group range.
@@ -1459,31 +1625,56 @@ ping_group_range - 2 INTEGERS
tcp_early_demux - BOOLEAN
Enable early demux for established TCP sockets.
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
udp_early_demux - BOOLEAN
Enable early demux for connected UDP sockets. Disable this if
your system could experience more unconnected load.
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
icmp_echo_ignore_all - BOOLEAN
- If set non-zero, then the kernel will ignore all ICMP ECHO
+ If enabled, then the kernel will ignore all ICMP ECHO
requests sent to it.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
icmp_echo_enable_probe - BOOLEAN
- If set to one, then the kernel will respond to RFC 8335 PROBE
+ If enabled, then the kernel will respond to RFC 8335 PROBE
requests sent to it.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
icmp_echo_ignore_broadcasts - BOOLEAN
- If set non-zero, then the kernel will ignore all ICMP ECHO and
+ If enabled, then the kernel will ignore all ICMP ECHO and
TIMESTAMP requests sent to it via broadcast/multicast.
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
icmp_ratelimit - INTEGER
Limit the maximal rates for sending ICMP packets whose type matches
@@ -1540,17 +1731,22 @@ icmp_ratemask - INTEGER
icmp_ignore_bogus_error_responses - BOOLEAN
Some routers violate RFC1122 by sending bogus responses to broadcast
frames. Such violations are normally logged via a kernel warning.
- If this is set to TRUE, the kernel will not give such warnings, which
+ If enabled, the kernel will not give such warnings, which
will avoid log file clutter.
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
icmp_errors_use_inbound_ifaddr - BOOLEAN
- If zero, icmp error messages are sent with the primary address of
+ If disabled, ICMP error messages are sent with the primary address of
the exiting interface.
- If non-zero, the message will be sent with the primary address of
+ If enabled, the message will be sent with the primary address of
the interface that received the packet that caused the icmp error.
This is the behaviour many network administrators will expect from
a router. And it can make debugging complicated network layouts
@@ -1560,7 +1756,12 @@ icmp_errors_use_inbound_ifaddr - BOOLEAN
then the primary address of the first non-loopback interface that
has one will be used regardless of this setting.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
igmp_max_memberships - INTEGER
Change the maximum number of multicast groups we can subscribe to.
@@ -1690,10 +1891,10 @@ proxy_arp_pvlan - BOOLEAN
This technology is known by different names:
- In RFC 3069 it is called VLAN Aggregation.
- Cisco and Allied Telesyn call it Private VLAN.
- Hewlett-Packard call it Source-Port filtering or port-isolation.
- Ericsson call it MAC-Forced Forwarding (RFC Draft).
+ - In RFC 3069 it is called VLAN Aggregation.
+ - Cisco and Allied Telesyn call it Private VLAN.
+ - Hewlett-Packard call it Source-Port filtering or port-isolation.
+ - Ericsson call it MAC-Forced Forwarding (RFC Draft).
proxy_delay - INTEGER
Delay proxy response.
@@ -1910,8 +2111,12 @@ arp_evict_nocarrier - BOOLEAN
between access points on the same network. In most cases this should
remain as the default (1).
- - 1 - (default): Clear the ARP cache on NOCARRIER events
- - 0 - Do not clear ARP cache on NOCARRIER events
+ Possible values:
+
+ - 0 (disabled) - Do not clear ARP cache on NOCARRIER events
+ - 1 (enabled) - Clear the ARP cache on NOCARRIER events
+
+ Default: 1 (enabled)
mcast_solicit - INTEGER
The maximum number of multicast probes in INCOMPLETE state,
@@ -1934,9 +2139,23 @@ mcast_resolicit - INTEGER
disable_policy - BOOLEAN
Disable IPSEC policy (SPD) for this interface
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
disable_xfrm - BOOLEAN
Disable IPSEC encryption on this interface, whatever the policy
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
igmpv2_unsolicited_report_interval - INTEGER
The interval in milliseconds in which the next unsolicited
IGMPv1 or IGMPv2 report retransmit will take place.
@@ -1952,11 +2171,25 @@ igmpv3_unsolicited_report_interval - INTEGER
ignore_routes_with_linkdown - BOOLEAN
Ignore routes whose link is down when performing a FIB lookup.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
promote_secondaries - BOOLEAN
When a primary IP address is removed from this interface
promote a corresponding secondary IP address instead of
removing all the corresponding secondary IP addresses.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
drop_unicast_in_l2_multicast - BOOLEAN
Drop any unicast IP packets that are received in link-layer
multicast (or broadcast) frames.
@@ -1964,14 +2197,24 @@ drop_unicast_in_l2_multicast - BOOLEAN
This behavior (for multicast) is actually a SHOULD in RFC
1122, but is disabled by default for compatibility reasons.
- Default: off (0)
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
drop_gratuitous_arp - BOOLEAN
Drop all gratuitous ARP frames, for example if there's a known
good ARP proxy on the network and such frames need not be used
(or in the case of 802.11, must not be used to prevent attacks.)
- Default: off (0)
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tag - INTEGER
@@ -2015,20 +2258,24 @@ bindv6only - BOOLEAN
which restricts use of the IPv6 socket to IPv6 communication
only.
- - TRUE: disable IPv4-mapped address feature
- - FALSE: enable IPv4-mapped address feature
+ Possible values:
+
+ - 0 (disabled) - enable IPv4-mapped address feature
+ - 1 (enabled) - disable IPv4-mapped address feature
- Default: FALSE (as specified in RFC3493)
+ Default: 0 (disabled)
flowlabel_consistency - BOOLEAN
Protect the consistency (and unicity) of flow label.
You have to disable it to use IPV6_FL_F_REFLECT flag on the
flow label manager.
- - TRUE: enabled
- - FALSE: disabled
+ Possible values:
- Default: TRUE
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
auto_flowlabels - INTEGER
Automatically generate flow labels based on a flow hash of the
@@ -2054,10 +2301,13 @@ flowlabel_state_ranges - BOOLEAN
reserved for the IPv6 flow manager facility, 0x80000-0xFFFFF
is reserved for stateless flow labels as described in RFC6437.
- - TRUE: enabled
- - FALSE: disabled
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
- Default: true
flowlabel_reflect - INTEGER
Control flow label reflection. Needed for Path MTU
@@ -2125,10 +2375,13 @@ anycast_src_echo_reply - BOOLEAN
Controls the use of anycast addresses as source addresses for ICMPv6
echo reply
- - TRUE: enabled
- - FALSE: disabled
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
- Default: FALSE
idgen_delay - INTEGER
Controls the delay in seconds after which time to retry
@@ -2185,7 +2438,12 @@ skip_notify_on_dev_down - BOOLEAN
to true skips the message, making IPv4 and IPv6 on par in relying
on userspace caches to track link events and evict routes.
- Default: false (generate message)
+ Possible values:
+
+ - 0 (disabled) - generate the message
+ - 1 (enabled) - skip generating the message
+
+ Default: 0 (disabled)
nexthop_compat_mode - BOOLEAN
New nexthop API provides a means for managing nexthops independent of
@@ -2229,8 +2487,10 @@ fib_notify_on_flag_change - INTEGER
ioam6_id - INTEGER
Define the IOAM id of this node. Uses only 24 bits out of 32 in total.
- Min: 0
- Max: 0xFFFFFF
+ Possible value range:
+
+ - Min: 0
+ - Max: 0xFFFFFF
Default: 0xFFFFFF
@@ -2238,8 +2498,10 @@ ioam6_id_wide - LONG INTEGER
Define the wide IOAM id of this node. Uses only 56 bits out of 64 in
total. Can be different from ioam6_id.
- Min: 0
- Max: 0xFFFFFFFFFFFFFF
+ Possible value range:
+
+ - Min: 0
+ - Max: 0xFFFFFFFFFFFFFF
Default: 0xFFFFFFFFFFFFFF
@@ -2281,8 +2543,8 @@ conf/all/disable_ipv6 - BOOLEAN
conf/all/forwarding - BOOLEAN
Enable global IPv6 forwarding between all interfaces.
- IPv4 and IPv6 work differently here; e.g. netfilter must be used
- to control which interfaces may forward packets and which not.
+ IPv4 and IPv6 work differently here; the ``force_forwarding`` flag must
+ be used to control which interfaces may forward packets.
This also sets all interfaces' Host/Router setting
'forwarding' to the specified value. See below for details.
@@ -2292,13 +2554,30 @@ conf/all/forwarding - BOOLEAN
proxy_ndp - BOOLEAN
Do proxy ndp.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
+force_forwarding - BOOLEAN
+ Enable forwarding on this interface only -- regardless of the setting on
+ ``conf/all/forwarding``. When ``conf/all/forwarding`` is set to 0,
+ the ``force_forwarding`` flag will be reset on all interfaces.
+
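+ For example, to enable forwarding on a single interface (interface
+ name illustrative)::
+
+   sysctl -w net.ipv6.conf.eth0.force_forwarding=1
+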
fwmark_reflect - BOOLEAN
Controls the fwmark of kernel-generated IPv6 reply packets that are not
associated with a socket for example, TCP RSTs or ICMPv6 echo replies).
- If unset, these packets have a fwmark of zero. If set, they have the
+ If disabled, these packets have a fwmark of zero. If enabled, they have the
fwmark of the packet they are replying to.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
``conf/interface/*``:
Change special settings per interface.
@@ -2389,9 +2668,11 @@ ra_honor_pio_life - BOOLEAN
lifetime of an address matching a prefix sent in a Router
Advertisement Prefix Information Option.
- - If enabled, the PIO valid lifetime will always be honored.
- - If disabled, RFC4862 section 5.5.3e is used to determine
+ Possible values:
+
+ - 0 (disabled) - RFC4862 section 5.5.3e is used to determine
the valid lifetime of the address.
+ - 1 (enabled) - the PIO valid lifetime will always be honored.
Default: 0 (disabled)
@@ -2403,8 +2684,10 @@ ra_honor_pio_pflag - BOOLEAN
P-flag suppresses any effects of the A-flag within the same
PIO. For a given PIO, P=1 and A=1 is treated as A=0.
- - If disabled, the P-flag is ignored.
- - If enabled, the P-flag will disable SLAAC autoconfiguration
+ Possible values:
+
+ - 0 (disabled) - the P-flag is ignored.
+ - 1 (enabled) - the P-flag will disable SLAAC autoconfiguration
for the given Prefix Information Option.
Default: 0 (disabled)
@@ -2526,10 +2809,15 @@ mtu - INTEGER
Default: 1280 (IPv6 required minimum)
ip_nonlocal_bind - BOOLEAN
- If set, allows processes to bind() to non-local IPv6 addresses,
+ If enabled, allows processes to bind() to non-local IPv6 addresses,
which can be quite useful - but may break some applications.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
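+
+ For example, a one-line sketch to allow such non-local binds::
+
+     sysctl -w net.ipv6.ip_nonlocal_bind=1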
router_probe_interval - INTEGER
Minimum interval (in seconds) between Router Probing described
@@ -2559,7 +2847,12 @@ use_oif_addrs_only - BOOLEAN
routed via this interface are restricted to the set of addresses
configured on this interface (vis. RFC 6724, section 4).
- Default: false
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
use_tempaddr - INTEGER
Preference for Privacy Extensions (RFC3041).
@@ -2684,10 +2977,14 @@ force_tllao - BOOLEAN
ndisc_notify - BOOLEAN
Define mode for notification of address and device changes.
- * 0 - (default): do nothing
- * 1 - Generate unsolicited neighbour advertisements when device is brought
+ Possible values:
+
+ - 0 (disabled) - do nothing
+ - 1 (enabled) - Generate unsolicited neighbour advertisements when device is brought
up or hardware address changes.
+ Default: 0 (disabled)
+
ndisc_tclass - INTEGER
The IPv6 Traffic Class to use by default when sending IPv6 Neighbor
Discovery (Router Solicitation, Router Advertisement, Neighbor
@@ -2704,8 +3001,12 @@ ndisc_evict_nocarrier - BOOLEAN
not be cleared when roaming between access points on the same network.
In most cases this should remain as the default (1).
- - 1 - (default): Clear neighbor discover cache on NOCARRIER events.
- - 0 - Do not clear neighbor discovery cache on NOCARRIER events.
+ Possible values:
+
+ - 0 (disabled) - Do not clear the neighbor discovery cache on NOCARRIER events.
+ - 1 (enabled) - Clear the neighbor discovery cache on NOCARRIER events.
+
+ Default: 1 (enabled)
mldv1_unsolicited_report_interval - INTEGER
The interval in milliseconds in which the next unsolicited
@@ -2734,25 +3035,34 @@ suppress_frag_ndisc - INTEGER
optimistic_dad - BOOLEAN
Whether to perform Optimistic Duplicate Address Detection (RFC 4429).
- * 0: disabled (default)
- * 1: enabled
-
Optimistic Duplicate Address Detection for the interface will be enabled
if at least one of conf/{all,interface}/optimistic_dad is set to 1,
it will be disabled otherwise.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
+
use_optimistic - BOOLEAN
If enabled, do not classify optimistic addresses as deprecated during
source address selection. Preferred addresses will still be chosen
before optimistic addresses, subject to other ranking in the source
address selection algorithm.
- * 0: disabled (default)
- * 1: enabled
-
This will be enabled if at least one of
conf/{all,interface}/use_optimistic is set to 1, disabled otherwise.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
stable_secret - IPv6 address
This IPv6 address will be used as a secret to generate IPv6
addresses for link-local addresses and autoconfigured
@@ -2783,14 +3093,24 @@ drop_unicast_in_l2_multicast - BOOLEAN
Drop any unicast IPv6 packets that are received in link-layer
multicast (or broadcast) frames.
- By default this is turned off.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
drop_unsolicited_na - BOOLEAN
Drop all unsolicited neighbor advertisements, for example if there's
a known good NA proxy on the network and such frames need not be used
(or in the case of 802.11, must not be used to prevent attacks.)
- By default this is turned off.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
accept_untracked_na - INTEGER
Define behavior for accepting neighbor advertisements from devices that
@@ -2831,7 +3151,12 @@ enhanced_dad - BOOLEAN
The nonce option will be sent on an interface unless both of
conf/{all,interface}/enhanced_dad are set to FALSE.
- Default: TRUE
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
``icmp/*``:
===========
@@ -2860,29 +3185,49 @@ ratemask - list of comma separated ranges
Default: 0-1,3-127 (rate limit ICMPv6 errors except Packet Too Big)
echo_ignore_all - BOOLEAN
- If set non-zero, then the kernel will ignore all ICMP ECHO
+ If enabled, then the kernel will ignore all ICMP ECHO
requests sent to it over the IPv6 protocol.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
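+
+ For example, a one-line sketch to drop all ICMPv6 echo requests::
+
+     sysctl -w net.ipv6.icmp.echo_ignore_all=1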
echo_ignore_multicast - BOOLEAN
- If set non-zero, then the kernel will ignore all ICMP ECHO
+ If enabled, then the kernel will ignore all ICMP ECHO
requests sent to it over the IPv6 protocol via multicast.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
echo_ignore_anycast - BOOLEAN
- If set non-zero, then the kernel will ignore all ICMP ECHO
+ If enabled, then the kernel will ignore all ICMP ECHO
requests sent to it over the IPv6 protocol destined to anycast address.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
error_anycast_as_unicast - BOOLEAN
- If set to 1, then the kernel will respond with ICMP Errors
+ If enabled, then the kernel will respond with ICMP Errors
resulting from requests sent to it over the IPv6 protocol destined
to anycast address essentially treating anycast as unicast.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
xfrm6_gc_thresh - INTEGER
(Obsolete since linux-4.14)
@@ -2900,34 +3245,49 @@ YOSHIFUJI Hideaki / USAGI Project <yoshfuji@linux-ipv6.org>
=================================
bridge-nf-call-arptables - BOOLEAN
- - 1 : pass bridged ARP traffic to arptables' FORWARD chain.
- - 0 : disable this.
- Default: 1
+ Possible values:
+
+ - 0 (disabled) - disable this.
+ - 1 (enabled) - pass bridged ARP traffic to arptables' FORWARD chain.
+
+ Default: 1 (enabled)
bridge-nf-call-iptables - BOOLEAN
- - 1 : pass bridged IPv4 traffic to iptables' chains.
- - 0 : disable this.
- Default: 1
+ Possible values:
+
+ - 0 (disabled) - disable this.
+ - 1 (enabled) - pass bridged IPv4 traffic to iptables' chains.
+
+ Default: 1 (enabled)
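+
+ These settings are exposed under ``net.bridge`` when the br_netfilter module
+ is loaded; for example, to keep bridged IPv4 traffic out of iptables::
+
+     sysctl -w net.bridge.bridge-nf-call-iptables=0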
bridge-nf-call-ip6tables - BOOLEAN
- - 1 : pass bridged IPv6 traffic to ip6tables' chains.
- - 0 : disable this.
- Default: 1
+ Possible values:
+
+ - 0 (disabled) - disable this.
+ - 1 (enabled) - pass bridged IPv6 traffic to ip6tables' chains.
+
+ Default: 1 (enabled)
bridge-nf-filter-vlan-tagged - BOOLEAN
- - 1 : pass bridged vlan-tagged ARP/IP/IPv6 traffic to {arp,ip,ip6}tables.
- - 0 : disable this.
- Default: 0
+ Possible values:
+
+ - 0 (disabled) - disable this.
+ - 1 (enabled) - pass bridged vlan-tagged ARP/IP/IPv6 traffic to {arp,ip,ip6}tables.
+
+ Default: 0 (disabled)
bridge-nf-filter-pppoe-tagged - BOOLEAN
- - 1 : pass bridged pppoe-tagged IP/IPv6 traffic to {ip,ip6}tables.
- - 0 : disable this.
- Default: 0
+ Possible values:
+
+ - 0 (disabled) - disable this.
+ - 1 (enabled) - pass bridged pppoe-tagged IP/IPv6 traffic to {ip,ip6}tables.
+
+ Default: 0 (disabled)
bridge-nf-pass-vlan-input-dev - BOOLEAN
- 1: if bridge-nf-filter-vlan-tagged is enabled, try to find a vlan
@@ -2950,11 +3310,12 @@ addip_enable - BOOLEAN
the ability to dynamically add and remove new addresses for the SCTP
associations.
- 1: Enable extension.
+ Possible values:
- 0: Disable extension.
+ - 0 (disabled) - disable extension.
+ - 1 (enabled) - enable extension.
- Default: 0
+ Default: 0 (disabled)
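+
+ For example, a one-line sketch to turn the extension on at runtime::
+
+     sysctl -w net.sctp.addip_enable=1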
pf_enable - INTEGER
Enable or disable pf (pf is short for potentially failed) state. A value
@@ -2969,31 +3330,27 @@ pf_enable - INTEGER
https://datatracker.ietf.org/doc/draft-ietf-tsvwg-sctp-failover for
details.
- 1: Enable pf.
+ Possible values:
- 0: Disable pf.
+ - 0: Disable pf.
+ - 1: Enable pf.
Default: 1
pf_expose - INTEGER
Unset or enable/disable pf (pf is short for potentially failed) state
exposure. Applications can control the exposure of the PF path state
- in the SCTP_PEER_ADDR_CHANGE event and the SCTP_GET_PEER_ADDR_INFO
- sockopt. When it's unset, no SCTP_PEER_ADDR_CHANGE event with
- SCTP_ADDR_PF state will be sent and a SCTP_PF-state transport info
- can be got via SCTP_GET_PEER_ADDR_INFO sockopt; When it's enabled,
- a SCTP_PEER_ADDR_CHANGE event will be sent for a transport becoming
- SCTP_PF state and a SCTP_PF-state transport info can be got via
- SCTP_GET_PEER_ADDR_INFO sockopt; When it's disabled, no
- SCTP_PEER_ADDR_CHANGE event will be sent and it returns -EACCES when
- trying to get a SCTP_PF-state transport info via SCTP_GET_PEER_ADDR_INFO
- sockopt.
-
- 0: Unset pf state exposure, Compatible with old applications.
+ in the SCTP_PEER_ADDR_CHANGE event and access of SCTP_PF-state
+ transport info via SCTP_GET_PEER_ADDR_INFO sockopt.
- 1: Disable pf state exposure.
+ Possible values:
- 2: Enable pf state exposure.
+ - 0: Unset pf state exposure (compatible with old applications). No
+ event will be sent but the transport info can be queried.
+ - 1: Disable pf state exposure. No event will be sent and trying to
+ obtain transport info will return -EACCES.
+ - 2: Enable pf state exposure. The event will be sent for a transport
+ becoming SCTP_PF state and transport info can be obtained.
Default: 0
@@ -3023,19 +3380,23 @@ auth_enable - BOOLEAN
required for secure operation of Dynamic Address Reconfiguration
(ADD-IP) extension.
- - 1: Enable this extension.
- - 0: Disable this extension.
+ Possible values:
- Default: 0
+ - 0 (disabled) - disable extension.
+ - 1 (enabled) - enable extension.
+
+ Default: 0 (disabled)
prsctp_enable - BOOLEAN
Enable or disable the Partial Reliability extension (RFC3758) which
is used to notify peers that a given DATA should no longer be expected.
- - 1: Enable extension
- - 0: Disable
+ Possible values:
- Default: 1
+ - 0 (disabled) - disable extension.
+ - 1 (enabled) - enable extension.
+
+ Default: 1 (enabled)
max_burst - INTEGER
The limit of the number of new packets that can be initially sent. It
@@ -3135,10 +3496,12 @@ cookie_preserve_enable - BOOLEAN
Enable or disable the ability to extend the lifetime of the SCTP cookie
that is used during the establishment phase of SCTP association
- - 1: Enable cookie lifetime extension.
- - 0: Disable
+ Possible values:
+
+ - 0 (disabled) - disable.
+ - 1 (enabled) - enable cookie lifetime extension.
- Default: 1
+ Default: 1 (enabled)
cookie_hmac_alg - STRING
Select the hmac algorithm used when generating the cookie value sent by
@@ -3183,13 +3546,11 @@ sndbuf_policy - INTEGER
sctp_mem - vector of 3 INTEGERs: min, pressure, max
Number of pages allowed for queueing by all SCTP sockets.
- min: Below this number of pages SCTP is not bothered about its
- memory appetite. When amount of memory allocated by SCTP exceeds
- this number, SCTP starts to moderate memory usage.
-
- pressure: This value was introduced to follow format of tcp_mem.
-
- max: Number of pages allowed for queueing by all SCTP sockets.
+ * min: Below this number of pages SCTP is not bothered about its
+ memory usage. When amount of memory allocated by SCTP exceeds
+ this number, SCTP starts to moderate memory usage.
+ * pressure: This value was introduced to follow format of tcp_mem.
+ * max: Maximum number of allowed pages.
Default is calculated at boot time from amount of available memory.
@@ -3197,9 +3558,9 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
Only the first value ("min") is used, "default" and "max" are
ignored.
- min: Minimal size of receive buffer used by SCTP socket.
- It is guaranteed to each SCTP socket (but not association) even
- under moderate memory pressure.
+ * min: Minimal size of receive buffer used by SCTP socket.
+ It is guaranteed to each SCTP socket (but not association) even
+ under moderate memory pressure.
Default: 4K
@@ -3207,14 +3568,16 @@ sctp_wmem - vector of 3 INTEGERs: min, default, max
Only the first value ("min") is used, "default" and "max" are
ignored.
- min: Minimum size of send buffer that can be used by SCTP sockets.
- It is guaranteed to each SCTP socket (but not association) even
- under moderate memory pressure.
+ * min: Minimum size of send buffer that can be used by SCTP sockets.
+ It is guaranteed to each SCTP socket (but not association) even
+ under moderate memory pressure.
Default: 4K
addr_scope_policy - INTEGER
- Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00
+ Control IPv4 address scoping (see
+ https://datatracker.ietf.org/doc/draft-stewart-tsvwg-sctp-ipv4/00/
+ for details).
- 0 - Disable IPv4 address scoping
- 1 - Enable IPv4 address scoping
@@ -3272,10 +3635,12 @@ reconf_enable - BOOLEAN
a stream, and it includes the Parameters of "Outgoing/Incoming SSN
Reset", "SSN/TSN Reset" and "Add Outgoing/Incoming Streams".
- - 1: Enable extension.
- - 0: Disable extension.
+ Possible values:
- Default: 0
+ - 0 (disabled) - Disable extension.
+ - 1 (enabled) - Enable extension.
+
+ Default: 0 (disabled)
intl_enable - BOOLEAN
Enable or disable extension of User Message Interleaving functionality
@@ -3286,10 +3651,12 @@ intl_enable - BOOLEAN
to 1 and also needs to set socket options SCTP_FRAGMENT_INTERLEAVE to 2
and SCTP_INTERLEAVING_SUPPORTED to 1.
- - 1: Enable extension.
- - 0: Disable extension.
+ Possible values:
- Default: 0
+ - 0 (disabled) - Disable extension.
+ - 1 (enabled) - Enable extension.
+
+ Default: 0 (disabled)
ecn_enable - BOOLEAN
Control use of Explicit Congestion Notification (ECN) by SCTP.
@@ -3298,10 +3665,12 @@ ecn_enable - BOOLEAN
due to congestion by allowing supporting routers to signal congestion
before having to drop packets.
- 1: Enable ecn.
- 0: Disable ecn.
+ Possible values:
- Default: 1
+ - 0 (disabled) - Disable ecn.
+ - 1 (enabled) - Enable ecn.
+
+ Default: 1 (enabled)
l3mdev_accept - BOOLEAN
Enabling this option allows a "global" bound socket to work
@@ -3310,6 +3679,11 @@ l3mdev_accept - BOOLEAN
originated. Only valid when the kernel was compiled with
CONFIG_NET_L3_MASTER_DEV.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
Default: 1 (enabled)
diff --git a/Documentation/networking/napi.rst b/Documentation/networking/napi.rst
index d0e3953cae6a..a15754adb041 100644
--- a/Documentation/networking/napi.rst
+++ b/Documentation/networking/napi.rst
@@ -444,7 +444,14 @@ dependent). The NAPI instance IDs will be assigned in the opposite
order than the process IDs of the kernel threads.
Threaded NAPI is controlled by writing 0/1 to the ``threaded`` file in
-netdev's sysfs directory.
+netdev's sysfs directory. It can also be enabled for a specific NAPI instance
+using the netlink interface.
+
+For example, using the ``ynl`` script:
+
+.. code-block:: bash
+
+ $ ynl --family netdev --do napi-set --json='{"id": 66, "threaded": 1}'
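+
+The sysfs file can be written directly as well; for example (the interface
+name is illustrative):
+
+.. code-block:: bash
+
+ $ echo 1 > /sys/class/net/eth0/threaded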
.. rubric:: Footnotes
diff --git a/Documentation/networking/net_cachelines/net_device.rst b/Documentation/networking/net_cachelines/net_device.rst
index c69cc89c958e..1c19bb7705df 100644
--- a/Documentation/networking/net_cachelines/net_device.rst
+++ b/Documentation/networking/net_cachelines/net_device.rst
@@ -68,6 +68,7 @@ unsigned_char addr_assign_type
unsigned_char addr_len
unsigned_char upper_level
unsigned_char lower_level
+u8 threaded napi_poll(napi_enable,netif_set_threaded)
unsigned_short neigh_priv_len
unsigned_short padded
unsigned_short dev_id
@@ -165,7 +166,6 @@ struct sfp_bus* sfp_bus
struct lock_class_key* qdisc_tx_busylock
bool proto_down
unsigned:1 wol_enabled
-unsigned:1 threaded napi_poll(napi_enable,dev_set_threaded)
unsigned_long:1 see_all_hwtstamp_requests
unsigned_long:1 change_proto_down
unsigned_long:1 netns_immutable
diff --git a/Documentation/networking/net_cachelines/snmp.rst b/Documentation/networking/net_cachelines/snmp.rst
index bd44b3eebbef..bce4eb35ec48 100644
--- a/Documentation/networking/net_cachelines/snmp.rst
+++ b/Documentation/networking/net_cachelines/snmp.rst
@@ -36,6 +36,7 @@ unsigned_long LINUX_MIB_TIMEWAITRECYCLED
unsigned_long LINUX_MIB_TIMEWAITKILLED
unsigned_long LINUX_MIB_PAWSACTIVEREJECTED
unsigned_long LINUX_MIB_PAWSESTABREJECTED
+unsigned_long LINUX_MIB_BEYOND_WINDOW
unsigned_long LINUX_MIB_TSECR_REJECTED
unsigned_long LINUX_MIB_PAWS_OLD_ACK
unsigned_long LINUX_MIB_PAWS_TW_REJECTED
diff --git a/Documentation/networking/net_cachelines/tcp_sock.rst b/Documentation/networking/net_cachelines/tcp_sock.rst
index bc9b2131bf7a..7bbda5944ee2 100644
--- a/Documentation/networking/net_cachelines/tcp_sock.rst
+++ b/Documentation/networking/net_cachelines/tcp_sock.rst
@@ -115,7 +115,6 @@ u32 lost_out read_mostly read_m
u32 sacked_out read_mostly read_mostly tcp_left_out(tx);tcp_packets_in_flight(tx/rx);tcp_clean_rtx_queue(rx)
struct hrtimer pacing_timer
struct hrtimer compressed_ack_timer
-struct sk_buff* lost_skb_hint read_mostly tcp_clean_rtx_queue
struct sk_buff* retransmit_skb_hint read_mostly tcp_clean_rtx_queue
struct rb_root out_of_order_queue read_mostly tcp_data_queue,tcp_fast_path_check
struct sk_buff* ooo_last_skb
@@ -123,7 +122,6 @@ struct tcp_sack_block[1] duplicate_sack
struct tcp_sack_block[4] selective_acks
struct tcp_sack_block[4] recv_sack_cache
struct sk_buff* highest_sack read_write tcp_event_new_data_sent
-int lost_cnt_hint
u32 prior_ssthresh
u32 high_seq
u32 retrans_stamp
diff --git a/Documentation/networking/netconsole.rst b/Documentation/networking/netconsole.rst
index a0076b542e9c..59cb9982afe6 100644
--- a/Documentation/networking/netconsole.rst
+++ b/Documentation/networking/netconsole.rst
@@ -340,6 +340,38 @@ In this example, the message was sent by CPU 42.
cpu=42 # kernel-populated value
+Message ID auto population in userdata
+--------------------------------------
+
+Within the netconsole configfs hierarchy, there is a file named ``msgid_enabled``
+located in the ``userdata`` directory. This file controls the message ID
+auto-population feature, which assigns a numeric ID to each message sent to a
+given target and appends the ID to the userdata dictionary in every message sent.
+
+The message ID is generated using a per-target 32-bit counter that is
+incremented for every message sent to the target. Note that this counter will
+eventually wrap around after reaching the uint32_t maximum value, so the
+message ID is not globally unique over time. However, the target can still use
+it to detect whether messages were dropped in transit by identifying gaps in
+the sequence of IDs.
+
+It is important to distinguish message IDs from the message <sequnum> field.
+Some kernel messages may never reach netconsole (for example, due to printk
+rate limiting). Thus, a gap in <sequnum> cannot be solely relied upon to
+indicate that a message was dropped during transmission, as it may never have
+been sent via netconsole. The message ID, on the other hand, is only assigned
+to messages that are actually transmitted via netconsole.
+
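+The feature is controlled per target; for example, to enable it for a target
+named ``cmdline0`` (the target name is illustrative)::
+
+ echo 1 > /sys/kernel/config/netconsole/cmdline0/userdata/msgid_enabled
+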
+Example::
+
+ echo "This is message #1" > /dev/kmsg
+ echo "This is message #2" > /dev/kmsg
+ 13,434,54928466,-;This is message #1
+ msgid=1
+ 13,435,54934019,-;This is message #2
+ msgid=2
+
+
Extended console:
=================
diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst
index 238b66d0e059..35f889259fcd 100644
--- a/Documentation/networking/nf_conntrack-sysctl.rst
+++ b/Documentation/networking/nf_conntrack-sysctl.rst
@@ -85,7 +85,6 @@ nf_conntrack_log_invalid - INTEGER
- 1 - log ICMP packets
- 6 - log TCP packets
- 17 - log UDP packets
- - 33 - log DCCP packets
- 41 - log ICMPv6 packets
- 136 - log UDPLITE packets
- 255 - log packets of any protocol
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
index f64641417c54..7f159043ad5a 100644
--- a/Documentation/networking/phy.rst
+++ b/Documentation/networking/phy.rst
@@ -333,6 +333,13 @@ Some of the interface modes are described below:
SerDes lane, each port having speeds of 2.5G / 1G / 100M / 10M achieved
through symbol replication. The PCS expects the standard USXGMII code word.
+``PHY_INTERFACE_MODE_MIILITE``
+ Non-standard, simplified MII mode, without TXER, RXER, CRS and COL signals
+ as defined for the MII. The absence of COL signal makes half-duplex link
+ modes impossible but does not interfere with BroadR-Reach link modes on
+ Broadcom (and other two-wire Ethernet) PHYs, because they are full-duplex
+ only.
+
Pause frames / flow control
===========================
diff --git a/Documentation/networking/xdp-rx-metadata.rst b/Documentation/networking/xdp-rx-metadata.rst
index a6e0ece18be5..ce96f4c99505 100644
--- a/Documentation/networking/xdp-rx-metadata.rst
+++ b/Documentation/networking/xdp-rx-metadata.rst
@@ -120,6 +120,39 @@ It is possible to query which kfunc the particular netdev implements via
netlink. See ``xdp-rx-metadata-features`` attribute set in
``Documentation/netlink/specs/netdev.yaml``.
+Driver Implementation
+=====================
+
+Certain devices may prepend metadata to received packets. However, as of now,
+``AF_XDP`` lacks the ability to communicate the size of the ``data_meta`` area
+to the consumer. Therefore, it is the responsibility of the driver to copy any
+device-reserved metadata out from the metadata area and ensure that
+``xdp_buff->data_meta`` is pointing to ``xdp_buff->data`` before presenting the
+frame to the XDP program. This is necessary so that, after the XDP program
+adjusts the metadata area, the consumer can reliably retrieve the metadata
+address using the ``METADATA_SIZE`` offset.
+
+The following diagram shows how custom metadata is positioned relative to the
+packet data and how pointers are adjusted for metadata access::
+
+ |<-- bpf_xdp_adjust_meta(xdp_buff, -METADATA_SIZE) --|
+ new xdp_buff->data_meta old xdp_buff->data_meta
+ | |
+ | xdp_buff->data
+ | |
+ +----------+----------------------------------------------------+------+
+ | headroom | custom metadata | data |
+ +----------+----------------------------------------------------+------+
+ | |
+ | xdp_desc->addr
+ |<------ xsk_umem__get_data() - METADATA_SIZE -------|
+
+``bpf_xdp_adjust_meta`` ensures that ``METADATA_SIZE`` is aligned to 4 bytes,
+does not exceed 252 bytes, and leaves sufficient space for building the
+xdp_frame. If these conditions are not met, it returns a negative error. In this
+case, the BPF program should not proceed to populate data into the ``data_meta``
+area.
+
Example
=======
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index b14bd5b7cbc9..bccfa19b45df 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -43,7 +43,6 @@ util-linux 2.10o mount --version
kmod 13 depmod -V
e2fsprogs 1.41.4 e2fsck -V
jfsutils 1.1.3 fsck.jfs -V
-reiserfsprogs 3.6.3 reiserfsck -V
xfsprogs 2.6.0 xfs_db -V
squashfs-tools 4.0 mksquashfs -version
btrfs-progs 0.18 btrfs --version
@@ -262,14 +261,6 @@ The following utilities are available:
- other file system utilities are also available in this package.
-Reiserfsprogs
--------------
-
-The reiserfsprogs package should be used for reiserfs-3.6.x
-(Linux kernels 2.4.x). It is a combined package and contains working
-versions of ``mkreiserfs``, ``resize_reiserfs``, ``debugreiserfs`` and
-``reiserfsck``. These utils work on both i386 and alpha platforms.
-
Xfsprogs
--------
@@ -493,11 +484,6 @@ JFSutils
- <https://jfs.sourceforge.net/>
-Reiserfsprogs
--------------
-
-- <https://git.kernel.org/pub/scm/linux/kernel/git/jeffm/reiserfsprogs.git/>
-
Xfsprogs
--------
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index 19d2ed47ff79..d1a8e5465ed9 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -614,7 +614,10 @@ it.
When commenting the kernel API functions, please use the kernel-doc format.
See the files at :ref:`Documentation/doc-guide/ <doc_guide>` and
-``scripts/kernel-doc`` for details.
+``scripts/kernel-doc`` for details. Note that the danger of over-commenting
+applies to kernel-doc comments all the same. Do not add boilerplate
+kernel-doc which simply reiterates what's obvious from the signature
+of the function.
The preferred style for long (multi-line) comments is:
diff --git a/Documentation/scheduler/sched-deadline.rst b/Documentation/scheduler/sched-deadline.rst
index a727827b8dd5..ec543a12f848 100644
--- a/Documentation/scheduler/sched-deadline.rst
+++ b/Documentation/scheduler/sched-deadline.rst
@@ -20,7 +20,8 @@ Deadline Task Scheduling
4.3 Default behavior
4.4 Behavior of sched_yield()
5. Tasks CPU affinity
- 5.1 SCHED_DEADLINE and cpusets HOWTO
+ 5.1 Using cgroup v1 cpuset controller
+ 5.2 Using cgroup v2 cpuset controller
6. Future plans
A. Test suite
B. Minimal main()
@@ -671,15 +672,17 @@ Deadline Task Scheduling
5. Tasks CPU affinity
=====================
- -deadline tasks cannot have an affinity mask smaller that the entire
- root_domain they are created on. However, affinities can be specified
- through the cpuset facility (Documentation/admin-guide/cgroup-v1/cpusets.rst).
+ Deadline tasks cannot have a cpu affinity mask smaller than the root domain they
+ are created on. So, using ``sched_setaffinity(2)`` won't work. Instead, the
+ deadline task should be created in a restricted root domain. This can be
+ done using the cpuset controller of either cgroup v1 (deprecated) or cgroup v2.
+ See :ref:`Documentation/admin-guide/cgroup-v1/cpusets.rst <cpusets>` and
+ :ref:`Documentation/admin-guide/cgroup-v2.rst <cgroup-v2>` for more information.
-5.1 SCHED_DEADLINE and cpusets HOWTO
-------------------------------------
+5.1 Using cgroup v1 cpuset controller
+-------------------------------------
- An example of a simple configuration (pin a -deadline task to CPU0)
- follows (rt-app is used to create a -deadline task)::
+ An example of a simple configuration (pin a -deadline task to CPU0) follows::
mkdir /dev/cpuset
mount -t cgroup -o cpuset cpuset /dev/cpuset
@@ -692,8 +695,20 @@ Deadline Task Scheduling
echo 1 > cpu0/cpuset.cpu_exclusive
echo 1 > cpu0/cpuset.mem_exclusive
echo $$ > cpu0/tasks
- rt-app -t 100000:10000:d:0 -D5 # it is now actually superfluous to specify
- # task affinity
+ chrt --sched-runtime 100000 --sched-period 200000 --deadline 0 yes > /dev/null
+
+5.2 Using cgroup v2 cpuset controller
+-------------------------------------
+
+ Assuming the cgroup v2 root is mounted at ``/sys/fs/cgroup``::
+
+ cd /sys/fs/cgroup
+ echo '+cpuset' > cgroup.subtree_control
+ mkdir deadline_group
+ echo 0 > deadline_group/cpuset.cpus
+ echo 'root' > deadline_group/cpuset.cpus.partition
+ echo $$ > deadline_group/cgroup.procs
+ chrt --sched-runtime 100000 --sched-period 200000 --deadline 0 yes > /dev/null
6. Future plans
===============
@@ -731,24 +746,38 @@ Appendix A. Test suite
behaves under such workloads. In this way, results are easily reproducible.
rt-app is available at: https://github.com/scheduler-tools/rt-app.
- Thread parameters can be specified from the command line, with something like
- this::
-
- # rt-app -t 100000:10000:d -t 150000:20000:f:10 -D5
-
- The above creates 2 threads. The first one, scheduled by SCHED_DEADLINE,
- executes for 10ms every 100ms. The second one, scheduled at SCHED_FIFO
- priority 10, executes for 20ms every 150ms. The test will run for a total
- of 5 seconds.
-
- More interestingly, configurations can be described with a json file that
- can be passed as input to rt-app with something like this::
-
- # rt-app my_config.json
-
- The parameters that can be specified with the second method are a superset
- of the command line options. Please refer to rt-app documentation for more
- details (`<rt-app-sources>/doc/*.json`).
+ rt-app does not accept command line arguments, and instead reads from a JSON
+ configuration file. Here is an example ``config.json``:
+
+ .. code-block:: json
+
+ {
+ "tasks": {
+ "dl_task": {
+ "policy": "SCHED_DEADLINE",
+ "priority": 0,
+ "dl-runtime": 10000,
+ "dl-period": 100000,
+ "dl-deadline": 100000
+ },
+ "fifo_task": {
+ "policy": "SCHED_FIFO",
+ "priority": 10,
+ "runtime": 20000,
+ "sleep": 130000
+ }
+ },
+ "global": {
+ "duration": 5
+ }
+ }
+
+ On running ``rt-app config.json``, it creates 2 threads. The first one,
+ scheduled by SCHED_DEADLINE, executes for 10ms every 100ms. The second one,
+ scheduled at SCHED_FIFO priority 10, executes for 20ms every 150ms. The test
+ will run for a total of 5 seconds.
+
+ Please refer to the rt-app documentation for the JSON schema and more examples.
The second testing application is done using chrt which has support
for SCHED_DEADLINE.
diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst
index a1869c38046e..404fe6126a76 100644
--- a/Documentation/scheduler/sched-ext.rst
+++ b/Documentation/scheduler/sched-ext.rst
@@ -313,16 +313,21 @@ by a sched_ext scheduler:
ops.runnable(); /* Task becomes ready to run */
while (task is runnable) {
- if (task is not in a DSQ) {
+ if (task is not in a DSQ && task->scx.slice == 0) {
ops.enqueue(); /* Task can be added to a DSQ */
- /* A CPU becomes available */
+ /* Any usable CPU becomes available */
ops.dispatch(); /* Task is moved to a local DSQ */
}
ops.running(); /* Task starts running on its assigned CPU */
- ops.tick(); /* Called every 1/HZ seconds */
+ while (task->scx.slice > 0 && task is runnable)
+ ops.tick(); /* Called every 1/HZ seconds */
ops.stopping(); /* Task stops running (time slice expires or wait) */
+
+ /* Task's CPU becomes available */
+
+ ops.dispatch(); /* task->scx.slice can be refilled */
}
ops.quiescent(); /* Task releases its assigned CPU (wait) */
diff --git a/Documentation/scheduler/sched-stats.rst b/Documentation/scheduler/sched-stats.rst
index d82e7d2b54f0..9d6a337755f4 100644
--- a/Documentation/scheduler/sched-stats.rst
+++ b/Documentation/scheduler/sched-stats.rst
@@ -86,13 +86,16 @@ Domain statistics
-----------------
One of these is produced per domain for each cpu described. (Note that if
CONFIG_SMP is not defined, *no* domains are utilized and these lines
-will not appear in the output. <name> is an extension to the domain field
-that prints the name of the corresponding sched domain. It can appear in
-schedstat version 17 and above.
+will not appear in the output.)
domain<N> <name> <cpumask> 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45
-The first field is a bit mask indicating what cpus this domain operates over.
+The <name> field prints the name of the sched domain and is only supported
+with schedstat version >= 17. On previous versions, <cpumask> is the first
+field.
+
+The <cpumask> field is a bit mask indicating what cpus this domain operates
+over.
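+
+For example, the per-domain lines for all cpus can be listed with::
+
+    grep ^domain /proc/schedstat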
The next 33 are a variety of sched_balance_rq() statistics grouped into types
of idleness (busy, idle and newly idle):
@@ -103,12 +106,13 @@ of idleness (busy, idle and newly idle):
load did not require balancing when busy
3) # of times in this domain sched_balance_rq() tried to move one or
more tasks and failed, when the cpu was busy
- 4) Total imbalance in load when the cpu was busy
- 5) Total imbalance in utilization when the cpu was busy
- 6) Total imbalance in number of tasks when the cpu was busy
- 7) Total imbalance due to misfit tasks when the cpu was busy
- 8) # of times in this domain pull_task() was called when busy
- 9) # of times in this domain pull_task() was called even though the
+ 4) Total imbalance in load in this domain when the cpu was busy
+ 5) Total imbalance in utilization in this domain when the cpu was busy
+ 6) Total imbalance in number of tasks in this domain when the cpu was busy
+ 7) Total imbalance due to misfit tasks in this domain when the cpu was
+ busy
+ 8) # of times in this domain detach_task() was called when busy
+ 9) # of times in this domain detach_task() was called even though the
target task was cache-hot when busy
10) # of times in this domain sched_balance_rq() was called but did not
find a busier queue while the cpu was busy
@@ -121,13 +125,14 @@ of idleness (busy, idle and newly idle):
the load did not require balancing when the cpu was idle
14) # of times in this domain sched_balance_rq() tried to move one or
more tasks and failed, when the cpu was idle
- 15) Total imbalance in load when the cpu was idle
- 16) Total imbalance in utilization when the cpu was idle
- 17) Total imbalance in number of tasks when the cpu was idle
- 18) Total imbalance due to misfit tasks when the cpu was idle
- 19) # of times in this domain pull_task() was called when the cpu
+ 15) Total imbalance in load in this domain when the cpu was idle
+ 16) Total imbalance in utilization in this domain when the cpu was idle
+ 17) Total imbalance in number of tasks in this domain when the cpu was idle
+ 18) Total imbalance due to misfit tasks in this domain when the cpu was
+ idle
+ 19) # of times in this domain detach_task() was called when the cpu
was idle
- 20) # of times in this domain pull_task() was called even though
+ 20) # of times in this domain detach_task() was called even though
the target task was cache-hot when idle
21) # of times in this domain sched_balance_rq() was called but did
not find a busier queue while the cpu was idle
@@ -140,12 +145,16 @@ of idleness (busy, idle and newly idle):
load did not require balancing when the cpu was just becoming idle
25) # of times in this domain sched_balance_rq() tried to move one or more
tasks and failed, when the cpu was just becoming idle
- 26) Total imbalance in load when the cpu was just becoming idle
- 27) Total imbalance in utilization when the cpu was just becoming idle
- 28) Total imbalance in number of tasks when the cpu was just becoming idle
- 29) Total imbalance due to misfit tasks when the cpu was just becoming idle
- 30) # of times in this domain pull_task() was called when newly idle
- 31) # of times in this domain pull_task() was called even though the
+ 26) Total imbalance in load in this domain when the cpu was just becoming
+ idle
+ 27) Total imbalance in utilization in this domain when the cpu was just
+ becoming idle
+ 28) Total imbalance in number of tasks in this domain when the cpu was just
+ becoming idle
+ 29) Total imbalance due to misfit tasks in this domain when the cpu was
+ just becoming idle
+ 30) # of times in this domain detach_task() was called when newly idle
+ 31) # of times in this domain detach_task() was called even though the
target task was cache-hot when just becoming idle
32) # of times in this domain sched_balance_rq() was called but did not
find a busier queue while the cpu was just becoming idle
diff --git a/Documentation/scsi/scsi_fc_transport.rst b/Documentation/scsi/scsi_fc_transport.rst
index e3ddcfb7f8fd..5ef75575924e 100644
--- a/Documentation/scsi/scsi_fc_transport.rst
+++ b/Documentation/scsi/scsi_fc_transport.rst
@@ -30,7 +30,40 @@ This file is found at Documentation/scsi/scsi_fc_transport.rst
FC Remote Ports (rports)
========================
-<< To Be Supplied >>
+
+ In the Fibre Channel (FC) subsystem, a remote port (rport) refers to a
+ remote Fibre Channel node that the local port can communicate with.
+ These are typically storage targets (e.g., arrays, tapes) that respond
+ to SCSI commands over FC transport.
+
+ In Linux, rports are managed by the FC transport class and are
+ represented in sysfs under:
+
+ /sys/class/fc_remote_ports/
+
+ Each rport directory contains attributes describing the remote port,
+ such as port ID, node name, port state, and link speed.
+
+ rports are typically created by the FC transport when a new device is
+ discovered during a fabric login or scan, and they persist until the
+ device is removed or the link is lost.
+
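+ For example, an attribute can be read directly from sysfs (the rport
+ instance name follows the rport-<host>:<channel>-<number> convention and is
+ illustrative)::
+
+   cat /sys/class/fc_remote_ports/rport-0:0-1/port_state
+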
+ Common attributes:
+
+ - node_name: World Wide Node Name (WWNN).
+ - port_name: World Wide Port Name (WWPN).
+ - port_id: FC address of the remote port.
+ - roles: Indicates if the port is an initiator, target, or both.
+ - port_state: Shows the current operational state.
+
+ After discovering a remote port, the driver typically populates a
+ fc_rport_identifiers structure and invokes fc_remote_port_add() to
+ create and register the remote port with the SCSI subsystem via the
+ Fibre Channel (FC) transport class.
+
+ rports are also visible via sysfs as children of the FC host adapter.
+
+ For developers: use fc_remote_port_add() and fc_remote_port_delete() when
+ implementing a driver that interacts with the FC transport class.
FC Virtual Ports (vports)
diff --git a/Documentation/sphinx-static/custom.css b/Documentation/sphinx-static/custom.css
index f4285417c71a..06cedbae095c 100644
--- a/Documentation/sphinx-static/custom.css
+++ b/Documentation/sphinx-static/custom.css
@@ -136,3 +136,18 @@ div.language-selection:hover ul {
div.language-selection ul li:hover {
background: #dddddd;
}
+
+/* Make xrefs more universally visible */
+a.reference, a.reference:hover {
+ border-bottom: none;
+ text-decoration: underline;
+ text-underline-offset: 0.3em;
+}
+
+/* Slightly different style for sidebar links */
+div.sphinxsidebar a { border-bottom: none; }
+div.sphinxsidebar a:hover {
+ border-bottom: none;
+ text-decoration: underline;
+ text-underline-offset: 0.3em;
+}
diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py
index fd633f7a0bc3..563033f764bb 100644
--- a/Documentation/sphinx/automarkup.py
+++ b/Documentation/sphinx/automarkup.py
@@ -23,12 +23,6 @@ from kernel_abi import get_kernel_abi
RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=re.ASCII)
#
-# Sphinx 2 uses the same :c:type role for struct, union, enum and typedef
-#
-RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)',
- flags=re.ASCII)
-
-#
# Sphinx 3 uses a different C role for each one of struct, union, enum and
# typedef
#
@@ -150,20 +144,12 @@ def markup_func_ref_sphinx3(docname, app, match):
return target_text
def markup_c_ref(docname, app, match):
- class_str = {# Sphinx 2 only
- RE_function: 'c-func',
- RE_generic_type: 'c-type',
- # Sphinx 3+ only
- RE_struct: 'c-struct',
+ class_str = {RE_struct: 'c-struct',
RE_union: 'c-union',
RE_enum: 'c-enum',
RE_typedef: 'c-type',
}
- reftype_str = {# Sphinx 2 only
- RE_function: 'function',
- RE_generic_type: 'type',
- # Sphinx 3+ only
- RE_struct: 'struct',
+ reftype_str = {RE_struct: 'struct',
RE_union: 'union',
RE_enum: 'enum',
RE_typedef: 'type',
@@ -249,8 +235,13 @@ def add_and_resolve_xref(app, docname, domain, reftype, target, contnode=None):
if xref:
return xref
-
- return None
+ #
+ # We didn't find the xref; if a container node was supplied,
+ # mark it as a broken xref
+ #
+ if contnode:
+ contnode['classes'].append("broken_xref")
+ return contnode
#
# Variant of markup_abi_ref() that warns whan a reference is not found
diff --git a/Documentation/sphinx/cdomain.py b/Documentation/sphinx/cdomain.py
index e8ea80d4324c..3dc285dc70f5 100644
--- a/Documentation/sphinx/cdomain.py
+++ b/Documentation/sphinx/cdomain.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8; mode: python -*-
+# SPDX-License-Identifier: GPL-2.0
# pylint: disable=W0141,C0113,C0103,C0325
"""
cdomain
diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py
index db6f0380de94..4c4375201b9e 100644
--- a/Documentation/sphinx/kernel_abi.py
+++ b/Documentation/sphinx/kernel_abi.py
@@ -146,8 +146,10 @@ class KernelCmd(Directive):
n += 1
if f != old_f:
- # Add the file to Sphinx build dependencies
- env.note_dependency(os.path.abspath(f))
+ # Add the file to Sphinx build dependencies if the file exists
+ fname = os.path.join(srctree, f)
+ if os.path.isfile(fname):
+ env.note_dependency(fname)
old_f = f
diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
index 8db176045bc5..1e566e87ebcd 100755
--- a/Documentation/sphinx/kernel_include.py
+++ b/Documentation/sphinx/kernel_include.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8; mode: python -*-
+# SPDX-License-Identifier: GPL-2.0
# pylint: disable=R0903, C0330, R0914, R0912, E0401
"""
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index b818d4c77924..2586b4d4e494 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -1,4 +1,5 @@
# coding=utf-8
+# SPDX-License-Identifier: MIT
#
# Copyright © 2016 Intel Corporation
#
@@ -24,8 +25,6 @@
# Authors:
# Jani Nikula <jani.nikula@intel.com>
#
-# Please make sure this works on both python2 and python3.
-#
import codecs
import os
diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py
index f1a7f13c9c60..ad495c0da270 100644
--- a/Documentation/sphinx/kfigure.py
+++ b/Documentation/sphinx/kfigure.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8; mode: python -*-
+# SPDX-License-Identifier: GPL-2.0
# pylint: disable=C0103, R0903, R0912, R0915
"""
scalable figure and image handling
diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py
index ec50e1ee5223..1afb0c97f06b 100644
--- a/Documentation/sphinx/load_config.py
+++ b/Documentation/sphinx/load_config.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8; mode: python -*-
+# SPDX-License-Identifier: GPL-2.0
# pylint: disable=R0903, C0330, R0914, R0912, E0401
import os
diff --git a/Documentation/sphinx/min_requirements.txt b/Documentation/sphinx/min_requirements.txt
new file mode 100644
index 000000000000..96b5e0bfa3d7
--- /dev/null
+++ b/Documentation/sphinx/min_requirements.txt
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
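+# For example, install into a virtualenv with:
+#   pip install -r Documentation/sphinx/min_requirements.txt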
+alabaster >=0.7,<0.8
+docutils>=0.15,<0.18
+jinja2>=2.3,<3.1
+PyYAML>=5.1,<6.1
+Sphinx==3.4.3
+sphinxcontrib-applehelp==1.0.2
+sphinxcontrib-devhelp==1.0.1
+sphinxcontrib-htmlhelp==1.0.3
+sphinxcontrib-qthelp==1.0.2
+sphinxcontrib-serializinghtml==1.1.4
diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl
index b063f2f1cfb2..7b1458544e2e 100755
--- a/Documentation/sphinx/parse-headers.pl
+++ b/Documentation/sphinx/parse-headers.pl
@@ -1,4 +1,7 @@
#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@kernel.org>.
+
use strict;
use Text::Tabs;
use Getopt::Long;
@@ -391,7 +394,7 @@ Report bugs to Mauro Carvalho Chehab <mchehab@kernel.org>
=head1 COPYRIGHT
-Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>.
+Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@kernel.org>.
License GPLv2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>.
diff --git a/Documentation/sphinx/requirements.txt b/Documentation/sphinx/requirements.txt
index 5017f307c8a4..76b4255061d0 100644
--- a/Documentation/sphinx/requirements.txt
+++ b/Documentation/sphinx/requirements.txt
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
alabaster
Sphinx
pyyaml
diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py
index 180fbb50c337..3d19569e5728 100755
--- a/Documentation/sphinx/rstFlatTable.py
+++ b/Documentation/sphinx/rstFlatTable.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8; mode: python -*-
+# SPDX-License-Identifier: GPL-2.0
# pylint: disable=C0330, R0903, R0912
"""
diff --git a/Documentation/tools/rtla/common_appendix.rst b/Documentation/tools/rtla/common_appendix.rst
index b5cf2dc223df..53cae7537537 100644
--- a/Documentation/tools/rtla/common_appendix.rst
+++ b/Documentation/tools/rtla/common_appendix.rst
@@ -1,3 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+EXIT STATUS
+===========
+
+::
+
+ 0 Passed: the test did not hit the stop tracing condition
+ 1 Error: invalid argument
+ 2 Failed: the test hit the stop tracing condition
+
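+The exit status can be used in scripts; for example (the options shown are
+illustrative)::
+
+    rtla timerlat top -T 50 -d 10s
+    test $? -eq 2 && echo "latency threshold was hit"
+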
REPORTING BUGS
==============
Report bugs to <linux-kernel@vger.kernel.org>
diff --git a/Documentation/tools/rtla/common_timerlat_options.rst b/Documentation/tools/rtla/common_timerlat_options.rst
index 10dc802f8d65..7854368f1827 100644
--- a/Documentation/tools/rtla/common_timerlat_options.rst
+++ b/Documentation/tools/rtla/common_timerlat_options.rst
@@ -55,3 +55,67 @@
Set timerlat to run without workload, waiting for the user to dispatch a per-cpu
task that waits for a new period on the tracing/osnoise/per_cpu/cpu$ID/timerlat_fd.
See linux/tools/rtla/sample/timerlat_load.py for an example of user-load code.
+
+**--on-threshold** *action*
+
+ Defines an action to be executed when tracing is stopped on a latency threshold
+ specified by **-i/--irq** or **-T/--thread**.
+
+ Multiple --on-threshold actions may be specified, and they will be executed in
+ the order they are provided. If any action fails, subsequent actions in the list
+ will not be executed.
+
+ Supported actions are:
+
+ - *trace[,file=<filename>]*
+
+ Saves trace output, optionally taking a filename. Alternative to -t/--trace.
+ Note that nlike -t/--trace, specifying this multiple times will result in
+ the trace being saved multiple times.
+
+ - *signal,num=<sig>,pid=<pid>*
+
+ Sends a signal to a process. "parent" might be specified in place of pid to target
+ the parent process of rtla.
+
+ - *shell,command=<command>*
+
+ Executes a shell command.
+
+ - *continue*
+
+ Continue tracing after actions are executed instead of stopping.
+
+ Example::
+
+ $ rtla timerlat -T 20 --on-threshold trace \
+ --on-threshold shell,command="grep ipi_send timerlat_trace.txt" \
+ --on-threshold signal,num=2,pid=parent
+
+ This will save a trace with the default filename "timerlat_trace.txt", print its
+ lines that contain the text "ipi_send" on standard output, and send signal 2
+ (SIGINT) to the parent process.
+
+ Performance Considerations:
+
+ For time-sensitive actions, it is recommended to run **rtla timerlat** with BPF
+ support and RT priority. Note that due to implementational limitations, actions
+ might be delayed up to one second after tracing is stopped if BPF mode is not
+ available or disabled.
+
+**--on-end** *action*
+
+ Defines an action to be executed at the end of **rtla timerlat** tracing.
+
+ Multiple --on-end actions can be specified, and they will be executed in the order
+ they are provided. If any action fails, subsequent actions in the list will not be
+ executed.
+
+ See the documentation for **--on-threshold** for the list of supported actions, with
+ the exception that *continue* has no effect.
+
+ Example::
+
+ $ rtla timerlat -d 5s --on-end trace
+
+ This runs rtla timerlat with default options and saves the trace output at the end.
diff --git a/Documentation/tools/rtla/rtla-timerlat-hist.rst b/Documentation/tools/rtla/rtla-timerlat-hist.rst
index 03b7f3deb069..b2d8726271b3 100644
--- a/Documentation/tools/rtla/rtla-timerlat-hist.rst
+++ b/Documentation/tools/rtla/rtla-timerlat-hist.rst
@@ -107,3 +107,5 @@ SEE ALSO
AUTHOR
======
Written by Daniel Bristot de Oliveira <bristot@kernel.org>
+
+.. include:: common_appendix.rst
diff --git a/Documentation/trace/boottime-trace.rst b/Documentation/trace/boottime-trace.rst
index d594597201fd..3efac10adb36 100644
--- a/Documentation/trace/boottime-trace.rst
+++ b/Documentation/trace/boottime-trace.rst
@@ -198,8 +198,8 @@ Most of the subsystems and architecture dependent drivers will be initialized
after that (arch_initcall or subsys_initcall). Thus, you can trace those with
boot-time tracing.
If you want to trace events before core_initcall, you can use the options
-starting with ``kernel``. Some of them will be enabled eariler than the initcall
-processing (for example,. ``kernel.ftrace=function`` and ``kernel.trace_event``
+starting with ``kernel``. Some of them will be enabled earlier than the initcall
+processing (for example, ``kernel.ftrace=function`` and ``kernel.trace_event``
will start before the initcall.)
diff --git a/Documentation/trace/eprobetrace.rst b/Documentation/trace/eprobetrace.rst
new file mode 100644
index 000000000000..89b5157cfab8
--- /dev/null
+++ b/Documentation/trace/eprobetrace.rst
@@ -0,0 +1,269 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================
+Eprobe - Event-based Probe Tracing
+==================================
+
+:Author: Steven Rostedt <rostedt@goodmis.org>
+
+- Written for v6.17
+
+Overview
+========
+
+Eprobes are dynamic events that are placed on existing events to either
+dereference a field that is a pointer, or simply to limit what fields are
+recorded in the trace event.
+
+Eprobes depend on kprobe events, so to enable this feature build your kernel
+with CONFIG_EPROBE_EVENTS=y.
+
+Eprobes are created via the /sys/kernel/tracing/dynamic_events file.
+
+Synopsis of eprobe_events
+-------------------------
+::
+
+ e[:[EGRP/][EEVENT]] GRP.EVENT [FETCHARGS] : Set a probe
+ -:[EGRP/][EEVENT] : Clear a probe
+
+ EGRP : Group name of the new event. If omitted, use "eprobes" for it.
+ EEVENT : Event name. If omitted, the event name is generated and will
+ be the same event name as the event it attached to.
+ GRP : Group name of the event to attach to.
+ EVENT : Event name of the event to attach to.
+
+ FETCHARGS : Arguments. Each probe can have up to 128 args.
+ $FIELD : Fetch the value of the event field called FIELD.
+ @ADDR : Fetch memory at ADDR (ADDR should be in kernel)
+ @SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol)
+ $comm : Fetch current task comm.
+ +|-[u]OFFS(FETCHARG) : Fetch memory at FETCHARG +|- OFFS address.
+ \IMM : Store an immediate value to the argument.
+ NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
+ FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
+ (u8/u16/u32/u64/s8/s16/s32/s64), hexadecimal types
+ (x8/x16/x32/x64), VFS layer common type (%pd/%pD), "char",
+ "string", "ustring", "symbol", "symstr" and "bitfield" are
+ supported.
+
+Types
+-----
+The FETCHARGS above is very similar to the kprobe events as described in
+Documentation/trace/kprobetrace.rst.
+
+The difference between eprobes and kprobes FETCHARGS is that eprobes have a
+$FIELD command that returns the content of an event field of the event they
+are attached to. Eprobes do not have access to the registers, stacks and
+function arguments that kprobes have.
+
+If a field argument is a pointer, it may be dereferenced just like a memory
+address using the FETCHARGS syntax.
+
+
+Attaching to dynamic events
+---------------------------
+
+Eprobes may attach to dynamic events as well as to normal events. It may
+attach to a kprobe event, a synthetic event or a fprobe event. This is useful
+if the type of a field needs to be changed. See Example 2 below.
+
+Usage examples
+==============
+
+Example 1
+---------
+
+The basic usage of eprobes is to limit the data that is being recorded into
+the tracing buffer. For example, a common event to trace is the sched_switch
+trace event. That has a format of::
+
+ field:unsigned short common_type; offset:0; size:2; signed:0;
+ field:unsigned char common_flags; offset:2; size:1; signed:0;
+ field:unsigned char common_preempt_count; offset:3; size:1; signed:0;
+ field:int common_pid; offset:4; size:4; signed:1;
+
+ field:char prev_comm[16]; offset:8; size:16; signed:0;
+ field:pid_t prev_pid; offset:24; size:4; signed:1;
+ field:int prev_prio; offset:28; size:4; signed:1;
+ field:long prev_state; offset:32; size:8; signed:1;
+ field:char next_comm[16]; offset:40; size:16; signed:0;
+ field:pid_t next_pid; offset:56; size:4; signed:1;
+ field:int next_prio; offset:60; size:4; signed:1;
+
+The first four fields are common to all events and cannot be limited. But the
+rest of the event has 60 bytes of information. It records the names of the
+previous and next tasks being scheduled out and in, as well as their pids and
+priorities. It also records the state of the previous task. If only the pids
+of the tasks are of interest, why waste the ring buffer with all the other
+fields?
+
+An eprobe can limit what gets recorded. Note, it does not help in performance,
+as all the fields are recorded in a temporary buffer to process the eprobe.
+::
+
+ # echo 'e:sched/switch sched.sched_switch prev=$prev_pid:u32 next=$next_pid:u32' >> /sys/kernel/tracing/dynamic_events
+ # echo 1 > /sys/kernel/tracing/events/sched/switch/enable
+ # cat /sys/kernel/tracing/trace
+
+ # tracer: nop
+ #
+ # entries-in-buffer/entries-written: 2721/2721 #P:8
+ #
+ # _-----=> irqs-off/BH-disabled
+ # / _----=> need-resched
+ # | / _---=> hardirq/softirq
+ # || / _--=> preempt-depth
+ # ||| / _-=> migrate-disable
+ # |||| / delay
+ # TASK-PID CPU# ||||| TIMESTAMP FUNCTION
+ # | | | ||||| | |
+ sshd-session-1082 [004] d..4. 5041.239906: switch: (sched.sched_switch) prev=1082 next=0
+ bash-1085 [001] d..4. 5041.240198: switch: (sched.sched_switch) prev=1085 next=141
+ kworker/u34:5-141 [001] d..4. 5041.240259: switch: (sched.sched_switch) prev=141 next=1085
+ <idle>-0 [004] d..4. 5041.240354: switch: (sched.sched_switch) prev=0 next=1082
+ bash-1085 [001] d..4. 5041.240385: switch: (sched.sched_switch) prev=1085 next=141
+ kworker/u34:5-141 [001] d..4. 5041.240410: switch: (sched.sched_switch) prev=141 next=1085
+ bash-1085 [001] d..4. 5041.240478: switch: (sched.sched_switch) prev=1085 next=0
+ sshd-session-1082 [004] d..4. 5041.240526: switch: (sched.sched_switch) prev=1082 next=0
+ <idle>-0 [001] d..4. 5041.247524: switch: (sched.sched_switch) prev=0 next=90
+ <idle>-0 [002] d..4. 5041.247545: switch: (sched.sched_switch) prev=0 next=16
+ kworker/1:1-90 [001] d..4. 5041.247580: switch: (sched.sched_switch) prev=90 next=0
+ rcu_sched-16 [002] d..4. 5041.247591: switch: (sched.sched_switch) prev=16 next=0
+ <idle>-0 [002] d..4. 5041.257536: switch: (sched.sched_switch) prev=0 next=16
+ rcu_sched-16 [002] d..4. 5041.257573: switch: (sched.sched_switch) prev=16 next=0
+
+Note, without adding the "u32" after the prev_pid and next_pid, the values
+would default to being displayed in hexadecimal.
+
+Example 2
+---------
+
+If a specific system call is to be recorded but the syscalls events are not
+enabled, the raw_syscalls events can still be used (system call events are
+not normal events, but are created from the raw_syscalls events within the
+kernel). In order to trace the openat system call, one can create an event
+probe on top of the raw_syscalls event:
+::
+
+ # cd /sys/kernel/tracing
+ # cat events/raw_syscalls/sys_enter/format
+ name: sys_enter
+ ID: 395
+ format:
+ field:unsigned short common_type; offset:0; size:2; signed:0;
+ field:unsigned char common_flags; offset:2; size:1; signed:0;
+ field:unsigned char common_preempt_count; offset:3; size:1; signed:0;
+ field:int common_pid; offset:4; size:4; signed:1;
+
+ field:long id; offset:8; size:8; signed:1;
+ field:unsigned long args[6]; offset:16; size:48; signed:0;
+
+ print fmt: "NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)", REC->id, REC->args[0], REC->args[1], REC->args[2], REC->args[3], REC->args[4], REC->args[5]
+
+From the source code, the sys_openat() has:
+::
+
+ int sys_openat(int dirfd, const char *path, int flags, mode_t mode)
+ {
+ return my_syscall4(__NR_openat, dirfd, path, flags, mode);
+ }
+
+The path is the second parameter, and that is what is wanted.
+::
+
+ # echo 'e:openat raw_syscalls.sys_enter nr=$id filename=+8($args):ustring' >> dynamic_events
+
+This is being run on x86_64 where the word size is 8 bytes and the openat
+system call __NR_openat is set at 257.
+::
+
+ # echo 'nr == 257' > events/eprobes/openat/filter
+
+Now enable the event and look at the trace.
+::
+
+ # echo 1 > events/eprobes/openat/enable
+ # cat trace
+
+ # tracer: nop
+ #
+ # entries-in-buffer/entries-written: 4/4 #P:8
+ #
+ # _-----=> irqs-off/BH-disabled
+ # / _----=> need-resched
+ # | / _---=> hardirq/softirq
+ # || / _--=> preempt-depth
+ # ||| / _-=> migrate-disable
+ # |||| / delay
+ # TASK-PID CPU# ||||| TIMESTAMP FUNCTION
+ # | | | ||||| | |
+ cat-1298 [003] ...2. 2060.875970: openat: (raw_syscalls.sys_enter) nr=0x101 filename=(fault)
+ cat-1298 [003] ...2. 2060.876197: openat: (raw_syscalls.sys_enter) nr=0x101 filename=(fault)
+ cat-1298 [003] ...2. 2060.879126: openat: (raw_syscalls.sys_enter) nr=0x101 filename=(fault)
+ cat-1298 [003] ...2. 2060.879639: openat: (raw_syscalls.sys_enter) nr=0x101 filename=(fault)
+
+The filename shows "(fault)". This is likely because the filename has not been
+pulled into memory yet and currently trace events cannot fault in memory that
+is not present. When an eprobe tries to read memory that has not been faulted
+in yet, it will show the "(fault)" text.
+
+To get around this, note that the kernel will likely pull in this filename and
+make it present by the time the system call exits. A synthetic event that
+passes the address of the filename from the entry of the system call to its
+exit can therefore be used to show the filename when the system call returns.
+
+Remove the old eprobe::
+
+ # echo 0 > events/eprobes/openat/enable
+ # echo '-:openat' >> dynamic_events
+
+This time make an eprobe where the address of the filename is saved::
+
+ # echo 'e:openat_start raw_syscalls.sys_enter nr=$id filename=+8($args):x64' >> dynamic_events
+
+Create a synthetic event that passes the address of the filename to the
+end of the event::
+
+ # echo 's:filename u64 file' >> dynamic_events
+ # echo 'hist:keys=common_pid:f=filename if nr == 257' > events/eprobes/openat_start/trigger
+ # echo 'hist:keys=common_pid:file=$f:onmatch(eprobes.openat_start).trace(filename,$file) if id == 257' > events/raw_syscalls/sys_exit/trigger
+
+Now that the address of the filename has been passed to the end of the
+system call, create another eprobe on the synthetic event to show the
+string::
+
+ # echo 'e:openat synthetic.filename filename=+0($file):ustring' >> dynamic_events
+ # echo 1 > events/eprobes/openat/enable
+ # cat trace
+
+ # tracer: nop
+ #
+ # entries-in-buffer/entries-written: 4/4 #P:8
+ #
+ # _-----=> irqs-off/BH-disabled
+ # / _----=> need-resched
+ # | / _---=> hardirq/softirq
+ # || / _--=> preempt-depth
+ # ||| / _-=> migrate-disable
+ # |||| / delay
+ # TASK-PID CPU# ||||| TIMESTAMP FUNCTION
+ # | | | ||||| | |
+ cat-1331 [001] ...5. 2944.787977: openat: (synthetic.filename) filename="/etc/ld.so.cache"
+ cat-1331 [001] ...5. 2944.788480: openat: (synthetic.filename) filename="/lib/x86_64-linux-gnu/libc.so.6"
+ cat-1331 [001] ...5. 2944.793426: openat: (synthetic.filename) filename="/usr/lib/locale/locale-archive"
+ cat-1331 [001] ...5. 2944.831362: openat: (synthetic.filename) filename="trace"
+
+Example 3
+---------
+
+If syscall trace events are available, the above would not need the first
+eprobe, but it would still need the last one::
+
+ # echo 's:filename u64 file' >> dynamic_events
+ # echo 'hist:keys=common_pid:f=filename' > events/syscalls/sys_enter_openat/trigger
+ # echo 'hist:keys=common_pid:file=$f:onmatch(syscalls.sys_enter_openat).trace(filename,$file)' > events/syscalls/sys_exit_openat/trigger
+ # echo 'e:openat synthetic.filename filename=+0($file):ustring' >> dynamic_events
+ # echo 1 > events/eprobes/openat/enable
+
+And this would produce the same result as Example 2.
diff --git a/Documentation/trace/ftrace-design.rst b/Documentation/trace/ftrace-design.rst
index dc82d64b3a44..8f4fab3f9324 100644
--- a/Documentation/trace/ftrace-design.rst
+++ b/Documentation/trace/ftrace-design.rst
@@ -238,19 +238,15 @@ You need very few things to get the syscalls tracing in an arch.
- Tag this arch as HAVE_SYSCALL_TRACEPOINTS.
-HAVE_FTRACE_MCOUNT_RECORD
--------------------------
+HAVE_DYNAMIC_FTRACE
+-------------------
See scripts/recordmcount.pl for more info. Just fill in the arch-specific
details for how to locate the addresses of mcount call sites via objdump.
This option doesn't make much sense without also implementing dynamic ftrace.
-
-HAVE_DYNAMIC_FTRACE
--------------------
-
-You will first need HAVE_FTRACE_MCOUNT_RECORD and HAVE_FUNCTION_TRACER, so
-scroll your reader back up if you got over eager.
+You will first need HAVE_FUNCTION_TRACER, so scroll your reader back up if you
+got over eager.
Once those are out of the way, you will need to implement:
- asm/ftrace.h:
diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst
index 0aada18c38c6..2b98c1720a54 100644
--- a/Documentation/trace/histogram.rst
+++ b/Documentation/trace/histogram.rst
@@ -249,7 +249,7 @@ Extended error information
table, it should keep a running total of the number of bytes
requested by that call_site.
- We'll let it run for awhile and then dump the contents of the 'hist'
+ We'll let it run for a while and then dump the contents of the 'hist'
file in the kmalloc event's subdirectory (for readability, a number
of entries have been omitted)::
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index cc1dc5a087e8..b4a429dc4f7a 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -36,6 +36,7 @@ the Linux kernel.
kprobes
kprobetrace
fprobetrace
+ eprobetrace
fprobe
ring-buffer-design
diff --git a/Documentation/trace/rv/da_monitor_synthesis.rst b/Documentation/trace/rv/da_monitor_synthesis.rst
deleted file mode 100644
index 0a92729c8a9b..000000000000
--- a/Documentation/trace/rv/da_monitor_synthesis.rst
+++ /dev/null
@@ -1,147 +0,0 @@
-Deterministic Automata Monitor Synthesis
-========================================
-
-The starting point for the application of runtime verification (RV) techniques
-is the *specification* or *modeling* of the desired (or undesired) behavior
-of the system under scrutiny.
-
-The formal representation needs to be then *synthesized* into a *monitor*
-that can then be used in the analysis of the trace of the system. The
-*monitor* connects to the system via an *instrumentation* that converts
-the events from the *system* to the events of the *specification*.
-
-
-In Linux terms, the runtime verification monitors are encapsulated inside
-the *RV monitor* abstraction. The RV monitor includes a set of instances
-of the monitor (per-cpu monitor, per-task monitor, and so on), the helper
-functions that glue the monitor to the system reference model, and the
-trace output as a reaction to event parsing and exceptions, as depicted
-below::
-
- Linux +----- RV Monitor ----------------------------------+ Formal
- Realm | | Realm
- +-------------------+ +----------------+ +-----------------+
- | Linux kernel | | Monitor | | Reference |
- | Tracing | -> | Instance(s) | <- | Model |
- | (instrumentation) | | (verification) | | (specification) |
- +-------------------+ +----------------+ +-----------------+
- | | |
- | V |
- | +----------+ |
- | | Reaction | |
- | +--+--+--+-+ |
- | | | | |
- | | | +-> trace output ? |
- +------------------------|--|----------------------+
- | +----> panic ?
- +-------> <user-specified>
-
-DA monitor synthesis
---------------------
-
-The synthesis of automata-based models into the Linux *RV monitor* abstraction
-is automated by the dot2k tool and the rv/da_monitor.h header file that
-contains a set of macros that automatically generate the monitor's code.
-
-dot2k
------
-
-The dot2k utility leverages dot2c by converting an automaton model in
-the DOT format into the C representation [1] and creating the skeleton of
-a kernel monitor in C.
-
-For example, it is possible to transform the wip.dot model present in
-[1] into a per-cpu monitor with the following command::
-
- $ dot2k -d wip.dot -t per_cpu
-
-This will create a directory named wip/ with the following files:
-
-- wip.h: the wip model in C
-- wip.c: the RV monitor
-
-The wip.c file contains the monitor declaration and the starting point for
-the system instrumentation.
-
-Monitor macros
---------------
-
-The rv/da_monitor.h enables automatic code generation for the *Monitor
-Instance(s)* using C macros.
-
-The benefits of the usage of macro for monitor synthesis are 3-fold as it:
-
-- Reduces the code duplication;
-- Facilitates the bug fix/improvement;
-- Avoids the case of developers changing the core of the monitor code
- to manipulate the model in a (let's say) non-standard way.
-
-This initial implementation presents three different types of monitor instances:
-
-- ``#define DECLARE_DA_MON_GLOBAL(name, type)``
-- ``#define DECLARE_DA_MON_PER_CPU(name, type)``
-- ``#define DECLARE_DA_MON_PER_TASK(name, type)``
-
-The first declares the functions for a global deterministic automata monitor,
-the second for monitors with per-cpu instances, and the third with per-task
-instances.
-
-In all cases, the 'name' argument is a string that identifies the monitor, and
-the 'type' argument is the data type used by dot2k on the representation of
-the model in C.
-
-For example, the wip model with two states and three events can be
-stored in an 'unsigned char' type. Considering that the preemption control
-is a per-cpu behavior, the monitor declaration in the 'wip.c' file is::
-
- DECLARE_DA_MON_PER_CPU(wip, unsigned char);
-
-The monitor is executed by sending events to be processed via the functions
-presented below::
-
- da_handle_event_$(MONITOR_NAME)($(event from event enum));
- da_handle_start_event_$(MONITOR_NAME)($(event from event enum));
- da_handle_start_run_event_$(MONITOR_NAME)($(event from event enum));
-
-The function ``da_handle_event_$(MONITOR_NAME)()`` is the regular case where
-the event will be processed if the monitor is processing events.
-
-When a monitor is enabled, it is placed in the initial state of the automata.
-However, the monitor does not know if the system is in the *initial state*.
-
-The ``da_handle_start_event_$(MONITOR_NAME)()`` function is used to notify the
-monitor that the system is returning to the initial state, so the monitor can
-start monitoring the next event.
-
-The ``da_handle_start_run_event_$(MONITOR_NAME)()`` function is used to notify
-the monitor that the system is known to be in the initial state, so the
-monitor can start monitoring and monitor the current event.
-
-Using the wip model as example, the events "preempt_disable" and
-"sched_waking" should be sent to monitor, respectively, via [2]::
-
- da_handle_event_wip(preempt_disable_wip);
- da_handle_event_wip(sched_waking_wip);
-
-While the event "preempt_enabled" will use::
-
- da_handle_start_event_wip(preempt_enable_wip);
-
-To notify the monitor that the system will be returning to the initial state,
-so the system and the monitor should be in sync.
-
-Final remarks
--------------
-
-With the monitor synthesis in place using the rv/da_monitor.h and
-dot2k, the developer's work should be limited to the instrumentation
-of the system, increasing the confidence in the overall approach.
-
-[1] For details about deterministic automata format and the translation
-from one representation to another, see::
-
- Documentation/trace/rv/deterministic_automata.rst
-
-[2] dot2k appends the monitor's name suffix to the events enums to
-avoid conflicting variables when exporting the global vmlinux.h
-use by BPF programs.
diff --git a/Documentation/trace/rv/index.rst b/Documentation/trace/rv/index.rst
index e80e0057feb4..a2812ac5cfeb 100644
--- a/Documentation/trace/rv/index.rst
+++ b/Documentation/trace/rv/index.rst
@@ -8,8 +8,10 @@ Runtime Verification
runtime-verification.rst
deterministic_automata.rst
- da_monitor_synthesis.rst
+ linear_temporal_logic.rst
+ monitor_synthesis.rst
da_monitor_instrumentation.rst
monitor_wip.rst
monitor_wwnr.rst
monitor_sched.rst
+ monitor_rtapp.rst
diff --git a/Documentation/trace/rv/linear_temporal_logic.rst b/Documentation/trace/rv/linear_temporal_logic.rst
new file mode 100644
index 000000000000..9eee09d9cacf
--- /dev/null
+++ b/Documentation/trace/rv/linear_temporal_logic.rst
@@ -0,0 +1,134 @@
+Linear temporal logic
+=====================
+
+Introduction
+------------
+
+A runtime verification monitor is a verification technique which checks that
+the kernel follows a specification. It does so by using tracepoints to monitor
+the kernel's execution trace, and verifying that the execution trace satisfies
+the specification.
+
+Initially, the specification could only be written in the form of a
+deterministic automaton (DA). However, while attempting to implement DA
+monitors for some complex specifications, deterministic automata were found to
+be inappropriate as the specification language: the automata become
+complicated, hard to understand, and error-prone.
+
+Thus, RV monitors based on linear temporal logic (LTL) were introduced. This
+type of monitor uses LTL as the specification language instead of DA. In some
+cases, writing the specification as LTL is more concise and intuitive.
+
+Many materials explain LTL in detail. One book is::
+
+ Christel Baier and Joost-Pieter Katoen: Principles of Model Checking, The MIT
+ Press, 2008.
+
+Grammar
+-------
+
+Unlike some existing syntaxes, the kernel's implementation of LTL is more
+verbose. This is motivated by the consideration that the people who read the
+LTL specifications may not be well-versed in LTL.
+
+Grammar:
+ ltl ::= opd | ( ltl ) | ltl binop ltl | unop ltl
+
+Operands (opd):
+ true, false, user-defined names consisting of upper-case characters, digits,
+ and underscore.
+
+Unary Operators (unop):
+ always
+ eventually
+ next
+ not
+
+Binary Operators (binop):
+ until
+ and
+ or
+ imply
+ equivalent
+
+This grammar is ambiguous: operator precedence is not defined. Parentheses must
+be used.
+
+Example linear temporal logic
+-----------------------------
+.. code-block::
+
+ RAIN imply (GO_OUTSIDE imply HAVE_UMBRELLA)
+
+means: if it is raining, going outside means having an umbrella.
+
+.. code-block::
+
+ RAIN imply (WET until not RAIN)
+
+means: if it is raining, it is going to be wet until the rain stops.
+
+.. code-block::
+
+ RAIN imply eventually not RAIN
+
+means: if it is raining, rain will eventually stop.
+
+The above examples refer to the current time instant only. For kernel
+verification, the `always` operator is usually desirable, to specify that
+something holds at the present and at all future times. For example::
+
+ always (RAIN imply eventually not RAIN)
+
+means: *all* rain eventually stops.
+
+In the above examples, `RAIN`, `GO_OUTSIDE`, `HAVE_UMBRELLA` and `WET` are the
+"atomic propositions".
+
+Monitor synthesis
+-----------------
+
+To synthesize an LTL specification into a kernel monitor, the `rvgen` tool can
+be used: `tools/verification/rvgen`. The specification needs to be provided as
+a file, and it must have a "RULE = LTL" assignment. For example::
+
+ RULE = always (ACQUIRE imply ((not KILLED and not CRASHED) until RELEASE))
+
+which says: if `ACQUIRE`, then `RELEASE` must happen before `KILLED` or
+`CRASHED`.
+
+The LTL can be broken down using sub-expressions. The above is equivalent to:
+
+ .. code-block::
+
+ RULE = always (ACQUIRE imply (ALIVE until RELEASE))
+ ALIVE = not KILLED and not CRASHED
+
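+Assuming the specification above is saved as `rule.ltl` (a hypothetical file
+name), it could be synthesized with::
+
+  $ rvgen monitor -c ltl -s rule.ltl -t per_task
+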
+From this specification, `rvgen` generates the C implementation of a Buchi
+automaton - a non-deterministic state machine which checks the satisfiability of
+the LTL. See Documentation/trace/rv/monitor_synthesis.rst for details on using
+`rvgen`.
+
+References
+----------
+
+One book covering model checking and linear temporal logic is::
+
+ Christel Baier and Joost-Pieter Katoen: Principles of Model Checking, The MIT
+ Press, 2008.
+
+For an example of using linear temporal logic in software testing, see::
+
+ Ruijie Meng, Zhen Dong, Jialin Li, Ivan Beschastnikh, and Abhik Roychoudhury.
+ 2022. Linear-time temporal logic guided greybox fuzzing. In Proceedings of the
+ 44th International Conference on Software Engineering (ICSE '22). Association
+ for Computing Machinery, New York, NY, USA, 1343–1355.
+ https://doi.org/10.1145/3510003.3510082
+
+The kernel's LTL monitor implementation is based on::
+
+ Gerth, R., Peled, D., Vardi, M.Y., Wolper, P. (1996). Simple On-the-fly
+ Automatic Verification of Linear Temporal Logic. In: Dembiński, P., Średniawa,
+ M. (eds) Protocol Specification, Testing and Verification XV. PSTV 1995. IFIP
+ Advances in Information and Communication Technology. Springer, Boston, MA.
+ https://doi.org/10.1007/978-0-387-34892-6_1
diff --git a/Documentation/trace/rv/monitor_rtapp.rst b/Documentation/trace/rv/monitor_rtapp.rst
new file mode 100644
index 000000000000..c8104eda924a
--- /dev/null
+++ b/Documentation/trace/rv/monitor_rtapp.rst
@@ -0,0 +1,133 @@
+Real-time application monitors
+==============================
+
+- Name: rtapp
+- Type: container for multiple monitors
+- Author: Nam Cao <namcao@linutronix.de>
+
+Description
+-----------
+
+Real-time applications may have design flaws such that they experience
+unexpected latency and fail to meet their time requirements. Often, these flaws
+follow a few patterns:
+
+ - Page faults: A real-time thread may access memory that does not have a
+ mapped physical backing or must first be copied (such as for copy-on-write).
+ Thus, a page fault is raised and the kernel must first perform the expensive
+ action. This causes significant delays to the real-time thread.
+ - Priority inversion: A real-time thread blocks waiting for a lower-priority
+ thread. This causes the real-time thread to effectively take on the
+ scheduling priority of the lower-priority thread. For example, the real-time
+ thread needs to access a shared resource that is protected by a
+ non-pi-mutex, but the mutex is currently owned by a non-real-time thread
+ (a fix for this case is sketched after this list).
+
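+For the priority inversion case, the usual fix is a priority-inheritance
+mutex. A minimal sketch of initializing one with POSIX threads (illustrative,
+not part of the monitor)::
+
+  #include <pthread.h>
+
+  /* Initialize a mutex with priority inheritance, so that a low-priority
+   * owner is boosted while a real-time thread is waiting on it. */
+  static int init_pi_mutex(pthread_mutex_t *m)
+  {
+          pthread_mutexattr_t attr;
+          int ret;
+
+          ret = pthread_mutexattr_init(&attr);
+          if (ret)
+                  return ret;
+          ret = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
+          if (!ret)
+                  ret = pthread_mutex_init(m, &attr);
+          pthread_mutexattr_destroy(&attr);
+          return ret;
+  }
+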
+The `rtapp` monitor detects these patterns. It helps developers identify the
+reasons for unexpected latency in real-time applications. It is a container
+for the sub-monitors described in the following sections.
+
+Monitor pagefault
++++++++++++++++++
+
+The `pagefault` monitor reports real-time tasks raising page faults. Its
+specification is::
+
+ RULE = always (RT imply not PAGEFAULT)
+
+To fix warnings reported by this monitor, `mlockall()` or `mlock()` can be used
+to ensure physical backing for memory.
+
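+For example, a real-time application might lock all current and future
+mappings at startup. A minimal sketch (illustrative, not part of the
+monitor)::
+
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <sys/mman.h>
+
+  int main(void)
+  {
+          /* Lock all current and future mappings into RAM, so the
+           * real-time threads created later never raise page faults. */
+          if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1) {
+                  perror("mlockall");
+                  exit(EXIT_FAILURE);
+          }
+
+          /* ... create and run real-time threads ... */
+          return 0;
+  }
+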
+This monitor may have false negatives because the pages used by the real-time
+threads may just happen to be directly available during testing. To minimize
+this, the system can be put under memory pressure (e.g. invoking the OOM killer
+using a program that does `ptr = malloc(SIZE_OF_RAM); memset(ptr, 0,
+SIZE_OF_RAM);`) so that the kernel executes aggressive strategies to recycle as
+much physical memory as possible.
+
+Monitor sleep
++++++++++++++
+
+The `sleep` monitor reports real-time threads sleeping in a manner that may
+cause undesirable latency. Real-time applications should only put a real-time
+thread to sleep for one of the following reasons:
+
+ - Cyclic work: the real-time thread sleeps waiting for the next cycle. For
+ this case, only the `clock_nanosleep` syscall should be used, with
+ `TIMER_ABSTIME` (to avoid time drift) and `CLOCK_MONOTONIC` (to avoid the
+ clock being changed); a minimal sketch follows this list. No other method is
+ safe for real-time. For example, threads waiting for timerfd can be woken by
+ a softirq, which provides no real-time guarantee.
+ - The real-time thread waits for something to happen (e.g. another thread
+ releasing shared resources, or a completion signal from another thread). In
+ this case, only futexes (FUTEX_LOCK_PI, FUTEX_LOCK_PI2 or one of
+ FUTEX_WAIT_*) should be used. Applications usually do not use futexes
+ directly, but use PI mutexes and PI condition variables, which are built on
+ top of futexes. Be aware that the C library might not implement condition
+ variables in a way that is safe for real-time. As an alternative, the
+ librtpi library provides a condition variable implementation that is correct
+ for real-time applications on Linux.
+
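+A minimal sketch of a real-time-safe cyclic loop (illustrative, not part of
+the monitor)::
+
+  #include <time.h>
+
+  #define CYCLE_NS (1000 * 1000)  /* 1 ms cycle */
+
+  static void cyclic_loop(void)
+  {
+          struct timespec next;
+
+          clock_gettime(CLOCK_MONOTONIC, &next);
+          for (;;) {
+                  /* Advance the absolute wakeup time by one cycle. */
+                  next.tv_nsec += CYCLE_NS;
+                  while (next.tv_nsec >= 1000000000L) {
+                          next.tv_nsec -= 1000000000L;
+                          next.tv_sec++;
+                  }
+                  /* Sleep to an absolute time on the monotonic clock:
+                   * no drift, immune to wall-clock changes. */
+                  clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next, NULL);
+
+                  /* ... do one cycle of real-time work ... */
+          }
+  }
+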
+Besides the reason for sleeping, the eventual waker should also be
+real-time-safe. Namely, one of:
+
+ - An equal-or-higher-priority thread
+ - Hard interrupt handler
+ - Non-maskable interrupt handler
+
+This monitor's warning usually means one of the following:
+
+ - A real-time thread is blocked by a non-real-time thread (e.g. due to
+ contention on a mutex without priority inheritance). This is priority
+ inversion.
+ - Time-critical work waits for something which is not safe for real-time (e.g.
+ timerfd).
+ - The work executed by the real-time thread does not need to run at real-time
+ priority at all. This is not a problem for the real-time thread itself, but
+ it is potentially taking the CPU away from other important real-time work.
+
+Application developers may purposely choose to have their real-time application
+sleep in a way that is not safe for real-time. It is debatable whether that is a
+problem. Application developers must analyze the warnings to make a proper
+assessment.
+
+The monitor's specification is::
+
+ RULE = always ((RT and SLEEP) imply (RT_FRIENDLY_SLEEP or ALLOWLIST))
+
+ RT_FRIENDLY_SLEEP = (RT_VALID_SLEEP_REASON or KERNEL_THREAD)
+ and ((not WAKE) until RT_FRIENDLY_WAKE)
+
+ RT_VALID_SLEEP_REASON = FUTEX_WAIT
+ or RT_FRIENDLY_NANOSLEEP
+
+ RT_FRIENDLY_NANOSLEEP = CLOCK_NANOSLEEP
+ and NANOSLEEP_TIMER_ABSTIME
+ and NANOSLEEP_CLOCK_MONOTONIC
+
+ RT_FRIENDLY_WAKE = WOKEN_BY_EQUAL_OR_HIGHER_PRIO
+ or WOKEN_BY_HARDIRQ
+ or WOKEN_BY_NMI
+ or KTHREAD_SHOULD_STOP
+
+ ALLOWLIST = BLOCK_ON_RT_MUTEX
+ or FUTEX_LOCK_PI
+ or TASK_IS_RCU
+ or TASK_IS_MIGRATION
+
+Besides the scenarios described above, this specification also handles some
+special cases:
+
+ - `KERNEL_THREAD`: kernel tasks do not have any pattern that can be recognized
+ as a valid real-time sleeping reason. Therefore, the sleeping reason is not
+ checked for kernel tasks.
+ - `KTHREAD_SHOULD_STOP`: a non-real-time thread may stop a real-time kernel
+ thread by waking it and waiting for it to exit (`kthread_stop()`). This
+ wakeup is safe for real-time.
+ - `ALLOWLIST`: to handle known false positives with the kernel.
+ - `BLOCK_ON_RT_MUTEX` is included in the allowlist due to its implementation.
+ In the release path of rt_mutex, a boosted task is de-boosted before waking
+ the rt_mutex's waiter. Consequently, the monitor may see a real-time-unsafe
+ wakeup (e.g. a non-real-time task waking a real-time task). This is actually
+ real-time-safe because preemption is disabled for the duration.
+ - `FUTEX_LOCK_PI` is included in the allowlist for the same reason as
+ `BLOCK_ON_RT_MUTEX`.
diff --git a/Documentation/trace/rv/monitor_sched.rst b/Documentation/trace/rv/monitor_sched.rst
index 24b2c62a3bc2..3f8381ad9ec7 100644
--- a/Documentation/trace/rv/monitor_sched.rst
+++ b/Documentation/trace/rv/monitor_sched.rst
@@ -40,26 +40,6 @@ defined in by Daniel Bristot in [1].
Currently we included the following:
-Monitor tss
-~~~~~~~~~~~
-
-The task switch while scheduling (tss) monitor ensures a task switch happens
-only in scheduling context, that is inside a call to `__schedule`::
-
- |
- |
- v
- +-----------------+
- | thread | <+
- +-----------------+ |
- | |
- | schedule_entry | schedule_exit
- v |
- sched_switch |
- +--------------- |
- | sched |
- +--------------> -+
-
Monitor sco
~~~~~~~~~~~
@@ -144,26 +124,277 @@ does not enable preemption::
|
scheduling_contex -+
-Monitor sncid
-~~~~~~~~~~~~~
+Monitor sts
+~~~~~~~~~~~
-The schedule not called with interrupt disabled (sncid) monitor ensures
-schedule is not called with interrupt disabled::
+The schedule implies task switch (sts) monitor ensures that a task switch
+happens only in scheduling context and at most once, that scheduling starts
+with interrupts enabled, and that no task switch can happen before interrupts
+are disabled. When the next task picked for execution is the same as the
+previously running one, no real task switch occurs, but interrupts are
+disabled nonetheless::
- |
- |
- v
- schedule_entry +--------------+
- schedule_exit | |
- +----------------- | can_sched |
- | | |
- +----------------> | | <+
- +--------------+ |
- | |
- | irq_disable | irq_enable
- v |
- |
- cant_sched -+
+ irq_entry |
+ +----+ |
+ v | v
+ +------------+ irq_enable #===================# irq_disable
+ | | ------------> H H irq_entry
+ | cant_sched | <------------ H H irq_enable
+ | | irq_disable H can_sched H --------------+
+ +------------+ H H |
+ H H |
+ +---------------> H H <-------------+
+ | #===================#
+ | |
+ schedule_exit | schedule_entry
+ | v
+ | +-------------------+ irq_enable
+ | | scheduling | <---------------+
+ | +-------------------+ |
+ | | |
+ | | irq_disable +--------+ irq_entry
+ | v | | --------+
+ | +-------------------+ irq_entry | in_irq | |
+ | | | -----------> | | <-------+
+ | | disable_to_switch | +--------+
+ | | | --+
+ | +-------------------+ |
+ | | |
+ | | sched_switch |
+ | v |
+ | +-------------------+ |
+ | | switching | | irq_enable
+ | +-------------------+ |
+ | | |
+ | | irq_enable |
+ | v |
+ | +-------------------+ |
+ +-- | enable_to_exit | <-+
+ +-------------------+
+ ^ | irq_disable
+ | | irq_entry
+ +---------------+ irq_enable
+
+Monitor nrp
+~~~~~~~~~~~
+
+The need resched preempts (nrp) monitor ensures that preemption requires
+``need_resched``. Only kernel preemption is considered, since preemption
+while returning to userspace is, for this monitor, indistinguishable from
+``sched_switch_yield`` (described in the sssw monitor).
+A kernel preemption occurs whenever ``__schedule`` is called with the
+preemption flag set to true (e.g. from preempt_enable or when exiting from
+interrupts). This type of preemption occurs after the need for
+``rescheduling`` has been set. This does not apply to the *lazy* variant of
+the flag, which causes only userspace preemption.
+A ``schedule_entry_preempt`` may or may not involve a task switch; in the
+latter case, a task goes through the scheduler from a preemption context but
+is picked again as the next task to run. Since the scheduler runs, this
+clears the need to reschedule. The ``any_thread_running`` state does not
+imply the monitored task is not running, as this monitor does not track the
+outcome of scheduling.
+
+In theory, a preemption can only occur after the ``need_resched`` flag is set. In
+practice, however, it is possible to see a preemption where the flag is not
+set. This can happen in one specific condition::
+
+ need_resched
+ preempt_schedule()
+ preempt_schedule_irq()
+ __schedule()
+ !need_resched
+ __schedule()
+
+In the situation above, standard preemption starts (e.g. from preempt_enable
+when the flag is set), an interrupt occurs before scheduling and, on its exit
+path, it schedules, which clears the ``need_resched`` flag.
+When the preempted task runs again, the standard preemption started earlier
+resumes, although the flag is no longer set. The monitor considers this a
+``nested_preemption``, which allows another preemption without the flag being
+set again. This condition relaxes the monitor's constraints and may introduce
+false negatives (i.e. when no real ``nested_preemption`` occurred), but it
+makes the monitor more robust and able to validate other scenarios.
+For simplicity, the monitor starts in ``preempt_irq``, although no interrupt
+occurred, as the situation above is hard to pinpoint::
+
+ schedule_entry
+ irq_entry #===========================================#
+ +-------------------------- H H
+ | H H
+ +-------------------------> H any_thread_running H
+ H H
+ +-------------------------> H H
+ | #===========================================#
+ | schedule_entry | ^
+ | schedule_entry_preempt | sched_need_resched | schedule_entry
+ | | schedule_entry_preempt
+ | v |
+ | +----------------------+ |
+ | +--- | | |
+ | sched_need_resched | | rescheduling | -+
+ | +--> | |
+ | +----------------------+
+ | | irq_entry
+ | v
+ | +----------------------+
+ | | | ---+
+ | ---> | | | sched_need_resched
+ | | preempt_irq | | irq_entry
+ | | | <--+
+ | | | <--+
+ | +----------------------+ |
+ | | schedule_entry | sched_need_resched
+ | | schedule_entry_preempt |
+ | v |
+ | +-----------------------+ |
+ +-------------------------- | nested_preempt | --+
+ +-----------------------+
+ ^ irq_entry |
+ +-------------------+
+
+Due to how the ``need_resched`` flag on the preemption count works on arm64,
+this monitor is unstable on that architecture, as it often records preemption
+when the flag is not set, even in the presence of the workaround above.
+For the time being, the monitor is disabled by default on arm64.
+
+Monitor sssw
+~~~~~~~~~~~~
+
+The set state sleep and wakeup (sssw) monitor ensures that setting the task's
+state to sleepable leads to sleeping, and that sleeping tasks require a
+wakeup. It includes the following types of switch:
+
+* ``switch_suspend``:
+ a task puts itself to sleep; this can happen only after explicitly setting
+ the task to ``sleepable``. After a task is suspended, it needs to be woken up
+ (``waking`` state) before being switched in again.
+ Setting the task's state to ``sleepable`` can be reverted before switching if
+ the task is woken up or set to ``runnable``.
+* ``switch_blocking``:
+ a special case of ``switch_suspend`` where the task is waiting on a
+ sleeping RT lock (``PREEMPT_RT`` only). It is common to see wakeup and set
+ state events racing with each other, and this leads the model to perceive
+ this type of switch when the task is not set to sleepable. This is a
+ limitation of the model on SMP systems, and workarounds may slow down the
+ system.
+* ``switch_preempt``:
+ a task switch as a result of kernel preemption (``schedule_entry_preempt`` in
+ the nrp model).
+* ``switch_yield``:
+ a task explicitly calls the scheduler or is preempted while returning to
+ userspace. It can happen after a ``yield`` system call, from the idle task,
+ or if the ``need_resched`` flag is set. By definition, a task cannot yield
+ while ``sleepable``, as that would be a suspension. A special case of a yield
+ occurs when a task in ``TASK_INTERRUPTIBLE`` calls the scheduler while a
+ signal is pending. The task doesn't go through the usual blocking/waking and
+ is set back to runnable; the resulting switch (if any) looks like a yield to
+ the ``signal_wakeup`` state and is followed by the signal delivery. From this
+ state, the monitor expects a signal even if it sees a wakeup event, although
+ this is not strictly necessary, to rule out false negatives.
+
+This monitor doesn't include a running state; ``sleepable`` and ``runnable``
+refer only to the task's desired state, and the task could be scheduled out
+(e.g. due to preemption). However, the monitor does include the event
+``sched_switch_in`` to represent when a task is allowed to become running.
+This can also be triggered by preemption, but cannot occur after the task has
+reached ``sleeping`` until a ``wakeup`` occurs::
+
+ +--------------------------------------------------------------------------+
+ | |
+ | |
+ | switch_suspend | |
+ | switch_blocking | |
+ v v |
+ +----------+ #==========================# set_state_runnable |
+ | | H H wakeup |
+ | | H H switch_in |
+ | | H H switch_yield |
+ | sleeping | H H switch_preempt |
+ | | H H signal_deliver |
+ | | switch_ H H ------+ |
+ | | _blocking H runnable H | |
+ | | <----------- H H <-----+ |
+ +----------+ H H |
+ | wakeup H H |
+ +---------------------> H H |
+ H H |
+ +---------> H H |
+ | #==========================# |
+ | | ^ |
+ | | | set_state_runnable |
+ | | | wakeup |
+ | set_state_sleepable | +------------------------+
+ | v | |
+ | +--------------------------+ set_state_sleepable
+ | | | switch_in
+ | | | switch_preempt
+ signal_deliver | sleepable | signal_deliver
+ | | | ------+
+ | | | |
+ | | | <-----+
+ | +--------------------------+
+ | | ^
+ | switch_yield | set_state_sleepable
+ | v |
+ | +---------------+ |
+ +---------- | signal_wakeup | -+
+ +---------------+
+ ^ | switch_in
+ | | switch_preempt
+ | | switch_yield
+ +-----------+ wakeup
+
+Monitor opid
+~~~~~~~~~~~~
+
+The operations with preemption and irq disabled (opid) monitor ensures that
+operations like ``wakeup`` and ``need_resched`` occur with interrupts and
+preemption disabled, or during interrupt context, in which case preemption may
+not be disabled explicitly.
+``need_resched`` can be set by some internal RCU functions, in which case it
+doesn't match a task wakeup and might occur with only interrupts disabled::
+
+ | sched_need_resched
+ | sched_waking
+ | irq_entry
+ | +--------------------+
+ v v |
+ +------------------------------------------------------+
+ +----------- | disabled | <+
+ | +------------------------------------------------------+ |
+ | | ^ |
+ | | preempt_disable sched_need_resched |
+ | preempt_enable | +--------------------+ |
+ | v | v | |
+ | +------------------------------------------------------+ |
+ | | irq_disabled | |
+ | +------------------------------------------------------+ |
+ | | | ^ |
+ | irq_entry irq_entry | | |
+ | sched_need_resched v | irq_disable |
+ | sched_waking +--------------+ | | |
+ | +----- | | irq_enable | |
+ | | | in_irq | | | |
+ | +----> | | | | |
+ | +--------------+ | | irq_disable
+ | | | | |
+ | irq_enable | irq_enable | | |
+ | v v | |
+ | #======================================================# |
+ | H enabled H |
+ | #======================================================# |
+ | | ^ ^ preempt_enable | |
+ | preempt_disable preempt_enable +--------------------+ |
+ | v | |
+ | +------------------+ | |
+ +----------> | preempt_disabled | -+ |
+ +------------------+ |
+ | |
+ +-------------------------------------------------------+
+
+This monitor is designed to work on ``PREEMPT_RT`` kernels. The special case
+of events occurring in interrupt context is a shortcut to identify valid
+scenarios where the preemption tracepoints might not be visible: during
+interrupts, preemption is always disabled. On non-``PREEMPT_RT`` kernels, the
+interrupts might invoke a softirq to set ``need_resched`` and wake up a task.
+This is another special case that is currently not supported by the monitor.
References
----------
diff --git a/Documentation/trace/rv/monitor_synthesis.rst b/Documentation/trace/rv/monitor_synthesis.rst
new file mode 100644
index 000000000000..ac808a7554f5
--- /dev/null
+++ b/Documentation/trace/rv/monitor_synthesis.rst
@@ -0,0 +1,271 @@
+Runtime Verification Monitor Synthesis
+======================================
+
+The starting point for the application of runtime verification (RV) techniques
+is the *specification* or *modeling* of the desired (or undesired) behavior
+of the system under scrutiny.
+
+The formal representation then needs to be *synthesized* into a *monitor*
+that can be used in the analysis of the trace of the system. The
+*monitor* connects to the system via an *instrumentation* that converts
+the events from the *system* to the events of the *specification*.
+
+
+In Linux terms, the runtime verification monitors are encapsulated inside
+the *RV monitor* abstraction. The RV monitor includes a set of instances
+of the monitor (per-cpu monitor, per-task monitor, and so on), the helper
+functions that glue the monitor to the system reference model, and the
+trace output as a reaction to event parsing and exceptions, as depicted
+below::
+
+ Linux +----- RV Monitor ----------------------------------+ Formal
+ Realm | | Realm
+ +-------------------+ +----------------+ +-----------------+
+ | Linux kernel | | Monitor | | Reference |
+ | Tracing | -> | Instance(s) | <- | Model |
+ | (instrumentation) | | (verification) | | (specification) |
+ +-------------------+ +----------------+ +-----------------+
+ | | |
+ | V |
+ | +----------+ |
+ | | Reaction | |
+ | +--+--+--+-+ |
+ | | | | |
+ | | | +-> trace output ? |
+ +------------------------|--|----------------------+
+ | +----> panic ?
+ +-------> <user-specified>
+
+RV monitor synthesis
+--------------------
+
+The synthesis of a specification into the Linux *RV monitor* abstraction is
+automated by the rvgen tool and the header files containing common code for
+creating monitors. The header files are:
+
+ * rv/da_monitor.h for deterministic automaton monitor.
+ * rv/ltl_monitor.h for linear temporal logic monitor.
+
+rvgen
+-----
+
+The rvgen utility converts a specification into a C representation and creates
+the skeleton of a kernel monitor in C.
+
+For example, it is possible to transform the wip.dot model present in
+[1] into a per-cpu monitor with the following command::
+
+ $ rvgen monitor -c da -s wip.dot -t per_cpu
+
+This will create a directory named wip/ with the following files:
+
+- wip.h: the wip model in C
+- wip.c: the RV monitor
+
+The wip.c file contains the monitor declaration and the starting point for
+the system instrumentation.
+
+Similarly, a linear temporal logic monitor can be generated with the following
+command::
+
+ $ rvgen monitor -c ltl -s pagefault.ltl -t per_task
+
+This generates pagefault/ directory with:
+
+- pagefault.h: The Buchi automaton (the non-deterministic state machine to
+ verify the specification)
+- pagefault.c: The skeleton for the RV monitor
+
+Monitor header files
+--------------------
+
+The header files:
+
+- `rv/da_monitor.h` for deterministic automaton monitor
+- `rv/ltl_monitor.h` for linear temporal logic monitor
+
+include common macros and static functions for implementing *Monitor
+Instance(s)*.
+
+The benefits of having all common functionality in a single header file are
+3-fold, as it:
+
+ - Reduces code duplication;
+ - Facilitates bug fixes and improvements;
+ - Avoids the case of developers changing the core of the monitor code to
+ manipulate the model in a (let's say) non-standard way.
+
+rv/da_monitor.h
++++++++++++++++
+
+This initial implementation presents three different types of monitor instances:
+
+- ``#define DECLARE_DA_MON_GLOBAL(name, type)``
+- ``#define DECLARE_DA_MON_PER_CPU(name, type)``
+- ``#define DECLARE_DA_MON_PER_TASK(name, type)``
+
+The first declares the functions for a global deterministic automata monitor,
+the second for monitors with per-cpu instances, and the third with per-task
+instances.
+
+In all cases, the 'name' argument is a string that identifies the monitor, and
+the 'type' argument is the data type used by rvgen on the representation of
+the model in C.
+
+For example, the wip model with two states and three events can be
+stored in an 'unsigned char' type. Considering that the preemption control
+is a per-cpu behavior, the monitor declaration in the 'wip.c' file is::
+
+ DECLARE_DA_MON_PER_CPU(wip, unsigned char);
+
+The monitor is executed by sending events to be processed via the functions
+presented below::
+
+ da_handle_event_$(MONITOR_NAME)($(event from event enum));
+ da_handle_start_event_$(MONITOR_NAME)($(event from event enum));
+ da_handle_start_run_event_$(MONITOR_NAME)($(event from event enum));
+
+The function ``da_handle_event_$(MONITOR_NAME)()`` is the regular case where
+the event will be processed if the monitor is processing events.
+
+When a monitor is enabled, it is placed in the initial state of the automata.
+However, the monitor does not know if the system is in the *initial state*.
+
+The ``da_handle_start_event_$(MONITOR_NAME)()`` function is used to notify the
+monitor that the system is returning to the initial state, so the monitor can
+start monitoring the next event.
+
+The ``da_handle_start_run_event_$(MONITOR_NAME)()`` function is used to notify
+the monitor that the system is known to be in the initial state, so the
+monitor can start monitoring, processing the current event as well.
+
+Using the wip model as an example, the events "preempt_disable" and
+"sched_waking" should be sent to the monitor, respectively, via [2]::
+
+ da_handle_event_wip(preempt_disable_wip);
+ da_handle_event_wip(sched_waking_wip);
+
+While the event "preempt_enable" will use::
+
+ da_handle_start_event_wip(preempt_enable_wip);
+
+This notifies the monitor that the system will be returning to the initial
+state, so that the system and the monitor are in sync.
+
+rv/ltl_monitor.h
+++++++++++++++++
+This file must be combined with the $(MODEL_NAME).h file (generated by `rvgen`)
+to be complete. For example, for the `pagefault` monitor, the `pagefault.c`
+source file must include::
+
+ #include "pagefault.h"
+ #include <rv/ltl_monitor.h>
+
+(the skeleton monitor file generated by `rvgen` already does this).
+
+`$(MODEL_NAME).h` (`pagefault.h` in the above example) includes the
+implementation of the Buchi automaton - a non-deterministic state machine that
+verifies the LTL specification - while `rv/ltl_monitor.h` includes the common
+helper functions to interact with the Buchi automaton and to implement an RV
+monitor. An important definition in `$(MODEL_NAME).h` is::
+
+ enum ltl_atom {
+ LTL_$(FIRST_ATOMIC_PROPOSITION),
+ LTL_$(SECOND_ATOMIC_PROPOSITION),
+ ...
+ LTL_NUM_ATOM
+ };
+
+which is the list of atomic propositions present in the LTL specification
+(prefixed with "LTL\_" to avoid name collision). This `enum` is passed to the
+functions interacting with the Buchi automaton.
+
+While generating code, `rvgen` cannot understand the meaning of the atomic
+propositions. Thus, that task is left for manual work. The recommended
+practice is to add tracepoints at the places where the atomic propositions
+change; in the tracepoints' handlers, the Buchi automaton is executed using::
+
+ void ltl_atom_update(struct task_struct *task, enum ltl_atom atom, bool value)
+
+which tells the Buchi automaton that the atomic proposition `atom` is now
+`value`. The Buchi automaton checks whether the LTL specification is still
+satisfied, and invokes the monitor's error tracepoint and the reactor if
+violation is detected.
+
+Tracepoints and `ltl_atom_update()` should be used whenever possible. However,
+this is sometimes not the most convenient approach. For some atomic
+propositions which are changed in multiple places in the kernel, it is
+cumbersome to trace all those places. Furthermore, it may not be important
+that the atomic propositions are updated at precise times. For example,
+consider the following linear temporal logic::
+
+ RULE = always (RT imply not PAGEFAULT)
+
+This LTL states that a real-time task does not raise page faults. For this
+specification, it is not important when `RT` changes, as long as it has the
+correct value when `PAGEFAULT` is true. Motivated by this case, another
+function is introduced::
+
+ void ltl_atom_fetch(struct task_struct *task, struct ltl_monitor *mon)
+
+This function is called whenever the Buchi automaton is triggered. Therefore, it
+can be manually implemented to "fetch" `RT`::
+
+ void ltl_atom_fetch(struct task_struct *task, struct ltl_monitor *mon)
+ {
+ ltl_atom_set(mon, LTL_RT, rt_task(task));
+ }
+
+Effectively, whenever `PAGEFAULT` is updated with a call to `ltl_atom_update()`,
+`RT` is also fetched. Thus, the LTL specification can be verified without
+tracing `RT` everywhere.
+
+Atomic propositions which act like events usually need to be set (or cleared)
+and then immediately cleared (or set). A convenient function is provided for
+this::
+
+ void ltl_atom_pulse(struct task_struct *task, enum ltl_atom atom, bool value)
+
+which is equivalent to::
+
+ ltl_atom_update(task, atom, value);
+ ltl_atom_update(task, atom, !value);
+
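+For instance, a probe attached to a page fault tracepoint might pulse the
+`PAGEFAULT` proposition. A sketch (the handler signature matches the x86
+`page_fault_user` tracepoint and is illustrative; probe registration is
+omitted)::
+
+  static void handle_page_fault(void *data, unsigned long address,
+                                struct pt_regs *regs,
+                                unsigned long error_code)
+  {
+          /* PAGEFAULT is momentarily true for the current task. */
+          ltl_atom_pulse(current, LTL_PAGEFAULT, true);
+  }
+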
+To initialize the atomic propositions, the following function must be
+implemented::
+
+ ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation)
+
+This function is called for all running tasks when the monitor is enabled. It
+is also called for new tasks created after the monitor is enabled. It should
+initialize as many atomic propositions as possible, for example::
+
+ void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation)
+ {
+ ltl_atom_set(mon, LTL_RT, rt_task(task));
+ if (task_creation)
+ ltl_atom_set(mon, LTL_PAGEFAULT, false);
+ }
+
+Atomic propositions not initialized by `ltl_atoms_init()` will stay in the
+unknown state until the relevant tracepoints are hit, which can take some
+time. As monitoring for a task cannot start until all atomic propositions are
+known for the task, the monitor may need some time to start validating tasks
+that were already running before the monitor was enabled. Therefore, it is
+recommended to start the tasks of interest after enabling the monitor.
+
+Final remarks
+-------------
+
+With the monitor synthesis in place using the header files and
+rvgen, the developer's work should be limited to the instrumentation
+of the system, increasing the confidence in the overall approach.
+
+[1] For details about deterministic automata format and the translation
+from one representation to another, see::
+
+ Documentation/trace/rv/deterministic_automata.rst
+
+[2] rvgen appends the monitor's name as a suffix to the event enums to
+avoid conflicting variables when exporting the global vmlinux.h
+used by BPF programs.
diff --git a/Documentation/translations/zh_CN/core-api/memory-hotplug.rst b/Documentation/translations/zh_CN/core-api/memory-hotplug.rst
index 9b2841fb9a5f..c2a4122ae221 100644
--- a/Documentation/translations/zh_CN/core-api/memory-hotplug.rst
+++ b/Documentation/translations/zh_CN/core-api/memory-hotplug.rst
@@ -62,7 +62,6 @@ memory_notify结构体的指针::
struct memory_notify {
unsigned long start_pfn;
unsigned long nr_pages;
- int status_change_nid_normal;
int status_change_nid;
}
@@ -70,8 +69,6 @@ memory_notify结构体的指针::
- nr_pages是在线/离线内存的页数。
-- status_change_nid_normal是当nodemask的N_NORMAL_MEMORY被设置/清除时设置节
- 点id,如果是-1,则nodemask状态不改变。
- status_change_nid是当nodemask的N_MEMORY被(将)设置/清除时设置的节点id。这
意味着一个新的(没上线的)节点通过联机获得新的内存,而一个节点失去了所有的内
diff --git a/Documentation/translations/zh_CN/how-to.rst b/Documentation/translations/zh_CN/how-to.rst
index 569b0209385a..ddd99c0f9b4d 100644
--- a/Documentation/translations/zh_CN/how-to.rst
+++ b/Documentation/translations/zh_CN/how-to.rst
@@ -1,19 +1,19 @@
.. SPDX-License-Identifier: GPL-2.0
-=========================
-Linux内核中文文档翻译规范
-=========================
+==========================
+Linux 内核中文文档翻译规范
+==========================
修订记录:
- - v1.0 2025年3月28日,司延腾、慕冬亮共同编写了该规范。
+ - v1.0 2025 年 3 月 28 日,司延腾、慕冬亮共同编写了该规范。
制定规范的背景
==============
过去几年,在广大社区爱好者的友好合作下,Linux 内核中文文档迎来了蓬勃的发
展。在翻译的早期,一切都是混乱的,社区对译稿只有一个准确翻译的要求,以鼓
-励更多的开发者参与进来,这是从0到1的必然过程,所以早期的中文文档目录更加
-具有多样性,不过好在文档不多,维护上并没有过大的压力。
+励更多的开发者参与进来,这是从 0 到 1 的必然过程,所以早期的中文文档目录
+更加具有多样性,不过好在文档不多,维护上并没有过大的压力。
然而,世事变幻,不觉有年,现在内核中文文档在前进的道路上越走越远,很多潜
在的问题逐渐浮出水面,而且随着中文文档数量的增加,翻译更多的文档与提高中
@@ -34,7 +34,7 @@ reviewer 们只能耐心地指导他们如何与社区更好地合作,但是这
========
工欲善其事必先利其器,如果您目前对内核文档翻译满怀热情,并且会独立地安装
-linux 发行版和简单地使用 linux 命令行,那么可以迅速开始了。若您尚不具备该
+Linux 发行版和简单地使用 Linux 命令行,那么可以迅速开始了。若您尚不具备该
能力,很多网站上会有详细的手把手教程,最多一个上午,您应该就能掌握对应技
能。您需要注意的一点是,请不要使用 root 用户进行后续步骤和文档翻译。
@@ -66,11 +66,18 @@ linux 发行版和简单地使用 linux 命令行,那么可以迅速开始了
cd linux
./scripts/sphinx-pre-install
-以Fedora为例,它的输出是这样的::
+以 Fedora 为例,它的输出是这样的::
You should run:
- sudo dnf install -y dejavu-sans-fonts dejavu-sans-mono-fonts dejavu-serif-fonts google-noto-sans-cjk-fonts graphviz-gd latexmk librsvg2-tools texlive-anyfontsize texlive-capt-of texlive-collection-fontsrecommended texlive-ctex texlive-eqparbox texlive-fncychap texlive-framed texlive-luatex85 texlive-multirow texlive-needspace texlive-tabulary texlive-threeparttable texlive-upquote texlive-wrapfig texlive-xecjk
+ sudo dnf install -y dejavu-sans-fonts dejavu-sans-mono-fonts \
+ dejavu-serif-fonts google-noto-sans-cjk-fonts graphviz-gd \
+ latexmk librsvg2-tools texlive-anyfontsize texlive-capt-of \
+ texlive-collection-fontsrecommended texlive-ctex \
+ texlive-eqparbox texlive-fncychap texlive-framed \
+ texlive-luatex85 texlive-multirow texlive-needspace \
+ texlive-tabulary texlive-threeparttable texlive-upquote \
+ texlive-wrapfig texlive-xecjk
Sphinx needs to be installed either:
1) via pip/pypi with:
@@ -92,7 +99,8 @@ linux 发行版和简单地使用 linux 命令行,那么可以迅速开始了
https://github.com/sphinx-doc/sphinx/pull/8313
请您按照提示复制打印的命令到命令行执行,您必须具备 root 权限才能执行 sudo
-开头的命令。
+开头的命令。**请注意**,最新版本 Sphinx 的文档编译速度有极大提升,强烈建议
+您通过 pip/pypi 安装最新版本 Sphinx。
如果您处于一个多用户环境中,为了避免对其他人造成影响,建议您配置单用户
sphinx 虚拟环境,即只需要执行::
@@ -126,11 +134,11 @@ sphinx 虚拟环境,即只需要执行::
检查编译结果
------------
-编译输出在Documentation/output/目录下,请用浏览器打开该目录下对应
+编译输出在 Documentation/output/ 目录下,请用浏览器打开该目录下对应
的文件进行检查。
-git和邮箱配置
--------------
+Git 和邮箱配置
+--------------
打开命令行执行::
@@ -150,11 +158,11 @@ git和邮箱配置
smtpencryption = ssl
smtpserver = smtp.migadu.com
smtpuser = si.yanteng@linux.dev
- smtppass = <passwd> # 建议使用第三方客户端专用密码
+ smtppass = <passwd> # 建议使用第三方客户端专用密码
chainreplyto = false
smtpserverport = 465
-关于邮件客户端的配置,请查阅Documentation/translations/zh_CN/process/email-clients.rst。
+关于邮件客户端的配置,请查阅 Documentation/translations/zh_CN/process/email-clients.rst。
开始翻译文档
============
@@ -162,8 +170,8 @@ git和邮箱配置
文档索引结构
------------
-目前中文文档是在Documentation/translations/zh_CN/目录下进行,该
-目录结构最终会与Documentation/结构一致,所以您只需要将您感兴趣的英文
+目前中文文档是在 Documentation/translations/zh_CN/ 目录下进行,该
+目录结构最终会与 Documentation/ 结构一致,所以您只需要将您感兴趣的英文
文档文件和对应的 index.rst 复制到 zh_CN 目录下对应的位置,然后修改更
上一级的 index 即可开始您的翻译。
@@ -177,13 +185,12 @@ git和邮箱配置
请执行以下命令,新建开发分支::
git checkout docs-next
- git branch my-trans
- git checkout my-trans
+ git checkout -b my-trans
译文格式要求
------------
- - 每行长度最多不超过40个字符
+ - 每行长度最多不超过 40 个字符
- 每行长度请保持一致
- 标题的下划线长度请按照一个英文一个字符、一个中文两个字符与标题对齐
- 其它的修饰符请与英文文档保持一致
@@ -192,7 +199,7 @@ git和邮箱配置
.. SPDX-License-Identifier: GPL-2.0
.. include:: ../disclaimer-zh_CN.rst #您需要了解该文件的路径,根
- 据您实际翻译的文档灵活调整
+ 据您实际翻译的文档灵活调整
:Original: Documentation/xxx/xxx.rst #替换为您翻译的英文文档路径
@@ -203,11 +210,11 @@ git和邮箱配置
翻译技巧
--------
-中文文档有每行40字符限制,因为一个中文字符等于2个英文字符。但是社区并没有
-那么严格,一个诀窍是将您的翻译的内容与英文原文的每行长度对齐即可,这样,
+中文文档有每行 40 字符限制,因为一个中文字符等于 2 个英文字符。但是社区并
+没有那么严格,一个诀窍是将您的翻译的内容与英文原文的每行长度对齐即可,这样,
您也不必总是检查有没有超限。
-如果您的英文阅读能力有限,可以考虑使用辅助翻译工具,例如 deepseek 。但是您
+如果您的英文阅读能力有限,可以考虑使用辅助翻译工具,例如 deepseek。但是您
必须仔细地打磨,使译文达到“信达雅”的标准。
**请注意** 社区不接受纯机器翻译的文档,社区工作建立在信任的基础上,请认真对待。
@@ -248,14 +255,17 @@ git和邮箱配置
Translate .../security/self-protection.rst into Chinese.
- Update the translation through commit b080e52110ea #请执行git log <您翻译的英文文档路径> 复制最顶部第一个补丁的sha值的前12位,替换掉12位sha值。
+ Update the translation through commit b080e52110ea
("docs: update self-protection __ro_after_init status")
+ # 请执行 git log --oneline <您翻译的英文文档路径>,并替换上述内容
- Signed-off-by: Yanteng Si <si.yanteng@linux.dev> #如果您前面的步骤正确执行,该行会自动显示,否则请检查gitconfig文件。
+ Signed-off-by: Yanteng Si <si.yanteng@linux.dev>
+ # 如果您前面的步骤正确执行,该行会自动显示,否则请检查 gitconfig 文件
保存并退出。
-**请注意** 以上四行,缺少任何一行,您都将会在第一轮审阅后返工,如果您需要一个更加明确的示例,请对 zh_CN 目录执行 git log。
+**请注意** 以上四行,缺少任何一行,您都将会在第一轮审阅后返工,如果您需要一个
+更加明确的示例,请对 zh_CN 目录执行 git log。
导出补丁和制作封面
------------------
@@ -263,6 +273,7 @@ git和邮箱配置
这个时候,可以导出补丁,做发送邮件列表最后的准备了。命令行执行::
git format-patch -N
+ # N 要替换为补丁数量,一般 N 大于等于 1
然后命令行会输出类似下面的内容::
@@ -286,13 +297,13 @@ warning 不需要解决::
然后执行以下命令为补丁追加更改::
git checkout docs-next
- git branch test-trans
+ git checkout -b test-trans-new
git am 0001-xxxxx.patch
./scripts/checkpatch.pl 0001-xxxxx.patch
- 直接修改您的翻译
+ # 直接修改您的翻译
git add .
git commit --amend
- 保存退出
+ # 保存退出
git am 0002-xxxxx.patch
……
@@ -301,28 +312,30 @@
最后,如果检测时没有 warning 和 error 需要被处理或者您只有一个补丁,请跳
过下面这个步骤,否则请重新导出补丁制作封面::
- git format-patch -N --cover-letter --thread=shallow #N为您的补丁数量,N一般要大于1。
+ git format-patch -N --cover-letter --thread=shallow
+ # N 要替换为补丁数量,一般 N 大于 1
然后命令行会输出类似下面的内容::
0000-cover-letter.patch
0001-docs-zh_CN-add-xxxxxxxx.patch
0002-docs-zh_CN-add-xxxxxxxx.patch
+ ……
-您需要用编辑器打开0号补丁,修改两处内容::
+您需要用编辑器打开 0 号补丁,修改两处内容::
vim 0000-cover-letter.patch
...
- Subject: [PATCH 0/1] *** SUBJECT HERE *** #修改该字段,概括您的补丁集都做了哪些事情
+ Subject: [PATCH 0/N] *** SUBJECT HERE *** #修改该字段,概括您的补丁集都做了哪些事情
- *** BLURB HERE *** #修改该字段,详细描述您的补丁集做了哪些事情
+ *** BLURB HERE *** #修改该字段,详细描述您的补丁集做了哪些事情
Yanteng Si (1):
docs/zh_CN: add xxxxx
...
-如果您只有一个补丁,则可以不制作封面,即0号补丁,只需要执行::
+如果您只有一个补丁,则可以不制作封面,即 0 号补丁,只需要执行::
git format-patch -1
@@ -345,9 +358,10 @@ warning 不需要解决::
打开上面您保存的邮件地址,执行::
- git send-email *.patch --to <maintainer email addr> --cc <others addr> #一个to对应一个地址,一个cc对应一个地址,有几个就写几个。
+ git send-email *.patch --to <maintainer email addr> --cc <others addr>
+ # 一个 to 对应一个地址,一个 cc 对应一个地址,有几个就写几个
-执行该命令时,请确保网络通畅,邮件发送成功一般会返回250。
+执行该命令时,请确保网络通畅,邮件发送成功一般会返回 250。
您可以先发送给自己,尝试发出的 patch 是否可以用 'git am' 工具正常打上。
如果检查正常, 您就可以放心的发送到社区评审了。
@@ -382,15 +396,15 @@ reviewer 的评论,做到每条都有回复,每个回复都落实到位。
每次迭代一个补丁,不要一次多个::
git am <您要修改的补丁>
- 直接对文件进行您的修改
+ # 直接对文件进行您的修改
git add .
git commit --amend
当您将所有的评论落实到位后,导出第二版补丁,并修改封面::
- git format-patch -N -v 2 --cover-letter --thread=shallow
+ git format-patch -N -v 2 --cover-letter --thread=shallow
-打开0号补丁,在 BLURB HERE 处编写相较于上个版本,您做了哪些改动。
+打开 0 号补丁,在 BLURB HERE 处编写相较于上个版本,您做了哪些改动。
然后执行::
@@ -414,7 +428,7 @@ reviewer 的评论,做到每条都有回复,每个回复都落实到位。
如果您发送到邮件列表之后,发现发错了补丁集,尤其是在多个版本迭代的过程中;
自己发现了一些不妥的翻译;发送错了邮件列表……
-git email默认会抄送给您一份,所以您可以切换为审阅者的角色审查自己的补丁,
+git email 默认会抄送给您一份,所以您可以切换为审阅者的角色审查自己的补丁,
并留下评论,描述有何不妥,将在下个版本怎么改,并付诸行动,重新提交,但是
注意频率,每天提交的次数不要超过两次。
@@ -425,9 +439,9 @@ git email默认会抄送给您一份,所以您可以切换为审阅者的角色
./script/checktransupdate.py -l zh_CN``
-该命令会列出需要翻译或更新的英文文档。
+该命令会列出需要翻译或更新的英文文档,结果同时保存在 checktransupdate.log 中。
-关于详细操作说明,请参考: Documentation/translations/zh_CN/doc-guide/checktransupdate.rst\
+关于详细操作说明,请参考:Documentation/translations/zh_CN/doc-guide/checktransupdate.rst。
进阶
----
@@ -439,8 +453,8 @@ git email默认会抄送给您一份,所以您可以切换为审阅者的角色
常见的问题
==========
-Maintainer回复补丁不能正常apply
-------------------------------
+Maintainer 回复补丁不能正常 apply
+---------------------------------
这通常是因为您的补丁与邮件列表其他人的补丁产生了冲突,别人的补丁先被 apply 了,
您的补丁集就无法成功 apply 了,这需要您更新本地分支,在本地解决完冲突后再次提交。
@@ -455,5 +469,5 @@ Maintainer回复补丁不能正常apply
大部分情况下,是由于您发送了非纯文本格式的信件,请尽量避免使用 webmail,推荐
使用邮件客户端,比如 thunderbird,记得在设置中的回信配置那改为纯文本发送。
-如果超过了24小时,您依旧没有在<https://lore.kernel.org/linux-doc/>发现您的邮
-件,请联系您的网络管理员帮忙解决。
+如果超过了 24 小时,您依旧没有在<https://lore.kernel.org/linux-doc/>发现您的
+邮件,请联系您的网络管理员帮忙解决。
diff --git a/Documentation/translations/zh_CN/networking/alias.rst b/Documentation/translations/zh_CN/networking/alias.rst
new file mode 100644
index 000000000000..e024d9eac50e
--- /dev/null
+++ b/Documentation/translations/zh_CN/networking/alias.rst
@@ -0,0 +1,56 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/networking/alias.rst
+
+:翻译:
+
+ 邱禹潭 Qiu Yutan <qiu.yutan@zte.com.cn>
+
+:校译:
+
+======
+IP别名
+======
+
+IP别名是管理每个接口存在多个IP地址/子网掩码的一种过时方法。
+虽然更新的工具如iproute2支持每个接口多个地址/前缀,
+但为了向后兼容性,别名仍被支持。
+
+别名通过在使用 ifconfig 时在接口名后添加冒号和一个字符串来创建。
+这个字符串通常是数字,但并非必须。
+
+
+别名创建
+========
+
+别名的创建是通过“特殊的”接口命名机制完成的:例如,
+要为eth0创建一个 200.1.1.1 的别名...
+::
+
+ # ifconfig eth0:0 200.1.1.1 等等
+ ~~ -> 请求为eth0创建别名#0(如果尚不存在)
+
+该命令也会设置相应的路由表项。请注意:路由表项始终指向基础接口。
+
+
+别名删除
+========
+
+通过关闭别名即可将其删除::
+
+ # ifconfig eth0:0 down
+ ~~~~~~~~~~ -> 将删除别名
+
+
+别名(重新)配置
+================
+
+别名不是真实的设备,但程序应该能够正常配置和引用它们(ifconfig、route等)。
+
+
+与主设备的关系
+==============
+
+如果基础设备被关闭,则其上添加的所有别名也将被删除。
diff --git a/Documentation/translations/zh_CN/networking/index.rst b/Documentation/translations/zh_CN/networking/index.rst
index d07dd69f980b..bb0edcffd144 100644
--- a/Documentation/translations/zh_CN/networking/index.rst
+++ b/Documentation/translations/zh_CN/networking/index.rst
@@ -21,6 +21,12 @@
:maxdepth: 1
msg_zerocopy
+ napi
+ vxlan
+ netif-msg
+ xfrm_proc
+ netmem
+ alias
Todolist:
@@ -45,7 +51,6 @@ Todolist:
* page_pool
* phy
* sfp-phylink
-* alias
* bridge
* snmp_counter
* checksum-offloads
@@ -94,14 +99,11 @@ Todolist:
* mptcp-sysctl
* multiqueue
* multi-pf-netdev
-* napi
* net_cachelines/index
* netconsole
* netdev-features
* netdevices
* netfilter-sysctl
-* netif-msg
-* netmem
* nexthop-group-resilient
* nf_conntrack-sysctl
* nf_flowtable
@@ -142,11 +144,9 @@ Todolist:
* tuntap
* udplite
* vrf
-* vxlan
* x25
* x25-iface
* xfrm_device
-* xfrm_proc
* xfrm_sync
* xfrm_sysctl
* xdp-rx-metadata
diff --git a/Documentation/translations/zh_CN/networking/napi.rst b/Documentation/translations/zh_CN/networking/napi.rst
new file mode 100644
index 000000000000..619971c3dea3
--- /dev/null
+++ b/Documentation/translations/zh_CN/networking/napi.rst
@@ -0,0 +1,362 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/networking/napi.rst
+
+:Translator:
+
+ 王亚鑫 Yaxin Wang <wang.yaxin@zte.com.cn>
+
+====
+NAPI
+====
+
+NAPI is the event handling mechanism used by the Linux networking stack.
+The name NAPI no longer stands for anything in particular [#]_.
+
+In basic operation the device notifies the host about new events via an
+interrupt. The host then schedules a NAPI instance to process the events.
+The device may also be polled for events via NAPI without receiving
+interrupts first (:ref:`busy polling<poll_zh_CN>`).
+
+NAPI processing usually happens in the software interrupt context, but there
+is an option to use :ref:`separate kernel threads<threaded_zh_CN>` for NAPI
+processing.
+
+All in all, NAPI abstracts away from the drivers the context and
+configuration of event (packet Rx and Tx) processing.
+
+Driver API
+==========
+
+The two most important elements of NAPI are the struct napi_struct and the
+associated poll method. struct napi_struct holds the state of the NAPI
+instance while the method is the driver-specific event handler. The method
+will typically free Tx packets that have been transmitted and process newly
+received packets.
+
+.. _drv_ctrl_zh_CN:
+
+Control API
+-----------
+
+netif_napi_add() and netif_napi_del() add/remove a NAPI instance from the
+system. The instances are attached to the netdevice passed as argument (and
+will be deleted automatically when the netdevice is unregistered). Instances
+are added in a disabled state.
+
+napi_enable() and napi_disable() manage the disabled state. A disabled NAPI
+can't be scheduled and its poll method is guaranteed not to be invoked.
+napi_disable() waits for ownership of the NAPI instance to be released.
+
+The control APIs are not idempotent. Control API calls are safe against
+concurrent use of datapath APIs, but an incorrect sequence of control API
+calls may result in crashes, deadlocks, or race conditions. For example,
+calling napi_disable() multiple times in a row will deadlock.
+
+æ•°æ®è·¯å¾„API
+-----------
+
+napi_schedule() 是调度 NAPI 轮询的基本方法。驱动程åºåº”在其中断处ç†ç¨‹åºä¸­è°ƒç”¨æ­¤å‡½æ•°
+(更多信æ¯è¯·å‚è§ :ref:`drv_sched_zh_CN`)。æˆåŠŸçš„ napi_schedule() 调用将获得 NAPI 实例
+的所有æƒã€‚
+
+之åŽï¼Œåœ¨ NAPI 被调度åŽï¼Œé©±åŠ¨ç¨‹åºçš„ poll 方法将被调用以处ç†äº‹ä»¶/æ•°æ®åŒ…。该方法接å—一个
+``budget`` 傿•° - 驱动程åºå¯ä»¥å¤„ç†ä»»æ„æ•°é‡çš„å‘é€ (Tx) æ•°æ®åŒ…完æˆï¼Œä½†å¤„ç†æœ€å¤šå¤„ç†
+``budget`` 个接收 (Rx) æ•°æ®åŒ…ã€‚å¤„ç†æŽ¥æ”¶æ•°æ®åŒ…通常开销更大。
+
+æ¢å¥è¯è¯´ï¼Œå¯¹äºŽæŽ¥æ”¶æ•°æ®åŒ…的处ç†ï¼Œ``budget`` 傿•°é™åˆ¶äº†é©±åŠ¨ç¨‹åºåœ¨å•次轮询中能够处ç†çš„æ•°
+æ®åŒ…æ•°é‡ã€‚当 ``budget`` 为 0 时,åƒé¡µé¢æ± æˆ– XDP 这类专门用于接收的 API 根本无法使用。
+无论 ``budget`` 的值是多少,skb çš„å‘é€å¤„ç†éƒ½åº”该进行,但是如果 ``budget`` 傿•°ä¸º 0,
+驱动程åºå°±ä¸èƒ½è°ƒç”¨ä»»ä½• XDPï¼ˆæˆ–é¡µé¢æ± ï¼‰API。
+
+.. warning::
+
+ 如果内核仅å°è¯•处ç†skbçš„å‘é€å®Œæˆæƒ…况,而ä¸å¤„ç†æŽ¥æ”¶ (Rx) 或 XDP æ•°æ®åŒ…,那么 ``budget``
+ 傿•°å¯èƒ½ä¸º 0。
+
+轮询方法会返回已完æˆçš„工作é‡ã€‚如果驱动程åºä»æœ‰æœªå®Œæˆçš„工作(例如,``budget`` 已用完),
+轮询方法应精确返回 ``budget`` çš„å€¼ã€‚åœ¨è¿™ç§æƒ…况下,NAPI å®žä¾‹å°†å†æ¬¡è¢«å¤„ç† / 轮询(无需
+釿–°è°ƒåº¦ï¼‰ã€‚
+
+如果事件处ç†å·²å®Œæˆï¼ˆæ‰€æœ‰æœªå¤„ç†çš„æ•°æ®åŒ…都已处ç†å®Œæ¯•),轮询方法在返回之å‰åº”调用 napi_complete_done()。
+napi_complete_done() 会释放实例的所有æƒã€‚
+
+.. warning::
+
+ 当出现既完æˆäº†æ‰€æœ‰äº‹ä»¶å¤„ç†ï¼Œåˆæ°å¥½è¾¾åˆ°äº† ``budget`` æ•°é‡çš„æƒ…况时,必须谨慎处ç†ã€‚因为没
+ 有办法将这ç§ï¼ˆå¾ˆå°‘出现的)情况报告给å议栈,所以驱动程åºè¦ä¹ˆä¸è°ƒç”¨ napi_complete_done()
+ å¹¶ç­‰å¾…å†æ¬¡è¢«è°ƒç”¨ï¼Œè¦ä¹ˆè¿”回 ``budget - 1``。
+
+ 当 ``budget`` 为 0 时,napi_complete_done() ç»å¯¹ä¸èƒ½è¢«è°ƒç”¨ã€‚
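+
+Putting these rules together, a minimal poll method might look like the
+hedged sketch below (the ``mydrv_*`` helpers are hypothetical):
+
+.. code-block:: c
+
+    static int mydrv_poll(struct napi_struct *napi, int budget)
+    {
+        struct mydrv_ring *r = container_of(napi, struct mydrv_ring, napi);
+        int work_done = 0;
+
+        mydrv_clean_tx(r);          /* Tx completions, not limited by budget */
+        if (budget)
+            work_done = mydrv_rx(r, budget); /* at most budget Rx packets */
+
+        if (work_done < budget && napi_complete_done(napi, work_done))
+            mydrv_unmask_irq(r);    /* all done - re-enable the device IRQ */
+
+        return work_done;
+    }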
+
+Call sequence
+-------------
+
+Drivers should not make assumptions about the exact sequencing of calls. The
+poll method may be called without the driver scheduling the instance (unless
+the instance is disabled). Similarly, it's not guaranteed that the poll
+method will be called, even if napi_schedule() succeeded (e.g. if the
+instance gets disabled).
+
+As mentioned in the :ref:`drv_ctrl_zh_CN` section, napi_disable() and
+subsequent calls to the poll method only wait for the ownership of the
+instance to be released, not for the poll method to exit. This means that
+drivers should avoid accessing any data structures after calling
+napi_complete_done().
+
+.. _drv_sched_zh_CN:
+
+Scheduling and IRQ masking
+--------------------------
+
+Drivers should keep the interrupts masked after scheduling the NAPI
+instance - until NAPI polling finishes, any further interrupts are
+unnecessary.
+
+Drivers which have to mask the interrupts explicitly (as opposed to the IRQ
+being auto-masked by the device) should use the napi_schedule_prep() and
+__napi_schedule() calls:
+
+.. code-block:: c
+
+    if (napi_schedule_prep(&v->napi)) {
+        mydrv_mask_rxtx_irq(v->idx);
+        /* schedule after masking to avoid races */
+        __napi_schedule(&v->napi);
+    }
+
+The IRQ should only be unmasked after a successful call to
+napi_complete_done():
+
+.. code-block:: c
+
+    if (budget && napi_complete_done(&v->napi, work_done)) {
+        mydrv_unmask_rxtx_irq(v->idx);
+        return min(work_done, budget - 1);
+    }
+
+napi_schedule_irqoff() is a variant of napi_schedule() which takes advantage
+of guarantees given by being invoked in IRQ context (no need to mask
+interrupts). napi_schedule_irqoff() will fall back to napi_schedule() if
+IRQs are threaded (such as when ``PREEMPT_RT`` is enabled).
+
+Instance to queue mapping
+-------------------------
+
+Modern devices have multiple NAPI instances (struct napi_struct) per
+interface. There is no strong requirement on how the instances are mapped to
+queues and interrupts. NAPI is primarily a polling/processing abstraction
+without specific user-facing semantics. That said, most networking devices
+end up using NAPI in fairly similar ways.
+
+NAPI instances most often correspond 1:1:1 to interrupts and queue pairs
+(a queue pair is a set of a single Rx and a single Tx queue).
+
+In less common cases a NAPI instance may be used for multiple queues, or Rx
+and Tx queues can be serviced by separate NAPI instances on a single core.
+Regardless of the queue assignment, however, there is usually still a 1:1
+mapping between NAPI instances and interrupts.
+
+It's worth noting that the ethtool API uses a "channel" terminology where
+each channel can be either ``rx``, ``tx`` or ``combined``. It's not clear
+what exactly constitutes a channel; the recommended interpretation is to
+understand a channel as an IRQ/NAPI which services queues of a given type.
+For example, a configuration of 1 ``rx``, 1 ``tx`` and 1 ``combined``
+channel is expected to utilize 3 interrupts, 2 Rx and 2 Tx queues.
+
+Persistent NAPI config
+----------------------
+
+Drivers often allocate and free NAPI instances dynamically. This leads to
+loss of NAPI-related user configuration each time NAPI instances are
+reallocated. The netif_napi_add_config() API prevents this loss of
+configuration by associating each NAPI instance with a persistent NAPI
+configuration based on a driver-defined index value, like a queue number.
+
+Using this API allows for persistent NAPI IDs (among other settings), which
+can be beneficial for userspace programs using ``SO_INCOMING_NAPI_ID``. See
+the following sections for other NAPI configuration settings.
+
+Drivers should try to use netif_napi_add_config() whenever possible.
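+
+For illustration, a hedged one-line sketch (``qid`` standing in for a
+driver-defined queue index):
+
+.. code-block:: c
+
+    /* Sketch: bind this instance's persistent config to queue index qid. */
+    netif_napi_add_config(netdev, &r->napi, mydrv_poll, qid);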
+
+User API
+========
+
+User interactions with NAPI depend on the NAPI instance ID. The instance IDs
+are only visible to the user through the ``SO_INCOMING_NAPI_ID`` socket
+option.
+
+Users can query the NAPI IDs for a device or device queue using netlink.
+This can be done programmatically in a user application or by using a script
+included in the kernel source tree: tools/net/ynl/pyynl/cli.py.
+
+For example, dumping all of the queues for a device (which will reveal each
+queue's NAPI ID) using this script:
+
+
+.. code-block:: bash
+
+    $ kernel-source/tools/net/ynl/pyynl/cli.py \
+          --spec Documentation/netlink/specs/netdev.yaml \
+          --dump queue-get \
+          --json='{"ifindex": 2}'
+
+See ``Documentation/netlink/specs/netdev.yaml`` for more details on the
+available operations and attributes.
+
+Software IRQ coalescing
+-----------------------
+
+NAPI does not perform any explicit event coalescing by default. In most
+scenarios, batching of packets happens thanks to IRQ coalescing done by the
+device. There are cases where software coalescing is helpful.
+
+NAPI can be configured to arm a repoll timer instead of unmasking the
+hardware interrupts as soon as all packets are processed. The
+``gro_flush_timeout`` sysfs configuration of the netdevice controls the
+delay of the timer, while ``napi_defer_hard_irqs`` controls the number of
+consecutive empty polls before NAPI gives up and goes back to using hardware
+IRQs.
+
+The above parameters can also be set on a per-NAPI basis using netlink via
+netdev-genl. When configured through netlink on a per-NAPI basis, the
+parameters mentioned above use hyphens instead of underscores:
+``gro-flush-timeout`` and ``napi-defer-hard-irqs``.
+
+Per-NAPI configuration can be done programmatically in a user application or
+by using a script included in the kernel source tree:
+``tools/net/ynl/pyynl/cli.py``.
+
+For example, using the script:
+
+.. code-block:: bash
+
+    $ kernel-source/tools/net/ynl/pyynl/cli.py \
+          --spec Documentation/netlink/specs/netdev.yaml \
+          --do napi-set \
+          --json='{"id": 345,
+                   "defer-hard-irqs": 111,
+                   "gro-flush-timeout": 11111}'
+
+Similarly, the parameter ``irq-suspend-timeout`` can be set using netlink
+via netdev-genl. There is no global sysfs parameter for this value.
+
+``irq-suspend-timeout`` is used to determine how long an application can
+completely suspend IRQs. It is used in combination with SO_PREFER_BUSY_POLL,
+which can be set on a per-epoll context basis with the ``EPIOCSPARAMS``
+ioctl.
+
+.. _poll_zh_CN:
+
+Busy polling
+------------
+
+Busy polling allows a user process to check for incoming packets before the
+device interrupt fires. As is the case with any busy polling, it trades off
+CPU cycles for lower latency (production uses of NAPI busy polling are not
+well known).
+
+Busy polling is enabled by either setting ``SO_BUSY_POLL`` on selected
+sockets or using the global ``net.core.busy_poll`` and
+``net.core.busy_read`` sysctls. An io_uring API for NAPI busy polling also
+exists.
+
+epoll-based busy polling
+------------------------
+
+It is possible to trigger packet processing directly from calls to
+``epoll_wait``. In order to use this feature, a user application must ensure
+all file descriptors which are added to an epoll context have the same NAPI
+ID.
+
+If the application uses a dedicated acceptor thread, it can obtain the NAPI
+ID of the incoming connection using SO_INCOMING_NAPI_ID and then distribute
+that file descriptor to a worker thread. The worker thread adds the file
+descriptor to its epoll context. This ensures that each worker thread's
+epoll context contains file descriptors with the same NAPI ID.
+
+Alternatively, if the application uses SO_REUSEPORT, a bpf or ebpf program
+can be inserted to distribute incoming connections to threads such that each
+thread is only given incoming connections with the same NAPI ID. Care must
+be taken to handle the case where a system has multiple NICs.
+
+In order to enable busy polling, there are two choices:
+
+1. ``/proc/sys/net/core/busy_poll`` can be set with a time in microseconds
+   to busy-loop waiting for events. This is a system-wide setting and will
+   cause all epoll-based applications to busy poll when they call
+   epoll_wait. This may not be desirable, as many applications may not have
+   the need to busy poll.
+
+2. Applications using recent kernels can issue an ioctl on the epoll context
+   file descriptor to set (``EPIOCSPARAMS``) or get (``EPIOCGPARAMS``)
+   ``struct epoll_params``, which user programs define as follows:
+
+.. code-block:: c
+
+    struct epoll_params {
+        uint32_t busy_poll_usecs;
+        uint16_t busy_poll_budget;
+        uint8_t prefer_busy_poll;
+
+        /* pad the struct to a multiple of 64 bits */
+        uint8_t __pad;
+    };
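+
+For example, a hedged sketch of setting these parameters on an epoll file
+descriptor (the values are arbitrary):
+
+.. code-block:: c
+
+    struct epoll_params params = {
+        .busy_poll_usecs  = 64,  /* example values only */
+        .busy_poll_budget = 16,
+        .prefer_busy_poll = 1,
+    };
+
+    if (ioctl(epoll_fd, EPIOCSPARAMS, &params) == -1)
+        perror("EPIOCSPARAMS");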
+
+IRQ mitigation
+--------------
+
+While busy polling is commonly used by applications which care about low
+latency, a similar mechanism can be used for IRQ mitigation.
+
+Very high request-per-second applications (especially routing/forwarding
+applications and especially applications using AF_XDP sockets) may not want
+to be interrupted until they finish processing a request or a batch of
+packets.
+
+Such applications can pledge to the kernel that they will perform a busy
+polling operation periodically, and the driver should keep the device IRQs
+permanently masked. This mode is enabled by using the
+``SO_PREFER_BUSY_POLL`` socket option. To avoid system misbehavior, the
+pledge is revoked if ``gro_flush_timeout`` passes without any busy poll
+call. For epoll-based busy polling applications, the ``prefer_busy_poll``
+field of ``struct epoll_params`` can be set to 1 and the ``EPIOCSPARAMS``
+ioctl can be issued to enable this mode. See the above section for more
+details.
+
+The NAPI budget for busy polling is lower than the default (which makes
+sense given the low-latency intention of normal busy polling). This is not
+the case with IRQ mitigation, however, so the budget can be adjusted with
+the ``SO_BUSY_POLL_BUDGET`` socket option. For epoll-based busy polling
+applications, the ``busy_poll_budget`` field can be adjusted to the desired
+value in ``struct epoll_params`` and set on a specific epoll context using
+the ``EPIOCSPARAMS`` ioctl. See the above section for more details.
+
+It is important to note that choosing a large value for
+``gro_flush_timeout`` will defer IRQs to allow for better batch processing,
+but will induce latency when the system is not fully loaded. Choosing a
+small value for ``gro_flush_timeout`` can cause interference of the device
+IRQs and softirq processing with a user application which is attempting to
+busy poll. This value should be chosen carefully with these tradeoffs in
+mind. epoll-based busy polling applications may be able to mitigate how much
+user processing happens by choosing an appropriate value for ``maxevents``.
+
+Users may want to consider an alternate approach, IRQ suspension, to help
+deal with these tradeoffs.
+
+IRQ suspension
+--------------
+
+IRQ suspension is a mechanism wherein device IRQs are masked while epoll
+triggers NAPI packet processing.
+
+While application calls to epoll_wait successfully retrieve events, the
+kernel will defer the IRQ suspension timer. If the kernel does not retrieve
+any events while busy polling (for example, because network traffic levels
+subsided), IRQ suspension is disabled and the IRQ mitigation strategies
+described above are engaged.
+
+This allows users to balance CPU consumption with network processing
+efficiency.
+
+To use this mechanism:
+
+  1. The per-NAPI config parameter ``irq-suspend-timeout`` should be set to
+     the maximum time (in nanoseconds) the application can have its IRQs
+     suspended. This is done using netlink, as described above. This timeout
+     serves as a safety mechanism to restart IRQ driver interrupt processing
+     if the application has stalled. This value should be chosen so that it
+     covers the amount of time the user application needs to process data
+     from its call to epoll_wait, noting that applications can control how
+     much data they retrieve by setting ``max_events`` when calling
+     epoll_wait.
+
+  2. The sysfs parameters or per-NAPI config parameters
+     ``gro_flush_timeout`` and ``napi_defer_hard_irqs`` can be set to low
+     values. They will be used to defer IRQs after busy poll has found no
+     data.
+
+  3. The ``prefer_busy_poll`` flag must be set to true. This can be done
+     using the ``EPIOCSPARAMS`` ioctl as described above.
+
+  4. The application uses epoll as described above to trigger NAPI packet
+     processing.
+
+As mentioned above, as long as subsequent calls to epoll_wait return events
+to userspace, the ``irq-suspend-timeout`` is deferred and IRQs are disabled.
+This allows the application to process data without interference.
+
+Once a call to epoll_wait results in no events being found, IRQ suspension
+is automatically disabled and the ``gro_flush_timeout`` and
+``napi_defer_hard_irqs`` mitigation mechanisms take over.
+
+It is expected that ``irq-suspend-timeout`` will be set to a value much
+larger than ``gro_flush_timeout``, as ``irq-suspend-timeout`` should suspend
+IRQs for the duration of one userland processing cycle.
+
+While it is not strictly necessary to use ``napi_defer_hard_irqs`` and
+``gro_flush_timeout`` to use IRQ suspension, their use is strongly
+recommended.
+
+IRQ suspension causes the system to alternate between polling mode and
+irq-driven packet delivery. During busy periods, ``irq-suspend-timeout``
+overrides ``gro_flush_timeout`` and keeps the system busy polling, but when
+epoll finds no events, the settings of ``gro_flush_timeout`` and
+``napi_defer_hard_irqs`` determine the next step.
+
+There are three possible loops for network processing and packet delivery:
+
+1) hardirq -> softirq -> NAPI poll; basic interrupt delivery
+2) timer -> softirq -> NAPI poll; deferred IRQ processing
+3) epoll -> busy-poll -> NAPI poll; busy looping
+
+Loop 2 can take over from loop 1 if ``gro_flush_timeout`` and
+``napi_defer_hard_irqs`` are set.
+
+If ``gro_flush_timeout`` and ``napi_defer_hard_irqs`` are set, loops 2 and
+3 "wrestle" with each other for control.
+
+During busy periods, ``irq-suspend-timeout`` is used as the timer in loop 2,
+which essentially tilts network processing in favour of loop 3.
+
+If ``gro_flush_timeout`` and ``napi_defer_hard_irqs`` are not set, loop 3
+cannot take over from loop 1.
+
+Therefore, setting ``gro_flush_timeout`` and ``napi_defer_hard_irqs`` is
+recommended, because otherwise setting ``irq-suspend-timeout`` may not have
+any discernible effect.
+
+.. _threaded_zh_CN:
+
+Threaded NAPI
+-------------
+
+Threaded NAPI is an operating mode that uses dedicated kernel threads rather
+than software IRQ context for NAPI processing. The configuration is per
+netdevice and will affect all NAPI instances of that device. Each NAPI
+instance will spawn a separate thread (called
+``napi/${ifc-name}-${napi-id}``).
+
+It is recommended to pin each kernel thread to a single CPU, the same CPU as
+the one which services the interrupt. Note that the mapping between IRQs and
+NAPI instances may not be trivial (and is driver dependent). The NAPI
+instance IDs will be assigned in the opposite order than the process IDs of
+the kernel threads.
+
+Threaded NAPI is controlled by writing 0/1 to the ``threaded`` file in the
+netdev's sysfs directory.
+
+.. rubric:: Footnotes
+
+.. [#] NAPI was originally referred to as New API in 2.4 Linux.
diff --git a/Documentation/translations/zh_CN/networking/netif-msg.rst b/Documentation/translations/zh_CN/networking/netif-msg.rst
new file mode 100644
index 000000000000..877399b169fe
--- /dev/null
+++ b/Documentation/translations/zh_CN/networking/netif-msg.rst
@@ -0,0 +1,92 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/networking/netif-msg.rst
+
+:Translator:
+
+ 王亚鑫 Wang Yaxin <wang.yaxin@zte.com.cn>
+
+================================
+Network interface message levels
+================================
+
+The design of the network interface message level setting.
+
+History
+-------
+
+The design of the debugging message interface was guided and constrained by
+backwards compatibility and previous practice. It is useful to understand
+its history in order to grasp current practice and relate it to older driver
+source code.
+
+From the beginning of Linux, each network device driver has had a local
+integer variable that controls the debug message level. The message level
+ranged from 0 to 7, with higher values producing more verbose output.
+
+The message level was never precisely defined past level 3, but
+implementations were usually within +-1 of the specified level. As drivers
+matured, redundant verbose-level messages were often removed.
+
+  - 0  Minimal messages, only essential information on fatal errors.
+  - 1  Standard messages, initialization status. No run-time messages.
+  - 2  Special media selection messages, generally timer-driven.
+  - 3  Interface starts and stops, including normal status messages.
+  - 4  Tx and Rx frame error messages, and abnormal driver operation.
+  - 5  Tx packet queue information, interrupt events.
+  - 6  Status on each completed Tx packet and received Rx packet.
+  - 7  Initial contents of Tx and Rx packets.
+
+Initially this message level variable was uniquely named in each driver
+(e.g. "lance_debug"), so that a kernel symbolic debugger could locate and
+modify the setting. When kernels became modular, the variables were
+consistently renamed to "debug" and allowed to be set as a module parameter.
+
+This approach worked well. However, there was always a demand for additional
+features. Over the years the following emerged as reasonable and easily
+implemented enhancements:
+
+  - Using an ioctl() call to modify the message level.
+  - Per-interface rather than per-driver message level settings.
+  - More selective control over the type of messages emitted.
+
+The netif_msg recommendation adds these features with only a minor increase
+in complexity and code size.
+
+The recommendation is as follows:
+
+  - Retain the per-driver integer variable "debug" as a module parameter
+    with a default level of '1'.
+
+  - Add a per-interface private variable named "msg_enable". The variable
+    is a bit map rather than a level, and is initialized as::
+
+       1 << debug
+
+    Or more precisely::
+
+       debug < 0 ? 0 : 1 << min(sizeof(int)-1, debug)
+
+    Messages should be changed from::
+
+      if (debug > 1)
+           printk(MSG_DEBUG "%s: ...
+
+    to::
+
+      if (np->msg_enable & NETIF_MSG_LINK)
+           printk(MSG_DEBUG "%s: ...
+
+The message levels map to names as follows:
+
+
+  ========= =================== ============
+  Old level Name                Bit position
+  ========= =================== ============
+    1       NETIF_MSG_PROBE     0x0002
+    2       NETIF_MSG_LINK      0x0004
+    2       NETIF_MSG_TIMER     0x0004
+    3       NETIF_MSG_IFDOWN    0x0008
+    3       NETIF_MSG_IFUP      0x0008
+    4       NETIF_MSG_RX_ERR    0x0010
+    4       NETIF_MSG_TX_ERR    0x0010
+    5       NETIF_MSG_TX_QUEUED 0x0020
+    5       NETIF_MSG_INTR      0x0020
+    6       NETIF_MSG_TX_DONE   0x0040
+    6       NETIF_MSG_RX_STATUS 0x0040
+    7       NETIF_MSG_PKTDATA   0x0080
+  ========= =================== ============
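+
+As a hedged modern-style sketch, the equivalent check is usually written
+with the netif_msg_* helpers, assuming a driver-private struct ``np`` that
+contains ``msg_enable``::
+
+      if (netif_msg_link(np))
+           netdev_info(np->dev, "link is up\n");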
diff --git a/Documentation/translations/zh_CN/networking/netmem.rst b/Documentation/translations/zh_CN/networking/netmem.rst
new file mode 100644
index 000000000000..fe351a240f02
--- /dev/null
+++ b/Documentation/translations/zh_CN/networking/netmem.rst
@@ -0,0 +1,92 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/networking/netmem.rst
+
+:Translator:
+
+ 王亚鑫 Wang Yaxin <wang.yaxin@zte.com.cn>
+
+==================================
+Netmem support for network drivers
+==================================
+
+This document outlines the requirements for network drivers to support
+netmem, an abstract memory type which enables features like device memory
+TCP. By supporting netmem, drivers can work flexibly with different
+underlying memory types (such as device memory TCP) with little to no
+modification.
+
+Benefits of netmem:
+
+* Flexibility: netmem can be backed by different memory types (e.g. struct
+  page, DMA-buf), allowing drivers to support various use cases such as
+  device memory TCP.
+* Future-proofing: drivers that support netmem can adapt seamlessly to new
+  features that depend on it.
+* Simplified development: drivers interact with netmem through a unified
+  API, without having to worry about the differences of the underlying
+  memory implementations.
+
+Driver Rx requirements
+======================
+
+1. The driver must support page_pool.
+
+2. The driver must support the tcp-data-split ethtool option.
+
+3. The driver must use the page_pool netmem APIs for payload memory. The
+   netmem APIs currently correspond one-to-one with the page APIs. Convert
+   by replacing the page APIs with the netmem APIs and tracking memory via
+   netmem_refs in the driver rather than `struct page *` (see the sketch
+   after this list):
+
+   - page_pool_alloc -> page_pool_alloc_netmem
+   - page_pool_get_dma_addr -> page_pool_get_dma_addr_netmem
+   - page_pool_put_page -> page_pool_put_netmem
+
+   Not all page APIs have netmem equivalents at the moment. If your driver
+   relies on a missing netmem API, implement it and propose it to the
+   netdev@ mailing list, or reach out to the maintainers and/or
+   almasrymina@google.com for help adding the netmem API.
+
+4. The driver must set the following PP_FLAGS:
+
+   - PP_FLAG_DMA_MAP: the driver cannot do the DMA mapping of netmem
+     itself. It must delegate the DMA mapping to the page_pool, which knows
+     when DMA mapping is (or is not) appropriate.
+   - PP_FLAG_DMA_SYNC_DEV: the driver cannot assume that the DMA address of
+     netmem can always be DMA-synced. It must delegate the DMA syncing to
+     the page_pool, which knows when DMA syncing is (or is not)
+     appropriate.
+   - PP_FLAG_ALLOW_UNREADABLE_NETMEM: the driver must explicitly set this
+     flag, and only when tcp-data-split is enabled.
+
+5. The driver must not assume that netmem is readable and/or backed by
+   pages. netmem_address() returning NULL indicates that the memory is not
+   readable. The driver must handle unreadable netmem correctly, e.g. by
+   avoiding accessing the contents when netmem_address() returns NULL.
+
+   Ideally, the driver should not check the underlying netmem type via
+   helpers like netmem_is_net_iov(), and should not convert the netmem to
+   its underlying type via netmem_to_page() or netmem_to_net_iov(). In most
+   cases, netmem or page_pool helpers that abstract this complexity are
+   provided (and more can be added as needed).
+
+6. The driver must use page_pool_dma_sync_netmem_for_cpu() in lieu of
+   dma_sync_single_range_for_cpu(). For some memory providers the DMA
+   syncing for the CPU will be done by the page_pool; for others (dmabuf
+   memory providers in particular) the DMA syncing for the CPU is the
+   responsibility of the userspace using the dmabuf APIs. The driver must
+   delegate the entire DMA syncing operation to the page_pool to ensure it
+   is done correctly.
+
+7. Avoid implementing driver-specific memory recycling on top of the
+   page_pool. Since netmem may not be backed by a struct page, the driver
+   cannot keep a struct page around for custom recycling. A page_pool
+   reference may be held for this purpose via page_pool_fragment_netmem()
+   or page_pool_ref_netmem(), but note that some netmem types may have
+   longer circulation times (such as zero-copy scenarios where userspace
+   holds a reference).
+
+Driver Tx requirements
+======================
+
+1. The driver must never pass a netmem dma_addr directly to any dma-mapping
+   API, because netmem dma_addrs may originate from sources like dma-buf
+   that are incompatible with the dma-mapping APIs.
+
+   Helpers like netmem_dma_unmap_page_attrs() and
+   netmem_dma_unmap_addr_set() should be used in lieu of
+   dma_unmap_page[_attrs]() and dma_unmap_addr_set(). The netmem variants
+   handle netmem dma_addrs correctly regardless of their source, delegating
+   to the dma-mapping APIs when appropriate.
+
+   Not all dma-mapping APIs have netmem equivalents at the moment. If your
+   driver needs a netmem API that does not yet exist, add and propose it to
+   netdev@, or reach out to the maintainers and/or almasrymina@google.com
+   for help.
+
+2. The driver must indicate netmem support by setting
+   netdev->netmem_tx = true.
diff --git a/Documentation/translations/zh_CN/networking/vxlan.rst b/Documentation/translations/zh_CN/networking/vxlan.rst
new file mode 100644
index 000000000000..e319eddfcdbe
--- /dev/null
+++ b/Documentation/translations/zh_CN/networking/vxlan.rst
@@ -0,0 +1,85 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/networking/vxlan.rst
+
+:Translator:
+
+ 范雨 Fan Yu <fan.yu9@zte.com.cn>
+
+:Proofreaders:
+
+ - 邱禹潭 Qiu Yutan <qiu.yutan@zte.com.cn>
+ - 徐鑫 Xu Xin <xu.xin16@zte.com.cn>
+
+=======================================================
+Virtual eXtensible Local Area Networking documentation
+=======================================================
+
+The VXLAN protocol is a tunnelling protocol designed to solve the problem of
+limited VLAN IDs (4096) in IEEE 802.1q. With VXLAN, the size of the
+identifier is expanded to 24 bits (16777216).
+
+VXLAN is described by IETF RFC 7348 and has been implemented by a number of
+vendors. The protocol runs over UDP using a single destination port. This
+document describes the Linux kernel tunnel device; there is also a separate
+implementation of VXLAN for Openvswitch.
+
+Unlike most tunnels, a VXLAN is a 1 to N network, not just point to point.
+A VXLAN device can learn the IP address of the other endpoint dynamically,
+in a manner similar to a learning bridge, or it can make use of
+statically-configured forwarding entries.
+
+The management of VXLAN is done in a manner similar to its two closest
+neighbors, GRE and VLAN. Configuring VXLAN requires a version of iproute2
+that matches the kernel release where VXLAN was first merged upstream.
+
+1. Create vxlan device::
+
+      # ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1 dstport 4789
+
+This creates a new device named vxlan0. The device uses the multicast group
+239.1.1.1 over eth1 to handle traffic for which there is no entry in the
+forwarding table. The destination port number is set to the IANA-assigned
+value of 4789. The Linux implementation of VXLAN pre-dates the IANA's
+selection of a standard destination port number, so the Linux-selected value
+is used by default to maintain backwards compatibility.
+
+2. Delete vxlan device::
+
+      # ip link delete vxlan0
+
+3. Show vxlan info::
+
+      # ip -d link show vxlan0
+
+It is possible to create, destroy and display the vxlan forwarding table
+using the new bridge command.
+
+1. Create forwarding table entry::
+
+      # bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0
+
+2. Delete forwarding table entry::
+
+      # bridge fdb delete 00:17:42:8a:b4:05 dev vxlan0
+
+3. Show forwarding table::
+
+      # bridge fdb show dev vxlan0
+
+The following NIC features may indicate support for UDP tunnel-related
+offloads (most commonly VXLAN features, but support for a particular
+encapsulation protocol is NIC specific):
+
+ - `tx-udp_tnl-segmentation`
+ - `tx-udp_tnl-csum-segmentation`
+     ability to perform TCP segmentation offload of UDP encapsulated frames
+
+ - `rx-udp_tunnel-port-offload`
+     receive side parsing of UDP encapsulated frames which allows NICs to
+     perform protocol-aware offloads, like checksum validation offload of
+     inner frames (only needed by NICs without protocol-agnostic offloads)
+
+For devices supporting `rx-udp_tunnel-port-offload` the list of currently
+offloaded ports can be interrogated with `ethtool`::
+
+  $ ethtool --show-tunnels eth0
+  Tunnel information for eth0:
+    UDP port table 0:
+      Size: 4
+      Types: vxlan
+      No entries
+    UDP port table 1:
+      Size: 4
+      Types: geneve, vxlan-gpe
+      Entries (1):
+        port 1230, vxlan-gpe
diff --git a/Documentation/translations/zh_CN/networking/xfrm_proc.rst b/Documentation/translations/zh_CN/networking/xfrm_proc.rst
new file mode 100644
index 000000000000..a2ae86c44707
--- /dev/null
+++ b/Documentation/translations/zh_CN/networking/xfrm_proc.rst
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/networking/xfrm_proc.rst
+
+:Translator:
+
+ 王亚鑫 Wang Yaxin <wang.yaxin@zte.com.cn>
+
+==================================
+XFRM proc - /proc/net/xfrm_* files
+==================================
+
+Author: Masahide NAKAMURA <nakam@linux-ipv6.org>
+
+
+Transformation statistics
+-------------------------
+
+`xfrm_proc` provides a set of statistics counters showing numbers of packets
+dropped by the transformation code and why. These counters are part of the
+Linux private MIB and can be viewed in `/proc/net/xfrm_stat`.
+
+Inbound errors
+~~~~~~~~~~~~~~
+
+XfrmInError:
+    All errors not matched by other categories
+
+XfrmInBufferError:
+    No buffer is left
+
+XfrmInHdrError:
+    Header error
+
+XfrmInNoStates:
+    No state is found
+    (the inbound SPI, address, or IPsec protocol at the SA is wrong)
+
+XfrmInStateProtoError:
+    Transformation protocol specific error
+    (e.g. the SA key is wrong)
+
+XfrmInStateModeError:
+    Transformation mode specific error
+
+XfrmInStateSeqError:
+    Sequence number error
+    (the sequence number is out of window)
+
+XfrmInStateExpired:
+    State is expired
+
+XfrmInStateMismatch:
+    State has a mismatched option
+    (e.g. the UDP encapsulation type is mismatched)
+
+XfrmInStateInvalid:
+    State is invalid
+
+XfrmInTmplMismatch:
+    No matching template for states
+    (e.g. inbound SAs are correct but the SP rule is wrong)
+
+XfrmInNoPols:
+    No policy is found for states
+    (e.g. inbound SAs are correct but no SP is found)
+
+XfrmInPolBlock:
+    Policy discards
+
+XfrmInPolError:
+    Policy error
+
+XfrmAcquireError:
+    State hasn't been fully acquired before use
+
+XfrmFwdHdrError:
+    Forward routing of a packet is not allowed
+
+XfrmInStateDirError:
+    State direction mismatch
+    (a lookup found an output state on the input path; an input state or no
+    direction was expected)
+
+Outbound errors
+~~~~~~~~~~~~~~~
+XfrmOutError:
+    All errors not matched by other categories
+
+XfrmOutBundleGenError:
+    Bundle generation error
+
+XfrmOutBundleCheckError:
+    Bundle check error
+
+XfrmOutNoStates:
+    No state is found
+
+XfrmOutStateProtoError:
+    Transformation protocol specific error
+
+XfrmOutStateModeError:
+    Transformation mode specific error
+
+XfrmOutStateSeqError:
+    Sequence number error
+    (sequence number overflow)
+
+XfrmOutStateExpired:
+    State is expired
+
+XfrmOutPolBlock:
+    Policy discards
+
+XfrmOutPolDead:
+    Policy is dead
+
+XfrmOutPolError:
+    Policy error
+
+XfrmOutStateInvalid:
+    State is invalid (possibly expired)
+
+XfrmOutStateDirError:
+    State direction mismatch (a lookup found an input state on the output
+    path; an output state or no direction was expected)
diff --git a/Documentation/translations/zh_CN/process/1.Intro.rst b/Documentation/translations/zh_CN/process/1.Intro.rst
index 4f9284cbe33b..e314cce49d27 100644
--- a/Documentation/translations/zh_CN/process/1.Intro.rst
+++ b/Documentation/translations/zh_CN/process/1.Intro.rst
@@ -182,11 +182,11 @@ Andrew Morton, Andrew Price, Tsugikazu Shibata and Jochen Voß.
the agreement of all copyright holders can be obtained (or their code removed
from the kernel). So, especially in the foreseeable future, a move of the
license to version 3 of the GPL is unlikely.
-All code contributed to the kernel must be legitimate free software. For that
-reason, code from anonymous (or pseudonymous) contributors is not accepted.
-All contributors are required to "sign off" on their code, stating that it
-can be distributed with the kernel under the GPL. Code whose owner has not
-licensed it as free software, or which risks creating copyright problems for
-the kernel (such as code derived from reverse-engineering efforts lacking
-proper safeguards), cannot be accepted.
+All code contributed to the kernel must be legitimate free software. For that
+reason, code submitted by contributors of unknown identity or by anonymous
+contributors will not be accepted. All contributors are required to "sign
+off" on their code, stating that it can be distributed with the kernel under
+the GPL. Code whose owner has not licensed it as free software, or which
+risks creating copyright problems for the kernel (such as code derived from
+reverse-engineering efforts lacking proper safeguards), cannot be accepted.
Questions about copyright issues are common on Linux development mailing
lists. Such questions will normally receive no shortage of answers, but bear
in mind that the people answering them are not lawyers and cannot provide
legal advice. If you have legal questions about Linux source code
diff --git a/Documentation/translations/zh_CN/process/2.Process.rst b/Documentation/translations/zh_CN/process/2.Process.rst
index e68c9de0f7f8..31b0e2c994f6 100644
--- a/Documentation/translations/zh_CN/process/2.Process.rst
+++ b/Documentation/translations/zh_CN/process/2.Process.rst
@@ -292,12 +292,11 @@ Quilt is a patch management system, not a source code management system. It
a potential hazard: they may find themselves overwhelmed by a flood of email,
in violation of the conventions used on the Linux lists, or both.
-Most kernel mailing lists are run on vger.kernel.org; the main list is at:
+Most kernel mailing lists are hosted at kernel.org; the main list is at:
- http://vger.kernel.org/vger-lists.html
+ https://subspace.kernel.org
-There are lists hosted elsewhere, though; a number of them can be found at
-redhat.com/mailman/listinfo.
+There are mailing lists elsewhere too; check the MAINTAINERS file for the list relevant to a specific subsystem.
The core mailing list for kernel development is, of course, linux-kernel.
This list is an intimidating place: message volume can reach 500 per day,
the noise level is high, the conversation is severely technical, and
participants do not always show
diff --git a/Documentation/translations/zh_CN/process/5.Posting.rst b/Documentation/translations/zh_CN/process/5.Posting.rst
index 6c83a8f40310..ce37cf6a60e2 100644
--- a/Documentation/translations/zh_CN/process/5.Posting.rst
+++ b/Documentation/translations/zh_CN/process/5.Posting.rst
@@ -177,10 +177,21 @@
 - Reported-by: names a user who reported the problem fixed by this patch;
   this tag is used to give credit.
+ - Suggested-by: indicates that the idea for the patch came from the person
+   named, crediting them for the idea. This will, hopefully, inspire them to
+   help again in the future.
+
 - Cc: the named person received a copy of the patch and had the opportunity
   to comment on it.
Be careful in the addition of tags to your patches: only Cc: is appropriate
for addition without the explicit permission of the person named.
+Be careful when adding the above tags to your patches: all tags other than
+Cc:, Reported-by: and Suggested-by: require the explicit permission of the
+person named. For those three, implied permission is sufficient if the
+person contributed to the Linux kernel using that name and email address
+according to the lore archives or the commit history -- and, for
+Reported-by: and Suggested-by:, provided the report or suggestion was made
+publicly. Note that bugzilla.kernel.org is a public place in this sense, but
+the email addresses used there are private; so do not expose them in tags
+unless the person used them in earlier contributions.
+
Sending the patch
-----------------
diff --git a/Documentation/translations/zh_CN/process/6.Followthrough.rst b/Documentation/translations/zh_CN/process/6.Followthrough.rst
index 2a127e737b6a..3d19c59ca6e4 100644
--- a/Documentation/translations/zh_CN/process/6.Followthrough.rst
+++ b/Documentation/translations/zh_CN/process/6.Followthrough.rst
@@ -49,6 +49,11 @@
å˜ã€‚他们真的,几乎毫无例外地,致力于创造他们所能åšåˆ°çš„æœ€å¥½çš„内核;他们并
没有试图给雇主的竞争对手造æˆä¸é€‚。
+ - 请准备好应对看似“愚蠢â€çš„代ç é£Žæ ¼ä¿®æ”¹è¯·æ±‚,以åŠå°†éƒ¨åˆ†ä»£ç æ‹†åˆ†åˆ°å†…æ ¸
+ 共享模å—çš„è¦æ±‚。维护者的èŒè´£ä¹‹ä¸€æ˜¯ä¿æŒæ•´ä½“风格的一致性。有时这æ„味ç€ï¼Œ
+ 你在驱动中为解决æŸä¸€é—®é¢˜è€Œé‡‡ç”¨çš„巧妙å–巧方案,实际上需è¦è¢«æç‚¼ä¸ºé€šç”¨çš„
+ 内核特性,以便未æ¥å¤ç”¨ã€‚
+
æ‰€æœ‰è¿™äº›å½’æ ¹ç»“åº•å°±æ˜¯ï¼Œå½“å®¡é˜…è€…å‘æ‚¨å‘é€è¯„è®ºæ—¶ï¼Œæ‚¨éœ€è¦æ³¨æ„他们正在进行的技术
评论。ä¸è¦è®©ä»–ä»¬çš„è¡¨è¾¾æ–¹å¼æˆ–你自己的骄傲阻止此事。当你在一个补ä¸ä¸Šå¾—到评论
时,花点时间去ç†è§£è¯„论人想说什么。如果å¯èƒ½çš„è¯ï¼Œè¯·ä¿®å¤å®¡é˜…è€…è¦æ±‚您修å¤çš„内
diff --git a/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst b/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst
index 57beca02181c..92cc06dd5f4e 100644
--- a/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst
+++ b/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst
@@ -113,6 +113,8 @@ Git provides some powerful tools which allow you to rewrite your development history
changes. The git request-pull command is very useful in this regard; it will
format the request in the way other developers expect, and will also check
to be sure that you have remembered to push those changes to the public
server.
+.. _cn_development_advancedtopics_reviews:
+
Reviewing patches
-----------------
@@ -126,8 +128,20 @@ Git provides some powerful tools which allow you to rewrite your development history
suggestion is to phrase review comments as questions rather than criticisms.
Asking "how does the lock get released in this path?" always works better
than stating "the locking here is wrong".
+Another useful technique when disagreements arise is to invite others to
+join the discussion. If a discussion reaches a stalemate after a few
+exchanges, ask for the opinions of other reviewers or maintainers. Often,
+those who agree with a reviewer remain silent unless they are asked. The
+opinion of many people carries exponentially more weight.
+
Different developers will review code from different points of view. Some
will mostly be concerned with coding style and whether code lines have
trailing white space. Others will focus primarily on whether the change
implemented by the patch as a whole is a good thing for the kernel or not.
Yet others will check for problematic locking, excessive stack usage,
possible security issues, duplication of code found elsewhere, adequate
documentation, adverse effects on performance, user-space ABI changes, etc.
All types of review, as long as they lead to better code going into the
kernel, are welcome and worthwhile.
+
+There is no strict requirement to use specific tags like ``Reviewed-by``.
+In fact, reviews in plain English are encouraged even when a tag is
+provided, as they carry more information, e.g. "I looked at aspects A, B
+and C of this submission and consider it fine." Obviously, it is necessary
+to provide the review information or a reply in some form; otherwise
+maintainers will have no way of knowing whether the reviewer has looked at
+the patch at all!
+
+Last, but not least, patch review may become a negative process, focused on
+pointing out problems. Please throw in a compliment once in a while,
+particularly for newcomers!
diff --git a/Documentation/translations/zh_CN/staging/index.rst b/Documentation/translations/zh_CN/staging/index.rst
index bb55c81c84a3..6d68fabce175 100644
--- a/Documentation/translations/zh_CN/staging/index.rst
+++ b/Documentation/translations/zh_CN/staging/index.rst
@@ -13,6 +13,7 @@
.. toctree::
:maxdepth: 2
+ speculation
xz
TODOList:
@@ -21,6 +22,5 @@ TODOList:
* lzo
* remoteproc
* rpmsg
-* speculation
* static-keys
* tee
diff --git a/Documentation/translations/zh_CN/staging/speculation.rst b/Documentation/translations/zh_CN/staging/speculation.rst
new file mode 100644
index 000000000000..c36d33f67897
--- /dev/null
+++ b/Documentation/translations/zh_CN/staging/speculation.rst
@@ -0,0 +1,85 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/staging/speculation.rst
+
+:Translator:
+
+ 崔巍 Cui Wei <chris.wei.cui@gmail.com>
+
+===========
+Speculation
+===========
+
+This document explains potential effects of speculation, and how undesirable
+effects can be mitigated using common APIs.
+
+------------------------------------------------------------------------------
+
+To improve performance and minimize average latencies, many contemporary
+CPUs employ speculative execution techniques such as branch prediction,
+performing work which may be discarded at a later stage.
+
+Typically speculative execution cannot be observed from architectural state,
+such as the contents of registers. However, in some cases it is possible to
+observe its impact on microarchitectural state, such as the presence or
+absence of data in caches. Such state may form side-channels which can be
+observed to extract secret information.
+
+For example, in the presence of branch prediction, it is possible for bounds
+checks to be ignored by code which is speculatively executed. Consider the
+following code::
+
+ int load_array(int *array, unsigned int index)
+ {
+ if (index >= MAX_ARRAY_ELEMS)
+ return 0;
+ else
+ return array[index];
+ }
+
+On arm64, this may be compiled to an assembly sequence such as::
+
+ CMP <index>, #MAX_ARRAY_ELEMS
+ B.LT less
+ MOV <returnval>, #0
+ RET
+ less:
+ LDR <returnval>, [<array>, <index>]
+ RET
+
+It is possible that a CPU mis-predicts the conditional branch and
+speculatively loads array[index], even if index >= MAX_ARRAY_ELEMS. This
+value will subsequently be discarded, but the speculative load may affect
+microarchitectural state which can subsequently be measured.
+
+More complex sequences involving multiple dependent memory accesses may
+result in sensitive information being leaked. Building on the earlier
+example, consider the following code::
+
+ int load_dependent_arrays(int *arr1, int *arr2, int index)
+ {
+ int val1, val2;
+
+ val1 = load_array(arr1, index);
+ val2 = load_array(arr2, val1);
+
+ return val2;
+ }
+
+æ ¹æ®æŽ¨æµ‹ï¼Œå¯¹load_array()的第一次调用å¯èƒ½ä¼šè¿”回一个越界地å€çš„值,而第二次调用将影å“
+ä¾èµ–于该值的微架构状æ€ã€‚è¿™å¯èƒ½ä¼šæä¾›ä¸€ä¸ªä»»æ„读å–原语。
+
+缓解推测执行侧信é“
+==================
+
+内核æä¾›äº†ä¸€ä¸ªé€šç”¨API以确ä¿å³ä½¿åœ¨æŽ¨æµ‹æƒ…况下也能éµå®ˆè¾¹ç•Œæ£€æŸ¥ã€‚å—æŽ¨æµ‹æ‰§è¡Œä¾§ä¿¡é“å½±å“
+的架构应当实现这些原语。
+
+<linux/nospec.h>中的array_index_nospec()辅助函数å¯ç”¨äºŽé˜²æ­¢ä¿¡æ¯é€šè¿‡ä¾§ä¿¡é“泄æ¼ã€‚
+
+调用array_index_nospec(index, size)将返回一个ç»è¿‡å‡€åŒ–的索引值,å³ä½¿åœ¨CPU推测执行
+æ¡ä»¶ä¸‹ï¼Œè¯¥å€¼ä¹Ÿä¼šè¢«ä¸¥æ ¼é™åˆ¶åœ¨[0, size)范围内。
+
+è¿™å¯ä»¥ç”¨æ¥ä¿æŠ¤å‰é¢çš„load_array()示例::
+
+ int load_array(int *array, unsigned int index)
+ {
+ if (index >= MAX_ARRAY_ELEMS)
+ return 0;
+ else {
+ index = array_index_nospec(index, MAX_ARRAY_ELEMS);
+ return array[index];
+ }
+ }
diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst
index 1998dc146c56..5f90af1fb573 100644
--- a/Documentation/usb/gadget-testing.rst
+++ b/Documentation/usb/gadget-testing.rst
@@ -874,7 +874,7 @@ where uvc-gadget is this program:
with these patches:
- http://www.spinics.net/lists/linux-usb/msg99220.html
+ https://lore.kernel.org/r/1386675637-18243-1-git-send-email-r.baldyga@samsung.com/
host::
diff --git a/Documentation/userspace-api/dma-buf-heaps.rst b/Documentation/userspace-api/dma-buf-heaps.rst
index 535f49047ce6..1dfe5e7acd5a 100644
--- a/Documentation/userspace-api/dma-buf-heaps.rst
+++ b/Documentation/userspace-api/dma-buf-heaps.rst
@@ -19,7 +19,10 @@ following heaps:
- The ``cma`` heap allocates physically contiguous, cacheable,
buffers. Only present if a CMA region is present. Such a region is
usually created either through the kernel commandline through the
- `cma` parameter, a memory region Device-Tree node with the
- `linux,cma-default` property set, or through the `CMA_SIZE_MBYTES` or
- `CMA_SIZE_PERCENTAGE` Kconfig options. Depending on the platform, it
- might be called ``reserved``, ``linux,cma``, or ``default-pool``.
+ ``cma`` parameter, a memory region Device-Tree node with the
+ ``linux,cma-default`` property set, or through the ``CMA_SIZE_MBYTES`` or
+ ``CMA_SIZE_PERCENTAGE`` Kconfig options. The heap's name in devtmpfs is
+ ``default_cma_region``. For backwards compatibility, when the
+ ``DMABUF_HEAPS_CMA_LEGACY`` Kconfig option is set, a duplicate node is
+ created following legacy naming conventions; the legacy name might be
+ ``reserved``, ``linux,cma``, or ``default-pool``.
diff --git a/Documentation/userspace-api/fwctl/fwctl.rst b/Documentation/userspace-api/fwctl/fwctl.rst
index fdcfe418a83f..a74eab8d14c6 100644
--- a/Documentation/userspace-api/fwctl/fwctl.rst
+++ b/Documentation/userspace-api/fwctl/fwctl.rst
@@ -54,7 +54,7 @@ operated by the block layer but also comes with a set of RPCs to administer the
construction of drives within the HW RAID.
In the past when devices were more single function, individual subsystems would
-grow different approaches to solving some of these common problems. For instance
+grow different approaches to solving some of these common problems. For instance,
monitoring device health, manipulating its FLASH, debugging the FW,
provisioning, all have various unique interfaces across the kernel.
@@ -87,7 +87,7 @@ device today may broadly have several function-level scopes:
3. Multiple VM functions tightly scoped within the VM
The device may create a logical parent/child relationship between these scopes.
-For instance a child VM's FW may be within the scope of the hypervisor FW. It is
+For instance, a child VM's FW may be within the scope of the hypervisor FW. It is
quite common in the VFIO world that the hypervisor environment has a complex
provisioning/profiling/configuration responsibility for the function VFIO
assigns to the VM.
@@ -105,19 +105,19 @@ some general scopes of action (see enum fwctl_rpc_scope):
3. Write access to function & child debug information strictly compatible with
the principles of kernel lockdown and kernel integrity protection. Triggers
- a kernel Taint.
+ a kernel taint.
- 4. Full debug device access. Triggers a kernel Taint, requires CAP_SYS_RAWIO.
+ 4. Full debug device access. Triggers a kernel taint, requires CAP_SYS_RAWIO.
User space will provide a scope label on each RPC and the kernel must enforce the
above CAPs and taints based on that scope. A combination of kernel and FW can
enforce that RPCs are placed in the correct scope by user space.
-Denied behavior
----------------
+Disallowed behavior
+-------------------
There are many things this interface must not allow user space to do (without a
-Taint or CAP), broadly derived from the principles of kernel lockdown. Some
+taint or CAP), broadly derived from the principles of kernel lockdown. Some
examples:
1. DMA to/from arbitrary memory, hang the system, compromise FW integrity with
@@ -138,8 +138,8 @@ examples:
fwctl is not a replacement for device direct access subsystems like uacce or
VFIO.
-Operations exposed through fwctl's non-taining interfaces should be fully
-sharable with other users of the device. For instance exposing a RPC through
+Operations exposed through fwctl's non-tainting interfaces should be fully
+sharable with other users of the device. For instance, exposing a RPC through
fwctl should never prevent a kernel subsystem from also concurrently using that
same RPC or hardware unit down the road. In such cases fwctl will be less
important than proper kernel subsystems that eventually emerge. Mistakes in this
@@ -225,12 +225,12 @@ subsystems.
Each device type must be mindful of Linux's philosophy for stable ABI. The FW
RPC interface does not have to meet a strictly stable ABI, but it does need to
-meet an expectation that userspace tools that are deployed and in significant
+meet an expectation that user space tools that are deployed and in significant
use don't needlessly break. FW upgrade and kernel upgrade should keep widely
deployed tooling working.
Development and debugging focused RPCs under more permissive scopes can have
-less stabilitiy if the tools using them are only run under exceptional
+less stability if the tools using them are only run under exceptional
circumstances and not for every day use of the device. Debugging tools may even
require exact version matching as they may require something similar to DWARF
debug information from the FW binary.
@@ -261,7 +261,7 @@ Some examples:
- HW RAID controllers. This includes RPCs to do things like compose drives into
a RAID volume, configure RAID parameters, monitor the HW and more.
- - Baseboard managers. RPCs for configuring settings in the device and more
+ - Baseboard managers. RPCs for configuring settings in the device and more.
- NVMe vendor command capsules. nvme-cli provides access to some monitoring
functions that different products have defined, but more exist.
@@ -269,15 +269,15 @@ Some examples:
- CXL also has a NVMe-like vendor command system.
- DRM allows user space drivers to send commands to the device via kernel
- mediation
+ mediation.
- RDMA allows user space drivers to directly push commands to the device
- without kernel involvement
+ without kernel involvement.
- Various "raw" APIs, raw HID (SDL2), raw USB, NVMe Generic Interface, etc.
The first 4 are examples of areas that fwctl intends to cover. The latter three
-are examples of denied behavior as they fully overlap with the primary purpose
+are examples of disallowed behavior as they fully overlap with the primary purpose
of a kernel subsystem.
Some key lessons learned from these past efforts are the importance of having a
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 4f1532a251d2..406a9f4d0869 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -10,12 +10,14 @@ Michael Elizabeth Chastain
If you are adding new ioctl's to the kernel, you should use the _IO
macros defined in <linux/ioctl.h>:
- ====== == ============================================
- _IO an ioctl with no parameters
- _IOW an ioctl with write parameters (copy_from_user)
- _IOR an ioctl with read parameters (copy_to_user)
- _IOWR an ioctl with both write and read parameters.
- ====== == ============================================
+ ====== ===========================
+ macro parameters
+ ====== ===========================
+ _IO none
+ _IOW write (read from userspace)
+ _IOR read (write to userspace)
+ _IOWR write and read
+ ====== ===========================
'Write' and 'read' are from the user's point of view, just like the
system calls 'write' and 'read'. For example, a SET_FOO ioctl would
@@ -23,9 +25,9 @@ be _IOW, although the kernel would actually read data from user space;
a GET_FOO ioctl would be _IOR, although the kernel would actually write
data to user space.
-The first argument to _IO, _IOW, _IOR, or _IOWR is an identifying letter
-or number from the table below. Because of the large number of drivers,
-many drivers share a partial letter with other drivers.
+The first argument to the macros is an identifying letter or number from
+the table below. Because of the large number of drivers, many drivers
+share a partial letter with other drivers.
If you are writing a driver for a new device and need a letter, pick an
unused block with enough room for expansion: 32 to 256 ioctl commands
@@ -33,12 +35,14 @@ should suffice. You can register the block by patching this file and
submitting the patch through :doc:`usual patch submission process
</process/submitting-patches>`.
-The second argument to _IO, _IOW, _IOR, or _IOWR is a sequence number
-to distinguish ioctls from each other. The third argument to _IOW,
-_IOR, or _IOWR is the type of the data going into the kernel or coming
-out of the kernel (e.g. 'int' or 'struct foo'). NOTE! Do NOT use
-sizeof(arg) as the third argument as this results in your ioctl thinking
-it passes an argument of type size_t.
+The second argument is a sequence number to distinguish ioctls from each
+other. The third argument (not applicable to _IO) is the type of the data
+going into the kernel or coming out of the kernel (e.g. 'int' or
+'struct foo').
+
+.. note::
+ Do NOT use sizeof(arg) as the third argument as this results in your
+ ioctl thinking it passes an argument of type size_t.
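+
+For illustration, a hedged sketch of such definitions for an imaginary 'foo'
+driver (the letter and sequence numbers below are invented and would still
+need to be registered in the table that follows)::
+
+    #include <linux/ioctl.h>
+
+    struct foo_config { int mode; };
+
+    #define FOO_RESET  _IO('}', 0x40)                      /* no data */
+    #define FOO_SETCFG _IOW('}', 0x41, struct foo_config)  /* user -> kernel */
+    #define FOO_GETCFG _IOR('}', 0x42, struct foo_config)  /* kernel -> user */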
Some devices use their major number as the identifier; this is OK, as
long as it is unique. Some devices are irregular and don't follow any
@@ -51,7 +55,7 @@ Following this convention is good because:
error rather than some unexpected behaviour.
(2) The 'strace' build procedure automatically finds ioctl numbers
- defined with _IO, _IOW, _IOR, or _IOWR.
+ defined with the macros.
(3) 'strace' can decode numbers back into useful names when the
numbers are unique.
@@ -65,344 +69,344 @@ Following this convention is good because:
This table lists ioctls visible from userland, excluding ones from
drivers/staging/.
-==== ===== ======================================================= ================================================================
-Code Seq# Include File Comments
+==== ===== ========================================================= ================================================================
+Code Seq# Include File Comments
(hex)
-==== ===== ======================================================= ================================================================
-0x00 00-1F linux/fs.h conflict!
-0x00 00-1F scsi/scsi_ioctl.h conflict!
-0x00 00-1F linux/fb.h conflict!
-0x00 00-1F linux/wavefront.h conflict!
+==== ===== ========================================================= ================================================================
+0x00 00-1F linux/fs.h conflict!
+0x00 00-1F scsi/scsi_ioctl.h conflict!
+0x00 00-1F linux/fb.h conflict!
+0x00 00-1F linux/wavefront.h conflict!
0x02 all linux/fd.h
0x03 all linux/hdreg.h
-0x04 D2-DC linux/umsdos_fs.h Dead since 2.6.11, but don't reuse these.
+0x04 D2-DC linux/umsdos_fs.h Dead since 2.6.11, but don't reuse these.
0x06 all linux/lp.h
0x07 9F-D0 linux/vmw_vmci_defs.h, uapi/linux/vm_sockets.h
0x09 all linux/raid/md_u.h
0x10 00-0F drivers/char/s390/vmcp.h
0x10 10-1F arch/s390/include/uapi/sclp_ctl.h
0x10 20-2F arch/s390/include/uapi/asm/hypfs.h
-0x12 all linux/fs.h BLK* ioctls
+0x12 all linux/fs.h BLK* ioctls
linux/blkpg.h
linux/blkzoned.h
linux/blk-crypto.h
-0x15 all linux/fs.h FS_IOC_* ioctls
-0x1b all InfiniBand Subsystem
- <http://infiniband.sourceforge.net/>
+0x15 all linux/fs.h FS_IOC_* ioctls
+0x1b all InfiniBand Subsystem
+ <http://infiniband.sourceforge.net/>
0x20 all drivers/cdrom/cm206.h
0x22 all scsi/sg.h
-0x3E 00-0F linux/counter.h <mailto:linux-iio@vger.kernel.org>
+0x3E 00-0F linux/counter.h <mailto:linux-iio@vger.kernel.org>
'!' 00-1F uapi/linux/seccomp.h
-'#' 00-3F IEEE 1394 Subsystem
- Block for the entire subsystem
+'#' 00-3F IEEE 1394 Subsystem
+ Block for the entire subsystem
'$' 00-0F linux/perf_counter.h, linux/perf_event.h
-'%' 00-0F include/uapi/linux/stm.h System Trace Module subsystem
- <mailto:alexander.shishkin@linux.intel.com>
+'%' 00-0F include/uapi/linux/stm.h System Trace Module subsystem
+ <mailto:alexander.shishkin@linux.intel.com>
'&' 00-07 drivers/firewire/nosy-user.h
-'*' 00-1F uapi/linux/user_events.h User Events Subsystem
- <mailto:linux-trace-kernel@vger.kernel.org>
-'1' 00-1F linux/timepps.h PPS kit from Ulrich Windl
- <ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/>
+'*' 00-1F uapi/linux/user_events.h User Events Subsystem
+ <mailto:linux-trace-kernel@vger.kernel.org>
+'1' 00-1F linux/timepps.h PPS kit from Ulrich Windl
+ <ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/>
'2' 01-04 linux/i2o.h
-'3' 00-0F drivers/s390/char/raw3270.h conflict!
-'3' 00-1F linux/suspend_ioctls.h, conflict!
+'3' 00-0F drivers/s390/char/raw3270.h conflict!
+'3' 00-1F linux/suspend_ioctls.h, conflict!
kernel/power/user.c
-'8' all SNP8023 advanced NIC card
- <mailto:mcr@solidum.com>
+'8' all SNP8023 advanced NIC card
+ <mailto:mcr@solidum.com>
';' 64-7F linux/vfio.h
';' 80-FF linux/iommufd.h
-'=' 00-3f uapi/linux/ptp_clock.h <mailto:richardcochran@gmail.com>
-'@' 00-0F linux/radeonfb.h conflict!
-'@' 00-0F drivers/video/aty/aty128fb.c conflict!
-'A' 00-1F linux/apm_bios.h conflict!
-'A' 00-0F linux/agpgart.h, conflict!
+'=' 00-3f uapi/linux/ptp_clock.h <mailto:richardcochran@gmail.com>
+'@' 00-0F linux/radeonfb.h conflict!
+'@' 00-0F drivers/video/aty/aty128fb.c conflict!
+'A' 00-1F linux/apm_bios.h conflict!
+'A' 00-0F linux/agpgart.h, conflict!
drivers/char/agp/compat_ioctl.h
-'A' 00-7F sound/asound.h conflict!
-'B' 00-1F linux/cciss_ioctl.h conflict!
-'B' 00-0F include/linux/pmu.h conflict!
-'B' C0-FF advanced bbus <mailto:maassen@uni-freiburg.de>
-'B' 00-0F xen/xenbus_dev.h conflict!
-'C' all linux/soundcard.h conflict!
-'C' 01-2F linux/capi.h conflict!
-'C' F0-FF drivers/net/wan/cosa.h conflict!
+'A' 00-7F sound/asound.h conflict!
+'B' 00-1F linux/cciss_ioctl.h conflict!
+'B' 00-0F include/linux/pmu.h conflict!
+'B' C0-FF advanced bbus <mailto:maassen@uni-freiburg.de>
+'B' 00-0F xen/xenbus_dev.h conflict!
+'C' all linux/soundcard.h conflict!
+'C' 01-2F linux/capi.h conflict!
+'C' F0-FF drivers/net/wan/cosa.h conflict!
'D' all arch/s390/include/asm/dasd.h
-'D' 40-5F drivers/scsi/dpt/dtpi_ioctl.h Dead since 2022
+'D' 40-5F drivers/scsi/dpt/dtpi_ioctl.h Dead since 2022
'D' 05 drivers/scsi/pmcraid.h
-'E' all linux/input.h conflict!
-'E' 00-0F xen/evtchn.h conflict!
-'F' all linux/fb.h conflict!
-'F' 01-02 drivers/scsi/pmcraid.h conflict!
-'F' 20 drivers/video/fsl-diu-fb.h conflict!
-'F' 20 linux/ivtvfb.h conflict!
-'F' 20 linux/matroxfb.h conflict!
-'F' 20 drivers/video/aty/atyfb_base.c conflict!
-'F' 00-0F video/da8xx-fb.h conflict!
-'F' 80-8F linux/arcfb.h conflict!
-'F' DD video/sstfb.h conflict!
-'G' 00-3F drivers/misc/sgi-gru/grulib.h conflict!
-'G' 00-0F xen/gntalloc.h, xen/gntdev.h conflict!
-'H' 00-7F linux/hiddev.h conflict!
-'H' 00-0F linux/hidraw.h conflict!
-'H' 01 linux/mei.h conflict!
-'H' 02 linux/mei.h conflict!
-'H' 03 linux/mei.h conflict!
-'H' 00-0F sound/asound.h conflict!
-'H' 20-40 sound/asound_fm.h conflict!
-'H' 80-8F sound/sfnt_info.h conflict!
-'H' 10-8F sound/emu10k1.h conflict!
-'H' 10-1F sound/sb16_csp.h conflict!
-'H' 10-1F sound/hda_hwdep.h conflict!
-'H' 40-4F sound/hdspm.h conflict!
-'H' 40-4F sound/hdsp.h conflict!
+'E' all linux/input.h conflict!
+'E' 00-0F xen/evtchn.h conflict!
+'F' all linux/fb.h conflict!
+'F' 01-02 drivers/scsi/pmcraid.h conflict!
+'F' 20 drivers/video/fsl-diu-fb.h conflict!
+'F' 20 linux/ivtvfb.h conflict!
+'F' 20 linux/matroxfb.h conflict!
+'F' 20 drivers/video/aty/atyfb_base.c conflict!
+'F' 00-0F video/da8xx-fb.h conflict!
+'F' 80-8F linux/arcfb.h conflict!
+'F' DD video/sstfb.h conflict!
+'G' 00-3F drivers/misc/sgi-gru/grulib.h conflict!
+'G' 00-0F xen/gntalloc.h, xen/gntdev.h conflict!
+'H' 00-7F linux/hiddev.h conflict!
+'H' 00-0F linux/hidraw.h conflict!
+'H' 01 linux/mei.h conflict!
+'H' 02 linux/mei.h conflict!
+'H' 03 linux/mei.h conflict!
+'H' 00-0F sound/asound.h conflict!
+'H' 20-40 sound/asound_fm.h conflict!
+'H' 80-8F sound/sfnt_info.h conflict!
+'H' 10-8F sound/emu10k1.h conflict!
+'H' 10-1F sound/sb16_csp.h conflict!
+'H' 10-1F sound/hda_hwdep.h conflict!
+'H' 40-4F sound/hdspm.h conflict!
+'H' 40-4F sound/hdsp.h conflict!
'H' 90 sound/usb/usx2y/usb_stream.h
-'H' 00-0F uapi/misc/habanalabs.h conflict!
+'H' 00-0F uapi/misc/habanalabs.h conflict!
'H' A0 uapi/linux/usb/cdc-wdm.h
-'H' C0-F0 net/bluetooth/hci.h conflict!
-'H' C0-DF net/bluetooth/hidp/hidp.h conflict!
-'H' C0-DF net/bluetooth/cmtp/cmtp.h conflict!
-'H' C0-DF net/bluetooth/bnep/bnep.h conflict!
-'H' F1 linux/hid-roccat.h <mailto:erazor_de@users.sourceforge.net>
+'H' C0-F0 net/bluetooth/hci.h conflict!
+'H' C0-DF net/bluetooth/hidp/hidp.h conflict!
+'H' C0-DF net/bluetooth/cmtp/cmtp.h conflict!
+'H' C0-DF net/bluetooth/bnep/bnep.h conflict!
+'H' F1 linux/hid-roccat.h <mailto:erazor_de@users.sourceforge.net>
'H' F8-FA sound/firewire.h
-'I' all linux/isdn.h conflict!
-'I' 00-0F drivers/isdn/divert/isdn_divert.h conflict!
-'I' 40-4F linux/mISDNif.h conflict!
+'I' all linux/isdn.h conflict!
+'I' 00-0F drivers/isdn/divert/isdn_divert.h conflict!
+'I' 40-4F linux/mISDNif.h conflict!
'K' all linux/kd.h
-'L' 00-1F linux/loop.h conflict!
-'L' 10-1F drivers/scsi/mpt3sas/mpt3sas_ctl.h conflict!
-'L' E0-FF linux/ppdd.h encrypted disk device driver
- <http://linux01.gwdg.de/~alatham/ppdd.html>
-'M' all linux/soundcard.h conflict!
-'M' 01-16 mtd/mtd-abi.h conflict!
+'L' 00-1F linux/loop.h conflict!
+'L' 10-1F drivers/scsi/mpt3sas/mpt3sas_ctl.h conflict!
+'L' E0-FF linux/ppdd.h encrypted disk device driver
+ <http://linux01.gwdg.de/~alatham/ppdd.html>
+'M' all linux/soundcard.h conflict!
+'M' 01-16 mtd/mtd-abi.h conflict!
and drivers/mtd/mtdchar.c
'M' 01-03 drivers/scsi/megaraid/megaraid_sas.h
-'M' 00-0F drivers/video/fsl-diu-fb.h conflict!
+'M' 00-0F drivers/video/fsl-diu-fb.h conflict!
'N' 00-1F drivers/usb/scanner.h
'N' 40-7F drivers/block/nvme.c
-'N' 80-8F uapi/linux/ntsync.h NT synchronization primitives
- <mailto:wine-devel@winehq.org>
-'O' 00-06 mtd/ubi-user.h UBI
-'P' all linux/soundcard.h conflict!
-'P' 60-6F sound/sscape_ioctl.h conflict!
-'P' 00-0F drivers/usb/class/usblp.c conflict!
-'P' 01-09 drivers/misc/pci_endpoint_test.c conflict!
-'P' 00-0F xen/privcmd.h conflict!
-'P' 00-05 linux/tps6594_pfsm.h conflict!
+'N' 80-8F uapi/linux/ntsync.h NT synchronization primitives
+ <mailto:wine-devel@winehq.org>
+'O' 00-06 mtd/ubi-user.h UBI
+'P' all linux/soundcard.h conflict!
+'P' 60-6F sound/sscape_ioctl.h conflict!
+'P' 00-0F drivers/usb/class/usblp.c conflict!
+'P' 01-09 drivers/misc/pci_endpoint_test.c conflict!
+'P' 00-0F xen/privcmd.h conflict!
+'P' 00-05 linux/tps6594_pfsm.h conflict!
'Q' all linux/soundcard.h
-'R' 00-1F linux/random.h conflict!
-'R' 01 linux/rfkill.h conflict!
+'R' 00-1F linux/random.h conflict!
+'R' 01 linux/rfkill.h conflict!
'R' 20-2F linux/trace_mmap.h
'R' C0-DF net/bluetooth/rfcomm.h
'R' E0 uapi/linux/fsl_mc.h
-'S' all linux/cdrom.h conflict!
-'S' 80-81 scsi/scsi_ioctl.h conflict!
-'S' 82-FF scsi/scsi.h conflict!
-'S' 00-7F sound/asequencer.h conflict!
-'T' all linux/soundcard.h conflict!
-'T' 00-AF sound/asound.h conflict!
-'T' all arch/x86/include/asm/ioctls.h conflict!
-'T' C0-DF linux/if_tun.h conflict!
-'U' all sound/asound.h conflict!
-'U' 00-CF linux/uinput.h conflict!
+'S' all linux/cdrom.h conflict!
+'S' 80-81 scsi/scsi_ioctl.h conflict!
+'S' 82-FF scsi/scsi.h conflict!
+'S' 00-7F sound/asequencer.h conflict!
+'T' all linux/soundcard.h conflict!
+'T' 00-AF sound/asound.h conflict!
+'T' all arch/x86/include/asm/ioctls.h conflict!
+'T' C0-DF linux/if_tun.h conflict!
+'U' all sound/asound.h conflict!
+'U' 00-CF linux/uinput.h conflict!
'U' 00-EF linux/usbdevice_fs.h
'U' C0-CF drivers/bluetooth/hci_uart.h
-'V' all linux/vt.h conflict!
-'V' all linux/videodev2.h conflict!
-'V' C0 linux/ivtvfb.h conflict!
-'V' C0 linux/ivtv.h conflict!
-'V' C0 media/si4713.h conflict!
-'W' 00-1F linux/watchdog.h conflict!
-'W' 00-1F linux/wanrouter.h conflict! (pre 3.9)
-'W' 00-3F sound/asound.h conflict!
+'V' all linux/vt.h conflict!
+'V' all linux/videodev2.h conflict!
+'V' C0 linux/ivtvfb.h conflict!
+'V' C0 linux/ivtv.h conflict!
+'V' C0 media/si4713.h conflict!
+'W' 00-1F linux/watchdog.h conflict!
+'W' 00-1F linux/wanrouter.h conflict! (pre 3.9)
+'W' 00-3F sound/asound.h conflict!
'W' 40-5F drivers/pci/switch/switchtec.c
'W' 60-61 linux/watch_queue.h
-'X' all fs/xfs/xfs_fs.h, conflict!
+'X' all fs/xfs/xfs_fs.h, conflict!
fs/xfs/linux-2.6/xfs_ioctl32.h,
include/linux/falloc.h,
linux/fs.h,
-'X' all fs/ocfs2/ocfs_fs.h conflict!
+'X' all fs/ocfs2/ocfs_fs.h conflict!
'Z' 14-15 drivers/message/fusion/mptctl.h
-'[' 00-3F linux/usb/tmc.h USB Test and Measurement Devices
- <mailto:gregkh@linuxfoundation.org>
-'a' all linux/atm*.h, linux/sonet.h ATM on linux
- <http://lrcwww.epfl.ch/>
-'a' 00-0F drivers/crypto/qat/qat_common/adf_cfg_common.h conflict! qat driver
-'b' 00-FF conflict! bit3 vme host bridge
- <mailto:natalia@nikhefk.nikhef.nl>
-'b' 00-0F linux/dma-buf.h conflict!
-'c' 00-7F linux/comstats.h conflict!
-'c' 00-7F linux/coda.h conflict!
-'c' 00-1F linux/chio.h conflict!
-'c' 80-9F arch/s390/include/asm/chsc.h conflict!
+'[' 00-3F linux/usb/tmc.h USB Test and Measurement Devices
+ <mailto:gregkh@linuxfoundation.org>
+'a' all linux/atm*.h, linux/sonet.h ATM on linux
+ <http://lrcwww.epfl.ch/>
+'a' 00-0F drivers/crypto/qat/qat_common/adf_cfg_common.h conflict! qat driver
+'b' 00-FF conflict! bit3 vme host bridge
+ <mailto:natalia@nikhefk.nikhef.nl>
+'b' 00-0F linux/dma-buf.h conflict!
+'c' 00-7F linux/comstats.h conflict!
+'c' 00-7F linux/coda.h conflict!
+'c' 00-1F linux/chio.h conflict!
+'c' 80-9F arch/s390/include/asm/chsc.h conflict!
'c' A0-AF arch/x86/include/asm/msr.h conflict!
-'d' 00-FF linux/char/drm/drm.h conflict!
-'d' 02-40 pcmcia/ds.h conflict!
+'d' 00-FF linux/char/drm/drm.h conflict!
+'d' 02-40 pcmcia/ds.h conflict!
'd' F0-FF linux/digi1.h
-'e' all linux/digi1.h conflict!
-'f' 00-1F linux/ext2_fs.h conflict!
-'f' 00-1F linux/ext3_fs.h conflict!
-'f' 00-0F fs/jfs/jfs_dinode.h conflict!
-'f' 00-0F fs/ext4/ext4.h conflict!
-'f' 00-0F linux/fs.h conflict!
-'f' 00-0F fs/ocfs2/ocfs2_fs.h conflict!
+'e' all linux/digi1.h conflict!
+'f' 00-1F linux/ext2_fs.h conflict!
+'f' 00-1F linux/ext3_fs.h conflict!
+'f' 00-0F fs/jfs/jfs_dinode.h conflict!
+'f' 00-0F fs/ext4/ext4.h conflict!
+'f' 00-0F linux/fs.h conflict!
+'f' 00-0F fs/ocfs2/ocfs2_fs.h conflict!
'f' 13-27 linux/fscrypt.h
'f' 81-8F linux/fsverity.h
'g' 00-0F linux/usb/gadgetfs.h
'g' 20-2F linux/usb/g_printer.h
-'h' 00-7F conflict! Charon filesystem
- <mailto:zapman@interlan.net>
-'h' 00-1F linux/hpet.h conflict!
+'h' 00-7F conflict! Charon filesystem
+ <mailto:zapman@interlan.net>
+'h' 00-1F linux/hpet.h conflict!
'h' 80-8F fs/hfsplus/ioctl.c
-'i' 00-3F linux/i2o-dev.h conflict!
-'i' 0B-1F linux/ipmi.h conflict!
+'i' 00-3F linux/i2o-dev.h conflict!
+'i' 0B-1F linux/ipmi.h conflict!
'i' 80-8F linux/i8k.h
-'i' 90-9F `linux/iio/*.h` IIO
+'i' 90-9F `linux/iio/*.h` IIO
'j' 00-3F linux/joystick.h
-'k' 00-0F linux/spi/spidev.h conflict!
-'k' 00-05 video/kyro.h conflict!
-'k' 10-17 linux/hsi/hsi_char.h HSI character device
-'l' 00-3F linux/tcfs_fs.h transparent cryptographic file system
- <http://web.archive.org/web/%2A/http://mikonos.dia.unisa.it/tcfs>
-'l' 40-7F linux/udf_fs_i.h in development:
- <https://github.com/pali/udftools>
-'m' 00-09 linux/mmtimer.h conflict!
-'m' all linux/mtio.h conflict!
-'m' all linux/soundcard.h conflict!
-'m' all linux/synclink.h conflict!
-'m' 00-19 drivers/message/fusion/mptctl.h conflict!
-'m' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict!
+'k' 00-0F linux/spi/spidev.h conflict!
+'k' 00-05 video/kyro.h conflict!
+'k' 10-17 linux/hsi/hsi_char.h HSI character device
+'l' 00-3F linux/tcfs_fs.h transparent cryptographic file system
+ <http://web.archive.org/web/%2A/http://mikonos.dia.unisa.it/tcfs>
+'l' 40-7F linux/udf_fs_i.h in development:
+ <https://github.com/pali/udftools>
+'m' 00-09 linux/mmtimer.h conflict!
+'m' all linux/mtio.h conflict!
+'m' all linux/soundcard.h conflict!
+'m' all linux/synclink.h conflict!
+'m' 00-19 drivers/message/fusion/mptctl.h conflict!
+'m' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict!
'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c
-'n' 80-8F uapi/linux/nilfs2_api.h NILFS2
-'n' E0-FF linux/matroxfb.h matroxfb
-'o' 00-1F fs/ocfs2/ocfs2_fs.h OCFS2
-'o' 00-03 mtd/ubi-user.h conflict! (OCFS2 and UBI overlaps)
-'o' 40-41 mtd/ubi-user.h UBI
-'o' 01-A1 `linux/dvb/*.h` DVB
-'p' 00-0F linux/phantom.h conflict! (OpenHaptics needs this)
-'p' 00-1F linux/rtc.h conflict!
+'n' 80-8F uapi/linux/nilfs2_api.h NILFS2
+'n' E0-FF linux/matroxfb.h matroxfb
+'o' 00-1F fs/ocfs2/ocfs2_fs.h OCFS2
+'o' 00-03 mtd/ubi-user.h conflict! (OCFS2 and UBI overlaps)
+'o' 40-41 mtd/ubi-user.h UBI
+'o' 01-A1 `linux/dvb/*.h` DVB
+'p' 00-0F linux/phantom.h conflict! (OpenHaptics needs this)
+'p' 00-1F linux/rtc.h conflict!
'p' 40-7F linux/nvram.h
-'p' 80-9F linux/ppdev.h user-space parport
- <mailto:tim@cyberelk.net>
-'p' A1-A5 linux/pps.h LinuxPPS
-'p' B1-B3 linux/pps_gen.h LinuxPPS
- <mailto:giometti@linux.it>
+'p' 80-9F linux/ppdev.h user-space parport
+ <mailto:tim@cyberelk.net>
+'p' A1-A5 linux/pps.h LinuxPPS
+'p' B1-B3 linux/pps_gen.h LinuxPPS
+ <mailto:giometti@linux.it>
'q' 00-1F linux/serio.h
-'q' 80-FF linux/telephony.h Internet PhoneJACK, Internet LineJACK
- linux/ixjuser.h <http://web.archive.org/web/%2A/http://www.quicknet.net>
+'q' 80-FF linux/telephony.h Internet PhoneJACK, Internet LineJACK
+ linux/ixjuser.h <http://web.archive.org/web/%2A/http://www.quicknet.net>
'r' 00-1F linux/msdos_fs.h and fs/fat/dir.c
's' all linux/cdk.h
't' 00-7F linux/ppp-ioctl.h
't' 80-8F linux/isdn_ppp.h
-'t' 90-91 linux/toshiba.h toshiba and toshiba_acpi SMM
-'u' 00-1F linux/smb_fs.h gone
-'u' 00-2F linux/ublk_cmd.h conflict!
-'u' 20-3F linux/uvcvideo.h USB video class host driver
-'u' 40-4f linux/udmabuf.h userspace dma-buf misc device
-'v' 00-1F linux/ext2_fs.h conflict!
-'v' 00-1F linux/fs.h conflict!
-'v' 00-0F linux/sonypi.h conflict!
-'v' 00-0F media/v4l2-subdev.h conflict!
-'v' 20-27 arch/powerpc/include/uapi/asm/vas-api.h VAS API
-'v' C0-FF linux/meye.h conflict!
-'w' all CERN SCI driver
-'y' 00-1F packet based user level communications
- <mailto:zapman@interlan.net>
-'z' 00-3F CAN bus card conflict!
- <mailto:hdstich@connectu.ulm.circular.de>
-'z' 40-7F CAN bus card conflict!
- <mailto:oe@port.de>
-'z' 10-4F drivers/s390/crypto/zcrypt_api.h conflict!
+'t' 90-91 linux/toshiba.h toshiba and toshiba_acpi SMM
+'u' 00-1F linux/smb_fs.h gone
+'u' 00-2F linux/ublk_cmd.h conflict!
+'u' 20-3F linux/uvcvideo.h USB video class host driver
+'u' 40-4f linux/udmabuf.h userspace dma-buf misc device
+'v' 00-1F linux/ext2_fs.h conflict!
+'v' 00-1F linux/fs.h conflict!
+'v' 00-0F linux/sonypi.h conflict!
+'v' 00-0F media/v4l2-subdev.h conflict!
+'v' 20-27 arch/powerpc/include/uapi/asm/vas-api.h VAS API
+'v' C0-FF linux/meye.h conflict!
+'w' all CERN SCI driver
+'y' 00-1F packet based user level communications
+ <mailto:zapman@interlan.net>
+'z' 00-3F CAN bus card conflict!
+ <mailto:hdstich@connectu.ulm.circular.de>
+'z' 40-7F CAN bus card conflict!
+ <mailto:oe@port.de>
+'z' 10-4F drivers/s390/crypto/zcrypt_api.h conflict!
'|' 00-7F linux/media.h
-'|' 80-9F samples/ Any sample and example drivers
+'|' 80-9F samples/ Any sample and example drivers
0x80 00-1F linux/fb.h
0x81 00-1F linux/vduse.h
0x89 00-06 arch/x86/include/asm/sockios.h
0x89 0B-DF linux/sockios.h
-0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range
-0x89 F0-FF linux/sockios.h SIOCDEVPRIVATE range
+0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range
+0x89 F0-FF linux/sockios.h SIOCDEVPRIVATE range
0x8A 00-1F linux/eventpoll.h
0x8B all linux/wireless.h
-0x8C 00-3F WiNRADiO driver
- <http://www.winradio.com.au/>
+0x8C 00-3F WiNRADiO driver
+ <http://www.winradio.com.au/>
0x90 00 drivers/cdrom/sbpcd.h
0x92 00-0F drivers/usb/mon/mon_bin.c
0x93 60-7F linux/auto_fs.h
-0x94 all fs/btrfs/ioctl.h Btrfs filesystem
- and linux/fs.h some lifted to vfs/generic
-0x97 00-7F fs/ceph/ioctl.h Ceph file system
-0x99 00-0F 537-Addinboard driver
- <mailto:buk@buks.ipn.de>
+0x94 all fs/btrfs/ioctl.h Btrfs filesystem
+ and linux/fs.h some lifted to vfs/generic
+0x97 00-7F fs/ceph/ioctl.h Ceph file system
+0x99 00-0F 537-Addinboard driver
+ <mailto:buk@buks.ipn.de>
0x9A 00-0F include/uapi/fwctl/fwctl.h
-0xA0 all linux/sdp/sdp.h Industrial Device Project
- <mailto:kenji@bitgate.com>
-0xA1 0 linux/vtpm_proxy.h TPM Emulator Proxy Driver
-0xA2 all uapi/linux/acrn.h ACRN hypervisor
-0xA3 80-8F Port ACL in development:
- <mailto:tlewis@mindspring.com>
+0xA0 all linux/sdp/sdp.h Industrial Device Project
+ <mailto:kenji@bitgate.com>
+0xA1 0 linux/vtpm_proxy.h TPM Emulator Proxy Driver
+0xA2 all uapi/linux/acrn.h ACRN hypervisor
+0xA3 80-8F Port ACL in development:
+ <mailto:tlewis@mindspring.com>
0xA3 90-9F linux/dtlk.h
-0xA4 00-1F uapi/linux/tee.h Generic TEE subsystem
-0xA4 00-1F uapi/asm/sgx.h <mailto:linux-sgx@vger.kernel.org>
-0xA5 01-05 linux/surface_aggregator/cdev.h Microsoft Surface Platform System Aggregator
- <mailto:luzmaximilian@gmail.com>
-0xA5 20-2F linux/surface_aggregator/dtx.h Microsoft Surface DTX driver
- <mailto:luzmaximilian@gmail.com>
+0xA4 00-1F uapi/linux/tee.h Generic TEE subsystem
+0xA4 00-1F uapi/asm/sgx.h <mailto:linux-sgx@vger.kernel.org>
+0xA5 01-05 linux/surface_aggregator/cdev.h Microsoft Surface Platform System Aggregator
+ <mailto:luzmaximilian@gmail.com>
+0xA5 20-2F linux/surface_aggregator/dtx.h Microsoft Surface DTX driver
+ <mailto:luzmaximilian@gmail.com>
0xAA 00-3F linux/uapi/linux/userfaultfd.h
0xAB 00-1F linux/nbd.h
0xAC 00-1F linux/raw.h
-0xAD 00 Netfilter device in development:
- <mailto:rusty@rustcorp.com.au>
-0xAE 00-1F linux/kvm.h Kernel-based Virtual Machine
- <mailto:kvm@vger.kernel.org>
-0xAE 40-FF linux/kvm.h Kernel-based Virtual Machine
- <mailto:kvm@vger.kernel.org>
-0xAE 20-3F linux/nitro_enclaves.h Nitro Enclaves
-0xAF 00-1F linux/fsl_hypervisor.h Freescale hypervisor
-0xB0 all RATIO devices in development:
- <mailto:vgo@ratio.de>
-0xB1 00-1F PPPoX
- <mailto:mostrows@styx.uwaterloo.ca>
-0xB2 00 arch/powerpc/include/uapi/asm/papr-vpd.h powerpc/pseries VPD API
- <mailto:linuxppc-dev>
-0xB2 01-02 arch/powerpc/include/uapi/asm/papr-sysparm.h powerpc/pseries system parameter API
- <mailto:linuxppc-dev>
-0xB2 03-05 arch/powerpc/include/uapi/asm/papr-indices.h powerpc/pseries indices API
- <mailto:linuxppc-dev>
-0xB2 06-07 arch/powerpc/include/uapi/asm/papr-platform-dump.h powerpc/pseries Platform Dump API
- <mailto:linuxppc-dev>
-0xB2 08 powerpc/include/uapi/asm/papr-physical-attestation.h powerpc/pseries Physical Attestation API
- <mailto:linuxppc-dev>
+0xAD 00 Netfilter device in development:
+ <mailto:rusty@rustcorp.com.au>
+0xAE 00-1F linux/kvm.h Kernel-based Virtual Machine
+ <mailto:kvm@vger.kernel.org>
+0xAE 40-FF linux/kvm.h Kernel-based Virtual Machine
+ <mailto:kvm@vger.kernel.org>
+0xAE 20-3F linux/nitro_enclaves.h Nitro Enclaves
+0xAF 00-1F linux/fsl_hypervisor.h Freescale hypervisor
+0xB0 all RATIO devices in development:
+ <mailto:vgo@ratio.de>
+0xB1 00-1F PPPoX
+ <mailto:mostrows@styx.uwaterloo.ca>
+0xB2 00 arch/powerpc/include/uapi/asm/papr-vpd.h powerpc/pseries VPD API
+ <mailto:linuxppc-dev@lists.ozlabs.org>
+0xB2 01-02 arch/powerpc/include/uapi/asm/papr-sysparm.h powerpc/pseries system parameter API
+ <mailto:linuxppc-dev@lists.ozlabs.org>
+0xB2 03-05 arch/powerpc/include/uapi/asm/papr-indices.h powerpc/pseries indices API
+ <mailto:linuxppc-dev@lists.ozlabs.org>
+0xB2 06-07 arch/powerpc/include/uapi/asm/papr-platform-dump.h powerpc/pseries Platform Dump API
+ <mailto:linuxppc-dev@lists.ozlabs.org>
+0xB2 08 arch/powerpc/include/uapi/asm/papr-physical-attestation.h powerpc/pseries Physical Attestation API
+ <mailto:linuxppc-dev@lists.ozlabs.org>
0xB3 00 linux/mmc/ioctl.h
-0xB4 00-0F linux/gpio.h <mailto:linux-gpio@vger.kernel.org>
-0xB5 00-0F uapi/linux/rpmsg.h <mailto:linux-remoteproc@vger.kernel.org>
+0xB4 00-0F linux/gpio.h <mailto:linux-gpio@vger.kernel.org>
+0xB5 00-0F uapi/linux/rpmsg.h <mailto:linux-remoteproc@vger.kernel.org>
0xB6 all linux/fpga-dfl.h
-0xB7 all uapi/linux/remoteproc_cdev.h <mailto:linux-remoteproc@vger.kernel.org>
-0xB7 all uapi/linux/nsfs.h <mailto:Andrei Vagin <avagin@openvz.org>>
-0xB8 01-02 uapi/misc/mrvl_cn10k_dpi.h Marvell CN10K DPI driver
-0xB8 all uapi/linux/mshv.h Microsoft Hyper-V /dev/mshv driver
- <mailto:linux-hyperv@vger.kernel.org>
+0xB7 all uapi/linux/remoteproc_cdev.h <mailto:linux-remoteproc@vger.kernel.org>
+0xB7 all uapi/linux/nsfs.h <mailto:Andrei Vagin <avagin@openvz.org>>
+0xB8 01-02 uapi/misc/mrvl_cn10k_dpi.h Marvell CN10K DPI driver
+0xB8 all uapi/linux/mshv.h Microsoft Hyper-V /dev/mshv driver
+ <mailto:linux-hyperv@vger.kernel.org>
0xC0 00-0F linux/usb/iowarrior.h
-0xCA 00-0F uapi/misc/cxl.h Dead since 6.15
+0xCA 00-0F uapi/misc/cxl.h Dead since 6.15
0xCA 10-2F uapi/misc/ocxl.h
-0xCA 80-BF uapi/scsi/cxlflash_ioctl.h Dead since 6.15
-0xCB 00-1F CBM serial IEC bus in development:
- <mailto:michael.klein@puffin.lb.shuttle.de>
-0xCC 00-0F drivers/misc/ibmvmc.h pseries VMC driver
-0xCD 01 linux/reiserfs_fs.h Dead since 6.13
-0xCE 01-02 uapi/linux/cxl_mem.h Compute Express Link Memory Devices
+0xCA 80-BF uapi/scsi/cxlflash_ioctl.h Dead since 6.15
+0xCB 00-1F CBM serial IEC bus in development:
+ <mailto:michael.klein@puffin.lb.shuttle.de>
+0xCC 00-0F drivers/misc/ibmvmc.h pseries VMC driver
+0xCD 01 linux/reiserfs_fs.h Dead since 6.13
+0xCE 01-02 uapi/linux/cxl_mem.h Compute Express Link Memory Devices
0xCF 02 fs/smb/client/cifs_ioctl.h
0xDB 00-0F drivers/char/mwave/mwavepub.h
-0xDD 00-3F ZFCP device driver see drivers/s390/scsi/
- <mailto:aherrman@de.ibm.com>
+0xDD 00-3F ZFCP device driver see drivers/s390/scsi/
+ <mailto:aherrman@de.ibm.com>
0xE5 00-3F linux/fuse.h
-0xEC 00-01 drivers/platform/chrome/cros_ec_dev.h ChromeOS EC driver
-0xEE 00-09 uapi/linux/pfrut.h Platform Firmware Runtime Update and Telemetry
-0xF3 00-3F drivers/usb/misc/sisusbvga/sisusb.h sisfb (in development)
- <mailto:thomas@winischhofer.net>
-0xF6 all LTTng Linux Trace Toolkit Next Generation
- <mailto:mathieu.desnoyers@efficios.com>
-0xF8 all arch/x86/include/uapi/asm/amd_hsmp.h AMD HSMP EPYC system management interface driver
- <mailto:nchatrad@amd.com>
-0xF9 00-0F uapi/misc/amd-apml.h AMD side band system management interface driver
- <mailto:naveenkrishna.chatradhi@amd.com>
+0xEC 00-01 drivers/platform/chrome/cros_ec_dev.h ChromeOS EC driver
+0xEE 00-09 uapi/linux/pfrut.h Platform Firmware Runtime Update and Telemetry
+0xF3 00-3F drivers/usb/misc/sisusbvga/sisusb.h sisfb (in development)
+ <mailto:thomas@winischhofer.net>
+0xF6 all LTTng Linux Trace Toolkit Next Generation
+ <mailto:mathieu.desnoyers@efficios.com>
+0xF8 all arch/x86/include/uapi/asm/amd_hsmp.h AMD HSMP EPYC system management interface driver
+ <mailto:nchatrad@amd.com>
+0xF9 00-0F uapi/misc/amd-apml.h AMD side band system management interface driver
+ <mailto:naveenkrishna.chatradhi@amd.com>
0xFD all linux/dm-ioctl.h
0xFE all linux/isst_if.h
-==== ===== ======================================================= ================================================================
+==== ===== ========================================================= ================================================================
diff --git a/Documentation/userspace-api/iommufd.rst b/Documentation/userspace-api/iommufd.rst
index b0df15865dec..03f7510384d2 100644
--- a/Documentation/userspace-api/iommufd.rst
+++ b/Documentation/userspace-api/iommufd.rst
@@ -124,6 +124,17 @@ Following IOMMUFD objects are exposed to userspace:
used to allocate a vEVENTQ. Each vIOMMU can support multiple types of vEVENTS,
but is confined to one vEVENTQ per vEVENTQ type.
+- IOMMUFD_OBJ_HW_QUEUE, representing a hardware-accelerated queue, as a subset
+ of the IOMMU's virtualization features, for the IOMMU HW to directly read or
+ write the virtual queue memory owned by a guest OS. This HW-acceleration
+ feature allows a VM to work with the IOMMU HW directly without a VM Exit,
+ reducing hypercall overhead. Along with the HW QUEUE object, iommufd provides
+ user space an mmap interface for the VMM to map a physical MMIO region from
+ the host physical address space into the guest physical address space,
+ allowing the guest OS to directly control the allocated HW QUEUE. Thus, when
+ allocating a HW QUEUE, the VMM must request a pair of mmap parameters
+ (offset/length) and pass them unchanged to an mmap syscall via its offset
+ and length arguments.
+
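+A minimal sketch of that final step, assuming the allocation ioctl has
+already returned the pair in hypothetical ``out_mmap_offset`` /
+``out_mmap_length`` fields (the exact field names depend on the uAPI
+struct)::
+
+    #include <stdint.h>
+    #include <sys/mman.h>
+    #include <err.h>
+
+    /* Map the allocated HW QUEUE MMIO region through the iommufd fd. */
+    static void *map_hw_queue(int iommufd, uint64_t offset, uint64_t length)
+    {
+            void *mmio = mmap(NULL, length, PROT_READ | PROT_WRITE,
+                              MAP_SHARED, iommufd, offset);
+
+            if (mmio == MAP_FAILED)
+                    err(1, "mmap of HW QUEUE MMIO region");
+            return mmio;
+    }
+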
All user-visible objects are destroyed via the IOMMU_DESTROY uAPI.
The diagrams below show relationships between user-visible objects and kernel
@@ -270,6 +281,7 @@ User visible objects are backed by following datastructures:
- iommufd_viommu for IOMMUFD_OBJ_VIOMMU.
- iommufd_vdevice for IOMMUFD_OBJ_VDEVICE.
- iommufd_veventq for IOMMUFD_OBJ_VEVENTQ.
+- iommufd_hw_queue for IOMMUFD_OBJ_HW_QUEUE.
Several terminologies when looking at these datastructures:
diff --git a/Documentation/userspace-api/media/cec/cec-pin-error-inj.rst b/Documentation/userspace-api/media/cec/cec-pin-error-inj.rst
index 411d42a742f3..c02790319f3f 100644
--- a/Documentation/userspace-api/media/cec/cec-pin-error-inj.rst
+++ b/Documentation/userspace-api/media/cec/cec-pin-error-inj.rst
@@ -41,6 +41,9 @@ error injection status::
# <op> rx-clear clear all rx error injections for <op>
# <op> tx-clear clear all tx error injections for <op>
#
+ # RX error injection settings:
+ # rx-no-low-drive do not generate low-drive pulses
+ #
# RX error injection:
# <op>[,<mode>] rx-nack NACK the message instead of sending an ACK
# <op>[,<mode>] rx-low-drive <bit> force a low-drive condition at this bit position
@@ -53,6 +56,10 @@ error injection status::
# tx-custom-low-usecs <usecs> define the 'low' time for the custom pulse
# tx-custom-high-usecs <usecs> define the 'high' time for the custom pulse
# tx-custom-pulse transmit the custom pulse once the bus is idle
+ # tx-glitch-low-usecs <usecs> define the 'low' time for the glitch pulse
+ # tx-glitch-high-usecs <usecs> define the 'high' time for the glitch pulse
+ # tx-glitch-falling-edge send the glitch pulse after every falling edge
+ # tx-glitch-rising-edge send the glitch pulse after every rising edge
#
# TX error injection:
# <op>[,<mode>] tx-no-eom don't set the EOM bit
@@ -193,6 +200,14 @@ Receive Messages
This does not work if the remote CEC transmitter has logical address
0 ('TV') since that will always win.
+``rx-no-low-drive``
+ The receiver will ignore situations that would normally generate a
+ Low Drive pulse (3.6 ms). Such a pulse is normally generated when a
+ spurious pulse is detected while receiving a message, and it indicates to
+ the transmitter that the message has to be retransmitted since the
+ receiver got confused. Suppressing it is useful to test how other CEC
+ devices handle glitches, by ensuring we will not be the one that
+ generates a Low Drive.
+
Transmit Messages
-----------------
@@ -327,3 +342,30 @@ Custom Pulses
``tx-custom-pulse``
Transmit a single custom pulse as soon as the CEC bus is idle.
+
+Glitch Pulses
+-------------
+
+This emulates what happens if the signal on the CEC line suffers from
+spurious pulses. Typically this happens right after a falling or rising
+edge, when a short voltage fluctuation occurs that, if the CEC hardware
+doesn't do deglitching, can be seen as a spurious pulse and can cause a
+Low Drive condition or corrupt data.
+
+``tx-glitch-low-usecs <usecs>``
+ This defines the duration in microseconds that the glitch pulse pulls
+ the CEC line low. The default is 1 microsecond. The range is 0-100
+ microseconds. If 0, then no glitch pulse will be generated.
+
+``tx-glitch-high-usecs <usecs>``
+ This defines the duration in microseconds that the glitch pulse keeps the
+ CEC line high (unless another CEC adapter pulls it low in that time).
+ The default is 1 microsecond. The range is 0-100 microseconds. If 0, then
+ no glitch pulse will be generated. The total period of the glitch pulse is
+ ``tx-glitch-low-usecs + tx-glitch-high-usecs``.
+
+``tx-glitch-falling-edge``
+ Send the glitch pulse right after the falling edge.
+
+``tx-glitch-rising-edge``
+ Send the glitch pulse right after the rising edge.
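+
+As a sketch, these settings are written to the adapter's ``error-inj``
+debugfs file; the path below assumes the first CEC adapter (cec0)::
+
+    #include <stdio.h>
+    #include <err.h>
+
+    /* Arm a 2/2 microsecond glitch pulse after every falling edge. */
+    static void arm_glitch_injection(void)
+    {
+            FILE *f = fopen("/sys/kernel/debug/cec/cec0/error-inj", "w");
+
+            if (!f)
+                    err(1, "error-inj");
+            fprintf(f, "tx-glitch-low-usecs 2\n");
+            fprintf(f, "tx-glitch-high-usecs 2\n");
+            fprintf(f, "tx-glitch-falling-edge\n");
+            fclose(f);
+    }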
diff --git a/Documentation/userspace-api/media/rc/rc-protos.rst b/Documentation/userspace-api/media/rc/rc-protos.rst
index 2a888ff5829f..ec706290c921 100644
--- a/Documentation/userspace-api/media/rc/rc-protos.rst
+++ b/Documentation/userspace-api/media/rc/rc-protos.rst
@@ -449,6 +449,6 @@ the 32 bits.
xbox-dvd (RC_PROTO_XBOX_DVD)
----------------------------
-This protocol is used by XBox DVD Remote, which was made for the original
-XBox. There is no in-kernel decoder or encoder for this protocol. The usb
+This protocol is used by the Xbox DVD Remote, which was made for the original
+Xbox. There is no in-kernel decoder or encoder for this protocol. The USB
device decodes the protocol. There is a BPF decoder available in v4l-utils.
diff --git a/Documentation/userspace-api/media/v4l/biblio.rst b/Documentation/userspace-api/media/v4l/biblio.rst
index 35674eeae20d..856acf6a890c 100644
--- a/Documentation/userspace-api/media/v4l/biblio.rst
+++ b/Documentation/userspace-api/media/v4l/biblio.rst
@@ -150,7 +150,7 @@ ITU-T.81
========
-:title: ITU-T Recommendation T.81 "Information Technology --- Digital Compression and Coding of Continous-Tone Still Images --- Requirements and Guidelines"
+:title: ITU-T Recommendation T.81 "Information Technology --- Digital Compression and Coding of Continuous-Tone Still Images --- Requirements and Guidelines"
:author: International Telecommunication Union (http://www.itu.int)
diff --git a/Documentation/userspace-api/media/v4l/dev-sliced-vbi.rst b/Documentation/userspace-api/media/v4l/dev-sliced-vbi.rst
index 42cdb0a9f786..96e0e85a822c 100644
--- a/Documentation/userspace-api/media/v4l/dev-sliced-vbi.rst
+++ b/Documentation/userspace-api/media/v4l/dev-sliced-vbi.rst
@@ -48,7 +48,7 @@ capabilities, and they may support :ref:`control` ioctls.
The :ref:`video standard <standard>` ioctls provide information vital
to program a sliced VBI device, therefore must be supported.
-.. _sliced-vbi-format-negotitation:
+.. _sliced-vbi-format-negotiation:
Sliced VBI Format Negotiation
=============================
@@ -377,7 +377,7 @@ Sliced VBI Data in MPEG Streams
If a device can produce an MPEG output stream, it may be capable of
providing
-:ref:`negotiated sliced VBI services <sliced-vbi-format-negotitation>`
+:ref:`negotiated sliced VBI services <sliced-vbi-format-negotiation>`
as data embedded in the MPEG stream. Users or applications control this
sliced VBI data insertion with the
:ref:`V4L2_CID_MPEG_STREAM_VBI_FMT <v4l2-mpeg-stream-vbi-fmt>`
diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-fm-rx.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-fm-rx.rst
index b6cfc0e823d2..ccd439e9e0e3 100644
--- a/Documentation/userspace-api/media/v4l/ext-ctrls-fm-rx.rst
+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-fm-rx.rst
@@ -64,17 +64,12 @@ FM_RX Control IDs
broadcasts speech. If the transmitter doesn't make this distinction,
then it will be set.
-``V4L2_CID_TUNE_DEEMPHASIS``
- (enum)
-
-enum v4l2_deemphasis -
+``V4L2_CID_TUNE_DEEMPHASIS (enum)``
Configures the de-emphasis value for reception. A de-emphasis filter
is applied to the broadcast to accentuate the high audio
frequencies. Depending on the region, a time constant of either 50
- or 75 useconds is used. The enum v4l2_deemphasis defines possible
- values for de-emphasis. Here they are:
-
-
+ or 75 microseconds is used. The enum v4l2_deemphasis defines possible
+ values for de-emphasis. They are:
.. flat-table::
:header-rows: 0
diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-fm-tx.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-fm-tx.rst
index 04c997c9a4c3..cb40cf4cc3ec 100644
--- a/Documentation/userspace-api/media/v4l/ext-ctrls-fm-tx.rst
+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-fm-tx.rst
@@ -104,7 +104,7 @@ FM_TX Control IDs
``V4L2_CID_AUDIO_LIMITER_RELEASE_TIME (integer)``
Sets the audio deviation limiter feature release time. Unit is in
- useconds. Step and range are driver-specific.
+ microseconds. Step and range are driver-specific.
``V4L2_CID_AUDIO_LIMITER_DEVIATION (integer)``
Configures audio frequency deviation level in Hz. The range and step
@@ -121,16 +121,16 @@ FM_TX Control IDs
range and step are driver-specific.
``V4L2_CID_AUDIO_COMPRESSION_THRESHOLD (integer)``
- Sets the threshold level for audio compression freature. It is a dB
+ Sets the threshold level for audio compression feature. It is a dB
value. The range and step are driver-specific.
``V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME (integer)``
- Sets the attack time for audio compression feature. It is a useconds
+ Sets the attack time for audio compression feature. It is a microseconds
value. The range and step are driver-specific.
``V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME (integer)``
Sets the release time for audio compression feature. It is a
- useconds value. The range and step are driver-specific.
+ microseconds value. The range and step are driver-specific.
``V4L2_CID_PILOT_TONE_ENABLED (boolean)``
Enables or disables the pilot tone generation feature.
@@ -143,17 +143,12 @@ FM_TX Control IDs
Configures pilot tone frequency value. Unit is in Hz. The range and
step are driver-specific.
-``V4L2_CID_TUNE_PREEMPHASIS``
- (enum)
-
-enum v4l2_preemphasis -
+``V4L2_CID_TUNE_PREEMPHASIS (enum)``
Configures the pre-emphasis value for broadcasting. A pre-emphasis
filter is applied to the broadcast to accentuate the high audio
frequencies. Depending on the region, a time constant of either 50
- or 75 useconds is used. The enum v4l2_preemphasis defines possible
- values for pre-emphasis. Here they are:
-
-
+ or 75 microseconds is used. The enum v4l2_preemphasis defines possible
+ values for pre-emphasis. They are:
.. flat-table::
:header-rows: 0
@@ -166,8 +161,6 @@ enum v4l2_preemphasis -
* - ``V4L2_PREEMPHASIS_75_uS``
- A pre-emphasis of 75 uS is used.
-
-
``V4L2_CID_TUNE_POWER_LEVEL (integer)``
Sets the output power level for signal transmission. Unit is in
dBuV. Range and step are driver-specific.
diff --git a/Documentation/userspace-api/media/v4l/meta-formats.rst b/Documentation/userspace-api/media/v4l/meta-formats.rst
index bb6876cfc271..0de80328c36b 100644
--- a/Documentation/userspace-api/media/v4l/meta-formats.rst
+++ b/Documentation/userspace-api/media/v4l/meta-formats.rst
@@ -20,6 +20,7 @@ These formats are used for the :ref:`metadata` interface only.
metafmt-pisp-fe
metafmt-rkisp1
metafmt-uvc
+ metafmt-uvc-msxu-1-5
metafmt-vivid
metafmt-vsp1-hgo
metafmt-vsp1-hgt
diff --git a/Documentation/userspace-api/media/v4l/metafmt-uvc-msxu-1-5.rst b/Documentation/userspace-api/media/v4l/metafmt-uvc-msxu-1-5.rst
new file mode 100644
index 000000000000..dd1c3076df24
--- /dev/null
+++ b/Documentation/userspace-api/media/v4l/metafmt-uvc-msxu-1-5.rst
@@ -0,0 +1,23 @@
+.. SPDX-License-Identifier: GFDL-1.1-no-invariants-or-later
+
+.. _v4l2-meta-fmt-uvc-msxu-1-5:
+
+***********************************
+V4L2_META_FMT_UVC_MSXU_1_5 ('UVCM')
+***********************************
+
+Microsoft(R)'s UVC Payload Metadata.
+
+
+Description
+===========
+
+V4L2_META_FMT_UVC_MSXU_1_5 buffers follow the metadata buffer layout of
+V4L2_META_FMT_UVC, with the only difference being that they include all the
+UVC metadata in the `buffer[]` field, not just the first 2-12 bytes.
+
+The metadata format follows the specification from Microsoft(R) [1].
+
+.. _1:
+
+[1] https://docs.microsoft.com/en-us/windows-hardware/drivers/stream/uvc-extensions-1-5
diff --git a/Documentation/userspace-api/media/v4l/metafmt-uvc.rst b/Documentation/userspace-api/media/v4l/metafmt-uvc.rst
index 784346d14bbd..4c05e9e54683 100644
--- a/Documentation/userspace-api/media/v4l/metafmt-uvc.rst
+++ b/Documentation/userspace-api/media/v4l/metafmt-uvc.rst
@@ -44,7 +44,9 @@ Each individual block contains the following fields:
them
* - :cspan:`1` *The rest is an exact copy of the UVC payload header:*
* - __u8 length;
- - length of the rest of the block, including this field
+ - length of the rest of the block, including this field. Please note that
+ regardless of this value, for V4L2_META_FMT_UVC the kernel will never
+ copy more than the standard UVC payload header (2-12 bytes, depending on
+ the flags).
* - __u8 flags;
- Flags, indicating presence of other standard UVC fields
* - __u8 buf[];
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-bayer.rst b/Documentation/userspace-api/media/v4l/pixfmt-bayer.rst
index ed3eb432967d..b5ca501842b0 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-bayer.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-bayer.rst
@@ -19,6 +19,7 @@ orders. See also `the Wikipedia article on Bayer filter
.. toctree::
:maxdepth: 1
+ pixfmt-rawnn-cru
pixfmt-srggb8
pixfmt-srggb8-pisp-comp
pixfmt-srggb10
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-rawnn-cru.rst b/Documentation/userspace-api/media/v4l/pixfmt-rawnn-cru.rst
new file mode 100644
index 000000000000..db81f1cfe0f5
--- /dev/null
+++ b/Documentation/userspace-api/media/v4l/pixfmt-rawnn-cru.rst
@@ -0,0 +1,143 @@
+.. SPDX-License-Identifier: GFDL-1.1-no-invariants-or-later
+
+.. _v4l2-pix-fmt-raw-cru10:
+.. _v4l2-pix-fmt-raw-cru12:
+.. _v4l2-pix-fmt-raw-cru14:
+.. _v4l2-pix-fmt-raw-cru20:
+
+**********************************************************************************************************************************
+V4L2_PIX_FMT_RAW_CRU10 ('CR10'), V4L2_PIX_FMT_RAW_CRU12 ('CR12'), V4L2_PIX_FMT_RAW_CRU14 ('CR14'), V4L2_PIX_FMT_RAW_CRU20 ('CR20')
+**********************************************************************************************************************************
+
+===============================================================
+Renesas RZ/V2H Camera Receiver Unit 64-bit packed pixel formats
+===============================================================
+
+| V4L2_PIX_FMT_RAW_CRU10 (CR10)
+| V4L2_PIX_FMT_RAW_CRU12 (CR12)
+| V4L2_PIX_FMT_RAW_CRU14 (CR14)
+| V4L2_PIX_FMT_RAW_CRU20 (CR20)
+
+Description
+===========
+
+These pixel formats are some of the RAW outputs for the Camera Receiver Unit in
+the Renesas RZ/V2H SoC. They are raw formats that pack pixels contiguously into
+64-bit units, with the 4 or 8 most significant bits padded. A sketch of
+unpacking one such unit follows the byte-order table below.
+
+**Byte Order**
+
+.. flat-table:: RAW formats
+ :header-rows: 2
+ :stub-columns: 0
+ :widths: 36 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
+ :fill-cells:
+
+ * - :rspan:`1` Pixel Format Code
+ - :cspan:`63` Data organization
+ * - 63
+ - 62
+ - 61
+ - 60
+ - 59
+ - 58
+ - 57
+ - 56
+ - 55
+ - 54
+ - 53
+ - 52
+ - 51
+ - 50
+ - 49
+ - 48
+ - 47
+ - 46
+ - 45
+ - 44
+ - 43
+ - 42
+ - 41
+ - 40
+ - 39
+ - 38
+ - 37
+ - 36
+ - 35
+ - 34
+ - 33
+ - 32
+ - 31
+ - 30
+ - 29
+ - 28
+ - 27
+ - 26
+ - 25
+ - 24
+ - 23
+ - 22
+ - 21
+ - 20
+ - 19
+ - 18
+ - 17
+ - 16
+ - 15
+ - 14
+ - 13
+ - 12
+ - 11
+ - 10
+ - 9
+ - 8
+ - 7
+ - 6
+ - 5
+ - 4
+ - 3
+ - 2
+ - 1
+ - 0
+ * - V4L2_PIX_FMT_RAW_CRU10
+ - 0
+ - 0
+ - 0
+ - 0
+ - :cspan:`9` P5
+ - :cspan:`9` P4
+ - :cspan:`9` P3
+ - :cspan:`9` P2
+ - :cspan:`9` P1
+ - :cspan:`9` P0
+ * - V4L2_PIX_FMT_RAW_CRU12
+ - 0
+ - 0
+ - 0
+ - 0
+ - :cspan:`11` P4
+ - :cspan:`11` P3
+ - :cspan:`11` P2
+ - :cspan:`11` P1
+ - :cspan:`11` P0
+ * - V4L2_PIX_FMT_RAW_CRU14
+ - 0
+ - 0
+ - 0
+ - 0
+ - 0
+ - 0
+ - 0
+ - 0
+ - :cspan:`13` P3
+ - :cspan:`13` P2
+ - :cspan:`13` P1
+ - :cspan:`13` P0
+ * - V4L2_PIX_FMT_RAW_CRU20
+ - 0
+ - 0
+ - 0
+ - 0
+ - :cspan:`19` P2
+ - :cspan:`19` P1
+ - :cspan:`19` P0
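+
+As an illustration, a sketch of unpacking one V4L2_PIX_FMT_RAW_CRU10 unit,
+assuming the 64-bit units are stored little-endian in memory::
+
+    #include <stdint.h>
+
+    /* Extract the six 10-bit samples P0..P5 from one 64-bit unit;
+     * bits 63:60 are padding, per the table above.
+     */
+    static void unpack_raw_cru10(uint64_t unit, uint16_t out[6])
+    {
+            for (int i = 0; i < 6; i++)
+                    out[i] = (unit >> (10 * i)) & 0x3ff;
+    }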
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-srggb12p.rst b/Documentation/userspace-api/media/v4l/pixfmt-srggb12p.rst
index 7c3810ff783c..8c03aedcc00e 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-srggb12p.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-srggb12p.rst
@@ -6,7 +6,7 @@
.. _v4l2-pix-fmt-sgrbg12p:
*******************************************************************************************************************************
-V4L2_PIX_FMT_SRGGB12P ('pRCC'), V4L2_PIX_FMT_SGRBG12P ('pgCC'), V4L2_PIX_FMT_SGBRG12P ('pGCC'), V4L2_PIX_FMT_SBGGR12P ('pBCC'),
+V4L2_PIX_FMT_SRGGB12P ('pRCC'), V4L2_PIX_FMT_SGRBG12P ('pgCC'), V4L2_PIX_FMT_SGBRG12P ('pGCC'), V4L2_PIX_FMT_SBGGR12P ('pBCC')
*******************************************************************************************************************************
@@ -20,7 +20,7 @@ Description
These four pixel formats are packed raw sRGB / Bayer formats with 12
bits per colour. Every two consecutive samples are packed into three
bytes. Each of the first two bytes contain the 8 high order bits of
-the pixels, and the third byte contains the four least significants
+the pixels, and the third byte contains the four least significant
bits of each pixel, in the same order.
Each n-pixel row contains n/2 green samples and n/2 blue or red
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-srggb14p.rst b/Documentation/userspace-api/media/v4l/pixfmt-srggb14p.rst
index 3572e42adb22..f4f53d7dbdeb 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-srggb14p.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-srggb14p.rst
@@ -24,7 +24,7 @@ These four pixel formats are packed raw sRGB / Bayer formats with 14
bits per colour. Every four consecutive samples are packed into seven
bytes. Each of the first four bytes contain the eight high order bits
of the pixels, and the three following bytes contains the six least
-significants bits of each pixel, in the same order.
+significant bits of each pixel, in the same order.
Each n-pixel row contains n/2 green samples and n/2 blue or red samples,
with alternating green-red and green-blue rows. They are conventionally
diff --git a/Documentation/userspace-api/sysfs-platform_profile.rst b/Documentation/userspace-api/sysfs-platform_profile.rst
index 7f013356118a..6613e188242a 100644
--- a/Documentation/userspace-api/sysfs-platform_profile.rst
+++ b/Documentation/userspace-api/sysfs-platform_profile.rst
@@ -18,9 +18,9 @@ API for selecting the platform profile of these automatic mechanisms.
Note that this API is only for selecting the platform profile, it is
NOT a goal of this API to allow monitoring the resulting performance
characteristics. Monitoring performance is best done with device/vendor
-specific tools such as e.g. turbostat.
+specific tools, e.g. turbostat.
-Specifically when selecting a high performance profile the actual achieved
+Specifically, when selecting a high performance profile the actual achieved
performance may be limited by various factors such as: the heat generated
by other components, room temperature, free air flow at the bottom of a
laptop, etc. It is explicitly NOT a goal of this API to let userspace know
@@ -44,7 +44,7 @@ added. Drivers which wish to introduce new profile names must:
"Custom" profile support
========================
The platform_profile class also supports profiles advertising a "custom"
-profile. This is intended to be set by drivers when the setttings in the
+profile. This is intended to be set by drivers when the settings in the
driver have been modified in a way that a standard profile doesn't represent
the current state.
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 544fb11351d9..6aa40ee05a4a 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -2006,7 +2006,7 @@ frequency is KHz.
If the KVM_CAP_VM_TSC_CONTROL capability is advertised, this can also
be used as a vm ioctl to set the initial tsc frequency of subsequently
-created vCPUs.
+created vCPUs. Note, the vm ioctl is only allowed prior to creating vCPUs.
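+
+A minimal sketch of the VM-scoped usage, assuming ``vm_fd`` is the VM file
+descriptor and no vCPU has been created yet::
+
+    /* Set the guest TSC frequency to 2 GHz for all future vCPUs. */
+    if (ioctl(vm_fd, KVM_SET_TSC_KHZ, 2000000UL) < 0)
+            err(1, "KVM_SET_TSC_KHZ");
+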
For TSC protected Confidential Computing (CoCo) VMs where TSC frequency
is configured once at VM scope and remains unchanged during VM's
@@ -7851,6 +7851,7 @@ Valid bits in args[0] are::
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
+ #define KVM_X86_DISABLE_EXITS_APERFMPERF (1 << 4)
Enabling this capability on a VM provides userspace with a way to no
longer intercept some instructions for improved latency in some
@@ -7861,6 +7862,28 @@ all such vmexits.
Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
+Virtualizing the ``IA32_APERF`` and ``IA32_MPERF`` MSRs requires more
+than just disabling APERF/MPERF exits. While both Intel and AMD
+document strict usage conditions for these MSRs--emphasizing that only
+the ratio of their deltas over a time interval (T0 to T1) is
+architecturally defined--simply passing through the MSRs can still
+produce an incorrect ratio.
+
+This erroneous ratio can occur if, between T0 and T1:
+
+1. The vCPU thread migrates between logical processors.
+2. Live migration or suspend/resume operations take place.
+3. Another task shares the vCPU's logical processor.
+4. C-states lower than C0 are emulated (e.g., via HLT interception).
+5. The guest TSC frequency doesn't match the host TSC frequency.
+
+Due to these complexities, KVM does not automatically associate this
+passthrough capability with the guest CPUID bit,
+``CPUID.6:ECX.APERFMPERF[bit 0]``. Userspace VMMs that deem this
+mechanism adequate for virtualizing the ``IA32_APERF`` and
+``IA32_MPERF`` MSRs must set the guest CPUID bit explicitly.
+
+
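+A sketch of enabling the pass-through, assuming ``vm_fd`` is the VM file
+descriptor (setting the guest CPUID bit itself happens in the usual
+KVM_SET_CPUID2 flow on each vCPU and is not shown)::
+
+    struct kvm_enable_cap cap = {
+            .cap = KVM_CAP_X86_DISABLE_EXITS,
+            .args[0] = KVM_X86_DISABLE_EXITS_APERFMPERF,
+    };
+
+    /* Stop intercepting IA32_APERF/IA32_MPERF accesses. */
+    if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
+            err(1, "KVM_ENABLE_CAP(KVM_X86_DISABLE_EXITS_APERFMPERF)");
+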
7.14 KVM_CAP_S390_HPAGE_1M
--------------------------
@@ -8387,7 +8410,7 @@ core crystal clock frequency, if a non-zero CPUID 0x15 is exposed to the guest.
7.36 KVM_CAP_DIRTY_LOG_RING/KVM_CAP_DIRTY_LOG_RING_ACQ_REL
----------------------------------------------------------
-:Architectures: x86, arm64
+:Architectures: x86, arm64, riscv
:Type: vm
:Parameters: args[0] - size of the dirty log ring
@@ -8599,7 +8622,7 @@ ENOSYS for the others.
When enabled, KVM will exit to userspace with KVM_EXIT_SYSTEM_EVENT of
type KVM_SYSTEM_EVENT_SUSPEND to process the guest suspend request.
-7.37 KVM_CAP_ARM_WRITABLE_IMP_ID_REGS
+7.42 KVM_CAP_ARM_WRITABLE_IMP_ID_REGS
-------------------------------------
:Architectures: arm64
@@ -8628,6 +8651,17 @@ given VM.
When this capability is enabled, KVM resets the VCPU when setting
MP_STATE_INIT_RECEIVED through IOCTL. The original MP_STATE is preserved.
+7.43 KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED
+-------------------------------------------
+
+:Architectures: arm64
+:Target: VM
+:Parameters: None
+
+This capability indicates to userspace whether a PFNMAP memory region
+can be safely mapped as cacheable. This relies on the presence of the
+force write-back (FWB) feature on the hardware.
+
8. Other capabilities.
======================
diff --git a/Documentation/virt/kvm/devices/arm-vgic-v3.rst b/Documentation/virt/kvm/devices/arm-vgic-v3.rst
index e860498b1e35..ff02102f7141 100644
--- a/Documentation/virt/kvm/devices/arm-vgic-v3.rst
+++ b/Documentation/virt/kvm/devices/arm-vgic-v3.rst
@@ -78,6 +78,8 @@ Groups:
-ENXIO The group or attribute is unknown/unsupported for this device
or hardware support is missing.
-EFAULT Invalid user pointer for attr->addr.
+ -EBUSY Attempt to write a register that is read-only after
+ initialization
======= =============================================================
@@ -120,6 +122,12 @@ Groups:
Note that distributor fields are not banked, but return the same value
regardless of the mpidr used to access the register.
+ Userspace is allowed to write the following register fields prior to
+ initialization of the VGIC:
+
+ * GICD_IIDR.Revision
+ * GICD_TYPER2.nASSGIcap
+
GICD_IIDR.Revision is updated when the KVM implementation is changed in a
way directly observable by the guest or userspace. Userspace should read
GICD_IIDR from KVM and write back the read value to confirm its expected
@@ -128,6 +136,12 @@ Groups:
behavior.
+ GICD_TYPER2.nASSGIcap allows userspace to control the support of SGIs
+ without an active state. At VGIC creation the field resets to the
+ maximum capability of the system. Userspace is expected to read the field
+ to determine the supported value(s) before writing to it.
+
+
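+ As a sketch, confirming or restricting nASSGIcap could look like this
+ (``vgic_fd`` is the VGIC device fd; GICD_TYPER2 is the distributor
+ register at offset 0x000C, and the mpidr field of ``attr`` is left at 0
+ since distributor registers are not banked)::
+
+    uint32_t typer2;
+    struct kvm_device_attr attr = {
+            .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+            .attr  = 0x000C,        /* GICD_TYPER2, mpidr = 0 */
+            .addr  = (uint64_t)(uintptr_t)&typer2,
+    };
+
+    ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);  /* read capability */
+    /* ...optionally clear GICD_TYPER2.nASSGIcap in typer2... */
+    ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);  /* write before init */
+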
The GICD_STATUSR and GICR_STATUSR registers are architecturally defined such
that a write of a clear bit has no effect, whereas a write with a set bit
clears that value. To allow userspace to freely set the values of these two
@@ -202,16 +216,69 @@ Groups:
KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS accesses the CPU interface registers for the
CPU specified by the mpidr field.
- CPU interface registers access is not implemented for AArch32 mode.
- Error -ENXIO is returned when accessed in AArch32 mode.
+ The available registers are:
+
+ =============== ====================================================
+ ICC_PMR_EL1
+ ICC_BPR0_EL1
+ ICC_AP0R0_EL1
+ ICC_AP0R1_EL1 when the host implements at least 6 bits of priority
+ ICC_AP0R2_EL1 when the host implements 7 bits of priority
+ ICC_AP0R3_EL1 when the host implements 7 bits of priority
+ ICC_AP1R0_EL1
+ ICC_AP1R1_EL1 when the host implements at least 6 bits of priority
+ ICC_AP1R2_EL1 when the host implements 7 bits of priority
+ ICC_AP1R3_EL1 when the host implements 7 bits of priority
+ ICC_BPR1_EL1
+ ICC_CTLR_EL1
+ ICC_SRE_EL1
+ ICC_IGRPEN0_EL1
+ ICC_IGRPEN1_EL1
+ =============== ====================================================
+
+ When EL2 is available for the guest, these registers are also available:
+
+ ============= ====================================================
+ ICH_AP0R0_EL2
+ ICH_AP0R1_EL2 when the host implements at least 6 bits of priority
+ ICH_AP0R2_EL2 when the host implements 7 bits of priority
+ ICH_AP0R3_EL2 when the host implements 7 bits of priority
+ ICH_AP1R0_EL2
+ ICH_AP1R1_EL2 when the host implements at least 6 bits of priority
+ ICH_AP1R2_EL2 when the host implements 7 bits of priority
+ ICH_AP1R3_EL2 when the host implements 7 bits of priority
+ ICH_HCR_EL2
+ ICC_SRE_EL2
+ ICH_VTR_EL2
+ ICH_VMCR_EL2
+ ICH_LR0_EL2
+ ICH_LR1_EL2
+ ICH_LR2_EL2
+ ICH_LR3_EL2
+ ICH_LR4_EL2
+ ICH_LR5_EL2
+ ICH_LR6_EL2
+ ICH_LR7_EL2
+ ICH_LR8_EL2
+ ICH_LR9_EL2
+ ICH_LR10_EL2
+ ICH_LR11_EL2
+ ICH_LR12_EL2
+ ICH_LR13_EL2
+ ICH_LR14_EL2
+ ICH_LR15_EL2
+ ============= ====================================================
+
+ CPU interface registers are only described using the AArch64
+ encoding.
Errors:
- ======= =====================================================
- -ENXIO Getting or setting this register is not yet supported
+ ======= =================================================
+ -ENXIO Getting or setting this register is not supported
-EBUSY VCPU is running
-EINVAL Invalid mpidr or register value supplied
- ======= =====================================================
+ ======= =================================================
KVM_DEV_ARM_VGIC_GRP_NR_IRQS
diff --git a/MAINTAINERS b/MAINTAINERS
index 230c72b3c2b4..daf520a13bdf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -158,7 +158,7 @@ S: Maintained
W: http://github.com/v9fs
Q: http://patchwork.kernel.org/project/v9fs-devel/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs.git
-T: git git://github.com/martinetd/linux.git
+T: git https://github.com/martinetd/linux.git
F: Documentation/filesystems/9p.rst
F: fs/9p/
F: include/net/9p/
@@ -824,7 +824,7 @@ F: drivers/media/platform/allegro-dvt/
ALLIED VISION ALVIUM CAMERA DRIVER
M: Tommaso Merciai <tomm.merciai@gmail.com>
-M: Martin Hecht <martin.hecht@avnet.eu>
+M: Martin Hecht <mhecht73@gmail.com>
L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/media/i2c/alliedvision,alvium-csi2.yaml
@@ -2002,6 +2002,16 @@ F: drivers/irqchip/irq-gic*.[ch]
F: include/linux/irqchip/arm-gic*.h
F: include/linux/irqchip/arm-vgic-info.h
+ARM GENERIC INTERRUPT CONTROLLER V5 DRIVERS
+M: Lorenzo Pieralisi <lpieralisi@kernel.org>
+M: Marc Zyngier <maz@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/interrupt-controller/arm,gic-v5*.yaml
+F: drivers/irqchip/irq-gic-its-msi-parent.[ch]
+F: drivers/irqchip/irq-gic-v5*.[ch]
+F: include/linux/irqchip/arm-gic-v5.h
+
ARM HDLCD DRM DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
@@ -2588,7 +2598,7 @@ M: Hans Ulli Kroll <ulli.kroll@googlemail.com>
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://github.com/ulli-kroll/linux.git
+T: git https://github.com/ulli-kroll/linux.git
F: Documentation/devicetree/bindings/arm/gemini.yaml
F: Documentation/devicetree/bindings/net/cortina,gemini-ethernet.yaml
F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
@@ -2795,7 +2805,7 @@ M: Vladimir Zapolskiy <vz@mleia.com>
M: Piotr Wojtaszczyk <piotr.wojtaszczyk@timesys.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://github.com/vzapolskiy/linux-lpc32xx.git
+T: git https://github.com/vzapolskiy/linux-lpc32xx.git
F: Documentation/devicetree/bindings/i2c/nxp,pnx-i2c.yaml
F: arch/arm/boot/dts/nxp/lpc/lpc32*
F: arch/arm/mach-lpc32xx/
@@ -2959,7 +2969,7 @@ M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Odd Fixes
F: Documentation/devicetree/bindings/arm/moxart.yaml
-F: Documentation/devicetree/bindings/clock/moxa,moxart-clock.txt
+F: Documentation/devicetree/bindings/clock/moxa,moxart-clock.yaml
F: arch/arm/boot/dts/moxa/
F: drivers/clk/clk-moxart.c
@@ -2969,7 +2979,7 @@ M: Romain Perier <romain.perier@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: http://linux-chenxing.org/
-T: git git://github.com/linux-chenxing/linux.git
+T: git https://github.com/linux-chenxing/linux.git
F: Documentation/devicetree/bindings/arm/mstar/*
F: Documentation/devicetree/bindings/clock/mstar,msc313-mpll.yaml
F: Documentation/devicetree/bindings/gpio/mstar,msc313-gpio.yaml
@@ -3340,10 +3350,15 @@ M: Dinh Nguyen <dinguyen@kernel.org>
S: Maintained
F: drivers/clk/socfpga/
+ARM/SOCFPGA DWMAC GLUE LAYER BINDINGS
+M: Matthew Gerlach <matthew.gerlach@altera.com>
+S: Maintained
+F: Documentation/devicetree/bindings/net/altr,gmii-to-sgmii-2.0.yaml
+F: Documentation/devicetree/bindings/net/altr,socfpga-stmmac.yaml
+
ARM/SOCFPGA DWMAC GLUE LAYER
M: Maxime Chevallier <maxime.chevallier@bootlin.com>
S: Maintained
-F: Documentation/devicetree/bindings/net/socfpga-dwmac.txt
F: drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
ARM/SOCFPGA EDAC BINDINGS
@@ -3894,7 +3909,7 @@ ATHEROS 71XX/9XXX GPIO DRIVER
M: Alban Bedel <albeu@free.fr>
S: Maintained
W: https://github.com/AlbanBedel/linux
-T: git git://github.com/AlbanBedel/linux
+T: git https://github.com/AlbanBedel/linux.git
F: Documentation/devicetree/bindings/gpio/qca,ar7100-gpio.yaml
F: drivers/gpio/gpio-ath79.c
@@ -3902,8 +3917,8 @@ ATHEROS 71XX/9XXX USB PHY DRIVER
M: Alban Bedel <albeu@free.fr>
S: Maintained
W: https://github.com/AlbanBedel/linux
-T: git git://github.com/AlbanBedel/linux
-F: Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
+T: git https://github.com/AlbanBedel/linux.git
+F: Documentation/devicetree/bindings/phy/qca,ar7100-usb-phy.yaml
F: drivers/phy/qualcomm/phy-ath79-usb.c
ATHEROS ATH GENERIC UTILITIES
@@ -3967,7 +3982,7 @@ F: drivers/net/ethernet/cadence/
ATMEL MAXTOUCH DRIVER
M: Nick Dyer <nick@shmanahar.org>
S: Maintained
-T: git git://github.com/ndyer/linux.git
+T: git https://github.com/ndyer/linux.git
F: Documentation/devicetree/bindings/input/atmel,maxtouch.yaml
F: drivers/input/touchscreen/atmel_mxt_ts.c
@@ -4936,6 +4951,12 @@ F: drivers/firmware/broadcom/tee_bnxt_fw.c
F: drivers/net/ethernet/broadcom/bnxt/
F: include/linux/firmware/broadcom/tee_bnxt_fw.h
+BROADCOM BNG_EN 800 GIGABIT ETHERNET DRIVER
+M: Vikas Gupta <vikas.gupta@broadcom.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/broadcom/bnge/
+
BROADCOM BRCM80211 IEEE802.11 WIRELESS DRIVERS
M: Arend van Spriel <arend.vanspriel@broadcom.com>
L: linux-wireless@vger.kernel.org
@@ -5168,7 +5189,6 @@ F: include/linux/platform_data/brcmnand.h
BROADCOM STB PCIE DRIVER
M: Jim Quinlan <jim2101024@gmail.com>
-M: Nicolas Saenz Julienne <nsaenz@kernel.org>
M: Florian Fainelli <florian.fainelli@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: linux-pci@vger.kernel.org
@@ -6063,6 +6083,7 @@ F: include/dt-bindings/clock/
F: include/linux/clk-pr*
F: include/linux/clk/
F: include/linux/of_clk.h
+F: scripts/gdb/linux/clk.py
F: rust/helpers/clk.c
F: rust/kernel/clk.rs
X: drivers/clk/clkdev.c
@@ -6247,9 +6268,11 @@ L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
F: include/linux/memcontrol.h
+F: include/linux/page_counter.h
F: mm/memcontrol.c
F: mm/memcontrol-v1.c
F: mm/memcontrol-v1.h
+F: mm/page_counter.c
F: mm/swap_cgroup.c
F: samples/cgroup/*
F: tools/testing/selftests/cgroup/memcg_protection.m
@@ -6690,7 +6713,7 @@ S: Supported
F: drivers/input/keyboard/dlink-dir685-touchkeys.c
DALLAS/MAXIM DS1685-FAMILY REAL TIME CLOCK
-M: Joshua Kinard <kumba@gentoo.org>
+M: Joshua Kinard <linux@kumba.dev>
S: Maintained
F: drivers/rtc/rtc-ds1685.c
F: include/linux/rtc/ds1685.h
@@ -7405,6 +7428,8 @@ M: Arkadiusz Kubalewski <arkadiusz.kubalewski@intel.com>
M: Jiri Pirko <jiri@resnulli.us>
L: netdev@vger.kernel.org
S: Supported
+F: Documentation/devicetree/bindings/dpll/dpll-device.yaml
+F: Documentation/devicetree/bindings/dpll/dpll-pin.yaml
F: Documentation/driver-api/dpll.rst
F: drivers/dpll/*
F: include/linux/dpll.h
@@ -7580,10 +7605,12 @@ M: Javier Martinez Canillas <javierm@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: drivers/firmware/sysfb*.c
F: drivers/gpu/drm/sysfb/
F: drivers/video/aperture.c
F: drivers/video/nomodeset.c
F: include/linux/aperture.h
+F: include/linux/sysfb.h
F: include/video/nomodeset.h
DRM DRIVER FOR GENERIC EDP PANELS
@@ -7799,6 +7826,7 @@ F: include/uapi/drm/nouveau_drm.h
CORE DRIVER FOR NVIDIA GPUS [RUST]
M: Danilo Krummrich <dakr@kernel.org>
+M: Alexandre Courbot <acourbot@nvidia.com>
L: nouveau@lists.freedesktop.org
S: Supported
Q: https://patchwork.freedesktop.org/project/nouveau/
@@ -7914,6 +7942,7 @@ F: drivers/gpu/drm/sitronix/st7586.c
DRM DRIVER FOR SITRONIX ST7571 PANELS
M: Marcus Folkesson <marcus.folkesson@gmail.com>
S: Maintained
+F: Documentation/devicetree/bindings/display/sitronix,st7567.yaml
F: Documentation/devicetree/bindings/display/sitronix,st7571.yaml
F: drivers/gpu/drm/sitronix/st7571-i2c.c
@@ -8145,6 +8174,14 @@ F: Documentation/devicetree/bindings/display/imx/
F: drivers/gpu/drm/imx/ipuv3/
F: drivers/gpu/ipu-v3/
+DRM DRIVERS FOR FREESCALE IMX8 DISPLAY CONTROLLER
+M: Liu Ying <victor.liu@nxp.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: Documentation/devicetree/bindings/display/imx/fsl,imx8qxp-dc*.yaml
+F: drivers/gpu/drm/imx/dc/
+
DRM DRIVERS FOR FREESCALE IMX BRIDGE
M: Liu Ying <victor.liu@nxp.com>
L: dri-devel@lists.freedesktop.org
@@ -8305,6 +8342,7 @@ M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: Documentation/devicetree/bindings/display/ti/ti,am625-oldi.yaml
F: Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
F: Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
F: Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
@@ -8388,9 +8426,17 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/scheduler/
F: include/drm/gpu_scheduler.h
+DRM LOG
+M: Jocelyn Falempe <jfalempe@redhat.com>
+M: Javier Martinez Canillas <javierm@redhat.com>
+L: dri-devel@lists.freedesktop.org
+S: Supported
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: drivers/gpu/drm/clients/drm_log.c
+
DRM PANEL DRIVERS
M: Neil Armstrong <neil.armstrong@linaro.org>
-R: Jessica Zhang <quic_jesszhan@quicinc.com>
+R: Jessica Zhang <jessica.zhang@oss.qualcomm.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
@@ -8399,6 +8445,26 @@ F: drivers/gpu/drm/drm_panel.c
F: drivers/gpu/drm/panel/
F: include/drm/drm_panel.h
+DRM PANIC
+M: Jocelyn Falempe <jfalempe@redhat.com>
+M: Javier Martinez Canillas <javierm@redhat.com>
+L: dri-devel@lists.freedesktop.org
+S: Supported
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: drivers/gpu/drm/drm_draw.c
+F: drivers/gpu/drm/drm_draw_internal.h
+F: drivers/gpu/drm/drm_panic*.c
+F: include/drm/drm_panic*
+
+DRM PANIC QR CODE
+M: Jocelyn Falempe <jfalempe@redhat.com>
+M: Javier Martinez Canillas <javierm@redhat.com>
+L: dri-devel@lists.freedesktop.org
+L: rust-for-linux@vger.kernel.org
+S: Supported
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: drivers/gpu/drm/drm_panic_qr.rs
+
DRM PRIVACY-SCREEN CLASS
M: Hans de Goede <hansg@kernel.org>
L: dri-devel@lists.freedesktop.org
@@ -11034,7 +11100,7 @@ F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
F: drivers/infiniband/hw/hns/
HISILICON SAS Controller
-M: Yihang Li <liyihang9@huawei.com>
+M: Yihang Li <liyihang9@h-partners.com>
S: Supported
W: http://www.hisilicon.com
F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
@@ -11372,6 +11438,7 @@ F: drivers/tty/hvc/
HUNG TASK DETECTOR
M: Andrew Morton <akpm@linux-foundation.org>
R: Lance Yang <lance.yang@linux.dev>
+R: Masami Hiramatsu <mhiramat@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: include/linux/hung_task.h
@@ -11546,6 +11613,13 @@ S: Maintained
F: Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml
F: drivers/i3c/master/i3c-master-cdns.c
+I3C DRIVER FOR RENESAS
+M: Wolfram Sang <wsa+renesas@sang-engineering.com>
+M: Tommaso Merciai <tommaso.merciai.xr@bp.renesas.com>
+S: Supported
+F: Documentation/devicetree/bindings/i3c/renesas,i3c.yaml
+F: drivers/i3c/master/renesas-i3c.c
+
I3C DRIVER FOR SYNOPSYS DESIGNWARE
S: Orphan
F: Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
@@ -11556,6 +11630,7 @@ M: Alexandre Belloni <alexandre.belloni@bootlin.com>
R: Frank Li <Frank.Li@nxp.com>
L: linux-i3c@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+Q: https://patchwork.kernel.org/project/linux-i3c/list/
C: irc://chat.freenode.net/linux-i3c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux.git
F: Documentation/ABI/testing/sysfs-bus-i3c
@@ -12136,6 +12211,13 @@ L: linux-kernel@vger.kernel.org
S: Supported
F: arch/x86/include/asm/intel-family.h
+INTEL DISCRETE GRAPHICS NVM MTD DRIVER
+M: Alexander Usyskin <alexander.usyskin@intel.com>
+L: linux-mtd@lists.infradead.org
+S: Supported
+F: drivers/mtd/devices/mtd_intel_dg.c
+F: include/linux/intel_dg_nvm_aux.h
+
INTEL DRM DISPLAY FOR XE AND I915 DRIVERS
M: Jani Nikula <jani.nikula@linux.intel.com>
M: Rodrigo Vivi <rodrigo.vivi@intel.com>
@@ -12326,6 +12408,15 @@ T: git git://linuxtv.org/media.git
F: Documentation/admin-guide/media/ipu6-isys.rst
F: drivers/media/pci/intel/ipu6/
+INTEL IPU7 INPUT SYSTEM DRIVER
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+R: Bingbu Cao <bingbu.cao@intel.com>
+R: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media.git
+F: drivers/staging/media/ipu7/
+
INTEL ISHTP ECLITE DRIVER
M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
L: platform-driver-x86@vger.kernel.org
@@ -12493,10 +12584,9 @@ S: Supported
F: drivers/cpufreq/intel_pstate.c
INTEL PTP DFL ToD DRIVER
-M: Tianfei Zhang <tianfei.zhang@intel.com>
L: linux-fpga@vger.kernel.org
L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/ptp/ptp_dfl_tod.c
INTEL QUADRATURE ENCODER PERIPHERAL DRIVER
@@ -12610,9 +12700,19 @@ M: Miri Korenblit <miriam.rachel.korenblit@intel.com>
L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next.git/
F: drivers/net/wireless/intel/iwlwifi/
+INTEL VISION SENSING CONTROLLER DRIVER
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+R: Bingbu Cao <bingbu.cao@intel.com>
+R: Lixu Zhang <lixu.zhang@intel.com>
+R: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media.git
+F: drivers/media/pci/intel/ivsc/
+
INTEL WMI SLIM BOOTLOADER (SBL) FIRMWARE UPDATE DRIVER
S: Orphan
W: https://slimbootloader.github.io/security/firmware-update.html
@@ -12624,9 +12724,8 @@ S: Maintained
F: drivers/platform/x86/intel/wmi/thunderbolt.c
INTEL WWAN IOSM DRIVER
-M: M Chetan Kumar <m.chetan.kumar@intel.com>
L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/net/wwan/iosm/
INTEL(R) FLEXIBLE RETURN AND EVENT DELIVERY
@@ -13074,11 +13173,9 @@ F: mm/kasan/
F: scripts/Makefile.kasan
KCONFIG
-M: Masahiro Yamada <masahiroy@kernel.org>
L: linux-kbuild@vger.kernel.org
-S: Maintained
+S: Orphan
Q: https://patchwork.kernel.org/project/linux-kbuild/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git kbuild
F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
@@ -13143,13 +13240,12 @@ S: Maintained
F: fs/autofs/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
-M: Masahiro Yamada <masahiroy@kernel.org>
-R: Nathan Chancellor <nathan@kernel.org>
-R: Nicolas Schier <nicolas@fjasle.eu>
+M: Nathan Chancellor <nathan@kernel.org>
+M: Nicolas Schier <nicolas@fjasle.eu>
L: linux-kbuild@vger.kernel.org
-S: Maintained
+S: Odd Fixes
Q: https://patchwork.kernel.org/project/linux-kbuild/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux.git
F: Documentation/kbuild/
F: Makefile
F: scripts/*vmlinux*
@@ -13444,6 +13540,7 @@ F: Documentation/admin-guide/mm/kho.rst
F: Documentation/core-api/kho/*
F: include/linux/kexec_handover.h
F: kernel/kexec_handover.c
+F: tools/testing/selftests/kho/
KEYS-ENCRYPTED
M: Mimi Zohar <zohar@linux.ibm.com>
@@ -13588,7 +13685,6 @@ F: scripts/Makefile.kmsan
KPROBES
M: Naveen N Rao <naveen@kernel.org>
-M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
M: "David S. Miller" <davem@davemloft.net>
M: Masami Hiramatsu <mhiramat@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -14607,7 +14703,7 @@ MARVELL ARMADA 3700 PHY DRIVERS
M: Miquel Raynal <miquel.raynal@bootlin.com>
S: Maintained
F: Documentation/devicetree/bindings/phy/marvell,armada-3700-utmi-phy.yaml
-F: Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
+F: Documentation/devicetree/bindings/phy/marvell,comphy-cp110.yaml
F: drivers/phy/marvell/phy-mvebu-a3700-comphy.c
F: drivers/phy/marvell/phy-mvebu-a3700-utmi.c
@@ -15576,7 +15672,6 @@ MEDIATEK T7XX 5G WWAN MODEM DRIVER
M: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
R: Chiranjeevi Rapolu <chiranjeevi.rapolu@linux.intel.com>
R: Liu Haijun <haijun.liu@mediatek.com>
-R: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
R: Ricardo Martinez <ricardo.martinez@linux.intel.com>
L: netdev@vger.kernel.org
S: Supported
@@ -15837,6 +15932,8 @@ F: Documentation/admin-guide/mm/memory-hotplug.rst
F: Documentation/core-api/memory-hotplug.rst
F: drivers/base/memory.c
F: include/linux/memory_hotplug.h
+F: include/linux/memremap.h
+F: mm/memremap.c
F: mm/memory_hotplug.c
F: tools/testing/selftests/memory-hotplug/
@@ -15847,23 +15944,8 @@ S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
T: quilt git://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new
-F: Documentation/admin-guide/mm/
-F: Documentation/mm/
-F: include/linux/gfp.h
-F: include/linux/gfp_types.h
-F: include/linux/memory_hotplug.h
-F: include/linux/memory-tiers.h
-F: include/linux/mempolicy.h
-F: include/linux/mempool.h
-F: include/linux/memremap.h
-F: include/linux/mmzone.h
-F: include/linux/mmu_notifier.h
-F: include/linux/pagewalk.h
-F: include/trace/events/ksm.h
F: mm/
F: tools/mm/
-F: tools/testing/selftests/mm/
-N: include/linux/page[-_]*
MEMORY MANAGEMENT - CORE
M: Andrew Morton <akpm@linux-foundation.org>
@@ -15878,18 +15960,40 @@ L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: include/linux/gfp.h
+F: include/linux/gfp_types.h
+F: include/linux/highmem.h
F: include/linux/memory.h
F: include/linux/mm.h
F: include/linux/mm_*.h
+F: include/linux/mmzone.h
F: include/linux/mmdebug.h
+F: include/linux/mmu_notifier.h
F: include/linux/pagewalk.h
+F: include/linux/pgtable.h
+F: include/linux/ptdump.h
+F: include/linux/vmpressure.h
+F: include/linux/vmstat.h
F: kernel/fork.c
F: mm/Kconfig
F: mm/debug.c
+F: mm/folio-compat.c
+F: mm/highmem.c
F: mm/init-mm.c
+F: mm/internal.h
+F: mm/maccess.c
F: mm/memory.c
+F: mm/mmu_notifier.c
+F: mm/mmzone.c
F: mm/pagewalk.c
+F: mm/pgtable-generic.c
+F: mm/ptdump.c
+F: mm/sparse-vmemmap.c
+F: mm/sparse.c
F: mm/util.c
+F: mm/vmpressure.c
+F: mm/vmstat.c
+N: include/linux/page[-_]*
MEMORY MANAGEMENT - EXECMEM
M: Andrew Morton <akpm@linux-foundation.org>
@@ -15929,6 +16033,7 @@ F: Documentation/mm/ksm.rst
F: include/linux/ksm.h
F: include/trace/events/ksm.h
F: mm/ksm.c
+F: mm/mm_slot.h
MEMORY MANAGEMENT - MEMORY POLICY AND MIGRATION
M: Andrew Morton <akpm@linux-foundation.org>
@@ -15946,11 +16051,49 @@ S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
F: include/linux/mempolicy.h
+F: include/uapi/linux/mempolicy.h
F: include/linux/migrate.h
+F: include/linux/migrate_mode.h
F: mm/mempolicy.c
F: mm/migrate.c
F: mm/migrate_device.c
+MEMORY MANAGEMENT - MISC
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R: Liam R. Howlett <Liam.Howlett@oracle.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Mike Rapoport <rppt@kernel.org>
+R: Suren Baghdasaryan <surenb@google.com>
+R: Michal Hocko <mhocko@suse.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: Documentation/admin-guide/mm/
+F: Documentation/mm/
+F: include/linux/cma.h
+F: include/linux/dmapool.h
+F: include/linux/ioremap.h
+F: include/linux/memory-tiers.h
+F: include/linux/page_idle.h
+F: mm/backing-dev.c
+F: mm/cma.c
+F: mm/cma_debug.c
+F: mm/cma_sysfs.c
+F: mm/dmapool.c
+F: mm/dmapool_test.c
+F: mm/early_ioremap.c
+F: mm/fadvise.c
+F: mm/ioremap.c
+F: mm/mapping_dirty_helpers.c
+F: mm/memory-tiers.c
+F: mm/page_idle.c
+F: mm/pgalloc-track.h
+F: mm/process_vm_access.c
+F: tools/testing/selftests/mm/
+
MEMORY MANAGEMENT - NUMA MEMBLOCKS AND NUMA EMULATION
M: Andrew Morton <akpm@linux-foundation.org>
M: Mike Rapoport <rppt@kernel.org>
@@ -15987,6 +16130,7 @@ F: include/linux/gfp.h
F: include/linux/page-isolation.h
F: mm/compaction.c
F: mm/debug_page_alloc.c
+F: mm/debug_page_ref.c
F: mm/fail_page_alloc.c
F: mm/page_alloc.c
F: mm/page_ext.c
@@ -15995,8 +16139,10 @@ F: mm/page_isolation.c
F: mm/page_owner.c
F: mm/page_poison.c
F: mm/page_reporting.c
+F: mm/page_reporting.h
F: mm/show_mem.c
F: mm/shuffle.c
+F: mm/shuffle.h
MEMORY MANAGEMENT - RECLAIM
M: Andrew Morton <akpm@linux-foundation.org>
@@ -16074,6 +16220,7 @@ F: include/linux/khugepaged.h
F: include/trace/events/huge_memory.h
F: mm/huge_memory.c
F: mm/khugepaged.c
+F: mm/mm_slot.h
F: tools/testing/selftests/mm/khugepaged.c
F: tools/testing/selftests/mm/split_huge_page_test.c
F: tools/testing/selftests/mm/transhuge-stress.c
@@ -16116,6 +16263,7 @@ S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
F: include/trace/events/mmap.h
+F: mm/interval_tree.c
F: mm/mincore.c
F: mm/mlock.c
F: mm/mmap.c
@@ -16610,6 +16758,14 @@ L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/microchip/
+MICROCHIP ZL3073X DRIVER
+M: Ivan Vecera <ivecera@redhat.com>
+M: Prathosh Satish <Prathosh.Satish@microchip.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/dpll/microchip,zl30731.yaml
+F: drivers/dpll/zl3073x/
+
MICROSEMI MIPS SOCS
M: Alexandre Belloni <alexandre.belloni@bootlin.com>
M: UNGLinuxDriver@microchip.com
@@ -17292,6 +17448,7 @@ F: drivers/net/ethernet/neterion/
NETFILTER
M: Pablo Neira Ayuso <pablo@netfilter.org>
M: Jozsef Kadlecsik <kadlec@netfilter.org>
+M: Florian Westphal <fw@strlen.de>
L: netfilter-devel@vger.kernel.org
L: coreteam@netfilter.org
S: Maintained
@@ -18055,6 +18212,7 @@ NXP i.MX 8M ISI DRIVER
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/media/fsl,imx8*-isi.yaml
F: Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml
F: drivers/media/platform/nxp/imx8-isi/
@@ -18100,9 +18258,9 @@ L: linux-clk@vger.kernel.org
L: imx@lists.linux.dev
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/abelvesa/linux.git clk/imx
-F: Documentation/devicetree/bindings/clock/imx*
+F: Documentation/devicetree/bindings/clock/*imx*
F: drivers/clk/imx/
-F: include/dt-bindings/clock/imx*
+F: include/dt-bindings/clock/*imx*
NXP PF8100/PF8121A/PF8200 PMIC REGULATOR DEVICE DRIVER
M: Jagan Teki <jagan@amarulasolutions.com>
@@ -18929,6 +19087,11 @@ F: Documentation/mm/page_table_check.rst
F: include/linux/page_table_check.h
F: mm/page_table_check.c
+PAGE STATE DEBUG SCRIPT
+M: Ye Liu <liuye@kylinos.cn>
+S: Maintained
+F: tools/mm/show_page_info.py
+
PANASONIC LAPTOP ACPI EXTRAS DRIVER
M: Kenneth Chan <kenneth.t.chan@gmail.com>
L: platform-driver-x86@vger.kernel.org
@@ -19048,7 +19211,7 @@ M: Pali Rohár <pali@kernel.org>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/pci/aardvark-pci.txt
+F: Documentation/devicetree/bindings/pci/marvell,armada-3700-pcie.yaml
F: drivers/pci/controller/pci-aardvark.c
PCI DRIVER FOR ALTERA PCIE IP
@@ -19063,7 +19226,7 @@ M: Toan Le <toan@os.amperecomputing.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/pci/xgene-pci.txt
+F: Documentation/devicetree/bindings/pci/apm,xgene-pcie.yaml
F: drivers/pci/controller/pci-xgene.c
PCI DRIVER FOR ARM VERSATILE PLATFORM
@@ -19382,7 +19545,7 @@ PCIE DRIVER FOR AMAZON ANNAPURNA LABS
M: Jonathan Chocron <jonnyc@amazon.com>
L: linux-pci@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/pci/pcie-al.txt
+F: Documentation/devicetree/bindings/pci/amazon,al-alpine-v3-pcie.yaml
F: drivers/pci/controller/dwc/pcie-al.c
PCIE DRIVER FOR AMLOGIC MESON
@@ -19565,6 +19728,7 @@ F: arch/*/include/asm/percpu.h
F: include/linux/percpu*.h
F: lib/percpu*.c
F: mm/percpu*.c
+F: mm/percpu-internal.h
PER-TASK DELAY ACCOUNTING
M: Balbir Singh <bsingharora@gmail.com>
@@ -19573,6 +19737,16 @@ S: Maintained
F: include/linux/delayacct.h
F: kernel/delayacct.c
+TASK DELAY MONITORING TOOLS
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Wang Yaxin <wang.yaxin@zte.com.cn>
+M: Fan Yu <fan.yu9@zte.com.cn>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: Documentation/accounting/delay-accounting.rst
+F: tools/accounting/delaytop.c
+F: tools/accounting/getdelays.c
+
PERFORMANCE EVENTS SUBSYSTEM
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
@@ -19840,7 +20014,7 @@ L: linux-pm@vger.kernel.org
S: Supported
W: https://01.org/pm-graph
B: https://bugzilla.kernel.org/buglist.cgi?component=pm-graph&product=Tools
-T: git git://github.com/intel/pm-graph
+T: git https://github.com/intel/pm-graph.git
F: tools/power/pm-graph
PM6764TR DRIVER
@@ -20231,8 +20405,8 @@ M: Haojian Zhuang <haojian.zhuang@gmail.com>
M: Robert Jarzmik <robert.jarzmik@free.fr>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://github.com/hzhuang1/linux.git
-T: git git://github.com/rjarzmik/linux.git
+T: git https://github.com/hzhuang1/linux.git
+T: git https://github.com/rjarzmik/linux.git
F: arch/arm/boot/dts/intel/pxa/
F: arch/arm/mach-pxa/
F: drivers/dma/pxa*
@@ -20291,12 +20465,6 @@ S: Maintained
F: drivers/firmware/qemu_fw_cfg.c
F: include/uapi/linux/qemu_fw_cfg.h
-QIB DRIVER
-M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
-L: linux-rdma@vger.kernel.org
-S: Supported
-F: drivers/infiniband/hw/qib/
-
QLOGIC QL41xxx FCOE DRIVER
M: Saurav Kashyap <skashyap@marvell.com>
M: Javed Hasan <jhasan@marvell.com>
@@ -20460,7 +20628,7 @@ QUALCOMM ATHEROS QCA7K ETHERNET DRIVER
M: Stefan Wahren <wahrenst@gmx.net>
L: netdev@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/net/qca,qca7000.txt
+F: Documentation/devicetree/bindings/net/qca,qca7000.yaml
F: drivers/net/ethernet/qualcomm/qca*
QUALCOMM BAM-DMUX WWAN NETWORK DRIVER
@@ -20475,6 +20643,7 @@ QUALCOMM CAMERA SUBSYSTEM DRIVER
M: Robert Foss <rfoss@kernel.org>
M: Todor Tomov <todor.too@gmail.com>
M: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+R: Vladimir Zapolskiy <vladimir.zapolskiy@linaro.org>
L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/admin-guide/media/qcom_camss.rst
@@ -20497,6 +20666,7 @@ L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: Documentation/ABI/testing/sysfs-driver-qaic
F: Documentation/accel/qaic/
F: drivers/accel/qaic/
F: include/uapi/drm/qaic_accel.h
@@ -20658,6 +20828,13 @@ S: Maintained
F: Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
F: drivers/mtd/nand/raw/qcom_nandc.c
+QUALCOMM SMB CHARGER DRIVER
+M: Casey Connolly <casey.connolly@linaro.org>
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml
+F: drivers/power/supply/qcom_smbx.c
+
QUALCOMM QSEECOM DRIVER
M: Maximilian Luz <luzmaximilian@gmail.com>
L: linux-arm-msm@vger.kernel.org
@@ -21308,6 +21485,14 @@ S: Maintained
F: Documentation/devicetree/bindings/net/renesas,rzv2h-gbeth.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
+RENESAS RZ/V2H(P) RSPI DRIVER
+M: Fabrizio Castro <fabrizio.castro.jz@renesas.com>
+L: linux-spi@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/spi/renesas,rzv2h-rspi.yaml
+F: drivers/spi/spi-rzv2h-rspi.c
+
RENESAS RZ/V2H(P) USB2PHY PORT RESET DRIVER
M: Fabrizio Castro <fabrizio.castro.jz@renesas.com>
M: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
@@ -21619,6 +21804,14 @@ S: Maintained
F: Documentation/devicetree/bindings/media/rockchip-rga.yaml
F: drivers/media/platform/rockchip/rga/
+ROCKCHIP RKVDEC VIDEO DECODER DRIVER
+M: Detlev Casanova <detlev.casanova@collabora.com>
+L: linux-media@vger.kernel.org
+L: linux-rockchip@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/media/rockchip,vdec.yaml
+F: drivers/media/platform/rockchip/rkvdec/
+
ROCKCHIP RK3308 INTERNAL AUDIO CODEC
M: Luca Ceresoli <luca.ceresoli@bootlin.com>
S: Maintained
@@ -21861,6 +22054,10 @@ K: \b(?i:rust)\b
RUST [ALLOC]
M: Danilo Krummrich <dakr@kernel.org>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Liam R. Howlett <Liam.Howlett@oracle.com>
+R: Uladzislau Rezki <urezki@gmail.com>
L: rust-for-linux@vger.kernel.org
S: Maintained
T: git https://github.com/Rust-for-Linux/linux.git alloc-next
@@ -22762,7 +22959,9 @@ R: Muchun Song <muchun.song@linux.dev>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/admin-guide/mm/shrinker_debugfs.rst
+F: include/linux/list_lru.h
F: include/linux/shrinker.h
+F: mm/list_lru.c
F: mm/shrinker.c
F: mm/shrinker_debug.c
@@ -22994,17 +23193,24 @@ F: Documentation/devicetree/bindings/nvmem/layouts/kontron,sl28-vpd.yaml
F: drivers/nvmem/layouts/sl28vpd.c
SLAB ALLOCATOR
-M: Christoph Lameter <cl@gentwo.org>
-M: David Rientjes <rientjes@google.com>
-M: Andrew Morton <akpm@linux-foundation.org>
M: Vlastimil Babka <vbabka@suse.cz>
+M: Andrew Morton <akpm@linux-foundation.org>
+R: Christoph Lameter <cl@gentwo.org>
+R: David Rientjes <rientjes@google.com>
R: Roman Gushchin <roman.gushchin@linux.dev>
R: Harry Yoo <harry.yoo@oracle.com>
L: linux-mm@kvack.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab.git
-F: include/linux/sl?b*.h
-F: mm/sl?b*
+F: Documentation/admin-guide/mm/slab.rst
+F: Documentation/mm/slab.rst
+F: include/linux/mempool.h
+F: include/linux/slab.h
+F: mm/failslab.c
+F: mm/mempool.c
+F: mm/slab.h
+F: mm/slab_common.c
+F: mm/slub.c
SLCAN CAN NETWORK DRIVER
M: Dario Binacchi <dario.binacchi@amarulasolutions.com>
@@ -23030,7 +23236,7 @@ M: Casey Schaufler <casey@schaufler-ca.com>
L: linux-security-module@vger.kernel.org
S: Maintained
W: http://schaufler-ca.com
-T: git git://github.com/cschaufler/smack-next
+T: git https://github.com/cschaufler/smack-next.git
F: Documentation/admin-guide/LSM/Smack.rst
F: security/smack/
@@ -23135,7 +23341,7 @@ S: Maintained
F: drivers/leds/leds-net48xx.c
SOFT-IWARP DRIVER (siw)
-M: Bernard Metzler <bmt@zurich.ibm.com>
+M: Bernard Metzler <bernard.metzler@linux.dev>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/sw/siw/
@@ -23190,6 +23396,7 @@ F: drivers/md/md*
F: drivers/md/raid*
F: include/linux/raid/
F: include/uapi/linux/raid/
+F: lib/raid6/
SOLIDRUN CLEARFOG SUPPORT
M: Russell King <linux@armlinux.org.uk>
@@ -23456,7 +23663,6 @@ SOUNDWIRE SUBSYSTEM
M: Vinod Koul <vkoul@kernel.org>
M: Bard Liao <yung-chuan.liao@linux.intel.com>
R: Pierre-Louis Bossart <pierre-louis.bossart@linux.dev>
-R: Sanyog Kale <sanyog.r.kale@intel.com>
L: linux-sound@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/soundwire.git
@@ -23635,6 +23841,12 @@ F: drivers/bus/stm32_etzpc.c
F: drivers/bus/stm32_firewall.c
F: drivers/bus/stm32_rifsc.c
+ST STM32 HDP PINCTRL DRIVER
+M: Clément Le Goffic <legoffic.clement@gmail.com>
+S: Maintained
+F: Documentation/devicetree/bindings/pinctrl/st,stm32-hdp.yaml
+F: drivers/pinctrl/stm32/pinctrl-stm32-hdp.c
+
ST STM32 I2C/SMBUS DRIVER
M: Pierre-Yves MORDRET <pierre-yves.mordret@foss.st.com>
M: Alain Volmat <alain.volmat@foss.st.com>
@@ -23648,6 +23860,14 @@ S: Maintained
F: Documentation/devicetree/bindings/memory-controllers/st,stm32mp25-omm.yaml
F: drivers/memory/stm32_omm.c
+ST STM32 PINCTRL DRIVER
+M: Antonio Borneo <antonio.borneo@foss.st.com>
+S: Maintained
+F: Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+F: drivers/pinctrl/stm32/
+F: include/dt-bindings/pinctrl/stm32-pinfunc.h
+X: drivers/pinctrl/stm32/pinctrl-stm32-hdp.c
+
ST STM32 SPI DRIVER
M: Alain Volmat <alain.volmat@foss.st.com>
L: linux-spi@vger.kernel.org
@@ -24726,7 +24946,7 @@ TEXAS INSTRUMENTS TPS6131X FLASH LED DRIVER
M: Matthias Fend <matthias.fend@emfend.at>
L: linux-leds@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/leds/ti,tps6131x.yaml
+F: Documentation/devicetree/bindings/leds/ti,tps61310.yaml
F: drivers/leds/flash/leds-tps6131x.c
TEXAS INSTRUMENTS' DAC7612 DAC DRIVER
@@ -25285,6 +25505,13 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/toshiba-wmi.c
+TOUCH OVERLAY
+M: Javier Carrasco <javier.carrasco@wolfvision.net>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/input/touch-overlay.c
+F: include/linux/input/touch-overlay.h
+
TPM DEVICE DRIVER
M: Peter Huewe <peterhuewe@gmx.de>
M: Jarkko Sakkinen <jarkko@kernel.org>
@@ -25371,7 +25598,7 @@ TRADITIONAL CHINESE DOCUMENTATION
M: Hu Haowen <2023002089@link.tyut.edu.cn>
S: Maintained
W: https://github.com/srcres258/linux-doc
-T: git git://github.com/srcres258/linux-doc.git doc-zh-tw
+T: git https://github.com/srcres258/linux-doc.git doc-zh-tw
F: Documentation/translations/zh_TW/
TRIGGER SOURCE - ADI UTIL SIGMA DELTA SPI
@@ -26000,6 +26227,7 @@ S: Maintained
W: http://www.ideasonboard.org/uvc/
T: git git://linuxtv.org/media.git
F: Documentation/userspace-api/media/drivers/uvcvideo.rst
+F: Documentation/userspace-api/media/v4l/metafmt-uvc-msxu-1-5.rst
F: Documentation/userspace-api/media/v4l/metafmt-uvc.rst
F: drivers/media/common/uvc.c
F: drivers/media/usb/uvc/
@@ -26071,6 +26299,13 @@ F: Documentation/driver-api/uio-howto.rst
F: drivers/uio/
F: include/linux/uio_driver.h
+USERSPACE STACK UNWINDING
+M: Josh Poimboeuf <jpoimboe@kernel.org>
+M: Steven Rostedt <rostedt@goodmis.org>
+S: Maintained
+F: include/linux/unwind*.h
+F: kernel/unwind/
+
UTIL-LINUX PACKAGE
M: Karel Zak <kzak@redhat.com>
L: util-linux@vger.kernel.org
@@ -26233,7 +26468,6 @@ S: Maintained
F: drivers/vfio/platform/
VFIO QAT PCI DRIVER
-M: Xin Zeng <xin.zeng@intel.com>
M: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
L: kvm@vger.kernel.org
L: qat-linux@intel.com
@@ -26598,6 +26832,7 @@ F: include/uapi/linux/vm_sockets.h
F: include/uapi/linux/vm_sockets_diag.h
F: include/uapi/linux/vsockmon.h
F: net/vmw_vsock/
+F: tools/testing/selftests/vsock/
F: tools/testing/vsock/
VMALLOC
@@ -27587,6 +27822,7 @@ L: linux-mm@kvack.org
S: Maintained
F: Documentation/mm/zsmalloc.rst
F: include/linux/zsmalloc.h
+F: mm/zpdesc.h
F: mm/zsmalloc.c
ZSTD
diff --git a/Makefile b/Makefile
index 076de54b3311..6bfe776bf3c5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
-PATCHLEVEL = 16
+PATCHLEVEL = 17
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -479,11 +479,17 @@ export rust_common_flags := --edition=2021 \
-Wrust_2018_idioms \
-Wunreachable_pub \
-Wclippy::all \
+ -Wclippy::as_ptr_cast_mut \
+ -Wclippy::as_underscore \
+ -Wclippy::cast_lossless \
-Wclippy::ignored_unit_patterns \
-Wclippy::mut_mut \
-Wclippy::needless_bitwise_bool \
-Aclippy::needless_lifetimes \
-Wclippy::no_mangle_with_rust_abi \
+ -Wclippy::ptr_as_ptr \
+ -Wclippy::ptr_cast_constness \
+ -Wclippy::ref_as_ptr \
-Wclippy::undocumented_unsafe_blocks \
-Wclippy::unnecessary_safety_comment \
-Wclippy::unnecessary_safety_doc \
@@ -543,6 +549,7 @@ LZMA = lzma
LZ4 = lz4
XZ = xz
ZSTD = zstd
+TAR = tar
CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
@@ -622,7 +629,7 @@ export RUSTC RUSTDOC RUSTFMT RUSTC_OR_CLIPPY_QUIET RUSTC_OR_CLIPPY BINDGEN
export HOSTRUSTC KBUILD_HOSTRUSTFLAGS
export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
-export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
+export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD TAR
export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS KBUILD_PROCMACROLDFLAGS LDFLAGS_MODULE
export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
@@ -1135,7 +1142,7 @@ KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD
KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
# userspace programs are linked via the compiler, use the correct linker
-ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy)
+ifdef CONFIG_CC_IS_CLANG
KBUILD_USERLDFLAGS += --ld-path=$(LD)
endif
diff --git a/arch/Kconfig b/arch/Kconfig
index 3d81c5f1ba2d..d1b4ffd6e085 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -444,6 +444,13 @@ config HAVE_HARDLOCKUP_DETECTOR_ARCH
It uses the same command line parameters, and sysctl interface,
as the generic hardlockup detectors.
+config UNWIND_USER
+ bool
+
+config HAVE_UNWIND_USER_FP
+ bool
+ select UNWIND_USER
+
config HAVE_PERF_REGS
bool
help
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 109a4cddcd13..80367f2cf821 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -7,6 +7,7 @@ config ALPHA
select ARCH_HAS_DMA_OPS if PCI
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_MODULE_NEEDS_WEAK_PER_CPU if SMP
select ARCH_NO_PREEMPT
select ARCH_NO_SG_CHAIN
select ARCH_USE_CMPXCHG_LOCKREF
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index 6923249f2d49..4383d66341dc 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -9,10 +9,9 @@
* way above 4G.
*
* Always use weak definitions for percpu variables in modules.
+ * Therefore, we have enabled CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU
+ * in the Kconfig.
*/
-#if defined(MODULE) && defined(CONFIG_SMP)
-#define ARCH_NEEDS_WEAK_PER_CPU
-#endif
#include <asm-generic/percpu.h>
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 8f1f18adcdb5..5ef57f88df6b 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -152,6 +152,9 @@
#define SO_PASSRIGHTS 83
+#define SO_INQ 84
+#define SCM_INQ SO_INQ
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index b1bfbd11980d..d38f4d6759e4 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -17,6 +17,7 @@
#include <linux/vmalloc.h>
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
+#include <linux/string.h>
#include <linux/module.h>
#include <linux/memblock.h>
@@ -79,10 +80,12 @@ mk_resource_name(int pe, int port, char *str)
{
char tmp[80];
char *name;
-
- sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
- name = memblock_alloc_or_panic(strlen(tmp) + 1, SMP_CACHE_BYTES);
- strcpy(name, tmp);
+ size_t sz;
+
+ sz = scnprintf(tmp, sizeof(tmp), "PCI %s PE %d PORT %d", str, pe, port);
+ sz += 1; /* NUL terminator */
+ name = memblock_alloc_or_panic(sz, SMP_CACHE_BYTES);
+ strscpy(name, tmp, sz);
return name;
}
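
The hunk above sizes the allocation from scnprintf()'s return value, which is the number of characters actually written (excluding the NUL terminator), instead of taking a second strlen() pass over the formatted string. A minimal userspace sketch of the same pattern, with snprintf() and malloc() standing in for the kernel-only scnprintf(), strscpy() and memblock_alloc_or_panic():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Size the copy from the formatter's return value instead of a second
 * strlen() pass. snprintf() may report more than was written on
 * truncation, so clamp it the way the kernel's scnprintf() does. */
static char *mk_name(const char *str, int pe, int port)
{
	char tmp[80];
	char *name;
	size_t sz;
	int n;

	n = snprintf(tmp, sizeof(tmp), "PCI %s PE %d PORT %d", str, pe, port);
	if (n < 0)
		return NULL;
	sz = (size_t)n >= sizeof(tmp) ? sizeof(tmp) - 1 : (size_t)n;
	sz += 1;				/* NUL terminator */

	name = malloc(sz);
	if (name)
		memcpy(name, tmp, sz);		/* sz includes the NUL */
	return name;
}

int main(void)
{
	char *name = mk_name("IO", 0, 1);

	if (name) {
		puts(name);			/* PCI IO PE 0 PORT 1 */
		free(name);
	}
	return 0;
}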
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a6e80653abd1..b1f3df39ed40 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -100,12 +100,12 @@ config ARM
select HAVE_BUILDTIME_MCOUNT_SORT
select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL
select HAVE_DMA_CONTIGUOUS if MMU
+ select HAVE_EXTRA_IPI_TRACEPOINTS
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_EXIT_THREAD
select HAVE_GUP_FAST if ARM_LPAE
- select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
diff --git a/arch/arm/boot/dts/broadcom/bcm7445.dtsi b/arch/arm/boot/dts/broadcom/bcm7445.dtsi
index 5ac2042515b8..c6307c7437e3 100644
--- a/arch/arm/boot/dts/broadcom/bcm7445.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm7445.dtsi
@@ -237,7 +237,8 @@
ranges = <0x0 0x0 0x80000>;
memc-ddr@2000 {
- compatible = "brcm,brcmstb-memc-ddr";
+ compatible = "brcm,brcmstb-memc-ddr-rev-b.1.x",
+ "brcm,brcmstb-memc-ddr";
reg = <0x2000 0x800>;
};
@@ -259,7 +260,8 @@
ranges = <0x0 0x80000 0x80000>;
memc-ddr@2000 {
- compatible = "brcm,brcmstb-memc-ddr";
+ compatible = "brcm,brcmstb-memc-ddr-rev-b.1.x",
+ "brcm,brcmstb-memc-ddr";
reg = <0x2000 0x800>;
};
@@ -281,7 +283,8 @@
ranges = <0x0 0x100000 0x80000>;
memc-ddr@2000 {
- compatible = "brcm,brcmstb-memc-ddr";
+ compatible = "brcm,brcmstb-memc-ddr-rev-b.1.x",
+ "brcm,brcmstb-memc-ddr";
reg = <0x2000 0x800>;
};
diff --git a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-linksys-wrv54g.dts b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-linksys-wrv54g.dts
index 98275a363c57..cb1842c83ac8 100644
--- a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-linksys-wrv54g.dts
+++ b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-linksys-wrv54g.dts
@@ -72,10 +72,55 @@
cs-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
num-chipselects = <1>;
- switch@0 {
+ ethernet-switch@0 {
compatible = "micrel,ks8995";
reg = <0>;
spi-max-frequency = <50000000>;
+
+ /*
+ * The PHYs are accessed over the external MDIO
+ * bus and not internally through the switch control
+ * registers.
+ */
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-port@0 {
+ reg = <0>;
+ label = "1";
+ phy-mode = "mii";
+ phy-handle = <&phy1>;
+ };
+ ethernet-port@1 {
+ reg = <1>;
+ label = "2";
+ phy-mode = "mii";
+ phy-handle = <&phy2>;
+ };
+ ethernet-port@2 {
+ reg = <2>;
+ label = "3";
+ phy-mode = "mii";
+ phy-handle = <&phy3>;
+ };
+ ethernet-port@3 {
+ reg = <3>;
+ label = "4";
+ phy-mode = "mii";
+ phy-handle = <&phy4>;
+ };
+ ethernet-port@4 {
+ reg = <4>;
+ ethernet = <&ethb>;
+ phy-mode = "mii";
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+
+ };
};
};
@@ -135,40 +180,59 @@
};
/*
- * EthB - connected to the KS8995 switch ports 1-4
- * FIXME: the boardfile defines .phy_mask = 0x1e for this port to enable output to
- * all four switch ports, also using an out of tree multiphy patch.
- * Do we need a new binding and property for this?
+ * EthB connects to the KS8995 CPU port and faces ports 1-4
+ * through the switch fabric.
+ *
+ * To complicate things, the MDIO channel is also only
+ * accessible through EthB, but used independently for PHY
+ * control.
*/
- ethernet@c8009000 {
+ ethb: ethernet@c8009000 {
status = "okay";
queue-rx = <&qmgr 3>;
queue-txready = <&qmgr 20>;
- phy-mode = "rgmii";
- phy-handle = <&phy4>;
+ phy-mode = "mii";
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
mdio {
#address-cells = <1>;
#size-cells = <0>;
- /* Should be ports 1-4 on the KS8995 switch */
+ /*
+ * LAN ports 1-4 on the KS8995 switch
+ * and PHY5 for WAN need to be accessed
+ * through this external MDIO channel.
+ */
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+ phy3: ethernet-phy@3 {
+ reg = <3>;
+ };
phy4: ethernet-phy@4 {
reg = <4>;
};
-
- /* Should be port 5 on the KS8995 switch */
phy5: ethernet-phy@5 {
reg = <5>;
};
};
};
- /* EthC - connected to KS8995 switch port 5 */
- ethernet@c800a000 {
+ /*
+ * EthC connects to MII-P5 on the KS8995, bypassing
+ * all of the switch logic and facing PHY5.
+ */
+ ethc: ethernet@c800a000 {
status = "okay";
queue-rx = <&qmgr 4>;
queue-txready = <&qmgr 21>;
- phy-mode = "rgmii";
+ phy-mode = "mii";
phy-handle = <&phy5>;
};
};
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index d7e2ea27ce59..3389a70e4d49 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -617,8 +617,8 @@ static int sa1111_setup_gpios(struct sa1111 *sachip)
sachip->gc.direction_input = sa1111_gpio_direction_input;
sachip->gc.direction_output = sa1111_gpio_direction_output;
sachip->gc.get = sa1111_gpio_get;
- sachip->gc.set_rv = sa1111_gpio_set;
- sachip->gc.set_multiple_rv = sa1111_gpio_set_multiple;
+ sachip->gc.set = sa1111_gpio_set;
+ sachip->gc.set_multiple = sa1111_gpio_set_multiple;
sachip->gc.to_irq = sa1111_gpio_to_irq;
sachip->gc.base = -1;
sachip->gc.ngpio = 18;
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c
index 2d3ee76c8e17..dddb73c96826 100644
--- a/arch/arm/common/scoop.c
+++ b/arch/arm/common/scoop.c
@@ -218,7 +218,7 @@ static int scoop_probe(struct platform_device *pdev)
devptr->gpio.label = dev_name(&pdev->dev);
devptr->gpio.base = inf->gpio_base;
devptr->gpio.ngpio = 12; /* PA11 = 0, PA12 = 1, etc. up to PA22 = 11 */
- devptr->gpio.set_rv = scoop_gpio_set;
+ devptr->gpio.set = scoop_gpio_set;
devptr->gpio.get = scoop_gpio_get;
devptr->gpio.direction_input = scoop_gpio_direction_input;
devptr->gpio.direction_output = scoop_gpio_direction_output;
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index e8810d300457..f2822eeefb95 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -794,10 +794,12 @@ CONFIG_SND=m
CONFIG_SND_HDA_TEGRA=m
CONFIG_SND_HDA_INPUT_BEEP=y
CONFIG_SND_HDA_PATCH_LOADER=y
-CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_REALTEK=m
CONFIG_SND_HDA_CODEC_REALTEK_LIB=m
CONFIG_SND_HDA_CODEC_ALC269=m
CONFIG_SND_HDA_CODEC_HDMI=m
+CONFIG_SND_HDA_CODEC_HDMI_GENERIC=m
+CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=m
CONFIG_SND_HDA_CODEC_HDMI_TEGRA=m
CONFIG_SND_USB_AUDIO=m
CONFIG_SND_SOC=m
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 1491add30093..939913ed9a73 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -142,7 +142,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_CPU=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 3a9bda2bf422..ba863b445417 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -225,7 +225,12 @@ CONFIG_SND_HDA_TEGRA=y
CONFIG_SND_HDA_INPUT_BEEP=y
CONFIG_SND_HDA_PATCH_LOADER=y
CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_REALTEK_LIB=y
+CONFIG_SND_HDA_CODEC_ALC269=y
CONFIG_SND_HDA_CODEC_HDMI=y
+CONFIG_SND_HDA_CODEC_HDMI_GENERIC=y
+CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y
+CONFIG_SND_HDA_CODEC_HDMI_TEGRA=y
# CONFIG_SND_ARM is not set
# CONFIG_SND_SPI is not set
# CONFIG_SND_USB is not set
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index c60104dc1585..df5afe601e4a 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -206,7 +206,7 @@ static int ctr_encrypt(struct skcipher_request *req)
while (walk.nbytes > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
- int bytes = walk.nbytes;
+ unsigned int bytes = walk.nbytes;
if (unlikely(bytes < AES_BLOCK_SIZE))
src = dst = memcpy(buf + sizeof(buf) - bytes,
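
The int to unsigned int change above keeps the local consistent with the unsigned walk.nbytes it is copied from. The classic hazard when such types are mixed (illustrative of the general pitfall, not necessarily the exact failure mode in this driver) is that the signed operand is converted to unsigned by the usual arithmetic conversions:

#include <stdio.h>

int main(void)
{
	unsigned int nbytes = 16;
	int bytes = -1;

	/* bytes is converted to unsigned int for the comparison, so -1
	 * becomes UINT_MAX and the branch is taken. */
	if (bytes > nbytes)
		puts("signed/unsigned mix: -1 compares greater than 16");
	return 0;
}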
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
deleted file mode 100644
index f8500e5d6ea8..000000000000
--- a/arch/arm/include/asm/cti.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASMARM_CTI_H
-#define __ASMARM_CTI_H
-
-#include <asm/io.h>
-#include <asm/hardware/coresight.h>
-
-/* The registers' definition is from section 3.2 of
- * Embedded Cross Trigger Revision: r0p0
- */
-#define CTICONTROL 0x000
-#define CTISTATUS 0x004
-#define CTILOCK 0x008
-#define CTIPROTECTION 0x00C
-#define CTIINTACK 0x010
-#define CTIAPPSET 0x014
-#define CTIAPPCLEAR 0x018
-#define CTIAPPPULSE 0x01c
-#define CTIINEN 0x020
-#define CTIOUTEN 0x0A0
-#define CTITRIGINSTATUS 0x130
-#define CTITRIGOUTSTATUS 0x134
-#define CTICHINSTATUS 0x138
-#define CTICHOUTSTATUS 0x13c
-#define CTIPERIPHID0 0xFE0
-#define CTIPERIPHID1 0xFE4
-#define CTIPERIPHID2 0xFE8
-#define CTIPERIPHID3 0xFEC
-#define CTIPCELLID0 0xFF0
-#define CTIPCELLID1 0xFF4
-#define CTIPCELLID2 0xFF8
-#define CTIPCELLID3 0xFFC
-
-/* The below are from section 3.6.4 of
- * CoreSight v1.0 Architecture Specification
- */
-#define LOCKACCESS 0xFB0
-#define LOCKSTATUS 0xFB4
-
-/**
- * struct cti - cross trigger interface struct
- * @base: mapped virtual address for the cti base
- * @irq: irq number for the cti
- * @trig_out_for_irq: triger out number which will cause
- * the @irq happen
- *
- * cti struct used to operate cti registers.
- */
-struct cti {
- void __iomem *base;
- int irq;
- int trig_out_for_irq;
-};
-
-/**
- * cti_init - initialize the cti instance
- * @cti: cti instance
- * @base: mapped virtual address for the cti base
- * @irq: irq number for the cti
- * @trig_out: triger out number which will cause
- * the @irq happen
- *
- * called by machine code to pass the board dependent
- * @base, @irq and @trig_out to cti.
- */
-static inline void cti_init(struct cti *cti,
- void __iomem *base, int irq, int trig_out)
-{
- cti->base = base;
- cti->irq = irq;
- cti->trig_out_for_irq = trig_out;
-}
-
-/**
- * cti_map_trigger - use the @chan to map @trig_in to @trig_out
- * @cti: cti instance
- * @trig_in: trigger in number
- * @trig_out: trigger out number
- * @channel: channel number
- *
- * This function maps one trigger in of @trig_in to one trigger
- * out of @trig_out using the channel @chan.
- */
-static inline void cti_map_trigger(struct cti *cti,
- int trig_in, int trig_out, int chan)
-{
- void __iomem *base = cti->base;
- unsigned long val;
-
- val = __raw_readl(base + CTIINEN + trig_in * 4);
- val |= BIT(chan);
- __raw_writel(val, base + CTIINEN + trig_in * 4);
-
- val = __raw_readl(base + CTIOUTEN + trig_out * 4);
- val |= BIT(chan);
- __raw_writel(val, base + CTIOUTEN + trig_out * 4);
-}
-
-/**
- * cti_enable - enable the cti module
- * @cti: cti instance
- *
- * enable the cti module
- */
-static inline void cti_enable(struct cti *cti)
-{
- __raw_writel(0x1, cti->base + CTICONTROL);
-}
-
-/**
- * cti_disable - disable the cti module
- * @cti: cti instance
- *
- * enable the cti module
- */
-static inline void cti_disable(struct cti *cti)
-{
- __raw_writel(0, cti->base + CTICONTROL);
-}
-
-/**
- * cti_irq_ack - clear the cti irq
- * @cti: cti instance
- *
- * clear the cti irq
- */
-static inline void cti_irq_ack(struct cti *cti)
-{
- void __iomem *base = cti->base;
- unsigned long val;
-
- val = __raw_readl(base + CTIINTACK);
- val |= BIT(cti->trig_out_for_irq);
- __raw_writel(val, base + CTIINTACK);
-}
-
-/**
- * cti_unlock - unlock cti module
- * @cti: cti instance
- *
- * unlock the cti module, or else any writes to the cti
- * module is not allowed.
- */
-static inline void cti_unlock(struct cti *cti)
-{
- __raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
-}
-
-/**
- * cti_lock - lock cti module
- * @cti: cti instance
- *
- * lock the cti module, so any writes to the cti
- * module will be not allowed.
- */
-static inline void cti_lock(struct cti *cti)
-{
- __raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
-}
-#endif
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index a41c93988d2c..0bfd66c7ada0 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1004,7 +1004,7 @@ static void __init reserve_crashkernel(void)
total_mem = get_total_mem();
ret = parse_crashkernel(boot_command_line, total_mem,
&crash_size, &crash_base,
- NULL, NULL);
+ NULL, NULL, NULL);
/* invalid value specified or crashkernel=0 */
if (ret || !crash_size)
return;
diff --git a/arch/arm/mach-s3c/gpio-samsung.c b/arch/arm/mach-s3c/gpio-samsung.c
index 206a492fbaf5..81e198e5a6d3 100644
--- a/arch/arm/mach-s3c/gpio-samsung.c
+++ b/arch/arm/mach-s3c/gpio-samsung.c
@@ -517,7 +517,7 @@ static void __init samsung_gpiolib_add(struct samsung_gpio_chip *chip)
if (!gc->direction_output)
gc->direction_output = samsung_gpiolib_2bit_output;
if (!gc->set)
- gc->set_rv = samsung_gpiolib_set;
+ gc->set = samsung_gpiolib_set;
if (!gc->get)
gc->get = samsung_gpiolib_get;
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index bad8aa661e9d..2b833aa0212b 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -80,7 +80,7 @@ void ASSABET_BCR_frob(unsigned int mask, unsigned int val)
{
unsigned long m = mask, v = val;
- assabet_bcr_gc->set_multiple_rv(assabet_bcr_gc, &m, &v);
+ assabet_bcr_gc->set_multiple(assabet_bcr_gc, &m, &v);
}
EXPORT_SYMBOL(ASSABET_BCR_frob);
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 6516598c8a71..88fe79f0a4ed 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -126,7 +126,7 @@ void neponset_ncr_frob(unsigned int mask, unsigned int val)
unsigned long m = mask, v = val;
if (nep)
- n->gpio[0]->set_multiple_rv(n->gpio[0], &m, &v);
+ n->gpio[0]->set_multiple(n->gpio[0], &m, &v);
else
WARN(1, "nep unset\n");
}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index ab01b51de559..46169fe42c61 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -268,7 +268,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
int sig, code;
vm_fault_t fault;
unsigned int flags = FAULT_FLAG_DEFAULT;
- unsigned long vm_flags = VM_ACCESS_FLAGS;
+ vm_flags_t vm_flags = VM_ACCESS_FLAGS;
if (kprobe_page_fault(regs, fsr))
return 0;
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 6f09f65e3d95..49e29b7894a3 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -540,7 +540,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
ochip->chip.direction_input = orion_gpio_direction_input;
ochip->chip.get = orion_gpio_get;
ochip->chip.direction_output = orion_gpio_direction_output;
- ochip->chip.set_rv = orion_gpio_set;
+ ochip->chip.set = orion_gpio_set;
ochip->chip.to_irq = orion_gpio_to_irq;
ochip->chip.base = gpio_base;
ochip->chip.ngpio = ngpio;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7c456aa838b9..e9bbfacc35a6 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -42,7 +42,6 @@ config ARM64
select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT
select ARCH_HAS_PREEMPT_LAZY
select ARCH_HAS_PTDUMP
- select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_HW_PTE_YOUNG
select ARCH_HAS_SETUP_DMA_OPS
@@ -127,6 +126,7 @@ config ARM64
select ARM_GIC_V2M if PCI
select ARM_GIC_V3
select ARM_GIC_V3_ITS if PCI
+ select ARM_GIC_V5
select ARM_PSCI_FW
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS
@@ -134,6 +134,7 @@ config ARM64
select CPU_PM if (SUSPEND || CPU_IDLE)
select CPUMASK_OFFSTACK if NR_CPUS > 256
select DCACHE_WORD_ACCESS
+ select HAVE_EXTRA_IPI_TRACEPOINTS
select DYNAMIC_FTRACE if FUNCTION_TRACER
select DMA_BOUNCE_UNALIGNED_KMALLOC
select DMA_DIRECT_REMAP
@@ -221,7 +222,6 @@ config ARM64
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_GUP_FAST
select HAVE_FTRACE_GRAPH_FUNC
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_FREGS
@@ -232,6 +232,7 @@ config ARM64
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IOREMAP_PROT
select HAVE_IRQ_TIME_ACCOUNTING
+ select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_PERF_EVENTS
@@ -240,6 +241,7 @@ config ARM64
select HAVE_PERF_USER_STACK_DUMP
select HAVE_PREEMPT_DYNAMIC_KEY
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
select HAVE_FUNCTION_ARG_ACCESS_API
select MMU_GATHER_RCU_TABLE_FREE
@@ -278,6 +280,7 @@ config ARM64
select HAVE_SOFTIRQ_ON_OWN_STACK
select USER_STACKTRACE_SUPPORT
select VDSO_GETRANDOM
+ select VMAP_STACK
help
ARM 64-bit (AArch64) Linux support.
@@ -2498,3 +2501,4 @@ source "drivers/acpi/Kconfig"
source "arch/arm64/kvm/Kconfig"
+source "kernel/livepatch/Kconfig"
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 90d6b028fbbb..a88f5ad9328c 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -333,7 +333,6 @@ config ARCH_STM32
bool "STMicroelectronics STM32 SoC Family"
select GPIOLIB
select PINCTRL
- select PINCTRL_STM32MP257
select ARM_SMC_MBOX
select ARM_SCMI_PROTOCOL
select REGULATOR
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index dd065b1bf94a..8877953ce292 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -1430,6 +1430,31 @@
status = "disabled";
};
+ ufshci: ufshci@11270000 {
+ compatible = "mediatek,mt8195-ufshci";
+ reg = <0 0x11270000 0 0x2300>;
+ interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH 0>;
+ phys = <&ufsphy>;
+ clocks = <&infracfg_ao CLK_INFRA_AO_AES_UFSFDE>,
+ <&infracfg_ao CLK_INFRA_AO_AES>,
+ <&infracfg_ao CLK_INFRA_AO_UFS_TICK>,
+ <&infracfg_ao CLK_INFRA_AO_UNIPRO_SYS>,
+ <&infracfg_ao CLK_INFRA_AO_UNIPRO_TICK>,
+ <&infracfg_ao CLK_INFRA_AO_UFS_MP_SAP_B>,
+ <&infracfg_ao CLK_INFRA_AO_UFS_TX_SYMBOL>,
+ <&infracfg_ao CLK_INFRA_AO_PERI_UFS_MEM_SUB>;
+ clock-names = "ufs", "ufs_aes", "ufs_tick",
+ "unipro_sysclk", "unipro_tick",
+ "unipro_mp_bclk", "ufs_tx_symbol",
+ "ufs_mem_sub";
+ freq-table-hz = <0 0>, <0 0>, <0 0>,
+ <0 0>, <0 0>, <0 0>,
+ <0 0>, <0 0>;
+
+ mediatek,ufs-disable-mcq;
+ status = "disabled";
+ };
+
lvts_mcu: thermal-sensor@11278000 {
compatible = "mediatek,mt8195-lvts-mcu";
reg = <0 0x11278000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8370.dtsi b/arch/arm64/boot/dts/mediatek/mt8370.dtsi
index cf1a3759451f..7ac8b8d03494 100644
--- a/arch/arm64/boot/dts/mediatek/mt8370.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8370.dtsi
@@ -59,6 +59,22 @@
<&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
+/*
+ * Please note that overriding compatibles is a discouraged practice and is a
+ * clear indication of nodes not being, well, compatible!
+ *
+ * This is a special case, where the GPU is the same as MT8188, but with one
+ * of the cores fused out in this lower-binned SoC.
+ */
+&gpu {
+ compatible = "mediatek,mt8370-mali", "arm,mali-valhall-jm";
+
+ power-domains = <&spm MT8188_POWER_DOMAIN_MFG2>,
+ <&spm MT8188_POWER_DOMAIN_MFG3>;
+
+ power-domain-names = "core0", "core1";
+};
+
&ppi_cluster0 {
affinity = <&cpu0 &cpu1 &cpu2 &cpu3>;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra264.dtsi b/arch/arm64/boot/dts/nvidia/tegra264.dtsi
index 62c87a387b14..e02659efa233 100644
--- a/arch/arm64/boot/dts/nvidia/tegra264.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra264.dtsi
@@ -11,7 +11,6 @@
interrupt-parent = <&gic>;
#address-cells = <2>;
#size-cells = <2>;
- numa-node-id = <0>;
reserved-memory {
#address-cells = <2>;
@@ -341,7 +340,6 @@
status = "okay";
enable-method = "psci";
- numa-node-id = <0>;
i-cache-size = <65536>;
i-cache-line-size = <64>;
@@ -358,7 +356,6 @@
status = "okay";
enable-method = "psci";
- numa-node-id = <0>;
i-cache-size = <65536>;
i-cache-line-size = <64>;
diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
index 6e73809f6492..a5f13801b784 100644
--- a/arch/arm64/include/asm/asm-bug.h
+++ b/arch/arm64/include/asm/asm-bug.h
@@ -21,16 +21,21 @@
#endif
#ifdef CONFIG_GENERIC_BUG
-
-#define __BUG_ENTRY(flags) \
+#define __BUG_ENTRY_START \
.pushsection __bug_table,"aw"; \
.align 2; \
14470: .long 14471f - .; \
-_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
- .short flags; \
+
+#define __BUG_ENTRY_END \
.align 2; \
.popsection; \
14471:
+
+#define __BUG_ENTRY(flags) \
+ __BUG_ENTRY_START \
+_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
+ .short flags; \
+ __BUG_ENTRY_END
#else
#define __BUG_ENTRY(flags)
#endif
@@ -41,4 +46,24 @@ _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
#define ASM_BUG() ASM_BUG_FLAGS(0)
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define __BUG_LOCATION_STRING(file, line) \
+ ".long " file "- .;" \
+ ".short " line ";"
+#else
+#define __BUG_LOCATION_STRING(file, line)
+#endif
+
+#define __BUG_ENTRY_STRING(file, line, flags) \
+ __stringify(__BUG_ENTRY_START) \
+ __BUG_LOCATION_STRING(file, line) \
+ ".short " flags ";" \
+ __stringify(__BUG_ENTRY_END)
+
+#define ARCH_WARN_ASM(file, line, flags, size) \
+ __BUG_ENTRY_STRING(file, line, flags) \
+ __stringify(brk BUG_BRK_IMM)
+
+#define ARCH_WARN_REACHABLE
+
#endif /* __ASM_ASM_BUG_H */
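
The new __BUG_ENTRY_STRING()/ARCH_WARN_ASM() path builds the bug-table entry as a C string so it can be spliced into an asm() statement, relying on the kernel's two-level stringification idiom: the macro argument is fully expanded before # turns it into a string literal. A standalone illustration (the 0x800 immediate is shown purely for demonstration):

#include <stdio.h>

/* Two-level expansion, as in include/linux/stringify.h: the inner
 * level expands BUG_BRK_IMM first, then #x stringizes the result.
 * A single-level #x would yield the literal text "brk BUG_BRK_IMM". */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#define BUG_BRK_IMM	0x800	/* demonstration value */

int main(void)
{
	puts(__stringify(brk BUG_BRK_IMM));	/* prints: brk 0x800 */
	return 0;
}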
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index c56c21bb1eec..23be85d93348 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -58,7 +58,7 @@
.macro disable_step_tsk, flgs, tmp
tbz \flgs, #TIF_SINGLESTEP, 9990f
mrs \tmp, mdscr_el1
- bic \tmp, \tmp, #DBG_MDSCR_SS
+ bic \tmp, \tmp, #MDSCR_EL1_SS
msr mdscr_el1, \tmp
isb // Take effect before a subsequent clear of DAIF.D
9990:
@@ -68,7 +68,7 @@
.macro enable_step_tsk, flgs, tmp
tbz \flgs, #TIF_SINGLESTEP, 9990f
mrs \tmp, mdscr_el1
- orr \tmp, \tmp, #DBG_MDSCR_SS
+ orr \tmp, \tmp, #MDSCR_EL1_SS
msr mdscr_el1, \tmp
9990:
.endm
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 1ca947d5c939..f5801b0ba9e9 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -44,6 +44,9 @@
SB_BARRIER_INSN"nop\n", \
ARM64_HAS_SB))
+#define gsb_ack() asm volatile(GSB_ACK_BARRIER_INSN : : : "memory")
+#define gsb_sys() asm volatile(GSB_SYS_BARRIER_INSN : : : "memory")
+
#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync() \
do { \
diff --git a/arch/arm64/include/asm/cfi.h b/arch/arm64/include/asm/cfi.h
new file mode 100644
index 000000000000..ab90f0351b7a
--- /dev/null
+++ b/arch/arm64/include/asm/cfi.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_CFI_H
+#define _ASM_ARM64_CFI_H
+
+#define __bpfcall
+
+#endif /* _ASM_ARM64_CFI_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c4326f1cb917..bf13d676aae2 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -275,6 +275,14 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))
/* Panic when a conflict is detected */
#define ARM64_CPUCAP_PANIC_ON_CONFLICT ((u16)BIT(6))
+/*
+ * When paired with SCOPE_LOCAL_CPU, all early CPUs must satisfy the
+ * condition. This is different from SCOPE_SYSTEM where the check is performed
+ * only once at the end of the SMP boot on the sanitised ID registers.
+ * SCOPE_SYSTEM is not suitable for cases where the capability depends on
+ * properties local to a CPU like MIDR_EL1.
+ */
+#define ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS ((u16)BIT(7))
/*
* CPU errata workarounds that need to be enabled at boot time if one or
@@ -304,6 +312,16 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
(ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU | \
ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
+/*
+ * CPU feature detected at boot time and present on all early CPUs. Late CPUs
+ * are permitted to have the feature even if it hasn't been enabled, although
+ * the feature will not be used by Linux in this case. If all early CPUs have
+ * the feature, then every late CPU must have it.
+ */
+#define ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE \
+ (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
+ ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU | \
+ ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS)
/*
* CPU feature detected at boot time, on one or more CPUs. A late CPU
@@ -391,6 +409,11 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}
+static inline bool cpucap_match_all_early_cpus(const struct arm64_cpu_capabilities *cap)
+{
+ return cap->type & ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS;
+}
+
/*
* Generic helper for handling capabilities with multiple (match,enable) pairs
* of call backs, sharing the same capability bit.
@@ -848,6 +871,11 @@ static inline bool system_supports_pmuv3(void)
return cpus_have_final_cap(ARM64_HAS_PMUV3);
}
+static inline bool system_supports_bbml2_noabort(void)
+{
+ return alternative_has_cap_unlikely(ARM64_HAS_BBML2_NOABORT);
+}
+
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
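
As a hypothetical illustration of the new capability type (the capability number, match function and description below are invented, not part of this patch), a detection-table entry that must hold on every early CPU would pair a SCOPE_LOCAL_CPU match with the new flag via ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE:

/* Hypothetical entry; ARM64_HAS_WIDGET and has_widget() are invented
 * names used only to show the shape of such an entry. */
static const struct arm64_cpu_capabilities widget_cap = {
	.desc = "Widget feature",
	.capability = ARM64_HAS_WIDGET,
	/* Checked per-CPU; every early CPU must match, and late CPUs
	 * may have the feature but it stays unused if only they do. */
	.type = ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE,
	.matches = has_widget,	/* may consult MIDR_EL1 and friends */
};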
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 8f6ba31b8658..f5e3ed2420ce 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -13,14 +13,8 @@
#include <asm/ptrace.h>
/* Low-level stepping controls. */
-#define DBG_MDSCR_SS (1 << 0)
#define DBG_SPSR_SS (1 << 21)
-/* MDSCR_EL1 enabling bits */
-#define DBG_MDSCR_KDE (1 << 13)
-#define DBG_MDSCR_MDE (1 << 15)
-#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
-
#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7)
/* AArch64 */
@@ -62,30 +56,6 @@ struct task_struct;
#define DBG_HOOK_HANDLED 0
#define DBG_HOOK_ERROR 1
-struct step_hook {
- struct list_head node;
- int (*fn)(struct pt_regs *regs, unsigned long esr);
-};
-
-void register_user_step_hook(struct step_hook *hook);
-void unregister_user_step_hook(struct step_hook *hook);
-
-void register_kernel_step_hook(struct step_hook *hook);
-void unregister_kernel_step_hook(struct step_hook *hook);
-
-struct break_hook {
- struct list_head node;
- int (*fn)(struct pt_regs *regs, unsigned long esr);
- u16 imm;
- u16 mask; /* These bits are ignored when comparing with imm */
-};
-
-void register_user_break_hook(struct break_hook *hook);
-void unregister_user_break_hook(struct break_hook *hook);
-
-void register_kernel_break_hook(struct break_hook *hook);
-void unregister_kernel_break_hook(struct break_hook *hook);
-
u8 debug_monitors_arch(void);
enum dbg_active_el {
@@ -108,17 +78,15 @@ void kernel_rewind_single_step(struct pt_regs *regs);
void kernel_fastforward_single_step(struct pt_regs *regs);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-int reinstall_suspended_bps(struct pt_regs *regs);
+bool try_step_suspended_breakpoints(struct pt_regs *regs);
#else
-static inline int reinstall_suspended_bps(struct pt_regs *regs)
+static inline bool try_step_suspended_breakpoints(struct pt_regs *regs)
{
- return -ENODEV;
+ return false;
}
#endif
-int aarch32_break_handler(struct pt_regs *regs);
-
-void debug_traps_init(void);
+bool try_handle_aarch32_break(struct pt_regs *regs);
#endif /* __ASSEMBLY */
#endif /* __ASM_DEBUG_MONITORS_H */
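
The replacement helpers follow the usual idiom for config-gated APIs: a real declaration when the feature is built in, and a static inline stub returning the not-handled value otherwise, so callers need no #ifdef of their own. A generic standalone sketch of the idiom (CONFIG_FOO and foo_handle() are invented names):

#include <stdbool.h>

#ifdef CONFIG_FOO
bool foo_handle(int esr);
#else
static inline bool foo_handle(int esr)
{
	(void)esr;
	return false;	/* not handled; the caller falls through */
}
#endif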
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 9f38340d24c2..46033027510c 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -165,6 +165,50 @@
.Lskip_gicv3_\@:
.endm
+/* GICv5 system register access */
+.macro __init_el2_gicv5
+ mrs_s x0, SYS_ID_AA64PFR2_EL1
+ ubfx x0, x0, #ID_AA64PFR2_EL1_GCIE_SHIFT, #4
+ cbz x0, .Lskip_gicv5_\@
+
+ mov x0, #(ICH_HFGITR_EL2_GICRCDNMIA | \
+ ICH_HFGITR_EL2_GICRCDIA | \
+ ICH_HFGITR_EL2_GICCDDI | \
+ ICH_HFGITR_EL2_GICCDEOI | \
+ ICH_HFGITR_EL2_GICCDHM | \
+ ICH_HFGITR_EL2_GICCDRCFG | \
+ ICH_HFGITR_EL2_GICCDPEND | \
+ ICH_HFGITR_EL2_GICCDAFF | \
+ ICH_HFGITR_EL2_GICCDPRI | \
+ ICH_HFGITR_EL2_GICCDDIS | \
+ ICH_HFGITR_EL2_GICCDEN)
+ msr_s SYS_ICH_HFGITR_EL2, x0 // Disable instruction traps
+ mov_q x0, (ICH_HFGRTR_EL2_ICC_PPI_ACTIVERn_EL1 | \
+ ICH_HFGRTR_EL2_ICC_PPI_PRIORITYRn_EL1 | \
+ ICH_HFGRTR_EL2_ICC_PPI_PENDRn_EL1 | \
+ ICH_HFGRTR_EL2_ICC_PPI_ENABLERn_EL1 | \
+ ICH_HFGRTR_EL2_ICC_PPI_HMRn_EL1 | \
+ ICH_HFGRTR_EL2_ICC_IAFFIDR_EL1 | \
+ ICH_HFGRTR_EL2_ICC_ICSR_EL1 | \
+ ICH_HFGRTR_EL2_ICC_PCR_EL1 | \
+ ICH_HFGRTR_EL2_ICC_HPPIR_EL1 | \
+ ICH_HFGRTR_EL2_ICC_HAPR_EL1 | \
+ ICH_HFGRTR_EL2_ICC_CR0_EL1 | \
+ ICH_HFGRTR_EL2_ICC_IDRn_EL1 | \
+ ICH_HFGRTR_EL2_ICC_APR_EL1)
+ msr_s SYS_ICH_HFGRTR_EL2, x0 // Disable reg read traps
+ mov_q x0, (ICH_HFGWTR_EL2_ICC_PPI_ACTIVERn_EL1 | \
+ ICH_HFGWTR_EL2_ICC_PPI_PRIORITYRn_EL1 | \
+ ICH_HFGWTR_EL2_ICC_PPI_PENDRn_EL1 | \
+ ICH_HFGWTR_EL2_ICC_PPI_ENABLERn_EL1 | \
+ ICH_HFGWTR_EL2_ICC_ICSR_EL1 | \
+ ICH_HFGWTR_EL2_ICC_PCR_EL1 | \
+ ICH_HFGWTR_EL2_ICC_CR0_EL1 | \
+ ICH_HFGWTR_EL2_ICC_APR_EL1)
+ msr_s SYS_ICH_HFGWTR_EL2, x0 // Disable reg write traps
+.Lskip_gicv5_\@:
+.endm
+
.macro __init_el2_hstr
msr hstr_el2, xzr // Disable CP15 traps to EL2
.endm
@@ -189,6 +233,28 @@
.Lskip_set_cptr_\@:
.endm
+/*
+ * Configure BRBE to permit recording cycle counts and branch mispredicts.
+ *
+ * At any EL, to record cycle counts BRBE requires that both BRBCR_EL2.CC=1 and
+ * BRBCR_EL1.CC=1.
+ *
+ * At any EL, to record branch mispredicts BRBE requires that both
+ * BRBCR_EL2.MPRED=1 and BRBCR_EL1.MPRED=1.
+ *
+ * Set {CC,MPRED} in BRBCR_EL2 in case nVHE mode is used and we are
+ * executing in EL1.
+ */
+.macro __init_el2_brbe
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
+ cbz x1, .Lskip_brbe_\@
+
+ mov_q x0, BRBCR_ELx_CC | BRBCR_ELx_MPRED
+ msr_s SYS_BRBCR_EL2, x0
+.Lskip_brbe_\@:
+.endm
+
/* Disable any fine grained traps */
.macro __init_el2_fgt
mrs x1, id_aa64mmfr0_el1
@@ -196,20 +262,62 @@
cbz x1, .Lskip_fgt_\@
mov x0, xzr
+ mov x2, xzr
mrs x1, id_aa64dfr0_el1
ubfx x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
cmp x1, #3
b.lt .Lskip_spe_fgt_\@
/* Disable PMSNEVFR_EL1 read and write traps */
- orr x0, x0, #(1 << 62)
+ orr x0, x0, #HDFGRTR_EL2_nPMSNEVFR_EL1_MASK
+ orr x2, x2, #HDFGWTR_EL2_nPMSNEVFR_EL1_MASK
.Lskip_spe_fgt_\@:
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
+ cbz x1, .Lskip_brbe_fgt_\@
+
+ /*
+ * Disable read traps for the following registers
+ *
+ * [BRBSRC|BRBTGT|BRBINF]_EL1
+ * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
+ */
+ orr x0, x0, #HDFGRTR_EL2_nBRBDATA_MASK
+
+ /*
+ * Disable write traps for the following registers
+ *
+ * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
+ */
+ orr x2, x2, #HDFGWTR_EL2_nBRBDATA_MASK
+
+ /* Disable read and write traps for [BRBCR|BRBFCR]_EL1 */
+ orr x0, x0, #HDFGRTR_EL2_nBRBCTL_MASK
+ orr x2, x2, #HDFGWTR_EL2_nBRBCTL_MASK
+
+ /* Disable read traps for BRBIDR_EL1 */
+ orr x0, x0, #HDFGRTR_EL2_nBRBIDR_MASK
+
+.Lskip_brbe_fgt_\@:
.Lset_debug_fgt_\@:
msr_s SYS_HDFGRTR_EL2, x0
- msr_s SYS_HDFGWTR_EL2, x0
+ msr_s SYS_HDFGWTR_EL2, x2
mov x0, xzr
+ mov x2, xzr
+
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
+ cbz x1, .Lskip_brbe_insn_fgt_\@
+
+ /* Disable traps for BRBIALL instruction */
+ orr x2, x2, #HFGITR_EL2_nBRBIALL_MASK
+
+ /* Disable traps for BRBINJ instruction */
+ orr x2, x2, #HFGITR_EL2_nBRBINJ_MASK
+
+.Lskip_brbe_insn_fgt_\@:
mrs x1, id_aa64pfr1_el1
ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
cbz x1, .Lskip_sme_fgt_\@
@@ -250,7 +358,7 @@
.Lset_fgt_\@:
msr_s SYS_HFGRTR_EL2, x0
msr_s SYS_HFGWTR_EL2, x0
- msr_s SYS_HFGITR_EL2, xzr
+ msr_s SYS_HFGITR_EL2, x2
mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU
ubfx x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
@@ -300,9 +408,11 @@
__init_el2_hcrx
__init_el2_timers
__init_el2_debug
+ __init_el2_brbe
__init_el2_lor
__init_el2_stage2
__init_el2_gicv3
+ __init_el2_gicv5
__init_el2_hstr
__init_el2_nvhe_idregs
__init_el2_cptr
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index d48fc16584cd..e3874c4fc399 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -59,8 +59,20 @@ void do_el0_bti(struct pt_regs *regs);
void do_el1_bti(struct pt_regs *regs, unsigned long esr);
void do_el0_gcs(struct pt_regs *regs, unsigned long esr);
void do_el1_gcs(struct pt_regs *regs, unsigned long esr);
-void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+void do_breakpoint(unsigned long esr, struct pt_regs *regs);
+void do_watchpoint(unsigned long addr, unsigned long esr,
struct pt_regs *regs);
+#else
+static inline void do_breakpoint(unsigned long esr, struct pt_regs *regs) {}
+static inline void do_watchpoint(unsigned long addr, unsigned long esr,
+ struct pt_regs *regs) {}
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+void do_el0_softstep(unsigned long esr, struct pt_regs *regs);
+void do_el1_softstep(unsigned long esr, struct pt_regs *regs);
+void do_el0_brk64(unsigned long esr, struct pt_regs *regs);
+void do_el1_brk64(unsigned long esr, struct pt_regs *regs);
+void do_bkpt32(unsigned long esr, struct pt_regs *regs);
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
void do_sve_acc(unsigned long esr, struct pt_regs *regs);
void do_sme_acc(unsigned long esr, struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/gcs.h b/arch/arm64/include/asm/gcs.h
index f50660603ecf..5bc432234d3a 100644
--- a/arch/arm64/include/asm/gcs.h
+++ b/arch/arm64/include/asm/gcs.h
@@ -58,7 +58,7 @@ static inline u64 gcsss2(void)
static inline bool task_gcs_el0_enabled(struct task_struct *task)
{
- return current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
+ return task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
}
void gcs_set_el0_mode(struct task_struct *task);
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 1c3f9617d54f..13f94c8ddfc0 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -176,6 +176,8 @@
#define KERNEL_HWCAP_POE __khwcap2_feature(POE)
#define __khwcap3_feature(x) (const_ilog2(HWCAP3_ ## x) + 128)
+#define KERNEL_HWCAP_MTE_FAR __khwcap3_feature(MTE_FAR)
+#define KERNEL_HWCAP_MTE_STORE_ONLY __khwcap3_feature(MTE_STORE_ONLY)
/*
* This yields a mask that user programs can use to figure out what
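
The __khwcap*_feature() encodings pack the three 64-bit hwcap words into one kernel-internal number space: values 0-63 map to HWCAP, 64-127 to HWCAP2 and 128 upward to HWCAP3, so the new entries above land in the HWCAP3 word. A small standalone decoder makes the mapping explicit:

#include <stdio.h>

/* Mirror the +0/+64/+128 offsets of the __khwcap*_feature() macros:
 * word 0 is HWCAP, word 1 is HWCAP2, word 2 is HWCAP3. */
static void decode_khwcap(unsigned int n)
{
	printf("khwcap %u -> HWCAP%s bit %u\n", n,
	       n < 64 ? "" : (n < 128 ? "2" : "3"), n % 64);
}

int main(void)
{
	decode_khwcap(128);	/* first feature in the HWCAP3 word */
	return 0;
}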
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
index 21fc85e9d2be..3184f5d1e3ae 100644
--- a/arch/arm64/include/asm/kgdb.h
+++ b/arch/arm64/include/asm/kgdb.h
@@ -24,6 +24,18 @@ static inline void arch_kgdb_breakpoint(void)
extern void kgdb_handle_bus_error(void);
extern int kgdb_fault_expected;
+int kgdb_brk_handler(struct pt_regs *regs, unsigned long esr);
+int kgdb_compiled_brk_handler(struct pt_regs *regs, unsigned long esr);
+#ifdef CONFIG_KGDB
+int kgdb_single_step_handler(struct pt_regs *regs, unsigned long esr);
+#else
+static inline int kgdb_single_step_handler(struct pt_regs *regs,
+ unsigned long esr)
+{
+ return DBG_HOOK_ERROR;
+}
+#endif
+
#endif /* !__ASSEMBLY__ */
/*
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index be7a3680dadf..f2782560647b 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -41,4 +41,12 @@ void __kretprobe_trampoline(void);
void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
#endif /* CONFIG_KPROBES */
+
+int __kprobes kprobe_brk_handler(struct pt_regs *regs,
+ unsigned long esr);
+int __kprobes kprobe_ss_brk_handler(struct pt_regs *regs,
+ unsigned long esr);
+int __kprobes kretprobe_brk_handler(struct pt_regs *regs,
+ unsigned long esr);
+
#endif /* _ARM_KPROBES_H */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 0720898f563e..fa8a08a1ccd5 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -45,16 +45,39 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
-void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr);
+int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
+static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+ return kvm_inject_sea(vcpu, false, addr);
+}
+
+static inline int kvm_inject_sea_iabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+ return kvm_inject_sea(vcpu, true, addr);
+}
+
+static inline int kvm_inject_serror(struct kvm_vcpu *vcpu)
+{
+ /*
+ * ESR_ELx.ISV (later renamed to IDS) indicates whether or not
+ * ESR_ELx.ISS contains IMPLEMENTATION DEFINED syndrome information.
+ *
+ * Set the bit when injecting an SError w/o an ESR to indicate ISS
+ * does not follow the architected format.
+ */
+ return kvm_inject_serror_esr(vcpu, ESR_ELx_ISV);
+}
+
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
+int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
+int kvm_inject_nested_serror(struct kvm_vcpu *vcpu, u64 esr);
static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
{
@@ -195,6 +218,11 @@ static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_TGE;
}
+static inline bool vcpu_el2_amo_is_set(const struct kvm_vcpu *vcpu)
+{
+ return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_AMO;
+}
+
static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
bool e2h, tge;
@@ -224,6 +252,20 @@ static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
}
+static inline bool is_nested_ctxt(struct kvm_vcpu *vcpu)
+{
+ return vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
+}
+
+static inline bool vserror_state_is_nested(struct kvm_vcpu *vcpu)
+{
+ if (!is_nested_ctxt(vcpu))
+ return false;
+
+ return vcpu_el2_amo_is_set(vcpu) ||
+ (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA);
+}
+
/*
* The layout of SPSR for an AArch32 state is different when observed from an
* AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
@@ -627,6 +669,9 @@ static inline void vcpu_set_hcrx(struct kvm_vcpu *vcpu)
if (kvm_has_fpmr(kvm))
vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
+
+ if (kvm_has_sctlr2(kvm))
+ vcpu->arch.hcrx_el2 |= HCRX_EL2_SCTLR2En;
}
}
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3e41a880b062..2f2394cce24e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -523,6 +523,7 @@ enum vcpu_sysreg {
/* Anything from this can be RES0/RES1 sanitised */
MARKER(__SANITISED_REG_START__),
TCR2_EL2, /* Extended Translation Control Register (EL2) */
+ SCTLR2_EL2, /* System Control Register 2 (EL2) */
MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
@@ -537,6 +538,7 @@ enum vcpu_sysreg {
VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
VNCR(TCR_EL1), /* Translation Control Register */
VNCR(TCR2_EL1), /* Extended Translation Control Register */
+ VNCR(SCTLR2_EL1), /* System Control Register 2 */
VNCR(ESR_EL1), /* Exception Syndrome Register */
VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
@@ -565,6 +567,10 @@ enum vcpu_sysreg {
VNCR(POR_EL1), /* Permission Overlay Register 1 (EL1) */
+ /* FEAT_RAS registers */
+ VNCR(VDISR_EL2),
+ VNCR(VSESR_EL2),
+
VNCR(HFGRTR_EL2),
VNCR(HFGWTR_EL2),
VNCR(HFGITR_EL2),
@@ -704,6 +710,7 @@ struct kvm_host_data {
#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5
#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT 6
#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED 7
+#define KVM_HOST_DATA_FLAG_HAS_BRBE 8
unsigned long flags;
struct kvm_cpu_context host_ctxt;
@@ -737,6 +744,7 @@ struct kvm_host_data {
u64 trfcr_el1;
/* Values of trap registers for the host before guest entry. */
u64 mdcr_el2;
+ u64 brbcr_el1;
} host_debug_state;
/* Guest trace filter value */
@@ -817,7 +825,7 @@ struct kvm_vcpu_arch {
u8 iflags;
/* State flags for kernel bookkeeping, unused by the hypervisor code */
- u8 sflags;
+ u16 sflags;
/*
* Don't run the guest (internal implementation need).
@@ -953,9 +961,21 @@ struct kvm_vcpu_arch {
__vcpu_flags_preempt_enable(); \
} while (0)
+#define __vcpu_test_and_clear_flag(v, flagset, f, m) \
+ ({ \
+ typeof(v->arch.flagset) set; \
+ \
+ set = __vcpu_get_flag(v, flagset, f, m); \
+ __vcpu_clear_flag(v, flagset, f, m); \
+ \
+ set; \
+ })
+
#define vcpu_get_flag(v, ...) __vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...) __vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__)
+#define vcpu_test_and_clear_flag(v, ...) \
+ __vcpu_test_and_clear_flag((v), __VA_ARGS__)
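A minimal consumer sketch for the test-and-clear wrapper above (hypothetical caller, assuming the NESTED_SERROR_PENDING flag and kvm_inject_serror_esr() added elsewhere in this series): the read and the clear are folded into one sequence under a single preemption guard, so a flag consumed on the entry path cannot be observed twice.

    static void sketch_flush_pending_serror(struct kvm_vcpu *vcpu)
    {
            /* Read and clear in one step: no window to see the flag twice. */
            if (vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING))
                    kvm_inject_serror_esr(vcpu, vcpu_read_sys_reg(vcpu, VSESR_EL2));
    }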
/* KVM_ARM_VCPU_INIT completed */
#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(0))
@@ -1015,6 +1035,8 @@ struct kvm_vcpu_arch {
#define IN_WFI __vcpu_single_flag(sflags, BIT(6))
/* KVM is currently emulating a nested ERET */
#define IN_NESTED_ERET __vcpu_single_flag(sflags, BIT(7))
+/* SError pending for nested guest */
+#define NESTED_SERROR_PENDING __vcpu_single_flag(sflags, BIT(8))
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -1149,6 +1171,8 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
* System registers listed in the switch are not saved on every
* exit from the guest but are only saved on vcpu_put.
*
+ * SYSREGS_ON_CPU *MUST* be checked before using this helper.
+ *
* Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
* should never be listed below, because the guest cannot modify its
* own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
@@ -1186,6 +1210,7 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break;
case DBGVCR32_EL2: *val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
case ZCR_EL1: *val = read_sysreg_s(SYS_ZCR_EL12); break;
+ case SCTLR2_EL1: *val = read_sysreg_s(SYS_SCTLR2_EL12); break;
default: return false;
}
@@ -1200,6 +1225,8 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
* System registers listed in the switch are not restored on every
* entry to the guest but are only restored on vcpu_load.
*
+ * SYSREGS_ON_CPU *MUST* be checked before using this helper.
+ *
* Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
* should never be listed below, because the MPIDR should only be set
* once, before running the VCPU, and never changed later.
@@ -1236,6 +1263,7 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
+ case SCTLR2_EL1: write_sysreg_s(val, SYS_SCTLR2_EL12); break;
default: return false;
}
@@ -1387,8 +1415,6 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
return (vcpu_arch->steal.base != INVALID_GPA);
}
-void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
-
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
@@ -1665,6 +1691,12 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
#define kvm_has_s1poe(k) \
(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
+#define kvm_has_ras(k) \
+ (kvm_has_feat((k), ID_AA64PFR0_EL1, RAS, IMP))
+
+#define kvm_has_sctlr2(k) \
+ (kvm_has_feat((k), ID_AA64MMFR3_EL1, SCTLRX, IMP))
+
static inline bool kvm_arch_has_irq_bypass(void)
{
return true;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index b98ac6aa631f..ae563ebd6aee 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -371,6 +371,24 @@ static inline void kvm_fault_unlock(struct kvm *kvm)
read_unlock(&kvm->mmu_lock);
}
+/*
+ * ARM64 KVM relies on a simple conversion from physaddr to a kernel
+ * virtual address (KVA) when it does cache maintenance as the CMO
+ * instructions work on virtual addresses. This is incompatible with
+ * VM_PFNMAP VMAs which may not have a kernel direct mapping to a
+ * virtual address.
+ *
+ * With S2FWB and CACHE DIC features, KVM need not do cache flushing
+ * and CMOs are NOP'd. This has the effect of no longer requiring a
+ * KVA for addresses mapped into the S2. The presence of these features
+ * is thus necessary to support cacheable S2 mapping of VM_PFNMAP.
+ */
+static inline bool kvm_supports_cacheable_pfnmap(void)
+{
+ return cpus_have_final_cap(ARM64_HAS_STAGE2_FWB) &&
+ cpus_have_final_cap(ARM64_HAS_CACHE_DIC);
+}
+
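A hedged sketch of how a fault-path caller might use this helper (caller shape assumed, not part of this hunk): a cacheable stage-2 mapping of a VM_PFNMAP region is refused unless FWB and CACHE DIC together make the CMOs unnecessary.

    static int sketch_check_cacheable_pfnmap(struct vm_area_struct *vma,
                                             bool wants_cacheable)
    {
            /* No KVA to operate CMOs on, so the hardware must elide them. */
            if (wants_cacheable && (vma->vm_flags & VM_PFNMAP) &&
                !kvm_supports_cacheable_pfnmap())
                    return -EINVAL;
            return 0;
    }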
#ifdef CONFIG_PTDUMP_STAGE2_DEBUGFS
void kvm_s2_ptdump_create_debugfs(struct kvm *kvm);
#else
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 0bd07ea068a1..7fd76f41c296 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -80,6 +80,8 @@ extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
extern void check_nested_vcpu_requests(struct kvm_vcpu *vcpu);
+extern void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu);
+extern void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu);
struct kvm_s2_trans {
phys_addr_t output;
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 717829df294e..5213248e081b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -118,7 +118,7 @@
* VMAP'd stacks are allocated at page granularity, so we must ensure that such
* stacks are a multiple of page size.
*/
-#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
+#if (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT PAGE_SHIFT
#else
#define THREAD_SHIFT MIN_THREAD_SHIFT
@@ -135,11 +135,7 @@
* checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
* assembly.
*/
-#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN (2 * THREAD_SIZE)
-#else
-#define THREAD_ALIGN THREAD_SIZE
-#endif
#define IRQ_STACK_SIZE THREAD_SIZE
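With the stacks always allocated at 2 * THREAD_SIZE alignment, the overflow test mentioned above really is a single bit check. A worked sketch, assuming 4K pages (THREAD_SHIFT = 14, so 16K stacks aligned to 32K):

    /*
     * Bits [0..14] of the stack base are zero and in-bounds offsets
     * are below 16K, so any valid SP has bit 14 clear; the first byte
     * below the base sets it.
     */
    static inline bool sketch_sp_overflowed(unsigned long sp)
    {
            return sp & (1UL << THREAD_SHIFT);
    }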
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 21df8bbd2668..8770c7ee759f 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -11,10 +11,10 @@
#include <linux/shmem_fs.h>
#include <linux/types.h>
-static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+static inline vm_flags_t arch_calc_vm_prot_bits(unsigned long prot,
unsigned long pkey)
{
- unsigned long ret = 0;
+ vm_flags_t ret = 0;
if (system_supports_bti() && (prot & PROT_BTI))
ret |= VM_ARM64_BTI;
@@ -34,8 +34,8 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
-static inline unsigned long arch_calc_vm_flag_bits(struct file *file,
- unsigned long flags)
+static inline vm_flags_t arch_calc_vm_flag_bits(struct file *file,
+ unsigned long flags)
{
/*
* Only allow MTE on anonymous mappings as these are guaranteed to be
@@ -68,7 +68,7 @@ static inline bool arch_validate_prot(unsigned long prot,
}
#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
-static inline bool arch_validate_flags(unsigned long vm_flags)
+static inline bool arch_validate_flags(vm_flags_t vm_flags)
{
if (system_supports_mte()) {
/*
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 7830d031742e..85dceb1c66f4 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -17,7 +17,6 @@
#define PTE_SWP_EXCLUSIVE (_AT(pteval_t, 1) << 2) /* only for swp ptes */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
-#define PTE_DEVMAP (_AT(pteval_t, 1) << 57)
/*
* PTE_PRESENT_INVALID=1 & PTE_VALID=0 indicates that the pte's fields should be
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 192d86e1cc76..abd2dee416b3 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -190,7 +190,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
#define pte_user(pte) (!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
-#define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \
PTE_ATTRINDX(MT_NORMAL_TAGGED))
@@ -372,11 +371,6 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}
-static inline pte_t pte_mkdevmap(pte_t pte)
-{
- return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
-}
-
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
@@ -653,14 +647,6 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
return __pmd((pmd_val(pmd) & ~mask) | val);
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd))
-#endif
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
- return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
-}
-
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
#define pmd_special(pte) (!!((pmd_val(pte) & PTE_SPECIAL)))
static inline pmd_t pmd_mkspecial(pmd_t pmd)
@@ -1302,16 +1288,6 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
return __ptep_set_access_flags(vma, address, (pte_t *)pmdp,
pmd_pte(entry), dirty);
}
-
-static inline int pud_devmap(pud_t pud)
-{
- return 0;
-}
-
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
@@ -1643,6 +1619,14 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
*/
#define arch_wants_old_prefaulted_pte cpu_has_hw_af
+/*
+ * Request exec memory is read into pagecache in at least 64K folios. This size
+ * can be contpte-mapped when 4K base pages are in use (16 pages into 1 iTLB
+ * entry), and HPA can coalesce it (4 pages into 1 TLB entry) when 16K base
+ * pages are in use.
+ */
+#define exec_folio_order() ilog2(SZ_64K >> PAGE_SHIFT)
+
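Worked numbers for the define above: with 4K base pages, SZ_64K >> PAGE_SHIFT = 16, so exec_folio_order() = ilog2(16) = 4 and a 64K text folio maps as one contpte iTLB entry; with 16K base pages the same expression yields order 2, the 4-page unit that HPA can coalesce.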
static inline bool pud_sect_supported(void)
{
return PAGE_SIZE == SZ_4K;
@@ -1659,6 +1643,16 @@ extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t new_pte);
+#define modify_prot_start_ptes modify_prot_start_ptes
+extern pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr);
+
+#define modify_prot_commit_ptes modify_prot_commit_ptes
+extern void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte,
+ unsigned int nr);
+
#ifdef CONFIG_ARM64_CONTPTE
/*
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 1bf1a3b16e88..61d62bfd5a7b 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -23,6 +23,8 @@
#define MTE_CTRL_TCF_ASYNC (1UL << 17)
#define MTE_CTRL_TCF_ASYMM (1UL << 18)
+#define MTE_CTRL_STORE_ONLY (1UL << 19)
+
#ifndef __ASSEMBLY__
#include <linux/build_bug.h>
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 2510eec026f7..d48ef6d5abcc 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -50,10 +50,32 @@ struct seq_file;
*/
extern void smp_init_cpus(void);
+enum ipi_msg_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+ IPI_CPU_STOP_NMI,
+ IPI_TIMER,
+ IPI_IRQ_WORK,
+ NR_IPI,
+ /*
+ * Any enum >= NR_IPI and < MAX_IPI is special and not traceable
+ * with trace_ipi_*
+ */
+ IPI_CPU_BACKTRACE = NR_IPI,
+ IPI_KGDB_ROUNDUP,
+ MAX_IPI
+};
+
/*
* Register IPI interrupts with the arch SMP code
*/
-extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
+extern void set_smp_ipi_range_percpu(int ipi_base, int nr_ipi, int ncpus);
+
+static inline void set_smp_ipi_range(int ipi_base, int n)
+{
+ set_smp_ipi_range_percpu(ipi_base, n, 0);
+}
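An illustrative caller, with the irqchip-side shape assumed: a driver that can deliver IPIs through per-CPU interrupts passes the CPU count, while existing callers keep the old entry point, whose ncpus = 0 presumably selects the shared (non-per-CPU) layout.

    /* Hypothetical irqchip init path; ipi_base comes from the driver. */
    static void sketch_init_ipis(int ipi_base)
    {
            set_smp_ipi_range_percpu(ipi_base, NR_IPI, num_possible_cpus());
    }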
/*
* Called from the secondary holding pen, this is the secondary CPU entry point.
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 66ec8caa6ac0..6d3280932bf5 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -59,7 +59,6 @@ static inline bool on_task_stack(const struct task_struct *tsk,
#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1))
-#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
static inline struct stack_info stackinfo_get_overflow(void)
@@ -72,11 +71,8 @@ static inline struct stack_info stackinfo_get_overflow(void)
.high = high,
};
}
-#else
-#define stackinfo_get_overflow() stackinfo_get_unknown()
-#endif
-#if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK)
+#if defined(CONFIG_ARM_SDE_INTERFACE)
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index f1bb0d10c39a..d5b5f2ae1afa 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -113,10 +113,14 @@
/* Register-based PAN access, for save/restore purposes */
#define SYS_PSTATE_PAN sys_reg(3, 0, 4, 2, 3)
-#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
- __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
+#define __SYS_BARRIER_INSN(op0, op1, CRn, CRm, op2, Rt) \
+ __emit_inst(0xd5000000 | \
+ sys_insn((op0), (op1), (CRn), (CRm), (op2)) | \
+ ((Rt) & 0x1f))
-#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
+#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 3, 3, 0, 7, 31)
+#define GSB_SYS_BARRIER_INSN __SYS_BARRIER_INSN(1, 0, 12, 0, 0, 31)
+#define GSB_ACK_BARRIER_INSN __SYS_BARRIER_INSN(1, 0, 12, 0, 1, 31)
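For reference, the widened macro covers both encoding spaces; the old three-argument form hard-coded op0 = 0, op1 = 3, CRn = 3 and could not express the GSB barriers:

    /*
     * SB_BARRIER_INSN      = 0xd5000000 | sys_insn(0, 3,  3, 0, 7) | 31
     * GSB_SYS_BARRIER_INSN = 0xd5000000 | sys_insn(1, 0, 12, 0, 0) | 31
     * GSB_ACK_BARRIER_INSN = 0xd5000000 | sys_insn(1, 0, 12, 0, 1) | 31
     *
     * Rt = 31 encodes XZR in all three.
     */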
/* Data cache zero operations */
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
@@ -202,16 +206,8 @@
#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0)
#define SYS_BRBINF_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 0))
-#define SYS_BRBINFINJ_EL1 sys_reg(2, 1, 9, 1, 0)
#define SYS_BRBSRC_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 1))
-#define SYS_BRBSRCINJ_EL1 sys_reg(2, 1, 9, 1, 1)
#define SYS_BRBTGT_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 2))
-#define SYS_BRBTGTINJ_EL1 sys_reg(2, 1, 9, 1, 2)
-#define SYS_BRBTS_EL1 sys_reg(2, 1, 9, 0, 2)
-
-#define SYS_BRBCR_EL1 sys_reg(2, 1, 9, 0, 0)
-#define SYS_BRBFCR_EL1 sys_reg(2, 1, 9, 0, 1)
-#define SYS_BRBIDR0_EL1 sys_reg(2, 1, 9, 2, 0)
#define SYS_TRCITECR_EL1 sys_reg(3, 0, 1, 2, 3)
#define SYS_TRCACATR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (2 | (m >> 3)))
@@ -277,8 +273,6 @@
/* ETM */
#define SYS_TRCOSLAR sys_reg(2, 1, 1, 0, 4)
-#define SYS_BRBCR_EL2 sys_reg(2, 4, 9, 0, 0)
-
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
@@ -821,6 +815,12 @@
#define OP_COSP_RCTX sys_insn(1, 3, 7, 3, 6)
#define OP_CPP_RCTX sys_insn(1, 3, 7, 3, 7)
+/*
+ * BRBE Instructions
+ */
+#define BRB_IALL_INSN __emit_inst(0xd5000000 | OP_BRB_IALL | (0x1f))
+#define BRB_INJ_INSN __emit_inst(0xd5000000 | OP_BRB_INJ | (0x1f))
+
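A usage sketch for the new encodings (assumed shape, mirroring other system-instruction maintenance helpers): invalidate all branch records, then synchronise before any record is read back.

    static inline void sketch_brbe_invalidate_all(void)
    {
            asm volatile(BRB_IALL_INSN);	/* drop all branch records */
            isb();
    }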
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_ENTP2 (BIT(60))
#define SCTLR_ELx_DSSBS (BIT(44))
@@ -1078,6 +1078,67 @@
#define GCS_CAP(x) ((((unsigned long)x) & GCS_CAP_ADDR_MASK) | \
GCS_CAP_VALID_TOKEN)
+/*
+ * Definitions for GICv5 instructions
+ */
+#define GICV5_OP_GIC_CDAFF sys_insn(1, 0, 12, 1, 3)
+#define GICV5_OP_GIC_CDDI sys_insn(1, 0, 12, 2, 0)
+#define GICV5_OP_GIC_CDDIS sys_insn(1, 0, 12, 1, 0)
+#define GICV5_OP_GIC_CDHM sys_insn(1, 0, 12, 2, 1)
+#define GICV5_OP_GIC_CDEN sys_insn(1, 0, 12, 1, 1)
+#define GICV5_OP_GIC_CDEOI sys_insn(1, 0, 12, 1, 7)
+#define GICV5_OP_GIC_CDPEND sys_insn(1, 0, 12, 1, 4)
+#define GICV5_OP_GIC_CDPRI sys_insn(1, 0, 12, 1, 2)
+#define GICV5_OP_GIC_CDRCFG sys_insn(1, 0, 12, 1, 5)
+#define GICV5_OP_GICR_CDIA sys_insn(1, 0, 12, 3, 0)
+
+/* Definitions for GIC CDAFF */
+#define GICV5_GIC_CDAFF_IAFFID_MASK GENMASK_ULL(47, 32)
+#define GICV5_GIC_CDAFF_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDAFF_IRM_MASK BIT_ULL(28)
+#define GICV5_GIC_CDAFF_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDDI */
+#define GICV5_GIC_CDDI_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDDI_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDDIS */
+#define GICV5_GIC_CDDIS_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDDIS_TYPE(r) FIELD_GET(GICV5_GIC_CDDIS_TYPE_MASK, r)
+#define GICV5_GIC_CDDIS_ID_MASK GENMASK_ULL(23, 0)
+#define GICV5_GIC_CDDIS_ID(r) FIELD_GET(GICV5_GIC_CDDIS_ID_MASK, r)
+
+/* Definitions for GIC CDEN */
+#define GICV5_GIC_CDEN_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDEN_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDHM */
+#define GICV5_GIC_CDHM_HM_MASK BIT_ULL(32)
+#define GICV5_GIC_CDHM_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDHM_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDPEND */
+#define GICV5_GIC_CDPEND_PENDING_MASK BIT_ULL(32)
+#define GICV5_GIC_CDPEND_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDPEND_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDPRI */
+#define GICV5_GIC_CDPRI_PRIORITY_MASK GENMASK_ULL(39, 35)
+#define GICV5_GIC_CDPRI_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDPRI_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDRCFG */
+#define GICV5_GIC_CDRCFG_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDRCFG_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GICR CDIA */
+#define GICV5_GIC_CDIA_VALID_MASK BIT_ULL(32)
+#define GICV5_GICR_CDIA_VALID(r) FIELD_GET(GICV5_GIC_CDIA_VALID_MASK, r)
+#define GICV5_GIC_CDIA_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDIA_ID_MASK GENMASK_ULL(23, 0)
+
+#define gicr_insn(insn) read_sysreg_s(GICV5_OP_GICR_##insn)
+#define gic_insn(v, insn) write_sysreg_s(v, GICV5_OP_GIC_##insn)
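A hedged acknowledge-path sketch built on the two accessors above (driver shape assumed): GICR CDIA returns the highest-priority pending interrupt, with the VALID bit qualifying the TYPE/ID fields.

    static void sketch_gicv5_handle_one(void)
    {
            u64 ia = gicr_insn(CDIA);	/* read_sysreg_s(GICV5_OP_GICR_CDIA) */

            if (!GICV5_GICR_CDIA_VALID(ia))
                    return;
            /* ... dispatch FIELD_GET(GICV5_GIC_CDIA_ID_MASK, ia) ... */
            gic_insn(0, CDEOI);		/* assumed: CDEOI ignores the value */
    }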
#define ARM64_FEATURE_FIELD_BITS 4
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index c34344256762..344b1c1a4bbb 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -25,10 +25,6 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
int signo, int sicode, unsigned long far,
unsigned long err);
-void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned long,
- struct pt_regs *),
- int sig, int code, const char *name);
-
struct mm_struct;
extern void __show_regs(struct pt_regs *);
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 1269c2487574..f241b8601ebd 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -70,6 +70,7 @@ void arch_setup_new_exec(void);
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
#define TIF_SECCOMP 11 /* syscall secure computing */
#define TIF_SYSCALL_EMU 12 /* syscall emulation active */
+#define TIF_PATCH_PENDING 13 /* pending live patching update */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_FREEZE 19
#define TIF_RESTORE_SIGMASK 20
@@ -96,6 +97,7 @@ void arch_setup_new_exec(void);
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_32BIT (1 << TIF_32BIT)
@@ -107,7 +109,8 @@ void arch_setup_new_exec(void);
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
_TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
- _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING)
+ _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING | \
+ _TIF_PATCH_PENDING)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index aa9efee17277..18a5dc0c9a54 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -323,17 +323,6 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
}
/*
- * If mprotect/munmap/etc occurs during TLB batched flushing, we need to ensure
- * all the previously issued TLBIs targeting mm have completed. But since we
- * can be executing on a remote CPU, a DSB cannot guarantee this like it can
- * for arch_tlbbatch_flush(). Our only option is to flush the entire mm.
- */
-static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
- flush_tlb_mm(mm);
-}
-
-/*
* To support TLB batched flush for multiple pages unmapping, we only send
* the TLBI for each page in arch_tlbbatch_add_pending() and wait for the
* completion at the end in arch_tlbbatch_flush(). Since we've already issued
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 82cf1f879c61..e3e8944a71c3 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -29,6 +29,12 @@ void arm64_force_sig_fault_pkey(unsigned long far, const char *str, int pkey);
void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str);
void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str);
+int bug_brk_handler(struct pt_regs *regs, unsigned long esr);
+int cfi_brk_handler(struct pt_regs *regs, unsigned long esr);
+int reserved_fault_brk_handler(struct pt_regs *regs, unsigned long esr);
+int kasan_brk_handler(struct pt_regs *regs, unsigned long esr);
+int ubsan_brk_handler(struct pt_regs *regs, unsigned long esr);
+
int early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs);
/*
diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h
index 014b02897f8e..89bfb0213a50 100644
--- a/arch/arm64/include/asm/uprobes.h
+++ b/arch/arm64/include/asm/uprobes.h
@@ -28,4 +28,15 @@ struct arch_uprobe {
bool simulate;
};
+int uprobe_brk_handler(struct pt_regs *regs, unsigned long esr);
+#ifdef CONFIG_UPROBES
+int uprobe_single_step_handler(struct pt_regs *regs, unsigned long esr);
+#else
+static inline int uprobe_single_step_handler(struct pt_regs *regs,
+ unsigned long esr)
+{
+ return DBG_HOOK_ERROR;
+}
+#endif
+
#endif
diff --git a/arch/arm64/include/asm/vncr_mapping.h b/arch/arm64/include/asm/vncr_mapping.h
index 6f556e993644..f6ec500ad3fa 100644
--- a/arch/arm64/include/asm/vncr_mapping.h
+++ b/arch/arm64/include/asm/vncr_mapping.h
@@ -51,6 +51,7 @@
#define VNCR_SP_EL1 0x240
#define VNCR_VBAR_EL1 0x250
#define VNCR_TCR2_EL1 0x270
+#define VNCR_SCTLR2_EL1 0x278
#define VNCR_PIRE0_EL1 0x290
#define VNCR_PIR_EL1 0x2A0
#define VNCR_POR_EL1 0x2A8
@@ -84,6 +85,7 @@
#define VNCR_ICH_HCR_EL2 0x4C0
#define VNCR_ICH_VMCR_EL2 0x4C8
#define VNCR_VDISR_EL2 0x500
+#define VNCR_VSESR_EL2 0x508
#define VNCR_PMBLIMITR_EL1 0x800
#define VNCR_PMBPTR_EL1 0x810
#define VNCR_PMBSR_EL1 0x820
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 705a7afa8e58..72c78468b806 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -143,5 +143,7 @@
/*
* HWCAP3 flags - for AT_HWCAP3
*/
+#define HWCAP3_MTE_FAR (1UL << 0)
+#define HWCAP3_MTE_STORE_ONLY (1UL << 1)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index a2faf0049dab..76f32e424065 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -80,7 +80,7 @@ obj-y += head.o
always-$(KBUILD_BUILTIN) += vmlinux.lds
ifeq ($(CONFIG_DEBUG_EFI),y)
-AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
+AFLAGS_head.o += -DVMLINUX_PATH="\"$(abspath vmlinux)\""
endif
# for cleaning
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index b9a66fc146c9..4d529ff7ba51 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -197,6 +197,8 @@ out:
*/
void __init acpi_boot_table_init(void)
{
+ int ret;
+
/*
* Enable ACPI instead of device tree unless
* - ACPI has been disabled explicitly (acpi=off), or
@@ -250,10 +252,12 @@ done:
* behaviour, use acpi=nospcr to disable console in ACPI SPCR
* table as default serial console.
*/
- acpi_parse_spcr(earlycon_acpi_spcr_enable,
+ ret = acpi_parse_spcr(earlycon_acpi_spcr_enable,
!param_acpi_nospcr);
- pr_info("Use ACPI SPCR as default console: %s\n",
- param_acpi_nospcr ? "No" : "Yes");
+ if (ret || param_acpi_nospcr || !IS_ENABLED(CONFIG_ACPI_SPCR_TABLE))
+ pr_info("Use ACPI SPCR as default console: No\n");
+ else
+ pr_info("Use ACPI SPCR as default console: Yes\n");
if (IS_ENABLED(CONFIG_ACPI_BGRT))
acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e151585c6cca..9ad065f15f1d 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -303,6 +303,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_DF2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_GCS),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_GCS_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_frac_SHIFT, 4, 0),
@@ -320,6 +321,8 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
static const struct arm64_ftr_bits ftr_id_aa64pfr2[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_FPMR_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_MTEFAR_SHIFT, 4, ID_AA64PFR2_EL1_MTEFAR_NI),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_MTESTOREONLY_SHIFT, 4, ID_AA64PFR2_EL1_MTESTOREONLY_NI),
ARM64_FTR_END,
};
@@ -500,6 +503,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = {
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_POE),
FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1POE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1PIE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_SCTLRX_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_TCRX_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -2213,6 +2217,38 @@ static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
}
+static bool has_bbml2_noabort(const struct arm64_cpu_capabilities *caps, int scope)
+{
+ /*
+ * We want to allow usage of BBML2 in as wide a range of kernel contexts
+ * as possible. This list is therefore an allow-list of known-good
+ * implementations that both support BBML2 and additionally, fulfill the
+ * extra constraint of never generating TLB conflict aborts when using
+ * the relaxed BBML2 semantics (such aborts make use of BBML2 in certain
+ * kernel contexts difficult to prove safe against recursive aborts).
+ *
+ * Note that implementations can only be considered "known-good" if their
+ * implementors attest to the fact that the implementation never raises
+ * TLB conflict aborts for BBML2 mapping granularity changes.
+ */
+ static const struct midr_range supports_bbml2_noabort_list[] = {
+ MIDR_REV_RANGE(MIDR_CORTEX_X4, 0, 3, 0xf),
+ MIDR_REV_RANGE(MIDR_NEOVERSE_V3, 0, 2, 0xf),
+ {}
+ };
+
+ /* Does our cpu guarantee to never raise TLB conflict aborts? */
+ if (!is_midr_in_range_list(supports_bbml2_noabort_list))
+ return false;
+
+ /*
+ * We currently ignore the ID_AA64MMFR2_EL1 register, and only care
+ * about whether the MIDR check passes.
+ */
+
+ return true;
+}
+
#ifdef CONFIG_ARM64_PAN
static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
{
@@ -2296,11 +2332,11 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
int scope)
{
/*
- * ARM64_HAS_GIC_CPUIF_SYSREGS has a lower index, and is a boot CPU
+ * ARM64_HAS_GICV3_CPUIF has a lower index, and is a boot CPU
* feature, so will be detected earlier.
*/
- BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_MASKING <= ARM64_HAS_GIC_CPUIF_SYSREGS);
- if (!cpus_have_cap(ARM64_HAS_GIC_CPUIF_SYSREGS))
+ BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_MASKING <= ARM64_HAS_GICV3_CPUIF);
+ if (!cpus_have_cap(ARM64_HAS_GICV3_CPUIF))
return false;
return enable_pseudo_nmi;
@@ -2496,8 +2532,8 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_always,
},
{
- .desc = "GIC system register CPU interface",
- .capability = ARM64_HAS_GIC_CPUIF_SYSREGS,
+ .desc = "GICv3 CPU interface",
+ .capability = ARM64_HAS_GICV3_CPUIF,
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = has_useable_gicv3_cpuif,
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, GIC, IMP)
@@ -2874,6 +2910,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, MTE, MTE3)
},
+ {
+ .desc = "FAR on MTE Tag Check Fault",
+ .capability = ARM64_MTE_FAR,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64PFR2_EL1, MTEFAR, IMP)
+ },
+ {
+ .desc = "Store Only MTE Tag Check",
+ .capability = ARM64_MTE_STORE_ONLY,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64PFR2_EL1, MTESTOREONLY, IMP)
+ },
#endif /* CONFIG_ARM64_MTE */
{
.desc = "RCpc load-acquire (LDAPR)",
@@ -2981,6 +3031,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
},
{
+ .desc = "BBM Level 2 without TLB conflict abort",
+ .capability = ARM64_HAS_BBML2_NOABORT,
+ .type = ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE,
+ .matches = has_bbml2_noabort,
+ },
+ {
.desc = "52-bit Virtual Addressing for KVM (LPA2)",
.capability = ARM64_HAS_LPA2,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
@@ -3061,6 +3117,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_pmuv3,
},
#endif
+ {
+ .desc = "SCTLR2",
+ .capability = ARM64_HAS_SCTLR2,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, SCTLRX, IMP)
+ },
+ {
+ .desc = "GICv5 CPU interface",
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .capability = ARM64_HAS_GICV5_CPUIF,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64PFR2_EL1, GCIE, IMP)
+ },
{},
};
@@ -3218,6 +3288,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
#ifdef CONFIG_ARM64_MTE
HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE),
HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3),
+ HWCAP_CAP(ID_AA64PFR2_EL1, MTEFAR, IMP, CAP_HWCAP, KERNEL_HWCAP_MTE_FAR),
+ HWCAP_CAP(ID_AA64PFR2_EL1, MTESTOREONLY, IMP, CAP_HWCAP, KERNEL_HWCAP_MTE_STORE_ONLY),
#endif /* CONFIG_ARM64_MTE */
HWCAP_CAP(ID_AA64MMFR0_EL1, ECV, IMP, CAP_HWCAP, KERNEL_HWCAP_ECV),
HWCAP_CAP(ID_AA64MMFR1_EL1, AFP, IMP, CAP_HWCAP, KERNEL_HWCAP_AFP),
@@ -3377,18 +3449,49 @@ static void update_cpu_capabilities(u16 scope_mask)
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
for (i = 0; i < ARM64_NCAPS; i++) {
+ bool match_all = false;
+ bool caps_set = false;
+ bool boot_cpu = false;
+
caps = cpucap_ptrs[i];
- if (!caps || !(caps->type & scope_mask) ||
- cpus_have_cap(caps->capability) ||
- !caps->matches(caps, cpucap_default_scope(caps)))
+ if (!caps || !(caps->type & scope_mask))
continue;
- if (caps->desc && !caps->cpus)
+ match_all = cpucap_match_all_early_cpus(caps);
+ caps_set = cpus_have_cap(caps->capability);
+ boot_cpu = scope_mask & SCOPE_BOOT_CPU;
+
+ /*
+ * Unless it's a match-all CPUs feature, avoid probing if
+ * already detected.
+ */
+ if (!match_all && caps_set)
+ continue;
+
+ /*
+ * A match-all CPUs capability is only set when probing the
+ * boot CPU. It may be cleared subsequently if not detected on
+ * secondary ones.
+ */
+ if (match_all && !caps_set && !boot_cpu)
+ continue;
+
+ if (!caps->matches(caps, cpucap_default_scope(caps))) {
+ if (match_all)
+ __clear_bit(caps->capability, system_cpucaps);
+ continue;
+ }
+
+ /*
+ * Match-all CPUs capabilities are logged later when the
+ * system capabilities are finalised.
+ */
+ if (!match_all && caps->desc && !caps->cpus)
pr_info("detected: %s\n", caps->desc);
__set_bit(caps->capability, system_cpucaps);
- if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
+ if (boot_cpu && (caps->type & SCOPE_BOOT_CPU))
set_bit(caps->capability, boot_cpucaps);
}
}
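The probing rules above condense to a small decision table, where "match-all" means a capability that must hold on every early CPU:

    !match_all && already set            -> skip, nothing left to do
    match_all && !set && secondary scope -> skip, already cleared on a miss
    matches() fails                      -> clear the bit if match_all
    matches() passes                     -> set the bit; match-all caps are
                                            logged at finalisation instead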
@@ -3789,17 +3892,24 @@ static void __init setup_system_capabilities(void)
enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
apply_alternatives_all();
- /*
- * Log any cpucaps with a cpumask as these aren't logged by
- * update_cpu_capabilities().
- */
for (int i = 0; i < ARM64_NCAPS; i++) {
const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
- if (caps && caps->cpus && caps->desc &&
- cpumask_any(caps->cpus) < nr_cpu_ids)
+ if (!caps || !caps->desc)
+ continue;
+
+ /*
+ * Log any cpucaps with a cpumask as these aren't logged by
+ * update_cpu_capabilities().
+ */
+ if (caps->cpus && cpumask_any(caps->cpus) < nr_cpu_ids)
pr_info("detected: %s on CPU%*pbl\n",
caps->desc, cpumask_pr_args(caps->cpus));
+
+ /* Log match-all CPUs capabilities */
+ if (cpucap_match_all_early_cpus(caps) &&
+ cpus_have_cap(caps->capability))
+ pr_info("detected: %s\n", caps->desc);
}
/*
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index c1f2b6b04b41..ba834909a28b 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -160,6 +160,8 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_SME_SFEXPA] = "smesfexpa",
[KERNEL_HWCAP_SME_STMOP] = "smestmop",
[KERNEL_HWCAP_SME_SMOP4] = "smesmop4",
+ [KERNEL_HWCAP_MTE_FAR] = "mtefar",
+ [KERNEL_HWCAP_MTE_STORE_ONLY] = "mtestoreonly",
};
#ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 58f047de3e1c..110d9ff54174 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -21,8 +21,12 @@
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
+#include <asm/exception.h>
+#include <asm/kgdb.h>
+#include <asm/kprobes.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
+#include <asm/uprobes.h>
/* Determine debug architecture. */
u8 debug_monitors_arch(void)
@@ -34,7 +38,7 @@ u8 debug_monitors_arch(void)
/*
* MDSCR access routines.
*/
-static void mdscr_write(u32 mdscr)
+static void mdscr_write(u64 mdscr)
{
unsigned long flags;
flags = local_daif_save();
@@ -43,7 +47,7 @@ static void mdscr_write(u32 mdscr)
}
NOKPROBE_SYMBOL(mdscr_write);
-static u32 mdscr_read(void)
+static u64 mdscr_read(void)
{
return read_sysreg(mdscr_el1);
}
@@ -79,16 +83,16 @@ static DEFINE_PER_CPU(int, kde_ref_count);
void enable_debug_monitors(enum dbg_active_el el)
{
- u32 mdscr, enable = 0;
+ u64 mdscr, enable = 0;
WARN_ON(preemptible());
if (this_cpu_inc_return(mde_ref_count) == 1)
- enable = DBG_MDSCR_MDE;
+ enable = MDSCR_EL1_MDE;
if (el == DBG_ACTIVE_EL1 &&
this_cpu_inc_return(kde_ref_count) == 1)
- enable |= DBG_MDSCR_KDE;
+ enable |= MDSCR_EL1_KDE;
if (enable && debug_enabled) {
mdscr = mdscr_read();
@@ -100,16 +104,16 @@ NOKPROBE_SYMBOL(enable_debug_monitors);
void disable_debug_monitors(enum dbg_active_el el)
{
- u32 mdscr, disable = 0;
+ u64 mdscr, disable = 0;
WARN_ON(preemptible());
if (this_cpu_dec_return(mde_ref_count) == 0)
- disable = ~DBG_MDSCR_MDE;
+ disable = ~MDSCR_EL1_MDE;
if (el == DBG_ACTIVE_EL1 &&
this_cpu_dec_return(kde_ref_count) == 0)
- disable &= ~DBG_MDSCR_KDE;
+ disable &= ~MDSCR_EL1_KDE;
if (disable) {
mdscr = mdscr_read();
@@ -156,74 +160,6 @@ NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs)
#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs)
-static DEFINE_SPINLOCK(debug_hook_lock);
-static LIST_HEAD(user_step_hook);
-static LIST_HEAD(kernel_step_hook);
-
-static void register_debug_hook(struct list_head *node, struct list_head *list)
-{
- spin_lock(&debug_hook_lock);
- list_add_rcu(node, list);
- spin_unlock(&debug_hook_lock);
-
-}
-
-static void unregister_debug_hook(struct list_head *node)
-{
- spin_lock(&debug_hook_lock);
- list_del_rcu(node);
- spin_unlock(&debug_hook_lock);
- synchronize_rcu();
-}
-
-void register_user_step_hook(struct step_hook *hook)
-{
- register_debug_hook(&hook->node, &user_step_hook);
-}
-
-void unregister_user_step_hook(struct step_hook *hook)
-{
- unregister_debug_hook(&hook->node);
-}
-
-void register_kernel_step_hook(struct step_hook *hook)
-{
- register_debug_hook(&hook->node, &kernel_step_hook);
-}
-
-void unregister_kernel_step_hook(struct step_hook *hook)
-{
- unregister_debug_hook(&hook->node);
-}
-
-/*
- * Call registered single step handlers
- * There is no Syndrome info to check for determining the handler.
- * So we call all the registered handlers, until the right handler is
- * found which returns zero.
- */
-static int call_step_hook(struct pt_regs *regs, unsigned long esr)
-{
- struct step_hook *hook;
- struct list_head *list;
- int retval = DBG_HOOK_ERROR;
-
- list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
-
- /*
- * Since single-step exception disables interrupt, this function is
- * entirely not preemptible, and we can use rcu list safely here.
- */
- list_for_each_entry_rcu(hook, list, node) {
- retval = hook->fn(regs, esr);
- if (retval == DBG_HOOK_HANDLED)
- break;
- }
-
- return retval;
-}
-NOKPROBE_SYMBOL(call_step_hook);
-
static void send_user_sigtrap(int si_code)
{
struct pt_regs *regs = current_pt_regs();
@@ -238,105 +174,110 @@ static void send_user_sigtrap(int si_code)
"User debug trap");
}
-static int single_step_handler(unsigned long unused, unsigned long esr,
- struct pt_regs *regs)
+/*
+ * We have already unmasked interrupts and enabled preemption
+ * when calling do_el0_softstep() from entry-common.c.
+ */
+void do_el0_softstep(unsigned long esr, struct pt_regs *regs)
{
- bool handler_found = false;
+ if (uprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
+ return;
+ send_user_sigtrap(TRAP_TRACE);
/*
- * If we are stepping a pending breakpoint, call the hw_breakpoint
- * handler first.
+ * ptrace will disable single step unless explicitly
+ * asked to re-enable it. For other clients, it makes
+ * sense to leave it enabled (i.e. rewind the controls
+ * to the active-not-pending state).
*/
- if (!reinstall_suspended_bps(regs))
- return 0;
-
- if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
- handler_found = true;
-
- if (!handler_found && user_mode(regs)) {
- send_user_sigtrap(TRAP_TRACE);
-
- /*
- * ptrace will disable single step unless explicitly
- * asked to re-enable it. For other clients, it makes
- * sense to leave it enabled (i.e. rewind the controls
- * to the active-not-pending state).
- */
- user_rewind_single_step(current);
- } else if (!handler_found) {
- pr_warn("Unexpected kernel single-step exception at EL1\n");
- /*
- * Re-enable stepping since we know that we will be
- * returning to regs.
- */
- set_regs_spsr_ss(regs);
- }
-
- return 0;
+ user_rewind_single_step(current);
}
-NOKPROBE_SYMBOL(single_step_handler);
-
-static LIST_HEAD(user_break_hook);
-static LIST_HEAD(kernel_break_hook);
-void register_user_break_hook(struct break_hook *hook)
+void do_el1_softstep(unsigned long esr, struct pt_regs *regs)
{
- register_debug_hook(&hook->node, &user_break_hook);
-}
+ if (kgdb_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
+ return;
-void unregister_user_break_hook(struct break_hook *hook)
-{
- unregister_debug_hook(&hook->node);
+ pr_warn("Unexpected kernel single-step exception at EL1\n");
+ /*
+ * Re-enable stepping since we know that we will be
+ * returning to regs.
+ */
+ set_regs_spsr_ss(regs);
}
+NOKPROBE_SYMBOL(do_el1_softstep);
-void register_kernel_break_hook(struct break_hook *hook)
+static int call_el1_break_hook(struct pt_regs *regs, unsigned long esr)
{
- register_debug_hook(&hook->node, &kernel_break_hook);
-}
+ if (esr_brk_comment(esr) == BUG_BRK_IMM)
+ return bug_brk_handler(regs, esr);
-void unregister_kernel_break_hook(struct break_hook *hook)
-{
- unregister_debug_hook(&hook->node);
-}
+ if (IS_ENABLED(CONFIG_CFI_CLANG) && esr_is_cfi_brk(esr))
+ return cfi_brk_handler(regs, esr);
-static int call_break_hook(struct pt_regs *regs, unsigned long esr)
-{
- struct break_hook *hook;
- struct list_head *list;
+ if (esr_brk_comment(esr) == FAULT_BRK_IMM)
+ return reserved_fault_brk_handler(regs, esr);
- list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
+ if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
+ (esr_brk_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
+ return kasan_brk_handler(regs, esr);
- /*
- * Since brk exception disables interrupt, this function is
- * entirely not preemptible, and we can use rcu list safely here.
- */
- list_for_each_entry_rcu(hook, list, node) {
- if ((esr_brk_comment(esr) & ~hook->mask) == hook->imm)
- return hook->fn(regs, esr);
+ if (IS_ENABLED(CONFIG_UBSAN_TRAP) && esr_is_ubsan_brk(esr))
+ return ubsan_brk_handler(regs, esr);
+
+ if (IS_ENABLED(CONFIG_KGDB)) {
+ if (esr_brk_comment(esr) == KGDB_DYN_DBG_BRK_IMM)
+ return kgdb_brk_handler(regs, esr);
+ if (esr_brk_comment(esr) == KGDB_COMPILED_DBG_BRK_IMM)
+ return kgdb_compiled_brk_handler(regs, esr);
+ }
+
+ if (IS_ENABLED(CONFIG_KPROBES)) {
+ if (esr_brk_comment(esr) == KPROBES_BRK_IMM)
+ return kprobe_brk_handler(regs, esr);
+ if (esr_brk_comment(esr) == KPROBES_BRK_SS_IMM)
+ return kprobe_ss_brk_handler(regs, esr);
}
+ if (IS_ENABLED(CONFIG_KRETPROBES) &&
+ esr_brk_comment(esr) == KRETPROBES_BRK_IMM)
+ return kretprobe_brk_handler(regs, esr);
+
return DBG_HOOK_ERROR;
}
-NOKPROBE_SYMBOL(call_break_hook);
+NOKPROBE_SYMBOL(call_el1_break_hook);
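Background for the dispatch above: BRK #imm places its 16-bit comment immediate in ESR_ELx.ISS, so every handler is selected by a compile-time-known comparison instead of walking a registered hook list. Roughly, with the mask as defined in <asm/esr.h>:

    /* Sketch of the extraction esr_brk_comment() performs. */
    static inline u16 sketch_brk_comment(unsigned long esr)
    {
            return esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;	/* low 16 bits */
    }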
-static int brk_handler(unsigned long unused, unsigned long esr,
- struct pt_regs *regs)
+/*
+ * We have already unmasked interrupts and enabled preemption
+ * when calling do_el0_brk64() from entry-common.c.
+ */
+void do_el0_brk64(unsigned long esr, struct pt_regs *regs)
{
- if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
- return 0;
+ if (IS_ENABLED(CONFIG_UPROBES) &&
+ esr_brk_comment(esr) == UPROBES_BRK_IMM &&
+ uprobe_brk_handler(regs, esr) == DBG_HOOK_HANDLED)
+ return;
- if (user_mode(regs)) {
- send_user_sigtrap(TRAP_BRKPT);
- } else {
- pr_warn("Unexpected kernel BRK exception at EL1\n");
- return -EFAULT;
- }
+ send_user_sigtrap(TRAP_BRKPT);
+}
- return 0;
+void do_el1_brk64(unsigned long esr, struct pt_regs *regs)
+{
+ if (call_el1_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return;
+
+ die("Oops - BRK", regs, esr);
+}
+NOKPROBE_SYMBOL(do_el1_brk64);
+
+#ifdef CONFIG_COMPAT
+void do_bkpt32(unsigned long esr, struct pt_regs *regs)
+{
+ arm64_notify_die("aarch32 BKPT", regs, SIGTRAP, TRAP_BRKPT, regs->pc, esr);
}
-NOKPROBE_SYMBOL(brk_handler);
+#endif /* CONFIG_COMPAT */
-int aarch32_break_handler(struct pt_regs *regs)
+bool try_handle_aarch32_break(struct pt_regs *regs)
{
u32 arm_instr;
u16 thumb_instr;
@@ -344,7 +285,7 @@ int aarch32_break_handler(struct pt_regs *regs)
void __user *pc = (void __user *)instruction_pointer(regs);
if (!compat_user_mode(regs))
- return -EFAULT;
+ return false;
if (compat_thumb_mode(regs)) {
/* get 16-bit Thumb instruction */
@@ -368,20 +309,12 @@ int aarch32_break_handler(struct pt_regs *regs)
}
if (!bp)
- return -EFAULT;
+ return false;
send_user_sigtrap(TRAP_BRKPT);
- return 0;
-}
-NOKPROBE_SYMBOL(aarch32_break_handler);
-
-void __init debug_traps_init(void)
-{
- hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
- TRAP_TRACE, "single-step handler");
- hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
- TRAP_BRKPT, "BRK handler");
+ return true;
}
+NOKPROBE_SYMBOL(try_handle_aarch32_break);
/* Re-enable single step for syscall restarting. */
void user_rewind_single_step(struct task_struct *task)
@@ -415,7 +348,7 @@ void kernel_enable_single_step(struct pt_regs *regs)
{
WARN_ON(!irqs_disabled());
set_regs_spsr_ss(regs);
- mdscr_write(mdscr_read() | DBG_MDSCR_SS);
+ mdscr_write(mdscr_read() | MDSCR_EL1_SS);
enable_debug_monitors(DBG_ACTIVE_EL1);
}
NOKPROBE_SYMBOL(kernel_enable_single_step);
@@ -423,7 +356,7 @@ NOKPROBE_SYMBOL(kernel_enable_single_step);
void kernel_disable_single_step(void)
{
WARN_ON(!irqs_disabled());
- mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
+ mdscr_write(mdscr_read() & ~MDSCR_EL1_SS);
disable_debug_monitors(DBG_ACTIVE_EL1);
}
NOKPROBE_SYMBOL(kernel_disable_single_step);
@@ -431,7 +364,7 @@ NOKPROBE_SYMBOL(kernel_disable_single_step);
int kernel_active_single_step(void)
{
WARN_ON(!irqs_disabled());
- return mdscr_read() & DBG_MDSCR_SS;
+ return mdscr_read() & MDSCR_EL1_SS;
}
NOKPROBE_SYMBOL(kernel_active_single_step);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 62230d6dd919..6c371b158b99 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -215,11 +215,6 @@ static int __init arm64_efi_rt_init(void)
if (!efi_enabled(EFI_RUNTIME_SERVICES))
return 0;
- if (!IS_ENABLED(CONFIG_VMAP_STACK)) {
- clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
- return -ENOMEM;
- }
-
p = arch_alloc_vmap_stack(THREAD_SIZE, NUMA_NO_NODE);
if (!p) {
pr_warn("Failed to allocate EFI runtime stack\n");
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 7c1970b341b8..2b0c5925502e 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -8,6 +8,7 @@
#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
+#include <linux/livepatch.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/resume_user_mode.h>
@@ -144,6 +145,9 @@ static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
(void __user *)NULL, current);
}
+ if (thread_flags & _TIF_PATCH_PENDING)
+ klp_update_patch_state(current);
+
if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs);
@@ -344,7 +348,7 @@ static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
static void cortex_a76_erratum_1463225_svc_handler(void)
{
- u32 reg, val;
+ u64 reg, val;
if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
return;
@@ -354,7 +358,7 @@ static void cortex_a76_erratum_1463225_svc_handler(void)
__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
reg = read_sysreg(mdscr_el1);
- val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
+ val = reg | MDSCR_EL1_SS | MDSCR_EL1_KDE;
write_sysreg(val, mdscr_el1);
asm volatile("msr daifclr, #8");
isb();
@@ -441,6 +445,28 @@ static __always_inline void fpsimd_syscall_exit(void)
__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
}
+/*
+ * In debug exception context, we explicitly disable preemption despite
+ * having interrupts disabled.
+ * This serves two purposes: it makes it much less likely that we would
+ * accidentally schedule in exception context and it will force a warning
+ * if we somehow manage to schedule by accident.
+ */
+static void debug_exception_enter(struct pt_regs *regs)
+{
+ preempt_disable();
+
+ /* This code is a bit fragile. Test it. */
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
+}
+NOKPROBE_SYMBOL(debug_exception_enter);
+
+static void debug_exception_exit(struct pt_regs *regs)
+{
+ preempt_enable_no_resched();
+}
+NOKPROBE_SYMBOL(debug_exception_exit);
+
UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
@@ -504,13 +530,51 @@ static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
exit_to_kernel_mode(regs);
}
-static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
+static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
+{
+ arm64_enter_el1_dbg(regs);
+ debug_exception_enter(regs);
+ do_breakpoint(esr, regs);
+ debug_exception_exit(regs);
+ arm64_exit_el1_dbg(regs);
+}
+
+static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
{
+ arm64_enter_el1_dbg(regs);
+ if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
+ debug_exception_enter(regs);
+ /*
+ * After handling a breakpoint, we suspend the breakpoint
+ * and use single-step to move to the next instruction.
+ * If we are stepping a suspended breakpoint there's nothing more to do:
+ * the single-step is complete.
+ */
+ if (!try_step_suspended_breakpoints(regs))
+ do_el1_softstep(esr, regs);
+ debug_exception_exit(regs);
+ }
+ arm64_exit_el1_dbg(regs);
+}
+
+static void noinstr el1_watchpt(struct pt_regs *regs, unsigned long esr)
+{
+ /* Watchpoints are the only debug exception to write FAR_EL1 */
unsigned long far = read_sysreg(far_el1);
arm64_enter_el1_dbg(regs);
- if (!cortex_a76_erratum_1463225_debug_handler(regs))
- do_debug_exception(far, esr, regs);
+ debug_exception_enter(regs);
+ do_watchpoint(far, esr, regs);
+ debug_exception_exit(regs);
+ arm64_exit_el1_dbg(regs);
+}
+
+static void noinstr el1_brk64(struct pt_regs *regs, unsigned long esr)
+{
+ arm64_enter_el1_dbg(regs);
+ debug_exception_enter(regs);
+ do_el1_brk64(esr, regs);
+ debug_exception_exit(regs);
arm64_exit_el1_dbg(regs);
}
@@ -553,10 +617,16 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
el1_mops(regs, esr);
break;
case ESR_ELx_EC_BREAKPT_CUR:
+ el1_breakpt(regs, esr);
+ break;
case ESR_ELx_EC_SOFTSTP_CUR:
+ el1_softstp(regs, esr);
+ break;
case ESR_ELx_EC_WATCHPT_CUR:
+ el1_watchpt(regs, esr);
+ break;
case ESR_ELx_EC_BRK64:
- el1_dbg(regs, esr);
+ el1_brk64(regs, esr);
break;
case ESR_ELx_EC_FPAC:
el1_fpac(regs, esr);
@@ -747,14 +817,56 @@ static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
exit_to_user_mode(regs);
}
-static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
{
- /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
+ if (!is_ttbr0_addr(regs->pc))
+ arm64_apply_bp_hardening();
+
+ enter_from_user_mode(regs);
+ debug_exception_enter(regs);
+ do_breakpoint(esr, regs);
+ debug_exception_exit(regs);
+ local_daif_restore(DAIF_PROCCTX);
+ exit_to_user_mode(regs);
+}
+
+static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
+{
+ if (!is_ttbr0_addr(regs->pc))
+ arm64_apply_bp_hardening();
+
+ enter_from_user_mode(regs);
+ /*
+ * After handling a breakpoint, we suspend the breakpoint
+ * and use single-step to move to the next instruction.
+ * If we are stepping a suspended breakpoint there's nothing more to do:
+ * the single-step is complete.
+ */
+ if (!try_step_suspended_breakpoints(regs)) {
+ local_daif_restore(DAIF_PROCCTX);
+ do_el0_softstep(esr, regs);
+ }
+ exit_to_user_mode(regs);
+}
+
+static void noinstr el0_watchpt(struct pt_regs *regs, unsigned long esr)
+{
+ /* Watchpoints are the only debug exception to write FAR_EL1 */
unsigned long far = read_sysreg(far_el1);
enter_from_user_mode(regs);
- do_debug_exception(far, esr, regs);
+ debug_exception_enter(regs);
+ do_watchpoint(far, esr, regs);
+ debug_exception_exit(regs);
+ local_daif_restore(DAIF_PROCCTX);
+ exit_to_user_mode(regs);
+}
+
+static void noinstr el0_brk64(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
+ do_el0_brk64(esr, regs);
exit_to_user_mode(regs);
}
@@ -826,10 +938,16 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
el0_gcs(regs, esr);
break;
case ESR_ELx_EC_BREAKPT_LOW:
+ el0_breakpt(regs, esr);
+ break;
case ESR_ELx_EC_SOFTSTP_LOW:
+ el0_softstp(regs, esr);
+ break;
case ESR_ELx_EC_WATCHPT_LOW:
+ el0_watchpt(regs, esr);
+ break;
case ESR_ELx_EC_BRK64:
- el0_dbg(regs, esr);
+ el0_brk64(regs, esr);
break;
case ESR_ELx_EC_FPAC:
el0_fpac(regs, esr);
@@ -912,6 +1030,14 @@ static void noinstr el0_svc_compat(struct pt_regs *regs)
exit_to_user_mode(regs);
}
+static void noinstr el0_bkpt32(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode(regs);
+ local_daif_restore(DAIF_PROCCTX);
+ do_bkpt32(esr, regs);
+ exit_to_user_mode(regs);
+}
+
asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -946,10 +1072,16 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
el0_cp15(regs, esr);
break;
case ESR_ELx_EC_BREAKPT_LOW:
+ el0_breakpt(regs, esr);
+ break;
case ESR_ELx_EC_SOFTSTP_LOW:
+ el0_softstp(regs, esr);
+ break;
case ESR_ELx_EC_WATCHPT_LOW:
+ el0_watchpt(regs, esr);
+ break;
case ESR_ELx_EC_BKPT32:
- el0_dbg(regs, esr);
+ el0_bkpt32(regs, esr);
break;
default:
el0_inv(regs, esr);
@@ -977,7 +1109,6 @@ UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */
-#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -986,7 +1117,6 @@ asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
arm64_enter_nmi(regs);
panic_bad_stack(regs, esr, far);
}
-#endif /* CONFIG_VMAP_STACK */
#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index fe62218b8e0e..f8018b5c1f9a 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -55,7 +55,6 @@
.endif
sub sp, sp, #PT_REGS_SIZE
-#ifdef CONFIG_VMAP_STACK
/*
* Test whether the SP has overflowed, without corrupting a GPR.
* Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
@@ -97,7 +96,6 @@
/* We were already on the overflow stack. Restore sp/x0 and carry on. */
sub sp, sp, x0
mrs x0, tpidrro_el0
-#endif
b el\el\ht\()_\regsize\()_\label
.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
.endm
@@ -540,7 +538,6 @@ SYM_CODE_START(vectors)
kernel_ventry 0, t, 32, error // Error 32-bit EL0
SYM_CODE_END(vectors)
-#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(__bad_stack)
/*
* We detected an overflow in kernel_ventry, which switched to the
@@ -568,7 +565,6 @@ SYM_CODE_START_LOCAL(__bad_stack)
bl handle_bad_stack
ASM_BUG()
SYM_CODE_END(__bad_stack)
-#endif /* CONFIG_VMAP_STACK */
.macro entry_handler el:req, ht:req, regsize:req, label:req
@@ -1009,7 +1005,6 @@ SYM_CODE_START(__sdei_asm_handler)
1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2: str x19, [x5]
-#ifdef CONFIG_VMAP_STACK
/*
* entry.S may have been using sp as a scratch register, find whether
* this is a normal or critical event and switch to the appropriate
@@ -1022,7 +1017,6 @@ SYM_CODE_START(__sdei_asm_handler)
2: mov x6, #SDEI_STACK_SIZE
add x5, x5, x6
mov sp, x5
-#endif
#ifdef CONFIG_SHADOW_CALL_STACK
/* Use a separate shadow call stack for normal and critical events */
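
For reference, the overflow test that entry.S now performs unconditionally hinges on stack alignment: stacks are sized 1 << THREAD_SHIFT and aligned to twice that, so bit THREAD_SHIFT of any valid SP is clear and flips to one on underflow. A minimal userspace sketch of the bit trick, assuming a hypothetical 16 KiB stack at an invented base address:

/* Userspace sketch of the kernel_ventry overflow test. */
#include <stdint.h>
#include <stdio.h>

#define THREAD_SHIFT 14                       /* assumption: 16 KiB stacks */
#define THREAD_SIZE  (1UL << THREAD_SHIFT)

static int sp_overflowed(uintptr_t sp)
{
	/* With 2x-size alignment, bit THREAD_SHIFT is 0 for every valid SP
	 * inside the stack and becomes 1 as soon as SP underflows it. */
	return (sp & (1UL << THREAD_SHIFT)) != 0;
}

int main(void)
{
	uintptr_t base = 0x40000000;          /* invented, 2x-size aligned */
	uintptr_t top  = base + THREAD_SIZE;

	printf("sp near top  overflowed=%d\n", sp_overflowed(top - 16));
	printf("sp near base overflowed=%d\n", sp_overflowed(base + 16));
	printf("sp underflow overflowed=%d\n", sp_overflowed(base - 16));
	return 0;
}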
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 722ac45f9f7b..ab76b36dce82 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -22,6 +22,7 @@
#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
+#include <asm/exception.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
@@ -618,8 +619,7 @@ NOKPROBE_SYMBOL(toggle_bp_registers);
/*
* Debug exception handlers.
*/
-static int breakpoint_handler(unsigned long unused, unsigned long esr,
- struct pt_regs *regs)
+void do_breakpoint(unsigned long esr, struct pt_regs *regs)
{
int i, step = 0, *kernel_step;
u32 ctrl_reg;
@@ -662,7 +662,7 @@ unlock:
}
if (!step)
- return 0;
+ return;
if (user_mode(regs)) {
debug_info->bps_disabled = 1;
@@ -670,7 +670,7 @@ unlock:
/* If we're already stepping a watchpoint, just return. */
if (debug_info->wps_disabled)
- return 0;
+ return;
if (test_thread_flag(TIF_SINGLESTEP))
debug_info->suspended_step = 1;
@@ -681,7 +681,7 @@ unlock:
kernel_step = this_cpu_ptr(&stepping_kernel_bp);
if (*kernel_step != ARM_KERNEL_STEP_NONE)
- return 0;
+ return;
if (kernel_active_single_step()) {
*kernel_step = ARM_KERNEL_STEP_SUSPEND;
@@ -690,10 +690,8 @@ unlock:
kernel_enable_single_step(regs);
}
}
-
- return 0;
}
-NOKPROBE_SYMBOL(breakpoint_handler);
+NOKPROBE_SYMBOL(do_breakpoint);
/*
* Arm64 hardware does not always report a watchpoint hit address that matches
@@ -752,8 +750,7 @@ static int watchpoint_report(struct perf_event *wp, unsigned long addr,
return step;
}
-static int watchpoint_handler(unsigned long addr, unsigned long esr,
- struct pt_regs *regs)
+void do_watchpoint(unsigned long addr, unsigned long esr, struct pt_regs *regs)
{
int i, step = 0, *kernel_step, access, closest_match = 0;
u64 min_dist = -1, dist;
@@ -808,7 +805,7 @@ static int watchpoint_handler(unsigned long addr, unsigned long esr,
rcu_read_unlock();
if (!step)
- return 0;
+ return;
/*
* We always disable EL0 watchpoints because the kernel can
@@ -821,7 +818,7 @@ static int watchpoint_handler(unsigned long addr, unsigned long esr,
/* If we're already stepping a breakpoint, just return. */
if (debug_info->bps_disabled)
- return 0;
+ return;
if (test_thread_flag(TIF_SINGLESTEP))
debug_info->suspended_step = 1;
@@ -832,7 +829,7 @@ static int watchpoint_handler(unsigned long addr, unsigned long esr,
kernel_step = this_cpu_ptr(&stepping_kernel_bp);
if (*kernel_step != ARM_KERNEL_STEP_NONE)
- return 0;
+ return;
if (kernel_active_single_step()) {
*kernel_step = ARM_KERNEL_STEP_SUSPEND;
@@ -841,44 +838,41 @@ static int watchpoint_handler(unsigned long addr, unsigned long esr,
kernel_enable_single_step(regs);
}
}
-
- return 0;
}
-NOKPROBE_SYMBOL(watchpoint_handler);
+NOKPROBE_SYMBOL(do_watchpoint);
/*
* Handle single-step exception.
*/
-int reinstall_suspended_bps(struct pt_regs *regs)
+bool try_step_suspended_breakpoints(struct pt_regs *regs)
{
struct debug_info *debug_info = &current->thread.debug;
- int handled_exception = 0, *kernel_step;
-
- kernel_step = this_cpu_ptr(&stepping_kernel_bp);
+ int *kernel_step = this_cpu_ptr(&stepping_kernel_bp);
+ bool handled_exception = false;
/*
- * Called from single-step exception handler.
- * Return 0 if execution can resume, 1 if a SIGTRAP should be
- * reported.
+ * Called from single-step exception entry.
+ * Return true if we stepped a breakpoint and can resume execution,
+ * false if we need to handle a single-step.
*/
if (user_mode(regs)) {
if (debug_info->bps_disabled) {
debug_info->bps_disabled = 0;
toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
- handled_exception = 1;
+ handled_exception = true;
}
if (debug_info->wps_disabled) {
debug_info->wps_disabled = 0;
toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
- handled_exception = 1;
+ handled_exception = true;
}
if (handled_exception) {
if (debug_info->suspended_step) {
debug_info->suspended_step = 0;
/* Allow exception handling to fall-through. */
- handled_exception = 0;
+ handled_exception = false;
} else {
user_disable_single_step(current);
}
@@ -892,17 +886,17 @@ int reinstall_suspended_bps(struct pt_regs *regs)
if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
kernel_disable_single_step();
- handled_exception = 1;
+ handled_exception = true;
} else {
- handled_exception = 0;
+ handled_exception = false;
}
*kernel_step = ARM_KERNEL_STEP_NONE;
}
- return !handled_exception;
+ return handled_exception;
}
-NOKPROBE_SYMBOL(reinstall_suspended_bps);
+NOKPROBE_SYMBOL(try_step_suspended_breakpoints);
/*
* Context-switcher for restoring suspended breakpoints.
@@ -987,12 +981,6 @@ static int __init arch_hw_breakpoint_init(void)
pr_info("found %d breakpoint and %d watchpoint registers.\n",
core_num_brps, core_num_wrps);
- /* Register debug fault handlers. */
- hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
- TRAP_HWBKPT, "hw-breakpoint handler");
- hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
- TRAP_HWBKPT, "hw-watchpoint handler");
-
/*
* Reset the breakpoint resources. We assume that a halting
* debugger will leave the world in a nice state for us.
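
The hunks above rework the suspend-and-step dance: a breakpoint hit suspends the breakpoint and arms a single-step, and try_step_suspended_breakpoints() decides whether that step merely completed the suspended breakpoint. A compact userspace model of the user-mode branch of that state machine; all names here (struct dbg, on_breakpoint, on_single_step) are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct dbg {
	bool bps_disabled;	/* breakpoints suspended while stepping */
	bool suspended_step;	/* user was already single-stepping */
};

/* Breakpoint hit: suspend breakpoints and arm a single-step. */
static void on_breakpoint(struct dbg *d, bool user_was_stepping)
{
	d->bps_disabled = true;
	d->suspended_step = user_was_stepping;
}

/* Single-step taken: mirrors try_step_suspended_breakpoints(). Returns
 * true if the step only completed a suspended breakpoint. */
static bool on_single_step(struct dbg *d)
{
	bool handled = false;

	if (d->bps_disabled) {
		d->bps_disabled = false;	/* re-arm the breakpoints */
		handled = true;
	}
	if (handled && d->suspended_step) {
		d->suspended_step = false;
		handled = false;	/* fall through to the step handler */
	}
	return handled;
}

int main(void)
{
	struct dbg d = { false, false };

	on_breakpoint(&d, false);
	printf("plain bp:  step consumed internally = %d\n", on_single_step(&d));
	on_breakpoint(&d, true);
	printf("bp + step: step consumed internally = %d\n", on_single_step(&d));
	return 0;
}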
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 85087e2df564..c0065a1d77cf 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -51,7 +51,6 @@ static void init_irq_scs(void)
scs_alloc(early_cpu_to_node(cpu));
}
-#ifdef CONFIG_VMAP_STACK
static void __init init_irq_stacks(void)
{
int cpu;
@@ -62,18 +61,6 @@ static void __init init_irq_stacks(void)
per_cpu(irq_stack_ptr, cpu) = p;
}
}
-#else
-/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
-DEFINE_PER_CPU_ALIGNED(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
-
-static void init_irq_stacks(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
-}
-#endif
#ifndef CONFIG_PREEMPT_RT
static void ____do_softirq(struct pt_regs *regs)
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index f3c4d3a8a20f..968324a79a89 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -234,23 +234,23 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
return err;
}
-static int kgdb_brk_fn(struct pt_regs *regs, unsigned long esr)
+int kgdb_brk_handler(struct pt_regs *regs, unsigned long esr)
{
kgdb_handle_exception(1, SIGTRAP, 0, regs);
return DBG_HOOK_HANDLED;
}
-NOKPROBE_SYMBOL(kgdb_brk_fn)
+NOKPROBE_SYMBOL(kgdb_brk_handler)
-static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned long esr)
+int kgdb_compiled_brk_handler(struct pt_regs *regs, unsigned long esr)
{
compiled_break = 1;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
return DBG_HOOK_HANDLED;
}
-NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
+NOKPROBE_SYMBOL(kgdb_compiled_brk_handler);
-static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned long esr)
+int kgdb_single_step_handler(struct pt_regs *regs, unsigned long esr)
{
if (!kgdb_single_step)
return DBG_HOOK_ERROR;
@@ -258,21 +258,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned long esr)
kgdb_handle_exception(0, SIGTRAP, 0, regs);
return DBG_HOOK_HANDLED;
}
-NOKPROBE_SYMBOL(kgdb_step_brk_fn);
-
-static struct break_hook kgdb_brkpt_hook = {
- .fn = kgdb_brk_fn,
- .imm = KGDB_DYN_DBG_BRK_IMM,
-};
-
-static struct break_hook kgdb_compiled_brkpt_hook = {
- .fn = kgdb_compiled_brk_fn,
- .imm = KGDB_COMPILED_DBG_BRK_IMM,
-};
-
-static struct step_hook kgdb_step_hook = {
- .fn = kgdb_step_brk_fn
-};
+NOKPROBE_SYMBOL(kgdb_single_step_handler);
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
@@ -311,15 +297,7 @@ static struct notifier_block kgdb_notifier = {
*/
int kgdb_arch_init(void)
{
- int ret = register_die_notifier(&kgdb_notifier);
-
- if (ret != 0)
- return ret;
-
- register_kernel_break_hook(&kgdb_brkpt_hook);
- register_kernel_break_hook(&kgdb_compiled_brkpt_hook);
- register_kernel_step_hook(&kgdb_step_hook);
- return 0;
+ return register_die_notifier(&kgdb_notifier);
}
/*
@@ -329,9 +307,6 @@ int kgdb_arch_init(void)
*/
void kgdb_arch_exit(void)
{
- unregister_kernel_break_hook(&kgdb_brkpt_hook);
- unregister_kernel_break_hook(&kgdb_compiled_brkpt_hook);
- unregister_kernel_step_hook(&kgdb_step_hook);
unregister_die_notifier(&kgdb_notifier);
}
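
With the break_hook/step_hook registration removed, the debug-monitors dispatcher can call the now-exported kgdb handlers directly. A sketch of the shape of such direct dispatch on the BRK immediate; the immediate values and the dispatcher itself are assumptions for illustration, not the kernel's actual table:

#include <stdio.h>

#define KGDB_DYN_DBG_BRK_IMM      0x400	/* assumed values, for shape only */
#define KGDB_COMPILED_DBG_BRK_IMM 0x401

enum { DBG_HOOK_ERROR, DBG_HOOK_HANDLED };

static int kgdb_brk_handler(unsigned long esr)          { return DBG_HOOK_HANDLED; }
static int kgdb_compiled_brk_handler(unsigned long esr) { return DBG_HOOK_HANDLED; }

/* With the hooks gone, the BRK dispatcher calls handlers directly
 * based on the immediate encoded in ESR; no runtime registration. */
static int dispatch_brk(unsigned long imm, unsigned long esr)
{
	switch (imm) {
	case KGDB_DYN_DBG_BRK_IMM:      return kgdb_brk_handler(esr);
	case KGDB_COMPILED_DBG_BRK_IMM: return kgdb_compiled_brk_handler(esr);
	default:                        return DBG_HOOK_ERROR;
	}
}

int main(void)
{
	printf("dyn brk -> %d\n", dispatch_brk(KGDB_DYN_DBG_BRK_IMM, 0));
	printf("unknown -> %d\n", dispatch_brk(0x7ff, 0));
	return 0;
}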
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 06bb680bfe97..40148d2725ce 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -23,6 +23,7 @@
#include <asm/insn.h>
#include <asm/scs.h>
#include <asm/sections.h>
+#include <asm/text-patching.h>
enum aarch64_reloc_op {
RELOC_OP_NONE,
@@ -48,7 +49,17 @@ static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
return 0;
}
-static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
+#define WRITE_PLACE(place, val, mod) do { \
+ __typeof__(val) __val = (val); \
+ \
+ if (mod->state == MODULE_STATE_UNFORMED) \
+ *(place) = __val; \
+ else \
+ aarch64_insn_copy(place, &(__val), sizeof(*place)); \
+} while (0)
+
+static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len,
+ struct module *me)
{
s64 sval = do_reloc(op, place, val);
@@ -66,7 +77,7 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
switch (len) {
case 16:
- *(s16 *)place = sval;
+ WRITE_PLACE((s16 *)place, sval, me);
switch (op) {
case RELOC_OP_ABS:
if (sval < 0 || sval > U16_MAX)
@@ -82,7 +93,7 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
}
break;
case 32:
- *(s32 *)place = sval;
+ WRITE_PLACE((s32 *)place, sval, me);
switch (op) {
case RELOC_OP_ABS:
if (sval < 0 || sval > U32_MAX)
@@ -98,7 +109,7 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
}
break;
case 64:
- *(s64 *)place = sval;
+ WRITE_PLACE((s64 *)place, sval, me);
break;
default:
pr_err("Invalid length (%d) for data relocation\n", len);
@@ -113,7 +124,8 @@ enum aarch64_insn_movw_imm_type {
};
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
- int lsb, enum aarch64_insn_movw_imm_type imm_type)
+ int lsb, enum aarch64_insn_movw_imm_type imm_type,
+ struct module *me)
{
u64 imm;
s64 sval;
@@ -145,7 +157,7 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
/* Update the instruction with the new encoding. */
insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
- *place = cpu_to_le32(insn);
+ WRITE_PLACE(place, cpu_to_le32(insn), me);
if (imm > U16_MAX)
return -ERANGE;
@@ -154,7 +166,8 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
}
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
- int lsb, int len, enum aarch64_insn_imm_type imm_type)
+ int lsb, int len, enum aarch64_insn_imm_type imm_type,
+ struct module *me)
{
u64 imm, imm_mask;
s64 sval;
@@ -170,7 +183,7 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
/* Update the instruction's immediate field. */
insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
- *place = cpu_to_le32(insn);
+ WRITE_PLACE(place, cpu_to_le32(insn), me);
/*
* Extract the upper value bits (including the sign bit) and
@@ -189,17 +202,17 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
}
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
- __le32 *place, u64 val)
+ __le32 *place, u64 val, struct module *me)
{
u32 insn;
if (!is_forbidden_offset_for_adrp(place))
return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
- AARCH64_INSN_IMM_ADR);
+ AARCH64_INSN_IMM_ADR, me);
/* patch ADRP to ADR if it is in range */
if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
- AARCH64_INSN_IMM_ADR)) {
+ AARCH64_INSN_IMM_ADR, me)) {
insn = le32_to_cpu(*place);
insn &= ~BIT(31);
} else {
@@ -211,7 +224,7 @@ static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
AARCH64_INSN_BRANCH_NOLINK);
}
- *place = cpu_to_le32(insn);
+ WRITE_PLACE(place, cpu_to_le32(insn), me);
return 0;
}
@@ -255,23 +268,23 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
/* Data relocations. */
case R_AARCH64_ABS64:
overflow_check = false;
- ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
+ ovf = reloc_data(RELOC_OP_ABS, loc, val, 64, me);
break;
case R_AARCH64_ABS32:
- ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
+ ovf = reloc_data(RELOC_OP_ABS, loc, val, 32, me);
break;
case R_AARCH64_ABS16:
- ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
+ ovf = reloc_data(RELOC_OP_ABS, loc, val, 16, me);
break;
case R_AARCH64_PREL64:
overflow_check = false;
- ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
+ ovf = reloc_data(RELOC_OP_PREL, loc, val, 64, me);
break;
case R_AARCH64_PREL32:
- ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
+ ovf = reloc_data(RELOC_OP_PREL, loc, val, 32, me);
break;
case R_AARCH64_PREL16:
- ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
+ ovf = reloc_data(RELOC_OP_PREL, loc, val, 16, me);
break;
/* MOVW instruction relocations. */
@@ -280,88 +293,88 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
fallthrough;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVKZ, me);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
fallthrough;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVKZ, me);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
fallthrough;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVKZ, me);
break;
case R_AARCH64_MOVW_UABS_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVKZ, me);
break;
case R_AARCH64_MOVW_SABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- AARCH64_INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ, me);
break;
case R_AARCH64_MOVW_SABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- AARCH64_INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ, me);
break;
case R_AARCH64_MOVW_SABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- AARCH64_INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ, me);
break;
case R_AARCH64_MOVW_PREL_G0_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVKZ, me);
break;
case R_AARCH64_MOVW_PREL_G0:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- AARCH64_INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ, me);
break;
case R_AARCH64_MOVW_PREL_G1_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVKZ, me);
break;
case R_AARCH64_MOVW_PREL_G1:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- AARCH64_INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ, me);
break;
case R_AARCH64_MOVW_PREL_G2_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVKZ, me);
break;
case R_AARCH64_MOVW_PREL_G2:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- AARCH64_INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ, me);
break;
case R_AARCH64_MOVW_PREL_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
- AARCH64_INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ, me);
break;
/* Immediate instruction relocations. */
case R_AARCH64_LD_PREL_LO19:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
- AARCH64_INSN_IMM_19);
+ AARCH64_INSN_IMM_19, me);
break;
case R_AARCH64_ADR_PREL_LO21:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
- AARCH64_INSN_IMM_ADR);
+ AARCH64_INSN_IMM_ADR, me);
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
fallthrough;
case R_AARCH64_ADR_PREL_PG_HI21:
- ovf = reloc_insn_adrp(me, sechdrs, loc, val);
+ ovf = reloc_insn_adrp(me, sechdrs, loc, val, me);
if (ovf && ovf != -ERANGE)
return ovf;
break;
@@ -369,46 +382,46 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
- AARCH64_INSN_IMM_12);
+ AARCH64_INSN_IMM_12, me);
break;
case R_AARCH64_LDST16_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
- AARCH64_INSN_IMM_12);
+ AARCH64_INSN_IMM_12, me);
break;
case R_AARCH64_LDST32_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
- AARCH64_INSN_IMM_12);
+ AARCH64_INSN_IMM_12, me);
break;
case R_AARCH64_LDST64_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
- AARCH64_INSN_IMM_12);
+ AARCH64_INSN_IMM_12, me);
break;
case R_AARCH64_LDST128_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
- AARCH64_INSN_IMM_12);
+ AARCH64_INSN_IMM_12, me);
break;
case R_AARCH64_TSTBR14:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
- AARCH64_INSN_IMM_14);
+ AARCH64_INSN_IMM_14, me);
break;
case R_AARCH64_CONDBR19:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
- AARCH64_INSN_IMM_19);
+ AARCH64_INSN_IMM_19, me);
break;
case R_AARCH64_JUMP26:
case R_AARCH64_CALL26:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
- AARCH64_INSN_IMM_26);
+ AARCH64_INSN_IMM_26, me);
if (ovf == -ERANGE) {
val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
if (!val)
return -ENOEXEC;
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
- 26, AARCH64_INSN_IMM_26);
+ 26, AARCH64_INSN_IMM_26, me);
}
break;
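
WRITE_PLACE() routes relocation stores through aarch64_insn_copy() once the module is live, since live module text can no longer be written in place; only a still-unformed module is written directly. A minimal userspace sketch of the same pattern, with insn_copy() standing in for the kernel's patching helper:

#include <stdio.h>
#include <string.h>

enum module_state { MODULE_STATE_UNFORMED, MODULE_STATE_LIVE };

struct module { enum module_state state; };

/* Stand-in for aarch64_insn_copy(): here just a memcpy. */
static void insn_copy(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
}

#define WRITE_PLACE(place, val, mod) do {			\
	__typeof__(val) __val = (val);				\
	if ((mod)->state == MODULE_STATE_UNFORMED)		\
		*(place) = __val;				\
	else							\
		insn_copy((place), &__val, sizeof(*(place)));	\
} while (0)

int main(void)
{
	unsigned int insn = 0;
	struct module m = { MODULE_STATE_UNFORMED };

	WRITE_PLACE(&insn, 0xd503201fu, &m);	/* direct: not yet live */
	m.state = MODULE_STATE_LIVE;
	WRITE_PLACE(&insn, 0xd65f03c0u, &m);	/* via the patching helper */
	printf("final insn: 0x%08x\n", insn);
	return 0;
}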
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 2fbfd27ff5f2..e5e773844889 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -200,7 +200,7 @@ static void mte_update_sctlr_user(struct task_struct *task)
* program requested values go with what was requested.
*/
resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
- sctlr &= ~SCTLR_EL1_TCF0_MASK;
+ sctlr &= ~(SCTLR_EL1_TCF0_MASK | SCTLR_EL1_TCSO0_MASK);
/*
* Pick an actual setting. The order in which we check for
* set bits and map into register values determines our
@@ -212,6 +212,10 @@ static void mte_update_sctlr_user(struct task_struct *task)
sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYNC);
else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, SYNC);
+
+ if (mte_ctrl & MTE_CTRL_STORE_ONLY)
+ sctlr |= SYS_FIELD_PREP(SCTLR_EL1, TCSO0, 1);
+
task->thread.sctlr_user = sctlr;
}
@@ -371,6 +375,9 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
(arg & PR_MTE_TCF_SYNC))
mte_ctrl |= MTE_CTRL_TCF_ASYMM;
+ if (arg & PR_MTE_STORE_ONLY)
+ mte_ctrl |= MTE_CTRL_STORE_ONLY;
+
task->thread.mte_ctrl = mte_ctrl;
if (task == current) {
preempt_disable();
@@ -398,6 +405,8 @@ long get_mte_ctrl(struct task_struct *task)
ret |= PR_MTE_TCF_ASYNC;
if (mte_ctrl & MTE_CTRL_TCF_SYNC)
ret |= PR_MTE_TCF_SYNC;
+ if (mte_ctrl & MTE_CTRL_STORE_ONLY)
+ ret |= PR_MTE_STORE_ONLY;
return ret;
}
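
The mte.c hunks clear both tag-check fields before re-deriving them, so a prctl() that drops PR_MTE_STORE_ONLY also clears TCSO0. A userspace model of that update; the field positions below are invented stand-ins, not the architectural encodings:

#include <stdio.h>

#define SCTLR_TCF0_MASK  (3ULL << 38)	/* assumed layout */
#define SCTLR_TCSO0_BIT  (1ULL << 46)	/* assumed layout */
#define CTRL_STORE_ONLY  (1ULL << 6)

static unsigned long long update_sctlr(unsigned long long sctlr,
				       unsigned long long mte_ctrl)
{
	/* Clear both tag-check fields before re-deriving them, as
	 * mte_update_sctlr_user() now does. */
	sctlr &= ~(SCTLR_TCF0_MASK | SCTLR_TCSO0_BIT);
	if (mte_ctrl & CTRL_STORE_ONLY)
		sctlr |= SCTLR_TCSO0_BIT;
	return sctlr;
}

int main(void)
{
	unsigned long long s = update_sctlr(~0ULL, CTRL_STORE_ONLY);

	printf("TCF0 cleared: %d\n", !(s & SCTLR_TCF0_MASK));
	printf("TCSO0 set:    %d\n", !!(s & SCTLR_TCSO0_BIT));
	return 0;
}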
diff --git a/arch/arm64/kernel/pi/Makefile b/arch/arm64/kernel/pi/Makefile
index f440bf57b1a5..be92d73c25b2 100644
--- a/arch/arm64/kernel/pi/Makefile
+++ b/arch/arm64/kernel/pi/Makefile
@@ -41,4 +41,4 @@ obj-y := idreg-override.pi.o \
obj-$(CONFIG_RELOCATABLE) += relocate.pi.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr_early.pi.o
obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.pi.o
-extra-y := $(patsubst %.pi.o,%.o,$(obj-y))
+targets := $(patsubst %.pi.o,%.o,$(obj-y))
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d9e462eafb95..0c5d408afd95 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -292,8 +292,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
return 0;
}
-static int __kprobes
-kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
+int __kprobes
+kprobe_brk_handler(struct pt_regs *regs, unsigned long esr)
{
struct kprobe *p, *cur_kprobe;
struct kprobe_ctlblk *kcb;
@@ -336,13 +336,8 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
return DBG_HOOK_HANDLED;
}
-static struct break_hook kprobes_break_hook = {
- .imm = KPROBES_BRK_IMM,
- .fn = kprobe_breakpoint_handler,
-};
-
-static int __kprobes
-kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr)
+int __kprobes
+kprobe_ss_brk_handler(struct pt_regs *regs, unsigned long esr)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long addr = instruction_pointer(regs);
@@ -360,13 +355,8 @@ kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr)
return DBG_HOOK_ERROR;
}
-static struct break_hook kprobes_break_ss_hook = {
- .imm = KPROBES_BRK_SS_IMM,
- .fn = kprobe_breakpoint_ss_handler,
-};
-
-static int __kprobes
-kretprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
+int __kprobes
+kretprobe_brk_handler(struct pt_regs *regs, unsigned long esr)
{
if (regs->pc != (unsigned long)__kretprobe_trampoline)
return DBG_HOOK_ERROR;
@@ -375,11 +365,6 @@ kretprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
return DBG_HOOK_HANDLED;
}
-static struct break_hook kretprobes_break_hook = {
- .imm = KRETPROBES_BRK_IMM,
- .fn = kretprobe_breakpoint_handler,
-};
-
/*
* Provide a blacklist of symbols identifying ranges which cannot be kprobed.
* This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
@@ -422,9 +407,5 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
int __init arch_init_kprobes(void)
{
- register_kernel_break_hook(&kprobes_break_hook);
- register_kernel_break_hook(&kprobes_break_ss_hook);
- register_kernel_break_hook(&kretprobes_break_hook);
-
return 0;
}
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
index a362f3dbb3d1..b60739d3983f 100644
--- a/arch/arm64/kernel/probes/kprobes_trampoline.S
+++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -12,7 +12,7 @@
SYM_CODE_START(__kretprobe_trampoline)
/*
* Trigger a breakpoint exception. The PC will be adjusted by
- * kretprobe_breakpoint_handler(), and no subsequent instructions will
+ * kretprobe_brk_handler(), and no subsequent instructions will
* be executed from the trampoline.
*/
brk #KRETPROBES_BRK_IMM
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
index cb3d05af36e3..1f91fd2a8187 100644
--- a/arch/arm64/kernel/probes/uprobes.c
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -173,7 +173,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
return NOTIFY_DONE;
}
-static int uprobe_breakpoint_handler(struct pt_regs *regs,
+int uprobe_brk_handler(struct pt_regs *regs,
unsigned long esr)
{
if (uprobe_pre_sstep_notifier(regs))
@@ -182,7 +182,7 @@ static int uprobe_breakpoint_handler(struct pt_regs *regs,
return DBG_HOOK_ERROR;
}
-static int uprobe_single_step_handler(struct pt_regs *regs,
+int uprobe_single_step_handler(struct pt_regs *regs,
unsigned long esr)
{
struct uprobe_task *utask = current->utask;
@@ -194,23 +194,3 @@ static int uprobe_single_step_handler(struct pt_regs *regs,
return DBG_HOOK_ERROR;
}
-/* uprobe breakpoint handler hook */
-static struct break_hook uprobes_break_hook = {
- .imm = UPROBES_BRK_IMM,
- .fn = uprobe_breakpoint_handler,
-};
-
-/* uprobe single step handler hook */
-static struct step_hook uprobes_step_hook = {
- .fn = uprobe_single_step_handler,
-};
-
-static int __init arch_init_uprobes(void)
-{
- register_user_break_hook(&uprobes_break_hook);
- register_user_step_hook(&uprobes_step_hook);
-
- return 0;
-}
-
-device_initcall(arch_init_uprobes);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 08b7042a2e2d..96482a1412c6 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -307,13 +307,13 @@ static int copy_thread_gcs(struct task_struct *p,
p->thread.gcs_base = 0;
p->thread.gcs_size = 0;
+ p->thread.gcs_el0_mode = current->thread.gcs_el0_mode;
+ p->thread.gcs_el0_locked = current->thread.gcs_el0_locked;
+
gcs = gcs_alloc_thread_stack(p, args);
if (IS_ERR_VALUE(gcs))
return PTR_ERR((void *)gcs);
- p->thread.gcs_el0_mode = current->thread.gcs_el0_mode;
- p->thread.gcs_el0_locked = current->thread.gcs_el0_locked;
-
return 0;
}
@@ -341,7 +341,6 @@ void flush_thread(void)
void arch_release_task_struct(struct task_struct *tsk)
{
fpsimd_release_task(tsk);
- gcs_free(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -856,10 +855,14 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
if (is_compat_thread(ti))
return -EINVAL;
- if (system_supports_mte())
+ if (system_supports_mte()) {
valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC \
| PR_MTE_TAG_MASK;
+ if (cpus_have_cap(ARM64_MTE_STORE_ONLY))
+ valid_mask |= PR_MTE_STORE_ONLY;
+ }
+
if (arg & ~valid_mask)
return -EINVAL;
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 255d12f881c2..6f24a0251e18 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -34,10 +34,8 @@ unsigned long sdei_exit_mode;
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
-#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
-#endif
DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
@@ -65,8 +63,7 @@ static void free_sdei_stacks(void)
{
int cpu;
- if (!IS_ENABLED(CONFIG_VMAP_STACK))
- return;
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK));
for_each_possible_cpu(cpu) {
_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
@@ -91,8 +88,7 @@ static int init_sdei_stacks(void)
int cpu;
int err = 0;
- if (!IS_ENABLED(CONFIG_VMAP_STACK))
- return 0;
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK));
for_each_possible_cpu(cpu) {
err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
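
Replacing the runtime IS_ENABLED() bail-out with BUILD_BUG_ON() turns "VMAP_STACK is now mandatory" into a compile-time guarantee. A userspace approximation of the pattern, using the classic negative-array-size trick (the kernel's BUILD_BUG_ON and IS_ENABLED are more elaborate):

#include <stdio.h>

#define CONFIG_VMAP_STACK 1
#define IS_ENABLED(x) (x)	/* crude stand-in for the kernel macro */

/* Compiles to nothing when cond is false; fails the build (negative
 * array size) when cond is true. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

static int init_stacks(void)
{
	/* Previously: if (!IS_ENABLED(CONFIG_VMAP_STACK)) return 0;
	 * Now a disabled config is a build error instead. */
	BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK));
	return 0;
}

int main(void)
{
	printf("init_stacks() = %d\n", init_stacks());
	return 0;
}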
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 417140cd399b..db3f972f8cd9 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -95,8 +95,11 @@ static void save_reset_user_access_state(struct user_access_state *ua_state)
ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
write_sysreg_s(por_enable_all, SYS_POR_EL0);
- /* Ensure that any subsequent uaccess observes the updated value */
- isb();
+ /*
+ * No ISB required as we can tolerate spurious Overlay faults -
+ * the fault handler will check again based on the new value
+ * of POR_EL0.
+ */
}
}
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 21a795303568..68cea3a4a35c 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -64,26 +64,18 @@ struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
static int cpus_stuck_in_kernel;
-enum ipi_msg_type {
- IPI_RESCHEDULE,
- IPI_CALL_FUNC,
- IPI_CPU_STOP,
- IPI_CPU_STOP_NMI,
- IPI_TIMER,
- IPI_IRQ_WORK,
- NR_IPI,
- /*
- * Any enum >= NR_IPI and < MAX_IPI is special and not tracable
- * with trace_ipi_*
- */
- IPI_CPU_BACKTRACE = NR_IPI,
- IPI_KGDB_ROUNDUP,
- MAX_IPI
-};
-
static int ipi_irq_base __ro_after_init;
static int nr_ipi __ro_after_init = NR_IPI;
-static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
+
+struct ipi_descs {
+ struct irq_desc *descs[MAX_IPI];
+};
+
+static DEFINE_PER_CPU_READ_MOSTLY(struct ipi_descs, pcpu_ipi_desc);
+
+#define get_ipi_desc(__cpu, __ipi) (per_cpu_ptr(&pcpu_ipi_desc, __cpu)->descs[__ipi])
+
+static bool percpu_ipi_descs __ro_after_init;
static bool crash_stop;
@@ -844,7 +836,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
+ seq_printf(p, "%10u ", irq_desc_kstat_cpu(get_ipi_desc(cpu, i), cpu));
seq_printf(p, " %s\n", ipi_types[i]);
}
@@ -917,9 +909,20 @@ static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs
#endif
}
+static void arm64_send_ipi(const cpumask_t *mask, unsigned int nr)
+{
+ unsigned int cpu;
+
+ if (!percpu_ipi_descs)
+ __ipi_send_mask(get_ipi_desc(0, nr), mask);
+ else
+ for_each_cpu(cpu, mask)
+ __ipi_send_single(get_ipi_desc(cpu, nr), cpu);
+}
+
static void arm64_backtrace_ipi(cpumask_t *mask)
{
- __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
+ arm64_send_ipi(mask, IPI_CPU_BACKTRACE);
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
@@ -944,7 +947,7 @@ void kgdb_roundup_cpus(void)
if (cpu == this_cpu)
continue;
- __ipi_send_single(ipi_desc[IPI_KGDB_ROUNDUP], cpu);
+ __ipi_send_single(get_ipi_desc(cpu, IPI_KGDB_ROUNDUP), cpu);
}
}
#endif
@@ -1013,14 +1016,16 @@ static void do_handle_IPI(int ipinr)
static irqreturn_t ipi_handler(int irq, void *data)
{
- do_handle_IPI(irq - ipi_irq_base);
+ unsigned int ipi = (irq - ipi_irq_base) % nr_ipi;
+
+ do_handle_IPI(ipi);
return IRQ_HANDLED;
}
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
trace_ipi_raise(target, ipi_types[ipinr]);
- __ipi_send_mask(ipi_desc[ipinr], target);
+ arm64_send_ipi(target, ipinr);
}
static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
@@ -1046,11 +1051,15 @@ static void ipi_setup(int cpu)
return;
for (i = 0; i < nr_ipi; i++) {
- if (ipi_should_be_nmi(i)) {
- prepare_percpu_nmi(ipi_irq_base + i);
- enable_percpu_nmi(ipi_irq_base + i, 0);
+ if (!percpu_ipi_descs) {
+ if (ipi_should_be_nmi(i)) {
+ prepare_percpu_nmi(ipi_irq_base + i);
+ enable_percpu_nmi(ipi_irq_base + i, 0);
+ } else {
+ enable_percpu_irq(ipi_irq_base + i, 0);
+ }
} else {
- enable_percpu_irq(ipi_irq_base + i, 0);
+ enable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
}
}
}
@@ -1064,44 +1073,77 @@ static void ipi_teardown(int cpu)
return;
for (i = 0; i < nr_ipi; i++) {
- if (ipi_should_be_nmi(i)) {
- disable_percpu_nmi(ipi_irq_base + i);
- teardown_percpu_nmi(ipi_irq_base + i);
+ if (!percpu_ipi_descs) {
+ if (ipi_should_be_nmi(i)) {
+ disable_percpu_nmi(ipi_irq_base + i);
+ teardown_percpu_nmi(ipi_irq_base + i);
+ } else {
+ disable_percpu_irq(ipi_irq_base + i);
+ }
} else {
- disable_percpu_irq(ipi_irq_base + i);
+ disable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
}
}
}
#endif
-void __init set_smp_ipi_range(int ipi_base, int n)
+static void ipi_setup_sgi(int ipi)
{
- int i;
+ int err, irq, cpu;
- WARN_ON(n < MAX_IPI);
- nr_ipi = min(n, MAX_IPI);
+ irq = ipi_irq_base + ipi;
- for (i = 0; i < nr_ipi; i++) {
- int err;
+ if (ipi_should_be_nmi(ipi)) {
+ err = request_percpu_nmi(irq, ipi_handler, "IPI", &irq_stat);
+ WARN(err, "Could not request IRQ %d as NMI, err=%d\n", irq, err);
+ } else {
+ err = request_percpu_irq(irq, ipi_handler, "IPI", &irq_stat);
+ WARN(err, "Could not request IRQ %d as IRQ, err=%d\n", irq, err);
+ }
- if (ipi_should_be_nmi(i)) {
- err = request_percpu_nmi(ipi_base + i, ipi_handler,
- "IPI", &irq_stat);
- WARN(err, "Could not request IPI %d as NMI, err=%d\n",
- i, err);
- } else {
- err = request_percpu_irq(ipi_base + i, ipi_handler,
- "IPI", &irq_stat);
- WARN(err, "Could not request IPI %d as IRQ, err=%d\n",
- i, err);
- }
+ for_each_possible_cpu(cpu)
+ get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
+
+ irq_set_status_flags(irq, IRQ_HIDDEN);
+}
+
+static void ipi_setup_lpi(int ipi, int ncpus)
+{
+ for (int cpu = 0; cpu < ncpus; cpu++) {
+ int err, irq;
+
+ irq = ipi_irq_base + (cpu * nr_ipi) + ipi;
+
+ err = irq_force_affinity(irq, cpumask_of(cpu));
+ WARN(err, "Could not force affinity IRQ %d, err=%d\n", irq, err);
+
+ err = request_irq(irq, ipi_handler, IRQF_NO_AUTOEN, "IPI",
+ NULL);
+ WARN(err, "Could not request IRQ %d, err=%d\n", irq, err);
+
+ irq_set_status_flags(irq, (IRQ_HIDDEN | IRQ_NO_BALANCING_MASK));
- ipi_desc[i] = irq_to_desc(ipi_base + i);
- irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+ get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
}
+}
+
+void __init set_smp_ipi_range_percpu(int ipi_base, int n, int ncpus)
+{
+ int i;
+
+ WARN_ON(n < MAX_IPI);
+ nr_ipi = min(n, MAX_IPI);
+ percpu_ipi_descs = !!ncpus;
ipi_irq_base = ipi_base;
+ for (i = 0; i < nr_ipi; i++) {
+ if (!percpu_ipi_descs)
+ ipi_setup_sgi(i);
+ else
+ ipi_setup_lpi(i, ncpus);
+ }
+
/* Setup the boot CPU immediately */
ipi_setup(smp_processor_id());
}
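
In the new per-CPU (LPI) mode each CPU owns a contiguous block of nr_ipi interrupts starting at ipi_irq_base + cpu * nr_ipi, which is why ipi_handler() can recover the IPI number with a modulo regardless of which CPU's block fired. A small sketch of that mapping and its inverse, with arbitrary example numbers:

#include <stdio.h>

#define NR_IPI 8

/* LPI mode: each CPU gets its own block of NR_IPI interrupts. */
static int ipi_to_irq(int base, int cpu, int ipi)
{
	return base + cpu * NR_IPI + ipi;
}

/* ipi_handler() inverts this with a modulo, since one handler serves
 * every CPU's block. */
static int irq_to_ipi(int base, int irq)
{
	return (irq - base) % NR_IPI;
}

int main(void)
{
	int base = 64;	/* invented example base */

	for (int cpu = 0; cpu < 2; cpu++)
		for (int ipi = 0; ipi < 3; ipi++) {
			int irq = ipi_to_irq(base, cpu, ipi);
			printf("cpu%d ipi%d -> irq %d -> ipi %d\n",
			       cpu, ipi, irq, irq_to_ipi(base, irq));
		}
	return 0;
}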
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 1d9d51d7627f..3ebcf8c53fb0 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -152,6 +152,8 @@ kunwind_recover_return_address(struct kunwind_state *state)
orig_pc = kretprobe_find_ret_addr(state->task,
(void *)state->common.fp,
&state->kr_cur);
+ if (!orig_pc)
+ return -EINVAL;
state->common.pc = orig_pc;
state->flags.kretprobe = 1;
}
@@ -277,21 +279,24 @@ kunwind_next(struct kunwind_state *state)
typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);
-static __always_inline void
+static __always_inline int
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
void *cookie)
{
- if (kunwind_recover_return_address(state))
- return;
+ int ret;
- while (1) {
- int ret;
+ ret = kunwind_recover_return_address(state);
+ if (ret)
+ return ret;
+ while (1) {
if (!consume_state(state, cookie))
- break;
+ return -EINVAL;
ret = kunwind_next(state);
+ if (ret == -ENOENT)
+ return 0;
if (ret < 0)
- break;
+ return ret;
}
}
@@ -324,7 +329,7 @@ do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
: stackinfo_get_unknown(); \
})
-static __always_inline void
+static __always_inline int
kunwind_stack_walk(kunwind_consume_fn consume_state,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
@@ -332,10 +337,8 @@ kunwind_stack_walk(kunwind_consume_fn consume_state,
struct stack_info stacks[] = {
stackinfo_get_task(task),
STACKINFO_CPU(irq),
-#if defined(CONFIG_VMAP_STACK)
STACKINFO_CPU(overflow),
-#endif
-#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
+#if defined(CONFIG_ARM_SDE_INTERFACE)
STACKINFO_SDEI(normal),
STACKINFO_SDEI(critical),
#endif
@@ -352,7 +355,7 @@ kunwind_stack_walk(kunwind_consume_fn consume_state,
if (regs) {
if (task != current)
- return;
+ return -EINVAL;
kunwind_init_from_regs(&state, regs);
} else if (task == current) {
kunwind_init_from_caller(&state);
@@ -360,7 +363,7 @@ kunwind_stack_walk(kunwind_consume_fn consume_state,
kunwind_init_from_task(&state, task);
}
- do_kunwind(&state, consume_state, cookie);
+ return do_kunwind(&state, consume_state, cookie);
}
struct kunwind_consume_entry_data {
@@ -387,6 +390,36 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
+static __always_inline bool
+arch_reliable_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
+{
+ /*
+ * At an exception boundary we can reliably consume the saved PC. We do
+ * not know whether the LR was live when the exception was taken, and
+ * so we cannot perform the next unwind step reliably.
+ *
+ * All that matters is whether the *entire* unwind is reliable, so give
+ * up as soon as we hit an exception boundary.
+ */
+ if (state->source == KUNWIND_SOURCE_REGS_PC)
+ return false;
+
+ return arch_kunwind_consume_entry(state, cookie);
+}
+
+noinline noinstr int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie,
+ struct task_struct *task)
+{
+ struct kunwind_consume_entry_data data = {
+ .consume_entry = consume_entry,
+ .cookie = cookie,
+ };
+
+ return kunwind_stack_walk(arch_reliable_kunwind_consume_entry, &data,
+ task, NULL);
+}
+
struct bpf_unwind_consume_entry_data {
bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
void *cookie;
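
do_kunwind() now distinguishes a clean end of stack (-ENOENT from kunwind_next(), reported as 0) from a consumer or unwinder failure, which is what lets arch_stack_walk_reliable() report whether the entire walk was trustworthy. A toy unwinder showing the same return-value discipline; the frame model is invented:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct frame { int depth; bool exception_boundary; };

static int next_frame(struct frame *f)
{
	if (++f->depth >= 3)
		return -ENOENT;		/* final frame: clean end */
	return 0;
}

static bool consume(const struct frame *f)
{
	/* Reliable walks refuse frames found at an exception boundary. */
	return !f->exception_boundary;
}

static int walk(struct frame *f)
{
	while (1) {
		if (!consume(f))
			return -EINVAL;	/* consumer gave up: unreliable */
		int ret = next_frame(f);
		if (ret == -ENOENT)
			return 0;	/* complete and reliable */
		if (ret < 0)
			return ret;
	}
}

int main(void)
{
	struct frame ok = { 0, false }, bad = { 0, true };

	printf("clean stack:   %d\n", walk(&ok));
	printf("via exception: %d\n", walk(&bad));
	return 0;
}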
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 9bfa5c944379..f528b6041f6a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -454,7 +454,7 @@ void do_el0_undef(struct pt_regs *regs, unsigned long esr)
u32 insn;
/* check for AArch32 breakpoint instructions */
- if (!aarch32_break_handler(regs))
+ if (try_handle_aarch32_break(regs))
return;
if (user_insn_read(regs, &insn))
@@ -894,8 +894,6 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr)
"Bad EL0 synchronous exception");
}
-#ifdef CONFIG_VMAP_STACK
-
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
__aligned(16);
@@ -927,10 +925,10 @@ void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigne
nmi_panic(NULL, "kernel stack overflow");
cpu_park_loop();
}
-#endif
void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr)
{
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
console_verbose();
pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n",
@@ -987,7 +985,7 @@ void do_serror(struct pt_regs *regs, unsigned long esr)
int is_valid_bugaddr(unsigned long addr)
{
/*
- * bug_handler() only called for BRK #BUG_BRK_IMM.
+ * bug_brk_handler() only called for BRK #BUG_BRK_IMM.
* So the answer is trivial -- any spurious instances with no
* bug table entry will be rejected by report_bug() and passed
* back to the debug-monitors code and handled as a fatal
@@ -997,7 +995,7 @@ int is_valid_bugaddr(unsigned long addr)
}
#endif
-static int bug_handler(struct pt_regs *regs, unsigned long esr)
+int bug_brk_handler(struct pt_regs *regs, unsigned long esr)
{
switch (report_bug(regs->pc, regs)) {
case BUG_TRAP_TYPE_BUG:
@@ -1017,13 +1015,8 @@ static int bug_handler(struct pt_regs *regs, unsigned long esr)
return DBG_HOOK_HANDLED;
}
-static struct break_hook bug_break_hook = {
- .fn = bug_handler,
- .imm = BUG_BRK_IMM,
-};
-
#ifdef CONFIG_CFI_CLANG
-static int cfi_handler(struct pt_regs *regs, unsigned long esr)
+int cfi_brk_handler(struct pt_regs *regs, unsigned long esr)
{
unsigned long target;
u32 type;
@@ -1046,15 +1039,9 @@ static int cfi_handler(struct pt_regs *regs, unsigned long esr)
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
return DBG_HOOK_HANDLED;
}
-
-static struct break_hook cfi_break_hook = {
- .fn = cfi_handler,
- .imm = CFI_BRK_IMM_BASE,
- .mask = CFI_BRK_IMM_MASK,
-};
#endif /* CONFIG_CFI_CLANG */
-static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr)
+int reserved_fault_brk_handler(struct pt_regs *regs, unsigned long esr)
{
pr_err("%s generated an invalid instruction at %pS!\n",
"Kernel text patching",
@@ -1064,11 +1051,6 @@ static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr)
return DBG_HOOK_ERROR;
}
-static struct break_hook fault_break_hook = {
- .fn = reserved_fault_handler,
- .imm = FAULT_BRK_IMM,
-};
-
#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_ESR_RECOVER 0x20
@@ -1076,7 +1058,7 @@ static struct break_hook fault_break_hook = {
#define KASAN_ESR_SIZE_MASK 0x0f
#define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK))
-static int kasan_handler(struct pt_regs *regs, unsigned long esr)
+int kasan_brk_handler(struct pt_regs *regs, unsigned long esr)
{
bool recover = esr & KASAN_ESR_RECOVER;
bool write = esr & KASAN_ESR_WRITE;
@@ -1107,62 +1089,12 @@ static int kasan_handler(struct pt_regs *regs, unsigned long esr)
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
return DBG_HOOK_HANDLED;
}
-
-static struct break_hook kasan_break_hook = {
- .fn = kasan_handler,
- .imm = KASAN_BRK_IMM,
- .mask = KASAN_BRK_MASK,
-};
#endif
#ifdef CONFIG_UBSAN_TRAP
-static int ubsan_handler(struct pt_regs *regs, unsigned long esr)
+int ubsan_brk_handler(struct pt_regs *regs, unsigned long esr)
{
die(report_ubsan_failure(esr & UBSAN_BRK_MASK), regs, esr);
return DBG_HOOK_HANDLED;
}
-
-static struct break_hook ubsan_break_hook = {
- .fn = ubsan_handler,
- .imm = UBSAN_BRK_IMM,
- .mask = UBSAN_BRK_MASK,
-};
-#endif
-
-/*
- * Initial handler for AArch64 BRK exceptions
- * This handler only used until debug_traps_init().
- */
-int __init early_brk64(unsigned long addr, unsigned long esr,
- struct pt_regs *regs)
-{
-#ifdef CONFIG_CFI_CLANG
- if (esr_is_cfi_brk(esr))
- return cfi_handler(regs, esr) != DBG_HOOK_HANDLED;
-#endif
-#ifdef CONFIG_KASAN_SW_TAGS
- if ((esr_brk_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
- return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
-#endif
-#ifdef CONFIG_UBSAN_TRAP
- if (esr_is_ubsan_brk(esr))
- return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED;
-#endif
- return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
-}
-
-void __init trap_init(void)
-{
- register_kernel_break_hook(&bug_break_hook);
-#ifdef CONFIG_CFI_CLANG
- register_kernel_break_hook(&cfi_break_hook);
-#endif
- register_kernel_break_hook(&fault_break_hook);
-#ifdef CONFIG_KASAN_SW_TAGS
- register_kernel_break_hook(&kasan_break_hook);
-#endif
-#ifdef CONFIG_UBSAN_TRAP
- register_kernel_break_hook(&ubsan_break_hook);
#endif
- debug_traps_init();
-}
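
Handlers such as kasan_brk_handler() claim a range of BRK immediates by pairing a base immediate with a mask whose low bits carry handler-private payload (size, recover flag). A sketch of that ranged match; the 0x900/0xf values mirror the kernel's KASAN constants but should be treated as assumptions here:

#include <stdio.h>

#define KASAN_BRK_IMM   0x900
#define KASAN_BRK_MASK  0x0f	/* low bits carry size/recover metadata */

static int is_kasan_brk(unsigned int comment)
{
	/* A handler with a mask claims a whole range of immediates: the
	 * masked-out bits are handler-private payload. */
	return (comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM;
}

int main(void)
{
	printf("0x900 -> %d\n", is_kasan_brk(0x900));
	printf("0x90f -> %d\n", is_kasan_brk(0x90f));
	printf("0x910 -> %d\n", is_kasan_brk(0x910));
	return 0;
}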
diff --git a/arch/arm64/kernel/watchdog_hld.c b/arch/arm64/kernel/watchdog_hld.c
index dcd25322127c..3093037dcb7b 100644
--- a/arch/arm64/kernel/watchdog_hld.c
+++ b/arch/arm64/kernel/watchdog_hld.c
@@ -34,3 +34,61 @@ bool __init arch_perf_nmi_is_available(void)
*/
return arm_pmu_irq_is_nmi();
}
+
+static int watchdog_perf_update_period(void *data)
+{
+ int cpu = smp_processor_id();
+ u64 max_cpu_freq, new_period;
+
+ max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
+ if (!max_cpu_freq)
+ return 0;
+
+ new_period = watchdog_thresh * max_cpu_freq;
+ hardlockup_detector_perf_adjust_period(new_period);
+
+ return 0;
+}
+
+static int watchdog_freq_notifier_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_policy *policy = data;
+ int cpu;
+
+ if (val != CPUFREQ_CREATE_POLICY)
+ return NOTIFY_DONE;
+
+ /*
+ * Let each online CPU covered by the policy update the period on its
+ * own. This serializes with the framework's starting/stopping of the
+ * lockup detector (softlockup_{start,stop}_all) and avoids a potential
+ * race. Otherwise we may hit the theoretical race below:
+ * (core 0/1 share the same policy)
+ * [core 0] [core 1]
+ * hardlockup_detector_event_create()
+ * hw_nmi_get_sample_period()
+ * (cpufreq registered, notifier callback invoked)
+ * watchdog_freq_notifier_callback()
+ * watchdog_perf_update_period()
+ * (since core 1's event is not yet created,
+ * the period is not set)
+ * perf_event_create_kernel_counter()
+ * (event's period is SAFE_MAX_CPU_FREQ)
+ */
+ for_each_cpu(cpu, policy->cpus)
+ smp_call_on_cpu(cpu, watchdog_perf_update_period, NULL, false);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block watchdog_freq_notifier = {
+ .notifier_call = watchdog_freq_notifier_callback,
+};
+
+static int __init init_watchdog_freq_notifier(void)
+{
+ return cpufreq_register_notifier(&watchdog_freq_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+}
+core_initcall(init_watchdog_freq_notifier);
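
watchdog_perf_update_period() derives the perf event period as watchdog_thresh (seconds) times the CPU's maximum frequency in Hz, i.e. the number of cycles the hard-lockup threshold corresponds to. The arithmetic, with a made-up 2.8 GHz maximum:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_khz = 2800000;		/* hypothetical cpufreq max, kHz */
	uint64_t max_hz  = max_khz * 1000ULL;	/* as in the patch: kHz -> Hz */
	unsigned int watchdog_thresh = 10;	/* seconds */
	uint64_t new_period = (uint64_t)watchdog_thresh * max_hz; /* cycles */

	printf("perf event period: %llu cycles\n",
	       (unsigned long long)new_period);
	return 0;
}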
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 7c329e01c557..3ebc0570345c 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -23,7 +23,8 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
vgic/vgic-v3.o vgic/vgic-v4.o \
vgic/vgic-mmio.o vgic/vgic-mmio-v2.o \
vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
- vgic/vgic-its.o vgic/vgic-debug.o vgic/vgic-v3-nested.o
+ vgic/vgic-its.o vgic/vgic-debug.o vgic/vgic-v3-nested.o \
+ vgic/vgic-v5.o
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
kvm-$(CONFIG_ARM64_PTR_AUTH) += pauth.o
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 701ea10a63f1..dbd74e4885e2 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -830,7 +830,7 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
* by the guest (either FEAT_VHE or FEAT_E2H0 is implemented, but
* not both). This simplifies the handling of the EL1NV* bits.
*/
- if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+ if (is_nested_ctxt(vcpu)) {
u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
/* Use the VHE format for mental sanity */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 23dd3f3fc3eb..888f7c7abf54 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -408,6 +408,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
r = BIT(0);
break;
+ case KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED:
+ if (!kvm)
+ r = -EINVAL;
+ else
+ r = kvm_supports_cacheable_pfnmap();
+ break;
+
default:
r = 0;
}
@@ -521,7 +528,7 @@ static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
* Either we're running an L2 guest, and the API/APK bits come
* from L1's HCR_EL2, or API/APK are both set.
*/
- if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
+ if (unlikely(is_nested_ctxt(vcpu))) {
u64 val;
val = __vcpu_sys_reg(vcpu, HCR_EL2);
@@ -740,7 +747,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
*/
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
- bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
+ bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF | HCR_VSE);
+
return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
}
@@ -1183,6 +1191,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
preempt_disable();
+ kvm_nested_flush_hwstate(vcpu);
+
if (kvm_vcpu_has_pmu(vcpu))
kvm_pmu_flush_hwstate(vcpu);
@@ -1282,6 +1292,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, ret);
+ kvm_nested_sync_hwstate(vcpu);
+
preempt_enable();
/*
@@ -2765,19 +2777,15 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq);
}
-bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
- struct kvm_kernel_irq_routing_entry *new)
+void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
+ struct kvm_kernel_irq_routing_entry *old,
+ struct kvm_kernel_irq_routing_entry *new)
{
- if (old->type != KVM_IRQ_ROUTING_MSI ||
- new->type != KVM_IRQ_ROUTING_MSI)
- return true;
-
- return memcmp(&old->msi, &new->msi, sizeof(new->msi));
-}
+ if (old->type == KVM_IRQ_ROUTING_MSI &&
+ new->type == KVM_IRQ_ROUTING_MSI &&
+ !memcmp(&old->msi, &new->msi, sizeof(new->msi)))
+ return;
-int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set)
-{
/*
* Remapping the vLPI requires taking the its_lock mutex to resolve
* the new translation. We're in spinlock land at this point, so no
@@ -2785,7 +2793,7 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
*
* Unmap the vLPI and fall back to software LPI injection.
*/
- return kvm_vgic_v4_unset_forwarding(kvm, host_irq);
+ return kvm_vgic_v4_unset_forwarding(irqfd->kvm, irqfd->producer->irq);
}
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index a25be111cd8f..0e5610533949 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -1047,34 +1047,51 @@ static void compute_s1_overlay_permissions(struct kvm_vcpu *vcpu,
idx = FIELD_GET(PTE_PO_IDX_MASK, wr->desc);
- switch (wi->regime) {
- case TR_EL10:
- pov_perms = perm_idx(vcpu, POR_EL1, idx);
- uov_perms = perm_idx(vcpu, POR_EL0, idx);
- break;
- case TR_EL20:
- pov_perms = perm_idx(vcpu, POR_EL2, idx);
- uov_perms = perm_idx(vcpu, POR_EL0, idx);
- break;
- case TR_EL2:
- pov_perms = perm_idx(vcpu, POR_EL2, idx);
- uov_perms = 0;
- break;
- }
+ if (wr->pov) {
+ switch (wi->regime) {
+ case TR_EL10:
+ pov_perms = perm_idx(vcpu, POR_EL1, idx);
+ break;
+ case TR_EL20:
+ pov_perms = perm_idx(vcpu, POR_EL2, idx);
+ break;
+ case TR_EL2:
+ pov_perms = perm_idx(vcpu, POR_EL2, idx);
+ break;
+ }
+
+ if (pov_perms & ~POE_RWX)
+ pov_perms = POE_NONE;
- if (pov_perms & ~POE_RWX)
- pov_perms = POE_NONE;
+ /* R_QXXPC, S1PrivOverlay enabled */
+ if (wr->pwxn && (pov_perms & POE_X))
+ pov_perms &= ~POE_W;
- if (wi->poe && wr->pov) {
wr->pr &= pov_perms & POE_R;
wr->pw &= pov_perms & POE_W;
wr->px &= pov_perms & POE_X;
}
- if (uov_perms & ~POE_RWX)
- uov_perms = POE_NONE;
+ if (wr->uov) {
+ switch (wi->regime) {
+ case TR_EL10:
+ uov_perms = perm_idx(vcpu, POR_EL0, idx);
+ break;
+ case TR_EL20:
+ uov_perms = perm_idx(vcpu, POR_EL0, idx);
+ break;
+ case TR_EL2:
+ uov_perms = 0;
+ break;
+ }
+
+ if (uov_perms & ~POE_RWX)
+ uov_perms = POE_NONE;
+
+ /* R_NPBXC, S1UnprivOverlay enabled */
+ if (wr->uwxn && (uov_perms & POE_X))
+ uov_perms &= ~POE_W;
- if (wi->e0poe && wr->uov) {
wr->ur &= uov_perms & POE_R;
wr->uw &= uov_perms & POE_W;
wr->ux &= uov_perms & POE_X;
@@ -1095,24 +1112,15 @@ static void compute_s1_permissions(struct kvm_vcpu *vcpu,
if (!wi->hpd)
compute_s1_hierarchical_permissions(vcpu, wi, wr);
- if (wi->poe || wi->e0poe)
- compute_s1_overlay_permissions(vcpu, wi, wr);
+ compute_s1_overlay_permissions(vcpu, wi, wr);
- /* R_QXXPC */
- if (wr->pwxn) {
- if (!wr->pov && wr->pw)
- wr->px = false;
- if (wr->pov && wr->px)
- wr->pw = false;
- }
+ /* R_QXXPC, S1PrivOverlay disabled */
+ if (!wr->pov)
+ wr->px &= !(wr->pwxn && wr->pw);
- /* R_NPBXC */
- if (wr->uwxn) {
- if (!wr->uov && wr->uw)
- wr->ux = false;
- if (wr->uov && wr->ux)
- wr->uw = false;
- }
+ /* R_NPBXC, S1UnprivOverlay disabled */
+ if (!wr->uov)
+ wr->ux &= !(wr->uwxn && wr->uw);
pan = wi->pan && (wr->ur || wr->uw ||
(pan3_enabled(vcpu, wi->regime) && wr->ux));
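
The reworked at.c folds the WXN clamping (R_QXXPC/R_NPBXC) into the overlay-enabled path: when WXN is in force, an overlay permission set that still grants execute loses write. A toy version of that clamp, with the POE bits reduced to simple flags:

#include <stdbool.h>
#include <stdio.h>

#define POE_R 1
#define POE_W 2
#define POE_X 4

/* R_QXXPC with the overlay enabled: WXN strips W from any overlay
 * permission set that still grants X. */
static unsigned int clamp_overlay(unsigned int ov_perms, bool wxn)
{
	if (wxn && (ov_perms & POE_X))
		ov_perms &= ~POE_W;
	return ov_perms;
}

int main(void)
{
	unsigned int p = clamp_overlay(POE_R | POE_W | POE_X, true);

	printf("rwx + wxn -> r%d w%d x%d\n",
	       !!(p & POE_R), !!(p & POE_W), !!(p & POE_X));
	return 0;
}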
diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
index 54911a93b001..da66c4a14775 100644
--- a/arch/arm64/kvm/config.c
+++ b/arch/arm64/kvm/config.c
@@ -66,7 +66,6 @@ struct reg_bits_to_feat_map {
#define FEAT_BRBE ID_AA64DFR0_EL1, BRBE, IMP
#define FEAT_TRC_SR ID_AA64DFR0_EL1, TraceVer, IMP
#define FEAT_PMUv3 ID_AA64DFR0_EL1, PMUVer, IMP
-#define FEAT_PMUv3p9 ID_AA64DFR0_EL1, PMUVer, V3P9
#define FEAT_TRBE ID_AA64DFR0_EL1, TraceBuffer, IMP
#define FEAT_TRBEv1p1 ID_AA64DFR0_EL1, TraceBuffer, TRBE_V1P1
#define FEAT_DoubleLock ID_AA64DFR0_EL1, DoubleLock, IMP
@@ -89,6 +88,7 @@ struct reg_bits_to_feat_map {
#define FEAT_RASv2 ID_AA64PFR0_EL1, RAS, V2
#define FEAT_GICv3 ID_AA64PFR0_EL1, GIC, IMP
#define FEAT_LOR ID_AA64MMFR1_EL1, LO, IMP
+#define FEAT_SPEv1p2 ID_AA64DFR0_EL1, PMSVer, V1P2
#define FEAT_SPEv1p4 ID_AA64DFR0_EL1, PMSVer, V1P4
#define FEAT_SPEv1p5 ID_AA64DFR0_EL1, PMSVer, V1P5
#define FEAT_ATS1A ID_AA64ISAR2_EL1, ATS1A, IMP
@@ -131,6 +131,27 @@ struct reg_bits_to_feat_map {
#define FEAT_SPMU ID_AA64DFR1_EL1, SPMU, IMP
#define FEAT_SPE_nVM ID_AA64DFR2_EL1, SPE_nVM, IMP
#define FEAT_STEP2 ID_AA64DFR2_EL1, STEP, IMP
+#define FEAT_SYSREG128 ID_AA64ISAR2_EL1, SYSREG_128, IMP
+#define FEAT_CPA2 ID_AA64ISAR3_EL1, CPA, CPA2
+#define FEAT_ASID2 ID_AA64MMFR4_EL1, ASID2, IMP
+#define FEAT_MEC ID_AA64MMFR3_EL1, MEC, IMP
+#define FEAT_HAFT ID_AA64MMFR1_EL1, HAFDBS, HAFT
+#define FEAT_BTI ID_AA64PFR1_EL1, BT, IMP
+#define FEAT_ExS ID_AA64MMFR0_EL1, EXS, IMP
+#define FEAT_IESB ID_AA64MMFR2_EL1, IESB, IMP
+#define FEAT_LSE2 ID_AA64MMFR2_EL1, AT, IMP
+#define FEAT_LSMAOC ID_AA64MMFR2_EL1, LSM, IMP
+#define FEAT_MixedEnd ID_AA64MMFR0_EL1, BIGEND, IMP
+#define FEAT_MixedEndEL0 ID_AA64MMFR0_EL1, BIGENDEL0, IMP
+#define FEAT_MTE2 ID_AA64PFR1_EL1, MTE, MTE2
+#define FEAT_MTE_ASYNC ID_AA64PFR1_EL1, MTE_frac, ASYNC
+#define FEAT_MTE_STORE_ONLY ID_AA64PFR2_EL1, MTESTOREONLY, IMP
+#define FEAT_PAN ID_AA64MMFR1_EL1, PAN, IMP
+#define FEAT_PAN3 ID_AA64MMFR1_EL1, PAN, PAN3
+#define FEAT_SSBS ID_AA64PFR1_EL1, SSBS, IMP
+#define FEAT_TIDCP1 ID_AA64MMFR1_EL1, TIDCP1, IMP
+#define FEAT_FGT ID_AA64MMFR0_EL1, FGT, IMP
+#define FEAT_MTPMU ID_AA64DFR0_EL1, MTPMU, IMP
static bool not_feat_aa64el3(struct kvm *kvm)
{
@@ -218,11 +239,62 @@ static bool feat_trbe_mpam(struct kvm *kvm)
(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_MPAM));
}
+static bool feat_asid2_e2h1(struct kvm *kvm)
+{
+ return kvm_has_feat(kvm, FEAT_ASID2) && !kvm_has_feat(kvm, FEAT_E2H0);
+}
+
+static bool feat_d128_e2h1(struct kvm *kvm)
+{
+ return kvm_has_feat(kvm, FEAT_D128) && !kvm_has_feat(kvm, FEAT_E2H0);
+}
+
+static bool feat_mec_e2h1(struct kvm *kvm)
+{
+ return kvm_has_feat(kvm, FEAT_MEC) && !kvm_has_feat(kvm, FEAT_E2H0);
+}
+
static bool feat_ebep_pmuv3_ss(struct kvm *kvm)
{
return kvm_has_feat(kvm, FEAT_EBEP) || kvm_has_feat(kvm, FEAT_PMUv3_SS);
}
+static bool feat_mixedendel0(struct kvm *kvm)
+{
+ return kvm_has_feat(kvm, FEAT_MixedEnd) || kvm_has_feat(kvm, FEAT_MixedEndEL0);
+}
+
+static bool feat_mte_async(struct kvm *kvm)
+{
+ return kvm_has_feat(kvm, FEAT_MTE2) && kvm_has_feat_enum(kvm, FEAT_MTE_ASYNC);
+}
+
+#define check_pmu_revision(k, r) \
+ ({ \
+ (kvm_has_feat((k), ID_AA64DFR0_EL1, PMUVer, r) && \
+ !kvm_has_feat((k), ID_AA64DFR0_EL1, PMUVer, IMP_DEF)); \
+ })
+
+static bool feat_pmuv3p1(struct kvm *kvm)
+{
+ return check_pmu_revision(kvm, V3P1);
+}
+
+static bool feat_pmuv3p5(struct kvm *kvm)
+{
+ return check_pmu_revision(kvm, V3P5);
+}
+
+static bool feat_pmuv3p7(struct kvm *kvm)
+{
+ return check_pmu_revision(kvm, V3P7);
+}
+
+static bool feat_pmuv3p9(struct kvm *kvm)
+{
+ return check_pmu_revision(kvm, V3P9);
+}
+
static bool compute_hcr_rw(struct kvm *kvm, u64 *bits)
{
/* This is purely academic: AArch32 and NV are mutually exclusive */
@@ -681,7 +753,7 @@ static const struct reg_bits_to_feat_map hdfgrtr2_feat_map[] = {
NEEDS_FEAT(HDFGRTR2_EL2_nPMICFILTR_EL0 |
HDFGRTR2_EL2_nPMICNTR_EL0,
FEAT_PMUv3_ICNTR),
- NEEDS_FEAT(HDFGRTR2_EL2_nPMUACR_EL1, FEAT_PMUv3p9),
+ NEEDS_FEAT(HDFGRTR2_EL2_nPMUACR_EL1, feat_pmuv3p9),
NEEDS_FEAT(HDFGRTR2_EL2_nPMSSCR_EL1 |
HDFGRTR2_EL2_nPMSSDATA,
FEAT_PMUv3_SS),
@@ -713,7 +785,7 @@ static const struct reg_bits_to_feat_map hdfgwtr2_feat_map[] = {
FEAT_PMUv3_ICNTR),
NEEDS_FEAT(HDFGWTR2_EL2_nPMUACR_EL1 |
HDFGWTR2_EL2_nPMZR_EL0,
- FEAT_PMUv3p9),
+ feat_pmuv3p9),
NEEDS_FEAT(HDFGWTR2_EL2_nPMSSCR_EL1, FEAT_PMUv3_SS),
NEEDS_FEAT(HDFGWTR2_EL2_nPMIAR_EL1, FEAT_SEBEP),
NEEDS_FEAT(HDFGWTR2_EL2_nPMSDSFR_EL1, feat_spe_fds),
@@ -832,6 +904,150 @@ static const struct reg_bits_to_feat_map hcr_feat_map[] = {
NEEDS_FEAT_FIXED(HCR_EL2_E2H, compute_hcr_e2h),
};
+static const struct reg_bits_to_feat_map sctlr2_feat_map[] = {
+ NEEDS_FEAT(SCTLR2_EL1_NMEA |
+ SCTLR2_EL1_EASE,
+ FEAT_DoubleFault2),
+ NEEDS_FEAT(SCTLR2_EL1_EnADERR, feat_aderr),
+ NEEDS_FEAT(SCTLR2_EL1_EnANERR, feat_anerr),
+ NEEDS_FEAT(SCTLR2_EL1_EnIDCP128, FEAT_SYSREG128),
+ NEEDS_FEAT(SCTLR2_EL1_EnPACM |
+ SCTLR2_EL1_EnPACM0,
+ feat_pauth_lr),
+ NEEDS_FEAT(SCTLR2_EL1_CPTA |
+ SCTLR2_EL1_CPTA0 |
+ SCTLR2_EL1_CPTM |
+ SCTLR2_EL1_CPTM0,
+ FEAT_CPA2),
+};
+
+static const struct reg_bits_to_feat_map tcr2_el2_feat_map[] = {
+ NEEDS_FEAT(TCR2_EL2_FNG1 |
+ TCR2_EL2_FNG0 |
+ TCR2_EL2_A2,
+ feat_asid2_e2h1),
+ NEEDS_FEAT(TCR2_EL2_DisCH1 |
+ TCR2_EL2_DisCH0 |
+ TCR2_EL2_D128,
+ feat_d128_e2h1),
+ NEEDS_FEAT(TCR2_EL2_AMEC1, feat_mec_e2h1),
+ NEEDS_FEAT(TCR2_EL2_AMEC0, FEAT_MEC),
+ NEEDS_FEAT(TCR2_EL2_HAFT, FEAT_HAFT),
+ NEEDS_FEAT(TCR2_EL2_PTTWI |
+ TCR2_EL2_PnCH,
+ FEAT_THE),
+ NEEDS_FEAT(TCR2_EL2_AIE, FEAT_AIE),
+ NEEDS_FEAT(TCR2_EL2_POE |
+ TCR2_EL2_E0POE,
+ FEAT_S1POE),
+ NEEDS_FEAT(TCR2_EL2_PIE, FEAT_S1PIE),
+};
+
+static const struct reg_bits_to_feat_map sctlr_el1_feat_map[] = {
+ NEEDS_FEAT(SCTLR_EL1_CP15BEN |
+ SCTLR_EL1_ITD |
+ SCTLR_EL1_SED,
+ FEAT_AA32EL0),
+ NEEDS_FEAT(SCTLR_EL1_BT0 |
+ SCTLR_EL1_BT1,
+ FEAT_BTI),
+ NEEDS_FEAT(SCTLR_EL1_CMOW, FEAT_CMOW),
+ NEEDS_FEAT(SCTLR_EL1_TSCXT, feat_csv2_2_csv2_1p2),
+ NEEDS_FEAT(SCTLR_EL1_EIS |
+ SCTLR_EL1_EOS,
+ FEAT_ExS),
+ NEEDS_FEAT(SCTLR_EL1_EnFPM, FEAT_FPMR),
+ NEEDS_FEAT(SCTLR_EL1_IESB, FEAT_IESB),
+ NEEDS_FEAT(SCTLR_EL1_EnALS, FEAT_LS64),
+ NEEDS_FEAT(SCTLR_EL1_EnAS0, FEAT_LS64_ACCDATA),
+ NEEDS_FEAT(SCTLR_EL1_EnASR, FEAT_LS64_V),
+ NEEDS_FEAT(SCTLR_EL1_nAA, FEAT_LSE2),
+ NEEDS_FEAT(SCTLR_EL1_LSMAOE |
+ SCTLR_EL1_nTLSMD,
+ FEAT_LSMAOC),
+ NEEDS_FEAT(SCTLR_EL1_EE, FEAT_MixedEnd),
+ NEEDS_FEAT(SCTLR_EL1_E0E, feat_mixedendel0),
+ NEEDS_FEAT(SCTLR_EL1_MSCEn, FEAT_MOPS),
+ NEEDS_FEAT(SCTLR_EL1_ATA0 |
+ SCTLR_EL1_ATA |
+ SCTLR_EL1_TCF0 |
+ SCTLR_EL1_TCF,
+ FEAT_MTE2),
+ NEEDS_FEAT(SCTLR_EL1_ITFSB, feat_mte_async),
+ NEEDS_FEAT(SCTLR_EL1_TCSO0 |
+ SCTLR_EL1_TCSO,
+ FEAT_MTE_STORE_ONLY),
+ NEEDS_FEAT(SCTLR_EL1_NMI |
+ SCTLR_EL1_SPINTMASK,
+ FEAT_NMI),
+ NEEDS_FEAT(SCTLR_EL1_SPAN, FEAT_PAN),
+ NEEDS_FEAT(SCTLR_EL1_EPAN, FEAT_PAN3),
+ NEEDS_FEAT(SCTLR_EL1_EnDA |
+ SCTLR_EL1_EnDB |
+ SCTLR_EL1_EnIA |
+ SCTLR_EL1_EnIB,
+ feat_pauth),
+ NEEDS_FEAT(SCTLR_EL1_EnTP2, FEAT_SME),
+ NEEDS_FEAT(SCTLR_EL1_EnRCTX, FEAT_SPECRES),
+ NEEDS_FEAT(SCTLR_EL1_DSSBS, FEAT_SSBS),
+ NEEDS_FEAT(SCTLR_EL1_TIDCP, FEAT_TIDCP1),
+ NEEDS_FEAT(SCTLR_EL1_TME0 |
+ SCTLR_EL1_TME |
+ SCTLR_EL1_TMT0 |
+ SCTLR_EL1_TMT,
+ FEAT_TME),
+ NEEDS_FEAT(SCTLR_EL1_TWEDEL |
+ SCTLR_EL1_TWEDEn,
+ FEAT_TWED),
+ NEEDS_FEAT(SCTLR_EL1_UCI |
+ SCTLR_EL1_EE |
+ SCTLR_EL1_E0E |
+ SCTLR_EL1_WXN |
+ SCTLR_EL1_nTWE |
+ SCTLR_EL1_nTWI |
+ SCTLR_EL1_UCT |
+ SCTLR_EL1_DZE |
+ SCTLR_EL1_I |
+ SCTLR_EL1_UMA |
+ SCTLR_EL1_SA0 |
+ SCTLR_EL1_SA |
+ SCTLR_EL1_C |
+ SCTLR_EL1_A |
+ SCTLR_EL1_M,
+ FEAT_AA64EL1),
+};
+
+static const struct reg_bits_to_feat_map mdcr_el2_feat_map[] = {
+ NEEDS_FEAT(MDCR_EL2_EBWE, FEAT_Debugv8p9),
+ NEEDS_FEAT(MDCR_EL2_TDOSA, FEAT_DoubleLock),
+ NEEDS_FEAT(MDCR_EL2_PMEE, FEAT_EBEP),
+ NEEDS_FEAT(MDCR_EL2_TDCC, FEAT_FGT),
+ NEEDS_FEAT(MDCR_EL2_MTPME, FEAT_MTPMU),
+ NEEDS_FEAT(MDCR_EL2_HPME |
+ MDCR_EL2_HPMN |
+ MDCR_EL2_TPMCR |
+ MDCR_EL2_TPM,
+ FEAT_PMUv3),
+ NEEDS_FEAT(MDCR_EL2_HPMD, feat_pmuv3p1),
+ NEEDS_FEAT(MDCR_EL2_HCCD |
+ MDCR_EL2_HLP,
+ feat_pmuv3p5),
+ NEEDS_FEAT(MDCR_EL2_HPMFZO, feat_pmuv3p7),
+ NEEDS_FEAT(MDCR_EL2_PMSSE, FEAT_PMUv3_SS),
+ NEEDS_FEAT(MDCR_EL2_E2PB |
+ MDCR_EL2_TPMS,
+ FEAT_SPE),
+ NEEDS_FEAT(MDCR_EL2_HPMFZS, FEAT_SPEv1p2),
+ NEEDS_FEAT(MDCR_EL2_EnSPM, FEAT_SPMU),
+ NEEDS_FEAT(MDCR_EL2_EnSTEPOP, FEAT_STEP2),
+ NEEDS_FEAT(MDCR_EL2_E2TB, FEAT_TRBE),
+ NEEDS_FEAT(MDCR_EL2_TTRF, FEAT_TRF),
+ NEEDS_FEAT(MDCR_EL2_TDA |
+ MDCR_EL2_TDE |
+ MDCR_EL2_TDRA,
+ FEAT_AA64EL1),
+};
+
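
The feature-map tables above all feed one generic computation: any bit whose feature (or predicate) is absent for this VM becomes RES0. compute_res0_bits() itself is not part of this hunk; a sketch of its assumed shape, reusing the idreg_feat_match() helper that appears below (field and parameter names are illustrative only):

    /* Assumed shape of compute_res0_bits() -- not the patch's definition. */
    static u64 compute_res0_bits(struct kvm *kvm,
                                 const struct reg_bits_to_feat_map *map,
                                 int map_size, u64 require, u64 exclude)
    {
            u64 res0 = 0;

            for (int i = 0; i < map_size; i++) {
                    /* Feature absent for this VM: its bits read as RES0. */
                    if (!idreg_feat_match(kvm, &map[i]))
                            res0 |= map[i].bits;
            }

            return res0;
    }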
static void __init check_feat_map(const struct reg_bits_to_feat_map *map,
int map_size, u64 res0, const char *str)
{
@@ -863,6 +1079,14 @@ void __init check_feature_map(void)
__HCRX_EL2_RES0, "HCRX_EL2");
check_feat_map(hcr_feat_map, ARRAY_SIZE(hcr_feat_map),
HCR_EL2_RES0, "HCR_EL2");
+ check_feat_map(sctlr2_feat_map, ARRAY_SIZE(sctlr2_feat_map),
+ SCTLR2_EL1_RES0, "SCTLR2_EL1");
+ check_feat_map(tcr2_el2_feat_map, ARRAY_SIZE(tcr2_el2_feat_map),
+ TCR2_EL2_RES0, "TCR2_EL2");
+ check_feat_map(sctlr_el1_feat_map, ARRAY_SIZE(sctlr_el1_feat_map),
+ SCTLR_EL1_RES0, "SCTLR_EL1");
+ check_feat_map(mdcr_el2_feat_map, ARRAY_SIZE(mdcr_el2_feat_map),
+ MDCR_EL2_RES0, "MDCR_EL2");
}
static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map *map)
@@ -1077,6 +1301,31 @@ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *r
*res0 |= HCR_EL2_RES0 | (mask & ~fixed);
*res1 = HCR_EL2_RES1 | (mask & fixed);
break;
+ case SCTLR2_EL1:
+ case SCTLR2_EL2:
+ *res0 = compute_res0_bits(kvm, sctlr2_feat_map,
+ ARRAY_SIZE(sctlr2_feat_map), 0, 0);
+ *res0 |= SCTLR2_EL1_RES0;
+ *res1 = SCTLR2_EL1_RES1;
+ break;
+ case TCR2_EL2:
+ *res0 = compute_res0_bits(kvm, tcr2_el2_feat_map,
+ ARRAY_SIZE(tcr2_el2_feat_map), 0, 0);
+ *res0 |= TCR2_EL2_RES0;
+ *res1 = TCR2_EL2_RES1;
+ break;
+ case SCTLR_EL1:
+ *res0 = compute_res0_bits(kvm, sctlr_el1_feat_map,
+ ARRAY_SIZE(sctlr_el1_feat_map), 0, 0);
+ *res0 |= SCTLR_EL1_RES0;
+ *res1 = SCTLR_EL1_RES1;
+ break;
+ case MDCR_EL2:
+ *res0 = compute_res0_bits(kvm, mdcr_el2_feat_map,
+ ARRAY_SIZE(mdcr_el2_feat_map), 0, 0);
+ *res0 |= MDCR_EL2_RES0;
+ *res1 = MDCR_EL2_RES1;
+ break;
default:
WARN_ON_ONCE(1);
*res0 = *res1 = 0;
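
On the feat_pmuv3p*() predicates and check_pmu_revision() earlier in this file: kvm_has_feat() is an "at least this version" check on the ID register field, and the IMP_DEF encoding of ID_AA64DFR0_EL1.PMUVer (0xF) sits numerically above every architected revision, so it would otherwise satisfy any minimum-revision test. A minimal sketch of the idea, assuming an unsigned field comparison:

    /* Illustrative only -- mirrors the logic of check_pmu_revision(). */
    static bool pmu_at_least(u8 pmuver, u8 min)
    {
            /* 0xF (IMP_DEF) would pass ">= min"; exclude it explicitly. */
            return pmuver >= min && pmuver != 0xf;
    }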
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 1a7dab333f55..381382c19fe4 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -81,6 +81,10 @@ void kvm_init_host_debug_data(void)
!(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P))
host_data_set_flag(HAS_SPE);
+ /* Check if we have BRBE implemented and available at the host */
+ if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT))
+ host_data_set_flag(HAS_BRBE);
+
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT)) {
/* Force disable trace in protected mode in case of no TRBE */
if (is_protected_kvm_enabled())
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 3a384e9660b8..90cb4b7ae0ff 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -88,6 +88,7 @@ enum cgt_group_id {
CGT_HCRX_EnFPM,
CGT_HCRX_TCR2En,
+ CGT_HCRX_SCTLR2En,
CGT_CNTHCTL_EL1TVT,
CGT_CNTHCTL_EL1TVCT,
@@ -108,6 +109,7 @@ enum cgt_group_id {
CGT_HCR_TTLB_TTLBOS,
CGT_HCR_TVM_TRVM,
CGT_HCR_TVM_TRVM_HCRX_TCR2En,
+ CGT_HCR_TVM_TRVM_HCRX_SCTLR2En,
CGT_HCR_TPU_TICAB,
CGT_HCR_TPU_TOCU,
CGT_HCR_NV1_nNV2_ENSCXT,
@@ -398,6 +400,12 @@ static const struct trap_bits coarse_trap_bits[] = {
.mask = HCRX_EL2_TCR2En,
.behaviour = BEHAVE_FORWARD_RW,
},
+ [CGT_HCRX_SCTLR2En] = {
+ .index = HCRX_EL2,
+ .value = 0,
+ .mask = HCRX_EL2_SCTLR2En,
+ .behaviour = BEHAVE_FORWARD_RW,
+ },
[CGT_CNTHCTL_EL1TVT] = {
.index = CNTHCTL_EL2,
.value = CNTHCTL_EL1TVT,
@@ -449,6 +457,8 @@ static const enum cgt_group_id *coarse_control_combo[] = {
MCB(CGT_HCR_TVM_TRVM, CGT_HCR_TVM, CGT_HCR_TRVM),
MCB(CGT_HCR_TVM_TRVM_HCRX_TCR2En,
CGT_HCR_TVM, CGT_HCR_TRVM, CGT_HCRX_TCR2En),
+ MCB(CGT_HCR_TVM_TRVM_HCRX_SCTLR2En,
+ CGT_HCR_TVM, CGT_HCR_TRVM, CGT_HCRX_SCTLR2En),
MCB(CGT_HCR_TPU_TICAB, CGT_HCR_TPU, CGT_HCR_TICAB),
MCB(CGT_HCR_TPU_TOCU, CGT_HCR_TPU, CGT_HCR_TOCU),
MCB(CGT_HCR_NV1_nNV2_ENSCXT, CGT_HCR_NV1_nNV2, CGT_HCR_ENSCXT),
@@ -782,6 +792,7 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(OP_TLBI_RVALE1OSNXS, CGT_HCR_TTLB_TTLBOS),
SR_TRAP(OP_TLBI_RVAALE1OSNXS, CGT_HCR_TTLB_TTLBOS),
SR_TRAP(SYS_SCTLR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_SCTLR2_EL1, CGT_HCR_TVM_TRVM_HCRX_SCTLR2En),
SR_TRAP(SYS_TTBR0_EL1, CGT_HCR_TVM_TRVM),
SR_TRAP(SYS_TTBR1_EL1, CGT_HCR_TVM_TRVM),
SR_TRAP(SYS_TCR_EL1, CGT_HCR_TVM_TRVM),
@@ -1354,6 +1365,7 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
SR_FGT(SYS_SCXTNUM_EL0, HFGRTR, SCXTNUM_EL0, 1),
SR_FGT(SYS_SCXTNUM_EL1, HFGRTR, SCXTNUM_EL1, 1),
SR_FGT(SYS_SCTLR_EL1, HFGRTR, SCTLR_EL1, 1),
+ SR_FGT(SYS_SCTLR2_EL1, HFGRTR, SCTLR_EL1, 1),
SR_FGT(SYS_REVIDR_EL1, HFGRTR, REVIDR_EL1, 1),
SR_FGT(SYS_PAR_EL1, HFGRTR, PAR_EL1, 1),
SR_FGT(SYS_MPIDR_EL1, HFGRTR, MPIDR_EL1, 1),
@@ -2592,13 +2604,8 @@ inject:
static bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg, u64 control_bit)
{
- bool control_bit_set;
-
- if (!vcpu_has_nv(vcpu))
- return false;
-
- control_bit_set = __vcpu_sys_reg(vcpu, reg) & control_bit;
- if (!is_hyp_ctxt(vcpu) && control_bit_set) {
+ if (is_nested_ctxt(vcpu) &&
+ (__vcpu_sys_reg(vcpu, reg) & control_bit)) {
kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
return true;
}
@@ -2719,6 +2726,9 @@ static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
case except_type_irq:
kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_IRQ);
break;
+ case except_type_serror:
+ kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SERR);
+ break;
default:
WARN_ONCE(1, "Unsupported EL2 exception injection %d\n", type);
}
@@ -2816,3 +2826,28 @@ int kvm_inject_nested_irq(struct kvm_vcpu *vcpu)
/* esr_el2 value doesn't matter for exits due to irqs. */
return kvm_inject_nested(vcpu, 0, except_type_irq);
}
+
+int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
+{
+ u64 esr = FIELD_PREP(ESR_ELx_EC_MASK,
+ iabt ? ESR_ELx_EC_IABT_LOW : ESR_ELx_EC_DABT_LOW);
+ esr |= ESR_ELx_FSC_EXTABT | ESR_ELx_IL;
+
+ vcpu_write_sys_reg(vcpu, FAR_EL2, addr);
+
+ if (__vcpu_sys_reg(vcpu, SCTLR2_EL2) & SCTLR2_EL1_EASE)
+ return kvm_inject_nested(vcpu, esr, except_type_serror);
+
+ return kvm_inject_nested_sync(vcpu, esr);
+}
+
+int kvm_inject_nested_serror(struct kvm_vcpu *vcpu, u64 esr)
+{
+ /*
+ * Hardware sets up the EC field when propagating the ESR as a result of
+ * vSError injection. Manually populate EC for an emulated SError
+ * exception.
+ */
+ esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SERROR);
+ return kvm_inject_nested(vcpu, esr, except_type_serror);
+}
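
To make the EC fix-up in kvm_inject_nested_serror() concrete: when hardware propagates a vSError it populates ESR_ELx.EC itself, but an emulated injection must compose the full ESR. A sketch, assuming the usual ESR_ELx layout (EC in bits [31:26], ESR_ELx_EC_SERROR == 0x2f) and some syndrome value vsesr:

    /* Illustrative only: building an SError ESR by hand. */
    u64 esr = vsesr & ESR_ELx_ISS_MASK;                     /* syndrome bits */
    esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SERROR);  /* EC = SError   */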
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 2196979a24a3..16ba5e9ac86c 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -818,8 +818,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
- events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
events->exception.serror_has_esr = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
+ events->exception.serror_pending = (vcpu->arch.hcr_el2 & HCR_VSE) ||
+ vcpu_get_flag(vcpu, NESTED_SERROR_PENDING);
if (events->exception.serror_pending && events->exception.serror_has_esr)
events->exception.serror_esr = vcpu_get_vsesr(vcpu);
@@ -833,29 +834,62 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
return 0;
}
+static void commit_pending_events(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu_get_flag(vcpu, PENDING_EXCEPTION))
+ return;
+
+ /*
+ * Reset the MMIO emulation state to avoid stepping PC after emulating
+ * the exception entry.
+ */
+ vcpu->mmio_needed = false;
+ kvm_call_hyp(__kvm_adjust_pc, vcpu);
+}
+
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
bool serror_pending = events->exception.serror_pending;
bool has_esr = events->exception.serror_has_esr;
bool ext_dabt_pending = events->exception.ext_dabt_pending;
+ u64 esr = events->exception.serror_esr;
+ int ret = 0;
- if (serror_pending && has_esr) {
- if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
- return -EINVAL;
-
- if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
- kvm_set_sei_esr(vcpu, events->exception.serror_esr);
- else
- return -EINVAL;
- } else if (serror_pending) {
- kvm_inject_vabt(vcpu);
+ /*
+ * Immediately commit the pending SEA to the vCPU's architectural
+ * state, which is necessary since we do not return a pending SEA
+ * to userspace via KVM_GET_VCPU_EVENTS.
+ */
+ if (ext_dabt_pending) {
+ ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+ commit_pending_events(vcpu);
}
- if (ext_dabt_pending)
- kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+ if (ret < 0)
+ return ret;
- return 0;
+ if (!serror_pending)
+ return 0;
+
+ if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && has_esr)
+ return -EINVAL;
+
+ if (has_esr && (esr & ~ESR_ELx_ISS_MASK))
+ return -EINVAL;
+
+ if (has_esr)
+ ret = kvm_inject_serror_esr(vcpu, esr);
+ else
+ ret = kvm_inject_serror(vcpu);
+
+ /*
+ * We could've decided that the SError is due for immediate software
+ * injection; commit the exception in case userspace decides it wants
+ * to inject more exceptions for some strange reason.
+ */
+ commit_pending_events(vcpu);
+ return (ret < 0) ? ret : 0;
}
u32 __attribute_const__ kvm_target_cpu(void)
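
The reworked __kvm_arm_vcpu_set_events() keeps the existing KVM_SET_VCPU_EVENTS ABI. A VMM injecting an SError with a syndrome might do something like the following (a userspace sketch, assuming an open vCPU fd and a host with FEAT_RAS):

    struct kvm_vcpu_events ev = {};

    ev.exception.serror_pending = 1;
    ev.exception.serror_has_esr = 1;    /* rejected without ARM64_HAS_RAS_EXTN */
    ev.exception.serror_esr     = 0x1;  /* ISS bits only; anything else -EINVAL */

    if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev))
            err(1, "KVM_SET_VCPU_EVENTS");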
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 453266c96481..a598072f36d2 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -32,7 +32,7 @@ typedef int (*exit_handle_fn)(struct kvm_vcpu *);
static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
- kvm_inject_vabt(vcpu);
+ kvm_inject_serror(vcpu);
}
static int handle_hvc(struct kvm_vcpu *vcpu)
@@ -252,7 +252,7 @@ static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
return 1;
}
- if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+ if (is_nested_ctxt(vcpu)) {
kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
return 1;
}
@@ -311,12 +311,11 @@ static int kvm_handle_gcs(struct kvm_vcpu *vcpu)
static int handle_other(struct kvm_vcpu *vcpu)
{
- bool is_l2 = vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
+ bool allowed, fwd = is_nested_ctxt(vcpu);
u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
u64 esr = kvm_vcpu_get_esr(vcpu);
u64 iss = ESR_ELx_ISS(esr);
struct kvm *kvm = vcpu->kvm;
- bool allowed, fwd = false;
/*
* We only trap for two reasons:
@@ -335,28 +334,23 @@ static int handle_other(struct kvm_vcpu *vcpu)
switch (iss) {
case ESR_ELx_ISS_OTHER_ST64BV:
allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
- if (is_l2)
- fwd = !(hcrx & HCRX_EL2_EnASR);
+ fwd &= !(hcrx & HCRX_EL2_EnASR);
break;
case ESR_ELx_ISS_OTHER_ST64BV0:
allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
- if (is_l2)
- fwd = !(hcrx & HCRX_EL2_EnAS0);
+ fwd &= !(hcrx & HCRX_EL2_EnAS0);
break;
case ESR_ELx_ISS_OTHER_LDST64B:
allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
- if (is_l2)
- fwd = !(hcrx & HCRX_EL2_EnALS);
+ fwd &= !(hcrx & HCRX_EL2_EnALS);
break;
case ESR_ELx_ISS_OTHER_TSBCSYNC:
allowed = kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, TRBE_V1P1);
- if (is_l2)
- fwd = (__vcpu_sys_reg(vcpu, HFGITR2_EL2) & HFGITR2_EL2_TSBCSYNC);
+ fwd &= (__vcpu_sys_reg(vcpu, HFGITR2_EL2) & HFGITR2_EL2_TSBCSYNC);
break;
case ESR_ELx_ISS_OTHER_PSBCSYNC:
allowed = kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P5);
- if (is_l2)
- fwd = (__vcpu_sys_reg(vcpu, HFGITR_EL2) & HFGITR_EL2_PSBCSYNC);
+ fwd &= (__vcpu_sys_reg(vcpu, HFGITR_EL2) & HFGITR_EL2_PSBCSYNC);
break;
default:
/* Clearly, we're missing something. */
@@ -496,7 +490,7 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
} else {
- kvm_inject_vabt(vcpu);
+ kvm_inject_serror(vcpu);
}
return;
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index 6a2a899a344e..95d186e0bf54 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -26,7 +26,8 @@ static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
if (unlikely(vcpu_has_nv(vcpu)))
return vcpu_read_sys_reg(vcpu, reg);
- else if (__vcpu_read_sys_reg_from_cpu(reg, &val))
+ else if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
+ __vcpu_read_sys_reg_from_cpu(reg, &val))
return val;
return __vcpu_sys_reg(vcpu, reg);
@@ -36,7 +37,8 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
if (unlikely(vcpu_has_nv(vcpu)))
vcpu_write_sys_reg(vcpu, val, reg);
- else if (!__vcpu_write_sys_reg_to_cpu(val, reg))
+ else if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU) ||
+ !__vcpu_write_sys_reg_to_cpu(val, reg))
__vcpu_assign_sys_reg(vcpu, reg, val);
}
@@ -339,6 +341,10 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
break;
+ case unpack_vcpu_flag(EXCEPT_AA64_EL1_SERR):
+ enter_exception64(vcpu, PSR_MODE_EL1h, except_type_serror);
+ break;
+
case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC):
enter_exception64(vcpu, PSR_MODE_EL2h, except_type_sync);
break;
@@ -347,9 +353,13 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
enter_exception64(vcpu, PSR_MODE_EL2h, except_type_irq);
break;
+ case unpack_vcpu_flag(EXCEPT_AA64_EL2_SERR):
+ enter_exception64(vcpu, PSR_MODE_EL2h, except_type_serror);
+ break;
+
default:
/*
- * Only EL1_SYNC and EL2_{SYNC,IRQ} makes
+ * Only EL1_{SYNC,SERR} and EL2_{SYNC,IRQ,SERR} make
* sense so far. Everything else gets silently
* ignored.
*/
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 2ad57b117385..84ec4e100fbb 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -298,7 +298,7 @@ static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
u64 val; \
\
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
- if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) \
+ if (is_nested_ctxt(vcpu)) \
compute_clr_set(vcpu, reg, c, s); \
\
compute_undef_clr_set(vcpu, kvm, reg, c, s); \
@@ -436,7 +436,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
u64 hcrx = vcpu->arch.hcrx_el2;
- if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+ if (is_nested_ctxt(vcpu)) {
u64 val = __vcpu_sys_reg(vcpu, HCRX_EL2);
hcrx |= val & __HCRX_EL2_MASK;
hcrx &= ~(~val & __HCRX_EL2_nMASK);
@@ -476,21 +476,56 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
write_sysreg_hcr(hcr);
- if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
- write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
+ if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) {
+ u64 vsesr;
+
+ /*
+ * When HCR_EL2.AMO is set, physical SErrors are taken to EL2
+ * and vSError injection is enabled for EL1. Conveniently, for
+ * NV this means that it is never the case where a 'physical'
+ * SError (injected by KVM or userspace) and vSError are
+ * deliverable to the same context.
+ *
+ * As such, we can trivially select between the host or guest's
+ * VSESR_EL2. Except for the case that FEAT_RAS hasn't been
+ * exposed to the guest, where ESR propagation in hardware
+ * occurs unconditionally.
+ *
+ * Paper over the architectural wart and use an IMPLEMENTATION
+ * DEFINED ESR value in case FEAT_RAS is hidden from the guest.
+ */
+ if (!vserror_state_is_nested(vcpu))
+ vsesr = vcpu->arch.vsesr_el2;
+ else if (kvm_has_ras(kern_hyp_va(vcpu->kvm)))
+ vsesr = __vcpu_sys_reg(vcpu, VSESR_EL2);
+ else
+ vsesr = ESR_ELx_ISV;
+
+ write_sysreg_s(vsesr, SYS_VSESR_EL2);
+ }
}
static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
+ u64 *hcr;
+
+ if (vserror_state_is_nested(vcpu))
+ hcr = __ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2);
+ else
+ hcr = &vcpu->arch.hcr_el2;
+
/*
* If we pended a virtual abort, preserve it until it gets
* cleared. See D1.14.3 (Virtual Interrupts) for details, but
* the crucial bit is "On taking a vSError interrupt,
* HCR_EL2.VSE is cleared to 0."
+ *
+ * Additionally, when in a nested context we need to propagate the
+ * updated state to the guest hypervisor's HCR_EL2.
*/
- if (vcpu->arch.hcr_el2 & HCR_VSE) {
- vcpu->arch.hcr_el2 &= ~HCR_VSE;
- vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
+ if (*hcr & HCR_VSE) {
+ *hcr &= ~HCR_VSE;
+ *hcr |= read_sysreg(hcr_el2) & HCR_VSE;
}
}
@@ -531,7 +566,7 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
* nested guest, as the guest hypervisor could select a smaller VL. Slap
* that into hardware before wrapping up.
*/
- if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
+ if (is_nested_ctxt(vcpu))
sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);
write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
@@ -557,7 +592,7 @@ static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
if (vcpu_has_sve(vcpu)) {
/* A guest hypervisor may restrict the effective max VL. */
- if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
+ if (is_nested_ctxt(vcpu))
zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
else
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 4d0dbea4c56f..a17cbe7582de 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -109,6 +109,28 @@ static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt)
return kvm_has_s1poe(kern_hyp_va(vcpu->kvm));
}
+static inline bool ctxt_has_ras(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu;
+
+ if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
+ return false;
+
+ vcpu = ctxt_to_vcpu(ctxt);
+ return kvm_has_ras(kern_hyp_va(vcpu->kvm));
+}
+
+static inline bool ctxt_has_sctlr2(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu;
+
+ if (!cpus_have_final_cap(ARM64_HAS_SCTLR2))
+ return false;
+
+ vcpu = ctxt_to_vcpu(ctxt);
+ return kvm_has_sctlr2(kern_hyp_va(vcpu->kvm));
+}
+
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
@@ -147,6 +169,9 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1);
ctxt_sys_reg(ctxt, ELR_EL1) = read_sysreg_el1(SYS_ELR);
ctxt_sys_reg(ctxt, SPSR_EL1) = read_sysreg_el1(SYS_SPSR);
+
+ if (ctxt_has_sctlr2(ctxt))
+ ctxt_sys_reg(ctxt, SCTLR2_EL1) = read_sysreg_el1(SYS_SCTLR2);
}
static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
@@ -159,8 +184,13 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
if (!has_vhe() && ctxt->__hyp_running_vcpu)
ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
- if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
+ if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
+ return;
+
+ if (!vserror_state_is_nested(ctxt_to_vcpu(ctxt)))
ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
+ else if (ctxt_has_ras(ctxt))
+ ctxt_sys_reg(ctxt, VDISR_EL2) = read_sysreg_s(SYS_VDISR_EL2);
}
static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
@@ -252,6 +282,9 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt,
write_sysreg(ctxt_sys_reg(ctxt, SP_EL1), sp_el1);
write_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1), SYS_ELR);
write_sysreg_el1(ctxt_sys_reg(ctxt, SPSR_EL1), SYS_SPSR);
+
+ if (ctxt_has_sctlr2(ctxt))
+ write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR2_EL1), SYS_SCTLR2);
}
/* Read the VCPU state's PSTATE, but translate (v)EL2 to EL1. */
@@ -275,6 +308,7 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctx
{
u64 pstate = to_hw_pstate(ctxt);
u64 mode = pstate & PSR_AA32_MODE_MASK;
+ u64 vdisr;
/*
* Safety check to ensure we're setting the CPU up to enter the guest
@@ -293,8 +327,17 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctx
write_sysreg_el2(ctxt->regs.pc, SYS_ELR);
write_sysreg_el2(pstate, SYS_SPSR);
- if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
- write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
+ if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
+ return;
+
+ if (!vserror_state_is_nested(ctxt_to_vcpu(ctxt)))
+ vdisr = ctxt_sys_reg(ctxt, DISR_EL1);
+ else if (ctxt_has_ras(ctxt))
+ vdisr = ctxt_sys_reg(ctxt, VDISR_EL2);
+ else
+ vdisr = 0;
+
+ write_sysreg_s(vdisr, SYS_VDISR_EL2);
}
static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index 2f4a4f5036bb..2a1c0f49792b 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -92,12 +92,42 @@ static void __trace_switch_to_host(void)
*host_data_ptr(host_debug_state.trfcr_el1));
}
+static void __debug_save_brbe(u64 *brbcr_el1)
+{
+ *brbcr_el1 = 0;
+
+ /* Check if the BRBE is enabled */
+ if (!(read_sysreg_el1(SYS_BRBCR) & (BRBCR_ELx_E0BRE | BRBCR_ELx_ExBRE)))
+ return;
+
+ /*
+ * Prohibit branch record generation while we are in the guest.
+ * Since access to BRBCR_EL1 is trapped, the guest can't
+ * modify the filtering set by the host.
+ */
+ *brbcr_el1 = read_sysreg_el1(SYS_BRBCR);
+ write_sysreg_el1(0, SYS_BRBCR);
+}
+
+static void __debug_restore_brbe(u64 brbcr_el1)
+{
+ if (!brbcr_el1)
+ return;
+
+ /* Restore BRBE controls */
+ write_sysreg_el1(brbcr_el1, SYS_BRBCR);
+}
+
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
/* Disable and flush SPE data generation */
if (host_data_test_flag(HAS_SPE))
__debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1));
+ /* Disable BRBE branch records */
+ if (host_data_test_flag(HAS_BRBE))
+ __debug_save_brbe(host_data_ptr(host_debug_state.brbcr_el1));
+
if (__trace_needs_switch())
__trace_switch_to_guest();
}
@@ -111,6 +141,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
if (host_data_test_flag(HAS_SPE))
__debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1));
+ if (host_data_test_flag(HAS_BRBE))
+ __debug_restore_brbe(*host_data_ptr(host_debug_state.brbcr_el1));
if (__trace_needs_switch())
__trace_switch_to_host();
}
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 0e752b515d0f..ccd575d5f6de 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -272,7 +272,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
* We're about to restore some new MMU state. Make sure
* ongoing page-table walks that have started before we
* trapped to EL2 have completed. This also synchronises the
- * above disabling of SPE and TRBE.
+ * above disabling of BRBE, SPE and TRBE.
*
* See DDI0487I.a D8.1.5 "Out-of-context translation regimes",
* rule R_LFHQG and subsequent information statements.
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index f162b0df5cae..d81275790e69 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -296,12 +296,19 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
}
/*
- * Prevent the guest from touching the ICC_SRE_EL1 system
- * register. Note that this may not have any effect, as
- * ICC_SRE_EL2.Enable being RAO/WI is a valid implementation.
+ * GICv5 BET0 FEAT_GCIE_LEGACY doesn't include ICC_SRE_EL2. This is due
+ * to be relaxed in a future spec release, at which point this
+ * condition can be dropped.
*/
- write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
- ICC_SRE_EL2);
+ if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) {
+ /*
+ * Prevent the guest from touching the ICC_SRE_EL1 system
+ * register. Note that this may not have any effect, as
+ * ICC_SRE_EL2.Enable being RAO/WI is a valid implementation.
+ */
+ write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+ ICC_SRE_EL2);
+ }
/*
* If we need to trap system registers, we must write
@@ -322,8 +329,14 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
}
- val = read_gicreg(ICC_SRE_EL2);
- write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
+ /*
+ * Can be dropped in the future when the GICv5 spec is relaxed. See comment
+ * above.
+ */
+ if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) {
+ val = read_gicreg(ICC_SRE_EL2);
+ write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
+ }
if (!cpu_if->vgic_sre) {
/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
@@ -423,10 +436,20 @@ void __vgic_v3_init_lrs(void)
*/
u64 __vgic_v3_get_gic_config(void)
{
- u64 val, sre = read_gicreg(ICC_SRE_EL1);
+ u64 val, sre;
unsigned long flags = 0;
/*
+ * In compat mode, we cannot access ICC_SRE_EL1 at any EL
+ * other than EL1 itself; just return the
+ * ICH_VTR_EL2. ICC_IDR0_EL1 is only implemented on a GICv5
+ * system, so we first check if we have GICv5 support.
+ */
+ if (cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF))
+ return read_gicreg(ICH_VTR_EL2);
+
+ sre = read_gicreg(ICC_SRE_EL1);
+ /*
* To check whether we have a MMIO-based (GICv2 compatible)
* CPU interface, we need to disable the system register
* view.
@@ -471,6 +494,16 @@ u64 __vgic_v3_get_gic_config(void)
return val;
}
+static void __vgic_v3_compat_mode_enable(void)
+{
+ if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF))
+ return;
+
+ sysreg_clear_set_s(SYS_ICH_VCTLR_EL2, 0, ICH_VCTLR_EL2_V3);
+ /* Wait for V3 to become enabled */
+ isb();
+}
+
static u64 __vgic_v3_read_vmcr(void)
{
return read_gicreg(ICH_VMCR_EL2);
@@ -490,6 +523,8 @@ void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
{
+ __vgic_v3_compat_mode_enable();
+
/*
* If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
* is dependent on ICC_SRE_EL1.SRE, and we have to perform the
@@ -1050,7 +1085,7 @@ static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu,
{
u64 ich_hcr;
- if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+ if (!is_nested_ctxt(vcpu))
return false;
ich_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 477f1580ffea..e482181c6632 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -48,8 +48,7 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
static u64 __compute_hcr(struct kvm_vcpu *vcpu)
{
- u64 guest_hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
- u64 hcr = vcpu->arch.hcr_el2;
+ u64 guest_hcr, hcr = vcpu->arch.hcr_el2;
if (!vcpu_has_nv(vcpu))
return hcr;
@@ -68,10 +67,21 @@ static u64 __compute_hcr(struct kvm_vcpu *vcpu)
if (!vcpu_el2_e2h_is_set(vcpu))
hcr |= HCR_NV1;
+ /*
+ * Nothing in HCR_EL2 should impact running in hypervisor
+ * context, apart from bits we have defined as RESx (E2H,
+ * HCD and co), or that cannot be set directly (the EXCLUDE
+ * bits). Given that we OR the guest's view with the host's,
+ * we can use the 0 value as the starting point, and only
+ * use the config-driven RES1 bits.
+ */
+ guest_hcr = kvm_vcpu_apply_reg_masks(vcpu, HCR_EL2, 0);
+
write_sysreg_s(vcpu->arch.ctxt.vncr_array, SYS_VNCR_EL2);
} else {
host_data_clear_flag(VCPU_IN_HYP_CONTEXT);
+ guest_hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
if (guest_hcr & HCR_NV) {
u64 va = __fix_to_virt(vncr_fixmap(smp_processor_id()));
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 73e4bc7fde9e..f28c6cf4fe1b 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -77,6 +77,9 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
__vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1));
__vcpu_assign_sys_reg(vcpu, ELR_EL2, read_sysreg_el1(SYS_ELR));
__vcpu_assign_sys_reg(vcpu, SPSR_EL2, read_sysreg_el1(SYS_SPSR));
+
+ if (ctxt_has_sctlr2(&vcpu->arch.ctxt))
+ __vcpu_assign_sys_reg(vcpu, SCTLR2_EL2, read_sysreg_el1(SYS_SCTLR2));
}
static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
@@ -139,6 +142,9 @@ static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
write_sysreg(__vcpu_sys_reg(vcpu, SP_EL2), sp_el1);
write_sysreg_el1(__vcpu_sys_reg(vcpu, ELR_EL2), SYS_ELR);
write_sysreg_el1(__vcpu_sys_reg(vcpu, SPSR_EL2), SYS_SPSR);
+
+ if (ctxt_has_sctlr2(&vcpu->arch.ctxt))
+ write_sysreg_el1(__vcpu_sys_reg(vcpu, SCTLR2_EL2), SYS_SCTLR2);
}
/*
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index a640e839848e..6745f38b64f9 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -15,13 +15,11 @@
#include <asm/kvm_nested.h>
#include <asm/esr.h>
-static void pend_sync_exception(struct kvm_vcpu *vcpu)
+static unsigned int exception_target_el(struct kvm_vcpu *vcpu)
{
/* If not nesting, EL1 is the only possible exception target */
- if (likely(!vcpu_has_nv(vcpu))) {
- kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
- return;
- }
+ if (likely(!vcpu_has_nv(vcpu)))
+ return PSR_MODE_EL1h;
/*
* With NV, we need to pick between EL1 and EL2. Note that we
@@ -32,26 +30,76 @@ static void pend_sync_exception(struct kvm_vcpu *vcpu)
switch(*vcpu_cpsr(vcpu) & PSR_MODE_MASK) {
case PSR_MODE_EL2h:
case PSR_MODE_EL2t:
- kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
- break;
+ return PSR_MODE_EL2h;
case PSR_MODE_EL1h:
case PSR_MODE_EL1t:
- kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
- break;
+ return PSR_MODE_EL1h;
case PSR_MODE_EL0t:
- if (vcpu_el2_tge_is_set(vcpu))
- kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
- else
- kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
- break;
+ return vcpu_el2_tge_is_set(vcpu) ? PSR_MODE_EL2h : PSR_MODE_EL1h;
default:
BUG();
}
}
-static bool match_target_el(struct kvm_vcpu *vcpu, unsigned long target)
+static enum vcpu_sysreg exception_esr_elx(struct kvm_vcpu *vcpu)
+{
+ if (exception_target_el(vcpu) == PSR_MODE_EL2h)
+ return ESR_EL2;
+
+ return ESR_EL1;
+}
+
+static enum vcpu_sysreg exception_far_elx(struct kvm_vcpu *vcpu)
+{
+ if (exception_target_el(vcpu) == PSR_MODE_EL2h)
+ return FAR_EL2;
+
+ return FAR_EL1;
+}
+
+static void pend_sync_exception(struct kvm_vcpu *vcpu)
+{
+ if (exception_target_el(vcpu) == PSR_MODE_EL1h)
+ kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
+ else
+ kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
+}
+
+static void pend_serror_exception(struct kvm_vcpu *vcpu)
{
- return (vcpu_get_flag(vcpu, EXCEPT_MASK) == target);
+ if (exception_target_el(vcpu) == PSR_MODE_EL1h)
+ kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SERR);
+ else
+ kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SERR);
+}
+
+static bool __effective_sctlr2_bit(struct kvm_vcpu *vcpu, unsigned int idx)
+{
+ u64 sctlr2;
+
+ if (!kvm_has_sctlr2(vcpu->kvm))
+ return false;
+
+ if (is_nested_ctxt(vcpu) &&
+ !(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_SCTLR2En))
+ return false;
+
+ if (exception_target_el(vcpu) == PSR_MODE_EL1h)
+ sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL1);
+ else
+ sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL2);
+
+ return sctlr2 & BIT(idx);
+}
+
+static bool effective_sctlr2_ease(struct kvm_vcpu *vcpu)
+{
+ return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_EASE_SHIFT);
+}
+
+static bool effective_sctlr2_nmea(struct kvm_vcpu *vcpu)
+{
+ return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_NMEA_SHIFT);
}
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
@@ -60,7 +108,11 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
u64 esr = 0;
- pend_sync_exception(vcpu);
+ /* This delight is brought to you by FEAT_DoubleFault2. */
+ if (effective_sctlr2_ease(vcpu))
+ pend_serror_exception(vcpu);
+ else
+ pend_sync_exception(vcpu);
/*
* Build an {i,d}abort, depending on the level and the
@@ -83,13 +135,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
esr |= ESR_ELx_FSC_EXTABT;
- if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC))) {
- vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
- vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
- } else {
- vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
- vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
- }
+ vcpu_write_sys_reg(vcpu, addr, exception_far_elx(vcpu));
+ vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}
static void inject_undef64(struct kvm_vcpu *vcpu)
@@ -105,10 +152,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
if (kvm_vcpu_trap_il_is32bit(vcpu))
esr |= ESR_ELx_IL;
- if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC)))
- vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
- else
- vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
+ vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}
#define DFSR_FSC_EXTABT_LPAE 0x10
@@ -155,36 +199,35 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}
-/**
- * kvm_inject_dabt - inject a data abort into the guest
- * @vcpu: The VCPU to receive the data abort
- * @addr: The address to report in the DFAR
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+static void __kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
{
if (vcpu_el1_is_32bit(vcpu))
- inject_abt32(vcpu, false, addr);
+ inject_abt32(vcpu, iabt, addr);
else
- inject_abt64(vcpu, false, addr);
+ inject_abt64(vcpu, iabt, addr);
}
-/**
- * kvm_inject_pabt - inject a prefetch abort into the guest
- * @vcpu: The VCPU to receive the prefetch abort
- * @addr: The address to report in the DFAR
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+static bool kvm_sea_target_is_el2(struct kvm_vcpu *vcpu)
{
- if (vcpu_el1_is_32bit(vcpu))
- inject_abt32(vcpu, true, addr);
- else
- inject_abt64(vcpu, true, addr);
+ if (__vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA))
+ return true;
+
+ if (!vcpu_mode_priv(vcpu))
+ return false;
+
+ return (*vcpu_cpsr(vcpu) & PSR_A_BIT) &&
+ (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA);
+}
+
+int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
+{
+ lockdep_assert_held(&vcpu->mutex);
+
+ if (is_nested_ctxt(vcpu) && kvm_sea_target_is_el2(vcpu))
+ return kvm_inject_nested_sea(vcpu, iabt, addr);
+
+ __kvm_inject_sea(vcpu, iabt, addr);
+ return 1;
}
void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
@@ -194,10 +237,7 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
addr = kvm_vcpu_get_fault_ipa(vcpu);
addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
- if (kvm_vcpu_trap_is_iabt(vcpu))
- kvm_inject_pabt(vcpu, addr);
- else
- kvm_inject_dabt(vcpu, addr);
+ __kvm_inject_sea(vcpu, kvm_vcpu_trap_is_iabt(vcpu), addr);
/*
* If AArch64 or LPAE, set FSC to 0 to indicate an Address
@@ -210,9 +250,9 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
!(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
return;
- esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+ esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
esr &= ~GENMASK_ULL(5, 0);
- vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+ vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}
/**
@@ -230,25 +270,70 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
inject_undef64(vcpu);
}
-void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
+static bool serror_is_masked(struct kvm_vcpu *vcpu)
{
- vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
- *vcpu_hcr(vcpu) |= HCR_VSE;
+ return (*vcpu_cpsr(vcpu) & PSR_A_BIT) && !effective_sctlr2_nmea(vcpu);
}
-/**
- * kvm_inject_vabt - inject an async abort / SError into the guest
- * @vcpu: The VCPU to receive the exception
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- *
- * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
- * the remaining ISS all-zeros so that this error is not interpreted as an
- * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
- * value, so the CPU generates an imp-def value.
- */
-void kvm_inject_vabt(struct kvm_vcpu *vcpu)
+static bool kvm_serror_target_is_el2(struct kvm_vcpu *vcpu)
+{
+ if (is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu))
+ return true;
+
+ if (!(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA))
+ return false;
+
+ /*
+ * In another example where FEAT_DoubleFault2 is entirely backwards,
+ * "masked" as it relates to the routing effects of HCRX_EL2.TMEA
+ * doesn't consider SCTLR2_EL1.NMEA. That is to say, even if EL1 asked
+ * for non-maskable SErrors, the EL2 bit takes priority if A is set.
+ */
+ if (vcpu_mode_priv(vcpu))
+ return *vcpu_cpsr(vcpu) & PSR_A_BIT;
+
+ /*
+ * Otherwise, an SError taken from EL0 is considered unmasked when
+ * NMEA is set.
+ */
+ return serror_is_masked(vcpu);
+}
+
+static bool kvm_serror_undeliverable_at_el2(struct kvm_vcpu *vcpu)
+{
+ return !(vcpu_el2_tge_is_set(vcpu) || vcpu_el2_amo_is_set(vcpu));
+}
+
+int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr)
{
- kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
+ lockdep_assert_held(&vcpu->mutex);
+
+ if (is_nested_ctxt(vcpu) && kvm_serror_target_is_el2(vcpu))
+ return kvm_inject_nested_serror(vcpu, esr);
+
+ if (vcpu_is_el2(vcpu) && kvm_serror_undeliverable_at_el2(vcpu)) {
+ vcpu_set_vsesr(vcpu, esr);
+ vcpu_set_flag(vcpu, NESTED_SERROR_PENDING);
+ return 1;
+ }
+
+ /*
+ * Emulate the exception entry if SErrors are unmasked. This is useful
+ * because, if the vCPU is in a nested context w/ vSErrors enabled,
+ * we've already delegated the hardware vSError context (i.e.
+ * HCR_EL2.VSE, VSESR_EL2, VDISR_EL2) to the guest hypervisor.
+ *
+ * As we're emulating the SError injection we need to explicitly populate
+ * ESR_ELx.EC because hardware will not do it on our behalf.
+ */
+ if (!serror_is_masked(vcpu)) {
+ pend_serror_exception(vcpu);
+ esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SERROR);
+ vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
+ return 1;
+ }
+
+ vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
+ *vcpu_hcr(vcpu) |= HCR_VSE;
+ return 1;
}
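
Summarising the paths through kvm_inject_serror_esr() (a restatement of the code above, not new behaviour):

    /*
     * 1. Nested ctxt, SError routed to EL2 -> emulate the vSError at vEL2
     * 2. vEL2 with TGE and AMO both clear  -> latch VSESR_EL2 and set
     *                                         NESTED_SERROR_PENDING
     * 3. SErrors unmasked                  -> emulate the exception entry
     * 4. Otherwise                         -> hardware vSError via HCR_EL2.VSE
     */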
diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
index ab365e839874..54f9358c9e0e 100644
--- a/arch/arm64/kvm/mmio.c
+++ b/arch/arm64/kvm/mmio.c
@@ -72,7 +72,7 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
return data;
}
-static bool kvm_pending_sync_exception(struct kvm_vcpu *vcpu)
+static bool kvm_pending_external_abort(struct kvm_vcpu *vcpu)
{
if (!vcpu_get_flag(vcpu, PENDING_EXCEPTION))
return false;
@@ -90,6 +90,8 @@ static bool kvm_pending_sync_exception(struct kvm_vcpu *vcpu)
switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC):
+ case unpack_vcpu_flag(EXCEPT_AA64_EL1_SERR):
+ case unpack_vcpu_flag(EXCEPT_AA64_EL2_SERR):
return true;
default:
return false;
@@ -113,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
* Detect if the MMIO return was already handled or if userspace aborted
* the MMIO access.
*/
- if (unlikely(!vcpu->mmio_needed || kvm_pending_sync_exception(vcpu)))
+ if (unlikely(!vcpu->mmio_needed || kvm_pending_external_abort(vcpu)))
return 1;
vcpu->mmio_needed = 0;
@@ -169,10 +171,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
kvm_vcpu_get_hfar(vcpu), fault_ipa);
- if (vcpu_is_protected(vcpu)) {
- kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
- return 1;
- }
+ if (vcpu_is_protected(vcpu))
+ return kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
&vcpu->kvm->arch.flags)) {
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 2942ec92c5a4..1c78864767c5 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -193,11 +193,6 @@ int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
return 0;
}
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
- return !pfn_is_map_memory(pfn);
-}
-
static void *stage2_memcache_zalloc_page(void *arg)
{
struct kvm_mmu_memory_cache *mc = arg;
@@ -1470,6 +1465,18 @@ static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
return vma->vm_flags & VM_MTE_ALLOWED;
}
+static bool kvm_vma_is_cacheable(struct vm_area_struct *vma)
+{
+ switch (FIELD_GET(PTE_ATTRINDX_MASK, pgprot_val(vma->vm_page_prot))) {
+ case MT_NORMAL_NC:
+ case MT_DEVICE_nGnRnE:
+ case MT_DEVICE_nGnRE:
+ return false;
+ default:
+ return true;
+ }
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_trans *nested,
struct kvm_memory_slot *memslot, unsigned long hva,
@@ -1477,8 +1484,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
{
int ret = 0;
bool write_fault, writable, force_pte = false;
- bool exec_fault, mte_allowed;
- bool device = false, vfio_allow_any_uc = false;
+ bool exec_fault, mte_allowed, is_vma_cacheable;
+ bool s2_force_noncacheable = false, vfio_allow_any_uc = false;
unsigned long mmu_seq;
phys_addr_t ipa = fault_ipa;
struct kvm *kvm = vcpu->kvm;
@@ -1492,6 +1499,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
struct kvm_pgtable *pgt;
struct page *page;
+ vm_flags_t vm_flags;
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED;
if (fault_is_perm)
@@ -1619,6 +1627,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
+ vm_flags = vma->vm_flags;
+
+ is_vma_cacheable = kvm_vma_is_cacheable(vma);
+
/* Don't use the VMA after the unlock -- it may have vanished */
vma = NULL;
@@ -1642,18 +1654,39 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (is_error_noslot_pfn(pfn))
return -EFAULT;
- if (kvm_is_device_pfn(pfn)) {
- /*
- * If the page was identified as device early by looking at
- * the VMA flags, vma_pagesize is already representing the
- * largest quantity we can map. If instead it was mapped
- * via __kvm_faultin_pfn(), vma_pagesize is set to PAGE_SIZE
- * and must not be upgraded.
- *
- * In both cases, we don't let transparent_hugepage_adjust()
- * change things at the last minute.
- */
- device = true;
+ /*
+ * Check if this is a non-struct-page memory PFN that cannot support
+ * CMOs. It could potentially be unsafe to access as cacheable.
+ */
+ if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(pfn)) {
+ if (is_vma_cacheable) {
+ /*
+ * Whilst the VMA owner expects a cacheable mapping to this
+ * PFN, hardware also has to support the FWB and CACHE DIC
+ * features.
+ *
+ * ARM64 KVM relies on kernel VA mapping to the PFN to
+ * perform cache maintenance as the CMO instructions work on
+ * virtual addresses. VM_PFNMAP regions are not necessarily
+ * mapped to a KVA, and hence the presence of the hardware
+ * features S2FWB and CACHE DIC is mandatory to avoid the need
+ * for cache maintenance.
+ */
+ if (!kvm_supports_cacheable_pfnmap())
+ return -EFAULT;
+ } else {
+ /*
+ * If the page was identified as device early by looking at
+ * the VMA flags, vma_pagesize is already representing the
+ * largest quantity we can map. If instead it was mapped
+ * via __kvm_faultin_pfn(), vma_pagesize is set to PAGE_SIZE
+ * and must not be upgraded.
+ *
+ * In both cases, we don't let transparent_hugepage_adjust()
+ * change things at the last minute.
+ */
+ s2_force_noncacheable = true;
+ }
} else if (logging_active && !write_fault) {
/*
* Only actually map the page as writable if this was a write
@@ -1662,7 +1695,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
writable = false;
}
- if (exec_fault && device)
+ if (exec_fault && s2_force_noncacheable)
return -ENOEXEC;
/*
@@ -1695,7 +1728,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* If we are not forced to use page mapping, check if we are
* backed by a THP and thus use block mapping if possible.
*/
- if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
+ if (vma_pagesize == PAGE_SIZE && !(force_pte || s2_force_noncacheable)) {
if (fault_is_perm && fault_granule > PAGE_SIZE)
vma_pagesize = fault_granule;
else
@@ -1709,7 +1742,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
}
- if (!fault_is_perm && !device && kvm_has_mte(kvm)) {
+ if (!fault_is_perm && !s2_force_noncacheable && kvm_has_mte(kvm)) {
/* Check the VMM hasn't introduced a new disallowed VMA */
if (mte_allowed) {
sanitise_mte_tags(kvm, pfn, vma_pagesize);
@@ -1725,7 +1758,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (exec_fault)
prot |= KVM_PGTABLE_PROT_X;
- if (device) {
+ if (s2_force_noncacheable) {
if (vfio_allow_any_uc)
prot |= KVM_PGTABLE_PROT_NORMAL_NC;
else
@@ -1808,7 +1841,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
* There is no need to pass the error into the guest.
*/
if (kvm_handle_guest_sea())
- kvm_inject_vabt(vcpu);
+ return kvm_inject_serror(vcpu);
return 1;
}
@@ -1836,11 +1869,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
if (fault_ipa >= BIT_ULL(VTCR_EL2_IPA(vcpu->arch.hw_mmu->vtcr))) {
fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
- if (is_iabt)
- kvm_inject_pabt(vcpu, fault_ipa);
- else
- kvm_inject_dabt(vcpu, fault_ipa);
- return 1;
+ return kvm_inject_sea(vcpu, is_iabt, fault_ipa);
}
}
@@ -1912,8 +1941,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
}
if (kvm_vcpu_abt_iss1tw(vcpu)) {
- kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
- ret = 1;
+ ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
goto out_unlock;
}
@@ -1958,10 +1986,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
if (ret == 0)
ret = 1;
out:
- if (ret == -ENOEXEC) {
- kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
- ret = 1;
- }
+ if (ret == -ENOEXEC)
+ ret = kvm_inject_sea_iabt(vcpu, kvm_vcpu_get_hfar(vcpu));
out_unlock:
srcu_read_unlock(&vcpu->kvm->srcu, idx);
return ret;
@@ -2221,6 +2247,15 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
ret = -EINVAL;
break;
}
+
+ /*
+ * Cacheable PFNMAP is allowed only if the hardware
+ * supports it.
+ */
+ if (kvm_vma_is_cacheable(vma) && !kvm_supports_cacheable_pfnmap()) {
+ ret = -EINVAL;
+ break;
+ }
}
hva = min(reg_end, vma->vm_end);
} while (hva < reg_end);
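
kvm_supports_cacheable_pfnmap() is outside this diff; going by the comments in user_mem_abort(), a plausible shape is the following (an assumption, not the patch's actual definition):

    /* Assumed definition -- the real helper lives outside this diff. */
    static inline bool kvm_supports_cacheable_pfnmap(void)
    {
            /*
             * S2FWB makes the stage-2 attributes authoritative without CMOs,
             * and CACHE DIC drops the I-cache maintenance requirement.
             */
            return cpus_have_final_cap(ARM64_HAS_STAGE2_FWB) &&
                   cpus_have_final_cap(ARM64_HAS_CACHE_DIC);
    }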
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index dc1d26559bfa..153b3e11b115 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -1441,12 +1441,11 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
break;
case SYS_ID_AA64PFR0_EL1:
- /* No RME, AMU, MPAM, S-EL2, or RAS */
+ /* No RME, AMU, MPAM, or S-EL2 */
val &= ~(ID_AA64PFR0_EL1_RME |
ID_AA64PFR0_EL1_AMU |
ID_AA64PFR0_EL1_MPAM |
ID_AA64PFR0_EL1_SEL2 |
- ID_AA64PFR0_EL1_RAS |
ID_AA64PFR0_EL1_EL3 |
ID_AA64PFR0_EL1_EL2 |
ID_AA64PFR0_EL1_EL1 |
@@ -1683,69 +1682,21 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
set_sysreg_masks(kvm, HFGITR2_EL2, res0, res1);
/* TCR2_EL2 */
- res0 = TCR2_EL2_RES0;
- res1 = TCR2_EL2_RES1;
- if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP))
- res0 |= (TCR2_EL2_DisCH0 | TCR2_EL2_DisCH1 | TCR2_EL2_D128);
- if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, MEC, IMP))
- res0 |= TCR2_EL2_AMEC1 | TCR2_EL2_AMEC0;
- if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, HAFDBS, HAFT))
- res0 |= TCR2_EL2_HAFT;
- if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
- res0 |= TCR2_EL2_PTTWI | TCR2_EL2_PnCH;
- if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP))
- res0 |= TCR2_EL2_AIE;
- if (!kvm_has_s1poe(kvm))
- res0 |= TCR2_EL2_POE | TCR2_EL2_E0POE;
- if (!kvm_has_s1pie(kvm))
- res0 |= TCR2_EL2_PIE;
- if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
- res0 |= (TCR2_EL2_E0POE | TCR2_EL2_D128 |
- TCR2_EL2_AMEC1 | TCR2_EL2_DisCH0 | TCR2_EL2_DisCH1);
+ get_reg_fixed_bits(kvm, TCR2_EL2, &res0, &res1);
set_sysreg_masks(kvm, TCR2_EL2, res0, res1);
/* SCTLR_EL1 */
- res0 = SCTLR_EL1_RES0;
- res1 = SCTLR_EL1_RES1;
- if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN3))
- res0 |= SCTLR_EL1_EPAN;
+ get_reg_fixed_bits(kvm, SCTLR_EL1, &res0, &res1);
set_sysreg_masks(kvm, SCTLR_EL1, res0, res1);
+ /* SCTLR2_ELx */
+ get_reg_fixed_bits(kvm, SCTLR2_EL1, &res0, &res1);
+ set_sysreg_masks(kvm, SCTLR2_EL1, res0, res1);
+ get_reg_fixed_bits(kvm, SCTLR2_EL2, &res0, &res1);
+ set_sysreg_masks(kvm, SCTLR2_EL2, res0, res1);
+
/* MDCR_EL2 */
- res0 = MDCR_EL2_RES0;
- res1 = MDCR_EL2_RES1;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
- res0 |= (MDCR_EL2_HPMN | MDCR_EL2_TPMCR |
- MDCR_EL2_TPM | MDCR_EL2_HPME);
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP))
- res0 |= MDCR_EL2_E2PB | MDCR_EL2_TPMS;
- if (!kvm_has_feat(kvm, ID_AA64DFR1_EL1, SPMU, IMP))
- res0 |= MDCR_EL2_EnSPM;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P1))
- res0 |= MDCR_EL2_HPMD;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
- res0 |= MDCR_EL2_TTRF;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
- res0 |= MDCR_EL2_HCCD | MDCR_EL2_HLP;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP))
- res0 |= MDCR_EL2_E2TB;
- if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
- res0 |= MDCR_EL2_TDCC;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, MTPMU, IMP) ||
- kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
- res0 |= MDCR_EL2_MTPME;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P7))
- res0 |= MDCR_EL2_HPMFZO;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSS, IMP))
- res0 |= MDCR_EL2_PMSSE;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2))
- res0 |= MDCR_EL2_HPMFZS;
- if (!kvm_has_feat(kvm, ID_AA64DFR1_EL1, EBEP, IMP))
- res0 |= MDCR_EL2_PMEE;
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, V8P9))
- res0 |= MDCR_EL2_EBWE;
- if (!kvm_has_feat(kvm, ID_AA64DFR2_EL1, STEP, IMP))
- res0 |= MDCR_EL2_EnSTEPOP;
+ get_reg_fixed_bits(kvm, MDCR_EL2, &res0, &res1);
set_sysreg_masks(kvm, MDCR_EL2, res0, res1);
/* CNTHCTL_EL2 */
@@ -1802,3 +1753,43 @@ void check_nested_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu))
kvm_inject_nested_irq(vcpu);
}
+
+/*
+ * One of the many architectural bugs in FEAT_NV2 is that the guest hypervisor
+ * can write to HCR_EL2 behind our back, potentially changing the exception
+ * routing / masking for even the host context.
+ *
+ * What follows is some slop to (1) react to exception routing / masking and (2)
+ * preserve the pending SError state across translation regimes.
+ */
+void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu_has_nv(vcpu))
+ return;
+
+ if (unlikely(vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING)))
+ kvm_inject_serror_esr(vcpu, vcpu_get_vsesr(vcpu));
+}
+
+void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+ unsigned long *hcr = vcpu_hcr(vcpu);
+
+ if (!vcpu_has_nv(vcpu))
+ return;
+
+ /*
+ * We previously decided that an SError was deliverable to the guest.
+ * Reap the pending state from HCR_EL2 and...
+ */
+ if (unlikely(__test_and_clear_bit(__ffs(HCR_VSE), hcr)))
+ vcpu_set_flag(vcpu, NESTED_SERROR_PENDING);
+
+ /*
+ * Re-attempt SError injection in case the deliverability has changed,
+ * which is necessary to faithfully emulate WFI in the case of a pending
+ * SError being a wakeup condition.
+ */
+ if (unlikely(vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING)))
+ kvm_inject_serror_esr(vcpu, vcpu_get_vsesr(vcpu));
+}
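
The flush/sync pair above presumably brackets guest entry in the vcpu run loop, so that an SError latched while the guest hypervisor owned HCR_EL2 is re-evaluated against the new routing/masking regime (an assumed call-site sketch; the hook points are not part of this hunk):

    /* Assumed call sites, for illustration: */
    kvm_nested_flush_hwstate(vcpu);   /* pre-entry: retry a deferred SError */
    ret = kvm_arm_vcpu_enter_exit(vcpu);
    kvm_nested_sync_hwstate(vcpu);    /* post-exit: reap HCR_EL2.VSE state  */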
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c20bd6f21e60..82ffb3b3b3cf 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -108,7 +108,6 @@ static bool get_el2_to_el1_mapping(unsigned int reg,
PURE_EL2_SYSREG( HACR_EL2 );
PURE_EL2_SYSREG( VTTBR_EL2 );
PURE_EL2_SYSREG( VTCR_EL2 );
- PURE_EL2_SYSREG( RVBAR_EL2 );
PURE_EL2_SYSREG( TPIDR_EL2 );
PURE_EL2_SYSREG( HPFAR_EL2 );
PURE_EL2_SYSREG( HCRX_EL2 );
@@ -144,6 +143,7 @@ static bool get_el2_to_el1_mapping(unsigned int reg,
MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
+ MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
default:
return false;
}
@@ -533,8 +533,7 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu,
return ignore_write(vcpu, p);
if (p->Op1 == 4) { /* ICC_SRE_EL2 */
- p->regval = (ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE |
- ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB);
+ p->regval = KVM_ICC_SRE_EL2;
} else { /* ICC_SRE_EL1 */
p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
}
@@ -773,6 +772,12 @@ static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
return mpidr;
}
+static unsigned int hidden_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ return REG_HIDDEN;
+}
+
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
@@ -1612,13 +1617,14 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
break;
case SYS_ID_AA64PFR2_EL1:
- /* We only expose FPMR */
- val &= ID_AA64PFR2_EL1_FPMR;
+ val &= ID_AA64PFR2_EL1_FPMR |
+ (kvm_has_mte(vcpu->kvm) ?
+ ID_AA64PFR2_EL1_MTEFAR | ID_AA64PFR2_EL1_MTESTOREONLY :
+ 0);
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
@@ -1643,8 +1649,10 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val &= ~ID_AA64MMFR2_EL1_NV;
break;
case SYS_ID_AA64MMFR3_EL1:
- val &= ID_AA64MMFR3_EL1_TCRX | ID_AA64MMFR3_EL1_S1POE |
- ID_AA64MMFR3_EL1_S1PIE;
+ val &= ID_AA64MMFR3_EL1_TCRX |
+ ID_AA64MMFR3_EL1_SCTLRX |
+ ID_AA64MMFR3_EL1_S1POE |
+ ID_AA64MMFR3_EL1_S1PIE;
break;
case SYS_ID_MMFR4_EL1:
val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
@@ -1811,7 +1819,7 @@ static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
}
- if (kvm_vgic_global_state.type == VGIC_V3) {
+ if (vgic_is_v3(vcpu->kvm)) {
val &= ~ID_AA64PFR0_EL1_GIC_MASK;
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
}
@@ -1953,6 +1961,14 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
(vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
return -EINVAL;
+ /*
+ * If we are running on a GICv5 host and support FEAT_GCIE_LEGACY, then
+ * we support GICv3. Fail attempts to do anything but set that to IMP.
+ */
+ if (vgic_is_v3_compat(vcpu->kvm) &&
+ FIELD_GET(ID_AA64PFR0_EL1_GIC_MASK, user_val) != ID_AA64PFR0_EL1_GIC_IMP)
+ return -EINVAL;
+
return set_id_reg(vcpu, rd, user_val);
}
@@ -2325,6 +2341,10 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)
#define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v)
+#define EL2_REG_VNCR_FILT(name, vis) \
+ EL2_REG_FILTERED(name, bad_vncr_trap, reset_val, 0, vis)
+#define EL2_REG_VNCR_GICv3(name) \
+ EL2_REG_VNCR_FILT(name, hidden_visibility)
#define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)
/*
@@ -2483,6 +2503,21 @@ static unsigned int vncr_el2_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
+static unsigned int sctlr2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (kvm_has_sctlr2(vcpu->kvm))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+static unsigned int sctlr2_el2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ return __el2_visibility(vcpu, rd, sctlr2_visibility);
+}
+
static bool access_zcr_el2(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
@@ -2513,11 +2548,7 @@ static bool access_gic_vtr(struct kvm_vcpu *vcpu,
if (p->is_write)
return write_to_read_only(vcpu, p, r);
- p->regval = kvm_vgic_global_state.ich_vtr_el2;
- p->regval &= ~(ICH_VTR_EL2_DVIM |
- ICH_VTR_EL2_A3V |
- ICH_VTR_EL2_IDbits);
- p->regval |= ICH_VTR_EL2_nV4;
+ p->regval = kvm_get_guest_vtr_el2();
return true;
}
@@ -2588,6 +2619,26 @@ static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu,
return __el2_visibility(vcpu, rd, tcr2_visibility);
}
+static unsigned int fgt2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (el2_visibility(vcpu, rd) == 0 &&
+ kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, FGT2))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+static unsigned int fgt_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (el2_visibility(vcpu, rd) == 0 &&
+ kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, IMP))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
@@ -2639,6 +2690,23 @@ static bool access_mdcr(struct kvm_vcpu *vcpu,
return true;
}
+static bool access_ras(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ switch (reg_to_encoding(r)) {
+ default:
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
+ kvm_inject_undefined(vcpu);
+ return false;
+ }
+ }
+
+ return trap_raz_wi(vcpu, p, r);
+}
+
/*
* For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and
* AIDR_EL1 as "invariant" registers, meaning userspace cannot change them.
@@ -2866,7 +2934,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64PFR0_EL1_FP)),
ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
~(ID_AA64PFR1_EL1_PFAR |
- ID_AA64PFR1_EL1_DF2 |
ID_AA64PFR1_EL1_MTEX |
ID_AA64PFR1_EL1_THE |
ID_AA64PFR1_EL1_GCS |
@@ -2878,7 +2945,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64PFR1_EL1_MPAM_frac |
ID_AA64PFR1_EL1_RAS_frac |
ID_AA64PFR1_EL1_MTE)),
- ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
+ ID_WRITABLE(ID_AA64PFR2_EL1,
+ ID_AA64PFR2_EL1_FPMR |
+ ID_AA64PFR2_EL1_MTEFAR |
+ ID_AA64PFR2_EL1_MTESTOREONLY),
ID_UNALLOCATED(4,3),
ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
ID_HIDDEN(ID_AA64SMFR0_EL1),
@@ -2945,6 +3015,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64MMFR2_EL1_NV |
ID_AA64MMFR2_EL1_CCIDX)),
ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX |
+ ID_AA64MMFR3_EL1_SCTLRX |
ID_AA64MMFR3_EL1_S1PIE |
ID_AA64MMFR3_EL1_S1POE)),
ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac),
@@ -2955,6 +3026,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
+ { SYS_DESC(SYS_SCTLR2_EL1), access_vm_reg, reset_val, SCTLR2_EL1, 0,
+ .visibility = sctlr2_visibility },
MTE_REG(RGSR_EL1),
MTE_REG(GCR_EL1),
@@ -2984,14 +3057,14 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
- { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERRIDR_EL1), access_ras },
+ { SYS_DESC(SYS_ERRSELR_EL1), access_ras },
+ { SYS_DESC(SYS_ERXFR_EL1), access_ras },
+ { SYS_DESC(SYS_ERXCTLR_EL1), access_ras },
+ { SYS_DESC(SYS_ERXSTATUS_EL1), access_ras },
+ { SYS_DESC(SYS_ERXADDR_EL1), access_ras },
+ { SYS_DESC(SYS_ERXMISC0_EL1), access_ras },
+ { SYS_DESC(SYS_ERXMISC1_EL1), access_ras },
MTE_REG(TFSR_EL1),
MTE_REG(TFSRE0_EL1),
@@ -3302,12 +3375,14 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
+ EL2_REG_FILTERED(SCTLR2_EL2, access_vm_reg, reset_val, 0,
+ sctlr2_el2_visibility),
EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0),
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
- EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
- EL2_REG_VNCR(HFGWTR_EL2, reset_val, 0),
+ EL2_REG_VNCR_FILT(HFGRTR_EL2, fgt_visibility),
+ EL2_REG_VNCR_FILT(HFGWTR_EL2, fgt_visibility),
EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
EL2_REG_VNCR(HACR_EL2, reset_val, 0),
@@ -3327,9 +3402,14 @@ static const struct sys_reg_desc sys_reg_descs[] = {
vncr_el2_visibility),
{ SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 },
- EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0),
- EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0),
- EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0),
+ EL2_REG_VNCR_FILT(HDFGRTR2_EL2, fgt2_visibility),
+ EL2_REG_VNCR_FILT(HDFGWTR2_EL2, fgt2_visibility),
+ EL2_REG_VNCR_FILT(HFGRTR2_EL2, fgt2_visibility),
+ EL2_REG_VNCR_FILT(HFGWTR2_EL2, fgt2_visibility),
+ EL2_REG_VNCR_FILT(HDFGRTR_EL2, fgt_visibility),
+ EL2_REG_VNCR_FILT(HDFGWTR_EL2, fgt_visibility),
+ EL2_REG_VNCR_FILT(HAFGRTR_EL2, fgt_visibility),
+ EL2_REG_VNCR_FILT(HFGITR2_EL2, fgt2_visibility),
EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
EL2_REG_REDIR(ELR_EL2, reset_val, 0),
{ SYS_DESC(SYS_SP_EL1), access_sp_el1},
@@ -3344,6 +3424,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
EL2_REG_REDIR(ESR_EL2, reset_val, 0),
+ EL2_REG_VNCR(VSESR_EL2, reset_unknown, 0),
{ SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 },
EL2_REG_REDIR(FAR_EL2, reset_val, 0),
@@ -3370,43 +3451,44 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_MPAMVPM7_EL2), undef_access },
EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
- EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
+ { SYS_DESC(SYS_RVBAR_EL2), undef_access },
{ SYS_DESC(SYS_RMR_EL2), undef_access },
+ EL2_REG_VNCR(VDISR_EL2, reset_unknown, 0),
- EL2_REG_VNCR(ICH_AP0R0_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_AP0R1_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_AP0R2_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_AP0R3_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_AP1R0_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_AP1R1_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_AP1R2_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_AP1R3_EL2, reset_val, 0),
+ EL2_REG_VNCR_GICv3(ICH_AP0R0_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP0R1_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP0R2_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP0R3_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP1R0_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP1R1_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP1R2_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP1R3_EL2),
{ SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre },
- EL2_REG_VNCR(ICH_HCR_EL2, reset_val, 0),
+ EL2_REG_VNCR_GICv3(ICH_HCR_EL2),
{ SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr },
{ SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr },
{ SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr },
{ SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr },
- EL2_REG_VNCR(ICH_VMCR_EL2, reset_val, 0),
-
- EL2_REG_VNCR(ICH_LR0_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR1_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR2_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR3_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR4_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR5_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR6_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR7_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR8_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR9_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR10_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR11_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR12_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR13_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR14_EL2, reset_val, 0),
- EL2_REG_VNCR(ICH_LR15_EL2, reset_val, 0),
+ EL2_REG_VNCR_GICv3(ICH_VMCR_EL2),
+
+ EL2_REG_VNCR_GICv3(ICH_LR0_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR1_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR2_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR3_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR4_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR5_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR6_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR7_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR8_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR9_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR10_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR11_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR12_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR13_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR14_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR15_EL2),
EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
@@ -4275,12 +4357,12 @@ static const struct sys_reg_desc cp15_64_regs[] = {
};
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
- bool is_32)
+ bool reset_check)
{
unsigned int i;
for (i = 0; i < n; i++) {
- if (!is_32 && table[i].reg && !table[i].reset) {
+ if (reset_check && table[i].reg && !table[i].reset) {
kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
&table[i], i, table[i].name);
return false;
@@ -4475,7 +4557,7 @@ static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
return true;
kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
- params->is_write ? "write" : "read", reg_id);
+ str_write_read(params->is_write), reg_id);
return false;
}
@@ -5269,18 +5351,22 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
int __init kvm_sys_reg_table_init(void)
{
+ const struct sys_reg_desc *gicv3_regs;
bool valid = true;
- unsigned int i;
+ unsigned int i, sz;
int ret = 0;
/* Make sure tables are unique and in order. */
- valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
- valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
- valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
- valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
- valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
+ valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), true);
+ valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), false);
+ valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), false);
+ valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), false);
+ valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), false);
valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
+ gicv3_regs = vgic_v3_get_sysreg_table(&sz);
+ valid &= check_sysreg_table(gicv3_regs, sz, false);
+
if (!valid)
return -EINVAL;
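
The access_ras() handler introduced earlier in this file reduces to a two-way decision; a compilable model, with 'has_ras' standing in for kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP):

#include <stdbool.h>
#include <stdio.h>

enum ras_action { RAS_UNDEF, RAS_RAZ_WI };

/*
 * Without FEAT_RAS the error-record registers UNDEF; with it they are
 * still emulated as RAZ/WI, since KVM exposes no error records to the
 * guest.
 */
static enum ras_action access_ras_model(bool has_ras)
{
	if (!has_ras)
		return RAS_UNDEF;	/* kvm_inject_undefined() */

	return RAS_RAZ_WI;		/* trap_raz_wi() */
}

int main(void)
{
	printf("%d %d\n", access_ras_model(false), access_ras_model(true));
	return 0;
}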
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index ef97d9fc67cc..317abc490368 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -108,7 +108,7 @@ inline void print_sys_reg_msg(const struct sys_reg_params *p,
/* Look, we even formatted it for you to paste into the table! */
kvm_pr_unimpl("%pV { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
&(struct va_format){ fmt, &va },
- p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
+ p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, str_write_read(p->is_write));
va_end(va);
}
diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
index f85415db7713..a7ab9a3bbed0 100644
--- a/arch/arm64/kvm/trace_handle_exit.h
+++ b/arch/arm64/kvm/trace_handle_exit.h
@@ -113,7 +113,7 @@ TRACE_EVENT(kvm_sys_access,
__entry->vcpu_pc, __entry->name ?: "UNKN",
__entry->Op0, __entry->Op1, __entry->CRn,
__entry->CRm, __entry->Op2,
- __entry->is_write ? "write" : "read")
+ str_write_read(__entry->is_write))
);
TRACE_EVENT(kvm_set_guest_debug,
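
The str_write_read() conversions in this and the two preceding files use the helper from <linux/string_choices.h>; semantically it is just the following (the in-tree implementation may be phrased differently):

#include <stdbool.h>

static inline const char *str_write_read(bool is_write)
{
	return is_write ? "write" : "read";
}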
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
index 5eacb4b3250a..bdc2d57370b2 100644
--- a/arch/arm64/kvm/vgic-sys-reg-v3.c
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -297,6 +297,91 @@ static int get_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
return 0;
}
+static int set_gic_ich_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 val)
+{
+ __vcpu_assign_sys_reg(vcpu, r->reg, val);
+ return 0;
+}
+
+static int get_gic_ich_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 *val)
+{
+ *val = __vcpu_sys_reg(vcpu, r->reg);
+ return 0;
+}
+
+static int set_gic_ich_apr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 val)
+{
+ u8 idx = r->Op2 & 3;
+
+ if (idx > vgic_v3_max_apr_idx(vcpu))
+ return -EINVAL;
+
+ return set_gic_ich_reg(vcpu, r, val);
+}
+
+static int get_gic_ich_apr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 *val)
+{
+ u8 idx = r->Op2 & 3;
+
+ if (idx > vgic_v3_max_apr_idx(vcpu))
+ return -EINVAL;
+
+ return get_gic_ich_reg(vcpu, r, val);
+}
+
+static int set_gic_icc_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 val)
+{
+ if (val != KVM_ICC_SRE_EL2)
+ return -EINVAL;
+ return 0;
+}
+
+static int get_gic_icc_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 *val)
+{
+ *val = KVM_ICC_SRE_EL2;
+ return 0;
+}
+
+static int set_gic_ich_vtr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 val)
+{
+ if (val != kvm_get_guest_vtr_el2())
+ return -EINVAL;
+ return 0;
+}
+
+static int get_gic_ich_vtr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 *val)
+{
+ *val = kvm_get_guest_vtr_el2();
+ return 0;
+}
+
+static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ return vcpu_has_nv(vcpu) ? 0 : REG_HIDDEN;
+}
+
+#define __EL2_REG(r, acc, i) \
+ { \
+ SYS_DESC(SYS_ ## r), \
+ .get_user = get_gic_ ## acc, \
+ .set_user = set_gic_ ## acc, \
+ .reg = i, \
+ .visibility = el2_visibility, \
+ }
+
+#define EL2_REG(r, acc) __EL2_REG(r, acc, r)
+
+#define EL2_REG_RO(r, acc) __EL2_REG(r, acc, 0)
+
static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
{ SYS_DESC(SYS_ICC_PMR_EL1),
.set_user = set_gic_pmr, .get_user = get_gic_pmr, },
@@ -328,8 +413,42 @@ static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
.set_user = set_gic_grpen0, .get_user = get_gic_grpen0, },
{ SYS_DESC(SYS_ICC_IGRPEN1_EL1),
.set_user = set_gic_grpen1, .get_user = get_gic_grpen1, },
+ EL2_REG(ICH_AP0R0_EL2, ich_apr),
+ EL2_REG(ICH_AP0R1_EL2, ich_apr),
+ EL2_REG(ICH_AP0R2_EL2, ich_apr),
+ EL2_REG(ICH_AP0R3_EL2, ich_apr),
+ EL2_REG(ICH_AP1R0_EL2, ich_apr),
+ EL2_REG(ICH_AP1R1_EL2, ich_apr),
+ EL2_REG(ICH_AP1R2_EL2, ich_apr),
+ EL2_REG(ICH_AP1R3_EL2, ich_apr),
+ EL2_REG_RO(ICC_SRE_EL2, icc_sre),
+ EL2_REG(ICH_HCR_EL2, ich_reg),
+ EL2_REG_RO(ICH_VTR_EL2, ich_vtr),
+ EL2_REG(ICH_VMCR_EL2, ich_reg),
+ EL2_REG(ICH_LR0_EL2, ich_reg),
+ EL2_REG(ICH_LR1_EL2, ich_reg),
+ EL2_REG(ICH_LR2_EL2, ich_reg),
+ EL2_REG(ICH_LR3_EL2, ich_reg),
+ EL2_REG(ICH_LR4_EL2, ich_reg),
+ EL2_REG(ICH_LR5_EL2, ich_reg),
+ EL2_REG(ICH_LR6_EL2, ich_reg),
+ EL2_REG(ICH_LR7_EL2, ich_reg),
+ EL2_REG(ICH_LR8_EL2, ich_reg),
+ EL2_REG(ICH_LR9_EL2, ich_reg),
+ EL2_REG(ICH_LR10_EL2, ich_reg),
+ EL2_REG(ICH_LR11_EL2, ich_reg),
+ EL2_REG(ICH_LR12_EL2, ich_reg),
+ EL2_REG(ICH_LR13_EL2, ich_reg),
+ EL2_REG(ICH_LR14_EL2, ich_reg),
+ EL2_REG(ICH_LR15_EL2, ich_reg),
};
+const struct sys_reg_desc *vgic_v3_get_sysreg_table(unsigned int *sz)
+{
+ *sz = ARRAY_SIZE(gic_v3_icc_reg_descs);
+ return gic_v3_icc_reg_descs;
+}
+
static u64 attr_to_id(u64 attr)
{
return ARM64_SYS_REG(FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP0_MASK, attr),
@@ -341,8 +460,12 @@ static u64 attr_to_id(u64 attr)
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
- if (get_reg_by_id(attr_to_id(attr->attr), gic_v3_icc_reg_descs,
- ARRAY_SIZE(gic_v3_icc_reg_descs)))
+ const struct sys_reg_desc *r;
+
+ r = get_reg_by_id(attr_to_id(attr->attr), gic_v3_icc_reg_descs,
+ ARRAY_SIZE(gic_v3_icc_reg_descs));
+
+ if (r && !sysreg_hidden(vcpu, r))
return 0;
return -ENXIO;
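
To make the new table entries concrete, EL2_REG(ICH_HCR_EL2, ich_reg) expands, per the __EL2_REG() macro above (SYS_DESC internals elided), to:

{
	SYS_DESC(SYS_ICH_HCR_EL2),
	.get_user   = get_gic_ich_reg,
	.set_user   = set_gic_ich_reg,
	.reg        = ICH_HCR_EL2,
	.visibility = el2_visibility,
}

EL2_REG_RO() differs only in passing .reg = 0, so the accessors compute the value rather than backing it with vCPU storage.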
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index eb1205654ac8..1e680ad6e863 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -157,6 +157,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
kvm->arch.vgic.in_kernel = true;
kvm->arch.vgic.vgic_model = type;
+ kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST;
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
@@ -165,6 +166,9 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
else
INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
+ if (type == KVM_DEV_TYPE_ARM_VGIC_V3)
+ kvm->arch.vgic.nassgicap = system_supports_direct_sgis();
+
out_unlock:
mutex_unlock(&kvm->arch.config_lock);
kvm_unlock_all_vcpus(kvm);
@@ -391,11 +395,10 @@ int vgic_init(struct kvm *kvm)
goto out;
/*
- * If we have GICv4.1 enabled, unconditionally request enable the
- * v4 support so that we get HW-accelerated vSGIs. Otherwise, only
- * enable it if we present a virtual ITS to the guest.
+ * Ensure vPEs are allocated if direct IRQ injection (e.g. vSGIs,
+ * vLPIs) is supported.
*/
- if (vgic_supports_direct_msis(kvm)) {
+ if (vgic_supports_direct_irqs(kvm)) {
ret = vgic_v4_init(kvm);
if (ret)
goto out;
@@ -409,15 +412,7 @@ int vgic_init(struct kvm *kvm)
goto out;
vgic_debug_init(kvm);
-
- /*
- * If userspace didn't set the GIC implementation revision,
- * default to the latest and greatest. You know want it.
- */
- if (!dist->implementation_rev)
- dist->implementation_rev = KVM_VGIC_IMP_REV_LATEST;
dist->initialized = true;
-
out:
return ret;
}
@@ -443,7 +438,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
}
- if (vgic_supports_direct_msis(kvm))
+ if (vgic_supports_direct_irqs(kvm))
vgic_v4_teardown(kvm);
xa_destroy(&dist->lpi_xa);
@@ -674,10 +669,12 @@ void kvm_vgic_init_cpu_hardware(void)
* We want to make sure the list registers start out clear so that we
* only have to program the used registers.
*/
- if (kvm_vgic_global_state.type == VGIC_V2)
+ if (kvm_vgic_global_state.type == VGIC_V2) {
vgic_v2_init_lrs();
- else
+ } else if (kvm_vgic_global_state.type == VGIC_V3 ||
+ kvm_vgic_global_state.has_gcie_v3_compat) {
kvm_call_hyp(__vgic_v3_init_lrs);
+ }
}
/**
@@ -722,6 +719,9 @@ int kvm_vgic_hyp_init(void)
kvm_info("GIC system register CPU interface enabled\n");
}
break;
+ case GIC_V5:
+ ret = vgic_v5_probe(gic_kvm_info);
+ break;
default:
ret = -ENODEV;
}
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 534049c7c94b..7368c13f16b7 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -758,7 +758,7 @@ static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
if (irq) {
scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
if (irq->hw)
- WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+ its_unmap_vlpi(ite->irq->host_irq);
irq->hw = false;
}
@@ -2694,6 +2694,9 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
case KVM_DEV_ARM_ITS_RESTORE_TABLES:
ret = abi->restore_tables(its);
break;
+ default:
+ ret = -ENXIO;
+ break;
}
mutex_unlock(&its->its_lock);
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index f9ae790163fb..3d1a776b716d 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -5,6 +5,7 @@
* Copyright (C) 2015 ARM Ltd.
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
+#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
@@ -303,12 +304,6 @@ static int vgic_get_common_attr(struct kvm_device *dev,
VGIC_NR_PRIVATE_IRQS, uaddr);
break;
}
- case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
-
- r = put_user(dev->kvm->arch.vgic.mi_intid, uaddr);
- break;
- }
}
return r;
@@ -510,6 +505,24 @@ int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
}
/*
+ * Allow access to certain ID-like registers prior to VGIC initialization,
+ * thereby allowing the VMM to provision the features / sizing of the VGIC.
+ */
+static bool reg_allowed_pre_init(struct kvm_device_attr *attr)
+{
+ if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS)
+ return false;
+
+ switch (attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK) {
+ case GICD_IIDR:
+ case GICD_TYPER2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
* vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
*
* @dev: kvm device handle
@@ -523,7 +536,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
struct vgic_reg_attr reg_attr;
gpa_t addr;
struct kvm_vcpu *vcpu;
- bool uaccess, post_init = true;
+ bool uaccess;
u32 val;
int ret;
@@ -539,9 +552,6 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
/* Sysregs uaccess is performed by the sysreg handling code */
uaccess = false;
break;
- case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
- post_init = false;
- fallthrough;
default:
uaccess = true;
}
@@ -561,7 +571,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->arch.config_lock);
- if (post_init != vgic_initialized(dev->kvm)) {
+ if (!(vgic_initialized(dev->kvm) || reg_allowed_pre_init(attr))) {
ret = -EBUSY;
goto out;
}
@@ -591,19 +601,6 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
}
break;
}
- case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
- if (!is_write) {
- val = dev->kvm->arch.vgic.mi_intid;
- ret = 0;
- break;
- }
-
- ret = -EINVAL;
- if ((val < VGIC_NR_PRIVATE_IRQS) && (val >= VGIC_NR_SGIS)) {
- dev->kvm->arch.vgic.mi_intid = val;
- ret = 0;
- }
- break;
default:
ret = -EINVAL;
break;
@@ -630,8 +627,24 @@ static int vgic_v3_set_attr(struct kvm_device *dev,
case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
- case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
return vgic_v3_attr_regs_access(dev, attr, true);
+ case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: {
+ u32 __user *uaddr = (u32 __user *)attr->addr;
+ u32 val;
+
+ if (get_user(val, uaddr))
+ return -EFAULT;
+
+ guard(mutex)(&dev->kvm->arch.config_lock);
+ if (vgic_initialized(dev->kvm))
+ return -EBUSY;
+
+ if (!irq_is_ppi(val))
+ return -EINVAL;
+
+ dev->kvm->arch.vgic.mi_intid = val;
+ return 0;
+ }
default:
return vgic_set_common_attr(dev, attr);
}
@@ -645,8 +658,13 @@ static int vgic_v3_get_attr(struct kvm_device *dev,
case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
- case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
return vgic_v3_attr_regs_access(dev, attr, false);
+ case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+
+ guard(mutex)(&dev->kvm->arch.config_lock);
+ return put_user(dev->kvm->arch.vgic.mi_intid, uaddr);
+ }
default:
return vgic_get_common_attr(dev, attr);
}
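
From the VMM side, the maintenance IRQ is now an ordinary pre-init attribute with PPI-range validation; a minimal usage sketch (file descriptor and INTID values are hypothetical):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Must be issued before VGIC initialization (-EBUSY afterwards), and
 * 'intid' must be a PPI, i.e. in [16, 32) (-EINVAL otherwise).
 */
static int set_maint_irq(int vgic_fd, uint32_t intid)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ,
		.addr  = (uint64_t)(unsigned long)&intid,
	};

	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}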
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index ae4c0593d114..a3ef185209e9 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -50,8 +50,17 @@ bool vgic_has_its(struct kvm *kvm)
bool vgic_supports_direct_msis(struct kvm *kvm)
{
- return (kvm_vgic_global_state.has_gicv4_1 ||
- (kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
+ return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
+}
+
+bool system_supports_direct_sgis(void)
+{
+ return kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi();
+}
+
+bool vgic_supports_direct_sgis(struct kvm *kvm)
+{
+ return kvm->arch.vgic.nassgicap;
}
/*
@@ -86,7 +95,7 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
}
break;
case GICD_TYPER2:
- if (kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi())
+ if (vgic_supports_direct_sgis(vcpu->kvm))
value = GICD_TYPER2_nASSGIcap;
break;
case GICD_IIDR:
@@ -119,7 +128,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
/* Not a GICv4.1? No HW SGIs */
- if (!kvm_vgic_global_state.has_gicv4_1 || !gic_cpuif_has_vsgi())
+ if (!vgic_supports_direct_sgis(vcpu->kvm))
val &= ~GICD_CTLR_nASSGIreq;
/* Dist stays enabled? nASSGIreq is RO */
@@ -133,7 +142,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
if (is_hwsgi != dist->nassgireq)
vgic_v4_configure_vsgis(vcpu->kvm);
- if (kvm_vgic_global_state.has_gicv4_1 &&
+ if (vgic_supports_direct_sgis(vcpu->kvm) &&
was_enabled != dist->enabled)
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
else if (!was_enabled && dist->enabled)
@@ -159,8 +168,18 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
switch (addr & 0x0c) {
case GICD_TYPER2:
- if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
+ reg = vgic_mmio_read_v3_misc(vcpu, addr, len);
+
+ if (reg == val)
+ return 0;
+ if (vgic_initialized(vcpu->kvm))
+ return -EBUSY;
+ if ((reg ^ val) & ~GICD_TYPER2_nASSGIcap)
return -EINVAL;
+ if (!system_supports_direct_sgis() && val)
+ return -EINVAL;
+
+ dist->nassgicap = val & GICD_TYPER2_nASSGIcap;
return 0;
case GICD_IIDR:
reg = vgic_mmio_read_v3_misc(vcpu, addr, len);
@@ -178,7 +197,7 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
}
case GICD_CTLR:
/* Not a GICv4.1? No HW SGIs */
- if (!kvm_vgic_global_state.has_gicv4_1)
+ if (!vgic_supports_direct_sgis(vcpu->kvm))
val &= ~GICD_CTLR_nASSGIreq;
dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
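
The new GICD_TYPER2 userspace-write rules compress to four checks; a standalone model mirroring the hunk above (the nASSGIcap bit position is an assumption of the model):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define GICD_TYPER2_nASSGIcap	(1u << 8)	/* assumed bit position */

/* reg: current emulated readout; val: value userspace writes. */
static int typer2_uaccess_write(uint32_t reg, uint32_t val,
				bool initialized, bool host_has_vsgi,
				bool *nassgicap)
{
	if (reg == val)				/* no-op always succeeds */
		return 0;
	if (initialized)			/* too late to renegotiate */
		return -EBUSY;
	if ((reg ^ val) & ~GICD_TYPER2_nASSGIcap)
		return -EINVAL;			/* only nASSGIcap may change */
	if (!host_has_vsgi && val)
		return -EINVAL;			/* cannot claim absent hardware */

	*nassgicap = val & GICD_TYPER2_nASSGIcap;
	return 0;
}

int main(void)
{
	bool cap = false;

	/* Succeeds on a vSGI-capable host, flipping the VM's capability. */
	return typer2_uaccess_write(0, GICD_TYPER2_nASSGIcap, false, true, &cap);
}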
diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c
index 679aafe77de2..7f1259b49c50 100644
--- a/arch/arm64/kvm/vgic/vgic-v3-nested.c
+++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c
@@ -116,7 +116,7 @@ bool vgic_state_is_nested(struct kvm_vcpu *vcpu)
{
u64 xmo;
- if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+ if (is_nested_ctxt(vcpu)) {
xmo = __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_IMO | HCR_FMO);
WARN_ONCE(xmo && xmo != (HCR_IMO | HCR_FMO),
"Separate virtual IRQ/FIQ settings not supported\n");
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 193946108192..4d9343d2b0b1 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -356,7 +356,7 @@ int vgic_v4_put(struct kvm_vcpu *vcpu)
{
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
- if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
+ if (!vgic_supports_direct_irqs(vcpu->kvm) || !vpe->resident)
return 0;
return its_make_vpe_non_resident(vpe, vgic_v4_want_doorbell(vcpu));
@@ -367,7 +367,7 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
int err;
- if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
+ if (!vgic_supports_direct_irqs(vcpu->kvm) || vpe->resident)
return 0;
if (vcpu_get_flag(vcpu, IN_WFI))
@@ -527,28 +527,26 @@ static struct vgic_irq *__vgic_host_irq_get_vlpi(struct kvm *kvm, int host_irq)
return NULL;
}
-int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq)
+void kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq)
{
struct vgic_irq *irq;
unsigned long flags;
- int ret = 0;
if (!vgic_supports_direct_msis(kvm))
- return 0;
+ return;
irq = __vgic_host_irq_get_vlpi(kvm, host_irq);
if (!irq)
- return 0;
+ return;
raw_spin_lock_irqsave(&irq->irq_lock, flags);
WARN_ON(irq->hw && irq->host_irq != host_irq);
if (irq->hw) {
atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
irq->hw = false;
- ret = its_unmap_vlpi(host_irq);
+ its_unmap_vlpi(host_irq);
}
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(kvm, irq);
- return ret;
}
diff --git a/arch/arm64/kvm/vgic/vgic-v5.c b/arch/arm64/kvm/vgic/vgic-v5.c
new file mode 100644
index 000000000000..6bdbb221bcde
--- /dev/null
+++ b/arch/arm64/kvm/vgic/vgic-v5.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kvm/arm_vgic.h>
+#include <linux/irqchip/arm-vgic-info.h>
+
+#include "vgic.h"
+
+/*
+ * Probe for a vGICv5 compatible interrupt controller, returning 0 on success.
+ * Currently only supports GICv3-based VMs on a GICv5 host, and hence only
+ * registers a VGIC_V3 device.
+ */
+int vgic_v5_probe(const struct gic_kvm_info *info)
+{
+ u64 ich_vtr_el2;
+ int ret;
+
+ if (!info->has_gcie_v3_compat)
+ return -ENODEV;
+
+ kvm_vgic_global_state.type = VGIC_V5;
+ kvm_vgic_global_state.has_gcie_v3_compat = true;
+
+ /* We only support v3 compat mode - use vGICv3 limits */
+ kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;
+
+ kvm_vgic_global_state.vcpu_base = 0;
+ kvm_vgic_global_state.vctrl_base = NULL;
+ kvm_vgic_global_state.can_emulate_gicv2 = false;
+ kvm_vgic_global_state.has_gicv4 = false;
+ kvm_vgic_global_state.has_gicv4_1 = false;
+
+ ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
+ kvm_vgic_global_state.ich_vtr_el2 = (u32)ich_vtr_el2;
+
+ /*
+ * The ListRegs field is 5 bits, but there is an architectural
+ * maximum of 16 list registers. Just ignore bit 4...
+ */
+ kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
+
+ ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
+ if (ret) {
+ kvm_err("Cannot register GICv3-legacy KVM device.\n");
+ return ret;
+ }
+
+ static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
+ kvm_info("GCIE legacy system register CPU interface\n");
+
+ return 0;
+}
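
Worked example for the list-register derivation in the probe (readout value hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ich_vtr_el2 = 0x90000003;	/* hypothetical readout */

	/* ListRegs encodes (count - 1); masking with 0xf drops bit 4. */
	unsigned int nr_lr = (ich_vtr_el2 & 0xf) + 1;

	printf("%u list registers\n", nr_lr);	/* -> 4 */
	return 0;
}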
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 8f8096d48925..f5148b38120a 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -951,7 +951,7 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
* can be directly injected (GICv4).
*/
if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
- !vgic_supports_direct_msis(vcpu->kvm))
+ !vgic_supports_direct_irqs(vcpu->kvm))
return;
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
@@ -965,7 +965,7 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
if (can_access_vgic_from_kernel())
vgic_restore_state(vcpu);
- if (vgic_supports_direct_msis(vcpu->kvm))
+ if (vgic_supports_direct_irqs(vcpu->kvm))
vgic_v4_commit(vcpu);
}
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 4349084cb9a6..1384a04c0784 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -64,6 +64,24 @@
KVM_REG_ARM_VGIC_SYSREG_CRM_MASK | \
KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)
+#define KVM_ICC_SRE_EL2 (ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE | \
+ ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB)
+#define KVM_ICH_VTR_EL2_RES0 (ICH_VTR_EL2_DVIM | \
+ ICH_VTR_EL2_A3V | \
+ ICH_VTR_EL2_IDbits)
+#define KVM_ICH_VTR_EL2_RES1 ICH_VTR_EL2_nV4
+
+static inline u64 kvm_get_guest_vtr_el2(void)
+{
+ u64 vtr;
+
+ vtr = kvm_vgic_global_state.ich_vtr_el2;
+ vtr &= ~KVM_ICH_VTR_EL2_RES0;
+ vtr |= KVM_ICH_VTR_EL2_RES1;
+
+ return vtr;
+}
+
/*
* As per Documentation/virt/kvm/devices/arm-vgic-its.rst,
* below macros are defined for ITS table entry encoding.
@@ -297,6 +315,7 @@ int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr, bool is_write);
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
+const struct sys_reg_desc *vgic_v3_get_sysreg_table(unsigned int *sz);
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
u32 intid, u32 *val);
int kvm_register_vgic_device(unsigned long type);
@@ -308,6 +327,8 @@ int vgic_init(struct kvm *kvm);
void vgic_debug_init(struct kvm *kvm);
void vgic_debug_destroy(struct kvm *kvm);
+int vgic_v5_probe(const struct gic_kvm_info *info);
+
static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
@@ -369,7 +390,23 @@ void vgic_its_invalidate_all_caches(struct kvm *kvm);
int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq);
int vgic_its_invall(struct kvm_vcpu *vcpu);
+bool system_supports_direct_sgis(void);
bool vgic_supports_direct_msis(struct kvm *kvm);
+bool vgic_supports_direct_sgis(struct kvm *kvm);
+
+static inline bool vgic_supports_direct_irqs(struct kvm *kvm)
+{
+ /*
+ * Deliberately conflate vLPI and vSGI support on GICv4.1 hardware,
+ * indirectly allowing userspace to control whether or not vPEs are
+ * allocated for the VM.
+ */
+ if (system_supports_direct_sgis())
+ return vgic_supports_direct_sgis(kvm);
+
+ return vgic_supports_direct_msis(kvm);
+}
+
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
void vgic_v4_configure_vsgis(struct kvm *kvm);
@@ -389,6 +426,17 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu);
void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu);
void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu);
+static inline bool vgic_is_v3_compat(struct kvm *kvm)
+{
+ return cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF) &&
+ kvm_vgic_global_state.has_gcie_v3_compat;
+}
+
+static inline bool vgic_is_v3(struct kvm *kvm)
+{
+ return kvm_vgic_global_state.type == VGIC_V3 || vgic_is_v3_compat(kvm);
+}
+
int vgic_its_debug_init(struct kvm_device *dev);
void vgic_its_debug_destroy(struct kvm_device *dev);
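
kvm_get_guest_vtr_el2() above simply launders the host value through the RES0/RES1 masks; a runnable model (field positions follow the ICH_VTR_EL2 layout but should be treated as assumptions of the sketch):

#include <stdint.h>
#include <stdio.h>

#define ICH_VTR_EL2_DVIM	(UINT64_C(1) << 18)
#define ICH_VTR_EL2_nV4		(UINT64_C(1) << 20)
#define ICH_VTR_EL2_A3V		(UINT64_C(1) << 21)
#define ICH_VTR_EL2_IDbits	(UINT64_C(7) << 23)

static uint64_t guest_vtr(uint64_t host_vtr)
{
	uint64_t vtr = host_vtr;

	/* Hide features KVM does not emulate for the L1 hypervisor... */
	vtr &= ~(ICH_VTR_EL2_DVIM | ICH_VTR_EL2_A3V | ICH_VTR_EL2_IDbits);
	/* ...and force nV4, i.e. no direct vSGI injection at vEL2. */
	vtr |= ICH_VTR_EL2_nV4;

	return vtr;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)guest_vtr(0x92e00004));
	return 0;
}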
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index bcac4f55f9c1..c0557945939c 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -68,7 +68,144 @@ static void contpte_convert(struct mm_struct *mm, unsigned long addr,
pte = pte_mkyoung(pte);
}
- __flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);
+ /*
+ * On eliding the __flush_tlb_range() under BBML2+noabort:
+ *
+ * NOTE: Instead of using N=16 as the contiguous block length, we use
+ * N=4 for clarity.
+ *
+ * NOTE: 'n' and 'c' are used to denote the "contiguous bit" being
+ * unset and set, respectively.
+ *
+ * We worry about two cases where contiguous bit is used:
+ * - When folding N smaller non-contiguous ptes as 1 contiguous block.
+ * - When unfolding a contiguous block into N smaller non-contiguous ptes.
+ *
+ * Currently, the BBML0 folding case looks as follows:
+ *
+ * 0) Initial page-table layout:
+ *
+ * +----+----+----+----+
+ * |RO,n|RO,n|RO,n|RW,n| <--- last page being set as RO
+ * +----+----+----+----+
+ *
+ * 1) Aggregate AF + dirty flags using __ptep_get_and_clear():
+ *
+ * +----+----+----+----+
+ * | 0 | 0 | 0 | 0 |
+ * +----+----+----+----+
+ *
+ * 2) __flush_tlb_range():
+ *
+ * |____ tlbi + dsb ____|
+ *
+ * 3) __set_ptes() to repaint contiguous block:
+ *
+ * +----+----+----+----+
+ * |RO,c|RO,c|RO,c|RO,c|
+ * +----+----+----+----+
+ *
+ * 4) The kernel will eventually __flush_tlb() for changed page:
+ *
+ * |____| <--- tlbi + dsb
+ *
+ * As expected, the intermediate tlbi+dsb ensures that other PEs
+ * only ever see an invalid (0) entry, or the new contiguous TLB entry.
+ * The final tlbi+dsb will always throw away the newly installed
+ * contiguous TLB entry, which is a micro-optimisation opportunity,
+ * but does not affect correctness.
+ *
+ * In the BBML2 case, the change is avoiding the intermediate tlbi+dsb.
+ * This means a few things, but notably other PEs will still "see" any
+ * stale cached TLB entries. This could lead to a "contiguous bit
+ * misprogramming" issue until the final tlbi+dsb of the changed page,
+ * which would clear out both the stale (RW,n) entry and the new (RO,c)
+ * contiguous entry installed in its place.
+ *
+ * What this is saying is the following:
+ *
+ * +----+----+----+----+
+ * |RO,n|RO,n|RO,n|RW,n| <--- old page tables, all non-contiguous
+ * +----+----+----+----+
+ *
+ * +----+----+----+----+
+ * |RO,c|RO,c|RO,c|RO,c| <--- new page tables, all contiguous
+ * +----+----+----+----+
+ * /\
+ * ||
+ *
+ * If both the old single (RW,n) and new contiguous (RO,c) TLB entries
+ * are present, and a write is made to this address, do we fault or
+ * is the write permitted (via amalgamation)?
+ *
+ * The relevant Arm ARM DDI 0487L.a requirements are RNGLXZ and RJQQTC,
+ * and together state that when BBML1 or BBML2 are implemented, either
+ * a TLB conflict abort is raised (which we expressly forbid), or will
+ * "produce an OA, access permissions, and memory attributes that are
+ * consistent with any of the programmed translation table values".
+ *
+ * That is to say, will either raise a TLB conflict, or produce one of
+ * the cached TLB entries, but never amalgamate.
+ *
+ * Thus, as the page tables are only considered "consistent" after
+ * the final tlbi+dsb (which evicts both the single stale (RW,n) TLB
+ * entry as well as the new contiguous (RO,c) TLB entry), omitting the
+ * initial tlbi+dsb is correct.
+ *
+ * It is also important to note that at the end of the BBML2 folding
+ * case, we are still left with potentially all N TLB entries still
+ * cached (the N-1 non-contiguous ptes, and the single contiguous
+ * block). However, over time, natural TLB pressure will cause the
+ * non-contiguous pte TLB entries to be flushed, leaving only the
+ * contiguous block TLB entry. This means that omitting the tlbi+dsb is
+ * not only correct, but also keeps our eventual performance benefits.
+ *
+ * For the unfolding case, BBML0 looks as follows:
+ *
+ * 0) Initial page-table layout:
+ *
+ * +----+----+----+----+
+ * |RW,c|RW,c|RW,c|RW,c| <--- last page being set as RO
+ * +----+----+----+----+
+ *
+ * 1) Aggregate AF + dirty flags using __ptep_get_and_clear():
+ *
+ * +----+----+----+----+
+ * | 0 | 0 | 0 | 0 |
+ * +----+----+----+----+
+ *
+ * 2) __flush_tlb_range():
+ *
+ * |____ tlbi + dsb ____|
+ *
+ * 3) __set_ptes() to repaint as non-contiguous:
+ *
+ * +----+----+----+----+
+ * |RW,n|RW,n|RW,n|RW,n|
+ * +----+----+----+----+
+ *
+ * 4) Update changed page permissions:
+ *
+ * +----+----+----+----+
+ * |RW,n|RW,n|RW,n|RO,n| <--- last page permissions set
+ * +----+----+----+----+
+ *
+ * 5) The kernel will eventually __flush_tlb() for changed page:
+ *
+ * |____| <--- tlbi + dsb
+ *
+ * For BBML2, we again remove the intermediate tlbi+dsb. Here, there
+ * are no issues, as the final tlbi+dsb covering the changed page is
+ * guaranteed to remove the original large contiguous (RW,c) TLB entry,
+ * as well as the intermediate (RW,n) TLB entry; the next access will
+ * install the new (RO,n) TLB entry and the page tables are only
+ * considered "consistent" after the final tlbi+dsb, so software must
+ * be prepared for this inconsistency prior to finishing the mm dance
+ * regardless.
+ */
+
+ if (!system_supports_bbml2_noabort())
+ __flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);
__set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES);
}
@@ -169,17 +306,46 @@ pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte)
for (i = 0; i < CONT_PTES; i++, ptep++) {
pte = __ptep_get(ptep);
- if (pte_dirty(pte))
+ if (pte_dirty(pte)) {
orig_pte = pte_mkdirty(orig_pte);
-
- if (pte_young(pte))
+ for (; i < CONT_PTES; i++, ptep++) {
+ pte = __ptep_get(ptep);
+ if (pte_young(pte)) {
+ orig_pte = pte_mkyoung(orig_pte);
+ break;
+ }
+ }
+ break;
+ }
+
+ if (pte_young(pte)) {
orig_pte = pte_mkyoung(orig_pte);
+ i++;
+ ptep++;
+ for (; i < CONT_PTES; i++, ptep++) {
+ pte = __ptep_get(ptep);
+ if (pte_dirty(pte)) {
+ orig_pte = pte_mkdirty(orig_pte);
+ break;
+ }
+ }
+ break;
+ }
}
return orig_pte;
}
EXPORT_SYMBOL_GPL(contpte_ptep_get);
+static inline bool contpte_is_consistent(pte_t pte, unsigned long pfn,
+ pgprot_t orig_prot)
+{
+ pgprot_t prot = pte_pgprot(pte_mkold(pte_mkclean(pte)));
+
+ return pte_valid_cont(pte) && pte_pfn(pte) == pfn &&
+ pgprot_val(prot) == pgprot_val(orig_prot);
+}
+
pte_t contpte_ptep_get_lockless(pte_t *orig_ptep)
{
/*
@@ -202,7 +368,6 @@ pte_t contpte_ptep_get_lockless(pte_t *orig_ptep)
pgprot_t orig_prot;
unsigned long pfn;
pte_t orig_pte;
- pgprot_t prot;
pte_t *ptep;
pte_t pte;
int i;
@@ -219,18 +384,44 @@ retry:
for (i = 0; i < CONT_PTES; i++, ptep++, pfn++) {
pte = __ptep_get(ptep);
- prot = pte_pgprot(pte_mkold(pte_mkclean(pte)));
- if (!pte_valid_cont(pte) ||
- pte_pfn(pte) != pfn ||
- pgprot_val(prot) != pgprot_val(orig_prot))
+ if (!contpte_is_consistent(pte, pfn, orig_prot))
goto retry;
- if (pte_dirty(pte))
+ if (pte_dirty(pte)) {
orig_pte = pte_mkdirty(orig_pte);
+ for (; i < CONT_PTES; i++, ptep++, pfn++) {
+ pte = __ptep_get(ptep);
+
+ if (!contpte_is_consistent(pte, pfn, orig_prot))
+ goto retry;
+
+ if (pte_young(pte)) {
+ orig_pte = pte_mkyoung(orig_pte);
+ break;
+ }
+ }
+ break;
+ }
- if (pte_young(pte))
+ if (pte_young(pte)) {
orig_pte = pte_mkyoung(orig_pte);
+ i++;
+ ptep++;
+ pfn++;
+ for (; i < CONT_PTES; i++, ptep++, pfn++) {
+ pte = __ptep_get(ptep);
+
+ if (!contpte_is_consistent(pte, pfn, orig_prot))
+ goto retry;
+
+ if (pte_dirty(pte)) {
+ orig_pte = pte_mkdirty(orig_pte);
+ break;
+ }
+ }
+ break;
+ }
}
return orig_pte;
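
The reworked contpte_ptep_get()/contpte_ptep_get_lockless() loops exploit the fact that only two bits are being aggregated: once dirty (or young) has been seen, the rest of the block only needs scanning for the other bit, and the walk can stop as soon as both are found. The kernel splits this into two specialized inner loops; the condensed equivalent is:

#include <stdbool.h>
#include <stdio.h>

#define CONT_PTES 16

struct pte_model { bool dirty, young; };

/* Fold AF/dirty across a contiguous block, stopping early. */
static struct pte_model fold_flags(const struct pte_model *ptes)
{
	struct pte_model out = { false, false };
	int i;

	for (i = 0; i < CONT_PTES; i++) {
		out.dirty |= ptes[i].dirty;
		out.young |= ptes[i].young;
		if (out.dirty && out.young)
			break;		/* nothing left to learn */
	}

	return out;
}

int main(void)
{
	struct pte_model ptes[CONT_PTES] = {
		[1] = { .dirty = true },
		[3] = { .young = true },
	};
	struct pte_model s = fold_flags(ptes);

	printf("dirty=%d young=%d\n", s.dirty, s.young);	/* 1 1 */
	return 0;
}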
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 11eb8d1adc84..d816ff44faff 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -53,18 +53,12 @@ struct fault_info {
};
static const struct fault_info fault_info[];
-static struct fault_info debug_fault_info[];
static inline const struct fault_info *esr_to_fault_info(unsigned long esr)
{
return fault_info + (esr & ESR_ELx_FSC);
}
-static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr)
-{
- return debug_fault_info + DBG_ESR_EVT(esr);
-}
-
static void data_abort_decode(unsigned long esr)
{
unsigned long iss2 = ESR_ELx_ISS2(esr);
@@ -561,7 +555,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
const struct fault_info *inf;
struct mm_struct *mm = current->mm;
vm_fault_t fault;
- unsigned long vm_flags;
+ vm_flags_t vm_flags;
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
unsigned long addr = untagged_addr(far);
struct vm_area_struct *vma;
@@ -838,6 +832,7 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
*/
siaddr = untagged_addr(far);
}
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
return 0;
@@ -849,9 +844,12 @@ static int do_tag_check_fault(unsigned long far, unsigned long esr,
/*
* The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
* for tag check faults. Set them to corresponding bits in the untagged
- * address.
+ * address if ARM64_MTE_FAR isn't supported.
+ * Otherwise, bits 63:60 of FAR_EL1 are not UNKNOWN.
*/
- far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
+ if (!cpus_have_cap(ARM64_MTE_FAR))
+ far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
+
do_bad_area(far, esr, regs);
return 0;
}
@@ -951,75 +949,6 @@ void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
NOKPROBE_SYMBOL(do_sp_pc_abort);
/*
- * __refdata because early_brk64 is __init, but the reference to it is
- * clobbered at arch_initcall time.
- * See traps.c and debug-monitors.c:debug_traps_init().
- */
-static struct fault_info __refdata debug_fault_info[] = {
- { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
- { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
- { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 3" },
- { do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" },
- { do_bad, SIGKILL, SI_KERNEL, "aarch32 vector catch" },
- { early_brk64, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 7" },
-};
-
-void __init hook_debug_fault_code(int nr,
- int (*fn)(unsigned long, unsigned long, struct pt_regs *),
- int sig, int code, const char *name)
-{
- BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));
-
- debug_fault_info[nr].fn = fn;
- debug_fault_info[nr].sig = sig;
- debug_fault_info[nr].code = code;
- debug_fault_info[nr].name = name;
-}
-
-/*
- * In debug exception context, we explicitly disable preemption despite
- * having interrupts disabled.
- * This serves two purposes: it makes it much less likely that we would
- * accidentally schedule in exception context and it will force a warning
- * if we somehow manage to schedule by accident.
- */
-static void debug_exception_enter(struct pt_regs *regs)
-{
- preempt_disable();
-
- /* This code is a bit fragile. Test it. */
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
-}
-NOKPROBE_SYMBOL(debug_exception_enter);
-
-static void debug_exception_exit(struct pt_regs *regs)
-{
- preempt_enable_no_resched();
-}
-NOKPROBE_SYMBOL(debug_exception_exit);
-
-void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
- struct pt_regs *regs)
-{
- const struct fault_info *inf = esr_to_debug_fault_info(esr);
- unsigned long pc = instruction_pointer(regs);
-
- debug_exception_enter(regs);
-
- if (user_mode(regs) && !is_ttbr0_addr(pc))
- arm64_apply_bp_hardening();
-
- if (inf->fn(addr_if_watchpoint, esr, regs)) {
- arm64_notify_die(inf->name, regs, inf->sig, inf->code, pc, esr);
- }
-
- debug_exception_exit(regs);
-}
-NOKPROBE_SYMBOL(do_debug_exception);
-
-/*
* Used during anonymous page fault handling.
*/
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
diff --git a/arch/arm64/mm/gcs.c b/arch/arm64/mm/gcs.c
index 5c46ec527b1c..6e93f78de79b 100644
--- a/arch/arm64/mm/gcs.c
+++ b/arch/arm64/mm/gcs.c
@@ -157,12 +157,6 @@ void gcs_free(struct task_struct *task)
if (!system_supports_gcs())
return;
- /*
- * When fork() with CLONE_VM fails, the child (tsk) already
- * has a GCS allocated, and exit_thread() calls this function
- * to free it. In this case the parent (current) and the
- * child share the same mm struct.
- */
if (!task->mm || task->mm != current->mm)
return;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 0c8737f4f2ce..1d90a7e75333 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -225,7 +225,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
ncontig = num_contig_ptes(sz, &pgsize);
if (!pte_present(pte)) {
- for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
+ for (i = 0; i < ncontig; i++, ptep++)
__set_ptes_anysz(mm, ptep, pte, 1, pgsize);
return;
}
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 0c8c35dd645e..ea84a61ed508 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -106,7 +106,7 @@ static void __init arch_reserve_crashkernel(void)
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base,
- &low_size, &high);
+ &low_size, NULL, &high);
if (ret)
return;
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index c86c348857c4..08ee177432c2 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -81,7 +81,7 @@ static int __init adjust_protection_map(void)
}
arch_initcall(adjust_protection_map);
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
ptdesc_t prot;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 00ab1d648db6..34e5d78af076 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -26,6 +26,7 @@
#include <linux/set_memory.h>
#include <linux/kfence.h>
#include <linux/pkeys.h>
+#include <linux/mm_inline.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -1524,24 +1525,41 @@ static int __init prevent_bootmem_remove_init(void)
early_initcall(prevent_bootmem_remove_init);
#endif
-pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+pte_t modify_prot_start_ptes(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, unsigned int nr)
{
+ pte_t pte = get_and_clear_ptes(vma->vm_mm, addr, ptep, nr);
+
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
/*
* Break-before-make (BBM) is required for all user space mappings
* when the permission changes from executable to non-executable
* in cases where cpu is affected with errata #2645198.
*/
- if (pte_user_exec(ptep_get(ptep)))
- return ptep_clear_flush(vma, addr, ptep);
+ if (pte_accessible(vma->vm_mm, pte) && pte_user_exec(pte))
+ __flush_tlb_range(vma, addr, nr * PAGE_SIZE,
+ PAGE_SIZE, true, 3);
}
- return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+
+ return pte;
+}
+
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+ return modify_prot_start_ptes(vma, addr, ptep, 1);
+}
+
+void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte,
+ unsigned int nr)
+{
+ set_ptes(vma->vm_mm, addr, ptep, pte, nr);
}
void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
{
- set_pte_at(vma->vm_mm, addr, ptep, pte);
+ modify_prot_commit_ptes(vma, addr, ptep, old_pte, pte, 1);
}
/*
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 54dccfd6aa11..8c75965afc9e 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -454,7 +454,7 @@ SYM_FUNC_START(__cpu_setup)
dsb nsh
msr cpacr_el1, xzr // Reset cpacr_el1
- mov x1, #1 << 12 // Reset mdscr_el1 and disable
+ mov x1, MDSCR_EL1_TDCC // Reset mdscr_el1 and disable
msr mdscr_el1, x1 // access to the DCC from EL0
reset_pmuserenr_el0 x1 // Disable PMU access from EL0
reset_amuserenr_el0 x1 // Disable AMU access from EL0
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
index 68bf1a125502..1e308328c079 100644
--- a/arch/arm64/mm/ptdump_debugfs.c
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
-#include <linux/memory_hotplug.h>
#include <linux/seq_file.h>
#include <asm/ptdump.h>
@@ -9,9 +8,7 @@ static int ptdump_show(struct seq_file *m, void *v)
{
struct ptdump_info *info = m->private;
- get_online_mems();
ptdump_walk(m, info);
- put_online_mems();
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptdump);
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index a3b0e693a125..bbea4f36f9f2 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -325,4 +325,9 @@
#define A64_MRS_SP_EL0(Rt) \
aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_SP_EL0)
+/* Barriers */
+#define A64_SB aarch64_insn_get_sb_value()
+#define A64_DSB_NSH (aarch64_insn_get_dsb_base_value() | 0x7 << 8)
+#define A64_ISB aarch64_insn_get_isb_value()
+
#endif /* _BPF_JIT_H */
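
On the A64_DSB_NSH value: DSB variants differ only in the CRm field (bits [11:8]) of the barrier encoding, and 0x7 selects the non-shareable, all-accesses option. Sketch of the arithmetic (the base opcode is an assumption here; aarch64_insn_get_dsb_base_value() is authoritative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dsb_base = 0xd503309f;		/* assumed: DSB with CRm = 0 */
	uint32_t dsb_nsh  = dsb_base | 0x7 << 8;

	printf("DSB NSH = 0x%08x\n", dsb_nsh);	/* expect 0xd503379f */
	return 0;
}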
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index da8b89dd2910..52ffe115a8c4 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -10,6 +10,7 @@
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bpf.h>
+#include <linux/cfi.h>
#include <linux/filter.h>
#include <linux/memory.h>
#include <linux/printk.h>
@@ -30,6 +31,7 @@
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCCNT_PTR (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
+#define PRIVATE_SP (MAX_BPF_JIT_REG + 4)
#define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
#define check_imm(bits, imm) do { \
@@ -68,6 +70,8 @@ static const int bpf2a64[] = {
[TCCNT_PTR] = A64_R(26),
/* temporary register for blinding constants */
[BPF_REG_AX] = A64_R(9),
+ /* callee saved register for private stack pointer */
+ [PRIVATE_SP] = A64_R(27),
/* callee saved register for kern_vm_start address */
[ARENA_VM_START] = A64_R(28),
};
@@ -86,6 +90,7 @@ struct jit_ctx {
u64 user_vm_start;
u64 arena_vm_start;
bool fp_used;
+ bool priv_sp_used;
bool write;
};
@@ -98,6 +103,10 @@ struct bpf_plt {
#define PLT_TARGET_SIZE sizeof_field(struct bpf_plt, target)
#define PLT_TARGET_OFFSET offsetof(struct bpf_plt, target)
+/* Guard region size/value used to detect private stack overflow/underflow */
+#define PRIV_STACK_GUARD_SZ 16
+#define PRIV_STACK_GUARD_VAL 0xEB9F12345678eb9fULL
+
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
if (ctx->image != NULL && ctx->write)
@@ -106,6 +115,14 @@ static inline void emit(const u32 insn, struct jit_ctx *ctx)
ctx->idx++;
}
+static inline void emit_u32_data(const u32 data, struct jit_ctx *ctx)
+{
+ if (ctx->image != NULL && ctx->write)
+ ctx->image[ctx->idx] = data;
+
+ ctx->idx++;
+}
+
static inline void emit_a64_mov_i(const int is64, const int reg,
const s32 val, struct jit_ctx *ctx)
{
@@ -166,6 +183,12 @@ static inline void emit_bti(u32 insn, struct jit_ctx *ctx)
emit(insn, ctx);
}
+static inline void emit_kcfi(u32 hash, struct jit_ctx *ctx)
+{
+ if (IS_ENABLED(CONFIG_CFI_CLANG))
+ emit_u32_data(hash, ctx);
+}
+
/*
* Kernel addresses in the vmalloc space use at most 48 bits, and the
* remaining bits are guaranteed to be 0x1. So we can compose the address
@@ -387,8 +410,11 @@ static void find_used_callee_regs(struct jit_ctx *ctx)
if (reg_used & 8)
ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_9];
- if (reg_used & 16)
+ if (reg_used & 16) {
ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_FP];
+ if (ctx->priv_sp_used)
+ ctx->used_callee_reg[i++] = bpf2a64[PRIVATE_SP];
+ }
if (ctx->arena_vm_start)
ctx->used_callee_reg[i++] = bpf2a64[ARENA_VM_START];
@@ -412,6 +438,7 @@ static void push_callee_regs(struct jit_ctx *ctx)
emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx);
emit(A64_PUSH(A64_R(25), A64_R(26), A64_SP), ctx);
emit(A64_PUSH(A64_R(27), A64_R(28), A64_SP), ctx);
+ ctx->fp_used = true;
} else {
find_used_callee_regs(ctx);
for (i = 0; i + 1 < ctx->nr_used_callee_reg; i += 2) {
@@ -461,6 +488,19 @@ static void pop_callee_regs(struct jit_ctx *ctx)
}
}
+static void emit_percpu_ptr(const u8 dst_reg, void __percpu *ptr,
+ struct jit_ctx *ctx)
+{
+ const u8 tmp = bpf2a64[TMP_REG_1];
+
+ emit_a64_mov_i64(dst_reg, (__force const u64)ptr, ctx);
+ if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+ emit(A64_MRS_TPIDR_EL2(tmp), ctx);
+ else
+ emit(A64_MRS_TPIDR_EL1(tmp), ctx);
+ emit(A64_ADD(1, dst_reg, dst_reg, tmp), ctx);
+}
+
#define BTI_INSNS (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) ? 1 : 0)
#define PAC_INSNS (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) ? 1 : 0)
@@ -476,7 +516,8 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
const bool is_main_prog = !bpf_is_subprog(prog);
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
- const int idx0 = ctx->idx;
+ const u8 priv_sp = bpf2a64[PRIVATE_SP];
+ void __percpu *priv_stack_ptr;
int cur_offset;
/*
@@ -502,6 +543,9 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
*
*/
+ emit_kcfi(is_main_prog ? cfi_bpf_hash : cfi_bpf_subprog_hash, ctx);
+ const int idx0 = ctx->idx;
+
/* bpf function may be invoked by 3 instruction types:
* 1. bl, attached via freplace to bpf prog via short jump
* 2. br, attached via freplace to bpf prog via long jump
@@ -551,15 +595,23 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit(A64_SUB_I(1, A64_SP, A64_FP, 96), ctx);
}
- if (ctx->fp_used)
- /* Set up BPF prog stack base register */
- emit(A64_MOV(1, fp, A64_SP), ctx);
-
/* Stack must be multiples of 16B */
ctx->stack_size = round_up(prog->aux->stack_depth, 16);
+ if (ctx->fp_used) {
+ if (ctx->priv_sp_used) {
+ /* Set up private stack pointer */
+ priv_stack_ptr = prog->aux->priv_stack_ptr + PRIV_STACK_GUARD_SZ;
+ emit_percpu_ptr(priv_sp, priv_stack_ptr, ctx);
+ emit(A64_ADD_I(1, fp, priv_sp, ctx->stack_size), ctx);
+ } else {
+ /* Set up BPF prog stack base register */
+ emit(A64_MOV(1, fp, A64_SP), ctx);
+ }
+ }
+
/* Set up function call stack */
- if (ctx->stack_size)
+ if (ctx->stack_size && !ctx->priv_sp_used)
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
if (ctx->arena_vm_start)
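The prologue arithmetic above places BPF_REG_FP at the top of the per-CPU region because the stack grows downward; a minimal sketch with assumed values:

#include <stdio.h>

int main(void)
{
	unsigned long priv_sp = 0x9000; /* hypothetical per-CPU stack base */
	unsigned long stack_size = 512; /* round_up(stack_depth, 16) */

	/* mirrors A64_ADD_I(1, fp, priv_sp, ctx->stack_size) */
	unsigned long fp = priv_sp + stack_size;
	printf("FP = %#lx; slots live at FP-8, FP-16, ...\n", fp);
	return 0;
}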
@@ -623,7 +675,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit(A64_STR64I(tcc, ptr, 0), ctx);
/* restore SP */
- if (ctx->stack_size)
+ if (ctx->stack_size && !ctx->priv_sp_used)
emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
pop_callee_regs(ctx);
@@ -991,7 +1043,7 @@ static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
const u8 ptr = bpf2a64[TCCNT_PTR];
/* We're done with BPF stack */
- if (ctx->stack_size)
+ if (ctx->stack_size && !ctx->priv_sp_used)
emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
pop_callee_regs(ctx);
@@ -1120,6 +1172,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
const u8 tmp2 = bpf2a64[TMP_REG_2];
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
+ const u8 priv_sp = bpf2a64[PRIVATE_SP];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
@@ -1564,7 +1617,7 @@ emit_cond_jmp:
src = tmp2;
}
if (src == fp) {
- src_adj = A64_SP;
+ src_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
off_adj = off + ctx->stack_size;
} else {
src_adj = src;
@@ -1630,17 +1683,14 @@ emit_cond_jmp:
return ret;
break;
- /* speculation barrier */
+ /* speculation barrier against Spectre v1 and v4 */
case BPF_ST | BPF_NOSPEC:
- /*
- * Nothing required here.
- *
- * In case of arm64, we rely on the firmware mitigation of
- * Speculative Store Bypass as controlled via the ssbd kernel
- * parameter. Whenever the mitigation is enabled, it works
- * for all of the kernel code with no need to provide any
- * additional instructions.
- */
+ if (alternative_has_cap_likely(ARM64_HAS_SB)) {
+ emit(A64_SB, ctx);
+ } else {
+ emit(A64_DSB_NSH, ctx);
+ emit(A64_ISB, ctx);
+ }
break;
/* ST: *(size *)(dst + off) = imm */
@@ -1657,7 +1707,7 @@ emit_cond_jmp:
dst = tmp2;
}
if (dst == fp) {
- dst_adj = A64_SP;
+ dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
off_adj = off + ctx->stack_size;
} else {
dst_adj = dst;
@@ -1719,7 +1769,7 @@ emit_cond_jmp:
dst = tmp2;
}
if (dst == fp) {
- dst_adj = A64_SP;
+ dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
off_adj = off + ctx->stack_size;
} else {
dst_adj = dst;
@@ -1862,6 +1912,39 @@ static inline void bpf_flush_icache(void *start, void *end)
flush_icache_range((unsigned long)start, (unsigned long)end);
}
+static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
+{
+ int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
+ u64 *stack_ptr;
+
+ for_each_possible_cpu(cpu) {
+ stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
+ stack_ptr[0] = PRIV_STACK_GUARD_VAL;
+ stack_ptr[1] = PRIV_STACK_GUARD_VAL;
+ stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL;
+ stack_ptr[underflow_idx + 1] = PRIV_STACK_GUARD_VAL;
+ }
+}
+
+static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size,
+ struct bpf_prog *prog)
+{
+ int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
+ u64 *stack_ptr;
+
+ for_each_possible_cpu(cpu) {
+ stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
+ if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
+ stack_ptr[1] != PRIV_STACK_GUARD_VAL ||
+ stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL ||
+ stack_ptr[underflow_idx + 1] != PRIV_STACK_GUARD_VAL) {
+ pr_err("BPF private stack overflow/underflow detected for prog %sx\n",
+ bpf_jit_get_prog_name(prog));
+ break;
+ }
+ }
+}
+
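A standalone sketch of the guard-word scheme, assuming the same layout as above: two u64 guards below the usable area and two above it, with the underflow index derived exactly as in priv_stack_check_guard().

#include <stdio.h>
#include <stdint.h>

#define GUARD_VAL 0xEB9F12345678eb9fULL
#define GUARD_SZ  16

int main(void)
{
	uint64_t region[2 + 4 + 2] = { 0 }; /* guards + 4 slots + guards */
	int alloc_size = sizeof(region);
	int underflow_idx = (alloc_size - GUARD_SZ) >> 3;

	region[0] = region[1] = GUARD_VAL;
	region[underflow_idx] = region[underflow_idx + 1] = GUARD_VAL;

	region[1] = 0; /* simulate a write past the stack bottom */
	if (region[0] != GUARD_VAL || region[1] != GUARD_VAL ||
	    region[underflow_idx] != GUARD_VAL ||
	    region[underflow_idx + 1] != GUARD_VAL)
		printf("overflow/underflow detected\n");
	return 0;
}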
struct arm64_jit_data {
struct bpf_binary_header *header;
u8 *ro_image;
@@ -1874,9 +1957,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
int image_size, prog_size, extable_size, extable_align, extable_offset;
struct bpf_prog *tmp, *orig_prog = prog;
struct bpf_binary_header *header;
- struct bpf_binary_header *ro_header;
+ struct bpf_binary_header *ro_header = NULL;
struct arm64_jit_data *jit_data;
+ void __percpu *priv_stack_ptr = NULL;
bool was_classic = bpf_prog_was_classic(prog);
+ int priv_stack_alloc_sz;
bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
@@ -1908,6 +1993,23 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
prog->aux->jit_data = jit_data;
}
+ priv_stack_ptr = prog->aux->priv_stack_ptr;
+ if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) {
+ /* Allocate the actual private stack: the verifier-calculated
+ * stack size plus two guard regions that catch overflow and
+ * underflow.
+ */
+ priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 16) +
+ 2 * PRIV_STACK_GUARD_SZ;
+ priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 16, GFP_KERNEL);
+ if (!priv_stack_ptr) {
+ prog = orig_prog;
+ goto out_priv_stack;
+ }
+
+ priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz);
+ prog->aux->priv_stack_ptr = priv_stack_ptr;
+ }
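Worked example of the size computation just above, with the stack depth assumed to be 100 bytes:

#include <stdio.h>

#define PRIV_STACK_GUARD_SZ 16
#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int stack_depth = 100; /* hypothetical verifier result */
	unsigned int sz = ROUND_UP(stack_depth, 16) + 2 * PRIV_STACK_GUARD_SZ;

	printf("%u\n", sz); /* 112 usable + 32 guard = 144 */
	return 0;
}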
if (jit_data->ctx.offset) {
ctx = jit_data->ctx;
ro_image_ptr = jit_data->ro_image;
@@ -1931,6 +2033,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
+ if (priv_stack_ptr)
+ ctx.priv_sp_used = true;
+
/* Pass 1: Estimate the maximum image size.
*
* BPF line info needs ctx->offset[i] to be the offset of
@@ -2058,9 +2163,9 @@ skip_init_ctx:
jit_data->ro_header = ro_header;
}
- prog->bpf_func = (void *)ctx.ro_image;
+ prog->bpf_func = (void *)ctx.ro_image + cfi_get_offset();
prog->jited = 1;
- prog->jited_len = prog_size;
+ prog->jited_len = prog_size - cfi_get_offset();
if (!prog->is_func || extra_pass) {
int i;
@@ -2070,7 +2175,12 @@ skip_init_ctx:
ctx.offset[i] *= AARCH64_INSN_SIZE;
bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
+ if (!ro_header && priv_stack_ptr) {
+ free_percpu(priv_stack_ptr);
+ prog->aux->priv_stack_ptr = NULL;
+ }
kvfree(ctx.offset);
+out_priv_stack:
kfree(jit_data);
prog->aux->jit_data = NULL;
}
@@ -2089,6 +2199,11 @@ out_free_hdr:
goto out_off;
}
+bool bpf_jit_supports_private_stack(void)
+{
+ return true;
+}
+
bool bpf_jit_supports_kfunc_call(void)
{
return true;
@@ -2243,11 +2358,6 @@ static int calc_arg_aux(const struct btf_func_model *m,
/* the rest arguments are passed through stack */
for (; i < m->nr_args; i++) {
- /* We can not know for sure about exact alignment needs for
- * struct passed on stack, so deny those
- */
- if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
- return -ENOTSUPP;
stack_slots = (m->arg_size[i] + 7) / 8;
a->bstack_for_args += stack_slots * 8;
a->ostack_for_args = a->ostack_for_args + stack_slots * 8;
@@ -2434,6 +2544,12 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
/* return address locates above FP */
retaddr_off = stack_size + 8;
+ if (flags & BPF_TRAMP_F_INDIRECT) {
+ /*
+ * Indirect call for bpf_struct_ops
+ */
+ emit_kcfi(cfi_get_func_hash(func_addr), ctx);
+ }
/* bpf trampoline may be invoked by 3 instruction types:
* 1. bl, attached to bpf prog or kernel function via short jump
* 2. br, attached to bpf prog or kernel function via long jump
@@ -2911,6 +3027,17 @@ bool bpf_jit_supports_percpu_insn(void)
return true;
}
+bool bpf_jit_bypass_spec_v4(void)
+{
+ /* In case of arm64, we rely on the firmware mitigation of Speculative
+ * Store Bypass as controlled via the ssbd kernel parameter. Whenever
+ * the mitigation is enabled, it works for all of the kernel code with
+ * no need to provide any additional instructions. Therefore, skip
+ * inserting nospec insns against Spectre v4.
+ */
+ return true;
+}
+
bool bpf_jit_inlines_helper_call(s32 imm)
{
switch (imm) {
@@ -2928,6 +3055,8 @@ void bpf_jit_free(struct bpf_prog *prog)
if (prog->jited) {
struct arm64_jit_data *jit_data = prog->aux->jit_data;
struct bpf_binary_header *hdr;
+ void __percpu *priv_stack_ptr;
+ int priv_stack_alloc_sz;
/*
* If we fail the final pass of JIT (from jit_subprogs),
@@ -2939,8 +3068,16 @@ void bpf_jit_free(struct bpf_prog *prog)
sizeof(jit_data->header->size));
kfree(jit_data);
}
+ prog->bpf_func -= cfi_get_offset();
hdr = bpf_jit_binary_pack_hdr(prog);
bpf_jit_binary_pack_free(hdr, NULL);
+ priv_stack_ptr = prog->aux->priv_stack_ptr;
+ if (priv_stack_ptr) {
+ priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 16) +
+ 2 * PRIV_STACK_GUARD_SZ;
+ priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog);
+ free_percpu(prog->aux->priv_stack_ptr);
+ }
WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
}
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 10effd4cff6b..ef0b7946f5a4 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -35,7 +35,8 @@ HAS_GENERIC_AUTH
HAS_GENERIC_AUTH_ARCH_QARMA3
HAS_GENERIC_AUTH_ARCH_QARMA5
HAS_GENERIC_AUTH_IMP_DEF
-HAS_GIC_CPUIF_SYSREGS
+HAS_GICV3_CPUIF
+HAS_GICV5_CPUIF
HAS_GIC_PRIO_MASKING
HAS_GIC_PRIO_RELAXED_SYNC
HAS_HCR_NV1
@@ -45,10 +46,12 @@ HAS_LPA2
HAS_LSE_ATOMICS
HAS_MOPS
HAS_NESTED_VIRT
+HAS_BBML2_NOABORT
HAS_PAN
HAS_PMUV3
HAS_S1PIE
HAS_S1POE
+HAS_SCTLR2
HAS_RAS_EXTN
HAS_RNG
HAS_SB
@@ -68,6 +71,8 @@ MPAM
MPAM_HCR
MTE
MTE_ASYMM
+MTE_FAR
+MTE_STORE_ONLY
SME
SME_FA64
SME2
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 8a8cf6874298..696ab1f32a67 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -1314,7 +1314,10 @@ UnsignedEnum 19:16 UINJ
0b0000 NI
0b0001 IMP
EndEnum
-Res0 15:12
+UnsignedEnum 15:12 GCIE
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
UnsignedEnum 11:8 MTEFAR
0b0000 NI
0b0001 IMP
@@ -1329,6 +1332,138 @@ UnsignedEnum 3:0 MTEPERM
EndEnum
EndSysreg
+
+SysregFields BRBINFx_EL1
+Res0 63:47
+Field 46 CCU
+Field 45:40 CC_EXP
+Field 39:32 CC_MANT
+Res0 31:18
+Field 17 LASTFAILED
+Field 16 T
+Res0 15:14
+Enum 13:8 TYPE
+ 0b000000 DIRECT_UNCOND
+ 0b000001 INDIRECT
+ 0b000010 DIRECT_LINK
+ 0b000011 INDIRECT_LINK
+ 0b000101 RET
+ 0b000111 ERET
+ 0b001000 DIRECT_COND
+ 0b100001 DEBUG_HALT
+ 0b100010 CALL
+ 0b100011 TRAP
+ 0b100100 SERROR
+ 0b100110 INSN_DEBUG
+ 0b100111 DATA_DEBUG
+ 0b101010 ALIGN_FAULT
+ 0b101011 INSN_FAULT
+ 0b101100 DATA_FAULT
+ 0b101110 IRQ
+ 0b101111 FIQ
+ 0b110000 IMPDEF_TRAP_EL3
+ 0b111001 DEBUG_EXIT
+EndEnum
+Enum 7:6 EL
+ 0b00 EL0
+ 0b01 EL1
+ 0b10 EL2
+ 0b11 EL3
+EndEnum
+Field 5 MPRED
+Res0 4:2
+Enum 1:0 VALID
+ 0b00 NONE
+ 0b01 TARGET
+ 0b10 SOURCE
+ 0b11 FULL
+EndEnum
+EndSysregFields
+
+SysregFields BRBCR_ELx
+Res0 63:24
+Field 23 EXCEPTION
+Field 22 ERTN
+Res0 21:10
+Field 9 FZPSS
+Field 8 FZP
+Res0 7
+Enum 6:5 TS
+ 0b01 VIRTUAL
+ 0b10 GUEST_PHYSICAL
+ 0b11 PHYSICAL
+EndEnum
+Field 4 MPRED
+Field 3 CC
+Res0 2
+Field 1 ExBRE
+Field 0 E0BRE
+EndSysregFields
+
+Sysreg BRBCR_EL1 2 1 9 0 0
+Fields BRBCR_ELx
+EndSysreg
+
+Sysreg BRBFCR_EL1 2 1 9 0 1
+Res0 63:30
+Enum 29:28 BANK
+ 0b00 BANK_0
+ 0b01 BANK_1
+EndEnum
+Res0 27:23
+Field 22 CONDDIR
+Field 21 DIRCALL
+Field 20 INDCALL
+Field 19 RTN
+Field 18 INDIRECT
+Field 17 DIRECT
+Field 16 EnI
+Res0 15:8
+Field 7 PAUSED
+Field 6 LASTFAILED
+Res0 5:0
+EndSysreg
+
+Sysreg BRBTS_EL1 2 1 9 0 2
+Field 63:0 TS
+EndSysreg
+
+Sysreg BRBINFINJ_EL1 2 1 9 1 0
+Fields BRBINFx_EL1
+EndSysreg
+
+Sysreg BRBSRCINJ_EL1 2 1 9 1 1
+Field 63:0 ADDRESS
+EndSysreg
+
+Sysreg BRBTGTINJ_EL1 2 1 9 1 2
+Field 63:0 ADDRESS
+EndSysreg
+
+Sysreg BRBIDR0_EL1 2 1 9 2 0
+Res0 63:16
+Enum 15:12 CC
+ 0b0101 20_BIT
+EndEnum
+Enum 11:8 FORMAT
+ 0b0000 FORMAT_0
+EndEnum
+Enum 7:0 NUMREC
+ 0b00001000 8
+ 0b00010000 16
+ 0b00100000 32
+ 0b01000000 64
+EndEnum
+EndSysreg
+
+Sysreg BRBCR_EL2 2 4 9 0 0
+Fields BRBCR_ELx
+EndSysreg
+
+Sysreg BRBCR_EL12 2 5 9 0 0
+Fields BRBCR_ELx
+EndSysreg
+
Sysreg ID_AA64ZFR0_EL1 3 0 0 4 4
Res0 63:60
UnsignedEnum 59:56 F64MM
@@ -3021,6 +3156,435 @@ Sysreg PMIAR_EL1 3 0 9 14 7
Field 63:0 ADDRESS
EndSysreg
+SysregFields ICC_PPI_HMRx_EL1
+Field 63 HM63
+Field 62 HM62
+Field 61 HM61
+Field 60 HM60
+Field 59 HM59
+Field 58 HM58
+Field 57 HM57
+Field 56 HM56
+Field 55 HM55
+Field 54 HM54
+Field 53 HM53
+Field 52 HM52
+Field 51 HM51
+Field 50 HM50
+Field 49 HM49
+Field 48 HM48
+Field 47 HM47
+Field 46 HM46
+Field 45 HM45
+Field 44 HM44
+Field 43 HM43
+Field 42 HM42
+Field 41 HM41
+Field 40 HM40
+Field 39 HM39
+Field 38 HM38
+Field 37 HM37
+Field 36 HM36
+Field 35 HM35
+Field 34 HM34
+Field 33 HM33
+Field 32 HM32
+Field 31 HM31
+Field 30 HM30
+Field 29 HM29
+Field 28 HM28
+Field 27 HM27
+Field 26 HM26
+Field 25 HM25
+Field 24 HM24
+Field 23 HM23
+Field 22 HM22
+Field 21 HM21
+Field 20 HM20
+Field 19 HM19
+Field 18 HM18
+Field 17 HM17
+Field 16 HM16
+Field 15 HM15
+Field 14 HM14
+Field 13 HM13
+Field 12 HM12
+Field 11 HM11
+Field 10 HM10
+Field 9 HM9
+Field 8 HM8
+Field 7 HM7
+Field 6 HM6
+Field 5 HM5
+Field 4 HM4
+Field 3 HM3
+Field 2 HM2
+Field 1 HM1
+Field 0 HM0
+EndSysregFields
+
+Sysreg ICC_PPI_HMR0_EL1 3 0 12 10 0
+Fields ICC_PPI_HMRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_HMR1_EL1 3 0 12 10 1
+Fields ICC_PPI_HMRx_EL1
+EndSysreg
+
+Sysreg ICC_IDR0_EL1 3 0 12 10 2
+Res0 63:12
+UnsignedEnum 11:8 GCIE_LEGACY
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 7:4 PRI_BITS
+ 0b0011 4BITS
+ 0b0100 5BITS
+EndEnum
+UnsignedEnum 3:0 ID_BITS
+ 0b0000 16BITS
+ 0b0001 24BITS
+EndEnum
+EndSysreg
+
+Sysreg ICC_ICSR_EL1 3 0 12 10 4
+Res0 63:48
+Field 47:32 IAFFID
+Res0 31:16
+Field 15:11 Priority
+Res0 10:6
+Field 5 HM
+Field 4 Active
+Field 3 IRM
+Field 2 Pending
+Field 1 Enabled
+Field 0 F
+EndSysreg
+
+SysregFields ICC_PPI_ENABLERx_EL1
+Field 63 EN63
+Field 62 EN62
+Field 61 EN61
+Field 60 EN60
+Field 59 EN59
+Field 58 EN58
+Field 57 EN57
+Field 56 EN56
+Field 55 EN55
+Field 54 EN54
+Field 53 EN53
+Field 52 EN52
+Field 51 EN51
+Field 50 EN50
+Field 49 EN49
+Field 48 EN48
+Field 47 EN47
+Field 46 EN46
+Field 45 EN45
+Field 44 EN44
+Field 43 EN43
+Field 42 EN42
+Field 41 EN41
+Field 40 EN40
+Field 39 EN39
+Field 38 EN38
+Field 37 EN37
+Field 36 EN36
+Field 35 EN35
+Field 34 EN34
+Field 33 EN33
+Field 32 EN32
+Field 31 EN31
+Field 30 EN30
+Field 29 EN29
+Field 28 EN28
+Field 27 EN27
+Field 26 EN26
+Field 25 EN25
+Field 24 EN24
+Field 23 EN23
+Field 22 EN22
+Field 21 EN21
+Field 20 EN20
+Field 19 EN19
+Field 18 EN18
+Field 17 EN17
+Field 16 EN16
+Field 15 EN15
+Field 14 EN14
+Field 13 EN13
+Field 12 EN12
+Field 11 EN11
+Field 10 EN10
+Field 9 EN9
+Field 8 EN8
+Field 7 EN7
+Field 6 EN6
+Field 5 EN5
+Field 4 EN4
+Field 3 EN3
+Field 2 EN2
+Field 1 EN1
+Field 0 EN0
+EndSysregFields
+
+Sysreg ICC_PPI_ENABLER0_EL1 3 0 12 10 6
+Fields ICC_PPI_ENABLERx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_ENABLER1_EL1 3 0 12 10 7
+Fields ICC_PPI_ENABLERx_EL1
+EndSysreg
+
+SysregFields ICC_PPI_ACTIVERx_EL1
+Field 63 Active63
+Field 62 Active62
+Field 61 Active61
+Field 60 Active60
+Field 59 Active59
+Field 58 Active58
+Field 57 Active57
+Field 56 Active56
+Field 55 Active55
+Field 54 Active54
+Field 53 Active53
+Field 52 Active52
+Field 51 Active51
+Field 50 Active50
+Field 49 Active49
+Field 48 Active48
+Field 47 Active47
+Field 46 Active46
+Field 45 Active45
+Field 44 Active44
+Field 43 Active43
+Field 42 Active42
+Field 41 Active41
+Field 40 Active40
+Field 39 Active39
+Field 38 Active38
+Field 37 Active37
+Field 36 Active36
+Field 35 Active35
+Field 34 Active34
+Field 33 Active33
+Field 32 Active32
+Field 31 Active31
+Field 30 Active30
+Field 29 Active29
+Field 28 Active28
+Field 27 Active27
+Field 26 Active26
+Field 25 Active25
+Field 24 Active24
+Field 23 Active23
+Field 22 Active22
+Field 21 Active21
+Field 20 Active20
+Field 19 Active19
+Field 18 Active18
+Field 17 Active17
+Field 16 Active16
+Field 15 Active15
+Field 14 Active14
+Field 13 Active13
+Field 12 Active12
+Field 11 Active11
+Field 10 Active10
+Field 9 Active9
+Field 8 Active8
+Field 7 Active7
+Field 6 Active6
+Field 5 Active5
+Field 4 Active4
+Field 3 Active3
+Field 2 Active2
+Field 1 Active1
+Field 0 Active0
+EndSysregFields
+
+Sysreg ICC_PPI_CACTIVER0_EL1 3 0 12 13 0
+Fields ICC_PPI_ACTIVERx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_CACTIVER1_EL1 3 0 12 13 1
+Fields ICC_PPI_ACTIVERx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_SACTIVER0_EL1 3 0 12 13 2
+Fields ICC_PPI_ACTIVERx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_SACTIVER1_EL1 3 0 12 13 3
+Fields ICC_PPI_ACTIVERx_EL1
+EndSysreg
+
+SysregFields ICC_PPI_PENDRx_EL1
+Field 63 Pend63
+Field 62 Pend62
+Field 61 Pend61
+Field 60 Pend60
+Field 59 Pend59
+Field 58 Pend58
+Field 57 Pend57
+Field 56 Pend56
+Field 55 Pend55
+Field 54 Pend54
+Field 53 Pend53
+Field 52 Pend52
+Field 51 Pend51
+Field 50 Pend50
+Field 49 Pend49
+Field 48 Pend48
+Field 47 Pend47
+Field 46 Pend46
+Field 45 Pend45
+Field 44 Pend44
+Field 43 Pend43
+Field 42 Pend42
+Field 41 Pend41
+Field 40 Pend40
+Field 39 Pend39
+Field 38 Pend38
+Field 37 Pend37
+Field 36 Pend36
+Field 35 Pend35
+Field 34 Pend34
+Field 33 Pend33
+Field 32 Pend32
+Field 31 Pend31
+Field 30 Pend30
+Field 29 Pend29
+Field 28 Pend28
+Field 27 Pend27
+Field 26 Pend26
+Field 25 Pend25
+Field 24 Pend24
+Field 23 Pend23
+Field 22 Pend22
+Field 21 Pend21
+Field 20 Pend20
+Field 19 Pend19
+Field 18 Pend18
+Field 17 Pend17
+Field 16 Pend16
+Field 15 Pend15
+Field 14 Pend14
+Field 13 Pend13
+Field 12 Pend12
+Field 11 Pend11
+Field 10 Pend10
+Field 9 Pend9
+Field 8 Pend8
+Field 7 Pend7
+Field 6 Pend6
+Field 5 Pend5
+Field 4 Pend4
+Field 3 Pend3
+Field 2 Pend2
+Field 1 Pend1
+Field 0 Pend0
+EndSysregFields
+
+Sysreg ICC_PPI_CPENDR0_EL1 3 0 12 13 4
+Fields ICC_PPI_PENDRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_CPENDR1_EL1 3 0 12 13 5
+Fields ICC_PPI_PENDRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_SPENDR0_EL1 3 0 12 13 6
+Fields ICC_PPI_PENDRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_SPENDR1_EL1 3 0 12 13 7
+Fields ICC_PPI_PENDRx_EL1
+EndSysreg
+
+SysregFields ICC_PPI_PRIORITYRx_EL1
+Res0 63:61
+Field 60:56 Priority7
+Res0 55:53
+Field 52:48 Priority6
+Res0 47:45
+Field 44:40 Priority5
+Res0 39:37
+Field 36:32 Priority4
+Res0 31:29
+Field 28:24 Priority3
+Res0 23:21
+Field 20:16 Priority2
+Res0 15:13
+Field 12:8 Priority1
+Res0 7:5
+Field 4:0 Priority0
+EndSysregFields
+
+Sysreg ICC_PPI_PRIORITYR0_EL1 3 0 12 14 0
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR1_EL1 3 0 12 14 1
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR2_EL1 3 0 12 14 2
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR3_EL1 3 0 12 14 3
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR4_EL1 3 0 12 14 4
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR5_EL1 3 0 12 14 5
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR6_EL1 3 0 12 14 6
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR7_EL1 3 0 12 14 7
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR8_EL1 3 0 12 15 0
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR9_EL1 3 0 12 15 1
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR10_EL1 3 0 12 15 2
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR11_EL1 3 0 12 15 3
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR12_EL1 3 0 12 15 4
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR13_EL1 3 0 12 15 5
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR14_EL1 3 0 12 15 6
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
+Sysreg ICC_PPI_PRIORITYR15_EL1 3 0 12 15 7
+Fields ICC_PPI_PRIORITYRx_EL1
+EndSysreg
+
Sysreg PMSELR_EL0 3 3 9 12 5
Res0 63:5
Field 4:0 SEL
@@ -3103,6 +3667,19 @@ Res0 14:12
Field 11:0 AFFINITY
EndSysreg
+Sysreg ICC_CR0_EL1 3 1 12 0 1
+Res0 63:39
+Field 38 PID
+Field 37:32 IPPT
+Res0 31:1
+Field 0 EN
+EndSysreg
+
+Sysreg ICC_PCR_EL1 3 1 12 0 2
+Res0 63:5
+Field 4:0 PRIORITY
+EndSysreg
+
Sysreg CSSELR_EL1 3 2 0 0 0
Res0 63:5
Field 4 TnD
@@ -3989,6 +4566,54 @@ Field 31:16 PhyPARTID29
Field 15:0 PhyPARTID28
EndSysreg
+Sysreg ICH_HFGRTR_EL2 3 4 12 9 4
+Res0 63:21
+Field 20 ICC_PPI_ACTIVERn_EL1
+Field 19 ICC_PPI_PRIORITYRn_EL1
+Field 18 ICC_PPI_PENDRn_EL1
+Field 17 ICC_PPI_ENABLERn_EL1
+Field 16 ICC_PPI_HMRn_EL1
+Res0 15:8
+Field 7 ICC_IAFFIDR_EL1
+Field 6 ICC_ICSR_EL1
+Field 5 ICC_PCR_EL1
+Field 4 ICC_HPPIR_EL1
+Field 3 ICC_HAPR_EL1
+Field 2 ICC_CR0_EL1
+Field 1 ICC_IDRn_EL1
+Field 0 ICC_APR_EL1
+EndSysreg
+
+Sysreg ICH_HFGWTR_EL2 3 4 12 9 6
+Res0 63:21
+Field 20 ICC_PPI_ACTIVERn_EL1
+Field 19 ICC_PPI_PRIORITYRn_EL1
+Field 18 ICC_PPI_PENDRn_EL1
+Field 17 ICC_PPI_ENABLERn_EL1
+Res0 16:7
+Field 6 ICC_ICSR_EL1
+Field 5 ICC_PCR_EL1
+Res0 4:3
+Field 2 ICC_CR0_EL1
+Res0 1
+Field 0 ICC_APR_EL1
+EndSysreg
+
+Sysreg ICH_HFGITR_EL2 3 4 12 9 7
+Res0 63:11
+Field 10 GICRCDNMIA
+Field 9 GICRCDIA
+Field 8 GICCDDI
+Field 7 GICCDEOI
+Field 6 GICCDHM
+Field 5 GICCDRCFG
+Field 4 GICCDPEND
+Field 3 GICCDAFF
+Field 2 GICCDPRI
+Field 1 GICCDDIS
+Field 0 GICCDEN
+EndSysreg
+
Sysreg ICH_HCR_EL2 3 4 12 11 0
Res0 63:32
Field 31:27 EOIcount
@@ -4037,6 +4662,12 @@ Field 1 U
Field 0 EOI
EndSysreg
+Sysreg ICH_VCTLR_EL2 3 4 12 11 4
+Res0 63:2
+Field 1 V3
+Field 0 En
+EndSysreg
+
Sysreg CONTEXTIDR_EL2 3 4 13 0 1
Fields CONTEXTIDR_ELx
EndSysreg
@@ -4150,7 +4781,13 @@ Mapping TCR_EL1
EndSysreg
Sysreg TCR2_EL1 3 0 2 0 3
-Res0 63:16
+Res0 63:22
+Field 21 FNGNA1
+Field 20 FNGNA0
+Res0 19
+Field 18 FNG1
+Field 17 FNG0
+Field 16 A2
Field 15 DisCH1
Field 14 DisCH0
Res0 13:12
@@ -4174,7 +4811,10 @@ Mapping TCR2_EL1
EndSysreg
Sysreg TCR2_EL2 3 4 2 0 3
-Res0 63:16
+Res0 63:19
+Field 18 FNG1
+Field 17 FNG0
+Field 16 A2
Field 15 DisCH1
Field 14 DisCH0
Field 13 AMEC1
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index acc431c331b0..4331313a42ff 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -80,7 +80,6 @@ config CSKY
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_ERROR_INJECTION
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index fa3eb87f0999..f0abc38c40ac 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -24,7 +24,6 @@ config LOONGARCH
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PREEMPT_LAZY
- select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SET_DIRECT_MAP
@@ -143,7 +142,6 @@ config LOONGARCH
select HAVE_EXIT_THREAD
select HAVE_GUP_FAST
select HAVE_FTRACE_GRAPH_FUNC
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_FREGS
diff --git a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
index a34734a6c3ce..018ed904352a 100644
--- a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
@@ -41,6 +41,15 @@
};
};
+&apbdma3 {
+ status = "okay";
+};
+
+&mmc0 {
+ status = "okay";
+ bus-width = <4>;
+};
+
&gmac0 {
status = "okay";
diff --git a/arch/loongarch/boot/dts/loongson-2k0500.dtsi b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
index 760c60eebb89..588ebc3bded4 100644
--- a/arch/loongarch/boot/dts/loongson-2k0500.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
@@ -104,7 +104,7 @@
status = "disabled";
};
- dma-controller@1fe10c20 {
+ apbdma2: dma-controller@1fe10c20 {
compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
reg = <0 0x1fe10c20 0 0x8>;
interrupt-parent = <&eiointc>;
@@ -114,7 +114,7 @@
status = "disabled";
};
- dma-controller@1fe10c30 {
+ apbdma3: dma-controller@1fe10c30 {
compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
reg = <0 0x1fe10c30 0 0x8>;
interrupt-parent = <&eiointc>;
@@ -437,6 +437,30 @@
status = "disabled";
};
+ mmc0: mmc@1ff64000 {
+ compatible = "loongson,ls2k0500-mmc";
+ reg = <0 0x1ff64000 0 0x2000>,
+ <0 0x1fe10100 0 0x4>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <57>;
+ dmas = <&apbdma3 0>;
+ dma-names = "rx-tx";
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ status = "disabled";
+ };
+
+ mmc@1ff66000 {
+ compatible = "loongson,ls2k0500-mmc";
+ reg = <0 0x1ff66000 0 0x2000>,
+ <0 0x1fe10100 0 0x4>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <58>;
+ dmas = <&apbdma2 0>;
+ dma-names = "rx-tx";
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ status = "disabled";
+ };
+
pmc: power-management@1ff6c000 {
compatible = "loongson,ls2k0500-pmc", "syscon";
reg = <0x0 0x1ff6c000 0x0 0x58>;
diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
index 78ea995abf1c..d9a452ada5d7 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
@@ -48,6 +48,19 @@
};
};
+&apbdma1 {
+ status = "okay";
+};
+
+&mmc {
+ status = "okay";
+
+ pinctrl-0 = <&sdio_pins_default>;
+ pinctrl-names = "default";
+ bus-width = <4>;
+ cd-gpios = <&gpio0 22 GPIO_ACTIVE_LOW>;
+};
+
&gmac0 {
status = "okay";
diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
index 1da3beb00f0e..d8e01e2534dd 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
@@ -187,14 +187,14 @@
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
- <>,
- <26 IRQ_TYPE_LEVEL_HIGH>,
+ <0 IRQ_TYPE_NONE>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_NONE>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
@@ -209,13 +209,13 @@
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
- <>,
+ <0 IRQ_TYPE_NONE>,
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
- <>,
- <>,
+ <0 IRQ_TYPE_NONE>,
+ <0 IRQ_TYPE_NONE>,
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
@@ -256,7 +256,7 @@
status = "disabled";
};
- dma-controller@1fe00c10 {
+ apbdma1: dma-controller@1fe00c10 {
compatible = "loongson,ls2k1000-apbdma";
reg = <0x0 0x1fe00c10 0x0 0x8>;
interrupt-parent = <&liointc1>;
@@ -405,6 +405,18 @@
status = "disabled";
};
+ mmc: mmc@1fe2c000 {
+ compatible = "loongson,ls2k1000-mmc";
+ reg = <0 0x1fe2c000 0 0x68>,
+ <0 0x1fe00438 0 0x8>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ dmas = <&apbdma1 0>;
+ dma-names = "rx-tx";
+ status = "disabled";
+ };
+
spi0: spi@1fff0220 {
compatible = "loongson,ls2k1000-spi";
reg = <0x0 0x1fff0220 0x0 0x10>;
diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
index ea9e6985d0e9..3c6b12220386 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
@@ -39,6 +39,16 @@
};
};
+&emmc {
+ status = "okay";
+
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ mmc-hs200-1_8v;
+ no-sd;
+ no-sdio;
+};
+
&sata {
status = "okay";
};
diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
index 9e0411f2754c..00cc485b753b 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
@@ -259,6 +259,24 @@
status = "disabled";
};
+ emmc: mmc@79990000 {
+ compatible = "loongson,ls2k2000-mmc";
+ reg = <0x0 0x79990000 0x0 0x1000>;
+ interrupt-parent = <&pic>;
+ interrupts = <51 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_EMMC_CLK>;
+ status = "disabled";
+ };
+
+ mmc@79991000 {
+ compatible = "loongson,ls2k2000-mmc";
+ reg = <0x0 0x79991000 0x0 0x1000>;
+ interrupt-parent = <&pic>;
+ interrupts = <50 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_EMMC_CLK>;
+ status = "disabled";
+ };
+
pcie@1a000000 {
compatible = "loongson,ls2k-pci";
reg = <0x0 0x1a000000 0x0 0x02000000>,
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index 0d59af6007b7..34eaee0384c9 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -225,7 +225,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_CPU=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
@@ -784,8 +783,23 @@ CONFIG_SND_HDA_HWDEP=y
CONFIG_SND_HDA_INPUT_BEEP=y
CONFIG_SND_HDA_PATCH_LOADER=y
CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_REALTEK_LIB=y
+CONFIG_SND_HDA_CODEC_ALC260=y
+CONFIG_SND_HDA_CODEC_ALC262=y
+CONFIG_SND_HDA_CODEC_ALC268=y
+CONFIG_SND_HDA_CODEC_ALC269=y
+CONFIG_SND_HDA_CODEC_ALC662=y
+CONFIG_SND_HDA_CODEC_ALC680=y
+CONFIG_SND_HDA_CODEC_ALC861=y
+CONFIG_SND_HDA_CODEC_ALC861VD=y
+CONFIG_SND_HDA_CODEC_ALC880=y
+CONFIG_SND_HDA_CODEC_ALC882=y
CONFIG_SND_HDA_CODEC_SIGMATEL=y
CONFIG_SND_HDA_CODEC_HDMI=y
+CONFIG_SND_HDA_CODEC_HDMI_GENERIC=y
+CONFIG_SND_HDA_CODEC_HDMI_INTEL=y
+CONFIG_SND_HDA_CODEC_HDMI_ATI=y
+CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y
CONFIG_SND_HDA_CODEC_CONEXANT=y
CONFIG_SND_USB_AUDIO=m
CONFIG_SND_SOC=m
diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h
index 4dc4b3e04225..ab68b594f889 100644
--- a/arch/loongarch/include/asm/hugetlb.h
+++ b/arch/loongarch/include/asm/hugetlb.h
@@ -10,20 +10,6 @@
uint64_t pmd_to_entrylo(unsigned long pmd_val);
-#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr,
- unsigned long len)
-{
- unsigned long task_size = STACK_TOP;
-
- if (len > task_size)
- return -ENOMEM;
- if (task_size - len < addr)
- return -EINVAL;
- return 0;
-}
-
#define __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index 3089785ca97e..277d2140676b 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -497,6 +497,7 @@ void arch_simulate_insn(union loongarch_instruction insn, struct pt_regs *regs);
int larch_insn_read(void *addr, u32 *insnp);
int larch_insn_write(void *addr, u32 insn);
int larch_insn_patch_text(void *addr, u32 insn);
+int larch_insn_text_copy(void *dst, void *src, size_t len);
u32 larch_insn_gen_nop(void);
u32 larch_insn_gen_b(unsigned long pc, unsigned long dest);
@@ -510,6 +511,8 @@ u32 larch_insn_gen_move(enum loongarch_gpr rd, enum loongarch_gpr rj);
u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm);
u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
+u32 larch_insn_gen_beq(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
+u32 larch_insn_gen_bne(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
static inline bool signed_imm_check(long val, unsigned int bit)
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index a3c4cc46c892..0cecbd038bb3 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -50,12 +50,6 @@ struct kvm_vm_stat {
struct kvm_vm_stat_generic generic;
u64 pages;
u64 hugepages;
- u64 ipi_read_exits;
- u64 ipi_write_exits;
- u64 eiointc_read_exits;
- u64 eiointc_write_exits;
- u64 pch_pic_read_exits;
- u64 pch_pic_write_exits;
};
struct kvm_vcpu_stat {
@@ -65,6 +59,12 @@ struct kvm_vcpu_stat {
u64 cpucfg_exits;
u64 signal_exits;
u64 hypercall_exits;
+ u64 ipi_read_exits;
+ u64 ipi_write_exits;
+ u64 eiointc_read_exits;
+ u64 eiointc_write_exits;
+ u64 pch_pic_read_exits;
+ u64 pch_pic_write_exits;
};
#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0)
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index a0994d226eff..09dfd7eb406e 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -451,6 +451,13 @@
#define LOONGARCH_CSR_KS6 0x36
#define LOONGARCH_CSR_KS7 0x37
#define LOONGARCH_CSR_KS8 0x38
+#define LOONGARCH_CSR_KS9 0x39
+#define LOONGARCH_CSR_KS10 0x3a
+#define LOONGARCH_CSR_KS11 0x3b
+#define LOONGARCH_CSR_KS12 0x3c
+#define LOONGARCH_CSR_KS13 0x3d
+#define LOONGARCH_CSR_KS14 0x3e
+#define LOONGARCH_CSR_KS15 0x3f
/* Exception allocated KS0, KS1 and KS2 statically */
#define EXCEPTION_KS0 LOONGARCH_CSR_KS0
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 7bbfb04a54cc..2fc3789220ac 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -22,7 +22,6 @@
#define _PAGE_PFN_SHIFT 12
#define _PAGE_SWP_EXCLUSIVE_SHIFT 23
#define _PAGE_PFN_END_SHIFT 48
-#define _PAGE_DEVMAP_SHIFT 59
#define _PAGE_PRESENT_INVALID_SHIFT 60
#define _PAGE_NO_READ_SHIFT 61
#define _PAGE_NO_EXEC_SHIFT 62
@@ -36,7 +35,6 @@
#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
-#define _PAGE_DEVMAP (_ULCAST_(1) << _PAGE_DEVMAP_SHIFT)
/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT)
@@ -76,8 +74,8 @@
#define __READABLE (_PAGE_VALID)
#define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE)
-#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
-#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
+#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
+#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \
_PAGE_USER | _CACHE_CC)
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index f2aeff544cee..bd128696e96d 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -409,9 +409,6 @@ static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL;
static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
-static inline int pte_devmap(pte_t pte) { return !!(pte_val(pte) & _PAGE_DEVMAP); }
-static inline pte_t pte_mkdevmap(pte_t pte) { pte_val(pte) |= _PAGE_DEVMAP; return pte; }
-
#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
@@ -540,17 +537,6 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
return pmd;
}
-static inline int pmd_devmap(pmd_t pmd)
-{
- return !!(pmd_val(pmd) & _PAGE_DEVMAP);
-}
-
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
- pmd_val(pmd) |= _PAGE_DEVMAP;
- return pmd;
-}
-
static inline struct page *pmd_page(pmd_t pmd)
{
if (pmd_trans_huge(pmd))
@@ -606,11 +592,6 @@ static inline long pmd_protnone(pmd_t pmd)
#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define pud_devmap(pud) (0)
-#define pgd_devmap(pgd) (0)
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
/*
* We provide our own get_unmapped area to cope with the virtual aliasing
* constraints placed on us by the cache architecture.
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
index 27144de5c5fe..c0a5dc9aeae2 100644
--- a/arch/loongarch/kernel/env.c
+++ b/arch/loongarch/kernel/env.c
@@ -39,16 +39,19 @@ void __init init_environ(void)
static int __init init_cpu_fullname(void)
{
- struct device_node *root;
int cpu, ret;
- char *model;
+ char *cpuname;
+ const char *model;
+ struct device_node *root;
/* Parsing cpuname from DTS model property */
root = of_find_node_by_path("/");
- ret = of_property_read_string(root, "model", (const char **)&model);
+ ret = of_property_read_string(root, "model", &model);
+ if (ret == 0) {
+ cpuname = kstrdup(model, GFP_KERNEL);
+ loongson_sysconf.cpuname = strsep(&cpuname, " ");
+ }
of_node_put(root);
- if (ret == 0)
- loongson_sysconf.cpuname = strsep(&model, " ");
if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) {
for (cpu = 0; cpu < NR_CPUS; cpu++)
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
index 14d7d700bcb9..72ecfed29d55 100644
--- a/arch/loongarch/kernel/inst.c
+++ b/arch/loongarch/kernel/inst.c
@@ -4,6 +4,8 @@
*/
#include <linux/sizes.h>
#include <linux/uaccess.h>
+#include <linux/set_memory.h>
+#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
@@ -218,6 +220,50 @@ int larch_insn_patch_text(void *addr, u32 insn)
return ret;
}
+struct insn_copy {
+ void *dst;
+ void *src;
+ size_t len;
+ unsigned int cpu;
+};
+
+static int text_copy_cb(void *data)
+{
+ int ret = 0;
+ struct insn_copy *copy = data;
+
+ if (smp_processor_id() == copy->cpu) {
+ ret = copy_to_kernel_nofault(copy->dst, copy->src, copy->len);
+ if (ret)
+ pr_err("%s: operation failed\n", __func__);
+ }
+
+ flush_icache_range((unsigned long)copy->dst, (unsigned long)copy->dst + copy->len);
+
+ return ret;
+}
+
+int larch_insn_text_copy(void *dst, void *src, size_t len)
+{
+ int ret = 0;
+ size_t start, end;
+ struct insn_copy copy = {
+ .dst = dst,
+ .src = src,
+ .len = len,
+ .cpu = smp_processor_id(),
+ };
+
+ start = round_down((size_t)dst, PAGE_SIZE);
+ end = round_up((size_t)dst + len, PAGE_SIZE);
+
+ set_memory_rw(start, (end - start) / PAGE_SIZE);
+ ret = stop_machine(text_copy_cb, &copy, cpu_online_mask);
+ set_memory_rox(start, (end - start) / PAGE_SIZE);
+
+ return ret;
+}
+
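A standalone sketch of the page-range rounding done before flipping permissions in larch_insn_text_copy(); 16K pages are assumed here, though LoongArch supports other sizes.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 16384UL /* assumption */

int main(void)
{
	size_t dst = 0x451234, len = 300;      /* hypothetical */
	size_t start = dst & ~(PAGE_SIZE - 1); /* round_down */
	size_t end = (dst + len + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

	printf("%lu page(s) set RW, then restored to ROX\n",
	       (unsigned long)((end - start) / PAGE_SIZE));
	return 0;
}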
u32 larch_insn_gen_nop(void)
{
return INSN_NOP;
@@ -323,6 +369,34 @@ u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
return insn.word;
}
+u32 larch_insn_gen_beq(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
+{
+ union loongarch_instruction insn;
+
+ if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) {
+ pr_warn("The generated beq instruction is out of range.\n");
+ return INSN_BREAK;
+ }
+
+ emit_beq(&insn, rj, rd, imm >> 2);
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_bne(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
+{
+ union loongarch_instruction insn;
+
+ if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) {
+ pr_warn("The generated bne instruction is out of range.\n");
+ return INSN_BREAK;
+ }
+
+ emit_bne(&insn, rj, rd, imm >> 2);
+
+ return insn.word;
+}
+
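The 16-bit branch immediate encodes a word offset, so a byte offset is valid only if it is 4-byte aligned and within [-128K, 128K); a quick check in the same spirit:

#include <stdio.h>

#define SZ_128K (128 * 1024)

static int branch_imm_ok(int imm)
{
	return !(imm & 3) && imm >= -SZ_128K && imm < SZ_128K;
}

int main(void)
{
	printf("%d %d %d\n", branch_imm_ok(2048), branch_imm_ok(2047),
	       branch_imm_ok(SZ_128K)); /* 1 0 0 */
	return 0;
}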
u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
{
union loongarch_instruction insn;
diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S
index 84e6de2fd973..8b5140ac9ea1 100644
--- a/arch/loongarch/kernel/relocate_kernel.S
+++ b/arch/loongarch/kernel/relocate_kernel.S
@@ -109,4 +109,4 @@ SYM_CODE_END(kexec_smp_wait)
relocate_new_kernel_end:
.section ".data"
-SYM_DATA(relocate_new_kernel_size, .long relocate_new_kernel_end - relocate_new_kernel)
+SYM_DATA(relocate_new_kernel_size, .quad relocate_new_kernel_end - relocate_new_kernel)
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index b99fbb388fe0..075b79b2c1d3 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -191,6 +191,16 @@ static int __init early_parse_mem(char *p)
return -EINVAL;
}
+ start = 0;
+ size = memparse(p, &p);
+ if (*p == '@') /* "mem=size@start" registers an explicit range */
+ start = memparse(p + 1, &p);
+ else { /* a bare "mem=size" caps total memory; only one is allowed */
+ usermem = 1;
+ memblock_enforce_memory_limit(size);
+ return 0;
+ }
+
/*
* If a user specifies memory size, we
* blow away any automatically generated
@@ -201,14 +211,6 @@ static int __init early_parse_mem(char *p)
memblock_remove(memblock_start_of_DRAM(),
memblock_end_of_DRAM() - memblock_start_of_DRAM());
}
- start = 0;
- size = memparse(p, &p);
- if (*p == '@')
- start = memparse(p + 1, &p);
- else {
- pr_err("Invalid format!\n");
- return -EINVAL;
- }
if (!IS_ENABLED(CONFIG_NUMA))
memblock_add(start, size);
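For reference, both accepted forms of the parameter (illustrative values; memparse() understands K/M/G suffixes): a bare "mem=2G" caps total memory via memblock_enforce_memory_limit(), while "mem=1G@0x90000000" registers an explicit range starting at the given address.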
@@ -265,7 +267,7 @@ static void __init arch_reserve_crashkernel(void)
return;
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
- &crash_size, &crash_base, &low_size, &high);
+ &crash_size, &crash_base, &low_size, NULL, &high);
if (ret)
return;
diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
index 0005be49b056..0d5fa64a2225 100644
--- a/arch/loongarch/kernel/unwind_orc.c
+++ b/arch/loongarch/kernel/unwind_orc.c
@@ -508,7 +508,7 @@ bool unwind_next_frame(struct unwind_state *state)
state->pc = bt_address(pc);
if (!state->pc) {
- pr_err("cannot find unwind pc at %pK\n", (void *)pc);
+ pr_err("cannot find unwind pc at %p\n", (void *)pc);
goto err;
}
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index fa52251b3bf1..2ce41f93b2a4 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -289,9 +289,11 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
er = EMULATE_FAIL;
switch (((inst.word >> 24) & 0xff)) {
case 0x0: /* CPUCFG GSPR */
+ trace_kvm_exit_cpucfg(vcpu, KVM_TRACE_EXIT_CPUCFG);
er = kvm_emu_cpucfg(vcpu, inst);
break;
case 0x4: /* CSR{RD,WR,XCHG} GSPR */
+ trace_kvm_exit_csr(vcpu, KVM_TRACE_EXIT_CSR);
er = kvm_handle_csr(vcpu, inst);
break;
case 0x6: /* Cache, Idle and IOCSR GSPR */
@@ -821,32 +823,25 @@ static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
return RESUME_GUEST;
}
-static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
+static void kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
- unsigned int min, cpu, i;
- unsigned long ipi_bitmap;
+ unsigned int min, cpu;
struct kvm_vcpu *dest;
+ DECLARE_BITMAP(ipi_bitmap, BITS_PER_LONG * 2) = {
+ kvm_read_reg(vcpu, LOONGARCH_GPR_A1),
+ kvm_read_reg(vcpu, LOONGARCH_GPR_A2)
+ };
min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
- for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
- ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
- if (!ipi_bitmap)
+ for_each_set_bit(cpu, ipi_bitmap, BITS_PER_LONG * 2) {
+ dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
+ if (!dest)
continue;
- cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
- while (cpu < BITS_PER_LONG) {
- dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
- cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
- if (!dest)
- continue;
-
- /* Send SWI0 to dest vcpu to emulate IPI interrupt */
- kvm_queue_irq(dest, INT_SWI0);
- kvm_vcpu_kick(dest);
- }
+ /* Send SWI0 to dest vcpu to emulate IPI interrupt */
+ kvm_queue_irq(dest, INT_SWI0);
+ kvm_vcpu_kick(dest);
}
-
- return 0;
}
/*
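A userspace approximation of the flattened two-word bitmap walk above (for_each_set_bit over 2*BITS_PER_LONG replacing the nested find_first_bit/find_next_bit loops); values are hypothetical.

#include <stdio.h>

int main(void)
{
	unsigned long bitmap[2] = { 0x9, 0x1 }; /* bits 0, 3, BITS_PER_LONG */
	unsigned int min = 8;                   /* hypothetical A3 value */
	unsigned int bpl = 8 * sizeof(long);    /* BITS_PER_LONG */

	for (unsigned int w = 0; w < 2; w++)
		for (unsigned int b = 0; b < bpl; b++)
			if (bitmap[w] & (1UL << b))
				printf("kick vcpu %u\n", min + w * bpl + b);
	return 0;
}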
diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c
index a75f865d6fb9..a3a12af9ecbf 100644
--- a/arch/loongarch/kvm/intc/eiointc.c
+++ b/arch/loongarch/kvm/intc/eiointc.c
@@ -9,7 +9,7 @@
static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
{
- int ipnum, cpu, cpuid, irq_index, irq_mask, irq;
+ int ipnum, cpu, cpuid, irq;
struct kvm_vcpu *vcpu;
for (irq = 0; irq < EIOINTC_IRQS; irq++) {
@@ -18,8 +18,6 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
ipnum = count_trailing_zeros(ipnum);
ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
}
- irq_index = irq / 32;
- irq_mask = BIT(irq & 0x1f);
cpuid = s->coremap.reg_u8[irq];
vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
@@ -27,16 +25,16 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
continue;
cpu = vcpu->vcpu_id;
- if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask))
- set_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ if (test_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]))
+ __set_bit(irq, s->sw_coreisr[cpu][ipnum]);
else
- clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ __clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
}
}
static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
{
- int ipnum, cpu, found, irq_index, irq_mask;
+ int ipnum, cpu, found;
struct kvm_vcpu *vcpu;
struct kvm_interrupt vcpu_irq;
@@ -48,19 +46,16 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
cpu = s->sw_coremap[irq];
vcpu = kvm_get_vcpu(s->kvm, cpu);
- irq_index = irq / 32;
- irq_mask = BIT(irq & 0x1f);
-
if (level) {
/* if not enable return false */
- if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0)
+ if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
return;
- s->coreisr.reg_u32[cpu][irq_index] |= irq_mask;
+ __set_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
- set_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ __set_bit(irq, s->sw_coreisr[cpu][ipnum]);
} else {
- s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask;
- clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ __clear_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
+ __clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
}
@@ -110,159 +105,14 @@ void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
unsigned long flags;
unsigned long *isr = (unsigned long *)s->isr.reg_u8;
- level ? set_bit(irq, isr) : clear_bit(irq, isr);
spin_lock_irqsave(&s->lock, flags);
+ level ? __set_bit(irq, isr) : __clear_bit(irq, isr);
eiointc_update_irq(s, irq, level);
spin_unlock_irqrestore(&s->lock, flags);
}
-static inline void eiointc_enable_irq(struct kvm_vcpu *vcpu,
- struct loongarch_eiointc *s, int index, u8 mask, int level)
-{
- u8 val;
- int irq;
-
- val = mask & s->isr.reg_u8[index];
- irq = ffs(val);
- while (irq != 0) {
- /*
- * enable bit change from 0 to 1,
- * need to update irq by pending bits
- */
- eiointc_update_irq(s, irq - 1 + index * 8, level);
- val &= ~BIT(irq - 1);
- irq = ffs(val);
- }
-}
-
-static int loongarch_eiointc_readb(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
- gpa_t addr, int len, void *val)
-{
- int index, ret = 0;
- u8 data = 0;
- gpa_t offset;
-
- offset = addr - EIOINTC_BASE;
- switch (offset) {
- case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
- index = offset - EIOINTC_NODETYPE_START;
- data = s->nodetype.reg_u8[index];
- break;
- case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
- index = offset - EIOINTC_IPMAP_START;
- data = s->ipmap.reg_u8[index];
- break;
- case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
- index = offset - EIOINTC_ENABLE_START;
- data = s->enable.reg_u8[index];
- break;
- case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
- index = offset - EIOINTC_BOUNCE_START;
- data = s->bounce.reg_u8[index];
- break;
- case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
- index = offset - EIOINTC_COREISR_START;
- data = s->coreisr.reg_u8[vcpu->vcpu_id][index];
- break;
- case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- index = offset - EIOINTC_COREMAP_START;
- data = s->coremap.reg_u8[index];
- break;
- default:
- ret = -EINVAL;
- break;
- }
- *(u8 *)val = data;
-
- return ret;
-}
-
-static int loongarch_eiointc_readw(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
- gpa_t addr, int len, void *val)
-{
- int index, ret = 0;
- u16 data = 0;
- gpa_t offset;
-
- offset = addr - EIOINTC_BASE;
- switch (offset) {
- case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
- index = (offset - EIOINTC_NODETYPE_START) >> 1;
- data = s->nodetype.reg_u16[index];
- break;
- case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
- index = (offset - EIOINTC_IPMAP_START) >> 1;
- data = s->ipmap.reg_u16[index];
- break;
- case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
- index = (offset - EIOINTC_ENABLE_START) >> 1;
- data = s->enable.reg_u16[index];
- break;
- case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
- index = (offset - EIOINTC_BOUNCE_START) >> 1;
- data = s->bounce.reg_u16[index];
- break;
- case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
- index = (offset - EIOINTC_COREISR_START) >> 1;
- data = s->coreisr.reg_u16[vcpu->vcpu_id][index];
- break;
- case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- index = (offset - EIOINTC_COREMAP_START) >> 1;
- data = s->coremap.reg_u16[index];
- break;
- default:
- ret = -EINVAL;
- break;
- }
- *(u16 *)val = data;
-
- return ret;
-}
-
-static int loongarch_eiointc_readl(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
- gpa_t addr, int len, void *val)
-{
- int index, ret = 0;
- u32 data = 0;
- gpa_t offset;
-
- offset = addr - EIOINTC_BASE;
- switch (offset) {
- case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
- index = (offset - EIOINTC_NODETYPE_START) >> 2;
- data = s->nodetype.reg_u32[index];
- break;
- case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
- index = (offset - EIOINTC_IPMAP_START) >> 2;
- data = s->ipmap.reg_u32[index];
- break;
- case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
- index = (offset - EIOINTC_ENABLE_START) >> 2;
- data = s->enable.reg_u32[index];
- break;
- case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
- index = (offset - EIOINTC_BOUNCE_START) >> 2;
- data = s->bounce.reg_u32[index];
- break;
- case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
- index = (offset - EIOINTC_COREISR_START) >> 2;
- data = s->coreisr.reg_u32[vcpu->vcpu_id][index];
- break;
- case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- index = (offset - EIOINTC_COREMAP_START) >> 2;
- data = s->coremap.reg_u32[index];
- break;
- default:
- ret = -EINVAL;
- break;
- }
- *(u32 *)val = data;
-
- return ret;
-}
-
-static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
- gpa_t addr, int len, void *val)
+static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
+ gpa_t addr, unsigned long *val)
{
int index, ret = 0;
u64 data = 0;
@@ -298,7 +148,7 @@ static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eioin
ret = -EINVAL;
break;
}
- *(u64 *)val = data;
+ *val = data;
return ret;
}
@@ -308,7 +158,7 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
gpa_t addr, int len, void *val)
{
int ret = -EINVAL;
- unsigned long flags;
+ unsigned long flags, data, offset;
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
if (!eiointc) {
@@ -321,355 +171,115 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
return -EINVAL;
}
- vcpu->kvm->stat.eiointc_read_exits++;
+ offset = addr & 0x7;
+ addr -= offset;
+ vcpu->stat.eiointc_read_exits++;
spin_lock_irqsave(&eiointc->lock, flags);
+ ret = loongarch_eiointc_read(vcpu, eiointc, addr, &data);
+ spin_unlock_irqrestore(&eiointc->lock, flags);
+ if (ret)
+ return ret;
+
+ data = data >> (offset * 8);
switch (len) {
case 1:
- ret = loongarch_eiointc_readb(vcpu, eiointc, addr, len, val);
+ *(long *)val = (s8)data;
break;
case 2:
- ret = loongarch_eiointc_readw(vcpu, eiointc, addr, len, val);
+ *(long *)val = (s16)data;
break;
case 4:
- ret = loongarch_eiointc_readl(vcpu, eiointc, addr, len, val);
- break;
- case 8:
- ret = loongarch_eiointc_readq(vcpu, eiointc, addr, len, val);
+ *(long *)val = (s32)data;
break;
default:
- WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
- __func__, addr, len);
- }
- spin_unlock_irqrestore(&eiointc->lock, flags);
-
- return ret;
-}
-
-static int loongarch_eiointc_writeb(struct kvm_vcpu *vcpu,
- struct loongarch_eiointc *s,
- gpa_t addr, int len, const void *val)
-{
- int index, irq, bits, ret = 0;
- u8 cpu;
- u8 data, old_data;
- u8 coreisr, old_coreisr;
- gpa_t offset;
-
- data = *(u8 *)val;
- offset = addr - EIOINTC_BASE;
-
- switch (offset) {
- case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
- index = (offset - EIOINTC_NODETYPE_START);
- s->nodetype.reg_u8[index] = data;
- break;
- case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
- /*
- * ipmap cannot be set at runtime, can be set only at the beginning
- * of irqchip driver, need not update upper irq level
- */
- index = (offset - EIOINTC_IPMAP_START);
- s->ipmap.reg_u8[index] = data;
- break;
- case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
- index = (offset - EIOINTC_ENABLE_START);
- old_data = s->enable.reg_u8[index];
- s->enable.reg_u8[index] = data;
- /*
- * 1: enable irq.
- * update irq when isr is set.
- */
- data = s->enable.reg_u8[index] & ~old_data & s->isr.reg_u8[index];
- eiointc_enable_irq(vcpu, s, index, data, 1);
- /*
- * 0: disable irq.
- * update irq when isr is set.
- */
- data = ~s->enable.reg_u8[index] & old_data & s->isr.reg_u8[index];
- eiointc_enable_irq(vcpu, s, index, data, 0);
- break;
- case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
- /* do not emulate hw bounced irq routing */
- index = offset - EIOINTC_BOUNCE_START;
- s->bounce.reg_u8[index] = data;
- break;
- case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
- index = (offset - EIOINTC_COREISR_START);
- /* use attrs to get current cpu index */
- cpu = vcpu->vcpu_id;
- coreisr = data;
- old_coreisr = s->coreisr.reg_u8[cpu][index];
- /* write 1 to clear interrupt */
- s->coreisr.reg_u8[cpu][index] = old_coreisr & ~coreisr;
- coreisr &= old_coreisr;
- bits = sizeof(data) * 8;
- irq = find_first_bit((void *)&coreisr, bits);
- while (irq < bits) {
- eiointc_update_irq(s, irq + index * bits, 0);
- bitmap_clear((void *)&coreisr, irq, 1);
- irq = find_first_bit((void *)&coreisr, bits);
- }
- break;
- case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- irq = offset - EIOINTC_COREMAP_START;
- index = irq;
- s->coremap.reg_u8[index] = data;
- eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu,
- struct loongarch_eiointc *s,
- gpa_t addr, int len, const void *val)
-{
- int i, index, irq, bits, ret = 0;
- u8 cpu;
- u16 data, old_data;
- u16 coreisr, old_coreisr;
- gpa_t offset;
-
- data = *(u16 *)val;
- offset = addr - EIOINTC_BASE;
-
- switch (offset) {
- case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
- index = (offset - EIOINTC_NODETYPE_START) >> 1;
- s->nodetype.reg_u16[index] = data;
- break;
- case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
- /*
- * ipmap cannot be set at runtime, can be set only at the beginning
- * of irqchip driver, need not update upper irq level
- */
- index = (offset - EIOINTC_IPMAP_START) >> 1;
- s->ipmap.reg_u16[index] = data;
- break;
- case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
- index = (offset - EIOINTC_ENABLE_START) >> 1;
- old_data = s->enable.reg_u16[index];
- s->enable.reg_u16[index] = data;
- /*
- * 1: enable irq.
- * update irq when isr is set.
- */
- data = s->enable.reg_u16[index] & ~old_data & s->isr.reg_u16[index];
- for (i = 0; i < sizeof(data); i++) {
- u8 mask = (data >> (i * 8)) & 0xff;
- eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 1);
- }
- /*
- * 0: disable irq.
- * update irq when isr is set.
- */
- data = ~s->enable.reg_u16[index] & old_data & s->isr.reg_u16[index];
- for (i = 0; i < sizeof(data); i++) {
- u8 mask = (data >> (i * 8)) & 0xff;
- eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 0);
- }
- break;
- case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
- /* do not emulate hw bounced irq routing */
- index = (offset - EIOINTC_BOUNCE_START) >> 1;
- s->bounce.reg_u16[index] = data;
- break;
- case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
- index = (offset - EIOINTC_COREISR_START) >> 1;
- /* use attrs to get current cpu index */
- cpu = vcpu->vcpu_id;
- coreisr = data;
- old_coreisr = s->coreisr.reg_u16[cpu][index];
- /* write 1 to clear interrupt */
- s->coreisr.reg_u16[cpu][index] = old_coreisr & ~coreisr;
- coreisr &= old_coreisr;
- bits = sizeof(data) * 8;
- irq = find_first_bit((void *)&coreisr, bits);
- while (irq < bits) {
- eiointc_update_irq(s, irq + index * bits, 0);
- bitmap_clear((void *)&coreisr, irq, 1);
- irq = find_first_bit((void *)&coreisr, bits);
- }
- break;
- case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- irq = offset - EIOINTC_COREMAP_START;
- index = irq >> 1;
- s->coremap.reg_u16[index] = data;
- eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
- break;
- default:
- ret = -EINVAL;
+ *(long *)val = (long)data;
break;
}
- return ret;
+ return 0;
}
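A standalone sketch of the narrowing-with-sign-extension that this consolidated read path performs (values hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t reg = 0xf00dbeefULL; /* hypothetical 64-bit register value */
	unsigned int offset = 2;      /* byte offset of a 2-byte access */

	uint64_t data = reg >> (offset * 8);
	long val = (int16_t)data;     /* len == 2: sign-extend to long */

	printf("%#lx\n", (unsigned long)val); /* 0x...fffff00d on 64-bit */
	return 0;
}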
-static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu,
+static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
struct loongarch_eiointc *s,
- gpa_t addr, int len, const void *val)
+ gpa_t addr, u64 value, u64 field_mask)
{
- int i, index, irq, bits, ret = 0;
+ int index, irq, ret = 0;
u8 cpu;
- u32 data, old_data;
- u32 coreisr, old_coreisr;
+ u64 data, old, mask;
gpa_t offset;
- data = *(u32 *)val;
- offset = addr - EIOINTC_BASE;
+ offset = addr & 7;
+ mask = field_mask << (offset * 8);
+ data = (value & field_mask) << (offset * 8);
- switch (offset) {
- case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
- index = (offset - EIOINTC_NODETYPE_START) >> 2;
- s->nodetype.reg_u32[index] = data;
- break;
- case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
- /*
- * ipmap cannot be set at runtime, can be set only at the beginning
- * of irqchip driver, need not update upper irq level
- */
- index = (offset - EIOINTC_IPMAP_START) >> 2;
- s->ipmap.reg_u32[index] = data;
- break;
- case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
- index = (offset - EIOINTC_ENABLE_START) >> 2;
- old_data = s->enable.reg_u32[index];
- s->enable.reg_u32[index] = data;
- /*
- * 1: enable irq.
- * update irq when isr is set.
- */
- data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index];
- for (i = 0; i < sizeof(data); i++) {
- u8 mask = (data >> (i * 8)) & 0xff;
- eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 1);
- }
- /*
- * 0: disable irq.
- * update irq when isr is set.
- */
- data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index];
- for (i = 0; i < sizeof(data); i++) {
- u8 mask = (data >> (i * 8)) & 0xff;
- eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 0);
- }
- break;
- case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
- /* do not emulate hw bounced irq routing */
- index = (offset - EIOINTC_BOUNCE_START) >> 2;
- s->bounce.reg_u32[index] = data;
- break;
- case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
- index = (offset - EIOINTC_COREISR_START) >> 2;
- /* use attrs to get current cpu index */
- cpu = vcpu->vcpu_id;
- coreisr = data;
- old_coreisr = s->coreisr.reg_u32[cpu][index];
- /* write 1 to clear interrupt */
- s->coreisr.reg_u32[cpu][index] = old_coreisr & ~coreisr;
- coreisr &= old_coreisr;
- bits = sizeof(data) * 8;
- irq = find_first_bit((void *)&coreisr, bits);
- while (irq < bits) {
- eiointc_update_irq(s, irq + index * bits, 0);
- bitmap_clear((void *)&coreisr, irq, 1);
- irq = find_first_bit((void *)&coreisr, bits);
- }
- break;
- case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- irq = offset - EIOINTC_COREMAP_START;
- index = irq >> 2;
- s->coremap.reg_u32[index] = data;
- eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu,
- struct loongarch_eiointc *s,
- gpa_t addr, int len, const void *val)
-{
- int i, index, irq, bits, ret = 0;
- u8 cpu;
- u64 data, old_data;
- u64 coreisr, old_coreisr;
- gpa_t offset;
-
- data = *(u64 *)val;
+ addr -= offset;
offset = addr - EIOINTC_BASE;
switch (offset) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
index = (offset - EIOINTC_NODETYPE_START) >> 3;
- s->nodetype.reg_u64[index] = data;
+ old = s->nodetype.reg_u64[index];
+ s->nodetype.reg_u64[index] = (old & ~mask) | data;
break;
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
/*
* ipmap cannot be set at runtime, can be set only at the beginning
* of irqchip driver, need not update upper irq level
*/
- index = (offset - EIOINTC_IPMAP_START) >> 3;
- s->ipmap.reg_u64 = data;
+ old = s->ipmap.reg_u64;
+ s->ipmap.reg_u64 = (old & ~mask) | data;
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
index = (offset - EIOINTC_ENABLE_START) >> 3;
- old_data = s->enable.reg_u64[index];
- s->enable.reg_u64[index] = data;
+ old = s->enable.reg_u64[index];
+ s->enable.reg_u64[index] = (old & ~mask) | data;
/*
* 1: enable irq.
* update irq when isr is set.
*/
- data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index];
- for (i = 0; i < sizeof(data); i++) {
- u8 mask = (data >> (i * 8)) & 0xff;
- eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 1);
+ data = s->enable.reg_u64[index] & ~old & s->isr.reg_u64[index];
+ while (data) {
+ irq = __ffs(data);
+ eiointc_update_irq(s, irq + index * 64, 1);
+ data &= ~BIT_ULL(irq);
}
/*
* 0: disable irq.
* update irq when isr is set.
*/
- data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index];
- for (i = 0; i < sizeof(data); i++) {
- u8 mask = (data >> (i * 8)) & 0xff;
- eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 0);
+ data = ~s->enable.reg_u64[index] & old & s->isr.reg_u64[index];
+ while (data) {
+ irq = __ffs(data);
+ eiointc_update_irq(s, irq + index * 64, 0);
+ data &= ~BIT_ULL(irq);
}
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
/* do not emulate hw bounced irq routing */
index = (offset - EIOINTC_BOUNCE_START) >> 3;
- s->bounce.reg_u64[index] = data;
+ old = s->bounce.reg_u64[index];
+ s->bounce.reg_u64[index] = (old & ~mask) | data;
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
index = (offset - EIOINTC_COREISR_START) >> 3;
/* use attrs to get current cpu index */
cpu = vcpu->vcpu_id;
- coreisr = data;
- old_coreisr = s->coreisr.reg_u64[cpu][index];
+ old = s->coreisr.reg_u64[cpu][index];
/* write 1 to clear interrupt */
- s->coreisr.reg_u64[cpu][index] = old_coreisr & ~coreisr;
- coreisr &= old_coreisr;
- bits = sizeof(data) * 8;
- irq = find_first_bit((void *)&coreisr, bits);
- while (irq < bits) {
- eiointc_update_irq(s, irq + index * bits, 0);
- bitmap_clear((void *)&coreisr, irq, 1);
- irq = find_first_bit((void *)&coreisr, bits);
+ s->coreisr.reg_u64[cpu][index] = old & ~data;
+ data &= old;
+ while (data) {
+ irq = __ffs(data);
+ eiointc_update_irq(s, irq + index * 64, 0);
+ data &= ~BIT_ULL(irq);
}
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- irq = offset - EIOINTC_COREMAP_START;
- index = irq >> 3;
- s->coremap.reg_u64[index] = data;
- eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
+ index = (offset - EIOINTC_COREMAP_START) >> 3;
+ old = s->coremap.reg_u64[index];
+ s->coremap.reg_u64[index] = (old & ~mask) | data;
+ data = s->coremap.reg_u64[index];
+ eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true);
break;
default:
ret = -EINVAL;
@@ -684,7 +294,7 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
gpa_t addr, int len, const void *val)
{
int ret = -EINVAL;
- unsigned long flags;
+ unsigned long flags, value;
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
if (!eiointc) {
@@ -697,24 +307,25 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
return -EINVAL;
}
- vcpu->kvm->stat.eiointc_write_exits++;
+ vcpu->stat.eiointc_write_exits++;
spin_lock_irqsave(&eiointc->lock, flags);
switch (len) {
case 1:
- ret = loongarch_eiointc_writeb(vcpu, eiointc, addr, len, val);
+ value = *(unsigned char *)val;
+ ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF);
break;
case 2:
- ret = loongarch_eiointc_writew(vcpu, eiointc, addr, len, val);
+ value = *(unsigned short *)val;
+ ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX);
break;
case 4:
- ret = loongarch_eiointc_writel(vcpu, eiointc, addr, len, val);
- break;
- case 8:
- ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, len, val);
+ value = *(unsigned int *)val;
+ ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX);
break;
default:
- WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
- __func__, addr, len);
+ value = *(unsigned long *)val;
+ ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX);
+ break;
}
spin_unlock_irqrestore(&eiointc->lock, flags);
@@ -989,7 +600,7 @@ static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
{
int ret;
struct loongarch_eiointc *s;
- struct kvm_io_device *device, *device1;
+ struct kvm_io_device *device;
struct kvm *kvm = dev->kvm;
/* eiointc has been created */
@@ -1017,10 +628,10 @@ static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
return ret;
}
- device1 = &s->device_vext;
- kvm_iodevice_init(device1, &kvm_eiointc_virt_ops);
+ device = &s->device_vext;
+ kvm_iodevice_init(device, &kvm_eiointc_virt_ops);
ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
- EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device1);
+ EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device);
if (ret < 0) {
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
kfree(s);
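
The hunks above collapse the four width-specific eiointc writers
(writeb/writew/writel/writeq) into a single loongarch_eiointc_write()
that turns every 1/2/4-byte access into a masked read-modify-write of
the aligned 64-bit register word, so only the reg_u64 arrays remain on
the write path. A minimal C model of the masking step, with
hypothetical names (a sketch of the technique, not the kernel code):

  #include <stdint.h>

  /* Fold a 1/2/4/8-byte store at 'addr' into its aligned 64-bit word. */
  static uint64_t masked_store(uint64_t old, uint64_t addr,
                               uint64_t value, uint64_t field_mask)
  {
          unsigned int off = addr & 7;                /* byte offset in the word */
          uint64_t mask = field_mask << (off * 8);    /* bits this store touches */
          uint64_t data = (value & field_mask) << (off * 8);

          return (old & ~mask) | data;                /* untouched bytes survive */
  }

For example, a 2-byte store of 0xbeef at byte offset 4 yields
(old & ~0x0000ffff00000000) | 0x0000beef00000000, which is exactly the
(old & ~mask) | data computation the patch performs on each register.
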
diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c
index fe734dc062ed..e658d5b37c04 100644
--- a/arch/loongarch/kvm/intc/ipi.c
+++ b/arch/loongarch/kvm/intc/ipi.c
@@ -268,36 +268,16 @@ static int kvm_ipi_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
- int ret;
- struct loongarch_ipi *ipi;
-
- ipi = vcpu->kvm->arch.ipi;
- if (!ipi) {
- kvm_err("%s: ipi irqchip not valid!\n", __func__);
- return -EINVAL;
- }
- ipi->kvm->stat.ipi_read_exits++;
- ret = loongarch_ipi_readl(vcpu, addr, len, val);
-
- return ret;
+ vcpu->stat.ipi_read_exits++;
+ return loongarch_ipi_readl(vcpu, addr, len, val);
}
static int kvm_ipi_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
- int ret;
- struct loongarch_ipi *ipi;
-
- ipi = vcpu->kvm->arch.ipi;
- if (!ipi) {
- kvm_err("%s: ipi irqchip not valid!\n", __func__);
- return -EINVAL;
- }
- ipi->kvm->stat.ipi_write_exits++;
- ret = loongarch_ipi_writel(vcpu, addr, len, val);
-
- return ret;
+ vcpu->stat.ipi_write_exits++;
+ return loongarch_ipi_writel(vcpu, addr, len, val);
}
static const struct kvm_io_device_ops kvm_ipi_ops = {
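
The two helpers above drop their NULL check on the ipi pointer,
presumably because the I/O device cannot be registered before the
irqchip exists, and the exit counters move from the VM-wide kvm->stat
to the per-vCPU vcpu->stat, matching the STATS_DESC_COUNTER(VCPU, ...)
entries added in vcpu.c further down. A hedged sketch of the resulting
hot-path pattern (illustrative types only, not the KVM structures):

  /*
   * Per-vCPU counter: each vCPU increments private memory, so
   * concurrent exits no longer touch one shared VM-wide word.
   */
  struct vcpu_stat_model { unsigned long ipi_write_exits; };

  static void on_ipi_write_exit(struct vcpu_stat_model *self)
  {
          self->ipi_write_exits++;
  }
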
diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c
index 08fce845f668..6f00ffe05c54 100644
--- a/arch/loongarch/kvm/intc/pch_pic.c
+++ b/arch/loongarch/kvm/intc/pch_pic.c
@@ -196,7 +196,7 @@ static int kvm_pch_pic_read(struct kvm_vcpu *vcpu,
}
/* statistics of pch pic reading */
- vcpu->kvm->stat.pch_pic_read_exits++;
+ vcpu->stat.pch_pic_read_exits++;
ret = loongarch_pch_pic_read(s, addr, len, val);
return ret;
@@ -303,7 +303,7 @@ static int kvm_pch_pic_write(struct kvm_vcpu *vcpu,
}
/* statistics of pch pic writing */
- vcpu->kvm->stat.pch_pic_write_exits++;
+ vcpu->stat.pch_pic_write_exits++;
ret = loongarch_pch_pic_write(s, addr, len, val);
return ret;
diff --git a/arch/loongarch/kvm/interrupt.c b/arch/loongarch/kvm/interrupt.c
index 4c3f22de4b40..8462083f0301 100644
--- a/arch/loongarch/kvm/interrupt.c
+++ b/arch/loongarch/kvm/interrupt.c
@@ -83,28 +83,11 @@ void kvm_deliver_intr(struct kvm_vcpu *vcpu)
unsigned long *pending = &vcpu->arch.irq_pending;
unsigned long *pending_clr = &vcpu->arch.irq_clear;
- if (!(*pending) && !(*pending_clr))
- return;
-
- if (*pending_clr) {
- priority = __ffs(*pending_clr);
- while (priority <= INT_IPI) {
- kvm_irq_clear(vcpu, priority);
- priority = find_next_bit(pending_clr,
- BITS_PER_BYTE * sizeof(*pending_clr),
- priority + 1);
- }
- }
+ for_each_set_bit(priority, pending_clr, INT_IPI + 1)
+ kvm_irq_clear(vcpu, priority);
- if (*pending) {
- priority = __ffs(*pending);
- while (priority <= INT_IPI) {
- kvm_irq_deliver(vcpu, priority);
- priority = find_next_bit(pending,
- BITS_PER_BYTE * sizeof(*pending),
- priority + 1);
- }
- }
+ for_each_set_bit(priority, pending, INT_IPI + 1)
+ kvm_irq_deliver(vcpu, priority);
}
int kvm_pending_timer(struct kvm_vcpu *vcpu)
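
kvm_deliver_intr() now relies on for_each_set_bit() instead of the
hand-rolled __ffs()/find_next_bit() loops; the iteration order and the
INT_IPI bound are unchanged, and the early !(*pending) test is subsumed
because the macro simply runs zero iterations on an empty mask. A
standalone model of the loop shape (the INT_IPI value here is assumed
for illustration):

  #include <stdio.h>

  #define INT_IPI 12   /* assumed bound, for this sketch only */

  static void deliver(unsigned int priority)   /* stands in for kvm_irq_deliver() */
  {
          printf("deliver priority %u\n", priority);
  }

  int main(void)
  {
          unsigned long pending = 0x1005;   /* bits 0, 2 and 12 set */
          unsigned long work = pending;

          /* Equivalent of: for_each_set_bit(priority, &pending, INT_IPI + 1) */
          while (work) {
                  unsigned int priority = __builtin_ctzl(work);   /* like __ffs() */

                  if (priority > INT_IPI)
                          break;
                  deliver(priority);
                  work &= work - 1;   /* clear the lowest set bit */
          }
          return 0;
  }
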
diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
index 1783397b1bc8..145514dab6d5 100644
--- a/arch/loongarch/kvm/trace.h
+++ b/arch/loongarch/kvm/trace.h
@@ -46,11 +46,15 @@ DEFINE_EVENT(kvm_transition, kvm_out,
/* Further exit reasons */
#define KVM_TRACE_EXIT_IDLE 64
#define KVM_TRACE_EXIT_CACHE 65
+#define KVM_TRACE_EXIT_CPUCFG 66
+#define KVM_TRACE_EXIT_CSR 67
/* Tracepoints for VM exits */
#define kvm_trace_symbol_exit_types \
{ KVM_TRACE_EXIT_IDLE, "IDLE" }, \
- { KVM_TRACE_EXIT_CACHE, "CACHE" }
+ { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
+ { KVM_TRACE_EXIT_CPUCFG, "CPUCFG" }, \
+ { KVM_TRACE_EXIT_CSR, "CSR" }
DECLARE_EVENT_CLASS(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
@@ -82,6 +86,14 @@ DEFINE_EVENT(kvm_exit, kvm_exit_cache,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason));
+DEFINE_EVENT(kvm_exit, kvm_exit_cpucfg,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason));
+
+DEFINE_EVENT(kvm_exit, kvm_exit_csr,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason));
+
DEFINE_EVENT(kvm_exit, kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason));
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 5af32ec62cb1..d1b8c50941ca 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -20,7 +20,13 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, idle_exits),
STATS_DESC_COUNTER(VCPU, cpucfg_exits),
STATS_DESC_COUNTER(VCPU, signal_exits),
- STATS_DESC_COUNTER(VCPU, hypercall_exits)
+ STATS_DESC_COUNTER(VCPU, hypercall_exits),
+ STATS_DESC_COUNTER(VCPU, ipi_read_exits),
+ STATS_DESC_COUNTER(VCPU, ipi_write_exits),
+ STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
+ STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
+ STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
+ STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c
index 99165903908a..f5e910b68229 100644
--- a/arch/loongarch/mm/pageattr.c
+++ b/arch/loongarch/mm/pageattr.c
@@ -118,7 +118,7 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgp
return 0;
mmap_write_lock(&init_mm);
- ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks);
+ ret = walk_kernel_page_table_range(start, end, &pageattr_ops, NULL, &masks);
mmap_write_unlock(&init_mm);
flush_tlb_kernel_range(start, end);
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index fa1500d4aa3e..abfdb6bb5c38 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -4,13 +4,20 @@
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
+#include <linux/memory.h>
#include "bpf_jit.h"
-#define REG_TCC LOONGARCH_GPR_A6
-#define TCC_SAVED LOONGARCH_GPR_S5
+#define LOONGARCH_MAX_REG_ARGS 8
+
+#define LOONGARCH_LONG_JUMP_NINSNS 5
+#define LOONGARCH_LONG_JUMP_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4)
-#define SAVE_RA BIT(0)
-#define SAVE_TCC BIT(1)
+#define LOONGARCH_FENTRY_NINSNS 2
+#define LOONGARCH_FENTRY_NBYTES (LOONGARCH_FENTRY_NINSNS * 4)
+#define LOONGARCH_BPF_FENTRY_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4)
+
+#define REG_TCC LOONGARCH_GPR_A6
+#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (round_up(stack, 16) - 80)
static const int regmap[] = {
/* return value from in-kernel function, and exit value for eBPF program */
@@ -32,32 +39,57 @@ static const int regmap[] = {
[BPF_REG_AX] = LOONGARCH_GPR_T0,
};
-static void mark_call(struct jit_ctx *ctx)
+static void prepare_bpf_tail_call_cnt(struct jit_ctx *ctx, int *store_offset)
{
- ctx->flags |= SAVE_RA;
-}
+ const struct bpf_prog *prog = ctx->prog;
+ const bool is_main_prog = !bpf_is_subprog(prog);
-static void mark_tail_call(struct jit_ctx *ctx)
-{
- ctx->flags |= SAVE_TCC;
-}
+ if (is_main_prog) {
+ /*
+ * LOONGARCH_GPR_T3 = MAX_TAIL_CALL_CNT
+ * if (REG_TCC > T3)
+ * std REG_TCC -> LOONGARCH_GPR_SP + store_offset
+ * else
+ * std REG_TCC -> LOONGARCH_GPR_SP + store_offset
+ * REG_TCC = LOONGARCH_GPR_SP + store_offset
+ *
+ * std REG_TCC -> LOONGARCH_GPR_SP + store_offset
+ *
+ * The purpose of this code is to first push the TCC onto the stack,
+ * and then push the address of the TCC onto the stack.
+ * In cases where bpf2bpf and tail calls are used in combination,
+ * the value in REG_TCC may be either a count or an address;
+ * these two cases need to be distinguished and handled separately.
+ */
+ emit_insn(ctx, addid, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);
+ *store_offset -= sizeof(long);
-static bool seen_call(struct jit_ctx *ctx)
-{
- return (ctx->flags & SAVE_RA);
-}
+ emit_cond_jmp(ctx, BPF_JGT, REG_TCC, LOONGARCH_GPR_T3, 4);
-static bool seen_tail_call(struct jit_ctx *ctx)
-{
- return (ctx->flags & SAVE_TCC);
-}
+ /*
+ * If REG_TCC <= MAX_TAIL_CALL_CNT, the value in REG_TCC is a count;
+ * push the tcc onto the stack
+ */
+ emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
-static u8 tail_call_reg(struct jit_ctx *ctx)
-{
- if (seen_call(ctx))
- return TCC_SAVED;
+ /* Load the address of the TCC slot into REG_TCC */
+ emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
+
+ emit_uncond_jmp(ctx, 2);
+
+ /*
+ * If REG_TCC > MAX_TAIL_CALL_CNT, the value in REG_TCC is an address;
+ * push the tcc_ptr onto the stack
+ */
+ emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
+ } else {
+ *store_offset -= sizeof(long);
+ emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
+ }
- return REG_TCC;
+ /* Push the tcc_ptr onto the stack */
+ *store_offset -= sizeof(long);
+ emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
}
/*
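
prepare_bpf_tail_call_cnt() exploits the fact that a live tail-call
count can never exceed MAX_TAIL_CALL_CNT, while any stack address is
far larger: on entry to a main program REG_TCC holds a count, which is
spilled and replaced by the address of its stack slot; subprograms and
tail calls then receive that address and simply re-spill it. A hedged
C model of the disambiguation (hypothetical helper, not the JIT):

  #include <stdint.h>

  #define MAX_TAIL_CALL_CNT 33   /* kernel value from include/linux/bpf.h */

  /*
   * 'tcc' is whatever arrived in REG_TCC; 'slot' is the stack slot the
   * prologue reserves. Returns the canonical pointer to the shared count.
   */
  static uint64_t *canonicalize_tcc(uint64_t tcc, uint64_t *slot)
  {
          if (tcc > MAX_TAIL_CALL_CNT)
                  return (uint64_t *)tcc;   /* already a pointer: counts are small */

          *slot = tcc;                      /* a genuine count: spill it */
          return slot;                      /* ...and hand out its address */
  }
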
@@ -80,6 +112,10 @@ static u8 tail_call_reg(struct jit_ctx *ctx)
* | $s4 |
* +-------------------------+
* | $s5 |
+ * +-------------------------+
+ * | tcc |
+ * +-------------------------+
+ * | tcc_ptr |
* +-------------------------+ <--BPF_REG_FP
* | prog->aux->stack_depth |
* | (optional) |
@@ -88,22 +124,32 @@ static u8 tail_call_reg(struct jit_ctx *ctx)
*/
static void build_prologue(struct jit_ctx *ctx)
{
- int stack_adjust = 0, store_offset, bpf_stack_adjust;
+ int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
+ const struct bpf_prog *prog = ctx->prog;
+ const bool is_main_prog = !bpf_is_subprog(prog);
bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
- /* To store ra, fp, s0, s1, s2, s3, s4 and s5. */
+ /* To store ra, fp, s0, s1, s2, s3, s4, s5 */
stack_adjust += sizeof(long) * 8;
+ /* To store tcc and tcc_ptr */
+ stack_adjust += sizeof(long) * 2;
+
stack_adjust = round_up(stack_adjust, 16);
stack_adjust += bpf_stack_adjust;
+ /* Reserve space for the move_imm + jirl instruction sequence */
+ for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
+ emit_insn(ctx, nop);
+
/*
- * First instruction initializes the tail call count (TCC).
- * On tail call we skip this instruction, and the TCC is
- * passed in REG_TCC from the caller.
+ * First instruction initializes the tail call count (TCC)
+ * register to zero. On tail call we skip this instruction,
+ * and the TCC is passed in REG_TCC from the caller.
*/
- emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);
+ if (is_main_prog)
+ emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, 0);
emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);
@@ -131,20 +177,13 @@ static void build_prologue(struct jit_ctx *ctx)
store_offset -= sizeof(long);
emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);
+ prepare_bpf_tail_call_cnt(ctx, &store_offset);
+
emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);
if (bpf_stack_adjust)
emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);
- /*
- * Program contains calls and tail calls, so REG_TCC need
- * to be saved across calls.
- */
- if (seen_tail_call(ctx) && seen_call(ctx))
- move_reg(ctx, TCC_SAVED, REG_TCC);
- else
- emit_insn(ctx, nop);
-
ctx->stack_size = stack_adjust;
}
@@ -177,6 +216,16 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
load_offset -= sizeof(long);
emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);
+ /*
+ * When pushing onto the stack, the tcc is stored first, then the tcc_ptr.
+ * When popping from the stack, the tcc_ptr is restored first, followed by the tcc.
+ */
+ load_offset -= 2 * sizeof(long);
+ emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, load_offset);
+
+ load_offset += sizeof(long);
+ emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, load_offset);
+
emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust);
if (!is_tail_call) {
@@ -189,7 +238,7 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
* Call the next bpf prog and skip the first instruction
* of TCC initialization.
*/
- emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 1);
+ emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 6);
}
}
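
The tail-call entry offset grows from 1 to 6 instructions because a
JITed main program now begins with the five NOPs reserved for the
long-jump pad plus the single addi.d that zeroes REG_TCC; a tail call
must skip all six so the callee inherits the caller's counter pointer
instead of reinitializing it. Sketch of the entry layout assumed here:

  /*
   * Entry of a JITed main program (illustrative):
   *   insn 0..4 : nop                        -- pad for move_imm + jirl patching
   *   insn 5    : addi.d REG_TCC, $zero, 0   -- TCC init, main programs only
   *   insn 6    : first ordinary prologue instruction
   */
  #define LOONGARCH_INSN_SIZE    4
  #define TAIL_CALL_SKIP_NINSNS  (5 + 1)   /* the '6' passed to jirl above */
  #define TAIL_CALL_ENTRY_OFF    (TAIL_CALL_SKIP_NINSNS * LOONGARCH_INSN_SIZE)
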
@@ -208,12 +257,10 @@ bool bpf_jit_supports_far_kfunc_call(void)
return true;
}
-/* initialized on the first pass of build_body() */
-static int out_offset = -1;
-static int emit_bpf_tail_call(struct jit_ctx *ctx)
+static int emit_bpf_tail_call(struct jit_ctx *ctx, int insn)
{
- int off;
- u8 tcc = tail_call_reg(ctx);
+ int off, tc_ninsn = 0;
+ int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size);
u8 a1 = LOONGARCH_GPR_A1;
u8 a2 = LOONGARCH_GPR_A2;
u8 t1 = LOONGARCH_GPR_T1;
@@ -222,7 +269,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
-#define jmp_offset (out_offset - (cur_offset))
+#define jmp_offset (tc_ninsn - (cur_offset))
/*
* a0: &ctx
@@ -232,6 +279,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
* if (index >= array->map.max_entries)
* goto out;
*/
+ tc_ninsn = insn ? ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0];
off = offsetof(struct bpf_array, map.max_entries);
emit_insn(ctx, ldwu, t1, a1, off);
/* bgeu $a2, $t1, jmp_offset */
@@ -239,11 +287,15 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
goto toofar;
/*
- * if (--TCC < 0)
- * goto out;
+ * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
+ * goto out;
*/
- emit_insn(ctx, addid, REG_TCC, tcc, -1);
- if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
+ emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, tcc_ptr_off);
+ emit_insn(ctx, ldd, t3, REG_TCC, 0);
+ emit_insn(ctx, addid, t3, t3, 1);
+ emit_insn(ctx, std, t3, REG_TCC, 0);
+ emit_insn(ctx, addid, t2, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);
+ if (emit_tailcall_jmp(ctx, BPF_JSGT, t3, t2, jmp_offset) < 0)
goto toofar;
/*
@@ -263,15 +315,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit_insn(ctx, ldd, t3, t2, off);
__build_epilogue(ctx, true);
- /* out: */
- if (out_offset == -1)
- out_offset = cur_offset;
- if (cur_offset != out_offset) {
- pr_err_once("tail_call out_offset = %d, expected %d!\n",
- cur_offset, out_offset);
- return -1;
- }
-
return 0;
toofar:
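
With the global out_offset gone, each tail-call site computes its own
exit distance: ctx->offset[] records where every BPF instruction starts
in the JITed image, so the span of the current instruction bounds the
forward jump to the out label. A small model of the arithmetic (names
illustrative):

  /*
   * offset[i] is the JITed-instruction index at which BPF insn i begins,
   * so insn i spans offset[i + 1] - offset[i] JITed instructions and
   * "goto out" is a jump of (span - instructions already emitted).
   */
  static int tailcall_span(const unsigned int *offset, int insn)
  {
          return insn ? offset[insn + 1] - offset[insn] : offset[0];
  }
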
@@ -463,7 +506,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
u64 func_addr;
bool func_addr_fixed, sign_extend;
int i = insn - ctx->prog->insnsi;
- int ret, jmp_offset;
+ int ret, jmp_offset, tcc_ptr_off;
const u8 code = insn->code;
const u8 cond = BPF_OP(code);
const u8 t1 = LOONGARCH_GPR_T1;
@@ -899,12 +942,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
/* function call */
case BPF_JMP | BPF_CALL:
- mark_call(ctx);
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
&func_addr, &func_addr_fixed);
if (ret < 0)
return ret;
+ if (insn->src_reg == BPF_PSEUDO_CALL) {
+ tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size);
+ emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, tcc_ptr_off);
+ }
+
move_addr(ctx, t1, func_addr);
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
@@ -915,8 +962,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
/* tail call */
case BPF_JMP | BPF_TAIL_CALL:
- mark_tail_call(ctx);
- if (emit_bpf_tail_call(ctx) < 0)
+ if (emit_bpf_tail_call(ctx, i) < 0)
return -EINVAL;
break;
@@ -1180,12 +1226,528 @@ static int validate_code(struct jit_ctx *ctx)
return -1;
}
+ return 0;
+}
+
+static int validate_ctx(struct jit_ctx *ctx)
+{
+ if (validate_code(ctx))
+ return -1;
+
if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries))
return -1;
return 0;
}
+static int emit_jump_and_link(struct jit_ctx *ctx, u8 rd, u64 target)
+{
+ if (!target) {
+ pr_err("bpf_jit: invalid jump target address\n");
+ return -EFAULT;
+ }
+
+ move_imm(ctx, LOONGARCH_GPR_T1, target, false);
+ emit_insn(ctx, jirl, rd, LOONGARCH_GPR_T1, 0);
+
+ return 0;
+}
+
+static int emit_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
+{
+ int i;
+ struct jit_ctx ctx;
+
+ ctx.idx = 0;
+ ctx.image = (union loongarch_instruction *)insns;
+
+ if (!target) {
+ for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
+ emit_insn((&ctx), nop);
+ return 0;
+ }
+
+ return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO, (u64)target);
+}
+
+static int emit_call(struct jit_ctx *ctx, u64 addr)
+{
+ return emit_jump_and_link(ctx, LOONGARCH_GPR_RA, addr);
+}
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+ int ret;
+
+ mutex_lock(&text_mutex);
+ ret = larch_insn_text_copy(dst, src, len);
+ mutex_unlock(&text_mutex);
+
+ return ret ? ERR_PTR(-EINVAL) : dst;
+}
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
+ void *old_addr, void *new_addr)
+{
+ int ret;
+ bool is_call = (poke_type == BPF_MOD_CALL);
+ u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
+ u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
+
+ if (!is_kernel_text((unsigned long)ip) &&
+ !is_bpf_text_address((unsigned long)ip))
+ return -ENOTSUPP;
+
+ ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
+ if (ret)
+ return ret;
+
+ if (memcmp(ip, old_insns, LOONGARCH_LONG_JUMP_NBYTES))
+ return -EFAULT;
+
+ ret = emit_jump_or_nops(new_addr, ip, new_insns, is_call);
+ if (ret)
+ return ret;
+
+ mutex_lock(&text_mutex);
+ if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES))
+ ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES);
+ mutex_unlock(&text_mutex);
+
+ return ret;
+}
+
+int bpf_arch_text_invalidate(void *dst, size_t len)
+{
+ int i;
+ int ret = 0;
+ u32 *inst;
+
+ inst = kvmalloc(len, GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ for (i = 0; i < (len / sizeof(u32)); i++)
+ inst[i] = INSN_BREAK;
+
+ mutex_lock(&text_mutex);
+ if (larch_insn_text_copy(dst, inst, len))
+ ret = -EINVAL;
+ mutex_unlock(&text_mutex);
+
+ kvfree(inst);
+
+ return ret;
+}
+
+static void store_args(struct jit_ctx *ctx, int nargs, int args_off)
+{
+ int i;
+
+ for (i = 0; i < nargs; i++) {
+ emit_insn(ctx, std, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off);
+ args_off -= 8;
+ }
+}
+
+static void restore_args(struct jit_ctx *ctx, int nargs, int args_off)
+{
+ int i;
+
+ for (i = 0; i < nargs; i++) {
+ emit_insn(ctx, ldd, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off);
+ args_off -= 8;
+ }
+}
+
+static int invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
+ int args_off, int retval_off, int run_ctx_off, bool save_ret)
+{
+ int ret;
+ u32 *branch;
+ struct bpf_prog *p = l->link.prog;
+ int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
+
+ if (l->cookie) {
+ move_imm(ctx, LOONGARCH_GPR_T1, l->cookie, false);
+ emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off);
+ } else {
+ emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off);
+ }
+
+ /* arg1: prog */
+ move_imm(ctx, LOONGARCH_GPR_A0, (const s64)p, false);
+ /* arg2: &run_ctx */
+ emit_insn(ctx, addid, LOONGARCH_GPR_A1, LOONGARCH_GPR_FP, -run_ctx_off);
+ ret = emit_call(ctx, (const u64)bpf_trampoline_enter(p));
+ if (ret)
+ return ret;
+
+ /* store prog start time */
+ move_reg(ctx, LOONGARCH_GPR_S1, LOONGARCH_GPR_A0);
+
+ /*
+ * if (__bpf_prog_enter(prog) == 0)
+ * goto skip_exec_of_prog;
+ */
+ branch = (u32 *)ctx->image + ctx->idx;
+ /* nop reserved for conditional jump */
+ emit_insn(ctx, nop);
+
+ /* arg1: &args_off */
+ emit_insn(ctx, addid, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -args_off);
+ if (!p->jited)
+ move_imm(ctx, LOONGARCH_GPR_A1, (const s64)p->insnsi, false);
+ ret = emit_call(ctx, (const u64)p->bpf_func);
+ if (ret)
+ return ret;
+
+ if (save_ret) {
+ emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
+ emit_insn(ctx, std, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
+ }
+
+ /* update branch with beqz */
+ if (ctx->image) {
+ int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branch;
+ *branch = larch_insn_gen_beq(LOONGARCH_GPR_A0, LOONGARCH_GPR_ZERO, offset);
+ }
+
+ /* arg1: prog */
+ move_imm(ctx, LOONGARCH_GPR_A0, (const s64)p, false);
+ /* arg2: prog start time */
+ move_reg(ctx, LOONGARCH_GPR_A1, LOONGARCH_GPR_S1);
+ /* arg3: &run_ctx */
+ emit_insn(ctx, addid, LOONGARCH_GPR_A2, LOONGARCH_GPR_FP, -run_ctx_off);
+ ret = emit_call(ctx, (const u64)bpf_trampoline_exit(p));
+
+ return ret;
+}
+
+static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
+ int args_off, int retval_off, int run_ctx_off, u32 **branches)
+{
+ int i;
+
+ emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -retval_off);
+ for (i = 0; i < tl->nr_links; i++) {
+ invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off, run_ctx_off, true);
+ emit_insn(ctx, ldd, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -retval_off);
+ branches[i] = (u32 *)ctx->image + ctx->idx;
+ emit_insn(ctx, nop);
+ }
+}
+
+void *arch_alloc_bpf_trampoline(unsigned int size)
+{
+ return bpf_prog_pack_alloc(size, jit_fill_hole);
+}
+
+void arch_free_bpf_trampoline(void *image, unsigned int size)
+{
+ bpf_prog_pack_free(image, size);
+}
+
+static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
+ const struct btf_func_model *m, struct bpf_tramp_links *tlinks,
+ void *func_addr, u32 flags)
+{
+ int i, ret, save_ret;
+ int stack_size = 0, nargs = 0;
+ int retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off;
+ bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
+ void *orig_call = func_addr;
+ struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
+ struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
+ struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+ u32 **branches = NULL;
+
+ if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
+ return -ENOTSUPP;
+
+ /*
+ * FP + 8 [ RA to parent func ] return address to parent function
+ * FP + 0 [ FP of parent func ] frame pointer of parent function
+ * FP - 8 [ T0 to traced func ] return address of traced function
+ * FP - 16 [ FP of traced func ] frame pointer of traced function
+ *
+ * FP - retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or
+ * BPF_TRAMP_F_RET_FENTRY_RET
+ * [ argN ]
+ * [ ... ]
+ * FP - args_off [ arg1 ]
+ *
+ * FP - nargs_off [ regs count ]
+ *
+ * FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG
+ *
+ * FP - run_ctx_off [ bpf_tramp_run_ctx ]
+ *
+ * FP - sreg_off [ callee saved reg ]
+ *
+ * FP - tcc_ptr_off [ tail_call_cnt_ptr ]
+ */
+
+ if (m->nr_args > LOONGARCH_MAX_REG_ARGS)
+ return -ENOTSUPP;
+
+ stack_size = 0;
+
+ /* Room in the trampoline frame to store the return address and frame pointer */
+ stack_size += 16;
+
+ save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+ if (save_ret) {
+ /* Save BPF R0 and A0 */
+ stack_size += 16;
+ retval_off = stack_size;
+ }
+
+ /* Room in the trampoline frame to store the args */
+ nargs = m->nr_args;
+ stack_size += nargs * 8;
+ args_off = stack_size;
+
+ /* Room in the trampoline frame to store the number of args */
+ stack_size += 8;
+ nargs_off = stack_size;
+
+ /* Room in the trampoline frame to store the ip address */
+ if (flags & BPF_TRAMP_F_IP_ARG) {
+ stack_size += 8;
+ ip_off = stack_size;
+ }
+
+ /* Room in the trampoline frame to store struct bpf_tramp_run_ctx */
+ stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
+ run_ctx_off = stack_size;
+
+ stack_size += 8;
+ sreg_off = stack_size;
+
+ /* Room in the trampoline frame to store the tail_call_cnt_ptr */
+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
+ stack_size += 8;
+ tcc_ptr_off = stack_size;
+ }
+
+ stack_size = round_up(stack_size, 16);
+
+ if (is_struct_ops) {
+ /*
+ * For a trampoline called directly, only the
+ * trampoline's own frame needs to be set up.
+ */
+ emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size);
+ emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8);
+ emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
+ emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size);
+ } else {
+ /*
+ * For a trampoline called from a function entry,
+ * the frame of the traced function and the frame
+ * of the trampoline both need to be considered.
+ */
+ /* RA and FP for parent function */
+ emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -16);
+ emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, 8);
+ emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0);
+ emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 16);
+
+ /* RA and FP for traced function */
+ emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size);
+ emit_insn(ctx, std, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8);
+ emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
+ emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size);
+ }
+
+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+ emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off);
+
+ /* callee saved register S1 to pass start time */
+ emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off);
+
+ /* store ip address of the traced function */
+ if (flags & BPF_TRAMP_F_IP_ARG) {
+ move_imm(ctx, LOONGARCH_GPR_T1, (const s64)func_addr, false);
+ emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -ip_off);
+ }
+
+ /* store nargs number */
+ move_imm(ctx, LOONGARCH_GPR_T1, nargs, false);
+ emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -nargs_off);
+
+ store_args(ctx, nargs, args_off);
+
+ /* To traced function */
+ /* Ftrace jump skips 2 NOP instructions */
+ if (is_kernel_text((unsigned long)orig_call))
+ orig_call += LOONGARCH_FENTRY_NBYTES;
+ /* Direct jump skips 5 NOP instructions */
+ else if (is_bpf_text_address((unsigned long)orig_call))
+ orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false);
+ ret = emit_call(ctx, (const u64)__bpf_tramp_enter);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < fentry->nr_links; i++) {
+ ret = invoke_bpf_prog(ctx, fentry->links[i], args_off, retval_off,
+ run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET);
+ if (ret)
+ return ret;
+ }
+ if (fmod_ret->nr_links) {
+ branches = kcalloc(fmod_ret->nr_links, sizeof(u32 *), GFP_KERNEL);
+ if (!branches)
+ return -ENOMEM;
+
+ invoke_bpf_mod_ret(ctx, fmod_ret, args_off, retval_off, run_ctx_off, branches);
+ }
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ restore_args(ctx, m->nr_args, args_off);
+
+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+ emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off);
+
+ ret = emit_call(ctx, (const u64)orig_call);
+ if (ret)
+ goto out;
+ emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
+ emit_insn(ctx, std, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
+ im->ip_after_call = ctx->ro_image + ctx->idx;
+ /* Reserve space for the move_imm + jirl instruction sequence */
+ for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
+ emit_insn(ctx, nop);
+ }
+
+ for (i = 0; ctx->image && i < fmod_ret->nr_links; i++) {
+ int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branches[i];
+ *branches[i] = larch_insn_gen_bne(LOONGARCH_GPR_T1, LOONGARCH_GPR_ZERO, offset);
+ }
+
+ for (i = 0; i < fexit->nr_links; i++) {
+ ret = invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off, run_ctx_off, false);
+ if (ret)
+ goto out;
+ }
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ im->ip_epilogue = ctx->ro_image + ctx->idx;
+ move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false);
+ ret = emit_call(ctx, (const u64)__bpf_tramp_exit);
+ if (ret)
+ goto out;
+ }
+
+ if (flags & BPF_TRAMP_F_RESTORE_REGS)
+ restore_args(ctx, m->nr_args, args_off);
+
+ if (save_ret) {
+ emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
+ emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
+ }
+
+ emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off);
+
+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+ emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off);
+
+ if (is_struct_ops) {
+ /* trampoline called directly */
+ emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8);
+ emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
+ emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_size);
+
+ emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
+ } else {
+ /* trampoline called from function entry */
+ emit_insn(ctx, ldd, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8);
+ emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
+ emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_size);
+
+ emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, 8);
+ emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0);
+ emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, 16);
+
+ if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ /* return to parent function */
+ emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
+ else
+ /* return to traced function */
+ emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T0, 0);
+ }
+
+ ret = ctx->idx;
+out:
+ kfree(branches);
+
+ return ret;
+}
+
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
+ void *ro_image_end, const struct btf_func_model *m,
+ u32 flags, struct bpf_tramp_links *tlinks, void *func_addr)
+{
+ int ret, size;
+ void *image, *tmp;
+ struct jit_ctx ctx;
+
+ size = ro_image_end - ro_image;
+ image = kvmalloc(size, GFP_KERNEL);
+ if (!image)
+ return -ENOMEM;
+
+ ctx.image = (union loongarch_instruction *)image;
+ ctx.ro_image = (union loongarch_instruction *)ro_image;
+ ctx.idx = 0;
+
+ jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image));
+ ret = __arch_prepare_bpf_trampoline(&ctx, im, m, tlinks, func_addr, flags);
+ if (ret > 0 && validate_code(&ctx) < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tmp = bpf_arch_text_copy(ro_image, image, size);
+ if (IS_ERR(tmp)) {
+ ret = PTR_ERR(tmp);
+ goto out;
+ }
+
+ bpf_flush_icache(ro_image, ro_image_end);
+out:
+ kvfree(image);
+ return ret < 0 ? ret : size;
+}
+
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr)
+{
+ int ret;
+ struct jit_ctx ctx;
+ struct bpf_tramp_image im;
+
+ ctx.image = NULL;
+ ctx.idx = 0;
+
+ ret = __arch_prepare_bpf_trampoline(&ctx, &im, m, tlinks, func_addr, flags);
+
+ /* Page align */
+ return ret < 0 ? ret : round_up(ret * LOONGARCH_INSN_SIZE, PAGE_SIZE);
+}
+
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
bool tmp_blinded = false, extra_pass = false;
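
The trampoline above is produced in two passes, as on other
architectures: arch_bpf_trampoline_size() runs
__arch_prepare_bpf_trampoline() with ctx.image == NULL purely to count
instructions, and arch_prepare_bpf_trampoline() then emits into a
writable scratch buffer before bpf_arch_text_copy() installs the result
in the read-only image under text_mutex. A condensed model of the
dry-run pattern (hypothetical emitter, not the kernel code):

  #include <stddef.h>
  #include <stdint.h>

  struct emit_ctx {
          uint32_t *image;   /* NULL on the sizing pass */
          size_t    idx;     /* instructions emitted so far */
  };

  static void emit(struct emit_ctx *ctx, uint32_t insn)
  {
          if (ctx->image)                 /* pass 2 only: really write */
                  ctx->image[ctx->idx] = insn;
          ctx->idx++;                     /* both passes: count */
  }

  static size_t sizing_pass(void)
  {
          struct emit_ctx ctx = { .image = NULL, .idx = 0 };

          emit(&ctx, 0x03400000);         /* e.g. a LoongArch NOP (assumed encoding) */
          /* ... same emission sequence as the real pass ... */
          return ctx.idx * sizeof(uint32_t);
  }
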
@@ -1288,7 +1850,7 @@ skip_init_ctx:
build_epilogue(&ctx);
/* 3. Extra pass to validate JITed code */
- if (validate_code(&ctx)) {
+ if (validate_ctx(&ctx)) {
bpf_jit_binary_free(header);
prog = orig_prog;
goto out_offset;
@@ -1342,7 +1904,6 @@ out:
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);
- out_offset = -1;
return prog;
@@ -1354,6 +1915,16 @@ out_free:
goto out_offset;
}
+bool bpf_jit_bypass_spec_v1(void)
+{
+ return true;
+}
+
+bool bpf_jit_bypass_spec_v4(void)
+{
+ return true;
+}
+
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h
index f9c569f53949..5697158fd164 100644
--- a/arch/loongarch/net/bpf_jit.h
+++ b/arch/loongarch/net/bpf_jit.h
@@ -18,6 +18,7 @@ struct jit_ctx {
u32 *offset;
int num_exentries;
union loongarch_instruction *image;
+ union loongarch_instruction *ro_image;
u32 stack_size;
};
@@ -308,3 +309,8 @@ static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch
return -EINVAL;
}
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index ccd2c5e135c6..d8316f993482 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -36,7 +36,7 @@ endif
# VDSO linker flags.
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
- $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared --build-id -T
+ $(filter -E%,$(KBUILD_CFLAGS)) -shared --build-id -T
#
# Shared build commands.
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index eb5bb6d36899..11835eb59d94 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -32,6 +32,7 @@ config M68K
select HAVE_ASM_MODVERSIONS
select HAVE_DEBUG_BUGVERBOSE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED
+ select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_UID16
select MMU_GATHER_NO_RANGE if MMU
diff --git a/arch/m68k/Kconfig.debug b/arch/m68k/Kconfig.debug
index 30638a6e8edc..d036f903864c 100644
--- a/arch/m68k/Kconfig.debug
+++ b/arch/m68k/Kconfig.debug
@@ -10,7 +10,7 @@ config BOOTPARAM_STRING
config EARLY_PRINTK
bool "Early printk"
- depends on !(SUN3 || M68000 || COLDFIRE)
+ depends on MMU_MOTOROLA
help
Write kernel log output directly to a serial port.
Where implemented, output goes to the framebuffer as well.
diff --git a/arch/m68k/coldfire/gpio.c b/arch/m68k/coldfire/gpio.c
index 30e5a4ed799d..e2f7af1facb2 100644
--- a/arch/m68k/coldfire/gpio.c
+++ b/arch/m68k/coldfire/gpio.c
@@ -160,7 +160,7 @@ static struct gpio_chip mcfgpio_chip = {
.direction_input = mcfgpio_direction_input,
.direction_output = mcfgpio_direction_output,
.get = mcfgpio_get_value,
- .set_rv = mcfgpio_set_value,
+ .set = mcfgpio_set_value,
.to_irq = mcfgpio_to_irq,
.base = 0,
.ngpio = MCFGPIO_PIN_MAX,
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index d05690289e33..5171bb183967 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -85,7 +85,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -267,6 +266,7 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -356,6 +356,7 @@ CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+CONFIG_OVPN=m
CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
@@ -375,6 +376,7 @@ CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_A2065=y
CONFIG_ARIADNE=y
@@ -448,8 +450,10 @@ CONFIG_RTC_DRV_RP5C01=m
CONFIG_DAX=m
CONFIG_EXT4_FS=y
CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BTRFS_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -548,6 +552,7 @@ CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
@@ -580,7 +585,6 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -600,6 +604,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -631,6 +636,7 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CRC_BENCHMARK=y
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index a1747fbe23fb..16f343ae48c6 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -81,7 +81,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -263,6 +262,7 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -336,6 +336,7 @@ CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+CONFIG_OVPN=m
CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
@@ -355,6 +356,7 @@ CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -405,8 +407,10 @@ CONFIG_RTC_DRV_GENERIC=m
CONFIG_DAX=m
CONFIG_EXT4_FS=y
CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BTRFS_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -505,6 +509,7 @@ CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
@@ -537,7 +542,6 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -557,6 +561,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -588,6 +593,7 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CRC_BENCHMARK=y
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 74293551f66b..c08788728ea9 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -88,7 +88,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -270,6 +269,7 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -351,6 +351,7 @@ CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+CONFIG_OVPN=m
CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
@@ -370,6 +371,7 @@ CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_ATARILANCE=y
CONFIG_NE2000=y
@@ -425,8 +427,10 @@ CONFIG_RTC_DRV_GENERIC=m
CONFIG_DAX=m
CONFIG_EXT4_FS=y
CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BTRFS_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -525,6 +529,7 @@ CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
@@ -557,7 +562,6 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -577,6 +581,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -608,6 +613,7 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CRC_BENCHMARK=y
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 419b13ae950a..962497e7c53f 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -78,7 +78,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -260,6 +259,7 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -334,6 +334,7 @@ CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+CONFIG_OVPN=m
CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
@@ -353,6 +354,7 @@ CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_BVME6000_NET=y
CONFIG_PPP=m
@@ -397,8 +399,10 @@ CONFIG_RTC_DRV_GENERIC=m
CONFIG_DAX=m
CONFIG_EXT4_FS=y
CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BTRFS_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -497,6 +501,7 @@ CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
@@ -529,7 +534,6 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -549,6 +553,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -580,6 +585,7 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CRC_BENCHMARK=y
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 4c81d756587c..ec28650189e4 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -80,7 +80,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -262,6 +261,7 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -335,6 +335,7 @@ CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+CONFIG_OVPN=m
CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
@@ -354,6 +355,7 @@ CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_HPLANCE=y
CONFIG_PPP=m
@@ -407,8 +409,10 @@ CONFIG_RTC_DRV_GENERIC=m
CONFIG_DAX=m
CONFIG_EXT4_FS=y
CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BTRFS_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -507,6 +511,7 @@ CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
@@ -539,7 +544,6 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -559,6 +563,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -590,6 +595,7 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CRC_BENCHMARK=y
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index daa01d7fb462..0afb3ad180de 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -79,7 +79,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -261,6 +260,7 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -347,6 +347,7 @@ CONFIG_MAC_EMUMOUSEBTN=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+CONFIG_OVPN=m
CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
@@ -366,6 +367,7 @@ CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_MACMACE=y
CONFIG_MAC89x0=y
@@ -424,8 +426,10 @@ CONFIG_RTC_DRV_GENERIC=m
CONFIG_DAX=m
CONFIG_EXT4_FS=y
CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BTRFS_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -524,6 +528,7 @@ CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
@@ -556,7 +561,6 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -576,6 +580,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -607,6 +612,7 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CRC_BENCHMARK=y
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 641ca22eb3b2..b311e953995d 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -99,7 +99,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -281,6 +280,7 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -390,6 +390,7 @@ CONFIG_MAC_EMUMOUSEBTN=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+CONFIG_OVPN=m
CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
@@ -409,6 +410,7 @@ CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_A2065=y
CONFIG_ARIADNE=y
@@ -511,8 +513,10 @@ CONFIG_RTC_DRV_GENERIC=m
CONFIG_DAX=m
CONFIG_EXT4_FS=y
CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BTRFS_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -611,6 +615,7 @@ CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
@@ -643,7 +648,6 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -663,6 +667,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -694,6 +699,7 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CRC_BENCHMARK=y
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index f98ffa7a1640..f4e6224f137f 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -77,7 +77,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -259,6 +258,7 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -333,6 +333,7 @@ CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+CONFIG_OVPN=m
CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
@@ -352,6 +353,7 @@ CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_MVME147_NET=y
CONFIG_PPP=m
@@ -397,8 +399,10 @@ CONFIG_RTC_DRV_GENERIC=m
CONFIG_DAX=m
CONFIG_EXT4_FS=y
CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BTRFS_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -497,6 +501,7 @@ CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
@@ -529,7 +534,6 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -549,6 +553,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -580,6 +585,7 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CRC_BENCHMARK=y
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 2bfc3f4b48f9..498e167222f1 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -78,7 +78,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -260,6 +259,7 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -334,6 +334,7 @@ CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+CONFIG_OVPN=m
CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
@@ -353,6 +354,7 @@ CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_MVME16x_NET=y
CONFIG_PPP=m
@@ -398,8 +400,10 @@ CONFIG_RTC_DRV_GENERIC=m
CONFIG_DAX=m
CONFIG_EXT4_FS=y
CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BTRFS_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -498,6 +502,7 @@ CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
@@ -530,7 +535,6 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -550,6 +554,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -581,6 +586,7 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CRC_BENCHMARK=y
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 2bd46cbcca2a..8c6b1eef8534 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -79,7 +79,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -261,6 +260,7 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -340,6 +340,7 @@ CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+CONFIG_OVPN=m
CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
@@ -359,6 +360,7 @@ CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_NE2000=y
CONFIG_PLIP=m
@@ -414,8 +416,10 @@ CONFIG_RTC_DRV_GENERIC=m
CONFIG_DAX=m
CONFIG_EXT4_FS=y
CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BTRFS_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -514,6 +518,7 @@ CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
@@ -546,7 +551,6 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -566,6 +570,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -597,6 +602,7 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CRC_BENCHMARK=y
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index dc7fc94fc669..c34648f299ef 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -74,7 +74,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -256,6 +255,7 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -330,6 +330,7 @@ CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+CONFIG_OVPN=m
CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
@@ -349,6 +350,7 @@ CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_SUN3LANCE=y
CONFIG_SUN3_82586=y
@@ -395,8 +397,10 @@ CONFIG_RTC_DRV_GENERIC=m
CONFIG_DAX=m
CONFIG_EXT4_FS=y
CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BTRFS_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -495,6 +499,7 @@ CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
@@ -527,7 +532,6 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -547,6 +551,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -577,6 +582,7 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CRC_BENCHMARK=y
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index b026a54867f5..73810d14660f 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -75,7 +75,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_ZONES=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -257,6 +256,7 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
CONFIG_SCTP_COOKIE_HMAC_SHA1=y
CONFIG_RDS=m
CONFIG_RDS_TCP=m
@@ -331,6 +331,7 @@ CONFIG_TCM_PSCSI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+CONFIG_OVPN=m
CONFIG_EQUALIZER=m
CONFIG_NET_TEAM=m
CONFIG_NET_TEAM_MODE_BROADCAST=m
@@ -350,6 +351,7 @@ CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_SUN3LANCE=y
CONFIG_PPP=m
@@ -395,8 +397,10 @@ CONFIG_RTC_DRV_GENERIC=m
CONFIG_DAX=m
CONFIG_EXT4_FS=y
CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_BTRFS_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -495,6 +499,7 @@ CONFIG_DLM=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_RSA=m
@@ -527,7 +532,6 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
@@ -547,6 +551,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRIME_NUMBERS=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
@@ -578,6 +583,7 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CRC_BENCHMARK=y
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
diff --git a/arch/m68k/include/asm/adb_iop.h b/arch/m68k/include/asm/adb_iop.h
index 6aecd020e2fc..ca10b1ec0c78 100644
--- a/arch/m68k/include/asm/adb_iop.h
+++ b/arch/m68k/include/asm/adb_iop.h
@@ -33,7 +33,7 @@
#define ADB_IOP_SRQ 0x04 /* SRQ detected */
#define ADB_IOP_TIMEOUT 0x02 /* nonzero if timeout */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct adb_iopmsg {
__u8 flags; /* ADB flags */
@@ -43,4 +43,4 @@ struct adb_iopmsg {
__u8 spare[21]; /* spare */
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
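The series of header hunks that begins here swaps the kernel's hand-rolled __ASSEMBLY__ guard for __ASSEMBLER__, a macro that gcc and clang predefine whenever they preprocess assembly sources, so it needs no -D flag from the build system. A minimal sketch of the dual-use header pattern being converted (names are illustrative, not from the tree):

#ifndef _EXAMPLE_H
#define _EXAMPLE_H

#define EXAMPLE_FLAG	0x04		/* visible to both C and asm */

#ifndef __ASSEMBLER__	/* predefined by the compiler when preprocessing .S files */
struct example_msg {			/* C-only: struct syntax would break gas */
	unsigned char flags;
};
#endif /* __ASSEMBLER__ */

#endif /* _EXAMPLE_H */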
diff --git a/arch/m68k/include/asm/bootinfo.h b/arch/m68k/include/asm/bootinfo.h
index 81c91af8ec6c..267272b436e2 100644
--- a/arch/m68k/include/asm/bootinfo.h
+++ b/arch/m68k/include/asm/bootinfo.h
@@ -14,7 +14,7 @@
#include <uapi/asm/bootinfo.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_BOOTINFO_PROC
extern void save_bootinfo(const struct bi_record *bi);
@@ -28,7 +28,7 @@ void process_uboot_commandline(char *commandp, int size);
static inline void process_uboot_commandline(char *commandp, int size) {}
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _M68K_BOOTINFO_H */
diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
index 9b52b060c76a..86cba7c19e67 100644
--- a/arch/m68k/include/asm/entry.h
+++ b/arch/m68k/include/asm/entry.h
@@ -4,7 +4,7 @@
#include <asm/setup.h>
#include <asm/page.h>
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#include <asm/thread_info.h>
#endif
@@ -41,7 +41,7 @@
#define ALLOWINT (~0x700)
#endif /* machine compilation types */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
/*
* This defines the normal kernel pt-regs layout.
*
diff --git a/arch/m68k/include/asm/kexec.h b/arch/m68k/include/asm/kexec.h
index 3b0b64f0a353..f79427bd6487 100644
--- a/arch/m68k/include/asm/kexec.h
+++ b/arch/m68k/include/asm/kexec.h
@@ -15,7 +15,7 @@
#define KEXEC_ARCH KEXEC_ARCH_68K
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
@@ -23,7 +23,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
/* Dummy implementation for now */
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* CONFIG_KEXEC_CORE */
diff --git a/arch/m68k/include/asm/mac_baboon.h b/arch/m68k/include/asm/mac_baboon.h
index 08d9b8829a1a..ed5b5b48bdf8 100644
--- a/arch/m68k/include/asm/mac_baboon.h
+++ b/arch/m68k/include/asm/mac_baboon.h
@@ -5,7 +5,7 @@
#define BABOON_BASE (0x50F1A000) /* same as IDE controller base */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct baboon {
char pad1[208]; /* generic IDE registers, not used here */
@@ -36,4 +36,4 @@ extern void baboon_register_interrupts(void);
extern void baboon_irq_enable(int);
extern void baboon_irq_disable(int);
-#endif /* __ASSEMBLY **/
+#endif /* __ASSEMBLER__ */
diff --git a/arch/m68k/include/asm/mac_iop.h b/arch/m68k/include/asm/mac_iop.h
index 32f1c79c818f..a6753eb16ba4 100644
--- a/arch/m68k/include/asm/mac_iop.h
+++ b/arch/m68k/include/asm/mac_iop.h
@@ -66,7 +66,7 @@
#define IOP_ADDR_ALIVE 0x031F
#define IOP_ADDR_RECV_MSG 0x0320
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* IOP Control registers, staggered because in usual Apple style they were
@@ -163,4 +163,4 @@ extern void iop_ism_irq_poll(uint);
extern void iop_register_interrupts(void);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
diff --git a/arch/m68k/include/asm/mac_oss.h b/arch/m68k/include/asm/mac_oss.h
index 56ef986c0a9b..a6e86e443155 100644
--- a/arch/m68k/include/asm/mac_oss.h
+++ b/arch/m68k/include/asm/mac_oss.h
@@ -59,7 +59,7 @@
#define OSS_POWEROFF 0x80
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct mac_oss {
__u8 irq_level[0x10]; /* [0x000-0x00f] Interrupt levels */
@@ -77,4 +77,4 @@ extern void oss_register_interrupts(void);
extern void oss_irq_enable(int);
extern void oss_irq_disable(int);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
diff --git a/arch/m68k/include/asm/mac_psc.h b/arch/m68k/include/asm/mac_psc.h
index 86a5a5eab89e..6587dbd54476 100644
--- a/arch/m68k/include/asm/mac_psc.h
+++ b/arch/m68k/include/asm/mac_psc.h
@@ -207,7 +207,7 @@
* Unknown, always 0x0000.
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern volatile __u8 *psc;
@@ -249,4 +249,4 @@ static inline u32 psc_read_long(int offset)
return *((volatile __u32 *)(psc + offset));
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h
index a9ef1e9ba6c4..b065cd8e5071 100644
--- a/arch/m68k/include/asm/mac_via.h
+++ b/arch/m68k/include/asm/mac_via.h
@@ -250,7 +250,7 @@
#define IER_SET_BIT(b) (0x80 | (1<<(b)) )
#define IER_CLR_BIT(b) (0x7F & (1<<(b)) )
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern volatile __u8 *via1,*via2;
extern int rbv_present,via_alt_mapping;
@@ -267,6 +267,6 @@ extern void via1_irq(struct irq_desc *desc);
extern void via1_set_head(int);
extern int via2_scsi_drq_pending(void);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_MAC_VIA_H_ */
diff --git a/arch/m68k/include/asm/math-emu.h b/arch/m68k/include/asm/math-emu.h
index eefaa3a2b596..91074ade14ad 100644
--- a/arch/m68k/include/asm/math-emu.h
+++ b/arch/m68k/include/asm/math-emu.h
@@ -67,7 +67,7 @@
#define PMUNIMPL (1<<PUNIMPL)
#define PMMOVEM (1<<PMOVEM)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -127,7 +127,7 @@ extern unsigned int fp_debugprint;
#define FPDATA ((struct fp_data *)current->thread.fp)
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#define FPDATA %a2
@@ -311,6 +311,6 @@ old_gas=old_gas+1
.endm
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_M68K_SETUP_H */
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index d79fef609194..189bb7b1e663 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -92,7 +92,7 @@
#define PTE_MASK PAGE_MASK
#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
@@ -292,5 +292,5 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _MCF_PGTABLE_H */
diff --git a/arch/m68k/include/asm/mcfmmu.h b/arch/m68k/include/asm/mcfmmu.h
index 283352ab0d5d..db16ea1057f7 100644
--- a/arch/m68k/include/asm/mcfmmu.h
+++ b/arch/m68k/include/asm/mcfmmu.h
@@ -88,7 +88,7 @@
#define MMUDR_PAN 10 /* Physical address */
#define MMUDR_PAMASK 0xfffffc00 /* PA mask */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Simple access functions for the MMU registers. Nothing fancy
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 14fee64d3e60..dcf6829b3eab 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -44,7 +44,7 @@
/* We borrow bit 11 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE 0x800
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* This is the cache mode to be used for pages containing page descriptors for
* processors >= '040. It is in pte_mknocache(), and the variable is defined
@@ -202,5 +202,5 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return pte;
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _MOTOROLA_PGTABLE_H */
diff --git a/arch/m68k/include/asm/nettel.h b/arch/m68k/include/asm/nettel.h
index 3bd4b7a4613f..9bf55cef119e 100644
--- a/arch/m68k/include/asm/nettel.h
+++ b/arch/m68k/include/asm/nettel.h
@@ -38,7 +38,7 @@
#define NETtel_LEDADDR 0x30400000
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern volatile unsigned short ppdata;
@@ -80,7 +80,7 @@ static __inline__ void mcf_setppdata(unsigned int mask, unsigned int bits)
#define MCFPP_DTR0 0x0040
#define MCFPP_DTR1 0x0000 /* Port 1 no DTR support */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* These functions defined to give quasi generic access to the
* PPIO bits used for DTR/DCD.
diff --git a/arch/m68k/include/asm/openprom.h b/arch/m68k/include/asm/openprom.h
index dd22e649f5c5..6456ba40a946 100644
--- a/arch/m68k/include/asm/openprom.h
+++ b/arch/m68k/include/asm/openprom.h
@@ -21,7 +21,7 @@
#define LINUX_OPPROM_MAGIC 0x10010407
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* V0 prom device operations. */
struct linux_dev_v0_funcs {
int (*v0_devopen)(char *device_str);
@@ -308,6 +308,6 @@ struct linux_prom_ranges {
unsigned int or_size;
};
-#endif /* !(__ASSEMBLY__) */
+#endif /* !(__ASSEMBLER__) */
#endif /* !(__SPARC_OPENPROM_H) */
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index b173ba27d36f..d30f8b2f1592 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -10,7 +10,7 @@
#define PAGE_OFFSET (PAGE_OFFSET_RAW)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* These are used to make use of C type-checking..
@@ -48,7 +48,7 @@ extern unsigned long _rambase;
extern unsigned long _ramstart;
extern unsigned long _ramend;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#ifdef CONFIG_MMU
#include <asm/page_mm.h>
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
index e0ae4d5fc985..ed782609ca41 100644
--- a/arch/m68k/include/asm/page_mm.h
+++ b/arch/m68k/include/asm/page_mm.h
@@ -2,7 +2,7 @@
#ifndef _M68K_PAGE_MM_H
#define _M68K_PAGE_MM_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/compiler.h>
#include <asm/module.h>
@@ -144,6 +144,6 @@ extern int m68k_virt_to_node_shift;
#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
#define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _M68K_PAGE_MM_H */
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index 63c0e706084b..39db2026a4b4 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -2,7 +2,7 @@
#ifndef _M68K_PAGE_NO_H
#define _M68K_PAGE_NO_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned long memory_start;
extern unsigned long memory_end;
@@ -37,6 +37,6 @@ static inline void *pfn_to_virt(unsigned long pfn)
#define ARCH_PFN_OFFSET PHYS_PFN(PAGE_OFFSET_RAW)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _M68K_PAGE_NO_H */
diff --git a/arch/m68k/include/asm/pgtable.h b/arch/m68k/include/asm/pgtable.h
index 49fcfd734860..02f1a4601379 100644
--- a/arch/m68k/include/asm/pgtable.h
+++ b/arch/m68k/include/asm/pgtable.h
@@ -10,7 +10,7 @@
#include <asm/pgtable_mm.h>
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern void paging_init(void);
#endif
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index dbdf1c2b2f66..62f2ff4e6799 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -11,7 +11,7 @@
#include <asm/setup.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/processor.h>
#include <linux/sched.h>
#include <linux/threads.h>
@@ -145,7 +145,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* MMU-specific headers */
@@ -157,7 +157,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
#include <asm/motorola_pgtable.h>
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Macro to mark a page protection value as "uncacheable".
*/
@@ -182,6 +182,6 @@ pgprot_t pgprot_dmacoherent(pgprot_t prot);
#define pgprot_dmacoherent(prot) pgprot_dmacoherent(prot)
#endif /* CONFIG_COLDFIRE */
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _M68K_PGTABLE_H */
diff --git a/arch/m68k/include/asm/ptrace.h b/arch/m68k/include/asm/ptrace.h
index ea5a80ca1ab3..bc86ce012025 100644
--- a/arch/m68k/include/asm/ptrace.h
+++ b/arch/m68k/include/asm/ptrace.h
@@ -4,7 +4,7 @@
#include <uapi/asm/ptrace.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifndef PS_S
#define PS_S (0x2000)
@@ -24,5 +24,5 @@
#define arch_has_block_step() (1)
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _M68K_PTRACE_H */
diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h
index 2c99477aaf89..e4ec169f5c7d 100644
--- a/arch/m68k/include/asm/setup.h
+++ b/arch/m68k/include/asm/setup.h
@@ -28,9 +28,9 @@
#define CL_SIZE COMMAND_LINE_SIZE
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned long m68k_machtype;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#if !defined(CONFIG_AMIGA)
# define MACH_IS_AMIGA (0)
@@ -199,7 +199,7 @@ extern unsigned long m68k_machtype;
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned long m68k_cputype;
extern unsigned long m68k_fputype;
extern unsigned long m68k_mmutype;
@@ -213,7 +213,7 @@ extern unsigned long vme_brdtype;
*/
extern int m68k_is040or060;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#if !defined(CONFIG_M68020)
# define CPU_IS_020 (0)
@@ -321,7 +321,7 @@ extern int m68k_is040or060;
#define NUM_MEMINFO 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct m68k_mem_info {
unsigned long addr; /* physical address of memory chunk */
unsigned long size; /* length of memory chunk (in bytes) */
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index 858cbe936f5b..80ca185a18a1 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -4,7 +4,7 @@
#include <asm/sun3mmu.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/virtconvert.h>
#include <linux/linkage.h>
@@ -19,7 +19,7 @@
#define PTOV(addr) __va(addr)
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* These need to be defined for compatibility although the sun3 doesn't use them */
#define _PAGE_NOCACHE030 0x040
@@ -74,7 +74,7 @@
/* We borrow bit 6 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE 0x040
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
@@ -186,5 +186,5 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return pte;
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* !_SUN3_PGTABLE_H */
diff --git a/arch/m68k/include/asm/sun3mmu.h b/arch/m68k/include/asm/sun3mmu.h
index 21a75daa278f..fee05cd2ce5b 100644
--- a/arch/m68k/include/asm/sun3mmu.h
+++ b/arch/m68k/include/asm/sun3mmu.h
@@ -67,7 +67,7 @@
#define SUN3_BUSERR_PROTERR (0x40)
#define SUN3_BUSERR_INVALID (0x80)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* Read bus error status register (implicitly clearing it). */
static inline unsigned char sun3_get_buserr(void)
@@ -167,6 +167,6 @@ extern void __iomem *sun3_ioremap(unsigned long phys, unsigned long size,
extern int sun3_map_test(unsigned long addr, char *val);
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* !__SUN3_MMU_H__ */
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 3e31adbddc75..5cb3ace55622 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -22,7 +22,7 @@
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct thread_info {
struct task_struct *task; /* main task structure */
@@ -31,7 +31,7 @@ struct thread_info {
__u32 cpu; /* should always be 0 on m68k */
unsigned long tp_value; /* thread pointer */
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#define INIT_THREAD_INFO(tsk) \
{ \
@@ -39,7 +39,7 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h
index a9d5c1c870d3..c7b3989bd4b2 100644
--- a/arch/m68k/include/asm/traps.h
+++ b/arch/m68k/include/asm/traps.h
@@ -11,7 +11,7 @@
#ifndef _M68K_TRAPS_H
#define _M68K_TRAPS_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/linkage.h>
#include <asm/ptrace.h>
@@ -94,7 +94,7 @@ asmlinkage void bad_inthandler(void);
#define VECOFF(vec) ((vec)<<2)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* Status register bits */
#define PS_T (0x8000)
@@ -271,6 +271,6 @@ struct frame {
asmlinkage void berr_040cleanup(struct frame *fp);
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _M68K_TRAPS_H */
diff --git a/arch/m68k/include/uapi/asm/bootinfo-vme.h b/arch/m68k/include/uapi/asm/bootinfo-vme.h
index f36a09ab5e79..b8139eb39352 100644
--- a/arch/m68k/include/uapi/asm/bootinfo-vme.h
+++ b/arch/m68k/include/uapi/asm/bootinfo-vme.h
@@ -33,7 +33,7 @@
#define VME_TYPE_BVME6000 0x6000 /* BVM Ltd. BVME6000 */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Board ID data structure - pointer to this retrieved from Bug by head.S
@@ -56,7 +56,7 @@ typedef struct {
__be32 option2;
} t_bdid, *p_bdid;
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/*
diff --git a/arch/m68k/include/uapi/asm/bootinfo.h b/arch/m68k/include/uapi/asm/bootinfo.h
index 024e87d7095f..28d2d44c08d0 100644
--- a/arch/m68k/include/uapi/asm/bootinfo.h
+++ b/arch/m68k/include/uapi/asm/bootinfo.h
@@ -16,7 +16,7 @@
#include <linux/types.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Bootinfo definitions
@@ -43,7 +43,7 @@ struct mem_info {
__be32 size; /* length of memory chunk (in bytes) */
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/*
@@ -167,7 +167,7 @@ struct mem_info {
#define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff)
#define BI_VERSION_MINOR(v) ((v) & 0xffff)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct bootversion {
__be16 branch;
@@ -178,7 +178,7 @@ struct bootversion {
} machversions[];
} __packed;
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _UAPI_ASM_M68K_BOOTINFO_H */
diff --git a/arch/m68k/include/uapi/asm/ptrace.h b/arch/m68k/include/uapi/asm/ptrace.h
index ebd9fccb3d11..d70f771399b4 100644
--- a/arch/m68k/include/uapi/asm/ptrace.h
+++ b/arch/m68k/include/uapi/asm/ptrace.h
@@ -22,7 +22,7 @@
#define PT_SR 17
#define PT_PC 18
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* this struct defines the way the registers are stored on the
stack during a system call. */
@@ -81,5 +81,5 @@ struct switch_stack {
#define PTRACE_GETFDPIC_EXEC 0
#define PTRACE_GETFDPIC_INTERP 1
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _UAPI_M68K_PTRACE_H */
diff --git a/arch/m68k/kernel/early_printk.c b/arch/m68k/kernel/early_printk.c
index f11ef9f1f56f..521cbb8a150c 100644
--- a/arch/m68k/kernel/early_printk.c
+++ b/arch/m68k/kernel/early_printk.c
@@ -16,25 +16,10 @@
#include "../mvme147/mvme147.h"
#include "../mvme16x/mvme16x.h"
-asmlinkage void __init debug_cons_nputs(const char *s, unsigned n);
-
-static void __ref debug_cons_write(struct console *c,
- const char *s, unsigned n)
-{
-#if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \
- defined(CONFIG_COLDFIRE))
- if (MACH_IS_MVME147)
- mvme147_scc_write(c, s, n);
- else if (MACH_IS_MVME16x)
- mvme16x_cons_write(c, s, n);
- else
- debug_cons_nputs(s, n);
-#endif
-}
+asmlinkage void __init debug_cons_nputs(struct console *c, const char *s, unsigned int n);
static struct console early_console_instance = {
.name = "debug",
- .write = debug_cons_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1
};
@@ -44,6 +29,12 @@ static int __init setup_early_printk(char *buf)
if (early_console || buf)
return 0;
+ if (MACH_IS_MVME147)
+ early_console_instance.write = mvme147_scc_write;
+ else if (MACH_IS_MVME16x)
+ early_console_instance.write = mvme16x_cons_write;
+ else
+ early_console_instance.write = debug_cons_nputs;
early_console = &early_console_instance;
register_console(early_console);
@@ -51,20 +42,15 @@ static int __init setup_early_printk(char *buf)
}
early_param("earlyprintk", setup_early_printk);
-/*
- * debug_cons_nputs() defined in arch/m68k/kernel/head.S cannot be called
- * after init sections are discarded (for platforms that use it).
- */
-#if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \
- defined(CONFIG_COLDFIRE))
-
static int __init unregister_early_console(void)
{
- if (!early_console || MACH_IS_MVME16x)
- return 0;
+ /*
+ * debug_cons_nputs() defined in arch/m68k/kernel/head.S cannot be
+ * called after init sections are discarded (for platforms that use it).
+ */
+ if (early_console && early_console->write == debug_cons_nputs)
+ return unregister_console(early_console);
- return unregister_console(early_console);
+ return 0;
}
late_initcall(unregister_early_console);
-
-#endif
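The rewrite above trades a dispatch wrapper (which re-tested MACH_IS_MVME147/MACH_IS_MVME16x on every write, inside a compile-time #if) for a single assignment of the console's ->write callback in setup_early_printk(); unregister_early_console() can then recognize the head.S path by comparing that pointer. A standalone sketch of the idiom, with hypothetical names (write_serial, write_fb, console_bind are not from the tree):

#include <stdio.h>

/* bind the implementation once at init; later calls go straight
 * through the pointer with no per-call platform test */
static void write_serial(const char *s, unsigned int n) { fwrite(s, 1, n, stderr); }
static void write_fb(const char *s, unsigned int n) { fwrite(s, 1, n, stdout); }

static void (*console_write)(const char *s, unsigned int n);

static void console_bind(int have_serial)
{
	console_write = have_serial ? write_serial : write_fb;
}

int main(void)
{
	console_bind(1);
	console_write("hello\n", 6);	/* no branch here */
	return 0;
}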
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index 852255cf60de..2e4ef0358887 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -3263,8 +3263,8 @@ func_return putn
* turns around and calls the internal routines. This routine
* is used by the boot console.
*
- * The calling parameters are:
- * void debug_cons_nputs(const char *str, unsigned length)
+ * The function signature is -
+ * void debug_cons_nputs(struct console *c, const char *s, unsigned int n)
*
* This routine does NOT understand variable arguments only
* simple strings!
@@ -3273,8 +3273,8 @@ ENTRY(debug_cons_nputs)
moveml %d0/%d1/%a0,%sp@-
movew %sr,%sp@-
ori #0x0700,%sr
- movel %sp@(18),%a0 /* fetch parameter */
- movel %sp@(22),%d1 /* fetch parameter */
+ movel %sp@(22),%a0 /* char *s */
+ movel %sp@(26),%d1 /* unsigned int n */
jra 2f
1:
#ifdef CONSOLE_DEBUG
@@ -3400,6 +3400,7 @@ L(console_clear_loop):
movel %d4,%d1 /* screen height in pixels */
divul %a0@(FONT_DESC_HEIGHT),%d1 /* d1 = max num rows */
+ subql #1,%d1 /* row range is 0 to num - 1 */
movel %d0,%a2@(Lconsole_struct_num_columns)
movel %d1,%a2@(Lconsole_struct_num_rows)
@@ -3532,61 +3533,44 @@ func_start console_putc,%a0/%a1/%d0-%d7
tstl %pc@(L(console_font))
jeq L(console_exit)
+ lea %pc@(L(console_globals)),%a0
+
/* Output character in d7 on console.
*/
movel ARG1,%d7
cmpib #'\n',%d7
- jbne 1f
+ jne L(console_not_lf)
- /* A little safe recursion is good for the soul */
- console_putc #'\r'
-1:
- lea %pc@(L(console_globals)),%a0
+ clrl %a0@(Lconsole_struct_cur_column) /* implicit \r */
- cmpib #10,%d7
- jne L(console_not_lf)
movel %a0@(Lconsole_struct_cur_row),%d0
- addil #1,%d0
- movel %d0,%a0@(Lconsole_struct_cur_row)
movel %a0@(Lconsole_struct_num_rows),%d1
cmpl %d1,%d0
jcs 1f
- subil #1,%d0
- movel %d0,%a0@(Lconsole_struct_cur_row)
console_scroll
+ jra L(console_exit)
1:
+ addql #1,%d0
+ movel %d0,%a0@(Lconsole_struct_cur_row)
jra L(console_exit)
L(console_not_lf):
- cmpib #13,%d7
- jne L(console_not_cr)
+ cmpib #'\r',%d7
+ jne L(console_not_lf_not_cr)
clrl %a0@(Lconsole_struct_cur_column)
jra L(console_exit)
-L(console_not_cr):
- cmpib #1,%d7
- jne L(console_not_home)
- clrl %a0@(Lconsole_struct_cur_row)
- clrl %a0@(Lconsole_struct_cur_column)
- jra L(console_exit)
-
-/*
- * At this point we know that the %d7 character is going to be
- * rendered on the screen. Register usage is -
- * a0 = pointer to console globals
- * a1 = font data
- * d0 = cursor column
- * d1 = cursor row to draw the character
- * d7 = character number
- */
-L(console_not_home):
+ /*
+ * At this point we know that the %d7 character is going to be
+ * rendered on the screen. Register usage is -
+ * a0 = pointer to console globals
+ * a1 = font data
+ * d0 = cursor column
+ * d1 = cursor row to draw the character
+ * d7 = character number
+ */
+L(console_not_lf_not_cr):
movel %a0@(Lconsole_struct_cur_column),%d0
- addql #1,%a0@(Lconsole_struct_cur_column)
- movel %a0@(Lconsole_struct_num_columns),%d1
- cmpl %d1,%d0
- jcs 1f
- console_putc #'\n' /* recursion is OK! */
-1:
movel %a0@(Lconsole_struct_cur_row),%d1
/*
@@ -3633,6 +3617,23 @@ L(console_do_font_scanline):
addq #1,%d1
dbra %d7,L(console_read_char_scanline)
+ /*
+ * Register usage in the code below:
+ * a0 = pointer to console globals
+ * d0 = cursor column
+ * d1 = cursor column limit
+ */
+
+ lea %pc@(L(console_globals)),%a0
+
+ movel %a0@(Lconsole_struct_cur_column),%d0
+ addql #1,%d0
+ movel %d0,%a0@(Lconsole_struct_cur_column) /* Update cursor pos */
+ movel %a0@(Lconsole_struct_num_columns),%d1
+ cmpl %d1,%d0
+ jcs L(console_exit)
+ console_putc #'\n' /* Line wrap using tail recursion */
+
L(console_exit):
func_return console_putc
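The changed offsets in the debug_cons_nputs hunk above follow from the m68k convention that arguments live on the stack just above the return address, plus the 14 bytes this routine's own prologue pushes before the loads execute. Worked layout, reconstructed from the hunks (a sketch, not quoted from head.S):

/*
 * After "movew %sr,%sp@-" the frame seen by the two movel loads is:
 *
 *   %sp@(0)	saved %sr			(2 bytes, pushed last)
 *   %sp@(2)	saved %d0/%d1/%a0		(12 bytes from moveml)
 *   %sp@(14)	return address			(4 bytes)
 *   %sp@(18)	arg 1: struct console *c	(new first argument)
 *   %sp@(22)	arg 2: const char *s		(was %sp@(18) in the 2-arg form)
 *   %sp@(26)	arg 3: unsigned int n		(was %sp@(22))
 */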
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index 01e6b0e37f8d..9cb813eda4fd 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -621,6 +621,22 @@ static u64 mac_read_clk(struct clocksource *cs)
* These problems are avoided by ignoring the low byte. Clock accuracy
* is 256 times worse (error can reach 0.327 ms) but CPU overhead is
* reduced by avoiding slow VIA register accesses.
+ *
+ * The VIA timer counter observably decrements to 0xFFFF before the
+ * counter reload interrupt gets raised. That complicates things a bit.
+ *
+ * State | vT1CH | VIA_TIMER_1_INT | inference drawn
+ * ------+------------+-----------------+-----------------------------
+ * i | FE thru 00 | false | counter is decrementing
+ * ii | FF | false | counter wrapped
+ * iii | FF | true | wrapped, interrupt raised
+ * iv | FF | false | wrapped, interrupt handled
+ * v | FE thru 00 | true | wrapped, interrupt unhandled
+ *
+ * State iv is never observed because handling the interrupt involves
+ * a 6522 register access and every access consumes a "phi 2" clock
+ * cycle. So 0xFF implies either state ii or state iii, depending on
+ * the value of the VIA_TIMER_1_INT bit.
*/
local_irq_save(flags);
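Read as code, the comment's table collapses to a single inference: a high byte of 0xFF always means the counter has wrapped (states ii/iii), and any other high byte with VIA_TIMER_1_INT set means a wrap whose interrupt is still unhandled (state v). A hedged sketch of that test, not the literal logic of mac_read_clk():

/* sketch only: distinguish "wrapped" from "still decrementing" */
u8 high = via1[vT1CH];
bool wrapped = (high == 0xff) ||		/* states ii and iii */
	       (via1[vIFR] & VIA_TIMER_1_INT);	/* state v */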
diff --git a/arch/m68k/math-emu/fp_emu.h b/arch/m68k/math-emu/fp_emu.h
index c1ecfef7886a..6ac811c31ca4 100644
--- a/arch/m68k/math-emu/fp_emu.h
+++ b/arch/m68k/math-emu/fp_emu.h
@@ -38,12 +38,12 @@
#ifndef _FP_EMU_H
#define _FP_EMU_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#include <asm/asm-offsets.h>
#endif
#include <asm/math-emu.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define IS_INF(a) ((a)->exp == 0x7fff)
#define IS_ZERO(a) ((a)->mant.m64 == 0)
@@ -124,7 +124,7 @@ extern const struct fp_ext fp_Inf;
: "a1", "d1", "d2", "memory"); \
})
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
/*
* set, reset or clear a bit in the fp status register
@@ -141,6 +141,6 @@ extern const struct fp_ext fp_Inf;
btst #(\bit&7),(FPD_FPSR+3-(\bit/8),FPDATA)
.endm
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _FP_EMU_H */
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 745bd575dcfa..62283bc2ed79 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -92,7 +92,7 @@ void mmu_page_dtor(void *page)
}
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
- struct page instead of separately kmalloced struct. Stolen from
+ struct ptdesc instead of separately kmalloced struct. Stolen from
arch/sparc/mm/srmmu.c ... */
typedef struct list_head ptable_desc;
@@ -103,8 +103,7 @@ static struct list_head ptable_list[3] = {
LIST_HEAD_INIT(ptable_list[2]),
};
-#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
-#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
+#define PD_PTABLE(ptdesc) ((ptable_desc *)&(virt_to_ptdesc((void *)(ptdesc))->pt_list))
#define PD_PTDESC(ptable) (list_entry(ptable, struct ptdesc, pt_list))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PTDESC(dp)->pt_index)
@@ -121,10 +120,10 @@ void __init init_pointer_table(void *table, int type)
{
ptable_desc *dp;
unsigned long ptable = (unsigned long)table;
- unsigned long page = ptable & PAGE_MASK;
- unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
+ unsigned long pt_addr = ptable & PAGE_MASK;
+ unsigned int mask = 1U << ((ptable - pt_addr)/ptable_size(type));
- dp = PD_PTABLE(page);
+ dp = PD_PTABLE(pt_addr);
if (!(PD_MARKBITS(dp) & mask)) {
PD_MARKBITS(dp) = ptable_mask(type);
list_add(dp, &ptable_list[type]);
@@ -133,9 +132,9 @@ void __init init_pointer_table(void *table, int type)
PD_MARKBITS(dp) &= ~mask;
pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
- /* unreserve the page so it's possible to free that page */
- __ClearPageReserved(PD_PAGE(dp));
- init_page_count(PD_PAGE(dp));
+ /* unreserve the ptdesc so it's possible to free that ptdesc */
+ __ClearPageReserved(ptdesc_page(PD_PTDESC(dp)));
+ init_page_count(ptdesc_page(PD_PTDESC(dp)));
return;
}
@@ -148,40 +147,44 @@ void *get_pointer_table(struct mm_struct *mm, int type)
/*
* For a pointer table for a user process address space, a
- * table is taken from a page allocated for the purpose. Each
- * page can hold 8 pointer tables. The page is remapped in
+ * table is taken from a ptdesc allocated for the purpose. Each
+ * ptdesc can hold 8 pointer tables. The ptdesc is remapped in
* virtual address space to be noncacheable.
*/
if (mask == 0) {
- void *page;
+ struct ptdesc *ptdesc;
ptable_desc *new;
+ void *pt_addr;
- if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
+ ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
+ if (!ptdesc)
return NULL;
+ pt_addr = ptdesc_address(ptdesc);
+
switch (type) {
case TABLE_PTE:
/*
* m68k doesn't have SPLIT_PTE_PTLOCKS for not having
* SMP.
*/
- pagetable_pte_ctor(mm, virt_to_ptdesc(page));
+ pagetable_pte_ctor(mm, ptdesc);
break;
case TABLE_PMD:
- pagetable_pmd_ctor(mm, virt_to_ptdesc(page));
+ pagetable_pmd_ctor(mm, ptdesc);
break;
case TABLE_PGD:
- pagetable_pgd_ctor(virt_to_ptdesc(page));
+ pagetable_pgd_ctor(ptdesc);
break;
}
- mmu_page_ctor(page);
+ mmu_page_ctor(pt_addr);
- new = PD_PTABLE(page);
+ new = PD_PTABLE(pt_addr);
PD_MARKBITS(new) = ptable_mask(type) - 1;
list_add_tail(new, dp);
- return (pmd_t *)page;
+ return (pmd_t *)pt_addr;
}
for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
@@ -191,28 +194,27 @@ void *get_pointer_table(struct mm_struct *mm, int type)
/* move to end of list */
list_move_tail(dp, &ptable_list[type]);
}
- return page_address(PD_PAGE(dp)) + off;
+ return ptdesc_address(PD_PTDESC(dp)) + off;
}
int free_pointer_table(void *table, int type)
{
ptable_desc *dp;
unsigned long ptable = (unsigned long)table;
- unsigned long page = ptable & PAGE_MASK;
- unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
+ unsigned long pt_addr = ptable & PAGE_MASK;
+ unsigned int mask = 1U << ((ptable - pt_addr)/ptable_size(type));
- dp = PD_PTABLE(page);
+ dp = PD_PTABLE(pt_addr);
if (PD_MARKBITS (dp) & mask)
panic ("table already free!");
PD_MARKBITS (dp) |= mask;
if (PD_MARKBITS(dp) == ptable_mask(type)) {
- /* all tables in page are free, free page */
+ /* all tables in ptdesc are free, free ptdesc */
list_del(dp);
- mmu_page_dtor((void *)page);
- pagetable_dtor(virt_to_ptdesc((void *)page));
- free_page (page);
+ mmu_page_dtor((void *)pt_addr);
+ pagetable_dtor_free(virt_to_ptdesc((void *)pt_addr));
return 1;
} else if (ptable_list[type].next != dp) {
/*
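The conversion above moves the pointer-table bookkeeping off struct page (its lru field, PD_PAGE()) and onto struct ptdesc and its helpers. Reduced to its allocation/free pairing, using the same helpers the hunks use (table_page_alloc and table_page_free are hypothetical wrappers, a sketch only):

#include <linux/mm.h>

/* allocate one zeroed page of pointer tables via the ptdesc API */
static void *table_page_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);

	if (!ptdesc)
		return NULL;
	pagetable_pte_ctor(mm, ptdesc);	/* ctor/accounting, as in get_pointer_table() */
	return ptdesc_address(ptdesc);	/* kernel virtual address of the page */
}

static void table_page_free(void *table)
{
	pagetable_dtor_free(virt_to_ptdesc(table));	/* dtor + free in one call */
}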
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index f18ec02ddeb2..484ebb3baedf 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -28,7 +28,6 @@ config MICROBLAZE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_PAGE_SIZE_4KB
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 934eb961bd0d..caf508f6e9ec 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -73,7 +73,6 @@ config MIPS
select HAVE_EBPF_JIT if !CPU_MICROMIPS
select HAVE_EXIT_THREAD
select HAVE_GUP_FAST
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
@@ -563,6 +562,7 @@ config MIPS_MALTA
select MIPS_L1_CACHE_SHIFT_6
select MIPS_MSC
select PCI_GT64XXX_PCI0
+ select RTC_MC146818_LIB
select SMP_UP if SMP
select SWAP_IO_SPACE
select SYS_HAS_CPU_MIPS32_R1
@@ -1837,6 +1837,7 @@ config CPU_LOONGSON2EF
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
+ select RTC_MC146818_LIB
config CPU_LOONGSON32
bool
diff --git a/arch/mips/alchemy/common/gpiolib.c b/arch/mips/alchemy/common/gpiolib.c
index 194034eba75f..e79e26ffac99 100644
--- a/arch/mips/alchemy/common/gpiolib.c
+++ b/arch/mips/alchemy/common/gpiolib.c
@@ -101,7 +101,7 @@ struct gpio_chip alchemy_gpio_chip[] = {
.direction_input = gpio1_direction_input,
.direction_output = gpio1_direction_output,
.get = gpio1_get,
- .set_rv = gpio1_set,
+ .set = gpio1_set,
.to_irq = gpio1_to_irq,
.base = ALCHEMY_GPIO1_BASE,
.ngpio = ALCHEMY_GPIO1_NUM,
@@ -111,7 +111,7 @@ struct gpio_chip alchemy_gpio_chip[] = {
.direction_input = gpio2_direction_input,
.direction_output = gpio2_direction_output,
.get = gpio2_get,
- .set_rv = gpio2_set,
+ .set = gpio2_set,
.to_irq = gpio2_to_irq,
.base = ALCHEMY_GPIO2_BASE,
.ngpio = ALCHEMY_GPIO2_NUM,
@@ -151,7 +151,7 @@ static struct gpio_chip au1300_gpiochip = {
.direction_input = alchemy_gpic_dir_input,
.direction_output = alchemy_gpic_dir_output,
.get = alchemy_gpic_get,
- .set_rv = alchemy_gpic_set,
+ .set = alchemy_gpic_set,
.to_irq = alchemy_gpic_gpio_to_irq,
.base = AU1300_GPIO_BASE,
.ngpio = AU1300_GPIO_NUM,
diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c
index e7a53cd0dec5..ff45a6989c3a 100644
--- a/arch/mips/bcm63xx/gpio.c
+++ b/arch/mips/bcm63xx/gpio.c
@@ -131,7 +131,7 @@ static struct gpio_chip bcm63xx_gpio_chip = {
.direction_input = bcm63xx_gpio_direction_input,
.direction_output = bcm63xx_gpio_direction_output,
.get = bcm63xx_gpio_get,
- .set_rv = bcm63xx_gpio_set,
+ .set = bcm63xx_gpio_set,
.base = 0,
};
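The .set_rv renames in the two drivers above track a gpiolib transition: an int-returning setter was first added under the temporary name set_rv so drivers could be converted one by one, and converted drivers now move onto the final .set name. Assumed shape of the post-rename callback, inferred from these hunks rather than quoted from gpiolib (mychip_gpio_set and mychip are hypothetical):

/* setter may now report failure: return 0 or a negative errno */
static int mychip_gpio_set(struct gpio_chip *gc, unsigned int offset,
			   int value)
{
	/* write the hardware register for 'offset' here */
	return 0;
}

static struct gpio_chip mychip = {
	.label	= "mychip",
	.set	= mychip_gpio_set,	/* was .set_rv during the transition */
};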
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 196c44fa72d9..8473c4671702 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -54,10 +54,10 @@ UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS)
# Compressed vmlinux images
#
-extra-y += vmlinux.bin.bz2
-extra-y += vmlinux.bin.gz
-extra-y += vmlinux.bin.lzma
-extra-y += vmlinux.bin.lzo
+targets += vmlinux.bin.bz2
+targets += vmlinux.bin.gz
+targets += vmlinux.bin.lzma
+targets += vmlinux.bin.lzo
$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
$(call if_changed,bzip2)
diff --git a/arch/mips/boot/dts/mobileye/eyeq5-epm5.dts b/arch/mips/boot/dts/mobileye/eyeq5-epm5.dts
index 6898b2d8267d..9fc1a1b0a81b 100644
--- a/arch/mips/boot/dts/mobileye/eyeq5-epm5.dts
+++ b/arch/mips/boot/dts/mobileye/eyeq5-epm5.dts
@@ -21,3 +21,11 @@
<0x8 0x02000000 0x0 0x7E000000>;
};
};
+
+&i2c2 {
+ temperature-sensor@48 {
+ compatible = "ti,tmp112";
+ reg = <0x48>;
+ label = "U60";
+ };
+};
diff --git a/arch/mips/boot/dts/mobileye/eyeq5.dtsi b/arch/mips/boot/dts/mobileye/eyeq5.dtsi
index a84e6e720619..36a73e8a63a1 100644
--- a/arch/mips/boot/dts/mobileye/eyeq5.dtsi
+++ b/arch/mips/boot/dts/mobileye/eyeq5.dtsi
@@ -110,6 +110,81 @@
ranges;
compatible = "simple-bus";
+ i2c0: i2c@300000 {
+ compatible = "mobileye,eyeq5-i2c", "arm,primecell";
+ reg = <0 0x300000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 1 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <400000>; /* Fast mode */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&olb 35>, <&olb EQ5C_PER_I2C>;
+ clock-names = "i2cclk", "apb_pclk";
+ resets = <&olb 0 13>;
+ i2c-transfer-timeout-us = <10000>;
+ mobileye,olb = <&olb 0>;
+ };
+
+ i2c1: i2c@400000 {
+ compatible = "mobileye,eyeq5-i2c", "arm,primecell";
+ reg = <0 0x400000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 2 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <400000>; /* Fast mode */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&olb 35>, <&olb EQ5C_PER_I2C>;
+ clock-names = "i2cclk", "apb_pclk";
+ resets = <&olb 0 14>;
+ i2c-transfer-timeout-us = <10000>;
+ mobileye,olb = <&olb 1>;
+ };
+
+ i2c2: i2c@500000 {
+ compatible = "mobileye,eyeq5-i2c", "arm,primecell";
+ reg = <0 0x500000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <400000>; /* Fast mode */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&olb 35>, <&olb EQ5C_PER_I2C>;
+ clock-names = "i2cclk", "apb_pclk";
+ resets = <&olb 0 15>;
+ i2c-transfer-timeout-us = <10000>;
+ mobileye,olb = <&olb 2>;
+ };
+
+ i2c3: i2c@600000 {
+ compatible = "mobileye,eyeq5-i2c", "arm,primecell";
+ reg = <0 0x600000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <400000>; /* Fast mode */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&olb 35>, <&olb EQ5C_PER_I2C>;
+ clock-names = "i2cclk", "apb_pclk";
+ resets = <&olb 0 16>;
+ i2c-transfer-timeout-us = <10000>;
+ mobileye,olb = <&olb 3>;
+ };
+
+ i2c4: i2c@700000 {
+ compatible = "mobileye,eyeq5-i2c", "arm,primecell";
+ reg = <0 0x700000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 5 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <400000>; /* Fast mode */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&olb 35>, <&olb EQ5C_PER_I2C>;
+ clock-names = "i2cclk", "apb_pclk";
+ resets = <&olb 0 17>;
+ i2c-transfer-timeout-us = <10000>;
+ mobileye,olb = <&olb 4>;
+ };
+
uart0: serial@800000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0 0x800000 0x0 0x1000>;
@@ -178,6 +253,58 @@
clocks = <&olb EQ5C_CPU_CORE0>;
};
};
+
+ emmc: mmc@2200000 {
+ compatible = "mobileye,eyeq-sd4hc", "cdns,sd4hc";
+ reg = <0 0x2200000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 10 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&olb EQ5C_PER_EMMC>;
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ mmc-ddr-1_8v;
+ sd-uhs-ddr50;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+
+ cdns,phy-input-delay-legacy = <4>;
+ cdns,phy-input-delay-mmc-highspeed = <2>;
+ cdns,phy-input-delay-mmc-ddr = <3>;
+ cdns,phy-dll-delay-sdclk = <32>;
+ cdns,phy-dll-delay-sdclk-hsmmc = <32>;
+ cdns,phy-dll-delay-strobe = <32>;
+ };
+
+ gpio0: gpio@1400000 {
+ compatible = "mobileye,eyeq5-gpio";
+ reg = <0x0 0x1400000 0x0 0x1000>;
+ gpio-bank = <0>;
+ ngpios = <29>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 14 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&olb 0 0 29>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ resets = <&olb 0 26>;
+ };
+
+ gpio1: gpio@1500000 {
+ compatible = "mobileye,eyeq5-gpio";
+ reg = <0x0 0x1500000 0x0 0x1000>;
+ gpio-bank = <1>;
+ ngpios = <23>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 14 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&olb 0 29 23>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ resets = <&olb 0 26>;
+ };
};
};
diff --git a/arch/mips/boot/dts/mobileye/eyeq6h.dtsi b/arch/mips/boot/dts/mobileye/eyeq6h.dtsi
index dabd5ed778b7..5ae939d25ea8 100644
--- a/arch/mips/boot/dts/mobileye/eyeq6h.dtsi
+++ b/arch/mips/boot/dts/mobileye/eyeq6h.dtsi
@@ -109,6 +109,28 @@
clock-names = "ref";
};
+ emmc: mmc@d8010000 {
+ compatible = "mobileye,eyeq-sd4hc", "cdns,sd4hc";
+ reg = <0 0xd8010000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 91 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&olb_south EQ6HC_SOUTH_DIV_EMMC>;
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ mmc-ddr-1_8v;
+ sd-uhs-ddr50;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+
+ cdns,phy-input-delay-legacy = <4>;
+ cdns,phy-input-delay-mmc-highspeed = <2>;
+ cdns,phy-input-delay-mmc-ddr = <3>;
+ cdns,phy-dll-delay-sdclk = <32>;
+ cdns,phy-dll-delay-sdclk-hsmmc = <32>;
+ cdns,phy-dll-delay-strobe = <32>;
+ };
+
olb_south: system-controller@d8013000 {
compatible = "mobileye,eyeq6h-south-olb", "syscon";
reg = <0x0 0xd8013000 0x0 0x1000>;
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index 61dcfa5b6ca7..c1ca03a27b6c 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -156,6 +156,15 @@
#address-cells = <1>;
#size-cells = <0>;
};
+
+ wifi: wifi@180c0000 {
+ compatible = "qca,ar9130-wifi";
+ reg = <0x180c0000 0x230000>;
+
+ interrupts = <2>;
+
+ status = "disabled";
+ };
};
usb_phy: usb-phy {
diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
index f894fe17816b..a7901bb040ce 100644
--- a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
+++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
@@ -108,3 +108,7 @@
};
};
};
+
+&wifi {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
index 768ac0f869b1..6eb84a26a20f 100644
--- a/arch/mips/boot/dts/qca/ar9331.dtsi
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -285,6 +285,15 @@
status = "disabled";
};
+
+ wifi: wifi@18100000 {
+ compatible = "qca,ar9330-wifi";
+ reg = <0x18100000 0x20000>;
+
+ interrupts = <2>;
+
+ status = "disabled";
+ };
};
usb_phy: usb-phy {
diff --git a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
index c857cd22f7db..08e728b8ced8 100644
--- a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
+++ b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
@@ -97,3 +97,7 @@
&phy_port4 {
status = "okay";
};
+
+&wifi {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
index 7affa58d4fa6..37a74aabe4b4 100644
--- a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
+++ b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
@@ -98,3 +98,7 @@
reg = <0>;
};
};
+
+&wifi {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_omega.dts b/arch/mips/boot/dts/qca/ar9331_omega.dts
index 8904aa917a6e..1450419024cb 100644
--- a/arch/mips/boot/dts/qca/ar9331_omega.dts
+++ b/arch/mips/boot/dts/qca/ar9331_omega.dts
@@ -74,3 +74,7 @@
reg = <0>;
};
};
+
+&wifi {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts b/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts
index dc65ebd60bbc..5786a827c000 100644
--- a/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts
+++ b/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts
@@ -106,3 +106,7 @@
&phy_port4 {
status = "okay";
};
+
+&wifi {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
index 10b9759228b7..a7108c803eb3 100644
--- a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
+++ b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
@@ -114,3 +114,7 @@
reg = <0>;
};
};
+
+&wifi {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
index 7743d014631a..0bfb1dde9764 100644
--- a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
+++ b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
@@ -56,7 +56,7 @@
led-power-green {
label = "smartgw:power:green";
gpios = <&gpio 19 GPIO_ACTIVE_HIGH>;
- default-state = "off";
+ linux,default-trigger = "timer";
};
led-power-red {
diff --git a/arch/mips/boot/dts/ralink/mt7620a.dtsi b/arch/mips/boot/dts/ralink/mt7620a.dtsi
index d66045948a83..460164bdd430 100644
--- a/arch/mips/boot/dts/ralink/mt7620a.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7620a.dtsi
@@ -62,4 +62,14 @@
reg-shift = <2>;
};
};
+
+ wmac: wifi@10180000 {
+ compatible = "ralink,rt2880-wifi";
+ reg = <0x10180000 0x40000>;
+
+ clocks = <&sysc 16>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+ };
};
diff --git a/arch/mips/boot/dts/ralink/mt7628a.dtsi b/arch/mips/boot/dts/ralink/mt7628a.dtsi
index 0212700c4fb4..5d7a6cfa9e2b 100644
--- a/arch/mips/boot/dts/ralink/mt7628a.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7628a.dtsi
@@ -33,7 +33,7 @@
#size-cells = <1>;
sysc: syscon@0 {
- compatible = "ralink,mt7628-sysc", "syscon";
+ compatible = "ralink,mt7628-sysc", "ralink,mt7688-sysc", "syscon";
reg = <0x0 0x60>;
#clock-cells = <1>;
#reset-cells = <1>;
@@ -134,13 +134,8 @@
watchdog: watchdog@100 {
compatible = "mediatek,mt7621-wdt";
- reg = <0x100 0x30>;
-
- resets = <&sysc 8>;
- reset-names = "wdt";
-
- interrupt-parent = <&intc>;
- interrupts = <24>;
+ reg = <0x100 0x100>;
+ mediatek,sysctl = <&sysc>;
status = "disabled";
};
diff --git a/arch/mips/boot/dts/realtek/cameo-rtl9302c-2x-rtl8224-2xge.dts b/arch/mips/boot/dts/realtek/cameo-rtl9302c-2x-rtl8224-2xge.dts
index 6789bf374044..6f6a05d4088e 100644
--- a/arch/mips/boot/dts/realtek/cameo-rtl9302c-2x-rtl8224-2xge.dts
+++ b/arch/mips/boot/dts/realtek/cameo-rtl9302c-2x-rtl8224-2xge.dts
@@ -71,3 +71,99 @@
};
};
};
+
+&mdio0 {
+ /* External RTL8224 */
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ compatible = "ethernet-phy-ieee802.3-c45";
+ };
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ compatible = "ethernet-phy-ieee802.3-c45";
+ };
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ compatible = "ethernet-phy-ieee802.3-c45";
+ };
+ phy3: ethernet-phy@3 {
+ reg = <3>;
+ compatible = "ethernet-phy-ieee802.3-c45";
+ };
+};
+
+&mdio1 {
+ /* External RTL8224 */
+ phy4: ethernet-phy@0 {
+ reg = <0>;
+ compatible = "ethernet-phy-ieee802.3-c45";
+ };
+ phy5: ethernet-phy@1 {
+ reg = <1>;
+ compatible = "ethernet-phy-ieee802.3-c45";
+ };
+ phy6: ethernet-phy@2 {
+ reg = <2>;
+ compatible = "ethernet-phy-ieee802.3-c45";
+ };
+ phy7: ethernet-phy@3 {
+ reg = <3>;
+ compatible = "ethernet-phy-ieee802.3-c45";
+ };
+};
+
+&switch0 {
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ phy-handle = <&phy0>;
+ phy-mode = "usxgmii";
+ };
+ port@1 {
+ reg = <1>;
+ phy-handle = <&phy1>;
+ phy-mode = "usxgmii";
+ };
+ port@2 {
+ reg = <2>;
+ phy-handle = <&phy2>;
+ phy-mode = "usxgmii";
+ };
+ port@3 {
+ reg = <3>;
+ phy-handle = <&phy3>;
+ phy-mode = "usxgmii";
+ };
+ port@16 {
+ reg = <16>;
+ phy-handle = <&phy4>;
+ phy-mode = "usxgmii";
+ };
+ port@17 {
+ reg = <17>;
+ phy-handle = <&phy5>;
+ phy-mode = "usxgmii";
+ };
+ port@18 {
+ reg = <18>;
+ phy-handle = <&phy6>;
+ phy-mode = "usxgmii";
+ };
+ port@19 {
+ reg = <19>;
+ phy-handle = <&phy7>;
+ phy-mode = "usxgmii";
+ };
+ port@24{
+ reg = <24>;
+ phy-mode = "10gbase-r";
+ };
+ port@25{
+ reg = <25>;
+ phy-mode = "10gbase-r";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/realtek/rtl930x.dtsi b/arch/mips/boot/dts/realtek/rtl930x.dtsi
index 101bab72a95f..24e262e2dc2a 100644
--- a/arch/mips/boot/dts/realtek/rtl930x.dtsi
+++ b/arch/mips/boot/dts/realtek/rtl930x.dtsi
@@ -48,6 +48,10 @@
#address-cells = <1>;
#size-cells = <1>;
+ interrupt-parent = <&intc>;
+ interrupts = <23>, <24>;
+ interrupt-names = "switch", "nic";
+
reboot@c {
compatible = "syscon-reboot";
reg = <0x0c 0x4>;
@@ -138,6 +142,33 @@
clocks = <&lx_clk>;
};
+ watchdog0: watchdog@3260 {
+ compatible = "realtek,rtl9300-wdt";
+ reg = <0x3260 0xc>;
+
+ realtek,reset-mode = "soc";
+
+ clocks = <&lx_clk>;
+ timeout-sec = <30>;
+
+ interrupt-parent = <&intc>;
+ interrupt-names = "phase1", "phase2";
+ interrupts = <5>, <6>;
+ };
+
+ gpio0: gpio@3300 {
+ compatible = "realtek,rtl9300-gpio", "realtek,otto-gpio";
+ reg = <0x3300 0x1c>, <0x3338 0x8>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <24>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupts = <13>;
+ };
+
snand: spi@1a400 {
compatible = "realtek,rtl9301-snand";
reg = <0x1a400 0x44>;
diff --git a/arch/mips/configs/eyeq5_defconfig b/arch/mips/configs/eyeq5_defconfig
index ff7af5dc6d9d..6688f56aba1c 100644
--- a/arch/mips/configs/eyeq5_defconfig
+++ b/arch/mips/configs/eyeq5_defconfig
@@ -19,20 +19,18 @@ CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_EYEQ=y
-CONFIG_MACH_EYEQ5=y
CONFIG_FIT_IMAGE_FDT_EPM5=y
-CONFIG_PAGE_SIZE_16KB=y
CONFIG_MIPS_CPS=y
CONFIG_CPU_HAS_MSA=y
CONFIG_NR_CPUS=16
CONFIG_MIPS_RAW_APPENDED_DTB=y
CONFIG_JUMP_LABEL=y
+CONFIG_PAGE_SIZE_16KB=y
CONFIG_COMPAT_32BIT_TIME=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_TRIM_UNUSED_KSYMS=y
# CONFIG_COMPAT_BRK is not set
-CONFIG_SPARSEMEM_MANUAL=y
CONFIG_USERFAULTFD=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -64,8 +62,14 @@ CONFIG_CAN_M_CAN=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_NOMADIK=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_PINCTRL=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_NOMADIK=y
+CONFIG_SENSORS_LM75=y
CONFIG_MFD_SYSCON=y
CONFIG_HID_A4TECH=y
CONFIG_HID_BELKIN=y
@@ -79,6 +83,8 @@ CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_CADENCE=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_RESET_CONTROLLER=y
# CONFIG_NVMEM is not set
diff --git a/arch/mips/configs/eyeq6_defconfig b/arch/mips/configs/eyeq6_defconfig
index 0afbb45a78e8..0a00a201937b 100644
--- a/arch/mips/configs/eyeq6_defconfig
+++ b/arch/mips/configs/eyeq6_defconfig
@@ -82,6 +82,8 @@ CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_CADENCE=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_RESET_CONTROLLER=y
# CONFIG_NVMEM is not set
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 114fcd67898d..cdedbb8a8f53 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -44,7 +44,6 @@ CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index f1a8ccf2c459..2decf8b98d31 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -79,7 +79,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
diff --git a/arch/mips/configs/loongson2k_defconfig b/arch/mips/configs/loongson2k_defconfig
index 4b7f914d01d0..0cc665d3ea34 100644
--- a/arch/mips/configs/loongson2k_defconfig
+++ b/arch/mips/configs/loongson2k_defconfig
@@ -52,7 +52,6 @@ CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -257,6 +256,17 @@ CONFIG_SND_HDA_INTEL=y
CONFIG_SND_HDA_HWDEP=y
CONFIG_SND_HDA_PATCH_LOADER=y
CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_REALTEK_LIB=y
+CONFIG_SND_HDA_CODEC_ALC260=y
+CONFIG_SND_HDA_CODEC_ALC262=y
+CONFIG_SND_HDA_CODEC_ALC268=y
+CONFIG_SND_HDA_CODEC_ALC269=y
+CONFIG_SND_HDA_CODEC_ALC662=y
+CONFIG_SND_HDA_CODEC_ALC680=y
+CONFIG_SND_HDA_CODEC_ALC861=y
+CONFIG_SND_HDA_CODEC_ALC861VD=y
+CONFIG_SND_HDA_CODEC_ALC880=y
+CONFIG_SND_HDA_CODEC_ALC882=y
CONFIG_SND_HDA_CODEC_ANALOG=y
CONFIG_SND_HDA_CODEC_SIGMATEL=y
CONFIG_SND_HDA_CODEC_VIA=y
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 5ff0c1554168..240efff37d98 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -72,7 +72,6 @@ CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -292,11 +291,24 @@ CONFIG_SND_SEQ_DUMMY=m
# CONFIG_SND_ISA is not set
CONFIG_SND_HDA_INTEL=m
CONFIG_SND_HDA_PATCH_LOADER=y
-CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_REALTEK=m
CONFIG_SND_HDA_CODEC_REALTEK_LIB=m
+CONFIG_SND_HDA_CODEC_ALC260=m
+CONFIG_SND_HDA_CODEC_ALC262=m
+CONFIG_SND_HDA_CODEC_ALC268=m
CONFIG_SND_HDA_CODEC_ALC269=m
+CONFIG_SND_HDA_CODEC_ALC662=m
+CONFIG_SND_HDA_CODEC_ALC680=m
+CONFIG_SND_HDA_CODEC_ALC861=m
+CONFIG_SND_HDA_CODEC_ALC861VD=m
+CONFIG_SND_HDA_CODEC_ALC880=m
+CONFIG_SND_HDA_CODEC_ALC882=m
CONFIG_SND_HDA_CODEC_SIGMATEL=m
CONFIG_SND_HDA_CODEC_HDMI=m
+CONFIG_SND_HDA_CODEC_HDMI_GENERIC=m
+CONFIG_SND_HDA_CODEC_HDMI_INTEL=m
+CONFIG_SND_HDA_CODEC_HDMI_ATI=m
+CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=m
CONFIG_SND_HDA_CODEC_CONEXANT=m
# CONFIG_SND_USB is not set
CONFIG_HIDRAW=y
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 869a14b3184f..9fcbac829920 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -80,7 +80,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index 41e1fea303ea..19102386a81c 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -84,7 +84,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 13ff1877e26e..1dd07c9d1812 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -82,7 +82,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index 9fb114ef5e2d..30d18b084cda 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -56,7 +56,6 @@ CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 7b5a5591ccc9..39a2419e1f3e 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -64,7 +64,6 @@ CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index a600670d00e9..fd60837ce50b 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -123,6 +123,7 @@ extern struct cpuinfo_mips cpu_data[];
extern void cpu_probe(void);
extern void cpu_report(void);
+extern void cpu_disable_mmid(void);
extern const char *__cpu_name[];
#define cpu_name_string() __cpu_name[raw_smp_processor_id()]
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index fbc71ddcf0f6..8c460ce01ffe 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -11,20 +11,6 @@
#include <asm/page.h>
-#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr,
- unsigned long len)
-{
- unsigned long task_size = STACK_TOP;
-
- if (len > task_size)
- return -ENOMEM;
- if (task_size - len < addr)
- return -EINVAL;
- return 0;
-}
-
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
diff --git a/arch/mips/include/asm/mach-generic/mc146818rtc.h b/arch/mips/include/asm/mach-generic/mc146818rtc.h
index 9c72e540ff56..249279b0494d 100644
--- a/arch/mips/include/asm/mach-generic/mc146818rtc.h
+++ b/arch/mips/include/asm/mach-generic/mc146818rtc.h
@@ -29,8 +29,4 @@ static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
#define RTC_ALWAYS_BCD 0
-#ifndef mc146818_decode_year
-#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)
-#endif
-
#endif /* __ASM_MACH_GENERIC_MC146818RTC_H */
diff --git a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
index ce4e4c6e09e2..50d487a4c95e 100644
--- a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
@@ -5,7 +5,7 @@
* Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
* 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
* 2009 Johannes Dickgreber <tanzy@gmx.de>
- * 2015 Joshua Kinard <kumba@gentoo.org>
+ * 2015 Joshua Kinard <linux@kumba.dev>
*
*/
#ifndef __ASM_MACH_IP30_CPU_FEATURE_OVERRIDES_H
diff --git a/arch/mips/include/asm/mach-ip30/spaces.h b/arch/mips/include/asm/mach-ip30/spaces.h
index c8a302dfbe05..d381b93d6ad3 100644
--- a/arch/mips/include/asm/mach-ip30/spaces.h
+++ b/arch/mips/include/asm/mach-ip30/spaces.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) 2016 Joshua Kinard <kumba@gentoo.org>
+ * Copyright (C) 2016 Joshua Kinard <linux@kumba.dev>
*
*/
#ifndef _ASM_MACH_IP30_SPACES_H
diff --git a/arch/mips/include/asm/mach-jazz/mc146818rtc.h b/arch/mips/include/asm/mach-jazz/mc146818rtc.h
index 987f727afe25..639bff8ebca3 100644
--- a/arch/mips/include/asm/mach-jazz/mc146818rtc.h
+++ b/arch/mips/include/asm/mach-jazz/mc146818rtc.h
@@ -33,6 +33,4 @@ static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
#define RTC_ALWAYS_BCD 0
-#define mc146818_decode_year(year) ((year) + 1980)
-
#endif /* __ASM_MACH_JAZZ_MC146818RTC_H */
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
index c2e0acb755cd..dd9f621d0204 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -99,5 +99,8 @@ extern __iomem void *ltq_cgu_membase;
extern void ltq_pmu_enable(unsigned int module);
extern void ltq_pmu_disable(unsigned int module);
+/* VMMC */
+extern unsigned int *ltq_get_cp1_base(void);
+
#endif /* CONFIG_SOC_TYPE_XWAY */
#endif /* _LTQ_XWAY_H__ */
diff --git a/arch/mips/include/asm/mach-malta/mc146818rtc.h b/arch/mips/include/asm/mach-malta/mc146818rtc.h
index e8cc7fdf7415..7da2c0ea55da 100644
--- a/arch/mips/include/asm/mach-malta/mc146818rtc.h
+++ b/arch/mips/include/asm/mach-malta/mc146818rtc.h
@@ -31,6 +31,4 @@ static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
#define RTC_ALWAYS_BCD 0
-#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)
-
#endif /* __ASM_MACH_MALTA_MC146818RTC_H */
diff --git a/arch/mips/include/asm/mach-rm/mc146818rtc.h b/arch/mips/include/asm/mach-rm/mc146818rtc.h
deleted file mode 100644
index a074f4f84f75..000000000000
--- a/arch/mips/include/asm/mach-rm/mc146818rtc.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 by Ralf Baechle
- *
- * RTC routines for PC style attached Dallas chip with ARC epoch.
- */
-#ifndef __ASM_MACH_RM_MC146818RTC_H
-#define __ASM_MACH_RM_MC146818RTC_H
-
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)
-#else
-#define mc146818_decode_year(year) ((year) + 1980)
-#endif
-
-#include <asm/mach-generic/mc146818rtc.h>
-
-#endif /* __ASM_MACH_RM_MC146818RTC_H */
diff --git a/arch/mips/include/asm/mc146818-time.h b/arch/mips/include/asm/mc146818-time.h
index cbf5cec345f1..ac52a30b4161 100644
--- a/arch/mips/include/asm/mc146818-time.h
+++ b/arch/mips/include/asm/mc146818-time.h
@@ -8,112 +8,21 @@
#ifndef __ASM_MC146818_TIME_H
#define __ASM_MC146818_TIME_H
-#include <linux/bcd.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
-/*
- * For check timing call set_rtc_mmss() 500ms; used in timer interrupt.
- */
-#define USEC_AFTER 500000
-#define USEC_BEFORE 500000
-
-/*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be
- * called 500 ms after the second nowtime has started, because when
- * nowtime is written into the registers of the CMOS clock, it will
- * jump to the next second precisely 500 ms later. Check the Motorola
- * MC146818A or Dallas DS12887 data sheet for details.
- *
- * BUG: This routine does not handle hour overflow properly; it just
- * sets the minutes. Usually you'll only notice that after reboot!
- */
-static inline int mc146818_set_rtc_mmss(unsigned long nowtime)
-{
- int real_seconds, real_minutes, cmos_minutes;
- unsigned char save_control, save_freq_select;
- int retval = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
- save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
- CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-
- save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-
- cmos_minutes = CMOS_READ(RTC_MINUTES);
- if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- cmos_minutes = bcd2bin(cmos_minutes);
-
- /*
- * since we're only adjusting minutes and seconds,
- * don't interfere with hour overflow. This avoids
- * messing with unknown time zones but requires your
- * RTC not to be off by more than 15 minutes
- */
- real_seconds = nowtime % 60;
- real_minutes = nowtime / 60;
- if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
- real_minutes += 30; /* correct for half hour time zone */
- real_minutes %= 60;
-
- if (abs(real_minutes - cmos_minutes) < 30) {
- if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- real_seconds = bin2bcd(real_seconds);
- real_minutes = bin2bcd(real_minutes);
- }
- CMOS_WRITE(real_seconds, RTC_SECONDS);
- CMOS_WRITE(real_minutes, RTC_MINUTES);
- } else {
- printk_once(KERN_NOTICE
- "set_rtc_mmss: can't update from %d to %d\n",
- cmos_minutes, real_minutes);
- retval = -1;
- }
-
- /* The following flags have to be released exactly in this order,
- * otherwise the DS12887 (popular MC146818A clone with integrated
- * battery and quartz) will not reset the oscillator and will not
- * update precisely 500 ms later. You won't find this mentioned in
- * the Dallas Semiconductor data sheets, but who believes data
- * sheets anyway ... -- Markus Kuhn
- */
- CMOS_WRITE(save_control, RTC_CONTROL);
- CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- return retval;
-}
-
+#ifdef CONFIG_RTC_MC146818_LIB
static inline time64_t mc146818_get_cmos_time(void)
{
- unsigned int year, mon, day, hour, min, sec;
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
-
- do {
- sec = CMOS_READ(RTC_SECONDS);
- min = CMOS_READ(RTC_MINUTES);
- hour = CMOS_READ(RTC_HOURS);
- day = CMOS_READ(RTC_DAY_OF_MONTH);
- mon = CMOS_READ(RTC_MONTH);
- year = CMOS_READ(RTC_YEAR);
- } while (sec != CMOS_READ(RTC_SECONDS));
+ struct rtc_time tm;
- if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- sec = bcd2bin(sec);
- min = bcd2bin(min);
- hour = bcd2bin(hour);
- day = bcd2bin(day);
- mon = bcd2bin(mon);
- year = bcd2bin(year);
+ if (mc146818_get_time(&tm, 1000)) {
+ pr_err("Unable to read current time from RTC\n");
+ return 0;
}
- spin_unlock_irqrestore(&rtc_lock, flags);
- year = mc146818_decode_year(year);
- return mktime64(year, mon, day, hour, min, sec);
+ return rtc_tm_to_time64(&tm);
}
+#endif /* CONFIG_RTC_MC146818_LIB */
#endif /* __ASM_MC146818_TIME_H */
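The rewrite above drops the hand-rolled CMOS register loop in favour of the
shared mc146818 RTC library. A minimal sketch of a platform hook built on the
new helper (hypothetical caller, assuming CONFIG_RTC_MC146818_LIB=y; not part
of this patch):

#include <linux/timekeeping.h>
#include <asm/mc146818-time.h>

/* Sketch: delegate the persistent-clock read to the shared helper. */
void read_persistent_clock64(struct timespec64 *ts)
{
	ts->tv_sec = mc146818_get_cmos_time();	/* 0 if the RTC read fails */
	ts->tv_nsec = 0;
}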
diff --git a/arch/mips/include/asm/mips-cps.h b/arch/mips/include/asm/mips-cps.h
index 917009b80e69..1fffd47a4564 100644
--- a/arch/mips/include/asm/mips-cps.h
+++ b/arch/mips/include/asm/mips-cps.h
@@ -258,6 +258,8 @@ static inline bool mips_cps_multicluster_cpus(void)
/**
* mips_cps_first_online_in_cluster() - Detect if CPU is first online in cluster
+ * @first_cpu: The first other online CPU in the cluster, or nr_cpu_ids if
+ * the function returns true.
*
* Determine whether the local CPU is the first to be brought online in its
* cluster - that is, whether there are any other online CPUs in the local
@@ -265,6 +267,6 @@ static inline bool mips_cps_multicluster_cpus(void)
*
* Returns true if this CPU is first online, else false.
*/
-extern unsigned int mips_cps_first_online_in_cluster(void);
+extern unsigned int mips_cps_first_online_in_cluster(int *first_cpu);
#endif /* __MIPS_ASM_MIPS_CPS_H__ */
diff --git a/arch/mips/include/asm/sgi/heart.h b/arch/mips/include/asm/sgi/heart.h
index 0d03751955c4..c224c2e3575a 100644
--- a/arch/mips/include/asm/sgi/heart.h
+++ b/arch/mips/include/asm/sgi/heart.h
@@ -4,7 +4,7 @@
*
* Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
* 2009 Johannes Dickgreber <tanzy@gmx.de>
- * 2007-2015 Joshua Kinard <kumba@gentoo.org>
+ * 2007-2015 Joshua Kinard <linux@kumba.dev>
*/
#ifndef __ASM_SGI_HEART_H
#define __ASM_SGI_HEART_H
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
index 10d3ebd890cb..88cfae5d22c8 100644
--- a/arch/mips/include/asm/smp-cps.h
+++ b/arch/mips/include/asm/smp-cps.h
@@ -24,6 +24,7 @@ struct core_boot_config {
struct cluster_boot_config {
unsigned long *core_power;
+ struct cpumask cpumask;
struct core_boot_config *core_config;
};
diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h
index 61fd4d0aeda4..c0769dc4b853 100644
--- a/arch/mips/include/asm/vpe.h
+++ b/arch/mips/include/asm/vpe.h
@@ -119,4 +119,12 @@ void cleanup_tc(struct tc *tc);
int __init vpe_module_init(void);
void __exit vpe_module_exit(void);
+
+#ifdef CONFIG_MIPS_VPE_LOADER_MT
+void *vpe_alloc(void);
+int vpe_start(void *vpe, unsigned long start);
+int vpe_stop(void *vpe);
+int vpe_free(void *vpe);
+#endif /* CONFIG_MIPS_VPE_LOADER_MT */
+
#endif /* _ASM_VPE_H */
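The prototypes added above expose the VPE loader to other kernel code (such
as the Lantiq VMMC support hinted at in lantiq_soc.h). A hypothetical caller
sketch, assuming CONFIG_MIPS_VPE_LOADER_MT; the function name and error
codes are illustrative only:

#include <linux/errno.h>
#include <asm/vpe.h>

/* Sketch: claim a spare VPE, start it at 'entry', release on failure. */
static int example_run_on_vpe(unsigned long entry)
{
	void *v = vpe_alloc();

	if (!v)
		return -EBUSY;
	if (vpe_start(v, entry)) {
		vpe_free(v);
		return -EIO;
	}
	return 0;
}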
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 31ac655b7837..72fb1b006da9 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -163,6 +163,9 @@
#define SO_PASSRIGHTS 83
+#define SO_INQ 84
+#define SCM_INQ SO_INQ
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index af7412549e6e..04dc9ab55524 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -9,6 +9,7 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/mmu_context.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/stddef.h>
@@ -37,6 +38,8 @@
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
+static bool mmid_disabled_quirk;
+
static inline unsigned long cpu_get_msa_id(void)
{
unsigned long status, msa_id;
@@ -645,7 +648,7 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
if (cpu_has_mips_r6) {
- if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid)
+ if (!mmid_disabled_quirk && (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid))
config5 |= MIPS_CONF5_MI;
else
config5 &= ~MIPS_CONF5_MI;
@@ -708,7 +711,6 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
max_mmid_width);
asid_mask = GENMASK(max_mmid_width - 1, 0);
}
-
set_cpu_asid_mask(c, asid_mask);
}
}
@@ -2046,3 +2048,39 @@ void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe)
cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_VP;
cpuinfo->globalnumber |= vpe << MIPS_GLOBALNUMBER_VP_SHF;
}
+
+void cpu_disable_mmid(void)
+{
+ int i;
+ unsigned long asid_mask;
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config4 = read_c0_config4();
+ unsigned int config5 = read_c0_config5();
+
+ /* Setup the initial ASID mask based on config4 */
+ asid_mask = MIPS_ENTRYHI_ASID;
+ if (config4 & MIPS_CONF4_AE)
+ asid_mask |= MIPS_ENTRYHI_ASIDX;
+ set_cpu_asid_mask(c, asid_mask);
+
+ /* Disable MMID in the C0 and update cpuinfo_mips accordingly */
+ config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
+ config5 &= ~MIPS_CONF5_MI;
+ write_c0_config5(config5);
+ /* Ensure the write to config5 above takes effect */
+ back_to_back_c0_hazard();
+ c->options &= ~MIPS_CPU_MMID;
+
+ /* Set up the asid cache value normally initialised in per_cpu_trap_init() */
+ cpu_data[cpu].asid_cache = asid_first_version(cpu);
+
+ /* Reinit context for each CPU */
+ for_each_possible_cpu(i)
+ set_cpu_context(i, &init_mm, 0);
+
+ /* Ensure that MMID will now be seen as disabled */
+ mmid_disabled_quirk = true;
+
+ pr_info("MMID support disabled due to hardware support issue\n");
+}
diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c
index 027fb57d0d79..96ac40d20c23 100644
--- a/arch/mips/kernel/gpio_txx9.c
+++ b/arch/mips/kernel/gpio_txx9.c
@@ -70,7 +70,7 @@ static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset,
static struct gpio_chip txx9_gpio_chip = {
.get = txx9_gpio_get,
- .set_rv = txx9_gpio_set,
+ .set = txx9_gpio_set,
.direction_input = txx9_gpio_dir_in,
.direction_output = txx9_gpio_dir_out,
.label = "TXx9",
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 43cb1e20baed..7c9c5dc38823 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <asm/mips-cps.h>
+#include <asm/smp-cps.h>
#include <asm/mipsregs.h>
void __iomem *mips_gcr_base;
@@ -248,6 +249,11 @@ void mips_cm_update_property(void)
return;
pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken");
mips_cm_is_l2_hci_broken = true;
+
+ /* Disable MMID only if it was configured */
+ if (cpu_has_mmid)
+ cpu_disable_mmid();
+
of_node_put(cm_node);
}
@@ -529,39 +535,23 @@ void mips_cm_error_report(void)
write_gcr_error_cause(cm_error);
}
-unsigned int mips_cps_first_online_in_cluster(void)
+unsigned int mips_cps_first_online_in_cluster(int *first_cpu)
{
- unsigned int local_cl;
- int i;
-
- local_cl = cpu_cluster(&current_cpu_data);
+ unsigned int local_cl = cpu_cluster(&current_cpu_data);
+ struct cpumask *local_cl_mask;
/*
- * We rely upon knowledge that CPUs are numbered sequentially by
- * cluster - ie. CPUs 0..X will be in cluster 0, CPUs X+1..Y in cluster
- * 1, CPUs Y+1..Z in cluster 2 etc. This means that CPUs in the same
- * cluster will immediately precede or follow one another.
- *
- * First we scan backwards, until we find an online CPU in the cluster
- * or we move on to another cluster.
+ * mips_cps_cluster_bootcfg is allocated in cps_prepare_cpus. If it is
+ * not yet done, then we are so early that only one CPU is running, so
+ * it is the first online CPU in the cluster.
*/
- for (i = smp_processor_id() - 1; i >= 0; i--) {
- if (cpu_cluster(&cpu_data[i]) != local_cl)
- break;
- if (!cpu_online(i))
- continue;
- return false;
- }
-
- /* Then do the same for higher numbered CPUs */
- for (i = smp_processor_id() + 1; i < nr_cpu_ids; i++) {
- if (cpu_cluster(&cpu_data[i]) != local_cl)
- break;
- if (!cpu_online(i))
- continue;
- return false;
- }
-
- /* We found no online CPUs in the local cluster */
- return true;
+ if (IS_ENABLED(CONFIG_MIPS_CPS) && mips_cps_cluster_bootcfg)
+ local_cl_mask = &mips_cps_cluster_bootcfg[local_cl].cpumask;
+ else
+ return true;
+
+ *first_cpu = cpumask_any_and_but(local_cl_mask,
+ cpu_online_mask,
+ smp_processor_id());
+ return (*first_cpu >= nr_cpu_ids);
}
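The rewritten helper relies on cpumask_any_and_but(), which returns an
arbitrary CPU set in both masks other than the excluded one, or a value >=
nr_cpu_ids when no such CPU exists. A standalone sketch of the same test
(illustrative; it simply mirrors the logic above):

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Sketch: true when no *other* CPU in 'cluster_mask' is online. */
static bool example_first_online_in(const struct cpumask *cluster_mask)
{
	unsigned int other = cpumask_any_and_but(cluster_mask,
						 cpu_online_mask,
						 smp_processor_id());

	return other >= nr_cpu_ids;
}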
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index b630604c577f..02aa6a04a21d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -690,18 +690,20 @@ unsigned long mips_stack_top(void)
}
/* Space for the VDSO, data page & GIC user page */
- top -= PAGE_ALIGN(current->thread.abi->vdso->size);
- top -= PAGE_SIZE;
- top -= mips_gic_present() ? PAGE_SIZE : 0;
+ if (current->thread.abi) {
+ top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+ top -= PAGE_SIZE;
+ top -= mips_gic_present() ? PAGE_SIZE : 0;
+
+ /* Space to randomize the VDSO base */
+ if (current->flags & PF_RANDOMIZE)
+ top -= VDSO_RANDOMIZE_SIZE;
+ }
/* Space for cache colour alignment */
if (cpu_has_dc_aliases)
top -= shm_align_mask + 1;
- /* Space to randomize the VDSO base */
- if (current->flags & PF_RANDOMIZE)
- top -= VDSO_RANDOMIZE_SIZE;
-
return top;
}
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index cda7983e7c18..7f1c136ad850 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -138,7 +138,7 @@ static int __init reloc_handler(u32 type, u32 *loc_orig, u32 *loc_new,
apply_r_mips_hi16_rel(loc_orig, loc_new, offset);
break;
default:
- pr_err("Unhandled relocation type %d at 0x%pK\n", type,
+ pr_err("Unhandled relocation type %d at 0x%p\n", type,
loc_orig);
return -ENOEXEC;
}
@@ -439,10 +439,10 @@ static void show_kernel_relocation(const char *level)
{
if (__kaslr_offset > 0) {
printk(level);
- pr_cont("Kernel relocated by 0x%pK\n", (void *)__kaslr_offset);
- pr_cont(" .text @ 0x%pK\n", _text);
- pr_cont(" .data @ 0x%pK\n", _sdata);
- pr_cont(" .bss @ 0x%pK\n", __bss_start);
+ pr_cont("Kernel relocated by 0x%p\n", (void *)__kaslr_offset);
+ pr_cont(" .text @ 0x%p\n", _text);
+ pr_cont(" .data @ 0x%p\n", _sdata);
+ pr_cont(" .bss @ 0x%p\n", __bss_start);
}
}
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index fbfe0771317e..11b9b6b63e19 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -458,7 +458,7 @@ static void __init mips_parse_crashkernel(void)
total_mem = memblock_phys_mem_size();
ret = parse_crashkernel(boot_command_line, total_mem,
&crash_size, &crash_base,
- NULL, NULL);
+ NULL, NULL, NULL);
if (ret != 0 || crash_size <= 0)
return;
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 7b0e69af4097..22d4f9ff3ae2 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -281,9 +281,20 @@ static void __init cps_smp_setup(void)
#endif /* CONFIG_MIPS_MT_FPAFF */
}
+unsigned long calibrate_delay_is_known(void)
+{
+ int first_cpu_cluster = 0;
+
+ /* The calibration has to be done on the primary CPU of the cluster */
+ if (mips_cps_first_online_in_cluster(&first_cpu_cluster))
+ return 0;
+
+ return cpu_data[first_cpu_cluster].udelay_val;
+}
+
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
- unsigned int nclusters, ncores, core_vpes, c, cl, cca;
+ unsigned int nclusters, ncores, core_vpes, nvpe = 0, c, cl, cca;
bool cca_unsuitable, cores_limited;
struct cluster_boot_config *cluster_bootcfg;
struct core_boot_config *core_bootcfg;
@@ -356,10 +367,13 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
/* Allocate VPE boot configuration structs */
for (c = 0; c < ncores; c++) {
+ int v;
core_vpes = core_vpe_count(cl, c);
core_bootcfg[c].vpe_config = kcalloc(core_vpes,
sizeof(*core_bootcfg[c].vpe_config),
GFP_KERNEL);
+ for (v = 0; v < core_vpes; v++)
+ cpumask_set_cpu(nvpe++, &mips_cps_cluster_bootcfg[cl].cpumask);
if (!core_bootcfg[c].vpe_config)
goto err_out;
}
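calibrate_delay_is_known() overrides a weak hook in init/calibrate.c:
returning 0 forces a full delay-loop calibration, while returning a cached
udelay_val lets later CPUs in an already-calibrated cluster skip it. For
reference, the generic default it replaces is simply:

/* Generic weak default (init/calibrate.c): always calibrate. */
unsigned long __weak calibrate_delay_is_known(void)
{
	return 0;
}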
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index cef3c423a41a..a75587018f44 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -315,7 +315,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
* we allocate is out of range, just give up now.
*/
if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
- kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
+ kvm_err("CP0_EBase.WG required for guest exception base %p\n",
gebase);
err = -ENOMEM;
goto out_free_gebase;
diff --git a/arch/mips/lantiq/falcon/prom.c b/arch/mips/lantiq/falcon/prom.c
index 7b98def106e4..2a38c4267685 100644
--- a/arch/mips/lantiq/falcon/prom.c
+++ b/arch/mips/lantiq/falcon/prom.c
@@ -36,14 +36,14 @@
#define BOOT_NVEC (BOOT_REG_BASE | 0x04)
#define BOOT_EVEC (BOOT_REG_BASE | 0x08)
-void __init ltq_soc_nmi_setup(void)
+static void __init ltq_soc_nmi_setup(void)
{
extern void (*nmi_handler)(void);
ltq_w32((unsigned long)&nmi_handler, (void *)BOOT_NVEC);
}
-void __init ltq_soc_ejtag_setup(void)
+static void __init ltq_soc_ejtag_setup(void)
{
extern void (*ejtag_debug_handler)(void);
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
index 1187729d8cbb..577e6e6309a6 100644
--- a/arch/mips/lantiq/falcon/sysctrl.c
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -14,6 +14,7 @@
#include <lantiq_soc.h>
#include "../clk.h"
+#include "../prom.h"
/* infrastructure control register */
#define SYS1_INFRAC 0x00bc
@@ -72,11 +73,6 @@
static void __iomem *sysctl_membase[3], *status_membase;
void __iomem *ltq_sys1_membase, *ltq_ebu_membase;
-void falcon_trigger_hrst(int level)
-{
- sysctl_w32(SYSCTL_SYS1, level & 1, SYS1_HRSTOUTC);
-}
-
static inline void sysctl_wait(struct clk *clk,
unsigned int test, unsigned int reg)
{
@@ -214,19 +210,16 @@ void __init ltq_soc_init(void)
of_node_put(np_syseth);
of_node_put(np_sysgpe);
- if ((request_mem_region(res_status.start, resource_size(&res_status),
- res_status.name) < 0) ||
- (request_mem_region(res_ebu.start, resource_size(&res_ebu),
- res_ebu.name) < 0) ||
- (request_mem_region(res_sys[0].start,
- resource_size(&res_sys[0]),
- res_sys[0].name) < 0) ||
- (request_mem_region(res_sys[1].start,
- resource_size(&res_sys[1]),
- res_sys[1].name) < 0) ||
- (request_mem_region(res_sys[2].start,
- resource_size(&res_sys[2]),
- res_sys[2].name) < 0))
+ if ((!request_mem_region(res_status.start, resource_size(&res_status),
+ res_status.name)) ||
+ (!request_mem_region(res_ebu.start, resource_size(&res_ebu),
+ res_ebu.name)) ||
+ (!request_mem_region(res_sys[0].start, resource_size(&res_sys[0]),
+ res_sys[0].name)) ||
+ (!request_mem_region(res_sys[1].start, resource_size(&res_sys[1]),
+ res_sys[1].name)) ||
+ (!request_mem_region(res_sys[2].start, resource_size(&res_sys[2]),
+ res_sys[2].name)))
pr_err("Failed to request core resources");
status_membase = ioremap(res_status.start,
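The conversion above also fixes a latent bug: request_mem_region() returns a
struct resource * (NULL on failure), so the old "< 0" comparison against a
pointer could never be true and failures went undetected; testing for NULL
restores the intended error path.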
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index a112573b6e37..961c55933a6d 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -16,6 +16,7 @@
#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>
+#include <asm/time.h>
#include <lantiq_soc.h>
#include <irq.h>
@@ -335,7 +336,8 @@ static const struct irq_domain_ops irq_domain_ops = {
.map = icu_map,
};
-int __init icu_of_init(struct device_node *node, struct device_node *parent)
+static int __init
+icu_of_init(struct device_node *node, struct device_node *parent)
{
struct device_node *eiu_node;
struct resource res;
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
index 47ad21430fe2..39fb3ecdd6b7 100644
--- a/arch/mips/lantiq/xway/clk.c
+++ b/arch/mips/lantiq/xway/clk.c
@@ -74,7 +74,7 @@ unsigned long ltq_danube_pp32_hz(void)
return clk;
}
-unsigned long ltq_ar9_sys_hz(void)
+static unsigned long ltq_ar9_sys_hz(void)
{
if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
return CLOCK_393M;
diff --git a/arch/mips/lantiq/xway/dcdc.c b/arch/mips/lantiq/xway/dcdc.c
index 4a808f8c5beb..b79c462fd48a 100644
--- a/arch/mips/lantiq/xway/dcdc.c
+++ b/arch/mips/lantiq/xway/dcdc.c
@@ -46,7 +46,7 @@ static struct platform_driver dcdc_driver = {
},
};
-int __init dcdc_init(void)
+static int __init dcdc_init(void)
{
int ret = platform_driver_register(&dcdc_driver);
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 934ac72937e5..4693eba6c296 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -289,7 +289,7 @@ static struct platform_driver dma_driver = {
},
};
-int __init
+static int __init
dma_init(void)
{
return platform_driver_register(&dma_driver);
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
index 8d52001301de..484c9e3000c1 100644
--- a/arch/mips/lantiq/xway/gptu.c
+++ b/arch/mips/lantiq/xway/gptu.c
@@ -194,7 +194,7 @@ static struct platform_driver dma_driver = {
},
};
-int __init gptu_init(void)
+static int __init gptu_init(void)
{
int ret = platform_driver_register(&dma_driver);
diff --git a/arch/mips/loongson64/setup.c b/arch/mips/loongson64/setup.c
index 257038e18779..b3e590eae952 100644
--- a/arch/mips/loongson64/setup.c
+++ b/arch/mips/loongson64/setup.c
@@ -3,7 +3,6 @@
* Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*/
-#include <linux/export.h>
#include <linux/init.h>
#include <asm/bootinfo.h>
diff --git a/arch/mips/mm/physaddr.c b/arch/mips/mm/physaddr.c
index f9b8c85e9843..a6b1bf82057a 100644
--- a/arch/mips/mm/physaddr.c
+++ b/arch/mips/mm/physaddr.c
@@ -30,7 +30,7 @@ static inline bool __debug_virt_addr_valid(unsigned long x)
phys_addr_t __virt_to_phys(volatile const void *x)
{
WARN(!__debug_virt_addr_valid((unsigned long)x),
- "virt_to_phys used for non-linear address: %pK (%pS)\n",
+ "virt_to_phys used for non-linear address: %p (%pS)\n",
x, x);
return __virt_to_phys_nodebug(x);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 76f3b9c0a9f0..347126dc010d 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -508,6 +508,60 @@ static int __init set_ntlb(char *str)
__setup("ntlb=", set_ntlb);
+/* Initialise all TLB entries with unique values */
+static void r4k_tlb_uniquify(void)
+{
+ int entry = num_wired_entries();
+
+ htw_stop();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+ while (entry < current_cpu_data.tlbsize) {
+ unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
+ unsigned long asid = 0;
+ int idx;
+
+ /* Skip wired MMID to make ginvt_mmid work */
+ if (cpu_has_mmid)
+ asid = MMID_KERNEL_WIRED + 1;
+
+ /* Check for match before using UNIQUE_ENTRYHI */
+ do {
+ if (cpu_has_mmid) {
+ write_c0_memorymapid(asid);
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+ } else {
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry) | asid);
+ }
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+ /* No match or match is on current entry */
+ if (idx < 0 || idx == entry)
+ break;
+ /*
+ * If we hit a match, we need to try again with
+ * a different ASID.
+ */
+ asid++;
+ } while (asid < asid_mask);
+
+ if (idx >= 0 && idx != entry)
+ panic("Unable to uniquify TLB entry %d", idx);
+
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ entry++;
+ }
+
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+}
+
/*
* Configure TLB (for init or after a CPU has been powered off).
*/
@@ -547,7 +601,7 @@ static void r4k_tlb_configure(void)
temp_tlb_entry = current_cpu_data.tlbsize - 1;
/* From this point on the ARC firmware is dead. */
- local_flush_tlb_all();
+ r4k_tlb_uniquify();
/* Did I tell you that ARC SUCKS? */
}
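r4k_tlb_uniquify() seeds every TLB slot with a distinct EntryHi instead of
merely flushing, so duplicate matching entries left behind by firmware cannot
trigger a TLB shutdown. The distinct addresses come from UNIQUE_ENTRYHI(); in
simplified form (the real macro in asm/tlb.h may additionally set EHINV on
CPUs with TLB invalidation support):

/* One distinct KSEG0 address per TLB index, spaced by an even/odd
 * page pair so no two entries can ever alias. */
#define UNIQUE_ENTRYHI(idx)	(CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))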
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 68a8cefed420..0e85839b8225 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -234,7 +234,7 @@ static struct platform_driver ltq_pci_driver = {
},
};
-int __init pcibios_init(void)
+static int __init pcibios_init(void)
{
int ret = platform_driver_register(&ltq_pci_driver);
if (ret)
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index 1cada09fa5db..006e2bbab87e 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -264,7 +264,7 @@ static struct platform_driver rt288x_pci_driver = {
},
};
-int __init pcibios_init(void)
+static int __init pcibios_init(void)
{
int ret = platform_driver_register(&rt288x_pci_driver);
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index af5bbbea949b..955b36e89358 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -15,6 +15,7 @@
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
+#include <asm/time.h>
#include "common.h"
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 0e47cd59b6cb..9aa5ef374465 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -164,7 +164,7 @@ static struct rb532_gpio_chip rb532_gpio_chip[] = {
.direction_input = rb532_gpio_direction_input,
.direction_output = rb532_gpio_direction_output,
.get = rb532_gpio_get,
- .set_rv = rb532_gpio_set,
+ .set = rb532_gpio_set,
.to_irq = rb532_gpio_to_irq,
.base = 0,
.ngpio = 32,
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 288d4d17eddd..20ef663af16e 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -165,7 +165,7 @@ static void hub_domain_free(struct irq_domain *domain,
return;
irqd = irq_domain_get_irq_data(domain, virq);
- if (irqd && irqd->chip_data)
+ if (irqd)
kfree(irqd->chip_data);
}
diff --git a/arch/mips/sgi-ip30/ip30-power.c b/arch/mips/sgi-ip30/ip30-power.c
index 120b3f3d5108..66851e17c5a7 100644
--- a/arch/mips/sgi-ip30/ip30-power.c
+++ b/arch/mips/sgi-ip30/ip30-power.c
@@ -3,7 +3,7 @@
* ip30-power.c: Software powerdown and reset handling for IP30 architecture.
*
* Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
- * 2014 Joshua Kinard <kumba@gentoo.org>
+ * 2014 Joshua Kinard <linux@kumba.dev>
* 2009 Johannes Dickgreber <tanzy@gmx.de>
*/
diff --git a/arch/mips/sgi-ip30/ip30-setup.c b/arch/mips/sgi-ip30/ip30-setup.c
index e8547636a748..3fcb3ec9f802 100644
--- a/arch/mips/sgi-ip30/ip30-setup.c
+++ b/arch/mips/sgi-ip30/ip30-setup.c
@@ -3,7 +3,7 @@
* SGI IP30 miscellaneous setup bits.
*
* Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
- * 2007 Joshua Kinard <kumba@gentoo.org>
+ * 2007 Joshua Kinard <linux@kumba.dev>
* 2009 Johannes Dickgreber <tanzy@gmx.de>
*/
diff --git a/arch/mips/sgi-ip30/ip30-smp.c b/arch/mips/sgi-ip30/ip30-smp.c
index 4bfe654602b1..1e8210f2a9f8 100644
--- a/arch/mips/sgi-ip30/ip30-smp.c
+++ b/arch/mips/sgi-ip30/ip30-smp.c
@@ -5,7 +5,7 @@
* and smp-bmips.c.
*
* Copyright (C) 2005-2007 Stanislaw Skowronek <skylark@unaligned.org>
- * 2006-2007, 2014-2015 Joshua Kinard <kumba@gentoo.org>
+ * 2006-2007, 2014-2015 Joshua Kinard <linux@kumba.dev>
* 2009 Johannes Dickgreber <tanzy@gmx.de>
*/
diff --git a/arch/mips/sgi-ip30/ip30-timer.c b/arch/mips/sgi-ip30/ip30-timer.c
index d13e105478ae..7652f72f0daf 100644
--- a/arch/mips/sgi-ip30/ip30-timer.c
+++ b/arch/mips/sgi-ip30/ip30-timer.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
* Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de>
- * Copyright (C) 2011 Joshua Kinard <kumba@gentoo.org>
+ * Copyright (C) 2011 Joshua Kinard <linux@kumba.dev>
*/
#include <linux/clocksource.h>
diff --git a/arch/mips/sgi-ip30/ip30-xtalk.c b/arch/mips/sgi-ip30/ip30-xtalk.c
index 7ceb2b23ea1c..d798ee8c998c 100644
--- a/arch/mips/sgi-ip30/ip30-xtalk.c
+++ b/arch/mips/sgi-ip30/ip30-xtalk.c
@@ -3,7 +3,7 @@
* ip30-xtalk.c - Very basic Crosstalk (XIO) detection support.
* Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
* Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de>
- * Copyright (C) 2007, 2014-2016 Joshua Kinard <kumba@gentoo.org>
+ * Copyright (C) 2007, 2014-2016 Joshua Kinard <linux@kumba.dev>
*/
#include <linux/init.h>
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 0586ca7668b4..5dc867ea2c69 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -655,7 +655,7 @@ void __init txx9_iocled_init(unsigned long baseaddr,
if (!iocled->mmioaddr)
goto out_free;
iocled->chip.get = txx9_iocled_get;
- iocled->chip.set_rv = txx9_iocled_set;
+ iocled->chip.set = txx9_iocled_set;
iocled->chip.direction_input = txx9_iocled_dir_in;
iocled->chip.direction_output = txx9_iocled_dir_out;
iocled->chip.label = "iocled";
@@ -776,7 +776,7 @@ struct txx9_sramc_dev {
};
static ssize_t txx9_sram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
struct txx9_sramc_dev *dev = bin_attr->private;
@@ -791,7 +791,7 @@ static ssize_t txx9_sram_read(struct file *filp, struct kobject *kobj,
}
static ssize_t txx9_sram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
struct txx9_sramc_dev *dev = bin_attr->private;
diff --git a/arch/openrisc/include/asm/mmu.h b/arch/openrisc/include/asm/mmu.h
index eb720110f3a2..e7826a681bc4 100644
--- a/arch/openrisc/include/asm/mmu.h
+++ b/arch/openrisc/include/asm/mmu.h
@@ -15,7 +15,7 @@
#ifndef __ASM_OPENRISC_MMU_H
#define __ASM_OPENRISC_MMU_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
typedef unsigned long mm_context_t;
#endif
diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h
index c589e96035e1..85797f94d1d7 100644
--- a/arch/openrisc/include/asm/page.h
+++ b/arch/openrisc/include/asm/page.h
@@ -25,7 +25,7 @@
*/
#include <asm/setup.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define clear_page(page) memset((page), 0, PAGE_SIZE)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
@@ -55,10 +55,10 @@ typedef struct page *pgtable_t;
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
@@ -73,7 +73,7 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
#define virt_addr_valid(kaddr) (pfn_valid(virt_to_pfn(kaddr)))
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 5bd6463bd514..d33702831505 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -23,7 +23,7 @@
#include <asm-generic/pgtable-nopmd.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/mmu.h>
#include <asm/fixmap.h>
@@ -430,5 +430,5 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
typedef pte_t *pte_addr_t;
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_OPENRISC_PGTABLE_H */
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index e05d1b59e24e..3ff893a67c13 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -39,7 +39,7 @@
*/
#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct task_struct;
@@ -78,5 +78,5 @@ void show_registers(struct pt_regs *regs);
#define cpu_relax() barrier()
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_OPENRISC_PROCESSOR_H */
diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h
index e5a282b67075..28facf2f3e00 100644
--- a/arch/openrisc/include/asm/ptrace.h
+++ b/arch/openrisc/include/asm/ptrace.h
@@ -27,7 +27,7 @@
* they share a cacheline (not done yet, though... future optimization).
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* This struct describes how the registers are laid out on the kernel stack
* during a syscall or other kernel entry.
@@ -147,7 +147,7 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
return *(unsigned long *)((unsigned long)regs + offset);
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/*
* Offsets used by 'ptrace' system call interface.
diff --git a/arch/openrisc/include/asm/setup.h b/arch/openrisc/include/asm/setup.h
index 9acbc5deda69..dce9f4d3b378 100644
--- a/arch/openrisc/include/asm/setup.h
+++ b/arch/openrisc/include/asm/setup.h
@@ -8,7 +8,7 @@
#include <linux/init.h>
#include <asm-generic/setup.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
void __init or1k_early_setup(void *fdt);
#endif
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
index 4af3049c34c2..e338fff7efb0 100644
--- a/arch/openrisc/include/asm/thread_info.h
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -17,7 +17,7 @@
#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/types.h>
#include <asm/processor.h>
#endif
@@ -38,7 +38,7 @@
* - if the contents of this structure are changed, the assembly constants
* must also be changed
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct thread_info {
struct task_struct *task; /* main task structure */
@@ -58,7 +58,7 @@ struct thread_info {
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
@@ -75,7 +75,7 @@ register struct thread_info *current_thread_info_reg asm("r10");
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/*
* thread information flags
diff --git a/arch/openrisc/include/uapi/asm/ptrace.h b/arch/openrisc/include/uapi/asm/ptrace.h
index a77cc9915ca8..1f12a60d5a06 100644
--- a/arch/openrisc/include/uapi/asm/ptrace.h
+++ b/arch/openrisc/include/uapi/asm/ptrace.h
@@ -20,7 +20,7 @@
#ifndef _UAPI__ASM_OPENRISC_PTRACE_H
#define _UAPI__ASM_OPENRISC_PTRACE_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* This is the layout of the regset returned by the GETREGSET ptrace call
*/
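The openrisc header changes above replace __ASSEMBLY__, which kbuild defines
manually via -D__ASSEMBLY__, with __ASSEMBLER__, which GCC and Clang predefine
themselves whenever they preprocess assembly, so the guard works even outside
kbuild. The resulting pattern (example_word_t is a placeholder):

/* __ASSEMBLER__ is predefined by the compiler for .S preprocessing;
 * no build-system -D flag is required. */
#ifndef __ASSEMBLER__
typedef unsigned long example_word_t;	/* C-only declarations */
#endif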
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 3a7b5baaa450..af932a4ad306 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -72,7 +72,7 @@ void *arch_dma_set_uncached(void *cpu_addr, size_t size)
* them and setting the cache-inhibit bit.
*/
mmap_write_lock(&init_mm);
- error = walk_page_range_novma(&init_mm, va, va + size,
+ error = walk_kernel_page_table_range(va, va + size,
&set_nocache_walk_ops, NULL, NULL);
mmap_write_unlock(&init_mm);
@@ -87,7 +87,7 @@ void arch_dma_clear_uncached(void *cpu_addr, size_t size)
mmap_write_lock(&init_mm);
/* walk_page_range shouldn't be able to fail here */
- WARN_ON(walk_page_range_novma(&init_mm, va, va + size,
+ WARN_ON(walk_kernel_page_table_range(va, va + size,
&clear_nocache_walk_ops, NULL, NULL));
mmap_write_unlock(&init_mm);
}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index fcc5973f7519..2efa4b08b7b8 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -81,7 +81,6 @@ config PARISC
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_DYNAMIC_FTRACE if $(cc-option,-fpatchable-function-entry=1,1)
- select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE
select HAVE_KPROBES_ON_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 21b8166a6883..48ae3c79557a 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -39,7 +39,9 @@ endif
export LD_BFD
-# Set default 32 bits cross compilers for vdso
+# Set default 32-bit cross compilers for the vdso.
+# This means that for 64BIT, both the 64-bit tools and the 32-bit tools
+# need to be in the path.
CC_ARCHES_32 = hppa hppa2.0 hppa1.1
CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux
CROSS32_COMPILE := $(call cc-cross-prefix, \
@@ -139,7 +141,7 @@ palo lifimage: vmlinuz
fi
@if test ! -f "$(PALOCONF)"; then \
cp $(srctree)/arch/parisc/defpalo.conf $(objtree)/palo.conf; \
- echo 'A generic palo config file ($(objree)/palo.conf) has been created for you.'; \
+ echo 'A generic palo config file ($(objtree)/palo.conf) has been created for you.'; \
echo 'You should check it and re-run "make palo".'; \
echo 'WARNING: the "lifimage" file is now placed in this directory by default!'; \
false; \
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 1a86a4370b29..2c139a4dbf4b 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -276,7 +276,7 @@ extern unsigned long *empty_zero_page;
#define pte_none(x) (pte_val(x) == 0)
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_user(x) (pte_val(x) & _PAGE_USER)
-#define pte_clear(mm, addr, xp) set_pte(xp, __pte(0))
+#define pte_clear(mm, addr, xp) set_pte_at((mm), (addr), (xp), __pte(0))
#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
@@ -392,6 +392,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
}
}
#define set_ptes set_ptes
+#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
/* Used for deferring calls to flush_dcache_page() */
@@ -456,7 +457,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
if (!pte_young(pte)) {
return 0;
}
- set_pte(ptep, pte_mkold(pte));
+ set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
return 1;
}
@@ -466,7 +467,7 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *pt
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- set_pte(ptep, pte_wrprotect(*ptep));
+ set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
index 51f40eaf7780..1013eeba31e5 100644
--- a/arch/parisc/include/asm/special_insns.h
+++ b/arch/parisc/include/asm/special_insns.h
@@ -32,6 +32,34 @@
pa; \
})
+/**
+ * prober_user() - Probe user read access
+ * @sr: Space register.
+ * @va: Virtual address.
+ *
+ * Return: Non-zero if address is accessible.
+ *
+ * Due to the way _PAGE_READ is handled in TLB entries, we need
+ * a special check to determine whether a user address is accessible.
+ * The ldb instruction does the initial access check. If it is
+ * successful, the probe instruction checks user access rights.
+ */
+#define prober_user(sr, va) ({ \
+ unsigned long read_allowed; \
+ __asm__ __volatile__( \
+ "copy %%r0,%0\n" \
+ "8:\tldb 0(%%sr%1,%2),%%r0\n" \
+ "\tproberi (%%sr%1,%2),%3,%0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \
+ "or %%r0,%%r0,%%r0") \
+ : "=&r" (read_allowed) \
+ : "i" (sr), "r" (va), "i" (PRIV_USER) \
+ : "memory" \
+ ); \
+ read_allowed; \
+})
+
#define CR_EIEM 15 /* External Interrupt Enable Mask */
#define CR_CR16 16 /* CR16 Interval Timer */
#define CR_EIRR 23 /* External Interrupt Request Register */
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 88d0ae5769dd..6c531d2c847e 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -42,9 +42,24 @@
__gu_err; \
})
-#define __get_user(val, ptr) \
-({ \
- __get_user_internal(SR_USER, val, ptr); \
+#define __probe_user_internal(sr, error, ptr) \
+({ \
+ __asm__("\tproberi (%%sr%1,%2),%3,%0\n" \
+ "\tcmpiclr,= 1,%0,%0\n" \
+ "\tldi %4,%0\n" \
+ : "=r"(error) \
+ : "i"(sr), "r"(ptr), "i"(PRIV_USER), \
+ "i"(-EFAULT)); \
+})
+
+#define __get_user(val, ptr) \
+({ \
+ register long __gu_err; \
+ \
+ __gu_err = __get_user_internal(SR_USER, val, ptr); \
+ if (likely(!__gu_err)) \
+ __probe_user_internal(SR_USER, __gu_err, ptr); \
+ __gu_err; \
})
#define __get_user_asm(sr, val, ldx, ptr) \
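With this change __get_user() performs the load first and, only if that
succeeds, verifies user read permission with proberi, returning -EFAULT for
pages the user could not legitimately read. A hypothetical caller sketch
(assumes access_ok() has already been checked, as usual for __get_user()):

#include <linux/uaccess.h>

/* Sketch: fetch a u32 from userspace; fails for PL-protected pages. */
static int example_fetch_u32(u32 __user *uaddr, u32 *out)
{
	if (__get_user(*out, uaddr))
		return -EFAULT;

	return 0;
}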
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 1f2d5b7a7f5d..c16ec36dfee6 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -144,6 +144,9 @@
#define SO_PASSRIGHTS 0x4051
+#define SO_INQ 0x4052
+#define SCM_INQ SO_INQ
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index db531e58d70e..37ca484cc495 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -429,7 +429,7 @@ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
return ptep;
}
-static inline bool pte_needs_flush(pte_t pte)
+static inline bool pte_needs_cache_flush(pte_t pte)
{
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
== (_PAGE_PRESENT | _PAGE_ACCESSED);
@@ -630,7 +630,7 @@ static void flush_cache_page_if_present(struct vm_area_struct *vma,
ptep = get_ptep(vma->vm_mm, vmaddr);
if (ptep) {
pte = ptep_get(ptep);
- needs_flush = pte_needs_flush(pte);
+ needs_flush = pte_needs_cache_flush(pte);
pte_unmap(ptep);
}
if (needs_flush)
@@ -841,7 +841,7 @@ void flush_cache_vmap(unsigned long start, unsigned long end)
}
vm = find_vm_area((void *)start);
- if (WARN_ON_ONCE(!vm)) {
+ if (!vm) {
flush_cache_all();
return;
}
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ea57bcc21dc5..f4bf61a34701 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -499,6 +499,12 @@
* this happens is quite subtle, read below */
.macro make_insert_tlb spc,pte,prot,tmp
space_to_prot \spc \prot /* create prot id from space */
+
+#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
+ /* need to drop DMB bit, as it's used as SPECIAL flag */
+ depi 0,_PAGE_SPECIAL_BIT,1,\pte
+#endif
+
/* The following is the real subtlety. This is depositing
* T <-> _PAGE_REFTRAP
* D <-> _PAGE_DIRTY
@@ -511,17 +517,18 @@
* Finally, _PAGE_READ goes in the top bit of PL1 (so we
* trigger an access rights trap in user space if the user
* tries to read an unreadable page */
-#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
- /* need to drop DMB bit, as it's used as SPECIAL flag */
- depi 0,_PAGE_SPECIAL_BIT,1,\pte
-#endif
depd \pte,8,7,\prot
/* PAGE_USER indicates the page can be read with user privileges,
* so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
- * contains _PAGE_READ) */
+ * contains _PAGE_READ). While the kernel can't directly write
+ * user pages which have _PAGE_WRITE zero, it can read pages
+ * which have _PAGE_READ zero (PL <= PL1). Thus, the kernel
+ * exception fault handler doesn't trigger when reading pages
+ * that aren't user read accessible */
extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
depdi 7,11,3,\prot
+
/* If we're a gateway page, drop PL2 back to zero for promotion
* to kernel privilege (so we can execute the page as kernel).
* Any privilege promotion page always denys read and write */
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 0fa81bf1466b..f58c4bccfbce 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -613,6 +613,9 @@ lws_compare_and_swap32:
lws_compare_and_swap:
/* Trigger memory reference interruptions without writing to memory */
1: ldw 0(%r26), %r28
+ proberi (%r26), PRIV_USER, %r28
+ comb,=,n %r28, %r0, lws_fault /* backwards, likely not taken */
+ nop
2: stbys,e %r0, 0(%r26)
/* Calculate 8-bit hash index from virtual address */
@@ -767,6 +770,9 @@ cas2_lock_start:
copy %r26, %r28
depi_safe 0, 31, 2, %r28
10: ldw 0(%r28), %r1
+ proberi (%r28), PRIV_USER, %r1
+ comb,=,n %r1, %r0, lws_fault /* backwards, likely not taken */
+ nop
11: stbys,e %r0, 0(%r28)
/* Calculate 8-bit hash index from virtual address */
@@ -951,41 +957,47 @@ atomic_xchg_begin:
/* 8-bit exchange */
1: ldb 0(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
copy %r23, %r20
depi_safe 0, 31, 2, %r20
b atomic_xchg_start
2: stbys,e %r0, 0(%r20)
- nop
- nop
- nop
/* 16-bit exchange */
3: ldh 0(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
copy %r23, %r20
depi_safe 0, 31, 2, %r20
b atomic_xchg_start
4: stbys,e %r0, 0(%r20)
- nop
- nop
- nop
/* 32-bit exchange */
5: ldw 0(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
b atomic_xchg_start
6: stbys,e %r0, 0(%r23)
nop
nop
- nop
- nop
- nop
/* 64-bit exchange */
#ifdef CONFIG_64BIT
7: ldd 0(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
8: stdby,e %r0, 0(%r23)
#else
7: ldw 0(%r24), %r20
8: ldw 4(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
copy %r23, %r20
depi_safe 0, 31, 2, %r20
9: stbys,e %r0, 0(%r20)
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index 5fc0c852c84c..69d65ffab312 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
+#include <linux/mm.h>
#define get_user_space() mfsp(SR_USER)
#define get_kernel_space() SR_KERNEL
@@ -32,9 +33,25 @@ EXPORT_SYMBOL(raw_copy_to_user);
unsigned long raw_copy_from_user(void *dst, const void __user *src,
unsigned long len)
{
+ unsigned long start = (unsigned long) src;
+ unsigned long end = start + len;
+ unsigned long newlen = len;
+
mtsp(get_user_space(), SR_TEMP1);
mtsp(get_kernel_space(), SR_TEMP2);
- return pa_memcpy(dst, (void __force *)src, len);
+
+ /* Check region is user accessible */
+ if (start)
+ while (start < end) {
+ if (!prober_user(SR_TEMP1, start)) {
+ newlen = (start - (unsigned long) src);
+ break;
+ }
+ start += PAGE_SIZE;
+ /* align to the page boundary, which may have different permissions */
+ start = PAGE_ALIGN_DOWN(start);
+ }
+ return len - newlen + pa_memcpy(dst, (void __force *)src, newlen);
}
EXPORT_SYMBOL(raw_copy_from_user);
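The loop added above shortens the copy to the user-accessible prefix of the source region before calling pa_memcpy(), and raw_copy_from_user() returns the number of bytes not copied. A self-contained sketch of that clamping arithmetic, with a stub predicate in place of prober_user():

/* Models the accessible-prefix clamp; page_ok() stands in for prober_user(). */
#include <assert.h>
#include <stdbool.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN_DOWN(x) ((x) & ~(PAGE_SIZE - 1))

static bool page_ok(unsigned long addr)
{
	return addr < 3 * PAGE_SIZE;	/* pretend only the first 3 pages are readable */
}

static unsigned long accessible_len(unsigned long src, unsigned long len)
{
	unsigned long start = src, end = src + len, newlen = len;

	while (start < end) {
		if (!page_ok(start)) {
			newlen = start - src;
			break;
		}
		/* step into the next page, which may have different permissions */
		start = PAGE_ALIGN_DOWN(start + PAGE_SIZE);
	}
	return newlen;
}

int main(void)
{
	/* 100 bytes into page 2, asking for 3 pages: clamped at the page-3 boundary */
	assert(accessible_len(2 * PAGE_SIZE + 100, 3 * PAGE_SIZE) == PAGE_SIZE - 100);
	return 0;
}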
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index c39de84e98b0..f1785640b049 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -363,6 +363,10 @@ bad_area:
mmap_read_unlock(mm);
bad_area_nosemaphore:
+ if (!user_mode(regs) && fixup_exception(regs)) {
+ return;
+ }
+
if (user_mode(regs)) {
int signo, si_code;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 45b4fa7b9b02..93402a1d9c9f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -147,7 +147,6 @@ config PPC
select ARCH_HAS_PMEM_API
select ARCH_HAS_PREEMPT_LAZY
select ARCH_HAS_PTDUMP
- select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
select ARCH_HAS_SET_MEMORY
@@ -244,7 +243,6 @@ config PPC
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_GUP_FAST
select HAVE_FTRACE_GRAPH_FUNC
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1
select HAVE_FUNCTION_ERROR_INJECTION
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index f3804103c56c..9753fb87217c 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -101,7 +101,7 @@ KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION)
endif
LDFLAGS_vmlinux-y := -Bstatic
-LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
+LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie --no-dynamic-linker
LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) += -z notext
LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
diff --git a/arch/powerpc/boot/dts/microwatt.dts b/arch/powerpc/boot/dts/microwatt.dts
index b7eac4e56019..292b909ca9ce 100644
--- a/arch/powerpc/boot/dts/microwatt.dts
+++ b/arch/powerpc/boot/dts/microwatt.dts
@@ -37,7 +37,7 @@
ibm,powerpc-cpu-features {
display-name = "Microwatt";
- isa = <3010>;
+ isa = <3100>;
device_type = "cpu-features";
compatible = "ibm,powerpc-cpu-features";
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 3347192b77b8..7a31b52e92e1 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -62,7 +62,6 @@ CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 98f56e63ad21..d06388b0f66e 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -46,7 +46,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_IDLE=y
-CONFIG_HZ_100=y
+CONFIG_HZ_1000=y
CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_PPC_UV=y
@@ -340,3 +340,4 @@ CONFIG_KVM_BOOK3S_64_HV=m
CONFIG_VHOST_NET=m
CONFIG_PRINTK_TIME=y
CONFIG_PRINTK_CALLER=y
+CONFIG_KALLSYMS_ALL=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index dca67aae5da3..ce34597e9f3e 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -57,7 +57,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_FREQ_PMAC64=y
-CONFIG_HZ_100=y
+CONFIG_HZ_1000=y
CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
@@ -465,3 +465,4 @@ CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_MEMINIT=m
CONFIG_TEST_FREE_PAGES=m
CONFIG_MEMTEST=y
+CONFIG_KALLSYMS_ALL=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index f96f8ed9856c..bb359643ddc1 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -252,7 +252,6 @@ CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index aa90a048f319..7132392fa7cd 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -168,12 +168,6 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
extern int hash__has_transparent_hugepage(void);
#endif
-static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
-{
- BUG();
- return pmd;
-}
-
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 0bf6fd0bf42a..0fb5b7da9478 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -259,7 +259,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
*/
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
- return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
+ return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
(_PAGE_PTE | H_PAGE_THP_HUGE));
}
@@ -281,11 +281,6 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
-{
- return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
-}
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index a2ddcbb3fcb9..c19800365315 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -88,7 +88,6 @@
#define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */
#define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */
-#define _PAGE_DEVMAP _RPAGE_SW1 /* software: ZONE_DEVICE page */
/*
* Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE
@@ -109,7 +108,7 @@
*/
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
_PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
- _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+ _PAGE_SOFT_DIRTY)
/*
* user access blocked by key
*/
@@ -123,7 +122,7 @@
*/
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
_PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
- _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+ _PAGE_SOFT_DIRTY)
/*
* We define 2 sets of base prot bits, one for basic pages (ie,
@@ -609,24 +608,6 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
-static inline pte_t pte_mkdevmap(pte_t pte)
-{
- return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
-}
-
-/*
- * This is potentially called with a pmd as the argument, in which case it's not
- * safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set.
- * That's because the bit we use for _PAGE_DEVMAP is not reserved for software
- * use in page directory entries (ie. non-ptes).
- */
-static inline int pte_devmap(pte_t pte)
-{
- __be64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
-
- return (pte_raw(pte) & mask) == mask;
-}
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
/* FIXME!! check whether this need to be a conditional */
@@ -1379,36 +1360,6 @@ static inline bool arch_needs_pgtable_deposit(void)
}
extern void serialize_against_pte_lookup(struct mm_struct *mm);
-
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
- if (radix_enabled())
- return radix__pmd_mkdevmap(pmd);
- return hash__pmd_mkdevmap(pmd);
-}
-
-static inline pud_t pud_mkdevmap(pud_t pud)
-{
- if (radix_enabled())
- return radix__pud_mkdevmap(pud);
- BUG();
- return pud;
-}
-
-static inline int pmd_devmap(pmd_t pmd)
-{
- return pte_devmap(pmd_pte(pmd));
-}
-
-static inline int pud_devmap(pud_t pud)
-{
- return pte_devmap(pud_pte(pud));
-}
-
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
diff --git a/arch/powerpc/include/asm/book3s/64/pkeys.h b/arch/powerpc/include/asm/book3s/64/pkeys.h
index 5b178139f3c0..ff911b4251d9 100644
--- a/arch/powerpc/include/asm/book3s/64/pkeys.h
+++ b/arch/powerpc/include/asm/book3s/64/pkeys.h
@@ -5,7 +5,7 @@
#include <asm/book3s/64/hash-pkey.h>
-static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags)
+static inline u64 vmflag_to_pte_pkey_bits(vm_flags_t vm_flags)
{
if (!mmu_has_feature(MMU_FTR_PKEY))
return 0x0UL;
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 8f55ff74bb68..df23a8267e4d 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -264,7 +264,7 @@ static inline int radix__p4d_bad(p4d_t p4d)
static inline int radix__pmd_trans_huge(pmd_t pmd)
{
- return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
+ return (pmd_val(pmd) & _PAGE_PTE) == _PAGE_PTE;
}
static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
@@ -274,7 +274,7 @@ static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
static inline int radix__pud_trans_huge(pud_t pud)
{
- return (pud_val(pud) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
+ return (pud_val(pud) & _PAGE_PTE) == _PAGE_PTE;
}
static inline pud_t radix__pud_mkhuge(pud_t pud)
@@ -315,16 +315,6 @@ static inline int radix__has_transparent_pud_hugepage(void)
}
#endif
-static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
-{
- return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
-}
-
-static inline pud_t radix__pud_mkdevmap(pud_t pud)
-{
- return __pud(pud_val(pud) | (_PAGE_PTE | _PAGE_DEVMAP));
-}
-
struct vmem_altmap;
struct dev_pagemap;
extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
index f8ce178b43b7..34abf8bea2cc 100644
--- a/arch/powerpc/include/asm/floppy.h
+++ b/arch/powerpc/include/asm/floppy.h
@@ -144,9 +144,12 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
bus_addr = 0;
}
- if (!bus_addr) /* need to map it */
+ if (!bus_addr) { /* need to map it */
bus_addr = dma_map_single(&isa_bridge_pcidev->dev, addr, size,
dir);
+ if (dma_mapping_error(&isa_bridge_pcidev->dev, bus_addr))
+ return -ENOMEM;
+ }
/* remember this one as prev */
prev_addr = addr;
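The floppy fix above follows the general DMA API rule: dma_map_single() can fail (for instance when IOMMU space is exhausted), and its result must be tested with dma_mapping_error() rather than compared against zero. A minimal kernel-context sketch of the map-and-check pattern; dev, buf and len are placeholder names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_buffer(struct device *dev, void *buf, size_t len,
		      dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* A dma_addr_t of 0 is not a reliable error value; always check
	 * the result of dma_map_single() with dma_mapping_error(). */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}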
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 6df6dbbe1e7c..ea6c8dc400d2 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -270,6 +270,7 @@
#define H_QUERY_INT_STATE 0x1E4
#define H_POLL_PENDING 0x1D8
#define H_ILLAN_ATTRIBUTES 0x244
+#define H_ADD_LOGICAL_LAN_BUFFERS 0x248
#define H_MODIFY_HEA_QP 0x250
#define H_QUERY_HEA_QP 0x254
#define H_QUERY_HEA 0x258
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 42a51a993d94..912f78a956a1 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -14,7 +14,7 @@
#include <asm/cpu_has_feature.h>
#include <asm/firmware.h>
-static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+static inline vm_flags_t arch_calc_vm_prot_bits(unsigned long prot,
unsigned long pkey)
{
#ifdef CONFIG_PPC_MEM_KEYS
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index 59a2c7dbc78f..28e752138996 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -30,9 +30,9 @@ extern u32 reserved_allocation_mask; /* bits set for reserved keys */
#endif
-static inline u64 pkey_to_vmflag_bits(u16 pkey)
+static inline vm_flags_t pkey_to_vmflag_bits(u16 pkey)
{
- return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
+ return (((vm_flags_t)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
}
static inline int vma_pkey(struct vm_area_struct *vma)
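pkey_to_vmflag_bits() shifts the protection-key number into the pkey bitfield of the VMA flags; the point of the cast is that the shift happens in the full vm_flags_t width rather than a narrower promoted type. A userspace sketch with an assumed shift position and a hypothetical 4-bit key field, for illustration only:

#include <assert.h>
#include <stdint.h>

typedef uint64_t vm_flags_t;

#define VM_PKEY_SHIFT 32				/* assumed position */
#define ARCH_VM_PKEY_FLAGS (0xfULL << VM_PKEY_SHIFT)	/* hypothetical 4-bit field */

static vm_flags_t pkey_to_vmflag_bits(uint16_t pkey)
{
	/* cast before shifting so the 16-bit pkey isn't shifted as an int */
	return ((vm_flags_t)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS;
}

int main(void)
{
	assert(pkey_to_vmflag_bits(5) == (5ULL << 32));
	assert(pkey_to_vmflag_bits(0x1f) == (0xfULL << 32));	/* masked to the field */
	return 0;
}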
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 4312bcb913a4..8053b24afc39 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -425,6 +425,7 @@
#define PPC_RAW_SC() (0x44000002)
#define PPC_RAW_SYNC() (0x7c0004ac)
#define PPC_RAW_ISYNC() (0x4c00012c)
+#define PPC_RAW_LWSYNC() (0x7c2004ac)
/*
* Define what the VSX XX1 form instructions will look like, then add
diff --git a/arch/powerpc/include/uapi/asm/eeh.h b/arch/powerpc/include/uapi/asm/eeh.h
index 28186071fafc..3b5c47ff3fc4 100644
--- a/arch/powerpc/include/uapi/asm/eeh.h
+++ b/arch/powerpc/include/uapi/asm/eeh.h
@@ -1,18 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
* Copyright IBM Corp. 2015
*
* Authors: Gavin Shan <gwshan@linux.vnet.ibm.com>
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index eaeda001784e..077c5437f521 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -1,18 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
* Copyright IBM Corp. 2007
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h
index a809b1b44ddf..ac596064d4c7 100644
--- a/arch/powerpc/include/uapi/asm/kvm_para.h
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -1,18 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
* Copyright IBM Corp. 2008
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
diff --git a/arch/powerpc/include/uapi/asm/ps3fb.h b/arch/powerpc/include/uapi/asm/ps3fb.h
index fd7e3a0d35d5..b1c6b0cd9e80 100644
--- a/arch/powerpc/include/uapi/asm/ps3fb.h
+++ b/arch/powerpc/include/uapi/asm/ps3fb.h
@@ -2,19 +2,6 @@
/*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006, 2007 Sony Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published
- * by the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef _ASM_POWERPC_PS3FB_H_
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index ca7f7bb2b478..bb836f02101c 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1139,6 +1139,7 @@ int eeh_unfreeze_pe(struct eeh_pe *pe)
return ret;
}
+EXPORT_SYMBOL_GPL(eeh_unfreeze_pe);
static struct pci_device_id eeh_reset_ids[] = {
@@ -1208,16 +1209,16 @@ int eeh_dev_open(struct pci_dev *pdev)
struct eeh_dev *edev;
int ret = -ENODEV;
- mutex_lock(&eeh_dev_mutex);
+ guard(mutex)(&eeh_dev_mutex);
/* No PCI device ? */
if (!pdev)
- goto out;
+ return ret;
/* No EEH device or PE ? */
edev = pci_dev_to_eeh_dev(pdev);
if (!edev || !edev->pe)
- goto out;
+ return ret;
/*
* The PE might have been put into frozen state, but we
@@ -1227,16 +1228,12 @@ int eeh_dev_open(struct pci_dev *pdev)
*/
ret = eeh_pe_change_owner(edev->pe);
if (ret)
- goto out;
+ return ret;
/* Increase PE's pass through count */
atomic_inc(&edev->pe->pass_dev_cnt);
- mutex_unlock(&eeh_dev_mutex);
return 0;
-out:
- mutex_unlock(&eeh_dev_mutex);
- return ret;
}
EXPORT_SYMBOL_GPL(eeh_dev_open);
@@ -1252,22 +1249,20 @@ void eeh_dev_release(struct pci_dev *pdev)
{
struct eeh_dev *edev;
- mutex_lock(&eeh_dev_mutex);
+ guard(mutex)(&eeh_dev_mutex);
/* No PCI device ? */
if (!pdev)
- goto out;
+ return;
/* No EEH device ? */
edev = pci_dev_to_eeh_dev(pdev);
if (!edev || !edev->pe || !eeh_pe_passed(edev->pe))
- goto out;
+ return;
/* Decrease PE's pass through count */
WARN_ON(atomic_dec_if_positive(&edev->pe->pass_dev_cnt) < 0);
eeh_pe_change_owner(edev->pe);
-out:
- mutex_unlock(&eeh_dev_mutex);
}
EXPORT_SYMBOL(eeh_dev_release);
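This and several later hunks in the series convert open-coded lock/unlock pairs to the scope-based guards from <linux/cleanup.h>: guard(mutex)(&m) holds the mutex until the enclosing scope ends, while scoped_guard(mutex, &m) { ... } holds it only for the braced block, so early returns no longer need a goto-based unlock path. A minimal sketch, assuming a recent kernel tree:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);

static int demo(int fail)
{
	guard(mutex)(&demo_mutex);	/* released automatically on every return */

	if (fail)
		return -EINVAL;		/* no goto/unlock cleanup path needed */
	return 0;
}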
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 7efe04c68f0f..48ad0116f359 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -257,13 +257,12 @@ static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
struct pci_driver *driver;
enum pci_ers_result new_result;
- pci_lock_rescan_remove();
pdev = edev->pdev;
if (pdev)
get_device(&pdev->dev);
- pci_unlock_rescan_remove();
if (!pdev) {
eeh_edev_info(edev, "no device");
+ *result = PCI_ERS_RESULT_DISCONNECT;
return;
}
device_lock(&pdev->dev);
@@ -304,8 +303,9 @@ static void eeh_pe_report(const char *name, struct eeh_pe *root,
struct eeh_dev *edev, *tmp;
pr_info("EEH: Beginning: '%s'\n", name);
- eeh_for_each_pe(root, pe) eeh_pe_for_each_dev(pe, edev, tmp)
- eeh_pe_report_edev(edev, fn, result);
+ eeh_for_each_pe(root, pe)
+ eeh_pe_for_each_dev(pe, edev, tmp)
+ eeh_pe_report_edev(edev, fn, result);
if (result)
pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n",
name, pci_ers_result_name(*result));
@@ -383,6 +383,8 @@ static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
if (!edev)
return;
+ pci_lock_rescan_remove();
+
/*
* The content in the config space isn't saved because
* the blocked config space on some adapters. We have
@@ -393,14 +395,19 @@ static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
if (list_is_last(&edev->entry, &edev->pe->edevs))
eeh_pe_restore_bars(edev->pe);
+ pci_unlock_rescan_remove();
return;
}
pdev = eeh_dev_to_pci_dev(edev);
- if (!pdev)
+ if (!pdev) {
+ pci_unlock_rescan_remove();
return;
+ }
pci_restore_state(pdev);
+
+ pci_unlock_rescan_remove();
}
/**
@@ -647,9 +654,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
} else {
- pci_lock_rescan_remove();
pci_hp_remove_devices(bus);
- pci_unlock_rescan_remove();
}
/*
@@ -665,8 +670,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
if (rc)
return rc;
- pci_lock_rescan_remove();
-
/* Restore PE */
eeh_ops->configure_bridge(pe);
eeh_pe_restore_bars(pe);
@@ -674,7 +677,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
/* Clear frozen state */
rc = eeh_clear_pe_frozen_state(pe, false);
if (rc) {
- pci_unlock_rescan_remove();
return rc;
}
@@ -709,7 +711,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
pe->tstamp = tstamp;
pe->freeze_count = cnt;
- pci_unlock_rescan_remove();
return 0;
}
@@ -843,10 +844,13 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
{LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
int devices = 0;
+ pci_lock_rescan_remove();
+
bus = eeh_pe_bus_get(pe);
if (!bus) {
pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
__func__, pe->phb->global_number, pe->addr);
+ pci_unlock_rescan_remove();
return;
}
@@ -907,7 +911,7 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
/* FIXME: Use the same format as dump_stack() */
pr_err("EEH: Call Trace:\n");
for (i = 0; i < pe->trace_entries; i++)
- pr_err("EEH: [%pK] %pS\n", ptrs[i], ptrs[i]);
+ pr_err("EEH: [%p] %pS\n", ptrs[i], ptrs[i]);
pe->trace_entries = 0;
}
@@ -1094,10 +1098,15 @@ recover_failed:
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
- pci_lock_rescan_remove();
- pci_hp_remove_devices(bus);
- pci_unlock_rescan_remove();
+ bus = eeh_pe_bus_get(pe);
+ if (bus)
+ pci_hp_remove_devices(bus);
+ else
+ pr_err("%s: PCI bus for PHB#%x-PE#%x disappeared\n",
+ __func__, pe->phb->global_number, pe->addr);
+
/* The passed PE should no longer be used */
+ pci_unlock_rescan_remove();
return;
}
@@ -1114,6 +1123,8 @@ out:
eeh_clear_slot_attention(edev->pdev);
eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
+
+ pci_unlock_rescan_remove();
}
/**
@@ -1132,6 +1143,7 @@ void eeh_handle_special_event(void)
unsigned long flags;
int rc;
+ pci_lock_rescan_remove();
do {
rc = eeh_ops->next_error(&pe);
@@ -1171,10 +1183,12 @@ void eeh_handle_special_event(void)
break;
case EEH_NEXT_ERR_NONE:
+ pci_unlock_rescan_remove();
return;
default:
pr_warn("%s: Invalid value %d from next_error()\n",
__func__, rc);
+ pci_unlock_rescan_remove();
return;
}
@@ -1186,7 +1200,9 @@ void eeh_handle_special_event(void)
if (rc == EEH_NEXT_ERR_FROZEN_PE ||
rc == EEH_NEXT_ERR_FENCED_PHB) {
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+ pci_unlock_rescan_remove();
eeh_handle_normal_event(pe);
+ pci_lock_rescan_remove();
} else {
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
@@ -1199,7 +1215,6 @@ void eeh_handle_special_event(void)
eeh_report_failure, NULL);
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
- pci_lock_rescan_remove();
list_for_each_entry(hose, &hose_list, list_node) {
phb_pe = eeh_phb_pe_get(hose);
if (!phb_pe ||
@@ -1218,7 +1233,6 @@ void eeh_handle_special_event(void)
}
pci_hp_remove_devices(bus);
}
- pci_unlock_rescan_remove();
}
/*
@@ -1228,4 +1242,6 @@ void eeh_handle_special_event(void)
if (rc == EEH_NEXT_ERR_DEAD_IOC)
break;
} while (rc != EEH_NEXT_ERR_NONE);
+
+ pci_unlock_rescan_remove();
}
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index d283d281d28e..e740101fadf3 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -671,10 +671,12 @@ static void eeh_bridge_check_link(struct eeh_dev *edev)
eeh_ops->write_config(edev, cap + PCI_EXP_LNKCTL, 2, val);
/* Check link */
- if (!edev->pdev->link_active_reporting) {
- eeh_edev_dbg(edev, "No link reporting capability\n");
- msleep(1000);
- return;
+ if (edev->pdev) {
+ if (!edev->pdev->link_active_reporting) {
+ eeh_edev_dbg(edev, "No link reporting capability\n");
+ msleep(1000);
+ return;
+ }
}
/* Wait the link is up until timeout (5s) */
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 8ca49e40c473..5782e743fd27 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -333,7 +333,7 @@ static __init u64 fadump_calculate_reserve_size(void)
* memory at a predefined offset.
*/
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
- &size, &base, NULL, NULL);
+ &size, &base, NULL, NULL, NULL);
if (ret == 0 && size > 0) {
unsigned long max_size;
@@ -1373,15 +1373,12 @@ static void fadump_free_elfcorehdr_buf(void)
static void fadump_invalidate_release_mem(void)
{
- mutex_lock(&fadump_mutex);
- if (!fw_dump.dump_active) {
- mutex_unlock(&fadump_mutex);
- return;
+ scoped_guard(mutex, &fadump_mutex) {
+ if (!fw_dump.dump_active)
+ return;
+ fadump_cleanup();
}
- fadump_cleanup();
- mutex_unlock(&fadump_mutex);
-
fadump_free_elfcorehdr_buf();
fadump_release_memory(fw_dump.boot_mem_top, memblock_end_of_DRAM());
fadump_free_cpu_notes_buf();
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 9ea74973d78d..6f444d0822d8 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -141,6 +141,9 @@ void pci_hp_add_devices(struct pci_bus *bus)
struct pci_controller *phb;
struct device_node *dn = pci_bus_to_OF_node(bus);
+ if (!dn)
+ return;
+
phb = pci_bus_to_host(bus);
mode = PCI_PROBE_NORMAL;
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 5407024881e5..583dc16e9d3c 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -312,13 +312,13 @@ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
{
struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
char *p;
- int next_free, rc;
+ int next_free;
struct flash_block_list *fl;
- mutex_lock(&rtas_update_flash_mutex);
+ guard(mutex)(&rtas_update_flash_mutex);
if (uf->status == FLASH_AUTH || count == 0)
- goto out; /* discard data */
+ return count; /* discard data */
/* In the case that the image is not ready for flashing, the memory
* allocated for the block list will be freed upon the release of the
@@ -327,7 +327,7 @@ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
if (uf->flist == NULL) {
uf->flist = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
if (!uf->flist)
- goto nomem;
+ return -ENOMEM;
}
fl = uf->flist;
@@ -338,7 +338,7 @@ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
/* Need to allocate another block_list */
fl->next = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
if (!fl->next)
- goto nomem;
+ return -ENOMEM;
fl = fl->next;
next_free = 0;
}
@@ -347,25 +347,17 @@ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
count = RTAS_BLK_SIZE;
p = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
if (!p)
- goto nomem;
+ return -ENOMEM;
if(copy_from_user(p, buffer, count)) {
kmem_cache_free(flash_block_cache, p);
- rc = -EFAULT;
- goto error;
+ return -EFAULT;
}
fl->blocks[next_free].data = p;
fl->blocks[next_free].length = count;
fl->num_blocks++;
-out:
- mutex_unlock(&rtas_update_flash_mutex);
- return count;
-nomem:
- rc = -ENOMEM;
-error:
- mutex_unlock(&rtas_update_flash_mutex);
- return rc;
+ return count;
}
/*
@@ -405,19 +397,18 @@ static ssize_t manage_flash_write(struct file *file, const char __user *buf,
static const char reject_str[] = "0";
static const char commit_str[] = "1";
char stkbuf[10];
- int op, rc;
+ int op;
- mutex_lock(&rtas_manage_flash_mutex);
+ guard(mutex)(&rtas_manage_flash_mutex);
if ((args_buf->status == MANAGE_AUTH) || (count == 0))
- goto out;
+ return count;
op = -1;
if (buf) {
if (count > 9) count = 9;
- rc = -EFAULT;
if (copy_from_user (stkbuf, buf, count))
- goto error;
+ return -EFAULT;
if (strncmp(stkbuf, reject_str, strlen(reject_str)) == 0)
op = RTAS_REJECT_TMP_IMG;
else if (strncmp(stkbuf, commit_str, strlen(commit_str)) == 0)
@@ -425,18 +416,11 @@ static ssize_t manage_flash_write(struct file *file, const char __user *buf,
}
if (op == -1) { /* buf is empty, or contains invalid string */
- rc = -EINVAL;
- goto error;
+ return -EINVAL;
}
manage_flash(args_buf, op);
-out:
- mutex_unlock(&rtas_manage_flash_mutex);
return count;
-
-error:
- mutex_unlock(&rtas_manage_flash_mutex);
- return rc;
}
/*
@@ -499,16 +483,14 @@ static ssize_t validate_flash_write(struct file *file, const char __user *buf,
{
struct rtas_validate_flash_t *const args_buf =
&rtas_validate_flash_data;
- int rc;
- mutex_lock(&rtas_validate_flash_mutex);
+ guard(mutex)(&rtas_validate_flash_mutex);
/* We are only interested in the first 4K of the
* candidate image */
if ((*off >= VALIDATE_BUF_SIZE) ||
(args_buf->status == VALIDATE_AUTH)) {
*off += count;
- mutex_unlock(&rtas_validate_flash_mutex);
return count;
}
@@ -519,20 +501,14 @@ static ssize_t validate_flash_write(struct file *file, const char __user *buf,
args_buf->status = VALIDATE_INCOMPLETE;
}
- if (!access_ok(buf, count)) {
- rc = -EFAULT;
- goto done;
- }
- if (copy_from_user(args_buf->buf + *off, buf, count)) {
- rc = -EFAULT;
- goto done;
- }
+ if (!access_ok(buf, count))
+ return -EFAULT;
+
+ if (copy_from_user(args_buf->buf + *off, buf, count))
+ return -EFAULT;
*off += count;
- rc = count;
-done:
- mutex_unlock(&rtas_validate_flash_mutex);
- return rc;
+ return count;
}
static int validate_flash_release(struct inode *inode, struct file *file)
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 00e9c267b912..d1a2d755381c 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -110,7 +110,7 @@ void __init arch_reserve_crashkernel(void)
/* use common parsing */
ret = parse_crashkernel(boot_command_line, total_mem_sz, &crash_size,
- &crash_base, NULL, NULL);
+ &crash_base, NULL, NULL, NULL);
if (ret)
return;
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 3a6592a31a10..03f8c34fa0a2 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -393,7 +393,7 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
{
unsigned long gfn = memslot->base_gfn;
unsigned long end, start = gfn_to_hva(kvm, gfn);
- unsigned long vm_flags;
+ vm_flags_t vm_flags;
int ret = 0;
struct vm_area_struct *vma;
int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
diff --git a/arch/powerpc/kvm/trace_book3s.h b/arch/powerpc/kvm/trace_book3s.h
index 372a82fa2de3..9260ddbd557f 100644
--- a/arch/powerpc/kvm/trace_book3s.h
+++ b/arch/powerpc/kvm/trace_book3s.h
@@ -25,6 +25,7 @@
{0xe00, "H_DATA_STORAGE"}, \
{0xe20, "H_INST_STORAGE"}, \
{0xe40, "H_EMUL_ASSIST"}, \
+ {0xea0, "H_VIRT"}, \
{0xf00, "PERFMON"}, \
{0xf20, "ALTIVEC"}, \
{0xf40, "VSX"}
diff --git a/arch/powerpc/mm/book3s64/hash_hugepage.c b/arch/powerpc/mm/book3s64/hash_hugepage.c
index 15d6f3ea7178..cdfd4fe75edb 100644
--- a/arch/powerpc/mm/book3s64/hash_hugepage.c
+++ b/arch/powerpc/mm/book3s64/hash_hugepage.c
@@ -54,7 +54,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
/*
* Make sure this is thp or devmap entry
*/
- if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)))
+ if (!(old_pmd & H_PAGE_THP_HUGE))
return 0;
rflags = htab_convert_pte_flags(new_pmd, flags);
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 988948d69bc1..82d31177630b 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -195,7 +195,7 @@ unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr
unsigned long old;
#ifdef CONFIG_DEBUG_VM
- WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+ WARN_ON(!hash__pmd_trans_huge(*pmdp));
assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif
@@ -227,7 +227,6 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
- VM_BUG_ON(pmd_devmap(*pmdp));
pmd = *pmdp;
pmd_clear(pmdp);
diff --git a/arch/powerpc/mm/book3s64/hugetlbpage.c b/arch/powerpc/mm/book3s64/hugetlbpage.c
index 83c3361b358b..2bcbbf9d85ac 100644
--- a/arch/powerpc/mm/book3s64/hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/hugetlbpage.c
@@ -74,7 +74,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
/* Make sure this is a hugetlb entry */
- if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
+ if (old_pte & H_PAGE_THP_HUGE)
return 0;
rflags = htab_convert_pte_flags(new_pte, flags);
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 0db01e10a3f8..c9431ae7f78a 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -62,7 +62,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
{
int changed;
#ifdef CONFIG_DEBUG_VM
- WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+ WARN_ON(!pmd_trans_huge(*pmdp));
assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
changed = !pmd_same(*(pmdp), entry);
@@ -82,7 +82,6 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
{
int changed;
#ifdef CONFIG_DEBUG_VM
- WARN_ON(!pud_devmap(*pudp));
assert_spin_locked(pud_lockptr(vma->vm_mm, pudp));
#endif
changed = !pud_same(*(pudp), entry);
@@ -204,8 +203,8 @@ pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
{
pmd_t pmd;
VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
- VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
- !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
+ VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp)) ||
+ !pmd_present(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
/*
* if it not a fullmm flush, then we can possibly end up converting
@@ -223,8 +222,7 @@ pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
pud_t pud;
VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
- VM_BUG_ON((pud_present(*pudp) && !pud_devmap(*pudp)) ||
- !pud_present(*pudp));
+ VM_BUG_ON(!pud_present(*pudp));
pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);
/*
* if it not a fullmm flush, then we can possibly end up converting
@@ -644,7 +642,7 @@ unsigned long memremap_compat_align(void)
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
unsigned long prot;
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 376dba5992f2..be523e5fe9c5 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1122,18 +1122,25 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
pte_t *pte;
/*
- * Make sure we align the start vmemmap addr so that we calculate
- * the correct start_pfn in altmap boundary check to decided whether
- * we should use altmap or RAM based backing memory allocation. Also
- * the address need to be aligned for set_pte operation.
-
- * If the start addr is already PMD_SIZE aligned we will try to use
- * a pmd mapping. We don't want to be too aggressive here beacause
- * that will cause more allocations in RAM. So only if the namespace
- * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
+ * If altmap is present, make sure we align the start vmemmap addr
+ * to PAGE_SIZE so that we calculate the correct start_pfn in the
+ * altmap boundary check to decide whether we should use altmap or
+ * RAM based backing memory allocation. Also the address needs to be
+ * aligned for the set_pte operation. If the start addr is already
+ * PMD_SIZE aligned and within the altmap boundary, we will try to
+ * use a PMD-size altmap mapping; otherwise we fall back to a
+ * page-size mapping.
+ *
+ * If altmap is not present, align the vmemmap addr to PMD_SIZE and
+ * always allocate a PMD size page for vmemmap backing.
+ *
*/
- start = ALIGN_DOWN(start, PAGE_SIZE);
+ if (altmap)
+ start = ALIGN_DOWN(start, PAGE_SIZE);
+ else
+ start = ALIGN_DOWN(start, PMD_SIZE);
+
for (addr = start; addr < end; addr = next) {
next = pmd_addr_end(addr, end);
@@ -1159,7 +1166,7 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
* in altmap block allocation failures, in which case
* we fallback to RAM for vmemmap allocation.
*/
- if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
+ if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
/*
* make sure we don't create altmap mappings
@@ -1173,7 +1180,7 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
vmemmap_set_pmd(pmd, p, node, addr, next);
pr_debug("PMD_SIZE vmemmap mapping\n");
continue;
- } else if (altmap) {
+ } else {
/*
* A vmemmap block allocation can fail due to
* alignment requirements and we trying to align
@@ -1426,7 +1433,7 @@ unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long add
unsigned long old;
#ifdef CONFIG_DEBUG_VM
- WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+ WARN_ON(!radix__pmd_trans_huge(*pmdp));
assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif
@@ -1443,7 +1450,7 @@ unsigned long radix__pud_hugepage_update(struct mm_struct *mm, unsigned long add
unsigned long old;
#ifdef CONFIG_DEBUG_VM
- WARN_ON(!pud_devmap(*pudp));
+ WARN_ON(!pud_trans_huge(*pudp));
assert_spin_locked(pud_lockptr(mm, pudp));
#endif
@@ -1461,7 +1468,6 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
- VM_BUG_ON(pmd_devmap(*pmdp));
/*
* khugepaged calls this for normal pmd
*/
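The vmemmap hunk in this file changes how the start address is rounded: PAGE_SIZE alignment when an altmap is present, PMD_SIZE alignment otherwise. The difference is plain power-of-two masking; a small userspace check, assuming 4K pages and 2M PMDs:

#include <assert.h>

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1UL))

int main(void)
{
	unsigned long page_size = 4096UL;	/* assumed 4K pages */
	unsigned long pmd_size = 2UL << 20;	/* assumed 2M PMDs */
	unsigned long start = 0x205678UL;	/* 2 MiB plus a bit */

	assert(ALIGN_DOWN(start, page_size) == 0x205000UL);	/* 4K boundary */
	assert(ALIGN_DOWN(start, pmd_size) == 0x200000UL);	/* 2M boundary */
	return 0;
}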
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 5c8d1bb98b3e..5e4897daaaea 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -178,7 +178,7 @@ static void __init get_crash_kernel(void *fdt, unsigned long size)
int ret;
ret = parse_crashkernel(boot_command_line, size, &crash_size,
- &crash_base, NULL, NULL);
+ &crash_base, NULL, NULL, NULL);
if (ret != 0 || crash_size == 0)
return;
if (crash_base == 0)
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 61df5aed7989..dfaa9fd86f7e 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -509,7 +509,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
return NULL;
#endif
- if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
+ if (pmd_trans_huge(pmd)) {
if (is_thp)
*is_thp = true;
ret_pte = (pte_t *)pmdp;
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 5daa77aee7f7..025524378443 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -370,6 +370,23 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
return 0;
}
+bool bpf_jit_bypass_spec_v1(void)
+{
+#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
+ return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR));
+#else
+ return true;
+#endif
+}
+
+bool bpf_jit_bypass_spec_v4(void)
+{
+ return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_STF_BARRIER) &&
+ stf_barrier_type_get() != STF_BARRIER_NONE);
+}
+
/*
* We spill into the redzone always, even if the bpf program has its own stackframe.
* Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
@@ -392,11 +409,77 @@ asm (
" blr ;"
);
+static int emit_atomic_ld_st(const struct bpf_insn insn, struct codegen_context *ctx, u32 *image)
+{
+ u32 code = insn.code;
+ u32 dst_reg = bpf_to_ppc(insn.dst_reg);
+ u32 src_reg = bpf_to_ppc(insn.src_reg);
+ u32 size = BPF_SIZE(code);
+ u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
+ u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
+ s16 off = insn.off;
+ s32 imm = insn.imm;
+
+ switch (imm) {
+ case BPF_LOAD_ACQ:
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ break;
+ case BPF_DW:
+ if (off % 4) {
+ EMIT(PPC_RAW_LI(tmp1_reg, off));
+ EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
+ } else {
+ EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
+ }
+ break;
+ }
+ EMIT(PPC_RAW_LWSYNC());
+ break;
+ case BPF_STORE_REL:
+ EMIT(PPC_RAW_LWSYNC());
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
+ break;
+ case BPF_DW:
+ if (off % 4) {
+ EMIT(PPC_RAW_LI(tmp2_reg, off));
+ EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
+ } else {
+ EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
+ }
+ break;
+ }
+ break;
+ default:
+ pr_err_ratelimited("unexpected atomic load/store op code %02x\n",
+ imm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass)
{
enum stf_barrier_type stf_barrier = stf_barrier_type_get();
+ bool sync_emitted, ori31_emitted;
const struct bpf_insn *insn = fp->insnsi;
int flen = fp->len;
int i, ret;
@@ -789,30 +872,51 @@ emit_clear:
/*
* BPF_ST NOSPEC (speculation barrier)
+ *
+ * The following must act as a barrier against both Spectre v1
+ * and v4 if we requested both mitigations. Therefore, also emit
+ * 'isync; sync' on E500 or 'ori31' on BOOK3S_64 in addition to
+ * the insns needed for a Spectre v4 barrier.
+ *
+ * If we requested only !bypass_spec_v1 OR only !bypass_spec_v4,
+ * we can skip the respective other barrier type as an
+ * optimization.
*/
case BPF_ST | BPF_NOSPEC:
- if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
- !security_ftr_enabled(SEC_FTR_STF_BARRIER))
- break;
-
- switch (stf_barrier) {
- case STF_BARRIER_EIEIO:
- EMIT(PPC_RAW_EIEIO() | 0x02000000);
- break;
- case STF_BARRIER_SYNC_ORI:
+ sync_emitted = false;
+ ori31_emitted = false;
+ if (IS_ENABLED(CONFIG_PPC_E500) &&
+ !bpf_jit_bypass_spec_v1()) {
+ EMIT(PPC_RAW_ISYNC());
EMIT(PPC_RAW_SYNC());
- EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
- EMIT(PPC_RAW_ORI(_R31, _R31, 0));
- break;
- case STF_BARRIER_FALLBACK:
- ctx->seen |= SEEN_FUNC;
- PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
- EMIT(PPC_RAW_MTCTR(_R12));
- EMIT(PPC_RAW_BCTRL());
- break;
- case STF_BARRIER_NONE:
- break;
+ sync_emitted = true;
}
+ if (!bpf_jit_bypass_spec_v4()) {
+ switch (stf_barrier) {
+ case STF_BARRIER_EIEIO:
+ EMIT(PPC_RAW_EIEIO() | 0x02000000);
+ break;
+ case STF_BARRIER_SYNC_ORI:
+ if (!sync_emitted)
+ EMIT(PPC_RAW_SYNC());
+ EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
+ EMIT(PPC_RAW_ORI(_R31, _R31, 0));
+ ori31_emitted = true;
+ break;
+ case STF_BARRIER_FALLBACK:
+ ctx->seen |= SEEN_FUNC;
+ PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+ EMIT(PPC_RAW_MTCTR(_R12));
+ EMIT(PPC_RAW_BCTRL());
+ break;
+ case STF_BARRIER_NONE:
+ break;
+ }
+ }
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+ !bpf_jit_bypass_spec_v1() &&
+ !ori31_emitted)
+ EMIT(PPC_RAW_ORI(_R31, _R31, 0));
break;
/*
@@ -859,8 +963,25 @@ emit_clear:
/*
* BPF_STX ATOMIC (atomic ops)
*/
+ case BPF_STX | BPF_ATOMIC | BPF_B:
+ case BPF_STX | BPF_ATOMIC | BPF_H:
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
+ if (bpf_atomic_is_load_store(&insn[i])) {
+ ret = emit_atomic_ld_st(insn[i], ctx, image);
+ if (ret)
+ return ret;
+
+ if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
+ addrs[++i] = ctx->idx * 4;
+ break;
+ } else if (size == BPF_B || size == BPF_H) {
+ pr_err_ratelimited(
+ "eBPF filter atomic op code %02x (@%d) unsupported\n",
+ code, i);
+ return -EOPNOTSUPP;
+ }
+
save_reg = tmp2_reg;
ret_reg = src_reg;
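The new emit_atomic_ld_st() above lowers BPF_LOAD_ACQ to a plain load followed by lwsync, and BPF_STORE_REL to lwsync followed by a plain store, a standard Power mapping for acquire/release ordering. The same ordering expressed with C11 atomics, as a rough analogy (function and variable names are illustrative; a ppc64 compiler emits comparable lwsync fences):

#include <stdatomic.h>

static _Atomic int flag;
static int payload;

void producer(void)
{
	payload = 42;				/* plain store */
	/* release: prior stores ordered before the flag store (lwsync) */
	atomic_store_explicit(&flag, 1, memory_order_release);
}

int consumer(void)
{
	/* acquire: the flag load ordered before all later accesses (lwsync) */
	while (!atomic_load_explicit(&flag, memory_order_acquire))
		;
	return payload;				/* guaranteed to observe 42 */
}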
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 681cf85af2b3..e42677cc254a 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -713,12 +713,12 @@ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
ev_len = be16_to_cpu(event->length);
if (ev_len % 16)
- pr_info("event %zu has length %zu not divisible by 16: event=%pK\n",
+ pr_info("event %zu has length %zu not divisible by 16: event=%p\n",
event_idx, ev_len, event);
ev_end = (__u8 *)event + ev_len;
if (ev_end > end) {
- pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n",
+ pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%p > end=%p, offset=%zu\n",
event_idx, ev_len, ev_end, end,
offset);
return -1;
@@ -726,14 +726,14 @@ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
calc_ev_end = event_end(event, end);
if (!calc_ev_end) {
- pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n",
+ pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%p end=%p, offset=%zu\n",
event_idx, event_data_bytes, event, end,
offset);
return -1;
}
if (calc_ev_end > ev_end) {
- pr_warn("event %zu exceeds its own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
+ pr_warn("event %zu exceeds its own length: event=%p, end=%p, offset=%zu, calc_ev_end=%p\n",
event_idx, event, ev_end, offset, calc_ev_end);
return -1;
}
diff --git a/arch/powerpc/platforms/44x/gpio.c b/arch/powerpc/platforms/44x/gpio.c
index d540e261d85a..08ab76582568 100644
--- a/arch/powerpc/platforms/44x/gpio.c
+++ b/arch/powerpc/platforms/44x/gpio.c
@@ -180,7 +180,7 @@ static int __init ppc4xx_add_gpiochips(void)
gc->direction_input = ppc4xx_gpio_dir_in;
gc->direction_output = ppc4xx_gpio_dir_out;
gc->get = ppc4xx_gpio_get;
- gc->set_rv = ppc4xx_gpio_set;
+ gc->set = ppc4xx_gpio_set;
ret = of_mm_gpiochip_add_data(np, mm_gc, ppc4xx_gc);
if (ret)
diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
index 9668b052cd4b..f251e0f68262 100644
--- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
+++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
@@ -240,10 +240,8 @@ static int mpc512x_lpbfifo_kick(void)
dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
/* Make DMA channel work with LPB FIFO data register */
- if (dma_dev->device_config(lpbfifo.chan, &dma_conf)) {
- ret = -EINVAL;
- goto err_dma_prep;
- }
+ if (dma_dev->device_config(lpbfifo.chan, &dma_conf))
+ return -EINVAL;
sg_init_table(&sg, 1);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index bda707d848a6..7748b6641a3c 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -336,7 +336,7 @@ static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt)
gpt->gc.direction_input = mpc52xx_gpt_gpio_dir_in;
gpt->gc.direction_output = mpc52xx_gpt_gpio_dir_out;
gpt->gc.get = mpc52xx_gpt_gpio_get;
- gpt->gc.set_rv = mpc52xx_gpt_gpio_set;
+ gpt->gc.set = mpc52xx_gpt_gpio_set;
gpt->gc.base = -1;
gpt->gc.parent = gpt->dev;
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 6e37dfc6c5c9..cb7b9498f291 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -126,7 +126,7 @@ static int mcu_gpiochip_add(struct mcu *mcu)
gc->can_sleep = 1;
gc->ngpio = MCU_NUM_GPIO;
gc->base = -1;
- gc->set_rv = mcu_gpio_set;
+ gc->set = mcu_gpio_set;
gc->direction_output = mcu_gpio_dir_out;
gc->parent = dev;
diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
index 7462c221115c..7433be7d66ee 100644
--- a/arch/powerpc/platforms/8xx/cpm1.c
+++ b/arch/powerpc/platforms/8xx/cpm1.c
@@ -499,7 +499,7 @@ int cpm1_gpiochip_add16(struct device *dev)
gc->direction_input = cpm1_gpio16_dir_in;
gc->direction_output = cpm1_gpio16_dir_out;
gc->get = cpm1_gpio16_get;
- gc->set_rv = cpm1_gpio16_set;
+ gc->set = cpm1_gpio16_set;
gc->to_irq = cpm1_gpio16_to_irq;
gc->parent = dev;
gc->owner = THIS_MODULE;
@@ -622,7 +622,7 @@ int cpm1_gpiochip_add32(struct device *dev)
gc->direction_input = cpm1_gpio32_dir_in;
gc->direction_output = cpm1_gpio32_dir_out;
gc->get = cpm1_gpio32_get;
- gc->set_rv = cpm1_gpio32_set;
+ gc->set = cpm1_gpio32_set;
gc->parent = dev;
gc->owner = THIS_MODULE;
diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
index dc6f75d3ac6e..49b15e7a8265 100644
--- a/arch/powerpc/platforms/book3s/vas-api.c
+++ b/arch/powerpc/platforms/book3s/vas-api.c
@@ -425,23 +425,22 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
- mutex_lock(&txwin->task_ref.mmap_mutex);
/*
* The window may be inactive due to lost credit (Ex: core
* removal with DLPAR). If the window is active again when
* the credit is available, map the new paste address at the
* window virtual address.
*/
- if (txwin->status == VAS_WIN_ACTIVE) {
- paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
- if (paste_addr) {
- fault = vmf_insert_pfn(vma, vma->vm_start,
- (paste_addr >> PAGE_SHIFT));
- mutex_unlock(&txwin->task_ref.mmap_mutex);
- return fault;
+ scoped_guard(mutex, &txwin->task_ref.mmap_mutex) {
+ if (txwin->status == VAS_WIN_ACTIVE) {
+ paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
+ if (paste_addr) {
+ fault = vmf_insert_pfn(vma, vma->vm_start,
+ (paste_addr >> PAGE_SHIFT));
+ return fault;
+ }
}
}
- mutex_unlock(&txwin->task_ref.mmap_mutex);
/*
* Received this fault due to closing the actual window.
@@ -494,9 +493,8 @@ static void vas_mmap_close(struct vm_area_struct *vma)
return;
}
- mutex_lock(&txwin->task_ref.mmap_mutex);
- txwin->task_ref.vma = NULL;
- mutex_unlock(&txwin->task_ref.mmap_mutex);
+ scoped_guard(mutex, &txwin->task_ref.mmap_mutex)
+ txwin->task_ref.vma = NULL;
}
static const struct vm_operations_struct vas_vm_ops = {
@@ -552,18 +550,16 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
* close/open event and allows mmap() only when the window is
* active.
*/
- mutex_lock(&txwin->task_ref.mmap_mutex);
+ guard(mutex)(&txwin->task_ref.mmap_mutex);
if (txwin->status != VAS_WIN_ACTIVE) {
pr_err("Window is not active\n");
- rc = -EACCES;
- goto out;
+ return -EACCES;
}
paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
if (!paste_addr) {
pr_err("Window paste address failed\n");
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
pfn = paste_addr >> PAGE_SHIFT;
@@ -583,8 +579,6 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
txwin->task_ref.vma = vma;
vma->vm_ops = &vas_vm_ops;
-out:
- mutex_unlock(&txwin->task_ref.mmap_mutex);
return rc;
}
diff --git a/arch/powerpc/platforms/powernv/ocxl.c b/arch/powerpc/platforms/powernv/ocxl.c
index 64a9c7125c29..f8139948348e 100644
--- a/arch/powerpc/platforms/powernv/ocxl.c
+++ b/arch/powerpc/platforms/powernv/ocxl.c
@@ -172,12 +172,11 @@ static void pnv_ocxl_fixup_actag(struct pci_dev *dev)
if (phb->type != PNV_PHB_NPU_OCAPI)
return;
- mutex_lock(&links_list_lock);
+ guard(mutex)(&links_list_lock);
link = find_link(dev);
if (!link) {
dev_warn(&dev->dev, "couldn't update actag information\n");
- mutex_unlock(&links_list_lock);
return;
}
@@ -206,7 +205,6 @@ static void pnv_ocxl_fixup_actag(struct pci_dev *dev)
dev_dbg(&dev->dev, "total actags for function: %d\n",
link->fn_desired_actags[PCI_FUNC(dev->devfn)]);
- mutex_unlock(&links_list_lock);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_ocxl_fixup_actag);
@@ -253,12 +251,11 @@ int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled,
{
struct npu_link *link;
- mutex_lock(&links_list_lock);
+ guard(mutex)(&links_list_lock);
link = find_link(dev);
if (!link) {
dev_err(&dev->dev, "actag information not found\n");
- mutex_unlock(&links_list_lock);
return -ENODEV;
}
/*
@@ -274,7 +271,6 @@ int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled,
*enabled = link->fn_actags[PCI_FUNC(dev->devfn)].count;
*supported = link->fn_desired_actags[PCI_FUNC(dev->devfn)];
- mutex_unlock(&links_list_lock);
return 0;
}
EXPORT_SYMBOL_GPL(pnv_ocxl_get_actag);
@@ -293,12 +289,11 @@ int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count)
*
* We only support one AFU-carrying function for now.
*/
- mutex_lock(&links_list_lock);
+ guard(mutex)(&links_list_lock);
link = find_link(dev);
if (!link) {
dev_err(&dev->dev, "actag information not found\n");
- mutex_unlock(&links_list_lock);
return -ENODEV;
}
@@ -309,7 +304,6 @@ int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count)
break;
}
- mutex_unlock(&links_list_lock);
dev_dbg(&dev->dev, "%d PASIDs available for function\n",
rc ? 0 : *count);
return rc;
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 5f4037c1d7fe..5e0a718d1be7 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -532,7 +532,6 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
balloon_page_insert(b_dev_info, newpage);
- balloon_page_delete(page);
b_dev_info->isolated_pages--;
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
@@ -542,6 +541,7 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
*/
plpar_page_set_active(page);
+ balloon_page_finalize(page);
/* balloon page list reference */
put_page(page);
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 213aa26dc8b3..979487da6522 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -404,6 +404,45 @@ get_device_node_with_drc_info(u32 index)
return NULL;
}
+static struct device_node *
+get_device_node_with_drc_indexes(u32 drc_index)
+{
+ struct device_node *np = NULL;
+ u32 nr_indexes, index;
+ int i, rc;
+
+ for_each_node_with_property(np, "ibm,drc-indexes") {
+ /*
+ * First element in the array is the total number of
+ * DRC indexes returned.
+ */
+ rc = of_property_read_u32_index(np, "ibm,drc-indexes",
+ 0, &nr_indexes);
+ if (rc)
+ goto out_put_np;
+
+ /*
+ * Retrieve DRC index from the list and return the
+ * device node if matched with the specified index.
+ */
+ for (i = 0; i < nr_indexes; i++) {
+ rc = of_property_read_u32_index(np, "ibm,drc-indexes",
+ i+1, &index);
+ if (rc)
+ goto out_put_np;
+
+ if (drc_index == index)
+ return np;
+ }
+ }
+
+ return NULL;
+
+out_put_np:
+ of_node_put(np);
+ return NULL;
+}
+
static int dlpar_hp_dt_add(u32 index)
{
struct device_node *np, *nodes;
@@ -423,10 +462,19 @@ static int dlpar_hp_dt_add(u32 index)
goto out;
}
+ /*
+ * Recent FW provides ibm,drc-info property. So search
+ * for the user specified DRC index from ibm,drc-info
+ * property. If this property is not available, search
+ * in the indexes array from ibm,drc-indexes property.
+ */
np = get_device_node_with_drc_info(index);
- if (!np)
- return -EIO;
+ if (!np) {
+ np = get_device_node_with_drc_indexes(index);
+ if (!np)
+ return -EIO;
+ }
/* Next, configure the connector. */
nodes = dlpar_configure_connector(cpu_to_be32(index), np);
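get_device_node_with_drc_indexes() added above walks the "ibm,drc-indexes" property, whose first u32 cell is the number of index cells that follow. A userspace sketch of that count-prefixed scan, with a stub array standing in for the device-tree property:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* cell 0 is the entry count, cells 1..n are the DRC indexes */
static const uint32_t drc_indexes[] = { 3, 0x1000, 0x1001, 0x1002 };

static bool has_drc_index(uint32_t wanted)
{
	uint32_t nr = drc_indexes[0];

	for (uint32_t i = 0; i < nr; i++)
		if (drc_indexes[i + 1] == wanted)
			return true;
	return false;
}

int main(void)
{
	assert(has_drc_index(0x1001));
	assert(!has_drc_index(0x2000));
	return 0;
}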
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 52e2623a741d..aeb8633a3d00 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -29,7 +29,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
nid = of_node_to_nid(dn);
if (likely((nid) >= 0)) {
if (!node_online(nid)) {
- if (__register_one_node(nid)) {
+ if (register_one_node(nid)) {
pr_err("PCI: Failed to register node %d\n", nid);
} else {
update_numa_distance(dn);
diff --git a/arch/powerpc/platforms/pseries/plpks-secvar.c b/arch/powerpc/platforms/pseries/plpks-secvar.c
index 257fd1f8bc19..f9e9cc40c9d0 100644
--- a/arch/powerpc/platforms/pseries/plpks-secvar.c
+++ b/arch/powerpc/platforms/pseries/plpks-secvar.c
@@ -59,7 +59,14 @@ static u32 get_policy(const char *name)
return PLPKS_SIGNEDUPDATE;
}
-static const char * const plpks_var_names[] = {
+static const char * const plpks_var_names_static[] = {
+ "PK",
+ "moduledb",
+ "trustedcadb",
+ NULL,
+};
+
+static const char * const plpks_var_names_dynamic[] = {
"PK",
"KEK",
"db",
@@ -152,39 +159,55 @@ err:
return rc;
}
-// PLPKS dynamic secure boot doesn't give us a format string in the same way OPAL does.
-// Instead, report the format using the SB_VERSION variable in the keystore.
-// The string is made up by us, and takes the form "ibm,plpks-sb-v<n>" (or "ibm,plpks-sb-unknown"
-// if the SB_VERSION variable doesn't exist). Hypervisor defines the SB_VERSION variable as a
-// "1 byte unsigned integer value".
-static ssize_t plpks_secvar_format(char *buf, size_t bufsize)
+/*
+ * Return the key management mode.
+ *
+ * SB_VERSION is defined as a "1 byte unsigned integer value", taking values
+ * starting from 1. It is owned by the Partition Firmware and its presence
+ * indicates that the key management mode is dynamic. Any failure in
+ * reading SB_VERSION defaults the key management mode to static. The error
+ * codes -ENOENT or -EPERM are expected in static key management mode. An
+ * unexpected error code will have to be investigated. Only signed variables
+ * have null bytes in their names, SB_VERSION does not.
+ *
+ * Return 0 to indicate that the key management mode is static. Otherwise
+ * return the SB_VERSION value to indicate that the key management mode is
+ * dynamic.
+ */
+static u8 plpks_get_sb_keymgmt_mode(void)
{
- struct plpks_var var = {0};
- ssize_t ret;
- u8 version;
-
- var.component = NULL;
- // Only the signed variables have null bytes in their names, this one doesn't
- var.name = "SB_VERSION";
- var.namelen = strlen(var.name);
- var.datalen = 1;
- var.data = &version;
-
- // Unlike the other vars, SB_VERSION is owned by firmware instead of the OS
- ret = plpks_read_fw_var(&var);
- if (ret) {
- if (ret == -ENOENT) {
- ret = snprintf(buf, bufsize, "ibm,plpks-sb-unknown");
- } else {
- pr_err("Error %ld reading SB_VERSION from firmware\n", ret);
- ret = -EIO;
- }
- goto err;
+ u8 mode;
+ ssize_t rc;
+ struct plpks_var var = {
+ .component = NULL,
+ .name = "SB_VERSION",
+ .namelen = 10,
+ .datalen = 1,
+ .data = &mode,
+ };
+
+ rc = plpks_read_fw_var(&var);
+ if (rc) {
+ if (rc != -ENOENT && rc != -EPERM)
+ pr_info("Error %ld reading SB_VERSION from firmware\n", rc);
+ mode = 0;
}
+ return mode;
+}
- ret = snprintf(buf, bufsize, "ibm,plpks-sb-v%hhu", version);
-err:
- return ret;
+/*
+ * PLPKS dynamic secure boot doesn't give us a format string in the same way
+ * OPAL does. Instead, report the format using the SB_VERSION variable in the
+ * keystore. The string, made up by us, takes the form "ibm,plpks-sb-v<n>"
+ * (or "ibm,plpks-sb-v0" in static key management mode). The function
+ * returns the length of the secvar format property.
+ */
+static ssize_t plpks_secvar_format(char *buf, size_t bufsize)
+{
+ u8 mode;
+
+ mode = plpks_get_sb_keymgmt_mode();
+ return snprintf(buf, bufsize, "ibm,plpks-sb-v%hhu", mode);
}
static int plpks_max_size(u64 *max_size)
@@ -197,21 +220,34 @@ static int plpks_max_size(u64 *max_size)
return 0;
}
+static const struct secvar_operations plpks_secvar_ops_static = {
+ .get = plpks_get_variable,
+ .set = plpks_set_variable,
+ .format = plpks_secvar_format,
+ .max_size = plpks_max_size,
+ .config_attrs = config_attrs,
+ .var_names = plpks_var_names_static,
+};
-static const struct secvar_operations plpks_secvar_ops = {
+static const struct secvar_operations plpks_secvar_ops_dynamic = {
.get = plpks_get_variable,
.set = plpks_set_variable,
.format = plpks_secvar_format,
.max_size = plpks_max_size,
.config_attrs = config_attrs,
- .var_names = plpks_var_names,
+ .var_names = plpks_var_names_dynamic,
};
static int plpks_secvar_init(void)
{
+ u8 mode;
+
if (!plpks_is_available())
return -ENODEV;
- return set_secvar_ops(&plpks_secvar_ops);
+ mode = plpks_get_sb_keymgmt_mode();
+ if (mode)
+ return set_secvar_ops(&plpks_secvar_ops_dynamic);
+ return set_secvar_ops(&plpks_secvar_ops_static);
}
machine_device_initcall(pseries, plpks_secvar_init);
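
With the static mode reported as 0, a single format string covers both key
management modes, so plpks_secvar_format() no longer needs an error path. A
small userspace sketch of the strings this produces (assumed convention from
the comments above: mode 0 = static, >= 1 = dynamic):

#include <stdio.h>

int main(void)
{
	char buf[32];

	for (unsigned char mode = 0; mode < 2; mode++) {
		snprintf(buf, sizeof(buf), "ibm,plpks-sb-v%hhu", mode);
		printf("%s\n", buf);	/* ibm,plpks-sb-v0, then ibm,plpks-sb-v1 */
	}
	return 0;
}
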
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index e22fc638dbc7..f469f6a9f6e0 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -210,7 +210,7 @@ int cpm2_gpiochip_add32(struct device *dev)
gc->direction_input = cpm2_gpio32_dir_in;
gc->direction_output = cpm2_gpio32_dir_out;
gc->get = cpm2_gpio32_get;
- gc->set_rv = cpm2_gpio32_set;
+ gc->set = cpm2_gpio32_set;
gc->parent = dev;
gc->owner = THIS_MODULE;
diff --git a/arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c b/arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c
index ce6c739c51e5..06d9101a5d49 100644
--- a/arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c
+++ b/arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c
@@ -75,7 +75,7 @@ static ssize_t fsl_timer_wakeup_store(struct device *dev,
if (kstrtoll(buf, 0, &interval))
return -EINVAL;
- mutex_lock(&sysfs_lock);
+ guard(mutex)(&sysfs_lock);
if (fsl_wakeup->timer) {
disable_irq_wake(fsl_wakeup->timer->irq);
@@ -83,31 +83,23 @@ static ssize_t fsl_timer_wakeup_store(struct device *dev,
fsl_wakeup->timer = NULL;
}
- if (!interval) {
- mutex_unlock(&sysfs_lock);
+ if (!interval)
return count;
- }
fsl_wakeup->timer = mpic_request_timer(fsl_mpic_timer_irq,
fsl_wakeup, interval);
- if (!fsl_wakeup->timer) {
- mutex_unlock(&sysfs_lock);
+ if (!fsl_wakeup->timer)
return -EINVAL;
- }
ret = enable_irq_wake(fsl_wakeup->timer->irq);
if (ret) {
mpic_free_timer(fsl_wakeup->timer);
fsl_wakeup->timer = NULL;
- mutex_unlock(&sysfs_lock);
-
return ret;
}
mpic_start_timer(fsl_wakeup->timer);
- mutex_unlock(&sysfs_lock);
-
return count;
}
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 5352932badd8..a4b233a0659e 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -43,7 +43,6 @@ config RISCV
select ARCH_HAS_PREEMPT_LAZY
select ARCH_HAS_PREPARE_SYNC_CORE_CMD
select ARCH_HAS_PTDUMP if MMU
- select ARCH_HAS_PTE_DEVMAP if 64BIT && MMU
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_DIRECT_MAP if MMU
select ARCH_HAS_SET_MEMORY if MMU
@@ -94,6 +93,7 @@ config RISCV
select CLINT_TIMER if RISCV_M_MODE
select CLONE_BACKWARDS
select COMMON_CLK
+ select CPU_NO_EFFICIENT_FFS if !RISCV_ISA_ZBB
select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND
select DYNAMIC_FTRACE if FUNCTION_TRACER
select EDAC_SUPPORT
@@ -157,7 +157,6 @@ config RISCV
select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG)
select HAVE_DYNAMIC_FTRACE_WITH_ARGS if HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_GRAPH_FUNC
- select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if HAVE_DYNAMIC_FTRACE_WITH_ARGS
select HAVE_FUNCTION_GRAPH_FREGS
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && HAVE_DYNAMIC_FTRACE
diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi
index 42724bf7e90e..03f1d7319049 100644
--- a/arch/riscv/boot/dts/thead/th1520.dtsi
+++ b/arch/riscv/boot/dts/thead/th1520.dtsi
@@ -297,8 +297,9 @@
reg-names = "dwmac", "apb";
interrupts = <67 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
- clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC1>;
- clock-names = "stmmaceth", "pclk";
+ clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC1>,
+ <&clk CLK_PERISYS_APB4_HCLK>;
+ clock-names = "stmmaceth", "pclk", "apb";
snps,pbl = <32>;
snps,fixed-burst;
snps,multicast-filter-bins = <64>;
@@ -319,8 +320,9 @@
reg-names = "dwmac", "apb";
interrupts = <66 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
- clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC0>;
- clock-names = "stmmaceth", "pclk";
+ clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC0>,
+ <&clk CLK_PERISYS_APB4_HCLK>;
+ clock-names = "stmmaceth", "pclk", "apb";
snps,pbl = <32>;
snps,fixed-burst;
snps,multicast-filter-bins = <64>;
diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h
index 1aaea81fb141..4c03e20ad11f 100644
--- a/arch/riscv/include/asm/bug.h
+++ b/arch/riscv/include/asm/bug.h
@@ -31,40 +31,45 @@ typedef u32 bug_insn_t;
#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
#define __BUG_ENTRY_ADDR RISCV_INT " 1b - ."
-#define __BUG_ENTRY_FILE RISCV_INT " %0 - ."
+#define __BUG_ENTRY_FILE(file) RISCV_INT " " file " - ."
#else
#define __BUG_ENTRY_ADDR RISCV_PTR " 1b"
-#define __BUG_ENTRY_FILE RISCV_PTR " %0"
+#define __BUG_ENTRY_FILE(file) RISCV_PTR " " file
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
-#define __BUG_ENTRY \
+#define __BUG_ENTRY(file, line, flags) \
__BUG_ENTRY_ADDR "\n\t" \
- __BUG_ENTRY_FILE "\n\t" \
- RISCV_SHORT " %1\n\t" \
- RISCV_SHORT " %2"
+ __BUG_ENTRY_FILE(file) "\n\t" \
+ RISCV_SHORT " " line "\n\t" \
+ RISCV_SHORT " " flags
#else
-#define __BUG_ENTRY \
- __BUG_ENTRY_ADDR "\n\t" \
- RISCV_SHORT " %2"
+#define __BUG_ENTRY(file, line, flags) \
+ __BUG_ENTRY_ADDR "\n\t" \
+ RISCV_SHORT " " flags
#endif
#ifdef CONFIG_GENERIC_BUG
-#define __BUG_FLAGS(flags) \
-do { \
- __asm__ __volatile__ ( \
+
+#define ARCH_WARN_ASM(file, line, flags, size) \
"1:\n\t" \
"ebreak\n" \
".pushsection __bug_table,\"aw\"\n\t" \
"2:\n\t" \
- __BUG_ENTRY "\n\t" \
- ".org 2b + %3\n\t" \
+ __BUG_ENTRY(file, line, flags) "\n\t" \
+ ".org 2b + " size "\n\t" \
".popsection" \
+
+#define __BUG_FLAGS(flags) \
+do { \
+ __asm__ __volatile__ ( \
+ ARCH_WARN_ASM("%0", "%1", "%2", "%3") \
: \
: "i" (__FILE__), "i" (__LINE__), \
"i" (flags), \
"i" (sizeof(struct bug_entry))); \
} while (0)
+
#else /* CONFIG_GENERIC_BUG */
#define __BUG_FLAGS(flags) do { \
__asm__ __volatile__ ("ebreak\n"); \
@@ -78,6 +83,8 @@ do { \
#define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags))
+#define ARCH_WARN_REACHABLE
+
#define HAVE_ARCH_BUG
#include <asm-generic/bug.h>
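
The refactor above turns the hard-coded %0/%1/%2 operands into macro parameters
so the same bug-table template (ARCH_WARN_ASM) can be reused with operands
chosen by the caller. A compressed sketch of the mechanism, assuming GCC/Clang
extended asm (the demo_* names are illustrative):

/* The template takes operand *strings*, so the caller picks the operands. */
#define DEMO_ENTRY(line) \
	".pushsection .demo_table,\"aw\"\n\t" \
	".short " line "\n\t" \
	".popsection"

static inline void demo_record_site(void)
{
	__asm__ __volatile__ (DEMO_ENTRY("%0") : : "i" (__LINE__));
}
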
diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h
index fb9696d7a3f2..4508aaa7a2fd 100644
--- a/arch/riscv/include/asm/cfi.h
+++ b/arch/riscv/include/asm/cfi.h
@@ -14,27 +14,11 @@ struct pt_regs;
#ifdef CONFIG_CFI_CLANG
enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
#define __bpfcall
-static inline int cfi_get_offset(void)
-{
- return 4;
-}
-
-#define cfi_get_offset cfi_get_offset
-extern u32 cfi_bpf_hash;
-extern u32 cfi_bpf_subprog_hash;
-extern u32 cfi_get_func_hash(void *func);
#else
static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
{
return BUG_TRAP_TYPE_NONE;
}
-
-#define cfi_bpf_hash 0U
-#define cfi_bpf_subprog_hash 0U
-static inline u32 cfi_get_func_hash(void *func)
-{
- return 0;
-}
#endif /* CONFIG_CFI_CLANG */
#endif /* _ASM_RISCV_CFI_H */
diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h
index 5acce285e56e..b04ecdd1a860 100644
--- a/arch/riscv/include/asm/kvm_aia.h
+++ b/arch/riscv/include/asm/kvm_aia.h
@@ -150,7 +150,7 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu);
-int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu);
int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
diff --git a/arch/riscv/include/asm/kvm_gstage.h b/arch/riscv/include/asm/kvm_gstage.h
new file mode 100644
index 000000000000..595e2183173e
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_gstage.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2025 Ventana Micro Systems Inc.
+ */
+
+#ifndef __RISCV_KVM_GSTAGE_H_
+#define __RISCV_KVM_GSTAGE_H_
+
+#include <linux/kvm_types.h>
+
+struct kvm_gstage {
+ struct kvm *kvm;
+ unsigned long flags;
+#define KVM_GSTAGE_FLAGS_LOCAL BIT(0)
+ unsigned long vmid;
+ pgd_t *pgd;
+};
+
+struct kvm_gstage_mapping {
+ gpa_t addr;
+ pte_t pte;
+ u32 level;
+};
+
+#ifdef CONFIG_64BIT
+#define kvm_riscv_gstage_index_bits 9
+#else
+#define kvm_riscv_gstage_index_bits 10
+#endif
+
+extern unsigned long kvm_riscv_gstage_mode;
+extern unsigned long kvm_riscv_gstage_pgd_levels;
+
+#define kvm_riscv_gstage_pgd_xbits 2
+#define kvm_riscv_gstage_pgd_size (1UL << (HGATP_PAGE_SHIFT + kvm_riscv_gstage_pgd_xbits))
+#define kvm_riscv_gstage_gpa_bits (HGATP_PAGE_SHIFT + \
+ (kvm_riscv_gstage_pgd_levels * \
+ kvm_riscv_gstage_index_bits) + \
+ kvm_riscv_gstage_pgd_xbits)
+#define kvm_riscv_gstage_gpa_size ((gpa_t)(1ULL << kvm_riscv_gstage_gpa_bits))
+
+bool kvm_riscv_gstage_get_leaf(struct kvm_gstage *gstage, gpa_t addr,
+ pte_t **ptepp, u32 *ptep_level);
+
+int kvm_riscv_gstage_set_pte(struct kvm_gstage *gstage,
+ struct kvm_mmu_memory_cache *pcache,
+ const struct kvm_gstage_mapping *map);
+
+int kvm_riscv_gstage_map_page(struct kvm_gstage *gstage,
+ struct kvm_mmu_memory_cache *pcache,
+ gpa_t gpa, phys_addr_t hpa, unsigned long page_size,
+ bool page_rdonly, bool page_exec,
+ struct kvm_gstage_mapping *out_map);
+
+enum kvm_riscv_gstage_op {
+ GSTAGE_OP_NOP = 0, /* Nothing */
+ GSTAGE_OP_CLEAR, /* Clear/Unmap */
+ GSTAGE_OP_WP, /* Write-protect */
+};
+
+void kvm_riscv_gstage_op_pte(struct kvm_gstage *gstage, gpa_t addr,
+ pte_t *ptep, u32 ptep_level, enum kvm_riscv_gstage_op op);
+
+void kvm_riscv_gstage_unmap_range(struct kvm_gstage *gstage,
+ gpa_t start, gpa_t size, bool may_block);
+
+void kvm_riscv_gstage_wp_range(struct kvm_gstage *gstage, gpa_t start, gpa_t end);
+
+void kvm_riscv_gstage_mode_detect(void);
+
+#endif
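
The address-space macros above compose as page_shift + levels * index_bits +
xbits. A worked check for the 64-bit default (values assumed from this series:
Sv39x4 with a 12-bit page shift, 3 levels, 9 index bits per level, and 2 extra
root-table bits):

enum {
	demo_page_shift = 12,	/* HGATP_PAGE_SHIFT */
	demo_pgd_levels = 3,	/* Sv39x4 */
	demo_index_bits = 9,
	demo_pgd_xbits  = 2,	/* 16 KiB root page table */
	demo_gpa_bits   = demo_page_shift +
			  demo_pgd_levels * demo_index_bits +
			  demo_pgd_xbits,
};
_Static_assert(demo_gpa_bits == 41, "Sv39x4 addresses 2^41 = 2 TiB of GPA space");
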
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index bcbf8b1ec115..d71d3299a335 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -16,6 +16,8 @@
#include <asm/hwcap.h>
#include <asm/kvm_aia.h>
#include <asm/ptrace.h>
+#include <asm/kvm_tlb.h>
+#include <asm/kvm_vmid.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
@@ -36,14 +38,16 @@
#define KVM_REQ_UPDATE_HGATP KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I \
KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_HFENCE_GVMA_VMID_ALL KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL \
KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE \
KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(6)
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+
#define KVM_HEDELEG_DEFAULT (BIT(EXC_INST_MISALIGNED) | \
+ BIT(EXC_INST_ILLEGAL) | \
BIT(EXC_BREAKPOINT) | \
BIT(EXC_SYSCALL) | \
BIT(EXC_INST_PAGE_FAULT) | \
@@ -54,24 +58,6 @@
BIT(IRQ_VS_TIMER) | \
BIT(IRQ_VS_EXT))
-enum kvm_riscv_hfence_type {
- KVM_RISCV_HFENCE_UNKNOWN = 0,
- KVM_RISCV_HFENCE_GVMA_VMID_GPA,
- KVM_RISCV_HFENCE_VVMA_ASID_GVA,
- KVM_RISCV_HFENCE_VVMA_ASID_ALL,
- KVM_RISCV_HFENCE_VVMA_GVA,
-};
-
-struct kvm_riscv_hfence {
- enum kvm_riscv_hfence_type type;
- unsigned long asid;
- unsigned long order;
- gpa_t addr;
- gpa_t size;
-};
-
-#define KVM_RISCV_VCPU_MAX_HFENCE 64
-
struct kvm_vm_stat {
struct kvm_vm_stat_generic generic;
};
@@ -97,15 +83,6 @@ struct kvm_vcpu_stat {
struct kvm_arch_memory_slot {
};
-struct kvm_vmid {
- /*
- * Writes to vmid_version and vmid happen with vmid_lock held
- * whereas reads happen without any lock held.
- */
- unsigned long vmid_version;
- unsigned long vmid;
-};
-
struct kvm_arch {
/* G-stage vmid */
struct kvm_vmid vmid;
@@ -309,77 +286,6 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
-
-void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
- gpa_t gpa, gpa_t gpsz,
- unsigned long order);
-void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
-void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
- unsigned long order);
-void kvm_riscv_local_hfence_gvma_all(void);
-void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
- unsigned long asid,
- unsigned long gva,
- unsigned long gvsz,
- unsigned long order);
-void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
- unsigned long asid);
-void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
- unsigned long gva, unsigned long gvsz,
- unsigned long order);
-void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
-
-void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);
-
-void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
-void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
-void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
-void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
-
-void kvm_riscv_fence_i(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask);
-void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask,
- gpa_t gpa, gpa_t gpsz,
- unsigned long order);
-void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask);
-void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask,
- unsigned long gva, unsigned long gvsz,
- unsigned long order, unsigned long asid);
-void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask,
- unsigned long asid);
-void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask,
- unsigned long gva, unsigned long gvsz,
- unsigned long order);
-void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask);
-
-int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
- phys_addr_t hpa, unsigned long size,
- bool writable, bool in_atomic);
-void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
- unsigned long size);
-int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
- struct kvm_memory_slot *memslot,
- gpa_t gpa, unsigned long hva, bool is_write);
-int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
-void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
-void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
-void __init kvm_riscv_gstage_mode_detect(void);
-unsigned long __init kvm_riscv_gstage_mode(void);
-int kvm_riscv_gstage_gpa_bits(void);
-
-void __init kvm_riscv_gstage_vmid_detect(void);
-unsigned long kvm_riscv_gstage_vmid_bits(void);
-int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
-bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
-void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
-
int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);
void __kvm_riscv_unpriv_trap(void);
@@ -415,7 +321,6 @@ void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);
-void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);
#endif /* __RISCV_KVM_HOST_H__ */
diff --git a/arch/riscv/include/asm/kvm_mmu.h b/arch/riscv/include/asm/kvm_mmu.h
new file mode 100644
index 000000000000..5439e76f0a96
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_mmu.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Ventana Micro Systems Inc.
+ */
+
+#ifndef __RISCV_KVM_MMU_H_
+#define __RISCV_KVM_MMU_H_
+
+#include <asm/kvm_gstage.h>
+
+int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
+ unsigned long size, bool writable, bool in_atomic);
+void kvm_riscv_mmu_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size);
+int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
+ gpa_t gpa, unsigned long hva, bool is_write,
+ struct kvm_gstage_mapping *out_map);
+int kvm_riscv_mmu_alloc_pgd(struct kvm *kvm);
+void kvm_riscv_mmu_free_pgd(struct kvm *kvm);
+void kvm_riscv_mmu_update_hgatp(struct kvm_vcpu *vcpu);
+
+#endif
diff --git a/arch/riscv/include/asm/kvm_tlb.h b/arch/riscv/include/asm/kvm_tlb.h
new file mode 100644
index 000000000000..38a2f933ad3a
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_tlb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Ventana Micro Systems Inc.
+ */
+
+#ifndef __RISCV_KVM_TLB_H_
+#define __RISCV_KVM_TLB_H_
+
+#include <linux/kvm_types.h>
+
+enum kvm_riscv_hfence_type {
+ KVM_RISCV_HFENCE_UNKNOWN = 0,
+ KVM_RISCV_HFENCE_GVMA_VMID_GPA,
+ KVM_RISCV_HFENCE_GVMA_VMID_ALL,
+ KVM_RISCV_HFENCE_VVMA_ASID_GVA,
+ KVM_RISCV_HFENCE_VVMA_ASID_ALL,
+ KVM_RISCV_HFENCE_VVMA_GVA,
+ KVM_RISCV_HFENCE_VVMA_ALL
+};
+
+struct kvm_riscv_hfence {
+ enum kvm_riscv_hfence_type type;
+ unsigned long asid;
+ unsigned long vmid;
+ unsigned long order;
+ gpa_t addr;
+ gpa_t size;
+};
+
+#define KVM_RISCV_VCPU_MAX_HFENCE 64
+
+#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
+
+void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
+ gpa_t gpa, gpa_t gpsz,
+ unsigned long order);
+void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
+void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
+ unsigned long order);
+void kvm_riscv_local_hfence_gvma_all(void);
+void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
+ unsigned long asid,
+ unsigned long gva,
+ unsigned long gvsz,
+ unsigned long order);
+void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
+ unsigned long asid);
+void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
+ unsigned long gva, unsigned long gvsz,
+ unsigned long order);
+void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
+
+void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu);
+
+void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
+
+void kvm_riscv_fence_i(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask);
+void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ gpa_t gpa, gpa_t gpsz,
+ unsigned long order, unsigned long vmid);
+void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ unsigned long vmid);
+void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ unsigned long gva, unsigned long gvsz,
+ unsigned long order, unsigned long asid,
+ unsigned long vmid);
+void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ unsigned long asid, unsigned long vmid);
+void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ unsigned long gva, unsigned long gvsz,
+ unsigned long order, unsigned long vmid);
+void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ unsigned long vmid);
+
+#endif
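
The remote-fence helpers now take the VMID as an explicit parameter instead of
deriving it from struct kvm internally. A sketch of flushing a single guest
page on all harts with the new signature (hbase = -1UL with hmask = 0 meaning
"all vCPUs", matching the callers elsewhere in this series):

static void demo_flush_one_gpa(struct kvm *kvm, gpa_t gpa, unsigned long vmid)
{
	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, gpa, PAGE_SIZE,
				       PAGE_SHIFT, vmid);
}
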
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index 439ab2b3534f..d678fd7e5973 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -49,6 +49,16 @@ struct kvm_vcpu_sbi_extension {
/* Extension specific probe function */
unsigned long (*probe)(struct kvm_vcpu *vcpu);
+
+ /*
+ * Init/deinit functions called once during VCPU init/destroy. These
+ * may be used if an SBI extension needs to allocate resources or
+ * perform init-time-only configuration.
+ */
+ int (*init)(struct kvm_vcpu *vcpu);
+ void (*deinit)(struct kvm_vcpu *vcpu);
+
+ void (*reset)(struct kvm_vcpu *vcpu);
};
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
@@ -72,6 +82,8 @@ const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx);
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_sbi_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num,
unsigned long *reg_val);
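
The new optional init/deinit/reset callbacks let per-extension state follow the
vCPU lifecycle. A hypothetical extension wiring them up (the demo names and the
extension ID are assumptions for illustration, not part of this patch):

static int demo_sbi_init(struct kvm_vcpu *vcpu)
{
	return 0;	/* e.g. allocate per-vCPU state here */
}

static void demo_sbi_deinit(struct kvm_vcpu *vcpu)
{
	/* free whatever init allocated */
}

static void demo_sbi_reset(struct kvm_vcpu *vcpu)
{
	/* return per-vCPU state to its power-on values */
}

static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_demo = {
	.extid_start = 0x08000000,	/* assumed experimental ID */
	.extid_end   = 0x08000000,
	.init   = demo_sbi_init,
	.deinit = demo_sbi_deinit,
	.reset  = demo_sbi_reset,
};
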
diff --git a/arch/riscv/include/asm/kvm_vmid.h b/arch/riscv/include/asm/kvm_vmid.h
new file mode 100644
index 000000000000..ab98e1434fb7
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vmid.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Ventana Micro Systems Inc.
+ */
+
+#ifndef __RISCV_KVM_VMID_H_
+#define __RISCV_KVM_VMID_H_
+
+#include <linux/kvm_types.h>
+
+struct kvm_vmid {
+ /*
+ * Writes to vmid_version and vmid happen with vmid_lock held
+ * whereas reads happen without any lock held.
+ */
+ unsigned long vmid_version;
+ unsigned long vmid;
+};
+
+void __init kvm_riscv_gstage_vmid_detect(void);
+unsigned long kvm_riscv_gstage_vmid_bits(void);
+int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
+bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
+void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
+void kvm_riscv_gstage_vmid_sanitize(struct kvm_vcpu *vcpu);
+
+#endif
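
struct kvm_vmid is written under vmid_lock but read locklessly, which is why
callers elsewhere in this series snapshot the field with READ_ONCE(). A minimal
sketch of the reader side:

static unsigned long demo_current_vmid(struct kvm_vmid *v)
{
	/* single, torn-free load; no lock taken on the read side */
	return READ_ONCE(v->vmid);
}
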
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 7de05db7d3bd..1018d2216901 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -397,24 +397,8 @@ static inline struct page *pgd_page(pgd_t pgd)
p4d_t *p4d_offset(pgd_t *pgd, unsigned long address);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pte_devmap(pte_t pte);
static inline pte_t pmd_pte(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud);
-
-static inline int pmd_devmap(pmd_t pmd)
-{
- return pte_devmap(pmd_pte(pmd));
-}
-
-static inline int pud_devmap(pud_t pud)
-{
- return pte_devmap(pud_pte(pud));
-}
-
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
#endif
#endif /* _ASM_RISCV_PGTABLE_64_H */
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index a8f5205cea54..179bd4afece4 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -19,7 +19,6 @@
#define _PAGE_SOFT (3 << 8) /* Reserved for software */
#define _PAGE_SPECIAL (1 << 8) /* RSW: 0x1 */
-#define _PAGE_DEVMAP (1 << 9) /* RSW, devmap */
#define _PAGE_TABLE _PAGE_PRESENT
/*
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 5bd5aae60d53..91697fbf1f90 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -409,13 +409,6 @@ static inline int pte_special(pte_t pte)
return pte_val(pte) & _PAGE_SPECIAL;
}
-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pte_devmap(pte_t pte)
-{
- return pte_val(pte) & _PAGE_DEVMAP;
-}
-#endif
-
/* static inline pte_t pte_rdprotect(pte_t pte) */
static inline pte_t pte_wrprotect(pte_t pte)
@@ -457,11 +450,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
-static inline pte_t pte_mkdevmap(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_DEVMAP);
-}
-
static inline pte_t pte_mkhuge(pte_t pte)
{
return pte;
@@ -790,11 +778,6 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
- return pte_pmd(pte_mkdevmap(pmd_pte(pmd)));
-}
-
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static inline bool pmd_special(pmd_t pmd)
{
@@ -946,11 +929,6 @@ static inline pud_t pud_mkhuge(pud_t pud)
return pud;
}
-static inline pud_t pud_mkdevmap(pud_t pud)
-{
- return pte_pud(pte_mkdevmap(pud_pte(pud)));
-}
-
static inline int pudp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
pud_t entry, int dirty)
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 1a20dd746a49..eed0abc40514 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -63,7 +63,6 @@ void flush_pud_tlb_range(struct vm_area_struct *vma, unsigned long start,
bool arch_tlbbatch_should_defer(struct mm_struct *mm);
void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
struct mm_struct *mm, unsigned long start, unsigned long end);
-void arch_flush_tlb_batched_pending(struct mm_struct *mm);
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
extern unsigned long tlb_flush_all_threshold;
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 5f59fd226cc5..ef27d4289da1 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -18,6 +18,7 @@
#define __KVM_HAVE_IRQ_LINE
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_DIRTY_LOG_PAGE_OFFSET 64
#define KVM_INTERRUPT_SET -1U
#define KVM_INTERRUPT_UNSET -2U
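
KVM_DIRTY_LOG_PAGE_OFFSET is where the per-vCPU dirty ring is mmap()ed and
pairs with the HAVE_KVM_DIRTY_RING_ACQ_REL selection added below. A userspace
sketch of enabling the ring on a VM fd with the standard KVM ioctls (error
handling omitted):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int demo_enable_dirty_ring(int vm_fd, unsigned int ring_bytes)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_DIRTY_LOG_RING_ACQ_REL,
		.args = { ring_bytes },	/* power-of-two ring size in bytes */
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
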
diff --git a/arch/riscv/kernel/cfi.c b/arch/riscv/kernel/cfi.c
index 64bdd3e1ab8c..6ec9dbd7292e 100644
--- a/arch/riscv/kernel/cfi.c
+++ b/arch/riscv/kernel/cfi.c
@@ -75,56 +75,3 @@ enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
return report_cfi_failure(regs, regs->epc, &target, type);
}
-
-#ifdef CONFIG_CFI_CLANG
-struct bpf_insn;
-
-/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
-extern unsigned int __bpf_prog_runX(const void *ctx,
- const struct bpf_insn *insn);
-
-/*
- * Force a reference to the external symbol so the compiler generates
- * __kcfi_typid.
- */
-__ADDRESSABLE(__bpf_prog_runX);
-
-/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
-asm (
-" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
-" .type cfi_bpf_hash,@object \n"
-" .globl cfi_bpf_hash \n"
-" .p2align 2, 0x0 \n"
-"cfi_bpf_hash: \n"
-" .word __kcfi_typeid___bpf_prog_runX \n"
-" .size cfi_bpf_hash, 4 \n"
-" .popsection \n"
-);
-
-/* Must match bpf_callback_t */
-extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
-
-__ADDRESSABLE(__bpf_callback_fn);
-
-/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
-asm (
-" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
-" .type cfi_bpf_subprog_hash,@object \n"
-" .globl cfi_bpf_subprog_hash \n"
-" .p2align 2, 0x0 \n"
-"cfi_bpf_subprog_hash: \n"
-" .word __kcfi_typeid___bpf_callback_fn \n"
-" .size cfi_bpf_subprog_hash, 4 \n"
-" .popsection \n"
-);
-
-u32 cfi_get_func_hash(void *func)
-{
- u32 hash;
-
- if (get_kernel_nofault(hash, func - cfi_get_offset()))
- return 0;
-
- return hash;
-}
-#endif
diff --git a/arch/riscv/kernel/kexec_elf.c b/arch/riscv/kernel/kexec_elf.c
index f4755d49b89e..56444c7bd34e 100644
--- a/arch/riscv/kernel/kexec_elf.c
+++ b/arch/riscv/kernel/kexec_elf.c
@@ -95,6 +95,7 @@ static int elf_find_pbase(struct kimage *image, unsigned long kernel_len,
kbuf.buf_align = PMD_SIZE;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
+ kbuf.cma = NULL;
kbuf.top_down = false;
ret = arch_kexec_locate_mem_hole(&kbuf);
if (!ret) {
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 14888e5ea19a..f90cce7a3ace 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -21,6 +21,8 @@
#include <linux/efi.h>
#include <linux/crash_dump.h>
#include <linux/panic_notifier.h>
+#include <linux/jump_label.h>
+#include <linux/gcd.h>
#include <asm/acpi.h>
#include <asm/alternative.h>
@@ -362,6 +364,9 @@ void __init setup_arch(char **cmdline_p)
riscv_user_isa_enable();
riscv_spinlock_init();
+
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_ZBB) || !riscv_isa_extension_available(NULL, ZBB))
+ static_branch_disable(&efficient_ffs_key);
}
bool arch_cpu_is_hotpluggable(int cpu)
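
efficient_ffs_key is a static key that defaults to true and is disabled exactly
once at boot when Zbb is absent; afterwards the branch costs a patched-in nop
rather than a runtime test. A generic sketch of the same pattern (demo_* names
are illustrative):

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_TRUE(demo_fast_path_key);

/* __ffs-style: first set bit, 0-based; behaviour for word == 0 is undefined */
static unsigned long demo___ffs(unsigned long word)
{
	unsigned long bit = 0;

	if (static_branch_likely(&demo_fast_path_key))
		return __builtin_ctzl(word);	/* single ctz instruction with Zbb */

	while (!(word & 1UL)) {
		word >>= 1;
		bit++;
	}
	return bit;
}
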
diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig
index 704c2899197e..5a62091b0809 100644
--- a/arch/riscv/kvm/Kconfig
+++ b/arch/riscv/kvm/Kconfig
@@ -25,6 +25,7 @@ config KVM
select HAVE_KVM_MSI
select HAVE_KVM_VCPU_ASYNC_IOCTL
select HAVE_KVM_READONLY_MEM
+ select HAVE_KVM_DIRTY_RING_ACQ_REL
select KVM_COMMON
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_GENERIC_HARDWARE_ENABLING
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 4e0bba91d284..4b199dc3e58b 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -14,6 +14,7 @@ kvm-y += aia.o
kvm-y += aia_aplic.o
kvm-y += aia_device.o
kvm-y += aia_imsic.o
+kvm-y += gstage.o
kvm-y += main.o
kvm-y += mmu.o
kvm-y += nacl.o
diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
index 806c41931cde..b195a93add1c 100644
--- a/arch/riscv/kvm/aia_device.c
+++ b/arch/riscv/kvm/aia_device.c
@@ -509,12 +509,12 @@ void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
kvm_riscv_vcpu_aia_imsic_reset(vcpu);
}
-int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
+void kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;
if (!kvm_riscv_aia_available())
- return 0;
+ return;
/*
* We don't do any memory allocations over here because these
@@ -526,8 +526,6 @@ int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
/* Initialize default values in AIA vcpu context */
vaia->imsic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
vaia->hart_index = vcpu->vcpu_idx;
-
- return 0;
}
void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
index 2ff865943ebb..fda0346f0ea1 100644
--- a/arch/riscv/kvm/aia_imsic.c
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -16,6 +16,7 @@
#include <linux/swab.h>
#include <kvm/iodev.h>
#include <asm/csr.h>
+#include <asm/kvm_mmu.h>
#define IMSIC_MAX_EIX (IMSIC_MAX_ID / BITS_PER_TYPE(u64))
@@ -745,9 +746,8 @@ void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
*/
/* Purge the G-stage mapping */
- kvm_riscv_gstage_iounmap(vcpu->kvm,
- vcpu->arch.aia_context.imsic_addr,
- IMSIC_MMIO_PAGE_SZ);
+ kvm_riscv_mmu_iounmap(vcpu->kvm, vcpu->arch.aia_context.imsic_addr,
+ IMSIC_MMIO_PAGE_SZ);
/* TODO: Purge the IOMMU mapping ??? */
@@ -830,9 +830,9 @@ int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);
/* Update G-stage mapping for the new IMSIC VS-file */
- ret = kvm_riscv_gstage_ioremap(kvm, vcpu->arch.aia_context.imsic_addr,
- new_vsfile_pa, IMSIC_MMIO_PAGE_SZ,
- true, true);
+ ret = kvm_riscv_mmu_ioremap(kvm, vcpu->arch.aia_context.imsic_addr,
+ new_vsfile_pa, IMSIC_MMIO_PAGE_SZ,
+ true, true);
if (ret)
goto fail_free_vsfile_hgei;
diff --git a/arch/riscv/kvm/gstage.c b/arch/riscv/kvm/gstage.c
new file mode 100644
index 000000000000..24c270d6d0e2
--- /dev/null
+++ b/arch/riscv/kvm/gstage.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2025 Ventana Micro Systems Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/pgtable.h>
+#include <asm/kvm_gstage.h>
+
+#ifdef CONFIG_64BIT
+unsigned long kvm_riscv_gstage_mode __ro_after_init = HGATP_MODE_SV39X4;
+unsigned long kvm_riscv_gstage_pgd_levels __ro_after_init = 3;
+#else
+unsigned long kvm_riscv_gstage_mode __ro_after_init = HGATP_MODE_SV32X4;
+unsigned long kvm_riscv_gstage_pgd_levels __ro_after_init = 2;
+#endif
+
+#define gstage_pte_leaf(__ptep) \
+ (pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC))
+
+static inline unsigned long gstage_pte_index(gpa_t addr, u32 level)
+{
+ unsigned long mask;
+ unsigned long shift = HGATP_PAGE_SHIFT + (kvm_riscv_gstage_index_bits * level);
+
+ if (level == (kvm_riscv_gstage_pgd_levels - 1))
+ mask = (PTRS_PER_PTE * (1UL << kvm_riscv_gstage_pgd_xbits)) - 1;
+ else
+ mask = PTRS_PER_PTE - 1;
+
+ return (addr >> shift) & mask;
+}
+
+static inline unsigned long gstage_pte_page_vaddr(pte_t pte)
+{
+ return (unsigned long)pfn_to_virt(__page_val_to_pfn(pte_val(pte)));
+}
+
+static int gstage_page_size_to_level(unsigned long page_size, u32 *out_level)
+{
+ u32 i;
+ unsigned long psz = 1UL << 12;
+
+ for (i = 0; i < kvm_riscv_gstage_pgd_levels; i++) {
+ if (page_size == (psz << (i * kvm_riscv_gstage_index_bits))) {
+ *out_level = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int gstage_level_to_page_order(u32 level, unsigned long *out_pgorder)
+{
+ if (kvm_riscv_gstage_pgd_levels < level)
+ return -EINVAL;
+
+ *out_pgorder = 12 + (level * kvm_riscv_gstage_index_bits);
+ return 0;
+}
+
+static int gstage_level_to_page_size(u32 level, unsigned long *out_pgsize)
+{
+ int rc;
+ unsigned long page_order = PAGE_SHIFT;
+
+ rc = gstage_level_to_page_order(level, &page_order);
+ if (rc)
+ return rc;
+
+ *out_pgsize = BIT(page_order);
+ return 0;
+}
+
+bool kvm_riscv_gstage_get_leaf(struct kvm_gstage *gstage, gpa_t addr,
+ pte_t **ptepp, u32 *ptep_level)
+{
+ pte_t *ptep;
+ u32 current_level = kvm_riscv_gstage_pgd_levels - 1;
+
+ *ptep_level = current_level;
+ ptep = (pte_t *)gstage->pgd;
+ ptep = &ptep[gstage_pte_index(addr, current_level)];
+ while (ptep && pte_val(ptep_get(ptep))) {
+ if (gstage_pte_leaf(ptep)) {
+ *ptep_level = current_level;
+ *ptepp = ptep;
+ return true;
+ }
+
+ if (current_level) {
+ current_level--;
+ *ptep_level = current_level;
+ ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
+ ptep = &ptep[gstage_pte_index(addr, current_level)];
+ } else {
+ ptep = NULL;
+ }
+ }
+
+ return false;
+}
+
+static void gstage_tlb_flush(struct kvm_gstage *gstage, u32 level, gpa_t addr)
+{
+ unsigned long order = PAGE_SHIFT;
+
+ if (gstage_level_to_page_order(level, &order))
+ return;
+ addr &= ~(BIT(order) - 1);
+
+ if (gstage->flags & KVM_GSTAGE_FLAGS_LOCAL)
+ kvm_riscv_local_hfence_gvma_vmid_gpa(gstage->vmid, addr, BIT(order), order);
+ else
+ kvm_riscv_hfence_gvma_vmid_gpa(gstage->kvm, -1UL, 0, addr, BIT(order), order,
+ gstage->vmid);
+}
+
+int kvm_riscv_gstage_set_pte(struct kvm_gstage *gstage,
+ struct kvm_mmu_memory_cache *pcache,
+ const struct kvm_gstage_mapping *map)
+{
+ u32 current_level = kvm_riscv_gstage_pgd_levels - 1;
+ pte_t *next_ptep = (pte_t *)gstage->pgd;
+ pte_t *ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
+
+ if (current_level < map->level)
+ return -EINVAL;
+
+ while (current_level != map->level) {
+ if (gstage_pte_leaf(ptep))
+ return -EEXIST;
+
+ if (!pte_val(ptep_get(ptep))) {
+ if (!pcache)
+ return -ENOMEM;
+ next_ptep = kvm_mmu_memory_cache_alloc(pcache);
+ if (!next_ptep)
+ return -ENOMEM;
+ set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
+ __pgprot(_PAGE_TABLE)));
+ } else {
+ if (gstage_pte_leaf(ptep))
+ return -EEXIST;
+ next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
+ }
+
+ current_level--;
+ ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
+ }
+
+ if (pte_val(*ptep) != pte_val(map->pte)) {
+ set_pte(ptep, map->pte);
+ if (gstage_pte_leaf(ptep))
+ gstage_tlb_flush(gstage, current_level, map->addr);
+ }
+
+ return 0;
+}
+
+int kvm_riscv_gstage_map_page(struct kvm_gstage *gstage,
+ struct kvm_mmu_memory_cache *pcache,
+ gpa_t gpa, phys_addr_t hpa, unsigned long page_size,
+ bool page_rdonly, bool page_exec,
+ struct kvm_gstage_mapping *out_map)
+{
+ pgprot_t prot;
+ int ret;
+
+ out_map->addr = gpa;
+ out_map->level = 0;
+
+ ret = gstage_page_size_to_level(page_size, &out_map->level);
+ if (ret)
+ return ret;
+
+ /*
+ * A RISC-V implementation can choose to either:
+ * 1) Update 'A' and 'D' PTE bits in hardware
+ * 2) Generate a page fault when the 'A' and/or 'D' bits are not set
+ * in the PTE, so that software can update these bits.
+ *
+ * We support both options mentioned above. To achieve this, we
+ * always set the 'A' and 'D' PTE bits when creating a G-stage
+ * mapping. To support KVM dirty page logging with both options,
+ * we write-protect G-stage PTEs to track dirty pages.
+ */
+
+ if (page_exec) {
+ if (page_rdonly)
+ prot = PAGE_READ_EXEC;
+ else
+ prot = PAGE_WRITE_EXEC;
+ } else {
+ if (page_rdonly)
+ prot = PAGE_READ;
+ else
+ prot = PAGE_WRITE;
+ }
+ out_map->pte = pfn_pte(PFN_DOWN(hpa), prot);
+ out_map->pte = pte_mkdirty(out_map->pte);
+
+ return kvm_riscv_gstage_set_pte(gstage, pcache, out_map);
+}
+
+void kvm_riscv_gstage_op_pte(struct kvm_gstage *gstage, gpa_t addr,
+ pte_t *ptep, u32 ptep_level, enum kvm_riscv_gstage_op op)
+{
+ int i, ret;
+ pte_t old_pte, *next_ptep;
+ u32 next_ptep_level;
+ unsigned long next_page_size, page_size;
+
+ ret = gstage_level_to_page_size(ptep_level, &page_size);
+ if (ret)
+ return;
+
+ WARN_ON(addr & (page_size - 1));
+
+ if (!pte_val(ptep_get(ptep)))
+ return;
+
+ if (ptep_level && !gstage_pte_leaf(ptep)) {
+ next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
+ next_ptep_level = ptep_level - 1;
+ ret = gstage_level_to_page_size(next_ptep_level, &next_page_size);
+ if (ret)
+ return;
+
+ if (op == GSTAGE_OP_CLEAR)
+ set_pte(ptep, __pte(0));
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ kvm_riscv_gstage_op_pte(gstage, addr + i * next_page_size,
+ &next_ptep[i], next_ptep_level, op);
+ if (op == GSTAGE_OP_CLEAR)
+ put_page(virt_to_page(next_ptep));
+ } else {
+ old_pte = *ptep;
+ if (op == GSTAGE_OP_CLEAR)
+ set_pte(ptep, __pte(0));
+ else if (op == GSTAGE_OP_WP)
+ set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
+ if (pte_val(*ptep) != pte_val(old_pte))
+ gstage_tlb_flush(gstage, ptep_level, addr);
+ }
+}
+
+void kvm_riscv_gstage_unmap_range(struct kvm_gstage *gstage,
+ gpa_t start, gpa_t size, bool may_block)
+{
+ int ret;
+ pte_t *ptep;
+ u32 ptep_level;
+ bool found_leaf;
+ unsigned long page_size;
+ gpa_t addr = start, end = start + size;
+
+ while (addr < end) {
+ found_leaf = kvm_riscv_gstage_get_leaf(gstage, addr, &ptep, &ptep_level);
+ ret = gstage_level_to_page_size(ptep_level, &page_size);
+ if (ret)
+ break;
+
+ if (!found_leaf)
+ goto next;
+
+ if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
+ kvm_riscv_gstage_op_pte(gstage, addr, ptep,
+ ptep_level, GSTAGE_OP_CLEAR);
+
+next:
+ addr += page_size;
+
+ /*
+ * If the range is too large, release the kvm->mmu_lock
+ * to prevent starvation and lockup detector warnings.
+ */
+ if (!(gstage->flags & KVM_GSTAGE_FLAGS_LOCAL) && may_block && addr < end)
+ cond_resched_lock(&gstage->kvm->mmu_lock);
+ }
+}
+
+void kvm_riscv_gstage_wp_range(struct kvm_gstage *gstage, gpa_t start, gpa_t end)
+{
+ int ret;
+ pte_t *ptep;
+ u32 ptep_level;
+ bool found_leaf;
+ gpa_t addr = start;
+ unsigned long page_size;
+
+ while (addr < end) {
+ found_leaf = kvm_riscv_gstage_get_leaf(gstage, addr, &ptep, &ptep_level);
+ ret = gstage_level_to_page_size(ptep_level, &page_size);
+ if (ret)
+ break;
+
+ if (!found_leaf)
+ goto next;
+
+ if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
+ kvm_riscv_gstage_op_pte(gstage, addr, ptep,
+ ptep_level, GSTAGE_OP_WP);
+
+next:
+ addr += page_size;
+ }
+}
+
+void __init kvm_riscv_gstage_mode_detect(void)
+{
+#ifdef CONFIG_64BIT
+ /* Try Sv57x4 G-stage mode */
+ csr_write(CSR_HGATP, HGATP_MODE_SV57X4 << HGATP_MODE_SHIFT);
+ if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV57X4) {
+ kvm_riscv_gstage_mode = HGATP_MODE_SV57X4;
+ kvm_riscv_gstage_pgd_levels = 5;
+ goto skip_sv48x4_test;
+ }
+
+ /* Try Sv48x4 G-stage mode */
+ csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
+ if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
+ kvm_riscv_gstage_mode = HGATP_MODE_SV48X4;
+ kvm_riscv_gstage_pgd_levels = 4;
+ }
+skip_sv48x4_test:
+
+ csr_write(CSR_HGATP, 0);
+ kvm_riscv_local_hfence_gvma_all();
+#endif
+}
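
kvm_riscv_gstage_mode_detect() probes Sv57x4/Sv48x4 with a write/read-back of
the hgatp CSR: the mode field is WARL, so an implementation that lacks a mode
leaves the field unchanged, and a read-back mismatch means "unsupported". The
probe distilled into a helper (csr_write/csr_read as in asm/csr.h):

static bool demo_hgatp_mode_supported(unsigned long mode)
{
	csr_write(CSR_HGATP, mode << HGATP_MODE_SHIFT);
	return (csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == mode;
}
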
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index 4b24705dc63a..67c876de74ef 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/cpufeature.h>
+#include <asm/kvm_mmu.h>
#include <asm/kvm_nacl.h>
#include <asm/sbi.h>
@@ -134,7 +135,7 @@ static int __init riscv_kvm_init(void)
(rc) ? slist : "no features");
}
- switch (kvm_riscv_gstage_mode()) {
+ switch (kvm_riscv_gstage_mode) {
case HGATP_MODE_SV32X4:
str = "Sv32x4";
break;
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 1087ea74567b..a1c3b2ec1dde 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -6,348 +6,38 @@
* Anup Patel <anup.patel@wdc.com>
*/
-#include <linux/bitops.h>
#include <linux/errno.h>
-#include <linux/err.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/sched/signal.h>
+#include <asm/kvm_mmu.h>
#include <asm/kvm_nacl.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#ifdef CONFIG_64BIT
-static unsigned long gstage_mode __ro_after_init = (HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
-static unsigned long gstage_pgd_levels __ro_after_init = 3;
-#define gstage_index_bits 9
-#else
-static unsigned long gstage_mode __ro_after_init = (HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
-static unsigned long gstage_pgd_levels __ro_after_init = 2;
-#define gstage_index_bits 10
-#endif
-
-#define gstage_pgd_xbits 2
-#define gstage_pgd_size (1UL << (HGATP_PAGE_SHIFT + gstage_pgd_xbits))
-#define gstage_gpa_bits (HGATP_PAGE_SHIFT + \
- (gstage_pgd_levels * gstage_index_bits) + \
- gstage_pgd_xbits)
-#define gstage_gpa_size ((gpa_t)(1ULL << gstage_gpa_bits))
-
-#define gstage_pte_leaf(__ptep) \
- (pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC))
-
-static inline unsigned long gstage_pte_index(gpa_t addr, u32 level)
-{
- unsigned long mask;
- unsigned long shift = HGATP_PAGE_SHIFT + (gstage_index_bits * level);
-
- if (level == (gstage_pgd_levels - 1))
- mask = (PTRS_PER_PTE * (1UL << gstage_pgd_xbits)) - 1;
- else
- mask = PTRS_PER_PTE - 1;
-
- return (addr >> shift) & mask;
-}
-
-static inline unsigned long gstage_pte_page_vaddr(pte_t pte)
-{
- return (unsigned long)pfn_to_virt(__page_val_to_pfn(pte_val(pte)));
-}
-
-static int gstage_page_size_to_level(unsigned long page_size, u32 *out_level)
-{
- u32 i;
- unsigned long psz = 1UL << 12;
-
- for (i = 0; i < gstage_pgd_levels; i++) {
- if (page_size == (psz << (i * gstage_index_bits))) {
- *out_level = i;
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-static int gstage_level_to_page_order(u32 level, unsigned long *out_pgorder)
-{
- if (gstage_pgd_levels < level)
- return -EINVAL;
-
- *out_pgorder = 12 + (level * gstage_index_bits);
- return 0;
-}
-
-static int gstage_level_to_page_size(u32 level, unsigned long *out_pgsize)
-{
- int rc;
- unsigned long page_order = PAGE_SHIFT;
-
- rc = gstage_level_to_page_order(level, &page_order);
- if (rc)
- return rc;
-
- *out_pgsize = BIT(page_order);
- return 0;
-}
-
-static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
- pte_t **ptepp, u32 *ptep_level)
-{
- pte_t *ptep;
- u32 current_level = gstage_pgd_levels - 1;
-
- *ptep_level = current_level;
- ptep = (pte_t *)kvm->arch.pgd;
- ptep = &ptep[gstage_pte_index(addr, current_level)];
- while (ptep && pte_val(ptep_get(ptep))) {
- if (gstage_pte_leaf(ptep)) {
- *ptep_level = current_level;
- *ptepp = ptep;
- return true;
- }
-
- if (current_level) {
- current_level--;
- *ptep_level = current_level;
- ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
- ptep = &ptep[gstage_pte_index(addr, current_level)];
- } else {
- ptep = NULL;
- }
- }
-
- return false;
-}
-
-static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
-{
- unsigned long order = PAGE_SHIFT;
-
- if (gstage_level_to_page_order(level, &order))
- return;
- addr &= ~(BIT(order) - 1);
-
- kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order);
-}
-
-static int gstage_set_pte(struct kvm *kvm, u32 level,
- struct kvm_mmu_memory_cache *pcache,
- gpa_t addr, const pte_t *new_pte)
-{
- u32 current_level = gstage_pgd_levels - 1;
- pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
- pte_t *ptep = &next_ptep[gstage_pte_index(addr, current_level)];
-
- if (current_level < level)
- return -EINVAL;
-
- while (current_level != level) {
- if (gstage_pte_leaf(ptep))
- return -EEXIST;
-
- if (!pte_val(ptep_get(ptep))) {
- if (!pcache)
- return -ENOMEM;
- next_ptep = kvm_mmu_memory_cache_alloc(pcache);
- if (!next_ptep)
- return -ENOMEM;
- set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
- __pgprot(_PAGE_TABLE)));
- } else {
- if (gstage_pte_leaf(ptep))
- return -EEXIST;
- next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
- }
-
- current_level--;
- ptep = &next_ptep[gstage_pte_index(addr, current_level)];
- }
-
- set_pte(ptep, *new_pte);
- if (gstage_pte_leaf(ptep))
- gstage_remote_tlb_flush(kvm, current_level, addr);
-
- return 0;
-}
-
-static int gstage_map_page(struct kvm *kvm,
- struct kvm_mmu_memory_cache *pcache,
- gpa_t gpa, phys_addr_t hpa,
- unsigned long page_size,
- bool page_rdonly, bool page_exec)
-{
- int ret;
- u32 level = 0;
- pte_t new_pte;
- pgprot_t prot;
-
- ret = gstage_page_size_to_level(page_size, &level);
- if (ret)
- return ret;
-
- /*
- * A RISC-V implementation can choose to either:
- * 1) Update 'A' and 'D' PTE bits in hardware
- * 2) Generate page fault when 'A' and/or 'D' bits are not set
- * PTE so that software can update these bits.
- *
- * We support both options mentioned above. To achieve this, we
- * always set 'A' and 'D' PTE bits at time of creating G-stage
- * mapping. To support KVM dirty page logging with both options
- * mentioned above, we will write-protect G-stage PTEs to track
- * dirty pages.
- */
- if (page_exec) {
- if (page_rdonly)
- prot = PAGE_READ_EXEC;
- else
- prot = PAGE_WRITE_EXEC;
- } else {
- if (page_rdonly)
- prot = PAGE_READ;
- else
- prot = PAGE_WRITE;
- }
- new_pte = pfn_pte(PFN_DOWN(hpa), prot);
- new_pte = pte_mkdirty(new_pte);
-
- return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);
-}
-
-enum gstage_op {
- GSTAGE_OP_NOP = 0, /* Nothing */
- GSTAGE_OP_CLEAR, /* Clear/Unmap */
- GSTAGE_OP_WP, /* Write-protect */
-};
-
-static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
- pte_t *ptep, u32 ptep_level, enum gstage_op op)
-{
- int i, ret;
- pte_t *next_ptep;
- u32 next_ptep_level;
- unsigned long next_page_size, page_size;
-
- ret = gstage_level_to_page_size(ptep_level, &page_size);
- if (ret)
- return;
-
- BUG_ON(addr & (page_size - 1));
-
- if (!pte_val(ptep_get(ptep)))
- return;
-
- if (ptep_level && !gstage_pte_leaf(ptep)) {
- next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
- next_ptep_level = ptep_level - 1;
- ret = gstage_level_to_page_size(next_ptep_level,
- &next_page_size);
- if (ret)
- return;
-
- if (op == GSTAGE_OP_CLEAR)
- set_pte(ptep, __pte(0));
- for (i = 0; i < PTRS_PER_PTE; i++)
- gstage_op_pte(kvm, addr + i * next_page_size,
- &next_ptep[i], next_ptep_level, op);
- if (op == GSTAGE_OP_CLEAR)
- put_page(virt_to_page(next_ptep));
- } else {
- if (op == GSTAGE_OP_CLEAR)
- set_pte(ptep, __pte(0));
- else if (op == GSTAGE_OP_WP)
- set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
- gstage_remote_tlb_flush(kvm, ptep_level, addr);
- }
-}
-
-static void gstage_unmap_range(struct kvm *kvm, gpa_t start,
- gpa_t size, bool may_block)
-{
- int ret;
- pte_t *ptep;
- u32 ptep_level;
- bool found_leaf;
- unsigned long page_size;
- gpa_t addr = start, end = start + size;
-
- while (addr < end) {
- found_leaf = gstage_get_leaf_entry(kvm, addr,
- &ptep, &ptep_level);
- ret = gstage_level_to_page_size(ptep_level, &page_size);
- if (ret)
- break;
-
- if (!found_leaf)
- goto next;
-
- if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
- gstage_op_pte(kvm, addr, ptep,
- ptep_level, GSTAGE_OP_CLEAR);
-
-next:
- addr += page_size;
-
- /*
- * If the range is too large, release the kvm->mmu_lock
- * to prevent starvation and lockup detector warnings.
- */
- if (may_block && addr < end)
- cond_resched_lock(&kvm->mmu_lock);
- }
-}
-
-static void gstage_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
-{
- int ret;
- pte_t *ptep;
- u32 ptep_level;
- bool found_leaf;
- gpa_t addr = start;
- unsigned long page_size;
-
- while (addr < end) {
- found_leaf = gstage_get_leaf_entry(kvm, addr,
- &ptep, &ptep_level);
- ret = gstage_level_to_page_size(ptep_level, &page_size);
- if (ret)
- break;
-
- if (!found_leaf)
- goto next;
-
- if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
- gstage_op_pte(kvm, addr, ptep,
- ptep_level, GSTAGE_OP_WP);
-
-next:
- addr += page_size;
- }
-}
-
-static void gstage_wp_memory_region(struct kvm *kvm, int slot)
+static void mmu_wp_memory_region(struct kvm *kvm, int slot)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+ struct kvm_gstage gstage;
+
+ gstage.kvm = kvm;
+ gstage.flags = 0;
+ gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
+ gstage.pgd = kvm->arch.pgd;
spin_lock(&kvm->mmu_lock);
- gstage_wp_range(kvm, start, end);
+ kvm_riscv_gstage_wp_range(&gstage, start, end);
spin_unlock(&kvm->mmu_lock);
- kvm_flush_remote_tlbs(kvm);
+ kvm_flush_remote_tlbs_memslot(kvm, memslot);
}
-int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
- phys_addr_t hpa, unsigned long size,
- bool writable, bool in_atomic)
+int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
+ unsigned long size, bool writable, bool in_atomic)
{
- pte_t pte;
int ret = 0;
unsigned long pfn;
phys_addr_t addr, end;
@@ -355,22 +45,31 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
.gfp_zero = __GFP_ZERO,
};
+ struct kvm_gstage_mapping map;
+ struct kvm_gstage gstage;
+
+ gstage.kvm = kvm;
+ gstage.flags = 0;
+ gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
+ gstage.pgd = kvm->arch.pgd;
end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
pfn = __phys_to_pfn(hpa);
for (addr = gpa; addr < end; addr += PAGE_SIZE) {
- pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+ map.addr = addr;
+ map.pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+ map.level = 0;
if (!writable)
- pte = pte_wrprotect(pte);
+ map.pte = pte_wrprotect(map.pte);
- ret = kvm_mmu_topup_memory_cache(&pcache, gstage_pgd_levels);
+ ret = kvm_mmu_topup_memory_cache(&pcache, kvm_riscv_gstage_pgd_levels);
if (ret)
goto out;
spin_lock(&kvm->mmu_lock);
- ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte);
+ ret = kvm_riscv_gstage_set_pte(&gstage, &pcache, &map);
spin_unlock(&kvm->mmu_lock);
if (ret)
goto out;
@@ -383,10 +82,17 @@ out:
return ret;
}
-void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
+void kvm_riscv_mmu_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
{
+ struct kvm_gstage gstage;
+
+ gstage.kvm = kvm;
+ gstage.flags = 0;
+ gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
+ gstage.pgd = kvm->arch.pgd;
+
spin_lock(&kvm->mmu_lock);
- gstage_unmap_range(kvm, gpa, size, false);
+ kvm_riscv_gstage_unmap_range(&gstage, gpa, size, false);
spin_unlock(&kvm->mmu_lock);
}
@@ -398,8 +104,14 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
+ struct kvm_gstage gstage;
- gstage_wp_range(kvm, start, end);
+ gstage.kvm = kvm;
+ gstage.flags = 0;
+ gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
+ gstage.pgd = kvm->arch.pgd;
+
+ kvm_riscv_gstage_wp_range(&gstage, start, end);
}
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
@@ -416,7 +128,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
- kvm_riscv_gstage_free_pgd(kvm);
+ kvm_riscv_mmu_free_pgd(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
@@ -424,9 +136,15 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
{
gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
phys_addr_t size = slot->npages << PAGE_SHIFT;
+ struct kvm_gstage gstage;
+
+ gstage.kvm = kvm;
+ gstage.flags = 0;
+ gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
+ gstage.pgd = kvm->arch.pgd;
spin_lock(&kvm->mmu_lock);
- gstage_unmap_range(kvm, gpa, size, false);
+ kvm_riscv_gstage_unmap_range(&gstage, gpa, size, false);
spin_unlock(&kvm->mmu_lock);
}
@@ -441,7 +159,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
* the memory slot is write protected.
*/
if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES)
- gstage_wp_memory_region(kvm, new->id);
+ mmu_wp_memory_region(kvm, new->id);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -463,7 +181,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* space addressable by the KVM guest GPA space.
*/
if ((new->base_gfn + new->npages) >=
- (gstage_gpa_size >> PAGE_SHIFT))
+ (kvm_riscv_gstage_gpa_size >> PAGE_SHIFT))
return -EFAULT;
hva = new->userspace_addr;
@@ -487,10 +205,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* +--------------------------------------------+
*/
do {
- struct vm_area_struct *vma = find_vma(current->mm, hva);
+ struct vm_area_struct *vma;
hva_t vm_start, vm_end;
- if (!vma || vma->vm_start >= reg_end)
+ vma = find_vma_intersection(current->mm, hva, reg_end);
+ if (!vma)
break;
/*
@@ -519,9 +238,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
goto out;
}
- ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
- vm_end - vm_start,
- writable, false);
+ ret = kvm_riscv_mmu_ioremap(kvm, gpa, pa, vm_end - vm_start,
+ writable, false);
if (ret)
break;
}
@@ -532,7 +250,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
goto out;
if (ret)
- kvm_riscv_gstage_iounmap(kvm, base_gpa, size);
+ kvm_riscv_mmu_iounmap(kvm, base_gpa, size);
out:
mmap_read_unlock(current->mm);
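Note on the loop above: find_vma(mm, hva) returns the first VMA whose end lies above hva, even when that VMA starts at or beyond reg_end, which the old code had to filter with the explicit vm_start >= reg_end check. find_vma_intersection(mm, hva, reg_end) returns a VMA only if it actually overlaps [hva, reg_end), so a bare NULL check suffices.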
@@ -541,12 +259,18 @@ out:
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
+ struct kvm_gstage gstage;
+
if (!kvm->arch.pgd)
return false;
- gstage_unmap_range(kvm, range->start << PAGE_SHIFT,
- (range->end - range->start) << PAGE_SHIFT,
- range->may_block);
+ gstage.kvm = kvm;
+ gstage.flags = 0;
+ gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
+ gstage.pgd = kvm->arch.pgd;
+ kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT,
+ (range->end - range->start) << PAGE_SHIFT,
+ range->may_block);
return false;
}
@@ -555,14 +279,19 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
pte_t *ptep;
u32 ptep_level = 0;
u64 size = (range->end - range->start) << PAGE_SHIFT;
+ struct kvm_gstage gstage;
if (!kvm->arch.pgd)
return false;
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
- if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
- &ptep, &ptep_level))
+ gstage.kvm = kvm;
+ gstage.flags = 0;
+ gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
+ gstage.pgd = kvm->arch.pgd;
+ if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
+ &ptep, &ptep_level))
return false;
return ptep_test_and_clear_young(NULL, 0, ptep);
@@ -573,22 +302,27 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
pte_t *ptep;
u32 ptep_level = 0;
u64 size = (range->end - range->start) << PAGE_SHIFT;
+ struct kvm_gstage gstage;
if (!kvm->arch.pgd)
return false;
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
- if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
- &ptep, &ptep_level))
+ gstage.kvm = kvm;
+ gstage.flags = 0;
+ gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
+ gstage.pgd = kvm->arch.pgd;
+ if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
+ &ptep, &ptep_level))
return false;
return pte_young(ptep_get(ptep));
}
-int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
- struct kvm_memory_slot *memslot,
- gpa_t gpa, unsigned long hva, bool is_write)
+int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
+ gpa_t gpa, unsigned long hva, bool is_write,
+ struct kvm_gstage_mapping *out_map)
{
int ret;
kvm_pfn_t hfn;
@@ -601,10 +335,19 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
bool logging = (memslot->dirty_bitmap &&
!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
unsigned long vma_pagesize, mmu_seq;
+ struct kvm_gstage gstage;
struct page *page;
+ gstage.kvm = kvm;
+ gstage.flags = 0;
+ gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
+ gstage.pgd = kvm->arch.pgd;
+
+ /* Setup initial state of output mapping */
+ memset(out_map, 0, sizeof(*out_map));
+
/* We need minimum second+third level pages */
- ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
+ ret = kvm_mmu_topup_memory_cache(pcache, kvm_riscv_gstage_pgd_levels);
if (ret) {
kvm_err("Failed to topup G-stage cache\n");
return ret;
@@ -648,7 +391,8 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
return -EFAULT;
}
- hfn = kvm_faultin_pfn(vcpu, gfn, is_write, &writable, &page);
+ hfn = __kvm_faultin_pfn(memslot, gfn, is_write ? FOLL_WRITE : 0,
+ &writable, &page);
if (hfn == KVM_PFN_ERR_HWPOISON) {
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
vma_pageshift, current);
@@ -670,12 +414,12 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
goto out_unlock;
if (writable) {
- mark_page_dirty(kvm, gfn);
- ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
- vma_pagesize, false, true);
+ mark_page_dirty_in_slot(kvm, memslot, gfn);
+ ret = kvm_riscv_gstage_map_page(&gstage, pcache, gpa, hfn << PAGE_SHIFT,
+ vma_pagesize, false, true, out_map);
} else {
- ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
- vma_pagesize, true, true);
+ ret = kvm_riscv_gstage_map_page(&gstage, pcache, gpa, hfn << PAGE_SHIFT,
+ vma_pagesize, true, true, out_map);
}
if (ret)
@@ -687,7 +431,7 @@ out_unlock:
return ret;
}
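The new out_map parameter reports the mapping that the fault path actually installed (address, PTE and level, zero-initialized up front), so a caller such as gstage_page_fault() can inspect the established translation without re-walking the page tables.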
-int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm)
+int kvm_riscv_mmu_alloc_pgd(struct kvm *kvm)
{
struct page *pgd_page;
@@ -697,7 +441,7 @@ int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm)
}
pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(gstage_pgd_size));
+ get_order(kvm_riscv_gstage_pgd_size));
if (!pgd_page)
return -ENOMEM;
kvm->arch.pgd = page_to_virt(pgd_page);
@@ -706,13 +450,18 @@ int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm)
return 0;
}
-void kvm_riscv_gstage_free_pgd(struct kvm *kvm)
+void kvm_riscv_mmu_free_pgd(struct kvm *kvm)
{
+ struct kvm_gstage gstage;
void *pgd = NULL;
spin_lock(&kvm->mmu_lock);
if (kvm->arch.pgd) {
- gstage_unmap_range(kvm, 0UL, gstage_gpa_size, false);
+ gstage.kvm = kvm;
+ gstage.flags = 0;
+ gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
+ gstage.pgd = kvm->arch.pgd;
+ kvm_riscv_gstage_unmap_range(&gstage, 0UL, kvm_riscv_gstage_gpa_size, false);
pgd = READ_ONCE(kvm->arch.pgd);
kvm->arch.pgd = NULL;
kvm->arch.pgd_phys = 0;
@@ -720,12 +469,12 @@ void kvm_riscv_gstage_free_pgd(struct kvm *kvm)
spin_unlock(&kvm->mmu_lock);
if (pgd)
- free_pages((unsigned long)pgd, get_order(gstage_pgd_size));
+ free_pages((unsigned long)pgd, get_order(kvm_riscv_gstage_pgd_size));
}
-void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu)
+void kvm_riscv_mmu_update_hgatp(struct kvm_vcpu *vcpu)
{
- unsigned long hgatp = gstage_mode;
+ unsigned long hgatp = kvm_riscv_gstage_mode << HGATP_MODE_SHIFT;
struct kvm_arch *k = &vcpu->kvm->arch;
hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) & HGATP_VMID;
@@ -736,37 +485,3 @@ void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu)
if (!kvm_riscv_gstage_vmid_bits())
kvm_riscv_local_hfence_gvma_all();
}
-
-void __init kvm_riscv_gstage_mode_detect(void)
-{
-#ifdef CONFIG_64BIT
- /* Try Sv57x4 G-stage mode */
- csr_write(CSR_HGATP, HGATP_MODE_SV57X4 << HGATP_MODE_SHIFT);
- if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV57X4) {
- gstage_mode = (HGATP_MODE_SV57X4 << HGATP_MODE_SHIFT);
- gstage_pgd_levels = 5;
- goto skip_sv48x4_test;
- }
-
- /* Try Sv48x4 G-stage mode */
- csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
- if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
- gstage_mode = (HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
- gstage_pgd_levels = 4;
- }
-skip_sv48x4_test:
-
- csr_write(CSR_HGATP, 0);
- kvm_riscv_local_hfence_gvma_all();
-#endif
-}
-
-unsigned long __init kvm_riscv_gstage_mode(void)
-{
- return gstage_mode >> HGATP_MODE_SHIFT;
-}
-
-int kvm_riscv_gstage_gpa_bits(void)
-{
- return gstage_gpa_bits;
-}
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index 2f91ea5f8493..3c5a70a2b927 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -15,6 +15,8 @@
#include <asm/cpufeature.h>
#include <asm/insn-def.h>
#include <asm/kvm_nacl.h>
+#include <asm/kvm_tlb.h>
+#include <asm/kvm_vmid.h>
#define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
@@ -156,36 +158,13 @@ void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
csr_write(CSR_HGATP, hgatp);
}
-void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
-{
- unsigned long vmid;
-
- if (!kvm_riscv_gstage_vmid_bits() ||
- vcpu->arch.last_exit_cpu == vcpu->cpu)
- return;
-
- /*
- * On RISC-V platforms with hardware VMID support, we share same
- * VMID for all VCPUs of a particular Guest/VM. This means we might
- * have stale G-stage TLB entries on the current Host CPU due to
- * some other VCPU of the same Guest which ran previously on the
- * current Host CPU.
- *
- * To cleanup stale TLB entries, we simply flush all G-stage TLB
- * entries by VMID whenever underlying Host CPU changes for a VCPU.
- */
-
- vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
- kvm_riscv_local_hfence_gvma_vmid_all(vmid);
-}
-
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
local_flush_icache_all();
}
-void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
+void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu)
{
struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
unsigned long vmid = READ_ONCE(v->vmid);
@@ -258,51 +237,58 @@ static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
- unsigned long vmid;
struct kvm_riscv_hfence d = { 0 };
- struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
while (vcpu_hfence_dequeue(vcpu, &d)) {
switch (d.type) {
case KVM_RISCV_HFENCE_UNKNOWN:
break;
case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
- vmid = READ_ONCE(v->vmid);
if (kvm_riscv_nacl_available())
- nacl_hfence_gvma_vmid(nacl_shmem(), vmid,
+ nacl_hfence_gvma_vmid(nacl_shmem(), d.vmid,
d.addr, d.size, d.order);
else
- kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr,
+ kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
d.size, d.order);
break;
+ case KVM_RISCV_HFENCE_GVMA_VMID_ALL:
+ if (kvm_riscv_nacl_available())
+ nacl_hfence_gvma_vmid_all(nacl_shmem(), d.vmid);
+ else
+ kvm_riscv_local_hfence_gvma_vmid_all(d.vmid);
+ break;
case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
- vmid = READ_ONCE(v->vmid);
if (kvm_riscv_nacl_available())
- nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid,
+ nacl_hfence_vvma_asid(nacl_shmem(), d.vmid, d.asid,
d.addr, d.size, d.order);
else
- kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr,
+ kvm_riscv_local_hfence_vvma_asid_gva(d.vmid, d.asid, d.addr,
d.size, d.order);
break;
case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
- vmid = READ_ONCE(v->vmid);
if (kvm_riscv_nacl_available())
- nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid);
+ nacl_hfence_vvma_asid_all(nacl_shmem(), d.vmid, d.asid);
else
- kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid);
+ kvm_riscv_local_hfence_vvma_asid_all(d.vmid, d.asid);
break;
case KVM_RISCV_HFENCE_VVMA_GVA:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
- vmid = READ_ONCE(v->vmid);
if (kvm_riscv_nacl_available())
- nacl_hfence_vvma(nacl_shmem(), vmid,
+ nacl_hfence_vvma(nacl_shmem(), d.vmid,
d.addr, d.size, d.order);
else
- kvm_riscv_local_hfence_vvma_gva(vmid, d.addr,
+ kvm_riscv_local_hfence_vvma_gva(d.vmid, d.addr,
d.size, d.order);
break;
+ case KVM_RISCV_HFENCE_VVMA_ALL:
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
+ if (kvm_riscv_nacl_available())
+ nacl_hfence_vvma_all(nacl_shmem(), d.vmid);
+ else
+ kvm_riscv_local_hfence_vvma_all(d.vmid);
+ break;
default:
break;
}
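Note the pattern change in the dequeue loop above: the VMID now travels inside each queued struct kvm_riscv_hfence (d.vmid) instead of being re-read from kvm->arch.vmid at processing time, so every queued fence targets the VMID it was issued for, even if the VM's VMID changes in between.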
@@ -355,35 +341,43 @@ void kvm_riscv_fence_i(struct kvm *kvm,
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
gpa_t gpa, gpa_t gpsz,
- unsigned long order)
+ unsigned long order, unsigned long vmid)
{
struct kvm_riscv_hfence data;
data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
data.asid = 0;
+ data.vmid = vmid;
data.addr = gpa;
data.size = gpsz;
data.order = order;
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
- KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
+ KVM_REQ_TLB_FLUSH, &data);
}
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask)
+ unsigned long hbase, unsigned long hmask,
+ unsigned long vmid)
{
- make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
- KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
+ struct kvm_riscv_hfence data = {0};
+
+ data.type = KVM_RISCV_HFENCE_GVMA_VMID_ALL;
+ data.vmid = vmid;
+ make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+ KVM_REQ_TLB_FLUSH, &data);
}
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
unsigned long gva, unsigned long gvsz,
- unsigned long order, unsigned long asid)
+ unsigned long order, unsigned long asid,
+ unsigned long vmid)
{
struct kvm_riscv_hfence data;
data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
data.asid = asid;
+ data.vmid = vmid;
data.addr = gva;
data.size = gvsz;
data.order = order;
@@ -393,13 +387,13 @@ void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
- unsigned long asid)
+ unsigned long asid, unsigned long vmid)
{
- struct kvm_riscv_hfence data;
+ struct kvm_riscv_hfence data = {0};
data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
data.asid = asid;
- data.addr = data.size = data.order = 0;
+ data.vmid = vmid;
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
KVM_REQ_HFENCE_VVMA_ALL, &data);
}
@@ -407,12 +401,13 @@ void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
unsigned long gva, unsigned long gvsz,
- unsigned long order)
+ unsigned long order, unsigned long vmid)
{
struct kvm_riscv_hfence data;
data.type = KVM_RISCV_HFENCE_VVMA_GVA;
data.asid = 0;
+ data.vmid = vmid;
data.addr = gva;
data.size = gvsz;
data.order = order;
@@ -421,8 +416,21 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
}
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask)
+ unsigned long hbase, unsigned long hmask,
+ unsigned long vmid)
+{
+ struct kvm_riscv_hfence data = {0};
+
+ data.type = KVM_RISCV_HFENCE_VVMA_ALL;
+ data.vmid = vmid;
+ make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+ KVM_REQ_HFENCE_VVMA_ALL, &data);
+}
+
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
- make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
- KVM_REQ_HFENCE_VVMA_ALL, NULL);
+ kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0,
+ gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT,
+ PAGE_SHIFT, READ_ONCE(kvm->arch.vmid.vmid));
+ return 0;
}
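In kvm_arch_flush_remote_tlbs_range() above, a range of nr_pages base pages starting at gfn becomes a GPA-ranged HFENCE with gpa = gfn << PAGE_SHIFT, size = nr_pages << PAGE_SHIFT and order = PAGE_SHIFT (page granularity); hbase = -1UL with hmask = 0 is the existing convention for addressing all VCPUs of the VM.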
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 0462863206ca..f001e56403f9 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -18,6 +18,7 @@
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
+#include <asm/kvm_mmu.h>
#include <asm/kvm_nacl.h>
#include <asm/kvm_vcpu_vector.h>
@@ -111,7 +112,7 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu, bool kvm_sbi_reset)
vcpu->arch.hfence_tail = 0;
memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));
- kvm_riscv_vcpu_sbi_sta_reset(vcpu);
+ kvm_riscv_vcpu_sbi_reset(vcpu);
/* Reset the guest CSRs for hotplug usecase */
if (loaded)
@@ -148,8 +149,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
spin_lock_init(&vcpu->arch.reset_state.lock);
- if (kvm_riscv_vcpu_alloc_vector_context(vcpu))
- return -ENOMEM;
+ rc = kvm_riscv_vcpu_alloc_vector_context(vcpu);
+ if (rc)
+ return rc;
/* Setup VCPU timer */
kvm_riscv_vcpu_timer_init(vcpu);
@@ -158,9 +160,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
kvm_riscv_vcpu_pmu_init(vcpu);
/* Setup VCPU AIA */
- rc = kvm_riscv_vcpu_aia_init(vcpu);
- if (rc)
- return rc;
+ kvm_riscv_vcpu_aia_init(vcpu);
/*
* Setup SBI extensions
@@ -187,6 +187,8 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
+ kvm_riscv_vcpu_sbi_deinit(vcpu);
+
/* Cleanup VCPU AIA context */
kvm_riscv_vcpu_aia_deinit(vcpu);
@@ -620,7 +622,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
}
- kvm_riscv_gstage_update_hgatp(vcpu);
+ kvm_riscv_mmu_update_hgatp(vcpu);
kvm_riscv_vcpu_timer_restore(vcpu);
@@ -680,7 +682,14 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
}
}
-static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
+/**
+ * check_vcpu_requests - check and handle pending vCPU requests
+ * @vcpu: the VCPU pointer
+ *
+ * Return: 1 if we should enter the guest
+ * 0 if we should exit to userspace
+ */
+static int kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
@@ -705,17 +714,13 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
kvm_riscv_reset_vcpu(vcpu, true);
if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
- kvm_riscv_gstage_update_hgatp(vcpu);
+ kvm_riscv_mmu_update_hgatp(vcpu);
if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
kvm_riscv_fence_i_process(vcpu);
- /*
- * The generic KVM_REQ_TLB_FLUSH is same as
- * KVM_REQ_HFENCE_GVMA_VMID_ALL
- */
- if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
- kvm_riscv_hfence_gvma_vmid_all_process(vcpu);
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+ kvm_riscv_tlb_flush_process(vcpu);
if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
kvm_riscv_hfence_vvma_all_process(vcpu);
@@ -725,7 +730,12 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
kvm_riscv_vcpu_record_steal_time(vcpu);
+
+ if (kvm_dirty_ring_check_request(vcpu))
+ return 0;
}
+
+ return 1;
}
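kvm_dirty_ring_check_request() returns true when KVM_REQ_DIRTY_RING_SOFT_FULL is pending and the vCPU's dirty ring is still soft-full, setting the exit reason to KVM_EXIT_DIRTY_RING_FULL; the new non-void return value lets the run loop bounce out to userspace so the ring can be harvested instead of entering the guest.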
static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
@@ -907,7 +917,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_riscv_gstage_vmid_update(vcpu);
- kvm_riscv_check_vcpu_requests(vcpu);
+ ret = kvm_riscv_check_vcpu_requests(vcpu);
+ if (ret <= 0)
+ continue;
preempt_disable();
@@ -951,12 +963,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
/*
- * Cleanup stale TLB enteries
+ * Sanitize VMID mappings cached (TLB) on current CPU
*
* Note: This should be done after G-stage VMID has been
* updated using kvm_riscv_gstage_vmid_ver_changed()
*/
- kvm_riscv_local_tlb_sanitize(vcpu);
+ kvm_riscv_gstage_vmid_sanitize(vcpu);
trace_kvm_entry(vcpu);
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index 6e0c18412795..0bb0c51e3c89 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -9,10 +9,13 @@
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/insn-def.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_nacl.h>
static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_trap *trap)
{
+ struct kvm_gstage_mapping host_map;
struct kvm_memory_slot *memslot;
unsigned long hva, fault_addr;
bool writable;
@@ -40,8 +43,9 @@ static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
};
}
- ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
- (trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false);
+ ret = kvm_riscv_mmu_map(vcpu, memslot, fault_addr, hva,
+ (trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false,
+ &host_map);
if (ret < 0)
return ret;
@@ -135,7 +139,7 @@ unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
struct kvm_cpu_trap *trap)
{
- unsigned long vsstatus = csr_read(CSR_VSSTATUS);
+ unsigned long vsstatus = ncsr_read(CSR_VSSTATUS);
/* Change Guest SSTATUS.SPP bit */
vsstatus &= ~SR_SPP;
@@ -151,15 +155,15 @@ void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
vsstatus &= ~SR_SIE;
/* Update Guest SSTATUS */
- csr_write(CSR_VSSTATUS, vsstatus);
+ ncsr_write(CSR_VSSTATUS, vsstatus);
/* Update Guest SCAUSE, STVAL, and SEPC */
- csr_write(CSR_VSCAUSE, trap->scause);
- csr_write(CSR_VSTVAL, trap->stval);
- csr_write(CSR_VSEPC, trap->sepc);
+ ncsr_write(CSR_VSCAUSE, trap->scause);
+ ncsr_write(CSR_VSTVAL, trap->stval);
+ ncsr_write(CSR_VSEPC, trap->sepc);
/* Set Guest PC to Guest exception vector */
- vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
+ vcpu->arch.guest_context.sepc = ncsr_read(CSR_VSTVEC);
/* Set Guest privilege mode to supervisor */
vcpu->arch.guest_context.sstatus |= SR_SPP;
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index 2e1b646f0d61..cce6a38ea54f 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -23,7 +23,7 @@
#define KVM_ISA_EXT_ARR(ext) \
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
-/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
+/* Mapping between KVM ISA Extension ID & guest ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
/* Single letter extensions (alphabetically sorted) */
[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
@@ -35,7 +35,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
/* Multi letter extensions (alphabetically sorted) */
- [KVM_RISCV_ISA_EXT_SMNPM] = RISCV_ISA_EXT_SSNPM,
+ KVM_ISA_EXT_ARR(SMNPM),
KVM_ISA_EXT_ARR(SMSTATEEN),
KVM_ISA_EXT_ARR(SSAIA),
KVM_ISA_EXT_ARR(SSCOFPMF),
@@ -112,6 +112,36 @@ static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
return KVM_RISCV_ISA_EXT_MAX;
}
+static int kvm_riscv_vcpu_isa_check_host(unsigned long kvm_ext, unsigned long *guest_ext)
+{
+ unsigned long host_ext;
+
+ if (kvm_ext >= KVM_RISCV_ISA_EXT_MAX ||
+ kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr))
+ return -ENOENT;
+
+ *guest_ext = kvm_isa_ext_arr[kvm_ext];
+ switch (*guest_ext) {
+ case RISCV_ISA_EXT_SMNPM:
+ /*
+ * Pointer masking effective in (H)S-mode is provided by the
+ * Smnpm extension, so that extension is reported to the guest,
+ * even though the CSR bits for configuring VS-mode pointer
+ * masking on the host side are part of the Ssnpm extension.
+ */
+ host_ext = RISCV_ISA_EXT_SSNPM;
+ break;
+ default:
+ host_ext = *guest_ext;
+ break;
+ }
+
+ if (!__riscv_isa_extension_available(NULL, host_ext))
+ return -ENOENT;
+
+ return 0;
+}
+
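Example of the special case above: on a host whose ISA string contains Ssnpm, kvm_riscv_vcpu_isa_check_host(KVM_RISCV_ISA_EXT_SMNPM, &guest_ext) succeeds with *guest_ext = RISCV_ISA_EXT_SMNPM, since the host capability backing guest Smnpm is the Ssnpm CSR support; for every other extension the host and guest IDs coincide.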
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
switch (ext) {
@@ -219,13 +249,13 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
- unsigned long host_isa, i;
+ unsigned long guest_ext, i;
for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
- host_isa = kvm_isa_ext_arr[i];
- if (__riscv_isa_extension_available(NULL, host_isa) &&
- kvm_riscv_vcpu_isa_enable_allowed(i))
- set_bit(host_isa, vcpu->arch.isa);
+ if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext))
+ continue;
+ if (kvm_riscv_vcpu_isa_enable_allowed(i))
+ set_bit(guest_ext, vcpu->arch.isa);
}
}
@@ -607,18 +637,15 @@ static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long *reg_val)
{
- unsigned long host_isa_ext;
-
- if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
- reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
- return -ENOENT;
+ unsigned long guest_ext;
+ int ret;
- host_isa_ext = kvm_isa_ext_arr[reg_num];
- if (!__riscv_isa_extension_available(NULL, host_isa_ext))
- return -ENOENT;
+ ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext);
+ if (ret)
+ return ret;
*reg_val = 0;
- if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
+ if (__riscv_isa_extension_available(vcpu->arch.isa, guest_ext))
*reg_val = 1; /* Mark the given extension as available */
return 0;
@@ -628,17 +655,14 @@ static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long reg_val)
{
- unsigned long host_isa_ext;
-
- if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
- reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
- return -ENOENT;
+ unsigned long guest_ext;
+ int ret;
- host_isa_ext = kvm_isa_ext_arr[reg_num];
- if (!__riscv_isa_extension_available(NULL, host_isa_ext))
- return -ENOENT;
+ ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext);
+ if (ret)
+ return ret;
- if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
+ if (reg_val == test_bit(guest_ext, vcpu->arch.isa))
return 0;
if (!vcpu->arch.ran_atleast_once) {
@@ -648,10 +672,10 @@ static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
*/
if (reg_val == 1 &&
kvm_riscv_vcpu_isa_enable_allowed(reg_num))
- set_bit(host_isa_ext, vcpu->arch.isa);
+ set_bit(guest_ext, vcpu->arch.isa);
else if (!reg_val &&
kvm_riscv_vcpu_isa_disable_allowed(reg_num))
- clear_bit(host_isa_ext, vcpu->arch.isa);
+ clear_bit(guest_ext, vcpu->arch.isa);
else
return -EINVAL;
kvm_riscv_vcpu_fp_reset(vcpu);
@@ -1009,16 +1033,15 @@ static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
u64 __user *uindices)
{
+ unsigned long guest_ext;
unsigned int n = 0;
- unsigned long isa_ext;
for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
u64 size = IS_ENABLED(CONFIG_32BIT) ?
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
- isa_ext = kvm_isa_ext_arr[i];
- if (!__riscv_isa_extension_available(NULL, isa_ext))
+ if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext))
continue;
if (uindices) {
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 6e09b518a5d1..a56c4959f9ad 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -536,5 +536,54 @@ void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
scontext->ext_status[idx] = ext->default_disabled ?
KVM_RISCV_SBI_EXT_STATUS_DISABLED :
KVM_RISCV_SBI_EXT_STATUS_ENABLED;
+
+ if (ext->init && ext->init(vcpu) != 0)
+ scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
+ }
+}
+
+void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+ const struct kvm_riscv_sbi_extension_entry *entry;
+ const struct kvm_vcpu_sbi_extension *ext;
+ int idx, i;
+
+ for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
+ entry = &sbi_ext[i];
+ ext = entry->ext_ptr;
+ idx = entry->ext_idx;
+
+ if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
+ continue;
+
+ if (scontext->ext_status[idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE ||
+ !ext->deinit)
+ continue;
+
+ ext->deinit(vcpu);
+ }
+}
+
+void kvm_riscv_vcpu_sbi_reset(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+ const struct kvm_riscv_sbi_extension_entry *entry;
+ const struct kvm_vcpu_sbi_extension *ext;
+ int idx, i;
+
+ for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
+ entry = &sbi_ext[i];
+ ext = entry->ext_ptr;
+ idx = entry->ext_idx;
+
+ if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
+ continue;
+
+ if (scontext->ext_status[idx] != KVM_RISCV_SBI_EXT_STATUS_ENABLED ||
+ !ext->reset)
+ continue;
+
+ ext->reset(vcpu);
}
}
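With the new init/deinit/reset callbacks, an SBI extension opts into per-vCPU lifecycle management through its entry in sbi_ext[]. A hypothetical extension (the FOO names are placeholders, not part of this patch) would wire up:

	const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_foo = {
		.extid_start = SBI_EXT_FOO,
		.extid_end = SBI_EXT_FOO,
		.handler = kvm_sbi_ext_foo_handler,
		.init = kvm_sbi_ext_foo_init,		/* failure marks it UNAVAILABLE */
		.deinit = kvm_sbi_ext_foo_deinit,	/* called at vCPU teardown */
		.reset = kvm_sbi_ext_foo_reset,		/* called at vCPU reset, if ENABLED */
	};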
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index b17fad091bab..b490ed1428a6 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -96,6 +96,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
unsigned long hmask = cp->a0;
unsigned long hbase = cp->a1;
unsigned long funcid = cp->a6;
+ unsigned long vmid;
switch (funcid) {
case SBI_EXT_RFENCE_REMOTE_FENCE_I:
@@ -103,22 +104,22 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
+ vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
- kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
+ kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask, vmid);
else
kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
- cp->a2, cp->a3, PAGE_SHIFT);
+ cp->a2, cp->a3, PAGE_SHIFT, vmid);
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
+ vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
- kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
- hbase, hmask, cp->a4);
+ kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, hbase, hmask,
+ cp->a4, vmid);
else
- kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
- hbase, hmask,
- cp->a2, cp->a3,
- PAGE_SHIFT, cp->a4);
+ kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, hbase, hmask, cp->a2,
+ cp->a3, PAGE_SHIFT, cp->a4, vmid);
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
index 5f35427114c1..cc6cb7c8f0e4 100644
--- a/arch/riscv/kvm/vcpu_sbi_sta.c
+++ b/arch/riscv/kvm/vcpu_sbi_sta.c
@@ -16,7 +16,7 @@
#include <asm/sbi.h>
#include <asm/uaccess.h>
-void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
{
vcpu->arch.sta.shmem = INVALID_GPA;
vcpu->arch.sta.last_steal = 0;
@@ -156,6 +156,7 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
.extid_end = SBI_EXT_STA,
.handler = kvm_sbi_ext_sta_handler,
.probe = kvm_sbi_ext_sta_probe,
+ .reset = kvm_riscv_vcpu_sbi_sta_reset,
};
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
diff --git a/arch/riscv/kvm/vcpu_sbi_v01.c b/arch/riscv/kvm/vcpu_sbi_v01.c
index 8f4c4fa16227..368dfddd23d9 100644
--- a/arch/riscv/kvm/vcpu_sbi_v01.c
+++ b/arch/riscv/kvm/vcpu_sbi_v01.c
@@ -23,6 +23,7 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm *kvm = vcpu->kvm;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
struct kvm_cpu_trap *utrap = retdata->utrap;
+ unsigned long vmid;
switch (cp->a7) {
case SBI_EXT_0_1_CONSOLE_GETCHAR:
@@ -78,25 +79,21 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
kvm_riscv_fence_i(vcpu->kvm, 0, hmask);
else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA) {
+ vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
if (cp->a1 == 0 && cp->a2 == 0)
- kvm_riscv_hfence_vvma_all(vcpu->kvm,
- 0, hmask);
+ kvm_riscv_hfence_vvma_all(vcpu->kvm, 0, hmask, vmid);
else
- kvm_riscv_hfence_vvma_gva(vcpu->kvm,
- 0, hmask,
- cp->a1, cp->a2,
- PAGE_SHIFT);
+ kvm_riscv_hfence_vvma_gva(vcpu->kvm, 0, hmask, cp->a1,
+ cp->a2, PAGE_SHIFT, vmid);
} else {
+ vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
if (cp->a1 == 0 && cp->a2 == 0)
- kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
- 0, hmask,
- cp->a3);
+ kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, 0, hmask,
+ cp->a3, vmid);
else
- kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
- 0, hmask,
- cp->a1, cp->a2,
- PAGE_SHIFT,
- cp->a3);
+ kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, 0, hmask,
+ cp->a1, cp->a2, PAGE_SHIFT,
+ cp->a3, vmid);
}
break;
default:
diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c
index b27ec8f96697..66d91ae6e9b2 100644
--- a/arch/riscv/kvm/vm.c
+++ b/arch/riscv/kvm/vm.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS()
@@ -31,13 +32,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int r;
- r = kvm_riscv_gstage_alloc_pgd(kvm);
+ r = kvm_riscv_mmu_alloc_pgd(kvm);
if (r)
return r;
r = kvm_riscv_gstage_vmid_init(kvm);
if (r) {
- kvm_riscv_gstage_free_pgd(kvm);
+ kvm_riscv_mmu_free_pgd(kvm);
return r;
}
@@ -199,7 +200,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = KVM_USER_MEM_SLOTS;
break;
case KVM_CAP_VM_GPA_BITS:
- r = kvm_riscv_gstage_gpa_bits();
+ r = kvm_riscv_gstage_gpa_bits;
break;
default:
r = 0;
diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c
index ddc98714ce8e..3b426c800480 100644
--- a/arch/riscv/kvm/vmid.c
+++ b/arch/riscv/kvm/vmid.c
@@ -14,6 +14,8 @@
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
+#include <asm/kvm_tlb.h>
+#include <asm/kvm_vmid.h>
static unsigned long vmid_version = 1;
static unsigned long vmid_next;
@@ -122,3 +124,26 @@ void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu)
kvm_for_each_vcpu(i, v, vcpu->kvm)
kvm_make_request(KVM_REQ_UPDATE_HGATP, v);
}
+
+void kvm_riscv_gstage_vmid_sanitize(struct kvm_vcpu *vcpu)
+{
+ unsigned long vmid;
+
+ if (!kvm_riscv_gstage_vmid_bits() ||
+ vcpu->arch.last_exit_cpu == vcpu->cpu)
+ return;
+
+ /*
+ * On RISC-V platforms with hardware VMID support, we share same
+ * VMID for all VCPUs of a particular Guest/VM. This means we might
+ * have stale G-stage TLB entries on the current Host CPU due to
+ * some other VCPU of the same Guest which ran previously on the
+ * current Host CPU.
+ *
+ * To cleanup stale TLB entries, we simply flush all G-stage TLB
+ * entries by VMID whenever underlying Host CPU changes for a VCPU.
+ */
+
+ vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
+ kvm_riscv_local_hfence_gvma_vmid_all(vmid);
+}
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 0194324a0c50..04ed6f8acae4 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -20,6 +20,9 @@
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/exceptions.h>
+
#include "../kernel/head.h"
static void show_pte(unsigned long addr)
@@ -291,6 +294,11 @@ void handle_page_fault(struct pt_regs *regs)
if (kprobe_page_fault(regs, cause))
return;
+ if (user_mode(regs))
+ trace_page_fault_user(addr, regs, cause);
+ else
+ trace_page_fault_kernel(addr, regs, cause);
+
/*
* Fault-in kernel-space virtual memory on-demand.
* The 'reference' page table is init_mm.pgd.
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 8d0374d7ce8e..15683ae13fa5 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1408,7 +1408,7 @@ static void __init arch_reserve_crashkernel(void)
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base,
- &low_size, &high);
+ &low_size, NULL, &high);
if (ret)
return;
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index d815448758a1..3f76db3d2769 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -299,7 +299,7 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
if (ret)
goto unlock;
- ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
+ ret = walk_kernel_page_table_range(lm_start, lm_end,
&pageattr_ops, NULL, &masks);
if (ret)
goto unlock;
@@ -317,13 +317,13 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
if (ret)
goto unlock;
- ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
+ ret = walk_kernel_page_table_range(lm_start, lm_end,
&pageattr_ops, NULL, &masks);
if (ret)
goto unlock;
}
- ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
+ ret = walk_kernel_page_table_range(start, end, &pageattr_ops, NULL,
&masks);
unlock:
@@ -335,7 +335,7 @@ unlock:
*/
flush_tlb_all();
#else
- ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
+ ret = walk_kernel_page_table_range(start, end, &pageattr_ops, NULL,
&masks);
mmap_write_unlock(&init_mm);
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
index 32922550a50a..3b51690cc876 100644
--- a/arch/riscv/mm/ptdump.c
+++ b/arch/riscv/mm/ptdump.c
@@ -6,7 +6,6 @@
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/debugfs.h>
-#include <linux/memory_hotplug.h>
#include <linux/seq_file.h>
#include <linux/ptdump.h>
@@ -413,9 +412,7 @@ bool ptdump_check_wx(void)
static int ptdump_show(struct seq_file *m, void *v)
{
- get_online_mems();
ptdump_walk(m, m->private);
- put_online_mems();
return 0;
}
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index e737ba7949b1..8404530ec00f 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -234,11 +234,6 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}
-void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
- flush_tlb_mm(mm);
-}
-
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
__flush_tlb_range(NULL, &batch->cpumask,
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 25a773e6596e..bf680c26a33c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -74,6 +74,7 @@ config S390
select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
+ select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
@@ -102,6 +103,7 @@ config S390
select ARCH_HAS_UBSAN
select ARCH_HAS_VDSO_TIME_DATA
select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select ARCH_HAVE_TRACE_MMIO_ACCESS
select ARCH_INLINE_READ_LOCK
select ARCH_INLINE_READ_LOCK_BH
select ARCH_INLINE_READ_LOCK_IRQ
@@ -131,6 +133,7 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
+ select ARCH_MODULE_NEEDS_WEAK_PER_CPU
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
@@ -149,6 +152,7 @@ config S390
select ARCH_WANT_KERNEL_PMD_MKWRITE
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
+ select ARCH_WANTS_THP_SWAP
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2
select DCACHE_WORD_ACCESS if !KMSAN
@@ -198,7 +202,6 @@ config S390
select HAVE_GUP_FAST
select HAVE_FENTRY
select HAVE_FTRACE_GRAPH_FUNC
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_FREGS
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index dd7ba7587dd5..ad2b0baa527c 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -12,6 +12,7 @@
#define KMSG_COMPONENT "appldata"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/sched/stat.h>
#include <linux/init.h>
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index bee49626be4b..02f2cf082748 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -19,15 +19,15 @@ CC_FLAGS_MARCH_MINIMUM := -march=z10
KBUILD_AFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_AFLAGS_DECOMPRESSOR))
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_CFLAGS_DECOMPRESSOR))
-KBUILD_AFLAGS += $(CC_FLAGS_MARCH_MINIMUM)
-KBUILD_CFLAGS += $(CC_FLAGS_MARCH_MINIMUM)
+KBUILD_AFLAGS += $(CC_FLAGS_MARCH_MINIMUM) -D__DISABLE_EXPORTS
+KBUILD_CFLAGS += $(CC_FLAGS_MARCH_MINIMUM) -D__DISABLE_EXPORTS
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += version.o pgm_check.o ctype.o ipl_data.o relocs.o alternative.o
-obj-y += uv.o printk.o
+obj-y += uv.o printk.o trampoline.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c
index 79afb5fa7f1f..25a20986b96e 100644
--- a/arch/s390/boot/als.c
+++ b/arch/s390/boot/als.c
@@ -65,7 +65,7 @@ static void facility_mismatch(void)
boot_emerg("The Linux kernel requires more recent processor hardware\n");
boot_emerg("Detected machine-type number: %4x\n", id.machine);
print_missing_facilities();
- boot_emerg("See Principles of Operations for facility bits\n");
+ boot_emerg("See z/Architecture Principles of Operation - Facility Indications\n");
disabled_wait();
}
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index e045cae6e80a..c0152db285f0 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -6,7 +6,7 @@
#define IPL_START 0x200
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/printk.h>
#include <asm/physmem_info.h>
@@ -74,6 +74,7 @@ void print_stacktrace(unsigned long sp);
void error(char *m);
int get_random(unsigned long limit, unsigned long *value);
void boot_rb_dump(void);
+void __noreturn jump_to_kernel(psw_t *psw);
#ifndef boot_fmt
#define boot_fmt(fmt) fmt
@@ -121,5 +122,5 @@ static inline bool intersects(unsigned long addr0, unsigned long size0,
{
return addr0 + size0 > addr1 && addr1 + size1 > addr0;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* BOOT_BOOT_H */
diff --git a/arch/s390/boot/ipl_data.c b/arch/s390/boot/ipl_data.c
index 0846e2b249c6..c4130a80b058 100644
--- a/arch/s390/boot/ipl_data.c
+++ b/arch/s390/boot/ipl_data.c
@@ -16,7 +16,9 @@ struct ipl_lowcore {
struct ccw0 ccwpgm[2]; /* 0x0008 */
u8 fill[56]; /* 0x0018 */
struct ccw0 ccwpgmcc[20]; /* 0x0050 */
- u8 pad_0xf0[0x01a0-0x00f0]; /* 0x00f0 */
+ u8 pad_0xf0[0x0140-0x00f0]; /* 0x00f0 */
+ psw_t svc_old_psw; /* 0x0140 */
+ u8 pad_0x150[0x01a0-0x0150]; /* 0x0150 */
psw_t restart_psw; /* 0x01a0 */
psw_t external_new_psw; /* 0x01b0 */
psw_t svc_new_psw; /* 0x01c0 */
@@ -75,6 +77,11 @@ static struct ipl_lowcore ipl_lowcore __used __section(".ipldata") = {
[18] = CCW0(CCW_CMD_READ_IPL, 0x690, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[19] = CCW0(CCW_CMD_READ_IPL, 0x6e0, 0x50, CCW_FLAG_SLI),
},
+ /*
+ * Let GDB's lx-symbols command find the jump_to_kernel symbol
+ * without having to load decompressor symbols.
+ */
+ .svc_old_psw = { .mask = 0, .addr = (unsigned long)jump_to_kernel },
.restart_psw = { .mask = 0, .addr = IPL_START, },
.external_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_EXT_NEW_PSW, },
.svc_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_SVC_NEW_PSW, },
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index da8337e63a3e..93684a775716 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -384,7 +384,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax);
boot_debug("kernel image: 0x%016lx-0x%016lx (kaslr)\n", kernel_start,
- kernel_size + kernel_size);
+ kernel_start + kernel_size);
} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
boot_debug("kernel image: 0x%016lx-0x%016lx (constrained)\n", kernel_start,
@@ -642,5 +642,5 @@ void startup_kernel(void)
psw.addr = __kaslr_offset + vmlinux.entry;
psw.mask = PSW_KERNEL_BITS;
boot_debug("Starting kernel at: 0x%016lx\n", psw.addr);
- __load_psw(psw);
+ jump_to_kernel(&psw);
}
diff --git a/arch/s390/boot/trampoline.S b/arch/s390/boot/trampoline.S
new file mode 100644
index 000000000000..1cb5adf005ea
--- /dev/null
+++ b/arch/s390/boot/trampoline.S
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/linkage.h>
+
+# This function is identical to __load_psw(), but the lx-symbols GDB command
+# puts a breakpoint on it, so it needs to be kept separate.
+SYM_CODE_START(jump_to_kernel)
+ lpswe 0(%r2)
+SYM_CODE_END(jump_to_kernel)
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index a7db7ed28720..6b33429f1c4d 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -248,7 +248,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_CPU=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
@@ -817,6 +816,7 @@ CONFIG_PKEY_EP11=m
CONFIG_PKEY_PCKMO=m
CONFIG_PKEY_UV=m
CONFIG_CRYPTO_PAES_S390=m
+CONFIG_CRYPTO_PHMAC_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_CRYPTO_KRB5=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 0217ed5616bf..b75eb2775850 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -239,7 +239,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_CPU=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
@@ -804,6 +803,7 @@ CONFIG_PKEY_EP11=m
CONFIG_PKEY_PCKMO=m
CONFIG_PKEY_UV=m
CONFIG_CRYPTO_PAES_S390=m
+CONFIG_CRYPTO_PHMAC_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_CRYPTO_KRB5=m
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 1e5a1038d491..998f4b656b18 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -11,4 +11,5 @@ obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
obj-$(CONFIG_CRYPTO_HMAC_S390) += hmac_s390.o
+obj-$(CONFIG_CRYPTO_PHMAC_S390) += phmac_s390.o
obj-y += arch_random.o
diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
index a8a2407381af..083e8d5eada2 100644
--- a/arch/s390/crypto/arch_random.c
+++ b/arch/s390/crypto/arch_random.c
@@ -6,6 +6,7 @@
* Author(s): Harald Freudenberger
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/random.h>
diff --git a/arch/s390/crypto/hmac_s390.c b/arch/s390/crypto/hmac_s390.c
index 93a1098d9f8d..58444da9b004 100644
--- a/arch/s390/crypto/hmac_s390.c
+++ b/arch/s390/crypto/hmac_s390.c
@@ -290,6 +290,7 @@ static int s390_hmac_export(struct shash_desc *desc, void *out)
struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
unsigned int bs = crypto_shash_blocksize(desc->tfm);
unsigned int ds = bs / 2;
+ u64 lo = ctx->buflen[0];
union {
u8 *u8;
u64 *u64;
@@ -301,9 +302,10 @@ static int s390_hmac_export(struct shash_desc *desc, void *out)
else
memcpy(p.u8, ctx->param, ds);
p.u8 += ds;
- put_unaligned(ctx->buflen[0], p.u64++);
+ lo += bs;
+ put_unaligned(lo, p.u64++);
if (ds == SHA512_DIGEST_SIZE)
- put_unaligned(ctx->buflen[1], p.u64);
+ put_unaligned(ctx->buflen[1] + (lo < bs), p.u64);
return err;
}
@@ -316,14 +318,16 @@ static int s390_hmac_import(struct shash_desc *desc, const void *in)
const u8 *u8;
const u64 *u64;
} p = { .u8 = in };
+ u64 lo;
int err;
err = s390_hmac_sha2_init(desc);
memcpy(ctx->param, p.u8, ds);
p.u8 += ds;
- ctx->buflen[0] = get_unaligned(p.u64++);
+ lo = get_unaligned(p.u64++);
+ ctx->buflen[0] = lo - bs;
if (ds == SHA512_DIGEST_SIZE)
- ctx->buflen[1] = get_unaligned(p.u64);
+ ctx->buflen[1] = get_unaligned(p.u64) - (lo < bs);
if (ctx->buflen[0] | ctx->buflen[1])
ctx->gr0.ikp = 1;
return err;
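The bs bias in export/import above keeps the exported counter compatible with the generic software HMAC export layout, whose inner-hash byte count already includes the consumed ipad block, while the s390 KMAC context tracks message bytes only: export adds one block (carrying into buflen[1] when lo wraps), and import subtracts it again, borrowing from the high word when lo < bs.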
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 8a340c16acb4..a624a43a2b54 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -1633,7 +1633,7 @@ static int __init paes_s390_init(void)
/* with this pseudo device alloc and start a crypto engine */
paes_crypto_engine =
crypto_engine_alloc_init_and_set(paes_dev.this_device,
- true, NULL, false, MAX_QLEN);
+ true, false, MAX_QLEN);
if (!paes_crypto_engine) {
rc = -ENOMEM;
goto out_err;
diff --git a/arch/s390/crypto/phmac_s390.c b/arch/s390/crypto/phmac_s390.c
new file mode 100644
index 000000000000..7ecfdc4fba2d
--- /dev/null
+++ b/arch/s390/crypto/phmac_s390.c
@@ -0,0 +1,1048 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2025
+ *
+ * s390 specific HMAC support for protected keys.
+ */
+
+#define KMSG_COMPONENT "phmac_s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <asm/cpacf.h>
+#include <asm/pkey.h>
+#include <crypto/engine.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha2.h>
+#include <linux/atomic.h>
+#include <linux/cpufeature.h>
+#include <linux/delay.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+static struct crypto_engine *phmac_crypto_engine;
+#define MAX_QLEN 10
+
+/*
+ * A simple hash walk helper
+ */
+
+struct hash_walk_helper {
+ struct crypto_hash_walk walk;
+ const u8 *walkaddr;
+ int walkbytes;
+};
+
+/*
+ * Prepare hash walk helper.
+ * Set up the base hash walk, fill walkaddr and walkbytes.
+ * Returns 0 on success or negative value on error.
+ */
+static inline int hwh_prepare(struct ahash_request *req,
+ struct hash_walk_helper *hwh)
+{
+ hwh->walkbytes = crypto_hash_walk_first(req, &hwh->walk);
+ if (hwh->walkbytes < 0)
+ return hwh->walkbytes;
+ hwh->walkaddr = hwh->walk.data;
+ return 0;
+}
+
+/*
+ * Advance hash walk helper by n bytes.
+ * Progress the walkbytes and walkaddr fields by n bytes.
+ * If walkbytes is then 0, pull next hunk from hash walk
+ * and update walkbytes and walkaddr.
+ * If n is negative, unmap hash walk and return error.
+ * Returns 0 on success or negative value on error.
+ */
+static inline int hwh_advance(struct hash_walk_helper *hwh, int n)
+{
+ if (n < 0)
+ return crypto_hash_walk_done(&hwh->walk, n);
+
+ hwh->walkbytes -= n;
+ hwh->walkaddr += n;
+ if (hwh->walkbytes > 0)
+ return 0;
+
+ hwh->walkbytes = crypto_hash_walk_done(&hwh->walk, 0);
+ if (hwh->walkbytes < 0)
+ return hwh->walkbytes;
+
+ hwh->walkaddr = hwh->walk.data;
+ return 0;
+}
+
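The intended consumption pattern for the two helpers, sketched below (process() is a placeholder for whatever consumes bytes, not a real function):

	rc = hwh_prepare(req, hwh);
	while (!rc && hwh->walkbytes > 0) {
		n = process(hwh->walkaddr, hwh->walkbytes);
		rc = hwh_advance(hwh, n);	/* n < 0 unmaps and bails out */
	}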
+/*
+ * KMAC param block layout for sha2 function codes:
+ * The layout of the param block for the KMAC instruction depends on the
+ * block size of the SHA-2 algorithm selected by the function code. The param block
+ * contains the hash chaining value (cv), the input message bit-length (imbl)
+ * and the hmac-secret (key). To prevent code duplication, the sizes of all
+ * these are calculated based on the blocksize.
+ *
+ * param-block:
+ * +-------+
+ * |  cv   |
+ * +-------+
+ * | imbl  |
+ * +-------+
+ * |  key  |
+ * +-------+
+ *
+ * sizes:
+ * part | sh2-alg | calculation | size | type
+ * -----+---------+-------------+------+--------
+ * cv | 224/256 | blocksize/2 | 32 | u64[8]
+ * | 384/512 | | 64 | u128[8]
+ * imbl | 224/256 | blocksize/8 | 8 | u64
+ * | 384/512 | | 16 | u128
+ * key | 224/256 | blocksize | 96 | u8[96]
+ * | 384/512 | | 160 | u8[160]
+ */
+
+#define MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define MAX_IMBL_SIZE sizeof(u128)
+#define MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
+
+#define SHA2_CV_SIZE(bs) ((bs) >> 1)
+#define SHA2_IMBL_SIZE(bs) ((bs) >> 3)
+
+#define SHA2_IMBL_OFFSET(bs) (SHA2_CV_SIZE(bs))
+#define SHA2_KEY_OFFSET(bs) (SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))
+
+#define PHMAC_MAX_KEYSIZE 256
+#define PHMAC_SHA256_PK_SIZE (SHA256_BLOCK_SIZE + 32)
+#define PHMAC_SHA512_PK_SIZE (SHA512_BLOCK_SIZE + 32)
+#define PHMAC_MAX_PK_SIZE PHMAC_SHA512_PK_SIZE
+
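Worked sizes from the macros above: for SHA-224/256 (bs = 64) the param block holds a 32-byte cv, an 8-byte imbl at offset SHA2_IMBL_OFFSET(64) = 32, and a 96-byte protected key (PHMAC_SHA256_PK_SIZE = 64 + 32) at SHA2_KEY_OFFSET(64) = 40; for SHA-384/512 (bs = 128) the figures are 64, 16 at offset 64, and 160 at offset 80.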
+/* phmac protected key struct */
+struct phmac_protkey {
+ u32 type;
+ u32 len;
+ u8 protkey[PHMAC_MAX_PK_SIZE];
+};
+
+#define PK_STATE_NO_KEY 0
+#define PK_STATE_CONVERT_IN_PROGRESS 1
+#define PK_STATE_VALID 2
+
+/* phmac tfm context */
+struct phmac_tfm_ctx {
+ /* source key material used to derive a protected key from */
+ u8 keybuf[PHMAC_MAX_KEYSIZE];
+ unsigned int keylen;
+
+ /* cpacf function code to use with this protected key type */
+ long fc;
+
+ /* nr of requests enqueued via crypto engine which use this tfm ctx */
+ atomic_t via_engine_ctr;
+
+ /* spinlock to atomic read/update all the following fields */
+ spinlock_t pk_lock;
+
+ /* see PK_STATE* defines above, < 0 holds convert failure rc */
+ int pk_state;
+ /* if state is valid, pk holds the protected key */
+ struct phmac_protkey pk;
+};
+
+union kmac_gr0 {
+ unsigned long reg;
+ struct {
+ unsigned long : 48;
+ unsigned long ikp : 1;
+ unsigned long iimp : 1;
+ unsigned long ccup : 1;
+ unsigned long : 6;
+ unsigned long fc : 7;
+ };
+};
+
+struct kmac_sha2_ctx {
+ u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + PHMAC_MAX_PK_SIZE];
+ union kmac_gr0 gr0;
+ u8 buf[MAX_BLOCK_SIZE];
+ u64 buflen[2];
+};
+
+/* phmac request context */
+struct phmac_req_ctx {
+ struct hash_walk_helper hwh;
+ struct kmac_sha2_ctx kmac_ctx;
+ bool final;
+};
+
+/*
+ * Pkey 'token' struct used to derive a protected key value from a clear key.
+ */
+struct hmac_clrkey_token {
+ u8 type;
+ u8 res0[3];
+ u8 version;
+ u8 res1[3];
+ u32 keytype;
+ u32 len;
+ u8 key[];
+} __packed;
+
+static int hash_key(const u8 *in, unsigned int inlen,
+ u8 *digest, unsigned int digestsize)
+{
+ unsigned long func;
+ union {
+ struct sha256_paramblock {
+ u32 h[8];
+ u64 mbl;
+ } sha256;
+ struct sha512_paramblock {
+ u64 h[8];
+ u128 mbl;
+ } sha512;
+ } __packed param;
+
+#define PARAM_INIT(x, y, z) \
+ param.sha##x.h[0] = SHA##y ## _H0; \
+ param.sha##x.h[1] = SHA##y ## _H1; \
+ param.sha##x.h[2] = SHA##y ## _H2; \
+ param.sha##x.h[3] = SHA##y ## _H3; \
+ param.sha##x.h[4] = SHA##y ## _H4; \
+ param.sha##x.h[5] = SHA##y ## _H5; \
+ param.sha##x.h[6] = SHA##y ## _H6; \
+ param.sha##x.h[7] = SHA##y ## _H7; \
+ param.sha##x.mbl = (z)
+
+ switch (digestsize) {
+ case SHA224_DIGEST_SIZE:
+ func = CPACF_KLMD_SHA_256;
+ PARAM_INIT(256, 224, inlen * 8);
+ break;
+ case SHA256_DIGEST_SIZE:
+ func = CPACF_KLMD_SHA_256;
+ PARAM_INIT(256, 256, inlen * 8);
+ break;
+ case SHA384_DIGEST_SIZE:
+ func = CPACF_KLMD_SHA_512;
+ PARAM_INIT(512, 384, inlen * 8);
+ break;
+ case SHA512_DIGEST_SIZE:
+ func = CPACF_KLMD_SHA_512;
+ PARAM_INIT(512, 512, inlen * 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+#undef PARAM_INIT
+
+ cpacf_klmd(func, &param, in, inlen);
+
+ memcpy(digest, &param, digestsize);
+
+ return 0;
+}
+
+/*
+ * make_clrkey_token() - wrap the clear key into a pkey clearkey token.
+ */
+static inline int make_clrkey_token(const u8 *clrkey, size_t clrkeylen,
+ unsigned int digestsize, u8 *dest)
+{
+ struct hmac_clrkey_token *token = (struct hmac_clrkey_token *)dest;
+ unsigned int blocksize;
+ int rc;
+
+ token->type = 0x00;
+ token->version = 0x02;
+ switch (digestsize) {
+ case SHA224_DIGEST_SIZE:
+ case SHA256_DIGEST_SIZE:
+ token->keytype = PKEY_KEYTYPE_HMAC_512;
+ blocksize = 64;
+ break;
+ case SHA384_DIGEST_SIZE:
+ case SHA512_DIGEST_SIZE:
+ token->keytype = PKEY_KEYTYPE_HMAC_1024;
+ blocksize = 128;
+ break;
+ default:
+ return -EINVAL;
+ }
+ token->len = blocksize;
+
+ if (clrkeylen > blocksize) {
+ rc = hash_key(clrkey, clrkeylen, token->key, digestsize);
+ if (rc)
+ return rc;
+ } else {
+ memcpy(token->key, clrkey, clrkeylen);
+ }
+
+ return 0;
+}
+
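This mirrors RFC 2104: a key longer than the block size is first replaced by its digest, so e.g. a 100-byte key for a SHA-256 phmac (block size 64) is reduced to its 32-byte hash before being wrapped into the token, while shorter keys are copied verbatim.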
+/*
+ * phmac_tfm_ctx_setkey() - Set key value into tfm context, maybe construct
+ * a clear key token digestible by pkey from a clear key value.
+ */
+static inline int phmac_tfm_ctx_setkey(struct phmac_tfm_ctx *tfm_ctx,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen > sizeof(tfm_ctx->keybuf))
+ return -EINVAL;
+
+ memcpy(tfm_ctx->keybuf, key, keylen);
+ tfm_ctx->keylen = keylen;
+
+ return 0;
+}
+
+/*
+ * Convert the raw key material into a protected key via PKEY api.
+ * This function may sleep - don't call in non-sleeping context.
+ */
+static inline int convert_key(const u8 *key, unsigned int keylen,
+ struct phmac_protkey *pk)
+{
+ int rc, i;
+
+ pk->len = sizeof(pk->protkey);
+
+ /*
+ * In case of a busy card retry with increasing delay
+ * of 200, 400, 800 and 1600 ms - in total 3 s.
+ */
+ for (rc = -EIO, i = 0; rc && i < 5; i++) {
+ if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) {
+ rc = -EINTR;
+ goto out;
+ }
+ rc = pkey_key2protkey(key, keylen,
+ pk->protkey, &pk->len, &pk->type,
+ PKEY_XFLAG_NOMEMALLOC);
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
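Retry arithmetic in the loop above: the first attempt (i = 0) runs without a delay because rc starts as -EIO rather than -EBUSY; on repeated -EBUSY the retries at i = 1..4 sleep (1 << i) * 100 ms, i.e. 200 + 400 + 800 + 1600 = 3000 ms, matching the 3 s stated in the comment.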
+/*
+ * (Re-)Convert the raw key material from the tfm ctx into a protected
+ * key via the convert_key() function. Update the pk_state, pk_type,
+ * pk_len and the protected key in the tfm context.
+ * Please note this function may run concurrently on the very same tfm
+ * context. The pk_lock spinlock in the context ensures an atomic
+ * update of the pk and the pk state but does not guarantee any
+ * particular order of updates. So a freshly converted, valid protected
+ * key may get overwritten with an 'old', expired key value. This does
+ * no harm: the cpacf instructions detect the invalid key, refuse to
+ * operate with it, and the calling code then triggers a
+ * (re-)conversion. This may cost an unnecessary extra conversion, but
+ * never produces invalid data in the hash operation.
+ */
+static int phmac_convert_key(struct phmac_tfm_ctx *tfm_ctx)
+{
+ struct phmac_protkey pk;
+ int rc;
+
+ spin_lock_bh(&tfm_ctx->pk_lock);
+ tfm_ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
+ spin_unlock_bh(&tfm_ctx->pk_lock);
+
+ rc = convert_key(tfm_ctx->keybuf, tfm_ctx->keylen, &pk);
+
+ /* update context */
+ spin_lock_bh(&tfm_ctx->pk_lock);
+ if (rc) {
+ tfm_ctx->pk_state = rc;
+ } else {
+ tfm_ctx->pk_state = PK_STATE_VALID;
+ tfm_ctx->pk = pk;
+ }
+ spin_unlock_bh(&tfm_ctx->pk_lock);
+
+ memzero_explicit(&pk, sizeof(pk));
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize
+ */
+static inline void kmac_sha2_set_imbl(u8 *param, u64 buflen_lo,
+ u64 buflen_hi, unsigned int blocksize)
+{
+ u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize);
+
+ switch (blocksize) {
+ case SHA256_BLOCK_SIZE:
+ *(u64 *)imbl = buflen_lo * BITS_PER_BYTE;
+ break;
+ case SHA512_BLOCK_SIZE:
+ *(u128 *)imbl = (((u128)buflen_hi << 64) + buflen_lo) << 3;
+ break;
+ default:
+ break;
+ }
+}
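+
+/*
+ * Worked example (editor's illustration): for phmac(sha256) the
+ * blocksize is SHA256_BLOCK_SIZE (64), so a total message length of
+ * 10 bytes stores the 64 bit value 80 (10 * BITS_PER_BYTE) at
+ * SHA2_IMBL_OFFSET(64). For the 128 byte SHA-512 blocksize the full
+ * 128 bit bit-count built from buflen_hi:buflen_lo, shifted left by
+ * 3, is stored instead.
+ */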
+
+static int phmac_kmac_update(struct ahash_request *req, bool maysleep)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
+ struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
+ struct hash_walk_helper *hwh = &req_ctx->hwh;
+ unsigned int bs = crypto_ahash_blocksize(tfm);
+ unsigned int offset, k, n;
+ int rc = 0;
+
+ /*
+ * The walk is always mapped when this function is called.
+ * Note that in case of partial processing or failure the walk
+ * is NOT unmapped here. So a follow-up task may reuse the walk
+ * or, in case of unrecoverable failure, needs to unmap it.
+ */
+
+ while (hwh->walkbytes > 0) {
+ /* check sha2 context buffer */
+ offset = ctx->buflen[0] % bs;
+ if (offset + hwh->walkbytes < bs)
+ goto store;
+
+ if (offset) {
+ /* fill ctx buffer up to blocksize and process this block */
+ n = bs - offset;
+ memcpy(ctx->buf + offset, hwh->walkaddr, n);
+ ctx->gr0.iimp = 1;
+ for (;;) {
+ k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs);
+ if (likely(k == bs))
+ break;
+ if (unlikely(k > 0)) {
+ /*
+ * Can't deal with hunks smaller than the blocksize.
+ * Also, kmac should always return the nr of
+ * processed bytes as 0 or a multiple of the
+ * blocksize.
+ */
+ rc = -EIO;
+ goto out;
+ }
+ /* protected key is invalid and needs re-conversion */
+ if (!maysleep) {
+ rc = -EKEYEXPIRED;
+ goto out;
+ }
+ rc = phmac_convert_key(tfm_ctx);
+ if (rc)
+ goto out;
+ spin_lock_bh(&tfm_ctx->pk_lock);
+ memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
+ tfm_ctx->pk.protkey, tfm_ctx->pk.len);
+ spin_unlock_bh(&tfm_ctx->pk_lock);
+ }
+ ctx->buflen[0] += n;
+ if (ctx->buflen[0] < n)
+ ctx->buflen[1]++;
+ rc = hwh_advance(hwh, n);
+ if (unlikely(rc))
+ goto out;
+ offset = 0;
+ }
+
+ /* process as many blocks as possible from the walk */
+ while (hwh->walkbytes >= bs) {
+ n = (hwh->walkbytes / bs) * bs;
+ ctx->gr0.iimp = 1;
+ k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, hwh->walkaddr, n);
+ if (likely(k > 0)) {
+ ctx->buflen[0] += k;
+ if (ctx->buflen[0] < k)
+ ctx->buflen[1]++;
+ rc = hwh_advance(hwh, k);
+ if (unlikely(rc))
+ goto out;
+ }
+ if (unlikely(k < n)) {
+ /* protected key is invalid and needs re-conversion */
+ if (!maysleep) {
+ rc = -EKEYEXPIRED;
+ goto out;
+ }
+ rc = phmac_convert_key(tfm_ctx);
+ if (rc)
+ goto out;
+ spin_lock_bh(&tfm_ctx->pk_lock);
+ memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
+ tfm_ctx->pk.protkey, tfm_ctx->pk.len);
+ spin_unlock_bh(&tfm_ctx->pk_lock);
+ }
+ }
+
+store:
+ /* store incomplete block in context buffer */
+ if (hwh->walkbytes) {
+ memcpy(ctx->buf + offset, hwh->walkaddr, hwh->walkbytes);
+ ctx->buflen[0] += hwh->walkbytes;
+ if (ctx->buflen[0] < hwh->walkbytes)
+ ctx->buflen[1]++;
+ rc = hwh_advance(hwh, hwh->walkbytes);
+ if (unlikely(rc))
+ goto out;
+ }
+
+ } /* end of while (hwh->walkbytes > 0) */
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
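+
+/*
+ * Buffering invariant of phmac_kmac_update() (editor's sketch): only
+ * the trailing ctx->buflen[0] % bs bytes are ever kept in ctx->buf;
+ * all preceding full blocks have been fed to KMAC with gr0.iimp = 1
+ * (intermediate). Example with bs = 64: update() calls of 10, 60 and
+ * 70 bytes process one block during the second call and one during
+ * the third, leaving 6 and then 12 bytes buffered.
+ */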
+
+static int phmac_kmac_final(struct ahash_request *req, bool maysleep)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
+ struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
+ unsigned int ds = crypto_ahash_digestsize(tfm);
+ unsigned int bs = crypto_ahash_blocksize(tfm);
+ unsigned int k, n;
+ int rc = 0;
+
+ n = ctx->buflen[0] % bs;
+ ctx->gr0.iimp = 0;
+ kmac_sha2_set_imbl(ctx->param, ctx->buflen[0], ctx->buflen[1], bs);
+ for (;;) {
+ k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, n);
+ if (likely(k == n))
+ break;
+ if (unlikely(k > 0)) {
+ /* Can't deal with hunks smaller than the blocksize. */
+ rc = -EIO;
+ goto out;
+ }
+ /* protected key is invalid and needs re-conversion */
+ if (!maysleep) {
+ rc = -EKEYEXPIRED;
+ goto out;
+ }
+ rc = phmac_convert_key(tfm_ctx);
+ if (rc)
+ goto out;
+ spin_lock_bh(&tfm_ctx->pk_lock);
+ memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
+ tfm_ctx->pk.protkey, tfm_ctx->pk.len);
+ spin_unlock_bh(&tfm_ctx->pk_lock);
+ }
+
+ memcpy(req->result, ctx->param, ds);
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int phmac_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
+ struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
+ unsigned int bs = crypto_ahash_blocksize(tfm);
+ int rc = 0;
+
+ /* zero request context (includes the kmac sha2 context) */
+ memset(req_ctx, 0, sizeof(*req_ctx));
+
+ /*
+ * setkey() should have set a valid fc into the tfm context.
+ * Copy this function code into the gr0 field of the kmac context.
+ */
+ if (!tfm_ctx->fc) {
+ rc = -ENOKEY;
+ goto out;
+ }
+ kmac_ctx->gr0.fc = tfm_ctx->fc;
+
+ /*
+ * Copy the pk from tfm ctx into kmac ctx. The protected key
+ * may be outdated but update() and final() will handle this.
+ */
+ spin_lock_bh(&tfm_ctx->pk_lock);
+ memcpy(kmac_ctx->param + SHA2_KEY_OFFSET(bs),
+ tfm_ctx->pk.protkey, tfm_ctx->pk.len);
+ spin_unlock_bh(&tfm_ctx->pk_lock);
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int phmac_update(struct ahash_request *req)
+{
+ struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
+ struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
+ struct hash_walk_helper *hwh = &req_ctx->hwh;
+ int rc;
+
+ /* prep the walk in the request context */
+ rc = hwh_prepare(req, hwh);
+ if (rc)
+ goto out;
+
+ /* Try synchronous operation if no active engine usage */
+ if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
+ rc = phmac_kmac_update(req, false);
+ if (rc == 0)
+ goto out;
+ }
+
+ /*
+ * If the sync operation failed, the key expired, or there are
+ * already requests enqueued via the engine, fall back to async.
+ * Mark the tfm as using the engine to serialize requests.
+ */
+ if (rc == 0 || rc == -EKEYEXPIRED) {
+ atomic_inc(&tfm_ctx->via_engine_ctr);
+ rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
+ if (rc != -EINPROGRESS)
+ atomic_dec(&tfm_ctx->via_engine_ctr);
+ }
+
+ if (rc != -EINPROGRESS) {
+ hwh_advance(hwh, rc);
+ memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
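+
+/*
+ * Editor's note on the pattern above (final() and finup() below work
+ * the same way): the fast path runs KMAC synchronously in the caller's
+ * context with maysleep = false. Only if the protected key has expired
+ * (-EKEYEXPIRED) or other requests are already queued is the request
+ * transferred to the crypto engine, whose worker may sleep and thus
+ * re-convert the key via phmac_convert_key().
+ */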
+
+static int phmac_final(struct ahash_request *req)
+{
+ struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
+ struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
+ int rc = 0;
+
+ /* Try synchronous operation if no active engine usage */
+ if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
+ rc = phmac_kmac_final(req, false);
+ if (rc == 0)
+ goto out;
+ }
+
+ /*
+ * If the sync operation failed, the key expired, or there are
+ * already requests enqueued via the engine, fall back to async.
+ * Mark the tfm as using the engine to serialize requests.
+ */
+ if (rc == 0 || rc == -EKEYEXPIRED) {
+ req->nbytes = 0;
+ req_ctx->final = true;
+ atomic_inc(&tfm_ctx->via_engine_ctr);
+ rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
+ if (rc != -EINPROGRESS)
+ atomic_dec(&tfm_ctx->via_engine_ctr);
+ }
+
+out:
+ if (rc != -EINPROGRESS)
+ memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int phmac_finup(struct ahash_request *req)
+{
+ struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
+ struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
+ struct hash_walk_helper *hwh = &req_ctx->hwh;
+ int rc;
+
+ /* prep the walk in the request context */
+ rc = hwh_prepare(req, hwh);
+ if (rc)
+ goto out;
+
+ /* Try synchronous operation if no active engine usage */
+ if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
+ rc = phmac_kmac_update(req, false);
+ if (rc == 0)
+ req->nbytes = 0;
+ }
+ if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) {
+ rc = phmac_kmac_final(req, false);
+ if (rc == 0)
+ goto out;
+ }
+
+ /*
+ * If the sync operation failed, the key expired, or there are
+ * already requests enqueued via the engine, fall back to async.
+ * Mark the tfm as using the engine to serialize requests.
+ */
+ if (rc == 0 || rc == -EKEYEXPIRED) {
+ req_ctx->final = true;
+ atomic_inc(&tfm_ctx->via_engine_ctr);
+ rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
+ if (rc != -EINPROGRESS)
+ atomic_dec(&tfm_ctx->via_engine_ctr);
+ }
+
+ if (rc != -EINPROGRESS)
+ hwh_advance(hwh, rc);
+
+out:
+ if (rc != -EINPROGRESS)
+ memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int phmac_digest(struct ahash_request *req)
+{
+ int rc;
+
+ rc = phmac_init(req);
+ if (rc)
+ goto out;
+
+ rc = phmac_finup(req);
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
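+
+/*
+ * Usage sketch (editor's illustration, error handling omitted): these
+ * ops are reached through the generic ahash API, e.g.
+ *
+ *	tfm = crypto_alloc_ahash("phmac(sha256)", 0, 0);
+ *	crypto_ahash_setkey(tfm, keytoken, keytokenlen);
+ *	req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *	ahash_request_set_crypt(req, sgl, digest, len);
+ *	rc = crypto_ahash_digest(req);
+ *
+ * where keytoken is a key token digestible by the pkey module (e.g.
+ * a secure key token, or a clear key token as built above).
+ */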
+
+static int phmac_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
+ unsigned int ds = crypto_ahash_digestsize(tfm);
+ unsigned int bs = crypto_ahash_blocksize(tfm);
+ unsigned int tmpkeylen;
+ u8 *tmpkey = NULL;
+ int rc = 0;
+
+ if (!crypto_ahash_tested(tfm)) {
+ /*
+ * selftest running: the key is a raw hmac clear key and needs
+ * to be embedded into a 'clear key token' in order to have
+ * it correctly processed by the pkey module.
+ */
+ tmpkeylen = sizeof(struct hmac_clrkey_token) + bs;
+ tmpkey = kzalloc(tmpkeylen, GFP_KERNEL);
+ if (!tmpkey) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ rc = make_clrkey_token(key, keylen, ds, tmpkey);
+ if (rc)
+ goto out;
+ keylen = tmpkeylen;
+ key = tmpkey;
+ }
+
+ /* copy raw key into tfm context */
+ rc = phmac_tfm_ctx_setkey(tfm_ctx, key, keylen);
+ if (rc)
+ goto out;
+
+ /* convert raw key into protected key */
+ rc = phmac_convert_key(tfm_ctx);
+ if (rc)
+ goto out;
+
+ /* set function code in tfm context, check for valid pk type */
+ switch (ds) {
+ case SHA224_DIGEST_SIZE:
+ if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
+ rc = -EINVAL;
+ else
+ tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_224;
+ break;
+ case SHA256_DIGEST_SIZE:
+ if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
+ rc = -EINVAL;
+ else
+ tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_256;
+ break;
+ case SHA384_DIGEST_SIZE:
+ if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
+ rc = -EINVAL;
+ else
+ tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_384;
+ break;
+ case SHA512_DIGEST_SIZE:
+ if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
+ rc = -EINVAL;
+ else
+ tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_512;
+ break;
+ default:
+ tfm_ctx->fc = 0;
+ rc = -EINVAL;
+ }
+
+out:
+ kfree(tmpkey);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int phmac_export(struct ahash_request *req, void *out)
+{
+ struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
+
+ memcpy(out, ctx, sizeof(*ctx));
+
+ return 0;
+}
+
+static int phmac_import(struct ahash_request *req, const void *in)
+{
+ struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
+
+ memset(req_ctx, 0, sizeof(*req_ctx));
+ memcpy(ctx, in, sizeof(*ctx));
+
+ return 0;
+}
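+
+/*
+ * Editor's note: statesize is sizeof(struct kmac_sha2_ctx) (see the
+ * alg template below), so an export()/import() pair transplants the
+ * complete intermediate KMAC state - parameter block, partial block
+ * buffer and message length - into a fresh request, allowing a
+ * partially hashed message to be continued later.
+ */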
+
+static int phmac_init_tfm(struct crypto_ahash *tfm)
+{
+ struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
+
+ memset(tfm_ctx, 0, sizeof(*tfm_ctx));
+ spin_lock_init(&tfm_ctx->pk_lock);
+
+ crypto_ahash_set_reqsize(tfm, sizeof(struct phmac_req_ctx));
+
+ return 0;
+}
+
+static void phmac_exit_tfm(struct crypto_ahash *tfm)
+{
+ struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
+
+ memzero_explicit(tfm_ctx->keybuf, sizeof(tfm_ctx->keybuf));
+ memzero_explicit(&tfm_ctx->pk, sizeof(tfm_ctx->pk));
+}
+
+static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
+ struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
+ struct hash_walk_helper *hwh = &req_ctx->hwh;
+ int rc = -EINVAL;
+
+ /*
+ * Three kinds of requests come in here:
+ * update when req->nbytes > 0 and req_ctx->final is false
+ * final when req->nbytes == 0 and req_ctx->final is true
+ * finup when req->nbytes > 0 and req_ctx->final is true
+ * For update and finup the hwh walk needs to be prepared and
+ * up to date, but the actual nr of bytes in req->nbytes may be
+ * any non-zero number. For final no hwh walk is needed.
+ */
+
+ if (req->nbytes) {
+ rc = phmac_kmac_update(req, true);
+ if (rc == -EKEYEXPIRED) {
+ /*
+ * Protected key expired, conversion is in progress.
+ * Trigger a re-schedule of this request by returning
+ * -ENOSPC ("hardware queue full") to the crypto engine.
+ * To avoid immediate re-invocation of this callback,
+ * tell the scheduler to voluntarily give up the CPU here.
+ */
+ pr_debug("rescheduling request\n");
+ cond_resched();
+ return -ENOSPC;
+ } else if (rc) {
+ hwh_advance(hwh, rc);
+ goto out;
+ }
+ req->nbytes = 0;
+ }
+
+ if (req_ctx->final) {
+ rc = phmac_kmac_final(req, true);
+ if (rc == -EKEYEXPIRED) {
+ /*
+ * Protected key expired, conversion is in progress.
+ * Trigger a re-schedule of this request by returning
+ * -ENOSPC ("hardware queue full") to the crypto engine.
+ * To avoid immediate re-invocation of this callback,
+ * tell the scheduler to voluntarily give up the CPU here.
+ */
+ pr_debug("rescheduling request\n");
+ cond_resched();
+ return -ENOSPC;
+ }
+ }
+
+out:
+ if (rc || req_ctx->final)
+ memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
+ pr_debug("request complete with rc=%d\n", rc);
+ local_bh_disable();
+ atomic_dec(&tfm_ctx->via_engine_ctr);
+ crypto_finalize_hash_request(engine, req, rc);
+ local_bh_enable();
+ return rc;
+}
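+
+/*
+ * Editor's note: returning -ENOSPC relies on the engine having been
+ * allocated with retry_support = true (see s390_phmac_init() below);
+ * the crypto engine then re-enqueues the request instead of failing it.
+ */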
+
+#define S390_ASYNC_PHMAC_ALG(x) \
+{ \
+ .base = { \
+ .init = phmac_init, \
+ .update = phmac_update, \
+ .final = phmac_final, \
+ .finup = phmac_finup, \
+ .digest = phmac_digest, \
+ .setkey = phmac_setkey, \
+ .import = phmac_import, \
+ .export = phmac_export, \
+ .init_tfm = phmac_init_tfm, \
+ .exit_tfm = phmac_exit_tfm, \
+ .halg = { \
+ .digestsize = SHA##x##_DIGEST_SIZE, \
+ .statesize = sizeof(struct kmac_sha2_ctx), \
+ .base = { \
+ .cra_name = "phmac(sha" #x ")", \
+ .cra_driver_name = "phmac_s390_sha" #x, \
+ .cra_blocksize = SHA##x##_BLOCK_SIZE, \
+ .cra_priority = 400, \
+ .cra_flags = CRYPTO_ALG_ASYNC | \
+ CRYPTO_ALG_NO_FALLBACK, \
+ .cra_ctxsize = sizeof(struct phmac_tfm_ctx), \
+ .cra_module = THIS_MODULE, \
+ }, \
+ }, \
+ }, \
+ .op = { \
+ .do_one_request = phmac_do_one_request, \
+ }, \
+}
+
+static struct phmac_alg {
+ unsigned int fc;
+ struct ahash_engine_alg alg;
+ bool registered;
+} phmac_algs[] = {
+ {
+ .fc = CPACF_KMAC_PHMAC_SHA_224,
+ .alg = S390_ASYNC_PHMAC_ALG(224),
+ }, {
+ .fc = CPACF_KMAC_PHMAC_SHA_256,
+ .alg = S390_ASYNC_PHMAC_ALG(256),
+ }, {
+ .fc = CPACF_KMAC_PHMAC_SHA_384,
+ .alg = S390_ASYNC_PHMAC_ALG(384),
+ }, {
+ .fc = CPACF_KMAC_PHMAC_SHA_512,
+ .alg = S390_ASYNC_PHMAC_ALG(512),
+ }
+};
+
+static struct miscdevice phmac_dev = {
+ .name = "phmac",
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+static void s390_phmac_exit(void)
+{
+ struct phmac_alg *phmac;
+ int i;
+
+ if (phmac_crypto_engine) {
+ crypto_engine_stop(phmac_crypto_engine);
+ crypto_engine_exit(phmac_crypto_engine);
+ }
+
+ for (i = ARRAY_SIZE(phmac_algs) - 1; i >= 0; i--) {
+ phmac = &phmac_algs[i];
+ if (phmac->registered)
+ crypto_engine_unregister_ahash(&phmac->alg);
+ }
+
+ misc_deregister(&phmac_dev);
+}
+
+static int __init s390_phmac_init(void)
+{
+ struct phmac_alg *phmac;
+ int i, rc;
+
+ /* the cpacf KLMD subfunction is needed for the selftest */
+ if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256))
+ return -ENODEV;
+ if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512))
+ return -ENODEV;
+
+ /* register a simple phmac pseudo misc device */
+ rc = misc_register(&phmac_dev);
+ if (rc)
+ return rc;
+
+ /* using this pseudo device, allocate and start a crypto engine */
+ phmac_crypto_engine =
+ crypto_engine_alloc_init_and_set(phmac_dev.this_device,
+ true, false, MAX_QLEN);
+ if (!phmac_crypto_engine) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+ rc = crypto_engine_start(phmac_crypto_engine);
+ if (rc) {
+ crypto_engine_exit(phmac_crypto_engine);
+ phmac_crypto_engine = NULL;
+ goto out_err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(phmac_algs); i++) {
+ phmac = &phmac_algs[i];
+ if (!cpacf_query_func(CPACF_KMAC, phmac->fc))
+ continue;
+ rc = crypto_engine_register_ahash(&phmac->alg);
+ if (rc)
+ goto out_err;
+ phmac->registered = true;
+ pr_debug("%s registered\n", phmac->alg.base.halg.base.cra_name);
+ }
+
+ return 0;
+
+out_err:
+ s390_phmac_exit();
+ return rc;
+}
+
+module_init(s390_phmac_init);
+module_exit(s390_phmac_exit);
+
+MODULE_ALIAS_CRYPTO("phmac(sha224)");
+MODULE_ALIAS_CRYPTO("phmac(sha256)");
+MODULE_ALIAS_CRYPTO("phmac(sha384)");
+MODULE_ALIAS_CRYPTO("phmac(sha512)");
+
+MODULE_DESCRIPTION("S390 HMAC driver for protected keys");
+MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h
index d757ccbce2b4..cadb4b13622a 100644
--- a/arch/s390/crypto/sha.h
+++ b/arch/s390/crypto/sha.h
@@ -27,6 +27,9 @@ struct s390_sha_ctx {
u64 state[SHA512_DIGEST_SIZE / sizeof(u64)];
u64 count_hi;
} sha512;
+ struct {
+ __le64 state[SHA3_STATE_SIZE / sizeof(u64)];
+ } sha3;
};
int func; /* KIMD function to use */
bool first_message_part;
diff --git a/arch/s390/crypto/sha3_256_s390.c b/arch/s390/crypto/sha3_256_s390.c
index 4a7731ac6bcd..03bb4f4bab70 100644
--- a/arch/s390/crypto/sha3_256_s390.c
+++ b/arch/s390/crypto/sha3_256_s390.c
@@ -35,23 +35,33 @@ static int sha3_256_init(struct shash_desc *desc)
static int sha3_256_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- struct sha3_state *octx = out;
+ union {
+ u8 *u8;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
if (sctx->first_message_part) {
- memset(sctx->state, 0, sizeof(sctx->state));
- sctx->first_message_part = 0;
+ memset(out, 0, SHA3_STATE_SIZE);
+ return 0;
}
- memcpy(octx->st, sctx->state, sizeof(octx->st));
+ for (i = 0; i < SHA3_STATE_SIZE / 8; i++)
+ put_unaligned(le64_to_cpu(sctx->sha3.state[i]), p.u64++);
return 0;
}
static int sha3_256_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- const struct sha3_state *ictx = in;
-
+ union {
+ const u8 *u8;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
+
+ for (i = 0; i < SHA3_STATE_SIZE / 8; i++)
+ sctx->sha3.state[i] = cpu_to_le64(get_unaligned(p.u64++));
sctx->count = 0;
- memcpy(sctx->state, ictx->st, sizeof(ictx->st));
sctx->first_message_part = 0;
sctx->func = CPACF_KIMD_SHA3_256;
diff --git a/arch/s390/crypto/sha3_512_s390.c b/arch/s390/crypto/sha3_512_s390.c
index 018f02fff444..a5c9690eecb1 100644
--- a/arch/s390/crypto/sha3_512_s390.c
+++ b/arch/s390/crypto/sha3_512_s390.c
@@ -34,24 +34,33 @@ static int sha3_512_init(struct shash_desc *desc)
static int sha3_512_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- struct sha3_state *octx = out;
-
+ union {
+ u8 *u8;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
if (sctx->first_message_part) {
- memset(sctx->state, 0, sizeof(sctx->state));
- sctx->first_message_part = 0;
+ memset(out, 0, SHA3_STATE_SIZE);
+ return 0;
}
- memcpy(octx->st, sctx->state, sizeof(octx->st));
+ for (i = 0; i < SHA3_STATE_SIZE / 8; i++)
+ put_unaligned(le64_to_cpu(sctx->sha3.state[i]), p.u64++);
return 0;
}
static int sha3_512_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- const struct sha3_state *ictx = in;
-
+ union {
+ const u8 *u8;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
+
+ for (i = 0; i < SHA3_STATE_SIZE / 8; i++)
+ sctx->sha3.state[i] = cpu_to_le64(get_unaligned(p.u64++));
sctx->count = 0;
- memcpy(sctx->state, ictx->st, sizeof(ictx->st));
sctx->first_message_part = 0;
sctx->func = CPACF_KIMD_SHA3_512;
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index b5e2c365ea05..d6f839618794 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -9,6 +9,7 @@
*/
#include <crypto/internal/hash.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <asm/cpacf.h>
#include "sha.h"
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index c7bf60a541e9..1c56480def9e 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -51,7 +51,7 @@
ALT_TYPE_SPEC << ALT_TYPE_SHIFT | \
(facility) << ALT_DATA_SHIFT)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/stddef.h>
@@ -183,7 +183,7 @@ static inline void apply_alternatives(struct alt_instr *start, struct alt_instr
/* Use this macro if clobbers are needed without inputs. */
#define ASM_NO_INPUT_CLOBBER(clobber...) : clobber
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
/*
* Issue one struct alt_instr descriptor entry (need to put it into
@@ -233,6 +233,6 @@ static inline void apply_alternatives(struct alt_instr *start, struct alt_instr
.popsection
.endm
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_S390_ALTERNATIVE_H */
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index 395b02d6a133..352108727d7e 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -103,7 +103,7 @@ struct ap_tapq_hwinfo {
unsigned int accel : 1; /* A */
unsigned int ep11 : 1; /* X */
unsigned int apxa : 1; /* APXA */
- unsigned int : 1;
+ unsigned int slcf : 1; /* Cmd filtering avail. */
unsigned int class : 8;
unsigned int bs : 2; /* SE bind/assoc */
unsigned int : 14;
diff --git a/arch/s390/include/asm/asm-const.h b/arch/s390/include/asm/asm-const.h
index 11f615eb0066..1cfffad9eea0 100644
--- a/arch/s390/include/asm/asm-const.h
+++ b/arch/s390/include/asm/asm-const.h
@@ -2,7 +2,7 @@
#ifndef _ASM_S390_ASM_CONST_H
#define _ASM_S390_ASM_CONST_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
# define stringify_in_c(...) __VA_ARGS__
#else
/* This version of stringify will deal with commas... */
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 54cb97603ec0..4bc5317fbb12 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -129,6 +129,10 @@
#define CPACF_KMAC_HMAC_SHA_256 0x71
#define CPACF_KMAC_HMAC_SHA_384 0x72
#define CPACF_KMAC_HMAC_SHA_512 0x73
+#define CPACF_KMAC_PHMAC_SHA_224 0x78
+#define CPACF_KMAC_PHMAC_SHA_256 0x79
+#define CPACF_KMAC_PHMAC_SHA_384 0x7a
+#define CPACF_KMAC_PHMAC_SHA_512 0x7b
/*
* Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT)
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
index 26c710cd3485..5672e3fab52b 100644
--- a/arch/s390/include/asm/cpu.h
+++ b/arch/s390/include/asm/cpu.h
@@ -9,7 +9,7 @@
#ifndef _ASM_S390_CPU_H
#define _ASM_S390_CPU_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/jump_label.h>
@@ -24,5 +24,5 @@ struct cpuid
DECLARE_STATIC_KEY_FALSE(cpu_has_bear);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_S390_CPU_H */
diff --git a/arch/s390/include/asm/cpu_mf-insn.h b/arch/s390/include/asm/cpu_mf-insn.h
index a68b362e0964..941663939cc7 100644
--- a/arch/s390/include/asm/cpu_mf-insn.h
+++ b/arch/s390/include/asm/cpu_mf-insn.h
@@ -8,7 +8,7 @@
#ifndef _ASM_S390_CPU_MF_INSN_H
#define _ASM_S390_CPU_MF_INSN_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
/* Macro to generate the STCCTM instruction with a customized
* M3 field designating the counter set.
@@ -17,6 +17,6 @@
.insn rsy,0xeb0000000017,\r1,\m3 & 0xf,\db2
.endm
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/s390/include/asm/ctlreg.h b/arch/s390/include/asm/ctlreg.h
index e6527f51ad0b..e93cc240a1ed 100644
--- a/arch/s390/include/asm/ctlreg.h
+++ b/arch/s390/include/asm/ctlreg.h
@@ -80,7 +80,7 @@
#define CR14_EXTERNAL_DAMAGE_SUBMASK BIT(CR14_EXTERNAL_DAMAGE_SUBMASK_BIT)
#define CR14_WARNING_SUBMASK BIT(CR14_WARNING_SUBMASK_BIT)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/bug.h>
@@ -252,5 +252,5 @@ union ctlreg15 {
};
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_S390_CTLREG_H */
diff --git a/arch/s390/include/asm/dwarf.h b/arch/s390/include/asm/dwarf.h
index 390906b8e386..e3ad6798d0cd 100644
--- a/arch/s390/include/asm/dwarf.h
+++ b/arch/s390/include/asm/dwarf.h
@@ -2,7 +2,7 @@
#ifndef _ASM_S390_DWARF_H
#define _ASM_S390_DWARF_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
@@ -33,6 +33,6 @@
.cfi_sections .eh_frame, .debug_frame
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_S390_DWARF_H */
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index 35555c944630..979af986a8fe 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -59,4 +59,14 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
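+/*
+ * Editor's note (interpretation of the helper below): a task that is
+ * executing a KVM guest (PF_VCPU set) is reported as being in an RCU
+ * extended quiescent state, so RCU need not disturb the CPU while it
+ * runs guest code.
+ */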
+static __always_inline bool arch_in_rcu_eqs(void)
+{
+ if (IS_ENABLED(CONFIG_KVM))
+ return current->flags & PF_VCPU;
+
+ return false;
+}
+
+#define arch_in_rcu_eqs arch_in_rcu_eqs
+
#endif
diff --git a/arch/s390/include/asm/extmem.h b/arch/s390/include/asm/extmem.h
index e0a06060afdd..225ee89c3f5e 100644
--- a/arch/s390/include/asm/extmem.h
+++ b/arch/s390/include/asm/extmem.h
@@ -6,7 +6,7 @@
#ifndef _ASM_S390X_DCSS_H
#define _ASM_S390X_DCSS_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* DCSS segment is defined as a contiguous range of pages using DEFSEG command.
diff --git a/arch/s390/include/asm/fpu-insn-asm.h b/arch/s390/include/asm/fpu-insn-asm.h
index d296322be4bc..cc0468fdf2d0 100644
--- a/arch/s390/include/asm/fpu-insn-asm.h
+++ b/arch/s390/include/asm/fpu-insn-asm.h
@@ -16,7 +16,7 @@
#error only <asm/fpu-insn.h> can be included directly
#endif
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
/* Macros to generate vector instruction byte code */
@@ -750,5 +750,5 @@
MRXBOPC 0, 0x77, v1, v2, v3
.endm
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_S390_FPU_INSN_ASM_H */
diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h
index f668bffd6dd3..135bb89c0a89 100644
--- a/arch/s390/include/asm/fpu-insn.h
+++ b/arch/s390/include/asm/fpu-insn.h
@@ -9,7 +9,7 @@
#include <asm/fpu-insn-asm.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/instrumented.h>
#include <asm/asm-extable.h>
@@ -475,5 +475,5 @@ static __always_inline void fpu_vzero(u8 v)
: "memory");
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_S390_FPU_INSN_H */
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 185331e91f83..bee2d16c2951 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -5,7 +5,7 @@
#define ARCH_SUPPORTS_FTRACE_OPS 1
#define MCOUNT_INSN_SIZE 6
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/stacktrace.h>
static __always_inline unsigned long return_address(unsigned int n)
@@ -134,7 +134,7 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);
#define ftrace_graph_func ftrace_graph_func
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#ifdef CONFIG_FUNCTION_TRACER
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index bde6a496df5f..697497e7d13e 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -25,7 +25,7 @@
#define EXT_IRQ_CP_SERVICE 0x2603
#define EXT_IRQ_IUCV 0x4000
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/hardirq.h>
#include <linux/percpu.h>
@@ -120,6 +120,6 @@ void irq_subclass_unregister(enum irq_subclass subclass);
#define irq_canonicalize(irq) (irq)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_IRQ_H */
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index bf78cf381dfc..d9cbc18f6b2e 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -4,7 +4,7 @@
#define HAVE_JUMP_LABEL_BATCH
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/stringify.h>
@@ -51,5 +51,5 @@ label:
return true;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index cb89e54ada25..f870d09515cc 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -716,6 +716,9 @@ extern char sie_exit;
bool kvm_s390_pv_is_protected(struct kvm *kvm);
bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu);
+extern int kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
+ u64 *gprs, unsigned long gasce);
+
extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index e99e9c87b1ce..d9c853db9a40 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -22,7 +22,7 @@
#define LOWCORE_ALT_ADDRESS _AC(0x70000, UL)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct pgm_tdb {
u64 data[32];
@@ -237,7 +237,7 @@ static inline void set_prefix(__u32 address)
asm volatile("spx %0" : : "Q" (address) : "memory");
}
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
.macro GET_LC reg
ALTERNATIVE "lghi \reg,0", \
@@ -251,5 +251,5 @@ static inline void set_prefix(__u32 address)
ALT_FEATURE(MFEATURE_LOWCORE)
.endm
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_S390_LOWCORE_H */
diff --git a/arch/s390/include/asm/machine.h b/arch/s390/include/asm/machine.h
index 8abe5afdbfc4..9bd4a9dc7778 100644
--- a/arch/s390/include/asm/machine.h
+++ b/arch/s390/include/asm/machine.h
@@ -20,7 +20,7 @@
#define MFEATURE_LPAR 9
#define MFEATURE_DIAG288 10
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/bitops.h>
#include <asm/alternative.h>
@@ -100,5 +100,5 @@ DEFINE_MACHINE_HAS_FEATURE(lpar, MFEATURE_LPAR)
#define machine_is_kvm machine_has_kvm
#define machine_is_lpar machine_has_lpar
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_S390_MACHINE_H */
diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
index b85e13505a0f..28c83ec1f243 100644
--- a/arch/s390/include/asm/mem_encrypt.h
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -2,11 +2,11 @@
#ifndef S390_MEM_ENCRYPT_H__
#define S390_MEM_ENCRYPT_H__
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
int set_memory_encrypted(unsigned long vaddr, int numpages);
int set_memory_decrypted(unsigned long vaddr, int numpages);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* S390_MEM_ENCRYPT_H__ */
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 227466ce9e41..6454c1531854 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -33,7 +33,7 @@
#define MCCK_CODE_FC_VALID BIT(63 - 43)
#define MCCK_CODE_CPU_TIMER_VALID BIT(63 - 46)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
union mci {
unsigned long val;
@@ -104,5 +104,5 @@ void nmi_free_mcesa(u64 *mcesad);
void s390_handle_mcck(void);
void s390_do_machine_check(struct pt_regs *regs);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_S390_NMI_H */
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
index c7c96282f011..81c4813cff18 100644
--- a/arch/s390/include/asm/nospec-branch.h
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -2,7 +2,7 @@
#ifndef _ASM_S390_EXPOLINE_H
#define _ASM_S390_EXPOLINE_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <asm/facility.h>
@@ -42,6 +42,6 @@ void __s390_indirect_jump_r13(void);
void __s390_indirect_jump_r14(void);
void __s390_indirect_jump_r15(void);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index cb15dd25bf21..6ce6b56e282b 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -3,9 +3,10 @@
#define _ASM_S390_NOSPEC_ASM_H
#include <linux/linkage.h>
+#include <linux/export.h>
#include <asm/dwarf.h>
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#ifdef CC_USING_EXPOLINE
@@ -128,6 +129,6 @@
.endm
#endif /* CC_USING_EXPOLINE */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_S390_NOSPEC_ASM_H */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 4e5dbabdf202..9240a363c893 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -33,7 +33,7 @@
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#include <asm/setup.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
void __storage_key_init_range(unsigned long start, unsigned long end);
@@ -130,11 +130,19 @@ typedef pte_t *pgtable_t;
static inline void page_set_storage_key(unsigned long addr,
unsigned char skey, int mapped)
{
- if (!mapped)
- asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
- : : "d" (skey), "a" (addr));
- else
- asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
+ if (!mapped) {
+ asm volatile(
+ " .insn rrf,0xb22b0000,%[skey],%[addr],8,0"
+ :
+ : [skey] "d" (skey), [addr] "a" (addr)
+ : "memory");
+ } else {
+ asm volatile(
+ " sske %[skey],%[addr]"
+ :
+ : [skey] "d" (skey), [addr] "a" (addr)
+ : "memory");
+ }
}
static inline unsigned char page_get_storage_key(unsigned long addr)
@@ -274,7 +282,7 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 84f6b8357b45..96af7d964014 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -16,10 +16,9 @@
* For 64 bit module code, the module may be more than 4G above the
* per cpu area, use weak definitions to force the compiler to
* generate external references.
+ * Therefore, we have enabled CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU
+ * in the Kconfig.
*/
-#if defined(MODULE)
-#define ARCH_NEEDS_WEAK_PER_CPU
-#endif
/*
* We use a compare-and-swap loop since that uses less cpu cycles than
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 6d8bc27a366e..c1a7a92f0575 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -963,6 +963,12 @@ static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define pmd_swp_soft_dirty(pmd) pmd_soft_dirty(pmd)
+#define pmd_swp_mksoft_dirty(pmd) pmd_mksoft_dirty(pmd)
+#define pmd_swp_clear_soft_dirty(pmd) pmd_clear_soft_dirty(pmd)
+#endif
+
/*
* query functions pte_write/pte_dirty/pte_young only work if
* pte_present() is true. Undefined behaviour if not..
@@ -1979,6 +1985,45 @@ static inline unsigned long __swp_offset_rste(swp_entry_t entry)
#define __rste_to_swp_entry(rste) ((swp_entry_t) { rste })
+/*
+ * s390 has different layout for PTE and region / segment table entries (RSTE).
+ * This is also true for swap entries, and their swap type and offset encoding.
+ * For hugetlbfs PTE_MARKER support, s390 has internal __swp_type_rste() and
+ * __swp_offset_rste() helpers to correctly handle RSTE swap entries.
+ *
+ * But common swap code does not know about this difference, and only uses
+ * __swp_type(), __swp_offset() and __swp_entry() helpers for conversion between
+ * arch-dependent and arch-independent representation of swp_entry_t for all
+ * pagetable levels. On s390, those helpers only work for PTE swap entries.
+ *
+ * Therefore, implement __pmd_to_swp_entry() to build a fake PTE swap entry
+ * and return the arch-dependent representation of that. Correspondingly,
+ * implement __swp_entry_to_pmd() to convert that into a proper PMD swap
+ * entry again. With this, the arch-dependent swp_entry_t representation will
+ * always look like a PTE swap entry in common code.
+ *
+ * This is somewhat similar to fake PTEs in hugetlbfs code for s390, but only
+ * requires conversion of the swap type and offset, and not all the possible
+ * PTE bits.
+ */
+static inline swp_entry_t __pmd_to_swp_entry(pmd_t pmd)
+{
+ swp_entry_t arch_entry;
+ pte_t pte;
+
+ arch_entry = __rste_to_swp_entry(pmd_val(pmd));
+ pte = mk_swap_pte(__swp_type_rste(arch_entry), __swp_offset_rste(arch_entry));
+ return __pte_to_swp_entry(pte);
+}
+
+static inline pmd_t __swp_entry_to_pmd(swp_entry_t arch_entry)
+{
+ pmd_t pmd;
+
+ pmd = __pmd(mk_swap_rste(__swp_type(arch_entry), __swp_offset(arch_entry)));
+ return pmd;
+}
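+
+/*
+ * Round-trip sketch (editor's illustration): for a PMD swap entry
+ *
+ *	swp_entry_t entry = __pmd_to_swp_entry(pmd);
+ *	pmd_t pmd2 = __swp_entry_to_pmd(entry);
+ *
+ * pmd2 encodes the same swap type and offset as pmd; common code only
+ * ever sees the intermediate PTE-style representation.
+ */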
+
extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6c8063cb8fe7..6a9c08b80eda 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -26,7 +26,7 @@
#define RESTART_FLAG_CTLREGS _AC(1 << 0, U)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/cpumask.h>
#include <linux/linkage.h>
@@ -418,6 +418,6 @@ static __always_inline void bpon(void)
);
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 0905fa99a31e..dfa770b15fad 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -54,7 +54,7 @@
PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct psw_bits {
unsigned long : 1;
@@ -292,5 +292,5 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
regs->gprs[2] = rc;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _S390_PTRACE_H */
diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h
index e297bcfc476f..4c7a43bc43a1 100644
--- a/arch/s390/include/asm/purgatory.h
+++ b/arch/s390/include/asm/purgatory.h
@@ -7,11 +7,11 @@
#ifndef _S390_PURGATORY_H_
#define _S390_PURGATORY_H_
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/purgatory.h>
int verify_sha256_digest(void);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _S390_PURGATORY_H_ */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 1e62919bacf4..0f184dbdbe5e 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -21,7 +21,7 @@
#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2
#define SCLP_ERRNOTIFY_AQ_OPTICS_DATA 3
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/uio.h>
#include <asm/chpid.h>
#include <asm/cpu.h>
@@ -199,5 +199,5 @@ static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
return _sclp_get_core_info(info);
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 031e881b4d88..7c57ac968bf6 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -24,7 +24,7 @@
#define LEGACY_COMMAND_LINE_SIZE 896
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/lowcore.h>
#include <asm/types.h>
@@ -41,6 +41,8 @@ struct parmarea {
char command_line[COMMAND_LINE_SIZE]; /* 0x10480 */
};
+extern char arch_hw_string[128];
+
extern struct parmarea parmarea;
extern unsigned int zlib_dfltcc_support;
@@ -100,5 +102,5 @@ static __always_inline u32 gen_lpswe(unsigned long addr)
BUILD_BUG_ON(addr > 0xfff);
return 0xb2b20000 | addr;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_S390_SETUP_H */
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index 472943b77066..97d77868f83c 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -36,7 +36,7 @@
#define SIGP_STATUS_INCORRECT_STATE 0x00000200UL
#define SIGP_STATUS_NOT_RUNNING 0x00000400UL
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/asm.h>
@@ -68,6 +68,6 @@ static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
return cc;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __S390_ASM_SIGP_H */
diff --git a/arch/s390/include/asm/skey.h b/arch/s390/include/asm/skey.h
new file mode 100644
index 000000000000..84e7cf28b712
--- /dev/null
+++ b/arch/s390/include/asm/skey.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SKEY_H
+#define __ASM_SKEY_H
+
+#include <asm/rwonce.h>
+
+struct skey_region {
+ unsigned long start;
+ unsigned long end;
+};
+
+#define SKEY_REGION(_start, _end) \
+ stringify_in_c(.section .skey_region,"a";) \
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.quad (_start);) \
+ stringify_in_c(.quad (_end);) \
+ stringify_in_c(.previous)
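+
+/*
+ * Usage sketch (hypothetical example): invoked from inline assembly,
+ * e.g. asm(... SKEY_REGION(0b, 1b)) between two local labels, the
+ * macro records the [start, end) address pair in the .skey_region
+ * section, where __skey_regions_initialize() can find it.
+ */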
+
+extern int skey_regions_initialized;
+extern struct skey_region __skey_region_start[];
+extern struct skey_region __skey_region_end[];
+
+void __skey_regions_initialize(void);
+
+static inline void skey_regions_initialize(void)
+{
+ if (READ_ONCE(skey_regions_initialized))
+ return;
+ __skey_regions_initialize();
+}
+
+#endif /* __ASM_SKEY_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 391eb04d26d8..f6ed2c8192c8 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -24,7 +24,7 @@
#define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* low level task data that entry.S needs immediate access to
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index bed8d0b5a282..59dfb8780f62 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -196,13 +196,6 @@ static inline unsigned long get_tod_clock_fast(void)
asm volatile("stckf %0" : "=Q" (clk) : : "cc");
return clk;
}
-
-static inline cycles_t get_cycles(void)
-{
- return (cycles_t) get_tod_clock() >> 2;
-}
-#define get_cycles get_cycles
-
int get_phys_clock(unsigned long *clock);
void init_cpu_timer(void);
@@ -230,6 +223,12 @@ static inline unsigned long get_tod_clock_monotonic(void)
return tod;
}
+static inline cycles_t get_cycles(void)
+{
+ return (cycles_t)get_tod_clock_monotonic() >> 2;
+}
+#define get_cycles get_cycles
+
/**
* tod_to_ns - convert a TOD format value to nanoseconds
* @todval: to be converted TOD format value
diff --git a/arch/s390/include/asm/tpi.h b/arch/s390/include/asm/tpi.h
index f76e5fdff23a..71c8b6f76cdd 100644
--- a/arch/s390/include/asm/tpi.h
+++ b/arch/s390/include/asm/tpi.h
@@ -5,7 +5,7 @@
#include <linux/types.h>
#include <uapi/asm/schid.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */
struct tpi_info {
@@ -32,6 +32,6 @@ struct tpi_adapter_info {
u32 :27;
} __packed __aligned(4);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_S390_TPI_H */
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 0b5d550a0478..53695b2196f7 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -5,7 +5,7 @@
#include <uapi/asm/types.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
union register_pair {
unsigned __int128 pair;
@@ -15,5 +15,5 @@ union register_pair {
};
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_S390_TYPES_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index a43fc88c0050..3e5b8b677057 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -473,188 +473,30 @@ do { \
void __cmpxchg_user_key_called_with_bad_pointer(void);
-#define CMPXCHG_USER_KEY_MAX_LOOPS 128
-
-static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
- __uint128_t old, __uint128_t new,
- unsigned long key, int size)
+int __cmpxchg_user_key1(unsigned long address, unsigned char *uval,
+ unsigned char old, unsigned char new, unsigned long key);
+int __cmpxchg_user_key2(unsigned long address, unsigned short *uval,
+ unsigned short old, unsigned short new, unsigned long key);
+int __cmpxchg_user_key4(unsigned long address, unsigned int *uval,
+ unsigned int old, unsigned int new, unsigned long key);
+int __cmpxchg_user_key8(unsigned long address, unsigned long *uval,
+ unsigned long old, unsigned long new, unsigned long key);
+int __cmpxchg_user_key16(unsigned long address, __uint128_t *uval,
+ __uint128_t old, __uint128_t new, unsigned long key);
+
+static __always_inline int _cmpxchg_user_key(unsigned long address, void *uval,
+ __uint128_t old, __uint128_t new,
+ unsigned long key, int size)
{
- bool sacf_flag;
- int rc = 0;
-
switch (size) {
- case 1: {
- unsigned int prev, shift, mask, _old, _new;
- unsigned long count;
-
- shift = (3 ^ (address & 3)) << 3;
- address ^= address & 3;
- _old = ((unsigned int)old & 0xff) << shift;
- _new = ((unsigned int)new & 0xff) << shift;
- mask = ~(0xff << shift);
- sacf_flag = enable_sacf_uaccess();
- asm_inline volatile(
- " spka 0(%[key])\n"
- " sacf 256\n"
- " llill %[count],%[max_loops]\n"
- "0: l %[prev],%[address]\n"
- "1: nr %[prev],%[mask]\n"
- " xilf %[mask],0xffffffff\n"
- " or %[new],%[prev]\n"
- " or %[prev],%[tmp]\n"
- "2: lr %[tmp],%[prev]\n"
- "3: cs %[prev],%[new],%[address]\n"
- "4: jnl 5f\n"
- " xr %[tmp],%[prev]\n"
- " xr %[new],%[tmp]\n"
- " nr %[tmp],%[mask]\n"
- " jnz 5f\n"
- " brct %[count],2b\n"
- "5: sacf 768\n"
- " spka %[default_key]\n"
- EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
- EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
- EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
- EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
- : [rc] "+&d" (rc),
- [prev] "=&d" (prev),
- [address] "+Q" (*(int *)address),
- [tmp] "+&d" (_old),
- [new] "+&d" (_new),
- [mask] "+&d" (mask),
- [count] "=a" (count)
- : [key] "%[count]" (key << 4),
- [default_key] "J" (PAGE_DEFAULT_KEY),
- [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
- : "memory", "cc");
- disable_sacf_uaccess(sacf_flag);
- *(unsigned char *)uval = prev >> shift;
- if (!count)
- rc = -EAGAIN;
- return rc;
- }
- case 2: {
- unsigned int prev, shift, mask, _old, _new;
- unsigned long count;
-
- shift = (2 ^ (address & 2)) << 3;
- address ^= address & 2;
- _old = ((unsigned int)old & 0xffff) << shift;
- _new = ((unsigned int)new & 0xffff) << shift;
- mask = ~(0xffff << shift);
- sacf_flag = enable_sacf_uaccess();
- asm_inline volatile(
- " spka 0(%[key])\n"
- " sacf 256\n"
- " llill %[count],%[max_loops]\n"
- "0: l %[prev],%[address]\n"
- "1: nr %[prev],%[mask]\n"
- " xilf %[mask],0xffffffff\n"
- " or %[new],%[prev]\n"
- " or %[prev],%[tmp]\n"
- "2: lr %[tmp],%[prev]\n"
- "3: cs %[prev],%[new],%[address]\n"
- "4: jnl 5f\n"
- " xr %[tmp],%[prev]\n"
- " xr %[new],%[tmp]\n"
- " nr %[tmp],%[mask]\n"
- " jnz 5f\n"
- " brct %[count],2b\n"
- "5: sacf 768\n"
- " spka %[default_key]\n"
- EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
- EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
- EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
- EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
- : [rc] "+&d" (rc),
- [prev] "=&d" (prev),
- [address] "+Q" (*(int *)address),
- [tmp] "+&d" (_old),
- [new] "+&d" (_new),
- [mask] "+&d" (mask),
- [count] "=a" (count)
- : [key] "%[count]" (key << 4),
- [default_key] "J" (PAGE_DEFAULT_KEY),
- [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
- : "memory", "cc");
- disable_sacf_uaccess(sacf_flag);
- *(unsigned short *)uval = prev >> shift;
- if (!count)
- rc = -EAGAIN;
- return rc;
- }
- case 4: {
- unsigned int prev = old;
-
- sacf_flag = enable_sacf_uaccess();
- asm_inline volatile(
- " spka 0(%[key])\n"
- " sacf 256\n"
- "0: cs %[prev],%[new],%[address]\n"
- "1: sacf 768\n"
- " spka %[default_key]\n"
- EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
- EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
- : [rc] "+&d" (rc),
- [prev] "+&d" (prev),
- [address] "+Q" (*(int *)address)
- : [new] "d" ((unsigned int)new),
- [key] "a" (key << 4),
- [default_key] "J" (PAGE_DEFAULT_KEY)
- : "memory", "cc");
- disable_sacf_uaccess(sacf_flag);
- *(unsigned int *)uval = prev;
- return rc;
- }
- case 8: {
- unsigned long prev = old;
-
- sacf_flag = enable_sacf_uaccess();
- asm_inline volatile(
- " spka 0(%[key])\n"
- " sacf 256\n"
- "0: csg %[prev],%[new],%[address]\n"
- "1: sacf 768\n"
- " spka %[default_key]\n"
- EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
- EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
- : [rc] "+&d" (rc),
- [prev] "+&d" (prev),
- [address] "+QS" (*(long *)address)
- : [new] "d" ((unsigned long)new),
- [key] "a" (key << 4),
- [default_key] "J" (PAGE_DEFAULT_KEY)
- : "memory", "cc");
- disable_sacf_uaccess(sacf_flag);
- *(unsigned long *)uval = prev;
- return rc;
- }
- case 16: {
- __uint128_t prev = old;
-
- sacf_flag = enable_sacf_uaccess();
- asm_inline volatile(
- " spka 0(%[key])\n"
- " sacf 256\n"
- "0: cdsg %[prev],%[new],%[address]\n"
- "1: sacf 768\n"
- " spka %[default_key]\n"
- EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev])
- EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev])
- : [rc] "+&d" (rc),
- [prev] "+&d" (prev),
- [address] "+QS" (*(__int128_t *)address)
- : [new] "d" (new),
- [key] "a" (key << 4),
- [default_key] "J" (PAGE_DEFAULT_KEY)
- : "memory", "cc");
- disable_sacf_uaccess(sacf_flag);
- *(__uint128_t *)uval = prev;
- return rc;
- }
+ case 1: return __cmpxchg_user_key1(address, uval, old, new, key);
+ case 2: return __cmpxchg_user_key2(address, uval, old, new, key);
+ case 4: return __cmpxchg_user_key4(address, uval, old, new, key);
+ case 8: return __cmpxchg_user_key8(address, uval, old, new, key);
+ case 16: return __cmpxchg_user_key16(address, uval, old, new, key);
+ default: __cmpxchg_user_key_called_with_bad_pointer();
}
- __cmpxchg_user_key_called_with_bad_pointer();
- return rc;
+ return 0;
}
/**
@@ -686,8 +528,8 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval))); \
might_fault(); \
__chk_user_ptr(__ptr); \
- __cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval), \
- (old), (new), (key), sizeof(*(__ptr))); \
+ _cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval), \
+ (old), (new), (key), sizeof(*(__ptr))); \
})
#endif /* __S390_UACCESS_H */
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 420a073fdde5..8e2fffa0ca68 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -4,11 +4,11 @@
#include <vdso/datapage.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
int vdso_getcpu_init(void);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#define __VDSO_PAGES 4
diff --git a/arch/s390/include/asm/vdso/getrandom.h b/arch/s390/include/asm/vdso/getrandom.h
index f8713ce39bb2..6741a27199f8 100644
--- a/arch/s390/include/asm/vdso/getrandom.h
+++ b/arch/s390/include/asm/vdso/getrandom.h
@@ -3,7 +3,7 @@
#ifndef __ASM_VDSO_GETRANDOM_H
#define __ASM_VDSO_GETRANDOM_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <vdso/datapage.h>
#include <asm/vdso/vsyscall.h>
@@ -23,6 +23,6 @@ static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsig
return syscall3(__NR_getrandom, (long)buffer, (long)len, (long)flags);
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/s390/include/asm/vdso/gettimeofday.h b/arch/s390/include/asm/vdso/gettimeofday.h
index fb4564308e9d..c31ac5f61c83 100644
--- a/arch/s390/include/asm/vdso/gettimeofday.h
+++ b/arch/s390/include/asm/vdso/gettimeofday.h
@@ -16,13 +16,7 @@
static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_time_data *vd)
{
- u64 adj, now;
-
- now = get_tod_clock();
- adj = vd->arch_data.tod_steering_end - now;
- if (unlikely((s64) adj > 0))
- now += (vd->arch_data.tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
- return now;
+ return get_tod_clock() - vd->arch_data.tod_delta;
}
static __always_inline
diff --git a/arch/s390/include/asm/vdso/time_data.h b/arch/s390/include/asm/vdso/time_data.h
index 8a08752422e6..25c4e0d9f596 100644
--- a/arch/s390/include/asm/vdso/time_data.h
+++ b/arch/s390/include/asm/vdso/time_data.h
@@ -5,8 +5,7 @@
#include <linux/types.h>
struct arch_vdso_time_data {
- __s64 tod_steering_delta;
- __u64 tod_steering_end;
+ __s64 tod_delta;
};
#endif /* __S390_ASM_VDSO_TIME_DATA_H */
diff --git a/arch/s390/include/asm/vdso/vsyscall.h b/arch/s390/include/asm/vdso/vsyscall.h
index d346ebe51301..b00acec8ddbc 100644
--- a/arch/s390/include/asm/vdso/vsyscall.h
+++ b/arch/s390/include/asm/vdso/vsyscall.h
@@ -2,7 +2,7 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/hrtimer.h>
#include <vdso/datapage.h>
@@ -11,6 +11,6 @@
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index bb0826024bb9..ea202072f1ad 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -242,7 +242,8 @@
#define PTRACE_OLDSETOPTIONS 21
#define PTRACE_SYSEMU 31
#define PTRACE_SYSEMU_SINGLESTEP 32
-#ifndef __ASSEMBLY__
+
+#ifndef __ASSEMBLER__
#include <linux/stddef.h>
#include <linux/types.h>
@@ -450,6 +451,6 @@ struct user_regs_struct {
unsigned long ieee_instruction_pointer; /* obsolete, always 0 */
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _UAPI_S390_PTRACE_H */
diff --git a/arch/s390/include/uapi/asm/schid.h b/arch/s390/include/uapi/asm/schid.h
index a3e1cf168553..d804d1a5b1b3 100644
--- a/arch/s390/include/uapi/asm/schid.h
+++ b/arch/s390/include/uapi/asm/schid.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct subchannel_id {
__u32 cssid : 8;
@@ -15,6 +15,6 @@ struct subchannel_id {
__u32 sch_no : 16;
} __attribute__ ((packed, aligned(4)));
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _UAPIASM_SCHID_H */
diff --git a/arch/s390/include/uapi/asm/types.h b/arch/s390/include/uapi/asm/types.h
index 84457dbb26b4..4ab468c5032e 100644
--- a/arch/s390/include/uapi/asm/types.h
+++ b/arch/s390/include/uapi/asm/types.h
@@ -10,7 +10,7 @@
#include <asm-generic/int-ll64.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
typedef unsigned long addr_t;
typedef __signed__ long saddr_t;
@@ -25,6 +25,6 @@ typedef struct {
};
} __attribute__((packed, aligned(4))) __vector128;
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _UAPI_S390_TYPES_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index ea5ed6654050..eb06ff888314 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -41,7 +41,7 @@ obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o vdso.o cpufeature.o
obj-y += sysinfo.o lgr.o os_info.o ctlreg.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
-obj-y += entry.o reipl.o kdebugfs.o alternative.o
+obj-y += entry.o reipl.o kdebugfs.o alternative.o skey.o
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o wti.o
obj-y += diag/
diff --git a/arch/s390/kernel/cpufeature.c b/arch/s390/kernel/cpufeature.c
index 76210f001028..c9eef9ed876b 100644
--- a/arch/s390/kernel/cpufeature.c
+++ b/arch/s390/kernel/cpufeature.c
@@ -4,6 +4,7 @@
*/
#include <linux/cpufeature.h>
+#include <linux/export.h>
#include <linux/bug.h>
#include <asm/machine.h>
#include <asm/elf.h>
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index adb164223f8c..d4839de8ce9d 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -7,6 +7,7 @@
*/
#include <linux/crash_dump.h>
+#include <linux/export.h>
#include <asm/lowcore.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/arch/s390/kernel/ctlreg.c b/arch/s390/kernel/ctlreg.c
index 8cc26cf2c64a..a0501f4c7e7a 100644
--- a/arch/s390/kernel/ctlreg.c
+++ b/arch/s390/kernel/ctlreg.c
@@ -5,6 +5,7 @@
#include <linux/irqflags.h>
#include <linux/spinlock.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 2a41be2f7925..c62100dc62c8 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1677,7 +1677,7 @@ EXPORT_SYMBOL(debug_dflt_header_fn);
/*
* prints debug data sprintf-formatted:
- * debug_sprinf_event/exception calls must be used together with this view
+ * debug_sprintf_event/exception calls must be used together with this view
*/
#define DEBUG_SPRINTF_MAX_ARGS 10
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 94eb8168ea44..63a1d4226ff8 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -17,7 +17,6 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 54cf0923050f..9adfbdd377dc 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -105,6 +105,8 @@ static inline void strim_all(char *str)
}
}
+char arch_hw_string[128];
+
static noinline __init void setup_arch_string(void)
{
struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
@@ -131,6 +133,7 @@ static noinline __init void setup_arch_string(void)
machine_is_vm() ? "z/VM" :
machine_is_kvm() ? "KVM" : "unknown");
}
+ sprintf(arch_hw_string, "HW: %s (%s)", mstr, hvstr);
dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}
@@ -154,6 +157,7 @@ void __init __do_early_pgm_check(struct pt_regs *regs)
regs->int_code = lc->pgm_int_code;
regs->int_parm_long = lc->trans_exc_code;
+ regs->last_break = lc->pgm_last_break;
ip = __rewind_psw(regs->psw, regs->int_code >> 16);
/* Monitor Event? Might be a warning */
diff --git a/arch/s390/kernel/facility.c b/arch/s390/kernel/facility.c
index f02127219a27..d028b0be5c1d 100644
--- a/arch/s390/kernel/facility.c
+++ b/arch/s390/kernel/facility.c
@@ -3,6 +3,7 @@
* Copyright IBM Corp. 2023
*/
+#include <linux/export.h>
#include <asm/facility.h>
unsigned int stfle_size(void)
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index 6f2e87920288..03a8973aec3c 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -5,6 +5,8 @@
* Copyright IBM Corp. 2015
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
+
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/sched.h>
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 3da371c144eb..11f33243a23f 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -9,6 +9,7 @@
*/
#include <linux/kernel_stat.h>
+#include <linux/utsname.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -21,7 +22,6 @@
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/kvm_host.h>
-#include <linux/export.h>
#include <asm/lowcore.h>
#include <asm/ctlreg.h>
#include <asm/fpu.h>
@@ -116,18 +116,82 @@ static __always_inline char *u64_to_hex(char *dest, u64 val)
return dest;
}
+static notrace void nmi_print_info(void)
+{
+ struct lowcore *lc = get_lowcore();
+ char message[100];
+ char *ptr;
+ int i;
+
+ ptr = nmi_puts(message, "Unrecoverable machine check, code: ");
+ ptr = u64_to_hex(ptr, lc->mcck_interruption_code);
+ ptr = nmi_puts(ptr, "\n");
+ sclp_emergency_printk(message);
+
+ ptr = nmi_puts(message, init_utsname()->release);
+ ptr = nmi_puts(ptr, "\n");
+ sclp_emergency_printk(message);
+
+ ptr = nmi_puts(message, arch_hw_string);
+ ptr = nmi_puts(ptr, "\n");
+ sclp_emergency_printk(message);
+
+ ptr = nmi_puts(message, "PSW: ");
+ ptr = u64_to_hex(ptr, lc->mcck_old_psw.mask);
+ ptr = nmi_puts(ptr, " ");
+ ptr = u64_to_hex(ptr, lc->mcck_old_psw.addr);
+ ptr = nmi_puts(ptr, " PFX: ");
+ ptr = u64_to_hex(ptr, (u64)get_lowcore());
+ ptr = nmi_puts(ptr, "\n");
+ sclp_emergency_printk(message);
+
+ ptr = nmi_puts(message, "LBA: ");
+ ptr = u64_to_hex(ptr, lc->last_break_save_area);
+ ptr = nmi_puts(ptr, " EDC: ");
+ ptr = u64_to_hex(ptr, lc->external_damage_code);
+ ptr = nmi_puts(ptr, " FSA: ");
+ ptr = u64_to_hex(ptr, lc->failing_storage_address);
+ ptr = nmi_puts(ptr, "\n");
+ sclp_emergency_printk(message);
+
+ ptr = nmi_puts(message, "CRS:\n");
+ sclp_emergency_printk(message);
+ ptr = message;
+ for (i = 0; i < 16; i++) {
+ ptr = u64_to_hex(ptr, lc->cregs_save_area[i].val);
+ ptr = nmi_puts(ptr, " ");
+ if ((i + 1) % 4 == 0) {
+ ptr = nmi_puts(ptr, "\n");
+ sclp_emergency_printk(message);
+ ptr = message;
+ }
+ }
+
+ ptr = nmi_puts(message, "GPRS:\n");
+ sclp_emergency_printk(message);
+ ptr = message;
+ for (i = 0; i < 16; i++) {
+ ptr = u64_to_hex(ptr, lc->gpregs_save_area[i]);
+ ptr = nmi_puts(ptr, " ");
+ if ((i + 1) % 4 == 0) {
+ ptr = nmi_puts(ptr, "\n");
+ sclp_emergency_printk(message);
+ ptr = message;
+ }
+ }
+
+ ptr = nmi_puts(message, "System stopped\n");
+ sclp_emergency_printk(message);
+}
+
static notrace void s390_handle_damage(void)
{
struct lowcore *lc = get_lowcore();
union ctlreg0 cr0, cr0_new;
- char message[100];
psw_t psw_save;
- char *ptr;
smp_emergency_stop();
diag_amode31_ops.diag308_reset();
- ptr = nmi_puts(message, "System stopped due to unrecoverable machine check, code: 0x");
- u64_to_hex(ptr, lc->mcck_interruption_code);
/*
* Disable low address protection and make machine check new PSW a
@@ -141,7 +205,7 @@ static notrace void s390_handle_damage(void)
psw_bits(lc->mcck_new_psw).io = 0;
psw_bits(lc->mcck_new_psw).ext = 0;
psw_bits(lc->mcck_new_psw).wait = 1;
- sclp_emergency_printk(message);
+ nmi_print_info();
/*
* Restore machine check new PSW and control register 0 to original
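Editor's note: nmi_print_info() must format without printf(), since it runs on an unrecoverable machine-check path where little of the kernel can be trusted. A self-contained sketch in the spirit of the u64_to_hex()/nmi_puts() helpers chained above; their real definitions are outside this hunk, so the return convention here is an assumption:

/* Sketch: format val as 16 lowercase hex digits into dest with no library
 * calls. Assumed to mirror the kernel's u64_to_hex(): returns the position
 * after the last character written, so calls can be chained. */
static char *u64_to_hex_sketch(char *dest, unsigned long long val)
{
	static const char hex[] = "0123456789abcdef";
	int shift;

	for (shift = 60; shift >= 0; shift -= 4)
		*dest++ = hex[(val >> shift) & 0xf];
	*dest = '\0';
	return dest;
}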
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 6a262e198e35..4d09954ebf49 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -14,7 +14,6 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
-#include <linux/export.h>
#include <linux/miscdevice.h>
#include <linux/perf_event.h>
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 91469401f2c9..f432869f8921 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -14,7 +14,6 @@
#include <linux/percpu.h>
#include <linux/pid.h>
#include <linux/notifier.h>
-#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 2b9611c4718e..91b8716c883a 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -12,7 +12,6 @@
#include <linux/perf_event.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
-#include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index 63875270941b..f373a1009c45 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -13,7 +13,6 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
-#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
@@ -696,7 +695,7 @@ static const char * const paicrypt_ctrnames[] = {
[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
- [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
+ [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256",
[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index fd14d5ebccbc..d827473e7f87 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -14,7 +14,6 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
-#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 9637aee43c40..f55f09cda6f8 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -27,7 +27,6 @@
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
-#include <linux/export.h>
#include <linux/init_task.h>
#include <linux/entry-common.h>
#include <linux/io.h>
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f244c5560e7f..7b529868789f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -605,7 +605,7 @@ static void __init reserve_crashkernel(void)
int rc;
rc = parse_crashkernel(boot_command_line, ident_map_size,
- &crash_size, &crash_base, NULL, NULL);
+ &crash_size, &crash_base, NULL, NULL, NULL);
crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
@@ -719,6 +719,11 @@ static void __init memblock_add_physmem_info(void)
memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
}
+static void __init setup_high_memory(void)
+{
+ high_memory = __va(ident_map_size);
+}
+
/*
* Reserve memory used for lowcore.
*/
@@ -951,6 +956,7 @@ void __init setup_arch(char **cmdline_p)
free_physmem_info();
setup_memory_end();
+ setup_high_memory();
memblock_dump_all();
setup_memory();
diff --git a/arch/s390/kernel/skey.c b/arch/s390/kernel/skey.c
new file mode 100644
index 000000000000..ba049fd103c2
--- /dev/null
+++ b/arch/s390/kernel/skey.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm/rwonce.h>
+#include <asm/page.h>
+#include <asm/skey.h>
+
+int skey_regions_initialized;
+
+static inline unsigned long load_real_address(unsigned long address)
+{
+ unsigned long real;
+
+ asm volatile(
+ " lra %[real],0(%[address])\n"
+ : [real] "=d" (real)
+ : [address] "a" (address)
+ : "cc");
+ return real;
+}
+
+/*
+ * Initialize storage keys of registered memory regions with the
+ * default key. This is useful for code which is executed with a
+ * non-default access key.
+ */
+void __skey_regions_initialize(void)
+{
+ unsigned long address, real;
+ struct skey_region *r, *end;
+
+ r = __skey_region_start;
+ end = __skey_region_end;
+ while (r < end) {
+ address = r->start & PAGE_MASK;
+ do {
+ real = load_real_address(address);
+ page_set_storage_key(real, PAGE_DEFAULT_KEY, 1);
+ address += PAGE_SIZE;
+ } while (address < r->end);
+ r++;
+ }
+ /*
+ * Make sure storage keys are initialized before
+ * skey_regions_initialized is changed.
+ */
+ barrier();
+ WRITE_ONCE(skey_regions_initialized, 1);
+}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 81f12bb77f62..e88ebe5339fc 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -175,13 +175,10 @@ static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
- int order;
-
if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
return;
- order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
pcpu->ec_clk = get_tod_clock_fast();
- pcpu_sigp_retry(pcpu, order, 0);
+ pcpu_sigp_retry(pcpu, SIGP_EXTERNAL_CALL, 0);
}
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
@@ -433,16 +430,16 @@ void notrace smp_emergency_stop(void)
cpumask_copy(&cpumask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &cpumask);
- end = get_tod_clock() + (1000000UL << 12);
+ end = get_tod_clock_monotonic() + (1000000UL << 12);
for_each_cpu(cpu, &cpumask) {
struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);
set_bit(ec_stop_cpu, &pcpu->ec_mask);
while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
0, NULL) == SIGP_CC_BUSY &&
- get_tod_clock() < end)
+ get_tod_clock_monotonic() < end)
cpu_relax();
}
- while (get_tod_clock() < end) {
+ while (get_tod_clock_monotonic() < end) {
for_each_cpu(cpu, &cpumask)
if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu)))
cpumask_clear_cpu(cpu, &cpumask);
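Editor's note: the emergency-stop timeout above leans on the s390 TOD clock format, where bit 51 ticks once per microsecond, so one microsecond equals 2^12 TOD units and "1000000UL << 12" is a one-second budget. A small helper making the conversion explicit (the helper name is illustrative):

/* 1 us == 1 << 12 TOD clock units (TOD bit 51 increments every us) */
static inline unsigned long tod_units_from_us(unsigned long us)
{
	return us << 12;
}

/* The loop above waits at most tod_units_from_us(1000000UL), i.e. one second. */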
diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
index d40f0b983e74..f4ccdbed4b89 100644
--- a/arch/s390/kernel/sthyi.c
+++ b/arch/s390/kernel/sthyi.c
@@ -5,6 +5,8 @@
* Copyright IBM Corp. 2016
* Author(s): Janosch Frank <frankja@linux.vnet.ibm.com>
*/
+
+#include <linux/export.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index fed17d407a44..63517b85f4c9 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -69,8 +69,6 @@ unsigned char ptff_function_mask[16];
static unsigned long lpar_offset;
static unsigned long initial_leap_seconds;
-static unsigned long tod_steering_end;
-static long tod_steering_delta;
/*
* Get time offsets with PTFF
@@ -80,9 +78,7 @@ void __init time_early_init(void)
struct ptff_qto qto;
struct ptff_qui qui;
- /* Initialize TOD steering parameters */
- tod_steering_end = tod_clock_base.tod;
- vdso_k_time_data->arch_data.tod_steering_end = tod_steering_end;
+ vdso_k_time_data->arch_data.tod_delta = tod_clock_base.tod;
if (!test_facility(28))
return;
@@ -226,21 +222,7 @@ void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
static u64 read_tod_clock(struct clocksource *cs)
{
- unsigned long now, adj;
-
- preempt_disable(); /* protect from changes to steering parameters */
- now = get_tod_clock();
- adj = tod_steering_end - now;
- if (unlikely((s64) adj > 0))
- /*
- * manually steer by 1 cycle every 2^16 cycles. This
- * corresponds to shifting the tod delta by 15. 1s is
- * therefore steered in ~9h. The adjust will decrease
- * over time, until it finally reaches 0.
- */
- now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
- preempt_enable();
- return now;
+ return get_tod_clock_monotonic();
}
static struct clocksource clocksource_tod = {
@@ -369,26 +351,11 @@ static inline int check_sync_clock(void)
*/
static void clock_sync_global(long delta)
{
- unsigned long now, adj;
struct ptff_qto qto;
/* Fixup the monotonic sched clock. */
tod_clock_base.eitod += delta;
- /* Adjust TOD steering parameters. */
- now = get_tod_clock();
- adj = tod_steering_end - now;
- if (unlikely((s64) adj >= 0))
- /* Calculate how much of the old adjustment is left. */
- tod_steering_delta = (tod_steering_delta < 0) ?
- -(adj >> 15) : (adj >> 15);
- tod_steering_delta += delta;
- if ((abs(tod_steering_delta) >> 48) != 0)
- panic("TOD clock sync offset %li is too large to drift\n",
- tod_steering_delta);
- tod_steering_end = now + (abs(tod_steering_delta) << 15);
- vdso_k_time_data->arch_data.tod_steering_end = tod_steering_end;
- vdso_k_time_data->arch_data.tod_steering_delta = tod_steering_delta;
-
+ vdso_k_time_data->arch_data.tod_delta = tod_clock_base.tod;
/* Update LPAR offset. */
if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
lpar_offset = qto.tod_epoch_difference;
@@ -430,7 +397,7 @@ struct clock_sync_data {
/*
* Server Time Protocol (STP) code.
*/
-static bool stp_online;
+static bool stp_online = true;
static struct stp_sstpi stp_info;
static void *stp_page;
@@ -456,7 +423,6 @@ static void __init stp_reset(void)
if (rc == 0)
set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
else if (stp_online) {
- pr_warn("The real or virtual hardware system does not provide an STP interface\n");
free_page((unsigned long) stp_page);
stp_page = NULL;
stp_online = false;
@@ -580,7 +546,7 @@ static int stp_sync_clock(void *data)
atomic_dec(&sync->cpus);
/* Wait for in_sync to be set. */
while (READ_ONCE(sync->in_sync) == 0)
- __udelay(1);
+ ;
}
if (sync->in_sync != 1)
/* Didn't work. Clear per-cpu in sync bit again. */
@@ -591,81 +557,6 @@ static int stp_sync_clock(void *data)
return 0;
}
-static int stp_clear_leap(void)
-{
- struct __kernel_timex txc;
- int ret;
-
- memset(&txc, 0, sizeof(txc));
-
- ret = do_adjtimex(&txc);
- if (ret < 0)
- return ret;
-
- txc.modes = ADJ_STATUS;
- txc.status &= ~(STA_INS|STA_DEL);
- return do_adjtimex(&txc);
-}
-
-static void stp_check_leap(void)
-{
- struct stp_stzi stzi;
- struct stp_lsoib *lsoib = &stzi.lsoib;
- struct __kernel_timex txc;
- int64_t timediff;
- int leapdiff, ret;
-
- if (!stp_info.lu || !check_sync_clock()) {
- /*
- * Either a scheduled leap second was removed by the operator,
- * or STP is out of sync. In both cases, clear the leap second
- * kernel flags.
- */
- if (stp_clear_leap() < 0)
- pr_err("failed to clear leap second flags\n");
- return;
- }
-
- if (chsc_stzi(stp_page, &stzi, sizeof(stzi))) {
- pr_err("stzi failed\n");
- return;
- }
-
- timediff = tod_to_ns(lsoib->nlsout - get_tod_clock()) / NSEC_PER_SEC;
- leapdiff = lsoib->nlso - lsoib->also;
-
- if (leapdiff != 1 && leapdiff != -1) {
- pr_err("Cannot schedule %d leap seconds\n", leapdiff);
- return;
- }
-
- if (timediff < 0) {
- if (stp_clear_leap() < 0)
- pr_err("failed to clear leap second flags\n");
- } else if (timediff < 7200) {
- memset(&txc, 0, sizeof(txc));
- ret = do_adjtimex(&txc);
- if (ret < 0)
- return;
-
- txc.modes = ADJ_STATUS;
- if (leapdiff > 0)
- txc.status |= STA_INS;
- else
- txc.status |= STA_DEL;
- ret = do_adjtimex(&txc);
- if (ret < 0)
- pr_err("failed to set leap second flags\n");
- /* arm Timer to clear leap second flags */
- mod_timer(&stp_timer, jiffies + secs_to_jiffies(14400));
- } else {
- /* The day the leap second is scheduled for hasn't been reached. Retry
- * in one hour.
- */
- mod_timer(&stp_timer, jiffies + secs_to_jiffies(3600));
- }
-}
-
/*
* STP work. Check for the STP state and take over the clock
* synchronization if the STP clock source is usable.
@@ -707,8 +598,6 @@ static void stp_work_fn(struct work_struct *work)
* Retry after a second.
*/
mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
- else if (stp_info.lu)
- stp_check_leap();
out_unlock:
mutex_unlock(&stp_mutex);
diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c
index cd44be2b6ce8..0f88caca4eaf 100644
--- a/arch/s390/kernel/unwind_bc.c
+++ b/arch/s390/kernel/unwind_bc.c
@@ -1,4 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index b99478e84da4..47f574cd1728 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -7,6 +7,7 @@
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index ff1ddba96352..1c606dfa595d 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -71,6 +71,13 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
+ . = ALIGN(8);
+ .skey_region_table : {
+ __skey_region_start = .;
+ KEEP(*(.skey_region))
+ __skey_region_end = .;
+ }
+
.data.rel.ro : {
*(.data.rel.ro .data.rel.ro.*)
}
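Editor's note: the new .skey_region_table output section collects .skey_region entries between __skey_region_start and __skey_region_end, which __skey_regions_initialize() in skey.c walks. The emitting side lives in <asm/skey.h>, which is not part of these hunks; the following is a purely hypothetical sketch of how SKEY_REGION(20b, 21b) could record a start/end pair from inline assembly:

struct skey_region {
	unsigned long start;
	unsigned long end;
};

/* Hypothetical: emit one table entry; the real macro is in <asm/skey.h> */
#define SKEY_REGION(_start, _end)		\
	".section .skey_region,\"a\"\n"		\
	".balign 8\n"				\
	".quad " #_start "," #_end "\n"		\
	".previous\n"

extern struct skey_region __skey_region_start[], __skey_region_end[];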
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 60c360c18690..2a92a8b9e4c2 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
+#include <linux/export.h>
#include <linux/mmu_context.h>
#include <linux/nospec.h>
#include <linux/signal.h>
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d5ad10791c25..bf6fa8b9ca73 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/compiler.h>
+#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
@@ -5062,6 +5063,30 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
return vcpu_post_run_handle_fault(vcpu);
}
+int noinstr kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
+ u64 *gprs, unsigned long gasce)
+{
+ int ret;
+
+ guest_state_enter_irqoff();
+
+ /*
+ * The guest_state_{enter,exit}_irqoff() functions inform lockdep and
+ * tracing that entry to the guest will enable host IRQs, and exit from
+ * the guest will disable host IRQs.
+ *
+ * We must not use lockdep/tracing/RCU in this critical section, so we
+ * use the low-level arch_local_irq_*() helpers to enable/disable IRQs.
+ */
+ arch_local_irq_enable();
+ ret = sie64a(scb, gprs, gasce);
+ arch_local_irq_disable();
+
+ guest_state_exit_irqoff();
+
+ return ret;
+}
+
#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
@@ -5082,20 +5107,27 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
kvm_vcpu_srcu_read_unlock(vcpu);
/*
* As PF_VCPU will be used in fault handler, between
- * guest_enter and guest_exit should be no uaccess.
+ * guest_timing_enter_irqoff() and guest_timing_exit_irqoff()
+ * there should be no uaccess.
*/
- local_irq_disable();
- guest_enter_irqoff();
- __disable_cpu_timer_accounting(vcpu);
- local_irq_enable();
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
memcpy(sie_page->pv_grregs,
vcpu->run->s.regs.gprs,
sizeof(sie_page->pv_grregs));
}
- exit_reason = sie64a(vcpu->arch.sie_block,
- vcpu->run->s.regs.gprs,
- vcpu->arch.gmap->asce);
+
+ local_irq_disable();
+ guest_timing_enter_irqoff();
+ __disable_cpu_timer_accounting(vcpu);
+
+ exit_reason = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
+ vcpu->run->s.regs.gprs,
+ vcpu->arch.gmap->asce);
+
+ __enable_cpu_timer_accounting(vcpu);
+ guest_timing_exit_irqoff();
+ local_irq_enable();
+
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
memcpy(vcpu->run->s.regs.gprs,
sie_page->pv_grregs,
@@ -5111,10 +5143,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
}
}
- local_irq_disable();
- __enable_cpu_timer_accounting(vcpu);
- guest_exit_irqoff();
- local_irq_enable();
kvm_vcpu_srcu_read_lock(vcpu);
rc = vcpu_post_run(vcpu, exit_reason);
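Editor's note: the net effect of the kvm-s390.c changes is that sie64a() now runs inside a noinstr wrapper that enables IRQs only for the SIE instruction itself, while vtime accounting brackets it in the caller. Condensed view (the identifiers are the kernel's, the body is a sketch rather than the literal code):

static int vcpu_run_once_sketch(struct kvm_vcpu *vcpu)
{
	int exit_reason;

	local_irq_disable();
	guest_timing_enter_irqoff();		/* start guest time accounting */
	__disable_cpu_timer_accounting(vcpu);
	/* noinstr window: IRQs are enabled only around sie64a() itself */
	exit_reason = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
					      vcpu->run->s.regs.gprs,
					      vcpu->arch.gmap->asce);
	__enable_cpu_timer_accounting(vcpu);
	guest_timing_exit_irqoff();		/* stop guest time accounting */
	local_irq_enable();
	return exit_reason;
}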
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 14c330ec8ceb..25ede8354514 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -5,6 +5,8 @@
* Copyright IBM Corp. 2019, 2020
* Author(s): Janosch Frank <frankja@linux.ibm.com>
*/
+
+#include <linux/export.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/minmax.h>
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 13a9661d2b28..347268f89f2f 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1170,10 +1170,6 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu->arch.sie_block->fpf & FPF_BPBC)
set_thread_flag(TIF_ISOLATE_BP_GUEST);
- local_irq_disable();
- guest_enter_irqoff();
- local_irq_enable();
-
/*
* Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
* and VCPU requests also hinder the vSIE from running and lead
@@ -1183,15 +1179,16 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
current->thread.gmap_int_code = 0;
barrier();
- if (!kvm_s390_vcpu_sie_inhibited(vcpu))
- rc = sie64a(scb_s, vcpu->run->s.regs.gprs, vsie_page->gmap->asce);
+ if (!kvm_s390_vcpu_sie_inhibited(vcpu)) {
+ local_irq_disable();
+ guest_timing_enter_irqoff();
+ rc = kvm_s390_enter_exit_sie(scb_s, vcpu->run->s.regs.gprs, vsie_page->gmap->asce);
+ guest_timing_exit_irqoff();
+ local_irq_enable();
+ }
barrier();
vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
- local_irq_disable();
- guest_exit_irqoff();
- local_irq_enable();
-
/* restore guest state for bp isolation override */
if (!guest_bp_isolation)
clear_thread_flag(TIF_ISOLATE_BP_GUEST);
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index be14c58cb989..c1ea14e3c927 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -7,6 +7,7 @@
*/
#include <linux/processor.h>
+#include <linux/export.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include <asm/timex.h>
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index fa7d98fa1320..1a6ba105e071 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -8,11 +8,13 @@
* Gerald Schaefer (gerald.schaefer@de.ibm.com)
*/
+#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/asm-extable.h>
#include <asm/ctlreg.h>
+#include <asm/skey.h>
#ifdef CONFIG_DEBUG_ENTRY
void debug_user_asce(int exit)
@@ -145,3 +147,189 @@ unsigned long _copy_to_user_key(void __user *to, const void *from,
return raw_copy_to_user_key(to, from, n, key);
}
EXPORT_SYMBOL(_copy_to_user_key);
+
+#define CMPXCHG_USER_KEY_MAX_LOOPS 128
+
+static nokprobe_inline int __cmpxchg_user_key_small(unsigned long address, unsigned int *uval,
+ unsigned int old, unsigned int new,
+ unsigned int mask, unsigned long key)
+{
+ unsigned long count;
+ unsigned int prev;
+ bool sacf_flag;
+ int rc = 0;
+
+ skey_regions_initialize();
+ sacf_flag = enable_sacf_uaccess();
+ asm_inline volatile(
+ "20: spka 0(%[key])\n"
+ " sacf 256\n"
+ " llill %[count],%[max_loops]\n"
+ "0: l %[prev],%[address]\n"
+ "1: nr %[prev],%[mask]\n"
+ " xilf %[mask],0xffffffff\n"
+ " or %[new],%[prev]\n"
+ " or %[prev],%[tmp]\n"
+ "2: lr %[tmp],%[prev]\n"
+ "3: cs %[prev],%[new],%[address]\n"
+ "4: jnl 5f\n"
+ " xr %[tmp],%[prev]\n"
+ " xr %[new],%[tmp]\n"
+ " nr %[tmp],%[mask]\n"
+ " jnz 5f\n"
+ " brct %[count],2b\n"
+ "5: sacf 768\n"
+ " spka %[default_key]\n"
+ "21:\n"
+ EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
+ SKEY_REGION(20b, 21b)
+ : [rc] "+&d" (rc),
+ [prev] "=&d" (prev),
+ [address] "+Q" (*(int *)address),
+ [tmp] "+&d" (old),
+ [new] "+&d" (new),
+ [mask] "+&d" (mask),
+ [count] "=a" (count)
+ : [key] "%[count]" (key << 4),
+ [default_key] "J" (PAGE_DEFAULT_KEY),
+ [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
+ : "memory", "cc");
+ disable_sacf_uaccess(sacf_flag);
+ *uval = prev;
+ if (!count)
+ rc = -EAGAIN;
+ return rc;
+}
+
+int __kprobes __cmpxchg_user_key1(unsigned long address, unsigned char *uval,
+ unsigned char old, unsigned char new, unsigned long key)
+{
+ unsigned int prev, shift, mask, _old, _new;
+ int rc;
+
+ shift = (3 ^ (address & 3)) << 3;
+ address ^= address & 3;
+ _old = (unsigned int)old << shift;
+ _new = (unsigned int)new << shift;
+ mask = ~(0xff << shift);
+ rc = __cmpxchg_user_key_small(address, &prev, _old, _new, mask, key);
+ *uval = prev >> shift;
+ return rc;
+}
+EXPORT_SYMBOL(__cmpxchg_user_key1);
+
+int __kprobes __cmpxchg_user_key2(unsigned long address, unsigned short *uval,
+ unsigned short old, unsigned short new, unsigned long key)
+{
+ unsigned int prev, shift, mask, _old, _new;
+ int rc;
+
+ shift = (2 ^ (address & 2)) << 3;
+ address ^= address & 2;
+ _old = (unsigned int)old << shift;
+ _new = (unsigned int)new << shift;
+ mask = ~(0xffff << shift);
+ rc = __cmpxchg_user_key_small(address, &prev, _old, _new, mask, key);
+ *uval = prev >> shift;
+ return rc;
+}
+EXPORT_SYMBOL(__cmpxchg_user_key2);
+
+int __kprobes __cmpxchg_user_key4(unsigned long address, unsigned int *uval,
+ unsigned int old, unsigned int new, unsigned long key)
+{
+ unsigned int prev = old;
+ bool sacf_flag;
+ int rc = 0;
+
+ skey_regions_initialize();
+ sacf_flag = enable_sacf_uaccess();
+ asm_inline volatile(
+ "20: spka 0(%[key])\n"
+ " sacf 256\n"
+ "0: cs %[prev],%[new],%[address]\n"
+ "1: sacf 768\n"
+ " spka %[default_key]\n"
+ "21:\n"
+ EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
+ SKEY_REGION(20b, 21b)
+ : [rc] "+&d" (rc),
+ [prev] "+&d" (prev),
+ [address] "+Q" (*(int *)address)
+ : [new] "d" (new),
+ [key] "a" (key << 4),
+ [default_key] "J" (PAGE_DEFAULT_KEY)
+ : "memory", "cc");
+ disable_sacf_uaccess(sacf_flag);
+ *uval = prev;
+ return rc;
+}
+EXPORT_SYMBOL(__cmpxchg_user_key4);
+
+int __kprobes __cmpxchg_user_key8(unsigned long address, unsigned long *uval,
+ unsigned long old, unsigned long new, unsigned long key)
+{
+ unsigned long prev = old;
+ bool sacf_flag;
+ int rc = 0;
+
+ skey_regions_initialize();
+ sacf_flag = enable_sacf_uaccess();
+ asm_inline volatile(
+ "20: spka 0(%[key])\n"
+ " sacf 256\n"
+ "0: csg %[prev],%[new],%[address]\n"
+ "1: sacf 768\n"
+ " spka %[default_key]\n"
+ "21:\n"
+ EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
+ SKEY_REGION(20b, 21b)
+ : [rc] "+&d" (rc),
+ [prev] "+&d" (prev),
+ [address] "+QS" (*(long *)address)
+ : [new] "d" (new),
+ [key] "a" (key << 4),
+ [default_key] "J" (PAGE_DEFAULT_KEY)
+ : "memory", "cc");
+ disable_sacf_uaccess(sacf_flag);
+ *uval = prev;
+ return rc;
+}
+EXPORT_SYMBOL(__cmpxchg_user_key8);
+
+int __kprobes __cmpxchg_user_key16(unsigned long address, __uint128_t *uval,
+ __uint128_t old, __uint128_t new, unsigned long key)
+{
+ __uint128_t prev = old;
+ bool sacf_flag;
+ int rc = 0;
+
+ skey_regions_initialize();
+ sacf_flag = enable_sacf_uaccess();
+ asm_inline volatile(
+ "20: spka 0(%[key])\n"
+ " sacf 256\n"
+ "0: cdsg %[prev],%[new],%[address]\n"
+ "1: sacf 768\n"
+ " spka %[default_key]\n"
+ "21:\n"
+ EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev])
+ EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev])
+ SKEY_REGION(20b, 21b)
+ : [rc] "+&d" (rc),
+ [prev] "+&d" (prev),
+ [address] "+QS" (*(__int128_t *)address)
+ : [new] "d" (new),
+ [key] "a" (key << 4),
+ [default_key] "J" (PAGE_DEFAULT_KEY)
+ : "memory", "cc");
+ disable_sacf_uaccess(sacf_flag);
+ *uval = prev;
+ return rc;
+}
+EXPORT_SYMBOL(__cmpxchg_user_key16);
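Editor's note: the 1- and 2-byte variants above emulate a narrow compare-and-swap with a word-sized CS plus shift/mask arithmetic (big-endian, so lane 0 is the most significant byte). The same idea in portable C, using a GCC/Clang builtin CAS instead of the key-controlled s390 instruction; this is an illustration only, with no storage-key or uaccess handling, and unlike the kernel code it does not cap retries with an -EAGAIN fallback:

#include <stdint.h>
#include <stdbool.h>

/* Sketch: 1-byte CAS built from an aligned 4-byte CAS, mirroring the
 * shift/mask logic of __cmpxchg_user_key1() above (big-endian lanes).
 * Returns 0; *uval holds the byte observed before the operation. */
static int cmpxchg_byte_sketch(uint8_t *p, uint8_t *uval,
			       uint8_t old, uint8_t new)
{
	uintptr_t addr = (uintptr_t)p;
	unsigned int shift = (3 ^ (addr & 3)) << 3;	/* bit offset of lane */
	uint32_t *word = (uint32_t *)(addr & ~(uintptr_t)3);
	uint32_t mask = ~((uint32_t)0xff << shift);
	uint32_t cur, expected, desired;

	cur = __atomic_load_n(word, __ATOMIC_RELAXED);
	do {
		expected = (cur & mask) | ((uint32_t)old << shift);
		desired = (cur & mask) | ((uint32_t)new << shift);
		if (__atomic_compare_exchange_n(word, &expected, desired,
						false, __ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST)) {
			*uval = old;	/* target byte matched and was swapped */
			return 0;
		}
		cur = expected;		/* a neighbouring byte changed; retry */
	} while (((cur >> shift) & 0xff) == old);
	*uval = (cur >> shift) & 0xff;	/* compare failed on the target byte */
	return 0;
}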
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index ac604b176660..9af2aae0a515 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -247,11 +247,9 @@ static int ptdump_show(struct seq_file *m, void *v)
.marker = markers,
};
- get_online_mems();
mutex_lock(&cpa_mutex);
ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
mutex_unlock(&cpa_mutex);
- put_online_mems();
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptdump);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 012a4366a2ad..c7defe4ed1f6 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -9,6 +9,7 @@
*/
#include <linux/cpufeature.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c
index a45d417ad951..b63f427e7289 100644
--- a/arch/s390/mm/gmap_helpers.c
+++ b/arch/s390/mm/gmap_helpers.c
@@ -4,6 +4,8 @@
*
* Copyright IBM Corp. 2007, 2025
*/
+
+#include <linux/export.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/mm.h>
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index b449fd2605b0..d2f6f1f6d2fc 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -173,11 +173,6 @@ void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
- /*
- * THPs are not allowed for KVM guests. Warn if pgste ever reaches here.
- * Turn to the generic pte_free_defer() version once gmap is removed.
- */
- WARN_ON_ONCE(mm_has_pgste(mm));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7df70cd8f739..60688be4e876 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -5,6 +5,7 @@
*/
#include <linux/cpufeature.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 448dd6ed1069..f48ef361bc83 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -64,13 +64,12 @@ void *vmem_crst_alloc(unsigned long val)
pte_t __ref *vmem_pte_alloc(void)
{
- unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
pte_t *pte;
if (slab_is_available())
- pte = (pte_t *) page_table_alloc(&init_mm);
+ pte = (pte_t *)page_table_alloc(&init_mm);
else
- pte = (pte_t *) memblock_alloc(size, size);
+ pte = (pte_t *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pte)
return NULL;
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
deleted file mode 100644
index 7822ea92e54a..000000000000
--- a/arch/s390/net/bpf_jit.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * BPF Jit compiler defines
- *
- * Copyright IBM Corp. 2012,2015
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- * Michael Holzheu <holzheu@linux.vnet.ibm.com>
- */
-
-#ifndef __ARCH_S390_NET_BPF_JIT_H
-#define __ARCH_S390_NET_BPF_JIT_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/filter.h>
-#include <linux/types.h>
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * Stackframe layout (packed stack):
- *
- * ^ high
- * +---------------+ |
- * | old backchain | |
- * +---------------+ |
- * | r15 - r6 | |
- * +---------------+ |
- * | 4 byte align | |
- * | tail_call_cnt | |
- * BFP -> +===============+ |
- * | | |
- * | BPF stack | |
- * | | |
- * R15+160 -> +---------------+ |
- * | new backchain | |
- * R15+152 -> +---------------+ |
- * | + 152 byte SA | |
- * R15 -> +---------------+ + low
- *
- * We get 160 bytes stack space from calling function, but only use
- * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
- *
- * The stack size used by the BPF program ("BPF stack" above) is passed
- * via "aux->stack_depth".
- */
-#define STK_SPACE_ADD (160)
-#define STK_160_UNUSED (160 - 12 * 8)
-#define STK_OFF (STK_SPACE_ADD - STK_160_UNUSED)
-
-#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
-#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
-
-#endif /* __ARCH_S390_NET_BPF_JIT_H */
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0c9a35782c83..bb17efe29d65 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -32,7 +32,6 @@
#include <asm/set_memory.h>
#include <asm/text-patching.h>
#include <asm/unwind.h>
-#include "bpf_jit.h"
struct bpf_jit {
u32 seen; /* Flags to remember seen eBPF instructions */
@@ -54,6 +53,7 @@ struct bpf_jit {
int prologue_plt; /* Start of prologue hotpatch PLT */
int kern_arena; /* Pool offset of kernel arena address */
u64 user_arena; /* User arena address */
+ u32 frame_off; /* Offset of struct bpf_prog from %r15 */
};
#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
@@ -426,11 +426,25 @@ static void jit_fill_hole(void *area, unsigned int size)
}
/*
+ * Caller-allocated part of the frame.
+ * Thanks to packed stack, its otherwise unused initial part can be used for
+ * the BPF stack and for the next frame.
+ */
+struct prog_frame {
+ u64 unused[8];
+ /* BPF stack starts here and grows towards 0 */
+ u32 tail_call_cnt;
+ u32 pad;
+ u64 r6[10]; /* r6 - r15 */
+ u64 backchain;
+} __packed;
+
+/*
* Save registers from "rs" (register start) to "re" (register end) on stack
*/
static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
- u32 off = STK_OFF_R6 + (rs - 6) * 8;
+ u32 off = offsetof(struct prog_frame, r6) + (rs - 6) * 8;
if (rs == re)
/* stg %rs,off(%r15) */
@@ -443,12 +457,9 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
/*
* Restore registers from "rs" (register start) to "re" (register end) on stack
*/
-static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
+static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
- u32 off = STK_OFF_R6 + (rs - 6) * 8;
-
- if (jit->seen & SEEN_STACK)
- off += STK_OFF + stack_depth;
+ u32 off = jit->frame_off + offsetof(struct prog_frame, r6) + (rs - 6) * 8;
if (rs == re)
/* lg %rs,off(%r15) */
@@ -492,8 +503,7 @@ static int get_end(u16 seen_regs, int start)
* Save and restore clobbered registers (6-15) on stack.
* We save/restore registers in chunks with gap >= 2 registers.
*/
-static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth,
- u16 extra_regs)
+static void save_restore_regs(struct bpf_jit *jit, int op, u16 extra_regs)
{
u16 seen_regs = jit->seen_regs | extra_regs;
const int last = 15, save_restore_size = 6;
@@ -516,7 +526,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth,
if (op == REGS_SAVE)
save_regs(jit, rs, re);
else
- restore_regs(jit, rs, re, stack_depth);
+ restore_regs(jit, rs, re);
re++;
} while (re <= last);
}
@@ -581,11 +591,12 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
* Emit function prologue
*
* Save registers and create stack frame if necessary.
- * See stack frame layout description in "bpf_jit.h"!
+ * Stack frame layout is described by struct prog_frame.
*/
-static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
- u32 stack_depth)
+static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp)
{
+ BUILD_BUG_ON(sizeof(struct prog_frame) != STACK_FRAME_OVERHEAD);
+
/* No-op for hotpatching */
/* brcl 0,prologue_plt */
EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
@@ -593,8 +604,9 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
if (!bpf_is_subprog(fp)) {
/* Initialize the tail call counter in the main program. */
- /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
- _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
+ /* xc tail_call_cnt(4,%r15),tail_call_cnt(%r15) */
+ _EMIT6(0xd703f000 | offsetof(struct prog_frame, tail_call_cnt),
+ 0xf000 | offsetof(struct prog_frame, tail_call_cnt));
} else {
/*
* Skip the tail call counter initialization in subprograms.
@@ -617,7 +629,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
jit->seen_regs |= NVREGS;
} else {
/* Save registers */
- save_restore_regs(jit, REGS_SAVE, stack_depth,
+ save_restore_regs(jit, REGS_SAVE,
fp->aux->exception_boundary ? NVREGS : 0);
}
/* Setup literal pool */
@@ -637,13 +649,15 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
/* lgr %w1,%r15 (backchain) */
EMIT4(0xb9040000, REG_W1, REG_15);
- /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
- EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
- /* aghi %r15,-STK_OFF */
- EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
- /* stg %w1,152(%r15) (backchain) */
+ /* la %bfp,unused_end(%r15) (BPF frame pointer) */
+ EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15,
+ offsetofend(struct prog_frame, unused));
+ /* aghi %r15,-frame_off */
+ EMIT4_IMM(0xa70b0000, REG_15, -jit->frame_off);
+ /* stg %w1,backchain(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
- REG_15, 152);
+ REG_15,
+ offsetof(struct prog_frame, backchain));
}
}
@@ -677,13 +691,13 @@ static void call_r1(struct bpf_jit *jit)
/*
* Function epilogue
*/
-static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
+static void bpf_jit_epilogue(struct bpf_jit *jit)
{
jit->exit_ip = jit->prg;
/* Load exit code: lgr %r2,%b0 */
EMIT4(0xb9040000, REG_2, BPF_REG_0);
/* Restore registers */
- save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
+ save_restore_regs(jit, REGS_RESTORE, 0);
EMIT_JUMP_REG(14);
jit->prg = ALIGN(jit->prg, 8);
@@ -865,7 +879,7 @@ static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags)
* stack space for the large switch statement.
*/
static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
- int i, bool extra_pass, u32 stack_depth)
+ int i, bool extra_pass)
{
struct bpf_insn *insn = &fp->insnsi[i];
s32 branch_oc_off = insn->off;
@@ -1786,9 +1800,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
* Note 2: We assume that the verifier does not let us call the
* main program, which clears the tail call counter on entry.
*/
- /* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */
- _EMIT6(0xd203f000 | STK_OFF_TCCNT,
- 0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth));
+ /* mvc tail_call_cnt(4,%r15),frame_off+tail_call_cnt(%r15) */
+ _EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
+ 0xf000 | (jit->frame_off +
+ offsetof(struct prog_frame, tail_call_cnt)));
/* Sign-extend the kfunc arguments. */
if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
@@ -1839,10 +1854,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
* goto out;
*/
- if (jit->seen & SEEN_STACK)
- off = STK_OFF_TCCNT + STK_OFF + stack_depth;
- else
- off = STK_OFF_TCCNT;
+ off = jit->frame_off +
+ offsetof(struct prog_frame, tail_call_cnt);
/* lhi %w0,1 */
EMIT4_IMM(0xa7080000, REG_W0, 1);
/* laal %w1,%w0,off(%r15) */
@@ -1872,7 +1885,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
/*
* Restore registers before calling function
*/
- save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
+ save_restore_regs(jit, REGS_RESTORE, 0);
/*
* goto *(prog->bpf_func + tail_call_start);
@@ -2165,7 +2178,7 @@ static int bpf_set_addr(struct bpf_jit *jit, int i)
* Compile eBPF program into s390x code
*/
static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
- bool extra_pass, u32 stack_depth)
+ bool extra_pass)
{
int i, insn_count, lit32_size, lit64_size;
u64 kern_arena;
@@ -2174,24 +2187,30 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
jit->lit64 = jit->lit64_start;
jit->prg = 0;
jit->excnt = 0;
+ if (is_first_pass(jit) || (jit->seen & SEEN_STACK))
+ jit->frame_off = sizeof(struct prog_frame) -
+ offsetofend(struct prog_frame, unused) +
+ round_up(fp->aux->stack_depth, 8);
+ else
+ jit->frame_off = 0;
kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena);
if (kern_arena)
jit->kern_arena = _EMIT_CONST_U64(kern_arena);
jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena);
- bpf_jit_prologue(jit, fp, stack_depth);
+ bpf_jit_prologue(jit, fp);
if (bpf_set_addr(jit, 0) < 0)
return -1;
for (i = 0; i < fp->len; i += insn_count) {
- insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
+ insn_count = bpf_jit_insn(jit, fp, i, extra_pass);
if (insn_count < 0)
return -1;
/* Next instruction address */
if (bpf_set_addr(jit, i + insn_count) < 0)
return -1;
}
- bpf_jit_epilogue(jit, stack_depth);
+ bpf_jit_epilogue(jit);
lit32_size = jit->lit32 - jit->lit32_start;
lit64_size = jit->lit64 - jit->lit64_start;
@@ -2267,7 +2286,6 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
*/
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
- u32 stack_depth = round_up(fp->aux->stack_depth, 8);
struct bpf_prog *tmp, *orig_fp = fp;
struct bpf_binary_header *header;
struct s390_jit_data *jit_data;
@@ -2320,7 +2338,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
* - 3: Calculate program size and addrs array
*/
for (pass = 1; pass <= 3; pass++) {
- if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
+ if (bpf_jit_prog(&jit, fp, extra_pass)) {
fp = orig_fp;
goto free_addrs;
}
@@ -2334,7 +2352,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
goto free_addrs;
}
skip_init_ctx:
- if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
+ if (bpf_jit_prog(&jit, fp, extra_pass)) {
bpf_jit_binary_free(header);
fp = orig_fp;
goto free_addrs;
@@ -2654,9 +2672,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
/* stg %r1,backchain_off(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
tjit->backchain_off);
- /* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
+ /* mvc tccnt_off(4,%r15),stack_size+tail_call_cnt(%r15) */
_EMIT6(0xd203f000 | tjit->tccnt_off,
- 0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
+ 0xf000 | (tjit->stack_size +
+ offsetof(struct prog_frame, tail_call_cnt)));
/* stmg %r2,%rN,fwd_reg_args_off(%r15) */
if (nr_reg_args)
EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
@@ -2793,8 +2812,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
(nr_stack_args * sizeof(u64) - 1) << 16 |
tjit->stack_args_off,
0xf000 | tjit->orig_stack_args_off);
- /* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
- _EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
+ /* mvc tail_call_cnt(4,%r15),tccnt_off(%r15) */
+ _EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
+ 0xf000 | tjit->tccnt_off);
/* lgr %r1,%r8 */
EMIT4(0xb9040000, REG_1, REG_8);
/* %r1() */
@@ -2851,8 +2871,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
tjit->retval_off);
- /* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
- _EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
+ /* mvc stack_size+tail_call_cnt(4,%r15),tccnt_off(%r15) */
+ _EMIT6(0xd203f000 | (tjit->stack_size +
+ offsetof(struct prog_frame, tail_call_cnt)),
0xf000 | tjit->tccnt_off);
/* aghi %r15,stack_size */
EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
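Editor's note: the BPF JIT hunks replace the STK_OFF* arithmetic from the deleted bpf_jit.h with offsetof() on struct prog_frame. The two layouts are identical; a standalone compile-time check against the old constants (the struct is duplicated here purely for illustration):

#include <stddef.h>

struct prog_frame_sketch {
	unsigned long long unused[8];
	unsigned int tail_call_cnt;
	unsigned int pad;
	unsigned long long r6[10];	/* r6 - r15 */
	unsigned long long backchain;
} __attribute__((packed));

/* 160 == s390 STACK_FRAME_OVERHEAD */
_Static_assert(sizeof(struct prog_frame_sketch) == 160, "frame size");
/* old STK_OFF_TCCNT == 160 - 12 * 8 == 64 */
_Static_assert(offsetof(struct prog_frame_sketch, tail_call_cnt) == 64,
	       "tail_call_cnt offset");
/* old STK_OFF_R6 == 160 - 11 * 8 == 72 */
_Static_assert(offsetof(struct prog_frame_sketch, r6) == 72, "r6 offset");
/* the old prologue stored the backchain at offset 152 */
_Static_assert(offsetof(struct prog_frame_sketch, backchain) == 152,
	       "backchain offset");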
diff --git a/arch/s390/net/pnet.c b/arch/s390/net/pnet.c
index 79211bec0fc8..03089ef479b2 100644
--- a/arch/s390/net/pnet.c
+++ b/arch/s390/net/pnet.c
@@ -6,6 +6,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
index 81bdb54ad5e3..45a1c36c5a54 100644
--- a/arch/s390/pci/pci_bus.c
+++ b/arch/s390/pci/pci_bus.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
diff --git a/arch/s390/pci/pci_kvm_hook.c b/arch/s390/pci/pci_kvm_hook.c
index ff34baf50a3e..df5b25dbe9ca 100644
--- a/arch/s390/pci/pci_kvm_hook.c
+++ b/arch/s390/pci/pci_kvm_hook.c
@@ -5,7 +5,9 @@
* Copyright (C) IBM Corp. 2022. All rights reserved.
* Author(s): Pierre Morel <pmorel@linux.ibm.com>
*/
+
#include <linux/kvm_host.h>
+#include <linux/export.h>
struct zpci_kvm_hook zpci_kvm_hook;
EXPORT_SYMBOL_GPL(zpci_kvm_hook);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 89185af7bcc9..d5795067befa 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -40,7 +40,6 @@ config SUPERH
select HAVE_GUP_FAST if MMU
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_HW_BREAKPOINT
select HAVE_IOREMAP_PROT if MMU && !X2TLB
select HAVE_KERNEL_BZIP2
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index cab2f9c011a8..7b420424b6d7 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -103,16 +103,16 @@ UTS_MACHINE := sh
LDFLAGS_vmlinux += -e _stext
ifdef CONFIG_CPU_LITTLE_ENDIAN
-ld-bfd := elf32-sh-linux
-LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
+ld_bfd := elf32-sh-linux
+LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld_bfd)
KBUILD_LDFLAGS += -EL
else
-ld-bfd := elf32-shbig-linux
-LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
+ld_bfd := elf32-shbig-linux
+LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld_bfd)
KBUILD_LDFLAGS += -EB
endif
-export ld-bfd
+export ld_bfd
# Mach groups
machdir-$(CONFIG_SOLUTION_ENGINE) += mach-se
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index 8bc319ff54bf..58df491778b2 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -27,7 +27,7 @@ endif
ccflags-remove-$(CONFIG_MCOUNT) += -pg
-LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
+LDFLAGS_vmlinux := --oformat $(ld_bfd) -Ttext $(IMAGE_OFFSET) -e startup \
-T $(obj)/../../kernel/vmlinux.lds
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -51,7 +51,7 @@ $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
OBJCOPYFLAGS += -R .empty_zero_page
-LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T
+LDFLAGS_piggy.o := -r --format binary --oformat $(ld_bfd) -T
$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
$(call if_changed,ld)
diff --git a/arch/sh/boot/romimage/Makefile b/arch/sh/boot/romimage/Makefile
index c7c8be58400c..17b03df0a8de 100644
--- a/arch/sh/boot/romimage/Makefile
+++ b/arch/sh/boot/romimage/Makefile
@@ -13,7 +13,7 @@ mmcif-obj-$(CONFIG_CPU_SUBTYPE_SH7724) := $(obj)/mmcif-sh7724.o
load-$(CONFIG_ROMIMAGE_MMCIF) := $(mmcif-load-y)
obj-$(CONFIG_ROMIMAGE_MMCIF) := $(mmcif-obj-y)
-LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(load-y) -e romstart \
+LDFLAGS_vmlinux := --oformat $(ld_bfd) -Ttext $(load-y) -e romstart \
-T $(obj)/../../kernel/vmlinux.lds
$(obj)/vmlinux: $(obj)/head.o $(obj-y) $(obj)/piggy.o FORCE
@@ -24,7 +24,7 @@ OBJCOPYFLAGS += -j .empty_zero_page
$(obj)/zeropage.bin: vmlinux FORCE
$(call if_changed,objcopy)
-LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T
+LDFLAGS_piggy.o := -r --format binary --oformat $(ld_bfd) -T
$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/zeropage.bin arch/sh/boot/zImage FORCE
$(call if_changed,ld)
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index f022ada363b5..8ef72b8dbcd3 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -61,7 +61,6 @@ CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 8321b31d2e19..37073ca1e0ad 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -146,7 +146,7 @@ void __init reserve_crashkernel(void)
return;
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
- &crash_size, &crash_base, NULL, NULL);
+ &crash_size, &crash_base, NULL, NULL, NULL);
if (ret == 0 && crash_size > 0) {
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index dcfdb7f1dae9..7b595092cbfb 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -78,7 +78,6 @@ config SPARC64
select MMU_GATHER_NO_FLUSH_CACHE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_DYNAMIC_FTRACE
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_PAGE_SIZE_8KB
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_CONTEXT_TRACKING_USER
@@ -97,6 +96,7 @@ config SPARC64
select HAVE_ARCH_AUDITSYSCALL
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ select ARCH_SUPPORTS_HUGETLBFS
select HAVE_NMI
select HAVE_REGS_AND_STACK_ACCESS_API
select ARCH_USE_QUEUED_RWLOCKS
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index e7a9cdd498dc..d3bc16fbcbbd 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -50,11 +50,6 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
return changed;
}
-#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
-void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
- unsigned long end, unsigned long floor,
- unsigned long ceiling);
-
#include <asm-generic/hugetlb.h>
#endif /* _ASM_SPARC64_HUGETLB_H */
diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h
index af9c10c83dc5..3e4bac33be81 100644
--- a/arch/sparc/include/asm/mman.h
+++ b/arch/sparc/include/asm/mman.h
@@ -28,7 +28,7 @@ static inline void ipi_set_tstate_mcde(void *arg)
}
#define arch_calc_vm_prot_bits(prot, pkey) sparc_calc_vm_prot_bits(prot)
-static inline unsigned long sparc_calc_vm_prot_bits(unsigned long prot)
+static inline vm_flags_t sparc_calc_vm_prot_bits(unsigned long prot)
{
if (adi_capable() && (prot & PROT_ADI)) {
struct pt_regs *regs;
@@ -58,7 +58,7 @@ static inline int sparc_validate_prot(unsigned long prot, unsigned long addr)
/* arch_validate_flags() - Ensure combination of flags is valid for a
* VMA.
*/
-static inline bool arch_validate_flags(unsigned long vm_flags)
+static inline bool arch_validate_flags(vm_flags_t vm_flags)
{
/* If ADI is being enabled on this VMA, check for ADI
* capability on the platform and ensure VMA is suitable
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index adcba7329386..71befa109e1c 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -145,6 +145,9 @@
#define SO_PASSRIGHTS 0x005c
+#define SO_INQ 0x005d
+#define SCM_INQ SO_INQ
+
#if !defined(__KERNEL__)
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 80504148d8a5..4b9431311e05 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -295,122 +295,3 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return entry;
}
-
-static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
- unsigned long addr)
-{
- pgtable_t token = pmd_pgtable(*pmd);
-
- pmd_clear(pmd);
- pte_free_tlb(tlb, token, addr);
- mm_dec_nr_ptes(tlb->mm);
-}
-
-static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- pmd_t *pmd;
- unsigned long next;
- unsigned long start;
-
- start = addr;
- pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
- if (pmd_none(*pmd))
- continue;
- if (is_hugetlb_pmd(*pmd))
- pmd_clear(pmd);
- else
- hugetlb_free_pte_range(tlb, pmd, addr);
- } while (pmd++, addr = next, addr != end);
-
- start &= PUD_MASK;
- if (start < floor)
- return;
- if (ceiling) {
- ceiling &= PUD_MASK;
- if (!ceiling)
- return;
- }
- if (end - 1 > ceiling - 1)
- return;
-
- pmd = pmd_offset(pud, start);
- pud_clear(pud);
- pmd_free_tlb(tlb, pmd, start);
- mm_dec_nr_pmds(tlb->mm);
-}
-
-static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- pud_t *pud;
- unsigned long next;
- unsigned long start;
-
- start = addr;
- pud = pud_offset(p4d, addr);
- do {
- next = pud_addr_end(addr, end);
- if (pud_none_or_clear_bad(pud))
- continue;
- if (is_hugetlb_pud(*pud))
- pud_clear(pud);
- else
- hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
- ceiling);
- } while (pud++, addr = next, addr != end);
-
- start &= PGDIR_MASK;
- if (start < floor)
- return;
- if (ceiling) {
- ceiling &= PGDIR_MASK;
- if (!ceiling)
- return;
- }
- if (end - 1 > ceiling - 1)
- return;
-
- pud = pud_offset(p4d, start);
- p4d_clear(p4d);
- pud_free_tlb(tlb, pud, start);
- mm_dec_nr_puds(tlb->mm);
-}
-
-void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- pgd_t *pgd;
- p4d_t *p4d;
- unsigned long next;
-
- addr &= PMD_MASK;
- if (addr < floor) {
- addr += PMD_SIZE;
- if (!addr)
- return;
- }
- if (ceiling) {
- ceiling &= PMD_MASK;
- if (!ceiling)
- return;
- }
- if (end - 1 > ceiling - 1)
- end -= PMD_SIZE;
- if (addr > end - 1)
- return;
-
- pgd = pgd_offset(tlb->mm, addr);
- p4d = p4d_offset(pgd, addr);
- do {
- next = p4d_addr_end(addr, end);
- if (p4d_none_or_clear_bad(p4d))
- continue;
- hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
- } while (p4d++, addr = next, addr != end);
-}
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 25ae4c897aae..7ed58bf3aaca 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -3201,7 +3201,7 @@ void copy_highpage(struct page *to, struct page *from)
}
EXPORT_SYMBOL(copy_highpage);
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
unsigned long prot = pgprot_val(protection_map[vm_flags &
(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index f08e8a7fac93..9083bfdb7735 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -6,6 +6,7 @@ config UML
bool
default y
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -35,6 +36,7 @@ config UML
select HAVE_RUST
select ARCH_HAS_UBSAN
select HAVE_ARCH_TRACEHOOK
+ select HAVE_SYSCALL_TRACEPOINTS
select THREAD_INFO_IN_TASK
config MMU
@@ -82,9 +84,6 @@ config NR_CPUS
range 1 1
default 1
-config ARCH_HAS_CACHE_LINE_SIZE
- def_bool y
-
source "arch/$(HEADER_ARCH)/um/Kconfig"
config MAY_HAVE_RUNTIME_DEPS
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index 34085bfc6d41..6a0354ca032f 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -160,6 +160,7 @@ config UML_RTC
config UML_PCI
bool
select FORCE_PCI
+ select IRQ_MSI_LIB
select UML_IOMEM_EMULATION
select UML_DMA_EMULATION
select PCI_MSI
diff --git a/arch/um/drivers/rtc_user.c b/arch/um/drivers/rtc_user.c
index 51e79f3148cd..67912fcf7b28 100644
--- a/arch/um/drivers/rtc_user.c
+++ b/arch/um/drivers/rtc_user.c
@@ -28,7 +28,7 @@ int uml_rtc_start(bool timetravel)
int err;
if (timetravel) {
- int err = os_pipe(uml_rtc_irq_fds, 1, 1);
+ err = os_pipe(uml_rtc_irq_fds, 1, 1);
if (err)
goto fail;
} else {
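The fix above removes an inner declaration that shadowed the function-scope err, so the value checked on the failure path was never the one os_pipe() returned. A minimal reproduction of the shadowing pattern (names are illustrative, not the UML code; in the real bug the outer err was simply never assigned):

    #include <stdio.h>

    static int fake_os_pipe(void) { return -42; }  /* always fails here */

    static int start_broken(void)
    {
            int err = 0;                 /* outer err the caller sees */

            {
                    int err = fake_os_pipe();  /* shadows the outer err */
                    if (err)
                            goto fail;
            }
            return 0;
    fail:
            return err;  /* outer err: still 0, the failure is lost */
    }

    int main(void)
    {
            printf("broken start reports: %d\n", start_broken()); /* 0 */
            return 0;
    }

Building with -Wshadow flags exactly this construct, which is why the fix simply assigns to the existing variable.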
diff --git a/arch/um/drivers/vfio_kern.c b/arch/um/drivers/vfio_kern.c
index 13b971a2bd43..915812a79bfc 100644
--- a/arch/um/drivers/vfio_kern.c
+++ b/arch/um/drivers/vfio_kern.c
@@ -16,6 +16,7 @@
#include <init.h>
#include <os.h>
+#include "mconsole_kern.h"
#include "virt-pci.h"
#include "vfio_user.h"
@@ -60,6 +61,7 @@ static LIST_HEAD(uml_vfio_groups);
static DEFINE_MUTEX(uml_vfio_groups_mtx);
static LIST_HEAD(uml_vfio_devices);
+static DEFINE_MUTEX(uml_vfio_devices_mtx);
static int uml_vfio_set_container(int group_fd)
{
@@ -581,32 +583,44 @@ static struct uml_vfio_device *uml_vfio_find_device(const char *device)
return NULL;
}
-static int uml_vfio_cmdline_set(const char *device, const struct kernel_param *kp)
+static struct uml_vfio_device *uml_vfio_add_device(const char *device)
{
struct uml_vfio_device *dev;
int fd;
+ guard(mutex)(&uml_vfio_devices_mtx);
+
if (uml_vfio_container.fd < 0) {
fd = uml_vfio_user_open_container();
if (fd < 0)
- return fd;
+ return ERR_PTR(fd);
uml_vfio_container.fd = fd;
}
if (uml_vfio_find_device(device))
- return -EEXIST;
+ return ERR_PTR(-EEXIST);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
dev->name = kstrdup(device, GFP_KERNEL);
if (!dev->name) {
kfree(dev);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
list_add_tail(&dev->list, &uml_vfio_devices);
+ return dev;
+}
+
+static int uml_vfio_cmdline_set(const char *device, const struct kernel_param *kp)
+{
+ struct uml_vfio_device *dev;
+
+ dev = uml_vfio_add_device(device);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
return 0;
}
@@ -629,6 +643,42 @@ __uml_help(uml_vfio_cmdline_param_ops,
" through multiple PCI devices to UML.\n\n"
);
+static int uml_vfio_mc_config(char *str, char **error_out)
+{
+ struct uml_vfio_device *dev;
+
+ if (*str != '=') {
+ *error_out = "Invalid config";
+ return -EINVAL;
+ }
+ str += 1;
+
+ dev = uml_vfio_add_device(str);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ uml_vfio_open_device(dev);
+ return 0;
+}
+
+static int uml_vfio_mc_id(char **str, int *start_out, int *end_out)
+{
+ return -EOPNOTSUPP;
+}
+
+static int uml_vfio_mc_remove(int n, char **error_out)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct mc_device uml_vfio_mc = {
+ .list = LIST_HEAD_INIT(uml_vfio_mc.list),
+ .name = "vfio_uml.device",
+ .config = uml_vfio_mc_config,
+ .get_config = NULL,
+ .id = uml_vfio_mc_id,
+ .remove = uml_vfio_mc_remove,
+};
+
static int __init uml_vfio_init(void)
{
struct uml_vfio_device *dev, *n;
@@ -639,6 +689,8 @@ static int __init uml_vfio_init(void)
list_for_each_entry_safe(dev, n, &uml_vfio_devices, list)
uml_vfio_open_device(dev);
+ mconsole_register_dev(&uml_vfio_mc);
+
return 0;
}
late_initcall(uml_vfio_init);
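uml_vfio_add_device() now reports failures through ERR_PTR-encoded pointers and relies on guard(mutex) to drop uml_vfio_devices_mtx automatically on every return path. A standalone sketch of the ERR_PTR convention, simplified from include/linux/err.h:

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ENOMEM 12

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            /* the top 4095 addresses are reserved for error codes */
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *add_device(int fail)
    {
            static int dev;                  /* stand-in for a real object */

            if (fail)
                    return ERR_PTR(-ENOMEM);
            return &dev;
    }

    int main(void)
    {
            void *dev = add_device(1);

            if (IS_ERR(dev))
                    printf("add_device failed: %ld\n", PTR_ERR(dev)); /* -12 */
            return 0;
    }

Encoding the error into the pointer lets both the cmdline path and the new mconsole path share one helper and one return value.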
diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
index 0fe207ca4b72..557d93aea00a 100644
--- a/arch/um/drivers/virt-pci.c
+++ b/arch/um/drivers/virt-pci.c
@@ -7,6 +7,7 @@
#include <linux/pci.h>
#include <linux/logic_iomem.h>
#include <linux/of_platform.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/unaligned.h>
@@ -29,7 +30,6 @@ static struct um_pci_device *um_pci_platform_device;
static struct um_pci_device_reg um_pci_devices[MAX_DEVICES];
static struct fwnode_handle *um_pci_fwnode;
static struct irq_domain *um_pci_inner_domain;
-static struct irq_domain *um_pci_msi_domain;
static unsigned long um_pci_msi_used[BITS_TO_LONGS(MAX_MSI_VECTORS)];
static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
@@ -400,21 +400,24 @@ static void um_pci_inner_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops um_pci_inner_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = um_pci_inner_domain_alloc,
.free = um_pci_inner_domain_free,
};
-static struct irq_chip um_pci_msi_irq_chip = {
- .name = "UM virtual PCIe MSI",
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info um_pci_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS |
- MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
- .chip = &um_pci_msi_irq_chip,
+#define UM_PCI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+#define UM_PCI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+
+static const struct msi_parent_ops um_pci_msi_parent_ops = {
+ .required_flags = UM_PCI_MSI_FLAGS_REQUIRED,
+ .supported_flags = UM_PCI_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .prefix = "UM-virtual-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static struct resource busn_resource = {
@@ -559,17 +562,14 @@ static int __init um_pci_init(void)
goto free;
}
- um_pci_inner_domain = irq_domain_create_linear(um_pci_fwnode, MAX_MSI_VECTORS,
- &um_pci_inner_domain_ops, NULL);
- if (!um_pci_inner_domain) {
- err = -ENOMEM;
- goto free;
- }
+ struct irq_domain_info info = {
+ .fwnode = um_pci_fwnode,
+ .ops = &um_pci_inner_domain_ops,
+ .size = MAX_MSI_VECTORS,
+ };
- um_pci_msi_domain = pci_msi_create_irq_domain(um_pci_fwnode,
- &um_pci_msi_domain_info,
- um_pci_inner_domain);
- if (!um_pci_msi_domain) {
+ um_pci_inner_domain = msi_create_parent_irq_domain(&info, &um_pci_msi_parent_ops);
+ if (!um_pci_inner_domain) {
err = -ENOMEM;
goto free;
}
@@ -611,7 +611,6 @@ device_initcall(um_pci_init);
static void __exit um_pci_exit(void)
{
- irq_domain_remove(um_pci_msi_domain);
irq_domain_remove(um_pci_inner_domain);
pci_free_resource_list(&bridge->windows);
pci_free_host_bridge(bridge);
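The conversion replaces the separate PCI/MSI child domain with a single parent domain described by msi_parent_ops; the required/supported flag split expresses what any child domain must and may request. A hedged sketch of that contract (an assumption drawn from the flag names above, not a restatement of the msi-lib internals):

    #include <stdio.h>

    #define FLAG_DEF_DOM_OPS  (1u << 0)
    #define FLAG_DEF_CHIP_OPS (1u << 1)
    #define FLAG_NO_AFFINITY  (1u << 2)
    #define FLAG_PCI_MSIX     (1u << 3)

    static int child_flags_ok(unsigned required, unsigned supported,
                              unsigned child)
    {
            if ((child & required) != required)
                    return 0;            /* missing a required flag */
            if (child & ~(supported | required))
                    return 0;            /* asking for the unsupported */
            return 1;
    }

    int main(void)
    {
            unsigned req = FLAG_DEF_DOM_OPS | FLAG_DEF_CHIP_OPS |
                           FLAG_NO_AFFINITY;
            unsigned sup = FLAG_PCI_MSIX;

            printf("%d\n", child_flags_ok(req, sup, req | FLAG_PCI_MSIX)); /* 1 */
            printf("%d\n", child_flags_ok(req, sup, FLAG_PCI_MSIX));       /* 0 */
            return 0;
    }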
diff --git a/arch/um/drivers/virtio_pcidev.c b/arch/um/drivers/virtio_pcidev.c
index 3c4c4c928fdd..e9e23cc3f357 100644
--- a/arch/um/drivers/virtio_pcidev.c
+++ b/arch/um/drivers/virtio_pcidev.c
@@ -42,7 +42,7 @@ struct virtio_pcidev_device {
void *extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS + 1];
DECLARE_BITMAP(used_bufs, VIRTIO_PCIDEV_WRITE_BUFS);
-#define UM_PCI_STAT_WAITING 0
+#define VIRTIO_PCIDEV_STAT_WAITING 0
unsigned long status;
bool platform;
@@ -172,7 +172,7 @@ static int virtio_pcidev_send_cmd(struct virtio_pcidev_device *dev,
}
/* kick and poll for getting a response on the queue */
- set_bit(UM_PCI_STAT_WAITING, &dev->status);
+ set_bit(VIRTIO_PCIDEV_STAT_WAITING, &dev->status);
virtqueue_kick(dev->cmd_vq);
ret = 0;
@@ -193,7 +193,7 @@ static int virtio_pcidev_send_cmd(struct virtio_pcidev_device *dev,
}
udelay(1);
}
- clear_bit(UM_PCI_STAT_WAITING, &dev->status);
+ clear_bit(VIRTIO_PCIDEV_STAT_WAITING, &dev->status);
if (bounce_out)
memcpy(out, buf->data, out_size);
@@ -439,7 +439,7 @@ static void virtio_pcidev_cmd_vq_cb(struct virtqueue *vq)
void *cmd;
int len;
- if (test_bit(UM_PCI_STAT_WAITING, &dev->status))
+ if (test_bit(VIRTIO_PCIDEV_STAT_WAITING, &dev->status))
return;
while ((cmd = virtqueue_get_buf(vq, &len)))
diff --git a/arch/um/include/asm/cpufeature.h b/arch/um/include/asm/cpufeature.h
index 1eb8b834fbec..4354f6984271 100644
--- a/arch/um/include/asm/cpufeature.h
+++ b/arch/um/include/asm/cpufeature.h
@@ -4,7 +4,7 @@
#include <asm/processor.h>
-#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#if defined(__KERNEL__) && !defined(__ASSEMBLER__)
#include <asm/asm.h>
#include <linux/bitops.h>
@@ -137,5 +137,5 @@ t_no:
#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
boot_cpu_data.x86_model
-#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLER__) */
#endif /* _ASM_UM_CPUFEATURE_H */
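__ASSEMBLER__ is predefined by the compiler itself whenever assembly is being preprocessed, so the guard no longer depends on kbuild passing a hand-maintained -D__ASSEMBLY__. The header idiom stays the same; a minimal sketch:

    /* demo.h: the guard pattern, unchanged apart from the macro name */
    #ifndef __ASSEMBLER__
    struct demo;                 /* visible to C, hidden from .S files */
    int demo_value(void);
    #endif

The same mechanical substitution repeats across the UML headers below (current.h, page.h, ptrace-generic.h, thread_info.h, as-layout.h).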
diff --git a/arch/um/include/asm/current.h b/arch/um/include/asm/current.h
index de64e032d66c..8accc6d6f502 100644
--- a/arch/um/include/asm/current.h
+++ b/arch/um/include/asm/current.h
@@ -5,7 +5,7 @@
#include <linux/compiler.h>
#include <linux/threads.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct task_struct;
extern struct task_struct *cpu_tasks[NR_CPUS];
@@ -18,6 +18,6 @@ static __always_inline struct task_struct *get_current(void)
#define current get_current()
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_CURRENT_H */
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 23dcc914d44e..0bbb24868557 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -16,11 +16,6 @@
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
- /*
- * This is called by fs/exec.c and sys_unshare()
- * when the new ->mm is used for the first time.
- */
- __switch_mm(&new->context.id);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -28,11 +23,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
unsigned cpu = smp_processor_id();
- if(prev != next){
+ if (prev != next) {
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
- if(next != &init_mm)
- __switch_mm(&next->context.id);
}
}
diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
index 3d516f3ca9c7..6f54254aaf44 100644
--- a/arch/um/include/asm/page.h
+++ b/arch/um/include/asm/page.h
@@ -11,7 +11,7 @@
#include <vdso/page.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct page;
@@ -94,7 +94,7 @@ extern unsigned long uml_physmem;
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#ifdef CONFIG_X86_32
#define __HAVE_ARCH_GATE_AREA 1
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h
index 4696f24d1492..86d74f9d33cf 100644
--- a/arch/um/include/asm/ptrace-generic.h
+++ b/arch/um/include/asm/ptrace-generic.h
@@ -6,7 +6,7 @@
#ifndef __UM_PTRACE_GENERIC_H
#define __UM_PTRACE_GENERIC_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <sysdep/ptrace.h>
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index f9ad06fcc991..7a6f4dc99fa1 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -9,7 +9,7 @@
#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/types.h>
#include <asm/page.h>
@@ -43,6 +43,8 @@ struct thread_info {
#define TIF_NOTIFY_RESUME 8
#define TIF_SECCOMP 9 /* secure computing */
#define TIF_SINGLESTEP 10 /* single stepping userspace */
+#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
+
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -50,7 +52,11 @@ struct thread_info {
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
+ _TIF_NOTIFY_RESUME)
+
#endif
diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index 4f44dcce8a7c..2f9bfd99460a 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -26,7 +26,7 @@
#define STUB_DATA_PAGES 2 /* must be a power of two */
#define STUB_END (STUB_DATA + STUB_DATA_PAGES * UM_KERN_PAGE_SIZE)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <sysdep/ptrace.h>
diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
index 89df9a55fbea..4f977ef5dda5 100644
--- a/arch/um/include/shared/skas/mm_id.h
+++ b/arch/um/include/shared/skas/mm_id.h
@@ -19,8 +19,6 @@ struct mm_id {
int syscall_fd_map[STUB_MAX_FDS];
};
-void __switch_mm(struct mm_id *mm_idp);
-
void notify_mm_kill(int pid);
#endif
diff --git a/arch/um/include/shared/skas/skas.h b/arch/um/include/shared/skas/skas.h
index 7d1de4cab551..807514e10538 100644
--- a/arch/um/include/shared/skas/skas.h
+++ b/arch/um/include/shared/skas/skas.h
@@ -9,7 +9,6 @@
#include <sysdep/ptrace.h>
extern int using_seccomp;
-extern int userspace_pid[];
extern void new_thread_handler(void);
extern void handle_syscall(struct uml_pt_regs *regs);
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index cb8b5cd9285c..13812fa97eee 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -26,8 +26,6 @@ void flush_thread(void)
get_safe_registers(current_pt_regs()->regs.gp,
current_pt_regs()->regs.fp);
-
- __switch_mm(&current->mm->context.id);
}
void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 0cd6fad3d908..1be644de9e41 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -82,14 +82,18 @@ struct task_struct *__switch_to(struct task_struct *from, struct task_struct *to
void interrupt_end(void)
{
struct pt_regs *regs = &current->thread.regs;
-
- if (need_resched())
- schedule();
- if (test_thread_flag(TIF_SIGPENDING) ||
- test_thread_flag(TIF_NOTIFY_SIGNAL))
- do_signal(regs);
- if (test_thread_flag(TIF_NOTIFY_RESUME))
- resume_user_mode_work(regs);
+ unsigned long thread_flags;
+
+ thread_flags = read_thread_flags();
+ while (thread_flags & _TIF_WORK_MASK) {
+ if (thread_flags & _TIF_NEED_RESCHED)
+ schedule();
+ if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ do_signal(regs);
+ if (thread_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(regs);
+ thread_flags = read_thread_flags();
+ }
}
int get_current_pid(void)
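interrupt_end() now rereads the flags after handling each batch so that work raised mid-handling (for example a signal arriving during schedule()) is not dropped, using the new _TIF_WORK_MASK from thread_info.h. A standalone sketch of the recheck loop with illustrative flag values:

    #include <stdio.h>

    #define WORK_RESCHED (1u << 0)
    #define WORK_SIGNAL  (1u << 1)
    #define WORK_MASK    (WORK_RESCHED | WORK_SIGNAL)

    static unsigned pending = WORK_RESCHED;

    static unsigned read_flags(void) { return pending; }

    static void do_resched(void)
    {
            pending &= ~WORK_RESCHED;
            pending |= WORK_SIGNAL;   /* new work arrives mid-handling */
    }

    static void do_signal_work(void) { pending &= ~WORK_SIGNAL; }

    int main(void)
    {
            unsigned flags = read_flags();

            while (flags & WORK_MASK) {
                    if (flags & WORK_RESCHED)
                            do_resched();
                    if (flags & WORK_SIGNAL)
                            do_signal_work();
                    flags = read_flags();  /* the recheck is the point */
            }
            printf("pending = %#x\n", pending);  /* 0 */
            return 0;
    }

Without the final reread, the signal raised inside do_resched() would escape back to userspace unhandled.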
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 2124624b7817..fdbb37b5c399 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -9,6 +9,9 @@
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
void user_enable_single_step(struct task_struct *child)
{
set_tsk_thread_flag(child, TIF_SINGLESTEP);
@@ -126,6 +129,9 @@ int syscall_trace_enter(struct pt_regs *regs)
UPT_SYSCALL_ARG3(&regs->regs),
UPT_SYSCALL_ARG4(&regs->regs));
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, UPT_SYSCALL_NR(&regs->regs));
+
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return 0;
@@ -142,6 +148,9 @@ void syscall_trace_leave(struct pt_regs *regs)
if (test_thread_flag(TIF_SINGLESTEP))
send_sigtrap(&regs->regs, 0);
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, PT_REGS_SYSCALL_RET(regs));
+
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 849fafa4b54f..afe9a2f251ef 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -20,8 +20,8 @@
/* Ensure the stub_data struct covers the allocated area */
static_assert(sizeof(struct stub_data) == STUB_DATA_PAGES * UM_KERN_PAGE_SIZE);
-spinlock_t mm_list_lock;
-struct list_head mm_list;
+static spinlock_t mm_list_lock;
+static struct list_head mm_list;
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 05dcdc057af9..5881b17eb987 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -26,8 +26,6 @@ static int __init start_kernel_proc(void *unused)
return 0;
}
-extern int userspace_pid[];
-
static char cpu0_irqstack[THREAD_SIZE] __aligned(THREAD_SIZE);
int __init start_uml(void)
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index a5beaea2967e..ba7494f9bfe4 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -9,8 +9,8 @@
#include <kern_util.h>
#include <sysdep/ptrace.h>
#include <sysdep/ptrace_user.h>
-#include <sysdep/syscalls.h>
#include <linux/time-internal.h>
+#include <asm/syscall.h>
#include <asm/unistd.h>
#include <asm/delay.h>
@@ -43,7 +43,14 @@ void handle_syscall(struct uml_pt_regs *r)
tt_extra_sched_jiffies += 1;
if (syscall >= 0 && syscall < __NR_syscalls) {
- unsigned long ret = EXECUTE_SYSCALL(syscall, regs);
+ unsigned long ret;
+
+ ret = (*sys_call_table[syscall])(UPT_SYSCALL_ARG1(&regs->regs),
+ UPT_SYSCALL_ARG2(&regs->regs),
+ UPT_SYSCALL_ARG3(&regs->regs),
+ UPT_SYSCALL_ARG4(&regs->regs),
+ UPT_SYSCALL_ARG5(&regs->regs),
+ UPT_SYSCALL_ARG6(&regs->regs));
PT_REGS_SET_SYSCALL_RETURN(regs, ret);
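With sysdep/syscalls.h gone, the EXECUTE_SYSCALL macro is replaced by a direct indexed call through sys_call_table, with all six argument slots passed explicitly. A miniature standalone version of that dispatch:

    #include <stdio.h>

    typedef long (*sys_call_ptr_t)(long, long, long, long, long, long);

    static long sys_demo_add(long a, long b, long c, long d, long e, long f)
    {
            (void)c; (void)d; (void)e; (void)f;
            return a + b;
    }

    static const sys_call_ptr_t table[] = { sys_demo_add };

    int main(void)
    {
            int nr = 0;                    /* "syscall number" */
            long ret = -1;

            /* bounds check mirrors the syscall >= 0 && < __NR_syscalls test */
            if (nr >= 0 && nr < (int)(sizeof(table) / sizeof(table[0])))
                    ret = table[nr](2, 3, 0, 0, 0, 0);
            printf("ret = %ld\n", ret);    /* 5 */
            return 0;
    }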
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index e42ffac23e3c..78f48fa9db8b 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -267,7 +267,7 @@ static void get_skas_faultinfo(int pid, struct faultinfo *fi)
memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
}
-static void handle_trap(int pid, struct uml_pt_regs *regs)
+static void handle_trap(struct uml_pt_regs *regs)
{
if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
fatal_sigsegv();
@@ -434,7 +434,6 @@ static int __init init_stub_exe_fd(void)
__initcall(init_stub_exe_fd);
int using_seccomp;
-int userspace_pid[NR_CPUS];
/**
* start_userspace() - prepare a new userspace process
@@ -548,12 +547,12 @@ out_close:
return err;
}
-int unscheduled_userspace_iterations;
+static int unscheduled_userspace_iterations;
extern unsigned long tt_extra_sched_jiffies;
void userspace(struct uml_pt_regs *regs)
{
- int err, status, op, pid = userspace_pid[0];
+ int err, status, op;
siginfo_t si_ptrace;
siginfo_t *si;
int sig;
@@ -562,6 +561,8 @@ void userspace(struct uml_pt_regs *regs)
interrupt_end();
while (1) {
+ struct mm_id *mm_id = current_mm_id();
+
/*
* When we are in time-travel mode, userspace can theoretically
* do a *lot* of work without being scheduled. The problem with
@@ -590,14 +591,12 @@ void userspace(struct uml_pt_regs *regs)
current_mm_sync();
if (using_seccomp) {
- struct mm_id *mm_id = current_mm_id();
struct stub_data *proc_data = (void *) mm_id->stack;
- int ret;
- ret = set_stub_state(regs, proc_data, singlestepping());
- if (ret) {
+ err = set_stub_state(regs, proc_data, singlestepping());
+ if (err) {
printk(UM_KERN_ERR "%s - failed to set regs: %d",
- __func__, ret);
+ __func__, err);
fatal_sigsegv();
}
@@ -623,10 +622,10 @@ void userspace(struct uml_pt_regs *regs)
mm_id->syscall_data_len = 0;
mm_id->syscall_fd_num = 0;
- ret = get_stub_state(regs, proc_data, NULL);
- if (ret) {
+ err = get_stub_state(regs, proc_data, NULL);
+ if (err) {
printk(UM_KERN_ERR "%s - failed to get regs: %d",
- __func__, ret);
+ __func__, err);
fatal_sigsegv();
}
@@ -645,8 +644,10 @@ void userspace(struct uml_pt_regs *regs)
GET_FAULTINFO_FROM_MC(regs->faultinfo, mcontext);
}
} else {
+ int pid = mm_id->pid;
+
/* Flush out any pending syscalls */
- err = syscall_stub_flush(current_mm_id());
+ err = syscall_stub_flush(mm_id);
if (err) {
if (err == -ENOMEM)
report_enomem();
@@ -756,7 +757,7 @@ void userspace(struct uml_pt_regs *regs)
handle_syscall(regs);
break;
case SIGTRAP + 0x80:
- handle_trap(pid, regs);
+ handle_trap(regs);
break;
case SIGTRAP:
relay_signal(SIGTRAP, (struct siginfo *)si, regs, NULL);
@@ -777,7 +778,6 @@ void userspace(struct uml_pt_regs *regs)
__func__, sig);
fatal_sigsegv();
}
- pid = userspace_pid[0];
interrupt_end();
/* Avoid -ERESTARTSYS handling in host */
@@ -902,8 +902,3 @@ void reboot_skas(void)
block_signals_trace();
UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
}
-
-void __switch_mm(struct mm_id *mm_idp)
-{
- userspace_pid[0] = mm_idp->pid;
-}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f23919a7db40..58d890fe2100 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -99,7 +99,6 @@ config X86
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_PREEMPT_LAZY
- select ARCH_HAS_PTE_DEVMAP if X86_64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_HW_PTE_YOUNG
select ARCH_HAS_NONLEAF_PMD_YOUNG if PGTABLE_LEVELS > 2
@@ -124,6 +123,7 @@ config X86
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if X86_64
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
@@ -242,7 +242,6 @@ config X86
select HAVE_GUP_FAST
select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE
select HAVE_FTRACE_GRAPH_FUNC if HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_FREGS if HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_GRAPH_TRACER if X86_32 || (X86_64 && DYNAMIC_FTRACE)
select HAVE_FUNCTION_TRACER
diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c
index 916bac09b464..63e037e94e4c 100644
--- a/arch/x86/boot/cpuflags.c
+++ b/arch/x86/boot/cpuflags.c
@@ -106,5 +106,18 @@ void get_cpuflags(void)
cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
&cpu.flags[1]);
}
+
+ if (max_amd_level >= 0x8000001f) {
+ u32 ebx;
+
+ /*
+ * The X86_FEATURE_COHERENCY_SFW_NO bit lives in the virtualization
+ * flags entry (word 8) and is normally populated by scattered.c,
+ * which runs later; the bit therefore needs to be set explicitly here.
+ */
+ cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
+ if (ebx & BIT(31))
+ set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
+ }
}
}
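The added block gates the 0x8000001f read on the maximum extended leaf and then tests EBX bit 31 directly, since the boot stub cannot rely on scattered.c. A mocked standalone illustration of the same test (values are fabricated, not real CPUID output):

    #include <stdio.h>

    struct cpuid_regs { unsigned eax, ebx, ecx, edx; };

    static struct cpuid_regs mock_cpuid(unsigned leaf)
    {
            struct cpuid_regs r = { 0 };

            if (leaf == 0x80000000)
                    r.eax = 0x8000001f;        /* max extended leaf */
            else if (leaf == 0x8000001f)
                    r.ebx = 1u << 31;          /* COHERENCY_SFW_NO set */
            return r;
    }

    int main(void)
    {
            unsigned max_ext = mock_cpuid(0x80000000).eax;

            if (max_ext >= 0x8000001f) {
                    unsigned ebx = mock_cpuid(0x8000001f).ebx;

                    printf("COHERENCY_SFW_NO: %s\n",
                           (ebx & (1u << 31)) ? "yes" : "no");
            }
            return 0;
    }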
diff --git a/arch/x86/boot/startup/sev-shared.c b/arch/x86/boot/startup/sev-shared.c
index 7a706db87b93..ac7dfd21ddd4 100644
--- a/arch/x86/boot/startup/sev-shared.c
+++ b/arch/x86/boot/startup/sev-shared.c
@@ -810,6 +810,13 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
if (ret)
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
}
+
+ /*
+ * If validating memory (making it private) and affected by the
+ * cache-coherency vulnerability, perform the cache eviction mitigation.
+ */
+ if (validate && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
+ sev_evict_cache((void *)vaddr, 1);
}
/*
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index fc59ce78c477..400a6ab75d45 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -358,10 +358,31 @@ static void svsm_pval_pages(struct snp_psc_desc *desc)
static void pvalidate_pages(struct snp_psc_desc *desc)
{
+ struct psc_entry *e;
+ unsigned int i;
+
if (snp_vmpl)
svsm_pval_pages(desc);
else
pval_pages(desc);
+
+ /*
+ * If not affected by the cache-coherency vulnerability, there is no need
+ * to perform the cache eviction mitigation.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+ return;
+
+ for (i = 0; i <= desc->hdr.end_entry; i++) {
+ e = &desc->entries[i];
+
+ /*
+ * If validating memory (making it private) perform the cache
+ * eviction mitigation.
+ */
+ if (e->operation == SNP_PAGE_STATE_PRIVATE)
+ sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
+ }
}
static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index f1b6d40154e3..f1adfba1a76e 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -104,10 +104,12 @@ static void crypto_aegis128_aesni_process_ad(
}
}
-static __always_inline void
+static __always_inline int
crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
struct skcipher_walk *walk, bool enc)
{
+ int err = 0;
+
while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
if (enc)
aegis128_aesni_enc(state, walk->src.virt.addr,
@@ -119,7 +121,10 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
walk->dst.virt.addr,
round_down(walk->nbytes,
AEGIS128_BLOCK_SIZE));
- skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+ kernel_fpu_end();
+ err = skcipher_walk_done(walk,
+ walk->nbytes % AEGIS128_BLOCK_SIZE);
+ kernel_fpu_begin();
}
if (walk->nbytes) {
@@ -131,8 +136,11 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
aegis128_aesni_dec_tail(state, walk->src.virt.addr,
walk->dst.virt.addr,
walk->nbytes);
- skcipher_walk_done(walk, 0);
+ kernel_fpu_end();
+ err = skcipher_walk_done(walk, 0);
+ kernel_fpu_begin();
}
+ return err;
}
static struct aegis_ctx *crypto_aegis128_aesni_ctx(struct crypto_aead *aead)
@@ -165,7 +173,7 @@ static int crypto_aegis128_aesni_setauthsize(struct crypto_aead *tfm,
return 0;
}
-static __always_inline void
+static __always_inline int
crypto_aegis128_aesni_crypt(struct aead_request *req,
struct aegis_block *tag_xor,
unsigned int cryptlen, bool enc)
@@ -174,20 +182,24 @@ crypto_aegis128_aesni_crypt(struct aead_request *req,
struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
struct skcipher_walk walk;
struct aegis_state state;
+ int err;
if (enc)
- skcipher_walk_aead_encrypt(&walk, req, true);
+ err = skcipher_walk_aead_encrypt(&walk, req, false);
else
- skcipher_walk_aead_decrypt(&walk, req, true);
+ err = skcipher_walk_aead_decrypt(&walk, req, false);
+ if (err)
+ return err;
kernel_fpu_begin();
aegis128_aesni_init(&state, &ctx->key, req->iv);
crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
- crypto_aegis128_aesni_process_crypt(&state, &walk, enc);
- aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
-
+ err = crypto_aegis128_aesni_process_crypt(&state, &walk, enc);
+ if (err == 0)
+ aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
+ return err;
}
static int crypto_aegis128_aesni_encrypt(struct aead_request *req)
@@ -196,8 +208,11 @@ static int crypto_aegis128_aesni_encrypt(struct aead_request *req)
struct aegis_block tag = {};
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen;
+ int err;
- crypto_aegis128_aesni_crypt(req, &tag, cryptlen, true);
+ err = crypto_aegis128_aesni_crypt(req, &tag, cryptlen, true);
+ if (err)
+ return err;
scatterwalk_map_and_copy(tag.bytes, req->dst,
req->assoclen + cryptlen, authsize, 1);
@@ -212,11 +227,14 @@ static int crypto_aegis128_aesni_decrypt(struct aead_request *req)
struct aegis_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen - authsize;
+ int err;
scatterwalk_map_and_copy(tag.bytes, req->src,
req->assoclen + cryptlen, authsize, 0);
- crypto_aegis128_aesni_crypt(req, &tag, cryptlen, false);
+ err = crypto_aegis128_aesni_crypt(req, &tag, cryptlen, false);
+ if (err)
+ return err;
return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? -EBADMSG : 0;
}
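skcipher_walk_done() may sleep or allocate, so the glue code now closes the kernel_fpu_begin()/kernel_fpu_end() section around each call and reopens it afterwards, and propagates the walk's error instead of discarding it. A standalone sketch of the bracketing pattern with illustrative stand-ins:

    #include <stdio.h>

    static int fpu_depth;

    static void demo_fpu_begin(void) { fpu_depth++; }
    static void demo_fpu_end(void)   { fpu_depth--; }

    static int may_sleep_step(void)
    {
            if (fpu_depth)
                    printf("BUG: sleeping call inside FPU section\n");
            return 0;
    }

    int main(void)
    {
            int err;

            demo_fpu_begin();
            /* ... SIMD work on one chunk ... */
            demo_fpu_end();
            err = may_sleep_step();      /* safe: FPU section is closed */
            demo_fpu_begin();
            /* ... SIMD work on the next chunk ... */
            demo_fpu_end();
            printf("err = %d\n", err);
            return 0;
    }

The cost is a begin/end pair per walk step; the benefit is that the walk may now use non-atomic (sleeping) allocations, which is why the atomic flag to skcipher_walk_aead_encrypt/decrypt also flips from true to false.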
diff --git a/arch/x86/crypto/aria_aesni_avx2_glue.c b/arch/x86/crypto/aria_aesni_avx2_glue.c
index b4bddcd58457..007b250f774c 100644
--- a/arch/x86/crypto/aria_aesni_avx2_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx2_glue.c
@@ -9,6 +9,7 @@
#include <crypto/aria.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/types.h>
diff --git a/arch/x86/crypto/aria_aesni_avx_glue.c b/arch/x86/crypto/aria_aesni_avx_glue.c
index ab9b38d05332..4c88ef4eba82 100644
--- a/arch/x86/crypto/aria_aesni_avx_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx_glue.c
@@ -9,6 +9,7 @@
#include <crypto/aria.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/types.h>
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index a7d162388142..5c321f255eb7 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -8,6 +8,7 @@
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/types.h>
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index 3bd37d664121..cbede120e5f2 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -10,6 +10,7 @@
#include <linux/unaligned.h>
#include <linux/crypto.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
index dcfc0de333de..d587f05c3c8c 100644
--- a/arch/x86/crypto/curve25519-x86_64.c
+++ b/arch/x86/crypto/curve25519-x86_64.c
@@ -7,6 +7,7 @@
#include <crypto/curve25519.h>
#include <crypto/internal/kpp.h>
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index e640abc1cb8a..9c8b3a335d5c 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c
index 72867fc49ce8..88caf418a06f 100644
--- a/arch/x86/crypto/sm4_aesni_avx_glue.c
+++ b/arch/x86/crypto/sm4_aesni_avx_glue.c
@@ -11,6 +11,7 @@
#include <asm/fpu/api.h>
#include <linux/module.h>
#include <linux/crypto.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <crypto/internal/skcipher.h>
#include <crypto/sm4.h>
diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
index 4c67184dc573..8e9906d36902 100644
--- a/arch/x86/crypto/twofish_glue.c
+++ b/arch/x86/crypto/twofish_glue.c
@@ -40,6 +40,7 @@
#include <crypto/algapi.h>
#include <crypto/twofish.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 1a1ecfa7f72a..8ad77725bf60 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -9,6 +9,7 @@
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <linux/crypto.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 23d86c9750b9..07ba4935e873 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -488,11 +488,14 @@ static inline void apic_setup_apic_calls(void) { }
extern void apic_ack_irq(struct irq_data *data);
+#define APIC_VECTOR_TO_BIT_NUMBER(v) ((unsigned int)(v) % 32)
+#define APIC_VECTOR_TO_REG_OFFSET(v) ((unsigned int)(v) / 32 * 0x10)
+
static inline bool lapic_vector_set_in_irr(unsigned int vector)
{
- u32 irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
+ u32 irr = apic_read(APIC_IRR + APIC_VECTOR_TO_REG_OFFSET(vector));
- return !!(irr & (1U << (vector % 32)));
+ return !!(irr & (1U << APIC_VECTOR_TO_BIT_NUMBER(vector)));
}
static inline bool is_vector_pending(unsigned int vector)
@@ -500,6 +503,65 @@ static inline bool is_vector_pending(unsigned int vector)
return lapic_vector_set_in_irr(vector) || pi_pending_this_cpu(vector);
}
+#define MAX_APIC_VECTOR 256
+#define APIC_VECTORS_PER_REG 32
+
+/*
+ * Vector states are maintained by the APIC in 32-bit registers that
+ * are spaced 16 bytes apart. The status of each vector is kept in a
+ * single bit.
+ */
+static inline int apic_find_highest_vector(void *bitmap)
+{
+ int vec;
+ u32 *reg;
+
+ for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG; vec >= 0; vec -= APIC_VECTORS_PER_REG) {
+ reg = bitmap + APIC_VECTOR_TO_REG_OFFSET(vec);
+ if (*reg)
+ return __fls(*reg) + vec;
+ }
+
+ return -1;
+}
+
+static inline u32 apic_get_reg(void *regs, int reg)
+{
+ return *((u32 *) (regs + reg));
+}
+
+static inline void apic_set_reg(void *regs, int reg, u32 val)
+{
+ *((u32 *) (regs + reg)) = val;
+}
+
+static __always_inline u64 apic_get_reg64(void *regs, int reg)
+{
+ BUILD_BUG_ON(reg != APIC_ICR);
+ return *((u64 *) (regs + reg));
+}
+
+static __always_inline void apic_set_reg64(void *regs, int reg, u64 val)
+{
+ BUILD_BUG_ON(reg != APIC_ICR);
+ *((u64 *) (regs + reg)) = val;
+}
+
+static inline void apic_clear_vector(int vec, void *bitmap)
+{
+ clear_bit(APIC_VECTOR_TO_BIT_NUMBER(vec), bitmap + APIC_VECTOR_TO_REG_OFFSET(vec));
+}
+
+static inline void apic_set_vector(int vec, void *bitmap)
+{
+ set_bit(APIC_VECTOR_TO_BIT_NUMBER(vec), bitmap + APIC_VECTOR_TO_REG_OFFSET(vec));
+}
+
+static inline int apic_test_vector(int vec, void *bitmap)
+{
+ return test_bit(APIC_VECTOR_TO_BIT_NUMBER(vec), bitmap + APIC_VECTOR_TO_REG_OFFSET(vec));
+}
+
/*
* Warm reset vector position:
*/
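A worked example of the new helpers' indexing: 256 vectors map onto eight 32-bit registers placed 0x10 bytes apart, so vector v lives at register offset v / 32 * 0x10, bit v % 32:

    #include <stdio.h>

    #define VEC_TO_REG(v) ((unsigned)(v) / 32 * 0x10)
    #define VEC_TO_BIT(v) ((unsigned)(v) % 32)

    int main(void)
    {
            unsigned vector = 0x61;    /* 97 */

            /* 97 / 32 = 3 -> register offset 0x30; 97 % 32 = 1 -> bit 1 */
            printf("reg offset %#x, bit %u\n",
                   VEC_TO_REG(vector), VEC_TO_BIT(vector));
            return 0;
    }

apic_find_highest_vector() walks the same layout backwards, testing one register per 32 vectors and using __fls() to locate the highest set bit within it.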
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index f0e9acf72547..20fcb8507ad1 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -32,45 +32,42 @@
#ifdef CONFIG_GENERIC_BUG
#ifdef CONFIG_X86_32
-# define __BUG_REL(val) ".long " __stringify(val)
+# define __BUG_REL(val) ".long " val
#else
-# define __BUG_REL(val) ".long " __stringify(val) " - ."
+# define __BUG_REL(val) ".long " val " - ."
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define __BUG_ENTRY(file, line, flags) \
+ "2:\t" __BUG_REL("1b") "\t# bug_entry::bug_addr\n" \
+ "\t" __BUG_REL(file) "\t# bug_entry::file\n" \
+ "\t.word " line "\t# bug_entry::line\n" \
+ "\t.word " flags "\t# bug_entry::flags\n"
+#else
+#define __BUG_ENTRY(file, line, flags) \
+ "2:\t" __BUG_REL("1b") "\t# bug_entry::bug_addr\n" \
+ "\t.word " flags "\t# bug_entry::flags\n"
+#endif
+
+#define _BUG_FLAGS_ASM(ins, file, line, flags, size, extra) \
+ "1:\t" ins "\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
+ __BUG_ENTRY(file, line, flags) \
+ "\t.org 2b + " size "\n" \
+ ".popsection\n" \
+ extra
#define _BUG_FLAGS(ins, flags, extra) \
do { \
- asm_inline volatile("1:\t" ins "\n" \
- ".pushsection __bug_table,\"aw\"\n" \
- "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
- "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
- "\t.word %c1" "\t# bug_entry::line\n" \
- "\t.word %c2" "\t# bug_entry::flags\n" \
- "\t.org 2b+%c3\n" \
- ".popsection\n" \
- extra \
+ asm_inline volatile(_BUG_FLAGS_ASM(ins, "%c0", \
+ "%c1", "%c2", "%c3", extra) \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (flags), \
"i" (sizeof(struct bug_entry))); \
} while (0)
-#else /* !CONFIG_DEBUG_BUGVERBOSE */
-
-#define _BUG_FLAGS(ins, flags, extra) \
-do { \
- asm_inline volatile("1:\t" ins "\n" \
- ".pushsection __bug_table,\"aw\"\n" \
- "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
- "\t.word %c0" "\t# bug_entry::flags\n" \
- "\t.org 2b+%c1\n" \
- ".popsection\n" \
- extra \
- : : "i" (flags), \
- "i" (sizeof(struct bug_entry))); \
-} while (0)
-
-#endif /* CONFIG_DEBUG_BUGVERBOSE */
+#define ARCH_WARN_ASM(file, line, flags, size) \
+ _BUG_FLAGS_ASM(ASM_UD2, file, line, flags, size, "")
#else
@@ -92,11 +89,14 @@ do { \
* were to trigger, we'd rather wreck the machine in an attempt to get the
* message out than not know about it.
*/
+
+#define ARCH_WARN_REACHABLE ANNOTATE_REACHABLE(1b)
+
#define __WARN_FLAGS(flags) \
do { \
__auto_type __flags = BUGFLAG_WARNING|(flags); \
instrumentation_begin(); \
- _BUG_FLAGS(ASM_UD2, __flags, ANNOTATE_REACHABLE(1b)); \
+ _BUG_FLAGS(ASM_UD2, __flags, ARCH_WARN_REACHABLE); \
instrumentation_end(); \
} while (0)
diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h
index 3e51ba459154..1751f1eb95ef 100644
--- a/arch/x86/include/asm/cfi.h
+++ b/arch/x86/include/asm/cfi.h
@@ -116,8 +116,6 @@ struct pt_regs;
#ifdef CONFIG_CFI_CLANG
enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
#define __bpfcall
-extern u32 cfi_bpf_hash;
-extern u32 cfi_bpf_subprog_hash;
static inline int cfi_get_offset(void)
{
@@ -135,6 +133,8 @@ static inline int cfi_get_offset(void)
#define cfi_get_offset cfi_get_offset
extern u32 cfi_get_func_hash(void *func);
+#define cfi_get_func_hash cfi_get_func_hash
+
extern int cfi_get_func_arity(void *func);
#ifdef CONFIG_FINEIBT
@@ -153,12 +153,6 @@ static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
{
return BUG_TRAP_TYPE_NONE;
}
-#define cfi_bpf_hash 0U
-#define cfi_bpf_subprog_hash 0U
-static inline u32 cfi_get_func_hash(void *func)
-{
- return 0;
-}
static inline int cfi_get_func_arity(void *func)
{
return 0;
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 602957dd2609..06fc0479a23f 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -218,6 +218,7 @@
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 2) /* "ept" Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
+#define X86_FEATURE_COHERENCY_SFW_NO ( 8*32+ 4) /* SNP cache coherency software work around not needed */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* Xen paravirtual guest */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 162ebd73a698..cbe19e669080 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -92,8 +92,6 @@ struct irq_cfg {
extern struct irq_cfg *irq_cfg(unsigned int irq);
extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
-extern void lock_vector_lock(void);
-extern void unlock_vector_lock(void);
#ifdef CONFIG_SMP
extern void vector_schedule_cleanup(struct irq_cfg *);
extern void irq_complete_move(struct irq_cfg *cfg);
@@ -101,12 +99,16 @@ extern void irq_complete_move(struct irq_cfg *cfg);
static inline void vector_schedule_cleanup(struct irq_cfg *c) { }
static inline void irq_complete_move(struct irq_cfg *c) { }
#endif
-
extern void apic_ack_edge(struct irq_data *data);
-#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
+#ifdef CONFIG_X86_LOCAL_APIC
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
+#else
static inline void lock_vector_lock(void) {}
static inline void unlock_vector_lock(void) {}
-#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+#endif
/* Statistics */
extern atomic_t irq_err_count;
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 6bfdaeddbae8..5a68e9db6518 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -5,7 +5,7 @@
#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 170000
#define __head __section(".head.text") __no_sanitize_undefined __no_stack_protector
#else
-#define __head __section(".head.text") __no_sanitize_undefined __no_sanitize_coverage
+#define __head __section(".head.text") __no_sanitize_undefined __no_kstack_erase
#endif
struct x86_mapping_info {
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index be10c188614f..e345dbdf933e 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -150,6 +150,11 @@
#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Crestmont */
+#define INTEL_WILDCATLAKE_L IFM(6, 0xD5)
+
+#define INTEL_NOVALAKE IFM(18, 0x01)
+#define INTEL_NOVALAKE_L IFM(18, 0x03)
+
/* "Small Core" Processors (Atom/E-Core) */
#define INTEL_ATOM_BONNELL IFM(6, 0x1C) /* Diamondville, Pineview */
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 5036f13ab69f..5a0d42464d44 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -26,7 +26,22 @@ enum {
IRQ_REMAP_X2APIC_MODE,
};
-struct vcpu_data {
+/*
+ * This is mainly used to communicate information back and forth
+ * between SVM and the IOMMU when setting up and tearing down a
+ * posted interrupt.
+ */
+struct amd_iommu_pi_data {
+ u64 vapic_addr; /* Physical address of the vCPU's vAPIC. */
+ u32 ga_tag;
+ u32 vector; /* Guest vector of the interrupt */
+ int cpu;
+ bool ga_log_intr;
+ bool is_guest_mode;
+ void *ir_data;
+};
+
+struct intel_iommu_pi_data {
u64 pi_desc_addr; /* Physical address of PI Descriptor */
u32 vector; /* Guest vector of the interrupt */
};
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 8d50e3e0a19b..18a5c3119e1a 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -49,7 +49,6 @@ KVM_X86_OP(set_idt)
KVM_X86_OP(get_gdt)
KVM_X86_OP(set_gdt)
KVM_X86_OP(sync_dirty_debug_regs)
-KVM_X86_OP(set_dr6)
KVM_X86_OP(set_dr7)
KVM_X86_OP(cache_reg)
KVM_X86_OP(get_rflags)
@@ -112,7 +111,7 @@ KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging)
KVM_X86_OP_OPTIONAL(vcpu_blocking)
KVM_X86_OP_OPTIONAL(vcpu_unblocking)
KVM_X86_OP_OPTIONAL(pi_update_irte)
-KVM_X86_OP_OPTIONAL(pi_start_assignment)
+KVM_X86_OP_OPTIONAL(pi_start_bypass)
KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
@@ -139,7 +138,7 @@ KVM_X86_OP(check_emulate_instruction)
KVM_X86_OP(apic_init_signal_blocked)
KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
KVM_X86_OP_OPTIONAL(migrate_timers)
-KVM_X86_OP(msr_filter_changed)
+KVM_X86_OP(recalc_msr_intercepts)
KVM_X86_OP(complete_emulated_msr)
KVM_X86_OP(vcpu_deliver_sipi_vector)
KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f7af967aa16f..f19a76d3ca0e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -297,6 +297,7 @@ enum x86_intercept_stage;
*/
#define KVM_APIC_PV_EOI_PENDING 1
+struct kvm_kernel_irqfd;
struct kvm_kernel_irq_routing_entry;
/*
@@ -1320,6 +1321,12 @@ enum kvm_apicv_inhibit {
*/
APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED,
+ /*
+ * AVIC is disabled because the vCPU's APIC ID is beyond the max
+ * supported by AVIC/x2AVIC, i.e. the vCPU is unaddressable.
+ */
+ APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG,
+
NR_APICV_INHIBIT_REASONS,
};
@@ -1338,7 +1345,8 @@ enum kvm_apicv_inhibit {
__APICV_INHIBIT_REASON(IRQWIN), \
__APICV_INHIBIT_REASON(PIT_REINJ), \
__APICV_INHIBIT_REASON(SEV), \
- __APICV_INHIBIT_REASON(LOGICAL_ID_ALIASED)
+ __APICV_INHIBIT_REASON(LOGICAL_ID_ALIASED), \
+ __APICV_INHIBIT_REASON(PHYSICAL_ID_TOO_BIG)
struct kvm_arch {
unsigned long n_used_mmu_pages;
@@ -1350,7 +1358,7 @@ struct kvm_arch {
bool has_private_mem;
bool has_protected_state;
bool pre_fault_allowed;
- struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+ struct hlist_head *mmu_page_hash;
struct list_head active_mmu_pages;
/*
* A list of kvm_mmu_page structs that, if zapped, could possibly be
@@ -1379,11 +1387,13 @@ struct kvm_arch {
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
atomic_t noncoherent_dma_count;
-#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
- atomic_t assigned_device_count;
+ unsigned long nr_possible_bypass_irqs;
+
+#ifdef CONFIG_KVM_IOAPIC
struct kvm_pic *vpic;
struct kvm_ioapic *vioapic;
struct kvm_pit *vpit;
+#endif
atomic_t vapics_in_nmi_mode;
struct mutex apic_map_lock;
struct kvm_apic_map __rcu *apic_map;
@@ -1398,12 +1408,8 @@ struct kvm_arch {
gpa_t wall_clock;
- bool mwait_in_guest;
- bool hlt_in_guest;
- bool pause_in_guest;
- bool cstate_in_guest;
+ u64 disabled_exits;
- unsigned long irq_sources_bitmap;
s64 kvmclock_offset;
/*
@@ -1432,9 +1438,6 @@ struct kvm_arch {
struct delayed_work kvmclock_update_work;
struct delayed_work kvmclock_sync_work;
- /* reads protected by irq_srcu, writes by irq_lock */
- struct hlist_head mask_notifier_list;
-
#ifdef CONFIG_KVM_HYPERV
struct kvm_hv hyperv;
#endif
@@ -1457,6 +1460,7 @@ struct kvm_arch {
bool x2apic_format;
bool x2apic_broadcast_quirk_disabled;
+ bool has_mapped_host_mmio;
bool guest_can_read_msr_platform_info;
bool exception_payload_enabled;
@@ -1680,6 +1684,12 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
}
+enum kvm_x86_run_flags {
+ KVM_RUN_FORCE_IMMEDIATE_EXIT = BIT(0),
+ KVM_RUN_LOAD_GUEST_DR6 = BIT(1),
+ KVM_RUN_LOAD_DEBUGCTL = BIT(2),
+};
+
struct kvm_x86_ops {
const char *name;
@@ -1708,6 +1718,12 @@ struct kvm_x86_ops {
void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
void (*vcpu_put)(struct kvm_vcpu *vcpu);
+ /*
+ * Mask of DEBUGCTL bits that are owned by the host, i.e. that need to
+ * match the host's value even while the guest is active.
+ */
+ const u64 HOST_OWNED_DEBUGCTL;
+
void (*update_exception_bitmap)(struct kvm_vcpu *vcpu);
int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
@@ -1730,7 +1746,6 @@ struct kvm_x86_ops {
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
- void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
@@ -1761,7 +1776,7 @@ struct kvm_x86_ops {
int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
- bool force_immediate_exit);
+ u64 run_flags);
int (*handle_exit)(struct kvm_vcpu *vcpu,
enum exit_fastpath_completion exit_fastpath);
int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
@@ -1853,9 +1868,10 @@ struct kvm_x86_ops {
void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
- int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set);
- void (*pi_start_assignment)(struct kvm *kvm);
+ int (*pi_update_irte)(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
+ unsigned int host_irq, uint32_t guest_irq,
+ struct kvm_vcpu *vcpu, u32 vector);
+ void (*pi_start_bypass)(struct kvm *kvm);
void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
@@ -1892,7 +1908,7 @@ struct kvm_x86_ops {
int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);
void (*migrate_timers)(struct kvm_vcpu *vcpu);
- void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
+ void (*recalc_msr_intercepts)(struct kvm_vcpu *vcpu);
int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
@@ -1950,6 +1966,7 @@ struct kvm_arch_async_pf {
extern u32 __read_mostly kvm_nr_uret_msrs;
extern bool __read_mostly allow_smaller_maxphyaddr;
extern bool __read_mostly enable_apicv;
+extern bool __read_mostly enable_ipiv;
extern bool __read_mostly enable_device_posted_irqs;
extern struct kvm_x86_ops kvm_x86_ops;
@@ -1968,7 +1985,7 @@ void kvm_x86_vendor_exit(void);
#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
- return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ return kvzalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT);
}
#define __KVM_HAVE_ARCH_VM_FREE
@@ -2013,7 +2030,7 @@ void kvm_mmu_vendor_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
-void kvm_mmu_init_vm(struct kvm *kvm);
+int kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
@@ -2044,19 +2061,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
-struct kvm_irq_mask_notifier {
- void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
- int irq;
- struct hlist_node link;
-};
-
-void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
- struct kvm_irq_mask_notifier *kimn);
-void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
- struct kvm_irq_mask_notifier *kimn);
-void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
- bool mask);
-
extern bool tdp_enabled;
u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
@@ -2215,9 +2219,6 @@ static inline int __kvm_irq_line_state(unsigned long *irq_state,
return !!(*irq_state);
}
-int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
-void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
-
void kvm_inject_nmi(struct kvm_vcpu *vcpu);
int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
@@ -2394,9 +2395,6 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
struct kvm_vcpu **dest_vcpu);
-void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
- struct kvm_lapic_irq *irq);
-
static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
{
/* We can only post Fixed and LowPrio IRQs */
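Among the changes above, kvm_arch_alloc_vm() switches from __vmalloc() to kvzalloc(), which attempts a physically contiguous allocation first and falls back to vmalloc space, returning zeroed memory either way. A simplified userspace sketch of that fallback idea (stand-ins for the real mm helpers):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void *demo_kvzalloc(size_t size, size_t contig_limit)
    {
            void *p;

            if (size <= contig_limit) {
                    p = calloc(1, size);     /* stand-in for kzalloc */
                    if (p)
                            return p;
            }
            p = malloc(size);                /* stand-in for vzalloc */
            if (p)
                    memset(p, 0, size);
            return p;
    }

    int main(void)
    {
            void *vm = demo_kvzalloc(1 << 20, 4096);

            printf("allocated %s\n", vm ? "ok" : "failed");
            free(vm);
            return 0;
    }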
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 7490bb5c0776..b65c3ba5fa14 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -419,6 +419,7 @@
#define DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI (1UL << 12)
#define DEBUGCTLMSR_FREEZE_IN_SMM_BIT 14
#define DEBUGCTLMSR_FREEZE_IN_SMM (1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT)
+#define DEBUGCTLMSR_RTM_DEBUG BIT(15)
#define MSR_PEBS_FRONTEND 0x000003f7
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 97954c936c54..e33df3da6980 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -301,16 +301,15 @@ static inline bool pmd_leaf(pmd_t pte)
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_leaf */
static inline int pmd_trans_huge(pmd_t pmd)
{
- return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+ return (pmd_val(pmd) & _PAGE_PSE) == _PAGE_PSE;
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
- return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+ return (pud_val(pud) & _PAGE_PSE) == _PAGE_PSE;
}
#endif
@@ -320,24 +319,6 @@ static inline int has_transparent_hugepage(void)
return boot_cpu_has(X86_FEATURE_PSE);
}
-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pmd_devmap(pmd_t pmd)
-{
- return !!(pmd_val(pmd) & _PAGE_DEVMAP);
-}
-
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline int pud_devmap(pud_t pud)
-{
- return !!(pud_val(pud) & _PAGE_DEVMAP);
-}
-#else
-static inline int pud_devmap(pud_t pud)
-{
- return 0;
-}
-#endif
-
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static inline bool pmd_special(pmd_t pmd)
{
@@ -361,12 +342,6 @@ static inline pud_t pud_mkspecial(pud_t pud)
return pud_set_flags(pud, _PAGE_SPECIAL);
}
#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
-
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
-#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
@@ -527,11 +502,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
return pte_set_flags(pte, _PAGE_SPECIAL);
}
-static inline pte_t pte_mkdevmap(pte_t pte)
-{
- return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
-}
-
/* See comments above mksaveddirty_shift() */
static inline pmd_t pmd_mksaveddirty(pmd_t pmd)
{
@@ -603,11 +573,6 @@ static inline pmd_t pmd_mkwrite_shstk(pmd_t pmd)
return pmd_set_flags(pmd, _PAGE_DIRTY);
}
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
- return pmd_set_flags(pmd, _PAGE_DEVMAP);
-}
-
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
return pmd_set_flags(pmd, _PAGE_PSE);
@@ -673,11 +638,6 @@ static inline pud_t pud_mkdirty(pud_t pud)
return pud_mksaveddirty(pud);
}
-static inline pud_t pud_mkdevmap(pud_t pud)
-{
- return pud_set_flags(pud, _PAGE_DEVMAP);
-}
-
static inline pud_t pud_mkhuge(pud_t pud)
{
return pud_set_flags(pud, _PAGE_PSE);
@@ -1008,13 +968,6 @@ static inline int pte_present(pte_t a)
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pte_devmap(pte_t a)
-{
- return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
-}
-#endif
-
#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index a5731fb1e9dd..2ec250ba467e 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -34,7 +34,6 @@
#define _PAGE_BIT_UFFD_WP _PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_KERNEL_4K _PAGE_BIT_SOFTW3 /* page must not be converted to large */
-#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
#ifdef CONFIG_X86_64
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */
@@ -121,11 +120,9 @@
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
-#define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#define _PAGE_SOFTW4 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW4)
#else
#define _PAGE_NX (_AT(pteval_t, 0))
-#define _PAGE_DEVMAP (_AT(pteval_t, 0))
#define _PAGE_SOFTW4 (_AT(pteval_t, 0))
#endif
@@ -154,7 +151,7 @@
#define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | \
_PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY | \
- _PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP)
+ _PAGE_CC | _PAGE_UFFD_WP)
#define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
#define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 89075ff19afa..02236962fdb1 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -619,6 +619,24 @@ int rmp_make_shared(u64 pfn, enum pg_level level);
void snp_leak_pages(u64 pfn, unsigned int npages);
void kdump_sev_callback(void);
void snp_fixup_e820_tables(void);
+
+static inline void sev_evict_cache(void *va, int npages)
+{
+ volatile u8 val __always_unused;
+ u8 *bytes = va;
+ int page_idx;
+
+ /*
+ * For SEV guests, a read from the first/last cache-lines of a 4K page
+ * using the guest key is sufficient to cause a flush of all cache-lines
+ * associated with that 4K page without incurring all the overhead of a
+ * full CLFLUSH sequence.
+ */
+ for (page_idx = 0; page_idx < npages; page_idx++) {
+ val = bytes[page_idx * PAGE_SIZE];
+ val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
+ }
+}
#else
static inline bool snp_probe_rmptable_info(void) { return false; }
static inline int snp_rmptable_init(void) { return -ENOSYS; }
@@ -634,6 +652,7 @@ static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV
static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
static inline void kdump_sev_callback(void) { }
static inline void snp_fixup_e820_tables(void) {}
+static inline void sev_evict_cache(void *va, int npages) {}
#endif
#endif
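sev_evict_cache() leans on the property described in its comment: touching the first and last byte of each 4K page is enough to flush that page's cache lines under the current key. A standalone sketch of the same access pattern:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_PAGE_SIZE 4096

    static void demo_evict(volatile uint8_t *bytes, int npages)
    {
            volatile uint8_t val;

            for (int i = 0; i < npages; i++) {
                    val = bytes[i * DEMO_PAGE_SIZE];
                    val = bytes[i * DEMO_PAGE_SIZE + DEMO_PAGE_SIZE - 1];
            }
            (void)val;
    }

    int main(void)
    {
            uint8_t *buf = calloc(4, DEMO_PAGE_SIZE);

            if (!buf)
                    return 1;
            demo_evict(buf, 4);
            puts("touched 4 pages");
            free(buf);
            return 0;
    }

The callers added earlier pass npages = 1 for a 4K PSC entry and 512 for a 2M one, matching sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1).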
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index ad954a1a6656..ffc27f676243 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -252,16 +252,21 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
+/*
+ * GA_LOG_INTR is a synthetic flag that is never propagated to hardware-
+ * visible tables. It is set if the vCPU needs device posted IRQs to
+ * generate GA log interrupts to wake it (when blocking or about to block).
+ */
+#define AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR BIT_ULL(61)
+
#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
-#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
+#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK GENMASK_ULL(51, 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFFULL)
#define AVIC_DOORBELL_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
-#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
-
#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
@@ -290,8 +295,6 @@ enum avic_ipi_failure_cause {
static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_PHYSICAL_ID);
static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID);
-#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
-
#define SVM_SEV_FEAT_SNP_ACTIVE BIT(0)
#define SVM_SEV_FEAT_RESTRICTED_INJECTION BIT(3)
#define SVM_SEV_FEAT_ALTERNATE_INJECTION BIT(4)
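The GENMASK_ULL(51, 12) form above is value-preserving relative to the old (0xFFFFFFFFFF << 12) constant; a quick standalone check using a simplified GENMASK_ULL (valid for 0 <= l <= h < 64):

    #include <stdio.h>

    #define GENMASK_ULL(h, l) \
            (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
            unsigned long long old_mask = 0xFFFFFFFFFFULL << 12;
            unsigned long long new_mask = GENMASK_ULL(51, 12);

            printf("%llx %llx %s\n", old_mask, new_mask,
                   old_mask == new_mask ? "match" : "MISMATCH");
            return 0;
    }

Both forms select bits 51..12, the physical-address field of a backing-page entry; GENMASK_ULL just states the bit range directly.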
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index e9b81876ebe4..00daedfefc1b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -356,11 +356,6 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
-static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
- flush_tlb_mm(mm);
-}
-
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
static inline bool pte_flags_need_flush(unsigned long oldflags,
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index ea1d984166cd..7bde68247b5f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -120,7 +120,7 @@ struct its_array its_pages;
static void *__its_alloc(struct its_array *pages)
{
- void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE);
+ void *page __free(execmem) = execmem_alloc_rw(EXECMEM_MODULE_TEXT, PAGE_SIZE);
if (!page)
return NULL;
@@ -237,7 +237,6 @@ static void *its_alloc(void)
if (!page)
return NULL;
- execmem_make_temp_rw(page, PAGE_SIZE);
if (pages == &its_pages)
set_memory_x((unsigned long)page, 1);
@@ -1184,43 +1183,6 @@ bool cfi_bhi __ro_after_init = false;
#endif
#ifdef CONFIG_CFI_CLANG
-struct bpf_insn;
-
-/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
-extern unsigned int __bpf_prog_runX(const void *ctx,
- const struct bpf_insn *insn);
-
-KCFI_REFERENCE(__bpf_prog_runX);
-
-/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
-asm (
-" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
-" .type cfi_bpf_hash,@object \n"
-" .globl cfi_bpf_hash \n"
-" .p2align 2, 0x0 \n"
-"cfi_bpf_hash: \n"
-" .long __kcfi_typeid___bpf_prog_runX \n"
-" .size cfi_bpf_hash, 4 \n"
-" .popsection \n"
-);
-
-/* Must match bpf_callback_t */
-extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
-
-KCFI_REFERENCE(__bpf_callback_fn);
-
-/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
-asm (
-" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
-" .type cfi_bpf_subprog_hash,@object \n"
-" .globl cfi_bpf_subprog_hash \n"
-" .p2align 2, 0x0 \n"
-"cfi_bpf_subprog_hash: \n"
-" .long __kcfi_typeid___bpf_callback_fn \n"
-" .size cfi_bpf_subprog_hash, 4 \n"
-" .popsection \n"
-);
-
u32 cfi_get_func_hash(void *func)
{
u32 hash;
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index b4a1f6732a3a..6b868afb26c3 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -48,6 +48,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_AMD_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
+ { X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 },
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
{ X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 },
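
The new scattered entry maps leaf 0x8000001f, EBX bit 31 to X86_FEATURE_COHERENCY_SFW_NO. A user-space probe for the same bit (GCC/Clang on x86 only; purely an illustration of the leaf/register/bit triple from the table entry):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x8000001f not supported");
		return 1;
	}
	printf("COHERENCY_SFW_NO: %s\n", (ebx >> 31) & 1 ? "yes" : "no");
	return 0;
}
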
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 279148e72459..308dbbae6c6e 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -279,7 +279,7 @@ static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
unsigned long addr,
- unsigned long vm_flags)
+ vm_flags_t vm_flags)
{
unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
struct sgx_encl_page *entry;
@@ -520,9 +520,9 @@ static void sgx_vma_open(struct vm_area_struct *vma)
* Return: 0 on success, -EACCES otherwise
*/
int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
- unsigned long end, unsigned long vm_flags)
+ unsigned long end, vm_flags_t vm_flags)
{
- unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
+ vm_flags_t vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
struct sgx_encl_page *page;
unsigned long count = 0;
int ret = 0;
@@ -605,7 +605,7 @@ static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *pag
*/
static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
unsigned long addr,
- unsigned long vm_flags)
+ vm_flags_t vm_flags)
{
struct sgx_encl_page *entry;
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index f94ff14c9486..8ff47f6652b9 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -101,7 +101,7 @@ static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
}
int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
- unsigned long end, unsigned long vm_flags);
+ unsigned long end, vm_flags_t vm_flags);
bool current_is_ksgxd(void);
void sgx_encl_release(struct kref *ref);
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index bcb534688dfe..c6b12bed173d 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -163,10 +163,10 @@ static struct crash_mem *fill_up_crash_elf_data(void)
return NULL;
/*
- * Exclusion of crash region and/or crashk_low_res may cause
- * another range split. So add extra two slots here.
+ * Exclusion of crash region, crashk_low_res and/or crashk_cma_ranges
+ * may cause range splits. So add extra slots here.
*/
- nr_ranges += 2;
+ nr_ranges += 2 + crashk_cma_cnt;
cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
if (!cmem)
return NULL;
@@ -184,6 +184,7 @@ static struct crash_mem *fill_up_crash_elf_data(void)
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
int ret = 0;
+ int i;
/* Exclude the low 1M because it is always reserved */
ret = crash_exclude_mem_range(cmem, 0, SZ_1M - 1);
@@ -198,8 +199,17 @@ static int elf_header_exclude_ranges(struct crash_mem *cmem)
if (crashk_low_res.end)
ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
crashk_low_res.end);
+ if (ret)
+ return ret;
- return ret;
+ for (i = 0; i < crashk_cma_cnt; ++i) {
+ ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
+ crashk_cma_ranges[i].end);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
@@ -374,6 +384,14 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
add_e820_entry(params, &ei);
}
+ for (i = 0; i < crashk_cma_cnt; ++i) {
+ ei.addr = crashk_cma_ranges[i].start;
+ ei.size = crashk_cma_ranges[i].end -
+ crashk_cma_ranges[i].start + 1;
+ ei.type = E820_TYPE_RAM;
+ add_e820_entry(params, &ei);
+ }
+
out:
vfree(cmem);
return ret;
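
Why the extra slots: carving a sub-range out of the middle of an existing range leaves two pieces behind, so each excluded region can grow the list by one. A toy version of that exclusion step (simplified; the kernel's crash_exclude_mem_range also handles partial overlaps at the edges):

#include <stdio.h>

struct range { unsigned long long start, end; };

/* Exclude [s, e] from r[]; a middle hit splits one range into two. */
static int exclude_range(struct range *r, int *n, int cap,
			 unsigned long long s, unsigned long long e)
{
	for (int i = 0; i < *n; i++) {
		if (s > r[i].start && e < r[i].end) {
			if (*n >= cap)
				return -1;	/* would need an extra slot */
			r[*n].start = e + 1;
			r[*n].end = r[i].end;
			r[i].end = s - 1;
			(*n)++;
		}
	}
	return 0;
}

int main(void)
{
	struct range r[4] = { { 0, 0xFFFF } };
	int n = 1;

	exclude_range(r, &n, 4, 0x4000, 0x7FFF);
	for (int i = 0; i < n; i++)		/* prints two ranges */
		printf("[%#llx - %#llx]\n", r[i].start, r[i].end);
	return 0;
}
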
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 252e82bcfd2f..4450acec9390 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -263,7 +263,7 @@ void arch_ftrace_update_code(int command)
static inline void *alloc_tramp(unsigned long size)
{
- return execmem_alloc(EXECMEM_FTRACE, size);
+ return execmem_alloc_rw(EXECMEM_FTRACE, size);
}
static inline void tramp_free(void *tramp)
{
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 9ed29ff10e59..10721a125226 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -256,26 +256,59 @@ static __always_inline void handle_irq(struct irq_desc *desc,
__handle_irq(desc, regs);
}
-static __always_inline int call_irq_handler(int vector, struct pt_regs *regs)
+static struct irq_desc *reevaluate_vector(int vector)
{
- struct irq_desc *desc;
- int ret = 0;
+ struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
+
+ if (!IS_ERR_OR_NULL(desc))
+ return desc;
+
+ if (desc == VECTOR_UNUSED)
+ pr_emerg_ratelimited("No irq handler for %d.%u\n", smp_processor_id(), vector);
+ else
+ __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+ return NULL;
+}
+
+static __always_inline bool call_irq_handler(int vector, struct pt_regs *regs)
+{
+ struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
- desc = __this_cpu_read(vector_irq[vector]);
if (likely(!IS_ERR_OR_NULL(desc))) {
handle_irq(desc, regs);
- } else {
- ret = -EINVAL;
- if (desc == VECTOR_UNUSED) {
- pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
- __func__, smp_processor_id(),
- vector);
- } else {
- __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
- }
+ return true;
}
- return ret;
+ /*
+ * Reevaluate with vector_lock held to prevent a race against
+ * request_irq() setting up the vector:
+ *
+ * CPU0 CPU1
+ * interrupt is raised in APIC IRR
+ * but not handled
+ * free_irq()
+ * per_cpu(vector_irq, CPU1)[vector] = VECTOR_SHUTDOWN;
+ *
+ * request_irq() common_interrupt()
+ * d = this_cpu_read(vector_irq[vector]);
+ *
+ * per_cpu(vector_irq, CPU1)[vector] = desc;
+ *
+ * if (d == VECTOR_SHUTDOWN)
+ * this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+ *
+ * This requires that the same vector on the same target CPU is
+ * handed out or that a spurious interrupt hits that CPU/vector.
+ */
+ lock_vector_lock();
+ desc = reevaluate_vector(vector);
+ unlock_vector_lock();
+
+ if (!desc)
+ return false;
+
+ handle_irq(desc, regs);
+ return true;
}
/*
@@ -289,7 +322,7 @@ DEFINE_IDTENTRY_IRQ(common_interrupt)
/* entry code tells RCU that we're not quiescent. Check it. */
RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
- if (unlikely(call_irq_handler(vector, regs)))
+ if (unlikely(!call_irq_handler(vector, regs)))
apic_eoi();
set_irq_regs(old_regs);
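
The fix follows the classic lockless-fast-path / locked-slow-path revalidation shape. A stripped-down user-space sketch of the same structure (hypothetical names; the real code uses per-CPU vector tables and vector_lock rather than a global mutex):

#include <pthread.h>
#include <stdio.h>

typedef void (*handler_t)(int);

static handler_t table[256];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 if handled; 0 tells the caller to EOI the spurious vector. */
static int dispatch(int vector)
{
	handler_t h = table[vector];	/* lockless fast path */

	if (h) {
		h(vector);
		return 1;
	}

	/*
	 * Slow path: re-read under the lock in case a concurrent
	 * registration raced with the lockless read above.
	 */
	pthread_mutex_lock(&table_lock);
	h = table[vector];
	pthread_mutex_unlock(&table_lock);

	if (!h)
		return 0;
	h(vector);
	return 1;
}

static void sample(int v) { printf("handled vector %d\n", v); }

int main(void)
{
	table[32] = sample;
	printf("32 -> %d\n", dispatch(32));
	printf("33 -> %d\n", dispatch(33));
	return 0;
}
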
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 47cb8eb138ba..6079d15dab8c 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -481,24 +481,6 @@ static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
return len;
}
-/* Make page to RO mode when allocate it */
-void *alloc_insn_page(void)
-{
- void *page;
-
- page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
- if (!page)
- return NULL;
-
- /*
- * TODO: Once additional kernel code protection mechanisms are set, ensure
- * that the page was not maliciously altered and it is still zeroed.
- */
- set_memory_rox((unsigned long)page, 1);
-
- return page;
-}
-
/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */
static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
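
alternative.c, ftrace.c and kprobes now rely on execmem_alloc_rw() plus the generic execmem sealing instead of open-coding set_memory_x()/set_memory_rox(). The underlying idiom, allocate writable, populate, then flip to read+exec, has a direct user-space analog (POSIX on x86-64; illustration only, and the object-to-function pointer cast is non-portable):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	unsigned char *page = mmap(NULL, psz, PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* x86-64: mov eax, 42; ret */
	static const unsigned char code[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };

	if (page == MAP_FAILED)
		return 1;
	memcpy(page, code, sizeof(code));		/* populate while RW */
	if (mprotect(page, psz, PROT_READ | PROT_EXEC))	/* then seal to R+X */
		return 1;

	int (*fn)(void) = (int (*)(void))page;		/* non-portable JIT cast */
	printf("returned %d\n", fn());
	munmap(page, psz);
	return 0;
}
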
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index fb27be697128..1b2edd07a3e1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -213,8 +213,10 @@ arch_initcall(init_x86_sysctl);
*/
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
+#if defined(CONFIG_FIRMWARE_EDID)
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);
+#endif
extern int root_mountflags;
@@ -525,7 +527,9 @@ static void __init parse_boot_params(void)
{
ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
screen_info = boot_params.screen_info;
+#if defined(CONFIG_FIRMWARE_EDID)
edid_info = boot_params.edid_info;
+#endif
#ifdef CONFIG_X86_32
apm_info.bios = boot_params.apm_bios_info;
ist_info = boot_params.ist_info;
@@ -599,7 +603,7 @@ static void __init memblock_x86_reserve_range_setup_data(void)
static void __init arch_reserve_crashkernel(void)
{
- unsigned long long crash_base, crash_size, low_size = 0;
+ unsigned long long crash_base, crash_size, low_size = 0, cma_size = 0;
bool high = false;
int ret;
@@ -608,7 +612,7 @@ static void __init arch_reserve_crashkernel(void)
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base,
- &low_size, &high);
+ &low_size, &cma_size, &high);
if (ret)
return;
@@ -618,6 +622,7 @@ static void __init arch_reserve_crashkernel(void)
}
reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
+ reserve_crashkernel_cma(cma_size);
}
static struct resource standard_io_resources[] = {
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 2eeffcec5382..2c86673155c9 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -166,6 +166,16 @@ config KVM_AMD_SEV
Encrypted State (SEV-ES), and Secure Encrypted Virtualization with
Secure Nested Paging (SEV-SNP) technologies on AMD processors.
+config KVM_IOAPIC
+ bool "I/O APIC, PIC, and PIT emulation"
+ default y
+ depends on KVM
+ help
+ Provides support for KVM to emulate an I/O APIC, PIC, and PIT, i.e.
+ for full in-kernel APIC emulation.
+
+ If unsure, say Y.
+
config KVM_SMM
bool "System Management Mode emulation"
default y
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index a5d362c7b504..c4b8950c7abe 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -5,12 +5,11 @@ ccflags-$(CONFIG_KVM_WERROR) += -Werror
include $(srctree)/virt/kvm/Makefile.kvm
-kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
- i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
- debugfs.o mmu/mmu.o mmu/page_track.o \
- mmu/spte.o
+kvm-y += x86.o emulate.o irq.o lapic.o cpuid.o pmu.o mtrr.o \
+ debugfs.o mmu/mmu.o mmu/page_track.o mmu/spte.o
kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
+kvm-$(CONFIG_KVM_IOAPIC) += i8259.o i8254.o ioapic.o
kvm-$(CONFIG_KVM_HYPERV) += hyperv.o
kvm-$(CONFIG_KVM_XEN) += xen.o
kvm-$(CONFIG_KVM_SMM) += smm.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index f84bc0569c9c..e2836a255b16 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -979,6 +979,7 @@ void kvm_set_cpu_caps(void)
F(FSRS),
F(FSRC),
F(WRMSRNS),
+ X86_64_F(LKGS),
F(AMX_FP16),
F(AVX_IFMA),
F(LAM),
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index ee27064dd72f..72b19a88a776 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -497,15 +497,19 @@ static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
return ret;
}
-int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
+int kvm_hv_synic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
+ int irq_source_id, int level, bool line_status)
{
struct kvm_vcpu_hv_synic *synic;
- synic = synic_get(kvm, vpidx);
+ if (!level)
+ return -1;
+
+ synic = synic_get(kvm, e->hv_sint.vcpu);
if (!synic)
return -EINVAL;
- return synic_set_irq(synic, sint);
+ return synic_set_irq(synic, e->hv_sint.sint);
}
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
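
With kvm_hv_synic_set_irq() taking the routing-entry callback signature directly, routing setup can store it in the entry's ->set hook without a per-type wrapper. A minimal sketch of that dispatch pattern (all names below are made up for illustration):

#include <stdio.h>
#include <stdbool.h>

struct entry;
typedef int (*set_fn)(struct entry *e, int source, int level, bool line_status);

struct entry {
	set_fn set;	/* uniform hook: one signature for every backend */
	int payload;	/* backend-specific routing data lives in the entry */
};

static int sint_set(struct entry *e, int source, int level, bool line_status)
{
	if (!level)
		return -1;	/* edge-only, like the SynIC SINT path */
	printf("deliver SINT %d\n", e->payload);
	return 0;
}

int main(void)
{
	struct entry e = { .set = sint_set, .payload = 3 };

	e.set(&e, 0, 1, false);	/* caller never needs a per-type wrapper */
	return 0;
}
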
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 913bfc96959c..6ce160ffa678 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -103,7 +103,8 @@ static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
void kvm_hv_irq_routing_update(struct kvm *kvm);
-int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
+int kvm_hv_synic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
+ int irq_source_id, int level, bool line_status);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 739aa6c0d0c3..850972deac8e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -248,8 +248,8 @@ static void pit_do_work(struct kthread_work *work)
if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0))
return;
- kvm_set_irq(kvm, pit->irq_source_id, 0, 1, false);
- kvm_set_irq(kvm, pit->irq_source_id, 0, 0, false);
+ kvm_set_irq(kvm, KVM_PIT_IRQ_SOURCE_ID, 0, 1, false);
+ kvm_set_irq(kvm, KVM_PIT_IRQ_SOURCE_ID, 0, 0, false);
/*
* Provides NMI watchdog support via Virtual Wire mode.
@@ -288,7 +288,7 @@ static inline void kvm_pit_reset_reinject(struct kvm_pit *pit)
atomic_set(&pit->pit_state.irq_ack, 1);
}
-void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
+static void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
{
struct kvm_kpit_state *ps = &pit->pit_state;
struct kvm *kvm = pit->kvm;
@@ -400,8 +400,8 @@ static void pit_load_count(struct kvm_pit *pit, int channel, u32 val)
}
}
-void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
- int hpet_legacy_start)
+static void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
+ int hpet_legacy_start)
{
u8 saved_mode;
@@ -641,7 +641,7 @@ static void kvm_pit_reset(struct kvm_pit *pit)
kvm_pit_reset_reinject(pit);
}
-static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
+static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
{
struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
@@ -649,6 +649,79 @@ static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
kvm_pit_reset_reinject(pit);
}
+int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
+{
+ struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
+
+ BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
+
+ mutex_lock(&kps->lock);
+ memcpy(ps, &kps->channels, sizeof(*ps));
+ mutex_unlock(&kps->lock);
+ return 0;
+}
+
+int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
+{
+ int i;
+ struct kvm_pit *pit = kvm->arch.vpit;
+
+ mutex_lock(&pit->pit_state.lock);
+ memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
+ for (i = 0; i < 3; i++)
+ kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
+ mutex_unlock(&pit->pit_state.lock);
+ return 0;
+}
+
+int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
+{
+ mutex_lock(&kvm->arch.vpit->pit_state.lock);
+ memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
+ sizeof(ps->channels));
+ ps->flags = kvm->arch.vpit->pit_state.flags;
+ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ memset(&ps->reserved, 0, sizeof(ps->reserved));
+ return 0;
+}
+
+int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
+{
+ int start = 0;
+ int i;
+ u32 prev_legacy, cur_legacy;
+ struct kvm_pit *pit = kvm->arch.vpit;
+
+ mutex_lock(&pit->pit_state.lock);
+ prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
+ cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
+ if (!prev_legacy && cur_legacy)
+ start = 1;
+ memcpy(&pit->pit_state.channels, &ps->channels,
+ sizeof(pit->pit_state.channels));
+ pit->pit_state.flags = ps->flags;
+ for (i = 0; i < 3; i++)
+ kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
+ start && i == 0);
+ mutex_unlock(&pit->pit_state.lock);
+ return 0;
+}
+
+int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control)
+{
+ struct kvm_pit *pit = kvm->arch.vpit;
+
+ /* pit->pit_state.lock was overloaded to prevent userspace from getting
+ * an inconsistent state after running multiple KVM_REINJECT_CONTROL
+ * ioctls in parallel. Use a separate lock if that ioctl isn't rare.
+ */
+ mutex_lock(&pit->pit_state.lock);
+ kvm_pit_set_reinject(pit, control->pit_reinject);
+ mutex_unlock(&pit->pit_state.lock);
+
+ return 0;
+}
+
static const struct kvm_io_device_ops pit_dev_ops = {
.read = pit_ioport_read,
.write = pit_ioport_write,
@@ -671,10 +744,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
if (!pit)
return NULL;
- pit->irq_source_id = kvm_request_irq_source_id(kvm);
- if (pit->irq_source_id < 0)
- goto fail_request;
-
mutex_init(&pit->pit_state.lock);
pid = get_pid(task_tgid(current));
@@ -694,7 +763,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
pit_state->irq_ack_notifier.gsi = 0;
pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
- pit->mask_notifier.func = pit_mask_notifer;
+ pit->mask_notifier.func = pit_mask_notifier;
kvm_pit_reset(pit);
@@ -726,8 +795,6 @@ fail_register_pit:
kvm_pit_set_reinject(pit, false);
kthread_destroy_worker(pit->worker);
fail_kthread:
- kvm_free_irq_source_id(kvm, pit->irq_source_id);
-fail_request:
kfree(pit);
return NULL;
}
@@ -744,7 +811,6 @@ void kvm_free_pit(struct kvm *kvm)
kvm_pit_set_reinject(pit, false);
hrtimer_cancel(&pit->pit_state.timer);
kthread_destroy_worker(pit->worker);
- kvm_free_irq_source_id(kvm, pit->irq_source_id);
kfree(pit);
}
}
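
The one subtle bit in kvm_vm_ioctl_set_pit2() is that channel 0 is reloaded in HPET-legacy mode only on a 0 -> 1 transition of KVM_PIT_FLAGS_HPET_LEGACY. The truth table is easy to verify in isolation (the flag value below is a stand-in):

#include <stdio.h>

#define HPET_LEGACY 0x1	/* stand-in for KVM_PIT_FLAGS_HPET_LEGACY */

int main(void)
{
	for (unsigned prev = 0; prev <= 1; prev++) {
		for (unsigned cur = 0; cur <= 1; cur++) {
			int start = !(prev & HPET_LEGACY) && (cur & HPET_LEGACY);
			printf("prev=%u cur=%u -> hpet_legacy_start=%d\n",
			       prev, cur, start);
		}
	}
	return 0;
}
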
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index a768212ba821..60fa499d2f8a 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -6,6 +6,11 @@
#include <kvm/iodev.h>
+#include <uapi/asm/kvm.h>
+
+#include "ioapic.h"
+
+#ifdef CONFIG_KVM_IOAPIC
struct kvm_kpit_channel_state {
u32 count; /* can be 65536 */
u16 latched_count;
@@ -42,7 +47,6 @@ struct kvm_pit {
struct kvm_io_device speaker_dev;
struct kvm *kvm;
struct kvm_kpit_state pit_state;
- int irq_source_id;
struct kvm_irq_mask_notifier mask_notifier;
struct kthread_worker *worker;
struct kthread_work expired;
@@ -55,11 +59,14 @@ struct kvm_pit {
#define KVM_MAX_PIT_INTR_INTERVAL HZ / 100
#define KVM_PIT_CHANNEL_MASK 0x3
+int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps);
+int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps);
+int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps);
+int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps);
+int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control);
+
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags);
void kvm_free_pit(struct kvm *kvm);
-
-void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
- int hpet_legacy_start);
-void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject);
+#endif /* CONFIG_KVM_IOAPIC */
#endif
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index a8fb19940975..2ac7f1678c46 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -31,6 +31,8 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+
+#include "ioapic.h"
#include "irq.h"
#include <linux/kvm_host.h>
@@ -185,8 +187,11 @@ void kvm_pic_update_irq(struct kvm_pic *s)
pic_unlock(s);
}
-int kvm_pic_set_irq(struct kvm_pic *s, int irq, int irq_source_id, int level)
+int kvm_pic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
+ int irq_source_id, int level, bool line_status)
{
+ struct kvm_pic *s = kvm->arch.vpic;
+ int irq = e->irqchip.pin;
int ret, irq_level;
BUG_ON(irq < 0 || irq >= PIC_NUM_PINS);
@@ -203,16 +208,6 @@ int kvm_pic_set_irq(struct kvm_pic *s, int irq, int irq_source_id, int level)
return ret;
}
-void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id)
-{
- int i;
-
- pic_lock(s);
- for (i = 0; i < PIC_NUM_PINS; i++)
- __clear_bit(irq_source_id, &s->irq_states[i]);
- pic_unlock(s);
-}
-
/*
* acknowledge interrupt 'irq'
*/
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 45dae2d5d2f1..2b5d389bca5f 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -41,11 +41,11 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
-#include <trace/events/kvm.h>
#include "ioapic.h"
#include "lapic.h"
#include "irq.h"
+#include "trace.h"
static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
bool line_status);
@@ -310,6 +310,42 @@ void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
kvm_make_scan_ioapic_request(kvm);
}
+void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
+ struct kvm_irq_mask_notifier *kimn)
+{
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+ mutex_lock(&kvm->irq_lock);
+ kimn->irq = irq;
+ hlist_add_head_rcu(&kimn->link, &ioapic->mask_notifier_list);
+ mutex_unlock(&kvm->irq_lock);
+}
+
+void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
+ struct kvm_irq_mask_notifier *kimn)
+{
+ mutex_lock(&kvm->irq_lock);
+ hlist_del_rcu(&kimn->link);
+ mutex_unlock(&kvm->irq_lock);
+ synchronize_srcu(&kvm->irq_srcu);
+}
+
+void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
+ bool mask)
+{
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ struct kvm_irq_mask_notifier *kimn;
+ int idx, gsi;
+
+ idx = srcu_read_lock(&kvm->irq_srcu);
+ gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
+ if (gsi != -1)
+ hlist_for_each_entry_rcu(kimn, &ioapic->mask_notifier_list, link)
+ if (kimn->irq == gsi)
+ kimn->func(kimn, mask);
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+}
+
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
unsigned index;
@@ -479,9 +515,11 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
return ret;
}
-int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
- int level, bool line_status)
+int kvm_ioapic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
+ int irq_source_id, int level, bool line_status)
{
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ int irq = e->irqchip.pin;
int ret, irq_level;
BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);
@@ -496,16 +534,6 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
return ret;
}
-void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
-{
- int i;
-
- spin_lock(&ioapic->lock);
- for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
- __clear_bit(irq_source_id, &ioapic->irq_states[i]);
- spin_unlock(&ioapic->lock);
-}
-
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
int i;
@@ -718,6 +746,7 @@ int kvm_ioapic_init(struct kvm *kvm)
return -ENOMEM;
spin_lock_init(&ioapic->lock);
INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
+ INIT_HLIST_HEAD(&ioapic->mask_notifier_list);
kvm->arch.vioapic = ioapic;
kvm_ioapic_reset(ioapic);
kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
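
The mask-notifier list now lives in struct kvm_ioapic; readers walk it under irq_srcu while writers hold irq_lock. Stripped of the RCU machinery, the fire path reduces to a keyed walk over registered notifiers (sketch with a plain array and no locking, purely to show the matching logic):

#include <stdio.h>
#include <stdbool.h>

struct mask_notifier {
	int gsi;
	void (*func)(struct mask_notifier *n, bool masked);
};

static void print_mask(struct mask_notifier *n, bool masked)
{
	printf("gsi %d %s\n", n->gsi, masked ? "masked" : "unmasked");
}

int main(void)
{
	struct mask_notifier list[] = {
		{ .gsi = 10, .func = print_mask },
		{ .gsi = 11, .func = print_mask },
	};
	int target_gsi = 10;	/* what (irqchip, pin) mapped to */

	/* kvm_fire_mask_notifiers(): call every notifier for the GSI. */
	for (unsigned i = 0; i < sizeof(list) / sizeof(list[0]); i++)
		if (list[i].gsi == target_gsi)
			list[i].func(&list[i], true);
	return 0;
}
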
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index aa8cb4ac0479..bf28dbc11ff6 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -86,8 +86,24 @@ struct kvm_ioapic {
struct delayed_work eoi_inject;
u32 irq_eoi[IOAPIC_NUM_PINS];
u32 irr_delivered;
+
+ /* reads protected by irq_srcu, writes by irq_lock */
+ struct hlist_head mask_notifier_list;
+};
+
+struct kvm_irq_mask_notifier {
+ void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
+ int irq;
+ struct hlist_node link;
};
+void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
+ struct kvm_irq_mask_notifier *kimn);
+void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
+ struct kvm_irq_mask_notifier *kimn);
+void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
+ bool mask);
+
#ifdef DEBUG
#define ASSERT(x) \
do { \
@@ -103,7 +119,7 @@ do { \
static inline int ioapic_in_kernel(struct kvm *kvm)
{
- return irqchip_kernel(kvm);
+ return irqchip_full(kvm);
}
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
@@ -111,9 +127,9 @@ void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
int trigger_mode);
int kvm_ioapic_init(struct kvm *kvm);
void kvm_ioapic_destroy(struct kvm *kvm);
-int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
- int level, bool line_status);
-void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
+int kvm_ioapic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
+ int irq_source_id, int level, bool line_status);
+
void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 97d68d837929..16da89259011 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -11,9 +11,12 @@
#include <linux/export.h>
#include <linux/kvm_host.h>
+#include <linux/kvm_irqfd.h>
+#include "hyperv.h"
+#include "ioapic.h"
#include "irq.h"
-#include "i8254.h"
+#include "trace.h"
#include "x86.h"
#include "xen.h"
@@ -41,6 +44,14 @@ static int pending_userspace_extint(struct kvm_vcpu *v)
return v->arch.pending_external_vector != -1;
}
+static int get_userspace_extint(struct kvm_vcpu *vcpu)
+{
+ int vector = vcpu->arch.pending_external_vector;
+
+ vcpu->arch.pending_external_vector = -1;
+ return vector;
+}
+
/*
* check if there is pending interrupt from
* non-APIC source without intack.
@@ -67,10 +78,13 @@ int kvm_cpu_has_extint(struct kvm_vcpu *v)
if (!kvm_apic_accept_pic_intr(v))
return 0;
- if (irqchip_split(v->kvm))
- return pending_userspace_extint(v);
- else
+#ifdef CONFIG_KVM_IOAPIC
+ if (pic_in_kernel(v->kvm))
return v->kvm->arch.vpic->output;
+#endif
+
+ WARN_ON_ONCE(!irqchip_split(v->kvm));
+ return pending_userspace_extint(v);
}
/*
@@ -126,13 +140,13 @@ int kvm_cpu_get_extint(struct kvm_vcpu *v)
return v->kvm->arch.xen.upcall_vector;
#endif
- if (irqchip_split(v->kvm)) {
- int vector = v->arch.pending_external_vector;
-
- v->arch.pending_external_vector = -1;
- return vector;
- } else
+#ifdef CONFIG_KVM_IOAPIC
+ if (pic_in_kernel(v->kvm))
return kvm_pic_read_irq(v->kvm); /* PIC */
+#endif
+
+ WARN_ON_ONCE(!irqchip_split(v->kvm));
+ return get_userspace_extint(v);
}
EXPORT_SYMBOL_GPL(kvm_cpu_get_extint);
@@ -163,7 +177,9 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
__kvm_migrate_apic_timer(vcpu);
+#ifdef CONFIG_KVM_IOAPIC
__kvm_migrate_pit_timer(vcpu);
+#endif
kvm_x86_call(migrate_timers)(vcpu);
}
@@ -171,10 +187,532 @@ bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;
- return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm);
+ return resample ? irqchip_full(kvm) : irqchip_in_kernel(kvm);
}
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
return irqchip_in_kernel(kvm);
}
+
+int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
+ struct kvm_lapic_irq *irq, struct dest_map *dest_map)
+{
+ int r = -1;
+ struct kvm_vcpu *vcpu, *lowest = NULL;
+ unsigned long i, dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+ unsigned int dest_vcpus = 0;
+
+ if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
+ return r;
+
+ if (irq->dest_mode == APIC_DEST_PHYSICAL &&
+ irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) {
+ pr_info("apic: phys broadcast and lowest prio\n");
+ irq->delivery_mode = APIC_DM_FIXED;
+ }
+
+ memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!kvm_apic_present(vcpu))
+ continue;
+
+ if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
+ irq->dest_id, irq->dest_mode))
+ continue;
+
+ if (!kvm_lowest_prio_delivery(irq)) {
+ if (r < 0)
+ r = 0;
+ r += kvm_apic_set_irq(vcpu, irq, dest_map);
+ } else if (kvm_apic_sw_enabled(vcpu->arch.apic)) {
+ if (!kvm_vector_hashing_enabled()) {
+ if (!lowest)
+ lowest = vcpu;
+ else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
+ lowest = vcpu;
+ } else {
+ __set_bit(i, dest_vcpu_bitmap);
+ dest_vcpus++;
+ }
+ }
+ }
+
+ if (dest_vcpus != 0) {
+ int idx = kvm_vector_to_index(irq->vector, dest_vcpus,
+ dest_vcpu_bitmap, KVM_MAX_VCPUS);
+
+ lowest = kvm_get_vcpu(kvm, idx);
+ }
+
+ if (lowest)
+ r = kvm_apic_set_irq(lowest, irq, dest_map);
+
+ return r;
+}
+
+static void kvm_msi_to_lapic_irq(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
+ struct kvm_lapic_irq *irq)
+{
+ struct msi_msg msg = { .address_lo = e->msi.address_lo,
+ .address_hi = e->msi.address_hi,
+ .data = e->msi.data };
+
+ trace_kvm_msi_set_irq(msg.address_lo | (kvm->arch.x2apic_format ?
+ (u64)msg.address_hi << 32 : 0), msg.data);
+
+ irq->dest_id = x86_msi_msg_get_destid(&msg, kvm->arch.x2apic_format);
+ irq->vector = msg.arch_data.vector;
+ irq->dest_mode = kvm_lapic_irq_dest_mode(msg.arch_addr_lo.dest_mode_logical);
+ irq->trig_mode = msg.arch_data.is_level;
+ irq->delivery_mode = msg.arch_data.delivery_mode << 8;
+ irq->msi_redir_hint = msg.arch_addr_lo.redirect_hint;
+ irq->level = 1;
+ irq->shorthand = APIC_DEST_NOSHORT;
+}
+
+static inline bool kvm_msi_route_invalid(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e)
+{
+ return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff);
+}
+
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level, bool line_status)
+{
+ struct kvm_lapic_irq irq;
+
+ if (kvm_msi_route_invalid(kvm, e))
+ return -EINVAL;
+
+ if (!level)
+ return -1;
+
+ kvm_msi_to_lapic_irq(kvm, e, &irq);
+
+ return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
+}
+
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status)
+{
+ struct kvm_lapic_irq irq;
+ int r;
+
+ switch (e->type) {
+#ifdef CONFIG_KVM_HYPERV
+ case KVM_IRQ_ROUTING_HV_SINT:
+ return kvm_hv_synic_set_irq(e, kvm, irq_source_id, level,
+ line_status);
+#endif
+
+ case KVM_IRQ_ROUTING_MSI:
+ if (kvm_msi_route_invalid(kvm, e))
+ return -EINVAL;
+
+ kvm_msi_to_lapic_irq(kvm, e, &irq);
+
+ if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
+ return r;
+ break;
+
+#ifdef CONFIG_KVM_XEN
+ case KVM_IRQ_ROUTING_XEN_EVTCHN:
+ if (!level)
+ return -1;
+
+ return kvm_xen_set_evtchn_fast(&e->xen_evtchn, kvm);
+#endif
+ default:
+ break;
+ }
+
+ return -EWOULDBLOCK;
+}
+
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
+ bool line_status)
+{
+ if (!irqchip_in_kernel(kvm))
+ return -ENXIO;
+
+ irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+ irq_event->irq, irq_event->level,
+ line_status);
+ return 0;
+}
+
+bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
+{
+ return irqchip_in_kernel(kvm);
+}
+
+int kvm_set_routing_entry(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
+ const struct kvm_irq_routing_entry *ue)
+{
+ /* We can't check irqchip_in_kernel() here as some callers are
+ * currently initializing the irqchip. Other callers should therefore
+ * check kvm_arch_can_set_irq_routing() before calling this function.
+ */
+ switch (ue->type) {
+#ifdef CONFIG_KVM_IOAPIC
+ case KVM_IRQ_ROUTING_IRQCHIP:
+ if (irqchip_split(kvm))
+ return -EINVAL;
+ e->irqchip.pin = ue->u.irqchip.pin;
+ switch (ue->u.irqchip.irqchip) {
+ case KVM_IRQCHIP_PIC_SLAVE:
+ e->irqchip.pin += PIC_NUM_PINS / 2;
+ fallthrough;
+ case KVM_IRQCHIP_PIC_MASTER:
+ if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
+ return -EINVAL;
+ e->set = kvm_pic_set_irq;
+ break;
+ case KVM_IRQCHIP_IOAPIC:
+ if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS)
+ return -EINVAL;
+ e->set = kvm_ioapic_set_irq;
+ break;
+ default:
+ return -EINVAL;
+ }
+ e->irqchip.irqchip = ue->u.irqchip.irqchip;
+ break;
+#endif
+ case KVM_IRQ_ROUTING_MSI:
+ e->set = kvm_set_msi;
+ e->msi.address_lo = ue->u.msi.address_lo;
+ e->msi.address_hi = ue->u.msi.address_hi;
+ e->msi.data = ue->u.msi.data;
+
+ if (kvm_msi_route_invalid(kvm, e))
+ return -EINVAL;
+ break;
+#ifdef CONFIG_KVM_HYPERV
+ case KVM_IRQ_ROUTING_HV_SINT:
+ e->set = kvm_hv_synic_set_irq;
+ e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
+ e->hv_sint.sint = ue->u.hv_sint.sint;
+ break;
+#endif
+#ifdef CONFIG_KVM_XEN
+ case KVM_IRQ_ROUTING_XEN_EVTCHN:
+ return kvm_xen_setup_evtchn(kvm, e, ue);
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
+ struct kvm_vcpu **dest_vcpu)
+{
+ int r = 0;
+ unsigned long i;
+ struct kvm_vcpu *vcpu;
+
+ if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
+ return true;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!kvm_apic_present(vcpu))
+ continue;
+
+ if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
+ irq->dest_id, irq->dest_mode))
+ continue;
+
+ if (++r == 2)
+ return false;
+
+ *dest_vcpu = vcpu;
+ }
+
+ return r == 1;
+}
+EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu);
+
+void kvm_scan_ioapic_irq(struct kvm_vcpu *vcpu, u32 dest_id, u16 dest_mode,
+ u8 vector, unsigned long *ioapic_handled_vectors)
+{
+ /*
+ * Intercept EOI if the vCPU is the target of the new IRQ routing, or
+ * the vCPU has a pending IRQ from the old routing, i.e. if the vCPU
+ * may receive a level-triggered IRQ in the future, or already received
+ * level-triggered IRQ. The EOI needs to be intercepted and forwarded
+ * to I/O APIC emulation so that the IRQ can be de-asserted.
+ */
+ if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT, dest_id, dest_mode)) {
+ __set_bit(vector, ioapic_handled_vectors);
+ } else if (kvm_apic_pending_eoi(vcpu, vector)) {
+ __set_bit(vector, ioapic_handled_vectors);
+
+ /*
+ * Track the highest pending EOI for which the vCPU is NOT the
+ * target in the new routing. Only the EOI for the IRQ that is
+ * in-flight (for the old routing) needs to be intercepted, any
+ * future IRQs that arrive on this vCPU will be coincidental to
+ * the level-triggered routing and don't need to be intercepted.
+ */
+ if ((int)vector > vcpu->arch.highest_stale_pending_ioapic_eoi)
+ vcpu->arch.highest_stale_pending_ioapic_eoi = vector;
+ }
+}
+
+void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
+ ulong *ioapic_handled_vectors)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_kernel_irq_routing_entry *entry;
+ struct kvm_irq_routing_table *table;
+ u32 i, nr_ioapic_pins;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->irq_srcu);
+ table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+ nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
+ kvm->arch.nr_reserved_ioapic_pins);
+ for (i = 0; i < nr_ioapic_pins; ++i) {
+ hlist_for_each_entry(entry, &table->map[i], link) {
+ struct kvm_lapic_irq irq;
+
+ if (entry->type != KVM_IRQ_ROUTING_MSI)
+ continue;
+
+ kvm_msi_to_lapic_irq(vcpu->kvm, entry, &irq);
+
+ if (!irq.trig_mode)
+ continue;
+
+ kvm_scan_ioapic_irq(vcpu, irq.dest_id, irq.dest_mode,
+ irq.vector, ioapic_handled_vectors);
+ }
+ }
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+}
+
+void kvm_arch_irq_routing_update(struct kvm *kvm)
+{
+#ifdef CONFIG_KVM_HYPERV
+ kvm_hv_irq_routing_update(kvm);
+#endif
+
+ if (irqchip_split(kvm))
+ kvm_make_scan_ioapic_request(kvm);
+}
+
+static int kvm_pi_update_irte(struct kvm_kernel_irqfd *irqfd,
+ struct kvm_kernel_irq_routing_entry *entry)
+{
+ unsigned int host_irq = irqfd->producer->irq;
+ struct kvm *kvm = irqfd->kvm;
+ struct kvm_vcpu *vcpu = NULL;
+ struct kvm_lapic_irq irq;
+ int r;
+
+ if (WARN_ON_ONCE(!irqchip_in_kernel(kvm) || !kvm_arch_has_irq_bypass()))
+ return -EINVAL;
+
+ if (entry && entry->type == KVM_IRQ_ROUTING_MSI) {
+ kvm_msi_to_lapic_irq(kvm, entry, &irq);
+
+ /*
+ * Force remapped mode if hardware doesn't support posting the
+ * virtual interrupt to a vCPU. Only IRQs are postable (NMIs,
+ * SMIs, etc. are not), and neither AMD nor Intel IOMMUs support
+ * posting multicast/broadcast IRQs. If the interrupt can't be
+ * posted, the device MSI needs to be routed to the host so that
+ * the guest's desired interrupt can be synthesized by KVM.
+ *
+ * This means that KVM can only post lowest-priority interrupts
+ * if they have a single CPU as the destination, e.g. only if
+ * the guest has affined the interrupt to a single vCPU.
+ */
+ if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
+ !kvm_irq_is_postable(&irq))
+ vcpu = NULL;
+ }
+
+ if (!irqfd->irq_bypass_vcpu && !vcpu)
+ return 0;
+
+ r = kvm_x86_call(pi_update_irte)(irqfd, irqfd->kvm, host_irq, irqfd->gsi,
+ vcpu, irq.vector);
+ if (r) {
+ WARN_ON_ONCE(irqfd->irq_bypass_vcpu && !vcpu);
+ irqfd->irq_bypass_vcpu = NULL;
+ return r;
+ }
+
+ irqfd->irq_bypass_vcpu = vcpu;
+
+ trace_kvm_pi_irte_update(host_irq, vcpu, irqfd->gsi, irq.vector, !!vcpu);
+ return 0;
+}
+
+int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
+ struct irq_bypass_producer *prod)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+ struct kvm *kvm = irqfd->kvm;
+ int ret = 0;
+
+ spin_lock_irq(&kvm->irqfds.lock);
+ irqfd->producer = prod;
+
+ if (!kvm->arch.nr_possible_bypass_irqs++)
+ kvm_x86_call(pi_start_bypass)(kvm);
+
+ if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) {
+ ret = kvm_pi_update_irte(irqfd, &irqfd->irq_entry);
+ if (ret)
+ kvm->arch.nr_possible_bypass_irqs--;
+ }
+ spin_unlock_irq(&kvm->irqfds.lock);
+
+ return ret;
+}
+
+void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+ struct irq_bypass_producer *prod)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+ struct kvm *kvm = irqfd->kvm;
+ int ret;
+
+ WARN_ON(irqfd->producer != prod);
+
+ /*
+ * If the producer of an IRQ that is currently being posted to a vCPU
+ * is unregistered, change the associated IRTE back to remapped mode as
+ * the IRQ has been released (or repurposed) by the device driver, i.e.
+ * KVM must relinquish control of the IRTE.
+ */
+ spin_lock_irq(&kvm->irqfds.lock);
+
+ if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) {
+ ret = kvm_pi_update_irte(irqfd, NULL);
+ if (ret)
+ pr_info("irq bypass consumer (eventfd %p) unregistration fails: %d\n",
+ irqfd->consumer.eventfd, ret);
+ }
+ irqfd->producer = NULL;
+
+ kvm->arch.nr_possible_bypass_irqs--;
+
+ spin_unlock_irq(&kvm->irqfds.lock);
+}
+
+void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
+ struct kvm_kernel_irq_routing_entry *old,
+ struct kvm_kernel_irq_routing_entry *new)
+{
+ if (new->type != KVM_IRQ_ROUTING_MSI &&
+ old->type != KVM_IRQ_ROUTING_MSI)
+ return;
+
+ if (old->type == KVM_IRQ_ROUTING_MSI &&
+ new->type == KVM_IRQ_ROUTING_MSI &&
+ !memcmp(&old->msi, &new->msi, sizeof(new->msi)))
+ return;
+
+ kvm_pi_update_irte(irqfd, new);
+}
+
+#ifdef CONFIG_KVM_IOAPIC
+#define IOAPIC_ROUTING_ENTRY(irq) \
+ { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
+ .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
+#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)
+
+#define PIC_ROUTING_ENTRY(irq) \
+ { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
+ .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
+#define ROUTING_ENTRY2(irq) \
+ IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
+
+static const struct kvm_irq_routing_entry default_routing[] = {
+ ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
+ ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
+ ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
+ ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
+ ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
+ ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
+ ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
+ ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
+ ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
+ ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
+ ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
+ ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
+};
+
+int kvm_setup_default_ioapic_and_pic_routing(struct kvm *kvm)
+{
+ return kvm_set_irq_routing(kvm, default_routing,
+ ARRAY_SIZE(default_routing), 0);
+}
+
+int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
+{
+ struct kvm_pic *pic = kvm->arch.vpic;
+ int r;
+
+ r = 0;
+ switch (chip->chip_id) {
+ case KVM_IRQCHIP_PIC_MASTER:
+ memcpy(&chip->chip.pic, &pic->pics[0],
+ sizeof(struct kvm_pic_state));
+ break;
+ case KVM_IRQCHIP_PIC_SLAVE:
+ memcpy(&chip->chip.pic, &pic->pics[1],
+ sizeof(struct kvm_pic_state));
+ break;
+ case KVM_IRQCHIP_IOAPIC:
+ kvm_get_ioapic(kvm, &chip->chip.ioapic);
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ return r;
+}
+
+int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
+{
+ struct kvm_pic *pic = kvm->arch.vpic;
+ int r;
+
+ r = 0;
+ switch (chip->chip_id) {
+ case KVM_IRQCHIP_PIC_MASTER:
+ spin_lock(&pic->lock);
+ memcpy(&pic->pics[0], &chip->chip.pic,
+ sizeof(struct kvm_pic_state));
+ spin_unlock(&pic->lock);
+ break;
+ case KVM_IRQCHIP_PIC_SLAVE:
+ spin_lock(&pic->lock);
+ memcpy(&pic->pics[1], &chip->chip.pic,
+ sizeof(struct kvm_pic_state));
+ spin_unlock(&pic->lock);
+ break;
+ case KVM_IRQCHIP_IOAPIC:
+ kvm_set_ioapic(kvm, &chip->chip.ioapic);
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ kvm_pic_update_irq(pic);
+ return r;
+}
+#endif
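
kvm_msi_to_lapic_irq() decodes the architectural xAPIC MSI layout: destination ID in address bits 19:12, vector in data bits 7:0, with trigger mode, destination mode and redirection hint alongside. A stand-alone decoder for those fields (xAPIC format only; x2APIC moves the destination ID into address_hi, and the sample values are arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t address_lo = 0xFEE0B00C;	/* example MSI address */
	uint32_t data = 0x4031;			/* example MSI data */

	unsigned dest_id = (address_lo >> 12) & 0xFF;	/* bits 19:12 */
	unsigned logical = (address_lo >> 2) & 1;	/* destination mode */
	unsigned rh      = (address_lo >> 3) & 1;	/* redirection hint */
	unsigned vector  = data & 0xFF;			/* bits 7:0 */
	unsigned level   = (data >> 15) & 1;		/* trigger mode */

	printf("dest=%u logical=%u rh=%u vector=0x%02x level=%u\n",
	       dest_id, logical, rh, vector, level);
	return 0;
}
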
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 76d46b2f41dd..5e62c1f79ce6 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -18,6 +18,8 @@
#include <kvm/iodev.h>
#include "lapic.h"
+#ifdef CONFIG_KVM_IOAPIC
+
#define PIC_NUM_PINS 16
#define SELECT_PIC(irq) \
((irq) < 8 ? KVM_IRQCHIP_PIC_MASTER : KVM_IRQCHIP_PIC_SLAVE)
@@ -63,17 +65,15 @@ int kvm_pic_init(struct kvm *kvm);
void kvm_pic_destroy(struct kvm *kvm);
int kvm_pic_read_irq(struct kvm *kvm);
void kvm_pic_update_irq(struct kvm_pic *s);
+int kvm_pic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
+ int irq_source_id, int level, bool line_status);
-static inline int irqchip_split(struct kvm *kvm)
-{
- int mode = kvm->arch.irqchip_mode;
+int kvm_setup_default_ioapic_and_pic_routing(struct kvm *kvm);
- /* Matches smp_wmb() when setting irqchip_mode */
- smp_rmb();
- return mode == KVM_IRQCHIP_SPLIT;
-}
+int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip);
+int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip);
-static inline int irqchip_kernel(struct kvm *kvm)
+static inline int irqchip_full(struct kvm *kvm)
{
int mode = kvm->arch.irqchip_mode;
@@ -81,10 +81,26 @@ static inline int irqchip_kernel(struct kvm *kvm)
smp_rmb();
return mode == KVM_IRQCHIP_KERNEL;
}
+#else /* CONFIG_KVM_IOAPIC */
+static __always_inline int irqchip_full(struct kvm *kvm)
+{
+ return false;
+}
+#endif
static inline int pic_in_kernel(struct kvm *kvm)
{
- return irqchip_kernel(kvm);
+ return irqchip_full(kvm);
+}
+
+
+static inline int irqchip_split(struct kvm *kvm)
+{
+ int mode = kvm->arch.irqchip_mode;
+
+ /* Matches smp_wmb() when setting irqchip_mode */
+ smp_rmb();
+ return mode == KVM_IRQCHIP_SPLIT;
}
static inline int irqchip_in_kernel(struct kvm *kvm)
@@ -105,7 +121,6 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
int apic_has_pending_timer(struct kvm_vcpu *vcpu);
-int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq,
struct dest_map *dest_map);
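
After the split, the three predicates partition the irqchip modes: irqchip_full() is true only for the fully in-kernel chip, irqchip_split() only for split mode, and irqchip_in_kernel() for either. A small table of the relationships (a simplification that ignores the smp_rmb() pairing):

#include <stdio.h>

enum irqchip_mode { NONE, SPLIT, KERNEL };

int main(void)
{
	static const char *names[] = { "NONE", "SPLIT", "KERNEL" };

	for (int m = NONE; m <= KERNEL; m++) {
		int full = (m == KERNEL);	/* irqchip_full() */
		int split = (m == SPLIT);	/* irqchip_split() */
		int in_kernel = full || split;	/* irqchip_in_kernel() */

		printf("%-6s full=%d split=%d in_kernel=%d\n",
		       names[m], full, split, in_kernel);
	}
	return 0;
}
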
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
deleted file mode 100644
index d6d792b5d1bd..000000000000
--- a/arch/x86/kvm/irq_comm.c
+++ /dev/null
@@ -1,469 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * irq_comm.c: Common API for in kernel interrupt controller
- * Copyright (c) 2007, Intel Corporation.
- *
- * Authors:
- * Yaozu (Eddie) Dong <Eddie.dong@intel.com>
- *
- * Copyright 2010 Red Hat, Inc. and/or its affiliates.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kvm_host.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/rculist.h>
-
-#include <trace/events/kvm.h>
-
-#include "irq.h"
-
-#include "ioapic.h"
-
-#include "lapic.h"
-
-#include "hyperv.h"
-#include "x86.h"
-#include "xen.h"
-
-static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level,
- bool line_status)
-{
- struct kvm_pic *pic = kvm->arch.vpic;
- return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
-}
-
-static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level,
- bool line_status)
-{
- struct kvm_ioapic *ioapic = kvm->arch.vioapic;
- return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
- line_status);
-}
-
-int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
- struct kvm_lapic_irq *irq, struct dest_map *dest_map)
-{
- int r = -1;
- struct kvm_vcpu *vcpu, *lowest = NULL;
- unsigned long i, dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
- unsigned int dest_vcpus = 0;
-
- if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
- return r;
-
- if (irq->dest_mode == APIC_DEST_PHYSICAL &&
- irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) {
- pr_info("apic: phys broadcast and lowest prio\n");
- irq->delivery_mode = APIC_DM_FIXED;
- }
-
- memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!kvm_apic_present(vcpu))
- continue;
-
- if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
- irq->dest_id, irq->dest_mode))
- continue;
-
- if (!kvm_lowest_prio_delivery(irq)) {
- if (r < 0)
- r = 0;
- r += kvm_apic_set_irq(vcpu, irq, dest_map);
- } else if (kvm_apic_sw_enabled(vcpu->arch.apic)) {
- if (!kvm_vector_hashing_enabled()) {
- if (!lowest)
- lowest = vcpu;
- else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
- lowest = vcpu;
- } else {
- __set_bit(i, dest_vcpu_bitmap);
- dest_vcpus++;
- }
- }
- }
-
- if (dest_vcpus != 0) {
- int idx = kvm_vector_to_index(irq->vector, dest_vcpus,
- dest_vcpu_bitmap, KVM_MAX_VCPUS);
-
- lowest = kvm_get_vcpu(kvm, idx);
- }
-
- if (lowest)
- r = kvm_apic_set_irq(lowest, irq, dest_map);
-
- return r;
-}
-
-void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
- struct kvm_lapic_irq *irq)
-{
- struct msi_msg msg = { .address_lo = e->msi.address_lo,
- .address_hi = e->msi.address_hi,
- .data = e->msi.data };
-
- trace_kvm_msi_set_irq(msg.address_lo | (kvm->arch.x2apic_format ?
- (u64)msg.address_hi << 32 : 0), msg.data);
-
- irq->dest_id = x86_msi_msg_get_destid(&msg, kvm->arch.x2apic_format);
- irq->vector = msg.arch_data.vector;
- irq->dest_mode = kvm_lapic_irq_dest_mode(msg.arch_addr_lo.dest_mode_logical);
- irq->trig_mode = msg.arch_data.is_level;
- irq->delivery_mode = msg.arch_data.delivery_mode << 8;
- irq->msi_redir_hint = msg.arch_addr_lo.redirect_hint;
- irq->level = 1;
- irq->shorthand = APIC_DEST_NOSHORT;
-}
-EXPORT_SYMBOL_GPL(kvm_set_msi_irq);
-
-static inline bool kvm_msi_route_invalid(struct kvm *kvm,
- struct kvm_kernel_irq_routing_entry *e)
-{
- return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff);
-}
-
-int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level, bool line_status)
-{
- struct kvm_lapic_irq irq;
-
- if (kvm_msi_route_invalid(kvm, e))
- return -EINVAL;
-
- if (!level)
- return -1;
-
- kvm_set_msi_irq(kvm, e, &irq);
-
- return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
-}
-
-#ifdef CONFIG_KVM_HYPERV
-static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level,
- bool line_status)
-{
- if (!level)
- return -1;
-
- return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
-}
-#endif
-
-int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level,
- bool line_status)
-{
- struct kvm_lapic_irq irq;
- int r;
-
- switch (e->type) {
-#ifdef CONFIG_KVM_HYPERV
- case KVM_IRQ_ROUTING_HV_SINT:
- return kvm_hv_set_sint(e, kvm, irq_source_id, level,
- line_status);
-#endif
-
- case KVM_IRQ_ROUTING_MSI:
- if (kvm_msi_route_invalid(kvm, e))
- return -EINVAL;
-
- kvm_set_msi_irq(kvm, e, &irq);
-
- if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
- return r;
- break;
-
-#ifdef CONFIG_KVM_XEN
- case KVM_IRQ_ROUTING_XEN_EVTCHN:
- if (!level)
- return -1;
-
- return kvm_xen_set_evtchn_fast(&e->xen_evtchn, kvm);
-#endif
- default:
- break;
- }
-
- return -EWOULDBLOCK;
-}
-
-int kvm_request_irq_source_id(struct kvm *kvm)
-{
- unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
- int irq_source_id;
-
- mutex_lock(&kvm->irq_lock);
- irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);
-
- if (irq_source_id >= BITS_PER_LONG) {
- pr_warn("exhausted allocatable IRQ sources!\n");
- irq_source_id = -EFAULT;
- goto unlock;
- }
-
- ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
- ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
- set_bit(irq_source_id, bitmap);
-unlock:
- mutex_unlock(&kvm->irq_lock);
-
- return irq_source_id;
-}
-
-void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
-{
- ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
- ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
-
- mutex_lock(&kvm->irq_lock);
- if (irq_source_id < 0 ||
- irq_source_id >= BITS_PER_LONG) {
- pr_err("IRQ source ID out of range!\n");
- goto unlock;
- }
- clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
- if (!irqchip_kernel(kvm))
- goto unlock;
-
- kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
- kvm_pic_clear_all(kvm->arch.vpic, irq_source_id);
-unlock:
- mutex_unlock(&kvm->irq_lock);
-}
-
-void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
- struct kvm_irq_mask_notifier *kimn)
-{
- mutex_lock(&kvm->irq_lock);
- kimn->irq = irq;
- hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list);
- mutex_unlock(&kvm->irq_lock);
-}
-
-void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
- struct kvm_irq_mask_notifier *kimn)
-{
- mutex_lock(&kvm->irq_lock);
- hlist_del_rcu(&kimn->link);
- mutex_unlock(&kvm->irq_lock);
- synchronize_srcu(&kvm->irq_srcu);
-}
-
-void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
- bool mask)
-{
- struct kvm_irq_mask_notifier *kimn;
- int idx, gsi;
-
- idx = srcu_read_lock(&kvm->irq_srcu);
- gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
- if (gsi != -1)
- hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link)
- if (kimn->irq == gsi)
- kimn->func(kimn, mask);
- srcu_read_unlock(&kvm->irq_srcu, idx);
-}
-
-bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
-{
- return irqchip_in_kernel(kvm);
-}
-
-int kvm_set_routing_entry(struct kvm *kvm,
- struct kvm_kernel_irq_routing_entry *e,
- const struct kvm_irq_routing_entry *ue)
-{
- /* We can't check irqchip_in_kernel() here as some callers are
- * currently initializing the irqchip. Other callers should therefore
- * check kvm_arch_can_set_irq_routing() before calling this function.
- */
- switch (ue->type) {
- case KVM_IRQ_ROUTING_IRQCHIP:
- if (irqchip_split(kvm))
- return -EINVAL;
- e->irqchip.pin = ue->u.irqchip.pin;
- switch (ue->u.irqchip.irqchip) {
- case KVM_IRQCHIP_PIC_SLAVE:
- e->irqchip.pin += PIC_NUM_PINS / 2;
- fallthrough;
- case KVM_IRQCHIP_PIC_MASTER:
- if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
- return -EINVAL;
- e->set = kvm_set_pic_irq;
- break;
- case KVM_IRQCHIP_IOAPIC:
- if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS)
- return -EINVAL;
- e->set = kvm_set_ioapic_irq;
- break;
- default:
- return -EINVAL;
- }
- e->irqchip.irqchip = ue->u.irqchip.irqchip;
- break;
- case KVM_IRQ_ROUTING_MSI:
- e->set = kvm_set_msi;
- e->msi.address_lo = ue->u.msi.address_lo;
- e->msi.address_hi = ue->u.msi.address_hi;
- e->msi.data = ue->u.msi.data;
-
- if (kvm_msi_route_invalid(kvm, e))
- return -EINVAL;
- break;
-#ifdef CONFIG_KVM_HYPERV
- case KVM_IRQ_ROUTING_HV_SINT:
- e->set = kvm_hv_set_sint;
- e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
- e->hv_sint.sint = ue->u.hv_sint.sint;
- break;
-#endif
-#ifdef CONFIG_KVM_XEN
- case KVM_IRQ_ROUTING_XEN_EVTCHN:
- return kvm_xen_setup_evtchn(kvm, e, ue);
-#endif
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
- struct kvm_vcpu **dest_vcpu)
-{
- int r = 0;
- unsigned long i;
- struct kvm_vcpu *vcpu;
-
- if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
- return true;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!kvm_apic_present(vcpu))
- continue;
-
- if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
- irq->dest_id, irq->dest_mode))
- continue;
-
- if (++r == 2)
- return false;
-
- *dest_vcpu = vcpu;
- }
-
- return r == 1;
-}
-EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu);
-
-#define IOAPIC_ROUTING_ENTRY(irq) \
- { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
- .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
-#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)
-
-#define PIC_ROUTING_ENTRY(irq) \
- { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
- .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
-#define ROUTING_ENTRY2(irq) \
- IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
-
-static const struct kvm_irq_routing_entry default_routing[] = {
- ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
- ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
- ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
- ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
- ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
- ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
- ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
- ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
- ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
- ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
- ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
- ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
-};
-
-int kvm_setup_default_irq_routing(struct kvm *kvm)
-{
- return kvm_set_irq_routing(kvm, default_routing,
- ARRAY_SIZE(default_routing), 0);
-}
-
-void kvm_arch_post_irq_routing_update(struct kvm *kvm)
-{
- if (!irqchip_split(kvm))
- return;
- kvm_make_scan_ioapic_request(kvm);
-}
-
-void kvm_scan_ioapic_irq(struct kvm_vcpu *vcpu, u32 dest_id, u16 dest_mode,
- u8 vector, unsigned long *ioapic_handled_vectors)
-{
- /*
- * Intercept EOI if the vCPU is the target of the new IRQ routing, or
- * the vCPU has a pending IRQ from the old routing, i.e. if the vCPU
- * may receive a level-triggered IRQ in the future, or already received
- * level-triggered IRQ. The EOI needs to be intercepted and forwarded
- * to I/O APIC emulation so that the IRQ can be de-asserted.
- */
- if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT, dest_id, dest_mode)) {
- __set_bit(vector, ioapic_handled_vectors);
- } else if (kvm_apic_pending_eoi(vcpu, vector)) {
- __set_bit(vector, ioapic_handled_vectors);
-
- /*
- * Track the highest pending EOI for which the vCPU is NOT the
- * target in the new routing. Only the EOI for the IRQ that is
- * in-flight (for the old routing) needs to be intercepted, any
- * future IRQs that arrive on this vCPU will be coincidental to
- * the level-triggered routing and don't need to be intercepted.
- */
- if ((int)vector > vcpu->arch.highest_stale_pending_ioapic_eoi)
- vcpu->arch.highest_stale_pending_ioapic_eoi = vector;
- }
-}
-
-void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
- ulong *ioapic_handled_vectors)
-{
- struct kvm *kvm = vcpu->kvm;
- struct kvm_kernel_irq_routing_entry *entry;
- struct kvm_irq_routing_table *table;
- u32 i, nr_ioapic_pins;
- int idx;
-
- idx = srcu_read_lock(&kvm->irq_srcu);
- table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
- nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
- kvm->arch.nr_reserved_ioapic_pins);
- for (i = 0; i < nr_ioapic_pins; ++i) {
- hlist_for_each_entry(entry, &table->map[i], link) {
- struct kvm_lapic_irq irq;
-
- if (entry->type != KVM_IRQ_ROUTING_MSI)
- continue;
-
- kvm_set_msi_irq(vcpu->kvm, entry, &irq);
-
- if (!irq.trig_mode)
- continue;
-
- kvm_scan_ioapic_irq(vcpu, irq.dest_id, irq.dest_mode,
- irq.vector, ioapic_handled_vectors);
- }
- }
- srcu_read_unlock(&kvm->irq_srcu, idx);
-}
-
-void kvm_arch_irq_routing_update(struct kvm *kvm)
-{
-#ifdef CONFIG_KVM_HYPERV
- kvm_hv_irq_routing_update(kvm);
-#endif
-}
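
The default routing table removed above pairs GSIs 0-15 with both the PIC and the I/O APIC, while GSIs 16-23 get I/O APIC-only entries. A minimal sketch of what one ROUTING_ENTRY2() element expands to (SELECT_PIC() is assumed to resolve to the master PIC for pins below 8 and to the slave PIC otherwise):

static const struct kvm_irq_routing_entry demo_gsi3[] = {
	/* I/O APIC half: GSI 3 -> I/O APIC pin 3 */
	{ .gsi = 3, .type = KVM_IRQ_ROUTING_IRQCHIP,
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = 3 } },
	/* PIC half: GSI 3 -> master PIC pin 3 (3 % 8), assuming SELECT_PIC() */
	{ .gsi = 3, .type = KVM_IRQ_ROUTING_IRQCHIP,
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_PIC_MASTER, .pin = 3 } },
};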
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 73418dc0ebb2..8172c2042dd6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -27,6 +27,7 @@
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
+#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>
@@ -55,9 +56,6 @@
/* 14 is the version for Xeon and Pentium 8.4.8*/
#define APIC_VERSION 0x14UL
#define LAPIC_MMIO_LENGTH (1 << 12)
-/* followed define is not in apicdef.h */
-#define MAX_APIC_VECTOR 256
-#define APIC_VECTORS_PER_REG 32
/*
* Enable local APIC timer advancement (tscdeadline mode only) with adaptive
@@ -79,42 +77,20 @@ module_param(lapic_timer_advance, bool, 0444);
static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);
-static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
-{
- *((u32 *) (regs + reg_off)) = val;
-}
-
static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
- __kvm_lapic_set_reg(apic->regs, reg_off, val);
-}
-
-static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
-{
- BUILD_BUG_ON(reg != APIC_ICR);
- return *((u64 *) (regs + reg));
+ apic_set_reg(apic->regs, reg_off, val);
}
static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
{
- return __kvm_lapic_get_reg64(apic->regs, reg);
-}
-
-static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
-{
- BUILD_BUG_ON(reg != APIC_ICR);
- *((u64 *) (regs + reg)) = val;
+ return apic_get_reg64(apic->regs, reg);
}
static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
int reg, u64 val)
{
- __kvm_lapic_set_reg64(apic->regs, reg, val);
-}
-
-static inline int apic_test_vector(int vec, void *bitmap)
-{
- return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
+ apic_set_reg64(apic->regs, reg, val);
}
bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
@@ -125,16 +101,6 @@ bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
apic_test_vector(vector, apic->regs + APIC_IRR);
}
-static inline int __apic_test_and_set_vector(int vec, void *bitmap)
-{
- return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
-}
-
-static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
-{
- return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
-}
-
__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);
@@ -626,21 +592,6 @@ static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
[LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
};
-static int find_highest_vector(void *bitmap)
-{
- int vec;
- u32 *reg;
-
- for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
- vec >= 0; vec -= APIC_VECTORS_PER_REG) {
- reg = bitmap + REG_POS(vec);
- if (*reg)
- return __fls(*reg) + vec;
- }
-
- return -1;
-}
-
static u8 count_vectors(void *bitmap)
{
int vec;
@@ -648,7 +599,7 @@ static u8 count_vectors(void *bitmap)
u8 count = 0;
for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
- reg = bitmap + REG_POS(vec);
+ reg = bitmap + APIC_VECTOR_TO_REG_OFFSET(vec);
count += hweight32(*reg);
}
@@ -706,7 +657,7 @@ EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
static inline int apic_search_irr(struct kvm_lapic *apic)
{
- return find_highest_vector(apic->regs + APIC_IRR);
+ return apic_find_highest_vector(apic->regs + APIC_IRR);
}
static inline int apic_find_highest_irr(struct kvm_lapic *apic)
@@ -729,10 +680,10 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
if (unlikely(apic->apicv_active)) {
- kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
+ apic_clear_vector(vec, apic->regs + APIC_IRR);
} else {
apic->irr_pending = false;
- kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
+ apic_clear_vector(vec, apic->regs + APIC_IRR);
if (apic_search_irr(apic) != -1)
apic->irr_pending = true;
}
@@ -744,9 +695,15 @@ void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
+static void *apic_vector_to_isr(int vec, struct kvm_lapic *apic)
+{
+ return apic->regs + APIC_ISR + APIC_VECTOR_TO_REG_OFFSET(vec);
+}
+
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
- if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
+ if (__test_and_set_bit(APIC_VECTOR_TO_BIT_NUMBER(vec),
+ apic_vector_to_isr(vec, apic)))
return;
/*
@@ -781,7 +738,7 @@ static inline int apic_find_highest_isr(struct kvm_lapic *apic)
if (likely(apic->highest_isr_cache != -1))
return apic->highest_isr_cache;
- result = find_highest_vector(apic->regs + APIC_ISR);
+ result = apic_find_highest_vector(apic->regs + APIC_ISR);
ASSERT(result == -1 || result >= 16);
return result;
@@ -789,7 +746,8 @@ static inline int apic_find_highest_isr(struct kvm_lapic *apic)
static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
- if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
+ if (!__test_and_clear_bit(APIC_VECTOR_TO_BIT_NUMBER(vec),
+ apic_vector_to_isr(vec, apic)))
return;
/*
@@ -1332,11 +1290,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
if (trig_mode)
- kvm_lapic_set_vector(vector,
- apic->regs + APIC_TMR);
+ apic_set_vector(vector, apic->regs + APIC_TMR);
else
- kvm_lapic_clear_vector(vector,
- apic->regs + APIC_TMR);
+ apic_clear_vector(vector, apic->regs + APIC_TMR);
}
kvm_x86_call(deliver_interrupt)(apic, delivery_mode,
@@ -1455,7 +1411,7 @@ static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
- int trigger_mode;
+ int __maybe_unused trigger_mode;
/* Eoi the ioapic only if the ioapic doesn't own the vector. */
if (!kvm_ioapic_handles_vector(apic, vector))
@@ -1476,12 +1432,14 @@ static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
return;
}
+#ifdef CONFIG_KVM_IOAPIC
if (apic_test_vector(vector, apic->regs + APIC_TMR))
trigger_mode = IOAPIC_LEVEL_TRIG;
else
trigger_mode = IOAPIC_EDGE_TRIG;
kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
+#endif
}
static int apic_set_eoi(struct kvm_lapic *apic)
@@ -3084,12 +3042,12 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
if (!kvm_x86_ops.x2apic_icr_is_split) {
if (set) {
- icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
- (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
- __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
+ icr = apic_get_reg(s->regs, APIC_ICR) |
+ (u64)apic_get_reg(s->regs, APIC_ICR2) << 32;
+ apic_set_reg64(s->regs, APIC_ICR, icr);
} else {
- icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
- __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
+ icr = apic_get_reg64(s->regs, APIC_ICR);
+ apic_set_reg(s->regs, APIC_ICR2, icr >> 32);
}
}
}
@@ -3105,8 +3063,7 @@ int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
* Get calculated timer current count for remaining timer period (if
* any) and store it in the returned register set.
*/
- __kvm_lapic_set_reg(s->regs, APIC_TMCCT,
- __apic_read(vcpu->arch.apic, APIC_TMCCT));
+ apic_set_reg(s->regs, APIC_TMCCT, __apic_read(vcpu->arch.apic, APIC_TMCCT));
return kvm_apic_state_fixup(vcpu, s, false);
}
@@ -3146,8 +3103,11 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
}
kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+#ifdef CONFIG_KVM_IOAPIC
if (ioapic_in_kernel(vcpu->kvm))
kvm_rtc_eoi_tracking_restore_one(vcpu);
+#endif
vcpu->arch.apic_arb_prio = 0;
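
The removed REG_POS()/VEC_POS() math (along with MAX_APIC_VECTOR and APIC_VECTORS_PER_REG) now lives behind common helpers from <asm/apic.h>. A sketch of the layout those helpers encode, with hypothetical demo_* names: 256 vectors are spread across eight 32-bit registers (IRR, ISR, TMR) placed at 16-byte strides, so a vector maps to a register offset and a bit number as follows:

/* Hypothetical equivalents of the common <asm/apic.h> helpers. */
static inline int demo_vector_to_reg_offset(int vec)
{
	return (vec >> 5) << 4;		/* 32 vectors per reg, regs 0x10 apart */
}

static inline int demo_vector_to_bit_number(int vec)
{
	return vec & 31;		/* bit within the 32-bit register */
}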
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 4ce30db65828..72de14527698 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -4,6 +4,8 @@
#include <kvm/iodev.h>
+#include <asm/apic.h>
+
#include <linux/kvm_host.h>
#include "hyperv.h"
@@ -21,6 +23,8 @@
#define APIC_BROADCAST 0xFF
#define X2APIC_BROADCAST 0xFFFFFFFFul
+#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
+
enum lapic_mode {
LAPIC_MODE_DISABLED = 0,
LAPIC_MODE_INVALID = X2APIC_ENABLE,
@@ -145,22 +149,9 @@ void kvm_lapic_exit(void);
u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic);
-#define VEC_POS(v) ((v) & (32 - 1))
-#define REG_POS(v) (((v) >> 5) << 4)
-
-static inline void kvm_lapic_clear_vector(int vec, void *bitmap)
-{
- clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
-}
-
-static inline void kvm_lapic_set_vector(int vec, void *bitmap)
-{
- set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
-}
-
static inline void kvm_lapic_set_irr(int vec, struct kvm_lapic *apic)
{
- kvm_lapic_set_vector(vec, apic->regs + APIC_IRR);
+ apic_set_vector(vec, apic->regs + APIC_IRR);
/*
* irr_pending must be true if any interrupt is pending; set it after
* APIC_IRR to avoid race with apic_clear_irr
@@ -168,14 +159,9 @@ static inline void kvm_lapic_set_irr(int vec, struct kvm_lapic *apic)
apic->irr_pending = true;
}
-static inline u32 __kvm_lapic_get_reg(char *regs, int reg_off)
-{
- return *((u32 *) (regs + reg_off));
-}
-
static inline u32 kvm_lapic_get_reg(struct kvm_lapic *apic, int reg_off)
{
- return __kvm_lapic_get_reg(apic->regs, reg_off);
+ return apic_get_reg(apic->regs, reg_off);
}
DECLARE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
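
The new X2APIC_MSR() macro maps a 16-byte-aligned xAPIC MMIO register offset to its x2APIC MSR index. Assuming APIC_BASE_MSR is 0x800 (the architectural x2APIC MSR base), APIC_ICR at MMIO offset 0x300 maps to MSR 0x830:

#define DEMO_APIC_BASE_MSR	0x800	/* architectural x2APIC MSR base */

static inline u32 demo_x2apic_msr(u32 mmio_offset)
{
	return DEMO_APIC_BASE_MSR + (mmio_offset >> 4);	/* 0x300 -> 0x830 */
}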
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4e06e2e89a8f..6e838cb6c9e1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1983,14 +1983,35 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp)
return true;
}
+static __ro_after_init HLIST_HEAD(empty_page_hash);
+
+static struct hlist_head *kvm_get_mmu_page_hash(struct kvm *kvm, gfn_t gfn)
+{
+ /*
+ * Ensure the load of the hash table pointer itself is ordered before
+ * loads to walk the table. The pointer is set at runtime outside of
+ * mmu_lock when the TDP MMU is enabled, i.e. when the hash table of
+ * shadow pages becomes necessary only when KVM needs to shadow L1's
+ * TDP for an L2 guest. Pairs with the smp_store_release() in
+ * kvm_mmu_alloc_page_hash().
+ */
+ struct hlist_head *page_hash = smp_load_acquire(&kvm->arch.mmu_page_hash);
+
+ lockdep_assert_held(&kvm->mmu_lock);
+
+ if (!page_hash)
+ return &empty_page_hash;
+
+ return &page_hash[kvm_page_table_hashfn(gfn)];
+}
+
#define for_each_valid_sp(_kvm, _sp, _list) \
hlist_for_each_entry(_sp, _list, hash_link) \
if (is_obsolete_sp((_kvm), (_sp))) { \
} else
#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) \
- for_each_valid_sp(_kvm, _sp, \
- &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
+ for_each_valid_sp(_kvm, _sp, kvm_get_mmu_page_hash(_kvm, _gfn)) \
if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
@@ -2358,6 +2379,12 @@ static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
struct kvm_mmu_page *sp;
bool created = false;
+ /*
+ * No need for memory barriers, unlike in kvm_get_mmu_page_hash(), as
+ * mmu_page_hash must be set prior to creating the first shadow root,
+ * i.e. reaching this point is fully serialized by slots_arch_lock.
+ */
+ BUG_ON(!kvm->arch.mmu_page_hash);
sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
@@ -3882,6 +3909,28 @@ out_unlock:
return r;
}
+static int kvm_mmu_alloc_page_hash(struct kvm *kvm)
+{
+ struct hlist_head *h;
+
+ if (kvm->arch.mmu_page_hash)
+ return 0;
+
+ h = kvcalloc(KVM_NUM_MMU_PAGES, sizeof(*h), GFP_KERNEL_ACCOUNT);
+ if (!h)
+ return -ENOMEM;
+
+ /*
+ * Ensure the hash table pointer is set only after all stores to zero
+ * the memory are retired. Pairs with the smp_load_acquire() in
+ * kvm_get_mmu_page_hash(). Note, mmu_lock must be held for write to
+ * add (or remove) shadow pages, and so readers are guaranteed to see
+ * an empty list for their current mmu_lock critical section.
+ */
+ smp_store_release(&kvm->arch.mmu_page_hash, h);
+ return 0;
+}
+
static int mmu_first_shadow_root_alloc(struct kvm *kvm)
{
struct kvm_memslots *slots;
@@ -3901,9 +3950,13 @@ static int mmu_first_shadow_root_alloc(struct kvm *kvm)
if (kvm_shadow_root_allocated(kvm))
goto out_unlock;
+ r = kvm_mmu_alloc_page_hash(kvm);
+ if (r)
+ goto out_unlock;
+
/*
- * Check if anything actually needs to be allocated, e.g. all metadata
- * will be allocated upfront if TDP is disabled.
+ * Check if memslot metadata actually needs to be allocated, e.g. all
+ * metadata will be allocated upfront if TDP is disabled.
*/
if (kvm_memslots_have_rmaps(kvm) &&
kvm_page_track_write_tracking_enabled(kvm))
@@ -6682,15 +6735,22 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
}
-void kvm_mmu_init_vm(struct kvm *kvm)
+int kvm_mmu_init_vm(struct kvm *kvm)
{
+ int r;
+
kvm->arch.shadow_mmio_value = shadow_mmio_value;
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
- if (tdp_mmu_enabled)
+ if (tdp_mmu_enabled) {
kvm_mmu_init_tdp_mmu(kvm);
+ } else {
+ r = kvm_mmu_alloc_page_hash(kvm);
+ if (r)
+ return r;
+ }
kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
@@ -6699,6 +6759,7 @@ void kvm_mmu_init_vm(struct kvm *kvm)
kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
+ return 0;
}
static void mmu_free_vm_memory_caches(struct kvm *kvm)
@@ -6710,6 +6771,8 @@ static void mmu_free_vm_memory_caches(struct kvm *kvm)
void kvm_mmu_uninit_vm(struct kvm *kvm)
{
+ kvfree(kvm->arch.mmu_page_hash);
+
if (tdp_mmu_enabled)
kvm_mmu_uninit_tdp_mmu(kvm);
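
The mmu_page_hash changes above follow the classic pointer-publication pattern: the writer fully initializes the table and then publishes it with a release store; readers pair with an acquire load and treat a NULL pointer as an empty table. A self-contained sketch with hypothetical names:

static struct hlist_head *demo_hash;	/* published pointer, initially NULL */

static int demo_alloc_hash(void)	/* writer, serialized externally */
{
	struct hlist_head *h = kvcalloc(64, sizeof(*h), GFP_KERNEL_ACCOUNT);

	if (!h)
		return -ENOMEM;

	/* All zero-initializing stores retire before the pointer is visible. */
	smp_store_release(&demo_hash, h);
	return 0;
}

static struct hlist_head *demo_get_bucket(unsigned int bucket)	/* reader */
{
	/* Orders the pointer load before the loads that walk the table. */
	struct hlist_head *h = smp_load_acquire(&demo_hash);

	return h ? &h[bucket] : NULL;	/* NULL == treat as empty list */
}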
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index db8f33e4de62..65f3c89d7c5d 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -103,6 +103,9 @@ struct kvm_mmu_page {
int root_count;
refcount_t tdp_mmu_root_count;
};
+
+ bool has_mapped_host_mmio;
+
union {
/* These two members aren't used for TDP MMU */
struct {
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 68e323568e95..ed762bb4b007 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -804,9 +804,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
if (r != RET_PF_CONTINUE)
return r;
+#if PTTYPE != PTTYPE_EPT
/*
- * Do not change pte_access if the pfn is a mmio page, otherwise
- * we will cache the incorrect access into mmio spte.
+ * Treat the guest PTE protections as writable, supervisor-only if this
+ * is a supervisor write fault and CR0.WP=0 (supervisor accesses ignore
+ * PTE.W if CR0.WP=0). Don't change the access type for emulated MMIO,
+ * otherwise KVM will cache incorrect access information in the SPTE.
*/
if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) &&
!is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) {
@@ -822,6 +825,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
if (is_cr4_smep(vcpu->arch.mmu))
walker.pte_access &= ~ACC_EXEC_MASK;
}
+#endif
r = RET_PF_RETRY;
write_lock(&vcpu->kvm->mmu_lock);
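
The newly #ifdef'd block applies only to non-EPT paging, as EPT has no CR0.WP-style supervisor override. As a hedged sketch of the access fixup the hunk guards (the middle of the block is elided above, so the exact adjustments are assumptions based on the surrounding comment): a supervisor write to a read-only PTE with CR0.WP=0 is treated as writable and supervisor-only, and additionally non-executable when SMEP is enabled:

static unsigned int demo_fixup_pte_access(unsigned int pte_access, bool smep)
{
	pte_access |= ACC_WRITE_MASK;	/* CR0.WP=0: supervisor ignores PTE.W */
	pte_access &= ~ACC_USER_MASK;	/* ...so treat it as supervisor-only */
	if (smep)			/* converted user page: SMEP forbids supervisor exec */
		pte_access &= ~ACC_EXEC_MASK;
	return pte_access;
}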
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index cfce03d8f123..df31039b5d63 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -104,7 +104,7 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
return spte;
}
-static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
+static bool __kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
if (pfn_valid(pfn))
return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
@@ -125,6 +125,35 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
E820_TYPE_RAM);
}
+static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
+{
+ /*
+ * Determining if a PFN is host MMIO is relative expensive. Cache the
+ * result locally (in the sole caller) to avoid doing the full query
+ * multiple times when creating a single SPTE.
+ */
+ if (*is_host_mmio < 0)
+ *is_host_mmio = __kvm_is_mmio_pfn(pfn);
+
+ return *is_host_mmio;
+}
+
+static void kvm_track_host_mmio_mapping(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
+
+ if (root)
+ WRITE_ONCE(root->has_mapped_host_mmio, true);
+ else
+ WRITE_ONCE(vcpu->kvm->arch.has_mapped_host_mmio, true);
+
+ /*
+ * Force vCPUs to exit and flush CPU buffers if the vCPU is using the
+ * affected root(s).
+ */
+ kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
+}
+
/*
* Returns true if the SPTE needs to be updated atomically due to having bits
* that may be changed without holding mmu_lock, and for which KVM must not
@@ -162,6 +191,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
{
int level = sp->role.level;
u64 spte = SPTE_MMU_PRESENT_MASK;
+ int is_host_mmio = -1;
bool wrprot = false;
/*
@@ -209,13 +239,15 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (level > PG_LEVEL_4K)
spte |= PT_PAGE_SIZE_MASK;
- spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
+ if (kvm_x86_ops.get_mt_mask)
+ spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
+ kvm_is_mmio_pfn(pfn, &is_host_mmio));
if (host_writable)
spte |= shadow_host_writable_mask;
else
pte_access &= ~ACC_WRITE_MASK;
- if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
+ if (shadow_me_value && !kvm_is_mmio_pfn(pfn, &is_host_mmio))
spte |= shadow_me_value;
spte |= (u64)pfn << PAGE_SHIFT;
@@ -260,6 +292,11 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
}
+ if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+ !kvm_vcpu_can_access_host_mmio(vcpu) &&
+ kvm_is_mmio_pfn(pfn, &is_host_mmio))
+ kvm_track_host_mmio_mapping(vcpu);
+
*new_spte = spte;
return wrprot;
}
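
make_spte() may now need the host-MMIO answer in up to three places (memtype selection, the SME encryption mask, and the cpu_buf_vm_clear tracking), so kvm_is_mmio_pfn() memoizes the expensive query in a caller-owned tri-state. The pattern, reduced to a sketch (all demo_* names are hypothetical):

static bool demo_query_is_mmio(u64 pfn)	/* stand-in for the expensive check */
{
	return pfn >= 0x100000;		/* arbitrary demo heuristic */
}

static bool demo_is_mmio_cached(u64 pfn, int *cache)
{
	if (*cache < 0)			/* -1: not yet queried */
		*cache = demo_query_is_mmio(pfn);

	return *cache;
}

/* Usage: one local per SPTE construction, shared by all call sites. */
static void demo_make_spte(u64 pfn)
{
	int is_host_mmio = -1;

	if (demo_is_mmio_cached(pfn, &is_host_mmio))
		;	/* first call does the real query */
	if (!demo_is_mmio_cached(pfn, &is_host_mmio))
		;	/* later calls reuse the cached result */
}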
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 1e94f081bdaf..3133f066927e 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -280,6 +280,16 @@ static inline bool is_mirror_sptep(tdp_ptep_t sptep)
return is_mirror_sp(sptep_to_sp(rcu_dereference(sptep)));
}
+static inline bool kvm_vcpu_can_access_host_mmio(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
+
+ if (root)
+ return READ_ONCE(root->has_mapped_host_mmio);
+
+ return READ_ONCE(vcpu->kvm->arch.has_mapped_host_mmio);
+}
+
static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
{
return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 067f8e3f5a0d..a34c5c3b164e 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -18,6 +18,7 @@
#include <linux/hashtable.h>
#include <linux/amd-iommu.h>
#include <linux/kvm_host.h>
+#include <linux/kvm_irqfd.h>
#include <asm/irq_remapping.h>
#include <asm/msr.h>
@@ -29,36 +30,39 @@
#include "svm.h"
/*
- * Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e the vCPU ID,
- * into the GATag so that KVM can retrieve the correct vCPU from a GALog entry
- * if an interrupt can't be delivered, e.g. because the vCPU isn't running.
+ * Encode the arbitrary VM ID and the vCPU's _index_ into the GATag so that
+ * KVM can retrieve the correct vCPU from a GALog entry if an interrupt can't
+ * be delivered, e.g. because the vCPU isn't running. Use the vCPU's index
+ * instead of its ID (a.k.a. its default APIC ID), as KVM is guaranteed a fast
+ * lookup on the index, whereas vCPUs whose index doesn't match their ID need
+ * to walk the entire xarray of vCPUs in the worst case scenario.
*
- * For the vCPU ID, use however many bits are currently allowed for the max
+ * For the vCPU index, use however many bits are currently allowed for the max
* guest physical APIC ID (limited by the size of the physical ID table), and
* use whatever bits remain to assign arbitrary AVIC IDs to VMs. Note, the
* size of the GATag is defined by hardware (32 bits), but is an opaque value
* as far as hardware is concerned.
*/
-#define AVIC_VCPU_ID_MASK AVIC_PHYSICAL_MAX_INDEX_MASK
+#define AVIC_VCPU_IDX_MASK AVIC_PHYSICAL_MAX_INDEX_MASK
#define AVIC_VM_ID_SHIFT HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK)
#define AVIC_VM_ID_MASK (GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT)
#define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK)
-#define AVIC_GATAG_TO_VCPUID(x) (x & AVIC_VCPU_ID_MASK)
+#define AVIC_GATAG_TO_VCPUIDX(x) (x & AVIC_VCPU_IDX_MASK)
-#define __AVIC_GATAG(vm_id, vcpu_id) ((((vm_id) & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
- ((vcpu_id) & AVIC_VCPU_ID_MASK))
-#define AVIC_GATAG(vm_id, vcpu_id) \
+#define __AVIC_GATAG(vm_id, vcpu_idx) ((((vm_id) & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
+ ((vcpu_idx) & AVIC_VCPU_IDX_MASK))
+#define AVIC_GATAG(vm_id, vcpu_idx) \
({ \
- u32 ga_tag = __AVIC_GATAG(vm_id, vcpu_id); \
+ u32 ga_tag = __AVIC_GATAG(vm_id, vcpu_idx); \
\
- WARN_ON_ONCE(AVIC_GATAG_TO_VCPUID(ga_tag) != (vcpu_id)); \
+ WARN_ON_ONCE(AVIC_GATAG_TO_VCPUIDX(ga_tag) != (vcpu_idx)); \
WARN_ON_ONCE(AVIC_GATAG_TO_VMID(ga_tag) != (vm_id)); \
ga_tag; \
})
-static_assert(__AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u);
+static_assert(__AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_IDX_MASK) == -1u);
static bool force_avic;
module_param_unsafe(force_avic, bool, 0444);
@@ -75,14 +79,6 @@ static bool next_vm_id_wrapped = 0;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
bool x2avic_enabled;
-/*
- * This is a wrapper of struct amd_iommu_ir_data.
- */
-struct amd_svm_iommu_ir {
- struct list_head node; /* Used by SVM for per-vcpu ir_list */
- void *data; /* Storing pointer to struct amd_ir_data */
-};
-
static void avic_activate_vmcb(struct vcpu_svm *svm)
{
struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -147,16 +143,16 @@ int avic_ga_log_notifier(u32 ga_tag)
struct kvm_svm *kvm_svm;
struct kvm_vcpu *vcpu = NULL;
u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
- u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
+ u32 vcpu_idx = AVIC_GATAG_TO_VCPUIDX(ga_tag);
- pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
- trace_kvm_avic_ga_log(vm_id, vcpu_id);
+ pr_debug("SVM: %s: vm_id=%#x, vcpu_idx=%#x\n", __func__, vm_id, vcpu_idx);
+ trace_kvm_avic_ga_log(vm_id, vcpu_idx);
spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
if (kvm_svm->avic_vm_id != vm_id)
continue;
- vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
+ vcpu = kvm_get_vcpu(&kvm_svm->kvm, vcpu_idx);
break;
}
spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
@@ -180,10 +176,8 @@ void avic_vm_destroy(struct kvm *kvm)
if (!enable_apicv)
return;
- if (kvm_svm->avic_logical_id_table_page)
- __free_page(kvm_svm->avic_logical_id_table_page);
- if (kvm_svm->avic_physical_id_table_page)
- __free_page(kvm_svm->avic_physical_id_table_page);
+ free_page((unsigned long)kvm_svm->avic_logical_id_table);
+ free_page((unsigned long)kvm_svm->avic_physical_id_table);
spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
hash_del(&kvm_svm->hnode);
@@ -196,27 +190,19 @@ int avic_vm_init(struct kvm *kvm)
int err = -ENOMEM;
struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
struct kvm_svm *k2;
- struct page *p_page;
- struct page *l_page;
u32 vm_id;
if (!enable_apicv)
return 0;
- /* Allocating physical APIC ID table (4KB) */
- p_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
- if (!p_page)
+ kvm_svm->avic_physical_id_table = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+ if (!kvm_svm->avic_physical_id_table)
goto free_avic;
- kvm_svm->avic_physical_id_table_page = p_page;
-
- /* Allocating logical APIC ID table (4KB) */
- l_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
- if (!l_page)
+ kvm_svm->avic_logical_id_table = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+ if (!kvm_svm->avic_logical_id_table)
goto free_avic;
- kvm_svm->avic_logical_id_table_page = l_page;
-
spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
again:
vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
@@ -242,17 +228,19 @@ free_avic:
return err;
}
+static phys_addr_t avic_get_backing_page_address(struct vcpu_svm *svm)
+{
+ return __sme_set(__pa(svm->vcpu.arch.apic->regs));
+}
+
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
{
struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
- phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
- phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
- phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
- vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
- vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
- vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
- vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE & VMCB_AVIC_APIC_BAR_MASK;
+ vmcb->control.avic_backing_page = avic_get_backing_page_address(svm);
+ vmcb->control.avic_logical_id = __sme_set(__pa(kvm_svm->avic_logical_id_table));
+ vmcb->control.avic_physical_id = __sme_set(__pa(kvm_svm->avic_physical_id_table));
+ vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE;
if (kvm_apicv_activated(svm->vcpu.kvm))
avic_activate_vmcb(svm);
@@ -260,32 +248,31 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
avic_deactivate_vmcb(svm);
}
-static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
- unsigned int index)
-{
- u64 *avic_physical_id_table;
- struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
-
- if ((!x2avic_enabled && index > AVIC_MAX_PHYSICAL_ID) ||
- (index > X2AVIC_MAX_PHYSICAL_ID))
- return NULL;
-
- avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
-
- return &avic_physical_id_table[index];
-}
-
static int avic_init_backing_page(struct kvm_vcpu *vcpu)
{
- u64 *entry, new_entry;
- int id = vcpu->vcpu_id;
+ struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
struct vcpu_svm *svm = to_svm(vcpu);
+ u32 id = vcpu->vcpu_id;
+ u64 new_entry;
+ /*
+ * Inhibit AVIC if the vCPU ID is bigger than what is supported by AVIC
+ * hardware. Immediately clear apicv_active, i.e. don't wait until the
+ * KVM_REQ_APICV_UPDATE request is processed on the first KVM_RUN, as
+ * avic_vcpu_load() expects to be called if and only if the vCPU has
+ * fully initialized AVIC.
+ */
if ((!x2avic_enabled && id > AVIC_MAX_PHYSICAL_ID) ||
- (id > X2AVIC_MAX_PHYSICAL_ID))
- return -EINVAL;
+ (id > X2AVIC_MAX_PHYSICAL_ID)) {
+ kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG);
+ vcpu->arch.apic->apicv_active = false;
+ return 0;
+ }
+
+ BUILD_BUG_ON((AVIC_MAX_PHYSICAL_ID + 1) * sizeof(new_entry) > PAGE_SIZE ||
+ (X2AVIC_MAX_PHYSICAL_ID + 1) * sizeof(new_entry) > PAGE_SIZE);
- if (!vcpu->arch.apic->regs)
+ if (WARN_ON_ONCE(!vcpu->arch.apic->regs))
return -EINVAL;
if (kvm_apicv_activated(vcpu->kvm)) {
@@ -302,19 +289,21 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
return ret;
}
- svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs);
+ /* Note, fls64() returns the bit position, +1. */
+ BUILD_BUG_ON(__PHYSICAL_MASK_SHIFT >
+ fls64(AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK));
/* Setting AVIC backing page address in the phy APIC ID table */
- entry = avic_get_physical_id_entry(vcpu, id);
- if (!entry)
- return -EINVAL;
+ new_entry = avic_get_backing_page_address(svm) |
+ AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
+ svm->avic_physical_id_entry = new_entry;
- new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
- AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
- AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
- WRITE_ONCE(*entry, new_entry);
-
- svm->avic_physical_id_cache = entry;
+ /*
+ * Initialize the real table, as vCPUs must have a valid entry in order
+ * for broadcast IPIs to function correctly (broadcast IPIs ignore
+ * invalid entries, i.e. aren't guaranteed to generate a VM-Exit).
+ */
+ WRITE_ONCE(kvm_svm->avic_physical_id_table[id], new_entry);
return 0;
}
@@ -448,7 +437,7 @@ static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source
if (apic_x2apic_mode(source))
avic_logical_id_table = NULL;
else
- avic_logical_id_table = page_address(kvm_svm->avic_logical_id_table_page);
+ avic_logical_id_table = kvm_svm->avic_logical_id_table;
/*
* AVIC is inhibited if vCPUs aren't mapped 1:1 with logical
@@ -550,7 +539,6 @@ unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu)
static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
{
struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
- u32 *logical_apic_id_table;
u32 cluster, index;
ldr = GET_APIC_LOGICAL_ID(ldr);
@@ -571,9 +559,7 @@ static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
return NULL;
index += (cluster << 2);
- logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
-
- return &logical_apic_id_table[index];
+ return &kvm_svm->avic_logical_id_table[index];
}
static void avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
@@ -722,6 +708,9 @@ int avic_init_vcpu(struct vcpu_svm *svm)
int ret;
struct kvm_vcpu *vcpu = &svm->vcpu;
+ INIT_LIST_HEAD(&svm->ir_list);
+ spin_lock_init(&svm->ir_list_lock);
+
if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
return 0;
@@ -729,8 +718,6 @@ int avic_init_vcpu(struct vcpu_svm *svm)
if (ret)
return ret;
- INIT_LIST_HEAD(&svm->ir_list);
- spin_lock_init(&svm->ir_list_lock);
svm->dfr_reg = APIC_DFR_FLAT;
return ret;
@@ -742,316 +729,161 @@ void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
avic_handle_ldr_update(vcpu);
}
-static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
+static void svm_ir_list_del(struct kvm_kernel_irqfd *irqfd)
{
- int ret = 0;
+ struct kvm_vcpu *vcpu = irqfd->irq_bypass_vcpu;
unsigned long flags;
- struct amd_svm_iommu_ir *ir;
- struct vcpu_svm *svm = to_svm(vcpu);
-
- if (!kvm_arch_has_assigned_device(vcpu->kvm))
- return 0;
- /*
- * Here, we go through the per-vcpu ir_list to update all existing
- * interrupt remapping table entry targeting this vcpu.
- */
- spin_lock_irqsave(&svm->ir_list_lock, flags);
-
- if (list_empty(&svm->ir_list))
- goto out;
+ if (!vcpu)
+ return;
- list_for_each_entry(ir, &svm->ir_list, node) {
- if (activate)
- ret = amd_iommu_activate_guest_mode(ir->data);
- else
- ret = amd_iommu_deactivate_guest_mode(ir->data);
- if (ret)
- break;
- }
-out:
- spin_unlock_irqrestore(&svm->ir_list_lock, flags);
- return ret;
+ spin_lock_irqsave(&to_svm(vcpu)->ir_list_lock, flags);
+ list_del(&irqfd->vcpu_list);
+ spin_unlock_irqrestore(&to_svm(vcpu)->ir_list_lock, flags);
}
-static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
+ unsigned int host_irq, uint32_t guest_irq,
+ struct kvm_vcpu *vcpu, u32 vector)
{
- unsigned long flags;
- struct amd_svm_iommu_ir *cur;
-
- spin_lock_irqsave(&svm->ir_list_lock, flags);
- list_for_each_entry(cur, &svm->ir_list, node) {
- if (cur->data != pi->ir_data)
- continue;
- list_del(&cur->node);
- kfree(cur);
- break;
- }
- spin_unlock_irqrestore(&svm->ir_list_lock, flags);
-}
-
-static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
-{
- int ret = 0;
- unsigned long flags;
- struct amd_svm_iommu_ir *ir;
- u64 entry;
-
- if (WARN_ON_ONCE(!pi->ir_data))
- return -EINVAL;
-
- /**
- * In some cases, the existing irte is updated and re-set,
- * so we need to check here if it's already been * added
- * to the ir_list.
- */
- if (pi->prev_ga_tag) {
- struct kvm *kvm = svm->vcpu.kvm;
- u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
- struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
- struct vcpu_svm *prev_svm;
-
- if (!prev_vcpu) {
- ret = -EINVAL;
- goto out;
- }
-
- prev_svm = to_svm(prev_vcpu);
- svm_ir_list_del(prev_svm, pi);
- }
-
- /**
- * Allocating new amd_iommu_pi_data, which will get
- * add to the per-vcpu ir_list.
- */
- ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT);
- if (!ir) {
- ret = -ENOMEM;
- goto out;
- }
- ir->data = pi->ir_data;
-
- spin_lock_irqsave(&svm->ir_list_lock, flags);
-
/*
- * Update the target pCPU for IOMMU doorbells if the vCPU is running.
- * If the vCPU is NOT running, i.e. is blocking or scheduled out, KVM
- * will update the pCPU info when the vCPU awkened and/or scheduled in.
- * See also avic_vcpu_load().
+ * If the IRQ was affined to a different vCPU, remove the IRTE metadata
+ * from the *previous* vCPU's list.
*/
- entry = READ_ONCE(*(svm->avic_physical_id_cache));
- if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
- amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
- true, pi->ir_data);
-
- list_add(&ir->node, &svm->ir_list);
- spin_unlock_irqrestore(&svm->ir_list_lock, flags);
-out:
- return ret;
-}
+ svm_ir_list_del(irqfd);
-/*
- * Note:
- * The HW cannot support posting multicast/broadcast
- * interrupts to a vCPU. So, we still use legacy interrupt
- * remapping for these kind of interrupts.
- *
- * For lowest-priority interrupts, we only support
- * those with single CPU as the destination, e.g. user
- * configures the interrupts via /proc/irq or uses
- * irqbalance to make the interrupts single-CPU.
- */
-static int
-get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
- struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
-{
- struct kvm_lapic_irq irq;
- struct kvm_vcpu *vcpu = NULL;
-
- kvm_set_msi_irq(kvm, e, &irq);
-
- if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
- !kvm_irq_is_postable(&irq)) {
- pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
- __func__, irq.vector);
- return -1;
- }
-
- pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
- irq.vector);
- *svm = to_svm(vcpu);
- vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
- vcpu_info->vector = irq.vector;
-
- return 0;
-}
-
-/*
- * avic_pi_update_irte - set IRTE for Posted-Interrupts
- *
- * @kvm: kvm
- * @host_irq: host irq of the interrupt
- * @guest_irq: gsi of the interrupt
- * @set: set or unset PI
- * returns 0 on success, < 0 on failure
- */
-int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set)
-{
- struct kvm_kernel_irq_routing_entry *e;
- struct kvm_irq_routing_table *irq_rt;
- bool enable_remapped_mode = true;
- int idx, ret = 0;
-
- if (!kvm_arch_has_assigned_device(kvm) || !kvm_arch_has_irq_bypass())
- return 0;
-
- pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
- __func__, host_irq, guest_irq, set);
-
- idx = srcu_read_lock(&kvm->irq_srcu);
- irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
-
- if (guest_irq >= irq_rt->nr_rt_entries ||
- hlist_empty(&irq_rt->map[guest_irq])) {
- pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
- guest_irq, irq_rt->nr_rt_entries);
- goto out;
- }
-
- hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
- struct vcpu_data vcpu_info;
- struct vcpu_svm *svm = NULL;
+ if (vcpu) {
+ /*
+ * Try to enable guest_mode in IRTE, unless AVIC is inhibited,
+ * in which case configure the IRTE for legacy mode, but track
+ * the IRTE metadata so that it can be converted to guest mode
+ * if AVIC is enabled/uninhibited in the future.
+ */
+ struct amd_iommu_pi_data pi_data = {
+ .ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
+ vcpu->vcpu_idx),
+ .is_guest_mode = kvm_vcpu_apicv_active(vcpu),
+ .vapic_addr = avic_get_backing_page_address(to_svm(vcpu)),
+ .vector = vector,
+ };
+ struct vcpu_svm *svm = to_svm(vcpu);
+ u64 entry;
+ int ret;
- if (e->type != KVM_IRQ_ROUTING_MSI)
- continue;
+ /*
+ * Prevent the vCPU from being scheduled out or migrated until
+ * the IRTE is updated and its metadata has been added to the
+ * list of IRQs being posted to the vCPU, to ensure the IRTE
+ * isn't programmed with stale pCPU/IsRunning information.
+ */
+ guard(spinlock_irqsave)(&svm->ir_list_lock);
- /**
- * Here, we setup with legacy mode in the following cases:
- * 1. When cannot target interrupt to a specific vcpu.
- * 2. Unsetting posted interrupt.
- * 3. APIC virtualization is disabled for the vcpu.
- * 4. IRQ has incompatible delivery mode (SMI, INIT, etc)
+ /*
+ * Update the target pCPU for IOMMU doorbells if the vCPU is
+ * running. If the vCPU is NOT running, i.e. is blocking or
+ * scheduled out, KVM will update the pCPU info when the vCPU
+ * is awakened and/or scheduled in. See also avic_vcpu_load().
*/
- if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
- kvm_vcpu_apicv_active(&svm->vcpu)) {
- struct amd_iommu_pi_data pi;
-
- enable_remapped_mode = false;
-
- /* Try to enable guest_mode in IRTE */
- pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
- AVIC_HPA_MASK);
- pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
- svm->vcpu.vcpu_id);
- pi.is_guest_mode = true;
- pi.vcpu_data = &vcpu_info;
- ret = irq_set_vcpu_affinity(host_irq, &pi);
-
- /**
- * Here, we successfully setting up vcpu affinity in
- * IOMMU guest mode. Now, we need to store the posted
- * interrupt information in a per-vcpu ir_list so that
- * we can reference to them directly when we update vcpu
- * scheduling information in IOMMU irte.
- */
- if (!ret && pi.is_guest_mode)
- svm_ir_list_add(svm, &pi);
+ entry = svm->avic_physical_id_entry;
+ if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK) {
+ pi_data.cpu = entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
+ } else {
+ pi_data.cpu = -1;
+ pi_data.ga_log_intr = entry & AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR;
}
- if (!ret && svm) {
- trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
- e->gsi, vcpu_info.vector,
- vcpu_info.pi_desc_addr, set);
- }
+ ret = irq_set_vcpu_affinity(host_irq, &pi_data);
+ if (ret)
+ return ret;
- if (ret < 0) {
- pr_err("%s: failed to update PI IRTE\n", __func__);
- goto out;
+ /*
+ * Revert to legacy mode if the IOMMU didn't provide metadata
+ * for the IRTE, which KVM needs to keep the IRTE up-to-date,
+ * e.g. if the vCPU is migrated or AVIC is disabled.
+ */
+ if (WARN_ON_ONCE(!pi_data.ir_data)) {
+ irq_set_vcpu_affinity(host_irq, NULL);
+ return -EIO;
}
- }
- ret = 0;
- if (enable_remapped_mode) {
- /* Use legacy mode in IRTE */
- struct amd_iommu_pi_data pi;
+ irqfd->irq_bypass_data = pi_data.ir_data;
+ list_add(&irqfd->vcpu_list, &svm->ir_list);
+ return 0;
+ }
+ return irq_set_vcpu_affinity(host_irq, NULL);
+}
- /**
- * Here, pi is used to:
- * - Tell IOMMU to use legacy mode for this interrupt.
- * - Retrieve ga_tag of prior interrupt remapping data.
- */
- pi.prev_ga_tag = 0;
- pi.is_guest_mode = false;
- ret = irq_set_vcpu_affinity(host_irq, &pi);
+enum avic_vcpu_action {
+ /*
+ * There is no need to differentiate between activate and deactivate,
+ * as KVM only refreshes AVIC state when the vCPU is scheduled in and
+ * isn't blocking, i.e. the pCPU must always be (in)valid when AVIC is
+ * being (de)activated.
+ */
+ AVIC_TOGGLE_ON_OFF = BIT(0),
+ AVIC_ACTIVATE = AVIC_TOGGLE_ON_OFF,
+ AVIC_DEACTIVATE = AVIC_TOGGLE_ON_OFF,
- /**
- * Check if the posted interrupt was previously
- * setup with the guest_mode by checking if the ga_tag
- * was cached. If so, we need to clean up the per-vcpu
- * ir_list.
- */
- if (!ret && pi.prev_ga_tag) {
- int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
- struct kvm_vcpu *vcpu;
+ /*
+ * No unique action is required to deal with a vCPU that stops/starts
+ * running. A vCPU that starts running by definition stops blocking as
+ * well, and a vCPU that stops running can't have been blocking, i.e.
+ * doesn't need to toggle GALogIntr.
+ */
+ AVIC_START_RUNNING = 0,
+ AVIC_STOP_RUNNING = 0,
- vcpu = kvm_get_vcpu_by_id(kvm, id);
- if (vcpu)
- svm_ir_list_del(to_svm(vcpu), &pi);
- }
- }
-out:
- srcu_read_unlock(&kvm->irq_srcu, idx);
- return ret;
-}
+ /*
+ * When a vCPU starts blocking, KVM needs to set the GALogIntr flag
+ * in all associated IRTEs so that KVM can wake the vCPU if an IRQ is
+ * sent to the vCPU.
+ */
+ AVIC_START_BLOCKING = BIT(1),
+};
-static inline int
-avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
+static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu,
+ enum avic_vcpu_action action)
{
- int ret = 0;
- struct amd_svm_iommu_ir *ir;
+ bool ga_log_intr = (action & AVIC_START_BLOCKING);
struct vcpu_svm *svm = to_svm(vcpu);
+ struct kvm_kernel_irqfd *irqfd;
lockdep_assert_held(&svm->ir_list_lock);
- if (!kvm_arch_has_assigned_device(vcpu->kvm))
- return 0;
-
/*
* Here, we go through the per-vcpu ir_list to update all existing
* interrupt remapping table entry targeting this vcpu.
*/
if (list_empty(&svm->ir_list))
- return 0;
+ return;
- list_for_each_entry(ir, &svm->ir_list, node) {
- ret = amd_iommu_update_ga(cpu, r, ir->data);
- if (ret)
- return ret;
+ list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
+ void *data = irqfd->irq_bypass_data;
+
+ if (!(action & AVIC_TOGGLE_ON_OFF))
+ WARN_ON_ONCE(amd_iommu_update_ga(data, cpu, ga_log_intr));
+ else if (cpu >= 0)
+ WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, cpu, ga_log_intr));
+ else
+ WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
}
- return 0;
}
-void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu,
+ enum avic_vcpu_action action)
{
- u64 entry;
+ struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
int h_physical_id = kvm_cpu_get_apicid(cpu);
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long flags;
+ u64 entry;
lockdep_assert_preemption_disabled();
if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
return;
- /*
- * No need to update anything if the vCPU is blocking, i.e. if the vCPU
- * is being scheduled in after being preempted. The CPU entries in the
- * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
- * If the vCPU was migrated, its new CPU value will be stuffed when the
- * vCPU unblocks.
- */
- if (kvm_vcpu_is_blocking(vcpu))
+ if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >= PAGE_SIZE))
return;
/*
@@ -1063,38 +895,57 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
*/
spin_lock_irqsave(&svm->ir_list_lock, flags);
- entry = READ_ONCE(*(svm->avic_physical_id_cache));
+ entry = svm->avic_physical_id_entry;
WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
- entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
+ entry &= ~(AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK |
+ AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR);
entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
- WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
- avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
+ svm->avic_physical_id_entry = entry;
+
+ /*
+ * If IPI virtualization is disabled, clear IsRunning when updating the
+ * actual Physical ID table, so that the CPU never sees IsRunning=1.
+ * Keep the APIC ID up-to-date in the entry to minimize the chances of
+ * things going sideways if hardware peeks at the ID.
+ */
+ if (!enable_ipiv)
+ entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+
+ WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
+
+ avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, action);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
-void avic_vcpu_put(struct kvm_vcpu *vcpu)
+void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- u64 entry;
+ /*
+ * No need to update anything if the vCPU is blocking, i.e. if the vCPU
+ * is being scheduled in after being preempted. The CPU entries in the
+ * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
+ * If the vCPU was migrated, its new CPU value will be stuffed when the
+ * vCPU unblocks.
+ */
+ if (kvm_vcpu_is_blocking(vcpu))
+ return;
+
+ __avic_vcpu_load(vcpu, cpu, AVIC_START_RUNNING);
+}
+
+static void __avic_vcpu_put(struct kvm_vcpu *vcpu, enum avic_vcpu_action action)
+{
+ struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long flags;
+ u64 entry = svm->avic_physical_id_entry;
lockdep_assert_preemption_disabled();
- /*
- * Note, reading the Physical ID entry outside of ir_list_lock is safe
- * as only the pCPU that has loaded (or is loading) the vCPU is allowed
- * to modify the entry, and preemption is disabled. I.e. the vCPU
- * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
- * recursively.
- */
- entry = READ_ONCE(*(svm->avic_physical_id_cache));
-
- /* Nothing to do if IsRunning == '0' due to vCPU blocking. */
- if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
+ if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >= PAGE_SIZE))
return;
/*
@@ -1107,13 +958,62 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
*/
spin_lock_irqsave(&svm->ir_list_lock, flags);
- avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
+ avic_update_iommu_vcpu_affinity(vcpu, -1, action);
+
+ WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR);
+ /*
+ * Keep the previous APIC ID in the entry so that a rogue doorbell from
+ * hardware is at least restricted to a CPU associated with the vCPU.
+ */
entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
- WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+
+ if (enable_ipiv)
+ WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
+
+ /*
+ * Note! Don't set AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR in the table as
+ * it's a synthetic flag that usurps an unused should-be-zero bit.
+ */
+ if (action & AVIC_START_BLOCKING)
+ entry |= AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR;
+
+ svm->avic_physical_id_entry = entry;
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+}
+
+void avic_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Note, reading the Physical ID entry outside of ir_list_lock is safe
+ * as only the pCPU that has loaded (or is loading) the vCPU is allowed
+ * to modify the entry, and preemption is disabled. I.e. the vCPU
+ * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
+ * recursively.
+ */
+ u64 entry = to_svm(vcpu)->avic_physical_id_entry;
+
+ /*
+ * Nothing to do if IsRunning == '0' due to vCPU blocking, i.e. if the
+ * vCPU is preempted while it's in the process of blocking. WARN if the
+ * vCPU wasn't running and isn't blocking, as KVM shouldn't attempt to
+ * put the AVIC if it wasn't previously loaded.
+ */
+ if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)) {
+ if (WARN_ON_ONCE(!kvm_vcpu_is_blocking(vcpu)))
+ return;
+ /*
+ * The vCPU was preempted while blocking, ensure its IRTEs are
+ * configured to generate GA Log Interrupts.
+ */
+ if (!WARN_ON_ONCE(!(entry & AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR)))
+ return;
+ }
+
+ __avic_vcpu_put(vcpu, kvm_vcpu_is_blocking(vcpu) ? AVIC_START_BLOCKING :
+ AVIC_STOP_RUNNING);
}
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
@@ -1142,19 +1042,18 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
- bool activated = kvm_vcpu_apicv_active(vcpu);
-
if (!enable_apicv)
return;
+ /* APICv should only be toggled on/off while the vCPU is running. */
+ WARN_ON_ONCE(kvm_vcpu_is_blocking(vcpu));
+
avic_refresh_virtual_apic_mode(vcpu);
- if (activated)
- avic_vcpu_load(vcpu, vcpu->cpu);
+ if (kvm_vcpu_apicv_active(vcpu))
+ __avic_vcpu_load(vcpu, vcpu->cpu, AVIC_ACTIVATE);
else
- avic_vcpu_put(vcpu);
-
- avic_set_pi_irte_mode(vcpu, activated);
+ __avic_vcpu_put(vcpu, AVIC_DEACTIVATE);
}
void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -1162,20 +1061,25 @@ void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
if (!kvm_vcpu_apicv_active(vcpu))
return;
- /*
- * Unload the AVIC when the vCPU is about to block, _before_
- * the vCPU actually blocks.
- *
- * Any IRQs that arrive before IsRunning=0 will not cause an
- * incomplete IPI vmexit on the source, therefore vIRR will also
- * be checked by kvm_vcpu_check_block() before blocking. The
- * memory barrier implicit in set_current_state orders writing
- * IsRunning=0 before reading the vIRR. The processor needs a
- * matching memory barrier on interrupt delivery between writing
- * IRR and reading IsRunning; the lack of this barrier might be
- * the cause of errata #1235).
- */
- avic_vcpu_put(vcpu);
+ /*
+ * Unload the AVIC when the vCPU is about to block, _before_ the vCPU
+ * actually blocks.
+ *
+ * Note, any IRQs that arrive before IsRunning=0 will not cause an
+ * incomplete IPI vmexit on the source; kvm_vcpu_check_block() handles
+ * this by checking vIRR one last time before blocking. The memory
+ * barrier implicit in set_current_state orders writing IsRunning=0
+ * before reading the vIRR. The processor needs a matching memory
+ * barrier on interrupt delivery between writing IRR and reading
+ * IsRunning; the lack of this barrier might be the cause of erratum #1235.
+ *
+ * Clear IsRunning even if guest IRQs are disabled, i.e. even if KVM
+ * doesn't need to detect events for scheduling purposes. The doorbell
+ * used to signal running vCPUs cannot be blocked, i.e. will perturb the
+ * CPU and cause noisy neighbor problems if the VM is sending interrupts
+ * to the vCPU while it's scheduled out.
+ */
+ __avic_vcpu_put(vcpu, AVIC_START_BLOCKING);
}
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
@@ -1228,6 +1132,14 @@ bool avic_hardware_setup(void)
if (x2avic_enabled)
pr_info("x2AVIC enabled\n");
+ /*
+ * Disable IPI virtualization for AMD Family 17h CPUs (Zen1 and Zen2)
+ * due to erratum 1235, which results in missed VM-Exits on the sender
+ * and thus missed wake events for blocking vCPUs due to the CPU
+ * failing to see a software update to clear IsRunning.
+ */
+ enable_ipiv = enable_ipiv && boot_cpu_data.x86 != 0x17;
+
amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
return true;
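
The GATag reshuffle above only changes what the low bits carry (the vCPU's index instead of its ID); the packing itself is unchanged. A sketch of the encode/decode, assuming a 9-bit physical-ID index field (the real width is HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK)):

#define DEMO_VCPU_IDX_BITS	9	/* assumed field width */
#define DEMO_VCPU_IDX_MASK	((1u << DEMO_VCPU_IDX_BITS) - 1)

static u32 demo_make_gatag(u32 vm_id, u32 vcpu_idx)
{
	return (vm_id << DEMO_VCPU_IDX_BITS) | (vcpu_idx & DEMO_VCPU_IDX_MASK);
}

static u32 demo_gatag_to_vm_id(u32 ga_tag)
{
	return ga_tag >> DEMO_VCPU_IDX_BITS;
}

static u32 demo_gatag_to_vcpu_idx(u32 ga_tag)
{
	return ga_tag & DEMO_VCPU_IDX_MASK;
}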
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 8427a48b8b7a..b7fd2e869998 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -185,12 +185,87 @@ void recalc_intercepts(struct vcpu_svm *svm)
}
/*
+ * This array (and its actual size) holds the set of offsets (in units of the
+ * chunk size) to process when merging vmcb12's MSRPM with vmcb01's MSRPM. Note, the
+ * set of MSRs for which interception is disabled in vmcb01 is per-vCPU, e.g.
+ * based on CPUID features. This array only tracks MSRs that *might* be passed
+ * through to the guest.
+ *
+ * Hardcode the capacity of the array based on the maximum number of _offsets_.
+ * MSRs are batched together, so there are fewer offsets than MSRs.
+ */
+static int nested_svm_msrpm_merge_offsets[7] __ro_after_init;
+static int nested_svm_nr_msrpm_merge_offsets __ro_after_init;
+typedef unsigned long nsvm_msrpm_merge_t;
+
+int __init nested_svm_init_msrpm_merge_offsets(void)
+{
+ static const u32 merge_msrs[] __initconst = {
+ MSR_STAR,
+ MSR_IA32_SYSENTER_CS,
+ MSR_IA32_SYSENTER_EIP,
+ MSR_IA32_SYSENTER_ESP,
+ #ifdef CONFIG_X86_64
+ MSR_GS_BASE,
+ MSR_FS_BASE,
+ MSR_KERNEL_GS_BASE,
+ MSR_LSTAR,
+ MSR_CSTAR,
+ MSR_SYSCALL_MASK,
+ #endif
+ MSR_IA32_SPEC_CTRL,
+ MSR_IA32_PRED_CMD,
+ MSR_IA32_FLUSH_CMD,
+ MSR_IA32_APERF,
+ MSR_IA32_MPERF,
+ MSR_IA32_LASTBRANCHFROMIP,
+ MSR_IA32_LASTBRANCHTOIP,
+ MSR_IA32_LASTINTFROMIP,
+ MSR_IA32_LASTINTTOIP,
+ };
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(merge_msrs); i++) {
+ int bit_nr = svm_msrpm_bit_nr(merge_msrs[i]);
+ u32 offset;
+
+ if (WARN_ON(bit_nr < 0))
+ return -EIO;
+
+ /*
+ * Merging is done in chunks to reduce the number of accesses
+ * to L1's bitmap.
+ */
+ offset = bit_nr / BITS_PER_BYTE / sizeof(nsvm_msrpm_merge_t);
+
+ for (j = 0; j < nested_svm_nr_msrpm_merge_offsets; j++) {
+ if (nested_svm_msrpm_merge_offsets[j] == offset)
+ break;
+ }
+
+ if (j < nested_svm_nr_msrpm_merge_offsets)
+ continue;
+
+ if (WARN_ON(j >= ARRAY_SIZE(nested_svm_msrpm_merge_offsets)))
+ return -EIO;
+
+ nested_svm_msrpm_merge_offsets[j] = offset;
+ nested_svm_nr_msrpm_merge_offsets++;
+ }
+
+ return 0;
+}
+
+/*
* Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
* is optimized in that it only merges the parts where KVM MSR permission bitmap
* may contain zero bits.
*/
-static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
+static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ nsvm_msrpm_merge_t *msrpm02 = svm->nested.msrpm;
+ nsvm_msrpm_merge_t *msrpm01 = svm->msrpm;
int i;
/*
@@ -205,7 +280,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
if (!svm->nested.force_msr_bitmap_recalc) {
struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
- if (kvm_hv_hypercall_enabled(&svm->vcpu) &&
+ if (kvm_hv_hypercall_enabled(vcpu) &&
hve->hv_enlightenments_control.msr_bitmap &&
(svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
goto set_msrpm_base_pa;
@@ -215,25 +290,17 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
return true;
- for (i = 0; i < MSRPM_OFFSETS; i++) {
- u32 value, p;
- u64 offset;
+ for (i = 0; i < nested_svm_nr_msrpm_merge_offsets; i++) {
+ const int p = nested_svm_msrpm_merge_offsets[i];
+ nsvm_msrpm_merge_t l1_val;
+ gpa_t gpa;
- if (msrpm_offsets[i] == 0xffffffff)
- break;
+ gpa = svm->nested.ctl.msrpm_base_pa + (p * sizeof(l1_val));
- p = msrpm_offsets[i];
-
- /* x2apic msrs are intercepted always for the nested guest */
- if (is_x2apic_msrpm_offset(p))
- continue;
-
- offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
-
- if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
+ if (kvm_vcpu_read_guest(vcpu, gpa, &l1_val, sizeof(l1_val)))
return false;
- svm->nested.msrpm[p] = svm->msrpm[p] | value;
+ msrpm02[p] = msrpm01[p] | l1_val;
}
svm->nested.force_msr_bitmap_recalc = false;
@@ -937,7 +1004,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
goto out_exit_err;
- if (nested_svm_vmrun_msrpm(svm))
+ if (nested_svm_merge_msrpm(vcpu))
goto out;
out_exit_err:
@@ -1230,7 +1297,6 @@ int svm_allocate_nested(struct vcpu_svm *svm)
svm->nested.msrpm = svm_vcpu_alloc_msrpm();
if (!svm->nested.msrpm)
goto err_free_vmcb02;
- svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
svm->nested.initialized = true;
return 0;
@@ -1290,26 +1356,26 @@ void svm_leave_nested(struct kvm_vcpu *vcpu)
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
- u32 offset, msr, value;
- int write, mask;
+ gpa_t base = svm->nested.ctl.msrpm_base_pa;
+ int write, bit_nr;
+ u8 value, mask;
+ u32 msr;
if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
return NESTED_EXIT_HOST;
msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
- offset = svm_msrpm_offset(msr);
+ bit_nr = svm_msrpm_bit_nr(msr);
write = svm->vmcb->control.exit_info_1 & 1;
- mask = 1 << ((2 * (msr & 0xf)) + write);
- if (offset == MSR_INVALID)
+ if (bit_nr < 0)
return NESTED_EXIT_DONE;
- /* Offset is in 32 bit units but need in 8 bit units */
- offset *= 4;
-
- if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
+ if (kvm_vcpu_read_guest(&svm->vcpu, base + bit_nr / BITS_PER_BYTE,
+ &value, sizeof(value)))
return NESTED_EXIT_DONE;
+ mask = BIT(write) << (bit_nr & (BITS_PER_BYTE - 1));
return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
@@ -1819,13 +1885,11 @@ out_free:
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
- struct vcpu_svm *svm = to_svm(vcpu);
-
if (WARN_ON(!is_guest_mode(vcpu)))
return true;
if (!vcpu->arch.pdptrs_from_userspace &&
- !nested_npt_enabled(svm) && is_pae_paging(vcpu))
+ !nested_npt_enabled(to_svm(vcpu)) && is_pae_paging(vcpu))
/*
* Reload the guest's PDPTRs since after a migration
* the guest CR3 might be restored prior to setting the nested
@@ -1834,7 +1898,7 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
return false;
- if (!nested_svm_vmrun_msrpm(svm)) {
+ if (!nested_svm_merge_msrpm(vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
KVM_INTERNAL_ERROR_EMULATION;
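
nested_svm_exit_handled_msr() now works in bit numbers rather than 32-bit offsets: each MSR owns an adjacent (read, write) bit pair in the MSRPM, so the intercept test reads one guest byte and masks the right bit. A sketch of the probe (svm_msrpm_bit_nr() is taken as given; the demo name is hypothetical):

static bool demo_msr_intercepted(const u8 *l1_msrpm, u32 bit_nr, bool write)
{
	u8 value = l1_msrpm[bit_nr / 8];	/* byte holding the MSR's bit pair */
	/* Pairs are 2-bit aligned, so BIT(write) shifted in place stays in the byte. */
	u8 mask = (write ? 2 : 1) << (bit_nr & 7);

	return value & mask;
}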
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index b201f77fcd49..2fbdebf79fbb 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -117,6 +117,7 @@ static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
*/
down_write(&sev_deactivate_lock);
+ /* SNP firmware requires use of WBINVD for ASID recycling. */
wbinvd_on_all_cpus();
if (sev_snp_enabled)
@@ -446,7 +447,12 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
init_args.probe = false;
ret = sev_platform_init(&init_args);
if (ret)
- goto e_free;
+ goto e_free_asid;
+
+ if (!zalloc_cpumask_var(&sev->have_run_cpus, GFP_KERNEL_ACCOUNT)) {
+ ret = -ENOMEM;
+ goto e_free_asid;
+ }
/* This needs to happen after SEV/SNP firmware initialization. */
if (vm_type == KVM_X86_SNP_VM) {
@@ -464,6 +470,8 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
return 0;
e_free:
+ free_cpumask_var(sev->have_run_cpus);
+e_free_asid:
argp->error = init_args.error;
sev_asid_free(sev);
sev->asid = 0;
@@ -708,6 +716,33 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
}
}
+static void sev_writeback_caches(struct kvm *kvm)
+{
+ /*
+ * Note, the caller is responsible for ensuring correctness if the mask
+ * can be modified, e.g. if a CPU could be doing VMRUN.
+ */
+ if (cpumask_empty(to_kvm_sev_info(kvm)->have_run_cpus))
+ return;
+
+ /*
+ * Ensure that all dirty guest tagged cache entries are written back
+ * before releasing the pages back to the system for use. CLFLUSH will
+ * not do this without SME_COHERENT, and flushing many cache lines
+ * individually is slower than blasting WBINVD for large VMs, so issue
+ * WBNOINVD (or WBINVD if the "no invalidate" variant is unsupported)
+ * on CPUs that have done VMRUN, i.e. may have dirtied data using the
+ * VM's ASID.
+ *
+ * For simplicity, never remove CPUs from the bitmap. Ideally, KVM
+ * would clear the mask when flushing caches, but doing so requires
+ * serializing multiple calls and having CPUs that respond to the IPI
+ * mark themselves as still running if they are running (or about to
+ * run) a vCPU for the VM.
+ */
+ wbnoinvd_on_cpus_mask(to_kvm_sev_info(kvm)->have_run_cpus);
+}
+
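This pairs with the pre_sev_run() hunk later in this patch, which sets the
current pCPU in have_run_cpus before every VMRUN: reclaim paths then write
back caches only on CPUs that could actually hold guest-tagged dirty lines,
instead of broadcasting WBINVD to every CPU in the system. A toy model of
the pairing, using a plain 64-bit mask in place of cpumask_var_t and of the
real wbnoinvd_on_cpus_mask() IPI:

	#include <stdint.h>

	static uint64_t have_run_cpus;	/* bit n: pCPU n has entered the guest */

	static void model_pre_vmrun(int cpu)
	{
		have_run_cpus |= 1ull << cpu;	/* set-only, never cleared */
	}

	static void model_writeback_caches(void)
	{
		if (!have_run_cpus)
			return;		/* no CPU can hold guest-tagged lines */

		/* ...IPI exactly the CPUs set in have_run_cpus to WBNOINVD... */
	}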
static unsigned long get_num_contig_pages(unsigned long idx,
struct page **inpages, unsigned long npages)
{
@@ -2037,6 +2072,17 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
if (ret)
goto out_source_vcpu;
+ /*
+ * Allocate a new have_run_cpus for the destination, i.e. don't copy
+ * the set of CPUs from the source. If a CPU was used to run a vCPU in
+ * the source VM but is never used for the destination VM, then the CPU
+ * can only have cached memory that was accessible to the source VM.
+ */
+ if (!zalloc_cpumask_var(&dst_sev->have_run_cpus, GFP_KERNEL_ACCOUNT)) {
+ ret = -ENOMEM;
+ goto out_source_vcpu;
+ }
+
sev_migrate_from(kvm, source_kvm);
kvm_vm_dead(source_kvm);
cg_cleanup_sev = src_sev;
@@ -2135,11 +2181,7 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
return -EINVAL;
/* Check for policy bits that must be set */
- if (!(params.policy & SNP_POLICY_MASK_RSVD_MBO) ||
- !(params.policy & SNP_POLICY_MASK_SMT))
- return -EINVAL;
-
- if (params.policy & SNP_POLICY_MASK_SINGLE_SOCKET)
+ if (!(params.policy & SNP_POLICY_MASK_RSVD_MBO))
return -EINVAL;
sev->policy = params.policy;
@@ -2698,12 +2740,7 @@ int sev_mem_enc_unregister_region(struct kvm *kvm,
goto failed;
}
- /*
- * Ensure that all guest tagged cache entries are flushed before
- * releasing the pages back to the system for use. CLFLUSH will
- * not do this, so issue a WBINVD.
- */
- wbinvd_on_all_cpus();
+ sev_writeback_caches(kvm);
__unregister_enc_region_locked(kvm, region);
@@ -2745,13 +2782,18 @@ int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
goto e_unlock;
}
+ mirror_sev = to_kvm_sev_info(kvm);
+ if (!zalloc_cpumask_var(&mirror_sev->have_run_cpus, GFP_KERNEL_ACCOUNT)) {
+ ret = -ENOMEM;
+ goto e_unlock;
+ }
+
/*
* The mirror kvm holds an enc_context_owner ref so its asid can't
* disappear until we're done with it
*/
source_sev = to_kvm_sev_info(source_kvm);
kvm_get_kvm(source_kvm);
- mirror_sev = to_kvm_sev_info(kvm);
list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);
/* Set enc_context_owner and copy its encryption context over */
@@ -2813,7 +2855,13 @@ void sev_vm_destroy(struct kvm *kvm)
WARN_ON(!list_empty(&sev->mirror_vms));
- /* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
+ free_cpumask_var(sev->have_run_cpus);
+
+ /*
+ * If this is a mirror VM, remove it from the owner's list of mirrors
+ * and skip ASID cleanup (the ASID is tied to the lifetime of the owner).
+ * Note, mirror VMs don't support registering encrypted regions.
+ */
if (is_mirroring_enc_context(kvm)) {
struct kvm *owner_kvm = sev->enc_context_owner;
@@ -2824,12 +2872,6 @@ void sev_vm_destroy(struct kvm *kvm)
return;
}
- /*
- * Ensure that all guest tagged cache entries are flushed before
- * releasing the pages back to the system for use. CLFLUSH will
- * not do this, so issue a WBINVD.
- */
- wbinvd_on_all_cpus();
/*
* if userspace was terminated before unregistering the memory regions
@@ -3099,30 +3141,29 @@ static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
/*
* VM Page Flush takes a host virtual address and a guest ASID. Fall
- * back to WBINVD if this faults so as not to make any problems worse
- * by leaving stale encrypted data in the cache.
+ * back to full writeback of caches if this faults so as not to make
+ * any problems worse by leaving stale encrypted data in the cache.
*/
if (WARN_ON_ONCE(wrmsrq_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
- goto do_wbinvd;
+ goto do_sev_writeback_caches;
return;
-do_wbinvd:
- wbinvd_on_all_cpus();
+do_sev_writeback_caches:
+ sev_writeback_caches(vcpu->kvm);
}
void sev_guest_memory_reclaimed(struct kvm *kvm)
{
/*
* With SNP+gmem, private/encrypted memory is unreachable via the
- * hva-based mmu notifiers, so these events are only actually
- * pertaining to shared pages where there is no need to perform
- * the WBINVD to flush associated caches.
+ * hva-based mmu notifiers, i.e. these events are explicitly scoped to
+ * shared pages, where there's no need to flush caches.
*/
if (!sev_guest(kvm) || sev_snp_guest(kvm))
return;
- wbinvd_on_all_cpus();
+ sev_writeback_caches(kvm);
}
void sev_free_vcpu(struct kvm_vcpu *vcpu)
@@ -3454,6 +3495,15 @@ int pre_sev_run(struct vcpu_svm *svm, int cpu)
if (sev_es_guest(kvm) && !VALID_PAGE(svm->vmcb->control.vmsa_pa))
return -EINVAL;
+ /*
+ * To optimize cache flushes when memory is reclaimed from an SEV VM,
+ * track physical CPUs that enter the guest for SEV VMs and thus can
+ * have encrypted, dirty data in the cache, and flush caches only for
+ * CPUs that have entered the guest.
+ */
+ if (!cpumask_test_cpu(cpu, to_kvm_sev_info(kvm)->have_run_cpus))
+ cpumask_set_cpu(cpu, to_kvm_sev_info(kvm)->have_run_cpus);
+
/* Assign the asid allocated with this SEV guest */
svm->asid = asid;
@@ -3886,9 +3936,9 @@ void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
* From this point forward, the VMSA will always be a guest-mapped page
* rather than the initial one allocated by KVM in svm->sev_es.vmsa. In
* theory, svm->sev_es.vmsa could be free'd and cleaned up here, but
- * that involves cleanups like wbinvd_on_all_cpus() which would ideally
- * be handled during teardown rather than guest boot. Deferring that
- * also allows the existing logic for SEV-ES VMSAs to be re-used with
+ * that involves cleanups like flushing caches, which would ideally be
+ * handled during teardown rather than guest boot. Deferring that also
+ * allows the existing logic for SEV-ES VMSAs to be re-used with
* minimal SNP-specific changes.
*/
svm->sev_es.snp_has_guest_vmsa = true;
@@ -4390,16 +4440,17 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
count, in);
}
-static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
+void sev_es_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = &svm->vcpu;
-
- if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
- bool v_tsc_aux = guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) ||
- guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID);
+ /* Clear intercepts on MSRs that are context switched by hardware. */
+ svm_disable_intercept_for_msr(vcpu, MSR_AMD64_SEV_ES_GHCB, MSR_TYPE_RW);
+ svm_disable_intercept_for_msr(vcpu, MSR_EFER, MSR_TYPE_RW);
+ svm_disable_intercept_for_msr(vcpu, MSR_IA32_CR_PAT, MSR_TYPE_RW);
- set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
- }
+ if (boot_cpu_has(X86_FEATURE_V_TSC_AUX))
+ svm_set_intercept_for_msr(vcpu, MSR_TSC_AUX, MSR_TYPE_RW,
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) &&
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID));
/*
* For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
@@ -4413,11 +4464,9 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
* XSAVES being exposed to the guest so that KVM can at least honor
* guest CPUID for RDMSR and WRMSR.
*/
- if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
- else
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0);
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_XSS, MSR_TYPE_RW,
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) ||
+ !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES));
}
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
@@ -4429,16 +4478,12 @@ void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
if (best)
vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
-
- if (sev_es_guest(svm->vcpu.kvm))
- sev_es_vcpu_after_set_cpuid(svm);
}
static void sev_es_init_vmcb(struct vcpu_svm *svm)
{
struct kvm_sev_info *sev = to_kvm_sev_info(svm->vcpu.kvm);
struct vmcb *vmcb = svm->vmcb01.ptr;
- struct kvm_vcpu *vcpu = &svm->vcpu;
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
@@ -4496,10 +4541,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
/* Can't intercept XSETBV, HV can't modify XCR0 directly */
svm_clr_intercept(svm, INTERCEPT_XSETBV);
-
- /* Clear intercepts on selected MSRs */
- set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
}
void sev_init_vmcb(struct vcpu_svm *svm)
@@ -4888,7 +4929,7 @@ void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
/*
* SEV-ES avoids host/guest cache coherency issues through
- * WBINVD hooks issued via MMU notifiers during run-time, and
+ * WBNOINVD hooks issued via MMU notifiers during run-time, and
* KVM's VM destroy path at shutdown. Those MMU notifier events
* don't cover gmem since there is no requirement to map pages
* to a HVA in order to use them for a running guest. While the
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index ab9b947dbf4f..d9931c6c4bc6 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -72,8 +72,6 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
static bool erratum_383_found __read_mostly;
-u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
-
/*
* Set osvw_len to higher value when updated Revision Guides
* are published and we know what the new status bits are
@@ -82,72 +80,6 @@ static uint64_t osvw_len = 4, osvw_status;
static DEFINE_PER_CPU(u64, current_tsc_ratio);
-#define X2APIC_MSR(x) (APIC_BASE_MSR + (x >> 4))
-
-static const struct svm_direct_access_msrs {
- u32 index; /* Index of the MSR */
- bool always; /* True if intercept is initially cleared */
-} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
- { .index = MSR_STAR, .always = true },
- { .index = MSR_IA32_SYSENTER_CS, .always = true },
- { .index = MSR_IA32_SYSENTER_EIP, .always = false },
- { .index = MSR_IA32_SYSENTER_ESP, .always = false },
-#ifdef CONFIG_X86_64
- { .index = MSR_GS_BASE, .always = true },
- { .index = MSR_FS_BASE, .always = true },
- { .index = MSR_KERNEL_GS_BASE, .always = true },
- { .index = MSR_LSTAR, .always = true },
- { .index = MSR_CSTAR, .always = true },
- { .index = MSR_SYSCALL_MASK, .always = true },
-#endif
- { .index = MSR_IA32_SPEC_CTRL, .always = false },
- { .index = MSR_IA32_PRED_CMD, .always = false },
- { .index = MSR_IA32_FLUSH_CMD, .always = false },
- { .index = MSR_IA32_DEBUGCTLMSR, .always = false },
- { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
- { .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
- { .index = MSR_IA32_LASTINTFROMIP, .always = false },
- { .index = MSR_IA32_LASTINTTOIP, .always = false },
- { .index = MSR_IA32_XSS, .always = false },
- { .index = MSR_EFER, .always = false },
- { .index = MSR_IA32_CR_PAT, .always = false },
- { .index = MSR_AMD64_SEV_ES_GHCB, .always = true },
- { .index = MSR_TSC_AUX, .always = false },
- { .index = X2APIC_MSR(APIC_ID), .always = false },
- { .index = X2APIC_MSR(APIC_LVR), .always = false },
- { .index = X2APIC_MSR(APIC_TASKPRI), .always = false },
- { .index = X2APIC_MSR(APIC_ARBPRI), .always = false },
- { .index = X2APIC_MSR(APIC_PROCPRI), .always = false },
- { .index = X2APIC_MSR(APIC_EOI), .always = false },
- { .index = X2APIC_MSR(APIC_RRR), .always = false },
- { .index = X2APIC_MSR(APIC_LDR), .always = false },
- { .index = X2APIC_MSR(APIC_DFR), .always = false },
- { .index = X2APIC_MSR(APIC_SPIV), .always = false },
- { .index = X2APIC_MSR(APIC_ISR), .always = false },
- { .index = X2APIC_MSR(APIC_TMR), .always = false },
- { .index = X2APIC_MSR(APIC_IRR), .always = false },
- { .index = X2APIC_MSR(APIC_ESR), .always = false },
- { .index = X2APIC_MSR(APIC_ICR), .always = false },
- { .index = X2APIC_MSR(APIC_ICR2), .always = false },
-
- /*
- * Note:
- * AMD does not virtualize APIC TSC-deadline timer mode, but it is
- * emulated by KVM. When setting APIC LVTT (0x832) register bit 18,
- * the AVIC hardware would generate GP fault. Therefore, always
- * intercept the MSR 0x832, and do not setup direct_access_msr.
- */
- { .index = X2APIC_MSR(APIC_LVTTHMR), .always = false },
- { .index = X2APIC_MSR(APIC_LVTPC), .always = false },
- { .index = X2APIC_MSR(APIC_LVT0), .always = false },
- { .index = X2APIC_MSR(APIC_LVT1), .always = false },
- { .index = X2APIC_MSR(APIC_LVTERR), .always = false },
- { .index = X2APIC_MSR(APIC_TMICT), .always = false },
- { .index = X2APIC_MSR(APIC_TMCCT), .always = false },
- { .index = X2APIC_MSR(APIC_TDCR), .always = false },
- { .index = MSR_INVALID, .always = false },
-};
-
/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* pause_filter_count: On processors that support Pause filtering(indicated
@@ -232,6 +164,7 @@ module_param(tsc_scaling, int, 0444);
*/
static bool avic;
module_param(avic, bool, 0444);
+module_param(enable_ipiv, bool, 0444);
module_param(enable_device_posted_irqs, bool, 0444);
@@ -264,33 +197,6 @@ static DEFINE_MUTEX(vmcb_dump_mutex);
*/
static int tsc_aux_uret_slot __read_mostly = -1;
-static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
-
-#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
-#define MSRS_RANGE_SIZE 2048
-#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
-
-u32 svm_msrpm_offset(u32 msr)
-{
- u32 offset;
- int i;
-
- for (i = 0; i < NUM_MSR_MAPS; i++) {
- if (msr < msrpm_ranges[i] ||
- msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
- continue;
-
- offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
- offset += (i * MSRS_RANGE_SIZE); /* add range offset */
-
- /* Now we have the u8 offset - but need the u32 offset */
- return offset / 4;
- }
-
- /* MSR not in any range */
- return MSR_INVALID;
-}
-
static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
@@ -757,50 +663,8 @@ static void clr_dr_intercepts(struct vcpu_svm *svm)
recalc_intercepts(svm);
}
-static int direct_access_msr_slot(u32 msr)
-{
- u32 i;
-
- for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
- if (direct_access_msrs[i].index == msr)
- return i;
-
- return -ENOENT;
-}
-
-static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
- int write)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
- int slot = direct_access_msr_slot(msr);
-
- if (slot == -ENOENT)
- return;
-
- /* Set the shadow bitmaps to the desired intercept states */
- if (read)
- set_bit(slot, svm->shadow_msr_intercept.read);
- else
- clear_bit(slot, svm->shadow_msr_intercept.read);
-
- if (write)
- set_bit(slot, svm->shadow_msr_intercept.write);
- else
- clear_bit(slot, svm->shadow_msr_intercept.write);
-}
-
-static bool valid_msr_intercept(u32 index)
-{
- return direct_access_msr_slot(index) != -ENOENT;
-}
-
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
- u8 bit_write;
- unsigned long tmp;
- u32 offset;
- u32 *msrpm;
-
/*
* For non-nested case:
* If the L01 MSR bitmap does not intercept the MSR, then we need to
@@ -810,90 +674,102 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
* If the L02 MSR bitmap does not intercept the MSR, then we need to
* save it.
*/
- msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
- to_svm(vcpu)->msrpm;
+ void *msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :
+ to_svm(vcpu)->msrpm;
- offset = svm_msrpm_offset(msr);
- bit_write = 2 * (msr & 0x0f) + 1;
- tmp = msrpm[offset];
-
- BUG_ON(offset == MSR_INVALID);
-
- return test_bit(bit_write, &tmp);
+ return svm_test_msr_bitmap_write(msrpm, msr);
}
-static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
- u32 msr, int read, int write)
+void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
{
struct vcpu_svm *svm = to_svm(vcpu);
- u8 bit_read, bit_write;
- unsigned long tmp;
- u32 offset;
+ void *msrpm = svm->msrpm;
- /*
- * If this warning triggers extend the direct_access_msrs list at the
- * beginning of the file
- */
- WARN_ON(!valid_msr_intercept(msr));
-
- /* Enforce non allowed MSRs to trap */
- if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
- read = 0;
-
- if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
- write = 0;
-
- offset = svm_msrpm_offset(msr);
- bit_read = 2 * (msr & 0x0f);
- bit_write = 2 * (msr & 0x0f) + 1;
- tmp = msrpm[offset];
-
- BUG_ON(offset == MSR_INVALID);
-
- read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp);
- write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
+ /* Don't disable interception for MSRs userspace wants to handle. */
+ if (type & MSR_TYPE_R) {
+ if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
+ svm_clear_msr_bitmap_read(msrpm, msr);
+ else
+ svm_set_msr_bitmap_read(msrpm, msr);
+ }
- msrpm[offset] = tmp;
+ if (type & MSR_TYPE_W) {
+ if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
+ svm_clear_msr_bitmap_write(msrpm, msr);
+ else
+ svm_set_msr_bitmap_write(msrpm, msr);
+ }
svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
svm->nested.force_msr_bitmap_recalc = true;
}
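Note the asymmetry in the helper above: set == true always forces the
intercept, whereas set == false is only a request, honored solely when
userspace's MSR filter (kvm_msr_allowed()) also permits the access. Callers
therefore never need to consult the filter themselves; a typical call is
simply:

	/* Request read/write pass-through; the helper re-applies the
	 * userspace MSR filter, so the MSR stays intercepted if the
	 * filter denies the access. */
	svm_set_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW, false);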
-void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
- int read, int write)
-{
- set_shadow_msr_intercept(vcpu, msr, read, write);
- set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
-}
-
-u32 *svm_vcpu_alloc_msrpm(void)
+void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
{
- unsigned int order = get_order(MSRPM_SIZE);
- struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
- u32 *msrpm;
+ unsigned int order = get_order(size);
+ struct page *pages = alloc_pages(gfp_mask, order);
+ void *pm;
if (!pages)
return NULL;
- msrpm = page_address(pages);
- memset(msrpm, 0xff, PAGE_SIZE * (1 << order));
+ /*
+ * Set all bits in the permissions map so that all MSR and I/O accesses
+ * are intercepted by default.
+ */
+ pm = page_address(pages);
+ memset(pm, 0xff, PAGE_SIZE * (1 << order));
- return msrpm;
+ return pm;
}
-void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
+static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
{
- int i;
+ bool intercept = !(to_svm(vcpu)->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
- for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
- if (!direct_access_msrs[i].always)
- continue;
- set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
- }
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHFROMIP, MSR_TYPE_RW, intercept);
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHTOIP, MSR_TYPE_RW, intercept);
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTINTFROMIP, MSR_TYPE_RW, intercept);
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTINTTOIP, MSR_TYPE_RW, intercept);
+
+ if (sev_es_guest(vcpu->kvm))
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept);
}
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
{
+ static const u32 x2avic_passthrough_msrs[] = {
+ X2APIC_MSR(APIC_ID),
+ X2APIC_MSR(APIC_LVR),
+ X2APIC_MSR(APIC_TASKPRI),
+ X2APIC_MSR(APIC_ARBPRI),
+ X2APIC_MSR(APIC_PROCPRI),
+ X2APIC_MSR(APIC_EOI),
+ X2APIC_MSR(APIC_RRR),
+ X2APIC_MSR(APIC_LDR),
+ X2APIC_MSR(APIC_DFR),
+ X2APIC_MSR(APIC_SPIV),
+ X2APIC_MSR(APIC_ISR),
+ X2APIC_MSR(APIC_TMR),
+ X2APIC_MSR(APIC_IRR),
+ X2APIC_MSR(APIC_ESR),
+ X2APIC_MSR(APIC_ICR),
+ X2APIC_MSR(APIC_ICR2),
+
+ /*
+ * Note! Always intercept LVTT, as TSC-deadline timer mode
+ * isn't virtualized by hardware, and the CPU will generate a
+ * #GP instead of a #VMEXIT.
+ */
+ X2APIC_MSR(APIC_LVTTHMR),
+ X2APIC_MSR(APIC_LVTPC),
+ X2APIC_MSR(APIC_LVT0),
+ X2APIC_MSR(APIC_LVT1),
+ X2APIC_MSR(APIC_LVTERR),
+ X2APIC_MSR(APIC_TMICT),
+ X2APIC_MSR(APIC_TMCCT),
+ X2APIC_MSR(APIC_TDCR),
+ };
int i;
if (intercept == svm->x2avic_msrs_intercepted)
@@ -902,84 +778,79 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
if (!x2avic_enabled)
return;
- for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
- int index = direct_access_msrs[i].index;
-
- if ((index < APIC_BASE_MSR) ||
- (index > APIC_BASE_MSR + 0xff))
- continue;
- set_msr_interception(&svm->vcpu, svm->msrpm, index,
- !intercept, !intercept);
- }
+ for (i = 0; i < ARRAY_SIZE(x2avic_passthrough_msrs); i++)
+ svm_set_intercept_for_msr(&svm->vcpu, x2avic_passthrough_msrs[i],
+ MSR_TYPE_RW, intercept);
svm->x2avic_msrs_intercepted = intercept;
}
-void svm_vcpu_free_msrpm(u32 *msrpm)
+void svm_vcpu_free_msrpm(void *msrpm)
{
__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
}
-static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
+static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- u32 i;
- /*
- * Set intercept permissions for all direct access MSRs again. They
- * will automatically get filtered through the MSR filter, so we are
- * back in sync after this.
- */
- for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
- u32 msr = direct_access_msrs[i].index;
- u32 read = test_bit(i, svm->shadow_msr_intercept.read);
- u32 write = test_bit(i, svm->shadow_msr_intercept.write);
-
- set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
- }
-}
-
-static void add_msr_offset(u32 offset)
-{
- int i;
-
- for (i = 0; i < MSRPM_OFFSETS; ++i) {
+ svm_disable_intercept_for_msr(vcpu, MSR_STAR, MSR_TYPE_RW);
+ svm_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
- /* Offset already in list? */
- if (msrpm_offsets[i] == offset)
- return;
+#ifdef CONFIG_X86_64
+ svm_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
+ svm_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
+ svm_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+ svm_disable_intercept_for_msr(vcpu, MSR_LSTAR, MSR_TYPE_RW);
+ svm_disable_intercept_for_msr(vcpu, MSR_CSTAR, MSR_TYPE_RW);
+ svm_disable_intercept_for_msr(vcpu, MSR_SYSCALL_MASK, MSR_TYPE_RW);
+#endif
- /* Slot used by another offset? */
- if (msrpm_offsets[i] != MSR_INVALID)
- continue;
+ if (lbrv)
+ svm_recalc_lbr_msr_intercepts(vcpu);
- /* Add offset to list */
- msrpm_offsets[i] = offset;
+ if (cpu_feature_enabled(X86_FEATURE_IBPB))
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
+ !guest_has_pred_cmd_msr(vcpu));
- return;
- }
+ if (cpu_feature_enabled(X86_FEATURE_FLUSH_L1D))
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));
/*
- * If this BUG triggers the msrpm_offsets table has an overflow. Just
- * increase MSRPM_OFFSETS in this case.
+ * Disable interception of SPEC_CTRL if KVM doesn't need to manually
+ * context switch the MSR (SPEC_CTRL is virtualized by the CPU), or if
+ * the guest has a non-zero SPEC_CTRL value, i.e. is likely actively
+ * using SPEC_CTRL.
*/
- BUG();
-}
-
-static void init_msrpm_offsets(void)
-{
- int i;
-
- memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
+ if (cpu_feature_enabled(X86_FEATURE_V_SPEC_CTRL))
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
+ !guest_has_spec_ctrl_msr(vcpu));
+ else
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
+ !svm->spec_ctrl);
- for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
- u32 offset;
+ /*
+ * Intercept SYSENTER_EIP and SYSENTER_ESP when emulating an Intel CPU,
+ * as AMD hardware only stores 32 bits, whereas Intel CPUs track 64 bits.
+ */
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW,
+ guest_cpuid_is_intel_compatible(vcpu));
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW,
+ guest_cpuid_is_intel_compatible(vcpu));
+
+ if (kvm_aperfmperf_in_guest(vcpu->kvm)) {
+ svm_disable_intercept_for_msr(vcpu, MSR_IA32_APERF, MSR_TYPE_R);
+ svm_disable_intercept_for_msr(vcpu, MSR_IA32_MPERF, MSR_TYPE_R);
+ }
- offset = svm_msrpm_offset(direct_access_msrs[i].index);
- BUG_ON(offset == MSR_INVALID);
+ if (sev_es_guest(vcpu->kvm))
+ sev_es_recalc_msr_intercepts(vcpu);
- add_msr_offset(offset);
- }
+ /*
+ * x2APIC intercepts are modified on-demand and cannot be filtered by
+ * userspace.
+ */
}
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
@@ -998,13 +869,7 @@ void svm_enable_lbrv(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
-
- if (sev_es_guest(vcpu->kvm))
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1);
+ svm_recalc_lbr_msr_intercepts(vcpu);
/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
if (is_guest_mode(vcpu))
@@ -1016,12 +881,8 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
-
svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
+ svm_recalc_lbr_msr_intercepts(vcpu);
/*
* Move the LBR msrs back to the vmcb01 to avoid copying them
@@ -1176,9 +1037,10 @@ void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
}
/* Evaluate instruction intercepts that depend on guest CPUID features. */
-static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
- struct vcpu_svm *svm)
+static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
/*
* Intercept INVPCID if shadow paging is enabled to sync/free shadow
* roots, or if INVPCID is disabled in the guest to inject #UD.
@@ -1197,24 +1059,11 @@ static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
else
svm_set_intercept(svm, INTERCEPT_RDTSCP);
}
-}
-
-static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
if (guest_cpuid_is_intel_compatible(vcpu)) {
- /*
- * We must intercept SYSENTER_EIP and SYSENTER_ESP
- * accesses because the processor only stores 32 bits.
- * For the same reason we cannot use virtual VMLOAD/VMSAVE.
- */
svm_set_intercept(svm, INTERCEPT_VMLOAD);
svm_set_intercept(svm, INTERCEPT_VMSAVE);
svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
-
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0);
} else {
/*
* If hardware supports Virtual VMLOAD VMSAVE then enable it
@@ -1225,12 +1074,15 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
svm_clr_intercept(svm, INTERCEPT_VMSAVE);
svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
}
- /* No need to intercept these MSRs */
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
}
}
+static void svm_recalc_intercepts_after_set_cpuid(struct kvm_vcpu *vcpu)
+{
+ svm_recalc_instruction_intercepts(vcpu);
+ svm_recalc_msr_intercepts(vcpu);
+}
+
static void init_vmcb(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1353,15 +1205,6 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
svm_clr_intercept(svm, INTERCEPT_PAUSE);
}
- svm_recalc_instruction_intercepts(vcpu, svm);
-
- /*
- * If the host supports V_SPEC_CTRL then disable the interception
- * of MSR_IA32_SPEC_CTRL.
- */
- if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
-
if (kvm_vcpu_apicv_active(vcpu))
avic_init_vmcb(svm, vmcb);
@@ -1381,7 +1224,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
sev_init_vmcb(svm);
svm_hv_init_vmcb(vmcb);
- init_vmcb_after_set_cpuid(vcpu);
+
+ svm_recalc_intercepts_after_set_cpuid(vcpu);
vmcb_mark_all_dirty(vmcb);
@@ -1392,8 +1236,6 @@ static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm_vcpu_init_msrpm(vcpu, svm->msrpm);
-
svm_init_osvw(vcpu);
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
@@ -1490,13 +1332,15 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ WARN_ON_ONCE(!list_empty(&svm->ir_list));
+
svm_leave_nested(vcpu);
svm_free_nested(svm);
sev_free_vcpu(vcpu);
__free_page(__sme_pa_to_page(svm->vmcb01.pa));
- __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
+ svm_vcpu_free_msrpm(svm->msrpm);
}
#ifdef CONFIG_CPU_MITIGATIONS
@@ -2880,12 +2724,11 @@ static int svm_get_feature_msr(u32 msr, u64 *data)
return 0;
}
-static bool
-sev_es_prevent_msr_access(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+static bool sev_es_prevent_msr_access(struct kvm_vcpu *vcpu,
+ struct msr_data *msr_info)
{
return sev_es_guest(vcpu->kvm) &&
vcpu->arch.guest_state_protected &&
- svm_msrpm_offset(msr_info->index) != MSR_INVALID &&
!msr_write_intercepted(vcpu, msr_info->index);
}
@@ -3116,11 +2959,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
*
* For nested:
* The handling of the MSR bitmap for L2 guests is done in
- * nested_svm_vmrun_msrpm.
+ * nested_svm_merge_msrpm().
* We update the L1 MSR bit as well since it will end up
* touching the MSR anyway now.
*/
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
+ svm_disable_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
break;
case MSR_AMD64_VIRT_SPEC_CTRL:
if (!msr->host_initiated &&
@@ -3186,8 +3029,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
/*
* TSC_AUX is usually changed only during boot and never read
- * directly. Intercept TSC_AUX instead of exposing it to the
- * guest via direct_access_msrs, and switch it via user return.
+ * directly. Intercept TSC_AUX and switch it via user return.
*/
preempt_disable();
ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
@@ -4389,9 +4231,9 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
guest_state_exit_irqoff();
}
-static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
- bool force_immediate_exit)
+static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
{
+ bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
struct vcpu_svm *svm = to_svm(vcpu);
bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
@@ -4438,10 +4280,13 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
svm_hv_update_vp_id(svm->vmcb, vcpu);
/*
- * Run with all-zero DR6 unless needed, so that we can get the exact cause
- * of a #DB.
+ * Run with all-zero DR6 unless the guest can write DR6 freely, so that
+ * KVM can get the exact cause of a #DB. Note, loading guest DR6 from
+ * KVM's snapshot is only necessary when DR accesses won't exit.
*/
- if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
+ if (unlikely(run_flags & KVM_RUN_LOAD_GUEST_DR6))
+ svm_set_dr6(vcpu, vcpu->arch.dr6);
+ else if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
clgi();
@@ -4621,20 +4466,10 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
if (guest_cpuid_is_intel_compatible(vcpu))
guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
- svm_recalc_instruction_intercepts(vcpu, svm);
-
- if (boot_cpu_has(X86_FEATURE_IBPB))
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0,
- !!guest_has_pred_cmd_msr(vcpu));
-
- if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
- !!guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));
-
if (sev_guest(vcpu->kvm))
sev_vcpu_after_set_cpuid(svm);
- init_vmcb_after_set_cpuid(vcpu);
+ svm_recalc_intercepts_after_set_cpuid(vcpu);
}
static bool svm_has_wbinvd_exit(void)
@@ -5185,7 +5020,7 @@ static int svm_vm_init(struct kvm *kvm)
}
if (!pause_filter_count || !pause_filter_thresh)
- kvm->arch.pause_in_guest = true;
+ kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
if (enable_apicv) {
int ret = avic_vm_init(kvm);
@@ -5252,7 +5087,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.set_idt = svm_set_idt,
.get_gdt = svm_get_gdt,
.set_gdt = svm_set_gdt,
- .set_dr6 = svm_set_dr6,
.set_dr7 = svm_set_dr7,
.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
.cache_reg = svm_cache_reg,
@@ -5337,7 +5171,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
- .msr_filter_changed = svm_msr_filter_changed,
+ .recalc_msr_intercepts = svm_recalc_msr_intercepts,
.complete_emulated_msr = svm_complete_emulated_msr,
.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
@@ -5473,11 +5307,8 @@ static __init void svm_set_cpu_caps(void)
static __init int svm_hardware_setup(void)
{
- int cpu;
- struct page *iopm_pages;
void *iopm_va;
- int r;
- unsigned int order = get_order(IOPM_SIZE);
+ int cpu, r;
/*
* NX is required for shadow paging and for NPT if the NX huge pages
@@ -5489,17 +5320,6 @@ static __init int svm_hardware_setup(void)
}
kvm_enable_efer_bits(EFER_NX);
- iopm_pages = alloc_pages(GFP_KERNEL, order);
-
- if (!iopm_pages)
- return -ENOMEM;
-
- iopm_va = page_address(iopm_pages);
- memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
- iopm_base = __sme_page_pa(iopm_pages);
-
- init_msrpm_offsets();
-
kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
XFEATURE_MASK_BNDCSR);
@@ -5533,6 +5353,10 @@ static __init int svm_hardware_setup(void)
if (nested) {
pr_info("Nested Virtualization enabled\n");
kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
+
+ r = nested_svm_init_msrpm_merge_offsets();
+ if (r)
+ return r;
}
/*
@@ -5564,6 +5388,13 @@ static __init int svm_hardware_setup(void)
else
pr_info("LBR virtualization supported\n");
}
+
+ iopm_va = svm_alloc_permissions_map(IOPM_SIZE, GFP_KERNEL);
+ if (!iopm_va)
+ return -ENOMEM;
+
+ iopm_base = __sme_set(__pa(iopm_va));
+
/*
* Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
* may be modified by svm_adjust_mmio_mask()), as well as nrips.
@@ -5581,6 +5412,7 @@ static __init int svm_hardware_setup(void)
enable_apicv = avic = avic && avic_hardware_setup();
if (!enable_apicv) {
+ enable_ipiv = false;
svm_x86_ops.vcpu_blocking = NULL;
svm_x86_ops.vcpu_unblocking = NULL;
svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;
@@ -5662,6 +5494,8 @@ static int __init svm_init(void)
{
int r;
+ KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_svm);
+
__unused_size_checks();
if (!kvm_is_svm_supported())
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index e6f3c6a153a0..58b9d168e0c8 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -44,9 +44,6 @@ static inline struct page *__sme_pa_to_page(unsigned long pa)
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2
-#define MAX_DIRECT_ACCESS_MSRS 48
-#define MSRPM_OFFSETS 32
-extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int nrips;
extern int vgif;
@@ -113,6 +110,7 @@ struct kvm_sev_info {
void *guest_req_buf; /* Bounce buffer for SNP Guest Request input */
void *guest_resp_buf; /* Bounce buffer for SNP Guest Request output */
struct mutex guest_req_mutex; /* Must acquire before using bounce buffers */
+ cpumask_var_t have_run_cpus; /* CPUs that have done VMRUN for this VM. */
};
#define SEV_POLICY_NODBG BIT_ULL(0)
@@ -123,8 +121,8 @@ struct kvm_svm {
/* Struct members for AVIC */
u32 avic_vm_id;
- struct page *avic_logical_id_table_page;
- struct page *avic_physical_id_table_page;
+ u32 *avic_logical_id_table;
+ u64 *avic_physical_id_table;
struct hlist_node hnode;
struct kvm_sev_info sev_info;
@@ -189,8 +187,11 @@ struct svm_nested_state {
u64 vmcb12_gpa;
u64 last_vmcb12_gpa;
- /* These are the merged vectors */
- u32 *msrpm;
+ /*
+ * The MSR permissions map used for vmcb02, which is the merge result
+ * of vmcb01 and vmcb12
+ */
+ void *msrpm;
/* A VMRUN has started but has not yet been performed, so
* we cannot inject a nested vmexit yet. */
@@ -271,7 +272,7 @@ struct vcpu_svm {
*/
u64 virt_spec_ctrl;
- u32 *msrpm;
+ void *msrpm;
ulong nmi_iret_rip;
@@ -306,24 +307,26 @@ struct vcpu_svm {
u32 ldr_reg;
u32 dfr_reg;
- struct page *avic_backing_page;
- u64 *avic_physical_id_cache;
+
+ /* This is essentially a shadow of the vCPU's actual entry in the
+ * Physical ID table that is programmed into the VMCB, i.e. that is
+ * seen by the CPU. If IPI virtualization is disabled, IsRunning is
+ * only ever set in the shadow, i.e. is never propagated to the "real"
+ * table, so that hardware never sees IsRunning=1.
+ */
+ u64 avic_physical_id_entry;
/*
- * Per-vcpu list of struct amd_svm_iommu_ir:
- * This is used mainly to store interrupt remapping information used
- * when update the vcpu affinity. This avoids the need to scan for
- * IRTE and try to match ga_tag in the IOMMU driver.
+ * Per-vCPU list of irqfds that are eligible to post IRQs directly to
+ * the vCPU (a.k.a. device posted IRQs, a.k.a. IRQ bypass). The list
+ * is used to reconfigure IRTEs when the vCPU is loaded/put (to set the
+ * target pCPU), when AVIC is toggled on/off (to (de)activate bypass),
+ * and if the irqfd becomes ineligible for posting (to put the IRTE
+ * back into remapped mode).
*/
struct list_head ir_list;
spinlock_t ir_list_lock;
- /* Save desired MSR intercept (read: pass-through) state */
- struct {
- DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
- DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
- } shadow_msr_intercept;
-
struct vcpu_sev_es_state sev_es;
bool guest_state_loaded;
@@ -613,17 +616,74 @@ static inline void svm_vmgexit_no_action(struct vcpu_svm *svm, u64 data)
svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
}
-/* svm.c */
-#define MSR_INVALID 0xffffffffU
+/*
+ * The MSRPM is 8KiB in size, divided into four 2KiB ranges (the fourth range
+ * is reserved). Each MSR within a range is covered by two bits, one each for
+ * read (bit 0) and write (bit 1), where a bit value of '1' means intercepted.
+ */
+#define SVM_MSRPM_BYTES_PER_RANGE 2048
+#define SVM_BITS_PER_MSR 2
+#define SVM_MSRS_PER_BYTE (BITS_PER_BYTE / SVM_BITS_PER_MSR)
+#define SVM_MSRS_PER_RANGE (SVM_MSRPM_BYTES_PER_RANGE * SVM_MSRS_PER_BYTE)
+static_assert(SVM_MSRS_PER_RANGE == 8192);
+#define SVM_MSRPM_OFFSET_MASK (SVM_MSRS_PER_RANGE - 1)
+
+static __always_inline int svm_msrpm_bit_nr(u32 msr)
+{
+ int range_nr;
+
+ switch (msr & ~SVM_MSRPM_OFFSET_MASK) {
+ case 0:
+ range_nr = 0;
+ break;
+ case 0xc0000000:
+ range_nr = 1;
+ break;
+ case 0xc0010000:
+ range_nr = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +
+ (msr & SVM_MSRPM_OFFSET_MASK) * SVM_BITS_PER_MSR;
+}
+
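A worked example of the arithmetic, using MSR_LSTAR (0xc0000082): masking
with ~SVM_MSRPM_OFFSET_MASK leaves 0xc0000000, which selects range 1, and
the low 13 bits give offset 0x82 = 130 MSRs into that range:

	#include <assert.h>

	static void example_lstar_bit_nr(void)
	{
		/* range 1 base in bits, plus SVM_BITS_PER_MSR per MSR */
		int bit_nr = 1 * 2048 * 8 + 0x82 * 2;

		assert(bit_nr == 16644);	/* read bit; the write bit is 16645 */
		assert(bit_nr / 8 == 2080);	/* byte 2080 of the 8KiB map */
		assert(bit_nr % 8 == 4);	/* read = bit 4, write = bit 5 */
	}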
+#define __BUILD_SVM_MSR_BITMAP_HELPER(rtype, action, bitop, access, bit_rw) \
+static inline rtype svm_##action##_msr_bitmap_##access(unsigned long *bitmap, \
+ u32 msr) \
+{ \
+ int bit_nr; \
+ \
+ bit_nr = svm_msrpm_bit_nr(msr); \
+ if (bit_nr < 0) \
+ return (rtype)true; \
+ \
+ return bitop##_bit(bit_nr + bit_rw, bitmap); \
+}
+
+#define BUILD_SVM_MSR_BITMAP_HELPERS(ret_type, action, bitop) \
+ __BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, read, 0) \
+ __BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 1)
+
+BUILD_SVM_MSR_BITMAP_HELPERS(bool, test, test)
+BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear)
+BUILD_SVM_MSR_BITMAP_HELPERS(void, set, __set)
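For reference, the write-testing flavor generated by the macros above
roughly expands to the following (bit_rw is 1 for write, 0 for read, and
out-of-range MSRs report "intercepted" so they are never blindly passed
through):

	static inline bool svm_test_msr_bitmap_write(unsigned long *bitmap, u32 msr)
	{
		int bit_nr = svm_msrpm_bit_nr(msr);

		if (bit_nr < 0)
			return true;	/* unknown MSR: treat as intercepted */

		return test_bit(bit_nr + 1, bitmap);	/* +1 selects the write bit */
	}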
#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR)
+/* svm.c */
extern bool dump_invalid_vmcb;
-u32 svm_msrpm_offset(u32 msr);
-u32 *svm_vcpu_alloc_msrpm(void);
-void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
-void svm_vcpu_free_msrpm(u32 *msrpm);
+void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask);
+
+static inline void *svm_vcpu_alloc_msrpm(void)
+{
+ return svm_alloc_permissions_map(MSRPM_SIZE, GFP_KERNEL_ACCOUNT);
+}
+
+void svm_vcpu_free_msrpm(void *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);
@@ -643,6 +703,20 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
int trig_mode, int vec);
+void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);
+
+static inline void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
+ u32 msr, int type)
+{
+ svm_set_intercept_for_msr(vcpu, msr, type, false);
+}
+
+static inline void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
+ u32 msr, int type)
+{
+ svm_set_intercept_for_msr(vcpu, msr, type, true);
+}
+
/* nested.c */
#define NESTED_EXIT_HOST 0 /* Exit handled on host level */
@@ -671,6 +745,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}
+int __init nested_svm_init_msrpm_merge_offsets(void);
+
int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
@@ -721,7 +797,8 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) | \
BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) | \
BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) | \
- BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED) \
+ BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED) | \
+ BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG) \
)
bool avic_hardware_setup(void);
@@ -736,8 +813,9 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
-int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set);
+int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
+ unsigned int host_irq, uint32_t guest_irq,
+ struct kvm_vcpu *vcpu, u32 vector);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
@@ -752,6 +830,7 @@ void sev_init_vmcb(struct vcpu_svm *svm);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
+void sev_es_recalc_msr_intercepts(struct kvm_vcpu *vcpu);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index ba736cbb0587..57d79fd31df0 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -260,6 +260,86 @@ TRACE_EVENT(kvm_cpuid,
__entry->used_max_basic ? ", used max basic" : "")
);
+#define kvm_deliver_mode \
+ {0x0, "Fixed"}, \
+ {0x1, "LowPrio"}, \
+ {0x2, "SMI"}, \
+ {0x3, "Res3"}, \
+ {0x4, "NMI"}, \
+ {0x5, "INIT"}, \
+ {0x6, "SIPI"}, \
+ {0x7, "ExtINT"}
+
+#ifdef CONFIG_KVM_IOAPIC
+TRACE_EVENT(kvm_ioapic_set_irq,
+ TP_PROTO(__u64 e, int pin, bool coalesced),
+ TP_ARGS(e, pin, coalesced),
+
+ TP_STRUCT__entry(
+ __field( __u64, e )
+ __field( int, pin )
+ __field( bool, coalesced )
+ ),
+
+ TP_fast_assign(
+ __entry->e = e;
+ __entry->pin = pin;
+ __entry->coalesced = coalesced;
+ ),
+
+ TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
+ __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
+ __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
+ (__entry->e & (1<<11)) ? "logical" : "physical",
+ (__entry->e & (1<<15)) ? "level" : "edge",
+ (__entry->e & (1<<16)) ? "|masked" : "",
+ __entry->coalesced ? " (coalesced)" : "")
+);
+
+TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
+ TP_PROTO(__u64 e),
+ TP_ARGS(e),
+
+ TP_STRUCT__entry(
+ __field( __u64, e )
+ ),
+
+ TP_fast_assign(
+ __entry->e = e;
+ ),
+
+ TP_printk("dst %x vec %u (%s|%s|%s%s)",
+ (u8)(__entry->e >> 56), (u8)__entry->e,
+ __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
+ (__entry->e & (1<<11)) ? "logical" : "physical",
+ (__entry->e & (1<<15)) ? "level" : "edge",
+ (__entry->e & (1<<16)) ? "|masked" : "")
+);
+#endif
+
+TRACE_EVENT(kvm_msi_set_irq,
+ TP_PROTO(__u64 address, __u64 data),
+ TP_ARGS(address, data),
+
+ TP_STRUCT__entry(
+ __field( __u64, address )
+ __field( __u64, data )
+ ),
+
+ TP_fast_assign(
+ __entry->address = address;
+ __entry->data = data;
+ ),
+
+ TP_printk("dst %llx vec %u (%s|%s|%s%s)",
+ (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
+ (u8)__entry->data,
+ __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
+ (__entry->address & (1<<2)) ? "logical" : "physical",
+ (__entry->data & (1<<15)) ? "level" : "edge",
+ (__entry->address & (1<<3)) ? "|rh" : "")
+);
+
#define AREG(x) { APIC_##x, "APIC_" #x }
#define kvm_trace_symbol_apic \
@@ -1096,37 +1176,32 @@ TRACE_EVENT(kvm_smm_transition,
* Tracepoint for VT-d posted-interrupts and AMD-Vi Guest Virtual APIC.
*/
TRACE_EVENT(kvm_pi_irte_update,
- TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
- unsigned int gsi, unsigned int gvec,
- u64 pi_desc_addr, bool set),
- TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),
+ TP_PROTO(unsigned int host_irq, struct kvm_vcpu *vcpu,
+ unsigned int gsi, unsigned int gvec, bool set),
+ TP_ARGS(host_irq, vcpu, gsi, gvec, set),
TP_STRUCT__entry(
__field( unsigned int, host_irq )
- __field( unsigned int, vcpu_id )
+ __field( int, vcpu_id )
__field( unsigned int, gsi )
__field( unsigned int, gvec )
- __field( u64, pi_desc_addr )
__field( bool, set )
),
TP_fast_assign(
__entry->host_irq = host_irq;
- __entry->vcpu_id = vcpu_id;
+ __entry->vcpu_id = vcpu ? vcpu->vcpu_id : -1;
__entry->gsi = gsi;
__entry->gvec = gvec;
- __entry->pi_desc_addr = pi_desc_addr;
__entry->set = set;
),
- TP_printk("PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
- "gvec: 0x%x, pi_desc_addr: 0x%llx",
+ TP_printk("PI is %s for irq %u, vcpu %d, gsi: 0x%x, gvec: 0x%x",
__entry->set ? "enabled and being updated" : "disabled",
__entry->host_irq,
__entry->vcpu_id,
__entry->gsi,
- __entry->gvec,
- __entry->pi_desc_addr)
+ __entry->gvec)
);
/*
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index cb6588238f46..5316c27f6099 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -15,7 +15,6 @@ extern bool __read_mostly enable_ept;
extern bool __read_mostly enable_unrestricted_guest;
extern bool __read_mostly enable_ept_ad_bits;
extern bool __read_mostly enable_pml;
-extern bool __read_mostly enable_ipiv;
extern int __read_mostly pt_mode;
#define PT_MODE_SYSTEM 0
diff --git a/arch/x86/kvm/vmx/common.h b/arch/x86/kvm/vmx/common.h
index a0c5e8781c33..bc5ece76533a 100644
--- a/arch/x86/kvm/vmx/common.h
+++ b/arch/x86/kvm/vmx/common.h
@@ -53,8 +53,6 @@ struct vcpu_vt {
#ifdef CONFIG_X86_64
u64 msr_host_kernel_gs_base;
#endif
-
- unsigned long host_debugctlmsr;
};
#ifdef CONFIG_KVM_INTEL_TDX
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index d1e02e567b57..dbab1c15b0cd 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -29,40 +29,8 @@ static __init int vt_hardware_setup(void)
if (ret)
return ret;
- /*
- * Update vt_x86_ops::vm_size here so it is ready before
- * kvm_ops_update() is called in kvm_x86_vendor_init().
- *
- * Note, the actual bringing up of TDX must be done after
- * kvm_ops_update() because enabling TDX requires enabling
- * hardware virtualization first, i.e., all online CPUs must
- * be in post-VMXON state. This means the @vm_size here
- * may be updated to TDX's size but TDX may fail to enable
- * at later time.
- *
- * The VMX/VT code could update kvm_x86_ops::vm_size again
- * after bringing up TDX, but this would require exporting
- * either kvm_x86_ops or kvm_ops_update() from the base KVM
- * module, which looks overkill. Anyway, the worst case here
- * is KVM may allocate couple of more bytes than needed for
- * each VM.
- */
- if (enable_tdx) {
- vt_x86_ops.vm_size = max_t(unsigned int, vt_x86_ops.vm_size,
- sizeof(struct kvm_tdx));
- /*
- * Note, TDX may fail to initialize in a later time in
- * vt_init(), in which case it is not necessary to setup
- * those callbacks. But making them valid here even
- * when TDX fails to init later is fine because those
- * callbacks won't be called if the VM isn't TDX guest.
- */
- vt_x86_ops.link_external_spt = tdx_sept_link_private_spt;
- vt_x86_ops.set_external_spte = tdx_sept_set_private_spte;
- vt_x86_ops.free_external_spt = tdx_sept_free_private_spt;
- vt_x86_ops.remove_external_spte = tdx_sept_remove_private_spte;
- vt_x86_ops.protected_apic_has_interrupt = tdx_protected_apic_has_interrupt;
- }
+ if (enable_tdx)
+ tdx_hardware_setup();
return 0;
}
@@ -175,12 +143,12 @@ static int vt_vcpu_pre_run(struct kvm_vcpu *vcpu)
return vmx_vcpu_pre_run(vcpu);
}
-static fastpath_t vt_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+static fastpath_t vt_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
{
if (is_td_vcpu(vcpu))
- return tdx_vcpu_run(vcpu, force_immediate_exit);
+ return tdx_vcpu_run(vcpu, run_flags);
- return vmx_vcpu_run(vcpu, force_immediate_exit);
+ return vmx_vcpu_run(vcpu, run_flags);
}
static int vt_handle_exit(struct kvm_vcpu *vcpu,
@@ -220,7 +188,7 @@ static int vt_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return vmx_get_msr(vcpu, msr_info);
}
-static void vt_msr_filter_changed(struct kvm_vcpu *vcpu)
+static void vt_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
{
/*
* TDX doesn't allow VMM to configure interception of MSR accesses.
@@ -231,7 +199,7 @@ static void vt_msr_filter_changed(struct kvm_vcpu *vcpu)
if (is_td_vcpu(vcpu))
return;
- vmx_msr_filter_changed(vcpu);
+ vmx_recalc_msr_intercepts(vcpu);
}
static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
@@ -489,14 +457,6 @@ static void vt_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
vmx_set_gdt(vcpu, dt);
}
-static void vt_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
-{
- if (is_td_vcpu(vcpu))
- return;
-
- vmx_set_dr6(vcpu, val);
-}
-
static void vt_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
if (is_td_vcpu(vcpu))
@@ -923,6 +883,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.vcpu_load = vt_op(vcpu_load),
.vcpu_put = vt_op(vcpu_put),
+ .HOST_OWNED_DEBUGCTL = VMX_HOST_OWNED_DEBUGCTL_BITS,
+
.update_exception_bitmap = vt_op(update_exception_bitmap),
.get_feature_msr = vmx_get_feature_msr,
.get_msr = vt_op(get_msr),
@@ -943,7 +905,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.set_idt = vt_op(set_idt),
.get_gdt = vt_op(get_gdt),
.set_gdt = vt_op(set_gdt),
- .set_dr6 = vt_op(set_dr6),
.set_dr7 = vt_op(set_dr7),
.sync_dirty_debug_regs = vt_op(sync_dirty_debug_regs),
.cache_reg = vt_op(cache_reg),
@@ -1014,7 +975,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.nested_ops = &vmx_nested_ops,
.pi_update_irte = vmx_pi_update_irte,
- .pi_start_assignment = vmx_pi_start_assignment,
+ .pi_start_bypass = vmx_pi_start_bypass,
#ifdef CONFIG_X86_64
.set_hv_timer = vt_op(set_hv_timer),
@@ -1034,7 +995,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.apic_init_signal_blocked = vt_op(apic_init_signal_blocked),
.migrate_timers = vmx_migrate_timers,
- .msr_filter_changed = vt_op(msr_filter_changed),
+ .recalc_msr_intercepts = vt_op(recalc_msr_intercepts),
.complete_emulated_msr = vt_op(complete_emulated_msr),
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 7211c71d4241..b8ea1969113d 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -715,6 +715,12 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
MSR_IA32_FLUSH_CMD, MSR_TYPE_W);
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_APERF, MSR_TYPE_R);
+
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_MPERF, MSR_TYPE_R);
+
kvm_vcpu_unmap(vcpu, &map);
vmx->nested.force_msr_bitmap_recalc = false;
@@ -2663,10 +2669,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if (vmx->nested.nested_run_pending &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
- vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
+ vmx_guest_debugctl_write(vcpu, vmcs12->guest_ia32_debugctl &
+ vmx_get_supported_debugctl(vcpu, false));
} else {
kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
- vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl);
+ vmx_guest_debugctl_write(vcpu, vmx->nested.pre_vmenter_debugctl);
}
if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
@@ -3156,7 +3163,8 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
return -EINVAL;
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
- CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
+ (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) ||
+ CC(!vmx_is_valid_debugctl(vcpu, vmcs12->guest_ia32_debugctl, false))))
return -EINVAL;
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
@@ -3530,7 +3538,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
if (!vmx->nested.nested_run_pending ||
!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
- vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+ vmx->nested.pre_vmenter_debugctl = vmx_guest_debugctl_read();
if (kvm_mpx_supported() &&
(!vmx->nested.nested_run_pending ||
!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
@@ -4608,6 +4616,12 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
+ /*
+ * Note! Save DR7, but intentionally don't grab DEBUGCTL from vmcs02.
+ * Writes to DEBUGCTL that aren't intercepted by L1 are immediately
+ * propagated to vmcs12 (see vmx_set_msr()), as the value loaded into
+ * vmcs02 doesn't strictly track vmcs12.
+ */
if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
vmcs12->guest_dr7 = vcpu->arch.dr7;
@@ -4798,7 +4812,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
__vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);
kvm_set_dr(vcpu, 7, 0x400);
- vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
+ vmx_guest_debugctl_write(vcpu, 0);
if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
vmcs12->vm_exit_msr_load_count))
@@ -4853,6 +4867,9 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
}
+ /* Reload DEBUGCTL to ensure vmcs01 has a fresh FREEZE_IN_SMM value. */
+ vmx_reload_guest_debugctl(vcpu);
+
/*
* Note that calling vmx_set_{efer,cr0,cr4} is important as they
* handle a variety of side effects to KVM's software model.
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index bbf4509f32d0..0b173602821b 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -653,11 +653,11 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
*/
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
{
- u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);
+ u64 data = vmx_guest_debugctl_read();
if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
data &= ~DEBUGCTLMSR_LBR;
- vmcs_write64(GUEST_IA32_DEBUGCTL, data);
+ vmx_guest_debugctl_write(vcpu, data);
}
}
@@ -730,7 +730,7 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
if (!lbr_desc->event) {
vmx_disable_lbr_msrs_passthrough(vcpu);
- if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
+ if (vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR)
goto warn;
if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
goto warn;
@@ -752,7 +752,7 @@ warn:
static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{
- if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
+ if (!(vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR))
intel_pmu_release_guest_lbr_event(vcpu);
}
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 5c615e5845bf..4a6d9a17da23 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -2,6 +2,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kvm_host.h>
+#include <linux/kvm_irqfd.h>
#include <asm/irq_remapping.h>
#include <asm/cpu.h>
@@ -72,13 +73,10 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
/*
* If the vCPU wasn't on the wakeup list and wasn't migrated, then the
* full update can be skipped as neither the vector nor the destination
- * needs to be changed.
+ * needs to be changed. Clear SN even if there is no assigned device,
+ * again for simplicity.
*/
if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR && vcpu->cpu == cpu) {
- /*
- * Clear SN if it was set due to being preempted. Again, do
- * this even if there is no assigned device for simplicity.
- */
if (pi_test_and_clear_sn(pi_desc))
goto after_clear_sn;
return;
@@ -148,8 +146,13 @@ after_clear_sn:
static bool vmx_can_use_vtd_pi(struct kvm *kvm)
{
+ /*
+ * Note, reading the number of possible bypass IRQs can race with a
+ * bypass IRQ being attached to the VM. vmx_pi_start_bypass() ensures
+ * blocking vCPUs will see an elevated count or get KVM_REQ_UNBLOCK.
+ */
return irqchip_in_kernel(kvm) && kvm_arch_has_irq_bypass() &&
- kvm_arch_has_assigned_device(kvm);
+ READ_ONCE(kvm->arch.nr_possible_bypass_irqs);
}
/*
@@ -224,17 +227,23 @@ void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
if (!vmx_needs_pi_wakeup(vcpu))
return;
- if (kvm_vcpu_is_blocking(vcpu) &&
+ /*
+ * If the vCPU is blocking with IRQs enabled and ISN'T being preempted,
+ * enable the wakeup handler so that notification IRQ wakes the vCPU as
+ * expected. There is no need to enable the wakeup handler if the vCPU
+ * is preempted between setting its wait state and manually scheduling
+ * out, as the task is still runnable, i.e. doesn't need a wake event
+ * from KVM to be scheduled in.
+ *
+ * If the wakeup handler isn't being enabled, Suppress Notifications as
+ * the cost of propagating PIR.IRR to PID.ON is negligible compared to
+ * the cost of a spurious IRQ, and vCPU put/load is a slow path.
+ */
+ if (!vcpu->preempted && kvm_vcpu_is_blocking(vcpu) &&
((is_td_vcpu(vcpu) && tdx_interrupt_allowed(vcpu)) ||
(!is_td_vcpu(vcpu) && !vmx_interrupt_blocked(vcpu))))
pi_enable_wakeup_handler(vcpu);
-
- /*
- * Set SN when the vCPU is preempted. Note, the vCPU can both be seen
- * as blocking and preempted, e.g. if it's preempted between setting
- * its wait state and manually scheduling out.
- */
- if (vcpu->preempted)
+ else
pi_set_sn(pi_desc);
}
@@ -281,99 +290,30 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
/*
- * Bail out of the block loop if the VM has an assigned
- * device, but the blocking vCPU didn't reconfigure the
- * PI.NV to the wakeup vector, i.e. the assigned device
- * came along after the initial check in vmx_vcpu_pi_put().
+ * Kick all vCPUs when the first possible bypass IRQ is attached to a VM, as
+ * blocking vCPUs may be scheduled out without reconfiguring PID.NV to the wakeup
+ * vector, i.e. if the bypass IRQ came along after vmx_vcpu_pi_put().
*/
-void vmx_pi_start_assignment(struct kvm *kvm)
+void vmx_pi_start_bypass(struct kvm *kvm)
{
- if (!kvm_arch_has_irq_bypass())
+ if (WARN_ON_ONCE(!vmx_can_use_vtd_pi(kvm)))
return;
kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
}
-/*
- * vmx_pi_update_irte - set IRTE for Posted-Interrupts
- *
- * @kvm: kvm
- * @host_irq: host irq of the interrupt
- * @guest_irq: gsi of the interrupt
- * @set: set or unset PI
- * returns 0 on success, < 0 on failure
- */
-int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set)
+int vmx_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
+ unsigned int host_irq, uint32_t guest_irq,
+ struct kvm_vcpu *vcpu, u32 vector)
{
- struct kvm_kernel_irq_routing_entry *e;
- struct kvm_irq_routing_table *irq_rt;
- bool enable_remapped_mode = true;
- struct kvm_lapic_irq irq;
- struct kvm_vcpu *vcpu;
- struct vcpu_data vcpu_info;
- int idx, ret = 0;
-
- if (!vmx_can_use_vtd_pi(kvm))
- return 0;
-
- idx = srcu_read_lock(&kvm->irq_srcu);
- irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
- if (guest_irq >= irq_rt->nr_rt_entries ||
- hlist_empty(&irq_rt->map[guest_irq])) {
- pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
- guest_irq, irq_rt->nr_rt_entries);
- goto out;
+ if (vcpu) {
+ struct intel_iommu_pi_data pi_data = {
+ .pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)),
+ .vector = vector,
+ };
+
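+ /*
+ * Point the IRTE at the target vCPU's PI descriptor so the device
+ * posts interrupts directly to the vCPU.
+ */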
+ return irq_set_vcpu_affinity(host_irq, &pi_data);
+ } else {
+ return irq_set_vcpu_affinity(host_irq, NULL);
}
-
- hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
- if (e->type != KVM_IRQ_ROUTING_MSI)
- continue;
- /*
- * VT-d PI cannot support posting multicast/broadcast
- * interrupts to a vCPU, we still use interrupt remapping
- * for these kind of interrupts.
- *
- * For lowest-priority interrupts, we only support
- * those with single CPU as the destination, e.g. user
- * configures the interrupts via /proc/irq or uses
- * irqbalance to make the interrupts single-CPU.
- *
- * We will support full lowest-priority interrupt later.
- *
- * In addition, we can only inject generic interrupts using
- * the PI mechanism, refuse to route others through it.
- */
-
- kvm_set_msi_irq(kvm, e, &irq);
- if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
- !kvm_irq_is_postable(&irq))
- continue;
-
- vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
- vcpu_info.vector = irq.vector;
-
- trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
- vcpu_info.vector, vcpu_info.pi_desc_addr, set);
-
- if (!set)
- continue;
-
- enable_remapped_mode = false;
-
- ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
- if (ret < 0) {
- printk(KERN_INFO "%s: failed to update PI IRTE\n",
- __func__);
- goto out;
- }
- }
-
- if (enable_remapped_mode)
- ret = irq_set_vcpu_affinity(host_irq, NULL);
-
- ret = 0;
-out:
- srcu_read_unlock(&kvm->irq_srcu, idx);
- return ret;
}
diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h
index 80499ea0e674..a4af39948cf0 100644
--- a/arch/x86/kvm/vmx/posted_intr.h
+++ b/arch/x86/kvm/vmx/posted_intr.h
@@ -3,6 +3,9 @@
#define __KVM_X86_VMX_POSTED_INTR_H
#include <linux/bitmap.h>
+#include <linux/find.h>
+#include <linux/kvm_host.h>
+
#include <asm/posted_intr.h>
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
@@ -11,9 +14,10 @@ void pi_wakeup_handler(void);
void __init pi_init_cpu(int cpu);
void pi_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
-int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set);
-void vmx_pi_start_assignment(struct kvm *kvm);
+int vmx_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
+ unsigned int host_irq, uint32_t guest_irq,
+ struct kvm_vcpu *vcpu, u32 vector);
+void vmx_pi_start_bypass(struct kvm *kvm);
static inline int pi_find_highest_vector(struct pi_desc *pi_desc)
{
diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h
index 6a9bfdfbb6e5..2f20fb170def 100644
--- a/arch/x86/kvm/vmx/run_flags.h
+++ b/arch/x86/kvm/vmx/run_flags.h
@@ -2,10 +2,12 @@
#ifndef __KVM_X86_VMX_RUN_FLAGS_H
#define __KVM_X86_VMX_RUN_FLAGS_H
-#define VMX_RUN_VMRESUME_SHIFT 0
-#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
+#define VMX_RUN_VMRESUME_SHIFT 0
+#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
+#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT 2
-#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
-#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
+#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
+#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
+#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO BIT(VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT)
#endif /* __KVM_X86_VMX_RUN_FLAGS_H */
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index ec79aacc446f..66744f5768c8 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -743,7 +743,7 @@ bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu)
!to_tdx(vcpu)->vp_enter_args.r12;
}
-bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
+static bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
u64 vcpu_state_details;
@@ -783,8 +783,6 @@ void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
else
vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
- vt->host_debugctlmsr = get_debugctlmsr();
-
vt->guest_state_loaded = true;
}
@@ -1025,20 +1023,20 @@ static void tdx_load_host_xsave_state(struct kvm_vcpu *vcpu)
DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI | \
DEBUGCTLMSR_FREEZE_IN_SMM)
-fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
{
struct vcpu_tdx *tdx = to_tdx(vcpu);
struct vcpu_vt *vt = to_vt(vcpu);
/*
- * force_immediate_exit requires vCPU entering for events injection with
- * an immediately exit followed. But The TDX module doesn't guarantee
- * entry, it's already possible for KVM to _think_ it completely entry
- * to the guest without actually having done so.
- * Since KVM never needs to force an immediate exit for TDX, and can't
- * do direct injection, just warn on force_immediate_exit.
+ * WARN if KVM wants to force an immediate exit, as the TDX module does
+ * not guarantee entry into the guest, i.e. it's possible for KVM to
+ * _think_ it completed entry to the guest and forced an immediate exit
+ * without actually having done so. Luckily, KVM never needs to force
+ * an immediate exit for TDX (KVM can't do direct event injection), so
+ * just WARN and continue on.
*/
- WARN_ON_ONCE(force_immediate_exit);
+ WARN_ON_ONCE(run_flags);
/*
* Wait until retry of SEPT-zap-related SEAMCALL completes before
@@ -1048,7 +1046,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
if (unlikely(READ_ONCE(to_kvm_tdx(vcpu->kvm)->wait_for_sept_zap)))
return EXIT_FASTPATH_EXIT_HANDLED;
- trace_kvm_entry(vcpu, force_immediate_exit);
+ trace_kvm_entry(vcpu, run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT);
if (pi_test_on(&vt->pi_desc)) {
apic->send_IPI_self(POSTED_INTR_VECTOR);
@@ -1060,8 +1058,8 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
tdx_vcpu_enter_exit(vcpu);
- if (vt->host_debugctlmsr & ~TDX_DEBUGCTL_PRESERVED)
- update_debugctlmsr(vt->host_debugctlmsr);
+ if (vcpu->arch.host_debugctl & ~TDX_DEBUGCTL_PRESERVED)
+ update_debugctlmsr(vcpu->arch.host_debugctl);
tdx_load_host_xsave_state(vcpu);
tdx->guest_entered = true;
@@ -1640,8 +1638,8 @@ static int tdx_mem_page_record_premap_cnt(struct kvm *kvm, gfn_t gfn,
return 0;
}
-int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
- enum pg_level level, kvm_pfn_t pfn)
+static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, kvm_pfn_t pfn)
{
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
struct page *page = pfn_to_page(pfn);
@@ -1721,8 +1719,8 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
return 0;
}
-int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
- enum pg_level level, void *private_spt)
+static int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, void *private_spt)
{
int tdx_level = pg_level_to_tdx_sept_level(level);
gpa_t gpa = gfn_to_gpa(gfn);
@@ -1857,8 +1855,8 @@ static void tdx_track(struct kvm *kvm)
kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
}
-int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
- enum pg_level level, void *private_spt)
+static int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, void *private_spt)
{
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
@@ -1880,8 +1878,8 @@ int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
return tdx_reclaim_page(virt_to_page(private_spt));
}
-int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
- enum pg_level level, kvm_pfn_t pfn)
+static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, kvm_pfn_t pfn)
{
struct page *page = pfn_to_page(pfn);
int ret;
@@ -3605,10 +3603,14 @@ int __init tdx_bringup(void)
r = __tdx_bringup();
if (r) {
/*
- * Disable TDX only but don't fail to load module if
- * the TDX module could not be loaded. No need to print
- * message saying "module is not loaded" because it was
- * printed when the first SEAMCALL failed.
+ * Disable TDX, but don't fail to load the module, if the TDX
+ * module could not be loaded. No need to print message saying
+ * "module is not loaded" because it was printed when the first
+ * SEAMCALL failed. Don't bother unwinding the S-EPT hooks or
+ * vm_size, as kvm_x86_ops have already been finalized (and are
+ * intentionally not exported). The S-EPT code is unreachable,
+ * and allocating a few more bytes per VM in a should-be-rare
+ * failure scenario is a non-issue.
*/
if (r == -ENODEV)
goto success_disable_tdx;
@@ -3622,3 +3624,20 @@ success_disable_tdx:
enable_tdx = 0;
return 0;
}
+
+void __init tdx_hardware_setup(void)
+{
+ KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_tdx);
+
+ /*
+ * Note, if the TDX module can't be loaded, KVM TDX support will be
+ * disabled but KVM will continue loading (see tdx_bringup()).
+ */
+ vt_x86_ops.vm_size = max_t(unsigned int, vt_x86_ops.vm_size, sizeof(struct kvm_tdx));
+
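+ /* Wire up the S-EPT hooks; they are reachable only for TDX guests. */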
+ vt_x86_ops.link_external_spt = tdx_sept_link_private_spt;
+ vt_x86_ops.set_external_spte = tdx_sept_set_private_spte;
+ vt_x86_ops.free_external_spt = tdx_sept_free_private_spt;
+ vt_x86_ops.remove_external_spte = tdx_sept_remove_private_spte;
+ vt_x86_ops.protected_apic_has_interrupt = tdx_protected_apic_has_interrupt;
+}
diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
index 51f98443e8a2..ca39a9391db1 100644
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -8,6 +8,7 @@
#ifdef CONFIG_KVM_INTEL_TDX
#include "common.h"
+void tdx_hardware_setup(void);
int tdx_bringup(void);
void tdx_cleanup(void);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 191a9ed0da22..aa157fe5b7b3 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -75,6 +75,8 @@
#include "vmx_onhyperv.h"
#include "posted_intr.h"
+#include "mmu/spte.h"
+
MODULE_AUTHOR("Qumranet");
MODULE_DESCRIPTION("KVM support for VMX (Intel VT-x) extensions");
MODULE_LICENSE("GPL");
@@ -113,8 +115,6 @@ static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, 0444);
module_param(enable_apicv, bool, 0444);
-
-bool __read_mostly enable_ipiv = true;
module_param(enable_ipiv, bool, 0444);
module_param(enable_device_posted_irqs, bool, 0444);
@@ -168,31 +168,6 @@ module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
RTIT_STATUS_BYTECNT))
/*
- * List of MSRs that can be directly passed to the guest.
- * In addition to these x2apic, PT and LBR MSRs are handled specially.
- */
-static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
- MSR_IA32_SPEC_CTRL,
- MSR_IA32_PRED_CMD,
- MSR_IA32_FLUSH_CMD,
- MSR_IA32_TSC,
-#ifdef CONFIG_X86_64
- MSR_FS_BASE,
- MSR_GS_BASE,
- MSR_KERNEL_GS_BASE,
- MSR_IA32_XFD,
- MSR_IA32_XFD_ERR,
-#endif
- MSR_IA32_SYSENTER_CS,
- MSR_IA32_SYSENTER_ESP,
- MSR_IA32_SYSENTER_EIP,
- MSR_CORE_C1_RES,
- MSR_CORE_C3_RESIDENCY,
- MSR_CORE_C6_RESIDENCY,
- MSR_CORE_C7_RESIDENCY,
-};
-
-/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
* executions of PAUSE in a loop. Also indicate if ple enabled.
@@ -674,40 +649,6 @@ static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
return flexpriority_enabled && lapic_in_kernel(vcpu);
}
-static int vmx_get_passthrough_msr_slot(u32 msr)
-{
- int i;
-
- switch (msr) {
- case 0x800 ... 0x8ff:
- /* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
- return -ENOENT;
- case MSR_IA32_RTIT_STATUS:
- case MSR_IA32_RTIT_OUTPUT_BASE:
- case MSR_IA32_RTIT_OUTPUT_MASK:
- case MSR_IA32_RTIT_CR3_MATCH:
- case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
- /* PT MSRs. These are handled in pt_update_intercept_for_msr() */
- case MSR_LBR_SELECT:
- case MSR_LBR_TOS:
- case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31:
- case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31:
- case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
- case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
- case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
- /* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
- return -ENOENT;
- }
-
- for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
- if (vmx_possible_passthrough_msrs[i] == msr)
- return i;
- }
-
- WARN(1, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
- return -ENOENT;
-}
-
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
{
int i;
@@ -963,6 +904,10 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
flags |= VMX_RUN_SAVE_SPEC_CTRL;
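+ /*
+ * Request a CPU buffer clear at VM-Enter if the mitigation (e.g. for
+ * MMIO Stale Data) is enabled and the vCPU can access host MMIO.
+ */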
+ if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+ kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
+ flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
+
return flags;
}
@@ -2149,7 +2094,7 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
break;
case MSR_IA32_DEBUGCTLMSR:
- msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
+ msr_info->data = vmx_guest_debugctl_read();
break;
default:
find_uret_msr:
@@ -2174,7 +2119,7 @@ static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
return (unsigned long)data;
}
-static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
+u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
{
u64 debugctl = 0;
@@ -2186,9 +2131,25 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated
(host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
+ if (boot_cpu_has(X86_FEATURE_RTM) &&
+ (host_initiated || guest_cpu_cap_has(vcpu, X86_FEATURE_RTM)))
+ debugctl |= DEBUGCTLMSR_RTM_DEBUG;
+
return debugctl;
}
+bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated)
+{
+ u64 invalid;
+
+ invalid = data & ~vmx_get_supported_debugctl(vcpu, host_initiated);
+ if (invalid & (DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR)) {
+ kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
+ invalid &= ~(DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR);
+ }
+ return !invalid;
+}
+
/*
* Writes msr value into the appropriate "register".
* Returns 0 on success, non-0 otherwise.
@@ -2257,29 +2218,22 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
- case MSR_IA32_DEBUGCTLMSR: {
- u64 invalid;
-
- invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
- if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) {
- kvm_pr_unimpl_wrmsr(vcpu, msr_index, data);
- data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
- invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
- }
-
- if (invalid)
+ case MSR_IA32_DEBUGCTLMSR:
+ if (!vmx_is_valid_debugctl(vcpu, data, msr_info->host_initiated))
return 1;
+ data &= vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
+
if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
VM_EXIT_SAVE_DEBUG_CONTROLS)
get_vmcs12(vcpu)->guest_ia32_debugctl = data;
- vmcs_write64(GUEST_IA32_DEBUGCTL, data);
+ vmx_guest_debugctl_write(vcpu, data);
+
if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event &&
(data & DEBUGCTLMSR_LBR))
intel_pmu_create_guest_lbr_event(vcpu);
return 0;
- }
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported() ||
(!msr_info->host_initiated &&
@@ -4013,76 +3967,29 @@ static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
vmx->nested.force_msr_bitmap_recalc = true;
}
-void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
- int idx;
if (!cpu_has_vmx_msr_bitmap())
return;
vmx_msr_bitmap_l01_changed(vmx);
- /*
- * Mark the desired intercept state in shadow bitmap, this is needed
- * for resync when the MSR filters change.
- */
- idx = vmx_get_passthrough_msr_slot(msr);
- if (idx >= 0) {
- if (type & MSR_TYPE_R)
- clear_bit(idx, vmx->shadow_msr_intercept.read);
- if (type & MSR_TYPE_W)
- clear_bit(idx, vmx->shadow_msr_intercept.write);
- }
-
- if ((type & MSR_TYPE_R) &&
- !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
- vmx_set_msr_bitmap_read(msr_bitmap, msr);
- type &= ~MSR_TYPE_R;
- }
-
- if ((type & MSR_TYPE_W) &&
- !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
- vmx_set_msr_bitmap_write(msr_bitmap, msr);
- type &= ~MSR_TYPE_W;
+ if (type & MSR_TYPE_R) {
+ if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
+ vmx_clear_msr_bitmap_read(msr_bitmap, msr);
+ else
+ vmx_set_msr_bitmap_read(msr_bitmap, msr);
}
- if (type & MSR_TYPE_R)
- vmx_clear_msr_bitmap_read(msr_bitmap, msr);
-
- if (type & MSR_TYPE_W)
- vmx_clear_msr_bitmap_write(msr_bitmap, msr);
-}
-
-void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
- int idx;
-
- if (!cpu_has_vmx_msr_bitmap())
- return;
-
- vmx_msr_bitmap_l01_changed(vmx);
-
- /*
- * Mark the desired intercept state in shadow bitmap, this is needed
- * for resync when the MSR filter changes.
- */
- idx = vmx_get_passthrough_msr_slot(msr);
- if (idx >= 0) {
- if (type & MSR_TYPE_R)
- set_bit(idx, vmx->shadow_msr_intercept.read);
- if (type & MSR_TYPE_W)
- set_bit(idx, vmx->shadow_msr_intercept.write);
+ if (type & MSR_TYPE_W) {
+ if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
+ vmx_clear_msr_bitmap_write(msr_bitmap, msr);
+ else
+ vmx_set_msr_bitmap_write(msr_bitmap, msr);
}
-
- if (type & MSR_TYPE_R)
- vmx_set_msr_bitmap_read(msr_bitmap, msr);
-
- if (type & MSR_TYPE_W)
- vmx_set_msr_bitmap_write(msr_bitmap, msr);
}
static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
@@ -4161,35 +4068,57 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
}
}
-void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
+void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 i;
-
if (!cpu_has_vmx_msr_bitmap())
return;
- /*
- * Redo intercept permissions for MSRs that KVM is passing through to
- * the guest. Disabling interception will check the new MSR filter and
- * ensure that KVM enables interception if usersepace wants to filter
- * the MSR. MSRs that KVM is already intercepting don't need to be
- * refreshed since KVM is going to intercept them regardless of what
- * userspace wants.
- */
- for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
- u32 msr = vmx_possible_passthrough_msrs[i];
-
- if (!test_bit(i, vmx->shadow_msr_intercept.read))
- vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_R);
-
- if (!test_bit(i, vmx->shadow_msr_intercept.write))
- vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_W);
+ vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
+#ifdef CONFIG_X86_64
+ vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+#endif
+ vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
+ if (kvm_cstate_in_guest(vcpu->kvm)) {
+ vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R);
+ vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
+ vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
+ vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
+ }
+ if (kvm_aperfmperf_in_guest(vcpu->kvm)) {
+ vmx_disable_intercept_for_msr(vcpu, MSR_IA32_APERF, MSR_TYPE_R);
+ vmx_disable_intercept_for_msr(vcpu, MSR_IA32_MPERF, MSR_TYPE_R);
}
/* PT MSRs can be passed through iff PT is exposed to the guest. */
if (vmx_pt_mode_is_host_guest())
pt_update_intercept_for_msr(vcpu);
+
+ if (vcpu->arch.xfd_no_write_intercept)
+ vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD, MSR_TYPE_RW);
+
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
+ !to_vmx(vcpu)->spec_ctrl);
+
+ if (kvm_cpu_cap_has(X86_FEATURE_XFD))
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R,
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD));
+
+ if (cpu_feature_enabled(X86_FEATURE_IBPB))
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
+ !guest_has_pred_cmd_msr(vcpu));
+
+ if (cpu_feature_enabled(X86_FEATURE_FLUSH_L1D))
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));
+
+ /*
+ * x2APIC and LBR MSR intercepts are modified on-demand and cannot be
+ * filtered by userspace.
+ */
}
static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
@@ -4790,7 +4719,8 @@ static void init_vmcs(struct vcpu_vmx *vmx)
vmcs_write32(GUEST_SYSENTER_CS, 0);
vmcs_writel(GUEST_SYSENTER_ESP, 0);
vmcs_writel(GUEST_SYSENTER_EIP, 0);
- vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
+
+ vmx_guest_debugctl_write(&vmx->vcpu, 0);
if (cpu_has_vmx_tpr_shadow()) {
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
@@ -5606,12 +5536,6 @@ void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
set_debugreg(DR6_RESERVED, 6);
}
-void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
-{
- lockdep_assert_irqs_disabled();
- set_debugreg(vcpu->arch.dr6, 6);
-}
-
void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
vmcs_writel(GUEST_DR7, val);
@@ -7290,7 +7214,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
- kvm_arch_has_assigned_device(vcpu->kvm))
+ (flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO))
x86_clear_cpu_buffers();
vmx_disable_fb_clear(vmx);
@@ -7323,8 +7247,9 @@ out:
guest_state_exit_irqoff();
}
-fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
{
+ bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long cr3, cr4;
@@ -7369,6 +7294,12 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
vcpu->arch.regs_dirty = 0;
+ if (run_flags & KVM_RUN_LOAD_GUEST_DR6)
+ set_debugreg(vcpu->arch.dr6, 6);
+
+ if (run_flags & KVM_RUN_LOAD_DEBUGCTL)
+ vmx_reload_guest_debugctl(vcpu);
+
/*
* Refresh vmcs.HOST_CR3 if necessary. This must be done immediately
* prior to VM-Enter, as the kernel may load a new ASID (PCID) any time
@@ -7543,26 +7474,6 @@ int vmx_vcpu_create(struct kvm_vcpu *vcpu)
evmcs->hv_enlightenments_control.msr_bitmap = 1;
}
- /* The MSR bitmap starts with all ones */
- bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
- bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
-
- vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
-#ifdef CONFIG_X86_64
- vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
- vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
- vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
-#endif
- vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
- vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
- vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
- if (kvm_cstate_in_guest(vcpu->kvm)) {
- vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R);
- vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
- vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
- vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
- }
-
vmx->loaded_vmcs = &vmx->vmcs01;
if (cpu_need_virtualize_apic_accesses(vcpu)) {
@@ -7612,7 +7523,7 @@ free_vpid:
int vmx_vm_init(struct kvm *kvm)
{
if (!ple_gap)
- kvm->arch.pause_in_guest = true;
+ kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
switch (l1tf_mitigation) {
@@ -7849,18 +7760,6 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
}
}
- if (kvm_cpu_cap_has(X86_FEATURE_XFD))
- vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R,
- !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD));
-
- if (boot_cpu_has(X86_FEATURE_IBPB))
- vmx_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
- !guest_has_pred_cmd_msr(vcpu));
-
- if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
- vmx_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
- !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));
-
set_cr4_guest_host_mask(vmx);
vmx_write_encls_bitmap(vcpu, NULL);
@@ -7876,6 +7775,9 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vmx->msr_ia32_feature_control_valid_bits &=
~FEAT_CTL_SGX_LC_ENABLED;
+ /* Recalc MSR interception to account for feature changes. */
+ vmx_recalc_msr_intercepts(vcpu);
+
/* Refresh #PF interception to account for MAXPHYADDR changes. */
vmx_update_exception_bitmap(vcpu);
}
@@ -8650,6 +8552,8 @@ int __init vmx_init(void)
{
int r, cpu;
+ KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_vmx);
+
if (!kvm_is_vmx_supported())
return -EOPNOTSUPP;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index b5758c33c60f..d3389baf3ab3 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -19,8 +19,6 @@
#include "../mmu.h"
#include "common.h"
-#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
-
#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS 7
#else
@@ -296,13 +294,6 @@ struct vcpu_vmx {
struct pt_desc pt_desc;
struct lbr_desc lbr_desc;
- /* Save desired MSR intercept (read: pass-through) state */
-#define MAX_POSSIBLE_PASSTHROUGH_MSRS 16
- struct {
- DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
- DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
- } shadow_msr_intercept;
-
/* ve_info must be page aligned. */
struct vmx_ve_information *ve_info;
};
@@ -395,24 +386,54 @@ bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
-void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
-void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);
+
+static inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
+ u32 msr, int type)
+{
+ vmx_set_intercept_for_msr(vcpu, msr, type, false);
+}
+
+static inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
+ u32 msr, int type)
+{
+ vmx_set_intercept_for_msr(vcpu, msr, type, true);
+}
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
-static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
- int type, bool value)
+void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
+
+u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated);
+bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated);
+
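+/*
+ * DEBUGCTL bits that are owned by the host, i.e. loaded into hardware while
+ * running the guest, but hidden from the guest's view of the MSR.
+ */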
+#define VMX_HOST_OWNED_DEBUGCTL_BITS (DEBUGCTLMSR_FREEZE_IN_SMM)
+
+static inline void vmx_guest_debugctl_write(struct kvm_vcpu *vcpu, u64 val)
{
- if (value)
- vmx_enable_intercept_for_msr(vcpu, msr, type);
- else
- vmx_disable_intercept_for_msr(vcpu, msr, type);
+ WARN_ON_ONCE(val & VMX_HOST_OWNED_DEBUGCTL_BITS);
+
+ val |= vcpu->arch.host_debugctl & VMX_HOST_OWNED_DEBUGCTL_BITS;
+ vmcs_write64(GUEST_IA32_DEBUGCTL, val);
}
-void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
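+/* Read the guest's view of DEBUGCTL, i.e. with host-owned bits masked off. */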
+static inline u64 vmx_guest_debugctl_read(void)
+{
+ return vmcs_read64(GUEST_IA32_DEBUGCTL) & ~VMX_HOST_OWNED_DEBUGCTL_BITS;
+}
+
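+/*
+ * Refresh the host-owned bits in the guest's DEBUGCTL field, e.g. after the
+ * host's value was modified while the vCPU was scheduled out.
+ */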
+static inline void vmx_reload_guest_debugctl(struct kvm_vcpu *vcpu)
+{
+ u64 val = vmcs_read64(GUEST_IA32_DEBUGCTL);
+
+ if (!((val ^ vcpu->arch.host_debugctl) & VMX_HOST_OWNED_DEBUGCTL_BITS))
+ return;
+
+ vmx_guest_debugctl_write(vcpu, val & ~VMX_HOST_OWNED_DEBUGCTL_BITS);
+}
/*
* Note, early Intel manuals have the write-low and read-high bitmap offsets
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index b4596f651232..2b3424f638db 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -21,7 +21,7 @@ void vmx_vm_destroy(struct kvm *kvm);
int vmx_vcpu_precreate(struct kvm *kvm);
int vmx_vcpu_create(struct kvm_vcpu *vcpu);
int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
-fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
+fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags);
void vmx_vcpu_free(struct kvm_vcpu *vcpu);
void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
@@ -52,7 +52,7 @@ void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
int trig_mode, int vector);
void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
-void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
+void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
int vmx_get_feature_msr(u32 msr, u64 *data);
@@ -133,10 +133,9 @@ void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void tdx_vcpu_free(struct kvm_vcpu *vcpu);
void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu);
-fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
+fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags);
void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void tdx_vcpu_put(struct kvm_vcpu *vcpu);
-bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu);
int tdx_handle_exit(struct kvm_vcpu *vcpu,
enum exit_fastpath_completion fastpath);
@@ -151,15 +150,6 @@ int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
-int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
- enum pg_level level, void *private_spt);
-int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
- enum pg_level level, void *private_spt);
-int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
- enum pg_level level, kvm_pfn_t pfn);
-int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
- enum pg_level level, kvm_pfn_t pfn);
-
void tdx_flush_tlb_current(struct kvm_vcpu *vcpu);
void tdx_flush_tlb_all(struct kvm_vcpu *vcpu);
void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f2a57a1136d2..a1c49bc681c4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -226,6 +226,9 @@ EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
bool __read_mostly enable_apicv = true;
EXPORT_SYMBOL_GPL(enable_apicv);
+bool __read_mostly enable_ipiv = true;
+EXPORT_SYMBOL_GPL(enable_ipiv);
+
bool __read_mostly enable_device_posted_irqs = true;
EXPORT_SYMBOL_GPL(enable_device_posted_irqs);
@@ -4579,6 +4582,9 @@ static u64 kvm_get_allowed_disable_exits(void)
{
u64 r = KVM_X86_DISABLE_EXITS_PAUSE;
+ if (boot_cpu_has(X86_FEATURE_APERFMPERF))
+ r |= KVM_X86_DISABLE_EXITS_APERFMPERF;
+
if (!mitigate_smt_rsb) {
r |= KVM_X86_DISABLE_EXITS_HLT |
KVM_X86_DISABLE_EXITS_CSTATE;
@@ -4634,17 +4640,20 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_EXT_CPUID:
case KVM_CAP_EXT_EMUL_CPUID:
case KVM_CAP_CLOCKSOURCE:
+#ifdef CONFIG_KVM_IOAPIC
case KVM_CAP_PIT:
+ case KVM_CAP_PIT2:
+ case KVM_CAP_PIT_STATE2:
+ case KVM_CAP_REINJECT_CONTROL:
+#endif
case KVM_CAP_NOP_IO_DELAY:
case KVM_CAP_MP_STATE:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_USER_NMI:
- case KVM_CAP_REINJECT_CONTROL:
case KVM_CAP_IRQ_INJECT_STATUS:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_IOEVENTFD_NO_LENGTH:
- case KVM_CAP_PIT2:
- case KVM_CAP_PIT_STATE2:
+
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
case KVM_CAP_VCPU_EVENTS:
#ifdef CONFIG_KVM_HYPERV
@@ -4985,11 +4994,6 @@ out:
return r;
}
-static void wbinvd_ipi(void *garbage)
-{
- wbinvd();
-}
-
static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
return kvm_arch_has_noncoherent_dma(vcpu->kvm);
@@ -5013,8 +5017,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (kvm_x86_call(has_wbinvd_exit)())
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
- smp_call_function_single(vcpu->cpu,
- wbinvd_ipi, NULL, 1);
+ wbinvd_on_cpu(vcpu->cpu);
}
kvm_x86_call(vcpu_load)(vcpu, cpu);
@@ -5489,12 +5492,6 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
(events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
return -EINVAL;
- /* INITs are latched while in SMM */
- if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
- (events->smi.smm || events->smi.pending) &&
- vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
- return -EINVAL;
-
process_nmi(vcpu);
/*
@@ -6401,135 +6398,6 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
return 0;
}
-static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
-{
- struct kvm_pic *pic = kvm->arch.vpic;
- int r;
-
- r = 0;
- switch (chip->chip_id) {
- case KVM_IRQCHIP_PIC_MASTER:
- memcpy(&chip->chip.pic, &pic->pics[0],
- sizeof(struct kvm_pic_state));
- break;
- case KVM_IRQCHIP_PIC_SLAVE:
- memcpy(&chip->chip.pic, &pic->pics[1],
- sizeof(struct kvm_pic_state));
- break;
- case KVM_IRQCHIP_IOAPIC:
- kvm_get_ioapic(kvm, &chip->chip.ioapic);
- break;
- default:
- r = -EINVAL;
- break;
- }
- return r;
-}
-
-static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
-{
- struct kvm_pic *pic = kvm->arch.vpic;
- int r;
-
- r = 0;
- switch (chip->chip_id) {
- case KVM_IRQCHIP_PIC_MASTER:
- spin_lock(&pic->lock);
- memcpy(&pic->pics[0], &chip->chip.pic,
- sizeof(struct kvm_pic_state));
- spin_unlock(&pic->lock);
- break;
- case KVM_IRQCHIP_PIC_SLAVE:
- spin_lock(&pic->lock);
- memcpy(&pic->pics[1], &chip->chip.pic,
- sizeof(struct kvm_pic_state));
- spin_unlock(&pic->lock);
- break;
- case KVM_IRQCHIP_IOAPIC:
- kvm_set_ioapic(kvm, &chip->chip.ioapic);
- break;
- default:
- r = -EINVAL;
- break;
- }
- kvm_pic_update_irq(pic);
- return r;
-}
-
-static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
-{
- struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
-
- BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
-
- mutex_lock(&kps->lock);
- memcpy(ps, &kps->channels, sizeof(*ps));
- mutex_unlock(&kps->lock);
- return 0;
-}
-
-static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
-{
- int i;
- struct kvm_pit *pit = kvm->arch.vpit;
-
- mutex_lock(&pit->pit_state.lock);
- memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
- for (i = 0; i < 3; i++)
- kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
- mutex_unlock(&pit->pit_state.lock);
- return 0;
-}
-
-static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
-{
- mutex_lock(&kvm->arch.vpit->pit_state.lock);
- memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
- sizeof(ps->channels));
- ps->flags = kvm->arch.vpit->pit_state.flags;
- mutex_unlock(&kvm->arch.vpit->pit_state.lock);
- memset(&ps->reserved, 0, sizeof(ps->reserved));
- return 0;
-}
-
-static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
-{
- int start = 0;
- int i;
- u32 prev_legacy, cur_legacy;
- struct kvm_pit *pit = kvm->arch.vpit;
-
- mutex_lock(&pit->pit_state.lock);
- prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
- cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
- if (!prev_legacy && cur_legacy)
- start = 1;
- memcpy(&pit->pit_state.channels, &ps->channels,
- sizeof(pit->pit_state.channels));
- pit->pit_state.flags = ps->flags;
- for (i = 0; i < 3; i++)
- kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
- start && i == 0);
- mutex_unlock(&pit->pit_state.lock);
- return 0;
-}
-
-static int kvm_vm_ioctl_reinject(struct kvm *kvm,
- struct kvm_reinject_control *control)
-{
- struct kvm_pit *pit = kvm->arch.vpit;
-
- /* pit->pit_state.lock was overloaded to prevent userspace from getting
- * an inconsistent state after running multiple KVM_REINJECT_CONTROL
- * ioctls in parallel. Use a separate lock if that ioctl isn't rare.
- */
- mutex_lock(&pit->pit_state.lock);
- kvm_pit_set_reinject(pit, control->pit_reinject);
- mutex_unlock(&pit->pit_state.lock);
-
- return 0;
-}
-
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
@@ -6549,18 +6417,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
kvm_vcpu_kick(vcpu);
}
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
- bool line_status)
-{
- if (!irqchip_in_kernel(kvm))
- return -ENXIO;
-
- irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
- irq_event->irq, irq_event->level,
- line_status);
- return 0;
-}
-
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap)
{
@@ -6625,17 +6481,11 @@ split_irqchip_unlock:
if (!mitigate_smt_rsb && boot_cpu_has_bug(X86_BUG_SMT_RSB) &&
cpu_smt_possible() &&
- (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE))
+ (cap->args[0] & ~(KVM_X86_DISABLE_EXITS_PAUSE |
+ KVM_X86_DISABLE_EXITS_APERFMPERF)))
pr_warn_once(SMT_RSB_MSG);
- if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
- kvm->arch.pause_in_guest = true;
- if (cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT)
- kvm->arch.mwait_in_guest = true;
- if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
- kvm->arch.hlt_in_guest = true;
- if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
- kvm->arch.cstate_in_guest = true;
+ kvm_disable_exits(kvm, cap->args[0]);
r = 0;
disable_exits_unlock:
mutex_unlock(&kvm->lock);
@@ -7072,9 +6922,11 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
int r = -ENOTTY;
+
+#ifdef CONFIG_KVM_IOAPIC
/*
* This union makes it completely explicit to gcc-3.x
- * that these two variables' stack usage should be
+ * that these three variables' stack usage should be
* combined, not added together.
*/
union {
@@ -7082,6 +6934,7 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
struct kvm_pit_state2 ps2;
struct kvm_pit_config pit_config;
} u;
+#endif
switch (ioctl) {
case KVM_SET_TSS_ADDR:
@@ -7105,6 +6958,7 @@ set_identity_unlock:
case KVM_SET_NR_MMU_PAGES:
r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
break;
+#ifdef CONFIG_KVM_IOAPIC
case KVM_CREATE_IRQCHIP: {
mutex_lock(&kvm->lock);
@@ -7126,7 +6980,7 @@ set_identity_unlock:
goto create_irqchip_unlock;
}
- r = kvm_setup_default_irq_routing(kvm);
+ r = kvm_setup_default_ioapic_and_pic_routing(kvm);
if (r) {
kvm_ioapic_destroy(kvm);
kvm_pic_destroy(kvm);
@@ -7174,7 +7028,7 @@ set_identity_unlock:
}
r = -ENXIO;
- if (!irqchip_kernel(kvm))
+ if (!irqchip_full(kvm))
goto get_irqchip_out;
r = kvm_vm_ioctl_get_irqchip(kvm, chip);
if (r)
@@ -7198,7 +7052,7 @@ set_identity_unlock:
}
r = -ENXIO;
- if (!irqchip_kernel(kvm))
+ if (!irqchip_full(kvm))
goto set_irqchip_out;
r = kvm_vm_ioctl_set_irqchip(kvm, chip);
set_irqchip_out:
@@ -7271,6 +7125,7 @@ set_pit2_out:
r = kvm_vm_ioctl_reinject(kvm, &control);
break;
}
+#endif
case KVM_SET_BOOT_CPU_ID:
r = 0;
mutex_lock(&kvm->lock);
@@ -7341,9 +7196,12 @@ set_pit2_out:
if (user_tsc_khz == 0)
user_tsc_khz = tsc_khz;
- WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz);
- r = 0;
-
+ mutex_lock(&kvm->lock);
+ if (!kvm->created_vcpus) {
+ WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz);
+ r = 0;
+ }
+ mutex_unlock(&kvm->lock);
goto out;
}
case KVM_GET_TSC_KHZ: {
@@ -10729,8 +10587,10 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
if (irqchip_split(vcpu->kvm))
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
+#ifdef CONFIG_KVM_IOAPIC
else if (ioapic_in_kernel(vcpu->kvm))
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
+#endif
if (is_guest_mode(vcpu))
vcpu->arch.load_eoi_exitmap_pending = true;
@@ -10784,6 +10644,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
dm_request_for_irq_injection(vcpu) &&
kvm_cpu_accept_dm_intr(vcpu);
fastpath_t exit_fastpath;
+ u64 run_flags, debug_ctl;
bool req_immediate_exit = false;
@@ -10931,8 +10792,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_vcpu_update_apicv(vcpu);
if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
kvm_check_async_pf_completion(vcpu);
+
+ /*
+ * Recalc MSR intercepts as userspace may want to intercept
+ * accesses to MSRs that KVM would otherwise pass through to
+ * the guest.
+ */
if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
- kvm_x86_call(msr_filter_changed)(vcpu);
+ kvm_x86_call(recalc_msr_intercepts)(vcpu);
if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
kvm_x86_call(update_cpu_dirty_logging)(vcpu);
@@ -11028,8 +10895,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto cancel_injection;
}
- if (req_immediate_exit)
+ run_flags = 0;
+ if (req_immediate_exit) {
+ run_flags |= KVM_RUN_FORCE_IMMEDIATE_EXIT;
kvm_make_request(KVM_REQ_EVENT, vcpu);
+ }
fpregs_assert_state_consistent();
if (test_thread_flag(TIF_NEED_FPU_LOAD))
@@ -11047,12 +10917,22 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(vcpu->arch.eff_db[3], 3);
/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
- kvm_x86_call(set_dr6)(vcpu, vcpu->arch.dr6);
+ run_flags |= KVM_RUN_LOAD_GUEST_DR6;
} else if (unlikely(hw_breakpoint_active())) {
set_debugreg(DR7_FIXED_1, 7);
}
- vcpu->arch.host_debugctl = get_debugctlmsr();
+ /*
+ * Refresh the host DEBUGCTL snapshot after disabling IRQs, as DEBUGCTL
+ * can be modified in IRQ context, e.g. via SMP function calls. Inform
+ * vendor code if any host-owned bits were changed, e.g. so that the
+ * value loaded into hardware while running the guest can be updated.
+ */
+ debug_ctl = get_debugctlmsr();
+ if ((debug_ctl ^ vcpu->arch.host_debugctl) & kvm_x86_ops.HOST_OWNED_DEBUGCTL &&
+ !vcpu->arch.guest_state_protected)
+ run_flags |= KVM_RUN_LOAD_DEBUGCTL;
+ vcpu->arch.host_debugctl = debug_ctl;
guest_timing_enter_irqoff();
@@ -11066,8 +10946,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
(kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
- exit_fastpath = kvm_x86_call(vcpu_run)(vcpu,
- req_immediate_exit);
+ exit_fastpath = kvm_x86_call(vcpu_run)(vcpu, run_flags);
if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
break;
@@ -11079,6 +10958,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
break;
}
+ run_flags = 0;
+
/* Note, VM-Exits that go down the "slow" path are accounted below. */
++vcpu->stat.exits;
}
@@ -11552,6 +11433,28 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
trace_kvm_fpu(0);
}
+static int kvm_x86_vcpu_pre_run(struct kvm_vcpu *vcpu)
+{
+ /*
+ * SIPI_RECEIVED is obsolete; KVM leaves the vCPU in Wait-For-SIPI and
+ * tracks the pending SIPI separately. SIPI_RECEIVED is still accepted
+ * by KVM_SET_VCPU_EVENTS for backwards compatibility, but should be
+ * converted to INIT_RECEIVED.
+ */
+ if (WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED))
+ return -EINVAL;
+
+ /*
+ * Disallow running the vCPU if userspace forced it into an impossible
+ * MP_STATE, e.g. if the vCPU is in WFS but SIPI is blocked.
+ */
+ if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED &&
+ !kvm_apic_init_sipi_allowed(vcpu))
+ return -EINVAL;
+
+ return kvm_x86_call(vcpu_pre_run)(vcpu);
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
struct kvm_queued_exception *ex = &vcpu->arch.exception;
@@ -11654,7 +11557,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
goto out;
}
- r = kvm_x86_call(vcpu_pre_run)(vcpu);
+ r = kvm_x86_vcpu_pre_run(vcpu);
if (r <= 0)
goto out;
@@ -11898,21 +11801,16 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
}
/*
- * Pending INITs are reported using KVM_SET_VCPU_EVENTS, disallow
- * forcing the guest into INIT/SIPI if those events are supposed to be
- * blocked. KVM prioritizes SMI over INIT, so reject INIT/SIPI state
- * if an SMI is pending as well.
+ * SIPI_RECEIVED is obsolete and no longer used internally; KVM instead
+ * leaves the vCPU in INIT_RECEIVED (Wait-For-SIPI) and pends the SIPI.
+ * Translate SIPI_RECEIVED as appropriate for backwards compatibility.
*/
- if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) &&
- (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
- mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
- goto out;
-
if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
- kvm_set_mp_state(vcpu, KVM_MP_STATE_INIT_RECEIVED);
+ mp_state->mp_state = KVM_MP_STATE_INIT_RECEIVED;
set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
- } else
- kvm_set_mp_state(vcpu, mp_state->mp_state);
+ }
+
+ kvm_set_mp_state(vcpu, mp_state->mp_state);
kvm_make_request(KVM_REQ_EVENT, vcpu);
ret = 0;
@@ -12794,21 +12692,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (ret)
goto out;
- kvm_mmu_init_vm(kvm);
+ ret = kvm_mmu_init_vm(kvm);
+ if (ret)
+ goto out_cleanup_page_track;
ret = kvm_x86_call(vm_init)(kvm);
if (ret)
goto out_uninit_mmu;
- INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
atomic_set(&kvm->arch.noncoherent_dma_count, 0);
- /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
- set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
- /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
- set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
- &kvm->arch.irq_sources_bitmap);
-
raw_spin_lock_init(&kvm->arch.tsc_write_lock);
mutex_init(&kvm->arch.apic_map_lock);
seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock);
@@ -12847,6 +12740,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
out_uninit_mmu:
kvm_mmu_uninit_vm(kvm);
+out_cleanup_page_track:
kvm_page_track_cleanup(kvm);
out:
return ret;
@@ -12939,7 +12833,9 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm)
cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
+#ifdef CONFIG_KVM_IOAPIC
kvm_free_pit(kvm);
+#endif
kvm_mmu_pre_destroy_vm(kvm);
static_call_cond(kvm_x86_vm_pre_destroy)(kvm);
@@ -12963,8 +12859,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
}
kvm_destroy_vcpus(kvm);
kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
+#ifdef CONFIG_KVM_IOAPIC
kvm_pic_destroy(kvm);
kvm_ioapic_destroy(kvm);
+#endif
kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
kvm_mmu_uninit_vm(kvm);
@@ -13574,25 +13472,6 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
}
-void kvm_arch_start_assignment(struct kvm *kvm)
-{
- if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
- kvm_x86_call(pi_start_assignment)(kvm);
-}
-EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
-
-void kvm_arch_end_assignment(struct kvm *kvm)
-{
- atomic_dec(&kvm->arch.assigned_device_count);
-}
-EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
-
-bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
-{
- return raw_atomic_read(&kvm->arch.assigned_device_count);
-}
-EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
-
static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
{
/*
@@ -13628,77 +13507,6 @@ bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
-int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
- struct irq_bypass_producer *prod)
-{
- struct kvm_kernel_irqfd *irqfd =
- container_of(cons, struct kvm_kernel_irqfd, consumer);
- struct kvm *kvm = irqfd->kvm;
- int ret;
-
- kvm_arch_start_assignment(irqfd->kvm);
-
- spin_lock_irq(&kvm->irqfds.lock);
- irqfd->producer = prod;
-
- ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
- prod->irq, irqfd->gsi, 1);
- if (ret)
- kvm_arch_end_assignment(irqfd->kvm);
-
- spin_unlock_irq(&kvm->irqfds.lock);
-
-
- return ret;
-}
-
-void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
- struct irq_bypass_producer *prod)
-{
- int ret;
- struct kvm_kernel_irqfd *irqfd =
- container_of(cons, struct kvm_kernel_irqfd, consumer);
- struct kvm *kvm = irqfd->kvm;
-
- WARN_ON(irqfd->producer != prod);
-
- /*
- * When producer of consumer is unregistered, we change back to
- * remapped mode, so we can re-use the current implementation
- * when the irq is masked/disabled or the consumer side (KVM
- * int this case doesn't want to receive the interrupts.
- */
- spin_lock_irq(&kvm->irqfds.lock);
- irqfd->producer = NULL;
-
- ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
- prod->irq, irqfd->gsi, 0);
- if (ret)
- printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
- " fails: %d\n", irqfd->consumer.token, ret);
-
- spin_unlock_irq(&kvm->irqfds.lock);
-
-
- kvm_arch_end_assignment(irqfd->kvm);
-}
-
-int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set)
-{
- return kvm_x86_call(pi_update_irte)(kvm, host_irq, guest_irq, set);
-}
-
-bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
- struct kvm_kernel_irq_routing_entry *new)
-{
- if (old->type != KVM_IRQ_ROUTING_MSI ||
- new->type != KVM_IRQ_ROUTING_MSI)
- return true;
-
- return !!memcmp(&old->msi, &new->msi, sizeof(new->msi));
-}
-
bool kvm_vector_hashing_enabled(void)
{
return vector_hashing;
@@ -14098,7 +13906,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
-EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 832f0faf4779..bcfd9b719ada 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -55,6 +55,28 @@ struct kvm_host_values {
void kvm_spurious_fault(void);
+#define SIZE_OF_MEMSLOTS_HASHTABLE \
+ (sizeof(((struct kvm_memslots *)0)->id_hash) * 2 * KVM_MAX_NR_ADDRESS_SPACES)
+
+/* Sanity check the size of the memslot hash tables. */
+static_assert(SIZE_OF_MEMSLOTS_HASHTABLE ==
+ (1024 * (1 + IS_ENABLED(CONFIG_X86_64)) * (1 + IS_ENABLED(CONFIG_KVM_SMM))));
+
+/*
+ * Assert that "struct kvm_{svm,vmx,tdx}" is an order-0 or order-1 allocation.
+ * Spilling over to an order-2 allocation isn't fundamentally problematic, but
+ * isn't expected to happen in the foreseeable future (O(years)). Assert that
+ * the size is an order-0 allocation when ignoring the memslot hash tables, to
+ * help detect and debug unexpected size increases.
+ */
+#define KVM_SANITY_CHECK_VM_STRUCT_SIZE(x) \
+do { \
+ BUILD_BUG_ON(get_order(sizeof(struct x) - SIZE_OF_MEMSLOTS_HASHTABLE) && \
+ !IS_ENABLED(CONFIG_DEBUG_KERNEL) && !IS_ENABLED(CONFIG_KASAN)); \
+ BUILD_BUG_ON(get_order(sizeof(struct x)) > 1 && \
+ !IS_ENABLED(CONFIG_DEBUG_KERNEL) && !IS_ENABLED(CONFIG_KASAN)); \
+} while (0)
+
#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check) \
({ \
bool failed = (consistency_check); \
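Note on the assertion above: get_order() classifies an allocation by how many power-of-two pages it needs, so the first BUILD_BUG_ON() demands order 0 (one page) once the memslot hash tables are subtracted, and the second caps the full struct at order 1 (two pages). A minimal user-space sketch of that rounding, assuming 4 KiB pages (the helper name and PAGE_SIZE constant are local stand-ins, not the kernel implementation):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stand-in for the kernel's get_order(): smallest n such that
 * (PAGE_SIZE << n) >= size. */
static int get_order_sketch(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	printf("%d\n", get_order_sketch(3000));	/* 0: fits in one page */
	printf("%d\n", get_order_sketch(5000));	/* 1: needs two pages */
	printf("%d\n", get_order_sketch(9000));	/* 2: would trip the check */
	return 0;
}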
@@ -499,24 +521,34 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
__rem; \
})
+static inline void kvm_disable_exits(struct kvm *kvm, u64 mask)
+{
+ kvm->arch.disabled_exits |= mask;
+}
+
static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
- return kvm->arch.mwait_in_guest;
+ return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_MWAIT;
}
static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
- return kvm->arch.hlt_in_guest;
+ return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_HLT;
}
static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
- return kvm->arch.pause_in_guest;
+ return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_PAUSE;
}
static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
- return kvm->arch.cstate_in_guest;
+ return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_CSTATE;
+}
+
+static inline bool kvm_aperfmperf_in_guest(struct kvm *kvm)
+{
+ return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_APERFMPERF;
}
static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
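The x86.h hunk above collapses four per-feature bools (mwait/hlt/pause/cstate) into one disabled_exits word tested against the KVM_X86_DISABLE_EXITS_* flags, which is also what lets kvm_aperfmperf_in_guest() come in for free. A self-contained sketch of the same consolidation pattern (flag values and struct are illustrative, not the kernel's definitions):

#include <stdbool.h>
#include <stdint.h>

#define DISABLE_EXITS_MWAIT	(1ULL << 0)
#define DISABLE_EXITS_HLT	(1ULL << 1)
#define DISABLE_EXITS_PAUSE	(1ULL << 2)

struct vm_arch {
	uint64_t disabled_exits;	/* one word replaces N bools */
};

/* One OR-in setter covers every feature... */
static void disable_exits(struct vm_arch *arch, uint64_t mask)
{
	arch->disabled_exits |= mask;
}

/* ...and each accessor reduces to a mask test. */
static bool hlt_in_guest(const struct vm_arch *arch)
{
	return arch->disabled_exits & DISABLE_EXITS_HLT;
}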
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 7456df985d96..bb57e93b4caf 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -1063,13 +1063,9 @@ unsigned long arch_max_swapfile_size(void)
static struct execmem_info execmem_info __ro_after_init;
#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
-void execmem_fill_trapping_insns(void *ptr, size_t size, bool writeable)
+void execmem_fill_trapping_insns(void *ptr, size_t size)
{
- /* fill memory with INT3 instructions */
- if (writeable)
- memset(ptr, INT3_INSN_OPCODE, size);
- else
- text_poke_set(ptr, INT3_INSN_OPCODE, size);
+ memset(ptr, INT3_INSN_OPCODE, size);
}
#endif
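With the writeable distinction dropped, execmem_fill_trapping_insns() is a one-line memset of the single-byte x86 breakpoint opcode. A hedged user-space illustration (INT3_OPCODE is a local constant standing in for the kernel's INT3_INSN_OPCODE):

#include <string.h>

#define INT3_OPCODE 0xCC	/* x86 one-byte breakpoint instruction */

/* Fill a region with trapping instructions so a stray jump into
 * unused executable memory faults immediately instead of executing
 * leftover bytes. */
static void fill_trapping_insns(void *ptr, size_t size)
{
	memset(ptr, INT3_OPCODE, size);
}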
@@ -1102,7 +1098,21 @@ struct execmem_info __init *execmem_arch_setup(void)
.pgprot = pgprot,
.alignment = MODULE_ALIGN,
},
- [EXECMEM_KPROBES ... EXECMEM_BPF] = {
+ [EXECMEM_KPROBES] = {
+ .flags = flags,
+ .start = start,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL_ROX,
+ .alignment = MODULE_ALIGN,
+ },
+ [EXECMEM_FTRACE] = {
+ .flags = flags,
+ .start = start,
+ .end = MODULES_END,
+ .pgprot = pgprot,
+ .alignment = MODULE_ALIGN,
+ },
+ [EXECMEM_BPF] = {
.flags = EXECMEM_KASAN_SHADOW,
.start = start,
.end = MODULES_END,
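The execmem hunk above trades one GNU range initializer for three explicit entries, so EXECMEM_KPROBES can take PAGE_KERNEL_ROX while EXECMEM_FTRACE and EXECMEM_BPF keep their own settings. A small sketch of the two designated-initializer styles involved (enum and struct are illustrative):

enum range_type { R_KPROBES, R_FTRACE, R_BPF, R_MAX };

struct range_info { int prot; };

/* GNU C range initializer: one entry covers several indices... */
static const struct range_info shared[R_MAX] = {
	[R_KPROBES ... R_BPF] = { .prot = 1 },
};

/* ...versus explicit per-index entries once the values diverge. */
static const struct range_info split[R_MAX] = {
	[R_KPROBES]	= { .prot = 2 },
	[R_FTRACE]	= { .prot = 1 },
	[R_BPF]		= { .prot = 1 },
};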
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 2e7923844afe..c09284302dd3 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -36,7 +36,6 @@
#include <linux/debugfs.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
-#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mm.h>
diff --git a/arch/x86/mm/pgprot.c b/arch/x86/mm/pgprot.c
index c84bd9540b16..dc1afd5c839d 100644
--- a/arch/x86/mm/pgprot.c
+++ b/arch/x86/mm/pgprot.c
@@ -32,7 +32,7 @@ void add_encrypt_protection_map(void)
protection_map[i] = pgprot_encrypted(protection_map[i]);
}
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
unsigned long val = pgprot_val(protection_map[vm_flags &
(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 15672cb926fc..7e3fca164620 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -3501,13 +3501,6 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
}
-static const char *bpf_get_prog_name(struct bpf_prog *prog)
-{
- if (prog->aux->ksym.prog)
- return prog->aux->ksym.name;
- return prog->aux->name;
-}
-
static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
{
int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
@@ -3531,7 +3524,7 @@ static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size
if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL) {
pr_err("BPF private stack overflow/underflow detected for prog %sx\n",
- bpf_get_prog_name(prog));
+ bpf_jit_get_prog_name(prog));
break;
}
}
@@ -3845,7 +3838,6 @@ void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp
}
return;
#endif
- WARN(1, "verification of programs using bpf_throw should have failed\n");
}
void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
diff --git a/arch/x86/um/asm/syscall.h b/arch/x86/um/asm/syscall.h
index 56a2f0913e3c..d6208d0fad51 100644
--- a/arch/x86/um/asm/syscall.h
+++ b/arch/x86/um/asm/syscall.h
@@ -9,6 +9,8 @@ typedef asmlinkage long (*sys_call_ptr_t)(unsigned long, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
+extern const sys_call_ptr_t sys_call_table[];
+
static inline int syscall_get_arch(struct task_struct *task)
{
#ifdef CONFIG_X86_32
diff --git a/arch/x86/um/shared/sysdep/ptrace.h b/arch/x86/um/shared/sysdep/ptrace.h
index 8f7476ff6e95..572ea2d79131 100644
--- a/arch/x86/um/shared/sysdep/ptrace.h
+++ b/arch/x86/um/shared/sysdep/ptrace.h
@@ -44,18 +44,6 @@
#include "ptrace_64.h"
#endif
-struct syscall_args {
- unsigned long args[6];
-};
-
-#define SYSCALL_ARGS(r) ((struct syscall_args) \
- { .args = { UPT_SYSCALL_ARG1(r), \
- UPT_SYSCALL_ARG2(r), \
- UPT_SYSCALL_ARG3(r), \
- UPT_SYSCALL_ARG4(r), \
- UPT_SYSCALL_ARG5(r), \
- UPT_SYSCALL_ARG6(r) } } )
-
extern unsigned long host_fp_size;
struct uml_pt_regs {
diff --git a/arch/x86/um/shared/sysdep/syscalls.h b/arch/x86/um/shared/sysdep/syscalls.h
deleted file mode 100644
index b2060ac707f0..000000000000
--- a/arch/x86/um/shared/sysdep/syscalls.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef __i386__
-#include "syscalls_32.h"
-#else
-#include "syscalls_64.h"
-#endif
diff --git a/arch/x86/um/shared/sysdep/syscalls_32.h b/arch/x86/um/shared/sysdep/syscalls_32.h
deleted file mode 100644
index f6e9f84397e7..000000000000
--- a/arch/x86/um/shared/sysdep/syscalls_32.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <asm/unistd.h>
-#include <sysdep/ptrace.h>
-
-typedef long syscall_handler_t(struct syscall_args);
-
-extern syscall_handler_t *sys_call_table[];
-
-#define EXECUTE_SYSCALL(syscall, regs) \
- ((*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
diff --git a/arch/x86/um/shared/sysdep/syscalls_64.h b/arch/x86/um/shared/sysdep/syscalls_64.h
deleted file mode 100644
index b6b997225841..000000000000
--- a/arch/x86/um/shared/sysdep/syscalls_64.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- *
- * Licensed under the GPL
- */
-
-#ifndef __SYSDEP_X86_64_SYSCALLS_H__
-#define __SYSDEP_X86_64_SYSCALLS_H__
-
-#include <linux/msg.h>
-#include <linux/shm.h>
-
-typedef long syscall_handler_t(long, long, long, long, long, long);
-
-extern syscall_handler_t *sys_call_table[];
-
-#define EXECUTE_SYSCALL(syscall, regs) \
- (((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
- UPT_SYSCALL_ARG2(&regs->regs), \
- UPT_SYSCALL_ARG3(&regs->regs), \
- UPT_SYSCALL_ARG4(&regs->regs), \
- UPT_SYSCALL_ARG5(&regs->regs), \
- UPT_SYSCALL_ARG6(&regs->regs)))
-
-extern syscall_handler_t sys_modify_ldt;
-extern syscall_handler_t sys_arch_prctl;
-
-#endif
diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
index cb3f17627d16..1909c2e640b2 100644
--- a/arch/x86/um/tls_32.c
+++ b/arch/x86/um/tls_32.c
@@ -186,7 +186,7 @@ int arch_switch_tls(struct task_struct *to)
/*
* We have no need whatsoever to switch TLS for kernel threads; beyond
* that, that would also result in us calling os_set_thread_area with
- * userspace_pid[cpu] == 0, which gives an error.
+ * task->mm == NULL, which would cause a crash.
*/
if (likely(to->mm))
return load_TLS(O_FORCE, to);
diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h
index 6333bd1eb9d2..a459ffbaf7ab 100644
--- a/arch/xtensa/include/asm/bootparam.h
+++ b/arch/xtensa/include/asm/bootparam.h
@@ -27,7 +27,7 @@
#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
#define BP_TAG_LAST 0x7E0B /* last tag */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* All records are aligned to 4 bytes */
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 95e33a913962..b6db4838b175 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -11,7 +11,7 @@
#ifndef _XTENSA_CMPXCHG_H
#define _XTENSA_CMPXCHG_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/bits.h>
#include <linux/stringify.h>
@@ -220,6 +220,6 @@ __arch_xchg(unsigned long x, volatile void * ptr, int size)
}
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _XTENSA_CMPXCHG_H */
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
index 3b1a0d5d2169..e0447bcc52c5 100644
--- a/arch/xtensa/include/asm/coprocessor.h
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -16,7 +16,7 @@
#include <asm/core.h>
#include <asm/types.h>
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
# include <variant/tie-asm.h>
.macro xchal_sa_start a b
@@ -69,7 +69,7 @@
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/*
* XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
@@ -87,7 +87,7 @@
#define XTENSA_HAVE_IO_PORTS \
XCHAL_CP_PORT_MASK
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Additional registers.
@@ -151,5 +151,5 @@ void local_coprocessors_flush_release_all(void);
#endif /* XTENSA_HAVE_COPROCESSORS */
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _XTENSA_COPROCESSOR_H */
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
index df275d554788..7b483538f066 100644
--- a/arch/xtensa/include/asm/current.h
+++ b/arch/xtensa/include/asm/current.h
@@ -13,7 +13,7 @@
#include <asm/thread_info.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/thread_info.h>
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h
index 0ea4f84cd558..f676d209d110 100644
--- a/arch/xtensa/include/asm/ftrace.h
+++ b/arch/xtensa/include/asm/ftrace.h
@@ -12,20 +12,20 @@
#include <asm/processor.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned long return_address(unsigned level);
#define ftrace_return_address(n) return_address(n)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 3
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern void _mcount(void);
#define mcount _mcount
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* _XTENSA_FTRACE_H */
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 574795a20d6f..101bcb87e15b 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -34,7 +34,7 @@
#define CA_WRITEBACK (0x4)
#endif
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define XTENSA_HWVERSION_RC_2009_0 230000
@@ -240,6 +240,6 @@
.endm
-#endif /*__ASSEMBLY__*/
+#endif /*__ASSEMBLER__*/
#endif /* _XTENSA_INITIALIZE_MMU_H */
diff --git a/arch/xtensa/include/asm/jump_label.h b/arch/xtensa/include/asm/jump_label.h
index 46c8596259d2..38e3e2a9b0fb 100644
--- a/arch/xtensa/include/asm/jump_label.h
+++ b/arch/xtensa/include/asm/jump_label.h
@@ -4,7 +4,7 @@
#ifndef _ASM_XTENSA_JUMP_LABEL_H
#define _ASM_XTENSA_JUMP_LABEL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
@@ -61,5 +61,5 @@ struct jump_entry {
jump_label_t key;
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h
index 8d2b4248466f..0da91b64fab9 100644
--- a/arch/xtensa/include/asm/kasan.h
+++ b/arch/xtensa/include/asm/kasan.h
@@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_KASAN
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 6fc05cba61a2..6949724625a0 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -80,7 +80,7 @@
#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned long xtensa_kio_paddr;
static inline unsigned long xtensa_get_kio_paddr(void)
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 644413792bf3..20655174b111 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -80,7 +80,7 @@
#endif
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define __pgprot(x) (x)
@@ -172,7 +172,7 @@ static inline unsigned long ___pa(unsigned long va)
#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#include <asm-generic/memory_model.h>
#endif /* _XTENSA_PAGE_H */
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index d62aa1c316fc..d6eb695f2b26 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -203,7 +203,7 @@
* What follows is the closest we can get by reasonable means..
* See linux/mm/mmap.c for protection_map[] array that uses these definitions.
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -366,10 +366,10 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return pte;
}
-#endif /* !defined (__ASSEMBLY__) */
+#endif /* !defined (__ASSEMBLER__) */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
* _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
@@ -408,7 +408,7 @@ void update_mmu_tlb_range(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, unsigned int nr);
#define update_mmu_tlb_range update_mmu_tlb_range
-#endif /* !defined (__ASSEMBLY__) */
+#endif /* !defined (__ASSEMBLER__) */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 47b5df86ab5c..60a211356335 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -105,7 +105,7 @@
#error Unsupported xtensa ABI
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#if defined(__XTENSA_WINDOWED_ABI__)
@@ -263,5 +263,5 @@ static inline unsigned long get_er(unsigned long addr)
#endif /* XCHAL_HAVE_EXTERN_REGS */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _XTENSA_PROCESSOR_H */
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index 4871e5a4d6fb..d0568ff6d349 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -41,7 +41,7 @@
#define NO_SYSCALL (-1)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/coprocessor.h>
#include <asm/core.h>
@@ -106,11 +106,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
int do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
# include <asm/asm-offsets.h>
#define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE)
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _XTENSA_PTRACE_H */
diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h
index de169b4eaeef..d301e68573cc 100644
--- a/arch/xtensa/include/asm/signal.h
+++ b/arch/xtensa/include/asm/signal.h
@@ -14,10 +14,10 @@
#include <uapi/asm/signal.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define __ARCH_HAS_SA_RESTORER
#include <asm/sigcontext.h>
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _XTENSA_SIGNAL_H */
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index e0dffcc43b9e..5b74dfc35ef9 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -16,7 +16,7 @@
#define CURRENT_SHIFT KERNEL_STACK_SHIFT
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
# include <asm/processor.h>
#endif
@@ -28,7 +28,7 @@
* must also be changed
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#if XTENSA_HAVE_COPROCESSORS
@@ -80,7 +80,7 @@ struct thread_info {
* macros/functions for gaining access to the thread information structure
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define INIT_THREAD_INFO(tsk) \
{ \
@@ -99,7 +99,7 @@ static __always_inline struct thread_info *current_thread_info(void)
return ti;
}
-#else /* !__ASSEMBLY__ */
+#else /* !__ASSEMBLER__ */
/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg,sp) \
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
index 573df8cea200..3edaebeef423 100644
--- a/arch/xtensa/include/asm/tlbflush.h
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -20,7 +20,7 @@
#define ITLB_HIT_BIT 3
#define DTLB_HIT_BIT 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* TLB flushing:
*
@@ -201,5 +201,5 @@ static inline unsigned long read_itlb_translation (int way)
return tmp;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _XTENSA_TLBFLUSH_H */
diff --git a/arch/xtensa/include/uapi/asm/ptrace.h b/arch/xtensa/include/uapi/asm/ptrace.h
index 9115e86ebc75..6e89ea301438 100644
--- a/arch/xtensa/include/uapi/asm/ptrace.h
+++ b/arch/xtensa/include/uapi/asm/ptrace.h
@@ -42,7 +42,7 @@
#define PTRACE_GETFDPIC_EXEC 0
#define PTRACE_GETFDPIC_INTERP 1
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct user_pt_regs {
__u32 pc;
diff --git a/arch/xtensa/include/uapi/asm/signal.h b/arch/xtensa/include/uapi/asm/signal.h
index b8c824dd4b74..8060f1914400 100644
--- a/arch/xtensa/include/uapi/asm/signal.h
+++ b/arch/xtensa/include/uapi/asm/signal.h
@@ -19,7 +19,7 @@
#define _NSIG_BPW 32
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
@@ -77,7 +77,7 @@ typedef struct {
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm-generic/signal-defs.h>
@@ -106,5 +106,5 @@ typedef struct sigaltstack {
__kernel_size_t ss_size;
} stack_t;
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _UAPI_XTENSA_SIGNAL_H */
diff --git a/arch/xtensa/include/uapi/asm/types.h b/arch/xtensa/include/uapi/asm/types.h
index 12db8ac38750..2e9217a06ebf 100644
--- a/arch/xtensa/include/uapi/asm/types.h
+++ b/arch/xtensa/include/uapi/asm/types.h
@@ -14,7 +14,7 @@
#include <asm-generic/int-ll64.h>
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
# define __XTENSA_UL(x) (x)
# define __XTENSA_UL_CONST(x) x
#else
@@ -23,7 +23,7 @@
# define __XTENSA_UL_CONST(x) ___XTENSA_UL_CONST(x)
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#endif
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 0cb1e9873aab..50e51047e1fe 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -454,17 +454,10 @@ static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
*/
static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
{
- struct bfq_io_cq *icq;
- unsigned long flags;
-
if (!current->io_context)
return NULL;
- spin_lock_irqsave(&q->queue_lock, flags);
- icq = icq_to_bic(ioc_lookup_icq(q));
- spin_unlock_irqrestore(&q->queue_lock, flags);
-
- return icq;
+ return icq_to_bic(ioc_lookup_icq(q));
}
/*
@@ -701,17 +694,13 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
struct bfq_data *bfqd = data->q->elevator->elevator_data;
struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
- int depth;
- unsigned limit = data->q->nr_requests;
- unsigned int act_idx;
+ unsigned int limit, act_idx;
/* Sync reads have full depth available */
- if (op_is_sync(opf) && !op_is_write(opf)) {
- depth = 0;
- } else {
- depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
- limit = (limit * depth) >> bfqd->full_depth_shift;
- }
+ if (op_is_sync(opf) && !op_is_write(opf))
+ limit = data->q->nr_requests;
+ else
+ limit = bfqd->async_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) {
/* Fast path to check if bfqq is already allocated. */
@@ -725,14 +714,16 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
* available requests and thus starve other entities.
*/
if (bfqq_request_over_limit(bfqd, bic, opf, act_idx, limit)) {
- depth = 1;
+ limit = 1;
break;
}
}
+
bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
- __func__, bfqd->wr_busy_queues, op_is_sync(opf), depth);
- if (depth)
- data->shallow_depth = depth;
+ __func__, bfqd->wr_busy_queues, op_is_sync(opf), limit);
+
+ if (limit < data->q->nr_requests)
+ data->shallow_depth = limit;
}
static struct bfq_queue *
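After this rework bfq_limit_depth() reasons directly in request counts: sync reads keep the full nr_requests, everything else starts from the precomputed async_depths[][] entry, and a queue found over its per-actuator budget is throttled to a depth of 1; shallow_depth is only installed when the result actually limits. A condensed sketch of that selection (the helper and its parameters abbreviate the logic above, not new API):

/* over_limit stands in for bfqq_request_over_limit(). */
static unsigned int pick_limit(bool sync_read, bool over_limit,
			       unsigned int nr_requests,
			       unsigned int async_depth)
{
	unsigned int limit = sync_read ? nr_requests : async_depth;

	if (over_limit)
		limit = 1;
	/* caller sets shallow_depth only if limit < nr_requests */
	return limit;
}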
@@ -2457,15 +2448,8 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
- struct request *free = NULL;
- /*
- * bfq_bic_lookup grabs the queue_lock: invoke it now and
- * store its return value for later use, to avoid nesting
- * queue_lock inside the bfqd->lock. We assume that the bic
- * returned by bfq_bic_lookup does not go away before
- * bfqd->lock is taken.
- */
struct bfq_io_cq *bic = bfq_bic_lookup(q);
+ struct request *free = NULL;
bool ret;
spin_lock_irq(&bfqd->lock);
@@ -5863,8 +5847,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
goto out;
}
- bfqq = kmem_cache_alloc_node(bfq_pool,
- GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
+ bfqq = kmem_cache_alloc_node(bfq_pool, GFP_NOWAIT | __GFP_ZERO,
bfqd->queue->node);
if (bfqq) {
@@ -7128,9 +7111,8 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
*/
static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
{
- unsigned int depth = 1U << bt->sb.shift;
+ unsigned int nr_requests = bfqd->queue->nr_requests;
- bfqd->full_depth_shift = bt->sb.shift;
/*
* In-word depths if no bfq_queue is being weight-raised:
* leaving 25% of tags only for sync reads.
@@ -7142,13 +7124,13 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
* limit 'something'.
*/
/* no more than 50% of tags for async I/O */
- bfqd->word_depths[0][0] = max(depth >> 1, 1U);
+ bfqd->async_depths[0][0] = max(nr_requests >> 1, 1U);
/*
* no more than 75% of tags for sync writes (25% extra tags
* w.r.t. async I/O, to prevent async I/O from starving sync
* writes)
*/
- bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U);
+ bfqd->async_depths[0][1] = max((nr_requests * 3) >> 2, 1U);
/*
* In-word depths in case some bfq_queue is being weight-
@@ -7158,9 +7140,9 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
* shortage.
*/
/* no more than ~18% of tags for async I/O */
- bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U);
+ bfqd->async_depths[1][0] = max((nr_requests * 3) >> 4, 1U);
/* no more than ~37% of tags for sync writes (~20% extra tags) */
- bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U);
+ bfqd->async_depths[1][1] = max((nr_requests * 6) >> 4, 1U);
}
static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
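Because the table now stores request counts rather than sbitmap per-word depths, the four async_depths entries can be checked by hand; a worked example (not program output):

/*
 * nr_requests = 64:
 *   async_depths[0][0] = 64 >> 1       = 32  (50%: async, no weight-raising)
 *   async_depths[0][1] = (64 * 3) >> 2 = 48  (75%: sync writes, no wr)
 *   async_depths[1][0] = (64 * 3) >> 4 = 12  (~18%: async, weight-raised)
 *   async_depths[1][1] = (64 * 6) >> 4 = 24  (~37%: sync writes, wr)
 */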
@@ -7232,22 +7214,16 @@ static void bfq_init_root_group(struct bfq_group *root_group,
root_group->sched_data.bfq_class_idle_last_service = jiffies;
}
-static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
{
struct bfq_data *bfqd;
- struct elevator_queue *eq;
unsigned int i;
struct blk_independent_access_ranges *ia_ranges = q->disk->ia_ranges;
- eq = elevator_alloc(q, e);
- if (!eq)
- return -ENOMEM;
-
bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
- if (!bfqd) {
- kobject_put(&eq->kobj);
+ if (!bfqd)
return -ENOMEM;
- }
+
eq->elevator_data = bfqd;
spin_lock_irq(&q->queue_lock);
@@ -7405,7 +7381,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
out_free:
kfree(bfqd);
- kobject_put(&eq->kobj);
return -ENOMEM;
}
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 687a3a7ba784..34a498e6b2a5 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -427,9 +427,6 @@ struct bfq_iocq_bfqq_data {
*/
bool saved_IO_bound;
- u64 saved_io_start_time;
- u64 saved_tot_idle_time;
-
/*
* Same purpose as the previous fields for the values of the
* field keeping the queue's belonging to a large burst
@@ -450,6 +447,9 @@ struct bfq_iocq_bfqq_data {
*/
unsigned int saved_weight;
+ u64 saved_io_start_time;
+ u64 saved_tot_idle_time;
+
/*
* Similar to previous fields: save wr information.
*/
@@ -457,13 +457,13 @@ struct bfq_iocq_bfqq_data {
unsigned long saved_last_wr_start_finish;
unsigned long saved_service_from_wr;
unsigned long saved_wr_start_at_switch_to_srt;
- unsigned int saved_wr_cur_max_time;
struct bfq_ttime saved_ttime;
+ unsigned int saved_wr_cur_max_time;
/* Save also injection state */
- u64 saved_last_serv_time_ns;
unsigned int saved_inject_limit;
unsigned long saved_decrease_time_jif;
+ u64 saved_last_serv_time_ns;
/* candidate queue for a stable merge (due to close creation time) */
struct bfq_queue *stable_merge_bfqq;
@@ -813,8 +813,7 @@ struct bfq_data {
* Depth limits used in bfq_limit_depth (see comments on the
* function)
*/
- unsigned int word_depths[2][2];
- unsigned int full_depth_shift;
+ unsigned int async_depths[2][2];
/*
* Number of independent actuators. This is equal to 1 in
diff --git a/block/bio.c b/block/bio.c
index 92c512e876c8..3b371a5da159 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -653,13 +653,13 @@ static void bio_truncate(struct bio *bio, unsigned new_size)
bio_for_each_segment(bv, bio, iter) {
if (done + bv.bv_len > new_size) {
- unsigned offset;
+ size_t offset;
if (!truncated)
offset = new_size - done;
else
offset = 0;
- zero_user(bv.bv_page, bv.bv_offset + offset,
+ memzero_page(bv.bv_page, bv.bv_offset + offset,
bv.bv_len - offset);
truncated = true;
}
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5936db7f8475..fe9ebd6a2e14 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -394,7 +394,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
/* allocate */
if (!new_blkg) {
- new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
+ new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
goto err_put_css;
@@ -1467,7 +1467,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
spin_lock_init(&blkcg->lock);
refcount_set(&blkcg->online_pin, 1);
- INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
+ INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list);
@@ -1630,7 +1630,7 @@ retry:
pd_prealloc = NULL;
} else {
pd = pol->pd_alloc_fn(disk, blkg->blkcg,
- GFP_NOWAIT | __GFP_NOWARN);
+ GFP_NOWAIT);
}
if (!pd) {
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index ce82770c72ab..9fda3906e5f5 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -308,24 +308,23 @@ int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
#ifdef CONFIG_BLK_ICQ
/**
- * ioc_lookup_icq - lookup io_cq from ioc
+ * ioc_lookup_icq - lookup io_cq from ioc in io issue path
* @q: the associated request_queue
*
* Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
- * with @q->queue_lock held.
+ * from the I/O issue path; it returns NULL if current is issuing I/O to @q
+ * for the first time, otherwise it returns a valid icq.
*/
struct io_cq *ioc_lookup_icq(struct request_queue *q)
{
struct io_context *ioc = current->io_context;
struct io_cq *icq;
- lockdep_assert_held(&q->queue_lock);
-
/*
* icq's are indexed from @ioc using radix tree and hint pointer,
- * both of which are protected with RCU. All removals are done
- * holding both q and ioc locks, and we're holding q lock - if we
- * find a icq which points to us, it's guaranteed to be valid.
+ * both of which are protected with RCU. The I/O issue path ensures that
+ * both the request_queue and the current task stay valid, so a found icq
+ * is guaranteed to remain valid until the I/O is done.
*/
rcu_read_lock();
icq = rcu_dereference(ioc->icq_hint);
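The rewritten lookup leans entirely on RCU plus the lifetime guarantees of the issue path, so the queue_lock round-trip disappears. A kernel-style sketch of the underlying hint-then-tree pattern (the types and the xarray container are placeholders; the real code uses the ioc radix tree and icq_hint):

#include <linux/rcupdate.h>
#include <linux/xarray.h>

struct icq_sketch { unsigned long key; };

static struct icq_sketch *lookup_sketch(struct xarray *tree,
					struct icq_sketch __rcu **hint,
					unsigned long key)
{
	struct icq_sketch *icq;

	rcu_read_lock();
	icq = rcu_dereference(*hint);		/* fast path: cached hint */
	if (!icq || icq->key != key)
		icq = xa_load(tree, key);	/* slow path: tree lookup */
	rcu_read_unlock();
	return icq;
}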
@@ -419,10 +418,7 @@ struct io_cq *ioc_find_get_icq(struct request_queue *q)
task_unlock(current);
} else {
get_io_context(ioc);
-
- spin_lock_irq(&q->queue_lock);
icq = ioc_lookup_icq(q);
- spin_unlock_irq(&q->queue_lock);
}
if (!icq) {
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 55a0fd105147..e2ce4a28e6c9 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -374,64 +374,17 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
-static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_idx)
-{
- if (blk_mq_is_shared_tags(q->tag_set->flags)) {
- hctx->sched_tags = q->sched_shared_tags;
- return 0;
- }
-
- hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
- q->nr_requests);
-
- if (!hctx->sched_tags)
- return -ENOMEM;
- return 0;
-}
-
-static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
-{
- blk_mq_free_rq_map(queue->sched_shared_tags);
- queue->sched_shared_tags = NULL;
-}
-
/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
struct blk_mq_hw_ctx *hctx;
unsigned long i;
- queue_for_each_hw_ctx(q, hctx, i) {
- if (hctx->sched_tags) {
- if (!blk_mq_is_shared_tags(flags))
- blk_mq_free_rq_map(hctx->sched_tags);
- hctx->sched_tags = NULL;
- }
- }
+ queue_for_each_hw_ctx(q, hctx, i)
+ hctx->sched_tags = NULL;
if (blk_mq_is_shared_tags(flags))
- blk_mq_exit_sched_shared_tags(q);
-}
-
-static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
-{
- struct blk_mq_tag_set *set = queue->tag_set;
-
- /*
- * Set initial depth at max so that we don't need to reallocate for
- * updating nr_requests.
- */
- queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
- BLK_MQ_NO_HCTX_IDX,
- MAX_SCHED_RQ);
- if (!queue->sched_shared_tags)
- return -ENOMEM;
-
- blk_mq_tag_update_sched_shared_tags(queue);
-
- return 0;
+ q->sched_shared_tags = NULL;
}
void blk_mq_sched_reg_debugfs(struct request_queue *q)
@@ -458,8 +411,140 @@ void blk_mq_sched_unreg_debugfs(struct request_queue *q)
mutex_unlock(&q->debugfs_mutex);
}
+void blk_mq_free_sched_tags(struct elevator_tags *et,
+ struct blk_mq_tag_set *set)
+{
+ unsigned long i;
+
+ /* Shared tags are stored at index 0 in @tags. */
+ if (blk_mq_is_shared_tags(set->flags))
+ blk_mq_free_map_and_rqs(set, et->tags[0], BLK_MQ_NO_HCTX_IDX);
+ else {
+ for (i = 0; i < et->nr_hw_queues; i++)
+ blk_mq_free_map_and_rqs(set, et->tags[i], i);
+ }
+
+ kfree(et);
+}
+
+void blk_mq_free_sched_tags_batch(struct xarray *et_table,
+ struct blk_mq_tag_set *set)
+{
+ struct request_queue *q;
+ struct elevator_tags *et;
+
+ lockdep_assert_held_write(&set->update_nr_hwq_lock);
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ /*
+ * Accessing q->elevator without holding q->elevator_lock is
+ * safe because we hold set->update_nr_hwq_lock here in the
+ * writer context, so scheduler update/switch code (which
+ * acquires the same lock but in the reader context) can't run
+ * concurrently.
+ */
+ if (q->elevator) {
+ et = xa_load(et_table, q->id);
+ if (unlikely(!et))
+ WARN_ON_ONCE(1);
+ else
+ blk_mq_free_sched_tags(et, set);
+ }
+ }
+}
+
+struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+ unsigned int nr_hw_queues)
+{
+ unsigned int nr_tags;
+ int i;
+ struct elevator_tags *et;
+ gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
+
+ if (blk_mq_is_shared_tags(set->flags))
+ nr_tags = 1;
+ else
+ nr_tags = nr_hw_queues;
+
+ et = kmalloc(sizeof(struct elevator_tags) +
+ nr_tags * sizeof(struct blk_mq_tags *), gfp);
+ if (!et)
+ return NULL;
+ /*
+ * Default to twice the smaller of the hardware queue_depth and
+ * 128, since we don't split into sync/async like the old code
+ * did. Additionally, this is a per-hw-queue depth.
+ */
+ et->nr_requests = 2 * min_t(unsigned int, set->queue_depth,
+ BLKDEV_DEFAULT_RQ);
+ et->nr_hw_queues = nr_hw_queues;
+
+ if (blk_mq_is_shared_tags(set->flags)) {
+ /* Shared tags are stored at index 0 in @tags. */
+ et->tags[0] = blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX,
+ MAX_SCHED_RQ);
+ if (!et->tags[0])
+ goto out;
+ } else {
+ for (i = 0; i < et->nr_hw_queues; i++) {
+ et->tags[i] = blk_mq_alloc_map_and_rqs(set, i,
+ et->nr_requests);
+ if (!et->tags[i])
+ goto out_unwind;
+ }
+ }
+
+ return et;
+out_unwind:
+ while (--i >= 0)
+ blk_mq_free_map_and_rqs(set, et->tags[i], i);
+out:
+ kfree(et);
+ return NULL;
+}
+
+int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
+ struct blk_mq_tag_set *set, unsigned int nr_hw_queues)
+{
+ struct request_queue *q;
+ struct elevator_tags *et;
+ gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
+
+ lockdep_assert_held_write(&set->update_nr_hwq_lock);
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ /*
+ * Accessing q->elevator without holding q->elevator_lock is
+ * safe because we hold set->update_nr_hwq_lock here in the
+ * writer context, so scheduler update/switch code (which
+ * acquires the same lock but in the reader context) can't run
+ * concurrently.
+ */
+ if (q->elevator) {
+ et = blk_mq_alloc_sched_tags(set, nr_hw_queues);
+ if (!et)
+ goto out_unwind;
+ if (xa_insert(et_table, q->id, et, gfp))
+ goto out_free_tags;
+ }
+ }
+ return 0;
+out_free_tags:
+ blk_mq_free_sched_tags(et, set);
+out_unwind:
+ list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) {
+ if (q->elevator) {
+ et = xa_load(et_table, q->id);
+ if (et)
+ blk_mq_free_sched_tags(et, set);
+ }
+ }
+ return -ENOMEM;
+}
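blk_mq_alloc_sched_tags_batch() above is the classic all-or-nothing shape: walk the list forward allocating, and on failure walk it back (list_for_each_entry_continue_reverse) releasing whatever was already set up. A minimal standalone sketch of the same shape over an array (names hypothetical):

#include <stdlib.h>

/* Allocate n buffers or none: unwind prior allocations on failure. */
static int alloc_all(void **bufs, int n, size_t sz)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(sz);
		if (!bufs[i])
			goto out_unwind;
	}
	return 0;

out_unwind:
	while (--i >= 0)	/* mirrors the out_unwind label above */
		free(bufs[i]);
	return -1;
}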
+
/* caller must have a reference to @e, will grab another one if successful */
-int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
+ struct elevator_tags *et)
{
unsigned int flags = q->tag_set->flags;
struct blk_mq_hw_ctx *hctx;
@@ -467,36 +552,33 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
unsigned long i;
int ret;
- /*
- * Default to double of smaller one between hw queue_depth and 128,
- * since we don't split into sync/async like the old code did.
- * Additionally, this is a per-hw queue depth.
- */
- q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
- BLKDEV_DEFAULT_RQ);
+ eq = elevator_alloc(q, e, et);
+ if (!eq)
+ return -ENOMEM;
+
+ q->nr_requests = et->nr_requests;
if (blk_mq_is_shared_tags(flags)) {
- ret = blk_mq_init_sched_shared_tags(q);
- if (ret)
- return ret;
+ /* Shared tags are stored at index 0 in @et->tags. */
+ q->sched_shared_tags = et->tags[0];
+ blk_mq_tag_update_sched_shared_tags(q);
}
queue_for_each_hw_ctx(q, hctx, i) {
- ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
- if (ret)
- goto err_free_map_and_rqs;
+ if (blk_mq_is_shared_tags(flags))
+ hctx->sched_tags = q->sched_shared_tags;
+ else
+ hctx->sched_tags = et->tags[i];
}
- ret = e->ops.init_sched(q, e);
+ ret = e->ops.init_sched(q, eq);
if (ret)
- goto err_free_map_and_rqs;
+ goto out;
queue_for_each_hw_ctx(q, hctx, i) {
if (e->ops.init_hctx) {
ret = e->ops.init_hctx(hctx, i);
if (ret) {
- eq = q->elevator;
- blk_mq_sched_free_rqs(q);
blk_mq_exit_sched(q, eq);
kobject_put(&eq->kobj);
return ret;
@@ -505,10 +587,9 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
}
return 0;
-err_free_map_and_rqs:
- blk_mq_sched_free_rqs(q);
+out:
blk_mq_sched_tags_teardown(q, flags);
-
+ kobject_put(&eq->kobj);
q->elevator = NULL;
return ret;
}
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 1326526bb733..b554e1d55950 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -18,10 +18,20 @@ void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
-int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
+ struct elevator_tags *et);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_rqs(struct request_queue *q);
+struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+ unsigned int nr_hw_queues);
+int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
+ struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
+void blk_mq_free_sched_tags(struct elevator_tags *et,
+ struct blk_mq_tag_set *set);
+void blk_mq_free_sched_tags_batch(struct xarray *et_table,
+ struct blk_mq_tag_set *set);
+
static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9692fa4c3ef2..b67d6c02eceb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4974,12 +4974,13 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
* Switch back to the elevator type stored in the xarray.
*/
static void blk_mq_elv_switch_back(struct request_queue *q,
- struct xarray *elv_tbl)
+ struct xarray *elv_tbl, struct xarray *et_tbl)
{
struct elevator_type *e = xa_load(elv_tbl, q->id);
+ struct elevator_tags *t = xa_load(et_tbl, q->id);
/* The elv_update_nr_hw_queues unfreezes the queue. */
- elv_update_nr_hw_queues(q, e);
+ elv_update_nr_hw_queues(q, e, t);
/* Drop the reference acquired in blk_mq_elv_switch_none. */
if (e)
@@ -5031,7 +5032,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
int prev_nr_hw_queues = set->nr_hw_queues;
unsigned int memflags;
int i;
- struct xarray elv_tbl;
+ struct xarray elv_tbl, et_tbl;
lockdep_assert_held(&set->tag_list_lock);
@@ -5044,6 +5045,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
memflags = memalloc_noio_save();
+ xa_init(&et_tbl);
+ if (blk_mq_alloc_sched_tags_batch(&et_tbl, set, nr_hw_queues) < 0)
+ goto out_memalloc_restore;
+
xa_init(&elv_tbl);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
@@ -5087,7 +5092,7 @@ fallback:
switch_back:
/* The blk_mq_elv_switch_back unfreezes queue for us. */
list_for_each_entry(q, &set->tag_list, tag_set_list)
- blk_mq_elv_switch_back(q, &elv_tbl);
+ blk_mq_elv_switch_back(q, &elv_tbl, &et_tbl);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_sysfs_register_hctxs(q);
@@ -5098,7 +5103,8 @@ switch_back:
}
xa_destroy(&elv_tbl);
-
+ xa_destroy(&et_tbl);
+out_memalloc_restore:
memalloc_noio_restore(memflags);
/* Free the excess tags when nr_hw_queues shrink. */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 91449147bae9..07874e9b609f 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -62,16 +62,24 @@ EXPORT_SYMBOL(blk_set_stacking_limits);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
struct queue_limits *lim)
{
+ u64 io_opt = lim->io_opt;
+
/*
* For read-ahead of large files to be effective, we need to read ahead
- * at least twice the optimal I/O size.
+ * at least twice the optimal I/O size. For rotational devices that do
+ * not report an optimal I/O size (e.g. ATA HDDs), use the maximum I/O
+ * size to avoid falling back to the (rather inefficient) small default
+ * read-ahead size.
*
* There is no hardware limitation for the read-ahead size and the user
* might have increased the read-ahead size through sysfs, so don't ever
* decrease it.
*/
+ if (!io_opt && (lim->features & BLK_FEAT_ROTATIONAL))
+ io_opt = (u64)lim->max_sectors << SECTOR_SHIFT;
+
bdi->ra_pages = max3(bdi->ra_pages,
- lim->io_opt * 2 / PAGE_SIZE,
+ io_opt * 2 >> PAGE_SHIFT,
VM_READAHEAD_PAGES);
bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
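A worked example of the new fallback (illustrative numbers, 4 KiB pages): a rotational disk reporting io_opt == 0 and max_sectors == 2560 gets io_opt = 2560 << SECTOR_SHIFT = 1310720 bytes, so the read-ahead floor becomes 640 pages instead of the small VM_READAHEAD_PAGES default:

/*
 * io_opt   = 2560 << 9           = 1310720 bytes (1280 KiB)
 * ra_pages = max3(bdi->ra_pages,
 *                 1310720 * 2 >> 12,   // 640 pages
 *                 VM_READAHEAD_PAGES);
 */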
@@ -312,8 +320,12 @@ int blk_validate_limits(struct queue_limits *lim)
pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
return -EINVAL;
}
- if (lim->physical_block_size < lim->logical_block_size)
+ if (lim->physical_block_size < lim->logical_block_size) {
lim->physical_block_size = lim->logical_block_size;
+ } else if (!is_power_of_2(lim->physical_block_size)) {
+ pr_warn("Invalid physical block size (%d)\n", lim->physical_block_size);
+ return -EINVAL;
+ }
/*
* The minimum I/O size defaults to the physical block size unless
@@ -388,12 +400,19 @@ int blk_validate_limits(struct queue_limits *lim)
lim->max_discard_sectors =
min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
+ /*
+ * When discard is not supported, discard_granularity should be reported
+ * as 0 to userspace.
+ */
+ if (lim->max_discard_sectors)
+ lim->discard_granularity =
+ max(lim->discard_granularity, lim->physical_block_size);
+ else
+ lim->discard_granularity = 0;
+
if (!lim->max_discard_segments)
lim->max_discard_segments = 1;
- if (lim->discard_granularity < lim->physical_block_size)
- lim->discard_granularity = lim->physical_block_size;
-
/*
* By default there is no limit on the segment boundary alignment,
* but if there is one it can't be smaller than the page size as
@@ -849,7 +868,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
}
/* chunk_sectors a multiple of the physical block size? */
- if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
+ if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
t->chunk_sectors = 0;
t->flags |= BLK_FLAG_MISALIGNED;
ret = -1;
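A worked example of the rewritten alignment test (illustrative numbers): physical_block_size = 4096 bytes is 4096 >> SECTOR_SHIFT = 8 sectors, so chunk_sectors = 65536 passes (65536 % 8 == 0) while chunk_sectors = 65540 is flagged misaligned (65540 % 8 == 4). Unlike the old mask test, the modulo form does not depend on the sector-converted block size being a power of two:

/*
 * old: (65540 << 9) & (4096 - 1)  -- byte-based, power-of-two mask only
 * new:  65540 % (4096 >> 9) == 4  -- sector-based, any granularity
 */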
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 396cded255ea..4a7f1a349998 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -847,7 +847,7 @@ static void blk_queue_release(struct kobject *kobj)
/* nothing to do here, all data is associated with the parent gendisk */
}
-static const struct kobj_type blk_queue_ktype = {
+const struct kobj_type blk_queue_ktype = {
.default_groups = blk_queue_attr_groups,
.sysfs_ops = &queue_sysfs_ops,
.release = blk_queue_release,
@@ -875,15 +875,14 @@ int blk_register_queue(struct gendisk *disk)
struct request_queue *q = disk->queue;
int ret;
- kobject_init(&disk->queue_kobj, &blk_queue_ktype);
ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
if (ret < 0)
- goto out_put_queue_kobj;
+ return ret;
if (queue_is_mq(q)) {
ret = blk_mq_sysfs_register(disk);
if (ret)
- goto out_put_queue_kobj;
+ goto out_del_queue_kobj;
}
mutex_lock(&q->sysfs_lock);
@@ -903,9 +902,9 @@ int blk_register_queue(struct gendisk *disk)
if (queue_is_mq(q))
elevator_set_default(q);
- wbt_enable_default(disk);
blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
+ wbt_enable_default(disk);
/* Now everything is ready and send out KOBJ_ADD uevent */
kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
@@ -934,8 +933,8 @@ out_debugfs_remove:
mutex_unlock(&q->sysfs_lock);
if (queue_is_mq(q))
blk_mq_sysfs_unregister(disk);
-out_put_queue_kobj:
- kobject_put(&disk->queue_kobj);
+out_del_queue_kobj:
+ kobject_del(&disk->queue_kobj);
return ret;
}
@@ -986,5 +985,4 @@ void blk_unregister_queue(struct gendisk *disk)
elevator_set_none(q);
blk_debugfs_remove(disk);
- kobject_put(&disk->queue_kobj);
}
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index a50d4cd55f41..eb8037bae0bd 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -85,8 +85,8 @@ struct rq_wb {
u64 sync_issue;
void *sync_cookie;
- unsigned long last_issue; /* last non-throttled issue */
- unsigned long last_comp; /* last non-throttled comp */
+ unsigned long last_issue; /* issue time of last read rq */
+ unsigned long last_comp; /* completion time of last read rq */
unsigned long min_lat_nsec;
struct rq_qos rqos;
struct rq_wait rq_wait[WBT_NUM_RWQ];
@@ -248,13 +248,14 @@ static void wbt_done(struct rq_qos *rqos, struct request *rq)
struct rq_wb *rwb = RQWB(rqos);
if (!wbt_is_tracked(rq)) {
- if (rwb->sync_cookie == rq) {
- rwb->sync_issue = 0;
- rwb->sync_cookie = NULL;
- }
+ if (wbt_is_read(rq)) {
+ if (rwb->sync_cookie == rq) {
+ rwb->sync_issue = 0;
+ rwb->sync_cookie = NULL;
+ }
- if (wbt_is_read(rq))
wb_timestamp(rwb, &rwb->last_comp);
+ }
} else {
WARN_ON_ONCE(rq == rwb->sync_cookie);
__wbt_done(rqos, wbt_flags(rq));
diff --git a/block/blk.h b/block/blk.h
index 76901a39997f..46f566f9b126 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -12,6 +12,7 @@
#include "blk-crypto-internal.h"
struct elevator_type;
+struct elevator_tags;
/*
* Default upper limit for the software max_sectors limit used for regular I/Os.
@@ -28,6 +29,7 @@ struct elevator_type;
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
+extern const struct kobj_type blk_queue_ktype;
extern struct dentry *blk_debugfs_root;
struct blk_flush_queue {
@@ -330,7 +332,8 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
bool blk_insert_flush(struct request *rq);
-void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e);
+void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e,
+ struct elevator_tags *t);
void elevator_set_default(struct request_queue *q);
void elevator_set_none(struct request_queue *q);
diff --git a/block/elevator.c b/block/elevator.c
index 88f8f36bed98..fe96c6f4753c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -54,6 +54,8 @@ struct elv_change_ctx {
struct elevator_queue *old;
/* for registering new elevator */
struct elevator_queue *new;
+ /* holds sched tags data */
+ struct elevator_tags *et;
};
static DEFINE_SPINLOCK(elv_list_lock);
@@ -132,7 +134,7 @@ static struct elevator_type *elevator_find_get(const char *name)
static const struct kobj_type elv_ktype;
struct elevator_queue *elevator_alloc(struct request_queue *q,
- struct elevator_type *e)
+ struct elevator_type *e, struct elevator_tags *et)
{
struct elevator_queue *eq;
@@ -145,10 +147,10 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
kobject_init(&eq->kobj, &elv_ktype);
mutex_init(&eq->sysfs_lock);
hash_init(eq->hash);
+ eq->et = et;
return eq;
}
-EXPORT_SYMBOL(elevator_alloc);
static void elevator_release(struct kobject *kobj)
{
@@ -166,7 +168,6 @@ static void elevator_exit(struct request_queue *q)
lockdep_assert_held(&q->elevator_lock);
ioc_clear_queue(q);
- blk_mq_sched_free_rqs(q);
mutex_lock(&e->sysfs_lock);
blk_mq_exit_sched(q, e);
@@ -592,7 +593,7 @@ static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
}
if (new_e) {
- ret = blk_mq_init_sched(q, new_e);
+ ret = blk_mq_init_sched(q, new_e, ctx->et);
if (ret)
goto out_unfreeze;
ctx->new = q->elevator;
@@ -627,8 +628,10 @@ static void elv_exit_and_release(struct request_queue *q)
elevator_exit(q);
mutex_unlock(&q->elevator_lock);
blk_mq_unfreeze_queue(q, memflags);
- if (e)
+ if (e) {
+ blk_mq_free_sched_tags(e->et, q->tag_set);
kobject_put(&e->kobj);
+ }
}
static int elevator_change_done(struct request_queue *q,
@@ -641,6 +644,7 @@ static int elevator_change_done(struct request_queue *q,
&ctx->old->flags);
elv_unregister_queue(q, ctx->old);
+ blk_mq_free_sched_tags(ctx->old->et, q->tag_set);
kobject_put(&ctx->old->kobj);
if (enable_wbt)
wbt_enable_default(q->disk);
@@ -659,9 +663,16 @@ static int elevator_change_done(struct request_queue *q,
static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
{
unsigned int memflags;
+ struct blk_mq_tag_set *set = q->tag_set;
int ret = 0;
- lockdep_assert_held(&q->tag_set->update_nr_hwq_lock);
+ lockdep_assert_held(&set->update_nr_hwq_lock);
+
+ if (strncmp(ctx->name, "none", 4)) {
+ ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues);
+ if (!ctx->et)
+ return -ENOMEM;
+ }
memflags = blk_mq_freeze_queue(q);
/*
@@ -681,6 +692,11 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
blk_mq_unfreeze_queue(q, memflags);
if (!ret)
ret = elevator_change_done(q, ctx);
+ /*
+ * Free sched tags if they were allocated but the elevator switch failed.
+ */
+ if (ctx->et && !ctx->new)
+ blk_mq_free_sched_tags(ctx->et, set);
return ret;
}
@@ -689,8 +705,10 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
* The I/O scheduler depends on the number of hardware queues, this forces a
* reattachment when nr_hw_queues changes.
*/
-void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e)
+void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e,
+ struct elevator_tags *t)
{
+ struct blk_mq_tag_set *set = q->tag_set;
struct elv_change_ctx ctx = {};
int ret = -ENODEV;
@@ -698,6 +716,7 @@ void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e)
if (e && !blk_queue_dying(q) && blk_queue_registered(q)) {
ctx.name = e->elevator_name;
+ ctx.et = t;
mutex_lock(&q->elevator_lock);
/* force to reattach elevator after nr_hw_queue is updated */
@@ -707,6 +726,11 @@ void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e)
blk_mq_unfreeze_queue_nomemrestore(q);
if (!ret)
WARN_ON_ONCE(elevator_change_done(q, &ctx));
+ /*
+ * Free sched tags if they were allocated but the elevator switch failed.
+ */
+ if (t && !ctx.new)
+ blk_mq_free_sched_tags(t, set);
}
/*
diff --git a/block/elevator.h b/block/elevator.h
index a07ce773a38f..adc5c157e17e 100644
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -23,8 +23,17 @@ enum elv_merge {
struct blk_mq_alloc_data;
struct blk_mq_hw_ctx;
+struct elevator_tags {
+ /* num. of hardware queues for which tags are allocated */
+ unsigned int nr_hw_queues;
+ /* depth used while allocating tags */
+ unsigned int nr_requests;
+ /* shared tag is stored at index 0 */
+ struct blk_mq_tags *tags[];
+};
+
struct elevator_mq_ops {
- int (*init_sched)(struct request_queue *, struct elevator_type *);
+ int (*init_sched)(struct request_queue *, struct elevator_queue *);
void (*exit_sched)(struct elevator_queue *);
int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
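struct elevator_tags above ends in a C99 flexible array member, so the header and the per-queue tag pointers come from a single allocation sized at runtime (exactly what blk_mq_alloc_sched_tags() does with its kmalloc of sizeof(struct elevator_tags) + nr_tags * sizeof(struct blk_mq_tags *)). A runnable sketch of the pattern (names hypothetical):

#include <stdlib.h>

struct tags_sketch {
	unsigned int nr_hw_queues;
	unsigned int nr_requests;
	void *tags[];	/* flexible array member, sized at alloc time */
};

static struct tags_sketch *alloc_tags(unsigned int nr_tags)
{
	/* one allocation covers the header plus the pointer array */
	return calloc(1, sizeof(struct tags_sketch) +
			 nr_tags * sizeof(void *));
}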
@@ -113,6 +122,7 @@ struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
struct elevator_queue
{
struct elevator_type *type;
+ struct elevator_tags *et;
void *elevator_data;
struct kobject kobj;
struct mutex sysfs_lock;
@@ -152,8 +162,8 @@ ssize_t elv_iosched_show(struct gendisk *disk, char *page);
ssize_t elv_iosched_store(struct gendisk *disk, const char *page, size_t count);
extern bool elv_bio_merge_ok(struct request *, struct bio *);
-extern struct elevator_queue *elevator_alloc(struct request_queue *,
- struct elevator_type *);
+struct elevator_queue *elevator_alloc(struct request_queue *,
+ struct elevator_type *, struct elevator_tags *);
/*
* Helper functions.
diff --git a/block/genhd.c b/block/genhd.c
index c26733f6324b..9bbc38d12792 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1303,6 +1303,7 @@ static void disk_release(struct device *dev)
disk_free_zone_resources(disk);
xa_destroy(&disk->part_tbl);
+ kobject_put(&disk->queue_kobj);
disk->queue->disk = NULL;
blk_put_queue(disk->queue);
@@ -1486,6 +1487,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
INIT_LIST_HEAD(&disk->slave_bdevs);
#endif
mutex_init(&disk->rqos_state_mutex);
+ kobject_init(&disk->queue_kobj, &blk_queue_ktype);
return disk;
out_erase_part0:
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 4dba8405bd01..70cbc7b2deb4 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -157,10 +157,7 @@ struct kyber_queue_data {
*/
struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
- /*
- * Async request percentage, converted to per-word depth for
- * sbitmap_get_shallow().
- */
+ /* Number of allowed async requests. */
unsigned int async_depth;
struct kyber_cpu_latency __percpu *cpu_latency;
@@ -402,20 +399,13 @@ err:
return ERR_PTR(ret);
}
-static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
+static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
{
struct kyber_queue_data *kqd;
- struct elevator_queue *eq;
-
- eq = elevator_alloc(q, e);
- if (!eq)
- return -ENOMEM;
kqd = kyber_queue_data_alloc(q);
- if (IS_ERR(kqd)) {
- kobject_put(&eq->kobj);
+ if (IS_ERR(kqd))
return PTR_ERR(kqd);
- }
blk_stat_enable_accounting(q);
@@ -454,10 +444,8 @@ static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
{
struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
struct blk_mq_tags *tags = hctx->sched_tags;
- unsigned int shift = tags->bitmap_tags.sb.shift;
-
- kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
+ kqd->async_depth = hctx->queue->nr_requests * KYBER_ASYNC_PERCENT / 100U;
sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
}
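kyber's async_depth is now plain percentage math on nr_requests instead of a per-word sbitmap depth. A worked example, assuming KYBER_ASYNC_PERCENT is 75 as in the current driver:

/*
 * nr_requests = 256 -> async_depth = 256 * 75 / 100 = 192
 * nr_requests =  64 -> async_depth =  64 * 75 / 100 =  48
 */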
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 2edf1cac06d5..b9b7cdf1d3c9 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -488,20 +488,6 @@ unlock:
}
/*
- * 'depth' is a number in the range 1..INT_MAX representing a number of
- * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
- * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
- * Values larger than q->nr_requests have the same effect as q->nr_requests.
- */
-static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
-{
- struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
- const unsigned int nrr = hctx->queue->nr_requests;
-
- return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
-}
-
-/*
* Called by __blk_mq_alloc_request(). The shallow_depth value set by this
* function is used by __blk_mq_get_tag().
*/
@@ -517,7 +503,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
* Throttle asynchronous requests and writes such that these requests
* do not block the allocation of synchronous requests.
*/
- data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
+ data->shallow_depth = dd->async_depth;
}
/* Called by blk_mq_update_nr_requests(). */
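For contrast, the deleted dd_to_word_depth() rescaled a request count into sbitmap's per-word range; with shallow depths now interpreted as request counts throughout, dd_limit_depth() passes async_depth straight through. What the old mapping computed, with illustrative numbers:

/*
 * qdepth = 48, nr_requests = 256, sbitmap word shift = 6 (64 bits/word):
 *   ((48 << 6) + 256 - 1) / 256 = (3072 + 255) / 256 = 12
 * i.e. 48 of 256 requests became 12 of each 64-bit word; the new code
 * simply uses 48 directly.
 */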
@@ -568,20 +554,14 @@ static void dd_exit_sched(struct elevator_queue *e)
/*
* initialize elevator private data (deadline_data).
*/
-static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
+static int dd_init_sched(struct request_queue *q, struct elevator_queue *eq)
{
struct deadline_data *dd;
- struct elevator_queue *eq;
enum dd_prio prio;
- int ret = -ENOMEM;
-
- eq = elevator_alloc(q, e);
- if (!eq)
- return ret;
dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
if (!dd)
- goto put_eq;
+ return -ENOMEM;
eq->elevator_data = dd;
@@ -608,10 +588,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
q->elevator = eq;
return 0;
-
-put_eq:
- kobject_put(&eq->kobj);
- return ret;
}
/*
diff --git a/crypto/ahash.c b/crypto/ahash.c
index bc84a07c924c..a227793d2c5b 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -29,19 +29,6 @@
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
-struct crypto_hash_walk {
- const char *data;
-
- unsigned int offset;
- unsigned int flags;
-
- struct page *pg;
- unsigned int entrylen;
-
- unsigned int total;
- struct scatterlist *sg;
-};
-
static int ahash_def_finup(struct ahash_request *req);
static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm)
@@ -112,8 +99,8 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
return hash_walk_next(walk);
}
-static int crypto_hash_walk_first(struct ahash_request *req,
- struct crypto_hash_walk *walk)
+int crypto_hash_walk_first(struct ahash_request *req,
+ struct crypto_hash_walk *walk)
{
walk->total = req->nbytes;
walk->entrylen = 0;
@@ -133,8 +120,9 @@ static int crypto_hash_walk_first(struct ahash_request *req,
return hash_walk_new_entry(walk);
}
+EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
-static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
return err;
@@ -160,11 +148,7 @@ static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
return hash_walk_new_entry(walk);
}
-
-static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
-{
- return !(walk->entrylen | walk->total);
-}
+EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
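With crypto_hash_walk_first()/crypto_hash_walk_done() exported, drivers can iterate scatterlist data the same way ahash.c does internally. A sketch of the canonical loop, modeled on the in-tree users (process_chunk() is a hypothetical driver hook returning 0 on success or a -errno):

static int hash_update_sketch(struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, nbytes))
		nbytes = process_chunk(walk.data, nbytes);

	return nbytes;
}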
/*
* For an ahash tfm that is using an shash algorithm (instead of an ahash
@@ -347,6 +331,12 @@ static int ahash_do_req_chain(struct ahash_request *req,
if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE)
return -ENOSYS;
+ if (!crypto_ahash_need_fallback(tfm))
+ return -ENOSYS;
+
+ if (crypto_hash_no_export_core(tfm))
+ return -ENOSYS;
+
{
u8 state[HASH_MAX_STATESIZE];
@@ -954,6 +944,10 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
base->cra_reqsize > MAX_SYNC_HASH_REQSIZE)
return -EINVAL;
+ if (base->cra_flags & CRYPTO_ALG_NEED_FALLBACK &&
+ base->cra_flags & CRYPTO_ALG_NO_FALLBACK)
+ return -EINVAL;
+
err = hash_prepare_alg(&alg->halg);
if (err)
return err;
@@ -962,7 +956,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
if ((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) &
- (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT))
+ (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT) &&
+ !(base->cra_flags & CRYPTO_ALG_NO_FALLBACK))
base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
if (!alg->setkey)
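[Note: with crypto_hash_walk_first()/crypto_hash_walk_done() now exported, drivers can consume the walk with the usual loop. A minimal sketch (do_hash() is a hypothetical driver callback; error handling abbreviated):

	struct crypto_hash_walk walk;
	int nbytes, err = 0;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, err))
		err = do_hash(walk.data, nbytes); /* hypothetical helper */
]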
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 5e2b2680d7db..9e4bb7fbde25 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -119,7 +119,7 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
for (i = 0; i < disks; i++) {
if (blocks[i] == NULL) {
BUG_ON(i > disks - 3); /* P or Q can't be zero */
- srcs[i] = (void*)raid6_empty_zero_page;
+ srcs[i] = raid6_get_zero_page();
} else {
srcs[i] = page_address(blocks[i]) + offsets[i];
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 354b8cd5537f..539ea5b378dc 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -414,7 +414,7 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
if (blocks[i] == NULL)
- ptrs[i] = (void *) raid6_empty_zero_page;
+ ptrs[i] = raid6_get_zero_page();
else
ptrs[i] = page_address(blocks[i]) + offs[i];
@@ -497,7 +497,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
if (blocks[i] == NULL)
- ptrs[i] = (void*)raid6_empty_zero_page;
+ ptrs[i] = raid6_get_zero_page();
else
ptrs[i] = page_address(blocks[i]) + offs[i];
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 5bb6f8d88cc2..efff54e707cb 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -34,6 +34,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
static struct workqueue_struct *cryptd_wq;
struct cryptd_cpu_queue {
+ local_lock_t bh_lock;
struct crypto_queue queue;
struct work_struct work;
};
@@ -110,6 +111,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+ local_lock_init(&cpu_queue->bh_lock);
}
pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
return 0;
@@ -135,6 +137,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
refcount_t *refcnt;
local_bh_disable();
+ local_lock_nested_bh(&queue->cpu_queue->bh_lock);
cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
@@ -151,6 +154,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
refcount_inc(refcnt);
out:
+ local_unlock_nested_bh(&queue->cpu_queue->bh_lock);
local_bh_enable();
return err;
@@ -169,8 +173,10 @@ static void cryptd_queue_worker(struct work_struct *work)
* Only handle one request at a time to avoid hogging crypto workqueue.
*/
local_bh_disable();
+ __local_lock_nested_bh(&cpu_queue->bh_lock);
backlog = crypto_get_backlog(&cpu_queue->queue);
req = crypto_dequeue_request(&cpu_queue->queue);
+ __local_unlock_nested_bh(&cpu_queue->bh_lock);
local_bh_enable();
if (!req)
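[Note: the lock/unlock pairs added above follow the nested-BH locking pattern: local_bh_disable() alone no longer guarantees per-CPU exclusion on PREEMPT_RT, so the per-CPU queue gets an explicit local_lock_t. Skeleton of the pattern (illustrative types and names):

	struct my_cpu_queue {
		local_lock_t bh_lock;
		struct crypto_queue queue;
	};

	local_bh_disable();
	local_lock_nested_bh(&pcpu_queues->bh_lock); /* pcpu_queues is __percpu */
	/* ... operate on this_cpu_ptr(pcpu_queues)->queue ... */
	local_unlock_nested_bh(&pcpu_queues->bh_lock);
	local_bh_enable();
]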
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 445d3c113ee1..18e1689efe12 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -74,7 +74,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
struct crypto_engine_alg *alg;
struct crypto_engine_op *op;
unsigned long flags;
- bool was_busy = false;
int ret;
spin_lock_irqsave(&engine->queue_lock, flags);
@@ -83,12 +82,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (!engine->retry_support && engine->cur_req)
goto out;
- /* If another context is idling then defer */
- if (engine->idling) {
- kthread_queue_work(engine->kworker, &engine->pump_requests);
- goto out;
- }
-
/* Check if the engine queue is idle */
if (!crypto_queue_len(&engine->queue) || !engine->running) {
if (!engine->busy)
@@ -102,15 +95,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
}
engine->busy = false;
- engine->idling = true;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
-
- if (engine->unprepare_crypt_hardware &&
- engine->unprepare_crypt_hardware(engine))
- dev_err(engine->dev, "failed to unprepare crypt hardware\n");
-
- spin_lock_irqsave(&engine->queue_lock, flags);
- engine->idling = false;
goto out;
}
@@ -129,22 +113,11 @@ start_request:
if (!engine->retry_support)
engine->cur_req = async_req;
- if (engine->busy)
- was_busy = true;
- else
+ if (!engine->busy)
engine->busy = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
- /* Until here we get the request need to be encrypted successfully */
- if (!was_busy && engine->prepare_crypt_hardware) {
- ret = engine->prepare_crypt_hardware(engine);
- if (ret) {
- dev_err(engine->dev, "failed to prepare crypt hardware\n");
- goto req_err_1;
- }
- }
-
alg = container_of(async_req->tfm->__crt_alg,
struct crypto_engine_alg, base);
op = &alg->op;
@@ -195,17 +168,6 @@ retry:
out:
spin_unlock_irqrestore(&engine->queue_lock, flags);
- /*
- * Batch requests is possible only if
- * hardware can enqueue multiple requests
- */
- if (engine->do_batch_requests) {
- ret = engine->do_batch_requests(engine);
- if (ret)
- dev_err(engine->dev, "failed to do batch requests: %d\n",
- ret);
- }
-
return;
}
@@ -462,12 +424,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
* crypto-engine queue.
* @dev: the device attached with one hardware engine
* @retry_support: whether hardware has support for retry mechanism
- * @cbk_do_batch: pointer to a callback function to be invoked when executing
- * a batch of requests.
- * This has the form:
- * callback(struct crypto_engine *engine)
- * where:
- * engine: the crypto engine structure.
* @rt: whether this queue is set to run as a realtime task
* @qlen: maximum size of the crypto-engine queue
*
@@ -476,7 +432,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
*/
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
- int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen)
{
struct crypto_engine *engine;
@@ -492,14 +447,8 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
engine->rt = rt;
engine->running = false;
engine->busy = false;
- engine->idling = false;
engine->retry_support = retry_support;
engine->priv_data = dev;
- /*
- * Batch requests is possible only if
- * hardware has support for retry mechanism.
- */
- engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
snprintf(engine->name, sizeof(engine->name),
"%s-engine", dev_name(dev));
@@ -534,7 +483,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
*/
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
- return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
+ return crypto_engine_alloc_init_and_set(dev, false, rt,
CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
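[Note: any driver that passed a batch callback must drop the argument once the parameter is gone. The shape of the required caller-side update (hypothetical caller, not part of this hunk):

	-	engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true, qlen);
	+	engine = crypto_engine_alloc_init_and_set(dev, true, true, qlen);
]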
diff --git a/crypto/deflate.c b/crypto/deflate.c
index fe8e4ad0fee1..21404515dc77 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -48,9 +48,14 @@ static void *deflate_alloc_stream(void)
return ctx;
}
+static void deflate_free_stream(void *ctx)
+{
+ kvfree(ctx);
+}
+
static struct crypto_acomp_streams deflate_streams = {
.alloc_ctx = deflate_alloc_stream,
- .free_ctx = kvfree,
+ .free_ctx = deflate_free_stream,
};
static int deflate_compress_one(struct acomp_req *req,
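[Note: wrapping kvfree() instead of storing it in .free_ctx directly is presumably about indirect-call type safety (e.g. under kCFI): the prototypes do not match exactly, so a call through the mismatched pointer would not type-check:

	void (*free_ctx)(void *ctx);	/* expected callback type */
	void kvfree(const void *addr);	/* mismatched prototype */
]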
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index c24d4ff2b4a8..1266eb790708 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -144,7 +144,7 @@ int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
* Inject the data from the previous loop into the pool. This data is
* not considered to contain any entropy, but it stirs the pool a bit.
*/
- ret = crypto_shash_update(desc, intermediary, sizeof(intermediary));
+ ret = crypto_shash_update(hash_state_desc, intermediary, sizeof(intermediary));
if (ret)
goto err;
@@ -157,11 +157,12 @@ int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
* conditioning operation to have an identical amount of input data
* according to section 3.1.5.
*/
- if (!stuck) {
- ret = crypto_shash_update(hash_state_desc, (u8 *)&time,
- sizeof(__u64));
+ if (stuck) {
+ time = 0;
}
+ ret = crypto_shash_update(hash_state_desc, (u8 *)&time, sizeof(__u64));
+
err:
shash_desc_zero(desc);
memzero_explicit(intermediary, sizeof(intermediary));
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index 3b390bd6c119..3f93cdc9a7af 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -145,6 +145,7 @@ struct rand_data {
*/
#define JENT_ENTROPY_SAFETY_FACTOR 64
+#include <linux/array_size.h>
#include <linux/fips.h>
#include <linux/minmax.h>
#include "jitterentropy.h"
@@ -178,7 +179,6 @@ static const unsigned int jent_apt_cutoff_lookup[15] = {
static const unsigned int jent_apt_cutoff_permanent_lookup[15] = {
355, 447, 479, 494, 502, 507, 510, 512,
512, 512, 512, 512, 512, 512, 512 };
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
static void jent_apt_init(struct rand_data *ec, unsigned int osr)
{
diff --git a/crypto/krb5/selftest.c b/crypto/krb5/selftest.c
index 2a81a6315a0d..4519c572d37e 100644
--- a/crypto/krb5/selftest.c
+++ b/crypto/krb5/selftest.c
@@ -152,6 +152,7 @@ static int krb5_test_one_prf(const struct krb5_prf_test *test)
out:
clear_buf(&result);
+ clear_buf(&prf);
clear_buf(&octet);
clear_buf(&key);
return ret;
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index c33d29a523e0..c3a9d4f2995c 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -178,7 +178,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
- int cpu, cpu_index;
+ int cpu_index;
struct aead_instance *inst = aead_alg_instance(tfm);
struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -187,10 +187,7 @@ static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
cpumask_weight(cpu_online_mask);
- ctx->cb_cpu = cpumask_first(cpu_online_mask);
- for (cpu = 0; cpu < cpu_index; cpu++)
- ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
-
+ ctx->cb_cpu = cpumask_nth(cpu_index, cpu_online_mask);
cipher = crypto_spawn_aead(&ictx->spawn);
if (IS_ERR(cipher))
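[Note: cpumask_nth(n, mask) returns the n-th set CPU (0-based) and replaces the removed cpumask_first()/cpumask_next() loop one-for-one. Worked example (CPU set is illustrative): with CPUs {0, 2, 3} online, cpumask_nth(1, cpu_online_mask) returns 2 -- exactly what the old loop computed for cpu_index == 1.]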
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index d636e04b55d5..ee33ba21ae2b 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -4186,7 +4186,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "authenc(hmac(sha1),cbc(aes))",
.generic_driver = "authenc(hmac-sha1-lib,cbc(aes-generic))",
.test = alg_test_aead,
- .fips_allowed = 1,
.suite = {
.aead = __VECS(hmac_sha1_aes_cbc_tv_temp)
}
@@ -4207,7 +4206,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "authenc(hmac(sha1),ctr(aes))",
.test = alg_test_null,
- .fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha1),ecb(cipher_null))",
.generic_driver = "authenc(hmac-sha1-lib,ecb-cipher_null)",
@@ -4218,7 +4216,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
.test = alg_test_null,
- .fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha224),cbc(des))",
.generic_driver = "authenc(hmac-sha224-lib,cbc(des-generic))",
@@ -4712,6 +4709,7 @@ static const struct alg_test_desc alg_test_descs[] = {
*/
.alg = "drbg_nopr_hmac_sha384",
.test = alg_test_null,
+ .fips_allowed = 1
}, {
.alg = "drbg_nopr_hmac_sha512",
.test = alg_test_drbg,
@@ -4730,6 +4728,7 @@ static const struct alg_test_desc alg_test_descs[] = {
/* covered by drbg_nopr_sha256 test */
.alg = "drbg_nopr_sha384",
.test = alg_test_null,
+ .fips_allowed = 1
}, {
.alg = "drbg_nopr_sha512",
.fips_allowed = 1,
@@ -4761,6 +4760,7 @@ static const struct alg_test_desc alg_test_descs[] = {
/* covered by drbg_pr_hmac_sha256 test */
.alg = "drbg_pr_hmac_sha384",
.test = alg_test_null,
+ .fips_allowed = 1
}, {
.alg = "drbg_pr_hmac_sha512",
.test = alg_test_null,
@@ -4776,6 +4776,7 @@ static const struct alg_test_desc alg_test_descs[] = {
/* covered by drbg_pr_sha256 test */
.alg = "drbg_pr_sha384",
.test = alg_test_null,
+ .fips_allowed = 1
}, {
.alg = "drbg_pr_sha512",
.fips_allowed = 1,
@@ -5077,7 +5078,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "hmac(sha1)",
.generic_driver = "hmac-sha1-lib",
.test = alg_test_hash,
- .fips_allowed = 1,
.suite = {
.hash = __VECS(hmac_sha1_tv_template)
}
@@ -5291,6 +5291,36 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(fcrypt_pcbc_tv_template)
}
}, {
+#if IS_ENABLED(CONFIG_CRYPTO_PHMAC_S390)
+ .alg = "phmac(sha224)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = __VECS(hmac_sha224_tv_template)
+ }
+ }, {
+ .alg = "phmac(sha256)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = __VECS(hmac_sha256_tv_template)
+ }
+ }, {
+ .alg = "phmac(sha384)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = __VECS(hmac_sha384_tv_template)
+ }
+ }, {
+ .alg = "phmac(sha512)",
+ .test = alg_test_hash,
+ .fips_allowed = 1,
+ .suite = {
+ .hash = __VECS(hmac_sha512_tv_template)
+ }
+ }, {
+#endif
.alg = "pkcs1(rsa,none)",
.test = alg_test_sig,
.suite = {
@@ -5418,7 +5448,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "sha1",
.generic_driver = "sha1-lib",
.test = alg_test_hash,
- .fips_allowed = 1,
.suite = {
.hash = __VECS(sha1_tv_template)
}
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 7570e11b4ee6..c2a19cb0879d 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -12,188 +12,304 @@
#include <linux/net.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>
-#include <crypto/internal/scompress.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
-#define ZSTD_DEF_LEVEL 3
+#define ZSTD_DEF_LEVEL 3
+#define ZSTD_MAX_WINDOWLOG 18
+#define ZSTD_MAX_SIZE BIT(ZSTD_MAX_WINDOWLOG)
struct zstd_ctx {
zstd_cctx *cctx;
zstd_dctx *dctx;
- void *cwksp;
- void *dwksp;
+ size_t wksp_size;
+ zstd_parameters params;
+ u8 wksp[] __aligned(8);
};
-static zstd_parameters zstd_params(void)
-{
- return zstd_get_params(ZSTD_DEF_LEVEL, 0);
-}
+static DEFINE_MUTEX(zstd_stream_lock);
-static int zstd_comp_init(struct zstd_ctx *ctx)
+static void *zstd_alloc_stream(void)
{
- int ret = 0;
- const zstd_parameters params = zstd_params();
- const size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
+ zstd_parameters params;
+ struct zstd_ctx *ctx;
+ size_t wksp_size;
- ctx->cwksp = vzalloc(wksp_size);
- if (!ctx->cwksp) {
- ret = -ENOMEM;
- goto out;
- }
+ params = zstd_get_params(ZSTD_DEF_LEVEL, ZSTD_MAX_SIZE);
- ctx->cctx = zstd_init_cctx(ctx->cwksp, wksp_size);
- if (!ctx->cctx) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(ctx->cwksp);
- goto out;
+ wksp_size = max_t(size_t,
+ zstd_cstream_workspace_bound(&params.cParams),
+ zstd_dstream_workspace_bound(ZSTD_MAX_SIZE));
+ if (!wksp_size)
+ return ERR_PTR(-EINVAL);
+
+ ctx = kvmalloc(sizeof(*ctx) + wksp_size, GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->params = params;
+ ctx->wksp_size = wksp_size;
+
+ return ctx;
}
-static int zstd_decomp_init(struct zstd_ctx *ctx)
+static void zstd_free_stream(void *ctx)
+{
+ kvfree(ctx);
+}
+
+static struct crypto_acomp_streams zstd_streams = {
+ .alloc_ctx = zstd_alloc_stream,
+ .free_ctx = zstd_free_stream,
+};
+
+static int zstd_init(struct crypto_acomp *acomp_tfm)
{
int ret = 0;
- const size_t wksp_size = zstd_dctx_workspace_bound();
- ctx->dwksp = vzalloc(wksp_size);
- if (!ctx->dwksp) {
- ret = -ENOMEM;
- goto out;
- }
+ mutex_lock(&zstd_stream_lock);
+ ret = crypto_acomp_alloc_streams(&zstd_streams);
+ mutex_unlock(&zstd_stream_lock);
- ctx->dctx = zstd_init_dctx(ctx->dwksp, wksp_size);
- if (!ctx->dctx) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
return ret;
-out_free:
- vfree(ctx->dwksp);
- goto out;
}
-static void zstd_comp_exit(struct zstd_ctx *ctx)
+static void zstd_exit(struct crypto_acomp *acomp_tfm)
{
- vfree(ctx->cwksp);
- ctx->cwksp = NULL;
- ctx->cctx = NULL;
+ crypto_acomp_free_streams(&zstd_streams);
}
-static void zstd_decomp_exit(struct zstd_ctx *ctx)
+static int zstd_compress_one(struct acomp_req *req, struct zstd_ctx *ctx,
+ const void *src, void *dst, unsigned int *dlen)
{
- vfree(ctx->dwksp);
- ctx->dwksp = NULL;
- ctx->dctx = NULL;
-}
+ unsigned int out_len;
-static int __zstd_init(void *ctx)
-{
- int ret;
+ ctx->cctx = zstd_init_cctx(ctx->wksp, ctx->wksp_size);
+ if (!ctx->cctx)
+ return -EINVAL;
- ret = zstd_comp_init(ctx);
- if (ret)
- return ret;
- ret = zstd_decomp_init(ctx);
- if (ret)
- zstd_comp_exit(ctx);
- return ret;
+ out_len = zstd_compress_cctx(ctx->cctx, dst, req->dlen, src, req->slen,
+ &ctx->params);
+ if (zstd_is_error(out_len))
+ return -EINVAL;
+
+ *dlen = out_len;
+
+ return 0;
}
-static void *zstd_alloc_ctx(void)
+static int zstd_compress(struct acomp_req *req)
{
- int ret;
+ struct crypto_acomp_stream *s;
+ unsigned int pos, scur, dcur;
+ unsigned int total_out = 0;
+ bool data_available = true;
+ zstd_out_buffer outbuf;
+ struct acomp_walk walk;
+ zstd_in_buffer inbuf;
struct zstd_ctx *ctx;
+ size_t pending_bytes;
+ size_t num_bytes;
+ int ret;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ s = crypto_acomp_lock_stream_bh(&zstd_streams);
+ ctx = s->ctx;
- ret = __zstd_init(ctx);
- if (ret) {
- kfree(ctx);
- return ERR_PTR(ret);
+ ret = acomp_walk_virt(&walk, req, true);
+ if (ret)
+ goto out;
+
+ ctx->cctx = zstd_init_cstream(&ctx->params, 0, ctx->wksp, ctx->wksp_size);
+ if (!ctx->cctx) {
+ ret = -EINVAL;
+ goto out;
}
- return ctx;
-}
+ do {
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur) {
+ ret = -ENOSPC;
+ goto out;
+ }
-static void __zstd_exit(void *ctx)
-{
- zstd_comp_exit(ctx);
- zstd_decomp_exit(ctx);
-}
+ outbuf.pos = 0;
+ outbuf.dst = (u8 *)walk.dst.virt.addr;
+ outbuf.size = dcur;
-static void zstd_free_ctx(void *ctx)
-{
- __zstd_exit(ctx);
- kfree_sensitive(ctx);
-}
+ do {
+ scur = acomp_walk_next_src(&walk);
+ if (dcur == req->dlen && scur == req->slen) {
+ ret = zstd_compress_one(req, ctx, walk.src.virt.addr,
+ walk.dst.virt.addr, &total_out);
+ acomp_walk_done_src(&walk, scur);
+ acomp_walk_done_dst(&walk, dcur);
+ goto out;
+ }
-static int __zstd_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
- size_t out_len;
- struct zstd_ctx *zctx = ctx;
- const zstd_parameters params = zstd_params();
+ if (scur) {
+ inbuf.pos = 0;
+ inbuf.src = walk.src.virt.addr;
+ inbuf.size = scur;
+ } else {
+ data_available = false;
+ break;
+ }
- out_len = zstd_compress_cctx(zctx->cctx, dst, *dlen, src, slen, &params);
- if (zstd_is_error(out_len))
- return -EINVAL;
- *dlen = out_len;
- return 0;
-}
+ num_bytes = zstd_compress_stream(ctx->cctx, &outbuf, &inbuf);
+ if (ZSTD_isError(num_bytes)) {
+ ret = -EIO;
+ goto out;
+ }
-static int zstd_scompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
-{
- return __zstd_compress(src, slen, dst, dlen, ctx);
+ pending_bytes = zstd_flush_stream(ctx->cctx, &outbuf);
+ if (ZSTD_isError(pending_bytes)) {
+ ret = -EIO;
+ goto out;
+ }
+ acomp_walk_done_src(&walk, inbuf.pos);
+ } while (dcur != outbuf.pos);
+
+ total_out += outbuf.pos;
+ acomp_walk_done_dst(&walk, dcur);
+ } while (data_available);
+
+ pos = outbuf.pos;
+ num_bytes = zstd_end_stream(ctx->cctx, &outbuf);
+ if (ZSTD_isError(num_bytes))
+ ret = -EIO;
+ else
+ total_out += (outbuf.pos - pos);
+
+out:
+ if (ret)
+ req->dlen = 0;
+ else
+ req->dlen = total_out;
+
+ crypto_acomp_unlock_stream_bh(s);
+
+ return ret;
}
-static int __zstd_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int zstd_decompress_one(struct acomp_req *req, struct zstd_ctx *ctx,
+ const void *src, void *dst, unsigned int *dlen)
{
size_t out_len;
- struct zstd_ctx *zctx = ctx;
- out_len = zstd_decompress_dctx(zctx->dctx, dst, *dlen, src, slen);
+ ctx->dctx = zstd_init_dctx(ctx->wksp, ctx->wksp_size);
+ if (!ctx->dctx)
+ return -EINVAL;
+
+ out_len = zstd_decompress_dctx(ctx->dctx, dst, req->dlen, src, req->slen);
if (zstd_is_error(out_len))
return -EINVAL;
+
*dlen = out_len;
+
return 0;
}
-static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
+static int zstd_decompress(struct acomp_req *req)
{
- return __zstd_decompress(src, slen, dst, dlen, ctx);
-}
+ struct crypto_acomp_stream *s;
+ unsigned int total_out = 0;
+ unsigned int scur, dcur;
+ zstd_out_buffer outbuf;
+ struct acomp_walk walk;
+ zstd_in_buffer inbuf;
+ struct zstd_ctx *ctx;
+ size_t pending_bytes;
+ int ret;
-static struct scomp_alg scomp = {
- .alloc_ctx = zstd_alloc_ctx,
- .free_ctx = zstd_free_ctx,
- .compress = zstd_scompress,
- .decompress = zstd_sdecompress,
- .base = {
- .cra_name = "zstd",
- .cra_driver_name = "zstd-scomp",
- .cra_module = THIS_MODULE,
+ s = crypto_acomp_lock_stream_bh(&zstd_streams);
+ ctx = s->ctx;
+
+ ret = acomp_walk_virt(&walk, req, true);
+ if (ret)
+ goto out;
+
+ ctx->dctx = zstd_init_dstream(ZSTD_MAX_SIZE, ctx->wksp, ctx->wksp_size);
+ if (!ctx->dctx) {
+ ret = -EINVAL;
+ goto out;
}
+
+ do {
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ inbuf.pos = 0;
+ inbuf.size = scur;
+ inbuf.src = walk.src.virt.addr;
+ } else {
+ break;
+ }
+
+ do {
+ dcur = acomp_walk_next_dst(&walk);
+ if (dcur == req->dlen && scur == req->slen) {
+ ret = zstd_decompress_one(req, ctx, walk.src.virt.addr,
+ walk.dst.virt.addr, &total_out);
+ acomp_walk_done_dst(&walk, dcur);
+ acomp_walk_done_src(&walk, scur);
+ goto out;
+ }
+
+ if (!dcur) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ outbuf.pos = 0;
+ outbuf.dst = (u8 *)walk.dst.virt.addr;
+ outbuf.size = dcur;
+
+ pending_bytes = zstd_decompress_stream(ctx->dctx, &outbuf, &inbuf);
+ if (ZSTD_isError(pending_bytes)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ total_out += outbuf.pos;
+
+ acomp_walk_done_dst(&walk, outbuf.pos);
+ } while (inbuf.pos != scur);
+
+ acomp_walk_done_src(&walk, scur);
+ } while (ret == 0);
+
+out:
+ if (ret)
+ req->dlen = 0;
+ else
+ req->dlen = total_out;
+
+ crypto_acomp_unlock_stream_bh(s);
+
+ return ret;
+}
+
+static struct acomp_alg zstd_acomp = {
+ .base = {
+ .cra_name = "zstd",
+ .cra_driver_name = "zstd-generic",
+ .cra_flags = CRYPTO_ALG_REQ_VIRT,
+ .cra_module = THIS_MODULE,
+ },
+ .init = zstd_init,
+ .exit = zstd_exit,
+ .compress = zstd_compress,
+ .decompress = zstd_decompress,
};
static int __init zstd_mod_init(void)
{
- return crypto_register_scomp(&scomp);
+ return crypto_register_acomp(&zstd_acomp);
}
static void __exit zstd_mod_fini(void)
{
- crypto_unregister_scomp(&scomp);
+ crypto_unregister_acomp(&zstd_acomp);
}
module_init(zstd_mod_init);
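[Note: ZSTD_MAX_WINDOWLOG = 18 caps the streaming window at BIT(18) = 262144 bytes (256 KiB). Both workspace bounds above -- zstd_cstream_workspace_bound(&params.cParams) with params derived from ZSTD_MAX_SIZE, and zstd_dstream_workspace_bound(ZSTD_MAX_SIZE) -- are computed against that same limit, so the single shared wksp[] in struct zstd_ctx serves both compression and decompression.]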
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 5b85e21b55d9..4915a63866b0 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -83,6 +83,8 @@ source "drivers/pps/Kconfig"
source "drivers/ptp/Kconfig"
+source "drivers/dpll/Kconfig"
+
source "drivers/pinctrl/Kconfig"
source "drivers/gpio/Kconfig"
@@ -215,8 +217,6 @@ source "drivers/thunderbolt/Kconfig"
source "drivers/android/Kconfig"
-source "drivers/gpu/trace/Kconfig"
-
source "drivers/nvdimm/Kconfig"
source "drivers/dax/Kconfig"
@@ -251,6 +251,4 @@ source "drivers/hte/Kconfig"
source "drivers/cdx/Kconfig"
-source "drivers/dpll/Kconfig"
-
endmenu
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index e04549f64d69..2cff5419bd2f 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -361,7 +361,7 @@ aie2_sched_job_timedout(struct drm_sched_job *sched_job)
aie2_hwctx_restart(xdna, hwctx);
mutex_unlock(&xdna->dev_lock);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static const struct drm_sched_backend_ops sched_ops = {
@@ -566,7 +566,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
.size = MAX_CHAIN_CMDBUF_SIZE,
};
- abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp, true);
+ abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp);
if (IS_ERR(abo)) {
ret = PTR_ERR(abo);
goto free_cmd_bufs;
@@ -848,7 +848,8 @@ int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
goto up_sem;
}
- ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx);
+ ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx,
+ hwctx->client->filp->client_id);
if (ret) {
XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
goto free_chain;
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index 26831ec69f89..0f85a0105178 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -24,40 +24,79 @@
MODULE_IMPORT_NS("DMA_BUF");
static int
-amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
+amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
{
struct amdxdna_client *client = abo->client;
struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_mem *mem = &abo->mem;
+ struct amdxdna_gem_obj *heap;
u64 offset;
u32 align;
int ret;
+ mutex_lock(&client->mm_lock);
+
+ heap = client->dev_heap;
+ if (!heap) {
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
+ XDNA_ERR(xdna, "Invalid dev heap userptr");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (mem->size == 0 || mem->size > heap->mem.size) {
+ XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
+ mem->size, heap->mem.size);
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
- ret = drm_mm_insert_node_generic(&abo->dev_heap->mm, &abo->mm_node,
+ ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
mem->size, align,
0, DRM_MM_INSERT_BEST);
if (ret) {
XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
- return ret;
+ goto unlock_out;
}
mem->dev_addr = abo->mm_node.start;
- offset = mem->dev_addr - abo->dev_heap->mem.dev_addr;
- mem->userptr = abo->dev_heap->mem.userptr + offset;
- mem->pages = &abo->dev_heap->base.pages[offset >> PAGE_SHIFT];
- mem->nr_pages = mem->size >> PAGE_SHIFT;
-
- if (use_vmap) {
- mem->kva = vmap(mem->pages, mem->nr_pages, VM_MAP, PAGE_KERNEL);
- if (!mem->kva) {
- XDNA_ERR(xdna, "Failed to vmap");
- drm_mm_remove_node(&abo->mm_node);
- return -EFAULT;
- }
- }
+ offset = mem->dev_addr - heap->mem.dev_addr;
+ mem->userptr = heap->mem.userptr + offset;
+ mem->kva = heap->mem.kva + offset;
- return 0;
+ drm_gem_object_get(to_gobj(heap));
+
+unlock_out:
+ mutex_unlock(&client->mm_lock);
+
+ return ret;
+}
+
+static void
+amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
+{
+ mutex_destroy(&abo->lock);
+ kfree(abo);
+}
+
+static void
+amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_gem_obj *heap;
+
+ mutex_lock(&abo->client->mm_lock);
+
+ drm_mm_remove_node(&abo->mm_node);
+
+ heap = abo->client->dev_heap;
+ drm_gem_object_put(to_gobj(heap));
+
+ mutex_unlock(&abo->client->mm_lock);
}
static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
@@ -213,6 +252,20 @@ free_map:
return ret;
}
+static void amdxdna_gem_dev_obj_free(struct drm_gem_object *gobj)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
+
+ amdxdna_gem_heap_free(abo);
+ drm_gem_object_release(gobj);
+ amdxdna_gem_destroy_obj(abo);
+}
+
static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
struct vm_area_struct *vma)
{
@@ -374,19 +427,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
if (abo->pinned)
amdxdna_gem_unpin(abo);
- if (abo->type == AMDXDNA_BO_DEV) {
- mutex_lock(&abo->client->mm_lock);
- drm_mm_remove_node(&abo->mm_node);
- mutex_unlock(&abo->client->mm_lock);
-
- vunmap(abo->mem.kva);
- drm_gem_object_put(to_gobj(abo->dev_heap));
- drm_gem_object_release(gobj);
- mutex_destroy(&abo->lock);
- kfree(abo);
- return;
- }
-
if (abo->type == AMDXDNA_BO_DEV_HEAP)
drm_mm_takedown(&abo->mm);
@@ -402,7 +442,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
}
static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
- .free = amdxdna_gem_obj_free,
+ .free = amdxdna_gem_dev_obj_free,
};
static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
@@ -527,6 +567,7 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
@@ -553,18 +594,26 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
shmem->map_wc = false;
abo = to_xdna_obj(&shmem->base);
-
abo->type = AMDXDNA_BO_DEV_HEAP;
abo->client = client;
abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
+ ret = drm_gem_vmap(to_gobj(abo), &map);
+ if (ret) {
+ XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
+ goto release_obj;
+ }
+ abo->mem.kva = map.vaddr;
+
client->dev_heap = abo;
drm_gem_object_get(to_gobj(abo));
mutex_unlock(&client->mm_lock);
return abo;
+release_obj:
+ drm_gem_object_put(to_gobj(abo));
mm_unlock:
mutex_unlock(&client->mm_lock);
return ERR_PTR(ret);
@@ -573,58 +622,32 @@ mm_unlock:
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
- struct drm_file *filp, bool use_vmap)
+ struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
struct amdxdna_dev *xdna = to_xdna_dev(dev);
size_t aligned_sz = PAGE_ALIGN(args->size);
- struct amdxdna_gem_obj *abo, *heap;
+ struct amdxdna_gem_obj *abo;
int ret;
- mutex_lock(&client->mm_lock);
- heap = client->dev_heap;
- if (!heap) {
- ret = -EINVAL;
- goto mm_unlock;
- }
-
- if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
- XDNA_ERR(xdna, "Invalid dev heap userptr");
- ret = -EINVAL;
- goto mm_unlock;
- }
-
- if (args->size > heap->mem.size) {
- XDNA_ERR(xdna, "Invalid dev bo size 0x%llx, limit 0x%lx",
- args->size, heap->mem.size);
- ret = -EINVAL;
- goto mm_unlock;
- }
-
abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
- if (IS_ERR(abo)) {
- ret = PTR_ERR(abo);
- goto mm_unlock;
- }
+ if (IS_ERR(abo))
+ return abo;
+
to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
abo->type = AMDXDNA_BO_DEV;
abo->client = client;
- abo->dev_heap = heap;
- ret = amdxdna_gem_insert_node_locked(abo, use_vmap);
+
+ ret = amdxdna_gem_heap_alloc(abo);
if (ret) {
XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
- goto mm_unlock;
+ amdxdna_gem_destroy_obj(abo);
+ return ERR_PTR(ret);
}
- drm_gem_object_get(to_gobj(heap));
drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);
- mutex_unlock(&client->mm_lock);
return abo;
-
-mm_unlock:
- mutex_unlock(&client->mm_lock);
- return ERR_PTR(ret);
}
static struct amdxdna_gem_obj *
@@ -632,10 +655,10 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
struct drm_file *filp)
{
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
- struct iosys_map map;
int ret;
if (args->size > XDNA_MAX_CMD_BO_SIZE) {
@@ -692,7 +715,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f
abo = amdxdna_drm_create_dev_heap(dev, args, filp);
break;
case AMDXDNA_BO_DEV:
- abo = amdxdna_drm_alloc_dev_bo(dev, args, filp, false);
+ abo = amdxdna_drm_alloc_dev_bo(dev, args, filp);
break;
case AMDXDNA_BO_CMD:
abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
@@ -724,20 +747,13 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
int ret;
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->client->dev_heap;
+
if (is_import_bo(abo))
return 0;
- switch (abo->type) {
- case AMDXDNA_BO_SHMEM:
- case AMDXDNA_BO_DEV_HEAP:
- ret = drm_gem_shmem_pin(&abo->base);
- break;
- case AMDXDNA_BO_DEV:
- ret = drm_gem_shmem_pin(&abo->dev_heap->base);
- break;
- default:
- ret = -EOPNOTSUPP;
- }
+ ret = drm_gem_shmem_pin(&abo->base);
XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
return ret;
@@ -747,9 +763,6 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
{
int ret;
- if (abo->type == AMDXDNA_BO_DEV)
- abo = abo->dev_heap;
-
mutex_lock(&abo->lock);
ret = amdxdna_gem_pin_nolock(abo);
mutex_unlock(&abo->lock);
@@ -759,12 +772,12 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
{
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->client->dev_heap;
+
if (is_import_bo(abo))
return;
- if (abo->type == AMDXDNA_BO_DEV)
- abo = abo->dev_heap;
-
mutex_lock(&abo->lock);
drm_gem_shmem_unpin(&abo->base);
mutex_unlock(&abo->lock);
@@ -855,10 +868,12 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
if (is_import_bo(abo))
drm_clflush_sg(abo->base.sgt);
- else if (abo->type == AMDXDNA_BO_DEV)
- drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
- else
+ else if (abo->mem.kva)
+ drm_clflush_virt_range(abo->mem.kva + args->offset, args->size);
+ else if (abo->base.pages)
drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
+ else
+ drm_WARN(&xdna->ddev, 1, "Cannot get memory to flush");
amdxdna_gem_unpin(abo);
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
index aee97e971d6d..ae29db94a9d3 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.h
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -41,7 +41,6 @@ struct amdxdna_gem_obj {
/* The members below are only initialized when needed */
struct drm_mm mm; /* For AMDXDNA_BO_DEV_HEAP */
- struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */
struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
u32 assigned_hwctx;
struct dma_buf *dma_buf;
@@ -72,7 +71,7 @@ amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
- struct drm_file *filp, bool use_vmap);
+ struct drm_file *filp);
int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo);
int amdxdna_gem_pin(struct amdxdna_gem_obj *abo);
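[Note: the heap-allocation rework above derives a dev BO's CPU mappings from its device-address offset inside the heap BO. Worked example (addresses are illustrative): with heap->mem.dev_addr = 0x4000000 and the BO's mm node placed at 0x4010000, offset = 0x10000, so the BO's userptr and kva are the heap's userptr/kva plus 0x10000 -- the heap is vmapped once at creation, which is why the per-BO use_vmap path could be removed.]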
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index aa826033b0ce..ca3357acd127 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -20,8 +20,6 @@
DEFINE_XARRAY_ALLOC(accel_minors_xa);
-static struct dentry *accel_debugfs_root;
-
static const struct device_type accel_sysfs_device_minor = {
.name = "accel_minor"
};
@@ -74,17 +72,6 @@ static const struct drm_info_list accel_debugfs_list[] = {
#define ACCEL_DEBUGFS_ENTRIES ARRAY_SIZE(accel_debugfs_list)
/**
- * accel_debugfs_init() - Initialize debugfs for device
- * @dev: Pointer to the device instance.
- *
- * This function creates a root directory for the device in debugfs.
- */
-void accel_debugfs_init(struct drm_device *dev)
-{
- drm_debugfs_dev_init(dev, accel_debugfs_root);
-}
-
-/**
* accel_debugfs_register() - Register debugfs for device
* @dev: Pointer to the device instance.
*
@@ -194,7 +181,6 @@ static const struct file_operations accel_stub_fops = {
void accel_core_exit(void)
{
unregister_chrdev(ACCEL_MAJOR, "accel");
- debugfs_remove(accel_debugfs_root);
accel_sysfs_destroy();
WARN_ON(!xa_empty(&accel_minors_xa));
}
@@ -209,8 +195,6 @@ int __init accel_core_init(void)
goto error;
}
- accel_debugfs_root = debugfs_create_dir("accel", NULL);
-
ret = register_chrdev(ACCEL_MAJOR, "accel", &accel_stub_fops);
if (ret < 0)
DRM_ERROR("Cannot register ACCEL major: %d\n", ret);
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 68eebed3b050..80fa08bf57bd 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -1066,28 +1066,11 @@ static bool is_pci_link_healthy(struct hl_device *hdev)
return (device_id == hdev->pdev->device);
}
-static void stringify_time_of_last_heartbeat(struct hl_device *hdev, char *time_str, size_t size,
- bool is_pq_hb)
-{
- time64_t seconds = is_pq_hb ? hdev->heartbeat_debug_info.last_pq_heartbeat_ts
- : hdev->heartbeat_debug_info.last_eq_heartbeat_ts;
- struct tm tm;
-
- if (!seconds)
- return;
-
- time64_to_tm(seconds, 0, &tm);
-
- snprintf(time_str, size, "%ld-%02d-%02d %02d:%02d:%02d (UTC)",
- tm.tm_year + 1900, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
-}
-
static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
{
struct eq_heartbeat_debug_info *heartbeat_debug_info = &hdev->heartbeat_debug_info;
u32 cpu_q_id = heartbeat_debug_info->cpu_queue_id, pq_pi_mask = (HL_QUEUE_LENGTH << 1) - 1;
struct asic_fixed_properties *prop = &hdev->asic_prop;
- char pq_time_str[64] = "N/A", eq_time_str[64] = "N/A";
if (!prop->cpucp_info.eq_health_check_supported)
return true;
@@ -1095,17 +1078,15 @@ static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
if (!hdev->eq_heartbeat_received) {
dev_err(hdev->dev, "EQ heartbeat event was not received!\n");
- stringify_time_of_last_heartbeat(hdev, pq_time_str, sizeof(pq_time_str), true);
- stringify_time_of_last_heartbeat(hdev, eq_time_str, sizeof(eq_time_str), false);
dev_err(hdev->dev,
- "EQ: {CI %u, HB counter %u, last HB time: %s}, PQ: {PI: %u, CI: %u (%u), last HB time: %s}\n",
+ "EQ: {CI %u, HB counter %u, last HB time: %ptTs}, PQ: {PI: %u, CI: %u (%u), last HB time: %ptTs}\n",
hdev->event_queue.ci,
heartbeat_debug_info->heartbeat_event_counter,
- eq_time_str,
+ &hdev->heartbeat_debug_info.last_eq_heartbeat_ts,
hdev->kernel_queues[cpu_q_id].pi,
atomic_read(&hdev->kernel_queues[cpu_q_id].ci),
atomic_read(&hdev->kernel_queues[cpu_q_id].ci) & pq_pi_mask,
- pq_time_str);
+ &hdev->heartbeat_debug_info.last_pq_heartbeat_ts);
hl_eq_dump(hdev, &hdev->event_queue);
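[Note: %ptTs consumes a pointer to a time64_t and prints "YYYY-mm-dd HH:MM:SS" (the trailing 's' flag selects a space separator instead of ISO 8601's 'T'), which is what the removed stringify_time_of_last_heartbeat() assembled by hand. A minimal sketch:

	time64_t ts = ktime_get_real_seconds();

	pr_info("last HB time: %ptTs\n", &ts);
]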
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 601fdbe70179..61472a381904 100644
--- a/drivers/accel/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -1829,9 +1829,6 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
struct hl_ctx *ctx;
- if (!hl_dmabuf)
- return;
-
ctx = hl_dmabuf->ctx;
if (hl_dmabuf->memhash_hnode)
@@ -1859,7 +1856,12 @@ static int export_dmabuf(struct hl_ctx *ctx,
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct hl_device *hdev = ctx->hdev;
- int rc, fd;
+ CLASS(get_unused_fd, fd)(flags);
+
+ if (fd < 0) {
+ dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
+ return fd;
+ }
exp_info.ops = &habanalabs_dmabuf_ops;
exp_info.size = total_size;
@@ -1872,13 +1874,6 @@ static int export_dmabuf(struct hl_ctx *ctx,
return PTR_ERR(hl_dmabuf->dmabuf);
}
- fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
- if (fd < 0) {
- dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
- rc = fd;
- goto err_dma_buf_put;
- }
-
hl_dmabuf->ctx = ctx;
hl_ctx_get(hl_dmabuf->ctx);
atomic_inc(&ctx->hdev->dmabuf_export_cnt);
@@ -1890,13 +1885,9 @@ static int export_dmabuf(struct hl_ctx *ctx,
get_file(ctx->hpriv->file_priv->filp);
*dmabuf_fd = fd;
+ fd_install(take_fd(fd), hl_dmabuf->dmabuf->file);
return 0;
-
-err_dma_buf_put:
- hl_dmabuf->dmabuf->priv = NULL;
- dma_buf_put(hl_dmabuf->dmabuf);
- return rc;
}
static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset)
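[Note: the export path above moves to the scope-based fd guard from <linux/file.h>: the descriptor is reserved up front, auto-released on any early return, and take_fd() disarms the guard once fd_install() publishes it to userspace. Skeleton of the pattern:

	CLASS(get_unused_fd, fd)(flags);

	if (fd < 0)
		return fd;
	/* ... any error return from here auto-releases fd ... */
	fd_install(take_fd(fd), file);
	return 0;
]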
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 0e7748c5e117..3d6d52492536 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -704,6 +704,7 @@ static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 5497e7030e91..62ab1c654e63 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_DRV_H__
@@ -26,6 +26,7 @@
#define PCI_DEVICE_ID_ARL 0xad1d
#define PCI_DEVICE_ID_LNL 0x643e
#define PCI_DEVICE_ID_PTL_P 0xb03e
+#define PCI_DEVICE_ID_WCL 0xfd3e
#define IVPU_HW_IP_37XX 37
#define IVPU_HW_IP_40XX 40
@@ -165,6 +166,7 @@ struct ivpu_device {
int boot;
int jsm;
int tdr;
+ int inference;
int autosuspend;
int d0i3_entry_msg;
int state_dump_msg;
@@ -207,10 +209,11 @@ extern bool ivpu_force_snoop;
#define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5)
#define IVPU_TEST_MODE_MIP_DISABLE BIT(6)
#define IVPU_TEST_MODE_DISABLE_TIMEOUTS BIT(8)
-#define IVPU_TEST_MODE_TURBO BIT(9)
-#define IVPU_TEST_MODE_CLK_RELINQ_DISABLE BIT(10)
-#define IVPU_TEST_MODE_CLK_RELINQ_ENABLE BIT(11)
-#define IVPU_TEST_MODE_D0I2_DISABLE BIT(12)
+#define IVPU_TEST_MODE_TURBO_ENABLE BIT(9)
+#define IVPU_TEST_MODE_TURBO_DISABLE BIT(10)
+#define IVPU_TEST_MODE_CLK_RELINQ_DISABLE BIT(11)
+#define IVPU_TEST_MODE_CLK_RELINQ_ENABLE BIT(12)
+#define IVPU_TEST_MODE_D0I2_DISABLE BIT(13)
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
@@ -240,6 +243,7 @@ static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev)
case PCI_DEVICE_ID_LNL:
return IVPU_HW_IP_40XX;
case PCI_DEVICE_ID_PTL_P:
+ case PCI_DEVICE_ID_WCL:
return IVPU_HW_IP_50XX;
default:
dump_stack();
@@ -256,6 +260,7 @@ static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev)
return IVPU_HW_BTRS_MTL;
case PCI_DEVICE_ID_LNL:
case PCI_DEVICE_ID_PTL_P:
+ case PCI_DEVICE_ID_WCL:
return IVPU_HW_BTRS_LNL;
default:
dump_stack();
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index 633160470c93..08dcc31b56f4 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -94,12 +94,14 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.boot = -1;
vdev->timeout.jsm = -1;
vdev->timeout.tdr = -1;
+ vdev->timeout.inference = -1;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = -1;
} else if (ivpu_is_fpga(vdev)) {
vdev->timeout.boot = 50;
vdev->timeout.jsm = 15000;
vdev->timeout.tdr = 30000;
+ vdev->timeout.inference = 900000;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = 500;
vdev->timeout.state_dump_msg = 10000;
@@ -107,6 +109,7 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.boot = 50;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 10000;
+ vdev->timeout.inference = 300000;
vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 100;
vdev->timeout.state_dump_msg = 10;
@@ -114,6 +117,7 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.boot = 1000;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 2000;
+ vdev->timeout.inference = 60000;
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->timeout.autosuspend = 10;
else
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
index 823f6a57dc54..2bf9882ab52e 100644
--- a/drivers/accel/ivpu/ivpu_hw_ip.c
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -683,6 +683,7 @@ static void pwr_island_delay_set(struct ivpu_device *vdev)
return;
switch (ivpu_device_id(vdev)) {
+ case PCI_DEVICE_ID_WCL:
case PCI_DEVICE_ID_PTL_P:
post = high ? 18 : 0;
post1 = 0;
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index fae8351aa330..060f1fc031d3 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <drm/drm_file.h>
@@ -100,6 +100,43 @@ err_free_cmdq:
return NULL;
}
+/**
+ * ivpu_cmdq_get_entry_count - Calculate the number of entries in the command queue.
+ * @cmdq: Pointer to the command queue structure.
+ *
+ * Returns the number of entries that can fit in the command queue memory.
+ */
+static inline u32 ivpu_cmdq_get_entry_count(struct ivpu_cmdq *cmdq)
+{
+ size_t size = ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header);
+
+ return size / sizeof(struct vpu_job_queue_entry);
+}
+
+/**
+ * ivpu_cmdq_get_flags - Get command queue flags based on input flags and test mode.
+ * @vdev: Pointer to the ivpu device structure.
+ * @flags: Input flags to determine the command queue flags.
+ *
+ * Returns the calculated command queue flags, considering both the input flags
+ * and the current test mode settings.
+ */
+static u32 ivpu_cmdq_get_flags(struct ivpu_device *vdev, u32 flags)
+{
+ u32 cmdq_flags = 0;
+
+ if ((flags & DRM_IVPU_CMDQ_FLAG_TURBO) && (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX))
+ cmdq_flags |= VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+
+ /* Test mode can override the TURBO flag coming from the application */
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO_ENABLE)
+ cmdq_flags |= VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO_DISABLE)
+ cmdq_flags &= ~VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+
+ return cmdq_flags;
+}
+
static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
@@ -107,8 +144,7 @@ static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *c
kfree(cmdq);
}
-static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 priority,
- bool is_legacy)
+static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 priority, u32 flags)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq = NULL;
@@ -121,10 +157,6 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
ivpu_err(vdev, "Failed to allocate command queue\n");
return NULL;
}
-
- cmdq->priority = priority;
- cmdq->is_legacy = is_legacy;
-
ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
&file_priv->cmdq_id_next, GFP_KERNEL);
if (ret < 0) {
@@ -132,7 +164,15 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
goto err_free_cmdq;
}
- ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d\n", cmdq->id, file_priv->ctx.id);
+ cmdq->entry_count = ivpu_cmdq_get_entry_count(cmdq);
+ cmdq->priority = priority;
+
+ cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
+ cmdq->jobq->header.engine_idx = VPU_ENGINE_COMPUTE;
+ cmdq->jobq->header.flags = ivpu_cmdq_get_flags(vdev, flags);
+
+ ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d, flags 0x%08x\n",
+ cmdq->id, file_priv->ctx.id, cmdq->jobq->header.flags);
return cmdq;
err_free_cmdq:
@@ -188,27 +228,14 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
return ret;
}
-static void ivpu_cmdq_jobq_init(struct ivpu_device *vdev, struct vpu_job_queue *jobq)
+static void ivpu_cmdq_jobq_reset(struct ivpu_device *vdev, struct vpu_job_queue *jobq)
{
- jobq->header.engine_idx = VPU_ENGINE_COMPUTE;
jobq->header.head = 0;
jobq->header.tail = 0;
- if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
- ivpu_dbg(vdev, JOB, "Turbo mode enabled");
- jobq->header.flags = VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
- }
-
wmb(); /* Flush WC buffer for jobq->header */
}
-static inline u32 ivpu_cmdq_get_entry_count(struct ivpu_cmdq *cmdq)
-{
- size_t size = ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header);
-
- return size / sizeof(struct vpu_job_queue_entry);
-}
-
static int ivpu_cmdq_register(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
struct ivpu_device *vdev = file_priv->vdev;
@@ -219,10 +246,7 @@ static int ivpu_cmdq_register(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
if (cmdq->db_id)
return 0;
- cmdq->entry_count = ivpu_cmdq_get_entry_count(cmdq);
- cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
-
- ivpu_cmdq_jobq_init(vdev, cmdq->jobq);
+ ivpu_cmdq_jobq_reset(vdev, cmdq->jobq);
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, cmdq->priority);
@@ -291,9 +315,10 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire_legacy(struct ivpu_file_priv *file_pr
break;
if (!cmdq) {
- cmdq = ivpu_cmdq_create(file_priv, priority, true);
+ cmdq = ivpu_cmdq_create(file_priv, priority, 0);
if (!cmdq)
return NULL;
+ cmdq->is_legacy = true;
}
return cmdq;
@@ -891,7 +916,7 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
mutex_lock(&file_priv->lock);
- cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false);
+ cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), args->flags);
if (cmdq)
args->cmdq_id = cmdq->id;
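[Note: per ivpu_cmdq_get_flags() above, the test-mode bits override the per-queue flag in both directions. Worked example: an application requesting DRM_IVPU_CMDQ_FLAG_TURBO on a 40xx-or-newer IP gets VPU_JOB_QUEUE_FLAGS_TURBO_MODE set, unless ivpu_test_mode carries IVPU_TEST_MODE_TURBO_DISABLE, which clears it again -- the DISABLE check runs last, so it also wins over IVPU_TEST_MODE_TURBO_ENABLE.]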
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index ea30db181cd7..eacda1dbe840 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -33,8 +33,11 @@ static unsigned long ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
+static unsigned long ivpu_inference_timeout_ms;
+module_param_named(inference_timeout_ms, ivpu_inference_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(inference_timeout_ms, "Inference maximum duration, in milliseconds, 0 - default");
+
#define PM_RESCHEDULE_LIMIT 5
-#define PM_TDR_HEARTBEAT_LIMIT 30
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
{
@@ -191,6 +194,10 @@ static void ivpu_job_timeout_work(struct work_struct *work)
{
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
struct ivpu_device *vdev = pm->vdev;
+ unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
+ unsigned long inference_timeout_ms = ivpu_inference_timeout_ms ? ivpu_inference_timeout_ms :
+ vdev->timeout.inference;
+ u64 inference_max_retries;
u64 heartbeat;
if (ivpu_jsm_get_heartbeat(vdev, 0, &heartbeat) || heartbeat <= vdev->fw->last_heartbeat) {
@@ -198,8 +205,10 @@ static void ivpu_job_timeout_work(struct work_struct *work)
goto recovery;
}
- if (atomic_fetch_inc(&vdev->job_timeout_counter) > PM_TDR_HEARTBEAT_LIMIT) {
- ivpu_err(vdev, "Job timeout detected, heartbeat limit exceeded\n");
+ inference_max_retries = DIV_ROUND_UP(inference_timeout_ms, timeout_ms);
+ if (atomic_fetch_inc(&vdev->job_timeout_counter) >= inference_max_retries) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat limit (%lld) exceeded\n",
+ inference_max_retries);
goto recovery;
}
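[Note: every branch of timeouts_init() keeps inference/tdr = 30 (900000/30000 on FPGA, 300000/10000, 60000/2000 on silicon), so DIV_ROUND_UP(inference_timeout_ms, timeout_ms) reproduces the removed PM_TDR_HEARTBEAT_LIMIT of 30 by default, while either tunable (tdr_timeout_ms or the new inference_timeout_ms module parameter) can shift the retry budget.]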
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index 35e883515629..1106b876f737 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -10,6 +10,7 @@ qaic-y := \
qaic_control.o \
qaic_data.o \
qaic_drv.o \
+ qaic_ras.o \
qaic_timesync.o \
sahara.o
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 0dbb8e32e4b9..c31081e42cee 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -167,6 +167,14 @@ struct qaic_device {
struct workqueue_struct *bootlog_wq;
/* Synchronizes access of pages in MHI bootlog device */
struct mutex bootlog_mutex;
+ /* MHI RAS channel device */
+ struct mhi_device *ras_ch;
+ /* Correctable error count */
+ unsigned int ce_count;
+ /* Uncorrectable error count */
+ unsigned int ue_count;
+ /* Uncorrectable non-fatal error count */
+ unsigned int ue_nf_count;
};
struct qaic_drm_device {
@@ -213,8 +221,6 @@ struct qaic_bo {
bool sliced;
/* Request ID of this BO if it is queued for execution */
u16 req_id;
- /* Handle assigned to this BO */
- u32 handle;
/* Wait on this for completion of DMA transfer of this BO */
struct completion xfer_done;
/*
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 1bce1af7c72c..797289e9d780 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -731,7 +731,6 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (ret)
goto free_bo;
- bo->handle = args->handle;
drm_gem_object_put(obj);
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 3b415e2c9431..e31bcb0ecfc9 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -29,6 +29,7 @@
#include "mhi_controller.h"
#include "qaic.h"
#include "qaic_debugfs.h"
+#include "qaic_ras.h"
#include "qaic_timesync.h"
#include "sahara.h"
@@ -695,6 +696,10 @@ static int __init qaic_init(void)
if (ret)
pr_debug("qaic: qaic_bootlog_register failed %d\n", ret);
+ ret = qaic_ras_register();
+ if (ret)
+ pr_debug("qaic: qaic_ras_register failed %d\n", ret);
+
return 0;
free_mhi:
@@ -722,6 +727,7 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
+ qaic_ras_unregister();
qaic_bootlog_unregister();
qaic_timesync_deinit();
sahara_unregister();
diff --git a/drivers/accel/qaic/qaic_ras.c b/drivers/accel/qaic/qaic_ras.c
new file mode 100644
index 000000000000..914ffc4a9970
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ras.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+/* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */
+
+#include <asm/byteorder.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mhi.h>
+
+#include "qaic.h"
+#include "qaic_ras.h"
+
+#define MAGIC 0x55AA
+#define VERSION 0x2
+#define HDR_SZ 12
+#define NUM_TEMP_LVL 3
+#define POWER_BREAK BIT(0)
+
+enum msg_type {
+ MSG_PUSH, /* async push from device */
+ MSG_REQ, /* sync request to device */
+ MSG_RESP, /* sync response from device */
+};
+
+enum err_type {
+ CE, /* correctable error */
+ UE, /* uncorrectable error */
+ UE_NF, /* uncorrectable error that is non-fatal, expect a disruption */
+ ERR_TYPE_MAX,
+};
+
+static const char * const err_type_str[] = {
+ [CE] = "Correctable",
+ [UE] = "Uncorrectable",
+ [UE_NF] = "Uncorrectable Non-Fatal",
+};
+
+static const char * const err_class_str[] = {
+ [CE] = "Warning",
+ [UE] = "Fatal",
+ [UE_NF] = "Warning",
+};
+
+enum err_source {
+ SOC_MEM,
+ PCIE,
+ DDR,
+ SYS_BUS1,
+ SYS_BUS2,
+ NSP_MEM,
+ TSENS,
+};
+
+static const char * const err_src_str[TSENS + 1] = {
+ [SOC_MEM] = "SoC Memory",
+ [PCIE] = "PCIE",
+ [DDR] = "DDR",
+ [SYS_BUS1] = "System Bus source 1",
+ [SYS_BUS2] = "System Bus source 2",
+ [NSP_MEM] = "NSP Memory",
+ [TSENS] = "Temperature Sensors",
+};
+
+struct ras_data {
+ /* header start */
+ /* Magic number to validate the message */
+ u16 magic;
+ /* RAS version number */
+ u16 ver;
+ u32 seq_num;
+ /* RAS message type */
+ u8 type;
+ u8 id;
+ /* Size of RAS message without the header in bytes */
+ u16 len;
+ /* header end */
+ s32 result;
+ /*
+ * Error source
+ * 0 : SoC Memory
+ * 1 : PCIE
+ * 2 : DDR
+ * 3 : System Bus source 1
+ * 4 : System Bus source 2
+ * 5 : NSP Memory
+ * 6 : Temperature Sensors
+ */
+ u32 source;
+ /*
+ * Stores the error type, there are three types of error in RAS
+ * 0 : correctable error (CE)
+ * 1 : uncorrectable error (UE)
+ * 2 : uncorrectable error that is non-fatal (UE_NF)
+ */
+ u32 err_type;
+ u32 err_threshold;
+ u32 ce_count;
+ u32 ue_count;
+ u32 intr_num;
+ /* Data specific to error source */
+ u8 syndrome[64];
+} __packed;
+
+struct soc_mem_syndrome {
+ u64 error_address[8];
+} __packed;
+
+struct nsp_mem_syndrome {
+ u32 error_address[8];
+ u8 nsp_id;
+} __packed;
+
+struct ddr_syndrome {
+ u32 count;
+ u32 irq_status;
+ u32 data_31_0[2];
+ u32 data_63_32[2];
+ u32 data_95_64[2];
+ u32 data_127_96[2];
+ u32 addr_lsb;
+ u16 addr_msb;
+ u16 parity_bits;
+ u16 instance;
+ u16 err_type;
+} __packed;
+
+struct tsens_syndrome {
+ u32 threshold_type;
+ s32 temp;
+} __packed;
+
+struct sysbus1_syndrome {
+ u32 slave;
+ u32 err_type;
+ u16 addr[8];
+ u8 instance;
+} __packed;
+
+struct sysbus2_syndrome {
+ u32 lsb3;
+ u32 msb3;
+ u32 lsb2;
+ u32 msb2;
+ u32 ext_id;
+ u16 path;
+ u16 op_type;
+ u16 len;
+ u16 redirect;
+ u8 valid;
+ u8 word_error;
+ u8 non_secure;
+ u8 opc;
+ u8 error_code;
+ u8 trans_type;
+ u8 addr_space;
+ u8 instance;
+} __packed;
+
+struct pcie_syndrome {
+ /* CE info */
+ u32 bad_tlp;
+ u32 bad_dllp;
+ u32 replay_rollover;
+ u32 replay_timeout;
+ u32 rx_err;
+ u32 internal_ce_count;
+ /* UE_NF info */
+ u32 fc_timeout;
+ u32 poison_tlp;
+ u32 ecrc_err;
+ u32 unsupported_req;
+ u32 completer_abort;
+ u32 completion_timeout;
+ /* UE info */
+ u32 addr;
+ u8 index;
+ /*
+ * Flag bits indicating specific PCIe events:
+ * BIT(0): Power break (low power)
+ * BIT(1) to BIT(7): Reserved
+ */
+ u8 flag;
+} __packed;
+
+static const char * const threshold_type_str[NUM_TEMP_LVL] = {
+ [0] = "lower",
+ [1] = "upper",
+ [2] = "critical",
+};
+
+static void ras_msg_to_cpu(struct ras_data *msg)
+{
+ struct sysbus1_syndrome *sysbus1_syndrome = (struct sysbus1_syndrome *)&msg->syndrome[0];
+ struct sysbus2_syndrome *sysbus2_syndrome = (struct sysbus2_syndrome *)&msg->syndrome[0];
+ struct soc_mem_syndrome *soc_syndrome = (struct soc_mem_syndrome *)&msg->syndrome[0];
+ struct nsp_mem_syndrome *nsp_syndrome = (struct nsp_mem_syndrome *)&msg->syndrome[0];
+ struct tsens_syndrome *tsens_syndrome = (struct tsens_syndrome *)&msg->syndrome[0];
+ struct pcie_syndrome *pcie_syndrome = (struct pcie_syndrome *)&msg->syndrome[0];
+ struct ddr_syndrome *ddr_syndrome = (struct ddr_syndrome *)&msg->syndrome[0];
+ int i;
+
+ le16_to_cpus(&msg->magic);
+ le16_to_cpus(&msg->ver);
+ le32_to_cpus(&msg->seq_num);
+ le16_to_cpus(&msg->len);
+ le32_to_cpus(&msg->result);
+ le32_to_cpus(&msg->source);
+ le32_to_cpus(&msg->err_type);
+ le32_to_cpus(&msg->err_threshold);
+ le32_to_cpus(&msg->ce_count);
+ le32_to_cpus(&msg->ue_count);
+ le32_to_cpus(&msg->intr_num);
+
+ switch (msg->source) {
+ case SOC_MEM:
+ for (i = 0; i < 8; i++)
+ le64_to_cpus(&soc_syndrome->error_address[i]);
+ break;
+ case PCIE:
+ le32_to_cpus(&pcie_syndrome->bad_tlp);
+ le32_to_cpus(&pcie_syndrome->bad_dllp);
+ le32_to_cpus(&pcie_syndrome->replay_rollover);
+ le32_to_cpus(&pcie_syndrome->replay_timeout);
+ le32_to_cpus(&pcie_syndrome->rx_err);
+ le32_to_cpus(&pcie_syndrome->internal_ce_count);
+ le32_to_cpus(&pcie_syndrome->fc_timeout);
+ le32_to_cpus(&pcie_syndrome->poison_tlp);
+ le32_to_cpus(&pcie_syndrome->ecrc_err);
+ le32_to_cpus(&pcie_syndrome->unsupported_req);
+ le32_to_cpus(&pcie_syndrome->completer_abort);
+ le32_to_cpus(&pcie_syndrome->completion_timeout);
+ le32_to_cpus(&pcie_syndrome->addr);
+ break;
+ case DDR:
+ le16_to_cpus(&ddr_syndrome->instance);
+ le16_to_cpus(&ddr_syndrome->err_type);
+ le32_to_cpus(&ddr_syndrome->count);
+ le32_to_cpus(&ddr_syndrome->irq_status);
+ le32_to_cpus(&ddr_syndrome->data_31_0[0]);
+ le32_to_cpus(&ddr_syndrome->data_31_0[1]);
+ le32_to_cpus(&ddr_syndrome->data_63_32[0]);
+ le32_to_cpus(&ddr_syndrome->data_63_32[1]);
+ le32_to_cpus(&ddr_syndrome->data_95_64[0]);
+ le32_to_cpus(&ddr_syndrome->data_95_64[1]);
+ le32_to_cpus(&ddr_syndrome->data_127_96[0]);
+ le32_to_cpus(&ddr_syndrome->data_127_96[1]);
+ le16_to_cpus(&ddr_syndrome->parity_bits);
+ le16_to_cpus(&ddr_syndrome->addr_msb);
+ le32_to_cpus(&ddr_syndrome->addr_lsb);
+ break;
+ case SYS_BUS1:
+ le32_to_cpus(&sysbus1_syndrome->slave);
+ le32_to_cpus(&sysbus1_syndrome->err_type);
+ for (i = 0; i < 8; i++)
+ le16_to_cpus(&sysbus1_syndrome->addr[i]);
+ break;
+ case SYS_BUS2:
+ le16_to_cpus(&sysbus2_syndrome->op_type);
+ le16_to_cpus(&sysbus2_syndrome->len);
+ le16_to_cpus(&sysbus2_syndrome->redirect);
+ le16_to_cpus(&sysbus2_syndrome->path);
+ le32_to_cpus(&sysbus2_syndrome->ext_id);
+ le32_to_cpus(&sysbus2_syndrome->lsb2);
+ le32_to_cpus(&sysbus2_syndrome->msb2);
+ le32_to_cpus(&sysbus2_syndrome->lsb3);
+ le32_to_cpus(&sysbus2_syndrome->msb3);
+ break;
+ case NSP_MEM:
+ for (i = 0; i < 8; i++)
+ le32_to_cpus(&nsp_syndrome->error_address[i]);
+ break;
+ case TSENS:
+ le32_to_cpus(&tsens_syndrome->threshold_type);
+ le32_to_cpus(&tsens_syndrome->temp);
+ break;
+ }
+}
+
+static void decode_ras_msg(struct qaic_device *qdev, struct ras_data *msg)
+{
+ struct sysbus1_syndrome *sysbus1_syndrome = (struct sysbus1_syndrome *)&msg->syndrome[0];
+ struct sysbus2_syndrome *sysbus2_syndrome = (struct sysbus2_syndrome *)&msg->syndrome[0];
+ struct soc_mem_syndrome *soc_syndrome = (struct soc_mem_syndrome *)&msg->syndrome[0];
+ struct nsp_mem_syndrome *nsp_syndrome = (struct nsp_mem_syndrome *)&msg->syndrome[0];
+ struct tsens_syndrome *tsens_syndrome = (struct tsens_syndrome *)&msg->syndrome[0];
+ struct pcie_syndrome *pcie_syndrome = (struct pcie_syndrome *)&msg->syndrome[0];
+ struct ddr_syndrome *ddr_syndrome = (struct ddr_syndrome *)&msg->syndrome[0];
+ char *class;
+ char *level;
+
+ if (msg->magic != MAGIC) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid magic %x\n", msg->magic);
+ return;
+ }
+
+ if (!msg->ver || msg->ver > VERSION) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid version %d\n", msg->ver);
+ return;
+ }
+
+ if (msg->type != MSG_PUSH) {
+ pci_warn(qdev->pdev, "Dropping non-PUSH RAS message\n");
+ return;
+ }
+
+ if (msg->len != sizeof(*msg) - HDR_SZ) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid len %d\n", msg->len);
+ return;
+ }
+
+ if (msg->err_type >= ERR_TYPE_MAX) {
+ pci_warn(qdev->pdev, "Dropping RAS message with err type %d\n", msg->err_type);
+ return;
+ }
+
+ if (msg->err_type == UE)
+ level = KERN_ERR;
+ else
+ level = KERN_WARNING;
+
+ switch (msg->source) {
+ case SOC_MEM:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ soc_syndrome->error_address[0],
+ soc_syndrome->error_address[1],
+ soc_syndrome->error_address[2],
+ soc_syndrome->error_address[3],
+ soc_syndrome->error_address[4],
+ soc_syndrome->error_address[5],
+ soc_syndrome->error_address[6],
+ soc_syndrome->error_address[7]);
+ break;
+ case PCIE:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold);
+
+ switch (msg->err_type) {
+ case CE:
+ /*
+ * Modeled after AER prints. This continues the dev_printk() from a few
+ * lines up. We reduce duplication of code, but also avoid re-printing the
+ * PCI device info so that the end result looks uniform to the log user.
+ */
+ printk(KERN_WARNING pr_fmt("Syndrome:\n Bad TLP count %d\n Bad DLLP count %d\n Replay Rollover count %d\n Replay Timeout count %d\n Recv Error count %d\n Internal CE count %d\n"),
+ pcie_syndrome->bad_tlp,
+ pcie_syndrome->bad_dllp,
+ pcie_syndrome->replay_rollover,
+ pcie_syndrome->replay_timeout,
+ pcie_syndrome->rx_err,
+ pcie_syndrome->internal_ce_count);
+ if (msg->ver > 0x1)
+ pr_warn(" Power break %s\n",
+ pcie_syndrome->flag & POWER_BREAK ? "ON" : "OFF");
+ break;
+ case UE:
+ printk(KERN_ERR pr_fmt("Syndrome:\n Index %d\n Address 0x%x\n"),
+ pcie_syndrome->index, pcie_syndrome->addr);
+ break;
+ case UE_NF:
+ printk(KERN_WARNING pr_fmt("Syndrome:\n FC timeout count %d\n Poisoned TLP count %d\n ECRC error count %d\n Unsupported request count %d\n Completer abort count %d\n Completion timeout count %d\n"),
+ pcie_syndrome->fc_timeout,
+ pcie_syndrome->poison_tlp,
+ pcie_syndrome->ecrc_err,
+ pcie_syndrome->unsupported_req,
+ pcie_syndrome->completer_abort,
+ pcie_syndrome->completion_timeout);
+ break;
+ default:
+ break;
+ }
+ break;
+ case DDR:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n Instance %d\n Count %d\n Data 31_0 0x%x 0x%x\n Data 63_32 0x%x 0x%x\n Data 95_64 0x%x 0x%x\n Data 127_96 0x%x 0x%x\n Parity bits 0x%x\n Address msb 0x%x\n Address lsb 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ ddr_syndrome->instance,
+ ddr_syndrome->count,
+ ddr_syndrome->data_31_0[1],
+ ddr_syndrome->data_31_0[0],
+ ddr_syndrome->data_63_32[1],
+ ddr_syndrome->data_63_32[0],
+ ddr_syndrome->data_95_64[1],
+ ddr_syndrome->data_95_64[0],
+ ddr_syndrome->data_127_96[1],
+ ddr_syndrome->data_127_96[0],
+ ddr_syndrome->parity_bits,
+ ddr_syndrome->addr_msb,
+ ddr_syndrome->addr_lsb);
+ break;
+ case SYS_BUS1:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n instance %d\n %s\n err_type %d\n address0 0x%x\n address1 0x%x\n address2 0x%x\n address3 0x%x\n address4 0x%x\n address5 0x%x\n address6 0x%x\n address7 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ sysbus1_syndrome->instance,
+ sysbus1_syndrome->slave ? "Slave" : "Master",
+ sysbus1_syndrome->err_type,
+ sysbus1_syndrome->addr[0],
+ sysbus1_syndrome->addr[1],
+ sysbus1_syndrome->addr[2],
+ sysbus1_syndrome->addr[3],
+ sysbus1_syndrome->addr[4],
+ sysbus1_syndrome->addr[5],
+ sysbus1_syndrome->addr[6],
+ sysbus1_syndrome->addr[7]);
+ break;
+ case SYS_BUS2:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n instance %d\n valid %d\n word error %d\n non-secure %d\n opc %d\n error code %d\n transaction type %d\n address space %d\n operation type %d\n len %d\n redirect %d\n path %d\n ext_id %d\n lsb2 %d\n msb2 %d\n lsb3 %d\n msb3 %d\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ sysbus2_syndrome->instance,
+ sysbus2_syndrome->valid,
+ sysbus2_syndrome->word_error,
+ sysbus2_syndrome->non_secure,
+ sysbus2_syndrome->opc,
+ sysbus2_syndrome->error_code,
+ sysbus2_syndrome->trans_type,
+ sysbus2_syndrome->addr_space,
+ sysbus2_syndrome->op_type,
+ sysbus2_syndrome->len,
+ sysbus2_syndrome->redirect,
+ sysbus2_syndrome->path,
+ sysbus2_syndrome->ext_id,
+ sysbus2_syndrome->lsb2,
+ sysbus2_syndrome->msb2,
+ sysbus2_syndrome->lsb3,
+ sysbus2_syndrome->msb3);
+ break;
+ case NSP_MEM:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n NSP ID %d\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ nsp_syndrome->nsp_id,
+ nsp_syndrome->error_address[0],
+ nsp_syndrome->error_address[1],
+ nsp_syndrome->error_address[2],
+ nsp_syndrome->error_address[3],
+ nsp_syndrome->error_address[4],
+ nsp_syndrome->error_address[5],
+ nsp_syndrome->error_address[6],
+ nsp_syndrome->error_address[7]);
+ break;
+ case TSENS:
+ if (tsens_syndrome->threshold_type >= NUM_TEMP_LVL) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid temp threshold %d\n",
+ tsens_syndrome->threshold_type);
+ break;
+ }
+
+ if (msg->err_type)
+ class = "Fatal";
+ else if (tsens_syndrome->threshold_type)
+ class = "Critical";
+ else
+ class = "Warning";
+
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n %s threshold\n %d deg C\n",
+ class,
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ threshold_type_str[tsens_syndrome->threshold_type],
+ tsens_syndrome->temp);
+ break;
+ }
+
+ /* Uncorrectable errors are fatal */
+ if (msg->err_type == UE)
+ mhi_soc_reset(qdev->mhi_cntrl);
+
+ switch (msg->err_type) {
+ case CE:
+ if (qdev->ce_count != UINT_MAX)
+ qdev->ce_count++;
+ break;
+ case UE:
+ if (qdev->ue_count != UINT_MAX)
+ qdev->ue_count++;
+ break;
+ case UE_NF:
+ if (qdev->ue_nf_count != UINT_MAX)
+ qdev->ue_nf_count++;
+ break;
+ default:
+ /* not possible */
+ break;
+ }
+}
+
+static ssize_t ce_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ce_count);
+}
+
+static ssize_t ue_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ue_count);
+}
+
+static ssize_t ue_nonfatal_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ue_nf_count);
+}
+
+static DEVICE_ATTR_RO(ce_count);
+static DEVICE_ATTR_RO(ue_count);
+static DEVICE_ATTR_RO(ue_nonfatal_count);
+
+static struct attribute *ras_attrs[] = {
+ &dev_attr_ce_count.attr,
+ &dev_attr_ue_count.attr,
+ &dev_attr_ue_nonfatal_count.attr,
+ NULL,
+};
+
+static struct attribute_group ras_group = {
+ .attrs = ras_attrs,
+};
+
+static int qaic_ras_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct ras_data *resp;
+ int ret;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ return -ENOMEM;
+ }
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, resp, sizeof(*resp), MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ mhi_unprepare_from_transfer(mhi_dev);
+ return ret;
+ }
+
+ ret = device_add_group(&qdev->pdev->dev, &ras_group);
+ if (ret) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ pci_dbg(qdev->pdev, "ras add sysfs failed %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->ras_ch = mhi_dev;
+
+ return ret;
+}
+
+static void qaic_ras_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+ qdev->ras_ch = NULL;
+ device_remove_group(&qdev->pdev->dev, &ras_group);
+ mhi_unprepare_from_transfer(mhi_dev);
+}
+
+static void qaic_ras_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) {}
+
+static void qaic_ras_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct ras_data *msg = mhi_result->buf_addr;
+ int ret;
+
+ if (mhi_result->transaction_status) {
+ kfree(msg);
+ return;
+ }
+
+ ras_msg_to_cpu(msg);
+ decode_ras_msg(qdev, msg);
+
+ ret = mhi_queue_buf(qdev->ras_ch, DMA_FROM_DEVICE, msg, sizeof(*msg), MHI_EOT);
+ if (ret) {
+ dev_err(&mhi_dev->dev, "Cannot requeue RAS recv buf %d\n", ret);
+ kfree(msg);
+ }
+}
+
+static const struct mhi_device_id qaic_ras_mhi_match_table[] = {
+ { .chan = "QAIC_STATUS", },
+ {},
+};
+
+static struct mhi_driver qaic_ras_mhi_driver = {
+ .id_table = qaic_ras_mhi_match_table,
+ .remove = qaic_ras_mhi_remove,
+ .probe = qaic_ras_mhi_probe,
+ .ul_xfer_cb = qaic_ras_mhi_ul_xfer_cb,
+ .dl_xfer_cb = qaic_ras_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_ras",
+ },
+};
+
+int qaic_ras_register(void)
+{
+ return mhi_driver_register(&qaic_ras_mhi_driver);
+}
+
+void qaic_ras_unregister(void)
+{
+ mhi_driver_unregister(&qaic_ras_mhi_driver);
+}
diff --git a/drivers/accel/qaic/qaic_ras.h b/drivers/accel/qaic/qaic_ras.h
new file mode 100644
index 000000000000..d44a4eeeb060
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ras.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */
+
+#ifndef __QAIC_RAS_H__
+#define __QAIC_RAS_H__
+
+int qaic_ras_register(void);
+void qaic_ras_unregister(void);
+
+#endif /* __QAIC_RAS_H__ */
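The ras_group attributes above land directly under the accelerator's PCI device in sysfs. A minimal userspace sketch of polling them follows; the PCI address is a placeholder, and only the three attribute names are taken from the DEVICE_ATTR_RO() declarations in qaic_ras.c.

/* Hedged sketch: poll the qaic RAS counters from userspace.
 * The sysfs base path is hypothetical; substitute the real PCI address.
 */
#include <stdio.h>

int main(void)
{
	const char *base = "/sys/bus/pci/devices/0000:01:00.0"; /* placeholder */
	const char *attrs[] = { "ce_count", "ue_count", "ue_nonfatal_count" };
	char path[256];
	unsigned int val;
	int i;

	for (i = 0; i < 3; i++) {
		FILE *f;

		snprintf(path, sizeof(path), "%s/%s", base, attrs[i]);
		f = fopen(path, "r");
		if (!f)
			continue; /* attribute absent: RAS channel not probed */
		if (fscanf(f, "%u", &val) == 1)
			printf("%s = %u\n", attrs[i], val);
		fclose(f);
	}
	return 0;
}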
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 75c7db8b156a..7855bbf752b1 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2033,7 +2033,7 @@ void __init acpi_ec_ecdt_probe(void)
goto out;
}
- if (!strstarts(ecdt_ptr->id, "\\")) {
+ if (!strlen(ecdt_ptr->id)) {
/*
* The ECDT table on some MSI notebooks contains invalid data, together
* with an empty ID string ("").
@@ -2042,9 +2042,13 @@ void __init acpi_ec_ecdt_probe(void)
* a "fully qualified reference to the (...) embedded controller device",
* so this string always has to start with a backslash.
*
- * By verifying this we can avoid such faulty ECDT tables in a safe way.
+ * However, some ThinkBook machines have an ECDT table with a valid EC
+ * description but an invalid ID string ("_SB.PC00.LPCB.EC0").
+ *
+ * Because of this we only check whether the ID string is empty, which
+ * still rejects the obviously faulty tables.
*/
- pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id);
+ pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n");
goto out;
}
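To see what the relaxed check changes, a standalone sketch of both predicates is below; strstarts() is reimplemented so the snippet builds outside the kernel, and the ID strings are the ones cited in the comment above.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* userspace stand-in for the kernel's strstarts() */
static bool strstarts(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

int main(void)
{
	/* spec-conforming ID, ThinkBook ID, empty MSI ID */
	const char *ids[] = { "\\_SB.PCI0.LPCB.EC0", "_SB.PC00.LPCB.EC0", "" };
	int i;

	for (i = 0; i < 3; i++)
		printf("id=\"%s\": old check rejects=%d, new check rejects=%d\n",
		       ids[i], !strstarts(ids[i], "\\"), ids[i][0] == '\0');
	return 0;
}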
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 9d9052258e92..4958301f5417 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -962,10 +962,10 @@ static int hmat_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
struct memory_target *target;
- struct memory_notify *mnb = arg;
- int pxm, nid = mnb->status_change_nid;
+ struct node_notify *nn = arg;
+ int pxm, nid = nn->nid;
- if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+ if (action != NODE_ADDED_FIRST_MEMORY)
return NOTIFY_OK;
pxm = node_to_pxm(nid);
@@ -1118,7 +1118,7 @@ static __init int hmat_init(void)
hmat_register_targets();
/* Keep the table and structures if the notifier may use them */
- if (hotplug_memory_notifier(hmat_callback, HMAT_CALLBACK_PRI))
+ if (hotplug_node_notifier(hmat_callback, HMAT_CALLBACK_PRI))
goto out_put;
if (!hmat_set_default_dram_perf())
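hmat_callback() now keys off the node notifier chain added later in this series (see the drivers/base/node.c hunk below) rather than the memory hotplug chain. A sketch of another client of that chain, assuming the NODE_ADDED_FIRST_MEMORY action and the struct node_notify layout used above:

/* Sketch only: NODE_ADDED_FIRST_MEMORY and struct node_notify are
 * assumptions taken from the hunk above, not a long-standing API.
 */
#include <linux/init.h>
#include <linux/node.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_node_cb(struct notifier_block *self,
			   unsigned long action, void *arg)
{
	struct node_notify *nn = arg;

	if (action != NODE_ADDED_FIRST_MEMORY)
		return NOTIFY_OK;

	pr_info("node %d came online with its first memory\n", nn->nid);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_node_cb,
};

static int __init example_init(void)
{
	return register_node_notifier(&example_nb);
}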
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 755003bf3a45..8972446b7162 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -180,7 +180,7 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
- if (!pr || !pr->performance)
+ if (!pr)
continue;
/*
@@ -197,6 +197,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
+ if (!pr->performance)
+ continue;
+
ret = acpi_processor_get_platform_limit(pr);
if (ret)
pr_err("Failed to update freq constraint for CPU%d (%d)\n",
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 229429ba5027..495fa096dd65 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1089,6 +1089,7 @@ static struct ata_port_operations ich_pata_ops = {
};
static struct attribute *piix_sidpr_shost_attrs[] = {
+ &dev_attr_link_power_management_supported.attr,
&dev_attr_link_power_management_policy.attr,
NULL
};
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index b335fb7e5cb4..c79abdfcd7a9 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -111,6 +111,7 @@ static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
static struct attribute *ahci_shost_attrs[] = {
+ &dev_attr_link_power_management_supported.attr,
&dev_attr_link_power_management_policy.attr,
&dev_attr_em_message_type.attr,
&dev_attr_em_message.attr,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 97d9f0488cc1..ff53f5f029b4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4602,7 +4602,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
return AC_ERR_INVALID;
/* set up init dev params taskfile */
- ata_dev_dbg(dev, "init dev params \n");
+ ata_dev_dbg(dev, "init dev params\n");
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_INIT_DEV_PARAMS;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 2946ae6d4b2c..2586e77ebf45 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2075,7 +2075,7 @@ out:
* Check if a link is established. This is a relaxed version of
* ata_phys_link_online() which accounts for the fact that this is potentially
* called after changing the link power management policy, which may not be
- * reflected immediately in the SSTAUS register (e.g., we may still be seeing
+ * reflected immediately in the SStatus register (e.g., we may still be seeing
* the PHY in partial, slumber or devsleep power management state).
* So check that:
* - A device is still present, that is, DET is 1h (Device presence detected
@@ -2089,8 +2089,13 @@ static bool ata_eh_link_established(struct ata_link *link)
u32 sstatus;
u8 det, ipm;
+ /*
+ * For old IDE/PATA adapters that do not have a valid scr_read method,
+ * or if reading the SStatus register fails, assume that the device is
+ * present. Device probe will determine if that is really the case.
+ */
if (sata_scr_read(link, SCR_STATUS, &sstatus))
- return false;
+ return true;
det = sstatus & 0x0f;
ipm = (sstatus >> 8) & 0x0f;
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 4734465d3b1e..b2817a2995d6 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -900,14 +900,52 @@ static const char *ata_lpm_policy_names[] = {
[ATA_LPM_MIN_POWER] = "min_power",
};
+/*
+ * Check if a port supports link power management.
+ * Must be called with the port locked.
+ */
+static bool ata_scsi_lpm_supported(struct ata_port *ap)
+{
+ struct ata_link *link;
+ struct ata_device *dev;
+
+ if (ap->flags & ATA_FLAG_NO_LPM)
+ return false;
+
+ ata_for_each_link(link, ap, EDGE) {
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ if (dev->quirks & ATA_QUIRK_NOLPM)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static ssize_t ata_scsi_lpm_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ unsigned long flags;
+ bool supported;
+
+ spin_lock_irqsave(ap->lock, flags);
+ supported = ata_scsi_lpm_supported(ap);
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return sysfs_emit(buf, "%d\n", supported);
+}
+DEVICE_ATTR(link_power_management_supported, S_IRUGO,
+ ata_scsi_lpm_supported_show, NULL);
+EXPORT_SYMBOL_GPL(dev_attr_link_power_management_supported);
+
static ssize_t ata_scsi_lpm_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(device);
struct ata_port *ap = ata_shost_to_port(shost);
- struct ata_link *link;
- struct ata_device *dev;
enum ata_lpm_policy policy;
unsigned long flags;
@@ -924,20 +962,11 @@ static ssize_t ata_scsi_lpm_store(struct device *device,
spin_lock_irqsave(ap->lock, flags);
- if (ap->flags & ATA_FLAG_NO_LPM) {
+ if (!ata_scsi_lpm_supported(ap)) {
count = -EOPNOTSUPP;
goto out_unlock;
}
- ata_for_each_link(link, ap, EDGE) {
- ata_for_each_dev(dev, &ap->link, ENABLED) {
- if (dev->quirks & ATA_QUIRK_NOLPM) {
- count = -EOPNOTSUPP;
- goto out_unlock;
- }
- }
- }
-
ap->target_lpm_policy = policy;
ata_port_schedule_eh(ap);
out_unlock:
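A plausible userspace consumer of the new attribute: probe link_power_management_supported first, and only write a policy when the port can honor it, so the -EOPNOTSUPP path in ata_scsi_lpm_store() is never taken. The host number is a placeholder; "med_power_with_dipm" is one of the policy names from ata_lpm_policy_names[].

#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/class/scsi_host/host0"; /* placeholder host */
	char path[256];
	int supported = 0;
	FILE *f;

	snprintf(path, sizeof(path), "%s/link_power_management_supported", dir);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%d", &supported) != 1)
			supported = 0;
		fclose(f);
	}
	if (!supported) {
		fprintf(stderr, "LPM not supported on this port\n");
		return 1;
	}

	snprintf(path, sizeof(path), "%s/link_power_management_policy", dir);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "med_power_with_dipm\n");
	fclose(f);
	return 0;
}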
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 27b15176db56..2ded5e476d6e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -859,18 +859,14 @@ static void ata_to_sense_error(u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
};
static const unsigned char stat_table[][4] = {
- /* Must be first because BUSY means no other bits valid */
- {0x80, ABORTED_COMMAND, 0x47, 0x00},
- // Busy, fake parity for now
- {0x40, ILLEGAL_REQUEST, 0x21, 0x04},
- // Device ready, unaligned write command
- {0x20, HARDWARE_ERROR, 0x44, 0x00},
- // Device fault, internal target failure
- {0x08, ABORTED_COMMAND, 0x47, 0x00},
- // Timed out in xfer, fake parity for now
- {0x04, RECOVERED_ERROR, 0x11, 0x00},
- // Recovered ECC error Medium error, recovered
- {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
+ /* Busy: must be first because BUSY means no other bits valid */
+ { ATA_BUSY, ABORTED_COMMAND, 0x00, 0x00 },
+ /* Device fault: INTERNAL TARGET FAILURE */
+ { ATA_DF, HARDWARE_ERROR, 0x44, 0x00 },
+ /* Corrected data error */
+ { ATA_CORR, RECOVERED_ERROR, 0x00, 0x00 },
+
+ { 0xFF, 0xFF, 0xFF, 0xFF }, /* END mark */
};
/*
@@ -942,6 +938,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
ata_dev_dbg(dev,
"missing result TF: can't generate ATA PT sense data\n");
+ if (qc->err_mask)
+ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
@@ -996,8 +994,8 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
ata_dev_dbg(dev,
- "missing result TF: can't generate sense data\n");
- return;
+ "Missing result TF: reporting aborted command\n");
+ goto aborted;
}
/* Use ata_to_sense_error() to map status register bits
@@ -1008,13 +1006,15 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
ata_to_sense_error(tf->status, tf->error,
&sense_key, &asc, &ascq);
ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
- } else {
- /* Could not decode error */
- ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
- tf->status, qc->err_mask);
- ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
+
+ /* Could not decode error */
+ ata_dev_warn(dev,
+ "Could not decode error 0x%x, status 0x%x (err_mask=0x%x)\n",
+ tf->error, tf->status, qc->err_mask);
+aborted:
+ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
}
void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -3904,21 +3904,16 @@ static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
/* Check cdl_ctrl */
switch (buf[0] & 0x03) {
case 0:
- /* Disable CDL if it is enabled */
- if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
- return 0;
+ /* Disable CDL */
ata_dev_dbg(dev, "Disabling CDL\n");
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
/*
- * Enable CDL if not already enabled. Since this is mutually
- * exclusive with NCQ priority, allow this only if NCQ priority
- * is disabled.
+ * Enable CDL. Since CDL is mutually exclusive with NCQ
+ * priority, allow this only if NCQ priority is disabled.
*/
- if (dev->flags & ATA_DFLAG_CDL_ENABLED)
- return 0;
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index f7a933eefe05..9eefdc5df5df 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -758,7 +758,7 @@ static void pata_macio_irq_clear(struct ata_port *ap)
static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
{
- dev_dbg(priv->dev, "Enabling & resetting... \n");
+ dev_dbg(priv->dev, "Enabling & resetting...\n");
if (priv->mediabay)
return;
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index d792ce6d97bf..ae914dcb0c83 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -295,7 +295,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
/* Set the PIO timing registers using value table for 133MHz */
- ata_port_dbg(ap, "Set pio regs... \n");
+ ata_port_dbg(ap, "Set PIO regs...\n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0xffff0000;
@@ -308,7 +308,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- ata_port_dbg(ap, "Set to pio mode[%u] \n", pio);
+ ata_port_dbg(ap, "Set to PIO mode[%u]\n", pio);
}
/**
@@ -341,7 +341,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
}
- ata_port_dbg(ap, "Set udma regs... \n");
+ ata_port_dbg(ap, "Set UDMA regs...\n");
ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
ctcr1 &= 0xff000000;
@@ -350,14 +350,14 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
(pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- ata_port_dbg(ap, "Set to udma mode[%u] \n", udma_mode);
+ ata_port_dbg(ap, "Set to UDMA mode[%u]\n", udma_mode);
} else if ((dma_mode >= XFER_MW_DMA_0) &&
(dma_mode <= XFER_MW_DMA_2)) {
/* Set the MDMA timing registers with value table for 133MHz */
unsigned int mdma_mode = dma_mode & 0x07;
- ata_port_dbg(ap, "Set mdma regs... \n");
+ ata_port_dbg(ap, "Set MDMA regs...\n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0x0000ffff;
@@ -366,7 +366,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
- ata_port_dbg(ap, "Set to mdma mode[%u] \n", mdma_mode);
+ ata_port_dbg(ap, "Set to MDMA mode[%u]\n", mdma_mode);
} else {
ata_port_err(ap, "Unknown dma mode [%u] ignored\n", dma_mode);
}
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 2a1fe3080712..0dfa2cdc897c 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -755,7 +755,7 @@ static void lanai_shutdown_rx_vci(const struct lanai_vcc *lvcc)
/* Shutdown transmitting on card.
* Unfortunately the lanai needs us to wait until all the data
* drains out of the buffer before we can dealloc it, so this
- * can take awhile -- up to 370ms for a full 128KB buffer
+ * can take a while -- up to 370ms for a full 128KB buffer
* assuming everyone else is quiet. In theory the time is
* boundless if there's a CBR VCC holding things up.
*/
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index ed3e69dc785c..5c6c1d6bb59f 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -22,6 +22,7 @@
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>
+#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
@@ -48,22 +49,8 @@ int mhp_online_type_from_str(const char *str)
#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
-static int sections_per_block;
-
-static inline unsigned long memory_block_id(unsigned long section_nr)
-{
- return section_nr / sections_per_block;
-}
-
-static inline unsigned long pfn_to_block_id(unsigned long pfn)
-{
- return memory_block_id(pfn_to_section_nr(pfn));
-}
-
-static inline unsigned long phys_to_block_id(unsigned long phys)
-{
- return pfn_to_block_id(PFN_DOWN(phys));
-}
+int sections_per_block;
+EXPORT_SYMBOL(sections_per_block);
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);
@@ -683,7 +670,7 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
*
* Called under device_hotplug_lock.
*/
-static struct memory_block *find_memory_block_by_id(unsigned long block_id)
+struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
struct memory_block *mem;
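The inline helpers deleted above presumably move to a header now that sections_per_block is exported; for reference, the arithmetic they encoded is a three-step chain, sketched here under the usual PAGE_SHIFT/PAGES_PER_SECTION definitions:

/* Sketch of the relocated helper chain:
 * physical address -> pfn -> section number -> memory block id.
 */
static inline unsigned long example_phys_to_block_id(unsigned long phys)
{
	unsigned long pfn = phys >> PAGE_SHIFT;			/* PFN_DOWN() */
	unsigned long section_nr = pfn_to_section_nr(pfn);	/* pfn / PAGES_PER_SECTION */

	return section_nr / sections_per_block;			/* memory_block_id() */
}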
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 9328b81c2f47..3399594136b2 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -21,6 +21,7 @@
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
static const struct bus_type node_subsys = {
.name = "node",
@@ -111,6 +112,27 @@ static const struct attribute_group *node_access_node_groups[] = {
NULL,
};
+#ifdef CONFIG_MEMORY_HOTPLUG
+static BLOCKING_NOTIFIER_HEAD(node_chain);
+
+int register_node_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&node_chain, nb);
+}
+EXPORT_SYMBOL(register_node_notifier);
+
+void unregister_node_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&node_chain, nb);
+}
+EXPORT_SYMBOL(unregister_node_notifier);
+
+int node_notify(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(&node_chain, val, v);
+}
+#endif
+
static void node_remove_accesses(struct node *node)
{
struct node_access_nodes *c, *cnext;
@@ -478,7 +500,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
nid, 0UL,
- nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+ nid, 0UL,
nid, K(sreclaimable +
node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
nid, K(sreclaimable + sunreclaimable),
@@ -637,6 +659,7 @@ static int register_node(struct node *node, int num)
} else {
hugetlb_register_node(node);
compaction_register_node(node);
+ reclaim_register_node(node);
}
return error;
@@ -653,6 +676,7 @@ void unregister_node(struct node *node)
{
hugetlb_unregister_node(node);
compaction_unregister_node(node);
+ reclaim_unregister_node(node);
node_remove_accesses(node);
node_remove_caches(node);
device_unregister(&node->dev);
@@ -756,15 +780,6 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-static int __ref get_nid_for_pfn(unsigned long pfn)
-{
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
- if (system_state < SYSTEM_RUNNING)
- return early_pfn_to_nid(pfn);
-#endif
- return pfn_to_nid(pfn);
-}
-
static void do_register_memory_block_under_node(int nid,
struct memory_block *mem_blk,
enum meminit_context context)
@@ -791,46 +806,6 @@ static void do_register_memory_block_under_node(int nid,
ret);
}
-/* register memory section under specified node if it spans that node */
-static int register_mem_block_under_node_early(struct memory_block *mem_blk,
- void *arg)
-{
- unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
- unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
- unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
- int nid = *(int *)arg;
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
- int page_nid;
-
- /*
- * memory block could have several absent sections from start.
- * skip pfn range from absent section
- */
- if (!pfn_in_present_section(pfn)) {
- pfn = round_down(pfn + PAGES_PER_SECTION,
- PAGES_PER_SECTION) - 1;
- continue;
- }
-
- /*
- * We need to check if page belongs to nid only at the boot
- * case because node's ranges can be interleaved.
- */
- page_nid = get_nid_for_pfn(pfn);
- if (page_nid < 0)
- continue;
- if (page_nid != nid)
- continue;
-
- do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
- return 0;
- }
- /* mem section does not span the specified node */
- return 0;
-}
-
/*
* During hotplug we know that all pages in the memory block belong to the same
* node.
@@ -859,24 +834,44 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
-void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context)
+/* register all memory blocks under the corresponding nodes */
+static void register_memory_blocks_under_nodes(void)
{
- walk_memory_blocks_func_t func;
+ struct memblock_region *r;
+
+ for_each_mem_region(r) {
+ const unsigned long start_block_id = phys_to_block_id(r->base);
+ const unsigned long end_block_id = phys_to_block_id(r->base + r->size - 1);
+ const int nid = memblock_get_region_node(r);
+ unsigned long block_id;
- if (context == MEMINIT_HOTPLUG)
- func = register_mem_block_under_node_hotplug;
- else
- func = register_mem_block_under_node_early;
+ if (!node_online(nid))
+ continue;
+
+ for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
+ struct memory_block *mem;
+
+ mem = find_memory_block_by_id(block_id);
+ if (!mem)
+ continue;
+
+ do_register_memory_block_under_node(nid, mem, MEMINIT_EARLY);
+ put_device(&mem->dev);
+ }
+ }
+}
+
+void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
+ unsigned long end_pfn)
+{
walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
- (void *)&nid, func);
+ (void *)&nid, register_mem_block_under_node_hotplug);
return;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-int __register_one_node(int nid)
+int register_one_node(int nid)
{
int error;
int cpu;
@@ -980,11 +975,13 @@ void __init node_dev_init(void)
/*
* Create all node devices, which will properly link the node
- * to applicable memory block devices and already created cpu devices.
+ * to already created cpu devices.
*/
for_each_online_node(i) {
- ret = register_one_node(i);
+ ret = register_one_node(i);
if (ret)
panic("%s() failed to add node: %d\n", __func__, ret);
}
+
+ register_memory_blocks_under_nodes();
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index bb382a70d260..dbf5456cd891 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -66,6 +66,20 @@ static pm_message_t pm_transition;
static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
+/**
+ * pm_hibernate_is_recovering - report whether hibernation is recovering from an error.
+ *
+ * Used to query whether dev_pm_ops.thaw() is being called for the normal
+ * hibernation case or to recover from some error.
+ *
+ * Return: true for the error (recovery) case, false for the normal case.
+ */
+bool pm_hibernate_is_recovering(void)
+{
+ return pm_transition.event == PM_EVENT_RECOVER;
+}
+EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
+
static const char *pm_verb(int event)
{
switch (event) {
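A sketch of the intended caller of the new helper: a dev_pm_ops.thaw() callback that does lightweight re-initialization on the normal image-creation path but a full recovery when thaw runs because hibernation failed. The foo_* functions are hypothetical; only pm_hibernate_is_recovering() comes from the hunk above.

#include <linux/device.h>
#include <linux/pm.h>

static int foo_full_reinit(struct device *dev);		/* hypothetical */
static int foo_light_reinit(struct device *dev);	/* hypothetical */

static int foo_thaw(struct device *dev)
{
	/* PM_EVENT_RECOVER: hibernation aborted on error, so bring the
	 * device fully back up instead of the minimal post-snapshot work.
	 */
	if (pm_hibernate_is_recovering())
		return foo_full_reinit(dev);

	return foo_light_reinit(dev);
}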
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index d1585f073776..6112d942499b 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -21,6 +21,7 @@
struct regmap_irq_chip_data {
struct mutex lock;
+ struct lock_class_key lock_key;
struct irq_chip irq_chip;
struct regmap *map;
@@ -801,7 +802,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
goto err_alloc;
}
- mutex_init(&d->lock);
+ /*
+ * If one regmap-irq is the parent of another then we'll try
+ * to lock the child with the parent locked; use an explicit
+ * lock_key so lockdep can figure out what's going on.
+ */
+ lockdep_register_key(&d->lock_key);
+ mutex_init_with_key(&d->lock, &d->lock_key);
for (i = 0; i < chip->num_irqs; i++)
d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
@@ -816,7 +823,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->mask_buf[i],
chip->irq_drv_data);
if (ret)
- goto err_alloc;
+ goto err_mutex;
}
if (chip->mask_base && !chip->handle_mask_sync) {
@@ -827,7 +834,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -838,7 +845,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -855,7 +862,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -879,7 +886,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
}
@@ -901,7 +908,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
}
@@ -910,7 +917,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (chip->status_is_level) {
ret = read_irq_data(d);
if (ret < 0)
- goto err_alloc;
+ goto err_mutex;
memcpy(d->prev_status_buf, d->status_buf,
array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0])));
@@ -918,7 +925,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
if (ret)
- goto err_alloc;
+ goto err_mutex;
ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
irq_flags | IRQF_ONESHOT,
@@ -935,6 +942,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
err_domain:
/* Should really dispose of the domain but... */
+err_mutex:
+ mutex_destroy(&d->lock);
+ lockdep_unregister_key(&d->lock_key);
err_alloc:
kfree(d->type_buf);
kfree(d->type_buf_def);
@@ -1027,6 +1037,8 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->config_buf[i]);
kfree(d->config_buf);
}
+ mutex_destroy(&d->lock);
+ lockdep_unregister_key(&d->lock_key);
kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
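The per-chip lock_key pattern generalizes: whenever two instances of the same structure can nest (here, a parent regmap-irq handler taking a child chip's mutex), registering a lockdep class per instance keeps lockdep from conflating them. A minimal sketch of the idiom:

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_instance {
	struct lock_class_key key;	/* one lockdep class per instance */
	struct mutex lock;
};

static struct example_instance *example_create(void)
{
	struct example_instance *inst = kzalloc(sizeof(*inst), GFP_KERNEL);

	if (!inst)
		return NULL;
	lockdep_register_key(&inst->key);
	mutex_init_with_key(&inst->lock, &inst->key);
	return inst;
}

static void example_destroy(struct example_instance *inst)
{
	mutex_destroy(&inst->lock);
	lockdep_unregister_key(&inst->key);	/* only after mutex_destroy() */
	kfree(inst);
}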
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index f021e27644e0..658c7e2ac8bf 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -186,7 +186,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->request = bcma_gpio_request;
chip->free = bcma_gpio_free;
chip->get = bcma_gpio_get_value;
- chip->set_rv = bcma_gpio_set_value;
+ chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->parent = bus->dev;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e21492981f7d..f6d6276974ee 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -380,6 +380,9 @@ enum {
/* this is/was a write request */
__EE_WRITE,
+ /* hand back using mempool_free(e, drbd_buffer_page_pool) */
+ __EE_RELEASE_TO_MEMPOOL,
+
/* this is/was a write same request */
__EE_WRITE_SAME,
@@ -402,6 +405,7 @@ enum {
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED (1<<__EE_SUBMITTED)
#define EE_WRITE (1<<__EE_WRITE)
+#define EE_RELEASE_TO_MEMPOOL (1<<__EE_RELEASE_TO_MEMPOOL)
#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
#define EE_APPLICATION (1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
@@ -858,7 +862,6 @@ struct drbd_device {
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
struct list_head done_ee; /* need to send P_WRITE_ACK */
struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
- struct list_head net_ee; /* zero-copy network send in progress */
struct list_head resync_reads;
atomic_t pp_in_use; /* allocated from page pool */
@@ -1329,24 +1332,6 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t drbd_request_mempool;
extern mempool_t drbd_ee_mempool;
-/* drbd's page pool, used to buffer data received from the peer,
- * or data requested by the peer.
- *
- * This does not have an emergency reserve.
- *
- * When allocating from this pool, it first takes pages from the pool.
- * Only if the pool is depleted will try to allocate from the system.
- *
- * The assumption is that pages taken from this pool will be processed,
- * and given back, "quickly", and then can be recycled, so we can avoid
- * frequent calls to alloc_page(), and still will be able to make progress even
- * under memory pressure.
- */
-extern struct page *drbd_pp_pool;
-extern spinlock_t drbd_pp_lock;
-extern int drbd_pp_vacant;
-extern wait_queue_head_t drbd_pp_wait;
-
/* We also need a standard (emergency-reserve backed) page pool
* for meta data IO (activity log, bitmap).
* We can keep it global, as long as it is used as "N pages at a time".
@@ -1354,6 +1339,7 @@ extern wait_queue_head_t drbd_pp_wait;
*/
#define DRBD_MIN_POOL_PAGES 128
extern mempool_t drbd_md_io_page_pool;
+extern mempool_t drbd_buffer_page_pool;
/* We also need to make sure we get a bio
* when we need it for housekeeping purposes */
@@ -1488,10 +1474,7 @@ extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *,
sector_t, unsigned int,
unsigned int,
gfp_t) __must_hold(local);
-extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
- int);
-#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
-#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
+extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req);
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);
@@ -1610,16 +1593,6 @@ static inline struct page *page_chain_next(struct page *page)
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
-{
- struct page *page = peer_req->pages;
- page_chain_for_each(page) {
- if (page_count(page) > 1)
- return 1;
- }
- return 0;
-}
-
static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
struct drbd_resource *resource = device->resource;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 52724b79be30..c73376886e7a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
+mempool_t drbd_buffer_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;
-/* I do not use a standard mempool, because:
- 1) I want to hand out the pre-allocated objects first.
- 2) I want to be able to interrupt sleeping allocation with a signal.
- Note: This is a single linked list, the next pointer is the private
- member of struct page.
- */
-struct page *drbd_pp_pool;
-DEFINE_SPINLOCK(drbd_pp_lock);
-int drbd_pp_vacant;
-wait_queue_head_t drbd_pp_wait;
-
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
@@ -1611,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
struct drbd_peer_request *peer_req)
{
+ bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL);
struct page *page = peer_req->pages;
unsigned len = peer_req->i.size;
int err;
@@ -1619,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- err = _drbd_send_page(peer_device, page, 0, l,
- page_chain_next(page) ? MSG_MORE : 0);
+ if (likely(use_sendpage))
+ err = _drbd_send_page(peer_device, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+ else
+ err = _drbd_no_send_page(peer_device, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+
if (err)
return err;
len -= l;
@@ -1962,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device)
INIT_LIST_HEAD(&device->sync_ee);
INIT_LIST_HEAD(&device->done_ee);
INIT_LIST_HEAD(&device->read_ee);
- INIT_LIST_HEAD(&device->net_ee);
INIT_LIST_HEAD(&device->resync_reads);
INIT_LIST_HEAD(&device->resync_work.list);
INIT_LIST_HEAD(&device->unplug_work.list);
@@ -2043,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device)
D_ASSERT(device, list_empty(&device->sync_ee));
D_ASSERT(device, list_empty(&device->done_ee));
D_ASSERT(device, list_empty(&device->read_ee));
- D_ASSERT(device, list_empty(&device->net_ee));
D_ASSERT(device, list_empty(&device->resync_reads));
D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
D_ASSERT(device, list_empty(&device->resync_work.list));
@@ -2055,19 +2049,11 @@ void drbd_device_cleanup(struct drbd_device *device)
static void drbd_destroy_mempools(void)
{
- struct page *page;
-
- while (drbd_pp_pool) {
- page = drbd_pp_pool;
- drbd_pp_pool = (struct page *)page_private(page);
- __free_page(page);
- drbd_pp_vacant--;
- }
-
/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
bioset_exit(&drbd_io_bio_set);
bioset_exit(&drbd_md_io_bio_set);
+ mempool_exit(&drbd_buffer_page_pool);
mempool_exit(&drbd_md_io_page_pool);
mempool_exit(&drbd_ee_mempool);
mempool_exit(&drbd_request_mempool);
@@ -2086,9 +2072,8 @@ static void drbd_destroy_mempools(void)
static int drbd_create_mempools(void)
{
- struct page *page;
const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
- int i, ret;
+ int ret;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -2125,6 +2110,10 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
+ ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0);
+ if (ret)
+ goto Enomem;
+
ret = mempool_init_slab_pool(&drbd_request_mempool, number,
drbd_request_cache);
if (ret)
@@ -2134,15 +2123,6 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
- for (i = 0; i < number; i++) {
- page = alloc_page(GFP_HIGHUSER);
- if (!page)
- goto Enomem;
- set_page_private(page, (unsigned long)drbd_pp_pool);
- drbd_pp_pool = page;
- }
- drbd_pp_vacant = number;
-
return 0;
Enomem:
@@ -2169,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
rr = drbd_free_peer_reqs(device, &device->done_ee);
if (rr)
drbd_err(device, "%d EEs in done list found!\n", rr);
-
- rr = drbd_free_peer_reqs(device, &device->net_ee);
- if (rr)
- drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
@@ -2863,11 +2839,6 @@ static int __init drbd_init(void)
return err;
}
- /*
- * allocate all necessary structs
- */
- init_waitqueue_head(&drbd_pp_wait);
-
drbd_proc = NULL; /* play safe for drbd_cleanup */
idr_init(&drbd_devices);
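The hand-rolled drbd_pp_pool page list gives way to a standard page mempool. The core pattern, sketched standalone: preallocate a reserve at init, let mempool_alloc() fall back to that reserve when the page allocator fails, and return pages with mempool_free(); this is what lets the non-waiting GFP_TRY allocations in the receiver keep making progress under memory pressure.

#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/mm.h>

static mempool_t example_page_pool;

static int example_pool_init(int min_nr)
{
	/* keep min_nr order-0 pages in reserve */
	return mempool_init_page_pool(&example_page_pool, min_nr, 0);
}

static struct page *example_get_page(void)
{
	/* non-waiting flags (cf. drbd's GFP_TRY); dips into the reserve
	 * when alloc_page() cannot satisfy the request
	 */
	return mempool_alloc(&example_page_pool, __GFP_HIGHMEM | __GFP_NOWARN);
}

static void example_put_page(struct page *page)
{
	mempool_free(page, &example_page_pool);
}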
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 975024cf03c5..caaf2781136d 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -33,6 +33,7 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
+#include <linux/mempool.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
@@ -63,182 +64,31 @@ static int e_end_block(struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
-/*
- * some helper functions to deal with single linked page lists,
- * page->private being our "next" pointer.
- */
-
-/* If at least n pages are linked at head, get n pages off.
- * Otherwise, don't modify head, and return NULL.
- * Locking is the responsibility of the caller.
- */
-static struct page *page_chain_del(struct page **head, int n)
-{
- struct page *page;
- struct page *tmp;
-
- BUG_ON(!n);
- BUG_ON(!head);
-
- page = *head;
-
- if (!page)
- return NULL;
-
- while (page) {
- tmp = page_chain_next(page);
- if (--n == 0)
- break; /* found sufficient pages */
- if (tmp == NULL)
- /* insufficient pages, don't use any of them. */
- return NULL;
- page = tmp;
- }
-
- /* add end of list marker for the returned list */
- set_page_private(page, 0);
- /* actual return value, and adjustment of head */
- page = *head;
- *head = tmp;
- return page;
-}
-
-/* may be used outside of locks to find the tail of a (usually short)
- * "private" page chain, before adding it back to a global chain head
- * with page_chain_add() under a spinlock. */
-static struct page *page_chain_tail(struct page *page, int *len)
-{
- struct page *tmp;
- int i = 1;
- while ((tmp = page_chain_next(page))) {
- ++i;
- page = tmp;
- }
- if (len)
- *len = i;
- return page;
-}
-
-static int page_chain_free(struct page *page)
-{
- struct page *tmp;
- int i = 0;
- page_chain_for_each_safe(page, tmp) {
- put_page(page);
- ++i;
- }
- return i;
-}
-
-static void page_chain_add(struct page **head,
- struct page *chain_first, struct page *chain_last)
-{
-#if 1
- struct page *tmp;
- tmp = page_chain_tail(chain_first, NULL);
- BUG_ON(tmp != chain_last);
-#endif
-
- /* add chain to head */
- set_page_private(chain_last, (unsigned long)*head);
- *head = chain_first;
-}
-
-static struct page *__drbd_alloc_pages(struct drbd_device *device,
- unsigned int number)
+static struct page *__drbd_alloc_pages(unsigned int number)
{
struct page *page = NULL;
struct page *tmp = NULL;
unsigned int i = 0;
- /* Yes, testing drbd_pp_vacant outside the lock is racy.
- * So what. It saves a spin_lock. */
- if (drbd_pp_vacant >= number) {
- spin_lock(&drbd_pp_lock);
- page = page_chain_del(&drbd_pp_pool, number);
- if (page)
- drbd_pp_vacant -= number;
- spin_unlock(&drbd_pp_lock);
- if (page)
- return page;
- }
-
/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
for (i = 0; i < number; i++) {
- tmp = alloc_page(GFP_TRY);
+ tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY);
if (!tmp)
- break;
+ goto fail;
set_page_private(tmp, (unsigned long)page);
page = tmp;
}
-
- if (i == number)
- return page;
-
- /* Not enough pages immediately available this time.
- * No need to jump around here, drbd_alloc_pages will retry this
- * function "soon". */
- if (page) {
- tmp = page_chain_tail(page, NULL);
- spin_lock(&drbd_pp_lock);
- page_chain_add(&drbd_pp_pool, page, tmp);
- drbd_pp_vacant += i;
- spin_unlock(&drbd_pp_lock);
+ return page;
+fail:
+ page_chain_for_each_safe(page, tmp) {
+ set_page_private(page, 0);
+ mempool_free(page, &drbd_buffer_page_pool);
}
return NULL;
}
-static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
- struct list_head *to_be_freed)
-{
- struct drbd_peer_request *peer_req, *tmp;
-
- /* The EEs are always appended to the end of the list. Since
- they are sent in order over the wire, they have to finish
- in order. As soon as we see the first not finished we can
- stop to examine the list... */
-
- list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
- if (drbd_peer_req_has_active_page(peer_req))
- break;
- list_move(&peer_req->w.list, to_be_freed);
- }
-}
-
-static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
-{
- LIST_HEAD(reclaimed);
- struct drbd_peer_request *peer_req, *t;
-
- spin_lock_irq(&device->resource->req_lock);
- reclaim_finished_net_peer_reqs(device, &reclaimed);
- spin_unlock_irq(&device->resource->req_lock);
- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(device, peer_req);
-}
-
-static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
-{
- struct drbd_peer_device *peer_device;
- int vnr;
-
- rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
- struct drbd_device *device = peer_device->device;
- if (!atomic_read(&device->pp_in_use_by_net))
- continue;
-
- kref_get(&device->kref);
- rcu_read_unlock();
- drbd_reclaim_net_peer_reqs(device);
- kref_put(&device->kref, drbd_destroy_device);
- rcu_read_lock();
- }
- rcu_read_unlock();
-}
-
/**
* drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
* @peer_device: DRBD device.
@@ -263,9 +113,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
bool retry)
{
struct drbd_device *device = peer_device->device;
- struct page *page = NULL;
+ struct page *page;
struct net_conf *nc;
- DEFINE_WAIT(wait);
unsigned int mxb;
rcu_read_lock();
@@ -273,37 +122,9 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
mxb = nc ? nc->max_buffers : 1000000;
rcu_read_unlock();
- if (atomic_read(&device->pp_in_use) < mxb)
- page = __drbd_alloc_pages(device, number);
-
- /* Try to keep the fast path fast, but occasionally we need
- * to reclaim the pages we lended to the network stack. */
- if (page && atomic_read(&device->pp_in_use_by_net) > 512)
- drbd_reclaim_net_peer_reqs(device);
-
- while (page == NULL) {
- prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
-
- drbd_reclaim_net_peer_reqs(device);
-
- if (atomic_read(&device->pp_in_use) < mxb) {
- page = __drbd_alloc_pages(device, number);
- if (page)
- break;
- }
-
- if (!retry)
- break;
-
- if (signal_pending(current)) {
- drbd_warn(device, "drbd_alloc_pages interrupted!\n");
- break;
- }
-
- if (schedule_timeout(HZ/10) == 0)
- mxb = UINT_MAX;
- }
- finish_wait(&drbd_pp_wait, &wait);
+ if (atomic_read(&device->pp_in_use) >= mxb)
+ schedule_timeout_interruptible(HZ / 10);
+ page = __drbd_alloc_pages(number);
if (page)
atomic_add(number, &device->pp_in_use);
@@ -314,29 +135,25 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
* Is also used from inside another spin_lock_irq(&resource->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
-static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_device *device, struct page *page)
{
- atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
- int i;
+ struct page *tmp;
+ int i = 0;
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
- i = page_chain_free(page);
- else {
- struct page *tmp;
- tmp = page_chain_tail(page, &i);
- spin_lock(&drbd_pp_lock);
- page_chain_add(&drbd_pp_pool, page, tmp);
- drbd_pp_vacant += i;
- spin_unlock(&drbd_pp_lock);
- }
- i = atomic_sub_return(i, a);
+ page_chain_for_each_safe(page, tmp) {
+ set_page_private(page, 0);
+ if (page_count(page) == 1)
+ mempool_free(page, &drbd_buffer_page_pool);
+ else
+ put_page(page);
+ i++;
+ }
+ i = atomic_sub_return(i, &device->pp_in_use);
if (i < 0)
- drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
- is_net ? "pp_in_use_by_net" : "pp_in_use", i);
- wake_up(&drbd_pp_wait);
+ drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
}
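Freeing is now symmetric with the mempool-backed allocation, which is also why the net_ee reclaim machinery removed above is no longer needed: a page the network stack still references is simply released with put_page() and the stack drops the final reference on its own schedule. Sketch of the refcount decision, using a fake page type (illustration only):

#include <stdbool.h>

struct fake_page { int refcount; bool in_pool; };

static void free_page_model(struct fake_page *pg)
{
	if (pg->refcount == 1)
		pg->in_pool = true;	/* mempool_free(): we held the last reference */
	else
		pg->refcount--;		/* put_page(): another owner frees it later */
}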
/*
@@ -380,6 +197,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
gfpflags_allow_blocking(gfp_mask));
if (!page)
goto fail;
+ if (!mempool_is_saturated(&drbd_buffer_page_pool))
+ peer_req->flags |= EE_RELEASE_TO_MEMPOOL;
}
memset(peer_req, 0, sizeof(*peer_req));
@@ -403,13 +222,12 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
return NULL;
}
-void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
- int is_net)
+void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
might_sleep();
if (peer_req->flags & EE_HAS_DIGEST)
kfree(peer_req->digest);
- drbd_free_pages(device, peer_req->pages, is_net);
+ drbd_free_pages(device, peer_req->pages);
D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
D_ASSERT(device, drbd_interval_empty(&peer_req->i));
if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
@@ -424,14 +242,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
LIST_HEAD(work_list);
struct drbd_peer_request *peer_req, *t;
int count = 0;
- int is_net = list == &device->net_ee;
spin_lock_irq(&device->resource->req_lock);
list_splice_init(list, &work_list);
spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
- __drbd_free_peer_req(device, peer_req, is_net);
+ drbd_free_peer_req(device, peer_req);
count++;
}
return count;
@@ -443,18 +260,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
LIST_HEAD(work_list);
- LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
int err = 0;
spin_lock_irq(&device->resource->req_lock);
- reclaim_finished_net_peer_reqs(device, &reclaimed);
list_splice_init(&device->done_ee, &work_list);
spin_unlock_irq(&device->resource->req_lock);
- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(device, peer_req);
-
/* possible callbacks here:
* e_end_block, and e_end_resync_block, e_send_superseded.
* all ignore the last argument.
@@ -1975,7 +1787,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
data_size -= len;
}
kunmap(page);
- drbd_free_pages(peer_device->device, page, 0);
+ drbd_free_pages(peer_device->device, page);
return err;
}
@@ -5224,16 +5036,6 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
put_ldev(device);
}
- /* tcp_close and release of sendpage pages can be deferred. I don't
- * want to use SO_LINGER, because apparently it can be deferred for
- * more than 20 seconds (longest time I checked).
- *
- * Actually we don't care for exactly when the network stack does its
- * put_page(), but release our reference on these pages right here.
- */
- i = drbd_free_peer_reqs(device, &device->net_ee);
- if (i)
- drbd_info(device, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&device->pp_in_use_by_net);
if (i)
drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
@@ -5980,8 +5782,6 @@ int drbd_ack_receiver(struct drbd_thread *thi)
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
- conn_reclaim_net_peer_reqs(connection);
-
if (test_and_clear_bit(SEND_PING, &connection->flags)) {
if (drbd_send_ping(connection)) {
drbd_err(connection, "drbd_send_ping has failed\n");
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index a6ea737b3b71..dea3e79d044f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1030,22 +1030,6 @@ out:
return 1;
}
-/* helper */
-static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
-{
- if (drbd_peer_req_has_active_page(peer_req)) {
- /* This might happen if sendpage() has not finished */
- int i = PFN_UP(peer_req->i.size);
- atomic_add(i, &device->pp_in_use_by_net);
- atomic_sub(i, &device->pp_in_use);
- spin_lock_irq(&device->resource->req_lock);
- list_add_tail(&peer_req->w.list, &device->net_ee);
- spin_unlock_irq(&device->resource->req_lock);
- wake_up(&drbd_pp_wait);
- } else
- drbd_free_peer_req(device, peer_req);
-}
-
/**
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
* @w: work object.
@@ -1059,9 +1043,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
@@ -1074,12 +1057,12 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
}
- dec_unacked(device);
-
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
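All three completion callbacks in drbd_worker.c get the same treatment: with move_to_net_ee_or_free() gone, the cancel path and the normal path share one exit label that does the common bookkeeping. The pattern in miniature, with hypothetical stubs (illustration only):

static int send_reply(void)          { return 0; }	/* stands in for drbd_send_block() */
static void dec_unacked_stub(void)   { }
static void free_peer_req_stub(void) { }

static int end_request_model(int cancel)
{
	int err;

	if (cancel) {
		err = 0;
		goto out;
	}
	err = send_reply();
out:
	dec_unacked_stub();	/* shared bookkeeping for both paths */
	free_peer_req_stub();
	return err;
}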
@@ -1120,9 +1103,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (get_ldev_if_state(device, D_FAILED)) {
@@ -1155,13 +1137,12 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
/* update resync data with failure */
drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
}
-
- dec_unacked(device);
-
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1176,9 +1157,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (get_ldev(device)) {
@@ -1220,12 +1200,12 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
if (drbd_ratelimit())
drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
}
-
- dec_unacked(device);
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block/ack() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index aa163ae9b2aa..91642c9a3b29 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1179,7 +1179,7 @@ static int copy_from_nullb(struct nullb *nullb, struct page *dest,
memcpy_page(dest, off + count, t_page->page, offset,
temp);
else
- zero_user(dest, off + count, temp);
+ memzero_page(dest, off + count, temp);
count += temp;
sector += temp >> SECTOR_SHIFT;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 6561d2a561fa..99abd67b708b 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -235,7 +235,7 @@ struct ublk_device {
struct completion completion;
unsigned int nr_queues_ready;
- unsigned int nr_privileged_daemon;
+ bool unprivileged_daemons;
struct mutex cancel_mutex;
bool canceling;
pid_t ublksrv_tgid;
@@ -1389,7 +1389,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
{
blk_status_t res;
- if (unlikely(ubq->fail_io))
+ if (unlikely(READ_ONCE(ubq->fail_io)))
return BLK_STS_TARGET;
/* With recovery feature enabled, force_abort is set in
@@ -1401,7 +1401,8 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
* Note: force_abort is guaranteed to be seen because it is set
* before request queue is unquiesced.
*/
- if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
+ if (ublk_nosrv_should_queue_io(ubq) &&
+ unlikely(READ_ONCE(ubq->force_abort)))
return BLK_STS_IOERR;
if (check_cancel && unlikely(ubq->canceling))
@@ -1550,7 +1551,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
/* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
ub->mm = NULL;
ub->nr_queues_ready = 0;
- ub->nr_privileged_daemon = 0;
+ ub->unprivileged_daemons = false;
ub->ublksrv_tgid = -1;
}
@@ -1644,7 +1645,6 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* Transition the device to the nosrv state. What exactly this
* means depends on the recovery flags
*/
- blk_mq_quiesce_queue(disk->queue);
if (ublk_nosrv_should_stop_dev(ub)) {
/*
* Allow any pending/future I/O to pass through quickly
@@ -1652,8 +1652,7 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* waits for all pending I/O to complete
*/
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_get_queue(ub, i)->force_abort = true;
- blk_mq_unquiesce_queue(disk->queue);
+ WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true);
ublk_stop_dev_unlocked(ub);
} else {
@@ -1663,9 +1662,8 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
} else {
ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_get_queue(ub, i)->fail_io = true;
+ WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true);
}
- blk_mq_unquiesce_queue(disk->queue);
}
unlock:
mutex_unlock(&ub->mutex);
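Dropping the quiesce/unquiesce bracket is safe because the flags are now accessed through the WRITE_ONCE/READ_ONCE pair, so the I/O path can observe them locklessly without a data race. C11 analogue using relaxed atomics (illustration only):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool fail_io;

static void mark_failing(void)	/* ublk_ch_release() side */
{
	atomic_store_explicit(&fail_io, true, memory_order_relaxed);
}

static bool should_fail(void)	/* ublk_prep_req() side */
{
	return atomic_load_explicit(&fail_io, memory_order_relaxed);
}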
@@ -1980,12 +1978,10 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
__must_hold(&ub->mutex)
{
ubq->nr_io_ready++;
- if (ublk_queue_ready(ubq)) {
+ if (ublk_queue_ready(ubq))
ub->nr_queues_ready++;
-
- if (capable(CAP_SYS_ADMIN))
- ub->nr_privileged_daemon++;
- }
+ if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN))
+ ub->unprivileged_daemons = true;
if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
/* now we are ready for handling ublk io request */
@@ -2880,8 +2876,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
ublk_apply_params(ub);
- /* don't probe partitions if any one ubq daemon is un-trusted */
- if (ub->nr_privileged_daemon != ub->nr_queues_ready)
+ /* don't probe partitions if any daemon task is un-trusted */
+ if (ub->unprivileged_daemons)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
ublk_get_device(ub);
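The counter bookkeeping collapses into a sticky boolean: instead of counting privileged daemons and comparing against the number of ready queues at start time, the device latches a bit the first time any non-CAP_SYS_ADMIN daemon task shows up. In miniature (illustration only):

#include <stdbool.h>

static bool unprivileged_daemons;

static void mark_io_ready_model(bool task_is_admin)
{
	if (!unprivileged_daemons && !task_is_admin)
		unprivileged_daemons = true;	/* latched until device reset */
}

static bool suppress_part_scan_model(void)
{
	return unprivileged_daemons;	/* was: nr_privileged != nr_queues_ready */
}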
diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
index 553b1a713ab9..a423228e201b 100644
--- a/drivers/block/zloop.c
+++ b/drivers/block/zloop.c
@@ -700,6 +700,8 @@ static void zloop_free_disk(struct gendisk *disk)
struct zloop_device *zlo = disk->private_data;
unsigned int i;
+ blk_mq_free_tag_set(&zlo->tag_set);
+
for (i = 0; i < zlo->nr_zones; i++) {
struct zloop_zone *zone = &zlo->zones[i];
@@ -1080,7 +1082,6 @@ static int zloop_ctl_remove(struct zloop_options *opts)
del_gendisk(zlo->disk);
put_disk(zlo->disk);
- blk_mq_free_tag_set(&zlo->tag_set);
pr_info("Removed device %d\n", opts->id);
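Moving blk_mq_free_tag_set() from the remove path into the ->free_disk() callback ties its lifetime to the gendisk reference count, so the tag set can no longer be freed while put_disk() still has requests to drain. Resulting teardown order (illustrative comment):

/*
 * del_gendisk(zlo->disk);   quiesce and drain I/O
 * put_disk(zlo->disk);      may only drop the last reference later
 * ->free_disk()             blk_mq_free_tag_set() now runs here,
 *                           strictly after the final request completes
 */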
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 06016ac3965c..be69d21c9aa7 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -555,7 +555,7 @@ int btintel_parse_version_tlv(struct hci_dev *hdev,
/* Consume Command Complete Status field */
skb_pull(skb, 1);
- /* Event parameters contatin multiple TLVs. Read each of them
+ /* Event parameters contain multiple TLVs. Read each of them
* and only keep the required data. Also, it use existing legacy
* version field like hw_platform, hw_variant, and fw_variant
* to keep the existing setup flow
@@ -889,7 +889,7 @@ int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param)
params.boot_param = cpu_to_le32(boot_param);
- skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
+ skb = __hci_cmd_sync(hdev, BTINTEL_HCI_OP_RESET, sizeof(params), &params,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Failed to send Intel Reset command");
@@ -1287,7 +1287,7 @@ static void btintel_reset_to_bootloader(struct hci_dev *hdev)
params.boot_option = 0x00;
params.boot_param = cpu_to_le32(0x00000000);
- skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params),
+ skb = __hci_cmd_sync(hdev, BTINTEL_HCI_OP_RESET, sizeof(params),
&params, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "FW download error recovery failed (%ld)",
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 1d12c4113c66..431998049e68 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -52,6 +52,8 @@ struct intel_tlv {
u8 val[];
} __packed;
+#define BTINTEL_HCI_OP_RESET 0xfc01
+
#define BTINTEL_CNVI_BLAZARI 0x900
#define BTINTEL_CNVI_BLAZARIW 0x901
#define BTINTEL_CNVI_GAP 0x910
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index f4e3fb54fe76..6e7bbbd35279 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -35,12 +35,20 @@
/* Intel Bluetooth PCIe device id table */
static const struct pci_device_id btintel_pcie_table[] = {
+ { BTINTEL_PCI_DEVICE(0x4D76, PCI_ANY_ID) },
{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
{ BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
+struct btintel_pcie_dev_recovery {
+ struct list_head list;
+ u8 count;
+ time64_t last_error;
+ char name[];
+};
+
/* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
#define BTINTEL_PCIE_HCI_TYPE_LEN 4
#define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001
@@ -62,6 +70,9 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
#define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
#define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
+#define BTINTEL_PCIE_RESET_WINDOW_SECS 5
+#define BTINTEL_PCIE_FLR_MAX_RETRY 1
+
/* Alive interrupt context */
enum {
BTINTEL_PCIE_ROM,
@@ -99,6 +110,36 @@ struct btintel_pcie_dbgc_ctxt {
struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
};
+struct btintel_pcie_removal {
+ struct pci_dev *pdev;
+ struct work_struct work;
+};
+
+static LIST_HEAD(btintel_pcie_recovery_list);
+static DEFINE_SPINLOCK(btintel_pcie_recovery_lock);
+
+static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
+{
+ switch (alive_intr_ctxt) {
+ case BTINTEL_PCIE_ROM:
+ return "rom";
+ case BTINTEL_PCIE_FW_DL:
+ return "fw_dl";
+ case BTINTEL_PCIE_D0:
+ return "d0";
+ case BTINTEL_PCIE_D3:
+ return "d3";
+ case BTINTEL_PCIE_HCI_RESET:
+ return "hci_reset";
+ case BTINTEL_PCIE_INTEL_HCI_RESET1:
+ return "intel_reset1";
+ case BTINTEL_PCIE_INTEL_HCI_RESET2:
+ return "intel_reset2";
+ default:
+ return "unknown";
+ }
+}
+
/* This function initializes the memory for DBGC buffers and formats the
* DBGC fragment which consists header info and DBGC buffer's LSB, MSB and
* size as the payload
@@ -299,10 +340,14 @@ static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
}
static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 pkt_type, u16 opcode)
{
int ret;
u16 tfd_index;
+ u32 old_ctxt;
+ bool wait_on_alive = false;
+ struct hci_dev *hdev = data->hdev;
+
struct txq *txq = &data->txq;
tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
@@ -310,6 +355,26 @@ static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
if (tfd_index > txq->count)
return -ERANGE;
+ /* Firmware raises alive interrupt on HCI_OP_RESET or
+ * BTINTEL_HCI_OP_RESET
+ */
+ wait_on_alive = (pkt_type == BTINTEL_PCIE_HCI_CMD_PKT &&
+ (opcode == BTINTEL_HCI_OP_RESET || opcode == HCI_OP_RESET));
+
+ if (wait_on_alive) {
+ data->gp0_received = false;
+ old_ctxt = data->alive_intr_ctxt;
+ data->alive_intr_ctxt =
+ (opcode == BTINTEL_HCI_OP_RESET ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
+ BTINTEL_PCIE_HCI_RESET);
+ bt_dev_dbg(data->hdev, "sending cmd: 0x%4.4x alive context changed: %s -> %s",
+ opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ }
+
+ memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &pkt_type,
+ BTINTEL_PCIE_HCI_TYPE_LEN);
+
/* Prepare for TX. It updates the TFD with the length of data and
* address of the DMA buffer, and copy the data to the DMA buffer
*/
@@ -328,11 +393,24 @@ static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
if (!ret) {
- bt_dev_err(data->hdev, "tx completion timeout");
+ bt_dev_err(data->hdev, "Timeout (%u ms) on tx completion",
+ BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS);
btintel_pcie_dump_debug_registers(data->hdev);
return -ETIME;
}
+ if (wait_on_alive) {
+ ret = wait_event_timeout(data->gp0_wait_q,
+ data->gp0_received,
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+ if (!ret) {
+ hdev->stat.err_tx++;
+ bt_dev_err(hdev, "Timeout (%u ms) on alive interrupt, alive context: %s",
+ BTINTEL_DEFAULT_INTR_TIMEOUT_MS,
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ return -ETIME;
+ }
+ }
return 0;
}
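Centralizing the alive-interrupt handshake in btintel_pcie_send_sync() means the flag is armed before the TFD is queued and the wait happens after TX completion, closing the ordering window the old send_frame() sequencing left open. The shape of it, modeled with a polled flag in place of wait_event_timeout() and hypothetical stubs (illustration only):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool gp0_received;

static int do_send(void)             { return 0; }	/* stub TX path */
static bool poll_with_timeout(void)  { return atomic_load(&gp0_received); }

static int send_sync_model(bool wait_on_alive)
{
	if (wait_on_alive)
		atomic_store(&gp0_received, false);	/* arm before sending */

	if (do_send() != 0)
		return -1;

	if (wait_on_alive && !poll_with_timeout())
		return -1;				/* -ETIME in the driver */
	return 0;
}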
@@ -811,28 +889,6 @@ static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
}
-static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
-{
- switch (alive_intr_ctxt) {
- case BTINTEL_PCIE_ROM:
- return "rom";
- case BTINTEL_PCIE_FW_DL:
- return "fw_dl";
- case BTINTEL_PCIE_D0:
- return "d0";
- case BTINTEL_PCIE_D3:
- return "d3";
- case BTINTEL_PCIE_HCI_RESET:
- return "hci_reset";
- case BTINTEL_PCIE_INTEL_HCI_RESET1:
- return "intel_reset1";
- case BTINTEL_PCIE_INTEL_HCI_RESET2:
- return "intel_reset2";
- default:
- return "unknown";
- }
-}
-
static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
void *buf, u32 dev_addr, int len)
{
@@ -928,11 +984,13 @@ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
case BTINTEL_PCIE_INTEL_HCI_RESET1:
if (btintel_pcie_in_op(data)) {
submit_rx = true;
+ signal_waitq = true;
break;
}
if (btintel_pcie_in_iml(data)) {
submit_rx = true;
+ signal_waitq = true;
data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
break;
}
@@ -1930,7 +1988,9 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
__u16 opcode = ~0;
int ret;
u32 type;
- u32 old_ctxt;
+
+ if (test_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
+ return -ENODEV;
/* Due to the fw limitation, the type header of the packet should be
* 4 bytes unlike 1 byte for UART. In UART, the firmware can read
@@ -1955,17 +2015,14 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
struct hci_command_hdr *cmd = (void *)skb->data;
__u16 opcode = le16_to_cpu(cmd->opcode);
- /* When the 0xfc01 command is issued to boot into
- * the operational firmware, it will actually not
- * send a command complete event. To keep the flow
+ /* When the BTINTEL_HCI_OP_RESET command is issued to
+ * boot into the operational firmware, it will actually
+ * not send a command complete event. To keep the flow
* control working inject that event here.
*/
- if (opcode == 0xfc01)
+ if (opcode == BTINTEL_HCI_OP_RESET)
btintel_pcie_inject_cmd_complete(hdev, opcode);
}
- /* Firmware raises alive interrupt on HCI_OP_RESET */
- if (opcode == HCI_OP_RESET)
- data->gp0_received = false;
hdev->stat.cmd_tx++;
break;
@@ -1984,38 +2041,14 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
bt_dev_err(hdev, "Unknown HCI packet type");
return -EILSEQ;
}
- memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &type,
- BTINTEL_PCIE_HCI_TYPE_LEN);
- ret = btintel_pcie_send_sync(data, skb);
+ ret = btintel_pcie_send_sync(data, skb, type, opcode);
if (ret) {
hdev->stat.err_tx++;
bt_dev_err(hdev, "Failed to send frame (%d)", ret);
goto exit_error;
}
- if (type == BTINTEL_PCIE_HCI_CMD_PKT &&
- (opcode == HCI_OP_RESET || opcode == 0xfc01)) {
- old_ctxt = data->alive_intr_ctxt;
- data->alive_intr_ctxt =
- (opcode == 0xfc01 ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
- BTINTEL_PCIE_HCI_RESET);
- bt_dev_dbg(data->hdev, "sent cmd: 0x%4.4x alive context changed: %s -> %s",
- opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
- btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
- if (opcode == HCI_OP_RESET) {
- ret = wait_event_timeout(data->gp0_wait_q,
- data->gp0_received,
- msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
- if (!ret) {
- hdev->stat.err_tx++;
- bt_dev_err(hdev, "No alive interrupt received for %s",
- btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
- ret = -ETIME;
- goto exit_error;
- }
- }
- }
hdev->stat.byte_tx += skb->len;
kfree_skb(skb);
@@ -2192,9 +2225,191 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
}
btintel_pcie_start_rx(data);
}
+
+ if (!err)
+ set_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags);
return err;
}
+static struct btintel_pcie_dev_recovery *
+btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
+{
+ struct btintel_pcie_dev_recovery *tmp, *data = NULL;
+ const char *name = pci_name(pdev);
+ struct hci_dev *hdev = to_hci_dev(dev);
+
+ spin_lock(&btintel_pcie_recovery_lock);
+ list_for_each_entry(tmp, &btintel_pcie_recovery_list, list) {
+ if (strcmp(tmp->name, name))
+ continue;
+ data = tmp;
+ break;
+ }
+ spin_unlock(&btintel_pcie_recovery_lock);
+
+ if (data) {
+ bt_dev_dbg(hdev, "Found restart data for BDF: %s", data->name);
+ return data;
+ }
+
+ data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC);
+ if (!data)
+ return NULL;
+
+ strscpy_pad(data->name, name, strlen(name) + 1);
+ spin_lock(&btintel_pcie_recovery_lock);
+ list_add_tail(&data->list, &btintel_pcie_recovery_list);
+ spin_unlock(&btintel_pcie_recovery_lock);
+
+ return data;
+}
+
+static void btintel_pcie_free_restart_list(void)
+{
+ struct btintel_pcie_dev_recovery *tmp;
+
+ while ((tmp = list_first_entry_or_null(&btintel_pcie_recovery_list,
+ typeof(*tmp), list))) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+static void btintel_pcie_inc_recovery_count(struct pci_dev *pdev,
+ struct device *dev)
+{
+ struct btintel_pcie_dev_recovery *data;
+ time64_t retry_window;
+
+ data = btintel_pcie_get_recovery(pdev, dev);
+ if (!data)
+ return;
+
+ retry_window = ktime_get_boottime_seconds() - data->last_error;
+ if (data->count == 0) {
+ data->last_error = ktime_get_boottime_seconds();
+ data->count++;
+ } else if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
+ data->count <= BTINTEL_PCIE_FLR_MAX_RETRY) {
+ data->count++;
+ } else if (retry_window > BTINTEL_PCIE_RESET_WINDOW_SECS) {
+ data->last_error = 0;
+ data->count = 0;
+ }
+}
+
+static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data);
+
+static void btintel_pcie_removal_work(struct work_struct *wk)
+{
+ struct btintel_pcie_removal *removal =
+ container_of(wk, struct btintel_pcie_removal, work);
+ struct pci_dev *pdev = removal->pdev;
+ struct btintel_pcie_data *data;
+ int err;
+
+ pci_lock_rescan_remove();
+
+ if (!pdev->bus)
+ goto error;
+
+ data = pci_get_drvdata(pdev);
+
+ btintel_pcie_disable_interrupts(data);
+ btintel_pcie_synchronize_irqs(data);
+
+ flush_work(&data->rx_work);
+ flush_work(&data->hdev->dump.dump_rx);
+
+ bt_dev_dbg(data->hdev, "Release bluetooth interface");
+ btintel_pcie_release_hdev(data);
+
+ err = pci_reset_function(pdev);
+ if (err) {
+ BT_ERR("Failed resetting the pcie device (%d)", err);
+ goto error;
+ }
+
+ btintel_pcie_enable_interrupts(data);
+ btintel_pcie_config_msix(data);
+
+ err = btintel_pcie_enable_bt(data);
+ if (err) {
+ BT_ERR("Failed to enable bluetooth hardware after reset (%d)",
+ err);
+ goto error;
+ }
+
+ btintel_pcie_reset_ia(data);
+ btintel_pcie_start_rx(data);
+ data->flags = 0;
+
+ err = btintel_pcie_setup_hdev(data);
+ if (err) {
+ BT_ERR("Failed registering hdev (%d)", err);
+ goto error;
+ }
+error:
+ pci_dev_put(pdev);
+ pci_unlock_rescan_remove();
+ kfree(removal);
+}
+
+static void btintel_pcie_reset(struct hci_dev *hdev)
+{
+ struct btintel_pcie_removal *removal;
+ struct btintel_pcie_data *data;
+
+ data = hci_get_drvdata(hdev);
+
+ if (!test_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags))
+ return;
+
+ if (test_and_set_bit(BTINTEL_PCIE_RECOVERY_IN_PROGRESS, &data->flags))
+ return;
+
+ removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
+ if (!removal)
+ return;
+
+ removal->pdev = data->pdev;
+ INIT_WORK(&removal->work, btintel_pcie_removal_work);
+ pci_dev_get(removal->pdev);
+ schedule_work(&removal->work);
+}
+
+static void btintel_pcie_hw_error(struct hci_dev *hdev, u8 code)
+{
+ struct btintel_pcie_dev_recovery *data;
+ struct btintel_pcie_data *dev_data = hci_get_drvdata(hdev);
+ struct pci_dev *pdev = dev_data->pdev;
+ time64_t retry_window;
+
+ if (code == 0x13) {
+ bt_dev_err(hdev, "Encountered top exception");
+ return;
+ }
+
+ data = btintel_pcie_get_recovery(pdev, &hdev->dev);
+ if (!data)
+ return;
+
+ retry_window = ktime_get_boottime_seconds() - data->last_error;
+
+ if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
+ data->count >= BTINTEL_PCIE_FLR_MAX_RETRY) {
+ bt_dev_err(hdev, "Exhausted maximum: %d recovery attempts: %d",
+ BTINTEL_PCIE_FLR_MAX_RETRY, data->count);
+ bt_dev_dbg(hdev, "Boot time: %lld seconds",
+ ktime_get_boottime_seconds());
+ bt_dev_dbg(hdev, "last error at: %lld seconds",
+ data->last_error);
+ return;
+ }
+ btintel_pcie_inc_recovery_count(pdev, &hdev->dev);
+ btintel_pcie_reset(hdev);
+}
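The recovery policy is a time-windowed retry budget: at most BTINTEL_PCIE_FLR_MAX_RETRY function-level resets within BTINTEL_PCIE_RESET_WINDOW_SECS, tracked per PCI BDF so the record survives re-probing. A compilable user-space model of the same decision, with CLOCK_MONOTONIC standing in for ktime_get_boottime_seconds() (illustration only):

#include <stdbool.h>
#include <time.h>

#define RESET_WINDOW_SECS	5
#define FLR_MAX_RETRY		1

struct recovery { time_t last_error; int count; };

static time_t boot_seconds(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec;
}

static bool may_recover(struct recovery *r)
{
	time_t window = boot_seconds() - r->last_error;

	if (window < RESET_WINDOW_SECS && r->count >= FLR_MAX_RETRY)
		return false;			/* budget spent, give up */
	if (r->count == 0 || window >= RESET_WINDOW_SECS) {
		r->last_error = boot_seconds();	/* open a new window */
		r->count = 1;
	} else {
		r->count++;
	}
	return true;
}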
+
static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
int err;
@@ -2216,9 +2431,10 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
hdev->send = btintel_pcie_send_frame;
hdev->setup = btintel_pcie_setup;
hdev->shutdown = btintel_shutdown_combined;
- hdev->hw_error = btintel_hw_error;
+ hdev->hw_error = btintel_pcie_hw_error;
hdev->set_diag = btintel_set_diag;
hdev->set_bdaddr = btintel_set_bdaddr;
+ hdev->reset = btintel_pcie_reset;
err = hci_register_dev(hdev);
if (err < 0) {
@@ -2366,7 +2582,20 @@ static struct pci_driver btintel_pcie_driver = {
.driver.coredump = btintel_pcie_coredump
#endif
};
-module_pci_driver(btintel_pcie_driver);
+
+static int __init btintel_pcie_init(void)
+{
+ return pci_register_driver(&btintel_pcie_driver);
+}
+
+static void __exit btintel_pcie_exit(void)
+{
+ pci_unregister_driver(&btintel_pcie_driver);
+ btintel_pcie_free_restart_list();
+}
+
+module_init(btintel_pcie_init);
+module_exit(btintel_pcie_exit);
MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index 7dad4523236c..0fa876c5b954 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -117,7 +117,9 @@ enum {
enum {
BTINTEL_PCIE_CORE_HALTED,
BTINTEL_PCIE_HWEXP_INPROGRESS,
- BTINTEL_PCIE_COREDUMP_INPROGRESS
+ BTINTEL_PCIE_COREDUMP_INPROGRESS,
+ BTINTEL_PCIE_RECOVERY_IN_PROGRESS,
+ BTINTEL_PCIE_SETUP_DONE
};
enum btintel_pcie_tlv_type {
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 51400a891f6e..76995cfcd534 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -316,7 +316,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
/* Resync STP when unexpected data is being read */
if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
- bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
+ bt_dev_err(bdev->hdev, "stp format unexpected (%d, %d)",
shdr->prefix, bdev->stp_dlen);
bdev->stp_cursor = 2;
bdev->stp_dlen = 0;
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 24f9b52605a1..73a4a325c867 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -18,6 +18,8 @@
#include <linux/string_helpers.h>
#include <linux/gpio/consumer.h>
#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -73,7 +75,8 @@
#define FW_AUTH_ENC 0xc0
#define HCI_NXP_PRI_BAUDRATE 115200
-#define HCI_NXP_SEC_BAUDRATE 3000000
+#define HCI_NXP_SEC_BAUDRATE_3M 3000000
+#define HCI_NXP_SEC_BAUDRATE_4M 4000000
#define MAX_FW_FILE_NAME_LEN 50
@@ -201,12 +204,14 @@ struct btnxpuart_dev {
u32 new_baudrate;
u32 current_baudrate;
u32 fw_init_baudrate;
+ u32 secondary_baudrate;
enum bootloader_param_change timeout_changed;
enum bootloader_param_change baudrate_changed;
bool helper_downloaded;
struct ps_data psdata;
struct btnxpuart_data *nxp_data;
+ struct reset_control *pdn;
};
#define NXP_V1_FW_REQ_PKT 0xa5
@@ -365,17 +370,26 @@ static u8 crc8_table[CRC8_TABLE_SIZE];
static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode,
u32 plen,
- void *param)
+ void *param,
+ bool resp)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
/* set flag to prevent nxp_enqueue from parsing values from this command and
* calling hci_cmd_sync_queue() again.
*/
psdata->driver_sent_cmd = true;
- skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT);
+ if (resp) {
+ skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT);
+ } else {
+ __hci_cmd_send(hdev, opcode, plen, param);
+ /* Allow command to be sent before tx_work is cancelled
+ * by btnxpuart_flush()
+ */
+ msleep(20);
+ }
psdata->driver_sent_cmd = false;
return skb;
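The new resp parameter gives callers a fire-and-forget mode: with resp == false the command goes out via __hci_cmd_send() with no completion wait, plus a short grace sleep so the TX work can flush before btnxpuart_flush() cancels it. In miniature, with hypothetical stubs (illustration only):

#include <stdbool.h>
#include <stddef.h>
#include <unistd.h>

static void *cmd_sync(int op) { (void)op; return (void *)1; }	/* stub */
static void cmd_send(int op)  { (void)op; }			/* stub */

static void *send_cmd_model(int op, bool resp)
{
	if (resp)
		return cmd_sync(op);	/* wait for the completion event */
	cmd_send(op);			/* fire and forget */
	usleep(20 * 1000);		/* grace period before tx_work is cancelled */
	return NULL;
}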
@@ -595,7 +609,8 @@ static int send_ps_cmd(struct hci_dev *hdev, void *data)
pcmd.ps_cmd = BT_PS_DISABLE;
pcmd.c2h_ps_interval = __cpu_to_le16(psdata->c2h_ps_interval);
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_AUTO_SLEEP_MODE, sizeof(pcmd), &pcmd);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_AUTO_SLEEP_MODE, sizeof(pcmd),
+ &pcmd, true);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting Power Save mode failed (%ld)", PTR_ERR(skb));
return PTR_ERR(skb);
@@ -644,7 +659,8 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
break;
}
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd), &pcmd);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd),
+ &pcmd, true);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting wake-up method failed (%ld)", PTR_ERR(skb));
return PTR_ERR(skb);
@@ -802,7 +818,10 @@ static bool nxp_fw_change_baudrate(struct hci_dev *hdev, u16 req_len)
nxpdev->fw_v3_offset_correction += req_len;
} else if (req_len == sizeof(uart_config)) {
uart_config.clkdiv.address = __cpu_to_le32(clkdivaddr);
- uart_config.clkdiv.value = __cpu_to_le32(0x00c00000);
+ if (nxpdev->new_baudrate == HCI_NXP_SEC_BAUDRATE_4M)
+ uart_config.clkdiv.value = __cpu_to_le32(0x01000000);
+ else
+ uart_config.clkdiv.value = __cpu_to_le32(0x00c00000);
uart_config.uartdiv.address = __cpu_to_le32(uartdivaddr);
uart_config.uartdiv.value = __cpu_to_le32(1);
uart_config.mcr.address = __cpu_to_le32(uartmcraddr);
@@ -966,12 +985,13 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb)
goto free_skb;
}
if (nxpdev->baudrate_changed != changed) {
+ nxpdev->new_baudrate = nxpdev->secondary_baudrate;
if (nxp_fw_change_baudrate(hdev, len)) {
nxpdev->baudrate_changed = changed;
serdev_device_set_baudrate(nxpdev->serdev,
- HCI_NXP_SEC_BAUDRATE);
+ nxpdev->secondary_baudrate);
serdev_device_set_flow_control(nxpdev->serdev, true);
- nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE;
+ nxpdev->current_baudrate = nxpdev->secondary_baudrate;
}
goto free_skb;
}
@@ -992,7 +1012,7 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb)
nxpdev->helper_downloaded = true;
serdev_device_wait_until_sent(nxpdev->serdev, 0);
serdev_device_set_baudrate(nxpdev->serdev,
- HCI_NXP_SEC_BAUDRATE);
+ HCI_NXP_SEC_BAUDRATE_3M);
serdev_device_set_flow_control(nxpdev->serdev, true);
} else {
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
@@ -1216,12 +1236,13 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
}
if (nxpdev->baudrate_changed != changed) {
+ nxpdev->new_baudrate = nxpdev->secondary_baudrate;
if (nxp_fw_change_baudrate(hdev, len)) {
nxpdev->baudrate_changed = cmd_sent;
serdev_device_set_baudrate(nxpdev->serdev,
- HCI_NXP_SEC_BAUDRATE);
+ nxpdev->secondary_baudrate);
serdev_device_set_flow_control(nxpdev->serdev, true);
- nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE;
+ nxpdev->current_baudrate = nxpdev->secondary_baudrate;
}
goto free_skb;
}
@@ -1265,7 +1286,8 @@ static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data)
if (!psdata)
return 0;
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_SET_OPER_SPEED, 4, (u8 *)&new_baudrate);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_SET_OPER_SPEED, 4,
+ (u8 *)&new_baudrate, true);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting baudrate failed (%ld)", PTR_ERR(skb));
return PTR_ERR(skb);
@@ -1323,7 +1345,7 @@ static void nxp_coredump(struct hci_dev *hdev)
struct sk_buff *skb;
u8 pcmd = 2;
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd, true);
if (IS_ERR(skb))
bt_dev_err(hdev, "Failed to trigger FW Dump. (%ld)", PTR_ERR(skb));
else
@@ -1365,7 +1387,6 @@ static int nxp_process_fw_dump(struct hci_dev *hdev, struct sk_buff *skb)
if (buf_len == 0) {
bt_dev_warn(hdev, "==== FW dump complete ===");
- clear_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state);
hci_devcd_complete(hdev);
nxp_set_ind_reset(hdev, NULL);
}
@@ -1419,6 +1440,10 @@ static int nxp_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
static int nxp_setup(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = nxpdev->serdev;
+ char device_string[30];
+ char event_string[50];
+ char *envp[] = {device_string, event_string, NULL};
int err = 0;
if (nxp_check_boot_sign(nxpdev)) {
@@ -1431,6 +1456,12 @@ static int nxp_setup(struct hci_dev *hdev)
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
}
+ snprintf(device_string, 30, "BTNXPUART_DEV=%s", dev_name(&serdev->dev));
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_READY");
+ bt_dev_dbg(hdev, "==== Send uevent: %s:%s ===", device_string,
+ event_string);
+ kobject_uevent_env(&serdev->dev.kobj, KOBJ_CHANGE, envp);
+
serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate);
nxpdev->current_baudrate = nxpdev->fw_init_baudrate;
@@ -1447,8 +1478,8 @@ static int nxp_post_init(struct hci_dev *hdev)
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
- if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) {
- nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE;
+ if (nxpdev->current_baudrate != nxpdev->secondary_baudrate) {
+ nxpdev->new_baudrate = nxpdev->secondary_baudrate;
nxp_set_baudrate_cmd(hdev, NULL);
}
if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode)
@@ -1479,7 +1510,13 @@ static int nxp_shutdown(struct hci_dev *hdev)
u8 pcmd = 0;
if (ind_reset_in_progress(nxpdev)) {
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd);
+ if (test_and_clear_bit(BTNXPUART_FW_DUMP_IN_PROGRESS,
+ &nxpdev->tx_state))
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1,
+ &pcmd, false);
+ else
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1,
+ &pcmd, true);
serdev_device_set_flow_control(nxpdev->serdev, false);
set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
/* HCI_NXP_IND_RESET command may not return any response */
@@ -1745,11 +1782,41 @@ static const struct serdev_device_ops btnxpuart_client_ops = {
.write_wakeup = btnxpuart_write_wakeup,
};
+static void nxp_coredump_notify(struct hci_dev *hdev, int state)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = nxpdev->serdev;
+ char device_string[30];
+ char event_string[50];
+ char *envp[] = {device_string, event_string, NULL};
+
+ snprintf(device_string, 30, "BTNXPUART_DEV=%s", dev_name(&serdev->dev));
+ switch (state) {
+ case HCI_DEVCOREDUMP_ACTIVE:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_ACTIVE");
+ break;
+ case HCI_DEVCOREDUMP_DONE:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_DONE");
+ break;
+ case HCI_DEVCOREDUMP_TIMEOUT:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_TIMEOUT");
+ break;
+ default:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_STATE_%d",
+ state);
+ break;
+ }
+ bt_dev_dbg(hdev, "==== Send uevent: %s:%s ===", device_string,
+ event_string);
+ kobject_uevent_env(&serdev->dev.kobj, KOBJ_CHANGE, envp);
+}
+
static int nxp_serdev_probe(struct serdev_device *serdev)
{
struct hci_dev *hdev;
struct btnxpuart_dev *nxpdev;
bdaddr_t ba = {0};
+ int err;
nxpdev = devm_kzalloc(&serdev->dev, sizeof(*nxpdev), GFP_KERNEL);
if (!nxpdev)
@@ -1773,10 +1840,31 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
if (!nxpdev->fw_init_baudrate)
nxpdev->fw_init_baudrate = FW_INIT_BAUDRATE;
+ device_property_read_u32(&nxpdev->serdev->dev, "max-speed",
+ &nxpdev->secondary_baudrate);
+ if (!nxpdev->secondary_baudrate ||
+ (nxpdev->secondary_baudrate != HCI_NXP_SEC_BAUDRATE_3M &&
+ nxpdev->secondary_baudrate != HCI_NXP_SEC_BAUDRATE_4M)) {
+ if (nxpdev->secondary_baudrate)
+ dev_err(&serdev->dev,
+ "Invalid max-speed. Using default 3000000.");
+ nxpdev->secondary_baudrate = HCI_NXP_SEC_BAUDRATE_3M;
+ }
+
set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
crc8_populate_msb(crc8_table, POLYNOMIAL8);
+ nxpdev->pdn = devm_reset_control_get_optional_shared(&serdev->dev, NULL);
+ if (IS_ERR(nxpdev->pdn))
+ return PTR_ERR(nxpdev->pdn);
+
+ err = devm_regulator_get_enable(&serdev->dev, "vcc");
+ if (err) {
+ dev_err(&serdev->dev, "Failed to enable vcc regulator\n");
+ return err;
+ }
+
/* Initialize and register HCI device */
hdev = hci_alloc_dev();
if (!hdev) {
@@ -1784,6 +1872,8 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
}
+ reset_control_deassert(nxpdev->pdn);
+
nxpdev->hdev = hdev;
hdev->bus = HCI_UART;
@@ -1817,11 +1907,13 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
if (ps_setup(hdev))
goto probe_fail;
- hci_devcd_register(hdev, nxp_coredump, nxp_coredump_hdr, NULL);
+ hci_devcd_register(hdev, nxp_coredump, nxp_coredump_hdr,
+ nxp_coredump_notify);
return 0;
probe_fail:
+ reset_control_assert(nxpdev->pdn);
hci_free_dev(hdev);
return -ENODEV;
}
@@ -1849,6 +1941,7 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
ps_cleanup(nxpdev);
hci_unregister_dev(hdev);
+ reset_control_assert(nxpdev->pdn);
hci_free_dev(hdev);
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 4d182cf6e037..6abd962502e3 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -693,7 +693,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
/* Loop from the end of the firmware parsing instructions, until
* we find an instruction that identifies the "project ID" for the
- * hardware supported by this firwmare file.
+ * hardware supported by this firmware file.
* Once we have that, we double-check that project_id is suitable
* for the hardware we are working with.
*/
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f9eeec0aed57..8085fabadde8 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -298,75 +298,77 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
/* QCA WCN6855 chipset */
- { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9109), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9208), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9209), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9608), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9609), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3a22), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9109), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9208), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9209), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3a22), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9608), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9609), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x28de, 0x1401), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
/* QCA WCN785x chipset */
@@ -515,6 +517,11 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8851BE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb850), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3600), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3601), .driver_info = BTUSB_REALTEK },
+
+ /* Realtek 8851BU Bluetooth devices */
+ { USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
@@ -565,6 +572,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3618), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe123), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
@@ -705,6 +714,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe139), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14e), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe14f), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe150), .driver_info = BTUSB_MEDIATEK |
@@ -725,6 +736,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x7009), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -2594,12 +2607,12 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
else
urb = alloc_ctrl_urb(hdev, skb);
- /* When the 0xfc01 command is issued to boot into
- * the operational firmware, it will actually not
- * send a command complete event. To keep the flow
+ /* When the BTINTEL_HCI_OP_RESET command is issued to
+ * boot into the operational firmware, it will actually
+ * not send a command complete event. To keep the flow
* control working inject that event here.
*/
- if (opcode == 0xfc01)
+ if (opcode == BTINTEL_HCI_OP_RESET)
inject_cmd_complete(hdev, opcode);
} else {
urb = alloc_ctrl_urb(hdev, skb);
@@ -3179,6 +3192,12 @@ struct qca_device_info {
u8 ver_offset; /* offset of version structure in rampatch */
};
+struct qca_custom_firmware {
+ u32 rom_version;
+ u16 board_id;
+ const char *subdirectory;
+};
+
static const struct qca_device_info qca_devices_table[] = {
{ 0x00000100, 20, 4, 8 }, /* Rome 1.0 */
{ 0x00000101, 20, 4, 8 }, /* Rome 1.1 */
@@ -3192,6 +3211,11 @@ static const struct qca_device_info qca_devices_table[] = {
{ 0x00190200, 40, 4, 16 }, /* WCN785x 2.0 */
};
+static const struct qca_custom_firmware qca_custom_btfws[] = {
+ { 0x00130201, 0x030A, "QCA2066" },
+ { },
+};
+
static u16 qca_extract_board_id(const struct qca_version *ver)
{
u16 flag = le16_to_cpu(ver->flag);
@@ -3218,6 +3242,26 @@ static u16 qca_extract_board_id(const struct qca_version *ver)
return board_id;
}
+static const char *qca_get_fw_subdirectory(const struct qca_version *ver)
+{
+ const struct qca_custom_firmware *ptr;
+ u32 rom_ver;
+ u16 board_id;
+
+ rom_ver = le32_to_cpu(ver->rom_version);
+ board_id = qca_extract_board_id(ver);
+ if (!board_id)
+ return NULL;
+
+ for (ptr = qca_custom_btfws; ptr->rom_version; ptr++) {
+ if (ptr->rom_version == rom_ver &&
+ ptr->board_id == board_id)
+ return ptr->subdirectory;
+ }
+
+ return NULL;
+}
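qca_custom_btfws is a sentinel-terminated table: the zeroed final entry stops the scan, and the one real entry maps ROM version 0x00130201 with board ID 0x030A to the "QCA2066" subdirectory, producing firmware paths such as qca/QCA2066/rampatch_usb_00130201.bin. The lookup pattern, standalone (illustration only):

#include <stddef.h>
#include <stdint.h>

struct fw_entry { uint32_t rom; uint16_t board; const char *subdir; };

static const struct fw_entry table[] = {
	{ 0x00130201, 0x030A, "QCA2066" },
	{ 0 },				/* sentinel: rom == 0 ends the scan */
};

static const char *lookup(uint32_t rom, uint16_t board)
{
	for (const struct fw_entry *p = table; p->rom; p++)
		if (p->rom == rom && p->board == board)
			return p->subdir;
	return NULL;
}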
+
static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
void *data, u16 size)
{
@@ -3322,15 +3366,22 @@ static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
{
struct qca_rampatch_version *rver;
const struct firmware *fw;
+ const char *fw_subdir;
u32 ver_rom, ver_patch, rver_rom;
u16 rver_rom_low, rver_rom_high, rver_patch;
- char fwname[64];
+ char fwname[80];
int err;
ver_rom = le32_to_cpu(ver->rom_version);
ver_patch = le32_to_cpu(ver->patch_version);
- snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin", ver_rom);
+ fw_subdir = qca_get_fw_subdirectory(ver);
+ if (fw_subdir)
+ snprintf(fwname, sizeof(fwname), "qca/%s/rampatch_usb_%08x.bin",
+ fw_subdir, ver_rom);
+ else
+ snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin",
+ ver_rom);
err = request_firmware(&fw, fwname, &hdev->dev);
if (err) {
@@ -3374,10 +3425,11 @@ static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size,
const struct qca_version *ver)
{
u32 rom_version = le32_to_cpu(ver->rom_version);
- const char *variant;
+ const char *variant, *fw_subdir;
int len;
u16 board_id;
+ fw_subdir = qca_get_fw_subdirectory(ver);
board_id = qca_extract_board_id(ver);
switch (le32_to_cpu(ver->ram_version)) {
@@ -3390,7 +3442,12 @@ static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size,
break;
}
- len = snprintf(fwname, max_size, "qca/nvm_usb_%08x", rom_version);
+ if (fw_subdir)
+ len = snprintf(fwname, max_size, "qca/%s/nvm_usb_%08x",
+ fw_subdir, rom_version);
+ else
+ len = snprintf(fwname, max_size, "qca/nvm_usb_%08x",
+ rom_version);
if (variant)
len += snprintf(fwname + len, max_size - len, "%s", variant);
if (board_id)
@@ -3403,7 +3460,7 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
const struct qca_device_info *info)
{
const struct firmware *fw;
- char fwname[64];
+ char fwname[80];
int err;
btusb_generate_qca_nvm_name(fwname, sizeof(fwname), ver);
@@ -3802,6 +3859,8 @@ static int btusb_hci_drv_supported_altsettings(struct hci_dev *hdev, void *data,
/* There are at most 7 alt (0 - 6) */
rp = kmalloc(sizeof(*rp) + 7, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
rp->num = 0;
if (!drvdata->isoc)
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index 8a9aa33776b0..45e6d84224ee 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -420,7 +420,7 @@ struct bcm4377_ring_state {
* payloads_dma:DMA address for payload buffer
* events: pointer to array of completions if waiting is allowed
* msgids: bitmap to keep track of used message ids
- * lock: Spinlock to protect access to ring structurs used in the irq handler
+ * lock: Spinlock to protect access to ring structures used in the irq handler
*/
struct bcm4377_transfer_ring {
enum bcm4377_transfer_ring_id ring_id;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index d22fbb7f9fc5..9b353c3d6442 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -1029,12 +1029,12 @@ static struct sk_buff *intel_dequeue(struct hci_uart *hu)
struct hci_command_hdr *cmd = (void *)skb->data;
__u16 opcode = le16_to_cpu(cmd->opcode);
- /* When the 0xfc01 command is issued to boot into
- * the operational firmware, it will actually not
- * send a command complete event. To keep the flow
- * control working inject that event here.
+ /* When the BTINTEL_HCI_OP_RESET command is issued to boot into
+ * the operational firmware, it will actually not send a command
+ * complete event. To keep the flow control working inject that
+ * event here.
*/
- if (opcode == 0xfc01)
+ if (opcode == BTINTEL_HCI_OP_RESET)
inject_cmd_complete(hu->hdev, opcode);
}
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 33c43503714b..4cff4d9be313 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1264,6 +1264,7 @@ static const struct h4_recv_pkt qca_recv_pkts[] = {
{ H4_RECV_ACL, .recv = qca_recv_acl_data },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = qca_recv_event },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
{ QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
{ QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack },
{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index d2b00458761e..6ed24be3481d 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -80,7 +80,6 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
ret = 4;
out:
- pm_runtime_mark_last_busy(trng->dev);
pm_runtime_put_sync_autosuspend(trng->dev);
return ret;
}
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index 4db198849695..a5be9258037f 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -98,7 +98,6 @@ static void cc_trng_pm_put_suspend(struct device *dev)
{
int rc = 0;
- pm_runtime_mark_last_busy(dev);
rc = pm_runtime_put_autosuspend(dev);
if (rc)
dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index b7fa1bc1122b..5808d09d12c4 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -98,7 +98,6 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max -= sizeof(u32);
}
- pm_runtime_mark_last_busy(priv->dev);
pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
@@ -143,7 +142,9 @@ static int mtk_rng_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
- devm_pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
dev_info(&pdev->dev, "registered RNG driver\n");
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index 3e308c890bd2..40d6e29dea03 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -80,7 +80,6 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max--;
}
- pm_runtime_mark_last_busy(priv->dev);
pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index 8064c792caf0..aa71f61c3dc9 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -56,7 +56,6 @@ static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
else
r = 4;
- pm_runtime_mark_last_busy(ddata->dev);
pm_runtime_put_autosuspend(ddata->dev);
return r;
diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c
index fb4a30b95507..6e3ed4b85605 100644
--- a/drivers/char/hw_random/rockchip-rng.c
+++ b/drivers/char/hw_random/rockchip-rng.c
@@ -223,7 +223,6 @@ static int rk3568_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
/* Read random data stored in the registers */
memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read);
out:
- pm_runtime_mark_last_busy(rk_rng->dev);
pm_runtime_put_sync_autosuspend(rk_rng->dev);
return (ret < 0) ? ret : to_read;
@@ -263,7 +262,6 @@ static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read);
out:
- pm_runtime_mark_last_busy(rk_rng->dev);
pm_runtime_put_sync_autosuspend(rk_rng->dev);
return (ret < 0) ? ret : to_read;
@@ -355,7 +353,6 @@ out:
/* close the TRNG */
rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL);
- pm_runtime_mark_last_busy(rk_rng->dev);
pm_runtime_put_sync_autosuspend(rk_rng->dev);
return (ret < 0) ? ret : to_read;
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 98edbe796bc5..9a8c00586ab0 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -255,7 +255,6 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
}
exit_rpm:
- pm_runtime_mark_last_busy(priv->dev);
pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
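These hw_random hunks are all the same cleanup: pm_runtime_put_autosuspend() and pm_runtime_put_sync_autosuspend() now update the last-busy timestamp themselves, so the explicit pm_runtime_mark_last_busy() call directly before them is redundant (assuming a runtime-PM core recent enough to include that change). Before/after shape (illustrative):

/* old idiom:
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_sync_autosuspend(dev);
 *
 * new idiom:
 *	pm_runtime_put_sync_autosuspend(dev);	// marks last busy itself
 */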
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 064944ae9fdc..8e9050f99e9e 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -4607,10 +4607,10 @@ return_unspecified:
* The NetFN and Command in the response is not even
* marginally correct.
*/
- dev_warn(intf->si_dev,
- "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
- (msg->data[0] >> 2) | 1, msg->data[1],
- msg->rsp[0] >> 2, msg->rsp[1]);
+ dev_warn_ratelimited(intf->si_dev,
+ "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+ (msg->data[0] >> 2) | 1, msg->data[1],
+ msg->rsp[0] >> 2, msg->rsp[1]);
goto return_unspecified;
}
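Switching the warning above to dev_warn_ratelimited() keeps a misbehaving BMC from flooding the kernel log while still recording the mismatch. The same pattern in isolation, as a sketch with a hypothetical helper and message:

	#include <linux/device.h>

	/* Sketch: dev_warn_ratelimited() suppresses repeats once the default
	 * printk ratelimit is exceeded, instead of logging every bad reply. */
	static void report_bad_reply(struct device *dev, u8 netfn, u8 cmd)
	{
		dev_warn_ratelimited(dev,
				     "unexpected reply: netfn %x cmd %x\n",
				     netfn, cmd);
	}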
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index bb42dfe1c6a8..8b5524069c15 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2108,7 +2108,6 @@ static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2)
static int __init init_ipmi_si(void)
{
struct smi_info *e, *e2;
- enum ipmi_addr_src type = SI_INVALID;
if (initialized)
return 0;
@@ -2190,9 +2189,6 @@ static int __init init_ipmi_si(void)
initialized = true;
mutex_unlock(&smi_infos_lock);
- if (type)
- return 0;
-
mutex_lock(&smi_infos_lock);
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index ab759b492fdd..a013ddbf1466 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1146,14 +1146,8 @@ static struct ipmi_smi_watcher smi_watcher = {
.smi_gone = ipmi_smi_gone
};
-static int action_op(const char *inval, char *outval)
+static int action_op_set_val(const char *inval)
{
- if (outval)
- strcpy(outval, action);
-
- if (!inval)
- return 0;
-
if (strcmp(inval, "reset") == 0)
action_val = WDOG_TIMEOUT_RESET;
else if (strcmp(inval, "none") == 0)
@@ -1164,18 +1158,26 @@ static int action_op(const char *inval, char *outval)
action_val = WDOG_TIMEOUT_POWER_DOWN;
else
return -EINVAL;
- strcpy(action, inval);
return 0;
}
-static int preaction_op(const char *inval, char *outval)
+static int action_op(const char *inval, char *outval)
{
+ int rv;
+
if (outval)
- strcpy(outval, preaction);
+ strcpy(outval, action);
if (!inval)
return 0;
+ rv = action_op_set_val(inval);
+ if (!rv)
+ strcpy(action, inval);
+ return rv;
+}
+static int preaction_op_set_val(const char *inval)
+{
if (strcmp(inval, "pre_none") == 0)
preaction_val = WDOG_PRETIMEOUT_NONE;
else if (strcmp(inval, "pre_smi") == 0)
@@ -1188,18 +1190,26 @@ static int preaction_op(const char *inval, char *outval)
preaction_val = WDOG_PRETIMEOUT_MSG_INT;
else
return -EINVAL;
- strcpy(preaction, inval);
return 0;
}
-static int preop_op(const char *inval, char *outval)
+static int preaction_op(const char *inval, char *outval)
{
+ int rv;
+
if (outval)
- strcpy(outval, preop);
+ strcpy(outval, preaction);
if (!inval)
return 0;
+ rv = preaction_op_set_val(inval);
+ if (!rv)
+ strcpy(preaction, inval);
+	return rv;
+}
+static int preop_op_set_val(const char *inval)
+{
if (strcmp(inval, "preop_none") == 0)
preop_val = WDOG_PREOP_NONE;
else if (strcmp(inval, "preop_panic") == 0)
@@ -1208,7 +1218,22 @@ static int preop_op(const char *inval, char *outval)
preop_val = WDOG_PREOP_GIVE_DATA;
else
return -EINVAL;
- strcpy(preop, inval);
+ return 0;
+}
+
+static int preop_op(const char *inval, char *outval)
+{
+ int rv;
+
+ if (outval)
+ strcpy(outval, preop);
+
+ if (!inval)
+ return 0;
+
+ rv = preop_op_set_val(inval);
+ if (!rv)
+ strcpy(preop, inval);
-	return 0;
+	return rv;
}
@@ -1245,18 +1270,18 @@ static int __init ipmi_wdog_init(void)
{
int rv;
- if (action_op(action, NULL)) {
+ if (action_op_set_val(action)) {
action_op("reset", NULL);
pr_info("Unknown action '%s', defaulting to reset\n", action);
}
- if (preaction_op(preaction, NULL)) {
+ if (preaction_op_set_val(preaction)) {
preaction_op("pre_none", NULL);
pr_info("Unknown preaction '%s', defaulting to none\n",
preaction);
}
- if (preop_op(preop, NULL)) {
+ if (preop_op_set_val(preop)) {
preop_op("preop_none", NULL);
pr_info("Unknown preop '%s', defaulting to none\n", preop);
}
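The watchdog refactor above splits each module-parameter handler into a parse-and-validate step (the *_set_val helpers) and a commit step, so ipmi_wdog_init() can check a built-in default without rewriting the stored string. A reduced sketch of the pattern, with hypothetical names (mode, mode_val):

	#include <linux/errno.h>
	#include <linux/string.h>

	/* Sketch of the parse/commit split used above; all names hypothetical. */
	static char mode[16] = "fast";
	static int mode_val;

	static int mode_set_val(const char *inval)	/* validate only */
	{
		if (strcmp(inval, "fast") == 0)
			mode_val = 0;
		else if (strcmp(inval, "slow") == 0)
			mode_val = 1;
		else
			return -EINVAL;
		return 0;
	}

	static int mode_op(const char *inval, char *outval)
	{
		int rv;

		if (outval)
			strcpy(outval, mode);	/* report current setting */
		if (!inval)
			return 0;
		rv = mode_set_val(inval);
		if (!rv)
			strcpy(mode, inval);	/* commit only on success */
		return rv;
	}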
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index b5a77669ed23..4d56475f94fc 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -61,7 +61,6 @@ config LMK04832
config COMMON_CLK_APPLE_NCO
tristate "Clock driver for Apple SoC NCOs"
depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
help
This driver supports NCO (Numerically Controlled Oscillator) blocks
found on Apple SoCs such as t8103 (M1). The blocks are typically
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 3d04f3463452..18ed29cfdc11 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -18,6 +18,7 @@ clk-test-y := clk_test.o \
kunit_clk_assigned_rates_without_consumer.dtbo.o \
kunit_clk_assigned_rates_zero.dtbo.o \
kunit_clk_assigned_rates_zero_consumer.dtbo.o \
+ kunit_clk_hw_get_dev_of_node.dtbo.o \
kunit_clk_parent_data_test.dtbo.o
obj-$(CONFIG_COMMON_CLK) += clk-divider.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
diff --git a/drivers/clk/at91/sam9x7.c b/drivers/clk/at91/sam9x7.c
index cbb8b220f16b..ffab32b047a0 100644
--- a/drivers/clk/at91/sam9x7.c
+++ b/drivers/clk/at91/sam9x7.c
@@ -61,44 +61,44 @@ static const struct clk_master_layout sam9x7_master_layout = {
/* Fractional PLL core output range. */
static const struct clk_range plla_core_outputs[] = {
- { .min = 375000000, .max = 1600000000 },
+ { .min = 800000000, .max = 1600000000 },
};
static const struct clk_range upll_core_outputs[] = {
- { .min = 600000000, .max = 1200000000 },
+ { .min = 600000000, .max = 960000000 },
};
static const struct clk_range lvdspll_core_outputs[] = {
- { .min = 400000000, .max = 800000000 },
+ { .min = 600000000, .max = 1200000000 },
};
static const struct clk_range audiopll_core_outputs[] = {
- { .min = 400000000, .max = 800000000 },
+ { .min = 600000000, .max = 1200000000 },
};
static const struct clk_range plladiv2_core_outputs[] = {
- { .min = 375000000, .max = 1600000000 },
+ { .min = 800000000, .max = 1600000000 },
};
/* Fractional PLL output range. */
static const struct clk_range plla_outputs[] = {
- { .min = 732421, .max = 800000000 },
+ { .min = 400000000, .max = 800000000 },
};
static const struct clk_range upll_outputs[] = {
- { .min = 300000000, .max = 600000000 },
+ { .min = 300000000, .max = 480000000 },
};
static const struct clk_range lvdspll_outputs[] = {
- { .min = 10000000, .max = 800000000 },
+ { .min = 175000000, .max = 550000000 },
};
static const struct clk_range audiopll_outputs[] = {
- { .min = 10000000, .max = 800000000 },
+ { .min = 0, .max = 300000000 },
};
static const struct clk_range plladiv2_outputs[] = {
- { .min = 366210, .max = 400000000 },
+ { .min = 200000000, .max = 400000000 },
};
/* PLL characteristics. */
diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c
index 84555a00f950..17d75e8e2e8f 100644
--- a/drivers/clk/baikal-t1/clk-ccu-div.c
+++ b/drivers/clk/baikal-t1/clk-ccu-div.c
@@ -405,7 +405,7 @@ static void ccu_div_clk_unregister(struct ccu_div_data *data, bool defer)
{
int idx;
- /* Uninstall only the clocks registered on the specfied stage */
+ /* Uninstall only the clocks registered on the specified stage */
for (idx = 0; idx < data->divs_num; ++idx) {
if (!!(data->divs_info[idx].features & CCU_DIV_BASIC) ^ defer)
continue;
diff --git a/drivers/clk/baikal-t1/clk-ccu-pll.c b/drivers/clk/baikal-t1/clk-ccu-pll.c
index fce02ce77347..921b87024feb 100644
--- a/drivers/clk/baikal-t1/clk-ccu-pll.c
+++ b/drivers/clk/baikal-t1/clk-ccu-pll.c
@@ -196,7 +196,7 @@ static void ccu_pll_clk_unregister(struct ccu_pll_data *data, bool defer)
{
int idx;
- /* Uninstall only the clocks registered on the specfied stage */
+ /* Uninstall only the clocks registered on the specified stage */
for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
if (!!(pll_info[idx].features & CCU_PLL_BASIC) ^ defer)
continue;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index fb04734afc80..02215ea79403 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -570,18 +570,23 @@ static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
return rate >> A2W_PLL_FRAC_BITS;
}
-static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int bcm2835_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
const struct bcm2835_pll_data *data = pll->data;
u32 ndiv, fdiv;
- rate = clamp(rate, data->min_rate, data->max_rate);
+ req->rate = clamp(req->rate, data->min_rate, data->max_rate);
- bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
+ bcm2835_pll_choose_ndiv_and_fdiv(req->rate, req->best_parent_rate,
+ &ndiv, &fdiv);
- return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
+ req->rate = bcm2835_pll_rate_from_divisors(req->best_parent_rate,
+ ndiv, fdiv,
+ 1);
+
+ return 0;
}
static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw,
@@ -783,7 +788,7 @@ static const struct clk_ops bcm2835_pll_clk_ops = {
.unprepare = bcm2835_pll_off,
.recalc_rate = bcm2835_pll_get_rate,
.set_rate = bcm2835_pll_set_rate,
- .round_rate = bcm2835_pll_round_rate,
+ .determine_rate = bcm2835_pll_determine_rate,
.debug_init = bcm2835_pll_debug_init,
};
@@ -1550,7 +1555,7 @@ static const char *const bcm2835_clock_osc_parents[] = {
.parents = bcm2835_clock_osc_parents, \
__VA_ARGS__)
-/* main peripherial parent mux */
+/* main peripheral parent mux */
static const char *const bcm2835_clock_per_parents[] = {
"gnd",
"xosc",
diff --git a/drivers/clk/bcm/clk-bcm53573-ilp.c b/drivers/clk/bcm/clk-bcm53573-ilp.c
index 83ef41d618be..b2fc05b60783 100644
--- a/drivers/clk/bcm/clk-bcm53573-ilp.c
+++ b/drivers/clk/bcm/clk-bcm53573-ilp.c
@@ -59,7 +59,7 @@ static unsigned long bcm53573_ilp_recalc_rate(struct clk_hw *hw,
/*
* At minimum we should loop for a bit to let hardware do the
* measurement. This isn't very accurate however, so for a better
- * precision lets try getting 20 different values for and use average.
+ * precision let's try getting 20 different values and use average.
*/
while (num < 20) {
regmap_read(regmap, PMU_XTAL_FREQ_RATIO, &cur_val);
diff --git a/drivers/clk/berlin/berlin2-avpll.c b/drivers/clk/berlin/berlin2-avpll.c
index aa89b4c9464e..79f3d37a0ee0 100644
--- a/drivers/clk/berlin/berlin2-avpll.c
+++ b/drivers/clk/berlin/berlin2-avpll.c
@@ -319,7 +319,7 @@ berlin2_avpll_channel_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
/*
* AV3 divider start at VCO_CTRL14, bit 7; each 4 bits wide.
- * AV2/AV3 form a fractional divider, where only specfic values for AV3
+ * AV2/AV3 form a fractional divider, where only specific values for AV3
* are allowed. AV3 != 0 divides by AV2/2, AV3=0 is bypass.
*/
if (ch->index < 6) {
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index 3432c801f1bd..595cfa533fb9 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -92,8 +92,8 @@ static const struct asm9260_div_clk asm9260_div_clks[] __initconst = {
{ CLKID_SYS_CPU, "cpu_div", "main_gate", HW_CPUCLKDIV },
{ CLKID_SYS_AHB, "ahb_div", "cpu_div", HW_SYSAHBCLKDIV },
- /* i2s has two deviders: one for only external mclk and internal
- * devider for all clks. */
+ /* i2s has two dividers: one for only external mclk and internal
+ * divider for all clks. */
{ CLKID_SYS_I2S0M, "i2s0m_div", "i2s0_mclk", HW_I2S0MCLKDIV },
{ CLKID_SYS_I2S1M, "i2s1m_div", "i2s1_mclk", HW_I2S1MCLKDIV },
{ CLKID_SYS_I2S0S, "i2s0s_div", "i2s0_gate", HW_I2S0SCLKDIV },
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index faf88324f7b1..114afc13d640 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -92,7 +92,7 @@ static u8 soc_rev;
*
* There are some gates that do not have an associated reset; these are
* handled by using -1 as the index for the reset, and the consumer must
- * explictly assert/deassert reset lines as required.
+ * explicitly assert/deassert reset lines as required.
*
* Clocks marked with CLK_IS_CRITICAL:
*
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index 934e53a96ddd..aec62301fa06 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -6,14 +6,17 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
-#include <linux/platform_device.h>
+#include <linux/adi-axi-common.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/slab.h>
+#include <linux/err.h>
#include <linux/io.h>
-#include <linux/of.h>
#include <linux/module.h>
-#include <linux/err.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#define AXI_CLKGEN_V2_REG_RESET 0x40
#define AXI_CLKGEN_V2_REG_CLKSEL 0x44
@@ -28,6 +31,9 @@
#define AXI_CLKGEN_V2_DRP_STATUS_BUSY BIT(16)
+#define ADI_CLKGEN_REG_FPGA_VOLTAGE 0x0140
+#define ADI_CLKGEN_INFO_FPGA_VOLTAGE(val) ((val) & GENMASK(15, 0))
+
#define MMCM_REG_CLKOUT5_2 0x07
#define MMCM_REG_CLKOUT0_1 0x08
#define MMCM_REG_CLKOUT0_2 0x09
@@ -90,7 +96,7 @@ static uint32_t axi_clkgen_lookup_filter(unsigned int m)
}
}
-static const uint32_t axi_clkgen_lock_table[] = {
+static const u32 axi_clkgen_lock_table[] = {
0x060603e8, 0x060603e8, 0x080803e8, 0x0b0b03e8,
0x0e0e03e8, 0x111103e8, 0x131303e8, 0x161603e8,
0x191903e8, 0x1c1c03e8, 0x1f1f0384, 0x1f1f0339,
@@ -102,7 +108,7 @@ static const uint32_t axi_clkgen_lock_table[] = {
0x1f1f012c, 0x1f1f0113, 0x1f1f0113, 0x1f1f0113,
};
-static uint32_t axi_clkgen_lookup_lock(unsigned int m)
+static u32 axi_clkgen_lookup_lock(unsigned int m)
{
if (m < ARRAY_SIZE(axi_clkgen_lock_table))
return axi_clkgen_lock_table[m];
@@ -118,14 +124,15 @@ static const struct axi_clkgen_limits axi_clkgen_zynqmp_default_limits = {
static const struct axi_clkgen_limits axi_clkgen_zynq_default_limits = {
.fpfd_min = 10000,
- .fpfd_max = 300000,
+ .fpfd_max = 450000,
.fvco_min = 600000,
.fvco_max = 1200000,
};
static void axi_clkgen_calc_params(const struct axi_clkgen_limits *limits,
- unsigned long fin, unsigned long fout,
- unsigned int *best_d, unsigned int *best_m, unsigned int *best_dout)
+ unsigned long fin, unsigned long fout,
+ unsigned int *best_d, unsigned int *best_m,
+ unsigned int *best_dout)
{
unsigned long d, d_min, d_max, _d_min, _d_max;
unsigned long m, m_min, m_max;
@@ -141,15 +148,15 @@ static void axi_clkgen_calc_params(const struct axi_clkgen_limits *limits,
*best_m = 0;
*best_dout = 0;
- d_min = max_t(unsigned long, DIV_ROUND_UP(fin, limits->fpfd_max), 1);
- d_max = min_t(unsigned long, fin / limits->fpfd_min, 80);
+ d_min = max(DIV_ROUND_UP(fin, limits->fpfd_max), 1);
+ d_max = min(fin / limits->fpfd_min, 80);
again:
fvco_min_fract = limits->fvco_min << fract_shift;
fvco_max_fract = limits->fvco_max << fract_shift;
- m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min_fract, fin) * d_min, 1);
- m_max = min_t(unsigned long, fvco_max_fract * d_max / fin, 64 << fract_shift);
+ m_min = max(DIV_ROUND_UP(fvco_min_fract, fin) * d_min, 1);
+ m_max = min(fvco_max_fract * d_max / fin, 64 << fract_shift);
for (m = m_min; m <= m_max; m++) {
_d_min = max(d_min, DIV_ROUND_UP(fin * m, fvco_max_fract));
@@ -172,7 +179,7 @@ again:
}
}
- /* Lets see if we find a better setting in fractional mode */
+ /* Let's see if we find a better setting in fractional mode */
if (fract_shift == 0) {
fract_shift = 3;
goto again;
@@ -192,9 +199,9 @@ struct axi_clkgen_div_params {
};
static void axi_clkgen_calc_clk_params(unsigned int divider,
- unsigned int frac_divider, struct axi_clkgen_div_params *params)
+ unsigned int frac_divider,
+ struct axi_clkgen_div_params *params)
{
-
memset(params, 0x0, sizeof(*params));
if (divider == 1) {
@@ -222,7 +229,7 @@ static void axi_clkgen_calc_clk_params(unsigned int divider,
if (params->edge == 0 || frac_divider == 1)
params->low--;
if (((params->edge == 0) ^ (frac_divider == 1)) ||
- (divider == 2 && frac_divider == 1))
+ (divider == 2 && frac_divider == 1))
params->frac_wf_f = 1;
params->frac_phase = params->edge * 4 + frac_divider / 2;
@@ -230,13 +237,13 @@ static void axi_clkgen_calc_clk_params(unsigned int divider,
}
static void axi_clkgen_write(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int val)
+ unsigned int reg, unsigned int val)
{
writel(val, axi_clkgen->base + reg);
}
static void axi_clkgen_read(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int *val)
+ unsigned int reg, unsigned int *val)
{
*val = readl(axi_clkgen->base + reg);
}
@@ -257,7 +264,7 @@ static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen)
}
static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int *val)
+ unsigned int reg, unsigned int *val)
{
unsigned int reg_val;
int ret;
@@ -281,7 +288,8 @@ static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
}
static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int val, unsigned int mask)
+ unsigned int reg, unsigned int val,
+ unsigned int mask)
{
unsigned int reg_val = 0;
int ret;
@@ -302,8 +310,7 @@ static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
return 0;
}
-static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen,
- bool enable)
+static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen, bool enable)
{
unsigned int val = AXI_CLKGEN_V2_RESET_ENABLE;
@@ -319,31 +326,31 @@ static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
}
static void axi_clkgen_set_div(struct axi_clkgen *axi_clkgen,
- unsigned int reg1, unsigned int reg2, unsigned int reg3,
- struct axi_clkgen_div_params *params)
+ unsigned int reg1, unsigned int reg2,
+ unsigned int reg3,
+ struct axi_clkgen_div_params *params)
{
axi_clkgen_mmcm_write(axi_clkgen, reg1,
- (params->high << 6) | params->low, 0xefff);
+ (params->high << 6) | params->low, 0xefff);
axi_clkgen_mmcm_write(axi_clkgen, reg2,
- (params->frac << 12) | (params->frac_en << 11) |
- (params->frac_wf_r << 10) | (params->edge << 7) |
- (params->nocount << 6), 0x7fff);
+ (params->frac << 12) | (params->frac_en << 11) |
+ (params->frac_wf_r << 10) | (params->edge << 7) |
+ (params->nocount << 6), 0x7fff);
if (reg3 != 0) {
axi_clkgen_mmcm_write(axi_clkgen, reg3,
- (params->frac_phase << 11) | (params->frac_wf_f << 10), 0x3c00);
+ (params->frac_phase << 11) | (params->frac_wf_f << 10),
+ 0x3c00);
}
}
-static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
- unsigned long rate, unsigned long parent_rate)
+static int axi_clkgen_set_rate(struct clk_hw *clk_hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
const struct axi_clkgen_limits *limits = &axi_clkgen->limits;
unsigned int d, m, dout;
struct axi_clkgen_div_params params;
- uint32_t power = 0;
- uint32_t filter;
- uint32_t lock;
+ u32 power = 0, filter, lock;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
@@ -363,22 +370,22 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
axi_clkgen_calc_clk_params(dout >> 3, dout & 0x7, &params);
axi_clkgen_set_div(axi_clkgen, MMCM_REG_CLKOUT0_1, MMCM_REG_CLKOUT0_2,
- MMCM_REG_CLKOUT5_2, &params);
+ MMCM_REG_CLKOUT5_2, &params);
axi_clkgen_calc_clk_params(d, 0, &params);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_DIV,
- (params.edge << 13) | (params.nocount << 12) |
- (params.high << 6) | params.low, 0x3fff);
+ (params.edge << 13) | (params.nocount << 12) |
+ (params.high << 6) | params.low, 0x3fff);
axi_clkgen_calc_clk_params(m >> 3, m & 0x7, &params);
axi_clkgen_set_div(axi_clkgen, MMCM_REG_CLK_FB1, MMCM_REG_CLK_FB2,
- MMCM_REG_CLKOUT6_2, &params);
+ MMCM_REG_CLKOUT6_2, &params);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK1, lock & 0x3ff, 0x3ff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK2,
- (((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff);
+ (((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK3,
- (((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff);
+ (((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER1, filter >> 16, 0x9900);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER2, filter, 0x9900);
@@ -407,7 +414,7 @@ static int axi_clkgen_determine_rate(struct clk_hw *hw,
}
static unsigned int axi_clkgen_get_div(struct axi_clkgen *axi_clkgen,
- unsigned int reg1, unsigned int reg2)
+ unsigned int reg1, unsigned int reg2)
{
unsigned int val1, val2;
unsigned int div;
@@ -434,7 +441,7 @@ static unsigned int axi_clkgen_get_div(struct axi_clkgen *axi_clkgen,
}
static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
- unsigned long parent_rate)
+ unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
unsigned int d, m, dout;
@@ -442,9 +449,9 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
unsigned int val;
dout = axi_clkgen_get_div(axi_clkgen, MMCM_REG_CLKOUT0_1,
- MMCM_REG_CLKOUT0_2);
+ MMCM_REG_CLKOUT0_2);
m = axi_clkgen_get_div(axi_clkgen, MMCM_REG_CLK_FB1,
- MMCM_REG_CLK_FB2);
+ MMCM_REG_CLK_FB2);
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, &val);
if (val & MMCM_CLK_DIV_NOCOUNT)
@@ -496,6 +503,54 @@ static u8 axi_clkgen_get_parent(struct clk_hw *clk_hw)
return parent;
}
+static int axi_clkgen_setup_limits(struct axi_clkgen *axi_clkgen,
+ struct device *dev)
+{
+ unsigned int tech, family, speed_grade, reg_value;
+
+ axi_clkgen_read(axi_clkgen, ADI_AXI_REG_FPGA_INFO, &reg_value);
+ tech = ADI_AXI_INFO_FPGA_TECH(reg_value);
+ family = ADI_AXI_INFO_FPGA_FAMILY(reg_value);
+ speed_grade = ADI_AXI_INFO_FPGA_SPEED_GRADE(reg_value);
+
+ axi_clkgen->limits.fpfd_min = 10000;
+ axi_clkgen->limits.fvco_min = 600000;
+
+ switch (speed_grade) {
+ case ADI_AXI_FPGA_SPEED_1 ... ADI_AXI_FPGA_SPEED_1LV:
+ axi_clkgen->limits.fvco_max = 1200000;
+ axi_clkgen->limits.fpfd_max = 450000;
+ break;
+ case ADI_AXI_FPGA_SPEED_2 ... ADI_AXI_FPGA_SPEED_2LV:
+ axi_clkgen->limits.fvco_max = 1440000;
+ axi_clkgen->limits.fpfd_max = 500000;
+ if (family == ADI_AXI_FPGA_FAMILY_KINTEX || family == ADI_AXI_FPGA_FAMILY_ARTIX) {
+ axi_clkgen_read(axi_clkgen, ADI_CLKGEN_REG_FPGA_VOLTAGE,
+ &reg_value);
+ if (ADI_CLKGEN_INFO_FPGA_VOLTAGE(reg_value) < 950) {
+ axi_clkgen->limits.fvco_max = 1200000;
+ axi_clkgen->limits.fpfd_max = 450000;
+ }
+ }
+ break;
+ case ADI_AXI_FPGA_SPEED_3:
+ axi_clkgen->limits.fvco_max = 1600000;
+ axi_clkgen->limits.fpfd_max = 550000;
+ break;
+ default:
+ return dev_err_probe(dev, -ENODEV, "Unknown speed grade %d\n",
+ speed_grade);
+	}
+
+ /* Overwrite vco limits for ultrascale+ */
+ if (tech == ADI_AXI_FPGA_TECH_ULTRASCALE_PLUS) {
+ axi_clkgen->limits.fvco_max = 1600000;
+ axi_clkgen->limits.fvco_min = 800000;
+ }
+
+ return 0;
+}
+
static const struct clk_ops axi_clkgen_ops = {
.recalc_rate = axi_clkgen_recalc_rate,
.determine_rate = axi_clkgen_determine_rate,
@@ -510,6 +565,7 @@ static int axi_clkgen_probe(struct platform_device *pdev)
{
const struct axi_clkgen_limits *dflt_limits;
struct axi_clkgen *axi_clkgen;
+ unsigned int pcore_version;
struct clk_init_data init;
const char *parent_names[2];
const char *clk_name;
@@ -555,11 +611,20 @@ static int axi_clkgen_probe(struct platform_device *pdev)
return -EINVAL;
}
- memcpy(&axi_clkgen->limits, dflt_limits, sizeof(axi_clkgen->limits));
+ axi_clkgen_read(axi_clkgen, ADI_AXI_REG_VERSION, &pcore_version);
+
+ if (ADI_AXI_PCORE_VER_MAJOR(pcore_version) > 0x04) {
+ ret = axi_clkgen_setup_limits(axi_clkgen, &pdev->dev);
+ if (ret)
+ return ret;
+ } else {
+ memcpy(&axi_clkgen->limits, dflt_limits,
+ sizeof(axi_clkgen->limits));
+ }
clk_name = pdev->dev.of_node->name;
of_property_read_string(pdev->dev.of_node, "clock-output-names",
- &clk_name);
+ &clk_name);
init.name = clk_name;
init.ops = &axi_clkgen_ops;
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index f8417ee2961a..402ab74d9bfb 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -99,7 +99,7 @@ static void __init clps711x_clk_init_dt(struct device_node *np)
*/
tmp &= ~(SYSCON1_TC1M | SYSCON1_TC1S);
/* Timer2 in prescale mode.
- * Value writen is automatically re-loaded when
+ * Value written is automatically re-loaded when
* the counter underflows.
*/
tmp |= SYSCON1_TC2M | SYSCON1_TC2S;
diff --git a/drivers/clk/clk-eyeq.c b/drivers/clk/clk-eyeq.c
index 640c25788487..ea1c3d78e7cd 100644
--- a/drivers/clk/clk-eyeq.c
+++ b/drivers/clk/clk-eyeq.c
@@ -131,7 +131,7 @@ struct eqc_early_match_data {
* Both factors (mult and div) must fit in 32 bits. When an operation overflows,
* this function throws away low bits so that factors still fit in 32 bits.
*
- * Precision loss depends on amplitude of mult and div. Worst theorical
+ * Precision loss depends on amplitude of mult and div. Worst theoretical
* loss is: (UINT_MAX+1) / UINT_MAX - 1 = 2.3e-10.
* This is 1Hz every 4.3GHz.
*/
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 68e585a02fd9..4746f8219132 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -15,7 +15,7 @@
#include <linux/string.h>
/**
- * DOC: basic gatable clock which can gate and ungate its output
+ * DOC: basic gateable clock which can gate and ungate its output
*
* Traits of this clock:
* prepare - clk_(un)prepare only ensures parent is (un)prepared
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index 5d2a90addf1a..921523fc26f2 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -265,7 +265,7 @@ static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
return -EINVAL;
/*
- * Program divider to div-by-1 if we succesfuly set core clock below
+ * Program divider to div-by-1 if we successfully set core clock below
* 500MHz threshold.
*/
if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index bd4f21c22004..4709f0338e37 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -14,6 +14,7 @@
struct clk_pwm {
struct clk_hw hw;
struct pwm_device *pwm;
+ struct pwm_state state;
u32 fixed_rate;
};
@@ -22,11 +23,28 @@ static inline struct clk_pwm *to_clk_pwm(struct clk_hw *hw)
return container_of(hw, struct clk_pwm, hw);
}
+static int clk_pwm_enable(struct clk_hw *hw)
+{
+ struct clk_pwm *clk_pwm = to_clk_pwm(hw);
+
+ return pwm_apply_atomic(clk_pwm->pwm, &clk_pwm->state);
+}
+
+static void clk_pwm_disable(struct clk_hw *hw)
+{
+ struct clk_pwm *clk_pwm = to_clk_pwm(hw);
+ struct pwm_state state = clk_pwm->state;
+
+ state.enabled = false;
+
+ pwm_apply_atomic(clk_pwm->pwm, &state);
+}
+
static int clk_pwm_prepare(struct clk_hw *hw)
{
struct clk_pwm *clk_pwm = to_clk_pwm(hw);
- return pwm_enable(clk_pwm->pwm);
+ return pwm_apply_might_sleep(clk_pwm->pwm, &clk_pwm->state);
}
static void clk_pwm_unprepare(struct clk_hw *hw)
@@ -48,8 +66,11 @@ static int clk_pwm_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
struct clk_pwm *clk_pwm = to_clk_pwm(hw);
struct pwm_state state;
+ int ret;
- pwm_get_state(clk_pwm->pwm, &state);
+ ret = pwm_get_state_hw(clk_pwm->pwm, &state);
+ if (ret)
+ return ret;
duty->num = state.duty_cycle;
duty->den = state.period;
@@ -57,6 +78,13 @@ static int clk_pwm_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
return 0;
}
+static const struct clk_ops clk_pwm_ops_atomic = {
+ .enable = clk_pwm_enable,
+ .disable = clk_pwm_disable,
+ .recalc_rate = clk_pwm_recalc_rate,
+ .get_duty_cycle = clk_pwm_get_duty_cycle,
+};
+
static const struct clk_ops clk_pwm_ops = {
.prepare = clk_pwm_prepare,
.unprepare = clk_pwm_unprepare,
@@ -103,20 +131,19 @@ static int clk_pwm_probe(struct platform_device *pdev)
return -EINVAL;
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to the
- * atomic PWM API.
- */
- pwm_apply_args(pwm);
- ret = pwm_config(pwm, (pargs.period + 1) >> 1, pargs.period);
- if (ret < 0)
- return ret;
+ pwm_init_state(pwm, &clk_pwm->state);
+ pwm_set_relative_duty_cycle(&clk_pwm->state, 1, 2);
+ clk_pwm->state.enabled = true;
clk_name = node->name;
of_property_read_string(node, "clock-output-names", &clk_name);
init.name = clk_name;
- init.ops = &clk_pwm_ops;
+ if (pwm_might_sleep(pwm))
+ init.ops = &clk_pwm_ops;
+ else
+ init.ops = &clk_pwm_ops_atomic;
+
init.flags = 0;
init.num_parents = 0;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 8ddf3a9a53df..d4e9c3577b35 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -235,7 +235,7 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
* through platform_device_id.
*
* However if device's DT node contains proper clock compatible and driver is
- * built as a module, then the *module* matching will be done trough DT aliases.
+ * built as a module, then the *module* matching will be done through DT aliases.
* This requires of_device_id table. In the same time this will not change the
* actual *device* matching so do not add .of_match_table.
*/
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index 1b1561c84127..d2408403283f 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -455,7 +455,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
/*
* Note that the scmi_clk_ops_db is on the stack, not global,
- * because it cannot be shared between mulitple probe-sequences
+ * because it cannot be shared between multiple probe-sequences
* to avoid sharing the devm_ allocated clk_ops between multiple
* SCMI clk driver instances.
*/
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index a4c92c5ef3ff..e755db545e2e 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -655,7 +655,7 @@ static int si5351_msynth_determine_rate(struct clk_hw *hw,
unsigned long a, b, c;
int divby4;
- /* multisync6-7 can only handle freqencies < 150MHz */
+ /* multisync6-7 can only handle frequencies < 150MHz */
if (hwdata->num >= 6 && rate > SI5351_MULTISYNTH67_MAX_FREQ)
rate = SI5351_MULTISYNTH67_MAX_FREQ;
@@ -1048,11 +1048,11 @@ static int si5351_clkout_determine_rate(struct clk_hw *hw,
unsigned long rate = req->rate;
unsigned char rdiv;
- /* clkout6/7 can only handle output freqencies < 150MHz */
+ /* clkout6/7 can only handle output frequencies < 150MHz */
if (hwdata->num >= 6 && rate > SI5351_CLKOUT67_MAX_FREQ)
rate = SI5351_CLKOUT67_MAX_FREQ;
- /* clkout freqency is 8kHz - 160MHz */
+ /* clkout frequency is 8kHz - 160MHz */
if (rate > SI5351_CLKOUT_MAX_FREQ)
rate = SI5351_CLKOUT_MAX_FREQ;
if (rate < SI5351_CLKOUT_MIN_FREQ)
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
index c88650558f32..ca3473efa314 100644
--- a/drivers/clk/clk-si544.c
+++ b/drivers/clk/clk-si544.c
@@ -39,7 +39,7 @@
/* Max freq depends on speed grade */
#define SI544_MIN_FREQ 200000U
-/* Si544 Internal oscilator runs at 55.05 MHz */
+/* Si544 Internal oscillator runs at 55.05 MHz */
#define FXO 55050000U
/* VCO range is 10.8 .. 12.1 GHz, max depends on speed grade */
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index a549ea13be20..e97fe90443a6 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -63,7 +63,7 @@ struct clk_si570_info {
* struct clk_si570:
* @hw: Clock hw struct
* @regmap: Device's regmap
- * @div_offset: Rgister offset for dividers
+ * @div_offset: Register offset for dividers
* @info: Device info
* @fxtal: Factory xtal frequency
* @n1: Clock divider N1
@@ -181,7 +181,7 @@ static int si570_update_rfreq(struct clk_si570 *data)
}
/**
- * si570_calc_divs() - Caluclate clock dividers
+ * si570_calc_divs() - Calculate clock dividers
* @frequency: Target frequency
* @data: Driver data structure
* @out_rfreq: RFREG fractional multiplier (output)
diff --git a/drivers/clk/clk-sp7021.c b/drivers/clk/clk-sp7021.c
index 7cb7d501d7a6..95d66191df4b 100644
--- a/drivers/clk/clk-sp7021.c
+++ b/drivers/clk/clk-sp7021.c
@@ -14,7 +14,7 @@
#include <dt-bindings/clock/sunplus,sp7021-clkc.h>
-/* speical div_width values for PLLTV/PLLA */
+/* special div_width values for PLLTV/PLLA */
#define DIV_TV 33
#define DIV_A 34
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 85e23961ec34..719cddc82ae6 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -19,7 +19,7 @@
#include <linux/mfd/syscon.h>
/*
- * Include list of clocks wich are not derived from system clock (SYSCLOCK)
+ * Include list of clocks which are not derived from system clock (SYSCLOCK)
* The index of these clocks is the secondary index of DT bindings
*
*/
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 6d31cd54d7cf..4200022d2084 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -136,7 +136,7 @@
#define VC5_MAX_FOD_NUM 4
/* flags to describe chip features */
-/* chip has built-in oscilator */
+/* chip has built-in oscillator */
#define VC5_HAS_INTERNAL_XTAL BIT(0)
/* chip has PFD requency doubler */
#define VC5_HAS_PFD_FREQ_DBL BIT(1)
diff --git a/drivers/clk/clk-versaclock7.c b/drivers/clk/clk-versaclock7.c
index f323263e32c3..483285b30c13 100644
--- a/drivers/clk/clk-versaclock7.c
+++ b/drivers/clk/clk-versaclock7.c
@@ -1257,7 +1257,7 @@ static const struct vc7_chip_info vc7_rc21008a_info = {
.num_outputs = 8,
};
-static struct regmap_range_cfg vc7_range_cfg[] = {
+static const struct regmap_range_cfg vc7_range_cfg[] = {
{
.range_min = 0,
.range_max = VC7_MAX_REG,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0565c87656cf..b821b2cdb155 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -365,6 +365,18 @@ const char *clk_hw_get_name(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);
+struct device *clk_hw_get_dev(const struct clk_hw *hw)
+{
+ return hw->core->dev;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_dev);
+
+struct device_node *clk_hw_get_of_node(const struct clk_hw *hw)
+{
+ return hw->core->of_node;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_of_node);
+
struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->core->hw;
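The two accessors added to clk.c above let provider code reach the registering device and its OF node without touching clk_core internals. A possible use from a provider callback, as a sketch (the .init op and the debug print are illustrative only):

	#include <linux/clk-provider.h>

	/* Sketch: both accessors may return NULL, as the kunit tests below
	 * verify, so callers must handle clks registered without a device. */
	static int my_clk_init(struct clk_hw *hw)
	{
		struct device *dev = clk_hw_get_dev(hw);
		struct device_node *np = clk_hw_get_of_node(hw);

		pr_debug("clk %s: dev %s, node %pOF\n", clk_hw_get_name(hw),
			 dev ? dev_name(dev) : "(none)", np);
		return 0;
	}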
diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c
index f08feeaa3750..a268d7b5d4cb 100644
--- a/drivers/clk/clk_test.c
+++ b/drivers/clk/clk_test.c
@@ -292,7 +292,7 @@ static void clk_test_set_set_get_rate(struct kunit *test)
}
/*
- * Test that clk_round_rate and clk_set_rate are consitent and will
+ * Test that clk_round_rate and clk_set_rate are consistent and will
* return the same frequency.
*/
static void clk_test_round_set_get_rate(struct kunit *test)
@@ -2794,49 +2794,49 @@ static struct kunit_suite clk_register_clk_parent_data_of_suite = {
};
/**
- * struct clk_register_clk_parent_data_device_ctx - Context for clk_parent_data device tests
- * @dev: device of clk under test
- * @hw: clk_hw for clk under test
+ * struct platform_driver_dev_ctx - Context to stash platform device
+ * @dev: device under test
* @pdrv: driver to attach to find @dev
*/
-struct clk_register_clk_parent_data_device_ctx {
+struct platform_driver_dev_ctx {
struct device *dev;
- struct clk_hw hw;
struct platform_driver pdrv;
};
-static inline struct clk_register_clk_parent_data_device_ctx *
-clk_register_clk_parent_data_driver_to_test_context(struct platform_device *pdev)
+static inline struct platform_driver_dev_ctx *
+pdev_to_platform_driver_dev_ctx(struct platform_device *pdev)
{
return container_of(to_platform_driver(pdev->dev.driver),
- struct clk_register_clk_parent_data_device_ctx, pdrv);
+ struct platform_driver_dev_ctx, pdrv);
}
-static int clk_register_clk_parent_data_device_probe(struct platform_device *pdev)
+static int kunit_platform_driver_dev_probe(struct platform_device *pdev)
{
- struct clk_register_clk_parent_data_device_ctx *ctx;
+ struct platform_driver_dev_ctx *ctx;
- ctx = clk_register_clk_parent_data_driver_to_test_context(pdev);
+ ctx = pdev_to_platform_driver_dev_ctx(pdev);
ctx->dev = &pdev->dev;
return 0;
}
-static void clk_register_clk_parent_data_device_driver(struct kunit *test)
+static struct device *
+kunit_of_platform_driver_dev(struct kunit *test, const struct of_device_id *match_table)
{
- struct clk_register_clk_parent_data_device_ctx *ctx = test->priv;
- static const struct of_device_id match_table[] = {
- { .compatible = "test,clk-parent-data" },
- { }
- };
+ struct platform_driver_dev_ctx *ctx;
- ctx->pdrv.probe = clk_register_clk_parent_data_device_probe;
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->pdrv.probe = kunit_platform_driver_dev_probe;
ctx->pdrv.driver.of_match_table = match_table;
ctx->pdrv.driver.name = __func__;
ctx->pdrv.driver.owner = THIS_MODULE;
KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
+
+ return ctx->dev;
}
static const struct clk_register_clk_parent_data_test_case
@@ -2909,30 +2909,34 @@ KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test,
*/
static void clk_register_clk_parent_data_device_test(struct kunit *test)
{
- struct clk_register_clk_parent_data_device_ctx *ctx;
+ struct device *dev;
+ struct clk_hw *hw;
const struct clk_register_clk_parent_data_test_case *test_param;
struct clk_hw *parent_hw;
struct clk_init_data init = { };
struct clk *expected_parent, *actual_parent;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-parent-data" },
+ { }
+ };
- ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- test->priv = ctx;
-
- clk_register_clk_parent_data_device_driver(test);
+ dev = kunit_of_platform_driver_dev(test, match_table);
- expected_parent = clk_get_kunit(test, ctx->dev, "50");
+ expected_parent = clk_get_kunit(test, dev, "50");
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
test_param = test->param_value;
init.parent_data = &test_param->pdata;
init.num_parents = 1;
init.name = "parent_data_device_test_clk";
init.ops = &clk_dummy_single_parent_ops;
- ctx->hw.init = &init;
- KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
+ hw->init = &init;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
- parent_hw = clk_hw_get_parent(&ctx->hw);
+ parent_hw = clk_hw_get_parent(hw);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
@@ -3016,18 +3020,19 @@ KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test,
*/
static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
{
- struct clk_register_clk_parent_data_device_ctx *ctx;
+ struct device *dev;
+ struct clk_hw *hw;
const struct clk_register_clk_parent_data_test_case *test_param;
struct clk_dummy_context *parent;
struct clk_hw *parent_hw;
struct clk_parent_data pdata = { };
struct clk_init_data init = { };
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-parent-data" },
+ { }
+ };
- ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- test->priv = ctx;
-
- clk_register_clk_parent_data_device_driver(test);
+ dev = kunit_of_platform_driver_dev(test, match_table);
parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
@@ -3036,7 +3041,10 @@ static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk",
&clk_dummy_rate_ops, 0);
- KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, parent_hw));
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, parent_hw));
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
test_param = test->param_value;
memcpy(&pdata, &test_param->pdata, sizeof(pdata));
@@ -3045,10 +3053,10 @@ static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
init.num_parents = 1;
init.ops = &clk_dummy_single_parent_ops;
init.name = "parent_data_device_hw_test_clk";
- ctx->hw.init = &init;
- KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
+ hw->init = &init;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
- KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(&ctx->hw));
+ KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(hw));
}
static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = {
@@ -3395,8 +3403,148 @@ static struct kunit_suite clk_assigned_rates_suite = {
.init = clk_assigned_rates_test_init,
};
+static const struct clk_init_data clk_hw_get_dev_of_node_init_data = {
+ .name = "clk_hw_get_dev_of_node",
+ .ops = &empty_clk_ops,
+};
+
+/*
+ * Test that a clk registered with a struct device returns the device from
+ * clk_hw_get_dev() and the node from clk_hw_get_of_node()
+ */
+static void clk_hw_register_dev_get_dev_returns_dev(struct kunit *test)
+{
+ struct device *dev;
+ struct clk_hw *hw;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-hw-get-dev-of-node" },
+ { }
+ };
+
+ KUNIT_ASSERT_EQ(test, 0, of_overlay_apply_kunit(test, kunit_clk_hw_get_dev_of_node));
+
+ dev = kunit_of_platform_driver_dev(test, match_table);
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, dev, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, dev_of_node(dev), clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered with a struct device that's not associated with
+ * an OF node returns the device from clk_hw_get_dev() and NULL from
+ * clk_hw_get_of_node()
+ */
+static void clk_hw_register_dev_no_node_get_dev_returns_dev(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct device *dev;
+ struct clk_hw *hw;
+
+ pdev = kunit_platform_device_alloc(test, "clk_hw_register_dev_no_node", -1);
+ KUNIT_ASSERT_NOT_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+ dev = &pdev->dev;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, dev, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered without a struct device returns NULL from
+ * clk_hw_get_dev()
+ */
+static void clk_hw_register_NULL_get_dev_of_node_returns_NULL(struct kunit *test)
+{
+ struct clk_hw *hw;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, NULL, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered with an of_node returns the node from
+ * clk_hw_get_of_node() and NULL from clk_hw_get_dev()
+ */
+static void of_clk_hw_register_node_get_of_node_returns_node(struct kunit *test)
+{
+ struct device_node *np;
+ struct clk_hw *hw;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ KUNIT_ASSERT_EQ(test, 0, of_overlay_apply_kunit(test, kunit_clk_hw_get_dev_of_node));
+
+ np = of_find_compatible_node(NULL, NULL, "test,clk-hw-get-dev-of-node");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np);
+ of_node_put_kunit(test, np);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, np, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, np, clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered without an of_node returns NULL from both
+ * clk_hw_get_of_node() and clk_hw_get_dev()
+ */
+static void of_clk_hw_register_NULL_get_of_node_returns_NULL(struct kunit *test)
+{
+ struct clk_hw *hw;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, NULL, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_of_node(hw));
+}
+
+static struct kunit_case clk_hw_get_dev_of_node_test_cases[] = {
+ KUNIT_CASE(clk_hw_register_dev_get_dev_returns_dev),
+ KUNIT_CASE(clk_hw_register_dev_no_node_get_dev_returns_dev),
+ KUNIT_CASE(clk_hw_register_NULL_get_dev_of_node_returns_NULL),
+ KUNIT_CASE(of_clk_hw_register_node_get_of_node_returns_node),
+ KUNIT_CASE(of_clk_hw_register_NULL_get_of_node_returns_NULL),
+ {}
+};
+
+/*
+ * Test suite to verify clk_hw_get_dev() and clk_hw_get_of_node() when clk
+ * registered with clk_hw_register() and of_clk_hw_register()
+ */
+static struct kunit_suite clk_hw_get_dev_of_node_test_suite = {
+ .name = "clk_hw_get_dev_of_node_test_suite",
+ .test_cases = clk_hw_get_dev_of_node_test_cases,
+};
+
+
kunit_test_suites(
&clk_assigned_rates_suite,
+ &clk_hw_get_dev_of_node_test_suite,
&clk_leaf_mux_set_rate_parent_test_suite,
&clk_test_suite,
&clk_multiple_parents_mux_test_suite,
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
index 20bfcec2d3b5..ad286ba4ce0c 100644
--- a/drivers/clk/davinci/pll.h
+++ b/drivers/clk/davinci/pll.h
@@ -80,7 +80,7 @@ static const struct davinci_pll_sysclk_info n = { \
* @name: The name of the clock
* @parent_names: Array of names of the parent clocks
* @num_parents: Length of @parent_names
- * @table: Array of values to write to OCSEL[OCSRC] cooresponding to
+ * @table: Array of values to write to OCSEL[OCSRC] corresponding to
* @parent_names
* @ocsrc_mask: Bitmask for OCSEL[OCSRC]
*/
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index b48322176c21..f3ee9397bb0c 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -277,6 +277,11 @@ davinci_lpsc_clk_register(struct device *dev, const char *name,
lpsc->pm_domain.name = devm_kasprintf(dev, GFP_KERNEL, "%s: %s",
best_dev_name(dev), name);
+ if (!lpsc->pm_domain.name) {
+ clk_hw_unregister(&lpsc->hw);
+ kfree(lpsc);
+ return ERR_PTR(-ENOMEM);
+ }
lpsc->pm_domain.attach_dev = davinci_psc_genpd_attach_dev;
lpsc->pm_domain.detach_dev = davinci_psc_genpd_detach_dev;
lpsc->pm_domain.flags = GENPD_FLAG_PM_CLK;
diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c
index 90d858522967..21d4297f3225 100644
--- a/drivers/clk/hisilicon/clkgate-separated.c
+++ b/drivers/clk/hisilicon/clkgate-separated.c
@@ -17,9 +17,9 @@
#include "clk.h"
/* clock separated gate register offset */
-#define CLKGATE_SEPERATED_ENABLE 0x0
-#define CLKGATE_SEPERATED_DISABLE 0x4
-#define CLKGATE_SEPERATED_STATUS 0x8
+#define CLKGATE_SEPARATED_ENABLE 0x0
+#define CLKGATE_SEPARATED_DISABLE 0x4
+#define CLKGATE_SEPARATED_STATUS 0x8
struct clkgate_separated {
struct clk_hw hw;
@@ -40,7 +40,7 @@ static int clkgate_separated_enable(struct clk_hw *hw)
spin_lock_irqsave(sclk->lock, flags);
reg = BIT(sclk->bit_idx);
writel_relaxed(reg, sclk->enable);
- readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ readl_relaxed(sclk->enable + CLKGATE_SEPARATED_STATUS);
if (sclk->lock)
spin_unlock_irqrestore(sclk->lock, flags);
return 0;
@@ -56,8 +56,8 @@ static void clkgate_separated_disable(struct clk_hw *hw)
if (sclk->lock)
spin_lock_irqsave(sclk->lock, flags);
reg = BIT(sclk->bit_idx);
- writel_relaxed(reg, sclk->enable + CLKGATE_SEPERATED_DISABLE);
- readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ writel_relaxed(reg, sclk->enable + CLKGATE_SEPARATED_DISABLE);
+ readl_relaxed(sclk->enable + CLKGATE_SEPARATED_STATUS);
if (sclk->lock)
spin_unlock_irqrestore(sclk->lock, flags);
}
@@ -68,7 +68,7 @@ static int clkgate_separated_is_enabled(struct clk_hw *hw)
u32 reg;
sclk = container_of(hw, struct clkgate_separated, hw);
- reg = readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ reg = readl_relaxed(sclk->enable + CLKGATE_SEPARATED_STATUS);
reg &= BIT(sclk->bit_idx);
return reg ? 1 : 0;
@@ -100,7 +100,7 @@ struct clk *hisi_register_clkgate_sep(struct device *dev, const char *name,
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
- sclk->enable = reg + CLKGATE_SEPERATED_ENABLE;
+ sclk->enable = reg + CLKGATE_SEPARATED_ENABLE;
sclk->bit_idx = bit_idx;
sclk->flags = clk_gate_flags;
sclk->hw.init = &init;
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index f163df952ccc..eb27c6fee359 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -46,12 +46,12 @@ static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
}
-static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_busy_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_busy_divider *busy = to_clk_busy_divider(hw);
- return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
+ return busy->div_ops->determine_rate(&busy->div.hw, req);
}
static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -69,7 +69,7 @@ static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_busy_divider_ops = {
.recalc_rate = clk_busy_divider_recalc_rate,
- .round_rate = clk_busy_divider_round_rate,
+ .determine_rate = clk_busy_divider_determine_rate,
.set_rate = clk_busy_divider_set_rate,
};
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index f187582ba491..1467d0a1b934 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -73,21 +73,6 @@ static int imx8m_clk_composite_compute_dividers(unsigned long rate,
return ret;
}
-static long imx8m_clk_composite_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
-{
- int prediv_value;
- int div_value;
-
- imx8m_clk_composite_compute_dividers(rate, *prate,
- &prediv_value, &div_value);
- rate = DIV_ROUND_UP(*prate, prediv_value);
-
- return DIV_ROUND_UP(rate, div_value);
-
-}
-
static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
@@ -153,7 +138,6 @@ static int imx8m_divider_determine_rate(struct clk_hw *hw,
static const struct clk_ops imx8m_clk_composite_divider_ops = {
.recalc_rate = imx8m_clk_composite_divider_recalc_rate,
- .round_rate = imx8m_clk_composite_divider_round_rate,
.set_rate = imx8m_clk_composite_divider_set_rate,
.determine_rate = imx8m_divider_determine_rate,
};
diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c
index 6c6c5a30f328..513d74a39d3b 100644
--- a/drivers/clk/imx/clk-composite-93.c
+++ b/drivers/clk/imx/clk-composite-93.c
@@ -98,12 +98,6 @@ imx93_clk_composite_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_
return clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long
-imx93_clk_composite_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
-{
- return clk_divider_ops.round_rate(hw, rate, prate);
-}
-
static int
imx93_clk_composite_divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
@@ -141,7 +135,6 @@ static int imx93_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long
static const struct clk_ops imx93_clk_composite_divider_ops = {
.recalc_rate = imx93_clk_composite_divider_recalc_rate,
- .round_rate = imx93_clk_composite_divider_round_rate,
.determine_rate = imx93_clk_composite_divider_determine_rate,
.set_rate = imx93_clk_composite_divider_set_rate,
};
diff --git a/drivers/clk/imx/clk-cpu.c b/drivers/clk/imx/clk-cpu.c
index cb6ca4cf0535..43637cb61693 100644
--- a/drivers/clk/imx/clk-cpu.c
+++ b/drivers/clk/imx/clk-cpu.c
@@ -30,12 +30,14 @@ static unsigned long clk_cpu_recalc_rate(struct clk_hw *hw,
return clk_get_rate(cpu->div);
}
-static long clk_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_cpu *cpu = to_clk_cpu(hw);
- return clk_round_rate(cpu->pll, rate);
+ req->rate = clk_round_rate(cpu->pll, req->rate);
+
+ return 0;
}
static int clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -66,7 +68,7 @@ static int clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_cpu_ops = {
.recalc_rate = clk_cpu_recalc_rate,
- .round_rate = clk_cpu_round_rate,
+ .determine_rate = clk_cpu_determine_rate,
.set_rate = clk_cpu_set_rate,
};
diff --git a/drivers/clk/imx/clk-fixup-div.c b/drivers/clk/imx/clk-fixup-div.c
index 100ca828b052..aa6addbeb5a8 100644
--- a/drivers/clk/imx/clk-fixup-div.c
+++ b/drivers/clk/imx/clk-fixup-div.c
@@ -18,7 +18,7 @@
* @fixup: a hook to fixup the write value
*
* The imx fixup divider clock is a subclass of basic clk_divider
- * with an addtional fixup hook.
+ * with an additional fixup hook.
*/
struct clk_fixup_div {
struct clk_divider divider;
@@ -41,12 +41,12 @@ static unsigned long clk_fixup_div_recalc_rate(struct clk_hw *hw,
return fixup_div->ops->recalc_rate(&fixup_div->divider.hw, parent_rate);
}
-static long clk_fixup_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_fixup_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fixup_div *fixup_div = to_clk_fixup_div(hw);
- return fixup_div->ops->round_rate(&fixup_div->divider.hw, rate, prate);
+ return fixup_div->ops->determine_rate(&fixup_div->divider.hw, req);
}
static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -81,7 +81,7 @@ static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_fixup_div_ops = {
.recalc_rate = clk_fixup_div_recalc_rate,
- .round_rate = clk_fixup_div_round_rate,
+ .determine_rate = clk_fixup_div_determine_rate,
.set_rate = clk_fixup_div_set_rate,
};
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
index b48701864ef0..418ac9fe2c26 100644
--- a/drivers/clk/imx/clk-fixup-mux.c
+++ b/drivers/clk/imx/clk-fixup-mux.c
@@ -17,7 +17,7 @@
* @fixup: a hook to fixup the write value
*
* The imx fixup multiplexer clock is a subclass of basic clk_mux
- * with an addtional fixup hook.
+ * with an additional fixup hook.
*/
struct clk_fixup_mux {
struct clk_mux mux;
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index c703056fae85..eb668faaa38f 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -119,19 +119,19 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u64 parent_rate = *prate;
+ u64 parent_rate = req->best_parent_rate;
u32 divff, divfi;
u64 temp64;
parent_rate *= 8;
- rate *= 2;
- temp64 = rate;
+ req->rate *= 2;
+ temp64 = req->rate;
do_div(temp64, parent_rate);
divfi = temp64;
- temp64 = rate - divfi * parent_rate;
+ temp64 = req->rate - divfi * parent_rate;
temp64 *= PLL_FRAC_DENOM;
do_div(temp64, parent_rate);
divff = temp64;
@@ -140,9 +140,11 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
temp64 *= divff;
do_div(temp64, PLL_FRAC_DENOM);
- rate = parent_rate * divfi + temp64;
+ req->rate = parent_rate * divfi + temp64;
+
+ req->rate = req->rate / 2;
- return rate / 2;
+ return 0;
}
/*
@@ -198,7 +200,7 @@ static const struct clk_ops clk_frac_pll_ops = {
.unprepare = clk_pll_unprepare,
.is_prepared = clk_pll_is_prepared,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
index 85771afd4698..090d60867250 100644
--- a/drivers/clk/imx/clk-fracn-gppll.c
+++ b/drivers/clk/imx/clk-fracn-gppll.c
@@ -134,8 +134,8 @@ imx_get_pll_settings(struct clk_fracn_gppll *pll, unsigned long rate)
return NULL;
}
-static long clk_fracn_gppll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_fracn_gppll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fracn_gppll *pll = to_clk_fracn_gppll(hw);
const struct imx_fracn_gppll_rate_table *rate_table = pll->rate_table;
@@ -143,11 +143,16 @@ static long clk_fracn_gppll_round_rate(struct clk_hw *hw, unsigned long rate,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
- if (rate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
/* return minimum supported value */
- return rate_table[pll->rate_count - 1].rate;
+ req->rate = rate_table[pll->rate_count - 1].rate;
+
+ return 0;
}
static unsigned long clk_fracn_gppll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
@@ -345,7 +350,7 @@ static const struct clk_ops clk_fracn_gppll_ops = {
.unprepare = clk_fracn_gppll_unprepare,
.is_prepared = clk_fracn_gppll_is_prepared,
.recalc_rate = clk_fracn_gppll_recalc_rate,
- .round_rate = clk_fracn_gppll_round_rate,
+ .determine_rate = clk_fracn_gppll_determine_rate,
.set_rate = clk_fracn_gppll_set_rate,
};
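
clk-fracn-gppll here, and clk-pll14xx further down, both round against a
rate table sorted high to low: the first entry not above the request wins,
and anything below the table falls back to the smallest supported rate.
The selection logic, extracted as a standalone sketch:

/* Pick from a descending table; clamp to the minimum when out of range. */
static unsigned long pick_from_table(const unsigned long *table, int count,
				     unsigned long rate)
{
	int i;

	for (i = 0; i < count; i++)
		if (rate >= table[i])
			return table[i];

	return table[count - 1];
}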
diff --git a/drivers/clk/imx/clk-gate-exclusive.c b/drivers/clk/imx/clk-gate-exclusive.c
index 77342893bb71..7017e9d4e188 100644
--- a/drivers/clk/imx/clk-gate-exclusive.c
+++ b/drivers/clk/imx/clk-gate-exclusive.c
@@ -18,7 +18,7 @@
* gate clock
*
* The imx exclusive gate clock is a subclass of basic clk_gate
- * with an addtional mask to indicate which other gate bits in the same
+ * with an additional mask to indicate which other gate bits in the same
* register is mutually exclusive to this gate clock.
*/
struct clk_gate_exclusive {
diff --git a/drivers/clk/imx/clk-imx5.c b/drivers/clk/imx/clk-imx5.c
index b82044911603..9c5f489b3975 100644
--- a/drivers/clk/imx/clk-imx5.c
+++ b/drivers/clk/imx/clk-imx5.c
@@ -454,7 +454,7 @@ static void __init mx51_clocks_init(struct device_node *np)
* longer supported. Set to one for better power saving.
*
* The effect of not setting these bits is that MIPI clocks can't be
- * enabled without the IPU clock being enabled aswell.
+ * enabled without the IPU clock being enabled as well.
*/
val = readl(MXC_CCM_CCDR);
val |= 1 << 18;
diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c
index c169fe53a35f..790f7e44b11e 100644
--- a/drivers/clk/imx/clk-imx8-acm.c
+++ b/drivers/clk/imx/clk-imx8-acm.c
@@ -22,7 +22,7 @@
* struct clk_imx_acm_pm_domains - structure for multi power domain
* @pd_dev: power domain device
* @pd_dev_link: power domain device link
- * @num_domains: power domain nummber
+ * @num_domains: power domain number
*/
struct clk_imx_acm_pm_domains {
struct device **pd_dev;
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index d0ccaa040225..1dae3410ee99 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -267,7 +267,6 @@ static int imx_lpcg_parse_clks_from_dt(struct platform_device *pdev,
if (ret)
goto unreg;
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
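
The deleted pm_runtime_mark_last_busy() call is not lost functionality:
this removal only makes sense if pm_runtime_put_autosuspend() now updates
the last-busy timestamp itself (a recent PM core change; the assumption
here is that this driver-side cleanup follows it). In short:

/* Before: explicit timestamp update, then the autosuspend put. */
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);

/* After: the put refreshes the last-busy timestamp on its own. */
pm_runtime_put_autosuspend(dev);

The same one-line removal appears again in clk-scu.c below.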
diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c
index cc2ee2be1819..7e88877a6245 100644
--- a/drivers/clk/imx/clk-imx95-blk-ctl.c
+++ b/drivers/clk/imx/clk-imx95-blk-ctl.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2024 NXP
+ * Copyright 2024-2025 NXP
*/
+#include <dt-bindings/clock/nxp,imx94-clock.h>
#include <dt-bindings/clock/nxp,imx95-clock.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
@@ -156,7 +157,7 @@ static const struct imx95_blk_ctl_dev_data camblk_dev_data = {
.clk_reg_offset = 0,
};
-static const struct imx95_blk_ctl_clk_dev_data lvds_clk_dev_data[] = {
+static const struct imx95_blk_ctl_clk_dev_data imx95_lvds_clk_dev_data[] = {
[IMX95_CLK_DISPMIX_LVDS_PHY_DIV] = {
.name = "ldb_phy_div",
.parent_names = (const char *[]){ "ldbpll", },
@@ -213,21 +214,21 @@ static const struct imx95_blk_ctl_clk_dev_data lvds_clk_dev_data[] = {
},
};
-static const struct imx95_blk_ctl_dev_data lvds_csr_dev_data = {
- .num_clks = ARRAY_SIZE(lvds_clk_dev_data),
- .clk_dev_data = lvds_clk_dev_data,
+static const struct imx95_blk_ctl_dev_data imx95_lvds_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx95_lvds_clk_dev_data),
+ .clk_dev_data = imx95_lvds_clk_dev_data,
.clk_reg_offset = 0,
};
-static const char * const disp_engine_parents[] = {
+static const char * const imx95_disp_engine_parents[] = {
"videopll1", "dsi_pll", "ldb_pll_div7"
};
-static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
+static const struct imx95_blk_ctl_clk_dev_data imx95_dispmix_csr_clk_dev_data[] = {
[IMX95_CLK_DISPMIX_ENG0_SEL] = {
.name = "disp_engine0_sel",
- .parent_names = disp_engine_parents,
- .num_parents = ARRAY_SIZE(disp_engine_parents),
+ .parent_names = imx95_disp_engine_parents,
+ .num_parents = ARRAY_SIZE(imx95_disp_engine_parents),
.reg = 0,
.bit_idx = 0,
.bit_width = 2,
@@ -236,8 +237,8 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
},
[IMX95_CLK_DISPMIX_ENG1_SEL] = {
.name = "disp_engine1_sel",
- .parent_names = disp_engine_parents,
- .num_parents = ARRAY_SIZE(disp_engine_parents),
+ .parent_names = imx95_disp_engine_parents,
+ .num_parents = ARRAY_SIZE(imx95_disp_engine_parents),
.reg = 0,
.bit_idx = 2,
.bit_width = 2,
@@ -246,9 +247,9 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
}
};
-static const struct imx95_blk_ctl_dev_data dispmix_csr_dev_data = {
- .num_clks = ARRAY_SIZE(dispmix_csr_clk_dev_data),
- .clk_dev_data = dispmix_csr_clk_dev_data,
+static const struct imx95_blk_ctl_dev_data imx95_dispmix_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx95_dispmix_csr_clk_dev_data),
+ .clk_dev_data = imx95_dispmix_csr_clk_dev_data,
.clk_reg_offset = 0,
};
@@ -300,6 +301,51 @@ static const struct imx95_blk_ctl_dev_data hsio_blk_ctl_dev_data = {
.clk_reg_offset = 0,
};
+static const struct imx95_blk_ctl_clk_dev_data imx94_lvds_clk_dev_data[] = {
+ [IMX94_CLK_DISPMIX_LVDS_CLK_GATE] = {
+ .name = "lvds_clk_gate",
+ .parent_names = (const char *[]){ "ldbpll", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 1,
+ .bit_width = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+};
+
+static const struct imx95_blk_ctl_dev_data imx94_lvds_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx94_lvds_clk_dev_data),
+ .clk_dev_data = imx94_lvds_clk_dev_data,
+ .clk_reg_offset = 0,
+ .rpm_enabled = true,
+};
+
+static const char * const imx94_disp_engine_parents[] = {
+ "disppix", "ldb_pll_div7"
+};
+
+static const struct imx95_blk_ctl_clk_dev_data imx94_dispmix_csr_clk_dev_data[] = {
+ [IMX94_CLK_DISPMIX_CLK_SEL] = {
+ .name = "disp_clk_sel",
+ .parent_names = imx94_disp_engine_parents,
+ .num_parents = ARRAY_SIZE(imx94_disp_engine_parents),
+ .reg = 0,
+ .bit_idx = 1,
+ .bit_width = 1,
+ .type = CLK_MUX,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct imx95_blk_ctl_dev_data imx94_dispmix_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx94_dispmix_csr_clk_dev_data),
+ .clk_dev_data = imx94_dispmix_csr_clk_dev_data,
+ .clk_reg_offset = 0,
+ .rpm_enabled = true,
+};
+
static int imx95_bc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -342,8 +388,10 @@ static int imx95_bc_probe(struct platform_device *pdev)
if (!clk_hw_data)
return -ENOMEM;
- if (bc_data->rpm_enabled)
- pm_runtime_enable(&pdev->dev);
+ if (bc_data->rpm_enabled) {
+ devm_pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume_and_get(&pdev->dev);
+ }
clk_hw_data->num = bc_data->num_clks;
hws = clk_hw_data->hws;
@@ -383,8 +431,10 @@ static int imx95_bc_probe(struct platform_device *pdev)
goto cleanup;
}
- if (pm_runtime_enabled(bc->dev))
+ if (pm_runtime_enabled(bc->dev)) {
+ pm_runtime_put_sync(&pdev->dev);
clk_disable_unprepare(bc->clk_apb);
+ }
return 0;
@@ -395,9 +445,6 @@ cleanup:
clk_hw_unregister(hws[i]);
}
- if (bc_data->rpm_enabled)
- pm_runtime_disable(&pdev->dev);
-
return ret;
}
@@ -466,10 +513,12 @@ static const struct dev_pm_ops imx95_bc_pm_ops = {
};
static const struct of_device_id imx95_bc_of_match[] = {
+ { .compatible = "nxp,imx94-display-csr", .data = &imx94_dispmix_csr_dev_data },
+ { .compatible = "nxp,imx94-lvds-csr", .data = &imx94_lvds_csr_dev_data },
{ .compatible = "nxp,imx95-camera-csr", .data = &camblk_dev_data },
{ .compatible = "nxp,imx95-display-master-csr", },
- { .compatible = "nxp,imx95-lvds-csr", .data = &lvds_csr_dev_data },
- { .compatible = "nxp,imx95-display-csr", .data = &dispmix_csr_dev_data },
+ { .compatible = "nxp,imx95-display-csr", .data = &imx95_dispmix_csr_dev_data },
+ { .compatible = "nxp,imx95-lvds-csr", .data = &imx95_lvds_csr_dev_data },
{ .compatible = "nxp,imx95-hsio-blk-ctl", .data = &hsio_blk_ctl_dev_data },
{ .compatible = "nxp,imx95-vpu-csr", .data = &vpublk_dev_data },
{ .compatible = "nxp,imx95-netcmix-blk-ctrl", .data = &netcmix_dev_data},
diff --git a/drivers/clk/imx/clk-pfd.c b/drivers/clk/imx/clk-pfd.c
index 5cf0149dfa15..31220fa7882b 100644
--- a/drivers/clk/imx/clk-pfd.c
+++ b/drivers/clk/imx/clk-pfd.c
@@ -62,24 +62,26 @@ static unsigned long clk_pfd_recalc_rate(struct clk_hw *hw,
return tmp;
}
-static long clk_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u64 tmp = *prate;
+ u64 tmp = req->best_parent_rate;
u8 frac;
- tmp = tmp * 18 + rate / 2;
- do_div(tmp, rate);
+ tmp = tmp * 18 + req->rate / 2;
+ do_div(tmp, req->rate);
frac = tmp;
if (frac < 12)
frac = 12;
else if (frac > 35)
frac = 35;
- tmp = *prate;
+ tmp = req->best_parent_rate;
tmp *= 18;
do_div(tmp, frac);
- return tmp;
+ req->rate = tmp;
+
+ return 0;
}
static int clk_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -117,7 +119,7 @@ static const struct clk_ops clk_pfd_ops = {
.enable = clk_pfd_enable,
.disable = clk_pfd_disable,
.recalc_rate = clk_pfd_recalc_rate,
- .round_rate = clk_pfd_round_rate,
+ .determine_rate = clk_pfd_determine_rate,
.set_rate = clk_pfd_set_rate,
.is_enabled = clk_pfd_is_enabled,
};
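
Worked example of the PFD rounding above, assuming the usual 528 MHz PLL
parent: for a 400 MHz request, tmp = 528 * 18 + 400 / 2 = 9704 (in MHz
units), and 9704 / 400 truncates to frac = 24, which is inside the
[12, 35] clamp; the op therefore reports 528 * 18 / 24 = 396 MHz as the
achievable rate.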
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index f290981ea13b..36d0e80b55b8 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -216,8 +216,8 @@ found:
t->mdiv, t->kdiv);
}
-static long clk_pll1416x_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll1416x_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll14xx *pll = to_clk_pll14xx(hw);
const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
@@ -225,22 +225,29 @@ static long clk_pll1416x_round_rate(struct clk_hw *hw, unsigned long rate,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
- if (rate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
/* return minimum supported value */
- return rate_table[pll->rate_count - 1].rate;
+ req->rate = rate_table[pll->rate_count - 1].rate;
+
+ return 0;
}
-static long clk_pll1443x_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll1443x_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll14xx *pll = to_clk_pll14xx(hw);
struct imx_pll14xx_rate_table t;
- imx_pll14xx_calc_settings(pll, rate, *prate, &t);
+ imx_pll14xx_calc_settings(pll, req->rate, req->best_parent_rate, &t);
+
+ req->rate = t.rate;
- return t.rate;
+ return 0;
}
static unsigned long clk_pll14xx_recalc_rate(struct clk_hw *hw,
@@ -470,7 +477,7 @@ static const struct clk_ops clk_pll1416x_ops = {
.unprepare = clk_pll14xx_unprepare,
.is_prepared = clk_pll14xx_is_prepared,
.recalc_rate = clk_pll14xx_recalc_rate,
- .round_rate = clk_pll1416x_round_rate,
+ .determine_rate = clk_pll1416x_determine_rate,
.set_rate = clk_pll1416x_set_rate,
};
@@ -483,7 +490,7 @@ static const struct clk_ops clk_pll1443x_ops = {
.unprepare = clk_pll14xx_unprepare,
.is_prepared = clk_pll14xx_is_prepared,
.recalc_rate = clk_pll14xx_recalc_rate,
- .round_rate = clk_pll1443x_round_rate,
+ .determine_rate = clk_pll1443x_determine_rate,
.set_rate = clk_pll1443x_set_rate,
};
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index ff17f0664faa..bb497ad5e0ae 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -178,18 +178,25 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 dp_op, dp_mfd, dp_mfn;
int ret;
- ret = __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
- if (ret)
- return ret;
+ ret = __clk_pllv2_set_rate(req->rate, req->best_parent_rate, &dp_op,
+ &dp_mfd, &dp_mfn);
+ if (ret) {
+ req->rate = ret;
- return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
- dp_op, dp_mfd, dp_mfn);
+ return 0;
+ }
+
+ req->rate = __clk_pllv2_recalc_rate(req->best_parent_rate,
+ MXC_PLL_DP_CTL_DPDCK0_2_EN, dp_op,
+ dp_mfd, dp_mfn);
+
+ return 0;
}
static int clk_pllv2_prepare(struct clk_hw *hw)
@@ -235,7 +242,7 @@ static const struct clk_ops clk_pllv2_ops = {
.prepare = clk_pllv2_prepare,
.unprepare = clk_pllv2_unprepare,
.recalc_rate = clk_pllv2_recalc_rate,
- .round_rate = clk_pllv2_round_rate,
+ .determine_rate = clk_pllv2_determine_rate,
.set_rate = clk_pllv2_set_rate,
};
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 11fb238ee8f0..b99508367bcb 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -117,13 +117,14 @@ static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
return (div == 1) ? parent_rate * 22 : parent_rate * 20;
}
-static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
- return (rate >= parent_rate * 22) ? parent_rate * 22 :
- parent_rate * 20;
+ req->rate = (req->rate >= parent_rate * 22) ? parent_rate * 22 : parent_rate * 20;
+
+ return 0;
}
static int clk_pllv3_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -152,7 +153,7 @@ static const struct clk_ops clk_pllv3_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_recalc_rate,
- .round_rate = clk_pllv3_round_rate,
+ .determine_rate = clk_pllv3_determine_rate,
.set_rate = clk_pllv3_set_rate,
};
@@ -165,21 +166,23 @@ static unsigned long clk_pllv3_sys_recalc_rate(struct clk_hw *hw,
return parent_rate * div / 2;
}
-static long clk_pllv3_sys_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_sys_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
unsigned long min_rate = parent_rate * 54 / 2;
unsigned long max_rate = parent_rate * 108 / 2;
u32 div;
- if (rate > max_rate)
- rate = max_rate;
- else if (rate < min_rate)
- rate = min_rate;
- div = rate * 2 / parent_rate;
+ if (req->rate > max_rate)
+ req->rate = max_rate;
+ else if (req->rate < min_rate)
+ req->rate = min_rate;
+ div = req->rate * 2 / parent_rate;
- return parent_rate * div / 2;
+ req->rate = parent_rate * div / 2;
+
+ return 0;
}
static int clk_pllv3_sys_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -207,7 +210,7 @@ static const struct clk_ops clk_pllv3_sys_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_sys_recalc_rate,
- .round_rate = clk_pllv3_sys_round_rate,
+ .determine_rate = clk_pllv3_sys_determine_rate,
.set_rate = clk_pllv3_sys_set_rate,
};
@@ -226,10 +229,10 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
return parent_rate * div + (unsigned long)temp64;
}
-static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_av_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
unsigned long min_rate = parent_rate * 27;
unsigned long max_rate = parent_rate * 54;
u32 div;
@@ -237,16 +240,16 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
u32 max_mfd = 0x3FFFFFFF;
u64 temp64;
- if (rate > max_rate)
- rate = max_rate;
- else if (rate < min_rate)
- rate = min_rate;
+ if (req->rate > max_rate)
+ req->rate = max_rate;
+ else if (req->rate < min_rate)
+ req->rate = min_rate;
if (parent_rate <= max_mfd)
mfd = parent_rate;
- div = rate / parent_rate;
- temp64 = (u64) (rate - div * parent_rate);
+ div = req->rate / parent_rate;
+ temp64 = (u64) (req->rate - div * parent_rate);
temp64 *= mfd;
temp64 = div64_ul(temp64, parent_rate);
mfn = temp64;
@@ -255,7 +258,9 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
temp64 *= mfn;
do_div(temp64, mfd);
- return parent_rate * div + (unsigned long)temp64;
+ req->rate = parent_rate * div + (unsigned long)temp64;
+
+ return 0;
}
static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -296,7 +301,7 @@ static const struct clk_ops clk_pllv3_av_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_av_recalc_rate,
- .round_rate = clk_pllv3_av_round_rate,
+ .determine_rate = clk_pllv3_av_determine_rate,
.set_rate = clk_pllv3_av_set_rate,
};
@@ -355,12 +360,15 @@ static unsigned long clk_pllv3_vf610_recalc_rate(struct clk_hw *hw,
return clk_pllv3_vf610_mf_to_rate(parent_rate, mf);
}
-static long clk_pllv3_vf610_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_vf610_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- struct clk_pllv3_vf610_mf mf = clk_pllv3_vf610_rate_to_mf(*prate, rate);
+ struct clk_pllv3_vf610_mf mf = clk_pllv3_vf610_rate_to_mf(req->best_parent_rate,
+ req->rate);
+
+ req->rate = clk_pllv3_vf610_mf_to_rate(req->best_parent_rate, mf);
- return clk_pllv3_vf610_mf_to_rate(*prate, mf);
+ return 0;
}
static int clk_pllv3_vf610_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -389,7 +397,7 @@ static const struct clk_ops clk_pllv3_vf610_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_vf610_recalc_rate,
- .round_rate = clk_pllv3_vf610_round_rate,
+ .determine_rate = clk_pllv3_vf610_determine_rate,
.set_rate = clk_pllv3_vf610_set_rate,
};
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index 9b136c951762..01d05b5d5438 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -95,11 +95,11 @@ static unsigned long clk_pllv4_recalc_rate(struct clk_hw *hw,
return (parent_rate * mult) + (u32)temp64;
}
-static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv4_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pllv4 *pll = to_clk_pllv4(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
unsigned long round_rate, i;
u32 mfn, mfd = DEFAULT_MFD;
bool found = false;
@@ -107,7 +107,7 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
u32 mult;
if (pll->use_mult_range) {
- temp64 = (u64)rate;
+ temp64 = (u64)req->rate;
do_div(temp64, parent_rate);
mult = temp64;
if (mult >= pllv4_mult_range[1] &&
@@ -118,7 +118,7 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
} else {
for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
round_rate = parent_rate * pllv4_mult_table[i];
- if (rate >= round_rate) {
+ if (req->rate >= round_rate) {
found = true;
break;
}
@@ -127,14 +127,16 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
if (!found) {
pr_warn("%s: unable to round rate %lu, parent rate %lu\n",
- clk_hw_get_name(hw), rate, parent_rate);
+ clk_hw_get_name(hw), req->rate, parent_rate);
+ req->rate = 0;
+
return 0;
}
if (parent_rate <= MAX_MFD)
mfd = parent_rate;
- temp64 = (u64)(rate - round_rate);
+ temp64 = (u64)(req->rate - round_rate);
temp64 *= mfd;
do_div(temp64, parent_rate);
mfn = temp64;
@@ -145,14 +147,19 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
* pair of mfn/mfd, we simply return the round_rate without using
* the frac part.
*/
- if (mfn >= mfd)
- return round_rate;
+ if (mfn >= mfd) {
+ req->rate = round_rate;
+
+ return 0;
+ }
temp64 = (u64)parent_rate;
temp64 *= mfn;
do_div(temp64, mfd);
- return round_rate + (u32)temp64;
+ req->rate = round_rate + (u32)temp64;
+
+ return 0;
}
static bool clk_pllv4_is_valid_mult(struct clk_pllv4 *pll, unsigned int mult)
@@ -229,7 +236,7 @@ static void clk_pllv4_unprepare(struct clk_hw *hw)
static const struct clk_ops clk_pllv4_ops = {
.recalc_rate = clk_pllv4_recalc_rate,
- .round_rate = clk_pllv4_round_rate,
+ .determine_rate = clk_pllv4_determine_rate,
.set_rate = clk_pllv4_set_rate,
.prepare = clk_pllv4_prepare,
.unprepare = clk_pllv4_unprepare,
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index b27186aaf2a1..34c9dc1fb20e 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -269,24 +269,6 @@ static int clk_scu_determine_rate(struct clk_hw *hw,
return 0;
}
-/*
- * clk_scu_round_rate - Round clock rate for a SCU clock
- * @hw: clock to round rate for
- * @rate: rate to round
- * @parent_rate: parent rate provided by common clock framework, not used
- *
- * Returns the current clock rate, or zero in failure.
- */
-static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- /*
- * Assume we support all the requested rate and let the SCU firmware
- * to handle the left work
- */
- return rate;
-}
-
static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -454,7 +436,7 @@ static const struct clk_ops clk_scu_ops = {
static const struct clk_ops clk_scu_cpu_ops = {
.recalc_rate = clk_scu_recalc_rate,
- .round_rate = clk_scu_round_rate,
+ .determine_rate = clk_scu_determine_rate,
.set_rate = clk_scu_atf_set_cpu_rate,
.prepare = clk_scu_prepare,
.unprepare = clk_scu_unprepare,
@@ -462,7 +444,7 @@ static const struct clk_ops clk_scu_cpu_ops = {
static const struct clk_ops clk_scu_pi_ops = {
.recalc_rate = clk_scu_recalc_rate,
- .round_rate = clk_scu_round_rate,
+ .determine_rate = clk_scu_determine_rate,
.set_rate = clk_scu_set_rate,
};
@@ -567,7 +549,6 @@ static int imx_clk_scu_probe(struct platform_device *pdev)
if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
(clk->rsrc == IMX_SC_R_A72))) {
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
}
@@ -729,7 +710,7 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
if (ret)
goto put_device;
- /* For API backwards compatiblilty, simply return NULL for success */
+ /* For API backwards compatibility, simply return NULL for success */
return NULL;
put_device:
@@ -766,15 +747,15 @@ static unsigned long clk_gpr_div_scu_recalc_rate(struct clk_hw *hw,
return err ? 0 : rate;
}
-static long clk_gpr_div_scu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_gpr_div_scu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (rate < *prate)
- rate = *prate / 2;
+ if (req->rate < req->best_parent_rate)
+ req->rate = req->best_parent_rate / 2;
else
- rate = *prate;
+ req->rate = req->best_parent_rate;
- return rate;
+ return 0;
}
static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -793,7 +774,7 @@ static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_gpr_div_scu_ops = {
.recalc_rate = clk_gpr_div_scu_recalc_rate,
- .round_rate = clk_gpr_div_scu_round_rate,
+ .determine_rate = clk_gpr_div_scu_determine_rate,
.set_rate = clk_gpr_div_scu_set_rate,
};
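
Note that clk-scu.c already had a clk_scu_determine_rate() (its tail is
visible at the top of this file's first hunk), so the shim above is simply
deleted and both ops tables switch over to the existing op. Going by the
removed comment, the surviving op is presumably the same accept-everything
pass-through, i.e. the determine_rate equivalent of:

/* Accept the requested rate as-is; the SCU firmware does the real work. */
static int scu_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	return 0;	/* req->rate is left exactly as requested */
}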
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index 99da9bd86e63..0d417d69dab7 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -239,7 +239,7 @@ ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
*
* Register the clocks described by the CGU with the common clock framework.
*
- * Return: 0 on success or -errno if unsuccesful.
+ * Return: 0 on success or -errno if unsuccessful.
*/
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu);
diff --git a/drivers/clk/kunit_clk_hw_get_dev_of_node.dtso b/drivers/clk/kunit_clk_hw_get_dev_of_node.dtso
new file mode 100644
index 000000000000..760717da3235
--- /dev/null
+++ b/drivers/clk/kunit_clk_hw_get_dev_of_node.dtso
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ kunit-clock-controller {
+ compatible = "test,clk-hw-get-dev-of-node";
+ #clock-cells = <0>;
+ };
+};
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index ff003dc5ab20..7197d23543b8 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -5,6 +5,7 @@ menu "Clock support for Amlogic platforms"
config COMMON_CLK_MESON_REGMAP
tristate
select REGMAP
+ select MFD_SYSCON
config COMMON_CLK_MESON_DUALDIV
tristate
@@ -106,7 +107,8 @@ config COMMON_CLK_AXG_AUDIO
select COMMON_CLK_MESON_SCLK_DIV
select COMMON_CLK_MESON_CLKC_UTILS
select REGMAP_MMIO
- select RESET_CONTROLLER
+ select AUXILIARY_BUS
+ imply RESET_MESON_AUX
help
Support for the audio clock controller on AmLogic A113D devices,
aka axg, Say Y if you want audio subsystem to work.
diff --git a/drivers/clk/meson/a1-peripherals.c b/drivers/clk/meson/a1-peripherals.c
index 36489e0f948a..1f5d445d44fe 100644
--- a/drivers/clk/meson/a1-peripherals.c
+++ b/drivers/clk/meson/a1-peripherals.c
@@ -10,13 +10,42 @@
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include "a1-peripherals.h"
#include "clk-dualdiv.h"
#include "clk-regmap.h"
#include "meson-clkc-utils.h"
#include <dt-bindings/clock/amlogic,a1-peripherals-clkc.h>
+#define SYS_OSCIN_CTRL 0x0
+#define RTC_BY_OSCIN_CTRL0 0x4
+#define RTC_BY_OSCIN_CTRL1 0x8
+#define RTC_CTRL 0xc
+#define SYS_CLK_CTRL0 0x10
+#define SYS_CLK_EN0 0x1c
+#define SYS_CLK_EN1 0x20
+#define AXI_CLK_EN 0x24
+#define DSPA_CLK_EN 0x28
+#define DSPB_CLK_EN 0x2c
+#define DSPA_CLK_CTRL0 0x30
+#define DSPB_CLK_CTRL0 0x34
+#define CLK12_24_CTRL 0x38
+#define GEN_CLK_CTRL 0x3c
+#define SAR_ADC_CLK_CTRL 0xc0
+#define PWM_CLK_AB_CTRL 0xc4
+#define PWM_CLK_CD_CTRL 0xc8
+#define PWM_CLK_EF_CTRL 0xcc
+#define SPICC_CLK_CTRL 0xd0
+#define TS_CLK_CTRL 0xd4
+#define SPIFC_CLK_CTRL 0xd8
+#define USB_BUSCLK_CTRL 0xdc
+#define SD_EMMC_CLK_CTRL 0xe0
+#define CECA_CLK_CTRL0 0xe4
+#define CECA_CLK_CTRL1 0xe8
+#define CECB_CLK_CTRL0 0xec
+#define CECB_CLK_CTRL1 0xf0
+#define PSRAM_CLK_CTRL 0xf4
+#define DMC_CLK_CTRL 0xf8
+
static struct clk_regmap xtal_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
@@ -2026,163 +2055,6 @@ static struct clk_hw *a1_periphs_hw_clks[] = {
[CLKID_DMC_SEL2] = &dmc_sel2.hw,
};
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const a1_periphs_regmaps[] = {
- &xtal_in,
- &fixpll_in,
- &usb_phy_in,
- &usb_ctrl_in,
- &hifipll_in,
- &syspll_in,
- &dds_in,
- &sys,
- &clktree,
- &reset_ctrl,
- &analog_ctrl,
- &pwr_ctrl,
- &pad_ctrl,
- &sys_ctrl,
- &temp_sensor,
- &am2axi_dev,
- &spicc_b,
- &spicc_a,
- &msr,
- &audio,
- &jtag_ctrl,
- &saradc_en,
- &pwm_ef,
- &pwm_cd,
- &pwm_ab,
- &cec,
- &i2c_s,
- &ir_ctrl,
- &i2c_m_d,
- &i2c_m_c,
- &i2c_m_b,
- &i2c_m_a,
- &acodec,
- &otp,
- &sd_emmc_a,
- &usb_phy,
- &usb_ctrl,
- &sys_dspb,
- &sys_dspa,
- &dma,
- &irq_ctrl,
- &nic,
- &gic,
- &uart_c,
- &uart_b,
- &uart_a,
- &sys_psram,
- &rsa,
- &coresight,
- &am2axi_vad,
- &audio_vad,
- &axi_dmc,
- &axi_psram,
- &ramb,
- &rama,
- &axi_spifc,
- &axi_nic,
- &axi_dma,
- &cpu_ctrl,
- &rom,
- &prod_i2c,
- &dspa_sel,
- &dspb_sel,
- &dspa_en,
- &dspa_en_nic,
- &dspb_en,
- &dspb_en_nic,
- &rtc,
- &ceca_32k_out,
- &cecb_32k_out,
- &clk_24m,
- &clk_12m,
- &fclk_div2_divn,
- &gen,
- &saradc_sel,
- &saradc,
- &pwm_a,
- &pwm_b,
- &pwm_c,
- &pwm_d,
- &pwm_e,
- &pwm_f,
- &spicc,
- &ts,
- &spifc,
- &usb_bus,
- &sd_emmc,
- &psram,
- &dmc,
- &sys_a_sel,
- &sys_a_div,
- &sys_a,
- &sys_b_sel,
- &sys_b_div,
- &sys_b,
- &dspa_a_sel,
- &dspa_a_div,
- &dspa_a,
- &dspa_b_sel,
- &dspa_b_div,
- &dspa_b,
- &dspb_a_sel,
- &dspb_a_div,
- &dspb_a,
- &dspb_b_sel,
- &dspb_b_div,
- &dspb_b,
- &rtc_32k_in,
- &rtc_32k_div,
- &rtc_32k_xtal,
- &rtc_32k_sel,
- &cecb_32k_in,
- &cecb_32k_div,
- &cecb_32k_sel_pre,
- &cecb_32k_sel,
- &ceca_32k_in,
- &ceca_32k_div,
- &ceca_32k_sel_pre,
- &ceca_32k_sel,
- &fclk_div2_divn_pre,
- &gen_sel,
- &gen_div,
- &saradc_div,
- &pwm_a_sel,
- &pwm_a_div,
- &pwm_b_sel,
- &pwm_b_div,
- &pwm_c_sel,
- &pwm_c_div,
- &pwm_d_sel,
- &pwm_d_div,
- &pwm_e_sel,
- &pwm_e_div,
- &pwm_f_sel,
- &pwm_f_div,
- &spicc_sel,
- &spicc_div,
- &spicc_sel2,
- &ts_div,
- &spifc_sel,
- &spifc_div,
- &spifc_sel2,
- &usb_bus_sel,
- &usb_bus_div,
- &sd_emmc_sel,
- &sd_emmc_div,
- &sd_emmc_sel2,
- &psram_sel,
- &psram_div,
- &psram_sel2,
- &dmc_sel,
- &dmc_div,
- &dmc_sel2,
-};
-
static const struct regmap_config a1_periphs_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
@@ -2200,7 +2072,7 @@ static int meson_a1_periphs_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
void __iomem *base;
struct regmap *map;
- int clkid, i, err;
+ int clkid, err;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -2212,10 +2084,6 @@ static int meson_a1_periphs_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(map),
"can't init regmap mmio region\n");
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(a1_periphs_regmaps); i++)
- a1_periphs_regmaps[i]->map = map;
-
for (clkid = 0; clkid < a1_periphs_clks.num; clkid++) {
err = devm_clk_hw_register(dev, a1_periphs_clks.hws[clkid]);
if (err)
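
This and the following Amlogic diffs (a1-pll, axg-aoclk, axg-audio, axg)
all delete their "convenience" regmap tables together with the probe loop
that pointed each clk_regmap at the MMIO regmap. That only works if the
clk-regmap core can now resolve the regmap by itself at registration time;
the select MFD_SYSCON added to COMMON_CLK_MESON_REGMAP in the Kconfig hunk
above points at a syscon/OF-based lookup, roughly along these lines (the
init hook below is an inferred sketch, not code from this series, and the
clk_hw_get_dev()/clk_hw_get_of_node() helpers are what the KUnit overlay
added earlier appears to exercise):

static int clk_regmap_init(struct clk_hw *hw)
{
	struct clk_regmap *clk = to_clk_regmap(hw);
	struct device *dev = clk_hw_get_dev(hw);

	if (clk->map)
		return 0;

	/* prefer the provider device's regmap, fall back to the OF node */
	clk->map = dev ? dev_get_regmap(dev, NULL)
		       : device_node_to_regmap(clk_hw_get_of_node(hw));

	return IS_ERR_OR_NULL(clk->map) ? -ENODEV : 0;
}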
diff --git a/drivers/clk/meson/a1-peripherals.h b/drivers/clk/meson/a1-peripherals.h
deleted file mode 100644
index 26de8530184a..000000000000
--- a/drivers/clk/meson/a1-peripherals.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Amlogic A1 Peripherals Clock Controller internals
- *
- * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
- * Author: Jian Hu <jian.hu@amlogic.com>
- *
- * Copyright (c) 2023, SberDevices. All Rights Reserved.
- * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
- */
-
-#ifndef __A1_PERIPHERALS_H
-#define __A1_PERIPHERALS_H
-
-/* peripherals clock controller register offset */
-#define SYS_OSCIN_CTRL 0x0
-#define RTC_BY_OSCIN_CTRL0 0x4
-#define RTC_BY_OSCIN_CTRL1 0x8
-#define RTC_CTRL 0xc
-#define SYS_CLK_CTRL0 0x10
-#define SYS_CLK_EN0 0x1c
-#define SYS_CLK_EN1 0x20
-#define AXI_CLK_EN 0x24
-#define DSPA_CLK_EN 0x28
-#define DSPB_CLK_EN 0x2c
-#define DSPA_CLK_CTRL0 0x30
-#define DSPB_CLK_CTRL0 0x34
-#define CLK12_24_CTRL 0x38
-#define GEN_CLK_CTRL 0x3c
-#define SAR_ADC_CLK_CTRL 0xc0
-#define PWM_CLK_AB_CTRL 0xc4
-#define PWM_CLK_CD_CTRL 0xc8
-#define PWM_CLK_EF_CTRL 0xcc
-#define SPICC_CLK_CTRL 0xd0
-#define TS_CLK_CTRL 0xd4
-#define SPIFC_CLK_CTRL 0xd8
-#define USB_BUSCLK_CTRL 0xdc
-#define SD_EMMC_CLK_CTRL 0xe0
-#define CECA_CLK_CTRL0 0xe4
-#define CECA_CLK_CTRL1 0xe8
-#define CECB_CLK_CTRL0 0xec
-#define CECB_CLK_CTRL1 0xf0
-#define PSRAM_CLK_CTRL 0xf4
-#define DMC_CLK_CTRL 0xf8
-
-#endif /* __A1_PERIPHERALS_H */
diff --git a/drivers/clk/meson/a1-pll.c b/drivers/clk/meson/a1-pll.c
index 86d8159f3319..dabd4fad1f57 100644
--- a/drivers/clk/meson/a1-pll.c
+++ b/drivers/clk/meson/a1-pll.c
@@ -10,10 +10,20 @@
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include "a1-pll.h"
+#include "clk-pll.h"
#include "clk-regmap.h"
#include "meson-clkc-utils.h"
+#define ANACTRL_FIXPLL_CTRL0 0x0
+#define ANACTRL_FIXPLL_CTRL1 0x4
+#define ANACTRL_FIXPLL_STS 0x14
+#define ANACTRL_HIFIPLL_CTRL0 0xc0
+#define ANACTRL_HIFIPLL_CTRL1 0xc4
+#define ANACTRL_HIFIPLL_CTRL2 0xc8
+#define ANACTRL_HIFIPLL_CTRL3 0xcc
+#define ANACTRL_HIFIPLL_CTRL4 0xd0
+#define ANACTRL_HIFIPLL_STS 0xd4
+
#include <dt-bindings/clock/amlogic,a1-pll-clkc.h>
static struct clk_regmap fixed_pll_dco = {
@@ -285,16 +295,6 @@ static struct clk_hw *a1_pll_hw_clks[] = {
[CLKID_HIFI_PLL] = &hifi_pll.hw,
};
-static struct clk_regmap *const a1_pll_regmaps[] = {
- &fixed_pll_dco,
- &fixed_pll,
- &fclk_div2,
- &fclk_div3,
- &fclk_div5,
- &fclk_div7,
- &hifi_pll,
-};
-
static const struct regmap_config a1_pll_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
@@ -312,7 +312,7 @@ static int meson_a1_pll_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
void __iomem *base;
struct regmap *map;
- int clkid, i, err;
+ int clkid, err;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -324,10 +324,6 @@ static int meson_a1_pll_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(map),
"can't init regmap mmio region\n");
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(a1_pll_regmaps); i++)
- a1_pll_regmaps[i]->map = map;
-
/* Register clocks */
for (clkid = 0; clkid < a1_pll_clks.num; clkid++) {
err = devm_clk_hw_register(dev, a1_pll_clks.hws[clkid]);
diff --git a/drivers/clk/meson/a1-pll.h b/drivers/clk/meson/a1-pll.h
deleted file mode 100644
index 4be17b2bf383..000000000000
--- a/drivers/clk/meson/a1-pll.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Amlogic A1 PLL Clock Controller internals
- *
- * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
- * Author: Jian Hu <jian.hu@amlogic.com>
- *
- * Copyright (c) 2023, SberDevices. All Rights Reserved.
- * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
- */
-
-#ifndef __A1_PLL_H
-#define __A1_PLL_H
-
-#include "clk-pll.h"
-
-/* PLL register offset */
-#define ANACTRL_FIXPLL_CTRL0 0x0
-#define ANACTRL_FIXPLL_CTRL1 0x4
-#define ANACTRL_FIXPLL_STS 0x14
-#define ANACTRL_HIFIPLL_CTRL0 0xc0
-#define ANACTRL_HIFIPLL_CTRL1 0xc4
-#define ANACTRL_HIFIPLL_CTRL2 0xc8
-#define ANACTRL_HIFIPLL_CTRL3 0xcc
-#define ANACTRL_HIFIPLL_CTRL4 0xd0
-#define ANACTRL_HIFIPLL_STS 0xd4
-
-#endif /* __A1_PLL_H */
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index f44091ffb57d..cd5d0b5ebdb2 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -270,26 +270,6 @@ static const unsigned int axg_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_regmap *axg_aoclk_regmap[] = {
- &axg_aoclk_remote,
- &axg_aoclk_i2c_master,
- &axg_aoclk_i2c_slave,
- &axg_aoclk_uart1,
- &axg_aoclk_uart2,
- &axg_aoclk_ir_blaster,
- &axg_aoclk_saradc,
- &axg_aoclk_cts_oscin,
- &axg_aoclk_32k_pre,
- &axg_aoclk_32k_div,
- &axg_aoclk_32k_sel,
- &axg_aoclk_32k,
- &axg_aoclk_cts_rtc_oscin,
- &axg_aoclk_clk81,
- &axg_aoclk_saradc_mux,
- &axg_aoclk_saradc_div,
- &axg_aoclk_saradc_gate,
-};
-
static struct clk_hw *axg_aoclk_hw_clks[] = {
[CLKID_AO_REMOTE] = &axg_aoclk_remote.hw,
[CLKID_AO_I2C_MASTER] = &axg_aoclk_i2c_master.hw,
@@ -314,8 +294,6 @@ static const struct meson_aoclk_data axg_aoclkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
.num_reset = ARRAY_SIZE(axg_aoclk_reset),
.reset = axg_aoclk_reset,
- .num_clks = ARRAY_SIZE(axg_aoclk_regmap),
- .clks = axg_aoclk_regmap,
.hw_clks = {
.hws = axg_aoclk_hw_clks,
.num = ARRAY_SIZE(axg_aoclk_hw_clks),
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 9df627b142f8..fd7eca652261 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -4,6 +4,7 @@
* Author: Jerome Brunet <jbrunet@baylibre.com>
*/
+#include <linux/auxiliary_bus.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
@@ -12,17 +13,70 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
-#include <linux/reset-controller.h>
#include <linux/slab.h>
#include "meson-clkc-utils.h"
-#include "axg-audio.h"
#include "clk-regmap.h"
#include "clk-phase.h"
#include "sclk-div.h"
#include <dt-bindings/clock/axg-audio-clkc.h>
+/* Audio clock register offsets */
+#define AUDIO_CLK_GATE_EN 0x000
+#define AUDIO_MCLK_A_CTRL 0x004
+#define AUDIO_MCLK_B_CTRL 0x008
+#define AUDIO_MCLK_C_CTRL 0x00C
+#define AUDIO_MCLK_D_CTRL 0x010
+#define AUDIO_MCLK_E_CTRL 0x014
+#define AUDIO_MCLK_F_CTRL 0x018
+#define AUDIO_MST_PAD_CTRL0 0x01c
+#define AUDIO_MST_PAD_CTRL1 0x020
+#define AUDIO_SW_RESET 0x024
+#define AUDIO_MST_A_SCLK_CTRL0 0x040
+#define AUDIO_MST_A_SCLK_CTRL1 0x044
+#define AUDIO_MST_B_SCLK_CTRL0 0x048
+#define AUDIO_MST_B_SCLK_CTRL1 0x04C
+#define AUDIO_MST_C_SCLK_CTRL0 0x050
+#define AUDIO_MST_C_SCLK_CTRL1 0x054
+#define AUDIO_MST_D_SCLK_CTRL0 0x058
+#define AUDIO_MST_D_SCLK_CTRL1 0x05C
+#define AUDIO_MST_E_SCLK_CTRL0 0x060
+#define AUDIO_MST_E_SCLK_CTRL1 0x064
+#define AUDIO_MST_F_SCLK_CTRL0 0x068
+#define AUDIO_MST_F_SCLK_CTRL1 0x06C
+#define AUDIO_CLK_TDMIN_A_CTRL 0x080
+#define AUDIO_CLK_TDMIN_B_CTRL 0x084
+#define AUDIO_CLK_TDMIN_C_CTRL 0x088
+#define AUDIO_CLK_TDMIN_LB_CTRL 0x08C
+#define AUDIO_CLK_TDMOUT_A_CTRL 0x090
+#define AUDIO_CLK_TDMOUT_B_CTRL 0x094
+#define AUDIO_CLK_TDMOUT_C_CTRL 0x098
+#define AUDIO_CLK_SPDIFIN_CTRL 0x09C
+#define AUDIO_CLK_SPDIFOUT_CTRL 0x0A0
+#define AUDIO_CLK_RESAMPLE_CTRL 0x0A4
+#define AUDIO_CLK_LOCKER_CTRL 0x0A8
+#define AUDIO_CLK_PDMIN_CTRL0 0x0AC
+#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
+#define AUDIO_CLK_SPDIFOUT_B_CTRL 0x0B4
+
+/* SM1 introduces new registers and some shifts :( */
+#define AUDIO_CLK_GATE_EN1 0x004
+#define AUDIO_SM1_MCLK_A_CTRL 0x008
+#define AUDIO_SM1_MCLK_B_CTRL 0x00C
+#define AUDIO_SM1_MCLK_C_CTRL 0x010
+#define AUDIO_SM1_MCLK_D_CTRL 0x014
+#define AUDIO_SM1_MCLK_E_CTRL 0x018
+#define AUDIO_SM1_MCLK_F_CTRL 0x01C
+#define AUDIO_SM1_MST_PAD_CTRL0 0x020
+#define AUDIO_SM1_MST_PAD_CTRL1 0x024
+#define AUDIO_SM1_SW_RESET0 0x028
+#define AUDIO_SM1_SW_RESET1 0x02C
+#define AUDIO_CLK81_CTRL 0x030
+#define AUDIO_CLK81_EN 0x034
+#define AUDIO_EARCRX_CMDC_CLK_CTRL 0x0D0
+#define AUDIO_EARCRX_DMAC_CLK_CTRL 0x0D4
+
#define AUD_GATE(_name, _reg, _bit, _pname, _iflags) { \
.data = &(struct clk_regmap_gate_data){ \
.offset = (_reg), \
@@ -1257,505 +1311,6 @@ static struct clk_hw *sm1_audio_hw_clks[] = {
[AUD_CLKID_EARCRX_DMAC] = &sm1_earcrx_dmac_clk.hw,
};
-
-/* Convenience table to populate regmap in .probe(). */
-static struct clk_regmap *const axg_clk_regmaps[] = {
- &ddr_arb,
- &pdm,
- &tdmin_a,
- &tdmin_b,
- &tdmin_c,
- &tdmin_lb,
- &tdmout_a,
- &tdmout_b,
- &tdmout_c,
- &frddr_a,
- &frddr_b,
- &frddr_c,
- &toddr_a,
- &toddr_b,
- &toddr_c,
- &loopback,
- &spdifin,
- &spdifout,
- &resample,
- &power_detect,
- &mst_a_mclk_sel,
- &mst_b_mclk_sel,
- &mst_c_mclk_sel,
- &mst_d_mclk_sel,
- &mst_e_mclk_sel,
- &mst_f_mclk_sel,
- &mst_a_mclk_div,
- &mst_b_mclk_div,
- &mst_c_mclk_div,
- &mst_d_mclk_div,
- &mst_e_mclk_div,
- &mst_f_mclk_div,
- &mst_a_mclk,
- &mst_b_mclk,
- &mst_c_mclk,
- &mst_d_mclk,
- &mst_e_mclk,
- &mst_f_mclk,
- &spdifout_clk_sel,
- &spdifout_clk_div,
- &spdifout_clk,
- &spdifin_clk_sel,
- &spdifin_clk_div,
- &spdifin_clk,
- &pdm_dclk_sel,
- &pdm_dclk_div,
- &pdm_dclk,
- &pdm_sysclk_sel,
- &pdm_sysclk_div,
- &pdm_sysclk,
- &mst_a_sclk_pre_en,
- &mst_b_sclk_pre_en,
- &mst_c_sclk_pre_en,
- &mst_d_sclk_pre_en,
- &mst_e_sclk_pre_en,
- &mst_f_sclk_pre_en,
- &mst_a_sclk_div,
- &mst_b_sclk_div,
- &mst_c_sclk_div,
- &mst_d_sclk_div,
- &mst_e_sclk_div,
- &mst_f_sclk_div,
- &mst_a_sclk_post_en,
- &mst_b_sclk_post_en,
- &mst_c_sclk_post_en,
- &mst_d_sclk_post_en,
- &mst_e_sclk_post_en,
- &mst_f_sclk_post_en,
- &mst_a_sclk,
- &mst_b_sclk,
- &mst_c_sclk,
- &mst_d_sclk,
- &mst_e_sclk,
- &mst_f_sclk,
- &mst_a_lrclk_div,
- &mst_b_lrclk_div,
- &mst_c_lrclk_div,
- &mst_d_lrclk_div,
- &mst_e_lrclk_div,
- &mst_f_lrclk_div,
- &mst_a_lrclk,
- &mst_b_lrclk,
- &mst_c_lrclk,
- &mst_d_lrclk,
- &mst_e_lrclk,
- &mst_f_lrclk,
- &tdmin_a_sclk_sel,
- &tdmin_b_sclk_sel,
- &tdmin_c_sclk_sel,
- &tdmin_lb_sclk_sel,
- &tdmout_a_sclk_sel,
- &tdmout_b_sclk_sel,
- &tdmout_c_sclk_sel,
- &tdmin_a_sclk_pre_en,
- &tdmin_b_sclk_pre_en,
- &tdmin_c_sclk_pre_en,
- &tdmin_lb_sclk_pre_en,
- &tdmout_a_sclk_pre_en,
- &tdmout_b_sclk_pre_en,
- &tdmout_c_sclk_pre_en,
- &tdmin_a_sclk_post_en,
- &tdmin_b_sclk_post_en,
- &tdmin_c_sclk_post_en,
- &tdmin_lb_sclk_post_en,
- &tdmout_a_sclk_post_en,
- &tdmout_b_sclk_post_en,
- &tdmout_c_sclk_post_en,
- &tdmin_a_sclk,
- &tdmin_b_sclk,
- &tdmin_c_sclk,
- &tdmin_lb_sclk,
- &axg_tdmout_a_sclk,
- &axg_tdmout_b_sclk,
- &axg_tdmout_c_sclk,
- &tdmin_a_lrclk,
- &tdmin_b_lrclk,
- &tdmin_c_lrclk,
- &tdmin_lb_lrclk,
- &tdmout_a_lrclk,
- &tdmout_b_lrclk,
- &tdmout_c_lrclk,
-};
-
-static struct clk_regmap *const g12a_clk_regmaps[] = {
- &ddr_arb,
- &pdm,
- &tdmin_a,
- &tdmin_b,
- &tdmin_c,
- &tdmin_lb,
- &tdmout_a,
- &tdmout_b,
- &tdmout_c,
- &frddr_a,
- &frddr_b,
- &frddr_c,
- &toddr_a,
- &toddr_b,
- &toddr_c,
- &loopback,
- &spdifin,
- &spdifout,
- &resample,
- &power_detect,
- &spdifout_b,
- &mst_a_mclk_sel,
- &mst_b_mclk_sel,
- &mst_c_mclk_sel,
- &mst_d_mclk_sel,
- &mst_e_mclk_sel,
- &mst_f_mclk_sel,
- &mst_a_mclk_div,
- &mst_b_mclk_div,
- &mst_c_mclk_div,
- &mst_d_mclk_div,
- &mst_e_mclk_div,
- &mst_f_mclk_div,
- &mst_a_mclk,
- &mst_b_mclk,
- &mst_c_mclk,
- &mst_d_mclk,
- &mst_e_mclk,
- &mst_f_mclk,
- &spdifout_clk_sel,
- &spdifout_clk_div,
- &spdifout_clk,
- &spdifin_clk_sel,
- &spdifin_clk_div,
- &spdifin_clk,
- &pdm_dclk_sel,
- &pdm_dclk_div,
- &pdm_dclk,
- &pdm_sysclk_sel,
- &pdm_sysclk_div,
- &pdm_sysclk,
- &mst_a_sclk_pre_en,
- &mst_b_sclk_pre_en,
- &mst_c_sclk_pre_en,
- &mst_d_sclk_pre_en,
- &mst_e_sclk_pre_en,
- &mst_f_sclk_pre_en,
- &mst_a_sclk_div,
- &mst_b_sclk_div,
- &mst_c_sclk_div,
- &mst_d_sclk_div,
- &mst_e_sclk_div,
- &mst_f_sclk_div,
- &mst_a_sclk_post_en,
- &mst_b_sclk_post_en,
- &mst_c_sclk_post_en,
- &mst_d_sclk_post_en,
- &mst_e_sclk_post_en,
- &mst_f_sclk_post_en,
- &mst_a_sclk,
- &mst_b_sclk,
- &mst_c_sclk,
- &mst_d_sclk,
- &mst_e_sclk,
- &mst_f_sclk,
- &mst_a_lrclk_div,
- &mst_b_lrclk_div,
- &mst_c_lrclk_div,
- &mst_d_lrclk_div,
- &mst_e_lrclk_div,
- &mst_f_lrclk_div,
- &mst_a_lrclk,
- &mst_b_lrclk,
- &mst_c_lrclk,
- &mst_d_lrclk,
- &mst_e_lrclk,
- &mst_f_lrclk,
- &tdmin_a_sclk_sel,
- &tdmin_b_sclk_sel,
- &tdmin_c_sclk_sel,
- &tdmin_lb_sclk_sel,
- &tdmout_a_sclk_sel,
- &tdmout_b_sclk_sel,
- &tdmout_c_sclk_sel,
- &tdmin_a_sclk_pre_en,
- &tdmin_b_sclk_pre_en,
- &tdmin_c_sclk_pre_en,
- &tdmin_lb_sclk_pre_en,
- &tdmout_a_sclk_pre_en,
- &tdmout_b_sclk_pre_en,
- &tdmout_c_sclk_pre_en,
- &tdmin_a_sclk_post_en,
- &tdmin_b_sclk_post_en,
- &tdmin_c_sclk_post_en,
- &tdmin_lb_sclk_post_en,
- &tdmout_a_sclk_post_en,
- &tdmout_b_sclk_post_en,
- &tdmout_c_sclk_post_en,
- &tdmin_a_sclk,
- &tdmin_b_sclk,
- &tdmin_c_sclk,
- &tdmin_lb_sclk,
- &g12a_tdmout_a_sclk,
- &g12a_tdmout_b_sclk,
- &g12a_tdmout_c_sclk,
- &tdmin_a_lrclk,
- &tdmin_b_lrclk,
- &tdmin_c_lrclk,
- &tdmin_lb_lrclk,
- &tdmout_a_lrclk,
- &tdmout_b_lrclk,
- &tdmout_c_lrclk,
- &spdifout_b_clk_sel,
- &spdifout_b_clk_div,
- &spdifout_b_clk,
- &g12a_tdm_mclk_pad_0,
- &g12a_tdm_mclk_pad_1,
- &g12a_tdm_lrclk_pad_0,
- &g12a_tdm_lrclk_pad_1,
- &g12a_tdm_lrclk_pad_2,
- &g12a_tdm_sclk_pad_0,
- &g12a_tdm_sclk_pad_1,
- &g12a_tdm_sclk_pad_2,
- &toram,
- &eqdrc,
-};
-
-static struct clk_regmap *const sm1_clk_regmaps[] = {
- &ddr_arb,
- &pdm,
- &tdmin_a,
- &tdmin_b,
- &tdmin_c,
- &tdmin_lb,
- &tdmout_a,
- &tdmout_b,
- &tdmout_c,
- &frddr_a,
- &frddr_b,
- &frddr_c,
- &toddr_a,
- &toddr_b,
- &toddr_c,
- &loopback,
- &spdifin,
- &spdifout,
- &resample,
- &spdifout_b,
- &sm1_mst_a_mclk_sel,
- &sm1_mst_b_mclk_sel,
- &sm1_mst_c_mclk_sel,
- &sm1_mst_d_mclk_sel,
- &sm1_mst_e_mclk_sel,
- &sm1_mst_f_mclk_sel,
- &sm1_mst_a_mclk_div,
- &sm1_mst_b_mclk_div,
- &sm1_mst_c_mclk_div,
- &sm1_mst_d_mclk_div,
- &sm1_mst_e_mclk_div,
- &sm1_mst_f_mclk_div,
- &sm1_mst_a_mclk,
- &sm1_mst_b_mclk,
- &sm1_mst_c_mclk,
- &sm1_mst_d_mclk,
- &sm1_mst_e_mclk,
- &sm1_mst_f_mclk,
- &spdifout_clk_sel,
- &spdifout_clk_div,
- &spdifout_clk,
- &spdifin_clk_sel,
- &spdifin_clk_div,
- &spdifin_clk,
- &pdm_dclk_sel,
- &pdm_dclk_div,
- &pdm_dclk,
- &pdm_sysclk_sel,
- &pdm_sysclk_div,
- &pdm_sysclk,
- &mst_a_sclk_pre_en,
- &mst_b_sclk_pre_en,
- &mst_c_sclk_pre_en,
- &mst_d_sclk_pre_en,
- &mst_e_sclk_pre_en,
- &mst_f_sclk_pre_en,
- &mst_a_sclk_div,
- &mst_b_sclk_div,
- &mst_c_sclk_div,
- &mst_d_sclk_div,
- &mst_e_sclk_div,
- &mst_f_sclk_div,
- &mst_a_sclk_post_en,
- &mst_b_sclk_post_en,
- &mst_c_sclk_post_en,
- &mst_d_sclk_post_en,
- &mst_e_sclk_post_en,
- &mst_f_sclk_post_en,
- &mst_a_sclk,
- &mst_b_sclk,
- &mst_c_sclk,
- &mst_d_sclk,
- &mst_e_sclk,
- &mst_f_sclk,
- &mst_a_lrclk_div,
- &mst_b_lrclk_div,
- &mst_c_lrclk_div,
- &mst_d_lrclk_div,
- &mst_e_lrclk_div,
- &mst_f_lrclk_div,
- &mst_a_lrclk,
- &mst_b_lrclk,
- &mst_c_lrclk,
- &mst_d_lrclk,
- &mst_e_lrclk,
- &mst_f_lrclk,
- &tdmin_a_sclk_sel,
- &tdmin_b_sclk_sel,
- &tdmin_c_sclk_sel,
- &tdmin_lb_sclk_sel,
- &tdmout_a_sclk_sel,
- &tdmout_b_sclk_sel,
- &tdmout_c_sclk_sel,
- &tdmin_a_sclk_pre_en,
- &tdmin_b_sclk_pre_en,
- &tdmin_c_sclk_pre_en,
- &tdmin_lb_sclk_pre_en,
- &tdmout_a_sclk_pre_en,
- &tdmout_b_sclk_pre_en,
- &tdmout_c_sclk_pre_en,
- &tdmin_a_sclk_post_en,
- &tdmin_b_sclk_post_en,
- &tdmin_c_sclk_post_en,
- &tdmin_lb_sclk_post_en,
- &tdmout_a_sclk_post_en,
- &tdmout_b_sclk_post_en,
- &tdmout_c_sclk_post_en,
- &tdmin_a_sclk,
- &tdmin_b_sclk,
- &tdmin_c_sclk,
- &tdmin_lb_sclk,
- &g12a_tdmout_a_sclk,
- &g12a_tdmout_b_sclk,
- &g12a_tdmout_c_sclk,
- &tdmin_a_lrclk,
- &tdmin_b_lrclk,
- &tdmin_c_lrclk,
- &tdmin_lb_lrclk,
- &tdmout_a_lrclk,
- &tdmout_b_lrclk,
- &tdmout_c_lrclk,
- &spdifout_b_clk_sel,
- &spdifout_b_clk_div,
- &spdifout_b_clk,
- &sm1_tdm_mclk_pad_0,
- &sm1_tdm_mclk_pad_1,
- &sm1_tdm_lrclk_pad_0,
- &sm1_tdm_lrclk_pad_1,
- &sm1_tdm_lrclk_pad_2,
- &sm1_tdm_sclk_pad_0,
- &sm1_tdm_sclk_pad_1,
- &sm1_tdm_sclk_pad_2,
- &sm1_aud_top,
- &toram,
- &eqdrc,
- &resample_b,
- &tovad,
- &locker,
- &spdifin_lb,
- &frddr_d,
- &toddr_d,
- &loopback_b,
- &sm1_clk81_en,
- &sm1_sysclk_a_div,
- &sm1_sysclk_a_en,
- &sm1_sysclk_b_div,
- &sm1_sysclk_b_en,
- &earcrx,
- &sm1_earcrx_cmdc_clk_sel,
- &sm1_earcrx_cmdc_clk_div,
- &sm1_earcrx_cmdc_clk,
- &sm1_earcrx_dmac_clk_sel,
- &sm1_earcrx_dmac_clk_div,
- &sm1_earcrx_dmac_clk,
-};
-
-struct axg_audio_reset_data {
- struct reset_controller_dev rstc;
- struct regmap *map;
- unsigned int offset;
-};
-
-static void axg_audio_reset_reg_and_bit(struct axg_audio_reset_data *rst,
- unsigned long id,
- unsigned int *reg,
- unsigned int *bit)
-{
- unsigned int stride = regmap_get_reg_stride(rst->map);
-
- *reg = (id / (stride * BITS_PER_BYTE)) * stride;
- *reg += rst->offset;
- *bit = id % (stride * BITS_PER_BYTE);
-}
-
-static int axg_audio_reset_update(struct reset_controller_dev *rcdev,
- unsigned long id, bool assert)
-{
- struct axg_audio_reset_data *rst =
- container_of(rcdev, struct axg_audio_reset_data, rstc);
- unsigned int offset, bit;
-
- axg_audio_reset_reg_and_bit(rst, id, &offset, &bit);
-
- regmap_update_bits(rst->map, offset, BIT(bit),
- assert ? BIT(bit) : 0);
-
- return 0;
-}
-
-static int axg_audio_reset_status(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct axg_audio_reset_data *rst =
- container_of(rcdev, struct axg_audio_reset_data, rstc);
- unsigned int val, offset, bit;
-
- axg_audio_reset_reg_and_bit(rst, id, &offset, &bit);
-
- regmap_read(rst->map, offset, &val);
-
- return !!(val & BIT(bit));
-}
-
-static int axg_audio_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- return axg_audio_reset_update(rcdev, id, true);
-}
-
-static int axg_audio_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- return axg_audio_reset_update(rcdev, id, false);
-}
-
-static int axg_audio_reset_toggle(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- int ret;
-
- ret = axg_audio_reset_assert(rcdev, id);
- if (ret)
- return ret;
-
- return axg_audio_reset_deassert(rcdev, id);
-}
-
-static const struct reset_control_ops axg_audio_rstc_ops = {
- .assert = axg_audio_reset_assert,
- .deassert = axg_audio_reset_deassert,
- .reset = axg_audio_reset_toggle,
- .status = axg_audio_reset_status,
-};
-
static struct regmap_config axg_audio_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
@@ -1763,11 +1318,8 @@ static struct regmap_config axg_audio_regmap_cfg = {
};
struct audioclk_data {
- struct clk_regmap *const *regmap_clks;
- unsigned int regmap_clk_num;
struct meson_clk_hw_data hw_clks;
- unsigned int reset_offset;
- unsigned int reset_num;
+ const char *rst_drvname;
unsigned int max_register;
};
@@ -1775,7 +1327,7 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct audioclk_data *data;
- struct axg_audio_reset_data *rst;
+ struct auxiliary_device *auxdev;
struct regmap *map;
void __iomem *regs;
struct clk_hw *hw;
@@ -1808,10 +1360,6 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
return ret;
}
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < data->regmap_clk_num; i++)
- data->regmap_clks[i]->map = map;
-
/* Take care to skip the registered input clocks */
for (i = AUD_CLKID_DDR_ARB; i < data->hw_clks.num; i++) {
const char *name;
@@ -1834,27 +1382,18 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Stop here if there is no reset */
- if (!data->reset_num)
- return 0;
-
- rst = devm_kzalloc(dev, sizeof(*rst), GFP_KERNEL);
- if (!rst)
- return -ENOMEM;
-
- rst->map = map;
- rst->offset = data->reset_offset;
- rst->rstc.nr_resets = data->reset_num;
- rst->rstc.ops = &axg_audio_rstc_ops;
- rst->rstc.of_node = dev->of_node;
- rst->rstc.owner = THIS_MODULE;
+ /* Register auxiliary reset driver when applicable */
+ if (data->rst_drvname) {
+ auxdev = __devm_auxiliary_device_create(dev, dev->driver->name,
+ data->rst_drvname, NULL, 0);
+ if (!auxdev)
+ return -ENODEV;
+ }
- return devm_reset_controller_register(dev, &rst->rstc);
+ return 0;
}
static const struct audioclk_data axg_audioclk_data = {
- .regmap_clks = axg_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
.hw_clks = {
.hws = axg_audio_hw_clks,
.num = ARRAY_SIZE(axg_audio_hw_clks),
@@ -1863,26 +1402,20 @@ static const struct audioclk_data axg_audioclk_data = {
};
static const struct audioclk_data g12a_audioclk_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
.hw_clks = {
.hws = g12a_audio_hw_clks,
.num = ARRAY_SIZE(g12a_audio_hw_clks),
},
- .reset_offset = AUDIO_SW_RESET,
- .reset_num = 26,
+ .rst_drvname = "rst-g12a",
.max_register = AUDIO_CLK_SPDIFOUT_B_CTRL,
};
static const struct audioclk_data sm1_audioclk_data = {
- .regmap_clks = sm1_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(sm1_clk_regmaps),
.hw_clks = {
.hws = sm1_audio_hw_clks,
.num = ARRAY_SIZE(sm1_audio_hw_clks),
},
- .reset_offset = AUDIO_SM1_SW_RESET0,
- .reset_num = 39,
+ .rst_drvname = "rst-sm1",
.max_register = AUDIO_EARCRX_DMAC_CLK_CTRL,
};
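
The reset controller that used to be registered here moves out of the clk
driver entirely: probe now just spawns an auxiliary device named after
rst_drvname ("rst-g12a"/"rst-sm1"), and a separate reset driver (the
RESET_MESON_AUX implied by the Kconfig hunk above) binds to it. A hedged
skeleton of what that auxiliary consumer looks like (the id strings and
the probe body are assumptions; only the auxiliary bus API is real):

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static int meson_rst_aux_probe(struct auxiliary_device *adev,
			       const struct auxiliary_device_id *id)
{
	/*
	 * Fetch the parent clk controller's regmap and register the
	 * reset controller that was previously built into axg-audio.
	 */
	return 0;
}

static const struct auxiliary_device_id meson_rst_aux_ids[] = {
	{ .name = "axg_audio.rst-g12a" },	/* assumed modname.devname */
	{ .name = "axg_audio.rst-sm1" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(auxiliary, meson_rst_aux_ids);

static struct auxiliary_driver meson_rst_aux_driver = {
	.probe		= meson_rst_aux_probe,
	.id_table	= meson_rst_aux_ids,
};
module_auxiliary_driver(meson_rst_aux_driver);

MODULE_DESCRIPTION("Amlogic audio reset driver (sketch)");
MODULE_LICENSE("GPL");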
diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h
deleted file mode 100644
index 9e7765b630c9..000000000000
--- a/drivers/clk/meson/axg-audio.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Copyright (c) 2018 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#ifndef __AXG_AUDIO_CLKC_H
-#define __AXG_AUDIO_CLKC_H
-
-/*
- * Audio Clock register offsets
- *
- * Register offsets from the datasheet must be multiplied by 4 before
- * to get the right offset
- */
-#define AUDIO_CLK_GATE_EN 0x000
-#define AUDIO_MCLK_A_CTRL 0x004
-#define AUDIO_MCLK_B_CTRL 0x008
-#define AUDIO_MCLK_C_CTRL 0x00C
-#define AUDIO_MCLK_D_CTRL 0x010
-#define AUDIO_MCLK_E_CTRL 0x014
-#define AUDIO_MCLK_F_CTRL 0x018
-#define AUDIO_MST_PAD_CTRL0 0x01c
-#define AUDIO_MST_PAD_CTRL1 0x020
-#define AUDIO_SW_RESET 0x024
-#define AUDIO_MST_A_SCLK_CTRL0 0x040
-#define AUDIO_MST_A_SCLK_CTRL1 0x044
-#define AUDIO_MST_B_SCLK_CTRL0 0x048
-#define AUDIO_MST_B_SCLK_CTRL1 0x04C
-#define AUDIO_MST_C_SCLK_CTRL0 0x050
-#define AUDIO_MST_C_SCLK_CTRL1 0x054
-#define AUDIO_MST_D_SCLK_CTRL0 0x058
-#define AUDIO_MST_D_SCLK_CTRL1 0x05C
-#define AUDIO_MST_E_SCLK_CTRL0 0x060
-#define AUDIO_MST_E_SCLK_CTRL1 0x064
-#define AUDIO_MST_F_SCLK_CTRL0 0x068
-#define AUDIO_MST_F_SCLK_CTRL1 0x06C
-#define AUDIO_CLK_TDMIN_A_CTRL 0x080
-#define AUDIO_CLK_TDMIN_B_CTRL 0x084
-#define AUDIO_CLK_TDMIN_C_CTRL 0x088
-#define AUDIO_CLK_TDMIN_LB_CTRL 0x08C
-#define AUDIO_CLK_TDMOUT_A_CTRL 0x090
-#define AUDIO_CLK_TDMOUT_B_CTRL 0x094
-#define AUDIO_CLK_TDMOUT_C_CTRL 0x098
-#define AUDIO_CLK_SPDIFIN_CTRL 0x09C
-#define AUDIO_CLK_SPDIFOUT_CTRL 0x0A0
-#define AUDIO_CLK_RESAMPLE_CTRL 0x0A4
-#define AUDIO_CLK_LOCKER_CTRL 0x0A8
-#define AUDIO_CLK_PDMIN_CTRL0 0x0AC
-#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
-#define AUDIO_CLK_SPDIFOUT_B_CTRL 0x0B4
-
-/* SM1 introduce new register and some shifts :( */
-#define AUDIO_CLK_GATE_EN1 0x004
-#define AUDIO_SM1_MCLK_A_CTRL 0x008
-#define AUDIO_SM1_MCLK_B_CTRL 0x00C
-#define AUDIO_SM1_MCLK_C_CTRL 0x010
-#define AUDIO_SM1_MCLK_D_CTRL 0x014
-#define AUDIO_SM1_MCLK_E_CTRL 0x018
-#define AUDIO_SM1_MCLK_F_CTRL 0x01C
-#define AUDIO_SM1_MST_PAD_CTRL0 0x020
-#define AUDIO_SM1_MST_PAD_CTRL1 0x024
-#define AUDIO_SM1_SW_RESET0 0x028
-#define AUDIO_SM1_SW_RESET1 0x02C
-#define AUDIO_CLK81_CTRL 0x030
-#define AUDIO_CLK81_EN 0x034
-#define AUDIO_EARCRX_CMDC_CLK_CTRL 0x0D0
-#define AUDIO_EARCRX_DMAC_CLK_CTRL 0x0D4
-
-#endif /*__AXG_AUDIO_CLKC_H */
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 448eece246ca..208833c3ee95 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -18,11 +18,96 @@
#include "clk-regmap.h"
#include "clk-pll.h"
#include "clk-mpll.h"
-#include "axg.h"
#include "meson-eeclk.h"
#include <dt-bindings/clock/axg-clkc.h>
+#define HHI_GP0_PLL_CNTL 0x40
+#define HHI_GP0_PLL_CNTL2 0x44
+#define HHI_GP0_PLL_CNTL3 0x48
+#define HHI_GP0_PLL_CNTL4 0x4c
+#define HHI_GP0_PLL_CNTL5 0x50
+#define HHI_GP0_PLL_STS 0x54
+#define HHI_GP0_PLL_CNTL1 0x58
+#define HHI_HIFI_PLL_CNTL 0x80
+#define HHI_HIFI_PLL_CNTL2 0x84
+#define HHI_HIFI_PLL_CNTL3 0x88
+#define HHI_HIFI_PLL_CNTL4 0x8C
+#define HHI_HIFI_PLL_CNTL5 0x90
+#define HHI_HIFI_PLL_STS 0x94
+#define HHI_HIFI_PLL_CNTL1 0x98
+
+#define HHI_XTAL_DIVN_CNTL 0xbc
+#define HHI_GCLK2_MPEG0 0xc0
+#define HHI_GCLK2_MPEG1 0xc4
+#define HHI_GCLK2_MPEG2 0xc8
+#define HHI_GCLK2_OTHER 0xd0
+#define HHI_GCLK2_AO 0xd4
+#define HHI_PCIE_PLL_CNTL 0xd8
+#define HHI_PCIE_PLL_CNTL1 0xdC
+#define HHI_PCIE_PLL_CNTL2 0xe0
+#define HHI_PCIE_PLL_CNTL3 0xe4
+#define HHI_PCIE_PLL_CNTL4 0xe8
+#define HHI_PCIE_PLL_CNTL5 0xec
+#define HHI_PCIE_PLL_CNTL6 0xf0
+#define HHI_PCIE_PLL_STS 0xf4
+
+#define HHI_MEM_PD_REG0 0x100
+#define HHI_VPU_MEM_PD_REG0 0x104
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12c
+
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_AO 0x154
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
+#define HHI_SYS_CPU_RESET_CNTL 0x160
+#define HHI_VID_CLK_DIV 0x164
+#define HHI_SPICC_HCLK_CNTL 0x168
+
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_TS_CLK_CNTL 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_VID_PLL_CLK_DIV 0x1a0
+#define HHI_VPU_CLK_CNTL 0x1bC
+
+#define HHI_VAPBCLK_CNTL 0x1F4
+
+#define HHI_GEN_CLK_CNTL 0x228
+
+#define HHI_VDIN_MEAS_CLK_CNTL 0x250
+#define HHI_NAND_CLK_CNTL 0x25C
+#define HHI_SD_EMMC_CLK_CNTL 0x264
+
+#define HHI_MPLL_CNTL 0x280
+#define HHI_MPLL_CNTL2 0x284
+#define HHI_MPLL_CNTL3 0x288
+#define HHI_MPLL_CNTL4 0x28C
+#define HHI_MPLL_CNTL5 0x290
+#define HHI_MPLL_CNTL6 0x294
+#define HHI_MPLL_CNTL7 0x298
+#define HHI_MPLL_CNTL8 0x29C
+#define HHI_MPLL_CNTL9 0x2A0
+#define HHI_MPLL_CNTL10 0x2A4
+
+#define HHI_MPLL3_CNTL0 0x2E0
+#define HHI_MPLL3_CNTL1 0x2E4
+#define HHI_PLL_TOP_MISC 0x2E8
+
+#define HHI_SYS_PLL_CNTL1 0x2FC
+#define HHI_SYS_PLL_CNTL 0x300
+#define HHI_SYS_PLL_CNTL2 0x304
+#define HHI_SYS_PLL_CNTL3 0x308
+#define HHI_SYS_PLL_CNTL4 0x30c
+#define HHI_SYS_PLL_CNTL5 0x310
+#define HHI_SYS_PLL_STS 0x314
+#define HHI_DPLL_TOP_I 0x318
+#define HHI_DPLL_TOP2_I 0x31C
+
static struct clk_regmap axg_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -918,7 +1003,7 @@ static const struct clk_parent_data axg_sd_emmc_clk0_parent_data[] = {
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
- * the necessary rates for MMC and NAND operation can be acheived using
+ * the necessary rates for MMC and NAND operation can be achieved using
* xtal or fclk_div clocks
*/
};
@@ -2025,138 +2110,7 @@ static struct clk_hw *axg_hw_clks[] = {
[CLKID_VDIN_MEAS] = &axg_vdin_meas.hw,
};
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const axg_clk_regmaps[] = {
- &axg_clk81,
- &axg_ddr,
- &axg_audio_locker,
- &axg_mipi_dsi_host,
- &axg_isa,
- &axg_pl301,
- &axg_periphs,
- &axg_spicc_0,
- &axg_i2c,
- &axg_rng0,
- &axg_uart0,
- &axg_mipi_dsi_phy,
- &axg_spicc_1,
- &axg_pcie_a,
- &axg_pcie_b,
- &axg_hiu_reg,
- &axg_assist_misc,
- &axg_emmc_b,
- &axg_emmc_c,
- &axg_dma,
- &axg_spi,
- &axg_audio,
- &axg_eth_core,
- &axg_uart1,
- &axg_g2d,
- &axg_usb0,
- &axg_usb1,
- &axg_reset,
- &axg_usb_general,
- &axg_ahb_arb0,
- &axg_efuse,
- &axg_boot_rom,
- &axg_ahb_data_bus,
- &axg_ahb_ctrl_bus,
- &axg_usb1_to_ddr,
- &axg_usb0_to_ddr,
- &axg_mmc_pclk,
- &axg_vpu_intr,
- &axg_sec_ahb_ahb3_bridge,
- &axg_gic,
- &axg_ao_media_cpu,
- &axg_ao_ahb_sram,
- &axg_ao_ahb_bus,
- &axg_ao_iface,
- &axg_ao_i2c,
- &axg_sd_emmc_b_clk0,
- &axg_sd_emmc_c_clk0,
- &axg_mpeg_clk_div,
- &axg_sd_emmc_b_clk0_div,
- &axg_sd_emmc_c_clk0_div,
- &axg_mpeg_clk_sel,
- &axg_sd_emmc_b_clk0_sel,
- &axg_sd_emmc_c_clk0_sel,
- &axg_mpll0,
- &axg_mpll1,
- &axg_mpll2,
- &axg_mpll3,
- &axg_mpll0_div,
- &axg_mpll1_div,
- &axg_mpll2_div,
- &axg_mpll3_div,
- &axg_fixed_pll,
- &axg_sys_pll,
- &axg_gp0_pll,
- &axg_hifi_pll,
- &axg_mpll_prediv,
- &axg_fclk_div2,
- &axg_fclk_div3,
- &axg_fclk_div4,
- &axg_fclk_div5,
- &axg_fclk_div7,
- &axg_pcie_pll_dco,
- &axg_pcie_pll_od,
- &axg_pcie_pll,
- &axg_pcie_mux,
- &axg_pcie_ref,
- &axg_pcie_cml_en0,
- &axg_pcie_cml_en1,
- &axg_gen_clk_sel,
- &axg_gen_clk_div,
- &axg_gen_clk,
- &axg_fixed_pll_dco,
- &axg_sys_pll_dco,
- &axg_gp0_pll_dco,
- &axg_hifi_pll_dco,
- &axg_pcie_pll_dco,
- &axg_pcie_pll_od,
- &axg_vpu_0_div,
- &axg_vpu_0_sel,
- &axg_vpu_0,
- &axg_vpu_1_div,
- &axg_vpu_1_sel,
- &axg_vpu_1,
- &axg_vpu,
- &axg_vapb_0_div,
- &axg_vapb_0_sel,
- &axg_vapb_0,
- &axg_vapb_1_div,
- &axg_vapb_1_sel,
- &axg_vapb_1,
- &axg_vapb_sel,
- &axg_vapb,
- &axg_vclk,
- &axg_vclk2,
- &axg_vclk_sel,
- &axg_vclk2_sel,
- &axg_vclk_input,
- &axg_vclk2_input,
- &axg_vclk_div,
- &axg_vclk_div1,
- &axg_vclk2_div,
- &axg_vclk2_div1,
- &axg_vclk_div2_en,
- &axg_vclk_div4_en,
- &axg_vclk_div6_en,
- &axg_vclk_div12_en,
- &axg_vclk2_div2_en,
- &axg_vclk2_div4_en,
- &axg_vclk2_div6_en,
- &axg_vclk2_div12_en,
- &axg_cts_encl_sel,
- &axg_cts_encl,
- &axg_vdin_meas_sel,
- &axg_vdin_meas_div,
- &axg_vdin_meas,
-};
-
static const struct meson_eeclkc_data axg_clkc_data = {
- .regmap_clks = axg_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
.hw_clks = {
.hws = axg_hw_clks,
.num = ARRAY_SIZE(axg_hw_clks),
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
deleted file mode 100644
index 624d8d3ce7c4..000000000000
--- a/drivers/clk/meson/axg.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
-/*
- * Copyright (c) 2016 AmLogic, Inc.
- * Author: Michael Turquette <mturquette@baylibre.com>
- *
- * Copyright (c) 2017 Amlogic, inc.
- * Author: Qiufang Dai <qiufang.dai@amlogic.com>
- *
- */
-#ifndef __AXG_H
-#define __AXG_H
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the data sheet must be multiplied by 4 before
- * adding them to the base address to get the right value.
- */
-#define HHI_GP0_PLL_CNTL 0x40
-#define HHI_GP0_PLL_CNTL2 0x44
-#define HHI_GP0_PLL_CNTL3 0x48
-#define HHI_GP0_PLL_CNTL4 0x4c
-#define HHI_GP0_PLL_CNTL5 0x50
-#define HHI_GP0_PLL_STS 0x54
-#define HHI_GP0_PLL_CNTL1 0x58
-#define HHI_HIFI_PLL_CNTL 0x80
-#define HHI_HIFI_PLL_CNTL2 0x84
-#define HHI_HIFI_PLL_CNTL3 0x88
-#define HHI_HIFI_PLL_CNTL4 0x8C
-#define HHI_HIFI_PLL_CNTL5 0x90
-#define HHI_HIFI_PLL_STS 0x94
-#define HHI_HIFI_PLL_CNTL1 0x98
-
-#define HHI_XTAL_DIVN_CNTL 0xbc
-#define HHI_GCLK2_MPEG0 0xc0
-#define HHI_GCLK2_MPEG1 0xc4
-#define HHI_GCLK2_MPEG2 0xc8
-#define HHI_GCLK2_OTHER 0xd0
-#define HHI_GCLK2_AO 0xd4
-#define HHI_PCIE_PLL_CNTL 0xd8
-#define HHI_PCIE_PLL_CNTL1 0xdC
-#define HHI_PCIE_PLL_CNTL2 0xe0
-#define HHI_PCIE_PLL_CNTL3 0xe4
-#define HHI_PCIE_PLL_CNTL4 0xe8
-#define HHI_PCIE_PLL_CNTL5 0xec
-#define HHI_PCIE_PLL_CNTL6 0xf0
-#define HHI_PCIE_PLL_STS 0xf4
-
-#define HHI_MEM_PD_REG0 0x100
-#define HHI_VPU_MEM_PD_REG0 0x104
-#define HHI_VIID_CLK_DIV 0x128
-#define HHI_VIID_CLK_CNTL 0x12c
-
-#define HHI_GCLK_MPEG0 0x140
-#define HHI_GCLK_MPEG1 0x144
-#define HHI_GCLK_MPEG2 0x148
-#define HHI_GCLK_OTHER 0x150
-#define HHI_GCLK_AO 0x154
-#define HHI_SYS_CPU_CLK_CNTL1 0x15c
-#define HHI_SYS_CPU_RESET_CNTL 0x160
-#define HHI_VID_CLK_DIV 0x164
-#define HHI_SPICC_HCLK_CNTL 0x168
-
-#define HHI_MPEG_CLK_CNTL 0x174
-#define HHI_VID_CLK_CNTL 0x17c
-#define HHI_TS_CLK_CNTL 0x190
-#define HHI_VID_CLK_CNTL2 0x194
-#define HHI_SYS_CPU_CLK_CNTL0 0x19c
-#define HHI_VID_PLL_CLK_DIV 0x1a0
-#define HHI_VPU_CLK_CNTL 0x1bC
-
-#define HHI_VAPBCLK_CNTL 0x1F4
-
-#define HHI_GEN_CLK_CNTL 0x228
-
-#define HHI_VDIN_MEAS_CLK_CNTL 0x250
-#define HHI_NAND_CLK_CNTL 0x25C
-#define HHI_SD_EMMC_CLK_CNTL 0x264
-
-#define HHI_MPLL_CNTL 0x280
-#define HHI_MPLL_CNTL2 0x284
-#define HHI_MPLL_CNTL3 0x288
-#define HHI_MPLL_CNTL4 0x28C
-#define HHI_MPLL_CNTL5 0x290
-#define HHI_MPLL_CNTL6 0x294
-#define HHI_MPLL_CNTL7 0x298
-#define HHI_MPLL_CNTL8 0x29C
-#define HHI_MPLL_CNTL9 0x2A0
-#define HHI_MPLL_CNTL10 0x2A4
-
-#define HHI_MPLL3_CNTL0 0x2E0
-#define HHI_MPLL3_CNTL1 0x2E4
-#define HHI_PLL_TOP_MISC 0x2E8
-
-#define HHI_SYS_PLL_CNTL1 0x2FC
-#define HHI_SYS_PLL_CNTL 0x300
-#define HHI_SYS_PLL_CNTL2 0x304
-#define HHI_SYS_PLL_CNTL3 0x308
-#define HHI_SYS_PLL_CNTL4 0x30c
-#define HHI_SYS_PLL_CNTL5 0x310
-#define HHI_SYS_PLL_STS 0x314
-#define HHI_DPLL_TOP_I 0x318
-#define HHI_DPLL_TOP2_I 0x31C
-
-#endif /* __AXG_H */
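
Note: the offsets deleted above reappear verbatim near the top of axg.c earlier in this
patch, since that file was their only user. The "multiplied by 4" convention from the
deleted header comment still holds; a sketch of what it means in practice (the 0x10 word
index is taken from the equivalent gxbb.h comments removed later in this patch):

	/*
	 * The data sheet lists HHI_GP0_PLL_CNTL at word index 0x10; the driver
	 * addresses the register file in bytes, hence 0x10 * 4 = 0x40.
	 */
	#define HHI_GP0_PLL_CNTL	(0x10 << 2)	/* 0x40 */
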
diff --git a/drivers/clk/meson/c3-peripherals.c b/drivers/clk/meson/c3-peripherals.c
index 2075668ed306..a25e7d5dc669 100644
--- a/drivers/clk/meson/c3-peripherals.c
+++ b/drivers/clk/meson/c3-peripherals.c
@@ -2092,210 +2092,6 @@ static struct clk_hw *c3_periphs_hw_clks[] = {
[CLKID_VAPB] = &vapb.hw,
};
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const c3_periphs_clk_regmaps[] = {
- &rtc_xtal_clkin,
- &rtc_32k_div,
- &rtc_32k_mux,
- &rtc_32k,
- &rtc_clk,
- &sys_reset_ctrl,
- &sys_pwr_ctrl,
- &sys_pad_ctrl,
- &sys_ctrl,
- &sys_ts_pll,
- &sys_dev_arb,
- &sys_mmc_pclk,
- &sys_cpu_ctrl,
- &sys_jtag_ctrl,
- &sys_ir_ctrl,
- &sys_irq_ctrl,
- &sys_msr_clk,
- &sys_rom,
- &sys_uart_f,
- &sys_cpu_apb,
- &sys_rsa,
- &sys_sar_adc,
- &sys_startup,
- &sys_secure,
- &sys_spifc,
- &sys_nna,
- &sys_eth_mac,
- &sys_gic,
- &sys_rama,
- &sys_big_nic,
- &sys_ramb,
- &sys_audio_pclk,
- &sys_pwm_kl,
- &sys_pwm_ij,
- &sys_usb,
- &sys_sd_emmc_a,
- &sys_sd_emmc_c,
- &sys_pwm_ab,
- &sys_pwm_cd,
- &sys_pwm_ef,
- &sys_pwm_gh,
- &sys_spicc_1,
- &sys_spicc_0,
- &sys_uart_a,
- &sys_uart_b,
- &sys_uart_c,
- &sys_uart_d,
- &sys_uart_e,
- &sys_i2c_m_a,
- &sys_i2c_m_b,
- &sys_i2c_m_c,
- &sys_i2c_m_d,
- &sys_i2c_s_a,
- &sys_rtc,
- &sys_ge2d,
- &sys_isp,
- &sys_gpv_isp_nic,
- &sys_gpv_cve_nic,
- &sys_mipi_dsi_host,
- &sys_mipi_dsi_phy,
- &sys_eth_phy,
- &sys_acodec,
- &sys_dwap,
- &sys_dos,
- &sys_cve,
- &sys_vout,
- &sys_vc9000e,
- &sys_pwm_mn,
- &sys_sd_emmc_b,
- &axi_sys_nic,
- &axi_isp_nic,
- &axi_cve_nic,
- &axi_ramb,
- &axi_rama,
- &axi_cpu_dmc,
- &axi_nic,
- &axi_dma,
- &axi_mux_nic,
- &axi_cve,
- &axi_dev1_dmc,
- &axi_dev0_dmc,
- &axi_dsp_dmc,
- &clk_12_24m_in,
- &clk_12_24m,
- &fclk_25m_div,
- &fclk_25m,
- &gen_sel,
- &gen_div,
- &gen,
- &saradc_sel,
- &saradc_div,
- &saradc,
- &pwm_a_sel,
- &pwm_a_div,
- &pwm_a,
- &pwm_b_sel,
- &pwm_b_div,
- &pwm_b,
- &pwm_c_sel,
- &pwm_c_div,
- &pwm_c,
- &pwm_d_sel,
- &pwm_d_div,
- &pwm_d,
- &pwm_e_sel,
- &pwm_e_div,
- &pwm_e,
- &pwm_f_sel,
- &pwm_f_div,
- &pwm_f,
- &pwm_g_sel,
- &pwm_g_div,
- &pwm_g,
- &pwm_h_sel,
- &pwm_h_div,
- &pwm_h,
- &pwm_i_sel,
- &pwm_i_div,
- &pwm_i,
- &pwm_j_sel,
- &pwm_j_div,
- &pwm_j,
- &pwm_k_sel,
- &pwm_k_div,
- &pwm_k,
- &pwm_l_sel,
- &pwm_l_div,
- &pwm_l,
- &pwm_m_sel,
- &pwm_m_div,
- &pwm_m,
- &pwm_n_sel,
- &pwm_n_div,
- &pwm_n,
- &spicc_a_sel,
- &spicc_a_div,
- &spicc_a,
- &spicc_b_sel,
- &spicc_b_div,
- &spicc_b,
- &spifc_sel,
- &spifc_div,
- &spifc,
- &sd_emmc_a_sel,
- &sd_emmc_a_div,
- &sd_emmc_a,
- &sd_emmc_b_sel,
- &sd_emmc_b_div,
- &sd_emmc_b,
- &sd_emmc_c_sel,
- &sd_emmc_c_div,
- &sd_emmc_c,
- &ts_div,
- &ts,
- &eth_125m,
- &eth_rmii_div,
- &eth_rmii,
- &mipi_dsi_meas_sel,
- &mipi_dsi_meas_div,
- &mipi_dsi_meas,
- &dsi_phy_sel,
- &dsi_phy_div,
- &dsi_phy,
- &vout_mclk_sel,
- &vout_mclk_div,
- &vout_mclk,
- &vout_enc_sel,
- &vout_enc_div,
- &vout_enc,
- &hcodec_0_sel,
- &hcodec_0_div,
- &hcodec_0,
- &hcodec_1_sel,
- &hcodec_1_div,
- &hcodec_1,
- &hcodec,
- &vc9000e_aclk_sel,
- &vc9000e_aclk_div,
- &vc9000e_aclk,
- &vc9000e_core_sel,
- &vc9000e_core_div,
- &vc9000e_core,
- &csi_phy0_sel,
- &csi_phy0_div,
- &csi_phy0,
- &dewarpa_sel,
- &dewarpa_div,
- &dewarpa,
- &isp0_sel,
- &isp0_div,
- &isp0,
- &nna_core_sel,
- &nna_core_div,
- &nna_core,
- &ge2d_sel,
- &ge2d_div,
- &ge2d,
- &vapb_sel,
- &vapb_div,
- &vapb,
-};
-
static const struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -2313,7 +2109,7 @@ static int c3_peripherals_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct regmap *regmap;
void __iomem *base;
- int clkid, ret, i;
+ int clkid, ret;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -2323,10 +2119,6 @@ static int c3_peripherals_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(c3_periphs_clk_regmaps); i++)
- c3_periphs_clk_regmaps[i]->map = regmap;
-
for (clkid = 0; clkid < c3_periphs_clks.num; clkid++) {
/* array might be sparse */
if (!c3_periphs_clks.hws[clkid])
diff --git a/drivers/clk/meson/c3-pll.c b/drivers/clk/meson/c3-pll.c
index ed4bc495862e..2c5594b8e49a 100644
--- a/drivers/clk/meson/c3-pll.c
+++ b/drivers/clk/meson/c3-pll.c
@@ -653,32 +653,6 @@ static struct clk_hw *c3_pll_hw_clks[] = {
[CLKID_MCLK1] = &mclk1.hw
};
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const c3_pll_clk_regmaps[] = {
- &fclk_50m_en,
- &fclk_div2,
- &fclk_div2p5,
- &fclk_div3,
- &fclk_div4,
- &fclk_div5,
- &fclk_div7,
- &gp0_pll_dco,
- &gp0_pll,
- &hifi_pll_dco,
- &hifi_pll,
- &mclk_pll_dco,
- &mclk_pll_od,
- &mclk_pll,
- &mclk0_sel,
- &mclk0_div_en,
- &mclk0_div,
- &mclk0,
- &mclk1_sel,
- &mclk1_div_en,
- &mclk1_div,
- &mclk1,
-};
-
static const struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -696,7 +670,7 @@ static int c3_pll_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct regmap *regmap;
void __iomem *base;
- int clkid, ret, i;
+ int clkid, ret;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -706,10 +680,6 @@ static int c3_pll_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(c3_pll_clk_regmaps); i++)
- c3_pll_clk_regmaps[i]->map = regmap;
-
for (clkid = 0; clkid < c3_pll_clks.num; clkid++) {
/* array might be sparse */
if (!c3_pll_clks.hws[clkid])
diff --git a/drivers/clk/meson/clk-cpu-dyndiv.c b/drivers/clk/meson/clk-cpu-dyndiv.c
index cb043b52b65d..83aedbfd2891 100644
--- a/drivers/clk/meson/clk-cpu-dyndiv.c
+++ b/drivers/clk/meson/clk-cpu-dyndiv.c
@@ -61,6 +61,7 @@ static int meson_clk_cpu_dyndiv_set_rate(struct clk_hw *hw, unsigned long rate,
};
const struct clk_ops meson_clk_cpu_dyndiv_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_cpu_dyndiv_recalc_rate,
.determine_rate = meson_clk_cpu_dyndiv_determine_rate,
.set_rate = meson_clk_cpu_dyndiv_set_rate,
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c
index c896cf29b318..787df6cdf841 100644
--- a/drivers/clk/meson/clk-dualdiv.c
+++ b/drivers/clk/meson/clk-dualdiv.c
@@ -126,6 +126,7 @@ static int meson_clk_dualdiv_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops meson_clk_dualdiv_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_dualdiv_recalc_rate,
.determine_rate = meson_clk_dualdiv_determine_rate,
.set_rate = meson_clk_dualdiv_set_rate,
@@ -133,6 +134,7 @@ const struct clk_ops meson_clk_dualdiv_ops = {
EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ops, "CLK_MESON");
const struct clk_ops meson_clk_dualdiv_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_dualdiv_recalc_rate,
};
EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ro_ops, "CLK_MESON");
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index ee91e32b4050..7f8dada66e16 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -128,6 +128,11 @@ static int mpll_init(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
if (mpll->init_count)
regmap_multi_reg_write(clk->map, mpll->init_regs,
@@ -151,6 +156,7 @@ static int mpll_init(struct clk_hw *hw)
}
const struct clk_ops meson_clk_mpll_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = mpll_recalc_rate,
.determine_rate = mpll_determine_rate,
};
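
Note the pattern used throughout this patch for ops that already had a custom .init hook
(mpll above, pll and the phase clocks below): rather than pointing .init at
clk_regmap_init directly, the existing hook calls it first, so clk->map is resolved
before the first register access. A minimal sketch of that pattern, with a hypothetical
foo_init:

	static int foo_init(struct clk_hw *hw)
	{
		struct clk_regmap *clk = to_clk_regmap(hw);
		int ret;

		/* Resolve clk->map before touching any register */
		ret = clk_regmap_init(hw);
		if (ret)
			return ret;

		/* ... type-specific setup using clk->map ... */
		return 0;
	}
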
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index 701211120610..58dd982e6878 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -58,6 +58,7 @@ static int meson_clk_phase_set_phase(struct clk_hw *hw, int degrees)
}
const struct clk_ops meson_clk_phase_ops = {
+ .init = clk_regmap_init,
.get_phase = meson_clk_phase_get_phase,
.set_phase = meson_clk_phase_set_phase,
};
@@ -83,6 +84,11 @@ static int meson_clk_triphase_sync(struct clk_hw *hw)
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
unsigned int val;
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
/* Get phase 0 and sync it to phase 1 and 2 */
val = meson_parm_read(clk->map, &tph->ph0);
@@ -142,6 +148,11 @@ static int meson_sclk_ws_inv_sync(struct clk_hw *hw)
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_sclk_ws_inv_data *tph = meson_sclk_ws_inv_data(clk);
unsigned int val;
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
/* Get phase and sync the inverted value to ws */
val = meson_parm_read(clk->map, &tph->ph);
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index e8e53855b00a..1ea6579a760f 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -311,6 +311,11 @@ static int meson_clk_pll_init(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
/*
* Keep the clock running, which was already initialized and enabled
@@ -468,6 +473,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
* the other ops except set_rate since the rate is fixed.
*/
const struct clk_ops meson_clk_pcie_pll_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_pll_recalc_rate,
.determine_rate = meson_clk_pll_determine_rate,
.is_enabled = meson_clk_pll_is_enabled,
@@ -488,6 +494,7 @@ const struct clk_ops meson_clk_pll_ops = {
EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ops, "CLK_MESON");
const struct clk_ops meson_clk_pll_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_pll_recalc_rate,
.is_enabled = meson_clk_pll_is_enabled,
};
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index f3e504f67571..1ed56fe63cae 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -4,9 +4,52 @@
* Author: Jerome Brunet <jbrunet@baylibre.com>
*/
+#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mfd/syscon.h>
#include "clk-regmap.h"
+int clk_regmap_init(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct device_node *np, *parent_np;
+ struct device *dev;
+
+ /* Allow regmap to be preset as it was historically done */
+ if (clk->map)
+ return 0;
+
+ /*
+ * FIXME: what follows couples the controller implementation
+ * and clk_regmap clock type. This situation is not desirable
+ * but temporary, until the controller is able to register
+ * a hook to initialize a clock type
+ */
+
+ /* Check the usual dev-enabled controller with a basic IO regmap */
+ dev = clk_hw_get_dev(hw);
+ if (dev) {
+ clk->map = dev_get_regmap(dev, NULL);
+ if (clk->map)
+ return 0;
+ }
+
+ /* Move on to early and syscon based controllers */
+ np = clk_hw_get_of_node(hw);
+ if (np) {
+ parent_np = of_get_parent(np);
+ clk->map = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
+
+ if (!IS_ERR_OR_NULL(clk->map))
+ return 0;
+ }
+
+ /* Bail out if regmap can't be found */
+ return -EINVAL;
+}
+EXPORT_SYMBOL_NS_GPL(clk_regmap_init, "CLK_MESON");
+
static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable)
{
struct clk_regmap *clk = to_clk_regmap(hw);
@@ -45,6 +88,7 @@ static int clk_regmap_gate_is_enabled(struct clk_hw *hw)
}
const struct clk_ops clk_regmap_gate_ops = {
+ .init = clk_regmap_init,
.enable = clk_regmap_gate_enable,
.disable = clk_regmap_gate_disable,
.is_enabled = clk_regmap_gate_is_enabled,
@@ -52,6 +96,7 @@ const struct clk_ops clk_regmap_gate_ops = {
EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ops, "CLK_MESON");
const struct clk_ops clk_regmap_gate_ro_ops = {
+ .init = clk_regmap_init,
.is_enabled = clk_regmap_gate_is_enabled,
};
EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ro_ops, "CLK_MESON");
@@ -121,6 +166,7 @@ static int clk_regmap_div_set_rate(struct clk_hw *hw, unsigned long rate,
/* Would prefer clk_regmap_div_ro_ops but clashes with qcom */
const struct clk_ops clk_regmap_divider_ops = {
+ .init = clk_regmap_init,
.recalc_rate = clk_regmap_div_recalc_rate,
.determine_rate = clk_regmap_div_determine_rate,
.set_rate = clk_regmap_div_set_rate,
@@ -128,6 +174,7 @@ const struct clk_ops clk_regmap_divider_ops = {
EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ops, "CLK_MESON");
const struct clk_ops clk_regmap_divider_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = clk_regmap_div_recalc_rate,
.determine_rate = clk_regmap_div_determine_rate,
};
@@ -170,6 +217,7 @@ static int clk_regmap_mux_determine_rate(struct clk_hw *hw,
}
const struct clk_ops clk_regmap_mux_ops = {
+ .init = clk_regmap_init,
.get_parent = clk_regmap_mux_get_parent,
.set_parent = clk_regmap_mux_set_parent,
.determine_rate = clk_regmap_mux_determine_rate,
@@ -177,6 +225,7 @@ const struct clk_ops clk_regmap_mux_ops = {
EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ops, "CLK_MESON");
const struct clk_ops clk_regmap_mux_ro_ops = {
+ .init = clk_regmap_init,
.get_parent = clk_regmap_mux_get_parent,
};
EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ro_ops, "CLK_MESON");
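
With clk_regmap_init in place, a controller only has to make its regmap discoverable:
attach it to the struct device (dev_get_regmap() then finds it) or, for early and syscon
controllers, let it be resolved from the parent OF node. A sketch of the first case,
assuming a platform driver with an MMIO regmap — the foo_* names are illustrative, not
part of this patch:

	static int foo_clkc_probe(struct platform_device *pdev)
	{
		void __iomem *base;
		struct regmap *map;

		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/*
		 * devm_regmap_init_mmio() attaches the regmap to the device,
		 * so clk_regmap_init() later finds it via dev_get_regmap()
		 * and the old per-clock "populate regmap" loop goes away.
		 */
		map = devm_regmap_init_mmio(&pdev->dev, base, &foo_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* ... register the hw clocks as before ... */
		return 0;
	}
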
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index e365312da54e..f8cac2df5755 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -7,6 +7,7 @@
#ifndef __CLK_REGMAP_H
#define __CLK_REGMAP_H
+#include <linux/device.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
@@ -31,6 +32,9 @@ static inline struct clk_regmap *to_clk_regmap(struct clk_hw *hw)
return container_of(hw, struct clk_regmap, hw);
}
+/* clk_regmap init op to get and cache regmap from the controllers */
+int clk_regmap_init(struct clk_hw *hw);
+
/**
* struct clk_regmap_gate_data - regmap backed gate specific data
*
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
index 71c758ffa493..4095a1b2bb80 100644
--- a/drivers/clk/meson/g12a-aoclk.c
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -381,38 +381,6 @@ static const unsigned int g12a_aoclk_reset[] = {
[RESET_AO_IR_OUT] = 23,
};
-static struct clk_regmap *g12a_aoclk_regmap[] = {
- &g12a_aoclk_ahb,
- &g12a_aoclk_ir_in,
- &g12a_aoclk_i2c_m0,
- &g12a_aoclk_i2c_s0,
- &g12a_aoclk_uart,
- &g12a_aoclk_prod_i2c,
- &g12a_aoclk_uart2,
- &g12a_aoclk_ir_out,
- &g12a_aoclk_saradc,
- &g12a_aoclk_mailbox,
- &g12a_aoclk_m3,
- &g12a_aoclk_ahb_sram,
- &g12a_aoclk_rti,
- &g12a_aoclk_m4_fclk,
- &g12a_aoclk_m4_hclk,
- &g12a_aoclk_cts_oscin,
- &g12a_aoclk_32k_by_oscin_pre,
- &g12a_aoclk_32k_by_oscin_div,
- &g12a_aoclk_32k_by_oscin_sel,
- &g12a_aoclk_32k_by_oscin,
- &g12a_aoclk_cec_pre,
- &g12a_aoclk_cec_div,
- &g12a_aoclk_cec_sel,
- &g12a_aoclk_cec,
- &g12a_aoclk_cts_rtc_oscin,
- &g12a_aoclk_clk81,
- &g12a_aoclk_saradc_mux,
- &g12a_aoclk_saradc_div,
- &g12a_aoclk_saradc_gate,
-};
-
static struct clk_hw *g12a_aoclk_hw_clks[] = {
[CLKID_AO_AHB] = &g12a_aoclk_ahb.hw,
[CLKID_AO_IR_IN] = &g12a_aoclk_ir_in.hw,
@@ -449,8 +417,6 @@ static const struct meson_aoclk_data g12a_aoclkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
.num_reset = ARRAY_SIZE(g12a_aoclk_reset),
.reset = g12a_aoclk_reset,
- .num_clks = ARRAY_SIZE(g12a_aoclk_regmap),
- .clks = g12a_aoclk_regmap,
.hw_clks = {
.hws = g12a_aoclk_hw_clks,
.num = ARRAY_SIZE(g12a_aoclk_hw_clks),
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index d9e546e006d7..66f0e817e416 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -24,10 +24,119 @@
#include "vid-pll-div.h"
#include "vclk.h"
#include "meson-eeclk.h"
-#include "g12a.h"
#include <dt-bindings/clock/g12a-clkc.h>
+#define HHI_MIPI_CNTL0 0x000
+#define HHI_MIPI_CNTL1 0x004
+#define HHI_MIPI_CNTL2 0x008
+#define HHI_MIPI_STS 0x00c
+#define HHI_GP0_PLL_CNTL0 0x040
+#define HHI_GP0_PLL_CNTL1 0x044
+#define HHI_GP0_PLL_CNTL2 0x048
+#define HHI_GP0_PLL_CNTL3 0x04c
+#define HHI_GP0_PLL_CNTL4 0x050
+#define HHI_GP0_PLL_CNTL5 0x054
+#define HHI_GP0_PLL_CNTL6 0x058
+#define HHI_GP0_PLL_STS 0x05c
+#define HHI_GP1_PLL_CNTL0 0x060
+#define HHI_GP1_PLL_CNTL1 0x064
+#define HHI_GP1_PLL_CNTL2 0x068
+#define HHI_GP1_PLL_CNTL3 0x06c
+#define HHI_GP1_PLL_CNTL4 0x070
+#define HHI_GP1_PLL_CNTL5 0x074
+#define HHI_GP1_PLL_CNTL6 0x078
+#define HHI_GP1_PLL_STS 0x07c
+#define HHI_PCIE_PLL_CNTL0 0x098
+#define HHI_PCIE_PLL_CNTL1 0x09c
+#define HHI_PCIE_PLL_CNTL2 0x0a0
+#define HHI_PCIE_PLL_CNTL3 0x0a4
+#define HHI_PCIE_PLL_CNTL4 0x0a8
+#define HHI_PCIE_PLL_CNTL5 0x0ac
+#define HHI_PCIE_PLL_STS 0x0b8
+#define HHI_HIFI_PLL_CNTL0 0x0d8
+#define HHI_HIFI_PLL_CNTL1 0x0dc
+#define HHI_HIFI_PLL_CNTL2 0x0e0
+#define HHI_HIFI_PLL_CNTL3 0x0e4
+#define HHI_HIFI_PLL_CNTL4 0x0e8
+#define HHI_HIFI_PLL_CNTL5 0x0ec
+#define HHI_HIFI_PLL_CNTL6 0x0f0
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12c
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_OTHER2 0x154
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
+#define HHI_VID_CLK_DIV 0x164
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_AUD_CLK_CNTL 0x178
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_TS_CLK_CNTL 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_VID_PLL_CLK_DIV 0x1a0
+#define HHI_MALI_CLK_CNTL 0x1b0
+#define HHI_VPU_CLKC_CNTL 0x1b4
+#define HHI_VPU_CLK_CNTL 0x1bc
+#define HHI_ISP_CLK_CNTL 0x1c0
+#define HHI_NNA_CLK_CNTL 0x1c8
+#define HHI_HDMI_CLK_CNTL 0x1cc
+#define HHI_VDEC_CLK_CNTL 0x1e0
+#define HHI_VDEC2_CLK_CNTL 0x1e4
+#define HHI_VDEC3_CLK_CNTL 0x1e8
+#define HHI_VDEC4_CLK_CNTL 0x1ec
+#define HHI_HDCP22_CLK_CNTL 0x1f0
+#define HHI_VAPBCLK_CNTL 0x1f4
+#define HHI_SYS_CPUB_CLK_CNTL1 0x200
+#define HHI_SYS_CPUB_CLK_CNTL 0x208
+#define HHI_VPU_CLKB_CNTL 0x20c
+#define HHI_SYS_CPU_CLK_CNTL2 0x210
+#define HHI_SYS_CPU_CLK_CNTL3 0x214
+#define HHI_SYS_CPU_CLK_CNTL4 0x218
+#define HHI_SYS_CPU_CLK_CNTL5 0x21c
+#define HHI_SYS_CPU_CLK_CNTL6 0x220
+#define HHI_GEN_CLK_CNTL 0x228
+#define HHI_VDIN_MEAS_CLK_CNTL 0x250
+#define HHI_MIPIDSI_PHY_CLK_CNTL 0x254
+#define HHI_NAND_CLK_CNTL 0x25c
+#define HHI_SD_EMMC_CLK_CNTL 0x264
+#define HHI_MPLL_CNTL0 0x278
+#define HHI_MPLL_CNTL1 0x27c
+#define HHI_MPLL_CNTL2 0x280
+#define HHI_MPLL_CNTL3 0x284
+#define HHI_MPLL_CNTL4 0x288
+#define HHI_MPLL_CNTL5 0x28c
+#define HHI_MPLL_CNTL6 0x290
+#define HHI_MPLL_CNTL7 0x294
+#define HHI_MPLL_CNTL8 0x298
+#define HHI_FIX_PLL_CNTL0 0x2a0
+#define HHI_FIX_PLL_CNTL1 0x2a4
+#define HHI_FIX_PLL_CNTL3 0x2ac
+#define HHI_SYS_PLL_CNTL0 0x2f4
+#define HHI_SYS_PLL_CNTL1 0x2f8
+#define HHI_SYS_PLL_CNTL2 0x2fc
+#define HHI_SYS_PLL_CNTL3 0x300
+#define HHI_SYS_PLL_CNTL4 0x304
+#define HHI_SYS_PLL_CNTL5 0x308
+#define HHI_SYS_PLL_CNTL6 0x30c
+#define HHI_HDMI_PLL_CNTL0 0x320
+#define HHI_HDMI_PLL_CNTL1 0x324
+#define HHI_HDMI_PLL_CNTL2 0x328
+#define HHI_HDMI_PLL_CNTL3 0x32c
+#define HHI_HDMI_PLL_CNTL4 0x330
+#define HHI_HDMI_PLL_CNTL5 0x334
+#define HHI_HDMI_PLL_CNTL6 0x338
+#define HHI_SPICC_CLK_CNTL 0x3dc
+#define HHI_SYS1_PLL_CNTL0 0x380
+#define HHI_SYS1_PLL_CNTL1 0x384
+#define HHI_SYS1_PLL_CNTL2 0x388
+#define HHI_SYS1_PLL_CNTL3 0x38c
+#define HHI_SYS1_PLL_CNTL4 0x390
+#define HHI_SYS1_PLL_CNTL5 0x394
+#define HHI_SYS1_PLL_CNTL6 0x398
+
static struct clk_regmap g12a_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -2489,7 +2598,7 @@ static const struct clk_parent_data g12a_sd_emmc_clk0_parent_data[] = {
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
- * the necessary rates for MMC and NAND operation can be acheived using
+ * the necessary rates for MMC and NAND operation can be achieved using
* g12a_ee_core or fclk_div clocks
*/
};
@@ -3753,8 +3862,8 @@ static struct clk_regmap g12a_mipi_dsi_pxclk_sel = {
};
/*
- * FIXME: Force as bypass by forcing a single /1 table entry, and doensn't on boot value
- * when setting a clock whith this node in the clock path, but doesn't garantee the divider
+ * FIXME: Forced as a bypass by a single /1 table entry; this doesn't touch the boot
+ * value when setting a clock with this node in the clock path, but doesn't guarantee the divider
* is at /1 at boot until a rate is set.
*/
static const struct clk_div_table g12a_mipi_dsi_pxclk_div_table[] = {
@@ -5126,261 +5235,6 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_MIPI_DSI_PXCLK] = &g12a_mipi_dsi_pxclk.hw,
};
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const g12a_clk_regmaps[] = {
- &g12a_clk81,
- &g12a_dos,
- &g12a_ddr,
- &g12a_audio_locker,
- &g12a_mipi_dsi_host,
- &g12a_eth_phy,
- &g12a_isa,
- &g12a_pl301,
- &g12a_periphs,
- &g12a_spicc_0,
- &g12a_i2c,
- &g12a_sana,
- &g12a_sd,
- &g12a_rng0,
- &g12a_uart0,
- &g12a_spicc_1,
- &g12a_hiu_reg,
- &g12a_mipi_dsi_phy,
- &g12a_assist_misc,
- &g12a_emmc_a,
- &g12a_emmc_b,
- &g12a_emmc_c,
- &g12a_audio_codec,
- &g12a_audio,
- &g12a_eth_core,
- &g12a_demux,
- &g12a_audio_ififo,
- &g12a_adc,
- &g12a_uart1,
- &g12a_g2d,
- &g12a_reset,
- &g12a_pcie_comb,
- &g12a_parser,
- &g12a_usb_general,
- &g12a_pcie_phy,
- &g12a_ahb_arb0,
- &g12a_ahb_data_bus,
- &g12a_ahb_ctrl_bus,
- &g12a_htx_hdcp22,
- &g12a_htx_pclk,
- &g12a_bt656,
- &g12a_usb1_to_ddr,
- &g12a_mmc_pclk,
- &g12a_uart2,
- &g12a_vpu_intr,
- &g12a_gic,
- &g12a_sd_emmc_a_clk0,
- &g12a_sd_emmc_b_clk0,
- &g12a_sd_emmc_c_clk0,
- &g12a_mpeg_clk_div,
- &g12a_sd_emmc_a_clk0_div,
- &g12a_sd_emmc_b_clk0_div,
- &g12a_sd_emmc_c_clk0_div,
- &g12a_mpeg_clk_sel,
- &g12a_sd_emmc_a_clk0_sel,
- &g12a_sd_emmc_b_clk0_sel,
- &g12a_sd_emmc_c_clk0_sel,
- &g12a_mpll0,
- &g12a_mpll1,
- &g12a_mpll2,
- &g12a_mpll3,
- &g12a_mpll0_div,
- &g12a_mpll1_div,
- &g12a_mpll2_div,
- &g12a_mpll3_div,
- &g12a_fixed_pll,
- &g12a_sys_pll,
- &g12a_gp0_pll,
- &g12a_hifi_pll,
- &g12a_vclk2_venci0,
- &g12a_vclk2_venci1,
- &g12a_vclk2_vencp0,
- &g12a_vclk2_vencp1,
- &g12a_vclk2_venct0,
- &g12a_vclk2_venct1,
- &g12a_vclk2_other,
- &g12a_vclk2_enci,
- &g12a_vclk2_encp,
- &g12a_dac_clk,
- &g12a_aoclk_gate,
- &g12a_iec958_gate,
- &g12a_enc480p,
- &g12a_rng1,
- &g12a_vclk2_enct,
- &g12a_vclk2_encl,
- &g12a_vclk2_venclmmc,
- &g12a_vclk2_vencl,
- &g12a_vclk2_other1,
- &g12a_fixed_pll_dco,
- &g12a_sys_pll_dco,
- &g12a_gp0_pll_dco,
- &g12a_hifi_pll_dco,
- &g12a_fclk_div2,
- &g12a_fclk_div3,
- &g12a_fclk_div4,
- &g12a_fclk_div5,
- &g12a_fclk_div7,
- &g12a_fclk_div2p5,
- &g12a_dma,
- &g12a_efuse,
- &g12a_rom_boot,
- &g12a_reset_sec,
- &g12a_sec_ahb_apb3,
- &g12a_vpu_0_sel,
- &g12a_vpu_0_div,
- &g12a_vpu_0,
- &g12a_vpu_1_sel,
- &g12a_vpu_1_div,
- &g12a_vpu_1,
- &g12a_vpu,
- &g12a_vapb_0_sel,
- &g12a_vapb_0_div,
- &g12a_vapb_0,
- &g12a_vapb_1_sel,
- &g12a_vapb_1_div,
- &g12a_vapb_1,
- &g12a_vapb_sel,
- &g12a_vapb,
- &g12a_hdmi_pll_dco,
- &g12a_hdmi_pll_od,
- &g12a_hdmi_pll_od2,
- &g12a_hdmi_pll,
- &g12a_vid_pll_div,
- &g12a_vid_pll_sel,
- &g12a_vid_pll,
- &g12a_vclk_sel,
- &g12a_vclk2_sel,
- &g12a_vclk_input,
- &g12a_vclk2_input,
- &g12a_vclk_div,
- &g12a_vclk2_div,
- &g12a_vclk,
- &g12a_vclk2,
- &g12a_vclk_div1,
- &g12a_vclk_div2_en,
- &g12a_vclk_div4_en,
- &g12a_vclk_div6_en,
- &g12a_vclk_div12_en,
- &g12a_vclk2_div1,
- &g12a_vclk2_div2_en,
- &g12a_vclk2_div4_en,
- &g12a_vclk2_div6_en,
- &g12a_vclk2_div12_en,
- &g12a_cts_enci_sel,
- &g12a_cts_encp_sel,
- &g12a_cts_encl_sel,
- &g12a_cts_vdac_sel,
- &g12a_hdmi_tx_sel,
- &g12a_cts_enci,
- &g12a_cts_encp,
- &g12a_cts_encl,
- &g12a_cts_vdac,
- &g12a_hdmi_tx,
- &g12a_hdmi_sel,
- &g12a_hdmi_div,
- &g12a_hdmi,
- &g12a_mali_0_sel,
- &g12a_mali_0_div,
- &g12a_mali_0,
- &g12a_mali_1_sel,
- &g12a_mali_1_div,
- &g12a_mali_1,
- &g12a_mali,
- &g12a_mpll_50m,
- &g12a_sys_pll_div16_en,
- &g12a_cpu_clk_premux0,
- &g12a_cpu_clk_mux0_div,
- &g12a_cpu_clk_postmux0,
- &g12a_cpu_clk_premux1,
- &g12a_cpu_clk_mux1_div,
- &g12a_cpu_clk_postmux1,
- &g12a_cpu_clk_dyn,
- &g12a_cpu_clk,
- &g12a_cpu_clk_div16_en,
- &g12a_cpu_clk_apb_div,
- &g12a_cpu_clk_apb,
- &g12a_cpu_clk_atb_div,
- &g12a_cpu_clk_atb,
- &g12a_cpu_clk_axi_div,
- &g12a_cpu_clk_axi,
- &g12a_cpu_clk_trace_div,
- &g12a_cpu_clk_trace,
- &g12a_pcie_pll_od,
- &g12a_pcie_pll_dco,
- &g12a_vdec_1_sel,
- &g12a_vdec_1_div,
- &g12a_vdec_1,
- &g12a_vdec_hevc_sel,
- &g12a_vdec_hevc_div,
- &g12a_vdec_hevc,
- &g12a_vdec_hevcf_sel,
- &g12a_vdec_hevcf_div,
- &g12a_vdec_hevcf,
- &g12a_ts_div,
- &g12a_ts,
- &g12b_cpu_clk,
- &g12b_sys1_pll_dco,
- &g12b_sys1_pll,
- &g12b_sys1_pll_div16_en,
- &g12b_cpub_clk_premux0,
- &g12b_cpub_clk_mux0_div,
- &g12b_cpub_clk_postmux0,
- &g12b_cpub_clk_premux1,
- &g12b_cpub_clk_mux1_div,
- &g12b_cpub_clk_postmux1,
- &g12b_cpub_clk_dyn,
- &g12b_cpub_clk,
- &g12b_cpub_clk_div16_en,
- &g12b_cpub_clk_apb_sel,
- &g12b_cpub_clk_apb,
- &g12b_cpub_clk_atb_sel,
- &g12b_cpub_clk_atb,
- &g12b_cpub_clk_axi_sel,
- &g12b_cpub_clk_axi,
- &g12b_cpub_clk_trace_sel,
- &g12b_cpub_clk_trace,
- &sm1_gp1_pll_dco,
- &sm1_gp1_pll,
- &sm1_dsu_clk_premux0,
- &sm1_dsu_clk_premux1,
- &sm1_dsu_clk_mux0_div,
- &sm1_dsu_clk_postmux0,
- &sm1_dsu_clk_mux1_div,
- &sm1_dsu_clk_postmux1,
- &sm1_dsu_clk_dyn,
- &sm1_dsu_final_clk,
- &sm1_dsu_clk,
- &sm1_cpu1_clk,
- &sm1_cpu2_clk,
- &sm1_cpu3_clk,
- &g12a_spicc0_sclk_sel,
- &g12a_spicc0_sclk_div,
- &g12a_spicc0_sclk,
- &g12a_spicc1_sclk_sel,
- &g12a_spicc1_sclk_div,
- &g12a_spicc1_sclk,
- &sm1_nna_axi_clk_sel,
- &sm1_nna_axi_clk_div,
- &sm1_nna_axi_clk,
- &sm1_nna_core_clk_sel,
- &sm1_nna_core_clk_div,
- &sm1_nna_core_clk,
- &g12a_mipi_dsi_pxclk_sel,
- &g12a_mipi_dsi_pxclk_div,
- &g12a_mipi_dsi_pxclk,
- &g12b_mipi_isp_sel,
- &g12b_mipi_isp_div,
- &g12b_mipi_isp,
- &g12b_mipi_isp_gate,
- &g12b_csi_phy1,
- &g12b_csi_phy0,
-};
-
static const struct reg_sequence g12a_init_regs[] = {
{ .reg = HHI_MPLL_CNTL0, .def = 0x00000543 },
};
@@ -5559,8 +5413,6 @@ static int meson_g12a_probe(struct platform_device *pdev)
static const struct meson_g12a_data g12a_clkc_data = {
.eeclkc_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
.hw_clks = {
.hws = g12a_hw_clks,
.num = ARRAY_SIZE(g12a_hw_clks),
@@ -5573,8 +5425,6 @@ static const struct meson_g12a_data g12a_clkc_data = {
static const struct meson_g12a_data g12b_clkc_data = {
.eeclkc_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
.hw_clks = {
.hws = g12b_hw_clks,
.num = ARRAY_SIZE(g12b_hw_clks),
@@ -5585,8 +5435,6 @@ static const struct meson_g12a_data g12b_clkc_data = {
static const struct meson_g12a_data sm1_clkc_data = {
.eeclkc_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
.hw_clks = {
.hws = sm1_hw_clks,
.num = ARRAY_SIZE(sm1_hw_clks),
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
deleted file mode 100644
index 27df99c4565a..000000000000
--- a/drivers/clk/meson/g12a.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
-/*
- * Copyright (c) 2016 Amlogic, Inc.
- * Author: Michael Turquette <mturquette@baylibre.com>
- *
- * Copyright (c) 2018 Amlogic, inc.
- * Author: Qiufang Dai <qiufang.dai@amlogic.com>
- * Author: Jian Hu <jian.hu@amlogic.com>
- *
- */
-#ifndef __G12A_H
-#define __G12A_H
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the data sheet must be multiplied by 4 before
- * adding them to the base address to get the right value.
- */
-#define HHI_MIPI_CNTL0 0x000
-#define HHI_MIPI_CNTL1 0x004
-#define HHI_MIPI_CNTL2 0x008
-#define HHI_MIPI_STS 0x00C
-#define HHI_GP0_PLL_CNTL0 0x040
-#define HHI_GP0_PLL_CNTL1 0x044
-#define HHI_GP0_PLL_CNTL2 0x048
-#define HHI_GP0_PLL_CNTL3 0x04C
-#define HHI_GP0_PLL_CNTL4 0x050
-#define HHI_GP0_PLL_CNTL5 0x054
-#define HHI_GP0_PLL_CNTL6 0x058
-#define HHI_GP0_PLL_STS 0x05C
-#define HHI_GP1_PLL_CNTL0 0x060
-#define HHI_GP1_PLL_CNTL1 0x064
-#define HHI_GP1_PLL_CNTL2 0x068
-#define HHI_GP1_PLL_CNTL3 0x06C
-#define HHI_GP1_PLL_CNTL4 0x070
-#define HHI_GP1_PLL_CNTL5 0x074
-#define HHI_GP1_PLL_CNTL6 0x078
-#define HHI_GP1_PLL_STS 0x07C
-#define HHI_PCIE_PLL_CNTL0 0x098
-#define HHI_PCIE_PLL_CNTL1 0x09C
-#define HHI_PCIE_PLL_CNTL2 0x0A0
-#define HHI_PCIE_PLL_CNTL3 0x0A4
-#define HHI_PCIE_PLL_CNTL4 0x0A8
-#define HHI_PCIE_PLL_CNTL5 0x0AC
-#define HHI_PCIE_PLL_STS 0x0B8
-#define HHI_HIFI_PLL_CNTL0 0x0D8
-#define HHI_HIFI_PLL_CNTL1 0x0DC
-#define HHI_HIFI_PLL_CNTL2 0x0E0
-#define HHI_HIFI_PLL_CNTL3 0x0E4
-#define HHI_HIFI_PLL_CNTL4 0x0E8
-#define HHI_HIFI_PLL_CNTL5 0x0EC
-#define HHI_HIFI_PLL_CNTL6 0x0F0
-#define HHI_VIID_CLK_DIV 0x128
-#define HHI_VIID_CLK_CNTL 0x12C
-#define HHI_GCLK_MPEG0 0x140
-#define HHI_GCLK_MPEG1 0x144
-#define HHI_GCLK_MPEG2 0x148
-#define HHI_GCLK_OTHER 0x150
-#define HHI_GCLK_OTHER2 0x154
-#define HHI_SYS_CPU_CLK_CNTL1 0x15c
-#define HHI_VID_CLK_DIV 0x164
-#define HHI_MPEG_CLK_CNTL 0x174
-#define HHI_AUD_CLK_CNTL 0x178
-#define HHI_VID_CLK_CNTL 0x17c
-#define HHI_TS_CLK_CNTL 0x190
-#define HHI_VID_CLK_CNTL2 0x194
-#define HHI_SYS_CPU_CLK_CNTL0 0x19c
-#define HHI_VID_PLL_CLK_DIV 0x1A0
-#define HHI_MALI_CLK_CNTL 0x1b0
-#define HHI_VPU_CLKC_CNTL 0x1b4
-#define HHI_VPU_CLK_CNTL 0x1bC
-#define HHI_ISP_CLK_CNTL 0x1C0
-#define HHI_NNA_CLK_CNTL 0x1C8
-#define HHI_HDMI_CLK_CNTL 0x1CC
-#define HHI_VDEC_CLK_CNTL 0x1E0
-#define HHI_VDEC2_CLK_CNTL 0x1E4
-#define HHI_VDEC3_CLK_CNTL 0x1E8
-#define HHI_VDEC4_CLK_CNTL 0x1EC
-#define HHI_HDCP22_CLK_CNTL 0x1F0
-#define HHI_VAPBCLK_CNTL 0x1F4
-#define HHI_SYS_CPUB_CLK_CNTL1 0x200
-#define HHI_SYS_CPUB_CLK_CNTL 0x208
-#define HHI_VPU_CLKB_CNTL 0x20C
-#define HHI_SYS_CPU_CLK_CNTL2 0x210
-#define HHI_SYS_CPU_CLK_CNTL3 0x214
-#define HHI_SYS_CPU_CLK_CNTL4 0x218
-#define HHI_SYS_CPU_CLK_CNTL5 0x21c
-#define HHI_SYS_CPU_CLK_CNTL6 0x220
-#define HHI_GEN_CLK_CNTL 0x228
-#define HHI_VDIN_MEAS_CLK_CNTL 0x250
-#define HHI_MIPIDSI_PHY_CLK_CNTL 0x254
-#define HHI_NAND_CLK_CNTL 0x25C
-#define HHI_SD_EMMC_CLK_CNTL 0x264
-#define HHI_MPLL_CNTL0 0x278
-#define HHI_MPLL_CNTL1 0x27C
-#define HHI_MPLL_CNTL2 0x280
-#define HHI_MPLL_CNTL3 0x284
-#define HHI_MPLL_CNTL4 0x288
-#define HHI_MPLL_CNTL5 0x28c
-#define HHI_MPLL_CNTL6 0x290
-#define HHI_MPLL_CNTL7 0x294
-#define HHI_MPLL_CNTL8 0x298
-#define HHI_FIX_PLL_CNTL0 0x2A0
-#define HHI_FIX_PLL_CNTL1 0x2A4
-#define HHI_FIX_PLL_CNTL3 0x2AC
-#define HHI_SYS_PLL_CNTL0 0x2f4
-#define HHI_SYS_PLL_CNTL1 0x2f8
-#define HHI_SYS_PLL_CNTL2 0x2fc
-#define HHI_SYS_PLL_CNTL3 0x300
-#define HHI_SYS_PLL_CNTL4 0x304
-#define HHI_SYS_PLL_CNTL5 0x308
-#define HHI_SYS_PLL_CNTL6 0x30c
-#define HHI_HDMI_PLL_CNTL0 0x320
-#define HHI_HDMI_PLL_CNTL1 0x324
-#define HHI_HDMI_PLL_CNTL2 0x328
-#define HHI_HDMI_PLL_CNTL3 0x32c
-#define HHI_HDMI_PLL_CNTL4 0x330
-#define HHI_HDMI_PLL_CNTL5 0x334
-#define HHI_HDMI_PLL_CNTL6 0x338
-#define HHI_SPICC_CLK_CNTL 0x3dc
-#define HHI_SYS1_PLL_CNTL0 0x380
-#define HHI_SYS1_PLL_CNTL1 0x384
-#define HHI_SYS1_PLL_CNTL2 0x388
-#define HHI_SYS1_PLL_CNTL3 0x38c
-#define HHI_SYS1_PLL_CNTL4 0x390
-#define HHI_SYS1_PLL_CNTL5 0x394
-#define HHI_SYS1_PLL_CNTL6 0x398
-
-#endif /* __G12A_H */
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 43940232f718..f075fbd450f3 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -237,23 +237,6 @@ static const unsigned int gxbb_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_regmap *gxbb_aoclk[] = {
- &remote_ao,
- &i2c_master_ao,
- &i2c_slave_ao,
- &uart1_ao,
- &uart2_ao,
- &ir_blaster_ao,
- &ao_cts_oscin,
- &ao_32k_pre,
- &ao_32k_div,
- &ao_32k_sel,
- &ao_32k,
- &ao_cts_rtc_oscin,
- &ao_clk81,
- &ao_cts_cec,
-};
-
static struct clk_hw *gxbb_aoclk_hw_clks[] = {
[CLKID_AO_REMOTE] = &remote_ao.hw,
[CLKID_AO_I2C_MASTER] = &i2c_master_ao.hw,
@@ -275,8 +258,6 @@ static const struct meson_aoclk_data gxbb_aoclkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
.num_reset = ARRAY_SIZE(gxbb_aoclk_reset),
.reset = gxbb_aoclk_reset,
- .num_clks = ARRAY_SIZE(gxbb_aoclk),
- .clks = gxbb_aoclk,
.hw_clks = {
.hws = gxbb_aoclk_hw_clks,
.num = ARRAY_SIZE(gxbb_aoclk_hw_clks),
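
After this change meson_aoclk_data carries only the reset data and the hw clock table.
A sketch of a minimal instance as it now looks (foo_* names hypothetical; the fields
match the meson-aoclk.h hunk later in this patch):

	static const struct meson_aoclk_data foo_aoclkc_data = {
		.reset_reg = AO_RTI_GEN_CNTL_REG0,
		.num_reset = ARRAY_SIZE(foo_aoclk_reset),
		.reset     = foo_aoclk_reset,
		.hw_clks   = {
			.hws = foo_aoclk_hw_clks,
			.num = ARRAY_SIZE(foo_aoclk_hw_clks),
		},
	};
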
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 3abb44a2532b..362d1b87ea5b 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -10,7 +10,6 @@
#include <linux/platform_device.h>
#include <linux/module.h>
-#include "gxbb.h"
#include "clk-regmap.h"
#include "clk-pll.h"
#include "clk-mpll.h"
@@ -19,6 +18,104 @@
#include <dt-bindings/clock/gxbb-clkc.h>
+#define SCR 0x2c
+#define TIMEOUT_VALUE 0x3c
+
+#define HHI_GP0_PLL_CNTL 0x40
+#define HHI_GP0_PLL_CNTL2 0x44
+#define HHI_GP0_PLL_CNTL3 0x48
+#define HHI_GP0_PLL_CNTL4 0x4c
+#define HHI_GP0_PLL_CNTL5 0x50
+#define HHI_GP0_PLL_CNTL1 0x58
+
+#define HHI_XTAL_DIVN_CNTL 0xbc
+#define HHI_TIMER90K 0xec
+
+#define HHI_MEM_PD_REG0 0x100
+#define HHI_MEM_PD_REG1 0x104
+#define HHI_VPU_MEM_PD_REG1 0x108
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12c
+
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_AO 0x154
+#define HHI_SYS_OSCIN_CNTL 0x158
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
+#define HHI_SYS_CPU_RESET_CNTL 0x160
+#define HHI_VID_CLK_DIV 0x164
+
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_AUD_CLK_CNTL 0x178
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_AUD_CLK_CNTL2 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_VID_PLL_CLK_DIV 0x1a0
+#define HHI_AUD_CLK_CNTL3 0x1a4
+#define HHI_MALI_CLK_CNTL 0x1b0
+#define HHI_VPU_CLK_CNTL 0x1bc
+
+#define HHI_HDMI_CLK_CNTL 0x1cc
+#define HHI_VDEC_CLK_CNTL 0x1e0
+#define HHI_VDEC2_CLK_CNTL 0x1e4
+#define HHI_VDEC3_CLK_CNTL 0x1e8
+#define HHI_VDEC4_CLK_CNTL 0x1ec
+#define HHI_HDCP22_CLK_CNTL 0x1f0
+#define HHI_VAPBCLK_CNTL 0x1f4
+
+#define HHI_VPU_CLKB_CNTL 0x20c
+#define HHI_USB_CLK_CNTL 0x220
+#define HHI_32K_CLK_CNTL 0x224
+#define HHI_GEN_CLK_CNTL 0x228
+
+#define HHI_PCM_CLK_CNTL 0x258
+#define HHI_NAND_CLK_CNTL 0x25c
+#define HHI_SD_EMMC_CLK_CNTL 0x264
+
+#define HHI_MPLL_CNTL 0x280
+#define HHI_MPLL_CNTL2 0x284
+#define HHI_MPLL_CNTL3 0x288
+#define HHI_MPLL_CNTL4 0x28c
+#define HHI_MPLL_CNTL5 0x290
+#define HHI_MPLL_CNTL6 0x294
+#define HHI_MPLL_CNTL7 0x298
+#define HHI_MPLL_CNTL8 0x29c
+#define HHI_MPLL_CNTL9 0x2a0
+#define HHI_MPLL_CNTL10 0x2a4
+
+#define HHI_MPLL3_CNTL0 0x2e0
+#define HHI_MPLL3_CNTL1 0x2e4
+#define HHI_VDAC_CNTL0 0x2f4
+#define HHI_VDAC_CNTL1 0x2f8
+
+#define HHI_SYS_PLL_CNTL 0x300
+#define HHI_SYS_PLL_CNTL2 0x304
+#define HHI_SYS_PLL_CNTL3 0x308
+#define HHI_SYS_PLL_CNTL4 0x30c
+#define HHI_SYS_PLL_CNTL5 0x310
+#define HHI_DPLL_TOP_I 0x318
+#define HHI_DPLL_TOP2_I 0x31c
+#define HHI_HDMI_PLL_CNTL 0x320
+#define HHI_HDMI_PLL_CNTL2 0x324
+#define HHI_HDMI_PLL_CNTL3 0x328
+#define HHI_HDMI_PLL_CNTL4 0x32c
+#define HHI_HDMI_PLL_CNTL5 0x330
+#define HHI_HDMI_PLL_CNTL6 0x334
+#define HHI_HDMI_PLL_CNTL_I 0x338
+#define HHI_HDMI_PLL_CNTL7 0x33c
+
+#define HHI_HDMI_PHY_CNTL0 0x3a0
+#define HHI_HDMI_PHY_CNTL1 0x3a4
+#define HHI_HDMI_PHY_CNTL2 0x3a8
+#define HHI_HDMI_PHY_CNTL3 0x3ac
+
+#define HHI_VID_LOCK_CLK_CNTL 0x3c8
+#define HHI_BT656_CLK_CNTL 0x3d4
+#define HHI_SAR_CLK_CNTL 0x3d8
+
static const struct pll_params_table gxbb_gp0_pll_params_table[] = {
PLL_PARAMS(32, 1),
PLL_PARAMS(33, 1),
@@ -1335,7 +1432,7 @@ static const struct clk_parent_data gxbb_sd_emmc_clk0_parent_data[] = {
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
- * the necessary rates for MMC and NAND operation can be acheived using
+ * the necessary rates for MMC and NAND operation can be achieved using
* xtal or fclk_div clocks
*/
};
@@ -3140,398 +3237,7 @@ static struct clk_hw *gxl_hw_clks[] = {
[CLKID_ACODEC] = &gxl_acodec.hw,
};
-static struct clk_regmap *const gxbb_clk_regmaps[] = {
- &gxbb_clk81,
- &gxbb_ddr,
- &gxbb_dos,
- &gxbb_isa,
- &gxbb_pl301,
- &gxbb_periphs,
- &gxbb_spicc,
- &gxbb_i2c,
- &gxbb_sar_adc,
- &gxbb_smart_card,
- &gxbb_rng0,
- &gxbb_uart0,
- &gxbb_sdhc,
- &gxbb_stream,
- &gxbb_async_fifo,
- &gxbb_sdio,
- &gxbb_abuf,
- &gxbb_hiu_iface,
- &gxbb_assist_misc,
- &gxbb_spi,
- &gxbb_i2s_spdif,
- &gxbb_eth,
- &gxbb_demux,
- &gxbb_aiu_glue,
- &gxbb_iec958,
- &gxbb_i2s_out,
- &gxbb_amclk,
- &gxbb_aififo2,
- &gxbb_mixer,
- &gxbb_mixer_iface,
- &gxbb_adc,
- &gxbb_blkmv,
- &gxbb_aiu,
- &gxbb_uart1,
- &gxbb_g2d,
- &gxbb_usb0,
- &gxbb_usb1,
- &gxbb_reset,
- &gxbb_nand,
- &gxbb_dos_parser,
- &gxbb_usb,
- &gxbb_vdin1,
- &gxbb_ahb_arb0,
- &gxbb_efuse,
- &gxbb_boot_rom,
- &gxbb_ahb_data_bus,
- &gxbb_ahb_ctrl_bus,
- &gxbb_hdmi_intr_sync,
- &gxbb_hdmi_pclk,
- &gxbb_usb1_ddr_bridge,
- &gxbb_usb0_ddr_bridge,
- &gxbb_mmc_pclk,
- &gxbb_dvin,
- &gxbb_uart2,
- &gxbb_sana,
- &gxbb_vpu_intr,
- &gxbb_sec_ahb_ahb3_bridge,
- &gxbb_clk81_a53,
- &gxbb_vclk2_venci0,
- &gxbb_vclk2_venci1,
- &gxbb_vclk2_vencp0,
- &gxbb_vclk2_vencp1,
- &gxbb_gclk_venci_int0,
- &gxbb_gclk_vencp_int,
- &gxbb_dac_clk,
- &gxbb_aoclk_gate,
- &gxbb_iec958_gate,
- &gxbb_enc480p,
- &gxbb_rng1,
- &gxbb_gclk_venci_int1,
- &gxbb_vclk2_venclmcc,
- &gxbb_vclk2_vencl,
- &gxbb_vclk_other,
- &gxbb_edp,
- &gxbb_ao_media_cpu,
- &gxbb_ao_ahb_sram,
- &gxbb_ao_ahb_bus,
- &gxbb_ao_iface,
- &gxbb_ao_i2c,
- &gxbb_emmc_a,
- &gxbb_emmc_b,
- &gxbb_emmc_c,
- &gxbb_sar_adc_clk,
- &gxbb_mali_0,
- &gxbb_mali_1,
- &gxbb_cts_amclk,
- &gxbb_cts_mclk_i958,
- &gxbb_32k_clk,
- &gxbb_sd_emmc_a_clk0,
- &gxbb_sd_emmc_b_clk0,
- &gxbb_sd_emmc_c_clk0,
- &gxbb_vpu_0,
- &gxbb_vpu_1,
- &gxbb_vapb_0,
- &gxbb_vapb_1,
- &gxbb_vapb,
- &gxbb_mpeg_clk_div,
- &gxbb_sar_adc_clk_div,
- &gxbb_mali_0_div,
- &gxbb_mali_1_div,
- &gxbb_cts_mclk_i958_div,
- &gxbb_32k_clk_div,
- &gxbb_sd_emmc_a_clk0_div,
- &gxbb_sd_emmc_b_clk0_div,
- &gxbb_sd_emmc_c_clk0_div,
- &gxbb_vpu_0_div,
- &gxbb_vpu_1_div,
- &gxbb_vapb_0_div,
- &gxbb_vapb_1_div,
- &gxbb_mpeg_clk_sel,
- &gxbb_sar_adc_clk_sel,
- &gxbb_mali_0_sel,
- &gxbb_mali_1_sel,
- &gxbb_mali,
- &gxbb_cts_amclk_sel,
- &gxbb_cts_mclk_i958_sel,
- &gxbb_cts_i958,
- &gxbb_32k_clk_sel,
- &gxbb_sd_emmc_a_clk0_sel,
- &gxbb_sd_emmc_b_clk0_sel,
- &gxbb_sd_emmc_c_clk0_sel,
- &gxbb_vpu_0_sel,
- &gxbb_vpu_1_sel,
- &gxbb_vpu,
- &gxbb_vapb_0_sel,
- &gxbb_vapb_1_sel,
- &gxbb_vapb_sel,
- &gxbb_mpll0,
- &gxbb_mpll1,
- &gxbb_mpll2,
- &gxbb_mpll0_div,
- &gxbb_mpll1_div,
- &gxbb_mpll2_div,
- &gxbb_cts_amclk_div,
- &gxbb_fixed_pll,
- &gxbb_sys_pll,
- &gxbb_mpll_prediv,
- &gxbb_fclk_div2,
- &gxbb_fclk_div3,
- &gxbb_fclk_div4,
- &gxbb_fclk_div5,
- &gxbb_fclk_div7,
- &gxbb_vdec_1_sel,
- &gxbb_vdec_1_div,
- &gxbb_vdec_1,
- &gxbb_vdec_hevc_sel,
- &gxbb_vdec_hevc_div,
- &gxbb_vdec_hevc,
- &gxbb_gen_clk_sel,
- &gxbb_gen_clk_div,
- &gxbb_gen_clk,
- &gxbb_fixed_pll_dco,
- &gxbb_sys_pll_dco,
- &gxbb_gp0_pll,
- &gxbb_vid_pll,
- &gxbb_vid_pll_sel,
- &gxbb_vid_pll_div,
- &gxbb_vclk,
- &gxbb_vclk_sel,
- &gxbb_vclk_div,
- &gxbb_vclk_input,
- &gxbb_vclk_div1,
- &gxbb_vclk_div2_en,
- &gxbb_vclk_div4_en,
- &gxbb_vclk_div6_en,
- &gxbb_vclk_div12_en,
- &gxbb_vclk2,
- &gxbb_vclk2_sel,
- &gxbb_vclk2_div,
- &gxbb_vclk2_input,
- &gxbb_vclk2_div1,
- &gxbb_vclk2_div2_en,
- &gxbb_vclk2_div4_en,
- &gxbb_vclk2_div6_en,
- &gxbb_vclk2_div12_en,
- &gxbb_cts_enci,
- &gxbb_cts_enci_sel,
- &gxbb_cts_encp,
- &gxbb_cts_encp_sel,
- &gxbb_cts_vdac,
- &gxbb_cts_vdac_sel,
- &gxbb_hdmi_tx,
- &gxbb_hdmi_tx_sel,
- &gxbb_hdmi_sel,
- &gxbb_hdmi_div,
- &gxbb_hdmi,
- &gxbb_gp0_pll_dco,
- &gxbb_hdmi_pll,
- &gxbb_hdmi_pll_od,
- &gxbb_hdmi_pll_od2,
- &gxbb_hdmi_pll_dco,
-};
-
-static struct clk_regmap *const gxl_clk_regmaps[] = {
- &gxbb_clk81,
- &gxbb_ddr,
- &gxbb_dos,
- &gxbb_isa,
- &gxbb_pl301,
- &gxbb_periphs,
- &gxbb_spicc,
- &gxbb_i2c,
- &gxbb_sar_adc,
- &gxbb_smart_card,
- &gxbb_rng0,
- &gxbb_uart0,
- &gxbb_sdhc,
- &gxbb_stream,
- &gxbb_async_fifo,
- &gxbb_sdio,
- &gxbb_abuf,
- &gxbb_hiu_iface,
- &gxbb_assist_misc,
- &gxbb_spi,
- &gxbb_i2s_spdif,
- &gxbb_eth,
- &gxbb_demux,
- &gxbb_aiu_glue,
- &gxbb_iec958,
- &gxbb_i2s_out,
- &gxbb_amclk,
- &gxbb_aififo2,
- &gxbb_mixer,
- &gxbb_mixer_iface,
- &gxbb_adc,
- &gxbb_blkmv,
- &gxbb_aiu,
- &gxbb_uart1,
- &gxbb_g2d,
- &gxbb_usb0,
- &gxbb_usb1,
- &gxbb_reset,
- &gxbb_nand,
- &gxbb_dos_parser,
- &gxbb_usb,
- &gxbb_vdin1,
- &gxbb_ahb_arb0,
- &gxbb_efuse,
- &gxbb_boot_rom,
- &gxbb_ahb_data_bus,
- &gxbb_ahb_ctrl_bus,
- &gxbb_hdmi_intr_sync,
- &gxbb_hdmi_pclk,
- &gxbb_usb1_ddr_bridge,
- &gxbb_usb0_ddr_bridge,
- &gxbb_mmc_pclk,
- &gxbb_dvin,
- &gxbb_uart2,
- &gxbb_sana,
- &gxbb_vpu_intr,
- &gxbb_sec_ahb_ahb3_bridge,
- &gxbb_clk81_a53,
- &gxbb_vclk2_venci0,
- &gxbb_vclk2_venci1,
- &gxbb_vclk2_vencp0,
- &gxbb_vclk2_vencp1,
- &gxbb_gclk_venci_int0,
- &gxbb_gclk_vencp_int,
- &gxbb_dac_clk,
- &gxbb_aoclk_gate,
- &gxbb_iec958_gate,
- &gxbb_enc480p,
- &gxbb_rng1,
- &gxbb_gclk_venci_int1,
- &gxbb_vclk2_venclmcc,
- &gxbb_vclk2_vencl,
- &gxbb_vclk_other,
- &gxbb_edp,
- &gxbb_ao_media_cpu,
- &gxbb_ao_ahb_sram,
- &gxbb_ao_ahb_bus,
- &gxbb_ao_iface,
- &gxbb_ao_i2c,
- &gxbb_emmc_a,
- &gxbb_emmc_b,
- &gxbb_emmc_c,
- &gxbb_sar_adc_clk,
- &gxbb_mali_0,
- &gxbb_mali_1,
- &gxbb_cts_amclk,
- &gxbb_cts_mclk_i958,
- &gxbb_32k_clk,
- &gxbb_sd_emmc_a_clk0,
- &gxbb_sd_emmc_b_clk0,
- &gxbb_sd_emmc_c_clk0,
- &gxbb_vpu_0,
- &gxbb_vpu_1,
- &gxbb_vapb_0,
- &gxbb_vapb_1,
- &gxbb_vapb,
- &gxbb_mpeg_clk_div,
- &gxbb_sar_adc_clk_div,
- &gxbb_mali_0_div,
- &gxbb_mali_1_div,
- &gxbb_cts_mclk_i958_div,
- &gxbb_32k_clk_div,
- &gxbb_sd_emmc_a_clk0_div,
- &gxbb_sd_emmc_b_clk0_div,
- &gxbb_sd_emmc_c_clk0_div,
- &gxbb_vpu_0_div,
- &gxbb_vpu_1_div,
- &gxbb_vapb_0_div,
- &gxbb_vapb_1_div,
- &gxbb_mpeg_clk_sel,
- &gxbb_sar_adc_clk_sel,
- &gxbb_mali_0_sel,
- &gxbb_mali_1_sel,
- &gxbb_mali,
- &gxbb_cts_amclk_sel,
- &gxbb_cts_mclk_i958_sel,
- &gxbb_cts_i958,
- &gxbb_32k_clk_sel,
- &gxbb_sd_emmc_a_clk0_sel,
- &gxbb_sd_emmc_b_clk0_sel,
- &gxbb_sd_emmc_c_clk0_sel,
- &gxbb_vpu_0_sel,
- &gxbb_vpu_1_sel,
- &gxbb_vpu,
- &gxbb_vapb_0_sel,
- &gxbb_vapb_1_sel,
- &gxbb_vapb_sel,
- &gxbb_mpll0,
- &gxbb_mpll1,
- &gxbb_mpll2,
- &gxl_mpll0_div,
- &gxbb_mpll1_div,
- &gxbb_mpll2_div,
- &gxbb_cts_amclk_div,
- &gxbb_fixed_pll,
- &gxbb_sys_pll,
- &gxbb_mpll_prediv,
- &gxbb_fclk_div2,
- &gxbb_fclk_div3,
- &gxbb_fclk_div4,
- &gxbb_fclk_div5,
- &gxbb_fclk_div7,
- &gxbb_vdec_1_sel,
- &gxbb_vdec_1_div,
- &gxbb_vdec_1,
- &gxbb_vdec_hevc_sel,
- &gxbb_vdec_hevc_div,
- &gxbb_vdec_hevc,
- &gxbb_gen_clk_sel,
- &gxbb_gen_clk_div,
- &gxbb_gen_clk,
- &gxbb_fixed_pll_dco,
- &gxbb_sys_pll_dco,
- &gxbb_gp0_pll,
- &gxbb_vid_pll,
- &gxbb_vid_pll_sel,
- &gxbb_vid_pll_div,
- &gxbb_vclk,
- &gxbb_vclk_sel,
- &gxbb_vclk_div,
- &gxbb_vclk_input,
- &gxbb_vclk_div1,
- &gxbb_vclk_div2_en,
- &gxbb_vclk_div4_en,
- &gxbb_vclk_div6_en,
- &gxbb_vclk_div12_en,
- &gxbb_vclk2,
- &gxbb_vclk2_sel,
- &gxbb_vclk2_div,
- &gxbb_vclk2_input,
- &gxbb_vclk2_div1,
- &gxbb_vclk2_div2_en,
- &gxbb_vclk2_div4_en,
- &gxbb_vclk2_div6_en,
- &gxbb_vclk2_div12_en,
- &gxbb_cts_enci,
- &gxbb_cts_enci_sel,
- &gxbb_cts_encp,
- &gxbb_cts_encp_sel,
- &gxbb_cts_vdac,
- &gxbb_cts_vdac_sel,
- &gxbb_hdmi_tx,
- &gxbb_hdmi_tx_sel,
- &gxbb_hdmi_sel,
- &gxbb_hdmi_div,
- &gxbb_hdmi,
- &gxl_gp0_pll_dco,
- &gxl_hdmi_pll,
- &gxl_hdmi_pll_od,
- &gxl_hdmi_pll_od2,
- &gxl_hdmi_pll_dco,
- &gxl_acodec,
-};
-
static const struct meson_eeclkc_data gxbb_clkc_data = {
- .regmap_clks = gxbb_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(gxbb_clk_regmaps),
.hw_clks = {
.hws = gxbb_hw_clks,
.num = ARRAY_SIZE(gxbb_hw_clks),
@@ -3539,8 +3245,6 @@ static const struct meson_eeclkc_data gxbb_clkc_data = {
};
static const struct meson_eeclkc_data gxl_clkc_data = {
- .regmap_clks = gxl_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(gxl_clk_regmaps),
.hw_clks = {
.hws = gxl_hw_clks,
.num = ARRAY_SIZE(gxl_hw_clks),
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
deleted file mode 100644
index ba5f39a8d746..000000000000
--- a/drivers/clk/meson/gxbb.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
-/*
- * Copyright (c) 2016 AmLogic, Inc.
- * Author: Michael Turquette <mturquette@baylibre.com>
- */
-
-#ifndef __GXBB_H
-#define __GXBB_H
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the data sheet are listed in comment blocks below.
- * Those offsets must be multiplied by 4 before adding them to the base address
- * to get the right value
- */
-#define SCR 0x2C /* 0x0b offset in data sheet */
-#define TIMEOUT_VALUE 0x3c /* 0x0f offset in data sheet */
-
-#define HHI_GP0_PLL_CNTL 0x40 /* 0x10 offset in data sheet */
-#define HHI_GP0_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */
-#define HHI_GP0_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */
-#define HHI_GP0_PLL_CNTL4 0x4c /* 0x13 offset in data sheet */
-#define HHI_GP0_PLL_CNTL5 0x50 /* 0x14 offset in data sheet */
-#define HHI_GP0_PLL_CNTL1 0x58 /* 0x16 offset in data sheet */
-
-#define HHI_XTAL_DIVN_CNTL 0xbc /* 0x2f offset in data sheet */
-#define HHI_TIMER90K 0xec /* 0x3b offset in data sheet */
-
-#define HHI_MEM_PD_REG0 0x100 /* 0x40 offset in data sheet */
-#define HHI_MEM_PD_REG1 0x104 /* 0x41 offset in data sheet */
-#define HHI_VPU_MEM_PD_REG1 0x108 /* 0x42 offset in data sheet */
-#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
-#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
-
-#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
-#define HHI_GCLK_MPEG1 0x144 /* 0x51 offset in data sheet */
-#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
-#define HHI_GCLK_OTHER 0x150 /* 0x54 offset in data sheet */
-#define HHI_GCLK_AO 0x154 /* 0x55 offset in data sheet */
-#define HHI_SYS_OSCIN_CNTL 0x158 /* 0x56 offset in data sheet */
-#define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */
-#define HHI_SYS_CPU_RESET_CNTL 0x160 /* 0x58 offset in data sheet */
-#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
-
-#define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */
-#define HHI_AUD_CLK_CNTL 0x178 /* 0x5e offset in data sheet */
-#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
-#define HHI_AUD_CLK_CNTL2 0x190 /* 0x64 offset in data sheet */
-#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
-#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
-#define HHI_VID_PLL_CLK_DIV 0x1a0 /* 0x68 offset in data sheet */
-#define HHI_AUD_CLK_CNTL3 0x1a4 /* 0x69 offset in data sheet */
-#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
-#define HHI_VPU_CLK_CNTL 0x1bC /* 0x6f offset in data sheet */
-
-#define HHI_HDMI_CLK_CNTL 0x1CC /* 0x73 offset in data sheet */
-#define HHI_VDEC_CLK_CNTL 0x1E0 /* 0x78 offset in data sheet */
-#define HHI_VDEC2_CLK_CNTL 0x1E4 /* 0x79 offset in data sheet */
-#define HHI_VDEC3_CLK_CNTL 0x1E8 /* 0x7a offset in data sheet */
-#define HHI_VDEC4_CLK_CNTL 0x1EC /* 0x7b offset in data sheet */
-#define HHI_HDCP22_CLK_CNTL 0x1F0 /* 0x7c offset in data sheet */
-#define HHI_VAPBCLK_CNTL 0x1F4 /* 0x7d offset in data sheet */
-
-#define HHI_VPU_CLKB_CNTL 0x20C /* 0x83 offset in data sheet */
-#define HHI_USB_CLK_CNTL 0x220 /* 0x88 offset in data sheet */
-#define HHI_32K_CLK_CNTL 0x224 /* 0x89 offset in data sheet */
-#define HHI_GEN_CLK_CNTL 0x228 /* 0x8a offset in data sheet */
-
-#define HHI_PCM_CLK_CNTL 0x258 /* 0x96 offset in data sheet */
-#define HHI_NAND_CLK_CNTL 0x25C /* 0x97 offset in data sheet */
-#define HHI_SD_EMMC_CLK_CNTL 0x264 /* 0x99 offset in data sheet */
-
-#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
-#define HHI_MPLL_CNTL2 0x284 /* 0xa1 offset in data sheet */
-#define HHI_MPLL_CNTL3 0x288 /* 0xa2 offset in data sheet */
-#define HHI_MPLL_CNTL4 0x28C /* 0xa3 offset in data sheet */
-#define HHI_MPLL_CNTL5 0x290 /* 0xa4 offset in data sheet */
-#define HHI_MPLL_CNTL6 0x294 /* 0xa5 offset in data sheet */
-#define HHI_MPLL_CNTL7 0x298 /* MP0, 0xa6 offset in data sheet */
-#define HHI_MPLL_CNTL8 0x29C /* MP1, 0xa7 offset in data sheet */
-#define HHI_MPLL_CNTL9 0x2A0 /* MP2, 0xa8 offset in data sheet */
-#define HHI_MPLL_CNTL10 0x2A4 /* MP2, 0xa9 offset in data sheet */
-
-#define HHI_MPLL3_CNTL0 0x2E0 /* 0xb8 offset in data sheet */
-#define HHI_MPLL3_CNTL1 0x2E4 /* 0xb9 offset in data sheet */
-#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
-#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
-
-#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
-#define HHI_SYS_PLL_CNTL2 0x304 /* 0xc1 offset in data sheet */
-#define HHI_SYS_PLL_CNTL3 0x308 /* 0xc2 offset in data sheet */
-#define HHI_SYS_PLL_CNTL4 0x30c /* 0xc3 offset in data sheet */
-#define HHI_SYS_PLL_CNTL5 0x310 /* 0xc4 offset in data sheet */
-#define HHI_DPLL_TOP_I 0x318 /* 0xc6 offset in data sheet */
-#define HHI_DPLL_TOP2_I 0x31C /* 0xc7 offset in data sheet */
-#define HHI_HDMI_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
-#define HHI_HDMI_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
-#define HHI_HDMI_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
-#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */
-#define HHI_HDMI_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
-#define HHI_HDMI_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
-#define HHI_HDMI_PLL_CNTL_I 0x338 /* 0xce offset in data sheet */
-#define HHI_HDMI_PLL_CNTL7 0x33C /* 0xcf offset in data sheet */
-
-#define HHI_HDMI_PHY_CNTL0 0x3A0 /* 0xe8 offset in data sheet */
-#define HHI_HDMI_PHY_CNTL1 0x3A4 /* 0xe9 offset in data sheet */
-#define HHI_HDMI_PHY_CNTL2 0x3A8 /* 0xea offset in data sheet */
-#define HHI_HDMI_PHY_CNTL3 0x3AC /* 0xeb offset in data sheet */
-
-#define HHI_VID_LOCK_CLK_CNTL 0x3C8 /* 0xf2 offset in data sheet */
-#define HHI_BT656_CLK_CNTL 0x3D4 /* 0xf5 offset in data sheet */
-#define HHI_SAR_CLK_CNTL 0x3D8 /* 0xf6 offset in data sheet */
-
-#endif /* __GXBB_H */
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 995be51987f4..894c02fda072 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include "meson-aoclk.h"
+#include "clk-regmap.h"
static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev,
unsigned long id)
@@ -70,10 +71,6 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return ret;
}
- /* Populate regmap */
- for (clkid = 0; clkid < data->num_clks; clkid++)
- data->clks[clkid]->map = regmap;
-
/* Register all clks */
for (clkid = 0; clkid < data->hw_clks.num; clkid++) {
if (!data->hw_clks.hws[clkid])
diff --git a/drivers/clk/meson/meson-aoclk.h b/drivers/clk/meson/meson-aoclk.h
index 308be3e4814a..ea5fc61308af 100644
--- a/drivers/clk/meson/meson-aoclk.h
+++ b/drivers/clk/meson/meson-aoclk.h
@@ -23,8 +23,6 @@ struct meson_aoclk_data {
const unsigned int reset_reg;
const int num_reset;
const unsigned int *reset;
- const int num_clks;
- struct clk_regmap **clks;
struct meson_clk_hw_data hw_clks;
};
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
index 3053ee7425eb..6236bf970d79 100644
--- a/drivers/clk/meson/meson-eeclk.c
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -39,10 +39,6 @@ int meson_eeclkc_probe(struct platform_device *pdev)
if (data->init_count)
regmap_multi_reg_write(map, data->init_regs, data->init_count);
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < data->regmap_clk_num; i++)
- data->regmap_clks[i]->map = map;
-
for (i = 0; i < data->hw_clks.num; i++) {
/* array might be sparse */
if (!data->hw_clks.hws[i])
diff --git a/drivers/clk/meson/meson-eeclk.h b/drivers/clk/meson/meson-eeclk.h
index 37a48b75c660..6a81d67b46b2 100644
--- a/drivers/clk/meson/meson-eeclk.h
+++ b/drivers/clk/meson/meson-eeclk.h
@@ -14,8 +14,6 @@
struct platform_device;
struct meson_eeclkc_data {
- struct clk_regmap *const *regmap_clks;
- unsigned int regmap_clk_num;
const struct reg_sequence *init_regs;
unsigned int init_count;
struct meson_clk_hw_data hw_clks;
diff --git a/drivers/clk/meson/meson8-ddr.c b/drivers/clk/meson/meson8-ddr.c
index 4b73ea244b63..1975fc3987e2 100644
--- a/drivers/clk/meson/meson8-ddr.c
+++ b/drivers/clk/meson/meson8-ddr.c
@@ -85,11 +85,6 @@ static struct clk_hw_onecell_data meson8_ddr_clk_hw_onecell_data = {
.num = 2,
};
-static struct clk_regmap *const meson8_ddr_clk_regmaps[] = {
- &meson8_ddr_pll_dco,
- &meson8_ddr_pll,
-};
-
static const struct regmap_config meson8_ddr_clkc_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
@@ -113,10 +108,6 @@ static int meson8_ddr_clkc_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- /* Populate regmap */
- for (i = 0; i < ARRAY_SIZE(meson8_ddr_clk_regmaps); i++)
- meson8_ddr_clk_regmaps[i]->map = regmap;
-
/* Register all clks */
for (i = 0; i < meson8_ddr_clk_hw_onecell_data.num; i++) {
hw = meson8_ddr_clk_hw_onecell_data.hws[i];
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index e4b474c5f86c..206538326614 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -16,7 +16,6 @@
#include <linux/slab.h>
#include <linux/regmap.h>
-#include "meson8b.h"
#include "clk-regmap.h"
#include "meson-clkc-utils.h"
#include "clk-pll.h"
@@ -25,6 +24,72 @@
#include <dt-bindings/clock/meson8b-clkc.h>
#include <dt-bindings/reset/amlogic,meson8b-clkc-reset.h>
+/*
+ * Clock controller register offsets
+ *
+ * Register offsets from the HardKernel[0] data sheet must be multiplied
+ * by 4 before adding them to the base address to get the right value
+ *
+ * [0] https://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
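+ *
+ * For example, HHI_GP_PLL_CNTL is listed at offset 0x10 in the data
+ * sheet, so the register offset used below is 0x10 * 4 = 0x40.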
+ */
+#define HHI_GP_PLL_CNTL 0x40
+#define HHI_GP_PLL_CNTL2 0x44
+#define HHI_GP_PLL_CNTL3 0x48
+#define HHI_GP_PLL_CNTL4 0x4C
+#define HHI_GP_PLL_CNTL5 0x50
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12c
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_AO 0x154
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
+#define HHI_VID_CLK_DIV 0x164
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_AUD_CLK_CNTL 0x178
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_AUD_CLK_CNTL2 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_VID_DIVIDER_CNTL 0x198
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_MALI_CLK_CNTL 0x1b0
+#define HHI_VPU_CLK_CNTL 0x1bc
+#define HHI_HDMI_CLK_CNTL 0x1cc
+#define HHI_VDEC_CLK_CNTL 0x1e0
+#define HHI_VDEC2_CLK_CNTL 0x1e4
+#define HHI_VDEC3_CLK_CNTL 0x1e8
+#define HHI_NAND_CLK_CNTL 0x25c
+#define HHI_MPLL_CNTL 0x280
+#define HHI_SYS_PLL_CNTL 0x300
+#define HHI_VID_PLL_CNTL 0x320
+#define HHI_VID_PLL_CNTL2 0x324
+#define HHI_VID_PLL_CNTL3 0x328
+#define HHI_VID_PLL_CNTL4 0x32c
+#define HHI_VID_PLL_CNTL5 0x330
+#define HHI_VID_PLL_CNTL6 0x334
+#define HHI_VID2_PLL_CNTL 0x380
+#define HHI_VID2_PLL_CNTL2 0x384
+#define HHI_VID2_PLL_CNTL3 0x388
+#define HHI_VID2_PLL_CNTL4 0x38c
+#define HHI_VID2_PLL_CNTL5 0x390
+#define HHI_VID2_PLL_CNTL6 0x394
+
+/*
+ * MPLL register offsets taken from the S905 datasheet. Vendor kernel sources
+ * confirm these are the same for the S805.
+ */
+#define HHI_MPLL_CNTL 0x280
+#define HHI_MPLL_CNTL2 0x284
+#define HHI_MPLL_CNTL3 0x288
+#define HHI_MPLL_CNTL4 0x28c
+#define HHI_MPLL_CNTL5 0x290
+#define HHI_MPLL_CNTL6 0x294
+#define HHI_MPLL_CNTL7 0x298
+#define HHI_MPLL_CNTL8 0x29c
+#define HHI_MPLL_CNTL9 0x2a0
+#define HHI_MPLL_CNTL10 0x2a4
+
struct meson8b_clk_reset {
struct reset_controller_dev reset;
struct regmap *regmap;
@@ -3407,202 +3472,6 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_HDMI_PLL_DCO_IN] = &hdmi_pll_dco_in.hw,
};
-static struct clk_regmap *const meson8b_clk_regmaps[] = {
- &meson8b_clk81,
- &meson8b_ddr,
- &meson8b_dos,
- &meson8b_isa,
- &meson8b_pl301,
- &meson8b_periphs,
- &meson8b_spicc,
- &meson8b_i2c,
- &meson8b_sar_adc,
- &meson8b_smart_card,
- &meson8b_rng0,
- &meson8b_uart0,
- &meson8b_sdhc,
- &meson8b_stream,
- &meson8b_async_fifo,
- &meson8b_sdio,
- &meson8b_abuf,
- &meson8b_hiu_iface,
- &meson8b_assist_misc,
- &meson8b_spi,
- &meson8b_i2s_spdif,
- &meson8b_eth,
- &meson8b_demux,
- &meson8b_aiu_glue,
- &meson8b_iec958,
- &meson8b_i2s_out,
- &meson8b_amclk,
- &meson8b_aififo2,
- &meson8b_mixer,
- &meson8b_mixer_iface,
- &meson8b_adc,
- &meson8b_blkmv,
- &meson8b_aiu,
- &meson8b_uart1,
- &meson8b_g2d,
- &meson8b_usb0,
- &meson8b_usb1,
- &meson8b_reset,
- &meson8b_nand,
- &meson8b_dos_parser,
- &meson8b_usb,
- &meson8b_vdin1,
- &meson8b_ahb_arb0,
- &meson8b_efuse,
- &meson8b_boot_rom,
- &meson8b_ahb_data_bus,
- &meson8b_ahb_ctrl_bus,
- &meson8b_hdmi_intr_sync,
- &meson8b_hdmi_pclk,
- &meson8b_usb1_ddr_bridge,
- &meson8b_usb0_ddr_bridge,
- &meson8b_mmc_pclk,
- &meson8b_dvin,
- &meson8b_uart2,
- &meson8b_sana,
- &meson8b_vpu_intr,
- &meson8b_sec_ahb_ahb3_bridge,
- &meson8b_clk81_a9,
- &meson8b_vclk2_venci0,
- &meson8b_vclk2_venci1,
- &meson8b_vclk2_vencp0,
- &meson8b_vclk2_vencp1,
- &meson8b_gclk_venci_int,
- &meson8b_gclk_vencp_int,
- &meson8b_dac_clk,
- &meson8b_aoclk_gate,
- &meson8b_iec958_gate,
- &meson8b_enc480p,
- &meson8b_rng1,
- &meson8b_gclk_vencl_int,
- &meson8b_vclk2_venclmcc,
- &meson8b_vclk2_vencl,
- &meson8b_vclk2_other,
- &meson8b_edp,
- &meson8b_ao_media_cpu,
- &meson8b_ao_ahb_sram,
- &meson8b_ao_ahb_bus,
- &meson8b_ao_iface,
- &meson8b_mpeg_clk_div,
- &meson8b_mpeg_clk_sel,
- &meson8b_mpll0,
- &meson8b_mpll1,
- &meson8b_mpll2,
- &meson8b_mpll0_div,
- &meson8b_mpll1_div,
- &meson8b_mpll2_div,
- &meson8b_fixed_pll,
- &meson8b_sys_pll,
- &meson8b_cpu_in_sel,
- &meson8b_cpu_scale_div,
- &meson8b_cpu_scale_out_sel,
- &meson8b_cpu_clk,
- &meson8b_mpll_prediv,
- &meson8b_fclk_div2,
- &meson8b_fclk_div3,
- &meson8b_fclk_div4,
- &meson8b_fclk_div5,
- &meson8b_fclk_div7,
- &meson8b_nand_clk_sel,
- &meson8b_nand_clk_div,
- &meson8b_nand_clk_gate,
- &meson8b_fixed_pll_dco,
- &meson8b_hdmi_pll_dco,
- &meson8b_sys_pll_dco,
- &meson8b_apb_clk_sel,
- &meson8b_apb_clk_gate,
- &meson8b_periph_clk_sel,
- &meson8b_periph_clk_gate,
- &meson8b_axi_clk_sel,
- &meson8b_axi_clk_gate,
- &meson8b_l2_dram_clk_sel,
- &meson8b_l2_dram_clk_gate,
- &meson8b_hdmi_pll_lvds_out,
- &meson8b_hdmi_pll_hdmi_out,
- &meson8b_vid_pll_in_sel,
- &meson8b_vid_pll_in_en,
- &meson8b_vid_pll_pre_div,
- &meson8b_vid_pll_post_div,
- &meson8b_vid_pll,
- &meson8b_vid_pll_final_div,
- &meson8b_vclk_in_sel,
- &meson8b_vclk_in_en,
- &meson8b_vclk_en,
- &meson8b_vclk_div1_gate,
- &meson8b_vclk_div2_div_gate,
- &meson8b_vclk_div4_div_gate,
- &meson8b_vclk_div6_div_gate,
- &meson8b_vclk_div12_div_gate,
- &meson8b_vclk2_in_sel,
- &meson8b_vclk2_clk_in_en,
- &meson8b_vclk2_clk_en,
- &meson8b_vclk2_div1_gate,
- &meson8b_vclk2_div2_div_gate,
- &meson8b_vclk2_div4_div_gate,
- &meson8b_vclk2_div6_div_gate,
- &meson8b_vclk2_div12_div_gate,
- &meson8b_cts_enct_sel,
- &meson8b_cts_enct,
- &meson8b_cts_encp_sel,
- &meson8b_cts_encp,
- &meson8b_cts_enci_sel,
- &meson8b_cts_enci,
- &meson8b_hdmi_tx_pixel_sel,
- &meson8b_hdmi_tx_pixel,
- &meson8b_cts_encl_sel,
- &meson8b_cts_encl,
- &meson8b_cts_vdac0_sel,
- &meson8b_cts_vdac0,
- &meson8b_hdmi_sys_sel,
- &meson8b_hdmi_sys_div,
- &meson8b_hdmi_sys,
- &meson8b_mali_0_sel,
- &meson8b_mali_0_div,
- &meson8b_mali_0,
- &meson8b_mali_1_sel,
- &meson8b_mali_1_div,
- &meson8b_mali_1,
- &meson8b_mali,
- &meson8m2_gp_pll_dco,
- &meson8m2_gp_pll,
- &meson8b_vpu_0_sel,
- &meson8m2_vpu_0_sel,
- &meson8b_vpu_0_div,
- &meson8b_vpu_0,
- &meson8b_vpu_1_sel,
- &meson8m2_vpu_1_sel,
- &meson8b_vpu_1_div,
- &meson8b_vpu_1,
- &meson8b_vpu,
- &meson8b_vdec_1_sel,
- &meson8b_vdec_1_1_div,
- &meson8b_vdec_1_1,
- &meson8b_vdec_1_2_div,
- &meson8b_vdec_1_2,
- &meson8b_vdec_1,
- &meson8b_vdec_hcodec_sel,
- &meson8b_vdec_hcodec_div,
- &meson8b_vdec_hcodec,
- &meson8b_vdec_2_sel,
- &meson8b_vdec_2_div,
- &meson8b_vdec_2,
- &meson8b_vdec_hevc_sel,
- &meson8b_vdec_hevc_div,
- &meson8b_vdec_hevc_en,
- &meson8b_vdec_hevc,
- &meson8b_cts_amclk,
- &meson8b_cts_amclk_sel,
- &meson8b_cts_amclk_div,
- &meson8b_cts_mclk_i958_sel,
- &meson8b_cts_mclk_i958_div,
- &meson8b_cts_mclk_i958,
- &meson8b_cts_i958,
- &meson8b_vid_pll_lvds_en,
-};
-
static const struct meson8b_clk_reset_line {
u32 reg;
u8 bit_idx;
@@ -3819,10 +3688,6 @@ static void __init meson8b_clkc_init_common(struct device_node *np,
return;
}
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_regmaps); i++)
- meson8b_clk_regmaps[i]->map = map;
-
/*
* register all clks and start with the first used ID (which is
* CLKID_PLL_FIXED)
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
deleted file mode 100644
index a5b6e67eeefb..000000000000
--- a/drivers/clk/meson/meson8b.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2015 Endless Mobile, Inc.
- * Author: Carlo Caione <carlo@endlessm.com>
- *
- * Copyright (c) 2016 BayLibre, Inc.
- * Michael Turquette <mturquette@baylibre.com>
- */
-
-#ifndef __MESON8B_H
-#define __MESON8B_H
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the HardKernel[0] data sheet are listed in comment
- * blocks below. Those offsets must be multiplied by 4 before adding them to
- * the base address to get the right value
- *
- * [0] https://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
- */
-#define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */
-#define HHI_GP_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */
-#define HHI_GP_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */
-#define HHI_GP_PLL_CNTL4 0x4C /* 0x13 offset in data sheet */
-#define HHI_GP_PLL_CNTL5 0x50 /* 0x14 offset in data sheet */
-#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
-#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
-#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
-#define HHI_GCLK_MPEG1 0x144 /* 0x51 offset in data sheet */
-#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
-#define HHI_GCLK_OTHER 0x150 /* 0x54 offset in data sheet */
-#define HHI_GCLK_AO 0x154 /* 0x55 offset in data sheet */
-#define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */
-#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
-#define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */
-#define HHI_AUD_CLK_CNTL 0x178 /* 0x5e offset in data sheet */
-#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
-#define HHI_AUD_CLK_CNTL2 0x190 /* 0x64 offset in data sheet */
-#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
-#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */
-#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
-#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
-#define HHI_VPU_CLK_CNTL 0x1bc /* 0x6f offset in data sheet */
-#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
-#define HHI_VDEC_CLK_CNTL 0x1e0 /* 0x78 offset in data sheet */
-#define HHI_VDEC2_CLK_CNTL 0x1e4 /* 0x79 offset in data sheet */
-#define HHI_VDEC3_CLK_CNTL 0x1e8 /* 0x7a offset in data sheet */
-#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */
-#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
-#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
-#define HHI_VID_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
-#define HHI_VID_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
-#define HHI_VID_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
-#define HHI_VID_PLL_CNTL4 0x32c /* 0xcb offset in data sheet */
-#define HHI_VID_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
-#define HHI_VID_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
-#define HHI_VID2_PLL_CNTL 0x380 /* 0xe0 offset in data sheet */
-#define HHI_VID2_PLL_CNTL2 0x384 /* 0xe1 offset in data sheet */
-#define HHI_VID2_PLL_CNTL3 0x388 /* 0xe2 offset in data sheet */
-#define HHI_VID2_PLL_CNTL4 0x38c /* 0xe3 offset in data sheet */
-#define HHI_VID2_PLL_CNTL5 0x390 /* 0xe4 offset in data sheet */
-#define HHI_VID2_PLL_CNTL6 0x394 /* 0xe5 offset in data sheet */
-
-/*
- * MPLL register offeset taken from the S905 datasheet. Vendor kernel source
- * confirm these are the same for the S805.
- */
-#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
-#define HHI_MPLL_CNTL2 0x284 /* 0xa1 offset in data sheet */
-#define HHI_MPLL_CNTL3 0x288 /* 0xa2 offset in data sheet */
-#define HHI_MPLL_CNTL4 0x28C /* 0xa3 offset in data sheet */
-#define HHI_MPLL_CNTL5 0x290 /* 0xa4 offset in data sheet */
-#define HHI_MPLL_CNTL6 0x294 /* 0xa5 offset in data sheet */
-#define HHI_MPLL_CNTL7 0x298 /* 0xa6 offset in data sheet */
-#define HHI_MPLL_CNTL8 0x29C /* 0xa7 offset in data sheet */
-#define HHI_MPLL_CNTL9 0x2A0 /* 0xa8 offset in data sheet */
-#define HHI_MPLL_CNTL10 0x2A4 /* 0xa9 offset in data sheet */
-
-#endif /* __MESON8B_H */
diff --git a/drivers/clk/meson/s4-peripherals.c b/drivers/clk/meson/s4-peripherals.c
index 8a4037377787..c9400cf54c84 100644
--- a/drivers/clk/meson/s4-peripherals.c
+++ b/drivers/clk/meson/s4-peripherals.c
@@ -13,10 +13,55 @@
#include "clk-regmap.h"
#include "vid-pll-div.h"
#include "clk-dualdiv.h"
-#include "s4-peripherals.h"
#include "meson-clkc-utils.h"
#include <dt-bindings/clock/amlogic,s4-peripherals-clkc.h>
+#define CLKCTRL_RTC_BY_OSCIN_CTRL0 0x008
+#define CLKCTRL_RTC_BY_OSCIN_CTRL1 0x00c
+#define CLKCTRL_RTC_CTRL 0x010
+#define CLKCTRL_SYS_CLK_CTRL0 0x040
+#define CLKCTRL_SYS_CLK_EN0_REG0 0x044
+#define CLKCTRL_SYS_CLK_EN0_REG1 0x048
+#define CLKCTRL_SYS_CLK_EN0_REG2 0x04c
+#define CLKCTRL_SYS_CLK_EN0_REG3 0x050
+#define CLKCTRL_CECA_CTRL0 0x088
+#define CLKCTRL_CECA_CTRL1 0x08c
+#define CLKCTRL_CECB_CTRL0 0x090
+#define CLKCTRL_CECB_CTRL1 0x094
+#define CLKCTRL_SC_CLK_CTRL 0x098
+#define CLKCTRL_CLK12_24_CTRL 0x0a8
+#define CLKCTRL_VID_CLK_CTRL 0x0c0
+#define CLKCTRL_VID_CLK_CTRL2 0x0c4
+#define CLKCTRL_VID_CLK_DIV 0x0c8
+#define CLKCTRL_VIID_CLK_DIV 0x0cc
+#define CLKCTRL_VIID_CLK_CTRL 0x0d0
+#define CLKCTRL_HDMI_CLK_CTRL 0x0e0
+#define CLKCTRL_VID_PLL_CLK_DIV 0x0e4
+#define CLKCTRL_VPU_CLK_CTRL 0x0e8
+#define CLKCTRL_VPU_CLKB_CTRL 0x0ec
+#define CLKCTRL_VPU_CLKC_CTRL 0x0f0
+#define CLKCTRL_VID_LOCK_CLK_CTRL 0x0f4
+#define CLKCTRL_VDIN_MEAS_CLK_CTRL 0x0f8
+#define CLKCTRL_VAPBCLK_CTRL 0x0fc
+#define CLKCTRL_HDCP22_CTRL 0x100
+#define CLKCTRL_VDEC_CLK_CTRL 0x140
+#define CLKCTRL_VDEC2_CLK_CTRL 0x144
+#define CLKCTRL_VDEC3_CLK_CTRL 0x148
+#define CLKCTRL_VDEC4_CLK_CTRL 0x14c
+#define CLKCTRL_TS_CLK_CTRL 0x158
+#define CLKCTRL_MALI_CLK_CTRL 0x15c
+#define CLKCTRL_NAND_CLK_CTRL 0x168
+#define CLKCTRL_SD_EMMC_CLK_CTRL 0x16c
+#define CLKCTRL_SPICC_CLK_CTRL 0x174
+#define CLKCTRL_GEN_CLK_CTRL 0x178
+#define CLKCTRL_SAR_CLK_CTRL 0x17c
+#define CLKCTRL_PWM_CLK_AB_CTRL 0x180
+#define CLKCTRL_PWM_CLK_CD_CTRL 0x184
+#define CLKCTRL_PWM_CLK_EF_CTRL 0x188
+#define CLKCTRL_PWM_CLK_GH_CTRL 0x18c
+#define CLKCTRL_PWM_CLK_IJ_CTRL 0x190
+#define CLKCTRL_DEMOD_CLK_CTRL 0x200
+
static struct clk_regmap s4_rtc_32k_by_oscin_clkin = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_RTC_BY_OSCIN_CTRL0,
@@ -3129,118 +3174,6 @@ static struct clk_regmap s4_gen_clk = {
},
};
-static const struct clk_parent_data s4_adc_extclk_in_parent_data[] = {
- { .fw_name = "xtal", },
- { .fw_name = "fclk_div4", },
- { .fw_name = "fclk_div3", },
- { .fw_name = "fclk_div5", },
- { .fw_name = "fclk_div7", },
- { .fw_name = "mpll2", },
- { .fw_name = "gp0_pll", },
- { .fw_name = "hifi_pll", },
-};
-
-static struct clk_regmap s4_adc_extclk_in_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "adc_extclk_in_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_adc_extclk_in_parent_data,
- .num_parents = ARRAY_SIZE(s4_adc_extclk_in_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_adc_extclk_in_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data){
- .name = "adc_extclk_in_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_adc_extclk_in_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_adc_extclk_in_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "adc_extclk_in",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_adc_extclk_in_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_demod_core_clk_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "demod_core_clk_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = (const struct clk_parent_data []) {
- { .fw_name = "xtal", },
- { .fw_name = "fclk_div7", },
- { .fw_name = "fclk_div4", },
- { .hw = &s4_adc_extclk_in_gate.hw }
- },
- .num_parents = 4,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_demod_core_clk_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data){
- .name = "demod_core_clk_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_demod_core_clk_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_demod_core_clk_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "demod_core_clk",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_demod_core_clk_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
#define MESON_GATE(_name, _reg, _bit) \
MESON_PCLK(_name, _reg, _bit, &s4_sys_clk.hw)
@@ -3522,231 +3455,6 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_HDCP22_SKPCLK] = &s4_hdcp22_skpclk_gate.hw,
};
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const s4_periphs_clk_regmaps[] = {
- &s4_rtc_32k_by_oscin_clkin,
- &s4_rtc_32k_by_oscin_div,
- &s4_rtc_32k_by_oscin_sel,
- &s4_rtc_32k_by_oscin,
- &s4_rtc_clk,
- &s4_sysclk_b_sel,
- &s4_sysclk_b_div,
- &s4_sysclk_b,
- &s4_sysclk_a_sel,
- &s4_sysclk_a_div,
- &s4_sysclk_a,
- &s4_sys_clk,
- &s4_ceca_32k_clkin,
- &s4_ceca_32k_div,
- &s4_ceca_32k_sel_pre,
- &s4_ceca_32k_sel,
- &s4_ceca_32k_clkout,
- &s4_cecb_32k_clkin,
- &s4_cecb_32k_div,
- &s4_cecb_32k_sel_pre,
- &s4_cecb_32k_sel,
- &s4_cecb_32k_clkout,
- &s4_sc_clk_mux,
- &s4_sc_clk_div,
- &s4_sc_clk_gate,
- &s4_12_24M_clk_gate,
- &s4_12_24M_clk,
- &s4_vid_pll_div,
- &s4_vid_pll_sel,
- &s4_vid_pll,
- &s4_vclk_sel,
- &s4_vclk2_sel,
- &s4_vclk_input,
- &s4_vclk2_input,
- &s4_vclk_div,
- &s4_vclk2_div,
- &s4_vclk,
- &s4_vclk2,
- &s4_vclk_div1,
- &s4_vclk_div2_en,
- &s4_vclk_div4_en,
- &s4_vclk_div6_en,
- &s4_vclk_div12_en,
- &s4_vclk2_div1,
- &s4_vclk2_div2_en,
- &s4_vclk2_div4_en,
- &s4_vclk2_div6_en,
- &s4_vclk2_div12_en,
- &s4_cts_enci_sel,
- &s4_cts_encp_sel,
- &s4_cts_vdac_sel,
- &s4_hdmi_tx_sel,
- &s4_cts_enci,
- &s4_cts_encp,
- &s4_cts_vdac,
- &s4_hdmi_tx,
- &s4_hdmi_sel,
- &s4_hdmi_div,
- &s4_hdmi,
- &s4_ts_clk_div,
- &s4_ts_clk_gate,
- &s4_mali_0_sel,
- &s4_mali_0_div,
- &s4_mali_0,
- &s4_mali_1_sel,
- &s4_mali_1_div,
- &s4_mali_1,
- &s4_mali_mux,
- &s4_vdec_p0_mux,
- &s4_vdec_p0_div,
- &s4_vdec_p0,
- &s4_vdec_p1_mux,
- &s4_vdec_p1_div,
- &s4_vdec_p1,
- &s4_vdec_mux,
- &s4_hevcf_p0_mux,
- &s4_hevcf_p0_div,
- &s4_hevcf_p0,
- &s4_hevcf_p1_mux,
- &s4_hevcf_p1_div,
- &s4_hevcf_p1,
- &s4_hevcf_mux,
- &s4_vpu_0_sel,
- &s4_vpu_0_div,
- &s4_vpu_0,
- &s4_vpu_1_sel,
- &s4_vpu_1_div,
- &s4_vpu_1,
- &s4_vpu,
- &s4_vpu_clkb_tmp_mux,
- &s4_vpu_clkb_tmp_div,
- &s4_vpu_clkb_tmp,
- &s4_vpu_clkb_div,
- &s4_vpu_clkb,
- &s4_vpu_clkc_p0_mux,
- &s4_vpu_clkc_p0_div,
- &s4_vpu_clkc_p0,
- &s4_vpu_clkc_p1_mux,
- &s4_vpu_clkc_p1_div,
- &s4_vpu_clkc_p1,
- &s4_vpu_clkc_mux,
- &s4_vapb_0_sel,
- &s4_vapb_0_div,
- &s4_vapb_0,
- &s4_vapb_1_sel,
- &s4_vapb_1_div,
- &s4_vapb_1,
- &s4_vapb,
- &s4_ge2d_gate,
- &s4_hdcp22_esmclk_mux,
- &s4_hdcp22_esmclk_div,
- &s4_hdcp22_esmclk_gate,
- &s4_hdcp22_skpclk_mux,
- &s4_hdcp22_skpclk_div,
- &s4_hdcp22_skpclk_gate,
- &s4_vdin_meas_mux,
- &s4_vdin_meas_div,
- &s4_vdin_meas_gate,
- &s4_sd_emmc_c_clk0_sel,
- &s4_sd_emmc_c_clk0_div,
- &s4_sd_emmc_c_clk0,
- &s4_sd_emmc_a_clk0_sel,
- &s4_sd_emmc_a_clk0_div,
- &s4_sd_emmc_a_clk0,
- &s4_sd_emmc_b_clk0_sel,
- &s4_sd_emmc_b_clk0_div,
- &s4_sd_emmc_b_clk0,
- &s4_spicc0_mux,
- &s4_spicc0_div,
- &s4_spicc0_gate,
- &s4_pwm_a_mux,
- &s4_pwm_a_div,
- &s4_pwm_a_gate,
- &s4_pwm_b_mux,
- &s4_pwm_b_div,
- &s4_pwm_b_gate,
- &s4_pwm_c_mux,
- &s4_pwm_c_div,
- &s4_pwm_c_gate,
- &s4_pwm_d_mux,
- &s4_pwm_d_div,
- &s4_pwm_d_gate,
- &s4_pwm_e_mux,
- &s4_pwm_e_div,
- &s4_pwm_e_gate,
- &s4_pwm_f_mux,
- &s4_pwm_f_div,
- &s4_pwm_f_gate,
- &s4_pwm_g_mux,
- &s4_pwm_g_div,
- &s4_pwm_g_gate,
- &s4_pwm_h_mux,
- &s4_pwm_h_div,
- &s4_pwm_h_gate,
- &s4_pwm_i_mux,
- &s4_pwm_i_div,
- &s4_pwm_i_gate,
- &s4_pwm_j_mux,
- &s4_pwm_j_div,
- &s4_pwm_j_gate,
- &s4_saradc_mux,
- &s4_saradc_div,
- &s4_saradc_gate,
- &s4_gen_clk_sel,
- &s4_gen_clk_div,
- &s4_gen_clk,
- &s4_ddr,
- &s4_dos,
- &s4_ethphy,
- &s4_mali,
- &s4_aocpu,
- &s4_aucpu,
- &s4_cec,
- &s4_sdemmca,
- &s4_sdemmcb,
- &s4_nand,
- &s4_smartcard,
- &s4_acodec,
- &s4_spifc,
- &s4_msr_clk,
- &s4_ir_ctrl,
- &s4_audio,
- &s4_eth,
- &s4_uart_a,
- &s4_uart_b,
- &s4_uart_c,
- &s4_uart_d,
- &s4_uart_e,
- &s4_aififo,
- &s4_ts_ddr,
- &s4_ts_pll,
- &s4_g2d,
- &s4_spicc0,
- &s4_usb,
- &s4_i2c_m_a,
- &s4_i2c_m_b,
- &s4_i2c_m_c,
- &s4_i2c_m_d,
- &s4_i2c_m_e,
- &s4_hdmitx_apb,
- &s4_i2c_s_a,
- &s4_usb1_to_ddr,
- &s4_hdcp22,
- &s4_mmc_apb,
- &s4_rsa,
- &s4_cpu_debug,
- &s4_vpu_intr,
- &s4_demod,
- &s4_sar_adc,
- &s4_gic,
- &s4_pwm_ab,
- &s4_pwm_cd,
- &s4_pwm_ef,
- &s4_pwm_gh,
- &s4_pwm_ij,
- &s4_demod_core_clk_mux,
- &s4_demod_core_clk_div,
- &s4_demod_core_clk_gate,
- &s4_adc_extclk_in_mux,
- &s4_adc_extclk_in_div,
- &s4_adc_extclk_in_gate,
-};
-
static const struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -3776,10 +3484,6 @@ static int meson_s4_periphs_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(regmap),
"can't init regmap mmio region\n");
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(s4_periphs_clk_regmaps); i++)
- s4_periphs_clk_regmaps[i]->map = regmap;
-
for (i = 0; i < s4_periphs_clks.num; i++) {
/* array might be sparse */
if (!s4_periphs_clks.hws[i])
diff --git a/drivers/clk/meson/s4-peripherals.h b/drivers/clk/meson/s4-peripherals.h
deleted file mode 100644
index 1e298713c2b2..000000000000
--- a/drivers/clk/meson/s4-peripherals.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
-/*
- * Copyright (c) 2022-2023 Amlogic, inc. All rights reserved
- * Author: Yu Tu <yu.tu@amlogic.com>
- */
-
-#ifndef __MESON_S4_PERIPHERALS_H__
-#define __MESON_S4_PERIPHERALS_H__
-
-#define CLKCTRL_RTC_BY_OSCIN_CTRL0 0x008
-#define CLKCTRL_RTC_BY_OSCIN_CTRL1 0x00c
-#define CLKCTRL_RTC_CTRL 0x010
-#define CLKCTRL_SYS_CLK_CTRL0 0x040
-#define CLKCTRL_SYS_CLK_EN0_REG0 0x044
-#define CLKCTRL_SYS_CLK_EN0_REG1 0x048
-#define CLKCTRL_SYS_CLK_EN0_REG2 0x04c
-#define CLKCTRL_SYS_CLK_EN0_REG3 0x050
-#define CLKCTRL_CECA_CTRL0 0x088
-#define CLKCTRL_CECA_CTRL1 0x08c
-#define CLKCTRL_CECB_CTRL0 0x090
-#define CLKCTRL_CECB_CTRL1 0x094
-#define CLKCTRL_SC_CLK_CTRL 0x098
-#define CLKCTRL_CLK12_24_CTRL 0x0a8
-#define CLKCTRL_VID_CLK_CTRL 0x0c0
-#define CLKCTRL_VID_CLK_CTRL2 0x0c4
-#define CLKCTRL_VID_CLK_DIV 0x0c8
-#define CLKCTRL_VIID_CLK_DIV 0x0cc
-#define CLKCTRL_VIID_CLK_CTRL 0x0d0
-#define CLKCTRL_HDMI_CLK_CTRL 0x0e0
-#define CLKCTRL_VID_PLL_CLK_DIV 0x0e4
-#define CLKCTRL_VPU_CLK_CTRL 0x0e8
-#define CLKCTRL_VPU_CLKB_CTRL 0x0ec
-#define CLKCTRL_VPU_CLKC_CTRL 0x0f0
-#define CLKCTRL_VID_LOCK_CLK_CTRL 0x0f4
-#define CLKCTRL_VDIN_MEAS_CLK_CTRL 0x0f8
-#define CLKCTRL_VAPBCLK_CTRL 0x0fc
-#define CLKCTRL_HDCP22_CTRL 0x100
-#define CLKCTRL_VDEC_CLK_CTRL 0x140
-#define CLKCTRL_VDEC2_CLK_CTRL 0x144
-#define CLKCTRL_VDEC3_CLK_CTRL 0x148
-#define CLKCTRL_VDEC4_CLK_CTRL 0x14c
-#define CLKCTRL_TS_CLK_CTRL 0x158
-#define CLKCTRL_MALI_CLK_CTRL 0x15c
-#define CLKCTRL_NAND_CLK_CTRL 0x168
-#define CLKCTRL_SD_EMMC_CLK_CTRL 0x16c
-#define CLKCTRL_SPICC_CLK_CTRL 0x174
-#define CLKCTRL_GEN_CLK_CTRL 0x178
-#define CLKCTRL_SAR_CLK_CTRL 0x17c
-#define CLKCTRL_PWM_CLK_AB_CTRL 0x180
-#define CLKCTRL_PWM_CLK_CD_CTRL 0x184
-#define CLKCTRL_PWM_CLK_EF_CTRL 0x188
-#define CLKCTRL_PWM_CLK_GH_CTRL 0x18c
-#define CLKCTRL_PWM_CLK_IJ_CTRL 0x190
-#define CLKCTRL_DEMOD_CLK_CTRL 0x200
-
-#endif /* __MESON_S4_PERIPHERALS_H__ */
diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c
index f9cc05a506e3..3d689d2f003e 100644
--- a/drivers/clk/meson/s4-pll.c
+++ b/drivers/clk/meson/s4-pll.c
@@ -13,10 +13,37 @@
#include "clk-mpll.h"
#include "clk-pll.h"
#include "clk-regmap.h"
-#include "s4-pll.h"
#include "meson-clkc-utils.h"
#include <dt-bindings/clock/amlogic,s4-pll-clkc.h>
+#define ANACTRL_FIXPLL_CTRL0 0x040
+#define ANACTRL_FIXPLL_CTRL1 0x044
+#define ANACTRL_FIXPLL_CTRL3 0x04c
+#define ANACTRL_GP0PLL_CTRL0 0x080
+#define ANACTRL_GP0PLL_CTRL1 0x084
+#define ANACTRL_GP0PLL_CTRL2 0x088
+#define ANACTRL_GP0PLL_CTRL3 0x08c
+#define ANACTRL_GP0PLL_CTRL4 0x090
+#define ANACTRL_GP0PLL_CTRL5 0x094
+#define ANACTRL_GP0PLL_CTRL6 0x098
+#define ANACTRL_HIFIPLL_CTRL0 0x100
+#define ANACTRL_HIFIPLL_CTRL1 0x104
+#define ANACTRL_HIFIPLL_CTRL2 0x108
+#define ANACTRL_HIFIPLL_CTRL3 0x10c
+#define ANACTRL_HIFIPLL_CTRL4 0x110
+#define ANACTRL_HIFIPLL_CTRL5 0x114
+#define ANACTRL_HIFIPLL_CTRL6 0x118
+#define ANACTRL_MPLL_CTRL0 0x180
+#define ANACTRL_MPLL_CTRL1 0x184
+#define ANACTRL_MPLL_CTRL2 0x188
+#define ANACTRL_MPLL_CTRL3 0x18c
+#define ANACTRL_MPLL_CTRL4 0x190
+#define ANACTRL_MPLL_CTRL5 0x194
+#define ANACTRL_MPLL_CTRL6 0x198
+#define ANACTRL_MPLL_CTRL7 0x19c
+#define ANACTRL_MPLL_CTRL8 0x1a0
+#define ANACTRL_HDMIPLL_CTRL0 0x1c0
+
/*
* These clock are a fixed value (fixed_pll is 2GHz) that is initialized by ROMcode.
* The chip was changed fixed pll for security reasons. Fixed PLL registers are not writable
@@ -767,33 +794,6 @@ static struct clk_hw *s4_pll_hw_clks[] = {
[CLKID_MPLL3] = &s4_mpll3.hw,
};
-static struct clk_regmap *const s4_pll_clk_regmaps[] = {
- &s4_fixed_pll_dco,
- &s4_fixed_pll,
- &s4_fclk_div2,
- &s4_fclk_div3,
- &s4_fclk_div4,
- &s4_fclk_div5,
- &s4_fclk_div7,
- &s4_fclk_div2p5,
- &s4_gp0_pll_dco,
- &s4_gp0_pll,
- &s4_hifi_pll_dco,
- &s4_hifi_pll,
- &s4_hdmi_pll_dco,
- &s4_hdmi_pll_od,
- &s4_hdmi_pll,
- &s4_mpll_50m,
- &s4_mpll0_div,
- &s4_mpll0,
- &s4_mpll1_div,
- &s4_mpll1,
- &s4_mpll2_div,
- &s4_mpll2,
- &s4_mpll3_div,
- &s4_mpll3,
-};
-
static const struct reg_sequence s4_init_regs[] = {
{ .reg = ANACTRL_MPLL_CTRL0, .def = 0x00000543 },
};
@@ -832,10 +832,6 @@ static int meson_s4_pll_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"Failed to init registers\n");
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(s4_pll_clk_regmaps); i++)
- s4_pll_clk_regmaps[i]->map = regmap;
-
/* Register clocks */
for (i = 0; i < s4_pll_clks.num; i++) {
/* array might be sparse */
diff --git a/drivers/clk/meson/s4-pll.h b/drivers/clk/meson/s4-pll.h
deleted file mode 100644
index ff7d58302f2a..000000000000
--- a/drivers/clk/meson/s4-pll.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
-/*
- * Copyright (c) 2022-2023 Amlogic, inc. All rights reserved
- * Author: Yu Tu <yu.tu@amlogic.com>
- */
-
-#ifndef __MESON_S4_PLL_H__
-#define __MESON_S4_PLL_H__
-
-#define ANACTRL_FIXPLL_CTRL0 0x040
-#define ANACTRL_FIXPLL_CTRL1 0x044
-#define ANACTRL_FIXPLL_CTRL3 0x04c
-#define ANACTRL_GP0PLL_CTRL0 0x080
-#define ANACTRL_GP0PLL_CTRL1 0x084
-#define ANACTRL_GP0PLL_CTRL2 0x088
-#define ANACTRL_GP0PLL_CTRL3 0x08c
-#define ANACTRL_GP0PLL_CTRL4 0x090
-#define ANACTRL_GP0PLL_CTRL5 0x094
-#define ANACTRL_GP0PLL_CTRL6 0x098
-#define ANACTRL_HIFIPLL_CTRL0 0x100
-#define ANACTRL_HIFIPLL_CTRL1 0x104
-#define ANACTRL_HIFIPLL_CTRL2 0x108
-#define ANACTRL_HIFIPLL_CTRL3 0x10c
-#define ANACTRL_HIFIPLL_CTRL4 0x110
-#define ANACTRL_HIFIPLL_CTRL5 0x114
-#define ANACTRL_HIFIPLL_CTRL6 0x118
-#define ANACTRL_MPLL_CTRL0 0x180
-#define ANACTRL_MPLL_CTRL1 0x184
-#define ANACTRL_MPLL_CTRL2 0x188
-#define ANACTRL_MPLL_CTRL3 0x18c
-#define ANACTRL_MPLL_CTRL4 0x190
-#define ANACTRL_MPLL_CTRL5 0x194
-#define ANACTRL_MPLL_CTRL6 0x198
-#define ANACTRL_MPLL_CTRL7 0x19c
-#define ANACTRL_MPLL_CTRL8 0x1a0
-#define ANACTRL_HDMIPLL_CTRL0 0x1c0
-
-#endif /* __MESON_S4_PLL_H__ */
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index 9c4945234f26..4ba3d82810e8 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -222,6 +222,11 @@ static int sclk_div_init(struct clk_hw *hw)
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
unsigned int val;
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
val = meson_parm_read(clk->map, &sclk->div);
diff --git a/drivers/clk/meson/vclk.c b/drivers/clk/meson/vclk.c
index 6a167ebdc8d7..009bd1193042 100644
--- a/drivers/clk/meson/vclk.c
+++ b/drivers/clk/meson/vclk.c
@@ -45,6 +45,7 @@ static int meson_vclk_gate_is_enabled(struct clk_hw *hw)
}
const struct clk_ops meson_vclk_gate_ops = {
+ .init = clk_regmap_init,
.enable = meson_vclk_gate_enable,
.disable = meson_vclk_gate_disable,
.is_enabled = meson_vclk_gate_is_enabled,
@@ -127,6 +128,7 @@ static int meson_vclk_div_is_enabled(struct clk_hw *hw)
}
const struct clk_ops meson_vclk_div_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_vclk_div_recalc_rate,
.determine_rate = meson_vclk_div_determine_rate,
.set_rate = meson_vclk_div_set_rate,
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
index 965ed7281f57..2a3cdbe6d86a 100644
--- a/drivers/clk/meson/vid-pll-div.c
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -90,6 +90,7 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
}
const struct clk_ops meson_vid_pll_div_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_vid_pll_div_recalc_rate,
};
EXPORT_SYMBOL_NS_GPL(meson_vid_pll_div_ro_ops, "CLK_MESON");
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
index 1b4f023cdc8b..6fbc6dc50ca3 100644
--- a/drivers/clk/microchip/clk-core.c
+++ b/drivers/clk/microchip/clk-core.c
@@ -326,7 +326,7 @@ static void roclk_calc_div_trim(unsigned long rate,
* i.e. fout = fin / 2 * DIV
* whereas DIV = rodiv + (rotrim / 512)
*
- * Since kernel does not perform floating-point arithmatic so
+ * Since the kernel does not perform floating-point arithmetic,
* (rotrim/512) will be zero. And DIV & rodiv will result same.
*
* ie. fout = (fin * 256) / [(512 * rodiv) + rotrim] ... from (1)
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
index 350eeb3e9e25..6855815ee8be 100644
--- a/drivers/clk/mmp/clk-gate.c
+++ b/drivers/clk/mmp/clk-gate.c
@@ -15,7 +15,7 @@
#include "clk.h"
/*
- * Some clocks will have mutiple bits to enable the clocks, and
+ * Some clocks will have multiple bits to enable the clocks, and
* the bits to disable the clock is not same as enabling bits.
*/
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 45665655a258..8d31a595a27c 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -7,7 +7,6 @@
* Gregory CLEMENT <gregory.clement@free-electrons.com>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Andrew Lunn <andrew@lunn.ch>
- *
*/
#include <linux/kernel.h>
@@ -19,8 +18,8 @@
/*
* Core Clocks
*
- * Armada XP Sample At Reset is a 64 bit bitfiled split in two
- * register of 32 bits
+ * Armada XP Sample At Reset is a 64 bit bitfield split in two
+ * registers of 32 bits
*/
#define SARL 0 /* Low part [0:31] */
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
index 928e8b1c46a1..0a78ef380646 100644
--- a/drivers/clk/mxs/clk-div.c
+++ b/drivers/clk/mxs/clk-div.c
@@ -16,7 +16,7 @@
* @busy: busy bit shift
*
* The mxs divider clock is a subclass of basic clk_divider with an
- * addtional busy bit.
+ * additional busy bit.
*/
struct clk_div {
struct clk_divider divider;
diff --git a/drivers/clk/nuvoton/Kconfig b/drivers/clk/nuvoton/Kconfig
index fe4b7f62f467..e7019b69ea74 100644
--- a/drivers/clk/nuvoton/Kconfig
+++ b/drivers/clk/nuvoton/Kconfig
@@ -4,7 +4,7 @@
config COMMON_CLK_NUVOTON
bool "Nuvoton clock controller common support"
depends on ARCH_MA35 || COMPILE_TEST
- default y
+ default ARCH_MA35
help
Say y here to enable common clock controller for Nuvoton platforms.
@@ -12,7 +12,7 @@ if COMMON_CLK_NUVOTON
config CLK_MA35D1
bool "Nuvoton MA35D1 clock controller support"
- default y
+ default ARCH_MA35
help
Build the clock controller driver for MA35D1 SoC.
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index ddb28b38f549..751b786d73f8 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -148,7 +148,7 @@ static int lpc18xx_ccu_gate_endisable(struct clk_hw *hw, bool enable)
val |= LPC18XX_CCU_RUN;
} else {
/*
- * To safely disable a branch clock a squence of two separate
+ * To safely disable a branch clock a sequence of two separate
* writes must be used. First write should set the AUTO bit
* and the next write should clear the RUN bit.
*/
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 7d5dac26b244..6cb6cd3e1778 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -493,6 +493,25 @@ config QCM_DISPCC_2290
Say Y if you want to support display devices and functionality such as
splash screen.
+config QCS_DISPCC_615
+ tristate "QCS615 Display Clock Controller"
+ select QCS_GCC_615
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ QCS615 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
+config QCS_CAMCC_615
+ tristate "QCS615 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCS_GCC_615
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc
+ QCS615 devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config QCS_GCC_404
tristate "QCS404 Global Clock Controller"
help
@@ -529,6 +548,22 @@ config QCS_GCC_615
Say Y if you want to use multimedia devices or peripheral
devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+config QCS_GPUCC_615
+ tristate "QCS615 Graphics clock controller"
+ select QCS_GCC_615
+ help
+ Support for the graphics clock controller on QCS615 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
+config QCS_VIDEOCC_615
+ tristate "QCS615 Video Clock Controller"
+ select QCS_GCC_615
+ help
+ Support for the video clock controller on QCS615 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
config SC_CAMCC_7180
tristate "SC7180 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -549,6 +584,16 @@ config SC_CAMCC_7280
Say Y if you want to support camera devices and functionality such as
capturing pictures.
+config SC_CAMCC_8180X
+ tristate "SC8180X Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SC_GCC_8180X
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc
+ SC8180X devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config SC_CAMCC_8280XP
tristate "SC8280XP Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -924,6 +969,14 @@ config SM_CAMCC_7150
Support for the camera clock controller on SM7150 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_CAMCC_MILOS
+ tristate "Milos Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_MILOS
+ help
+ Support for the camera clock controller on Milos devices.
+ Say Y if you want to support camera devices and camera functionality.
+
config SM_CAMCC_8150
tristate "SM8150 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1036,6 +1089,16 @@ config SM_DISPCC_6375
Say Y if you want to support display devices and functionality such as
splash screen.
+config SM_DISPCC_MILOS
+ tristate "Milos Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on SM_GCC_MILOS
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ Milos devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_DISPCC_8450
tristate "SM8450 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1121,6 +1184,15 @@ config SM_GCC_7150
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_MILOS
+ tristate "Milos Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on Milos devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GCC_8150
tristate "SM8150 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1230,6 +1302,15 @@ config SM_GPUCC_6350
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_GPUCC_MILOS
+ tristate "Milos Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_MILOS
+ help
+ Support for the graphics clock controller on Milos devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SM_GPUCC_8150
tristate "SM8150 Graphics Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1329,6 +1410,15 @@ config SA_VIDEOCC_8775P
Say Y if you want to support video devices and functionality such as
video encode/decode.
+config SM_VIDEOCC_6350
+ tristate "SM6350 Video Clock Controller"
+ select SM_GCC_6350
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on SM6350 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
config SM_VIDEOCC_7150
tristate "SM7150 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1339,6 +1429,17 @@ config SM_VIDEOCC_7150
Say Y if you want to support video devices and functionality such as
video encode and decode.
+config SM_VIDEOCC_MILOS
+ tristate "Milos Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_MILOS
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on Qualcomm Technologies, Inc.
+ Milos devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode/decode.
+
config SM_VIDEOCC_8150
tristate "SM8150 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1372,11 +1473,10 @@ config SM_VIDEOCC_8350
config SM_VIDEOCC_8550
tristate "SM8550 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
- depends on SM_GCC_8550 || SM_GCC_8650
select QCOM_GDSC
help
Support for the video clock controller on Qualcomm Technologies, Inc.
- SM8550 or SM8650 devices.
+ SM8550, SM8650 or X1E80100 devices.
Say Y if you want to support video devices and functionality such as
video encode/decode.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 96862e99e5d4..ddb7e06fae40 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -73,15 +73,20 @@ obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCM_GCC_2290) += gcc-qcm2290.o
obj-$(CONFIG_QCM_DISPCC_2290) += dispcc-qcm2290.o
+obj-$(CONFIG_QCS_DISPCC_615) += dispcc-qcs615.o
+obj-$(CONFIG_QCS_CAMCC_615) += camcc-qcs615.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
obj-$(CONFIG_QCS_GCC_615) += gcc-qcs615.o
obj-$(CONFIG_QCS_GCC_8300) += gcc-qcs8300.o
+obj-$(CONFIG_QCS_GPUCC_615) += gpucc-qcs615.o
+obj-$(CONFIG_QCS_VIDEOCC_615) += videocc-qcs615.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_QDU_ECPRICC_1000) += ecpricc-qdu1000.o
obj-$(CONFIG_QDU_GCC_1000) += gcc-qdu1000.o
obj-$(CONFIG_SC_CAMCC_7180) += camcc-sc7180.o
obj-$(CONFIG_SC_CAMCC_7280) += camcc-sc7280.o
+obj-$(CONFIG_SC_CAMCC_8180X) += camcc-sc8180x.o
obj-$(CONFIG_SC_CAMCC_8280XP) += camcc-sc8280xp.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7280) += dispcc-sc7280.o
@@ -126,6 +131,7 @@ obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
obj-$(CONFIG_SM_CAMCC_8550) += camcc-sm8550.o
obj-$(CONFIG_SM_CAMCC_8650) += camcc-sm8650.o
+obj-$(CONFIG_SM_CAMCC_MILOS) += camcc-milos.o
obj-$(CONFIG_SM_DISPCC_4450) += dispcc-sm4450.o
obj-$(CONFIG_SM_DISPCC_6115) += dispcc-sm6115.o
obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o
@@ -136,6 +142,7 @@ obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_8550) += dispcc-sm8550.o
obj-$(CONFIG_SM_DISPCC_8750) += dispcc-sm8750.o
+obj-$(CONFIG_SM_DISPCC_MILOS) += dispcc-milos.o
obj-$(CONFIG_SM_GCC_4450) += gcc-sm4450.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -149,6 +156,7 @@ obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GCC_8550) += gcc-sm8550.o
obj-$(CONFIG_SM_GCC_8650) += gcc-sm8650.o
obj-$(CONFIG_SM_GCC_8750) += gcc-sm8750.o
+obj-$(CONFIG_SM_GCC_MILOS) += gcc-milos.o
obj-$(CONFIG_SM_GPUCC_4450) += gpucc-sm4450.o
obj-$(CONFIG_SM_GPUCC_6115) += gpucc-sm6115.o
obj-$(CONFIG_SM_GPUCC_6125) += gpucc-sm6125.o
@@ -160,16 +168,19 @@ obj-$(CONFIG_SM_GPUCC_8350) += gpucc-sm8350.o
obj-$(CONFIG_SM_GPUCC_8450) += gpucc-sm8450.o
obj-$(CONFIG_SM_GPUCC_8550) += gpucc-sm8550.o
obj-$(CONFIG_SM_GPUCC_8650) += gpucc-sm8650.o
+obj-$(CONFIG_SM_GPUCC_MILOS) += gpucc-milos.o
obj-$(CONFIG_SM_LPASSCC_6115) += lpasscc-sm6115.o
obj-$(CONFIG_SM_TCSRCC_8550) += tcsrcc-sm8550.o
obj-$(CONFIG_SM_TCSRCC_8650) += tcsrcc-sm8650.o
obj-$(CONFIG_SM_TCSRCC_8750) += tcsrcc-sm8750.o
+obj-$(CONFIG_SM_VIDEOCC_6350) += videocc-sm6350.o
obj-$(CONFIG_SM_VIDEOCC_7150) += videocc-sm7150.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
obj-$(CONFIG_SM_VIDEOCC_8350) += videocc-sm8350.o
obj-$(CONFIG_SM_VIDEOCC_8450) += videocc-sm8450.o
obj-$(CONFIG_SM_VIDEOCC_8550) += videocc-sm8550.o
+obj-$(CONFIG_SM_VIDEOCC_MILOS) += videocc-milos.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
diff --git a/drivers/clk/qcom/camcc-milos.c b/drivers/clk/qcom/camcc-milos.c
new file mode 100644
index 000000000000..75bd939f7dd1
--- /dev/null
+++ b/drivers/clk/qcom/camcc-milos.c
@@ -0,0 +1,2161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_IFACE,
+};
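+
+/*
+ * i.e. the clock controller node's "clocks" property must list the
+ * xo, sleep and iface clocks in exactly this order. A hypothetical
+ * sketch (provider phandles assumed, not taken from the binding):
+ *
+ *   clocks = <&rpmhcc RPMH_CXO_CLK>, <&sleep_clk>, <&gcc ...>;
+ */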
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL1_OUT_MAIN,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_MAIN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_MAIN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
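+/* VCO operating ranges: { min_freq, max_freq, vco select value } */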
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+static const struct pll_vco rivian_ole_vco[] = {
+ { 777000000, 1285000000, 0 },
+};
+
+/* 1200.0 MHz Configuration */
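+/*
+ * The PLL output is XO * (l + alpha / 2^16) for these 16-bit-alpha
+ * PLLs; assuming the usual 19.2 MHz TCXO (not stated in this file),
+ * (0x3e + 0x8000 / 0x10000) * 19.2 MHz = 62.5 * 19.2 MHz = 1200 MHz.
+ */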
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .config = &cam_cc_pll0_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
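+/* { register value, divider }: 0x1 selects the divide-by-2 post divider */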
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .config = &cam_cc_pll1_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 960.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x10000030,
+ .config_ctl_hi_val = 0x80890263,
+ .config_ctl_hi1_val = 0x00000217,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00100000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .config = &cam_cc_pll2_config,
+ .vco_table = rivian_ole_vco,
+ .num_vco = ARRAY_SIZE(rivian_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_rivian_evo_ops,
+ },
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .config = &cam_cc_pll3_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .config = &cam_cc_pll4_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x5000,
+ .config = &cam_cc_pll5_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
+ .offset = 0x5000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll5_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x6000,
+ .config = &cam_cc_pll6_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL1_OUT_MAIN, 2 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL4_OUT_MAIN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+ { .hw = &cam_cc_pll4.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL5_OUT_MAIN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll5_out_even.clkr.hw },
+ { .hw = &cam_cc_pll5.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL6_OUT_MAIN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+ { .hw = &cam_cc_pll6.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+};
+
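+/*
+ * F(rate, src, pre_div, m, n): the F() macro in clk-rcg.h stores the
+ * pre-divider as (2 * pre_div - 1), which is why half-integer dividers
+ * such as 2.5 appear below; m = n = 0 bypasses the MND counter.
+ */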
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(410000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(460000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x1a004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0x2401c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0x21004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0x22004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x1c05c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cre_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cre_clk_src = {
+ .cmd_rcgr = 0x27004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_cre_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x19004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x19028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x1904c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x19070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_MAIN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x1a030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0x20014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_CAM_CC_PLL2_OUT_MAIN, 1, 1, 50),
+ F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4),
+ F(64000000, P_CAM_CC_PLL2_OUT_MAIN, 15, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x18004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x18024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x18044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x18064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x18084,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ope_0_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(410000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(520000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(645000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ope_0_clk_src = {
+ .cmd_rcgr = 0x1b004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_ope_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x25044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk_src",
+ .parent_data = cam_cc_parent_data_6_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x1a04c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_tfe_0_clk_src[] = {
+ F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(570000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(725000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_tfe_0_clk_src = {
+ .cmd_rcgr = 0x1c004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_tfe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_0_csid_clk_src = {
+ .cmd_rcgr = 0x1c030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_tfe_1_clk_src[] = {
+ F(350000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(570000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(725000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_tfe_1_clk_src = {
+ .cmd_rcgr = 0x1d004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_8,
+ .freq_tbl = ftbl_cam_cc_tfe_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_clk_src",
+ .parent_data = cam_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_1_csid_clk_src = {
+ .cmd_rcgr = 0x1d030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_tfe_2_clk_src[] = {
+ F(350000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(570000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(725000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_tfe_2_clk_src = {
+ .cmd_rcgr = 0x1e004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_9,
+ .freq_tbl = ftbl_cam_cc_tfe_2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_clk_src",
+ .parent_data = cam_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_2_csid_clk_src = {
+ .cmd_rcgr = 0x1e030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0x25020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_10,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_10,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
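+/*
+ * Branch clocks are simple gates: enable_reg/enable_mask toggle the CBCR,
+ * and halt_reg is polled according to halt_check (BRANCH_HALT waits on the
+ * status bit, BRANCH_HALT_DELAY has no status bit and just delays).
+ */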
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x1a064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x1a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x1a01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_atb_clk = {
+ .halt_reg = 0x24040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_hf_clk = {
+ .halt_reg = 0x24010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_hf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_sf_clk = {
+ .halt_reg = 0x24004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_sf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_nrt_axi_clk = {
+ .halt_reg = 0x2404c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2404c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2404c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_nrt_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_rt_axi_clk = {
+ .halt_reg = 0x24034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_rt_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0x2101c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0x2201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0x2501c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x23004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cre_ahb_clk = {
+ .halt_reg = 0x27020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cre_clk = {
+ .halt_reg = 0x2701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cre_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x1901c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1901c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x19040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x19064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x19088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x19020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x19044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x19068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x1908c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_atb_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0x2002c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2002c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_cti_clk = {
+ .halt_reg = 0x20008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_cti_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ts_clk = {
+ .halt_reg = 0x2000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ts_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x1801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x1803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x1805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x1807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x1809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_ahb_clk = {
+ .halt_reg = 0x1b034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_areg_clk = {
+ .halt_reg = 0x1b030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_clk = {
+ .halt_reg = 0x1b01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ope_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+ .halt_reg = 0x25018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x25018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_soc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+ .halt_reg = 0x20038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sys_tmr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_ahb_clk = {
+ .halt_reg = 0x1c078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_clk = {
+ .halt_reg = 0x1c01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_cphy_rx_clk = {
+ .halt_reg = 0x1c074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_csid_clk = {
+ .halt_reg = 0x1c048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_ahb_clk = {
+ .halt_reg = 0x1d058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_clk = {
+ .halt_reg = 0x1d01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_cphy_rx_clk = {
+ .halt_reg = 0x1d054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_csid_clk = {
+ .halt_reg = 0x1d048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_2_ahb_clk = {
+ .halt_reg = 0x1e058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_2_clk = {
+ .halt_reg = 0x1e01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_2_cphy_rx_clk = {
+ .halt_reg = 0x1e054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_2_csid_clk = {
+ .halt_reg = 0x1e048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_top_shift_clk = {
+ .halt_reg = 0x25040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x25040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_top_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
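+/*
+ * Power domain for the camera subsystem: a GDSC (globally distributed
+ * switch controller) exposed as a genpd, toggling between OFF and ON only.
+ */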
+static struct gdsc cam_cc_camss_top_gdsc = {
+ .gdscr = 0x25004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_camss_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *cam_cc_milos_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_ATB_CLK] = &cam_cc_camnoc_atb_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_HF_CLK] = &cam_cc_camnoc_axi_hf_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_SF_CLK] = &cam_cc_camnoc_axi_sf_clk.clkr,
+ [CAM_CC_CAMNOC_NRT_AXI_CLK] = &cam_cc_camnoc_nrt_axi_clk.clkr,
+ [CAM_CC_CAMNOC_RT_AXI_CLK] = &cam_cc_camnoc_rt_axi_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CRE_AHB_CLK] = &cam_cc_cre_ahb_clk.clkr,
+ [CAM_CC_CRE_CLK] = &cam_cc_cre_clk.clkr,
+ [CAM_CC_CRE_CLK_SRC] = &cam_cc_cre_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_ATB_CLK] = &cam_cc_icp_atb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_ICP_CTI_CLK] = &cam_cc_icp_cti_clk.clkr,
+ [CAM_CC_ICP_TS_CLK] = &cam_cc_icp_ts_clk.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_OPE_0_AHB_CLK] = &cam_cc_ope_0_ahb_clk.clkr,
+ [CAM_CC_OPE_0_AREG_CLK] = &cam_cc_ope_0_areg_clk.clkr,
+ [CAM_CC_OPE_0_CLK] = &cam_cc_ope_0_clk.clkr,
+ [CAM_CC_OPE_0_CLK_SRC] = &cam_cc_ope_0_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_PLL6_OUT_EVEN] = &cam_cc_pll6_out_even.clkr,
+ [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+ [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+ [CAM_CC_TFE_0_AHB_CLK] = &cam_cc_tfe_0_ahb_clk.clkr,
+ [CAM_CC_TFE_0_CLK] = &cam_cc_tfe_0_clk.clkr,
+ [CAM_CC_TFE_0_CLK_SRC] = &cam_cc_tfe_0_clk_src.clkr,
+ [CAM_CC_TFE_0_CPHY_RX_CLK] = &cam_cc_tfe_0_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_0_CSID_CLK] = &cam_cc_tfe_0_csid_clk.clkr,
+ [CAM_CC_TFE_0_CSID_CLK_SRC] = &cam_cc_tfe_0_csid_clk_src.clkr,
+ [CAM_CC_TFE_1_AHB_CLK] = &cam_cc_tfe_1_ahb_clk.clkr,
+ [CAM_CC_TFE_1_CLK] = &cam_cc_tfe_1_clk.clkr,
+ [CAM_CC_TFE_1_CLK_SRC] = &cam_cc_tfe_1_clk_src.clkr,
+ [CAM_CC_TFE_1_CPHY_RX_CLK] = &cam_cc_tfe_1_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_1_CSID_CLK] = &cam_cc_tfe_1_csid_clk.clkr,
+ [CAM_CC_TFE_1_CSID_CLK_SRC] = &cam_cc_tfe_1_csid_clk_src.clkr,
+ [CAM_CC_TFE_2_AHB_CLK] = &cam_cc_tfe_2_ahb_clk.clkr,
+ [CAM_CC_TFE_2_CLK] = &cam_cc_tfe_2_clk.clkr,
+ [CAM_CC_TFE_2_CLK_SRC] = &cam_cc_tfe_2_clk_src.clkr,
+ [CAM_CC_TFE_2_CPHY_RX_CLK] = &cam_cc_tfe_2_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_2_CSID_CLK] = &cam_cc_tfe_2_csid_clk.clkr,
+ [CAM_CC_TFE_2_CSID_CLK_SRC] = &cam_cc_tfe_2_csid_clk_src.clkr,
+ [CAM_CC_TOP_SHIFT_CLK] = &cam_cc_top_shift_clk.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map cam_cc_milos_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x1a000 },
+ [CAM_CC_CAMNOC_BCR] = { 0x24000 },
+ [CAM_CC_CAMSS_TOP_BCR] = { 0x25000 },
+ [CAM_CC_CCI_0_BCR] = { 0x21000 },
+ [CAM_CC_CCI_1_BCR] = { 0x22000 },
+ [CAM_CC_CPAS_BCR] = { 0x23000 },
+ [CAM_CC_CRE_BCR] = { 0x27000 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x19000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x19024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x19048 },
+ [CAM_CC_CSI3PHY_BCR] = { 0x1906c },
+ [CAM_CC_ICP_BCR] = { 0x20000 },
+ [CAM_CC_MCLK0_BCR] = { 0x18000 },
+ [CAM_CC_MCLK1_BCR] = { 0x18020 },
+ [CAM_CC_MCLK2_BCR] = { 0x18040 },
+ [CAM_CC_MCLK3_BCR] = { 0x18060 },
+ [CAM_CC_MCLK4_BCR] = { 0x18080 },
+ [CAM_CC_OPE_0_BCR] = { 0x1b000 },
+ [CAM_CC_TFE_0_BCR] = { 0x1c000 },
+ [CAM_CC_TFE_1_BCR] = { 0x1d000 },
+ [CAM_CC_TFE_2_BCR] = { 0x1e000 },
+};
+
+static struct gdsc *cam_cc_milos_gdscs[] = {
+ [CAM_CC_CAMSS_TOP_GDSC] = &cam_cc_camss_top_gdsc,
+};
+
+static struct clk_alpha_pll *cam_cc_milos_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll5,
+ &cam_cc_pll6,
+};
+
+static u32 cam_cc_milos_critical_cbcrs[] = {
+ 0x25038, /* CAM_CC_GDSC_CLK */
+ 0x2505c, /* CAM_CC_SLEEP_CLK */
+};
+
+static const struct regmap_config cam_cc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x30728,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data cam_cc_milos_driver_data = {
+ .alpha_plls = cam_cc_milos_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_milos_plls),
+ .clk_cbcrs = cam_cc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_milos_critical_cbcrs),
+};
+
+static struct qcom_cc_desc cam_cc_milos_desc = {
+ .config = &cam_cc_milos_regmap_config,
+ .clks = cam_cc_milos_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_milos_clocks),
+ .resets = cam_cc_milos_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_milos_resets),
+ .gdscs = cam_cc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_milos_driver_data,
+};
+
+static const struct of_device_id cam_cc_milos_match_table[] = {
+ { .compatible = "qcom,milos-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_milos_match_table);
+
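+/*
+ * All registration work (clocks, resets, GDSCs, and the PLL configuration
+ * and critical CBCRs carried in driver_data) is handled generically by
+ * qcom_cc_probe() from the descriptor above.
+ */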
+static int cam_cc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &cam_cc_milos_desc);
+}
+
+static struct platform_driver cam_cc_milos_driver = {
+ .probe = cam_cc_milos_probe,
+ .driver = {
+ .name = "cam_cc-milos",
+ .of_match_table = cam_cc_milos_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_milos_driver);
+
+MODULE_DESCRIPTION("QTI CAM_CC Milos Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-qcs615.c b/drivers/clk/qcom/camcc-qcs615.c
new file mode 100644
index 000000000000..c063a3bfacd0
--- /dev/null
+++ b/drivers/clk/qcom/camcc-qcs615.c
@@ -0,0 +1,1597 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_AUX,
+ P_CAM_CC_PLL1_OUT_AUX,
+ P_CAM_CC_PLL2_OUT_AUX2,
+ P_CAM_CC_PLL2_OUT_EARLY,
+ P_CAM_CC_PLL3_OUT_MAIN,
+};
+
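+/*
+ * Each pll_vco entry is { min_freq, max_freq, val }: a VCO operating band
+ * and the register value that selects it (the "VCO - n" notes below).
+ */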
+static const struct pll_vco brammo_vco[] = {
+ { 500000000, 1250000000, 0 },
+};
+
+static const struct pll_vco spark_vco[] = {
+ { 1000000000, 2100000000, 0 },
+ { 750000000, 1500000000, 1 },
+ { 500000000, 1000000000, 2 },
+ { 300000000, 500000000, 3 },
+ { 550000000, 1100000000, 4 },
+};
+
+/* 600MHz configuration VCO - 2 */
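+/*
+ * Assuming the default alpha PLL's 40-bit fractional encoding (alpha_hi
+ * holds bits [39:32]), rate = (L + alpha_hi / 2^8) * 19.2 MHz XO =
+ * (0x1f + 0x40 / 0x100) * 19.2 MHz = 31.25 * 19.2 MHz = 600 MHz.
+ */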
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x1f,
+ .alpha_hi = 0x40,
+ .alpha_en_mask = BIT(24),
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .aux_output_mask = BIT(1),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .config = &cam_cc_pll0_config,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 808MHz configuration VCO - 2 */
+static struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x2a,
+ .alpha_hi = 0x15,
+ .alpha = 0x55555555,
+ .alpha_en_mask = BIT(24),
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .aux_output_mask = BIT(1),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .config = &cam_cc_pll1_config,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 960MHz configuration VCO - 0 */
+static struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .vco_val = 0x0,
+ .vco_mask = GENMASK(21, 20),
+ .early_output_mask = BIT(3),
+ .aux2_output_mask = BIT(2),
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = 0x3 << 8,
+ .config_ctl_val = 0x04289,
+ .test_ctl_val = 0x08000000,
+ .test_ctl_mask = 0x08000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .config = &cam_cc_pll2_config,
+ .vco_table = brammo_vco,
+ .num_vco = ARRAY_SIZE(brammo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_aux2[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux2 = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_aux2,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_aux2),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2_out_aux2",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+/* 1080MHz configuration VCO - 0 */
+static struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x38,
+ .alpha_hi = 0x40,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x0,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .config = &cam_cc_pll3_config,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_AUX, 2 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 4 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_AUX, 2 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 4 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_AUX2, 1 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2_out_aux2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_AUX, 2 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_AUX, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x6010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_clk_src[] = {
+ F(37500000, P_CAM_CC_PLL0_OUT_AUX, 16, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_AUX, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_clk_src = {
+ .cmd_rcgr = 0xb0d8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_cci_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(269333333, P_CAM_CC_PLL1_OUT_AUX, 3, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EARLY, 3, 0, 0),
+ F(384000000, P_CAM_CC_PLL2_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x9064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(269333333, P_CAM_CC_PLL1_OUT_AUX, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x5028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x504c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_AUX, 2, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x603c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_AUX, 2.5, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_AUX, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xb088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_AUX, 2.5, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_AUX, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x9010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EARLY, 3, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_AUX, 2, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0x903c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xa034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0xb004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0xb024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_jpeg_clk_src[] = {
+ F(66666667, P_CAM_CC_PLL0_OUT_AUX, 9, 0, 0),
+ F(133333333, P_CAM_CC_PLL0_OUT_AUX, 4.5, 0, 0),
+ F(216000000, P_CAM_CC_PLL3_OUT_MAIN, 5, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EARLY, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_AUX, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xb04c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_jpeg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(216000000, P_CAM_CC_PLL3_OUT_MAIN, 5, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_AUX, 2, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xb0f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_AUX2, 10, 1, 2),
+ F(34285714, P_CAM_CC_PLL2_OUT_AUX2, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x4004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x4024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x4044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x4064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(80000000, P_CAM_CC_PLL0_OUT_AUX, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x6058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
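+/*
+ * Branch clocks are simple gates: enable_mask turns the clock on and
+ * halt_reg/halt_check confirm that it actually toggled. Branches fed by
+ * an RCG list it in parent_hws and set CLK_SET_RATE_PARENT so rate
+ * requests propagate to the source; the AXI/AHB branches declared
+ * without parent_hws are modeled as gates only.
+ */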
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x6070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x6054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x6038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x6028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xb124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb124,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_clk = {
+ .halt_reg = 0xb0f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0xb144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xb11c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb11c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x5040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x5064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x5020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x5044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x5068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xb0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0x9080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0x907c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x907c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0x9054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0x9038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xa058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xa04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xa030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0xb044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0xb03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x7040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x703c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x703c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xb064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xb110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb110,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x403c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x403c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x405c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x405c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x407c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x407c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+ .halt_reg = 0xb140,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb140,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_soc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+ .halt_reg = 0xb0a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sys_tmr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
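+/*
+ * GDSCs are the power domains for the camera blocks. titan_top_gdsc is
+ * the top-level switch; the per-block domains name it as their genpd
+ * parent so it powers up first and powers down last. The *_wait_val
+ * fields tune the handshake delays programmed into the GDSCR.
+ */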
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xb134,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *cam_cc_qcs615_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CCI_CLK] = &cam_cc_cci_clk.clkr,
+ [CAM_CC_CCI_CLK_SRC] = &cam_cc_cci_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_AUX2] = &cam_cc_pll2_out_aux2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+ [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+};
+
+static struct gdsc *cam_cc_qcs615_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
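+/*
+ * Each entry in the reset map holds only the offset of the block's BCR
+ * register; with no bit or bitmask specified, the qcom reset controller
+ * asserts BIT(0) at that offset.
+ */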
+static const struct qcom_reset_map cam_cc_qcs615_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x6000 },
+ [CAM_CC_CAMNOC_BCR] = { 0xb120 },
+ [CAM_CC_CCI_BCR] = { 0xb0d4 },
+ [CAM_CC_CPAS_BCR] = { 0xb118 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x5000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x5024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x5048 },
+ [CAM_CC_ICP_BCR] = { 0xb074 },
+ [CAM_CC_IFE_0_BCR] = { 0x9000 },
+ [CAM_CC_IFE_1_BCR] = { 0xa000 },
+ [CAM_CC_IFE_LITE_BCR] = { 0xb000 },
+ [CAM_CC_IPE_0_BCR] = { 0x7000 },
+ [CAM_CC_JPEG_BCR] = { 0xb048 },
+ [CAM_CC_LRME_BCR] = { 0xb0f4 },
+ [CAM_CC_MCLK0_BCR] = { 0x4000 },
+ [CAM_CC_MCLK1_BCR] = { 0x4020 },
+ [CAM_CC_MCLK2_BCR] = { 0x4040 },
+ [CAM_CC_MCLK3_BCR] = { 0x4060 },
+ [CAM_CC_TITAN_TOP_BCR] = { 0xb130 },
+};
+
+static struct clk_alpha_pll *cam_cc_qcs615_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+};
+
+static const struct regmap_config cam_cc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xd004,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data cam_cc_qcs615_driver_data = {
+ .alpha_plls = cam_cc_qcs615_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_qcs615_plls),
+};
+
+static const struct qcom_cc_desc cam_cc_qcs615_desc = {
+ .config = &cam_cc_qcs615_regmap_config,
+ .clks = cam_cc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_qcs615_clocks),
+ .resets = cam_cc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_qcs615_resets),
+ .gdscs = cam_cc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_qcs615_gdscs),
+ .driver_data = &cam_cc_qcs615_driver_data,
+};
+
+static const struct of_device_id cam_cc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_qcs615_match_table);
+
+static int cam_cc_qcs615_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &cam_cc_qcs615_desc);
+}
+
+static struct platform_driver cam_cc_qcs615_driver = {
+ .probe = cam_cc_qcs615_probe,
+ .driver = {
+ .name = "camcc-qcs615",
+ .of_match_table = cam_cc_qcs615_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_qcs615_driver);
+
+MODULE_DESCRIPTION("QTI CAMCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-sc8180x.c b/drivers/clk/qcom/camcc-sc8180x.c
new file mode 100644
index 000000000000..388fedf1dc81
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sc8180x.c
@@ -0,0 +1,2889 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sc8180x-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
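+/*
+ * DT_* values index the "clocks" property of the consumer's devicetree
+ * node (referenced via .index in clk_parent_data below); P_* values name
+ * the RCG mux inputs, which the parent_map tables translate into
+ * hardware select values.
+ */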
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_EARLY,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_SLEEP_CLK,
+};
+
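+/*
+ * A pll_vco entry is { min_hz, max_hz, val }: the output range the VCO
+ * supports and the selector value programmed when a requested rate falls
+ * inside it. A single entry simply bounds the legal output range.
+ */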
+static const struct pll_vco regera_vco[] = {
+ { 600000000, 3300000000, 0 },
+};
+
+static const struct pll_vco trion_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00003100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
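+/*
+ * Assuming the 16-bit fractional alpha used by these Trion PLLs and a
+ * 19.2 MHz TCXO, l = 0x3e (62) with alpha = 0x8000 (0.5) sets cam_cc_pll0
+ * to 19.2 MHz * 62.5 = 1200 MHz; the even and odd postdivs below then
+ * yield 600 MHz and 400 MHz respectively.
+ */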
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x13,
+ .alpha = 0x8800,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x10000807,
+ .config_ctl_hi_val = 0x00000011,
+ .config_ctl_hi1_val = 0x04300142,
+ .test_ctl_val = 0x04000400,
+ .test_ctl_hi_val = 0x00004000,
+ .test_ctl_hi1_val = 0x00000000,
+ .user_ctl_val = 0x00000100,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = regera_vco,
+ .num_vco = ARRAY_SIZE(regera_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_REGERA],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_regera_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_main),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_REGERA],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x4078,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x40f0,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
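+/*
+ * parent_map tables pair each P_* software parent with the hardware mux
+ * index written to the RCG CFG register; the matching clk_parent_data
+ * array must list the same parents in the same order so the framework
+ * can translate between the two.
+ */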
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll2_out_main.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll4.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll5.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll6.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(266666667, P_CAM_CC_PLL0_OUT_ODD, 1.5, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0xc170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0xc108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0xc124,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_2_clk_src = {
+ .cmd_rcgr = 0xc204,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_2_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_3_clk_src = {
+ .cmd_rcgr = 0xc220,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_3_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0xa064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x6004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x6028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x604c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x6070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x703c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fd_core_clk_src = {
+ .cmd_rcgr = 0xc0e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xc0b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0xa03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xb010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xb034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_2_clk_src = {
+ .cmd_rcgr = 0xf010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_2_csid_clk_src = {
+ .cmd_rcgr = 0xf03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_3_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_3_clk_src = {
+ .cmd_rcgr = 0xf07c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_ife_3_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_3_csid_clk_src = {
+ .cmd_rcgr = 0xf0a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_clk_src = {
+ .cmd_rcgr = 0xc004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_csid_clk_src = {
+ .cmd_rcgr = 0xc020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_clk_src = {
+ .cmd_rcgr = 0xc048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_csid_clk_src = {
+ .cmd_rcgr = 0xc064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_2_clk_src = {
+ .cmd_rcgr = 0xc240,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_2_csid_clk_src = {
+ .cmd_rcgr = 0xc25c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_3_clk_src = {
+ .cmd_rcgr = 0xc284,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_3_csid_clk_src = {
+ .cmd_rcgr = 0xc2a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(375000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(475000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x8010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xc08c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL2_OUT_MAIN, 2, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xc144,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
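+/*
+ * The sensor MCLK RCGs are the only ones here with an M/N counter
+ * (mnd_width = 8), allowing fractional rates such as 24 MHz via the
+ * /10 pre-divider with m/n = 1/4 in the table below.
+ */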
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 4),
+ F(68571429, P_CAM_CC_PLL2_OUT_EARLY, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x5024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x5044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x5064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x5084,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk5_clk_src = {
+ .cmd_rcgr = 0x50a4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk6_clk_src = {
+ .cmd_rcgr = 0x50c4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk7_clk_src = {
+ .cmd_rcgr = 0x50e4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x7058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0xc1cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
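+/*
+ * Branch (gate) clocks: each cam_cc_*_clk below gates the output of one
+ * of the RCGs above and forwards rate requests to it via
+ * CLK_SET_RATE_PARENT.
+ */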
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x7070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x7054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xc18c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc18c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0xc194,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc194,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0xc120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0xc13c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc13c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_2_clk = {
+ .halt_reg = 0xc21c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc21c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_3_clk = {
+ .halt_reg = 0xc238,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc238,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0xc1c8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc1c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xc168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc168,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x6040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x6064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x6088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x6020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x6044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x6068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x608c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x608c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_fd_core_clk = {
+ .halt_reg = 0xc0f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fd_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_fd_core_uar_clk = {
+ .halt_reg = 0xc100,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc100,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_uar_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fd_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0xc0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xc0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0xa080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0xa07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0xa038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xb058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xb054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xb04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_axi_clk = {
+ .halt_reg = 0xf068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_clk = {
+ .halt_reg = 0xf028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_cphy_rx_clk = {
+ .halt_reg = 0xf064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_csid_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_dsp_clk = {
+ .halt_reg = 0xf038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_axi_clk = {
+ .halt_reg = 0xf0d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_clk = {
+ .halt_reg = 0xf094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_cphy_rx_clk = {
+ .halt_reg = 0xf0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_csid_clk = {
+ .halt_reg = 0xf0c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_3_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_dsp_clk = {
+ .halt_reg = 0xf0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_clk = {
+ .halt_reg = 0xc01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_cphy_rx_clk = {
+ .halt_reg = 0xc040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_csid_clk = {
+ .halt_reg = 0xc038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_clk = {
+ .halt_reg = 0xc060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_cphy_rx_clk = {
+ .halt_reg = 0xc084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_csid_clk = {
+ .halt_reg = 0xc07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_2_clk = {
+ .halt_reg = 0xc258,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc258,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_2_cphy_rx_clk = {
+ .halt_reg = 0xc27c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc27c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_2_csid_clk = {
+ .halt_reg = 0xc274,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc274,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_3_clk = {
+ .halt_reg = 0xc29c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc29c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_3_cphy_rx_clk = {
+ .halt_reg = 0xc2c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc2c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_3_csid_clk = {
+ .halt_reg = 0xc2b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc2b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_3_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_ahb_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_areg_clk = {
+ .halt_reg = 0x9024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_axi_clk = {
+ .halt_reg = 0x9020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_clk = {
+ .halt_reg = 0x9010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xc0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xc15c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc15c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x505c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x507c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x507c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x509c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x509c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk5_clk = {
+ .halt_reg = 0x50bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk6_clk = {
+ .halt_reg = 0x50dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk7_clk = {
+ .halt_reg = 0x50fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
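+/*
+ * GDSC power domains: titan_top_gdsc is the top-level camera domain; the
+ * per-block GDSCs below are its children, and POLL_CFG_GDSCR means the
+ * power state is polled through the CFG_GDSCR register.
+ */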
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xc1bc,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xb004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_2_gdsc = {
+ .gdscr = 0xf004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_3_gdsc = {
+ .gdscr = 0xf070,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_3_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x8004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_1_gdsc = {
+ .gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
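+/* Indexed by the CAM_CC_* clock IDs from the SC8180X camcc dt-bindings header. */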
+static struct clk_regmap *cam_cc_sc8180x_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CCI_2_CLK] = &cam_cc_cci_2_clk.clkr,
+ [CAM_CC_CCI_2_CLK_SRC] = &cam_cc_cci_2_clk_src.clkr,
+ [CAM_CC_CCI_3_CLK] = &cam_cc_cci_3_clk.clkr,
+ [CAM_CC_CCI_3_CLK_SRC] = &cam_cc_cci_3_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
+ [CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
+ [CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_2_AXI_CLK] = &cam_cc_ife_2_axi_clk.clkr,
+ [CAM_CC_IFE_2_CLK] = &cam_cc_ife_2_clk.clkr,
+ [CAM_CC_IFE_2_CLK_SRC] = &cam_cc_ife_2_clk_src.clkr,
+ [CAM_CC_IFE_2_CPHY_RX_CLK] = &cam_cc_ife_2_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_2_CSID_CLK] = &cam_cc_ife_2_csid_clk.clkr,
+ [CAM_CC_IFE_2_CSID_CLK_SRC] = &cam_cc_ife_2_csid_clk_src.clkr,
+ [CAM_CC_IFE_2_DSP_CLK] = &cam_cc_ife_2_dsp_clk.clkr,
+ [CAM_CC_IFE_3_AXI_CLK] = &cam_cc_ife_3_axi_clk.clkr,
+ [CAM_CC_IFE_3_CLK] = &cam_cc_ife_3_clk.clkr,
+ [CAM_CC_IFE_3_CLK_SRC] = &cam_cc_ife_3_clk_src.clkr,
+ [CAM_CC_IFE_3_CPHY_RX_CLK] = &cam_cc_ife_3_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_3_CSID_CLK] = &cam_cc_ife_3_csid_clk.clkr,
+ [CAM_CC_IFE_3_CSID_CLK_SRC] = &cam_cc_ife_3_csid_clk_src.clkr,
+ [CAM_CC_IFE_3_DSP_CLK] = &cam_cc_ife_3_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK] = &cam_cc_ife_lite_0_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK_SRC] = &cam_cc_ife_lite_0_clk_src.clkr,
+ [CAM_CC_IFE_LITE_0_CPHY_RX_CLK] = &cam_cc_ife_lite_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK] = &cam_cc_ife_lite_0_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK_SRC] = &cam_cc_ife_lite_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CLK] = &cam_cc_ife_lite_1_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CLK_SRC] = &cam_cc_ife_lite_1_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CPHY_RX_CLK] = &cam_cc_ife_lite_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK] = &cam_cc_ife_lite_1_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK_SRC] = &cam_cc_ife_lite_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_2_CLK] = &cam_cc_ife_lite_2_clk.clkr,
+ [CAM_CC_IFE_LITE_2_CLK_SRC] = &cam_cc_ife_lite_2_clk_src.clkr,
+ [CAM_CC_IFE_LITE_2_CPHY_RX_CLK] = &cam_cc_ife_lite_2_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_2_CSID_CLK] = &cam_cc_ife_lite_2_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_2_CSID_CLK_SRC] = &cam_cc_ife_lite_2_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_3_CLK] = &cam_cc_ife_lite_3_clk.clkr,
+ [CAM_CC_IFE_LITE_3_CLK_SRC] = &cam_cc_ife_lite_3_clk_src.clkr,
+ [CAM_CC_IFE_LITE_3_CPHY_RX_CLK] = &cam_cc_ife_lite_3_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_3_CSID_CLK] = &cam_cc_ife_lite_3_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_3_CSID_CLK_SRC] = &cam_cc_ife_lite_3_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr,
+ [CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr,
+ [CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr,
+ [CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_MCLK5_CLK] = &cam_cc_mclk5_clk.clkr,
+ [CAM_CC_MCLK5_CLK_SRC] = &cam_cc_mclk5_clk_src.clkr,
+ [CAM_CC_MCLK6_CLK] = &cam_cc_mclk6_clk.clkr,
+ [CAM_CC_MCLK6_CLK_SRC] = &cam_cc_mclk6_clk_src.clkr,
+ [CAM_CC_MCLK7_CLK] = &cam_cc_mclk7_clk.clkr,
+ [CAM_CC_MCLK7_CLK_SRC] = &cam_cc_mclk7_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_MAIN] = &cam_cc_pll2_out_main.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *cam_cc_sc8180x_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IFE_2_GDSC] = &ife_2_gdsc,
+ [IFE_3_GDSC] = &ife_3_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [IPE_1_GDSC] = &ipe_1_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
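+/* Block control reset (BCR) register offsets, indexed by the binding's CAM_CC_*_BCR IDs. */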
+static const struct qcom_reset_map cam_cc_sc8180x_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x7000 },
+ [CAM_CC_CAMNOC_BCR] = { 0xc16c },
+ [CAM_CC_CCI_BCR] = { 0xc104 },
+ [CAM_CC_CPAS_BCR] = { 0xc164 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x6000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x6024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x6048 },
+ [CAM_CC_CSI3PHY_BCR] = { 0x606c },
+ [CAM_CC_FD_BCR] = { 0xc0dc },
+ [CAM_CC_ICP_BCR] = { 0xc0b4 },
+ [CAM_CC_IFE_0_BCR] = { 0xa000 },
+ [CAM_CC_IFE_1_BCR] = { 0xb000 },
+ [CAM_CC_IFE_2_BCR] = { 0xf000 },
+ [CAM_CC_IFE_3_BCR] = { 0xf06c },
+ [CAM_CC_IFE_LITE_0_BCR] = { 0xc000 },
+ [CAM_CC_IFE_LITE_1_BCR] = { 0xc044 },
+ [CAM_CC_IFE_LITE_2_BCR] = { 0xc23c },
+ [CAM_CC_IFE_LITE_3_BCR] = { 0xc280 },
+ [CAM_CC_IPE_0_BCR] = { 0x8000 },
+ [CAM_CC_IPE_1_BCR] = { 0x9000 },
+ [CAM_CC_JPEG_BCR] = { 0xc088 },
+ [CAM_CC_LRME_BCR] = { 0xc140 },
+ [CAM_CC_MCLK0_BCR] = { 0x5000 },
+ [CAM_CC_MCLK1_BCR] = { 0x5020 },
+ [CAM_CC_MCLK2_BCR] = { 0x5040 },
+ [CAM_CC_MCLK3_BCR] = { 0x5060 },
+ [CAM_CC_MCLK4_BCR] = { 0x5080 },
+ [CAM_CC_MCLK5_BCR] = { 0x50a0 },
+ [CAM_CC_MCLK6_BCR] = { 0x50c0 },
+ [CAM_CC_MCLK7_BCR] = { 0x50e0 },
+};
+
+static const struct regmap_config cam_cc_sc8180x_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf0d4,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc cam_cc_sc8180x_desc = {
+ .config = &cam_cc_sc8180x_regmap_config,
+ .clks = cam_cc_sc8180x_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sc8180x_clocks),
+ .resets = cam_cc_sc8180x_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sc8180x_resets),
+ .gdscs = cam_cc_sc8180x_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sc8180x_gdscs),
+};
+
+static const struct of_device_id cam_cc_sc8180x_match_table[] = {
+ { .compatible = "qcom,sc8180x-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sc8180x_match_table);
+
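+/*
+ * Probe: enable runtime PM, map the register space, program the PLLs
+ * (Trion, plus the Regera-type PLL2), force the GDSC and sleep branch
+ * clocks on, then register the clocks, resets and power domains.
+ */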
+static int cam_cc_sc8180x_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sc8180x_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_trion_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_trion_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_regera_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_trion_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_trion_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_trion_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+ clk_trion_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xc1e4); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc200); /* CAM_CC_SLEEP_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sc8180x_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver cam_cc_sc8180x_driver = {
+ .probe = cam_cc_sc8180x_probe,
+ .driver = {
+ .name = "camcc-sc8180x",
+ .of_match_table = cam_cc_sc8180x_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sc8180x_driver);
+
+MODULE_DESCRIPTION("QTI CAMCC SC8180X Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-sm8450.c b/drivers/clk/qcom/camcc-sm8450.c
index 08982737e490..4dd8be8cc988 100644
--- a/drivers/clk/qcom/camcc-sm8450.c
+++ b/drivers/clk/qcom/camcc-sm8450.c
@@ -86,6 +86,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll0_config = {
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
+ .config = &cam_cc_pll0_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -191,6 +192,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll1_config = {
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
+ .config = &cam_cc_pll1_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -257,6 +259,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll2_config = {
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
+ .config = &cam_cc_pll2_config,
.vco_table = rivian_evo_vco,
.num_vco = ARRAY_SIZE(rivian_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
@@ -296,6 +299,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll3_config = {
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
+ .config = &cam_cc_pll3_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -368,6 +372,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll4_config = {
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
+ .config = &cam_cc_pll4_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -440,6 +445,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll5_config = {
static struct clk_alpha_pll cam_cc_pll5 = {
.offset = 0x5000,
+ .config = &cam_cc_pll5_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -512,6 +518,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll6_config = {
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
+ .config = &cam_cc_pll6_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -584,6 +591,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll7_config = {
static struct clk_alpha_pll cam_cc_pll7 = {
.offset = 0x7000,
+ .config = &cam_cc_pll7_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -656,6 +664,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll8_config = {
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
+ .config = &cam_cc_pll8_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -1476,24 +1485,6 @@ static struct clk_rcg2 cam_cc_xo_clk_src = {
},
};
-static struct clk_branch cam_cc_gdsc_clk = {
- .halt_reg = 0x1320c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x1320c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "cam_cc_gdsc_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &cam_cc_xo_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch cam_cc_bps_ahb_clk = {
.halt_reg = 0x1004c,
.halt_check = BRANCH_HALT,
@@ -2819,7 +2810,6 @@ static struct clk_regmap *cam_cc_sm8450_clocks[] = {
[CAM_CC_CSIPHY4_CLK] = &cam_cc_csiphy4_clk.clkr,
[CAM_CC_CSIPHY5_CLK] = &cam_cc_csiphy5_clk.clkr,
[CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
- [CAM_CC_GDSC_CLK] = &cam_cc_gdsc_clk.clkr,
[CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
[CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
[CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
@@ -2913,6 +2903,22 @@ static const struct qcom_reset_map cam_cc_sm8450_resets[] = {
[CAM_CC_SFE_1_BCR] = { 0x13094 },
};
+static struct clk_alpha_pll *cam_cc_sm8450_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll5,
+ &cam_cc_pll6,
+ &cam_cc_pll7,
+ &cam_cc_pll8,
+};
+
+static u32 cam_cc_sm8450_critical_cbcrs[] = {
+ 0x1320c, /* CAM_CC_GDSC_CLK */
+};
+
static const struct regmap_config cam_cc_sm8450_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3021,6 +3027,13 @@ static struct gdsc *cam_cc_sm8450_gdscs[] = {
[TITAN_TOP_GDSC] = &titan_top_gdsc,
};
+static struct qcom_cc_driver_data cam_cc_sm8450_driver_data = {
+ .alpha_plls = cam_cc_sm8450_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_sm8450_plls),
+ .clk_cbcrs = cam_cc_sm8450_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_sm8450_critical_cbcrs),
+};
+
static const struct qcom_cc_desc cam_cc_sm8450_desc = {
.config = &cam_cc_sm8450_regmap_config,
.clks = cam_cc_sm8450_clocks,
@@ -3029,6 +3042,8 @@ static const struct qcom_cc_desc cam_cc_sm8450_desc = {
.num_resets = ARRAY_SIZE(cam_cc_sm8450_resets),
.gdscs = cam_cc_sm8450_gdscs,
.num_gdscs = ARRAY_SIZE(cam_cc_sm8450_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_sm8450_driver_data,
};
static const struct of_device_id cam_cc_sm8450_match_table[] = {
@@ -3040,12 +3055,6 @@ MODULE_DEVICE_TABLE(of, cam_cc_sm8450_match_table);
static int cam_cc_sm8450_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
-
- regmap = qcom_cc_map(pdev, &cam_cc_sm8450_desc);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-camcc")) {
/* Update CAMCC PLL0 */
cam_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
@@ -3092,28 +3101,18 @@ static int cam_cc_sm8450_probe(struct platform_device *pdev)
cam_cc_pll8_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
cam_cc_pll8_out_even.clkr.hw.init = &sm8475_cam_cc_pll8_out_even_init;
- clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &sm8475_cam_cc_pll0_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &sm8475_cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &sm8475_cam_cc_pll2_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &sm8475_cam_cc_pll3_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &sm8475_cam_cc_pll4_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &sm8475_cam_cc_pll5_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &sm8475_cam_cc_pll6_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll7, regmap, &sm8475_cam_cc_pll7_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &sm8475_cam_cc_pll8_config);
- } else {
- clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
+ cam_cc_pll0.config = &sm8475_cam_cc_pll0_config;
+ cam_cc_pll1.config = &sm8475_cam_cc_pll1_config;
+ cam_cc_pll2.config = &sm8475_cam_cc_pll2_config;
+ cam_cc_pll3.config = &sm8475_cam_cc_pll3_config;
+ cam_cc_pll4.config = &sm8475_cam_cc_pll4_config;
+ cam_cc_pll5.config = &sm8475_cam_cc_pll5_config;
+ cam_cc_pll6.config = &sm8475_cam_cc_pll6_config;
+ cam_cc_pll7.config = &sm8475_cam_cc_pll7_config;
+ cam_cc_pll8.config = &sm8475_cam_cc_pll8_config;
}
- return qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8450_desc, regmap);
+ return qcom_cc_probe(pdev, &cam_cc_sm8450_desc);
}
static struct platform_driver cam_cc_sm8450_driver = {
diff --git a/drivers/clk/qcom/camcc-sm8550.c b/drivers/clk/qcom/camcc-sm8550.c
index 871155783c79..63aed9e4c362 100644
--- a/drivers/clk/qcom/camcc-sm8550.c
+++ b/drivers/clk/qcom/camcc-sm8550.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm8550-camcc.h>
@@ -74,6 +73,7 @@ static const struct alpha_pll_config cam_cc_pll0_config = {
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
+ .config = &cam_cc_pll0_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -151,6 +151,7 @@ static const struct alpha_pll_config cam_cc_pll1_config = {
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
+ .config = &cam_cc_pll1_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -201,6 +202,7 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
+ .config = &cam_cc_pll2_config,
.vco_table = rivian_ole_vco,
.num_vco = ARRAY_SIZE(rivian_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
@@ -232,6 +234,7 @@ static const struct alpha_pll_config cam_cc_pll3_config = {
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
+ .config = &cam_cc_pll3_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -286,6 +289,7 @@ static const struct alpha_pll_config cam_cc_pll4_config = {
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
+ .config = &cam_cc_pll4_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -340,6 +344,7 @@ static const struct alpha_pll_config cam_cc_pll5_config = {
static struct clk_alpha_pll cam_cc_pll5 = {
.offset = 0x5000,
+ .config = &cam_cc_pll5_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -394,6 +399,7 @@ static const struct alpha_pll_config cam_cc_pll6_config = {
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
+ .config = &cam_cc_pll6_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -448,6 +454,7 @@ static const struct alpha_pll_config cam_cc_pll7_config = {
static struct clk_alpha_pll cam_cc_pll7 = {
.offset = 0x7000,
+ .config = &cam_cc_pll7_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -502,6 +509,7 @@ static const struct alpha_pll_config cam_cc_pll8_config = {
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
+ .config = &cam_cc_pll8_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -556,6 +564,7 @@ static const struct alpha_pll_config cam_cc_pll9_config = {
static struct clk_alpha_pll cam_cc_pll9 = {
.offset = 0x9000,
+ .config = &cam_cc_pll9_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -610,6 +619,7 @@ static const struct alpha_pll_config cam_cc_pll10_config = {
static struct clk_alpha_pll cam_cc_pll10 = {
.offset = 0xa000,
+ .config = &cam_cc_pll10_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -664,6 +674,7 @@ static const struct alpha_pll_config cam_cc_pll11_config = {
static struct clk_alpha_pll cam_cc_pll11 = {
.offset = 0xb000,
+ .config = &cam_cc_pll11_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -718,6 +729,7 @@ static const struct alpha_pll_config cam_cc_pll12_config = {
static struct clk_alpha_pll cam_cc_pll12 = {
.offset = 0xc000,
+ .config = &cam_cc_pll12_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -3479,6 +3491,27 @@ static const struct qcom_reset_map cam_cc_sm8550_resets[] = {
[CAM_CC_SFE_1_BCR] = { 0x133dc },
};
+static struct clk_alpha_pll *cam_cc_sm8550_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll5,
+ &cam_cc_pll6,
+ &cam_cc_pll7,
+ &cam_cc_pll8,
+ &cam_cc_pll9,
+ &cam_cc_pll10,
+ &cam_cc_pll11,
+ &cam_cc_pll12,
+};
+
+static u32 cam_cc_sm8550_critical_cbcrs[] = {
+ 0x1419c, /* CAM_CC_GDSC_CLK */
+ 0x142cc, /* CAM_CC_SLEEP_CLK */
+};
+
static const struct regmap_config cam_cc_sm8550_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3487,6 +3520,13 @@ static const struct regmap_config cam_cc_sm8550_regmap_config = {
.fast_io = true,
};
+static struct qcom_cc_driver_data cam_cc_sm8550_driver_data = {
+ .alpha_plls = cam_cc_sm8550_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_sm8550_plls),
+ .clk_cbcrs = cam_cc_sm8550_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_sm8550_critical_cbcrs),
+};
+
static const struct qcom_cc_desc cam_cc_sm8550_desc = {
.config = &cam_cc_sm8550_regmap_config,
.clks = cam_cc_sm8550_clocks,
@@ -3495,6 +3535,8 @@ static const struct qcom_cc_desc cam_cc_sm8550_desc = {
.num_resets = ARRAY_SIZE(cam_cc_sm8550_resets),
.gdscs = cam_cc_sm8550_gdscs,
.num_gdscs = ARRAY_SIZE(cam_cc_sm8550_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_sm8550_driver_data,
};
static const struct of_device_id cam_cc_sm8550_match_table[] = {
@@ -3505,46 +3547,7 @@ MODULE_DEVICE_TABLE(of, cam_cc_sm8550_match_table);
static int cam_cc_sm8550_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &cam_cc_sm8550_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
- }
-
- clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll9, regmap, &cam_cc_pll9_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll10, regmap, &cam_cc_pll10_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll11, regmap, &cam_cc_pll11_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll12, regmap, &cam_cc_pll12_config);
-
- /* Keep some clocks always-on */
- qcom_branch_set_clk_en(regmap, 0x1419c); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x142cc); /* CAM_CC_SLEEP_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8550_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
- return ret;
+ return qcom_cc_probe(pdev, &cam_cc_sm8550_desc);
}
static struct platform_driver cam_cc_sm8550_driver = {
diff --git a/drivers/clk/qcom/camcc-sm8650.c b/drivers/clk/qcom/camcc-sm8650.c
index 0ccd6de8ba78..8b388904f56f 100644
--- a/drivers/clk/qcom/camcc-sm8650.c
+++ b/drivers/clk/qcom/camcc-sm8650.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm8650-camcc.h>
@@ -72,6 +71,7 @@ static const struct alpha_pll_config cam_cc_pll0_config = {
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
+ .config = &cam_cc_pll0_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -149,6 +149,7 @@ static const struct alpha_pll_config cam_cc_pll1_config = {
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
+ .config = &cam_cc_pll1_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -199,6 +200,7 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
+ .config = &cam_cc_pll2_config,
.vco_table = rivian_ole_vco,
.num_vco = ARRAY_SIZE(rivian_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
@@ -230,6 +232,7 @@ static const struct alpha_pll_config cam_cc_pll3_config = {
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
+ .config = &cam_cc_pll3_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -284,6 +287,7 @@ static const struct alpha_pll_config cam_cc_pll4_config = {
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
+ .config = &cam_cc_pll4_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -338,6 +342,7 @@ static const struct alpha_pll_config cam_cc_pll5_config = {
static struct clk_alpha_pll cam_cc_pll5 = {
.offset = 0x5000,
+ .config = &cam_cc_pll5_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -392,6 +397,7 @@ static const struct alpha_pll_config cam_cc_pll6_config = {
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
+ .config = &cam_cc_pll6_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -446,6 +452,7 @@ static const struct alpha_pll_config cam_cc_pll7_config = {
static struct clk_alpha_pll cam_cc_pll7 = {
.offset = 0x7000,
+ .config = &cam_cc_pll7_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -500,6 +507,7 @@ static const struct alpha_pll_config cam_cc_pll8_config = {
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
+ .config = &cam_cc_pll8_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -554,6 +562,7 @@ static const struct alpha_pll_config cam_cc_pll9_config = {
static struct clk_alpha_pll cam_cc_pll9 = {
.offset = 0x9000,
+ .config = &cam_cc_pll9_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -631,6 +640,7 @@ static const struct alpha_pll_config cam_cc_pll10_config = {
static struct clk_alpha_pll cam_cc_pll10 = {
.offset = 0xa000,
+ .config = &cam_cc_pll10_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -3509,6 +3519,27 @@ static const struct qcom_reset_map cam_cc_sm8650_resets[] = {
[CAM_CC_SFE_2_BCR] = { 0x130f4 },
};
+static struct clk_alpha_pll *cam_cc_sm8650_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll5,
+ &cam_cc_pll6,
+ &cam_cc_pll7,
+ &cam_cc_pll8,
+ &cam_cc_pll9,
+ &cam_cc_pll10,
+};
+
+static u32 cam_cc_sm8650_critical_cbcrs[] = {
+ 0x132ec, /* CAM_CC_GDSC_CLK */
+ 0x13308, /* CAM_CC_SLEEP_CLK */
+ 0x13314, /* CAM_CC_DRV_XO_CLK */
+ 0x13318, /* CAM_CC_DRV_AHB_CLK */
+};
+
static const struct regmap_config cam_cc_sm8650_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3517,6 +3548,13 @@ static const struct regmap_config cam_cc_sm8650_regmap_config = {
.fast_io = true,
};
+static struct qcom_cc_driver_data cam_cc_sm8650_driver_data = {
+ .alpha_plls = cam_cc_sm8650_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_sm8650_plls),
+ .clk_cbcrs = cam_cc_sm8650_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_sm8650_critical_cbcrs),
+};
+
static const struct qcom_cc_desc cam_cc_sm8650_desc = {
.config = &cam_cc_sm8650_regmap_config,
.clks = cam_cc_sm8650_clocks,
@@ -3525,6 +3563,8 @@ static const struct qcom_cc_desc cam_cc_sm8650_desc = {
.num_resets = ARRAY_SIZE(cam_cc_sm8650_resets),
.gdscs = cam_cc_sm8650_gdscs,
.num_gdscs = ARRAY_SIZE(cam_cc_sm8650_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_sm8650_driver_data,
};
static const struct of_device_id cam_cc_sm8650_match_table[] = {
@@ -3535,46 +3575,7 @@ MODULE_DEVICE_TABLE(of, cam_cc_sm8650_match_table);
static int cam_cc_sm8650_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &cam_cc_sm8650_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
- }
-
- clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll9, regmap, &cam_cc_pll9_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll10, regmap, &cam_cc_pll10_config);
-
- /* Keep clocks always enabled */
- qcom_branch_set_clk_en(regmap, 0x13318); /* CAM_CC_DRV_AHB_CLK */
- qcom_branch_set_clk_en(regmap, 0x13314); /* CAM_CC_DRV_XO_CLK */
- qcom_branch_set_clk_en(regmap, 0x132ec); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x13308); /* CAM_CC_SLEEP_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8650_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
- return ret;
+ return qcom_cc_probe(pdev, &cam_cc_sm8650_desc);
}
static struct platform_driver cam_cc_sm8650_driver = {
diff --git a/drivers/clk/qcom/camcc-x1e80100.c b/drivers/clk/qcom/camcc-x1e80100.c
index b73524ae64b1..cbcc1c9fcb34 100644
--- a/drivers/clk/qcom/camcc-x1e80100.c
+++ b/drivers/clk/qcom/camcc-x1e80100.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,x1e80100-camcc.h>
@@ -67,6 +66,7 @@ static const struct alpha_pll_config cam_cc_pll0_config = {
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
+ .config = &cam_cc_pll0_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -144,6 +144,7 @@ static const struct alpha_pll_config cam_cc_pll1_config = {
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
+ .config = &cam_cc_pll1_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -194,6 +195,7 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
+ .config = &cam_cc_pll2_config,
.vco_table = rivian_ole_vco,
.num_vco = ARRAY_SIZE(rivian_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
@@ -225,6 +227,7 @@ static const struct alpha_pll_config cam_cc_pll3_config = {
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
+ .config = &cam_cc_pll3_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -279,6 +282,7 @@ static const struct alpha_pll_config cam_cc_pll4_config = {
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
+ .config = &cam_cc_pll4_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -333,6 +337,7 @@ static const struct alpha_pll_config cam_cc_pll6_config = {
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
+ .config = &cam_cc_pll6_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -387,6 +392,7 @@ static const struct alpha_pll_config cam_cc_pll8_config = {
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
+ .config = &cam_cc_pll8_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -2418,6 +2424,21 @@ static const struct qcom_reset_map cam_cc_x1e80100_resets[] = {
[CAM_CC_SFE_0_BCR] = { 0x1327c },
};
+static struct clk_alpha_pll *cam_cc_x1e80100_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll6,
+ &cam_cc_pll8,
+};
+
+static u32 cam_cc_x1e80100_critical_cbcrs[] = {
+ 0x13a9c, /* CAM_CC_GDSC_CLK */
+ 0x13ab8, /* CAM_CC_SLEEP_CLK */
+};
+
static const struct regmap_config cam_cc_x1e80100_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -2426,6 +2447,13 @@ static const struct regmap_config cam_cc_x1e80100_regmap_config = {
.fast_io = true,
};
+static struct qcom_cc_driver_data cam_cc_x1e80100_driver_data = {
+ .alpha_plls = cam_cc_x1e80100_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_x1e80100_plls),
+ .clk_cbcrs = cam_cc_x1e80100_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_x1e80100_critical_cbcrs),
+};
+
static const struct qcom_cc_desc cam_cc_x1e80100_desc = {
.config = &cam_cc_x1e80100_regmap_config,
.clks = cam_cc_x1e80100_clocks,
@@ -2434,6 +2462,8 @@ static const struct qcom_cc_desc cam_cc_x1e80100_desc = {
.num_resets = ARRAY_SIZE(cam_cc_x1e80100_resets),
.gdscs = cam_cc_x1e80100_gdscs,
.num_gdscs = ARRAY_SIZE(cam_cc_x1e80100_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_x1e80100_driver_data,
};
static const struct of_device_id cam_cc_x1e80100_match_table[] = {
@@ -2444,40 +2474,7 @@ MODULE_DEVICE_TABLE(of, cam_cc_x1e80100_match_table);
static int cam_cc_x1e80100_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &cam_cc_x1e80100_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
- }
-
- clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
-
- /* Keep clocks always enabled */
- qcom_branch_set_clk_en(regmap, 0x13a9c); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x13ab8); /* CAM_CC_SLEEP_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_x1e80100_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
- return ret;
+ return qcom_cc_probe(pdev, &cam_cc_x1e80100_desc);
}
static struct platform_driver cam_cc_x1e80100_driver = {
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index cec0afea8e44..fec6eb376e27 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -63,6 +63,8 @@
#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
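+/*
+ * Recover the CLK_ALPHA_PLL_TYPE_* index of a pll from which row of
+ * clk_alpha_pll_regs[] its ->regs pointer references.
+ */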
+#define GET_PLL_TYPE(pll) (((pll)->regs - clk_alpha_pll_regs[0]) / PLL_OFF_MAX_REGS)
+
const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[CLK_ALPHA_PLL_TYPE_DEFAULT] = {
[PLL_OFF_L_VAL] = 0x04,
@@ -788,6 +790,29 @@ static int clk_alpha_pll_update_latch(struct clk_alpha_pll *pll,
return __clk_alpha_pll_update_latch(pll);
}
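+/*
+ * Program the L value, the (shifted) ALPHA value and, optionally, the VCO
+ * selection and the ALPHA_EN bit. Shared by the set-rate, slew and
+ * calibration paths.
+ */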
+static void clk_alpha_pll_update_configs(struct clk_alpha_pll *pll, const struct pll_vco *vco,
+ u32 l, u64 alpha, u32 alpha_width, bool alpha_en)
+{
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+
+ if (alpha_width > ALPHA_BITWIDTH)
+ alpha <<= alpha_width - ALPHA_BITWIDTH;
+
+ if (alpha_width > 32)
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), upper_32_bits(alpha));
+
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), lower_32_bits(alpha));
+
+ if (vco) {
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_VCO_MASK << PLL_VCO_SHIFT,
+ vco->val << PLL_VCO_SHIFT);
+ }
+
+ if (alpha_en)
+ regmap_set_bits(pll->clkr.regmap, PLL_USER_CTL(pll), PLL_ALPHA_EN);
+}
+
static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate,
int (*is_enabled)(struct clk_hw *))
@@ -805,24 +830,7 @@ static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
- regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
-
- if (alpha_width > ALPHA_BITWIDTH)
- a <<= alpha_width - ALPHA_BITWIDTH;
-
- if (alpha_width > 32)
- regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), a >> 32);
-
- regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
-
- if (vco) {
- regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
- PLL_VCO_MASK << PLL_VCO_SHIFT,
- vco->val << PLL_VCO_SHIFT);
- }
-
- regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
- PLL_ALPHA_EN, PLL_ALPHA_EN);
+ clk_alpha_pll_update_configs(pll, vco, l, a, alpha_width, true);
return clk_alpha_pll_update_latch(pll, is_enabled);
}
@@ -2960,3 +2968,208 @@ const struct clk_ops clk_alpha_pll_regera_ops = {
.set_rate = clk_zonda_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regera_ops);
+
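+/*
+ * Configure a PLL via the helper matching its register layout, using the
+ * alpha_pll_config embedded in the pll itself.
+ */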
+void qcom_clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap)
+{
+ const struct clk_init_data *init = pll->clkr.hw.init;
+
+ switch (GET_PLL_TYPE(pll)) {
+ case CLK_ALPHA_PLL_TYPE_LUCID_OLE:
+ clk_lucid_ole_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_LUCID_EVO:
+ clk_lucid_evo_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_TAYCAN_ELU:
+ clk_taycan_elu_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_RIVIAN_EVO:
+ clk_rivian_evo_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_TRION:
+ clk_trion_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_HUAYRA_2290:
+ clk_huayra_2290_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_FABIA:
+ clk_fabia_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_AGERA:
+ clk_agera_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_PONGO_ELU:
+ clk_pongo_elu_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_ZONDA:
+ case CLK_ALPHA_PLL_TYPE_ZONDA_OLE:
+ clk_zonda_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_STROMER:
+ case CLK_ALPHA_PLL_TYPE_STROMER_PLUS:
+ clk_stromer_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_DEFAULT:
+ case CLK_ALPHA_PLL_TYPE_DEFAULT_EVO:
+ case CLK_ALPHA_PLL_TYPE_HUAYRA:
+ case CLK_ALPHA_PLL_TYPE_HUAYRA_APSS:
+ case CLK_ALPHA_PLL_TYPE_BRAMMO:
+ case CLK_ALPHA_PLL_TYPE_BRAMMO_EVO:
+ clk_alpha_pll_configure(pll, regmap, pll->config);
+ break;
+ default:
+ WARN(1, "%s: invalid pll type\n", init->name);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(qcom_clk_alpha_pll_configure);
+
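+/* Latch the new L/ALPHA values into a running PLL and wait for it to relock */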
+static int clk_alpha_pll_slew_update(struct clk_alpha_pll *pll)
+{
+ u32 val;
+ int ret;
+
+ regmap_set_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE);
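+ /* Read back PLL_MODE so the PLL_UPDATE write reaches the hardware before polling */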
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+
+ ret = wait_for_pll_update(pll);
+ if (ret)
+ return ret;
+ /*
+ * The hardware programming guide mandates a wait of at least 570 ns
+ * before polling the LOCK detect bit. Delay for 1 us to be safe.
+ */
+ udelay(1);
+
+ return wait_for_pll_enable_lock(pll);
+}
+
+static int clk_alpha_pll_slew_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ const struct pll_vco *curr_vco, *vco;
+ unsigned long freq_hz;
+ u64 a;
+ u32 l;
+
+ freq_hz = alpha_pll_round_rate(rate, parent_rate, &l, &a, ALPHA_REG_BITWIDTH);
+ if (freq_hz != rate) {
+ pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ curr_vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
+ if (!curr_vco) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ vco = alpha_pll_find_vco(pll, freq_hz);
+ if (!vco) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Dynamic PLL update does not support switching frequencies across
+ * VCO ranges; in that case fall back to the normal alpha set-rate path.
+ */
+ if (curr_vco->val != vco->val)
+ return clk_alpha_pll_set_rate(hw, rate, parent_rate);
+
+ clk_alpha_pll_update_configs(pll, NULL, l, a, ALPHA_REG_BITWIDTH, false);
+
+ /* Ensure that the write above goes before slewing the PLL */
+ mb();
+
+ if (clk_hw_is_enabled(hw))
+ return clk_alpha_pll_slew_update(pll);
+
+ return 0;
+}
+
+/*
+ * Slewing PLLs should be brought up at a frequency in the middle of the
+ * desired VCO range. So after bringing up the PLL at the calibration
+ * frequency, set it back to the desired frequency (the one set by the
+ * previous clk_set_rate).
+ */
+static int clk_alpha_pll_calibrate(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct clk_hw *parent;
+ const struct pll_vco *vco;
+ unsigned long calibration_freq, freq_hz;
+ u64 a;
+ u32 l;
+ int rc;
+
+ parent = clk_hw_get_parent(hw);
+ if (!parent) {
+ pr_err("alpha pll: no valid parent found\n");
+ return -EINVAL;
+ }
+
+ vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
+ if (!vco) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Since vco_sel is not allowed to change while slewing, the VCO table
+ * should have exactly one entry (index 0); use it to derive the
+ * calibration frequency.
+ */
+ calibration_freq = (pll->vco_table[0].min_freq + pll->vco_table[0].max_freq) / 2;
+
+ freq_hz = alpha_pll_round_rate(calibration_freq, clk_hw_get_rate(parent),
+ &l, &a, ALPHA_REG_BITWIDTH);
+ if (freq_hz != calibration_freq) {
+ pr_err("alpha_pll: call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ clk_alpha_pll_update_configs(pll, vco, l, a, ALPHA_REG_BITWIDTH, false);
+
+ /* Bring up the PLL at the calibration frequency */
+ rc = clk_alpha_pll_enable(hw);
+ if (rc) {
+ pr_err("alpha pll calibration failed\n");
+ return rc;
+ }
+
+ /*
+ * The PLL is now running at the calibration frequency,
+ * so slew it to the previously set frequency.
+ */
+ freq_hz = alpha_pll_round_rate(clk_hw_get_rate(hw),
+ clk_hw_get_rate(parent), &l, &a, ALPHA_REG_BITWIDTH);
+
+ pr_debug("pll %s: setting back to required rate %lu, freq_hz %ld\n",
+ clk_hw_get_name(hw), clk_hw_get_rate(hw), freq_hz);
+
+ clk_alpha_pll_update_configs(pll, NULL, l, a, ALPHA_REG_BITWIDTH, true);
+
+ return clk_alpha_pll_slew_update(pll);
+}
+
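+/* Calibrate at mid-VCO before the first enable, then enable at the requested rate */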
+static int clk_alpha_pll_slew_enable(struct clk_hw *hw)
+{
+ int rc;
+
+ rc = clk_alpha_pll_calibrate(hw);
+ if (rc)
+ return rc;
+
+ return clk_alpha_pll_enable(hw);
+}
+
+const struct clk_ops clk_alpha_pll_slew_ops = {
+ .enable = clk_alpha_pll_slew_enable,
+ .disable = clk_alpha_pll_disable,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_slew_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_slew_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 79aca8525262..ff41aeab0ab9 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -81,6 +81,7 @@ struct pll_vco {
* struct clk_alpha_pll - phase locked loop (PLL)
* @offset: base address of registers
* @regs: alpha pll register map (see @clk_alpha_pll_regs)
+ * @config: array of pll settings
* @vco_table: array of VCO settings
* @num_vco: number of VCO settings in @vco_table
* @flags: bitmask to indicate features supported by the hardware
@@ -90,6 +91,7 @@ struct clk_alpha_pll {
u32 offset;
const u8 *regs;
+ const struct alpha_pll_config *config;
const struct pll_vco *vco_table;
size_t num_vco;
#define SUPPORTS_OFFLINE_REQ BIT(0)
@@ -204,6 +206,7 @@ extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
#define clk_alpha_pll_postdiv_rivian_evo_ops clk_alpha_pll_postdiv_fabia_ops
extern const struct clk_ops clk_alpha_pll_regera_ops;
+extern const struct clk_ops clk_alpha_pll_slew_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
@@ -237,5 +240,6 @@ void clk_stromer_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_regera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void qcom_clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap);
#endif
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index ccc112c21667..be0145631197 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -351,15 +351,15 @@ static int clk_rpm_set_rate(struct clk_hw *hw,
return 0;
}
-static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_rpm_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* RPM handles rate rounding and we don't have a way to
* know what the rate will be, so just return whatever
* rate is requested.
*/
- return rate;
+ return 0;
}
static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
@@ -383,7 +383,7 @@ static const struct clk_ops clk_rpm_xo_ops = {
static const struct clk_ops clk_rpm_fixed_ops = {
.prepare = clk_rpm_fixed_prepare,
.unprepare = clk_rpm_fixed_unprepare,
- .round_rate = clk_rpm_round_rate,
+ .determine_rate = clk_rpm_determine_rate,
.recalc_rate = clk_rpm_recalc_rate,
};
@@ -391,7 +391,7 @@ static const struct clk_ops clk_rpm_ops = {
.prepare = clk_rpm_prepare,
.unprepare = clk_rpm_unprepare,
.set_rate = clk_rpm_set_rate,
- .round_rate = clk_rpm_round_rate,
+ .determine_rate = clk_rpm_determine_rate,
.recalc_rate = clk_rpm_recalc_rate,
};
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 00fb3e53a388..1496fb3de4be 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -321,10 +321,10 @@ static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_rpmh_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
@@ -339,7 +339,7 @@ static const struct clk_ops clk_rpmh_bcm_ops = {
.prepare = clk_rpmh_bcm_prepare,
.unprepare = clk_rpmh_bcm_unprepare,
.set_rate = clk_rpmh_bcm_set_rate,
- .round_rate = clk_rpmh_round_rate,
+ .determine_rate = clk_rpmh_determine_rate,
.recalc_rate = clk_rpmh_bcm_recalc_rate,
};
@@ -386,6 +386,8 @@ DEFINE_CLK_RPMH_VRM(clk6, _a2, "clka6", 2);
DEFINE_CLK_RPMH_VRM(clk7, _a2, "clka7", 2);
DEFINE_CLK_RPMH_VRM(clk8, _a2, "clka8", 2);
+DEFINE_CLK_RPMH_VRM(clk7, _a4, "clka7", 4);
+
DEFINE_CLK_RPMH_VRM(div_clk1, _div2, "divclka1", 2);
DEFINE_CLK_RPMH_BCM(ce, "CE0");
@@ -541,6 +543,29 @@ static const struct clk_rpmh_desc clk_rpmh_sc8180x = {
.num_clks = ARRAY_SIZE(sc8180x_rpmh_clocks),
};
+static struct clk_hw *milos_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw,
+ [RPMH_LN_BB_CLK2] = &clk_rpmh_clk7_a4.hw,
+ [RPMH_LN_BB_CLK2_A] = &clk_rpmh_clk7_a4_ao.hw,
+ /*
+ * RPMH_LN_BB_CLK3(_A) and RPMH_LN_BB_CLK4(_A) are marked as optional
+ * downstream, but do not exist in cmd-db on SM7635, so skip them.
+ */
+ [RPMH_RF_CLK1] = &clk_rpmh_clk1_a1.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_clk1_a1_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_clk2_a1.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_clk2_a1_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_clk3_a1.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_clk3_a1_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_milos = {
+ .clks = milos_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(milos_rpmh_clocks),
+};
+
static struct clk_hw *sm8250_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
[RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
@@ -943,6 +968,7 @@ static int clk_rpmh_probe(struct platform_device *pdev)
}
static const struct of_device_id clk_rpmh_match_table[] = {
+ { .compatible = "qcom,milos-rpmh-clk", .data = &clk_rpmh_milos},
{ .compatible = "qcom,qcs615-rpmh-clk", .data = &clk_rpmh_qcs615},
{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
{ .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p},
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 3fbaa646286f..3bf6df3884a5 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -370,15 +370,15 @@ static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_smd_rpm_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* RPM handles rate rounding and we don't have a way to
* know what the rate will be, so just return whatever
* rate is requested.
*/
- return rate;
+ return 0;
}
static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw,
@@ -427,7 +427,7 @@ static const struct clk_ops clk_smd_rpm_ops = {
.prepare = clk_smd_rpm_prepare,
.unprepare = clk_smd_rpm_unprepare,
.set_rate = clk_smd_rpm_set_rate,
- .round_rate = clk_smd_rpm_round_rate,
+ .determine_rate = clk_smd_rpm_determine_rate,
.recalc_rate = clk_smd_rpm_recalc_rate,
};
diff --git a/drivers/clk/qcom/clk-spmi-pmic-div.c b/drivers/clk/qcom/clk-spmi-pmic-div.c
index 41a0a4f3b4fb..3e2ac6745325 100644
--- a/drivers/clk/qcom/clk-spmi-pmic-div.c
+++ b/drivers/clk/qcom/clk-spmi-pmic-div.c
@@ -112,16 +112,18 @@ static void clk_spmi_pmic_div_disable(struct clk_hw *hw)
spin_unlock_irqrestore(&clkdiv->lock, flags);
}
-static long clk_spmi_pmic_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_spmi_pmic_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned int div, div_factor;
- div = DIV_ROUND_UP(*parent_rate, rate);
+ div = DIV_ROUND_UP(req->best_parent_rate, req->rate);
div_factor = div_to_div_factor(div);
div = div_factor_to_div(div_factor);
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static unsigned long
@@ -169,7 +171,7 @@ static const struct clk_ops clk_spmi_pmic_div_ops = {
.disable = clk_spmi_pmic_div_disable,
.set_rate = clk_spmi_pmic_div_set_rate,
.recalc_rate = clk_spmi_pmic_div_recalc_rate,
- .round_rate = clk_spmi_pmic_div_round_rate,
+ .determine_rate = clk_spmi_pmic_div_determine_rate,
};
struct spmi_pmic_div_clk_cc {
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 9e3380fd7181..37c3008e6c1b 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -9,10 +9,13 @@
#include <linux/platform_device.h>
#include <linux/clk-provider.h>
#include <linux/interconnect-clk.h>
+#include <linux/pm_runtime.h>
#include <linux/reset-controller.h>
#include <linux/of.h>
#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "reset.h"
@@ -284,6 +287,40 @@ static int qcom_cc_icc_register(struct device *dev,
desc->num_icc_hws, icd);
}
+static int qcom_cc_clk_pll_configure(const struct qcom_cc_driver_data *data,
+ struct regmap *regmap)
+{
+ const struct clk_init_data *init;
+ struct clk_alpha_pll *pll;
+ int i;
+
+ for (i = 0; i < data->num_alpha_plls; i++) {
+ pll = data->alpha_plls[i];
+ init = pll->clkr.hw.init;
+
+ if (!pll->config || !pll->regs) {
+ pr_err("%s: missing pll config or regs\n", init->name);
+ return -EINVAL;
+ }
+
+ qcom_clk_alpha_pll_configure(pll, regmap);
+ }
+
+ return 0;
+}
+
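+/* Enable each always-on branch clock (CBCR) and run the optional register hook */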
+static void qcom_cc_clk_regs_configure(struct device *dev, const struct qcom_cc_driver_data *data,
+ struct regmap *regmap)
+{
+ int i;
+
+ for (i = 0; i < data->num_clk_cbcrs; i++)
+ qcom_branch_set_clk_en(regmap, data->clk_cbcrs[i]);
+
+ if (data->clk_regs_configure)
+ data->clk_regs_configure(dev, regmap);
+}
+
int qcom_cc_really_probe(struct device *dev,
const struct qcom_cc_desc *desc, struct regmap *regmap)
{
@@ -304,6 +341,24 @@ int qcom_cc_really_probe(struct device *dev,
if (ret < 0 && ret != -EEXIST)
return ret;
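+ /*
+ * Keep the controller resumed via runtime PM while its registers are
+ * programmed and the clocks are registered below.
+ */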
+ if (desc->use_rpm) {
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+ }
+
+ if (desc->driver_data) {
+ ret = qcom_cc_clk_pll_configure(desc->driver_data, regmap);
+ if (ret)
+ goto put_rpm;
+
+ qcom_cc_clk_regs_configure(dev, desc->driver_data, regmap);
+ }
+
reset = &cc->reset;
reset->rcdev.of_node = dev->of_node;
reset->rcdev.ops = &qcom_reset_ops;
@@ -314,23 +369,35 @@ int qcom_cc_really_probe(struct device *dev,
ret = devm_reset_controller_register(dev, &reset->rcdev);
if (ret)
- return ret;
+ goto put_rpm;
if (desc->gdscs && desc->num_gdscs) {
scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL);
- if (!scd)
- return -ENOMEM;
+ if (!scd) {
+ ret = -ENOMEM;
+ goto put_rpm;
+ }
scd->dev = dev;
scd->scs = desc->gdscs;
scd->num = desc->num_gdscs;
scd->pd_list = cc->pd_list;
ret = gdsc_register(scd, &reset->rcdev, regmap);
if (ret)
- return ret;
+ goto put_rpm;
ret = devm_add_action_or_reset(dev, qcom_cc_gdsc_unregister,
scd);
if (ret)
- return ret;
+ goto put_rpm;
+ }
+
+ if (desc->driver_data &&
+ desc->driver_data->dfs_rcgs &&
+ desc->driver_data->num_dfs_rcgs) {
+ ret = qcom_cc_register_rcg_dfs(regmap,
+ desc->driver_data->dfs_rcgs,
+ desc->driver_data->num_dfs_rcgs);
+ if (ret)
+ goto put_rpm;
}
cc->rclks = rclks;
@@ -341,7 +408,7 @@ int qcom_cc_really_probe(struct device *dev,
for (i = 0; i < num_clk_hws; i++) {
ret = devm_clk_hw_register(dev, clk_hws[i]);
if (ret)
- return ret;
+ goto put_rpm;
}
for (i = 0; i < num_clks; i++) {
@@ -350,14 +417,20 @@ int qcom_cc_really_probe(struct device *dev,
ret = devm_clk_register_regmap(dev, rclks[i]);
if (ret)
- return ret;
+ goto put_rpm;
}
ret = devm_of_clk_add_hw_provider(dev, qcom_cc_clk_hw_get, cc);
if (ret)
- return ret;
+ goto put_rpm;
+
+ ret = qcom_cc_icc_register(dev, desc);
+
+put_rpm:
+ if (desc->use_rpm)
+ pm_runtime_put(dev);
- return qcom_cc_icc_register(dev, desc);
+ return ret;
}
EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 7ace5d7f5836..953c91f7b145 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -25,6 +25,16 @@ struct qcom_icc_hws_data {
int clk_id;
};
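+/*
+ * Optional driver data hanging off qcom_cc_desc: Alpha PLLs to configure at
+ * probe, always-on branch registers (CBCRs) to enable, DFS-capable RCGs to
+ * register, and a hook for any extra register programming.
+ */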
+struct qcom_cc_driver_data {
+ struct clk_alpha_pll **alpha_plls;
+ size_t num_alpha_plls;
+ u32 *clk_cbcrs;
+ size_t num_clk_cbcrs;
+ const struct clk_rcg_dfs_data *dfs_rcgs;
+ size_t num_dfs_rcgs;
+ void (*clk_regs_configure)(struct device *dev, struct regmap *regmap);
+};
+
struct qcom_cc_desc {
const struct regmap_config *config;
struct clk_regmap **clks;
@@ -38,6 +48,8 @@ struct qcom_cc_desc {
const struct qcom_icc_hws_data *icc_hws;
size_t num_icc_hws;
unsigned int icc_first_node_id;
+ bool use_rpm;
+ struct qcom_cc_driver_data *driver_data;
};
/**
diff --git a/drivers/clk/qcom/dispcc-milos.c b/drivers/clk/qcom/dispcc-milos.c
new file mode 100644
index 000000000000..602d3a498d33
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-milos.c
@@ -0,0 +1,974 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-dispcc.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_AHB_CLK,
+ DT_GCC_DISP_GPLL0_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+};
+
+#define DISP_CC_MISC_CMD 0xF000
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_EVEN,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_GCC_DISP_GPLL0_CLK,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 257.142858 MHz Configuration */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .config = &disp_cc_pll0_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_DISP_GPLL0_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GCC_DISP_GPLL0_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_GCC_DISP_GPLL0_CLK, 4 },
+ { P_DISP_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .index = DT_GCC_DISP_GPLL0_CLK },
+ { .hw = &disp_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GCC_DISP_GPLL0_CLK, 8, 0, 0),
+ F(75000000, P_GCC_DISP_GPLL0_CLK, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8130,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x8118,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x80cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x80e8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x8100,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(12800000, P_BI_TCXO, 1.5, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x80b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(342000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(402000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(535000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(600000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(630000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x8068,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8050,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x8080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_7_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x80b0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x80e4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_clk = {
+ .halt_reg = 0xe050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
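+/*
+ * MDSS power domains: HW_CTRL hands runtime on/off control of the GDSC to
+ * the display hardware, and RETAIN_FF_ENABLE preserves register flip-flop
+ * contents across power collapse.
+ */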
+static struct gdsc disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_milos_clocks[] = {
+ [DISP_CC_MDSS_ACCU_CLK] = &disp_cc_mdss_accu_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_milos_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct gdsc *disp_cc_milos_gdscs[] = {
+ [DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
+ [DISP_CC_MDSS_CORE_INT2_GDSC] = &disp_cc_mdss_core_int2_gdsc,
+};
+
+static struct clk_alpha_pll *disp_cc_milos_plls[] = {
+ &disp_cc_pll0,
+};
+
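+/* CBCRs force-enabled by the common probe code and left always-on. */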
+static u32 disp_cc_milos_critical_cbcrs[] = {
+ 0xe06c, /* DISP_CC_SLEEP_CLK */
+ 0xe04c, /* DISP_CC_XO_CLK */
+};
+
+static const struct regmap_config disp_cc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11008,
+ .fast_io = true,
+};
+
+static void disp_cc_milos_clk_regs_configure(struct device *dev, struct regmap *regmap)
+{
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+}
+
+static struct qcom_cc_driver_data disp_cc_milos_driver_data = {
+ .alpha_plls = disp_cc_milos_plls,
+ .num_alpha_plls = ARRAY_SIZE(disp_cc_milos_plls),
+ .clk_cbcrs = disp_cc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(disp_cc_milos_critical_cbcrs),
+ .clk_regs_configure = disp_cc_milos_clk_regs_configure,
+};
+
+static struct qcom_cc_desc disp_cc_milos_desc = {
+ .config = &disp_cc_milos_regmap_config,
+ .clks = disp_cc_milos_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_milos_clocks),
+ .resets = disp_cc_milos_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_milos_resets),
+ .gdscs = disp_cc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &disp_cc_milos_driver_data,
+};
+
+static const struct of_device_id disp_cc_milos_match_table[] = {
+ { .compatible = "qcom,milos-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_milos_match_table);
+
+static int disp_cc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &disp_cc_milos_desc);
+}
+
+static struct platform_driver disp_cc_milos_driver = {
+ .probe = disp_cc_milos_probe,
+ .driver = {
+ .name = "disp_cc-milos",
+ .of_match_table = disp_cc_milos_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_milos_driver);
+
+MODULE_DESCRIPTION("QTI DISP_CC Milos Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-qcs615.c b/drivers/clk/qcom/dispcc-qcs615.c
new file mode 100644
index 000000000000..4a6d78466098
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-qcs615.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+ DT_DP_PHY_PLL_LINK_CLK,
+ DT_DP_PHY_PLL_VCO_DIV_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DP_PHY_PLL_LINK_CLK,
+ P_DP_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_GPLL0_OUT_MAIN,
+};
+
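+/* Single VCO band: { min_freq, max_freq, val } */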
+static const struct pll_vco disp_cc_pll_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+/* 576 MHz configuration, VCO band 2 */
+static struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0x1e,
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
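+/*
+ * 576 MHz = 19.2 MHz XO * L (0x1e = 30). The common probe code applies
+ * disp_cc_pll0_config before registration; clk_alpha_pll_slew_ops can then
+ * reprogram the PLL on the fly for the higher MDP rates.
+ */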
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .config = &disp_cc_pll0_config,
+ .vco_table = disp_cc_pll_vco,
+ .num_vco = ARRAY_SIZE(disp_cc_pll_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_slew_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP_PHY_PLL_LINK_CLK, 1 },
+ { P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP_PHY_PLL_LINK_CLK },
+ { .index = DT_DP_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .index = DT_GPLL0 },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0 },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+};
+
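+/*
+ * F(rate, parent, pre_div, m, n): an m/n of 0 bypasses the M/N counter so
+ * only the pre-divider applies.
+ */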
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x2170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x20c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+ .cmd_rcgr = 0x2158,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+ .cmd_rcgr = 0x2110,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_crypto_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+ .cmd_rcgr = 0x20f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel1_clk_src = {
+ .cmd_rcgr = 0x2140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+ .cmd_rcgr = 0x2128,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_pixel_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x20dc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(307000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x2078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x2060,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x2090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x20a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x20d8,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
+ .reg = 0x210c,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x2048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x2028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+ .halt_reg = 0x2044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+ .halt_reg = 0x2038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+ .halt_reg = 0x2030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+ .halt_reg = 0x2034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel1_clk = {
+ .halt_reg = 0x2040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+ .halt_reg = 0x203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_pixel_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_pixel_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x2018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0x4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x2010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0x400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0x4008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x2020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_core_gdsc = {
+ .gdscr = 0x3000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *disp_cc_qcs615_clocks[] = {
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+ [DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dp_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL1_CLK] = &disp_cc_mdss_dp_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL1_CLK_SRC] = &disp_cc_mdss_dp_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+};
+
+static struct gdsc *disp_cc_qcs615_gdscs[] = {
+ [MDSS_CORE_GDSC] = &mdss_core_gdsc,
+};
+
+static struct clk_alpha_pll *disp_cc_qcs615_plls[] = {
+ &disp_cc_pll0,
+};
+
+static u32 disp_cc_qcs615_critical_cbcrs[] = {
+ 0x6054, /* DISP_CC_XO_CLK */
+};
+
+static const struct regmap_config disp_cc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data disp_cc_qcs615_driver_data = {
+ .alpha_plls = disp_cc_qcs615_plls,
+ .num_alpha_plls = ARRAY_SIZE(disp_cc_qcs615_plls),
+ .clk_cbcrs = disp_cc_qcs615_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(disp_cc_qcs615_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc disp_cc_qcs615_desc = {
+ .config = &disp_cc_qcs615_regmap_config,
+ .clks = disp_cc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_qcs615_clocks),
+ .gdscs = disp_cc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_qcs615_gdscs),
+ .driver_data = &disp_cc_qcs615_driver_data,
+};
+
+static const struct of_device_id disp_cc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_qcs615_match_table);
+
+static int disp_cc_qcs615_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &disp_cc_qcs615_desc);
+}
+
+static struct platform_driver disp_cc_qcs615_driver = {
+ .probe = disp_cc_qcs615_probe,
+ .driver = {
+ .name = "dispcc-qcs615",
+ .of_match_table = disp_cc_qcs615_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_qcs615_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sm8750.c b/drivers/clk/qcom/dispcc-sm8750.c
index 877b40d50e6f..ca09da111a50 100644
--- a/drivers/clk/qcom/dispcc-sm8750.c
+++ b/drivers/clk/qcom/dispcc-sm8750.c
@@ -393,7 +393,7 @@ static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
.name = "disp_cc_mdss_byte0_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_byte2_ops,
},
};
@@ -408,7 +408,7 @@ static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
.name = "disp_cc_mdss_byte1_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_byte2_ops,
},
};
@@ -712,7 +712,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
.name = "disp_cc_mdss_pclk0_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_pixel_ops,
},
};
@@ -727,7 +727,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
.name = "disp_cc_mdss_pclk1_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_pixel_ops,
},
};
@@ -742,7 +742,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk2_clk_src = {
.name = "disp_cc_mdss_pclk2_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_pixel_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index d38628b52268..5ac44cfb53ce 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -125,21 +125,23 @@ static const struct clk_fepll_vco gcc_fepll_vco = {
* It looks up the frequency table and returns the next higher frequency
* supported in hardware.
*/
-static long clk_cpu_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate)
+static int clk_cpu_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fepll *pll = to_clk_fepll(hw);
struct clk_hw *p_hw;
const struct freq_tbl *f;
- f = qcom_find_freq(pll->freq_tbl, rate);
+ f = qcom_find_freq(pll->freq_tbl, req->rate);
if (!f)
return -EINVAL;
p_hw = clk_hw_get_parent_by_index(hw, f->src);
- *p_rate = clk_hw_get_rate(p_hw);
+ req->best_parent_rate = clk_hw_get_rate(p_hw);
+
+ req->rate = f->freq;

- return f->freq;
+ return 0;
};
/*
@@ -205,7 +207,7 @@ clk_cpu_div_recalc_rate(struct clk_hw *hw,
};
static const struct clk_ops clk_regmap_cpu_div_ops = {
- .round_rate = clk_cpu_div_round_rate,
+ .determine_rate = clk_cpu_div_determine_rate,
.set_rate = clk_cpu_div_set_rate,
.recalc_rate = clk_cpu_div_recalc_rate,
};
diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c
index 70f5dcb96700..dcda2be8c1a5 100644
--- a/drivers/clk/qcom/gcc-ipq5018.c
+++ b/drivers/clk/qcom/gcc-ipq5018.c
@@ -1371,7 +1371,7 @@ static struct clk_branch gcc_xo_clk = {
&gcc_xo_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -3660,7 +3660,7 @@ static const struct qcom_reset_map gcc_ipq5018_resets[] = {
[GCC_WCSS_AXI_S_ARES] = { 0x59008, 6 },
[GCC_WCSS_Q6_BCR] = { 0x18004, 0 },
[GCC_WCSSAON_RESET] = { 0x59010, 0},
- [GCC_GEPHY_MISC_ARES] = { 0x56004, 0 },
+ [GCC_GEPHY_MISC_ARES] = { 0x56004, .bitmask = GENMASK(3, 0) },
};
static const struct of_device_id gcc_ipq5018_match_table[] = {
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 7258ba5c0900..1329ea28d703 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -1895,10 +1895,10 @@ static const struct freq_conf ftbl_nss_port6_tx_clk_src_125[] = {
static const struct freq_multi_tbl ftbl_nss_port6_tx_clk_src[] = {
FMS(19200000, P_XO, 1, 0, 0),
FM(25000000, ftbl_nss_port6_tx_clk_src_25),
- FMS(78125000, P_UNIPHY1_RX, 4, 0, 0),
+ FMS(78125000, P_UNIPHY2_TX, 4, 0, 0),
FM(125000000, ftbl_nss_port6_tx_clk_src_125),
- FMS(156250000, P_UNIPHY1_RX, 2, 0, 0),
- FMS(312500000, P_UNIPHY1_RX, 1, 0, 0),
+ FMS(156250000, P_UNIPHY2_TX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY2_TX, 1, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/gcc-milos.c b/drivers/clk/qcom/gcc-milos.c
new file mode 100644
index 000000000000..c9d61b05bafa
--- /dev/null
+++ b/drivers/clk/qcom/gcc-milos.c
@@ -0,0 +1,3225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_PCIE_0_PIPE,
+ DT_PCIE_1_PIPE,
+ DT_UFS_PHY_RX_SYMBOL_0,
+ DT_UFS_PHY_RX_SYMBOL_1,
+ DT_UFS_PHY_TX_SYMBOL_0,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL0_OUT_ODD,
+ P_GCC_GPLL2_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL6_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
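+/*
+ * The GPLLs are enabled through per-PLL bits in the shared vote register at
+ * 0x52020; the "fixed" Lucid OLE ops assume the PLLs themselves were
+ * already configured by the boot firmware.
+ */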
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll2 = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll6 = {
+ .offset = 0x6000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL6_OUT_MAIN, 2 },
+ { P_GCC_GPLL7_OUT_MAIN, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll6.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_PCIE_0_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_PCIE_0_PIPE },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_PCIE_1_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_PCIE_1_PIPE },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
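+/*
+ * No postdiv clock is registered for GPLL0's odd output, so the ODD entries
+ * in the maps below reuse the main GPLL0 hw; the odd divider is presumably
+ * folded into the consumers' frequency tables.
+ */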
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL6_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_GCC_GPLL2_OUT_MAIN, 4 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll6.clkr.hw },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll2.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL6_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll6.clkr.hw },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0 },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1 },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0 },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE },
+ { .index = DT_BI_TCXO },
+};
+
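+/*
+ * PIPE/symbol clock muxes: each can fall back from the PHY-supplied clock
+ * to TCXO so downstream logic keeps a valid clock while the PHY is powered
+ * down.
+ */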
+static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x6b070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_6,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x9006c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_7,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77064,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770e0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_13,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77054,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_14,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x3906c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_15,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
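+/* One frequency table shared by all three general-purpose (GP) clocks. */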
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b074,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x90070,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x90054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_qspi_ref_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
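+ /*
+  * The QUP serial-engine RCGs keep their clk_init_data in separate named
+  * structs (and use plain clk_rcg2_ops) rather than anonymous literals.
+  * Assumption: as in other Qualcomm GCC drivers, this is so the driver can
+  * hand these RCGs to qcom_cc_register_rcg_dfs() at probe time and switch
+  * them to hardware DFS operation when firmware has DFS enabled.
+  */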
+static struct clk_init_data gcc_qupv3_wrap0_qspi_ref_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_ref_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_ref_clk_src = {
+ .cmd_rcgr = 0x18768,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_qspi_ref_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_ref_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x18010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s3_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x18290,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s4_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(128000000, P_GCC_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
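+ /*
+  * wrap0_s4 differs from its siblings: it uses parent_map_4/parent_data_4,
+  * which presumably routes GPLL6 in as a source for the 128 MHz entry
+  * above, a rate the parent_map_0 used by the other serial engines
+  * apparently cannot reach.
+  */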
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x183c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x18500,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x18638,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
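+ /*
+  * QUP wrapper 1 mirrors wrapper 0: the same per-engine register layout at
+  * base 0x1e000 instead of 0x18000, reusing the wrap0 frequency tables.
+  */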
+static struct clk_init_data gcc_qupv3_wrap1_qspi_ref_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_ref_clk_src = {
+ .cmd_rcgr = 0x1e768,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_qspi_ref_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_ref_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x1e010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x1e148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x1e290,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x1e3c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x1e500,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x1e638,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
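+ /*
+  * SDCC1 rates span card identification (144 kHz / 400 kHz from TCXO) up
+  * to 384 MHz from GPLL6, presumably for eMMC HS400; SDCC2 tops out at
+  * 202 MHz from GPLL9, presumably for SDR104 cards.
+  */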
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GCC_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GCC_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0xa3014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0xa3038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x14018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x3902c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x39044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x39070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
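+ /*
+  * The *_div2 and *_postdiv dividers below use clk_regmap_div_ro_ops:
+  * Linux only reads back the divider programmed by hardware/firmware and
+  * never reprograms it; rate control happens entirely at the
+  * CLK_SET_RATE_PARENT source.
+  */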
+static struct clk_regmap_div gcc_pcie_0_pipe_div2_clk_src = {
+ .reg = 0x6b094,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_div2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_1_pipe_div2_clk_src = {
+ .reg = 0x90090,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_div2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s2_clk_src = {
+ .reg = 0x18280,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_clk_src = {
+ .reg = 0x1e280,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x3905c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
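+ /*
+  * Branch (gate) clocks from here on. halt_check selects how an enable is
+  * verified: BRANCH_HALT polls the local halt bit, BRANCH_HALT_VOTED marks
+  * clocks whose enable bit is a vote in the shared registers
+  * (0x52000/0x52008/0x52010/0x52018), BRANCH_HALT_SKIP skips polling, and
+  * BRANCH_HALT_DELAY just waits because no halt bit is visible here.
+  */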
+static struct clk_branch gcc_aggre_noc_pcie_axi_clk = {
+ .halt_reg = 0x1005c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1005c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x770e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770e4,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x39090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x39090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x10050,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x3908c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie_sf_axi_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x7115c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x7115c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7115c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_qtb_clk = {
+ .halt_reg = 0x1006c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1006c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_pcie_sf_qtb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
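+ /*
+  * Branches without a halt_reg (BRANCH_HALT_DELAY, enabled via the vote
+  * registers) gate PLL outputs exported to other clock controllers such as
+  * the display and GPU CCs; there is no local halt bit to poll for them.
+  */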
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b03c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b02c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x6b054,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
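+ /*
+  * The PCIe pipe branches are fed by gcc_pcie_*_pipe_clk_src, presumably
+  * clk_regmap_phy_mux instances defined earlier in this file that select
+  * between the PHY pipe clock and XO, as in related Qualcomm GCC drivers.
+  */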
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b048,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_div2_clk = {
+ .halt_reg = 0x6b098,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_div2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x90038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x90034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x90034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x90028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x90028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x90050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x90044,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_div2_clk = {
+ .halt_reg = 0x90094,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_div2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x9001c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9001c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x90018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_cfg_ahb_clk = {
+ .halt_reg = 0x11004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x11004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_xo_clk = {
+ .halt_reg = 0x11008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_ref_clk = {
+ .halt_reg = 0x18764,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1813c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x18284,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x183bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x184f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x1862c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x23168,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23158,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_ref_clk = {
+ .halt_reg = 0x1e764,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x1e13c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1e274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x1e284,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x1e3bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x1e4f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x1e62c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x23150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x23154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0xa3004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa3004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0xa3008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa3008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0xa302c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa302c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
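+ /*
+  * Each *_hw_ctl_clk shares halt/enable registers with its parent branch
+  * but gates BIT(1); setting it presumably hands gating control to the
+  * hardware block itself, a common arrangement in Qualcomm UFS clock trees.
+  */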
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x77074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77074,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x770b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770b0,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7702c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77028,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x77068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77068,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_atb_clk = {
+ .halt_reg = 0x39088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x39088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_atb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x39018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x39028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x39024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x39060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x39064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x39068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x39068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(0),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_0_phy_gdsc = {
+ .gdscr = 0x6c000,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(1),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "pcie_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x90004,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(3),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_1_phy_gdsc = {
+ .gdscr = 0xa2000,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(4),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "pcie_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc ufs_mem_phy_gdsc = {
+ .gdscr = 0x9e000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "ufs_mem_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x39004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc usb3_phy_gdsc = {
+ .gdscr = 0x5000c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "usb3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gcc_milos_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_AXI_CLK] = &gcc_aggre_noc_pcie_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CNOC_PCIE_SF_AXI_CLK] = &gcc_cnoc_pcie_sf_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_QTB_CLK] = &gcc_ddrss_pcie_sf_qtb_clk.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL2] = &gcc_gpll2.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL6] = &gcc_gpll6.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_DIV2_CLK] = &gcc_pcie_0_pipe_div2_clk.clkr,
+ [GCC_PCIE_0_PIPE_DIV2_CLK_SRC] = &gcc_pcie_0_pipe_div2_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_DIV2_CLK] = &gcc_pcie_1_pipe_div2_clk.clkr,
+ [GCC_PCIE_1_PIPE_DIV2_CLK_SRC] = &gcc_pcie_1_pipe_div2_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_RSCC_CFG_AHB_CLK] = &gcc_pcie_rscc_cfg_ahb_clk.clkr,
+ [GCC_PCIE_RSCC_XO_CLK] = &gcc_pcie_rscc_xo_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_REF_CLK] = &gcc_qupv3_wrap0_qspi_ref_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_REF_CLK_SRC] = &gcc_qupv3_wrap0_qspi_ref_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK] = &gcc_qupv3_wrap1_qspi_ref_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC] = &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_USB30_PRIM_ATB_CLK] = &gcc_usb30_prim_atb_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_milos_resets[] = {
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_1_BCR] = { 0x90000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x8e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x8e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x8e024 },
+ [GCC_PCIE_RSCC_BCR] = { 0x11000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x1e000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC1_BCR] = { 0xa3000 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0x39000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x32018, 2 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_milos_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_ref_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_ref_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+};
+
+static struct gdsc *gcc_milos_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_0_PHY_GDSC] = &pcie_0_phy_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [PCIE_1_PHY_GDSC] = &pcie_1_phy_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [UFS_MEM_PHY_GDSC] = &ufs_mem_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB3_PHY_GDSC] = &usb3_phy_gdsc,
+};
+
+static u32 gcc_milos_critical_cbcrs[] = {
+ 0x26004, /* GCC_CAMERA_AHB_CLK */
+ 0x26018, /* GCC_CAMERA_HF_XO_CLK */
+ 0x2601c, /* GCC_CAMERA_SF_XO_CLK */
+ 0x27004, /* GCC_DISP_AHB_CLK */
+ 0x27018, /* GCC_DISP_XO_CLK */
+ 0x71004, /* GCC_GPU_CFG_AHB_CLK */
+ 0x32004, /* GCC_VIDEO_AHB_CLK */
+ 0x32024, /* GCC_VIDEO_XO_CLK */
+};
+
+static const struct regmap_config gcc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f41f0,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data gcc_milos_driver_data = {
+ .clk_cbcrs = gcc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gcc_milos_critical_cbcrs),
+ .dfs_rcgs = gcc_milos_dfs_clocks,
+ .num_dfs_rcgs = ARRAY_SIZE(gcc_milos_dfs_clocks),
+};
+
+static const struct qcom_cc_desc gcc_milos_desc = {
+ .config = &gcc_milos_regmap_config,
+ .clks = gcc_milos_clocks,
+ .num_clks = ARRAY_SIZE(gcc_milos_clocks),
+ .resets = gcc_milos_resets,
+ .num_resets = ARRAY_SIZE(gcc_milos_resets),
+ .gdscs = gcc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &gcc_milos_driver_data,
+};
+
+static const struct of_device_id gcc_milos_match_table[] = {
+ { .compatible = "qcom,milos-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_milos_match_table);
+
+static int gcc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_milos_desc);
+}
+
+static struct platform_driver gcc_milos_driver = {
+ .probe = gcc_milos_probe,
+ .driver = {
+ .name = "gcc-milos",
+ .of_match_table = gcc_milos_match_table,
+ },
+};
+
+static int __init gcc_milos_init(void)
+{
+ return platform_driver_register(&gcc_milos_driver);
+}
+subsys_initcall(gcc_milos_init);
+
+static void __exit gcc_milos_exit(void)
+{
+ platform_driver_unregister(&gcc_milos_driver);
+}
+module_exit(gcc_milos_exit);
+
+MODULE_DESCRIPTION("QTI GCC Milos Driver");
+MODULE_LICENSE("GPL");
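A consumer never touches these structs directly: once the GCC registers, peripherals resolve entries from gcc_milos_clocks through the generic clk API and their DT clocks/clock-names properties. A minimal sketch of that flow, assuming a hypothetical consumer whose "core" clock maps to one of the branch clocks above (the name and the 200 MHz rate are illustrative, not taken from this patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_core_clk(struct device *dev)
{
	struct clk *core;
	int ret;

	/* Resolved against the consumer's DT clocks/clock-names. */
	core = devm_clk_get(dev, "core");
	if (IS_ERR(core))
		return PTR_ERR(core);

	/* With CLK_SET_RATE_PARENT this walks up to the RCG's freq table. */
	ret = clk_set_rate(core, 200000000);
	if (ret)
		return ret;

	/* Ungates the branch, i.e. sets BIT(0) in the CBCR. */
	return clk_prepare_enable(core);
}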
diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
index 9a6703365e61..6684cab63ae1 100644
--- a/drivers/clk/qcom/gcc-qcm2290.c
+++ b/drivers/clk/qcom/gcc-qcm2290.c
@@ -2720,6 +2720,7 @@ static struct gdsc gcc_vcodec0_gdsc = {
.pd = {
.name = "gcc_vcodec0",
},
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
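HW_CTRL_TRIGGER lets the consumer flip the vcodec GDSC between software and hardware control at runtime through the genpd hwmode interface. A hedged sketch of the consumer side, assuming the standard dev_pm_genpd_set_hwmode() helper; the streaming hooks themselves are hypothetical:

#include <linux/device.h>
#include <linux/pm_domain.h>

static int example_stream_on(struct device *dev)
{
	/* Hand the power domain to hardware control while streaming. */
	return dev_pm_genpd_set_hwmode(dev, true);
}

static int example_stream_off(struct device *dev)
{
	/* Reclaim software control before collapsing the GDSC. */
	return dev_pm_genpd_set_hwmode(dev, false);
}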
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index cefceb780889..a93d1f412a7b 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -1245,7 +1245,7 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
};
/*
- * Clock ON depends on external parent 'config noc', so cant poll
- * delay and also mark as crtitical for camss boot
+ * Clock ON depends on external parent 'config noc', so can't poll
+ * delay and also mark as critical for camss boot
*/
static struct clk_branch gcc_camera_ahb_clk = {
@@ -1398,7 +1398,7 @@ static struct clk_branch gcc_ddrss_gpu_axi_clk = {
};
/*
- * Clock ON depends on external parent 'config noc', so cant poll
- * delay and also mark as crtitical for disp boot
+ * Clock ON depends on external parent 'config noc', so can't poll
+ * delay and also mark as critical for disp boot
*/
static struct clk_branch gcc_disp_ahb_clk = {
@@ -3339,7 +3339,7 @@ static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
};
/*
- * Clock ON depends on external parent 'config noc', so cant poll
- * delay and also mark as crtitical for video boot
+ * Clock ON depends on external parent 'config noc', so can't poll
+ * delay and also mark as critical for video boot
*/
static struct clk_branch gcc_video_ahb_clk = {
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 3e44757e25d3..301fc9fc32d8 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -6674,6 +6674,8 @@ static const struct qcom_reset_map gcc_x1e80100_resets[] = {
[GCC_USB_1_PHY_BCR] = { 0x2a020 },
[GCC_USB_2_PHY_BCR] = { 0xa3020 },
[GCC_VIDEO_BCR] = { 0x32000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0x32018, .bit = 2, .udelay = 1000 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0x32024, .bit = 2, .udelay = 1000 },
};
static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
diff --git a/drivers/clk/qcom/gpucc-milos.c b/drivers/clk/qcom/gpucc-milos.c
new file mode 100644
index 000000000000..4ee09879156e
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-milos.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_EVEN,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL0_OUT_ODD,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .config = &gpu_cc_pll0_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpu_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gpu_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpu_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL0_OUT_EVEN, 2 },
+ { P_GPU_CC_PLL0_OUT_ODD, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll0_out_even.clkr.hw },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(350000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ F(650000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ F(687500000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_div_clk_src = {
+ .reg = 0x942c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_accu_shift_clk = {
+ .halt_reg = 0x910c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x910c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x90ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x90d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x90e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_dpm_clk = {
+ .halt_reg = 0x9110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9110,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_dpm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_freq_measure_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_accu_shift_clk = {
+ .halt_reg = 0x9070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_acd_ahb_ff_clk = {
+ .halt_reg = 0x9068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_acd_ahb_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x9060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_rcg_ahb_ff_clk = {
+ .halt_reg = 0x906c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x906c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_rcg_ahb_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x7000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x90e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x90f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cc_cx_gdsc = {
+ .gdscr = 0x9080,
+ .gds_hw_ctrl = 0x9094,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x8,
+ .pd = {
+ .name = "gpu_cc_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct clk_regmap *gpu_cc_milos_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CX_ACCU_SHIFT_CLK] = &gpu_cc_cx_accu_shift_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_DPM_CLK] = &gpu_cc_dpm_clk.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_ACCU_SHIFT_CLK] = &gpu_cc_gx_accu_shift_clk.clkr,
+ [GPU_CC_GX_ACD_AHB_FF_CLK] = &gpu_cc_gx_acd_ahb_ff_clk.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_RCG_AHB_FF_CLK] = &gpu_cc_gx_rcg_ahb_ff_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_DIV_CLK_SRC] = &gpu_cc_hub_div_clk_src.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL0_OUT_EVEN] = &gpu_cc_pll0_out_even.clkr,
+};
+
+static struct gdsc *gpu_cc_milos_gdscs[] = {
+ [GPU_CC_CX_GDSC] = &gpu_cc_cx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_milos_resets[] = {
+ [GPU_CC_CB_BCR] = { 0x93a0 },
+ [GPU_CC_CX_BCR] = { 0x907c },
+ [GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPU_CC_FF_BCR] = { 0x9470 },
+ [GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPU_CC_GX_BCR] = { 0x905c },
+ [GPU_CC_RBCPR_BCR] = { 0x91e0 },
+ [GPU_CC_XO_BCR] = { 0x9000 },
+};
+
+static struct clk_alpha_pll *gpu_cc_milos_plls[] = {
+ &gpu_cc_pll0,
+};
+
+static u32 gpu_cc_milos_critical_cbcrs[] = {
+ 0x93a4, /* GPU_CC_CB_CLK */
+ 0x9008, /* GPU_CC_CXO_AON_CLK */
+ 0x9010, /* GPU_CC_DEMET_CLK */
+ 0x9064, /* GPU_CC_GX_AHB_FF_CLK */
+ 0x93a8, /* GPU_CC_RSCC_HUB_AON_CLK */
+ 0x9004, /* GPU_CC_RSCC_XO_AON_CLK */
+ 0x90cc, /* GPU_CC_SLEEP_CLK */
+};
+
+static const struct regmap_config gpu_cc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x95e8,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data gpu_cc_milos_driver_data = {
+ .alpha_plls = gpu_cc_milos_plls,
+ .num_alpha_plls = ARRAY_SIZE(gpu_cc_milos_plls),
+ .clk_cbcrs = gpu_cc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gpu_cc_milos_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc gpu_cc_milos_desc = {
+ .config = &gpu_cc_milos_regmap_config,
+ .clks = gpu_cc_milos_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_milos_clocks),
+ .resets = gpu_cc_milos_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_milos_resets),
+ .gdscs = gpu_cc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &gpu_cc_milos_driver_data,
+};
+
+static const struct of_device_id gpu_cc_milos_match_table[] = {
+ { .compatible = "qcom,milos-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_milos_match_table);
+
+static int gpu_cc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gpu_cc_milos_desc);
+}
+
+static struct platform_driver gpu_cc_milos_driver = {
+ .probe = gpu_cc_milos_probe,
+ .driver = {
+ .name = "gpu_cc-milos",
+ .of_match_table = gpu_cc_milos_match_table,
+ },
+};
+
+module_platform_driver(gpu_cc_milos_driver);
+
+MODULE_DESCRIPTION("QTI GPU_CC Milos Driver");
+MODULE_LICENSE("GPL");
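The gpu_cc_milos_critical_cbcrs table above is consumed by the shared qcom clk code, which keeps each listed branch permanently enabled instead of modelling it as a Linux clock. Conceptually that is one read-modify-write of the enable bit per CBCR; a sketch under that assumption (the helper name is hypothetical, the real loop lives in the common code):

#include <linux/bits.h>
#include <linux/regmap.h>
#include <linux/types.h>

/* Hypothetical helper mirroring what clk_cbcrs/num_clk_cbcrs imply:
 * set CLK_ENABLE (bit 0) in each listed CBCR and leave it set.
 */
static int example_enable_critical_cbcrs(struct regmap *regmap,
					 const u32 *cbcrs, int num)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		ret = regmap_update_bits(regmap, cbcrs[i], BIT(0), BIT(0));
		if (ret)
			return ret;
	}

	return 0;
}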
diff --git a/drivers/clk/qcom/gpucc-qcs615.c b/drivers/clk/qcom/gpucc-qcs615.c
new file mode 100644
index 000000000000..ec6739c08425
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-qcs615.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_2X_CLK,
+ P_CRC_DIV_PLL0_OUT_AUX2,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_AUX,
+ P_CRC_DIV_PLL1_OUT_AUX2,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco gpu_cc_pll0_vco[] = {
+ { 1000000000, 2100000000, 0 },
+};
+
+static struct pll_vco gpu_cc_pll1_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+/* 1020MHz configuration VCO - 0 */
+static struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x35,
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+ .alpha_hi = 0x20,
+ .alpha = 0x00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x0,
+ .vco_mask = GENMASK(21, 20),
+ .aux2_output_mask = BIT(2),
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .config = &gpu_cc_pll0_config,
+ .vco_table = gpu_cc_pll0_vco,
+ .num_vco = ARRAY_SIZE(gpu_cc_pll0_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_slew_ops,
+ },
+ },
+};
+
+/* 930MHz configuration VCO - 2 */
+static struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x30,
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+ .alpha_hi = 0x70,
+ .alpha = 0x00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .aux2_output_mask = BIT(2),
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .config = &gpu_cc_pll1_config,
+ .vco_table = gpu_cc_pll1_vco,
+ .num_vco = ARRAY_SIZE(gpu_cc_pll1_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_slew_ops,
+ },
+	},
+};
+
+/* Clock Ramp Controller */
+static struct clk_fixed_factor crc_div_pll0 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "crc_div_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+/* Clock Ramp Controller */
+static struct clk_fixed_factor crc_div_pll1 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "crc_div_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_2X_CLK, 1 },
+ { P_CRC_DIV_PLL0_OUT_AUX2, 2 },
+ { P_GPU_CC_PLL1_OUT_AUX, 3 },
+ { P_CRC_DIV_PLL1_OUT_AUX2, 4 },
+ { P_GPLL0_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &crc_div_pll0.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .hw = &crc_div_pll1.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+ F(290000000, P_CRC_DIV_PLL1_OUT_AUX2, 1, 0, 0),
+ F(350000000, P_CRC_DIV_PLL1_OUT_AUX2, 1, 0, 0),
+ F(435000000, P_CRC_DIV_PLL1_OUT_AUX2, 1, 0, 0),
+ F(500000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(550000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(650000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(700000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(745000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(845000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(895000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+ .cmd_rcgr = 0x101c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gfx3d_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+ .halt_reg = 0x10a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_slv_clk = {
+ .halt_reg = 0x10a8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gfx3d_slv_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+ .halt_reg = 0x1054,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_hw *gpu_cc_qcs615_hws[] = {
+ [CRC_DIV_PLL0] = &crc_div_pll0.hw,
+ [CRC_DIV_PLL1] = &crc_div_pll1.hw,
+};
+
+static struct gdsc cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x8,
+ .pd = {
+ .name = "cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc gx_gdsc = {
+ .gdscr = 0x100c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_qcs615_clocks[] = {
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+ [GPU_CC_CX_GFX3D_SLV_CLK] = &gpu_cc_cx_gfx3d_slv_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static struct gdsc *gpu_cc_qcs615_gdscs[] = {
+ [CX_GDSC] = &cx_gdsc,
+ [GX_GDSC] = &gx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_qcs615_resets[] = {
+ [GPU_CC_CX_BCR] = { 0x1068 },
+ [GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
+ [GPU_CC_GMU_BCR] = { 0x111c },
+ [GPU_CC_GX_BCR] = { 0x1008 },
+ [GPU_CC_XO_BCR] = { 0x1000 },
+};
+
+static struct clk_alpha_pll *gpu_cc_qcs615_plls[] = {
+ &gpu_cc_pll0,
+ &gpu_cc_pll1,
+};
+
+static u32 gpu_cc_qcs615_critical_cbcrs[] = {
+ 0x1078, /* GPU_CC_AHB_CLK */
+};
+
+static const struct regmap_config gpu_cc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7008,
+ .fast_io = true,
+};
+
+static void clk_qcs615_regs_crc_configure(struct device *dev, struct regmap *regmap)
+{
+ /* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+ regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg, 0xff0, 0xff0);
+
+ /*
+ * After POR, the Clock Ramp Controller (CRC) is in bypass mode.
+ * Software must perform the following register writes to enable the
+ * CRC for the GFX3D clock and divide the input clock by 2.
+ */
+ regmap_update_bits(regmap, 0x1028, 0x00015011, 0x00015011);
+ regmap_update_bits(regmap, 0x1024, 0x00800000, 0x00800000);
+}
+
+static struct qcom_cc_driver_data gpu_cc_qcs615_driver_data = {
+ .alpha_plls = gpu_cc_qcs615_plls,
+ .num_alpha_plls = ARRAY_SIZE(gpu_cc_qcs615_plls),
+ .clk_cbcrs = gpu_cc_qcs615_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gpu_cc_qcs615_critical_cbcrs),
+ .clk_regs_configure = clk_qcs615_regs_crc_configure,
+};
+
+static const struct qcom_cc_desc gpu_cc_qcs615_desc = {
+ .config = &gpu_cc_qcs615_regmap_config,
+ .clks = gpu_cc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_qcs615_clocks),
+ .clk_hws = gpu_cc_qcs615_hws,
+ .num_clk_hws = ARRAY_SIZE(gpu_cc_qcs615_hws),
+ .resets = gpu_cc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_qcs615_resets),
+ .gdscs = gpu_cc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_qcs615_gdscs),
+ .driver_data = &gpu_cc_qcs615_driver_data,
+};
+
+static const struct of_device_id gpu_cc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_qcs615_match_table);
+
+static int gpu_cc_qcs615_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gpu_cc_qcs615_desc);
+}
+
+static struct platform_driver gpu_cc_qcs615_driver = {
+ .probe = gpu_cc_qcs615_probe,
+ .driver = {
+ .name = "gpucc-qcs615",
+ .of_match_table = gpu_cc_qcs615_match_table,
+ },
+};
+
+module_platform_driver(gpu_cc_qcs615_driver);
+
+MODULE_DESCRIPTION("QTI GPUCC QCS615 Driver");
+MODULE_LICENSE("GPL");
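Because crc_div_pll0 and crc_div_pll1 are fixed 1/2 factors with CLK_SET_RATE_PARENT, every CRC-sourced entry in ftbl_gpu_cc_gx_gfx3d_clk_src implies a PLL rate of exactly twice the listed frequency: the 895 MHz operating point needs gpu_cc_pll0 at 1790 MHz (inside its 1000-2100 MHz VCO), and the 290 MHz point needs gpu_cc_pll1 at 580 MHz. A small, purely illustrative sketch of that rate round-trip:

/* Illustrative: a fixed-factor clock only scales rates, so the PLL
 * target for a requested GFX3D rate is rate * div / mult.
 */
static unsigned long example_crc_pll_rate(unsigned long gfx3d_rate)
{
	const unsigned int mult = 1, div = 2;	/* crc_div_pll0/1 */

	return gfx3d_rate / mult * div;		/* 895 MHz -> 1790 MHz */
}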
diff --git a/drivers/clk/qcom/ipq-cmn-pll.c b/drivers/clk/qcom/ipq-cmn-pll.c
index 432d4c4b7aa6..b3d7169c63e5 100644
--- a/drivers/clk/qcom/ipq-cmn-pll.c
+++ b/drivers/clk/qcom/ipq-cmn-pll.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/*
@@ -16,6 +16,10 @@
* are supplied to GCC (24 MHZ as XO and 32 KHZ as sleep clock), and to PCS
* with 31.25 MHZ.
*
+ * On the IPQ5424 SoC, there is an output clock from CMN PLL to PPE at 375 MHZ,
+ * and an output clock to NSS (network subsystem) at 300 MHZ. The other output
+ * clocks from CMN PLL on IPQ5424 are the same as on IPQ9574.
+ *
* +---------+
* | GCC |
* +--+---+--+
@@ -46,6 +50,8 @@
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
+#include <dt-bindings/clock/qcom,ipq5018-cmn-pll.h>
+#include <dt-bindings/clock/qcom,ipq5424-cmn-pll.h>
#define CMN_PLL_REFCLK_SRC_SELECTION 0x28
#define CMN_PLL_REFCLK_SRC_DIV GENMASK(9, 8)
@@ -105,6 +111,26 @@ static const struct regmap_config ipq_cmn_pll_regmap_config = {
.fast_io = true,
};
+static const struct cmn_pll_fixed_output_clk ipq5018_output_clks[] = {
+ CLK_PLL_OUTPUT(IPQ5018_XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
+ CLK_PLL_OUTPUT(IPQ5018_SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
+ CLK_PLL_OUTPUT(IPQ5018_ETH_50MHZ_CLK, "eth-50mhz", 50000000UL),
+ { /* Sentinel */ }
+};
+
+static const struct cmn_pll_fixed_output_clk ipq5424_output_clks[] = {
+ CLK_PLL_OUTPUT(IPQ5424_XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
+ CLK_PLL_OUTPUT(IPQ5424_PCS_31P25MHZ_CLK, "pcs-31p25mhz", 31250000UL),
+ CLK_PLL_OUTPUT(IPQ5424_NSS_300MHZ_CLK, "nss-300mhz", 300000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_PPE_375MHZ_CLK, "ppe-375mhz", 375000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_ETH0_50MHZ_CLK, "eth0-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
+ { /* Sentinel */ }
+};
+
static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = {
CLK_PLL_OUTPUT(XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
CLK_PLL_OUTPUT(SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
@@ -115,6 +141,7 @@ static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = {
CLK_PLL_OUTPUT(ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
CLK_PLL_OUTPUT(ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
CLK_PLL_OUTPUT(ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
+ { /* Sentinel */ }
};
/*
@@ -297,7 +324,7 @@ static struct clk_hw *ipq_cmn_pll_clk_hw_register(struct platform_device *pdev)
static int ipq_cmn_pll_register_clks(struct platform_device *pdev)
{
- const struct cmn_pll_fixed_output_clk *fixed_clk;
+ const struct cmn_pll_fixed_output_clk *p, *fixed_clk;
struct clk_hw_onecell_data *hw_data;
struct device *dev = &pdev->dev;
struct clk_hw *cmn_pll_hw;
@@ -305,8 +332,13 @@ static int ipq_cmn_pll_register_clks(struct platform_device *pdev)
struct clk_hw *hw;
int ret, i;
- fixed_clk = ipq9574_output_clks;
- num_clks = ARRAY_SIZE(ipq9574_output_clks);
+ fixed_clk = device_get_match_data(dev);
+ if (!fixed_clk)
+ return -EINVAL;
+
+ num_clks = 0;
+ for (p = fixed_clk; p->name; p++)
+ num_clks++;
hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks + 1),
GFP_KERNEL);
@@ -375,11 +407,11 @@ static int ipq_cmn_pll_clk_probe(struct platform_device *pdev)
*/
ret = pm_clk_add(dev, "ahb");
if (ret)
- return dev_err_probe(dev, ret, "Fail to add AHB clock\n");
+ return dev_err_probe(dev, ret, "Failed to add AHB clock\n");
ret = pm_clk_add(dev, "sys");
if (ret)
- return dev_err_probe(dev, ret, "Fail to add SYS clock\n");
+ return dev_err_probe(dev, ret, "Failed to add SYS clock\n");
ret = pm_runtime_resume_and_get(dev);
if (ret)
@@ -390,7 +422,7 @@ static int ipq_cmn_pll_clk_probe(struct platform_device *pdev)
pm_runtime_put(dev);
if (ret)
return dev_err_probe(dev, ret,
- "Fail to register CMN PLL clocks\n");
+ "Failed to register CMN PLL clocks\n");
return 0;
}
@@ -415,7 +447,9 @@ static const struct dev_pm_ops ipq_cmn_pll_pm_ops = {
};
static const struct of_device_id ipq_cmn_pll_clk_ids[] = {
- { .compatible = "qcom,ipq9574-cmn-pll", },
+ { .compatible = "qcom,ipq5018-cmn-pll", .data = &ipq5018_output_clks },
+ { .compatible = "qcom,ipq5424-cmn-pll", .data = &ipq5424_output_clks },
+ { .compatible = "qcom,ipq9574-cmn-pll", .data = &ipq9574_output_clks },
{ }
};
MODULE_DEVICE_TABLE(of, ipq_cmn_pll_clk_ids);
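The probe rework above sizes the output-clock table by walking to its sentinel, since device_get_match_data() returns a bare pointer and ARRAY_SIZE() no longer applies. The same pattern in isolation, with the struct reduced to the fields the loop needs (illustrative):

#include <stddef.h>

struct fixed_output_clk {
	const char *name;	/* NULL in the sentinel entry */
	unsigned long rate;
};

static size_t example_count_output_clks(const struct fixed_output_clk *clks)
{
	size_t n = 0;

	/* Stop at the zeroed sentinel terminator. */
	while (clks[n].name)
		n++;

	return n;
}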
diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
index 22169da08a51..3ff123bffa11 100644
--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
@@ -799,7 +799,6 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
goto exit;
}
- pm_runtime_mark_last_busy(&pdev->dev);
exit:
pm_runtime_put_autosuspend(&pdev->dev);
@@ -868,7 +867,6 @@ static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev)
goto exit;
}
- pm_runtime_mark_last_busy(&pdev->dev);
exit:
pm_runtime_put_autosuspend(&pdev->dev);
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index 605516d03993..5937b071533b 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -412,7 +412,6 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
ret = qcom_cc_really_probe(&pdev->dev, &lpass_core_cc_sc7180_desc, regmap);
- pm_runtime_mark_last_busy(&pdev->dev);
exit:
pm_runtime_put_autosuspend(&pdev->dev);
@@ -433,7 +432,6 @@ static int lpass_hm_core_probe(struct platform_device *pdev)
ret = qcom_cc_probe_by_index(pdev, 0, desc);
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return ret;
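These removals track the PM-core behaviour where pm_runtime_put_autosuspend() updates the last-busy timestamp itself, making a preceding pm_runtime_mark_last_busy() call redundant (callers that want the old no-mark semantics use __pm_runtime_put_autosuspend()). A sketch of the resulting probe tail, assuming that PM-core behaviour:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_probe_tail(struct device *dev, int ret)
{
	/* Marks the device busy and arms the autosuspend timer itself. */
	pm_runtime_put_autosuspend(dev);

	return ret;
}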
diff --git a/drivers/clk/qcom/tcsrcc-sm8650.c b/drivers/clk/qcom/tcsrcc-sm8650.c
index 11c7d6df48c7..3685dcde9a4b 100644
--- a/drivers/clk/qcom/tcsrcc-sm8650.c
+++ b/drivers/clk/qcom/tcsrcc-sm8650.c
@@ -148,6 +148,7 @@ static const struct qcom_cc_desc tcsr_cc_sm8650_desc = {
};
static const struct of_device_id tcsr_cc_sm8650_match_table[] = {
+ { .compatible = "qcom,milos-tcsr" },
{ .compatible = "qcom,sm8650-tcsr" },
{ }
};
@@ -155,6 +156,13 @@ MODULE_DEVICE_TABLE(of, tcsr_cc_sm8650_match_table);
static int tcsr_cc_sm8650_probe(struct platform_device *pdev)
{
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,milos-tcsr")) {
+ tcsr_ufs_clkref_en.halt_reg = 0x31118;
+ tcsr_ufs_clkref_en.clkr.enable_reg = 0x31118;
+ tcsr_cc_sm8650_clocks[TCSR_USB2_CLKREF_EN] = NULL;
+ tcsr_cc_sm8650_clocks[TCSR_USB3_CLKREF_EN] = NULL;
+ }
+
return qcom_cc_probe(pdev, &tcsr_cc_sm8650_desc);
}
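Reusing the sm8650 TCSR driver this way works because the common registration code skips NULL slots in the clks array, so a variant can both retarget a register and drop clocks it lacks before qcom_cc_probe() runs. A reduced sketch of the pattern (function and parameter names hypothetical, types as in clk-branch.h):

#include <linux/of.h>

#include "clk-branch.h"

/* Hypothetical per-variant fixup applied before registration. */
static void example_apply_variant_quirks(struct device_node *np,
					 struct clk_regmap **clks,
					 struct clk_branch *clkref)
{
	if (!of_device_is_compatible(np, "qcom,milos-tcsr"))
		return;

	clkref->halt_reg = 0x31118;		/* register moved on this SoC */
	clkref->clkr.enable_reg = 0x31118;
	clks[0] = NULL;				/* clock absent on this variant */
}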
diff --git a/drivers/clk/qcom/videocc-milos.c b/drivers/clk/qcom/videocc-milos.c
new file mode 100644
index 000000000000..998301e0ba88
--- /dev/null
+++ b/drivers/clk/qcom/videocc-milos.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_IFACE,
+};
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_VIDEO_CC_PLL0_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 604.8 MHz Configuration */
+static const struct alpha_pll_config video_cc_pll0_config = {
+ .l = 0x1f,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll video_cc_pll0 = {
+ .offset = 0x0,
+ .config = &video_cc_pll0_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_CC_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_2[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_2_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_video_cc_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_ahb_clk_src = {
+ .cmd_rcgr = 0x8030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_ahb_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
+ F(604800000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1098000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1656000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs0_clk_src = {
+ .cmd_rcgr = 0x8000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_mvs0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x8128,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_2,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_2_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 video_cc_xo_clk_src = {
+ .cmd_rcgr = 0x810c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_xo_clk_src",
+ .parent_data = video_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
+ .reg = 0x80c4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
+ .reg = 0x8070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch video_cc_mvs0_clk = {
+ .halt_reg = 0x80b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x80b8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_shift_clk = {
+ .halt_reg = 0x8144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8144,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_shift_clk = {
+ .halt_reg = 0x8148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8148,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc video_cc_mvs0c_gdsc = {
+ .gdscr = 0x804c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc video_cc_mvs0_gdsc = {
+ .gdscr = 0x80a4,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs0c_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL_TRIGGER,
+};
+
+static struct clk_regmap *video_cc_milos_clocks[] = {
+ [VIDEO_CC_AHB_CLK_SRC] = &video_cc_ahb_clk_src.clkr,
+ [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
+ [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
+ [VIDEO_CC_MVS0_SHIFT_CLK] = &video_cc_mvs0_shift_clk.clkr,
+ [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
+ [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
+ [VIDEO_CC_MVS0C_SHIFT_CLK] = &video_cc_mvs0c_shift_clk.clkr,
+ [VIDEO_CC_PLL0] = &video_cc_pll0.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *video_cc_milos_gdscs[] = {
+ [VIDEO_CC_MVS0C_GDSC] = &video_cc_mvs0c_gdsc,
+ [VIDEO_CC_MVS0_GDSC] = &video_cc_mvs0_gdsc,
+};
+
+static const struct qcom_reset_map video_cc_milos_resets[] = {
+ [VIDEO_CC_INTERFACE_BCR] = { 0x80f0 },
+ [VIDEO_CC_MVS0_BCR] = { 0x80a0 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { 0x8064, 2 },
+ [VIDEO_CC_MVS0C_BCR] = { 0x8048 },
+};
+
+static struct clk_alpha_pll *video_cc_milos_plls[] = {
+ &video_cc_pll0,
+};
+
+static u32 video_cc_milos_critical_cbcrs[] = {
+ 0x80f4, /* VIDEO_CC_AHB_CLK */
+ 0x8140, /* VIDEO_CC_SLEEP_CLK */
+ 0x8124, /* VIDEO_CC_XO_CLK */
+};
+
+static const struct regmap_config video_cc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9f50,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data video_cc_milos_driver_data = {
+ .alpha_plls = video_cc_milos_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_milos_plls),
+ .clk_cbcrs = video_cc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_milos_critical_cbcrs),
+};
+
+static struct qcom_cc_desc video_cc_milos_desc = {
+ .config = &video_cc_milos_regmap_config,
+ .clks = video_cc_milos_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_milos_clocks),
+ .resets = video_cc_milos_resets,
+ .num_resets = ARRAY_SIZE(video_cc_milos_resets),
+ .gdscs = video_cc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &video_cc_milos_driver_data,
+};
+
+static const struct of_device_id video_cc_milos_match_table[] = {
+ { .compatible = "qcom,milos-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_milos_match_table);
+
+static int video_cc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &video_cc_milos_desc);
+}
+
+static struct platform_driver video_cc_milos_driver = {
+ .probe = video_cc_milos_probe,
+ .driver = {
+ .name = "video_cc-milos",
+ .of_match_table = video_cc_milos_match_table,
+ },
+};
+
+module_platform_driver(video_cc_milos_driver);
+
+MODULE_DESCRIPTION("QTI VIDEO_CC Milos Driver");
+MODULE_LICENSE("GPL");
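
Note that videocc-milos.c never programs its PLL or forces the always-on branches in probe. It attaches .config to the PLL, lists the PLL in driver_data->alpha_plls, and lists the critical CBCR offsets; the sm8450/sm8550 hunks further down show exactly what this replaces (open-coded clk_lucid_*_pll_configure() and qcom_branch_set_clk_en() calls). A rough sketch of what the common helper is therefore assumed to do (configure_alpha_pll() is a stand-in name; the real dispatch lives in drivers/clk/qcom/common.c):

static void qcom_cc_apply_driver_data(struct regmap *regmap,
				      const struct qcom_cc_driver_data *data)
{
	int i;

	/* Program each listed Alpha PLL from its attached .config,
	 * dispatching on the PLL type (standing in for the type-specific
	 * clk_lucid_evo/ole_pll_configure() calls removed below). */
	for (i = 0; i < data->num_alpha_plls; i++)
		configure_alpha_pll(data->alpha_plls[i], regmap);	/* hypothetical */

	/* Latch the enable bit of each critical branch clock (CBCR) so it
	 * stays always-on; qcom_branch_set_clk_en() is the same helper the
	 * removed open-coded calls used. */
	for (i = 0; i < data->num_clk_cbcrs; i++)
		qcom_branch_set_clk_en(regmap, data->clk_cbcrs[i]);
}
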
diff --git a/drivers/clk/qcom/videocc-qcs615.c b/drivers/clk/qcom/videocc-qcs615.c
new file mode 100644
index 000000000000..1b41fa44c17e
--- /dev/null
+++ b/drivers/clk/qcom/videocc-qcs615.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_VIDEO_PLL0_OUT_AUX,
+ P_VIDEO_PLL0_OUT_AUX2,
+ P_VIDEO_PLL0_OUT_MAIN,
+};
+
+static const struct pll_vco video_cc_pll0_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+/* 600 MHz configuration, VCO index 2 */
+static struct alpha_pll_config video_pll0_config = {
+ .l = 0x1f,
+ .alpha_hi = 0x40,
+ .alpha = 0x00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x42c,
+ .config = &video_pll0_config,
+ .vco_table = video_cc_pll0_vco,
+ .num_vco = ARRAY_SIZE(video_cc_pll0_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_slew_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_MAIN, 1 },
+ { P_VIDEO_PLL0_OUT_AUX, 2 },
+ { P_VIDEO_PLL0_OUT_AUX2, 3 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_pll0.clkr.hw },
+ { .hw = &video_pll0.clkr.hw },
+ { .hw = &video_pll0.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xaf8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_venus_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(133333333, P_VIDEO_PLL0_OUT_MAIN, 4.5, 0, 0),
+ F(240000000, P_VIDEO_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(380000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(410000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(460000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
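+	/*
+	 * The half-integer dividers above are valid C: the shared F() macro
+	 * stores pre_div as (2 * div - 1), so 4.5 encodes as 8 and 2.5 as 4.
+	 */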
+
+static struct clk_rcg2 video_cc_venus_clk_src = {
+ .cmd_rcgr = 0x7f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_venus_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch video_cc_sleep_clk = {
+ .halt_reg = 0xb18,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb18,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_sleep_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_vcodec0_axi_clk = {
+ .halt_reg = 0x8f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_vcodec0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_vcodec0_core_clk = {
+ .halt_reg = 0x890,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x890,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_vcodec0_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ahb_clk = {
+ .halt_reg = 0x9b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ctl_axi_clk = {
+ .halt_reg = 0x8d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ctl_core_clk = {
+ .halt_reg = 0x850,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x850,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_ctl_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc vcodec0_gdsc = {
+ .gdscr = 0x874,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "vcodec0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x814,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *video_cc_qcs615_clocks[] = {
+ [VIDEO_CC_SLEEP_CLK] = &video_cc_sleep_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_VCODEC0_AXI_CLK] = &video_cc_vcodec0_axi_clk.clkr,
+ [VIDEO_CC_VCODEC0_CORE_CLK] = &video_cc_vcodec0_core_clk.clkr,
+ [VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr,
+ [VIDEO_CC_VENUS_CLK_SRC] = &video_cc_venus_clk_src.clkr,
+ [VIDEO_CC_VENUS_CTL_AXI_CLK] = &video_cc_venus_ctl_axi_clk.clkr,
+ [VIDEO_CC_VENUS_CTL_CORE_CLK] = &video_cc_venus_ctl_core_clk.clkr,
+ [VIDEO_PLL0] = &video_pll0.clkr,
+};
+
+static struct gdsc *video_cc_qcs615_gdscs[] = {
+ [VCODEC0_GDSC] = &vcodec0_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+};
+
+static const struct qcom_reset_map video_cc_qcs615_resets[] = {
+ [VIDEO_CC_INTERFACE_BCR] = { 0x8b0 },
+ [VIDEO_CC_VCODEC0_BCR] = { 0x870 },
+ [VIDEO_CC_VENUS_BCR] = { 0x810 },
+};
+
+static struct clk_alpha_pll *video_cc_qcs615_plls[] = {
+ &video_pll0,
+};
+
+static u32 video_cc_qcs615_critical_cbcrs[] = {
+ 0xab8, /* VIDEO_CC_XO_CLK */
+};
+
+static const struct regmap_config video_cc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb94,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data video_cc_qcs615_driver_data = {
+ .alpha_plls = video_cc_qcs615_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_qcs615_plls),
+ .clk_cbcrs = video_cc_qcs615_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_qcs615_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc video_cc_qcs615_desc = {
+ .config = &video_cc_qcs615_regmap_config,
+ .clks = video_cc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_qcs615_clocks),
+ .resets = video_cc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(video_cc_qcs615_resets),
+ .gdscs = video_cc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_qcs615_gdscs),
+ .driver_data = &video_cc_qcs615_driver_data,
+};
+
+static const struct of_device_id video_cc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_qcs615_match_table);
+
+static int video_cc_qcs615_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &video_cc_qcs615_desc);
+}
+
+static struct platform_driver video_cc_qcs615_driver = {
+ .probe = video_cc_qcs615_probe,
+ .driver = {
+ .name = "videocc-qcs615",
+ .of_match_table = video_cc_qcs615_match_table,
+ },
+};
+
+module_platform_driver(video_cc_qcs615_driver);
+
+MODULE_DESCRIPTION("QTI VIDEOCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sc7180.c b/drivers/clk/qcom/videocc-sc7180.c
index d7f845480396..dd2441d6aa83 100644
--- a/drivers/clk/qcom/videocc-sc7180.c
+++ b/drivers/clk/qcom/videocc-sc7180.c
@@ -166,7 +166,7 @@ static struct gdsc vcodec0_gdsc = {
.pd = {
.name = "vcodec0_gdsc",
},
- .flags = HW_CTRL,
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
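
This sc7180 change, like the identical sdm845/sm7150/sm8150 conversions below, swaps HW_CTRL for HW_CTRL_TRIGGER on the video-codec GDSCs. As far as these conversions imply, HW_CTRL parks the domain permanently under hardware control once enabled, while HW_CTRL_TRIGGER instead wires up the genpd hwmode callbacks so the codec driver can move the GDSC between software and hardware control at runtime. A consumer-side sketch (illustrative only; the real Venus driver has its own sequencing):

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static int venus_start_streaming_hw_gated(struct device *dev)
{
	int ret;

	/* Power the GDSC on under normal software control first. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* Hand the (still enabled) GDSC over to hardware gating. */
	return dev_pm_genpd_set_hwmode(dev, true);
}
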
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index f77a07779477..6dedc80a8b3e 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -260,7 +260,7 @@ static struct gdsc vcodec0_gdsc = {
},
.cxcs = (unsigned int []){ 0x890, 0x930 },
.cxc_count = 2,
- .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -271,7 +271,7 @@ static struct gdsc vcodec1_gdsc = {
},
.cxcs = (unsigned int []){ 0x8d0, 0x950 },
.cxc_count = 2,
- .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/videocc-sm6350.c b/drivers/clk/qcom/videocc-sm6350.c
new file mode 100644
index 000000000000..34bdc5aa865a
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sm6350.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6350-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CHIP_SLEEP_CLK,
+ P_VIDEO_PLL0_OUT_EVEN,
+};
+
+static const struct pll_vco fabia_vco[] = {
+ { 125000000, 1000000000, 1 },
+};
+
+/* 600 MHz */
+static const struct alpha_pll_config video_pll0_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .test_ctl_hi_val = 0x00000002,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00004005,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x0,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_video_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv video_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_video_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_video_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_EVEN, 3 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_CHIP_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_video_cc_iris_clk_src[] = {
+ F(133250000, P_VIDEO_PLL0_OUT_EVEN, 2, 0, 0),
+ F(240000000, P_VIDEO_PLL0_OUT_EVEN, 1.5, 0, 0),
+ F(300000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
+ F(380000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
+ F(460000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_iris_clk_src = {
+ .cmd_rcgr = 0x1000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_iris_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_iris_clk_src",
+ .parent_data = video_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32764, P_CHIP_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x701c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch video_cc_iris_ahb_clk = {
+ .halt_reg = 0x5004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_iris_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_axi_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_core_clk = {
+ .halt_reg = 0x3010,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x3010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvsc_core_clk = {
+ .halt_reg = 0x2014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvsc_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvsc_ctl_axi_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvsc_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_sleep_clk = {
+ .halt_reg = 0x7034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ahb_clk = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mvsc_gdsc = {
+ .gdscr = 0x2004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "mvsc_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mvs0_gdsc = {
+ .gdscr = 0x3004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL_TRIGGER,
+};
+
+static struct gdsc *video_cc_sm6350_gdscs[] = {
+ [MVSC_GDSC] = &mvsc_gdsc,
+ [MVS0_GDSC] = &mvs0_gdsc,
+};
+
+static struct clk_regmap *video_cc_sm6350_clocks[] = {
+ [VIDEO_CC_IRIS_AHB_CLK] = &video_cc_iris_ahb_clk.clkr,
+ [VIDEO_CC_IRIS_CLK_SRC] = &video_cc_iris_clk_src.clkr,
+ [VIDEO_CC_MVS0_AXI_CLK] = &video_cc_mvs0_axi_clk.clkr,
+ [VIDEO_CC_MVS0_CORE_CLK] = &video_cc_mvs0_core_clk.clkr,
+ [VIDEO_CC_MVSC_CORE_CLK] = &video_cc_mvsc_core_clk.clkr,
+ [VIDEO_CC_MVSC_CTL_AXI_CLK] = &video_cc_mvsc_ctl_axi_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK] = &video_cc_sleep_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr,
+ [VIDEO_PLL0] = &video_pll0.clkr,
+ [VIDEO_PLL0_OUT_EVEN] = &video_pll0_out_even.clkr,
+};
+
+static const struct regmap_config video_cc_sm6350_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc video_cc_sm6350_desc = {
+ .config = &video_cc_sm6350_regmap_config,
+ .clks = video_cc_sm6350_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sm6350_clocks),
+ .gdscs = video_cc_sm6350_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sm6350_gdscs),
+};
+
+static const struct of_device_id video_cc_sm6350_match_table[] = {
+ { .compatible = "qcom,sm6350-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sm6350_match_table);
+
+static int video_cc_sm6350_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &video_cc_sm6350_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
+
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x7018); /* VIDEO_CC_XO_CLK */
+
+ return qcom_cc_really_probe(&pdev->dev, &video_cc_sm6350_desc, regmap);
+}
+
+static struct platform_driver video_cc_sm6350_driver = {
+ .probe = video_cc_sm6350_probe,
+ .driver = {
+ .name = "video_cc-sm6350",
+ .of_match_table = video_cc_sm6350_match_table,
+ },
+};
+
+module_platform_driver(video_cc_sm6350_driver);
+
+MODULE_DESCRIPTION("QTI VIDEO_CC SM6350 Driver");
+MODULE_LICENSE("GPL");
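
For contrast with the conversions elsewhere in this series, this new sm6350 driver keeps the classic probe shape: map the regmap, program the Fabia PLL by hand, force the XO branch on, then register. Expressed declaratively it would collapse to roughly the following untested sketch, which assumes .config gets attached to video_pll0 and that the common helper can dispatch to the Fabia configure routine (neither of which this series shows for Fabia):

static struct clk_alpha_pll *video_cc_sm6350_plls[] = {
	&video_pll0,			/* would need .config = &video_pll0_config */
};

static u32 video_cc_sm6350_critical_cbcrs[] = {
	0x7018,				/* VIDEO_CC_XO_CLK */
};

static struct qcom_cc_driver_data video_cc_sm6350_driver_data = {
	.alpha_plls = video_cc_sm6350_plls,
	.num_alpha_plls = ARRAY_SIZE(video_cc_sm6350_plls),
	.clk_cbcrs = video_cc_sm6350_critical_cbcrs,
	.num_clk_cbcrs = ARRAY_SIZE(video_cc_sm6350_critical_cbcrs),
};

static int video_cc_sm6350_probe(struct platform_device *pdev)
{
	/* desc would also gain .driver_data = &video_cc_sm6350_driver_data */
	return qcom_cc_probe(pdev, &video_cc_sm6350_desc);
}
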
diff --git a/drivers/clk/qcom/videocc-sm7150.c b/drivers/clk/qcom/videocc-sm7150.c
index 14ef7f561753..b6912560ef9b 100644
--- a/drivers/clk/qcom/videocc-sm7150.c
+++ b/drivers/clk/qcom/videocc-sm7150.c
@@ -271,7 +271,7 @@ static struct gdsc vcodec0_gdsc = {
},
.cxcs = (unsigned int []){ 0x890, 0x9ec },
.cxc_count = 2,
- .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -282,7 +282,7 @@ static struct gdsc vcodec1_gdsc = {
},
.cxcs = (unsigned int []){ 0x8d0, 0xa0c },
.cxc_count = 2,
- .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c
index daab3237eec1..3024f6fc89c8 100644
--- a/drivers/clk/qcom/videocc-sm8150.c
+++ b/drivers/clk/qcom/videocc-sm8150.c
@@ -179,7 +179,7 @@ static struct gdsc vcodec0_gdsc = {
.pd = {
.name = "vcodec0_gdsc",
},
- .flags = HW_CTRL,
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -188,7 +188,7 @@ static struct gdsc vcodec1_gdsc = {
.pd = {
.name = "vcodec1_gdsc",
},
- .flags = HW_CTRL,
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
static struct clk_regmap *video_cc_sm8150_clocks[] = {
diff --git a/drivers/clk/qcom/videocc-sm8450.c b/drivers/clk/qcom/videocc-sm8450.c
index 2e11dcffb664..dc168ce199cc 100644
--- a/drivers/clk/qcom/videocc-sm8450.c
+++ b/drivers/clk/qcom/videocc-sm8450.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm8450-videocc.h>
@@ -63,6 +62,7 @@ static const struct alpha_pll_config sm8475_video_cc_pll0_config = {
static struct clk_alpha_pll video_cc_pll0 = {
.offset = 0x0,
+ .config = &video_cc_pll0_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -106,6 +106,7 @@ static const struct alpha_pll_config sm8475_video_cc_pll1_config = {
static struct clk_alpha_pll video_cc_pll1 = {
.offset = 0x1000,
+ .config = &video_cc_pll1_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -347,7 +348,7 @@ static struct gdsc video_cc_mvs0_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.parent = &video_cc_mvs0c_gdsc.pd,
- .flags = RETAIN_FF_ENABLE | HW_CTRL,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
};
static struct gdsc video_cc_mvs1c_gdsc = {
@@ -372,7 +373,7 @@ static struct gdsc video_cc_mvs1_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.parent = &video_cc_mvs1c_gdsc.pd,
- .flags = RETAIN_FF_ENABLE | HW_CTRL,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
};
static struct clk_regmap *video_cc_sm8450_clocks[] = {
@@ -407,6 +408,17 @@ static const struct qcom_reset_map video_cc_sm8450_resets[] = {
[VIDEO_CC_MVS1C_CLK_ARES] = { .reg = 0x808c, .bit = 2, .udelay = 1000 },
};
+static struct clk_alpha_pll *video_cc_sm8450_plls[] = {
+ &video_cc_pll0,
+ &video_cc_pll1,
+};
+
+static u32 video_cc_sm8450_critical_cbcrs[] = {
+ 0x80e4, /* VIDEO_CC_AHB_CLK */
+ 0x8114, /* VIDEO_CC_XO_CLK */
+ 0x8130, /* VIDEO_CC_SLEEP_CLK */
+};
+
static const struct regmap_config video_cc_sm8450_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -415,6 +427,13 @@ static const struct regmap_config video_cc_sm8450_regmap_config = {
.fast_io = true,
};
+static struct qcom_cc_driver_data video_cc_sm8450_driver_data = {
+ .alpha_plls = video_cc_sm8450_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_sm8450_plls),
+ .clk_cbcrs = video_cc_sm8450_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_sm8450_critical_cbcrs),
+};
+
static const struct qcom_cc_desc video_cc_sm8450_desc = {
.config = &video_cc_sm8450_regmap_config,
.clks = video_cc_sm8450_clocks,
@@ -423,6 +442,8 @@ static const struct qcom_cc_desc video_cc_sm8450_desc = {
.num_resets = ARRAY_SIZE(video_cc_sm8450_resets),
.gdscs = video_cc_sm8450_gdscs,
.num_gdscs = ARRAY_SIZE(video_cc_sm8450_gdscs),
+ .use_rpm = true,
+ .driver_data = &video_cc_sm8450_driver_data,
};
static const struct of_device_id video_cc_sm8450_match_table[] = {
@@ -434,23 +455,6 @@ MODULE_DEVICE_TABLE(of, video_cc_sm8450_match_table);
static int video_cc_sm8450_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &video_cc_sm8450_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
- }
-
if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-videocc")) {
/* Update VideoCC PLL0 */
video_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
@@ -458,23 +462,11 @@ static int video_cc_sm8450_probe(struct platform_device *pdev)
/* Update VideoCC PLL1 */
video_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
- clk_lucid_ole_pll_configure(&video_cc_pll0, regmap, &sm8475_video_cc_pll0_config);
- clk_lucid_ole_pll_configure(&video_cc_pll1, regmap, &sm8475_video_cc_pll1_config);
- } else {
- clk_lucid_evo_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
- clk_lucid_evo_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
+ video_cc_pll0.config = &sm8475_video_cc_pll0_config;
+ video_cc_pll1.config = &sm8475_video_cc_pll1_config;
}
- /* Keep some clocks always-on */
- qcom_branch_set_clk_en(regmap, 0x80e4); /* VIDEO_CC_AHB_CLK */
- qcom_branch_set_clk_en(regmap, 0x8130); /* VIDEO_CC_SLEEP_CLK */
- qcom_branch_set_clk_en(regmap, 0x8114); /* VIDEO_CC_XO_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8450_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
- return ret;
+ return qcom_cc_probe(pdev, &video_cc_sm8450_desc);
}
static struct platform_driver video_cc_sm8450_driver = {
diff --git a/drivers/clk/qcom/videocc-sm8550.c b/drivers/clk/qcom/videocc-sm8550.c
index fcfe0cade6d0..32a6505abe26 100644
--- a/drivers/clk/qcom/videocc-sm8550.c
+++ b/drivers/clk/qcom/videocc-sm8550.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm8650-videocc.h>
@@ -51,6 +50,7 @@ static struct alpha_pll_config video_cc_pll0_config = {
static struct clk_alpha_pll video_cc_pll0 = {
.offset = 0x0,
+ .config = &video_cc_pll0_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -82,6 +82,7 @@ static struct alpha_pll_config video_cc_pll1_config = {
static struct clk_alpha_pll video_cc_pll1 = {
.offset = 0x1000,
+ .config = &video_cc_pll1_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -144,6 +145,16 @@ static const struct freq_tbl ftbl_video_cc_mvs0_clk_src_sm8650[] = {
{ }
};
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src_x1e80100[] = {
+ F(576000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1098000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1443000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 video_cc_mvs0_clk_src = {
.cmd_rcgr = 0x8000,
.mnd_width = 0,
@@ -176,6 +187,15 @@ static const struct freq_tbl ftbl_video_cc_mvs1_clk_src_sm8650[] = {
{ }
};
+static const struct freq_tbl ftbl_video_cc_mvs1_clk_src_x1e80100[] = {
+ F(840000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1050000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1350000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1500000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1650000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 video_cc_mvs1_clk_src = {
.cmd_rcgr = 0x8018,
.mnd_width = 0,
@@ -511,6 +531,23 @@ static const struct qcom_reset_map video_cc_sm8550_resets[] = {
[VIDEO_CC_XO_CLK_ARES] = { .reg = 0x8124, .bit = 2, .udelay = 100 },
};
+static struct clk_alpha_pll *video_cc_sm8550_plls[] = {
+ &video_cc_pll0,
+ &video_cc_pll1,
+};
+
+static u32 video_cc_sm8550_critical_cbcrs[] = {
+ 0x80f4, /* VIDEO_CC_AHB_CLK */
+ 0x8124, /* VIDEO_CC_XO_CLK */
+ 0x8140, /* VIDEO_CC_SLEEP_CLK */
+};
+
+static u32 video_cc_sm8650_critical_cbcrs[] = {
+ 0x80f4, /* VIDEO_CC_AHB_CLK */
+ 0x8124, /* VIDEO_CC_XO_CLK */
+ 0x8150, /* VIDEO_CC_SLEEP_CLK */
+};
+
static const struct regmap_config video_cc_sm8550_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -519,6 +556,13 @@ static const struct regmap_config video_cc_sm8550_regmap_config = {
.fast_io = true,
};
+static struct qcom_cc_driver_data video_cc_sm8550_driver_data = {
+ .alpha_plls = video_cc_sm8550_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_sm8550_plls),
+ .clk_cbcrs = video_cc_sm8550_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_sm8550_critical_cbcrs),
+};
+
static const struct qcom_cc_desc video_cc_sm8550_desc = {
.config = &video_cc_sm8550_regmap_config,
.clks = video_cc_sm8550_clocks,
@@ -527,37 +571,30 @@ static const struct qcom_cc_desc video_cc_sm8550_desc = {
.num_resets = ARRAY_SIZE(video_cc_sm8550_resets),
.gdscs = video_cc_sm8550_gdscs,
.num_gdscs = ARRAY_SIZE(video_cc_sm8550_gdscs),
+ .use_rpm = true,
+ .driver_data = &video_cc_sm8550_driver_data,
};
static const struct of_device_id video_cc_sm8550_match_table[] = {
{ .compatible = "qcom,sm8550-videocc" },
{ .compatible = "qcom,sm8650-videocc" },
+ { .compatible = "qcom,x1e80100-videocc" },
{ }
};
MODULE_DEVICE_TABLE(of, video_cc_sm8550_match_table);
static int video_cc_sm8550_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
- u32 sleep_clk_offset = 0x8140;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &video_cc_sm8550_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,x1e80100-videocc")) {
+ video_cc_pll0_config.l = 0x1e;
+ video_cc_pll0_config.alpha = 0x0000;
+ video_cc_pll1_config.l = 0x2b;
+ video_cc_pll1_config.alpha = 0xc000;
+ video_cc_mvs0_clk_src.freq_tbl = ftbl_video_cc_mvs0_clk_src_x1e80100;
+ video_cc_mvs1_clk_src.freq_tbl = ftbl_video_cc_mvs1_clk_src_x1e80100;
}
if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8650-videocc")) {
- sleep_clk_offset = 0x8150;
video_cc_pll0_config.l = 0x1e;
video_cc_pll0_config.alpha = 0xa000;
video_cc_pll1_config.l = 0x2b;
@@ -569,21 +606,13 @@ static int video_cc_sm8550_probe(struct platform_device *pdev)
video_cc_sm8550_clocks[VIDEO_CC_MVS1_SHIFT_CLK] = &video_cc_mvs1_shift_clk.clkr;
video_cc_sm8550_clocks[VIDEO_CC_MVS1C_SHIFT_CLK] = &video_cc_mvs1c_shift_clk.clkr;
video_cc_sm8550_clocks[VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr;
- }
-
- clk_lucid_ole_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
- clk_lucid_ole_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
- /* Keep some clocks always-on */
- qcom_branch_set_clk_en(regmap, 0x80f4); /* VIDEO_CC_AHB_CLK */
- qcom_branch_set_clk_en(regmap, sleep_clk_offset); /* VIDEO_CC_SLEEP_CLK */
- qcom_branch_set_clk_en(regmap, 0x8124); /* VIDEO_CC_XO_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8550_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
+ video_cc_sm8550_driver_data.clk_cbcrs = video_cc_sm8650_critical_cbcrs;
+ video_cc_sm8550_driver_data.num_clk_cbcrs =
+ ARRAY_SIZE(video_cc_sm8650_critical_cbcrs);
+ }
- return ret;
+ return qcom_cc_probe(pdev, &video_cc_sm8550_desc);
}
static struct platform_driver video_cc_sm8550_driver = {
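
The X1E80100 branch above only overrides the PLL L/alpha values and swaps the two frequency tables; the numbers are self-consistent. Assuming the usual 19.2 MHz TCXO and the 16-bit fractional field this PLL family uses:

/*
 * Sanity check of the X1E80100 overrides (not part of the patch):
 *   PLL0: 19.2 MHz * (0x1e + 0x0000/65536) = 19.2 MHz * 30.00 = 576 MHz
 *   PLL1: 19.2 MHz * (0x2b + 0xc000/65536) = 19.2 MHz * 43.75 = 840 MHz
 * i.e. exactly the first entries of the two new x1e80100 freq tables.
 */
static inline unsigned long lucid_vco_rate(unsigned long ref, u32 l, u32 alpha)
{
	return ref * l + (unsigned long)(((u64)ref * alpha) >> 16);
}
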
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 50c20119d12a..6a5a04664990 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -43,6 +43,8 @@ config CLK_RENESAS
select CLK_R9A09G047 if ARCH_R9A09G047
select CLK_R9A09G056 if ARCH_R9A09G056
select CLK_R9A09G057 if ARCH_R9A09G057
+ select CLK_R9A09G077 if ARCH_R9A09G077
+ select CLK_R9A09G087 if ARCH_R9A09G087
select CLK_SH73A0 if ARCH_SH73A0
if CLK_RENESAS
@@ -208,6 +210,14 @@ config CLK_R9A09G057
bool "RZ/V2H(P) clock support" if COMPILE_TEST
select CLK_RZV2H
+config CLK_R9A09G077
+ bool "RZ/T2H clock support" if COMPILE_TEST
+ select CLK_RENESAS_CPG_MSSR
+
+config CLK_R9A09G087
+ bool "RZ/N2H clock support" if COMPILE_TEST
+ select CLK_RENESAS_CPG_MSSR
+
config CLK_SH73A0
bool "SH-Mobile AG5 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index f9075bca6e95..d28eb276a153 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -40,6 +40,8 @@ obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
obj-$(CONFIG_CLK_R9A09G047) += r9a09g047-cpg.o
obj-$(CONFIG_CLK_R9A09G056) += r9a09g056-cpg.o
obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o
+obj-$(CONFIG_CLK_R9A09G077) += r9a09g077-cpg.o
+obj-$(CONFIG_CLK_R9A09G087) += r9a09g077-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c
index e1812867a6da..a8ed87c11ba1 100644
--- a/drivers/clk/renesas/r7s9210-cpg-mssr.c
+++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c
@@ -159,12 +159,13 @@ static void __init r7s9210_update_clk_table(struct clk *extal_clk,
static struct clk * __init rza2_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
- struct clk *parent;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
unsigned int mult = 1;
unsigned int div = 1;
+ struct clk *parent;
parent = clks[core->parent];
if (IS_ERR(parent))
diff --git a/drivers/clk/renesas/r8a77970-cpg-mssr.c b/drivers/clk/renesas/r8a77970-cpg-mssr.c
index 3cec0f501b94..e2bda2c10730 100644
--- a/drivers/clk/renesas/r8a77970-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77970-cpg-mssr.c
@@ -219,10 +219,11 @@ static int __init r8a77970_cpg_mssr_init(struct device *dev)
static struct clk * __init r8a77970_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
const struct clk_div_table *table;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
const struct clk *parent;
unsigned int shift;
@@ -236,8 +237,7 @@ static struct clk * __init r8a77970_cpg_clk_register(struct device *dev,
shift = 4;
break;
default:
- return rcar_gen3_cpg_clk_register(dev, core, info, clks, base,
- notifiers);
+ return rcar_gen3_cpg_clk_register(dev, core, info, pub);
}
parent = clks[core->parent];
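
Both Renesas hunks above make the same mechanical change: the per-SoC clk-register callbacks stop taking a (clks, base, notifiers) triple and instead receive one context struct, so future fields (the "base0" name suggests additional register blocks, as on the RZ/T2H parts added in this series) no longer churn every callback signature. The shape of the struct, inferred from these call sites only (the real definition lives in the renesas-cpg-mssr headers and may carry more fields):

#include <linux/notifier.h>

struct cpg_mssr_pub {
	void __iomem *base0;			/* primary CPG/MSSR register block */
	struct clk **clks;			/* table of registered core/module clks */
	struct raw_notifier_head notifiers;	/* replaces the former 'notifiers' arg */
};
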
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index fce2eecfa8c0..02dc5cecfd8d 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -164,143 +164,143 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
static const struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
#ifdef CONFIG_ARM64
DEF_MOD("gic", R9A07G043_GIC600_GICCLK, R9A07G043_CLK_P1,
- 0x514, 0),
+ 0x514, 0, 0),
DEF_MOD("ia55_pclk", R9A07G043_IA55_PCLK, R9A07G043_CLK_P2,
- 0x518, 0),
+ 0x518, 0, 0),
DEF_MOD("ia55_clk", R9A07G043_IA55_CLK, R9A07G043_CLK_P1,
- 0x518, 1),
+ 0x518, 1, 0),
#endif
#ifdef CONFIG_RISCV
DEF_MOD("iax45_pclk", R9A07G043_IAX45_PCLK, R9A07G043_CLK_P2,
- 0x518, 0),
+ 0x518, 0, 0),
DEF_MOD("iax45_clk", R9A07G043_IAX45_CLK, R9A07G043_CLK_P1,
- 0x518, 1),
+ 0x518, 1, 0),
#endif
DEF_MOD("dmac_aclk", R9A07G043_DMAC_ACLK, R9A07G043_CLK_P1,
- 0x52c, 0),
+ 0x52c, 0, 0),
DEF_MOD("dmac_pclk", R9A07G043_DMAC_PCLK, CLK_P1_DIV2,
- 0x52c, 1),
+ 0x52c, 1, 0),
DEF_MOD("ostm0_pclk", R9A07G043_OSTM0_PCLK, R9A07G043_CLK_P0,
- 0x534, 0),
+ 0x534, 0, 0),
DEF_MOD("ostm1_pclk", R9A07G043_OSTM1_PCLK, R9A07G043_CLK_P0,
- 0x534, 1),
+ 0x534, 1, 0),
DEF_MOD("ostm2_pclk", R9A07G043_OSTM2_PCLK, R9A07G043_CLK_P0,
- 0x534, 2),
+ 0x534, 2, 0),
DEF_MOD("mtu_x_mck", R9A07G043_MTU_X_MCK_MTU3, R9A07G043_CLK_P0,
- 0x538, 0),
+ 0x538, 0, 0),
DEF_MOD("wdt0_pclk", R9A07G043_WDT0_PCLK, R9A07G043_CLK_P0,
- 0x548, 0),
+ 0x548, 0, 0),
DEF_MOD("wdt0_clk", R9A07G043_WDT0_CLK, R9A07G043_OSCCLK,
- 0x548, 1),
+ 0x548, 1, 0),
DEF_MOD("spi_clk2", R9A07G043_SPI_CLK2, R9A07G043_CLK_SPI1,
- 0x550, 0),
+ 0x550, 0, 0),
DEF_MOD("spi_clk", R9A07G043_SPI_CLK, R9A07G043_CLK_SPI0,
- 0x550, 1),
+ 0x550, 1, 0),
DEF_MOD("sdhi0_imclk", R9A07G043_SDHI0_IMCLK, CLK_SD0_DIV4,
- 0x554, 0),
+ 0x554, 0, 0),
DEF_MOD("sdhi0_imclk2", R9A07G043_SDHI0_IMCLK2, CLK_SD0_DIV4,
- 0x554, 1),
+ 0x554, 1, 0),
DEF_MOD("sdhi0_clk_hs", R9A07G043_SDHI0_CLK_HS, R9A07G043_CLK_SD0,
- 0x554, 2),
+ 0x554, 2, 0),
DEF_MOD("sdhi0_aclk", R9A07G043_SDHI0_ACLK, R9A07G043_CLK_P1,
- 0x554, 3),
+ 0x554, 3, 0),
DEF_MOD("sdhi1_imclk", R9A07G043_SDHI1_IMCLK, CLK_SD1_DIV4,
- 0x554, 4),
+ 0x554, 4, 0),
DEF_MOD("sdhi1_imclk2", R9A07G043_SDHI1_IMCLK2, CLK_SD1_DIV4,
- 0x554, 5),
+ 0x554, 5, 0),
DEF_MOD("sdhi1_clk_hs", R9A07G043_SDHI1_CLK_HS, R9A07G043_CLK_SD1,
- 0x554, 6),
+ 0x554, 6, 0),
DEF_MOD("sdhi1_aclk", R9A07G043_SDHI1_ACLK, R9A07G043_CLK_P1,
- 0x554, 7),
+ 0x554, 7, 0),
#ifdef CONFIG_ARM64
DEF_MOD("cru_sysclk", R9A07G043_CRU_SYSCLK, CLK_M2_DIV2,
- 0x564, 0),
+ 0x564, 0, 0),
DEF_MOD("cru_vclk", R9A07G043_CRU_VCLK, R9A07G043_CLK_M2,
- 0x564, 1),
+ 0x564, 1, 0),
DEF_MOD("cru_pclk", R9A07G043_CRU_PCLK, R9A07G043_CLK_ZT,
- 0x564, 2),
+ 0x564, 2, 0),
DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0,
- 0x564, 3),
+ 0x564, 3, 0),
DEF_COUPLED("lcdc_clk_a", R9A07G043_LCDC_CLK_A, R9A07G043_CLK_M0,
- 0x56c, 0),
+ 0x56c, 0, 0),
DEF_COUPLED("lcdc_clk_p", R9A07G043_LCDC_CLK_P, R9A07G043_CLK_ZT,
- 0x56c, 0),
+ 0x56c, 0, 0),
DEF_MOD("lcdc_clk_d", R9A07G043_LCDC_CLK_D, R9A07G043_CLK_M3,
- 0x56c, 1),
+ 0x56c, 1, 0),
#endif
DEF_MOD("ssi0_pclk", R9A07G043_SSI0_PCLK2, R9A07G043_CLK_P0,
- 0x570, 0),
+ 0x570, 0, 0),
DEF_MOD("ssi0_sfr", R9A07G043_SSI0_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 1),
+ 0x570, 1, 0),
DEF_MOD("ssi1_pclk", R9A07G043_SSI1_PCLK2, R9A07G043_CLK_P0,
- 0x570, 2),
+ 0x570, 2, 0),
DEF_MOD("ssi1_sfr", R9A07G043_SSI1_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 3),
+ 0x570, 3, 0),
DEF_MOD("ssi2_pclk", R9A07G043_SSI2_PCLK2, R9A07G043_CLK_P0,
- 0x570, 4),
+ 0x570, 4, 0),
DEF_MOD("ssi2_sfr", R9A07G043_SSI2_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 5),
+ 0x570, 5, 0),
DEF_MOD("ssi3_pclk", R9A07G043_SSI3_PCLK2, R9A07G043_CLK_P0,
- 0x570, 6),
+ 0x570, 6, 0),
DEF_MOD("ssi3_sfr", R9A07G043_SSI3_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 7),
+ 0x570, 7, 0),
DEF_MOD("usb0_host", R9A07G043_USB_U2H0_HCLK, R9A07G043_CLK_P1,
- 0x578, 0),
+ 0x578, 0, 0),
DEF_MOD("usb1_host", R9A07G043_USB_U2H1_HCLK, R9A07G043_CLK_P1,
- 0x578, 1),
+ 0x578, 1, 0),
DEF_MOD("usb0_func", R9A07G043_USB_U2P_EXR_CPUCLK, R9A07G043_CLK_P1,
- 0x578, 2),
+ 0x578, 2, 0),
DEF_MOD("usb_pclk", R9A07G043_USB_PCLK, R9A07G043_CLK_P1,
- 0x578, 3),
+ 0x578, 3, 0),
DEF_COUPLED("eth0_axi", R9A07G043_ETH0_CLK_AXI, R9A07G043_CLK_M0,
- 0x57c, 0),
+ 0x57c, 0, 0),
DEF_COUPLED("eth0_chi", R9A07G043_ETH0_CLK_CHI, R9A07G043_CLK_ZT,
- 0x57c, 0),
+ 0x57c, 0, 0),
DEF_COUPLED("eth1_axi", R9A07G043_ETH1_CLK_AXI, R9A07G043_CLK_M0,
- 0x57c, 1),
+ 0x57c, 1, 0),
DEF_COUPLED("eth1_chi", R9A07G043_ETH1_CLK_CHI, R9A07G043_CLK_ZT,
- 0x57c, 1),
+ 0x57c, 1, 0),
DEF_MOD("i2c0", R9A07G043_I2C0_PCLK, R9A07G043_CLK_P0,
- 0x580, 0),
+ 0x580, 0, 0),
DEF_MOD("i2c1", R9A07G043_I2C1_PCLK, R9A07G043_CLK_P0,
- 0x580, 1),
+ 0x580, 1, 0),
DEF_MOD("i2c2", R9A07G043_I2C2_PCLK, R9A07G043_CLK_P0,
- 0x580, 2),
+ 0x580, 2, 0),
DEF_MOD("i2c3", R9A07G043_I2C3_PCLK, R9A07G043_CLK_P0,
- 0x580, 3),
+ 0x580, 3, 0),
DEF_MOD("scif0", R9A07G043_SCIF0_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 0),
+ 0x584, 0, 0),
DEF_MOD("scif1", R9A07G043_SCIF1_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 1),
+ 0x584, 1, 0),
DEF_MOD("scif2", R9A07G043_SCIF2_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 2),
+ 0x584, 2, 0),
DEF_MOD("scif3", R9A07G043_SCIF3_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 3),
+ 0x584, 3, 0),
DEF_MOD("scif4", R9A07G043_SCIF4_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 4),
+ 0x584, 4, 0),
DEF_MOD("sci0", R9A07G043_SCI0_CLKP, R9A07G043_CLK_P0,
- 0x588, 0),
+ 0x588, 0, 0),
DEF_MOD("sci1", R9A07G043_SCI1_CLKP, R9A07G043_CLK_P0,
- 0x588, 1),
+ 0x588, 1, 0),
DEF_MOD("rspi0", R9A07G043_RSPI0_CLKB, R9A07G043_CLK_P0,
- 0x590, 0),
+ 0x590, 0, 0),
DEF_MOD("rspi1", R9A07G043_RSPI1_CLKB, R9A07G043_CLK_P0,
- 0x590, 1),
+ 0x590, 1, 0),
DEF_MOD("rspi2", R9A07G043_RSPI2_CLKB, R9A07G043_CLK_P0,
- 0x590, 2),
+ 0x590, 2, 0),
DEF_MOD("canfd", R9A07G043_CANFD_PCLK, R9A07G043_CLK_P0,
- 0x594, 0),
+ 0x594, 0, 0),
DEF_MOD("gpio", R9A07G043_GPIO_HCLK, R9A07G043_OSCCLK,
- 0x598, 0),
+ 0x598, 0, 0),
DEF_MOD("adc_adclk", R9A07G043_ADC_ADCLK, R9A07G043_CLK_TSU,
- 0x5a8, 0),
+ 0x5a8, 0, 0),
DEF_MOD("adc_pclk", R9A07G043_ADC_PCLK, R9A07G043_CLK_P0,
- 0x5a8, 1),
+ 0x5a8, 1, 0),
DEF_MOD("tsu_pclk", R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU,
- 0x5ac, 0),
+ 0x5ac, 0, 0),
#ifdef CONFIG_RISCV
DEF_MOD("nceplic_aclk", R9A07G043_NCEPLIC_ACLK, R9A07G043_CLK_P1,
- 0x608, 0),
+ 0x608, 0, 0),
#endif
};
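
Every DEF_MOD()/DEF_COUPLED() call in this file (and in r9a07g044-cpg.c and r9a08g045-cpg.c below) gains a trailing 0: the macros are being widened by one argument ahead of whatever will use it. Inferred shape, treating the new argument as an assumed per-module configuration word with 0 meaning "unused" (the real definition is in rzg2l-cpg.h; the .conf field name here is a guess):

#define DEF_MOD(_name, _id, _parent, _off, _bit, _conf)	\
	{						\
		.name = _name,				\
		.id = _id,				\
		.parent = _parent,			\
		.off = _off,				\
		.bit = _bit,				\
		.conf = _conf,	/* assumed new field */	\
	}
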
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index 77ca3a789568..c851d4eeebbe 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -242,176 +242,176 @@ static const struct {
} mod_clks = {
.common = {
DEF_MOD("gic", R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1,
- 0x514, 0),
+ 0x514, 0, 0),
DEF_MOD("ia55_pclk", R9A07G044_IA55_PCLK, R9A07G044_CLK_P2,
- 0x518, 0),
+ 0x518, 0, 0),
DEF_MOD("ia55_clk", R9A07G044_IA55_CLK, R9A07G044_CLK_P1,
- 0x518, 1),
+ 0x518, 1, 0),
DEF_MOD("dmac_aclk", R9A07G044_DMAC_ACLK, R9A07G044_CLK_P1,
- 0x52c, 0),
+ 0x52c, 0, 0),
DEF_MOD("dmac_pclk", R9A07G044_DMAC_PCLK, CLK_P1_DIV2,
- 0x52c, 1),
+ 0x52c, 1, 0),
DEF_MOD("ostm0_pclk", R9A07G044_OSTM0_PCLK, R9A07G044_CLK_P0,
- 0x534, 0),
+ 0x534, 0, 0),
DEF_MOD("ostm1_pclk", R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0,
- 0x534, 1),
+ 0x534, 1, 0),
DEF_MOD("ostm2_pclk", R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
- 0x534, 2),
+ 0x534, 2, 0),
DEF_MOD("mtu_x_mck", R9A07G044_MTU_X_MCK_MTU3, R9A07G044_CLK_P0,
- 0x538, 0),
+ 0x538, 0, 0),
DEF_MOD("gpt_pclk", R9A07G044_GPT_PCLK, R9A07G044_CLK_P0,
- 0x540, 0),
+ 0x540, 0, 0),
DEF_MOD("poeg_a_clkp", R9A07G044_POEG_A_CLKP, R9A07G044_CLK_P0,
- 0x544, 0),
+ 0x544, 0, 0),
DEF_MOD("poeg_b_clkp", R9A07G044_POEG_B_CLKP, R9A07G044_CLK_P0,
- 0x544, 1),
+ 0x544, 1, 0),
DEF_MOD("poeg_c_clkp", R9A07G044_POEG_C_CLKP, R9A07G044_CLK_P0,
- 0x544, 2),
+ 0x544, 2, 0),
DEF_MOD("poeg_d_clkp", R9A07G044_POEG_D_CLKP, R9A07G044_CLK_P0,
- 0x544, 3),
+ 0x544, 3, 0),
DEF_MOD("wdt0_pclk", R9A07G044_WDT0_PCLK, R9A07G044_CLK_P0,
- 0x548, 0),
+ 0x548, 0, 0),
DEF_MOD("wdt0_clk", R9A07G044_WDT0_CLK, R9A07G044_OSCCLK,
- 0x548, 1),
+ 0x548, 1, 0),
DEF_MOD("wdt1_pclk", R9A07G044_WDT1_PCLK, R9A07G044_CLK_P0,
- 0x548, 2),
+ 0x548, 2, 0),
DEF_MOD("wdt1_clk", R9A07G044_WDT1_CLK, R9A07G044_OSCCLK,
- 0x548, 3),
+ 0x548, 3, 0),
DEF_MOD("spi_clk2", R9A07G044_SPI_CLK2, R9A07G044_CLK_SPI1,
- 0x550, 0),
+ 0x550, 0, 0),
DEF_MOD("spi_clk", R9A07G044_SPI_CLK, R9A07G044_CLK_SPI0,
- 0x550, 1),
+ 0x550, 1, 0),
DEF_MOD("sdhi0_imclk", R9A07G044_SDHI0_IMCLK, CLK_SD0_DIV4,
- 0x554, 0),
+ 0x554, 0, 0),
DEF_MOD("sdhi0_imclk2", R9A07G044_SDHI0_IMCLK2, CLK_SD0_DIV4,
- 0x554, 1),
+ 0x554, 1, 0),
DEF_MOD("sdhi0_clk_hs", R9A07G044_SDHI0_CLK_HS, R9A07G044_CLK_SD0,
- 0x554, 2),
+ 0x554, 2, 0),
DEF_MOD("sdhi0_aclk", R9A07G044_SDHI0_ACLK, R9A07G044_CLK_P1,
- 0x554, 3),
+ 0x554, 3, 0),
DEF_MOD("sdhi1_imclk", R9A07G044_SDHI1_IMCLK, CLK_SD1_DIV4,
- 0x554, 4),
+ 0x554, 4, 0),
DEF_MOD("sdhi1_imclk2", R9A07G044_SDHI1_IMCLK2, CLK_SD1_DIV4,
- 0x554, 5),
+ 0x554, 5, 0),
DEF_MOD("sdhi1_clk_hs", R9A07G044_SDHI1_CLK_HS, R9A07G044_CLK_SD1,
- 0x554, 6),
+ 0x554, 6, 0),
DEF_MOD("sdhi1_aclk", R9A07G044_SDHI1_ACLK, R9A07G044_CLK_P1,
- 0x554, 7),
+ 0x554, 7, 0),
DEF_MOD("gpu_clk", R9A07G044_GPU_CLK, R9A07G044_CLK_G,
- 0x558, 0),
+ 0x558, 0, 0),
DEF_MOD("gpu_axi_clk", R9A07G044_GPU_AXI_CLK, R9A07G044_CLK_P1,
- 0x558, 1),
+ 0x558, 1, 0),
DEF_MOD("gpu_ace_clk", R9A07G044_GPU_ACE_CLK, R9A07G044_CLK_P1,
- 0x558, 2),
+ 0x558, 2, 0),
DEF_MOD("cru_sysclk", R9A07G044_CRU_SYSCLK, CLK_M2_DIV2,
- 0x564, 0),
+ 0x564, 0, 0),
DEF_MOD("cru_vclk", R9A07G044_CRU_VCLK, R9A07G044_CLK_M2,
- 0x564, 1),
+ 0x564, 1, 0),
DEF_MOD("cru_pclk", R9A07G044_CRU_PCLK, R9A07G044_CLK_ZT,
- 0x564, 2),
+ 0x564, 2, 0),
DEF_MOD("cru_aclk", R9A07G044_CRU_ACLK, R9A07G044_CLK_M0,
- 0x564, 3),
+ 0x564, 3, 0),
DEF_MOD("dsi_pll_clk", R9A07G044_MIPI_DSI_PLLCLK, R9A07G044_CLK_M1,
- 0x568, 0),
+ 0x568, 0, 0),
DEF_MOD("dsi_sys_clk", R9A07G044_MIPI_DSI_SYSCLK, CLK_M2_DIV2,
- 0x568, 1),
+ 0x568, 1, 0),
DEF_MOD("dsi_aclk", R9A07G044_MIPI_DSI_ACLK, R9A07G044_CLK_P1,
- 0x568, 2),
+ 0x568, 2, 0),
DEF_MOD("dsi_pclk", R9A07G044_MIPI_DSI_PCLK, R9A07G044_CLK_P2,
- 0x568, 3),
+ 0x568, 3, 0),
DEF_MOD("dsi_vclk", R9A07G044_MIPI_DSI_VCLK, R9A07G044_CLK_M3,
- 0x568, 4),
+ 0x568, 4, 0),
DEF_MOD("dsi_lpclk", R9A07G044_MIPI_DSI_LPCLK, R9A07G044_CLK_M4,
- 0x568, 5),
+ 0x568, 5, 0),
DEF_COUPLED("lcdc_a", R9A07G044_LCDC_CLK_A, R9A07G044_CLK_M0,
- 0x56c, 0),
+ 0x56c, 0, 0),
DEF_COUPLED("lcdc_p", R9A07G044_LCDC_CLK_P, R9A07G044_CLK_ZT,
- 0x56c, 0),
+ 0x56c, 0, 0),
DEF_MOD("lcdc_clk_d", R9A07G044_LCDC_CLK_D, R9A07G044_CLK_M3,
- 0x56c, 1),
+ 0x56c, 1, 0),
DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
- 0x570, 0),
+ 0x570, 0, 0),
DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 1),
+ 0x570, 1, 0),
DEF_MOD("ssi1_pclk", R9A07G044_SSI1_PCLK2, R9A07G044_CLK_P0,
- 0x570, 2),
+ 0x570, 2, 0),
DEF_MOD("ssi1_sfr", R9A07G044_SSI1_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 3),
+ 0x570, 3, 0),
DEF_MOD("ssi2_pclk", R9A07G044_SSI2_PCLK2, R9A07G044_CLK_P0,
- 0x570, 4),
+ 0x570, 4, 0),
DEF_MOD("ssi2_sfr", R9A07G044_SSI2_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 5),
+ 0x570, 5, 0),
DEF_MOD("ssi3_pclk", R9A07G044_SSI3_PCLK2, R9A07G044_CLK_P0,
- 0x570, 6),
+ 0x570, 6, 0),
DEF_MOD("ssi3_sfr", R9A07G044_SSI3_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 7),
+ 0x570, 7, 0),
DEF_MOD("usb0_host", R9A07G044_USB_U2H0_HCLK, R9A07G044_CLK_P1,
- 0x578, 0),
+ 0x578, 0, 0),
DEF_MOD("usb1_host", R9A07G044_USB_U2H1_HCLK, R9A07G044_CLK_P1,
- 0x578, 1),
+ 0x578, 1, 0),
DEF_MOD("usb0_func", R9A07G044_USB_U2P_EXR_CPUCLK, R9A07G044_CLK_P1,
- 0x578, 2),
+ 0x578, 2, 0),
DEF_MOD("usb_pclk", R9A07G044_USB_PCLK, R9A07G044_CLK_P1,
- 0x578, 3),
+ 0x578, 3, 0),
DEF_COUPLED("eth0_axi", R9A07G044_ETH0_CLK_AXI, R9A07G044_CLK_M0,
- 0x57c, 0),
+ 0x57c, 0, 0),
DEF_COUPLED("eth0_chi", R9A07G044_ETH0_CLK_CHI, R9A07G044_CLK_ZT,
- 0x57c, 0),
+ 0x57c, 0, 0),
DEF_COUPLED("eth1_axi", R9A07G044_ETH1_CLK_AXI, R9A07G044_CLK_M0,
- 0x57c, 1),
+ 0x57c, 1, 0),
DEF_COUPLED("eth1_chi", R9A07G044_ETH1_CLK_CHI, R9A07G044_CLK_ZT,
- 0x57c, 1),
+ 0x57c, 1, 0),
DEF_MOD("i2c0", R9A07G044_I2C0_PCLK, R9A07G044_CLK_P0,
- 0x580, 0),
+ 0x580, 0, 0),
DEF_MOD("i2c1", R9A07G044_I2C1_PCLK, R9A07G044_CLK_P0,
- 0x580, 1),
+ 0x580, 1, 0),
DEF_MOD("i2c2", R9A07G044_I2C2_PCLK, R9A07G044_CLK_P0,
- 0x580, 2),
+ 0x580, 2, 0),
DEF_MOD("i2c3", R9A07G044_I2C3_PCLK, R9A07G044_CLK_P0,
- 0x580, 3),
+ 0x580, 3, 0),
DEF_MOD("scif0", R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 0),
+ 0x584, 0, 0),
DEF_MOD("scif1", R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 1),
+ 0x584, 1, 0),
DEF_MOD("scif2", R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 2),
+ 0x584, 2, 0),
DEF_MOD("scif3", R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 3),
+ 0x584, 3, 0),
DEF_MOD("scif4", R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 4),
+ 0x584, 4, 0),
DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
- 0x588, 0),
+ 0x588, 0, 0),
DEF_MOD("sci1", R9A07G044_SCI1_CLKP, R9A07G044_CLK_P0,
- 0x588, 1),
+ 0x588, 1, 0),
DEF_MOD("rspi0", R9A07G044_RSPI0_CLKB, R9A07G044_CLK_P0,
- 0x590, 0),
+ 0x590, 0, 0),
DEF_MOD("rspi1", R9A07G044_RSPI1_CLKB, R9A07G044_CLK_P0,
- 0x590, 1),
+ 0x590, 1, 0),
DEF_MOD("rspi2", R9A07G044_RSPI2_CLKB, R9A07G044_CLK_P0,
- 0x590, 2),
+ 0x590, 2, 0),
DEF_MOD("canfd", R9A07G044_CANFD_PCLK, R9A07G044_CLK_P0,
- 0x594, 0),
+ 0x594, 0, 0),
DEF_MOD("gpio", R9A07G044_GPIO_HCLK, R9A07G044_OSCCLK,
- 0x598, 0),
+ 0x598, 0, 0),
DEF_MOD("adc_adclk", R9A07G044_ADC_ADCLK, R9A07G044_CLK_TSU,
- 0x5a8, 0),
+ 0x5a8, 0, 0),
DEF_MOD("adc_pclk", R9A07G044_ADC_PCLK, R9A07G044_CLK_P0,
- 0x5a8, 1),
+ 0x5a8, 1, 0),
DEF_MOD("tsu_pclk", R9A07G044_TSU_PCLK, R9A07G044_CLK_TSU,
- 0x5ac, 0),
+ 0x5ac, 0, 0),
},
#ifdef CONFIG_CLK_R9A07G054
.drp = {
DEF_MOD("stpai_initclk", R9A07G054_STPAI_INITCLK, R9A07G044_OSCCLK,
- 0x5e8, 0),
+ 0x5e8, 0, 0),
DEF_MOD("stpai_aclk", R9A07G054_STPAI_ACLK, R9A07G044_CLK_P1,
- 0x5e8, 1),
+ 0x5e8, 1, 0),
DEF_MOD("stpai_mclk", R9A07G054_STPAI_MCLK, R9A07G054_CLK_DRP_M,
- 0x5e8, 2),
+ 0x5e8, 2, 0),
DEF_MOD("stpai_dclkin", R9A07G054_STPAI_DCLKIN, R9A07G054_CLK_DRP_D,
- 0x5e8, 3),
+ 0x5e8, 3, 0),
DEF_MOD("stpai_aclk_drp", R9A07G054_STPAI_ACLK_DRP, R9A07G054_CLK_DRP_A,
- 0x5e8, 4),
+ 0x5e8, 4, 0),
},
#endif
};
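The hunks above are purely mechanical: every DEF_MOD()/DEF_COUPLED() entry grows a trailing MSTOP argument, and 0 means the module has no MSTOP (bus master stop) bits to drive. A minimal sketch of the widened macro, assuming struct rzg2l_mod_clk simply gains an mstop_conf member (the member name and macro internals are illustrative; the real definition lives in drivers/clk/renesas/rzg2l-cpg.h):

/* Sketch only: off/bit locate the module clock's CLK_ON register and bit;
 * mstop carries the packed MSTOP register/bitmask, or 0 when there is none. */
#define DEF_MOD(_name, _id, _parent, _off, _bit, _mstop) \
	{ \
		.name = (_name), \
		.id = MOD_CLK_BASE + (_id), \
		.parent = (_parent), \
		.off = (_off), \
		.bit = (_bit), \
		.mstop_conf = (_mstop), \
	}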
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index 4035f3443598..ed0661997928 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -192,58 +192,107 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
};
static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
- DEF_MOD("gic_gicclk", R9A08G045_GIC600_GICCLK, R9A08G045_CLK_P1, 0x514, 0),
- DEF_MOD("ia55_pclk", R9A08G045_IA55_PCLK, R9A08G045_CLK_P2, 0x518, 0),
- DEF_MOD("ia55_clk", R9A08G045_IA55_CLK, R9A08G045_CLK_P1, 0x518, 1),
- DEF_MOD("dmac_aclk", R9A08G045_DMAC_ACLK, R9A08G045_CLK_P3, 0x52c, 0),
- DEF_MOD("dmac_pclk", R9A08G045_DMAC_PCLK, CLK_P3_DIV2, 0x52c, 1),
- DEF_MOD("wdt0_pclk", R9A08G045_WDT0_PCLK, R9A08G045_CLK_P0, 0x548, 0),
- DEF_MOD("wdt0_clk", R9A08G045_WDT0_CLK, R9A08G045_OSCCLK, 0x548, 1),
- DEF_MOD("sdhi0_imclk", R9A08G045_SDHI0_IMCLK, CLK_SD0_DIV4, 0x554, 0),
- DEF_MOD("sdhi0_imclk2", R9A08G045_SDHI0_IMCLK2, CLK_SD0_DIV4, 0x554, 1),
- DEF_MOD("sdhi0_clk_hs", R9A08G045_SDHI0_CLK_HS, R9A08G045_CLK_SD0, 0x554, 2),
- DEF_MOD("sdhi0_aclk", R9A08G045_SDHI0_ACLK, R9A08G045_CLK_P1, 0x554, 3),
- DEF_MOD("sdhi1_imclk", R9A08G045_SDHI1_IMCLK, CLK_SD1_DIV4, 0x554, 4),
- DEF_MOD("sdhi1_imclk2", R9A08G045_SDHI1_IMCLK2, CLK_SD1_DIV4, 0x554, 5),
- DEF_MOD("sdhi1_clk_hs", R9A08G045_SDHI1_CLK_HS, R9A08G045_CLK_SD1, 0x554, 6),
- DEF_MOD("sdhi1_aclk", R9A08G045_SDHI1_ACLK, R9A08G045_CLK_P1, 0x554, 7),
- DEF_MOD("sdhi2_imclk", R9A08G045_SDHI2_IMCLK, CLK_SD2_DIV4, 0x554, 8),
- DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9),
- DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10),
- DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11),
- DEF_MOD("ssi0_pclk2", R9A08G045_SSI0_PCLK2, R9A08G045_CLK_P0, 0x570, 0),
- DEF_MOD("ssi0_sfr", R9A08G045_SSI0_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 1),
- DEF_MOD("ssi1_pclk2", R9A08G045_SSI1_PCLK2, R9A08G045_CLK_P0, 0x570, 2),
- DEF_MOD("ssi1_sfr", R9A08G045_SSI1_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 3),
- DEF_MOD("ssi2_pclk2", R9A08G045_SSI2_PCLK2, R9A08G045_CLK_P0, 0x570, 4),
- DEF_MOD("ssi2_sfr", R9A08G045_SSI2_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 5),
- DEF_MOD("ssi3_pclk2", R9A08G045_SSI3_PCLK2, R9A08G045_CLK_P0, 0x570, 6),
- DEF_MOD("ssi3_sfr", R9A08G045_SSI3_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 7),
- DEF_MOD("usb0_host", R9A08G045_USB_U2H0_HCLK, R9A08G045_CLK_P1, 0x578, 0),
- DEF_MOD("usb1_host", R9A08G045_USB_U2H1_HCLK, R9A08G045_CLK_P1, 0x578, 1),
- DEF_MOD("usb0_func", R9A08G045_USB_U2P_EXR_CPUCLK, R9A08G045_CLK_P1, 0x578, 2),
- DEF_MOD("usb_pclk", R9A08G045_USB_PCLK, R9A08G045_CLK_P1, 0x578, 3),
- DEF_COUPLED("eth0_axi", R9A08G045_ETH0_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 0),
- DEF_COUPLED("eth0_chi", R9A08G045_ETH0_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 0),
- DEF_MOD("eth0_refclk", R9A08G045_ETH0_REFCLK, R9A08G045_CLK_HP, 0x57c, 8),
- DEF_COUPLED("eth1_axi", R9A08G045_ETH1_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 1),
- DEF_COUPLED("eth1_chi", R9A08G045_ETH1_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 1),
- DEF_MOD("eth1_refclk", R9A08G045_ETH1_REFCLK, R9A08G045_CLK_HP, 0x57c, 9),
- DEF_MOD("i2c0_pclk", R9A08G045_I2C0_PCLK, R9A08G045_CLK_P0, 0x580, 0),
- DEF_MOD("i2c1_pclk", R9A08G045_I2C1_PCLK, R9A08G045_CLK_P0, 0x580, 1),
- DEF_MOD("i2c2_pclk", R9A08G045_I2C2_PCLK, R9A08G045_CLK_P0, 0x580, 2),
- DEF_MOD("i2c3_pclk", R9A08G045_I2C3_PCLK, R9A08G045_CLK_P0, 0x580, 3),
- DEF_MOD("scif0_clk_pck", R9A08G045_SCIF0_CLK_PCK, R9A08G045_CLK_P0, 0x584, 0),
- DEF_MOD("scif1_clk_pck", R9A08G045_SCIF1_CLK_PCK, R9A08G045_CLK_P0, 0x584, 1),
- DEF_MOD("scif2_clk_pck", R9A08G045_SCIF2_CLK_PCK, R9A08G045_CLK_P0, 0x584, 2),
- DEF_MOD("scif3_clk_pck", R9A08G045_SCIF3_CLK_PCK, R9A08G045_CLK_P0, 0x584, 3),
- DEF_MOD("scif4_clk_pck", R9A08G045_SCIF4_CLK_PCK, R9A08G045_CLK_P0, 0x584, 4),
- DEF_MOD("scif5_clk_pck", R9A08G045_SCIF5_CLK_PCK, R9A08G045_CLK_P0, 0x584, 5),
- DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0),
- DEF_MOD("adc_adclk", R9A08G045_ADC_ADCLK, R9A08G045_CLK_TSU, 0x5a8, 0),
- DEF_MOD("adc_pclk", R9A08G045_ADC_PCLK, R9A08G045_CLK_TSU, 0x5a8, 1),
- DEF_MOD("tsu_pclk", R9A08G045_TSU_PCLK, R9A08G045_CLK_TSU, 0x5ac, 0),
- DEF_MOD("vbat_bclk", R9A08G045_VBAT_BCLK, R9A08G045_OSCCLK, 0x614, 0),
+ DEF_MOD("gic_gicclk", R9A08G045_GIC600_GICCLK, R9A08G045_CLK_P1, 0x514, 0,
+ MSTOP(BUS_ACPU, BIT(3))),
+ DEF_MOD("ia55_pclk", R9A08G045_IA55_PCLK, R9A08G045_CLK_P2, 0x518, 0,
+ MSTOP(BUS_PERI_CPU, BIT(13))),
+ DEF_MOD("ia55_clk", R9A08G045_IA55_CLK, R9A08G045_CLK_P1, 0x518, 1,
+ MSTOP(BUS_PERI_CPU, BIT(13))),
+ DEF_MOD("dmac_aclk", R9A08G045_DMAC_ACLK, R9A08G045_CLK_P3, 0x52c, 0,
+ MSTOP(BUS_REG1, BIT(2))),
+ DEF_MOD("dmac_pclk", R9A08G045_DMAC_PCLK, CLK_P3_DIV2, 0x52c, 1,
+ MSTOP(BUS_REG1, BIT(3))),
+ DEF_MOD("wdt0_pclk", R9A08G045_WDT0_PCLK, R9A08G045_CLK_P0, 0x548, 0,
+ MSTOP(BUS_REG0, BIT(0))),
+ DEF_MOD("wdt0_clk", R9A08G045_WDT0_CLK, R9A08G045_OSCCLK, 0x548, 1,
+ MSTOP(BUS_REG0, BIT(0))),
+ DEF_MOD("sdhi0_imclk", R9A08G045_SDHI0_IMCLK, CLK_SD0_DIV4, 0x554, 0,
+ MSTOP(BUS_PERI_COM, BIT(0))),
+ DEF_MOD("sdhi0_imclk2", R9A08G045_SDHI0_IMCLK2, CLK_SD0_DIV4, 0x554, 1,
+ MSTOP(BUS_PERI_COM, BIT(0))),
+ DEF_MOD("sdhi0_clk_hs", R9A08G045_SDHI0_CLK_HS, R9A08G045_CLK_SD0, 0x554, 2,
+ MSTOP(BUS_PERI_COM, BIT(0))),
+ DEF_MOD("sdhi0_aclk", R9A08G045_SDHI0_ACLK, R9A08G045_CLK_P1, 0x554, 3,
+ MSTOP(BUS_PERI_COM, BIT(0))),
+ DEF_MOD("sdhi1_imclk", R9A08G045_SDHI1_IMCLK, CLK_SD1_DIV4, 0x554, 4,
+ MSTOP(BUS_PERI_COM, BIT(1))),
+ DEF_MOD("sdhi1_imclk2", R9A08G045_SDHI1_IMCLK2, CLK_SD1_DIV4, 0x554, 5,
+ MSTOP(BUS_PERI_COM, BIT(1))),
+ DEF_MOD("sdhi1_clk_hs", R9A08G045_SDHI1_CLK_HS, R9A08G045_CLK_SD1, 0x554, 6,
+ MSTOP(BUS_PERI_COM, BIT(1))),
+ DEF_MOD("sdhi1_aclk", R9A08G045_SDHI1_ACLK, R9A08G045_CLK_P1, 0x554, 7,
+ MSTOP(BUS_PERI_COM, BIT(1))),
+ DEF_MOD("sdhi2_imclk", R9A08G045_SDHI2_IMCLK, CLK_SD2_DIV4, 0x554, 8,
+ MSTOP(BUS_PERI_COM, BIT(11))),
+ DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9,
+ MSTOP(BUS_PERI_COM, BIT(11))),
+ DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10,
+ MSTOP(BUS_PERI_COM, BIT(11))),
+ DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11,
+ MSTOP(BUS_PERI_COM, BIT(11))),
+ DEF_MOD("ssi0_pclk2", R9A08G045_SSI0_PCLK2, R9A08G045_CLK_P0, 0x570, 0,
+ MSTOP(BUS_MCPU1, BIT(10))),
+ DEF_MOD("ssi0_sfr", R9A08G045_SSI0_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 1,
+ MSTOP(BUS_MCPU1, BIT(10))),
+ DEF_MOD("ssi1_pclk2", R9A08G045_SSI1_PCLK2, R9A08G045_CLK_P0, 0x570, 2,
+ MSTOP(BUS_MCPU1, BIT(11))),
+ DEF_MOD("ssi1_sfr", R9A08G045_SSI1_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 3,
+ MSTOP(BUS_MCPU1, BIT(11))),
+ DEF_MOD("ssi2_pclk2", R9A08G045_SSI2_PCLK2, R9A08G045_CLK_P0, 0x570, 4,
+ MSTOP(BUS_MCPU1, BIT(12))),
+ DEF_MOD("ssi2_sfr", R9A08G045_SSI2_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 5,
+ MSTOP(BUS_MCPU1, BIT(12))),
+ DEF_MOD("ssi3_pclk2", R9A08G045_SSI3_PCLK2, R9A08G045_CLK_P0, 0x570, 6,
+ MSTOP(BUS_MCPU1, BIT(13))),
+ DEF_MOD("ssi3_sfr", R9A08G045_SSI3_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 7,
+ MSTOP(BUS_MCPU1, BIT(13))),
+ DEF_MOD("usb0_host", R9A08G045_USB_U2H0_HCLK, R9A08G045_CLK_P1, 0x578, 0,
+ MSTOP(BUS_PERI_COM, BIT(5))),
+ DEF_MOD("usb1_host", R9A08G045_USB_U2H1_HCLK, R9A08G045_CLK_P1, 0x578, 1,
+ MSTOP(BUS_PERI_COM, BIT(7))),
+ DEF_MOD("usb0_func", R9A08G045_USB_U2P_EXR_CPUCLK, R9A08G045_CLK_P1, 0x578, 2,
+ MSTOP(BUS_PERI_COM, BIT(6))),
+ DEF_MOD("usb_pclk", R9A08G045_USB_PCLK, R9A08G045_CLK_P1, 0x578, 3,
+ MSTOP(BUS_PERI_COM, BIT(4))),
+ DEF_COUPLED("eth0_axi", R9A08G045_ETH0_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 0,
+ MSTOP(BUS_PERI_COM, BIT(2))),
+ DEF_COUPLED("eth0_chi", R9A08G045_ETH0_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 0,
+ MSTOP(BUS_PERI_COM, BIT(2))),
+ DEF_MOD("eth0_refclk", R9A08G045_ETH0_REFCLK, R9A08G045_CLK_HP, 0x57c, 8, 0),
+ DEF_COUPLED("eth1_axi", R9A08G045_ETH1_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 1,
+ MSTOP(BUS_PERI_COM, BIT(3))),
+ DEF_COUPLED("eth1_chi", R9A08G045_ETH1_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 1,
+ MSTOP(BUS_PERI_COM, BIT(3))),
+ DEF_MOD("eth1_refclk", R9A08G045_ETH1_REFCLK, R9A08G045_CLK_HP, 0x57c, 9, 0),
+ DEF_MOD("i2c0_pclk", R9A08G045_I2C0_PCLK, R9A08G045_CLK_P0, 0x580, 0,
+ MSTOP(BUS_MCPU2, BIT(10))),
+ DEF_MOD("i2c1_pclk", R9A08G045_I2C1_PCLK, R9A08G045_CLK_P0, 0x580, 1,
+ MSTOP(BUS_MCPU2, BIT(11))),
+ DEF_MOD("i2c2_pclk", R9A08G045_I2C2_PCLK, R9A08G045_CLK_P0, 0x580, 2,
+ MSTOP(BUS_MCPU2, BIT(12))),
+ DEF_MOD("i2c3_pclk", R9A08G045_I2C3_PCLK, R9A08G045_CLK_P0, 0x580, 3,
+ MSTOP(BUS_MCPU2, BIT(13))),
+ DEF_MOD("scif0_clk_pck", R9A08G045_SCIF0_CLK_PCK, R9A08G045_CLK_P0, 0x584, 0,
+ MSTOP(BUS_MCPU2, BIT(1))),
+ DEF_MOD("scif1_clk_pck", R9A08G045_SCIF1_CLK_PCK, R9A08G045_CLK_P0, 0x584, 1,
+ MSTOP(BUS_MCPU2, BIT(2))),
+ DEF_MOD("scif2_clk_pck", R9A08G045_SCIF2_CLK_PCK, R9A08G045_CLK_P0, 0x584, 2,
+ MSTOP(BUS_MCPU2, BIT(3))),
+ DEF_MOD("scif3_clk_pck", R9A08G045_SCIF3_CLK_PCK, R9A08G045_CLK_P0, 0x584, 3,
+ MSTOP(BUS_MCPU2, BIT(4))),
+ DEF_MOD("scif4_clk_pck", R9A08G045_SCIF4_CLK_PCK, R9A08G045_CLK_P0, 0x584, 4,
+ MSTOP(BUS_MCPU2, BIT(5))),
+ DEF_MOD("scif5_clk_pck", R9A08G045_SCIF5_CLK_PCK, R9A08G045_CLK_P0, 0x584, 5,
+ MSTOP(BUS_MCPU3, BIT(4))),
+ DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0, 0),
+ DEF_MOD("adc_adclk", R9A08G045_ADC_ADCLK, R9A08G045_CLK_TSU, 0x5a8, 0,
+ MSTOP(BUS_MCPU2, BIT(14))),
+ DEF_MOD("adc_pclk", R9A08G045_ADC_PCLK, R9A08G045_CLK_TSU, 0x5a8, 1,
+ MSTOP(BUS_MCPU2, BIT(14))),
+ DEF_MOD("tsu_pclk", R9A08G045_TSU_PCLK, R9A08G045_CLK_TSU, 0x5ac, 0,
+ MSTOP(BUS_MCPU2, BIT(15))),
+ DEF_MOD("vbat_bclk", R9A08G045_VBAT_BCLK, R9A08G045_OSCCLK, 0x614, 0,
+ MSTOP(BUS_MCPU3, GENMASK(8, 7))),
};
static const struct rzg2l_reset r9a08g045_resets[] = {
@@ -293,78 +342,6 @@ static const unsigned int r9a08g045_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A08G045_VBAT_BCLK,
};
-static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
- /* Keep always-on domain on the first position for proper domains registration. */
- DEF_PD("always-on", R9A08G045_PD_ALWAYS_ON,
- DEF_REG_CONF(0, 0),
- GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_IRQ_SAFE),
- DEF_PD("gic", R9A08G045_PD_GIC,
- DEF_REG_CONF(CPG_BUS_ACPU_MSTOP, BIT(3)),
- GENPD_FLAG_ALWAYS_ON),
- DEF_PD("ia55", R9A08G045_PD_IA55,
- DEF_REG_CONF(CPG_BUS_PERI_CPU_MSTOP, BIT(13)),
- GENPD_FLAG_ALWAYS_ON),
- DEF_PD("dmac", R9A08G045_PD_DMAC,
- DEF_REG_CONF(CPG_BUS_REG1_MSTOP, GENMASK(3, 0)),
- GENPD_FLAG_ALWAYS_ON),
- DEF_PD("wdt0", R9A08G045_PD_WDT0,
- DEF_REG_CONF(CPG_BUS_REG0_MSTOP, BIT(0)),
- GENPD_FLAG_IRQ_SAFE),
- DEF_PD("sdhi0", R9A08G045_PD_SDHI0,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(0)), 0),
- DEF_PD("sdhi1", R9A08G045_PD_SDHI1,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(1)), 0),
- DEF_PD("sdhi2", R9A08G045_PD_SDHI2,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)), 0),
- DEF_PD("ssi0", R9A08G045_PD_SSI0,
- DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(10)), 0),
- DEF_PD("ssi1", R9A08G045_PD_SSI1,
- DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(11)), 0),
- DEF_PD("ssi2", R9A08G045_PD_SSI2,
- DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(12)), 0),
- DEF_PD("ssi3", R9A08G045_PD_SSI3,
- DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(13)), 0),
- DEF_PD("usb0", R9A08G045_PD_USB0,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, GENMASK(6, 5)), 0),
- DEF_PD("usb1", R9A08G045_PD_USB1,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(7)), 0),
- DEF_PD("usb-phy", R9A08G045_PD_USB_PHY,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(4)), 0),
- DEF_PD("eth0", R9A08G045_PD_ETHER0,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(2)), 0),
- DEF_PD("eth1", R9A08G045_PD_ETHER1,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(3)), 0),
- DEF_PD("i2c0", R9A08G045_PD_I2C0,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(10)), 0),
- DEF_PD("i2c1", R9A08G045_PD_I2C1,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(11)), 0),
- DEF_PD("i2c2", R9A08G045_PD_I2C2,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(12)), 0),
- DEF_PD("i2c3", R9A08G045_PD_I2C3,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(13)), 0),
- DEF_PD("scif0", R9A08G045_PD_SCIF0,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(1)), 0),
- DEF_PD("scif1", R9A08G045_PD_SCIF1,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(2)), 0),
- DEF_PD("scif2", R9A08G045_PD_SCIF2,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(3)), 0),
- DEF_PD("scif3", R9A08G045_PD_SCIF3,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(4)), 0),
- DEF_PD("scif4", R9A08G045_PD_SCIF4,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(5)), 0),
- DEF_PD("scif5", R9A08G045_PD_SCIF5,
- DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(4)), 0),
- DEF_PD("adc", R9A08G045_PD_ADC,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(14)), 0),
- DEF_PD("tsu", R9A08G045_PD_TSU,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(15)), 0),
- DEF_PD("vbat", R9A08G045_PD_VBAT,
- DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(8)),
- GENPD_FLAG_ALWAYS_ON),
- DEF_PD("rtc", R9A08G045_PD_RTC,
- DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(7)), 0),
-};
-
const struct rzg2l_cpg_info r9a08g045_cpg_info = {
/* Core Clocks */
.core_clks = r9a08g045_core_clks,
@@ -385,9 +362,5 @@ const struct rzg2l_cpg_info r9a08g045_cpg_info = {
.resets = r9a08g045_resets,
.num_resets = R9A08G045_VBAT_BRESETN + 1, /* Last reset ID + 1 */
- /* Power domains */
- .pm_domains = r9a08g045_pm_domains,
- .num_pm_domains = ARRAY_SIZE(r9a08g045_pm_domains),
-
.has_clk_mon_regs = true,
};
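On RZ/G3S the same widening carries real payload: the MSTOP register/bitmask pairs that previously lived in the dedicated r9a08g045_pm_domains[] table move onto the module clocks themselves, which is why the whole power-domain table and its .pm_domains/.num_pm_domains hookup can be dropped. A plausible packing for the MSTOP() helper, assuming a 16/16 split between bus register and bitmask (the exact layout is not shown in this diff):

/* e.g. MSTOP(BUS_PERI_COM, BIT(1)) pairs CPG_BUS_PERI_COM_MSTOP with BIT(1),
 * the same information the deleted DEF_REG_CONF() entries used to carry. */
#define MSTOP(bus, bits)	(((CPG_##bus##_MSTOP) << 16) | (bits))
#define MSTOP_OFF(conf)		((conf) >> 16)		/* MSTOP register offset */
#define MSTOP_MASK(conf)	((conf) & GENMASK(15, 0))	/* bits to flip */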
diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c
index 22272279b104..ba25429c244d 100644
--- a/drivers/clk/renesas/r9a09g011-cpg.c
+++ b/drivers/clk/renesas/r9a09g011-cpg.c
@@ -151,64 +151,64 @@ static const struct cpg_core_clk r9a09g011_core_clks[] __initconst = {
};
static const struct rzg2l_mod_clk r9a09g011_mod_clks[] __initconst = {
- DEF_MOD("pfc", R9A09G011_PFC_PCLK, CLK_MAIN, 0x400, 2),
- DEF_MOD("gic", R9A09G011_GIC_CLK, CLK_SEL_B_D2, 0x400, 5),
- DEF_MOD("sdi0_aclk", R9A09G011_SDI0_ACLK, CLK_SEL_D, 0x408, 0),
- DEF_MOD("sdi0_imclk", R9A09G011_SDI0_IMCLK, CLK_SEL_SDI, 0x408, 1),
- DEF_MOD("sdi0_imclk2", R9A09G011_SDI0_IMCLK2, CLK_SEL_SDI, 0x408, 2),
- DEF_MOD("sdi0_clk_hs", R9A09G011_SDI0_CLK_HS, CLK_PLL2_800, 0x408, 3),
- DEF_MOD("sdi1_aclk", R9A09G011_SDI1_ACLK, CLK_SEL_D, 0x408, 4),
- DEF_MOD("sdi1_imclk", R9A09G011_SDI1_IMCLK, CLK_SEL_SDI, 0x408, 5),
- DEF_MOD("sdi1_imclk2", R9A09G011_SDI1_IMCLK2, CLK_SEL_SDI, 0x408, 6),
- DEF_MOD("sdi1_clk_hs", R9A09G011_SDI1_CLK_HS, CLK_PLL2_800, 0x408, 7),
- DEF_MOD("emm_aclk", R9A09G011_EMM_ACLK, CLK_SEL_D, 0x408, 8),
- DEF_MOD("emm_imclk", R9A09G011_EMM_IMCLK, CLK_SEL_SDI, 0x408, 9),
- DEF_MOD("emm_imclk2", R9A09G011_EMM_IMCLK2, CLK_SEL_SDI, 0x408, 10),
- DEF_MOD("emm_clk_hs", R9A09G011_EMM_CLK_HS, CLK_PLL2_800, 0x408, 11),
- DEF_COUPLED("eth_axi", R9A09G011_ETH0_CLK_AXI, CLK_PLL2_200, 0x40c, 8),
- DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8),
- DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9),
- DEF_MOD("usb_aclk_h", R9A09G011_USB_ACLK_H, CLK_SEL_D, 0x40c, 4),
- DEF_MOD("usb_aclk_p", R9A09G011_USB_ACLK_P, CLK_SEL_D, 0x40c, 5),
- DEF_MOD("usb_pclk", R9A09G011_USB_PCLK, CLK_SEL_E, 0x40c, 6),
- DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12),
- DEF_MOD("iic_pclk0", R9A09G011_IIC_PCLK0, CLK_SEL_E, 0x420, 12),
- DEF_MOD("cperi_grpb", R9A09G011_CPERI_GRPB_PCLK, CLK_SEL_E, 0x424, 0),
- DEF_MOD("tim_clk_8", R9A09G011_TIM8_CLK, CLK_MAIN_2, 0x424, 4),
- DEF_MOD("tim_clk_9", R9A09G011_TIM9_CLK, CLK_MAIN_2, 0x424, 5),
- DEF_MOD("tim_clk_10", R9A09G011_TIM10_CLK, CLK_MAIN_2, 0x424, 6),
- DEF_MOD("tim_clk_11", R9A09G011_TIM11_CLK, CLK_MAIN_2, 0x424, 7),
- DEF_MOD("tim_clk_12", R9A09G011_TIM12_CLK, CLK_MAIN_2, 0x424, 8),
- DEF_MOD("tim_clk_13", R9A09G011_TIM13_CLK, CLK_MAIN_2, 0x424, 9),
- DEF_MOD("tim_clk_14", R9A09G011_TIM14_CLK, CLK_MAIN_2, 0x424, 10),
- DEF_MOD("tim_clk_15", R9A09G011_TIM15_CLK, CLK_MAIN_2, 0x424, 11),
- DEF_MOD("iic_pclk1", R9A09G011_IIC_PCLK1, CLK_SEL_E, 0x424, 12),
- DEF_MOD("cperi_grpc", R9A09G011_CPERI_GRPC_PCLK, CLK_SEL_E, 0x428, 0),
- DEF_MOD("tim_clk_16", R9A09G011_TIM16_CLK, CLK_MAIN_2, 0x428, 4),
- DEF_MOD("tim_clk_17", R9A09G011_TIM17_CLK, CLK_MAIN_2, 0x428, 5),
- DEF_MOD("tim_clk_18", R9A09G011_TIM18_CLK, CLK_MAIN_2, 0x428, 6),
- DEF_MOD("tim_clk_19", R9A09G011_TIM19_CLK, CLK_MAIN_2, 0x428, 7),
- DEF_MOD("tim_clk_20", R9A09G011_TIM20_CLK, CLK_MAIN_2, 0x428, 8),
- DEF_MOD("tim_clk_21", R9A09G011_TIM21_CLK, CLK_MAIN_2, 0x428, 9),
- DEF_MOD("tim_clk_22", R9A09G011_TIM22_CLK, CLK_MAIN_2, 0x428, 10),
- DEF_MOD("tim_clk_23", R9A09G011_TIM23_CLK, CLK_MAIN_2, 0x428, 11),
- DEF_MOD("wdt0_pclk", R9A09G011_WDT0_PCLK, CLK_SEL_E, 0x428, 12),
- DEF_MOD("wdt0_clk", R9A09G011_WDT0_CLK, CLK_MAIN, 0x428, 13),
- DEF_MOD("cperi_grpf", R9A09G011_CPERI_GRPF_PCLK, CLK_SEL_E, 0x434, 0),
- DEF_MOD("pwm8_clk", R9A09G011_PWM8_CLK, CLK_MAIN, 0x434, 4),
- DEF_MOD("pwm9_clk", R9A09G011_PWM9_CLK, CLK_MAIN, 0x434, 5),
- DEF_MOD("pwm10_clk", R9A09G011_PWM10_CLK, CLK_MAIN, 0x434, 6),
- DEF_MOD("pwm11_clk", R9A09G011_PWM11_CLK, CLK_MAIN, 0x434, 7),
- DEF_MOD("pwm12_clk", R9A09G011_PWM12_CLK, CLK_MAIN, 0x434, 8),
- DEF_MOD("pwm13_clk", R9A09G011_PWM13_CLK, CLK_MAIN, 0x434, 9),
- DEF_MOD("pwm14_clk", R9A09G011_PWM14_CLK, CLK_MAIN, 0x434, 10),
- DEF_MOD("cperi_grpg", R9A09G011_CPERI_GRPG_PCLK, CLK_SEL_E, 0x438, 0),
- DEF_MOD("cperi_grph", R9A09G011_CPERI_GRPH_PCLK, CLK_SEL_E, 0x438, 1),
- DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4),
- DEF_MOD("urt0_clk", R9A09G011_URT0_CLK, CLK_SEL_W0, 0x438, 5),
- DEF_MOD("csi0_clk", R9A09G011_CSI0_CLK, CLK_SEL_CSI0, 0x438, 8),
- DEF_MOD("csi4_clk", R9A09G011_CSI4_CLK, CLK_SEL_CSI4, 0x438, 12),
- DEF_MOD("ca53", R9A09G011_CA53_CLK, CLK_DIV_A, 0x448, 0),
+ DEF_MOD("pfc", R9A09G011_PFC_PCLK, CLK_MAIN, 0x400, 2, 0),
+ DEF_MOD("gic", R9A09G011_GIC_CLK, CLK_SEL_B_D2, 0x400, 5, 0),
+ DEF_MOD("sdi0_aclk", R9A09G011_SDI0_ACLK, CLK_SEL_D, 0x408, 0, 0),
+ DEF_MOD("sdi0_imclk", R9A09G011_SDI0_IMCLK, CLK_SEL_SDI, 0x408, 1, 0),
+ DEF_MOD("sdi0_imclk2", R9A09G011_SDI0_IMCLK2, CLK_SEL_SDI, 0x408, 2, 0),
+ DEF_MOD("sdi0_clk_hs", R9A09G011_SDI0_CLK_HS, CLK_PLL2_800, 0x408, 3, 0),
+ DEF_MOD("sdi1_aclk", R9A09G011_SDI1_ACLK, CLK_SEL_D, 0x408, 4, 0),
+ DEF_MOD("sdi1_imclk", R9A09G011_SDI1_IMCLK, CLK_SEL_SDI, 0x408, 5, 0),
+ DEF_MOD("sdi1_imclk2", R9A09G011_SDI1_IMCLK2, CLK_SEL_SDI, 0x408, 6, 0),
+ DEF_MOD("sdi1_clk_hs", R9A09G011_SDI1_CLK_HS, CLK_PLL2_800, 0x408, 7, 0),
+ DEF_MOD("emm_aclk", R9A09G011_EMM_ACLK, CLK_SEL_D, 0x408, 8, 0),
+ DEF_MOD("emm_imclk", R9A09G011_EMM_IMCLK, CLK_SEL_SDI, 0x408, 9, 0),
+ DEF_MOD("emm_imclk2", R9A09G011_EMM_IMCLK2, CLK_SEL_SDI, 0x408, 10, 0),
+ DEF_MOD("emm_clk_hs", R9A09G011_EMM_CLK_HS, CLK_PLL2_800, 0x408, 11, 0),
+ DEF_COUPLED("eth_axi", R9A09G011_ETH0_CLK_AXI, CLK_PLL2_200, 0x40c, 8, 0),
+ DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8, 0),
+ DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9, 0),
+ DEF_MOD("usb_aclk_h", R9A09G011_USB_ACLK_H, CLK_SEL_D, 0x40c, 4, 0),
+ DEF_MOD("usb_aclk_p", R9A09G011_USB_ACLK_P, CLK_SEL_D, 0x40c, 5, 0),
+ DEF_MOD("usb_pclk", R9A09G011_USB_PCLK, CLK_SEL_E, 0x40c, 6, 0),
+ DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12, 0),
+ DEF_MOD("iic_pclk0", R9A09G011_IIC_PCLK0, CLK_SEL_E, 0x420, 12, 0),
+ DEF_MOD("cperi_grpb", R9A09G011_CPERI_GRPB_PCLK, CLK_SEL_E, 0x424, 0, 0),
+ DEF_MOD("tim_clk_8", R9A09G011_TIM8_CLK, CLK_MAIN_2, 0x424, 4, 0),
+ DEF_MOD("tim_clk_9", R9A09G011_TIM9_CLK, CLK_MAIN_2, 0x424, 5, 0),
+ DEF_MOD("tim_clk_10", R9A09G011_TIM10_CLK, CLK_MAIN_2, 0x424, 6, 0),
+ DEF_MOD("tim_clk_11", R9A09G011_TIM11_CLK, CLK_MAIN_2, 0x424, 7, 0),
+ DEF_MOD("tim_clk_12", R9A09G011_TIM12_CLK, CLK_MAIN_2, 0x424, 8, 0),
+ DEF_MOD("tim_clk_13", R9A09G011_TIM13_CLK, CLK_MAIN_2, 0x424, 9, 0),
+ DEF_MOD("tim_clk_14", R9A09G011_TIM14_CLK, CLK_MAIN_2, 0x424, 10, 0),
+ DEF_MOD("tim_clk_15", R9A09G011_TIM15_CLK, CLK_MAIN_2, 0x424, 11, 0),
+ DEF_MOD("iic_pclk1", R9A09G011_IIC_PCLK1, CLK_SEL_E, 0x424, 12, 0),
+ DEF_MOD("cperi_grpc", R9A09G011_CPERI_GRPC_PCLK, CLK_SEL_E, 0x428, 0, 0),
+ DEF_MOD("tim_clk_16", R9A09G011_TIM16_CLK, CLK_MAIN_2, 0x428, 4, 0),
+ DEF_MOD("tim_clk_17", R9A09G011_TIM17_CLK, CLK_MAIN_2, 0x428, 5, 0),
+ DEF_MOD("tim_clk_18", R9A09G011_TIM18_CLK, CLK_MAIN_2, 0x428, 6, 0),
+ DEF_MOD("tim_clk_19", R9A09G011_TIM19_CLK, CLK_MAIN_2, 0x428, 7, 0),
+ DEF_MOD("tim_clk_20", R9A09G011_TIM20_CLK, CLK_MAIN_2, 0x428, 8, 0),
+ DEF_MOD("tim_clk_21", R9A09G011_TIM21_CLK, CLK_MAIN_2, 0x428, 9, 0),
+ DEF_MOD("tim_clk_22", R9A09G011_TIM22_CLK, CLK_MAIN_2, 0x428, 10, 0),
+ DEF_MOD("tim_clk_23", R9A09G011_TIM23_CLK, CLK_MAIN_2, 0x428, 11, 0),
+ DEF_MOD("wdt0_pclk", R9A09G011_WDT0_PCLK, CLK_SEL_E, 0x428, 12, 0),
+ DEF_MOD("wdt0_clk", R9A09G011_WDT0_CLK, CLK_MAIN, 0x428, 13, 0),
+ DEF_MOD("cperi_grpf", R9A09G011_CPERI_GRPF_PCLK, CLK_SEL_E, 0x434, 0, 0),
+ DEF_MOD("pwm8_clk", R9A09G011_PWM8_CLK, CLK_MAIN, 0x434, 4, 0),
+ DEF_MOD("pwm9_clk", R9A09G011_PWM9_CLK, CLK_MAIN, 0x434, 5, 0),
+ DEF_MOD("pwm10_clk", R9A09G011_PWM10_CLK, CLK_MAIN, 0x434, 6, 0),
+ DEF_MOD("pwm11_clk", R9A09G011_PWM11_CLK, CLK_MAIN, 0x434, 7, 0),
+ DEF_MOD("pwm12_clk", R9A09G011_PWM12_CLK, CLK_MAIN, 0x434, 8, 0),
+ DEF_MOD("pwm13_clk", R9A09G011_PWM13_CLK, CLK_MAIN, 0x434, 9, 0),
+ DEF_MOD("pwm14_clk", R9A09G011_PWM14_CLK, CLK_MAIN, 0x434, 10, 0),
+ DEF_MOD("cperi_grpg", R9A09G011_CPERI_GRPG_PCLK, CLK_SEL_E, 0x438, 0, 0),
+ DEF_MOD("cperi_grph", R9A09G011_CPERI_GRPH_PCLK, CLK_SEL_E, 0x438, 1, 0),
+ DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4, 0),
+ DEF_MOD("urt0_clk", R9A09G011_URT0_CLK, CLK_SEL_W0, 0x438, 5, 0),
+ DEF_MOD("csi0_clk", R9A09G011_CSI0_CLK, CLK_SEL_CSI0, 0x438, 8, 0),
+ DEF_MOD("csi4_clk", R9A09G011_CSI4_CLK, CLK_SEL_CSI4, 0x438, 12, 0),
+ DEF_MOD("ca53", R9A09G011_CA53_CLK, CLK_DIV_A, 0x448, 0, 0),
};
static const struct rzg2l_reset r9a09g011_resets[] = {
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
index 21699999cedd..26e2be7667eb 100644
--- a/drivers/clk/renesas/r9a09g047-cpg.c
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -29,6 +29,7 @@ enum clk_ids {
CLK_PLLDTY,
CLK_PLLCA55,
CLK_PLLVDO,
+ CLK_PLLETH,
/* Internal Core Clocks */
CLK_PLLCM33_DIV3,
@@ -46,6 +47,15 @@ enum clk_ids {
CLK_PLLDTY_ACPU,
CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV8,
+ CLK_PLLETH_DIV_250_FIX,
+ CLK_PLLETH_DIV_125_FIX,
+ CLK_CSDIV_PLLETH_GBE0,
+ CLK_CSDIV_PLLETH_GBE1,
+ CLK_SMUX2_GBE0_TXCLK,
+ CLK_SMUX2_GBE0_RXCLK,
+ CLK_SMUX2_GBE1_TXCLK,
+ CLK_SMUX2_GBE1_RXCLK,
CLK_PLLDTY_DIV16,
CLK_PLLVDO_CRU0,
CLK_PLLVDO_GPU,
@@ -85,7 +95,18 @@ static const struct clk_div_table dtable_2_64[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_100[] = {
+ {0, 2},
+ {1, 10},
+ {2, 100},
+ {0, 0},
+};
+
/* Mux clock tables */
+static const char * const smux2_gbe0_rxclk[] = { ".plleth_gbe0", "et0_rxclk" };
+static const char * const smux2_gbe0_txclk[] = { ".plleth_gbe0", "et0_txclk" };
+static const char * const smux2_gbe1_rxclk[] = { ".plleth_gbe1", "et1_rxclk" };
+static const char * const smux2_gbe1_txclk[] = { ".plleth_gbe1", "et1_txclk" };
static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
@@ -100,6 +121,7 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
+ DEF_FIXED(".plleth", CLK_PLLETH, CLK_QEXTAL, 125, 3),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
/* Internal Core Clocks */
@@ -122,6 +144,18 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8),
+
+ DEF_FIXED(".plleth_250_fix", CLK_PLLETH_DIV_250_FIX, CLK_PLLETH, 1, 4),
+ DEF_FIXED(".plleth_125_fix", CLK_PLLETH_DIV_125_FIX, CLK_PLLETH_DIV_250_FIX, 1, 2),
+ DEF_CSDIV(".plleth_gbe0", CLK_CSDIV_PLLETH_GBE0, CLK_PLLETH_DIV_250_FIX,
+ CSDIV0_DIVCTL0, dtable_2_100),
+ DEF_CSDIV(".plleth_gbe1", CLK_CSDIV_PLLETH_GBE1, CLK_PLLETH_DIV_250_FIX,
+ CSDIV0_DIVCTL1, dtable_2_100),
+ DEF_SMUX(".smux2_gbe0_txclk", CLK_SMUX2_GBE0_TXCLK, SSEL0_SELCTL2, smux2_gbe0_txclk),
+ DEF_SMUX(".smux2_gbe0_rxclk", CLK_SMUX2_GBE0_RXCLK, SSEL0_SELCTL3, smux2_gbe0_rxclk),
+ DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
+ DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
@@ -139,6 +173,10 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G047_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
DEF_FIXED("spi_clk_spi", R9A09G047_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2),
+ DEF_FIXED("gbeth_0_clk_ptp_ref_i", R9A09G047_GBETH_0_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("gbeth_1_clk_ptp_ref_i", R9A09G047_GBETH_1_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
@@ -160,6 +198,12 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(5, BIT(13))),
DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("i3c_0_pclkrw", CLK_PLLCLN_DIV16, 9, 0, 4, 16,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_pclk", CLK_PLLCLN_DIV16, 9, 1, 4, 17,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_tclk", CLK_PLLCLN_DIV8, 9, 2, 4, 18,
+ BUS_MSTOP(10, BIT(15))),
DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
BUS_MSTOP(3, BIT(13))),
DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
@@ -214,6 +258,30 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_i", CLK_SMUX2_GBE0_TXCLK, 11, 8, 5, 24,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_i", CLK_SMUX2_GBE0_RXCLK, 11, 9, 5, 25,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_180_i", CLK_SMUX2_GBE0_TXCLK, 11, 10, 5, 26,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_180_i", CLK_SMUX2_GBE0_RXCLK, 11, 11, 5, 27,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD("gbeth_0_aclk_csr_i", CLK_PLLDTY_DIV8, 11, 12, 5, 28,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD("gbeth_0_aclk_i", CLK_PLLDTY_DIV8, 11, 13, 5, 29,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_i", CLK_SMUX2_GBE1_TXCLK, 11, 14, 5, 30,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_i", CLK_SMUX2_GBE1_RXCLK, 11, 15, 5, 31,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_180_i", CLK_SMUX2_GBE1_TXCLK, 12, 0, 6, 0,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_180_i", CLK_SMUX2_GBE1_RXCLK, 12, 1, 6, 1,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD("gbeth_1_aclk_csr_i", CLK_PLLDTY_DIV8, 12, 2, 6, 2,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("gbeth_1_aclk_i", CLK_PLLDTY_DIV8, 12, 3, 6, 3,
+ BUS_MSTOP(8, BIT(6))),
DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
BUS_MSTOP(9, BIT(4))),
DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
@@ -239,6 +307,8 @@ static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 6, 4, 7), /* I3C_0_PRESETN */
+ DEF_RST(9, 7, 4, 8), /* I3C_0_TRESETN */
DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
@@ -255,6 +325,8 @@ static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(11, 0, 5, 1), /* GBETH_0_ARESETN_I */
+ DEF_RST(11, 1, 5, 2), /* GBETH_1_ARESETN_I */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
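The new Ethernet clock tree is easier to check with the rates written out. Assuming the usual 24 MHz QEXTAL for this family (the input rate is not stated in the diff), the fixed and static-divider stages work out as:

/*
 *  .plleth          = 24 MHz * 125 / 3        = 1000 MHz
 *  .plleth_250_fix  = 1000 MHz / 4            =  250 MHz
 *  .plleth_125_fix  =  250 MHz / 2            =  125 MHz  (PTP reference)
 *  .plleth_gbe0/1   =  250 MHz / {2, 10, 100} =  125 / 25 / 2.5 MHz
 *
 * dtable_2_100 thus selects the GbE TX clock for 1000/100/10 Mbit/s links,
 * while the smux2_gbe* muxes can instead take the TX/RX clocks from the
 * external et*_txclk/et*_rxclk pads (mux input 1).
 */

That external-pad input is presumably what the trailing 1 in the DEF_MOD_MUX_EXTERNAL() entries selects; the diff itself never names that parameter.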
diff --git a/drivers/clk/renesas/r9a09g056-cpg.c b/drivers/clk/renesas/r9a09g056-cpg.c
index e2712a25c43a..437af86f49dd 100644
--- a/drivers/clk/renesas/r9a09g056-cpg.c
+++ b/drivers/clk/renesas/r9a09g056-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G056_GBETH_1_CLK_PTP_REF_I,
+ LAST_DT_CORE_CLK = R9A09G056_SPI_CLK_SPI,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -28,13 +28,34 @@ enum clk_ids {
CLK_PLLCLN,
CLK_PLLDTY,
CLK_PLLCA55,
+ CLK_PLLETH,
+ CLK_PLLGPU,
/* Internal Core Clocks */
+ CLK_PLLCM33_DIV3,
+ CLK_PLLCM33_DIV4,
+ CLK_PLLCM33_DIV5,
CLK_PLLCM33_DIV16,
+ CLK_SMUX2_XSPI_CLK0,
+ CLK_SMUX2_XSPI_CLK1,
+ CLK_PLLCM33_XSPI,
+ CLK_PLLCM33_GEAR,
CLK_PLLCLN_DIV2,
CLK_PLLCLN_DIV8,
+ CLK_PLLCLN_DIV16,
CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV8,
+ CLK_PLLETH_DIV_250_FIX,
+ CLK_PLLETH_DIV_125_FIX,
+ CLK_CSDIV_PLLETH_GBE0,
+ CLK_CSDIV_PLLETH_GBE1,
+ CLK_SMUX2_GBE0_TXCLK,
+ CLK_SMUX2_GBE0_RXCLK,
+ CLK_SMUX2_GBE1_TXCLK,
+ CLK_SMUX2_GBE1_RXCLK,
+ CLK_PLLGPU_GEAR,
/* Module Clocks */
MOD_CLK_BASE,
@@ -48,6 +69,14 @@ static const struct clk_div_table dtable_1_8[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_16[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -57,6 +86,21 @@ static const struct clk_div_table dtable_2_64[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_100[] = {
+ {0, 2},
+ {1, 10},
+ {2, 100},
+ {0, 0},
+};
+
+/* Mux clock tables */
+static const char * const smux2_gbe0_rxclk[] = { ".plleth_gbe0", "et0_rxclk" };
+static const char * const smux2_gbe0_txclk[] = { ".plleth_gbe0", "et0_txclk" };
+static const char * const smux2_gbe1_rxclk[] = { ".plleth_gbe1", "et1_rxclk" };
+static const char * const smux2_gbe1_txclk[] = { ".plleth_gbe1", "et1_txclk" };
+static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
+static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
+
static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
@@ -68,15 +112,41 @@ static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
+ DEF_FIXED(".plleth", CLK_PLLETH, CLK_QEXTAL, 125, 3),
+ DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU),
/* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
+ DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
+ DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
+ DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
+ DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
+ dtable_2_16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
+ DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8),
+
+ DEF_FIXED(".plleth_250_fix", CLK_PLLETH_DIV_250_FIX, CLK_PLLETH, 1, 4),
+ DEF_FIXED(".plleth_125_fix", CLK_PLLETH_DIV_125_FIX, CLK_PLLETH_DIV_250_FIX, 1, 2),
+ DEF_CSDIV(".plleth_gbe0", CLK_CSDIV_PLLETH_GBE0,
+ CLK_PLLETH_DIV_250_FIX, CSDIV0_DIVCTL0, dtable_2_100),
+ DEF_CSDIV(".plleth_gbe1", CLK_CSDIV_PLLETH_GBE1,
+ CLK_PLLETH_DIV_250_FIX, CSDIV0_DIVCTL1, dtable_2_100),
+ DEF_SMUX(".smux2_gbe0_txclk", CLK_SMUX2_GBE0_TXCLK, SSEL0_SELCTL2, smux2_gbe0_txclk),
+ DEF_SMUX(".smux2_gbe0_rxclk", CLK_SMUX2_GBE0_RXCLK, SSEL0_SELCTL3, smux2_gbe0_rxclk),
+ DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
+ DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
+
+ DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64),
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G056_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
@@ -89,13 +159,76 @@ static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
DEF_DDIV("ca55_0_coreclk3", R9A09G056_CA55_0_CORE_CLK3, CLK_PLLCA55,
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G056_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("usb2_0_clk_core0", R9A09G056_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("gbeth_0_clk_ptp_ref_i", R9A09G056_GBETH_0_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("gbeth_1_clk_ptp_ref_i", R9A09G056_GBETH_1_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED_MOD_STATUS("spi_clk_spi", R9A09G056_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2,
+ FIXED_MOD_CONF_XSPI),
};
static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3,
+ BUS_MSTOP(5, BIT(10))),
+ DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4,
+ BUS_MSTOP(5, BIT(11))),
+ DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5,
+ BUS_MSTOP(2, BIT(13))),
+ DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6,
+ BUS_MSTOP(2, BIT(14))),
+ DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7,
+ BUS_MSTOP(11, BIT(13))),
+ DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8,
+ BUS_MSTOP(11, BIT(14))),
+ DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9,
+ BUS_MSTOP(11, BIT(15))),
+ DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10,
+ BUS_MSTOP(12, BIT(0))),
+ DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
+ BUS_MSTOP(5, BIT(13))),
DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
+ BUS_MSTOP(3, BIT(13))),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
+ BUS_MSTOP(1, BIT(1))),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21,
+ BUS_MSTOP(1, BIT(2))),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22,
+ BUS_MSTOP(1, BIT(3))),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23,
+ BUS_MSTOP(1, BIT(4))),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24,
+ BUS_MSTOP(1, BIT(5))),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25,
+ BUS_MSTOP(1, BIT(6))),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26,
+ BUS_MSTOP(1, BIT(7))),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
+ BUS_MSTOP(1, BIT(8))),
+ DEF_MOD("spi_hclk", CLK_PLLCM33_GEAR, 9, 15, 4, 31,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_aclk", CLK_PLLCM33_GEAR, 10, 0, 5, 0,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_clk_spix2", CLK_PLLCM33_XSPI, 10, 1, 5, 2,
+ BUS_MSTOP(4, BIT(5))),
DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
BUS_MSTOP(8, BIT(2))),
DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
@@ -120,16 +253,83 @@ static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
+ BUS_MSTOP(7, BIT(7))),
+ DEF_MOD("usb2_0_u2p_exr_cpuclk", CLK_PLLDTY_ACPU_DIV4, 11, 5, 5, 21,
+ BUS_MSTOP(7, BIT(9))),
+ DEF_MOD("usb2_0_pclk_usbtst0", CLK_PLLDTY_ACPU_DIV4, 11, 6, 5, 22,
+ BUS_MSTOP(7, BIT(10))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_i", CLK_SMUX2_GBE0_TXCLK, 11, 8, 5, 24,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_i", CLK_SMUX2_GBE0_RXCLK, 11, 9, 5, 25,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_180_i", CLK_SMUX2_GBE0_TXCLK, 11, 10, 5, 26,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_180_i", CLK_SMUX2_GBE0_RXCLK, 11, 11, 5, 27,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD("gbeth_0_aclk_csr_i", CLK_PLLDTY_DIV8, 11, 12, 5, 28,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD("gbeth_0_aclk_i", CLK_PLLDTY_DIV8, 11, 13, 5, 29,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_i", CLK_SMUX2_GBE1_TXCLK, 11, 14, 5, 30,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_i", CLK_SMUX2_GBE1_RXCLK, 11, 15, 5, 31,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_180_i", CLK_SMUX2_GBE1_TXCLK, 12, 0, 6, 0,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_180_i", CLK_SMUX2_GBE1_RXCLK, 12, 1, 6, 1,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD("gbeth_1_aclk_csr_i", CLK_PLLDTY_DIV8, 12, 2, 6, 2,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("gbeth_1_aclk_i", CLK_PLLDTY_DIV8, 12, 3, 6, 3,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("gpu_0_clk", CLK_PLLGPU_GEAR, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
};
static const struct rzv2h_reset r9a09g056_resets[] __initconst = {
DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(6, 13, 2, 30), /* GTM_0_PRESETZ */
+ DEF_RST(6, 14, 2, 31), /* GTM_1_PRESETZ */
+ DEF_RST(6, 15, 3, 0), /* GTM_2_PRESETZ */
+ DEF_RST(7, 0, 3, 1), /* GTM_3_PRESETZ */
+ DEF_RST(7, 1, 3, 2), /* GTM_4_PRESETZ */
+ DEF_RST(7, 2, 3, 3), /* GTM_5_PRESETZ */
+ DEF_RST(7, 3, 3, 4), /* GTM_6_PRESETZ */
+ DEF_RST(7, 4, 3, 5), /* GTM_7_PRESETZ */
+ DEF_RST(7, 5, 3, 6), /* WDT_0_RESET */
+ DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
+ DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
+ DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
+ DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
+ DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
+ DEF_RST(9, 11, 4, 12), /* RIIC_3_MRST */
+ DEF_RST(9, 12, 4, 13), /* RIIC_4_MRST */
+ DEF_RST(9, 13, 4, 14), /* RIIC_5_MRST */
+ DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
+ DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
+ DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
+ DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
+ DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
+ DEF_RST(11, 0, 5, 1), /* GBETH_0_ARESETN_I */
+ DEF_RST(11, 1, 5, 2), /* GBETH_1_ARESETN_I */
+ DEF_RST(13, 13, 6, 14), /* GPU_0_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GPU_0_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GPU_0_ACE_RESETN */
};
const struct rzv2h_cpg_info r9a09g056_cpg_info __initconst = {
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index 3c40e36259fe..f7de69a93de1 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G057_GBETH_1_CLK_PTP_REF_I,
+ LAST_DT_CORE_CLK = R9A09G057_SPI_CLK_SPI,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -29,12 +29,18 @@ enum clk_ids {
CLK_PLLDTY,
CLK_PLLCA55,
CLK_PLLVDO,
+ CLK_PLLETH,
CLK_PLLGPU,
/* Internal Core Clocks */
+ CLK_PLLCM33_DIV3,
CLK_PLLCM33_DIV4,
- CLK_PLLCM33_DIV4_PLLCM33,
+ CLK_PLLCM33_DIV5,
CLK_PLLCM33_DIV16,
+ CLK_PLLCM33_GEAR,
+ CLK_SMUX2_XSPI_CLK0,
+ CLK_SMUX2_XSPI_CLK1,
+ CLK_PLLCM33_XSPI,
CLK_PLLCLN_DIV2,
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
@@ -49,6 +55,14 @@ enum clk_ids {
CLK_PLLVDO_CRU1,
CLK_PLLVDO_CRU2,
CLK_PLLVDO_CRU3,
+ CLK_PLLETH_DIV_250_FIX,
+ CLK_PLLETH_DIV_125_FIX,
+ CLK_CSDIV_PLLETH_GBE0,
+ CLK_CSDIV_PLLETH_GBE1,
+ CLK_SMUX2_GBE0_TXCLK,
+ CLK_SMUX2_GBE0_RXCLK,
+ CLK_SMUX2_GBE1_TXCLK,
+ CLK_SMUX2_GBE1_RXCLK,
CLK_PLLGPU_GEAR,
/* Module Clocks */
@@ -69,6 +83,14 @@ static const struct clk_div_table dtable_2_4[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_16[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -78,6 +100,21 @@ static const struct clk_div_table dtable_2_64[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_100[] = {
+ {0, 2},
+ {1, 10},
+ {2, 100},
+ {0, 0},
+};
+
+/* Mux clock tables */
+static const char * const smux2_gbe0_rxclk[] = { ".plleth_gbe0", "et0_rxclk" };
+static const char * const smux2_gbe0_txclk[] = { ".plleth_gbe0", "et0_txclk" };
+static const char * const smux2_gbe1_rxclk[] = { ".plleth_gbe1", "et1_rxclk" };
+static const char * const smux2_gbe1_txclk[] = { ".plleth_gbe1", "et1_txclk" };
+static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
+static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
+
static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
@@ -90,13 +127,20 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
+ DEF_FIXED(".plleth", CLK_PLLETH, CLK_QEXTAL, 125, 3),
DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU),
/* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
- DEF_DDIV(".pllcm33_div4_pllcm33", CLK_PLLCM33_DIV4_PLLCM33,
+ DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR,
CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
+ DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
+ DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
+ dtable_2_16),
DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
@@ -115,6 +159,17 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV(".pllvdo_cru2", CLK_PLLVDO_CRU2, CLK_PLLVDO, CDDIV4_DIVCTL1, dtable_2_4),
DEF_DDIV(".pllvdo_cru3", CLK_PLLVDO_CRU3, CLK_PLLVDO, CDDIV4_DIVCTL2, dtable_2_4),
+ DEF_FIXED(".plleth_250_fix", CLK_PLLETH_DIV_250_FIX, CLK_PLLETH, 1, 4),
+ DEF_FIXED(".plleth_125_fix", CLK_PLLETH_DIV_125_FIX, CLK_PLLETH_DIV_250_FIX, 1, 2),
+ DEF_CSDIV(".plleth_gbe0", CLK_CSDIV_PLLETH_GBE0,
+ CLK_PLLETH_DIV_250_FIX, CSDIV0_DIVCTL0, dtable_2_100),
+ DEF_CSDIV(".plleth_gbe1", CLK_CSDIV_PLLETH_GBE1,
+ CLK_PLLETH_DIV_250_FIX, CSDIV0_DIVCTL1, dtable_2_100),
+ DEF_SMUX(".smux2_gbe0_txclk", CLK_SMUX2_GBE0_TXCLK, SSEL0_SELCTL2, smux2_gbe0_txclk),
+ DEF_SMUX(".smux2_gbe0_rxclk", CLK_SMUX2_GBE0_RXCLK, SSEL0_SELCTL3, smux2_gbe0_rxclk),
+ DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
+ DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
+
DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64),
/* Core Clocks */
@@ -130,10 +185,16 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
DEF_FIXED("usb2_0_clk_core0", R9A09G057_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1),
DEF_FIXED("usb2_0_clk_core1", R9A09G057_USB2_0_CLK_CORE1, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("gbeth_0_clk_ptp_ref_i", R9A09G057_GBETH_0_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("gbeth_1_clk_ptp_ref_i", R9A09G057_GBETH_1_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED_MOD_STATUS("spi_clk_spi", R9A09G057_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2,
+ FIXED_MOD_CONF_XSPI),
};
static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
- DEF_MOD("dmac_0_aclk", CLK_PLLCM33_DIV4_PLLCM33, 0, 0, 0, 0,
+ DEF_MOD("dmac_0_aclk", CLK_PLLCM33_GEAR, 0, 0, 0, 0,
BUS_MSTOP(5, BIT(9))),
DEF_MOD("dmac_1_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 1, 0, 1,
BUS_MSTOP(3, BIT(2))),
@@ -179,6 +240,24 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(5, BIT(13))),
DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("rspi_0_pclk", CLK_PLLCLN_DIV8, 5, 4, 2, 20,
+ BUS_MSTOP(11, BIT(0))),
+ DEF_MOD("rspi_0_pclk_sfr", CLK_PLLCLN_DIV8, 5, 5, 2, 21,
+ BUS_MSTOP(11, BIT(0))),
+ DEF_MOD("rspi_0_tclk", CLK_PLLCLN_DIV8, 5, 6, 2, 22,
+ BUS_MSTOP(11, BIT(0))),
+ DEF_MOD("rspi_1_pclk", CLK_PLLCLN_DIV8, 5, 7, 2, 23,
+ BUS_MSTOP(11, BIT(1))),
+ DEF_MOD("rspi_1_pclk_sfr", CLK_PLLCLN_DIV8, 5, 8, 2, 24,
+ BUS_MSTOP(11, BIT(1))),
+ DEF_MOD("rspi_1_tclk", CLK_PLLCLN_DIV8, 5, 9, 2, 25,
+ BUS_MSTOP(11, BIT(1))),
+ DEF_MOD("rspi_2_pclk", CLK_PLLCLN_DIV8, 5, 10, 2, 26,
+ BUS_MSTOP(11, BIT(2))),
+ DEF_MOD("rspi_2_pclk_sfr", CLK_PLLCLN_DIV8, 5, 11, 2, 27,
+ BUS_MSTOP(11, BIT(2))),
+ DEF_MOD("rspi_2_tclk", CLK_PLLCLN_DIV8, 5, 12, 2, 28,
+ BUS_MSTOP(11, BIT(2))),
DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
BUS_MSTOP(3, BIT(14))),
DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
@@ -199,6 +278,12 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(1, BIT(7))),
DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
BUS_MSTOP(1, BIT(8))),
+ DEF_MOD("spi_hclk", CLK_PLLCM33_GEAR, 9, 15, 4, 31,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_aclk", CLK_PLLCM33_GEAR, 10, 0, 5, 0,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_clk_spix2", CLK_PLLCM33_XSPI, 10, 1, 5, 2,
+ BUS_MSTOP(4, BIT(5))),
DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
BUS_MSTOP(8, BIT(2))),
DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
@@ -233,6 +318,30 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(7, BIT(10))),
DEF_MOD("usb2_0_pclk_usbtst1", CLK_PLLDTY_ACPU_DIV4, 11, 7, 5, 23,
BUS_MSTOP(7, BIT(11))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_i", CLK_SMUX2_GBE0_TXCLK, 11, 8, 5, 24,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_i", CLK_SMUX2_GBE0_RXCLK, 11, 9, 5, 25,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_180_i", CLK_SMUX2_GBE0_TXCLK, 11, 10, 5, 26,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_180_i", CLK_SMUX2_GBE0_RXCLK, 11, 11, 5, 27,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD("gbeth_0_aclk_csr_i", CLK_PLLDTY_DIV8, 11, 12, 5, 28,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD("gbeth_0_aclk_i", CLK_PLLDTY_DIV8, 11, 13, 5, 29,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_i", CLK_SMUX2_GBE1_TXCLK, 11, 14, 5, 30,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_i", CLK_SMUX2_GBE1_RXCLK, 11, 15, 5, 31,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_180_i", CLK_SMUX2_GBE1_TXCLK, 12, 0, 6, 0,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_180_i", CLK_SMUX2_GBE1_RXCLK, 12, 1, 6, 1,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD("gbeth_1_aclk_csr_i", CLK_PLLDTY_DIV8, 12, 2, 6, 2,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("gbeth_1_aclk_i", CLK_PLLDTY_DIV8, 12, 3, 6, 3,
+ BUS_MSTOP(8, BIT(6))),
DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
BUS_MSTOP(9, BIT(4))),
DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
@@ -287,6 +396,12 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
+ DEF_RST(7, 11, 3, 12), /* RSPI_0_PRESETN */
+ DEF_RST(7, 12, 3, 13), /* RSPI_0_TRESETN */
+ DEF_RST(7, 13, 3, 14), /* RSPI_1_PRESETN */
+ DEF_RST(7, 14, 3, 15), /* RSPI_1_TRESETN */
+ DEF_RST(7, 15, 3, 16), /* RSPI_2_PRESETN */
+ DEF_RST(8, 0, 3, 17), /* RSPI_2_TRESETN */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
@@ -297,6 +412,8 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
@@ -304,6 +421,8 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(10, 13, 4, 30), /* USB2_0_U2H1_HRESETN */
DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
+ DEF_RST(11, 0, 5, 1), /* GBETH_0_ARESETN_I */
+ DEF_RST(11, 1, 5, 2), /* GBETH_1_ARESETN_I */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
diff --git a/drivers/clk/renesas/r9a09g077-cpg.c b/drivers/clk/renesas/r9a09g077-cpg.c
new file mode 100644
index 000000000000..c920d6a9707f
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g077-cpg.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r9a09g077 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h>
+#include <dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h>
+#include "renesas-cpg-mssr.h"
+
+#define RZT2H_REG_BLOCK_SHIFT 11
+#define RZT2H_REG_OFFSET_MASK GENMASK(10, 0)
+#define RZT2H_REG_CONF(block, offset) (((block) << RZT2H_REG_BLOCK_SHIFT) | \
+ ((offset) & RZT2H_REG_OFFSET_MASK))
+
+#define RZT2H_REG_BLOCK(x) ((x) >> RZT2H_REG_BLOCK_SHIFT)
+#define RZT2H_REG_OFFSET(x) ((x) & RZT2H_REG_OFFSET_MASK)
+
+#define SCKCR RZT2H_REG_CONF(0, 0x00)
+#define SCKCR2 RZT2H_REG_CONF(1, 0x04)
+#define SCKCR3 RZT2H_REG_CONF(0, 0x08)
+
+#define OFFSET_MASK GENMASK(31, 20)
+#define SHIFT_MASK GENMASK(19, 12)
+#define WIDTH_MASK GENMASK(11, 8)
+
+#define CONF_PACK(offset, shift, width) \
+ (FIELD_PREP_CONST(OFFSET_MASK, (offset)) | \
+ FIELD_PREP_CONST(SHIFT_MASK, (shift)) | \
+ FIELD_PREP_CONST(WIDTH_MASK, (width)))
+
+#define GET_SHIFT(val) FIELD_GET(SHIFT_MASK, val)
+#define GET_WIDTH(val) FIELD_GET(WIDTH_MASK, val)
+#define GET_REG_OFFSET(val) FIELD_GET(OFFSET_MASK, val)
+
+#define DIVCA55C0 CONF_PACK(SCKCR2, 8, 1)
+#define DIVCA55C1 CONF_PACK(SCKCR2, 9, 1)
+#define DIVCA55C2 CONF_PACK(SCKCR2, 10, 1)
+#define DIVCA55C3 CONF_PACK(SCKCR2, 11, 1)
+#define DIVCA55S CONF_PACK(SCKCR2, 12, 1)
+
+#define DIVSCI0ASYNC CONF_PACK(SCKCR3, 6, 2)
+
+#define SEL_PLL CONF_PACK(SCKCR, 22, 1)
+
+enum rzt2h_clk_types {
+ CLK_TYPE_RZT2H_DIV = CLK_TYPE_CUSTOM, /* Clock with divider */
+ CLK_TYPE_RZT2H_MUX, /* Clock with clock source selector */
+};
+
+#define DEF_DIV(_name, _id, _parent, _conf, _dtable) \
+ DEF_TYPE(_name, _id, CLK_TYPE_RZT2H_DIV, .conf = _conf, \
+ .parent = _parent, .dtable = _dtable, .flag = 0)
+#define DEF_MUX(_name, _id, _conf, _parent_names, _num_parents, _mux_flags) \
+ DEF_TYPE(_name, _id, CLK_TYPE_RZT2H_MUX, .conf = _conf, \
+ .parent_names = _parent_names, .num_parents = _num_parents, \
+ .flag = 0, .mux_flags = _mux_flags)
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G077_SDHI_CLKHS,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_LOCO,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL4,
+ CLK_SEL_CLK_PLL0,
+ CLK_SEL_CLK_PLL1,
+ CLK_SEL_CLK_PLL2,
+ CLK_SEL_CLK_PLL4,
+ CLK_PLL4D1,
+ CLK_SCI0ASYNC,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_1_2[] = {
+ {0, 2},
+ {1, 1},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_24_25_30_32[] = {
+ {0, 32},
+ {1, 30},
+ {2, 25},
+ {3, 24},
+ {0, 0},
+};
+
+/* Mux clock tables */
+
+static const char * const sel_clk_pll0[] = { ".loco", ".pll0" };
+static const char * const sel_clk_pll1[] = { ".loco", ".pll1" };
+static const char * const sel_clk_pll2[] = { ".loco", ".pll2" };
+static const char * const sel_clk_pll4[] = { ".loco", ".pll4" };
+
+static const struct cpg_core_clk r9a09g077_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_RATE(".loco", CLK_LOCO, 1000 * 1000),
+ DEF_FIXED(".pll0", CLK_PLL0, CLK_EXTAL, 1, 48),
+ DEF_FIXED(".pll1", CLK_PLL1, CLK_EXTAL, 1, 40),
+ DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 1, 32),
+ DEF_FIXED(".pll4", CLK_PLL4, CLK_EXTAL, 1, 96),
+
+ DEF_MUX(".sel_clk_pll0", CLK_SEL_CLK_PLL0, SEL_PLL,
+ sel_clk_pll0, ARRAY_SIZE(sel_clk_pll0), CLK_MUX_READ_ONLY),
+ DEF_MUX(".sel_clk_pll1", CLK_SEL_CLK_PLL1, SEL_PLL,
+ sel_clk_pll1, ARRAY_SIZE(sel_clk_pll1), CLK_MUX_READ_ONLY),
+ DEF_MUX(".sel_clk_pll2", CLK_SEL_CLK_PLL2, SEL_PLL,
+ sel_clk_pll2, ARRAY_SIZE(sel_clk_pll2), CLK_MUX_READ_ONLY),
+ DEF_MUX(".sel_clk_pll4", CLK_SEL_CLK_PLL4, SEL_PLL,
+ sel_clk_pll4, ARRAY_SIZE(sel_clk_pll4), CLK_MUX_READ_ONLY),
+
+ DEF_FIXED(".pll4d1", CLK_PLL4D1, CLK_SEL_CLK_PLL4, 1, 1),
+ DEF_DIV(".sci0async", CLK_SCI0ASYNC, CLK_PLL4D1, DIVSCI0ASYNC,
+ dtable_24_25_30_32),
+
+ /* Core output clocks */
+ DEF_DIV("CA55C0", R9A09G077_CLK_CA55C0, CLK_SEL_CLK_PLL0, DIVCA55C0,
+ dtable_1_2),
+ DEF_DIV("CA55C1", R9A09G077_CLK_CA55C1, CLK_SEL_CLK_PLL0, DIVCA55C1,
+ dtable_1_2),
+ DEF_DIV("CA55C2", R9A09G077_CLK_CA55C2, CLK_SEL_CLK_PLL0, DIVCA55C2,
+ dtable_1_2),
+ DEF_DIV("CA55C3", R9A09G077_CLK_CA55C3, CLK_SEL_CLK_PLL0, DIVCA55C3,
+ dtable_1_2),
+ DEF_DIV("CA55S", R9A09G077_CLK_CA55S, CLK_SEL_CLK_PLL0, DIVCA55S,
+ dtable_1_2),
+ DEF_FIXED("PCLKGPTL", R9A09G077_CLK_PCLKGPTL, CLK_SEL_CLK_PLL1, 2, 1),
+ DEF_FIXED("PCLKM", R9A09G077_CLK_PCLKM, CLK_SEL_CLK_PLL1, 8, 1),
+ DEF_FIXED("PCLKL", R9A09G077_CLK_PCLKL, CLK_SEL_CLK_PLL1, 16, 1),
+ DEF_FIXED("PCLKAM", R9A09G077_CLK_PCLKAM, CLK_PLL4D1, 12, 1),
+ DEF_FIXED("SDHI_CLKHS", R9A09G077_SDHI_CLKHS, CLK_SEL_CLK_PLL2, 1, 1),
+};
+
+static const struct mssr_mod_clk r9a09g077_mod_clks[] __initconst = {
+ DEF_MOD("sci0fck", 8, CLK_SCI0ASYNC),
+ DEF_MOD("iic0", 100, R9A09G077_CLK_PCLKL),
+ DEF_MOD("iic1", 101, R9A09G077_CLK_PCLKL),
+ DEF_MOD("iic2", 601, R9A09G077_CLK_PCLKL),
+ DEF_MOD("sdhi0", 1212, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("sdhi1", 1213, R9A09G077_CLK_PCLKAM),
+};
+
+static struct clk * __init
+r9a09g077_cpg_div_clk_register(struct device *dev,
+ const struct cpg_core_clk *core,
+ void __iomem *addr, struct cpg_mssr_pub *pub)
+{
+ const struct clk *parent;
+ const char *parent_name;
+ struct clk_hw *clk_hw;
+
+ parent = pub->clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ if (core->dtable)
+ clk_hw = clk_hw_register_divider_table(dev, core->name,
+ parent_name, 0,
+ addr,
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->flag,
+ core->dtable,
+ &pub->rmw_lock);
+ else
+ clk_hw = clk_hw_register_divider(dev, core->name,
+ parent_name, 0,
+ addr,
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->flag, &pub->rmw_lock);
+
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
+static struct clk * __init
+r9a09g077_cpg_mux_clk_register(struct device *dev,
+ const struct cpg_core_clk *core,
+ void __iomem *addr, struct cpg_mssr_pub *pub)
+{
+ struct clk_hw *clk_hw;
+
+ clk_hw = devm_clk_hw_register_mux(dev, core->name,
+ core->parent_names, core->num_parents,
+ core->flag,
+ addr,
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->mux_flags, &pub->rmw_lock);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
+static struct clk * __init
+r9a09g077_cpg_clk_register(struct device *dev, const struct cpg_core_clk *core,
+ const struct cpg_mssr_info *info,
+ struct cpg_mssr_pub *pub)
+{
+ u32 offset = GET_REG_OFFSET(core->conf);
+ void __iomem *base = RZT2H_REG_BLOCK(offset) ? pub->base1 : pub->base0;
+ void __iomem *addr = base + RZT2H_REG_OFFSET(offset);
+
+ switch (core->type) {
+ case CLK_TYPE_RZT2H_DIV:
+ return r9a09g077_cpg_div_clk_register(dev, core, addr, pub);
+ case CLK_TYPE_RZT2H_MUX:
+ return r9a09g077_cpg_mux_clk_register(dev, core, addr, pub);
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+const struct cpg_mssr_info r9a09g077_cpg_mssr_info = {
+ /* Core Clocks */
+ .core_clks = r9a09g077_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g077_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g077_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g077_mod_clks),
+ .num_hw_mod_clks = 14 * 32,
+
+ .reg_layout = CLK_REG_LAYOUT_RZ_T2H,
+ .cpg_clk_register = r9a09g077_cpg_clk_register,
+};
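
The register lookup in r9a09g077_cpg_clk_register() above resolves a packed block+offset value against two register blocks. A minimal standalone sketch of that kind of encoding, assuming it mirrors the RZT2H_MSTPCR helpers introduced further below for the module-stop registers (bit 12 as block selector, bits 11:0 as byte offset; the field layout here is illustrative, not lifted from the driver):

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SHIFT	12
#define OFFSET_MASK	((1u << BLOCK_SHIFT) - 1)
#define PACK(block, off)	(((block) << BLOCK_SHIFT) | ((off) & OFFSET_MASK))
#define BLOCK(x)	((x) >> BLOCK_SHIFT)
#define OFFSET(x)	((x) & OFFSET_MASK)

int main(void)
{
	uint16_t reg = PACK(1, 0x318);	/* e.g. a register living in block 1 */

	/* The driver picks base1 for block 1, base0 for block 0, then adds
	 * the 12-bit offset to the selected mapping. */
	printf("block=%u offset=0x%03x\n", BLOCK(reg), OFFSET(reg));
	return 0;
}
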
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.c b/drivers/clk/renesas/rcar-gen2-cpg.c
index 4c3764972bad..ab34bb8c3e07 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.c
+++ b/drivers/clk/renesas/rcar-gen2-cpg.c
@@ -274,10 +274,11 @@ static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
struct clk * __init rcar_gen2_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
const struct clk_div_table *table = NULL;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
const struct clk *parent;
const char *parent_name;
unsigned int mult = 1;
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h
index bdcd4a38d48d..3d4b127fdeaf 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.h
+++ b/drivers/clk/renesas/rcar-gen2-cpg.h
@@ -32,8 +32,7 @@ struct rcar_gen2_cpg_pll_config {
struct clk *rcar_gen2_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers);
+ struct cpg_mssr_pub *pub);
int rcar_gen2_cpg_init(const struct rcar_gen2_cpg_pll_config *config,
unsigned int pll0_div, u32 mode);
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 027100e84ee4..10ae20489df9 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -345,9 +345,11 @@ static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
+ struct raw_notifier_head *notifiers = &pub->notifiers;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
const struct clk *parent;
unsigned int mult = 1;
unsigned int div = 1;
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index bfdc649bdf12..d15a5d1df71c 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -81,8 +81,7 @@ struct rcar_gen3_cpg_pll_config {
struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers);
+ struct cpg_mssr_pub *pub);
int rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
unsigned int clk_extalr, u32 mode);
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index 31aa790fd003..fb9a876aaba5 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -418,9 +418,11 @@ static const struct clk_div_table cpg_rpcsrc_div_table[] = {
struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
+ struct raw_notifier_head *notifiers = &pub->notifiers;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
const struct clk *parent;
unsigned int mult = 1;
unsigned int div = 1;
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.h b/drivers/clk/renesas/rcar-gen4-cpg.h
index 717fd148464f..6c8280b37c37 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.h
+++ b/drivers/clk/renesas/rcar-gen4-cpg.h
@@ -78,8 +78,7 @@ struct rcar_gen4_cpg_pll_config {
struct clk *rcar_gen4_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers);
+ struct cpg_mssr_pub *pub);
int rcar_gen4_cpg_init(const struct rcar_gen4_cpg_pll_config *config,
unsigned int clk_extalr, u32 mode);
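
The rcar-gen2/gen3/gen4 hunks above all apply one mechanical refactor: the loose clks/base/notifiers parameters collapse into a single struct cpg_mssr_pub pointer that each callback unpacks into locals, leaving the function bodies untouched. A userspace analogue of the pattern (all names illustrative):

#include <stdio.h>

struct pub {
	void *base0;
	int *clks;
	int notifiers;
};

/* before: register_clk(void *base, int *clks, int *notifiers) */
static int register_clk(const struct pub *pub)
{
	void *base = pub->base0;	/* local aliases keep the body diff small */
	int *clks = pub->clks;

	(void)base;
	return clks[0];
}

int main(void)
{
	int clks[] = { 42 };
	struct pub pub = { .base0 = NULL, .clks = clks, .notifiers = 0 };

	printf("%d\n", register_clk(&pub));
	return 0;
}
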
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 71431970d6e6..5ff6ee1f7d4b 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -81,6 +81,37 @@ static const u16 mstpcr_for_gen4[] = {
};
/*
+ * Module Stop Control Registers (RZ/T2H)
+ * RZ/T2H has two register blocks;
+ * bit 12 of the packed value selects between them.
+ */
+
+#define RZT2H_MSTPCR_BLOCK_SHIFT 12
+#define RZT2H_MSTPCR_OFFSET_MASK GENMASK(11, 0)
+#define RZT2H_MSTPCR(block, offset) (((block) << RZT2H_MSTPCR_BLOCK_SHIFT) | \
+ ((offset) & RZT2H_MSTPCR_OFFSET_MASK))
+
+#define RZT2H_MSTPCR_BLOCK(x) ((x) >> RZT2H_MSTPCR_BLOCK_SHIFT)
+#define RZT2H_MSTPCR_OFFSET(x) ((x) & RZT2H_MSTPCR_OFFSET_MASK)
+
+static const u16 mstpcr_for_rzt2h[] = {
+ RZT2H_MSTPCR(0, 0x300), /* MSTPCRA */
+ RZT2H_MSTPCR(0, 0x304), /* MSTPCRB */
+ RZT2H_MSTPCR(0, 0x308), /* MSTPCRC */
+ RZT2H_MSTPCR(0, 0x30c), /* MSTPCRD */
+ RZT2H_MSTPCR(0, 0x310), /* MSTPCRE */
+ 0,
+ RZT2H_MSTPCR(1, 0x318), /* MSTPCRG */
+ 0,
+ RZT2H_MSTPCR(1, 0x320), /* MSTPCRI */
+ RZT2H_MSTPCR(0, 0x324), /* MSTPCRJ */
+ RZT2H_MSTPCR(0, 0x328), /* MSTPCRK */
+ RZT2H_MSTPCR(0, 0x32c), /* MSTPCRL */
+ RZT2H_MSTPCR(0, 0x330), /* MSTPCRM */
+ RZT2H_MSTPCR(1, 0x334), /* MSTPCRN */
+};
+
+/*
* Standby Control Register offsets (RZ/A)
* Base address is FRQCR register
*/
@@ -126,16 +157,14 @@ static const u16 srstclr_for_gen4[] = {
* struct cpg_mssr_priv - Clock Pulse Generator / Module Standby
* and Software Reset Private Data
*
+ * @pub: Data passed to clock registration callback
* @rcdev: Optional reset controller entity
* @dev: CPG/MSSR device
- * @base: CPG/MSSR register block base address
* @reg_layout: CPG/MSSR register layout
- * @rmw_lock: protects RMW register accesses
* @np: Device node in DT for this CPG/MSSR module
* @num_core_clks: Number of Core Clocks in clks[]
* @num_mod_clks: Number of Module Clocks in clks[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
- * @notifiers: Notifier chain to save/restore clock state for system resume
* @status_regs: Pointer to status registers array
* @control_regs: Pointer to control registers array
* @reset_regs: Pointer to reset registers array
@@ -147,20 +176,18 @@ static const u16 srstclr_for_gen4[] = {
* @clks: Array containing all Core and Module Clocks
*/
struct cpg_mssr_priv {
+ struct cpg_mssr_pub pub;
#ifdef CONFIG_RESET_CONTROLLER
struct reset_controller_dev rcdev;
#endif
struct device *dev;
- void __iomem *base;
enum clk_reg_layout reg_layout;
- spinlock_t rmw_lock;
struct device_node *np;
unsigned int num_core_clks;
unsigned int num_mod_clks;
unsigned int last_dt_core_clk;
- struct raw_notifier_head notifiers;
const u16 *status_regs;
const u16 *control_regs;
const u16 *reset_regs;
@@ -192,6 +219,26 @@ struct mstp_clock {
#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
+static u32 cpg_rzt2h_mstp_read(struct clk_hw *hw, u16 offset)
+{
+ struct mstp_clock *clock = to_mstp_clock(hw);
+ struct cpg_mssr_priv *priv = clock->priv;
+ void __iomem *base =
+ RZT2H_MSTPCR_BLOCK(offset) ? priv->pub.base1 : priv->pub.base0;
+
+ return readl(base + RZT2H_MSTPCR_OFFSET(offset));
+}
+
+static void cpg_rzt2h_mstp_write(struct clk_hw *hw, u16 offset, u32 value)
+{
+ struct mstp_clock *clock = to_mstp_clock(hw);
+ struct cpg_mssr_priv *priv = clock->priv;
+ void __iomem *base =
+ RZT2H_MSTPCR_BLOCK(offset) ? priv->pub.base1 : priv->pub.base0;
+
+ writel(value, base + RZT2H_MSTPCR_OFFSET(offset));
+}
+
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
struct mstp_clock *clock = to_mstp_clock(hw);
@@ -206,38 +253,52 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
str_on_off(enable));
- spin_lock_irqsave(&priv->rmw_lock, flags);
+ spin_lock_irqsave(&priv->pub.rmw_lock, flags);
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
- value = readb(priv->base + priv->control_regs[reg]);
+ value = readb(priv->pub.base0 + priv->control_regs[reg]);
if (enable)
value &= ~bitmask;
else
value |= bitmask;
- writeb(value, priv->base + priv->control_regs[reg]);
+ writeb(value, priv->pub.base0 + priv->control_regs[reg]);
/* dummy read to ensure write has completed */
- readb(priv->base + priv->control_regs[reg]);
- barrier_data(priv->base + priv->control_regs[reg]);
+ readb(priv->pub.base0 + priv->control_regs[reg]);
+ barrier_data(priv->pub.base0 + priv->control_regs[reg]);
+
+ } else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ value = cpg_rzt2h_mstp_read(hw, priv->control_regs[reg]);
+
+ if (enable)
+ value &= ~bitmask;
+ else
+ value |= bitmask;
+
+ cpg_rzt2h_mstp_write(hw, priv->control_regs[reg], value);
} else {
- value = readl(priv->base + priv->control_regs[reg]);
+ value = readl(priv->pub.base0 + priv->control_regs[reg]);
if (enable)
value &= ~bitmask;
else
value |= bitmask;
- writel(value, priv->base + priv->control_regs[reg]);
+ writel(value, priv->pub.base0 + priv->control_regs[reg]);
}
- spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);
- if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
+ if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A ||
+ priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
return 0;
- error = readl_poll_timeout_atomic(priv->base + priv->status_regs[reg],
+ error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
value, !(value & bitmask), 0, 10);
if (error)
dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
- priv->base + priv->control_regs[reg], bit);
+ priv->pub.base0 + priv->control_regs[reg], bit);
return error;
}
@@ -256,12 +317,16 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
{
struct mstp_clock *clock = to_mstp_clock(hw);
struct cpg_mssr_priv *priv = clock->priv;
+ unsigned int reg = clock->index / 32;
u32 value;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
- value = readb(priv->base + priv->control_regs[clock->index / 32]);
+ value = readb(priv->pub.base0 + priv->control_regs[reg]);
+ else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
+ value = cpg_rzt2h_mstp_read(hw, priv->control_regs[reg]);
else
- value = readl(priv->base + priv->status_regs[clock->index / 32]);
+ value = readl(priv->pub.base0 + priv->status_regs[reg]);
return !(value & BIT(clock->index % 32));
}
@@ -348,7 +413,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
case CLK_TYPE_DIV6P1:
case CLK_TYPE_DIV6_RO:
WARN_DEBUG(core->parent >= priv->num_core_clks);
- parent = priv->clks[core->parent];
+ parent = priv->pub.clks[core->parent];
if (IS_ERR(parent)) {
clk = parent;
goto fail;
@@ -358,12 +423,12 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
if (core->type == CLK_TYPE_DIV6_RO)
/* Multiply with the DIV6 register value */
- div *= (readl(priv->base + core->offset) & 0x3f) + 1;
+ div *= (readl(priv->pub.base0 + core->offset) & 0x3f) + 1;
if (core->type == CLK_TYPE_DIV6P1) {
clk = cpg_div6_register(core->name, 1, &parent_name,
- priv->base + core->offset,
- &priv->notifiers);
+ priv->pub.base0 + core->offset,
+ &priv->pub.notifiers);
} else {
clk = clk_register_fixed_factor(NULL, core->name,
parent_name, 0,
@@ -379,8 +444,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
default:
if (info->cpg_clk_register)
clk = info->cpg_clk_register(dev, core, info,
- priv->clks, priv->base,
- &priv->notifiers);
+ &priv->pub);
else
dev_err(dev, "%s has unsupported core clock type %u\n",
core->name, core->type);
@@ -391,7 +455,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
goto fail;
dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
- priv->clks[id] = clk;
+ priv->pub.clks[id] = clk;
return;
fail:
@@ -414,14 +478,14 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
WARN_DEBUG(id < priv->num_core_clks);
WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
- WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
+ WARN_DEBUG(PTR_ERR(priv->pub.clks[id]) != -ENOENT);
if (!mod->name) {
/* Skip NULLified clock */
return;
}
- parent = priv->clks[mod->parent];
+ parent = priv->pub.clks[mod->parent];
if (IS_ERR(parent)) {
clk = parent;
goto fail;
@@ -623,13 +687,13 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
/* Reset module */
- writel(bitmask, priv->base + priv->reset_regs[reg]);
+ writel(bitmask, priv->pub.base0 + priv->reset_regs[reg]);
/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
udelay(35);
/* Release module from reset state */
- writel(bitmask, priv->base + priv->reset_clear_regs[reg]);
+ writel(bitmask, priv->pub.base0 + priv->reset_clear_regs[reg]);
return 0;
}
@@ -643,7 +707,7 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
- writel(bitmask, priv->base + priv->reset_regs[reg]);
+ writel(bitmask, priv->pub.base0 + priv->reset_regs[reg]);
return 0;
}
@@ -657,7 +721,7 @@ static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit);
- writel(bitmask, priv->base + priv->reset_clear_regs[reg]);
+ writel(bitmask, priv->pub.base0 + priv->reset_clear_regs[reg]);
return 0;
}
@@ -669,7 +733,7 @@ static int cpg_mssr_status(struct reset_controller_dev *rcdev,
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- return !!(readl(priv->base + priv->reset_regs[reg]) & bitmask);
+ return !!(readl(priv->pub.base0 + priv->reset_regs[reg]) & bitmask);
}
static const struct reset_control_ops cpg_mssr_reset_ops = {
@@ -872,6 +936,18 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a779h0_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R9A09G077
+ {
+ .compatible = "renesas,r9a09g077-cpg-mssr",
+ .data = &r9a09g077_cpg_mssr_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R9A09G087
+ {
+ .compatible = "renesas,r9a09g087-cpg-mssr",
+ .data = &r9a09g077_cpg_mssr_info,
+ },
+#endif
{ /* sentinel */ }
};
@@ -895,12 +971,12 @@ static int cpg_mssr_suspend_noirq(struct device *dev)
if (priv->smstpcr_saved[reg].mask)
priv->smstpcr_saved[reg].val =
priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
- readb(priv->base + priv->control_regs[reg]) :
- readl(priv->base + priv->control_regs[reg]);
+ readb(priv->pub.base0 + priv->control_regs[reg]) :
+ readl(priv->pub.base0 + priv->control_regs[reg]);
}
/* Save core clocks */
- raw_notifier_call_chain(&priv->notifiers, PM_EVENT_SUSPEND, NULL);
+ raw_notifier_call_chain(&priv->pub.notifiers, PM_EVENT_SUSPEND, NULL);
return 0;
}
@@ -917,7 +993,7 @@ static int cpg_mssr_resume_noirq(struct device *dev)
return 0;
/* Restore core clocks */
- raw_notifier_call_chain(&priv->notifiers, PM_EVENT_RESUME, NULL);
+ raw_notifier_call_chain(&priv->pub.notifiers, PM_EVENT_RESUME, NULL);
/* Restore module clocks */
for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
@@ -926,29 +1002,29 @@ static int cpg_mssr_resume_noirq(struct device *dev)
continue;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
- oldval = readb(priv->base + priv->control_regs[reg]);
+ oldval = readb(priv->pub.base0 + priv->control_regs[reg]);
else
- oldval = readl(priv->base + priv->control_regs[reg]);
+ oldval = readl(priv->pub.base0 + priv->control_regs[reg]);
newval = oldval & ~mask;
newval |= priv->smstpcr_saved[reg].val & mask;
if (newval == oldval)
continue;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
- writeb(newval, priv->base + priv->control_regs[reg]);
+ writeb(newval, priv->pub.base0 + priv->control_regs[reg]);
/* dummy read to ensure write has completed */
- readb(priv->base + priv->control_regs[reg]);
- barrier_data(priv->base + priv->control_regs[reg]);
+ readb(priv->pub.base0 + priv->control_regs[reg]);
+ barrier_data(priv->pub.base0 + priv->control_regs[reg]);
continue;
} else
- writel(newval, priv->base + priv->control_regs[reg]);
+ writel(newval, priv->pub.base0 + priv->control_regs[reg]);
/* Wait until enabled clocks are really enabled */
mask &= ~priv->smstpcr_saved[reg].val;
if (!mask)
continue;
- error = readl_poll_timeout_atomic(priv->base + priv->status_regs[reg],
+ error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
oldval, !(oldval & mask), 0, 10);
if (error)
dev_warn(dev, "Failed to enable SMSTP%u[0x%x]\n", reg,
@@ -1058,20 +1134,28 @@ static int __init cpg_mssr_common_init(struct device *dev,
if (!priv)
return -ENOMEM;
+ priv->pub.clks = priv->clks;
priv->np = np;
priv->dev = dev;
- spin_lock_init(&priv->rmw_lock);
+ spin_lock_init(&priv->pub.rmw_lock);
- priv->base = of_iomap(np, 0);
- if (!priv->base) {
+ priv->pub.base0 = of_iomap(np, 0);
+ if (!priv->pub.base0) {
error = -ENOMEM;
goto out_err;
}
+ if (info->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ priv->pub.base1 = of_iomap(np, 1);
+ if (!priv->pub.base1) {
+ error = -ENOMEM;
+ goto out_err;
+ }
+ }
priv->num_core_clks = info->num_total_core_clks;
priv->num_mod_clks = info->num_hw_mod_clks;
priv->last_dt_core_clk = info->last_dt_core_clk;
- RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
+ RAW_INIT_NOTIFIER_HEAD(&priv->pub.notifiers);
priv->reg_layout = info->reg_layout;
if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3) {
priv->status_regs = mstpsr;
@@ -1080,6 +1164,8 @@ static int __init cpg_mssr_common_init(struct device *dev,
priv->reset_clear_regs = srstclr;
} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
priv->control_regs = stbcr;
+ } else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ priv->control_regs = mstpcr_for_rzt2h;
} else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4) {
priv->status_regs = mstpsr_for_gen4;
priv->control_regs = mstpcr_for_gen4;
@@ -1091,7 +1177,7 @@ static int __init cpg_mssr_common_init(struct device *dev,
}
for (i = 0; i < nclks; i++)
- priv->clks[i] = ERR_PTR(-ENOENT);
+ priv->pub.clks[i] = ERR_PTR(-ENOENT);
error = cpg_mssr_reserved_init(priv, info);
if (error)
@@ -1108,8 +1194,10 @@ static int __init cpg_mssr_common_init(struct device *dev,
reserve_err:
cpg_mssr_reserved_exit(priv);
out_err:
- if (priv->base)
- iounmap(priv->base);
+ if (priv->pub.base0)
+ iounmap(priv->pub.base0);
+ if (priv->pub.base1)
+ iounmap(priv->pub.base1);
kfree(priv);
return error;
@@ -1174,7 +1262,8 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
goto reserve_exit;
/* Reset Controller not supported for Standby Control SoCs */
- if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A ||
+ priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
goto reserve_exit;
error = cpg_mssr_reset_controller_register(priv);
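
The RZ/T2H branch in cpg_mstp_clock_endisable() above is a plain read-modify-write under pub.rmw_lock with no completion polling, since this layout (like RZ/A) has no MSTPSR status registers. A standalone sketch of the stop-bit semantics (a set bit stops the module, so enabling a clock clears its bit):

#include <stdint.h>
#include <stdio.h>

static uint32_t mstp_rmw(uint32_t reg, uint32_t bitmask, int enable)
{
	if (enable)
		reg &= ~bitmask;	/* clear stop bit: module runs */
	else
		reg |= bitmask;		/* set stop bit: module stopped */
	return reg;
}

int main(void)
{
	uint32_t reg = 0xffffffff;	/* example: all stop bits set */

	reg = mstp_rmw(reg, 1u << 5, 1);
	printf("after enable:  %08x\n", reg);
	reg = mstp_rmw(reg, 1u << 5, 0);
	printf("after disable: %08x\n", reg);
	return 0;
}
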
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index a1d6e0cbcff9..ad11ab5f0069 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -8,6 +8,8 @@
#ifndef __CLK_RENESAS_CPG_MSSR_H__
#define __CLK_RENESAS_CPG_MSSR_H__
+#include <linux/notifier.h>
+
/*
* Definitions of CPG Core Clocks
*
@@ -27,6 +29,31 @@ struct cpg_core_clk {
unsigned int div;
unsigned int mult;
unsigned int offset;
+ union {
+ const char * const *parent_names;
+ const struct clk_div_table *dtable;
+ };
+ u32 conf;
+ u16 flag;
+ u8 mux_flags;
+ u8 num_parents;
+};
+
+/**
+ * struct cpg_mssr_pub - data shared with device-specific clk registration code
+ *
+ * @base0: CPG/MSSR register block base0 address
+ * @base1: CPG/MSSR register block base1 address
+ * @notifiers: Notifier chain to save/restore clock state for system resume
+ * @rmw_lock: protects RMW register accesses
+ * @clks: pointer to clocks
+ */
+struct cpg_mssr_pub {
+ void __iomem *base0;
+ void __iomem *base1;
+ struct raw_notifier_head notifiers;
+ spinlock_t rmw_lock;
+ struct clk **clks;
};
enum clk_types {
@@ -89,6 +116,7 @@ enum clk_reg_layout {
CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3 = 0,
CLK_REG_LAYOUT_RZ_A,
CLK_REG_LAYOUT_RCAR_GEN4,
+ CLK_REG_LAYOUT_RZ_T2H,
};
/**
@@ -153,8 +181,7 @@ struct cpg_mssr_info {
struct clk *(*cpg_clk_register)(struct device *dev,
const struct cpg_core_clk *core,
const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers);
+ struct cpg_mssr_pub *pub);
};
extern const struct cpg_mssr_info r7s9210_cpg_mssr_info;
@@ -181,6 +208,7 @@ extern const struct cpg_mssr_info r8a779a0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779f0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779g0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779h0_cpg_mssr_info;
+extern const struct cpg_mssr_info r9a09g077_cpg_mssr_info;
void __init cpg_mssr_early_init(struct device_node *np,
const struct cpg_mssr_info *info);
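
The union added to struct cpg_core_clk above is a tagged union keyed by the clock type: divider entries populate .dtable, mux entries .parent_names, and only one member is read per entry. A small standalone C illustration of the idiom (simplified stand-in types, not the driver's):

#include <stdio.h>

enum clk_type { TYPE_DIV, TYPE_MUX };

struct div_table { unsigned int val, div; };

struct core_clk {
	enum clk_type type;
	union {
		const char *const *parent_names;	/* TYPE_MUX */
		const struct div_table *dtable;		/* TYPE_DIV */
	};
	unsigned char num_parents;
};

static const char *const mux_parents[] = { ".loco", ".pll0" };
static const struct div_table dt[] = { { 0, 2 }, { 1, 1 }, { 0, 0 } };

int main(void)
{
	struct core_clk div = { .type = TYPE_DIV, .dtable = dt };
	struct core_clk mux = { .type = TYPE_MUX, .parent_names = mux_parents,
				.num_parents = 2 };

	if (div.type == TYPE_DIV)
		printf("first divider ratio: /%u\n", div.dtable[0].div);
	if (mux.type == TYPE_MUX)
		printf("mux parent 0: %s\n", mux.parent_names[0]);
	return 0;
}
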
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index a8628f64a03b..187233302818 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -11,10 +11,13 @@
* Copyright (C) 2015 Renesas Electronics Corp.
*/
+#include <linux/atomic.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -68,6 +71,9 @@
#define MAX_VCLK_FREQ (148500000)
+#define MSTOP_OFF(conf) FIELD_GET(GENMASK(31, 16), (conf))
+#define MSTOP_MASK(conf) FIELD_GET(GENMASK(15, 0), (conf))
+
/**
* struct clk_hw_data - clock hardware data
* @hw: clock hw
@@ -142,6 +148,7 @@ struct rzg2l_pll5_mux_dsi_div_param {
* @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
* @info: Pointer to platform data
+ * @genpd: PM domain
* @mux_dsi_div_params: pll5 mux and dsi div parameters
*/
struct rzg2l_cpg_priv {
@@ -158,6 +165,8 @@ struct rzg2l_cpg_priv {
const struct rzg2l_cpg_info *info;
+ struct generic_pm_domain genpd;
+
struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
@@ -1182,29 +1191,146 @@ fail:
}
/**
- * struct mstp_clock - MSTP gating clock
+ * struct mstop - MSTOP specific data structure
+ * @usecnt: Usage counter for MSTOP settings (when zero, the settings
+ * are applied to the register)
+ * @conf: MSTOP configuration (register offset, setup bits)
+ */
+struct mstop {
+ atomic_t usecnt;
+ u32 conf;
+};
+
+/**
+ * struct mod_clock - Module clock
*
* @hw: handle between common and hardware-specific interfaces
+ * @priv: CPG/MSTP private data
+ * @sibling: pointer to the other coupled clock
+ * @mstop: MSTOP configuration
+ * @shared_mstop_clks: clocks sharing the MSTOP with this clock
* @off: register offset
* @bit: ON/MON bit
+ * @num_shared_mstop_clks: number of clocks sharing the MSTOP with this clock
* @enabled: soft state of the clock, if it is coupled with another clock
- * @priv: CPG/MSTP private data
- * @sibling: pointer to the other coupled clock
*/
-struct mstp_clock {
+struct mod_clock {
struct clk_hw hw;
+ struct rzg2l_cpg_priv *priv;
+ struct mod_clock *sibling;
+ struct mstop *mstop;
+ struct mod_clock **shared_mstop_clks;
u16 off;
u8 bit;
+ u8 num_shared_mstop_clks;
bool enabled;
- struct rzg2l_cpg_priv *priv;
- struct mstp_clock *sibling;
};
-#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
+#define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw)
+
+#define for_each_mod_clock(mod_clock, hw, priv) \
+ for (unsigned int i = 0; (priv) && i < (priv)->num_mod_clks; i++) \
+ if ((priv)->clks[(priv)->num_core_clks + i] == ERR_PTR(-ENOENT)) \
+ continue; \
+ else if (((hw) = __clk_get_hw((priv)->clks[(priv)->num_core_clks + i])) && \
+ ((mod_clock) = to_mod_clock(hw)))
+
+/* Need to be called with a lock held to avoid concurrent access to mstop->usecnt. */
+static void rzg2l_mod_clock_module_set_state(struct mod_clock *clock,
+ bool standby)
+{
+ struct rzg2l_cpg_priv *priv = clock->priv;
+ struct mstop *mstop = clock->mstop;
+ bool update = false;
+ u32 value;
+
+ if (!mstop)
+ return;
+
+ value = MSTOP_MASK(mstop->conf) << 16;
+
+ if (standby) {
+ unsigned int criticals = 0;
+
+ for (unsigned int i = 0; i < clock->num_shared_mstop_clks; i++) {
+ struct mod_clock *clk = clock->shared_mstop_clks[i];
+
+ if (clk_hw_get_flags(&clk->hw) & CLK_IS_CRITICAL)
+ criticals++;
+ }
+
+ if (!clock->num_shared_mstop_clks &&
+ clk_hw_get_flags(&clock->hw) & CLK_IS_CRITICAL)
+ criticals++;
+
+ /*
+ * If this MSTOP is shared with critical clocks and the system boots
+ * with this clock enabled but no driver claims it, the CCF will
+ * disable it as unused. Since we do not increment the MSTOP
+ * reference counter at registration (to avoid interfering with
+ * clocks enabled at probe but claimed by drivers later), do not
+ * update the MSTOP here either when it is reference-counted only
+ * by those critical clocks.
+ */
+ if (criticals && criticals == atomic_read(&mstop->usecnt))
+ return;
+
+ value |= MSTOP_MASK(mstop->conf);
+
+ /* Allow updates on probe when usecnt = 0. */
+ if (!atomic_read(&mstop->usecnt))
+ update = true;
+ else
+ update = atomic_dec_and_test(&mstop->usecnt);
+ } else {
+ if (!atomic_read(&mstop->usecnt))
+ update = true;
+ atomic_inc(&mstop->usecnt);
+ }
+
+ if (update)
+ writel(value, priv->base + MSTOP_OFF(mstop->conf));
+}
+
+static int rzg2l_mod_clock_mstop_show(struct seq_file *s, void *what)
+{
+ struct rzg2l_cpg_priv *priv = s->private;
+ struct mod_clock *clk;
+ struct clk_hw *hw;
+
+ seq_printf(s, "%-20s %-5s %-10s\n", "", "", "MSTOP");
+ seq_printf(s, "%-20s %-5s %-10s\n", "", "clk", "-------------------------");
+ seq_printf(s, "%-20s %-5s %-5s %-5s %-6s %-6s\n",
+ "clk_name", "cnt", "cnt", "off", "val", "shared");
+ seq_printf(s, "%-20s %-5s %-5s %-5s %-6s %-6s\n",
+ "--------", "-----", "-----", "-----", "------", "------");
+
+ for_each_mod_clock(clk, hw, priv) {
+ u32 val;
+
+ if (!clk->mstop)
+ continue;
+
+ val = readl(priv->base + MSTOP_OFF(clk->mstop->conf)) &
+ MSTOP_MASK(clk->mstop->conf);
+
+ seq_printf(s, "%-20s %-5d %-5d 0x%-3lx 0x%-4x", clk_hw_get_name(hw),
+ __clk_get_enable_count(hw->clk), atomic_read(&clk->mstop->usecnt),
+ MSTOP_OFF(clk->mstop->conf), val);
+
+ for (unsigned int i = 0; i < clk->num_shared_mstop_clks; i++)
+ seq_printf(s, " %pC", clk->shared_mstop_clks[i]->hw.clk);
+
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rzg2l_mod_clock_mstop);
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
- struct mstp_clock *clock = to_mod_clock(hw);
+ struct mod_clock *clock = to_mod_clock(hw);
struct rzg2l_cpg_priv *priv = clock->priv;
unsigned int reg = clock->off;
struct device *dev = priv->dev;
@@ -1224,7 +1350,15 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
if (enable)
value |= bitmask;
- writel(value, priv->base + CLK_ON_R(reg));
+ scoped_guard(spinlock_irqsave, &priv->rmw_lock) {
+ if (enable) {
+ writel(value, priv->base + CLK_ON_R(reg));
+ rzg2l_mod_clock_module_set_state(clock, false);
+ } else {
+ rzg2l_mod_clock_module_set_state(clock, true);
+ writel(value, priv->base + CLK_ON_R(reg));
+ }
+ }
if (!enable)
return 0;
@@ -1243,7 +1377,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
- struct mstp_clock *clock = to_mod_clock(hw);
+ struct mod_clock *clock = to_mod_clock(hw);
if (clock->sibling) {
struct rzg2l_cpg_priv *priv = clock->priv;
@@ -1263,7 +1397,7 @@ static int rzg2l_mod_clock_enable(struct clk_hw *hw)
static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
- struct mstp_clock *clock = to_mod_clock(hw);
+ struct mod_clock *clock = to_mod_clock(hw);
if (clock->sibling) {
struct rzg2l_cpg_priv *priv = clock->priv;
@@ -1283,7 +1417,7 @@ static void rzg2l_mod_clock_disable(struct clk_hw *hw)
static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
- struct mstp_clock *clock = to_mod_clock(hw);
+ struct mod_clock *clock = to_mod_clock(hw);
struct rzg2l_cpg_priv *priv = clock->priv;
u32 bitmask = BIT(clock->bit);
u32 value;
@@ -1310,34 +1444,104 @@ static const struct clk_ops rzg2l_mod_clock_ops = {
.is_enabled = rzg2l_mod_clock_is_enabled,
};
-static struct mstp_clock
-*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
+static struct mod_clock
+*rzg2l_mod_clock_get_sibling(struct mod_clock *clock,
struct rzg2l_cpg_priv *priv)
{
+ struct mod_clock *clk;
struct clk_hw *hw;
- unsigned int i;
- for (i = 0; i < priv->num_mod_clks; i++) {
- struct mstp_clock *clk;
+ for_each_mod_clock(clk, hw, priv) {
+ if (clock->off == clk->off && clock->bit == clk->bit)
+ return clk;
+ }
+
+ return NULL;
+}
- if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
+static struct mstop *rzg2l_mod_clock_get_mstop(struct rzg2l_cpg_priv *priv, u32 conf)
+{
+ struct mod_clock *clk;
+ struct clk_hw *hw;
+
+ for_each_mod_clock(clk, hw, priv) {
+ if (!clk->mstop)
continue;
- hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
- clk = to_mod_clock(hw);
- if (clock->off == clk->off && clock->bit == clk->bit)
- return clk;
+ if (clk->mstop->conf == conf)
+ return clk->mstop;
}
return NULL;
}
+static void rzg2l_mod_clock_init_mstop(struct rzg2l_cpg_priv *priv)
+{
+ struct mod_clock *clk;
+ struct clk_hw *hw;
+
+ for_each_mod_clock(clk, hw, priv) {
+ if (!clk->mstop)
+ continue;
+
+ /*
+ * Out of reset, all modules are enabled. Set the module state
+ * in case the associated clocks are disabled at probe; otherwise
+ * the module is left in an invalid HW state.
+ */
+ scoped_guard(spinlock_irqsave, &priv->rmw_lock) {
+ if (!rzg2l_mod_clock_is_enabled(&clk->hw))
+ rzg2l_mod_clock_module_set_state(clk, true);
+ }
+ }
+}
+
+static int rzg2l_mod_clock_update_shared_mstop_clks(struct rzg2l_cpg_priv *priv,
+ struct mod_clock *clock)
+{
+ struct mod_clock *clk;
+ struct clk_hw *hw;
+
+ if (!clock->mstop)
+ return 0;
+
+ for_each_mod_clock(clk, hw, priv) {
+ int num_shared_mstop_clks, incr = 1;
+ struct mod_clock **new_clks;
+
+ if (clk->mstop != clock->mstop)
+ continue;
+
+ num_shared_mstop_clks = clk->num_shared_mstop_clks;
+ if (!num_shared_mstop_clks)
+ incr++;
+
+ new_clks = devm_krealloc(priv->dev, clk->shared_mstop_clks,
+ (num_shared_mstop_clks + incr) * sizeof(*new_clks),
+ GFP_KERNEL);
+ if (!new_clks)
+ return -ENOMEM;
+
+ if (!num_shared_mstop_clks)
+ new_clks[num_shared_mstop_clks++] = clk;
+ new_clks[num_shared_mstop_clks++] = clock;
+
+ for (unsigned int i = 0; i < num_shared_mstop_clks; i++) {
+ new_clks[i]->shared_mstop_clks = new_clks;
+ new_clks[i]->num_shared_mstop_clks = num_shared_mstop_clks;
+ }
+ break;
+ }
+
+ return 0;
+}
+
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
const struct rzg2l_cpg_info *info,
struct rzg2l_cpg_priv *priv)
{
- struct mstp_clock *clock = NULL;
+ struct mod_clock *clock = NULL;
struct device *dev = priv->dev;
unsigned int id = mod->id;
struct clk_init_data init;
@@ -1383,18 +1587,29 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
clock->priv = priv;
clock->hw.init = &init;
+ if (mod->mstop_conf) {
+ struct mstop *mstop = rzg2l_mod_clock_get_mstop(priv, mod->mstop_conf);
+
+ if (!mstop) {
+ mstop = devm_kzalloc(dev, sizeof(*mstop), GFP_KERNEL);
+ if (!mstop) {
+ clk = ERR_PTR(-ENOMEM);
+ goto fail;
+ }
+ mstop->conf = mod->mstop_conf;
+ atomic_set(&mstop->usecnt, 0);
+ }
+ clock->mstop = mstop;
+ }
+
ret = devm_clk_hw_register(dev, &clock->hw);
if (ret) {
clk = ERR_PTR(ret);
goto fail;
}
- clk = clock->hw.clk;
- dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
- priv->clks[id] = clk;
-
if (mod->is_coupled) {
- struct mstp_clock *sibling;
+ struct mod_clock *sibling;
clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
sibling = rzg2l_mod_clock_get_sibling(clock, priv);
@@ -1404,6 +1619,17 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
}
}
+ /* Keep this before priv->clks[id] is updated. */
+ ret = rzg2l_mod_clock_update_shared_mstop_clks(priv, clock);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto fail;
+ }
+
+ clk = clock->hw.clk;
+ dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
+ priv->clks[id] = clk;
+
return;
fail:
@@ -1540,39 +1766,14 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
-/**
- * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
- * @onecell_data: cell data
- * @domains: generic PM domains
- */
-struct rzg2l_cpg_pm_domains {
- struct genpd_onecell_data onecell_data;
- struct generic_pm_domain *domains[];
-};
-
-/**
- * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
- * @genpd: generic PM domain
- * @priv: pointer to CPG private data structure
- * @conf: CPG PM domain configuration info
- * @id: RZ/G2L power domain ID
- */
-struct rzg2l_cpg_pd {
- struct generic_pm_domain genpd;
- struct rzg2l_cpg_priv *priv;
- struct rzg2l_cpg_pm_domain_conf conf;
- u16 id;
-};
-
-static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_pd *pd,
+static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
const struct of_phandle_args *clkspec)
{
- if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
+ if (clkspec->np != priv->genpd.dev.of_node || clkspec->args_count != 2)
return false;
switch (clkspec->args[0]) {
case CPG_MOD: {
- struct rzg2l_cpg_priv *priv = pd->priv;
const struct rzg2l_cpg_info *info = priv->info;
unsigned int id = clkspec->args[1];
@@ -1597,7 +1798,7 @@ static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_pd *pd,
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
- struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
+ struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
bool once = true;
@@ -1606,7 +1807,7 @@ static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device
int error;
for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
- if (!rzg2l_cpg_is_pm_clk(pd, &clkspec)) {
+ if (!rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
of_node_put(clkspec.np);
continue;
}
@@ -1652,182 +1853,30 @@ static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device
static void rzg2l_cpg_genpd_remove(void *data)
{
- struct genpd_onecell_data *celldata = data;
-
- for (unsigned int i = 0; i < celldata->num_domains; i++)
- pm_genpd_remove(celldata->domains[i]);
-}
-
-static void rzg2l_cpg_genpd_remove_simple(void *data)
-{
pm_genpd_remove(data);
}
-static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
-{
- struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
- struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
- struct rzg2l_cpg_priv *priv = pd->priv;
-
- /* Set MSTOP. */
- if (mstop.mask)
- writel(mstop.mask << 16, priv->base + mstop.off);
-
- return 0;
-}
-
-static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
-{
- struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
- struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
- struct rzg2l_cpg_priv *priv = pd->priv;
-
- /* Set MSTOP. */
- if (mstop.mask)
- writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);
-
- return 0;
-}
-
-static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd)
-{
- bool always_on = !!(pd->genpd.flags & GENPD_FLAG_ALWAYS_ON);
- struct dev_power_governor *governor;
- int ret;
-
- if (always_on)
- governor = &pm_domain_always_on_gov;
- else
- governor = &simple_qos_governor;
-
- pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
- pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
- pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
- pd->genpd.power_on = rzg2l_cpg_power_on;
- pd->genpd.power_off = rzg2l_cpg_power_off;
-
- ret = pm_genpd_init(&pd->genpd, governor, !always_on);
- if (ret)
- return ret;
-
- if (always_on)
- ret = rzg2l_cpg_power_on(&pd->genpd);
-
- return ret;
-}
-
static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
struct device *dev = priv->dev;
struct device_node *np = dev->of_node;
- struct rzg2l_cpg_pd *pd;
- int ret;
-
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return -ENOMEM;
-
- pd->genpd.name = np->name;
- pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
- pd->priv = priv;
- ret = rzg2l_cpg_pd_setup(pd);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
- if (ret)
- return ret;
-
- return of_genpd_add_provider_simple(np, &pd->genpd);
-}
-
-static struct generic_pm_domain *
-rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
-{
- struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
- struct genpd_onecell_data *genpd = data;
-
- if (spec->args_count != 1)
- return ERR_PTR(-EINVAL);
-
- for (unsigned int i = 0; i < genpd->num_domains; i++) {
- struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
- genpd);
-
- if (pd->id == spec->args[0]) {
- domain = &pd->genpd;
- break;
- }
- }
-
- return domain;
-}
-
-static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
-{
- const struct rzg2l_cpg_info *info = priv->info;
- struct device *dev = priv->dev;
- struct device_node *np = dev->of_node;
- struct rzg2l_cpg_pm_domains *domains;
- struct generic_pm_domain *parent;
- u32 ncells;
+ struct generic_pm_domain *genpd = &priv->genpd;
int ret;
- ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
+ genpd->name = np->name;
+ genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
+ GENPD_FLAG_ACTIVE_WAKEUP;
+ genpd->attach_dev = rzg2l_cpg_attach_dev;
+ genpd->detach_dev = rzg2l_cpg_detach_dev;
+ ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
if (ret)
return ret;
- /* For backward compatibility. */
- if (!ncells)
- return rzg2l_cpg_add_clk_domain(priv);
-
- domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
- GFP_KERNEL);
- if (!domains)
- return -ENOMEM;
-
- domains->onecell_data.domains = domains->domains;
- domains->onecell_data.num_domains = info->num_pm_domains;
- domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;
-
- ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
+ ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
if (ret)
return ret;
- for (unsigned int i = 0; i < info->num_pm_domains; i++) {
- struct rzg2l_cpg_pd *pd;
-
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return -ENOMEM;
-
- pd->genpd.name = info->pm_domains[i].name;
- pd->genpd.flags = info->pm_domains[i].genpd_flags;
- pd->conf = info->pm_domains[i].conf;
- pd->id = info->pm_domains[i].id;
- pd->priv = priv;
-
- ret = rzg2l_cpg_pd_setup(pd);
- if (ret)
- return ret;
-
- domains->domains[i] = &pd->genpd;
- /* Parent should be on the very first entry of info->pm_domains[]. */
- if (!i) {
- parent = &pd->genpd;
- continue;
- }
-
- ret = pm_genpd_add_subdomain(parent, &pd->genpd);
- if (ret)
- return ret;
- }
-
- ret = of_genpd_add_provider_onecell(np, &domains->onecell_data);
- if (ret)
- return ret;
-
- return 0;
+ return of_genpd_add_provider_simple(np, genpd);
}
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
@@ -1875,6 +1924,13 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
for (i = 0; i < info->num_mod_clks; i++)
rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);
+ /*
+ * Initialize MSTOP after all the clocks have been registered, to avoid
+ * invalid reference counting when multiple clocks (critical,
+ * non-critical) share the same MSTOP.
+ */
+ rzg2l_mod_clock_init_mstop(priv);
+
error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
if (error)
return error;
@@ -1883,7 +1939,7 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
if (error)
return error;
- error = rzg2l_cpg_add_pm_domains(priv);
+ error = rzg2l_cpg_add_clk_domain(priv);
if (error)
return error;
@@ -1891,9 +1947,23 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
if (error)
return error;
+ debugfs_create_file("mstop", 0444, NULL, priv, &rzg2l_mod_clock_mstop_fops);
+ return 0;
+}
+
+static int rzg2l_cpg_resume(struct device *dev)
+{
+ struct rzg2l_cpg_priv *priv = dev_get_drvdata(dev);
+
+ rzg2l_mod_clock_init_mstop(priv);
+
return 0;
}
+static const struct dev_pm_ops rzg2l_cpg_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, rzg2l_cpg_resume)
+};
+
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
{
@@ -1932,6 +2002,7 @@ static struct platform_driver rzg2l_cpg_driver = {
.driver = {
.name = "rzg2l-cpg",
.of_match_table = rzg2l_cpg_match,
+ .pm = pm_sleep_ptr(&rzg2l_cpg_pm_ops),
},
};
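
The MSTOP handling added above hinges on a usage counter that restricts register writes to the first-enable and last-disable transitions (plus the probe-time sync while the counter is still zero). A standalone sketch of just that counting rule, with the critical-clock special case omitted and all names illustrative:

#include <stdio.h>

struct mstop { int usecnt; };

/* Returns 1 when the MSTOP register would be written; mirrors the
 * counting in rzg2l_mod_clock_module_set_state(). */
static int set_state(struct mstop *m, int standby)
{
	if (standby) {
		if (m->usecnt == 0)
			return 1;		/* probe-time sync */
		return --m->usecnt == 0;	/* last user gone */
	}
	if (m->usecnt++ == 0)
		return 1;			/* first user */
	return 0;
}

int main(void)
{
	struct mstop m = { 0 };

	printf("%d", set_state(&m, 0));		/* enable  -> 1 (write) */
	printf("%d", set_state(&m, 0));		/* enable  -> 0 */
	printf("%d", set_state(&m, 1));		/* disable -> 0 */
	printf("%d\n", set_state(&m, 1));	/* disable -> 1 (write) */
	return 0;
}
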
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index b6eece5ffa20..0a71c5ec24b6 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -82,6 +82,8 @@
#define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1)
#define SEL_GPU2 SEL_PLL_PACK(CPG_PL6_SSEL, 12, 1)
+#define MSTOP(name, bitmask) ((CPG_##name##_MSTOP) << 16 | (bitmask))
+
#define EXTAL_FREQ_IN_MEGA_HZ (24)
/**
@@ -201,6 +203,7 @@ enum clk_types {
* @name: handle between common and hardware-specific interfaces
* @id: clock index in array containing all Core and Module Clocks
* @parent: id of parent clock
+ * @mstop_conf: MSTOP configuration
* @off: register offset
* @bit: ON/MON bit
* @is_coupled: flag to indicate coupled clock
@@ -209,26 +212,28 @@ struct rzg2l_mod_clk {
const char *name;
unsigned int id;
unsigned int parent;
+ u32 mstop_conf;
u16 off;
u8 bit;
bool is_coupled;
};
-#define DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _is_coupled) \
+#define DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _mstop_conf, _is_coupled) \
{ \
.name = _name, \
.id = MOD_CLK_BASE + (_id), \
.parent = (_parent), \
+ .mstop_conf = (_mstop_conf), \
.off = (_off), \
.bit = (_bit), \
.is_coupled = (_is_coupled), \
}
-#define DEF_MOD(_name, _id, _parent, _off, _bit) \
- DEF_MOD_BASE(_name, _id, _parent, _off, _bit, false)
+#define DEF_MOD(_name, _id, _parent, _off, _bit, _mstop_conf) \
+ DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _mstop_conf, false)
-#define DEF_COUPLED(_name, _id, _parent, _off, _bit) \
- DEF_MOD_BASE(_name, _id, _parent, _off, _bit, true)
+#define DEF_COUPLED(_name, _id, _parent, _off, _bit, _mstop_conf) \
+ DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _mstop_conf, true)
/**
* struct rzg2l_reset - Reset definitions
@@ -253,51 +258,6 @@ struct rzg2l_reset {
DEF_RST_MON(_id, _off, _bit, -1)
/**
- * struct rzg2l_cpg_reg_conf - RZ/G2L register configuration data structure
- * @off: register offset
- * @mask: register mask
- */
-struct rzg2l_cpg_reg_conf {
- u16 off;
- u16 mask;
-};
-
-#define DEF_REG_CONF(_off, _mask) ((struct rzg2l_cpg_reg_conf) { .off = (_off), .mask = (_mask) })
-
-/**
- * struct rzg2l_cpg_pm_domain_conf - PM domain configuration data structure
- * @mstop: MSTOP register configuration
- */
-struct rzg2l_cpg_pm_domain_conf {
- struct rzg2l_cpg_reg_conf mstop;
-};
-
-/**
- * struct rzg2l_cpg_pm_domain_init_data - PM domain init data
- * @name: PM domain name
- * @conf: PM domain configuration
- * @genpd_flags: genpd flags (see GENPD_FLAG_*)
- * @id: PM domain ID (similar to the ones defined in
- * include/dt-bindings/clock/<soc-id>-cpg.h)
- */
-struct rzg2l_cpg_pm_domain_init_data {
- const char * const name;
- struct rzg2l_cpg_pm_domain_conf conf;
- u32 genpd_flags;
- u16 id;
-};
-
-#define DEF_PD(_name, _id, _mstop_conf, _flags) \
- { \
- .name = (_name), \
- .id = (_id), \
- .conf = { \
- .mstop = (_mstop_conf), \
- }, \
- .genpd_flags = (_flags), \
- }
-
-/**
* struct rzg2l_cpg_info - SoC-specific CPG Description
*
* @core_clks: Array of Core Clock definitions
@@ -315,8 +275,6 @@ struct rzg2l_cpg_pm_domain_init_data {
* @crit_mod_clks: Array with Module Clock IDs of critical clocks that
* should not be disabled without a knowledgeable driver
* @num_crit_mod_clks: Number of entries in crit_mod_clks[]
- * @pm_domains: PM domains init data array
- * @num_pm_domains: Number of PM domains
* @has_clk_mon_regs: Flag indicating whether the SoC has CLK_MON registers
*/
struct rzg2l_cpg_info {
@@ -343,10 +301,6 @@ struct rzg2l_cpg_info {
const unsigned int *crit_mod_clks;
unsigned int num_crit_mod_clks;
- /* Power domain. */
- const struct rzg2l_cpg_pm_domain_init_data *pm_domains;
- unsigned int num_pm_domains;
-
bool has_clk_mon_regs;
};
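
The MSTOP(name, bitmask) macro above packs the register offset into the upper 16 bits of the conf word and the bit mask into the lower 16; the MSTOP registers themselves take write-enable bits in their upper half, which is why the driver writes mask << 16 to leave standby and (mask << 16) | mask to enter it. A standalone sketch of both (the offset value is made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define MSTOP_OFF(conf)		((uint32_t)(conf) >> 16)
#define MSTOP_MASK(conf)	((uint32_t)(conf) & 0xffff)
#define MSTOP_PACK(off, mask)	(((uint32_t)(off) << 16) | (mask))

int main(void)
{
	uint32_t conf = MSTOP_PACK(0xb60, 0x0001);	/* hypothetical */
	uint32_t standby = (MSTOP_MASK(conf) << 16) | MSTOP_MASK(conf);
	uint32_t run = MSTOP_MASK(conf) << 16;

	printf("off=0x%x standby=0x%08x run=0x%08x\n",
	       MSTOP_OFF(conf), standby, run);
	return 0;
}
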
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index bcc496e8cbcd..f468afbb54e2 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -77,6 +77,7 @@
* @resets: Array of resets
* @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @ff_mod_status_ops: Fixed Factor Module Status Clock operations
* @mstop_count: Array of mstop values
* @rcdev: Reset controller entity
*/
@@ -92,6 +93,8 @@ struct rzv2h_cpg_priv {
unsigned int num_resets;
unsigned int last_dt_core_clk;
+ struct clk_ops *ff_mod_status_ops;
+
atomic_t *mstop_count;
struct reset_controller_dev rcdev;
@@ -101,7 +104,6 @@ struct rzv2h_cpg_priv {
struct pll_clk {
struct rzv2h_cpg_priv *priv;
- void __iomem *base;
struct clk_hw hw;
struct pll pll;
};
@@ -119,6 +121,7 @@ struct pll_clk {
* @on_bit: ON/MON bit
* @mon_index: monitor register offset
* @mon_bit: monitor bit
+ * @ext_clk_mux_index: mux index for external clock source, or -1 if internal
*/
struct mod_clock {
struct rzv2h_cpg_priv *priv;
@@ -129,6 +132,7 @@ struct mod_clock {
u8 on_bit;
s8 mon_index;
u8 mon_bit;
+ s8 ext_clk_mux_index;
};
#define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw)
@@ -148,6 +152,22 @@ struct ddiv_clk {
#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div)
+/**
+ * struct rzv2h_ff_mod_status_clk - Fixed Factor Module Status Clock
+ *
+ * @priv: CPG private data
+ * @conf: fixed mod configuration
+ * @fix: fixed factor clock
+ */
+struct rzv2h_ff_mod_status_clk {
+ struct rzv2h_cpg_priv *priv;
+ struct fixed_mod_conf conf;
+ struct clk_fixed_factor fix;
+};
+
+#define to_rzv2h_ff_mod_status_clk(_hw) \
+ container_of(_hw, struct rzv2h_ff_mod_status_clk, fix.hw)
+
static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw)
{
struct pll_clk *pll_clk = to_pll(hw);
@@ -228,7 +248,6 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
struct rzv2h_cpg_priv *priv,
const struct clk_ops *ops)
{
- void __iomem *base = priv->base;
struct device *dev = priv->dev;
struct clk_init_data init;
const struct clk *parent;
@@ -253,7 +272,6 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
pll_clk->hw.init = &init;
pll_clk->pll = core->cfg.pll;
- pll_clk->base = base;
pll_clk->priv = priv;
ret = devm_clk_hw_register(dev, &pll_clk->hw);
@@ -381,6 +399,7 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
init.ops = &rzv2h_ddiv_clk_divider_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
+ init.flags = CLK_SET_RATE_PARENT;
ddiv->priv = priv;
ddiv->mon = cfg_ddiv.monbit;
@@ -418,6 +437,65 @@ rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core,
return clk_hw->clk;
}
+static int
+rzv2h_clk_ff_mod_status_is_enabled(struct clk_hw *hw)
+{
+ struct rzv2h_ff_mod_status_clk *fix = to_rzv2h_ff_mod_status_clk(hw);
+ struct rzv2h_cpg_priv *priv = fix->priv;
+ u32 offset = GET_CLK_MON_OFFSET(fix->conf.mon_index);
+ u32 bitmask = BIT(fix->conf.mon_bit);
+ u32 val;
+
+ val = readl(priv->base + offset);
+ return !!(val & bitmask);
+}
+
+static struct clk * __init
+rzv2h_cpg_fixed_mod_status_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct rzv2h_ff_mod_status_clk *clk_hw_data;
+ struct clk_init_data init = { };
+ struct clk_fixed_factor *fix;
+ const struct clk *parent;
+ const char *parent_name;
+ int ret;
+
+ WARN_DEBUG(core->parent >= priv->num_core_clks);
+ parent = priv->clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
+ if (!clk_hw_data)
+ return ERR_PTR(-ENOMEM);
+
+ clk_hw_data->priv = priv;
+ clk_hw_data->conf = core->cfg.fixed_mod;
+
+ init.name = core->name;
+ init.ops = priv->ff_mod_status_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ fix = &clk_hw_data->fix;
+ fix->hw.init = &init;
+ fix->mult = core->mult;
+ fix->div = core->div;
+
+ ret = devm_clk_hw_register(priv->dev, &clk_hw_data->fix.hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_hw_data->fix.hw.clk;
+}
+
static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
void *data)
@@ -496,6 +574,20 @@ rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
else
clk = clk_hw->clk;
break;
+ case CLK_TYPE_FF_MOD_STATUS:
+ if (!priv->ff_mod_status_ops) {
+ priv->ff_mod_status_ops =
+ devm_kzalloc(dev, sizeof(*priv->ff_mod_status_ops), GFP_KERNEL);
+ if (!priv->ff_mod_status_ops) {
+ clk = ERR_PTR(-ENOMEM);
+ goto fail;
+ }
+ memcpy(priv->ff_mod_status_ops, &clk_fixed_factor_ops,
+ sizeof(const struct clk_ops));
+ priv->ff_mod_status_ops->is_enabled = rzv2h_clk_ff_mod_status_is_enabled;
+ }
+ clk = rzv2h_cpg_fixed_mod_status_clk_register(core, priv);
+ break;
case CLK_TYPE_PLL:
clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
break;
@@ -563,15 +655,38 @@ static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
spin_unlock_irqrestore(&priv->rmw_lock, flags);
}
+static int rzv2h_parent_clk_mux_to_index(struct clk_hw *hw)
+{
+ struct clk_hw *parent_hw;
+ struct clk *parent_clk;
+ struct clk_mux *mux;
+ u32 val;
+
+ /* This will always succeed, so no need to check for IS_ERR() */
+ parent_clk = clk_get_parent(hw->clk);
+
+ parent_hw = __clk_get_hw(parent_clk);
+ mux = to_clk_mux(parent_hw);
+
+ val = readl(mux->reg) >> mux->shift;
+ val &= mux->mask;
+ return clk_mux_val_to_index(parent_hw, mux->table, 0, val);
+}
+
static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
{
struct mod_clock *clock = to_mod_clock(hw);
struct rzv2h_cpg_priv *priv = clock->priv;
+ int mon_index = clock->mon_index;
u32 bitmask;
u32 offset;
- if (clock->mon_index >= 0) {
- offset = GET_CLK_MON_OFFSET(clock->mon_index);
+ if (clock->ext_clk_mux_index >= 0 &&
+ rzv2h_parent_clk_mux_to_index(hw) == clock->ext_clk_mux_index)
+ mon_index = -1;
+
+ if (mon_index >= 0) {
+ offset = GET_CLK_MON_OFFSET(mon_index);
bitmask = BIT(clock->mon_bit);
if (!(readl(priv->base + offset) & bitmask))
@@ -687,6 +802,7 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
clock->mon_index = mod->mon_index;
clock->mon_bit = mod->mon_bit;
clock->no_pm = mod->no_pm;
+ clock->ext_clk_mux_index = mod->ext_clk_mux_index;
clock->priv = priv;
clock->hw.init = &init;
clock->mstop_data = mod->mstop_data;
@@ -1004,8 +1120,8 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
/* Adjust for CPG_BUS_m_MSTOP starting from m = 1 */
priv->mstop_count -= 16;
- priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
- info->num_resets, GFP_KERNEL);
+ priv->resets = devm_kmemdup_array(dev, info->resets, info->num_resets,
+ sizeof(*info->resets), GFP_KERNEL);
if (!priv->resets)
return -ENOMEM;
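
CLK_TYPE_FF_MOD_STATUS above is implemented by copying the generic fixed-factor ops once into a writable table and overriding only .is_enabled. A userspace analogue of that clone-and-override idiom:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ops { int (*is_enabled)(void); };

static int always_on(void)	{ return 1; }
static int check_monitor(void)	{ return 0; }	/* stand-in for a MON read */

static const struct ops fixed_factor_ops = { .is_enabled = always_on };

int main(void)
{
	struct ops *status_ops = malloc(sizeof(*status_ops));

	if (!status_ops)
		return 1;
	memcpy(status_ops, &fixed_factor_ops, sizeof(*status_ops));
	status_ops->is_enabled = check_monitor;	/* the single override */

	printf("%d %d\n", fixed_factor_ops.is_enabled(),
	       status_ops->is_enabled());
	free(status_ops);
	return 0;
}
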
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
index 9104b1cd276c..840eed25aeda 100644
--- a/drivers/clk/renesas/rzv2h-cpg.h
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -9,6 +9,7 @@
#define __RENESAS_RZV2H_CPG_H__
#include <linux/bitfield.h>
+#include <linux/types.h>
/**
* struct pll - Structure for PLL configuration
@@ -93,6 +94,24 @@ struct smuxed {
.width = (_width), \
})
+/**
+ * struct fixed_mod_conf - Structure for fixed module configuration
+ *
+ * @mon_index: monitor index
+ * @mon_bit: monitor bit
+ */
+struct fixed_mod_conf {
+ u8 mon_index;
+ u8 mon_bit;
+};
+
+#define FIXED_MOD_CONF_PACK(_index, _bit) \
+ ((struct fixed_mod_conf){ \
+ .mon_index = (_index), \
+ .mon_bit = (_bit), \
+ })
+
+#define CPG_SSEL0 (0x300)
#define CPG_SSEL1 (0x304)
#define CPG_CDDIV0 (0x400)
#define CPG_CDDIV1 (0x404)
@@ -113,8 +132,14 @@ struct smuxed {
#define CDDIV4_DIVCTL1 DDIV_PACK(CPG_CDDIV4, 4, 1, 17)
#define CDDIV4_DIVCTL2 DDIV_PACK(CPG_CDDIV4, 8, 1, 18)
+#define CSDIV0_DIVCTL0 DDIV_PACK(CPG_CSDIV0, 0, 2, CSDIV_NO_MON)
+#define CSDIV0_DIVCTL1 DDIV_PACK(CPG_CSDIV0, 4, 2, CSDIV_NO_MON)
#define CSDIV0_DIVCTL3 DDIV_PACK_NO_RMW(CPG_CSDIV0, 12, 2, CSDIV_NO_MON)
+#define SSEL0_SELCTL2 SMUX_PACK(CPG_SSEL0, 8, 1)
+#define SSEL0_SELCTL3 SMUX_PACK(CPG_SSEL0, 12, 1)
+#define SSEL1_SELCTL0 SMUX_PACK(CPG_SSEL1, 0, 1)
+#define SSEL1_SELCTL1 SMUX_PACK(CPG_SSEL1, 4, 1)
#define SSEL1_SELCTL2 SMUX_PACK(CPG_SSEL1, 8, 1)
#define SSEL1_SELCTL3 SMUX_PACK(CPG_SSEL1, 12, 1)
@@ -124,6 +149,8 @@ struct smuxed {
FIELD_PREP_CONST(BUS_MSTOP_BITS_MASK, (mask)))
#define BUS_MSTOP_NONE GENMASK(31, 0)
+#define FIXED_MOD_CONF_XSPI FIXED_MOD_CONF_PACK(5, 1)
+
/**
* Definitions of CPG Core Clocks
*
@@ -144,6 +171,7 @@ struct cpg_core_clk {
struct ddiv ddiv;
struct pll pll;
struct smuxed smux;
+ struct fixed_mod_conf fixed_mod;
} cfg;
const struct clk_div_table *dtable;
const char * const *parent_names;
@@ -156,6 +184,7 @@ enum clk_types {
/* Generic */
CLK_TYPE_IN, /* External Clock Input */
CLK_TYPE_FF, /* Fixed Factor Clock */
+ CLK_TYPE_FF_MOD_STATUS, /* Fixed Factor Clock which can report the status of the module clock */
CLK_TYPE_PLL,
CLK_TYPE_DDIV, /* Dynamic Switching Divider */
CLK_TYPE_SMUX, /* Static Mux */
@@ -171,6 +200,9 @@ enum clk_types {
DEF_TYPE(_name, _id, CLK_TYPE_IN)
#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
DEF_BASE(_name, _id, CLK_TYPE_FF, _parent, .div = _div, .mult = _mult)
+#define DEF_FIXED_MOD_STATUS(_name, _id, _parent, _mult, _div, _gate) \
+ DEF_BASE(_name, _id, CLK_TYPE_FF_MOD_STATUS, _parent, .div = _div, \
+ .mult = _mult, .cfg.fixed_mod = _gate)
#define DEF_DDIV(_name, _id, _parent, _ddiv_packed, _dtable) \
DEF_TYPE(_name, _id, CLK_TYPE_DDIV, \
.cfg.ddiv = _ddiv_packed, \
@@ -199,6 +231,7 @@ enum clk_types {
* @on_bit: ON bit
* @mon_index: monitor register index
* @mon_bit: monitor bit
+ * @ext_clk_mux_index: mux index for external clock source, or -1 if internal
*/
struct rzv2h_mod_clk {
const char *name;
@@ -210,9 +243,11 @@ struct rzv2h_mod_clk {
u8 on_bit;
s8 mon_index;
u8 mon_bit;
+ s8 ext_clk_mux_index;
};
-#define DEF_MOD_BASE(_name, _mstop, _parent, _critical, _no_pm, _onindex, _onbit, _monindex, _monbit) \
+#define DEF_MOD_BASE(_name, _mstop, _parent, _critical, _no_pm, _onindex, \
+ _onbit, _monindex, _monbit, _ext_clk_mux_index) \
{ \
.name = (_name), \
.mstop_data = (_mstop), \
@@ -223,16 +258,22 @@ struct rzv2h_mod_clk {
.on_bit = (_onbit), \
.mon_index = (_monindex), \
.mon_bit = (_monbit), \
+ .ext_clk_mux_index = (_ext_clk_mux_index), \
}
#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
- DEF_MOD_BASE(_name, _mstop, _parent, false, false, _onindex, _onbit, _monindex, _monbit)
+ DEF_MOD_BASE(_name, _mstop, _parent, false, false, _onindex, _onbit, _monindex, _monbit, -1)
#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
- DEF_MOD_BASE(_name, _mstop, _parent, true, false, _onindex, _onbit, _monindex, _monbit)
+ DEF_MOD_BASE(_name, _mstop, _parent, true, false, _onindex, _onbit, _monindex, _monbit, -1)
#define DEF_MOD_NO_PM(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
- DEF_MOD_BASE(_name, _mstop, _parent, false, true, _onindex, _onbit, _monindex, _monbit)
+ DEF_MOD_BASE(_name, _mstop, _parent, false, true, _onindex, _onbit, _monindex, _monbit, -1)
+
+#define DEF_MOD_MUX_EXTERNAL(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop, \
+ _ext_clk_mux_index) \
+ DEF_MOD_BASE(_name, _mstop, _parent, false, false, _onindex, _onbit, _monindex, _monbit, \
+ _ext_clk_mux_index)
/**
* struct rzv2h_reset - Reset definitions
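[Editor's note: a hedged sketch of how a SoC clock table might consume the new
DEF_FIXED_MOD_STATUS() macro introduced above. Only FIXED_MOD_CONF_XSPI comes
from the patch; the clock name, ID and parent below are hypothetical
placeholders, not values from any real rzv2h table.]

/*
 * Hypothetical core-clock table entry: a 1/8 fixed-factor clock whose
 * enable status is read back from monitor register 5, bit 1 -- the
 * FIXED_MOD_CONF_XSPI packing defined in the hunk above.
 */
static const struct cpg_core_clk example_core_clks[] = {
	DEF_FIXED_MOD_STATUS(".example_spi", EXAMPLE_CLK_SPI,
			     CLK_EXAMPLE_PARENT, 1, 8,
			     FIXED_MOD_CONF_XSPI),
};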
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 398a226ad34e..dcc9dcb597ae 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -16,14 +16,14 @@
* of the SoC or supplied after the SoC characterization.
*
* The below implementation of the CPU clock allows the rate changes of the CPU
- * clock and the corresponding rate changes of the auxillary clocks of the CPU
+ * clock and the corresponding rate changes of the auxiliary clocks of the CPU
* domain. The platform clock driver provides a clock register configuration
* for each configurable rate which is then used to program the clock hardware
- * registers to acheive a fast co-oridinated rate change for all the CPU domain
+ * registers to achieve a fast coordinated rate change for all the CPU domain
* clocks.
*
* On a rate change request for the CPU clock, the rate change is propagated
- * upto the PLL supplying the clock to the CPU domain clock blocks. While the
+ * up to the PLL supplying the clock to the CPU domain clock blocks. While the
* CPU domain PLL is reconfigured, the CPU domain clocks are driven using an
* alternate clock source. If required, the alternate clock source is divided
* down in order to keep the output clock rate within the previous OPP limits.
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index b3ed8e7523e5..8b1292c56863 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -174,11 +174,11 @@ static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
/*
* rockchip_mmc_clk is mostly used by mmc controllers to sample
- * the intput data, which expects the fixed phase after the tuning
+ * the input data, which expects the fixed phase after the tuning
* process. However if the clock rate is changed, the phase is stale
* and may break the data sampling. So here we try to restore the phase
* for that case, except that
- * (1) cached_phase is invaild since we inevitably cached it when the
+ * (1) cached_phase is invalid since we inevitably cached it when the
* clock provider be reparented from orphan to its real parent in the
* first place. Otherwise we may mess up the initialization of MMC cards
* since we only set the default sample phase and drive phase later on.
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index af74439a7457..c9d599c31923 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -68,7 +68,7 @@ static long rockchip_pll_round_rate(struct clk_hw *hw,
const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
int i;
- /* Assumming rate_table is in descending order */
+ /* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++) {
if (drate >= rate_table[i].rate)
return rate_table[i].rate;
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index d48ab9d6c064..97d279399ae8 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -79,6 +79,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
RK3036_PLL_RATE(148500000, 1, 99, 4, 4, 1, 0),
RK3036_PLL_RATE(135000000, 2, 45, 4, 1, 1, 0),
+ RK3036_PLL_RATE(132000000, 1, 66, 6, 2, 1, 0),
RK3036_PLL_RATE(128000000, 1, 16, 3, 1, 1, 0),
RK3036_PLL_RATE(126400000, 1, 79, 5, 3, 1, 0),
RK3036_PLL_RATE(119000000, 3, 119, 4, 2, 1, 0),
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 1e9c3c0d31e3..7c5e74c7a2e2 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -532,7 +532,7 @@ struct rockchip_pll_rate_table {
*
* Flags:
* ROCKCHIP_PLL_SYNC_RATE - check rate parameters to match against the
- * rate_table parameters and ajust them if necessary.
+ * rate_table parameters and adjust them if necessary.
* ROCKCHIP_PLL_FIXED_MODE - the pll operates in normal mode only
*/
struct rockchip_pll_clock {
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 97982662e1a6..4e1ebd8a30b1 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -243,7 +243,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
/*
* In Exynos4210, ATB clock parent is also mout_core. So
- * ATB clock also needs to be mantained at safe speed.
+ * ATB clock also needs to be maintained at safe speed.
*/
alt_div |= E4210_DIV0_ATB_MASK;
alt_div_mask |= E4210_DIV0_ATB_MASK;
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index cf7e08cca78e..56f27697c76b 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -1360,7 +1360,7 @@ static const unsigned long cpucl1_clk_regs[] __initconst = {
CLK_CON_GAT_GATE_CLK_CPUCL1_CPU,
};
-/* List of parent clocks for Muxes in CMU_CPUCL0 */
+/* List of parent clocks for Muxes in CMU_CPUCL1 */
PNAME(mout_pll_cpucl1_p) = { "oscclk", "fout_cpucl1_pll" };
PNAME(mout_cpucl1_switch_user_p) = { "oscclk", "dout_cpucl1_switch" };
PNAME(mout_cpucl1_dbg_user_p) = { "oscclk", "dout_cpucl1_dbg" };
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
index da4afe8ac2ab..572b6ace14ac 100644
--- a/drivers/clk/samsung/clk-exynosautov920.c
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -26,6 +26,7 @@
#define CLKS_NR_MISC (CLK_DOUT_MISC_OSC_DIV2 + 1)
#define CLKS_NR_HSI0 (CLK_DOUT_HSI0_PCIE_APB + 1)
#define CLKS_NR_HSI1 (CLK_MOUT_HSI1_USBDRD + 1)
+#define CLKS_NR_HSI2 (CLK_DOUT_HSI2_ETHERNET_PTP + 1)
/* ---- CMU_TOP ------------------------------------------------------------ */
@@ -1752,6 +1753,74 @@ static const struct samsung_cmu_info hsi1_cmu_info __initconst = {
.clk_name = "noc",
};
+/* ---- CMU_HSI2 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_HSI2 (0x16b00000) */
+#define PLL_LOCKTIME_PLL_ETH 0x0
+#define PLL_CON3_PLL_ETH 0x10c
+#define PLL_CON0_MUX_CLKCMU_HSI2_ETHERNET_USER 0x600
+#define PLL_CON0_MUX_CLKCMU_HSI2_NOC_UFS_USER 0x610
+#define PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER 0x630
+#define CLK_CON_MUX_MUX_CLK_HSI2_ETHERNET 0x1000
+#define CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET 0x1800
+#define CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET_PTP 0x1804
+
+static const unsigned long hsi2_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_ETH,
+ PLL_CON3_PLL_ETH,
+ PLL_CON0_MUX_CLKCMU_HSI2_ETHERNET_USER,
+ PLL_CON0_MUX_CLKCMU_HSI2_NOC_UFS_USER,
+ PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER,
+ CLK_CON_MUX_MUX_CLK_HSI2_ETHERNET,
+ CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET,
+ CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET_PTP,
+};
+
+static const struct samsung_pll_clock hsi2_pll_clks[] __initconst = {
+ /* CMU_HSI2_PLL */
+ PLL(pll_531x, FOUT_PLL_ETH, "fout_pll_eth", "oscclk",
+ PLL_LOCKTIME_PLL_ETH, PLL_CON3_PLL_ETH, NULL),
+};
+
+/* List of parent clocks for Muxes in CMU_HSI2 */
+PNAME(mout_clkcmu_hsi2_noc_ufs_user_p) = { "oscclk", "dout_clkcmu_hsi2_noc_ufs" };
+PNAME(mout_clkcmu_hsi2_ufs_embd_user_p) = { "oscclk", "dout_clkcmu_hsi2_ufs_embd" };
+PNAME(mout_hsi2_ethernet_p) = { "fout_pll_eth", "mout_clkcmu_hsi2_ethernet_user" };
+PNAME(mout_clkcmu_hsi2_ethernet_user_p) = { "oscclk", "dout_clkcmu_hsi2_ethernet" };
+
+static const struct samsung_mux_clock hsi2_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI2_NOC_UFS_USER, "mout_clkcmu_hsi2_noc_ufs_user",
+ mout_clkcmu_hsi2_noc_ufs_user_p, PLL_CON0_MUX_CLKCMU_HSI2_NOC_UFS_USER, 4, 1),
+ MUX(CLK_MOUT_HSI2_UFS_EMBD_USER, "mout_clkcmu_hsi2_ufs_embd_user",
+ mout_clkcmu_hsi2_ufs_embd_user_p, PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER, 4, 1),
+ MUX(CLK_MOUT_HSI2_ETHERNET, "mout_hsi2_ethernet",
+ mout_hsi2_ethernet_p, CLK_CON_MUX_MUX_CLK_HSI2_ETHERNET, 0, 1),
+ MUX(CLK_MOUT_HSI2_ETHERNET_USER, "mout_clkcmu_hsi2_ethernet_user",
+ mout_clkcmu_hsi2_ethernet_user_p, PLL_CON0_MUX_CLKCMU_HSI2_ETHERNET_USER, 4, 1),
+};
+
+static const struct samsung_div_clock hsi2_div_clks[] __initconst = {
+ DIV(CLK_DOUT_HSI2_ETHERNET, "dout_hsi2_ethernet",
+ "mout_hsi2_ethernet", CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET,
+ 0, 4),
+ DIV(CLK_DOUT_HSI2_ETHERNET_PTP, "dout_hsi2_ethernet_ptp",
+ "mout_hsi2_ethernet", CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET_PTP,
+ 0, 4),
+};
+
+static const struct samsung_cmu_info hsi2_cmu_info __initconst = {
+ .pll_clks = hsi2_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(hsi2_pll_clks),
+ .mux_clks = hsi2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi2_mux_clks),
+ .div_clks = hsi2_div_clks,
+ .nr_div_clks = ARRAY_SIZE(hsi2_div_clks),
+ .nr_clk_ids = CLKS_NR_HSI2,
+ .clk_regs = hsi2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi2_clk_regs),
+ .clk_name = "noc",
+};
+
static int __init exynosautov920_cmu_probe(struct platform_device *pdev)
{
const struct samsung_cmu_info *info;
@@ -1779,6 +1848,9 @@ static const struct of_device_id exynosautov920_cmu_of_match[] = {
}, {
.compatible = "samsung,exynosautov920-cmu-hsi1",
.data = &hsi1_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-hsi2",
+ .data = &hsi2_cmu_info,
},
{ }
};
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index f9c3d68d449c..70b26db9b95a 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -1154,7 +1154,7 @@ static const struct samsung_div_clock cmu_top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_G2D_MSCL, 0, 4),
DIV(CLK_DOUT_CMU_G3AA_G3AA, "dout_cmu_g3aa_g3aa", "gout_cmu_g3aa_g3aa",
CLK_CON_DIV_CLKCMU_G3AA_G3AA, 0, 4),
- DIV(CLK_DOUT_CMU_G3D_SWITCH, "dout_cmu_g3d_busd", "gout_cmu_g3d_busd",
+ DIV(CLK_DOUT_CMU_G3D_BUSD, "dout_cmu_g3d_busd", "gout_cmu_g3d_busd",
CLK_CON_DIV_CLKCMU_G3D_BUSD, 0, 4),
DIV(CLK_DOUT_CMU_G3D_GLB, "dout_cmu_g3d_glb", "gout_cmu_g3d_glb",
CLK_CON_DIV_CLKCMU_G3D_GLB, 0, 4),
@@ -2129,7 +2129,7 @@ PNAME(mout_hsi0_usbdpdbg_user_p) = { "oscclk",
"dout_cmu_hsi0_usbdpdbg" };
PNAME(mout_hsi0_bus_p) = { "mout_hsi0_bus_user",
"mout_hsi0_alt_user" };
-PNAME(mout_hsi0_usb20_ref_p) = { "fout_usb_pll",
+PNAME(mout_hsi0_usb20_ref_p) = { "mout_pll_usb",
"mout_hsi0_tcxo_user" };
PNAME(mout_hsi0_usb31drd_p) = { "fout_usb_pll",
"mout_hsi0_usb31drd_user",
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index fe8abe442c51..e4faf02b631e 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -56,7 +56,7 @@ static long samsung_pll_round_rate(struct clk_hw *hw,
const struct samsung_pll_rate_table *rate_table = pll->rate_table;
int i;
- /* Assumming rate_table is in descending order */
+ /* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++) {
if (drate >= rate_table[i].rate)
return rate_table[i].rate;
diff --git a/drivers/clk/sophgo/clk-sg2042-clkgen.c b/drivers/clk/sophgo/clk-sg2042-clkgen.c
index a334963e83ce..9e61288d34f3 100644
--- a/drivers/clk/sophgo/clk-sg2042-clkgen.c
+++ b/drivers/clk/sophgo/clk-sg2042-clkgen.c
@@ -968,7 +968,7 @@ static int sg2042_mux_notifier_cb(struct notifier_block *nb,
/*
* "1" is the array index of the second parent input source of
* mux. For SG2042, it's fpll for all mux clocks.
- * "0" is the array index of the frist parent input source of
+ * "0" is the array index of the first parent input source of
* mux, For SG2042, it's mpll.
* FIXME, any good idea to avoid magic number?
*/
diff --git a/drivers/clk/sophgo/clk-sg2042-pll.c b/drivers/clk/sophgo/clk-sg2042-pll.c
index 1537f4f05860..e5fb0bb7ac4f 100644
--- a/drivers/clk/sophgo/clk-sg2042-pll.c
+++ b/drivers/clk/sophgo/clk-sg2042-pll.c
@@ -155,7 +155,7 @@ static unsigned long sg2042_pll_recalc_rate(unsigned int reg_value,
numerator = (u64)parent_rate * ctrl_table.fbdiv;
denominator = ctrl_table.refdiv * ctrl_table.postdiv1 * ctrl_table.postdiv2;
- do_div(numerator, denominator);
+ numerator = div64_u64(numerator, denominator);
return numerator;
}
@@ -212,7 +212,7 @@ static int sg2042_pll_get_postdiv_1_2(unsigned long rate,
tmp0 *= fbdiv;
/* ((prate/REFDIV) x FBDIV)/rate and result save to tmp0 */
- do_div(tmp0, rate);
+ tmp0 = div64_ul(tmp0, rate);
/* tmp0 is POSTDIV1*POSTDIV2, now we calculate div1 and div2 value */
if (tmp0 <= 7) {
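[Editor's note: a brief illustration of why the two hunks above move from
do_div() to the 64-by-64 helpers. Per the standard linux/math64.h semantics,
do_div() takes a 32-bit divisor, so a 64-bit denominator would be silently
truncated; the helper below is illustrative only.]

#include <linux/math64.h>

/* Illustrative only: dividing a 64-bit numerator by a 64-bit denominator */
static u64 example_scale(u64 parent_rate, u32 fbdiv, u64 denominator)
{
	u64 n = parent_rate * fbdiv;

	/*
	 * do_div(n, denominator) would use only the low 32 bits of
	 * 'denominator'; div64_u64() performs the full 64/64 division.
	 */
	return div64_u64(n, denominator);
}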
diff --git a/drivers/clk/spacemit/Kconfig b/drivers/clk/spacemit/Kconfig
index 4c4df845b3cb..3854f6ae6d0e 100644
--- a/drivers/clk/spacemit/Kconfig
+++ b/drivers/clk/spacemit/Kconfig
@@ -3,6 +3,7 @@
config SPACEMIT_CCU
tristate "Clock support for SpacemiT SoCs"
depends on ARCH_SPACEMIT || COMPILE_TEST
+ select AUXILIARY_BUS
select MFD_SYSCON
help
Say Y to enable clock controller unit support for SpacemiT SoCs.
diff --git a/drivers/clk/spacemit/ccu-k1.c b/drivers/clk/spacemit/ccu-k1.c
index cdde37a05235..65e6de030717 100644
--- a/drivers/clk/spacemit/ccu-k1.c
+++ b/drivers/clk/spacemit/ccu-k1.c
@@ -5,12 +5,16 @@
*/
#include <linux/array_size.h>
+#include <linux/auxiliary_bus.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/idr.h>
#include <linux/mfd/syscon.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/spacemit/k1-syscon.h>
#include "ccu_common.h"
#include "ccu_pll.h"
@@ -19,121 +23,14 @@
#include <dt-bindings/clock/spacemit,k1-syscon.h>
-/* APBS register offset */
-#define APBS_PLL1_SWCR1 0x100
-#define APBS_PLL1_SWCR2 0x104
-#define APBS_PLL1_SWCR3 0x108
-#define APBS_PLL2_SWCR1 0x118
-#define APBS_PLL2_SWCR2 0x11c
-#define APBS_PLL2_SWCR3 0x120
-#define APBS_PLL3_SWCR1 0x124
-#define APBS_PLL3_SWCR2 0x128
-#define APBS_PLL3_SWCR3 0x12c
-
-/* MPMU register offset */
-#define MPMU_POSR 0x0010
-#define POSR_PLL1_LOCK BIT(27)
-#define POSR_PLL2_LOCK BIT(28)
-#define POSR_PLL3_LOCK BIT(29)
-#define MPMU_SUCCR 0x0014
-#define MPMU_ISCCR 0x0044
-#define MPMU_WDTPCR 0x0200
-#define MPMU_RIPCCR 0x0210
-#define MPMU_ACGR 0x1024
-#define MPMU_APBCSCR 0x1050
-#define MPMU_SUCCR_1 0x10b0
-
-/* APBC register offset */
-#define APBC_UART1_CLK_RST 0x00
-#define APBC_UART2_CLK_RST 0x04
-#define APBC_GPIO_CLK_RST 0x08
-#define APBC_PWM0_CLK_RST 0x0c
-#define APBC_PWM1_CLK_RST 0x10
-#define APBC_PWM2_CLK_RST 0x14
-#define APBC_PWM3_CLK_RST 0x18
-#define APBC_TWSI8_CLK_RST 0x20
-#define APBC_UART3_CLK_RST 0x24
-#define APBC_RTC_CLK_RST 0x28
-#define APBC_TWSI0_CLK_RST 0x2c
-#define APBC_TWSI1_CLK_RST 0x30
-#define APBC_TIMERS1_CLK_RST 0x34
-#define APBC_TWSI2_CLK_RST 0x38
-#define APBC_AIB_CLK_RST 0x3c
-#define APBC_TWSI4_CLK_RST 0x40
-#define APBC_TIMERS2_CLK_RST 0x44
-#define APBC_ONEWIRE_CLK_RST 0x48
-#define APBC_TWSI5_CLK_RST 0x4c
-#define APBC_DRO_CLK_RST 0x58
-#define APBC_IR_CLK_RST 0x5c
-#define APBC_TWSI6_CLK_RST 0x60
-#define APBC_COUNTER_CLK_SEL 0x64
-#define APBC_TWSI7_CLK_RST 0x68
-#define APBC_TSEN_CLK_RST 0x6c
-#define APBC_UART4_CLK_RST 0x70
-#define APBC_UART5_CLK_RST 0x74
-#define APBC_UART6_CLK_RST 0x78
-#define APBC_SSP3_CLK_RST 0x7c
-#define APBC_SSPA0_CLK_RST 0x80
-#define APBC_SSPA1_CLK_RST 0x84
-#define APBC_IPC_AP2AUD_CLK_RST 0x90
-#define APBC_UART7_CLK_RST 0x94
-#define APBC_UART8_CLK_RST 0x98
-#define APBC_UART9_CLK_RST 0x9c
-#define APBC_CAN0_CLK_RST 0xa0
-#define APBC_PWM4_CLK_RST 0xa8
-#define APBC_PWM5_CLK_RST 0xac
-#define APBC_PWM6_CLK_RST 0xb0
-#define APBC_PWM7_CLK_RST 0xb4
-#define APBC_PWM8_CLK_RST 0xb8
-#define APBC_PWM9_CLK_RST 0xbc
-#define APBC_PWM10_CLK_RST 0xc0
-#define APBC_PWM11_CLK_RST 0xc4
-#define APBC_PWM12_CLK_RST 0xc8
-#define APBC_PWM13_CLK_RST 0xcc
-#define APBC_PWM14_CLK_RST 0xd0
-#define APBC_PWM15_CLK_RST 0xd4
-#define APBC_PWM16_CLK_RST 0xd8
-#define APBC_PWM17_CLK_RST 0xdc
-#define APBC_PWM18_CLK_RST 0xe0
-#define APBC_PWM19_CLK_RST 0xe4
-
-/* APMU register offset */
-#define APMU_JPG_CLK_RES_CTRL 0x020
-#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024
-#define APMU_ISP_CLK_RES_CTRL 0x038
-#define APMU_LCD_CLK_RES_CTRL1 0x044
-#define APMU_LCD_SPI_CLK_RES_CTRL 0x048
-#define APMU_LCD_CLK_RES_CTRL2 0x04c
-#define APMU_CCIC_CLK_RES_CTRL 0x050
-#define APMU_SDH0_CLK_RES_CTRL 0x054
-#define APMU_SDH1_CLK_RES_CTRL 0x058
-#define APMU_USB_CLK_RES_CTRL 0x05c
-#define APMU_QSPI_CLK_RES_CTRL 0x060
-#define APMU_DMA_CLK_RES_CTRL 0x064
-#define APMU_AES_CLK_RES_CTRL 0x068
-#define APMU_VPU_CLK_RES_CTRL 0x0a4
-#define APMU_GPU_CLK_RES_CTRL 0x0cc
-#define APMU_SDH2_CLK_RES_CTRL 0x0e0
-#define APMU_PMUA_MC_CTRL 0x0e8
-#define APMU_PMU_CC2_AP 0x100
-#define APMU_PMUA_EM_CLK_RES_CTRL 0x104
-#define APMU_AUDIO_CLK_RES_CTRL 0x14c
-#define APMU_HDMI_CLK_RES_CTRL 0x1b8
-#define APMU_CCI550_CLK_CTRL 0x300
-#define APMU_ACLK_CLK_CTRL 0x388
-#define APMU_CPU_C0_CLK_CTRL 0x38C
-#define APMU_CPU_C1_CLK_CTRL 0x390
-#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc
-#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4
-#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc
-#define APMU_EMAC0_CLK_RES_CTRL 0x3e4
-#define APMU_EMAC1_CLK_RES_CTRL 0x3ec
-
struct spacemit_ccu_data {
+ const char *reset_name;
struct clk_hw **hws;
size_t num;
};
+static DEFINE_IDA(auxiliary_ids);
+
/* APBS clocks start, APBS region contains and only contains all PLL clocks */
/*
@@ -170,7 +67,8 @@ CCU_FACTOR_GATE_DEFINE(pll1_d4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(3), 4,
CCU_FACTOR_GATE_DEFINE(pll1_d5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(4), 5, 1);
CCU_FACTOR_GATE_DEFINE(pll1_d6, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(5), 6, 1);
CCU_FACTOR_GATE_DEFINE(pll1_d7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(6), 7, 1);
-CCU_FACTOR_GATE_DEFINE(pll1_d8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(7), 8, 1);
+CCU_FACTOR_GATE_FLAGS_DEFINE(pll1_d8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(7), 8, 1,
+ CLK_IS_CRITICAL);
CCU_FACTOR_GATE_DEFINE(pll1_d11_223p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(15), 11, 1);
CCU_FACTOR_GATE_DEFINE(pll1_d13_189, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(16), 13, 1);
CCU_FACTOR_GATE_DEFINE(pll1_d23_106p8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(20), 23, 1);
@@ -819,8 +717,9 @@ static struct clk_hw *k1_ccu_pll_hws[] = {
};
static const struct spacemit_ccu_data k1_ccu_pll_data = {
- .hws = k1_ccu_pll_hws,
- .num = ARRAY_SIZE(k1_ccu_pll_hws),
+ /* The PLL CCU implements no resets */
+ .hws = k1_ccu_pll_hws,
+ .num = ARRAY_SIZE(k1_ccu_pll_hws),
};
static struct clk_hw *k1_ccu_mpmu_hws[] = {
@@ -860,8 +759,9 @@ static struct clk_hw *k1_ccu_mpmu_hws[] = {
};
static const struct spacemit_ccu_data k1_ccu_mpmu_data = {
- .hws = k1_ccu_mpmu_hws,
- .num = ARRAY_SIZE(k1_ccu_mpmu_hws),
+ .reset_name = "mpmu-reset",
+ .hws = k1_ccu_mpmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_mpmu_hws),
};
static struct clk_hw *k1_ccu_apbc_hws[] = {
@@ -968,8 +868,9 @@ static struct clk_hw *k1_ccu_apbc_hws[] = {
};
static const struct spacemit_ccu_data k1_ccu_apbc_data = {
- .hws = k1_ccu_apbc_hws,
- .num = ARRAY_SIZE(k1_ccu_apbc_hws),
+ .reset_name = "apbc-reset",
+ .hws = k1_ccu_apbc_hws,
+ .num = ARRAY_SIZE(k1_ccu_apbc_hws),
};
static struct clk_hw *k1_ccu_apmu_hws[] = {
@@ -1038,8 +939,21 @@ static struct clk_hw *k1_ccu_apmu_hws[] = {
};
static const struct spacemit_ccu_data k1_ccu_apmu_data = {
- .hws = k1_ccu_apmu_hws,
- .num = ARRAY_SIZE(k1_ccu_apmu_hws),
+ .reset_name = "apmu-reset",
+ .hws = k1_ccu_apmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_apmu_hws),
+};
+
+static const struct spacemit_ccu_data k1_ccu_rcpu_data = {
+ .reset_name = "rcpu-reset",
+};
+
+static const struct spacemit_ccu_data k1_ccu_rcpu2_data = {
+ .reset_name = "rcpu2-reset",
+};
+
+static const struct spacemit_ccu_data k1_ccu_apbc2_data = {
+ .reset_name = "apbc2-reset",
};
static int spacemit_ccu_register(struct device *dev,
@@ -1050,6 +964,10 @@ static int spacemit_ccu_register(struct device *dev,
struct clk_hw_onecell_data *clk_data;
int i, ret;
+ /* Nothing to do if the CCU does not implement any clocks */
+ if (!data->hws)
+ return 0;
+
clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, data->num),
GFP_KERNEL);
if (!clk_data)
@@ -1090,9 +1008,74 @@ static int spacemit_ccu_register(struct device *dev,
return ret;
}
+static void spacemit_cadev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ ida_free(&auxiliary_ids, adev->id);
+ kfree(to_spacemit_ccu_adev(adev));
+}
+
+static void spacemit_adev_unregister(void *data)
+{
+ struct auxiliary_device *adev = data;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+static int spacemit_ccu_reset_register(struct device *dev,
+ struct regmap *regmap,
+ const char *reset_name)
+{
+ struct spacemit_ccu_adev *cadev;
+ struct auxiliary_device *adev;
+ int ret;
+
+ /* Nothing to do if the CCU does not implement a reset controller */
+ if (!reset_name)
+ return 0;
+
+ cadev = kzalloc(sizeof(*cadev), GFP_KERNEL);
+ if (!cadev)
+ return -ENOMEM;
+
+ cadev->regmap = regmap;
+
+ adev = &cadev->adev;
+ adev->name = reset_name;
+ adev->dev.parent = dev;
+ adev->dev.release = spacemit_cadev_release;
+ adev->dev.of_node = dev->of_node;
+ ret = ida_alloc(&auxiliary_ids, GFP_KERNEL);
+ if (ret < 0)
+ goto err_free_cadev;
+ adev->id = ret;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ goto err_free_aux_id;
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(dev, spacemit_adev_unregister, adev);
+
+err_free_aux_id:
+ ida_free(&auxiliary_ids, adev->id);
+err_free_cadev:
+ kfree(cadev);
+
+ return ret;
+}
+
static int k1_ccu_probe(struct platform_device *pdev)
{
struct regmap *base_regmap, *lock_regmap = NULL;
+ const struct spacemit_ccu_data *data;
struct device *dev = &pdev->dev;
int ret;
@@ -1121,11 +1104,16 @@ static int k1_ccu_probe(struct platform_device *pdev)
"failed to get lock regmap\n");
}
- ret = spacemit_ccu_register(dev, base_regmap, lock_regmap,
- of_device_get_match_data(dev));
+ data = of_device_get_match_data(dev);
+
+ ret = spacemit_ccu_register(dev, base_regmap, lock_regmap, data);
if (ret)
return dev_err_probe(dev, ret, "failed to register clocks\n");
+ ret = spacemit_ccu_reset_register(dev, base_regmap, data->reset_name);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register resets\n");
+
return 0;
}
@@ -1146,6 +1134,18 @@ static const struct of_device_id of_k1_ccu_match[] = {
.compatible = "spacemit,k1-syscon-apmu",
.data = &k1_ccu_apmu_data,
},
+ {
+ .compatible = "spacemit,k1-syscon-rcpu",
+ .data = &k1_ccu_rcpu_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-rcpu2",
+ .data = &k1_ccu_rcpu2_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apbc2",
+ .data = &k1_ccu_apbc2_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, of_k1_ccu_match);
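[Editor's note: a hedged sketch of the consumer side of the auxiliary devices
registered above -- a reset driver that binds to, e.g., "mpmu-reset". The
"spacemit_ccu_k1" module prefix in the id string is an assumption: the real
prefix is the KBUILD_MODNAME of the module that creates the auxiliary device.]

#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <soc/spacemit/k1-syscon.h>

static int k1_reset_probe(struct auxiliary_device *adev,
			  const struct auxiliary_device_id *id)
{
	struct spacemit_ccu_adev *cadev = to_spacemit_ccu_adev(adev);

	/* cadev->regmap gives the reset driver access to the CCU registers */
	(void)cadev;
	return 0;
}

static const struct auxiliary_device_id k1_reset_ids[] = {
	{ .name = "spacemit_ccu_k1.mpmu-reset" },	/* prefix assumed */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(auxiliary, k1_reset_ids);

static struct auxiliary_driver k1_reset_driver = {
	.probe = k1_reset_probe,
	.id_table = k1_reset_ids,
};
module_auxiliary_driver(k1_reset_driver);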
diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h
index 51d19f5d6aac..54d40cd39b27 100644
--- a/drivers/clk/spacemit/ccu_mix.h
+++ b/drivers/clk/spacemit/ccu_mix.h
@@ -101,17 +101,22 @@ static struct ccu_mix _name = { \
} \
}
-#define CCU_FACTOR_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
- _mul) \
+#define CCU_FACTOR_GATE_FLAGS_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul, _flags) \
static struct ccu_mix _name = { \
.gate = CCU_GATE_INIT(_mask_gate), \
.factor = CCU_FACTOR_INIT(_div, _mul), \
.common = { \
.reg_ctrl = _reg_ctrl, \
- CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_gate_ops, 0) \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_gate_ops, _flags) \
} \
}
+#define CCU_FACTOR_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul) \
+ CCU_FACTOR_GATE_FLAGS_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul, 0)
+
#define CCU_MUX_GATE_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, \
_mask_gate, _flags) \
static struct ccu_mix _name = { \
diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c
index 4427dcfbbb97..45f540073a65 100644
--- a/drivers/clk/spacemit/ccu_pll.c
+++ b/drivers/clk/spacemit/ccu_pll.c
@@ -122,7 +122,7 @@ static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
WARN_ON_ONCE(!entry);
- return entry ? entry->rate : -EINVAL;
+ return entry ? entry->rate : 0;
}
static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 361d344bfaf0..fdfb26c67188 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -199,7 +199,7 @@ static struct frac_rate_tbl amba_synth_rtbl[] = {
* We can program this synthesizer to make cpu run on different clock
* frequencies.
* Following table provides configuration values to let cpu run on 200,
- * 250, 332, 400 or 500 MHz considering different possibilites of input
+ * 250, 332, 400 or 500 MHz considering different possibilities of input
* (vco1div2) clock.
*
* --------------------------------------------------------------------
diff --git a/drivers/clk/sprd/gate.h b/drivers/clk/sprd/gate.h
index e738dafa4fe9..775519eb1cb6 100644
--- a/drivers/clk/sprd/gate.h
+++ b/drivers/clk/sprd/gate.h
@@ -26,7 +26,7 @@ struct sprd_gate {
* CLK_GATE_BIG_ENDIAN BIT(2)
* so we define new flags from BIT(3)
*/
-#define SPRD_GATE_NON_AON BIT(3) /* not alway powered on, check before read */
+#define SPRD_GATE_NON_AON BIT(3) /* not always powered on, check before read */
#define SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
_sc_offset, _enable_mask, _flags, \
diff --git a/drivers/clk/sprd/ums512-clk.c b/drivers/clk/sprd/ums512-clk.c
index 9384ecc6c741..f763d83de9ee 100644
--- a/drivers/clk/sprd/ums512-clk.c
+++ b/drivers/clk/sprd/ums512-clk.c
@@ -1550,7 +1550,7 @@ static struct sprd_clk_desc ums512_aon_gate_desc = {
/* audcp apb gates */
/* Audcp apb clocks configure CLK_IGNORE_UNUSED because these clocks may be
- * controlled by audcp sys at the same time. It may be cause an execption if
+ * controlled by audcp sys at the same time. It may cause an exception if
* kernel gates these clock.
*/
static SPRD_SC_GATE_CLK_HW(audcp_wdg_eb, "audcp-wdg-eb",
@@ -1592,7 +1592,7 @@ static const struct sprd_clk_desc ums512_audcpapb_gate_desc = {
/* audcp ahb gates */
/* Audcp aphb clocks configure CLK_IGNORE_UNUSED because these clocks may be
- * controlled by audcp sys at the same time. It may be cause an execption if
+ * controlled by audcp sys at the same time. It may cause an exception if
* kernel gates these clock.
*/
static SPRD_SC_GATE_CLK_HW(audcp_iis0_eb, "audcp-iis0-eb",
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
index e9d8168d02b8..52833d4241c5 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
@@ -376,7 +376,7 @@ EXPORT_SYMBOL_GPL(jh7110_reset_controller_register);
/*
* This clock notifier is called when the rate of PLL0 clock is to be changed.
- * The cpu_root clock should save the curent parent clock and switch its parent
+ * The cpu_root clock should save the current parent clock and switch its parent
* clock to osc before PLL0 rate will be changed. Then switch its parent clock
* back after the PLL0 rate is completed.
*/
diff --git a/drivers/clk/stm32/Kconfig b/drivers/clk/stm32/Kconfig
index dca409d52652..4d2eb993ea08 100644
--- a/drivers/clk/stm32/Kconfig
+++ b/drivers/clk/stm32/Kconfig
@@ -4,7 +4,7 @@
menuconfig COMMON_CLK_STM32MP
bool "Clock support for common STM32MP clocks"
depends on ARCH_STM32 || COMPILE_TEST
- default y
+ default ARCH_STM32
select RESET_CONTROLLER
help
Support for STM32MP SoC family clocks.
@@ -14,21 +14,21 @@ if COMMON_CLK_STM32MP
config COMMON_CLK_STM32MP135
bool "Clock driver for stm32mp13x clocks"
depends on ARM || COMPILE_TEST
- default y
+ default ARCH_STM32
help
Support for stm32mp13x SoC family clocks.
config COMMON_CLK_STM32MP157
bool "Clock driver for stm32mp15x clocks"
depends on ARM || COMPILE_TEST
- default y
+ default ARCH_STM32
help
Support for stm32mp15x SoC family clocks.
config COMMON_CLK_STM32MP257
bool "Clock driver for stm32mp25x clocks"
depends on ARM64 || COMPILE_TEST
- default y
+ default ARCH_STM32
help
Support for stm32mp25x SoC family clocks.
diff --git a/drivers/clk/stm32/clk-stm32mp1.c b/drivers/clk/stm32/clk-stm32mp1.c
index 5fcc4c77c11f..b8b45ed22f98 100644
--- a/drivers/clk/stm32/clk-stm32mp1.c
+++ b/drivers/clk/stm32/clk-stm32mp1.c
@@ -2041,7 +2041,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
KCLK(ADFSDM_K, "adfsdm_k", sai_src, 0, G_ADFSDM, M_SAI1),
KCLK(USBO_K, "usbo_k", usbo_src, 0, G_USBO, M_USBO),
- /* Particulary Kernel Clocks (no mux or no gate) */
+ /* Particularly Kernel Clocks (no mux or no gate) */
MGATE_MP1(DFSDM_K, "dfsdm_k", "ck_mcu", 0, G_DFSDM),
MGATE_MP1(DSI_PX, "dsi_px", "pll4_q", CLK_SET_RATE_PARENT, G_DSI),
MGATE_MP1(LTDC_PX, "ltdc_px", "pll4_q", CLK_SET_RATE_PARENT, G_LTDC),
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index acb4e8b9b1ba..d24fa3449303 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -80,7 +80,7 @@ static struct ccu_div r_apb2_clk = {
* in the BSP source code, although most of them are unused. The existence
* of the hardware block is verified with "3.1 Memory Mapping" chapter in
* "Allwinner H6 V200 User Manual V1.1"; and the parent APB buses are verified
- * with "3.3.2.1 System Bus Tree" chapter inthe same document.
+ * with "3.3.2.1 System Bus Tree" chapter in the same document.
*/
static SUNXI_CCU_GATE(r_apb1_timer_clk, "r-apb1-timer", "r-apb1",
0x11c, BIT(0), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
index b5464d8083c8..70ce0ca0cb7d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
@@ -204,6 +204,7 @@ static struct ccu_reset_map sun55i_a523_r_ccu_resets[] = {
[RST_BUS_R_IR_RX] = { 0x1cc, BIT(16) },
[RST_BUS_R_RTC] = { 0x20c, BIT(16) },
[RST_BUS_R_CPUCFG] = { 0x22c, BIT(16) },
+ [RST_BUS_R_PPU0] = { 0x1ac, BIT(16) },
};
static const struct sunxi_ccu_desc sun55i_a523_r_ccu_desc = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 8b729c9b3545..44565830881d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -439,7 +439,7 @@ static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
static SUNXI_CCU_GATE(bus_i2c3_clk, "bus-i2c3", "apb2",
0x06c, BIT(3), 0);
/*
- * In datasheet here's "Reserved", however the gate exists in BSP soucre
+ * In the datasheet this is "Reserved"; however, the gate exists in BSP source
* code.
*/
static SUNXI_CCU_GATE(bus_can_clk, "bus-can", "apb2",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 52e4369664c5..05595ac51b76 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -347,12 +347,13 @@ static SUNXI_CCU_GATE(dram_ohci_clk, "dram-ohci", "dram",
static const char * const de_parents[] = { "pll-video", "pll-periph0" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 2, BIT(31),
- CLK_SET_RATE_PARENT);
+ 0x104, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_NO_REPARENT);
static const char * const tcon_parents[] = { "pll-video", "pll-periph0" };
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents,
- 0x118, 0, 4, 24, 3, BIT(31), 0);
+ 0x118, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_NO_REPARENT);
static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M",
0x130, BIT(31), 0);
@@ -754,6 +755,21 @@ static int sun8i_v3s_ccu_probe(struct platform_device *pdev)
val &= ~GENMASK(19, 16);
writel(val, reg + SUN8I_V3S_PLL_AUDIO_REG);
+ /*
+ * Assign the DE and TCON clock to the video PLL. Both clocks need to
+ * have the same parent for the units to work together.
+ */
+
+ val = readl(reg + de_clk.common.reg);
+ val &= ~GENMASK(de_clk.mux.shift + de_clk.mux.width - 1,
+ de_clk.mux.shift);
+ writel(val, reg + de_clk.common.reg);
+
+ val = readl(reg + tcon_clk.common.reg);
+ val &= ~GENMASK(tcon_clk.mux.shift + tcon_clk.mux.width - 1,
+ tcon_clk.mux.shift);
+ writel(val, reg + tcon_clk.common.reg);
+
return devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
}
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 88ed89658d45..c7e00f0c29a5 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -66,7 +66,7 @@ EXPORT_SYMBOL_NS_GPL(ccu_is_better_rate, "SUNXI_CCU");
* changed. In common PLL designs, changes to the dividers take effect
* almost immediately, while changes to the multipliers (implemented
* as dividers in the feedback loop) take a few cycles to work into
- * the feedback loop for the PLL to stablize.
+ * the feedback loop for the PLL to stabilize.
*
* Sometimes when the PLL clock rate is changed, the decrease in the
* divider is too much for the decrease in the multiplier to catch up.
diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c
index 474a9e8831f8..30673fe4e3c2 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.c
+++ b/drivers/clk/sunxi-ng/ccu_gate.c
@@ -91,8 +91,8 @@ static unsigned long ccu_gate_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ccu_gate_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ccu_gate_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_gate *cg = hw_to_ccu_gate(hw);
int div = 1;
@@ -101,14 +101,16 @@ static long ccu_gate_round_rate(struct clk_hw *hw, unsigned long rate,
div = cg->common.prediv;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
- unsigned long best_parent = rate;
+ unsigned long best_parent = req->rate;
if (cg->common.features & CCU_FEATURE_ALL_PREDIV)
best_parent *= div;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return *prate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int ccu_gate_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -127,7 +129,7 @@ const struct clk_ops ccu_gate_ops = {
.disable = ccu_gate_disable,
.enable = ccu_gate_enable,
.is_enabled = ccu_gate_is_enabled,
- .round_rate = ccu_gate_round_rate,
+ .determine_rate = ccu_gate_determine_rate,
.set_rate = ccu_gate_set_rate,
.recalc_rate = ccu_gate_recalc_rate,
};
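[Editor's note: the same .round_rate -> .determine_rate conversion recurs in
the ccu_nk, ccu_nkmp and ccu_nm hunks that follow. A minimal sketch of the
mechanical mapping, with generic names not taken from any one driver:
'rate' becomes req->rate (updated in place), '*parent_rate' becomes
req->best_parent_rate, and the op returns 0 or a negative errno instead of
the rate itself.]

#include <linux/minmax.h>

static int example_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/* old body shape: rate = compute(*parent_rate, rate); return rate; */
	req->rate = min(req->rate, req->best_parent_rate);

	return 0;
}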
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index 555e99de2cc6..5db748fbb5bd 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -92,26 +92,26 @@ static unsigned long ccu_nk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ccu_nk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_nk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_nk *nk = hw_to_ccu_nk(hw);
struct _ccu_nk _nk;
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate *= nk->fixed_post_div;
+ req->rate *= nk->fixed_post_div;
_nk.min_n = nk->n.min ?: 1;
_nk.max_n = nk->n.max ?: 1 << nk->n.width;
_nk.min_k = nk->k.min ?: 1;
_nk.max_k = nk->k.max ?: 1 << nk->k.width;
- rate = ccu_nk_find_best(*parent_rate, rate, &_nk);
+ req->rate = ccu_nk_find_best(req->best_parent_rate, req->rate, &_nk);
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate = rate / nk->fixed_post_div;
+ req->rate = req->rate / nk->fixed_post_div;
- return rate;
+ return 0;
}
static int ccu_nk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -155,7 +155,7 @@ const struct clk_ops ccu_nk_ops = {
.is_enabled = ccu_nk_is_enabled,
.recalc_rate = ccu_nk_recalc_rate,
- .round_rate = ccu_nk_round_rate,
+ .determine_rate = ccu_nk_determine_rate,
.set_rate = ccu_nk_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_nk_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index 6e03b69d4028..25efb5b37607 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -127,20 +127,20 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_nkmp_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
struct _ccu_nkmp _nkmp;
if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate *= nkmp->fixed_post_div;
+ req->rate *= nkmp->fixed_post_div;
- if (nkmp->max_rate && rate > nkmp->max_rate) {
- rate = nkmp->max_rate;
+ if (nkmp->max_rate && req->rate > nkmp->max_rate) {
+ req->rate = nkmp->max_rate;
if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nkmp->fixed_post_div;
- return rate;
+ req->rate /= nkmp->fixed_post_div;
+ return 0;
}
_nkmp.min_n = nkmp->n.min ?: 1;
@@ -152,12 +152,13 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
_nkmp.min_p = 1;
_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
- rate = ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
+ req->rate = ccu_nkmp_find_best(req->best_parent_rate, req->rate,
+ &_nkmp);
if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate = rate / nkmp->fixed_post_div;
+ req->rate = req->rate / nkmp->fixed_post_div;
- return rate;
+ return 0;
}
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -227,7 +228,7 @@ const struct clk_ops ccu_nkmp_ops = {
.is_enabled = ccu_nkmp_is_enabled,
.recalc_rate = ccu_nkmp_recalc_rate,
- .round_rate = ccu_nkmp_round_rate,
+ .determine_rate = ccu_nkmp_determine_rate,
.set_rate = ccu_nkmp_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_nkmp_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index a4e2243b8d6b..df01ed3b37a6 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -116,39 +116,39 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_nm_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_nm *nm = hw_to_ccu_nm(hw);
struct _ccu_nm _nm;
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate *= nm->fixed_post_div;
+ req->rate *= nm->fixed_post_div;
- if (rate < nm->min_rate) {
- rate = nm->min_rate;
+ if (req->rate < nm->min_rate) {
+ req->rate = nm->min_rate;
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
- return rate;
+ req->rate /= nm->fixed_post_div;
+ return 0;
}
- if (nm->max_rate && rate > nm->max_rate) {
- rate = nm->max_rate;
+ if (nm->max_rate && req->rate > nm->max_rate) {
+ req->rate = nm->max_rate;
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
- return rate;
+ req->rate /= nm->fixed_post_div;
+ return 0;
}
- if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
+ if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, req->rate)) {
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
- return rate;
+ req->rate /= nm->fixed_post_div;
+ return 0;
}
- if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
+ if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, req->rate)) {
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
- return rate;
+ req->rate /= nm->fixed_post_div;
+ return 0;
}
_nm.min_n = nm->n.min ?: 1;
@@ -156,12 +156,13 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
_nm.min_m = 1;
_nm.max_m = nm->m.max ?: 1 << nm->m.width;
- rate = ccu_nm_find_best(&nm->common, *parent_rate, rate, &_nm);
+ req->rate = ccu_nm_find_best(&nm->common, req->best_parent_rate,
+ req->rate, &_nm);
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
+ req->rate /= nm->fixed_post_div;
- return rate;
+ return 0;
}
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -233,7 +234,7 @@ const struct clk_ops ccu_nm_ops = {
.is_enabled = ccu_nm_is_enabled,
.recalc_rate = ccu_nm_recalc_rate,
- .round_rate = ccu_nm_round_rate,
+ .determine_rate = ccu_nm_determine_rate,
.set_rate = ccu_nm_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_nm_ops, "SUNXI_CCU");
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index 0626650a7011..fa0cd7bb8ee6 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -51,7 +51,7 @@ static int clk_periph_determine_rate(struct clk_hw *hw,
struct tegra_clk_periph *periph = to_clk_periph(hw);
const struct clk_ops *div_ops = periph->div_ops;
struct clk_hw *div_hw = &periph->divider.hw;
- unsigned long rate;
+ long rate;
__clk_hw_set_clk(div_hw, hw);
@@ -59,7 +59,7 @@ static int clk_periph_determine_rate(struct clk_hw *hw,
if (rate < 0)
return rate;
- req->rate = rate;
+ req->rate = (unsigned long)rate;
return 0;
}
@@ -132,7 +132,7 @@ static void clk_periph_restore_context(struct clk_hw *hw)
clk_periph_set_parent(hw, parent_id);
}
-const struct clk_ops tegra_clk_periph_ops = {
+static const struct clk_ops tegra_clk_periph_ops = {
.get_parent = clk_periph_get_parent,
.set_parent = clk_periph_set_parent,
.recalc_rate = clk_periph_recalc_rate,
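[Editor's note on the type change in the first hunk above, since the diff
alone is terse:]

/*
 * With the previous 'unsigned long rate', a negative errno returned by
 * div_ops->round_rate() wrapped to a large positive value, so the
 * 'if (rate < 0)' check could never fire.  Declaring 'rate' as 'long'
 * (and casting back to unsigned long on success) makes the error path
 * reachable.
 */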
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index a3488aaac3f7..412902f573b5 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -255,7 +255,7 @@
/* VIC register to handle during MBIST WAR */
#define NV_PVIC_THI_SLCG_OVERRIDE_LOW 0x8c
-/* APE, DISPA and VIC base addesses needed for MBIST WAR */
+/* APE, DISPA and VIC base addresses needed for MBIST WAR */
#define TEGRA210_AHUB_BASE 0x702d0000
#define TEGRA210_DISPA_BASE 0x54200000
#define TEGRA210_VIC_BASE 0x54340000
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 5d80d8b79b8e..9ea839af14bc 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -629,7 +629,6 @@ struct tegra_clk_periph {
#define TEGRA_CLK_PERIPH_MAGIC 0x18221223
-extern const struct clk_ops tegra_clk_periph_ops;
struct clk *tegra_clk_register_periph(const char *name,
const char * const *parent_names, int num_parents,
struct tegra_clk_periph *periph, void __iomem *clk_base,
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index ebfb1d59401d..cf1bba58f641 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -42,8 +42,9 @@ struct ccu_common {
};
struct ccu_mux {
- struct ccu_internal mux;
- struct ccu_common common;
+ int clkid;
+ u32 reg;
+ struct clk_mux mux;
};
struct ccu_gate {
@@ -75,6 +76,17 @@ struct ccu_pll {
.flags = _flags, \
}
+#define TH_CCU_MUX(_name, _parents, _shift, _width) \
+ { \
+ .mask = GENMASK(_width - 1, 0), \
+ .shift = _shift, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA( \
+ _name, \
+ _parents, \
+ &clk_mux_ops, \
+ 0), \
+ }
+
#define CCU_GATE(_clkid, _struct, _name, _parent, _reg, _gate, _flags) \
struct ccu_gate _struct = { \
.enable = _gate, \
@@ -94,13 +106,6 @@ static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw)
return container_of(hw, struct ccu_common, hw);
}
-static inline struct ccu_mux *hw_to_ccu_mux(struct clk_hw *hw)
-{
- struct ccu_common *common = hw_to_ccu_common(hw);
-
- return container_of(common, struct ccu_mux, common);
-}
-
static inline struct ccu_pll *hw_to_ccu_pll(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
@@ -415,32 +420,20 @@ static const struct clk_parent_data c910_i0_parents[] = {
};
static struct ccu_mux c910_i0_clk = {
- .mux = TH_CCU_ARG(1, 1),
- .common = {
- .clkid = CLK_C910_I0,
- .cfg0 = 0x100,
- .hw.init = CLK_HW_INIT_PARENTS_DATA("c910-i0",
- c910_i0_parents,
- &clk_mux_ops,
- 0),
- }
+ .clkid = CLK_C910_I0,
+ .reg = 0x100,
+ .mux = TH_CCU_MUX("c910-i0", c910_i0_parents, 1, 1),
};
static const struct clk_parent_data c910_parents[] = {
- { .hw = &c910_i0_clk.common.hw },
+ { .hw = &c910_i0_clk.mux.hw },
{ .hw = &cpu_pll1_clk.common.hw }
};
static struct ccu_mux c910_clk = {
- .mux = TH_CCU_ARG(0, 1),
- .common = {
- .clkid = CLK_C910,
- .cfg0 = 0x100,
- .hw.init = CLK_HW_INIT_PARENTS_DATA("c910",
- c910_parents,
- &clk_mux_ops,
- 0),
- }
+ .clkid = CLK_C910,
+ .reg = 0x100,
+ .mux = TH_CCU_MUX("c910", c910_parents, 0, 1),
};
static const struct clk_parent_data ahb2_cpusys_parents[] = {
@@ -582,7 +575,14 @@ static const struct clk_parent_data peri2sys_apb_pclk_pd[] = {
{ .hw = &peri2sys_apb_pclk.common.hw }
};
-static CLK_FIXED_FACTOR_FW_NAME(osc12m_clk, "osc_12m", "osc_24m", 2, 1, 0);
+static struct clk_fixed_factor osc12m_clk = {
+ .div = 2,
+ .mult = 1,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("osc_12m",
+ osc_24m_clk,
+ &clk_fixed_factor_ops,
+ 0),
+};
static const char * const out_parents[] = { "osc_24m", "osc_12m" };
@@ -792,11 +792,12 @@ static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_ac
0x134, BIT(8), 0);
static CCU_GATE(CLK_X2X_CPUSYS, x2x_cpusys_clk, "x2x-cpusys", axi4_cpusys2_aclk_pd,
0x134, BIT(7), 0);
-static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd, 0x138, BIT(8), 0);
+static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd,
+ 0x138, BIT(8), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_CPU2PERI_X2H, cpu2peri_x2h_clk, "cpu2peri-x2h", axi4_cpusys2_aclk_pd,
0x140, BIT(9), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_PERISYS_APB1_HCLK, perisys_apb1_hclk, "perisys-apb1-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(9), 0);
+ 0x150, BIT(9), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_PERISYS_APB2_HCLK, perisys_apb2_hclk, "perisys-apb2-hclk", perisys_ahb_hclk_pd,
0x150, BIT(10), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_PERISYS_APB3_HCLK, perisys_apb3_hclk, "perisys-apb3-hclk", perisys_ahb_hclk_pd,
@@ -917,15 +918,9 @@ static const struct clk_parent_data uart_sclk_parents[] = {
};
static struct ccu_mux uart_sclk = {
- .mux = TH_CCU_ARG(0, 1),
- .common = {
- .clkid = CLK_UART_SCLK,
- .cfg0 = 0x210,
- .hw.init = CLK_HW_INIT_PARENTS_DATA("uart-sclk",
- uart_sclk_parents,
- &clk_mux_ops,
- 0),
- }
+ .clkid = CLK_UART_SCLK,
+ .reg = 0x210,
+ .mux = TH_CCU_MUX("uart-sclk", uart_sclk_parents, 0, 1),
};
static struct ccu_common *th1520_pll_clks[] = {
@@ -962,10 +957,10 @@ static struct ccu_common *th1520_div_clks[] = {
&dpu1_clk.common,
};
-static struct ccu_common *th1520_mux_clks[] = {
- &c910_i0_clk.common,
- &c910_clk.common,
- &uart_sclk.common,
+static struct ccu_mux *th1520_mux_clks[] = {
+ &c910_i0_clk,
+ &c910_clk,
+ &uart_sclk,
};
static struct ccu_common *th1520_gate_clks[] = {
@@ -1067,7 +1062,7 @@ static const struct regmap_config th1520_clk_regmap_config = {
struct th1520_plat_data {
struct ccu_common **th1520_pll_clks;
struct ccu_common **th1520_div_clks;
- struct ccu_common **th1520_mux_clks;
+ struct ccu_mux **th1520_mux_clks;
struct ccu_common **th1520_gate_clks;
int nr_clks;
@@ -1154,23 +1149,15 @@ static int th1520_clk_probe(struct platform_device *pdev)
}
for (i = 0; i < plat_data->nr_mux_clks; i++) {
- struct ccu_mux *cm = hw_to_ccu_mux(&plat_data->th1520_mux_clks[i]->hw);
- const struct clk_init_data *init = cm->common.hw.init;
-
- plat_data->th1520_mux_clks[i]->map = map;
- hw = devm_clk_hw_register_mux_parent_data_table(dev,
- init->name,
- init->parent_data,
- init->num_parents,
- 0,
- base + cm->common.cfg0,
- cm->mux.shift,
- cm->mux.width,
- 0, NULL, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ struct ccu_mux *cm = plat_data->th1520_mux_clks[i];
+
+ cm->mux.reg = base + cm->reg;
+
+ ret = devm_clk_hw_register(dev, &cm->mux.hw);
+ if (ret)
+ return ret;
- priv->hws[cm->common.clkid] = hw;
+ priv->hws[cm->clkid] = &cm->mux.hw;
}
for (i = 0; i < plat_data->nr_gate_clks; i++) {
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
index 27e6b9cb1881..a99aaf2e7684 100644
--- a/drivers/clk/ti/autoidle.c
+++ b/drivers/clk/ti/autoidle.c
@@ -30,7 +30,7 @@ static LIST_HEAD(autoidle_clks);
/*
* we have some non-atomic read/write
- * operations behind it, so lets
+ * operations behind it, so let's
* take one lock for handling autoidle
* of all clocks
*/
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index f24f6eb2157a..35af3079c002 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -286,7 +286,7 @@ int __init am43xx_dt_clk_init(void)
/*
* cpsw_cpts_rft_clk has got the choice of 3 clocksources
* dpll_core_m4_ck, dpll_core_m5_ck and dpll_disp_m2_ck.
- * By default dpll_core_m4_ck is selected, witn this as clock
+ * By default dpll_core_m4_ck is selected, with this as clock
* source the CPTS doesnot work properly. It gives clockcheck errors
* while running PTP.
* clockcheck: clock jumped backward or running slower than expected!
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 9c75dcc9a534..693a4459a01b 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -118,13 +118,10 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
* Eventually we could standardize to using '_' for clk-*.c files to follow the
* TRM naming.
*/
-static struct device_node *ti_find_clock_provider(struct device_node *from,
- const char *name)
+static struct device_node *ti_find_clock_provider(const char *name)
{
char *tmp __free(kfree) = NULL;
struct device_node *np;
- bool found = false;
- const char *n;
char *p;
tmp = kstrdup_and_replace(name, '-', '_', GFP_KERNEL);
@@ -137,25 +134,13 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
*p = '\0';
/* Node named "clock" with "clock-output-names" */
- for_each_of_allnodes_from(from, np) {
- if (of_property_read_string_index(np, "clock-output-names",
- 0, &n))
- continue;
-
- if (!strncmp(n, tmp, strlen(tmp))) {
- of_node_get(np);
- found = true;
- break;
- }
- }
-
- if (found) {
- of_node_put(from);
- return np;
+ for_each_node_with_property(np, "clock-output-names") {
+ if (of_property_match_string(np, "clock-output-names", tmp) == 0)
+ return np;
}
/* Fall back to using old node name base provider name */
- return of_find_node_by_name(from, tmp);
+ return of_find_node_by_name(NULL, tmp);
}
/**
@@ -208,7 +193,7 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
if (num_args && clkctrl_nodes_missing)
continue;
- node = ti_find_clock_provider(NULL, buf);
+ node = ti_find_clock_provider(buf);
if (num_args && compat_mode) {
parent = node;
child = of_get_child_by_name(parent, "clock");
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index f684fc306ecc..d6d247ff2be5 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -84,7 +84,7 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
}
/**
- * clk_mux_save_context - Save the parent selcted in the mux
+ * clk_mux_save_context - Save the parent selected in the mux
* @hw: pointer struct clk_hw
*
* Save the parent mux value.
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index d5cb372f0901..b69c3fbdfbce 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -194,7 +194,7 @@ static int vco_set(struct clk_icst *icst, struct icst_vco vco)
pr_err("ICST error: tried to use RDW != 22\n");
break;
default:
- /* Regular auxilary oscillator */
+ /* Regular auxiliary oscillator */
mask = VERSATILE_AUX_OSC_BITS;
val = vco.v | (vco.r << 9) | (vco.s << 16);
break;
diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
index 3f929cf8dd2f..8ca1bad61864 100644
--- a/drivers/clk/visconti/pll.c
+++ b/drivers/clk/visconti/pll.c
@@ -107,7 +107,7 @@ static long visconti_pll_round_rate(struct clk_hw *hw,
const struct visconti_pll_rate_table *rate_table = pll->rate_table;
int i;
- /* Assumming rate_table is in descending order */
+ /* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
if (rate >= rate_table[i].rate)
return rate_table[i].rate;
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index bbf7714480e7..0295a13a811c 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -669,7 +669,7 @@ static long clk_wzrd_ver_round_rate_all(struct clk_hw *hw, unsigned long rate,
u32 m, d, o, div, f;
int err;
- err = clk_wzrd_get_divisors(hw, rate, *prate);
+ err = clk_wzrd_get_divisors_ver(hw, rate, *prate);
if (err)
return err;
diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c
index 81501b48412e..1ded67bee06c 100644
--- a/drivers/clk/xilinx/xlnx_vcu.c
+++ b/drivers/clk/xilinx/xlnx_vcu.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/xlnx-vcu.h>
@@ -51,6 +52,7 @@
* @dev: Platform device
* @pll_ref: pll ref clock source
* @aclk: axi clock source
+ * @reset_gpio: vcu reset gpio
* @logicore_reg_ba: logicore reg base address
* @vcu_slcr_ba: vcu_slcr Register base address
* @pll: handle for the VCU PLL
@@ -61,6 +63,7 @@ struct xvcu_device {
struct device *dev;
struct clk *pll_ref;
struct clk *aclk;
+ struct gpio_desc *reset_gpio;
struct regmap *logicore_reg_ba;
void __iomem *vcu_slcr_ba;
struct clk_hw *pll;
@@ -587,8 +590,8 @@ static void xvcu_unregister_clock_provider(struct xvcu_device *xvcu)
xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_MCU]);
if (!IS_ERR_OR_NULL(hws[CLK_XVCU_ENC_CORE]))
xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_CORE]);
-
- clk_hw_unregister_fixed_factor(xvcu->pll_post);
+ if (!IS_ERR_OR_NULL(xvcu->pll_post))
+ clk_hw_unregister_fixed_factor(xvcu->pll_post);
}
/**
@@ -676,6 +679,24 @@ static int xvcu_probe(struct platform_device *pdev)
* Bit 0 : Gasket isolation
* Bit 1 : put VCU out of reset
*/
+ xvcu->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(xvcu->reset_gpio)) {
+ ret = PTR_ERR(xvcu->reset_gpio);
+ dev_err_probe(&pdev->dev, ret, "failed to get reset gpio for vcu.\n");
+ goto error_get_gpio;
+ }
+
+ if (xvcu->reset_gpio) {
+ gpiod_set_value(xvcu->reset_gpio, 0);
+ /* min 2 clock cycle of vcu pll_ref, slowest freq is 33.33KHz */
+ usleep_range(60, 120);
+ gpiod_set_value(xvcu->reset_gpio, 1);
+ usleep_range(60, 120);
+ } else {
+ dev_dbg(&pdev->dev, "No reset gpio info found in dts for VCU. This may result in incorrect functionality if VCU isolation is removed after initialization in designs where the VCU reset is driven by gpio.\n");
+ }
+
regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
ret = xvcu_register_clock_provider(xvcu);
@@ -690,6 +711,7 @@ static int xvcu_probe(struct platform_device *pdev)
error_clk_provider:
xvcu_unregister_clock_provider(xvcu);
+error_get_gpio:
clk_disable_unprepare(xvcu->aclk);
return ret;
}
@@ -711,6 +733,13 @@ static void xvcu_remove(struct platform_device *pdev)
xvcu_unregister_clock_provider(xvcu);
/* Add the Gasket isolation and put the VCU in reset. */
+ if (xvcu->reset_gpio) {
+ gpiod_set_value(xvcu->reset_gpio, 0);
+ /* min 2 clock cycle of vcu pll_ref, slowest freq is 33.33KHz */
+ usleep_range(60, 120);
+ gpiod_set_value(xvcu->reset_gpio, 1);
+ usleep_range(60, 120);
+ }
regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
clk_disable_unprepare(xvcu->aclk);
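[Editor's note: a quick check of the 60 us figure used in both reset-GPIO
toggles above:]

/*
 * The slowest pll_ref is 33.33 kHz, so one period is 1 / 33333 Hz
 * ~= 30 us and two periods ~= 60 us -- hence usleep_range(60, 120),
 * with 2x slack on the upper bound.
 */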
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index d38526b8e063..681d687b5a18 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o
# Traces
CFLAGS_amd-pstate-trace.o := -I$(src)
+CFLAGS_powernv-cpufreq.o := -I$(src)
amd_pstate-y := amd-pstate.o amd-pstate-trace.o
##################################################################################
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 06a1c7dd081f..f366d35c5840 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2793,6 +2793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
+ X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs),
{}
};
#endif
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index a8943e2a93be..7d9a5f656de8 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -21,7 +21,6 @@
#include <linux/string_choices.h>
#include <linux/cpu.h>
#include <linux/hashtable.h>
-#include <trace/events/power.h>
#include <asm/cputhreads.h>
#include <asm/firmware.h>
@@ -30,6 +29,9 @@
#include <asm/opal.h>
#include <linux/timer.h>
+#define CREATE_TRACE_POINTS
+#include "powernv-trace.h"
+
#define POWERNV_MAX_PSTATES_ORDER 8
#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER))
#define PMSR_PSAFE_ENABLE (1UL << 30)
diff --git a/drivers/cpufreq/powernv-trace.h b/drivers/cpufreq/powernv-trace.h
new file mode 100644
index 000000000000..8cadb7c9427b
--- /dev/null
+++ b/drivers/cpufreq/powernv-trace.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(_POWERNV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _POWERNV_TRACE_H
+
+#include <linux/cpufreq.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_events.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+TRACE_EVENT(powernv_throttle,
+
+ TP_PROTO(int chip_id, const char *reason, int pmax),
+
+ TP_ARGS(chip_id, reason, pmax),
+
+ TP_STRUCT__entry(
+ __field(int, chip_id)
+ __string(reason, reason)
+ __field(int, pmax)
+ ),
+
+ TP_fast_assign(
+ __entry->chip_id = chip_id;
+ __assign_str(reason);
+ __entry->pmax = pmax;
+ ),
+
+ TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
+ __entry->pmax, __get_str(reason))
+);
+
+#endif /* _POWERNV_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE powernv-trace
+
+#include <trace/define_trace.h>
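The split above follows the usual tracepoint pattern: CREATE_TRACE_POINTS is defined in exactly one translation unit (powernv-cpufreq.c) immediately before including the trace header, which instantiates the event; any other user would include the header without that define. A minimal sketch of emitting the event, assuming chip id, reason string and pmax values are in scope:

/* In powernv-cpufreq.c, after the CREATE_TRACE_POINTS include above. */
static void report_throttle(int chip_id, const char *reason, int pmax)
{
	/* Compiles to a static-branch no-op unless power:powernv_throttle is enabled. */
	trace_powernv_throttle(chip_id, reason, pmax);
}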
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
index 9ad85fe6fd05..7e1fbf9a091f 100644
--- a/drivers/cpufreq/rcpufreq_dt.rs
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -9,7 +9,6 @@ use kernel::{
cpumask::CpumaskVar,
device::{Core, Device},
error::code::*,
- fmt,
macros::vtable,
module_platform_driver, of, opp, platform,
prelude::*,
@@ -19,7 +18,7 @@ use kernel::{
/// Finds exact supply name from the OF node.
fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
- let prop_name = CString::try_from_fmt(fmt!("{}-supply", name)).ok()?;
+ let prop_name = CString::try_from_fmt(fmt!("{name}-supply")).ok()?;
dev.fwnode()?
.property_present(&prop_name)
.then(|| CString::try_from_fmt(fmt!("{name}")).ok())
@@ -221,7 +220,7 @@ impl platform::Driver for CPUFreqDTDriver {
module_platform_driver! {
type: CPUFreqDTDriver,
name: "cpufreq-dt",
- author: "Viresh Kumar <viresh.kumar@linaro.org>",
+ authors: ["Viresh Kumar <viresh.kumar@linaro.org>"],
description: "Generic CPUFreq DT driver",
license: "GPL v2",
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 52d5d26fc7c6..81306612a5c6 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -97,6 +97,14 @@ static inline int which_bucket(u64 duration_ns)
static DEFINE_PER_CPU(struct menu_device, menu_devices);
+static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
+{
+ /* Update the repeating-pattern data. */
+ data->intervals[data->interval_ptr++] = interval_us;
+ if (data->interval_ptr >= INTERVALS)
+ data->interval_ptr = 0;
+}
+
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
/*
@@ -222,6 +230,14 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (data->needs_update) {
menu_update(drv, dev);
data->needs_update = 0;
+ } else if (!dev->last_residency_ns) {
+ /*
+ * This happens when the driver rejects the previously selected
+ * idle state and returns an error, so update the recent
+ * intervals table to prevent invalid information from being
+ * used going forward.
+ */
+ menu_update_intervals(data, UINT_MAX);
}
/* Find the shortest expected idle interval. */
@@ -482,10 +498,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
- /* update the repeating-pattern data */
- data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
- if (data->interval_ptr >= INTERVALS)
- data->interval_ptr = 0;
+ menu_update_intervals(data, ktime_to_us(measured_ns));
}
/**
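A plausible reading of the UINT_MAX entry above: the menu governor's pattern detection discards implausibly large samples, so recording UINT_MAX effectively voids the slot instead of letting a rejected idle state contribute a bogus measurement. A standalone sketch of that filtering idea (illustrative only, not the governor's actual statistics code):

#define INTERVALS 8	/* size of the recent-intervals ring buffer */

/* Average the ring buffer, skipping slots voided with UINT_MAX. */
static unsigned int avg_valid_intervals(const unsigned int intervals[INTERVALS])
{
	unsigned int sum = 0, count = 0;
	int i;

	for (i = 0; i < INTERVALS; i++) {
		if (intervals[i] == UINT_MAX)	/* voided: driver rejected the state */
			continue;
		sum += intervals[i];
		count++;
	}
	return count ? sum / count : UINT_MAX;
}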
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9f8a3a5bed7e..04b4c43b6bae 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -188,6 +188,19 @@ config CRYPTO_PAES_S390
Select this option if you want to use the paes cipher,
for example to use protected key encrypted devices.
+config CRYPTO_PHMAC_S390
+ tristate "PHMAC cipher algorithms"
+ depends on S390
+ depends on PKEY
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ help
+ This is the s390 hardware accelerated implementation of the
+ protected key HMAC support for SHA224, SHA256, SHA384 and SHA512.
+
+ Select this option if you want to use the phmac digests,
+ for example to use dm-integrity with secure/protected keys.
+
config S390_PRNG
tristate "Pseudo random number generator device driver"
depends on S390
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index f9cf00d690e2..5663df49dd81 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -206,15 +206,14 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
cet->t_key = desc_addr_val_le32(ce, rctx->addr_key);
ivsize = crypto_skcipher_ivsize(tfm);
- if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
- rctx->ivlen = ivsize;
+ if (areq->iv && ivsize > 0) {
if (rctx->op_dir & CE_DECRYPTION) {
offset = areq->cryptlen - ivsize;
scatterwalk_map_and_copy(chan->backup_iv, areq->src,
offset, ivsize, 0);
}
memcpy(chan->bounce_iv, areq->iv, ivsize);
- rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen,
+ rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, ivsize,
DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
@@ -278,8 +277,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
}
chan->timeout = areq->cryptlen;
- rctx->nr_sgs = nr_sgs;
- rctx->nr_sgd = nr_sgd;
+ rctx->nr_sgs = ns;
+ rctx->nr_sgd = nd;
return 0;
theend_sgs:
@@ -296,7 +295,8 @@ theend_sgs:
theend_iv:
if (areq->iv && ivsize > 0) {
if (!dma_mapping_error(ce->dev, rctx->addr_iv))
- dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
+ DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
@@ -345,7 +345,8 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
if (areq->iv && ivsize > 0) {
if (cet->t_iv)
- dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
+ DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
memcpy(areq->iv, chan->backup_iv, ivsize);
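The invariant the ivlen removal preserves: dma_unmap_single() must be called with the same size (and device, address, direction) that dma_map_single() was given, so a second copy of the length is just an opportunity for the two to drift apart. A minimal sketch of the pairing, with hypothetical surrounding code:

unsigned int ivsize = crypto_skcipher_ivsize(tfm);
dma_addr_t addr_iv;

addr_iv = dma_map_single(dev, bounce_iv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(dev, addr_iv))
	return -EINVAL;
/* ... hardware consumes the IV ... */
dma_unmap_single(dev, addr_iv, ivsize, DMA_TO_DEVICE);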
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index bef44f350167..13bdfb8a2c62 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -342,8 +342,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
- bs = algt->alg.hash.base.halg.base.cra_blocksize;
- digestsize = algt->alg.hash.base.halg.digestsize;
+ bs = crypto_ahash_blocksize(tfm);
+ digestsize = crypto_ahash_digestsize(tfm);
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
if (digestsize == SHA384_DIGEST_SIZE)
@@ -455,7 +455,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err_unmap_result:
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
if (!err)
- memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
+ memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
err_unmap_src:
dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
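The hash hunks above swap long struct walks for the generic accessors; both forms read the same algorithm parameters, but the helpers work for any ahash transform and keep the driver independent of how the template struct is laid out. A sketch, assuming an ahash_request *areq:

struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
unsigned int bs = crypto_ahash_blocksize(tfm);	/* == base.cra_blocksize */
unsigned int ds = crypto_ahash_digestsize(tfm);	/* == halg.digestsize */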
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 83df4d719053..0f9a89067016 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -260,7 +260,6 @@ static inline __le32 desc_addr_val_le32(struct sun8i_ce_dev *dev,
* struct sun8i_cipher_req_ctx - context for a skcipher request
* @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request
- * @ivlen: size of bounce_iv
* @nr_sgs: The number of source SG (as given by dma_map_sg())
* @nr_sgd: The number of destination SG (as given by dma_map_sg())
* @addr_iv: The IV addr returned by dma_map_single, need to unmap later
@@ -270,7 +269,6 @@ static inline __le32 desc_addr_val_le32(struct sun8i_ce_dev *dev,
struct sun8i_cipher_req_ctx {
u32 op_dir;
int flow;
- unsigned int ivlen;
int nr_sgs;
int nr_sgd;
dma_addr_t addr_iv;
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index 0b6e49c06eff..f8f37c9d5f3c 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -5,7 +5,6 @@
#include "aspeed-hace.h"
#include <crypto/engine.h>
-#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
@@ -14,6 +13,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/scatterlist.h>
#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
@@ -59,6 +59,46 @@ static const __be64 sha512_iv[8] = {
cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
};
+static int aspeed_sham_init(struct ahash_request *req);
+static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev);
+
+static int aspeed_sham_export(struct ahash_request *req, void *out)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ union {
+ u8 *u8;
+ u64 *u64;
+ } p = { .u8 = out };
+
+ memcpy(out, rctx->digest, rctx->ivsize);
+ p.u8 += rctx->ivsize;
+ put_unaligned(rctx->digcnt[0], p.u64++);
+ if (rctx->ivsize == 64)
+ put_unaligned(rctx->digcnt[1], p.u64);
+ return 0;
+}
+
+static int aspeed_sham_import(struct ahash_request *req, const void *in)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ union {
+ const u8 *u8;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int err;
+
+ err = aspeed_sham_init(req);
+ if (err)
+ return err;
+
+ memcpy(rctx->digest, in, rctx->ivsize);
+ p.u8 += rctx->ivsize;
+ rctx->digcnt[0] = get_unaligned(p.u64++);
+ if (rctx->ivsize == 64)
+ rctx->digcnt[1] = get_unaligned(p.u64);
+ return 0;
+}
+
/* The purpose of this padding is to ensure that the padded message is a
* multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
* The bit "1" is appended at the end of the message followed by
@@ -74,10 +114,10 @@ static const __be64 sha512_iv[8] = {
* - if message length < 112 bytes then padlen = 112 - message length
* - else padlen = 128 + 112 - message length
*/
-static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
- struct aspeed_sham_reqctx *rctx)
+static int aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+ struct aspeed_sham_reqctx *rctx, u8 *buf)
{
- unsigned int index, padlen;
+ unsigned int index, padlen, bitslen;
__be64 bits[2];
AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
@@ -87,25 +127,32 @@ static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
case SHA_FLAGS_SHA224:
case SHA_FLAGS_SHA256:
bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
- index = rctx->bufcnt & 0x3f;
+ index = rctx->digcnt[0] & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
- *(rctx->buffer + rctx->bufcnt) = 0x80;
- memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
- memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
- rctx->bufcnt += padlen + 8;
+ bitslen = 8;
break;
default:
bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
rctx->digcnt[0] >> 61);
- index = rctx->bufcnt & 0x7f;
+ index = rctx->digcnt[0] & 0x7f;
padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
- *(rctx->buffer + rctx->bufcnt) = 0x80;
- memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
- memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
- rctx->bufcnt += padlen + 16;
+ bitslen = 16;
break;
}
+ buf[0] = 0x80;
+ memset(buf + 1, 0, padlen - 1);
+ memcpy(buf + padlen, bits, bitslen);
+ return padlen + bitslen;
+}
+
+static void aspeed_ahash_update_counter(struct aspeed_sham_reqctx *rctx,
+ unsigned int len)
+{
+ rctx->offset += len;
+ rctx->digcnt[0] += len;
+ if (rctx->digcnt[0] < len)
+ rctx->digcnt[1]++;
}
/*
@@ -117,31 +164,31 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
struct ahash_request *req = hash_engine->req;
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- int length, remain;
+ unsigned int length, remain;
+ bool final = false;
- length = rctx->total + rctx->bufcnt;
- remain = length % rctx->block_size;
+ length = rctx->total - rctx->offset;
+ remain = length - round_down(length, rctx->block_size);
AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
- if (rctx->bufcnt)
- memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
-
- if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
- scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
- rctx->bufcnt, rctx->src_sg,
- rctx->offset, rctx->total - remain, 0);
- rctx->offset += rctx->total - remain;
-
- } else {
- dev_warn(hace_dev->dev, "Hash data length is too large\n");
- return -EINVAL;
- }
-
- scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
- rctx->offset, remain, 0);
+ if (length > ASPEED_HASH_SRC_DMA_BUF_LEN)
+ length = ASPEED_HASH_SRC_DMA_BUF_LEN;
+ else if (rctx->flags & SHA_FLAGS_FINUP) {
+ if (round_up(length, rctx->block_size) + rctx->block_size >
+ ASPEED_CRYPTO_SRC_DMA_BUF_LEN)
+ length = round_down(length - 1, rctx->block_size);
+ else
+ final = true;
+ } else
+ length -= remain;
+ scatterwalk_map_and_copy(hash_engine->ahash_src_addr, rctx->src_sg,
+ rctx->offset, length, 0);
+ aspeed_ahash_update_counter(rctx, length);
+ if (final)
+ length += aspeed_ahash_fill_padding(
+ hace_dev, rctx, hash_engine->ahash_src_addr + length);
- rctx->bufcnt = remain;
rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
SHA512_DIGEST_SIZE,
DMA_BIDIRECTIONAL);
@@ -150,7 +197,7 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
return -ENOMEM;
}
- hash_engine->src_length = length - remain;
+ hash_engine->src_length = length;
hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
hash_engine->digest_dma = rctx->digest_dma_addr;
@@ -166,16 +213,20 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
struct ahash_request *req = hash_engine->req;
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ bool final = rctx->flags & SHA_FLAGS_FINUP;
+ int remain, sg_len, i, max_sg_nents;
+ unsigned int length, offset, total;
struct aspeed_sg_list *src_list;
struct scatterlist *s;
- int length, remain, sg_len, i;
int rc = 0;
- remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
- length = rctx->total + rctx->bufcnt - remain;
+ offset = rctx->offset;
+ length = rctx->total - offset;
+ remain = final ? 0 : length - round_down(length, rctx->block_size);
+ length -= remain;
- AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
- "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
+ AHASH_DBG(hace_dev, "%s:0x%x, %s:0x%x, %s:0x%x\n",
+ "rctx total", rctx->total,
"length", length, "remain", remain);
sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
@@ -186,6 +237,8 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
goto end;
}
+ max_sg_nents = ASPEED_HASH_SRC_DMA_BUF_LEN / sizeof(*src_list) - final;
+ sg_len = min(sg_len, max_sg_nents);
src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
SHA512_DIGEST_SIZE,
@@ -196,68 +249,66 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
goto free_src_sg;
}
- if (rctx->bufcnt != 0) {
- u32 phy_addr;
- u32 len;
+ total = 0;
+ for_each_sg(rctx->src_sg, s, sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
- rctx->buffer,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
- rc = -ENOMEM;
- goto free_rctx_digest;
+ if (len <= offset) {
+ offset -= len;
+ continue;
}
- phy_addr = rctx->buffer_dma_addr;
- len = rctx->bufcnt;
- length -= len;
+ len -= offset;
+ phy_addr += offset;
+ offset = 0;
- /* Last sg list */
- if (length == 0)
- len |= HASH_SG_LAST_LIST;
+ if (length > len)
+ length -= len;
+ else {
+ /* Last sg list */
+ len = length;
+ length = 0;
+ }
- src_list[0].phy_addr = cpu_to_le32(phy_addr);
- src_list[0].len = cpu_to_le32(len);
- src_list++;
+ total += len;
+ src_list[i].phy_addr = cpu_to_le32(phy_addr);
+ src_list[i].len = cpu_to_le32(len);
}
if (length != 0) {
- for_each_sg(rctx->src_sg, s, sg_len, i) {
- u32 phy_addr = sg_dma_address(s);
- u32 len = sg_dma_len(s);
-
- if (length > len)
- length -= len;
- else {
- /* Last sg list */
- len = length;
- len |= HASH_SG_LAST_LIST;
- length = 0;
- }
+ total = round_down(total, rctx->block_size);
+ final = false;
+ }
+
+ aspeed_ahash_update_counter(rctx, total);
+ if (final) {
+ int len = aspeed_ahash_fill_padding(hace_dev, rctx,
+ rctx->buffer);
- src_list[i].phy_addr = cpu_to_le32(phy_addr);
- src_list[i].len = cpu_to_le32(len);
+ total += len;
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->buffer,
+ sizeof(rctx->buffer),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
}
- }
- if (length != 0) {
- rc = -EINVAL;
- goto free_rctx_buffer;
+ src_list[i].phy_addr = cpu_to_le32(rctx->buffer_dma_addr);
+ src_list[i].len = cpu_to_le32(len);
+ i++;
}
+ src_list[i - 1].len |= cpu_to_le32(HASH_SG_LAST_LIST);
- rctx->offset = rctx->total - remain;
- hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
+ hash_engine->src_length = total;
hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
hash_engine->digest_dma = rctx->digest_dma_addr;
return 0;
-free_rctx_buffer:
- if (rctx->bufcnt != 0)
- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2, DMA_TO_DEVICE);
free_rctx_digest:
dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
@@ -272,24 +323,6 @@ static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
{
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
struct ahash_request *req = hash_engine->req;
-
- AHASH_DBG(hace_dev, "\n");
-
- hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
-
- crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
-
- return 0;
-}
-
-/*
- * Copy digest to the corresponding request result.
- * This function will be called at final() stage.
- */
-static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
AHASH_DBG(hace_dev, "\n");
@@ -297,12 +330,19 @@ static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2, DMA_TO_DEVICE);
+ if (rctx->total - rctx->offset >= rctx->block_size ||
+ (rctx->total != rctx->offset && rctx->flags & SHA_FLAGS_FINUP))
+ return aspeed_ahash_req_update(hace_dev);
- memcpy(req->result, rctx->digest, rctx->digsize);
+ hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
- return aspeed_ahash_complete(hace_dev);
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ memcpy(req->result, rctx->digest, rctx->digsize);
+
+ crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req,
+ rctx->total - rctx->offset);
+
+ return 0;
}
/*
@@ -338,118 +378,6 @@ static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
return -EINPROGRESS;
}
-/*
- * HMAC resume aims to do the second pass produces
- * the final HMAC code derived from the inner hash
- * result and the outer key.
- */
-static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
- int rc = 0;
-
- AHASH_DBG(hace_dev, "\n");
-
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2, DMA_TO_DEVICE);
-
- /* o key pad + hash sum 1 */
- memcpy(rctx->buffer, bctx->opad, rctx->block_size);
- memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
-
- rctx->bufcnt = rctx->block_size + rctx->digsize;
- rctx->digcnt[0] = rctx->block_size + rctx->digsize;
-
- aspeed_ahash_fill_padding(hace_dev, rctx);
- memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
-
- rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
- SHA512_DIGEST_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
- rc = -ENOMEM;
- goto end;
- }
-
- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
- rc = -ENOMEM;
- goto free_rctx_digest;
- }
-
- hash_engine->src_dma = rctx->buffer_dma_addr;
- hash_engine->src_length = rctx->bufcnt;
- hash_engine->digest_dma = rctx->digest_dma_addr;
-
- return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
-
-free_rctx_digest:
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-end:
- return rc;
-}
-
-static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- int rc = 0;
-
- AHASH_DBG(hace_dev, "\n");
-
- aspeed_ahash_fill_padding(hace_dev, rctx);
-
- rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
- rctx->digest,
- SHA512_DIGEST_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
- rc = -ENOMEM;
- goto end;
- }
-
- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
- rctx->buffer,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
- rc = -ENOMEM;
- goto free_rctx_digest;
- }
-
- hash_engine->src_dma = rctx->buffer_dma_addr;
- hash_engine->src_length = rctx->bufcnt;
- hash_engine->digest_dma = rctx->digest_dma_addr;
-
- if (rctx->flags & SHA_FLAGS_HMAC)
- return aspeed_hace_ahash_trigger(hace_dev,
- aspeed_ahash_hmac_resume);
-
- return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
-
-free_rctx_digest:
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-end:
- return rc;
-}
-
static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
{
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
@@ -461,40 +389,12 @@ static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
DMA_TO_DEVICE);
- if (rctx->bufcnt != 0)
+ if (rctx->flags & SHA_FLAGS_FINUP && rctx->total == rctx->offset)
dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
+ sizeof(rctx->buffer), DMA_TO_DEVICE);
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
- scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
- rctx->total - rctx->offset, 0);
-
- rctx->bufcnt = rctx->total - rctx->offset;
rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
- if (rctx->flags & SHA_FLAGS_FINUP)
- return aspeed_ahash_req_final(hace_dev);
-
- return aspeed_ahash_complete(hace_dev);
-}
-
-static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
- AHASH_DBG(hace_dev, "\n");
-
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
- if (rctx->flags & SHA_FLAGS_FINUP)
- return aspeed_ahash_req_final(hace_dev);
-
return aspeed_ahash_complete(hace_dev);
}
@@ -513,7 +413,7 @@ static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
resume = aspeed_ahash_update_resume_sg;
} else {
- resume = aspeed_ahash_update_resume;
+ resume = aspeed_ahash_complete;
}
ret = hash_engine->dma_prepare(hace_dev);
@@ -530,26 +430,47 @@ static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
hace_dev->crypt_engine_hash, req);
}
+static noinline int aspeed_ahash_fallback(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ HASH_FBREQ_ON_STACK(fbreq, req);
+ u8 *state = rctx->buffer;
+ struct scatterlist sg[2];
+ struct scatterlist *ssg;
+ int ret;
+
+ ssg = scatterwalk_ffwd(sg, req->src, rctx->offset);
+ ahash_request_set_crypt(fbreq, ssg, req->result,
+ rctx->total - rctx->offset);
+
+ ret = aspeed_sham_export(req, state) ?:
+ crypto_ahash_import_core(fbreq, state);
+
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ ret = ret ?: crypto_ahash_finup(fbreq);
+ else
+ ret = ret ?: crypto_ahash_update(fbreq) ?:
+ crypto_ahash_export_core(fbreq, state) ?:
+ aspeed_sham_import(req, state);
+ HASH_REQUEST_ZERO(fbreq);
+ return ret;
+}
+
static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
struct aspeed_engine_hash *hash_engine;
- int ret = 0;
+ int ret;
hash_engine = &hace_dev->hash_engine;
hash_engine->flags |= CRYPTO_FLAGS_BUSY;
- if (rctx->op == SHA_OP_UPDATE)
- ret = aspeed_ahash_req_update(hace_dev);
- else if (rctx->op == SHA_OP_FINAL)
- ret = aspeed_ahash_req_final(hace_dev);
-
+ ret = aspeed_ahash_req_update(hace_dev);
if (ret != -EINPROGRESS)
- return ret;
+ return aspeed_ahash_fallback(req);
return 0;
}
@@ -590,45 +511,7 @@ static int aspeed_sham_update(struct ahash_request *req)
rctx->total = req->nbytes;
rctx->src_sg = req->src;
rctx->offset = 0;
- rctx->src_nents = sg_nents(req->src);
- rctx->op = SHA_OP_UPDATE;
-
- rctx->digcnt[0] += rctx->total;
- if (rctx->digcnt[0] < rctx->total)
- rctx->digcnt[1]++;
-
- if (rctx->bufcnt + rctx->total < rctx->block_size) {
- scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
- rctx->src_sg, rctx->offset,
- rctx->total, 0);
- rctx->bufcnt += rctx->total;
-
- return 0;
- }
-
- return aspeed_hace_hash_handle_queue(hace_dev, req);
-}
-
-static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
- const u8 *data, unsigned int len, u8 *out)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
-
- shash->tfm = tfm;
-
- return crypto_shash_digest(shash, data, len, out);
-}
-
-static int aspeed_sham_final(struct ahash_request *req)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-
- AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
- req->nbytes, rctx->total);
- rctx->op = SHA_OP_FINAL;
+ rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
return aspeed_hace_hash_handle_queue(hace_dev, req);
}
@@ -639,23 +522,12 @@ static int aspeed_sham_finup(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- int rc1, rc2;
AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
rctx->flags |= SHA_FLAGS_FINUP;
- rc1 = aspeed_sham_update(req);
- if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
- return rc1;
-
- /*
- * final() has to be always called to cleanup resources
- * even if update() failed, except EINPROGRESS
- */
- rc2 = aspeed_sham_final(req);
-
- return rc1 ? : rc2;
+ return aspeed_sham_update(req);
}
static int aspeed_sham_init(struct ahash_request *req)
@@ -664,7 +536,6 @@ static int aspeed_sham_init(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
AHASH_DBG(hace_dev, "%s: digest size:%d\n",
crypto_tfm_alg_name(&tfm->base),
@@ -679,7 +550,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA1;
rctx->digsize = SHA1_DIGEST_SIZE;
rctx->block_size = SHA1_BLOCK_SIZE;
- rctx->sha_iv = sha1_iv;
rctx->ivsize = 32;
memcpy(rctx->digest, sha1_iv, rctx->ivsize);
break;
@@ -688,7 +558,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA224;
rctx->digsize = SHA224_DIGEST_SIZE;
rctx->block_size = SHA224_BLOCK_SIZE;
- rctx->sha_iv = sha224_iv;
rctx->ivsize = 32;
memcpy(rctx->digest, sha224_iv, rctx->ivsize);
break;
@@ -697,7 +566,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA256;
rctx->digsize = SHA256_DIGEST_SIZE;
rctx->block_size = SHA256_BLOCK_SIZE;
- rctx->sha_iv = sha256_iv;
rctx->ivsize = 32;
memcpy(rctx->digest, sha256_iv, rctx->ivsize);
break;
@@ -707,7 +575,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA384;
rctx->digsize = SHA384_DIGEST_SIZE;
rctx->block_size = SHA384_BLOCK_SIZE;
- rctx->sha_iv = (const __be32 *)sha384_iv;
rctx->ivsize = 64;
memcpy(rctx->digest, sha384_iv, rctx->ivsize);
break;
@@ -717,7 +584,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA512;
rctx->digsize = SHA512_DIGEST_SIZE;
rctx->block_size = SHA512_BLOCK_SIZE;
- rctx->sha_iv = (const __be32 *)sha512_iv;
rctx->ivsize = 64;
memcpy(rctx->digest, sha512_iv, rctx->ivsize);
break;
@@ -727,19 +593,10 @@ static int aspeed_sham_init(struct ahash_request *req)
return -EINVAL;
}
- rctx->bufcnt = 0;
rctx->total = 0;
rctx->digcnt[0] = 0;
rctx->digcnt[1] = 0;
- /* HMAC init */
- if (tctx->flags & SHA_FLAGS_HMAC) {
- rctx->digcnt[0] = rctx->block_size;
- rctx->bufcnt = rctx->block_size;
- memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
- rctx->flags |= SHA_FLAGS_HMAC;
- }
-
return 0;
}
@@ -748,102 +605,14 @@ static int aspeed_sham_digest(struct ahash_request *req)
return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
}
-static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
+static int aspeed_sham_cra_init(struct crypto_ahash *tfm)
{
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
- int ds = crypto_shash_digestsize(bctx->shash);
- int bs = crypto_shash_blocksize(bctx->shash);
- int err = 0;
- int i;
-
- AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
- keylen);
-
- if (keylen > bs) {
- err = aspeed_sham_shash_digest(bctx->shash,
- crypto_shash_get_flags(bctx->shash),
- key, keylen, bctx->ipad);
- if (err)
- return err;
- keylen = ds;
-
- } else {
- memcpy(bctx->ipad, key, keylen);
- }
-
- memset(bctx->ipad + keylen, 0, bs - keylen);
- memcpy(bctx->opad, bctx->ipad, bs);
-
- for (i = 0; i < bs; i++) {
- bctx->ipad[i] ^= HMAC_IPAD_VALUE;
- bctx->opad[i] ^= HMAC_OPAD_VALUE;
- }
-
- return err;
-}
-
-static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
-{
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
- struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
struct aspeed_hace_alg *ast_alg;
ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
tctx->hace_dev = ast_alg->hace_dev;
- tctx->flags = 0;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct aspeed_sham_reqctx));
-
- if (ast_alg->alg_base) {
- /* hmac related */
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
- tctx->flags |= SHA_FLAGS_HMAC;
- bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(bctx->shash)) {
- dev_warn(ast_alg->hace_dev->dev,
- "base driver '%s' could not be loaded.\n",
- ast_alg->alg_base);
- return PTR_ERR(bctx->shash);
- }
- }
-
- return 0;
-}
-
-static void aspeed_sham_cra_exit(struct crypto_tfm *tfm)
-{
- struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-
- AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
-
- if (tctx->flags & SHA_FLAGS_HMAC) {
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
- crypto_free_shash(bctx->shash);
- }
-}
-
-static int aspeed_sham_export(struct ahash_request *req, void *out)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
- memcpy(out, rctx, sizeof(*rctx));
-
- return 0;
-}
-
-static int aspeed_sham_import(struct ahash_request *req, const void *in)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
- memcpy(rctx, in, sizeof(*rctx));
return 0;
}
@@ -853,11 +622,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -867,13 +636,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -885,11 +654,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -899,13 +668,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -917,11 +686,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -931,118 +700,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha1",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "aspeed-hmac-sha1",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha224",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha224)",
- .cra_driver_name = "aspeed-hmac-sha224",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha256",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "aspeed-hmac-sha256",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -1057,11 +721,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -1071,13 +735,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -1089,11 +753,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -1103,83 +767,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha384",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "aspeed-hmac-sha384",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha512",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "aspeed-hmac-sha512",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
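The padding rules quoted earlier in this file (pad to 56 mod 64 for SHA-1/224/256, to 112 mod 128 for SHA-384/512, then append the message bit length) reduce to a small computation. A self-contained sketch with illustrative names, matching the padlen arithmetic in aspeed_ahash_fill_padding():

/* Padding length, excluding the trailing 8- or 16-byte length field. */
static unsigned int sha_padlen(u64 msglen, unsigned int block_size)
{
	unsigned int lenfield = (block_size == 128) ? 16 : 8;
	unsigned int target = block_size - lenfield;	/* 56 or 112 */
	unsigned int index = msglen & (block_size - 1);

	return (index < target) ? target - index
				: block_size + target - index;
}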
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
index 68f70e01fccb..b1d07730d543 100644
--- a/drivers/crypto/aspeed/aspeed-hace.h
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -119,7 +119,6 @@
#define SHA_FLAGS_SHA512 BIT(4)
#define SHA_FLAGS_SHA512_224 BIT(5)
#define SHA_FLAGS_SHA512_256 BIT(6)
-#define SHA_FLAGS_HMAC BIT(8)
#define SHA_FLAGS_FINUP BIT(9)
#define SHA_FLAGS_MASK (0xff)
@@ -161,22 +160,18 @@ struct aspeed_engine_hash {
aspeed_hace_fn_t dma_prepare;
};
-struct aspeed_sha_hmac_ctx {
- struct crypto_shash *shash;
- u8 ipad[SHA512_BLOCK_SIZE];
- u8 opad[SHA512_BLOCK_SIZE];
-};
-
struct aspeed_sham_ctx {
struct aspeed_hace_dev *hace_dev;
- unsigned long flags; /* hmac flag */
-
- struct aspeed_sha_hmac_ctx base[];
};
struct aspeed_sham_reqctx {
+ /* DMA buffer written by hardware */
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
+
+ /* Software state sorted by size. */
+ u64 digcnt[2];
+
unsigned long flags; /* final/update flags */
- unsigned long op; /* final or update */
u32 cmd; /* trigger cmd */
/* walk state */
@@ -188,17 +183,12 @@ struct aspeed_sham_reqctx {
size_t digsize;
size_t block_size;
size_t ivsize;
- const __be32 *sha_iv;
- /* remain data buffer */
- u8 buffer[SHA512_BLOCK_SIZE * 2];
dma_addr_t buffer_dma_addr;
- size_t bufcnt; /* buffer counter */
-
- /* output buffer */
- u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
dma_addr_t digest_dma_addr;
- u64 digcnt[2];
+
+ /* This is DMA too but read-only for hardware. */
+ u8 buffer[SHA512_BLOCK_SIZE + 16];
};
struct aspeed_engine_crypto {
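The reqctx reshuffle above is about DMA safety as much as tidiness: the hardware-written digest comes first with 64-byte alignment so no CPU-updated field shares its cachelines, and the buffer the hardware only reads is grouped at the end. A generic sketch of the layout idea (names illustrative):

struct example_reqctx {
	/* Device writes here: keep it alone on aligned cachelines. */
	u8 digest[64] __aligned(64);

	/* CPU-only bookkeeping, roughly sorted by size. */
	u64 counters[2];
	unsigned long flags;

	/* Device reads (never writes) this staging buffer. */
	u8 buffer[128 + 16];
};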
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 27c5d000b4b2..3a2684208dda 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2297,6 +2297,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x800:
case 0x700:
case 0x600:
case 0x500:
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 2cc36da163e8..3d7573c7bd1c 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2534,6 +2534,7 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x800:
case 0x700:
case 0x600:
case 0x510:
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index acf1b197eb84..d2eaf5205b1c 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -25,10 +25,6 @@ caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_BLOB_GEN) += blob_gen.o
caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
-ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
- ccflags-y += -DCONFIG_CAAM_QI
-endif
-
caam-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 38ff931059b4..a93be395c878 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -24,7 +24,7 @@
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
#include "qi.h"
#endif
@@ -573,7 +573,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data },
- { .soc_id = "i.MX8QM", .data = &caam_imx8ulp_data },
+ { .soc_id = "i.MX8Q*", .data = &caam_imx8ulp_data },
{ .soc_id = "VF*", .data = &caam_vf610_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
@@ -831,7 +831,7 @@ static int caam_ctrl_suspend(struct device *dev)
{
const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
- if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en)
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->no_page0)
caam_state_save(dev);
return 0;
@@ -842,7 +842,7 @@ static int caam_ctrl_resume(struct device *dev)
struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
int ret = 0;
- if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) {
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->no_page0) {
caam_state_restore(dev);
/* HW and rng will be reset so deinstantiation can be removed */
@@ -908,6 +908,7 @@ static int caam_probe(struct platform_device *pdev)
imx_soc_data = imx_soc_match->data;
reg_access = reg_access && imx_soc_data->page0_access;
+ ctrlpriv->no_page0 = !reg_access;
/*
* CAAM clocks cannot be controlled from kernel.
*/
@@ -967,7 +968,7 @@ iomap_ctrl:
caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
/* If (DPAA 1.x) QI present, check whether dependencies are available */
if (ctrlpriv->qi_present && !caam_dpaa2) {
ret = qman_is_probed();
@@ -1098,7 +1099,7 @@ set_dma_mask:
wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
/* If QMAN driver is present, init CAAM-QI backend */
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
ret = caam_qi_init(pdev);
if (ret)
dev_err(dev, "caam qi i/f init failed: %d\n", ret);
diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c
index 6358d3cabf57..718352b7afb5 100644
--- a/drivers/crypto/caam/debugfs.c
+++ b/drivers/crypto/caam/debugfs.c
@@ -22,7 +22,7 @@ static int caam_debugfs_u32_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
/*
* This is a counter for the number of times the congestion group (where all
* the request and response queueus are) reached congestion. Incremented
diff --git a/drivers/crypto/caam/debugfs.h b/drivers/crypto/caam/debugfs.h
index 8b5d1acd21a7..ef238c71f92a 100644
--- a/drivers/crypto/caam/debugfs.h
+++ b/drivers/crypto/caam/debugfs.h
@@ -18,7 +18,7 @@ static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
{}
#endif
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CAAM_QI)
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
void caam_debugfs_qi_congested(void);
void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv);
#else
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e51320150872..a88da0d31b23 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -115,6 +115,7 @@ struct caam_drv_private {
u8 blob_present; /* Nonzero if BLOB support present in device */
u8 mc_en; /* Nonzero if MC f/w is active */
u8 optee_en; /* Nonzero if OP-TEE f/w is active */
+ u8 no_page0; /* Nonzero if register page 0 is not controlled by Linux */
bool pr_support; /* RNG prediction resistance available */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
@@ -226,7 +227,7 @@ static inline int caam_prng_register(struct device *dev)
static inline void caam_prng_unregister(void *data) {}
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API */
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
int caam_qi_algapi_init(struct device *dev);
void caam_qi_algapi_exit(void);
@@ -242,7 +243,7 @@ static inline void caam_qi_algapi_exit(void)
{
}
-#endif /* CONFIG_CAAM_QI */
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI */
static inline u64 caam_get_dma_mask(struct device *dev)
{
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 9fcdb64084ac..0ef00df9730e 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -629,8 +629,7 @@ static int caam_jr_probe(struct platform_device *pdev)
}
/* Initialize crypto engine */
- jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL,
- false,
+ jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, false,
CRYPTO_ENGINE_MAX_QLEN);
if (!jrpriv->engine) {
dev_err(jrdev, "Could not init crypto-engine\n");
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index b6e7c0b29d4e..1e731ed8702b 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -442,11 +442,8 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
if (!cpumask_test_cpu(*cpu, cpus)) {
int *pcpu = &get_cpu_var(last_cpu);
- *pcpu = cpumask_next(*pcpu, cpus);
- if (*pcpu >= nr_cpu_ids)
- *pcpu = cpumask_first(cpus);
+ *pcpu = cpumask_next_wrap(*pcpu, cpus);
*cpu = *pcpu;
-
put_cpu_var(last_cpu);
}
drv_ctx->cpu = *cpu;
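The qi.c hunk above is a drop-in replacement: in the two-argument form used here, cpumask_next_wrap() returns the next set CPU after the given one and wraps to the first set CPU at the end of the mask, which is exactly what the removed open-coded sequence did:

/* Before: open-coded wraparound. */
cpu = cpumask_next(cpu, cpus);
if (cpu >= nr_cpu_ids)
	cpu = cpumask_first(cpus);

/* After: same semantics in one call. */
cpu = cpumask_next_wrap(cpu, cpus);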
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index a1055554b47a..dc26bc22c91d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -319,5 +319,8 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
void ccp5_debugfs_destroy(void)
{
+ mutex_lock(&ccp_debugfs_lock);
debugfs_remove_recursive(ccp_debugfs_dir);
+ ccp_debugfs_dir = NULL;
+ mutex_unlock(&ccp_debugfs_lock);
}
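The ccp-ops.c rework that follows moves several large on-stack workareas into a single heap allocation whose lifetime is tied to the scope: __free(kfree) from <linux/cleanup.h> frees the pointer automatically on every return path, so the function's many early error returns need no manual kfree(). A minimal sketch of the pattern, assuming <linux/slab.h> and <linux/cleanup.h>:

struct big_state {
	u8 scratch[512];
	/* ... further large members kept off the stack ... */
};

static int do_work(void)
{
	struct big_state *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);

	if (!wa)
		return -ENOMEM;	/* kfree(NULL) is a no-op, so this return is safe */

	/* ... use wa->scratch ...; wa is freed automatically on any return. */
	return 0;
}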
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 109b5aef4034..d78865d9d5f0 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -633,10 +633,16 @@ static noinline_for_stack int
ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_aes_engine *aes = &cmd->u.aes;
- struct ccp_dm_workarea key, ctx, final_wa, tag;
- struct ccp_data src, dst;
- struct ccp_data aad;
- struct ccp_op op;
+ struct {
+ struct ccp_dm_workarea key;
+ struct ccp_dm_workarea ctx;
+ struct ccp_dm_workarea final;
+ struct ccp_dm_workarea tag;
+ struct ccp_data src;
+ struct ccp_data dst;
+ struct ccp_data aad;
+ struct ccp_op op;
+ } *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);
unsigned int dm_offset;
unsigned int authsize;
unsigned int jobid;
@@ -650,6 +656,9 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
struct scatterlist *p_outp, sg_outp[2];
struct scatterlist *p_aad;
+ if (!wa)
+ return -ENOMEM;
+
if (!aes->iv)
return -EINVAL;
@@ -696,26 +705,26 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
jobid = CCP_NEW_JOBID(cmd_q->ccp);
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = jobid;
- op.sb_key = cmd_q->sb_key; /* Pre-allocated */
- op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
- op.init = 1;
- op.u.aes.type = aes->type;
+ memset(&wa->op, 0, sizeof(wa->op));
+ wa->op.cmd_q = cmd_q;
+ wa->op.jobid = jobid;
+ wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ wa->op.init = 1;
+ wa->op.u.aes.type = aes->type;
/* Copy the key to the LSB */
- ret = ccp_init_dm_workarea(&key, cmd_q,
+ ret = ccp_init_dm_workarea(&wa->key, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
- ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_set_dm_area(&wa->key, dm_offset, aes->key, 0, aes->key_len);
if (ret)
goto e_key;
- ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ ret = ccp_copy_to_sb(cmd_q, &wa->key, wa->op.jobid, wa->op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
@@ -726,58 +735,58 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
* There is an assumption here that the IV is 96 bits in length, plus
* a nonce of 32 bits. If no IV is present, use a zeroed buffer.
*/
- ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ ret = ccp_init_dm_workarea(&wa->ctx, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
goto e_key;
dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
- ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
if (ret)
goto e_ctx;
- ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
}
- op.init = 1;
+ wa->op.init = 1;
if (aes->aad_len > 0) {
/* Step 1: Run a GHASH over the Additional Authenticated Data */
- ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
+ ret = ccp_init_data(&wa->aad, cmd_q, p_aad, aes->aad_len,
AES_BLOCK_SIZE,
DMA_TO_DEVICE);
if (ret)
goto e_ctx;
- op.u.aes.mode = CCP_AES_MODE_GHASH;
- op.u.aes.action = CCP_AES_GHASHAAD;
+ wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+ wa->op.u.aes.action = CCP_AES_GHASHAAD;
- while (aad.sg_wa.bytes_left) {
- ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
+ while (wa->aad.sg_wa.bytes_left) {
+ ccp_prepare_data(&wa->aad, NULL, &wa->op, AES_BLOCK_SIZE, true);
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_aad;
}
- ccp_process_data(&aad, NULL, &op);
- op.init = 0;
+ ccp_process_data(&wa->aad, NULL, &wa->op);
+ wa->op.init = 0;
}
}
- op.u.aes.mode = CCP_AES_MODE_GCTR;
- op.u.aes.action = aes->action;
+ wa->op.u.aes.mode = CCP_AES_MODE_GCTR;
+ wa->op.u.aes.action = aes->action;
if (ilen > 0) {
/* Step 2: Run a GCTR over the plaintext */
in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
- ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
+ ret = ccp_init_data(&wa->src, cmd_q, p_inp, ilen,
AES_BLOCK_SIZE,
in_place ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
@@ -785,52 +794,52 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_aad;
if (in_place) {
- dst = src;
+ wa->dst = wa->src;
} else {
- ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
+ ret = ccp_init_data(&wa->dst, cmd_q, p_outp, ilen,
AES_BLOCK_SIZE, DMA_FROM_DEVICE);
if (ret)
goto e_src;
}
- op.soc = 0;
- op.eom = 0;
- op.init = 1;
- while (src.sg_wa.bytes_left) {
- ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
- if (!src.sg_wa.bytes_left) {
+ wa->op.soc = 0;
+ wa->op.eom = 0;
+ wa->op.init = 1;
+ while (wa->src.sg_wa.bytes_left) {
+ ccp_prepare_data(&wa->src, &wa->dst, &wa->op, AES_BLOCK_SIZE, true);
+ if (!wa->src.sg_wa.bytes_left) {
unsigned int nbytes = ilen % AES_BLOCK_SIZE;
if (nbytes) {
- op.eom = 1;
- op.u.aes.size = (nbytes * 8) - 1;
+ wa->op.eom = 1;
+ wa->op.u.aes.size = (nbytes * 8) - 1;
}
}
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
- ccp_process_data(&src, &dst, &op);
- op.init = 0;
+ ccp_process_data(&wa->src, &wa->dst, &wa->op);
+ wa->op.init = 0;
}
}
/* Step 3: Update the IV portion of the context with the original IV */
- ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_from_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
- ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
if (ret)
goto e_dst;
- ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
@@ -840,75 +849,75 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Step 4: Concatenate the lengths of the AAD and source, and
* hash that 16 byte buffer.
*/
- ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
+ ret = ccp_init_dm_workarea(&wa->final, cmd_q, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (ret)
goto e_dst;
- final = (__be64 *)final_wa.address;
+ final = (__be64 *)wa->final.address;
final[0] = cpu_to_be64(aes->aad_len * 8);
final[1] = cpu_to_be64(ilen * 8);
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = jobid;
- op.sb_key = cmd_q->sb_key; /* Pre-allocated */
- op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
- op.init = 1;
- op.u.aes.type = aes->type;
- op.u.aes.mode = CCP_AES_MODE_GHASH;
- op.u.aes.action = CCP_AES_GHASHFINAL;
- op.src.type = CCP_MEMTYPE_SYSTEM;
- op.src.u.dma.address = final_wa.dma.address;
- op.src.u.dma.length = AES_BLOCK_SIZE;
- op.dst.type = CCP_MEMTYPE_SYSTEM;
- op.dst.u.dma.address = final_wa.dma.address;
- op.dst.u.dma.length = AES_BLOCK_SIZE;
- op.eom = 1;
- op.u.aes.size = 0;
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ memset(&wa->op, 0, sizeof(wa->op));
+ wa->op.cmd_q = cmd_q;
+ wa->op.jobid = jobid;
+ wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ wa->op.init = 1;
+ wa->op.u.aes.type = aes->type;
+ wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+ wa->op.u.aes.action = CCP_AES_GHASHFINAL;
+ wa->op.src.type = CCP_MEMTYPE_SYSTEM;
+ wa->op.src.u.dma.address = wa->final.dma.address;
+ wa->op.src.u.dma.length = AES_BLOCK_SIZE;
+ wa->op.dst.type = CCP_MEMTYPE_SYSTEM;
+ wa->op.dst.u.dma.address = wa->final.dma.address;
+ wa->op.dst.u.dma.length = AES_BLOCK_SIZE;
+ wa->op.eom = 1;
+ wa->op.u.aes.size = 0;
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret)
goto e_final_wa;
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
/* Put the ciphered tag after the ciphertext. */
- ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+ ccp_get_dm_area(&wa->final, 0, p_tag, 0, authsize);
} else {
/* Does this ciphered tag match the input? */
- ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+ ret = ccp_init_dm_workarea(&wa->tag, cmd_q, authsize,
DMA_BIDIRECTIONAL);
if (ret)
goto e_final_wa;
- ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
+ ret = ccp_set_dm_area(&wa->tag, 0, p_tag, 0, authsize);
if (ret) {
- ccp_dm_free(&tag);
+ ccp_dm_free(&wa->tag);
goto e_final_wa;
}
- ret = crypto_memneq(tag.address, final_wa.address,
+ ret = crypto_memneq(wa->tag.address, wa->final.address,
authsize) ? -EBADMSG : 0;
- ccp_dm_free(&tag);
+ ccp_dm_free(&wa->tag);
}
e_final_wa:
- ccp_dm_free(&final_wa);
+ ccp_dm_free(&wa->final);
e_dst:
if (ilen > 0 && !in_place)
- ccp_free_data(&dst, cmd_q);
+ ccp_free_data(&wa->dst, cmd_q);
e_src:
if (ilen > 0)
- ccp_free_data(&src, cmd_q);
+ ccp_free_data(&wa->src, cmd_q);
e_aad:
if (aes->aad_len)
- ccp_free_data(&aad, cmd_q);
+ ccp_free_data(&wa->aad, cmd_q);
e_ctx:
- ccp_dm_free(&ctx);
+ ccp_dm_free(&wa->ctx);
e_key:
- ccp_dm_free(&key);
+ ccp_dm_free(&wa->key);
return ret;
}
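The hunks above rewrite ccp_run_aes_gcm_cmd() so that all of its DMA descriptors live in a single heap-allocated work area (wa) instead of on the stack; the early "if (!wa) return -ENOMEM;" check implies an allocation just above this hunk. A minimal sketch of what that structure and allocation plausibly look like — the struct name and allocation flags are assumptions, not shown in this diff:

	/* hypothetical layout, inferred from the wa-> accesses above */
	struct ccp_aes_gcm_wa {
		struct ccp_op op;
		struct ccp_dm_workarea key;
		struct ccp_dm_workarea ctx;
		struct ccp_dm_workarea final;
		struct ccp_dm_workarea tag;
		struct ccp_data aad;
		struct ccp_data src;
		struct ccp_data dst;
	};

	/* one allocation per request keeps this large state off the stack */
	struct ccp_aes_gcm_wa *wa = kzalloc(sizeof(*wa), GFP_KERNEL);
	if (!wa)
		return -ENOMEM;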
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 3451bada884e..e058ba027792 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -434,7 +434,7 @@ cleanup:
return rc;
}
-static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
+static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked)
{
unsigned long npages = 1ul << order, paddr;
struct sev_device *sev;
@@ -453,7 +453,7 @@ static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
return page;
paddr = __pa((unsigned long)page_address(page));
- if (rmp_mark_pages_firmware(paddr, npages, false))
+ if (rmp_mark_pages_firmware(paddr, npages, locked))
return NULL;
return page;
@@ -463,7 +463,7 @@ void *snp_alloc_firmware_page(gfp_t gfp_mask)
{
struct page *page;
- page = __snp_alloc_firmware_pages(gfp_mask, 0);
+ page = __snp_alloc_firmware_pages(gfp_mask, 0, false);
return page ? page_address(page) : NULL;
}
@@ -498,7 +498,7 @@ static void *sev_fw_alloc(unsigned long len)
{
struct page *page;
- page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len));
+ page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), true);
if (!page)
return NULL;
@@ -1276,9 +1276,11 @@ static int __sev_platform_init_handle_init_ex_path(struct sev_device *sev)
static int __sev_platform_init_locked(int *error)
{
- int rc, psp_ret = SEV_RET_NO_FW_CALL;
+ int rc, psp_ret, dfflush_error;
struct sev_device *sev;
+ psp_ret = dfflush_error = SEV_RET_NO_FW_CALL;
+
if (!psp_master || !psp_master->sev_data)
return -ENODEV;
@@ -1320,10 +1322,10 @@ static int __sev_platform_init_locked(int *error)
/* Prepare for first SEV guest launch after INIT */
wbinvd_on_all_cpus();
- rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
+ rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, &dfflush_error);
if (rc) {
dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n",
- *error, rc);
+ dfflush_error, rc);
return rc;
}
@@ -1785,8 +1787,14 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
sev->snp_initialized = false;
dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &snp_panic_notifier);
+ /*
+ * __sev_snp_shutdown_locked() deadlocks when it tries to unregister
+ * itself during panic as the panic notifier is called with RCU read
+ * lock held and notifier unregistration does RCU synchronization.
+ */
+ if (!panic)
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &snp_panic_notifier);
/* Reset TMR size back to default */
sev_es_tmr_size = SEV_TMR_SIZE;
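The comment added above is worth unpacking: atomic notifier chains invoke their callbacks under rcu_read_lock(), while atomic_notifier_chain_unregister() performs RCU synchronization, which can never complete from inside a read-side critical section. A sketch of the call shape that deadlocked — the callback name and NULL error argument are illustrative, not taken from this diff:

	/* panic path: atomic_notifier_call_chain() holds rcu_read_lock()... */
	static int snp_panic_cb(struct notifier_block *nb, unsigned long action,
				void *data)
	{
		__sev_snp_shutdown_locked(NULL, true);	/* panic == true */
		return NOTIFY_DONE;
	}
	/* ...so calling atomic_notifier_chain_unregister() from in here would
	 * wait on synchronize_rcu() forever; the patch skips the unregister
	 * whenever the panic flag is set. */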
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index e1be2072d680..e7bb803912a6 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -453,6 +453,7 @@ static const struct psp_vdata pspv6 = {
.cmdresp_reg = 0x10944, /* C2PMSG_17 */
.cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
.cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10510, /* P2CMSG_INTEN */
.intsts_reg = 0x10514, /* P2CMSG_INTSTS */
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index bcca55bff910..3963bb91321f 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -224,7 +224,7 @@ static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
/* Set MLLI size for the bypass operation */
mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
- dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
+ dev_dbg(dev, "MLLI params: virt_addr=%p dma_addr=%pad mlli_len=0x%X\n",
mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
mlli_params->mlli_len);
@@ -239,7 +239,7 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
{
unsigned int index = sgl_data->num_of_buffers;
- dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+ dev_dbg(dev, "index=%u nents=%u sgl=%p data_len=0x%08X is_last=%d\n",
index, nents, sgl, data_len, is_last_table);
sgl_data->nents[index] = nents;
sgl_data->entry[index].sgl = sgl;
@@ -298,7 +298,7 @@ cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
dev_err(dev, "dma_map_sg() config buffer failed\n");
return -ENOMEM;
}
- dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%p offset=%u length=%u\n",
&sg_dma_address(&areq_ctx->ccm_adata_sg),
sg_page(&areq_ctx->ccm_adata_sg),
sg_virt(&areq_ctx->ccm_adata_sg),
@@ -323,7 +323,7 @@ static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
dev_err(dev, "dma_map_sg() src buffer failed\n");
return -ENOMEM;
}
- dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%p offset=%u length=%u\n",
&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
areq_ctx->buff_sg->length);
@@ -359,11 +359,11 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
if (src != dst) {
dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
- dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
- dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ dev_dbg(dev, "Unmapped req->dst=%p\n", sg_virt(dst));
+ dev_dbg(dev, "Unmapped req->src=%p\n", sg_virt(src));
} else {
dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
- dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ dev_dbg(dev, "Unmapped req->src=%p\n", sg_virt(src));
}
}
@@ -391,11 +391,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
- dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping iv %u B at va=%p for DMA failed\n",
ivsize, info);
return -ENOMEM;
}
- dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped iv %u B at va=%p to dma=%pad\n",
ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
} else {
req_ctx->gen_ctx.iv_dma_addr = 0;
@@ -506,7 +506,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
(areq_ctx->mlli_params.mlli_virt_addr)) {
- dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%p\n",
&areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
@@ -514,13 +514,13 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
areq_ctx->mlli_params.mlli_dma_addr);
}
- dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+ dev_dbg(dev, "Unmapping src sgl: req->src=%p areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
areq_ctx->assoclen, req->cryptlen);
dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
if (req->src != req->dst) {
- dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+ dev_dbg(dev, "Unmapping dst sgl: req->dst=%p\n",
sg_virt(req->dst));
dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
}
@@ -566,7 +566,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
- dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping iv %u B at va=%p for DMA failed\n",
hw_iv_size, req->iv);
kfree_sensitive(areq_ctx->gen_ctx.iv);
areq_ctx->gen_ctx.iv = NULL;
@@ -574,7 +574,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
goto chain_iv_exit;
}
- dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped iv %u B at va=%p to dma=%pad\n",
hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
chain_iv_exit:
@@ -977,7 +977,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping mac_buf %u B at va=%p for DMA failed\n",
MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM;
goto aead_map_failure;
@@ -991,7 +991,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping mac_buf %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, addr);
areq_ctx->ccm_iv0_dma_addr = 0;
rc = -ENOMEM;
@@ -1009,7 +1009,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping hkey %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM;
goto aead_map_failure;
@@ -1019,7 +1019,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping gcm_len_block %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM;
goto aead_map_failure;
@@ -1030,7 +1030,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
areq_ctx->gcm_iv_inc1_dma_addr = 0;
rc = -ENOMEM;
@@ -1042,7 +1042,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
areq_ctx->gcm_iv_inc2_dma_addr = 0;
rc = -ENOMEM;
@@ -1152,7 +1152,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
u32 dummy = 0;
u32 mapped_nents = 0;
- dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+ dev_dbg(dev, "final params : curr_buff=%p curr_buff_cnt=0x%X nbytes = 0x%X src=%p curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
@@ -1236,7 +1236,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
u32 dummy = 0;
u32 mapped_nents = 0;
- dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+ dev_dbg(dev, " update params : curr_buff=%p curr_buff_cnt=0x%X nbytes=0x%X src=%p curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
@@ -1246,7 +1246,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
areq_ctx->in_nents = 0;
if (total_in_len < block_size) {
- dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+ dev_dbg(dev, " less than one block: curr_buff=%p *curr_buff_cnt=0x%X copy_to=%p\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
sg_copy_to_buffer(src, areq_ctx->in_nents,
@@ -1265,7 +1265,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
/* Copy the new residue to next buffer */
if (*next_buff_cnt) {
- dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+ dev_dbg(dev, " handle residue: next buff %p skip data %u residue %u\n",
next_buff, (update_data_len - *curr_buff_cnt),
*next_buff_cnt);
cc_copy_sg_portion(dev, next_buff, src,
@@ -1338,7 +1338,7 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
*allocated and should be released
*/
if (areq_ctx->mlli_params.curr_pool) {
- dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%p\n",
&areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
@@ -1347,14 +1347,14 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
}
if (src && areq_ctx->in_nents) {
- dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+ dev_dbg(dev, "Unmapped sg src: virt=%p dma=%pad len=0x%X\n",
sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
dma_unmap_sg(dev, src,
areq_ctx->in_nents, DMA_TO_DEVICE);
}
if (*prev_len) {
- dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+ dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%p dma=%pad len 0x%X\n",
sg_virt(areq_ctx->buff_sg),
&sg_dma_address(areq_ctx->buff_sg),
sg_dma_len(areq_ctx->buff_sg));
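All of the ccree changes in this file are the same mechanical substitution: plain %p already prints a hashed pointer value by default (it is only unhashed via the no_hash_pointers boot option), so %pK adds nothing in dev_dbg()/dev_err() output. This appears to be part of a wider kernel-wide cleanup. Illustrative one-liner, not from the patch:

	dev_dbg(dev, "buf va=%p\n", vaddr);	/* already hashed; same leak
						 * resistance %pK used to add */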
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d39c067672fd..e2cbfdf7a0e4 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -211,11 +211,11 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
max_key_buf_size,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
- dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping Key %u B at va=%p for DMA failed\n",
max_key_buf_size, ctx_p->user.key);
goto free_key;
}
- dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped key %u B at va=%p to dma=%pad\n",
max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
return 0;
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index d0612bec4d58..c6d085c8ff79 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -125,7 +125,7 @@ static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
digestsize);
return -ENOMEM;
}
- dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest result buffer %u B at va=%p to dma=%pad\n",
digestsize, state->digest_result_buff,
&state->digest_result_dma_addr);
@@ -184,11 +184,11 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
dma_map_single(dev, state->digest_buff,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
- dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping digest len %d B at va=%p for DMA failed\n",
ctx->inter_digestsize, state->digest_buff);
return -EINVAL;
}
- dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest %d B at va=%p to dma=%pad\n",
ctx->inter_digestsize, state->digest_buff,
&state->digest_buff_dma_addr);
@@ -197,11 +197,11 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
dma_map_single(dev, state->digest_bytes_len,
HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
- dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping digest len %u B at va=%p for DMA failed\n",
HASH_MAX_LEN_SIZE, state->digest_bytes_len);
goto unmap_digest_buf;
}
- dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest len %u B at va=%p to dma=%pad\n",
HASH_MAX_LEN_SIZE, state->digest_bytes_len,
&state->digest_bytes_len_dma_addr);
}
@@ -212,12 +212,12 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
ctx->inter_digestsize,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
- dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping opad digest %d B at va=%p for DMA failed\n",
ctx->inter_digestsize,
state->opad_digest_buff);
goto unmap_digest_len;
}
- dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped opad digest %d B at va=%p to dma=%pad\n",
ctx->inter_digestsize, state->opad_digest_buff,
&state->opad_digest_dma_addr);
}
@@ -272,7 +272,7 @@ static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
if (state->digest_result_dma_addr) {
dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
DMA_BIDIRECTIONAL);
- dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
+ dev_dbg(dev, "unmap digest result buffer va (%p) pa (%pad) len %u\n",
state->digest_result_buff,
&state->digest_result_dma_addr, digestsize);
memcpy(result, state->digest_result_buff, digestsize);
@@ -287,7 +287,7 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
- dev_dbg(dev, "req=%pK\n", req);
+ dev_dbg(dev, "req=%p\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
@@ -306,7 +306,7 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
- dev_dbg(dev, "req=%pK\n", req);
+ dev_dbg(dev, "req=%p\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
@@ -326,7 +326,7 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
- dev_dbg(dev, "req=%pK\n", req);
+ dev_dbg(dev, "req=%p\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
@@ -1077,11 +1077,11 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
- dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping digest len %zu B at va=%p for DMA failed\n",
sizeof(ctx->digest_buff), ctx->digest_buff);
goto fail;
}
- dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest %zu B at va=%p to dma=%pad\n",
sizeof(ctx->digest_buff), ctx->digest_buff,
&ctx->digest_buff_dma_addr);
@@ -1090,12 +1090,12 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
- dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping opad digest %zu B at va=%p for DMA failed\n",
sizeof(ctx->opad_tmp_keys_buff),
ctx->opad_tmp_keys_buff);
goto fail;
}
- dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%p to dma=%pad\n",
sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
&ctx->opad_tmp_keys_dma_addr);
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 6124fbbbed94..bbd118f8de0e 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -77,6 +77,5 @@ int cc_pm_get(struct device *dev)
void cc_pm_put_suspend(struct device *dev)
{
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
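Dropping pm_runtime_mark_last_busy() here (and in qm.c below) relies on a runtime-PM core change in recent kernels where pm_runtime_put_autosuspend() updates the last-busy timestamp itself, the old non-marking behaviour having moved to __pm_runtime_put_autosuspend(). A sketch of the resulting idiom, assuming that core change is in place:

	pm_runtime_get_sync(dev);
	/* ... touch the hardware ... */
	pm_runtime_put_autosuspend(dev);	/* marks last busy internally;
						 * no explicit call needed */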
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 61b5e1c5d019..1550c3818383 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1491,11 +1491,13 @@ static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+ /* Do unmap before data processing */
+ hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+
p = sg_virt(areq->dst);
memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
- hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value);
@@ -1808,9 +1810,11 @@ static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+ /* Do unmap before data processing */
+ hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+
hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
- hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value);
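Both HPRE callbacks are fixed the same way: the driver must unmap the DMA buffers before the CPU reads or rearranges their contents, because on non-coherent or swiotlb systems it is the unmap that invalidates caches or copies data back from the bounce buffer. The general completion-path pattern, as a sketch with illustrative variables:

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);	/* sync/bounce back */
	p = sg_virt(sgl);				/* CPU view now valid */
	memmove(p, p + ctx->key_sz - curve_sz, curve_sz);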
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 7c41f9593d03..2e4ee7ecfdfb 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -912,7 +912,6 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 703920b49c7c..81d0beda93b2 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -7,6 +7,12 @@
#include <linux/hisi_acc_qm.h>
#include "sec_crypto.h"
+#define SEC_PBUF_SZ 512
+#define SEC_MAX_MAC_LEN 64
+#define SEC_IV_SIZE 24
+#define SEC_SGE_NR_NUM 4
+#define SEC_SGL_ALIGN_SIZE 64
+
/* Algorithm resource per hardware SEC queue */
struct sec_alg_res {
u8 *pbuf;
@@ -20,6 +26,40 @@ struct sec_alg_res {
u16 depth;
};
+struct sec_hw_sge {
+ dma_addr_t buf;
+ void *page_ctrl;
+ __le32 len;
+ __le32 pad;
+ __le32 pad0;
+ __le32 pad1;
+};
+
+struct sec_hw_sgl {
+ dma_addr_t next_dma;
+ __le16 entry_sum_in_chain;
+ __le16 entry_sum_in_sgl;
+ __le16 entry_length_in_sgl;
+ __le16 pad0;
+ __le64 pad1[5];
+ struct sec_hw_sgl *next;
+ struct sec_hw_sge sge_entries[SEC_SGE_NR_NUM];
+} __aligned(SEC_SGL_ALIGN_SIZE);
+
+struct sec_src_dst_buf {
+ struct sec_hw_sgl in;
+ struct sec_hw_sgl out;
+};
+
+struct sec_request_buf {
+ union {
+ struct sec_src_dst_buf data_buf;
+ __u8 pbuf[SEC_PBUF_SZ];
+ };
+ dma_addr_t in_dma;
+ dma_addr_t out_dma;
+};
+
/* Cipher request of SEC private */
struct sec_cipher_req {
struct hisi_acc_hw_sgl *c_out;
@@ -29,6 +69,7 @@ struct sec_cipher_req {
struct skcipher_request *sk_req;
u32 c_len;
bool encrypt;
+ __u8 c_ivin_buf[SEC_IV_SIZE];
};
struct sec_aead_req {
@@ -37,6 +78,13 @@ struct sec_aead_req {
u8 *a_ivin;
dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
+ __u8 a_ivin_buf[SEC_IV_SIZE];
+ __u8 out_mac_buf[SEC_MAX_MAC_LEN];
+};
+
+struct sec_instance_backlog {
+ struct list_head list;
+ spinlock_t lock;
};
/* SEC request of Crypto */
@@ -55,15 +103,17 @@ struct sec_req {
dma_addr_t in_dma;
struct sec_cipher_req c_req;
struct sec_aead_req aead_req;
- struct list_head backlog_head;
+ struct crypto_async_request *base;
int err_type;
int req_id;
u32 flag;
- /* Status of the SEC request */
- bool fake_busy;
bool use_pbuf;
+
+ struct list_head list;
+ struct sec_instance_backlog *backlog;
+ struct sec_request_buf buf;
};
/**
@@ -119,9 +169,11 @@ struct sec_qp_ctx {
struct sec_alg_res *res;
struct sec_ctx *ctx;
spinlock_t req_lock;
- struct list_head backlog;
+ spinlock_t id_lock;
struct hisi_acc_sgl_pool *c_in_pool;
struct hisi_acc_sgl_pool *c_out_pool;
+ struct sec_instance_backlog backlog;
+ u16 send_head;
};
enum sec_alg_type {
@@ -139,9 +191,6 @@ struct sec_ctx {
/* Half queues for encipher, and half for decipher */
u32 hlf_q_num;
- /* Threshold for fake busy, trigger to return -EBUSY to user */
- u32 fake_req_limit;
-
/* Current cyclic index to select a queue for encipher */
atomic_t enc_qcyclic;
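The new sec_request_buf is what lets a request proceed when it fails to win a pool slot (req_id < 0): the union overlays the 512-byte copy buffer with a pair of inline hardware SGLs, each able to describe up to SEC_SGE_NR_NUM (4) mapped segments. The selection between the two paths, pieced together from the sec_crypto.c hunks below (a sketch, not a quote):

	if (req->use_pbuf) {
		/* small request: linearize into the 512-byte bounce buffer */
		sg_copy_to_buffer(src, sg_nents(src), req->buf.pbuf, copy_size);
	} else {
		/* larger request: build buf.data_buf.in/.out inline SGLs,
		 * each limited to SEC_SGE_NR_NUM scatterlist entries */
		ret = sec_cipher_map_sgl(dev, req, src, dst);
	}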
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 8ea5305bc320..d044ded0f290 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -67,7 +67,6 @@
#define SEC_MAX_CCM_AAD_LEN 65279
#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
-#define SEC_PBUF_SZ 512
#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
@@ -102,6 +101,8 @@
#define IV_LAST_BYTE_MASK 0xFF
#define IV_CTR_INIT 0x1
#define IV_BYTE_OFFSET 0x8
+#define SEC_GCM_MIN_AUTH_SZ 0x8
+#define SEC_RETRY_MAX_CNT 5U
static DEFINE_MUTEX(sec_algs_lock);
static unsigned int sec_available_devs;
@@ -116,40 +117,19 @@ struct sec_aead {
struct aead_alg alg;
};
-/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
-static inline u32 sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
-{
- if (req->c_req.encrypt)
- return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
- ctx->hlf_q_num;
-
- return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
- ctx->hlf_q_num;
-}
-
-static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
-{
- if (req->c_req.encrypt)
- atomic_dec(&ctx->enc_qcyclic);
- else
- atomic_dec(&ctx->dec_qcyclic);
-}
+static int sec_aead_soft_crypto(struct sec_ctx *ctx,
+ struct aead_request *aead_req,
+ bool encrypt);
+static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
+ struct skcipher_request *sreq, bool encrypt);
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
int req_id;
- spin_lock_bh(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->id_lock);
req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
- spin_unlock_bh(&qp_ctx->req_lock);
- if (unlikely(req_id < 0)) {
- dev_err(req->ctx->dev, "alloc req id fail!\n");
- return req_id;
- }
-
- req->qp_ctx = qp_ctx;
- qp_ctx->req_list[req_id] = req;
-
+ spin_unlock_bh(&qp_ctx->id_lock);
return req_id;
}
@@ -163,12 +143,9 @@ static void sec_free_req_id(struct sec_req *req)
return;
}
- qp_ctx->req_list[req_id] = NULL;
- req->qp_ctx = NULL;
-
- spin_lock_bh(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->id_lock);
idr_remove(&qp_ctx->req_idr, req_id);
- spin_unlock_bh(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->id_lock);
}
static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
@@ -229,6 +206,90 @@ static int sec_cb_status_check(struct sec_req *req,
return 0;
}
+static int qp_send_message(struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ret;
+
+ if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1)
+ return -EBUSY;
+
+ spin_lock_bh(&qp_ctx->req_lock);
+ if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1) {
+ spin_unlock_bh(&qp_ctx->req_lock);
+ return -EBUSY;
+ }
+
+ if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2) {
+ req->sec_sqe.type2.tag = cpu_to_le16((u16)qp_ctx->send_head);
+ qp_ctx->req_list[qp_ctx->send_head] = req;
+ }
+
+ ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
+ if (ret) {
+ spin_unlock_bh(&qp_ctx->req_lock);
+ return ret;
+ }
+ if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2)
+ qp_ctx->send_head = (qp_ctx->send_head + 1) % qp_ctx->qp->sq_depth;
+
+ spin_unlock_bh(&qp_ctx->req_lock);
+
+ atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt);
+ return -EINPROGRESS;
+}
+
+static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_req *req, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
+ list_del(&req->list);
+ ctx->req_op->buf_unmap(ctx, req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
+
+ if (ctx->alg_type == SEC_AEAD)
+ ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
+ req->c_req.encrypt);
+ else
+ ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
+ req->c_req.encrypt);
+
+ /* Wake up the busy thread first, then return the errno. */
+ crypto_request_complete(req->base, -EINPROGRESS);
+ crypto_request_complete(req->base, ret);
+ }
+}
+
+static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_req *req, *tmp;
+ int ret;
+
+ spin_lock_bh(&qp_ctx->backlog.lock);
+ list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
+ ret = qp_send_message(req);
+ switch (ret) {
+ case -EINPROGRESS:
+ list_del(&req->list);
+ crypto_request_complete(req->base, -EINPROGRESS);
+ break;
+ case -EBUSY:
+ /* Device is busy; stop sending any requests. */
+ goto unlock;
+ default:
+ /* Release memory resources and send all requests through software. */
+ sec_alg_send_backlog_soft(ctx, qp_ctx);
+ goto unlock;
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&qp_ctx->backlog.lock);
+}
+
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
@@ -273,40 +334,54 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
ctx->req_op->callback(ctx, req, err);
}
-static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+static int sec_alg_send_message_retry(struct sec_req *req)
{
- struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ctr = 0;
int ret;
- if (ctx->fake_req_limit <=
- atomic_read(&qp_ctx->qp->qp_status.used) &&
- !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
+ do {
+ ret = qp_send_message(req);
+ } while (ret == -EBUSY && ctr++ < SEC_RETRY_MAX_CNT);
- spin_lock_bh(&qp_ctx->req_lock);
- ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
- if (ctx->fake_req_limit <=
- atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
- list_add_tail(&req->backlog_head, &qp_ctx->backlog);
- atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
- spin_unlock_bh(&qp_ctx->req_lock);
+ return ret;
+}
+
+static int sec_alg_try_enqueue(struct sec_req *req)
+{
+ /* Check if any request is already backlogged */
+ if (!list_empty(&req->backlog->list))
return -EBUSY;
- }
- spin_unlock_bh(&qp_ctx->req_lock);
- if (unlikely(ret == -EBUSY))
- return -ENOBUFS;
+ /* Try to enqueue to HW ring */
+ return qp_send_message(req);
+}
- if (likely(!ret)) {
- ret = -EINPROGRESS;
- atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- }
+
+static int sec_alg_send_message_maybacklog(struct sec_req *req)
+{
+ int ret;
+
+ ret = sec_alg_try_enqueue(req);
+ if (ret != -EBUSY)
+ return ret;
+
+ spin_lock_bh(&req->backlog->lock);
+ ret = sec_alg_try_enqueue(req);
+ if (ret == -EBUSY)
+ list_add_tail(&req->list, &req->backlog->list);
+ spin_unlock_bh(&req->backlog->lock);
return ret;
}
-/* Get DMA memory resources */
+static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ return sec_alg_send_message_maybacklog(req);
+
+ return sec_alg_send_message_retry(req);
+}
+
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
u16 q_depth = res->depth;
@@ -558,7 +633,10 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
spin_lock_init(&qp_ctx->req_lock);
idr_init(&qp_ctx->req_idr);
- INIT_LIST_HEAD(&qp_ctx->backlog);
+ spin_lock_init(&qp_ctx->backlog.lock);
+ spin_lock_init(&qp_ctx->id_lock);
+ INIT_LIST_HEAD(&qp_ctx->backlog.list);
+ qp_ctx->send_head = 0;
ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
if (ret)
@@ -602,9 +680,6 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
ctx->hlf_q_num = sec->ctx_q_num >> 1;
ctx->pbuf_supported = ctx->sec->iommu_used;
-
- /* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx) {
@@ -706,7 +781,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
int ret;
ctx->alg_type = SEC_SKCIPHER;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct sec_req));
ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
pr_err("get error skcipher iv size!\n");
@@ -883,24 +958,25 @@ GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src)
{
- struct sec_aead_req *a_req = &req->aead_req;
- struct aead_request *aead_req = a_req->aead_req;
+ struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_request_buf *buf = &req->buf;
struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
struct crypto_aead *tfm;
+ u8 *mac_offset, *pbuf;
size_t authsize;
- u8 *mac_offset;
if (ctx->alg_type == SEC_AEAD)
copy_size = aead_req->cryptlen + aead_req->assoclen;
else
copy_size = c_req->c_len;
- pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
- qp_ctx->res[req_id].pbuf, copy_size);
+
+ pbuf = req->req_id < 0 ? buf->pbuf : qp_ctx->res[req_id].pbuf;
+ pbuf_length = sg_copy_to_buffer(src, sg_nents(src), pbuf, copy_size);
if (unlikely(pbuf_length != copy_size)) {
dev_err(dev, "copy src data to pbuf error!\n");
return -EINVAL;
@@ -908,8 +984,17 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
tfm = crypto_aead_reqtfm(aead_req);
authsize = crypto_aead_authsize(tfm);
- mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
- memcpy(a_req->out_mac, mac_offset, authsize);
+ mac_offset = pbuf + copy_size - authsize;
+ memcpy(req->aead_req.out_mac, mac_offset, authsize);
+ }
+
+ if (req->req_id < 0) {
+ buf->in_dma = dma_map_single(dev, buf->pbuf, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, buf->in_dma)))
+ return -ENOMEM;
+
+ buf->out_dma = buf->in_dma;
+ return 0;
}
req->in_dma = qp_ctx->res[req_id].pbuf_dma;
@@ -924,6 +1009,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_request_buf *buf = &req->buf;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -932,10 +1018,16 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
else
copy_size = c_req->c_len;
- pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
- qp_ctx->res[req_id].pbuf, copy_size);
+ if (req->req_id < 0)
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), buf->pbuf, copy_size);
+ else
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf,
+ copy_size);
if (unlikely(pbuf_length != copy_size))
dev_err(ctx->dev, "copy pbuf data to dst error!\n");
+
+ if (req->req_id < 0)
+ dma_unmap_single(ctx->dev, buf->in_dma, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);
}
static int sec_aead_mac_init(struct sec_aead_req *req)
@@ -957,14 +1049,95 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
return 0;
}
-static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
- struct scatterlist *src, struct scatterlist *dst)
+static void fill_sg_to_hw_sge(struct scatterlist *sgl, struct sec_hw_sge *hw_sge)
+{
+ hw_sge->buf = sg_dma_address(sgl);
+ hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
+ hw_sge->page_ctrl = sg_virt(sgl);
+}
+
+static int sec_cipher_to_hw_sgl(struct device *dev, struct scatterlist *src,
+ struct sec_hw_sgl *src_in, dma_addr_t *hw_sgl_dma,
+ int dma_dir)
+{
+ struct sec_hw_sge *curr_hw_sge = src_in->sge_entries;
+ u32 i, sg_n, sg_n_mapped;
+ struct scatterlist *sg;
+ u32 sge_var = 0;
+
+ sg_n = sg_nents(src);
+ sg_n_mapped = dma_map_sg(dev, src, sg_n, dma_dir);
+ if (unlikely(!sg_n_mapped)) {
+ dev_err(dev, "dma mapping for SG error!\n");
+ return -EINVAL;
+ } else if (unlikely(sg_n_mapped > SEC_SGE_NR_NUM)) {
+ dev_err(dev, "the number of entries in input scatterlist error!\n");
+ dma_unmap_sg(dev, src, sg_n, dma_dir);
+ return -EINVAL;
+ }
+
+ for_each_sg(src, sg, sg_n_mapped, i) {
+ fill_sg_to_hw_sge(sg, curr_hw_sge);
+ curr_hw_sge++;
+ sge_var++;
+ }
+
+ src_in->entry_sum_in_sgl = cpu_to_le16(sge_var);
+ src_in->entry_sum_in_chain = cpu_to_le16(SEC_SGE_NR_NUM);
+ src_in->entry_length_in_sgl = cpu_to_le16(SEC_SGE_NR_NUM);
+ *hw_sgl_dma = dma_map_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
+ if (unlikely(dma_mapping_error(dev, *hw_sgl_dma))) {
+ dma_unmap_sg(dev, src, sg_n, dma_dir);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void sec_cipher_put_hw_sgl(struct device *dev, struct scatterlist *src,
+ dma_addr_t src_in, int dma_dir)
+{
+ dma_unmap_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
+ dma_unmap_sg(dev, src, sg_nents(src), dma_dir);
+}
+
+static int sec_cipher_map_sgl(struct device *dev, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ struct sec_hw_sgl *src_in = &req->buf.data_buf.in;
+ struct sec_hw_sgl *dst_out = &req->buf.data_buf.out;
+ int ret;
+
+ if (dst == src) {
+ ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma,
+ DMA_BIDIRECTIONAL);
+ req->buf.out_dma = req->buf.in_dma;
+ return ret;
+ }
+
+ ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma, DMA_TO_DEVICE);
+ if (unlikely(ret))
+ return ret;
+
+ ret = sec_cipher_to_hw_sgl(dev, dst, dst_out, &req->buf.out_dma,
+ DMA_FROM_DEVICE);
+ if (unlikely(ret)) {
+ sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sec_cipher_map_inner(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
struct sec_aead_req *a_req = &req->aead_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct sec_alg_res *res = &qp_ctx->res[req->req_id];
struct device *dev = ctx->dev;
+ enum dma_data_direction src_direction;
int ret;
if (req->use_pbuf) {
@@ -977,10 +1150,9 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
a_req->out_mac_dma = res->pbuf_dma +
SEC_PBUF_MAC_OFFSET;
}
- ret = sec_cipher_pbuf_map(ctx, req, src);
-
- return ret;
+ return sec_cipher_pbuf_map(ctx, req, src);
}
+
c_req->c_ivin = res->c_ivin;
c_req->c_ivin_dma = res->c_ivin_dma;
if (ctx->alg_type == SEC_AEAD) {
@@ -990,10 +1162,11 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
a_req->out_mac_dma = res->out_mac_dma;
}
+ src_direction = dst == src ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
qp_ctx->c_in_pool,
req->req_id,
- &req->in_dma);
+ &req->in_dma, src_direction);
if (IS_ERR(req->in)) {
dev_err(dev, "fail to dma map input sgl buffers!\n");
return PTR_ERR(req->in);
@@ -1003,7 +1176,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
ret = sec_aead_mac_init(a_req);
if (unlikely(ret)) {
dev_err(dev, "fail to init mac data for ICV!\n");
- hisi_acc_sg_buf_unmap(dev, src, req->in);
+ hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
return ret;
}
}
@@ -1015,11 +1188,12 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
qp_ctx->c_out_pool,
req->req_id,
- &c_req->c_out_dma);
+ &c_req->c_out_dma,
+ DMA_FROM_DEVICE);
if (IS_ERR(c_req->c_out)) {
dev_err(dev, "fail to dma map output sgl buffers!\n");
- hisi_acc_sg_buf_unmap(dev, src, req->in);
+ hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
return PTR_ERR(c_req->c_out);
}
}
@@ -1027,19 +1201,108 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
return 0;
}
+static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ bool is_aead = (ctx->alg_type == SEC_AEAD);
+ struct device *dev = ctx->dev;
+ int ret = -ENOMEM;
+
+ if (req->req_id >= 0)
+ return sec_cipher_map_inner(ctx, req, src, dst);
+
+ c_req->c_ivin = c_req->c_ivin_buf;
+ c_req->c_ivin_dma = dma_map_single(dev, c_req->c_ivin,
+ SEC_IV_SIZE, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, c_req->c_ivin_dma)))
+ return -ENOMEM;
+
+ if (is_aead) {
+ a_req->a_ivin = a_req->a_ivin_buf;
+ a_req->out_mac = a_req->out_mac_buf;
+ a_req->a_ivin_dma = dma_map_single(dev, a_req->a_ivin,
+ SEC_IV_SIZE, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, a_req->a_ivin_dma)))
+ goto free_c_ivin_dma;
+
+ a_req->out_mac_dma = dma_map_single(dev, a_req->out_mac,
+ SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, a_req->out_mac_dma)))
+ goto free_a_ivin_dma;
+ }
+ if (req->use_pbuf) {
+ ret = sec_cipher_pbuf_map(ctx, req, src);
+ if (unlikely(ret))
+ goto free_out_mac_dma;
+
+ return 0;
+ }
+
+ if (!c_req->encrypt && is_aead) {
+ ret = sec_aead_mac_init(a_req);
+ if (unlikely(ret)) {
+ dev_err(dev, "fail to init mac data for ICV!\n");
+ goto free_out_mac_dma;
+ }
+ }
+
+ ret = sec_cipher_map_sgl(dev, req, src, dst);
+ if (unlikely(ret)) {
+ dev_err(dev, "fail to dma map input sgl buffers!\n");
+ goto free_out_mac_dma;
+ }
+
+ return 0;
+
+free_out_mac_dma:
+ if (is_aead)
+ dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
+free_a_ivin_dma:
+ if (is_aead)
+ dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+free_c_ivin_dma:
+ dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+ return ret;
+}
+
static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src, struct scatterlist *dst)
{
+ struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct device *dev = ctx->dev;
+ if (req->req_id >= 0) {
+ if (req->use_pbuf) {
+ sec_cipher_pbuf_unmap(ctx, req, dst);
+ } else {
+ if (dst != src) {
+ hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out, DMA_FROM_DEVICE);
+ hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_TO_DEVICE);
+ } else {
+ hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_BIDIRECTIONAL);
+ }
+ }
+ return;
+ }
+
if (req->use_pbuf) {
sec_cipher_pbuf_unmap(ctx, req, dst);
} else {
- if (dst != src)
- hisi_acc_sg_buf_unmap(dev, src, req->in);
+ if (dst != src) {
+ sec_cipher_put_hw_sgl(dev, dst, req->buf.out_dma, DMA_FROM_DEVICE);
+ sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
+ } else {
+ sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_BIDIRECTIONAL);
+ }
+ }
- hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
+ dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+ if (ctx->alg_type == SEC_AEAD) {
+ dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+ dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
}
}
@@ -1257,8 +1520,15 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
- sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
- sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ if (req->req_id < 0) {
+ sec_sqe->type2.data_src_addr = cpu_to_le64(req->buf.in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(req->buf.out_dma);
+ } else {
+ sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ }
+ if (sec_sqe->type2.data_src_addr != sec_sqe->type2.data_dst_addr)
+ de = 0x1 << SEC_DE_OFFSET;
sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
SEC_CMODE_OFFSET);
@@ -1284,13 +1554,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe->sdm_addr_type |= da_type;
scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
- if (req->in_dma != c_req->c_out_dma)
- de = 0x1 << SEC_DE_OFFSET;
sec_sqe->sds_sa_type = (de | scene | sa_type);
sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
- sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
return 0;
}
@@ -1307,8 +1574,15 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
- sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
- sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ if (req->req_id < 0) {
+ sec_sqe3->data_src_addr = cpu_to_le64(req->buf.in_dma);
+ sec_sqe3->data_dst_addr = cpu_to_le64(req->buf.out_dma);
+ } else {
+ sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
+ sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ }
+ if (sec_sqe3->data_src_addr != sec_sqe3->data_dst_addr)
+ bd_param |= 0x1 << SEC_DE_OFFSET_V3;
sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
c_ctx->c_mode;
@@ -1334,8 +1608,6 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
}
bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
- if (req->in_dma != c_req->c_out_dma)
- bd_param |= 0x1 << SEC_DE_OFFSET_V3;
bd_param |= SEC_BD_TYPE3;
sec_sqe3->bd_param = cpu_to_le32(bd_param);
@@ -1367,15 +1639,12 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
size_t sz;
u8 *iv;
- if (req->c_req.encrypt)
- sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
- else
- sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
-
if (alg_type == SEC_SKCIPHER) {
+ sgl = req->c_req.encrypt ? sk_req->dst : sk_req->src;
iv = sk_req->iv;
cryptlen = sk_req->cryptlen;
} else {
+ sgl = req->c_req.encrypt ? aead_req->dst : aead_req->src;
iv = aead_req->iv;
cryptlen = aead_req->cryptlen;
}
@@ -1386,57 +1655,26 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
if (unlikely(sz != iv_size))
dev_err(req->ctx->dev, "copy output iv error!\n");
} else {
- sz = cryptlen / iv_size;
- if (cryptlen % iv_size)
- sz += 1;
+ sz = (cryptlen + iv_size - 1) / iv_size;
ctr_iv_inc(iv, iv_size, sz);
}
}
-static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct sec_req *backlog_req = NULL;
-
- spin_lock_bh(&qp_ctx->req_lock);
- if (ctx->fake_req_limit >=
- atomic_read(&qp_ctx->qp->qp_status.used) &&
- !list_empty(&qp_ctx->backlog)) {
- backlog_req = list_first_entry(&qp_ctx->backlog,
- typeof(*backlog_req), backlog_head);
- list_del(&backlog_req->backlog_head);
- }
- spin_unlock_bh(&qp_ctx->req_lock);
-
- return backlog_req;
-}
-
static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
int err)
{
- struct skcipher_request *sk_req = req->c_req.sk_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct skcipher_request *backlog_sk_req;
- struct sec_req *backlog_req;
- sec_free_req_id(req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
/* IV output at encrypto of CBC/CTR mode */
if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
sec_update_iv(req, SEC_SKCIPHER);
- while (1) {
- backlog_req = sec_back_req_clear(ctx, qp_ctx);
- if (!backlog_req)
- break;
-
- backlog_sk_req = backlog_req->c_req.sk_req;
- skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
- atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
- }
-
- skcipher_request_complete(sk_req, err);
+ crypto_request_complete(req->base, err);
+ sec_alg_send_backlog(ctx, qp_ctx);
}
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
@@ -1675,21 +1913,14 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
size_t authsize = crypto_aead_authsize(tfm);
- struct sec_aead_req *aead_req = &req->aead_req;
- struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct aead_request *backlog_aead_req;
- struct sec_req *backlog_req;
size_t sz;
- if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
- sec_update_iv(req, SEC_AEAD);
-
- /* Copy output mac */
- if (!err && c_req->encrypt) {
- struct scatterlist *sgl = a_req->dst;
+ if (!err && req->c_req.encrypt) {
+ if (c->c_ctx.c_mode == SEC_CMODE_CBC)
+ sec_update_iv(req, SEC_AEAD);
- sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
+ sz = sg_pcopy_from_buffer(a_req->dst, sg_nents(a_req->dst), req->aead_req.out_mac,
authsize, a_req->cryptlen + a_req->assoclen);
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
@@ -1697,48 +1928,39 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
}
}
- sec_free_req_id(req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
- while (1) {
- backlog_req = sec_back_req_clear(c, qp_ctx);
- if (!backlog_req)
- break;
-
- backlog_aead_req = backlog_req->aead_req.aead_req;
- aead_request_complete(backlog_aead_req, -EINPROGRESS);
- atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
- }
-
- aead_request_complete(a_req, err);
+ crypto_request_complete(req->base, err);
+ sec_alg_send_backlog(c, qp_ctx);
}
-static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
+static void sec_request_uninit(struct sec_req *req)
{
- sec_free_req_id(req);
- sec_free_queue_id(ctx, req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
}
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
struct sec_qp_ctx *qp_ctx;
- int queue_id;
-
- /* To load balance */
- queue_id = sec_alloc_queue_id(ctx, req);
- qp_ctx = &ctx->qp_ctx[queue_id];
+ int i;
- req->req_id = sec_alloc_req_id(req, qp_ctx);
- if (unlikely(req->req_id < 0)) {
- sec_free_queue_id(ctx, req);
- return req->req_id;
+ for (i = 0; i < ctx->sec->ctx_q_num; i++) {
+ qp_ctx = &ctx->qp_ctx[i];
+ req->req_id = sec_alloc_req_id(req, qp_ctx);
+ if (req->req_id >= 0)
+ break;
}
+ req->qp_ctx = qp_ctx;
+ req->backlog = &qp_ctx->backlog;
+
return 0;
}
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
- struct sec_cipher_req *c_req = &req->c_req;
int ret;
ret = sec_request_init(ctx, req);
@@ -1755,8 +1977,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
- if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
- (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
+ if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
goto err_send_req;
}
@@ -1767,16 +1988,23 @@ err_send_req:
/* As failing, restore the IV from user */
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
if (ctx->alg_type == SEC_SKCIPHER)
- memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
+ memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
ctx->c_ctx.ivsize);
else
- memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
+ memcpy(req->aead_req.aead_req->iv, req->c_req.c_ivin,
ctx->c_ctx.ivsize);
}
sec_request_untransfer(ctx, req);
+
err_uninit_req:
- sec_request_uninit(ctx, req);
+ sec_request_uninit(req);
+ if (ctx->alg_type == SEC_AEAD)
+ ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
+ req->c_req.encrypt);
+ else
+ ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
+ req->c_req.encrypt);
return ret;
}
@@ -1850,7 +2078,7 @@ static int sec_aead_init(struct crypto_aead *tfm)
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
int ret;
- crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct sec_req));
ctx->alg_type = SEC_AEAD;
ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
@@ -2087,7 +2315,7 @@ static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
- struct sec_req *req = skcipher_request_ctx(sk_req);
+ struct sec_req *req = skcipher_request_ctx_dma(sk_req);
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
bool need_fallback = false;
int ret;
@@ -2102,6 +2330,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->base = &sk_req->base;
ret = sec_skcipher_param_check(ctx, req, &need_fallback);
if (unlikely(ret))
@@ -2236,6 +2465,9 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
return -EINVAL;
+ } else if (c_mode == SEC_CMODE_GCM) {
+ if (unlikely(sz < SEC_GCM_MIN_AUTH_SZ))
+ return -EINVAL;
}
return 0;
@@ -2309,7 +2541,7 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
- struct sec_req *req = aead_request_ctx(a_req);
+ struct sec_req *req = aead_request_ctx_dma(a_req);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
size_t sz = crypto_aead_authsize(tfm);
bool need_fallback = false;
@@ -2319,6 +2551,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->base = &a_req->base;
req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
ret = sec_aead_param_check(ctx, req, &need_fallback);
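The hunks above route requests the hardware path cannot take to a software fallback (sec_aead_soft_crypto()/sec_skcipher_soft_crypto()). A minimal sketch of that idiom, assuming a crypto_sync_skcipher fallback allocated at init time; the helper name is illustrative, not the driver's:

#include <crypto/skcipher.h>

/* Illustrative only: forward a request to a software skcipher when the
 * hardware path cannot take it; 'fallback' is assumed to have been set
 * up with crypto_alloc_sync_skcipher() at init time. */
static int soft_crypto_sketch(struct crypto_sync_skcipher *fallback,
			      struct skcipher_request *req, bool encrypt)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);

	skcipher_request_set_sync_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);
	return encrypt ? crypto_skcipher_encrypt(subreq) :
			 crypto_skcipher_decrypt(subreq);
}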
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index c974f95cd126..7a9ef2a9972a 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -210,15 +210,15 @@ static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
* @pool: Pool which hw sgl memory will be allocated in.
* @index: Index of hisi_acc_hw_sgl in pool.
* @hw_sgl_dma: The dma address of allocated hw sgl.
+ * @dir: DMA direction.
*
* This function builds a hw sgl according to the input sgl; the user can use
* hw_sgl_dma as src/dst in its BD. Only a single hw sgl is currently supported.
*/
struct hisi_acc_hw_sgl *
-hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
- struct scatterlist *sgl,
- struct hisi_acc_sgl_pool *pool,
- u32 index, dma_addr_t *hw_sgl_dma)
+hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl,
+ struct hisi_acc_sgl_pool *pool, u32 index,
+ dma_addr_t *hw_sgl_dma, enum dma_data_direction dir)
{
struct hisi_acc_hw_sgl *curr_hw_sgl;
unsigned int i, sg_n_mapped;
@@ -232,7 +232,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
sg_n = sg_nents(sgl);
- sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ sg_n_mapped = dma_map_sg(dev, sgl, sg_n, dir);
if (!sg_n_mapped) {
dev_err(dev, "DMA mapping for SG error!\n");
return ERR_PTR(-EINVAL);
@@ -276,16 +276,17 @@ EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
* @dev: The device which hw sgl belongs to.
* @sgl: Related scatterlist.
* @hw_sgl: Virtual address of hw sgl.
+ * @dir: DMA direction.
*
* This function unmaps allocated hw sgl.
*/
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
- struct hisi_acc_hw_sgl *hw_sgl)
+ struct hisi_acc_hw_sgl *hw_sgl, enum dma_data_direction dir)
{
if (!dev || !sgl || !hw_sgl)
return;
- dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, sgl, sg_nents(sgl), dir);
clear_hw_sgl_sge(hw_sgl);
hw_sgl->entry_sum_in_chain = 0;
hw_sgl->entry_sum_in_sgl = 0;
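Passing the precise DMA direction instead of DMA_BIDIRECTIONAL is both safer on non-coherent systems and cheaper, since the DMA layer can skip unnecessary cache maintenance. A minimal sketch of the pairing rule, with a hypothetical helper name; map and unmap must use the same scatterlist, count and direction:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: map a buffer the device will only read. */
static int map_for_device_read(struct device *dev, struct scatterlist *sgl)
{
	int nents = dma_map_sg(dev, sgl, sg_nents(sgl), DMA_TO_DEVICE);

	if (!nents)
		return -EINVAL;
	/* ... hand the buffer to the device and wait for completion ... */
	dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_TO_DEVICE);
	return 0;
}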
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 7327f8f29b01..b97513981a3b 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -224,7 +224,8 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
return -EINVAL;
req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
- req->req_id << 1, &req->dma_src);
+ req->req_id << 1, &req->dma_src,
+ DMA_TO_DEVICE);
if (IS_ERR(req->hw_src)) {
dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
PTR_ERR(req->hw_src));
@@ -233,7 +234,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
(req->req_id << 1) + 1,
- &req->dma_dst);
+ &req->dma_dst, DMA_FROM_DEVICE);
if (IS_ERR(req->hw_dst)) {
ret = PTR_ERR(req->hw_dst);
dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
@@ -258,9 +259,9 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
return -EINPROGRESS;
err_unmap_output:
- hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst, DMA_FROM_DEVICE);
err_unmap_input:
- hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src, DMA_TO_DEVICE);
return ret;
}
@@ -303,8 +304,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
err = -EIO;
}
- hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
- hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst, DMA_FROM_DEVICE);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src, DMA_TO_DEVICE);
acomp_req->dlen = ops->get_dstlen(sqe);
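The err_unmap_output/err_unmap_input labels above release resources in reverse order of acquisition, the usual kernel goto-unwind idiom. A self-contained sketch with stub helpers (all names illustrative):

/* Stubs standing in for the real map/unmap/submit steps. */
static int map_input_sketch(void) { return 0; }
static int map_output_sketch(void) { return 0; }
static int submit_sketch(void) { return 0; }
static void unmap_input_sketch(void) { }
static void unmap_output_sketch(void) { }

static int do_work_sketch(void)
{
	int ret = map_input_sketch();

	if (ret)
		return ret;
	ret = map_output_sketch();
	if (ret)
		goto err_unmap_input;
	ret = submit_sketch();
	if (ret)
		goto err_unmap_output;
	return 0;

err_unmap_output:
	unmap_output_sketch();
err_unmap_input:
	unmap_input_sketch();
	return ret;
}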
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index a8f735390f0d..76b7ecb5624b 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -436,7 +436,7 @@ static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
if (ctx->flags & DRIVER_FLAGS_SG)
- dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
+ dma_unmap_sg(hdev->dev, ctx->sg, 1, DMA_TO_DEVICE);
return 0;
}
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index fd34dc8f5707..ef0ba4832928 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -249,7 +249,9 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
safexcel_complete(priv, ring);
if (sreq->nents) {
- dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
sreq->nents = 0;
}
@@ -491,7 +493,9 @@ unmap_result:
DMA_FROM_DEVICE);
unmap_sg:
if (req->nents) {
- dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
req->nents = 0;
}
cdesc_rollback:
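Both safexcel hunks, like the img-hash fix above, apply the same DMA-API rule: dma_unmap_sg() must be given the nents originally passed to dma_map_sg(), not the possibly smaller count dma_map_sg() returned. A sketch under that assumption, with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* 'src' and 'nbytes' stand in for the request fields. */
static int map_unmap_sketch(struct device *dev, struct scatterlist *src,
			    unsigned int nbytes)
{
	int nents = sg_nents_for_len(src, nbytes);
	int mapped;

	if (nents < 0)
		return nents;
	mapped = dma_map_sg(dev, src, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;
	/* build descriptors from the 'mapped' entries ... */
	dma_unmap_sg(dev, src, nents, DMA_TO_DEVICE); /* original nents */
	return 0;
}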
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index 95dc8979918d..8f9e21ced0fe 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -68,6 +68,7 @@ struct ocs_hcu_ctx {
* @sg_data_total: Total data in the SG list at any time.
* @sg_data_offset: Offset into the data of the current individual SG node.
* @sg_dma_nents: Number of sg entries mapped in dma_list.
+ * @nents: Number of entries in the scatterlist.
*/
struct ocs_hcu_rctx {
struct ocs_hcu_dev *hcu_dev;
@@ -91,6 +92,7 @@ struct ocs_hcu_rctx {
unsigned int sg_data_total;
unsigned int sg_data_offset;
unsigned int sg_dma_nents;
+ unsigned int nents;
};
/**
@@ -199,7 +201,7 @@ static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
/* Unmap req->src (if mapped). */
if (rctx->sg_dma_nents) {
- dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->src, rctx->nents, DMA_TO_DEVICE);
rctx->sg_dma_nents = 0;
}
@@ -260,6 +262,10 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
rc = -ENOMEM;
goto cleanup;
}
+
+ /* Save the value of nents to pass to dma_unmap_sg. */
+ rctx->nents = nents;
+
/*
* The value returned by dma_map_sg() can be < nents; so update
* nents accordingly.
diff --git a/drivers/crypto/intel/keembay/ocs-aes.c b/drivers/crypto/intel/keembay/ocs-aes.c
index be9f32fc8f42..bb6f33f6b4d3 100644
--- a/drivers/crypto/intel/keembay/ocs-aes.c
+++ b/drivers/crypto/intel/keembay/ocs-aes.c
@@ -7,6 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/swab.h>
@@ -1473,8 +1474,7 @@ int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
ll = dll_desc->vaddr;
for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
ll[i].src_addr = sg_dma_address(sg) + data_offset;
- ll[i].src_len = (sg_dma_len(sg) - data_offset) < data_size ?
- (sg_dma_len(sg) - data_offset) : data_size;
+ ll[i].src_len = min(sg_dma_len(sg) - data_offset, data_size);
data_offset = 0;
data_size -= ll[i].src_len;
/* Current element points to the DMA address of the next one. */
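Replacing the open-coded ternary with min() is preferred kernel style: min() from <linux/minmax.h> type-checks its operands at compile time and evaluates each argument exactly once. A tiny sketch:

#include <linux/minmax.h>
#include <linux/types.h>

static u32 usable_len_sketch(u32 avail, u32 want)
{
	return min(avail, want); /* type-checked, single evaluation per arg */
}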
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 7c3c0f561c95..53fa91d577ed 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -191,7 +191,6 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_SM4 |
ICP_ACCEL_CAPABILITIES_AES_V2 |
ICP_ACCEL_CAPABILITIES_ZUC |
- ICP_ACCEL_CAPABILITIES_ZUC_256 |
ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
@@ -223,17 +222,11 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT;
}
- if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
- }
-
- if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE)
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
ICP_ACCEL_CAPABILITIES_SM2 |
@@ -303,11 +296,13 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV;
rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL;
rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION;
- rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
- rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
- rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
+ rl_data->max_tp[SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
+ rl_data->max_tp[SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC;
rl_data->scale_ref = ADF_420XX_RL_SLICE_REF;
+
+ adf_gen4_init_num_svc_aes(rl_data);
}
static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
@@ -473,6 +468,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_420XX_AE_FREQ;
hw_data->services_supported = adf_gen4_services_supported;
+ hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index bd0b1b1015c0..740f68a36ac5 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -3,6 +3,7 @@
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_admin.h>
+#include <adf_bank_state.h>
#include <adf_cfg.h>
#include <adf_cfg_services.h>
#include <adf_clock.h>
@@ -221,11 +222,13 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV;
rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL;
rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION;
- rl_data->max_tp[ADF_SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM;
- rl_data->max_tp[ADF_SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM;
- rl_data->max_tp[ADF_SVC_DC] = ADF_4XXX_RL_MAX_TP_DC;
+ rl_data->max_tp[SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM;
+ rl_data->max_tp[SVC_DC] = ADF_4XXX_RL_MAX_TP_DC;
rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC;
rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF;
+
+ adf_gen4_init_num_svc_aes(rl_data);
}
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
@@ -448,8 +451,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
- hw_data->bank_state_save = adf_gen4_bank_state_save;
- hw_data->bank_state_restore = adf_gen4_bank_state_restore;
+ hw_data->bank_state_save = adf_bank_state_save;
+ hw_data->bank_state_restore = adf_bank_state_restore;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
@@ -459,6 +462,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
hw_data->services_supported = adf_gen4_services_supported;
+ hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
index 359a6447ccb8..bed88d3ce8ca 100644
--- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
@@ -10,6 +10,7 @@
#include <adf_accel_devices.h>
#include <adf_admin.h>
+#include <adf_bank_state.h>
#include <adf_cfg.h>
#include <adf_cfg_services.h>
#include <adf_clock.h>
@@ -18,6 +19,7 @@
#include <adf_gen6_pm.h>
#include <adf_gen6_ras.h>
#include <adf_gen6_shared.h>
+#include <adf_gen6_tl.h>
#include <adf_timer.h>
#include "adf_6xxx_hw_data.h"
#include "icp_qat_fw_comp.h"
@@ -76,6 +78,10 @@ static const unsigned long thrd_mask_dcc[ADF_6XXX_MAX_ACCELENGINES] = {
0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x03, 0x03, 0x00
};
+static const unsigned long thrd_mask_dcpr[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x00
+};
+
static const char *const adf_6xxx_fw_objs[] = {
[ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ,
[ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ,
@@ -97,7 +103,7 @@ static bool services_supported(unsigned long mask)
{
int num_svc;
- if (mask >= BIT(SVC_BASE_COUNT))
+ if (mask >= BIT(SVC_COUNT))
return false;
num_svc = hweight_long(mask);
@@ -126,10 +132,13 @@ static int get_service(unsigned long *mask)
if (test_and_clear_bit(SVC_DCC, mask))
return SVC_DCC;
+ if (test_and_clear_bit(SVC_DECOMP, mask))
+ return SVC_DECOMP;
+
return -EINVAL;
}
-static enum adf_cfg_service_type get_ring_type(enum adf_services service)
+static enum adf_cfg_service_type get_ring_type(unsigned int service)
{
switch (service) {
case SVC_SYM:
@@ -139,12 +148,14 @@ static enum adf_cfg_service_type get_ring_type(enum adf_services service)
case SVC_DC:
case SVC_DCC:
return COMP;
+ case SVC_DECOMP:
+ return DECOMP;
default:
return UNUSED;
}
}
-static const unsigned long *get_thrd_mask(enum adf_services service)
+static const unsigned long *get_thrd_mask(unsigned int service)
{
switch (service) {
case SVC_SYM:
@@ -155,6 +166,8 @@ static const unsigned long *get_thrd_mask(enum adf_services service)
return thrd_mask_cpr;
case SVC_DCC:
return thrd_mask_dcc;
+ case SVC_DECOMP:
+ return thrd_mask_dcpr;
default:
return NULL;
}
@@ -511,6 +524,55 @@ static int adf_gen6_init_thd2arb_map(struct adf_accel_dev *accel_dev)
return 0;
}
+static void init_num_svc_aes(struct adf_rl_hw_data *device_data)
+{
+ enum adf_fw_objs obj_type, obj_iter;
+ unsigned int svc, i, num_grp;
+ u32 ae_mask;
+
+ for (svc = 0; svc < SVC_BASE_COUNT; svc++) {
+ switch (svc) {
+ case SVC_SYM:
+ case SVC_ASYM:
+ obj_type = ADF_FW_CY_OBJ;
+ break;
+ case SVC_DC:
+ case SVC_DECOMP:
+ obj_type = ADF_FW_DC_OBJ;
+ break;
+ }
+
+ num_grp = ARRAY_SIZE(adf_default_fw_config);
+ for (i = 0; i < num_grp; i++) {
+ obj_iter = adf_default_fw_config[i].obj;
+ if (obj_iter == obj_type) {
+ ae_mask = adf_default_fw_config[i].ae_mask;
+ device_data->svc_ae_mask[svc] = hweight32(ae_mask);
+ break;
+ }
+ }
+ }
+}
+
+static u32 adf_gen6_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+
+ switch (svc) {
+ case SVC_SYM:
+ return device_data->slices.cph_cnt;
+ case SVC_ASYM:
+ return device_data->slices.pke_cnt;
+ case SVC_DC:
+ return device_data->slices.cpr_cnt + device_data->slices.dcpr_cnt;
+ case SVC_DECOMP:
+ return device_data->slices.dcpr_cnt;
+ default:
+ return 0;
+ }
+}
+
static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number)
{
u32 value;
@@ -520,8 +582,8 @@ static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number)
* driver must program the ringmodectl CSRs.
*/
value = ADF_CSR_RD(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number));
- value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_MASK, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
- value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_EN_MASK, ADF_GEN6_RINGMODECTL_TC_EN_OP1);
+ FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_MASK, &value, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
+ FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_EN_MASK, &value, ADF_GEN6_RINGMODECTL_TC_EN_OP1);
ADF_CSR_WR(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number), value);
}
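The switch to FIELD_MODIFY() is more than style: OR-ing in FIELD_PREP() can only set bits, so stale bits of the target field survive a rewrite, while FIELD_MODIFY() clears the field before writing the new value. A minimal sketch with a hypothetical mask:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define TC_MASK_SKETCH	GENMASK(3, 1)	/* hypothetical 3-bit field */

static u32 set_tc_sketch(u32 reg, u32 tc)
{
	/* reg |= FIELD_PREP(TC_MASK_SKETCH, tc) would leave stale field
	 * bits set; FIELD_MODIFY() clears the field first. */
	FIELD_MODIFY(TC_MASK_SKETCH, &reg, tc);
	return reg;
}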
@@ -537,7 +599,7 @@ static int set_vc_config(struct adf_accel_dev *accel_dev)
* Read PVC0CTL then write the masked values.
*/
pci_read_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, &value);
- value |= FIELD_PREP(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT);
+ FIELD_MODIFY(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, &value, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT);
err = pci_write_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, value);
if (err) {
dev_err(&GET_DEV(accel_dev), "pci write to PVC0CTL failed\n");
@@ -546,8 +608,8 @@ static int set_vc_config(struct adf_accel_dev *accel_dev)
/* Read PVC1CTL then write masked values */
pci_read_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, &value);
- value |= FIELD_PREP(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT);
- value |= FIELD_PREP(ADF_GEN6_PVC1CTL_VCEN_MASK, ADF_GEN6_PVC1CTL_VCEN_ON);
+ FIELD_MODIFY(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, &value, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT);
+ FIELD_MODIFY(ADF_GEN6_PVC1CTL_VCEN_MASK, &value, ADF_GEN6_PVC1CTL_VCEN_ON);
err = pci_write_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, value);
if (err)
dev_err(&GET_DEV(accel_dev), "pci write to PVC1CTL failed\n");
@@ -618,7 +680,6 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
if (fusectl1 & ICP_ACCEL_GEN6_MASK_AUTH_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
@@ -627,7 +688,15 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
- capabilities_asym = 0;
+ capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_SM2 |
+ ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_PKE_SLICE) {
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ }
capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
@@ -648,7 +717,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
caps |= capabilities_asym;
if (test_bit(SVC_SYM, &mask))
caps |= capabilities_sym;
- if (test_bit(SVC_DC, &mask))
+ if (test_bit(SVC_DC, &mask) || test_bit(SVC_DECOMP, &mask))
caps |= capabilities_dc;
if (test_bit(SVC_DCC, &mask)) {
/*
@@ -744,7 +813,16 @@ static int adf_init_device(struct adf_accel_dev *accel_dev)
static int enable_pm(struct adf_accel_dev *accel_dev)
{
- return adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER);
+ int ret;
+
+ ret = adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER);
+ if (ret)
+ return ret;
+
+ /* Initialize PM internal data */
+ adf_gen6_init_dev_pm_data(accel_dev);
+
+ return 0;
}
static int dev_config(struct adf_accel_dev *accel_dev)
@@ -776,6 +854,25 @@ static int dev_config(struct adf_accel_dev *accel_dev)
return ret;
}
+static void adf_gen6_init_rl_data(struct adf_rl_hw_data *rl_data)
+{
+ rl_data->pciout_tb_offset = ADF_GEN6_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
+ rl_data->pciin_tb_offset = ADF_GEN6_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
+ rl_data->r2l_offset = ADF_GEN6_RL_R2L_OFFSET;
+ rl_data->l2c_offset = ADF_GEN6_RL_L2C_OFFSET;
+ rl_data->c2s_offset = ADF_GEN6_RL_C2S_OFFSET;
+ rl_data->pcie_scale_div = ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV;
+ rl_data->pcie_scale_mul = ADF_6XXX_RL_PCIE_SCALE_FACTOR_MUL;
+ rl_data->max_tp[SVC_ASYM] = ADF_6XXX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[SVC_SYM] = ADF_6XXX_RL_MAX_TP_SYM;
+ rl_data->max_tp[SVC_DC] = ADF_6XXX_RL_MAX_TP_DC;
+ rl_data->max_tp[SVC_DECOMP] = ADF_6XXX_RL_MAX_TP_DECOMP;
+ rl_data->scan_interval = ADF_6XXX_RL_SCANS_PER_SEC;
+ rl_data->scale_ref = ADF_6XXX_RL_SLICE_REF;
+
+ init_num_svc_aes(rl_data);
+}
+
void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &adf_6xxx_class;
@@ -824,6 +921,8 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = ring_pair_reset;
hw_data->dev_config = dev_config;
+ hw_data->bank_state_save = adf_bank_state_save;
+ hw_data->bank_state_restore = adf_bank_state_restore;
hw_data->get_hb_clock = get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->start_timer = adf_timer_start;
@@ -831,11 +930,17 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
hw_data->init_device = adf_init_device;
hw_data->enable_pm = enable_pm;
hw_data->services_supported = services_supported;
+ hw_data->num_rps = ADF_GEN6_ETR_MAX_BANKS;
+ hw_data->clock_frequency = ADF_6XXX_AE_FREQ;
+ hw_data->get_svc_slice_cnt = adf_gen6_get_svc_slice_cnt;
adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen6_init_dc_ops(&hw_data->dc_ops);
+ adf_gen6_init_vf_mig_ops(&hw_data->vfmig_ops);
adf_gen6_init_ras_ops(&hw_data->ras_ops);
+ adf_gen6_init_tl_data(&hw_data->tl_data);
+ adf_gen6_init_rl_data(&hw_data->rl_data);
}
void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
index 78e2e2c5816e..d822911fe68c 100644
--- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
@@ -99,7 +99,7 @@
#define ADF_GEN6_PVC0CTL_OFFSET 0x204
#define ADF_GEN6_PVC0CTL_TCVCMAP_OFFSET 1
#define ADF_GEN6_PVC0CTL_TCVCMAP_MASK GENMASK(7, 1)
-#define ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT 0x7F
+#define ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT 0x3F
/* VC1 Resource Control Register */
#define ADF_GEN6_PVC1CTL_OFFSET 0x210
@@ -122,6 +122,13 @@
/* Number of heartbeat counter pairs */
#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+/* Rate Limiting */
+#define ADF_GEN6_RL_R2L_OFFSET 0x508000
+#define ADF_GEN6_RL_L2C_OFFSET 0x509000
+#define ADF_GEN6_RL_C2S_OFFSET 0x508818
+#define ADF_GEN6_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800
+#define ADF_GEN6_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804
+
/* Physical function fuses */
#define ADF_6XXX_ACCELENGINES_MASK GENMASK(8, 0)
#define ADF_6XXX_ADMIN_AE_MASK GENMASK(8, 8)
@@ -133,6 +140,19 @@
#define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin"
#define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin"
+/* RL constants */
+#define ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV 100
+#define ADF_6XXX_RL_PCIE_SCALE_FACTOR_MUL 102
+#define ADF_6XXX_RL_SCANS_PER_SEC 954
+#define ADF_6XXX_RL_MAX_TP_ASYM 173750UL
+#define ADF_6XXX_RL_MAX_TP_SYM 95000UL
+#define ADF_6XXX_RL_MAX_TP_DC 40000UL
+#define ADF_6XXX_RL_MAX_TP_DECOMP 40000UL
+#define ADF_6XXX_RL_SLICE_REF 1000UL
+
+/* Clock frequency */
+#define ADF_6XXX_AE_FREQ (1000 * HZ_PER_MHZ)
+
enum icp_qat_gen6_slice_mask {
ICP_ACCEL_GEN6_MASK_UCS_SLICE = BIT(0),
ICP_ACCEL_GEN6_MASK_AUTH_SLICE = BIT(1),
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 66bb295ace28..89845754841b 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -4,6 +4,7 @@ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"'
intel_qat-y := adf_accel_engine.o \
adf_admin.o \
adf_aer.o \
+ adf_bank_state.o \
adf_cfg.o \
adf_cfg_services.o \
adf_clock.o \
@@ -48,9 +49,12 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_cnv_dbgfs.o \
adf_fw_counters.o \
adf_gen4_pm_debugfs.o \
adf_gen4_tl.o \
+ adf_gen6_pm_dbgfs.o \
+ adf_gen6_tl.o \
adf_heartbeat_dbgfs.o \
adf_heartbeat.o \
adf_pm_dbgfs.o \
+ adf_pm_dbgfs_utils.o \
adf_telemetry.o \
adf_tl_debugfs.o \
adf_transport_debug.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 2ee526063213..9fe3239f0114 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -157,39 +157,7 @@ struct admin_info {
u32 mailbox_offset;
};
-struct ring_config {
- u64 base;
- u32 config;
- u32 head;
- u32 tail;
- u32 reserved0;
-};
-
-struct bank_state {
- u32 ringstat0;
- u32 ringstat1;
- u32 ringuostat;
- u32 ringestat;
- u32 ringnestat;
- u32 ringnfstat;
- u32 ringfstat;
- u32 ringcstat0;
- u32 ringcstat1;
- u32 ringcstat2;
- u32 ringcstat3;
- u32 iaintflagen;
- u32 iaintflagreg;
- u32 iaintflagsrcsel0;
- u32 iaintflagsrcsel1;
- u32 iaintcolen;
- u32 iaintcolctl;
- u32 iaintflagandcolen;
- u32 ringexpstat;
- u32 ringexpintenable;
- u32 ringsrvarben;
- u32 reserved0;
- struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
-};
+struct adf_bank_state;
struct adf_hw_csr_ops {
u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
@@ -338,9 +306,9 @@ struct adf_hw_device_data {
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state);
+ struct adf_bank_state *state);
int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
- u32 bank_number, struct bank_state *state);
+ u32 bank_number, struct adf_bank_state *state);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
@@ -351,6 +319,8 @@ struct adf_hw_device_data {
u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*dev_config)(struct adf_accel_dev *accel_dev);
bool (*services_supported)(unsigned long mask);
+ u32 (*get_svc_slice_cnt)(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc);
struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
struct adf_dc_ops dc_ops;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 4cb8bd83f570..35679b21ff63 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -229,7 +229,7 @@ const struct pci_error_handlers adf_err_handler = {
};
EXPORT_SYMBOL_GPL(adf_err_handler);
-int adf_dev_autoreset(struct adf_accel_dev *accel_dev)
+static int adf_dev_autoreset(struct adf_accel_dev *accel_dev)
{
if (accel_dev->autoreset_on_error)
return adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_ASYNC);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_bank_state.c b/drivers/crypto/intel/qat/qat_common/adf_bank_state.c
new file mode 100644
index 000000000000..225d55d56a4b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_bank_state.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+
+#define pr_fmt(fmt) "QAT: " fmt
+
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/printk.h>
+#include "adf_accel_devices.h"
+#include "adf_bank_state.h"
+#include "adf_common_drv.h"
+
+/* Ring interrupt masks */
+#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0)
+#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
+#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
+
+static inline int check_stat(u32 (*op)(void __iomem *, u32), u32 expect_val,
+ const char *name, void __iomem *base, u32 bank)
+{
+ u32 actual_val = op(base, bank);
+
+ if (expect_val == actual_val)
+ return 0;
+
+ pr_err("Fail to restore %s register. Expected %#x, actual %#x\n",
+ name, expect_val, actual_val);
+
+ return -EINVAL;
+}
+
+static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct adf_bank_state *state, u32 num_rings)
+{
+ u32 i;
+
+ state->ringstat0 = ops->read_csr_stat(base, bank);
+ state->ringuostat = ops->read_csr_uo_stat(base, bank);
+ state->ringestat = ops->read_csr_e_stat(base, bank);
+ state->ringnestat = ops->read_csr_ne_stat(base, bank);
+ state->ringnfstat = ops->read_csr_nf_stat(base, bank);
+ state->ringfstat = ops->read_csr_f_stat(base, bank);
+ state->ringcstat0 = ops->read_csr_c_stat(base, bank);
+ state->iaintflagen = ops->read_csr_int_en(base, bank);
+ state->iaintflagreg = ops->read_csr_int_flag(base, bank);
+ state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
+ state->iaintcolen = ops->read_csr_int_col_en(base, bank);
+ state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
+ state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
+ state->ringexpstat = ops->read_csr_exp_stat(base, bank);
+ state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
+ state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);
+
+ for (i = 0; i < num_rings; i++) {
+ state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
+ state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
+ state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
+ state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
+ }
+}
+
+static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct adf_bank_state *state, u32 num_rings,
+ int tx_rx_gap)
+{
+ u32 val, tmp_val, i;
+ int ret;
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_base(base, bank, i, state->rings[i].base);
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_config(base, bank, i, state->rings[i].config);
+
+ for (i = 0; i < num_rings / 2; i++) {
+ int tx = i * (tx_rx_gap + 1);
+ int rx = tx + tx_rx_gap;
+
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);
+
+ /*
+ * The TX ring head needs to be updated again to make sure that
+ * the HW will not consider the ring as full when it is empty
+ * and the correct state flags are set to match the recovered state.
+ */
+ if (state->ringestat & BIT(tx)) {
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ }
+
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+ ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+ /*
+ * The RX ring tail needs to be updated again to make sure that
+ * the HW will not consider the ring as empty when it is full
+ * and the correct state flags are set to match the recovered state.
+ */
+ if (state->ringfstat & BIT(rx))
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ }
+
+ ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
+ ops->write_csr_int_en(base, bank, state->iaintflagen);
+ ops->write_csr_int_col_en(base, bank, state->iaintcolen);
+ ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
+ ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
+ ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
+
+ /*
+ * Verify whether any exceptions were raised during the bank save process.
+ * If exceptions occurred, the status and exception registers cannot
+ * be directly restored. Consequently, further restoration is not
+ * feasible, and the current state of the ring should be maintained.
+ */
+ val = state->ringexpstat;
+ if (val) {
+ pr_info("Bank %u state not fully restored due to exception in saved state (%#x)\n",
+ bank, val);
+ return 0;
+ }
+
+ /* Ensure that the restoration process completed without exceptions */
+ tmp_val = ops->read_csr_exp_stat(base, bank);
+ if (tmp_val) {
+ pr_err("Bank %u restored with exception: %#x\n", bank, tmp_val);
+ return -EFAULT;
+ }
+
+ ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
+
+ /* Check that all ring statuses match the saved state. */
+ ret = check_stat(ops->read_csr_stat, state->ringstat0, "ringstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_e_stat, state->ringestat, "ringestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * adf_bank_state_save() - save state of bank-related registers
+ * @accel_dev: Pointer to the device structure
+ * @bank_number: Bank number
+ * @state: Pointer to bank state structure
+ *
+ * This function saves the state of a bank by reading the bank CSRs and
+ * writing them in the @state structure.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int adf_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
+
+ bank_state_save(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_bank_state_save);
+
+/**
+ * adf_bank_state_restore() - restore state of bank-related registers
+ * @accel_dev: Pointer to the device structure
+ * @bank_number: Bank number
+ * @state: Pointer to bank state structure
+ *
+ * This function attempts to restore the state of a bank by writing the
+ * bank CSRs to the values in the state structure.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int adf_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+ int ret;
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
+
+ ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "Unable to restore state of bank %d\n", bank_number);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_bank_state_restore);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_bank_state.h b/drivers/crypto/intel/qat/qat_common/adf_bank_state.h
new file mode 100644
index 000000000000..48b573d692dd
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_bank_state.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_BANK_STATE_H_
+#define ADF_BANK_STATE_H_
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
+struct ring_config {
+ u64 base;
+ u32 config;
+ u32 head;
+ u32 tail;
+ u32 reserved0;
+};
+
+struct adf_bank_state {
+ u32 ringstat0;
+ u32 ringstat1;
+ u32 ringuostat;
+ u32 ringestat;
+ u32 ringnestat;
+ u32 ringnfstat;
+ u32 ringfstat;
+ u32 ringcstat0;
+ u32 ringcstat1;
+ u32 ringcstat2;
+ u32 ringcstat3;
+ u32 iaintflagen;
+ u32 iaintflagreg;
+ u32 iaintflagsrcsel0;
+ u32 iaintflagsrcsel1;
+ u32 iaintcolen;
+ u32 iaintcolctl;
+ u32 iaintflagandcolen;
+ u32 ringexpstat;
+ u32 ringexpintenable;
+ u32 ringsrvarben;
+ u32 reserved0;
+ struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
+};
+
+int adf_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state);
+int adf_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state);
+
+#endif
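A hypothetical usage sketch of the new pair, not taken from the patch: snapshot a bank, reset its ring pair, then restore the snapshot, the sequence that VF live migration relies on:

#include <linux/slab.h>
#include "adf_bank_state.h"

static int reset_bank_with_state_sketch(struct adf_accel_dev *accel_dev,
					u32 bank)
{
	struct adf_bank_state *state;
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	ret = adf_bank_state_save(accel_dev, bank, state);
	if (!ret) {
		/* ... drain and reset the ring pair here ... */
		ret = adf_bank_state_restore(accel_dev, bank, state);
	}

	kfree(state);
	return ret;
}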
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
index 15fdf9854b81..81e9e9d7eccd 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
@@ -29,6 +29,7 @@ enum adf_cfg_service_type {
COMP,
SYM,
ASYM,
+ DECOMP,
USED
};
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
index c39871291da7..7d00bcb41ce7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
@@ -7,6 +7,7 @@
#include <linux/pci.h>
#include <linux/string.h>
#include "adf_cfg.h"
+#include "adf_cfg_common.h"
#include "adf_cfg_services.h"
#include "adf_cfg_strings.h"
@@ -15,13 +16,14 @@ static const char *const adf_cfg_services[] = {
[SVC_SYM] = ADF_CFG_SYM,
[SVC_DC] = ADF_CFG_DC,
[SVC_DCC] = ADF_CFG_DCC,
+ [SVC_DECOMP] = ADF_CFG_DECOMP,
};
/*
* Ensure that the size of the array matches the number of services,
- * SVC_BASE_COUNT, that is used to size the bitmap.
+ * SVC_COUNT, that is used to size the bitmap.
*/
-static_assert(ARRAY_SIZE(adf_cfg_services) == SVC_BASE_COUNT);
+static_assert(ARRAY_SIZE(adf_cfg_services) == SVC_COUNT);
/*
* Ensure that the maximum number of concurrent services that can be
@@ -34,7 +36,7 @@ static_assert(ARRAY_SIZE(adf_cfg_services) >= MAX_NUM_CONCURR_SVC);
* Ensure that the number of services fits in a single unsigned long, as each
* service is represented by a bit in the mask.
*/
-static_assert(BITS_PER_LONG >= SVC_BASE_COUNT);
+static_assert(BITS_PER_LONG >= SVC_COUNT);
/*
* Ensure that the size of the concatenation of all service strings is smaller
@@ -43,6 +45,7 @@ static_assert(BITS_PER_LONG >= SVC_BASE_COUNT);
static_assert(sizeof(ADF_CFG_SYM ADF_SERVICES_DELIMITER
ADF_CFG_ASYM ADF_SERVICES_DELIMITER
ADF_CFG_DC ADF_SERVICES_DELIMITER
+ ADF_CFG_DECOMP ADF_SERVICES_DELIMITER
ADF_CFG_DCC) < ADF_CFG_MAX_VAL_LEN_IN_BYTES);
static int adf_service_string_to_mask(struct adf_accel_dev *accel_dev, const char *buf,
@@ -88,7 +91,7 @@ static int adf_service_mask_to_string(unsigned long mask, char *buf, size_t len)
if (len < ADF_CFG_MAX_VAL_LEN_IN_BYTES)
return -ENOSPC;
- for_each_set_bit(bit, &mask, SVC_BASE_COUNT) {
+ for_each_set_bit(bit, &mask, SVC_COUNT) {
if (offset)
offset += scnprintf(buf + offset, len - offset,
ADF_SERVICES_DELIMITER);
@@ -167,9 +170,43 @@ int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
if (test_bit(SVC_DC, &mask))
return SVC_DC;
+ if (test_bit(SVC_DECOMP, &mask))
+ return SVC_DECOMP;
+
if (test_bit(SVC_DCC, &mask))
return SVC_DCC;
return -EINVAL;
}
EXPORT_SYMBOL_GPL(adf_get_service_enabled);
+
+enum adf_cfg_service_type adf_srv_to_cfg_svc_type(enum adf_base_services svc)
+{
+ switch (svc) {
+ case SVC_ASYM:
+ return ASYM;
+ case SVC_SYM:
+ return SYM;
+ case SVC_DC:
+ return COMP;
+ case SVC_DECOMP:
+ return DECOMP;
+ default:
+ return UNUSED;
+ }
+}
+
+bool adf_is_service_enabled(struct adf_accel_dev *accel_dev, enum adf_base_services svc)
+{
+ enum adf_cfg_service_type arb_srv = adf_srv_to_cfg_svc_type(svc);
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u8 rps_per_bundle = hw_data->num_banks_per_vf;
+ int i;
+
+ for (i = 0; i < rps_per_bundle; i++) {
+ if (GET_SRV_TYPE(accel_dev, i) == arb_srv)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
index 3742c450878f..913d717280af 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
@@ -7,16 +7,21 @@
struct adf_accel_dev;
-enum adf_services {
+enum adf_base_services {
SVC_ASYM = 0,
SVC_SYM,
SVC_DC,
- SVC_DCC,
+ SVC_DECOMP,
SVC_BASE_COUNT
};
+enum adf_extended_services {
+ SVC_DCC = SVC_BASE_COUNT,
+ SVC_COUNT
+};
+
enum adf_composed_services {
- SVC_SYM_ASYM = SVC_BASE_COUNT,
+ SVC_SYM_ASYM = SVC_COUNT,
SVC_SYM_DC,
SVC_ASYM_DC,
};
@@ -33,5 +38,7 @@ int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
size_t in_len, char *out, size_t out_len);
int adf_get_service_enabled(struct adf_accel_dev *accel_dev);
int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask);
+enum adf_cfg_service_type adf_srv_to_cfg_svc_type(enum adf_base_services svc);
+bool adf_is_service_enabled(struct adf_accel_dev *accel_dev, enum adf_base_services svc);
#endif
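The enum split separates base hardware services, which size per-service tables such as max_tp[] and svc_ae_mask[], from extended configuration-only services such as SVC_DCC; masks parsed from the configuration still span all SVC_COUNT bits. A small sketch of the bounds check under that reading:

#include <linux/bits.h>
#include <linux/types.h>
#include "adf_cfg_services.h"

static bool service_mask_in_range_sketch(unsigned long mask)
{
	/* Hardware tables are indexed up to SVC_BASE_COUNT; configuration
	 * masks may also carry extended services, so bound by SVC_COUNT. */
	return mask < BIT(SVC_COUNT);
}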
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index b79982c4a856..30107a02ee7f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -24,6 +24,7 @@
#define ADF_CY "Cy"
#define ADF_DC "Dc"
#define ADF_CFG_DC "dc"
+#define ADF_CFG_DECOMP "decomp"
#define ADF_CFG_CY "sym;asym"
#define ADF_CFG_SYM "sym"
#define ADF_CFG_ASYM "asym"
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index eaa6388a6678..6cf3a95489e8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -86,7 +86,6 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
extern const struct pci_error_handlers adf_err_handler;
void adf_reset_sbr(struct adf_accel_dev *accel_dev);
void adf_reset_flr(struct adf_accel_dev *accel_dev);
-int adf_dev_autoreset(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
@@ -189,6 +188,7 @@ void adf_exit_misc_wq(void);
bool adf_misc_wq_queue_work(struct work_struct *work);
bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
unsigned long delay);
+void adf_misc_wq_flush(void);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 0406cb09c5bb..349fdb323763 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
+
+#define pr_fmt(fmt) "QAT: " fmt
+
#include <linux/bitops.h>
#include <linux/iopoll.h>
#include <asm/div64.h>
@@ -259,7 +262,10 @@ bool adf_gen4_services_supported(unsigned long mask)
{
unsigned long num_svc = hweight_long(mask);
- if (mask >= BIT(SVC_BASE_COUNT))
+ if (mask >= BIT(SVC_COUNT))
+ return false;
+
+ if (test_bit(SVC_DECOMP, &mask))
return false;
switch (num_svc) {
@@ -485,187 +491,6 @@ int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
return ret;
}
-static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
- u32 bank, struct bank_state *state, u32 num_rings)
-{
- u32 i;
-
- state->ringstat0 = ops->read_csr_stat(base, bank);
- state->ringuostat = ops->read_csr_uo_stat(base, bank);
- state->ringestat = ops->read_csr_e_stat(base, bank);
- state->ringnestat = ops->read_csr_ne_stat(base, bank);
- state->ringnfstat = ops->read_csr_nf_stat(base, bank);
- state->ringfstat = ops->read_csr_f_stat(base, bank);
- state->ringcstat0 = ops->read_csr_c_stat(base, bank);
- state->iaintflagen = ops->read_csr_int_en(base, bank);
- state->iaintflagreg = ops->read_csr_int_flag(base, bank);
- state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
- state->iaintcolen = ops->read_csr_int_col_en(base, bank);
- state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
- state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
- state->ringexpstat = ops->read_csr_exp_stat(base, bank);
- state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
- state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);
-
- for (i = 0; i < num_rings; i++) {
- state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
- state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
- state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
- state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
- }
-}
-
-#define CHECK_STAT(op, expect_val, name, args...) \
-({ \
- u32 __expect_val = (expect_val); \
- u32 actual_val = op(args); \
- (__expect_val == actual_val) ? 0 : \
- (pr_err("QAT: Fail to restore %s register. Expected 0x%x, actual 0x%x\n", \
- name, __expect_val, actual_val), -EINVAL); \
-})
-
-static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
- u32 bank, struct bank_state *state, u32 num_rings,
- int tx_rx_gap)
-{
- u32 val, tmp_val, i;
- int ret;
-
- for (i = 0; i < num_rings; i++)
- ops->write_csr_ring_base(base, bank, i, state->rings[i].base);
-
- for (i = 0; i < num_rings; i++)
- ops->write_csr_ring_config(base, bank, i, state->rings[i].config);
-
- for (i = 0; i < num_rings / 2; i++) {
- int tx = i * (tx_rx_gap + 1);
- int rx = tx + tx_rx_gap;
-
- ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
- ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);
-
- /*
- * The TX ring head needs to be updated again to make sure that
- * the HW will not consider the ring as full when it is empty
- * and the correct state flags are set to match the recovered state.
- */
- if (state->ringestat & BIT(tx)) {
- val = ops->read_csr_int_srcsel(base, bank);
- val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
- ops->write_csr_int_srcsel_w_val(base, bank, val);
- ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
- }
-
- ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
- val = ops->read_csr_int_srcsel(base, bank);
- val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
- ops->write_csr_int_srcsel_w_val(base, bank, val);
-
- ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
- val = ops->read_csr_int_srcsel(base, bank);
- val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
- ops->write_csr_int_srcsel_w_val(base, bank, val);
-
- /*
- * The RX ring tail needs to be updated again to make sure that
- * the HW will not consider the ring as empty when it is full
- * and the correct state flags are set to match the recovered state.
- */
- if (state->ringfstat & BIT(rx))
- ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
- }
-
- ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
- ops->write_csr_int_en(base, bank, state->iaintflagen);
- ops->write_csr_int_col_en(base, bank, state->iaintcolen);
- ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
- ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
- ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
- ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
-
- /* Check that all ring statuses match the saved state. */
- ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
- base, bank);
- if (ret)
- return ret;
-
- tmp_val = ops->read_csr_exp_stat(base, bank);
- val = state->ringexpstat;
- if (tmp_val && !val) {
- pr_err("QAT: Bank was restored with exception: 0x%x\n", val);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state)
-{
- struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
- struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
- void __iomem *csr_base = adf_get_etr_base(accel_dev);
-
- if (bank_number >= hw_data->num_banks || !state)
- return -EINVAL;
-
- dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
-
- bank_state_save(csr_ops, csr_base, bank_number, state,
- hw_data->num_rings_per_bank);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save);
-
-int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state)
-{
- struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
- struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
- void __iomem *csr_base = adf_get_etr_base(accel_dev);
- int ret;
-
- if (bank_number >= hw_data->num_banks || !state)
- return -EINVAL;
-
- dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
-
- ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
- hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
- if (ret)
- dev_err(&GET_DEV(accel_dev),
- "Unable to restore state of bank %d\n", bank_number);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);
-
static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo)
{
struct icp_qat_fw_comp_req *req_tmpl = ctx;
@@ -733,3 +558,43 @@ void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
dc_ops->build_decomp_block = adf_gen4_build_decomp_block;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
+
+void adf_gen4_init_num_svc_aes(struct adf_rl_hw_data *device_data)
+{
+ struct adf_hw_device_data *hw_data;
+ unsigned int i;
+ u32 ae_cnt;
+
+ hw_data = container_of(device_data, struct adf_hw_device_data, rl_data);
+ ae_cnt = hweight32(hw_data->get_ae_mask(hw_data));
+ if (!ae_cnt)
+ return;
+
+ for (i = 0; i < SVC_BASE_COUNT; i++)
+ device_data->svc_ae_mask[i] = ae_cnt - 1;
+
+ /*
+ * The decompression service is not supported on QAT GEN4 devices.
+ * Therefore, set svc_ae_mask to 0.
+ */
+ device_data->svc_ae_mask[SVC_DECOMP] = 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_num_svc_aes);
+
+u32 adf_gen4_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+
+ switch (svc) {
+ case SVC_SYM:
+ return device_data->slices.cph_cnt;
+ case SVC_ASYM:
+ return device_data->slices.pke_cnt;
+ case SVC_DC:
+ return device_data->slices.dcpr_cnt;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_svc_slice_cnt);
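The ae_cnt - 1 stored for each base service plausibly holds one engine back for admin duties; that reading is an assumption, not something the patch states. A sketch of the computation:

#include <linux/bitops.h>
#include <linux/types.h>

static u32 svc_ae_count_sketch(u32 ae_mask)
{
	u32 ae_cnt = hweight32(ae_mask);	/* e.g. 0x1ff -> 9 engines */

	return ae_cnt ? ae_cnt - 1 : 0;		/* one engine held back */
}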
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index e4f4d5fa616d..cd26b6724c43 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -84,9 +84,6 @@
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
/* Ring interrupt */
-#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0)
-#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
-#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC)
#define ADF_COALESCED_POLL_DELAY_US 1000
#define ADF_WQM_CSR_RPINTSOU(bank) (0x200000 + ((bank) << 12))
@@ -176,11 +173,10 @@ int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
u32 bank_number, int timeout_us);
void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
u32 bank_number);
-int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state);
-int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
- u32 bank_number, struct bank_state *state);
bool adf_gen4_services_supported(unsigned long service_mask);
void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
+void adf_gen4_init_num_svc_aes(struct adf_rl_hw_data *device_data);
+u32 adf_gen4_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
index 2e4095c4c12c..b7e38842a46d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
@@ -1,47 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
#include <linux/string_helpers.h>
-#include <linux/stringify.h>
#include "adf_accel_devices.h"
#include "adf_admin.h"
#include "adf_common_drv.h"
#include "adf_gen4_pm.h"
+#include "adf_pm_dbgfs_utils.h"
#include "icp_qat_fw_init_admin.h"
-/*
- * This is needed because a variable is used to index the mask at
- * pm_scnprint_table(), making it not compile time constant, so the compile
- * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled.
- */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
-#define PM_INFO_MEMBER_OFF(member) \
- (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32))
-
-#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \
-{ \
- .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \
- .key = __stringify(_field_), \
- .field_mask = _mask_, \
-}
-
-#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \
- PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0))
-
#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \
PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN4_PM_##_field_##_MASK)
-#define PM_INFO_MAX_KEY_LEN 21
-
-struct pm_status_row {
- int reg_offset;
- u32 field_mask;
- const char *key;
-};
-
static const struct pm_status_row pm_fuse_rows[] = {
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM),
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE),
@@ -109,44 +80,6 @@ static const struct pm_status_row pm_csrs_rows[] = {
PM_INFO_REGSET_ENTRY32(pm.pwrreq, CPM_PM_PWRREQ),
};
-static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
- u32 *pm_info_regs, size_t buff_size, int table_len,
- bool lowercase)
-{
- char key[PM_INFO_MAX_KEY_LEN];
- int wr = 0;
- int i;
-
- for (i = 0; i < table_len; i++) {
- if (lowercase)
- string_lower(key, table[i].key);
- else
- string_upper(key, table[i].key);
-
- wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key,
- field_get(table[i].field_mask,
- pm_info_regs[table[i].reg_offset]));
- }
-
- return wr;
-}
-
-static int pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
- u32 *pm_info_regs, size_t buff_size,
- int table_len)
-{
- return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
- table_len, false);
-}
-
-static int pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
- u32 *pm_info_regs, size_t buff_size,
- int table_len)
-{
- return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
- table_len, true);
-}
-
static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE);
static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
@@ -191,9 +124,9 @@ static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
/* Fusectl related */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"----------- PM Fuse info ---------\n");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_fuse_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_fuse_rows));
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "max_pwrreq: %#x\n",
pm_info->max_pwrreq);
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "min_pwrreq: %#x\n",
@@ -204,28 +137,28 @@ static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
"------------ PM Info ------------\n");
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "power_level: %s\n",
pm_info->pwr_state == PM_SET_MIN ? "min" : "max");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_info_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_info_rows));
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: STATIC\n");
/* SSM related */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"----------- SSM_PM Info ----------\n");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_ssm_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_ssm_rows));
/* Log related */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"------------- PM Log -------------\n");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_log_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_log_rows));
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_event_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_event_rows));
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "idle_irq_count: %#x\n",
pm->idle_irq_counters);
@@ -241,9 +174,9 @@ static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
/* CSRs content */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"----------- HW PM CSRs -----------\n");
- len += pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_csrs_rows));
+ len += adf_pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_csrs_rows));
val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
index a62eb5e8dbe6..adb21656a3ba 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
@@ -9,6 +9,7 @@
#include <asm/errno.h>
#include "adf_accel_devices.h"
+#include "adf_bank_state.h"
#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pfvf.h"
@@ -358,7 +359,7 @@ static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr,
pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr,
- (struct bank_state *)state);
+ (struct adf_bank_state *)state);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to load regs for vf%d bank%d\n",
@@ -585,7 +586,7 @@ static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
pf_bank_nr += vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
ret = hw_data->bank_state_save(accel_dev, pf_bank_nr,
- (struct bank_state *)state);
+ (struct adf_bank_state *)state);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to save regs for vf%d bank%d\n",
@@ -593,7 +594,7 @@ static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
return ret;
}
- return sizeof(struct bank_state);
+ return sizeof(struct adf_bank_state);
}
static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
index 9a5b995f7ada..4c0d576e8c21 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
@@ -24,5 +24,29 @@ struct adf_accel_dev;
/* cpm_pm_status bitfields */
#define ADF_GEN6_PM_INIT_STATE BIT(21)
+#define ADF_GEN6_PM_CPM_PM_STATE_MASK GENMASK(22, 20)
+
+/* fusectl0 bitfields */
+#define ADF_GEN6_PM_ENABLE_PM_MASK BIT(21)
+#define ADF_GEN6_PM_ENABLE_PM_IDLE_MASK BIT(22)
+#define ADF_GEN6_PM_ENABLE_DEEP_PM_IDLE_MASK BIT(23)
+
+/* cpm_pm_fw_init bitfields */
+#define ADF_GEN6_PM_IDLE_FILTER_MASK GENMASK(5, 3)
+#define ADF_GEN6_PM_IDLE_ENABLE_MASK BIT(2)
+
+/* ssm_pm_enable bitfield */
+#define ADF_GEN6_PM_SSM_PM_ENABLE_MASK BIT(0)
+
+/* ssm_pm_domain_status bitfield */
+#define ADF_GEN6_PM_DOMAIN_POWERED_UP_MASK BIT(0)
+
+#ifdef CONFIG_DEBUG_FS
+void adf_gen6_init_dev_pm_data(struct adf_accel_dev *accel_dev);
+#else
+static inline void adf_gen6_init_dev_pm_data(struct adf_accel_dev *accel_dev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
#endif /* ADF_GEN6_PM_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
new file mode 100644
index 000000000000..603aefba0fdb
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/string_helpers.h>
+
+#include "adf_admin.h"
+#include "adf_common_drv.h"
+#include "adf_gen6_pm.h"
+#include "adf_pm_dbgfs_utils.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \
+ PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN6_PM_##_field_##_MASK)
+
+static struct pm_status_row pm_fuse_rows[] = {
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM),
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE),
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_DEEP_PM_IDLE),
+};
+
+static struct pm_status_row pm_info_rows[] = {
+ PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE),
+ PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_ENABLE),
+ PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_FILTER),
+};
+
+static struct pm_status_row pm_ssm_rows[] = {
+ PM_INFO_REGSET_ENTRY(ssm.pm_enable, SSM_PM_ENABLE),
+ PM_INFO_REGSET_ENTRY(ssm.pm_domain_status, DOMAIN_POWERED_UP),
+};
+
+static struct pm_status_row pm_csrs_rows[] = {
+ PM_INFO_REGSET_ENTRY32(pm.fw_init, CPM_PM_FW_INIT),
+ PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS),
+};
+
+static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE);
+
+static ssize_t adf_gen6_print_pm_status(struct adf_accel_dev *accel_dev,
+ char __user *buf, size_t count,
+ loff_t *pos)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ struct icp_qat_fw_init_admin_pm_info *pm_info;
+ dma_addr_t p_state_addr;
+ u32 *pm_info_regs;
+ size_t len = 0;
+ char *pm_kv;
+ u32 val;
+ int ret;
+
+ pm_info = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pm_info)
+ return -ENOMEM;
+
+ pm_kv = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pm_kv) {
+ kfree(pm_info);
+ return -ENOMEM;
+ }
+
+ p_state_addr = dma_map_single(&GET_DEV(accel_dev), pm_info, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(&GET_DEV(accel_dev), p_state_addr);
+ if (ret)
+ goto out_free;
+
+ /* Query power management information from QAT FW */
+ ret = adf_get_pm_info(accel_dev, p_state_addr, PAGE_SIZE);
+ dma_unmap_single(&GET_DEV(accel_dev), p_state_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto out_free;
+
+ pm_info_regs = (u32 *)pm_info;
+
+ /* Fuse control register */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- PM Fuse info ---------\n");
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_fuse_rows));
+
+ /* Power management */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- PM Info --------------\n");
+
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_info_rows));
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: ACTIVE\n");
+
+ /* Shared Slice Module */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- SSM_PM Info ----------\n");
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_ssm_rows));
+
+ /* Control status register content */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- HW PM CSRs -----------\n");
+ len += adf_pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_csrs_rows));
+
+ val = ADF_CSR_RD(pmisc, ADF_GEN6_PM_INTERRUPT);
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "CPM_PM_INTERRUPT: %#x\n", val);
+ ret = simple_read_from_buffer(buf, count, pos, pm_kv, len);
+
+out_free:
+ kfree(pm_info);
+ kfree(pm_kv);
+
+ return ret;
+}
+
+void adf_gen6_init_dev_pm_data(struct adf_accel_dev *accel_dev)
+{
+ accel_dev->power_management.print_pm_status = adf_gen6_print_pm_status;
+ accel_dev->power_management.present = true;
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_dev_pm_data);
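For context, adf_gen6_print_pm_status() above follows the usual one-shot debugfs read shape: build the whole report into a kernel buffer, then let simple_read_from_buffer() handle the offset bookkeeping and short reads. A minimal sketch of that shape, with hypothetical names (my_status_read and the "state" line are not part of this patch):

	#include <linux/fs.h>
	#include <linux/slab.h>
	#include <linux/uaccess.h>

	static ssize_t my_status_read(struct file *file, char __user *buf,
				      size_t count, loff_t *pos)
	{
		char *page;
		size_t len;
		ssize_t ret;

		page = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		/* Build the full report first; assumed single-page output. */
		len = scnprintf(page, PAGE_SIZE, "state: %s\n", "active");

		/* simple_read_from_buffer() deals with *pos and partial reads. */
		ret = simple_read_from_buffer(buf, count, pos, page, len);
		kfree(page);
		return ret;
	}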
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
index 58a072e2f936..c9b151006dca 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
@@ -5,6 +5,7 @@
#include "adf_gen4_config.h"
#include "adf_gen4_hw_csr_data.h"
#include "adf_gen4_pfvf.h"
+#include "adf_gen4_vf_mig.h"
#include "adf_gen6_shared.h"
struct adf_accel_dev;
@@ -47,3 +48,9 @@ int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev)
return adf_no_dev_config(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_gen6_no_dev_config);
+
+void adf_gen6_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops)
+{
+ adf_gen4_init_vf_mig_ops(vfmig_ops);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_vf_mig_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
index bc8e71e984fc..fc6fad029a70 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
@@ -4,6 +4,7 @@
#define ADF_GEN6_SHARED_H_
struct adf_hw_csr_ops;
+struct qat_migdev_ops;
struct adf_accel_dev;
struct adf_pfvf_ops;
@@ -12,4 +13,5 @@ void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev);
int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev);
int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev);
+void adf_gen6_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops);
#endif/* ADF_GEN6_SHARED_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
new file mode 100644
index 000000000000..cf804f95838a
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Intel Corporation. */
+#include <linux/export.h>
+
+#include "adf_gen6_tl.h"
+#include "adf_telemetry.h"
+#include "adf_tl_debugfs.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_GEN6_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen6)
+
+#define ADF_GEN6_TL_RP_REG_OFF(reg) ADF_TL_RP_REG_OFF(reg, gen6)
+
+#define ADF_GEN6_TL_SL_UTIL_COUNTER(_name) \
+ ADF_TL_COUNTER("util_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_util, gen6))
+
+#define ADF_GEN6_TL_SL_EXEC_COUNTER(_name) \
+ ADF_TL_COUNTER("exec_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_exec_cnt, gen6))
+
+#define SLICE_IDX(sl) offsetof(struct icp_qat_fw_init_admin_slice_cnt, sl##_cnt)
+
+/* Device level counters. */
+static const struct adf_tl_dbg_counter dev_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_prt_trans_cnt)),
+ /* Max read latency[ns]. */
+ ADF_TL_COUNTER(MAX_RD_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_rd_lat_max)),
+ /* Read latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(RD_LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_rd_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_rd_cmpl_cnt)),
+ /* Max "get to put" latency[ns]. */
+ ADF_TL_COUNTER(MAX_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_gp_lat_max)),
+ /* "Get to put" latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_bw_out)),
+ /* Page request latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(PAGE_REQ_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_page_req_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_page_req_cnt)),
+ /* Page translation latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(AT_TRANS_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_trans_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_trans_lat_cnt)),
+ /* Maximum uTLB used. */
+ ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_max_utlb_used)),
+};
+
+/* Accelerator utilization counters */
+static const struct adf_tl_dbg_counter sl_util_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression accelerator utilization. */
+ [SLICE_IDX(cpr)] = ADF_GEN6_TL_SL_UTIL_COUNTER(cnv),
+ /* Decompression accelerator utilization. */
+ [SLICE_IDX(dcpr)] = ADF_GEN6_TL_SL_UTIL_COUNTER(dcprz),
+ /* PKE accelerator utilization. */
+ [SLICE_IDX(pke)] = ADF_GEN6_TL_SL_UTIL_COUNTER(pke),
+ /* Wireless Authentication accelerator utilization. */
+ [SLICE_IDX(wat)] = ADF_GEN6_TL_SL_UTIL_COUNTER(wat),
+ /* Wireless Cipher accelerator utilization. */
+ [SLICE_IDX(wcp)] = ADF_GEN6_TL_SL_UTIL_COUNTER(wcp),
+ /* UCS accelerator utilization. */
+ [SLICE_IDX(ucs)] = ADF_GEN6_TL_SL_UTIL_COUNTER(ucs),
+ /* Authentication accelerator utilization. */
+ [SLICE_IDX(ath)] = ADF_GEN6_TL_SL_UTIL_COUNTER(ath),
+};
+
+/* Accelerator execution counters */
+static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression accelerator execution count. */
+ [SLICE_IDX(cpr)] = ADF_GEN6_TL_SL_EXEC_COUNTER(cnv),
+ /* Decompression accelerator execution count. */
+ [SLICE_IDX(dcpr)] = ADF_GEN6_TL_SL_EXEC_COUNTER(dcprz),
+ /* PKE execution count. */
+ [SLICE_IDX(pke)] = ADF_GEN6_TL_SL_EXEC_COUNTER(pke),
+ /* Wireless Authentication accelerator execution count. */
+ [SLICE_IDX(wat)] = ADF_GEN6_TL_SL_EXEC_COUNTER(wat),
+ /* Wireless Cipher accelerator execution count. */
+ [SLICE_IDX(wcp)] = ADF_GEN6_TL_SL_EXEC_COUNTER(wcp),
+ /* UCS accelerator execution count. */
+ [SLICE_IDX(ucs)] = ADF_GEN6_TL_SL_EXEC_COUNTER(ucs),
+ /* Authentication accelerator execution count. */
+ [SLICE_IDX(ath)] = ADF_GEN6_TL_SL_EXEC_COUNTER(ath),
+};
+
+/* Ring pair counters. */
+static const struct adf_tl_dbg_counter rp_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_prt_trans_cnt)),
+ /* "Get to put" latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_bw_out)),
+ /* Message descriptor DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_hit)),
+ /* Message descriptor DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_miss)),
+ /* Payload DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_hit)),
+ /* Payload DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)),
+};
+
+void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+ tl_data->layout_sz = ADF_GEN6_TL_LAYOUT_SZ;
+ tl_data->slice_reg_sz = ADF_GEN6_TL_SLICE_REG_SZ;
+ tl_data->rp_reg_sz = ADF_GEN6_TL_RP_REG_SZ;
+ tl_data->num_hbuff = ADF_GEN6_TL_NUM_HIST_BUFFS;
+ tl_data->max_rp = ADF_GEN6_TL_MAX_RP_NUM;
+ tl_data->msg_cnt_off = ADF_GEN6_TL_MSG_CNT_OFF;
+ tl_data->cpp_ns_per_cycle = ADF_GEN6_CPP_NS_PER_CYCLE;
+ tl_data->bw_units_to_bytes = ADF_GEN6_TL_BW_HW_UNITS_TO_BYTES;
+
+ tl_data->dev_counters = dev_counters;
+ tl_data->num_dev_counters = ARRAY_SIZE(dev_counters);
+ tl_data->sl_util_counters = sl_util_counters;
+ tl_data->sl_exec_counters = sl_exec_counters;
+ tl_data->rp_counters = rp_counters;
+ tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
+ tl_data->max_sl_cnt = ADF_GEN6_TL_MAX_SLICES_PER_TYPE;
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_tl_data);
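A note on the SLICE_IDX() trick above: the counter arrays are indexed by the byte offset of the matching counter inside struct icp_qat_fw_init_admin_slice_cnt, so the table order can never drift from the firmware-reported layout. A stripped-down illustration of the idiom, using a hypothetical stand-in struct (the real one lives in icp_qat_fw_init_admin.h):

	#include <stddef.h>

	struct slice_cnt {		/* hypothetical stand-in, u8 counters */
		unsigned char cpr_cnt;	/* offset 0 */
		unsigned char dcpr_cnt;	/* offset 1 */
		unsigned char pke_cnt;	/* offset 2 */
	};

	#define SLICE_IDX(sl)	offsetof(struct slice_cnt, sl##_cnt)

	/* Each entry lands at the same index as its counter's offset. */
	static const char *const slice_names[] = {
		[SLICE_IDX(cpr)]  = "compression",
		[SLICE_IDX(dcpr)] = "decompression",
		[SLICE_IDX(pke)]  = "pke",
	};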
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h
new file mode 100644
index 000000000000..49db660b8eb9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2025 Intel Corporation. */
+#ifndef ADF_GEN6_TL_H
+#define ADF_GEN6_TL_H
+
+#include <linux/types.h>
+
+struct adf_tl_hw_data;
+
+/* Computation constants. */
+#define ADF_GEN6_CPP_NS_PER_CYCLE 2
+#define ADF_GEN6_TL_BW_HW_UNITS_TO_BYTES 64
+
+/* Maximum aggregation time. Value is in milliseconds. */
+#define ADF_GEN6_TL_MAX_AGGR_TIME_MS 4000
+/* Number of buffers to store historic values. */
+#define ADF_GEN6_TL_NUM_HIST_BUFFS \
+ (ADF_GEN6_TL_MAX_AGGR_TIME_MS / ADF_TL_DATA_WR_INTERVAL_MS)
+
+/* Max number of HW resources of one type */
+#define ADF_GEN6_TL_MAX_SLICES_PER_TYPE 32
+#define MAX_ATH_SL_COUNT 7
+#define MAX_CNV_SL_COUNT 2
+#define MAX_DCPRZ_SL_COUNT 2
+#define MAX_PKE_SL_COUNT 32
+#define MAX_UCS_SL_COUNT 4
+#define MAX_WAT_SL_COUNT 5
+#define MAX_WCP_SL_COUNT 5
+
+#define MAX_ATH_CMDQ_COUNT 14
+#define MAX_CNV_CMDQ_COUNT 6
+#define MAX_DCPRZ_CMDQ_COUNT 6
+#define MAX_PKE_CMDQ_COUNT 32
+#define MAX_UCS_CMDQ_COUNT 12
+#define MAX_WAT_CMDQ_COUNT 35
+#define MAX_WCP_CMDQ_COUNT 35
+
+/* Max number of simultaneously monitored ring pairs. */
+#define ADF_GEN6_TL_MAX_RP_NUM 4
+
+/**
+ * struct adf_gen6_tl_slice_data_regs - HW slice data as populated by FW.
+ * @reg_tm_slice_exec_cnt: Slice execution count.
+ * @reg_tm_slice_util: Slice utilization.
+ */
+struct adf_gen6_tl_slice_data_regs {
+ __u32 reg_tm_slice_exec_cnt;
+ __u32 reg_tm_slice_util;
+};
+
+#define ADF_GEN6_TL_SLICE_REG_SZ sizeof(struct adf_gen6_tl_slice_data_regs)
+
+/**
+ * struct adf_gen6_tl_cmdq_data_regs - HW CMDQ data as populated by FW.
+ * @reg_tm_cmdq_wait_cnt: CMDQ wait count.
+ * @reg_tm_cmdq_exec_cnt: CMDQ execution count.
+ * @reg_tm_cmdq_drain_cnt: CMDQ drain count.
+ * @reserved: reserved
+ */
+struct adf_gen6_tl_cmdq_data_regs {
+ __u32 reg_tm_cmdq_wait_cnt;
+ __u32 reg_tm_cmdq_exec_cnt;
+ __u32 reg_tm_cmdq_drain_cnt;
+ __u32 reserved;
+};
+
+#define ADF_GEN6_TL_CMDQ_REG_SZ sizeof(struct adf_gen6_tl_cmdq_data_regs)
+
+/**
+ * struct adf_gen6_tl_device_data_regs - This structure stores device telemetry
+ * counter values as they are being populated periodically by the device.
+ * @reg_tl_rd_lat_acc: read latency accumulator
+ * @reg_tl_gp_lat_acc: "get to put" latency accumulator
+ * @reg_tl_at_page_req_lat_acc: AT/DevTLB page request latency accumulator
+ * @reg_tl_at_trans_lat_acc: DevTLB transaction latency accumulator
+ * @reg_tl_re_acc: accumulated ring empty time
+ * @reg_tl_prt_trans_cnt: PCIe partial transactions
+ * @reg_tl_rd_lat_max: maximum logged read latency
+ * @reg_tl_rd_cmpl_cnt: read requests completed count
+ * @reg_tl_gp_lat_max: maximum logged get to put latency
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_page_req_cnt: DevTLB page requests count
+ * @reg_tl_at_trans_lat_cnt: DevTLB transaction latency samples count
+ * @reg_tl_at_max_utlb_used: maximum uTLB used
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved: reserved
+ * @ath_slices: array of Authentication slices utilization registers
+ * @cnv_slices: array of Compression slices utilization registers
+ * @dcprz_slices: array of Decompression slices utilization registers
+ * @pke_slices: array of PKE slices utilization registers
+ * @ucs_slices: array of UCS slices utilization registers
+ * @wat_slices: array of Wireless Authentication slices utilization registers
+ * @wcp_slices: array of Wireless Cipher slices utilization registers
+ * @ath_cmdq: array of Authentication cmdq telemetry registers
+ * @cnv_cmdq: array of Compression cmdq telemetry registers
+ * @dcprz_cmdq: array of Decompression cmdq telemetry registers
+ * @pke_cmdq: array of PKE cmdq telemetry registers
+ * @ucs_cmdq: array of UCS cmdq telemetry registers
+ * @wat_cmdq: array of Wireless Authentication cmdq telemetry registers
+ * @wcp_cmdq: array of Wireless Cipher cmdq telemetry registers
+ */
+struct adf_gen6_tl_device_data_regs {
+ __u64 reg_tl_rd_lat_acc;
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reg_tl_at_page_req_lat_acc;
+ __u64 reg_tl_at_trans_lat_acc;
+ __u64 reg_tl_re_acc;
+ __u32 reg_tl_prt_trans_cnt;
+ __u32 reg_tl_rd_lat_max;
+ __u32 reg_tl_rd_cmpl_cnt;
+ __u32 reg_tl_gp_lat_max;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_page_req_cnt;
+ __u32 reg_tl_at_trans_lat_cnt;
+ __u32 reg_tl_at_max_utlb_used;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved;
+ struct adf_gen6_tl_slice_data_regs ath_slices[MAX_ATH_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs cnv_slices[MAX_CNV_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs dcprz_slices[MAX_DCPRZ_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs pke_slices[MAX_PKE_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs ucs_slices[MAX_UCS_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs wat_slices[MAX_WAT_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs wcp_slices[MAX_WCP_SL_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs ath_cmdq[MAX_ATH_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs cnv_cmdq[MAX_CNV_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs dcprz_cmdq[MAX_DCPRZ_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs pke_cmdq[MAX_PKE_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs ucs_cmdq[MAX_UCS_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs wat_cmdq[MAX_WAT_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs wcp_cmdq[MAX_WCP_CMDQ_COUNT];
+};
+
+/**
+ * struct adf_gen6_tl_ring_pair_data_regs - This structure stores ring pair
+ * telemetry counter values as they are being populated periodically by the device.
+ * @reg_tl_gp_lat_acc: get-put latency accumulator
+ * @reg_tl_re_acc: accumulated ring empty time
+ * @reg_tl_prt_trans_cnt: PCIe partial transactions
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_glob_devtlb_hit: Message descriptor DevTLB hit rate
+ * @reg_tl_at_glob_devtlb_miss: Message descriptor DevTLB miss rate
+ * @reg_tl_at_payld_devtlb_hit: Payload DevTLB hit rate
+ * @reg_tl_at_payld_devtlb_miss: Payload DevTLB miss rate
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved1: reserved
+ */
+struct adf_gen6_tl_ring_pair_data_regs {
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reg_tl_re_acc;
+ __u32 reg_tl_prt_trans_cnt;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_glob_devtlb_hit;
+ __u32 reg_tl_at_glob_devtlb_miss;
+ __u32 reg_tl_at_payld_devtlb_hit;
+ __u32 reg_tl_at_payld_devtlb_miss;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved1;
+};
+
+#define ADF_GEN6_TL_RP_REG_SZ sizeof(struct adf_gen6_tl_ring_pair_data_regs)
+
+/**
+ * struct adf_gen6_tl_layout - This structure represents the entire telemetry
+ * counters data: Device + 4 Ring Pairs as they are being populated periodically
+ * by device.
+ * @tl_device_data_regs: structure of device telemetry registers
+ * @tl_ring_pairs_data_regs: array of ring pairs telemetry registers
+ * @reg_tl_msg_cnt: telemetry message counter
+ * @reserved: reserved
+ */
+struct adf_gen6_tl_layout {
+ struct adf_gen6_tl_device_data_regs tl_device_data_regs;
+ struct adf_gen6_tl_ring_pair_data_regs
+ tl_ring_pairs_data_regs[ADF_GEN6_TL_MAX_RP_NUM];
+ __u32 reg_tl_msg_cnt;
+ __u32 reserved;
+};
+
+#define ADF_GEN6_TL_LAYOUT_SZ sizeof(struct adf_gen6_tl_layout)
+#define ADF_GEN6_TL_MSG_CNT_OFF \
+ offsetof(struct adf_gen6_tl_layout, reg_tl_msg_cnt)
+
+#ifdef CONFIG_DEBUG_FS
+void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data);
+#else
+static inline void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* ADF_GEN6_TL_H */
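Since this layout mirrors a region written directly by firmware, the field offsets are effectively ABI. The patch does not add compile-time guards, but a hypothetical sketch of how they could be pinned down looks like:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	/* Five leading __u64 accumulators put the first counter at 40B. */
	static_assert(offsetof(struct adf_gen6_tl_device_data_regs,
			       reg_tl_prt_trans_cnt) == 5 * sizeof(__u64));
	/* Two __u32 registers per slice entry. */
	static_assert(sizeof(struct adf_gen6_tl_slice_data_regs) == 8);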
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index f189cce7d153..46491048e0bb 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -404,6 +404,7 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
hw_data->exit_admin_comms(accel_dev);
adf_cleanup_etr_data(accel_dev);
+ adf_misc_wq_flush();
adf_dev_restore(accel_dev);
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index cae1aee5479a..12e565613661 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -407,3 +407,8 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
{
return queue_delayed_work(adf_misc_wq, work, delay);
}
+
+void adf_misc_wq_flush(void)
+{
+ flush_workqueue(adf_misc_wq);
+}
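The new adf_misc_wq_flush() exists so that shutdown can drain work that is already queued before the resources that work touches go away. A schematic of the ordering it enforces, with hypothetical helper names:

	static void my_dev_shutdown(struct my_dev *d)
	{
		disable_dev_irqs(d);	/* 1: stop new work being queued */
		adf_misc_wq_flush();	/* 2: wait out already-queued work */
		free_dev_resources(d);	/* 3: safe, no work can touch 'd' */
	}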
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
new file mode 100644
index 000000000000..69295a9ddf0a
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitops.h>
+#include <linux/sprintf.h>
+#include <linux/string_helpers.h>
+
+#include "adf_pm_dbgfs_utils.h"
+
+/*
+ * This is needed because a variable is used to index the mask at
+ * pm_scnprint_table(), making it not compile time constant, so the compile
+ * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled.
+ */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+
+#define PM_INFO_MAX_KEY_LEN 21
+
+static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len,
+ bool lowercase)
+{
+ char key[PM_INFO_MAX_KEY_LEN];
+ int wr = 0;
+ int i;
+
+ for (i = 0; i < table_len; i++) {
+ if (lowercase)
+ string_lower(key, table[i].key);
+ else
+ string_upper(key, table[i].key);
+
+ wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key,
+ field_get(table[i].field_mask,
+ pm_info_regs[table[i].reg_offset]));
+ }
+
+ return wr;
+}
+
+int adf_pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len)
+{
+ return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
+ table_len, false);
+}
+
+int adf_pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len)
+{
+ return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
+ table_len, true);
+}
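To see why the open-coded field_get() is needed here, compare it with the regular FIELD_GET() helper. FIELD_GET() static-asserts that its mask is a compile-time constant, which cannot hold when the mask comes out of a pm_status_row table entry. A small demonstration (MY_STATE_MASK, demo and runtime_mask are hypothetical):

	#include <linux/bitfield.h>
	#include <linux/bitops.h>
	#include <linux/bits.h>

	#define MY_STATE_MASK GENMASK(22, 20)

	static u32 demo(u32 reg, u32 runtime_mask)
	{
		/* Fine: the mask is a compile-time constant. */
		u32 a = FIELD_GET(MY_STATE_MASK, reg);

		/*
		 * FIELD_GET(runtime_mask, reg) would not build: the mask
		 * is a variable here, as when walking a row table, so the
		 * shift-and-mask is open-coded instead:
		 */
		u32 b = (reg & runtime_mask) >> (ffs(runtime_mask) - 1);

		return a + b;
	}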
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h
new file mode 100644
index 000000000000..854f058b35ed
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_PM_DBGFS_UTILS_H_
+#define ADF_PM_DBGFS_UTILS_H_
+
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include "icp_qat_fw_init_admin.h"
+
+#define PM_INFO_MEMBER_OFF(member) \
+ (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32))
+
+#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \
+{ \
+ .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \
+ .key = __stringify(_field_), \
+ .field_mask = _mask_, \
+}
+
+#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \
+ PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0))
+
+struct pm_status_row {
+ int reg_offset;
+ u32 field_mask;
+ const char *key;
+};
+
+int adf_pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len);
+
+int adf_pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len);
+
+#endif /* ADF_PM_DBGFS_UTILS_H_ */
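For reference, a table row built with these helpers expands to a plain designated initializer. Taking the gen4 usage earlier in this series as the example, PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM) becomes roughly:

	static const struct pm_status_row row = {
		/* index of 'fusectl0' in the u32-array view of pm_info */
		.reg_offset = offsetof(struct icp_qat_fw_init_admin_pm_info,
				       fusectl0) / sizeof(u32),
		.key        = "ENABLE_PM",
		.field_mask = ADF_GEN4_PM_ENABLE_PM_MASK,
	};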
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index e782c23fc1bf..c6a54e465931 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -13,6 +13,7 @@
#include <linux/units.h>
#include "adf_accel_devices.h"
+#include "adf_cfg_services.h"
#include "adf_common_drv.h"
#include "adf_rl_admin.h"
#include "adf_rl.h"
@@ -55,7 +56,7 @@ static int validate_user_input(struct adf_accel_dev *accel_dev,
}
}
- if (sla_in->srv >= ADF_SVC_NONE) {
+ if (sla_in->srv >= SVC_BASE_COUNT) {
dev_notice(&GET_DEV(accel_dev),
"Wrong service type\n");
return -EINVAL;
@@ -168,20 +169,6 @@ static struct rl_sla *find_parent(struct adf_rl *rl_data,
return NULL;
}
-static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv)
-{
- switch (rl_srv) {
- case ADF_SVC_ASYM:
- return ASYM;
- case ADF_SVC_SYM:
- return SYM;
- case ADF_SVC_DC:
- return COMP;
- default:
- return UNUSED;
- }
-}
-
/**
* adf_rl_get_sla_arr_of_type() - Returns a pointer to SLA type specific array
* @rl_data: pointer to ratelimiting data
@@ -209,22 +196,6 @@ u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
}
}
-static bool is_service_enabled(struct adf_accel_dev *accel_dev,
- enum adf_base_services rl_srv)
-{
- enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv);
- struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
- u8 rps_per_bundle = hw_data->num_banks_per_vf;
- int i;
-
- for (i = 0; i < rps_per_bundle; i++) {
- if (GET_SRV_TYPE(accel_dev, i) == arb_srv)
- return true;
- }
-
- return false;
-}
-
/**
* prepare_rp_ids() - Creates an array of ring pair IDs from bitmask
* @accel_dev: pointer to acceleration device structure
@@ -243,7 +214,7 @@ static bool is_service_enabled(struct adf_accel_dev *accel_dev,
static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
const unsigned long rp_mask)
{
- enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv);
+ enum adf_cfg_service_type arb_srv = adf_srv_to_cfg_svc_type(sla->srv);
u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf;
bool *rp_in_use = accel_dev->rate_limiting->rp_in_use;
size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids);
@@ -558,21 +529,9 @@ u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
if (!sla_val)
return 0;
+ /* Handle generation specific slice count adjustment */
avail_slice_cycles = hw_data->clock_frequency;
-
- switch (svc_type) {
- case ADF_SVC_ASYM:
- avail_slice_cycles *= device_data->slices.pke_cnt;
- break;
- case ADF_SVC_SYM:
- avail_slice_cycles *= device_data->slices.cph_cnt;
- break;
- case ADF_SVC_DC:
- avail_slice_cycles *= device_data->slices.dcpr_cnt;
- break;
- default:
- break;
- }
+ avail_slice_cycles *= hw_data->get_svc_slice_cnt(accel_dev, svc_type);
do_div(avail_slice_cycles, device_data->scan_interval);
allocated_tokens = avail_slice_cycles * sla_val;
@@ -581,6 +540,17 @@ u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
return allocated_tokens;
}
+static u32 adf_rl_get_num_svc_aes(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+
+ if (svc >= SVC_BASE_COUNT)
+ return 0;
+
+ return device_data->svc_ae_mask[svc];
+}
+
u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
enum adf_base_services svc_type)
{
@@ -592,7 +562,7 @@ u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
return 0;
avail_ae_cycles = hw_data->clock_frequency;
- avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1;
+ avail_ae_cycles *= adf_rl_get_num_svc_aes(accel_dev, svc_type);
do_div(avail_ae_cycles, device_data->scan_interval);
sla_val *= device_data->max_tp[svc_type];
@@ -617,9 +587,8 @@ u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val,
sla_to_bytes *= device_data->max_tp[svc_type];
do_div(sla_to_bytes, device_data->scale_ref);
- sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? RL_TOKEN_ASYM_SIZE :
- BYTES_PER_MBIT;
- if (svc_type == ADF_SVC_DC && is_bw_out)
+ sla_to_bytes *= (svc_type == SVC_ASYM) ? RL_TOKEN_ASYM_SIZE : BYTES_PER_MBIT;
+ if (svc_type == SVC_DC && is_bw_out)
sla_to_bytes *= device_data->slices.dcpr_cnt -
device_data->dcpr_correction;
@@ -660,7 +629,7 @@ static int add_new_sla_entry(struct adf_accel_dev *accel_dev,
}
*sla_out = sla;
- if (!is_service_enabled(accel_dev, sla_in->srv)) {
+ if (!adf_is_service_enabled(accel_dev, sla_in->srv)) {
dev_notice(&GET_DEV(accel_dev),
"Provided service is not enabled\n");
ret = -EINVAL;
@@ -730,8 +699,8 @@ static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
sla_in.type = RL_ROOT;
sla_in.parent_id = RL_PARENT_DEFAULT_ID;
- for (i = 0; i < ADF_SVC_NONE; i++) {
- if (!is_service_enabled(accel_dev, i))
+ for (i = 0; i < SVC_BASE_COUNT; i++) {
+ if (!adf_is_service_enabled(accel_dev, i))
continue;
sla_in.cir = device_data->scale_ref;
@@ -745,10 +714,9 @@ static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
/* Init default cluster for each root */
sla_in.type = RL_CLUSTER;
- for (i = 0; i < ADF_SVC_NONE; i++) {
+ for (i = 0; i < SVC_BASE_COUNT; i++) {
if (!rl_data->root[i])
continue;
-
sla_in.cir = rl_data->root[i]->cir;
sla_in.pir = sla_in.cir;
sla_in.srv = rl_data->root[i]->srv;
@@ -987,7 +955,7 @@ int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
struct rl_sla *sla = NULL;
int i;
- if (srv >= ADF_SVC_NONE)
+ if (srv >= SVC_BASE_COUNT)
return -EINVAL;
if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) {
@@ -1086,9 +1054,9 @@ int adf_rl_init(struct adf_accel_dev *accel_dev)
int ret = 0;
/* Validate device parameters */
- if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) ||
- RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) ||
- RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) ||
+ if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_ASYM]) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_SYM]) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_DC]) ||
RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) ||
RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) ||
RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) ||
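To make the slice-token arithmetic above concrete, here is a worked example with made-up numbers; real values are device- and SKU-specific, and the final scale_ref division happens in the unchanged tail of the function, so treat this purely as an illustration of the order of operations:

	/*
	 * adf_rl_calculate_slice_tokens(), step by step, assuming:
	 *
	 *   clock_frequency              = 500000000  (500 MHz)
	 *   get_svc_slice_cnt(dev, svc)  = 2
	 *   scan_interval                = 1000
	 *   sla_val                      = 10
	 *   scale_ref                    = 100
	 *
	 *   avail_slice_cycles  = 500000000 * 2   = 1000000000
	 *   avail_slice_cycles /= scan_interval   = 1000000
	 *   allocated_tokens    = 1000000 * 10    = 10000000
	 *   allocated_tokens   /= scale_ref       = 100000
	 */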
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h
index bfe750ea0e83..c1f3f9a51195 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h
@@ -7,6 +7,8 @@
#include <linux/mutex.h>
#include <linux/types.h>
+#include "adf_cfg_services.h"
+
struct adf_accel_dev;
#define RL_ROOT_MAX 4
@@ -24,13 +26,6 @@ enum rl_node_type {
RL_LEAF,
};
-enum adf_base_services {
- ADF_SVC_ASYM = 0,
- ADF_SVC_SYM,
- ADF_SVC_DC,
- ADF_SVC_NONE,
-};
-
/**
* struct adf_rl_sla_input_data - ratelimiting user input data structure
* @rp_mask: 64 bit bitmask of ring pair IDs which will be assigned to SLA.
@@ -73,6 +68,7 @@ struct rl_slice_cnt {
u8 dcpr_cnt;
u8 pke_cnt;
u8 cph_cnt;
+ u8 cpr_cnt;
};
struct adf_rl_interface_data {
@@ -94,6 +90,7 @@ struct adf_rl_hw_data {
u32 pcie_scale_div;
u32 dcpr_correction;
u32 max_tp[RL_ROOT_MAX];
+ u32 svc_ae_mask[SVC_BASE_COUNT];
struct rl_slice_cnt slices;
};
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
index 698a14f4ce66..4a3e0591fdba 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
@@ -63,6 +63,7 @@ int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev,
slices_int->pke_cnt = slices_resp.pke_cnt;
/* For symmetric crypto, slice tokens are relative to the UCS slice */
slices_int->cph_cnt = slices_resp.ucs_cnt;
+ slices_int->cpr_cnt = slices_resp.cpr_cnt;
return 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index c75d0b6cb0ad..31d1ef0cb1f5 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -155,7 +155,6 @@ static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev)
if (!device_iommu_mapped(&GET_DEV(accel_dev))) {
dev_warn(&GET_DEV(accel_dev),
"IOMMU should be enabled for SR-IOV to work correctly\n");
- return -EINVAL;
}
if (adf_dev_started(accel_dev)) {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index 6c39194647f0..79c63dfa8ff3 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -269,6 +269,8 @@ static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "%s\n", ADF_CFG_SYM);
case ASYM:
return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM);
+ case DECOMP:
+ return sysfs_emit(buf, "%s\n", ADF_CFG_DECOMP);
default:
break;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
index bedb514d4e30..f31556beed8b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
@@ -32,9 +32,10 @@ enum rl_params {
};
static const char *const rl_services[] = {
- [ADF_SVC_ASYM] = "asym",
- [ADF_SVC_SYM] = "sym",
- [ADF_SVC_DC] = "dc",
+ [SVC_ASYM] = "asym",
+ [SVC_SYM] = "sym",
+ [SVC_DC] = "dc",
+ [SVC_DECOMP] = "decomp",
};
static const char *const rl_operations[] = {
@@ -282,7 +283,7 @@ static ssize_t srv_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- if (get == ADF_SVC_NONE)
+ if (get == SVC_BASE_COUNT)
return -EINVAL;
return sysfs_emit(buf, "%s\n", rl_services[get]);
@@ -291,14 +292,22 @@ static ssize_t srv_show(struct device *dev, struct device_attribute *attr,
static ssize_t srv_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct adf_accel_dev *accel_dev;
unsigned int val;
int ret;
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
ret = sysfs_match_string(rl_services, buf);
if (ret < 0)
return ret;
val = ret;
+ if (!adf_is_service_enabled(accel_dev, val))
+ return -EINVAL;
+
ret = set_param_u(dev, SRV, val);
if (ret)
return ret;
@@ -439,8 +448,8 @@ int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev)
dev_err(&GET_DEV(accel_dev),
"Failed to create qat_rl attribute group\n");
- data->cap_rem_srv = ADF_SVC_NONE;
- data->input.srv = ADF_SVC_NONE;
+ data->cap_rem_srv = SVC_BASE_COUNT;
+ data->input.srv = SVC_BASE_COUNT;
data->sysfs_added = true;
return ret;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
index f20ae7e35a0d..a32db273842a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -538,6 +538,9 @@ static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s,
case ASYM:
seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM);
break;
+ case DECOMP:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DECOMP);
+ break;
default:
seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN);
break;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
index e2dd568b87b5..6c22bc9b28e4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
@@ -10,16 +10,21 @@
static DEFINE_MUTEX(ring_read_lock);
static DEFINE_MUTEX(bank_read_lock);
+#define ADF_RING_NUM_MSGS(ring) \
+ (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / \
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size))
+
static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
{
struct adf_etr_ring_data *ring = sfile->private;
+ unsigned int num_msg = ADF_RING_NUM_MSGS(ring);
+ loff_t val = *pos;
mutex_lock(&ring_read_lock);
- if (*pos == 0)
+ if (val == 0)
return SEQ_START_TOKEN;
- if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
- ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ if (val >= num_msg)
return NULL;
return ring->base_addr +
@@ -29,13 +34,15 @@ static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
{
struct adf_etr_ring_data *ring = sfile->private;
+ unsigned int num_msg = ADF_RING_NUM_MSGS(ring);
+ loff_t val = *pos;
+
+ (*pos)++;
- if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
- ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ if (val >= num_msg)
return NULL;
- return ring->base_addr +
- (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+ return ring->base_addr + (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * val);
}
static int adf_ring_show(struct seq_file *sfile, void *v)
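The iterator rework above is the canonical seq_file shape: read the cursor once, advance it unconditionally in next(), and derive the element from the saved value, so seq_file's position bookkeeping stays consistent when iteration ends. A self-contained sketch of that shape (struct item, my_items and nr_items are hypothetical):

	#include <linux/seq_file.h>

	struct item { int v; };
	static struct item my_items[16];
	static const loff_t nr_items = ARRAY_SIZE(my_items);

	static void *my_seq_start(struct seq_file *s, loff_t *pos)
	{
		return *pos < nr_items ? &my_items[*pos] : NULL;
	}

	static void *my_seq_next(struct seq_file *s, void *v, loff_t *pos)
	{
		loff_t val = ++(*pos);	/* always advance the cursor */

		return val < nr_items ? &my_items[val] : NULL;
	}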
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index c03a69851114..43e6dd9b77b7 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -1277,7 +1277,7 @@ static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha1",
- .cra_priority = 4001,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
@@ -1294,7 +1294,7 @@ static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha256",
- .cra_priority = 4001,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
@@ -1311,7 +1311,7 @@ static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha512",
- .cra_priority = 4001,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
@@ -1329,7 +1329,7 @@ static struct aead_alg qat_aeads[] = { {
static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "qat_aes_cbc",
- .base.cra_priority = 4001,
+ .base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
@@ -1347,7 +1347,7 @@ static struct skcipher_alg qat_skciphers[] = { {
}, {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "qat_aes_ctr",
- .base.cra_priority = 4001,
+ .base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
@@ -1365,7 +1365,7 @@ static struct skcipher_alg qat_skciphers[] = { {
}, {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "qat_aes_xts",
- .base.cra_priority = 4001,
+ .base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
index 5e4dad4693ca..9b2338f58d97 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
@@ -38,7 +38,7 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
for (i = 0; i < blout->num_mapped_bufs; i++) {
dma_unmap_single(dev, blout->buffers[i].addr,
blout->buffers[i].len,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -162,7 +162,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
}
buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
sg->length - left,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
goto err_out;
buffers[y].len = sg->length;
@@ -204,7 +204,7 @@ err_out:
if (!dma_mapping_error(dev, buflout->buffers[i].addr))
dma_unmap_single(dev, buflout->buffers[i].addr,
buflout->buffers[i].len,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
}
if (!buf->sgl_dst_valid)
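On the DMA_FROM_DEVICE to DMA_BIDIRECTIONAL switch above: the direction passed to dma_map_single() is a contract about who touches the buffer while it is mapped, and map and unmap must agree. A generic reminder sketch, not QAT-specific (map_dst and its surroundings are hypothetical):

	#include <linux/dma-mapping.h>

	/*
	 * DMA_TO_DEVICE:      CPU fills the buffer, device only reads it.
	 * DMA_FROM_DEVICE:    device writes it, CPU reads it afterwards.
	 * DMA_BIDIRECTIONAL:  device may both read and write the buffer.
	 */
	static int map_dst(struct device *dev, void *vaddr, size_t len,
			   dma_addr_t *out)
	{
		dma_addr_t addr = dma_map_single(dev, vaddr, len,
						 DMA_BIDIRECTIONAL);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;
		*out = addr;	/* unmap later with the same direction */
		return 0;
	}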
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index c285b45b8679..53a4db5507ec 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -196,7 +196,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
struct adf_dc_data *dc_data = NULL;
u8 *obuff = NULL;
- dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
+ dc_data = kzalloc_node(sizeof(*dc_data), GFP_KERNEL, dev_to_node(dev));
if (!dc_data)
goto err;
@@ -204,7 +204,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
if (!obuff)
goto err;
- obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
+ obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, obuff_p)))
goto err;
@@ -232,9 +232,9 @@ static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
return;
dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
kfree_sensitive(dc_data->ovf_buff);
- devm_kfree(dev, dc_data);
+ kfree(dc_data);
accel_dev->dc_data = NULL;
}
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 48c5c8ea8c43..3fe0fd9226cf 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -75,9 +75,12 @@ mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_engine *engine = creq->base.engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_skcipher_dma_cleanup(req);
+
+ atomic_sub(req->cryptlen, &engine->load);
}
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
@@ -212,7 +215,6 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
struct mv_cesa_engine *engine = creq->base.engine;
unsigned int ivsize;
- atomic_sub(skreq->cryptlen, &engine->load);
ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index 6815eddc9068..5103d36cdfdb 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -110,9 +110,12 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ struct mv_cesa_engine *engine = creq->base.engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_cleanup(req);
+
+ atomic_sub(req->nbytes, &engine->load);
}
static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
@@ -362,16 +365,13 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
(creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
CESA_TDMA_RESULT) {
- __le32 *data = NULL;
+ const void *data;
/*
* Result is already in the correct endianness when the SA is
* used
*/
data = creq->base.chain.last->op->ctx.hash.hash;
- for (i = 0; i < digsize / 4; i++)
- creq->state[i] = le32_to_cpu(data[i]);
-
memcpy(ahashreq->result, data, digsize);
} else {
for (i = 0; i < digsize / 4; i++)
@@ -395,8 +395,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
}
}
}
-
- atomic_sub(ahashreq->nbytes, &engine->load);
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index d529bcb03775..062def303dce 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -18,9 +18,8 @@
#define OTX2_CPT_MAX_VFS_NUM 128
#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
(((blk) << 20) | ((slot) << 12) | (offs))
-#define OTX2_CPT_RVU_PFFUNC(pf, func) \
- ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
- (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+#define OTX2_CPT_RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func)
#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
#define OTX2_CPT_NAME_LENGTH 64
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
index e27e849b01df..e64ca30335de 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
@@ -34,6 +34,9 @@
#define SG_COMP_2 2
#define SG_COMP_1 1
+#define OTX2_CPT_DPTR_RPTR_ALIGN 8
+#define OTX2_CPT_RES_ADDR_ALIGN 32
+
union otx2_cpt_opcode {
u16 flags;
struct {
@@ -347,22 +350,48 @@ static inline struct otx2_cpt_inst_info *
cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
gfp_t gfp)
{
- u32 dlen = 0, g_len, sg_len, info_len;
- int align = OTX2_CPT_DMA_MINALIGN;
+ u32 dlen = 0, g_len, s_len, sg_len, info_len;
struct otx2_cpt_inst_info *info;
- u16 g_sz_bytes, s_sz_bytes;
u32 total_mem_len;
int i;
- g_sz_bytes = ((req->in_cnt + 2) / 3) *
- sizeof(struct cn10kb_cpt_sglist_component);
- s_sz_bytes = ((req->out_cnt + 2) / 3) *
- sizeof(struct cn10kb_cpt_sglist_component);
+ /* Allocate memory to meet the alignment requirements below:
+ * ------------------------------------
+ * | struct otx2_cpt_inst_info |
+ * | (No alignment required) |
+ * | --------------------------------|
+ * | | padding for ARCH_DMA_MINALIGN |
+ * | | alignment |
+ * |------------------------------------|
+ * | SG List Gather/Input memory |
+ * | Length = multiple of 32Bytes |
+ * | Alignment = 8Byte |
+ * |---------------------------------- |
+ * | SG List Scatter/Output memory |
+ * | Length = multiple of 32Bytes |
+ * | Alignment = 8Byte |
+ * | -------------------------------|
+ * | | padding for 32B alignment |
+ * |------------------------------------|
+ * | Result response memory |
+ * | Alignment = 32Byte |
+ * ------------------------------------
+ */
+
+ info_len = sizeof(*info);
+
+ g_len = ((req->in_cnt + 2) / 3) *
+ sizeof(struct cn10kb_cpt_sglist_component);
+ s_len = ((req->out_cnt + 2) / 3) *
+ sizeof(struct cn10kb_cpt_sglist_component);
+ sg_len = g_len + s_len;
- g_len = ALIGN(g_sz_bytes, align);
- sg_len = ALIGN(g_len + s_sz_bytes, align);
- info_len = ALIGN(sizeof(*info), align);
- total_mem_len = sg_len + info_len + sizeof(union otx2_cpt_res_s);
+ /* Allocate extra memory for SG and response address alignment */
+ total_mem_len = ALIGN(info_len, OTX2_CPT_DPTR_RPTR_ALIGN);
+ total_mem_len += (ARCH_DMA_MINALIGN - 1) &
+ ~(OTX2_CPT_DPTR_RPTR_ALIGN - 1);
+ total_mem_len += ALIGN(sg_len, OTX2_CPT_RES_ADDR_ALIGN);
+ total_mem_len += sizeof(union otx2_cpt_res_s);
info = kzalloc(total_mem_len, gfp);
if (unlikely(!info))
@@ -372,7 +401,8 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
dlen += req->in[i].size;
info->dlen = dlen;
- info->in_buffer = (u8 *)info + info_len;
+ info->in_buffer = PTR_ALIGN((u8 *)info + info_len, ARCH_DMA_MINALIGN);
+ info->out_buffer = info->in_buffer + g_len;
info->gthr_sz = req->in_cnt;
info->sctr_sz = req->out_cnt;
@@ -384,7 +414,7 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
}
if (sgv2io_components_setup(pdev, req->out, req->out_cnt,
- &info->in_buffer[g_len])) {
+ info->out_buffer)) {
dev_err(&pdev->dev, "Failed to setup scatter list\n");
goto destroy_info;
}
@@ -401,8 +431,10 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
* Get buffer for union otx2_cpt_res_s response
* structure and its physical address
*/
- info->completion_addr = info->in_buffer + sg_len;
- info->comp_baddr = info->dptr_baddr + sg_len;
+ info->completion_addr = PTR_ALIGN((info->in_buffer + sg_len),
+ OTX2_CPT_RES_ADDR_ALIGN);
+ info->comp_baddr = ALIGN((info->dptr_baddr + sg_len),
+ OTX2_CPT_RES_ADDR_ALIGN);
return info;
@@ -417,10 +449,9 @@ static inline struct otx2_cpt_inst_info *
otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
gfp_t gfp)
{
- int align = OTX2_CPT_DMA_MINALIGN;
struct otx2_cpt_inst_info *info;
- u32 dlen, align_dlen, info_len;
- u16 g_sz_bytes, s_sz_bytes;
+ u32 dlen, info_len;
+ u16 g_len, s_len;
u32 total_mem_len;
if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
@@ -429,22 +460,54 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
return NULL;
}
- g_sz_bytes = ((req->in_cnt + 3) / 4) *
- sizeof(struct otx2_cpt_sglist_component);
- s_sz_bytes = ((req->out_cnt + 3) / 4) *
- sizeof(struct otx2_cpt_sglist_component);
+ /* Allocate memory to meet the alignment requirements below:
+ * ------------------------------------
+ * | struct otx2_cpt_inst_info |
+ * | (No alignment required) |
+ * | --------------------------------|
+ * | | padding for ARCH_DMA_MINALIGN |
+ * | | alignment |
+ * |------------------------------------|
+ * | SG List Header of 8 Byte |
+ * |------------------------------------|
+ * | SG List Gather/Input memory |
+ * | Length = multiple of 32Bytes |
+ * | Alignment = 8Byte |
+ * |---------------------------------- |
+ * | SG List Scatter/Output memory |
+ * | Length = multiple of 32Bytes |
+ * | Alignment = 8Byte |
+ * | -------------------------------|
+ * | | padding for 32B alignment |
+ * |------------------------------------|
+ * | Result response memory |
+ * | Alignment = 32Byte |
+ * ------------------------------------
+ */
+
+ info_len = sizeof(*info);
+
+ g_len = ((req->in_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+ s_len = ((req->out_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+
+ dlen = g_len + s_len + SG_LIST_HDR_SIZE;
- dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
- align_dlen = ALIGN(dlen, align);
- info_len = ALIGN(sizeof(*info), align);
- total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
+ /* Allocate extra memory for SG and response address alignment */
+ total_mem_len = ALIGN(info_len, OTX2_CPT_DPTR_RPTR_ALIGN);
+ total_mem_len += (ARCH_DMA_MINALIGN - 1) &
+ ~(OTX2_CPT_DPTR_RPTR_ALIGN - 1);
+ total_mem_len += ALIGN(dlen, OTX2_CPT_RES_ADDR_ALIGN);
+ total_mem_len += sizeof(union otx2_cpt_res_s);
info = kzalloc(total_mem_len, gfp);
if (unlikely(!info))
return NULL;
info->dlen = dlen;
- info->in_buffer = (u8 *)info + info_len;
+ info->in_buffer = PTR_ALIGN((u8 *)info + info_len, ARCH_DMA_MINALIGN);
+ info->out_buffer = info->in_buffer + SG_LIST_HDR_SIZE + g_len;
((u16 *)info->in_buffer)[0] = req->out_cnt;
((u16 *)info->in_buffer)[1] = req->in_cnt;
@@ -460,7 +523,7 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
}
if (setup_sgio_components(pdev, req->out, req->out_cnt,
- &info->in_buffer[8 + g_sz_bytes])) {
+ info->out_buffer)) {
dev_err(&pdev->dev, "Failed to setup scatter list\n");
goto destroy_info;
}
@@ -476,8 +539,10 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
* Get buffer for union otx2_cpt_res_s response
* structure and its physical address
*/
- info->completion_addr = info->in_buffer + align_dlen;
- info->comp_baddr = info->dptr_baddr + align_dlen;
+ info->completion_addr = PTR_ALIGN((info->in_buffer + dlen),
+ OTX2_CPT_RES_ADDR_ALIGN);
+ info->comp_baddr = ALIGN((info->dptr_baddr + dlen),
+ OTX2_CPT_RES_ADDR_ALIGN);
return info;
@@ -490,6 +555,7 @@ struct otx2_cptlf_wqe;
int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
int cpu_num);
void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe);
-int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev);
+int otx2_cpt_get_eng_grp_num(struct pci_dev *pdev,
+ enum otx2_cpt_eng_type);
#endif /* __OTX2_CPT_REQMGR_H */
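
The recurring pattern in these reqmgr hunks is a single over-allocation that is then carved into sub-buffers with PTR_ALIGN()/ALIGN(), budgeting worst-case padding up front instead of ALIGN()-ing each length separately. A minimal userspace sketch of that pattern, with the kernel's helpers redefined locally and purely illustrative sizes (these are not the driver's real lengths):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Local stand-ins for the kernel's ALIGN()/PTR_ALIGN() helpers. */
#define ALIGN(x, a)     (((uintptr_t)(x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
#define PTR_ALIGN(p, a) ((unsigned char *)ALIGN((uintptr_t)(p), (a)))

int main(void)
{
	size_t info_len = 40, sg_len = 100;     /* illustrative sizes */
	size_t dma_align = 128, res_align = 32; /* ARCH_DMA_MINALIGN-like */
	size_t res_len = 16;                    /* response record */

	/* Budget worst-case padding for both alignment steps up front. */
	size_t total = info_len + (dma_align - 1) +
		       sg_len + (res_align - 1) + res_len;
	unsigned char *base = calloc(1, total);
	if (!base)
		return 1;

	/* Carve aligned sub-buffers out of the one allocation. */
	unsigned char *in_buffer = PTR_ALIGN(base + info_len, dma_align);
	unsigned char *resp = PTR_ALIGN(in_buffer + sg_len, res_align);

	printf("in_buffer %% %zu = %zu, resp %% %zu = %zu\n",
	       dma_align, (size_t)((uintptr_t)in_buffer % dma_align),
	       res_align, (size_t)((uintptr_t)resp % res_align));
	free(base);
	return 0;
}

The driver's version does the same carve twice: once at ARCH_DMA_MINALIGN for the DMA-mapped SG area, then at the 32-byte boundary for the response structure.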
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index 6e004a5568d8..1b9f75214d18 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -124,7 +124,8 @@ struct otx2_cptlfs_info {
struct cpt_hw_ops *ops;
u8 are_lfs_attached; /* Whether CPT LFs are attached */
u8 lfs_num; /* Number of CPT LFs */
- u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
+ u8 kcrypto_se_eng_grp_num; /* Crypto symmetric engine group number */
+ u8 kcrypto_ae_eng_grp_num; /* Crypto asymmetric engine group number */
u8 kvf_limits; /* Kernel crypto limits */
atomic_t state; /* LF's state. started/reset */
int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index 12c0e966fa65..b4b2d3d1cbc2 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -142,7 +142,7 @@ static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
memset(req, 0, sizeof(*req));
req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
req->dir = CPT_INLINE_INBOUND;
req->slot = slot;
req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
@@ -184,7 +184,8 @@ static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
nix_req->gen_cfg.param1 = req->param1;
nix_req->gen_cfg.param2 = req->param2;
- nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ nix_req->inst_qsel.cpt_pf_func =
+ OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
nix_req->inst_qsel.cpt_slot = 0;
ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
if (ret)
@@ -392,9 +393,8 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
/* Set which VF sent this message based on mbox IRQ */
- msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
- ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
-
+ msg->pcifunc = rvu_make_pcifunc(cptpf->pdev, cptpf->pf_id,
+ (vf->vf_id + 1));
err = cptpf_handle_vf_req(cptpf, vf, msg,
msg->next_msgoff - offset);
/*
@@ -469,8 +469,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
switch (msg->id) {
case MBOX_MSG_READY:
- cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
- RVU_PFVF_PF_MASK;
+ cptpf->pf_id = rvu_get_pf(cptpf->pdev, msg->pcifunc);
break;
case MBOX_MSG_MSIX_OFFSET:
rsp_msix = (struct msix_offset_rsp *) msg;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 78367849c3d5..cc47e361089a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -176,7 +176,9 @@ static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
/* Set PF number for microcode fetches */
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_PF_FUNC,
- cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
+ rvu_make_pcifunc(cptpf->pdev,
+ cptpf->pf_id, 0),
+ blkaddr);
if (ret)
return ret;
@@ -1491,11 +1493,13 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
union otx2_cpt_opcode opcode;
union otx2_cpt_res_s *result;
union otx2_cpt_inst_s inst;
+ dma_addr_t result_baddr;
dma_addr_t rptr_baddr;
struct pci_dev *pdev;
- u32 len, compl_rlen;
+ int timeout = 10000;
+ void *base, *rptr;
int ret, etype;
- void *rptr;
+ u32 len;
/*
* We don't get capabilities if it was already done
@@ -1518,22 +1522,28 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
if (ret)
goto delete_grps;
- compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
- len = compl_rlen + LOADFVC_RLEN;
+ /* Allocate extra memory for "rptr" and "result" pointer alignment */
+ len = LOADFVC_RLEN + ARCH_DMA_MINALIGN +
+ sizeof(union otx2_cpt_res_s) + OTX2_CPT_RES_ADDR_ALIGN;
- result = kzalloc(len, GFP_KERNEL);
- if (!result) {
+ base = kzalloc(len, GFP_KERNEL);
+ if (!base) {
ret = -ENOMEM;
goto lf_cleanup;
}
- rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
- DMA_BIDIRECTIONAL);
+
+ rptr = PTR_ALIGN(base, ARCH_DMA_MINALIGN);
+ rptr_baddr = dma_map_single(&pdev->dev, rptr, len, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
dev_err(&pdev->dev, "DMA mapping failed\n");
ret = -EFAULT;
- goto free_result;
+ goto free_rptr;
}
- rptr = (u8 *)result + compl_rlen;
+
+ result = (union otx2_cpt_res_s *)PTR_ALIGN(rptr + LOADFVC_RLEN,
+ OTX2_CPT_RES_ADDR_ALIGN);
+ result_baddr = ALIGN(rptr_baddr + LOADFVC_RLEN,
+ OTX2_CPT_RES_ADDR_ALIGN);
/* Fill in the command */
opcode.s.major = LOADFVC_MAJOR_OP;
@@ -1545,27 +1555,38 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
/* 64-bit swap for microcode data reads, not needed for addresses */
cpu_to_be64s(&iq_cmd.cmd.u);
iq_cmd.dptr = 0;
- iq_cmd.rptr = rptr_baddr + compl_rlen;
+ iq_cmd.rptr = rptr_baddr;
iq_cmd.cptr.u = 0;
for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
etype);
- otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
+ otx2_cpt_fill_inst(&inst, &iq_cmd, result_baddr);
lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
+ timeout = 10000;
while (lfs->ops->cpt_get_compcode(result) ==
- OTX2_CPT_COMPLETION_CODE_INIT)
+ OTX2_CPT_COMPLETION_CODE_INIT) {
cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout) {
+ ret = -ENODEV;
+ cptpf->is_eng_caps_discovered = false;
+ dev_warn(&pdev->dev, "Timeout on CPT load_fvc completion poll\n");
+ goto error_no_response;
+ }
+ }
cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
}
- dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
cptpf->is_eng_caps_discovered = true;
-free_result:
- kfree(result);
+error_no_response:
+ dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
+free_rptr:
+ kfree(base);
lf_cleanup:
otx2_cptlf_shutdown(lfs);
delete_grps:
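
The ucode hunk above turns an unbounded busy-wait on the completion code into a bounded poll: relax, delay one microsecond, and give up (returning -ENODEV) after a fixed budget. A self-contained sketch of that loop shape, with the hardware flag simulated and usleep() standing in for udelay():

#include <stdio.h>
#include <unistd.h>

/* Simulated completion flag; the driver reads result->s.compcode. */
static int completion_ready(int iter)
{
	return iter > 250;	/* pretend the device finishes eventually */
}

int main(void)
{
	int timeout = 10000;	/* ~10 ms budget at 1 us per poll */
	int iter = 0;

	while (!completion_ready(iter)) {
		iter++;
		usleep(1);	/* stand-in for cpu_relax() + udelay(1) */
		timeout--;
		if (!timeout) {
			fprintf(stderr, "timeout waiting for completion\n");
			return 1;	/* the driver returns -ENODEV here */
		}
	}
	printf("completed after %d polls\n", iter);
	return 0;
}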
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 7eb0bc13994d..8d9f394d6b50 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -384,7 +384,8 @@ static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
req_info->is_enc = enc;
req_info->is_trunc_hmac = false;
- req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+ req_info->ctrl.s.grp = otx2_cpt_get_eng_grp_num(pdev,
+ OTX2_CPT_SE_TYPES);
req_info->req.cptr = ctx->er_ctx.hw_ctx;
req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
@@ -1288,7 +1289,8 @@ static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
if (status)
return status;
- req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+ req_info->ctrl.s.grp = otx2_cpt_get_eng_grp_num(pdev,
+ OTX2_CPT_SE_TYPES);
/*
* We perform an asynchronous send and once
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 56904bdfd6e8..c1c44a7b89fa 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -265,17 +265,33 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
u8 eng_grp_msk;
/* Get engine group number for symmetric crypto */
- cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ cptvf->lfs.kcrypto_se_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
if (ret)
return ret;
- if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
- dev_err(dev, "Engine group for kernel crypto not available\n");
- ret = -ENOENT;
+ if (cptvf->lfs.kcrypto_se_eng_grp_num ==
+ OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(dev,
+ "Symmetric engine group for crypto not available\n");
+ return -ENOENT;
+ }
+
+ /* Get engine group number for asymmetric crypto */
+ cptvf->lfs.kcrypto_ae_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_AE_TYPES);
+ if (ret)
return ret;
+
+ if (cptvf->lfs.kcrypto_ae_eng_grp_num ==
+ OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(dev,
+ "Asymmetric engine group for crypto not available\n");
+ return -ENOENT;
}
- eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;
+
+ eng_grp_msk = BIT(cptvf->lfs.kcrypto_se_eng_grp_num) |
+ BIT(cptvf->lfs.kcrypto_ae_eng_grp_num);
ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
if (ret)
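
Where the old code attached a single group with "1 << grp", the VF now ORs two BIT() masks so both the symmetric and asymmetric groups are attached at once. A trivial sketch of that mask composition (the group numbers are invented):

#include <stdio.h>

#define BIT(n)	(1UL << (n))

int main(void)
{
	unsigned int se_grp = 1, ae_grp = 3;	/* hypothetical group numbers */
	unsigned long eng_grp_msk = BIT(se_grp) | BIT(ae_grp);

	printf("mask = 0x%lx\n", eng_grp_msk);	/* 0xa: groups 1 and 3 */
	return 0;
}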
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 931b72580fd9..5277bcfa275e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -75,6 +75,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
struct otx2_cpt_caps_rsp *eng_caps;
struct cpt_rd_wr_reg_msg *rsp_reg;
struct msix_offset_rsp *rsp_msix;
+ u8 grp_num;
int i;
if (msg->id >= MBOX_MSG_MAX) {
@@ -122,7 +123,11 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
break;
case MBOX_MSG_GET_ENG_GRP_NUM:
rsp_grp = (struct otx2_cpt_egrp_num_rsp *) msg;
- cptvf->lfs.kcrypto_eng_grp_num = rsp_grp->eng_grp_num;
+ grp_num = rsp_grp->eng_grp_num;
+ if (rsp_grp->eng_type == OTX2_CPT_SE_TYPES)
+ cptvf->lfs.kcrypto_se_eng_grp_num = grp_num;
+ else if (rsp_grp->eng_type == OTX2_CPT_AE_TYPES)
+ cptvf->lfs.kcrypto_ae_eng_grp_num = grp_num;
break;
case MBOX_MSG_GET_KVF_LIMITS:
rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
@@ -189,7 +194,7 @@ int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type)
}
req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
req->eng_type = eng_type;
return otx2_cpt_send_mbox_msg(mbox, pdev);
@@ -210,7 +215,7 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
}
req->id = MBOX_MSG_GET_KVF_LIMITS;
req->sig = OTX2_MBOX_REQ_SIG;
- req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
@@ -230,7 +235,7 @@ int otx2_cptvf_send_caps_msg(struct otx2_cptvf_dev *cptvf)
}
req->id = MBOX_MSG_GET_CAPS;
req->sig = OTX2_MBOX_REQ_SIG;
- req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
index 426244107037..e71494486c64 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -391,9 +391,19 @@ void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe)
&wqe->lfs->lf[wqe->lf_num].pqueue);
}
-int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev)
+int otx2_cpt_get_eng_grp_num(struct pci_dev *pdev,
+ enum otx2_cpt_eng_type eng_type)
{
struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
- return cptvf->lfs.kcrypto_eng_grp_num;
+ switch (eng_type) {
+ case OTX2_CPT_SE_TYPES:
+ return cptvf->lfs.kcrypto_se_eng_grp_num;
+ case OTX2_CPT_AE_TYPES:
+ return cptvf->lfs.kcrypto_ae_eng_grp_num;
+ default:
+ dev_err(&cptvf->pdev->dev, "Unsupported engine type\n");
+ break;
+ }
+ return -ENXIO;
}
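
The reworked accessor dispatches on engine type and signals unknown types with -ENXIO rather than silently returning one group. A self-contained sketch of the same shape (the enum values, field names, and group numbers here are invented for illustration):

#include <errno.h>
#include <stdio.h>

enum eng_type { SE_TYPES = 1, AE_TYPES = 2 };

struct lfs { int se_grp, ae_grp; };

static int get_eng_grp_num(const struct lfs *lfs, enum eng_type t)
{
	switch (t) {
	case SE_TYPES:
		return lfs->se_grp;
	case AE_TYPES:
		return lfs->ae_grp;
	default:
		return -ENXIO;	/* unsupported engine type */
	}
}

int main(void)
{
	struct lfs lfs = { .se_grp = 1, .ae_grp = 3 };

	printf("SE=%d AE=%d bad=%d\n",
	       get_eng_grp_num(&lfs, SE_TYPES),
	       get_eng_grp_num(&lfs, AE_TYPES),
	       get_eng_grp_num(&lfs, (enum eng_type)7));
	return 0;
}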
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index c498950402e8..1f4586509ca4 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -38,7 +38,6 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
crypto_finalize_aead_request(dd->engine, req, ret);
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 1ecf5f6ac04e..244e24e52987 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -400,7 +400,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
crypto_finalize_skcipher_request(dd->engine, req, err);
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
}
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index a099460d5f21..9c5538ae17db 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -489,7 +489,6 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
crypto_finalize_skcipher_request(dd->engine, req, err);
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 56f192cb976d..6328e8026b91 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1167,7 +1167,6 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
ctx->offset = 0;
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 5ce88e7a8f65..a89b4c5d62a0 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -851,7 +851,6 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
if (!err && (!(is_gcm(cryp) || is_ccm(cryp) || is_ecb(cryp))))
stm32_cryp_get_iv(cryp);
- pm_runtime_mark_last_busy(cryp->dev);
pm_runtime_put_autosuspend(cryp->dev);
if (is_gcm(cryp) || is_ccm(cryp))
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 768b27de4737..a4436728b0db 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -1373,7 +1373,6 @@ static void stm32_hash_unprepare_request(struct ahash_request *req)
*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
pm_runtime:
- pm_runtime_mark_last_busy(hdev->dev);
pm_runtime_put_autosuspend(hdev->dev);
}
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 7059bbe5a2eb..19c934af3df6 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -113,8 +113,6 @@ struct virtio_crypto_request {
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
-struct virtio_crypto *virtcrypto_devmgr_get_first(void);
-int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 0d522049f595..3d241446099c 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -139,7 +139,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
spin_lock_init(&vi->data_vq[i].lock);
vi->data_vq[i].vq = vqs[i];
/* Initialize crypto engine */
- vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
+ vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, true,
virtqueue_get_vring_size(vqs[i]));
if (!vi->data_vq[i].engine) {
ret = -ENOMEM;
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
index bddbd8ebfebe..06c74fa132cd 100644
--- a/drivers/crypto/virtio/virtio_crypto_mgr.c
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -82,42 +82,6 @@ void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
}
/*
- * virtcrypto_devmgr_get_first()
- *
- * Function returns the first virtio crypto device from the acceleration
- * framework.
- *
- * To be used by virtio crypto device specific drivers.
- *
- * Return: pointer to vcrypto_dev or NULL if not found.
- */
-struct virtio_crypto *virtcrypto_devmgr_get_first(void)
-{
- struct virtio_crypto *dev = NULL;
-
- mutex_lock(&table_lock);
- if (!list_empty(&virtio_crypto_table))
- dev = list_first_entry(&virtio_crypto_table,
- struct virtio_crypto,
- list);
- mutex_unlock(&table_lock);
- return dev;
-}
-
-/*
- * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
- * @vcrypto_dev: Pointer to virtio crypto device.
- *
- * To be used by virtio crypto device specific drivers.
- *
- * Return: 1 when device is in use, 0 otherwise.
- */
-int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
-{
- return atomic_read(&vcrypto_dev->ref_count) != 0;
-}
-
-/*
* virtcrypto_dev_get() - Increment vcrypto_dev reference count
* @vcrypto_dev: Pointer to virtio crypto device.
*
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index a1a99ec3f12c..712624cba2b6 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -335,6 +335,63 @@ static int add_or_reset_cxl_resource(struct resource *parent, struct resource *r
return rc;
}
+static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
+{
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct range *hpa = &cxld->hpa_range;
+ resource_size_t size = range_len(hpa);
+ resource_size_t start = hpa->start;
+ resource_size_t cache_size;
+ struct resource res;
+ int nid, rc;
+
+ res = DEFINE_RES(start, size, 0);
+ nid = phys_to_target_node(start);
+
+ rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
+ if (rc)
+ return rc;
+
+ /*
+ * The cache range is expected to be within the CFMWS.
+ * Currently only cache_size == cxl_size is supported, where the
+ * CXL size is half of the total CFMWS window size.
+ */
+ size = size >> 1;
+ if (cache_size && size != cache_size) {
+ dev_warn(&cxld->dev,
+ "Extended Linear Cache size %pa != CXL size %pa. No Support!",
+ &cache_size, &size);
+ return -ENXIO;
+ }
+
+ cxlrd->cache_size = cache_size;
+
+ return 0;
+}
+
+static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
+{
+ int rc;
+
+ rc = cxl_acpi_set_cache_size(cxlrd);
+ if (!rc)
+ return;
+
+ if (rc != -EOPNOTSUPP) {
+ /*
+ * Failing to set up the extended linear cache does not prevent
+ * the region from functioning; it only causes "cxl list" to
+ * show an incorrect region size.
+ */
+ dev_warn(cxlrd->cxlsd.cxld.dev.parent,
+ "Extended linear cache calculation failed rc:%d\n", rc);
+ }
+
+ /* Non-fatal: fall back to no extended linear cache */
+ cxlrd->cache_size = 0;
+}
+
DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
@@ -394,6 +451,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
ig = CXL_DECODER_MIN_GRANULARITY;
cxld->interleave_granularity = ig;
+ cxl_setup_extended_linear_cache(cxlrd);
+
if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
if (ways != 1 && ways != 3) {
cxims_ctx = (struct cxl_cxims_context) {
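
The check in cxl_acpi_set_cache_size() encodes one invariant: with an extended linear cache the CFMWS window is split evenly, so the CXL-addressable size is half the window and must match the HMAT-reported cache size. A small sketch of just that arithmetic (window and cache sizes are example values, not real HMAT data):

#include <stdio.h>

/* Returns 0 on success, -1 when the cache layout is unsupported. */
static int check_cache(unsigned long long window, unsigned long long cache)
{
	unsigned long long cxl_size = window >> 1;	/* half the CFMWS */

	if (cache && cxl_size != cache)
		return -1;	/* the driver warns and returns -ENXIO */
	return 0;
}

int main(void)
{
	/* 64 GiB window, 32 GiB cache: supported split. */
	printf("%d\n", check_cache(64ULL << 30, 32ULL << 30));
	/* Mismatched cache size: rejected. */
	printf("%d\n", check_cache(64ULL << 30, 16ULL << 30));
	return 0;
}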
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 79e2ef81fde8..5ad8fef210b5 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -15,7 +15,6 @@ cxl_core-y += hdm.o
cxl_core-y += pmu.o
cxl_core-y += cdat.o
cxl_core-y += ras.o
-cxl_core-y += acpi.o
cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
cxl_core-$(CONFIG_CXL_MCE) += mce.o
diff --git a/drivers/cxl/core/acpi.c b/drivers/cxl/core/acpi.c
deleted file mode 100644
index f13b4dae6ac5..000000000000
--- a/drivers/cxl/core/acpi.c
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
-#include <linux/acpi.h>
-#include "cxl.h"
-#include "core.h"
-
-int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
- int nid, resource_size_t *size)
-{
- return hmat_get_extended_linear_cache_size(backing_res, nid, size);
-}
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 0ccef2f2a26a..c0af645425f4 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -336,7 +336,7 @@ static int match_cxlrd_hb(struct device *dev, void *data)
cxlrd = to_cxl_root_decoder(dev);
cxlsd = &cxlrd->cxlsd;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
for (int i = 0; i < cxlsd->nr_targets; i++) {
if (host_bridge == cxlsd->target[i]->dport_dev)
return 1;
@@ -987,7 +987,7 @@ void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr)
bool is_root;
int rc;
- lockdep_assert_held(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
struct xarray *usp_xa __free(free_perf_xa) =
kzalloc(sizeof(*usp_xa), GFP_KERNEL);
@@ -1057,7 +1057,7 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
{
struct cxl_dpa_perf *perf;
- lockdep_assert_held(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
perf = cxled_get_dpa_perf(cxled);
if (IS_ERR(perf))
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 29b61828a847..2669f251d677 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -5,6 +5,7 @@
#define __CXL_CORE_H__
#include <cxl/mailbox.h>
+#include <linux/rwsem.h>
extern const struct device_type cxl_nvdimm_bridge_type;
extern const struct device_type cxl_nvdimm_type;
@@ -12,6 +13,11 @@ extern const struct device_type cxl_pmu_type;
extern struct attribute_group cxl_base_attribute_group;
+enum cxl_detach_mode {
+ DETACH_ONLY,
+ DETACH_INVALIDATE,
+};
+
#ifdef CONFIG_CXL_REGION
extern struct device_attribute dev_attr_create_pmem_region;
extern struct device_attribute dev_attr_create_ram_region;
@@ -20,7 +26,11 @@ extern struct device_attribute dev_attr_region;
extern const struct device_type cxl_pmem_region_type;
extern const struct device_type cxl_dax_region_type;
extern const struct device_type cxl_region_type;
-void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
+
+int cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ enum cxl_detach_mode mode);
+
#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
#define CXL_REGION_TYPE(x) (&cxl_region_type)
#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
@@ -48,8 +58,11 @@ static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
return 0;
}
-static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+static inline int cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled,
+ int pos, enum cxl_detach_mode mode)
{
+ return 0;
}
static inline int cxl_region_init(void)
{
@@ -80,6 +93,7 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
+bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr);
enum cxl_rcrb {
CXL_RCRB_DOWNSTREAM,
@@ -96,8 +110,20 @@ u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
#define PCI_RCRB_CAP_HDR_NEXT_MASK GENMASK(15, 8)
#define PCI_CAP_EXP_SIZEOF 0x3c
-extern struct rw_semaphore cxl_dpa_rwsem;
-extern struct rw_semaphore cxl_region_rwsem;
+struct cxl_rwsem {
+ /*
+ * All changes to HPA (interleave configuration) occur with this
+ * lock held for write.
+ */
+ struct rw_semaphore region;
+ /*
+ * All changes to a device DPA space occur with this lock held
+ * for write.
+ */
+ struct rw_semaphore dpa;
+};
+
+extern struct cxl_rwsem cxl_rwsem;
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
@@ -120,8 +146,6 @@ int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
int cxl_ras_init(void);
void cxl_ras_exit(void);
int cxl_gpf_port_setup(struct cxl_dport *dport);
-int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
- int nid, resource_size_t *size);
#ifdef CONFIG_CXL_FEATURES
struct cxl_feat_entry *
diff --git a/drivers/cxl/core/edac.c b/drivers/cxl/core/edac.c
index 623aaa4439c4..79994ca9bc9f 100644
--- a/drivers/cxl/core/edac.c
+++ b/drivers/cxl/core/edac.c
@@ -115,10 +115,9 @@ static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
flags, min_cycle);
}
- struct rw_semaphore *region_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_region_rwsem);
- if (!region_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
+ return ret;
cxlr = cxl_ps_ctx->cxlr;
p = &cxlr->params;
@@ -158,10 +157,9 @@ static int cxl_scrub_set_attrbs_region(struct device *dev,
struct cxl_region *cxlr;
int ret, i;
- struct rw_semaphore *region_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_region_rwsem);
- if (!region_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
+ return ret;
cxlr = cxl_ps_ctx->cxlr;
p = &cxlr->params;
@@ -697,7 +695,7 @@ static int cxl_set_ecs_threshold(struct device *dev, u8 *log_cap, u16 *config,
ECS_THRESHOLD_IDX_4096);
break;
default:
- dev_dbg(dev, "Invalid CXL ECS threshold count(%d) to set\n",
+ dev_dbg(dev, "Invalid CXL ECS threshold count(%u) to set\n",
val);
dev_dbg(dev, "Supported ECS threshold counts: %u, %u, %u\n",
ECS_THRESHOLD_256, ECS_THRESHOLD_1024,
@@ -1340,16 +1338,15 @@ cxl_mem_perform_sparing(struct device *dev,
struct cxl_memdev_sparing_in_payload sparing_pi;
struct cxl_event_dram *rec = NULL;
u16 validity_flags = 0;
+ int ret;
- struct rw_semaphore *region_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_region_rwsem);
- if (!region_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return ret;
- struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_dpa_rwsem);
- if (!dpa_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return ret;
if (!cxl_sparing_ctx->cap_safe_when_in_use) {
/* Memory to repair must be offline */
@@ -1523,7 +1520,7 @@ static int cxl_mem_sparing_set_dpa(struct device *dev, void *drv_data, u64 dpa)
struct cxl_memdev *cxlmd = ctx->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa))
return -EINVAL;
ctx->dpa = dpa;
@@ -1787,16 +1784,15 @@ static int cxl_mem_perform_ppr(struct cxl_ppr_context *cxl_ppr_ctx)
struct cxl_memdev_ppr_maintenance_attrbs maintenance_attrbs;
struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
struct cxl_mem_repair_attrbs attrbs = { 0 };
+ int ret;
- struct rw_semaphore *region_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_region_rwsem);
- if (!region_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return ret;
- struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_dpa_rwsem);
- if (!dpa_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return ret;
if (!cxl_ppr_ctx->media_accessible || !cxl_ppr_ctx->data_retained) {
/* Memory to repair must be offline */
@@ -1892,7 +1888,7 @@ static int cxl_ppr_set_dpa(struct device *dev, void *drv_data, u64 dpa)
struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa))
return -EINVAL;
cxl_ppr_ctx->dpa = dpa;
@@ -1923,8 +1919,11 @@ static int cxl_ppr_set_nibble_mask(struct device *dev, void *drv_data,
static int cxl_do_ppr(struct device *dev, void *drv_data, u32 val)
{
struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
- if (!cxl_ppr_ctx->dpa || val != EDAC_DO_MEM_REPAIR)
+ if (val != EDAC_DO_MEM_REPAIR ||
+ !cxl_resource_contains_addr(&cxlds->dpa_res, cxl_ppr_ctx->dpa))
return -EINVAL;
return cxl_mem_perform_ppr(cxl_ppr_ctx);
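
The ACQUIRE()/ACQUIRE_ERR() conversions through edac.c replace paired down_read_interruptible()/up_read() calls with a scope-bound lock, which is what lets the error paths above become plain returns. The underlying idea can be approximated in ordinary C with the GCC/Clang cleanup attribute; the sketch below is an analogy using a pthread mutex, not the kernel's actual guard machinery:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_state = 42;

/* Called automatically when the annotated variable leaves scope. */
static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

static int read_state(void)
{
	/* Scope-bound lock: released on every return path below. */
	pthread_mutex_t *guard __attribute__((cleanup(unlock_cleanup))) = &lock;
	pthread_mutex_lock(guard);

	if (shared_state < 0)
		return -1;	/* early return still unlocks */
	return shared_state;
}

int main(void)
{
	printf("%d\n", read_state());
	return 0;
}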
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index ab1007495f6b..e9e1d555cec6 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -16,7 +16,10 @@
* for enumerating these registers and capabilities.
*/
-DECLARE_RWSEM(cxl_dpa_rwsem);
+struct cxl_rwsem cxl_rwsem = {
+ .region = __RWSEM_INITIALIZER(cxl_rwsem.region),
+ .dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa),
+};
static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int *target_map)
@@ -214,7 +217,7 @@ void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
struct resource *p1, *p2;
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
__cxl_dpa_debug(file, p1, 0);
for (p2 = p1->child; p2; p2 = p2->sibling)
@@ -266,7 +269,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
struct resource *res = cxled->dpa_res;
resource_size_t skip_start;
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
/* save @skip_start, before @res is released */
skip_start = res->start - cxled->skip;
@@ -281,7 +284,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
static void cxl_dpa_release(void *cxled)
{
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
__cxl_dpa_release(cxled);
}
@@ -293,7 +296,7 @@ static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = cxled_to_port(cxled);
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
devm_remove_action(&port->dev, cxl_dpa_release, cxled);
__cxl_dpa_release(cxled);
}
@@ -361,7 +364,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
struct resource *res;
int rc;
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
if (!len) {
dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
@@ -470,7 +473,7 @@ int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
{
struct device *dev = cxlds->dev;
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (cxlds->nr_partitions)
return -EBUSY;
@@ -516,9 +519,8 @@ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
struct cxl_port *port = cxled_to_port(cxled);
int rc;
- down_write(&cxl_dpa_rwsem);
- rc = __cxl_dpa_reserve(cxled, base, len, skipped);
- up_write(&cxl_dpa_rwsem);
+ scoped_guard(rwsem_write, &cxl_rwsem.dpa)
+ rc = __cxl_dpa_reserve(cxled, base, len, skipped);
if (rc)
return rc;
@@ -529,7 +531,7 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
if (cxled->dpa_res)
return resource_size(cxled->dpa_res);
@@ -540,19 +542,26 @@ resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
resource_size_t base = -1;
- lockdep_assert_held(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
if (cxled->dpa_res)
base = cxled->dpa_res->start;
return base;
}
+bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr)
+{
+ struct resource _addr = DEFINE_RES_MEM(addr, 1);
+
+ return resource_contains(res, &_addr);
+}
+
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = cxled_to_port(cxled);
struct device *dev = &cxled->cxld.dev;
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (!cxled->dpa_res)
return 0;
if (cxled->cxld.region) {
@@ -582,7 +591,7 @@ int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
struct device *dev = &cxled->cxld.dev;
int part;
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
return -EBUSY;
@@ -614,7 +623,7 @@ static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
struct resource *p, *last;
int part;
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (cxled->cxld.region) {
dev_dbg(dev, "decoder attached to %s\n",
dev_name(&cxled->cxld.region->dev));
@@ -764,46 +773,12 @@ static int cxld_await_commit(void __iomem *hdm, int id)
return -ETIMEDOUT;
}
-static int cxl_decoder_commit(struct cxl_decoder *cxld)
+static void setup_hw_decoder(struct cxl_decoder *cxld, void __iomem *hdm)
{
- struct cxl_port *port = to_cxl_port(cxld->dev.parent);
- struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
- void __iomem *hdm = cxlhdm->regs.hdm_decoder;
- int id = cxld->id, rc;
+ int id = cxld->id;
u64 base, size;
u32 ctrl;
- if (cxld->flags & CXL_DECODER_F_ENABLE)
- return 0;
-
- if (cxl_num_decoders_committed(port) != id) {
- dev_dbg(&port->dev,
- "%s: out of order commit, expected decoder%d.%d\n",
- dev_name(&cxld->dev), port->id,
- cxl_num_decoders_committed(port));
- return -EBUSY;
- }
-
- /*
- * For endpoint decoders hosted on CXL memory devices that
- * support the sanitize operation, make sure sanitize is not in-flight.
- */
- if (is_endpoint_decoder(&cxld->dev)) {
- struct cxl_endpoint_decoder *cxled =
- to_cxl_endpoint_decoder(&cxld->dev);
- struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_memdev_state *mds =
- to_cxl_memdev_state(cxlmd->cxlds);
-
- if (mds && mds->security.sanitize_active) {
- dev_dbg(&cxlmd->dev,
- "attempted to commit %s during sanitize\n",
- dev_name(&cxld->dev));
- return -EBUSY;
- }
- }
-
- down_read(&cxl_dpa_rwsem);
/* common decoder settings */
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
cxld_set_interleave(cxld, &ctrl);
@@ -837,7 +812,47 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
}
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
- up_read(&cxl_dpa_rwsem);
+}
+
+static int cxl_decoder_commit(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id, rc;
+
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+ if (cxl_num_decoders_committed(port) != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id,
+ cxl_num_decoders_committed(port));
+ return -EBUSY;
+ }
+
+ /*
+ * For endpoint decoders hosted on CXL memory devices that
+ * support the sanitize operation, make sure sanitize is not in-flight.
+ */
+ if (is_endpoint_decoder(&cxld->dev)) {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_memdev_state *mds =
+ to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (mds && mds->security.sanitize_active) {
+ dev_dbg(&cxlmd->dev,
+ "attempted to commit %s during sanitize\n",
+ dev_name(&cxld->dev));
+ return -EBUSY;
+ }
+ }
+
+ scoped_guard(rwsem_read, &cxl_rwsem.dpa)
+ setup_hw_decoder(cxld, hdm);
port->commit_end++;
rc = cxld_await_commit(hdm, cxld->id);
@@ -875,7 +890,7 @@ void cxl_port_commit_reap(struct cxl_decoder *cxld)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
/*
* Once the highest committed decoder is disabled, free any other
@@ -907,7 +922,6 @@ static void cxl_decoder_reset(struct cxl_decoder *cxld)
"%s: out of order reset, expected decoder%d.%d\n",
dev_name(&cxld->dev), port->id, port->commit_end);
- down_read(&cxl_dpa_rwsem);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
@@ -916,7 +930,6 @@ static void cxl_decoder_reset(struct cxl_decoder *cxld)
writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
- up_read(&cxl_dpa_rwsem);
cxld->flags &= ~CXL_DECODER_F_ENABLE;
@@ -1025,7 +1038,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
else
cxld->target_type = CXL_DECODER_DEVMEM;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
if (cxld->id != cxl_num_decoders_committed(port)) {
dev_warn(&port->dev,
"decoder%d.%d: Committed out of order\n",
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 2689e6453c5a..fa6dd0c94656 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -899,6 +899,10 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
return;
}
+ if (event_type == CXL_CPER_EVENT_MEM_SPARING) {
+ trace_cxl_memory_sparing(cxlmd, type, &evt->mem_sparing);
+ return;
+ }
if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX;
@@ -909,8 +913,8 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
* translations. Take topology mutation locks and lookup
* { HPA, REGION } from { DPA, MEMDEV } in the event record.
*/
- guard(rwsem_read)(&cxl_region_rwsem);
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
@@ -926,12 +930,30 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt))
dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n");
+ if (evt->gen_media.media_hdr.descriptor &
+ CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ WARN_ON_ONCE((evt->gen_media.media_hdr.type &
+ CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
+ !get_unaligned_le24(evt->gen_media.cme_count));
+ else
+ WARN_ON_ONCE(evt->gen_media.media_hdr.type &
+ CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);
+
trace_cxl_general_media(cxlmd, type, cxlr, hpa,
hpa_alias, &evt->gen_media);
} else if (event_type == CXL_CPER_EVENT_DRAM) {
if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt))
dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n");
+ if (evt->dram.media_hdr.descriptor &
+ CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ WARN_ON_ONCE((evt->dram.media_hdr.type &
+ CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
+ !get_unaligned_le24(evt->dram.cvme_count));
+ else
+ WARN_ON_ONCE(evt->dram.media_hdr.type &
+ CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);
+
trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
&evt->dram);
}
@@ -952,6 +974,8 @@ static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
ev_type = CXL_CPER_EVENT_DRAM;
else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
ev_type = CXL_CPER_EVENT_MEM_MODULE;
+ else if (uuid_equal(uuid, &CXL_EVENT_MEM_SPARING_UUID))
+ ev_type = CXL_CPER_EVENT_MEM_SPARING;
cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}
@@ -1265,7 +1289,7 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
/* synchronize with cxl_mem_probe() and decoder write operations */
guard(device)(&cxlmd->dev);
endpoint = cxlmd->endpoint;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
/*
* Require an endpoint to be safe otherwise the driver can not
* be sure that the device is unmapped.
@@ -1401,8 +1425,8 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
int nr_records = 0;
int rc;
- rc = mutex_lock_interruptible(&mds->poison.lock);
- if (rc)
+ ACQUIRE(mutex_intr, lock)(&mds->poison.mutex);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
return rc;
po = mds->poison.list_out;
@@ -1437,7 +1461,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
}
} while (po->flags & CXL_POISON_FLAG_MORE);
- mutex_unlock(&mds->poison.lock);
return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL");
@@ -1473,7 +1496,7 @@ int cxl_poison_state_init(struct cxl_memdev_state *mds)
return rc;
}
- mutex_init(&mds->poison.lock);
+ mutex_init(&mds->poison.mutex);
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL");
diff --git a/drivers/cxl/core/mce.h b/drivers/cxl/core/mce.h
index ace73424eeb6..ca272e8db6c7 100644
--- a/drivers/cxl/core/mce.h
+++ b/drivers/cxl/core/mce.h
@@ -7,7 +7,7 @@
#ifdef CONFIG_CXL_MCE
int devm_cxl_register_mce_notifier(struct device *dev,
- struct notifier_block *mce_notifer);
+ struct notifier_block *mce_notifier);
#else
static inline int
devm_cxl_register_mce_notifier(struct device *dev,
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index f88a13adf7fa..c569e00a511f 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -232,15 +232,13 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
if (!port || !is_cxl_endpoint(port))
return -EINVAL;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
return rc;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
- if (rc) {
- up_read(&cxl_region_rwsem);
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
return rc;
- }
if (cxl_num_decoders_committed(port) == 0) {
/* No regions mapped to this memdev */
@@ -249,8 +247,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
/* Regions mapped, collect poison by endpoint */
rc = cxl_get_poison_by_endpoint(port);
}
- up_read(&cxl_dpa_rwsem);
- up_read(&cxl_region_rwsem);
return rc;
}
@@ -267,7 +263,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
dev_dbg(cxlds->dev, "device has no dpa resource\n");
return -EINVAL;
}
- if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) {
+ if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa)) {
dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
dpa, &cxlds->dpa_res);
return -EINVAL;
@@ -292,19 +288,17 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
return rc;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
- if (rc) {
- up_read(&cxl_region_rwsem);
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
return rc;
- }
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
- goto out;
+ return rc;
inject.address = cpu_to_le64(dpa);
mbox_cmd = (struct cxl_mbox_cmd) {
@@ -314,7 +308,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
};
rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
- goto out;
+ return rc;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
@@ -327,11 +321,8 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
.length = cpu_to_le32(1),
};
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
-out:
- up_read(&cxl_dpa_rwsem);
- up_read(&cxl_region_rwsem);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");
@@ -347,19 +338,17 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
return rc;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
- if (rc) {
- up_read(&cxl_region_rwsem);
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
return rc;
- }
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
- goto out;
+ return rc;
/*
* In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
@@ -378,7 +367,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
- goto out;
+ return rc;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
@@ -391,11 +380,8 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
.length = cpu_to_le32(1),
};
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
-out:
- up_read(&cxl_dpa_rwsem);
- up_read(&cxl_region_rwsem);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index eb46c6764d20..29197376b18e 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -30,18 +30,12 @@
* instantiated by the core.
*/
-/*
- * All changes to the interleave configuration occur with this lock held
- * for write.
- */
-DECLARE_RWSEM(cxl_region_rwsem);
-
static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);
int cxl_num_decoders_committed(struct cxl_port *port)
{
- lockdep_assert_held(&cxl_region_rwsem);
+ lockdep_assert_held(&cxl_rwsem.region);
return port->commit_end + 1;
}
@@ -176,7 +170,7 @@ static ssize_t target_list_show(struct device *dev,
ssize_t offset;
int rc;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
rc = emit_target_list(cxlsd, buf);
if (rc < 0)
return rc;
@@ -196,7 +190,7 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- /* without @cxl_dpa_rwsem, make sure @part is not reloaded */
+ /* without @cxl_rwsem.dpa, make sure @part is not reloaded */
int part = READ_ONCE(cxled->part);
const char *desc;
@@ -235,7 +229,7 @@ static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *at
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
}
static DEVICE_ATTR_RO(dpa_resource);
@@ -560,7 +554,7 @@ static ssize_t decoders_committed_show(struct device *dev,
{
struct cxl_port *port = to_cxl_port(dev);
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
return sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
}
@@ -1722,7 +1716,7 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
if (xa_empty(&port->dports))
return -EINVAL;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
struct cxl_dport *dport = find_dport(port, target_map[i]);
@@ -2001,12 +1995,9 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, "CXL");
static void cxld_unregister(void *dev)
{
- struct cxl_endpoint_decoder *cxled;
-
- if (is_endpoint_decoder(dev)) {
- cxled = to_cxl_endpoint_decoder(dev);
- cxl_decoder_kill_region(cxled);
- }
+ if (is_endpoint_decoder(dev))
+ cxl_decoder_detach(NULL, to_cxl_endpoint_decoder(dev), -1,
+ DETACH_INVALIDATE);
device_unregister(dev);
}
@@ -2293,7 +2284,7 @@ static const struct attribute_group *cxl_bus_attribute_groups[] = {
NULL,
};
-struct bus_type cxl_bus_type = {
+const struct bus_type cxl_bus_type = {
.name = "cxl",
.uevent = cxl_bus_uevent,
.match = cxl_bus_match,
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 6e5e1460068d..71cc42d05248 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -141,16 +141,12 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
struct cxl_region_params *p = &cxlr->params;
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
return rc;
if (cxlr->mode != CXL_PARTMODE_PMEM)
- rc = sysfs_emit(buf, "\n");
- else
- rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%pUb\n", &p->uuid);
}
static int is_dup(struct device *match, void *data)
@@ -162,7 +158,7 @@ static int is_dup(struct device *match, void *data)
if (!is_cxl_region(match))
return 0;
- lockdep_assert_held(&cxl_region_rwsem);
+ lockdep_assert_held(&cxl_rwsem.region);
cxlr = to_cxl_region(match);
p = &cxlr->params;
@@ -192,27 +188,22 @@ static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
if (uuid_is_null(&temp))
return -EINVAL;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &region_rwsem)))
return rc;
if (uuid_equal(&p->uuid, &temp))
- goto out;
+ return len;
- rc = -EBUSY;
if (p->state >= CXL_CONFIG_ACTIVE)
- goto out;
+ return -EBUSY;
rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
if (rc < 0)
- goto out;
+ return rc;
uuid_copy(&p->uuid, &temp);
-out:
- up_write(&cxl_region_rwsem);
- if (rc)
- return rc;
return len;
}
static DEVICE_ATTR_RW(uuid);
@@ -349,33 +340,40 @@ err:
return rc;
}
-static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
+static int queue_reset(struct cxl_region *cxlr)
{
- struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
- bool commit;
- ssize_t rc;
+ int rc;
- rc = kstrtobool(buf, &commit);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ /* Already in the requested state? */
+ if (p->state < CXL_CONFIG_COMMIT)
+ return 0;
+
+ p->state = CXL_CONFIG_RESET_PENDING;
+
+ return 0;
+}
+
+static int __commit(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
/* Already in the requested state? */
- if (commit && p->state >= CXL_CONFIG_COMMIT)
- goto out;
- if (!commit && p->state < CXL_CONFIG_COMMIT)
- goto out;
+ if (p->state >= CXL_CONFIG_COMMIT)
+ return 0;
/* Not ready to commit? */
- if (commit && p->state < CXL_CONFIG_ACTIVE) {
- rc = -ENXIO;
- goto out;
- }
+ if (p->state < CXL_CONFIG_ACTIVE)
+ return -ENXIO;
/*
* Invalidate caches before region setup to drop any speculative
@@ -383,33 +381,61 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
*/
rc = cxl_region_invalidate_memregion(cxlr);
if (rc)
- goto out;
+ return rc;
- if (commit) {
- rc = cxl_region_decode_commit(cxlr);
- if (rc == 0)
- p->state = CXL_CONFIG_COMMIT;
- } else {
- p->state = CXL_CONFIG_RESET_PENDING;
- up_write(&cxl_region_rwsem);
- device_release_driver(&cxlr->dev);
- down_write(&cxl_region_rwsem);
+ rc = cxl_region_decode_commit(cxlr);
+ if (rc)
+ return rc;
- /*
- * The lock was dropped, so need to revalidate that the reset is
- * still pending.
- */
- if (p->state == CXL_CONFIG_RESET_PENDING) {
- cxl_region_decode_reset(cxlr, p->interleave_ways);
- p->state = CXL_CONFIG_ACTIVE;
- }
- }
+ p->state = CXL_CONFIG_COMMIT;
-out:
- up_write(&cxl_region_rwsem);
+ return 0;
+}
+static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ bool commit;
+ ssize_t rc;
+
+ rc = kstrtobool(buf, &commit);
if (rc)
return rc;
+
+ if (commit) {
+ rc = __commit(cxlr);
+ if (rc)
+ return rc;
+ return len;
+ }
+
+ rc = queue_reset(cxlr);
+ if (rc)
+ return rc;
+
+ /*
+ * Unmap the region and depend on the reset-pending state to
+ * ensure it does not go active again until after the reset
+ */
+ device_release_driver(&cxlr->dev);
+
+ /*
+ * With the reset pending, take cxl_rwsem.region unconditionally
+ * to ensure the reset gets handled before returning.
+ */
+ guard(rwsem_write)(&cxl_rwsem.region);
+
+ /*
+ * Revalidate that the reset is still pending in case another
+ * thread already handled this reset.
+ */
+ if (p->state == CXL_CONFIG_RESET_PENDING) {
+ cxl_region_decode_reset(cxlr, p->interleave_ways);
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
return len;
}
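
The commit_store() rework splits the old monolith into __commit() and queue_reset(), and the reset path now deliberately drops the lock to unbind the driver, then reacquires it and revalidates the state, since another thread may have completed the reset in the window. A pthread sketch of that queue/drop/revalidate shape (states and names are invented):

#include <pthread.h>
#include <stdio.h>

enum state { ACTIVE, COMMIT, RESET_PENDING };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static enum state state = COMMIT;

/* Mark the reset pending under the lock, then drop the lock. */
static void queue_reset(void)
{
	pthread_mutex_lock(&lock);
	if (state >= COMMIT)
		state = RESET_PENDING;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	queue_reset();

	/* ... lock dropped here: another thread may act first ... */

	pthread_mutex_lock(&lock);
	/* Revalidate: only perform the reset if it is still pending. */
	if (state == RESET_PENDING) {
		state = ACTIVE;
		printf("reset performed\n");
	} else {
		printf("another thread handled the reset\n");
	}
	pthread_mutex_unlock(&lock);
	return 0;
}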
@@ -420,13 +446,10 @@ static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
struct cxl_region_params *p = &cxlr->params;
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
}
static DEVICE_ATTR_RW(commit);
@@ -450,15 +473,12 @@ static ssize_t interleave_ways_show(struct device *dev,
{
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
- ssize_t rc;
+ int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%d\n", p->interleave_ways);
}
static const struct attribute_group *get_cxl_region_target_group(void);
@@ -493,23 +513,21 @@ static ssize_t interleave_ways_store(struct device *dev,
return -EINVAL;
}
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
- if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
- rc = -EBUSY;
- goto out;
- }
+
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+ return -EBUSY;
save = p->interleave_ways;
p->interleave_ways = val;
rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
- if (rc)
+ if (rc) {
p->interleave_ways = save;
-out:
- up_write(&cxl_region_rwsem);
- if (rc)
return rc;
+ }
+
return len;
}
static DEVICE_ATTR_RW(interleave_ways);
@@ -520,15 +538,12 @@ static ssize_t interleave_granularity_show(struct device *dev,
{
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
- ssize_t rc;
+ int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%d\n", p->interleave_granularity);
}
static ssize_t interleave_granularity_store(struct device *dev,
@@ -561,19 +576,15 @@ static ssize_t interleave_granularity_store(struct device *dev,
if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
return -EINVAL;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
- if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
- rc = -EBUSY;
- goto out;
- }
+
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+ return -EBUSY;
p->interleave_granularity = val;
-out:
- up_write(&cxl_region_rwsem);
- if (rc)
- return rc;
+
return len;
}
static DEVICE_ATTR_RW(interleave_granularity);
@@ -584,17 +595,15 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
u64 resource = -1ULL;
- ssize_t rc;
+ int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
+
if (p->res)
resource = p->res->start;
- rc = sysfs_emit(buf, "%#llx\n", resource);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%#llx\n", resource);
}
static DEVICE_ATTR_RO(resource);
@@ -622,7 +631,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
struct resource *res;
u64 remainder = 0;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
/* Nothing to do... */
if (p->res && resource_size(p->res) == size)
@@ -664,7 +673,7 @@ static void cxl_region_iomem_release(struct cxl_region *cxlr)
struct cxl_region_params *p = &cxlr->params;
if (device_is_registered(&cxlr->dev))
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
if (p->res) {
/*
* Autodiscovered regions may not have been able to insert their
@@ -681,7 +690,7 @@ static int free_hpa(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
if (!p->res)
return 0;
@@ -705,15 +714,14 @@ static ssize_t size_store(struct device *dev, struct device_attribute *attr,
if (rc)
return rc;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
if (val)
rc = alloc_hpa(cxlr, val);
else
rc = free_hpa(cxlr);
- up_write(&cxl_region_rwsem);
if (rc)
return rc;
@@ -729,15 +737,12 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
u64 size = 0;
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
if (p->res)
size = resource_size(p->res);
- rc = sysfs_emit(buf, "%#llx\n", size);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%#llx\n", size);
}
static DEVICE_ATTR_RW(size);
@@ -763,26 +768,20 @@ static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
struct cxl_endpoint_decoder *cxled;
int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
if (pos >= p->interleave_ways) {
dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
p->interleave_ways);
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
cxled = p->targets[pos];
if (!cxled)
- rc = sysfs_emit(buf, "\n");
- else
- rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
-out:
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
}
static int check_commit_order(struct device *dev, void *data)
@@ -897,7 +896,7 @@ cxl_port_pick_region_decoder(struct cxl_port *port,
/*
* This decoder is pinned registered as long as the endpoint decoder is
* registered, and endpoint decoder unregistration holds the
- * cxl_region_rwsem over unregister events, so no need to hold on to
+ * cxl_rwsem.region over unregister events, so no need to hold on to
* this extra reference.
*/
put_device(dev);
@@ -1088,7 +1087,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
unsigned long index;
int rc = -EBUSY;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
cxl_rr = cxl_rr_load(port, cxlr);
if (cxl_rr) {
@@ -1198,7 +1197,7 @@ static void cxl_port_detach_region(struct cxl_port *port,
struct cxl_region_ref *cxl_rr;
struct cxl_ep *ep = NULL;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
cxl_rr = cxl_rr_load(port, cxlr);
if (!cxl_rr)
@@ -2094,27 +2093,43 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return 0;
}
-static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+static struct cxl_region *
+__cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ enum cxl_detach_mode mode)
{
- struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
- struct cxl_region *cxlr = cxled->cxld.region;
struct cxl_region_params *p;
- int rc = 0;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
- if (!cxlr)
- return 0;
+ if (!cxled) {
+ p = &cxlr->params;
- p = &cxlr->params;
- get_device(&cxlr->dev);
+ if (pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n",
+ pos, p->interleave_ways);
+ return NULL;
+ }
+
+ if (!p->targets[pos])
+ return NULL;
+ cxled = p->targets[pos];
+ } else {
+ cxlr = cxled->cxld.region;
+ if (!cxlr)
+ return NULL;
+ p = &cxlr->params;
+ }
+
+ if (mode == DETACH_INVALIDATE)
+ cxled->part = -1;
if (p->state > CXL_CONFIG_ACTIVE) {
cxl_region_decode_reset(cxlr, p->interleave_ways);
p->state = CXL_CONFIG_ACTIVE;
}
- for (iter = ep_port; !is_cxl_root(iter);
+ for (struct cxl_port *iter = cxled_to_port(cxled); !is_cxl_root(iter);
iter = to_cxl_port(iter->dev.parent))
cxl_port_detach_region(iter, cxlr, cxled);
@@ -2125,7 +2140,7 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
cxled->pos);
- goto out;
+ return NULL;
}
if (p->state == CXL_CONFIG_ACTIVE) {
@@ -2139,74 +2154,79 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
.end = -1,
};
- /* notify the region driver that one of its targets has departed */
- up_write(&cxl_region_rwsem);
- device_release_driver(&cxlr->dev);
- down_write(&cxl_region_rwsem);
-out:
- put_device(&cxlr->dev);
- return rc;
+ get_device(&cxlr->dev);
+ return cxlr;
}
-void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+/*
+ * Cleanup a decoder's interest in a region. There are two cases to
+ * handle: removing an unknown @cxled from a known position in a region
+ * (detach_target()), or removing a known @cxled from an unknown @cxlr
+ * (cxld_unregister()).
+ *
+ * When the detachment finds a region, release the region driver.
+ */
+int cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ enum cxl_detach_mode mode)
{
- down_write(&cxl_region_rwsem);
- cxled->part = -1;
- cxl_region_detach(cxled);
- up_write(&cxl_region_rwsem);
+ struct cxl_region *detach;
+
+ /* when the decoder is being destroyed, lock unconditionally */
+ if (mode == DETACH_INVALIDATE) {
+ guard(rwsem_write)(&cxl_rwsem.region);
+ detach = __cxl_decoder_detach(cxlr, cxled, pos, mode);
+ } else {
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+ return rc;
+ detach = __cxl_decoder_detach(cxlr, cxled, pos, mode);
+ }
+
+ if (detach) {
+ device_release_driver(&detach->dev);
+ put_device(&detach->dev);
+ }
+ return 0;
+}
+
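This refactor follows the usual discipline for scope-based locking: work that must run outside the lock (device_release_driver() can recurse into paths that take cxl_rwsem.region) is deferred by handing a reference out of the guarded scope. The shape, with hypothetical names:

	/* hypothetical shape, not from the patch */
	struct foo *victim = NULL;

	scoped_guard(rwsem_write, &foo_lock)
		victim = __foo_unhook(obj);	/* mutate state under the lock */

	if (victim) {
		device_release_driver(&victim->dev);	/* may retake foo_lock */
		put_device(&victim->dev);
	}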
+static int __attach_target(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ unsigned int state)
+{
+ int rc;
+
+ if (state == TASK_INTERRUPTIBLE) {
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+ return rc;
+ guard(rwsem_read)(&cxl_rwsem.dpa);
+ return cxl_region_attach(cxlr, cxled, pos);
+ }
+ guard(rwsem_write)(&cxl_rwsem.region);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
+ return cxl_region_attach(cxlr, cxled, pos);
}
static int attach_target(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled, int pos,
unsigned int state)
{
- int rc = 0;
+ int rc = __attach_target(cxlr, cxled, pos, state);
- if (state == TASK_INTERRUPTIBLE)
- rc = down_write_killable(&cxl_region_rwsem);
- else
- down_write(&cxl_region_rwsem);
- if (rc)
- return rc;
-
- down_read(&cxl_dpa_rwsem);
- rc = cxl_region_attach(cxlr, cxled, pos);
- up_read(&cxl_dpa_rwsem);
- up_write(&cxl_region_rwsem);
-
- if (rc)
- dev_warn(cxled->cxld.dev.parent,
- "failed to attach %s to %s: %d\n",
- dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
+ if (rc == 0)
+ return 0;
+ dev_warn(cxled->cxld.dev.parent, "failed to attach %s to %s: %d\n",
+ dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
return rc;
}
static int detach_target(struct cxl_region *cxlr, int pos)
{
- struct cxl_region_params *p = &cxlr->params;
- int rc;
-
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
- return rc;
-
- if (pos >= p->interleave_ways) {
- dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
- p->interleave_ways);
- rc = -ENXIO;
- goto out;
- }
-
- if (!p->targets[pos]) {
- rc = 0;
- goto out;
- }
-
- rc = cxl_region_detach(p->targets[pos]);
-out:
- up_write(&cxl_region_rwsem);
- return rc;
+ return cxl_decoder_detach(cxlr, NULL, pos, DETACH_ONLY);
}
static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
@@ -2451,16 +2471,16 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
unsigned long action, void *arg)
{
struct cxl_region *cxlr = container_of(nb, struct cxl_region,
- memory_notifier);
- struct memory_notify *mnb = arg;
- int nid = mnb->status_change_nid;
+ node_notifier);
+ struct node_notify *nn = arg;
+ int nid = nn->nid;
int region_nid;
- if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+ if (action != NODE_ADDED_FIRST_MEMORY)
return NOTIFY_DONE;
/*
- * No need to hold cxl_region_rwsem; region parameters are stable
+ * No need to hold cxl_rwsem.region; region parameters are stable
* within the cxl_region driver.
*/
region_nid = phys_to_target_node(cxlr->params.res->start);
@@ -2483,7 +2503,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
int region_nid;
/*
- * No need to hold cxl_region_rwsem; region parameters are stable
+ * No need to hold cxl_rwsem.region; region parameters are stable
* within the cxl_region driver.
*/
region_nid = phys_to_target_node(cxlr->params.res->start);
@@ -2632,17 +2652,13 @@ static ssize_t region_show(struct device *dev, struct device_attribute *attr,
struct cxl_decoder *cxld = to_cxl_decoder(dev);
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
if (cxld->region)
- rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
- else
- rc = sysfs_emit(buf, "\n");
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
+ return sysfs_emit(buf, "\n");
}
DEVICE_ATTR_RO(region);
@@ -2847,7 +2863,7 @@ static int __cxl_dpa_to_region(struct device *dev, void *arg)
if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res))
return 0;
- if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
+ if (!cxl_resource_contains_addr(cxled->dpa_res, dpa))
return 0;
/*
@@ -2959,7 +2975,7 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
if (cxlrd->hpa_to_spa)
hpa = cxlrd->hpa_to_spa(cxlrd, hpa);
- if (hpa < p->res->start || hpa > p->res->end) {
+ if (!cxl_resource_contains_addr(p->res, hpa)) {
dev_dbg(&cxlr->dev,
"Addr trans fail: hpa 0x%llx not in region\n", hpa);
return ULLONG_MAX;
@@ -2981,7 +2997,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
struct device *dev;
int i;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
if (p->state != CXL_CONFIG_COMMIT)
return -ENXIO;
@@ -2993,7 +3009,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
cxlr_pmem->hpa_range.start = p->res->start;
cxlr_pmem->hpa_range.end = p->res->end;
- /* Snapshot the region configuration underneath the cxl_region_rwsem */
+ /* Snapshot the region configuration underneath cxl_rwsem.region */
cxlr_pmem->nr_mappings = p->nr_targets;
for (i = 0; i < p->nr_targets; i++) {
struct cxl_endpoint_decoder *cxled = p->targets[i];
@@ -3070,7 +3086,7 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
struct cxl_dax_region *cxlr_dax;
struct device *dev;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
if (p->state != CXL_CONFIG_COMMIT)
return ERR_PTR(-ENXIO);
@@ -3270,7 +3286,7 @@ static int match_region_by_range(struct device *dev, const void *data)
cxlr = to_cxl_region(dev);
p = &cxlr->params;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
if (p->res && p->res->start == r->start && p->res->end == r->end)
return 1;
@@ -3282,15 +3298,10 @@ static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_region_params *p = &cxlr->params;
- int nid = phys_to_target_node(res->start);
resource_size_t size = resource_size(res);
resource_size_t cache_size, start;
- int rc;
-
- rc = cxl_acpi_get_extended_linear_cache_size(res, nid, &cache_size);
- if (rc)
- return rc;
+ cache_size = cxlrd->cache_size;
if (!cache_size)
return 0;
@@ -3330,7 +3341,7 @@ static int __construct_region(struct cxl_region *cxlr,
struct resource *res;
int rc;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
p = &cxlr->params;
if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_err(cxlmd->dev.parent,
@@ -3466,10 +3477,10 @@ int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
- down_read(&cxl_region_rwsem);
- p = &cxlr->params;
- attach = p->state == CXL_CONFIG_COMMIT;
- up_read(&cxl_region_rwsem);
+ scoped_guard(rwsem_read, &cxl_rwsem.region) {
+ p = &cxlr->params;
+ attach = p->state == CXL_CONFIG_COMMIT;
+ }
if (attach) {
/*
@@ -3494,12 +3505,12 @@ u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa)
if (!endpoint)
return ~0ULL;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
xa_for_each(&endpoint->regions, index, iter) {
struct cxl_region_params *p = &iter->region->params;
- if (p->res->start <= spa && spa <= p->res->end) {
+ if (cxl_resource_contains_addr(p->res, spa)) {
if (!p->cache_size)
return ~0ULL;
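cxl_resource_contains_addr() is introduced elsewhere in the series; judging from the three open-coded checks it replaces, it performs an inclusive [start, end] containment test. A plausible sketch, not the patch's actual implementation:

	/* assumption: equivalent to the open-coded checks removed above */
	static bool cxl_resource_contains_addr(const struct resource *res, u64 addr)
	{
		return res->start <= addr && addr <= res->end;
	}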
@@ -3527,48 +3538,53 @@ static void shutdown_notifiers(void *_cxlr)
{
struct cxl_region *cxlr = _cxlr;
- unregister_memory_notifier(&cxlr->memory_notifier);
+ unregister_node_notifier(&cxlr->node_notifier);
unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
}
-static int cxl_region_probe(struct device *dev)
+static int cxl_region_can_probe(struct cxl_region *cxlr)
{
- struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc) {
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) {
dev_dbg(&cxlr->dev, "probe interrupted\n");
return rc;
}
if (p->state < CXL_CONFIG_COMMIT) {
dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
dev_err(&cxlr->dev,
"failed to activate, re-commit region and retry\n");
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
+ return 0;
+}
+
+static int cxl_region_probe(struct device *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ rc = cxl_region_can_probe(cxlr);
+ if (rc)
+ return rc;
+
/*
* From this point on any path that changes the region's state away from
* CXL_CONFIG_COMMIT is also responsible for releasing the driver.
*/
-out:
- up_read(&cxl_region_rwsem);
-
- if (rc)
- return rc;
- cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
- cxlr->memory_notifier.priority = CXL_CALLBACK_PRI;
- register_memory_notifier(&cxlr->memory_notifier);
+ cxlr->node_notifier.notifier_call = cxl_region_perf_attrs_callback;
+ cxlr->node_notifier.priority = CXL_CALLBACK_PRI;
+ register_node_notifier(&cxlr->node_notifier);
cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance;
cxlr->adist_notifier.priority = 100;
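The switch from memory notifiers to node notifiers changes both the payload type and the event of interest. A skeleton callback consistent with the fields this hunk relies on (struct node_notify's nid, NODE_ADDED_FIRST_MEMORY); the function name is illustrative:

	static int my_node_added_cb(struct notifier_block *nb, unsigned long action,
				    void *arg)
	{
		struct node_notify *nn = arg;

		if (action != NODE_ADDED_FIRST_MEMORY)
			return NOTIFY_DONE;

		/* nn->nid has just gained its first memory block */
		return NOTIFY_OK;
	}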
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 25ebfbc1616c..a53ec4798b12 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -214,12 +214,16 @@ TRACE_EVENT(cxl_overflow,
#define CXL_EVENT_RECORD_FLAG_PERF_DEGRADED BIT(4)
#define CXL_EVENT_RECORD_FLAG_HW_REPLACE BIT(5)
#define CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID BIT(6)
+#define CXL_EVENT_RECORD_FLAG_LD_ID_VALID BIT(7)
+#define CXL_EVENT_RECORD_FLAG_HEAD_ID_VALID BIT(8)
#define show_hdr_flags(flags) __print_flags(flags, " | ", \
{ CXL_EVENT_RECORD_FLAG_PERMANENT, "PERMANENT_CONDITION" }, \
{ CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, "MAINTENANCE_NEEDED" }, \
{ CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, "PERFORMANCE_DEGRADED" }, \
{ CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" }, \
- { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" } \
+ { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" }, \
+ { CXL_EVENT_RECORD_FLAG_LD_ID_VALID, "LD_ID_VALID" }, \
+ { CXL_EVENT_RECORD_FLAG_HEAD_ID_VALID, "HEAD_ID_VALID" } \
)
/*
@@ -247,7 +251,9 @@ TRACE_EVENT(cxl_overflow,
__field(u64, hdr_timestamp) \
__field(u8, hdr_length) \
__field(u8, hdr_maint_op_class) \
- __field(u8, hdr_maint_op_sub_class)
+ __field(u8, hdr_maint_op_sub_class) \
+ __field(u16, hdr_ld_id) \
+ __field(u8, hdr_head_id)
#define CXL_EVT_TP_fast_assign(cxlmd, l, hdr) \
__assign_str(memdev); \
@@ -260,18 +266,22 @@ TRACE_EVENT(cxl_overflow,
__entry->hdr_related_handle = le16_to_cpu((hdr).related_handle); \
__entry->hdr_timestamp = le64_to_cpu((hdr).timestamp); \
__entry->hdr_maint_op_class = (hdr).maint_op_class; \
- __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class
+ __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class; \
+ __entry->hdr_ld_id = le16_to_cpu((hdr).ld_id); \
+ __entry->hdr_head_id = (hdr).head_id
#define CXL_EVT_TP_printk(fmt, ...) \
TP_printk("memdev=%s host=%s serial=%lld log=%s : time=%llu uuid=%pUb " \
"len=%d flags='%s' handle=%x related_handle=%x " \
- "maint_op_class=%u maint_op_sub_class=%u : " fmt, \
+ "maint_op_class=%u maint_op_sub_class=%u " \
+ "ld_id=%x head_id=%x : " fmt, \
__get_str(memdev), __get_str(host), __entry->serial, \
cxl_event_log_type_str(__entry->log), \
__entry->hdr_timestamp, &__entry->hdr_uuid, __entry->hdr_length,\
show_hdr_flags(__entry->hdr_flags), __entry->hdr_handle, \
__entry->hdr_related_handle, __entry->hdr_maint_op_class, \
__entry->hdr_maint_op_sub_class, \
+ __entry->hdr_ld_id, __entry->hdr_head_id, \
##__VA_ARGS__)
TRACE_EVENT(cxl_generic_event,
@@ -496,7 +506,10 @@ TRACE_EVENT(cxl_general_media,
uuid_copy(&__entry->region_uuid, &uuid_null);
}
__entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
- __entry->cme_count = get_unaligned_le24(rec->cme_count);
+ if (rec->media_hdr.descriptor & CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ __entry->cme_count = get_unaligned_le24(rec->cme_count);
+ else
+ __entry->cme_count = 0;
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
@@ -648,7 +661,10 @@ TRACE_EVENT(cxl_dram,
CXL_EVENT_GEN_MED_COMP_ID_SIZE);
__entry->sub_channel = rec->sub_channel;
__entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
- __entry->cvme_count = get_unaligned_le24(rec->cvme_count);
+ if (rec->media_hdr.descriptor & CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ __entry->cvme_count = get_unaligned_le24(rec->cvme_count);
+ else
+ __entry->cvme_count = 0;
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' sub_type='%s' " \
@@ -871,6 +887,111 @@ TRACE_EVENT(cxl_memory_module,
)
);
+/*
+ * Memory Sparing Event Record - MSER
+ *
+ * CXL rev 3.2 section 8.2.10.2.1.4; Table 8-60
+ */
+#define CXL_MSER_QUERY_RESOURCE_FLAG BIT(0)
+#define CXL_MSER_HARD_SPARING_FLAG BIT(1)
+#define CXL_MSER_DEV_INITED_FLAG BIT(2)
+#define show_mem_sparing_flags(flags) __print_flags(flags, "|", \
+ { CXL_MSER_QUERY_RESOURCE_FLAG, "Query Resources" }, \
+ { CXL_MSER_HARD_SPARING_FLAG, "Hard Sparing" }, \
+ { CXL_MSER_DEV_INITED_FLAG, "Device Initiated Sparing" } \
+)
+
+#define CXL_MSER_VALID_CHANNEL BIT(0)
+#define CXL_MSER_VALID_RANK BIT(1)
+#define CXL_MSER_VALID_NIBBLE BIT(2)
+#define CXL_MSER_VALID_BANK_GROUP BIT(3)
+#define CXL_MSER_VALID_BANK BIT(4)
+#define CXL_MSER_VALID_ROW BIT(5)
+#define CXL_MSER_VALID_COLUMN BIT(6)
+#define CXL_MSER_VALID_COMPONENT_ID BIT(7)
+#define CXL_MSER_VALID_COMPONENT_ID_FORMAT BIT(8)
+#define CXL_MSER_VALID_SUB_CHANNEL BIT(9)
+#define show_mem_sparing_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_MSER_VALID_CHANNEL, "CHANNEL" }, \
+ { CXL_MSER_VALID_RANK, "RANK" }, \
+ { CXL_MSER_VALID_NIBBLE, "NIBBLE" }, \
+ { CXL_MSER_VALID_BANK_GROUP, "BANK GROUP" }, \
+ { CXL_MSER_VALID_BANK, "BANK" }, \
+ { CXL_MSER_VALID_ROW, "ROW" }, \
+ { CXL_MSER_VALID_COLUMN, "COLUMN" }, \
+ { CXL_MSER_VALID_COMPONENT_ID, "COMPONENT ID" }, \
+ { CXL_MSER_VALID_COMPONENT_ID_FORMAT, "COMPONENT ID PLDM FORMAT" }, \
+ { CXL_MSER_VALID_SUB_CHANNEL, "SUB CHANNEL" } \
+)
+
+TRACE_EVENT(cxl_memory_sparing,
+
+ TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
+ struct cxl_event_mem_sparing *rec),
+
+ TP_ARGS(cxlmd, log, rec),
+
+ TP_STRUCT__entry(
+ CXL_EVT_TP_entry
+
+ /* Memory Sparing Event */
+ __field(u8, flags)
+ __field(u8, result)
+ __field(u16, validity_flags)
+ __field(u16, res_avail)
+ __field(u8, channel)
+ __field(u8, rank)
+ __field(u32, nibble_mask)
+ __field(u8, bank_group)
+ __field(u8, bank)
+ __field(u32, row)
+ __field(u16, column)
+ __field(u8, sub_channel)
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ ),
+
+ TP_fast_assign(
+ CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+ __entry->hdr_uuid = CXL_EVENT_MEM_SPARING_UUID;
+
+ /* Memory Sparing Event */
+ __entry->flags = rec->flags;
+ __entry->result = rec->result;
+ __entry->validity_flags = le16_to_cpu(rec->validity_flags);
+ __entry->res_avail = le16_to_cpu(rec->res_avail);
+ __entry->channel = rec->channel;
+ __entry->rank = rec->rank;
+ __entry->nibble_mask = get_unaligned_le24(rec->nibble_mask);
+ __entry->bank_group = rec->bank_group;
+ __entry->bank = rec->bank;
+ __entry->row = get_unaligned_le24(rec->row);
+ __entry->column = le16_to_cpu(rec->column);
+ __entry->sub_channel = rec->sub_channel;
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
+ ),
+
+ CXL_EVT_TP_printk("flags='%s' result=%u validity_flags='%s' " \
+ "spare resource avail=%u channel=%u rank=%u " \
+ "nibble_mask=%x bank_group=%u bank=%u " \
+ "row=%u column=%u sub_channel=%u " \
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s",
+ show_mem_sparing_flags(__entry->flags),
+ __entry->result,
+ show_mem_sparing_valid_flags(__entry->validity_flags),
+ __entry->res_avail, __entry->channel, __entry->rank,
+ __entry->nibble_mask, __entry->bank_group, __entry->bank,
+ __entry->row, __entry->column, __entry->sub_channel,
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_MSER_VALID_COMPONENT_ID,
+ CXL_MSER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_MSER_VALID_COMPONENT_ID,
+ CXL_MSER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id)
+ )
+);
+
#define show_poison_trace_type(type) \
__print_symbolic(type, \
{ CXL_POISON_TRACE_LIST, "List" }, \
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 3f1695c96abc..847e37be42c4 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -11,6 +11,7 @@
#include <linux/log2.h>
#include <linux/node.h>
#include <linux/io.h>
+#include <linux/range.h>
extern const struct nvdimm_security_ops *cxl_security_ops;
@@ -423,6 +424,7 @@ typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa);
/**
* struct cxl_root_decoder - Static platform CXL address decoder
* @res: host / parent resource for region allocations
+ * @cache_size: extended linear cache size if it exists, otherwise zero.
* @region_id: region id for next region provisioning event
* @hpa_to_spa: translate CXL host-physical-address to Platform system-physical-address
* @platform_data: platform specific configuration data
@@ -432,6 +434,7 @@ typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa);
*/
struct cxl_root_decoder {
struct resource *res;
+ resource_size_t cache_size;
atomic_t region_id;
cxl_hpa_to_spa_fn hpa_to_spa;
void *platform_data;
@@ -469,7 +472,7 @@ enum cxl_config_state {
* @nr_targets: number of targets
* @cache_size: extended linear cache size if exists, otherwise zero.
*
- * State transitions are protected by the cxl_region_rwsem
+ * State transitions are protected by cxl_rwsem.region
*/
struct cxl_region_params {
enum cxl_config_state state;
@@ -513,7 +516,7 @@ enum cxl_partition_mode {
* @flags: Region state flags
* @params: active + config params for the region
* @coord: QoS access coordinates for the region
- * @memory_notifier: notifier for setting the access coordinates to node
+ * @node_notifier: notifier for setting the access coordinates to node
* @adist_notifier: notifier for calculating the abstract distance of node
*/
struct cxl_region {
@@ -526,7 +529,7 @@ struct cxl_region {
unsigned long flags;
struct cxl_region_params params;
struct access_coordinate coord[ACCESS_COORDINATE_MAX];
- struct notifier_block memory_notifier;
+ struct notifier_block node_notifier;
struct notifier_block adist_notifier;
};
@@ -815,7 +818,7 @@ int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
bool is_cxl_region(struct device *dev);
-extern struct bus_type cxl_bus_type;
+extern const struct bus_type cxl_bus_type;
struct cxl_driver {
const char *name;
@@ -912,15 +915,4 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
#endif
u16 cxl_gpf_get_dvsec(struct device *dev);
-
-static inline struct rw_semaphore *rwsem_read_intr_acquire(struct rw_semaphore *rwsem)
-{
- if (down_read_interruptible(rwsem))
- return NULL;
-
- return rwsem;
-}
-
-DEFINE_FREE(rwsem_read_release, struct rw_semaphore *, if (_T) up_read(_T))
-
#endif /* __CXL_H__ */
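The cxl_rwsem.region / cxl_rwsem.dpa references throughout imply that the series consolidates the two former global semaphores into one named struct whose definition lies outside this excerpt. A plausible shape:

	/* plausible shape of the consolidation; the real definition is not shown here */
	struct cxl_rwsem {
		/* protect changes to region configuration */
		struct rw_semaphore region;
		/* protect changes to endpoint decoder DPA allocations */
		struct rw_semaphore dpa;
	};
	extern struct cxl_rwsem cxl_rwsem;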
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 551b0ba2caa1..751478dfc410 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -254,7 +254,7 @@ enum security_cmd_enabled_bits {
* @max_errors: Maximum media error records held in device cache
* @enabled_cmds: All poison commands enabled in the CEL
* @list_out: The poison list payload returned by device
- * @lock: Protect reads of the poison list
+ * @mutex: Protect reads of the poison list
*
* Reads of the poison list are synchronized to ensure that a reader
* does not get an incomplete list because their request overlapped
@@ -265,7 +265,7 @@ struct cxl_poison_state {
u32 max_errors;
DECLARE_BITMAP(enabled_cmds, CXL_POISON_ENABLED_MAX);
struct cxl_mbox_poison_out *list_out;
- struct mutex lock; /* Protect reads of poison list */
+ struct mutex mutex; /* Protect reads of poison list */
};
/*
@@ -634,6 +634,14 @@ struct cxl_mbox_identify {
0x13, 0xb7, 0x74)
/*
+ * Memory Sparing Event Record UUID
+ * CXL rev 3.2 section 8.2.10.2.1.4: Table 8-60
+ */
+#define CXL_EVENT_MEM_SPARING_UUID \
+ UUID_INIT(0xe71f3a40, 0x2d29, 0x4092, 0x8a, 0x39, 0x4d, 0x1c, 0x96, \
+ 0x6c, 0x7c, 0x65)
+
+/*
* Get Event Records output payload
* CXL rev 3.0 section 8.2.9.2.2; Table 8-50
*/
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 785aa2af5eaa..bd100ac31672 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -379,7 +379,7 @@ static int cxl_pci_mbox_send(struct cxl_mailbox *cxl_mbox,
{
int rc;
- mutex_lock_io(&cxl_mbox->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
rc = __cxl_pci_mbox_send_cmd(cxl_mbox, cmd);
mutex_unlock(&cxl_mbox->mbox_mutex);
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 328231cfb028..2bb40a6060af 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -4,7 +4,6 @@
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
@@ -73,7 +72,7 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
return -1;
}
-static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
+static void dax_set_mapping(struct vm_fault *vmf, unsigned long pfn,
unsigned long fault_size)
{
unsigned long i, nr_pages = fault_size / PAGE_SIZE;
@@ -89,7 +88,7 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
ALIGN_DOWN(vmf->address, fault_size));
for (i = 0; i < nr_pages; i++) {
- struct folio *folio = pfn_folio(pfn_t_to_pfn(pfn) + i);
+ struct folio *folio = pfn_folio(pfn + i);
if (folio->mapping)
continue;
@@ -104,7 +103,7 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
{
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
- pfn_t pfn;
+ unsigned long pfn;
unsigned int fault_size = PAGE_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
@@ -125,11 +124,11 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, 0);
+ pfn = PHYS_PFN(phys);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn),
+ return vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn),
vmf->flags & FAULT_FLAG_WRITE);
}
@@ -140,7 +139,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pgoff_t pgoff;
- pfn_t pfn;
+ unsigned long pfn;
unsigned int fault_size = PMD_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
@@ -169,11 +168,11 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, 0);
+ pfn = PHYS_PFN(phys);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_folio_pmd(vmf, page_folio(pfn_t_to_page(pfn)),
+ return vmf_insert_folio_pmd(vmf, page_folio(pfn_to_page(pfn)),
vmf->flags & FAULT_FLAG_WRITE);
}
@@ -185,7 +184,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pgoff_t pgoff;
- pfn_t pfn;
+ unsigned long pfn;
unsigned int fault_size = PUD_SIZE;
@@ -215,11 +214,11 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, 0);
+ pfn = PHYS_PFN(phys);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_folio_pud(vmf, page_folio(pfn_t_to_page(pfn)),
+ return vmf_insert_folio_pud(vmf, page_folio(pfn_to_page(pfn)),
vmf->flags & FAULT_FLAG_WRITE);
}
#else
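With pfn_t gone, each dax fault handler reduces to the same plain-helper chain:

	/* equivalent plain-pfn chain, per the hunks above */
	unsigned long pfn = PHYS_PFN(phys);	/* phys >> PAGE_SHIFT */
	struct page *page = pfn_to_page(pfn);
	struct folio *folio = page_folio(page);	/* for the pmd/pud variants */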
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index 5e7c53f18491..c18451a37e4f 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -2,7 +2,6 @@
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/module.h>
-#include <linux/pfn_t.h>
#include <linux/dax.h>
#include "../bus.h"
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 584c70a34b52..c036e4d0b610 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -5,7 +5,6 @@
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index c8ebf4e281f2..bee93066a849 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -2,7 +2,6 @@
/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/module.h>
-#include <linux/pfn_t.h>
#include "../nvdimm/pfn.h"
#include "../nvdimm/nd.h"
#include "bus.h"
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index e16d1d40d773..54c480e874cb 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -7,7 +7,6 @@
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
-#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/uio.h>
@@ -148,7 +147,7 @@ enum dax_device_flags {
* pages accessible at the device relative @pgoff.
*/
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
+ enum dax_access_mode mode, void **kaddr, unsigned long *pfn)
{
long avail;
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index fee04fdb0822..b46eb8a552d7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -36,7 +36,6 @@ config UDMABUF
depends on DMA_SHARED_BUFFER
depends on MEMFD_CREATE || COMPILE_TEST
depends on MMU
- select VMAP_PFN
help
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 9663ba1bb6ac..a8a90acf4f34 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -218,7 +218,6 @@ static void dma_fence_chain_set_deadline(struct dma_fence *fence,
}
const struct dma_fence_ops dma_fence_chain_ops = {
- .use_64bit_seqno = true,
.get_driver_name = dma_fence_chain_get_driver_name,
.get_timeline_name = dma_fence_chain_get_timeline_name,
.enable_signaling = dma_fence_chain_enable_signaling,
@@ -252,7 +251,7 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
chain->prev_seqno = 0;
/* Try to reuse the context of the previous chain node. */
- if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
+ if (prev_chain && __dma_fence_is_later(prev, seqno, prev->seqno)) {
context = prev->context;
chain->prev_seqno = prev->seqno;
} else {
@@ -262,8 +261,8 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
seqno = max(prev->seqno, seqno);
}
- dma_fence_init(&chain->base, &dma_fence_chain_ops,
- &chain->lock, context, seqno);
+ dma_fence_init64(&chain->base, &dma_fence_chain_ops, &chain->lock,
+ context, seqno);
/*
* Chaining dma_fence_chain container together is only allowed through
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index f0cdd3e99d36..3f78c56b58dc 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -511,12 +511,20 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
dma_fence_enable_sw_signaling(fence);
- trace_dma_fence_wait_start(fence);
+ if (trace_dma_fence_wait_start_enabled()) {
+ rcu_read_lock();
+ trace_dma_fence_wait_start(fence);
+ rcu_read_unlock();
+ }
if (fence->ops->wait)
ret = fence->ops->wait(fence, intr, timeout);
else
ret = dma_fence_default_wait(fence, intr, timeout);
- trace_dma_fence_wait_end(fence);
+ if (trace_dma_fence_wait_end_enabled()) {
+ rcu_read_lock();
+ trace_dma_fence_wait_end(fence);
+ rcu_read_unlock();
+ }
return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);
@@ -533,16 +541,23 @@ void dma_fence_release(struct kref *kref)
struct dma_fence *fence =
container_of(kref, struct dma_fence, refcount);
+ rcu_read_lock();
trace_dma_fence_destroy(fence);
- if (WARN(!list_empty(&fence->cb_list) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
- "Fence %s:%s:%llx:%llx released with pending signals!\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- fence->context, fence->seqno)) {
+ if (!list_empty(&fence->cb_list) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ const char __rcu *timeline;
+ const char __rcu *driver;
unsigned long flags;
+ driver = dma_fence_driver_name(fence);
+ timeline = dma_fence_timeline_name(fence);
+
+ WARN(1,
+ "Fence %s:%s:%llx:%llx released with pending signals!\n",
+ rcu_dereference(driver), rcu_dereference(timeline),
+ fence->context, fence->seqno);
+
/*
* Failed to signal before release, likely a refcounting issue.
*
@@ -556,6 +571,8 @@ void dma_fence_release(struct kref *kref)
spin_unlock_irqrestore(fence->lock, flags);
}
+ rcu_read_unlock();
+
if (fence->ops->release)
fence->ops->release(fence);
else
@@ -982,13 +999,43 @@ EXPORT_SYMBOL(dma_fence_set_deadline);
*/
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
+ const char __rcu *timeline;
+ const char __rcu *driver;
+
+ rcu_read_lock();
+
+ timeline = dma_fence_timeline_name(fence);
+ driver = dma_fence_driver_name(fence);
+
seq_printf(seq, "%s %s seq %llu %ssignalled\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence), fence->seqno,
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
+ fence->seqno,
dma_fence_is_signaled(fence) ? "" : "un");
+
+ rcu_read_unlock();
}
EXPORT_SYMBOL(dma_fence_describe);
+static void
+__dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno, unsigned long flags)
+{
+ BUG_ON(!lock);
+ BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+
+ kref_init(&fence->refcount);
+ fence->ops = ops;
+ INIT_LIST_HEAD(&fence->cb_list);
+ fence->lock = lock;
+ fence->context = context;
+ fence->seqno = seqno;
+ fence->flags = flags;
+ fence->error = 0;
+
+ trace_dma_fence_init(fence);
+}
+
/**
* dma_fence_init - Initialize a custom fence.
* @fence: the fence to initialize
@@ -1008,18 +1055,94 @@ void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, u64 seqno)
{
- BUG_ON(!lock);
- BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+ __dma_fence_init(fence, ops, lock, context, seqno, 0UL);
+}
+EXPORT_SYMBOL(dma_fence_init);
- kref_init(&fence->refcount);
- fence->ops = ops;
- INIT_LIST_HEAD(&fence->cb_list);
- fence->lock = lock;
- fence->context = context;
- fence->seqno = seqno;
- fence->flags = 0UL;
- fence->error = 0;
+/**
+ * dma_fence_init64 - Initialize a custom fence with 64-bit seqno support.
+ * @fence: the fence to initialize
+ * @ops: the dma_fence_ops for operations on this fence
+ * @lock: the irqsafe spinlock to use for locking this fence
+ * @context: the execution context this fence is run on
+ * @seqno: a linear increasing sequence number for this context
+ *
+ * Initializes an allocated fence; the caller doesn't have to keep its
+ * refcount after committing with this fence, but it will need to hold a
+ * refcount again if &dma_fence_ops.enable_signaling gets called.
+ *
+ * Context and seqno are used for easy comparison between fences, allowing
+ * to check which fence is later by simply using dma_fence_later().
+ */
+void
+dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno)
+{
+ __dma_fence_init(fence, ops, lock, context, seqno,
+ BIT(DMA_FENCE_FLAG_SEQNO64_BIT));
+}
+EXPORT_SYMBOL(dma_fence_init64);
- trace_dma_fence_init(fence);
+/**
+ * dma_fence_driver_name - Access the driver name
+ * @fence: the fence to query
+ *
+ * Returns a driver name backing the dma-fence implementation.
+ *
+ * IMPORTANT CONSIDERATION:
+ * Dma-fence contract stipulates that access to driver provided data (data not
+ * directly embedded into the object itself), such as the &dma_fence.lock and
+ * memory potentially accessed by the &dma_fence.ops functions, is forbidden
+ * after the fence has been signalled. Drivers are allowed to free that data,
+ * and some do.
+ *
+ * To allow safe access, drivers are mandated to guarantee an RCU grace
+ * period between signalling the fence and freeing said data.
+ *
+ * As such, access to the driver name is only valid inside an RCU locked section.
+ * The pointer MUST be both queried and USED ONLY WITHIN a SINGLE block guarded
+ * by the &rcu_read_lock and &rcu_read_unlock pair.
+ */
+const char __rcu *dma_fence_driver_name(struct dma_fence *fence)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "RCU protection is required for safe access to returned string");
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return fence->ops->get_driver_name(fence);
+ else
+ return "detached-driver";
}
-EXPORT_SYMBOL(dma_fence_init);
+EXPORT_SYMBOL(dma_fence_driver_name);
+
+/**
+ * dma_fence_timeline_name - Access the timeline name
+ * @fence: the fence to query
+ *
+ * Returns a timeline name provided by the dma-fence implementation.
+ *
+ * IMPORTANT CONSIDERATION:
+ * Dma-fence contract stipulates that access to driver provided data (data not
+ * directly embedded into the object itself), such as the &dma_fence.lock and
+ * memory potentially accessed by the &dma_fence.ops functions, is forbidden
+ * after the fence has been signalled. Drivers are allowed to free that data,
+ * and some do.
+ *
+ * To allow safe access, drivers are mandated to guarantee an RCU grace
+ * period between signalling the fence and freeing said data.
+ *
+ * As such, access to the timeline name is only valid inside an RCU locked section.
+ * The pointer MUST be both queried and USED ONLY WITHIN a SINGLE block guarded
+ * by the &rcu_read_lock and &rcu_read_unlock pair.
+ */
+const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "RCU protection is required for safe access to returned string");
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return fence->ops->get_timeline_name(fence);
+ else
+ return "signaled-timeline";
+}
+EXPORT_SYMBOL(dma_fence_timeline_name);
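Per the contract spelled out in the two kerneldoc blocks, callers must query and consume both strings inside a single RCU critical section, as the sync_file conversions later in this patch do. In miniature:

	const char __rcu *driver, *timeline;

	rcu_read_lock();
	driver = dma_fence_driver_name(fence);
	timeline = dma_fence_timeline_name(fence);
	pr_debug("%s:%s\n", rcu_dereference(driver), rcu_dereference(timeline));
	rcu_read_unlock();
	/* neither pointer may be used past this point */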
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index a5eef06c4226..bb369b38b001 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -12,3 +12,13 @@ config DMABUF_HEAPS_CMA
Choose this option to enable dma-buf CMA heap. This heap is backed
by the Contiguous Memory Allocator (CMA). If your system has these
regions, you should say Y here.
+
+config DMABUF_HEAPS_CMA_LEGACY
+ bool "Legacy DMA-BUF CMA Heap"
+ default y
+ depends on DMABUF_HEAPS_CMA
+ help
+ Add a duplicate CMA-backed dma-buf heap with legacy naming derived
+ from the CMA area's devicetree node, or "reserved" if the area is not
+ defined in the devicetree. This uses the same underlying allocator as
+ CONFIG_DMABUF_HEAPS_CMA.
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 9512d050563a..0df007111975 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -9,6 +9,9 @@
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*/
+
+#define pr_fmt(fmt) "cma_heap: " fmt
+
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
@@ -22,6 +25,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#define DEFAULT_CMA_NAME "default_cma_region"
struct cma_heap {
struct dma_heap *heap;
@@ -366,17 +370,17 @@ static const struct dma_heap_ops cma_heap_ops = {
.allocate = cma_heap_allocate,
};
-static int __init __add_cma_heap(struct cma *cma, void *data)
+static int __init __add_cma_heap(struct cma *cma, const char *name)
{
- struct cma_heap *cma_heap;
struct dma_heap_export_info exp_info;
+ struct cma_heap *cma_heap;
cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
if (!cma_heap)
return -ENOMEM;
cma_heap->cma = cma;
- exp_info.name = cma_get_name(cma);
+ exp_info.name = name;
exp_info.ops = &cma_heap_ops;
exp_info.priv = cma_heap;
@@ -394,12 +398,30 @@ static int __init __add_cma_heap(struct cma *cma, void *data)
static int __init add_default_cma_heap(void)
{
struct cma *default_cma = dev_get_cma_area(NULL);
- int ret = 0;
+ const char *legacy_cma_name;
+ int ret;
- if (default_cma)
- ret = __add_cma_heap(default_cma, NULL);
+ if (!default_cma)
+ return 0;
- return ret;
+ ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_DMABUF_HEAPS_CMA_LEGACY)) {
+ legacy_cma_name = cma_get_name(default_cma);
+ if (!strcmp(legacy_cma_name, DEFAULT_CMA_NAME)) {
+ pr_warn("legacy name and default name are the same, skipping legacy heap\n");
+ return 0;
+ }
+
+ ret = __add_cma_heap(default_cma, legacy_cma_name);
+ if (ret)
+ pr_warn("failed to add legacy heap: %pe\n",
+ ERR_PTR(ret));
+ }
+
+ return 0;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 82b1b714300d..bbe7881f1360 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -33,7 +33,7 @@ struct system_heap_buffer {
struct dma_heap_attachment {
struct device *dev;
- struct sg_table *table;
+ struct sg_table table;
struct list_head list;
bool mapped;
};
@@ -52,29 +52,22 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
-static struct sg_table *dup_sg_table(struct sg_table *table)
+static int dup_sg_table(struct sg_table *from, struct sg_table *to)
{
- struct sg_table *new_table;
- int ret, i;
struct scatterlist *sg, *new_sg;
+ int ret, i;
- new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
- if (!new_table)
- return ERR_PTR(-ENOMEM);
-
- ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
- if (ret) {
- kfree(new_table);
- return ERR_PTR(-ENOMEM);
- }
+ ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL);
+ if (ret)
+ return ret;
- new_sg = new_table->sgl;
- for_each_sgtable_sg(table, sg, i) {
+ new_sg = to->sgl;
+ for_each_sgtable_sg(from, sg, i) {
sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
new_sg = sg_next(new_sg);
}
- return new_table;
+ return 0;
}
static int system_heap_attach(struct dma_buf *dmabuf,
@@ -82,19 +75,18 @@ static int system_heap_attach(struct dma_buf *dmabuf,
{
struct system_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
- struct sg_table *table;
+ int ret;
a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a)
return -ENOMEM;
- table = dup_sg_table(&buffer->sg_table);
- if (IS_ERR(table)) {
+ ret = dup_sg_table(&buffer->sg_table, &a->table);
+ if (ret) {
kfree(a);
- return -ENOMEM;
+ return ret;
}
- a->table = table;
a->dev = attachment->dev;
INIT_LIST_HEAD(&a->list);
a->mapped = false;
@@ -118,8 +110,7 @@ static void system_heap_detach(struct dma_buf *dmabuf,
list_del(&a->list);
mutex_unlock(&buffer->lock);
- sg_free_table(a->table);
- kfree(a->table);
+ sg_free_table(&a->table);
kfree(a);
}
@@ -127,7 +118,7 @@ static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attac
enum dma_data_direction direction)
{
struct dma_heap_attachment *a = attachment->priv;
- struct sg_table *table = a->table;
+ struct sg_table *table = &a->table;
int ret;
ret = dma_map_sgtable(attachment->dev, table, direction, 0);
@@ -162,7 +153,7 @@ static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
- dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+ dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
}
mutex_unlock(&buffer->lock);
@@ -183,7 +174,7 @@ static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
- dma_sync_sgtable_for_device(a->dev, a->table, direction);
+ dma_sync_sgtable_for_device(a->dev, &a->table, direction);
}
mutex_unlock(&buffer->lock);
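Embedding the sg_table in struct dma_heap_attachment drops a kzalloc()/kfree() pair and one failure path per attach; after this change the lifetime rule reduces to:

	/* attach */
	ret = dup_sg_table(&buffer->sg_table, &a->table);	/* sg_alloc_table() inside */
	/* detach */
	sg_free_table(&a->table);	/* the table itself lives and dies with 'a' */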
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 4f27ee93a00c..3c20f1d31cf5 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -170,7 +170,7 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
{
struct sync_timeline *parent = dma_fence_parent(fence);
- return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
+ return !__dma_fence_is_later(fence, fence->seqno, parent->value);
}
static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index d9b1c1b2a72b..747e377fb954 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -135,12 +135,18 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
strscpy(buf, sync_file->user_name, len);
} else {
struct dma_fence *fence = sync_file->fence;
+ const char __rcu *timeline;
+ const char __rcu *driver;
+ rcu_read_lock();
+ driver = dma_fence_driver_name(fence);
+ timeline = dma_fence_timeline_name(fence);
snprintf(buf, len, "%s-%s%llu-%lld",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
fence->context,
fence->seqno);
+ rcu_read_unlock();
}
return buf;
@@ -262,9 +268,17 @@ err_put_fd:
static int sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
- strscpy(info->obj_name, fence->ops->get_timeline_name(fence),
+ const char __rcu *timeline;
+ const char __rcu *driver;
+
+ rcu_read_lock();
+
+ driver = dma_fence_driver_name(fence);
+ timeline = dma_fence_timeline_name(fence);
+
+ strscpy(info->obj_name, rcu_dereference(timeline),
sizeof(info->obj_name));
- strscpy(info->driver_name, fence->ops->get_driver_name(fence),
+ strscpy(info->driver_name, rcu_dereference(driver),
sizeof(info->driver_name));
info->status = dma_fence_get_status(fence);
@@ -273,6 +287,8 @@ static int sync_fill_fence_info(struct dma_fence *fence,
ktime_to_ns(dma_fence_timestamp(fence)) :
ktime_set(0, 0);
+ rcu_read_unlock();
+
return info->status;
}
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index c9d0c68d2fcb..40399c26e6be 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -109,29 +109,22 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
struct udmabuf *ubuf = buf->priv;
- unsigned long *pfns;
+ struct page **pages;
void *vaddr;
pgoff_t pg;
dma_resv_assert_held(buf->resv);
- /**
- * HVO may free tail pages, so just use pfn to map each folio
- * into vmalloc area.
- */
- pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
- if (!pfns)
+ pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
return -ENOMEM;
- for (pg = 0; pg < ubuf->pagecount; pg++) {
- unsigned long pfn = folio_pfn(ubuf->folios[pg]);
-
- pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
- pfns[pg] = pfn;
- }
+ for (pg = 0; pg < ubuf->pagecount; pg++)
+ pages[pg] = folio_page(ubuf->folios[pg],
+ ubuf->offsets[pg] >> PAGE_SHIFT);
- vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
- kvfree(pfns);
+ vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
+ kvfree(pages);
if (!vaddr)
return -EINVAL;
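The mapping side now uses vm_map_ram(); the unmap side (not shown in this excerpt) must pair it with vm_unmap_ram() using the same page count:

	void *va = vm_map_ram(pages, count, -1 /* any NUMA node */);
	if (!va)
		return -EINVAL;
	/* ... access mapping ... */
	vm_unmap_ram(va, count);	/* count must match the vm_map_ram() call */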
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index db87dd2a07f7..05c7c7d9e5a4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -89,7 +89,6 @@ config APPLE_ADMAC
tristate "Apple ADMAC support"
depends on ARCH_APPLE || COMPILE_TEST
select DMA_ENGINE
- default ARCH_APPLE
help
Enable support for Audio DMA Controller found on Apple Silicon SoCs.
@@ -111,7 +110,7 @@ config AT_HDMAC
config AT_XDMAC
tristate "Atmel XDMA support"
- depends on ARCH_AT91
+ depends on ARCH_MICROCHIP
select DMA_ENGINE
help
Support the Atmel XDMA controller.
@@ -572,6 +571,15 @@ config PLX_DMA
These are exposed via extra functions on the switch's
upstream port. Each function exposes one DMA channel.
+config SOPHGO_CV1800B_DMAMUX
+ tristate "Sophgo CV1800/SG2000 series SoC DMA multiplexer support"
+ depends on MFD_SYSCON
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Support for the DMA multiplexer on Sophgo CV1800/SG2000
+ series SoCs.
+ Say Y here if your board has this SoC.
+
config STE_DMA40
bool "ST-Ericsson DMA40 support"
depends on ARCH_U8500
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index ba9732644752..a54d7688392b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SF_PDMA) += sf-pdma/
+obj-$(CONFIG_SOPHGO_CV1800B_DMAMUX) += cv1800b-dmamux.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
diff --git a/drivers/dma/cv1800b-dmamux.c b/drivers/dma/cv1800b-dmamux.c
new file mode 100644
index 000000000000..e900d6595617
--- /dev/null
+++ b/drivers/dma/cv1800b-dmamux.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/llist.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+
+#define REG_DMA_CHANNEL_REMAP0 0x154
+#define REG_DMA_CHANNEL_REMAP1 0x158
+#define REG_DMA_INT_MUX 0x298
+
+#define DMAMUX_NCELLS 2
+#define MAX_DMA_MAPPING_ID 42
+#define MAX_DMA_CPU_ID 2
+#define MAX_DMA_CH_ID 7
+
+#define DMAMUX_INTMUX_REGISTER_LEN 4
+#define DMAMUX_NR_CH_PER_REGISTER 4
+#define DMAMUX_BIT_PER_CH 8
+#define DMAMUX_CH_MASk GENMASK(5, 0)
+#define DMAMUX_INT_BIT_PER_CPU 10
+#define DMAMUX_CH_UPDATE_BIT BIT(31)
+
+#define DMAMUX_CH_REGPOS(chid) \
+ ((chid) / DMAMUX_NR_CH_PER_REGISTER)
+#define DMAMUX_CH_REGOFF(chid) \
+ ((chid) % DMAMUX_NR_CH_PER_REGISTER)
+#define DMAMUX_CH_REG(chid) \
+ ((DMAMUX_CH_REGPOS(chid) * sizeof(u32)) + \
+ REG_DMA_CHANNEL_REMAP0)
+#define DMAMUX_CH_SET(chid, val) \
+ (((val) << (DMAMUX_CH_REGOFF(chid) * DMAMUX_BIT_PER_CH)) | \
+ DMAMUX_CH_UPDATE_BIT)
+#define DMAMUX_CH_MASK(chid) \
+ DMAMUX_CH_SET(chid, DMAMUX_CH_MASk)
+
+#define DMAMUX_INT_BIT(chid, cpuid) \
+ BIT((cpuid) * DMAMUX_INT_BIT_PER_CPU + (chid))
+#define DMAMUX_INTEN_BIT(cpuid) \
+ DMAMUX_INT_BIT(8, cpuid)
+#define DMAMUX_INT_CH_BIT(chid, cpuid) \
+ (DMAMUX_INT_BIT(chid, cpuid) | DMAMUX_INTEN_BIT(cpuid))
+#define DMAMUX_INT_MASK(chid) \
+ (DMAMUX_INT_BIT(chid, 0) | \
+ DMAMUX_INT_BIT(chid, 1) | \
+ DMAMUX_INT_BIT(chid, 2))
+#define DMAMUX_INT_CH_MASK(chid, cpuid) \
+ (DMAMUX_INT_MASK(chid) | DMAMUX_INTEN_BIT(cpuid))
+
+struct cv1800_dmamux_data {
+ struct dma_router dmarouter;
+ struct regmap *regmap;
+ spinlock_t lock;
+ struct llist_head free_maps;
+ struct llist_head reserve_maps;
+ DECLARE_BITMAP(mapped_peripherals, MAX_DMA_MAPPING_ID);
+};
+
+struct cv1800_dmamux_map {
+ struct llist_node node;
+ unsigned int channel;
+ unsigned int peripheral;
+ unsigned int cpu;
+};
+
+static void cv1800_dmamux_free(struct device *dev, void *route_data)
+{
+ struct cv1800_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct cv1800_dmamux_map *map = route_data;
+
+ guard(spinlock_irqsave)(&dmamux->lock);
+
+ regmap_update_bits(dmamux->regmap,
+ DMAMUX_CH_REG(map->channel),
+ DMAMUX_CH_MASK(map->channel),
+ DMAMUX_CH_UPDATE_BIT);
+
+ regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX,
+ DMAMUX_INT_CH_MASK(map->channel, map->cpu),
+ DMAMUX_INTEN_BIT(map->cpu));
+
+ dev_dbg(dev, "free channel %u for req %u (cpu %u)\n",
+ map->channel, map->peripheral, map->cpu);
+}
+
+static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct cv1800_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ struct cv1800_dmamux_map *map;
+ struct llist_node *node;
+ unsigned long flags;
+ unsigned int chid, devid, cpuid;
+ int ret;
+
+ if (dma_spec->args_count != DMAMUX_NCELLS) {
+ dev_err(&pdev->dev, "invalid number of dma mux args\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ devid = dma_spec->args[0];
+ cpuid = dma_spec->args[1];
+ dma_spec->args_count = 1;
+
+ if (devid > MAX_DMA_MAPPING_ID) {
+ dev_err(&pdev->dev, "invalid device id: %u\n", devid);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (cpuid > MAX_DMA_CPU_ID) {
+ dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "can't get dma master\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_lock_irqsave(&dmamux->lock, flags);
+
+ if (test_bit(devid, dmamux->mapped_peripherals)) {
+ llist_for_each_entry(map, dmamux->reserve_maps.first, node) {
+ if (map->peripheral == devid && map->cpu == cpuid)
+ goto found;
+ }
+
+ ret = -EINVAL;
+ goto failed;
+ } else {
+ node = llist_del_first(&dmamux->free_maps);
+ if (!node) {
+ ret = -ENODEV;
+ goto failed;
+ }
+
+ map = llist_entry(node, struct cv1800_dmamux_map, node);
+ llist_add(&map->node, &dmamux->reserve_maps);
+ set_bit(devid, dmamux->mapped_peripherals);
+ }
+
+found:
+ chid = map->channel;
+ map->peripheral = devid;
+ map->cpu = cpuid;
+
+ regmap_set_bits(dmamux->regmap,
+ DMAMUX_CH_REG(chid),
+ DMAMUX_CH_SET(chid, devid));
+
+ regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX,
+ DMAMUX_INT_CH_MASK(chid, cpuid),
+ DMAMUX_INT_CH_BIT(chid, cpuid));
+
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ dma_spec->args[0] = chid;
+
+ dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n",
+ chid, devid, cpuid);
+
+ return map;
+
+failed:
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ of_node_put(dma_spec->np);
+ dev_err(&pdev->dev, "errno %d\n", ret);
+ return ERR_PTR(ret);
+}
+
+static int cv1800_dmamux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mux_node = dev->of_node;
+ struct cv1800_dmamux_data *data;
+ struct cv1800_dmamux_map *tmp;
+ struct device *parent = dev->parent;
+ struct regmap *regmap = NULL;
+ unsigned int i;
+
+ if (!parent)
+ return -ENODEV;
+
+ regmap = device_node_to_regmap(parent->of_node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&data->lock);
+ init_llist_head(&data->free_maps);
+ init_llist_head(&data->reserve_maps);
+
+ for (i = 0; i <= MAX_DMA_CH_ID; i++) {
+ tmp = devm_kmalloc(dev, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ /* It is OK not to allocate all channels */
+ dev_warn(dev, "cannot allocate channel %u\n", i);
+ continue;
+ }
+
+ init_llist_node(&tmp->node);
+ tmp->channel = i;
+ llist_add(&tmp->node, &data->free_maps);
+ }
+
+ /* if no channel is allocated, the probe must fail */
+ if (llist_empty(&data->free_maps))
+ return -ENOMEM;
+
+ data->regmap = regmap;
+ data->dmarouter.dev = dev;
+ data->dmarouter.route_free = cv1800_dmamux_free;
+
+ platform_set_drvdata(pdev, data);
+
+ return of_dma_router_register(mux_node,
+ cv1800_dmamux_route_allocate,
+ &data->dmarouter);
+}
+
+static void cv1800_dmamux_remove(struct platform_device *pdev)
+{
+ of_dma_controller_free(pdev->dev.of_node);
+}
+
+static const struct of_device_id cv1800_dmamux_ids[] = {
+ { .compatible = "sophgo,cv1800b-dmamux", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cv1800_dmamux_ids);
+
+static struct platform_driver cv1800_dmamux_driver = {
+ .probe = cv1800_dmamux_probe,
+ .remove = cv1800_dmamux_remove,
+ .driver = {
+ .name = "cv1800-dmamux",
+ .of_match_table = cv1800_dmamux_ids,
+ },
+};
+module_platform_driver(cv1800_dmamux_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo CV1800/SG2000 Series SoC DMAMUX driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 36943b0c6d60..5b06b0dc67ee 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -6,6 +6,7 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/adi-axi-common.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -22,7 +23,6 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/fpga/adi-axi-common.h>
#include <dt-bindings/dma/axi-dmac.h>
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index c2b88cc99e5d..b43255f914f3 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -24,18 +24,6 @@
#include "../virt-dma.h"
static inline
-struct device *dchan2dev(struct dma_chan *dchan)
-{
- return &dchan->dev->device;
-}
-
-static inline
-struct device *chan2dev(struct dw_edma_chan *chan)
-{
- return &chan->vc.chan.dev->device;
-}
-
-static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct dw_edma_desc, vd);
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
index b4323d243d6d..4be81db24a19 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -48,11 +48,6 @@ struct dpdmai_cmd_destroy {
__le32 dpdmai_id;
} __packed;
-static inline u64 mc_enc(int lsoffset, int width, u64 val)
-{
- return (val & MAKE_UMASK64(width)) << lsoffset;
-}
-
/**
* dpdmai_open() - Open a control session for the specified object
* @mc_io: Pointer to MC portal's I/O object
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 823f5c6bc2e1..21e13f1207cb 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -148,6 +148,9 @@
* @__reserved1: Reserved field.
* @cfg8b_w1: Compound descriptor command queue origin produced
* by qDMA and dynamic debug field.
+ * @__reserved2: Reserved field.
+ * @cmd: Command for QDMA (see FSL_QDMA_CMD_RWTTYPE and
+ * others).
* @data: Pointer to the memory 40-bit address, describes DMA
* source information and DMA destination information.
*/
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 80355d03004d..35bdefd3728b 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -1036,7 +1036,6 @@ static void idxd_reset_prepare(struct pci_dev *pdev)
const char *idxd_name;
int rc;
- dev = &idxd->pdev->dev;
idxd_name = dev_name(idxd_confdev(idxd));
struct idxd_saved_states *idxd_saved __free(kfree) =
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 006ba206ab1b..9c1c546fe443 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -45,7 +45,7 @@ union gen_cap_reg {
u64 rsvd3:32;
};
u64 bits;
-} __packed;
+};
#define IDXD_GENCAP_OFFSET 0x10
union wq_cap_reg {
@@ -65,7 +65,7 @@ union wq_cap_reg {
u64 rsvd4:8;
};
u64 bits;
-} __packed;
+};
#define IDXD_WQCAP_OFFSET 0x20
#define IDXD_WQCFG_MIN 5
@@ -79,7 +79,7 @@ union group_cap_reg {
u64 rsvd:45;
};
u64 bits;
-} __packed;
+};
#define IDXD_GRPCAP_OFFSET 0x30
union engine_cap_reg {
@@ -88,7 +88,7 @@ union engine_cap_reg {
u64 rsvd:56;
};
u64 bits;
-} __packed;
+};
#define IDXD_ENGCAP_OFFSET 0x38
@@ -114,7 +114,7 @@ union offsets_reg {
u64 rsvd:48;
};
u64 bits[2];
-} __packed;
+};
#define IDXD_TABLE_MULT 0x100
@@ -128,7 +128,7 @@ union gencfg_reg {
u32 rsvd2:18;
};
u32 bits;
-} __packed;
+};
#define IDXD_GENCTRL_OFFSET 0x88
union genctrl_reg {
@@ -139,7 +139,7 @@ union genctrl_reg {
u32 rsvd:29;
};
u32 bits;
-} __packed;
+};
#define IDXD_GENSTATS_OFFSET 0x90
union gensts_reg {
@@ -149,7 +149,7 @@ union gensts_reg {
u32 rsvd:28;
};
u32 bits;
-} __packed;
+};
enum idxd_device_status_state {
IDXD_DEVICE_STATE_DISABLED = 0,
@@ -183,7 +183,7 @@ union idxd_command_reg {
u32 int_req:1;
};
u32 bits;
-} __packed;
+};
enum idxd_cmd {
IDXD_CMD_ENABLE_DEVICE = 1,
@@ -213,7 +213,7 @@ union cmdsts_reg {
u8 active:1;
};
u32 bits;
-} __packed;
+};
#define IDXD_CMDSTS_ACTIVE 0x80000000
#define IDXD_CMDSTS_ERR_MASK 0xff
#define IDXD_CMDSTS_RES_SHIFT 8
@@ -284,7 +284,7 @@ union sw_err_reg {
u64 rsvd5;
};
u64 bits[4];
-} __packed;
+};
union iaa_cap_reg {
struct {
@@ -303,7 +303,7 @@ union iaa_cap_reg {
u64 rsvd:52;
};
u64 bits;
-} __packed;
+};
#define IDXD_IAACAP_OFFSET 0x180
@@ -320,7 +320,7 @@ union evlcfg_reg {
u64 rsvd2:28;
};
u64 bits[2];
-} __packed;
+};
#define IDXD_EVL_SIZE_MIN 0x0040
#define IDXD_EVL_SIZE_MAX 0xffff
@@ -334,7 +334,7 @@ union msix_perm {
u32 pasid:20;
};
u32 bits;
-} __packed;
+};
union group_flags {
struct {
@@ -352,13 +352,13 @@ union group_flags {
u64 rsvd5:26;
};
u64 bits;
-} __packed;
+};
struct grpcfg {
u64 wqs[4];
u64 engines;
union group_flags flags;
-} __packed;
+};
union wqcfg {
struct {
@@ -410,7 +410,7 @@ union wqcfg {
u64 op_config[4];
};
u32 bits[16];
-} __packed;
+};
#define WQCFG_PASID_IDX 2
#define WQCFG_PRIVL_IDX 2
@@ -474,7 +474,7 @@ union idxd_perfcap {
u64 rsvd3:8;
};
u64 bits;
-} __packed;
+};
#define IDXD_EVNTCAP_OFFSET 0x80
union idxd_evntcap {
@@ -483,7 +483,7 @@ union idxd_evntcap {
u64 rsvd:36;
};
u64 bits;
-} __packed;
+};
struct idxd_event {
union {
@@ -493,7 +493,7 @@ struct idxd_event {
};
u32 val;
};
-} __packed;
+};
#define IDXD_CNTRCAP_OFFSET 0x800
struct idxd_cntrcap {
@@ -506,7 +506,7 @@ struct idxd_cntrcap {
u32 val;
};
struct idxd_event events[];
-} __packed;
+};
#define IDXD_PERFRST_OFFSET 0x10
union idxd_perfrst {
@@ -516,7 +516,7 @@ union idxd_perfrst {
u32 rsvd:30;
};
u32 val;
-} __packed;
+};
#define IDXD_OVFSTATUS_OFFSET 0x30
#define IDXD_PERFFRZ_OFFSET 0x20
@@ -533,7 +533,7 @@ union idxd_cntrcfg {
u64 rsvd3:4;
};
u64 val;
-} __packed;
+};
#define IDXD_FLTCFG_OFFSET 0x300
@@ -543,7 +543,7 @@ union idxd_cntrdata {
u64 event_count_value;
};
u64 val;
-} __packed;
+};
union event_cfg {
struct {
@@ -551,7 +551,7 @@ union event_cfg {
u64 event_enc:28;
};
u64 val;
-} __packed;
+};
union filter_cfg {
struct {
@@ -562,7 +562,7 @@ union filter_cfg {
u64 eng:8;
};
u64 val;
-} __packed;
+};
#define IDXD_EVLSTATUS_OFFSET 0xf0
@@ -580,7 +580,7 @@ union evl_status_reg {
u32 bits_upper32;
};
u64 bits;
-} __packed;
+};
#define IDXD_MAX_BATCH_IDENT 256
@@ -620,17 +620,17 @@ struct __evl_entry {
};
u64 fault_addr;
u64 rsvd5;
-} __packed;
+};
struct dsa_evl_entry {
struct __evl_entry e;
struct dsa_completion_record cr;
-} __packed;
+};
struct iax_evl_entry {
struct __evl_entry e;
u64 rsvd[4];
struct iax_completion_record cr;
-} __packed;
+};
#endif
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index c8dc504510f1..b7fb843c67a6 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -641,7 +641,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
int chan_num = TDMA_CHANNEL_NUM;
struct gen_pool *pool = NULL;
- type = (enum mmp_tdma_type)device_get_match_data(&pdev->dev);
+ type = (kernel_ulong_t)device_get_match_data(&pdev->dev);
/* always have couple channels */
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fa6e4646fdc2..1fdcb0f5c9e7 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1061,8 +1061,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
*/
mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr))
+ return ERR_PTR(-ENOMEM);
+
mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_dst_addr)) {
+ ret = -ENOMEM;
+ goto err_unmap_src;
+ }
+
/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
@@ -1071,8 +1079,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->dma_desc_pool_virt =
dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
GFP_KERNEL);
- if (!mv_chan->dma_desc_pool_virt)
- return ERR_PTR(-ENOMEM);
+ if (!mv_chan->dma_desc_pool_virt) {
+ ret = -ENOMEM;
+ goto err_unmap_dst;
+ }
/* discover transaction capabilities from the platform data */
dma_dev->cap_mask = cap_mask;
@@ -1155,6 +1165,13 @@ err_free_irq:
err_free_dma:
dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+err_unmap_dst:
+ dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+err_unmap_src:
+ dma_unmap_single(dma_dev->dev, mv_chan->dummy_src_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+
return ERR_PTR(ret);
}
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 7a2488a0d6a3..765462303de0 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -711,6 +711,9 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
list_add_tail(&ldesc->node, &lhead);
ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dchan->device->dev,
+ ldesc->hwdesc_dma_addr))
+ goto unmap_error;
dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
hwdesc, &ldesc->hwdesc_dma_addr);
@@ -737,6 +740,16 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
spin_unlock_irq(&chan->lock);
return ARRAY_SIZE(dpage->desc);
+
+unmap_error:
+ while (i--) {
+ ldesc--; hwdesc--;
+
+ dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
+ sizeof(*hwdesc), DMA_TO_DEVICE);
+ }
+
+ return -ENOMEM;
}
static void nbpf_desc_put(struct nbpf_desc *desc)
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index b1f0001cc99c..8e87738086b2 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -569,17 +569,6 @@ static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
writel_relaxed(val, addr);
}
-/* gpi_write_reg_field - write to specific bit field */
-static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
- u32 mask, u32 shift, u32 val)
-{
- u32 tmp = gpi_read_reg(gpii, addr);
-
- tmp &= ~mask;
- val = tmp | ((val << shift) & mask);
- gpi_write_reg(gpii, addr, val);
-}
-
static __always_inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 6ea5a880b433..8184d475a49a 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -16,7 +16,7 @@ config SH_DMAE_BASE
depends on SUPERH || COMPILE_TEST
depends on !SUPERH || SH_DMA
depends on !SH_DMA_API
- default y
+ default SUPERH || SH_DMA
select RENESAS_DMA
help
Enable support for the Renesas SuperH DMA controllers.
diff --git a/drivers/dma/stm32/stm32-dma.c b/drivers/dma/stm32/stm32-dma.c
index 917f8e922373..04389936c8a6 100644
--- a/drivers/dma/stm32/stm32-dma.c
+++ b/drivers/dma/stm32/stm32-dma.c
@@ -613,7 +613,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
reg->dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
@@ -676,7 +676,7 @@ static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
chan->status = DMA_PAUSED;
- dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
}
static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
@@ -728,7 +728,7 @@ static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
- dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: reconfigured after pause/resume\n", &chan->vchan);
}
static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
@@ -744,7 +744,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
/* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
stm32_dma_post_resume_reconfigure(chan);
- else if (scr & STM32_DMA_SCR_DBM)
+ else if (scr & STM32_DMA_SCR_DBM && chan->desc->num_sgs > 2)
stm32_dma_configure_next_sg(chan);
} else {
chan->busy = false;
@@ -820,7 +820,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
stm32_dma_start_transfer(chan);
}
@@ -922,7 +922,7 @@ static int stm32_dma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
return 0;
}
diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c
index 0c6c4258b195..50e7106c5cb7 100644
--- a/drivers/dma/stm32/stm32-dma3.c
+++ b/drivers/dma/stm32/stm32-dma3.c
@@ -801,7 +801,7 @@ static void stm32_dma3_chan_start(struct stm32_dma3_chan *chan)
chan->dma_status = DMA_IN_PROGRESS;
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static int stm32_dma3_chan_suspend(struct stm32_dma3_chan *chan, bool susp)
@@ -1452,7 +1452,7 @@ static int stm32_dma3_pause(struct dma_chan *c)
chan->dma_status = DMA_PAUSED;
- dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
return 0;
}
@@ -1465,7 +1465,7 @@ static int stm32_dma3_resume(struct dma_chan *c)
chan->dma_status = DMA_IN_PROGRESS;
- dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
return 0;
}
@@ -1490,7 +1490,7 @@ static int stm32_dma3_terminate_all(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
vchan_dma_desc_free_list(&chan->vchan, &head);
- dev_dbg(chan2dev(chan), "vchan %pK: terminated\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: terminated\n", &chan->vchan);
return 0;
}
@@ -1543,7 +1543,7 @@ static void stm32_dma3_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->swdesc) {
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
stm32_dma3_chan_start(chan);
}
diff --git a/drivers/dma/stm32/stm32-mdma.c b/drivers/dma/stm32/stm32-mdma.c
index e6d525901de7..080c1c725216 100644
--- a/drivers/dma/stm32/stm32-mdma.c
+++ b/drivers/dma/stm32/stm32-mdma.c
@@ -1187,7 +1187,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
chan->busy = true;
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_mdma_issue_pending(struct dma_chan *c)
@@ -1200,7 +1200,7 @@ static void stm32_mdma_issue_pending(struct dma_chan *c)
if (!vchan_issue_pending(&chan->vchan))
goto end;
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
if (!chan->desc && !chan->busy)
stm32_mdma_start_transfer(chan);
@@ -1220,7 +1220,7 @@ static int stm32_mdma_pause(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
if (!ret)
- dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan);
return ret;
}
@@ -1261,7 +1261,7 @@ static int stm32_mdma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan);
return 0;
}
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 24796aaaddfa..00d2fd38d17f 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1249,11 +1249,10 @@ static int sun4i_dma_probe(struct platform_device *pdev)
if (priv->irq < 0)
return priv->irq;
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "No clock specified\n");
- return PTR_ERR(priv->clk);
- }
+ priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+ "Couldn't start the clock\n");
if (priv->cfg->has_reset) {
priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
@@ -1328,12 +1327,6 @@ static int sun4i_dma_probe(struct platform_device *pdev)
vchan_init(&vchan->vc, &priv->slave);
}
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(&pdev->dev, "Couldn't enable the clock\n");
- return ret;
- }
-
/*
* Make sure the IRQs are all disabled and accounted for. The bootloader
* likes to leave these dirty
@@ -1343,33 +1336,23 @@ static int sun4i_dma_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
0, dev_name(&pdev->dev), priv);
- if (ret) {
- dev_err(&pdev->dev, "Cannot request IRQ\n");
- goto err_clk_disable;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Cannot request IRQ\n");
- ret = dma_async_device_register(&priv->slave);
- if (ret) {
- dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
- goto err_clk_disable;
- }
+ ret = dmaenginem_async_device_register(&priv->slave);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register DMA engine device\n");
ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
priv);
- if (ret) {
- dev_err(&pdev->dev, "of_dma_controller_register failed\n");
- goto err_dma_unregister;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register translation function\n");
dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");
return 0;
-
-err_dma_unregister:
- dma_async_device_unregister(&priv->slave);
-err_clk_disable:
- clk_disable_unprepare(priv->clk);
- return ret;
}
static void sun4i_dma_remove(struct platform_device *pdev)
@@ -1380,9 +1363,6 @@ static void sun4i_dma_remove(struct platform_device *pdev)
disable_irq(priv->irq);
of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&priv->slave);
-
- clk_disable_unprepare(priv->clk);
}
static struct sun4i_dma_config sun4i_a10_dma_cfg = {
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index 2adc2cca10e9..dbf168146d35 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -17,7 +17,7 @@ config TI_EDMA
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
- default y
+ default ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
help
Enable support for the TI EDMA (Enhanced DMA) controller. This DMA
engine is found on TI DaVinci, AM33xx, AM43xx, DRA7xx and Keystone 2
@@ -29,7 +29,7 @@ config DMA_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
- default y
+ default ARCH_OMAP
help
Enable support for the TI sDMA (System DMA or DMA4) controller. This
DMA engine is found on OMAP and DRA7xx parts.
diff --git a/drivers/dpll/Kconfig b/drivers/dpll/Kconfig
index 20607ed54243..ade872c915ac 100644
--- a/drivers/dpll/Kconfig
+++ b/drivers/dpll/Kconfig
@@ -3,5 +3,11 @@
# Generic DPLL drivers configuration
#
+menu "DPLL device support"
+
config DPLL
bool
+
+source "drivers/dpll/zl3073x/Kconfig"
+
+endmenu
diff --git a/drivers/dpll/Makefile b/drivers/dpll/Makefile
index 2e5b27850110..9e7a3a3e592e 100644
--- a/drivers/dpll/Makefile
+++ b/drivers/dpll/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_DPLL) += dpll.o
dpll-y += dpll_core.o
dpll-y += dpll_netlink.o
dpll-y += dpll_nl.o
+
+obj-$(CONFIG_ZL3073X) += zl3073x/
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 20bdc52f63a5..a461095efd8a 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -506,6 +506,7 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
refcount_set(&pin->refcount, 1);
xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
+ xa_init_flags(&pin->ref_sync_pins, XA_FLAGS_ALLOC);
ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
&dpll_pin_xa_id, GFP_KERNEL);
if (ret < 0)
@@ -514,6 +515,7 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
err_xa_alloc:
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
+ xa_destroy(&pin->ref_sync_pins);
dpll_pin_prop_free(&pin->prop);
err_pin_prop:
kfree(pin);
@@ -595,6 +597,7 @@ void dpll_pin_put(struct dpll_pin *pin)
xa_erase(&dpll_pin_xa, pin->id);
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
+ xa_destroy(&pin->ref_sync_pins);
dpll_pin_prop_free(&pin->prop);
kfree_rcu(pin, rcu);
}
@@ -659,11 +662,26 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
}
EXPORT_SYMBOL_GPL(dpll_pin_register);
+static void dpll_pin_ref_sync_pair_del(u32 ref_sync_pin_id)
+{
+ struct dpll_pin *pin, *ref_sync_pin;
+ unsigned long i;
+
+ xa_for_each(&dpll_pin_xa, i, pin) {
+ ref_sync_pin = xa_load(&pin->ref_sync_pins, ref_sync_pin_id);
+ if (ref_sync_pin) {
+ xa_erase(&pin->ref_sync_pins, ref_sync_pin_id);
+ __dpll_pin_change_ntf(pin);
+ }
+ }
+}
+
static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
ASSERT_DPLL_PIN_REGISTERED(pin);
+ dpll_pin_ref_sync_pair_del(pin->id);
dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv, cookie);
if (xa_empty(&pin->dpll_refs))
@@ -783,6 +801,33 @@ void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
}
EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
+/**
+ * dpll_pin_ref_sync_pair_add - create a reference sync signal pin pair
+ * @pin: pin which produces the base frequency
+ * @ref_sync_pin: pin which produces the sync signal
+ *
+ *
+ * Once the pins are paired, user-space can configure the reference sync
+ * pair.
+ * Context: Acquires a lock (dpll_lock)
+ * Return:
+ * * 0 on success
+ * * negative - error value
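+ *
+ * Illustrative call, assuming the driver registered both pins:
+ *
+ *	ret = dpll_pin_ref_sync_pair_add(base_pin, sync_pin);
+ *
+ * where base_pin and sync_pin are hypothetical driver pin handles.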
+ */
+int dpll_pin_ref_sync_pair_add(struct dpll_pin *pin,
+ struct dpll_pin *ref_sync_pin)
+{
+ int ret;
+
+ mutex_lock(&dpll_lock);
+ ret = xa_insert(&pin->ref_sync_pins, ref_sync_pin->id,
+ ref_sync_pin, GFP_KERNEL);
+ __dpll_pin_change_ntf(pin);
+ mutex_unlock(&dpll_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpll_pin_ref_sync_pair_add);
+
static struct dpll_device_registration *
dpll_device_registration_first(struct dpll_device *dpll)
{
diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h
index 2b6d8ef1cdf3..8ce969bbeb64 100644
--- a/drivers/dpll/dpll_core.h
+++ b/drivers/dpll/dpll_core.h
@@ -44,8 +44,8 @@ struct dpll_device {
* @module: module of creator
* @dpll_refs: hold references to dplls pin was registered with
* @parent_refs: hold references to parent pins pin was registered with
+ * @ref_sync_pins: hold references to pins for Reference SYNC feature
* @prop: pin properties copied from the registerer
- * @rclk_dev_name: holds name of device when pin can recover clock from it
* @refcount: refcount
* @rcu: rcu_head for kfree_rcu()
**/
@@ -56,6 +56,7 @@ struct dpll_pin {
struct module *module;
struct xarray dpll_refs;
struct xarray parent_refs;
+ struct xarray ref_sync_pins;
struct dpll_pin_properties prop;
refcount_t refcount;
struct rcu_head rcu;
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index c130f87147fa..036f21cac0a9 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -48,6 +48,24 @@ dpll_msg_add_dev_parent_handle(struct sk_buff *msg, u32 id)
return 0;
}
+static bool dpll_pin_available(struct dpll_pin *pin)
+{
+ struct dpll_pin_ref *par_ref;
+ unsigned long i;
+
+ if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
+ return false;
+ xa_for_each(&pin->parent_refs, i, par_ref)
+ if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
+ DPLL_REGISTERED))
+ return true;
+ xa_for_each(&pin->dpll_refs, i, par_ref)
+ if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
+ DPLL_REGISTERED))
+ return true;
+ return false;
+}
+
/**
* dpll_msg_add_pin_handle - attach pin handle attribute to a given message
* @msg: pointer to sk_buff message to attach a pin handle
@@ -127,6 +145,26 @@ dpll_msg_add_mode_supported(struct sk_buff *msg, struct dpll_device *dpll,
}
static int
+dpll_msg_add_phase_offset_monitor(struct sk_buff *msg, struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_feature_state state;
+ int ret;
+
+ if (ops->phase_offset_monitor_set && ops->phase_offset_monitor_get) {
+ ret = ops->phase_offset_monitor_get(dpll, dpll_priv(dpll),
+ &state, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_PHASE_OFFSET_MONITOR, state))
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+static int
dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
@@ -408,6 +446,47 @@ nest_cancel:
return -EMSGSIZE;
}
+static int
+dpll_msg_add_pin_ref_sync(struct sk_buff *msg, struct dpll_pin *pin,
+ struct dpll_pin_ref *ref,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
+ struct dpll_device *dpll = ref->dpll;
+ void *pin_priv, *ref_sync_pin_priv;
+ struct dpll_pin *ref_sync_pin;
+ enum dpll_pin_state state;
+ struct nlattr *nest;
+ unsigned long index;
+ int ret;
+
+ pin_priv = dpll_pin_on_dpll_priv(dpll, pin);
+ xa_for_each(&pin->ref_sync_pins, index, ref_sync_pin) {
+ if (!dpll_pin_available(ref_sync_pin))
+ continue;
+ ref_sync_pin_priv = dpll_pin_on_dpll_priv(dpll, ref_sync_pin);
+ if (WARN_ON(!ops->ref_sync_get))
+ return -EOPNOTSUPP;
+ ret = ops->ref_sync_get(pin, pin_priv, ref_sync_pin,
+ ref_sync_pin_priv, &state, extack);
+ if (ret)
+ return ret;
+ nest = nla_nest_start(msg, DPLL_A_PIN_REFERENCE_SYNC);
+ if (!nest)
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, DPLL_A_PIN_ID, ref_sync_pin->id))
+ goto nest_cancel;
+ if (nla_put_u32(msg, DPLL_A_PIN_STATE, state))
+ goto nest_cancel;
+ nla_nest_end(msg, nest);
+ }
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(msg, nest);
+ return -EMSGSIZE;
+}
+
static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
{
int fs;
@@ -552,6 +631,10 @@ dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
ret = dpll_msg_add_pin_esync(msg, pin, ref, extack);
if (ret)
return ret;
+ if (!xa_empty(&pin->ref_sync_pins))
+ ret = dpll_msg_add_pin_ref_sync(msg, pin, ref, extack);
+ if (ret)
+ return ret;
if (xa_empty(&pin->parent_refs))
ret = dpll_msg_add_pin_dplls(msg, pin, extack);
else
@@ -591,6 +674,9 @@ dpll_device_get_one(struct dpll_device *dpll, struct sk_buff *msg,
return ret;
if (nla_put_u32(msg, DPLL_A_TYPE, dpll->type))
return -EMSGSIZE;
+ ret = dpll_msg_add_phase_offset_monitor(msg, dpll, extack);
+ if (ret)
+ return ret;
return 0;
}
@@ -642,24 +728,6 @@ __dpll_device_change_ntf(struct dpll_device *dpll)
return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
}
-static bool dpll_pin_available(struct dpll_pin *pin)
-{
- struct dpll_pin_ref *par_ref;
- unsigned long i;
-
- if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
- return false;
- xa_for_each(&pin->parent_refs, i, par_ref)
- if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
- DPLL_REGISTERED))
- return true;
- xa_for_each(&pin->dpll_refs, i, par_ref)
- if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
- DPLL_REGISTERED))
- return true;
- return false;
-}
-
/**
* dpll_device_change_ntf - notify that the dpll device has been changed
* @dpll: registered dpll pointer
@@ -722,7 +790,7 @@ int dpll_pin_delete_ntf(struct dpll_pin *pin)
return dpll_pin_event_send(DPLL_CMD_PIN_DELETE_NTF, pin);
}
-static int __dpll_pin_change_ntf(struct dpll_pin *pin)
+int __dpll_pin_change_ntf(struct dpll_pin *pin)
{
return dpll_pin_event_send(DPLL_CMD_PIN_CHANGE_NTF, pin);
}
@@ -747,6 +815,31 @@ int dpll_pin_change_ntf(struct dpll_pin *pin)
EXPORT_SYMBOL_GPL(dpll_pin_change_ntf);
static int
+dpll_phase_offset_monitor_set(struct dpll_device *dpll, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_feature_state state = nla_get_u32(a), old_state;
+ int ret;
+
+ if (!(ops->phase_offset_monitor_set && ops->phase_offset_monitor_get)) {
+ NL_SET_ERR_MSG_ATTR(extack, a, "dpll device not capable of phase offset monitor");
+ return -EOPNOTSUPP;
+ }
+ ret = ops->phase_offset_monitor_get(dpll, dpll_priv(dpll), &old_state,
+ extack);
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "unable to get current state of phase offset monitor");
+ return ret;
+ }
+ if (state == old_state)
+ return 0;
+
+ return ops->phase_offset_monitor_set(dpll, dpll_priv(dpll), state,
+ extack);
+}
+
+static int
dpll_pin_freq_set(struct dpll_pin *pin, struct nlattr *a,
struct netlink_ext_ack *extack)
{
@@ -888,6 +981,108 @@ rollback:
}
static int
+dpll_pin_ref_sync_state_set(struct dpll_pin *pin,
+ unsigned long ref_sync_pin_idx,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct dpll_pin_ref *ref, *failed;
+ const struct dpll_pin_ops *ops;
+ enum dpll_pin_state old_state;
+ struct dpll_pin *ref_sync_pin;
+ struct dpll_device *dpll;
+ unsigned long i;
+ int ret;
+
+ ref_sync_pin = xa_find(&pin->ref_sync_pins, &ref_sync_pin_idx,
+ ULONG_MAX, XA_PRESENT);
+ if (!ref_sync_pin) {
+ NL_SET_ERR_MSG(extack, "reference sync pin not found");
+ return -EINVAL;
+ }
+ if (!dpll_pin_available(ref_sync_pin)) {
+ NL_SET_ERR_MSG(extack, "reference sync pin not available");
+ return -EINVAL;
+ }
+ ref = dpll_xa_ref_dpll_first(&pin->dpll_refs);
+ ASSERT_NOT_NULL(ref);
+ ops = dpll_pin_ops(ref);
+ if (!ops->ref_sync_set || !ops->ref_sync_get) {
+ NL_SET_ERR_MSG(extack, "reference sync not supported by this pin");
+ return -EOPNOTSUPP;
+ }
+ dpll = ref->dpll;
+ ret = ops->ref_sync_get(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ ref_sync_pin,
+ dpll_pin_on_dpll_priv(dpll, ref_sync_pin),
+ &old_state, extack);
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "unable to get old reference sync state");
+ return ret;
+ }
+ if (state == old_state)
+ return 0;
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ ops = dpll_pin_ops(ref);
+ dpll = ref->dpll;
+ ret = ops->ref_sync_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ ref_sync_pin,
+ dpll_pin_on_dpll_priv(dpll,
+ ref_sync_pin),
+ state, extack);
+ if (ret) {
+ failed = ref;
+ NL_SET_ERR_MSG_FMT(extack, "reference sync set failed for dpll_id:%u",
+ dpll->id);
+ goto rollback;
+ }
+ }
+ __dpll_pin_change_ntf(pin);
+
+ return 0;
+
+rollback:
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ if (ref == failed)
+ break;
+ ops = dpll_pin_ops(ref);
+ dpll = ref->dpll;
+ if (ops->ref_sync_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ ref_sync_pin,
+ dpll_pin_on_dpll_priv(dpll, ref_sync_pin),
+ old_state, extack))
+ NL_SET_ERR_MSG(extack, "set reference sync rollback failed");
+ }
+ return ret;
+}
+
+static int
+dpll_pin_ref_sync_set(struct dpll_pin *pin, struct nlattr *nest,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[DPLL_A_PIN_MAX + 1];
+ enum dpll_pin_state state;
+ u32 sync_pin_id;
+
+ nla_parse_nested(tb, DPLL_A_PIN_MAX, nest,
+ dpll_reference_sync_nl_policy, extack);
+ if (!tb[DPLL_A_PIN_ID]) {
+ NL_SET_ERR_MSG(extack, "sync pin id expected");
+ return -EINVAL;
+ }
+ sync_pin_id = nla_get_u32(tb[DPLL_A_PIN_ID]);
+
+ if (!tb[DPLL_A_PIN_STATE]) {
+ NL_SET_ERR_MSG(extack, "sync pin state expected");
+ return -EINVAL;
+ }
+ state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
+
+ return dpll_pin_ref_sync_state_set(pin, sync_pin_id, state, extack);
+}
+
+static int
dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
enum dpll_pin_state state,
struct netlink_ext_ack *extack)
@@ -1193,6 +1388,11 @@ dpll_pin_set_from_nlattr(struct dpll_pin *pin, struct genl_info *info)
if (ret)
return ret;
break;
+ case DPLL_A_PIN_REFERENCE_SYNC:
+ ret = dpll_pin_ref_sync_set(pin, a, info->extack);
+ if (ret)
+ return ret;
+ break;
}
}
@@ -1533,12 +1733,29 @@ int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
return genlmsg_reply(msg, info);
}
-int dpll_nl_device_set_doit(struct sk_buff *skb, struct genl_info *info)
+static int
+dpll_set_from_nlattr(struct dpll_device *dpll, struct genl_info *info)
{
- /* placeholder for set command */
+ int ret;
+
+ if (info->attrs[DPLL_A_PHASE_OFFSET_MONITOR]) {
+ struct nlattr *a = info->attrs[DPLL_A_PHASE_OFFSET_MONITOR];
+
+ ret = dpll_phase_offset_monitor_set(dpll, a, info->extack);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
+int dpll_nl_device_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct dpll_device *dpll = info->user_ptr[0];
+
+ return dpll_set_from_nlattr(dpll, info);
+}
+
int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
struct dpll_dump_ctx *ctx = dpll_dump_context(cb);
diff --git a/drivers/dpll/dpll_netlink.h b/drivers/dpll/dpll_netlink.h
index a9cfd55f57fc..dd28b56d27c5 100644
--- a/drivers/dpll/dpll_netlink.h
+++ b/drivers/dpll/dpll_netlink.h
@@ -11,3 +11,5 @@ int dpll_device_delete_ntf(struct dpll_device *dpll);
int dpll_pin_create_ntf(struct dpll_pin *pin);
int dpll_pin_delete_ntf(struct dpll_pin *pin);
+
+int __dpll_pin_change_ntf(struct dpll_pin *pin);
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index fe9b6893d261..9f2efaf25268 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -24,6 +24,11 @@ const struct nla_policy dpll_pin_parent_pin_nl_policy[DPLL_A_PIN_STATE + 1] = {
[DPLL_A_PIN_STATE] = NLA_POLICY_RANGE(NLA_U32, 1, 3),
};
+const struct nla_policy dpll_reference_sync_nl_policy[DPLL_A_PIN_STATE + 1] = {
+ [DPLL_A_PIN_ID] = { .type = NLA_U32, },
+ [DPLL_A_PIN_STATE] = NLA_POLICY_RANGE(NLA_U32, 1, 3),
+};
+
/* DPLL_CMD_DEVICE_ID_GET - do */
static const struct nla_policy dpll_device_id_get_nl_policy[DPLL_A_TYPE + 1] = {
[DPLL_A_MODULE_NAME] = { .type = NLA_NUL_STRING, },
@@ -37,8 +42,9 @@ static const struct nla_policy dpll_device_get_nl_policy[DPLL_A_ID + 1] = {
};
/* DPLL_CMD_DEVICE_SET - do */
-static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_ID + 1] = {
+static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_PHASE_OFFSET_MONITOR + 1] = {
[DPLL_A_ID] = { .type = NLA_U32, },
+ [DPLL_A_PHASE_OFFSET_MONITOR] = NLA_POLICY_MAX(NLA_U32, 1),
};
/* DPLL_CMD_PIN_ID_GET - do */
@@ -62,7 +68,7 @@ static const struct nla_policy dpll_pin_get_dump_nl_policy[DPLL_A_PIN_ID + 1] =
};
/* DPLL_CMD_PIN_SET - do */
-static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_ESYNC_FREQUENCY + 1] = {
+static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_REFERENCE_SYNC + 1] = {
[DPLL_A_PIN_ID] = { .type = NLA_U32, },
[DPLL_A_PIN_FREQUENCY] = { .type = NLA_U64, },
[DPLL_A_PIN_DIRECTION] = NLA_POLICY_RANGE(NLA_U32, 1, 2),
@@ -72,6 +78,7 @@ static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_ESYNC_FREQUENCY
[DPLL_A_PIN_PARENT_PIN] = NLA_POLICY_NESTED(dpll_pin_parent_pin_nl_policy),
[DPLL_A_PIN_PHASE_ADJUST] = { .type = NLA_S32, },
[DPLL_A_PIN_ESYNC_FREQUENCY] = { .type = NLA_U64, },
+ [DPLL_A_PIN_REFERENCE_SYNC] = NLA_POLICY_NESTED(dpll_reference_sync_nl_policy),
};
/* Ops table for dpll */
@@ -105,7 +112,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
.doit = dpll_nl_device_set_doit,
.post_doit = dpll_post_doit,
.policy = dpll_device_set_nl_policy,
- .maxattr = DPLL_A_ID,
+ .maxattr = DPLL_A_PHASE_OFFSET_MONITOR,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
@@ -139,7 +146,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
.doit = dpll_nl_pin_set_doit,
.post_doit = dpll_pin_post_doit,
.policy = dpll_pin_set_nl_policy,
- .maxattr = DPLL_A_PIN_ESYNC_FREQUENCY,
+ .maxattr = DPLL_A_PIN_REFERENCE_SYNC,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
};
diff --git a/drivers/dpll/dpll_nl.h b/drivers/dpll/dpll_nl.h
index f491262bee4f..3da10cfe9a6e 100644
--- a/drivers/dpll/dpll_nl.h
+++ b/drivers/dpll/dpll_nl.h
@@ -14,6 +14,7 @@
/* Common nested types */
extern const struct nla_policy dpll_pin_parent_device_nl_policy[DPLL_A_PIN_PHASE_OFFSET + 1];
extern const struct nla_policy dpll_pin_parent_pin_nl_policy[DPLL_A_PIN_STATE + 1];
+extern const struct nla_policy dpll_reference_sync_nl_policy[DPLL_A_PIN_STATE + 1];
int dpll_lock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info);
diff --git a/drivers/dpll/zl3073x/Kconfig b/drivers/dpll/zl3073x/Kconfig
new file mode 100644
index 000000000000..5bbca1400581
--- /dev/null
+++ b/drivers/dpll/zl3073x/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config ZL3073X
+ tristate "Microchip Azurite DPLL/PTP/SyncE devices" if COMPILE_TEST
+ depends on NET
+ select DPLL
+ select NET_DEVLINK
+ select REGMAP
+ help
+ This driver supports the Microchip Azurite family of DPLL/PTP/SyncE
+ devices, which provide up to 5 independent DPLL channels,
+ 10 input pins and up to 20 output pins.
+
+ To compile this driver as a module, choose M here. The module
+ will be called zl3073x.
+
+config ZL3073X_I2C
+ tristate "I2C bus implementation for Microchip Azurite devices"
+ depends on I2C && NET
+ select REGMAP_I2C
+ select ZL3073X
+ help
+ This is the I2C bus implementation for Microchip Azurite DPLL/PTP/SyncE
+ devices.
+
+ To compile this driver as a module, choose M here: the module will
+ be called zl3073x_i2c.
+
+config ZL3073X_SPI
+ tristate "SPI bus implementation for Microchip Azurite devices"
+ depends on NET && SPI
+ select REGMAP_SPI
+ select ZL3073X
+ help
+ This is the SPI bus implementation for Microchip Azurite DPLL/PTP/SyncE
+ devices.
+
+ To compile this driver as a module, choose M here: the module will
+ be called zl3073x_spi.
diff --git a/drivers/dpll/zl3073x/Makefile b/drivers/dpll/zl3073x/Makefile
new file mode 100644
index 000000000000..c3e2f02f319d
--- /dev/null
+++ b/drivers/dpll/zl3073x/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_ZL3073X) += zl3073x.o
+zl3073x-objs := core.o devlink.o dpll.o prop.o
+
+obj-$(CONFIG_ZL3073X_I2C) += zl3073x_i2c.o
+zl3073x_i2c-objs := i2c.o
+
+obj-$(CONFIG_ZL3073X_SPI) += zl3073x_spi.o
+zl3073x_spi-objs := spi.o
diff --git a/drivers/dpll/zl3073x/core.c b/drivers/dpll/zl3073x/core.c
new file mode 100644
index 000000000000..7ebcfc5ec1f0
--- /dev/null
+++ b/drivers/dpll/zl3073x/core.c
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/regmap.h>
+#include <linux/sprintf.h>
+#include <linux/string_choices.h>
+#include <linux/unaligned.h>
+#include <net/devlink.h>
+
+#include "core.h"
+#include "devlink.h"
+#include "dpll.h"
+#include "regs.h"
+
+/* Chip IDs for zl30731 */
+static const u16 zl30731_ids[] = {
+ 0x0E93,
+ 0x1E93,
+ 0x2E93,
+};
+
+const struct zl3073x_chip_info zl30731_chip_info = {
+ .ids = zl30731_ids,
+ .num_ids = ARRAY_SIZE(zl30731_ids),
+ .num_channels = 1,
+};
+EXPORT_SYMBOL_NS_GPL(zl30731_chip_info, "ZL3073X");
+
+/* Chip IDs for zl30732 */
+static const u16 zl30732_ids[] = {
+ 0x0E30,
+ 0x0E94,
+ 0x1E94,
+ 0x1F60,
+ 0x2E94,
+ 0x3FC4,
+};
+
+const struct zl3073x_chip_info zl30732_chip_info = {
+ .ids = zl30732_ids,
+ .num_ids = ARRAY_SIZE(zl30732_ids),
+ .num_channels = 2,
+};
+EXPORT_SYMBOL_NS_GPL(zl30732_chip_info, "ZL3073X");
+
+/* Chip IDs for zl30733 */
+static const u16 zl30733_ids[] = {
+ 0x0E95,
+ 0x1E95,
+ 0x2E95,
+};
+
+const struct zl3073x_chip_info zl30733_chip_info = {
+ .ids = zl30733_ids,
+ .num_ids = ARRAY_SIZE(zl30733_ids),
+ .num_channels = 3,
+};
+EXPORT_SYMBOL_NS_GPL(zl30733_chip_info, "ZL3073X");
+
+/* Chip IDs for zl30734 */
+static const u16 zl30734_ids[] = {
+ 0x0E96,
+ 0x1E96,
+ 0x2E96,
+};
+
+const struct zl3073x_chip_info zl30734_chip_info = {
+ .ids = zl30734_ids,
+ .num_ids = ARRAY_SIZE(zl30734_ids),
+ .num_channels = 4,
+};
+EXPORT_SYMBOL_NS_GPL(zl30734_chip_info, "ZL3073X");
+
+/* Chip IDs for zl30735 */
+static const u16 zl30735_ids[] = {
+ 0x0E97,
+ 0x1E97,
+ 0x2E97,
+};
+
+const struct zl3073x_chip_info zl30735_chip_info = {
+ .ids = zl30735_ids,
+ .num_ids = ARRAY_SIZE(zl30735_ids),
+ .num_channels = 5,
+};
+EXPORT_SYMBOL_NS_GPL(zl30735_chip_info, "ZL3073X");
+
+#define ZL_RANGE_OFFSET 0x80
+#define ZL_PAGE_SIZE 0x80
+#define ZL_NUM_PAGES 15
+#define ZL_PAGE_SEL 0x7F
+#define ZL_PAGE_SEL_MASK GENMASK(3, 0)
+#define ZL_NUM_REGS (ZL_NUM_PAGES * ZL_PAGE_SIZE)
+
+/* Regmap range configuration */
+static const struct regmap_range_cfg zl3073x_regmap_range = {
+ .range_min = ZL_RANGE_OFFSET,
+ .range_max = ZL_RANGE_OFFSET + ZL_NUM_REGS - 1,
+ .selector_reg = ZL_PAGE_SEL,
+ .selector_mask = ZL_PAGE_SEL_MASK,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = ZL_PAGE_SIZE,
+};
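+
+/*
+ * Illustrative mapping: device register 0x1A5 is presented to regmap
+ * as virtual address 0x1A5 + ZL_RANGE_OFFSET; the range config writes
+ * page 3 (0x1A5 / ZL_PAGE_SIZE) to the selector at ZL_PAGE_SEL and then
+ * accesses offset 0x25 (0x1A5 % ZL_PAGE_SIZE) inside the page window.
+ */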
+
+static bool
+zl3073x_is_volatile_reg(struct device *dev __maybe_unused, unsigned int reg)
+{
+ /* Only page selector is non-volatile */
+ return reg != ZL_PAGE_SEL;
+}
+
+const struct regmap_config zl3073x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = ZL_RANGE_OFFSET + ZL_NUM_REGS - 1,
+ .ranges = &zl3073x_regmap_range,
+ .num_ranges = 1,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = zl3073x_is_volatile_reg,
+};
+EXPORT_SYMBOL_NS_GPL(zl3073x_regmap_config, "ZL3073X");
+
+/**
+ * zl3073x_ref_freq_factorize - factorize given frequency
+ * @freq: input frequency
+ * @base: base frequency
+ * @mult: multiplier
+ *
+ * Checks if the given frequency can be factorized using one of the
+ * supported base frequencies. If so the base frequency and multiplier
+ * are stored into appropriate parameters if they are not NULL.
+ *
+ * Return: 0 on success, -EINVAL if the frequency cannot be factorized
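+ *
+ * Worked example (illustrative): freq = 156250 matches base = 5 (the
+ * first table entry that divides it with a quotient <= U16_MAX), so
+ * *base = 5 and *mult = 31250; a prime like freq = 65537 cannot be
+ * factorized and yields -EINVAL.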
+ */
+int
+zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult)
+{
+ static const u16 base_freqs[] = {
+ 1, 2, 4, 5, 8, 10, 16, 20, 25, 32, 40, 50, 64, 80, 100, 125,
+ 128, 160, 200, 250, 256, 320, 400, 500, 625, 640, 800, 1000,
+ 1250, 1280, 1600, 2000, 2500, 3125, 3200, 4000, 5000, 6250,
+ 6400, 8000, 10000, 12500, 15625, 16000, 20000, 25000, 31250,
+ 32000, 40000, 50000, 62500,
+ };
+ u32 div;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(base_freqs); i++) {
+ div = freq / base_freqs[i];
+
+ if (div <= U16_MAX && (freq % base_freqs[i]) == 0) {
+ if (base)
+ *base = base_freqs[i];
+ if (mult)
+ *mult = div;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static bool
+zl3073x_check_reg(struct zl3073x_dev *zldev, unsigned int reg, size_t size)
+{
+ /* Check that multiop lock is held when accessing registers
+ * from page 10 and above.
+ */
+ if (ZL_REG_PAGE(reg) >= 10)
+ lockdep_assert_held(&zldev->multiop_lock);
+
+ /* Check the index is in valid range for indexed register */
+ if (ZL_REG_OFFSET(reg) > ZL_REG_MAX_OFFSET(reg)) {
+ dev_err(zldev->dev, "Index out of range for reg 0x%04lx\n",
+ ZL_REG_ADDR(reg));
+ return false;
+ }
+ /* Check the requested size corresponds to register size */
+ if (ZL_REG_SIZE(reg) != size) {
+ dev_err(zldev->dev, "Invalid size %zu for reg 0x%04lx\n",
+ size, ZL_REG_ADDR(reg));
+ return false;
+ }
+
+ return true;
+}
+
+static int
+zl3073x_read_reg(struct zl3073x_dev *zldev, unsigned int reg, void *val,
+ size_t size)
+{
+ int rc;
+
+ if (!zl3073x_check_reg(zldev, reg, size))
+ return -EINVAL;
+
+ /* Map the register address to virtual range */
+ reg = ZL_REG_ADDR(reg) + ZL_RANGE_OFFSET;
+
+ rc = regmap_bulk_read(zldev->regmap, reg, val, size);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to read reg 0x%04x: %pe\n", reg,
+ ERR_PTR(rc));
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_write_reg(struct zl3073x_dev *zldev, unsigned int reg, const void *val,
+ size_t size)
+{
+ int rc;
+
+ if (!zl3073x_check_reg(zldev, reg, size))
+ return -EINVAL;
+
+ /* Map the register address to virtual range */
+ reg = ZL_REG_ADDR(reg) + ZL_RANGE_OFFSET;
+
+ rc = regmap_bulk_write(zldev->regmap, reg, val, size);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to write reg 0x%04x: %pe\n", reg,
+ ERR_PTR(rc));
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_read_u8 - read value from 8bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to read from
+ * @val: pointer to store the read value
+ *
+ * Reads value from given 8bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_read_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 *val)
+{
+ return zl3073x_read_reg(zldev, reg, val, sizeof(*val));
+}
+
+/**
+ * zl3073x_write_u8 - write value to 8bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to write to
+ * @val: value to write
+ *
+ * Writes value into given 8bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_write_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 val)
+{
+ return zl3073x_write_reg(zldev, reg, &val, sizeof(val));
+}
+
+/**
+ * zl3073x_read_u16 - read value from 16bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to read from
+ * @val: pointer to store the read value
+ *
+ * Reads value from given 16bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_read_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 *val)
+{
+ int rc;
+
+ rc = zl3073x_read_reg(zldev, reg, val, sizeof(*val));
+ if (!rc)
+ be16_to_cpus(val);
+
+ return rc;
+}
+
+/**
+ * zl3073x_write_u16 - write value to 16bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to write to
+ * @val: value to write
+ *
+ * Writes value into given 16bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_write_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 val)
+{
+ cpu_to_be16s(&val);
+
+ return zl3073x_write_reg(zldev, reg, &val, sizeof(val));
+}
+
+/**
+ * zl3073x_read_u32 - read value from 32bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to read from
+ * @val: pointer to store the read value
+ *
+ * Reads value from given 32bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_read_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 *val)
+{
+ int rc;
+
+ rc = zl3073x_read_reg(zldev, reg, val, sizeof(*val));
+ if (!rc)
+ be32_to_cpus(val);
+
+ return rc;
+}
+
+/**
+ * zl3073x_write_u32 - write value to 32bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to write to
+ * @val: value to write
+ *
+ * Writes value into given 32bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_write_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 val)
+{
+ cpu_to_be32s(&val);
+
+ return zl3073x_write_reg(zldev, reg, &val, sizeof(val));
+}
+
+/**
+ * zl3073x_read_u48 - read value from 48bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to read from
+ * @val: pointer to store the read value
+ *
+ * Reads value from given 48bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_read_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 *val)
+{
+ u8 buf[6];
+ int rc;
+
+ rc = zl3073x_read_reg(zldev, reg, buf, sizeof(buf));
+ if (!rc)
+ *val = get_unaligned_be48(buf);
+
+ return rc;
+}
+
+/**
+ * zl3073x_write_u48 - write value to 48bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to write to
+ * @val: value to write
+ *
+ * Writes value into given 48bit register.
+ * The value must be in the interval <S48_MIN, U48_MAX>.
+ *
+ * Returns: 0 on success, <0 on error
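+ *
+ * Example (illustrative): val = (u64)-1 has bits 63..47 all set and is
+ * accepted as the negative s48 value -1, while 0x0001000000000000 (bit
+ * 48 set) is above U48_MAX yet below the negative range and is
+ * rejected.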
+ */
+int zl3073x_write_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 val)
+{
+ u8 buf[6];
+
+ /* Check the value belongs to <S48_MIN, U48_MAX>.
+ * Any negative value >= S48_MIN has bits 47..63 all set in its u64
+ * representation.
+ */
+ if (val > GENMASK_ULL(47, 0) && val < GENMASK_ULL(63, 47)) {
+ dev_err(zldev->dev, "Value 0x%0llx out of range\n", val);
+ return -EINVAL;
+ }
+
+ put_unaligned_be48(val, buf);
+
+ return zl3073x_write_reg(zldev, reg, buf, sizeof(buf));
+}
+
+/**
+ * zl3073x_poll_zero_u8 - wait for register to be cleared by device
+ * @zldev: zl3073x device pointer
+ * @reg: register to poll (has to be 8bit register)
+ * @mask: bit mask for polling
+ *
+ * Waits for the bits specified by @mask in register @reg to be cleared
+ * by the device.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_poll_zero_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 mask)
+{
+ /* Register polling sleep & timeout */
+#define ZL_POLL_SLEEP_US 10
+#define ZL_POLL_TIMEOUT_US 2000000
+ unsigned int val;
+
+ /* Check the register is 8bit */
+ if (ZL_REG_SIZE(reg) != 1) {
+ dev_err(zldev->dev, "Invalid reg 0x%04lx size for polling\n",
+ ZL_REG_ADDR(reg));
+ return -EINVAL;
+ }
+
+ /* Map the register address to virtual range */
+ reg = ZL_REG_ADDR(reg) + ZL_RANGE_OFFSET;
+
+ return regmap_read_poll_timeout(zldev->regmap, reg, val, !(val & mask),
+ ZL_POLL_SLEEP_US, ZL_POLL_TIMEOUT_US);
+}
+
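+/*
+ * zl3073x_mb_op - perform a mailbox operation
+ *
+ * A sketch of the flow (illustrative): select the target entity by
+ * writing a bitmask to @mask_reg, trigger the semaphore register
+ * @op_reg with @op_val and poll until the device clears it, e.g. to
+ * latch the configuration of input reference @index for reading:
+ *
+ *	rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ *			   ZL_REG_REF_MB_MASK, BIT(index));
+ */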
+int zl3073x_mb_op(struct zl3073x_dev *zldev, unsigned int op_reg, u8 op_val,
+ unsigned int mask_reg, u16 mask_val)
+{
+ int rc;
+
+ /* Set mask for the operation */
+ rc = zl3073x_write_u16(zldev, mask_reg, mask_val);
+ if (rc)
+ return rc;
+
+ /* Trigger the operation */
+ rc = zl3073x_write_u8(zldev, op_reg, op_val);
+ if (rc)
+ return rc;
+
+ /* Wait for the operation to actually finish */
+ return zl3073x_poll_zero_u8(zldev, op_reg, op_val);
+}
+
+/**
+ * zl3073x_ref_state_fetch - get input reference state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: input reference index to fetch state for
+ *
+ * Fetches the invariant information for the given input reference and
+ * stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_ref_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_ref *input = &zldev->ref[index];
+ u8 ref_config;
+ int rc;
+
+ /* If the input is differential then the configuration for N-pin
+ * reference is ignored and P-pin config is used for both.
+ */
+ if (zl3073x_is_n_pin(index) &&
+ zl3073x_ref_is_diff(zldev, index - 1)) {
+ input->enabled = zl3073x_ref_is_enabled(zldev, index - 1);
+ input->diff = true;
+
+ return 0;
+ }
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Read ref_config register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_CONFIG, &ref_config);
+ if (rc)
+ return rc;
+
+ input->enabled = FIELD_GET(ZL_REF_CONFIG_ENABLE, ref_config);
+ input->diff = FIELD_GET(ZL_REF_CONFIG_DIFF_EN, ref_config);
+
+ dev_dbg(zldev->dev, "REF%u is %s and configured as %s\n", index,
+ str_enabled_disabled(input->enabled),
+ input->diff ? "differential" : "single-ended");
+
+ return rc;
+}
+
+/**
+ * zl3073x_out_state_fetch - get output state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: output index to fetch state for
+ *
+ * Fetches the invariant information for the given output (not output
+ * pin) and stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_out_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_out *out = &zldev->out[index];
+ u8 output_ctrl, output_mode;
+ int rc;
+
+ /* Read output configuration */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_CTRL(index), &output_ctrl);
+ if (rc)
+ return rc;
+
+ /* Store whether the output is enabled and which synthesizer the
+ * output is connected to.
+ */
+ out->enabled = FIELD_GET(ZL_OUTPUT_CTRL_EN, output_ctrl);
+ out->synth = FIELD_GET(ZL_OUTPUT_CTRL_SYNTH_SEL, output_ctrl);
+
+ dev_dbg(zldev->dev, "OUT%u is %s and connected to SYNTH%u\n", index,
+ str_enabled_disabled(out->enabled), out->synth);
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Read output_mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &output_mode);
+ if (rc)
+ return rc;
+
+ /* Extract and store output signal format */
+ out->signal_format = FIELD_GET(ZL_OUTPUT_MODE_SIGNAL_FORMAT,
+ output_mode);
+
+ dev_dbg(zldev->dev, "OUT%u has signal format 0x%02x\n", index,
+ out->signal_format);
+
+ return rc;
+}
+
+/**
+ * zl3073x_synth_state_fetch - get synth state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: synth index to fetch state for
+ *
+ * Fetches the invariant information for the given synthesizer and
+ * stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_synth_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_synth *synth = &zldev->synth[index];
+ u16 base, m, n;
+ u8 synth_ctrl;
+ u32 mult;
+ int rc;
+
+ /* Read synth control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_SYNTH_CTRL(index), &synth_ctrl);
+ if (rc)
+ return rc;
+
+ /* Store whether the synth is enabled and which DPLL channel drives
+ * the synth.
+ */
+ synth->enabled = FIELD_GET(ZL_SYNTH_CTRL_EN, synth_ctrl);
+ synth->dpll = FIELD_GET(ZL_SYNTH_CTRL_DPLL_SEL, synth_ctrl);
+
+ dev_dbg(zldev->dev, "SYNTH%u is %s and driven by DPLL%u\n", index,
+ str_enabled_disabled(synth->enabled), synth->dpll);
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read synth configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_SYNTH_MB_SEM, ZL_SYNTH_MB_SEM_RD,
+ ZL_REG_SYNTH_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* The output frequency is determined by the following formula:
+ * base * multiplier * numerator / denominator
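+ *
+ * (illustrative: base = 1000, multiplier = 25000, numerator = 1,
+ * denominator = 1 gives a 25 MHz output)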
+ *
+ * Read registers with these values
+ */
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_BASE, &base);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_SYNTH_FREQ_MULT, &mult);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_M, &m);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_N, &n);
+ if (rc)
+ return rc;
+
+ /* Check denominator for zero to avoid div by 0 */
+ if (!n) {
+ dev_err(zldev->dev,
+ "Zero divisor for SYNTH%u retrieved from device\n",
+ index);
+ return -EINVAL;
+ }
+
+ /* Compute and store synth frequency */
+ zldev->synth[index].freq = div_u64(mul_u32_u32(base * m, mult), n);
+
+ dev_dbg(zldev->dev, "SYNTH%u frequency: %u Hz\n", index,
+ zldev->synth[index].freq);
+
+ return rc;
+}
+
+static int
+zl3073x_dev_state_fetch(struct zl3073x_dev *zldev)
+{
+ int rc;
+ u8 i;
+
+ for (i = 0; i < ZL3073X_NUM_REFS; i++) {
+ rc = zl3073x_ref_state_fetch(zldev, i);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to fetch input state: %pe\n",
+ ERR_PTR(rc));
+ return rc;
+ }
+ }
+
+ for (i = 0; i < ZL3073X_NUM_SYNTHS; i++) {
+ rc = zl3073x_synth_state_fetch(zldev, i);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to fetch synth state: %pe\n",
+ ERR_PTR(rc));
+ return rc;
+ }
+ }
+
+ for (i = 0; i < ZL3073X_NUM_OUTS; i++) {
+ rc = zl3073x_out_state_fetch(zldev, i);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to fetch output state: %pe\n",
+ ERR_PTR(rc));
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * zl3073x_ref_phase_offsets_update - update reference phase offsets
+ * @zldev: pointer to zl3073x_dev structure
+ * @channel: DPLL channel number or -1
+ *
+ * The function asks the device to update the phase offset latch registers
+ * with the latest measured values. There are 2 sets of latch registers:
+ *
+ * 1) Up to 5 DPLL-to-connected-ref registers that contain phase offset
+ * values between particular DPLL channel and its *connected* input
+ * reference.
+ *
+ * 2) 10 selected-DPLL-to-all-ref registers that contain phase offset values
+ * between selected DPLL channel and all input references.
+ *
+ * If the caller is interested in 2) then it has to pass DPLL channel number
+ * in @channel parameter. If it is interested only in 1) then it should pass
+ * @channel parameter with value of -1.
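+ *
+ * For example, the periodic monitoring work below calls this function
+ * with @channel == -1 to refresh only the DPLL-to-connected-ref set,
+ * while a caller interested in a particular channel passes its number.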
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_ref_phase_offsets_update(struct zl3073x_dev *zldev, int channel)
+{
+ int rc;
+
+ /* Per datasheet we have to wait for 'dpll_ref_phase_err_rqst_rd'
+ * to be zero to ensure that the measured data are coherent.
+ */
+ rc = zl3073x_poll_zero_u8(zldev, ZL_REG_REF_PHASE_ERR_READ_RQST,
+ ZL_REF_PHASE_ERR_READ_RQST_RD);
+ if (rc)
+ return rc;
+
+ /* Select DPLL channel if it is specified */
+ if (channel != -1) {
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_IDX, channel);
+ if (rc)
+ return rc;
+ }
+
+ /* Request to update phase offsets measurement values */
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_PHASE_ERR_READ_RQST,
+ ZL_REF_PHASE_ERR_READ_RQST_RD);
+ if (rc)
+ return rc;
+
+ /* Wait for finish */
+ return zl3073x_poll_zero_u8(zldev, ZL_REG_REF_PHASE_ERR_READ_RQST,
+ ZL_REF_PHASE_ERR_READ_RQST_RD);
+}
+
+/**
+ * zl3073x_ref_ffo_update - update reference fractional frequency offsets
+ * @zldev: pointer to zl3073x_dev structure
+ *
+ * The function asks the device to update the fractional frequency offset
+ * latch registers with the latest measured values, then reads them and
+ * stores the results in the per-reference state.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_ref_ffo_update(struct zl3073x_dev *zldev)
+{
+ int i, rc;
+
+ /* Per datasheet we have to wait for 'ref_freq_meas_ctrl' to be zero
+ * to ensure that the measured data are coherent.
+ */
+ rc = zl3073x_poll_zero_u8(zldev, ZL_REG_REF_FREQ_MEAS_CTRL,
+ ZL_REF_FREQ_MEAS_CTRL);
+ if (rc)
+ return rc;
+
+ /* Select all references for measurement */
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_FREQ_MEAS_MASK_3_0,
+ GENMASK(7, 0)); /* REF0P..REF3N */
+ if (rc)
+ return rc;
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_FREQ_MEAS_MASK_4,
+ GENMASK(1, 0)); /* REF4P..REF4N */
+ if (rc)
+ return rc;
+
+ /* Request frequency offset measurement */
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_FREQ_MEAS_CTRL,
+ ZL_REF_FREQ_MEAS_CTRL_REF_FREQ_OFF);
+ if (rc)
+ return rc;
+
+ /* Wait for finish */
+ rc = zl3073x_poll_zero_u8(zldev, ZL_REG_REF_FREQ_MEAS_CTRL,
+ ZL_REF_FREQ_MEAS_CTRL);
+ if (rc)
+ return rc;
+
+ /* Read DPLL-to-REFx frequency offset measurements */
+ for (i = 0; i < ZL3073X_NUM_REFS; i++) {
+ s32 value;
+
+ /* Read value stored in units of 2^-32 signed */
+ rc = zl3073x_read_u32(zldev, ZL_REG_REF_FREQ(i), &value);
+ if (rc)
+ return rc;
+
+ /* Convert to ppm -> ffo = (10^6 * value) / 2^32 */
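+ /* (E.g. a raw value of 42949673, roughly 0.01 * 2^32, yields
+ * 10^6 * 0.01 = 10000 ppm, i.e. a 1% frequency offset.)
+ */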
+ zldev->ref[i].ffo = mul_s64_u64_shr(value, 1000000, 32);
+ }
+
+ return 0;
+}
+
+static void
+zl3073x_dev_periodic_work(struct kthread_work *work)
+{
+ struct zl3073x_dev *zldev = container_of(work, struct zl3073x_dev,
+ work.work);
+ struct zl3073x_dpll *zldpll;
+ int rc;
+
+ /* Update DPLL-to-connected-ref phase offsets registers */
+ rc = zl3073x_ref_phase_offsets_update(zldev, -1);
+ if (rc)
+ dev_warn(zldev->dev, "Failed to update phase offsets: %pe\n",
+ ERR_PTR(rc));
+
+ /* Update references' fractional frequency offsets */
+ rc = zl3073x_ref_ffo_update(zldev);
+ if (rc)
+ dev_warn(zldev->dev,
+ "Failed to update fractional frequency offsets: %pe\n",
+ ERR_PTR(rc));
+
+ list_for_each_entry(zldpll, &zldev->dplls, list)
+ zl3073x_dpll_changes_check(zldpll);
+
+ /* Run twice a second */
+ kthread_queue_delayed_work(zldev->kworker, &zldev->work,
+ msecs_to_jiffies(500));
+}
+
+static void zl3073x_dev_dpll_fini(void *ptr)
+{
+ struct zl3073x_dpll *zldpll, *next;
+ struct zl3073x_dev *zldev = ptr;
+
+ /* Stop monitoring thread */
+ if (zldev->kworker) {
+ kthread_cancel_delayed_work_sync(&zldev->work);
+ kthread_destroy_worker(zldev->kworker);
+ zldev->kworker = NULL;
+ }
+
+ /* Release DPLLs */
+ list_for_each_entry_safe(zldpll, next, &zldev->dplls, list) {
+ zl3073x_dpll_unregister(zldpll);
+ list_del(&zldpll->list);
+ zl3073x_dpll_free(zldpll);
+ }
+}
+
+static int
+zl3073x_devm_dpll_init(struct zl3073x_dev *zldev, u8 num_dplls)
+{
+ struct kthread_worker *kworker;
+ struct zl3073x_dpll *zldpll;
+ unsigned int i;
+ int rc;
+
+ INIT_LIST_HEAD(&zldev->dplls);
+
+ /* Initialize all DPLLs */
+ for (i = 0; i < num_dplls; i++) {
+ zldpll = zl3073x_dpll_alloc(zldev, i);
+ if (IS_ERR(zldpll)) {
+ dev_err_probe(zldev->dev, PTR_ERR(zldpll),
+ "Failed to alloc DPLL%u\n", i);
+ rc = PTR_ERR(zldpll);
+ goto error;
+ }
+
+ rc = zl3073x_dpll_register(zldpll);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc,
+ "Failed to register DPLL%u\n", i);
+ zl3073x_dpll_free(zldpll);
+ goto error;
+ }
+
+ list_add_tail(&zldpll->list, &zldev->dplls);
+ }
+
+ /* Perform initial firmware fine phase correction */
+ rc = zl3073x_dpll_init_fine_phase_adjust(zldev);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc,
+ "Failed to init fine phase correction\n");
+ goto error;
+ }
+
+ /* Initialize monitoring thread */
+ kthread_init_delayed_work(&zldev->work, zl3073x_dev_periodic_work);
+ kworker = kthread_run_worker(0, "zl3073x-%s", dev_name(zldev->dev));
+ if (IS_ERR(kworker)) {
+ rc = PTR_ERR(kworker);
+ goto error;
+ }
+
+ zldev->kworker = kworker;
+ kthread_queue_delayed_work(zldev->kworker, &zldev->work, 0);
+
+ /* Add devres action to release DPLL related resources */
+ rc = devm_add_action_or_reset(zldev->dev, zl3073x_dev_dpll_fini, zldev);
+ if (rc)
+ goto error;
+
+ return 0;
+
+error:
+ zl3073x_dev_dpll_fini(zldev);
+
+ return rc;
+}
+
+/**
+ * zl3073x_dev_phase_meas_setup - setup phase offset measurement
+ * @zldev: pointer to zl3073x_dev structure
+ * @num_channels: number of DPLL channels
+ *
+ * Enable phase offset measurement block, set measurement averaging factor
+ * and enable DPLL-to-its-ref phase measurement for all DPLLs.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dev_phase_meas_setup(struct zl3073x_dev *zldev, int num_channels)
+{
+ u8 dpll_meas_ctrl, mask;
+ int i, rc;
+
+ /* Read DPLL phase measurement control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Setup phase measurement averaging factor */
+ dpll_meas_ctrl &= ~ZL_DPLL_MEAS_CTRL_AVG_FACTOR;
+ dpll_meas_ctrl |= FIELD_PREP(ZL_DPLL_MEAS_CTRL_AVG_FACTOR, 3);
+
+ /* Enable DPLL measurement block */
+ dpll_meas_ctrl |= ZL_DPLL_MEAS_CTRL_EN;
+
+ /* Update phase measurement control register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Enable DPLL-to-connected-ref measurement for each channel */
+ for (i = 0, mask = 0; i < num_channels; i++)
+ mask |= BIT(i);
+
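+ /* E.g. for num_channels == 5 the resulting mask is 0x1f, enabling
+ * the measurement for DPLL0..DPLL4.
+ */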
+ return zl3073x_write_u8(zldev, ZL_REG_DPLL_PHASE_ERR_READ_MASK, mask);
+}
+
+/**
+ * zl3073x_dev_probe - initialize zl3073x device
+ * @zldev: pointer to zl3073x device
+ * @chip_info: chip info based on compatible
+ *
+ * Common initialization of zl3073x device structure.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_dev_probe(struct zl3073x_dev *zldev,
+ const struct zl3073x_chip_info *chip_info)
+{
+ u16 id, revision, fw_ver;
+ unsigned int i;
+ u32 cfg_ver;
+ int rc;
+
+ /* Read chip ID */
+ rc = zl3073x_read_u16(zldev, ZL_REG_ID, &id);
+ if (rc)
+ return rc;
+
+ /* Check it matches */
+ for (i = 0; i < chip_info->num_ids; i++) {
+ if (id == chip_info->ids[i])
+ break;
+ }
+
+ if (i == chip_info->num_ids) {
+ return dev_err_probe(zldev->dev, -ENODEV,
+ "Unknown or non-match chip ID: 0x%0x\n",
+ id);
+ }
+
+ /* Read revision, firmware version and custom config version */
+ rc = zl3073x_read_u16(zldev, ZL_REG_REVISION, &revision);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_FW_VER, &fw_ver);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u32(zldev, ZL_REG_CUSTOM_CONFIG_VER, &cfg_ver);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev, "ChipID(%X), ChipRev(%X), FwVer(%u)\n", id,
+ revision, fw_ver);
+ dev_dbg(zldev->dev, "Custom config version: %lu.%lu.%lu.%lu\n",
+ FIELD_GET(GENMASK(31, 24), cfg_ver),
+ FIELD_GET(GENMASK(23, 16), cfg_ver),
+ FIELD_GET(GENMASK(15, 8), cfg_ver),
+ FIELD_GET(GENMASK(7, 0), cfg_ver));
+
+ /* Generate random clock ID as the device has no property that
+ * could be used for this purpose. A user can later change this value
+ * using devlink.
+ */
+ zldev->clock_id = get_random_u64();
+
+ /* Initialize mutex for operations where multiple reads, writes
+ * and/or polls are required to be done atomically.
+ */
+ rc = devm_mutex_init(zldev->dev, &zldev->multiop_lock);
+ if (rc)
+ return dev_err_probe(zldev->dev, rc,
+ "Failed to initialize mutex\n");
+
+ /* Fetch device state */
+ rc = zl3073x_dev_state_fetch(zldev);
+ if (rc)
+ return rc;
+
+ /* Setup phase offset measurement block */
+ rc = zl3073x_dev_phase_meas_setup(zldev, chip_info->num_channels);
+ if (rc)
+ return dev_err_probe(zldev->dev, rc,
+ "Failed to setup phase measurement\n");
+
+ /* Register DPLL channels */
+ rc = zl3073x_devm_dpll_init(zldev, chip_info->num_channels);
+ if (rc)
+ return rc;
+
+ /* Register the devlink instance and parameters */
+ rc = zl3073x_devlink_register(zldev);
+ if (rc)
+ return dev_err_probe(zldev->dev, rc,
+ "Failed to register devlink instance\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(zl3073x_dev_probe, "ZL3073X");
+
+MODULE_AUTHOR("Ivan Vecera <ivecera@redhat.com>");
+MODULE_DESCRIPTION("Microchip ZL3073x core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dpll/zl3073x/core.h b/drivers/dpll/zl3073x/core.h
new file mode 100644
index 000000000000..71af2c800110
--- /dev/null
+++ b/drivers/dpll/zl3073x/core.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_CORE_H
+#define _ZL3073X_CORE_H
+
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "regs.h"
+
+struct device;
+struct regmap;
+struct zl3073x_dpll;
+
+/*
+ * Hardware limits for ZL3073x chip family
+ */
+#define ZL3073X_MAX_CHANNELS 5
+#define ZL3073X_NUM_REFS 10
+#define ZL3073X_NUM_OUTS 10
+#define ZL3073X_NUM_SYNTHS 5
+#define ZL3073X_NUM_INPUT_PINS ZL3073X_NUM_REFS
+#define ZL3073X_NUM_OUTPUT_PINS (ZL3073X_NUM_OUTS * 2)
+#define ZL3073X_NUM_PINS (ZL3073X_NUM_INPUT_PINS + \
+ ZL3073X_NUM_OUTPUT_PINS)
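+
+/* With the limits above this gives 10 input pins plus 2 * 10 = 20 output
+ * pins, i.e. 30 pins in total.
+ */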
+
+/**
+ * struct zl3073x_ref - input reference invariant info
+ * @enabled: input reference is enabled or disabled
+ * @diff: true if input reference is differential
+ * @ffo: current fractional frequency offset
+ */
+struct zl3073x_ref {
+ bool enabled;
+ bool diff;
+ s64 ffo;
+};
+
+/**
+ * struct zl3073x_out - output invariant info
+ * @enabled: out is enabled or disabled
+ * @synth: synthesizer the out is connected to
+ * @signal_format: out signal format
+ */
+struct zl3073x_out {
+ bool enabled;
+ u8 synth;
+ u8 signal_format;
+};
+
+/**
+ * struct zl3073x_synth - synthesizer invariant info
+ * @freq: synthesizer frequency
+ * @dpll: ID of DPLL the synthesizer is driven by
+ * @enabled: synth is enabled or disabled
+ */
+struct zl3073x_synth {
+ u32 freq;
+ u8 dpll;
+ bool enabled;
+};
+
+/**
+ * struct zl3073x_dev - zl3073x device
+ * @dev: pointer to device
+ * @regmap: regmap to access device registers
+ * @multiop_lock: to serialize multiple register operations
+ * @clock_id: clock id of the device
+ * @ref: array of input references' invariants
+ * @out: array of outs' invariants
+ * @synth: array of synths' invariants
+ * @dplls: list of DPLLs
+ * @kworker: thread for periodic work
+ * @work: periodic work
+ */
+struct zl3073x_dev {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex multiop_lock;
+ u64 clock_id;
+
+ /* Invariants */
+ struct zl3073x_ref ref[ZL3073X_NUM_REFS];
+ struct zl3073x_out out[ZL3073X_NUM_OUTS];
+ struct zl3073x_synth synth[ZL3073X_NUM_SYNTHS];
+
+ /* DPLL channels */
+ struct list_head dplls;
+
+ /* Monitor */
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work work;
+};
+
+struct zl3073x_chip_info {
+ const u16 *ids;
+ size_t num_ids;
+ int num_channels;
+};
+
+extern const struct zl3073x_chip_info zl30731_chip_info;
+extern const struct zl3073x_chip_info zl30732_chip_info;
+extern const struct zl3073x_chip_info zl30733_chip_info;
+extern const struct zl3073x_chip_info zl30734_chip_info;
+extern const struct zl3073x_chip_info zl30735_chip_info;
+extern const struct regmap_config zl3073x_regmap_config;
+
+struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev);
+int zl3073x_dev_probe(struct zl3073x_dev *zldev,
+ const struct zl3073x_chip_info *chip_info);
+
+/**********************
+ * Registers operations
+ **********************/
+
+int zl3073x_mb_op(struct zl3073x_dev *zldev, unsigned int op_reg, u8 op_val,
+ unsigned int mask_reg, u16 mask_val);
+int zl3073x_poll_zero_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 mask);
+int zl3073x_read_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 *val);
+int zl3073x_read_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 *val);
+int zl3073x_read_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 *val);
+int zl3073x_read_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 *val);
+int zl3073x_write_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 val);
+int zl3073x_write_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 val);
+int zl3073x_write_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 val);
+int zl3073x_write_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 val);
+
+/*****************
+ * Misc operations
+ *****************/
+
+int zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult);
+int zl3073x_ref_phase_offsets_update(struct zl3073x_dev *zldev, int channel);
+
+static inline bool
+zl3073x_is_n_pin(u8 id)
+{
+ /* P-pin ids are even while N-pin ids are odd */
+ return id & 1;
+}
+
+static inline bool
+zl3073x_is_p_pin(u8 id)
+{
+ return !zl3073x_is_n_pin(id);
+}
+
+/**
+ * zl3073x_input_pin_ref_get - get reference for given input pin
+ * @id: input pin id
+ *
+ * Return: reference id for the given input pin
+ */
+static inline u8
+zl3073x_input_pin_ref_get(u8 id)
+{
+ return id;
+}
+
+/**
+ * zl3073x_output_pin_out_get - get output for the given output pin
+ * @id: output pin id
+ *
+ * Return: output id for the given output pin
+ */
+static inline u8
+zl3073x_output_pin_out_get(u8 id)
+{
+ /* Each output pin pair shares a single output */
+ return id / 2;
+}
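+
+/* For example, output pins 6 and 7 both map to output 3; pin 6 (even id)
+ * is the P-pin and pin 7 (odd id) is the N-pin of the pair.
+ */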
+
+/**
+ * zl3073x_ref_ffo_get - get current fractional frequency offset
+ * @zldev: pointer to zl3073x device
+ * @index: input reference index
+ *
+ * Return: the latest measured fractional frequency offset
+ */
+static inline s64
+zl3073x_ref_ffo_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->ref[index].ffo;
+}
+
+/**
+ * zl3073x_ref_is_diff - check if the given input reference is differential
+ * @zldev: pointer to zl3073x device
+ * @index: input reference index
+ *
+ * Return: true if reference is differential, false if reference is single-ended
+ */
+static inline bool
+zl3073x_ref_is_diff(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->ref[index].diff;
+}
+
+/**
+ * zl3073x_ref_is_enabled - check if the given input reference is enabled
+ * @zldev: pointer to zl3073x device
+ * @index: input reference index
+ *
+ * Return: true if input reference is enabled, false otherwise
+ */
+static inline bool
+zl3073x_ref_is_enabled(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->ref[index].enabled;
+}
+
+/**
+ * zl3073x_synth_dpll_get - get DPLL ID the synth is driven by
+ * @zldev: pointer to zl3073x device
+ * @index: synth index
+ *
+ * Return: ID of DPLL the given synthesizer is driven by
+ */
+static inline u8
+zl3073x_synth_dpll_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->synth[index].dpll;
+}
+
+/**
+ * zl3073x_synth_freq_get - get synth current freq
+ * @zldev: pointer to zl3073x device
+ * @index: synth index
+ *
+ * Return: frequency of given synthesizer
+ */
+static inline u32
+zl3073x_synth_freq_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->synth[index].freq;
+}
+
+/**
+ * zl3073x_synth_is_enabled - check if the given synth is enabled
+ * @zldev: pointer to zl3073x device
+ * @index: synth index
+ *
+ * Return: true if synth is enabled, false otherwise
+ */
+static inline bool
+zl3073x_synth_is_enabled(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->synth[index].enabled;
+}
+
+/**
+ * zl3073x_out_synth_get - get synth connected to given output
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: index of synth connected to given output.
+ */
+static inline u8
+zl3073x_out_synth_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->out[index].synth;
+}
+
+/**
+ * zl3073x_out_is_enabled - check if the given output is enabled
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: true if the output is enabled, false otherwise
+ */
+static inline bool
+zl3073x_out_is_enabled(struct zl3073x_dev *zldev, u8 index)
+{
+ u8 synth;
+
+ /* Output is enabled only if associated synth is enabled */
+ synth = zl3073x_out_synth_get(zldev, index);
+ if (zl3073x_synth_is_enabled(zldev, synth))
+ return zldev->out[index].enabled;
+
+ return false;
+}
+
+/**
+ * zl3073x_out_signal_format_get - get output signal format
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: signal format of given output
+ */
+static inline u8
+zl3073x_out_signal_format_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return zldev->out[index].signal_format;
+}
+
+/**
+ * zl3073x_out_dpll_get - get DPLL ID the output is driven by
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: ID of DPLL the given output is driven by
+ */
+static inline
+u8 zl3073x_out_dpll_get(struct zl3073x_dev *zldev, u8 index)
+{
+ u8 synth;
+
+ /* Get synthesizer connected to given output */
+ synth = zl3073x_out_synth_get(zldev, index);
+
+ /* Return DPLL that drives the synth */
+ return zl3073x_synth_dpll_get(zldev, synth);
+}
+
+/**
+ * zl3073x_out_is_diff - check if the given output is differential
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: true if output is differential, false if output is single-ended
+ */
+static inline bool
+zl3073x_out_is_diff(struct zl3073x_dev *zldev, u8 index)
+{
+ switch (zl3073x_out_signal_format_get(zldev, index)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LVDS:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_DIFF:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LOWVCM:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
+ * zl3073x_output_pin_is_enabled - check if the given output pin is enabled
+ * @zldev: pointer to zl3073x device
+ * @id: output pin id
+ *
+ * Checks that the output associated with the given pin is enabled and
+ * that its signal format enables the given pin.
+ *
+ * Return: true if output pin is enabled, false if output pin is disabled
+ */
+static inline bool
+zl3073x_output_pin_is_enabled(struct zl3073x_dev *zldev, u8 id)
+{
+ u8 output = zl3073x_output_pin_out_get(id);
+
+ /* Check if the whole output is enabled */
+ if (!zl3073x_out_is_enabled(zldev, output))
+ return false;
+
+ /* Check signal format */
+ switch (zl3073x_out_signal_format_get(zldev, output)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_DISABLED:
+ /* Both output pins are disabled by signal format */
+ return false;
+
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_1P:
+ /* Output is one single ended P-pin output */
+ if (zl3073x_is_n_pin(id))
+ return false;
+ break;
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_1N:
+ /* Output is one single ended N-pin output */
+ if (zl3073x_is_p_pin(id))
+ return false;
+ break;
+ default:
+ /* For other format both pins are enabled */
+ break;
+ }
+
+ return true;
+}
+
+#endif /* _ZL3073X_CORE_H */
diff --git a/drivers/dpll/zl3073x/devlink.c b/drivers/dpll/zl3073x/devlink.c
new file mode 100644
index 000000000000..7e7fe726ee37
--- /dev/null
+++ b/drivers/dpll/zl3073x/devlink.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/device/devres.h>
+#include <linux/netlink.h>
+#include <linux/sprintf.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "core.h"
+#include "devlink.h"
+#include "dpll.h"
+#include "regs.h"
+
+/**
+ * zl3073x_devlink_info_get - Devlink device info callback
+ * @devlink: devlink structure pointer
+ * @req: devlink request pointer to store information
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+ u16 id, revision, fw_ver;
+ char buf[16];
+ u32 cfg_ver;
+ int rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_ID, &id);
+ if (rc)
+ return rc;
+
+ snprintf(buf, sizeof(buf), "%X", id);
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ buf);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_REVISION, &revision);
+ if (rc)
+ return rc;
+
+ snprintf(buf, sizeof(buf), "%X", revision);
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV,
+ buf);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_FW_VER, &fw_ver);
+ if (rc)
+ return rc;
+
+ snprintf(buf, sizeof(buf), "%u", fw_ver);
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_CUSTOM_CONFIG_VER, &cfg_ver);
+ if (rc)
+ return rc;
+
+ /* No custom config version */
+ if (cfg_ver == U32_MAX)
+ return 0;
+
+ snprintf(buf, sizeof(buf), "%lu.%lu.%lu.%lu",
+ FIELD_GET(GENMASK(31, 24), cfg_ver),
+ FIELD_GET(GENMASK(23, 16), cfg_ver),
+ FIELD_GET(GENMASK(15, 8), cfg_ver),
+ FIELD_GET(GENMASK(7, 0), cfg_ver));
+
+ return devlink_info_version_running_put(req, "custom_cfg", buf);
+}
+
+static int
+zl3073x_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+ struct zl3073x_dpll *zldpll;
+
+ if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ return -EOPNOTSUPP;
+
+ /* Unregister all DPLLs */
+ list_for_each_entry(zldpll, &zldev->dplls, list)
+ zl3073x_dpll_unregister(zldpll);
+
+ return 0;
+}
+
+static int
+zl3073x_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+ union devlink_param_value val;
+ struct zl3073x_dpll *zldpll;
+ int rc;
+
+ if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ return -EOPNOTSUPP;
+
+ rc = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_CLOCK_ID,
+ &val);
+ if (rc)
+ return rc;
+
+ if (zldev->clock_id != val.vu64) {
+ dev_dbg(zldev->dev,
+ "'clock_id' changed to %016llx\n", val.vu64);
+ zldev->clock_id = val.vu64;
+ }
+
+ /* Re-register all DPLLs */
+ list_for_each_entry(zldpll, &zldev->dplls, list) {
+ rc = zl3073x_dpll_register(zldpll);
+ if (rc)
+ dev_warn(zldev->dev,
+ "Failed to re-register DPLL%u\n", zldpll->id);
+ }
+
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+
+ return 0;
+}
+
+static const struct devlink_ops zl3073x_devlink_ops = {
+ .info_get = zl3073x_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = zl3073x_devlink_reload_down,
+ .reload_up = zl3073x_devlink_reload_up,
+};
+
+static void
+zl3073x_devlink_free(void *ptr)
+{
+ devlink_free(ptr);
+}
+
+/**
+ * zl3073x_devm_alloc - allocates zl3073x device structure
+ * @dev: pointer to device structure
+ *
+ * Allocates zl3073x device structure as device resource.
+ *
+ * Return: pointer to zl3073x device on success, error pointer on error
+ */
+struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev)
+{
+ struct zl3073x_dev *zldev;
+ struct devlink *devlink;
+ int rc;
+
+ devlink = devlink_alloc(&zl3073x_devlink_ops, sizeof(*zldev), dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+
+ /* Add devres action to free devlink device */
+ rc = devm_add_action_or_reset(dev, zl3073x_devlink_free, devlink);
+ if (rc)
+ return ERR_PTR(rc);
+
+ zldev = devlink_priv(devlink);
+ zldev->dev = dev;
+ dev_set_drvdata(zldev->dev, zldev);
+
+ return zldev;
+}
+EXPORT_SYMBOL_NS_GPL(zl3073x_devm_alloc, "ZL3073X");
+
+static int
+zl3073x_devlink_param_clock_id_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (!val.vu64) {
+ NL_SET_ERR_MSG_MOD(extack, "'clock_id' must be non-zero");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param zl3073x_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(CLOCK_ID, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL,
+ zl3073x_devlink_param_clock_id_validate),
+};
+
+static void
+zl3073x_devlink_unregister(void *ptr)
+{
+ struct devlink *devlink = priv_to_devlink(ptr);
+
+ devl_lock(devlink);
+
+ /* Unregister devlink params */
+ devl_params_unregister(devlink, zl3073x_devlink_params,
+ ARRAY_SIZE(zl3073x_devlink_params));
+
+ /* Unregister devlink instance */
+ devl_unregister(devlink);
+
+ devl_unlock(devlink);
+}
+
+/**
+ * zl3073x_devlink_register - register devlink instance and params
+ * @zldev: zl3073x device to register the devlink for
+ *
+ * Register the devlink instance and parameters associated with the device.
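+ *
+ * Once registered, the clock ID parameter can be changed from user space,
+ * for example (sketch; the actual devlink handle depends on the bus and
+ * device name):
+ *
+ *   $ devlink dev param set <bus/dev> name clock_id \
+ *             value 0x0011223344556677 cmode driverinit
+ *   $ devlink dev reload <bus/dev> action driver_reinit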
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_devlink_register(struct zl3073x_dev *zldev)
+{
+ struct devlink *devlink = priv_to_devlink(zldev);
+ union devlink_param_value value;
+ int rc;
+
+ devl_lock(devlink);
+
+ /* Register devlink params */
+ rc = devl_params_register(devlink, zl3073x_devlink_params,
+ ARRAY_SIZE(zl3073x_devlink_params));
+ if (rc) {
+ devl_unlock(devlink);
+
+ return rc;
+ }
+
+ value.vu64 = zldev->clock_id;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_CLOCK_ID,
+ value);
+
+ /* Register devlink instance */
+ devl_register(devlink);
+
+ devl_unlock(devlink);
+
+ /* Add devres action to unregister devlink device */
+ return devm_add_action_or_reset(zldev->dev, zl3073x_devlink_unregister,
+ zldev);
+}
diff --git a/drivers/dpll/zl3073x/devlink.h b/drivers/dpll/zl3073x/devlink.h
new file mode 100644
index 000000000000..037720db204f
--- /dev/null
+++ b/drivers/dpll/zl3073x/devlink.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_DEVLINK_H
+#define _ZL3073X_DEVLINK_H
+
+struct zl3073x_dev;
+
+struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev);
+
+int zl3073x_devlink_register(struct zl3073x_dev *zldev);
+
+#endif /* _ZL3073X_DEVLINK_H */
diff --git a/drivers/dpll/zl3073x/dpll.c b/drivers/dpll/zl3073x/dpll.c
new file mode 100644
index 000000000000..3e42e9e7fd27
--- /dev/null
+++ b/drivers/dpll/zl3073x/dpll.c
@@ -0,0 +1,2318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/container_of.h>
+#include <linux/dev_printk.h>
+#include <linux/dpll.h>
+#include <linux/err.h>
+#include <linux/kthread.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sprintf.h>
+
+#include "core.h"
+#include "dpll.h"
+#include "prop.h"
+#include "regs.h"
+
+#define ZL3073X_DPLL_REF_NONE ZL3073X_NUM_REFS
+#define ZL3073X_DPLL_REF_IS_VALID(_ref) ((_ref) != ZL3073X_DPLL_REF_NONE)
+
+/**
+ * struct zl3073x_dpll_pin - DPLL pin
+ * @list: this DPLL pin list entry
+ * @dpll: DPLL the pin is registered to
+ * @dpll_pin: pointer to registered dpll_pin
+ * @label: package label
+ * @dir: pin direction
+ * @id: pin id
+ * @prio: pin priority <0, 14>
+ * @selectable: pin is selectable in automatic mode
+ * @esync_control: embedded sync is controllable
+ * @pin_state: last saved pin state
+ * @phase_offset: last saved pin phase offset
+ * @freq_offset: last saved fractional frequency offset
+ */
+struct zl3073x_dpll_pin {
+ struct list_head list;
+ struct zl3073x_dpll *dpll;
+ struct dpll_pin *dpll_pin;
+ char label[8];
+ enum dpll_pin_direction dir;
+ u8 id;
+ u8 prio;
+ bool selectable;
+ bool esync_control;
+ enum dpll_pin_state pin_state;
+ s64 phase_offset;
+ s64 freq_offset;
+};
+
+/*
+ * Supported esync frequency ranges for inputs and for outputs, per output pair type
+ */
+static const struct dpll_pin_frequency esync_freq_ranges[] = {
+ DPLL_PIN_FREQUENCY_RANGE(0, 1),
+};
+
+/**
+ * zl3073x_dpll_is_input_pin - check if the pin is an input pin
+ * @pin: pin to check
+ *
+ * Return: true if pin is input, false if pin is output.
+ */
+static bool
+zl3073x_dpll_is_input_pin(struct zl3073x_dpll_pin *pin)
+{
+ return pin->dir == DPLL_PIN_DIRECTION_INPUT;
+}
+
+/**
+ * zl3073x_dpll_is_p_pin - check if the pin is P-pin
+ * @pin: pin to check
+ *
+ * Return: true if the pin is P-pin, false if it is N-pin
+ */
+static bool
+zl3073x_dpll_is_p_pin(struct zl3073x_dpll_pin *pin)
+{
+ return zl3073x_is_p_pin(pin->id);
+}
+
+static int
+zl3073x_dpll_pin_direction_get(const struct dpll_pin *dpll_pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+
+ *direction = pin->dir;
+
+ return 0;
+}
+
+/**
+ * zl3073x_dpll_input_ref_frequency_get - get input reference frequency
+ * @zldpll: pointer to zl3073x_dpll
+ * @ref_id: reference id
+ * @frequency: pointer to variable to store frequency
+ *
+ * Reads frequency of given input reference.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_input_ref_frequency_get(struct zl3073x_dpll *zldpll, u8 ref_id,
+ u32 *frequency)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u16 base, mult, num, denom;
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(ref_id));
+ if (rc)
+ return rc;
+
+ /* Read registers to compute resulting frequency */
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_BASE, &base);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_MULT, &mult);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_M, &num);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_N, &denom);
+ if (rc)
+ return rc;
+
+ /* Sanity check that HW has not returned zero denominator */
+ if (!denom) {
+ dev_err(zldev->dev,
+ "Zero divisor for ref %u frequency got from device\n",
+ ref_id);
+ return -EINVAL;
+ }
+
+ /* Compute the frequency */
+ *frequency = mul_u64_u32_div(base * mult, num, denom);
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_input_pin_esync_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u8 ref, ref_sync_ctrl, sync_mode;
+ u32 esync_div, ref_freq;
+ int rc;
+
+ /* Get reference frequency */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_dpll_input_ref_frequency_get(zldpll, ref, &ref_freq);
+ if (rc)
+ return rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+ if (rc)
+ return rc;
+
+ /* Get ref sync mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_SYNC_CTRL, &ref_sync_ctrl);
+ if (rc)
+ return rc;
+
+ /* Get esync divisor */
+ rc = zl3073x_read_u32(zldev, ZL_REG_REF_ESYNC_DIV, &esync_div);
+ if (rc)
+ return rc;
+
+ sync_mode = FIELD_GET(ZL_REF_SYNC_CTRL_MODE, ref_sync_ctrl);
+
+ switch (sync_mode) {
+ case ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75:
+ esync->freq = (esync_div == ZL_REF_ESYNC_DIV_1HZ) ? 1 : 0;
+ esync->pulse = 25;
+ break;
+ default:
+ esync->freq = 0;
+ esync->pulse = 0;
+ break;
+ }
+
+ /* If the pin supports esync control, expose its range, but only
+ * if the current reference frequency is > 1 Hz.
+ */
+ if (pin->esync_control && ref_freq > 1) {
+ esync->range = esync_freq_ranges;
+ esync->range_num = ARRAY_SIZE(esync_freq_ranges);
+ } else {
+ esync->range = NULL;
+ esync->range_num = 0;
+ }
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_input_pin_esync_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 freq,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u8 ref, ref_sync_ctrl, sync_mode;
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration into mailbox */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+ if (rc)
+ return rc;
+
+ /* Get ref sync mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_SYNC_CTRL, &ref_sync_ctrl);
+ if (rc)
+ return rc;
+
+ /* Use freq == 0 to disable esync */
+ if (!freq)
+ sync_mode = ZL_REF_SYNC_CTRL_MODE_REFSYNC_PAIR_OFF;
+ else
+ sync_mode = ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75;
+
+ ref_sync_ctrl &= ~ZL_REF_SYNC_CTRL_MODE;
+ ref_sync_ctrl |= FIELD_PREP(ZL_REF_SYNC_CTRL_MODE, sync_mode);
+
+ /* Update ref sync control register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_SYNC_CTRL, ref_sync_ctrl);
+ if (rc)
+ return rc;
+
+ if (freq) {
+ /* 1 Hz is the only supported frequency currently */
+ rc = zl3073x_write_u32(zldev, ZL_REG_REF_ESYNC_DIV,
+ ZL_REF_ESYNC_DIV_1HZ);
+ if (rc)
+ return rc;
+ }
+
+ /* Commit reference configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+}
+
+static int
+zl3073x_dpll_input_pin_ffo_get(const struct dpll_pin *dpll_pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *ffo, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+
+ *ffo = pin->freq_offset;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_frequency_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 *frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u32 ref_freq;
+ u8 ref;
+ int rc;
+
+ /* Read and return ref frequency */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_dpll_input_ref_frequency_get(zldpll, ref, &ref_freq);
+ if (!rc)
+ *frequency = ref_freq;
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_input_pin_frequency_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u16 base, mult;
+ u8 ref;
+ int rc;
+
+ /* Get base frequency and multiplier for the requested frequency */
+ rc = zl3073x_ref_freq_factorize(frequency, &base, &mult);
+ if (rc)
+ return rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Load reference configuration */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+ if (rc)
+ return rc;
+
+ /* Update base frequency, multiplier, numerator & denominator */
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_BASE, base);
+ if (rc)
+ return rc;
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_MULT, mult);
+ if (rc)
+ return rc;
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_M, 1);
+ if (rc)
+ return rc;
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_N, 1);
+ if (rc)
+ return rc;
+
+ /* Commit reference configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+}
+
+/**
+ * zl3073x_dpll_selected_ref_get - get currently selected reference
+ * @zldpll: pointer to zl3073x_dpll
+ * @ref: place to store selected reference
+ *
+ * Checks which reference is currently selected for the DPLL to lock to
+ * and stores its index in the given @ref.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_selected_ref_get(struct zl3073x_dpll *zldpll, u8 *ref)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 state, value;
+ int rc;
+
+ switch (zldpll->refsel_mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_AUTO:
+ /* For automatic mode read refsel_status register */
+ rc = zl3073x_read_u8(zldev,
+ ZL_REG_DPLL_REFSEL_STATUS(zldpll->id),
+ &value);
+ if (rc)
+ return rc;
+
+ /* Extract reference state */
+ state = FIELD_GET(ZL_DPLL_REFSEL_STATUS_STATE, value);
+
+ /* Return the reference only if the DPLL is locked to it */
+ if (state == ZL_DPLL_REFSEL_STATUS_STATE_LOCK)
+ *ref = FIELD_GET(ZL_DPLL_REFSEL_STATUS_REFSEL, value);
+ else
+ *ref = ZL3073X_DPLL_REF_NONE;
+ break;
+ case ZL_DPLL_MODE_REFSEL_MODE_REFLOCK:
+ /* For manual mode return stored value */
+ *ref = zldpll->forced_ref;
+ break;
+ default:
+ /* For other modes like NCO, freerun... there is no input ref */
+ *ref = ZL3073X_DPLL_REF_NONE;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_dpll_selected_ref_set - select reference in manual mode
+ * @zldpll: pointer to zl3073x_dpll
+ * @ref: input reference to be selected
+ *
+ * Selects the given reference for the DPLL channel it should be
+ * locked to.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_selected_ref_set(struct zl3073x_dpll *zldpll, u8 ref)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 mode, mode_refsel;
+ int rc;
+
+ mode = zldpll->refsel_mode;
+
+ switch (mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_REFLOCK:
+ /* Manual mode with ref selected */
+ if (ref == ZL3073X_DPLL_REF_NONE) {
+ switch (zldpll->lock_status) {
+ case DPLL_LOCK_STATUS_LOCKED_HO_ACQ:
+ case DPLL_LOCK_STATUS_HOLDOVER:
+ /* Switch to forced holdover */
+ mode = ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER;
+ break;
+ default:
+ /* Switch to freerun */
+ mode = ZL_DPLL_MODE_REFSEL_MODE_FREERUN;
+ break;
+ }
+ /* Keep selected reference */
+ ref = zldpll->forced_ref;
+ } else if (ref == zldpll->forced_ref) {
+ /* No register update - same mode and same ref */
+ return 0;
+ }
+ break;
+ case ZL_DPLL_MODE_REFSEL_MODE_FREERUN:
+ case ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER:
+ /* Manual mode with no ref */
+ if (ref == ZL3073X_DPLL_REF_NONE)
+ /* No register update - keep current mode */
+ return 0;
+
+ /* Switch to reflock mode and update ref selection */
+ mode = ZL_DPLL_MODE_REFSEL_MODE_REFLOCK;
+ break;
+ default:
+ /* For other modes like automatic or NCO ref cannot be selected
+ * manually
+ */
+ return -EOPNOTSUPP;
+ }
+
+ /* Build mode_refsel value */
+ mode_refsel = FIELD_PREP(ZL_DPLL_MODE_REFSEL_MODE, mode) |
+ FIELD_PREP(ZL_DPLL_MODE_REFSEL_REF, ref);
+
+ /* Update dpll_mode_refsel register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MODE_REFSEL(zldpll->id),
+ mode_refsel);
+ if (rc)
+ return rc;
+
+ /* Store new mode and forced reference */
+ zldpll->refsel_mode = mode;
+ zldpll->forced_ref = ref;
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_connected_ref_get - get currently connected reference
+ * @zldpll: pointer to zl3073x_dpll
+ * @ref: place to store selected reference
+ *
+ * Looks for the reference the DPLL is currently connected and locked to
+ * and stores its index in the given @ref.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_connected_ref_get(struct zl3073x_dpll *zldpll, u8 *ref)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ int rc;
+
+ /* Get currently selected input reference */
+ rc = zl3073x_dpll_selected_ref_get(zldpll, ref);
+ if (rc)
+ return rc;
+
+ if (ZL3073X_DPLL_REF_IS_VALID(*ref)) {
+ u8 ref_status;
+
+ /* Read the reference monitor status */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(*ref),
+ &ref_status);
+ if (rc)
+ return rc;
+
+ /* If the monitor indicates an error nothing is connected */
+ if (ref_status != ZL_REF_MON_STATUS_OK)
+ *ref = ZL3073X_DPLL_REF_NONE;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_phase_offset_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, s64 *phase_offset,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u8 conn_ref, ref, ref_status;
+ s64 ref_phase;
+ int rc;
+
+ /* Get currently connected reference */
+ rc = zl3073x_dpll_connected_ref_get(zldpll, &conn_ref);
+ if (rc)
+ return rc;
+
+ /* Report phase offset only for currently connected pin if the phase
+ * monitor feature is disabled.
+ */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ if (!zldpll->phase_monitor && ref != conn_ref) {
+ *phase_offset = 0;
+
+ return 0;
+ }
+
+ /* Get this pin monitor status */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref), &ref_status);
+ if (rc)
+ return rc;
+
+ /* Report phase offset only if the input pin signal is present */
+ if (ref_status != ZL_REF_MON_STATUS_OK) {
+ *phase_offset = 0;
+
+ return 0;
+ }
+
+ ref_phase = pin->phase_offset;
+
+ /* If the DPLL is locked to a reference with a higher frequency than
+ * the given one, the phase offset is reduced modulo the period of the
+ * signal the DPLL is locked to.
+ */
+ if (ZL3073X_DPLL_REF_IS_VALID(conn_ref) && conn_ref != ref) {
+ u32 conn_freq, ref_freq;
+
+ /* Get frequency of connected ref */
+ rc = zl3073x_dpll_input_ref_frequency_get(zldpll, conn_ref,
+ &conn_freq);
+ if (rc)
+ return rc;
+
+ /* Get frequency of given ref */
+ rc = zl3073x_dpll_input_ref_frequency_get(zldpll, ref,
+ &ref_freq);
+ if (rc)
+ return rc;
+
+ if (conn_freq > ref_freq) {
+ s64 conn_period, div_factor;
+
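+ /* E.g. a connected ref at 10 MHz has a period of 100000 ps;
+ * a measured offset of 250000 ps against a slower ref is
+ * then reported as 250000 - 2 * 100000 = 50000 ps.
+ */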
+ conn_period = div_s64(PSEC_PER_SEC, conn_freq);
+ div_factor = div64_s64(ref_phase, conn_period);
+ ref_phase -= conn_period * div_factor;
+ }
+ }
+
+ *phase_offset = ref_phase * DPLL_PHASE_OFFSET_DIVIDER;
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_input_pin_phase_adjust_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ s64 phase_comp;
+ u8 ref;
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+ if (rc)
+ return rc;
+
+ /* Read current phase offset compensation */
+ rc = zl3073x_read_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP, &phase_comp);
+ if (rc)
+ return rc;
+
+ /* Perform sign extension for 48bit signed value */
+ phase_comp = sign_extend64(phase_comp, 47);
+
+ /* Reverse two's complement negation applied during set and convert
+ * to 32bit signed int
+ */
+ *phase_adjust = (s32)-phase_comp;
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_input_pin_phase_adjust_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ s64 phase_comp;
+ u8 ref;
+ int rc;
+
+ /* The value in the register is stored as two's complement negation
+ * of requested value.
+ */
+ phase_comp = -phase_adjust;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+ if (rc)
+ return rc;
+
+ /* Write the requested value into the compensation register */
+ rc = zl3073x_write_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP, phase_comp);
+ if (rc)
+ return rc;
+
+ /* Commit reference configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
+ ZL_REG_REF_MB_MASK, BIT(ref));
+}
+
+/**
+ * zl3073x_dpll_ref_prio_get - get priority for given input pin
+ * @pin: pointer to pin
+ * @prio: place to store priority
+ *
+ * Reads current priority for the given input pin and stores the value
+ * to @prio.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_ref_prio_get(struct zl3073x_dpll_pin *pin, u8 *prio)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 ref, ref_prio;
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read DPLL configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_DPLL_MB_SEM, ZL_DPLL_MB_SEM_RD,
+ ZL_REG_DPLL_MB_MASK, BIT(zldpll->id));
+ if (rc)
+ return rc;
+
+ /* Read reference priority - one value for P&N pins (4 bits/pin) */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_REF_PRIO(ref / 2),
+ &ref_prio);
+ if (rc)
+ return rc;
+
+ /* Select nibble according to pin type */
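+ /* (E.g. assuming the P priority occupies the low nibble and the N
+ * priority the high one, a register value of 0x5f reads back as
+ * prio 0xf for the P-pin and prio 0x5 for the N-pin.)
+ */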
+ if (zl3073x_dpll_is_p_pin(pin))
+ *prio = FIELD_GET(ZL_DPLL_REF_PRIO_REF_P, ref_prio);
+ else
+ *prio = FIELD_GET(ZL_DPLL_REF_PRIO_REF_N, ref_prio);
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_ref_prio_set - set priority for given input pin
+ * @pin: pointer to pin
+ * @prio: priority to be set
+ *
+ * Sets priority for the given input pin.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_ref_prio_set(struct zl3073x_dpll_pin *pin, u8 prio)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 ref, ref_prio;
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read DPLL configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_DPLL_MB_SEM, ZL_DPLL_MB_SEM_RD,
+ ZL_REG_DPLL_MB_MASK, BIT(zldpll->id));
+ if (rc)
+ return rc;
+
+ /* Read reference priority - one value shared between P&N pins */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_REF_PRIO(ref / 2), &ref_prio);
+ if (rc)
+ return rc;
+
+ /* Update nibble according to pin type */
+ if (zl3073x_dpll_is_p_pin(pin)) {
+ ref_prio &= ~ZL_DPLL_REF_PRIO_REF_P;
+ ref_prio |= FIELD_PREP(ZL_DPLL_REF_PRIO_REF_P, prio);
+ } else {
+ ref_prio &= ~ZL_DPLL_REF_PRIO_REF_N;
+ ref_prio |= FIELD_PREP(ZL_DPLL_REF_PRIO_REF_N, prio);
+ }
+
+ /* Update reference priority */
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_REF_PRIO(ref / 2), ref_prio);
+ if (rc)
+ return rc;
+
+ /* Commit configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_DPLL_MB_SEM, ZL_DPLL_MB_SEM_WR,
+ ZL_REG_DPLL_MB_MASK, BIT(zldpll->id));
+}
+
+/**
+ * zl3073x_dpll_ref_state_get - get status for given input pin
+ * @pin: pointer to pin
+ * @state: place to store status
+ *
+ * Checks current status for the given input pin and stores the value
+ * to @state.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_ref_state_get(struct zl3073x_dpll_pin *pin,
+ enum dpll_pin_state *state)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 ref, ref_conn, status;
+ int rc;
+
+ ref = zl3073x_input_pin_ref_get(pin->id);
+
+ /* Get currently connected reference */
+ rc = zl3073x_dpll_connected_ref_get(zldpll, &ref_conn);
+ if (rc)
+ return rc;
+
+ if (ref == ref_conn) {
+ *state = DPLL_PIN_STATE_CONNECTED;
+ return 0;
+ }
+
+ /* If the DPLL is running in automatic mode and the reference is
+ * selectable and its monitor does not report any error then report
+ * pin as selectable.
+ */
+ if (zldpll->refsel_mode == ZL_DPLL_MODE_REFSEL_MODE_AUTO &&
+ pin->selectable) {
+ /* Read reference monitor status */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref),
+ &status);
+ if (rc)
+ return rc;
+
+ /* Report the reference as selectable only if its monitor
+ * does not indicate any error
+ */
+ if (status == ZL_REF_MON_STATUS_OK) {
+ *state = DPLL_PIN_STATE_SELECTABLE;
+ return 0;
+ }
+ }
+
+ /* Otherwise report the pin as disconnected */
+ *state = DPLL_PIN_STATE_DISCONNECTED;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_state_on_dpll_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+
+ return zl3073x_dpll_ref_state_get(pin, state);
+}
+
+static int
+zl3073x_dpll_input_pin_state_on_dpll_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u8 new_ref;
+ int rc;
+
+ switch (zldpll->refsel_mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_REFLOCK:
+ case ZL_DPLL_MODE_REFSEL_MODE_FREERUN:
+ case ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER:
+ if (state == DPLL_PIN_STATE_CONNECTED) {
+ /* Choose the pin as new selected reference */
+ new_ref = zl3073x_input_pin_ref_get(pin->id);
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ /* No reference */
+ new_ref = ZL3073X_DPLL_REF_NONE;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid pin state for manual mode");
+ return -EINVAL;
+ }
+
+ rc = zl3073x_dpll_selected_ref_set(zldpll, new_ref);
+ break;
+
+ case ZL_DPLL_MODE_REFSEL_MODE_AUTO:
+ if (state == DPLL_PIN_STATE_SELECTABLE) {
+ if (pin->selectable)
+ return 0; /* Pin is already selectable */
+
+ /* Restore pin priority in HW */
+ rc = zl3073x_dpll_ref_prio_set(pin, pin->prio);
+ if (rc)
+ return rc;
+
+ /* Mark pin as selectable */
+ pin->selectable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ if (!pin->selectable)
+ return 0; /* Pin is already disconnected */
+
+ /* Set pin priority to none in HW */
+ rc = zl3073x_dpll_ref_prio_set(pin,
+ ZL_DPLL_REF_PRIO_NONE);
+ if (rc)
+ return rc;
+
+ /* Mark pin as non-selectable */
+ pin->selectable = false;
+ } else {
+ NL_SET_ERR_MSG(extack,
+ "Invalid pin state for automatic mode");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ /* In other modes we cannot change input reference */
+ NL_SET_ERR_MSG(extack,
+ "Pin state cannot be changed in current mode");
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_input_pin_prio_get(const struct dpll_pin *dpll_pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+
+ *prio = pin->prio;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_prio_set(const struct dpll_pin *dpll_pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ int rc;
+
+ if (prio > ZL_DPLL_REF_PRIO_MAX)
+ return -EINVAL;
+
+ /* If the pin is selectable then update HW registers */
+ if (pin->selectable) {
+ rc = zl3073x_dpll_ref_prio_set(pin, prio);
+ if (rc)
+ return rc;
+ }
+
+ /* Save priority */
+ pin->prio = prio;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_output_pin_esync_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ struct device *dev = zldev->dev;
+ u32 esync_period, esync_width;
+ u8 clock_type, synth;
+ u8 out, output_mode;
+ u32 output_div;
+ u32 synth_freq;
+ int rc;
+
+ out = zl3073x_output_pin_out_get(pin->id);
+
+ /* If N-division is enabled, esync is not supported. The register used
+ * for N-division is also used for the esync divider so both cannot
+ * be used.
+ */
+ switch (zl3073x_out_signal_format_get(zldev, out)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ if (rc)
+ return rc;
+
+ /* Read output mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &output_mode);
+ if (rc)
+ return rc;
+
+ /* Read output divisor */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &output_div);
+ if (rc)
+ return rc;
+
+ /* Check output divisor for zero */
+ if (!output_div) {
+ dev_err(dev, "Zero divisor for OUTPUT%u got from device\n",
+ out);
+ return -EINVAL;
+ }
+
+ /* Get synth attached to output pin */
+ synth = zl3073x_out_synth_get(zldev, out);
+
+ /* Get synth frequency */
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+
+ clock_type = FIELD_GET(ZL_OUTPUT_MODE_CLOCK_TYPE, output_mode);
+ if (clock_type != ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC) {
+ /* No need to read esync data if it is not enabled */
+ esync->freq = 0;
+ esync->pulse = 0;
+
+ goto finish;
+ }
+
+ /* Read esync period */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, &esync_period);
+ if (rc)
+ return rc;
+
+ /* Check esync divisor for zero */
+ if (!esync_period) {
+ dev_err(dev, "Zero esync divisor for OUTPUT%u got from device\n",
+ out);
+ return -EINVAL;
+ }
+
+ /* Get esync pulse width in units of half synth cycles */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH, &esync_width);
+ if (rc)
+ return rc;
+
+ /* Compute esync frequency */
+ esync->freq = synth_freq / output_div / esync_period;
+
+ /* The esync pulse percentage is determined by comparing the esync
+ * pulse width to half of the output period. Note that the half
+ * output period is in units of half synth cycles, which is why it
+ * reduces down to output_div.
+ */
+ esync->pulse = (50 * esync_width) / output_div;
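+
+ /* (E.g. esync_width == output_div / 2, as programmed by the
+ * setter below, yields the supported 25% pulse:
+ * 50 * (output_div / 2) / output_div == 25.)
+ */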
+
+finish:
+ /* Set supported esync ranges if the pin supports esync control and
+ * if the output frequency is > 1 Hz.
+ */
+ if (pin->esync_control && (synth_freq / output_div) > 1) {
+ esync->range = esync_freq_ranges;
+ esync->range_num = ARRAY_SIZE(esync_freq_ranges);
+ } else {
+ esync->range = NULL;
+ esync->range_num = 0;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_output_pin_esync_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 freq,
+ struct netlink_ext_ack *extack)
+{
+ u32 esync_period, esync_width, output_div;
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u8 clock_type, out, output_mode, synth;
+ u32 synth_freq;
+ int rc;
+
+ out = zl3073x_output_pin_out_get(pin->id);
+
+ /* If N-division is enabled, esync is not supported. The register used
+ * for N-division is also used for the esync divider so both cannot
+ * be used.
+ */
+ switch (zl3073x_out_signal_format_get(zldev, out)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ if (rc)
+ return rc;
+
+ /* Read output mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &output_mode);
+ if (rc)
+ return rc;
+
+ /* Select clock type */
+ if (freq)
+ clock_type = ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC;
+ else
+ clock_type = ZL_OUTPUT_MODE_CLOCK_TYPE_NORMAL;
+
+ /* Update clock type in output mode */
+ output_mode &= ~ZL_OUTPUT_MODE_CLOCK_TYPE;
+ output_mode |= FIELD_PREP(ZL_OUTPUT_MODE_CLOCK_TYPE, clock_type);
+ rc = zl3073x_write_u8(zldev, ZL_REG_OUTPUT_MODE, output_mode);
+ if (rc)
+ return rc;
+
+ /* If esync is being disabled just write mailbox and finish */
+ if (!freq)
+ goto write_mailbox;
+
+ /* Get synth attached to output pin */
+ synth = zl3073x_out_synth_get(zldev, out);
+
+ /* Get synth frequency */
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &output_div);
+ if (rc)
+ return rc;
+
+ /* Check output divisor for zero */
+ if (!output_div) {
+ dev_err(zldev->dev,
+ "Zero divisor for OUTPUT%u got from device\n", out);
+ return -EINVAL;
+ }
+
+ /* Compute and update esync period */
+ esync_period = synth_freq / (u32)freq / output_div;
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, esync_period);
+ if (rc)
+ return rc;
+
+ /* Half of the period, in units of half synth cycles, is exactly
+ * output_div. The only supported esync pulse width is 25% of the
+ * period, so the half period is simply divided by 2. Note that this
+ * assumes output_div is even, otherwise some resolution is lost.
+ */
+ esync_width = output_div / 2;
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH, esync_width);
+ if (rc)
+ return rc;
+
+write_mailbox:
+ /* Commit output configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+}
+
+static int
+zl3073x_dpll_output_pin_frequency_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 *frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ struct device *dev = zldev->dev;
+ u8 out, signal_format, synth;
+ u32 output_div, synth_freq;
+ int rc;
+
+ out = zl3073x_output_pin_out_get(pin->id);
+ synth = zl3073x_out_synth_get(zldev, out);
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &output_div);
+ if (rc)
+ return rc;
+
+ /* Check output divisor for zero */
+ if (!output_div) {
+ dev_err(dev, "Zero divisor for output %u got from device\n",
+ out);
+ return -EINVAL;
+ }
+
+ /* Read used signal format for the given output */
+ signal_format = zl3073x_out_signal_format_get(zldev, out);
+
+ switch (signal_format) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
+ /* For the divided formats we have to distinguish between
+ * the output pin types.
+ */
+ if (zl3073x_dpll_is_p_pin(pin)) {
+ /* For P-pin the resulting frequency is computed as
+ * simple division of synth frequency and output
+ * divisor.
+ */
+ *frequency = synth_freq / output_div;
+ } else {
+ /* For the N-pin we additionally have to divide by the
+ * divisor stored in the esync_period output mailbox
+ * register, which is used as the N-pin divisor in these
+ * modes.
+ */
+ u32 ndiv;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD,
+ &ndiv);
+ if (rc)
+ return rc;
+
+ /* Check N-pin divisor for zero */
+ if (!ndiv) {
+ dev_err(dev,
+ "Zero N-pin divisor for output %u got from device\n",
+ out);
+ return -EINVAL;
+ }
+
+ /* Compute final divisor for N-pin */
+ *frequency = synth_freq / output_div / ndiv;
+ }
+ break;
+ default:
+ /* In other modes the resulting frequency is computed as
+ * division of synth frequency and output divisor.
+ */
+ *frequency = synth_freq / output_div;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_output_pin_frequency_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ struct device *dev = zldev->dev;
+ u32 output_n_freq, output_p_freq;
+ u8 out, signal_format, synth;
+ u32 cur_div, new_div, ndiv;
+ u32 synth_freq;
+ int rc;
+
+ out = zl3073x_output_pin_out_get(pin->id);
+ synth = zl3073x_out_synth_get(zldev, out);
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+ new_div = synth_freq / (u32)frequency;
+
+ /* Get used signal format for the given output */
+ signal_format = zl3073x_out_signal_format_get(zldev, out);
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Load output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ if (rc)
+ return rc;
+
+ /* Check signal format */
+ if (signal_format != ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV &&
+ signal_format != ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV) {
+ /* For non N-divided signal formats the frequency is computed
+ * as division of synth frequency and output divisor.
+ */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_DIV, new_div);
+ if (rc)
+ return rc;
+
+ /* For 50/50 duty cycle the divisor is equal to width */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_WIDTH, new_div);
+ if (rc)
+ return rc;
+
+ /* Commit output configuration */
+ return zl3073x_mb_op(zldev,
+ ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ }
+
+ /* For N-divided signal format get current divisor */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &cur_div);
+ if (rc)
+ return rc;
+
+ /* Check output divisor for zero */
+ if (!cur_div) {
+ dev_err(dev, "Zero divisor for output %u got from device\n",
+ out);
+ return -EINVAL;
+ }
+
+ /* Get N-pin divisor (shares the same register with esync) */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, &ndiv);
+ if (rc)
+ return rc;
+
+ /* Check N-pin divisor for zero */
+ if (!ndiv) {
+ dev_err(dev,
+ "Zero N-pin divisor for output %u got from device\n",
+ out);
+ return -EINVAL;
+ }
+
+ /* Compute current output frequency for P-pin */
+ output_p_freq = synth_freq / cur_div;
+
+ /* Compute current N-pin frequency */
+ output_n_freq = output_p_freq / ndiv;
+
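+ /* Worked example with illustrative values: synth_freq = 500 MHz and
+ * cur_div = 50 give output_p_freq = 10 MHz; with ndiv = 10 the N-pin
+ * runs at 1 MHz. Re-programming the P-pin to 20 MHz below yields
+ * new_div = 25 and a recomputed ndiv = 20000000 / 1000000 = 20, so
+ * the N-pin frequency stays at 1 MHz.
+ */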
+ if (zl3073x_dpll_is_p_pin(pin)) {
+ /* We are going to change the output frequency of the P-pin,
+ * but if the requested frequency is less than or equal to the
+ * current N-pin frequency then indicate a failure, as we would
+ * not be able to compute an N-pin divisor that keeps its
+ * frequency unchanged.
+ */
+ if (frequency <= output_n_freq)
+ return -EINVAL;
+
+ /* Update the output divisor */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_DIV, new_div);
+ if (rc)
+ return rc;
+
+ /* For 50/50 duty cycle the divisor is equal to width */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_WIDTH, new_div);
+ if (rc)
+ return rc;
+
+ /* Compute new divisor for N-pin */
+ ndiv = (u32)frequency / output_n_freq;
+ } else {
+ /* We are going to change the frequency of the N-pin, but if
+ * the requested frequency is greater than or equal to the
+ * P-pin frequency in the output pair we cannot compute a
+ * divisor for the N-pin. In this case indicate a failure.
+ */
+ if (output_p_freq <= frequency)
+ return -EINVAL;
+
+ /* Compute new divisor for N-pin */
+ ndiv = output_p_freq / (u32)frequency;
+ }
+
+ /* Update divisor for the N-pin */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, ndiv);
+ if (rc)
+ return rc;
+
+ /* For 50/50 duty cycle the divisor is equal to width */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH, ndiv);
+ if (rc)
+ return rc;
+
+ /* Commit output configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+}
+
+static int
+zl3073x_dpll_output_pin_phase_adjust_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u32 synth_freq;
+ s32 phase_comp;
+ u8 out, synth;
+ int rc;
+
+ out = zl3073x_output_pin_out_get(pin->id);
+ synth = zl3073x_out_synth_get(zldev, out);
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+
+ /* Check synth freq for zero */
+ if (!synth_freq) {
+ dev_err(zldev->dev, "Got zero synth frequency for output %u\n",
+ out);
+ return -EINVAL;
+ }
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ if (rc)
+ return rc;
+
+ /* Read current output phase compensation */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP, &phase_comp);
+ if (rc)
+ return rc;
+
+ /* Value in register is expressed in half synth clock cycles */
+ phase_comp *= (int)div_u64(PSEC_PER_SEC, 2 * synth_freq);
+
+ /* Reverse two's complement negation applied during 'set' */
+ *phase_adjust = -phase_comp;
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_output_pin_phase_adjust_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ int half_synth_cycle;
+ u32 synth_freq;
+ u8 out, synth;
+ int rc;
+
+ /* Get attached synth */
+ out = zl3073x_output_pin_out_get(pin->id);
+ synth = zl3073x_out_synth_get(zldev, out);
+
+ /* Get synth's frequency */
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+
+ /* The value in the register is expressed in half synth clock cycles,
+ * so the given phase adjustment must be a multiple of a half synth
+ * clock cycle.
+ */
+ half_synth_cycle = (int)div_u64(PSEC_PER_SEC, 2 * synth_freq);
+
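+ /* For instance (illustrative value), a 625 MHz synth gives
+ * half_synth_cycle = 10^12 / (2 * 625000000) = 800 ps, so only
+ * phase adjustments that are multiples of 800 ps are accepted.
+ */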
+ if ((phase_adjust % half_synth_cycle) != 0) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Phase adjustment value has to be multiple of %d",
+ half_synth_cycle);
+ return -EINVAL;
+ }
+ phase_adjust /= half_synth_cycle;
+
+ /* The value in the register is stored as two's complement negation
+ * of requested value.
+ */
+ phase_adjust = -phase_adjust;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ if (rc)
+ return rc;
+
+ /* Write the requested value into the compensation register */
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP, phase_adjust);
+ if (rc)
+ return rc;
+
+ /* Update output configuration from mailbox */
+ return zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
+ ZL_REG_OUTPUT_MB_MASK, BIT(out));
+}
+
+static int
+zl3073x_dpll_output_pin_state_on_dpll_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ /* If the output pin is registered then it is always connected */
+ *state = DPLL_PIN_STATE_CONNECTED;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 mon_status, state;
+ int rc;
+
+ switch (zldpll->refsel_mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_FREERUN:
+ case ZL_DPLL_MODE_REFSEL_MODE_NCO:
+ /* In FREERUN and NCO modes the DPLL is always unlocked */
+ *status = DPLL_LOCK_STATUS_UNLOCKED;
+
+ return 0;
+ default:
+ break;
+ }
+
+ /* Read DPLL monitor status */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MON_STATUS(zldpll->id),
+ &mon_status);
+ if (rc)
+ return rc;
+ state = FIELD_GET(ZL_DPLL_MON_STATUS_STATE, mon_status);
+
+ switch (state) {
+ case ZL_DPLL_MON_STATUS_STATE_LOCK:
+ if (FIELD_GET(ZL_DPLL_MON_STATUS_HO_READY, mon_status))
+ *status = DPLL_LOCK_STATUS_LOCKED_HO_ACQ;
+ else
+ *status = DPLL_LOCK_STATUS_LOCKED;
+ break;
+ case ZL_DPLL_MON_STATUS_STATE_HOLDOVER:
+ case ZL_DPLL_MON_STATUS_STATE_ACQUIRING:
+ *status = DPLL_LOCK_STATUS_HOLDOVER;
+ break;
+ default:
+ dev_warn(zldev->dev, "Unknown DPLL monitor status: 0x%02x\n",
+ mon_status);
+ *status = DPLL_LOCK_STATUS_UNLOCKED;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode *mode, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ switch (zldpll->refsel_mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_FREERUN:
+ case ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER:
+ case ZL_DPLL_MODE_REFSEL_MODE_NCO:
+ case ZL_DPLL_MODE_REFSEL_MODE_REFLOCK:
+ /* Use MANUAL for device FREERUN, HOLDOVER, NCO and
+ * REFLOCK modes
+ */
+ *mode = DPLL_MODE_MANUAL;
+ break;
+ case ZL_DPLL_MODE_REFSEL_MODE_AUTO:
+ /* Use AUTO for device AUTO mode */
+ *mode = DPLL_MODE_AUTOMATIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_phase_offset_monitor_get(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ if (zldpll->phase_monitor)
+ *state = DPLL_FEATURE_STATE_ENABLE;
+ else
+ *state = DPLL_FEATURE_STATE_DISABLE;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_phase_offset_monitor_set(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ zldpll->phase_monitor = (state == DPLL_FEATURE_STATE_ENABLE);
+
+ return 0;
+}
+
+static const struct dpll_pin_ops zl3073x_dpll_input_pin_ops = {
+ .direction_get = zl3073x_dpll_pin_direction_get,
+ .esync_get = zl3073x_dpll_input_pin_esync_get,
+ .esync_set = zl3073x_dpll_input_pin_esync_set,
+ .ffo_get = zl3073x_dpll_input_pin_ffo_get,
+ .frequency_get = zl3073x_dpll_input_pin_frequency_get,
+ .frequency_set = zl3073x_dpll_input_pin_frequency_set,
+ .phase_offset_get = zl3073x_dpll_input_pin_phase_offset_get,
+ .phase_adjust_get = zl3073x_dpll_input_pin_phase_adjust_get,
+ .phase_adjust_set = zl3073x_dpll_input_pin_phase_adjust_set,
+ .prio_get = zl3073x_dpll_input_pin_prio_get,
+ .prio_set = zl3073x_dpll_input_pin_prio_set,
+ .state_on_dpll_get = zl3073x_dpll_input_pin_state_on_dpll_get,
+ .state_on_dpll_set = zl3073x_dpll_input_pin_state_on_dpll_set,
+};
+
+static const struct dpll_pin_ops zl3073x_dpll_output_pin_ops = {
+ .direction_get = zl3073x_dpll_pin_direction_get,
+ .esync_get = zl3073x_dpll_output_pin_esync_get,
+ .esync_set = zl3073x_dpll_output_pin_esync_set,
+ .frequency_get = zl3073x_dpll_output_pin_frequency_get,
+ .frequency_set = zl3073x_dpll_output_pin_frequency_set,
+ .phase_adjust_get = zl3073x_dpll_output_pin_phase_adjust_get,
+ .phase_adjust_set = zl3073x_dpll_output_pin_phase_adjust_set,
+ .state_on_dpll_get = zl3073x_dpll_output_pin_state_on_dpll_get,
+};
+
+static const struct dpll_device_ops zl3073x_dpll_device_ops = {
+ .lock_status_get = zl3073x_dpll_lock_status_get,
+ .mode_get = zl3073x_dpll_mode_get,
+ .phase_offset_monitor_get = zl3073x_dpll_phase_offset_monitor_get,
+ .phase_offset_monitor_set = zl3073x_dpll_phase_offset_monitor_set,
+};
+
+/**
+ * zl3073x_dpll_pin_alloc - allocate DPLL pin
+ * @zldpll: pointer to zl3073x_dpll
+ * @dir: pin direction
+ * @id: pin id
+ *
+ * Allocates and initializes zl3073x_dpll_pin structure for given
+ * pin id and direction.
+ *
+ * Return: pointer to allocated structure on success, error pointer on error
+ */
+static struct zl3073x_dpll_pin *
+zl3073x_dpll_pin_alloc(struct zl3073x_dpll *zldpll, enum dpll_pin_direction dir,
+ u8 id)
+{
+ struct zl3073x_dpll_pin *pin;
+
+ pin = kzalloc(sizeof(*pin), GFP_KERNEL);
+ if (!pin)
+ return ERR_PTR(-ENOMEM);
+
+ pin->dpll = zldpll;
+ pin->dir = dir;
+ pin->id = id;
+
+ return pin;
+}
+
+/**
+ * zl3073x_dpll_pin_free - deallocate DPLL pin
+ * @pin: pin to free
+ *
+ * Deallocates DPLL pin previously allocated by zl3073x_dpll_pin_alloc().
+ */
+static void
+zl3073x_dpll_pin_free(struct zl3073x_dpll_pin *pin)
+{
+ WARN(pin->dpll_pin, "DPLL pin is still registered\n");
+
+ kfree(pin);
+}
+
+/**
+ * zl3073x_dpll_pin_register - register DPLL pin
+ * @pin: pointer to DPLL pin
+ * @index: absolute pin index for registration
+ *
+ * Registers given DPLL pin into DPLL sub-system.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_pin_register(struct zl3073x_dpll_pin *pin, u32 index)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_pin_props *props;
+ const struct dpll_pin_ops *ops;
+ int rc;
+
+ /* Get pin properties */
+ props = zl3073x_pin_props_get(zldpll->dev, pin->dir, pin->id);
+ if (IS_ERR(props))
+ return PTR_ERR(props);
+
+ /* Save package label & esync capability */
+ strscpy(pin->label, props->package_label);
+ pin->esync_control = props->esync_control;
+
+ if (zl3073x_dpll_is_input_pin(pin)) {
+ rc = zl3073x_dpll_ref_prio_get(pin, &pin->prio);
+ if (rc)
+ goto err_prio_get;
+
+ if (pin->prio == ZL_DPLL_REF_PRIO_NONE) {
+ /* Clamp prio to max value & mark pin non-selectable */
+ pin->prio = ZL_DPLL_REF_PRIO_MAX;
+ pin->selectable = false;
+ } else {
+ /* Mark pin as selectable */
+ pin->selectable = true;
+ }
+ }
+
+ /* Create or get existing DPLL pin */
+ pin->dpll_pin = dpll_pin_get(zldpll->dev->clock_id, index, THIS_MODULE,
+ &props->dpll_props);
+ if (IS_ERR(pin->dpll_pin)) {
+ rc = PTR_ERR(pin->dpll_pin);
+ goto err_pin_get;
+ }
+
+ if (zl3073x_dpll_is_input_pin(pin))
+ ops = &zl3073x_dpll_input_pin_ops;
+ else
+ ops = &zl3073x_dpll_output_pin_ops;
+
+ /* Register the pin */
+ rc = dpll_pin_register(zldpll->dpll_dev, pin->dpll_pin, ops, pin);
+ if (rc)
+ goto err_register;
+
+ /* Free pin properties */
+ zl3073x_pin_props_put(props);
+
+ return 0;
+
+err_register:
+ dpll_pin_put(pin->dpll_pin);
+err_pin_get:
+ pin->dpll_pin = NULL;
+err_prio_get:
+ zl3073x_pin_props_put(props);
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_pin_unregister - unregister DPLL pin
+ * @pin: pointer to DPLL pin
+ *
+ * Unregisters pin previously registered by zl3073x_dpll_pin_register().
+ */
+static void
+zl3073x_dpll_pin_unregister(struct zl3073x_dpll_pin *pin)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ const struct dpll_pin_ops *ops;
+
+ WARN(!pin->dpll_pin, "DPLL pin is not registered\n");
+
+ if (zl3073x_dpll_is_input_pin(pin))
+ ops = &zl3073x_dpll_input_pin_ops;
+ else
+ ops = &zl3073x_dpll_output_pin_ops;
+
+ /* Unregister the pin */
+ dpll_pin_unregister(zldpll->dpll_dev, pin->dpll_pin, ops, pin);
+
+ dpll_pin_put(pin->dpll_pin);
+ pin->dpll_pin = NULL;
+}
+
+/**
+ * zl3073x_dpll_pins_unregister - unregister all registered DPLL pins
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Enumerates all DPLL pins registered to given DPLL device and
+ * unregisters them.
+ */
+static void
+zl3073x_dpll_pins_unregister(struct zl3073x_dpll *zldpll)
+{
+ struct zl3073x_dpll_pin *pin, *next;
+
+ list_for_each_entry_safe(pin, next, &zldpll->pins, list) {
+ zl3073x_dpll_pin_unregister(pin);
+ list_del(&pin->list);
+ zl3073x_dpll_pin_free(pin);
+ }
+}
+
+/**
+ * zl3073x_dpll_pin_is_registrable - check if the pin is registrable
+ * @zldpll: pointer to zl3073x_dpll structure
+ * @dir: pin direction
+ * @index: pin index
+ *
+ * Checks whether the given pin can be registered to the given DPLL. For
+ * both directions the pin can be registered only if it is enabled. For
+ * the differential signal type only the P-pin is reported as
+ * registrable. Additionally, an output pin can be registered only if it
+ * is connected to a synthesizer that is driven by the given DPLL.
+ *
+ * Return: true if the pin is registrable, false if not
+ */
+static bool
+zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll,
+ enum dpll_pin_direction dir, u8 index)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ bool is_diff, is_enabled;
+ const char *name;
+
+ if (dir == DPLL_PIN_DIRECTION_INPUT) {
+ u8 ref = zl3073x_input_pin_ref_get(index);
+
+ name = "REF";
+
+ /* Skip the pin if the DPLL is running in NCO mode */
+ if (zldpll->refsel_mode == ZL_DPLL_MODE_REFSEL_MODE_NCO)
+ return false;
+
+ is_diff = zl3073x_ref_is_diff(zldev, ref);
+ is_enabled = zl3073x_ref_is_enabled(zldev, ref);
+ } else {
+ /* Output P&N pair shares single HW output */
+ u8 out = zl3073x_output_pin_out_get(index);
+
+ name = "OUT";
+
+ /* Skip the pin if it is connected to different DPLL channel */
+ if (zl3073x_out_dpll_get(zldev, out) != zldpll->id) {
+ dev_dbg(zldev->dev,
+ "%s%u is driven by different DPLL\n", name,
+ out);
+
+ return false;
+ }
+
+ is_diff = zl3073x_out_is_diff(zldev, out);
+ is_enabled = zl3073x_out_is_enabled(zldev, out);
+ }
+
+ /* Skip N-pin if the corresponding input/output is differential */
+ if (is_diff && zl3073x_is_n_pin(index)) {
+ dev_dbg(zldev->dev, "%s%u is differential, skipping N-pin\n",
+ name, index / 2);
+
+ return false;
+ }
+
+ /* Skip the pin if it is disabled */
+ if (!is_enabled) {
+ dev_dbg(zldev->dev, "%s%u%c is disabled\n", name, index / 2,
+ zl3073x_is_p_pin(index) ? 'P' : 'N');
+
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * zl3073x_dpll_pins_register - register all registerable DPLL pins
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Enumerates all possible input/output pins and registers those that
+ * are registrable.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_pins_register(struct zl3073x_dpll *zldpll)
+{
+ struct zl3073x_dpll_pin *pin;
+ enum dpll_pin_direction dir;
+ u8 id, index;
+ int rc;
+
+ /* Process input pins */
+ for (index = 0; index < ZL3073X_NUM_PINS; index++) {
+ /* First input pins and then output pins */
+ if (index < ZL3073X_NUM_INPUT_PINS) {
+ id = index;
+ dir = DPLL_PIN_DIRECTION_INPUT;
+ } else {
+ id = index - ZL3073X_NUM_INPUT_PINS;
+ dir = DPLL_PIN_DIRECTION_OUTPUT;
+ }
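+
+ /* The pin index space is inputs first, then outputs; e.g. if
+ * ZL3073X_NUM_INPUT_PINS were 10 (hypothetical value), index 12
+ * would map to output pin id 2.
+ */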
+
+ /* Check if the pin is registrable to this DPLL */
+ if (!zl3073x_dpll_pin_is_registrable(zldpll, dir, id))
+ continue;
+
+ pin = zl3073x_dpll_pin_alloc(zldpll, dir, id);
+ if (IS_ERR(pin)) {
+ rc = PTR_ERR(pin);
+ goto error;
+ }
+
+ rc = zl3073x_dpll_pin_register(pin, index);
+ if (rc)
+ goto error;
+
+ list_add(&pin->list, &zldpll->pins);
+ }
+
+ return 0;
+
+error:
+ zl3073x_dpll_pins_unregister(zldpll);
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_device_register - register DPLL device
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Registers given DPLL device into DPLL sub-system.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_device_register(struct zl3073x_dpll *zldpll)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 dpll_mode_refsel;
+ int rc;
+
+ /* Read DPLL mode and forcibly selected reference */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MODE_REFSEL(zldpll->id),
+ &dpll_mode_refsel);
+ if (rc)
+ return rc;
+
+ /* Extract mode and selected input reference */
+ zldpll->refsel_mode = FIELD_GET(ZL_DPLL_MODE_REFSEL_MODE,
+ dpll_mode_refsel);
+ zldpll->forced_ref = FIELD_GET(ZL_DPLL_MODE_REFSEL_REF,
+ dpll_mode_refsel);
+
+ zldpll->dpll_dev = dpll_device_get(zldev->clock_id, zldpll->id,
+ THIS_MODULE);
+ if (IS_ERR(zldpll->dpll_dev)) {
+ rc = PTR_ERR(zldpll->dpll_dev);
+ zldpll->dpll_dev = NULL;
+
+ return rc;
+ }
+
+ rc = dpll_device_register(zldpll->dpll_dev,
+ zl3073x_prop_dpll_type_get(zldev, zldpll->id),
+ &zl3073x_dpll_device_ops, zldpll);
+ if (rc) {
+ dpll_device_put(zldpll->dpll_dev);
+ zldpll->dpll_dev = NULL;
+ }
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_device_unregister - unregister DPLL device
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Unregisters given DPLL device from DPLL sub-system previously registered
+ * by zl3073x_dpll_device_register().
+ */
+static void
+zl3073x_dpll_device_unregister(struct zl3073x_dpll *zldpll)
+{
+ WARN(!zldpll->dpll_dev, "DPLL device is not registered\n");
+
+ dpll_device_unregister(zldpll->dpll_dev, &zl3073x_dpll_device_ops,
+ zldpll);
+ dpll_device_put(zldpll->dpll_dev);
+ zldpll->dpll_dev = NULL;
+}
+
+/**
+ * zl3073x_dpll_pin_phase_offset_check - check for pin phase offset change
+ * @pin: pin to check
+ *
+ * Checks whether the phase offset between the DPLL and the given pin
+ * has changed.
+ *
+ * Return: true on phase offset change, false otherwise
+ */
+static bool
+zl3073x_dpll_pin_phase_offset_check(struct zl3073x_dpll_pin *pin)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ unsigned int reg;
+ s64 phase_offset;
+ u8 ref;
+ int rc;
+
+ ref = zl3073x_input_pin_ref_get(pin->id);
+
+ /* Select register to read phase offset value depending on pin and
+ * phase monitor state:
+ * 1) For connected pin use dpll_phase_err_data register
+ * 2) For other pins use appropriate ref_phase register if the phase
+ * monitor feature is enabled and reference monitor does not
+ * report signal errors for given input pin
+ */
+ if (pin->pin_state == DPLL_PIN_STATE_CONNECTED) {
+ reg = ZL_REG_DPLL_PHASE_ERR_DATA(zldpll->id);
+ } else if (zldpll->phase_monitor) {
+ u8 status;
+
+ /* Get reference monitor status */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref),
+ &status);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to read %s refmon status: %pe\n",
+ pin->label, ERR_PTR(rc));
+
+ return false;
+ }
+
+ if (status != ZL_REF_MON_STATUS_OK)
+ return false;
+
+ reg = ZL_REG_REF_PHASE(ref);
+ } else {
+ /* The pin is not connected or phase monitor disabled */
+ return false;
+ }
+
+ /* Read measured phase offset value */
+ rc = zl3073x_read_u48(zldev, reg, &phase_offset);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to read ref phase offset: %pe\n",
+ ERR_PTR(rc));
+
+ return false;
+ }
+
+ /* Convert to ps (the raw value is in units of 0.01 ps) */
+ phase_offset = div_s64(sign_extend64(phase_offset, 47), 100);
+
+ /* Compare with previous value */
+ if (phase_offset != pin->phase_offset) {
+ dev_dbg(zldev->dev, "%s phase offset changed: %lld -> %lld\n",
+ pin->label, pin->phase_offset, phase_offset);
+ pin->phase_offset = phase_offset;
+
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * zl3073x_dpll_pin_ffo_check - check for pin fractional frequency offset change
+ * @pin: pin to check
+ *
+ * Checks whether the given pin's fractional frequency offset has changed.
+ *
+ * Return: true on fractional frequency offset change, false otherwise
+ */
+static bool
+zl3073x_dpll_pin_ffo_check(struct zl3073x_dpll_pin *pin)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 ref, status;
+ s64 ffo;
+ int rc;
+
+ /* Get reference monitor status */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref), &status);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to read %s refmon status: %pe\n",
+ pin->label, ERR_PTR(rc));
+
+ return false;
+ }
+
+ /* Do not report ffo changes if the reference monitor reports errors */
+ if (status != ZL_REF_MON_STATUS_OK)
+ return false;
+
+ /* Get the latest measured ref's ffo */
+ ffo = zl3073x_ref_ffo_get(zldev, ref);
+
+ /* Compare with previous value */
+ if (pin->freq_offset != ffo) {
+ dev_dbg(zldev->dev, "%s freq offset changed: %lld -> %lld\n",
+ pin->label, pin->freq_offset, ffo);
+ pin->freq_offset = ffo;
+
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * zl3073x_dpll_changes_check - check for changes and send notifications
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Checks for changes on given DPLL device and its registered DPLL pins
+ * and sends notifications about them.
+ *
+ * This function is periodically called from @zl3073x_dev_periodic_work.
+ */
+void
+zl3073x_dpll_changes_check(struct zl3073x_dpll *zldpll)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ enum dpll_lock_status lock_status;
+ struct device *dev = zldev->dev;
+ struct zl3073x_dpll_pin *pin;
+ int rc;
+
+ zldpll->check_count++;
+
+ /* Get current lock status for the DPLL */
+ rc = zl3073x_dpll_lock_status_get(zldpll->dpll_dev, zldpll,
+ &lock_status, NULL, NULL);
+ if (rc) {
+ dev_err(dev, "Failed to get DPLL%u lock status: %pe\n",
+ zldpll->id, ERR_PTR(rc));
+ return;
+ }
+
+ /* If lock status was changed then notify DPLL core */
+ if (zldpll->lock_status != lock_status) {
+ zldpll->lock_status = lock_status;
+ dpll_device_change_ntf(zldpll->dpll_dev);
+ }
+
+ /* Input pin monitoring makes sense only in automatic
+ * or forced reference modes.
+ */
+ if (zldpll->refsel_mode != ZL_DPLL_MODE_REFSEL_MODE_AUTO &&
+ zldpll->refsel_mode != ZL_DPLL_MODE_REFSEL_MODE_REFLOCK)
+ return;
+
+ /* Update phase offset latch registers for this DPLL if the phase
+ * offset monitor feature is enabled.
+ */
+ if (zldpll->phase_monitor) {
+ rc = zl3073x_ref_phase_offsets_update(zldev, zldpll->id);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to update phase offsets: %pe\n",
+ ERR_PTR(rc));
+ return;
+ }
+ }
+
+ list_for_each_entry(pin, &zldpll->pins, list) {
+ enum dpll_pin_state state;
+ bool pin_changed = false;
+
+ /* Change checks for output pins are not necessary because their
+ * states are constant.
+ */
+ if (!zl3073x_dpll_is_input_pin(pin))
+ continue;
+
+ rc = zl3073x_dpll_ref_state_get(pin, &state);
+ if (rc) {
+ dev_err(dev,
+ "Failed to get %s on DPLL%u state: %pe\n",
+ pin->label, zldpll->id, ERR_PTR(rc));
+ return;
+ }
+
+ if (state != pin->pin_state) {
+ dev_dbg(dev, "%s state changed: %u->%u\n", pin->label,
+ pin->pin_state, state);
+ pin->pin_state = state;
+ pin_changed = true;
+ }
+
+ /* Check for phase offset and ffo change once per second */
+ if (zldpll->check_count % 2 == 0) {
+ if (zl3073x_dpll_pin_phase_offset_check(pin))
+ pin_changed = true;
+
+ if (zl3073x_dpll_pin_ffo_check(pin))
+ pin_changed = true;
+ }
+
+ if (pin_changed)
+ dpll_pin_change_ntf(pin->dpll_pin);
+ }
+}
+
+/**
+ * zl3073x_dpll_init_fine_phase_adjust - do initial fine phase adjustments
+ * @zldev: pointer to zl3073x device
+ *
+ * Performs initial fine phase adjustments needed per datasheet.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int
+zl3073x_dpll_init_fine_phase_adjust(struct zl3073x_dev *zldev)
+{
+ int rc;
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_SYNTH_PHASE_SHIFT_MASK, 0x1f);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_SYNTH_PHASE_SHIFT_INTVL, 0x01);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_write_u16(zldev, ZL_REG_SYNTH_PHASE_SHIFT_DATA, 0xffff);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_SYNTH_PHASE_SHIFT_CTRL, 0x01);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_alloc - allocate DPLL device
+ * @zldev: pointer to zl3073x device
+ * @ch: DPLL channel number
+ *
+ * Allocates DPLL device structure for given DPLL channel.
+ *
+ * Return: pointer to DPLL device on success, error pointer on error
+ */
+struct zl3073x_dpll *
+zl3073x_dpll_alloc(struct zl3073x_dev *zldev, u8 ch)
+{
+ struct zl3073x_dpll *zldpll;
+
+ zldpll = kzalloc(sizeof(*zldpll), GFP_KERNEL);
+ if (!zldpll)
+ return ERR_PTR(-ENOMEM);
+
+ zldpll->dev = zldev;
+ zldpll->id = ch;
+ INIT_LIST_HEAD(&zldpll->pins);
+
+ return zldpll;
+}
+
+/**
+ * zl3073x_dpll_free - free DPLL device
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Deallocates given DPLL device previously allocated by zl3073x_dpll_alloc().
+ */
+void
+zl3073x_dpll_free(struct zl3073x_dpll *zldpll)
+{
+ WARN(zldpll->dpll_dev, "DPLL device is still registered\n");
+
+ kfree(zldpll);
+}
+
+/**
+ * zl3073x_dpll_register - register DPLL device and all its pins
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Registers given DPLL device and all its pins into DPLL sub-system.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int
+zl3073x_dpll_register(struct zl3073x_dpll *zldpll)
+{
+ int rc;
+
+ rc = zl3073x_dpll_device_register(zldpll);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_dpll_pins_register(zldpll);
+ if (rc) {
+ zl3073x_dpll_device_unregister(zldpll);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_dpll_unregister - unregister DPLL device and its pins
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Unregisters given DPLL device and all its pins from DPLL sub-system
+ * previously registered by zl3073x_dpll_register().
+ */
+void
+zl3073x_dpll_unregister(struct zl3073x_dpll *zldpll)
+{
+ /* Unregister all pins and dpll */
+ zl3073x_dpll_pins_unregister(zldpll);
+ zl3073x_dpll_device_unregister(zldpll);
+}
diff --git a/drivers/dpll/zl3073x/dpll.h b/drivers/dpll/zl3073x/dpll.h
new file mode 100644
index 000000000000..304910ffc9c0
--- /dev/null
+++ b/drivers/dpll/zl3073x/dpll.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_DPLL_H
+#define _ZL3073X_DPLL_H
+
+#include <linux/dpll.h>
+#include <linux/list.h>
+
+#include "core.h"
+
+/**
+ * struct zl3073x_dpll - ZL3073x DPLL sub-device structure
+ * @list: this DPLL list entry
+ * @dev: pointer to multi-function parent device
+ * @id: DPLL index
+ * @refsel_mode: reference selection mode
+ * @forced_ref: selected reference in forced reference lock mode
+ * @check_count: periodic check counter
+ * @phase_monitor: is phase offset monitor enabled
+ * @dpll_dev: pointer to registered DPLL device
+ * @lock_status: last saved DPLL lock status
+ * @pins: list of pins
+ */
+struct zl3073x_dpll {
+ struct list_head list;
+ struct zl3073x_dev *dev;
+ u8 id;
+ u8 refsel_mode;
+ u8 forced_ref;
+ u8 check_count;
+ bool phase_monitor;
+ struct dpll_device *dpll_dev;
+ enum dpll_lock_status lock_status;
+ struct list_head pins;
+};
+
+struct zl3073x_dpll *zl3073x_dpll_alloc(struct zl3073x_dev *zldev, u8 ch);
+void zl3073x_dpll_free(struct zl3073x_dpll *zldpll);
+
+int zl3073x_dpll_register(struct zl3073x_dpll *zldpll);
+void zl3073x_dpll_unregister(struct zl3073x_dpll *zldpll);
+
+int zl3073x_dpll_init_fine_phase_adjust(struct zl3073x_dev *zldev);
+void zl3073x_dpll_changes_check(struct zl3073x_dpll *zldpll);
+
+#endif /* _ZL3073X_DPLL_H */
diff --git a/drivers/dpll/zl3073x/i2c.c b/drivers/dpll/zl3073x/i2c.c
new file mode 100644
index 000000000000..7bbfdd4ed867
--- /dev/null
+++ b/drivers/dpll/zl3073x/i2c.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "core.h"
+
+static int zl3073x_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct zl3073x_dev *zldev;
+
+ zldev = zl3073x_devm_alloc(dev);
+ if (IS_ERR(zldev))
+ return PTR_ERR(zldev);
+
+ zldev->regmap = devm_regmap_init_i2c(client, &zl3073x_regmap_config);
+ if (IS_ERR(zldev->regmap))
+ return dev_err_probe(dev, PTR_ERR(zldev->regmap),
+ "Failed to initialize regmap\n");
+
+ return zl3073x_dev_probe(zldev, i2c_get_match_data(client));
+}
+
+static const struct i2c_device_id zl3073x_i2c_id[] = {
+ {
+ .name = "zl30731",
+ .driver_data = (kernel_ulong_t)&zl30731_chip_info,
+ },
+ {
+ .name = "zl30732",
+ .driver_data = (kernel_ulong_t)&zl30732_chip_info,
+ },
+ {
+ .name = "zl30733",
+ .driver_data = (kernel_ulong_t)&zl30733_chip_info,
+ },
+ {
+ .name = "zl30734",
+ .driver_data = (kernel_ulong_t)&zl30734_chip_info,
+ },
+ {
+ .name = "zl30735",
+ .driver_data = (kernel_ulong_t)&zl30735_chip_info,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, zl3073x_i2c_id);
+
+static const struct of_device_id zl3073x_i2c_of_match[] = {
+ { .compatible = "microchip,zl30731", .data = &zl30731_chip_info },
+ { .compatible = "microchip,zl30732", .data = &zl30732_chip_info },
+ { .compatible = "microchip,zl30733", .data = &zl30733_chip_info },
+ { .compatible = "microchip,zl30734", .data = &zl30734_chip_info },
+ { .compatible = "microchip,zl30735", .data = &zl30735_chip_info },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zl3073x_i2c_of_match);
+
+static struct i2c_driver zl3073x_i2c_driver = {
+ .driver = {
+ .name = "zl3073x-i2c",
+ .of_match_table = zl3073x_i2c_of_match,
+ },
+ .probe = zl3073x_i2c_probe,
+ .id_table = zl3073x_i2c_id,
+};
+module_i2c_driver(zl3073x_i2c_driver);
+
+MODULE_AUTHOR("Ivan Vecera <ivecera@redhat.com>");
+MODULE_DESCRIPTION("Microchip ZL3073x I2C driver");
+MODULE_IMPORT_NS("ZL3073X");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dpll/zl3073x/prop.c b/drivers/dpll/zl3073x/prop.c
new file mode 100644
index 000000000000..4cf7e8aefcb3
--- /dev/null
+++ b/drivers/dpll/zl3073x/prop.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/array_size.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fwnode.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "core.h"
+#include "prop.h"
+
+/**
+ * zl3073x_pin_check_freq - verify frequency for given pin
+ * @zldev: pointer to zl3073x device
+ * @dir: pin direction
+ * @id: pin index
+ * @freq: frequency to check
+ *
+ * The function checks that the given frequency is valid for the device.
+ * For input pins it checks that the frequency can be factorized using
+ * the supported base frequencies. For output pins it checks that the
+ * frequency divides the connected synth frequency without remainder.
+ *
+ * Return: true if the frequency is valid, false if not.
+ */
+static bool
+zl3073x_pin_check_freq(struct zl3073x_dev *zldev, enum dpll_pin_direction dir,
+ u8 id, u64 freq)
+{
+ if (freq > U32_MAX)
+ goto err_inv_freq;
+
+ if (dir == DPLL_PIN_DIRECTION_INPUT) {
+ int rc;
+
+ /* Check if the frequency can be factorized */
+ rc = zl3073x_ref_freq_factorize(freq, NULL, NULL);
+ if (rc)
+ goto err_inv_freq;
+ } else {
+ u32 synth_freq;
+ u8 out, synth;
+
+ /* Get output pin synthesizer */
+ out = zl3073x_output_pin_out_get(id);
+ synth = zl3073x_out_synth_get(zldev, out);
+
+ /* Get synth frequency */
+ synth_freq = zl3073x_synth_freq_get(zldev, synth);
+
+ /* Check the frequency divides synth frequency */
+ if (synth_freq % (u32)freq)
+ goto err_inv_freq;
+ }
+
+ return true;
+
+err_inv_freq:
+ dev_warn(zldev->dev,
+ "Unsupported frequency %llu Hz in firmware node\n", freq);
+
+ return false;
+}
+
+/**
+ * zl3073x_prop_pin_package_label_set - generate package label for the pin
+ * @zldev: pointer to zl3073x device
+ * @props: pointer to pin properties
+ * @dir: pin direction
+ * @id: pin index
+ *
+ * Generates package label string and stores it into pin properties structure.
+ *
+ * Possible formats:
+ * REF<n> - differential input reference
+ * REF<n>P & REF<n>N - single-ended input reference (P or N pin)
+ * OUT<n> - differential output
+ * OUT<n>P & OUT<n>N - single-ended output (P or N pin)
+ */
+static void
+zl3073x_prop_pin_package_label_set(struct zl3073x_dev *zldev,
+ struct zl3073x_pin_props *props,
+ enum dpll_pin_direction dir, u8 id)
+{
+ const char *prefix, *suffix;
+ bool is_diff;
+
+ if (dir == DPLL_PIN_DIRECTION_INPUT) {
+ u8 ref;
+
+ prefix = "REF";
+ ref = zl3073x_input_pin_ref_get(id);
+ is_diff = zl3073x_ref_is_diff(zldev, ref);
+ } else {
+ u8 out;
+
+ prefix = "OUT";
+ out = zl3073x_output_pin_out_get(id);
+ is_diff = zl3073x_out_is_diff(zldev, out);
+ }
+
+ if (!is_diff)
+ suffix = zl3073x_is_p_pin(id) ? "P" : "N";
+ else
+ suffix = ""; /* No suffix for differential one */
+
+ snprintf(props->package_label, sizeof(props->package_label), "%s%u%s",
+ prefix, id / 2, suffix);
+
+ /* Set package_label pointer in DPLL core properties to generated
+ * string.
+ */
+ props->dpll_props.package_label = props->package_label;
+}
+
+/**
+ * zl3073x_prop_pin_fwnode_get - get fwnode for given pin
+ * @zldev: pointer to zl3073x device
+ * @props: pointer to pin properties
+ * @dir: pin direction
+ * @id: pin index
+ *
+ * Return: 0 on success, -ENOENT if the firmware node does not exist
+ */
+static int
+zl3073x_prop_pin_fwnode_get(struct zl3073x_dev *zldev,
+ struct zl3073x_pin_props *props,
+ enum dpll_pin_direction dir, u8 id)
+{
+ struct fwnode_handle *pins_node, *pin_node;
+ const char *node_name;
+
+ if (dir == DPLL_PIN_DIRECTION_INPUT)
+ node_name = "input-pins";
+ else
+ node_name = "output-pins";
+
+ /* Get node containing input or output pins */
+ pins_node = device_get_named_child_node(zldev->dev, node_name);
+ if (!pins_node) {
+ dev_dbg(zldev->dev, "'%s' sub-node is missing\n", node_name);
+ return -ENOENT;
+ }
+
+ /* Enumerate child pin nodes and find the requested one */
+ fwnode_for_each_child_node(pins_node, pin_node) {
+ u32 reg;
+
+ if (fwnode_property_read_u32(pin_node, "reg", &reg))
+ continue;
+
+ if (id == reg)
+ break;
+ }
+
+ /* Release pin parent node */
+ fwnode_handle_put(pins_node);
+
+ /* Save found node */
+ props->fwnode = pin_node;
+
+ dev_dbg(zldev->dev, "Firmware node for %s %sfound\n",
+ props->package_label, pin_node ? "" : "NOT ");
+
+ return pin_node ? 0 : -ENOENT;
+}
+
+/**
+ * zl3073x_pin_props_get - get pin properties
+ * @zldev: pointer to zl3073x device
+ * @dir: pin direction
+ * @index: pin index
+ *
+ * The function looks for the firmware node of the given pin if it is
+ * provided by the system firmware (DT or ACPI), allocates the pin
+ * properties structure, generates the package label string according to
+ * the pin type and optionally fetches the board label, connection type,
+ * supported frequencies and esync capability from the firmware node if
+ * it exists.
+ *
+ * Pointer that is returned by this function should be freed using
+ * zl3073x_pin_props_put().
+ *
+ * Return:
+ * * pointer to allocated pin properties structure on success
+ * * error pointer in case of error
+ */
+struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
+ enum dpll_pin_direction dir,
+ u8 index)
+{
+ struct dpll_pin_frequency *ranges;
+ struct zl3073x_pin_props *props;
+ int i, j, num_freqs, rc;
+ const char *type;
+ u64 *freqs;
+
+ props = kzalloc(sizeof(*props), GFP_KERNEL);
+ if (!props)
+ return ERR_PTR(-ENOMEM);
+
+ /* Set default pin type and capabilities */
+ if (dir == DPLL_PIN_DIRECTION_INPUT) {
+ props->dpll_props.type = DPLL_PIN_TYPE_EXT;
+ props->dpll_props.capabilities =
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ } else {
+ props->dpll_props.type = DPLL_PIN_TYPE_GNSS;
+ }
+
+ props->dpll_props.phase_range.min = S32_MIN;
+ props->dpll_props.phase_range.max = S32_MAX;
+
+ zl3073x_prop_pin_package_label_set(zldev, props, dir, index);
+
+ /* Get firmware node for the given pin */
+ rc = zl3073x_prop_pin_fwnode_get(zldev, props, dir, index);
+ if (rc)
+ return props; /* Return if it does not exist */
+
+ /* Look for label property and store the value as board label */
+ fwnode_property_read_string(props->fwnode, "label",
+ &props->dpll_props.board_label);
+
+ /* Look for pin type property and translate its value to DPLL
+ * pin type enum if it is present.
+ */
+ if (!fwnode_property_read_string(props->fwnode, "connection-type",
+ &type)) {
+ if (!strcmp(type, "ext"))
+ props->dpll_props.type = DPLL_PIN_TYPE_EXT;
+ else if (!strcmp(type, "gnss"))
+ props->dpll_props.type = DPLL_PIN_TYPE_GNSS;
+ else if (!strcmp(type, "int"))
+ props->dpll_props.type = DPLL_PIN_TYPE_INT_OSCILLATOR;
+ else if (!strcmp(type, "synce"))
+ props->dpll_props.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT;
+ else
+ dev_warn(zldev->dev,
+ "Unknown or unsupported pin type '%s'\n",
+ type);
+ }
+
+ /* Check if the pin supports embedded sync control */
+ props->esync_control = fwnode_property_read_bool(props->fwnode,
+ "esync-control");
+
+ /* Read supported frequencies property if it is specified */
+ num_freqs = fwnode_property_count_u64(props->fwnode,
+ "supported-frequencies-hz");
+ if (num_freqs <= 0)
+ /* Return if the property does not exist or number is 0 */
+ return props;
+
+ /* The firmware node specifies a list of supported frequencies while
+ * the DPLL core pin properties require a list of frequency ranges,
+ * so read the frequency list into a temporary array first.
+ */
+ freqs = kcalloc(num_freqs, sizeof(*freqs), GFP_KERNEL);
+ if (!freqs) {
+ rc = -ENOMEM;
+ goto err_alloc_freqs;
+ }
+
+ /* Read frequencies list from firmware node */
+ fwnode_property_read_u64_array(props->fwnode,
+ "supported-frequencies-hz", freqs,
+ num_freqs);
+
+ /* Allocate frequency ranges list and fill it */
+ ranges = kcalloc(num_freqs, sizeof(*ranges), GFP_KERNEL);
+ if (!ranges) {
+ rc = -ENOMEM;
+ goto err_alloc_ranges;
+ }
+
+ /* Convert the list of frequencies to a list of frequency ranges but
+ * filter out frequencies that are not representable by the device.
+ */
+ for (i = 0, j = 0; i < num_freqs; i++) {
+ struct dpll_pin_frequency freq = DPLL_PIN_FREQUENCY(freqs[i]);
+
+ if (zl3073x_pin_check_freq(zldev, dir, index, freqs[i])) {
+ ranges[j] = freq;
+ j++;
+ }
+ }
+
+ /* Save number of freq ranges and pointer to them into pin properties */
+ props->dpll_props.freq_supported = ranges;
+ props->dpll_props.freq_supported_num = j;
+
+ /* Free temporary array */
+ kfree(freqs);
+
+ return props;
+
+err_alloc_ranges:
+ kfree(freqs);
+err_alloc_freqs:
+ fwnode_handle_put(props->fwnode);
+ kfree(props);
+
+ return ERR_PTR(rc);
+}
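+
+/* For illustration only (node names and values here are hypothetical;
+ * the authoritative format is defined by the DT bindings): a firmware
+ * node consumed by zl3073x_pin_props_get() could look like:
+ *
+ *	input-pins {
+ *		pin@0 {
+ *			reg = <0>;
+ *			label = "ref-a";
+ *			connection-type = "ext";
+ *			esync-control;
+ *			supported-frequencies-hz = /bits/ 64 <1 10000000>;
+ *		};
+ *	};
+ */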
+
+/**
+ * zl3073x_pin_props_put - release pin properties
+ * @props: pin properties to free
+ *
+ * The function deallocates given pin properties structure.
+ */
+void zl3073x_pin_props_put(struct zl3073x_pin_props *props)
+{
+ /* Free supported frequency ranges list if it is present */
+ kfree(props->dpll_props.freq_supported);
+
+ /* Put firmware handle if it is present */
+ if (props->fwnode)
+ fwnode_handle_put(props->fwnode);
+
+ kfree(props);
+}
+
+/**
+ * zl3073x_prop_dpll_type_get - get DPLL channel type
+ * @zldev: pointer to zl3073x device
+ * @index: DPLL channel index
+ *
+ * Return: DPLL type for given DPLL channel
+ */
+enum dpll_type
+zl3073x_prop_dpll_type_get(struct zl3073x_dev *zldev, u8 index)
+{
+ const char *types[ZL3073X_MAX_CHANNELS];
+ int count;
+
+ /* Read dpll types property from firmware */
+ count = device_property_read_string_array(zldev->dev, "dpll-types",
+ types, ARRAY_SIZE(types));
+
+ /* Return default if property or entry for given channel is missing */
+ if (index >= count)
+ return DPLL_TYPE_PPS;
+
+ if (!strcmp(types[index], "pps"))
+ return DPLL_TYPE_PPS;
+ else if (!strcmp(types[index], "eec"))
+ return DPLL_TYPE_EEC;
+
+ dev_info(zldev->dev, "Unknown DPLL type '%s', using default\n",
+ types[index]);
+
+ return DPLL_TYPE_PPS; /* Default */
+}
diff --git a/drivers/dpll/zl3073x/prop.h b/drivers/dpll/zl3073x/prop.h
new file mode 100644
index 000000000000..721a18f05938
--- /dev/null
+++ b/drivers/dpll/zl3073x/prop.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_PROP_H
+#define _ZL3073X_PROP_H
+
+#include <linux/dpll.h>
+
+#include "core.h"
+
+struct fwnode_handle;
+
+/**
+ * struct zl3073x_pin_props - pin properties
+ * @fwnode: pin firmware node
+ * @dpll_props: DPLL core pin properties
+ * @package_label: pin package label
+ * @esync_control: embedded sync support
+ */
+struct zl3073x_pin_props {
+ struct fwnode_handle *fwnode;
+ struct dpll_pin_properties dpll_props;
+ char package_label[8];
+ bool esync_control;
+};
+
+enum dpll_type zl3073x_prop_dpll_type_get(struct zl3073x_dev *zldev, u8 index);
+
+struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
+ enum dpll_pin_direction dir,
+ u8 index);
+
+void zl3073x_pin_props_put(struct zl3073x_pin_props *props);
+
+#endif /* _ZL3073X_PROP_H */
diff --git a/drivers/dpll/zl3073x/regs.h b/drivers/dpll/zl3073x/regs.h
new file mode 100644
index 000000000000..614e33128a5c
--- /dev/null
+++ b/drivers/dpll/zl3073x/regs.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_REGS_H
+#define _ZL3073X_REGS_H
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+/*
+ * Register address structure:
+ * ===========================
+ * 25 19 18 16 15 7 6 0
+ * +------------------------------------------+
+ * | max_offset | size | page | page_offset |
+ * +------------------------------------------+
+ *
+ * page_offset ... <0x00..0x7F>
+ * page .......... HW page number
+ * size .......... register byte size (1, 2, 4 or 6)
+ * max_offset .... maximal offset for indexed registers
+ * (for non-indexed regs max_offset == page_offset)
+ */
+
+#define ZL_REG_OFFSET_MASK GENMASK(6, 0)
+#define ZL_REG_PAGE_MASK GENMASK(15, 7)
+#define ZL_REG_SIZE_MASK GENMASK(18, 16)
+#define ZL_REG_MAX_OFFSET_MASK GENMASK(25, 19)
+#define ZL_REG_ADDR_MASK GENMASK(15, 0)
+
+#define ZL_REG_OFFSET(_reg) FIELD_GET(ZL_REG_OFFSET_MASK, _reg)
+#define ZL_REG_PAGE(_reg) FIELD_GET(ZL_REG_PAGE_MASK, _reg)
+#define ZL_REG_MAX_OFFSET(_reg) FIELD_GET(ZL_REG_MAX_OFFSET_MASK, _reg)
+#define ZL_REG_SIZE(_reg) FIELD_GET(ZL_REG_SIZE_MASK, _reg)
+#define ZL_REG_ADDR(_reg) FIELD_GET(ZL_REG_ADDR_MASK, _reg)
+
+/**
+ * ZL_REG_IDX - define indexed register
+ * @_idx: index of register to access
+ * @_page: register page
+ * @_offset: register offset in page
+ * @_size: register byte size (1, 2, 4 or 6)
+ * @_items: number of register indices
+ * @_stride: stride between items in bytes
+ *
+ * All parameters except @_idx should be constant.
+ */
+#define ZL_REG_IDX(_idx, _page, _offset, _size, _items, _stride) \
+ (FIELD_PREP(ZL_REG_OFFSET_MASK, \
+ (_offset) + (_idx) * (_stride)) | \
+ FIELD_PREP_CONST(ZL_REG_PAGE_MASK, _page) | \
+ FIELD_PREP_CONST(ZL_REG_SIZE_MASK, _size) | \
+ FIELD_PREP_CONST(ZL_REG_MAX_OFFSET_MASK, \
+ (_offset) + ((_items) - 1) * (_stride)))
+
+/**
+ * ZL_REG - define simple (non-indexed) register
+ * @_page: register page
+ * @_offset: register offset in page
+ * @_size: register byte size (1, 2, 4 or 6)
+ *
+ * All parameters should be constant.
+ */
+#define ZL_REG(_page, _offset, _size) \
+ ZL_REG_IDX(0, _page, _offset, _size, 1, 0)
+
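+/*
+ * Encoding examples (derived from the definitions below): ZL_REG(14,
+ * 0x0c, 4), used for ZL_REG_OUTPUT_DIV, packs page_offset 0x0c, page 14,
+ * size 4 and max_offset 0x0c (non-indexed). ZL_REG_REF_PHASE(_idx) is
+ * indexed: page_offset 0x20 + _idx * 6, page 4, size 6 and max_offset
+ * 0x20 + (ZL3073X_NUM_REFS - 1) * 6.
+ */
+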
+/**************************
+ * Register Page 0, General
+ **************************/
+
+#define ZL_REG_ID ZL_REG(0, 0x01, 2)
+#define ZL_REG_REVISION ZL_REG(0, 0x03, 2)
+#define ZL_REG_FW_VER ZL_REG(0, 0x05, 2)
+#define ZL_REG_CUSTOM_CONFIG_VER ZL_REG(0, 0x07, 4)
+
+/*************************
+ * Register Page 2, Status
+ *************************/
+
+#define ZL_REG_REF_MON_STATUS(_idx) \
+ ZL_REG_IDX(_idx, 2, 0x02, 1, ZL3073X_NUM_REFS, 1)
+#define ZL_REF_MON_STATUS_OK 0 /* all bits zeroed */
+
+#define ZL_REG_DPLL_MON_STATUS(_idx) \
+ ZL_REG_IDX(_idx, 2, 0x10, 1, ZL3073X_MAX_CHANNELS, 1)
+#define ZL_DPLL_MON_STATUS_STATE GENMASK(1, 0)
+#define ZL_DPLL_MON_STATUS_STATE_ACQUIRING 0
+#define ZL_DPLL_MON_STATUS_STATE_LOCK 1
+#define ZL_DPLL_MON_STATUS_STATE_HOLDOVER 2
+#define ZL_DPLL_MON_STATUS_HO_READY BIT(2)
+
+#define ZL_REG_DPLL_REFSEL_STATUS(_idx) \
+ ZL_REG_IDX(_idx, 2, 0x30, 1, ZL3073X_MAX_CHANNELS, 1)
+#define ZL_DPLL_REFSEL_STATUS_REFSEL GENMASK(3, 0)
+#define ZL_DPLL_REFSEL_STATUS_STATE GENMASK(6, 4)
+#define ZL_DPLL_REFSEL_STATUS_STATE_LOCK 4
+
+#define ZL_REG_REF_FREQ(_idx) \
+ ZL_REG_IDX(_idx, 2, 0x44, 4, ZL3073X_NUM_REFS, 4)
+
+/**********************
+ * Register Page 4, Ref
+ **********************/
+
+#define ZL_REG_REF_PHASE_ERR_READ_RQST ZL_REG(4, 0x0f, 1)
+#define ZL_REF_PHASE_ERR_READ_RQST_RD BIT(0)
+
+#define ZL_REG_REF_FREQ_MEAS_CTRL ZL_REG(4, 0x1c, 1)
+#define ZL_REF_FREQ_MEAS_CTRL GENMASK(1, 0)
+#define ZL_REF_FREQ_MEAS_CTRL_REF_FREQ 1
+#define ZL_REF_FREQ_MEAS_CTRL_REF_FREQ_OFF 2
+#define ZL_REF_FREQ_MEAS_CTRL_DPLL_FREQ_OFF 3
+
+#define ZL_REG_REF_FREQ_MEAS_MASK_3_0 ZL_REG(4, 0x1d, 1)
+#define ZL_REF_FREQ_MEAS_MASK_3_0(_ref) BIT(_ref)
+
+#define ZL_REG_REF_FREQ_MEAS_MASK_4 ZL_REG(4, 0x1e, 1)
+#define ZL_REF_FREQ_MEAS_MASK_4(_ref) BIT((_ref) - 8)
+
+#define ZL_REG_DPLL_MEAS_REF_FREQ_CTRL ZL_REG(4, 0x1f, 1)
+#define ZL_DPLL_MEAS_REF_FREQ_CTRL_EN BIT(0)
+#define ZL_DPLL_MEAS_REF_FREQ_CTRL_IDX GENMASK(6, 4)
+
+#define ZL_REG_REF_PHASE(_idx) \
+ ZL_REG_IDX(_idx, 4, 0x20, 6, ZL3073X_NUM_REFS, 6)
+
+/***********************
+ * Register Page 5, DPLL
+ ***********************/
+
+#define ZL_REG_DPLL_MODE_REFSEL(_idx) \
+ ZL_REG_IDX(_idx, 5, 0x04, 1, ZL3073X_MAX_CHANNELS, 4)
+#define ZL_DPLL_MODE_REFSEL_MODE GENMASK(2, 0)
+#define ZL_DPLL_MODE_REFSEL_MODE_FREERUN 0
+#define ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER 1
+#define ZL_DPLL_MODE_REFSEL_MODE_REFLOCK 2
+#define ZL_DPLL_MODE_REFSEL_MODE_AUTO 3
+#define ZL_DPLL_MODE_REFSEL_MODE_NCO 4
+#define ZL_DPLL_MODE_REFSEL_REF GENMASK(7, 4)
+
+#define ZL_REG_DPLL_MEAS_CTRL ZL_REG(5, 0x50, 1)
+#define ZL_DPLL_MEAS_CTRL_EN BIT(0)
+#define ZL_DPLL_MEAS_CTRL_AVG_FACTOR GENMASK(7, 4)
+
+#define ZL_REG_DPLL_MEAS_IDX ZL_REG(5, 0x51, 1)
+#define ZL_DPLL_MEAS_IDX GENMASK(2, 0)
+
+#define ZL_REG_DPLL_PHASE_ERR_READ_MASK ZL_REG(5, 0x54, 1)
+
+#define ZL_REG_DPLL_PHASE_ERR_DATA(_idx) \
+ ZL_REG_IDX(_idx, 5, 0x55, 6, ZL3073X_MAX_CHANNELS, 6)
+
+/***********************************
+ * Register Page 9, Synth and Output
+ ***********************************/
+
+#define ZL_REG_SYNTH_CTRL(_idx) \
+ ZL_REG_IDX(_idx, 9, 0x00, 1, ZL3073X_NUM_SYNTHS, 1)
+#define ZL_SYNTH_CTRL_EN BIT(0)
+#define ZL_SYNTH_CTRL_DPLL_SEL GENMASK(6, 4)
+
+#define ZL_REG_SYNTH_PHASE_SHIFT_CTRL ZL_REG(9, 0x1e, 1)
+#define ZL_REG_SYNTH_PHASE_SHIFT_MASK ZL_REG(9, 0x1f, 1)
+#define ZL_REG_SYNTH_PHASE_SHIFT_INTVL ZL_REG(9, 0x20, 1)
+#define ZL_REG_SYNTH_PHASE_SHIFT_DATA ZL_REG(9, 0x21, 2)
+
+#define ZL_REG_OUTPUT_CTRL(_idx) \
+ ZL_REG_IDX(_idx, 9, 0x28, 1, ZL3073X_NUM_OUTS, 1)
+#define ZL_OUTPUT_CTRL_EN BIT(0)
+#define ZL_OUTPUT_CTRL_SYNTH_SEL GENMASK(6, 4)
+
+/*******************************
+ * Register Page 10, Ref Mailbox
+ *******************************/
+
+#define ZL_REG_REF_MB_MASK ZL_REG(10, 0x02, 2)
+
+#define ZL_REG_REF_MB_SEM ZL_REG(10, 0x04, 1)
+#define ZL_REF_MB_SEM_WR BIT(0)
+#define ZL_REF_MB_SEM_RD BIT(1)
+
+#define ZL_REG_REF_FREQ_BASE ZL_REG(10, 0x05, 2)
+#define ZL_REG_REF_FREQ_MULT ZL_REG(10, 0x07, 2)
+#define ZL_REG_REF_RATIO_M ZL_REG(10, 0x09, 2)
+#define ZL_REG_REF_RATIO_N ZL_REG(10, 0x0b, 2)
+
+#define ZL_REG_REF_CONFIG ZL_REG(10, 0x0d, 1)
+#define ZL_REF_CONFIG_ENABLE BIT(0)
+#define ZL_REF_CONFIG_DIFF_EN BIT(2)
+
+#define ZL_REG_REF_PHASE_OFFSET_COMP ZL_REG(10, 0x28, 6)
+
+#define ZL_REG_REF_SYNC_CTRL ZL_REG(10, 0x2e, 1)
+#define ZL_REF_SYNC_CTRL_MODE GENMASK(2, 0)
+#define ZL_REF_SYNC_CTRL_MODE_REFSYNC_PAIR_OFF 0
+#define ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75 2
+
+#define ZL_REG_REF_ESYNC_DIV ZL_REG(10, 0x30, 4)
+#define ZL_REF_ESYNC_DIV_1HZ 0
+
+/********************************
+ * Register Page 12, DPLL Mailbox
+ ********************************/
+
+#define ZL_REG_DPLL_MB_MASK ZL_REG(12, 0x02, 2)
+
+#define ZL_REG_DPLL_MB_SEM ZL_REG(12, 0x04, 1)
+#define ZL_DPLL_MB_SEM_WR BIT(0)
+#define ZL_DPLL_MB_SEM_RD BIT(1)
+
+#define ZL_REG_DPLL_REF_PRIO(_idx) \
+ ZL_REG_IDX(_idx, 12, 0x52, 1, ZL3073X_NUM_REFS / 2, 1)
+#define ZL_DPLL_REF_PRIO_REF_P GENMASK(3, 0)
+#define ZL_DPLL_REF_PRIO_REF_N GENMASK(7, 4)
+#define ZL_DPLL_REF_PRIO_MAX 14
+#define ZL_DPLL_REF_PRIO_NONE 15
+
+/*********************************
+ * Register Page 13, Synth Mailbox
+ *********************************/
+
+#define ZL_REG_SYNTH_MB_MASK ZL_REG(13, 0x02, 2)
+
+#define ZL_REG_SYNTH_MB_SEM ZL_REG(13, 0x04, 1)
+#define ZL_SYNTH_MB_SEM_WR BIT(0)
+#define ZL_SYNTH_MB_SEM_RD BIT(1)
+
+#define ZL_REG_SYNTH_FREQ_BASE ZL_REG(13, 0x06, 2)
+#define ZL_REG_SYNTH_FREQ_MULT ZL_REG(13, 0x08, 4)
+#define ZL_REG_SYNTH_FREQ_M ZL_REG(13, 0x0c, 2)
+#define ZL_REG_SYNTH_FREQ_N ZL_REG(13, 0x0e, 2)
+
+/**********************************
+ * Register Page 14, Output Mailbox
+ **********************************/
+#define ZL_REG_OUTPUT_MB_MASK ZL_REG(14, 0x02, 2)
+
+#define ZL_REG_OUTPUT_MB_SEM ZL_REG(14, 0x04, 1)
+#define ZL_OUTPUT_MB_SEM_WR BIT(0)
+#define ZL_OUTPUT_MB_SEM_RD BIT(1)
+
+#define ZL_REG_OUTPUT_MODE ZL_REG(14, 0x05, 1)
+#define ZL_OUTPUT_MODE_CLOCK_TYPE GENMASK(2, 0)
+#define ZL_OUTPUT_MODE_CLOCK_TYPE_NORMAL 0
+#define ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC 1
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT GENMASK(7, 4)
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_DISABLED 0
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_LVDS 1
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_DIFF 2
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_LOWVCM 3
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_2 4
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_1P 5
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_1N 6
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_INV 7
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV 12
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV 15
+
+#define ZL_REG_OUTPUT_DIV ZL_REG(14, 0x0c, 4)
+#define ZL_REG_OUTPUT_WIDTH ZL_REG(14, 0x10, 4)
+#define ZL_REG_OUTPUT_ESYNC_PERIOD ZL_REG(14, 0x14, 4)
+#define ZL_REG_OUTPUT_ESYNC_WIDTH ZL_REG(14, 0x18, 4)
+#define ZL_REG_OUTPUT_PHASE_COMP ZL_REG(14, 0x20, 4)
+
+#endif /* _ZL3073X_REGS_H */
diff --git a/drivers/dpll/zl3073x/spi.c b/drivers/dpll/zl3073x/spi.c
new file mode 100644
index 000000000000..af901b4d6dda
--- /dev/null
+++ b/drivers/dpll/zl3073x/spi.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "core.h"
+
+static int zl3073x_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct zl3073x_dev *zldev;
+
+ zldev = zl3073x_devm_alloc(dev);
+ if (IS_ERR(zldev))
+ return PTR_ERR(zldev);
+
+ zldev->regmap = devm_regmap_init_spi(spi, &zl3073x_regmap_config);
+ if (IS_ERR(zldev->regmap))
+ return dev_err_probe(dev, PTR_ERR(zldev->regmap),
+ "Failed to initialize regmap\n");
+
+ return zl3073x_dev_probe(zldev, spi_get_device_match_data(spi));
+}
+
+static const struct spi_device_id zl3073x_spi_id[] = {
+ {
+ .name = "zl30731",
+ .driver_data = (kernel_ulong_t)&zl30731_chip_info
+ },
+ {
+ .name = "zl30732",
+ .driver_data = (kernel_ulong_t)&zl30732_chip_info,
+ },
+ {
+ .name = "zl30733",
+ .driver_data = (kernel_ulong_t)&zl30733_chip_info,
+ },
+ {
+ .name = "zl30734",
+ .driver_data = (kernel_ulong_t)&zl30734_chip_info,
+ },
+ {
+ .name = "zl30735",
+ .driver_data = (kernel_ulong_t)&zl30735_chip_info,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, zl3073x_spi_id);
+
+static const struct of_device_id zl3073x_spi_of_match[] = {
+ { .compatible = "microchip,zl30731", .data = &zl30731_chip_info },
+ { .compatible = "microchip,zl30732", .data = &zl30732_chip_info },
+ { .compatible = "microchip,zl30733", .data = &zl30733_chip_info },
+ { .compatible = "microchip,zl30734", .data = &zl30734_chip_info },
+ { .compatible = "microchip,zl30735", .data = &zl30735_chip_info },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zl3073x_spi_of_match);
+
+static struct spi_driver zl3073x_spi_driver = {
+ .driver = {
+ .name = "zl3073x-spi",
+ .of_match_table = zl3073x_spi_of_match,
+ },
+ .probe = zl3073x_spi_probe,
+ .id_table = zl3073x_spi_id,
+};
+module_spi_driver(zl3073x_spi_driver);
+
+MODULE_AUTHOR("Ivan Vecera <ivecera@redhat.com>");
+MODULE_DESCRIPTION("Microchip ZL3073x SPI driver");
+MODULE_IMPORT_NS("ZL3073X");
+MODULE_LICENSE("GPL");
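
The bus glue is deliberately thin: allocate the common device, build a regmap for the transport, and hand over to zl3073x_dev_probe() with the matched chip info. An I2C variant would differ only in the regmap initializer and the match-data lookup; a sketch (hypothetical, not taken from this series):

static int zl3073x_i2c_probe_sketch(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct zl3073x_dev *zldev;

	zldev = zl3073x_devm_alloc(dev);
	if (IS_ERR(zldev))
		return PTR_ERR(zldev);

	/* Same regmap config as the SPI path, different bus initializer. */
	zldev->regmap = devm_regmap_init_i2c(client, &zl3073x_regmap_config);
	if (IS_ERR(zldev->regmap))
		return dev_err_probe(dev, PTR_ERR(zldev->regmap),
				     "Failed to initialize regmap\n");

	return zl3073x_dev_probe(zldev, i2c_get_match_data(client));
}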
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 01354b9de8b2..aae774e7a5c3 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(fw_schedule_bus_reset);
static void br_work(struct work_struct *work)
{
- struct fw_card *card = container_of(work, struct fw_card, br_work.work);
+ struct fw_card *card = from_work(card, work, br_work.work);
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
@@ -273,10 +273,6 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
fw_device_set_broadcast_channel);
}
-static const char gap_count_table[] = {
- 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
-};
-
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
fw_card_get(card);
@@ -286,7 +282,10 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
static void bm_work(struct work_struct *work)
{
- struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
+ static const char gap_count_table[] = {
+ 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+ };
+ struct fw_card *card = from_work(card, work, bm_work.work);
struct fw_device *root_device, *irm_device;
struct fw_node *root_node;
int root_id, new_root_id, irm_id, bm_id, local_id;
@@ -574,7 +573,6 @@ EXPORT_SYMBOL(fw_card_initialize);
int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
unsigned int supported_isoc_contexts)
{
- struct workqueue_struct *isoc_wq;
int ret;
// This workqueue should be:
@@ -589,29 +587,48 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
// * == WQ_SYSFS Parameters are available via sysfs.
// * max_active == n_it + n_ir A hardIRQ could notify events for multiple isochronous
// contexts if they are scheduled to the same cycle.
- isoc_wq = alloc_workqueue("firewire-isoc-card%u",
- WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
- supported_isoc_contexts, card->index);
- if (!isoc_wq)
+ card->isoc_wq = alloc_workqueue("firewire-isoc-card%u",
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ supported_isoc_contexts, card->index);
+ if (!card->isoc_wq)
return -ENOMEM;
+ // This workqueue should be:
+ // * != WQ_BH Sleepable.
+ // * == WQ_UNBOUND Any core can process data for asynchronous context.
+ // * == WQ_MEM_RECLAIM Used for any backend of block device.
+ // * == WQ_FREEZABLE The target device would not be available when frozen.
+ // * == WQ_HIGHPRI High priority to process semi-realtime timestamped data.
+ // * == WQ_SYSFS Parameters are available via sysfs.
+ // * max_active == 4 A hardIRQ could notify events for a pair of request and
+ // response AR/AT contexts.
+ card->async_wq = alloc_workqueue("firewire-async-card%u",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ 4, card->index);
+ if (!card->async_wq) {
+ ret = -ENOMEM;
+ goto err_isoc;
+ }
+
card->max_receive = max_receive;
card->link_speed = link_speed;
card->guid = guid;
- guard(mutex)(&card_mutex);
+ scoped_guard(mutex, &card_mutex) {
+ generate_config_rom(card, tmp_config_rom);
+ ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
+ if (ret < 0)
+ goto err_async;
- generate_config_rom(card, tmp_config_rom);
- ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
- if (ret < 0) {
- destroy_workqueue(isoc_wq);
- return ret;
+ list_add_tail(&card->link, &card_list);
}
- card->isoc_wq = isoc_wq;
- list_add_tail(&card->link, &card_list);
-
return 0;
+err_async:
+ destroy_workqueue(card->async_wq);
+err_isoc:
+ destroy_workqueue(card->isoc_wq);
+ return ret;
}
EXPORT_SYMBOL(fw_card_add);
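
Note the switch from guard() to scoped_guard(): the function-scoped guard would have kept card_mutex held across the new destroy_workqueue() error labels, whereas scoped_guard() bounds the critical section, and the goto out of the scope still releases the mutex because the guard's cleanup handler runs on any scope exit. The shape in isolation (register_thing() is a hypothetical stand-in):

static DEFINE_MUTEX(my_lock);

static int setup_sketch(void)
{
	int ret;

	scoped_guard(mutex, &my_lock) {
		ret = register_thing();	/* hypothetical */
		if (ret < 0)
			goto err;	/* my_lock released on scope exit */
	}
	return 0;

err:
	/* heavyweight cleanup that must not run under my_lock */
	return ret;
}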
@@ -744,6 +761,7 @@ void fw_core_remove_card(struct fw_card *card)
dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
drain_workqueue(card->isoc_wq);
+ drain_workqueue(card->async_wq);
scoped_guard(spinlock_irqsave, &card->lock)
fw_destroy_nodes(card);
@@ -753,6 +771,7 @@ void fw_core_remove_card(struct fw_card *card)
wait_for_completion(&card->done);
destroy_workqueue(card->isoc_wq);
+ destroy_workqueue(card->async_wq);
WARN_ON(!list_empty(&card->transaction_list));
}
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index bd04980009a4..78b10c6ef7fe 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1313,8 +1313,7 @@ static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
static void iso_resource_work(struct work_struct *work)
{
struct iso_resource_event *e;
- struct iso_resource *r =
- container_of(work, struct iso_resource, work.work);
+ struct iso_resource *r = from_work(r, work, work.work);
struct client *client = r->client;
unsigned long index = r->resource.handle;
int generation, channel, bandwidth, todo;
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index ec3e21ad2025..aeacd4cfd694 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -853,8 +853,7 @@ static void fw_schedule_device_work(struct fw_device *device,
static void fw_device_shutdown(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
if (time_before64(get_jiffies_64(),
device->card->reset_jiffies + SHUTDOWN_DELAY)
@@ -921,8 +920,7 @@ static int update_unit(struct device *dev, void *data)
static void fw_device_update(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
fw_device_cdev_update(device);
device_for_each_child(&device->device, NULL, update_unit);
@@ -1002,8 +1000,7 @@ static int compare_configuration_rom(struct device *dev, const void *data)
static void fw_device_init(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
struct fw_card *card = device->card;
struct device *found;
u32 minor;
@@ -1184,8 +1181,7 @@ static int reread_config_rom(struct fw_device *device, int generation,
static void fw_device_refresh(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
struct fw_card *card = device->card;
int ret, node_id = device->node_id;
bool changed;
@@ -1251,8 +1247,7 @@ static void fw_device_refresh(struct work_struct *work)
static void fw_device_workfn(struct work_struct *work)
{
- struct fw_device *device = container_of(to_delayed_work(work),
- struct fw_device, work);
+ struct fw_device *device = from_work(device, to_delayed_work(work), work);
device->workfn(work);
}
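
All of the conversions above are the same substitution: from_work() (linux/workqueue.h) is a container_of() wrapper that infers the containing type from the destination variable, roughly container_of(callback_work, typeof(*var), work_fieldname), so the struct name no longer has to be repeated at every call site:

struct example_ctx {
	struct work_struct work;
};

static void example_work_fn(struct work_struct *work)
{
	/* before: the containing type must be spelled out */
	struct example_ctx *a = container_of(work, struct example_ctx, work);
	/* after: typeof(*b) supplies the type, removing the duplication */
	struct example_ctx *b = from_work(b, work, work);

	(void)a; (void)b; /* illustration only */
}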
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 2bd5deb9054e..1d1c2d8f85ae 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -550,6 +550,23 @@ const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */
+static void complete_address_handler(struct kref *kref)
+{
+ struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref);
+
+ complete(&handler->done);
+}
+
+static void get_address_handler(struct fw_address_handler *handler)
+{
+ kref_get(&handler->kref);
+}
+
+static int put_address_handler(struct fw_address_handler *handler)
+{
+ return kref_put(&handler->kref, complete_address_handler);
+}
+
/**
* fw_core_add_address_handler() - register for incoming requests
* @handler: callback
@@ -557,9 +574,10 @@ const struct fw_address_region fw_unit_space_region =
*
* region->start, ->end, and handler->length have to be quadlet-aligned.
*
- * When a request is received that falls within the specified address range,
- * the specified callback is invoked. The parameters passed to the callback
- * give the details of the particular request.
+ * When a request is received that falls within the specified address range, the specified callback
+ * is invoked. The parameters passed to the callback give the details of the particular request.
+ * The callback is invoked in workqueue context in most cases. However, if the request is
+ * initiated by the local node, the callback is invoked in the initiator's context.
*
* To be called in process context.
* Return value: 0 on success, non-zero otherwise.
@@ -595,6 +613,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
if (other != NULL) {
handler->offset += other->length;
} else {
+ init_completion(&handler->done);
+ kref_init(&handler->kref);
list_add_tail_rcu(&handler->link, &address_handler_list);
ret = 0;
break;
@@ -620,6 +640,9 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler)
list_del_rcu(&handler->link);
synchronize_rcu();
+
+ if (!put_address_handler(handler))
+ wait_for_completion(&handler->done);
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
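
The teardown protocol distilled: registration holds the initial reference from kref_init(), and each dispatched callback takes a temporary one. On removal, the registration reference is dropped; if kref_put() did not release the object because callbacks are still in flight, the remover blocks until the last put fires the completion:

struct tracked {
	struct kref kref;
	struct completion done;
};

static void tracked_release(struct kref *kref)
{
	struct tracked *t = container_of(kref, struct tracked, kref);

	complete(&t->done);
}

static void tracked_unregister(struct tracked *t)
{
	/* Drop the registration's reference; kref_put() returns nonzero
	 * only if this was the last one. */
	if (!kref_put(&t->kref, tracked_release))
		wait_for_completion(&t->done);
}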
@@ -913,22 +936,31 @@ static void handle_exclusive_region_request(struct fw_card *card,
handler = lookup_enclosing_address_handler(&address_handler_list, offset,
request->length);
if (handler)
- handler->address_callback(card, request, tcode, destination, source,
- p->generation, offset, request->data,
- request->length, handler->callback_data);
+ get_address_handler(handler);
}
- if (!handler)
+ if (!handler) {
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+ return;
+ }
+
+ // Outside the RCU read-side critical section. Without spinlock. With reference count.
+ handler->address_callback(card, request, tcode, destination, source, p->generation, offset,
+ request->data, request->length, handler->callback_data);
+ put_address_handler(handler);
}
+// To use the kmalloc allocator efficiently, this should be a power of two.
+#define BUFFER_ON_KERNEL_STACK_SIZE 4
+
static void handle_fcp_region_request(struct fw_card *card,
struct fw_packet *p,
struct fw_request *request,
unsigned long long offset)
{
- struct fw_address_handler *handler;
- int tcode, destination, source;
+ struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE];
+ struct fw_address_handler *handler, **handlers;
+ int tcode, destination, source, i, count, buffer_size;
if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
@@ -949,15 +981,55 @@ static void handle_fcp_region_request(struct fw_card *card,
return;
}
+ count = 0;
+ handlers = buffer_on_kernel_stack;
+ buffer_size = ARRAY_SIZE(buffer_on_kernel_stack);
scoped_guard(rcu) {
list_for_each_entry_rcu(handler, &address_handler_list, link) {
- if (is_enclosing_handler(handler, offset, request->length))
- handler->address_callback(card, request, tcode, destination, source,
- p->generation, offset, request->data,
- request->length, handler->callback_data);
+ if (is_enclosing_handler(handler, offset, request->length)) {
+ if (count >= buffer_size) {
+ int next_size = buffer_size * 2;
+ struct fw_address_handler **buffer_on_kernel_heap;
+
+ if (handlers == buffer_on_kernel_stack)
+ buffer_on_kernel_heap = NULL;
+ else
+ buffer_on_kernel_heap = handlers;
+
+ buffer_on_kernel_heap =
+ krealloc_array(buffer_on_kernel_heap, next_size,
+ sizeof(*buffer_on_kernel_heap), GFP_ATOMIC);
+ // FCP is used for purposes unrelated to significant system
+ // resources (e.g. storage or networking), so allocation
+ // failures are not considered critical.
+ if (!buffer_on_kernel_heap)
+ break;
+
+ if (handlers == buffer_on_kernel_stack) {
+ memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack,
+ sizeof(buffer_on_kernel_stack));
+ }
+
+ handlers = buffer_on_kernel_heap;
+ buffer_size = next_size;
+ }
+ get_address_handler(handler);
+ handlers[count++] = handler;
+ }
}
}
+ for (i = 0; i < count; ++i) {
+ handler = handlers[i];
+ handler->address_callback(card, request, tcode, destination, source,
+ p->generation, offset, request->data,
+ request->length, handler->callback_data);
+ put_address_handler(handler);
+ }
+
+ if (handlers != buffer_on_kernel_stack)
+ kfree(handlers);
+
fw_send_response(card, request, RCODE_COMPLETE);
}
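
The FCP path snapshots matching handlers under RCU into a small on-stack array that spills over to krealloc_array(GFP_ATOMIC) growth on demand, so the callbacks can run outside the read-side critical section. The same two-phase pattern in isolation (a sketch with illustrative names):

static void process_all_sketch(int *src, int n_src)
{
	int stack_buf[4];
	int *buf = stack_buf;
	int cap = ARRAY_SIZE(stack_buf);
	int count = 0, i;

	/* phase 1: collect while in atomic context (imagine: under RCU) */
	for (i = 0; i < n_src; i++) {
		if (count >= cap) {
			int *heap = buf == stack_buf ? NULL : buf;

			heap = krealloc_array(heap, cap * 2, sizeof(*heap),
					      GFP_ATOMIC);
			if (!heap)
				break;	/* degrade: keep what already fits */
			if (buf == stack_buf)
				memcpy(heap, stack_buf, sizeof(stack_buf));
			buf = heap;
			cap *= 2;
		}
		buf[count++] = src[i];
	}

	/* phase 2: act on the snapshot with the critical section over */
	for (i = 0; i < count; i++)
		pr_info("item: %d\n", buf[i]);

	if (buf != stack_buf)
		kfree(buf);
}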
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 1bf0e15c1540..6d6446713539 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1007,7 +1007,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
spin_lock_irqsave(&dev->lock, flags);
- /* If the AT tasklet already ran, we may be last user. */
+ /* If the AT work item already ran, we may be the last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
@@ -1026,7 +1026,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
spin_lock_irqsave(&dev->lock, flags);
- /* If the AT tasklet already ran, we may be last user. */
+ /* If the AT work item already ran, we may be the last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index edaedd156a6d..5d8301b0f3aa 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -101,7 +101,7 @@ struct ar_context {
void *pointer;
unsigned int last_buffer_index;
u32 regs;
- struct tasklet_struct tasklet;
+ struct work_struct work;
};
struct context;
@@ -128,7 +128,6 @@ struct context {
int total_allocation;
u32 current_bus;
bool running;
- bool flushing;
/*
* List of page-sized buffers for storing DMA descriptors.
@@ -157,8 +156,12 @@ struct context {
int prev_z;
descriptor_callback_t callback;
+};
- struct tasklet_struct tasklet;
+struct at_context {
+ struct context context;
+ struct work_struct work;
+ bool flushing;
};
struct iso_context {
@@ -204,8 +207,8 @@ struct fw_ohci {
struct ar_context ar_request_ctx;
struct ar_context ar_response_ctx;
- struct context at_request_ctx;
- struct context at_response_ctx;
+ struct at_context at_request_ctx;
+ struct at_context at_response_ctx;
u32 it_context_support;
u32 it_context_mask; /* unoccupied IT contexts */
@@ -1016,9 +1019,9 @@ static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
}
}
-static void ar_context_tasklet(unsigned long data)
+static void ohci_ar_context_work(struct work_struct *work)
{
- struct ar_context *ctx = (struct ar_context *)data;
+ struct ar_context *ctx = from_work(ctx, work, work);
unsigned int end_buffer_index, end_buffer_offset;
void *p, *end;
@@ -1026,23 +1029,19 @@ static void ar_context_tasklet(unsigned long data)
if (!p)
return;
- end_buffer_index = ar_search_last_active_buffer(ctx,
- &end_buffer_offset);
+ end_buffer_index = ar_search_last_active_buffer(ctx, &end_buffer_offset);
ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
if (end_buffer_index < ar_first_buffer_index(ctx)) {
- /*
- * The filled part of the overall buffer wraps around; handle
- * all packets up to the buffer end here. If the last packet
- * wraps around, its tail will be visible after the buffer end
- * because the buffer start pages are mapped there again.
- */
+ // The filled part of the overall buffer wraps around; handle all packets up to the
+ // buffer end here. If the last packet wraps around, its tail will be visible after
+ // the buffer end because the buffer start pages are mapped there again.
void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
p = handle_ar_packets(ctx, p, buffer_end);
if (p < buffer_end)
goto error;
- /* adjust p to point back into the actual buffer */
+ // adjust p to point back into the actual buffer
p -= AR_BUFFERS * PAGE_SIZE;
}
@@ -1057,7 +1056,6 @@ static void ar_context_tasklet(unsigned long data)
ar_recycle_buffers(ctx, end_buffer_index);
return;
-
error:
ctx->pointer = NULL;
}
@@ -1073,7 +1071,7 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
ctx->regs = regs;
ctx->ohci = ohci;
- tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
+ INIT_WORK(&ctx->work, ohci_ar_context_work);
for (i = 0; i < AR_BUFFERS; i++) {
ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
@@ -1181,16 +1179,16 @@ static void context_retire_descriptors(struct context *ctx)
}
}
-static void context_tasklet(unsigned long data)
+static void ohci_at_context_work(struct work_struct *work)
{
- struct context *ctx = (struct context *) data;
+ struct at_context *ctx = from_work(ctx, work, work);
- context_retire_descriptors(ctx);
+ context_retire_descriptors(&ctx->context);
}
static void ohci_isoc_context_work(struct work_struct *work)
{
- struct fw_iso_context *base = container_of(work, struct fw_iso_context, work);
+ struct fw_iso_context *base = from_work(base, work, work);
struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);
context_retire_descriptors(&isoc_ctx->context);
@@ -1248,7 +1246,6 @@ static int context_init(struct context *ctx, struct fw_ohci *ohci,
ctx->buffer_tail = list_entry(ctx->buffer_list.next,
struct descriptor_buffer, list);
- tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
ctx->callback = callback;
/*
@@ -1388,17 +1385,17 @@ struct driver_data {
* Must always be called with the ochi->lock held to ensure proper
* generation handling and locking around packet queue manipulation.
*/
-static int at_context_queue_packet(struct context *ctx,
- struct fw_packet *packet)
+static int at_context_queue_packet(struct at_context *ctx, struct fw_packet *packet)
{
- struct fw_ohci *ohci = ctx->ohci;
+ struct context *context = &ctx->context;
+ struct fw_ohci *ohci = context->ohci;
dma_addr_t d_bus, payload_bus;
struct driver_data *driver_data;
struct descriptor *d, *last;
__le32 *header;
int z, tcode;
- d = context_get_descriptors(ctx, 4, &d_bus);
+ d = context_get_descriptors(context, 4, &d_bus);
if (d == NULL) {
packet->ack = RCODE_SEND_ERROR;
return -1;
@@ -1428,7 +1425,7 @@ static int at_context_queue_packet(struct context *ctx,
ohci1394_at_data_set_destination_id(header,
async_header_get_destination(packet->header));
- if (ctx == &ctx->ohci->at_response_ctx) {
+ if (ctx == &ohci->at_response_ctx) {
ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header));
} else {
ohci1394_at_data_set_destination_offset(header,
@@ -1517,37 +1514,42 @@ static int at_context_queue_packet(struct context *ctx,
return -1;
}
- context_append(ctx, d, z, 4 - z);
+ context_append(context, d, z, 4 - z);
- if (ctx->running)
- reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+ if (context->running)
+ reg_write(ohci, CONTROL_SET(context->regs), CONTEXT_WAKE);
else
- context_run(ctx, 0);
+ context_run(context, 0);
return 0;
}
-static void at_context_flush(struct context *ctx)
+static void at_context_flush(struct at_context *ctx)
{
- tasklet_disable(&ctx->tasklet);
+ // Avoid deadlock due to a programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work))
+ return;
- ctx->flushing = true;
- context_tasklet((unsigned long)ctx);
- ctx->flushing = false;
+ disable_work_sync(&ctx->work);
- tasklet_enable(&ctx->tasklet);
+ WRITE_ONCE(ctx->flushing, true);
+ ohci_at_context_work(&ctx->work);
+ WRITE_ONCE(ctx->flushing, false);
+
+ enable_work(&ctx->work);
}
static int handle_at_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
{
+ struct at_context *ctx = container_of(context, struct at_context, context);
+ struct fw_ohci *ohci = ctx->context.ohci;
struct driver_data *driver_data;
struct fw_packet *packet;
- struct fw_ohci *ohci = context->ohci;
int evt;
- if (last->transfer_status == 0 && !context->flushing)
+ if (last->transfer_status == 0 && !READ_ONCE(ctx->flushing))
/* This descriptor isn't done yet, stop iteration. */
return 0;
@@ -1581,7 +1583,7 @@ static int handle_at_packet(struct context *context,
break;
case OHCI1394_evt_missing_ack:
- if (context->flushing)
+ if (READ_ONCE(ctx->flushing))
packet->ack = RCODE_GENERATION;
else {
/*
@@ -1603,7 +1605,7 @@ static int handle_at_packet(struct context *context,
break;
case OHCI1394_evt_no_status:
- if (context->flushing) {
+ if (READ_ONCE(ctx->flushing)) {
packet->ack = RCODE_GENERATION;
break;
}
@@ -1700,13 +1702,14 @@ static void handle_local_lock(struct fw_ohci *ohci,
fw_core_handle_response(&ohci->card, &response);
}
-static void handle_local_request(struct context *ctx, struct fw_packet *packet)
+static void handle_local_request(struct at_context *ctx, struct fw_packet *packet)
{
+ struct fw_ohci *ohci = ctx->context.ohci;
u64 offset, csr;
- if (ctx == &ctx->ohci->at_request_ctx) {
+ if (ctx == &ohci->at_request_ctx) {
packet->ack = ACK_PENDING;
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ packet->callback(packet, &ohci->card, packet->ack);
}
offset = async_header_get_offset(packet->header);
@@ -1714,54 +1717,55 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
/* Handle config rom reads. */
if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
- handle_local_rom(ctx->ohci, packet, csr);
+ handle_local_rom(ohci, packet, csr);
else switch (csr) {
case CSR_BUS_MANAGER_ID:
case CSR_BANDWIDTH_AVAILABLE:
case CSR_CHANNELS_AVAILABLE_HI:
case CSR_CHANNELS_AVAILABLE_LO:
- handle_local_lock(ctx->ohci, packet, csr);
+ handle_local_lock(ohci, packet, csr);
break;
default:
- if (ctx == &ctx->ohci->at_request_ctx)
- fw_core_handle_request(&ctx->ohci->card, packet);
+ if (ctx == &ohci->at_request_ctx)
+ fw_core_handle_request(&ohci->card, packet);
else
- fw_core_handle_response(&ctx->ohci->card, packet);
+ fw_core_handle_response(&ohci->card, packet);
break;
}
- if (ctx == &ctx->ohci->at_response_ctx) {
+ if (ctx == &ohci->at_response_ctx) {
packet->ack = ACK_COMPLETE;
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ packet->callback(packet, &ohci->card, packet->ack);
}
}
-static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
+static void at_context_transmit(struct at_context *ctx, struct fw_packet *packet)
{
+ struct fw_ohci *ohci = ctx->context.ohci;
unsigned long flags;
int ret;
- spin_lock_irqsave(&ctx->ohci->lock, flags);
+ spin_lock_irqsave(&ohci->lock, flags);
- if (async_header_get_destination(packet->header) == ctx->ohci->node_id &&
- ctx->ohci->generation == packet->generation) {
- spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+ if (async_header_get_destination(packet->header) == ohci->node_id &&
+ ohci->generation == packet->generation) {
+ spin_unlock_irqrestore(&ohci->lock, flags);
// Timestamping on behalf of the hardware.
- packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
+ packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
handle_local_request(ctx, packet);
return;
}
ret = at_context_queue_packet(ctx, packet);
- spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+ spin_unlock_irqrestore(&ohci->lock, flags);
if (ret < 0) {
// Timestamping on behalf of the hardware.
- packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
+ packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ packet->callback(packet, &ohci->card, packet->ack);
}
}
@@ -2028,8 +2032,7 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
static void bus_reset_work(struct work_struct *work)
{
- struct fw_ohci *ohci =
- container_of(work, struct fw_ohci, bus_reset_work);
+ struct fw_ohci *ohci = from_work(ohci, work, bus_reset_work);
int self_id_count, generation, new_generation, i, j;
u32 reg, quadlet;
void *free_rom = NULL;
@@ -2141,8 +2144,8 @@ static void bus_reset_work(struct work_struct *work)
// FIXME: Document how the locking works.
scoped_guard(spinlock_irq, &ohci->lock) {
ohci->generation = -1; // prevent AT packet queueing
- context_stop(&ohci->at_request_ctx);
- context_stop(&ohci->at_response_ctx);
+ context_stop(&ohci->at_request_ctx.context);
+ context_stop(&ohci->at_response_ctx.context);
}
/*
@@ -2239,16 +2242,16 @@ static irqreturn_t irq_handler(int irq, void *data)
}
if (event & OHCI1394_RQPkt)
- tasklet_schedule(&ohci->ar_request_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work);
if (event & OHCI1394_RSPkt)
- tasklet_schedule(&ohci->ar_response_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->ar_response_ctx.work);
if (event & OHCI1394_reqTxComplete)
- tasklet_schedule(&ohci->at_request_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->at_request_ctx.work);
if (event & OHCI1394_respTxComplete)
- tasklet_schedule(&ohci->at_response_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->at_response_ctx.work);
if (event & OHCI1394_isochRx) {
iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
@@ -2528,7 +2531,7 @@ static int ohci_enable(struct fw_card *card,
* They shouldn't do that in this initial case where the link
* isn't enabled. This means we have to use the same
* workaround here, setting the bus header to 0 and then write
- * the right values in the bus reset tasklet.
+ * the right values in the bus reset work item.
*/
if (config_rom) {
@@ -2617,7 +2620,7 @@ static int ohci_set_config_rom(struct fw_card *card,
* during the atomic update, even on little endian
* architectures. The workaround we use is to put a 0 in the
* header quadlet; 0 is endian agnostic and means that the
- * config rom isn't ready yet. In the bus reset tasklet we
+ * config rom isn't ready yet. In the bus reset work item we
* then set up the real values for the two registers.
*
* We use ohci->lock to avoid racing with the code that sets
@@ -2659,7 +2662,7 @@ static int ohci_set_config_rom(struct fw_card *card,
/*
* Now initiate a bus reset to have the changes take
* effect. We clean up the old config rom memory and DMA
- * mappings in the bus reset tasklet, since the OHCI
+ * mappings in the bus reset work item, since the OHCI
* controller could need to access it before the bus reset
* takes effect.
*/
@@ -2686,11 +2689,14 @@ static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
struct fw_ohci *ohci = fw_ohci(card);
- struct context *ctx = &ohci->at_request_ctx;
+ struct at_context *ctx = &ohci->at_request_ctx;
struct driver_data *driver_data = packet->driver_data;
int ret = -ENOENT;
- tasklet_disable_in_atomic(&ctx->tasklet);
+ // Avoid deadlock due to a programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work))
+ return 0;
+ disable_work_sync(&ctx->work);
if (packet->ack != 0)
goto out;
@@ -2709,7 +2715,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
packet->callback(packet, &ohci->card, packet->ack);
ret = 0;
out:
- tasklet_enable(&ctx->tasklet);
+ enable_work(&ctx->work);
return ret;
}
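
Both at_context_flush() and ohci_cancel_packet() now follow the same discipline: disable_work_sync() waits for a running instance and prevents requeueing, which would deadlock if invoked from the work handler itself; hence the current_work() guard. The shape in isolation:

struct flushable {
	struct work_struct work;
};

static void flushable_work_fn(struct work_struct *work)
{
	/* ... retire whatever has completed ... */
}

static void flushable_flush(struct flushable *ctx)
{
	/* Disabling our own work item would wait on ourselves. */
	if (WARN_ON_ONCE(current_work() == &ctx->work))
		return;

	disable_work_sync(&ctx->work);	/* no instance runs past this point */
	flushable_work_fn(&ctx->work);	/* run the handler synchronously */
	enable_work(&ctx->work);	/* queue_work() is accepted again */
}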
@@ -3767,15 +3773,17 @@ static int pci_probe(struct pci_dev *dev,
if (err < 0)
return err;
- err = context_init(&ohci->at_request_ctx, ohci,
+ err = context_init(&ohci->at_request_ctx.context, ohci,
OHCI1394_AsReqTrContextControlSet, handle_at_packet);
if (err < 0)
return err;
+ INIT_WORK(&ohci->at_request_ctx.work, ohci_at_context_work);
- err = context_init(&ohci->at_response_ctx, ohci,
+ err = context_init(&ohci->at_response_ctx.context, ohci,
OHCI1394_AsRspTrContextControlSet, handle_at_packet);
if (err < 0)
return err;
+ INIT_WORK(&ohci->at_response_ctx.work, ohci_at_context_work);
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL;
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 16baa038d412..d528c94c5859 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -263,6 +263,14 @@ config EFI_COCO_SECRET
virt/coco/efi_secret module to access the secrets, which in turn
allows userspace programs to access the injected secrets.
+config OVMF_DEBUG_LOG
+ bool "Expose OVMF firmware debug log via sysfs"
+ depends on EFI
+ help
+ Recent OVMF versions (edk2-stable202508 and newer) can write
+ their debug log to a memory buffer. This driver exposes the
+ log content via sysfs (/sys/firmware/efi/ovmf_debug_log).
+
config UNACCEPTED_MEMORY
bool
depends on EFI_STUB
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index a2d0009560d0..8efbcf699e4f 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o
obj-$(CONFIG_LOAD_UEFI_KEYS) += mokvar-table.o
+obj-$(CONFIG_OVMF_DEBUG_LOG) += ovmf-debug-log.o
obj-$(CONFIG_SYSFB) += sysfb_efi.o
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e57bff702b5f..1ce428e2ac8a 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -45,6 +45,7 @@ struct efi __read_mostly efi = {
.esrt = EFI_INVALID_TABLE_ADDR,
.tpm_log = EFI_INVALID_TABLE_ADDR,
.tpm_final_log = EFI_INVALID_TABLE_ADDR,
+ .ovmf_debug_log = EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
.mokvar_table = EFI_INVALID_TABLE_ADDR,
#endif
@@ -473,6 +474,10 @@ static int __init efisubsys_init(void)
platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif
+ if (IS_ENABLED(CONFIG_OVMF_DEBUG_LOG) &&
+ efi.ovmf_debug_log != EFI_INVALID_TABLE_ADDR)
+ ovmf_log_probe(efi.ovmf_debug_log);
+
return 0;
err_remove_group:
@@ -617,6 +622,9 @@ static const efi_config_table_type_t common_tables[] __initconst = {
{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
{LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
+#ifdef CONFIG_OVMF_DEBUG_LOG
+ {OVMF_MEMORY_LOG_TABLE_GUID, &efi.ovmf_debug_log, "OvmfDebugLog" },
+#endif
#ifdef CONFIG_EFI_RCI2_TABLE
{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 92e3c73502ba..832deee36e48 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -36,7 +36,7 @@ aflags-zboot-header-$(EFI_ZBOOT_FORWARD_CFI) := \
-DPE_DLL_CHAR_EX=IMAGE_DLLCHARACTERISTICS_EX_FORWARD_CFI_COMPAT
AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE) \
- -DZBOOT_EFI_PATH="\"$(realpath $(obj)/vmlinuz.efi.elf)\"" \
+ -DZBOOT_EFI_PATH="\"$(abspath $(obj)/vmlinuz.efi.elf)\"" \
-DZBOOT_SIZE_LEN=$(zboot-size-len-y) \
-DCOMP_TYPE="\"$(comp-type-y)\"" \
$(aflags-zboot-header-y)
diff --git a/drivers/firmware/efi/libstub/printk.c b/drivers/firmware/efi/libstub/printk.c
index 3a67a2cea7bd..bc599212c05d 100644
--- a/drivers/firmware/efi/libstub/printk.c
+++ b/drivers/firmware/efi/libstub/printk.c
@@ -5,13 +5,13 @@
#include <linux/ctype.h>
#include <linux/efi.h>
#include <linux/kernel.h>
-#include <linux/printk.h> /* For CONSOLE_LOGLEVEL_* */
+#include <linux/kern_levels.h>
#include <asm/efi.h>
#include <asm/setup.h>
#include "efistub.h"
-int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
+int efi_loglevel = LOGLEVEL_NOTICE;
/**
* efi_char16_puts() - Write a UCS-2 encoded string to the console
diff --git a/drivers/firmware/efi/ovmf-debug-log.c b/drivers/firmware/efi/ovmf-debug-log.c
new file mode 100644
index 000000000000..5b2471ffaeed
--- /dev/null
+++ b/drivers/firmware/efi/ovmf-debug-log.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+
+#define OVMF_DEBUG_LOG_MAGIC1 0x3167646d666d766f // "ovmfmdg1"
+#define OVMF_DEBUG_LOG_MAGIC2 0x3267646d666d766f // "ovmfmdg2"
+
+struct ovmf_debug_log_header {
+ u64 magic1;
+ u64 magic2;
+ u64 hdr_size;
+ u64 log_size;
+ u64 lock; // edk2 spinlock
+ u64 head_off;
+ u64 tail_off;
+ u64 truncated;
+ u8 fw_version[128];
+};
+
+static struct ovmf_debug_log_header *hdr;
+static u8 *logbuf;
+static u64 logbufsize;
+
+static ssize_t ovmf_log_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ u64 start, end;
+
+ start = hdr->head_off + offset;
+ if (hdr->head_off > hdr->tail_off && start >= hdr->log_size)
+ start -= hdr->log_size;
+
+ end = start + count;
+ if (start > hdr->tail_off) {
+ if (end > hdr->log_size)
+ end = hdr->log_size;
+ } else {
+ if (end > hdr->tail_off)
+ end = hdr->tail_off;
+ }
+
+ if (start > logbufsize || end > logbufsize)
+ return 0;
+ if (start >= end)
+ return 0;
+
+ memcpy(buf, logbuf + start, end - start);
+ return end - start;
+}
+
+static struct bin_attribute ovmf_log_bin_attr = {
+ .attr = {
+ .name = "ovmf_debug_log",
+ .mode = 0444,
+ },
+ .read = ovmf_log_read,
+};
+
+int __init ovmf_log_probe(unsigned long ovmf_debug_log_table)
+{
+ int ret = -EINVAL;
+ u64 size;
+
+ /* map + verify header */
+ hdr = memremap(ovmf_debug_log_table, sizeof(*hdr), MEMREMAP_WB);
+ if (!hdr) {
+ pr_err("OVMF debug log: header map failed\n");
+ return -EINVAL;
+ }
+
+ if (hdr->magic1 != OVMF_DEBUG_LOG_MAGIC1 ||
+ hdr->magic2 != OVMF_DEBUG_LOG_MAGIC2) {
+ pr_err("OVMF debug log: magic mismatch\n");
+ goto err_unmap;
+ }
+
+ size = hdr->hdr_size + hdr->log_size;
+ pr_info("OVMF debug log: firmware version: \"%s\"\n", hdr->fw_version);
+ pr_info("OVMF debug log: buffer size: %lluk\n", size / 1024);
+
+ /* map complete log buffer */
+ memunmap(hdr);
+ hdr = memremap(ovmf_debug_log_table, size, MEMREMAP_WB);
+ if (!hdr) {
+ pr_err("OVMF debug log: buffer map failed\n");
+ return -EINVAL;
+ }
+ logbuf = (void *)hdr + hdr->hdr_size;
+ logbufsize = hdr->log_size;
+
+ ovmf_log_bin_attr.size = size;
+ ret = sysfs_create_bin_file(efi_kobj, &ovmf_log_bin_attr);
+ if (ret != 0) {
+ pr_err("OVMF debug log: sysfs register failed\n");
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ memunmap(hdr);
+ return ret;
+}
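
The read handler linearizes the firmware's ring: sysfs offset 0 maps to head_off, each chunk is clamped at the physical end of the buffer and at tail_off, and out-of-range requests return 0 (EOF). Consuming the log from userspace is then ordinary file I/O; a trivial sketch:

#include <stdio.h>

int main(void)
{
	char buf[4096];
	size_t n;
	FILE *f = fopen("/sys/firmware/efi/ovmf_debug_log", "r");

	if (!f) {
		perror("ovmf_debug_log");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}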
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 4dd5c2c330bb..c226524efeba 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -141,8 +141,8 @@ static int gen_74x164_probe(struct spi_device *spi)
chip->gpio_chip.label = spi->modalias;
chip->gpio_chip.direction_output = gen_74x164_direction_output;
chip->gpio_chip.get = gen_74x164_get_value;
- chip->gpio_chip.set_rv = gen_74x164_set_value;
- chip->gpio_chip.set_multiple_rv = gen_74x164_set_multiple;
+ chip->gpio_chip.set = gen_74x164_set_value;
+ chip->gpio_chip.set_multiple = gen_74x164_set_multiple;
chip->gpio_chip.base = -1;
chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers;
chip->gpio_chip.can_sleep = true;
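
This and the long run of GPIO drivers below are one mechanical rename: while GPIO setters were being converted to report errors, the int-returning callbacks carried the interim names set_rv/set_multiple_rv; with the old void variants gone, they reclaim the canonical slots. The renamed hooks keep the int-returning prototypes, roughly int (*set)(struct gpio_chip *gc, unsigned int offset, int value) and int (*set_multiple)(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits), so a conforming setter can propagate bus errors (struct example_chip and EXAMPLE_OUT_REG are hypothetical):

struct example_chip {
	struct regmap *regmap;
};

static int example_gpio_set(struct gpio_chip *gc, unsigned int offset,
			    int value)
{
	struct example_chip *chip = gpiochip_get_data(gc);

	/* Propagate any bus error instead of swallowing it. */
	return regmap_update_bits(chip->regmap, EXAMPLE_OUT_REG,
				  BIT(offset), value ? BIT(offset) : 0);
}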
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index dc2b941c3726..e5ac2d211013 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -430,7 +430,7 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios,
chip->direction_input = adnp_gpio_direction_input;
chip->direction_output = adnp_gpio_direction_output;
chip->get = adnp_gpio_get;
- chip->set_rv = adnp_gpio_set;
+ chip->set = adnp_gpio_set;
chip->can_sleep = true;
if (IS_ENABLED(CONFIG_DEBUG_FS))
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 57d12c10cbda..6305c8b7dc05 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -122,7 +122,7 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
gc->direction_input = adp5520_gpio_direction_input;
gc->direction_output = adp5520_gpio_direction_output;
gc->get = adp5520_gpio_get_value;
- gc->set_rv = adp5520_gpio_set_value;
+ gc->set = adp5520_gpio_set_value;
gc->can_sleep = true;
gc->base = pdata->gpio_start;
diff --git a/drivers/gpio/gpio-adp5585.c b/drivers/gpio/gpio-adp5585.c
index b2c8836c5f84..0fd3cc26d017 100644
--- a/drivers/gpio/gpio-adp5585.c
+++ b/drivers/gpio/gpio-adp5585.c
@@ -428,7 +428,7 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
gc->direction_input = adp5585_gpio_direction_input;
gc->direction_output = adp5585_gpio_direction_output;
gc->get = adp5585_gpio_get_value;
- gc->set_rv = adp5585_gpio_set_value;
+ gc->set = adp5585_gpio_set_value;
gc->set_config = adp5585_gpio_set_config;
gc->request = adp5585_gpio_request;
gc->free = adp5585_gpio_free;
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 6f941db02c04..af9d8b3a711d 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -534,8 +534,8 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
chip->direction_output = gpio_fwd_direction_output;
chip->get = gpio_fwd_get;
chip->get_multiple = gpio_fwd_get_multiple_locked;
- chip->set_rv = gpio_fwd_set;
- chip->set_multiple_rv = gpio_fwd_set_multiple_locked;
+ chip->set = gpio_fwd_set;
+ chip->set_multiple = gpio_fwd_set_multiple_locked;
chip->to_irq = gpio_fwd_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 77a674cf99e4..4524c18a87e7 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -69,7 +69,7 @@ static const struct gpio_chip altr_a10sr_gc = {
.label = "altr_a10sr_gpio",
.owner = THIS_MODULE,
.get = altr_a10sr_gpio_get,
- .set_rv = altr_a10sr_gpio_set,
+ .set = altr_a10sr_gpio_set,
.direction_input = altr_a10sr_gpio_direction_input,
.direction_output = altr_a10sr_gpio_direction_output,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 1b28525726d7..9508d764cce4 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -259,7 +259,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
altera_gc->gc.direction_input = altera_gpio_direction_input;
altera_gc->gc.direction_output = altera_gpio_direction_output;
altera_gc->gc.get = altera_gpio_get;
- altera_gc->gc.set_rv = altera_gpio_set;
+ altera_gc->gc.set = altera_gpio_set;
altera_gc->gc.owner = THIS_MODULE;
altera_gc->gc.parent = &pdev->dev;
altera_gc->gc.base = -1;
diff --git a/drivers/gpio/gpio-amd-fch.c b/drivers/gpio/gpio-amd-fch.c
index f8d0cea46049..e6c6c3ec7656 100644
--- a/drivers/gpio/gpio-amd-fch.c
+++ b/drivers/gpio/gpio-amd-fch.c
@@ -165,7 +165,7 @@ static int amd_fch_gpio_probe(struct platform_device *pdev)
priv->gc.direction_output = amd_fch_gpio_direction_output;
priv->gc.get_direction = amd_fch_gpio_get_direction;
priv->gc.get = amd_fch_gpio_get;
- priv->gc.set_rv = amd_fch_gpio_set;
+ priv->gc.set = amd_fch_gpio_set;
spin_lock_init(&priv->lock);
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 425d8472f744..15fd5e210d74 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -165,7 +165,7 @@ static struct amd_gpio gp = {
.ngpio = 32,
.request = amd_gpio_request,
.free = amd_gpio_free,
- .set_rv = amd_gpio_set,
+ .set = amd_gpio_set,
.get = amd_gpio_get,
.direction_output = amd_gpio_dirout,
.direction_input = amd_gpio_dirin,
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 89ffde693019..a7e98d395d8e 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -138,7 +138,7 @@ static const struct gpio_chip template_chip = {
.direction_input = arizona_gpio_direction_in,
.get = arizona_gpio_get,
.direction_output = arizona_gpio_direction_out,
- .set_rv = arizona_gpio_set,
+ .set = arizona_gpio_set,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 00b31497ecff..7622f9e9f54a 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -596,7 +596,7 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
gpio->chip.request = NULL;
gpio->chip.free = NULL;
gpio->chip.get = aspeed_sgpio_get;
- gpio->chip.set_rv = aspeed_sgpio_set;
+ gpio->chip.set = aspeed_sgpio_set;
gpio->chip.set_config = aspeed_sgpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 2d340a343a17..7953a9c4e36d 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1352,7 +1352,7 @@ static int aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.request = aspeed_gpio_request;
gpio->chip.free = aspeed_gpio_free;
gpio->chip.get = aspeed_gpio_get;
- gpio->chip.set_rv = aspeed_gpio_set;
+ gpio->chip.set = aspeed_gpio_set;
gpio->chip.set_config = aspeed_gpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 8f22cb36004d..208b71c59d58 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -339,7 +339,7 @@ static const struct gpio_chip template_chip = {
.direction_input = bcm_kona_gpio_direction_input,
.get = bcm_kona_gpio_get,
.direction_output = bcm_kona_gpio_direction_output,
- .set_rv = bcm_kona_gpio_set,
+ .set = bcm_kona_gpio_set,
.set_config = bcm_kona_gpio_set_config,
.to_irq = bcm_kona_gpio_to_irq,
.base = 0,
diff --git a/drivers/gpio/gpio-bd71815.c b/drivers/gpio/gpio-bd71815.c
index 36701500925e..afb18a5a9d79 100644
--- a/drivers/gpio/gpio-bd71815.c
+++ b/drivers/gpio/gpio-bd71815.c
@@ -85,7 +85,7 @@ static const struct gpio_chip bd71815gpo_chip = {
.owner = THIS_MODULE,
.get = bd71815gpo_get,
.get_direction = bd71815gpo_direction_get,
- .set_rv = bd71815gpo_set,
+ .set = bd71815gpo_set,
.set_config = bd71815_gpio_set_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
index 4ba151e5cf25..e439dbfffc62 100644
--- a/drivers/gpio/gpio-bd71828.c
+++ b/drivers/gpio/gpio-bd71828.c
@@ -109,7 +109,7 @@ static int bd71828_probe(struct platform_device *pdev)
bdgpio->gpio.set_config = bd71828_gpio_set_config;
bdgpio->gpio.can_sleep = true;
bdgpio->gpio.get = bd71828_gpio_get;
- bdgpio->gpio.set_rv = bd71828_gpio_set;
+ bdgpio->gpio.set = bd71828_gpio_set;
bdgpio->gpio.base = -1;
/*
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
index 8df1361e3e84..7c95bb36511e 100644
--- a/drivers/gpio/gpio-bd9571mwv.c
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -88,7 +88,7 @@ static const struct gpio_chip template_chip = {
.direction_input = bd9571mwv_gpio_direction_input,
.direction_output = bd9571mwv_gpio_direction_output,
.get = bd9571mwv_gpio_get,
- .set_rv = bd9571mwv_gpio_set,
+ .set = bd9571mwv_gpio_set,
.base = -1,
.ngpio = 2,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index 7c9e81fea37a..05401da03ca3 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -145,7 +145,7 @@ static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg)
c->direction_input = bt8xxgpio_gpio_direction_input;
c->get = bt8xxgpio_gpio_get;
c->direction_output = bt8xxgpio_gpio_direction_output;
- c->set_rv = bt8xxgpio_gpio_set;
+ c->set = bt8xxgpio_gpio_set;
c->dbg_show = NULL;
c->base = modparam_gpiobase;
c->ngpio = BT8XXGPIO_NR_GPIOS;
diff --git a/drivers/gpio/gpio-cgbc.c b/drivers/gpio/gpio-cgbc.c
index 1495bec62456..0efa1b61001a 100644
--- a/drivers/gpio/gpio-cgbc.c
+++ b/drivers/gpio/gpio-cgbc.c
@@ -171,7 +171,7 @@ static int cgbc_gpio_probe(struct platform_device *pdev)
chip->direction_output = cgbc_gpio_direction_output;
chip->get_direction = cgbc_gpio_get_direction;
chip->get = cgbc_gpio_get;
- chip->set_rv = cgbc_gpio_set;
+ chip->set = cgbc_gpio_set;
chip->ngpio = CGBC_GPIO_NGPIO;
ret = devm_mutex_init(dev, &gpio->lock);
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index 8b49f02c7896..f8ea961fa1de 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -167,7 +167,7 @@ static int creg_gpio_probe(struct platform_device *pdev)
hcg->gc.label = dev_name(dev);
hcg->gc.base = -1;
hcg->gc.ngpio = ngpios;
- hcg->gc.set_rv = creg_gpio_set;
+ hcg->gc.set = creg_gpio_set;
hcg->gc.direction_output = creg_gpio_dir_out;
ret = devm_gpiochip_add_data(dev, &hcg->gc, hcg);
diff --git a/drivers/gpio/gpio-cros-ec.c b/drivers/gpio/gpio-cros-ec.c
index 53cd5ff6247b..435483826c6e 100644
--- a/drivers/gpio/gpio-cros-ec.c
+++ b/drivers/gpio/gpio-cros-ec.c
@@ -188,7 +188,7 @@ static int cros_ec_gpio_probe(struct platform_device *pdev)
gc->can_sleep = true;
gc->label = dev_name(dev);
gc->base = -1;
- gc->set_rv = cros_ec_gpio_set;
+ gc->set = cros_ec_gpio_set;
gc->get = cros_ec_gpio_get;
gc->get_direction = cros_ec_gpio_get_direction;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 8db7cca3a060..0fb5c06d0886 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -349,7 +349,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
cg->chip.direction_input = crystalcove_gpio_dir_in;
cg->chip.direction_output = crystalcove_gpio_dir_out;
cg->chip.get = crystalcove_gpio_get;
- cg->chip.set_rv = crystalcove_gpio_set;
+ cg->chip.set = crystalcove_gpio_set;
cg->chip.base = -1;
cg->chip.ngpio = CRYSTALCOVE_VGPIO_NUM;
cg->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 143d1f4173a6..8affe4e9f90e 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -296,7 +296,7 @@ static struct cs5535_gpio_chip cs5535_gpio_chip = {
.request = chip_gpio_request,
.get = chip_gpio_get,
- .set_rv = chip_gpio_set,
+ .set = chip_gpio_set,
.direction_input = chip_direction_input,
.direction_output = chip_direction_output,
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 6482c5b267db..495f0ee58505 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -172,7 +172,7 @@ static const struct gpio_chip reference_gp = {
.label = "da9052-gpio",
.owner = THIS_MODULE,
.get = da9052_gpio_get,
- .set_rv = da9052_gpio_set,
+ .set = da9052_gpio_set,
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 3d9d0c700100..a09bd6eb93cf 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -116,7 +116,7 @@ static const struct gpio_chip reference_gp = {
.label = "da9055-gpio",
.owner = THIS_MODULE,
.get = da9055_gpio_get,
- .set_rv = da9055_gpio_set,
+ .set = da9055_gpio_set,
.direction_input = da9055_gpio_direction_input,
.direction_output = da9055_gpio_direction_output,
.to_irq = da9055_gpio_to_irq,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 8f3a36d0191d..538f27209ce7 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -202,7 +202,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.direction_input = davinci_direction_in;
chips->chip.get = davinci_gpio_get;
chips->chip.direction_output = davinci_direction_out;
- chips->chip.set_rv = davinci_gpio_set;
+ chips->chip.set = davinci_gpio_set;
chips->chip.ngpio = ngpio;
chips->chip.base = -1;
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 4bd3c47eaf93..4670ffd7ea7f 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -469,7 +469,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.base = -1;
dln2->gpio.ngpio = pins;
dln2->gpio.can_sleep = true;
- dln2->gpio.set_rv = dln2_gpio_set;
+ dln2->gpio.set = dln2_gpio_set;
dln2->gpio.get = dln2_gpio_get;
dln2->gpio.request = dln2_gpio_request;
dln2->gpio.free = dln2_gpio_free;
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index f2973d0b7138..50fafeda8d7e 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -663,7 +663,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
sprd_eic->chip.request = sprd_eic_request;
sprd_eic->chip.free = sprd_eic_free;
sprd_eic->chip.set_config = sprd_eic_set_config;
- sprd_eic->chip.set_rv = sprd_eic_set;
+ sprd_eic->chip.set = sprd_eic_set;
fallthrough;
case SPRD_EIC_ASYNC:
case SPRD_EIC_SYNC:
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 015f1ac32dd9..a214b0672726 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -306,7 +306,7 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->direction_input = em_gio_direction_input;
gpio_chip->get = em_gio_get;
gpio_chip->direction_output = em_gio_direction_output;
- gpio_chip->set_rv = em_gio_set;
+ gpio_chip->set = em_gio_set;
gpio_chip->to_irq = em_gio_to_irq;
gpio_chip->request = pinctrl_gpio_request;
gpio_chip->free = em_gio_free;
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index beb98286d13e..9053662f1817 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -211,7 +211,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
exar_gpio->gpio_chip.direction_input = exar_direction_input;
exar_gpio->gpio_chip.get_direction = exar_get_direction;
exar_gpio->gpio_chip.get = exar_get_value;
- exar_gpio->gpio_chip.set_rv = exar_set_value;
+ exar_gpio->gpio_chip.set = exar_set_value;
exar_gpio->gpio_chip.base = -1;
exar_gpio->gpio_chip.ngpio = ngpios;
exar_gpio->index = index;
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index dfcd3634f279..4d5b927ad70f 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -173,7 +173,7 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
.direction_input = f7188x_gpio_direction_in, \
.get = f7188x_gpio_get, \
.direction_output = f7188x_gpio_direction_out, \
- .set_rv = f7188x_gpio_set, \
+ .set = f7188x_gpio_set, \
.set_config = f7188x_gpio_set_config, \
.base = -1, \
.ngpio = _ngpio, \
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
index f25283e5239d..121bf29a27f5 100644
--- a/drivers/gpio/gpio-graniterapids.c
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -159,7 +159,7 @@ static const struct gpio_chip gnr_gpio_chip = {
.owner = THIS_MODULE,
.request = gnr_gpio_request,
.get = gnr_gpio_get,
- .set_rv = gnr_gpio_set,
+ .set = gnr_gpio_set,
.get_direction = gnr_gpio_get_direction,
.direction_input = gnr_gpio_direction_input,
.direction_output = gnr_gpio_direction_output,
diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c
index a40ba99a3aea..2e5d97b7363f 100644
--- a/drivers/gpio/gpio-gw-pld.c
+++ b/drivers/gpio/gpio-gw-pld.c
@@ -86,7 +86,7 @@ static int gw_pld_probe(struct i2c_client *client)
gw->chip.direction_input = gw_pld_input8;
gw->chip.get = gw_pld_get8;
gw->chip.direction_output = gw_pld_output8;
- gw->chip.set_rv = gw_pld_set8;
+ gw->chip.set = gw_pld_set8;
gw->client = client;
/*
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index b1844a676c7c..2eaed83214d8 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -324,7 +324,7 @@ static int __init egpio_probe(struct platform_device *pdev)
chip->parent = &pdev->dev;
chip->owner = THIS_MODULE;
chip->get = egpio_get;
- chip->set_rv = egpio_set;
+ chip->set = egpio_set;
chip->direction_input = egpio_direction_input;
chip->direction_output = egpio_direction_output;
chip->get_direction = egpio_get_direction;
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 67089b2423d8..1802c9116ffe 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -273,7 +273,7 @@ static void ichx_gpiolib_setup(struct gpio_chip *chip)
chip->get = ichx_priv.desc->get ?
ichx_priv.desc->get : ichx_gpio_get;
- chip->set_rv = ichx_gpio_set;
+ chip->set = ichx_gpio_set;
chip->get_direction = ichx_gpio_get_direction;
chip->direction_input = ichx_gpio_direction_input;
chip->direction_output = ichx_gpio_direction_output;
diff --git a/drivers/gpio/gpio-imx-scu.c b/drivers/gpio/gpio-imx-scu.c
index 1693dbf1b777..0a75afecf9f8 100644
--- a/drivers/gpio/gpio-imx-scu.c
+++ b/drivers/gpio/gpio-imx-scu.c
@@ -102,7 +102,7 @@ static int imx_scu_gpio_probe(struct platform_device *pdev)
gc->ngpio = ARRAY_SIZE(scu_rsrc_arr);
gc->label = dev_name(dev);
gc->get = imx_scu_gpio_get;
- gc->set_rv = imx_scu_gpio_set;
+ gc->set = imx_scu_gpio_set;
gc->get_direction = imx_scu_gpio_get_direction;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index d8184b527bac..5d677bcfccf2 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -267,7 +267,7 @@ static const struct gpio_chip it87_template_chip = {
.request = it87_gpio_request,
.get = it87_gpio_get,
.direction_input = it87_gpio_direction_in,
- .set_rv = it87_gpio_set,
+ .set = it87_gpio_set,
.direction_output = it87_gpio_direction_out,
.base = -1
};
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 9f548eda3888..b0c4a3346e7d 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -171,7 +171,7 @@ static int ttl_probe(struct platform_device *pdev)
gpio->parent = &pdev->dev;
gpio->label = pdev->name;
gpio->get = ttl_get_value;
- gpio->set_rv = ttl_set_value;
+ gpio->set = ttl_set_value;
gpio->owner = THIS_MODULE;
/* request dynamic allocation */
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index e38e604baa22..923aad3ab4d4 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -169,7 +169,7 @@ static int kempld_gpio_probe(struct platform_device *pdev)
chip->direction_output = kempld_gpio_direction_output;
chip->get_direction = kempld_gpio_get_direction;
chip->get = kempld_gpio_get;
- chip->set_rv = kempld_gpio_set;
+ chip->set = kempld_gpio_set;
chip->ngpio = kempld_gpio_pincount(pld);
if (chip->ngpio == 0) {
dev_err(dev, "No GPIO pins detected\n");
diff --git a/drivers/gpio/gpio-latch.c b/drivers/gpio/gpio-latch.c
index 3d0ff09284fb..c64aaa896766 100644
--- a/drivers/gpio/gpio-latch.c
+++ b/drivers/gpio/gpio-latch.c
@@ -166,11 +166,11 @@ static int gpio_latch_probe(struct platform_device *pdev)
if (gpio_latch_can_sleep(priv, n_latches)) {
priv->gc.can_sleep = true;
- priv->gc.set_rv = gpio_latch_set_can_sleep;
+ priv->gc.set = gpio_latch_set_can_sleep;
mutex_init(&priv->mutex);
} else {
priv->gc.can_sleep = false;
- priv->gc.set_rv = gpio_latch_set;
+ priv->gc.set = gpio_latch_set;
spin_lock_init(&priv->spinlock);
}
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index 61524a9ba765..3b4f8830c741 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -437,7 +437,7 @@ static int ljca_gpio_probe(struct auxiliary_device *auxdev,
ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
ljca_gpio->gc.get_direction = ljca_gpio_get_direction;
ljca_gpio->gc.get = ljca_gpio_get_value;
- ljca_gpio->gc.set_rv = ljca_gpio_set_value;
+ ljca_gpio->gc.set = ljca_gpio_set_value;
ljca_gpio->gc.set_config = ljca_gpio_set_config;
ljca_gpio->gc.init_valid_mask = ljca_gpio_init_valid_mask;
ljca_gpio->gc.can_sleep = true;
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
index 19cd2847467c..cb9dbcc290ad 100644
--- a/drivers/gpio/gpio-logicvc.c
+++ b/drivers/gpio/gpio-logicvc.c
@@ -134,7 +134,7 @@ static int logicvc_gpio_probe(struct platform_device *pdev)
logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS +
LOGICVC_POWER_CTRL_GPIO_BITS;
logicvc->chip.get = logicvc_gpio_get;
- logicvc->chip.set_rv = logicvc_gpio_set;
+ logicvc->chip.set = logicvc_gpio_set;
logicvc->chip.direction_output = logicvc_gpio_direction_output;
return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index add09971d26a..818c606fbc51 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -157,7 +157,7 @@ static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgp
lgpio->chip.get = loongson_gpio_get;
lgpio->chip.get_direction = loongson_gpio_get_direction;
lgpio->chip.direction_output = loongson_gpio_direction_output;
- lgpio->chip.set_rv = loongson_gpio_set;
+ lgpio->chip.set = loongson_gpio_set;
lgpio->chip.parent = dev;
spin_lock_init(&lgpio->lock);
}
diff --git a/drivers/gpio/gpio-loongson.c b/drivers/gpio/gpio-loongson.c
index 8f3668169ebf..f3e0559f969d 100644
--- a/drivers/gpio/gpio-loongson.c
+++ b/drivers/gpio/gpio-loongson.c
@@ -106,7 +106,7 @@ static int loongson_gpio_probe(struct platform_device *pdev)
gc->base = 0;
gc->ngpio = LOONGSON_N_GPIO;
gc->get = loongson_gpio_get_value;
- gc->set_rv = loongson_gpio_set_value;
+ gc->set = loongson_gpio_set_value;
gc->direction_input = loongson_gpio_direction_input;
gc->direction_output = loongson_gpio_direction_output;
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 52ab3ac4844c..e8e00daff7df 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -184,7 +184,7 @@ static const struct gpio_chip lp3943_gpio_chip = {
.direction_input = lp3943_gpio_direction_input,
.get = lp3943_gpio_get,
.direction_output = lp3943_gpio_direction_output,
- .set_rv = lp3943_gpio_set,
+ .set = lp3943_gpio_set,
.base = -1,
.ngpio = LP3943_MAX_GPIO,
.can_sleep = 1,
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 1908ed302e92..5376708a81bf 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -124,7 +124,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp873x_gpio_direction_input,
.direction_output = lp873x_gpio_direction_output,
.get = lp873x_gpio_get,
- .set_rv = lp873x_gpio_set,
+ .set = lp873x_gpio_set,
.set_config = lp873x_gpio_set_config,
.base = -1,
.ngpio = 2,
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index 8ea687d5d028..0f337c1283b2 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -139,7 +139,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp87565_gpio_direction_input,
.direction_output = lp87565_gpio_direction_output,
.get = lp87565_gpio_get,
- .set_rv = lp87565_gpio_set,
+ .set = lp87565_gpio_set,
.set_config = lp87565_gpio_set_config,
.base = -1,
.ngpio = 3,
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index 2dbfbf90176c..37a2342eb2e6 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -327,7 +327,7 @@ static const struct gpio_chip lpc18xx_chip = {
.free = gpiochip_generic_free,
.direction_input = lpc18xx_gpio_direction_input,
.direction_output = lpc18xx_gpio_direction_output,
- .set_rv = lpc18xx_gpio_set,
+ .set = lpc18xx_gpio_set,
.get = lpc18xx_gpio_get,
.ngpio = LPC18XX_MAX_PORTS * LPC18XX_PINS_PER_PORT,
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 6668b8bd9f1e..37fc54fc7385 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -407,7 +407,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set_rv = lpc32xx_gpio_set_value_p012,
+ .set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P0_GRP,
@@ -423,7 +423,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set_rv = lpc32xx_gpio_set_value_p012,
+ .set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P1_GRP,
@@ -439,7 +439,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set_rv = lpc32xx_gpio_set_value_p012,
+ .set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPIO_P2_GRP,
.ngpio = LPC32XX_GPIO_P2_MAX,
@@ -454,7 +454,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p3,
.get = lpc32xx_gpio_get_value_p3,
.direction_output = lpc32xx_gpio_dir_output_p3,
- .set_rv = lpc32xx_gpio_set_value_p3,
+ .set = lpc32xx_gpio_set_value_p3,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_gpio_p3,
.base = LPC32XX_GPIO_P3_GRP,
@@ -482,7 +482,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.chip = {
.label = "gpo_p3",
.direction_output = lpc32xx_gpio_dir_out_always,
- .set_rv = lpc32xx_gpo_set_value,
+ .set = lpc32xx_gpo_set_value,
.get = lpc32xx_gpo_get_value,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPO_P3_GRP,
diff --git a/drivers/gpio/gpio-macsmc.c b/drivers/gpio/gpio-macsmc.c
index 7570d9e89adf..30ef258e7655 100644
--- a/drivers/gpio/gpio-macsmc.c
+++ b/drivers/gpio/gpio-macsmc.c
@@ -261,7 +261,7 @@ static int macsmc_gpio_probe(struct platform_device *pdev)
smcgp->gc.label = "macsmc-pmu-gpio";
smcgp->gc.owner = THIS_MODULE;
smcgp->gc.get = macsmc_gpio_get;
- smcgp->gc.set_rv = macsmc_gpio_set;
+ smcgp->gc.set = macsmc_gpio_set;
smcgp->gc.get_direction = macsmc_gpio_get_direction;
smcgp->gc.init_valid_mask = macsmc_gpio_init_valid_mask;
smcgp->gc.can_sleep = true;
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
index e73e72d62bc8..551faf9655b2 100644
--- a/drivers/gpio/gpio-madera.c
+++ b/drivers/gpio/gpio-madera.c
@@ -109,7 +109,7 @@ static const struct gpio_chip madera_gpio_chip = {
.direction_input = madera_gpio_direction_in,
.get = madera_gpio_get,
.direction_output = madera_gpio_direction_out,
- .set_rv = madera_gpio_set,
+ .set = madera_gpio_set,
.set_config = gpiochip_generic_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 75d414d8c992..84c7c2dca822 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -188,7 +188,7 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.direction_input = max7301_direction_input;
ts->chip.get = max7301_get;
ts->chip.direction_output = max7301_direction_output;
- ts->chip.set_rv = max7301_set;
+ ts->chip.set = max7301_set;
ts->chip.ngpio = PIN_NUMBER;
ts->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index d5ffedb086af..a61d670ceeda 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -585,8 +585,8 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
gc->direction_input = max732x_gpio_direction_input;
if (chip->dir_output) {
gc->direction_output = max732x_gpio_direction_output;
- gc->set_rv = max732x_gpio_set_value;
- gc->set_multiple_rv = max732x_gpio_set_multiple;
+ gc->set = max732x_gpio_set_value;
+ gc->set_multiple = max732x_gpio_set_multiple;
}
gc->get = max732x_gpio_get_value;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index af7af8e40afe..02eca400b307 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -311,7 +311,7 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.direction_input = max77620_gpio_dir_input;
mgpio->gpio_chip.get = max77620_gpio_get;
mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
- mgpio->gpio_chip.set_rv = max77620_gpio_set;
+ mgpio->gpio_chip.set = max77620_gpio_set;
mgpio->gpio_chip.set_config = max77620_gpio_set_config;
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
diff --git a/drivers/gpio/gpio-max77650.c b/drivers/gpio/gpio-max77650.c
index a553e141059f..4540da4c1418 100644
--- a/drivers/gpio/gpio-max77650.c
+++ b/drivers/gpio/gpio-max77650.c
@@ -166,7 +166,7 @@ static int max77650_gpio_probe(struct platform_device *pdev)
chip->gc.direction_input = max77650_gpio_direction_input;
chip->gc.direction_output = max77650_gpio_direction_output;
- chip->gc.set_rv = max77650_gpio_set_value;
+ chip->gc.set = max77650_gpio_set_value;
chip->gc.get = max77650_gpio_get_value;
chip->gc.get_direction = max77650_gpio_get_direction;
chip->gc.set_config = max77650_gpio_set_config;
diff --git a/drivers/gpio/gpio-max77759.c b/drivers/gpio/gpio-max77759.c
index 7fe8e6f697d0..5e48eb03e7b3 100644
--- a/drivers/gpio/gpio-max77759.c
+++ b/drivers/gpio/gpio-max77759.c
@@ -469,7 +469,7 @@ static int max77759_gpio_probe(struct platform_device *pdev)
chip->gc.direction_input = max77759_gpio_direction_input;
chip->gc.direction_output = max77759_gpio_direction_output;
chip->gc.get = max77759_gpio_get_value;
- chip->gc.set_rv = max77759_gpio_set_value;
+ chip->gc.set = max77759_gpio_set_value;
girq = &chip->gc.irq;
gpio_irq_chip_set_chip(girq, &max77759_gpio_irq_chip);
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 5ee2991ecdfd..581a71872eab 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -180,7 +180,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.request = mb86s70_gpio_request;
gchip->gc.free = mb86s70_gpio_free;
gchip->gc.get = mb86s70_gpio_get;
- gchip->gc.set_rv = mb86s70_gpio_set;
+ gchip->gc.set = mb86s70_gpio_set;
gchip->gc.to_irq = mb86s70_gpio_to_irq;
gchip->gc.label = dev_name(&pdev->dev);
gchip->gc.ngpio = 32;
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index e68956104161..9a40e9579e95 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -103,7 +103,7 @@ static int mc33880_probe(struct spi_device *spi)
mc->spi = spi;
mc->chip.label = DRIVER_NAME;
- mc->chip.set_rv = mc33880_set;
+ mc->chip.set = mc33880_set;
mc->chip.base = pdata->base;
mc->chip.ngpio = PIN_NUMBER;
mc->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 12cf36f9ca63..f6af81bf2b13 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -224,7 +224,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
gpio->direction_input = ioh_gpio_direction_input;
gpio->get = ioh_gpio_get;
gpio->direction_output = ioh_gpio_direction_output;
- gpio->set_rv = ioh_gpio_set;
+ gpio->set = ioh_gpio_set;
gpio->dbg_show = NULL;
gpio->base = -1;
gpio->ngpio = num_port;
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 6f3dda6b635f..390f2e74a9d8 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -397,7 +397,7 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
gc->ngpio = npins;
gc->owner = THIS_MODULE;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
girq = &gs->gc.irq;
gpio_irq_chip_set_chip(girq, &mlxbf2_gpio_irq_chip);
diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
index 9875e34bde72..ed29b07d16c1 100644
--- a/drivers/gpio/gpio-mlxbf3.c
+++ b/drivers/gpio/gpio-mlxbf3.c
@@ -190,9 +190,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
struct mlxbf3_gpio_context *gs;
struct gpio_irq_chip *girq;
struct gpio_chip *gc;
- char *colon_ptr;
int ret, irq;
- long num;
gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL);
if (!gs)
@@ -229,39 +227,25 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
- colon_ptr = strchr(dev_name(dev), ':');
- if (!colon_ptr) {
- dev_err(dev, "invalid device name format\n");
- return -EINVAL;
- }
-
- ret = kstrtol(++colon_ptr, 16, &num);
- if (ret) {
- dev_err(dev, "invalid device instance\n");
- return ret;
- }
-
- if (!num) {
- irq = platform_get_irq(pdev, 0);
- if (irq >= 0) {
- girq = &gs->gc.irq;
- gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
- girq->default_type = IRQ_TYPE_NONE;
- /* This will let us handle the parent IRQ in the driver */
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->parent_handler = NULL;
- girq->handler = handle_bad_irq;
-
- /*
- * Directly request the irq here instead of passing
- * a flow-handler because the irq is shared.
- */
- ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
- IRQF_SHARED, dev_name(dev), gs);
- if (ret)
- return dev_err_probe(dev, ret, "failed to request IRQ");
- }
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq >= 0) {
+ girq = &gs->gc.irq;
+ gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
+ girq->default_type = IRQ_TYPE_NONE;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->parent_handler = NULL;
+ girq->handler = handle_bad_irq;
+
+ /*
+ * Directly request the irq here instead of passing
+ * a flow-handler because the irq is shared.
+ */
+ ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
+ IRQF_SHARED, dev_name(dev), gs);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ");
}
platform_set_drvdata(pdev, gs);
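The two mlxbf hunks above move to platform_get_irq_optional(), which differs from platform_get_irq() only in staying silent when the interrupt resource is absent: it still returns the IRQ number on success and a negative errno (typically -ENXIO) otherwise, so probe can treat the interrupt as best effort. A minimal sketch of that pattern (hypothetical foo_* names):

static int foo_probe(struct platform_device *pdev)
{
	int irq;

	/* No "IRQ index 0 not found" noise when the IRQ is simply absent. */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq >= 0)
		return foo_setup_irq(pdev, irq); /* hypothetical helper */

	return 0; /* run without interrupts, as the hunks above do */
}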
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index 897a1e004681..8f1405733d98 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -111,7 +111,7 @@ static int ltq_mm_probe(struct platform_device *pdev)
chip->mmchip.gc.ngpio = 16;
chip->mmchip.gc.direction_output = ltq_mm_dir_out;
- chip->mmchip.gc.set_rv = ltq_mm_set;
+ chip->mmchip.gc.set = ltq_mm_set;
chip->mmchip.save_regs = ltq_mm_save_regs;
/* store the shadow value if one was passed by the devicetree */
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index cf878c2ea6bf..021ad62778c2 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -367,7 +367,7 @@ static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
int val)
{
- gc->set_rv(gc, gpio, val);
+ gc->set(gc, gpio, val);
return bgpio_dir_return(gc, gpio, true);
}
@@ -432,14 +432,14 @@ static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
int val)
{
bgpio_dir_out(gc, gpio, val);
- gc->set_rv(gc, gpio, val);
+ gc->set(gc, gpio, val);
return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
int val)
{
- gc->set_rv(gc, gpio, val);
+ gc->set(gc, gpio, val);
bgpio_dir_out(gc, gpio, val);
return bgpio_dir_return(gc, gpio, true);
}
@@ -528,18 +528,18 @@ static int bgpio_setup_io(struct gpio_chip *gc,
if (set && clr) {
gc->reg_set = set;
gc->reg_clr = clr;
- gc->set_rv = bgpio_set_with_clear;
- gc->set_multiple_rv = bgpio_set_multiple_with_clear;
+ gc->set = bgpio_set_with_clear;
+ gc->set_multiple = bgpio_set_multiple_with_clear;
} else if (set && !clr) {
gc->reg_set = set;
- gc->set_rv = bgpio_set_set;
- gc->set_multiple_rv = bgpio_set_multiple_set;
+ gc->set = bgpio_set_set;
+ gc->set_multiple = bgpio_set_multiple_set;
} else if (flags & BGPIOF_NO_OUTPUT) {
- gc->set_rv = bgpio_set_none;
- gc->set_multiple_rv = NULL;
+ gc->set = bgpio_set_none;
+ gc->set_multiple = NULL;
} else {
- gc->set_rv = bgpio_set;
- gc->set_multiple_rv = bgpio_set_multiple;
+ gc->set = bgpio_set;
+ gc->set_multiple = bgpio_set_multiple;
}
if (!(flags & BGPIOF_UNREADABLE_REG_SET) &&
@@ -676,7 +676,7 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
}
gc->bgpio_data = gc->read_reg(gc->reg_dat);
- if (gc->set_rv == bgpio_set_set &&
+ if (gc->set == bgpio_set_set &&
!(flags & BGPIOF_UNREADABLE_REG_SET))
gc->bgpio_data = gc->read_reg(gc->reg_set);
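The bgpio_setup_io() hunk above selects a setter by register topology: a set/clear register pair, a set-only register, a plain data register, or no output path at all. The set/clear pair is the interesting case, because each write affects only the bits that are set in the written mask, so the helper needs neither a read-modify-write cycle nor the chip's spinlock; the selected callback is roughly:

static int bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
				int val)
{
	unsigned long mask = bgpio_line2mask(gc, gpio);

	/* One register latches bits to 1, its twin latches them to 0. */
	if (val)
		gc->write_reg(gc->reg_set, mask);
	else
		gc->write_reg(gc->reg_clr, mask);

	return 0;
}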
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 266c0953d914..a7d69f3835c1 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -449,9 +449,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
gc->parent = dev;
gc->get = gpio_mockup_get;
- gc->set_rv = gpio_mockup_set;
+ gc->set = gpio_mockup_set;
gc->get_multiple = gpio_mockup_get_multiple;
- gc->set_multiple_rv = gpio_mockup_set_multiple;
+ gc->set_multiple = gpio_mockup_set_multiple;
gc->direction_output = gpio_mockup_dirout;
gc->direction_input = gpio_mockup_dirin;
gc->get_direction = gpio_mockup_get_direction;
diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c
index 27dd9c3e7b77..4eb9f1a2779b 100644
--- a/drivers/gpio/gpio-moxtet.c
+++ b/drivers/gpio/gpio-moxtet.c
@@ -140,7 +140,7 @@ static int moxtet_gpio_probe(struct device *dev)
chip->gpio_chip.direction_input = moxtet_gpio_direction_input;
chip->gpio_chip.direction_output = moxtet_gpio_direction_output;
chip->gpio_chip.get = moxtet_gpio_get_value;
- chip->gpio_chip.set_rv = moxtet_gpio_set_value;
+ chip->gpio_chip.set = moxtet_gpio_set_value;
chip->gpio_chip.base = -1;
chip->gpio_chip.ngpio = MOXTET_GPIO_NGPIOS;
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 40d587176a75..dad0eca1ca2e 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -153,7 +153,7 @@ static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
gc->direction_input = mpc52xx_wkup_gpio_dir_in;
gc->direction_output = mpc52xx_wkup_gpio_dir_out;
gc->get = mpc52xx_wkup_gpio_get;
- gc->set_rv = mpc52xx_wkup_gpio_set;
+ gc->set = mpc52xx_wkup_gpio_set;
ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
if (ret)
@@ -315,7 +315,7 @@ static int mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
gc->direction_input = mpc52xx_simple_gpio_dir_in;
gc->direction_output = mpc52xx_simple_gpio_dir_out;
gc->get = mpc52xx_simple_gpio_get;
- gc->set_rv = mpc52xx_simple_gpio_set;
+ gc->set = mpc52xx_simple_gpio_set;
ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
if (ret)
diff --git a/drivers/gpio/gpio-mpfs.c b/drivers/gpio/gpio-mpfs.c
index 3415cb7ebb0f..82d557a7e5d8 100644
--- a/drivers/gpio/gpio-mpfs.c
+++ b/drivers/gpio/gpio-mpfs.c
@@ -150,7 +150,7 @@ static int mpfs_gpio_probe(struct platform_device *pdev)
mpfs_gpio->gc.direction_output = mpfs_gpio_direction_output;
mpfs_gpio->gc.get_direction = mpfs_gpio_get_direction;
mpfs_gpio->gc.get = mpfs_gpio_get;
- mpfs_gpio->gc.set_rv = mpfs_gpio_set;
+ mpfs_gpio->gc.set = mpfs_gpio_set;
mpfs_gpio->gc.base = -1;
mpfs_gpio->gc.ngpio = ngpios;
mpfs_gpio->gc.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c
index b17de08e9e03..9f42bb30b4ec 100644
--- a/drivers/gpio/gpio-mpsse.c
+++ b/drivers/gpio/gpio-mpsse.c
@@ -448,9 +448,9 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
priv->gpio.direction_input = gpio_mpsse_direction_input;
priv->gpio.direction_output = gpio_mpsse_direction_output;
priv->gpio.get = gpio_mpsse_gpio_get;
- priv->gpio.set_rv = gpio_mpsse_gpio_set;
+ priv->gpio.set = gpio_mpsse_gpio_set;
priv->gpio.get_multiple = gpio_mpsse_get_multiple;
- priv->gpio.set_multiple_rv = gpio_mpsse_set_multiple;
+ priv->gpio.set_multiple = gpio_mpsse_set_multiple;
priv->gpio.base = -1;
priv->gpio.ngpio = 16;
priv->gpio.offset = priv->intf_id * priv->gpio.ngpio;
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index 992339a89d19..b0cccd856840 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -658,7 +658,7 @@ static int msc313_gpio_probe(struct platform_device *pdev)
gpiochip->direction_input = msc313_gpio_direction_input;
gpiochip->direction_output = msc313_gpio_direction_output;
gpiochip->get = msc313_gpio_get;
- gpiochip->set_rv = msc313_gpio_set;
+ gpiochip->set = msc313_gpio_set;
gpiochip->base = -1;
gpiochip->ngpio = gpio->gpio_data->num;
gpiochip->names = gpio->gpio_data->names;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 24792b8eb083..5e3f54cb8bc4 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1168,7 +1168,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.direction_input = mvebu_gpio_direction_input;
mvchip->chip.get = mvebu_gpio_get;
mvchip->chip.direction_output = mvebu_gpio_direction_output;
- mvchip->chip.set_rv = mvebu_gpio_set;
+ mvchip->chip.set = mvebu_gpio_set;
if (have_irqs)
mvchip->chip.to_irq = mvebu_gpio_to_irq;
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 296d13845b30..bcf4b07dd458 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -674,7 +674,7 @@ static int nmk_gpio_probe(struct platform_device *pdev)
chip->direction_input = nmk_gpio_make_input;
chip->get = nmk_gpio_get_input;
chip->direction_output = nmk_gpio_make_output;
- chip->set_rv = nmk_gpio_set_output;
+ chip->set = nmk_gpio_set_output;
chip->dbg_show = nmk_gpio_dbg_show;
chip->can_sleep = false;
chip->owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-npcm-sgpio.c b/drivers/gpio/gpio-npcm-sgpio.c
index 25b203a89e38..83c77a2c0623 100644
--- a/drivers/gpio/gpio-npcm-sgpio.c
+++ b/drivers/gpio/gpio-npcm-sgpio.c
@@ -211,7 +211,7 @@ static int npcm_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
static int npcm_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
{
- return gc->set_rv(gc, offset, val);
+ return gc->set(gc, offset, val);
}
static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -546,7 +546,7 @@ static int npcm_sgpio_probe(struct platform_device *pdev)
gpio->chip.direction_output = npcm_sgpio_dir_out;
gpio->chip.get_direction = npcm_sgpio_get_direction;
gpio->chip.get = npcm_sgpio_get;
- gpio->chip.set_rv = npcm_sgpio_set;
+ gpio->chip.set = npcm_sgpio_set;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 24966161742a..777e20c608dc 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -108,7 +108,7 @@ static int octeon_gpio_probe(struct platform_device *pdev)
chip->direction_input = octeon_gpio_dir_in;
chip->get = octeon_gpio_get;
chip->direction_output = octeon_gpio_dir_out;
- chip->set_rv = octeon_gpio_set;
+ chip->set = octeon_gpio_set;
err = devm_gpiochip_add_data(&pdev->dev, chip, gpio);
if (err)
return err;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index ed5c88a5c520..a268c76bdca6 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1046,8 +1046,8 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct device *pm_dev)
bank->chip.get_multiple = omap_gpio_get_multiple;
bank->chip.direction_output = omap_gpio_output;
bank->chip.set_config = omap_gpio_set_config;
- bank->chip.set_rv = omap_gpio_set;
- bank->chip.set_multiple_rv = omap_gpio_set_multiple;
+ bank->chip.set = omap_gpio_set;
+ bank->chip.set_multiple = omap_gpio_set_multiple;
if (bank->is_mpuio) {
bank->chip.label = "mpuio";
if (bank->regs->wkup_en)
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 9329d8ce8f59..e377f6dd4ccf 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -166,7 +166,7 @@ static int palmas_gpio_probe(struct platform_device *pdev)
palmas_gpio->gpio_chip.direction_input = palmas_gpio_input;
palmas_gpio->gpio_chip.direction_output = palmas_gpio_output;
palmas_gpio->gpio_chip.to_irq = palmas_gpio_to_irq;
- palmas_gpio->gpio_chip.set_rv = palmas_gpio_set;
+ palmas_gpio->gpio_chip.set = palmas_gpio_set;
palmas_gpio->gpio_chip.get = palmas_gpio_get;
palmas_gpio->gpio_chip.parent = &pdev->dev;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 69906a9af7e6..b46927f55038 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -789,10 +789,10 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->direction_input = pca953x_gpio_direction_input;
gc->direction_output = pca953x_gpio_direction_output;
gc->get = pca953x_gpio_get_value;
- gc->set_rv = pca953x_gpio_set_value;
+ gc->set = pca953x_gpio_set_value;
gc->get_direction = pca953x_gpio_get_direction;
gc->get_multiple = pca953x_gpio_get_multiple;
- gc->set_multiple_rv = pca953x_gpio_set_multiple;
+ gc->set_multiple = pca953x_gpio_set_multiple;
gc->set_config = pca953x_gpio_set_config;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
index a33246f20fd8..c5a1287079a0 100644
--- a/drivers/gpio/gpio-pca9570.c
+++ b/drivers/gpio/gpio-pca9570.c
@@ -126,7 +126,7 @@ static int pca9570_probe(struct i2c_client *client)
gpio->chip.owner = THIS_MODULE;
gpio->chip.get_direction = pca9570_get_direction;
gpio->chip.get = pca9570_get;
- gpio->chip.set_rv = pca9570_set;
+ gpio->chip.set = pca9570_set;
gpio->chip.base = -1;
gpio->chip_data = device_get_match_data(&client->dev);
gpio->chip.ngpio = gpio->chip_data->ngpio;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index a04203680333..3b9de8c3d924 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -295,8 +295,8 @@ static int pcf857x_probe(struct i2c_client *client)
gpio->chip.owner = THIS_MODULE;
gpio->chip.get = pcf857x_get;
gpio->chip.get_multiple = pcf857x_get_multiple;
- gpio->chip.set_rv = pcf857x_set;
- gpio->chip.set_multiple_rv = pcf857x_set_multiple;
+ gpio->chip.set = pcf857x_set;
+ gpio->chip.set_multiple = pcf857x_set_multiple;
gpio->chip.direction_input = pcf857x_input;
gpio->chip.direction_output = pcf857x_output;
gpio->chip.ngpio = (uintptr_t)i2c_get_match_data(client);
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index c6f313342ba0..9925687e05fb 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -219,7 +219,7 @@ static void pch_gpio_setup(struct pch_gpio *chip)
gpio->direction_input = pch_gpio_direction_input;
gpio->get = pch_gpio_get;
gpio->direction_output = pch_gpio_direction_output;
- gpio->set_rv = pch_gpio_set;
+ gpio->set = pch_gpio_set;
gpio->base = -1;
gpio->ngpio = gpio_pins[chip->ioh];
gpio->can_sleep = false;
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 98cfac4eac85..02e4ffcf5a6f 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -330,7 +330,7 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
pl061->gc.direction_input = pl061_direction_input;
pl061->gc.direction_output = pl061_direction_output;
pl061->gc.get = pl061_get_value;
- pl061->gc.set_rv = pl061_set_value;
+ pl061->gc.set = pl061_set_value;
pl061->gc.ngpio = PL061_GPIO_NR;
pl061->gc.label = dev_name(dev);
pl061->gc.parent = dev;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 13f7da2a9486..fa22f3faa163 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -355,7 +355,7 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio, void __iom
pchip->chip.direction_input = pxa_gpio_direction_input;
pchip->chip.direction_output = pxa_gpio_direction_output;
pchip->chip.get = pxa_gpio_get;
- pchip->chip.set_rv = pxa_gpio_set;
+ pchip->chip.set = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
pchip->chip.request = gpiochip_generic_request;
@@ -499,8 +499,6 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
gfer = readl_relaxed(base + GFER_OFFSET) & ~GPIO_bit(gpio);
writel_relaxed(grer, base + GRER_OFFSET);
writel_relaxed(gfer, base + GFER_OFFSET);
-
- gpiochip_disable_irq(&pchip->chip, gpio);
}
static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
@@ -520,21 +518,17 @@ static void pxa_unmask_muxed_gpio(struct irq_data *d)
unsigned int gpio = irqd_to_hwirq(d);
struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio);
- gpiochip_enable_irq(&pchip->chip, gpio);
-
c->irq_mask |= GPIO_bit(gpio);
update_edge_detect(c);
}
-static const struct irq_chip pxa_muxed_gpio_chip = {
+static struct irq_chip pxa_muxed_gpio_chip = {
.name = "GPIO",
.irq_ack = pxa_ack_muxed_gpio,
.irq_mask = pxa_mask_muxed_gpio,
.irq_unmask = pxa_unmask_muxed_gpio,
.irq_set_type = pxa_gpio_irq_type,
.irq_set_wake = pxa_gpio_set_wake,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int pxa_gpio_nums(struct platform_device *pdev)
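The gpio-pxa.c hunks back out the immutable-irqchip conversion as one unit: dropping IRQCHIP_IMMUTABLE and GPIOCHIP_IRQ_RESOURCE_HELPERS only makes sense together with removing the gpiochip_disable_irq()/gpiochip_enable_irq() calls from the mask/unmask paths, since those calls are how an immutable (const) irq_chip keeps gpiolib's IRQ resource accounting in step. For contrast, the shape being removed is roughly (hypothetical foo_* names):

static void foo_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	/* ... mask the line in hardware ... */
	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}

static const struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask, /* calls gpiochip_enable_irq() first */
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};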
diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c
index b4b607515a04..40413e06b69c 100644
--- a/drivers/gpio/gpio-raspberrypi-exp.c
+++ b/drivers/gpio/gpio-raspberrypi-exp.c
@@ -232,7 +232,7 @@ static int rpi_exp_gpio_probe(struct platform_device *pdev)
rpi_gpio->gc.direction_output = rpi_exp_gpio_dir_out;
rpi_gpio->gc.get_direction = rpi_exp_gpio_get_direction;
rpi_gpio->gc.get = rpi_exp_gpio_get;
- rpi_gpio->gc.set_rv = rpi_exp_gpio_set;
+ rpi_gpio->gc.set = rpi_exp_gpio_set;
rpi_gpio->gc.can_sleep = true;
return devm_gpiochip_add_data(dev, &rpi_gpio->gc, rpi_gpio);
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index cf3e91d235df..5a69e4534591 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -118,7 +118,7 @@ static int rc5t583_gpio_probe(struct platform_device *pdev)
rc5t583_gpio->gpio_chip.free = rc5t583_gpio_free,
rc5t583_gpio->gpio_chip.direction_input = rc5t583_gpio_dir_input,
rc5t583_gpio->gpio_chip.direction_output = rc5t583_gpio_dir_output,
- rc5t583_gpio->gpio_chip.set_rv = rc5t583_gpio_set,
+ rc5t583_gpio->gpio_chip.set = rc5t583_gpio_set,
rc5t583_gpio->gpio_chip.get = rc5t583_gpio_get,
rc5t583_gpio->gpio_chip.to_irq = rc5t583_gpio_to_irq,
rc5t583_gpio->gpio_chip.ngpio = RC5T583_MAX_GPIO,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index cd31580effa9..86777e097fd8 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -535,8 +535,8 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip->get = gpio_rcar_get;
gpio_chip->get_multiple = gpio_rcar_get_multiple;
gpio_chip->direction_output = gpio_rcar_direction_output;
- gpio_chip->set_rv = gpio_rcar_set;
- gpio_chip->set_multiple_rv = gpio_rcar_set_multiple;
+ gpio_chip->set = gpio_rcar_set;
+ gpio_chip->set_multiple = gpio_rcar_set_multiple;
gpio_chip->label = name;
gpio_chip->parent = dev;
gpio_chip->owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index a75ed8021de5..ba62b81aa8ae 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -159,7 +159,7 @@ static int rdc321x_gpio_probe(struct platform_device *pdev)
rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
- rdc321x_gpio_dev->chip.set_rv = rdc_gpio_set_value;
+ rdc321x_gpio_dev->chip.set = rdc_gpio_set_value;
rdc321x_gpio_dev->chip.base = 0;
rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios;
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index d8da99f97385..f2238196faf1 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -46,7 +46,7 @@ static int gpio_reg_direction_output(struct gpio_chip *gc, unsigned offset,
if (r->direction & BIT(offset))
return -ENOTSUPP;
- gc->set_rv(gc, offset, value);
+ gc->set(gc, offset, value);
return 0;
}
@@ -161,9 +161,9 @@ struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg,
r->gc.get_direction = gpio_reg_get_direction;
r->gc.direction_input = gpio_reg_direction_input;
r->gc.direction_output = gpio_reg_direction_output;
- r->gc.set_rv = gpio_reg_set;
+ r->gc.set = gpio_reg_set;
r->gc.get = gpio_reg_get;
- r->gc.set_multiple_rv = gpio_reg_set_multiple;
+ r->gc.set_multiple = gpio_reg_set_multiple;
if (irqs)
r->gc.to_irq = gpio_reg_to_irq;
r->gc.base = base;
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 87c4225784cf..e8a32dfebdcb 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -260,9 +260,9 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->free = gpiochip_generic_free;
chip->get = gpio_regmap_get;
if (gpio->reg_set_base && gpio->reg_clr_base)
- chip->set_rv = gpio_regmap_set_with_clear;
+ chip->set = gpio_regmap_set_with_clear;
else if (gpio->reg_set_base)
- chip->set_rv = gpio_regmap_set;
+ chip->set = gpio_regmap_set;
chip->get_direction = gpio_regmap_get_direction;
if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) {
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index ecd60ff9e1dd..bcfc323a8315 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -327,7 +327,7 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
static const struct gpio_chip rockchip_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set_rv = rockchip_gpio_set,
+ .set = rockchip_gpio_set,
.get = rockchip_gpio_get,
.get_direction = rockchip_gpio_get_direction,
.direction_input = rockchip_gpio_direction_input,
diff --git a/drivers/gpio/gpio-rtd.c b/drivers/gpio/gpio-rtd.c
index 25bbd749b019..d46b40dd5283 100644
--- a/drivers/gpio/gpio-rtd.c
+++ b/drivers/gpio/gpio-rtd.c
@@ -565,7 +565,7 @@ static int rtd_gpio_probe(struct platform_device *pdev)
data->gpio_chip.get_direction = rtd_gpio_get_direction;
data->gpio_chip.direction_input = rtd_gpio_direction_input;
data->gpio_chip.direction_output = rtd_gpio_direction_output;
- data->gpio_chip.set_rv = rtd_gpio_set;
+ data->gpio_chip.set = rtd_gpio_set;
data->gpio_chip.get = rtd_gpio_get;
data->gpio_chip.set_config = rtd_gpio_set_config;
data->gpio_chip.parent = dev;
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index e9d054d78ccb..7f6a62f5d1ee 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -99,7 +99,7 @@ static struct sa1100_gpio_chip sa1100_gpio_chip = {
.get_direction = sa1100_get_direction,
.direction_input = sa1100_direction_input,
.direction_output = sa1100_direction_output,
- .set_rv = sa1100_gpio_set,
+ .set = sa1100_gpio_set,
.get = sa1100_gpio_get,
.to_irq = sa1100_to_irq,
.base = 0,
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index c31244cf5e89..5005688f6e67 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -196,7 +196,7 @@ static int sama5d2_piobu_probe(struct platform_device *pdev)
piobu->chip.direction_input = sama5d2_piobu_direction_input;
piobu->chip.direction_output = sama5d2_piobu_direction_output;
piobu->chip.get = sama5d2_piobu_get;
- piobu->chip.set_rv = sama5d2_piobu_set;
+ piobu->chip.set = sama5d2_piobu_set;
piobu->chip.base = -1;
piobu->chip.ngpio = PIOBU_NUM;
piobu->chip.can_sleep = 0;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 833ffdd98d74..966d16a6d515 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -167,7 +167,7 @@ static const struct gpio_chip sch_gpio_chip = {
.direction_input = sch_gpio_direction_in,
.get = sch_gpio_get,
.direction_output = sch_gpio_direction_out,
- .set_rv = sch_gpio_set,
+ .set = sch_gpio_set,
.get_direction = sch_gpio_get_direction,
};
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index 44fb5fc21fb8..f95566998d30 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -297,7 +297,7 @@ static int sch311x_gpio_probe(struct platform_device *pdev)
block->chip.get_direction = sch311x_gpio_get_direction;
block->chip.set_config = sch311x_gpio_set_config;
block->chip.get = sch311x_gpio_get;
- block->chip.set_rv = sch311x_gpio_set;
+ block->chip.set = sch311x_gpio_set;
block->chip.ngpio = 8;
block->chip.parent = &pdev->dev;
block->chip.base = sch311x_gpio_blocks[i].base;
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 9503296422fd..050092583f79 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -486,9 +486,9 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
gc->parent = dev;
gc->fwnode = swnode;
gc->get = gpio_sim_get;
- gc->set_rv = gpio_sim_set;
+ gc->set = gpio_sim_set;
gc->get_multiple = gpio_sim_get_multiple;
- gc->set_multiple_rv = gpio_sim_set_multiple;
+ gc->set_multiple = gpio_sim_set_multiple;
gc->direction_output = gpio_sim_direction_output;
gc->direction_input = gpio_sim_direction_input;
gc->get_direction = gpio_sim_get_direction;
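gpio-sim above (like gpio-mockup earlier) provides both the single-line and the multi-line setters, but set_multiple remains optional for drivers: when it is absent, gpiolib falls back to driving the lines one at a time through set(), conceptually (simplified sketch, not the exact gpiolib code):

unsigned int i;
int ret;

/* Walk the lines selected in 'mask' and apply the bit from 'bits'. */
for_each_set_bit(i, mask, gc->ngpio) {
	ret = gc->set(gc, i, test_bit(i, bits));
	if (ret)
		return ret;
}
return 0;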
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
index 95355dda621b..958034b9f3f3 100644
--- a/drivers/gpio/gpio-siox.c
+++ b/drivers/gpio/gpio-siox.c
@@ -237,7 +237,7 @@ static int gpio_siox_probe(struct siox_device *sdevice)
gc->parent = dev;
gc->owner = THIS_MODULE;
gc->get = gpio_siox_get;
- gc->set_rv = gpio_siox_set;
+ gc->set = gpio_siox_set;
gc->direction_input = gpio_siox_direction_input;
gc->direction_output = gpio_siox_direction_output;
gc->get_direction = gpio_siox_get_direction;
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 55f0e8afa291..96a0e1211500 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -140,7 +140,7 @@ static int spics_gpio_probe(struct platform_device *pdev)
spics->chip.request = spics_request;
spics->chip.free = spics_free;
spics->chip.direction_output = spics_direction_output;
- spics->chip.set_rv = spics_set_value;
+ spics->chip.set = spics_set_value;
spics->chip.label = dev_name(&pdev->dev);
spics->chip.parent = &pdev->dev;
spics->chip.owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index bbd5bf51c088..413bcd0a4240 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -245,7 +245,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
sprd_gpio->chip.request = sprd_gpio_request;
sprd_gpio->chip.free = sprd_gpio_free;
sprd_gpio->chip.get = sprd_gpio_get;
- sprd_gpio->chip.set_rv = sprd_gpio_set;
+ sprd_gpio->chip.set = sprd_gpio_set;
sprd_gpio->chip.direction_input = sprd_gpio_direction_input;
sprd_gpio->chip.direction_output = sprd_gpio_direction_output;
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 0a270156e0be..5dd4c21a8e60 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -136,7 +136,7 @@ static const struct gpio_chip template_chip = {
.direction_input = stmpe_gpio_direction_input,
.get = stmpe_gpio_get,
.direction_output = stmpe_gpio_direction_output,
- .set_rv = stmpe_gpio_set,
+ .set = stmpe_gpio_set,
.request = stmpe_gpio_request,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index fdda8de6ca36..493c027afdd6 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -249,7 +249,7 @@ static int xway_stp_probe(struct platform_device *pdev)
chip->gc.label = "stp-xway";
chip->gc.direction_output = xway_stp_dir_out;
chip->gc.get = xway_stp_get;
- chip->gc.set_rv = xway_stp_set;
+ chip->gc.set = xway_stp_set;
chip->gc.request = xway_stp_request;
chip->gc.base = -1;
chip->gc.owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index f86f78655c24..40064d4cf47f 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -115,7 +115,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val)
BIT(offs % SYSCON_REG_BITS));
}
- return chip->set_rv(chip, offset, val);
+ return chip->set(chip, offset, val);
}
static const struct syscon_gpio_data clps711x_mctrl_gpio = {
@@ -251,7 +251,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
if (priv->data->flags & GPIO_SYSCON_FEAT_IN)
priv->chip.direction_input = syscon_gpio_dir_in;
if (priv->data->flags & GPIO_SYSCON_FEAT_OUT) {
- priv->chip.set_rv = priv->data->set ? : syscon_gpio_set;
+ priv->chip.set = priv->data->set ? : syscon_gpio_set;
priv->chip.direction_output = syscon_gpio_dir_out;
}
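Note that the syscon hunk keeps the kernel's GNU C `? :` shorthand: `priv->data->set ? : syscon_gpio_set` yields priv->data->set when it is non-NULL (evaluating it only once) and falls back to the generic syscon_gpio_set otherwise, i.e. it is equivalent to:

priv->chip.set = priv->data->set ? priv->data->set : syscon_gpio_set;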
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index ce17b98e0623..ba5a8ede8912 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -430,7 +430,7 @@ int devm_tng_gpio_probe(struct device *dev, struct tng_gpio *gpio)
gpio->chip.direction_input = tng_gpio_direction_input;
gpio->chip.direction_output = tng_gpio_direction_output;
gpio->chip.get = tng_gpio_get;
- gpio->chip.set_rv = tng_gpio_set;
+ gpio->chip.set = tng_gpio_set;
gpio->chip.get_direction = tng_gpio_get_direction;
gpio->chip.set_config = tng_gpio_set_config;
gpio->chip.base = info->base;
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 0bd32809fd68..90d048f9da08 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -149,7 +149,7 @@ static const struct gpio_chip template_chip = {
.label = "tc3589x",
.owner = THIS_MODULE,
.get = tc3589x_gpio_get,
- .set_rv = tc3589x_gpio_set,
+ .set = tc3589x_gpio_set,
.direction_output = tc3589x_gpio_direction_output,
.direction_input = tc3589x_gpio_direction_input,
.get_direction = tc3589x_gpio_get_direction,
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 126fd12550aa..15a5762a82c2 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -720,7 +720,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
tgi->gc.direction_input = tegra_gpio_direction_input;
tgi->gc.get = tegra_gpio_get;
tgi->gc.direction_output = tegra_gpio_direction_output;
- tgi->gc.set_rv = tegra_gpio_set;
+ tgi->gc.set = tegra_gpio_set;
tgi->gc.get_direction = tegra_gpio_get_direction;
tgi->gc.base = 0;
tgi->gc.ngpio = tgi->bank_count * 32;
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index f902da15c419..5fd3ec3e2c53 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -891,7 +891,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.direction_input = tegra186_gpio_direction_input;
gpio->gpio.direction_output = tegra186_gpio_direction_output;
gpio->gpio.get = tegra186_gpio_get;
- gpio->gpio.set_rv = tegra186_gpio_set;
+ gpio->gpio.set = tegra186_gpio_set;
gpio->gpio.set_config = tegra186_gpio_set_config;
gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges;
gpio->gpio.init_valid_mask = tegra186_init_valid_mask;
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index eb6a1f0279c0..be96853063ba 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -533,8 +533,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
chip->direction_input = thunderx_gpio_dir_in;
chip->get = thunderx_gpio_get;
chip->direction_output = thunderx_gpio_dir_out;
- chip->set_rv = thunderx_gpio_set;
- chip->set_multiple_rv = thunderx_gpio_set_multiple;
+ chip->set = thunderx_gpio_set;
+ chip->set_multiple = thunderx_gpio_set_multiple;
chip->set_config = thunderx_gpio_set_config;
girq = &chip->irq;
gpio_irq_chip_set_chip(girq, &thunderx_gpio_irq_chip);
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index fbb883089189..679e27f00ff6 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -253,7 +253,7 @@ static int timbgpio_probe(struct platform_device *pdev)
gc->direction_input = timbgpio_gpio_direction_input;
gc->get = timbgpio_gpio_get;
gc->direction_output = timbgpio_gpio_direction_output;
- gc->set_rv = timbgpio_gpio_set;
+ gc->set = timbgpio_gpio_set;
gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
gc->dbg_show = NULL;
gc->base = pdata->gpio_base;
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index d5b8568ab061..866ff2d436d5 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -80,8 +80,8 @@ static const struct gpio_chip template_chip = {
.owner = THIS_MODULE,
.get_direction = tpic2810_get_direction,
.direction_output = tpic2810_direction_output,
- .set_rv = tpic2810_set,
- .set_multiple_rv = tpic2810_set_multiple,
+ .set = tpic2810_set,
+ .set_multiple = tpic2810_set_multiple,
.base = -1,
.ngpio = 8,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c
index 08fa061b73ef..84b17b83476f 100644
--- a/drivers/gpio/gpio-tps65086.c
+++ b/drivers/gpio/gpio-tps65086.c
@@ -69,7 +69,7 @@ static const struct gpio_chip template_chip = {
.direction_input = tps65086_gpio_direction_input,
.direction_output = tps65086_gpio_direction_output,
.get = tps65086_gpio_get,
- .set_rv = tps65086_gpio_set,
+ .set = tps65086_gpio_set,
.base = -1,
.ngpio = 4,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index 49cd7754ed05..3b4c41f5ef55 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -169,7 +169,7 @@ static const struct gpio_chip template_chip = {
.request = tps65218_gpio_request,
.direction_output = tps65218_gpio_output,
.get = tps65218_gpio_get,
- .set_rv = tps65218_gpio_set,
+ .set = tps65218_gpio_set,
.set_config = tps65218_gpio_set_config,
.can_sleep = true,
.ngpio = 3,
diff --git a/drivers/gpio/gpio-tps65219.c b/drivers/gpio/gpio-tps65219.c
index c0177088c54c..158f63bcf10c 100644
--- a/drivers/gpio/gpio-tps65219.c
+++ b/drivers/gpio/gpio-tps65219.c
@@ -203,7 +203,7 @@ static const struct gpio_chip tps65214_template_chip = {
.direction_input = tps65219_gpio_direction_input,
.direction_output = tps65219_gpio_direction_output,
.get = tps65219_gpio_get,
- .set_rv = tps65219_gpio_set,
+ .set = tps65219_gpio_set,
.base = -1,
.ngpio = 2,
.can_sleep = true,
@@ -216,7 +216,7 @@ static const struct gpio_chip tps65219_template_chip = {
.direction_input = tps65219_gpio_direction_input,
.direction_output = tps65219_gpio_direction_output,
.get = tps65219_gpio_get,
- .set_rv = tps65219_gpio_set,
+ .set = tps65219_gpio_set,
.base = -1,
.ngpio = 3,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index f1ced092f38a..aaacbb54bf5d 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -98,7 +98,7 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
/* FIXME: add handling of GPIOs as dedicated inputs */
tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output;
- tps6586x_gpio->gpio_chip.set_rv = tps6586x_gpio_set;
+ tps6586x_gpio->gpio_chip.set = tps6586x_gpio_set;
tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get;
tps6586x_gpio->gpio_chip.to_irq = tps6586x_gpio_to_irq;
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 3204f55394cf..25e9f41efe78 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -139,7 +139,7 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
tps65910_gpio->gpio_chip.can_sleep = true;
tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input;
tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output;
- tps65910_gpio->gpio_chip.set_rv = tps65910_gpio_set;
+ tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
tps65910_gpio->gpio_chip.parent = &pdev->dev;
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index d586ccfbfc56..7a2c5685c2fd 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -92,7 +92,7 @@ static const struct gpio_chip template_chip = {
.direction_input = tps65912_gpio_direction_input,
.direction_output = tps65912_gpio_direction_output,
.get = tps65912_gpio_get,
- .set_rv = tps65912_gpio_set,
+ .set = tps65912_gpio_set,
.base = -1,
.ngpio = 5,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
index 3b8805c854f7..d4fbdf90e190 100644
--- a/drivers/gpio/gpio-tps68470.c
+++ b/drivers/gpio/gpio-tps68470.c
@@ -142,7 +142,7 @@ static int tps68470_gpio_probe(struct platform_device *pdev)
tps68470_gpio->gc.direction_output = tps68470_gpio_output;
tps68470_gpio->gc.get = tps68470_gpio_get;
tps68470_gpio->gc.get_direction = tps68470_gpio_get_direction;
- tps68470_gpio->gc.set_rv = tps68470_gpio_set;
+ tps68470_gpio->gc.set = tps68470_gpio_set;
tps68470_gpio->gc.can_sleep = true;
tps68470_gpio->gc.names = tps68470_names;
tps68470_gpio->gc.ngpio = TPS68470_N_GPIO;
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 056799ecce6a..27dd09273292 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -370,7 +370,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
chip->direction_output = tqmx86_gpio_direction_output;
chip->get_direction = tqmx86_gpio_get_direction;
chip->get = tqmx86_gpio_get;
- chip->set_rv = tqmx86_gpio_set;
+ chip->set = tqmx86_gpio_set;
chip->ngpio = TQMX86_NGPIO;
chip->parent = pdev->dev.parent;
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index 35dd2d09b4d4..d9ee8fc77ccd 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -119,7 +119,7 @@ static const struct gpio_chip template_chip = {
.direction_input = ts4900_gpio_direction_input,
.direction_output = ts4900_gpio_direction_output,
.get = ts4900_gpio_get,
- .set_rv = ts4900_gpio_set,
+ .set = ts4900_gpio_set,
.base = -1,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index bb432ed73698..3c7f2efe10fd 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -340,7 +340,7 @@ static int ts5500_dio_probe(struct platform_device *pdev)
priv->gpio_chip.direction_input = ts5500_gpio_input;
priv->gpio_chip.direction_output = ts5500_gpio_output;
priv->gpio_chip.get = ts5500_gpio_get;
- priv->gpio_chip.set_rv = ts5500_gpio_set;
+ priv->gpio_chip.set = ts5500_gpio_set;
priv->gpio_chip.to_irq = ts5500_gpio_to_irq;
priv->gpio_chip.base = -1;
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index e39e39e3ef85..a33dc7c7e7a0 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -419,7 +419,7 @@ static const struct gpio_chip template_chip = {
.direction_output = twl_direction_out,
.get_direction = twl_get_direction,
.get = twl_get,
- .set_rv = twl_set,
+ .set = twl_set,
.to_irq = twl_to_irq,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index b2196b62b528..4ec9bcd40439 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -69,7 +69,7 @@ static struct gpio_chip twl6040gpo_chip = {
.get = twl6040gpo_get,
.direction_output = twl6040gpo_direction_out,
.get_direction = twl6040gpo_get_direction,
- .set_rv = twl6040gpo_set,
+ .set = twl6040gpo_set,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 8939556f42b6..197bb1d22b3c 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -386,8 +386,8 @@ static int uniphier_gpio_probe(struct platform_device *pdev)
chip->direction_input = uniphier_gpio_direction_input;
chip->direction_output = uniphier_gpio_direction_output;
chip->get = uniphier_gpio_get;
- chip->set_rv = uniphier_gpio_set;
- chip->set_multiple_rv = uniphier_gpio_set_multiple;
+ chip->set = uniphier_gpio_set;
+ chip->set_multiple = uniphier_gpio_set_multiple;
chip->to_irq = uniphier_gpio_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index e8e906b54d51..15e495c109d2 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -408,7 +408,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpioa.base = -1;
vb_gpio->gpioa.ngpio = 16;
vb_gpio->gpioa.can_sleep = true;
- vb_gpio->gpioa.set_rv = vprbrd_gpioa_set;
+ vb_gpio->gpioa.set = vprbrd_gpioa_set;
vb_gpio->gpioa.get = vprbrd_gpioa_get;
vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output;
@@ -424,7 +424,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpiob.base = -1;
vb_gpio->gpiob.ngpio = 16;
vb_gpio->gpiob.can_sleep = true;
- vb_gpio->gpiob.set_rv = vprbrd_gpiob_set;
+ vb_gpio->gpiob.set = vprbrd_gpiob_set;
vb_gpio->gpiob.get = vprbrd_gpiob_get;
vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index 07552611da98..17e040991e46 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -567,7 +567,7 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
vgpio->gc.direction_input = virtio_gpio_direction_input;
vgpio->gc.direction_output = virtio_gpio_direction_output;
vgpio->gc.get = virtio_gpio_get;
- vgpio->gc.set_rv = virtio_gpio_set;
+ vgpio->gc.set = virtio_gpio_set;
vgpio->gc.ngpio = ngpio;
vgpio->gc.base = -1; /* Allocate base dynamically */
vgpio->gc.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index a3bceac7854c..84b3a973a503 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -216,7 +216,7 @@ static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
c->direction_input = vx855gpio_direction_input;
c->direction_output = vx855gpio_direction_output;
c->get = vx855gpio_get;
- c->set_rv = vx855gpio_set;
+ c->set = vx855gpio_set;
c->set_config = vx855gpio_set_config;
c->dbg_show = NULL;
c->base = 0;
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index c89da9a22016..4af504c23e6f 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -98,7 +98,7 @@ static int wcd_gpio_probe(struct platform_device *pdev)
chip->direction_output = wcd_gpio_direction_output;
chip->get_direction = wcd_gpio_get_direction;
chip->get = wcd_gpio_get;
- chip->set_rv = wcd_gpio_set;
+ chip->set = wcd_gpio_set;
chip->parent = dev;
chip->base = -1;
chip->ngpio = WCD934X_NPINS;
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index f7df3d5fc71c..4a5e20e936a9 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -439,7 +439,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
wg->chip.direction_output = wcove_gpio_dir_out;
wg->chip.get_direction = wcove_gpio_get_direction;
wg->chip.get = wcove_gpio_get;
- wg->chip.set_rv = wcove_gpio_set;
+ wg->chip.set = wcove_gpio_set;
wg->chip.set_config = wcove_gpio_set_config;
wg->chip.base = -1;
wg->chip.ngpio = WCOVE_VGPIO_NUM;
diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
index 421655b5d4c2..dcfda738fd69 100644
--- a/drivers/gpio/gpio-winbond.c
+++ b/drivers/gpio/gpio-winbond.c
@@ -494,7 +494,7 @@ static struct gpio_chip winbond_gpio_chip = {
.can_sleep = true,
.get = winbond_gpio_get,
.direction_input = winbond_gpio_direction_in,
- .set_rv = winbond_gpio_set,
+ .set = winbond_gpio_set,
.direction_output = winbond_gpio_direction_out,
};
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index ab58aa7c0b99..f03c0e808fab 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -253,7 +253,7 @@ static const struct gpio_chip template_chip = {
.direction_input = wm831x_gpio_direction_in,
.get = wm831x_gpio_get,
.direction_output = wm831x_gpio_direction_out,
- .set_rv = wm831x_gpio_set,
+ .set = wm831x_gpio_set,
.to_irq = wm831x_gpio_to_irq,
.set_config = wm831x_set_config,
.dbg_show = wm831x_gpio_dbg_show,
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index 9a7677f841fc..46923b23a72e 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -93,7 +93,7 @@ static const struct gpio_chip template_chip = {
.direction_input = wm8350_gpio_direction_in,
.get = wm8350_gpio_get,
.direction_output = wm8350_gpio_direction_out,
- .set_rv = wm8350_gpio_set,
+ .set = wm8350_gpio_set,
.to_irq = wm8350_gpio_to_irq,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index ccc005628dd2..df47a27f508d 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -256,7 +256,7 @@ static const struct gpio_chip template_chip = {
.direction_input = wm8994_gpio_direction_in,
.get = wm8994_gpio_get,
.direction_output = wm8994_gpio_direction_out,
- .set_rv = wm8994_gpio_set,
+ .set = wm8994_gpio_set,
.set_config = wm8994_gpio_set_config,
.to_irq = wm8994_gpio_to_irq,
.dbg_show = wm8994_gpio_dbg_show,
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 28f794e5eb26..4f627de3f56c 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -178,7 +178,7 @@ static int xgene_gpio_probe(struct platform_device *pdev)
gpio->chip.direction_input = xgene_gpio_dir_in;
gpio->chip.direction_output = xgene_gpio_dir_out;
gpio->chip.get = xgene_gpio_get;
- gpio->chip.set_rv = xgene_gpio_set;
+ gpio->chip.set = xgene_gpio_set;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 36d91cacc2d9..83675ac81077 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -604,10 +604,10 @@ static int xgpio_probe(struct platform_device *pdev)
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
chip->gc.get = xgpio_get;
- chip->gc.set_rv = xgpio_set;
+ chip->gc.set = xgpio_set;
chip->gc.request = xgpio_request;
chip->gc.free = xgpio_free;
- chip->gc.set_multiple_rv = xgpio_set_multiple;
+ chip->gc.set_multiple = xgpio_set_multiple;
chip->gc.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index bcd2dfec462d..aede6324387f 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -274,7 +274,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
gc->ngpio = 70;
gc->direction_output = xlp_gpio_dir_output;
gc->direction_input = xlp_gpio_dir_input;
- gc->set_rv = xlp_gpio_set;
+ gc->set = xlp_gpio_set;
gc->get = xlp_gpio_get;
spin_lock_init(&priv->lock);
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index 70402c6b5407..faadcb4b0b2d 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -164,7 +164,7 @@ static int xra1403_probe(struct spi_device *spi)
xra->chip.direction_output = xra1403_direction_output;
xra->chip.get_direction = xra1403_get_direction;
xra->chip.get = xra1403_get;
- xra->chip.set_rv = xra1403_set;
+ xra->chip.set = xra1403_set;
xra->chip.dbg_show = xra1403_dbg_show;
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index e7ff3c60324d..4418947a10e5 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -132,7 +132,7 @@ static struct gpio_chip expstate_chip = {
.ngpio = 32,
.get_direction = xtensa_expstate_get_direction,
.get = xtensa_expstate_get_value,
- .set_rv = xtensa_expstate_set_value,
+ .set = xtensa_expstate_set_value,
};
static int xtensa_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index 0799f7976710..29375bea2289 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -161,7 +161,7 @@ static int zevio_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
static const struct gpio_chip zevio_gpio_chip = {
.direction_input = zevio_gpio_direction_input,
.direction_output = zevio_gpio_direction_output,
- .set_rv = zevio_gpio_set,
+ .set = zevio_gpio_set,
.get = zevio_gpio_get,
.to_irq = zevio_gpio_to_irq,
.base = 0,
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index b22b4e25c68d..0ffd76e8951f 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -932,7 +932,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
chip->owner = THIS_MODULE;
chip->parent = &pdev->dev;
chip->get = zynq_gpio_get_value;
- chip->set_rv = zynq_gpio_set_value;
+ chip->set = zynq_gpio_set_value;
chip->request = zynq_gpio_request;
chip->free = zynq_gpio_free;
chip->direction_input = zynq_gpio_dir_in;
diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c
index 6dc5d7acb89c..5e651482e985 100644
--- a/drivers/gpio/gpio-zynqmp-modepin.c
+++ b/drivers/gpio/gpio-zynqmp-modepin.c
@@ -130,7 +130,7 @@ static int modepin_gpio_probe(struct platform_device *pdev)
chip->owner = THIS_MODULE;
chip->parent = &pdev->dev;
chip->get = modepin_gpio_get_value;
- chip->set_rv = modepin_gpio_set_value;
+ chip->set = modepin_gpio_set_value;
chip->direction_input = modepin_gpio_dir_in;
chip->direction_output = modepin_gpio_dir_out;
chip->label = dev_name(&pdev->dev);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a93d2a9355e2..0d2b470a252e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1037,11 +1037,6 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
int base = 0;
int ret;
- /* Only allow one set() and one set_multiple(). */
- if ((gc->set && gc->set_rv) ||
- (gc->set_multiple && gc->set_multiple_rv))
- return -EINVAL;
-
/*
* First: allocate and populate the internal stat container, and
* set up the struct device.
@@ -2891,19 +2886,14 @@ static int gpiochip_set(struct gpio_chip *gc, unsigned int offset, int value)
lockdep_assert_held(&gc->gpiodev->srcu);
- if (WARN_ON(unlikely(!gc->set && !gc->set_rv)))
+ if (WARN_ON(unlikely(!gc->set)))
return -EOPNOTSUPP;
- if (gc->set_rv) {
- ret = gc->set_rv(gc, offset, value);
- if (ret > 0)
- ret = -EBADE;
-
- return ret;
- }
+ ret = gc->set(gc, offset, value);
+ if (ret > 0)
+ ret = -EBADE;
- gc->set(gc, offset, value);
- return 0;
+ return ret;
}
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
@@ -2919,7 +2909,7 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
* output-only, but if there is then not even a .set() operation it
* is pretty tricky to drive the output line.
*/
- if (!guard.gc->set && !guard.gc->set_rv && !guard.gc->direction_output) {
+ if (!guard.gc->set && !guard.gc->direction_output) {
gpiod_warn(desc,
"%s: missing set() and direction_output() operations\n",
__func__);
@@ -3665,19 +3655,14 @@ static int gpiochip_set_multiple(struct gpio_chip *gc,
lockdep_assert_held(&gc->gpiodev->srcu);
- if (gc->set_multiple_rv) {
- ret = gc->set_multiple_rv(gc, mask, bits);
+ if (gc->set_multiple) {
+ ret = gc->set_multiple(gc, mask, bits);
if (ret > 0)
ret = -EBADE;
return ret;
}
- if (gc->set_multiple) {
- gc->set_multiple(gc, mask, bits);
- return 0;
- }
-
/* set outputs if the corresponding mask bit is set */
for_each_set_bit(i, mask, gc->ngpio) {
ret = gpiochip_set(gc, i, test_bit(i, bits));
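The driver hunks above and this gpiolib change retire the transitional set_rv()/set_multiple_rv() callbacks: set() and set_multiple() are now the int-returning operations, and gpiolib maps any positive return to -EBADE. A minimal sketch of a driver-side callback under the new contract (the foo_* names are hypothetical):

#include <linux/gpio/driver.h>

struct foo_chip { void __iomem *base; };	/* hypothetical driver state */
static int foo_write_bit(struct foo_chip *foo, unsigned int off, int val);

/*
 * .set now returns int: 0 on success or a negative errno on failure.
 * gpiolib converts any positive return into -EBADE, so never report one.
 */
static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_chip *foo = gpiochip_get_data(gc);

	return foo_write_bit(foo, offset, value);
}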
diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug
index fa6ee76f4d3c..05dc43c0b8c5 100644
--- a/drivers/gpu/drm/Kconfig.debug
+++ b/drivers/gpu/drm/Kconfig.debug
@@ -70,6 +70,7 @@ config DRM_KUNIT_TEST
select DRM_GEM_SHMEM_HELPER
select DRM_KUNIT_TEST_HELPERS
select DRM_LIB_RANDOM
+ select DRM_SYSFB_HELPER
select PRIME_NUMBERS
default KUNIT_ALL_TESTS
help
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5050ac32bba2..4dafbdc8f86a 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -104,7 +104,11 @@ obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
#
obj-$(CONFIG_DRM_EXEC) += drm_exec.o
obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
-obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm.o
+
+drm_gpusvm_helper-y := \
+ drm_gpusvm.o\
+ drm_pagemap.o
+obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
index 2b60128e2c69..cba7d32150a9 100644
--- a/drivers/gpu/drm/adp/adp-mipi.c
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -229,9 +229,10 @@ static int adp_mipi_probe(struct platform_device *pdev)
{
struct adp_mipi_drv_private *adp;
- adp = devm_kzalloc(&pdev->dev, sizeof(*adp), GFP_KERNEL);
- if (!adp)
- return -ENOMEM;
+ adp = devm_drm_bridge_alloc(&pdev->dev, struct adp_mipi_drv_private,
+ bridge, &adp_dsi_bridge_funcs);
+ if (IS_ERR(adp))
+ return PTR_ERR(adp);
adp->mipi = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adp->mipi)) {
@@ -241,7 +242,6 @@ static int adp_mipi_probe(struct platform_device *pdev)
adp->dsi.dev = &pdev->dev;
adp->dsi.ops = &adp_dsi_host_ops;
- adp->bridge.funcs = &adp_dsi_bridge_funcs;
adp->bridge.of_node = pdev->dev.of_node;
adp->bridge.type = DRM_MODE_CONNECTOR_DSI;
dev_set_drvdata(&pdev->dev, adp);
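The adp-mipi hunk is an instance of the devm_drm_bridge_alloc() conversion: the bridge is allocated together with the driver structure that embeds it, and the funcs pointer is wired up at allocation time rather than assigned by hand. A hedged sketch of the pattern (foo_* names are hypothetical):

#include <linux/platform_device.h>
#include <drm/drm_bridge.h>

struct foo_bridge {
	struct drm_bridge bridge;	/* must be embedded in the driver struct */
	void __iomem *regs;
};

static const struct drm_bridge_funcs foo_bridge_funcs;

static int foo_probe(struct platform_device *pdev)
{
	struct foo_bridge *foo;

	/* Allocates foo, initializes foo->bridge.funcs, and ties the bridge
	 * lifetime to the device -- no separate kzalloc or funcs assignment,
	 * and failures come back as ERR_PTR values. */
	foo = devm_drm_bridge_alloc(&pdev->dev, struct foo_bridge, bridge,
				    &foo_bridge_funcs);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	return 0;
}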
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 87080c06e5fc..930de203d533 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -66,7 +66,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
- amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o
+ amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index e13fbd974141..9569dc16dd3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -71,18 +71,29 @@ aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
return NULL;
}
+static inline uint32_t aldebaran_get_ip_block_mask(struct amdgpu_device *adev)
+{
+ uint32_t ip_block_mask = BIT(AMD_IP_BLOCK_TYPE_GFX) |
+ BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
+ if (adev->aid_mask)
+ ip_block_mask |= BIT(AMD_IP_BLOCK_TYPE_IH);
+
+ return ip_block_mask;
+}
+
static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
{
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
+ uint32_t ip_block;
int r, i;
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!(adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_GFX ||
- adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_SDMA))
+ ip_block = BIT(adev->ip_blocks[i].version->type);
+ if (!(ip_block_mask & ip_block))
continue;
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
@@ -200,8 +211,10 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
{
struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
struct amdgpu_firmware_info *ucode;
struct amdgpu_ip_block *cmn_block;
+ struct amdgpu_ip_block *ih_block;
int ucode_count = 0;
int i, r;
@@ -243,6 +256,18 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
if (r)
return r;
+ if (ip_block_mask & BIT(AMD_IP_BLOCK_TYPE_IH)) {
+ ih_block = amdgpu_device_ip_get_ip_block(adev,
+ AMD_IP_BLOCK_TYPE_IH);
+ if (unlikely(!ih_block)) {
+ dev_err(adev->dev, "Failed to get IH handle\n");
+ return -EINVAL;
+ }
+ r = amdgpu_ip_block_resume(ih_block);
+ if (r)
+ return r;
+ }
+
/* Reinit GFXHUB */
adev->gfxhub.funcs->init(adev);
r = adev->gfxhub.funcs->gart_enable(adev);
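The aldebaran rework replaces a chain of type comparisons with a bitmask keyed by BIT(ip_block_type), so gating one more block (here IH, on multi-AID parts) is a one-bit change in a single helper. A sketch of the membership test shared by the suspend and restore paths, assuming the same amdgpu structures (foo_* is hypothetical):

static void foo_for_each_selected_block(struct amdgpu_device *adev)
{
	uint32_t mask = aldebaran_get_ip_block_mask(adev);
	int i;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		/* AMD_IP_BLOCK_TYPE_* values are small enums, so BIT(type)
		 * gives each block class a unique bit in a u32 mask. */
		if (!(mask & BIT(adev->ip_blocks[i].version->type)))
			continue;
		/* suspend or resume adev->ip_blocks[i] here */
	}
}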
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a5ccd0ada16a..ef3af170dda4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -470,9 +470,6 @@ struct amdgpu_sa_manager {
void *cpu_ptr;
};
-int amdgpu_fence_slab_init(void);
-void amdgpu_fence_slab_fini(void);
-
/*
* IRQS.
*/
@@ -886,6 +883,7 @@ struct amdgpu_mqd_prop {
uint64_t csa_addr;
uint64_t fence_address;
bool tmz_queue;
+ bool kernel_queue;
};
struct amdgpu_mqd {
@@ -1282,6 +1280,7 @@ struct amdgpu_device {
bool debug_exp_resets;
bool debug_disable_gpu_ring_reset;
bool debug_vm_userptr;
+ bool debug_disable_ce_logs;
/* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
@@ -1336,6 +1335,11 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
return container_of(bdev, struct amdgpu_device, mman.bdev);
}
+static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev)
+{
+ return !!adev->aid_mask;
+}
+
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags);
void amdgpu_device_fini_hw(struct amdgpu_device *adev);
@@ -1387,7 +1391,8 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
u64 reg_addr, u64 reg_data);
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
@@ -1558,16 +1563,16 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
int amdgpu_device_link_reset(struct amdgpu_device *adev);
-bool amdgpu_device_supports_atpx(struct drm_device *dev);
-bool amdgpu_device_supports_px(struct drm_device *dev);
-bool amdgpu_device_supports_boco(struct drm_device *dev);
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
-int amdgpu_device_supports_baco(struct drm_device *dev);
+bool amdgpu_device_supports_atpx(struct amdgpu_device *adev);
+bool amdgpu_device_supports_px(struct amdgpu_device *adev);
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev);
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev);
+int amdgpu_device_supports_baco(struct amdgpu_device *adev);
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
-int amdgpu_device_baco_enter(struct drm_device *dev);
-int amdgpu_device_baco_exit(struct drm_device *dev);
+int amdgpu_device_baco_enter(struct amdgpu_device *adev);
+int amdgpu_device_baco_exit(struct amdgpu_device *adev);
void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
@@ -1619,6 +1624,7 @@ void amdgpu_driver_release_kms(struct drm_device *dev);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
+void amdgpu_device_complete(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
@@ -1669,7 +1675,8 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state);
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
u64 *tmr_size);
@@ -1700,8 +1707,11 @@ static inline void amdgpu_acpi_release(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state) { return 0; }
-static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
- enum amdgpu_ss ss_state) { return 0; }
+static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
+{
+ return 0;
+}
static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
#endif
@@ -1714,7 +1724,7 @@ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return
#endif
#if defined(CONFIG_DRM_AMD_ISP)
-int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN]);
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev);
#endif
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
@@ -1760,4 +1770,19 @@ extern const struct attribute_group amdgpu_flash_attr_group;
void amdgpu_set_init_level(struct amdgpu_device *adev,
enum amdgpu_init_lvl_id lvl);
+
+static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
+{
+ u32 status;
+ int r;
+
+ r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
+ if (r || PCI_POSSIBLE_ERROR(status)) {
+ dev_err(adev->dev, "device lost from bus!");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
#endif
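The new amdgpu_device_bus_status_check() helper samples the PCI_COMMAND register and treats a read failure or an all-ones value (PCI_POSSIBLE_ERROR()) as the device having fallen off the bus. A hedged usage sketch; foo_recover() is hypothetical:

static int foo_recover(struct amdgpu_device *adev)
{
	int r;

	/* Config-space reads return ~0 when the device is gone, so bail
	 * out before touching MMIO registers that would also read ~0. */
	r = amdgpu_device_bus_status_check(adev);
	if (r)
		return r;	/* -ENODEV */

	/* ... proceed with register-level recovery ... */
	return 0;
}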
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 3835f2592914..cbc40cad581b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -115,6 +115,11 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
int i;
+ if (adev->debug_disable_ce_logs &&
+ bank->smu_err_type == ACA_SMU_TYPE_CE &&
+ !ACA_BANK_ERR_IS_DEFFERED(bank))
+ return;
+
RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
/* plus 1 for output format, e.g: ACA[08/08]: xxxx */
for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index f5466c592d94..6c62e27b9800 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -811,18 +811,18 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
/**
* amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
* @ss_state: current smart shift event
*
* Returns 0 on success,
* otherwise returns an error number.
*/
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
int r;
- if (!amdgpu_device_supports_smart_shift(dev))
+ if (!amdgpu_device_supports_smart_shift(adev))
return 0;
switch (ss_state) {
@@ -1545,7 +1545,7 @@ static int isp_match_acpi_device_ids(struct device *dev, const void *data)
return acpi_match_device(data, dev) ? 1 : 0;
}
-int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev)
{
struct device *pdev __free(put_device) = NULL;
struct acpi_device *acpi_pdev;
@@ -1559,7 +1559,7 @@ int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
if (!acpi_pdev)
return -ENODEV;
- strscpy(*hid, acpi_device_hid(acpi_pdev));
+ *dev = acpi_pdev;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d8ac4b1051a8..fbe7616555c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -248,18 +248,34 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
{
if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev, run_pm);
+ kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
}
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
{
int r = 0;
if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev, run_pm);
+ r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+
+ return r;
+}
+
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev)
+{
+ if (adev->kfd.dev)
+ kgd2kfd_suspend_process(adev->kfd.dev);
+}
+
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd.dev)
+ r = kgd2kfd_resume_process(adev->kfd.dev);
return r;
}
@@ -642,7 +658,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
goto err;
}
- ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
+ ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job, 0);
if (ret)
goto err;
@@ -749,12 +765,12 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
- return kgd2kfd_check_and_lock_kfd();
+ return kgd2kfd_check_and_lock_kfd(adev->kfd.dev);
}
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
{
- kgd2kfd_unlock_kfd();
+ kgd2kfd_unlock_kfd(adev->kfd.dev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index b6ca41859b53..33eb4826b58b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -154,8 +154,10 @@ struct amdkfd_process_info {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc);
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev);
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -411,16 +413,18 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc);
+void kgd2kfd_suspend_process(struct kfd_dev *kfd);
+int kgd2kfd_resume_process(struct kfd_dev *kfd);
int kgd2kfd_pre_reset(struct kfd_dev *kfd,
struct amdgpu_reset_context *reset_context);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
-int kgd2kfd_check_and_lock_kfd(void);
-void kgd2kfd_unlock_kfd(void);
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
@@ -454,11 +458,20 @@ static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}
-static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
{
}
-static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
+{
+ return 0;
+}
+
+static inline void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+}
+
+static inline int kgd2kfd_resume_process(struct kfd_dev *kfd)
{
return 0;
}
@@ -489,12 +502,12 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}
-static inline int kgd2kfd_check_and_lock_kfd(void)
+static inline int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
return 0;
}
-static inline void kgd2kfd_unlock_kfd(void)
+static inline void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
}
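The header changes keep the established convention: every kgd2kfd_* entry point gets a static inline no-op for builds without KFD, so amdgpu callers stay free of #ifdefs. A sketch of adding a further hook under the same convention (kgd2kfd_foo is hypothetical; the config symbol matches the guard already used in this header):

#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_foo(struct kfd_dev *kfd);
#else
static inline int kgd2kfd_foo(struct kfd_dev *kfd)
{
	return 0;	/* harmless default when KFD is compiled out */
}
#endif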
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index ffbaa8bc5eea..1105a09e55dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -320,7 +320,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
if (!down_read_trylock(&adev->reset_domain->sem))
return;
- amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_amdkfd_suspend(adev, true);
if (suspend_resume_compute_scheduler(adev, true))
goto out;
@@ -333,7 +333,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
out:
suspend_resume_compute_scheduler(adev, false);
- amdgpu_amdkfd_resume(adev, false);
+ amdgpu_amdkfd_resume(adev, true);
up_read(&adev->reset_domain->sem);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 5a234eadae8b..25252231a68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -212,7 +212,7 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false,
- CPER_SEV_NUM, RUNTIME, NONSTD_SEC_LEN,
+ CPER_SEV_FATAL, RUNTIME, NONSTD_SEC_LEN,
NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
section->hdr.valid_bits.err_info_cnt = 1;
@@ -326,7 +326,9 @@ int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
return -ENOMEM;
}
- amdgpu_cper_entry_fill_hdr(adev, bp_threshold, AMDGPU_CPER_TYPE_BP_THRESHOLD, CPER_SEV_NUM);
+ amdgpu_cper_entry_fill_hdr(adev, bp_threshold,
+ AMDGPU_CPER_TYPE_BP_THRESHOLD,
+ CPER_SEV_FATAL);
ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0);
if (ret)
return ret;
@@ -457,7 +459,7 @@ calc:
void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
{
- u64 pos, wptr_old, rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
+ u64 pos, wptr_old, rptr;
int rec_cnt_dw = count >> 2;
u32 chunk, ent_sz;
u8 *s = (u8 *)src;
@@ -470,9 +472,11 @@ void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
return;
}
+ mutex_lock(&ring->adev->cper.ring_lock);
+
wptr_old = ring->wptr;
+ rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
- mutex_lock(&ring->adev->cper.ring_lock);
while (count) {
ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
chunk = umin(ent_sz, count);
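The amdgpu_cper.c hunk fixes an ordering bug rather than a cosmetic one: rptr was sampled in its declaration and wptr_old before the lock, so a concurrent writer could advance both between the read and mutex_lock(). The invariant the change restores, sketched with the same names:

/* Sample ring indices only while holding the lock that serializes
 * writers; values read before mutex_lock() may already be stale. */
mutex_lock(&ring->adev->cper.ring_lock);
wptr_old = ring->wptr;
rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
/* ... copy records, advance ring->wptr ... */
mutex_unlock(&ring->adev->cper.ring_lock);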
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9ea0d9b71f48..d3f220be2ef9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -293,7 +293,8 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
for (i = 0; i < p->gang_size; ++i) {
ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
- num_ibs[i], &p->jobs[i]);
+ num_ibs[i], &p->jobs[i],
+ p->filp->client_id);
if (ret)
goto free_all_kdata;
switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
@@ -1138,6 +1139,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
}
}
+ if (!amdgpu_vm_ready(vm))
+ return -EINVAL;
+
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
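The amdgpu_cs.c hunk adds an explicit amdgpu_vm_ready() gate before the VM bookkeeping calls, failing the submission early instead of operating on a VM whose page tables are not in a usable state (e.g. under eviction). A hedged sketch of the same guard in any per-VM path (foo_op is hypothetical):

static int foo_op(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	/* Fail fast rather than update page tables that aren't ready. */
	if (!amdgpu_vm_ready(vm))
		return -EINVAL;

	return amdgpu_vm_clear_freed(adev, vm, NULL);
}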
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 02138aa55793..dfb6cfd83760 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -88,8 +88,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE);
if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f81608330a3d..0e6e2e2acf5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1786,7 +1786,7 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
ti = amdgpu_vm_get_task_info_vm(vm);
if (ti) {
- seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->pid, ti->process_name);
+ seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name);
amdgpu_vm_put_task_info(ti);
}
@@ -2131,6 +2131,55 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return 0;
}
+static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
+{
+ struct drm_file *file;
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_bo *root_bo;
+ int r;
+
+ file = m->private;
+ if (!file)
+ return -EINVAL;
+
+ fpriv = file->driver_priv;
+ if (!fpriv || !fpriv->vm.root.bo)
+ return -ENODEV;
+
+ root_bo = amdgpu_bo_ref(fpriv->vm.root.bo);
+ r = amdgpu_bo_reserve(root_bo, true);
+ if (r) {
+ amdgpu_bo_unref(&root_bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(fpriv->vm.root.bo));
+
+ amdgpu_bo_unreserve(root_bo);
+ amdgpu_bo_unref(&root_bo);
+
+ return 0;
+}
+
+static int amdgpu_pt_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_pt_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_pt_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_pt_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+ debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file,
+ &amdgpu_pt_info_fops);
+}
+
#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
@@ -2140,4 +2189,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+}
#endif
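The debugfs addition above uses the stock seq_file idiom: single_open() binds a show() callback, and read/llseek/release are the generic seq_file helpers. For a single show() function the DEFINE_SHOW_ATTRIBUTE() macro shrinks the same boilerplate further; a hedged sketch with hypothetical foo_* names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_info_show(struct seq_file *m, void *unused)
{
	/* m->private is the data pointer passed to debugfs_create_file(). */
	seq_printf(m, "value: %d\n", *(int *)m->private);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_info);	/* generates foo_info_fops */

static void foo_debugfs_init(struct dentry *parent, int *value)
{
	debugfs_create_file("foo_info", 0444, parent, value,
			    &foo_info_fops);
}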
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 0425432d8659..e7b3c38e5186 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -33,4 +33,5 @@ void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_vm_init(struct drm_file *file);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 7b50741dc097..8a026bc9ea44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -220,10 +220,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
coredump->reset_time.tv_nsec);
- if (coredump->reset_task_info.pid)
+ if (coredump->reset_task_info.task.pid)
drm_printf(&p, "process_name: %s PID: %d\n",
coredump->reset_task_info.process_name,
- coredump->reset_task_info.pid);
+ coredump->reset_task_info.task.pid);
/* SOC Information */
drm_printf(&p, "\nSOC Information\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index aa32df7e2fb2..01d234cf8156 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -232,7 +232,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
{
int ret = 0;
- if (!amdgpu_sriov_vf(adev))
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
ret = sysfs_create_file(&adev->dev->kobj,
&dev_attr_pcie_replay_count.attr);
@@ -241,7 +241,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
{
- if (!amdgpu_sriov_vf(adev))
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
sysfs_remove_file(&adev->dev->kobj,
&dev_attr_pcie_replay_count.attr);
}
@@ -411,19 +411,16 @@ static const struct attribute_group amdgpu_board_attrs_group = {
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
-
/**
* amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with ATPX power control,
* otherwise returns false.
*/
-bool amdgpu_device_supports_px(struct drm_device *dev)
+bool amdgpu_device_supports_px(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
return true;
return false;
@@ -432,15 +429,13 @@ bool amdgpu_device_supports_px(struct drm_device *dev)
/**
* amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with ACPI power control,
* otherwise returns false.
*/
-bool amdgpu_device_supports_boco(struct drm_device *dev)
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
return false;
@@ -453,29 +448,24 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
/**
* amdgpu_device_supports_baco - Does the device support BACO
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Return:
* 1 if the device supports BACO;
* 3 if the device supports MACO (only works if BACO is supported)
* otherwise returns 0.
*/
-int amdgpu_device_supports_baco(struct drm_device *dev)
+int amdgpu_device_supports_baco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
return amdgpu_asic_supports_baco(adev);
}
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
{
- struct drm_device *dev;
int bamaco_support;
- dev = adev_to_drm(adev);
-
adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
- bamaco_support = amdgpu_device_supports_baco(dev);
+ bamaco_support = amdgpu_device_supports_baco(adev);
switch (amdgpu_runtime_pm) {
case 2:
@@ -495,10 +485,12 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
break;
case -1:
case -2:
- if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
+ if (amdgpu_device_supports_px(adev)) {
+ /* enable PX as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
- } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
+ } else if (amdgpu_device_supports_boco(adev)) {
+ /* enable boco as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else {
@@ -547,14 +539,14 @@ no_runtime_pm:
* amdgpu_device_supports_smart_shift - Is the device dGPU with
* smart shift support
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with Smart Shift support,
* otherwise returns false.
*/
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
{
- return (amdgpu_device_supports_boco(dev) &&
+ return (amdgpu_device_supports_boco(adev) &&
amdgpu_acpi_is_power_shift_control_supported());
}
@@ -1288,14 +1280,14 @@ u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
*/
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg);
BUG();
return 0;
}
static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -1312,15 +1304,17 @@ static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg
*/
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%04X with 0x%08X\n", reg,
+ v);
BUG();
}
static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%llX with 0x%08X\n", reg,
+ v);
BUG();
}
@@ -1336,14 +1330,15 @@ static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, ui
*/
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n",
+ reg);
BUG();
return 0;
}
static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -1360,15 +1355,17 @@ static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t r
*/
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
- DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
+ reg, v);
BUG();
}
static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
- DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
+ reg, v);
BUG();
}
@@ -1386,8 +1383,9 @@ static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg,
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
uint32_t block, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
- reg, block);
+ dev_err(adev->dev,
+ "Invalid callback to read register 0x%04X in block 0x%04X\n",
+ reg, block);
BUG();
return 0;
}
@@ -1407,8 +1405,9 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
uint32_t block,
uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
- reg, block, v);
+ dev_err(adev->dev,
+ "Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
+ reg, block, v);
BUG();
}
@@ -1694,7 +1693,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
- DRM_WARN("System can't access extended configuration space, please check!!\n");
+ dev_warn(
+ adev->dev,
+ "System can't access extended configuration space, please check!!\n");
/* skip if the bios has already enabled large BAR */
if (adev->gmc.real_vram_size &&
@@ -1734,9 +1735,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
r = pci_resize_resource(adev->pdev, 0, rbar_size);
if (r == -ENOSPC)
- DRM_INFO("Not enough PCI address space for a large BAR.");
+ dev_info(adev->dev,
+ "Not enough PCI address space for a large BAR.");
else if (r && r != -ENOTSUPP)
- DRM_ERROR("Problem resizing BAR0 (%d).", r);
+ dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
pci_assign_unassigned_bus_resources(adev->pdev->bus);
@@ -1838,8 +1840,8 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
case 0:
return false;
default:
- DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
- amdgpu_seamless);
+ dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
+ amdgpu_seamless);
return false;
}
@@ -2015,7 +2017,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
return;
if (!is_os_64) {
- DRM_WARN("Not 64-bit OS, feature not supported\n");
+ dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
goto def_value;
}
si_meminfo(&si);
@@ -2030,7 +2032,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
if (total_memory < dram_size_seven_GB)
goto def_value1;
} else {
- DRM_WARN("Smu memory pool size not supported\n");
+ dev_warn(adev->dev, "Smu memory pool size not supported\n");
goto def_value;
}
adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
@@ -2038,7 +2040,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
return;
def_value1:
- DRM_WARN("No enough system memory\n");
+ dev_warn(adev->dev, "No enough system memory\n");
def_value:
adev->pm.smu_prv_buffer_size = 0;
}
@@ -2190,7 +2192,8 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
- if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
+ state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -2202,12 +2205,13 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
amdgpu_device_load_pci_state(pdev);
r = pci_enable_device(pdev);
if (r)
- DRM_WARN("pci_enable_device failed (%d)\n", r);
+ dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
+ r);
amdgpu_device_resume(dev, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- pr_info("switched off\n");
+ dev_info(&pdev->dev, "switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_prepare(dev);
amdgpu_device_suspend(dev, true);
@@ -2274,8 +2278,9 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
&adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
@@ -2308,8 +2313,9 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
&adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
@@ -2525,9 +2531,11 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
}
}
- DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
- amdgpu_virtual_display, pci_address_name,
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(
+ adev->dev,
+ "virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
@@ -2538,8 +2546,9 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
- DRM_INFO("virtual_display:%d, num_crtc:%d\n",
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
+ adev->enable_virtual_display,
+ adev->mode_info.num_crtc);
}
}
@@ -2561,9 +2570,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
- if (adev->mman.discovery_bin)
- return 0;
-
switch (adev->asic_type) {
default:
return 0;
@@ -2585,6 +2591,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_NAVI12:
+ if (adev->mman.discovery_bin)
+ return 0;
chip_name = "navi12";
break;
}
@@ -2773,21 +2781,29 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
+ adev->virt.is_xgmi_node_migrate_enabled = false;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->virt.is_xgmi_node_migrate_enabled =
+ amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
+ }
+
total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
ip_block = &adev->ip_blocks[i];
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
- DRM_WARN("disabled ip block: %d <%s>\n",
- i, adev->ip_blocks[i].version->funcs->name);
+ dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
+ adev->ip_blocks[i].version->funcs->name);
adev->ip_blocks[i].status.valid = false;
} else if (ip_block->version->funcs->early_init) {
r = ip_block->version->funcs->early_init(ip_block);
if (r == -ENOENT) {
adev->ip_blocks[i].status.valid = false;
} else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
total = false;
} else {
adev->ip_blocks[i].status.valid = true;
@@ -2868,8 +2884,10 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2893,8 +2911,9 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
continue;
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2932,8 +2951,11 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
} else {
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i]
+ .version->funcs->name,
+ r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2988,25 +3010,29 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
r = drm_sched_init(&ring->sched, &args);
if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create scheduler on ring %s.\n",
+ ring->name);
return r;
}
r = amdgpu_uvd_entity_init(adev, ring);
if (r) {
- DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create UVD scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
r = amdgpu_vce_entity_init(adev, ring);
if (r) {
- DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create VCE scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
}
- amdgpu_xcp_update_partition_sched_list(adev);
+ if (adev->xcp_mgr)
+ amdgpu_xcp_update_partition_sched_list(adev);
return 0;
}
@@ -3038,8 +3064,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->sw_init) {
r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
goto init_failed;
}
}
@@ -3053,7 +3081,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* need to do common hw init early so everything is set up for gmc */
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
@@ -3065,17 +3094,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
r = amdgpu_device_mem_scratch_init(adev);
if (r) {
- DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
+ dev_err(adev->dev,
+ "amdgpu_mem_scratch_init failed %d\n",
+ r);
goto init_failed;
}
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
goto init_failed;
}
r = amdgpu_device_wb_init(adev);
if (r) {
- DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
+ dev_err(adev->dev,
+ "amdgpu_device_wb_init failed %d\n", r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
@@ -3087,14 +3120,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_CSA_SIZE);
if (r) {
- DRM_ERROR("allocate CSA failed %d\n", r);
+ dev_err(adev->dev,
+ "allocate CSA failed %d\n", r);
goto init_failed;
}
}
r = amdgpu_seq64_init(adev);
if (r) {
- DRM_ERROR("allocate seq64 failed %d\n", r);
+ dev_err(adev->dev, "allocate seq64 failed %d\n",
+ r);
goto init_failed;
}
}
@@ -3235,6 +3270,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_LEGACY:
case AMD_RESET_METHOD_LINK:
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:
@@ -3284,8 +3320,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
state);
if (r) {
- DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3321,8 +3359,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
state);
if (r) {
- DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3388,8 +3428,10 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("late_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3398,7 +3440,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_ras_late_init(adev);
if (r) {
- DRM_ERROR("amdgpu_ras_late_init failed %d", r);
+ dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
return r;
}
@@ -3412,7 +3454,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
- DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+ dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
if (amdgpu_passthrough(adev) &&
@@ -3445,7 +3487,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
AMDGPU_XGMI_PSTATE_MIN);
if (r) {
- DRM_ERROR("pstate setting failed (%d).\n", r);
+ dev_err(adev->dev,
+ "pstate setting failed (%d).\n",
+ r);
break;
}
}
@@ -3459,17 +3503,19 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
{
+ struct amdgpu_device *adev = ip_block->adev;
int r;
if (!ip_block->version->funcs->hw_fini) {
- DRM_ERROR("hw_fini of IP block <%s> not defined\n",
- ip_block->version->funcs->name);
+ dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
+ ip_block->version->funcs->name);
} else {
r = ip_block->version->funcs->hw_fini(ip_block);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- ip_block->version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "hw_fini of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
}
}
@@ -3510,15 +3556,16 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
if (r) {
- DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "early_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_amdkfd_suspend(adev, true);
amdgpu_userq_suspend(adev);
/* Workaround for ASICs need to disable SMC first */
@@ -3533,7 +3580,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev)) {
if (amdgpu_virt_release_full_gpu(adev, false))
- DRM_ERROR("failed to release exclusive mode on fini\n");
+ dev_err(adev->dev,
+ "failed to release exclusive mode on fini\n");
}
return 0;
@@ -3581,8 +3629,10 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
}
}
adev->ip_blocks[i].status.sw = false;
@@ -3615,7 +3665,7 @@ static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
r = amdgpu_ib_ring_tests(adev);
if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
}
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -3756,8 +3806,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
+ dev_err(adev->dev,
+ "SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
return r;
}
}
@@ -4041,12 +4092,14 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
/**
* amdgpu_device_asic_has_dc_support - determine if DC supports the asic
*
+ * @pdev: pci device context
* @asic_type: AMD asic type
*
* Check if there is DC (new modesetting infrastructure) support for an asic.
* Returns true if DC has support, false if not.
*/
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type)
{
switch (asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -4089,7 +4142,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#else
default:
if (amdgpu_dc > 0)
- DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
+ dev_info_once(
+ &pdev->dev,
+ "Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
return false;
#endif
}
@@ -4108,7 +4163,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
(adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
- return amdgpu_device_asic_has_dc_support(adev->asic_type);
+ return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
}
static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
@@ -4130,13 +4185,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
task_barrier_enter(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
+ adev->asic_reset_res = amdgpu_device_baco_enter(adev);
if (adev->asic_reset_res)
goto fail;
task_barrier_exit(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
+ adev->asic_reset_res = amdgpu_device_baco_exit(adev);
if (adev->asic_reset_res)
goto fail;
@@ -4150,7 +4205,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
fail:
if (adev->asic_reset_res)
- DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
+ dev_warn(adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
adev->asic_reset_res, adev_to_drm(adev)->unique);
amdgpu_put_xgmi_hive(hive);
}
@@ -4164,18 +4220,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
int ret = 0;
/*
- * By default timeout for non compute jobs is 10000
- * and 60000 for compute jobs.
- * In SR-IOV or passthrough mode, timeout for compute
- * jobs are 60000 by default.
+ * By default, the timeout for jobs is 10 seconds.
*/
- adev->gfx_timeout = msecs_to_jiffies(10000);
+ adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000);
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev))
- adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
- msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
- else
- adev->compute_timeout = msecs_to_jiffies(60000);
if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
while ((timeout_setting = strsep(&input, ",")) &&
@@ -4274,7 +4322,7 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
adev->gfx.mcbp = true;
if (adev->gfx.mcbp)
- DRM_INFO("MCBP is enabled\n");
+ dev_info(adev->dev, "MCBP is enabled\n");
}
/**
@@ -4290,7 +4338,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags)
{
- struct drm_device *ddev = adev_to_drm(adev);
struct pci_dev *pdev = adev->pdev;
int r, i;
bool px = false;
@@ -4342,9 +4389,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
- amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
- pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
+ dev_info(
+ adev->dev,
+ "initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+ amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
* can recall function without having locking issues
@@ -4461,8 +4510,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (!adev->rmmio)
return -ENOMEM;
- DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
- DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
+ dev_info(adev->dev, "register mmio base: 0x%08X\n",
+ (uint32_t)adev->rmmio_base);
+ dev_info(adev->dev, "register mmio size: %u\n",
+ (unsigned int)adev->rmmio_size);
/*
* Reset domain needs to be present early, before XGMI hive discovered
@@ -4599,7 +4650,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = -EINVAL;
goto failed;
}
- DRM_INFO("GPU posting now...\n");
+ dev_info(adev->dev, "GPU posting now...\n");
r = amdgpu_device_asic_init(adev);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
@@ -4709,12 +4760,12 @@ fence_driver_init:
r = amdgpu_pm_sysfs_init(adev);
if (r)
- DRM_ERROR("registering pm sysfs failed (%d).\n", r);
+ dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
r = amdgpu_ucode_sysfs_init(adev);
if (r) {
adev->ucode_sysfs_en = false;
- DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
+ dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
} else
adev->ucode_sysfs_en = true;
@@ -4747,7 +4798,7 @@ fence_driver_init:
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
- px = amdgpu_device_supports_px(ddev);
+ px = amdgpu_device_supports_px(adev);
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
@@ -4913,7 +4964,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
kfree(adev->xcp_mgr);
adev->xcp_mgr = NULL;
- px = amdgpu_device_supports_px(adev_to_drm(adev));
+ px = amdgpu_device_supports_px(adev);
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
@@ -4962,8 +5013,16 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
return 0;
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
- if (ret)
- DRM_WARN("evicting device resources failed\n");
+ if (ret) {
+ dev_warn(adev->dev, "evicting device resources failed\n");
+ return ret;
+ }
+
+ if (adev->in_s4) {
+ ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
+ if (ret)
+ dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
+ }
return ret;
}
@@ -5035,6 +5094,28 @@ int amdgpu_device_prepare(struct drm_device *dev)
}
/**
+ * amdgpu_device_complete - complete power state transition
+ *
+ * @dev: drm dev pointer
+ *
+ * Undo the changes from amdgpu_device_prepare. This will be
+ * called on all resume transitions, including those that failed.
+ */
+void amdgpu_device_complete(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!adev->ip_blocks[i].version->funcs->complete)
+ continue;
+ adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
+ }
+}
+
+/**
* amdgpu_device_suspend - initiate device suspend
*
* @dev: drm dev pointer
@@ -5055,14 +5136,16 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
+ if (!adev->in_s0ix && !adev->in_runpm)
+ amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
if (r)
return r;
}
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3))
+ dev_warn(adev->dev, "smart shift update failed\n");
if (notify_clients)
drm_client_dev_suspend(adev_to_drm(adev), false);
@@ -5074,7 +5157,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_device_ip_suspend_phase1(adev);
if (!adev->in_s0ix) {
- amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+ amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
amdgpu_userq_suspend(adev);
}
@@ -5098,6 +5181,32 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
return 0;
}
+static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
+{
+ int r;
+ unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
+
+ /* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO)
+ * may not work. The access could be blocked by nBIF protection as VF isn't in
+ * exclusive access mode. Exclusive access is enabled now; disable/enable MSIX
+ * so that QEMU reprograms the MSIX table.
+ */
+ amdgpu_restore_msix(adev);
+
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
+ if (r)
+ return r;
+
+ dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
+ prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
+
+ adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
+ adev->vm_manager.vram_base_offset +=
+ adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+
+ return 0;
+}
+
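/* Illustrative sketch (added for exposition; not part of this patch):
 * amdgpu_restore_msix() is defined elsewhere in the driver, but the
 * disable/enable toggle described in the comment above would plausibly
 * look like the following, assuming the standard PCI MSI-X capability
 * layout. The function name below is hypothetical.
 */
static void amdgpu_restore_msix_sketch(struct amdgpu_device *adev)
{
	u16 ctrl;

	pci_read_config_word(adev->pdev,
			     adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* Disable, then re-enable MSI-X so the hypervisor re-traps the
	 * table writes and reprograms the MSI-X table.
	 */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev,
			      adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev,
			      adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}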
/**
* amdgpu_device_resume - initiate device resume
*
@@ -5119,6 +5228,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
return r;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ r = amdgpu_virt_resume(adev);
+ if (r)
+ goto exit;
+ }
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -5140,7 +5255,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
}
if (!adev->in_s0ix) {
- r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
+ r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
if (r)
goto exit;
@@ -5159,6 +5274,9 @@ exit:
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
+
+ if (!adev->in_s0ix && !r && !adev->in_runpm)
+ r = amdgpu_amdkfd_resume_process(adev);
}
if (r)
@@ -5197,8 +5315,8 @@ exit:
amdgpu_vram_mgr_clear_reset_blocks(adev);
adev->in_suspend = false;
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
+ dev_warn(adev->dev, "smart shift update failed\n");
return 0;
}
@@ -5729,7 +5847,9 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
if (vram_lost) {
- DRM_INFO("VRAM is lost due to GPU reset!\n");
+ dev_info(
+ tmp_adev->dev,
+ "VRAM is lost due to GPU reset!\n");
amdgpu_inc_vram_lost(tmp_adev);
}
@@ -6008,14 +6128,9 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
{
struct amdgpu_device *tmp_adev;
int ret = 0;
- u32 status;
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status);
- if (PCI_POSSIBLE_ERROR(status)) {
- dev_err(tmp_adev->dev, "device lost from bus!");
- ret = -ENODEV;
- }
+ ret |= amdgpu_device_bus_status_check(tmp_adev);
}
return ret;
@@ -6080,14 +6195,15 @@ static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
}
-static int amdgpu_device_halt_activities(
- struct amdgpu_device *adev, struct amdgpu_job *job,
- struct amdgpu_reset_context *reset_context,
- struct list_head *device_list, struct amdgpu_hive_info *hive,
- bool need_emergency_restart)
+static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive,
+ bool need_emergency_restart)
{
struct amdgpu_device *tmp_adev = NULL;
- int i, r = 0;
+ int i;
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list, reset_list) {
@@ -6139,8 +6255,6 @@ static int amdgpu_device_halt_activities(
}
atomic_inc(&tmp_adev->gpu_reset_counter);
}
-
- return r;
}
static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
@@ -6245,8 +6359,10 @@ static int amdgpu_device_sched_resume(struct list_head *device_list,
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
- if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(tmp_adev,
+ AMDGPU_SS_DEV_D0))
+ dev_warn(tmp_adev->dev,
+ "smart shift update failed\n");
}
}
@@ -6327,7 +6443,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
amdgpu_ras_get_context(adev)->reboot) {
- DRM_WARN("Emergency reboot.");
+ dev_warn(adev->dev, "Emergency reboot.");
ksys_sync_helper();
emergency_restart();
@@ -6351,11 +6467,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* We need to lock reset domain only once both for XGMI and single device */
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
- r = amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
- hive, need_emergency_restart);
- if (r)
- goto reset_unlock;
-
+ amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
+ hive, need_emergency_restart);
if (need_emergency_restart)
goto skip_sched_resume;
/*
@@ -6392,8 +6505,17 @@ end_reset:
atomic_set(&adev->reset_domain->reset_res, r);
- if (!r)
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+ if (!r) {
+ struct amdgpu_task_info *ti = NULL;
+
+ if (job)
+ ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
+ ti ? &ti->task : NULL);
+
+ amdgpu_vm_put_task_info(ti);
+ }
return r;
}
@@ -6712,12 +6834,11 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
#endif
}
-int amdgpu_device_baco_enter(struct drm_device *dev)
+int amdgpu_device_baco_enter(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!amdgpu_device_supports_baco(dev))
+ if (!amdgpu_device_supports_baco(adev))
return -ENOTSUPP;
if (ras && adev->ras_enabled &&
@@ -6727,13 +6848,12 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
return amdgpu_dpm_baco_enter(adev);
}
-int amdgpu_device_baco_exit(struct drm_device *dev)
+int amdgpu_device_baco_exit(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0;
- if (!amdgpu_device_supports_baco(dev))
+ if (!amdgpu_device_supports_baco(adev))
return -ENOTSUPP;
ret = amdgpu_dpm_baco_exit(adev);
@@ -6767,7 +6887,6 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
struct amdgpu_reset_context reset_context;
struct list_head device_list;
- int r = 0;
dev_info(adev->dev, "PCI error: detected callback!!\n");
@@ -6794,14 +6913,12 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
amdgpu_device_recovery_prepare(adev, &device_list, hive);
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
- r = amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
- hive, false);
+ amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
+ hive, false);
if (hive) {
mutex_unlock(&hive->hive_lock);
amdgpu_put_xgmi_hive(hive);
}
- if (r)
- return PCI_ERS_RESULT_DISCONNECT;
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
@@ -6983,11 +7100,11 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
adev->pci_state = pci_store_saved_state(pdev);
if (!adev->pci_state) {
- DRM_ERROR("Failed to store PCI saved state");
+ dev_err(adev->dev, "Failed to store PCI saved state");
return false;
}
} else {
- DRM_WARN("Failed to save PCI state, err:%d\n", r);
+ dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
return false;
}
@@ -7008,7 +7125,7 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
if (!r) {
pci_restore_state(pdev);
} else {
- DRM_WARN("Failed to load PCI state, err:%d\n", r);
+ dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
return false;
}
@@ -7254,7 +7371,7 @@ struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
if (r)
- DRM_WARN("OOM tracking isolation\n");
+ dev_warn(adev->dev, "OOM tracking isolation\n");
out_grab_ref:
dma_fence_get(dep);
@@ -7322,9 +7439,11 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
tmp_ = RREG32(reg_addr);
loop--;
if (!loop) {
- DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
- inst, reg_name, (uint32_t)expected_value,
- (uint32_t)(tmp_ & (mask)));
+ dev_warn(
+ adev->dev,
+ "Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
+ inst, reg_name, (uint32_t)expected_value,
+ (uint32_t)(tmp_ & (mask)));
ret = -ETIMEDOUT;
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 81b3443c8d7f..efe0058b48ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -276,7 +276,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
u32 msg;
if (!amdgpu_sriov_vf(adev)) {
- /* It can take up to a second for IFWI init to complete on some dGPUs,
+ /* It can take up to two seconds for IFWI init to complete on some dGPUs,
* but generally it should be in the 60-100ms range. Normally this starts
* as soon as the device gets power so by the time the OS loads this has long
* completed. However, when a card is hotplugged via e.g., USB4, we need to
@@ -284,7 +284,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
* continue.
*/
- for (i = 0; i < 1000; i++) {
+ for (i = 0; i < 2000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33);
if (msg & 0x80000000)
break;
@@ -2555,40 +2555,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_RAVEN:
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- /* this is not fatal. We have a fallback below
- * if the new firmwares are not present. some of
- * this will be overridden below to keep things
- * consistent with the current behavior.
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
*/
- r = amdgpu_discovery_reg_base_init(adev);
- if (!r) {
- amdgpu_discovery_harvest_ip(adev);
- amdgpu_discovery_get_gfx_info(adev);
- amdgpu_discovery_get_mall_info(adev);
- amdgpu_discovery_get_vcn_info(adev);
- }
- break;
- default:
- r = amdgpu_discovery_reg_base_init(adev);
- if (r) {
- drm_err(&adev->ddev, "discovery failed: %d\n", r);
- return r;
- }
-
- amdgpu_discovery_harvest_ip(adev);
- amdgpu_discovery_get_gfx_info(adev);
- amdgpu_discovery_get_mall_info(adev);
- amdgpu_discovery_get_vcn_info(adev);
- break;
- }
-
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2611,6 +2582,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
break;
case CHIP_VEGA12:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2633,6 +2609,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
break;
case CHIP_RAVEN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
adev->vcn.num_vcn_inst = 1;
@@ -2674,6 +2655,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
}
break;
case CHIP_VEGA20:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 8;
@@ -2697,6 +2683,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
break;
case CHIP_ARCTURUS:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
adev->vcn.num_vcn_inst = 2;
@@ -2725,6 +2716,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
break;
case CHIP_ALDEBARAN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
adev->vcn.num_vcn_inst = 2;
@@ -2751,6 +2747,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
default:
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r) {
+ drm_err(&adev->ddev, "discovery failed: %d\n", r);
+ return r;
+ }
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 35c778426a7c..51bab32fd8c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1196,13 +1196,14 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb
static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
rfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, info, mode_cmd);
/* Verify that the modifier is supported. */
if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -1297,6 +1298,7 @@ static int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct amdgpu_framebuffer *amdgpu_fb;
@@ -1317,7 +1319,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
bo = gem_to_amdgpu_bo(obj);
domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
- if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ if (drm_gem_is_imported(obj) && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
@@ -1330,7 +1332,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
}
ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
- mode_cmd, obj);
+ info, mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index dfa0d642ac16..930c171473b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -44,6 +44,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 44e120f9f764..ff98c87b2e0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -513,8 +513,8 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
if (!adev)
return false;
- if (obj->import_attach) {
- struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+ if (drm_gem_is_imported(obj)) {
+ struct dma_buf *dma_buf = obj->dma_buf;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
/* No XGMI with non AMD GPUs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
index 3f3662e8b871..3040437d99c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
@@ -41,7 +41,8 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
if (index < adev->doorbell.num_kernel_doorbells)
return readl(adev->doorbell.cpu_addr + index);
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
return 0;
}
@@ -63,7 +64,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
if (index < adev->doorbell.num_kernel_doorbells)
writel(v, adev->doorbell.cpu_addr + index);
else
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
@@ -83,7 +85,8 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
if (index < adev->doorbell.num_kernel_doorbells)
return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index));
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
return 0;
}
@@ -105,7 +108,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
if (index < adev->doorbell.num_kernel_doorbells)
atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v);
else
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
@@ -166,7 +170,8 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
NULL,
(void **)&adev->doorbell.cpu_addr);
if (r) {
- DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r);
+ dev_err(adev->dev,
+ "Failed to allocate kernel doorbells, err=%d\n", r);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4db92e0a60da..395c6be901ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -144,6 +144,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
AMDGPU_DEBUG_SMU_POOL = BIT(7),
AMDGPU_DEBUG_VM_USERPTR = BIT(8),
+ AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9)
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -361,12 +362,12 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint
* The second one is for Compute. The third and fourth ones are
* for SDMA and Video.
*
- * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
- * jobs is 10000. The timeout for compute is 60000.
+ * By default (with no lockup_timeout setting), the timeout for all jobs is 10000 ms.
*/
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
- "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
- "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
+MODULE_PARM_DESC(lockup_timeout,
+ "GPU lockup timeout in ms (default: 10000 for all jobs. "
+ "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+ "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
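/* Usage note (added for exposition; values are illustrative): with the new
 * 10000 ms default for every queue, a user who still wants the old 60 s
 * compute timeout can request it explicitly using the documented
 * [GFX,Compute,SDMA,Video] format, e.g. on the kernel command line:
 *
 *   amdgpu.lockup_timeout=10000,60000,10000,10000
 */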
/**
@@ -2278,6 +2279,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: VM mode debug for userptr is enabled\n");
adev->debug_vm_userptr = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_RAS_CE_LOG) {
+ pr_info("debug: disable kernel logs of correctable errors\n");
+ adev->debug_disable_ce_logs = true;
+ }
}
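/* Usage note (added for exposition): AMDGPU_DEBUG_DISABLE_RAS_CE_LOG is
 * BIT(9) == 0x200, so, assuming the existing debug_mask module parameter,
 * correctable-error logging can be suppressed at load time with e.g.
 * "modprobe amdgpu debug_mask=0x200", OR-ing in any other debug bits needed.
 */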
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2321,7 +2327,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
amdgpu_aspm = 0;
if (amdgpu_virtual_display ||
- amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
+ amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK))
supports_atomic = true;
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
@@ -2451,10 +2457,10 @@ retry_init:
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
/* only need to skip on ATPX */
- if (amdgpu_device_supports_px(ddev))
+ if (amdgpu_device_supports_px(adev))
dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
/* we want direct complete for BOCO */
- if (amdgpu_device_supports_boco(ddev))
+ if (amdgpu_device_supports_boco(adev))
dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE |
DPM_FLAG_SMART_SUSPEND |
DPM_FLAG_MAY_SKIP_RESUME);
@@ -2487,9 +2493,9 @@ retry_init:
* into D0 state. Then there will be a PMFW-aware D-state
* transition(D0->D3) on runpm suspend.
*/
- if (amdgpu_device_supports_baco(ddev) &&
+ if (amdgpu_device_supports_baco(adev) &&
!(adev->flags & AMD_IS_APU) &&
- (adev->asic_type >= CHIP_NAVI10))
+ adev->asic_type >= CHIP_NAVI10)
amdgpu_get_secondary_funcs(adev);
}
@@ -2506,6 +2512,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
+ amdgpu_ras_eeprom_check_and_recover(adev);
amdgpu_xcp_dev_unplug(adev);
amdgpu_gmc_prepare_nps_mode_change(adev);
drm_dev_unplug(dev);
@@ -2535,6 +2542,10 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
if (amdgpu_ras_intr_triggered())
return;
+ /* the device may not be resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return;
+
/* if we are running in a VM, make sure the device
* is torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
@@ -2551,11 +2562,14 @@ static int amdgpu_pmops_prepare(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ /* the device may not be resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
+
/* Return a positive number here so
* DPM_FLAG_SMART_SUSPEND works properly
*/
- if (amdgpu_device_supports_boco(drm_dev) &&
- pm_runtime_suspended(dev))
+ if (amdgpu_device_supports_boco(adev) && pm_runtime_suspended(dev))
return 1;
/* if we will not support s3 or s2i for the device
@@ -2570,7 +2584,7 @@ static int amdgpu_pmops_prepare(struct device *dev)
static void amdgpu_pmops_complete(struct device *dev)
{
- /* nothing to do */
+ amdgpu_device_complete(dev_get_drvdata(dev));
}
static int amdgpu_pmops_suspend(struct device *dev)
@@ -2650,12 +2664,21 @@ static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ /* do not resume the device if this is a normal hibernation */
+ if (!pm_hibernate_is_recovering())
+ return 0;
+
return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ /* the device may not be resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2828,7 +2851,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
/* nothing to do */
} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
- amdgpu_device_baco_enter(drm_dev);
+ amdgpu_device_baco_enter(adev);
}
dev_dbg(&pdev->dev, "asic/device is runtime suspended\n");
@@ -2869,7 +2892,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
- amdgpu_device_baco_exit(drm_dev);
+ amdgpu_device_baco_exit(adev);
}
ret = amdgpu_device_resume(drm_dev, false);
if (ret) {
@@ -3107,10 +3130,6 @@ static int __init amdgpu_init(void)
if (r)
goto error_sync;
- r = amdgpu_fence_slab_init();
- if (r)
- goto error_fence;
-
r = amdgpu_userq_fence_slab_init();
if (r)
goto error_fence;
@@ -3145,7 +3164,6 @@ static void __exit amdgpu_exit(void)
amdgpu_unregister_atpx_handler();
amdgpu_acpi_release();
amdgpu_sync_fini();
- amdgpu_fence_slab_fini();
amdgpu_userq_fence_slab_fini();
mmu_notifier_synchronize();
amdgpu_xcp_drv_release();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
index 8b919ad3af29..23d7d0b0d625 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -143,7 +143,6 @@ static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
}
static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_eviction_fence_get_driver_name,
.get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
.enable_signaling = amdgpu_eviction_fence_enable_signaling,
@@ -169,9 +168,9 @@ amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
ev_fence->evf_mgr = evf_mgr;
get_task_comm(ev_fence->timeline_name, current);
spin_lock_init(&ev_fence->lock);
- dma_fence_init(&ev_fence->base, &amdgpu_eviction_fence_ops,
- &ev_fence->lock, evf_mgr->ev_fence_ctx,
- atomic_inc_return(&evf_mgr->ev_fence_seq));
+ dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
+ &ev_fence->lock, evf_mgr->ev_fence_ctx,
+ atomic_inc_return(&evf_mgr->ev_fence_seq));
return ev_fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5fec808d7f54..9e7506965cab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -41,21 +41,6 @@
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
-static struct kmem_cache *amdgpu_fence_slab;
-
-int amdgpu_fence_slab_init(void)
-{
- amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN);
- if (!amdgpu_fence_slab)
- return -ENOMEM;
- return 0;
-}
-
-void amdgpu_fence_slab_fini(void)
-{
- rcu_barrier();
- kmem_cache_destroy(amdgpu_fence_slab);
-}
/*
* Cast helper
*/
@@ -114,14 +99,14 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
*
* @ring: ring the fence is associated with
* @f: resulting fence object
- * @job: job the fence is embedded in
+ * @af: amdgpu fence input
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
- unsigned int flags)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ struct amdgpu_fence *af, unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
@@ -130,40 +115,35 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
uint32_t seq;
int r;
- if (job == NULL) {
- /* create a sperate hw fence */
- am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
- if (am_fence == NULL)
+ if (!af) {
+ /* create a separate hw fence */
+ am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
+ if (!am_fence)
return -ENOMEM;
+ am_fence->context = 0;
} else {
- /* take use of job-embedded fence */
- am_fence = &job->hw_fence;
+ am_fence = af;
}
fence = &am_fence->base;
am_fence->ring = ring;
seq = ++ring->fence_drv.sync_seq;
- if (job && job->job_run_counter) {
- /* reinit seq for resubmitted jobs */
- fence->seqno = seq;
- /* TO be inline with external fence creation and other drivers */
+ am_fence->seq = seq;
+ if (af) {
+ dma_fence_init(fence, &amdgpu_job_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
+ /* Against remove in amdgpu_job_{free, free_cb} */
dma_fence_get(fence);
} else {
- if (job) {
- dma_fence_init(fence, &amdgpu_job_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- /* Against remove in amdgpu_job_{free, free_cb} */
- dma_fence_get(fence);
- } else {
- dma_fence_init(fence, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- }
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
}
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
+ amdgpu_fence_save_wptr(fence);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
@@ -276,6 +256,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
do {
struct dma_fence *fence, **ptr;
+ struct amdgpu_fence *am_fence;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -288,6 +269,12 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
+ /* Save the wptr in the fence driver so we know what the last processed
+ * wptr was. This is required for re-emitting the ring state for
+ * queues that are reset but are not guilty and thus have no guilty fence.
+ */
+ am_fence = container_of(fence, struct amdgpu_fence, base);
+ drv->signalled_wptr = am_fence->wptr;
dma_fence_signal(fence);
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -310,7 +297,9 @@ static void amdgpu_fence_fallback(struct timer_list *t)
fence_drv.fallback_timer);
if (amdgpu_fence_process(ring))
- DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
+ dev_warn(ring->adev->dev,
+ "Fence fallback timer expired on ring %s\n",
+ ring->name);
}
/**
@@ -748,6 +737,86 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
amdgpu_fence_process(ring);
}
+
+/**
+ * Kernel queue reset handling
+ *
+ * The driver can reset individual queues for most engines, but those queues
+ * may contain work from multiple contexts. Resetting the queue will lose
+ * all of that state. In order to minimize the collateral damage, the
+ * driver saves the ring contents which are not associated with the guilty
+ * context prior to resetting the queue. After resetting the queue, the
+ * contents from the other contexts are re-emitted to the rings so that they
+ * can be processed by the engine. To handle this, we save the queue's write
+ * pointer (wptr) in the fences associated with each context. If we get a
+ * queue timeout, we can then use the wptrs from the fences to determine
+ * which data needs to be saved out of the queue's ring buffer.
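+ *
+ * Worked example (added for exposition; wptr values are hypothetical):
+ * suppose the last signalled wptr is 80 and three unsignalled fences
+ * follow with saved wptrs 100, 150 and 200, where the fence at 150
+ * belongs to the guilty context. The backup loop copies the ring
+ * contents written between wptr 80 and 100 and between 150 and 200,
+ * skips the 100..150 span, and advances past the guilty range, so only
+ * innocent work is re-emitted after the reset.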
+ */
+
+/**
+ * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
+ *
+ * @fence: fence of the ring to signal
+ *
+ */
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence)
+{
+ dma_fence_set_error(&fence->base, -ETIME);
+ amdgpu_fence_write(fence->ring, fence->seq);
+ amdgpu_fence_process(fence->ring);
+}
+
+void amdgpu_fence_save_wptr(struct dma_fence *fence)
+{
+ struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base);
+
+ am_fence->wptr = am_fence->ring->wptr;
+}
+
+static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
+ u64 start_wptr, u32 end_wptr)
+{
+ unsigned int first_idx = start_wptr & ring->buf_mask;
+ unsigned int last_idx = end_wptr & ring->buf_mask;
+ unsigned int i;
+
+ /* Backup the contents of the ring buffer. */
+ for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
+ ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
+}
+
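+/* Note (added for exposition; numbers are illustrative): the
+ * "++i, i &= ring->buf_mask" step above makes the copy wrap at the end
+ * of the ring buffer. With buf_mask == 0x3ff, start_wptr 0x3fe and
+ * end_wptr 0x402, entries 0x3fe, 0x3ff, 0x000 and 0x001 are backed up.
+ */
+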
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ struct dma_fence *unprocessed;
+ struct dma_fence __rcu **ptr;
+ struct amdgpu_fence *fence;
+ u64 wptr, i, seqno;
+
+ seqno = amdgpu_fence_read(ring);
+ wptr = ring->fence_drv.signalled_wptr;
+ ring->ring_backup_entries_to_copy = 0;
+
+ for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) {
+ ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask];
+ rcu_read_lock();
+ unprocessed = rcu_dereference(*ptr);
+
+ if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
+ fence = container_of(unprocessed, struct amdgpu_fence, base);
+
+ /* save everything if the ring is not guilty, otherwise
+ * just save the content from other contexts.
+ */
+ if (!guilty_fence || (fence->context != guilty_fence->context))
+ amdgpu_ring_backup_unprocessed_command(ring, wptr,
+ fence->wptr);
+ wptr = fence->wptr;
+ }
+ rcu_read_unlock();
+ }
+}
+
/*
* Common fence implementation
*/
@@ -814,7 +883,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free the fence if it is a separate fence */
- kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+ kfree(to_amdgpu_fence(f));
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 1ae88c459da5..b0082aa7f3c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -144,7 +144,8 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) {
- DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
+ dev_warn(adev->dev,
+ "Cannot access FRU, EEPROM accessor not initialized");
return -ENODEV;
}
@@ -152,19 +153,22 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf,
sizeof(buf));
if (len != 8) {
- DRM_ERROR("Couldn't read the IPMI Common Header: %d", len);
+ dev_err(adev->dev, "Couldn't read the IPMI Common Header: %d",
+ len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
- DRM_ERROR("Bad IPMI Common Header version: 0x%02x", buf[0]);
+ dev_err(adev->dev, "Bad IPMI Common Header version: 0x%02x",
+ buf[0]);
return -EIO;
}
for (csum = 0; len > 0; len--)
csum += buf[len - 1];
if (csum) {
- DRM_ERROR("Bad IPMI Common Header checksum: 0x%02x", csum);
+ dev_err(adev->dev, "Bad IPMI Common Header checksum: 0x%02x",
+ csum);
return -EIO;
}
@@ -179,12 +183,14 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* Read the header of the PIA. */
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3);
if (len != 3) {
- DRM_ERROR("Couldn't read the Product Info Area header: %d", len);
+ dev_err(adev->dev,
+ "Couldn't read the Product Info Area header: %d", len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
- DRM_ERROR("Bad IPMI Product Info Area version: 0x%02x", buf[0]);
+ dev_err(adev->dev, "Bad IPMI Product Info Area version: 0x%02x",
+ buf[0]);
return -EIO;
}
@@ -197,14 +203,16 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size);
if (len != size) {
kfree(pia);
- DRM_ERROR("Couldn't read the Product Info Area: %d", len);
+ dev_err(adev->dev, "Couldn't read the Product Info Area: %d",
+ len);
return len < 0 ? len : -EIO;
}
for (csum = 0; size > 0; size--)
csum += pia[size - 1];
if (csum) {
- DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum);
+ dev_err(adev->dev, "Bad Product Info Area checksum: 0x%02x",
+ csum);
kfree(pia);
return -EIO;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 0ecc88df7208..6626a6e64ff5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -317,8 +317,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
*/
if (!vm->is_compute_context || !vm->process_info)
return 0;
- if (!obj->import_attach ||
- !dma_buf_is_dynamic(obj->import_attach->dmabuf))
+ if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->dma_buf))
return 0;
mutex_lock_nested(&vm->process_info->lock, 1);
if (!WARN_ON(!vm->process_info->eviction_fence)) {
@@ -329,7 +328,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
if (ti) {
- dev_warn(adev->dev, "pid %d\n", ti->pid);
+ dev_warn(adev->dev, "pid %d\n", ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
@@ -1024,7 +1023,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
- if (robj->tbo.base.import_attach &&
+ if (drm_gem_is_imported(&robj->tbo.base) &&
args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL;
amdgpu_bo_unreserve(robj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c5646af055ab..c80c8f543532 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -149,7 +149,7 @@ static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
if (amdgpu_compute_multipipe != -1) {
- DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
+ dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n",
amdgpu_compute_multipipe);
return amdgpu_compute_multipipe == 1;
}
@@ -674,7 +674,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
* generation exposes more than 64 queues. If so, the
* definition of queue_mask needs updating */
if (WARN_ON(i > (sizeof(queue_mask)*8))) {
- DRM_ERROR("Invalid KCQ enabled: %d\n", i);
+ dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i);
break;
}
@@ -683,15 +683,15 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
amdgpu_device_flush_hdp(adev, NULL);
- DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
- kiq_ring->queue);
+ dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me,
+ kiq_ring->pipe, kiq_ring->queue);
spin_lock(&kiq->ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
@@ -712,7 +712,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KCQ enable failed\n");
+ dev_err(adev->dev, "KCQ enable failed\n");
return r;
}
@@ -734,7 +734,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_mes_map_legacy_queue(adev,
&adev->gfx.gfx_ring[j]);
if (r) {
- DRM_ERROR("failed to map gfx queue\n");
+ dev_err(adev->dev, "failed to map gfx queue\n");
return r;
}
}
@@ -748,7 +748,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_gfx_rings);
if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
@@ -769,7 +769,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KGQ enable failed\n");
+ dev_err(adev->dev, "KGQ enable failed\n");
return r;
}
@@ -1030,7 +1030,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
ih_data.head = *ras_if;
- DRM_ERROR("CP ECC ERROR IRQ\n");
+ dev_err(adev->dev, "CP ECC ERROR IRQ\n");
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 6b0fbbb91e57..97b562a79ea8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -38,6 +38,13 @@
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
+static const u64 four_gb = 0x100000000ULL;
+
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev)
+{
+ return adev->gmc.xgmi.connected_to_cpu || amdgpu_virt_xgmi_migrate_enabled(adev);
+}
+
/**
* amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
*
@@ -251,10 +258,20 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
- mc->gart_start = hive_vram_end + 1;
+ /* node_segment_size may not be 4GB aligned on SRIOV, so align up. */
+ mc->gart_start = ALIGN(hive_vram_end + 1, four_gb);
mc->gart_end = mc->gart_start + mc->gart_size - 1;
- mc->fb_start = hive_vram_start;
- mc->fb_end = hive_vram_end;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ /* set mc->vram_start to 0 to switch the returned GPU address of
+ * amdgpu_bo_create_reserved() from FB aperture to GART aperture.
+ */
+ mc->vram_start = 0;
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ mc->visible_vram_size = min(mc->visible_vram_size, mc->real_vram_size);
+ } else {
+ mc->fb_start = hive_vram_start;
+ mc->fb_end = hive_vram_end;
+ }
dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
@@ -276,7 +293,6 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
enum amdgpu_gart_placement gart_placement)
{
- const uint64_t four_gb = 0x100000000ULL;
u64 size_af, size_bf;
/*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/
u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
@@ -1041,9 +1057,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
*/
u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
- u64 vram_addr = adev->vm_manager.vram_base_offset -
- adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- u64 vram_end = vram_addr + vram_size;
+ u64 vram_addr, vram_end;
u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
int idx;
@@ -1056,6 +1070,11 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
flags |= AMDGPU_PDE_PTE_FLAG(adev);
+ vram_addr = adev->vm_manager.vram_base_offset;
+ if (!amdgpu_virt_xgmi_migrate_enabled(adev))
+ vram_addr -= adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ vram_end = vram_addr + vram_size;
+
/* The first n PDE0 entries are used as PTE,
* pointing to vram
*/
@@ -1429,3 +1448,232 @@ bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev)
+{
+ switch (adev->gmc.num_mem_partitions) {
+ case 0:
+ return UNKNOWN_MEMORY_PARTITION_MODE;
+ case 1:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ case 2:
+ return AMDGPU_NPS2_PARTITION_MODE;
+ case 4:
+ return AMDGPU_NPS4_PARTITION_MODE;
+ case 8:
+ return AMDGPU_NPS8_PARTITION_MODE;
+ default:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ }
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
+{
+ enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->get_memory_partition_mode)
+ mode = adev->nbio.funcs->get_memory_partition_mode(adev,
+ supp_modes);
+ else
+ dev_warn(adev->dev, "memory partition mode query is not supported\n");
+
+ return mode;
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return amdgpu_gmc_get_vf_memory_partition(adev);
+ else
+ return amdgpu_gmc_get_memory_partition(adev, NULL);
+}
+
+static bool amdgpu_gmc_validate_partition_info(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition mode;
+ u32 supp_modes;
+ bool valid;
+
+ mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
+
+ /* Mode detected by hardware not present in supported modes */
+ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
+ !(BIT(mode - 1) & supp_modes))
+ return false;
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ case AMDGPU_NPS1_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 1);
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 2);
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 3 ||
+ adev->gmc.num_mem_partitions == 4);
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 8);
+ break;
+ default:
+ valid = false;
+ }
+
+ return valid;
+}
+
+static bool amdgpu_gmc_is_node_present(int *node_ids, int num_ids, int nid)
+{
+ int i;
+
+ /* Check if node with id 'nid' is present in 'node_ids' array */
+ for (i = 0; i < num_ids; ++i)
+ if (node_ids[i] == nid)
+ return true;
+
+ return false;
+}
+
+static void
+amdgpu_gmc_init_acpi_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ struct amdgpu_numa_info numa_info;
+ int node_ids[AMDGPU_MAX_MEM_RANGES];
+ int num_ranges = 0, ret;
+ int num_xcc, xcc_id;
+ uint32_t xcc_mask;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ xcc_mask = (1U << num_xcc) - 1;
+
+ for_each_inst(xcc_id, xcc_mask) {
+ ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
+ if (ret)
+ continue;
+
+ if (numa_info.nid == NUMA_NO_NODE) {
+ mem_ranges[0].size = numa_info.size;
+ mem_ranges[0].numa.node = numa_info.nid;
+ num_ranges = 1;
+ break;
+ }
+
+ if (amdgpu_gmc_is_node_present(node_ids, num_ranges,
+ numa_info.nid))
+ continue;
+
+ node_ids[num_ranges] = numa_info.nid;
+ mem_ranges[num_ranges].numa.node = numa_info.nid;
+ mem_ranges[num_ranges].size = numa_info.size;
+ ++num_ranges;
+ }
+
+ adev->gmc.num_mem_partitions = num_ranges;
+}
+
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ enum amdgpu_memory_partition mode;
+ u32 start_addr = 0, size;
+ int i, r, l;
+
+ mode = amdgpu_gmc_query_memory_partition(adev);
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 0;
+ break;
+ case AMDGPU_NPS1_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 2;
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ if (adev->flags & AMD_IS_APU)
+ adev->gmc.num_mem_partitions = 3;
+ else
+ adev->gmc.num_mem_partitions = 4;
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 8;
+ break;
+ default:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ }
+
+ /* Use NPS range info, if populated */
+ r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
+ &adev->gmc.num_mem_partitions);
+ if (!r) {
+ l = 0;
+ for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
+ if (mem_ranges[i].range.lpfn >
+ mem_ranges[i - 1].range.lpfn)
+ l = i;
+ }
+
+ } else {
+ if (!adev->gmc.num_mem_partitions) {
+ dev_warn(adev->dev,
+ "Not able to detect NPS mode, fall back to NPS1\n");
+ adev->gmc.num_mem_partitions = 1;
+ }
+ /* Fallback to sw based calculation */
+ size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
+ size /= adev->gmc.num_mem_partitions;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
+ mem_ranges[i].range.fpfn = start_addr;
+ mem_ranges[i].size =
+ ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
+ mem_ranges[i].range.lpfn = start_addr + size - 1;
+ start_addr += size;
+ }
+
+ l = adev->gmc.num_mem_partitions - 1;
+ }
+
+ /* Adjust the last one */
+ mem_ranges[l].range.lpfn =
+ (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
+ mem_ranges[l].size =
+ adev->gmc.real_vram_size -
+ ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
+}
+
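+/* Worked example (added for exposition; sizes are illustrative): for a
+ * 16 GB board in NPS4 mode with no NPS range info, the fallback path in
+ * amdgpu_gmc_init_sw_mem_ranges() above divides the page count
+ * (real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT by 4, giving four
+ * ranges of roughly 4 GB each, then clamps the last range so it ends
+ * exactly at real_vram_size.
+ */
+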
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev)
+{
+ bool valid;
+
+ adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES,
+ sizeof(struct amdgpu_mem_partition_info),
+ GFP_KERNEL);
+ if (!adev->gmc.mem_partitions)
+ return -ENOMEM;
+
+ if (adev->gmc.is_app_apu)
+ amdgpu_gmc_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
+ else
+ amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+
+ if (amdgpu_sriov_vf(adev))
+ valid = true;
+ else
+ valid = amdgpu_gmc_validate_partition_info(adev);
+ if (!valid) {
+ /* TODO: handle invalid case */
+ dev_warn(adev->dev,
+ "Mem ranges not matching with hardware config\n");
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 80fa29c26e9e..397c6ccdb903 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -84,6 +84,8 @@ enum amdgpu_memory_partition {
#define AMDGPU_GMC_INIT_RESET_NPS BIT(0)
+#define AMDGPU_MAX_MEM_RANGES 8
+
/*
* GMC page fault information
*/
@@ -394,6 +396,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
return addr;
}
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev);
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev);
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
@@ -455,5 +458,13 @@ int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
int nps_mode);
void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev);
bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev);
-
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev);
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes);
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev);
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev);
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 8179d0814db9..57101d24422f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -24,7 +24,6 @@
* Alex Deucher
*/
-#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 802743efa3b3..7d9bcb72e8dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -128,6 +128,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct dma_fence *tmp = NULL;
+ struct amdgpu_fence *af;
bool need_ctx_switch;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
@@ -138,7 +139,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
int vmid = AMDGPU_JOB_GET_VMID(job);
bool need_pipe_sync = false;
unsigned int cond_exec;
-
unsigned int i;
int r = 0;
@@ -154,6 +154,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = job->csa_va;
gds_va = job->gds_va;
init_shadow = job->init_shadow;
+ af = &job->hw_fence;
+ /* Save the context of the job for reset handling.
+ * The driver needs this so it can skip the ring
+ * contents for guilty contexts.
+ */
+ af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
} else {
vm = NULL;
fence_ctx = 0;
@@ -161,6 +167,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = 0;
gds_va = 0;
init_shadow = false;
+ af = NULL;
}
if (!ring->sched.ready) {
@@ -282,7 +289,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
}
- r = amdgpu_fence_emit(ring, f, job, fence_flags);
+ r = amdgpu_fence_emit(ring, f, af, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
@@ -304,8 +311,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
ring->funcs->emit_wave_limit(ring, false);
+ /* Save the wptr associated with this fence.
+ * This must be last for resets to work properly,
+ * as we need to know which ring contents to back
+ * up after we reset the queue.
+ */
+ amdgpu_fence_save_wptr(*f);
+
amdgpu_ring_ib_end(ring);
amdgpu_ring_commit(ring);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 30f16968b578..a6419246e9c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -218,7 +218,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
restart_ih:
count = AMDGPU_IH_MAX_NUM_IVS;
- DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
+ dev_dbg(adev->dev, "%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
new file mode 100644
index 000000000000..99e1cf4fc955
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ip.h"
+
+static int8_t amdgpu_logical_to_dev_inst(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ int8_t inst)
+{
+ int8_t dev_inst;
+
+ switch (block) {
+ case GC_HWIP:
+ case SDMA0_HWIP:
+ /* Covers both JPEG and VCN, as JPEG is only an alias of VCN */
+ case VCN_HWIP:
+ dev_inst = adev->ip_map.dev_inst[block][inst];
+ break;
+ default:
+ /* For the rest of the IPs, no lookup is required.
+ * Assume 'logical instance == physical instance' for all configs. */
+ dev_inst = inst;
+ break;
+ }
+
+ return dev_inst;
+}
+
+static uint32_t amdgpu_logical_to_dev_mask(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ uint32_t mask)
+{
+ uint32_t dev_mask = 0;
+ int8_t log_inst, dev_inst;
+
+ while (mask) {
+ log_inst = ffs(mask) - 1;
+ dev_inst = amdgpu_logical_to_dev_inst(adev, block, log_inst);
+ dev_mask |= (1 << dev_inst);
+ mask &= ~(1 << log_inst);
+ }
+
+ return dev_mask;
+}
+
+static void amdgpu_populate_ip_map(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type ip_block,
+ uint32_t inst_mask)
+{
+ int l = 0, i;
+
+ while (inst_mask) {
+ i = ffs(inst_mask) - 1;
+ adev->ip_map.dev_inst[ip_block][l++] = i;
+ inst_mask &= ~(1 << i);
+ }
+ for (; l < HWIP_MAX_INSTANCE; l++)
+ adev->ip_map.dev_inst[ip_block][l] = -1;
+}
+
+void amdgpu_ip_map_init(struct amdgpu_device *adev)
+{
+ u32 ip_map[][2] = {
+ { GC_HWIP, adev->gfx.xcc_mask },
+ { SDMA0_HWIP, adev->sdma.sdma_mask },
+ { VCN_HWIP, adev->vcn.inst_mask },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
+ amdgpu_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
+
+ adev->ip_map.logical_to_dev_inst = amdgpu_logical_to_dev_inst;
+ adev->ip_map.logical_to_dev_mask = amdgpu_logical_to_dev_mask;
+}
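
A brief usage sketch, assuming amdgpu_ip_map_init() has already run; the call sites are hypothetical, but the ip_map fields are the ones installed above:

	/* Translate logical VCN instance 0 to its physical instance. */
	int8_t dev_inst = adev->ip_map.logical_to_dev_inst(adev, VCN_HWIP, 0);

	/* Translate a logical SDMA instance mask, e.g. instances 0 and 2. */
	uint32_t dev_mask = adev->ip_map.logical_to_dev_mask(adev, SDMA0_HWIP,
							     BIT(0) | BIT(2));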
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
new file mode 100644
index 000000000000..2490fd322aec
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_IP_H__
+#define __AMDGPU_IP_H__
+
+void amdgpu_ip_map_init(struct amdgpu_device *adev);
+
+#endif /* __AMDGPU_IP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 13c60cac4261..8112ffc85995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -142,8 +142,9 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
r = src->funcs->set(adev, src, k,
AMDGPU_IRQ_STATE_DISABLE);
if (r)
- DRM_ERROR("error disabling interrupt (%d)\n",
- r);
+ dev_err(adev->dev,
+ "error disabling interrupt (%d)\n",
+ r);
}
}
}
@@ -242,7 +243,7 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
return true;
}
-static void amdgpu_restore_msix(struct amdgpu_device *adev)
+void amdgpu_restore_msix(struct amdgpu_device *adev)
{
u16 ctrl;
@@ -315,7 +316,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.irq = irq;
adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
- DRM_DEBUG("amdgpu: irq initialized.\n");
+ dev_dbg(adev->dev, "amdgpu: irq initialized.\n");
return 0;
free_vectors:
@@ -461,10 +462,10 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
src_id = entry.src_id;
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
- DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+ dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id);
} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
- DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
+ dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id);
} else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) ||
(client_id == SOC15_IH_CLIENTID_ISP)) &&
@@ -472,18 +473,21 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
generic_handle_domain_irq(adev->irq.domain, src_id);
} else if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
+ dev_dbg(adev->dev,
+ "Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
} else if ((src = adev->irq.client[client_id].sources[src_id])) {
r = src->funcs->process(adev, src, &entry);
if (r < 0)
- DRM_ERROR("error processing interrupt (%d)\n", r);
+ dev_err(adev->dev, "error processing interrupt (%d)\n",
+ r);
else if (r)
handled = true;
} else {
- DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n",
+ dev_dbg(adev->dev,
+ "Unregistered interrupt src_id: %d of client_id:%d\n",
src_id, client_id);
}
@@ -620,7 +624,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned int type)
{
/* When the threshold is reached, the interrupt source may not be enabled. Return -EINVAL */
- if (amdgpu_ras_is_rma(adev))
+ if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type))
return -EINVAL;
if (!adev->irq.installed)
@@ -732,7 +736,7 @@ int amdgpu_irq_add_domain(struct amdgpu_device *adev)
adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
&amdgpu_hw_irqdomain_ops, adev);
if (!adev->irq.domain) {
- DRM_ERROR("GPU irq add domain failed\n");
+ dev_err(adev->dev, "GPU irq add domain failed\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 04c0b4fa17a4..9f0417456abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -146,5 +146,6 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
int amdgpu_irq_add_domain(struct amdgpu_device *adev);
void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id);
+void amdgpu_restore_msix(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 43fc941dfa57..9cddbf50442a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -33,6 +33,8 @@
#include "isp_v4_1_0.h"
#include "isp_v4_1_1.h"
+#define ISP_MC_ADDR_ALIGN (1024 * 32)
+
/**
* isp_hw_init - start and test isp block
*
@@ -141,6 +143,179 @@ static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
+static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev)
+{
+ if (isp_parent != amdgpu_dev)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * isp_user_buffer_alloc - create user buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @dmabuf: DMABUF handle for isp buffer allocated in system memory
+ * @buf_obj: GPU buffer object handle to initialize
+ * @buf_addr: GPU addr of the pinned BO to initialize
+ *
+ * Imports the isp DMABUF to allocate and pin a user BO for isp internal use. It
+ * performs a GART allocation to generate a GPU address for the BO, making it
+ * accessible to the ISP HW through the GART aperture.
+ *
+ * This function is exported to allow the V4L2 isp device, which is external to
+ * the drm device, to create and access the isp user BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
+ void **buf_obj, u64 *buf_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ struct amdgpu_bo *bo;
+ u64 gpu_addr;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!buf_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_isp_user(adev, dmabuf,
+ AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret);
+ return ret;
+ }
+
+ *buf_obj = (void *)bo;
+ *buf_addr = gpu_addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_user_buffer_alloc);
+
+/**
+ * isp_user_buffer_free - free isp user buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp user BO to free
+ *
+ * Unpins and unrefs the BO used internally by isp.
+ *
+ * This function is exported to allow the V4L2 isp device, which is external
+ * to the drm device, to free the isp user BO.
+ */
+void isp_user_buffer_free(void *buf_obj)
+{
+ amdgpu_bo_free_isp_user(buf_obj);
+}
+EXPORT_SYMBOL(isp_user_buffer_free);
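
A hedged sketch of how the external V4L2 isp driver might pair these two exported calls; isp_attach_user_buf() and its dma-buf argument are illustrative only:

	/* @dev is the isp platform device, @dbuf a dma-buf the V4L2 driver
	 * already holds a reference to. */
	static int isp_attach_user_buf(struct device *dev, struct dma_buf *dbuf,
				       void **bo, u64 *gart_addr)
	{
		int ret;

		ret = isp_user_buffer_alloc(dev, dbuf, bo, gart_addr);
		if (ret)
			return ret;

		/* ... program *gart_addr into the ISP HW ... */
		return 0;
	}

	/* and on teardown: isp_user_buffer_free(bo); */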
+
+/**
+ * isp_kernel_buffer_alloc - create kernel buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @size: size for the new BO
+ * @buf_obj: GPU BO handle to initialize
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: CPU address mapping of BO
+ *
+ * Allocates and pins a kernel BO for internal isp firmware use.
+ *
+ * This function is exported to allow the V4L2 isp device, which is external
+ * to the drm device, to create and access the kernel BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_kernel_buffer_alloc(struct device *dev, u64 size,
+ void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!gpu_addr))
+ return -EINVAL;
+
+ if (WARN_ON(!cpu_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_kernel(adev,
+ size,
+ ISP_MC_ADDR_ALIGN,
+ AMDGPU_GEM_DOMAIN_GTT,
+ bo,
+ gpu_addr,
+ cpu_addr);
+ if (!cpu_addr || ret) {
+ drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_kernel_buffer_alloc);
+
+/**
+ * isp_kernel_buffer_free - free isp kernel buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp kernel BO to free
+ * @gpu_addr: GPU addr of isp kernel BO
+ * @cpu_addr: CPU addr of isp kernel BO
+ *
+ * Unmaps and unpins an isp kernel BO.
+ *
+ * This function is exported to allow the V4L2 isp device, which is external
+ * to the drm device, to free the kernel BO.
+ */
+void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+
+ amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
+}
+EXPORT_SYMBOL(isp_kernel_buffer_free);
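
Likewise for the kernel BO pair, a minimal sketch (the fw_* names, fw_image/fw_size, and the SZ_2M size are assumptions, not from this patch):

	void *fw_bo, *fw_cpu_addr;
	u64 fw_gpu_addr;
	int ret;

	ret = isp_kernel_buffer_alloc(dev, SZ_2M, &fw_bo, &fw_gpu_addr,
				      &fw_cpu_addr);
	if (ret)
		return ret;

	/* stage firmware through the CPU mapping; fw_gpu_addr is 32K-aligned
	 * per ISP_MC_ADDR_ALIGN and can be handed to the ISP HW */
	memcpy(fw_cpu_addr, fw_image, fw_size);

	/* and when done: */
	isp_kernel_buffer_free(&fw_bo, &fw_gpu_addr, &fw_cpu_addr);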
+
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index 4f3b7b5d9c1f..d6f4ffa4c97c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -28,16 +28,13 @@
#ifndef __AMDGPU_ISP_H__
#define __AMDGPU_ISP_H__
+#include <drm/amd/isp.h>
+#include <linux/pm_domain.h>
+
#define ISP_REGS_OFFSET_END 0x629A4
struct amdgpu_isp;
-struct isp_platform_data {
- void *adev;
- u32 asic_type;
- resource_size_t base_rmmio_size;
-};
-
struct isp_funcs {
int (*hw_init)(struct amdgpu_isp *isp);
int (*hw_fini)(struct amdgpu_isp *isp);
@@ -54,6 +51,7 @@ struct amdgpu_isp {
struct isp_platform_data *isp_pdata;
unsigned int harvest_config;
const struct firmware *fw;
+ struct generic_pm_domain ispgpd;
};
extern const struct amdgpu_ip_block_version isp_v4_1_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index ddb9d3269357..9b1c55115921 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -89,10 +89,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
- struct amdgpu_task_info *ti;
+ struct drm_wedge_task_info *info = NULL;
+ struct amdgpu_task_info *ti = NULL;
struct amdgpu_device *adev = ring->adev;
- int idx;
- int r;
+ int idx, r;
if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
@@ -112,6 +112,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_job_core_dump(adev, job);
if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
s_job->sched->name);
@@ -124,53 +125,30 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
if (ti) {
- dev_err(adev->dev,
- "Process information: process %s pid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
- amdgpu_vm_put_task_info(ti);
+ amdgpu_vm_print_task_info(adev, ti);
+ info = &ti->task;
}
/* attempt a per ring reset */
if (unlikely(adev->debug_disable_gpu_ring_reset)) {
dev_err(adev->dev, "Ring reset disabled by debug mask\n");
- } else if (amdgpu_gpu_recovery && ring->funcs->reset) {
- bool is_guilty;
-
- dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name);
- /* stop the scheduler, but don't mess with the
- * bad job yet because if ring reset fails
- * we'll fall back to full GPU reset.
- */
- drm_sched_wqueue_stop(&ring->sched);
-
- /* for engine resets, we need to reset the engine,
- * but individual queues may be unaffected.
- * check here to make sure the accounting is correct.
- */
- if (ring->funcs->is_guilty)
- is_guilty = ring->funcs->is_guilty(ring);
- else
- is_guilty = true;
-
- if (is_guilty)
- dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
-
- r = amdgpu_ring_reset(ring, job->vmid);
+ } else if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
+ ring->funcs->reset) {
+ dev_err(adev->dev, "Starting %s ring reset\n",
+ s_job->sched->name);
+ r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence);
if (!r) {
- if (amdgpu_ring_sched_ready(ring))
- drm_sched_stop(&ring->sched, s_job);
- if (is_guilty) {
- atomic_inc(&ring->adev->gpu_reset_counter);
- amdgpu_fence_driver_force_completion(ring);
- }
- if (amdgpu_ring_sched_ready(ring))
- drm_sched_start(&ring->sched, 0);
- dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name);
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+ atomic_inc(&ring->adev->gpu_reset_counter);
+ dev_err(adev->dev, "Ring %s reset succeeded\n",
+ ring->sched.name);
+ drm_dev_wedged_event(adev_to_drm(adev),
+ DRM_WEDGE_RECOVERY_NONE, info);
goto exit;
}
- dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name);
+ dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
}
+
dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
if (amdgpu_device_should_recover_gpu(ring->adev)) {
@@ -198,13 +176,15 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
exit:
+ amdgpu_vm_put_task_info(ti);
drm_dev_exit(idx);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job)
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id)
{
if (num_ibs == 0)
return -EINVAL;
@@ -222,7 +202,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!entity)
return 0;
- return drm_sched_job_init(&(*job)->base, entity, 1, owner);
+ return drm_sched_job_init(&(*job)->base, entity, 1, owner,
+ drm_client_id);
}
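
The new drm_client_id parameter is threaded straight through to drm_sched_job_init(); a hedged sketch of a submission-path caller (the client_id field access is an assumption about the surrounding CS code, which is not shown in this hunk):

	/* attribute the job to the DRM client that submitted it */
	r = amdgpu_job_alloc(adev, vm, entity, filp /* owner */,
			     num_ibs, &job, filp->client_id);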
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
@@ -232,7 +213,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
{
int r;
- r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0);
if (r)
return r;
@@ -384,13 +365,6 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
goto error;
}
- /*
- * The VM structure might be released after the VMID is
- * assigned, we had multiple problems with people trying to use
- * the VM pointer so better set it to NULL.
- */
- if (!fence)
- job->vm = NULL;
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 931fed8892cc..2f302266662b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -91,7 +91,8 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job);
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index dda29132dfb2..82d58ac7afb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -463,7 +463,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count,
sizeof(uint32_t), GFP_KERNEL);
if (!adev->jpeg.ip_dump) {
- DRM_ERROR("Failed to allocate memory for JPEG IP Dump\n");
+ dev_err(adev->dev,
+ "Failed to allocate memory for JPEG IP Dump\n");
return -ENOMEM;
}
adev->jpeg.reg_list = reg;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d2ce7d86dbc8..8a76960803c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -91,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
return;
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD))
DRM_WARN("smart shift update failed\n");
amdgpu_acpi_fini(adev);
@@ -161,7 +161,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
if (acpi_status)
dev_dbg(dev->dev, "Error during ACPI methods call\n");
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD))
DRM_WARN("smart shift update failed\n");
out:
@@ -399,6 +399,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
uint32_t ib_size_alignment = 0;
enum amd_ip_block_type type;
unsigned int num_rings = 0;
+ uint32_t num_slots = 0;
unsigned int i, j;
if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
@@ -411,6 +412,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->gfx.gfx_ring[i].sched.ready &&
!adev->gfx.gfx_ring[i].no_user_submission)
++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
+ num_slots += hweight32(adev->mes.gfx_hqd_mask[i]);
+ }
+
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
@@ -420,6 +427,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->gfx.compute_ring[i].sched.ready &&
!adev->gfx.compute_ring[i].no_user_submission)
++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++)
+ num_slots += hweight32(adev->mes.compute_hqd_mask[i]);
+ }
+
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
@@ -429,6 +442,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->sdma.instance[i].ring.sched.ready &&
!adev->sdma.instance[i].ring.no_user_submission)
++num_rings;
+
+ if (!adev->sdma.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++)
+ num_slots += hweight32(adev->mes.sdma_hqd_mask[i]);
+ }
+
ib_start_alignment = 256;
ib_size_alignment = 4;
break;
@@ -570,6 +589,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
}
result->capabilities_flags = 0;
result->available_rings = (1 << num_rings) - 1;
+ result->userq_num_slots = num_slots;
result->ib_start_alignment = ib_start_alignment;
result->ib_size_alignment = ib_size_alignment;
return 0;
@@ -1395,6 +1415,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (r)
goto error_pasid;
+ amdgpu_debugfs_vm_init(file_priv);
+
r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
if (r)
goto error_pasid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 6fa9fa11c8f3..135598502c8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -47,7 +47,7 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
/* Bitmap for dynamic allocation of kernel doorbells */
mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
if (!mes->doorbell_bitmap) {
- DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
+ dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n");
return -ENOMEM;
}
@@ -256,7 +256,7 @@ int amdgpu_mes_suspend(struct amdgpu_device *adev)
r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to suspend all gangs");
+ dev_err(adev->dev, "failed to suspend all gangs");
return r;
}
@@ -280,7 +280,7 @@ int amdgpu_mes_resume(struct amdgpu_device *adev)
r = adev->mes.funcs->resume_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to resume all gangs");
+ dev_err(adev->dev, "failed to resume all gangs");
return r;
}
@@ -304,7 +304,7 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to map legacy queue\n");
+ dev_err(adev->dev, "failed to map legacy queue\n");
return r;
}
@@ -329,7 +329,7 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to unmap legacy queue\n");
+ dev_err(adev->dev, "failed to unmap legacy queue\n");
return r;
}
@@ -361,7 +361,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to reset legacy queue\n");
+ dev_err(adev->dev, "failed to reset legacy queue\n");
return r;
}
@@ -469,7 +469,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
int r;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes set shader debugger is not supported!\n");
+ dev_err(adev->dev,
+ "mes set shader debugger is not supported!\n");
return -EINVAL;
}
@@ -493,7 +494,7 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to set_shader_debugger\n");
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
amdgpu_mes_unlock(&adev->mes);
@@ -507,7 +508,8 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
int r;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes flush shader debugger is not supported!\n");
+ dev_err(adev->dev,
+ "mes flush shader debugger is not supported!\n");
return -EINVAL;
}
@@ -519,7 +521,7 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to set_shader_debugger\n");
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
amdgpu_mes_unlock(&adev->mes);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index d085687a47ea..a974265837f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -53,6 +53,16 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
return 0;
}
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev) || !adev->asic_funcs ||
+ !adev->asic_funcs->get_pcie_replay_count ||
+ (!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count))
+ return false;
+
+ return true;
+}
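
A short usage sketch: callers are expected to gate the query on the new helper, so VFs and asics without the callbacks never dereference a NULL function pointer (the surrounding sysfs/debugfs context is assumed):

	u64 cnt = 0;

	if (amdgpu_nbio_is_replay_cnt_supported(adev))
		cnt = amdgpu_nbio_get_pcie_replay_count(adev);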
+
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 79c2f807b9fe..b528de6a01f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -119,4 +119,6 @@ int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev);
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 73403744331a..122a88294883 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
+#include <linux/export.h>
#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
@@ -62,7 +63,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_kunmap(bo);
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
drm_gem_object_release(&bo->tbo.base);
amdgpu_bo_unref(&bo->parent);
@@ -351,7 +352,6 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
return 0;
}
-EXPORT_SYMBOL(amdgpu_bo_create_kernel);
/**
* amdgpu_bo_create_isp_user - create user BO for isp
@@ -420,7 +420,6 @@ error_unreserve:
return r;
}
-EXPORT_SYMBOL(amdgpu_bo_create_isp_user);
/**
* amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
@@ -524,7 +523,6 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
if (cpu_addr)
*cpu_addr = NULL;
}
-EXPORT_SYMBOL(amdgpu_bo_free_kernel);
/**
* amdgpu_bo_free_isp_user - free BO for isp use
@@ -547,7 +545,6 @@ void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
}
amdgpu_bo_unref(&bo);
}
-EXPORT_SYMBOL(amdgpu_bo_free_isp_user);
/* Validate bo size is bit bigger than the request domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
@@ -939,7 +936,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
domain = bo->preferred_domains & domain;
/* A shared bo cannot be migrated to VRAM */
- if (bo->tbo.base.import_attach) {
+ if (drm_gem_is_imported(&bo->tbo.base)) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
@@ -967,7 +964,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
*/
domain = amdgpu_bo_get_preferred_domain(adev, domain);
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_pin(bo->tbo.base.import_attach);
/* force to pin into visible video ram */
@@ -1018,7 +1015,7 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
if (bo->tbo.pin_count)
return;
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_unpin(bo->tbo.base.import_attach);
if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
@@ -1263,7 +1260,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
- if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+ if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) &&
old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
@@ -1473,6 +1470,26 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo
+ * @bo: amdgpu VRAM buffer object for which we query the offset
+ *
+ * Returns:
+ * current FB aperture GPU offset of the object.
+ */
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint64_t offset, fb_base;
+
+ WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM);
+
+ fb_base = adev->gmc.fb_start;
+ fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base;
+ return amdgpu_gmc_sign_extend(offset);
+}
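
A worked example under assumed values, to make the address arithmetic concrete (the numbers are illustrative, not from this patch):

	/* With fb_start = 0x8000000000, physical_node_id = 1 and
	 * node_segment_size = 0x800000000 (32 GiB), a BO at VRAM page 0 gives:
	 *
	 *   fb_base = 0x8000000000 + 1 * 0x800000000 = 0x8800000000
	 *   offset  = (0 << PAGE_SHIFT) + fb_base    = 0x8800000000
	 *
	 * which amdgpu_gmc_sign_extend() then canonicalizes. */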
+
+/**
* amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 375448627f7b..c316920f3450 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -304,6 +304,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 98cc9c450192..23484317a5fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -252,6 +252,7 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block)
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
psp_v14_0_set_psp_funcs(psp);
break;
case IP_VERSION(14, 0, 5):
@@ -574,9 +575,11 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
-int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t reg_val, uint32_t mask, bool check_changed)
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
+ uint32_t mask, uint32_t flags)
{
+ bool check_changed = flags & PSP_WAITREG_CHANGED;
+ bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
uint32_t val;
int i;
struct amdgpu_device *adev = psp->adev;
@@ -596,6 +599,11 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
udelay(1);
}
+ if (verbose)
+ dev_err(adev->dev,
+ "psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
+ reg_index, mask, val, reg_val);
+
return -ETIME;
}
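
Callers now pass a flag mask instead of a bool; a hedged sketch using the TOS mailbox values added to amdgpu_psp.h below (SOC_REG_INDEX is a placeholder register index, and ret is assumed declared):

	/* poll quietly; the caller retries, so skip the timeout message */
	ret = psp_wait_for(psp, SOC_REG_INDEX,
			   MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK,
			   PSP_WAITREG_NOVERBOSE);

	/* wait until the masked value differs from reg_val (here 0),
	 * with a verbose timeout */
	ret = psp_wait_for(psp, SOC_REG_INDEX, 0, 0xffffffff,
			   PSP_WAITREG_CHANGED);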
@@ -654,6 +662,10 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
return "BOOT_CFG";
case GFX_CMD_ID_CONFIG_SQ_PERFMON:
return "CONFIG_SQ_PERFMON";
+ case GFX_CMD_ID_FB_FW_RESERV_ADDR:
+ return "FB_FW_RESERV_ADDR";
+ case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
+ return "FB_FW_RESERV_EXT_ADDR";
default:
return "UNKNOWN CMD";
}
@@ -871,6 +883,8 @@ static int psp_tmr_init(struct psp_context *psp)
&psp->tmr_bo, &psp->tmr_mc_addr,
pptr);
}
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
+ psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
return ret;
}
@@ -984,6 +998,106 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
return ret;
}
+static int psp_get_fw_reservation_info(struct psp_context *psp,
+ uint32_t cmd_id,
+ uint64_t *addr,
+ uint32_t *size)
+{
+ int ret;
+ uint32_t status;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = cmd_id;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+ if (ret) {
+ release_psp_cmd_buf(psp);
+ return ret;
+ }
+
+ status = cmd->resp.status;
+ if (status == PSP_ERR_UNKNOWN_COMMAND) {
+ release_psp_cmd_buf(psp);
+ *addr = 0;
+ *size = 0;
+ return 0;
+ }
+
+ *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
+ cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
+ *size = cmd->resp.uresp.fw_reserve_info.reserve_size;
+
+ release_psp_cmd_buf(psp);
+
+ return 0;
+}
+
+int psp_update_fw_reservation(struct psp_context *psp)
+{
+ int ret;
+ uint64_t reserv_addr, reserv_addr_ext;
+ uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
+ struct amdgpu_device *adev = psp->adev;
+
+ mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ switch (mp0_ip_ver) {
+ case IP_VERSION(14, 0, 2):
+ if (adev->psp.sos.fw_version < 0x3b0e0d)
+ return 0;
+ break;
+
+ case IP_VERSION(14, 0, 3):
+ if (adev->psp.sos.fw_version < 0x3a0e14)
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
+ if (ret)
+ return ret;
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
+ if (ret)
+ return ret;
+
+ if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
+ dev_warn(adev->dev, "reserve fw region is not valid!\n");
+ return 0;
+ }
+
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+
+ reserv_size = roundup(reserv_size, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+ return ret;
+ }
+
+ reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
+ &adev->mman.fw_reserved_memory_extend, NULL);
+ if (ret) {
+ dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
+ return ret;
+ }
+
+ return 0;
+}
+
static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
struct psp_context *psp = &adev->psp;
@@ -1270,6 +1384,11 @@ int psp_ta_load(struct psp_context *psp, struct ta_context *context)
psp_copy_fw(psp, context->bin_desc.start_addr,
context->bin_desc.size_bytes);
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
+ context->mem_context.shared_bo)
+ context->mem_context.shared_mc_addr =
+ amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
+
psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
@@ -2337,11 +2456,27 @@ bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
return false;
}
+static void psp_update_gpu_addresses(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+
+ if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
+ psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
+ psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
+ psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
+ }
+ if (adev->firmware.rbuf && psp->km_ring.ring_mem)
+ psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int ret;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev))
+ psp_update_gpu_addresses(adev);
+
if (!amdgpu_sriov_vf(adev)) {
if ((is_psp_fw_valid(psp->kdb)) &&
(psp->funcs->bootloader_load_kdb != NULL)) {
@@ -2440,6 +2575,14 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ ret = psp_update_fw_reservation(psp);
+ if (ret) {
+ dev_err(adev->dev, "update fw reservation failed!\n");
+ return ret;
+ }
+ }
+
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 428adc7f741d..237b624aa51c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -51,6 +51,17 @@
#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10
#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11
+/* Command register bit 31 set to indicate readiness */
+#define MBOX_TOS_READY_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_READY_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
+/* Values to check for a successful GFX_CMD response wait. Checking against
+ * both the status bits and the response state helps to detect a command
+ * failure or other unexpected cases, such as a dropped device reading back
+ * all 0xFFs.
+ */
+#define MBOX_TOS_RESP_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_RESP_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
extern const struct attribute_group amdgpu_flash_attr_group;
enum psp_shared_mem_size {
@@ -123,6 +134,9 @@ enum psp_reg_prog_id {
PSP_REG_LAST
};
+#define PSP_WAITREG_CHANGED BIT(0) /* check if the value has changed */
+#define PSP_WAITREG_NOVERBOSE BIT(1) /* suppress the timeout error message */
+
struct psp_funcs {
int (*init_microcode)(struct psp_context *psp);
int (*wait_for_bootloader)(struct psp_context *psp);
@@ -521,8 +535,8 @@ extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
extern const struct amdgpu_ip_block_version psp_v14_0_ip_block;
-extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t field_val, uint32_t mask, bool check_changed);
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
+ uint32_t field_val, uint32_t mask, uint32_t flags);
extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, uint32_t msec_timeout);
@@ -588,7 +602,7 @@ int psp_init_cap_microcode(struct psp_context *psp,
const char *chip_name);
int psp_get_fw_attestation_records_addr(struct psp_context *psp,
uint64_t *output_ptr);
-
+int psp_update_fw_reservation(struct psp_context *psp);
int psp_load_fw_list(struct psp_context *psp,
struct amdgpu_firmware_info **ucode_list, int ucode_count);
void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 9c8829bd5a58..540817e296da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1107,6 +1107,9 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
err_info->de_count, blk_name);
}
} else {
+ if (adev->debug_disable_ce_logs)
+ return;
+
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
@@ -2854,6 +2857,13 @@ static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
if (amdgpu_umc_pages_in_a_row(adev, err_data,
bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps[0].address;
+ err_data->err_addr[i].mem_channel = bps[0].mem_channel;
+ err_data->err_addr[i].bank = bps[0].bank;
+ err_data->err_addr[i].err_type = bps[0].err_type;
+ err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
+ }
} else {
if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
return -EINVAL;
@@ -2885,6 +2895,7 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
struct eeprom_table_record *bps, struct ras_err_data *err_data,
enum amdgpu_memory_partition nps)
{
+ int i = 0;
enum amdgpu_memory_partition save_nps;
save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
@@ -2894,6 +2905,13 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
if (amdgpu_umc_pages_in_a_row(adev, err_data,
bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps->address;
+ err_data->err_addr[i].mem_channel = bps->mem_channel;
+ err_data->err_addr[i].bank = bps->bank;
+ err_data->err_addr[i].err_type = bps->err_type;
+ err_data->err_addr[i].mcumc_id = bps->mcumc_id;
+ }
} else {
if (bps->address) {
if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
@@ -3003,6 +3021,15 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
return 0;
}
+ if (!con->eeprom_control.is_eeprom_valid) {
+ dev_warn(adev->dev,
+ "Failed to save EEPROM table data because of EEPROM data corruption!");
+ if (new_cnt)
+ *new_cnt = 0;
+
+ return 0;
+ }
+
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
@@ -3294,7 +3321,6 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
uint64_t de_queried_count;
uint32_t new_detect_count, total_detect_count;
uint32_t need_query_count = poison_creation_count;
- bool query_data_timeout = false;
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
memset(&info, 0, sizeof(info));
@@ -3323,21 +3349,13 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
if (timeout) {
- if (!--timeout) {
- query_data_timeout = true;
+ if (!--timeout)
break;
- }
msleep(1);
}
}
} while (total_detect_count < need_query_count);
- if (query_data_timeout) {
- dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
- (need_query_count - total_detect_count));
- return -ENOENT;
- }
-
if (total_detect_count)
schedule_delayed_work(&ras->page_retirement_dwork, 0);
@@ -3488,8 +3506,7 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
control = &con->eeprom_control;
ret = amdgpu_ras_eeprom_init(control);
- if (ret)
- return ret;
+ control->is_eeprom_valid = !ret;
if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
control->ras_num_pa_recs = control->ras_num_recs;
@@ -3498,10 +3515,12 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
adev->umc.ras->get_retire_flip_bits)
adev->umc.ras->get_retire_flip_bits(adev);
- if (control->ras_num_recs) {
+ if (control->ras_num_recs && control->is_eeprom_valid) {
ret = amdgpu_ras_load_bad_pages(adev);
- if (ret)
- return ret;
+ if (ret) {
+ control->is_eeprom_valid = false;
+ return 0;
+ }
amdgpu_dpm_send_hbm_bad_pages_num(
adev, control->ras_num_bad_pages);
@@ -3520,7 +3539,7 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
}
- return ret;
+ return 0;
}
int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
@@ -4414,8 +4433,10 @@ void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
struct amdgpu_ras *ras;
ras = amdgpu_ras_get_context(adev);
- if (ras)
+ if (ras) {
ras->ras_err_state = 0;
+ ras->gpu_reset_flags = 0;
+ }
}
void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2c58e09e56f9..9bda9ad13f88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -277,10 +277,11 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Failed to write EEPROM table header:%d", res);
+ dev_err(adev->dev, "Failed to write EEPROM table header:%d",
+ res);
} else if (res < RAS_TABLE_HEADER_SIZE) {
- DRM_ERROR("Short write:%d out of %d\n",
- res, RAS_TABLE_HEADER_SIZE);
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_HEADER_SIZE);
res = -EIO;
} else {
res = 0;
@@ -323,7 +324,8 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Failed to alloc buf to write table ras info\n");
+ dev_err(adev->dev,
+ "Failed to alloc buf to write table ras info\n");
return -ENOMEM;
}
@@ -338,10 +340,11 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Failed to write EEPROM table ras info:%d", res);
+ dev_err(adev->dev, "Failed to write EEPROM table ras info:%d",
+ res);
} else if (res < RAS_TABLE_V2_1_INFO_SIZE) {
- DRM_ERROR("Short write:%d out of %d\n",
- res, RAS_TABLE_V2_1_INFO_SIZE);
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_V2_1_INFO_SIZE);
res = -EIO;
} else {
res = 0;
@@ -476,6 +479,8 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
control->ras_num_recs = 0;
control->ras_num_bad_pages = 0;
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
control->ras_fri = 0;
amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages);
@@ -607,13 +612,13 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control,
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Writing %d EEPROM table records error:%d",
- num, res);
+ dev_err(adev->dev, "Writing %d EEPROM table records error:%d",
+ num, res);
} else if (res < buf_size) {
/* Short write, return error.
*/
- DRM_ERROR("Wrote %d records out of %d",
- res / RAS_TABLE_RECORD_SIZE, num);
+ dev_err(adev->dev, "Wrote %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
res = -EIO;
} else {
res = 0;
@@ -761,18 +766,17 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
- control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
- if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
- control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
- control->tbl_rai.health_percent = 0;
- }
-
if ((amdgpu_bad_page_threshold != -1) &&
- (amdgpu_bad_page_threshold != -2))
+ (amdgpu_bad_page_threshold != -2)) {
+ control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
+ control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
+ control->tbl_rai.health_percent = 0;
+ }
ras->is_rma = true;
-
- /* ignore the -ENOTSUPP return value */
- amdgpu_dpm_send_rma_reason(adev);
+ /* ignore the -ENOTSUPP return value */
+ amdgpu_dpm_send_rma_reason(adev);
+ }
}
if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
@@ -787,8 +791,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("allocating memory for table of size %d bytes failed\n",
- control->tbl_hdr.tbl_size);
+ dev_err(adev->dev,
+ "allocating memory for table of size %d bytes failed\n",
+ control->tbl_hdr.tbl_size);
res = -ENOMEM;
goto Out;
}
@@ -800,12 +805,11 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("EEPROM failed reading records:%d\n",
- res);
+ dev_err(adev->dev, "EEPROM failed reading records:%d\n", res);
goto Out;
} else if (res < buf_size) {
- DRM_ERROR("EEPROM read %d out of %d bytes\n",
- res, buf_size);
+ dev_err(adev->dev, "EEPROM read %d out of %d bytes\n", res,
+ buf_size);
res = -EIO;
goto Out;
}
@@ -866,11 +870,12 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
return 0;
if (num == 0) {
- DRM_ERROR("will not append 0 records\n");
+ dev_err(adev->dev, "will not append 0 records\n");
return -EINVAL;
} else if (num > control->ras_max_record_count) {
- DRM_ERROR("cannot append %d records than the size of table %d\n",
- num, control->ras_max_record_count);
+ dev_err(adev->dev,
+ "cannot append %d records than the size of table %d\n",
+ num, control->ras_max_record_count);
return -EINVAL;
}
@@ -924,13 +929,13 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Reading %d EEPROM table records error:%d",
- num, res);
+ dev_err(adev->dev, "Reading %d EEPROM table records error:%d",
+ num, res);
} else if (res < buf_size) {
/* Short read, return error.
*/
- DRM_ERROR("Read %d records out of %d",
- res / RAS_TABLE_RECORD_SIZE, num);
+ dev_err(adev->dev, "Read %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
res = -EIO;
} else {
res = 0;
@@ -964,11 +969,11 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
return 0;
if (num == 0) {
- DRM_ERROR("will not read 0 records\n");
+ dev_err(adev->dev, "will not read 0 records\n");
return -EINVAL;
} else if (num > control->ras_num_recs) {
- DRM_ERROR("too many records to read:%d available:%d\n",
- num, control->ras_num_recs);
+ dev_err(adev->dev, "too many records to read:%d available:%d\n",
+ num, control->ras_num_recs);
return -EINVAL;
}
@@ -1300,7 +1305,8 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Out of memory checking RAS table checksum.\n");
+ dev_err(adev->dev,
+ "Out of memory checking RAS table checksum.\n");
return -ENOMEM;
}
@@ -1309,7 +1315,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
control->ras_header_offset,
buf, buf_size);
if (res < buf_size) {
- DRM_ERROR("Partial read for checksum, res:%d\n", res);
+ dev_err(adev->dev, "Partial read for checksum, res:%d\n", res);
/* On partial reads, return -EIO.
*/
if (res >= 0)
@@ -1334,7 +1340,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Failed to alloc buf to read EEPROM table ras info\n");
+ dev_err(adev->dev,
+ "Failed to alloc buf to read EEPROM table ras info\n");
return -ENOMEM;
}
@@ -1346,7 +1353,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
control->i2c_address + control->ras_info_offset,
buf, RAS_TABLE_V2_1_INFO_SIZE);
if (res < RAS_TABLE_V2_1_INFO_SIZE) {
- DRM_ERROR("Failed to read EEPROM table ras info, res:%d", res);
+ dev_err(adev->dev,
+ "Failed to read EEPROM table ras info, res:%d", res);
res = res >= 0 ? -EIO : res;
goto Out;
}
@@ -1387,7 +1395,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
control->i2c_address + control->ras_header_offset,
buf, RAS_TABLE_HEADER_SIZE);
if (res < RAS_TABLE_HEADER_SIZE) {
- DRM_ERROR("Failed to read EEPROM table header, res:%d", res);
+ dev_err(adev->dev, "Failed to read EEPROM table header, res:%d",
+ res);
return res >= 0 ? -EIO : res;
}
@@ -1452,8 +1461,9 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
control->ras_num_mca_recs * adev->umc.retire_unit;
if (hdr->header == RAS_TABLE_HDR_VAL) {
- DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
- control->ras_num_bad_pages);
+ dev_dbg(adev->dev,
+ "Found existing EEPROM table with %d records",
+ control->ras_num_bad_pages);
if (hdr->version >= RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
@@ -1521,3 +1531,31 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
return res < 0 ? res : 0;
}
+
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control;
+ int res;
+
+ if (!__is_ras_eeprom_supported(adev) || !ras)
+ return;
+ control = &ras->eeprom_control;
+ if (!control->is_eeprom_valid)
+ return;
+ res = __verify_ras_table_checksum(control);
+ if (res) {
+ dev_warn(adev->dev,
+ "RAS table incorrect checksum or error:%d, try to recover\n",
+ res);
+ if (!amdgpu_ras_eeprom_reset_table(control))
+ if (!amdgpu_ras_save_bad_pages(adev, NULL))
+ if (!__verify_ras_table_checksum(control)) {
+ dev_info(adev->dev, "RAS table recovery succeed\n");
+ return;
+ }
+ dev_err(adev->dev, "RAS table recovery failed\n");
+ control->is_eeprom_valid = false;
+ }
+ return;
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index ec6d7ea37ad0..ebfca4cb5688 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -114,6 +114,8 @@ struct amdgpu_ras_eeprom_control {
/* Record channel info which occurred bad pages
*/
u32 bad_channel_bitmap;
+
+ bool is_eeprom_valid;
};
/*
@@ -159,6 +161,8 @@ void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev);
+
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 6ac0ce361a2d..6379bb25bf5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -99,6 +99,29 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
return 0;
}
+/**
+ * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * Unlike amdgpu_ring_alloc(), this does not check the max_dw
+ * limit, as we may be re-emitting several submissions.
+ */
+static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw)
+{
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+
+ ring->count_dw = ndw;
+ ring->wptr_old = ring->wptr;
+
+ if (ring->funcs->begin_use)
+ ring->funcs->begin_use(ring);
+}
+
/**
 * amdgpu_ring_insert_nop - insert NOP packets
*
* @ring: amdgpu_ring structure holding ring information
@@ -333,6 +356,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* Initialize cached_rptr to 0 */
ring->cached_rptr = 0;
+ if (!ring->ring_backup) {
+ ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
+ if (!ring->ring_backup)
+ return -ENOMEM;
+ }
+
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
@@ -342,6 +371,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
(void **)&ring->ring);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
+ kvfree(ring->ring_backup);
return r;
}
amdgpu_ring_clear_ring(ring);
@@ -385,6 +415,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);
+ kvfree(ring->ring_backup);
+ ring->ring_backup = NULL;
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
@@ -687,6 +719,7 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
prop->eop_gpu_addr = ring->eop_gpu_addr;
prop->use_doorbell = ring->use_doorbell;
prop->doorbell_index = ring->doorbell_index;
+ prop->kernel_queue = true;
/* map_queues packet doesn't need to activate the queue,
 * so only kiq needs to set this field.
@@ -758,3 +791,69 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
return true;
}
+
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ /* Stop the scheduler to prevent anybody else from touching the ring buffer. */
+ drm_sched_wqueue_stop(&ring->sched);
+ /* back up the non-guilty commands */
+ amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
+}
+
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ unsigned int i;
+ int r;
+
+ /* verify that the ring is functional */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+
+ /* signal the fence of the bad job */
+ if (guilty_fence)
+ amdgpu_fence_driver_guilty_force_completion(guilty_fence);
+ /* Re-emit the non-guilty commands */
+ if (ring->ring_backup_entries_to_copy) {
+ amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy);
+ for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
+ amdgpu_ring_write(ring, ring->ring_backup[i]);
+ amdgpu_ring_commit(ring);
+ }
+ /* Start the scheduler again */
+ drm_sched_wqueue_start(&ring->sched);
+ return 0;
+}
+
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type)
+{
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ if (ring->adev->gfx.gfx_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ if (ring->adev->gfx.compute_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ if (ring->adev->sdma.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_DEC:
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ if (ring->adev->vcn.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ if (ring->adev->jpeg.supported_reset & reset_type)
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
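The two helpers above bracket an engine reset: stop the scheduler and snapshot the commands that must survive into ring_backup, reset the hardware, then replay the snapshot and restart the scheduler. A minimal user-space sketch of the backup-and-replay idea (plain arrays stand in for the ring and its backup; this is not driver code):

#include <stdio.h>
#include <string.h>

#define RING_SIZE 16

static unsigned int ring[RING_SIZE];
static unsigned int ring_backup[RING_SIZE];
static unsigned int wptr, backup_count;

/* snapshot everything emitted at or after survivor_start; in the
 * driver this boundary is derived from the guilty fence's saved wptr */
static void backup_unprocessed(unsigned int survivor_start)
{
    backup_count = 0;
    for (unsigned int i = survivor_start; i < wptr; i++)
        ring_backup[backup_count++] = ring[i];
}

static void reemit(void)
{
    for (unsigned int i = 0; i < backup_count; i++)
        ring[wptr++] = ring_backup[i];
}

int main(void)
{
    for (wptr = 0; wptr < 6; wptr++)
        ring[wptr] = wptr + 100;   /* pretend submissions */

    backup_unprocessed(3);         /* entries 3..5 must survive */
    wptr = 0;                      /* "reset" empties the ring */
    memset(ring, 0, sizeof(ring));
    reemit();

    printf("replayed %u entries, wptr=%u\n", backup_count, wptr);
    return 0;
}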
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index e1f25218943a..7670f5d82b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -118,6 +118,7 @@ struct amdgpu_fence_driver {
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
+ u64 signalled_wptr;
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
@@ -141,6 +142,12 @@ struct amdgpu_fence {
/* RB, DMA, etc. */
struct amdgpu_ring *ring;
ktime_t start_timestamp;
+
+ /* wptr for the fence for resets */
+ u64 wptr;
+ /* fence context for resets */
+ u64 context;
+ uint32_t seq;
};
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
@@ -148,6 +155,8 @@ extern const struct drm_sched_backend_ops amdgpu_sched_ops;
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence);
+void amdgpu_fence_save_wptr(struct dma_fence *fence);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
@@ -157,8 +166,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
- unsigned flags);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ struct amdgpu_fence *af, unsigned int flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
@@ -268,9 +277,9 @@ struct amdgpu_ring_funcs {
void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
- int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
+ int (*reset)(struct amdgpu_ring *ring, unsigned int vmid,
+ struct amdgpu_fence *timedout_fence);
void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
- bool (*is_guilty)(struct amdgpu_ring *ring);
};
/**
@@ -284,6 +293,9 @@ struct amdgpu_ring {
struct amdgpu_bo *ring_obj;
uint32_t *ring;
+ /* backups for resets */
+ uint32_t *ring_backup;
+ unsigned int ring_backup_entries_to_copy;
unsigned rptr_offs;
u64 rptr_gpu_addr;
volatile u32 *rptr_cpu_addr;
@@ -425,7 +437,7 @@ struct amdgpu_ring {
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
-#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))
+#define amdgpu_ring_reset(r, v, f) (r)->funcs->reset((r), (v), (f))
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
@@ -550,4 +562,12 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type);
#endif
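With the signature change to funcs->reset, each per-IP callback now receives the timed-out fence and is expected to wrap its hardware-specific reset between the two helpers. A hypothetical callback might look as follows; my_engine_soft_reset() is a placeholder, not a real driver function, and this is a sketch of the intended call pattern rather than buildable code:

static int my_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
			 struct amdgpu_fence *timedout_fence)
{
	int r;

	/* stop scheduling and back up the non-guilty commands */
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	r = my_engine_soft_reset(ring);	/* placeholder HW reset */
	if (r)
		return r;

	/* test the ring, force-complete the guilty fence, replay the
	 * backed-up commands and restart the scheduler */
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}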
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 9b54a1ece447..8b8a04138711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -534,71 +534,48 @@ bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_rin
static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
{
struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
- int r = -EOPNOTSUPP;
-
- switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
- case IP_VERSION(4, 4, 2):
- case IP_VERSION(4, 4, 4):
- case IP_VERSION(4, 4, 5):
- /* For SDMA 4.x, use the existing DPM interface for backward compatibility,
- * we need to convert the logical instance ID to physical instance ID before reset.
- */
- r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id));
- break;
- case IP_VERSION(5, 0, 0):
- case IP_VERSION(5, 0, 1):
- case IP_VERSION(5, 0, 2):
- case IP_VERSION(5, 0, 5):
- case IP_VERSION(5, 2, 0):
- case IP_VERSION(5, 2, 2):
- case IP_VERSION(5, 2, 4):
- case IP_VERSION(5, 2, 5):
- case IP_VERSION(5, 2, 6):
- case IP_VERSION(5, 2, 3):
- case IP_VERSION(5, 2, 1):
- case IP_VERSION(5, 2, 7):
- if (sdma_instance->funcs->soft_reset_kernel_queue)
- r = sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
- break;
- default:
- break;
- }
- return r;
+ if (sdma_instance->funcs->soft_reset_kernel_queue)
+ return sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
+
+ return -EOPNOTSUPP;
}
/**
* amdgpu_sdma_reset_engine - Reset a specific SDMA engine
* @adev: Pointer to the AMDGPU device
* @instance_id: Logical ID of the SDMA engine instance to reset
+ * @caller_handles_kernel_queues: Skip kernel queue processing. Caller
+ * will handle it.
*
* Returns: 0 on success, or a negative error code on failure.
*/
-int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues)
{
int ret = 0;
struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
struct amdgpu_ring *page_ring = &sdma_instance->page;
- bool gfx_sched_stopped = false, page_sched_stopped = false;
mutex_lock(&sdma_instance->engine_reset_mutex);
- /* Stop the scheduler's work queue for the GFX and page rings if they are running.
- * This ensures that no new tasks are submitted to the queues while
- * the reset is in progress.
- */
- if (!amdgpu_ring_sched_ready(gfx_ring)) {
+
+ if (!caller_handles_kernel_queues) {
+ /* Stop the scheduler's work queue for the GFX and page rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
drm_sched_wqueue_stop(&gfx_ring->sched);
- gfx_sched_stopped = true;
- }
- if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) {
- drm_sched_wqueue_stop(&page_ring->sched);
- page_sched_stopped = true;
+ if (adev->sdma.has_page_queue)
+ drm_sched_wqueue_stop(&page_ring->sched);
}
- if (sdma_instance->funcs->stop_kernel_queue)
+ if (sdma_instance->funcs->stop_kernel_queue) {
sdma_instance->funcs->stop_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->stop_kernel_queue(page_ring);
+ }
/* Perform the SDMA reset for the specified instance */
ret = amdgpu_sdma_soft_reset(adev, instance_id);
@@ -607,20 +584,25 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
goto exit;
}
- if (sdma_instance->funcs->start_kernel_queue)
+ if (sdma_instance->funcs->start_kernel_queue) {
sdma_instance->funcs->start_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->start_kernel_queue(page_ring);
+ }
exit:
- /* Restart the scheduler's work queue for the GFX and page rings
- * if they were stopped by this function. This allows new tasks
- * to be submitted to the queues after the reset is complete.
- */
- if (!ret) {
- if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) {
+ if (!caller_handles_kernel_queues) {
+ /* Restart the scheduler's work queue for the GFX and page rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ if (!ret) {
+ amdgpu_fence_driver_force_completion(gfx_ring);
drm_sched_wqueue_start(&gfx_ring->sched);
- }
- if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) {
- drm_sched_wqueue_start(&page_ring->sched);
+ if (adev->sdma.has_page_queue) {
+ amdgpu_fence_driver_force_completion(page_ring);
+ drm_sched_wqueue_start(&page_ring->sched);
+ }
}
}
mutex_unlock(&sdma_instance->engine_reset_mutex);
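The new caller_handles_kernel_queues flag lets a per-ring reset path, which has already stopped the scheduler through the ring helpers, reuse the engine reset without this function stopping and restarting the work queues a second time. The two intended call sites, sketched as illustrative fragments (not buildable as-is):

/* full-engine path: let the function manage the kernel queues itself */
r = amdgpu_sdma_reset_engine(adev, instance_id, false);

/* per-ring reset path: the caller already bracketed the reset with the
 * ring helpers, so queue handling is skipped */
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_sdma_reset_engine(adev, instance_id, true);
if (!r)
	r = amdgpu_ring_reset_helper_end(ring, timedout_fence);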
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index e5f8951bbb6f..34311f32be4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -172,7 +172,8 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
-int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id);
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues);
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 11dd2e0f7979..d13e64a69e25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -167,25 +167,23 @@ TRACE_EVENT(amdgpu_cs_ioctl,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__field(struct dma_fence *, fence)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -193,24 +191,22 @@ TRACE_EVENT(amdgpu_sched_run_job,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -551,23 +547,19 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
__string(ring, sched_job->base.sched->name)
- __field(uint64_t, id)
__field(struct dma_fence *, fence)
- __field(uint64_t, ctx)
- __field(unsigned, seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
),
TP_fast_assign(
__assign_str(ring);
- __entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
- TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
- __get_str(ring), __entry->id,
- __entry->fence, __entry->ctx,
- __entry->seqno)
+ TP_printk("job ring=%s need pipe sync to fence=%llu:%llu",
+ __get_str(ring), __entry->ctx, __entry->seqno)
);
TRACE_EVENT(amdgpu_reset_reg_dumps,
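dma_fence contexts and seqnos are 64-bit values, so the old unsigned int trace fields silently truncated them; widening the fields and printing fence=%llu:%llu matches the convention used elsewhere in DRM tracing. A small stand-alone demonstration of the truncation the change avoids:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t seqno = 0x100000002ULL;  /* exceeds 32 bits after long uptime */
    unsigned int old_field = (unsigned int)seqno;  /* old field: truncated */

    printf("old: seqno=%u\n", old_field);       /* prints 2 */
    printf("new: seqno=%" PRIu64 "\n", seqno);  /* prints the full value */
    return 0;
}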
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9c5df35f05b7..27ab4e754b2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -299,7 +299,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_bo *abo_src, *abo_dst;
if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -934,7 +935,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
if (gtt->userptr) {
r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
if (r) {
- DRM_ERROR("failed to pin userptr\n");
+ dev_err(adev->dev, "failed to pin userptr\n");
return r;
}
} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
@@ -1060,7 +1061,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
- } else if (ttm->sg && gtt->gobj->import_attach) {
+ } else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
struct dma_buf_attachment *attach;
attach = gtt->gobj->import_attach;
@@ -1781,7 +1782,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
&ctx->c2p_bo,
NULL);
if (ret) {
- DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
+ dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret);
amdgpu_ttm_training_reserve_vram_fini(adev);
return ret;
}
@@ -1793,7 +1794,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
adev, adev->gmc.real_vram_size - reserve_size,
reserve_size, &adev->mman.fw_reserved_memory, NULL);
if (ret) {
- DRM_ERROR("alloc tmr failed(%d)!\n", ret);
+ dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
NULL, NULL);
return ret;
@@ -1864,13 +1865,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
adev->need_swiotlb,
dma_addressing_limited(adev->dev));
if (r) {
- DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+ dev_err(adev->dev,
+ "failed initializing buffer object driver(%d).\n", r);
return r;
}
r = amdgpu_ttm_pools_init(adev);
if (r) {
- DRM_ERROR("failed to init ttm pools(%d).\n", r);
+ dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
return r;
}
adev->mman.initialized = true;
@@ -1878,7 +1880,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Initialize VRAM pool with all of VRAM divided into pages */
r = amdgpu_vram_mgr_init(adev);
if (r) {
- DRM_ERROR("Failed initializing VRAM heap.\n");
+ dev_err(adev->dev, "Failed initializing VRAM heap.\n");
return r;
}
@@ -1958,7 +1960,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
}
- DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
+ dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n",
(unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
/* Compute GTT size, either based on TTM limit
@@ -1981,10 +1983,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Initialize GTT memory pool */
r = amdgpu_gtt_mgr_init(adev, gtt_size);
if (r) {
- DRM_ERROR("Failed initializing GTT heap.\n");
+ dev_err(adev->dev, "Failed initializing GTT heap.\n");
return r;
}
- DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
+ dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n",
(unsigned int)(gtt_size / (1024 * 1024)));
if (adev->flags & AMD_IS_APU) {
@@ -1995,40 +1997,40 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Initialize doorbell pool on PCI BAR */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
if (r) {
- DRM_ERROR("Failed initializing doorbell heap.\n");
+ dev_err(adev->dev, "Failed initializing doorbell heap.\n");
return r;
}
/* Create a doorbell page for kernel usage */
r = amdgpu_doorbell_create_kernel_doorbells(adev);
if (r) {
- DRM_ERROR("Failed to initialize kernel doorbells.\n");
+ dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
return r;
}
/* Initialize preemptible memory pool */
r = amdgpu_preempt_mgr_init(adev);
if (r) {
- DRM_ERROR("Failed initializing PREEMPT heap.\n");
+ dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
return r;
}
/* Initialize various on-chip memory pools */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
if (r) {
- DRM_ERROR("Failed initializing GDS heap.\n");
+ dev_err(adev->dev, "Failed initializing GDS heap.\n");
return r;
}
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
if (r) {
- DRM_ERROR("Failed initializing gws heap.\n");
+ dev_err(adev->dev, "Failed initializing gws heap.\n");
return r;
}
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
if (r) {
- DRM_ERROR("Failed initializing oa heap.\n");
+ dev_err(adev->dev, "Failed initializing oa heap.\n");
return r;
}
if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
@@ -2060,6 +2062,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
/* return the FW reserved memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
NULL);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL,
+ NULL);
if (adev->mman.stolen_reserved_size)
amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
NULL, NULL);
@@ -2089,7 +2093,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
- DRM_INFO("amdgpu: ttm finalized\n");
+ dev_info(adev->dev, "amdgpu: ttm finalized\n");
}
/**
@@ -2121,8 +2125,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL);
if (r) {
- DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
- r);
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
return;
}
@@ -2130,8 +2135,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL);
if (r) {
- DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
- r);
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
goto error_free_entity;
}
} else {
@@ -2202,7 +2208,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
int r;
if (!direct_submit && !ring->sched.ready) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -2237,7 +2244,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
error_free:
amdgpu_job_free(job);
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
return r;
}
@@ -2356,7 +2363,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
int r;
if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to clear memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to clear memory with ring turned off.\n");
return -EINVAL;
}
@@ -2416,7 +2424,7 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
man = ttm_manager_type(&adev->mman.bdev, mem_type);
break;
default:
- DRM_ERROR("Trying to evict invalid memory type\n");
+ dev_err(adev->dev, "Trying to evict invalid memory type\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 450e4bf093b7..2309df3f68a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -86,6 +86,7 @@ struct amdgpu_mman {
uint32_t discovery_tmr_size;
/* fw reserved memory */
struct amdgpu_bo *fw_reserved_memory;
+ struct amdgpu_bo *fw_reserved_memory_extend;
/* firmware VRAM reservation */
u64 fw_vram_usage_start_offset;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index eaddc441c51a..e96f24e9ad57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -32,6 +32,7 @@
static const struct kicker_device kicker_device_list[] = {
{0x744B, 0x00},
+ {0x7551, 0xC8},
};
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
@@ -1159,6 +1160,9 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev) && adev->firmware.fw_buf)
+ adev->firmware.fw_buf_mc = amdgpu_bo_fb_aper_addr(adev->firmware.fw_buf);
+
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
@@ -1397,8 +1401,8 @@ bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
if (adev->pdev->device == kicker_device_list[i].device &&
- adev->pdev->revision == kicker_device_list[i].revision)
- return true;
+ adev->pdev->revision == kicker_device_list[i].revision)
+ return true;
}
return false;
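The kicker check is a linear scan over {device, revision} pairs, so supporting a new board is just one more table row, as the hunk above shows. The same lookup in stand-alone form (values copied from the list above):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct kicker_device {
    unsigned short device;
    unsigned char revision;
};

static const struct kicker_device kicker_device_list[] = {
    {0x744B, 0x00},
    {0x7551, 0xC8},
};

static bool is_kicker(unsigned short device, unsigned char revision)
{
    for (size_t i = 0; i < sizeof(kicker_device_list) / sizeof(kicker_device_list[0]); i++) {
        if (kicker_device_list[i].device == device &&
            kicker_device_list[i].revision == revision)
            return true;
    }
    return false;
}

int main(void)
{
    printf("%d\n", is_kicker(0x7551, 0xC8));  /* prints 1 */
    return 0;
}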
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 295e7186e156..c3ace8030530 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -318,6 +318,10 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
amdgpu_bo_unreserve(queue->db_obj.obj);
}
amdgpu_bo_unref(&queue->db_obj.obj);
+
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove_recursive(queue->debugfs_queue);
+#endif
r = amdgpu_userq_unmap_helper(uq_mgr, queue);
amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
mutex_unlock(&uq_mgr->userq_mutex);
@@ -343,6 +347,46 @@ static int amdgpu_userq_priority_permit(struct drm_file *filp,
return -EACCES;
}
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
+{
+ struct amdgpu_usermode_queue *queue = m->private;
+ struct amdgpu_bo *bo;
+ int r;
+
+ if (!queue || !queue->mqd.obj)
+ return -EINVAL;
+
+ bo = amdgpu_bo_ref(queue->mqd.obj);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ amdgpu_bo_unref(&bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "queue_type %d\n", queue->queue_type);
+ seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
+
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+}
+
+static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_mqd_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_mqd_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_mqd_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
static int
amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
{
@@ -352,6 +396,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
const struct amdgpu_userq_funcs *uq_funcs;
struct amdgpu_usermode_queue *queue;
struct amdgpu_db_info db_info;
+ char *queue_name;
bool skip_map_queue;
uint64_t index;
int qid, r = 0;
@@ -475,6 +520,18 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
}
}
+ queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
+ if (!queue_name) {
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+ /* Queue dentry per client to hold MQD information */
+ queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
+ debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
+#endif
+ kfree(queue_name);
args->out.queue_id = qid;
@@ -664,7 +721,7 @@ static void amdgpu_userq_restore_worker(struct work_struct *work)
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
int ret;
- flush_work(&fpriv->evf_mgr.suspend_work.work);
+ flush_delayed_work(&fpriv->evf_mgr.suspend_work);
mutex_lock(&uq_mgr->userq_mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index ec040c2fd6c9..b1ca91b7cda4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -65,6 +65,7 @@ struct amdgpu_usermode_queue {
struct dma_fence *last_fence;
u32 xcp_id;
int priority;
+ struct dentry *debugfs_queue;
};
struct amdgpu_userq_funcs {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index a86616c6deef..c2a983ff23c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -239,8 +239,8 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
fence = &userq_fence->base;
userq_fence->fence_drv = fence_drv;
- dma_fence_init(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
- fence_drv->context, seq);
+ dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
+ fence_drv->context, seq);
amdgpu_userq_fence_driver_get(fence_drv);
dma_fence_get(fence);
@@ -334,7 +334,6 @@ static void amdgpu_userq_fence_release(struct dma_fence *f)
}
static const struct dma_fence_ops amdgpu_userq_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_userq_fence_get_driver_name,
.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
.signaled = amdgpu_userq_fence_signaled,
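dma_fence_init64() bakes the 64-bit seqno property in at init time instead of querying a use_64bit_seqno flag from the ops, which is why the flag line disappears from the ops table above. The practical difference is how "later" is decided: 32-bit seqnos are compared as a signed distance so that wraparound still orders correctly, while 64-bit seqnos are assumed never to wrap. A rough stand-alone sketch of the distinction (simplified from the dma_fence rules):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 32-bit rule: signed distance, so a wrapped seqno still counts as later */
static bool later32(uint32_t a, uint32_t b)
{
    return (int32_t)(b - a) > 0;
}

/* 64-bit rule: seqnos are assumed never to wrap, plain comparison */
static bool later64(uint64_t a, uint64_t b)
{
    return b > a;
}

int main(void)
{
    printf("%d\n", later32(0xFFFFFFFFu, 1u));  /* 1: wraparound handled */
    printf("%d\n", later64(5, 6));             /* 1 */
    return 0;
}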
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index c8885c3d54b3..f1f67521c29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -134,6 +134,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
+ mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
@@ -1451,3 +1452,78 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
return ret;
}
+
+/**
+ * amdgpu_vcn_reset_engine - Reset a specific VCN engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: VCN engine instance to reset
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev,
+ uint32_t instance_id)
+{
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id];
+ int r, i;
+
+ mutex_lock(&vinst->engine_reset_mutex);
+ /* Stop the scheduler's work queue for the dec and enc rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
+ drm_sched_wqueue_stop(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);
+
+ /* Perform the VCN reset for the specified instance */
+ r = vinst->reset(vinst);
+ if (r)
+ goto unlock;
+ r = amdgpu_ring_test_ring(&vinst->ring_dec);
+ if (r)
+ goto unlock;
+ for (i = 0; i < vinst->num_enc_rings; i++) {
+ r = amdgpu_ring_test_ring(&vinst->ring_enc[i]);
+ if (r)
+ goto unlock;
+ }
+ amdgpu_fence_driver_force_completion(&vinst->ring_dec);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]);
+
+ /* Restart the scheduler's work queue for the dec and enc rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ drm_sched_wqueue_start(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_start(&vinst->ring_enc[i].sched);
+
+unlock:
+ mutex_unlock(&vinst->engine_reset_mutex);
+
+ return r;
+}
+
+/**
+ * amdgpu_vcn_ring_reset - Reset a VCN ring
+ * @ring: ring to reset
+ * @vmid: vmid of guilty job
+ * @timedout_fence: fence of timed out job
+ *
+ * This helper is for VCN blocks without unified queues because
+ * resetting the engine resets all queues in that case. With
+ * unified queues we have one queue per engine.
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ return -EINVAL;
+
+ return amdgpu_vcn_reset_engine(adev, ring->me);
+}
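amdgpu_vcn_ring_reset() is meant to be plugged into the ring funcs of the older, non-unified-queue VCN blocks, where resetting the engine necessarily takes down every queue on the instance. Hypothetical wiring, with the table name and the elided fields invented for illustration:

/* hypothetical funcs table for a non-unified-queue VCN block */
static const struct amdgpu_ring_funcs my_vcn_dec_ring_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	/* ... other callbacks elided ... */
	.reset = amdgpu_vcn_ring_reset,
};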
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 83adf81defc7..0bc0a94d7cf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -330,7 +330,9 @@ struct amdgpu_vcn_inst {
struct dpg_pause_state *new_state);
int (*set_pg_state)(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state);
+ int (*reset)(struct amdgpu_vcn_inst *vinst);
bool using_unified_queue;
+ struct mutex engine_reset_mutex;
};
struct amdgpu_vcn_ras {
@@ -552,5 +554,7 @@ void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
-
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *guilty_fence);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 577c6194db78..3da3ebb1d9a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -152,8 +152,10 @@ enum AMDGIM_REG_ACCESS_FLAG {
AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
/* Use RLC to program GC regs */
AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
- /* Use PSP to program L1_TLB_CNTL*/
+ /* Use PSP to program L1_TLB_CNTL */
AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3),
+ /* Use RLCG to program SQ_CONFIG1 */
+ AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG = (1 << 4),
};
struct amdgim_pf2vf_info_v1 {
@@ -301,6 +303,9 @@ struct amdgpu_virt {
union amd_sriov_ras_caps ras_telemetry_en_caps;
struct amdgpu_virt_ras ras;
struct amd_sriov_ras_telemetry_error_count count_cache;
+
+ /* support hibernate and resume on a different VF on XGMI-enabled systems */
+ bool is_xgmi_node_migrate_enabled;
};
struct amdgpu_video_codec_info;
@@ -343,6 +348,10 @@ struct amdgpu_video_codec_info;
#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
+#define amdgpu_sriov_reg_access_sq_config(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
@@ -386,6 +395,10 @@ static inline bool is_virtual_machine(void)
((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
#define amdgpu_sriov_is_mes_info_enable(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE)
+
+#define amdgpu_virt_xgmi_migrate_enabled(adev) \
+ ((adev)->virt.is_xgmi_node_migrate_enabled && (adev)->gmc.xgmi.node_segment_size != 0)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3911c78f8282..0b87798daebd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -622,7 +622,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pr_warn_ratelimited("Evicted user BO is not reserved\n");
if (ti) {
- pr_warn_ratelimited("pid %d\n", ti->pid);
+ pr_warn_ratelimited("pid %d\n", ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
@@ -654,11 +654,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Check if all VM PDs/PTs are ready for updates
*
* Returns:
- * True if VM is not evicting.
+ * True if the VM is not evicting and none of its entities are stopped.
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- bool empty;
bool ret;
amdgpu_vm_eviction_lock(vm);
@@ -666,10 +665,18 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
amdgpu_vm_eviction_unlock(vm);
spin_lock(&vm->status_lock);
- empty = list_empty(&vm->evicted);
+ ret &= list_empty(&vm->evicted);
spin_unlock(&vm->status_lock);
- return ret && empty;
+ spin_lock(&vm->immediate.lock);
+ ret &= !vm->immediate.stopped;
+ spin_unlock(&vm->immediate.lock);
+
+ spin_lock(&vm->delayed.lock);
+ ret &= !vm->delayed.stopped;
+ spin_unlock(&vm->delayed.lock);
+
+ return ret;
}
/**
@@ -765,6 +772,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool cleaner_shader_needed = false;
bool pasid_mapping_needed = false;
struct dma_fence *fence = NULL;
+ struct amdgpu_fence *af;
unsigned int patch;
int r;
@@ -830,6 +838,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
r = amdgpu_fence_emit(ring, &fence, NULL, 0);
if (r)
return r;
+ /* this is part of the job's context */
+ af = container_of(fence, struct amdgpu_fence, base);
+ af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
}
if (vm_flush_needed) {
@@ -1271,8 +1282,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
} else {
struct drm_gem_object *obj = &bo->tbo.base;
- if (obj->import_attach && bo_va->is_xgmi) {
- struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+ if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
+ struct dma_buf *dma_buf = obj->dma_buf;
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -1631,7 +1642,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
* validation
*/
if (vm->is_compute_context &&
- bo_va->base.bo->tbo.base.import_attach &&
+ drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
(!bo_va->base.bo->tbo.resource ||
bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
amdgpu_vm_bo_evicted_user(&bo_va->base);
@@ -2395,10 +2406,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
else
adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
- DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
- vm_size, adev->vm_manager.num_level + 1,
- adev->vm_manager.block_size,
- adev->vm_manager.fragment_size);
+ dev_info(
+ adev->dev,
+ "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
+ vm_size, adev->vm_manager.num_level + 1,
+ adev->vm_manager.block_size, adev->vm_manager.fragment_size);
}
/**
@@ -2409,13 +2421,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
- DMA_RESV_USAGE_BOOKKEEP,
- true, timeout);
+ timeout = drm_sched_entity_flush(&vm->immediate, timeout);
if (timeout <= 0)
return timeout;
- return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
+ return drm_sched_entity_flush(&vm->delayed, timeout);
}
static void amdgpu_vm_destroy_task_info(struct kref *kref)
@@ -2447,7 +2457,8 @@ amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
*/
void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
{
- kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
+ if (task_info)
+ kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
}
/**
@@ -2507,11 +2518,11 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
if (!vm->task_info)
return;
- if (vm->task_info->pid == current->pid)
+ if (vm->task_info->task.pid == current->pid)
return;
- vm->task_info->pid = current->pid;
- get_task_comm(vm->task_info->task_name, current);
+ vm->task_info->task.pid = current->pid;
+ get_task_comm(vm->task_info->task.comm, current);
if (current->group_leader->mm != current->mm)
return;
@@ -2564,8 +2575,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_GFX);
- DRM_DEBUG_DRIVER("VM update mode is %s\n",
- vm->use_cpu_for_update ? "CPU" : "SDMA");
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update &&
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
@@ -2607,7 +2618,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_vm_create_task_info(vm);
if (r)
- DRM_DEBUG("Failed to create task info for VM\n");
+ dev_dbg(adev->dev, "Failed to create task info for VM\n");
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
@@ -2658,8 +2669,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
/* Update VM state */
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- DRM_DEBUG_DRIVER("VM update mode is %s\n",
- vm->use_cpu_for_update ? "CPU" : "SDMA");
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update &&
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
@@ -2774,7 +2785,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dev_warn(adev->dev,
"VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
- ti->process_name, ti->pid, ti->task_name, ti->tgid);
+ ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
}
amdgpu_vm_put_task_info(vm->task_info);
@@ -2982,7 +2993,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
error_unlock:
amdgpu_bo_unreserve(root);
if (r < 0)
- DRM_ERROR("Can't handle page fault (%d)\n", r);
+ dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
error_unref:
amdgpu_bo_unref(&root);
@@ -3156,3 +3167,12 @@ bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
}
+
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info)
+{
+ dev_err(adev->dev,
+ " Process %s pid %d thread %s pid %d\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task.comm, task_info->task.pid);
+}
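The task-info rework embeds the generic struct drm_wedge_task_info at the head of amdgpu_task_info, so pid and comm are shared with the DRM wedge infrastructure rather than duplicated, which is why accesses become task.pid and task.comm throughout. The embedding pattern in stand-alone form (the structs here are local stand-ins, not the real definitions):

#include <stdio.h>

/* stand-in for struct drm_wedge_task_info */
struct wedge_task_info {
    int pid;
    char comm[16];
};

/* stand-in for struct amdgpu_task_info: generic part embedded first */
struct task_info {
    struct wedge_task_info task;
    char process_name[16];
    int tgid;
};

int main(void)
{
    struct task_info ti = {
        .task = { .pid = 42, .comm = "worker" },
        .process_name = "app",
        .tgid = 40,
    };

    /* fields are reached through the embedded struct, and &ti.task can
     * be handed to generic code expecting the wedge type */
    printf("Process %s pid %d thread %s pid %d\n",
           ti.process_name, ti.tgid, ti.task.comm, ti.task.pid);
    return 0;
}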
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index f3ad687125ad..fd086efd8457 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -236,9 +236,8 @@ struct amdgpu_vm_pte_funcs {
};
struct amdgpu_task_info {
+ struct drm_wedge_task_info task;
char process_name[TASK_COMM_LEN];
- char task_name[TASK_COMM_LEN];
- pid_t pid;
pid_t tgid;
struct kref refcount;
};
@@ -668,4 +667,7 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
index 51cddfa3f1e8..5d26797356a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
@@ -71,7 +71,6 @@ static void amdgpu_tlb_fence_work(struct work_struct *work)
}
static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_tlb_fence_get_driver_name,
.get_timeline_name = amdgpu_tlb_fence_get_timeline_name
};
@@ -101,8 +100,8 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
INIT_WORK(&f->work, amdgpu_tlb_fence_work);
spin_lock_init(&f->lock);
- dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
- vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
+ dma_fence_init64(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
+ vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
/* TODO: We probably need a separate wq here */
dma_fence_get(&f->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 07c936e90d8e..78f9e86ccc09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -648,9 +648,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
- amdgpu_vram_mgr_do_reserve(man);
-
drm_buddy_free_list(mm, &vres->blocks, vres->flags);
+ amdgpu_vram_mgr_do_reserve(man);
mutex_unlock(&mgr->lock);
atomic64_sub(vis_usage, &mgr->vis_usage);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index b256cbc2bc27..2c88d5fd87da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -66,7 +66,10 @@ to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
{
- to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED;
+ struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res);
+
+ WARN_ON(ares->flags & DRM_BUDDY_CLEARED);
+ ares->flags |= DRM_BUDDY_CLEARED;
}
#endif
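The added WARN_ON turns a silent double-set of DRM_BUDDY_CLEARED into a loud one, catching accounting bugs where a resource is marked cleared twice. The same one-shot-flag guard in stand-alone form, with assert() standing in for WARN_ON():

#include <assert.h>
#include <stdio.h>

#define FLAG_CLEARED (1u << 0)

static void set_cleared(unsigned int *flags)
{
    /* setting an already-set one-shot flag indicates a logic bug */
    assert(!(*flags & FLAG_CLEARED));
    *flags |= FLAG_CLEARED;
}

int main(void)
{
    unsigned int flags = 0;

    set_cleared(&flags);        /* fine */
    printf("flags=%#x\n", flags);
    /* a second set_cleared(&flags) would trip the assertion */
    return 0;
}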
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index 322816805bfb..c417f8689220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -218,15 +218,27 @@ int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}
-int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+static bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr)
{
- int mode;
+ if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
+ return true;
if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
- return xcp_mgr->mode;
+ return true;
- if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
+ if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE &&
+ xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS)
+ return true;
+
+ return false;
+}
+
+int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int mode;
+
+ if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr))
return xcp_mgr->mode;
if (!(flags & AMDGPU_XCP_FL_LOCKED))
@@ -445,6 +457,222 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
}
}
+int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds)
+{
+ u32 sel_xcp_id;
+ int i;
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+
+ if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
+ u32 least_ref_cnt = ~0;
+
+ fpriv->xcp_id = 0;
+ for (i = 0; i < xcp_mgr->num_xcps; i++) {
+ u32 total_ref_cnt;
+
+ total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt);
+ if (total_ref_cnt < least_ref_cnt) {
+ fpriv->xcp_id = i;
+ least_ref_cnt = total_ref_cnt;
+ }
+ }
+ }
+ sel_xcp_id = fpriv->xcp_id;
+
+ if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
+ *num_scheds =
+ xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
+ *scheds =
+ xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
+ atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
+ dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id);
+ } else {
+ dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static void amdgpu_set_xcp_id(struct amdgpu_device *adev,
+ uint32_t inst_idx,
+ struct amdgpu_ring *ring)
+{
+ int xcp_id;
+ enum AMDGPU_XCP_IP_BLOCK ip_blk;
+ uint32_t inst_mask;
+
+ ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
+ if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
+ (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
+ return;
+
+ inst_mask = 1 << inst_idx;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_RING_TYPE_COMPUTE:
+ case AMDGPU_RING_TYPE_KIQ:
+ ip_blk = AMDGPU_XCP_GFX;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ ip_blk = AMDGPU_XCP_SDMA;
+ break;
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ ip_blk = AMDGPU_XCP_VCN;
+ break;
+ default:
+ dev_err(adev->dev, "Not support ring type %d!", ring->funcs->type);
+ return;
+ }
+
+ for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
+ if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
+ ring->xcp_id = xcp_id;
+ dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
+ ring->xcp_id);
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
+ break;
+ }
+ }
+}
+
+static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int sel_xcp_id)
+{
+ unsigned int *num_gpu_sched;
+
+ num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
+ .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
+ adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
+ .sched[(*num_gpu_sched)++] = &ring->sched;
+ dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d",
+ ring->name, sel_xcp_id, ring->funcs->type,
+ ring->hw_prio, *num_gpu_sched);
+}
+
+static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
+ memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
+ }
+
+ if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ return 0;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ ring = adev->rings[i];
+ if (!ring || !ring->sched.ready || ring->no_scheduler)
+ continue;
+
+ amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+
+ /* VCN may be shared by two partitions under CPX MODE in certain
+ * configs.
+ */
+ if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+ (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
+ amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
+ }
+
+ return 0;
+}
+
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->num_rings; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+ ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ amdgpu_set_xcp_id(adev, ring->xcc_id, ring);
+ else
+ amdgpu_set_xcp_id(adev, ring->me, ring);
+ }
+
+ return amdgpu_xcp_sched_list_update(adev);
+}
+
+void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+
+ xcp_mgr->supp_xcp_modes = 0;
+
+ switch (NUM_XCC(adev->gfx.xcc_mask)) {
+ case 8:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_QPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 6:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_TPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 4:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 2:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 1:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+
+ default:
+ break;
+ }
+}
+
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ /* TODO:
+ * Stop user queues and threads, and make sure GPU is empty of work.
+ */
+
+ if (flags & AMDGPU_XCP_OPS_KFD)
+ amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
+
+ return 0;
+}
+
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & AMDGPU_XCP_OPS_KFD) {
+ amdgpu_amdkfd_device_probe(xcp_mgr->adev);
+ amdgpu_amdkfd_device_init(xcp_mgr->adev);
+ /* If KFD init failed, return failure */
+ if (!xcp_mgr->adev->kfd.init_complete)
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
/*====================== xcp sysfs - configuration ======================*/
#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \
static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 454b33f889fb..70a0f8400b57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -39,6 +39,8 @@
#define AMDGPU_XCP_NO_PARTITION (~0)
+#define AMDGPU_XCP_OPS_KFD (1 << 0)
+
struct amdgpu_fpriv;
enum AMDGPU_XCP_IP_BLOCK {
@@ -144,10 +146,6 @@ struct amdgpu_xcp_mgr_funcs {
int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
- int (*select_scheds)(struct amdgpu_device *adev,
- u32 hw_ip, u32 hw_prio, struct amdgpu_fpriv *fpriv,
- unsigned int *num_scheds, struct drm_gpu_scheduler ***scheds);
- int (*update_partition_sched_list)(struct amdgpu_device *adev);
};
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
@@ -176,19 +174,18 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
struct drm_file *file_priv);
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity);
-
+int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds);
+void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr);
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev);
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev);
void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev);
-#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
- ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
- (adev)->xcp_mgr->funcs->select_scheds ? \
- (adev)->xcp_mgr->funcs->select_scheds((adev), (e), (c), (d), (x), (y)) : -ENOENT)
-#define amdgpu_xcp_update_partition_sched_list(adev) \
- ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
- (adev)->xcp_mgr->funcs->update_partition_sched_list ? \
- (adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0)
-
static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr)
{
if (!xcp_mgr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index d9ad37711c3e..1ede308a7c67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -1771,16 +1771,25 @@ void amdgpu_xgmi_early_init(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
- adev->gmc.xgmi.max_speed = XGMI_SPEED_25GT;
+ /* 25 GT/s */
+ adev->gmc.xgmi.max_speed = 25;
adev->gmc.xgmi.max_width = 16;
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
case IP_VERSION(9, 5, 0):
- adev->gmc.xgmi.max_speed = XGMI_SPEED_32GT;
+ /* 32 GT/s */
+ adev->gmc.xgmi.max_speed = 32;
adev->gmc.xgmi.max_width = 16;
break;
default:
break;
}
}
+
+void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
+ uint16_t max_speed, uint8_t max_width)
+{
+ adev->gmc.xgmi.max_speed = max_speed;
+ adev->gmc.xgmi.max_width = max_width;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index f994be985f42..bba0b26fee8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -25,12 +25,6 @@
#include <drm/task_barrier.h>
#include "amdgpu_ras.h"
-enum amdgpu_xgmi_link_speed {
- XGMI_SPEED_16GT = 16,
- XGMI_SPEED_25GT = 25,
- XGMI_SPEED_32GT = 32
-};
-
struct amdgpu_hive_info {
struct kobject kobj;
uint64_t hive_id;
@@ -97,7 +91,7 @@ struct amdgpu_xgmi {
struct ras_common_if *ras_if;
bool connected_to_cpu;
struct amdgpu_xgmi_ras *ras;
- enum amdgpu_xgmi_link_speed max_speed;
+ uint16_t max_speed;
uint8_t max_width;
};
@@ -130,4 +124,6 @@ int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num);
void amdgpu_xgmi_early_init(struct amdgpu_device *adev);
uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
+void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
+ uint16_t max_speed, uint8_t max_width);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 92ca13097aaa..33edad1f9dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -113,7 +113,8 @@ union amd_sriov_reg_access_flags {
uint32_t vf_reg_access_mmhub : 1;
uint32_t vf_reg_access_gc : 1;
uint32_t vf_reg_access_l1_tlb_cntl : 1;
- uint32_t reserved : 28;
+ uint32_t vf_reg_access_sq_config : 1;
+ uint32_t reserved : 27;
} flags;
uint32_t all;
};
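
Adding vf_reg_access_sq_config consumes one bit of reserved, so the union stays exactly 32 bits wide; host and guest only agree on the layout if both sides shrink reserved in lockstep. A self-contained sketch of the same bitfield-in-a-union pattern (the example_* names are illustrative, not the driver's):

#include <stdint.h>

union example_reg_access_flags {
	struct {
		uint32_t vf_reg_access_l1_tlb_cntl : 1;
		uint32_t vf_reg_access_sq_config   : 1;	/* newly claimed bit */
		uint32_t reserved                  : 30;
	} flags;
	uint32_t all;	/* whole-word view for mailbox transfer */
};

static int example_sq_config_allowed(union example_reg_access_flags f)
{
	return f.flags.vf_reg_access_sq_config;
}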
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 1c083304ae77..811124ff88a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -29,12 +29,11 @@
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"
+#include "amdgpu_ip.h"
#define XCP_INST_MASK(num_inst, xcp_id) \
(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
-#define AMDGPU_XCP_OPS_KFD (1 << 0)
-
void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
int i;
@@ -62,234 +61,6 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
-static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
-{
- return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
-}
-
-static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
- uint32_t inst_idx, struct amdgpu_ring *ring)
-{
- int xcp_id;
- enum AMDGPU_XCP_IP_BLOCK ip_blk;
- uint32_t inst_mask;
-
- ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
- adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
- if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
- (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
- return;
-
- inst_mask = 1 << inst_idx;
-
- switch (ring->funcs->type) {
- case AMDGPU_HW_IP_GFX:
- case AMDGPU_RING_TYPE_COMPUTE:
- case AMDGPU_RING_TYPE_KIQ:
- ip_blk = AMDGPU_XCP_GFX;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- ip_blk = AMDGPU_XCP_SDMA;
- break;
- case AMDGPU_RING_TYPE_VCN_ENC:
- case AMDGPU_RING_TYPE_VCN_JPEG:
- ip_blk = AMDGPU_XCP_VCN;
- break;
- default:
- DRM_ERROR("Not support ring type %d!", ring->funcs->type);
- return;
- }
-
- for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
- if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
- ring->xcp_id = xcp_id;
- dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
- ring->xcp_id);
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
- adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
- break;
- }
- }
-}
-
-static void aqua_vanjaram_xcp_gpu_sched_update(
- struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- unsigned int sel_xcp_id)
-{
- unsigned int *num_gpu_sched;
-
- num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
- .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
- adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
- .sched[(*num_gpu_sched)++] = &ring->sched;
- DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
- sel_xcp_id, ring->funcs->type,
- ring->hw_prio, *num_gpu_sched);
-}
-
-static int aqua_vanjaram_xcp_sched_list_update(
- struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring;
- int i;
-
- for (i = 0; i < MAX_XCP; i++) {
- atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
- memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
- }
-
- if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
- return 0;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- ring = adev->rings[i];
- if (!ring || !ring->sched.ready || ring->no_scheduler)
- continue;
-
- aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
-
- /* VCN may be shared by two partitions under CPX MODE in certain
- * configs.
- */
- if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
- aqua_vanjaram_xcp_vcn_shared(adev))
- aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
- }
-
- return 0;
-}
-
-static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->num_rings; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
- ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
- aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
- else
- aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
- }
-
- return aqua_vanjaram_xcp_sched_list_update(adev);
-}
-
-static int aqua_vanjaram_select_scheds(
- struct amdgpu_device *adev,
- u32 hw_ip,
- u32 hw_prio,
- struct amdgpu_fpriv *fpriv,
- unsigned int *num_scheds,
- struct drm_gpu_scheduler ***scheds)
-{
- u32 sel_xcp_id;
- int i;
-
- if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
- u32 least_ref_cnt = ~0;
-
- fpriv->xcp_id = 0;
- for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
- u32 total_ref_cnt;
-
- total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
- if (total_ref_cnt < least_ref_cnt) {
- fpriv->xcp_id = i;
- least_ref_cnt = total_ref_cnt;
- }
- }
- }
- sel_xcp_id = fpriv->xcp_id;
-
- if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
- *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
- *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
- atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
- DRM_DEBUG("Selected partition #%d", sel_xcp_id);
- } else {
- DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
- return -ENOENT;
- }
-
- return 0;
-}
-
-static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type block,
- int8_t inst)
-{
- int8_t dev_inst;
-
- switch (block) {
- case GC_HWIP:
- case SDMA0_HWIP:
- /* Both JPEG and VCN as JPEG is only alias of VCN */
- case VCN_HWIP:
- dev_inst = adev->ip_map.dev_inst[block][inst];
- break;
- default:
- /* For rest of the IPs, no look up required.
- * Assume 'logical instance == physical instance' for all configs. */
- dev_inst = inst;
- break;
- }
-
- return dev_inst;
-}
-
-static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type block,
- uint32_t mask)
-{
- uint32_t dev_mask = 0;
- int8_t log_inst, dev_inst;
-
- while (mask) {
- log_inst = ffs(mask) - 1;
- dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
- dev_mask |= (1 << dev_inst);
- mask &= ~(1 << log_inst);
- }
-
- return dev_mask;
-}
-
-static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type ip_block,
- uint32_t inst_mask)
-{
- int l = 0, i;
-
- while (inst_mask) {
- i = ffs(inst_mask) - 1;
- adev->ip_map.dev_inst[ip_block][l++] = i;
- inst_mask &= ~(1 << i);
- }
- for (; l < HWIP_MAX_INSTANCE; l++)
- adev->ip_map.dev_inst[ip_block][l] = -1;
-}
-
-void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
-{
- u32 ip_map[][2] = {
- { GC_HWIP, adev->gfx.xcc_mask },
- { SDMA0_HWIP, adev->sdma.sdma_mask },
- { VCN_HWIP, adev->vcn.inst_mask },
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
- aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
-
- adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
- adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
-}
-
/* Fixed pattern for smn addressing on different AIDs:
* bit[34]: indicate cross AID access
* bit[33:32]: indicate target AID id
@@ -353,11 +124,14 @@ static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
if (adev->nbio.funcs->get_compute_partition_mode) {
mode = adev->nbio.funcs->get_compute_partition_mode(adev);
- if (mode != derv_mode)
+ if (mode != derv_mode) {
dev_warn(
adev->dev,
"Mismatch in compute partition mode - reported : %d derived : %d",
mode, derv_mode);
+ if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+ amdgpu_device_bus_status_check(adev);
+ }
}
return mode;
@@ -453,6 +227,7 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
uint16_t *nps_modes)
{
struct amdgpu_device *adev = xcp_mgr->adev;
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
return -EINVAL;
@@ -476,12 +251,14 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
*num_xcp = 4;
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (gc_ver == IP_VERSION(9, 5, 0))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_CPX_PARTITION_MODE:
*num_xcp = NUM_XCC(adev->gfx.xcc_mask);
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
- if (amdgpu_sriov_vf(adev))
+ if (gc_ver == IP_VERSION(9, 5, 0))
*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
default:
@@ -593,72 +370,6 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
return false;
}
-static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
- /* TODO:
- * Stop user queues and threads, and make sure GPU is empty of work.
- */
-
- if (flags & AMDGPU_XCP_OPS_KFD)
- amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
-
- return 0;
-}
-
-static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
- int ret = 0;
-
- if (flags & AMDGPU_XCP_OPS_KFD) {
- amdgpu_amdkfd_device_probe(xcp_mgr->adev);
- amdgpu_amdkfd_device_init(xcp_mgr->adev);
- /* If KFD init failed, return failure */
- if (!xcp_mgr->adev->kfd.init_complete)
- ret = -EIO;
- }
-
- return ret;
-}
-
-static void
-__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
-{
- struct amdgpu_device *adev = xcp_mgr->adev;
-
- xcp_mgr->supp_xcp_modes = 0;
-
- switch (NUM_XCC(adev->gfx.xcc_mask)) {
- case 8:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_DPX_PARTITION_MODE) |
- BIT(AMDGPU_QPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 6:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_TPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 4:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_DPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- /* this seems only existing in emulation phase */
- case 2:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 1:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
-
- default:
- break;
- }
-}
-
static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
int mode;
@@ -705,7 +416,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
goto out;
}
- ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
+ ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
if (ret)
goto unlock;
@@ -718,7 +429,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
*num_xcps = num_xcc / num_xcc_per_xcp;
amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
- ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
+ ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
if (!ret)
__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
@@ -801,9 +512,6 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
- .select_scheds = &aqua_vanjaram_select_scheds,
- .update_partition_sched_list =
- &aqua_vanjaram_update_partition_sched_list
};
static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
@@ -818,7 +526,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
if (ret)
return ret;
- __aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
+ amdgpu_xcp_update_supported_modes(adev->xcp_mgr);
/* TODO: Default memory node affinity init */
return ret;
@@ -858,7 +566,7 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
if (ret)
return ret;
- aqua_vanjaram_ip_map_init(adev);
+ amdgpu_ip_map_init(adev);
return 0;
}
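
The partition-switch bracket and the supported-modes table deleted above migrate into common code as amdgpu_xcp_pre_partition_switch(), amdgpu_xcp_post_partition_switch() and amdgpu_xcp_update_supported_modes(); the AMDGPU_XCP_OPS_KFD define presumably moves to the xcp header with them. A sketch of the relocated pair, assuming the bodies transfer unchanged from the static versions removed here:

int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* Original TODO: quiesce user queues before switching modes. */
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* Fail the switch if KFD did not come back up. */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}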
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 75ea071744eb..7bd506f06eb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4952,11 +4952,15 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
+
adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0);
if (r) {
@@ -9046,21 +9050,6 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask);
}
-static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, mmSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -9522,7 +9511,9 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -9532,15 +9523,14 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
u64 addr;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
- if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) {
+ if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7)) {
spin_unlock_irqrestore(&kiq->ring_lock, flags);
return -ENOMEM;
}
@@ -9560,12 +9550,9 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
0, 1, 0x20);
gfx_v10_0_ring_emit_reg_wait(kiq_ring,
SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff);
- kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
@@ -9575,11 +9562,25 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -9587,12 +9588,11 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -9603,9 +9603,8 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
0, 0);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
@@ -9641,13 +9640,12 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -9882,7 +9880,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kgq,
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
@@ -9923,7 +9920,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kcq,
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
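
Every per-queue reset callback in this series is rewritten around the same bracket: amdgpu_ring_reset_helper_begin() before any hardware access, amdgpu_ring_reset_helper_end() in place of the old trailing amdgpu_ring_test_ring(), with the timed-out fence threaded through; the KIQ ring test also moves inside the ring_lock critical section. A condensed sketch of the shape; do_hw_specific_requeue() is a hypothetical stand-in for the per-ASIC unmap/remap sequence, and the begin/end semantics in the comments are inferred from how the helpers substitute for the old code:

static int example_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid,
			     struct amdgpu_fence *timedout_fence)
{
	int r;

	/* Presumably marks the guilty fence and quiesces the ring. */
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	r = do_hw_specific_requeue(ring, vmid);	/* hypothetical */
	if (r)
		return r;

	/* Presumably re-tests the ring and resumes fence processing. */
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}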
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index ec9b84f92d46..c01c241a1b06 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1806,12 +1806,17 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if ((adev->gfx.me_fw_version >= 2280) &&
- (adev->gfx.mec_fw_version >= 2410)) {
- adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ (adev->gfx.mec_fw_version >= 2410) &&
+ !amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
break;
default:
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
break;
}
@@ -6283,21 +6288,6 @@ static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask, 0x20);
}
-static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, regSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -6811,13 +6801,14 @@ static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
@@ -6840,7 +6831,7 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
@@ -6973,13 +6964,14 @@ static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r = 0;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
@@ -7000,7 +6992,7 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -7236,7 +7228,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kgq,
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
@@ -7278,7 +7269,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kcq,
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 1234c8d64e20..3e138527d534 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -79,6 +79,7 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin");
static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
@@ -586,7 +587,7 @@ out:
static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
{
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -613,9 +614,14 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
if (!amdgpu_sriov_vf(adev)) {
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
- AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_rlc.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -1542,10 +1548,14 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
if ((adev->gfx.me_fw_version >= 2660) &&
- (adev->gfx.mec_fw_version >= 2920)) {
- adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ (adev->gfx.mec_fw_version >= 2920) &&
+ !amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
+ break;
+ default:
+ break;
}
if (!adev->enable_mes_kiq) {
@@ -4690,21 +4700,6 @@ static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask, 0x20);
}
-static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, regSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -5307,13 +5302,14 @@ static int gfx_v12_reset_gfx_pipe(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
@@ -5335,7 +5331,7 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
@@ -5421,13 +5417,14 @@ static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
@@ -5448,7 +5445,7 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring)
@@ -5526,7 +5523,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kgq,
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
@@ -5565,7 +5561,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kcq,
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
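
This RLC change and the IMU change further down follow one pattern: request the separately signed *_kicker.bin image when amdgpu_is_kicker_fw() reports a kicker-fused part, with ucode_prefix widened from 15 to 30 bytes to fit the longer name. The repeated branch could be folded as below; example_request_rlc_fw() is a sketch, not code from this series:

static int example_request_rlc_fw(struct amdgpu_device *adev,
				  const char *ucode_prefix)
{
	const char *name = amdgpu_is_kicker_fw(adev) ? "rlc_kicker" : "rlc";

	return amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
				    AMDGPU_UCODE_REQUIRED,
				    "amdgpu/%s_%s.bin", ucode_prefix, name);
}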
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index da0534ff1271..2aa323dab34e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4884,76 +4884,6 @@ static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
-static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
- int mem_space, int opt, uint32_t addr0,
- uint32_t addr1, uint32_t ref, uint32_t mask,
- uint32_t inv)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring,
- /* memory (1) or register (0) */
- (WAIT_REG_MEM_MEM_SPACE(mem_space) |
- WAIT_REG_MEM_OPERATION(opt) | /* wait */
- WAIT_REG_MEM_FUNCTION(3) | /* equal */
- WAIT_REG_MEM_ENGINE(eng_sel)));
-
- if (mem_space)
- BUG_ON(addr0 & 0x3); /* Dword align */
- amdgpu_ring_write(ring, addr0);
- amdgpu_ring_write(ring, addr1);
- amdgpu_ring_write(ring, ref);
- amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, inv); /* poll interval */
-}
-
-static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
-}
-
-static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
- return -ENOMEM;
- gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
- gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
@@ -5003,7 +4933,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_wreg = gfx_v7_0_ring_emit_wreg,
.soft_recovery = gfx_v7_0_ring_soft_recovery,
.emit_mem_sync = gfx_v7_0_emit_mem_sync,
- .reset = gfx_v7_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index bc983ecf3d99..367449d8061b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6340,34 +6340,6 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
amdgpu_ring_write(ring, val);
}
-static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
- int mem_space, int opt, uint32_t addr0,
- uint32_t addr1, uint32_t ref, uint32_t mask,
- uint32_t inv)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring,
- /* memory (1) or register (0) */
- (WAIT_REG_MEM_MEM_SPACE(mem_space) |
- WAIT_REG_MEM_OPERATION(opt) | /* wait */
- WAIT_REG_MEM_FUNCTION(3) | /* equal */
- WAIT_REG_MEM_ENGINE(eng_sel)));
-
- if (mem_space)
- BUG_ON(addr0 & 0x3); /* Dword align */
- amdgpu_ring_write(ring, addr0);
- amdgpu_ring_write(ring, addr1);
- amdgpu_ring_write(ring, ref);
- amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, inv); /* poll interval */
-}
-
-static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
-}
-
static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6844,48 +6816,6 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
-static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
- return -ENOMEM;
- gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
- gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6951,7 +6881,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync,
- .reset = gfx_v8_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index ad9be3656653..20b30f4b3c7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2410,6 +2410,8 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
if (r) {
@@ -7171,53 +7173,9 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v9_0_ring_emit_wreg(kiq_ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 7 + 5))
- return -ENOMEM;
- gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v9_0_ring_emit_reg_wait(ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff);
- gfx_v9_0_ring_emit_wreg(ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -7225,12 +7183,11 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -7280,13 +7237,13 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
DRM_ERROR("fail to remap queue\n");
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -7496,7 +7453,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
- .reset = gfx_v9_0_reset_kgq,
.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index c233edf60569..51babf5c78c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -1148,13 +1148,15 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
- if (adev->gfx.mec_fw_version >= 155) {
+ if ((adev->gfx.mec_fw_version >= 155) &&
+ !amdgpu_sriov_vf(adev)) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
break;
case IP_VERSION(9, 5, 0):
- if (adev->gfx.mec_fw_version >= 21) {
+ if ((adev->gfx.mec_fw_version >= 21) &&
+ !amdgpu_sriov_vf(adev)) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
@@ -1349,7 +1351,9 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
/* ToDo: GC 9.4.4 */
case IP_VERSION(9, 4, 3):
- if (adev->gfx.mec_fw_version >= 184)
+ if (adev->gfx.mec_fw_version >= 184 &&
+ (amdgpu_sriov_reg_access_sq_config(adev) ||
+ !amdgpu_sriov_vf(adev)))
adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
break;
case IP_VERSION(9, 5, 0):
@@ -3552,7 +3556,8 @@ static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
}
static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
@@ -3560,12 +3565,11 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -3591,7 +3595,9 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
pipe_reset:
- if(r) {
+ if (r) {
+ if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
+ return -EOPNOTSUPP;
r = gfx_v9_4_3_reset_hw_pipe(ring);
dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name,
r ? "failed" : "successfully");
@@ -3612,14 +3618,14 @@ pipe_reset:
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
dev_err(adev->dev, "fail to remap queue\n");
return r;
}
- return amdgpu_ring_test_ring(ring);
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
enum amdgpu_gfx_cp_ras_mem_id {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index cb25f7f0dfc1..6c03bf9f1ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -74,6 +74,8 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
uint32_t xcc_mask)
{
+ uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
+ adev->gmc.vram_start : adev->gmc.fb_start;
uint64_t pt_base;
int i;
@@ -91,10 +93,10 @@ static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
+ (u32)(gart_start >> 12));
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
+ (u32)(gart_start >> 44));
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
@@ -180,7 +182,7 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
/* In the case squeezing vram into GART aperture, we don't use
* FB aperture and AGP aperture. Disable them.
*/
- if (adev->gmc.pdb0_bo) {
+ if (adev->gmc.pdb0_bo && adev->gmc.xgmi.connected_to_cpu) {
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0);
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index a3e2787501f1..7923f491cf73 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -164,10 +164,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 72211409227b..f15d691e9a20 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -134,10 +134,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index b645d3e6a6c8..de763105fdfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -127,10 +127,7 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
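
The same task-info dev_err() is deleted from each GMC fault handler in this series in favour of one shared helper, which also irons out the small inconsistencies between call sites (the stray ')' and the "in"/"for" wording). A sketch of amdgpu_vm_print_task_info(), reconstructed from the removed lines; the real helper lives in amdgpu_vm.c, outside this excerpt:

void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
			       struct amdgpu_task_info *task_info)
{
	dev_err(adev->dev,
		" in process %s pid %d thread %s pid %d\n",
		task_info->process_name, task_info->tgid,
		task_info->task_name, task_info->pid);
}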
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 99ca08e9bdb5..b45fa0cea9d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1458,9 +1458,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev, " for process %s pid %d thread %s pid %d\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 282197f4ffb1..c4d69cf4e06c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -78,8 +78,6 @@
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
-#define MAX_MEM_RANGES 8
-
static const char * const gfxhub_client_ids[] = {
"CB",
"DB",
@@ -411,11 +409,6 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
(0x001d43e0 + 0x00001800),
};
-static inline bool gmc_v9_0_is_multi_chiplet(struct amdgpu_device *adev)
-{
- return !!adev->aid_mask;
-}
-
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned int type,
@@ -641,10 +634,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " for process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -652,7 +642,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr, entry->client_id,
soc15_ih_clientid_name[entry->client_id]);
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
@@ -801,7 +791,7 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
- gmc_v9_0_is_multi_chiplet(adev))
+ amdgpu_is_multi_aid(adev))
return false;
return ((vmhub == AMDGPU_MMHUB0(0) ||
@@ -1131,8 +1121,8 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
- struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -1142,7 +1132,6 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
AMDGPU_GEM_CREATE_EXT_COHERENT);
bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
- struct amdgpu_vm *vm = mapping->bo_va->base.vm;
unsigned int mtype_local, mtype;
uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
bool snoop = false;
@@ -1172,7 +1161,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
mtype = MTYPE_UC;
else
mtype = MTYPE_NC;
- if (mapping->bo_va->is_xgmi)
+ if (amdgpu_xgmi_same_hive(adev, bo_adev))
snoop = true;
}
} else {
@@ -1264,7 +1253,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
}
if ((*flags & AMDGPU_PTE_VALID) && bo)
- gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
+ gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.vm, bo,
+ flags);
}
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
@@ -1385,46 +1375,6 @@ static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
return size;
}
-static enum amdgpu_memory_partition
-gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
-{
- enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
-
- if (adev->nbio.funcs->get_memory_partition_mode)
- mode = adev->nbio.funcs->get_memory_partition_mode(adev,
- supp_modes);
-
- return mode;
-}
-
-static enum amdgpu_memory_partition
-gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev)
-{
- switch (adev->gmc.num_mem_partitions) {
- case 0:
- return UNKNOWN_MEMORY_PARTITION_MODE;
- case 1:
- return AMDGPU_NPS1_PARTITION_MODE;
- case 2:
- return AMDGPU_NPS2_PARTITION_MODE;
- case 4:
- return AMDGPU_NPS4_PARTITION_MODE;
- default:
- return AMDGPU_NPS1_PARTITION_MODE;
- }
-
- return AMDGPU_NPS1_PARTITION_MODE;
-}
-
-static enum amdgpu_memory_partition
-gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
-{
- if (amdgpu_sriov_vf(adev))
- return gmc_v9_0_query_vf_memory_partition(adev);
-
- return gmc_v9_0_get_memory_partition(adev, NULL);
-}
-
static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
{
if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested &&
@@ -1446,7 +1396,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.get_vm_pte = gmc_v9_0_get_vm_pte,
.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
- .query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
+ .query_mem_partition_mode = &amdgpu_gmc_query_memory_partition,
.request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
.need_reset_on_init = &gmc_v9_0_need_reset_on_init,
};
@@ -1553,7 +1503,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
else
adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1599,7 +1549,7 @@ static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
return;
- mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
+ mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
/* Mode detected by hardware and supported modes available */
if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) {
@@ -1635,7 +1585,7 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
- gmc_v9_0_is_multi_chiplet(adev))
+ amdgpu_is_multi_aid(adev))
adev->gmc.xgmi.supported = true;
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
@@ -1722,7 +1672,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (amdgpu_gmc_is_pdb0_enabled(adev)) {
amdgpu_gmc_sysvm_location(adev, mc);
} else {
amdgpu_gmc_vram_location(adev, mc, base);
@@ -1837,7 +1787,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
return 0;
}
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (amdgpu_gmc_is_pdb0_enabled(adev)) {
adev->gmc.vmid0_page_table_depth = 1;
adev->gmc.vmid0_page_table_block_size = 12;
} else {
@@ -1863,7 +1813,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (amdgpu_gmc_is_pdb0_enabled(adev))
r = amdgpu_gmc_pdb0_alloc(adev);
}
@@ -1885,188 +1835,6 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
-static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
-{
- enum amdgpu_memory_partition mode;
- u32 supp_modes;
- bool valid;
-
- mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
-
- /* Mode detected by hardware not present in supported modes */
- if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
- !(BIT(mode - 1) & supp_modes))
- return false;
-
- switch (mode) {
- case UNKNOWN_MEMORY_PARTITION_MODE:
- case AMDGPU_NPS1_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 1);
- break;
- case AMDGPU_NPS2_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 2);
- break;
- case AMDGPU_NPS4_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 3 ||
- adev->gmc.num_mem_partitions == 4);
- break;
- default:
- valid = false;
- }
-
- return valid;
-}
-
-static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
-{
- int i;
-
- /* Check if node with id 'nid' is present in 'node_ids' array */
- for (i = 0; i < num_ids; ++i)
- if (node_ids[i] == nid)
- return true;
-
- return false;
-}
-
-static void
-gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
- struct amdgpu_mem_partition_info *mem_ranges)
-{
- struct amdgpu_numa_info numa_info;
- int node_ids[MAX_MEM_RANGES];
- int num_ranges = 0, ret;
- int num_xcc, xcc_id;
- uint32_t xcc_mask;
-
- num_xcc = NUM_XCC(adev->gfx.xcc_mask);
- xcc_mask = (1U << num_xcc) - 1;
-
- for_each_inst(xcc_id, xcc_mask) {
- ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
- if (ret)
- continue;
-
- if (numa_info.nid == NUMA_NO_NODE) {
- mem_ranges[0].size = numa_info.size;
- mem_ranges[0].numa.node = numa_info.nid;
- num_ranges = 1;
- break;
- }
-
- if (gmc_v9_0_is_node_present(node_ids, num_ranges,
- numa_info.nid))
- continue;
-
- node_ids[num_ranges] = numa_info.nid;
- mem_ranges[num_ranges].numa.node = numa_info.nid;
- mem_ranges[num_ranges].size = numa_info.size;
- ++num_ranges;
- }
-
- adev->gmc.num_mem_partitions = num_ranges;
-}
-
-static void
-gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
- struct amdgpu_mem_partition_info *mem_ranges)
-{
- enum amdgpu_memory_partition mode;
- u32 start_addr = 0, size;
- int i, r, l;
-
- mode = gmc_v9_0_query_memory_partition(adev);
-
- switch (mode) {
- case UNKNOWN_MEMORY_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 0;
- break;
- case AMDGPU_NPS1_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 1;
- break;
- case AMDGPU_NPS2_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 2;
- break;
- case AMDGPU_NPS4_PARTITION_MODE:
- if (adev->flags & AMD_IS_APU)
- adev->gmc.num_mem_partitions = 3;
- else
- adev->gmc.num_mem_partitions = 4;
- break;
- default:
- adev->gmc.num_mem_partitions = 1;
- break;
- }
-
- /* Use NPS range info, if populated */
- r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
- &adev->gmc.num_mem_partitions);
- if (!r) {
- l = 0;
- for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
- if (mem_ranges[i].range.lpfn >
- mem_ranges[i - 1].range.lpfn)
- l = i;
- }
-
- } else {
- if (!adev->gmc.num_mem_partitions) {
- dev_err(adev->dev,
- "Not able to detect NPS mode, fall back to NPS1");
- adev->gmc.num_mem_partitions = 1;
- }
- /* Fallback to sw based calculation */
- size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
- size /= adev->gmc.num_mem_partitions;
-
- for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
- mem_ranges[i].range.fpfn = start_addr;
- mem_ranges[i].size =
- ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
- mem_ranges[i].range.lpfn = start_addr + size - 1;
- start_addr += size;
- }
-
- l = adev->gmc.num_mem_partitions - 1;
- }
-
- /* Adjust the last one */
- mem_ranges[l].range.lpfn =
- (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
- mem_ranges[l].size =
- adev->gmc.real_vram_size -
- ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
-}
-
-static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
-{
- bool valid;
-
- adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
- sizeof(struct amdgpu_mem_partition_info),
- GFP_KERNEL);
- if (!adev->gmc.mem_partitions)
- return -ENOMEM;
-
- /* TODO : Get the range from PSP/Discovery for dGPU */
- if (adev->gmc.is_app_apu)
- gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
- else
- gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
-
- if (amdgpu_sriov_vf(adev))
- valid = true;
- else
- valid = gmc_v9_0_validate_partition_info(adev);
- if (!valid) {
- /* TODO: handle invalid case */
- dev_WARN(adev->dev,
- "Mem ranges not matching with hardware config");
- }
-
- return 0;
-}
-
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
@@ -2088,7 +1856,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
spin_lock_init(&adev->gmc.invalidate_lock);
- if (gmc_v9_0_is_multi_chiplet(adev)) {
+ if (amdgpu_is_multi_aid(adev)) {
gmc_v9_4_3_init_vram_info(adev);
} else if (!adev->bios) {
if (adev->flags & AMD_IS_APU) {
@@ -2238,8 +2006,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_gmc_get_vbios_allocations(adev);
- if (gmc_v9_0_is_multi_chiplet(adev)) {
- r = gmc_v9_0_init_mem_ranges(adev);
+ if (amdgpu_is_multi_aid(adev)) {
+ r = amdgpu_gmc_init_mem_ranges(adev);
if (r)
return r;
}
@@ -2267,7 +2035,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vm_manager.first_kfd_vmid =
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
- gmc_v9_0_is_multi_chiplet(adev)) ?
+ amdgpu_is_multi_aid(adev)) ?
3 :
8;
@@ -2279,7 +2047,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
amdgpu_gmc_sysfs_init(adev);
return 0;
@@ -2289,7 +2057,7 @@ static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
amdgpu_gmc_sysfs_fini(adev);
amdgpu_gmc_ras_fini(adev);
@@ -2363,7 +2131,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
int r;
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (amdgpu_gmc_is_pdb0_enabled(adev))
amdgpu_gmc_init_pdb0(adev);
if (adev->gart.bo == NULL) {
@@ -2521,7 +2289,7 @@ static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
* information again.
*/
if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) {
- gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+ amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS;
}
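
For reference, the software fallback deleted above (now hosted in common code as amdgpu_gmc_init_sw_mem_ranges) divides the VRAM page range evenly across the partition count and lets the last range absorb the rounding slack; the driver additionally pads real_vram_size by SZ_16M before dividing, which the sketch below omits. A standalone userspace model of just that arithmetic, with hypothetical stand-in types:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12 /* assumption: 4K GPU pages, as AMDGPU_GPU_PAGE_SHIFT */

struct mem_range { uint64_t fpfn, lpfn, size; }; /* stand-in for the driver struct */

static void split_vram(uint64_t vram_bytes, int parts, struct mem_range *r)
{
	uint64_t pages = vram_bytes >> GPU_PAGE_SHIFT;
	uint64_t per = pages / parts; /* equal share, rounded down */
	uint64_t start = 0;
	int i;

	for (i = 0; i < parts; i++) {
		r[i].fpfn = start;
		r[i].lpfn = start + per - 1;
		r[i].size = per << GPU_PAGE_SHIFT;
		start += per;
	}
	/* the last range is stretched to cover the remainder, as above */
	r[parts - 1].lpfn = pages - 1;
	r[parts - 1].size = vram_bytes - (r[parts - 1].fpfn << GPU_PAGE_SHIFT);
}

int main(void)
{
	struct mem_range r[3];
	int i;

	split_vram(10ULL << 30, 3, r); /* 10 GiB over an NPS3-like layout */
	for (i = 0; i < 3; i++)
		printf("part %d: pfn %llu..%llu (%llu bytes)\n", i,
		       (unsigned long long)r[i].fpfn,
		       (unsigned long long)r[i].lpfn,
		       (unsigned long long)r[i].size);
	return 0;
}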
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
index df898dbb746e..58cd87db8061 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
@@ -34,12 +34,13 @@
MODULE_FIRMWARE("amdgpu/gc_12_0_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu_kicker.bin");
#define TRANSFER_RAM_MASK 0x001c0000
static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
{
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err;
const struct imu_firmware_header_v1_0 *imu_hdr;
struct amdgpu_firmware_info *info = NULL;
@@ -47,8 +48,12 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_imu.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
@@ -362,7 +367,7 @@ static void program_imu_rlc_ram(struct amdgpu_device *adev,
static void imu_v12_0_program_rlc_ram(struct amdgpu_device *adev)
{
u32 reg_data, size = 0;
- const u32 *data;
+ const u32 *data = NULL;
int r = -EINVAL;
WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2);
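
Two of the imu changes are defensive: the prefix buffer grows from 15 to 30 bytes so longer IP-version strings cannot be truncated when the firmware name is built, and data starts as NULL so no error path can consume an indeterminate pointer. A minimal userspace model of the second hazard (the lookup function is hypothetical, not the driver's):

#include <stdio.h>

static const unsigned int table[] = { 1, 2, 3 };

/* returns 0 and sets *out on success; on failure *out is left untouched */
static int lookup(int ok, const unsigned int **out, unsigned int *len)
{
	if (!ok)
		return -1;
	*out = table;
	*len = 3;
	return 0;
}

int main(void)
{
	const unsigned int *data = NULL; /* without this, the failure path reads garbage */
	unsigned int len = 0;

	if (lookup(0, &data, &len) || !data) {
		fprintf(stderr, "no data programmed, skipping\n");
		return 1;
	}
	printf("first word: %u\n", data[0]);
	return 0;
}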
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index 574880d67009..a887df520414 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -29,6 +29,12 @@
#include "amdgpu.h"
#include "isp_v4_1_1.h"
+#define ISP_PERFORMANCE_STATE_LOW 0
+#define ISP_PERFORMANCE_STATE_HIGH 1
+
+#define ISP_HIGH_PERFORMANCE_XCLK 788
+#define ISP_HIGH_PERFORMANCE_ICLK 788
+
static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9,
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10,
@@ -56,17 +62,137 @@ static struct gpiod_lookup_table isp_sensor_gpio_table = {
},
};
+static int isp_poweroff(struct generic_pm_domain *genpd)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+
+ return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, true, 0);
+}
+
+static int isp_poweron(struct generic_pm_domain *genpd)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+
+ return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, false, 0);
+}
+
+static int isp_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ u32 iclk, xclk;
+ int ret;
+
+ switch (state) {
+ case ISP_PERFORMANCE_STATE_HIGH:
+ xclk = ISP_HIGH_PERFORMANCE_XCLK;
+ iclk = ISP_HIGH_PERFORMANCE_ICLK;
+ break;
+ case ISP_PERFORMANCE_STATE_LOW:
+ /* isp runs at default lowest clock-rate on power-on, do nothing */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPXCLK, xclk, 0);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to set xclk %u to %u: %d\n",
+ xclk, state, ret);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPICLK, iclk, 0);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to set iclk %u to %u: %d\n",
+ iclk, state, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int isp_genpd_add_device(struct device *dev, void *data)
+{
+ struct generic_pm_domain *gpd = data;
+ struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+ struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ if (!dev->type->name) {
+ drm_dbg(&adev->ddev, "Invalid device type to add\n");
+ goto exit;
+ }
+
+ if (strcmp(dev->type->name, "mfd_device")) {
+ drm_dbg(&adev->ddev, "Invalid isp mfd device %s to add\n", pdev->mfd_cell->name);
+ goto exit;
+ }
+
+ ret = pm_genpd_add_device(gpd, dev);
+ if (ret) {
+ drm_err(&adev->ddev, "Failed to add dev %s to genpd %d\n",
+ pdev->mfd_cell->name, ret);
+ return -ENODEV;
+ }
+
+exit:
+ /* Continue to add */
+ return 0;
+}
+
+static int isp_genpd_remove_device(struct device *dev, void *data)
+{
+ struct generic_pm_domain *gpd = data;
+ struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+ struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ if (!dev->type->name) {
+ drm_dbg(&adev->ddev, "Invalid device type to remove\n");
+ goto exit;
+ }
+
+ if (strcmp(dev->type->name, "mfd_device")) {
+ drm_dbg(&adev->ddev, "Invalid isp mfd device %s to remove\n",
+ pdev->mfd_cell->name);
+ goto exit;
+ }
+
+ ret = pm_genpd_remove_device(dev);
+ if (ret) {
+ drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret);
+ return -ENODEV;
+ }
+
+exit:
+ /* Continue to remove */
+ return 0;
+}
+
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
+ const struct software_node *amd_camera_node, *isp4_node;
struct amdgpu_device *adev = isp->adev;
+ struct acpi_device *acpi_dev;
int idx, int_idx, num_res, r;
- u8 isp_dev_hid[ACPI_ID_LEN];
u64 isp_base;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
- r = amdgpu_acpi_get_isp4_dev_hid(&isp_dev_hid);
+ r = amdgpu_acpi_get_isp4_dev(&acpi_dev);
if (r) {
drm_dbg(&adev->ddev, "Invalid isp platform detected (%d)", r);
/* allow GPU init to progress */
@@ -74,18 +200,28 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
}
/* add GPIO resources required for OMNI5C10 sensor */
- if (!strcmp("OMNI5C10", isp_dev_hid)) {
+ if (!strcmp("OMNI5C10", acpi_device_hid(acpi_dev))) {
gpiod_add_lookup_table(&isp_gpio_table);
gpiod_add_lookup_table(&isp_sensor_gpio_table);
}
isp_base = adev->rmmio_base;
+ isp->ispgpd.name = "ISP_v_4_1_1";
+ isp->ispgpd.power_off = isp_poweroff;
+ isp->ispgpd.power_on = isp_poweron;
+ isp->ispgpd.set_performance_state = isp_set_performance_state;
+
+ r = pm_genpd_init(&isp->ispgpd, NULL, true);
+ if (r) {
+ drm_err(&adev->ddev, "failed to initialize genpd (%d)\n", r);
+ return -EINVAL;
+ }
+
isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp mfd cell alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd cell alloc failed (%d)\n", r);
goto failure;
}
@@ -95,19 +231,20 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd resource alloc failed (%d)\n", r);
goto failure;
}
isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
if (!isp->isp_pdata) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp platform data alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp platform data alloc failed (%d)\n", r);
goto failure;
}
+ amd_camera_node = (const struct software_node *)acpi_dev->driver_data;
+ isp4_node = software_node_find_by_name(amd_camera_node, "isp4");
+
/* initialize isp platform data */
isp->isp_pdata->adev = (void *)adev;
isp->isp_pdata->asic_type = adev->asic_type;
@@ -136,14 +273,14 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[0].num_resources = num_res;
isp->isp_cell[0].resources = &isp->isp_res[0];
isp->isp_cell[0].platform_data = isp->isp_pdata;
+ isp->isp_cell[0].swnode = isp4_node;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
/* initialize isp i2c platform data */
isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_i2c_res) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd res alloc failed (%d)\n", r);
goto failure;
}
@@ -162,8 +299,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_gpio_res) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp gpio res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp gpio resource alloc failed (%d)\n", r);
goto failure;
}
@@ -179,10 +315,23 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[2].platform_data = isp->isp_pdata;
isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3);
+ /* add only amd_isp_capture and amd_isp_i2c_designware to genpd */
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
if (r) {
- drm_err(&adev->ddev,
- "%s: add mfd hotplug device failed\n", __func__);
+ drm_err(&adev->ddev, "add mfd hotplug device failed (%d)\n", r);
+ goto failure;
+ }
+
+ r = device_for_each_child(isp->parent, &isp->ispgpd,
+ isp_genpd_add_device);
+ if (r) {
+ drm_err(&adev->ddev, "failed to add devices to genpd (%d)\n", r);
+ goto failure;
+ }
+
+ r = mfd_add_hotplug_devices(isp->parent, &isp->isp_cell[2], 1);
+ if (r) {
+ drm_err(&adev->ddev, "add pinctl hotplug device failed (%d)\n", r);
goto failure;
}
@@ -201,6 +350,9 @@ failure:
static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
{
+ device_for_each_child(isp->parent, NULL,
+ isp_genpd_remove_device);
+
mfd_remove_devices(isp->parent);
kfree(isp->isp_res);
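
All of the genpd callbacks added above lean on one idiom: struct generic_pm_domain is embedded in struct amdgpu_isp as ispgpd, so container_of() recovers the ISP context from the bare genpd pointer the PM core passes in. A freestanding sketch of the pattern with simplified stand-in structs:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pm_domain {                 /* stands in for generic_pm_domain */
	const char *name;
	int (*power_on)(struct pm_domain *pd);
};

struct isp_dev {                   /* stands in for amdgpu_isp */
	int instance;
	struct pm_domain pd;       /* embedded, as ispgpd is */
};

static int isp_power_on(struct pm_domain *pd)
{
	/* recover the enclosing device from the embedded member */
	struct isp_dev *isp = container_of(pd, struct isp_dev, pd);

	printf("powering on isp instance %d via domain %s\n",
	       isp->instance, pd->name);
	return 0;
}

int main(void)
{
	struct isp_dev isp = { .instance = 0,
			       .pd = { "ISP_v_4_1_1", isp_power_on } };

	return isp.pd.power_on(&isp.pd);
}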
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 4cde8a8bcc83..58239c405fda 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -118,7 +118,10 @@ static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -764,11 +767,20 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- jpeg_v2_0_stop(ring->adev);
- jpeg_v2_0_start(ring->adev);
- return amdgpu_ring_test_helper(ring);
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v2_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v2_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
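
The converted reset handler keeps one shape: amdgpu_ring_reset_helper_begin() is called before touching the block, every stop/start failure is propagated immediately, and only a clean restart reaches amdgpu_ring_reset_helper_end(), which re-tests the ring. A userspace skeleton of that control flow (the helper bodies below are placeholders, not the real ones):

#include <stdio.h>

/* Hypothetical stand-ins for the amdgpu ring-reset helpers; the real
 * ones take ring/fence arguments and manage the guilty fence. */
static void reset_begin(void) { puts("quiesce ring, note timed-out fence"); }
static int reset_end(void) { puts("re-test ring, resume submission"); return 0; }

static int stop_block(void) { return 0; }  /* stands in for jpeg_vX_0_stop() */
static int start_block(void) { return 0; } /* stands in for jpeg_vX_0_start() */

static int ring_reset(void)
{
	int r;

	reset_begin();
	r = stop_block();
	if (r)
		return r; /* any failure after begin propagates immediately */
	r = start_block();
	if (r)
		return r;
	return reset_end(); /* only a full stop+start reaches end */
}

int main(void) { return ring_reset(); }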
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 8b39e114f3be..3e2c389242db 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -167,7 +167,10 @@ static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -643,11 +646,14 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
jpeg_v2_5_stop_inst(ring->adev, ring->me);
jpeg_v2_5_start_inst(ring->adev, ring->me);
- return amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
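
The sw_init changes across the JPEG versions follow one recipe: start from the soft/full reset capabilities the ring already advertises, then OR in per-queue reset only on bare metal, since a VF cannot drive it. The same bitmask idiom with illustrative flag values:

#include <stdint.h>
#include <stdio.h>

/* illustrative values; the real AMDGPU_RESET_TYPE_* flags live in amdgpu.h */
#define RESET_TYPE_SOFT      (1u << 0)
#define RESET_TYPE_FULL      (1u << 1)
#define RESET_TYPE_PER_QUEUE (1u << 2)

static uint32_t base_reset_mask(void)
{
	/* stands in for amdgpu_get_soft_full_reset_mask(ring) */
	return RESET_TYPE_SOFT | RESET_TYPE_FULL;
}

int main(void)
{
	int is_vf = 0; /* stands in for amdgpu_sriov_vf(adev) */
	uint32_t supported = base_reset_mask();

	if (!is_vf)
		supported |= RESET_TYPE_PER_QUEUE;

	printf("per-queue reset %s\n",
	       (supported & RESET_TYPE_PER_QUEUE) ? "exposed" : "hidden");
	return 0;
}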
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 2f8510c2986b..a44eb2667664 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -132,7 +132,10 @@ static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -555,11 +558,20 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- jpeg_v3_0_stop(ring->adev);
- jpeg_v3_0_start(ring->adev);
- return amdgpu_ring_test_helper(ring);
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v3_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v3_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index f17ec5414fd6..da3ee69f1a3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -143,7 +143,10 @@ static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -720,14 +723,20 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- if (amdgpu_sriov_vf(ring->adev))
- return -EINVAL;
+ int r;
- jpeg_v4_0_stop(ring->adev);
- jpeg_v4_0_start(ring->adev);
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v4_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v4_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 79e342d5ab28..b86288a69e7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -216,12 +216,11 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev)) {
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
- r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- }
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- return 0;
+ return r;
}
@@ -242,8 +241,7 @@ static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev))
- amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);
@@ -1143,14 +1141,17 @@ static void jpeg_v4_0_3_core_stall_reset(struct amdgpu_ring *ring)
WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
}
-static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
if (amdgpu_sriov_vf(ring->adev))
return -EOPNOTSUPP;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
jpeg_v4_0_3_core_stall_reset(ring);
jpeg_v4_0_3_start_jrbc(ring);
- return amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 974030a5c03c..481d1a2dbe5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -174,9 +174,10 @@ static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
if (r)
return r;
@@ -767,6 +768,22 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v4_0_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v4_0_5_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v4_0_5_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.name = "jpeg_v4_0_5",
.early_init = jpeg_v4_0_5_early_init,
@@ -812,6 +829,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v4_0_5_ring_reset,
};
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index 31d213ccbe0a..e0a71909252b 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -120,13 +120,13 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- return 0;
+
+ return r;
}
/**
@@ -644,6 +644,22 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v5_0_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v5_0_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v5_0_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.name = "jpeg_v5_0_0",
.early_init = jpeg_v5_0_0_early_init,
@@ -689,6 +705,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v5_0_0_ring_reset,
};
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 3b6f65a25646..54523dc1f702 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -200,14 +200,13 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev)) {
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
- r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- }
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- return 0;
+ return r;
}
/**
@@ -226,8 +225,7 @@ static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev))
- amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);
@@ -834,14 +832,14 @@ static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring)
WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
}
-static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- if (amdgpu_sriov_vf(ring->adev))
- return -EOPNOTSUPP;
-
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
jpeg_v5_0_1_core_stall_reset(ring);
jpeg_v5_0_1_init_jrbc(ring);
- return amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 76167fadb292..cc688ae79e84 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -76,6 +76,8 @@ static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi
static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
{
+ uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
+ adev->gmc.vram_start : adev->gmc.fb_start;
uint64_t pt_base;
u32 inst_mask;
int i;
@@ -95,10 +97,10 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
+ (u32)(gart_start >> 12));
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
+ (u32)(gart_start >> 44));
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
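
The aperture programming above shows the usual 64-bit register split: the byte address becomes a 4K page frame number (>> 12), whose low 32 bits land in the _LO32 register while the bits above them (address >> 44) land in _HI32. Worked in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gart_start = 0x0000123456789000ULL;
	uint32_t lo = (uint32_t)(gart_start >> 12); /* PFN, low 32 bits */
	uint32_t hi = (uint32_t)(gart_start >> 44); /* PFN bits 32 and up */

	printf("LO32=0x%08x HI32=0x%08x\n", lo, hi);
	printf("reassembled: 0x%llx\n",
	       (unsigned long long)((((uint64_t)hi << 32) | lo) << 12));
	return 0;
}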
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index 134c4ec10887..910337dc28d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -36,40 +36,47 @@
static const char *mmhub_client_ids_v3_0_1[][2] = {
[0][0] = "VMC",
+ [1][0] = "ISPXT",
+ [2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[5][0] = "DCEVGA",
[6][0] = "MP0",
[7][0] = "MP1",
- [8][0] = "MPIO",
- [16][0] = "HDP",
- [17][0] = "LSDMA",
- [18][0] = "JPEG",
- [19][0] = "VCNU0",
- [21][0] = "VSCH",
- [22][0] = "VCNU1",
- [23][0] = "VCN1",
- [32+20][0] = "VCN0",
- [2][1] = "DBGUNBIO",
+ [8][0] = "MPM",
+ [12][0] = "ISPTNR",
+ [14][0] = "ISPCRD0",
+ [15][0] = "ISPCRD1",
+ [16][0] = "ISPCRD2",
+ [22][0] = "HDP",
+ [23][0] = "LSDMA",
+ [24][0] = "JPEG",
+ [27][0] = "VSCH",
+ [28][0] = "VCNU",
+ [29][0] = "VCN",
+ [1][1] = "ISPXT",
+ [2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
[5][1] = "DCEVGA",
[6][1] = "MP0",
[7][1] = "MP1",
- [8][1] = "MPIO",
- [10][1] = "DBGU0",
- [11][1] = "DBGU1",
- [12][1] = "DBGU2",
- [13][1] = "DBGU3",
- [14][1] = "XDP",
- [15][1] = "OSSSYS",
- [16][1] = "HDP",
- [17][1] = "LSDMA",
- [18][1] = "JPEG",
- [19][1] = "VCNU0",
- [20][1] = "VCN0",
- [21][1] = "VSCH",
- [22][1] = "VCNU1",
- [23][1] = "VCN1",
+ [8][1] = "MPM",
+ [10][1] = "ISPMWR0",
+ [11][1] = "ISPMWR1",
+ [12][1] = "ISPTNR",
+ [13][1] = "ISPSWR",
+ [14][1] = "ISPCWR0",
+ [15][1] = "ISPCWR1",
+ [16][1] = "ISPCWR2",
+ [17][1] = "ISPCWR3",
+ [18][1] = "XDP",
+ [21][1] = "OSSSYS",
+ [22][1] = "HDP",
+ [23][1] = "LSDMA",
+ [24][1] = "JPEG",
+ [27][1] = "VSCH",
+ [28][1] = "VCNU",
+ [29][1] = "VCN",
};
static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index bc3d6c2fc87a..f6fc9778bc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -40,30 +40,129 @@
static const char *mmhub_client_ids_v3_3[][2] = {
[0][0] = "VMC",
+ [1][0] = "ISPXT",
+ [2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPM",
+ [9][0] = "ISPPDPRD",
+ [10][0] = "ISPCSTATRD",
+ [11][0] = "ISPBYRPRD",
+ [12][0] = "ISPRGBPRD",
+ [13][0] = "ISPMCFPRD",
+ [14][0] = "ISPMCFPRD1",
+ [15][0] = "ISPYUVPRD",
+ [16][0] = "ISPMCSCRD",
+ [17][0] = "ISPGDCRD",
+ [18][0] = "ISPLMERD",
+ [22][0] = "ISPXT1",
+ [23][0] = "ISPIXT1",
[24][0] = "HDP",
[25][0] = "LSDMA",
[26][0] = "JPEG",
[27][0] = "VPE",
+ [28][0] = "VSCH",
[29][0] = "VCNU",
[30][0] = "VCN",
+ [1][1] = "ISPXT",
+ [2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
+ [5][1] = "ISPCSISWR",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPM",
+ [9][1] = "ISPPDPWR",
+ [10][1] = "ISPCSTATWR",
+ [11][1] = "ISPBYRPWR",
+ [12][1] = "ISPRGBPWR",
+ [13][1] = "ISPMCFPWR",
+ [14][1] = "ISPMWR0",
+ [15][1] = "ISPYUVPWR",
+ [16][1] = "ISPMCSCWR",
+ [17][1] = "ISPGDCWR",
+ [18][1] = "ISPLMEWR",
+ [20][1] = "ISPMWR2",
[21][1] = "OSSSYS",
+ [22][1] = "ISPXT1",
+ [23][1] = "ISPIXT1",
[24][1] = "HDP",
[25][1] = "LSDMA",
[26][1] = "JPEG",
[27][1] = "VPE",
+ [28][1] = "VSCH",
[29][1] = "VCNU",
[30][1] = "VCN",
};
+static const char *mmhub_client_ids_v3_3_1[][2] = {
+ [0][0] = "VMC",
+ [4][0] = "DCEDMC",
+ [6][0] = "MP0",
+ [7][0] = "MP1",
+ [8][0] = "MPM",
+ [24][0] = "HDP",
+ [25][0] = "LSDMA",
+ [26][0] = "JPEG0",
+ [27][0] = "VPE0",
+ [28][0] = "VSCH",
+ [29][0] = "VCNU0",
+ [30][0] = "VCN0",
+ [32+1][0] = "ISPXT",
+ [32+2][0] = "ISPIXT",
+ [32+9][0] = "ISPPDPRD",
+ [32+10][0] = "ISPCSTATRD",
+ [32+11][0] = "ISPBYRPRD",
+ [32+12][0] = "ISPRGBPRD",
+ [32+13][0] = "ISPMCFPRD",
+ [32+14][0] = "ISPMCFPRD1",
+ [32+15][0] = "ISPYUVPRD",
+ [32+16][0] = "ISPMCSCRD",
+ [32+17][0] = "ISPGDCRD",
+ [32+18][0] = "ISPLMERD",
+ [32+22][0] = "ISPXT1",
+ [32+23][0] = "ISPIXT1",
+ [32+26][0] = "JPEG1",
+ [32+27][0] = "VPE1",
+ [32+29][0] = "VCNU1",
+ [32+30][0] = "VCN1",
+ [3][1] = "DCEDWB",
+ [4][1] = "DCEDMC",
+ [6][1] = "MP0",
+ [7][1] = "MP1",
+ [8][1] = "MPM",
+ [21][1] = "OSSSYS",
+ [24][1] = "HDP",
+ [25][1] = "LSDMA",
+ [26][1] = "JPEG0",
+ [27][1] = "VPE0",
+ [28][1] = "VSCH",
+ [29][1] = "VCNU0",
+ [30][1] = "VCN0",
+ [32+1][1] = "ISPXT",
+ [32+2][1] = "ISPIXT",
+ [32+5][1] = "ISPCSISWR",
+ [32+9][1] = "ISPPDPWR",
+ [32+10][1] = "ISPCSTATWR",
+ [32+11][1] = "ISPBYRPWR",
+ [32+12][1] = "ISPRGBPWR",
+ [32+13][1] = "ISPMCFPWR",
+ [32+14][1] = "ISPMWR0",
+ [32+15][1] = "ISPYUVPWR",
+ [32+16][1] = "ISPMCSCWR",
+ [32+17][1] = "ISPGDCWR",
+ [32+18][1] = "ISPLMEWR",
+ [32+19][1] = "ISPMWR1",
+ [32+20][1] = "ISPMWR2",
+ [32+22][1] = "ISPXT1",
+ [32+23][1] = "ISPIXT1",
+ [32+26][1] = "JPEG1",
+ [32+27][1] = "VPE1",
+ [32+29][1] = "VCNU1",
+ [32+30][1] = "VCN1",
+};
+
static uint32_t mmhub_v3_3_get_invalidate_req(unsigned int vmid,
uint32_t flush_type)
{
@@ -102,12 +201,16 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
- case IP_VERSION(3, 3, 1):
case IP_VERSION(3, 3, 2):
mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
mmhub_client_ids_v3_3[cid][rw] :
cid == 0x140 ? "UMSCH" : NULL;
break;
+ case IP_VERSION(3, 3, 1):
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3_1) ?
+ mmhub_client_ids_v3_3_1[cid][rw] :
+ cid == 0x140 ? "UMSCH" : NULL;
+ break;
default:
mmhub_cid = NULL;
break;
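
These client-ID tables are sparse lookup arrays built with C designated initializers: unnamed slots default to NULL so out-of-table IDs fall through to the UMSCH/NULL fallback, and entries written as [32+n] document that the second instance's IDs sit 32 slots up. In miniature:

#include <stdio.h>

static const char *client_ids[][2] = {
	[0][0]       = "VMC",
	[30][0]      = "VCN0",
	[32 + 30][0] = "VCN1", /* second-instance IDs offset by 32 */
	[30][1]      = "VCN0",
	[32 + 30][1] = "VCN1",
};

static const char *client_name(unsigned int cid, int rw)
{
	if (cid >= sizeof(client_ids) / sizeof(client_ids[0]))
		return NULL;
	return client_ids[cid][rw]; /* gaps default to NULL */
}

int main(void)
{
	const char *n = client_name(5, 0);

	printf("cid 62 write: %s\n", client_name(62, 1)); /* -> VCN1 */
	printf("cid 5 read:  %s\n", n ? n : "unknown");
	return 0;
}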
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
index f2ab5001b492..951998454b25 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
@@ -37,39 +37,31 @@
static const char *mmhub_client_ids_v4_1_0[][2] = {
[0][0] = "VMC",
[4][0] = "DCEDMC",
- [5][0] = "DCEVGA",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPIO",
- [16][0] = "HDP",
- [17][0] = "LSDMA",
- [18][0] = "JPEG",
- [19][0] = "VCNU0",
- [21][0] = "VSCH",
- [22][0] = "VCNU1",
- [23][0] = "VCN1",
- [32+20][0] = "VCN0",
- [2][1] = "DBGUNBIO",
+ [16][0] = "LSDMA",
+ [17][0] = "JPEG",
+ [19][0] = "VCNU",
+ [22][0] = "VSCH",
+ [23][0] = "HDP",
+ [32+23][0] = "VCNRD",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
- [5][1] = "DCEVGA",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPIO",
[10][1] = "DBGU0",
[11][1] = "DBGU1",
- [12][1] = "DBGU2",
- [13][1] = "DBGU3",
+ [12][1] = "DBGUNBIO",
[14][1] = "XDP",
[15][1] = "OSSSYS",
- [16][1] = "HDP",
- [17][1] = "LSDMA",
- [18][1] = "JPEG",
- [19][1] = "VCNU0",
- [20][1] = "VCN0",
- [21][1] = "VSCH",
- [22][1] = "VCNU1",
- [23][1] = "VCN1",
+ [16][1] = "LSDMA",
+ [17][1] = "JPEG",
+ [18][1] = "VCNWR",
+ [19][1] = "VCNU",
+ [22][1] = "VSCH",
+ [23][1] = "HDP",
};
static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index a376f072700d..1c22bc11c1f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -31,9 +31,6 @@
#define NPS_MODE_MASK 0x000000FFL
-/* Core 0 Port 0 counter */
-#define smnPCIEP_NAK_COUNTER 0x1A340218
-
static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
@@ -467,22 +464,6 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
}
}
-static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
-{
- u32 val, nak_r, nak_g;
-
- if (adev->flags & AMD_IS_APU)
- return 0;
-
- /* Get the number of NAKs received and generated */
- val = RREG32_PCIE(smnPCIEP_NAK_COUNTER);
- nak_r = val & 0xFFFF;
- nak_g = val >> 16;
-
- /* Add the total number of NAKs, i.e the number of replays */
- return (nak_r + nak_g);
-}
-
#define MMIO_REG_HOLE_OFFSET 0x1A000
static void nbio_v7_9_set_reg_remap(struct amdgpu_device *adev)
@@ -524,7 +505,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
.is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested,
.init_registers = nbio_v7_9_init_registers,
- .get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
.set_reg_remap = nbio_v7_9_set_reg_remap,
};
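
The deleted replay counter packed two 16-bit fields into one register, NAKs received in the low half and NAKs generated in the high half; the replay count was their sum. The unpacking, in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = 0x00050003; /* pretend register: 5 generated, 3 received */
	uint32_t nak_r = val & 0xFFFF; /* low 16 bits: NAKs received */
	uint32_t nak_g = val >> 16;    /* high 16 bits: NAKs generated */

	printf("replays: %u\n", nak_r + nak_g);
	return 0;
}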
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index f4a91b126c73..73f87131a7e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -106,7 +106,9 @@ enum psp_gfx_cmd_id
/*IDs of performance monitoring/profiling*/
GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */
/* Dynamic memory partitioninig (NPS mode change)*/
- GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */
+ GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */
+ GFX_CMD_ID_FB_FW_RESERV_ADDR = 0x00000050, /* Query FW reservation addr */
+ GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR = 0x00000051, /* Query FW reservation extended addr */
};
/* PSP boot config sub-commands */
@@ -404,11 +406,19 @@ struct psp_gfx_uresp_bootcfg {
uint32_t boot_cfg; /* boot config data */
};
+/* Command-specific response for fw reserve info */
+struct psp_gfx_uresp_fw_reserve_info {
+ uint32_t reserve_base_address_hi;
+ uint32_t reserve_base_address_lo;
+ uint32_t reserve_size;
+};
+
/* Union of command-specific responses for GPCOM ring. */
union psp_gfx_uresp {
struct psp_gfx_uresp_reserved reserved;
struct psp_gfx_uresp_bootcfg boot_cfg;
struct psp_gfx_uresp_fwar_db_info fwar_db_info;
+ struct psp_gfx_uresp_fw_reserve_info fw_reserve_info;
};
/* Structure of GFX Response buffer.
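
The new fw-reserve response reuses the existing contract: every GPCOM command shares one response union, and the caller reads only the member matching the command it issued, assembling the 64-bit base from the hi/lo pair. A simplified sketch of that pattern (command IDs and field use below are illustrative):

#include <stdint.h>
#include <stdio.h>

enum cmd_id { CMD_BOOTCFG = 0x42, CMD_FW_RESERV_ADDR = 0x50 };

struct resp_bootcfg { uint32_t boot_cfg; };
struct resp_fw_reserve { uint32_t base_hi, base_lo, size; };

union cmd_resp {                    /* mirrors union psp_gfx_uresp */
	struct resp_bootcfg boot_cfg;
	struct resp_fw_reserve fw_reserve;
};

static void handle(enum cmd_id id, const union cmd_resp *resp)
{
	switch (id) {
	case CMD_FW_RESERV_ADDR: {
		uint64_t base = ((uint64_t)resp->fw_reserve.base_hi << 32) |
				resp->fw_reserve.base_lo;

		printf("fw reserve @ 0x%llx, %u bytes\n",
		       (unsigned long long)base, resp->fw_reserve.size);
		break;
	}
	case CMD_BOOTCFG:
		printf("boot cfg 0x%x\n", resp->boot_cfg.boot_cfg);
		break;
	}
}

int main(void)
{
	union cmd_resp r = { .fw_reserve = { 0x1, 0x80000000u, 0x100000 } };

	handle(CMD_FW_RESERV_ADDR, &r);
	return 0;
}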
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 145186a1e48f..3584b8c18fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -94,7 +94,7 @@ static int psp_v10_0_ring_create(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -115,7 +115,7 @@ static int psp_v10_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
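
Judging by the constants they replace, MBOX_TOS_RESP_FLAG is bit 31 and MBOX_TOS_RESP_MASK is the 0x8000FFFF form, so the wait succeeds only when the response bit is set and the low 16 status bits read zero; psp_wait_for polls until (reg & mask) == flag. A minimal model of the predicate:

#include <stdint.h>
#include <stdio.h>

#define RESP_FLAG 0x80000000u /* bit 31: command consumed */
#define RESP_MASK 0x8000FFFFu /* also require the low 16 status bits == 0 */

static int mbox_done(uint32_t reg)
{
	return (reg & RESP_MASK) == RESP_FLAG;
}

int main(void)
{
	printf("%d\n", mbox_done(0x80000000)); /* 1: done, status 0 */
	printf("%d\n", mbox_done(0x80000005)); /* 0: done bit set, status 5 */
	printf("%d\n", mbox_done(0x00000000)); /* 0: still pending */
	return 0;
}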
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 215543575f47..6cc05d36e359 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -152,11 +152,9 @@ static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for bootloader to signify that is
ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -252,8 +250,8 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -277,11 +275,13 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -317,13 +317,15 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for sOS ready for ring creation\n");
return ret;
@@ -347,8 +349,9 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -381,7 +384,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG,
+ MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -395,7 +399,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK,
+ 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
@@ -421,8 +426,9 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -601,7 +607,7 @@ static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -638,7 +644,7 @@ static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
index 5697760a819b..93787a90d598 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
@@ -41,8 +41,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
@@ -50,8 +51,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -87,13 +89,15 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -117,8 +121,9 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 80153f837470..4c6450d62299 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -82,7 +82,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -97,7 +97,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
psp_gfxdrv_command_reg);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -118,7 +118,7 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -133,8 +133,8 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
psp_gfxdrv_command_reg);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -163,7 +163,7 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -184,11 +184,13 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -219,7 +221,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG,
+ MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -233,7 +236,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK,
+ 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index ead616c11705..af4a7d7c4abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -182,7 +182,7 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
ready having bit 31 of C2PMSG_33 set to 1 */
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_33),
- 0x80000000, 0xffffffff, false);
+ 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
@@ -213,7 +213,7 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < retry_cnt; retry_loop++) {
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0xffffffff, false);
+ 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -362,8 +362,8 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
if (!ret)
psp_v13_0_init_sos_version(psp);
@@ -384,8 +384,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
@@ -393,8 +394,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -430,13 +432,15 @@ static int psp_v13_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -460,8 +464,9 @@ static int psp_v13_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -524,8 +529,9 @@ static int psp_v13_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -677,7 +683,7 @@ static int psp_v13_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -714,7 +720,7 @@ static int psp_v13_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36);
@@ -739,8 +745,9 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
@@ -764,7 +771,7 @@ static int psp_v13_0_update_spirom(struct psp_context *psp,
/* Confirm PSP is ready to start */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
@@ -799,7 +806,7 @@ static int psp_v13_0_dump_spirom(struct psp_context *psp,
/* Confirm PSP is ready to start */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
@@ -926,8 +933,9 @@ static int psp_v13_0_reg_program_no_ring(struct psp_context *psp, uint32_t val,
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, id);
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, val);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
index eaa5512a21da..5f39a2edcc95 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
@@ -76,11 +76,9 @@ static int psp_v13_0_4_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for bootloader to signify that is
ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -185,8 +183,8 @@ static int psp_v13_0_4_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -204,8 +202,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
@@ -213,8 +212,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -250,13 +250,15 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -280,8 +282,9 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 256288c6cd78..38dfc5c19f2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -34,7 +34,9 @@
MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_5_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_5_ta.bin");
@@ -109,11 +111,9 @@ static int psp_v14_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for bootloader to signify that is
ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -228,9 +228,10 @@ static int psp_v14_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81),
- 0, true);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
+ RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -248,8 +249,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64,
@@ -257,8 +259,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -294,13 +297,15 @@ static int psp_v14_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -324,8 +329,9 @@ static int psp_v14_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -388,8 +394,9 @@ static int psp_v14_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -540,8 +547,9 @@ static int psp_v14_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
*/
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -577,8 +585,9 @@ static int psp_v14_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36);
@@ -602,11 +611,13 @@ static int psp_v14_0_exec_spi_cmd(struct psp_context *psp, int cmd)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
@@ -629,8 +640,9 @@ static int psp_v14_0_update_spirom(struct psp_context *psp,
int ret;
/* Confirm PSP is ready to start */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
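
Aside: throughout psp_v14_0.c the final boolean argument of psp_wait_for() (the old "check_changed" flag) becomes a flags word, and several bare 0x80000000/0x8000FFFF pairs become named MBOX_TOS_* constants. The standalone sketch below models the flag semantics only; PSP_WAITREG_CHANGED and PSP_WAITREG_NOVERBOSE are the names used in the hunks, but the helper body is illustrative, not the driver's psp_wait_for() implementation.

#include <stdint.h>
#include <stdio.h>

#define PSP_WAITREG_CHANGED   (1U << 0) /* wait for the value to change   */
#define PSP_WAITREG_NOVERBOSE (1U << 1) /* suppress the timeout message   */

static int wait_reg(uint32_t (*read_reg)(uint32_t), uint32_t reg,
                    uint32_t val, uint32_t mask, uint32_t flags)
{
	for (int i = 0; i < 100000; i++) {
		uint32_t v = read_reg(reg) & mask;
		int done = (flags & PSP_WAITREG_CHANGED) ? (v != val)
							 : (v == val);
		if (done)
			return 0;
	}
	if (!(flags & PSP_WAITREG_NOVERBOSE))
		fprintf(stderr, "reg 0x%x: wait timed out\n", reg);
	return -1;
}

Passing 0 keeps the old default behaviour (wait for equality, verbose timeout), which is why most call sites simply replace "false" with 0.
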
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index f6b75e3e47ff..833830bc3e2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -91,7 +91,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -109,7 +109,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -130,7 +130,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -147,8 +147,8 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -168,7 +168,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp)
mdelay(20);
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ 0x80000000, 0x8000FFFF, 0);
/* Change IH ring for UMC */
tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
@@ -180,7 +180,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp)
mdelay(20);
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ 0x80000000, 0x8000FFFF, 0);
}
static int psp_v3_1_ring_create(struct psp_context *psp,
@@ -217,9 +217,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
- mmMP0_SMN_C2PMSG_101), 0x80000000,
- 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, 0);
} else {
/* Write low address of the ring to C2PMSG_69 */
@@ -240,10 +240,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
- mmMP0_SMN_C2PMSG_64), 0x80000000,
- 0x8000FFFF, false);
-
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, 0);
}
return ret;
}
@@ -267,11 +266,13 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -311,7 +312,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -325,7 +326,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 33ed2b158fcd..f38004e6064e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2187,7 +2187,7 @@ static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
dev_dbg_ratelimited(adev->dev,
" for process %s pid %d thread %s pid %d\n",
task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ task_info->task.comm, task_info->task.pid);
amdgpu_vm_put_task_info(task_info);
}
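
Aside: the print sites in this and the following SDMA/KFD files switch from task_info->task_name / task_info->pid to task_info->task.comm / task_info->task.pid, i.e. the task identity now lives in an embedded struct. A sketch of the shape the call sites imply; the field names match the diff, but the layout itself is an assumption.

#include <sys/types.h>

/* Hypothetical layout inferred from the renamed accesses. */
struct wedge_task_info {
	pid_t pid;
	char  comm[16];              /* thread name, TASK_COMM_LEN-sized  */
};

struct task_info_model {
	char  process_name[16];
	pid_t tgid;
	struct wedge_task_info task; /* was: separate task_name + pid     */
};
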
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index bb82c652e4c0..36b1ca73c2ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -110,6 +110,8 @@ static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);
+static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev,
+ u32 instance_id);
static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
@@ -1342,6 +1344,7 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
.stop_kernel_queue = &sdma_v4_4_2_stop_queue,
.start_kernel_queue = &sdma_v4_4_2_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v4_4_2_soft_reset_engine,
};
static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
@@ -1653,38 +1656,17 @@ static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t i
return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0;
}
-static bool sdma_v4_4_2_ring_is_guilty(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t instance_id = ring->me;
-
- return sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
-}
-
-static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t instance_id = ring->me;
-
- if (!adev->sdma.has_page_queue)
- return false;
-
- return sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
-}
-
-static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
u32 id = ring->me;
int r;
- if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- amdgpu_amdkfd_suspend(adev, false);
- r = amdgpu_sdma_reset_engine(adev, id);
- amdgpu_amdkfd_resume(adev, false);
-
+ amdgpu_amdkfd_suspend(adev, true);
+ r = amdgpu_sdma_reset_engine(adev, id, false);
+ amdgpu_amdkfd_resume(adev, true);
return r;
}
@@ -1730,7 +1712,7 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 inst_mask;
- int i;
+ int i, r;
inst_mask = 1 << ring->me;
udelay(50);
@@ -1747,7 +1729,18 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
return -ETIMEDOUT;
}
- return sdma_v4_4_2_inst_start(adev, inst_mask, true);
+ r = sdma_v4_4_2_inst_start(adev, inst_mask, true);
+
+ return r;
+}
+
+static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev,
+ u32 instance_id)
+{
+ /* For SDMA 4.x, use the existing DPM interface for backward compatibility;
+ * we need to convert the logical instance ID to the physical instance ID
+ * before reset.
+ */
+ return amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id));
}
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
@@ -1889,7 +1882,7 @@ static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
if (task_info) {
dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n",
task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ task_info->task.comm, task_info->task.pid);
amdgpu_vm_put_task_info(task_info);
}
@@ -2144,7 +2137,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
.reset = sdma_v4_4_2_reset_queue,
- .is_guilty = sdma_v4_4_2_ring_is_guilty,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
@@ -2177,7 +2169,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
.reset = sdma_v4_4_2_reset_queue,
- .is_guilty = sdma_v4_4_2_page_ring_is_guilty,
};
static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
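
Aside: the new soft_reset_kernel_queue hook funnels SDMA 4.x resets through the DPM interface, which addresses engines by a physical-instance bitmask, hence the logical-to-physical conversion the added comment describes. A sketch of that conversion; the remap table below stands in for GET_INST(SDMA0, id) and its values are invented.

#include <stdint.h>

/* Stand-in for GET_INST(SDMA0, id): logical -> physical remap table
 * (example mapping only). */
static const int phys_inst[] = { 0, 2, 1, 3 };

static uint32_t sdma_reset_mask(uint32_t logical_id)
{
	/* DPM expects a bitmask keyed by *physical* instance */
	return 1U << phys_inst[logical_id];
}
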
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 37f4b5b4a098..7dc67a22a7a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1428,7 +1428,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 5):
- if (adev->sdma.instance[0].fw_version >= 35)
+ if ((adev->sdma.instance[0].fw_version >= 35) &&
+ !amdgpu_sriov_vf(adev))
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1539,17 +1540,27 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- u32 inst_id = ring->me;
int r;
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
+ return -EINVAL;
+ }
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
amdgpu_amdkfd_suspend(adev, true);
- r = amdgpu_sdma_reset_engine(adev, inst_id);
+ r = amdgpu_sdma_reset_engine(adev, ring->me, true);
amdgpu_amdkfd_resume(adev, true);
+ if (r)
+ return r;
- return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
@@ -1616,6 +1627,7 @@ static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring)
r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
return r;
}
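
Aside: from here on, every converted reset callback follows the same shape around amdgpu_ring_reset_helper_begin()/_end(). The helper names are real (they appear throughout the diff); their bodies below are placeholders, since only the calling convention matters: begin quiesces the ring and records the timed-out fence, each hardware step bails out early on error, and end performs the ring test and fence cleanup that decide success.

struct ring;
struct fence;

static void reset_helper_begin(struct ring *r, struct fence *bad)
{ (void)r; (void)bad; /* quiesce ring, record the guilty fence */ }

static int reset_helper_end(struct ring *r, struct fence *bad)
{ (void)r; (void)bad; /* ring test + fence cleanup decide success */ return 0; }

static int stop_hw(struct ring *r)    { (void)r; return 0; }
static int restart_hw(struct ring *r) { (void)r; return 0; }

static int reset_queue(struct ring *r, struct fence *timedout)
{
	int err;

	reset_helper_begin(r, timedout);

	err = stop_hw(r);          /* every HW step bails out early...   */
	if (err)
		return err;
	err = restart_hw(r);
	if (err)
		return err;

	return reset_helper_end(r, timedout);  /* ...success decided here */
}
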
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 0b40411b92a0..3bd44c24f692 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1347,11 +1347,13 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 2, 2):
case IP_VERSION(5, 2, 3):
case IP_VERSION(5, 2, 4):
- if (adev->sdma.instance[0].fw_version >= 76)
+ if ((adev->sdma.instance[0].fw_version >= 76) &&
+ !amdgpu_sriov_vf(adev))
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(5, 2, 5):
- if (adev->sdma.instance[0].fw_version >= 34)
+ if ((adev->sdma.instance[0].fw_version >= 34) &&
+ !amdgpu_sriov_vf(adev))
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1452,17 +1454,27 @@ static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
return -ETIMEDOUT;
}
-static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- u32 inst_id = ring->me;
int r;
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
+ return -EINVAL;
+ }
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
amdgpu_amdkfd_suspend(adev, true);
- r = amdgpu_sdma_reset_engine(adev, inst_id);
+ r = amdgpu_sdma_reset_engine(adev, ring->me, true);
amdgpu_amdkfd_resume(adev, true);
+ if (r)
+ return r;
- return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring)
@@ -1532,6 +1544,7 @@ static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring)
r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
return r;
}
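
Aside: the same guard recurs across the SDMA and VCN sw_init hunks in this series: per-queue reset is only advertised when the firmware is new enough and the function is not an SR-IOV virtual function (VFs must defer queue resets to the host). A compact model of the capability computation; the bit values and helper name are stand-ins.

#include <stdbool.h>
#include <stdint.h>

#define RESET_TYPE_SOFT      (1U << 0)   /* placeholder bit values */
#define RESET_TYPE_PER_QUEUE (1U << 2)

static uint32_t supported_reset(uint32_t fw_version, uint32_t min_fw,
				bool is_sriov_vf)
{
	uint32_t mask = RESET_TYPE_SOFT;   /* baseline soft/full mask */

	if (fw_version >= min_fw && !is_sriov_vf)
		mask |= RESET_TYPE_PER_QUEUE;
	return mask;
}
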
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index a9bdf8d61d6c..e6d8eddda2bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1355,7 +1355,8 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
- if (adev->sdma.instance[0].fw_version >= 21)
+ if ((adev->sdma.instance[0].fw_version >= 21) &&
+ !amdgpu_sriov_vf(adev))
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1379,6 +1380,10 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
if ((adev->sdma.instance[0].fw_version >= 24) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
+ case IP_VERSION(6, 0, 1):
+ if ((adev->sdma.instance[0].fw_version >= 18) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
case IP_VERSION(6, 0, 2):
if ((adev->sdma.instance[0].fw_version >= 21) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
@@ -1387,6 +1392,22 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
if ((adev->sdma.instance[0].fw_version >= 25) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
+ case IP_VERSION(6, 1, 0):
+ if ((adev->sdma.instance[0].fw_version >= 14) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 1):
+ if ((adev->sdma.instance[0].fw_version >= 17) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 2):
+ if ((adev->sdma.instance[0].fw_version >= 15) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 3):
+ if ((adev->sdma.instance[0].fw_version >= 10) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
default:
break;
}
@@ -1550,29 +1571,29 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
-static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
- return -EINVAL;
- }
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
if (r)
return r;
- return sdma_v6_0_gfx_resume_instance(adev, i, true);
+ r = sdma_v6_0_gfx_resume_instance(adev, ring->me, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
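
Aside: sdma_v6_0_sw_init now enables MES user queues per IP revision, each revision with its own minimum firmware version. The long switch reads naturally as a lookup table; the sketch below restates the thresholds from the hunks above (the table form is editorial, the values come straight from the diff).

#include <stdbool.h>
#include <stdint.h>

struct uq_gate { uint8_t maj, min, rev; uint32_t min_fw; };

/* Thresholds as added above: IP 6.0.0->24, 6.0.1->18, 6.0.2->21,
 * 6.0.3->25, 6.1.0->14, 6.1.1->17, 6.1.2->15, 6.1.3->10. */
static const struct uq_gate gates[] = {
	{6,0,0,24}, {6,0,1,18}, {6,0,2,21}, {6,0,3,25},
	{6,1,0,14}, {6,1,1,17}, {6,1,2,15}, {6,1,3,10},
};

static bool user_queues_ok(uint8_t maj, uint8_t min, uint8_t rev,
			   uint32_t fw, bool disable_uq)
{
	for (unsigned i = 0; i < sizeof(gates) / sizeof(gates[0]); i++)
		if (gates[i].maj == maj && gates[i].min == min &&
		    gates[i].rev == rev)
			return fw >= gates[i].min_fw && !disable_uq;
	return false;   /* unknown revision: keep user queues off */
}
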
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index 86903eccbd4e..326ecc8d37d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -802,29 +802,29 @@ static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
return false;
}
-static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
- return -EINVAL;
- }
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
if (r)
return r;
- return sdma_v7_0_gfx_resume_instance(adev, i, true);
+ r = sdma_v7_0_gfx_resume_instance(adev, ring->me, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
/**
@@ -1337,7 +1337,8 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->sdma.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
- adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev))
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
@@ -1352,7 +1353,7 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
- if ((adev->sdma.instance[0].fw_version >= 7836028) && !adev->sdma.disable_uq)
+ if ((adev->sdma.instance[0].fw_version >= 7966358) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index c457be3a3c56..9e74c9822e62 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1218,6 +1218,8 @@ static int soc15_common_early_init(struct amdgpu_ip_block *ip_block)
AMD_PG_SUPPORT_JPEG;
/*TODO: need a new external_rev_id for GC 9.4.4? */
adev->external_rev_id = adev->rev_id + 0x46;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->external_rev_id = adev->rev_id + 0x50;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index ef7c603b50ae..c8ac11a9cdef 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -118,7 +118,6 @@ int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
int arct_reg_base_init(struct amdgpu_device *adev);
int aldebaran_reg_base_init(struct amdgpu_device *adev);
-void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev);
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id);
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev);
ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 148b651be7ca..68b4371df0f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -98,6 +98,8 @@ static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
+static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst);
+
/**
* vcn_v2_0_early_init - set function pointers and load microcode
*
@@ -213,6 +215,12 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
}
adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+ adev->vcn.inst[0].reset = vcn_v2_0_reset;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -233,6 +241,10 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -260,6 +272,8 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
r = amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
@@ -1355,6 +1369,16 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
return 0;
}
+static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst)
+{
+ int r;
+
+ r = vcn_v2_0_stop(vinst);
+ if (r)
+ return r;
+ return vcn_v2_0_start(vinst);
+}
+
static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -2176,6 +2200,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
@@ -2205,6 +2230,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
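
Aside: each VCN generation in this series gains a per-instance reset callback that is simply stop-then-start; v3.0 additionally re-enables clock and static power gating between the two steps. The common shape, as a self-contained sketch (the stub bodies are illustrative):

struct vcn_inst { int running; };

static int vcn_stop(struct vcn_inst *v)  { v->running = 0; return 0; }
static int vcn_start(struct vcn_inst *v) { v->running = 1; return 0; }
static void vcn_enable_gating(struct vcn_inst *v) { (void)v; /* v3.0 extra */ }

/* Shape of the new vcn_vX_Y_reset() callbacks: stop, optionally restore
 * gating (v3.0), then start; abort rather than restart a half-stopped block. */
static int vcn_reset(struct vcn_inst *v, int regate)
{
	int err = vcn_stop(v);

	if (err)
		return err;
	if (regate)
		vcn_enable_gating(v);
	return vcn_start(v);
}
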
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 58b527a6b795..bc30a5326866 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -102,6 +102,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
+static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst);
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
@@ -404,8 +405,14 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+ adev->vcn.inst[j].reset = vcn_v2_5_reset;
}
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -425,6 +432,10 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -455,6 +466,8 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
@@ -1816,6 +1829,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
/**
@@ -1914,6 +1928,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -1942,6 +1957,16 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
}
}
+static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst)
+{
+ int r;
+
+ r = vcn_v2_5_stop(vinst);
+ if (r)
+ return r;
+ return vcn_v2_5_start(vinst);
+}
+
static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 9fb0d5380589..4b8f4407047f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -110,6 +110,7 @@ static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state);
+static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst);
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
@@ -289,8 +290,14 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
+ adev->vcn.inst[i].reset = vcn_v3_0_reset;
}
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -306,6 +313,10 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -338,6 +349,8 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
@@ -2033,6 +2046,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
/**
@@ -2131,6 +2145,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -2164,6 +2179,18 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
}
}
+static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst)
+{
+ int r;
+
+ r = vcn_v3_0_stop(vinst);
+ if (r)
+ return r;
+ vcn_v3_0_enable_clock_gating(vinst);
+ vcn_v3_0_enable_static_power_gating(vinst);
+ return vcn_v3_0_start(vinst);
+}
+
static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index b5071f77f78d..1924e075b66f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -241,7 +241,8 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
@@ -1967,18 +1968,22 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
return 0;
}
-static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
- if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- vcn_v4_0_stop(vinst);
- vcn_v4_0_start(vinst);
-
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v4_0_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v4_0_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 5a33140f5723..2a3663b551af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -1594,18 +1594,16 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
int r = 0;
int vcn_inst;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
- if (amdgpu_sriov_vf(ring->adev))
- return -EOPNOTSUPP;
-
- if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
vcn_inst = GET_INST(VCN, ring->me);
r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
@@ -1620,9 +1618,8 @@ static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
vcn_v4_0_3_hw_init_inst(vinst);
vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram);
- r = amdgpu_ring_test_helper(ring);
- return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 16ade84facc7..caf2d95a85d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -220,7 +220,8 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
}
adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -1465,18 +1466,22 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
- if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- vcn_v4_0_5_stop(vinst);
- vcn_v4_0_5_start(vinst);
-
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v4_0_5_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v4_0_5_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index f8e3f0b882da..07a6e9582880 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -198,7 +198,8 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
vcn_v5_0_0_alloc_ip_dump(adev);
@@ -1192,18 +1193,22 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
- if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- vcn_v5_0_0_stop(vinst);
- vcn_v5_0_0_start(vinst);
-
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v5_0_0_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v5_0_0_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index 62e88e5362e9..16e12c9913f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -5,7 +5,7 @@
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT))
+ depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT) || (LOONGARCH && 64BIT))
select HMM_MIRROR
select MMU_NOTIFIER
select DRM_AMDGPU_USERPTR
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index a2149afa5803..828a9ceef1e7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -22,7 +22,6 @@
*/
#include <linux/device.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/file.h>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index bf0854bd5555..7e749f9b6d69 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -971,7 +971,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd,
kfd_smi_event_update_gpu_reset(node, false, reset_context);
}
- kgd2kfd_suspend(kfd, false);
+ kgd2kfd_suspend(kfd, true);
for (i = 0; i < kfd->num_nodes; i++)
kfd_signal_reset_event(kfd->nodes[i]);
@@ -1013,13 +1013,33 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
return 0;
}
-bool kfd_is_locked(void)
+bool kfd_is_locked(struct kfd_dev *kfd)
{
+ uint8_t id = 0;
+ struct kfd_node *dev;
+
lockdep_assert_held(&kfd_processes_mutex);
- return (kfd_locked > 0);
+
+ /* check reset/suspend lock */
+ if (kfd_locked > 0)
+ return true;
+
+ if (kfd)
+ return kfd->kfd_dev_lock > 0;
+
+ /* check lock on all cgroup accessible devices */
+ while (kfd_topology_enum_kfd_devices(id++, &dev) == 0) {
+ if (!dev || kfd_devcgroup_check_permission(dev))
+ continue;
+
+ if (dev->kfd->kfd_dev_lock > 0)
+ return true;
+ }
+
+ return false;
}
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
{
struct kfd_node *node;
int i;
@@ -1027,14 +1047,8 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
if (!kfd->init_complete)
return;
- /* for runtime suspend, skip locking kfd */
- if (!run_pm) {
- mutex_lock(&kfd_processes_mutex);
- /* For first KFD device suspend all the KFD processes */
- if (++kfd_locked == 1)
- kfd_suspend_all_processes();
- mutex_unlock(&kfd_processes_mutex);
- }
+ if (suspend_proc)
+ kgd2kfd_suspend_process(kfd);
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
@@ -1042,7 +1056,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
}
}
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
{
int ret, i;
@@ -1055,14 +1069,36 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
return ret;
}
- /* for runtime resume, skip unlocking kfd */
- if (!run_pm) {
- mutex_lock(&kfd_processes_mutex);
- if (--kfd_locked == 0)
- ret = kfd_resume_all_processes();
- WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
- mutex_unlock(&kfd_processes_mutex);
- }
+ if (resume_proc)
+ ret = kgd2kfd_resume_process(kfd);
+
+ return ret;
+}
+
+void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+ if (!kfd->init_complete)
+ return;
+
+ mutex_lock(&kfd_processes_mutex);
+ /* For first KFD device suspend all the KFD processes */
+ if (++kfd_locked == 1)
+ kfd_suspend_all_processes();
+ mutex_unlock(&kfd_processes_mutex);
+}
+
+int kgd2kfd_resume_process(struct kfd_dev *kfd)
+{
+ int ret = 0;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ mutex_lock(&kfd_processes_mutex);
+ if (--kfd_locked == 0)
+ ret = kfd_resume_all_processes();
+ WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
+ mutex_unlock(&kfd_processes_mutex);
return ret;
}
@@ -1442,24 +1478,53 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
kfd_get_num_sdma_engines(node);
}
-int kgd2kfd_check_and_lock_kfd(void)
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
+ struct kfd_process *p;
+ int r = 0, temp, idx;
+
mutex_lock(&kfd_processes_mutex);
- if (!hash_empty(kfd_processes_table) || kfd_is_locked()) {
- mutex_unlock(&kfd_processes_mutex);
- return -EBUSY;
+
+ if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd))
+ goto out;
+
+ /* fail during system reset/resume, or while the kfd device is switching partitions. */
+ if (kfd_is_locked(kfd)) {
+ r = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Ensure every running process is excluded (via cgroup) from this device
+ * before the mode switch, i.e. no pdd has been created for it in any process.
+ */
+ idx = srcu_read_lock(&kfd_processes_srcu);
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ if (p->pdds[i]->dev->kfd != kfd)
+ continue;
+
+ r = -EBUSY;
+ goto proc_check_unlock;
+ }
}
- ++kfd_locked;
+proc_check_unlock:
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+out:
+ if (!r)
+ ++kfd->kfd_dev_lock;
mutex_unlock(&kfd_processes_mutex);
- return 0;
+ return r;
}
-void kgd2kfd_unlock_kfd(void)
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
mutex_lock(&kfd_processes_mutex);
- --kfd_locked;
+ --kfd->kfd_dev_lock;
mutex_unlock(&kfd_processes_mutex);
}
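
Aside: kfd_is_locked() gains a per-device dimension here: the global kfd_locked count still covers reset/suspend, while the new kfd->kfd_dev_lock counter covers partition-mode switches on a single device; with no device argument, any cgroup-visible locked device counts. A standalone sketch of the two-level check, under the simplifying assumption that the devcgroup check is a plain predicate:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t processes_mutex = PTHREAD_MUTEX_INITIALIZER;
static int global_locked;            /* reset/suspend refcount            */

struct kfd_dev_model {
	int dev_lock;                /* per-device partition-switch count */
	bool cgroup_visible;         /* stand-in for devcgroup check      */
};

/* Caller holds processes_mutex, mirroring lockdep_assert_held() above. */
static bool is_locked(struct kfd_dev_model *devs, int ndev,
		      struct kfd_dev_model *dev)
{
	if (global_locked > 0)
		return true;
	if (dev)                          /* specific device: check only it */
		return dev->dev_lock > 0;
	for (int i = 0; i < ndev; i++)    /* else: any visible locked dev?  */
		if (devs[i].cgroup_visible && devs[i].dev_lock > 0)
			return true;
	return false;
}
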
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 76359c6a3f3a..6c5c7c1bf5ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2312,7 +2312,7 @@ static int reset_hung_queues_sdma(struct device_queue_manager *dqm)
continue;
/* Reset engine and check. */
- if (amdgpu_sdma_reset_engine(dqm->dev->adev, i) ||
+ if (amdgpu_sdma_reset_engine(dqm->dev->adev, i, false) ||
dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j) ||
!set_sdma_queue_as_reset(dqm, doorbell_off)) {
r = -ENOTRECOVERABLE;
@@ -2339,9 +2339,18 @@ reset_fail:
static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma)
{
+ struct amdgpu_device *adev = dqm->dev->adev;
+
while (halt_if_hws_hang)
schedule();
+ if (adev->debug_disable_gpu_ring_reset) {
+ dev_info_once(adev->dev,
+ "%s queue hung, but ring reset disabled",
+ is_sdma ? "sdma" : "compute");
+
+ return -EPERM;
+ }
if (!amdgpu_gpu_recovery)
return -ENOTRECOVERABLE;
@@ -2716,7 +2725,7 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
dqm_lock(dqm);
mqd_mgr = dqm->mqd_mgrs[mqd_type];
- *mqd_size = mqd_mgr->mqd_size;
+ *mqd_size = mqd_mgr->mqd_size * NUM_XCC(mqd_mgr->dev->xcc_mask);
*ctl_stack_size = 0;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 2b294ada3ec0..82905f3e54dd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1302,7 +1302,7 @@ void kfd_signal_reset_event(struct kfd_node *dev)
if (ti) {
dev_err(dev->adev->dev,
"Queues reset on process %s tid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
+ ti->process_name, ti->tgid, ti->task.comm, ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index dbcb60eb54b2..1d170dc50df3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -23,7 +23,6 @@
*/
#include <linux/device.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sched.h>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index aee2212e52f6..33aa23450b3f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -78,8 +78,8 @@ err_ioctl:
static void kfd_exit(void)
{
kfd_cleanup_processes();
- kfd_debugfs_fini();
kfd_process_destroy_wq();
+ kfd_debugfs_fini();
kfd_procfs_shutdown();
kfd_topology_shutdown();
kfd_chardev_exit();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 97933d2a3803..f2dee320fada 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -373,7 +373,7 @@ static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stac
{
struct v9_mqd *m = get_mqd(mqd);
- *ctl_stack_size = m->cp_hqd_cntl_stack_size;
+ *ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask);
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
@@ -388,6 +388,24 @@ static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, voi
memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
}
+static void checkpoint_mqd_v9_4_3(struct mqd_manager *mm,
+ void *mqd,
+ void *mqd_dst,
+ void *ctl_stack_dst)
+{
+ struct v9_mqd *m;
+ int xcc;
+ uint64_t size = get_mqd(mqd)->cp_mqd_stride_size;
+
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ m = get_mqd(mqd + size * xcc);
+
+ checkpoint_mqd(mm, m,
+ (uint8_t *)mqd_dst + sizeof(*m) * xcc,
+ (uint8_t *)ctl_stack_dst + m->cp_hqd_cntl_stack_size * xcc);
+ }
+}
+
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
@@ -764,13 +782,35 @@ static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
const void *mqd_src,
const void *ctl_stack_src, u32 ctl_stack_size)
{
- restore_mqd(mm, mqd, mqd_mem_obj, gart_addr, qp, mqd_src, ctl_stack_src, ctl_stack_size);
- if (amdgpu_sriov_multi_vf_mode(mm->dev->adev)) {
- struct v9_mqd *m;
+ struct kfd_mem_obj xcc_mqd_mem_obj;
+ u32 mqd_ctl_stack_size;
+ struct v9_mqd *m;
+ u32 num_xcc;
+ int xcc;
- m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
- m->cp_hqd_pq_doorbell_control |= 1 <<
- CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
+ uint64_t offset = mm->mqd_stride(mm, qp);
+
+ mm->dev->dqm->current_logical_xcc_start++;
+
+ num_xcc = NUM_XCC(mm->dev->xcc_mask);
+ mqd_ctl_stack_size = ctl_stack_size / num_xcc;
+
+ memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
+
+ /* Set the MQD pointer and gart address to XCC0 MQD */
+ *mqd = mqd_mem_obj->cpu_ptr;
+ if (gart_addr)
+ *gart_addr = mqd_mem_obj->gpu_addr;
+
+ for (xcc = 0; xcc < num_xcc; xcc++) {
+ get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset * xcc);
+ restore_mqd(mm, (void **)&m,
+ &xcc_mqd_mem_obj,
+ NULL,
+ qp,
+ (uint8_t *)mqd_src + xcc * sizeof(*m),
+ (uint8_t *)ctl_stack_src + xcc * mqd_ctl_stack_size,
+ mqd_ctl_stack_size);
}
}
static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
@@ -906,7 +946,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->free_mqd = kfd_free_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->get_checkpoint_info = get_checkpoint_info;
- mqd->checkpoint_mqd = checkpoint_mqd;
mqd->mqd_size = sizeof(struct v9_mqd);
mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
@@ -918,16 +957,18 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->init_mqd = init_mqd_v9_4_3;
mqd->load_mqd = load_mqd_v9_4_3;
mqd->update_mqd = update_mqd_v9_4_3;
- mqd->restore_mqd = restore_mqd_v9_4_3;
mqd->destroy_mqd = destroy_mqd_v9_4_3;
mqd->get_wave_state = get_wave_state_v9_4_3;
+ mqd->checkpoint_mqd = checkpoint_mqd_v9_4_3;
+ mqd->restore_mqd = restore_mqd_v9_4_3;
} else {
mqd->init_mqd = init_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
- mqd->restore_mqd = restore_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->get_wave_state = get_wave_state;
+ mqd->checkpoint_mqd = checkpoint_mqd;
+ mqd->restore_mqd = restore_mqd;
}
break;
case KFD_MQD_TYPE_HIQ:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d221c58dccc3..67694bcd9464 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -372,6 +372,9 @@ struct kfd_dev {
/* bitmap for dynamic doorbell allocation from doorbell object */
unsigned long *doorbell_bitmap;
+
+ /* for dynamic partitioning */
+ int kfd_dev_lock;
};
enum kfd_mempool {
@@ -1536,7 +1539,7 @@ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
int kfd_send_exception_to_runtime(struct kfd_process *p,
unsigned int queue_id,
uint64_t error_reason);
-bool kfd_is_locked(void);
+bool kfd_is_locked(struct kfd_dev *kfd);
/* Compute profile */
void kfd_inc_compute_active(struct kfd_node *dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 722ac1662bdc..5be28c6c4f6a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -854,7 +854,7 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
*/
mutex_lock(&kfd_processes_mutex);
- if (kfd_is_locked()) {
+ if (kfd_is_locked(NULL)) {
pr_debug("KFD is locked! Cannot create process");
process = ERR_PTR(-EINVAL);
goto out;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index c643e0ccec52..7fbb5c274ccc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -914,7 +914,10 @@ static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
q_data = (struct kfd_criu_queue_priv_data *)q_private_data;
- /* data stored in this order: priv_data, mqd, ctl_stack */
+ /*
+ * data stored in this order:
+ * priv_data, mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
+ */
q_data->mqd_size = mqd_size;
q_data->ctl_stack_size = ctl_stack_size;
@@ -963,7 +966,7 @@ int kfd_criu_checkpoint_queues(struct kfd_process *p,
}
static void set_queue_properties_from_criu(struct queue_properties *qp,
- struct kfd_criu_queue_priv_data *q_data)
+ struct kfd_criu_queue_priv_data *q_data, uint32_t num_xcc)
{
qp->is_interop = false;
qp->queue_percent = q_data->q_percent;
@@ -976,7 +979,11 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
- qp->ctl_stack_size = q_data->ctl_stack_size;
+ if (q_data->type == KFD_QUEUE_TYPE_COMPUTE)
+ qp->ctl_stack_size = q_data->ctl_stack_size / num_xcc;
+ else
+ qp->ctl_stack_size = q_data->ctl_stack_size;
+
qp->type = q_data->type;
qp->format = q_data->format;
}
@@ -1036,12 +1043,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
goto exit;
}
- /* data stored in this order: mqd, ctl_stack */
+ /*
+ * data stored in this order:
+ * mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
+ */
mqd = q_extra_data;
ctl_stack = mqd + q_data->mqd_size;
memset(&qp, 0, sizeof(qp));
- set_queue_properties_from_criu(&qp, q_data);
+ set_queue_properties_from_criu(&qp, q_data, NUM_XCC(pdd->dev->adev->gfx.xcc_mask));
print_queue_properties(&qp);
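
Aside: for multi-XCC parts, a checkpointed compute queue now stores one MQD and one control stack per XCC, back to back, and the sizes recorded in q_data are whole-buffer sizes (hence mqd_size and ctl_stack_size scaled by NUM_XCC on the checkpoint side and divided back on restore). A small sketch of the offset arithmetic, with made-up sizes:

#include <assert.h>
#include <stddef.h>

/* Illustrative only: layout is mqd[0..n-1] then ctl_stack[0..n-1]. */
static size_t mqd_offset(size_t per_xcc_mqd, int xcc)
{
	return per_xcc_mqd * xcc;
}

static size_t ctl_stack_offset(size_t total_ctl, int num_xcc, int xcc)
{
	size_t per_xcc = total_ctl / num_xcc;   /* restore-side split */
	return per_xcc * xcc;
}

int main(void)
{
	int num_xcc = 4;
	size_t per_mqd = 512, total_ctl = 4096;

	/* xcc2's control stack starts 2 KiB into the control-stack area */
	assert(ctl_stack_offset(total_ctl, num_xcc, 2) == 2048);
	assert(mqd_offset(per_mqd, 3) == 1536);
	return 0;
}
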
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 83d9384ac815..a499449fcb06 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -253,9 +253,9 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
task_info = amdgpu_vm_get_task_info_pasid(dev->adev, pasid);
if (task_info) {
/* Report VM faults from user applications, not retry from kernel */
- if (task_info->pid)
+ if (task_info->task.pid)
kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, KFD_EVENT_FMT_VMFAULT(
- task_info->pid, task_info->task_name));
+ task_info->task.pid, task_info->task.comm));
amdgpu_vm_put_task_info(task_info);
}
}
@@ -359,8 +359,8 @@ void kfd_smi_event_process(struct kfd_process_device *pdd, bool start)
kfd_smi_event_add(0, pdd->dev,
start ? KFD_SMI_EVENT_PROCESS_START :
KFD_SMI_EVENT_PROCESS_END,
- KFD_EVENT_FMT_PROCESS(task_info->pid,
- task_info->task_name));
+ KFD_EVENT_FMT_PROCESS(task_info->task.pid,
+ task_info->task.comm));
amdgpu_vm_put_task_info(task_info);
}
}
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
index faed84172dd4..8bc36f04b1b7 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f58fa5da7fe5..cd0e2976e268 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1758,10 +1758,11 @@ dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
return DMUB_STATUS_TIMEOUT;
}
-static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
+static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
{
- struct dml2_soc_bb *bb;
+ void *bb;
long long addr;
+ unsigned int bb_size;
int i = 0;
uint16_t chunk;
enum dmub_gpint_command send_addrs[] = {
@@ -1774,6 +1775,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(4, 0, 1):
+ bb_size = sizeof(struct dml2_soc_bb);
break;
default:
return NULL;
@@ -1781,7 +1783,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
bb = dm_allocate_gpu_mem(adev,
DC_MEM_ALLOC_TYPE_GART,
- sizeof(struct dml2_soc_bb),
+ bb_size,
&addr);
if (!bb)
return NULL;
@@ -1847,7 +1849,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.audio_lock);
if (amdgpu_dm_irq_init(adev)) {
- drm_err(adev_to_drm(adev), "amdgpu: failed to initialize DM IRQ support.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize DM IRQ support.\n");
goto error;
}
@@ -2037,7 +2039,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
- drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd rx offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
goto error;
}
@@ -2053,7 +2055,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
drm_err(adev_to_drm(adev),
- "amdgpu: failed to initialize freesync_module.\n");
+ "failed to initialize freesync_module.\n");
} else
drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
adev->dm.freesync_module);
@@ -2064,7 +2066,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.vblank_control_workqueue =
create_singlethread_workqueue("dm_vblank_control_workqueue");
if (!adev->dm.vblank_control_workqueue)
- drm_err(adev_to_drm(adev), "amdgpu: failed to initialize vblank_workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n");
}
if (adev->dm.dc->caps.ips_support &&
@@ -2075,7 +2077,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
- drm_err(adev_to_drm(adev), "amdgpu: failed to initialize hdcp_workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n");
else
drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
@@ -2085,20 +2087,20 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
if (!adev->dm.dmub_notify) {
- drm_info(adev_to_drm(adev), "amdgpu: fail to allocate adev->dm.dmub_notify");
+ drm_info(adev_to_drm(adev), "failed to allocate adev->dm.dmub_notify");
goto error;
}
adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
if (!adev->dm.delayed_hpd_wq) {
- drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to create hpd offload workqueue.\n");
goto error;
}
amdgpu_dm_outbox_init(adev);
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
dmub_aux_setconfig_callback, false)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub aux callback");
+ drm_err(adev_to_drm(adev), "failed to register dmub aux callback");
goto error;
}
@@ -2107,7 +2109,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
dmub_aux_fused_io_callback, false)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub fused io callback");
+ drm_err(adev_to_drm(adev), "failed to register dmub fused io callback");
goto error;
}
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
@@ -2125,7 +2127,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dm_initialize_drm_device(adev)) {
drm_err(adev_to_drm(adev),
- "amdgpu: failed to initialize sw for display support.\n");
+ "failed to initialize sw for display support.\n");
goto error;
}
@@ -2140,14 +2142,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
drm_err(adev_to_drm(adev),
- "amdgpu: failed to initialize sw for display support.\n");
+ "failed to initialize sw for display support.\n");
goto error;
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
amdgpu_dm_crtc_secure_display_create_contexts(adev);
if (!adev->dm.secure_display_ctx.crtc_ctx)
- drm_err(adev_to_drm(adev), "amdgpu: failed to initialize secure display contexts.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize secure display contexts.\n");
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
adev->dm.secure_display_ctx.support_mul_roi = true;
@@ -2404,6 +2406,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
};
int r;
@@ -2570,7 +2573,7 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block)
adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
if (!adev->dm.cgs_device) {
- drm_err(adev_to_drm(adev), "amdgpu: failed to create cgs device.\n");
+ drm_err(adev_to_drm(adev), "failed to create cgs device.\n");
return -EINVAL;
}
@@ -3060,6 +3063,77 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
}
}
+static int dm_cache_state(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state)) {
+ r = PTR_ERR(adev->dm.cached_state);
+ adev->dm.cached_state = NULL;
+ }
+
+ return adev->dm.cached_state ? 0 : r;
+}
+
+static void dm_destroy_cached_state(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct dm_plane_state *dm_new_plane_state;
+ struct drm_plane_state *new_plane_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int i;
+
+ if (!dm->cached_state)
+ return;
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ new_crtc_state->active_changed = true;
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+ }
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ dm_new_crtc_state->base.color_mgmt_changed = true;
+ }
+
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ drm_atomic_helper_resume(ddev, dm->cached_state);
+
+ dm->cached_state = NULL;
+}
+
+static void dm_complete(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ dm_destroy_cached_state(adev);
+}
+
static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
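For reference, a caller-side sketch of the two helpers introduced above (not
part of the patch; the flow is inferred from the hunks that follow):

    /* suspend path: cache the atomic state once */
    int r = dm_cache_state(adev);
    if (r)
        return r;                   /* drm_atomic_helper_suspend() failed */

    /* resume or .complete path: restore and drop the cached state */
    dm_destroy_cached_state(adev);  /* no-op when nothing was cached */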
@@ -3068,11 +3142,8 @@ static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
return 0;
WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- if (IS_ERR(adev->dm.cached_state))
- return PTR_ERR(adev->dm.cached_state);
- return 0;
+ return dm_cache_state(adev);
}
static int dm_suspend(struct amdgpu_ip_block *ip_block)
@@ -3106,9 +3177,10 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
}
if (!adev->dm.cached_state) {
- adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- if (IS_ERR(adev->dm.cached_state))
- return PTR_ERR(adev->dm.cached_state);
+ int r = dm_cache_state(adev);
+
+ if (r)
+ return r;
}
s3_handle_hdmi_cec(adev_to_drm(adev), true);
@@ -3295,12 +3367,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
- struct dm_crtc_state *dm_new_crtc_state;
- struct drm_plane *plane;
- struct drm_plane_state *new_plane_state;
- struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
struct dc_state *dc_state;
@@ -3332,8 +3398,10 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
r = dm_dmub_hw_init(adev);
- if (r)
+ if (r) {
drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
+ return r;
+ }
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -3457,40 +3525,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
}
drm_connector_list_iter_end(&iter);
- /* Force mode set in atomic commit */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
- new_crtc_state->active_changed = true;
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- reset_freesync_config_for_crtc(dm_new_crtc_state);
- }
-
- /*
- * atomic_check is expected to create the dc states. We need to release
- * them here, since they were duplicated as part of the suspend
- * procedure.
- */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- if (dm_new_crtc_state->stream) {
- WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
- dc_stream_release(dm_new_crtc_state->stream);
- dm_new_crtc_state->stream = NULL;
- }
- dm_new_crtc_state->base.color_mgmt_changed = true;
- }
-
- for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
- dm_new_plane_state = to_dm_plane_state(new_plane_state);
- if (dm_new_plane_state->dc_state) {
- WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
- dc_plane_state_release(dm_new_plane_state->dc_state);
- dm_new_plane_state->dc_state = NULL;
- }
- }
-
- drm_atomic_helper_resume(ddev, dm->cached_state);
-
- dm->cached_state = NULL;
+ dm_destroy_cached_state(adev);
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
@@ -3539,6 +3574,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.prepare_suspend = dm_prepare_suspend,
.suspend = dm_suspend,
.resume = dm_resume,
+ .complete = dm_complete,
.is_idle = dm_is_idle,
.wait_for_idle = dm_wait_for_idle,
.check_soft_reset = dm_check_soft_reset,
@@ -4003,19 +4039,19 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
dmub_hpd_callback, true)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
dmub_hpd_callback, true)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
dmub_hpd_sense_callback, true)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd sense callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd sense callback");
return -EINVAL;
}
}
@@ -4720,16 +4756,16 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
-/* Rescale from [min..max] to [0..MAX_BACKLIGHT_LEVEL] */
+/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */
static inline u32 scale_input_to_fw(int min, int max, u64 input)
{
- return DIV_ROUND_CLOSEST_ULL(input * MAX_BACKLIGHT_LEVEL, max - min);
+ return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min);
}
-/* Rescale from [0..MAX_BACKLIGHT_LEVEL] to [min..max] */
+/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */
static inline u32 scale_fw_to_input(int min, int max, u64 input)
{
- return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), MAX_BACKLIGHT_LEVEL);
+ return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL);
}
static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
@@ -4829,6 +4865,14 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
reallow_idle = true;
}
+ if (trace_amdgpu_dm_brightness_enabled()) {
+ trace_amdgpu_dm_brightness(__builtin_return_address(0),
+ user_brightness,
+ brightness,
+ caps->aux_support,
+ power_supply_is_system_supplied() > 0);
+ }
+
if (caps->aux_support) {
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
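The trace_amdgpu_dm_brightness_enabled() guard is the per-event predicate
that TRACE_EVENT() generates; it compiles down to a static-key test, so the
comparatively costly power_supply_is_system_supplied() call is only made
when the tracepoint is actually enabled. The general shape of the idiom,
with a hypothetical event name:

    if (trace_my_event_enabled())   /* near-free static branch when tracing is off */
        trace_my_event(arg, expensive_to_compute(arg));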
@@ -4941,9 +4985,9 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
caps = &dm->backlight_caps[aconnector->bl_idx];
if (get_brightness_range(caps, &min, &max)) {
if (power_supply_is_system_supplied() > 0)
- props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->ac_level, 100);
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100);
else
- props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->dc_level, 100);
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100);
/* min is zero, so max needs to be adjusted */
props.max_brightness = max - min;
drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max,
@@ -5368,7 +5412,8 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
- drm_atomic_private_obj_fini(&dm->atomic_obj);
+ if (dm->atomic_obj.state)
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
}
/******************************************************************************
@@ -7534,7 +7579,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
dc_result = DC_FAIL_ATTACH_SURFACES;
if (dc_result == DC_OK)
- dc_result = dc_validate_global_state(dc, dc_state, true);
+ dc_result = dc_validate_global_state(dc, dc_state, DC_VALIDATE_MODE_ONLY);
cleanup:
if (dc_state)
@@ -7592,7 +7637,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
if (dc_result != DC_OK) {
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
+ DRM_DEBUG_KMS("Pruned mode %d x %d (clk %d) %s %s -- %s\n",
drm_mode->hdisplay,
drm_mode->vdisplay,
drm_mode->clock,
@@ -7859,6 +7904,23 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
int clock, bpp = 0;
bool is_y420 = false;
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ enum drm_mode_status result;
+
+ result = drm_crtc_helper_mode_valid_fixed(encoder->crtc, adjusted_mode, native_mode);
+ if (result != MODE_OK && dm_new_connector_state->scaling == RMX_OFF) {
+ drm_dbg_driver(encoder->dev,
+ "mode %dx%d@%dHz is not native, enabling scaling\n",
+ adjusted_mode->hdisplay, adjusted_mode->vdisplay,
+ drm_mode_vrefresh(adjusted_mode));
+ dm_new_connector_state->scaling = RMX_FULL;
+ }
+ return 0;
+ }
+
if (!aconnector->mst_output_port)
return 0;
@@ -8316,7 +8378,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
- if (encoder)
+ if (encoder && (connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, drm_edid);
}
@@ -12156,7 +12219,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n");
goto fail;
}
- status = dc_validate_global_state(dc, dm_state->context, true);
+ status = dc_validate_global_state(dc, dm_state->context, DC_VALIDATE_MODE_ONLY);
if (status != DC_OK) {
drm_dbg_atomic(dev, "DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
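Several hunks above replace the old boolean fast-validate flag with an
enum dc_validate_mode argument. The enum definition itself is not shown in
this series; judging from the call sites alone, it carries at least these
two values (a sketch, not the authoritative definition):

    enum dc_validate_mode {
        DC_VALIDATE_MODE_AND_PROGRAMMING, /* full validation on commit paths */
        DC_VALIDATE_MODE_ONLY,            /* yes/no mode check, e.g. when pruning modes */
    };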
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d7d92f9911e4..b937da0a4e4a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -636,8 +636,9 @@ struct amdgpu_display_manager {
* @bb_from_dmub:
*
* Bounding box data read from dmub during early initialization for DCN4+
+ * Data is stored as a byte array that should be cast to the appropriate bb struct.
*/
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
/**
* @oem_i2c:
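With bb_from_dmub now typed void *, each consumer casts the byte array to
the bounding-box layout of its DCN generation. A hypothetical DCN4 consumer,
assuming the previous dml2_soc_bb layout still applies there:

    struct dml2_soc_bb *soc_bb;

    soc_bb = (struct dml2_soc_bb *)adev->dm.bb_from_dmub; /* DCN4+ layout */
    if (soc_bb)
        apply_soc_bb(soc_bb); /* hypothetical helper */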
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 2551823382f8..010172f930ae 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -661,6 +661,15 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) {
+ struct drm_plane_state *primary_state;
+
+ /* Pull in primary plane for correct VRR handling */
+ primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+ if (IS_ERR(primary_state))
+ return PTR_ERR(primary_state);
+ }
+
/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index c7d13e743e6c..b726bcd18e29 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3988,7 +3988,7 @@ static int capabilities_show(struct seq_file *m, void *unused)
struct hubbub *hubbub = dc->res_pool->hubbub;
- if (hubbub->funcs->get_mall_en)
+ if (hubbub && hubbub->funcs->get_mall_en)
hubbub->funcs->get_mall_en(hubbub, &mall_in_use);
if (dc->cap_funcs.get_subvp_en)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 25e8befbcc47..7187d5aedf0a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -107,7 +107,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
if (payload.write && result >= 0) {
if (result) {
/*one byte indicating partially written bytes*/
- drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n");
+ drm_dbg_dp(adev_to_drm(adev), "AUX partially written\n");
result = payload.data[0];
} else if (!payload.reply[0])
/*I2C_ACK|AUX_ACK*/
@@ -133,11 +133,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
- drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
+ drm_dbg_dp(adev_to_drm(adev), "DP AUX transfer fail:%d\n", operation_result);
}
if (payload.reply[0])
- drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
+ drm_dbg_dp(adev_to_drm(adev), "AUX reply command not ACK: 0x%02x.",
payload.reply[0]);
return result;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index b7c6e8d13435..eef51652ca35 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -92,9 +92,9 @@ enum dm_micro_swizzle {
MICRO_SWIZZLE_R = 3
};
-const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier)
{
- return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
+ return amdgpu_lookup_format_info(pixel_format, modifier);
}
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 615d2ab2b803..ea2619b507db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -58,7 +58,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap);
-const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier);
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index f984cb0cb889..ff7b867ae98b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -119,8 +119,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
psr_config.allow_multi_disp_optimizations =
(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
- if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
- return false;
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
+ if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
+ return false;
+ }
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 4686d4b0cbad..95f890fda8aa 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -726,6 +726,32 @@ TRACE_EVENT(dcn_optc_lock_unlock_state,
)
);
+TRACE_EVENT(amdgpu_dm_brightness,
+ TP_PROTO(void *function, u32 user_brightness, u32 converted_brightness, bool aux, bool ac),
+ TP_ARGS(function, user_brightness, converted_brightness, aux, ac),
+ TP_STRUCT__entry(
+ __field(void *, function)
+ __field(u32, user_brightness)
+ __field(u32, converted_brightness)
+ __field(bool, aux)
+ __field(bool, ac)
+ ),
+ TP_fast_assign(
+ __entry->function = function;
+ __entry->user_brightness = user_brightness;
+ __entry->converted_brightness = converted_brightness;
+ __entry->aux = aux;
+ __entry->ac = ac;
+ ),
+ TP_printk("%ps: brightness requested=%u converted=%u aux=%s power=%s",
+ (void *)__entry->function,
+ (u32)__entry->user_brightness,
+ (u32)__entry->converted_brightness,
+ (__entry->aux) ? "true" : "false",
+ (__entry->ac) ? "AC" : "DC"
+ )
+);
+
#endif /* _AMDGPU_DM_TRACE_H_ */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 2c645dffec18..f2b1720a6a66 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -396,6 +396,7 @@ static enum bp_result transmitter_control_v1_7(
process_phy_transition_init_params.display_port_link_rate = link->cur_link_settings.link_rate;
process_phy_transition_init_params.transition_bitmask = link->phy_transition_bitmask;
}
+ dig_v1_7.skip_phy_ssc_reduction = link->wa_flags.skip_phy_ssc_reduction;
}
// Handle PRE_OFF_TO_ON: Process ACPI PHY Transition Interlock
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index d9955c5d2e5e..60021671b386 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -112,7 +112,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
###############################################################################
# DCN30
###############################################################################
-CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o
+CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o dcn30m_clk_mgr.o dcn30m_clk_mgr_smu_msg.o
AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4c3e58c730b1..4071851f9e86 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -67,7 +67,7 @@ int clk_mgr_helper_get_active_display_cnt(
if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
continue;
- if (!stream->dpms_off || (stream_status && stream_status->plane_count))
+ if (!stream->dpms_off || dc->is_switch_in_progress_dest || (stream_status && stream_status->plane_count))
display_count++;
}
@@ -158,7 +158,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
return NULL;
}
dce60_clk_mgr_construct(ctx, clk_mgr);
- dce_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index 26feefbb8990..f5ad0a177038 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -245,6 +245,11 @@ int dce_set_clock(
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ /* DCE 6.0, DCE 6.4: engine clock is the same as PLL0 */
+ if (clk_mgr_base->ctx->dce_version == DCE_VERSION_6_0 ||
+ clk_mgr_base->ctx->dce_version == DCE_VERSION_6_4)
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_PLL0;
+
if (clk_mgr_dce->dfs_bypass_active)
pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
index fa09c594fd36..06da34676965 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
@@ -56,6 +56,7 @@
#define DALSMC_MSG_SetDisplayRefreshFromMall 0xF
#define DALSMC_MSG_SetExternalClientDfCstateAllow 0x10
#define DALSMC_MSG_BacoAudioD3PME 0x11
-#define DALSMC_Message_Count 0x12
+#define DALSMC_MSG_SmartAccess 0x12
+#define DALSMC_Message_Count 0x13
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 8083a553c60e..ef77fcd164ed 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -30,6 +30,7 @@
#include "dce100/dce_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dml/dcn30/dcn30_fpu.h"
+#include "dcn30/dcn30m_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@@ -498,7 +499,8 @@ static struct clk_mgr_funcs dcn3_funcs = {
.are_clock_states_equal = dcn3_are_clock_states_equal,
.enable_pme_wa = dcn3_enable_pme_wa,
.notify_link_rate_change = dcn30_notify_link_rate_change,
- .is_smu_present = dcn3_is_smu_present
+ .is_smu_present = dcn3_is_smu_present,
+ .set_smartmux_switch = dcn30m_set_smartmux_switch
};
static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c
new file mode 100644
index 000000000000..8e8a11c7437e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "clk_mgr_internal.h"
+#include "dcn30/dcn30m_clk_mgr.h"
+#include "dcn30m_clk_mgr_smu_msg.h"
+
+
+uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return dcn30m_smu_set_smart_mux_switch(clk_mgr, pins_to_set);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h
new file mode 100644
index 000000000000..757985b2eadc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30M_CLK_MGR_H__
+#define __DCN30M_CLK_MGR_H__
+
+uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set);
+
+#endif //__DCN30M_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c
new file mode 100644
index 000000000000..0dd0583ff21e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn30m_clk_mgr_smu_msg.h"
+
+#include "clk_mgr_internal.h"
+#include "reg_helper.h"
+#include "dm_helpers.h"
+
+#include "dalsmc.h"
+
+#define mmDAL_MSG_REG 0x1628A
+#define mmDAL_ARG_REG 0x16273
+#define mmDAL_RESP_REG 0x16274
+
+#define REG(reg_name) \
+ mm ## reg_name
+
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ CTX->logger
+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
+
+
+/*
+ * Function to be used instead of REG_WAIT macro because the wait ends when
+ * the register is NOT EQUAL to zero, and because the translation in msg_if.h
+ * won't work with REG_WAIT.
+ */
+static uint32_t dcn30m_smu_wait_for_response(struct clk_mgr_internal *clk_mgr,
+ unsigned int delay_us, unsigned int max_retries)
+{
+ uint32_t reg = 0;
+
+ do {
+ reg = REG_READ(DAL_RESP_REG);
+ if (reg)
+ break;
+
+ if (delay_us >= 1000)
+ msleep(delay_us/1000);
+ else if (delay_us > 0)
+ udelay(delay_us);
+ } while (max_retries--);
+
+ /* handle DALSMC_Result_CmdRejectedBusy? */
+
+ /* Log? */
+
+ return reg;
+}
+
+static bool dcn30m_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ uint32_t msg_id, uint32_t param_in, uint32_t *param_out)
+{
+ uint32_t result;
+ /* Wait for response register to be ready */
+ dcn30m_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ /* Clear response register */
+ REG_WRITE(DAL_RESP_REG, 0);
+
+ /* Set the parameter register for the SMU message */
+ REG_WRITE(DAL_ARG_REG, param_in);
+
+ /* Trigger the message transaction by writing the message ID */
+ REG_WRITE(DAL_MSG_REG, msg_id);
+
+ result = dcn30m_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ if (IS_SMU_TIMEOUT(result))
+ dm_helpers_smu_timeout(CTX, msg_id, param_in, 10 * 200000);
+
+ /* If the SMU accepted the message, read back the out-parameter */
+ if (result == DALSMC_Result_OK) {
+ if (param_out)
+ *param_out = REG_READ(DAL_ARG_REG);
+
+ return true;
+ }
+
+ return false;
+}
+
+uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Set SmartMux Switch: switch_dgpu = %d\n", pins_to_set);
+
+ dcn30m_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SmartAccess, pins_to_set, &response);
+
+ return response;
+}
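The new helper drives the usual three-register DAL/SMU mailbox handshake:

    /* 1. poll RESP until non-zero -> previous transaction has completed */
    /* 2. write 0 to RESP          -> arm the response for this message  */
    /* 3. write the parameter to ARG                                     */
    /* 4. write the message id to MSG -> triggers the SMU                */
    /* 5. poll RESP; on DALSMC_Result_OK, ARG holds the out-parameter    */

dcn30m_set_smartmux_switch() is a thin wrapper that sends
DALSMC_MSG_SmartAccess with the pin mask and returns the SMU's response.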
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h
new file mode 100644
index 000000000000..8a59a473fc5e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_
+#define DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_
+
+#include "core_types.h"
+
+struct clk_mgr_internal;
+
+uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set);
+#endif /* DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 4b17d2fcd565..b59703467128 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -22,8 +22,6 @@
#include "dcn/dcn_4_1_0_offset.h"
#include "dcn/dcn_4_1_0_sh_mask.h"
-#include "dml/dcn401/dcn401_fpu.h"
-
#define DCN_BASE__INST0_SEG1 0x000000C0
#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37
@@ -183,43 +181,36 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
static void dcn401_build_wm_range_table(struct clk_mgr *clk_mgr)
{
- /* legacy */
- DC_FP_START();
- dcn401_build_wm_range_table_fpu(clk_mgr);
- DC_FP_END();
-
- if (clk_mgr->ctx->dc->debug.using_dml21) {
- /* For min clocks use as reported by PM FW and report those as min */
- uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
- uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
-
- /* Set A - Normal - default values */
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set B - Unused on dcn4 */
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false;
-
- /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */
- /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
- if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF;
- } else {
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false;
- }
-
- /* Set 1B - Unused on dcn4 */
- clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false;
+ /* For min clocks use as reported by PM FW and report those as min */
+ uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
+ uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
+
+ /* Set A - Normal - default values */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
+
+ /* Set B - Unused on dcn4 */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false;
+
+ /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */
+ /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
+ if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF;
+ } else {
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false;
}
+
+ /* Set 1B - Unused on dcn4 */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false;
}
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
@@ -320,6 +311,25 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_build_wm_range_table(clk_mgr_base);
}
+bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->smu_present && clk_mgr->dpm_present &&
+ ((clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz));
+}
+
static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
@@ -1490,6 +1500,35 @@ static int dcn401_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
return 0;
}
+unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ unsigned int num_clk_levels;
+
+ switch (clk_type) {
+ case CLK_TYPE_DISPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dispclk;
+ case CLK_TYPE_DPPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dppclk;
+ case CLK_TYPE_DSCCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 :
+ clk_mgr->base.boot_snapshot.dispclk / 3;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static struct clk_mgr_funcs dcn401_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn401_get_dtb_ref_freq_khz,
@@ -1505,6 +1544,8 @@ static struct clk_mgr_funcs dcn401_funcs = {
.get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist,
.get_hard_min_memclk = dcn401_get_hard_min_memclk,
.get_hard_min_fclk = dcn401_get_hard_min_fclk,
+ .is_dc_mode_present = dcn401_is_dc_mode_present,
+ .get_max_clock_khz = dcn401_get_max_clock_khz,
};
struct clk_mgr_internal *dcn401_clk_mgr_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
index 6c9ae5ca2c7e..97a1ce1e8a9e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
@@ -105,10 +105,13 @@ struct dcn401_clk_mgr {
};
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base);
+bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base);
struct clk_mgr_internal *dcn401_clk_mgr_construct(struct dc_context *ctx,
struct dccg *dccg);
void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
+unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
+
#endif /* __DCN401_CLK_MGR_H_ */
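A hypothetical caller of the new hook, following the usual convention of
NULL-checking optional clk_mgr function pointers:

    unsigned int dispclk_khz = 0;

    if (clk_mgr->funcs->get_max_clock_khz)
        dispclk_khz = clk_mgr->funcs->get_max_clock_khz(clk_mgr, CLK_TYPE_DISPCLK);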
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index b34b5b52236d..9ab0ee20ca6f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -938,17 +938,18 @@ static void dc_destruct(struct dc *dc)
if (dc->link_srv)
link_destroy_link_service(&dc->link_srv);
- if (dc->ctx->gpio_service)
- dal_gpio_service_destroy(&dc->ctx->gpio_service);
+ if (dc->ctx) {
+ if (dc->ctx->gpio_service)
+ dal_gpio_service_destroy(&dc->ctx->gpio_service);
- if (dc->ctx->created_bios)
- dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ if (dc->ctx->created_bios)
+ dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ kfree(dc->ctx->logger);
+ dc_perf_trace_destroy(&dc->ctx->perf_trace);
- kfree(dc->ctx->logger);
- dc_perf_trace_destroy(&dc->ctx->perf_trace);
-
- kfree(dc->ctx);
- dc->ctx = NULL;
+ kfree(dc->ctx);
+ dc->ctx = NULL;
+ }
kfree(dc->bw_vbios);
dc->bw_vbios = NULL;
@@ -976,6 +977,8 @@ static bool dc_construct_ctx(struct dc *dc,
if (!dc_ctx)
return false;
+ dc_stream_init_rmcm_3dlut(dc);
+
dc_ctx->cgs_device = init_params->cgs_device;
dc_ctx->driver_context = init_params->driver;
dc_ctx->dc = dc;
@@ -2381,7 +2384,7 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
context->power_source = params->power_source;
- res = dc_validate_with_context(dc, set, params->stream_count, context, false);
+ res = dc_validate_with_context(dc, set, params->stream_count, context, DC_VALIDATE_MODE_AND_PROGRAMMING);
/*
* Only update link encoder to stream assignment after bandwidth validation passed.
@@ -3304,7 +3307,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (dsc_validate_context) {
stream->timing.dsc_cfg = *update->dsc_config;
stream->timing.flags.DSC = enable_dsc;
- if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true) != DC_OK) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context,
+ DC_VALIDATE_MODE_ONLY) != DC_OK) {
stream->timing.dsc_cfg = old_dsc_cfg;
stream->timing.flags.DSC = old_dsc_enabled;
update->dsc_config = NULL;
@@ -3526,7 +3530,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
if (update_type == UPDATE_TYPE_FULL) {
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -4632,7 +4636,8 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
/* commit minimal state */
- if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false) == DC_OK) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context,
+ DC_VALIDATE_MODE_AND_PROGRAMMING) == DC_OK) {
/* prevent underflow and corruption when reconfiguring pipes */
force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
} else {
@@ -5155,7 +5160,7 @@ static bool update_planes_and_stream_v1(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
if (update_type >= UPDATE_TYPE_FULL) {
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
DC_ERROR("Mode validation failed for stream update!\n");
dc_state_release(context);
return false;
@@ -5439,8 +5444,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
else
ret = update_planes_and_stream_v2(dc, srf_updates,
surface_count, stream, stream_update);
-
- if (ret)
+ if (ret && (dc->ctx->dce_version >= DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_01))
clear_update_flags(srf_updates, surface_count, stream);
return ret;
@@ -5471,7 +5476,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
stream_update, state);
- if (ret)
+ if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
clear_update_flags(srf_updates, surface_count, stream);
}
@@ -5544,6 +5549,15 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
}
break;
+ case DC_ACPI_CM_POWER_STATE_D3:
+ if (dc->caps.ips_support)
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
+
+ if (dc->caps.ips_v2_support) {
+ if (dc->clk_mgr->funcs->set_low_power_state)
+ dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
+ }
+ break;
default:
ASSERT(dc->current_state->stream_count == 0);
dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
@@ -6341,13 +6355,14 @@ void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
}
-/*
- *****************************************************************************
+/**
* dc_get_power_profile_for_dc_state() - extracts power profile from dc state
*
* Called when DM wants to make power policy decisions based on dc_state
*
- *****************************************************************************
+ * @context: Pointer to the dc_state from which the power profile is extracted.
+ *
+ * Return: The power profile structure containing the power level information.
*/
struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
{
@@ -6363,13 +6378,14 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
return profile;
}
-/*
- **********************************************************************************
+/**
* dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state
*
- * Called when DM wants to log detile buffer size from dc_state
+ * This function is called to log the detile buffer size from the dc_state.
+ *
+ * @context: a pointer to the dc_state from which the detile buffer size is extracted.
*
- **********************************************************************************
+ * Return: the size of the detile buffer, or 0 if not available.
*/
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
{
@@ -6380,26 +6396,27 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
else
return 0;
}
+
/**
- ***********************************************************************************************
* dc_get_host_router_index: Get index of host router from a dpia link
*
* This function returns the host router index of the target link if the target link is a DPIA link.
*
- * @param [in] link: target link
- * @param [out] host_router_index: host router index of the target link
+ * @link: Pointer to the target link (input)
+ * @host_router_index: Pointer to store the host router index of the target link (output).
*
- * @return: true if the host router index is found and valid.
+ * Return: true if the host router index is found and valid.
*
- ***********************************************************************************************
*/
bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index)
{
- struct dc *dc = link->ctx->dc;
+ struct dc *dc;
- if (link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ if (!link || !host_router_index || link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
return false;
+ dc = link->ctx->dc;
+
if (link->link_index < dc->lowest_dpia_link_index)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 7551d0a3fe82..bbce751b485f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -268,6 +268,8 @@ char *dc_status_to_str(enum dc_status status)
return "Insufficient DP link bandwidth";
case DC_FAIL_HW_CURSOR_SUPPORT:
return "HW Cursor not supported";
+ case DC_FAIL_DP_TUNNEL_BW_VALIDATE:
+ return "Fail DP Tunnel BW validation";
case DC_ERROR_UNEXPECTED:
return "Unexpected error";
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 7014b8d000bb..ec4e80e5b6eb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -427,6 +427,32 @@ void get_hdr_visual_confirm_color(
}
}
+/* Visual Confirm color definition for Smart Mux */
+void get_smartmux_visual_confirm_color(
+ struct dc *dc,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ const struct tg_color sm_ver_colors[5] = {
+ {0, 0, 0}, /* SMUX_MUXCONTROL_UNSUPPORTED - Black */
+ {0, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_v10 - Green */
+ {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_v15 - Cyan */
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_MDM - Yellow */
+ {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_vUNKNOWN - Magenta */
+ };
+
+ if (dc->caps.is_apu) {
+ /* APU driving the eDP */
+ *color = sm_ver_colors[dc->config.smart_mux_version];
+ } else {
+ /* dGPU driving the eDP - red */
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ }
+}
+
/* Visual Confirm color definition for VABC */
void get_vabc_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 71e15da4bb69..130455f2802a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -515,7 +515,8 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
link->dc->link_srv->enable_hpd_filter(link, enable);
}
-bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx)
{
- return dc->link_srv->validate_dpia_bandwidth(streams, count);
+ return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx);
}
+
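Callers migrate from the old per-stream-array boolean check to validating a
whole dc_state and propagating a dc_status. A hypothetical caller:

    enum dc_status status;

    status = dc_link_validate_dp_tunneling_bandwidth(dc, new_ctx);
    if (status != DC_OK)
        return status; /* e.g. DC_FAIL_DP_TUNNEL_BW_VALIDATE, see the dc_debug.c hunk above */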
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 3da25bd8b578..4d6181e7c612 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -3940,7 +3940,9 @@ enum dc_status resource_map_pool_resources(
/* TODO: Add check if ASIC support and EDID audio */
if (!stream->converter_disable_audio &&
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
- stream->audio_info.mode_count && stream->audio_info.flags.all) {
+ stream->audio_info.mode_count &&
+ (stream->audio_info.flags.all ||
+ (stream->sink && stream->sink->edid_caps.panel_patch.skip_audio_sab_check))) {
pipe_ctx->stream_res.audio = find_first_free_audio(
&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version);
@@ -4053,7 +4055,7 @@ static bool add_all_planes_for_stream(
* @set: An array of dc_validation_set with all the current streams reference
* @set_count: Total of streams
* @context: New context
- * @fast_validate: Enable or disable fast validation
+ * @validate_mode: the validation mode to use
*
* This function updates the potential new stream in the context object. It
* creates multiple lists for the add, remove, and unchanged streams. In
@@ -4068,7 +4070,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
const struct dc_validation_set set[],
int set_count,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 };
struct dc_stream_state *del_streams[MAX_PIPES] = { 0 };
@@ -4242,7 +4244,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false);
}
- res = dc_validate_global_state(dc, context, fast_validate);
+ res = dc_validate_global_state(dc, context, validate_mode);
/* calculate pixel rate divider after deciding pixel clock & odm combine */
if ((dc->hwss.calculate_pix_rate_divider) && (res == DC_OK)) {
@@ -4299,7 +4301,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
*
* @dc: dc struct for this driver
* @new_ctx: state to be validated
- * @fast_validate: set to true if only yes/no to support matters
+ * @validate_mode: the validation mode to use
*
* Checks hardware resource availability and bandwidth requirement.
*
@@ -4309,7 +4311,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
int i, j;
@@ -4368,7 +4370,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK)
- result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate);
+ result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, validate_mode);
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 4db7383720fd..883054bb18e7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -194,11 +194,6 @@ static void init_state(struct dc *dc, struct dc_state *state)
struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params)
{
struct dc_state *state;
-#ifdef CONFIG_DRM_AMD_DC_FP
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-#endif
state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL);
@@ -211,14 +206,12 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
#ifdef CONFIG_DRM_AMD_DC_FP
if (dc->debug.using_dml2) {
- dml2_opt->use_clock_dc_limits = false;
- if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2)) {
+ if (!dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2)) {
dc_state_release(state);
return NULL;
}
- dml2_opt->use_clock_dc_limits = true;
- if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source)) {
+ if (!dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) {
dc_state_release(state);
return NULL;
}
@@ -434,6 +427,8 @@ enum dc_status dc_state_remove_stream(
return DC_ERROR_UNEXPECTED;
}
+ dc_stream_release_3dlut_for_stream(dc, stream);
+
dc_stream_release(state->streams[i]);
state->stream_count--;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index b883fb24fa12..4d6bc9fd4faa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -857,6 +857,73 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
}
/*
+* dc_stream_get_3dlut_for_stream()
+* Requirements:
+* 1. If the stream already owns an RMCM instance, return it.
+* 2. If it doesn't and we don't need to allocate, return NULL.
+* 3. If there's a free RMCM instance, assign to stream and return it.
+* 4. If no free RMCM instances, return NULL.
+*/
+
+struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream,
+ bool allocate_one)
+{
+ unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts;
+
+ // see if one is allocated for this stream
+ for (int i = 0; i < num_rmcm; i++) {
+ if (dc->res_pool->rmcm_3dlut[i].isInUse &&
+ dc->res_pool->rmcm_3dlut[i].stream == stream)
+ return &dc->res_pool->rmcm_3dlut[i];
+ }
+
+ // case: not found one, and we don't need to allocate
+ if (!allocate_one)
+ return NULL;
+
+ // see if there is an unused 3dlut; if so, allocate it
+ for (int i = 0; i < num_rmcm; i++) {
+ if (!dc->res_pool->rmcm_3dlut[i].isInUse) {
+ dc->res_pool->rmcm_3dlut[i].isInUse = true;
+ dc->res_pool->rmcm_3dlut[i].stream = stream;
+ return &dc->res_pool->rmcm_3dlut[i];
+ }
+ }
+
+ // No free 3D LUT instance available
+ return NULL;
+}
+
+void dc_stream_release_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream)
+{
+ struct dc_rmcm_3dlut *rmcm_3dlut =
+ dc_stream_get_3dlut_for_stream(dc, stream, false);
+
+ if (rmcm_3dlut) {
+ rmcm_3dlut->isInUse = false;
+ rmcm_3dlut->stream = NULL;
+ rmcm_3dlut->protection_bits = 0;
+ }
+}
+
+void dc_stream_init_rmcm_3dlut(struct dc *dc)
+{
+ unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts;
+
+ for (int i = 0; i < num_rmcm; i++) {
+ dc->res_pool->rmcm_3dlut[i].isInUse = false;
+ dc->res_pool->rmcm_3dlut[i].stream = NULL;
+ dc->res_pool->rmcm_3dlut[i].protection_bits = 0;
+ }
+}
+
+/*
* Finds the greatest index in refresh_rate_hz that contains a value <= refresh
*/
static int dc_stream_get_nearest_smallest_index(struct dc_stream_state *stream, int refresh)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index f41073c0147e..59c07756130d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -46,6 +46,8 @@
#include "dmub/inc/dmub_cmd.h"
+#include "sspl/dc_spl_types.h"
+
struct abm_save_restore;
/* forward declaration */
@@ -53,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.334"
+#define DC_VER "3.2.340"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -66,8 +68,11 @@ struct dmub_notification;
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
+#define MAX_SUPPORTED_FORMATS 7
+
#define MAX_HOST_ROUTERS_NUM 3
-#define MAX_DPIA_PER_HOST_ROUTER 2
+#define MAX_DPIA_PER_HOST_ROUTER 3
+#define MAX_DPIA_NUM (MAX_HOST_ROUTERS_NUM * MAX_DPIA_PER_HOST_ROUTER)
/* Display Core Interfaces */
struct dc_versions {
@@ -193,6 +198,34 @@ struct dpp_color_caps {
struct rom_curve_caps ogam_rom_caps;
};
+/* Describes the HW support for 3D LUT memory layout; extend the supported
+ range to match what the OS can handle per the roadmap */
+struct lut3d_caps {
+ uint32_t dma_3d_lut : 1; /* DMA mode support for 3D LUT */
+ struct {
+ uint32_t swizzle_3d_rgb : 1;
+ uint32_t swizzle_3d_bgr : 1;
+ uint32_t linear_1d : 1;
+ } mem_layout_support;
+ struct {
+ uint32_t unorm_12msb : 1;
+ uint32_t unorm_12lsb : 1;
+ uint32_t float_fp1_5_10 : 1;
+ } mem_format_support;
+ struct {
+ uint32_t order_rgba : 1;
+ uint32_t order_bgra : 1;
+ } mem_pixel_order_support;
+ /* size options are 9, 17, 33, 45, 65 */
+ struct {
+ uint32_t dim_9 : 1; /* 3D LUT support for 9x9x9 */
+ uint32_t dim_17 : 1; /* 3D LUT support for 17x17x17 */
+ uint32_t dim_33 : 1; /* 3D LUT support for 33x33x33 */
+ uint32_t dim_45 : 1; /* 3D LUT support for 45x45x45 */
+ uint32_t dim_65 : 1; /* 3D LUT support for 65x65x65 */
+ } lut_dim_caps;
+};
+
/**
* struct mpc_color_caps - color pipeline capabilities for multiple pipe and
* plane combined blocks
@@ -204,14 +237,21 @@ struct dpp_color_caps {
* @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
* instance
* @ogam_rom_caps: pre-definied curve caps for regamma 1D LUT
+ * @mcm_3d_lut_caps: HW support cap for MCM LUT memory
+ * @rmcm_3d_lut_caps: HW support cap for RMCM LUT memory
+ * @preblend: whether color manager supports preblend with MPC
*/
struct mpc_color_caps {
uint16_t gamut_remap : 1;
uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
uint16_t num_3dluts : 3;
+ uint16_t num_rmcm_3dluts : 3;
uint16_t shared_3d_lut:1;
struct rom_curve_caps ogam_rom_caps;
+ struct lut3d_caps mcm_3d_lut_caps;
+ struct lut3d_caps rmcm_3d_lut_caps;
+ bool preblend;
};
/**
@@ -271,6 +311,7 @@ struct dc_caps {
bool dmcub_support;
bool zstate_support;
bool ips_support;
+ bool ips_v2_support;
uint32_t num_of_internal_disp;
enum dp_protocol_version max_dp_protocol_version;
unsigned int mall_size_per_mem_channel;
@@ -308,6 +349,8 @@ struct dc_caps {
struct dc_scl_caps scl_caps;
uint8_t num_of_host_routers;
uint8_t num_of_dpias_per_host_router;
+ /* Limit of the ODM alone; may be further constrained by other factors (like pipe count) */
+ uint8_t max_odm_combine_factor;
};
struct dc_bug_wa {
@@ -462,6 +505,7 @@ struct dc_config {
bool use_spl;
bool prefer_easf;
bool use_pipe_ctx_sync_logic;
+ int smart_mux_version;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
bool use_default_clock_table;
@@ -472,6 +516,7 @@ struct dc_config {
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
+ unsigned int disable_ips_rcg;
unsigned int disable_ips_in_vpb;
bool disable_ips_in_dpms_off;
bool usb4_bw_alloc_support;
@@ -484,6 +529,8 @@ struct dc_config {
bool set_pipe_unlock_order;
bool enable_dpia_pre_training;
bool unify_link_enc_assignment;
+ struct spl_sharpness_range dcn_sharpness_range;
+ struct spl_sharpness_range dcn_override_sharpness_range;
};
enum visual_confirm {
@@ -495,6 +542,7 @@ enum visual_confirm {
VISUAL_CONFIRM_SWAPCHAIN = 6,
VISUAL_CONFIRM_FAMS = 7,
VISUAL_CONFIRM_SWIZZLE = 9,
+ VISUAL_CONFIRM_SMARTMUX_DGPU = 10,
VISUAL_CONFIRM_REPLAY = 12,
VISUAL_CONFIRM_SUBVP = 14,
VISUAL_CONFIRM_MCLK_SWITCH = 16,
@@ -773,6 +821,7 @@ enum pg_hw_resources {
PG_DCHVM,
PG_DWB,
PG_HPO,
+ PG_DCOH,
PG_HW_RESOURCES_NUM_ELEMENT
};
@@ -789,10 +838,8 @@ union dpia_debug_options {
uint32_t disable_mst_dsc_work_around:1; /* bit 3 */
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
- uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */
- uint32_t enable_dpia_pre_training:1; /* bit 7 */
- uint32_t unify_link_enc_assignment:1; /* bit 8 */
- uint32_t reserved:24;
+ uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */
+ uint32_t reserved:25;
} bits;
uint32_t raw;
};
@@ -918,6 +965,9 @@ struct dc_debug_options {
bool disable_dsc_power_gate;
bool disable_optc_power_gate;
bool disable_hpo_power_gate;
+ bool disable_io_clk_power_gate;
+ bool disable_mem_power_gate;
+ bool disable_dio_power_gate;
int dsc_min_slice_height_override;
int dsc_bpp_increment_div;
bool disable_pplib_wm_range;
@@ -1154,7 +1204,7 @@ struct dc_init_data {
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
uint32_t *clk_reg_offsets;
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
};
struct dc_callback_init {
@@ -1255,6 +1305,12 @@ union dc_3dlut_state {
};
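+/* Tracks ownership of one RMCM 3D LUT HW instance by a stream. */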
+struct dc_rmcm_3dlut {
+ bool isInUse;
+ const struct dc_stream_state *stream;
+ uint8_t protection_bits;
+};
+
struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
@@ -1392,6 +1448,8 @@ struct dc_plane_state {
int sharpness_level;
enum linear_light_scaling linear_light_scaling;
unsigned int sdr_white_level_nits;
+ struct spl_sharpness_range sharpness_range;
+ enum sharpness_range_source sharpness_source;
};
struct dc_plane_info {
@@ -1573,6 +1631,7 @@ struct dc_scratch_space {
bool blank_stream_on_ocs_change;
bool read_dpcd204h_on_irq_hpd;
bool force_dp_ffe_preset;
+ bool skip_phy_ssc_reduction;
} wa_flags;
union dc_dp_ffe_preset forced_dp_ffe_preset;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -1582,6 +1641,8 @@ struct dc_scratch_space {
struct gpio *hpd_gpio;
enum dc_link_fec_state fec_state;
+ bool is_dds;
+ bool is_display_mux_present;
bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
struct dc_panel_config panel_config;
@@ -1636,6 +1697,10 @@ struct dc {
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
+ /* For eDP to know the switching state of SmartMux */
+ bool is_switch_in_progress_orig;
+ bool is_switch_in_progress_dest;
+
/* FBC compressor */
struct compressor *fbc_compressor;
@@ -1666,7 +1731,7 @@ struct dc {
} scratch;
struct dml2_configuration_options dml2_options;
- struct dml2_configuration_options dml2_tmp;
+ struct dml2_configuration_options dml2_dc_power_options;
enum dc_acpi_cm_power_state power_state;
};
@@ -1771,19 +1836,15 @@ enum dc_status dc_validate_with_context(struct dc *dc,
const struct dc_validation_set set[],
int set_count,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dc_set_generic_gpio_for_stereo(bool enable,
struct gpio_service *gpio_service);
-/*
- * fast_validate: we return after determining if we can support the new state,
- * but before we populate the programming info
- */
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
@@ -2379,17 +2440,12 @@ void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
struct dc_link *link, int peak_bw);
/*
- * Validate the BW of all the valid DPIA links to make sure it doesn't exceed
- * available BW for each host router
- *
- * @dc: pointer to dc struct
- * @stream: pointer to all possible streams
- * @count: number of valid DPIA streams
+ * Calculates the DP tunneling bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective DP tunneling link
*
- * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+ * return: dc_status
*/
-bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
- const unsigned int count);
+enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx);
/* Sink Interfaces - A sink corresponds to a display output device */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index afbcf866520e..f5ef1a07078e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -1269,12 +1269,16 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
new_signals.bits.allow_z10 = 1;
+ // New in IPSv2.0
+ new_signals.bits.allow_ips1z8 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+ // IPSv1.0 only
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+ // IPSv1.0 only
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
@@ -1286,6 +1290,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
new_signals.bits.allow_z10 = 1;
+ // New in IPSv2.0
+ new_signals.bits.allow_ips1z8 = 1;
} else {
/* RCG only */
new_signals.bits.allow_pg = 0;
@@ -1293,8 +1299,28 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips2 = 0;
new_signals.bits.allow_z10 = 0;
}
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ new_signals.bits.allow_z10 = 1;
+ }
+ // Setting RCG allow bits (IPSv2.0)
+ if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) {
+ new_signals.bits.allow_ips0_rcg = 1;
+ new_signals.bits.allow_ips1_rcg = 1;
+ } else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) {
+ new_signals.bits.allow_ips1_rcg = 1;
+ } else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) {
+ new_signals.bits.allow_ips0_rcg = 1;
+ }
+ // IPS dynamic allow bits (IPSv2 change, vpb use case)
+ if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) {
+ new_signals.bits.allow_dynamic_ips1 = 1;
+ } else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) {
+ new_signals.bits.allow_dynamic_ips1 = 1;
+ new_signals.bits.allow_dynamic_ips1_z8 = 1;
}
-
ips_driver->signals = new_signals;
dc_dmub_srv->driver_signals = ips_driver->signals;
}
@@ -1318,7 +1344,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv;
- uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
+ uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0;
if (dc->debug.dmcub_emulation)
return;
@@ -1338,31 +1364,34 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
rcg_exit_count = ips_fw->rcg_exit_count;
ips1_exit_count = ips_fw->ips1_exit_count;
ips2_exit_count = ips_fw->ips2_exit_count;
+ ips1z8_exit_count = ips_fw->ips1_z8ret_exit_count;
ips_driver->signals.all = 0;
dc_dmub_srv->driver_signals = ips_driver->signals;
DC_LOG_IPS(
- "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)",
+ "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1_z8=%u)",
__func__,
ips_driver->signals.bits.allow_ips1,
ips_driver->signals.bits.allow_ips2,
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit,
ips_fw->rcg_entry_count,
ips_fw->ips1_entry_count,
- ips_fw->ips2_entry_count);
+ ips_fw->ips2_entry_count,
+ ips_fw->ips1_z8ret_entry_count);
/* Note: register access has technically not resumed for DCN here, but we
* need to be message PMFW through our standard register interface.
*/
dc_dmub_srv->needs_idle_wake = false;
- if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
+ if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
(!dc->debug.optimize_ips_handshake ||
- ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
+ ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) {
DC_LOG_IPS(
- "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
+ "wait IPS2 eval (ips1_commit=%u ips2_commit=%u )",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1422,28 +1451,31 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
dc_dmub_srv_notify_idle(dc, false);
if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
DC_LOG_IPS(
- "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
+ "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit);
while (ips_fw->signals.bits.ips1_commit)
udelay(1);
DC_LOG_IPS(
- "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
+ "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit);
}
}
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
- DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)",
+ DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)",
__func__,
rcg_exit_count,
ips1_exit_count,
- ips2_exit_count);
+ ips2_exit_count,
+ ips1z8_exit_count);
}
void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
@@ -1656,7 +1688,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
return result;
}
-void dc_dmub_srv_fams2_update_config(struct dc *dc,
+static void dc_dmub_srv_rb_based_fams2_update_config(struct dc *dc,
struct dc_state *context,
bool enable)
{
@@ -1722,6 +1754,63 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static void dc_dmub_srv_ib_based_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable)
+{
+ struct dmub_fams2_config_v2 *config = (struct dmub_fams2_config_v2 *)dc->ctx->dmub_srv->dmub->ib_mem_gart.cpu_addr;
+ union dmub_rb_cmd cmd;
+ uint32_t i;
+
+ memset(config, 0, sizeof(*config));
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ cmd.ib_fams2_config.header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG;
+
+ cmd.ib_fams2_config.ib_data.src.quad_part = dc->ctx->dmub_srv->dmub->ib_mem_gart.gpu_addr;
+ cmd.ib_fams2_config.ib_data.size = sizeof(*config);
+
+ if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
+ /* send global configuration parameters first; copying afterwards
+ * would let the memcpy clobber the feature overrides below */
+ memcpy(&config->global, &context->bw_ctx.bw.dcn.fams2_global_config,
+ sizeof(struct dmub_cmd_fams2_global_config));
+
+ /* copy static feature configuration overrides */
+ config->global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
+ config->global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
+ config->global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
+
+ /* construct per-stream configs */
+ for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
+ /* copy stream static base state */
+ memcpy(&config->stream_v1[i].base,
+ &context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
+ sizeof(config->stream_v1[i].base));
+
+ /* copy stream static sub-state */
+ memcpy(&config->stream_v1[i].sub_state,
+ &context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[i],
+ sizeof(config->stream_v1[i].sub_state));
+ }
+ }
+
+ config->global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
+ config->global.features.bits.enable = enable;
+
+ dm_execute_dmub_cmd_list(dc->ctx, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dc_dmub_srv_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable)
+{
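+ /* The FAMS2 major version selects the config transport: v2 programs
+ * the config through the DMUB ring buffer, v3 through an indirect
+ * buffer in GART memory. */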
+ if (dc->debug.fams_version.major == 2)
+ dc_dmub_srv_rb_based_fams2_update_config(dc, context, enable);
+ else if (dc->debug.fams_version.major == 3)
+ dc_dmub_srv_ib_based_fams2_update_config(dc, context, enable);
+}
+
void dc_dmub_srv_fams2_drr_update(struct dc *dc,
uint32_t tg_inst,
uint32_t vtotal_min,
@@ -1847,83 +1936,267 @@ void dc_dmub_srv_fams2_passthrough_flip(
}
}
-bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement)
+
+bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement)
{
- bool result;
+ union dmub_rb_cmd cmd;
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS;
+ cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL;
+ cmd.ips_residency_cntl.header.payload_bytes = sizeof(struct dmub_cmd_ips_residency_cntl_data);
+
+ // only panel_inst=0 is supported at the moment
+ cmd.ips_residency_cntl.cntl_data.panel_inst = panel_inst;
+ cmd.ips_residency_cntl.cntl_data.start_measurement = start_measurement;
+
+ if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return false;
+
+ return true;
+}
+
+bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, struct dmub_ips_residency_info *driver_info,
+ enum ips_residency_mode ips_mode)
+{
+ union dmub_rb_cmd cmd;
+ uint32_t bytes = sizeof(struct dmub_ips_residency_info);
+
+ dmub_flush_buffer_mem(&ctx->dmub_srv->dmub->scratch_mem_fb);
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS;
+ cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO;
+ cmd.ips_query_residency_info.header.payload_bytes = sizeof(struct dmub_cmd_ips_query_residency_info_data);
+
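+ // DMUB writes the residency info into scratch memory; pass its GPU address as the destination.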
+ cmd.ips_query_residency_info.info_data.dest.quad_part = ctx->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.ips_query_residency_info.info_data.size = bytes;
+ cmd.ips_query_residency_info.info_data.panel_inst = panel_inst;
+ cmd.ips_query_residency_info.info_data.ips_mode = (uint32_t)ips_mode;
+
+ if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) ||
+ cmd.ips_query_residency_info.header.ret_status == 0)
return false;
- result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY,
- start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT);
+ // copy the result to the output since ret_status != 0 means the command returned data
+ memcpy(driver_info, ctx->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
+
+ return true;
+}
+
+bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_INIT_CONFIG;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
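+ // Hand DMUB the base address and size of the preallocated LSDMA ring buffer.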
+ lsdma_data->u.init_data.gpu_addr_base.quad_part = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.gpu_addr;
+ lsdma_data->u.init_data.ring_size = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.size;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Init failed in DMUB");
return result;
}
-void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output)
+bool dmub_lsdma_send_linear_copy_packet(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t count)
{
- uint32_t i;
- enum dmub_gpint_command command_code;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
- return;
+ memset(&cmd, 0, sizeof(cmd));
- switch (output->ips_mode) {
- case DMUB_IPS_MODE_IPS1_MAX:
- command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS2:
- command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS1_RCG:
- command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS1_ONO2_ON:
- command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER;
- break;
- default:
- command_code = DMUB_GPINT__INVALID_COMMAND;
- break;
- }
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.linear_copy_data.count = count - 1; // LSDMA controller expects the byte count minus 1
+ lsdma_data->u.linear_copy_data.src_lo = src_addr & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Linear Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_tiled_to_tiled_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_send_tiled_to_tiled_copy_command_params params)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_TILED_TO_TILED_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.tiled_copy_data.src_addr_lo = params.src_addr & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.src_addr_hi = (params.src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.dst_addr_lo = params.dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.dst_addr_hi = (params.dst_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.src_x = params.src_x;
+ lsdma_data->u.tiled_copy_data.src_y = params.src_y;
+ lsdma_data->u.tiled_copy_data.dst_x = params.dst_x;
+ lsdma_data->u.tiled_copy_data.dst_y = params.dst_y;
+ lsdma_data->u.tiled_copy_data.src_width = params.src_width - 1; // LSDMA controller expects width minus 1
+ lsdma_data->u.tiled_copy_data.dst_width = params.dst_width - 1; // LSDMA controller expects width minus 1
+ lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode;
+ lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode;
+ lsdma_data->u.tiled_copy_data.src_element_size = params.element_size;
+ lsdma_data->u.tiled_copy_data.dst_element_size = params.element_size;
+ lsdma_data->u.tiled_copy_data.rect_x = params.rect_x;
+ lsdma_data->u.tiled_copy_data.rect_y = params.rect_y;
+ lsdma_data->u.tiled_copy_data.dcc = params.dcc;
+ lsdma_data->u.tiled_copy_data.tmz = params.tmz;
+ lsdma_data->u.tiled_copy_data.read_compress = params.read_compress;
+ lsdma_data->u.tiled_copy_data.write_compress = params.write_compress;
+ lsdma_data->u.tiled_copy_data.src_height = params.src_height - 1; // LSDMA controller expects height minus 1
+ lsdma_data->u.tiled_copy_data.dst_height = params.dst_height - 1; // LSDMA controller expects height minus 1
+ lsdma_data->u.tiled_copy_data.data_format = params.data_format;
+ lsdma_data->u.tiled_copy_data.max_com = params.max_com;
+ lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Tiled to Tiled Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_pio_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t overlap_disable)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.pio_copy_data.packet.fields.byte_count = byte_count;
+ lsdma_data->u.pio_copy_data.packet.fields.overlap_disable = overlap_disable;
+ lsdma_data->u.pio_copy_data.src_lo = src_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA PIO Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_pio_constfill_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t data)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_CONSTFILL;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.pio_constfill_data.packet.fields.constant_fill = 1;
+ lsdma_data->u.pio_constfill_data.packet.fields.byte_count = byte_count;
+ lsdma_data->u.pio_constfill_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_constfill_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.pio_constfill_data.data = data;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA PIO Constfill failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
- if (command_code == DMUB_GPINT__INVALID_COMMAND)
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_POLL_REG_WRITE;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.reg_write_data.reg_addr = reg_addr;
+ lsdma_data->u.reg_write_data.reg_data = reg_data;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Poll Reg failed in DMUB");
+
+ return result;
+}
+
+void dc_dmub_srv_release_hw(const struct dc *dc)
+{
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+ union dmub_rb_cmd cmd = {0};
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return;
- for (i = 0; i < GPINT_RETRY_NUM; i++) {
- // false could mean GPINT timeout, in which case we should retry
- if (dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
- (uint16_t)(output->ips_mode), &output->residency_percent,
- DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- break;
- udelay(100);
- }
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW;
+ cmd.idle_opt_notify_idle.header.payload_bytes =
+ sizeof(cmd.idle_opt_notify_idle) -
+ sizeof(cmd.idle_opt_notify_idle.header);
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
- (uint16_t)(output->ips_mode),
- &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->entry_counter = 0;
-
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO,
- (uint16_t)(output->ips_mode),
- &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_active_time_us[0] = 0;
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI,
- (uint16_t)(output->ips_mode),
- &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_active_time_us[1] = 0;
-
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO,
- (uint16_t)(output->ips_mode),
- &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_inactive_time_us[0] = 0;
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI,
- (uint16_t)(output->ips_mode),
- &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_inactive_time_us[1] = 0;
-
- // NUM_IPS_HISTOGRAM_BUCKETS = 16
- for (i = 0; i < 16; i++)
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i],
- DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->histogram[i] = 0;
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index ada5c2fb2db3..8ea320f21269 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -210,6 +210,60 @@ void dc_dmub_srv_fams2_passthrough_flip(
struct dc_surface_update *srf_updates,
int surface_count);
+bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv);
+bool dmub_lsdma_send_linear_copy_packet(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t count);
+bool dmub_lsdma_send_pio_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t overlap_disable);
+bool dmub_lsdma_send_pio_constfill_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t data);
+
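+/* Parameters for a tiled-to-tiled LSDMA copy. Widths and heights are given
+ * in raw units; the implementation converts them to the HW's minus-one
+ * encoding before submission. */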
+struct lsdma_send_tiled_to_tiled_copy_command_params {
+ uint64_t src_addr;
+ uint64_t dst_addr;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t src_width : 16;
+ uint32_t dst_width : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_height : 16;
+ uint32_t dst_height : 16;
+
+ uint32_t data_format : 6;
+ uint32_t swizzle_mode : 5;
+ uint32_t element_size : 3;
+ uint32_t dcc : 1;
+ uint32_t tmz : 1;
+ uint32_t read_compress : 2;
+ uint32_t write_compress : 2;
+ uint32_t max_com : 2;
+ uint32_t max_uncom : 1;
+ uint32_t padding : 9;
+};
+
+bool dmub_lsdma_send_tiled_to_tiled_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_send_tiled_to_tiled_copy_command_params params);
+bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data);
+
/**
* struct ips_residency_info - struct containing info from dmub_ips_residency_stats
*
@@ -223,7 +277,7 @@ void dc_dmub_srv_fams2_passthrough_flip(
* @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c
*/
struct ips_residency_info {
- enum dmub_ips_mode ips_mode;
+ enum ips_residency_mode ips_mode;
unsigned int residency_percent;
unsigned int entry_counter;
unsigned int total_active_time_us[2];
@@ -231,21 +285,16 @@ struct ips_residency_info {
unsigned int histogram[16];
};
-/**
- * bool dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status
- *
- * @dc_dmub_srv: The DC DMUB service pointer
- * @start_measurement: Describes whether to start or stop measurement
- *
- * Return: true if GPINT was sent successfully, false otherwise
- */
-bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement);
+bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement);
+
+bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst,
+ struct dmub_ips_residency_info *driver_info,
+ enum ips_residency_mode ips_mode);
/**
- * bool dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info
+ * dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required.
*
- * @dc_dmub_srv: The DC DMUB service pointer
- * @output: Output struct to copy the the residency info to
+ * @dc - pointer to DC object
*/
-void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output);
+void dc_dmub_srv_release_hw(const struct dc *dc);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index d346f8ae1634..5ce1be362534 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -162,6 +162,11 @@ struct dc_link_settings {
struct dc_tunnel_settings {
bool should_enable_dp_tunneling;
bool should_use_dp_bw_allocation;
+ uint8_t cm_id;
+ uint8_t group_id;
+ uint32_t bw_granularity;
+ uint32_t estimated_bw;
+ uint32_t allocated_bw;
};
union dc_dp_ffe_preset {
@@ -957,11 +962,21 @@ union usb4_driver_bw_cap {
uint8_t raw;
};
+/* DPCD[0xE0021] DP_IN_ADAPTER_TUNNEL_INFORMATION register. */
+union dpia_tunnel_info {
+ struct {
+ uint8_t group_id :3;
+ uint8_t rsvd :5;
+ } bits;
+ uint8_t raw;
+};
+
/* DP Tunneling over USB4 */
struct dpcd_usb4_dp_tunneling_info {
union dp_tun_cap_support dp_tun_cap;
union dpia_info dpia_info;
union usb4_driver_bw_cap driver_bw_cap;
+ union dpia_tunnel_info dpia_tunnel_info;
uint8_t usb4_driver_id;
uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index c9f6c6275ca1..667852517246 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -68,7 +68,7 @@ enum dc_plane_addr_type {
struct dc_plane_address {
enum dc_plane_addr_type type;
- bool tmz_surface;
+ uint8_t tmz_surface;
union {
struct{
PHYSICAL_ADDRESS_LOC addr;
@@ -1104,7 +1104,8 @@ enum mpcc_gamut_remap_mode_select {
enum mpcc_gamut_remap_id {
MPCC_OGAM_GAMUT_REMAP,
MPCC_MCM_FIRST_GAMUT_REMAP,
- MPCC_MCM_SECOND_GAMUT_REMAP
+ MPCC_MCM_SECOND_GAMUT_REMAP,
+ MPCC_RMCM_GAMUT_REMAP,
};
enum cursor_matrix_mode {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index e3a8283b4098..7f57661433eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -156,15 +156,16 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->adaptive_sharpness.enable = true;
spl_in->adaptive_sharpness.sharpness_level = 0;
} else if (sharpness_setting == SHARPNESS_CUSTOM) {
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750;
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500;
+ /* SAT: read sharpness_range from dc_plane_state */
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = plane_state->sharpness_range.sdr_rgb_min;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = plane_state->sharpness_range.sdr_rgb_max;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = plane_state->sharpness_range.sdr_rgb_mid;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = plane_state->sharpness_range.sdr_yuv_min;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = plane_state->sharpness_range.sdr_yuv_max;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = plane_state->sharpness_range.sdr_yuv_mid;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = plane_state->sharpness_range.hdr_rgb_min;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = plane_state->sharpness_range.hdr_rgb_max;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = plane_state->sharpness_range.hdr_rgb_mid;
if (force_sharpness_level > 0) {
if (force_sharpness_level > 10)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 341d2ffb64b1..5fc6fea211de 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -579,6 +579,17 @@ bool dc_stream_set_gamut_remap(struct dc *dc,
bool dc_stream_program_csc_matrix(struct dc *dc,
struct dc_stream_state *stream);
+struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream,
+ bool allocate_one);
+
+void dc_stream_release_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream);
+
+void dc_stream_init_rmcm_3dlut(struct dc *dc);
+
struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream);
void dc_dmub_update_dirty_rect(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index a4cd0eb39a3a..375ca2f13b7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -175,6 +175,7 @@ struct dc_panel_patch {
unsigned int embedded_tiled_slave;
unsigned int disable_fams;
unsigned int skip_avmute;
+ unsigned int skip_audio_sab_check;
unsigned int mst_start_top_delay;
unsigned int remove_sink_ext_caps;
unsigned int disable_colorimetry;
@@ -263,6 +264,7 @@ enum dc_timing_source {
TIMING_SOURCE_EDID_4BYTE,
TIMING_SOURCE_EDID_CEA_DISPLAYID_VTDB,
TIMING_SOURCE_EDID_CEA_RID,
+ TIMING_SOURCE_EDID_DISPLAYID_TYPE5,
TIMING_SOURCE_VBIOS,
TIMING_SOURCE_CV,
TIMING_SOURCE_TV,
@@ -1255,7 +1257,6 @@ enum dc_cm2_gpu_mem_layout {
enum dc_cm2_gpu_mem_pixel_component_order {
DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA,
- DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA
};
enum dc_cm2_gpu_mem_format {
@@ -1277,7 +1278,6 @@ struct dc_cm2_gpu_mem_format_parameters {
enum dc_cm2_gpu_mem_size {
DC_CM2_GPU_MEM_SIZE_171717,
- DC_CM2_GPU_MEM_SIZE_333333,
DC_CM2_GPU_MEM_SIZE_TRANSFORMED,
};
@@ -1315,6 +1315,7 @@ struct dc_cm2_func_luts {
bool mpc_3dlut_enable;
bool rmcm_3dlut_enable;
bool mpc_mcm_post_blend;
+ uint8_t rmcm_tmz;
} lut3d_data;
const struct dc_transfer_func *lut1d_func;
};
@@ -1372,4 +1373,19 @@ struct set_backlight_level_params {
uint8_t aux_inst;
};
+enum dc_validate_mode {
+ /* validate the mode and program HW */
+ DC_VALIDATE_MODE_AND_PROGRAMMING = 0,
+ /* only validate the mode */
+ DC_VALIDATE_MODE_ONLY = 1,
+ /* validate the mode and get the max state (voltage level) */
+ DC_VALIDATE_MODE_AND_STATE_INDEX = 2,
+};
+
+struct dc_validation_dpia_set {
+ const struct dc_link *link;
+ const struct dc_tunnel_settings *tunnel_settings;
+ uint32_t required_bw;
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index ffd172231fdf..668ee2d405fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -727,7 +727,7 @@ void dccg401_init(struct dccg *dccg)
}
}
-void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst)
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
index 55e8718aad22..5947a35363aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
@@ -209,7 +209,7 @@ void dccg401_disable_symclk32_le(
struct dccg *dccg,
int hpo_le_inst);
void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst);
-void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst);
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h);
void dccg401_set_ref_dscclk(struct dccg *dccg,
uint32_t dsc_inst);
void dccg401_set_src_sel(
@@ -230,7 +230,6 @@ void dccg401_set_dp_dto(
const struct dp_dto_params *params);
void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
-void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst);
void dccg401_set_dtbclk_p_src(
struct dccg *dccg,
enum streamclk_source src,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index d28826c3ae5f..0421b267a0b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -292,9 +292,35 @@ static void set_speed(
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
+static bool acquire_engine(struct dce_i2c_hw *dce_i2c_hw)
+{
+ uint32_t arbitrate = 0;
+
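+ // Check who currently owns the engine: SW (done), HW (must not take it), or idle.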
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ switch (arbitrate) {
+ case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW:
+ return true;
+ case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW:
+ return false;
+ case DC_I2C_STATUS__DC_I2C_STATUS_IDLE:
+ default:
+ break;
+ }
+
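+ // Engine is idle: request SW ownership, then verify that the request was granted.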
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, true);
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return false;
+
+ return true;
+}
+
static bool setup_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
uint32_t reset_length = 0;
+
+ // Deassert soft reset to unblock I2C engine registers
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, false);
@@ -309,8 +335,8 @@ static bool setup_engine(
REG_UPDATE_N(SETUP, 1,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN), 1);
- /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
- REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
+ if (!acquire_engine(dce_i2c_hw))
+ return false;
/*set SW requested I2c speed to default, if API calls in it will be override later*/
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz);
@@ -319,9 +345,8 @@ static bool setup_engine(
i2c_setup_limit = dce_i2c_hw->setup_limit;
/* Program pin select */
- REG_UPDATE_6(DC_I2C_CONTROL,
+ REG_UPDATE_5(DC_I2C_CONTROL,
DC_I2C_GO, 0,
- DC_I2C_SOFT_RESET, 0,
DC_I2C_SEND_RESET, 0,
DC_I2C_SW_STATUS_RESET, 1,
DC_I2C_TRANSACTION_COUNT, 0,
@@ -351,6 +376,32 @@ static bool setup_engine(
return true;
}
+/**
+ * cntl_stuck_hw_workaround - Workaround for I2C engine stuck state
+ * @dce_i2c_hw: Pointer to dce_i2c_hw structure
+ *
+ * If we boot without an HDMI display, the I2C engine does not get initialized
+ * correctly. One of its symptoms is that SW_USE_I2C does not get cleared after
+ * acquire. After setting SW_DONE_USING_I2C on release, the engine gets
+ * immediately reacquired by SW, preventing DMUB from using it.
+ *
+ * This function checks the I2C arbitration status and applies a release
+ * workaround if necessary.
+ */
+static void cntl_stuck_hw_workaround(struct dce_i2c_hw *dce_i2c_hw)
+{
+ uint32_t arbitrate = 0;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return;
+
+ // Still acquired after release, release again as a workaround
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true);
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ ASSERT(arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW);
+}
+
static void release_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
@@ -378,9 +429,9 @@ static void release_engine(
/*for HW HDCP Ri polling failure w/a test*/
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz_hdcp);
- /* Release I2C after reset, so HW or DMCU could use it */
- REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1,
- DC_I2C_SW_USE_I2C_REG_REQ, 0);
+ // Release the I2C engine so HW or DMCU can use it; this automatically clears SW_USE_I2C
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true);
+ cntl_stuck_hw_workaround(dce_i2c_hw);
if (dce_i2c_hw->ctx->dc->debug.enable_mem_low_power.bits.i2c) {
if (dce_i2c_hw->regs->DIO_MEM_PWR_CTRL)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index fcd3d86ad517..e7a318e26d38 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -4,6 +4,7 @@
#include "dc.h"
#include "dc_dmub_srv.h"
+#include "dc_dp_types.h"
#include "dmub/dmub_srv.h"
#include "core_types.h"
#include "dmub_replay.h"
@@ -43,21 +44,45 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s
/*
* Enable/Disable Replay.
*/
-static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst)
+static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst,
+ struct dc_link *link)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
uint32_t retry_count;
enum replay_state state = REPLAY_STATE_0;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ uint8_t i;
memset(&cmd, 0, sizeof(cmd));
cmd.replay_enable.header.type = DMUB_CMD__REPLAY;
cmd.replay_enable.data.panel_inst = panel_inst;
cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE;
- if (enable)
+ if (enable) {
cmd.replay_enable.data.enable = REPLAY_ENABLE;
- else
+ // HPO stream/link encoder assignments are not static; update them every time we enable replay
+ if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx &&
+ res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ //TODO: refactor for multi edp support
+ break;
+ }
+ }
+
+ if (!pipe_ctx)
+ return;
+
+ cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
+ }
+ } else
cmd.replay_enable.data.enable = REPLAY_DISABLE;
cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
@@ -149,6 +174,17 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->digbe_inst = replay_context->digbe_inst;
copy_settings_data->digfe_inst = replay_context->digfe_inst;
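+ /* UHBR rates drive the link through HPO encoders; record their instances for DMUB. */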
+ if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc)
+ copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ else
+ copy_settings_data->hpo_stream_enc_inst = 0;
+ if (pipe_ctx->link_res.hpo_dp_link_enc)
+ copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
+ else
+ copy_settings_data->hpo_link_enc_inst = 0;
+ }
+
if (pipe_ctx->plane_res.dpp)
copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
else
@@ -211,6 +247,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
pCmd->header.type = DMUB_CMD__REPLAY;
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
+ pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index e6346c0ffc0e..ccbe385e132c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -19,7 +19,7 @@ struct dmub_replay_funcs {
void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state,
uint8_t panel_inst);
void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait,
- uint8_t panel_inst);
+ uint8_t panel_inst, struct dc_link *link);
bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link,
struct replay_context *replay_context, uint8_t panel_inst);
void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index e1d500633dfa..b357683b4255 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -114,9 +114,6 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_rcflags)
-
ifdef CONFIG_DRM_AMD_DC_FP
DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o
DML += dcn10/dcn10_fpu.o
@@ -137,7 +134,6 @@ DML += dcn303/dcn303_fpu.o
DML += dcn314/dcn314_fpu.o
DML += dcn35/dcn35_fpu.o
DML += dcn351/dcn351_fpu.o
-DML += dcn401/dcn401_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index f1235bf9a596..74962791302f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -748,7 +748,7 @@ static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic)
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
/*
* we want a breakdown of the various stages of validation, which the
@@ -1119,7 +1119,7 @@ bool dcn_validate_bandwidth(
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
+ if (v->voltage_level != number_of_states_plus_one && validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
@@ -1286,7 +1286,7 @@ bool dcn_validate_bandwidth(
}
} else if (v->voltage_level == number_of_states_plus_one) {
BW_VAL_TRACE_SKIP(fail);
- } else if (fast_validate) {
+ } else if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index e9fea9c2162e..2a2eaf6adf26 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1315,7 +1315,7 @@ static void swizzle_to_dml_params(
int dcn20_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i;
bool synchronized_vblank = true;
@@ -1733,7 +1733,7 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i, pipe_idx;
@@ -1780,10 +1780,10 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
else
pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
}
*out_pipe_cnt = pipe_cnt;
@@ -2027,7 +2027,7 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
}
static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool out = false;
@@ -2040,7 +2040,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
BW_VAL_TRACE_COUNT();
- out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
+ out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode);
if (pipe_cnt == 0)
goto validate_out;
@@ -2050,12 +2050,12 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
- dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
@@ -2077,7 +2077,7 @@ validate_out:
}
bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool voltage_supported = false;
bool full_pstate_supported = false;
@@ -2095,12 +2095,11 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
/*Unsafe due to current pipe merge and split logic*/
ASSERT(context != dc->current_state);
- if (fast_validate) {
- return dcn20_validate_bandwidth_internal(dc, context, true, pipes);
- }
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ return dcn20_validate_bandwidth_internal(dc, context, validate_mode, pipes);
// Best case, we support full UCLK switch latency
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes);
full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
@@ -2113,7 +2112,7 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st));
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes);
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
@@ -2156,14 +2155,14 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
uint32_t pipe_cnt;
int i;
dc_assert_fp_enabled();
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0; i < pipe_cnt; i++) {
@@ -2239,7 +2238,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel_req,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i, pipe_idx;
int vlevel, vlevel_max;
@@ -2281,10 +2280,10 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
else
pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
}
*out_pipe_cnt = pipe_cnt;
@@ -2319,7 +2318,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
}
bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool out = false;
@@ -2337,7 +2336,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
/*Unsafe due to current pipe merge and split logic*/
ASSERT(context != dc->current_state);
- out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
+ out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode);
if (pipe_cnt == 0)
goto validate_out;
@@ -2347,12 +2346,12 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
- dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
index b6c34198ddc8..aed00039ca62 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
@@ -44,14 +44,14 @@ void dcn20_calculate_dlg_params(struct dc *dc,
int dcn20_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn20_calculate_wm(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks);
void dcn20_update_bounding_box(struct dc *dc,
@@ -62,7 +62,7 @@ void dcn20_update_bounding_box(struct dc *dc,
void dcn20_patch_bounding_box(struct dc *dc,
struct _vcs_dpi_soc_bounding_box_st *bb);
bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes);
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes);
void dcn20_fpu_set_wm_ranges(int i,
struct pp_smu_wm_range_sets *ranges,
struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
@@ -75,9 +75,9 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
-bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool
- fast_validate, display_e2e_pipe_params_st *pipes);
+ enum dc_validate_mode validate_mode);
+bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes);
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);
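The hunks above replace every boolean fast_validate parameter with the dc_validate_mode enum introduced earlier in this series. The enum definition itself does not appear in these files; the following is a minimal sketch of the assumed definition and of how a legacy boolean call site would translate (only DC_VALIDATE_MODE_AND_PROGRAMMING is confirmed by these hunks, the other enumerator and the helper are assumptions):

/* Sketch of the assumed enum; only DC_VALIDATE_MODE_AND_PROGRAMMING appears
 * in these hunks, the other name is an assumption. */
enum dc_validate_mode {
	/* full validation plus watermark/DLG computation for programming
	 * (the old fast_validate == false) */
	DC_VALIDATE_MODE_AND_PROGRAMMING = 0,
	/* mode-support check only (the old fast_validate == true) */
	DC_VALIDATE_MODE_ONLY = 1,
};

/* A legacy boolean caller would translate as: */
static inline enum dc_validate_mode to_validate_mode(bool fast_validate)
{
	return fast_validate ? DC_VALIDATE_MODE_ONLY :
			       DC_VALIDATE_MODE_AND_PROGRAMMING;
}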
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index 88789987bdbc..e5f5c0663750 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -339,7 +339,8 @@ void dcn30_fpu_calculate_wm_and_dlg(
* newly found dummy_latency_index
*/
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING, true);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
@@ -630,7 +631,8 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
while (dummy_latency_index < max_latency_table_entries) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING, true);
if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
dm_allow_self_refresh_and_mclk_switch)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index d2ae43a82ba5..dfcc5d50071e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -55,5 +55,5 @@ int dcn_get_approx_det_segs_required_for_pstate(
int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif /* __DCN31_FPU_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 5ed117e11aa2..df9d50b9b57c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -306,7 +306,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -316,7 +316,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
dc_assert_fp_enabled();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
index d32c5bb99f4c..362ac79184ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
@@ -35,6 +35,6 @@
void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index b0fc1fd20208..6160952245b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -290,7 +290,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING);
/* for subvp + DRR case, if subvp pipes are still present we support pstate */
if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
@@ -1479,7 +1479,7 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
+ * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING)
* 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
@@ -1517,7 +1517,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx);
- *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
+ *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
// Populate dppclk to trigger a recalculate in dml_get_voltage_level
// so the phantom pipe DLG params can be assigned correctly.
pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
@@ -1560,7 +1561,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
dc_state_remove_phantom_streams_and_planes(dc, context);
dc_state_release_phantom_streams_and_planes(dc, context);
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
- *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
+ *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
@@ -2138,7 +2140,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
bool repopulate_pipes = false;
@@ -2162,7 +2164,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
for (i = 0; i < context->stream_count; i++)
resource_update_pipes_for_stream_with_slice_count(context, dc->current_state, dc->res_pool, context->streams[i], 1);
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!pipe_cnt) {
out = true;
@@ -2172,13 +2174,13 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
- if (!fast_validate) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge,
&pipe_cnt, &repopulate_pipes))
goto validate_fail;
}
- if (fast_validate ||
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING ||
(dc->debug.dml_disallow_alternate_prefetch_modes &&
(vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
@@ -2195,7 +2197,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_none;
- context->bw_ctx.dml.validate_max_state = fast_validate;
+ context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING);
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
context->bw_ctx.dml.validate_max_state = false;
@@ -2247,7 +2249,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int flag_vlevel = vlevel;
int i;
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!dc->config.enable_windowed_mpo_odm)
dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
@@ -2343,7 +2345,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
}
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
if (is_subvp_p_drr) {
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
@@ -2389,7 +2391,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.fclk_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
}
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
if (vlevel_temp < vlevel) {
vlevel = vlevel_temp;
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
@@ -2410,7 +2413,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
stream_status->fpo_in_use = false;
}
context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
}
}
}
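Note the conversion rule used consistently in dcn32_internal_validate_bw() above: every "if (fast_validate)" test becomes "if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)", so any mode other than full validate-and-program takes the old fast path, including the validate_max_state assignment. A condensed sketch of the resulting control flow (the helper names here are illustrative, not the driver's):

/* Condensed sketch of the converted flow; helper names are illustrative. */
static bool validate_bw_sketch(struct dc *dc, struct dc_state *context,
			       enum dc_validate_mode validate_mode)
{
	if (!fast_validate_bw_sketch(dc, context, validate_mode))
		return false;

	/* every mode except full validate-and-program skips watermark and
	 * DLG programming, exactly like the old fast_validate == true */
	if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
		return true;

	calculate_wm_and_dlg_sketch(dc, context);
	return true;
}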
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index 276e90e4e0ce..273d2bd79d85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -49,7 +49,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 92f0a099d089..5d73efa2f0c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -437,7 +437,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -446,7 +446,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
const unsigned int max_allowed_vblank_nom = 1023;
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
- fast_validate);
+ validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
index 067480fc3691..d121c5afce71 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
@@ -37,7 +37,7 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index 17d0b4923b0c..6f516af82956 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -470,7 +470,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -479,7 +479,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
const unsigned int max_allowed_vblank_nom = 1023;
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
- fast_validate);
+ validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
index f93efab9a668..f71d9d8d0759 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
@@ -12,7 +12,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c
deleted file mode 100644
index 4fbecb5ff349..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c
+++ /dev/null
@@ -1,239 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dcn401_fpu.h"
-#include "dcn401/dcn401_resource.h"
-// We need this includes for WATERMARKS_* defines
-#include "clk_mgr/dcn401/dcn401_smu14_driver_if.h"
-#include "link.h"
-
-#define DC_LOGGER_INIT(logger)
-
-void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
-{
- /* defaults */
- double pstate_latency_us = clk_mgr->ctx->dc->dml.soc.dram_clock_change_latency_us;
- double fclk_change_latency_us = clk_mgr->ctx->dc->dml.soc.fclk_change_latency_us;
- double sr_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_exit_time_us;
- double sr_enter_plus_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
- /* For min clocks use as reported by PM FW and report those as min */
- uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
- uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
- uint16_t setb_min_uclk_mhz = min_uclk_mhz;
- uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz;
-
- dc_assert_fp_enabled();
-
- /* For Set B ranges use min clocks state 2 when available, and report those to PM FW */
- if (dcfclk_mhz_for_the_second_state)
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state;
- else
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
-
- if (clk_mgr->bw_params->clk_table.entries[2].memclk_mhz)
- setb_min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz;
-
- /* Set A - Normal - default values */
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
- /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
- if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
- clk_mgr->bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
- clk_mgr->bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
- clk_mgr->bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
- clk_mgr->bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[3].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;
- }
- /* Set D - MALL - SR enter and exit time specific to MALL, TBD after bringup or later phase for now use DRAM values / 2 */
- /* For MALL DRAM clock change latency is N/A, for watermak calculations use lowest value dummy P state latency */
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
-}
-
-/*
- * dcn401_update_bw_bounding_box
- *
- * This would override some dcn4_01 ip_or_soc initial parameters hardcoded from
- * spreadsheet with actual values as per dGPU SKU:
- * - with passed few options from dc->config
- * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might
- * need to get it from PM FW)
- * - with passed latency values (passed in ns units) in dc-> bb override for
- * debugging purposes
- * - with passed latencies from VBIOS (in 100_ns units) if available for
- * certain dGPU SKU
- * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU
- * of the same ASIC)
- * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM
- * FW for different clocks (which might differ for certain dGPU SKU of the
- * same ASIC)
- */
-void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
-{
- dc_assert_fp_enabled();
-
- /* Override from passed dc->bb_overrides if available*/
- if (dc->bb_overrides.sr_exit_time_ns)
- dc->dml2_options.bbox_overrides.sr_exit_latency_us =
- dc->bb_overrides.sr_exit_time_ns / 1000.0;
-
- if (dc->bb_overrides.sr_enter_plus_exit_time_ns)
- dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
- dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
-
- if (dc->bb_overrides.urgent_latency_ns)
- dc->dml2_options.bbox_overrides.urgent_latency_us =
- dc->bb_overrides.urgent_latency_ns / 1000.0;
-
- if (dc->bb_overrides.dram_clock_change_latency_ns)
- dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
- dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
-
- if (dc->bb_overrides.fclk_clock_change_latency_ns)
- dc->dml2_options.bbox_overrides.fclk_change_latency_us =
- dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
-
- /* Override from VBIOS if VBIOS bb_info available */
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
- struct bp_soc_bb_info bb_info = {0};
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
- if (bb_info.dram_clock_change_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
- bb_info.dram_clock_change_latency_100ns * 10;
-
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
- bb_info.dram_sr_enter_exit_latency_100ns * 10;
-
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.sr_exit_latency_us =
- bb_info.dram_sr_exit_latency_100ns * 10;
- }
- }
-
- /* Override from VBIOS for num_chan */
- if (dc->ctx->dc_bios->vram_info.num_chans) {
- dc->dml2_options.bbox_overrides.dram_num_chan =
- dc->ctx->dc_bios->vram_info.num_chans;
-
- }
-
- if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
- dc->dml2_options.bbox_overrides.dram_chanel_width_bytes =
- dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
-
- dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0;
- dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
- dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0;
-
- if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) {
- unsigned int i = 0;
-
- dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) {
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
- }
- }
- }
-}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h
deleted file mode 100644
index 329f1788843c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DCN401_FPU_H__
-#define __DCN401_FPU_H__
-
-#include "clk_mgr.h"
-
-void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr);
-
-void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index 157ecf008d6c..4c21ce42054c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -81,10 +81,11 @@ AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))
AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
@@ -94,17 +95,16 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflag
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
-
-
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
@@ -120,6 +120,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
DML21 := src/dml2_top/dml2_top_interfaces.o
DML21 += src/dml2_top/dml2_top_soc15.o
DML21 += src/dml2_core/dml2_core_dcn4.o
+DML21 += src/dml2_core/dml2_core_utils.o
DML21 += src/dml2_core/dml2_core_factory.o
DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 7ae9c0ba0c9e..715f9019a33e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -10189,7 +10189,7 @@ dml_uint_t dml_mode_support_ex(struct dml_mode_support_ex_params_st *in_out_para
result = mode_support_pwr_states(&in_out_params->out_lowest_state_idx,
in_out_params->mode_lib,
in_out_params->in_display_cfg,
- 0,
+ in_out_params->in_start_state_idx,
in_out_params->mode_lib->states.num_states - 1);
if (result)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
index 0670e4dc4fd9..dbeb08466092 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
@@ -1917,6 +1917,7 @@ struct display_mode_lib_st {
struct dml_mode_support_ex_params_st {
struct display_mode_lib_st *mode_lib;
const struct dml_display_cfg_st *in_display_cfg;
+ dml_uint_t in_start_state_idx;
dml_uint_t out_lowest_state_idx;
struct dml_mode_support_info_st *out_evaluation_info;
};
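The new in_start_state_idx field lets callers of dml_mode_support_ex() begin the power-state search above state 0, which was previously hardcoded; callers that zero-initialize the struct keep the old behavior. A hypothetical caller (the struct fields come from the hunk above, everything else is illustrative):

/* Hypothetical caller; all values illustrative. */
struct dml_mode_support_ex_params_st params = {
	.mode_lib = mode_lib,
	.in_display_cfg = &display_cfg,
	/* start the search above states already known not to fit */
	.in_start_state_idx = min_state_idx,
};

if (dml_mode_support_ex(&params))
	lowest_state = params.out_lowest_state_idx;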
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 2aa6d44bb359..a06217a9eef6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml21_wrapper.h"
#include "dml2_core_dcn4_calcs.h"
#include "dml2_internal_shared_types.h"
@@ -11,277 +10,263 @@
#include "dml21_translation_helper.h"
#include "bounding_boxes/dcn4_soc_bb.h"
-static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- const struct dml2_soc_bb *soc_bb;
- const struct dml2_soc_qos_parameters *qos_params;
-
- switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_4_01:
- default:
- if (config->bb_from_dmub)
- soc_bb = config->bb_from_dmub;
- else
- soc_bb = &dml2_socbb_dcn401;
-
- qos_params = &dml_dcn4_variant_a_soc_qos_params;
- }
-
- /* patch soc bb */
- memcpy(&dml_init->soc_bb, soc_bb, sizeof(struct dml2_soc_bb));
-
- /* patch qos params */
- memcpy(&dml_init->soc_bb.qos_parameters, qos_params, sizeof(struct dml2_soc_qos_parameters));
-}
-
-static void dml21_external_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config)
-{
- memcpy(&dml_init->soc_bb, &config->external_socbb_ip_params->soc_bb, sizeof(struct dml2_soc_bb));
-}
-
-static void dml21_external_ip_params(struct dml2_initialize_instance_in_out *dml_init,
+static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
+ const struct dc *in_dc,
const struct dml2_configuration_options *config)
{
- memcpy(&dml_init->ip_caps, &config->external_socbb_ip_params->ip_params, sizeof(struct dml2_ip_capabilities));
+ bool disable_fams2 = !in_dc->debug.fams2_config.bits.enable;
+
+ /* ODM options */
+ pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm;
+ pmo_options->disable_dyn_odm_for_multi_stream = true;
+ pmo_options->disable_dyn_odm_for_stream_with_svp = true;
+
+ pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1);
+
+ /* NOTE: DRR and SubVP Require FAMS2 */
+ pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) ||
+ in_dc->debug.force_disable_subvp ||
+ disable_fams2;
+ pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) ||
+ disable_fams2;
+ pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) ||
+ disable_fams2;
+ pmo_options->disable_fams2 = disable_fams2;
+
+ pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE ||
+ in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY;
+ pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
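dml21_disable_pstate_method_mask is consumed bit by bit above, one bit per p-state method. A sketch of the bit layout implied by the shifts (the macro names are hypothetical; only the shift values come from the code):

/* Bit layout implied by the shifts above; macro names are hypothetical. */
#define DML21_PSTATE_DISABLE_VBLANK      (1 << 1)
#define DML21_PSTATE_DISABLE_SVP         (1 << 2)
#define DML21_PSTATE_DISABLE_DRR_CLAMPED (1 << 3)
#define DML21_PSTATE_DISABLE_DRR_VAR     (1 << 4)

/* e.g. ((mask >> 2) & 1) above reads the same bit as
 * (mask & DML21_PSTATE_DISABLE_SVP) != 0 */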
-static void dml21_init_ip_params(struct dml2_initialize_instance_in_out *dml_init,
+/*
+ * Populate dml_init based on default static values in soc bb. The default
+ * values are for reference and support at least minimal operation of current
+ * SoC and DCN hardware. The values could be modifed by subsequent override
+ * functions to reflect our true hardware capability.
+ */
+static void populate_default_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- const struct dml2_ip_capabilities *ip_caps;
-
switch (in_dc->ctx->dce_version) {
case DCN_VERSION_4_01:
+ dml_init->options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
+ dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
+ dml_init->soc_bb = dml2_socbb_dcn401;
+ dml_init->soc_bb.qos_parameters = dml_dcn4_variant_a_soc_qos_params;
+ dml_init->ip_caps = dml2_dcn401_max_ip_caps;
+ break;
default:
- ip_caps = &dml2_dcn401_max_ip_caps;
+ memset(dml_init, 0, sizeof(*dml_init));
+ DC_ERR("unsupported dcn version for DML21!");
+ return;
}
-
- memcpy(&dml_init->ip_caps, ip_caps, sizeof(struct dml2_ip_capabilities));
}
-void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init,
+static void override_dml_init_with_values_from_hardware_default(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- if (config->use_native_soc_bb_construction)
- dml21_init_socbb_params(dml_init, config, in_dc);
- else
- dml21_external_socbb_params(dml_init, config);
+ dml_init->soc_bb.dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
+ dml_init->soc_bb.dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000;
+ dml_init->soc_bb.dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
}
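The refactor splits the old monolithic dml21_apply_soc_bb_overrides() into ordered override passes over a default-initialized dml_init. A sketch of the assumed call order in the top-level initializer, which is not part of these hunks (function names are taken from this patch; the wrapper itself is illustrative):

/* Sketch of the assumed pipeline; the actual top-level caller is not shown
 * in this patch. Later passes override earlier ones. */
static void dml21_init_sketch(struct dml2_initialize_instance_in_out *dml_init,
			      const struct dml2_configuration_options *config,
			      const struct dc *in_dc)
{
	populate_default_dml_init_params(dml_init, config, in_dc);
	override_dml_init_with_values_from_hardware_default(dml_init, config, in_dc);
	override_dml_init_with_values_from_smu(dml_init, config, in_dc);
	override_dml_init_with_values_from_vbios(dml_init, config, in_dc);
	override_dml_init_with_values_from_dmub(dml_init, config, in_dc);
}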
-void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init,
+/*
+ * SMU stands for System Management Unit. It is a power management processor.
+ * It owns the initialization of dc's clock table and programming of clock values
+ * based on dc's requests.
+ * The clock values in the base soc bb are dummy placeholders. The real
+ * clock values are retrieved from SMU firmware into dc's clock table at
+ * runtime. This function overrides the dummy placeholder values with the
+ * real values from dc's clock table.
+ */
+static void override_dml_init_with_values_from_smu(
+ struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- if (config->use_native_soc_bb_construction)
- dml21_init_ip_params(dml_init, config, in_dc);
- else
- dml21_external_ip_params(dml_init, config);
-}
-
-void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config, const struct dc *in_dc)
-{
int i;
-
const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table;
- struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb;
- struct dml2_soc_state_table *dml_clk_table = &dml_soc_bb->clk_table;
-
- /* override clocks if smu is present */
- if (in_dc->clk_mgr->funcs->is_smu_present && in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) {
- /* dcfclk */
- if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
- dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dcfclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
- dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
- dml_clk_table->dcfclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- dml_clk_table->dcfclk.num_clk_values = i;
- }
+ struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
+
+ if (!in_dc->clk_mgr->funcs->is_smu_present ||
+ !in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr))
+ /* skip if smu is not present */
+ return;
+
+ /* dcfclk */
+ if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
+ dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dcfclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
+ dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
+ dml_clk_table->dcfclk.num_clk_values = i + 1;
} else {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->dcfclk.num_clk_values = i;
}
} else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
}
}
+ }
- /* fclk */
- if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
- dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->fclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
- dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
- dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
- dml_clk_table->fclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- dml_clk_table->fclk.num_clk_values = i;
- }
+ /* fclk */
+ if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
+ dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->fclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
+ dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
+ dml_clk_table->fclk.num_clk_values = i + 1;
} else {
- dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.num_clk_values = i;
}
} else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
}
}
+ }
- /* uclk */
- if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
- dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->uclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
- dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
- dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
- dml_clk_table->uclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- dml_clk_table->uclk.num_clk_values = i;
- }
+ /* uclk */
+ if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
+ dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->uclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
+ dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
+ dml_clk_table->uclk.num_clk_values = i + 1;
} else {
- dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->uclk.num_clk_values = i;
}
} else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
}
}
+ }
- /* dispclk */
- if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
- dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dispclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
- dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
- dml_clk_table->dispclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- dml_clk_table->dispclk.num_clk_values = i;
- }
+ /* dispclk */
+ if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
+ dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dispclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
+ dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
+ dml_clk_table->dispclk.num_clk_values = i + 1;
} else {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ dml_clk_table->dispclk.num_clk_values = i;
}
} else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
}
}
+ }
- /* dppclk */
- if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
- dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dppclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
- dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
- dml_clk_table->dppclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- dml_clk_table->dppclk.num_clk_values = i;
- }
+ /* dppclk */
+ if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
+ dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dppclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
+ dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
+ dml_clk_table->dppclk.num_clk_values = i + 1;
} else {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ dml_clk_table->dppclk.num_clk_values = i;
}
} else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
}
}
+ }
- /* dtbclk */
- if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
- dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dtbclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
- dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
- dml_clk_table->dtbclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- dml_clk_table->dtbclk.num_clk_values = i;
- }
+ /* dtbclk */
+ if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
+ dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dtbclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
+ dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
+ dml_clk_table->dtbclk.num_clk_values = i + 1;
} else {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ dml_clk_table->dtbclk.num_clk_values = i;
}
} else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
}
}
+ }
- /* socclk */
- if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
- dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->socclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
- dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
- dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
- dml_clk_table->socclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- dml_clk_table->socclk.num_clk_values = i;
- }
+ /* socclk */
+ if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
+ dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->socclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
+ dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
+ dml_clk_table->socclk.num_clk_values = i + 1;
} else {
- dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ dml_clk_table->socclk.num_clk_values = i;
}
} else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
+ dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
}
}
-
- /* do not override phyclks for now */
- /* phyclk */
- // dml_clk_table->phyclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_mhz * 1000;
- // }
-
- /* phyclk_d18 */
- // dml_clk_table->phyclk_d18.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d18_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk_d18.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d18_mhz * 1000;
- // }
-
- /* phyclk_d32 */
- // dml_clk_table->phyclk_d32.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d32_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk_d32.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d32_mhz * 1000;
- // }
}
+}
- dml_soc_bb->dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
- dml_soc_bb->dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000;
- dml_soc_bb->xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
- dml_soc_bb->dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+static void override_dml_init_with_values_from_vbios(
+ struct dml2_initialize_instance_in_out *dml_init,
+ const struct dml2_configuration_options *config,
+ const struct dc *in_dc)
+{
+ const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
+ struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb;
+ struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
- /* override bounding box paramters from VBIOS */
if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0)
dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
(in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10;
@@ -308,32 +293,120 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
}
- /* override bounding box paramters from DC config */
- if (in_dc->bb_overrides.sr_exit_time_ns) {
- dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
- in_dc->bb_overrides.sr_exit_time_ns / 1000.0;
+ dml_init->soc_bb.xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
+}
+
+static void override_dml_init_with_values_from_dmub(struct dml2_initialize_instance_in_out *dml_init,
+ const struct dml2_configuration_options *config,
+ const struct dc *in_dc)
+{
+ /*
+ * TODO - There seems to be overlap between the values overridden from
+ * DMUB and VBIOS. Investigate and identify the values that DMUB needs
+ * to own.
+ */
+// const struct dmub_soc_bb_params *dmub_bb_params =
+// (const struct dmub_soc_bb_params *)config->bb_from_dmub;
+
+// if (dmub_bb_params == NULL)
+// return;
+
+// if (dmub_bb_params->dram_clk_change_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
+// (double) dmub_bb_params->dram_clk_change_blackout_ns / 1000.0;
+// if (dmub_bb_params->dram_clk_change_read_only_ns > 0)
+// dml_init->soc_bb.power_management_parameters.dram_clk_change_read_only_us =
+// (double) dmub_bb_params->dram_clk_change_read_only_ns / 1000.0;
+// if (dmub_bb_params->dram_clk_change_write_only_ns > 0)
+// dml_init->soc_bb.power_management_parameters.dram_clk_change_write_only_us =
+// (double) dmub_bb_params->dram_clk_change_write_only_ns / 1000.0;
+// if (dmub_bb_params->fclk_change_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
+// (double) dmub_bb_params->fclk_change_blackout_ns / 1000.0;
+// if (dmub_bb_params->g7_ppt_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.g7_ppt_blackout_us =
+// (double) dmub_bb_params->g7_ppt_blackout_ns / 1000.0;
+// if (dmub_bb_params->stutter_enter_plus_exit_latency_ns > 0)
+// dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
+// (double) dmub_bb_params->stutter_enter_plus_exit_latency_ns / 1000.0;
+// if (dmub_bb_params->stutter_exit_latency_ns > 0)
+// dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
+// (double) dmub_bb_params->stutter_exit_latency_ns / 1000.0;
+// if (dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns > 0)
+// dml_init->soc_bb.power_management_parameters.z8_stutter_enter_plus_exit_latency_us =
+// (double) dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns / 1000.0;
+// if (dmub_bb_params->z8_stutter_exit_latency_ns > 0)
+// dml_init->soc_bb.power_management_parameters.z8_stutter_exit_latency_us =
+// (double) dmub_bb_params->z8_stutter_exit_latency_ns / 1000.0;
+// if (dmub_bb_params->z8_min_idle_time_ns > 0)
+// dml_init->soc_bb.power_management_parameters.z8_min_idle_time =
+// (double) dmub_bb_params->z8_min_idle_time_ns / 1000.0;
+// #ifndef TRIM_DML2_DCN6B_IP_SENSITIVE
+// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.lpddr5_dram_clk_change_blackout_us =
+// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
+// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.lpddr5_ppt_blackout_us =
+// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
+// #else
+// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.type_b_dram_clk_change_blackout_us =
+// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
+// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.type_b_ppt_blackout_us =
+// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
+// #endif
+// if (dmub_bb_params->vmin_limit_dispclk_khz > 0)
+// dml_init->soc_bb.vmin_limit.dispclk_khz = dmub_bb_params->vmin_limit_dispclk_khz;
+// if (dmub_bb_params->vmin_limit_dcfclk_khz > 0)
+// dml_init->soc_bb.vmin_limit.dcfclk_khz = dmub_bb_params->vmin_limit_dcfclk_khz;
+// if (dmub_bb_params->g7_temperature_read_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.g7_temperature_read_blackout_us =
+// (double) dmub_bb_params->g7_temperature_read_blackout_ns / 1000.0;
+}
+
+static void override_dml_init_with_values_from_software_policy(struct dml2_initialize_instance_in_out *dml_init,
+ const struct dml2_configuration_options *config,
+ const struct dc *in_dc)
+{
+ if (!config->use_native_soc_bb_construction) {
+ dml_init->soc_bb = config->external_socbb_ip_params->soc_bb;
+ dml_init->ip_caps = config->external_socbb_ip_params->ip_params;
}
- if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns) {
- dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ if (in_dc->bb_overrides.sr_exit_time_ns)
+ dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
+ in_dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns)
+ dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
- }
- if (in_dc->bb_overrides.dram_clock_change_latency_ns) {
- dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ if (in_dc->bb_overrides.dram_clock_change_latency_ns)
+ dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- }
- if (in_dc->bb_overrides.fclk_clock_change_latency_ns) {
- dml_soc_bb->power_management_parameters.fclk_change_blackout_us =
+ if (in_dc->bb_overrides.fclk_clock_change_latency_ns)
+ dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
- }
+}
- //TODO
- // if (in_dc->bb_overrides.dummy_clock_change_latency_ns) {
- // dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- // in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- // }
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
+ const struct dml2_configuration_options *config,
+ const struct dc *in_dc)
+{
+ populate_default_dml_init_params(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_hardware_default(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_smu(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_vbios(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_dmub(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_software_policy(dml_init, config, in_dc);
}
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
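
The hunk above collapses the three previous init entry points into one ordered pipeline: defaults first, then hardware, SMU, VBIOS, DMUB, and finally software policy, so a later stage always wins. A minimal sketch of the same last-writer-wins layering, with hypothetical names rather than the real DML API:

	struct pm_params { double sr_exit_us; };

	static void from_defaults(struct pm_params *p)
	{
		p->sr_exit_us = 10.0;			/* lowest precedence */
	}

	static void from_vbios(struct pm_params *p, unsigned int lat_100ns)
	{
		if (lat_100ns > 0)			/* override only when reported */
			p->sr_exit_us = (lat_100ns + 9) / 10; /* 100ns units, rounded up to us */
	}

	static void from_policy(struct pm_params *p, long lat_ns)
	{
		if (lat_ns > 0)
			p->sr_exit_us = lat_ns / 1000.0; /* debug override wins last */
	}

Note that the VBIOS conversion rounds 100ns units up to whole microseconds: (15 + 9) / 10 = 2, so 1.5us of reported latency is budgeted as 2us.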
@@ -726,7 +799,6 @@ static void populate_dml21_surface_config_from_plane_state(
switch (plane_state->tiling_info.gfxversion) {
case DcGfxVersion7:
case DcGfxVersion8:
- // Placeholder for programming the array_mode
break;
case DcGfxVersion9:
case DcGfxVersion10:
@@ -889,10 +961,8 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
case DC_CM2_GPU_MEM_SIZE_171717:
plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
break;
- case DC_CM2_GPU_MEM_SIZE_333333:
- plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube;
- break;
case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ default:
//plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
break;
}
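
The dtbclk and socclk loops earlier in this file apply the same clamp rule against the DC-mode limit: the first DPM entry above the limit is rewritten to the limit itself (when the previous entry was still below it), and everything past that point is dropped. A standalone sketch of the rule, assuming an ascending MHz table and not the DML code itself:

	static unsigned int clamp_to_dc_limit(unsigned int *out_khz, const unsigned int *in_mhz,
					      unsigned int n, unsigned int limit_mhz)
	{
		unsigned int i;

		for (i = 0; i < n; i++) {
			if (limit_mhz && in_mhz[i] > limit_mhz) {
				if (i == 0 || in_mhz[i - 1] < limit_mhz) {
					out_khz[i] = limit_mhz * 1000;	/* keep the limit as the top level */
					return i + 1;
				}
				return i;	/* previous level already reached the limit: truncate */
			}
			out_khz[i] = in_mhz[i] * 1000;
		}
		return n;	/* nothing exceeded the limit */
	}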
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
index 73a013be1e48..9880d3e0398e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
@@ -17,9 +17,7 @@ struct dml2_context;
struct dml2_configuration_options;
struct dml2_initialize_instance_in_out;
-void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
-void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
-void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context);
void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
index 930e86cdb88a..ee721606b883 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
@@ -384,6 +384,7 @@ void dml21_build_fams2_programming(const struct dc *dc,
/* reset fams2 data */
memset(&context->bw_ctx.bw.dcn.fams2_stream_base_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2, 0, sizeof(union dmub_fams2_stream_static_sub_state_v2) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config));
if (dml_ctx->v21.mode_programming.programming->fams2_required) {
@@ -414,9 +415,16 @@ void dml21_build_fams2_programming(const struct dc *dc,
memcpy(static_base_state,
&dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_base_params,
sizeof(union dmub_cmd_fams2_config));
- memcpy(static_sub_state,
- &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
- sizeof(union dmub_cmd_fams2_config));
+
+ if (dc->debug.fams_version.major == 3) {
+ memcpy(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[num_fams2_streams],
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params_v2,
+ sizeof(union dmub_fams2_stream_static_sub_state_v2));
+ } else {
+ memcpy(static_sub_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
+ sizeof(union dmub_cmd_fams2_config));
+ }
switch (dc->debug.fams_version.minor) {
case 1:
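
Together with the memset of fams2_stream_sub_params_v2 added earlier in this function, the dispatch above ensures the sub-state layout a stream does not use stays zeroed. Reduced to a sketch with hypothetical fill helpers:

	/* Sketch only: clear both layouts up front, then fill exactly one per stream. */
	memset(sub_v1, 0, sizeof(sub_v1));
	memset(sub_v2, 0, sizeof(sub_v2));
	if (dc->debug.fams_version.major == 3)
		fill_sub_v2(&sub_v2[num_fams2_streams]);	/* dmub_fams2_stream_static_sub_state_v2 */
	else
		fill_sub_v1(&sub_v1[num_fams2_streams]);	/* legacy dmub_cmd_fams2_config */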
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index 208d3651b6ba..03de3cf06ae5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -2,8 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#include <linux/vmalloc.h>
-
#include "dml2_internal_types.h"
#include "dml_top.h"
#include "dml2_core_dcn4_calcs.h"
@@ -37,15 +35,11 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
return true;
}
-static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
+static void dml21_populate_configuration_options(const struct dc *in_dc,
+ struct dml2_context *dml_ctx,
+ const struct dml2_configuration_options *config)
{
- bool disable_fams2;
- struct dml2_pmo_options *pmo_options = &dml_ctx->v21.dml_init.options.pmo_options;
-
- /* ODM options */
- pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm;
- pmo_options->disable_dyn_odm_for_multi_stream = true;
- pmo_options->disable_dyn_odm_for_stream_with_svp = true;
+ dml_ctx->config = *config;
/* UCLK P-State options */
if (in_dc->debug.dml21_force_pstate_method) {
@@ -55,52 +49,20 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex
} else {
dml_ctx->config.pmo.force_pstate_method_enable = false;
}
-
- pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1);
-
- /* NOTE: DRR and SubVP Require FAMS2 */
- disable_fams2 = !in_dc->debug.fams2_config.bits.enable;
- pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) ||
- in_dc->debug.force_disable_subvp ||
- disable_fams2;
- pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) ||
- disable_fams2;
- pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) ||
- disable_fams2;
- pmo_options->disable_fams2 = disable_fams2;
-
- pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE ||
- in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY;
- pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
-static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
+static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
{
- switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_4_01:
- (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
- break;
- default:
- (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_invalid;
- }
- (*dml_ctx)->architecture = dml2_architecture_21;
+ dml_ctx->architecture = dml2_architecture_21;
- /* Store configuration options */
- (*dml_ctx)->config = *config;
+ dml21_populate_configuration_options(in_dc, dml_ctx, config);
DC_FP_START();
- /*Initialize SOCBB and DCNIP params */
- dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
- dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
- dml21_apply_soc_bb_overrides(&(*dml_ctx)->v21.dml_init, config, in_dc);
-
- /* apply debug overrides */
- dml21_apply_debug_options(in_dc, *dml_ctx, config);
+ dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, config, in_dc);
- /*Initialize DML21 instance */
- dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);
+ dml2_initialize_instance(&dml_ctx->v21.dml_init);
DC_FP_END();
}
@@ -111,7 +73,7 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
if (!dml21_allocate_memory(dml_ctx))
return false;
- dml21_init(in_dc, dml_ctx, config);
+ dml21_init(in_dc, *dml_ctx, config);
return true;
}
@@ -328,12 +290,13 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
return true;
}
-bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate)
+bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
+ enum dc_validate_mode validate_mode)
{
bool out = false;
- /* Use dml_validate_only for fast_validate path */
- if (fast_validate)
+ /* Use dml21_check_mode_support for the DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX paths */
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
@@ -496,7 +459,7 @@ bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
return true;
}
-void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
+void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
{
dml21_init(in_dc, dml_ctx, config);
}
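
dml21_init() and dml21_reinit() now take struct dml2_context * instead of struct dml2_context **: once dml21_create() has allocated the context, nothing downstream reassigns the pointer, so the double indirection only obscured ownership. The convention, as a sketch with hypothetical types:

	struct ctx;
	bool ctx_create(struct ctx **out);	/* may allocate: needs the pointer's address */
	void ctx_init(struct ctx *c);		/* only mutates the object: a plain pointer suffices */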
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
index 42e715024bc9..15f92029d2e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
@@ -14,6 +14,7 @@ struct dc;
struct dc_state;
struct dml2_configuration_options;
struct dml2_context;
+enum dc_validate_mode;
/**
* dml2_create - Creates dml21_context.
@@ -33,22 +34,23 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
struct dml2_context *src_dml_ctx);
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
struct dml2_context *src_dml_ctx);
-void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config);
+void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config);
/**
* dml21_validate - Determines if a display configuration is supported or not.
* @in_dc: dc.
* @context: dc_state to be validated.
- * @fast_validate: Fast validate will not populate context.res_ctx.
+ * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX
+ * will not populate context.res_ctx.
*
- * Based on fast_validate option internally would call:
+ * Based on validate_mode, internally this will call:
*
- * -dml21_mode_check_and_programming - for non fast_validate option
+ * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option
* Calculates if dc_state can be supported on the input display
* configuration. If supported, generates the necessary HW
* programming for the new dc_state.
*
- * -dml21_check_mode_support - for fast_validate option
+ * -dml21_check_mode_support - for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX options
* Calculates if dc_state can be supported for the input display
* config.
@@ -56,7 +58,8 @@ void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
* separate dc_states for validation.
* Return: True if mode is supported, false otherwise.
*/
-bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate);
+bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
+ enum dc_validate_mode validate_mode);
/* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */
void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
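
A caller migrating from the old bool can map the flag directly onto the new enum; a sketch, assuming the dc_validate_mode values named in the kernel-doc above:

	bool ok = dml21_validate(in_dc, context, dml_ctx,
				 fast_validate ? DC_VALIDATE_MODE_ONLY
					       : DC_VALIDATE_MODE_AND_PROGRAMMING);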
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
index c047d56527c4..a64ec4dcf11a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
@@ -43,5 +43,4 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o
*/
bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
index 84c90050668c..b05030926ce8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
@@ -46,6 +46,7 @@ struct dml2_display_dlg_regs {
uint32_t dst_y_delta_drq_limit;
uint32_t refcyc_per_vm_dmdata;
uint32_t dmdata_dl_delta;
+ uint32_t dst_y_svp_drq_limit;
// MRQ
uint32_t refcyc_per_meta_chunk_vblank_l;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
index 255f05de362c..e8dc6471c0be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
@@ -222,6 +222,7 @@ struct dml2_composition_cfg {
struct {
bool enabled;
+ bool upsp_enabled;
struct {
double h_ratio;
double v_ratio;
@@ -426,6 +427,7 @@ struct dml2_stream_parameters {
struct dml2_display_cfg {
bool gpuvm_enable;
+ bool ffbm_enable;
bool hostvm_enable;
// Allocate DET proportionally between streams based on pixel rate
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
index 5f0bc42d1d2f..8c9f414aa6bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
@@ -93,12 +93,15 @@ struct dml2_soc_power_management_parameters {
double dram_clk_change_write_only_us;
double fclk_change_blackout_us;
double g7_ppt_blackout_us;
+ double g7_temperature_read_blackout_us;
double stutter_enter_plus_exit_latency_us;
double stutter_exit_latency_us;
double z8_stutter_enter_plus_exit_latency_us;
double z8_stutter_exit_latency_us;
double z8_min_idle_time;
double g6_temp_read_blackout_us[DML_MAX_CLK_TABLE_SIZE];
+ double type_b_dram_clk_change_blackout_us;
+ double type_b_ppt_blackout_us;
};
struct dml2_clk_table {
@@ -130,6 +133,7 @@ struct dml2_soc_state_table {
struct dml2_soc_vmin_clock_limits {
unsigned long dispclk_khz;
+ unsigned long dcfclk_khz;
};
struct dml2_soc_bb {
@@ -138,6 +142,7 @@ struct dml2_soc_bb {
struct dml2_soc_power_management_parameters power_management_parameters;
struct dml2_soc_vmin_clock_limits vmin_limit;
+ double lower_bound_bandwidth_dchub;
unsigned int dprefclk_mhz;
unsigned int xtalclk_mhz;
unsigned int pcie_refclk_mhz;
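
The new type_b_* blackout fields and the vmin dcfclk_khz limit are intended to be filled by the same guarded-override pattern as the other power-management parameters, as shown in the (currently commented-out) DMUB path earlier in this patch: convert only when firmware reports a non-zero value. One such override, assuming the dmub_bb_params source struct from that block:

	if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
		soc_bb->power_management_parameters.type_b_ppt_blackout_us =
			(double)dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;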
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index 0dbf886d8926..98c0234e2f47 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -53,7 +53,9 @@ enum dml2_output_type_and_rate__rate {
dml2_output_rate_hdmi_rate_6x4 = 9,
dml2_output_rate_hdmi_rate_8x4 = 10,
dml2_output_rate_hdmi_rate_10x4 = 11,
- dml2_output_rate_hdmi_rate_12x4 = 12
+ dml2_output_rate_hdmi_rate_12x4 = 12,
+ dml2_output_rate_hdmi_rate_16x4 = 13,
+ dml2_output_rate_hdmi_rate_20x4 = 14
};
struct dml2_pmo_options {
@@ -279,7 +281,10 @@ struct dml2_per_stream_programming {
} phantom_stream;
union dmub_cmd_fams2_config fams2_base_params;
- union dmub_cmd_fams2_config fams2_sub_params;
+ union {
+ union dmub_cmd_fams2_config fams2_sub_params;
+ union dmub_fams2_stream_static_sub_state_v2 fams2_sub_params_v2;
+ };
};
//-----------------
@@ -674,9 +679,14 @@ struct dml2_display_cfg_programming {
// unlimited # of mcache
struct dml2_mcache_surface_allocation non_optimized_mcache_allocation[DML2_MAX_PLANES];
+ bool failed_prefetch;
+ bool failed_uclk_pstate;
bool failed_mcache_validation;
bool failed_dpmm;
bool failed_mode_programming;
+ bool failed_mode_programming_dcfclk;
+ bool failed_mode_programming_prefetch;
+ bool failed_mode_programming_flip;
bool failed_map_watermarks;
} informative;
};
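
fams2_sub_params and fams2_sub_params_v2 now overlay each other in an anonymous union, so each stream carries exactly one sub-state layout, selected by dc->debug.fams_version.major. The access rule, as a sketch (use_v1/use_v2 are hypothetical consumers):

	/* Write and read the same union member, keyed by the FAMS major version. */
	if (dc->debug.fams_version.major == 3)
		use_v2(&programming->stream_programming[i].fams2_sub_params_v2);
	else
		use_v1(&programming->stream_programming[i].fams2_sub_params);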
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 5b62cd19d979..b9cff2198511 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -4861,7 +4861,7 @@ static double get_urgent_bandwidth_required(
double ReadBandwidthChroma[],
double PrefetchBandwidthLuma[],
double PrefetchBandwidthChroma[],
- double PrefetchBandwidthOto[],
+ double PrefetchBandwidthMax[],
double excess_vactive_fill_bw_l[],
double excess_vactive_fill_bw_c[],
double cursor_bw[],
@@ -4925,9 +4925,9 @@ static double get_urgent_bandwidth_required(
l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k];
l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur;
l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
- l->flip_and_prefetch_bw_oto = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthOto[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
+ l->flip_and_prefetch_bw_max = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthMax[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k];
- surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_oto);
+ surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_max);
/* export peak required bandwidth for the surface */
surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]);
@@ -5125,7 +5125,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->Tsw_est3 = 0.0;
s->cursor_prefetch_bytes = 0;
*p->prefetch_cursor_bw = 0;
- *p->RequiredPrefetchBWOTO = 0.0;
+ *p->RequiredPrefetchBWMax = 0.0;
dcc_mrq_enable = (p->dcc_enable && p->mrq_present);
@@ -5356,7 +5356,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
* mp will fail if ms decides to use equ schedule and mp decides to use oto schedule
* and the required bandwidth increases when going from ms to mp
*/
- *p->RequiredPrefetchBWOTO = s->prefetch_bw_oto;
+ *p->RequiredPrefetchBWMax = s->prefetch_bw_oto;
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
@@ -5718,8 +5718,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->TimeForFetchingVM = s->Tvm_equ;
s->TimeForFetchingRowInVBlank = s->Tr0_equ;
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+
+ /* equ bw should be propagated so a ceiling of the equ bw is accounted for prior to mode programming.
+ * Overall bandwidth may be lower when going from mode support to mode programming but final pixel data
+ * bandwidth may end up higher than what was calculated in mode support.
+ */
+ *p->RequiredPrefetchBWMax = math_max2(s->prefetch_bw_equ, *p->RequiredPrefetchBWMax);
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
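
With the rename from RequiredPrefetchBWOTO to RequiredPrefetchBWMax, the exported value is no longer just the oto-schedule bandwidth: it starts as prefetch_bw_oto and, on the equ path above, is raised to cover the equ bandwidth too. The net effect of the two assignments:

	/* Sketch of the combined result on the equ scheduling path: */
	*p->RequiredPrefetchBWMax = math_max2(s->prefetch_bw_equ, s->prefetch_bw_oto);

so mode support already budgets for whichever schedule mode programming ends up choosing.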
@@ -6115,7 +6121,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
- l->zero_array, //PrefetchBWOTO
+ l->zero_array, //PrefetchBWMax
l->zero_array,
l->zero_array,
l->zero_array,
@@ -6152,7 +6158,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
- l->zero_array, //PrefetchBWOTO
+ l->zero_array, //PrefetchBWMax
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6189,7 +6195,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
- p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6226,7 +6232,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
- p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6263,7 +6269,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
- p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -7490,7 +7496,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
- CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &mode_lib->ms.RequiredPrefetchBWMax[k];
CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
@@ -7635,7 +7641,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
@@ -7802,7 +7808,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
@@ -7908,6 +7914,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
}
+
static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params)
{
struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
@@ -11256,7 +11263,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &s->dummy_single_array[0][k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &s->dummy_single_array[0][k];
CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k];
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k];
CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k];
@@ -11399,7 +11406,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0];
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0];
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->mp.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->mp.cursor_bw;
@@ -11539,7 +11546,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->meta_row_bw = mode_lib->mp.meta_row_bw;
calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->mp.prefetch_cursor_bw;
calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->mp.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0];
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0];
calculate_peak_bandwidth_params->flip_bw = mode_lib->mp.final_flip_bw;
calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->mp.UrgentBurstFactorLuma;
calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->mp.UrgentBurstFactorChroma;
@@ -11883,7 +11890,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
}
//Maximum Bandwidth Used
- s->TotalWRBandwidth = 0;
+ mode_lib->mp.TotalWRBandwidth = 0;
for (k = 0; k < display_cfg->num_streams; ++k) {
s->WRBandwidth = 0;
if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0) {
@@ -11892,7 +11899,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
(display_cfg->stream_descriptors[k].timing.h_total * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].input_height
/ ((double)display_cfg->stream_descriptors[k].timing.pixel_clock_khz / 1000))
* (display_cfg->stream_descriptors[k].writeback.writeback_stream[0].pixel_format == dml2_444_32 ? 4.0 : 8.0);
- s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
+ mode_lib->mp.TotalWRBandwidth = mode_lib->mp.TotalWRBandwidth + s->WRBandwidth;
}
}
@@ -13062,6 +13069,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_10x4;
else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_12x4)
out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_12x4;
+ else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_16x4)
+ out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_16x4;
+ else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_20x4)
+ out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_20x4;
out->informative.mode_support_info.AlignedYPitch[k] = mode_lib->ms.support.AlignedYPitch[k];
out->informative.mode_support_info.AlignedCPitch[k] = mode_lib->ms.support.AlignedCPitch[k];
@@ -13246,7 +13257,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k];
out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k];
- out->informative.misc.WritebackRequiredBandwidth = mode_lib->scratch.dml_core_mode_programming_locals.TotalWRBandwidth / 1000.0;
+ out->informative.misc.WritebackRequiredBandwidth = mode_lib->mp.TotalWRBandwidth / 1000.0;
out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k];
out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k];
out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k];
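
Moving TotalWRBandwidth from dml_core_mode_programming_locals into mode_lib->mp also takes the read in dml2_core_calcs_get_informative() off the scratch area, which is reused between calculation passes and so may no longer hold the programming-time value by the time the informative data is collected. The shape of the fix, as a sketch:

	/* Accumulate into persistent state, not per-pass scratch. */
	struct mode_program { double TotalWRBandwidth; };	/* lives as long as mode_lib */
	struct pass_scratch { double WRBandwidth; };		/* valid within one pass only */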
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index bdee6ad7bc59..28687565ac22 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -102,6 +102,7 @@ struct dml2_core_internal_DmlPipe {
double DCFClkDeepSleep;
unsigned int DPPPerSurface;
bool ScalerEnabled;
+ bool UPSPEnabled;
enum dml2_rotation_angle RotationAngle;
bool mirrored;
unsigned int ViewportHeight;
@@ -186,7 +187,9 @@ enum dml2_core_internal_output_type_rate {
dml2_core_internal_output_rate_hdmi_rate_6x4 = 9,
dml2_core_internal_output_rate_hdmi_rate_8x4 = 10,
dml2_core_internal_output_rate_hdmi_rate_10x4 = 11,
- dml2_core_internal_output_rate_hdmi_rate_12x4 = 12
+ dml2_core_internal_output_rate_hdmi_rate_12x4 = 12,
+ dml2_core_internal_output_rate_hdmi_rate_16x4 = 13,
+ dml2_core_internal_output_rate_hdmi_rate_20x4 = 14
};
struct dml2_core_internal_watermarks {
@@ -260,12 +263,14 @@ struct dml2_core_internal_mode_support_info {
bool AvgBandwidthSupport;
bool UrgVactiveBandwidthSupport;
bool EnoughUrgentLatencyHidingSupport;
+ bool PrefetchScheduleSupported;
bool PrefetchSupported;
bool PrefetchBandwidthSupported;
bool DynamicMetadataSupported;
bool VRatioInPrefetchSupported;
bool DISPCLK_DPPCLK_Support;
bool TotalAvailablePipesSupport;
+ bool ODMSupport;
bool ModeSupport;
bool ViewportSizeSupport;
@@ -314,9 +319,7 @@ struct dml2_core_internal_mode_support_info {
double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; // same as urg_bandwidth, except not scaled by urg burst factor
double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
-
bool avg_bandwidth_support_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
-
double max_urgent_latency_us;
double max_non_urgent_latency_us;
double avg_non_urgent_latency_us;
@@ -329,6 +332,8 @@ struct dml2_core_internal_mode_support_info {
bool temp_read_or_ppt_support;
struct dml2_core_internal_watermarks watermarks;
+ bool dcfclk_support;
+ bool qos_bandwidth_support;
};
struct dml2_core_internal_mode_support {
@@ -350,9 +355,11 @@ struct dml2_core_internal_mode_support {
double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state
double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
double GlobalDPPCLK; /// <brief the Max DPPCLK freq out of all pipes
+ double GlobalDTBCLK; /// <brief the Max DTBCLK freq out of all pipes
double uclk_freq_mhz;
double dram_bw_mbps;
double max_dram_bw_mbps;
+ double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps
double MaxFabricClock; /// <brief Basically just the clock freq at the min (or given) state
double MaxDCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
@@ -394,9 +401,13 @@ struct dml2_core_internal_mode_support {
double TWait[DML2_MAX_PLANES];
bool UnboundedRequestEnabled;
+ unsigned int compbuf_reserved_space_64b;
+ bool hw_debug5;
unsigned int CompressedBufferSizeInkByte;
double VRatioPreY[DML2_MAX_PLANES];
double VRatioPreC[DML2_MAX_PLANES];
+ unsigned int req_per_swath_ub_l[DML2_MAX_PLANES];
+ unsigned int req_per_swath_ub_c[DML2_MAX_PLANES];
unsigned int swath_width_luma_ub[DML2_MAX_PLANES];
unsigned int swath_width_chroma_ub[DML2_MAX_PLANES];
unsigned int RequiredSlots[DML2_MAX_PLANES];
@@ -417,8 +428,8 @@ struct dml2_core_internal_mode_support {
double dst_y_prefetch[DML2_MAX_PLANES];
double LinesForVM[DML2_MAX_PLANES];
double LinesForDPTERow[DML2_MAX_PLANES];
- double SwathWidthYSingleDPP[DML2_MAX_PLANES];
- double SwathWidthCSingleDPP[DML2_MAX_PLANES];
+ unsigned int SwathWidthYSingleDPP[DML2_MAX_PLANES];
+ unsigned int SwathWidthCSingleDPP[DML2_MAX_PLANES];
unsigned int BytePerPixelY[DML2_MAX_PLANES];
unsigned int BytePerPixelC[DML2_MAX_PLANES];
double BytePerPixelInDETY[DML2_MAX_PLANES];
@@ -469,13 +480,58 @@ struct dml2_core_internal_mode_support {
double mall_prefetch_sdp_overhead_factor[DML2_MAX_PLANES]; // overhead to the imall or phantom pipe
double mall_prefetch_dram_overhead_factor[DML2_MAX_PLANES];
+ bool is_using_mall_for_ss[DML2_MAX_PLANES];
+ unsigned int meta_row_width_chroma[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqHeightC[DML2_MAX_PLANES];
+ bool PTE_BUFFER_MODE[DML2_MAX_PLANES];
+ unsigned int meta_req_height_chroma[DML2_MAX_PLANES];
+ unsigned int meta_pte_bytes_per_frame_ub_c[DML2_MAX_PLANES];
+ unsigned int dpde0_bytes_per_frame_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_width_luma_ub[DML2_MAX_PLANES];
+ unsigned int meta_req_width[DML2_MAX_PLANES];
+ unsigned int meta_row_width[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqWidthY[DML2_MAX_PLANES];
+ unsigned int dpte_row_height_linear[DML2_MAX_PLANES];
+ unsigned int PTERequestSizeY[DML2_MAX_PLANES];
+ unsigned int dpte_row_width_chroma_ub[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqWidthC[DML2_MAX_PLANES];
+ unsigned int meta_pte_bytes_per_frame_ub_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_height_linear_chroma[DML2_MAX_PLANES];
+ unsigned int PTERequestSizeC[DML2_MAX_PLANES];
+ unsigned int meta_req_height[DML2_MAX_PLANES];
+ unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_req_width_chroma[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqHeightY[DML2_MAX_PLANES];
+ unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES];
+ unsigned int vm_group_bytes[DML2_MAX_PLANES];
+ unsigned int VReadyOffsetPix[DML2_MAX_PLANES];
+ unsigned int VUpdateOffsetPix[DML2_MAX_PLANES];
+ unsigned int VUpdateWidthPix[DML2_MAX_PLANES];
+ double TSetup[DML2_MAX_PLANES];
+ double Tdmdl_vm_raw[DML2_MAX_PLANES];
+ double Tdmdl_raw[DML2_MAX_PLANES];
+ unsigned int VStartupMin[DML2_MAX_PLANES]; /// <brief Minimum vstartup to meet the prefetch schedule (i.e. the prefetch solution can be found at this vstartup time); not the actual global sync vstartup pos.
+ double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
+ double MaxActiveFCLKChangeLatencySupported;
+
// Backend
bool RequiresDSC[DML2_MAX_PLANES];
bool RequiresFEC[DML2_MAX_PLANES];
double OutputBpp[DML2_MAX_PLANES];
+ double DesiredOutputBpp[DML2_MAX_PLANES];
+ double PixelClockBackEnd[DML2_MAX_PLANES];
unsigned int DSCDelay[DML2_MAX_PLANES];
enum dml2_core_internal_output_type OutputType[DML2_MAX_PLANES];
enum dml2_core_internal_output_type_rate OutputRate[DML2_MAX_PLANES];
+ bool TotalAvailablePipesSupportNoDSC;
+ bool TotalAvailablePipesSupportDSC;
+ unsigned int NumberOfDPPNoDSC;
+ unsigned int NumberOfDPPDSC;
+ enum dml2_odm_mode ODMModeNoDSC;
+ enum dml2_odm_mode ODMModeDSC;
+ double RequiredDISPCLKPerSurfaceNoDSC;
+ double RequiredDISPCLKPerSurfaceDSC;
+ unsigned int EstimatedNumberOfDSCSlices[DML2_MAX_PLANES];
// Bandwidth Related Info
double BandwidthAvailableForImmediateFlip;
@@ -484,8 +540,14 @@ struct dml2_core_internal_mode_support {
double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK];
double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES];
double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES];
- /* oto bw should also be considered when calculating peak urgent bw to avoid situations oto/equ mismatches between ms and mp */
- double RequiredPrefetchBWOTO[DML2_MAX_PLANES];
+ /* Max bandwidth calculated from prefetch schedule should be considered in addition to the pixel data bw to avoid ms/mp mismatches.
+ * 1. oto bw should also be considered when calculating peak urgent bw to avoid situations where oto/equ mismatches arise between ms and mp
+ *
+ * 2. equ bandwidth needs to be considered for calculating peak urgent bw when equ schedule is used in mode support.
+ * Slight differences in intermediate variables may cause the pixel data bandwidth to be higher
+ * even though the overall equ prefetch bandwidth can be lower going from ms to mp
+ */
+ double RequiredPrefetchBWMax[DML2_MAX_PLANES];
double cursor_bw[DML2_MAX_PLANES];
double prefetch_cursor_bw[DML2_MAX_PLANES];
double prefetch_vmrow_bw[DML2_MAX_PLANES];
@@ -538,7 +600,44 @@ struct dml2_core_internal_mode_support {
bool mall_comb_mcache_c[DML2_MAX_PLANES];
bool lc_comb_mcache[DML2_MAX_PLANES];
+ unsigned int vmpg_width_y[DML2_MAX_PLANES];
+ unsigned int vmpg_height_y[DML2_MAX_PLANES];
+ unsigned int vmpg_width_c[DML2_MAX_PLANES];
+ unsigned int vmpg_height_c[DML2_MAX_PLANES];
+
+ unsigned int meta_row_height_luma[DML2_MAX_PLANES];
+ unsigned int meta_row_height_chroma[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
+
+ unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_line[DML2_MAX_PLANES];
+
+ unsigned int MaximumVStartup[DML2_MAX_PLANES];
+
+ double HostVMInefficiencyFactor;
+ double HostVMInefficiencyFactorPrefetch;
+
+ unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
+ double tdlut_opt_time[DML2_MAX_PLANES];
+ double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
+
+ double Tvm_trips_flip[DML2_MAX_PLANES];
+ double Tr0_trips_flip[DML2_MAX_PLANES];
+ double Tvm_trips_flip_rounded[DML2_MAX_PLANES];
+ double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
+ unsigned int DSTYAfterScaler[DML2_MAX_PLANES];
+ unsigned int DSTXAfterScaler[DML2_MAX_PLANES];
+
+ enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
};
/// @brief A mega structure that houses various info for model programming step.
@@ -548,6 +647,7 @@ struct dml2_core_internal_mode_program {
double FabricClock; /// <brief Basically just the clock freq at the min (or given) state
//double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
double dram_bw_mbps;
+ double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps
double uclk_freq_mhz;
unsigned int NoOfDPP[DML2_MAX_PLANES];
enum dml2_odm_mode ODMMode[DML2_MAX_PLANES];
@@ -599,6 +699,8 @@ struct dml2_core_internal_mode_program {
unsigned int MacroTileHeightC[DML2_MAX_PLANES];
unsigned int MacroTileWidthY[DML2_MAX_PLANES];
unsigned int MacroTileWidthC[DML2_MAX_PLANES];
+ double MaximumSwathWidthLuma[DML2_MAX_PLANES];
+ double MaximumSwathWidthChroma[DML2_MAX_PLANES];
bool surf_linear128_l[DML2_MAX_PLANES];
bool surf_linear128_c[DML2_MAX_PLANES];
@@ -631,6 +733,14 @@ struct dml2_core_internal_mode_program {
double UrgentBurstFactorChroma[DML2_MAX_PLANES];
double UrgentBurstFactorChromaPre[DML2_MAX_PLANES];
+ double MaximumSwathWidthInLineBufferLuma;
+ double MaximumSwathWidthInLineBufferChroma;
+
+ unsigned int vmpg_width_y[DML2_MAX_PLANES];
+ unsigned int vmpg_height_y[DML2_MAX_PLANES];
+ unsigned int vmpg_width_c[DML2_MAX_PLANES];
+ unsigned int vmpg_height_c[DML2_MAX_PLANES];
+
double meta_row_bw[DML2_MAX_PLANES];
unsigned int meta_row_bytes[DML2_MAX_PLANES];
unsigned int meta_req_width[DML2_MAX_PLANES];
@@ -652,7 +762,9 @@ struct dml2_core_internal_mode_program {
unsigned int PTERequestSizeC[DML2_MAX_PLANES];
double TWait[DML2_MAX_PLANES];
+ double Tdmdl_vm_raw[DML2_MAX_PLANES];
double Tdmdl_vm[DML2_MAX_PLANES];
+ double Tdmdl_raw[DML2_MAX_PLANES];
double Tdmdl[DML2_MAX_PLANES];
double TSetup[DML2_MAX_PLANES];
unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES];
@@ -684,6 +796,38 @@ struct dml2_core_internal_mode_program {
double TCalc;
unsigned int TotImmediateFlipBytes;
+ unsigned int MaxTotalDETInKByte;
+ unsigned int NomDETInKByte;
+ unsigned int MinCompressedBufferSizeInKByte;
+ double PixelClockBackEnd[DML2_MAX_PLANES];
+ double OutputBpp[DML2_MAX_PLANES];
+ bool dsc_enable[DML2_MAX_PLANES];
+ unsigned int num_dsc_slices[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_line[DML2_MAX_PLANES];
+ unsigned int MaxVStartupLines[DML2_MAX_PLANES]; /// <brief more like vblank for the plane's OTG
+ double HostVMInefficiencyFactor;
+ double HostVMInefficiencyFactorPrefetch;
+ unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
+ double tdlut_opt_time[DML2_MAX_PLANES];
+ double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
+ double Tvm_trips_flip[DML2_MAX_PLANES];
+ double Tr0_trips_flip[DML2_MAX_PLANES];
+ double Tvm_trips_flip_rounded[DML2_MAX_PLANES];
+ double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
+ bool immediate_flip_required; // any pipes need immediate flip
+ double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state
+ double TotalWRBandwidth;
+ double max_urgent_latency_us;
+ double df_response_time_us;
+
// -------------------
// Output
// -------------------
@@ -694,9 +838,12 @@ struct dml2_core_internal_mode_program {
// Support
bool UrgVactiveBandwidthSupport;
+ bool PrefetchScheduleSupported;
+ bool UrgentBandwidthSupport;
bool PrefetchModeSupported; // <brief Is the prefetch mode (bandwidth and latency) supported
bool ImmediateFlipSupported;
bool ImmediateFlipSupportedForPipe[DML2_MAX_PLANES];
+ bool dcfclk_support;
// Clock
double Dcfclk;
@@ -788,7 +935,7 @@ struct dml2_core_internal_mode_program {
// RQ registers
bool PTE_BUFFER_MODE[DML2_MAX_PLANES];
unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES];
-
+ double VActiveLatencyHidingUs[DML2_MAX_PLANES];
unsigned int SubViewportLinesNeededInMALL[DML2_MAX_PLANES];
bool is_using_mall_for_ss[DML2_MAX_PLANES];
@@ -1001,10 +1148,10 @@ struct dml2_core_calcs_mode_programming_locals {
double dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
double surface_dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES];
double surface_dummy_bw0[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES];
- unsigned int dummy_integer_array[2][DML2_MAX_PLANES];
+ unsigned int dummy_integer_array[4][DML2_MAX_PLANES];
enum dml2_output_encoder_class dummy_output_encoder_array[DML2_MAX_PLANES];
double dummy_single_array[2][DML2_MAX_PLANES];
- unsigned int dummy_long_array[4][DML2_MAX_PLANES];
+ unsigned int dummy_long_array[8][DML2_MAX_PLANES];
bool dummy_boolean_array[2][DML2_MAX_PLANES];
bool dummy_boolean[2];
double dummy_single[2];
@@ -1028,7 +1175,6 @@ struct dml2_core_calcs_mode_programming_locals {
double dlg_vblank_start;
double LSetup;
double blank_lines_remaining;
- double TotalWRBandwidth;
double WRBandwidth;
struct dml2_core_internal_DmlPipe myPipe;
double PixelClockBackEndFactor;
@@ -1153,6 +1299,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params {
unsigned int HostVMMinPageSize;
unsigned int DCCMetaBufferSizeBytes;
bool mrq_present;
+ enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
// Output
bool *PTEBufferSizeNotExceeded;
@@ -1389,7 +1536,7 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals {
double vm_row_bw;
double flip_and_active_bw;
double flip_and_prefetch_bw;
- double flip_and_prefetch_bw_oto;
+ double flip_and_prefetch_bw_max;
double active_and_excess_bw;
};
@@ -1418,6 +1565,7 @@ struct dml2_core_shared_CalculateFlipSchedule_locals {
struct dml2_core_shared_rq_dlg_get_dlg_reg_locals {
unsigned int plane_idx;
+ unsigned int stream_idx;
enum dml2_source_format_class source_format;
const struct dml2_timing_cfg *timing;
bool dual_plane;
@@ -1625,6 +1773,9 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
double *BytePerPixDETC;
unsigned int *DPPPerSurface;
bool mrq_present;
+ unsigned int dummy[2][DML2_MAX_PLANES];
+ unsigned int swath_width_luma_ub_single_dpp[DML2_MAX_PLANES];
+ unsigned int swath_width_chroma_ub_single_dpp[DML2_MAX_PLANES];
// output
unsigned int *req_per_swath_ub_l;
@@ -1642,6 +1793,8 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
unsigned int *DETBufferSizeC;
unsigned int *full_swath_bytes_l;
unsigned int *full_swath_bytes_c;
+ unsigned int *full_swath_bytes_single_dpp_l;
+ unsigned int *full_swath_bytes_single_dpp_c;
bool *UnboundedRequestEnabled;
unsigned int *compbuf_reserved_space_64b;
unsigned int *CompressedBufferSizeInkByte;
@@ -1801,7 +1954,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double *VRatioPrefetchC;
double *RequiredPrefetchPixelDataBWLuma;
double *RequiredPrefetchPixelDataBWChroma;
- double *RequiredPrefetchBWOTO;
+ double *RequiredPrefetchBWMax;
bool *NotEnoughTimeForDynamicMetadata;
double *Tno_bw;
double *Tno_bw_flip;
@@ -2038,7 +2191,7 @@ struct dml2_core_calcs_calculate_peak_bandwidth_required_params {
double *surface_read_bandwidth_c;
double *prefetch_bandwidth_l;
double *prefetch_bandwidth_c;
- double *prefetch_bandwidth_oto;
+ double *prefetch_bandwidth_max;
double *excess_vactive_fill_bw_l;
double *excess_vactive_fill_bw_c;
double *cursor_bw;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
index 7a220c0141c2..5f301befed16 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -464,7 +464,7 @@ bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw
bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode)
{
- return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements);
+ return sw_mode == dml2_sw_linear;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index f486b090bbfc..22969a533a7b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -389,9 +389,6 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo
if (result)
result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dispclk_khz, &state_table->dispclk);
- if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.deepsleep_dcfclk_khz, &state_table->dcfclk);
-
for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
if (result)
result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4x.dppclk_khz, &state_table->dppclk);
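The hunk above drops the DPM rounding of the deep-sleep DCFCLK minimum; the remaining calls still snap each requested clock up to the next entry of the SoC's DPM table. For readers unfamiliar with the helper, this is a minimal sketch of what such a round-up does — the real round_up_to_next_dpm() operates on the dml2 clock-table types, so the flat ascending array here is an assumption for illustration:

static bool round_up_to_next_dpm_sketch(unsigned long *clk_khz,
					const unsigned long *levels_khz,
					int num_levels)
{
	int i;

	/* levels_khz is assumed sorted ascending */
	for (i = 0; i < num_levels; i++) {
		if (levels_khz[i] >= *clk_khz) {
			*clk_khz = levels_khz[i];
			return true;
		}
	}
	return false;	/* no DPM level can satisfy the request */
}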
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
index b226225103c3..611c80f4f1bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
@@ -10,15 +10,74 @@
#define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN
#define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__)
-/* ASSERT with message output */
-#define DML_ASSERT_MSG(condition, fmt, ...) \
- do { \
- if (!(condition)) { \
- DML_LOG_ERROR("DML ASSERT hit in %s line %d\n", __func__, __LINE__); \
- DML_LOG_ERROR(fmt, ## __VA_ARGS__); \
- DML_ASSERT(condition); \
- } \
- } while (0)
+/* private helper macros */
+#define _BOOL_FORMAT(field) "%s", field ? "true" : "false"
+#define _UINT_FORMAT(field) "%u", field
+#define _INT_FORMAT(field) "%d", field
+#define _DOUBLE_FORMAT(field) "%lf", field
+#define _ELEMENT_FUNC "function"
+#define _ELEMENT_COMP_IF "component_interface"
+#define _ELEMENT_TOP_IF "top_interface"
+#define _LOG_ENTRY(element) do { \
+ DML_LOG_INTERNAL("<"element" name=\""); \
+ DML_LOG_INTERNAL(__func__); \
+ DML_LOG_INTERNAL("\">\n"); \
+} while (0)
+#define _LOG_EXIT(element) DML_LOG_INTERNAL("</"element">\n")
+#define _LOG_SCALAR(field, format) do { \
+ DML_LOG_INTERNAL(#field" = "format(field)); \
+ DML_LOG_INTERNAL("\n"); \
+} while (0)
+#define _LOG_ARRAY(field, size, format) do { \
+ DML_LOG_INTERNAL(#field " = ["); \
+ for (int _i = 0; _i < (int) size; _i++) { \
+ DML_LOG_INTERNAL(format(field[_i])); \
+ if (_i + 1 == (int) size) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+}} while (0)
+#define _LOG_2D_ARRAY(field, size0, size1, format) do { \
+ DML_LOG_INTERNAL(#field" = ["); \
+ for (int _i = 0; _i < (int) size0; _i++) { \
+ DML_LOG_INTERNAL("\n\t["); \
+ for (int _j = 0; _j < (int) size1; _j++) { \
+ DML_LOG_INTERNAL(format(field[_i][_j])); \
+ if (_j + 1 == (int) size1) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_i + 1 == (int) size0) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+} while (0)
+#define _LOG_3D_ARRAY(field, size0, size1, size2, format) do { \
+ DML_LOG_INTERNAL(#field" = ["); \
+ for (int _i = 0; _i < (int) size0; _i++) { \
+ DML_LOG_INTERNAL("\n\t["); \
+ for (int _j = 0; _j < (int) size1; _j++) { \
+ DML_LOG_INTERNAL("["); \
+ for (int _k = 0; _k < (int) size2; _k++) { \
+ DML_LOG_INTERNAL(format(field[_i][_j][_k])); \
+ if (_k + 1 == (int) size2) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_j + 1 == (int) size1) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_i + 1 == (int) size0) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+} while (0)
/* fatal errors for unrecoverable DML states until a full reset */
#define DML_LOG_LEVEL_FATAL 0
@@ -28,7 +87,7 @@
#define DML_LOG_LEVEL_WARN 2
/* high level tracing of DML interfaces */
#define DML_LOG_LEVEL_INFO 3
-/* detailed tracing of DML internal components */
+/* tracing of DML internal execution */
#define DML_LOG_LEVEL_DEBUG 4
/* detailed tracing of DML calculation procedure */
#define DML_LOG_LEVEL_VERBOSE 5
@@ -37,30 +96,94 @@
#define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT
#endif /* #ifndef DML_LOG_LEVEL */
+/* public macros for DML_LOG_LEVEL_FATAL and up */
#define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__)
+
+/* public macros for DML_LOG_LEVEL_ERROR and up */
#if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR
#define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] "fmt, ## __VA_ARGS__)
+#define DML_ASSERT_MSG(condition, fmt, ...) \
+ do { \
+ if (!(condition)) { \
+ DML_LOG_ERROR("ASSERT hit in %s line %d\n", __func__, __LINE__); \
+ DML_LOG_ERROR(fmt, ## __VA_ARGS__); \
+ DML_ASSERT(condition); \
+ } \
+ } while (0)
#else
#define DML_LOG_ERROR(fmt, ...) ((void)0)
+#define DML_ASSERT_MSG(condition, fmt, ...) ((void)0)
#endif
+
+/* public macros for DML_LOG_LEVEL_WARN and up */
#if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN
#define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_WARN(fmt, ...) ((void)0)
#endif
+
+/* public macros for DML_LOG_LEVEL_INFO and up */
#if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO
#define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] "fmt, ## __VA_ARGS__)
+#define DML_LOG_TOP_IF_ENTER() _LOG_ENTRY(_ELEMENT_TOP_IF)
+#define DML_LOG_TOP_IF_EXIT() _LOG_EXIT(_ELEMENT_TOP_IF)
#else
#define DML_LOG_INFO(fmt, ...) ((void)0)
+#define DML_LOG_TOP_IF_ENTER() ((void)0)
+#define DML_LOG_TOP_IF_EXIT() ((void)0)
#endif
+
+/* public macros for DML_LOG_LEVEL_DEBUG and up */
#if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG
-#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL("[DML DEBUG] "fmt, ## __VA_ARGS__)
+#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__)
+#define DML_LOG_COMP_IF_ENTER() _LOG_ENTRY(_ELEMENT_COMP_IF)
+#define DML_LOG_COMP_IF_EXIT() _LOG_EXIT(_ELEMENT_COMP_IF)
+#define DML_LOG_FUNC_ENTER() _LOG_ENTRY(_ELEMENT_FUNC)
+#define DML_LOG_FUNC_EXIT() _LOG_EXIT(_ELEMENT_FUNC)
+#define DML_LOG_DEBUG_BOOL(field) _LOG_SCALAR(field, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_UINT(field) _LOG_SCALAR(field, _UINT_FORMAT)
+#define DML_LOG_DEBUG_INT(field) _LOG_SCALAR(field, _INT_FORMAT)
+#define DML_LOG_DEBUG_DOUBLE(field) _LOG_SCALAR(field, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) _LOG_ARRAY(field, size, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_UINT(field, size) _LOG_ARRAY(field, size, _UINT_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_INT(field, size) _LOG_ARRAY(field, size, _INT_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) _LOG_ARRAY(field, size, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _UINT_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _INT_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _UINT_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _INT_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _DOUBLE_FORMAT)
#else
#define DML_LOG_DEBUG(fmt, ...) ((void)0)
+#define DML_LOG_COMP_IF_ENTER() ((void)0)
+#define DML_LOG_COMP_IF_EXIT() ((void)0)
+#define DML_LOG_FUNC_ENTER() ((void)0)
+#define DML_LOG_FUNC_EXIT() ((void)0)
+#define DML_LOG_DEBUG_BOOL(field) ((void)0)
+#define DML_LOG_DEBUG_UINT(field) ((void)0)
+#define DML_LOG_DEBUG_INT(field) ((void)0)
+#define DML_LOG_DEBUG_DOUBLE(field) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_UINT(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_INT(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) ((void)0)
#endif
+
+/* public macros for DML_LOG_LEVEL_VERBOSE */
#if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE
-#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL("[DML VERBOSE] "fmt, ## __VA_ARGS__)
+#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__)
#else
#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
-#endif
+#endif /* #if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE */
#endif /* __DML2_DEBUG_H__ */
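To make the new structured-logging macros concrete, here is a hypothetical caller (compiled with DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG) together with the exact text the macros above would emit; the function and variable names are invented for the example:

static void dump_plane_state(void)
{
	unsigned int det_kb[3] = { 192, 192, 384 };
	bool unbounded = false;

	DML_LOG_FUNC_ENTER();                /* <function name="dump_plane_state"> */
	DML_LOG_DEBUG_BOOL(unbounded);       /* unbounded = false */
	DML_LOG_DEBUG_ARRAY_UINT(det_kb, 3); /* det_kb = [192, 192, 384] */
	DML_LOG_FUNC_EXIT();                 /* </function> */
}

The enter/exit pairs emit XML-like elements, so a full trace nests like a document; scalars and arrays print as "name = value" lines in between.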
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index 00688b9f1df4..d52aa82283b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -202,6 +202,8 @@ struct dml2_core_mode_support_result {
} active;
unsigned int dispclk_khz;
+ unsigned int dpprefclk_khz;
+ unsigned int dtbrefclk_khz;
unsigned int dcfclk_deepsleep_khz;
unsigned int socclk_khz;
@@ -446,13 +448,17 @@ struct dml2_core_internal_state_intermediates {
};
struct dml2_core_mode_support_locals {
- struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
+ union {
+ struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
+ };
struct dml2_display_cfg svp_expanded_display_cfg;
struct dml2_calculate_mcache_allocation_in_out calc_mcache_allocation_params;
};
struct dml2_core_mode_programming_locals {
- struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params;
+ union {
+ struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params;
+ };
struct dml2_display_cfg svp_expanded_display_cfg;
};
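Wrapping the single member of each locals struct in an anonymous union is a scratch-sharing idiom: members added to the union later will overlay the same storage instead of growing the struct, which matters for these large context objects. A standalone illustration (names and sizes invented):

struct scratch_demo {
	union {
		char mode_support_scratch[4096];
		char mode_programming_scratch[4096];
	};	/* the two buffers are never live at the same time */
};
/* sizeof(struct scratch_demo) == 4096, not 8192 */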
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index 6b3b8803e0ae..a56e75cdf712 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -868,7 +868,7 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
+ * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING)
* 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 208630754c8a..3b866e876bf4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -1189,22 +1189,6 @@ static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2
return location;
}
-static void apply_legacy_svp_drr_settings(struct dml2_context *dml2, const struct dc_state *state, struct dml_display_cfg_st *dml_dispcfg)
-{
- int i;
-
- if (state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- ASSERT(state->stream_count == 1);
- dml_dispcfg->timing.DRRDisplay[0] = true;
- } else if (state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid) {
-
- for (i = 0; i < dml_dispcfg->num_timings; i++) {
- if (dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i] == state->streams[state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index]->stream_id)
- dml_dispcfg->timing.DRRDisplay[i] = true;
- }
- }
-}
-
static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, struct dc_state *state)
{
unsigned int i;
@@ -1437,9 +1421,6 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
}
}
}
-
- if (!dml2->config.use_native_pstate_optimization)
- apply_legacy_svp_drr_settings(dml2, context, dml_dispcfg);
}
void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 525b7d04bf84..0318260370ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -24,8 +24,6 @@
*
*/
-#include <linux/vmalloc.h>
-
#include "display_mode_core.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
@@ -95,12 +93,17 @@ static void map_hw_resources(struct dml2_context *dml2,
static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
const struct dml_display_cfg_st *display_cfg,
- struct dml_mode_support_info_st *evaluation_info)
+ struct dml_mode_support_info_st *evaluation_info,
+ enum dc_validate_mode validate_mode)
{
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx;
s->mode_support_params.in_display_cfg = display_cfg;
+ if (validate_mode == DC_VALIDATE_MODE_ONLY)
+ s->mode_support_params.in_start_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1;
+ else
+ s->mode_support_params.in_start_state_idx = 0;
s->mode_support_params.out_evaluation_info = evaluation_info;
memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st));
@@ -112,10 +115,8 @@ static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p)
{
int unused_dpps = p->ip_params->max_num_dpp;
- int i, j;
- int odms_needed, refresh_rate_hz, dpps_needed, subvp_height, pstate_width_fw_delay_lines, surface_count;
- int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index;
- float frame_time_sec, max_frame_time_sec;
+ int i;
+ int odms_needed;
int largest_blend_and_timing = 0;
bool optimization_done = false;
@@ -130,79 +131,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe
if (p->new_display_config != p->cur_display_config)
*p->new_display_config = *p->cur_display_config;
- // Optimize P-State Support
- if (dml2->config.use_native_pstate_optimization) {
- if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) {
- // Find a display with < 120Hz refresh rate with maximal refresh rate that's not already subvp
- subvp_timing_to_add = -1;
- subvp_surface_to_add = -1;
- max_frame_time_sec = 0;
- surface_count = 0;
- for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
- refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000,
- (p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i]));
- if (refresh_rate_hz < 120) {
- // Check its upstream surfaces to see if this one could be converted to subvp.
- dpps_needed = 0;
- for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) {
- if (p->cur_display_config->plane.BlendingAndTiming[j] == i &&
- p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) {
- dpps_needed += p->cur_mode_support_info->DPPPerSurface[j];
- subvp_surface_to_add = j;
- surface_count++;
- }
- }
-
- if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) {
- frame_time_sec = (float)1 / refresh_rate_hz;
- if (frame_time_sec > max_frame_time_sec) {
- max_frame_time_sec = frame_time_sec;
- subvp_timing_to_add = i;
- }
- }
- }
- }
- if (subvp_timing_to_add >= 0) {
- new_timing_index = p->new_display_config->num_timings++;
- new_surface_index = p->new_display_config->num_surfaces++;
- // Add a phantom pipe reflecting the main pipe's timing
- dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add);
-
- pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us +
- p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) *
- (p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) /
- (double)p->new_display_config->timing.HTotal[subvp_timing_to_add]);
-
- subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines;
-
- p->new_display_config->timing.VActive[new_timing_index] = subvp_height;
- p->new_display_config->timing.VTotal[new_timing_index] = subvp_height +
- p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add];
-
- p->new_display_config->output.OutputDisabled[new_timing_index] = true;
-
- p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport;
-
- dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add);
- dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add);
-
- p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height;
- p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height;
- p->new_display_config->plane.ViewportStationary[new_surface_index] = false;
-
- p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable;
- p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe;
-
- p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0;
-
- p->new_policy->ImmediateFlipRequirement[new_surface_index] = dml_immediate_flip_not_required;
-
- p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index;
-
- optimization_done = true;
- }
- }
- }
// Optimize Clocks
if (!optimization_done) {
@@ -226,7 +154,8 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe
return optimization_done;
}
-static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state)
+static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state,
+ enum dc_validate_mode validate_mode)
{
struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch;
struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch;
@@ -268,7 +197,8 @@ static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *d
dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us;
}
- dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info);
+ dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info,
+ validate_mode);
if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) {
map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info);
@@ -333,7 +263,8 @@ static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const
}
static bool dml_mode_support_wrapper(struct dml2_context *dml2,
- struct dc_state *display_state)
+ struct dc_state *display_state,
+ enum dc_validate_mode validate_mode)
{
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
unsigned int result = 0, i;
@@ -369,7 +300,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
result = pack_and_call_dml_mode_support_ex(dml2,
&s->cur_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
if (result)
result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info);
@@ -390,7 +322,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
dml2->v20.dml_core_ctx.policy = s->new_policy;
optimized_result = pack_and_call_dml_mode_support_ex(dml2,
&s->new_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
if (optimized_result)
optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info);
@@ -409,7 +342,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
if (!optimized_result) {
result = pack_and_call_dml_mode_support_ex(dml2,
&s->cur_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
}
}
@@ -419,118 +353,7 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
return result;
}
-static int find_drr_eligible_stream(struct dc_state *display_state)
-{
- int i;
-
- for (i = 0; i < display_state->stream_count; i++) {
- if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE
- && display_state->streams[i]->ignore_msa_timing_param) {
- // Use ignore_msa_timing_param flag to identify as DRR
- return i;
- }
- }
-
- return -1;
-}
-
-static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state)
-{
- struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
- bool pstate_optimization_done = false;
- bool pstate_optimization_success = false;
- bool result = false;
- int drr_display_index = 0, non_svp_streams = 0;
- bool force_svp = dml2->config.svp_pstate.force_enable_subvp;
-
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
-
- result = dml_mode_support_wrapper(dml2, display_state);
-
- if (!result) {
- pstate_optimization_done = true;
- } else if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- }
-
- if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) {
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
-
- result = dml_mode_support_wrapper(dml2, display_state);
- } else {
- non_svp_streams = display_state->stream_count;
-
- while (!pstate_optimization_done) {
- result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
-
- // Always try adding SVP first
- if (result)
- result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info);
- else
- pstate_optimization_done = true;
-
-
- if (result) {
- result = dml_mode_support_wrapper(dml2, display_state);
- } else {
- pstate_optimization_done = true;
- }
-
- if (result) {
- non_svp_streams--;
-
- if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
- if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- } else {
- pstate_optimization_success = false;
- pstate_optimization_done = false;
- }
- } else {
- drr_display_index = find_drr_eligible_stream(display_state);
-
- // If there is only 1 remaining non SubVP pipe that is DRR, check static
- // schedulability for SubVP + DRR.
- if (non_svp_streams == 1 && drr_display_index >= 0) {
- if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) {
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index;
- result = dml_mode_support_wrapper(dml2, display_state);
- }
-
- if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- } else {
- pstate_optimization_success = false;
- pstate_optimization_done = false;
- }
- }
-
- if (pstate_optimization_success) {
- pstate_optimization_done = true;
- } else {
- pstate_optimization_done = false;
- }
- }
- }
- }
- }
-
- if (!pstate_optimization_success) {
- dml2_svp_remove_all_phantom_pipes(dml2, display_state);
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
- result = dml_mode_support_wrapper(dml2, display_state);
- }
-
- return result;
-}
-
-static bool call_dml_mode_support_and_programming(struct dc_state *context)
+static bool call_dml_mode_support_and_programming(struct dc_state *context, enum dc_validate_mode validate_mode)
{
unsigned int result = 0;
unsigned int min_state = 0;
@@ -544,16 +367,13 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
if (!context->streams[0]->sink->link->dc->caps.is_apu) {
- min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
+ min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context,
+ validate_mode);
ASSERT(min_state_for_g6_temp_read >= 0);
}
- if (!dml2->config.use_native_pstate_optimization) {
- result = optimize_pstate_with_svp_and_drr(dml2, context);
- } else {
- result = dml_mode_support_wrapper(dml2, context);
- }
+ result = dml_mode_support_wrapper(dml2, context, validate_mode);
/* Upon trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state causing crashes later on.
* Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly.
@@ -575,7 +395,8 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
return result;
}
-static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context)
+static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode)
{
struct dml2_context *dml2 = context->bw_ctx.dml2;
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
@@ -611,7 +432,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4);
- result = call_dml_mode_support_and_programming(context);
+ result = call_dml_mode_support_and_programming(context, validate_mode);
/* Call map dc pipes to map the pipes based on the DML output. For correctly determining if recalculation
* is required or not, the resource context needs to correctly reflect the number of active pipes. We would
* only know the correct number if active pipes after dml2_map_dc_pipes is called.
@@ -628,7 +449,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
if (need_recalculation) {
/* Engage the DML again if recalculation is required. */
- call_dml_mode_support_and_programming(context);
+ call_dml_mode_support_and_programming(context, validate_mode);
if (!dml2->config.skip_hw_state_mapping) {
dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);
}
@@ -684,7 +505,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
return result;
}
-static bool dml2_validate_only(struct dc_state *context)
+static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode validate_mode)
{
struct dml2_context *dml2;
unsigned int result = 0;
@@ -708,7 +529,8 @@ static bool dml2_validate_only(struct dc_state *context)
result = pack_and_call_dml_mode_support_ex(dml2,
&dml2->v20.scratch.cur_display_config,
- &dml2->v20.scratch.mode_support_info);
+ &dml2->v20.scratch.mode_support_info,
+ validate_mode);
if (result)
result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info);
@@ -723,7 +545,8 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d
}
}
-bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate)
+bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2,
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -733,17 +556,17 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
/* DML2.1 validation path */
if (dml2->architecture == dml2_architecture_21) {
- out = dml21_validate(in_dc, context, dml2, fast_validate);
+ out = dml21_validate(in_dc, context, dml2, validate_mode);
return out;
}
DC_FP_START();
- /* Use dml_validate_only for fast_validate path */
- if (fast_validate)
- out = dml2_validate_only(context);
+ /* Use dml_validate_only for the DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX paths */
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ out = dml2_validate_only(context, validate_mode);
else
- out = dml2_validate_and_build_resource(in_dc, context);
+ out = dml2_validate_and_build_resource(in_dc, context, validate_mode);
DC_FP_END();
@@ -757,8 +580,8 @@ static inline struct dml2_context *dml2_allocate_memory(void)
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, *dml2, config);
return;
}
@@ -803,9 +626,7 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
// TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21)
- && (in_dc->ctx->dce_version == DCN_VERSION_4_01
- ))
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01))
return dml21_create(in_dc, dml2, config);
// Allocate Mode Lib Ctx
@@ -874,8 +695,8 @@ void dml2_reinit(const struct dc *in_dc,
const struct dml2_configuration_options *config,
struct dml2_context **dml2)
{
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, *dml2, config);
return;
}
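The boolean fast_validate is replaced throughout by a three-state enum, and DC_VALIDATE_MODE_ONLY now also short-circuits the DML sweep by starting at the highest SoC power state. A sketch of the start-state selection as pack_and_call_dml_mode_support_ex() above applies it — the enum is defined elsewhere in dc, so its exact member order here is an assumption:

enum dc_validate_mode {
	DC_VALIDATE_MODE_ONLY,			/* support check, res_ctx untouched */
	DC_VALIDATE_MODE_AND_STATE_INDEX,	/* also report lowest state index */
	DC_VALIDATE_MODE_AND_PROGRAMMING,	/* full build, populates res_ctx */
};

static unsigned int pick_start_state_idx(enum dc_validate_mode mode,
					 unsigned int num_states)
{
	/* Checking only the top state answers "is this mode supportable
	 * at all?" as cheaply as possible; programming paths sweep from 0. */
	return mode == DC_VALIDATE_MODE_ONLY ? num_states - 1 : 0;
}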
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 5100f269368e..c384e141cebc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -240,7 +240,7 @@ struct dml2_configuration_options {
bool use_clock_dc_limits;
bool gpuvm_enable;
bool force_tdlut_enable;
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
};
/*
@@ -272,7 +272,7 @@ void dml2_reinit(const struct dc *in_dc,
* dml2_validate - Determines if a display configuration is supported or not.
* @in_dc: dc.
* @context: dc_state to be validated.
- * @fast_validate: Fast validate will not populate context.res_ctx.
+ * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX will not populate context.res_ctx.
*
* DML1.0 compatible interface for validation.
*
@@ -295,7 +295,7 @@ void dml2_reinit(const struct dc *in_dc,
bool dml2_validate(const struct dc *in_dc,
struct dc_state *context,
struct dml2_context *dml2,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
/*
* dml2_extract_dram_and_fclk_change_support - Extracts the FCLK and UCLK change support info.
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
index 97bf26fa3573..36187f890d5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
@@ -231,7 +231,7 @@ static struct dpp_funcs dcn401_dpp_funcs = {
.dpp_program_regamma_pwl = NULL,
.dpp_set_pre_degam = dpp3_set_pre_degam,
.dpp_program_input_lut = NULL,
- .dpp_full_bypass = dpp401_full_bypass,
+ .dpp_full_bypass = NULL,
.dpp_setup = dpp401_dpp_setup,
.dpp_program_degamma_pwl = NULL,
.dpp_program_cm_dealpha = dpp3_program_cm_dealpha,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
index ecaa976e1f52..5a6a861402b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
@@ -641,6 +641,7 @@
uint32_t ISHARP_DELTA_DATA; \
uint32_t ISHARP_DELTA_INDEX; \
uint32_t ISHARP_NLDELTA_SOFT_CLIP
+
struct dcn401_dpp_registers {
DPP_REG_VARIABLE_LIST_DCN401;
};
@@ -683,8 +684,6 @@ void dpp401_dscl_set_scaler_manual_scale(
struct dpp *dpp_base,
const struct scaler_data *scl_data);
-void dpp401_full_bypass(struct dpp *dpp_base);
-
void dpp401_dpp_setup(
struct dpp *dpp_base,
enum surface_pixel_format format,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 712aff7e17f7..7aab77b58869 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -88,30 +88,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-void dpp401_full_bypass(struct dpp *dpp_base)
-{
- struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
-
- /* Input pixel format: ARGB8888 */
- REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
- CNVC_SURFACE_PIXEL_FORMAT, 0x8);
-
- /* Zero expansion */
- REG_SET_3(FORMAT_CONTROL, 0,
- CNVC_BYPASS, 0,
- FORMAT_CONTROL__ALPHA_EN, 0,
- FORMAT_EXPANSION_MODE, 0);
-
- /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
- if (dpp->tf_mask->CM_BYPASS_EN)
- REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
- else
- REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);
-
- /* Setting degamma bypass for now */
- REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
-}
-
void dpp401_set_cursor_attributes(
struct dpp *dpp_base,
struct dc_cursor_attributes *cursor_attributes)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 11535922b5ff..1f53a9f0c0ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -30,6 +30,9 @@
#include "rc_calc.h"
#include "fixed31_32.h"
+#include "clk_mgr.h"
+#include "resource.h"
+
#define DC_LOGGER \
dsc->ctx->logger
@@ -149,6 +152,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
}
/* Forward Declarations */
+static unsigned int get_min_dsc_slice_count_for_odm(
+ const struct display_stream_compressor *dsc,
+ const struct dsc_enc_caps *dsc_enc_caps,
+ const struct dc_crtc_timing *timing);
+
static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
@@ -183,6 +191,7 @@ static bool setup_dsc_config(
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
const enum dc_link_encoding_format link_encoding,
+ int min_slice_count,
struct dc_dsc_config *dsc_cfg);
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
@@ -442,7 +451,6 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
return true;
}
-
/* If DSC is possible, get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range and
* timing's pixel clock and uncompressed bandwidth.
* If DSC is not possible, leave '*range' untouched.
@@ -458,6 +466,7 @@ bool dc_dsc_compute_bandwidth_range(
struct dc_dsc_bw_range *range)
{
bool is_dsc_possible = false;
+ unsigned int min_dsc_slice_count;
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config = {0};
@@ -469,12 +478,14 @@ bool dc_dsc_compute_bandwidth_range(
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+ min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
+
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
timing->pixel_encoding, &dsc_common_caps);
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
- &options, link_encoding, &config);
+ &options, link_encoding, min_dsc_slice_count, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
@@ -525,20 +536,153 @@ void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp);
}
+
+static void build_dsc_enc_combined_slice_caps(
+ const struct dsc_enc_caps *single_dsc_enc_caps,
+ struct dsc_enc_caps *dsc_enc_caps,
+ unsigned int max_odm_combine_factor)
+{
+ /* 1-16 slice configurations, single DSC */
+ dsc_enc_caps->slice_caps.raw |= single_dsc_enc_caps->slice_caps.raw;
+
+ /* 2x DSC's */
+ if (max_odm_combine_factor >= 2) {
+ /* 1 + 1 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
+
+ /* 2 + 2 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
+
+ /* 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+
+ /* 8 + 8 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_8;
+ }
+
+ /* 3x DSC's */
+ if (max_odm_combine_factor >= 3) {
+ /* 4 + 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+ }
+
+ /* 4x DSC's */
+ if (max_odm_combine_factor >= 4) {
+ /* 1 + 1 + 1 + 1 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
+
+ /* 2 + 2 + 2 + 2 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
+
+ /* 3 + 3 + 3 + 3 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_3;
+
+ /* 4 + 4 + 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+ }
+}
+
+static void build_dsc_enc_caps(
+ const struct display_stream_compressor *dsc,
+ struct dsc_enc_caps *dsc_enc_caps)
+{
+ unsigned int max_dscclk_khz;
+ unsigned int num_dsc;
+ unsigned int max_odm_combine_factor;
+ struct dsc_enc_caps single_dsc_enc_caps;
+
+ struct dc *dc;
+
+ if (!dsc || !dsc->ctx || !dsc->ctx->dc || !dsc->funcs->dsc_get_single_enc_caps)
+ return;
+
+ dc = dsc->ctx->dc;
+
+ if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool || dc->debug.disable_dsc)
+ return;
+
+ /* get max DSCCLK from clk_mgr */
+ max_dscclk_khz = dc->clk_mgr->funcs->get_max_clock_khz(dc->clk_mgr, CLK_TYPE_DSCCLK);
+
+ dsc->funcs->dsc_get_single_enc_caps(&single_dsc_enc_caps, max_dscclk_khz);
+
+ /* global capabilities */
+ dsc_enc_caps->dsc_version = single_dsc_enc_caps.dsc_version;
+ dsc_enc_caps->lb_bit_depth = single_dsc_enc_caps.lb_bit_depth;
+ dsc_enc_caps->is_block_pred_supported = single_dsc_enc_caps.is_block_pred_supported;
+ dsc_enc_caps->max_slice_width = single_dsc_enc_caps.max_slice_width;
+ dsc_enc_caps->bpp_increment_div = single_dsc_enc_caps.bpp_increment_div;
+ dsc_enc_caps->color_formats.raw = single_dsc_enc_caps.color_formats.raw;
+ dsc_enc_caps->color_depth.raw = single_dsc_enc_caps.color_depth.raw;
+
+ /* expand per DSC capabilities to global */
+ max_odm_combine_factor = dc->caps.max_odm_combine_factor;
+ num_dsc = dc->res_pool->res_cap->num_dsc;
+ max_odm_combine_factor = min(max_odm_combine_factor, num_dsc);
+ dsc_enc_caps->max_total_throughput_mps =
+ single_dsc_enc_caps.max_total_throughput_mps *
+ max_odm_combine_factor;
+
+ /* check slice counts possible with ODM combine */
+ build_dsc_enc_combined_slice_caps(&single_dsc_enc_caps, dsc_enc_caps, max_odm_combine_factor);
+}
+
+static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
+{
+ return (value + 9) / 10;
+}
+
+static unsigned int get_min_dsc_slice_count_for_odm(
+ const struct display_stream_compressor *dsc,
+ const struct dsc_enc_caps *dsc_enc_caps,
+ const struct dc_crtc_timing *timing)
+{
+ unsigned int max_dispclk_khz;
+
+ /* get max pixel rate and combine caps */
+ max_dispclk_khz = dsc_enc_caps->max_total_throughput_mps * 1000;
+ if (dsc && dsc->ctx->dc) {
+ if (dsc->ctx->dc->clk_mgr &&
+ dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz) {
+ /* dispclk is available */
+ max_dispclk_khz = dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz(dsc->ctx->dc->clk_mgr, CLK_TYPE_DISPCLK);
+ }
+ }
+
+ /* validate parameters */
+ if (max_dispclk_khz == 0 || dsc_enc_caps->max_slice_width == 0)
+ return 1;
+
+ /* consider minimum odm slices required due to
+ * 1) display pipe throughput (dispclk)
+ * 2) max image width per slice
+ */
+ return dc_fixpt_ceil(dc_fixpt_max(
+ dc_fixpt_div_int(dc_fixpt_from_int(dsc_div_by_10_round_up(timing->pix_clk_100hz)),
+ max_dispclk_khz), // throughput
+ dc_fixpt_div_int(dc_fixpt_from_int(timing->h_addressable + timing->h_border_left + timing->h_border_right),
+ dsc_enc_caps->max_slice_width))); // slice width
+}
+
static void get_dsc_enc_caps(
const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
int pixel_clock_100Hz)
{
- // This is a static HW query, so we can use any DSC
-
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
- if (dsc) {
- if (!dsc->ctx->dc->debug.disable_dsc)
- dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
- if (dsc->ctx->dc->debug.native422_support)
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+
+ if (!dsc || !dsc->ctx || !dsc->ctx->dc || dsc->ctx->dc->debug.disable_dsc)
+ return;
+
+ /* check if the reported caps are global or only for a single DCN DSC enc */
+ if (dsc->funcs->dsc_get_enc_caps) {
+ dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+ } else {
+ build_dsc_enc_caps(dsc, dsc_enc_caps);
}
+
+ if (dsc->ctx->dc->debug.native422_support)
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
}
/* Returns 'false' if no intersection was found for at least one capability.
@@ -621,11 +765,6 @@ static bool intersect_dsc_caps(
return true;
}
-static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
-{
- return (value + 9) / 10;
-}
-
static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bandwidth_in_kbps,
const struct dc_crtc_timing *timing,
@@ -910,11 +1049,11 @@ static bool setup_dsc_config(
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
const enum dc_link_encoding_format link_encoding,
+ int min_slices_h,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
int max_slices_h = 0;
- int min_slices_h = 0;
int num_slices_h = 0;
int pic_width;
int slice_width;
@@ -1018,12 +1157,9 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
- min_slices_h = pic_width / dsc_common_caps.max_slice_width;
- if (pic_width % dsc_common_caps.max_slice_width)
- min_slices_h++;
-
min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h);
+ /* increase minimum slice count to meet sink throughput limitations */
while (min_slices_h <= max_slices_h) {
int pix_clk_per_slice_khz = dsc_div_by_10_round_up(timing->pix_clk_100hz) / min_slices_h;
if (pix_clk_per_slice_khz <= sink_per_slice_throughput_mps * 1000)
@@ -1032,14 +1168,12 @@ static bool setup_dsc_config(
min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
}
- is_dsc_possible = (min_slices_h <= max_slices_h);
-
- if (pic_width % min_slices_h != 0)
- min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
-
- if (min_slices_h == 0 && max_slices_h == 0)
- is_dsc_possible = false;
+ /* increase minimum slice count to meet divisibility requirements */
+ while (pic_width % min_slices_h != 0 && min_slices_h <= max_slices_h) {
+ min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
+ }
+ is_dsc_possible = (min_slices_h <= max_slices_h) && max_slices_h != 0;
if (!is_dsc_possible)
goto done;
@@ -1162,12 +1296,19 @@ bool dc_dsc_compute_config(
{
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
-
+ unsigned int min_dsc_slice_count;
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+
+ min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
+
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, options, link_encoding, dsc_cfg);
+ timing,
+ options,
+ link_encoding,
+ min_dsc_slice_count,
+ dsc_cfg);
return is_dsc_possible;
}
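get_min_dsc_slice_count_for_odm() reduces to min_slices = ceil(max(pix_clk_khz / max_dispclk_khz, pic_width / max_slice_width)). A plain-integer restatement with a worked example under assumed numbers:

/* Assumed 8K-class timing: pix_clk = 2,376,000 kHz (pix_clk_100hz / 10),
 * max dispclk = 1,200,000 kHz, pic_width = 7680, max_slice_width = 5184.
 *   throughput: ceil(2376000 / 1200000) = 2
 *   width:      ceil(7680 / 5184)       = 2
 * => at least 2 slices, i.e. 2:1 ODM combine, are required.
 */
static unsigned int min_slices_sketch(unsigned int pix_clk_khz,
				      unsigned int max_dispclk_khz,
				      unsigned int pic_width,
				      unsigned int max_slice_width)
{
	unsigned int by_clk = (pix_clk_khz + max_dispclk_khz - 1) / max_dispclk_khz;
	unsigned int by_width = (pic_width + max_slice_width - 1) / max_slice_width;

	return by_clk > by_width ? by_clk : by_width;
}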
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 4222679fd4c9..7bd92ae8b13e 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -9,19 +9,14 @@
#include "dsc/dscc_types.h"
#include "dsc/rc_calc.h"
-#define MAX_THROUGHPUT_PER_DSC_100HZ 20000000
-#define MAX_DSC_UNIT_COMBINE 4
-
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals);
/* Object I/F functions */
//static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
//static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps);
-static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
-static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
+static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
static const struct dsc_funcs dcn401_dsc_funcs = {
- .dsc_get_enc_caps = dsc401_get_enc_caps,
.dsc_read_state = dsc401_read_state,
.dsc_validate_stream = dsc401_validate_stream,
.dsc_set_config = dsc401_set_config,
@@ -30,6 +25,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
.dsc_disable = dsc401_disable,
.dsc_disconnect = dsc401_disconnect,
.dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear,
+ .dsc_get_single_enc_caps = dsc401_get_single_enc_caps,
};
/* Macro definitions for REG_SET macros */
@@ -66,22 +62,14 @@ void dsc401_construct(struct dcn401_dsc *dsc,
dsc->max_image_width = 5184;
}
-static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
+static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz)
{
- int min_dsc_unit_required = (pixel_clock_100Hz + MAX_THROUGHPUT_PER_DSC_100HZ - 1) / MAX_THROUGHPUT_PER_DSC_100HZ;
-
dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
- /* 1 slice is only supported with 1 DSC unit */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = min_dsc_unit_required == 1 ? 1 : 0;
- /* 2 slice is only supported with 1 or 2 DSC units */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = (min_dsc_unit_required == 1 || min_dsc_unit_required == 2) ? 1 : 0;
- /* 3 slice is only supported with 1 DSC unit */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = min_dsc_unit_required == 1 ? 1 : 0;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1;
dsc_enc_caps->lb_bit_depth = 13;
dsc_enc_caps->is_block_pred_supported = true;
@@ -95,7 +83,7 @@ static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clo
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
- dsc_enc_caps->max_total_throughput_mps = MAX_THROUGHPUT_PER_DSC_100HZ * MAX_DSC_UNIT_COMBINE;
+ dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000;
dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
@@ -191,7 +179,7 @@ void dsc401_disable(struct display_stream_compressor *dsc)
DSC_CLOCK_EN, 0);
}
-static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc)
+void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
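With this change the per-engine throughput cap is derived from the reported max DSCCLK instead of a hard-coded constant: max_dscclk_khz * 3 / 1000 converts kHz to megapixels per second if the engine processes 3 pixels per DSCCLK cycle — the factor of 3 is inferred from the arithmetic, not stated in the patch. As a worked example, an assumed 1,670,000 kHz DSCCLK gives 1670000 * 3 / 1000 = 5010 MP/s per engine; build_dsc_enc_caps() in dc_dsc.c then multiplies by the ODM combine factor and widens the slice caps, so two engines that each support 4 slices jointly advertise NUM_SLICES_8 (see build_dsc_enc_combined_slice_caps()).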
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
index e3ca70058e64..7acd57eb4f42 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
@@ -341,5 +341,6 @@ void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_c
void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe);
void dsc401_disable(struct display_stream_compressor *dsc);
void dsc401_disconnect(struct display_stream_compressor *dsc);
+void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
index 1ebce5426a58..b0bd1f9425b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
@@ -108,6 +108,7 @@ struct dsc_funcs {
void (*dsc_disable)(struct display_stream_compressor *dsc);
void (*dsc_disconnect)(struct display_stream_compressor *dsc);
void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc);
+ void (*dsc_get_single_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index c7765e6f09e6..f8f991785d4f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -666,10 +666,29 @@ struct dcn_mi_mask {
DCN_HUBP_REG_FIELD_LIST(uint32_t);
};
+struct dcn_fl_regs_st {
+ uint32_t lut_enable;
+ uint32_t lut_done;
+ uint32_t lut_addr_mode;
+ uint32_t lut_width;
+ uint32_t lut_tmz;
+ uint32_t lut_crossbar_sel_r;
+ uint32_t lut_crossbar_sel_g;
+ uint32_t lut_crossbar_sel_b;
+ uint32_t lut_addr_hi;
+ uint32_t lut_addr_lo;
+ uint32_t refcyc_3dlut_group;
+ uint32_t lut_fl_bias;
+ uint32_t lut_fl_scale;
+ uint32_t lut_fl_mode;
+ uint32_t lut_fl_format;
+};
+
struct dcn_hubp_state {
struct _vcs_dpi_display_dlg_regs_st dlg_attr;
struct _vcs_dpi_display_ttu_regs_st ttu_attr;
struct _vcs_dpi_display_rq_regs_st rq_regs;
+ struct dcn_fl_regs_st fl_regs;
uint32_t pixel_format;
uint32_t inuse_addr_hi;
uint32_t inuse_addr_lo;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index baed31611477..705b98b1b6cc 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -86,11 +86,11 @@ void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width);
}
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled)
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 1 : 0);
+ REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_bits);
}
void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index 6e1d4c90ddd4..608e6153fa68 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -333,7 +333,7 @@ void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled);
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits);
void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 38e17b1796e1..4ea13d0bf815 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1186,8 +1186,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
if (dccg) {
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- if (dccg && dccg->funcs->set_dtbclk_dto)
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ if (!(dc->ctx->dce_version >= DCN_VERSION_3_5)) {
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
@@ -1379,7 +1381,7 @@ static void populate_audio_dp_link_info(
}
}
-static void build_audio_output(
+void build_audio_output(
struct dc_state *state,
const struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output)
@@ -1684,6 +1686,19 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
+ /* Temporary workaround to perform DSC programming ahead of stream enablement
+ * for smartmux/SPRS
+ * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest)
+ || link->is_dds || link->skip_implict_edp_power_control)) &&
+ (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)))
+ dc->link_srv->set_dsc_enable(pipe_ctx, true);
+ }
+
if (!stream->dpms_off)
dc->link_srv->set_dpms_on(context, pipe_ctx);
@@ -1925,6 +1940,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
can_apply_edp_fast_boot = dc_validate_boot_timing(dc,
edp_stream->sink, &edp_stream->timing);
+
+ // On mux platforms the flag defaults to false.
+ // Disable fast boot while a mux switch is in progress;
+ // the flag is cleared once the switch completes.
+ if (dc->is_switch_in_progress_dest && edp_link->is_dds)
+ can_apply_edp_fast_boot = false;
+
edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
if (can_apply_edp_fast_boot) {
DC_LOG_EVENT_LINK_TRAINING("eDP fast boot Enable\n");
@@ -1968,6 +1990,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
if (edp_with_sink_num)
edp_link_with_sink = edp_links_with_sink[0];
+ // During a mux switch, powering down the HW blocks and then enabling
+ // the link via a DPCD SET_POWER write causes a brief flash
+ keep_edp_vdd_on |= dc->is_switch_in_progress_dest;
+
if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {
if (edp_link_with_sink && !keep_edp_vdd_on) {
/*turn off backlight before DP_blank and encoder powered down*/
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 06789ac3a224..7cd8c1576988 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -110,5 +110,9 @@ void dce110_enable_dp_link_output(
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
+void build_audio_output(
+ struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_output *audio_output);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index f9ee55998b6b..39910f73ecd0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -327,6 +327,35 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
}
}
+ DTN_INFO("\n=======HUBP FL======\n");
+ DTN_INFO(
+ "HUBP FL: Enabled Done adr_mode width tmz xbar_sel_R xbar_sel_G xbar_sel_B adr_hi adr_low REFCYC Bias Scale Mode Format\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct dcn_fl_regs_st *fl_regs = &s->fl_regs;
+
+ if (!s->blank_en) {
+ DTN_INFO("[%2d]: %5xh %6xh %5d %6d %8xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x",
+ pool->hubps[i]->inst,
+ fl_regs->lut_enable,
+ fl_regs->lut_done,
+ fl_regs->lut_addr_mode,
+ fl_regs->lut_width,
+ fl_regs->lut_tmz,
+ fl_regs->lut_crossbar_sel_r,
+ fl_regs->lut_crossbar_sel_g,
+ fl_regs->lut_crossbar_sel_b,
+ fl_regs->lut_addr_hi,
+ fl_regs->lut_addr_lo,
+ fl_regs->refcyc_3dlut_group,
+ fl_regs->lut_fl_bias,
+ fl_regs->lut_fl_scale,
+ fl_regs->lut_fl_mode,
+ fl_regs->lut_fl_format);
+ DTN_INFO("\n");
+ }
+ }
+
DTN_INFO("\n=========RQ========\n");
DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
@@ -511,6 +540,36 @@ static void dcn10_log_color_state(struct dc *dc,
dc->caps.color.mpc.num_3dluts,
dc->caps.color.mpc.ogam_ram,
dc->caps.color.mpc.ocsc);
+ DTN_INFO("===== MPC RMCM 3DLUT =====\n");
+ DTN_INFO("MPCC: SIZE MODE MODE_CUR RD_SEL 30BIT_EN WR_EN_MASK RAM_SEL OUT_NORM_FACTOR FL_SEL OUT_OFFSET OUT_SCALE FL_DONE SOFT_UNDERFLOW HARD_UNDERFLOW MEM_PWR_ST FORCE DIS MODE\n");
+ for (i = 0; i < pool->mpcc_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x %4x %4x %4x\n",
+ i, s.rmcm_regs.rmcm_3dlut_size, s.rmcm_regs.rmcm_3dlut_mode, s.rmcm_regs.rmcm_3dlut_mode_cur,
+ s.rmcm_regs.rmcm_3dlut_read_sel, s.rmcm_regs.rmcm_3dlut_30bit_en, s.rmcm_regs.rmcm_3dlut_wr_en_mask,
+ s.rmcm_regs.rmcm_3dlut_ram_sel, s.rmcm_regs.rmcm_3dlut_out_norm_factor, s.rmcm_regs.rmcm_3dlut_fl_sel,
+ s.rmcm_regs.rmcm_3dlut_out_offset_r, s.rmcm_regs.rmcm_3dlut_out_scale_r, s.rmcm_regs.rmcm_3dlut_fl_done,
+ s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_force, s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, s.rmcm_regs.rmcm_3dlut_mem_pwr_mode);
+ }
+ DTN_INFO("\n");
+ DTN_INFO("===== MPC RMCM Shaper =====\n");
+ DTN_INFO("MPCC: CNTL LUT_MODE MODE_CUR WR_EN_MASK WR_SEL OFFSET SCALE START_B START_SEG_B END_B END_BASE_B MEM_PWR_ST FORCE DIS MODE\n");
+ for (i = 0; i < pool->mpcc_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x\n",
+ i, s.rmcm_regs.rmcm_cntl, s.rmcm_regs.rmcm_shaper_lut_mode, s.rmcm_regs.rmcm_shaper_mode_cur,
+ s.rmcm_regs.rmcm_shaper_lut_write_en_mask, s.rmcm_regs.rmcm_shaper_lut_write_sel, s.rmcm_regs.rmcm_shaper_offset_b,
+ s.rmcm_regs.rmcm_shaper_scale_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_seg_b,
+ s.rmcm_regs.rmcm_shaper_rama_exp_region_end_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_end_base_b, s.rmcm_regs.rmcm_shaper_mem_pwr_state,
+ s.rmcm_regs.rmcm_shaper_mem_pwr_force, s.rmcm_regs.rmcm_shaper_mem_pwr_dis, s.rmcm_regs.rmcm_shaper_mem_pwr_mode);
+ }
}
void dcn10_log_hw_state(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index c277df12c817..3207addbd4eb 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -283,14 +283,13 @@ void dcn20_setup_gsl_group_as_lock(
}
/* at this point we want to program whether it's to enable or disable */
- if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) {
pipe_ctx->stream_res.tg->funcs->set_gsl(
pipe_ctx->stream_res.tg,
&gsl);
-
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
- pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL)
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
+ pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
} else
BREAK_TO_DEBUGGER();
}
@@ -956,7 +955,7 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
- hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
+ fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
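[The fsleep() above replaces the blank-complete poll with a sleep of roughly one frame: h_total * 10000 / pix_clk_100hz is the line time in microseconds (pix_clk_100hz stores the pixel clock in 100 Hz units), scaled by v_total lines. A worked example with illustrative CTA-861 1080p60 timing:]

#include <stdio.h>

int main(void)
{
	/* Illustrative 1080p60 timing: 2200x1125 total, 148.5 MHz pixel
	 * clock, i.e. 1485000 in the 100 Hz units dc_crtc_timing uses. */
	unsigned int h_total = 2200, v_total = 1125, pix_clk_100hz = 1485000;

	/* Same integer math as the patch: microseconds per line, then per frame. */
	unsigned int us_per_line = h_total * 10000u / pix_clk_100hz; /* ~14 us */
	unsigned int us_per_frame = v_total * us_per_line;           /* ~15.75 ms,
	                                                                just under one 60 Hz frame
	                                                                due to integer truncation */

	printf("%u us/line, %u us/frame\n", us_per_line, us_per_frame);
	return 0;
}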
@@ -1971,14 +1970,6 @@ static void dcn20_program_pipe(
pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
- if (hws->funcs.populate_mcm_luts) {
- if (pipe_ctx->plane_state) {
- hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
- pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
- }
- }
-
if (pipe_ctx->plane_state &&
(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
@@ -2492,7 +2483,7 @@ bool dcn20_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK)
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
return false;
/* apply updated bandwidth parameters */
@@ -2816,6 +2807,8 @@ void dcn20_reset_back_end_for_pipe(
{
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct dtbclk_dto_params dto_params = {0};
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
@@ -2876,6 +2869,13 @@ void dcn20_reset_back_end_for_pipe(
&pipe_ctx->link_res, pipe_ctx->stream->signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
}
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && dccg
+ && dc->ctx->dce_version >= DCN_VERSION_3_5) {
+ dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
+ dto_params.timing = &pipe_ctx->stream->timing;
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 5ba3999991b0..8ba934b83957 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -562,6 +562,19 @@ static void dcn31_reset_back_end_for_pipe(
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
+ /* Temporary workaround to perform DSC programming ahead of pipe reset
+ * for smartmux/SPRS
+ * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest)
+ || link->is_dds || link->skip_implict_edp_power_control)) &&
+ (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)))
+ dc->link_srv->set_dsc_enable(pipe_ctx, false);
+ }
+
/* free acquired resources */
if (pipe_ctx->stream_res.audio) {
/*disable az_endpoint*/
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index a0b05b9ef660..416b1dca3dac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1063,15 +1063,17 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
ASSERT(odm_dsc);
+ if (!odm_dsc)
+ continue;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index c4177a9a662f..cc9f40d97af2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -2,6 +2,8 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "os_types.h"
#include "dm_services.h"
#include "basics/dc_common.h"
#include "dm_helpers.h"
@@ -49,7 +51,7 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static void dcn401_initialize_min_clocks(struct dc *dc)
+void dcn401_initialize_min_clocks(struct dc *dc)
{
struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
@@ -143,13 +145,8 @@ void dcn401_init_hw(struct dc *dc)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// mark dcmode limits present if any clock has distinct AC and DC values from SMU
- dc->caps.dcmode_power_limits_present =
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz);
+ dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present &&
+ dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr);
}
// Initialize the dccg
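[The dcmode_power_limits_present hunk above collapses six per-clock-table checks into one clk_mgr callback. A minimal sketch of the per-clock test the deleted code performed, which an is_dc_mode_present() implementation could presumably repeat for each clock type:]

#include <stdbool.h>
#include <stdint.h>

/* Hedged sketch: one clock class shown; the deleted code ran this test for
 * dcfclk, dispclk, dtbclk, fclk, memclk and socclk. */
struct clk_class {
	uint32_t num_levels;     /* entries in the AC clock table */
	uint32_t dc_mode_limit;  /* distinct DC-mode cap from SMU, 0 if none */
};

static bool clk_class_has_dc_mode(const struct clk_class *c)
{
	return c->num_levels && c->dc_mode_limit;
}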
@@ -396,249 +393,6 @@ static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ct
}
}
-static void dcn401_set_mcm_location_post_blend(struct dc *dc, struct pipe_ctx *pipe_ctx, bool bPostBlend)
-{
- struct mpc *mpc = dc->res_pool->mpc;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
-
- if (!pipe_ctx->plane_state)
- return;
-
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = (bPostBlend) ?
- MPCC_MOVABLE_CM_LOCATION_AFTER :
- MPCC_MOVABLE_CM_LOCATION_BEFORE;
-}
-
-static void dc_get_lut_mode(
- enum dc_cm2_gpu_mem_layout layout,
- enum hubp_3dlut_fl_mode *mode,
- enum hubp_3dlut_fl_addressing_mode *addr_mode)
-{
- switch (layout) {
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
- *mode = hubp_3dlut_fl_mode_native_1;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
- *mode = hubp_3dlut_fl_mode_native_2;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
- *mode = hubp_3dlut_fl_mode_transform;
- *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
- break;
- default:
- *mode = hubp_3dlut_fl_mode_disable;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- }
-}
-
-static void dc_get_lut_format(
- enum dc_cm2_gpu_mem_format dc_format,
- enum hubp_3dlut_fl_format *format)
-{
- switch (dc_format) {
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- *format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
- *format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
- *format = hubp_3dlut_fl_format_float_fp1_5_10;
- break;
- }
-}
-
-static void dc_get_lut_xbar(
- enum dc_cm2_gpu_mem_pixel_component_order order,
- enum hubp_3dlut_fl_crossbar_bit_slice *cr_r,
- enum hubp_3dlut_fl_crossbar_bit_slice *y_g,
- enum hubp_3dlut_fl_crossbar_bit_slice *cb_b)
-{
- switch (order) {
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
- *cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- *cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- break;
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA:
- *cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- *cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- break;
- }
-}
-
-static void dc_get_lut_width(
- enum dc_cm2_gpu_mem_size size,
- enum hubp_3dlut_fl_width *width)
-{
- switch (size) {
- case DC_CM2_GPU_MEM_SIZE_333333:
- *width = hubp_3dlut_fl_width_33;
- break;
- case DC_CM2_GPU_MEM_SIZE_171717:
- *width = hubp_3dlut_fl_width_17;
- break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
- *width = hubp_3dlut_fl_width_transformed;
- break;
- }
-}
-static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc)
-{
- if (mpc->funcs->rmcm.update_3dlut_fast_load_select &&
- mpc->funcs->rmcm.program_lut_read_write_control &&
- hubp->funcs->hubp_program_3dlut_fl_addr &&
- mpc->funcs->rmcm.program_bit_depth &&
- hubp->funcs->hubp_program_3dlut_fl_mode &&
- hubp->funcs->hubp_program_3dlut_fl_addressing_mode &&
- hubp->funcs->hubp_program_3dlut_fl_format &&
- hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
- mpc->funcs->rmcm.program_bias_scale &&
- hubp->funcs->hubp_program_3dlut_fl_crossbar &&
- hubp->funcs->hubp_program_3dlut_fl_width &&
- mpc->funcs->rmcm.update_3dlut_fast_load_select &&
- mpc->funcs->rmcm.populate_lut &&
- mpc->funcs->rmcm.program_lut_mode &&
- hubp->funcs->hubp_enable_3dlut_fl &&
- mpc->funcs->rmcm.enable_3dlut_fl)
- return true;
-
- return false;
-}
-
-bool dcn401_program_rmcm_luts(
- struct hubp *hubp,
- struct pipe_ctx *pipe_ctx,
- enum dc_cm2_transfer_func_source lut3d_src,
- struct dc_cm2_func_luts *mcm_luts,
- struct mpc *mpc,
- bool lut_bank_a,
- int mpcc_id)
-{
- struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
- union mcm_lut_params m_lut_params;
- enum MCM_LUT_XABLE shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable;
- enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_format format = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
- enum hubp_3dlut_fl_width width = 0;
- struct dc *dc = hubp->ctx->dc;
-
- bool bypass_rmcm_3dlut = false;
- bool bypass_rmcm_shaper = false;
-
- dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
-
- /* 3DLUT */
- switch (lut3d_src) {
- case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- // Don't know what to do in this case.
- //case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
- break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
- dc_get_lut_width(mcm_luts->lut3d_data.gpu_mem_params.size, &width);
- if (!dc_is_rmcm_3dlut_supported(hubp, mpc) ||
- !mpc->funcs->rmcm.is_config_supported(width))
- return false;
-
- //0. disable fl on mpc
- mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xF);
-
- //1. power down the block
- mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, false);
-
- //2. program RMCM
- //2a. 3dlut reg programming
- mpc->funcs->rmcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a,
- (!bypass_rmcm_3dlut) && lut3d_xable != MCM_LUT_DISABLE, mpcc_id);
-
- hubp->funcs->hubp_program_3dlut_fl_addr(hubp,
- mcm_luts->lut3d_data.gpu_mem_params.addr);
-
- mpc->funcs->rmcm.program_bit_depth(mpc,
- mcm_luts->lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
-
- // setting native or transformed mode,
- dc_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout, &mode, &addr_mode);
-
- //these program the mcm 3dlut
- hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);
-
- hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);
-
- //seems to be only for the MCM
- dc_get_lut_format(mcm_luts->lut3d_data.gpu_mem_params.format_params.format, &format);
- hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
-
- mpc->funcs->rmcm.program_bias_scale(mpc,
- mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale,
- mpcc_id);
- hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
- mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale);
-
- dc_get_lut_xbar(
- mcm_luts->lut3d_data.gpu_mem_params.component_order,
- &crossbar_bit_slice_cr_r,
- &crossbar_bit_slice_y_g,
- &crossbar_bit_slice_cb_b);
-
- hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
- crossbar_bit_slice_cr_r,
- crossbar_bit_slice_y_g,
- crossbar_bit_slice_cb_b);
-
- mpc->funcs->rmcm.program_3dlut_size(mpc, width, mpcc_id);
-
- mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
-
- //2b. shaper reg programming
- memset(&m_lut_params, 0, sizeof(m_lut_params));
-
- if (mcm_luts->shaper->type == TF_TYPE_HWPWL) {
- m_lut_params.pwl = &mcm_luts->shaper->pwl;
- } else if (mcm_luts->shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
- ASSERT(false);
- cm_helper_translate_curve_to_hw_format(
- dc->ctx,
- mcm_luts->shaper,
- &dpp_base->regamma_params, true);
- m_lut_params.pwl = &dpp_base->regamma_params;
- }
- if (m_lut_params.pwl) {
- mpc->funcs->rmcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
- mpc->funcs->rmcm.program_lut_mode(mpc, !bypass_rmcm_shaper, lut_bank_a, mpcc_id);
- } else {
- //RMCM 3dlut won't work without its shaper
- return false;
- }
-
- //3. Select the hubp connected to this RMCM
- hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
- mpc->funcs->rmcm.enable_3dlut_fl(mpc, true, mpcc_id);
-
- //4. power on the block
- if (m_lut_params.pwl)
- mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true);
-
- break;
- default:
- return false;
- }
-
- return true;
-}
-
void dcn401_populate_mcm_luts(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_cm2_func_luts mcm_luts,
@@ -664,25 +418,6 @@ void dcn401_populate_mcm_luts(struct dc *dc,
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
- //MCM - setting its location (Before/After) blender
- //set to post blend (true)
- dcn401_set_mcm_location_post_blend(
- dc,
- pipe_ctx,
- mcm_luts.lut3d_data.mpc_mcm_post_blend);
-
- //RMCM - 3dLUT+Shaper
- if (mcm_luts.lut3d_data.rmcm_3dlut_enable) {
- dcn401_program_rmcm_luts(
- hubp,
- pipe_ctx,
- lut3d_src,
- &mcm_luts,
- mpc,
- lut_bank_a,
- mpcc_id);
- }
-
/* 1D LUT */
if (mcm_luts.lut1d_func) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
@@ -740,15 +475,15 @@ void dcn401_populate_mcm_luts(struct dc *dc,
break;
case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_333333:
- width = hubp_3dlut_fl_width_33;
- break;
case DC_CM2_GPU_MEM_SIZE_171717:
width = hubp_3dlut_fl_width_17;
break;
case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
width = hubp_3dlut_fl_width_transformed;
break;
+ default:
+ //TODO: handle default case
+ break;
}
//check for support
@@ -817,11 +552,14 @@ void dcn401_populate_mcm_luts(struct dc *dc,
//navi 4x has a bug where the red and blue components are swapped; work around it here
//TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x
- dc_get_lut_xbar(
- mcm_luts.lut3d_data.gpu_mem_params.component_order,
- &crossbar_bit_slice_cr_r,
- &crossbar_bit_slice_y_g,
- &crossbar_bit_slice_cb_b);
+ switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
+ case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
+ default:
+ crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ break;
+ }
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
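[Comparing the deleted dc_get_lut_xbar() call with the hard-coded mapping above: for RGBA the generic helper put Cr/R in bits 32:47 and Cb/B in bits 0:15, while the navi4x workaround trades those two slices. Side by side, as a sketch with illustrative slice constants:]

enum bit_slice { SLICE_0_15, SLICE_16_31, SLICE_32_47 };

struct xbar { enum bit_slice cr_r, y_g, cb_b; };

/* Generic RGBA mapping (deleted helper) vs. the navi4x workaround above:
 * only the Cr/R and Cb/B bit slices differ. */
static const struct xbar rgba_generic   = { SLICE_32_47, SLICE_16_31, SLICE_0_15 };
static const struct xbar rgba_navi4x_wa = { SLICE_0_15,  SLICE_16_31, SLICE_32_47 };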
@@ -2269,14 +2007,6 @@ void dcn401_program_pipe(
pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
- if (hws->funcs.populate_mcm_luts) {
- if (pipe_ctx->plane_state) {
- hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
- pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
- }
- }
-
if (pipe_ctx->plane_state &&
(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
@@ -2651,7 +2381,7 @@ bool dcn401_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK)
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
return false;
/* apply updated bandwidth parameters */
@@ -2902,10 +2632,12 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
DC_LOGGER_INIT(dc->ctx->logger);
- REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 1);
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ }
if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, dpp->inst, false);
@@ -2916,7 +2648,7 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
- if (org_ip_request_cntl == 0)
+ if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index ce65b4f6c672..2621b7725267 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -109,12 +109,5 @@ void dcn401_detect_pipe_changes(
void dcn401_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
-bool dcn401_program_rmcm_luts(
- struct hubp *hubp,
- struct pipe_ctx *pipe_ctx,
- enum dc_cm2_transfer_func_source lut3d_src,
- struct dc_cm2_func_luts *mcm_luts,
- struct mpc *mpc,
- bool lut_bank_a,
- int mpcc_id);
+void dcn401_initialize_min_clocks(struct dc *dc);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 3a0795045bc6..9df8030e37f7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -502,6 +502,9 @@ void get_hdr_visual_confirm_color(
void get_mpctree_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_smartmux_visual_confirm_color(
+ struct dc *dc,
+ struct tg_color *color);
void get_vabc_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index f3696143590c..82085d9c3f40 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -59,6 +59,7 @@ enum dc_status {
DC_FAIL_DP_PAYLOAD_ALLOCATION = 27,
DC_FAIL_DP_LINK_BANDWIDTH = 28,
DC_FAIL_HW_CURSOR_SUPPORT = 29,
+ DC_FAIL_DP_TUNNEL_BW_VALIDATE = 30,
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 0cf349cafb3e..f0d7185153b2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -67,6 +67,8 @@ struct resource_context;
struct clk_bw_params;
struct dc_mcache_params;
+#define MAX_RMCM_INST 2
+
struct resource_funcs {
enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
void (*destroy)(struct resource_pool **pool);
@@ -82,7 +84,7 @@ struct resource_funcs {
enum dc_status (*validate_bandwidth)(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void (*calculate_wm_and_dlg)(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -107,7 +109,7 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
/*
* Algorithm for assigning available link encoders to links.
@@ -223,6 +225,11 @@ struct resource_funcs {
const struct dc_stream_state *stream);
bool (*program_mcache_pipe_config)(struct dc_state *context,
const struct dc_mcache_params *mcache_params);
+ enum dc_status (*update_dc_state_for_encoder_switch)(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output);
};
struct audio_support{
@@ -281,6 +288,7 @@ struct resource_pool {
struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS];
struct dc_3dlut *mpc_lut[MAX_PIPES];
struct dc_transfer_func *mpc_shaper[MAX_PIPES];
+ struct dc_rmcm_3dlut rmcm_3dlut[MAX_RMCM_INST];
struct {
unsigned int xtalin_clock_inKhz;
@@ -556,7 +564,10 @@ struct dcn_bw_output {
struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
struct dmub_cmd_fams2_global_config fams2_global_config;
union dmub_cmd_fams2_config fams2_stream_base_params[DML2_MAX_PLANES];
- union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
+ union {
+ union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
+ union dmub_fams2_stream_static_sub_state_v2 fams2_stream_sub_params_v2[DML2_MAX_PLANES];
+ };
struct dml2_display_arb_regs arb_regs;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index d19a595c2be4..134091d5842d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -622,7 +622,7 @@ extern const struct dcn_ip_params dcn10_ip_defaults;
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn_get_soc_clks(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index c14d64687a3d..2c9a4a12bd8a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -100,6 +100,17 @@ struct dcn301_clk_internal {
#define MAX_NUM_DPM_LVL 8
#define WM_SET_COUNT 4
+enum clk_type {
+ CLK_TYPE_DCFCLK,
+ CLK_TYPE_FCLK,
+ CLK_TYPE_MCLK,
+ CLK_TYPE_SOCCLK,
+ CLK_TYPE_DTBCLK,
+ CLK_TYPE_DISPCLK,
+ CLK_TYPE_DPPCLK,
+ CLK_TYPE_DSCCLK,
+ CLK_TYPE_COUNT
+};
struct clk_limit_table_entry {
unsigned int voltage; /* millivolts with 2 fractional bits */
@@ -324,6 +335,11 @@ struct clk_mgr_funcs {
int (*get_dispclk_from_dentist)(struct clk_mgr *clk_mgr_base);
+ bool (*is_dc_mode_present)(struct clk_mgr *clk_mgr);
+
+ uint32_t (*set_smartmux_switch)(struct clk_mgr *clk_mgr, uint32_t pins_to_set);
+
+ unsigned int (*get_max_clock_khz)(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
};
struct clk_mgr {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index e94e9ba60f55..61c4d2a7db1c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -211,7 +211,7 @@ struct dccg_funcs {
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst);
- void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
+ void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h);
void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index b610beb075d5..cee29e89ec5c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -282,7 +282,7 @@ struct hubp_funcs {
void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable);
void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width);
- void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, bool protection_enabled);
+ void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, uint8_t protection_bits);
void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 6e303b81bfb0..7641439f6ca0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -190,6 +190,42 @@ struct mpc_grph_gamut_adjustment {
enum mpcc_gamut_remap_id mpcc_gamut_remap_block_id;
};
+struct mpc_rmcm_regs {
+ uint32_t rmcm_3dlut_mem_pwr_state;
+ uint32_t rmcm_3dlut_mem_pwr_force;
+ uint32_t rmcm_3dlut_mem_pwr_dis;
+ uint32_t rmcm_3dlut_mem_pwr_mode;
+ uint32_t rmcm_3dlut_size;
+ uint32_t rmcm_3dlut_mode;
+ uint32_t rmcm_3dlut_mode_cur;
+ uint32_t rmcm_3dlut_read_sel;
+ uint32_t rmcm_3dlut_30bit_en;
+ uint32_t rmcm_3dlut_wr_en_mask;
+ uint32_t rmcm_3dlut_ram_sel;
+ uint32_t rmcm_3dlut_out_norm_factor;
+ uint32_t rmcm_3dlut_fl_sel;
+ uint32_t rmcm_3dlut_out_offset_r;
+ uint32_t rmcm_3dlut_out_scale_r;
+ uint32_t rmcm_3dlut_fl_done;
+ uint32_t rmcm_3dlut_fl_soft_underflow;
+ uint32_t rmcm_3dlut_fl_hard_underflow;
+ uint32_t rmcm_cntl;
+ uint32_t rmcm_shaper_mem_pwr_state;
+ uint32_t rmcm_shaper_mem_pwr_force;
+ uint32_t rmcm_shaper_mem_pwr_dis;
+ uint32_t rmcm_shaper_mem_pwr_mode;
+ uint32_t rmcm_shaper_lut_mode;
+ uint32_t rmcm_shaper_mode_cur;
+ uint32_t rmcm_shaper_lut_write_en_mask;
+ uint32_t rmcm_shaper_lut_write_sel;
+ uint32_t rmcm_shaper_offset_b;
+ uint32_t rmcm_shaper_scale_b;
+ uint32_t rmcm_shaper_rama_exp_region_start_b;
+ uint32_t rmcm_shaper_rama_exp_region_start_seg_b;
+ uint32_t rmcm_shaper_rama_exp_region_end_b;
+ uint32_t rmcm_shaper_rama_exp_region_end_base_b;
+};
+
struct mpcc_sm_cfg {
bool enable;
/* 0-single plane,2-row subsampling,4-column subsampling,6-checkboard subsampling */
@@ -301,6 +337,7 @@ struct mpcc_state {
uint32_t rgam_mode;
uint32_t rgam_lut;
struct mpc_grph_gamut_adjustment gamut_remap;
+ struct mpc_rmcm_regs rmcm_regs;
};
/**
@@ -1038,6 +1075,11 @@ struct mpc_funcs {
*/
void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
+ /**
+ * @mcm:
+ *
+ * MPC MCM new HW sequential programming functions
+ */
struct {
void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
@@ -1050,6 +1092,11 @@ struct mpc_funcs {
bool lut_bank_a, int mpcc_id);
} mcm;
+ /**
+ * @rmcm:
+ *
+ * MPC RMCM new HW sequential programming functions
+ */
struct {
void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id);
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
index 00ea3864dd4d..44f86cc2d1d6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
@@ -46,6 +46,8 @@ struct pg_cntl_funcs {
void (*opp_pg_control)(struct pg_cntl *pg_cntl, unsigned int opp_inst, bool power_on);
void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on);
void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
+ void (*mem_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
+ void (*dio_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
void (*init_pg_status)(struct pg_cntl *pg_cntl);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index fe7f3137f228..27f950ae45ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -117,6 +117,7 @@ struct stream_encoder {
uint32_t stream_enc_inst;
struct vpg *vpg;
struct afmt *afmt;
+ struct apg *apg;
};
struct enc_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 7d16351bba99..f2503402c10e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -144,9 +144,9 @@ struct link_service {
uint32_t (*dp_link_bandwidth_kbps)(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
- bool (*validate_dpia_bandwidth)(
- const struct dc_stream_state *stream,
- const unsigned int num_streams);
+ enum dc_status (*validate_dp_tunnel_bandwidth)(
+ const struct dc *dc,
+ const struct dc_state *new_ctx);
uint32_t (*dp_required_hblank_size_bytes)(
const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 96febabf464a..2956c2b3ad1a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -34,6 +34,7 @@
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
+#include "clk_mgr.h"
#define DC_LOGGER \
link->ctx->logger
@@ -67,10 +68,17 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
{
struct pipe_ctx *pipes[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
+ struct dc_stream_update stream_update = { 0 };
+ bool dpms_off = false;
+ bool needs_divider_update = false;
bool was_hpo_acquired = resource_is_hpo_acquired(link->dc->current_state);
bool is_hpo_acquired;
uint8_t count;
int i;
+ struct audio_output audio_output[MAX_PIPES];
+
+ needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
+ link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
udelay(100);
@@ -83,16 +91,59 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
link->dc,
state,
pipes[i]);
+
+ // Disable OTG and re-enable after updating clocks
+ pipes[i]->stream_res.tg->funcs->disable_crtc(pipes[i]->stream_res.tg);
}
- if (link->dc->hwss.setup_hpo_hw_control) {
- is_hpo_acquired = resource_is_hpo_acquired(state);
- if (was_hpo_acquired != is_hpo_acquired)
- link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired);
+ if (needs_divider_update && link->dc->res_pool->funcs->update_dc_state_for_encoder_switch) {
+ link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link,
+ link_setting, count,
+ *pipes, &audio_output[0]);
+ for (i = 0; i < count; i++) {
+ pipes[i]->clock_source->funcs->program_pix_clk(
+ pipes[i]->clock_source,
+ &pipes[i]->stream_res.pix_clk_params,
+ link->dc->link_srv->dp_get_encoding_format(&pipes[i]->link_config.dp_link_settings),
+ &pipes[i]->pll_settings);
+
+ if (pipes[i]->stream_res.audio != NULL) {
+ const struct link_hwss *link_hwss = get_link_hwss(
+ link, &pipes[i]->link_res);
+
+ link_hwss->setup_audio_output(pipes[i], &audio_output[i],
+ pipes[i]->stream_res.audio->inst);
+
+ pipes[i]->stream_res.audio->funcs->az_configure(
+ pipes[i]->stream_res.audio,
+ pipes[i]->stream->signal,
+ &audio_output[i].crtc_info,
+ &pipes[i]->stream->audio_info,
+ &audio_output[i].dp_link_info);
+
+ if (link->dc->config.disable_hbr_audio_dp2 &&
+ pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio &&
+ link->dc->link_srv->dp_is_128b_132b_signal(pipes[i]))
+ pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio(pipes[i]->stream_res.audio);
+ }
+ }
}
- for (i = count-1; i >= 0; i--)
- link_set_dpms_on(state, pipes[i]);
+ // Toggle on HPO I/O if necessary
+ is_hpo_acquired = resource_is_hpo_acquired(state);
+ if (was_hpo_acquired != is_hpo_acquired && link->dc->hwss.setup_hpo_hw_control)
+ link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired);
+
+ for (i = 0; i < count; i++)
+ pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg);
+
+ // Set DPMS on with stream update
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) {
+ stream_update.stream = state->streams[i];
+ stream_update.dpms_off = &dpms_off;
+ dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, state->streams[i], &stream_update);
+ }
}
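[For review purposes, the rewritten dp_retrain_link_dp_test() above now follows this order; a condensed outline, with step names that are descriptive only:]

/* Sketch of the new retrain sequence for pipes on the link under test. */
static const char *const retrain_steps[] = {
	"1. set DPMS off per pipe",
	"2. disable_crtc() per pipe",
	"3. if the encoding format changes: update_dc_state_for_encoder_switch(),",
	"   then reprogram pixel clock and audio (az_configure) per pipe",
	"4. toggle HPO I/O control if the acquisition state changed",
	"5. enable_crtc() per pipe",
	"6. dc_update_planes_and_stream() with dpms_off = false per stream",
};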
static void dp_test_send_link_training(struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
index 116ff37126e7..55c5148de800 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -74,7 +74,7 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
- uint8_t clk_src = 0x4C;
+ uint8_t clk_src = 0xC4;
uint8_t pattern = 0x4F; /* SQ128 */
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 9655e6fa53a4..827b630daf49 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -593,8 +593,9 @@ static bool detect_dp(struct dc_link *link,
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
- if (!detect_dp_sink_caps(link))
+ if (!detect_dp_sink_caps(link)) {
return false;
+ }
if (is_dp_branch_device(link))
/* DP SST branch */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 273a3be6d593..8c8682f743d6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -140,7 +140,8 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
}
}
- if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
+ if (((!dc->is_switch_in_progress_dest) && ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)) &&
+ (link->type != dc_connection_none))
dpcd_write_rx_power_ctrl(link, false);
}
}
@@ -842,14 +843,14 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
}
@@ -2296,8 +2297,7 @@ static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, i
link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw;
}
- /* get dp overhead for dp tunneling */
- link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link);
+ link->dpia_bw_alloc_config.dp_overhead = link_dpia_get_dp_overhead(link);
req_bw += link->dpia_bw_alloc_config.dp_overhead;
link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw);
@@ -2537,6 +2537,14 @@ void link_set_dpms_on(
!pipe_ctx->next_odm_pipe) {
pipe_ctx->stream->dpms_off = false;
update_psp_stream_config(pipe_ctx, false);
+
+ if (link->is_dds) {
+ uint32_t post_oui_delay = 30; // 30ms
+
+ dpcd_set_source_specific_data(link);
+ msleep(post_oui_delay);
+ }
+
return;
}
@@ -2629,6 +2637,15 @@ void link_set_dpms_on(
dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, true);
+ /* Corruption was observed on systems with display mux when stream gets
+ * enabled after the mux switch. Having a small delay between link
+ * training and stream unblank resolves the corruption issue.
+ * This is a workaround.
+ */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ link->is_display_mux_present)
+ msleep(20);
+
dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 1a04f4b74585..de1143dbbd25 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -100,7 +100,7 @@ static void construct_link_service_validation(struct link_service *link_srv)
{
link_srv->validate_mode_timing = link_validate_mode_timing;
link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps;
- link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth;
+ link_srv->validate_dp_tunnel_bandwidth = link_validate_dp_tunnel_bandwidth;
link_srv->dp_required_hblank_size_bytes = dp_required_hblank_size_bytes;
}
@@ -539,10 +539,16 @@ static bool construct_phy(struct dc_link *link,
break;
case CONNECTOR_ID_EDP:
+ // If smartmux is supported, only create the link on the primary eDP.
+ // Dual eDP is not supported with smartmux.
+ if (!(!link->dc->config.smart_mux_version || dc_ctx->dc_edp_id_count == 0))
+ goto create_fail;
+
link->connector_signal = SIGNAL_TYPE_EDP;
if (link->hpd_gpio) {
- if (!link->dc->config.allow_edp_hotplug_detection)
+ if (!link->dc->config.allow_edp_hotplug_detection
+ && !is_smartmux_suported(link))
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
switch (link->dc->config.allow_edp_hotplug_detection) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 29606fda029d..aecaf37eee35 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -86,6 +86,10 @@ static bool dp_active_dongle_validate_timing(
if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
return false;
break;
+ case PIXEL_ENCODING_UNDEFINED:
+ /* An undefined pixel encoding is currently not supported */
+ ASSERT(false);
+ break;
default:
/* Invalid Pixel Encoding*/
return false;
@@ -104,6 +108,10 @@ static bool dp_active_dongle_validate_timing(
if (dongle_caps->dp_hdmi_max_bpc < 12)
return false;
break;
+ case COLOR_DEPTH_UNDEFINED:
+ /* These color depths are currently not supported */
+ ASSERT(false);
+ break;
case COLOR_DEPTH_141414:
case COLOR_DEPTH_161616:
default:
@@ -255,6 +263,14 @@ uint32_t dp_link_bandwidth_kbps(
return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000;
}
+static uint32_t dp_get_timing_bandwidth_kbps(
+ const struct dc_crtc_timing *timing,
+ const struct dc_link *link)
+{
+ return dc_bandwidth_in_kbps_from_timing(timing,
+ dc_link_get_highest_encoding_format(link));
+}
+
static bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing)
@@ -351,63 +367,81 @@ enum dc_status link_validate_mode_timing(
return DC_OK;
}
+static const struct dc_tunnel_settings *get_dp_tunnel_settings(const struct dc_state *context,
+ const struct dc_stream_state *stream)
+{
+ int i;
+ const struct dc_tunnel_settings *dp_tunnel_settings = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream && (context->res_ctx.pipe_ctx[i].stream == stream)) {
+ dp_tunnel_settings = &context->res_ctx.pipe_ctx[i].link_config.dp_tunnel_settings;
+ break;
+ }
+ }
+
+ return dp_tunnel_settings;
+}
+
/*
- * This function calculates the bandwidth required for the stream timing
- * and aggregates the stream bandwidth for the respective dpia link
- *
- * @stream: pointer to the dc_stream_state struct instance
- * @num_streams: number of streams to be validated
+ * Calculates the DP tunneling bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective DP tunneling link
*
- * return: true if validation is succeeded
+ * return: dc_status
*/
-bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
+enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const struct dc_state *new_ctx)
{
- int bw_needed[MAX_DPIA_NUM] = {0};
- struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
- int num_dpias = 0;
-
- for (unsigned int i = 0; i < num_streams; ++i) {
- if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
- /* new dpia sst stream, check whether it exceeds max dpia */
- if (num_dpias >= MAX_DPIA_NUM)
- return false;
+ struct dc_validation_dpia_set dpia_link_sets[MAX_DPIA_NUM] = { 0 };
+ uint8_t link_count = 0;
+ enum dc_status result = DC_OK;
- dpia_link[num_dpias] = stream[i].link;
- bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
- dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
- num_dpias++;
- } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- uint8_t j = 0;
- /* check whether its a known dpia link */
- for (; j < num_dpias; ++j) {
- if (dpia_link[j] == stream[i].link)
- break;
- }
+ // Iterate through streams in the new context
+ for (uint8_t i = 0; (i < MAX_PIPES && i < new_ctx->stream_count); i++) {
+ const struct dc_stream_state *stream = new_ctx->streams[i];
+ const struct dc_link *link;
+ const struct dc_tunnel_settings *dp_tunnel_settings;
+ uint32_t timing_bw;
+
+ if (stream == NULL)
+ continue;
+
+ link = stream->link;
+
+ if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT
+ || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ && link->hpd_status))
+ continue;
- if (j == num_dpias) {
- /* new dpia mst stream, check whether it exceeds max dpia */
- if (num_dpias >= MAX_DPIA_NUM)
- return false;
- else {
- dpia_link[j] = stream[i].link;
- num_dpias++;
- }
+ dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream);
+
+ if ((dp_tunnel_settings == NULL) || (dp_tunnel_settings->should_use_dp_bw_allocation == false))
+ continue;
+
+ timing_bw = dp_get_timing_bandwidth_kbps(&stream->timing, link);
+
+ // Find an existing entry for this 'link' in 'dpia_link_sets'
+ for (uint8_t j = 0; j < MAX_DPIA_NUM; j++) {
+ bool is_new_slot = false;
+
+ if (dpia_link_sets[j].link == NULL) {
+ is_new_slot = true;
+ link_count++;
+ dpia_link_sets[j].required_bw = 0;
+ dpia_link_sets[j].link = link;
}
- bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
- dc_link_get_highest_encoding_format(dpia_link[j]));
+ if (is_new_slot || (dpia_link_sets[j].link == link)) {
+ dpia_link_sets[j].tunnel_settings = dp_tunnel_settings;
+ dpia_link_sets[j].required_bw += timing_bw;
+ break;
+ }
}
}
- /* Include dp overheads */
- for (uint8_t i = 0; i < num_dpias; ++i) {
- int dp_overhead = 0;
-
- dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
- bw_needed[i] += dp_overhead;
- }
+ if (link_count && link_dpia_validate_dp_tunnel_bandwidth(dpia_link_sets, link_count) == false)
+ result = DC_FAIL_DP_TUNNEL_BW_VALIDATE;
- return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
+ return result;
}
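[The rewritten validator above buckets streams by DPIA link and accumulates each stream's timing bandwidth before calling link_dpia_validate_dp_tunnel_bandwidth(). The grouping logic in isolation, as a standalone sketch; the MAX_DPIA_NUM value here is an assumption:]

#include <stdint.h>
#include <stddef.h>

#define MAX_DPIA_NUM 6 /* assumption; mirrors the role of the kernel macro */

struct link;                       /* opaque stand-in for dc_link */
struct dpia_set { const struct link *link; uint32_t required_bw; };

/* Accumulate 'bw' for 'link', reusing an existing slot or claiming a new one.
 * Returns the number of distinct links seen so far, or 0 on overflow. */
static size_t dpia_accumulate(struct dpia_set sets[MAX_DPIA_NUM],
			      size_t count, const struct link *link, uint32_t bw)
{
	for (size_t j = 0; j < MAX_DPIA_NUM; j++) {
		if (sets[j].link == NULL) {          /* new slot */
			sets[j].link = link;
			sets[j].required_bw = bw;
			return count + 1;
		}
		if (sets[j].link == link) {          /* existing slot */
			sets[j].required_bw += bw;
			return count;
		}
	}
	return 0; /* no slot available */
}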
struct dp_audio_layout_config {
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index bf398c49c3e8..9553c81053fe 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -30,9 +30,9 @@ enum dc_status link_validate_mode_timing(
const struct dc_stream_state *stream,
struct dc_link *link,
const struct dc_crtc_timing *timing);
-bool link_validate_dpia_bandwidth(
- const struct dc_stream_state *stream,
- const unsigned int num_streams);
+enum dc_status link_validate_dp_tunnel_bandwidth(
+ const struct dc *dc,
+ const struct dc_state *new_ctx);
uint32_t dp_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 0f965380a9b4..651926e547b9 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1388,6 +1388,21 @@ void dpcd_set_source_specific_data(struct dc_link *link)
struct dpcd_amd_signature amd_signature = {0};
struct dpcd_amd_device_id amd_device_id = {0};
+ if (link->is_dds) {
+ uint8_t dpcd_dp_edp_backlight_mode = 0;
+
+ /*
+ * Write 0 to bits 0:1 of the DP_EDP_BACKLIGHT_MODE_SET register
+ * if the platform is DDS
+ */
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &dpcd_dp_edp_backlight_mode, sizeof(uint8_t));
+ dpcd_dp_edp_backlight_mode &= ~0x3;
+
+ core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &dpcd_dp_edp_backlight_mode, sizeof(uint8_t));
+ }
+
amd_device_id.device_id_byte1 =
(uint8_t)(link->ctx->asic_id.chip_id);
amd_device_id.device_id_byte2 =
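[The DDS branch above is a plain read-modify-write that clears bits 0:1 of DP_EDP_BACKLIGHT_MODE_SET_REGISTER while preserving the rest. The masking step in isolation, with an illustrative register value:]

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint8_t mode = 0xAB;        /* 1010 1011: both low bits set plus other state */

	mode &= (uint8_t)~0x3;      /* clear bits 0:1, keep the rest */
	assert(mode == 0xA8);       /* 1010 1000 */
	return 0;
}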
@@ -1543,6 +1558,10 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return false;
link->dpcd_sink_ext_caps.raw = dpcd_data;
+ if (link->is_dds && !link->dpcd_sink_ext_caps.bits.oled) {
+ link->dpcd_sink_ext_caps.raw = 0;
+ return false;
+ }
if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 22bfdced64ab..9b2f1a7da1d1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -75,12 +75,15 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) {
status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY,
- dpcd_dp_tun_data, 1);
+ dpcd_dp_tun_data, 2);
if (status != DC_OK)
goto err;
- link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = dpcd_dp_tun_data[0];
+ link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw =
+ dpcd_dp_tun_data[USB4_DRIVER_BW_CAPABILITY - USB4_DRIVER_BW_CAPABILITY];
+ link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_TUNNEL_INFO - USB4_DRIVER_BW_CAPABILITY];
}
DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) "
@@ -155,8 +158,14 @@ void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling;
if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
- && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support)
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) {
dp_tunnel_setting->should_use_dp_bw_allocation = true;
+ dp_tunnel_setting->cm_id = link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id & 0x0F;
+ dp_tunnel_setting->group_id = link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.bits.group_id;
+ dp_tunnel_setting->estimated_bw = link->dpia_bw_alloc_config.estimated_bw;
+ dp_tunnel_setting->allocated_bw = link->dpia_bw_alloc_config.allocated_bw;
+ dp_tunnel_setting->bw_granularity = link->dpia_bw_alloc_config.bw_granularity;
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 3af7564a84f1..819bf2d8ba53 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -35,6 +35,8 @@
#define Kbps_TO_Gbps (1000 * 1000)
+#define MST_TIME_SLOT_COUNT 64
+
// ------------------------------------------------------------------
// PRIVATE FUNCTIONS
// ------------------------------------------------------------------
@@ -160,78 +162,6 @@ static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link)
link->dpia_bw_alloc_config.nrd_max_lane_count);
}
-static uint8_t get_lowest_dpia_index(struct dc_link *link)
-{
- const struct dc *dc_struct = link->dc;
- uint8_t idx = 0xFF;
- int i;
-
- for (i = 0; i < MAX_LINKS; ++i) {
-
- if (!dc_struct->links[i] ||
- dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
- continue;
-
- if (idx > dc_struct->links[i]->link_index) {
- idx = dc_struct->links[i]->link_index;
- break;
- }
- }
-
- return idx;
-}
-
-/*
- * Get the maximum dp tunnel bandwidth of the host router
- *
- * @dc: pointer to the dc struct instance
- * @hr_index: host router index
- *
- * return: host router maximum dp tunnel bandwidth
- */
-static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
-{
- uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
- uint8_t hr_index_temp = 0;
- struct dc_link *link_dpia_primary, *link_dpia_secondary;
- int total_bw = 0;
-
- for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) {
-
- if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
- continue;
-
- hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;
-
- if (hr_index_temp == hr_index) {
- link_dpia_primary = dc->links[i];
- link_dpia_secondary = dc->links[i + 1];
-
- /**
- * If BW allocation enabled on both DPIAs, then
- * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary)
- * otherwise HR BW = Estimated(bw alloc enabled dpia)
- */
- if ((link_dpia_primary->hpd_status &&
- link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
- (link_dpia_secondary->hpd_status &&
- link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
- total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
- link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
- } else if (link_dpia_primary->hpd_status &&
- link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
- total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
- } else if (link_dpia_secondary->hpd_status &&
- link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
- total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
- }
- break;
- }
- }
-
- return total_bw;
-}
-
/*
* Cleanup function for when the dpia is unplugged to reset struct
* and perform any required clean up
@@ -251,32 +181,40 @@ static void dpia_bw_alloc_unplug(struct dc_link *link)
static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw)
{
- uint8_t requested_bw;
- uint32_t temp;
+ uint8_t request_reg_val;
+ uint32_t temp, request_bw;
- /* Error check whether request bw greater than allocated */
- if (req_bw > link->dpia_bw_alloc_config.estimated_bw) {
- DC_LOG_ERROR("%s: Request BW greater than estimated BW for link(%d)\n",
- __func__, link->link_index);
- req_bw = link->dpia_bw_alloc_config.estimated_bw;
+ if (link->dpia_bw_alloc_config.bw_granularity == 0) {
+ DC_LOG_ERROR("%s: Link[%d]: bw_granularity is zero!", __func__, link->link_index);
+ return;
}
temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
- requested_bw = temp / Kbps_TO_Gbps;
+ request_reg_val = temp / Kbps_TO_Gbps;
/* Always make sure to add more to account for floating points */
if (temp % Kbps_TO_Gbps)
- ++requested_bw;
+ ++request_reg_val;
- /* Error check whether requested and allocated are equal */
- req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) {
- DC_LOG_ERROR("%s: Request BW equals to allocated BW for link(%d)\n",
- __func__, link->link_index);
+ request_bw = request_reg_val * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+ if (request_bw > link->dpia_bw_alloc_config.estimated_bw) {
+ DC_LOG_ERROR("%s: Link[%d]: Request BW (%d --> %d) > Estimated BW (%d)... Set to Estimated BW!",
+ __func__, link->link_index,
+ req_bw, request_bw, link->dpia_bw_alloc_config.estimated_bw);
+ req_bw = link->dpia_bw_alloc_config.estimated_bw;
+
+ temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
+ request_reg_val = temp / Kbps_TO_Gbps;
+ if (temp % Kbps_TO_Gbps)
+ ++request_reg_val;
}
+ link->dpia_bw_alloc_config.allocated_bw = request_bw;
+ DC_LOG_DC("%s: Link[%d]: Request BW: %d", __func__, link->link_index, request_bw);
+
core_link_write_dpcd(link, REQUESTED_BW,
- &requested_bw,
+ &request_reg_val,
sizeof(uint8_t));
}
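
A note on the rounding above: the DPCD request register counts bandwidth in units of (1 Gbps / bw_granularity), so the driver multiplies the kbps request by the granularity, divides by Kbps_TO_Gbps, and rounds up on any remainder so the grant never undershoots. A minimal standalone sketch of just that conversion (bw_request_to_reg_val and KBPS_PER_GBPS are illustrative names, not the driver's API):

#include <stdint.h>

#define KBPS_PER_GBPS (1000 * 1000)	/* mirrors Kbps_TO_Gbps above */

/* Convert a request in kbps to the 8-bit register value, rounding up. */
static uint8_t bw_request_to_reg_val(uint32_t req_bw_kbps, uint32_t bw_granularity)
{
	uint32_t temp = req_bw_kbps * bw_granularity;
	uint8_t reg_val = temp / KBPS_PER_GBPS;

	if (temp % KBPS_PER_GBPS)	/* round up on any remainder */
		++reg_val;
	return reg_val;
}

For example, with bw_granularity = 2 (0.5 Gbps units), a 2,500,000 kbps request yields a register value of 5, i.e. exactly 2.5 Gbps.
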
@@ -304,14 +242,16 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
ret = true;
- /*
- * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other
- * DPIA. CM release preallocation only when allocation is complete. Do zero alloc
- * to make the CM to release preallocation and update estimated BW correctly for
- * all DPIAs per host router
- */
- // TODO: Zero allocation can be removed once the MSFT CM fix has been released
- link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
+ if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
+ /*
+ * During DP tunnel creation, the CM preallocates BW
+ * and reduces the estimated BW of other DPIAs.
+ * The CM releases the preallocation only when the allocation is complete.
+ * Perform a zero allocation to make the CM release the preallocation
+ * and correctly update the estimated BW for all DPIAs per host router.
+ */
+ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
+ }
} else
DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
}
@@ -329,19 +269,17 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
*/
void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
{
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+
if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) {
DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)",
__func__, link->link_index);
} else if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
- link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
-
DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d",
__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw);
} else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
- link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
-
DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d",
__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
}
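
The reordering above refreshes estimated_bw once, before the status bits are inspected, rather than inside each branch that happens to need it. A small sketch of the resulting dispatch shape (the bit values below are illustrative stand-ins, not the real DPCD definitions):

#include <stdint.h>
#include <stdio.h>

#define REQ_SUCCEEDED (1u << 0)	/* stands in for DP_TUNNELING_BW_REQUEST_SUCCEEDED */
#define REQ_FAILED    (1u << 1)	/* stands in for DP_TUNNELING_BW_REQUEST_FAILED */
#define EST_CHANGED   (1u << 2)	/* stands in for DP_TUNNELING_ESTIMATED_BW_CHANGED */

/* The estimate is refreshed unconditionally before dispatch. */
static void handle_status(uint8_t status, uint32_t estimated_bw_kbps)
{
	if (status & REQ_SUCCEEDED)
		printf("request granted\n");
	else if (status & REQ_FAILED)
		printf("re-request at fresh estimate: %u kbps\n", estimated_bw_kbps);
	else if (status & EST_CHANGED)
		printf("estimate changed: %u kbps\n", estimated_bw_kbps);
}
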
@@ -374,9 +312,13 @@ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pe
void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
{
- DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+
+ DC_LOG_DEBUG("%s: ENTER: link[%d] hpd(%d) Allocated_BW: %d Estimated_BW: %d Req_BW: %d",
__func__, link->link_index, link->hpd_status,
- link->dpia_bw_alloc_config.allocated_bw, req_bw);
+ link->dpia_bw_alloc_config.allocated_bw,
+ link->dpia_bw_alloc_config.estimated_bw,
+ req_bw);
if (link_dp_is_bw_alloc_available(link))
link_dpia_send_bw_alloc_request(link, req_bw);
@@ -384,73 +326,116 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__);
}
-bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
+uint32_t link_dpia_get_dp_overhead(const struct dc_link *link)
{
- bool ret = true;
- int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
- uint8_t lowest_dpia_index, i, hr_index;
+ uint32_t link_dp_overhead = 0;
- if (!num_dpias || num_dpias > MAX_DPIA_NUM)
- return ret;
+ if ((link->type == dc_connection_mst_branch) &&
+ !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ /* For 8b/10b encoding: an MTP is 64 time slots long, and slot 0 carries the MTPH,
+ * so MST overhead is 1/64 of the link bandwidth (excluding any other overhead).
+ */
+ const struct dc_link_settings *link_cap = dc_link_get_link_cap(link);
- lowest_dpia_index = get_lowest_dpia_index(link[0]);
+ if (link_cap) {
+ uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
+ (uint32_t)link_cap->lane_count *
+ LINK_RATE_REF_FREQ_IN_KHZ * 8;
+ link_dp_overhead = (link_bw_in_kbps / MST_TIME_SLOT_COUNT)
+ + ((link_bw_in_kbps % MST_TIME_SLOT_COUNT) ? 1 : 0);
+ }
+ }
- /* get total Host Router BW with granularity for the given modes */
- for (i = 0; i < num_dpias; ++i) {
- int granularity_Gbps = 0;
- int bw_granularity = 0;
+ return link_dp_overhead;
+}
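
The helper above folds the MTPH accounting into one place: for 8b/10b MST the multi-stream transport packet spans 64 time slots and slot 0 carries the header, so the usable payload loses 1/64 of the link bandwidth, rounded up. A standalone sketch of just that arithmetic (names are illustrative):

#include <stdint.h>

#define MST_SLOTS 64	/* mirrors MST_TIME_SLOT_COUNT above */

/* ceil(link_bw / 64): one of the 64 MTP time slots is the MTPH. */
static uint32_t mst_overhead_kbps(uint32_t link_bw_in_kbps)
{
	return link_bw_in_kbps / MST_SLOTS +
	       (link_bw_in_kbps % MST_SLOTS ? 1 : 0);
}
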
- if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
- continue;
+/*
+ * Aggregates the DPIA bandwidth usage for the respective USB4 router,
+ * then validates that the required bandwidth is within the router's capacity.
+ *
+ * @dpia_link_sets: pointer to an array of dc_validation_dpia_set entries
+ * @count: number of DPIA validation sets
+ *
+ * return: true if validation succeeded
+ */
+bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count)
+{
+ uint32_t granularity_Gbps;
+ const struct dc_link *link;
+ uint32_t link_bw_granularity;
+ uint32_t link_required_bw;
+ struct usb4_router_validation_set router_sets[MAX_HOST_ROUTERS_NUM] = { 0 };
+ uint8_t i;
+ bool is_success = true;
+ uint8_t router_count = 0;
+
+ if ((dpia_link_sets == NULL) || (count == 0))
+ return is_success;
+
+ // Iterate through each DP tunneling link (DPIA).
+ // Aggregate its bandwidth requirements onto the respective USB4 router.
+ for (i = 0; i < count; i++) {
+ link = dpia_link_sets[i].link;
+ link_required_bw = dpia_link_sets[i].required_bw;
+ const struct dc_tunnel_settings *dp_tunnel_settings = dpia_link_sets[i].tunnel_settings;
+
+ if ((link == NULL) || (dp_tunnel_settings == NULL) || dp_tunnel_settings->bw_granularity == 0)
+ break;
- if (link[i]->link_index < lowest_dpia_index)
- continue;
+ if (link->type == dc_connection_mst_branch)
+ link_required_bw += link_dpia_get_dp_overhead(link);
- granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
- bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
- ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);
+ granularity_Gbps = (Kbps_TO_Gbps / dp_tunnel_settings->bw_granularity);
+ link_bw_granularity = (link_required_bw / granularity_Gbps) * granularity_Gbps +
+ ((link_required_bw % granularity_Gbps) ? granularity_Gbps : 0);
- hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
- bw_needed_per_hr[hr_index] += bw_granularity;
- }
+ // Find or add the USB4 router associated with the current DPIA link
+ for (uint8_t j = 0; j < MAX_HOST_ROUTERS_NUM; j++) {
+ if (router_sets[j].is_valid == false) {
+ router_sets[j].is_valid = true;
+ router_sets[j].cm_id = dp_tunnel_settings->cm_id;
+ router_count++;
+ }
- /* validate against each Host Router max BW */
- for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
- if (bw_needed_per_hr[hr_index]) {
- host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
- if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
- ret = false;
+ if (router_sets[j].cm_id == dp_tunnel_settings->cm_id) {
+ uint32_t remaining_bw =
+ dp_tunnel_settings->estimated_bw - dp_tunnel_settings->allocated_bw;
+
+ router_sets[j].allocated_bw += dp_tunnel_settings->allocated_bw;
+
+ if (remaining_bw > router_sets[j].remaining_bw)
+ router_sets[j].remaining_bw = remaining_bw;
+
+ // Get the max estimated BW within the same CM_ID
+ if (dp_tunnel_settings->estimated_bw > router_sets[j].estimated_bw)
+ router_sets[j].estimated_bw = dp_tunnel_settings->estimated_bw;
+
+ router_sets[j].required_bw += link_bw_granularity;
+ router_sets[j].dpia_count++;
break;
}
}
}
- return ret;
-}
+ // Validate bandwidth for each unique router found.
+ for (i = 0; i < router_count; i++) {
+ uint32_t total_bw = 0;
-int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
-{
- int dp_overhead = 0, link_mst_overhead = 0;
+ if (router_sets[i].is_valid == false)
+ break;
- if (!link_dp_is_bw_alloc_available(link))
- return dp_overhead;
+ // Determine the total available bandwidth for the current router based on aggregated data
+ if ((router_sets[i].dpia_count == 1) || (router_sets[i].allocated_bw == 0))
+ total_bw = router_sets[i].estimated_bw;
+ else
+ total_bw = router_sets[i].allocated_bw + router_sets[i].remaining_bw;
- /* if its mst link, add MTPH overhead */
- if ((link->type == dc_connection_mst_branch) &&
- !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
- * MST overhead is 1/64 of link bandwidth (excluding any overhead)
- */
- const struct dc_link_settings *link_cap =
- dc_link_get_link_cap(link);
- uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
- (uint32_t)link_cap->lane_count *
- LINK_RATE_REF_FREQ_IN_KHZ * 8;
- link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
+ if (router_sets[i].required_bw > total_bw) {
+ is_success = false;
+ break;
+ }
}
- /* add all the overheads */
- dp_overhead = link_mst_overhead;
-
- return dp_overhead;
+ return is_success;
}
+
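
The new validator replaces the fixed two-host-router layout with CM-ID-keyed aggregation: each DPIA's requirement (already rounded up to the router's granularity in the loop above) is summed onto the router owning its CM ID, then checked against either the estimated BW (single DPIA, or nothing allocated yet) or allocated BW plus the largest remaining headroom. A condensed sketch of the grouping and the pass/fail rule, assuming a struct that mirrors usb4_router_validation_set from the header below (all names illustrative):

#include <stdbool.h>
#include <stdint.h>

#define MAX_ROUTERS 4	/* stand-in for MAX_HOST_ROUTERS_NUM */

/* Pared-down view of usb4_router_validation_set from the header below. */
struct router_acc {
	bool is_valid;
	uint8_t cm_id;
	uint8_t dpia_count;
	uint32_t required_bw;
	uint32_t allocated_bw;
	uint32_t estimated_bw;
	uint32_t remaining_bw;
};

/* Fold one DPIA's numbers onto the router owning its CM ID. */
static void accumulate_dpia(struct router_acc *routers, uint8_t cm_id,
			    uint32_t required_bw, uint32_t allocated_bw,
			    uint32_t estimated_bw)
{
	for (int j = 0; j < MAX_ROUTERS; j++) {
		if (!routers[j].is_valid) {	/* claim a free slot */
			routers[j].is_valid = true;
			routers[j].cm_id = cm_id;
		}
		if (routers[j].cm_id == cm_id) {
			uint32_t remaining = estimated_bw - allocated_bw;

			routers[j].allocated_bw += allocated_bw;
			if (remaining > routers[j].remaining_bw)
				routers[j].remaining_bw = remaining;
			if (estimated_bw > routers[j].estimated_bw)
				routers[j].estimated_bw = estimated_bw;
			routers[j].required_bw += required_bw;
			routers[j].dpia_count++;
			break;
		}
	}
}

/* A router passes when its summed requirement fits the derived capacity. */
static bool router_fits(const struct router_acc *r)
{
	uint32_t total = (r->dpia_count == 1 || r->allocated_bw == 0) ?
			 r->estimated_bw : r->allocated_bw + r->remaining_bw;

	return r->required_bw <= total;
}
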
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 801965b5f9a4..41efcb3e44e2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -28,10 +28,6 @@
#include "link.h"
-/* Number of Host Routers per motherboard is 2 */
-#define MAX_HR_NUM 2
-/* Number of DPIA per host router is 2 */
-#define MAX_DPIA_NUM (MAX_HR_NUM * 2)
/*
* Host Router BW type
@@ -42,6 +38,16 @@ enum bw_type {
HOST_ROUTER_BW_INVALID,
};
+struct usb4_router_validation_set {
+ bool is_valid;
+ uint8_t cm_id;
+ uint8_t dpia_count;
+ uint32_t required_bw;
+ uint32_t allocated_bw;
+ uint32_t estimated_bw;
+ uint32_t remaining_bw;
+};
+
/*
* Enable USB4 DP BW allocation mode
*
@@ -74,25 +80,13 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
/*
- * Handle the validation of total BW here and confirm that the bw used by each
- * DPIA doesn't exceed available BW for each host router (HR)
- *
- * @link[]: array of link pointer to all possible DPIA links
- * @bw_needed[]: bw needed for each DPIA link based on timing
- * @num_dpias: Number of DPIAs for the above 2 arrays. Should always be <= MAX_DPIA_NUM
- *
- * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
- */
-bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
-
-/*
* Obtain all the DP overheads in dp tunneling for the dpia link
*
* @link: pointer to the dc_link struct instance
*
* return: DP overheads in DP tunneling
*/
-int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
+uint32_t link_dpia_get_dp_overhead(const struct dc_link *link);
/*
* Handle DP BW allocation status register
@@ -104,4 +98,15 @@ int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
*/
void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status);
+/*
+ * Aggregates the DPIA bandwidth usage for the respective USB4 router
+ * and validates it against the router's capacity.
+ *
+ * @dpia_link_sets: pointer to an array of dc_validation_dpia_set entries
+ * @count: number of DPIA validation sets
+ *
+ * return: true if validation succeeded
+ */
+bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count);
+
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index da74c2b5854f..e7927b8f5ba3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -161,6 +161,9 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
+ if (link->is_dds && !link->dpcd_caps.panel_luminance_control)
+ return true;
+
// use internal backlight control if dmub capabilities are not present
if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX &&
!link->dc->caps.dmub_caps.aux_backlight_support) {
@@ -173,6 +176,15 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
target_luminance = (struct target_luminance_value *)&backlight_millinits;
+ // Make sure we disable AMD ABC first.
+ core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_enable, sizeof(uint8_t));
+ if (backlight_enable) {
+ backlight_enable = 0;
+ core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_enable, 1);
+ }
+
core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
&backlight_enable, sizeof(uint8_t));
@@ -193,10 +205,22 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
uint8_t backlight_control = isHDR ? 1 : 0;
+ uint8_t backlight_enable = 0;
+
// OLEDs have no PWM, they can only use AUX
if (link->dpcd_sink_ext_caps.bits.oled == 1)
backlight_control = 1;
+ // Make sure we disable VESA ABC first.
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &backlight_enable, sizeof(uint8_t));
+
+ if (backlight_enable & DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE) {
+ backlight_enable &= ~DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;
+ core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &backlight_enable, sizeof(backlight_enable));
+ }
+
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
(uint8_t *)(&dpcd_backlight_set),
sizeof(dpcd_backlight_set)) != DC_OK)
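
Both backlight paths above now follow the same handoff discipline: before enabling one control interface (AMD source backlight vs. the VESA/eDP luminance control), read the other interface's DPCD mode byte and clear its enable bit if it is set, so the two never drive the panel at once. A minimal sketch of that read-modify-write pattern (read_reg/write_reg stand in for the DPCD accessors and are not the driver's API):

#include <stdint.h>

/* Disable a competing control interface before enabling our own. */
static void disable_competing_control(uint32_t reg, uint8_t enable_mask,
				      uint8_t (*read_reg)(uint32_t),
				      void (*write_reg)(uint32_t, uint8_t))
{
	uint8_t val = read_reg(reg);

	if (val & enable_mask)			/* write back only if needed */
		write_reg(reg, val & ~enable_mask);
}
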
@@ -222,6 +246,8 @@ bool edp_get_backlight_level_nits(struct dc_link *link,
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
+ if (link->is_dds)
+ return false;
if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
dpcd_backlight_get.raw,
sizeof(union dpcd_source_backlight_get)))
@@ -248,6 +274,8 @@ bool edp_backlight_enable_aux(struct dc_link *link, bool enable)
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
+ if (link->is_dds)
+ return true;
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
&backlight_enable, 1) != DC_OK)
return false;
@@ -916,7 +944,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
// TODO: Handle mux change case if force_static is set
// If force_static is set, just change the replay_allow_active state directly
if (replay != NULL && link->replay_settings.replay_feature_enabled)
- replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst);
+ replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst, link);
link->replay_settings.replay_allow_active = *allow_active;
}
@@ -1173,6 +1201,16 @@ int edp_get_target_backlight_pwm(const struct dc_link *link)
return (int) abm->funcs->get_target_backlight(abm);
}
+bool is_smartmux_suported(struct dc_link *link)
+{
+ if (link->dc->caps.is_apu)
+ return false;
+ if (!link->dc->config.smart_mux_version)
+ return false;
+
+ return true;
+}
+
static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link,
struct link_resource *link_res, bool enable)
{
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index bcfa6ac5d4e7..4a475d5b9dde 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -30,6 +30,7 @@
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool set_default_brightness_aux(struct dc_link *link);
+bool is_smartmux_suported(struct dc_link *link);
void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
int edp_get_backlight_level(const struct dc_link *link);
bool edp_get_backlight_level_nits(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/Makefile b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
index 1e2e66508192..5402c3529f5e 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
@@ -68,5 +68,5 @@ MPC_DCN401 = dcn401_mpc.o
AMD_DAL_MPC_DCN401 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn401/,$(MPC_DCN401))
AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN401)
-endif
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
index b4cea2b8cb2a..6f0e017a8ae2 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
@@ -30,7 +30,6 @@
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
#include "dc.h"
-#include "dcn401/dcn401_mpc.h"
#define REG(reg)\
mpc30->mpc_regs->reg
@@ -879,7 +878,7 @@ void mpc32_set3dlut_ram10(
}
-static void mpc32_set_3dlut_mode(
+void mpc32_set_3dlut_mode(
struct mpc *mpc,
enum dc_lut_mode mode,
bool is_color_channel_12bits,
@@ -1022,8 +1021,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
.set_bg_color = mpc1_set_bg_color,
- .set_movable_cm_location = mpc401_set_movable_cm_location,
- .populate_lut = mpc401_populate_lut,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
index 9622518826c9..8c9b20bcca85 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
@@ -391,4 +391,12 @@ void mpc32_select_3dlut_ram(
enum dc_lut_mode mode,
bool is_color_channel_12bits,
uint32_t mpcc_id);
+
+void mpc32_set_3dlut_mode(
+ struct mpc *mpc,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits,
+ bool is_lut_size17x17x17,
+ uint32_t mpcc_id);
+
#endif //__DC_MPCC_DCN32_H__
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index 98cf0cbd59ba..f3fb3fe13757 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -294,7 +294,7 @@ void mpc401_program_3dlut_size(struct mpc *mpc, bool is_17x17x17, int mpcc_id)
REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, is_17x17x17 ? 0 : 1);
}
-static void program_gamut_remap(
+void mpc_program_gamut_remap(
struct mpc *mpc,
unsigned int mpcc_id,
const uint16_t *regval,
@@ -426,7 +426,7 @@ void mpc401_set_gamut_remap(
if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) {
/* Bypass / Disable if type is bypass or hw */
- program_gamut_remap(mpc, mpcc_id, NULL,
+ mpc_program_gamut_remap(mpc, mpcc_id, NULL,
adjust->mpcc_gamut_remap_block_id, MPCC_GAMUT_REMAP_MODE_SELECT_0);
} else {
struct fixed31_32 arr_matrix[12];
@@ -460,12 +460,12 @@ void mpc401_set_gamut_remap(
else
mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_2;
- program_gamut_remap(mpc, mpcc_id, arr_reg_val,
+ mpc_program_gamut_remap(mpc, mpcc_id, arr_reg_val,
adjust->mpcc_gamut_remap_block_id, mode_select);
}
}
-static void read_gamut_remap(struct mpc *mpc,
+void mpc_read_gamut_remap(struct mpc *mpc,
int mpcc_id,
uint16_t *regval,
enum mpcc_gamut_remap_id gamut_remap_block_id,
@@ -561,9 +561,9 @@ void mpc401_get_gamut_remap(struct mpc *mpc,
struct mpc_grph_gamut_adjustment *adjust)
{
uint16_t arr_reg_val[12] = {0};
- uint32_t mode_select;
+ uint32_t mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_0;
- read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select);
+ mpc_read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select);
if (mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) {
adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index 8e35ebc603a9..eb0c68d0b0c7 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -241,6 +241,19 @@ void mpc401_update_3dlut_fast_load_select(
int mpcc_id,
int hubp_idx);
+void mpc_program_gamut_remap(
+ struct mpc *mpc,
+ unsigned int mpcc_id,
+ const uint16_t *regval,
+ enum mpcc_gamut_remap_id gamut_remap_block_id,
+ enum mpcc_gamut_remap_mode_select mode_select);
+
+void mpc_read_gamut_remap(struct mpc *mpc,
+ int mpcc_id,
+ uint16_t *regval,
+ enum mpcc_gamut_remap_id gamut_remap_block_id,
+ uint32_t *mode_select);
+
void mpc401_update_3dlut_fast_load_select(
struct mpc *mpc,
int mpcc_id,
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index f2ba76c1e0c0..782316348941 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -31,6 +31,7 @@
#include <linux/kgdb.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include <linux/vmalloc.h>
#include <asm/byteorder.h>
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index 84f73fdb0f95..3a51be63f020 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -839,7 +839,7 @@ static enum dc_status build_mapped_resource(
static enum dc_status dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
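
This is the first of many hunks in this series converting the validate-bandwidth plumbing from a fast_validate bool to enum dc_validate_mode. The enum itself is defined elsewhere in the series (presumably dc.h); based on the values these hunks reference, a plausible shape and the bool mapping look like this (a sketch, not the authoritative definition):

/* Assumed shape; the real definition lives elsewhere in this series. */
enum dc_validate_mode {
	DC_VALIDATE_MODE_ONLY,			/* old fast_validate == true */
	DC_VALIDATE_MODE_AND_STATE_INDEX,	/* validate and report a state index */
	DC_VALIDATE_MODE_AND_PROGRAMMING,	/* old fast_validate == false */
};

Call sites that used to test `if (fast_validate)` become `if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)`, as the dcn30/dcn31 hunks further down show.
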
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index f3d5baac11bf..cccde5a6f3cd 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -963,7 +963,7 @@ static enum dc_status build_mapped_resource(
static enum dc_status dce110_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool result = false;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 4225cae68c10..164ba796f64c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -886,7 +886,7 @@ static enum dc_status build_mapped_resource(
enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool result = false;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
index 6221d749246d..3efc4c55d2d2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
@@ -45,7 +45,7 @@ enum dc_status dce112_validate_with_context(
enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
enum dc_status dce112_add_stream_to_ctx(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index d9ffdded5ce1..53b60044653f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -373,7 +373,7 @@ static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
.num_stream_encoder = 6,
- .num_pll = 2,
+ .num_pll = 3,
.num_ddc = 6,
};
@@ -389,7 +389,7 @@ static const struct resource_caps res_cap_64 = {
.num_timing_generator = 2,
.num_audio = 2,
.num_stream_encoder = 2,
- .num_pll = 2,
+ .num_pll = 3,
.num_ddc = 2,
};
@@ -866,7 +866,7 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
static enum dc_status dce60_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
@@ -973,21 +973,24 @@ static bool dce60_construct(
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
- pool->base.clk_src_count = 1;
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
}
if (pool->base.dp_clock_source == NULL) {
@@ -1365,21 +1368,24 @@ static bool dce64_construct(
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
- pool->base.clk_src_count = 1;
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
}
if (pool->base.dp_clock_source == NULL) {
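
The DCE6 changes raise num_pll to 3 and rework both constructors around one hardware rule, stated in the new comment: PLL0 may only drive DP. A compact restatement of the resulting selection logic (struct and names are illustrative, not driver code):

struct pll_plan {
	int dp_pll;		/* -1 means an external DP clock, no PLL used */
	int generic_plls[2];
};

/* DCE 6.0/6.4: PLL0 is DP-only; PLL1/PLL2 back the two generic slots. */
static struct pll_plan plan_plls(int has_external_dp_clock)
{
	struct pll_plan p;

	p.dp_pll = has_external_dp_clock ? -1 : 0;	/* PLL0 only for DP */
	p.generic_plls[0] = 1;				/* PLL1 */
	p.generic_plls[1] = 2;				/* PLL2 */
	return p;
}

Either way the pool now exposes two generic clock sources, which is what the clk_src_count = 2 changes above reflect.
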
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index bd5811f97531..3e8b0ac11d90 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -872,7 +872,7 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
static enum dc_status dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index be4ade0853e9..652c05c35494 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -1129,12 +1129,12 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
static enum dc_status dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool voltage_supported;
DC_FP_START();
- voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate);
+ voltage_supported = dcn_validate_bandwidth(dc, context, validate_mode);
DC_FP_END();
return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
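
The DC_FP_START()/DC_FP_END() bracket that recurs throughout these hunks guards floating-point use in the kernel (on x86 it typically maps to kernel_fpu_begin()/kernel_fpu_end(), which save FPU state and disable preemption). A sketch of the calling pattern these conversions preserve; do_fp_work() is hypothetical, and no FP math may leak outside the bracket:

static enum dc_status validate_with_fpu(struct dc *dc,
					struct dc_state *context,
					enum dc_validate_mode validate_mode)
{
	bool supported;

	DC_FP_START();	/* enter the FP-safe region */
	supported = do_fp_work(dc, context, validate_mode);	/* hypothetical */
	DC_FP_END();	/* leave the FP-safe region */

	return supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
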
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 3405be07f5e3..f9cbdad3ef37 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2007,7 +2007,7 @@ bool dcn20_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
int split[MAX_PIPES] = { 0 };
@@ -2021,7 +2021,7 @@ bool dcn20_fast_validate_bw(
dcn20_merge_pipes_for_validate(dc, context);
DC_FP_START();
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
DC_FP_END();
*pipe_cnt_out = pipe_cnt;
@@ -2125,7 +2125,7 @@ validate_out:
}
enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool voltage_supported;
display_e2e_pipe_params_st *pipes;
@@ -2135,7 +2135,7 @@ enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
- voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
+ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, validate_mode, pipes);
DC_FP_END();
kfree(pipes);
@@ -2736,6 +2736,8 @@ static bool dcn20_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 2;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
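
Each resource-construct hunk in this series also publishes a new capability, dc->caps.max_odm_combine_factor: 2 on the DCN2.x parts, 4 from DCN3.0 onward. A hypothetical consumer, to show the intent (odm_factor_supported and required_slices are illustrative, not driver API):

/* Reject timings needing more ODM slices than the ASIC supports. */
static bool odm_factor_supported(const struct dc *dc, int required_slices)
{
	return required_slices <= dc->caps.max_odm_combine_factor;
}
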
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
index c0e062c7407d..e997d35a8b86 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
@@ -119,7 +119,7 @@ void dcn20_set_mcif_arb_params(
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
-enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode);
void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context);
@@ -158,7 +158,7 @@ bool dcn20_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index 43fa2cb117f3..e4a1338d21e0 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -1285,6 +1285,8 @@ static bool dcn201_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 2;
+
dc->cap_funcs = cap_funcs;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 9ab01b65b177..918742a42ded 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -769,7 +769,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
int split[MAX_PIPES] = { 0 };
@@ -783,7 +783,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
dcn20_merge_pipes_for_validate(dc, context);
DC_FP_START();
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
DC_FP_END();
*pipe_cnt_out = pipe_cnt;
@@ -924,7 +924,7 @@ validate_out:
* dcn20_validate_bandwidth in dcn20_resource.c.
*/
static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool voltage_supported;
display_e2e_pipe_params_st *pipes;
@@ -934,7 +934,7 @@ static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *c
return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
- voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
+ voltage_supported = dcn21_validate_bandwidth_fp(dc, context, validate_mode, pipes);
DC_FP_END();
kfree(pipes);
@@ -1684,6 +1684,8 @@ static bool dcn21_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 2;
+
dc->cap_funcs = cap_funcs;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
index f7ecc002c2f7..a017fd9854d1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
@@ -51,6 +51,6 @@ bool dcn21_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif /* _DCN21_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index f631ae34e320..895349d9ca07 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -1319,13 +1319,13 @@ static struct clock_source *dcn30_clock_source_create(
int dcn30_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
DC_FP_START();
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1627,7 +1627,7 @@ noinline bool dcn30_internal_validate_bw(
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate,
+ enum dc_validate_mode validate_mode,
bool allow_self_refresh_only)
{
bool out = false;
@@ -1646,7 +1646,7 @@ noinline bool dcn30_internal_validate_bw(
context->bw_ctx.dml.vba.VoltageLevel = 0;
context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!pipe_cnt) {
out = true;
@@ -1655,7 +1655,7 @@ noinline bool dcn30_internal_validate_bw(
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
- if (!fast_validate || !allow_self_refresh_only) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING || !allow_self_refresh_only) {
/*
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
@@ -1669,7 +1669,7 @@ noinline bool dcn30_internal_validate_bw(
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
}
if (allow_self_refresh_only &&
- (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+ (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING || vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {
/*
* If mode is unsupported or there's still no p-state support
@@ -1678,7 +1678,7 @@ noinline bool dcn30_internal_validate_bw(
* We don't actually support prefetch mode 2, so require that we
* at least support prefetch mode 1.
*/
- context->bw_ctx.dml.validate_max_state = fast_validate;
+ context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING);
context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
dm_allow_self_refresh;
@@ -1865,7 +1865,7 @@ noinline bool dcn30_internal_validate_bw(
}
if (repopulate_pipes)
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
context->bw_ctx.dml.vba.VoltageLevel = vlevel;
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
@@ -2037,7 +2037,7 @@ void dcn30_calculate_wm_and_dlg(
enum dc_status dcn30_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -2055,7 +2055,7 @@ enum dc_status dcn30_validate_bandwidth(struct dc *dc,
goto validate_fail;
DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true);
DC_FP_END();
if (pipe_cnt == 0)
@@ -2066,7 +2066,7 @@ enum dc_status dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -2586,6 +2586,8 @@ static bool dcn30_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
index 689d9bdace81..2c967fe55712 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
@@ -57,14 +57,14 @@ unsigned int dcn30_calc_max_scaled_time(
unsigned int urgent_watermark);
enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dcn30_internal_validate_bw(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate,
+ enum dc_validate_mode validate_mode,
bool allow_self_refresh_only);
void dcn30_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
@@ -78,7 +78,7 @@ void dcn30_populate_dml_writeback_from_context(
int dcn30_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dcn30_acquire_post_bldn_3dlut(
struct resource_context *res_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index 121a86a59833..82a205a7c25c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -1706,6 +1706,8 @@ static bool dcn301_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 012c5fd52cb1..3345068a878c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -1481,6 +1481,8 @@ static bool dcn302_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index a8d0b4686f9a..3479e1eab4cd 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -1414,6 +1414,8 @@ static bool dcn303_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 51ca0b2959fc..3ed7f50554e2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1616,14 +1616,14 @@ static bool is_dual_plane(enum surface_pixel_format format)
int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
uint32_t pipe_cnt;
int i;
dc_assert_fp_enabled();
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0; i < pipe_cnt; i++) {
pipes[i].pipe.src.gpuvm = 1;
@@ -1641,7 +1641,7 @@ int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
int dcn31_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1649,7 +1649,7 @@ int dcn31_populate_dml_pipes_from_context(
bool upscaled = false;
DC_FP_START();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1760,7 +1760,7 @@ dcn31_set_mcif_arb_params(struct dc *dc,
enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -1778,19 +1778,19 @@ enum dc_status dcn31_validate_bandwidth(struct dc *dc,
goto validate_fail;
DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true);
DC_FP_END();
- // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ // Disable DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX to set min dcfclk in calculate_wm_and_dlg
if (pipe_cnt == 0)
- fast_validate = false;
+ validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -1850,7 +1850,9 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn31_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static struct clock_source *dcn30_clock_source_create(
@@ -2202,6 +2204,8 @@ static bool dcn31_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp;
@@ -2231,3 +2235,35 @@ struct resource_pool *dcn31_create_resource_pool(
kfree(pool);
return NULL;
}
+
+enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output)
+{
+ struct dc_state *state = link->dc->current_state;
+ int i;
+
+#if defined(CONFIG_DRM_AMD_DC_FP)
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link)
+ link->dc->hwss.calculate_pix_rate_divider((struct dc *)link->dc, state, state->streams[i]);
+
+ for (i = 0; i < pipe_count; i++) {
+ link->dc->res_pool->funcs->build_pipe_pix_clk_params(&pipes[i]);
+
+ // Setup audio
+ if (pipes[i].stream_res.audio != NULL)
+ build_audio_output(state, &pipes[i], &audio_output[i]);
+ }
+#else
+ /* This DCN requires rate divider updates and audio reprogramming to allow DP1<-->DP2 link rate switching,
+ * but the above will not compile on architectures without an FPU.
+ */
+ DC_LOG_WARNING("%s: DP1<-->DP2 link retraining will not work on this DCN on non-FPU platforms", __func__);
+ ASSERT(0);
+#endif
+
+ return DC_OK;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index dd82815d7efe..c32c85ef0ba4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
@@ -39,7 +39,7 @@ struct dcn31_resource_pool {
enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -48,7 +48,7 @@ void dcn31_calculate_wm_and_dlg(
int dcn31_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void
dcn31_populate_dml_writeback_from_context(struct dc *dc,
struct resource_context *res_ctx,
@@ -66,6 +66,12 @@ struct resource_pool *dcn31_create_resource_pool(
unsigned int dcn31_get_det_buffer_size(
const struct dc_state *context);
+enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output);
+
/*temp: B0 specific before switch to dcn313 headers*/
#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 8383e2e59be5..663c49cce4aa 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -926,6 +926,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.seamless_boot_odm_combine = true,
.enable_legacy_fast_update = true,
.using_dml2 = false,
+ .disable_dsc_power_gate = true,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1667,12 +1668,12 @@ static struct clock_source *dcn31_clock_source_create(
static int dcn314_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt;
DC_FP_START();
- pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
DC_FP_END();
return pipe_cnt;
@@ -1696,7 +1697,7 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -1715,19 +1716,19 @@ enum dc_status dcn314_validate_bandwidth(struct dc *dc,
DC_FP_START();
// do not support self refresh only
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, false);
DC_FP_END();
- // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ // Disable DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX to set min dcfclk in calculate_wm_and_dlg
if (pipe_cnt == 0)
- fast_validate = false;
+ validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -1779,7 +1780,9 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static struct clock_source *dcn30_clock_source_create(
@@ -2117,6 +2120,8 @@ static bool dcn314_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_14_ip.max_num_dpp;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
index f8ba531d6342..ac9bb7f097d5 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
@@ -41,7 +41,7 @@ struct dcn314_resource_pool {
enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
struct resource_pool *dcn314_create_resource_pool(
const struct dc_init_data *init_data,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 6c2bb3f63be1..82cc78c291d8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1664,7 +1664,7 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
static int dcn315_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt, crb_idx, crb_pipes;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1674,7 +1674,7 @@ static int dcn315_populate_dml_pipes_from_context(
bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
DC_FP_START();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1844,7 +1844,9 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.get_panel_config_defaults = dcn315_get_panel_config_defaults,
.get_power_profile = dcn315_get_power_profile,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn315_resource_construct(
@@ -2140,6 +2142,8 @@ static bool dcn315_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_15_ip.max_num_dpp;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 568094827212..636110e48d01 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -1610,7 +1610,7 @@ static bool is_dual_plane(enum surface_pixel_format format)
static int dcn316_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1618,7 +1618,7 @@ static int dcn316_populate_dml_pipes_from_context(
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB;
DC_FP_START();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1720,7 +1720,9 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn316_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn316_resource_construct(
@@ -2008,6 +2010,8 @@ static bool dcn316_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_16_ip.max_num_dpp;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index bb0dae0be5b8..9917b366f00c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1742,7 +1742,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
}
}
-static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate)
+static bool dml1_validate(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -1767,7 +1767,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
goto validate_fail;
DC_FP_START();
- out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode);
DC_FP_END();
if (pipe_cnt == 0)
@@ -1778,7 +1778,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -1809,7 +1809,7 @@ validate_out:
enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
unsigned int i;
enum dc_status status;
@@ -1827,11 +1827,11 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc,
if (dc->debug.using_dml2)
status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
else
- status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
- if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) {
/* check new stream configuration still supports cursor if subvp used */
for (i = 0; i < context->stream_count; i++) {
stream = context->streams[i];
@@ -1846,14 +1846,14 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc,
};
}
- if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
/* attempt to validate again with subvp disabled due to cursor */
if (dc->debug.using_dml2)
status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
else
- status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
return status;
@@ -1862,7 +1862,7 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc,
int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1878,7 +1878,7 @@ int dcn32_populate_dml_pipes_from_context(
int num_subvp_none = 0;
int odm_slice_count;
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
/* For single display subvp, look for subvp main so if we have phantom
* pipe, we can set odm policy to match main pipe
@@ -1960,7 +1960,7 @@ int dcn32_populate_dml_pipes_from_context(
/* Only populate DML input with subvp info for full updates.
* This is just a workaround -- needs a proper fix.
*/
- if (!fast_validate) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
switch (dc_state_get_pipe_subvp_type(context, pipe)) {
case SUBVP_MAIN:
pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport;
@@ -2061,21 +2061,15 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-
DC_FP_START();
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
- dml2_opt->use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- dml2_opt->use_clock_dc_limits = true;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+ dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
}
@@ -2257,7 +2251,7 @@ static bool dcn32_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN2 and later ASICs
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -2276,6 +2270,7 @@ static bool dcn32_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
@@ -2505,6 +2500,8 @@ static bool dcn32_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2519,7 +2516,6 @@ static bool dcn32_resource_construct(
}
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
@@ -2551,6 +2547,10 @@ static bool dcn32_resource_construct(
if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
dc->config.sdpif_request_limit_words_per_umc = 16;
+ /* init DC limited DML2 options */
+ memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
+ dc->dml2_dc_power_options.use_clock_dc_limits = true;
+
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index d60ed77eda80..82f966cf4ed2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -100,12 +100,12 @@ void dcn32_add_phantom_pipes(struct dc *dc,
enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn32_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 7db1f7a5613f..061c0907d802 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1580,21 +1580,15 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-
DC_FP_START();
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
- dml2_opt->use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- dml2_opt->use_clock_dc_limits = true;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+ dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
}
@@ -1761,8 +1755,8 @@ static bool dcn321_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
- dc->caps.color.dpp.ogam_ram = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
+ dc->caps.color.dpp.ogam_ram = 0;
// no OGAM ROM on DCN2 and later ASICs
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
@@ -1780,6 +1774,7 @@ static bool dcn321_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
@@ -2004,6 +1999,8 @@ static bool dcn321_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2018,7 +2015,6 @@ static bool dcn321_resource_construct(
}
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
@@ -2046,6 +2042,10 @@ static bool dcn321_resource_construct(
dc->dml2_options.max_segments_per_hubp = 18;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;
+ /* init DC limited DML2 options */
+ memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
+ dc->dml2_dc_power_options.use_clock_dc_limits = true;
+
return true;
create_fail:
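Both dcn32 and dcn321 (and dcn401 below) drop the per-update scratch copy (dc->dml2_tmp) that used to be memcpy'd from dml2_options and toggled between use_clock_dc_limits = false/true on every bounding-box update. Instead, a second persistent configuration is built once at resource construction. Consolidated, the pattern this patch introduces is (guards on dc->debug.using_dml2 and non-NULL state elided for brevity):

/* Construct time (once): clone the baseline options, then flip the one
 * field that differs for the DC power source. */
memcpy(&dc->dml2_dc_power_options, &dc->dml2_options,
       sizeof(struct dml2_configuration_options));
dc->dml2_dc_power_options.use_clock_dc_limits = true;

/* Update time (every bw bounding-box change): reinit each DML2 instance
 * straight from its matching persistent options, no temporary needed. */
dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
dml2_reinit(dc, &dc->dml2_dc_power_options,
	    &dc->current_state->bw_ctx.dml2_dc_power_source);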
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index e01aa2f2e13e..8475c6eec547 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1734,15 +1734,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode);
- if (fast_validate)
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
@@ -1786,7 +1786,9 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn35_resource_construct(
@@ -1874,7 +1876,7 @@ static bool dcn35_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN301
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -1893,6 +1895,8 @@ static bool dcn35_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
+
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
@@ -2151,12 +2157,13 @@ static bool dcn35_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 4ebe4e00a4f8..0971c0f74186 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1714,15 +1714,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
static enum dc_status dcn351_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode);
- if (fast_validate)
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
@@ -1758,7 +1758,9 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn351_resource_construct(
@@ -1846,7 +1848,7 @@ static bool dcn351_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN301
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -1865,6 +1867,8 @@ static bool dcn351_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
+
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
@@ -2122,13 +2128,14 @@ static bool dcn351_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index db36b8f9ce65..8bae7fcedc22 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -1715,15 +1715,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode);
- if (fast_validate)
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
@@ -1759,7 +1759,9 @@ static struct resource_funcs dcn36_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn36_get_preferred_eng_id_dpia,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn36_resource_construct(
@@ -1847,7 +1849,7 @@ static bool dcn36_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN301
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -1866,6 +1868,8 @@ static bool dcn36_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
+
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
@@ -2124,12 +2130,13 @@ static bool dcn36_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index f420c4dafa03..b3988e38d0a6 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -70,7 +70,6 @@
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#include "dml/dcn401/dcn401_fpu.h"
#include "dc_state_priv.h"
@@ -1608,10 +1607,6 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-
/* re-calculate the available MALL size if required */
if (bw_params->num_channels > 0) {
dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall(
@@ -1622,15 +1617,11 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
DC_FP_START();
- dcn401_update_bw_bounding_box_fpu(dc, bw_params);
-
- dml2_opt->use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- dml2_opt->use_clock_dc_limits = true;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+ dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
}
@@ -1644,7 +1635,7 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta
enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
unsigned int i;
enum dc_status status = DC_OK;
@@ -1662,9 +1653,9 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc,
if (dc->debug.using_dml2)
status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
- if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) {
/* check new stream configuration still supports cursor if subvp used */
for (i = 0; i < context->stream_count; i++) {
stream = context->streams[i];
@@ -1679,12 +1670,12 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc,
};
}
- if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
/* attempt to validate again with subvp disabled due to cursor */
if (dc->debug.using_dml2)
status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
return status;
@@ -1957,8 +1948,30 @@ static bool dcn401_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
dc->config.use_spl = true;
dc->config.prefer_easf = true;
+
+ dc->config.dcn_sharpness_range.sdr_rgb_min = 0;
+ dc->config.dcn_sharpness_range.sdr_rgb_max = 1750;
+ dc->config.dcn_sharpness_range.sdr_rgb_mid = 750;
+ dc->config.dcn_sharpness_range.sdr_yuv_min = 0;
+ dc->config.dcn_sharpness_range.sdr_yuv_max = 3500;
+ dc->config.dcn_sharpness_range.sdr_yuv_mid = 1500;
+ dc->config.dcn_sharpness_range.hdr_rgb_min = 0;
+ dc->config.dcn_sharpness_range.hdr_rgb_max = 2750;
+ dc->config.dcn_sharpness_range.hdr_rgb_mid = 1500;
+
+ dc->config.dcn_override_sharpness_range.sdr_rgb_min = 0;
+ dc->config.dcn_override_sharpness_range.sdr_rgb_max = 3250;
+ dc->config.dcn_override_sharpness_range.sdr_rgb_mid = 1250;
+ dc->config.dcn_override_sharpness_range.sdr_yuv_min = 0;
+ dc->config.dcn_override_sharpness_range.sdr_yuv_max = 3500;
+ dc->config.dcn_override_sharpness_range.sdr_yuv_mid = 1500;
+ dc->config.dcn_override_sharpness_range.hdr_rgb_min = 0;
+ dc->config.dcn_override_sharpness_range.hdr_rgb_max = 2750;
+ dc->config.dcn_override_sharpness_range.hdr_rgb_mid = 1500;
+
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */
@@ -2177,6 +2190,8 @@ static bool dcn401_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2195,7 +2210,6 @@ static bool dcn401_resource_construct(
dc->config.sdpif_request_limit_words_per_umc = 16;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.map_dc_pipes_with_callbacks = true;
@@ -2228,6 +2242,10 @@ static bool dcn401_resource_construct(
/* SPL */
dc->caps.scl_caps.sharpener_support = true;
+ /* init DC limited DML2 options */
+ memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
+ dc->dml2_dc_power_options.use_clock_dc_limits = true;
+
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index dc52a30991af..2ae6831c31ef 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -24,7 +24,7 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta
enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index e0008c5f08ad..55b929ca7982 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -196,7 +196,12 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
struct spl_rect mpc_rec;
- if (use_recout_width_aligned) {
+ if (spl_in->basic_in.custom_width != 0) {
+ mpc_rec.width = spl_in->basic_in.custom_width;
+ mpc_rec.x = spl_in->basic_in.custom_x;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+ } else if (use_recout_width_aligned) {
mpc_rec.width = recout_width_align;
if ((mpc_rec.width * (mpc_slice_idx + 1)) > plane_clip_rec->width) {
mpc_rec.width = plane_clip_rec->width % recout_width_align;
@@ -219,7 +224,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
/* extra pixels in the division remainder need to go to pipes after
* the extra pixel index minus one(epimo) defined here as:
*/
- if (mpc_slice_idx > epimo) {
+ if (mpc_slice_idx > epimo && spl_in->basic_in.custom_width == 0) {
mpc_rec.x += mpc_slice_idx - epimo - 1;
mpc_rec.width += 1;
}
@@ -252,10 +257,10 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i
odm_rec.x = odm_slice_width * odm_slice_idx;
odm_rec.width = is_last_odm_slice ?
- /* last slice width is the reminder of h_active */
- h_active - odm_slice_width * (odm_slice_count - 1) :
- /* odm slice width is the floor of h_active / count */
- odm_slice_width;
+ /* last slice width is the remainder of h_active */
+ h_active - odm_slice_width * (odm_slice_count - 1) :
+ /* odm slice width is the floor of h_active / count */
+ odm_slice_width;
odm_rec.y = 0;
odm_rec.height = v_active;
@@ -884,7 +889,9 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
/* Calculate number of tap with adaptive scaling off */
static void spl_get_taps_non_adaptive_scaler(
- struct spl_scratch *spl_scratch, const struct spl_taps *in_taps, bool always_scale)
+ struct spl_scratch *spl_scratch,
+ const struct spl_taps *in_taps,
+ bool is_subsampled)
{
bool check_max_downscale = false;
@@ -945,14 +952,15 @@ static void spl_get_taps_non_adaptive_scaler(
SPL_ASSERT(check_max_downscale);
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz) && !always_scale)
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))
spl_scratch->scl_data.taps.h_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert) && !always_scale)
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))
spl_scratch->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !always_scale)
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !always_scale)
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_subsampled)
spl_scratch->scl_data.taps.v_taps_c = 1;
+
}
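spl_get_taps_non_adaptive_scaler() now takes is_subsampled instead of always_scale: luma taps unconditionally collapse to 1 at identity ratio, while chroma taps collapse only when the format is not subsampled, since 4:2:0/4:2:2 chroma is scaled even when the overall ratio is 1:1. A condensed sketch of the resulting rule, using the names from this hunk with the scl_data locals abbreviated:

if (IDENTITY_RATIO(ratios.horz))
	taps.h_taps = 1;                 /* 1:1 luma: bypass filtering */
if (IDENTITY_RATIO(ratios.vert))
	taps.v_taps = 1;
/* Subsampled chroma still needs real taps even at 1:1 overall. */
if (IDENTITY_RATIO(ratios.horz_c) && !is_subsampled)
	taps.h_taps_c = 1;
if (IDENTITY_RATIO(ratios.vert_c) && !is_subsampled)
	taps.v_taps_c = 1;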
/* Calculate optimal number of taps */
@@ -965,15 +973,13 @@ static bool spl_get_optimal_number_of_taps(
unsigned int max_taps_y, max_taps_c;
unsigned int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- bool skip_easf = false;
- bool always_scale = spl_in->basic_out.always_scale;
+ bool skip_easf = false;
bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
-
if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
spl_scratch->scl_data.viewport.width > max_downscale_src_width) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -982,7 +988,7 @@ static bool spl_get_optimal_number_of_taps(
/* Disable adaptive scaler and sharpener when integer scaling is enabled */
if (spl_in->scaling_quality.integer_scaling) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -997,8 +1003,9 @@ static bool spl_get_optimal_number_of_taps(
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
* taps = 4 for upscaling
*/
- if (skip_easf)
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
+ if (skip_easf) {
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
+ }
else {
if (spl_is_video_format(spl_in->basic_in.format)) {
spl_scratch->scl_data.taps.h_taps = 6;
@@ -1124,7 +1131,6 @@ static bool spl_get_optimal_number_of_taps(
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) {
spl_scratch->scl_data.taps.h_taps = 1;
spl_scratch->scl_data.taps.v_taps = 1;
-
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1;
@@ -1149,6 +1155,7 @@ static bool spl_get_optimal_number_of_taps(
if ((!*enable_easf_v) && !is_subsampled &&
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)))
spl_scratch->scl_data.taps.v_taps_c = 1;
+
}
}
return true;
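The custom_width/custom_x pair added to struct basic_in (see the dc_spl_types.h hunk below) lets a caller dictate the MPC slice rectangle directly; when custom_width != 0 the recout-aligned and even-split paths are bypassed and the remainder-pixel redistribution is skipped. A condensed sketch of the selection order in calculate_mpc_slice_in_timing_active() after this patch, with the x-offset bookkeeping of the automatic paths elided:

if (spl_in->basic_in.custom_width != 0) {
	/* Non-standard segmentation: caller supplies slice geometry. */
	mpc_rec.width = spl_in->basic_in.custom_width;
	mpc_rec.x     = spl_in->basic_in.custom_x;
} else if (use_recout_width_aligned) {
	mpc_rec.width = recout_width_align;                       /* aligned slicing */
} else {
	mpc_rec.width = plane_clip_rec->width / mpc_slice_count;  /* even split */
}
/* Extra remainder pixels are redistributed only for automatic splits. */
if (mpc_slice_idx > epimo && spl_in->basic_in.custom_width == 0)
	mpc_rec.width += 1;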
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
index 36a284305a70..23d254dea18f 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
@@ -460,6 +460,8 @@ struct basic_in {
enum spl_color_space color_space; // Color Space
unsigned int max_luminance; // Max Luminance TODO: Is determined in dc_hw_sequencer.c is_sdr
bool film_grain_applied; // Film Grain Applied // TODO: To check from where to get this?
+ int custom_width; // Width for non-standard segmentation - used when != 0
+ int custom_x; // Start x for non-standard segmentation - used when custom_width != 0
};
// Basic output information
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 3f3fa1b6a69e..0bafb6710761 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -129,7 +129,9 @@ enum dmub_window_id {
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
DMUB_WINDOW_7_SCRATCH_MEM,
+ DMUB_WINDOW_IB_MEM,
DMUB_WINDOW_SHARED_STATE,
+ DMUB_WINDOW_LSDMA_BUFFER,
DMUB_WINDOW_TOTAL,
};
@@ -355,6 +357,7 @@ struct dmub_diagnostic_data {
uint8_t is_traceport_en : 1;
uint8_t is_cw0_enabled : 1;
uint8_t is_cw6_enabled : 1;
+ uint8_t is_pwait : 1;
};
struct dmub_srv_inbox {
@@ -539,6 +542,7 @@ struct dmub_srv {
uint32_t fw_version;
bool is_virtual;
struct dmub_fb scratch_mem_fb;
+ struct dmub_fb ib_mem_gart;
volatile struct dmub_shared_state_feature_block *shared_state;
volatile const struct dmub_fw_state *fw_state;
@@ -576,6 +580,7 @@ struct dmub_srv {
enum dmub_srv_power_state_type power_state;
struct dmub_diagnostic_data debug;
+ struct dmub_fb lsdma_rb_fb;
};
/**
@@ -602,14 +607,6 @@ struct dmub_notification {
};
};
-/* enum dmub_ips_mode - IPS mode identifier */
-enum dmub_ips_mode {
- DMUB_IPS_MODE_IPS1_MAX = 0,
- DMUB_IPS_MODE_IPS2,
- DMUB_IPS_MODE_IPS1_RCG,
- DMUB_IPS_MODE_IPS1_ONO2_ON
-};
-
/**
* DMUB firmware version helper macro - useful for checking if the version
* of a firmware to know if feature or functionality is supported or present.
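enum dmub_ips_mode leaves dmub_srv.h; its four modes reappear in dmub_cmd.h as enum ips_residency_mode (defined with the IPS residency additions further down). The correspondence below is inferred from the matching enumerator names rather than stated by the patch:

/* Inferred one-to-one mapping from the removed enum to the new one. */
static enum ips_residency_mode from_legacy_ips_mode(int legacy)
{
	switch (legacy) {
	case 0:  return IPS_RESIDENCY__IPS1_MAX;      /* was DMUB_IPS_MODE_IPS1_MAX */
	case 1:  return IPS_RESIDENCY__IPS2;          /* was DMUB_IPS_MODE_IPS2 */
	case 2:  return IPS_RESIDENCY__IPS1_RCG;      /* was DMUB_IPS_MODE_IPS1_RCG */
	default: return IPS_RESIDENCY__IPS1_ONO2_ON;  /* was DMUB_IPS_MODE_IPS1_ONO2_ON */
	}
}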
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index b66bd10cdc9b..c587b3441e07 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -104,6 +104,14 @@
*/
#define DMUB_MAX_FPO_STREAMS 4
+/* Define to ensure that the "common" members always appear in the same
+ * order in different structs for back compat purposes
+ */
+#define COMMON_STREAM_STATIC_SUB_STATE \
+ struct dmub_fams2_cmd_legacy_stream_static_state legacy; \
+ struct dmub_fams2_cmd_subvp_stream_static_state subvp; \
+ struct dmub_fams2_cmd_drr_stream_static_state drr;
+
/* Maximum number of streams on any ASIC. */
#define DMUB_MAX_STREAMS 6
@@ -291,6 +299,31 @@ union dmub_addr {
} u; /*<< Low/high bit access */
uint64_t quad_part; /*<< 64 bit address */
};
+
+/* Flattened structure containing SOC BB parameters stored in the VBIOS
+ * It is not practical to store the entire bounding box in VBIOS since the bounding box struct can gain new parameters.
+ * This also prevents alignment issues when new parameters are added to the SoC BB.
+ * The following parameters should be added since these values can't be obtained elsewhere:
+ * -dml2_soc_power_management_parameters
+ * -dml2_soc_vmin_clock_limits
+ */
+struct dmub_soc_bb_params {
+ uint32_t dram_clk_change_blackout_ns;
+ uint32_t dram_clk_change_read_only_ns;
+ uint32_t dram_clk_change_write_only_ns;
+ uint32_t fclk_change_blackout_ns;
+ uint32_t g7_ppt_blackout_ns;
+ uint32_t stutter_enter_plus_exit_latency_ns;
+ uint32_t stutter_exit_latency_ns;
+ uint32_t z8_stutter_enter_plus_exit_latency_ns;
+ uint32_t z8_stutter_exit_latency_ns;
+ uint32_t z8_min_idle_time_ns;
+ uint32_t type_b_dram_clk_change_blackout_ns;
+ uint32_t type_b_ppt_blackout_ns;
+ uint32_t vmin_limit_dispclk_khz;
+ uint32_t vmin_limit_dcfclk_khz;
+ uint32_t g7_temperature_read_blackout_ns;
+};
#pragma pack(pop)
/**
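The VBIOS copy keeps every latency as a fixed-width uint32_t in nanoseconds, so a consumer converts on read before handing values to DML, which works in microsecond doubles. A hedged sketch; dmub_soc_bb_params is from this patch, but the microsecond destination names are illustrative stand-ins for the dml2_soc_power_management_parameters fields mentioned in the comment above:

/* Illustrative ns -> us conversion for two of the stored latencies. */
static void soc_bb_ns_to_us(const struct dmub_soc_bb_params *bb,
			    double *dram_clk_change_blackout_us,
			    double *fclk_change_blackout_us)
{
	*dram_clk_change_blackout_us = bb->dram_clk_change_blackout_ns / 1000.0;
	*fclk_change_blackout_us     = bb->fclk_change_blackout_ns / 1000.0;
}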
@@ -757,11 +790,29 @@ enum dmub_ips_rcg_disable_type {
DMUB_IPS_RCG_DISABLE = 3
};
+enum dmub_ips_in_vpb_disable_type {
+ DMUB_IPS_VPB_RCG_ONLY = 0, // Legacy behaviour
+ DMUB_IPS_VPB_DISABLE_ALL = 1,
+ DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG = 2,
+ DMUB_IPS_VPB_ENABLE_ALL = 3 // Enable IPS1 Z8, IPS1 and RCG
+};
+
#define DMUB_IPS1_ALLOW_MASK 0x00000001
#define DMUB_IPS2_ALLOW_MASK 0x00000002
#define DMUB_IPS1_COMMIT_MASK 0x00000004
#define DMUB_IPS2_COMMIT_MASK 0x00000008
+enum dmub_ips_comand_type {
+ /**
+ * Start/stop IPS residency measurements for a given IPS mode
+ */
+ DMUB_CMD__IPS_RESIDENCY_CNTL = 0,
+ /**
+ * Query IPS residency information for a given IPS mode
+ */
+ DMUB_CMD__IPS_QUERY_RESIDENCY_INFO = 1,
+};
+
/**
* union dmub_fw_boot_options - Boot option definitions for SCRATCH14
*/
@@ -831,7 +882,7 @@ enum dmub_shared_state_feature_id {
/**
* struct dmub_shared_state_ips_fw - Firmware signals for IPS.
*/
-union dmub_shared_state_ips_fw_signals {
+ union dmub_shared_state_ips_fw_signals {
struct {
uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
@@ -846,7 +897,7 @@ union dmub_shared_state_ips_fw_signals {
/**
* struct dmub_shared_state_ips_signals - Firmware signals for IPS.
*/
-union dmub_shared_state_ips_driver_signals {
+ union dmub_shared_state_ips_driver_signals {
struct {
uint32_t allow_pg : 1; /**< 1 if PG is allowed */
uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */
@@ -856,7 +907,9 @@ union dmub_shared_state_ips_driver_signals {
uint32_t allow_ips0_rcg : 1; /**< 1 is IPS0 RCG is allowed */
uint32_t allow_ips1_rcg : 1; /**< 1 is IPS1 RCG is allowed */
uint32_t allow_ips1z8 : 1; /**< 1 is IPS1 Z8 Retention is allowed */
- uint32_t reserved_bits : 24; /**< Reversed bits */
+ uint32_t allow_dynamic_ips1 : 1; /**< 1 if IPS1 is allowed in dynamic use cases such as VPB */
+ uint32_t allow_dynamic_ips1_z8: 1; /**< 1 if IPS1 z8 ret is allowed in dynamic use cases such as VPB */
+ uint32_t reserved_bits : 22; /**< Reserved bits */
} bits;
uint32_t all;
};
@@ -1508,6 +1561,16 @@ enum dmub_cmd_type {
*/
DMUB_CMD__FUSED_IO = 89,
+ /**
+ * Command type used for all LSDMA commands.
+ */
+ DMUB_CMD__LSDMA = 90,
+
+ /**
+ * Command type use for all IPS commands.
+ */
+ DMUB_CMD__IPS = 91,
+
DMUB_CMD__VBIOS = 128,
};
@@ -1918,6 +1981,121 @@ struct dmub_rb_cmd_fams2_flip {
struct dmub_fams2_flip_info flip_info;
};
+struct dmub_cmd_lsdma_data {
+ union {
+ struct lsdma_init_data {
+ union dmub_addr gpu_addr_base;
+ uint32_t ring_size;
+ } init_data;
+ struct lsdma_tiled_copy_data {
+ uint32_t src_addr_lo;
+ uint32_t src_addr_hi;
+ uint32_t dst_addr_lo;
+ uint32_t dst_addr_hi;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t src_width : 16;
+ uint32_t src_height : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t dst_width : 16;
+ uint32_t dst_height : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_swizzle_mode : 5;
+ uint32_t src_mip_max : 5;
+ uint32_t src_mip_id : 5;
+ uint32_t dst_mip_max : 5;
+ uint32_t dst_swizzle_mode : 5;
+ uint32_t dst_mip_id : 5;
+ uint32_t tmz : 1;
+ uint32_t dcc : 1;
+
+ uint32_t data_format : 6;
+ uint32_t padding1 : 4;
+ uint32_t dst_element_size : 3;
+ uint32_t num_type : 3;
+ uint32_t src_element_size : 3;
+ uint32_t write_compress : 2;
+ uint32_t cache_policy_dst : 2;
+ uint32_t cache_policy_src : 2;
+ uint32_t read_compress : 2;
+ uint32_t src_dim : 2;
+ uint32_t dst_dim : 2;
+ uint32_t max_uncom : 1;
+
+ uint32_t max_com : 2;
+ uint32_t padding : 30;
+ } tiled_copy_data;
+ struct lsdma_linear_copy_data {
+ uint32_t count : 30;
+ uint32_t cache_policy_dst : 2;
+
+ uint32_t tmz : 1;
+ uint32_t cache_policy_src : 2;
+ uint32_t padding : 29;
+
+ uint32_t src_lo;
+ uint32_t src_hi;
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+ } linear_copy_data;
+ struct lsdma_reg_write_data {
+ uint32_t reg_addr;
+ uint32_t reg_data;
+ } reg_write_data;
+ struct lsdma_pio_copy_data {
+ union {
+ struct {
+ uint32_t byte_count : 26;
+ uint32_t src_loc : 1;
+ uint32_t dst_loc : 1;
+ uint32_t src_addr_inc : 1;
+ uint32_t dst_addr_inc : 1;
+ uint32_t overlap_disable : 1;
+ uint32_t constant_fill : 1;
+ } fields;
+ uint32_t raw;
+ } packet;
+ uint32_t src_lo;
+ uint32_t src_hi;
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+ } pio_copy_data;
+ struct lsdma_pio_constfill_data {
+ union {
+ struct {
+ uint32_t byte_count : 26;
+ uint32_t src_loc : 1;
+ uint32_t dst_loc : 1;
+ uint32_t src_addr_inc : 1;
+ uint32_t dst_addr_inc : 1;
+ uint32_t overlap_disable : 1;
+ uint32_t constant_fill : 1;
+ } fields;
+ uint32_t raw;
+ } packet;
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+ uint32_t data;
+ } pio_constfill_data;
+
+ uint32_t all[14];
+ } u;
+
+};
+
+struct dmub_rb_cmd_lsdma {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_lsdma_data lsdma_data;
+};
+
struct dmub_optc_state_v2 {
uint32_t v_total_min;
uint32_t v_total_max;
@@ -1949,6 +2127,28 @@ enum fams2_stream_type {
FAMS2_STREAM_TYPE_SUBVP = 4,
};
+struct dmub_rect16 {
+ /**
+ * Dirty rect x offset.
+ */
+ uint16_t x;
+
+ /**
+ * Dirty rect y offset.
+ */
+ uint16_t y;
+
+ /**
+ * Dirty rect width.
+ */
+ uint16_t width;
+
+ /**
+ * Dirty rect height.
+ */
+ uint16_t height;
+};
+
/* static stream state */
struct dmub_fams2_legacy_stream_static_state {
uint8_t vactive_det_fill_delay_otg_vlines;
@@ -2021,11 +2221,13 @@ union dmub_fams2_stream_static_sub_state {
}; //v0
union dmub_fams2_cmd_stream_static_sub_state {
- struct dmub_fams2_cmd_legacy_stream_static_state legacy;
- struct dmub_fams2_cmd_subvp_stream_static_state subvp;
- struct dmub_fams2_cmd_drr_stream_static_state drr;
+ COMMON_STREAM_STATIC_SUB_STATE
}; //v1
+union dmub_fams2_stream_static_sub_state_v2 {
+ COMMON_STREAM_STATIC_SUB_STATE
+}; //v2
+
struct dmub_fams2_stream_static_state {
enum fams2_stream_type type;
uint32_t otg_vline_time_ns;
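Because both the v1 union (dmub_fams2_cmd_stream_static_sub_state) and the new v2 union are expanded from the same COMMON_STREAM_STATIC_SUB_STATE member list, their layouts stay identical by construction. A compile-time check one could add to pin that invariant down; this assertion is not part of the patch:

/* Illustrative only: lock the v1/v2 layout equivalence at build time. */
_Static_assert(sizeof(union dmub_fams2_cmd_stream_static_sub_state) ==
	       sizeof(union dmub_fams2_stream_static_sub_state_v2),
	       "FAMS2 v1/v2 sub-state unions must stay layout-compatible");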
@@ -2091,7 +2293,7 @@ struct dmub_fams2_cmd_stream_static_base_state {
struct dmub_fams2_stream_static_state_v1 {
struct dmub_fams2_cmd_stream_static_base_state base;
- union dmub_fams2_cmd_stream_static_sub_state sub_state;
+ union dmub_fams2_stream_static_sub_state_v2 sub_state;
}; //v1
/**
@@ -2139,6 +2341,11 @@ union dmub_cmd_fams2_config {
} stream_v1; //v1
};
+struct dmub_fams2_config_v2 {
+ struct dmub_cmd_fams2_global_config global;
+ struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1
+};
+
/**
* DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy)
*/
@@ -2148,6 +2355,22 @@ struct dmub_rb_cmd_fams2 {
};
/**
+ * Indirect buffer descriptor
+ */
+struct dmub_ib_data {
+ union dmub_addr src; // location of indirect buffer in memory
+ uint16_t size; // indirect buffer size in bytes
+};
+
+/**
+ * DMUB rb command definition for commands passed over indirect buffer
+ */
+struct dmub_rb_cmd_ib {
+ struct dmub_cmd_header header;
+ struct dmub_ib_data ib_data;
+};
+
+/**
* enum dmub_cmd_idle_opt_type - Idle optimization command type.
*/
enum dmub_cmd_idle_opt_type {
@@ -2170,6 +2393,11 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware notify power state.
*/
DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3,
+
+ /**
+ * DCN notify to release HW.
+ */
+ DMUB_CMD__IDLE_OPT_RELEASE_HW = 4,
};
/**
@@ -2315,7 +2543,8 @@ struct dmub_dig_transmitter_control_data_v1_7 {
uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */
uint8_t HPO_instance; /**< HPO instance (0: inst0, 1: inst1) */
uint8_t reserved1; /**< For future use */
- uint8_t reserved2[3]; /**< For future use */
+ uint8_t skip_phy_ssc_reduction;
+ uint8_t reserved2[2]; /**< For future use */
uint32_t reserved3[11]; /**< For future use */
};
@@ -2933,6 +3162,7 @@ enum dmub_cmd_fams_type {
DMUB_CMD__FAMS2_CONFIG = 4,
DMUB_CMD__FAMS2_DRR_UPDATE = 5,
DMUB_CMD__FAMS2_FLIP = 6,
+ DMUB_CMD__FAMS2_IB_CONFIG = 7,
};
/**
@@ -3818,6 +4048,14 @@ struct dmub_cmd_replay_copy_settings_data {
*/
uint8_t digbe_inst;
/**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
* AUX HW instance.
*/
uint8_t aux_inst;
@@ -3861,6 +4099,11 @@ struct dmub_cmd_replay_copy_settings_data {
* Use for AUX-less ALPM LFPS wake operation
*/
struct dmub_alpm_auxless_data auxless_alpm_data;
+
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
@@ -3916,6 +4159,18 @@ struct dmub_rb_cmd_replay_enable_data {
* This does not support HDMI/DP2 for now.
*/
uint8_t phy_rate;
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
@@ -4416,6 +4671,37 @@ enum dmub_cmd_abm_type {
DMUB_CMD__ABM_GET_HISTOGRAM_DATA = 11,
};
+/**
+ * LSDMA command sub-types.
+ */
+enum dmub_cmd_lsdma_type {
+ /**
+ * Initialize parameters for LSDMA.
+ * Ring buffer is mapped to the ring buffer
+ */
+ DMUB_CMD__LSDMA_INIT_CONFIG = 0,
+ /**
+ * LSDMA copies data from source to destination linearly
+ */
+ DMUB_CMD__LSDMA_LINEAR_COPY = 1,
+ /**
+ * Send the tiled-to-tiled copy command
+ */
+ DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 2,
+ /**
+ * Send the poll reg write command
+ */
+ DMUB_CMD__LSDMA_POLL_REG_WRITE = 3,
+ /**
+ * Send the pio copy command
+ */
+ DMUB_CMD__LSDMA_PIO_COPY = 4,
+ /**
+ * Send the pio constfill command
+ */
+ DMUB_CMD__LSDMA_PIO_CONSTFILL = 5,
+};
+
struct abm_ace_curve {
/**
* @offsets: ACE curve offsets.
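Putting the LSDMA additions together: a driver-side sketch that builds a linear-copy command from the payload defined in struct lsdma_linear_copy_data earlier. The header field names (type/sub_type) and any count-encoding quirk (whether the 30-bit count is bytes or bytes-1) are assumptions, since only the payload layout and the sub-type enum appear in this patch:

static void build_lsdma_linear_copy(union dmub_rb_cmd *cmd,
				    uint64_t src, uint64_t dst, uint32_t bytes)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->lsdma.header.type = DMUB_CMD__LSDMA;                 /* assumed field names */
	cmd->lsdma.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY;

	cmd->lsdma.lsdma_data.u.linear_copy_data.count  = bytes;  /* 30-bit field */
	cmd->lsdma.lsdma_data.u.linear_copy_data.src_lo = lower_32_bits(src);
	cmd->lsdma.lsdma_data.u.linear_copy_data.src_hi = upper_32_bits(src);
	cmd->lsdma.lsdma_data.u.linear_copy_data.dst_lo = lower_32_bits(dst);
	cmd->lsdma.lsdma_data.u.linear_copy_data.dst_hi = upper_32_bits(dst);
}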
@@ -5621,6 +5907,59 @@ struct dmub_rb_cmd_assr_enable {
};
/**
+ * Current definition of "ips_mode" from driver
+ */
+enum ips_residency_mode {
+ IPS_RESIDENCY__IPS1_MAX,
+ IPS_RESIDENCY__IPS2,
+ IPS_RESIDENCY__IPS1_RCG,
+ IPS_RESIDENCY__IPS1_ONO2_ON,
+};
+
+#define NUM_IPS_HISTOGRAM_BUCKETS 16
+
+/**
+ * IPS residency statistics to be sent to driver - subset of struct dmub_ips_residency_stats
+ */
+struct dmub_ips_residency_info {
+ uint32_t residency_millipercent;
+ uint32_t entry_counter;
+ uint32_t histogram[NUM_IPS_HISTOGRAM_BUCKETS];
+ uint64_t total_time_us;
+ uint64_t total_inactive_time_us;
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__IPS_RESIDENCY_CNTL command.
+ */
+struct dmub_cmd_ips_residency_cntl_data {
+ uint8_t panel_inst;
+ uint8_t start_measurement;
+ uint8_t padding[2]; // align to 4-byte boundary
+};
+
+struct dmub_rb_cmd_ips_residency_cntl {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_ips_residency_cntl_data cntl_data;
+};
+
+/**
+ * Data passed from FW to driver in a DMUB_CMD__IPS_QUERY_RESIDENCY_INFO command.
+ */
+struct dmub_cmd_ips_query_residency_info_data {
+ union dmub_addr dest;
+ uint32_t size;
+ uint32_t ips_mode;
+ uint8_t panel_inst;
+ uint8_t padding[3]; // align to 4-byte boundary
+};
+
+struct dmub_rb_cmd_ips_query_residency_info {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_ips_query_residency_info_data info_data;
+};
+
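The expected driver flow for the two IPS sub-commands: start a measurement with DMUB_CMD__IPS_RESIDENCY_CNTL, later stop it, then issue DMUB_CMD__IPS_QUERY_RESIDENCY_INFO with a GART destination so the firmware can write back a dmub_ips_residency_info blob. A hedged sketch; the payload structs are from this patch, while the header field names and submission path are assumed from the usual DMUB command pattern:

union dmub_rb_cmd cmd = {0};

/* 1. Start counting residency for the panel. */
cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS;
cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL;
cmd.ips_residency_cntl.cntl_data.panel_inst = 0;
cmd.ips_residency_cntl.cntl_data.start_measurement = 1;   /* 0 stops it */
/* ...submit through the driver's DMUB command helper (assumed)... */

/* 2. After stopping, ask FW to write the stats to a GART buffer. */
cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS;
cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO;
cmd.ips_query_residency_info.info_data.ips_mode = IPS_RESIDENCY__IPS2;
cmd.ips_query_residency_info.info_data.size = sizeof(struct dmub_ips_residency_info);
/* info_data.dest takes the GART address of the destination buffer. */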
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
@@ -5926,13 +6265,25 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
*/
struct dmub_rb_cmd_assr_enable assr_enable;
+
struct dmub_rb_cmd_fams2 fams2_config;
+ struct dmub_rb_cmd_ib ib_fams2_config;
+
struct dmub_rb_cmd_fams2_drr_update fams2_drr_update;
struct dmub_rb_cmd_fams2_flip fams2_flip;
struct dmub_rb_cmd_fused_io fused_io;
+
+ /**
+ * Definition of a DMUB_CMD__LSDMA command.
+ */
+ struct dmub_rb_cmd_lsdma lsdma;
+
+ struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl;
+
+ struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index a308bd604677..3f38db752b84 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -416,7 +416,7 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub)
void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
+ uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
struct dmub_timeout_info timeout = {0};
@@ -466,6 +466,9 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub)
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
+
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
dmub->debug.is_dmcub_soft_reset = is_soft_reset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 72a0f078cd1a..2228d62adc7e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -92,19 +92,15 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
- if (in_reset == 0) {
+ if (in_reset == 0 && is_enabled != 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- /**
- * Timeout covers both the ACK and the wait
- * for remaining work to finish.
- */
-
for (i = 0; i < timeout; ++i) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;
@@ -130,11 +126,9 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
/* Force reset in case we timed out, DMCUB is likely hung. */
}
- REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
-
if (is_enabled) {
REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ udelay(1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
}
@@ -160,11 +154,7 @@ void dmub_dcn35_reset_release(struct dmub_srv *dmub)
LONO_SOCCLK_GATE_DISABLE, 1,
LONO_DMCUBCLK_GATE_DISABLE, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
- udelay(1);
REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- udelay(1);
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0);
}
@@ -464,7 +454,7 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub)
void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset;
+ uint32_t is_dmub_enabled, is_soft_reset, is_pwait;
uint32_t is_traceport_enabled, is_cw6_enabled;
struct dmub_timeout_info timeout = {0};
@@ -515,6 +505,9 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
+
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
dmub->debug.is_dmcub_soft_reset = is_soft_reset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index 2575dbc448f7..b31adbd0d685 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -413,7 +413,7 @@ uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub)
void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
+ uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
struct dmub_timeout_info timeout = {0};
@@ -464,6 +464,9 @@ void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub)
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
+
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
dmub->debug.is_dmcub_soft_reset = is_soft_reset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index acca7943a8c8..b17a19400c06 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -65,6 +65,12 @@
/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (1024)
+/* Default indirect buffer size. */
+#define DMUB_IB_MEM_SIZE (1280)
+
+/* Default LSDMA ring buffer size. */
+#define DMUB_LSDMA_RB_SIZE (64 * 1024)
+
/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
/* Base addresses. */
@@ -559,7 +565,9 @@ enum dmub_status
window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
+ window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE;
window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size);
+ window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE;
out->fb_size =
dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
@@ -645,6 +653,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
+ struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM];
struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
@@ -655,7 +664,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_INVALID;
if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
- !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) {
+ !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) {
ASSERT(0);
return DMUB_STATUS_INVALID;
}
@@ -741,6 +750,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->scratch_mem_fb = *scratch_mem_fb;
+ dmub->ib_mem_gart = *ib_mem_gart;
+
if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 250f09922d2f..71efd2770c99 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -147,7 +147,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
((unsigned int)(div64_u64((1000000000ULL * 1000000),
refresh_in_uhz)));
- if (MICRO_HZ_TO_HZ(refresh_in_uhz) <= stream->timing.min_refresh_in_uhz) {
+ if (refresh_in_uhz <= stream->timing.min_refresh_in_uhz) {
/* When the target refresh rate is the minimum panel refresh rate,
* round down the vtotal value to avoid stretching vblank over
* panel's vtotal boundary.
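The freesync change fixes a unit mismatch: refresh_in_uhz and min_refresh_in_uhz are both in micro-Hz, but the old code converted the left side to Hz before comparing. A worked check, assuming the _uhz suffix means micro-Hz on both fields:

/* target 48 Hz: refresh_in_uhz = 48,000,000, min_refresh_in_uhz = 48,000,000
 * old: MICRO_HZ_TO_HZ(48,000,000) = 48 <= 48,000,000 -> true for almost any
 *      target rate, so the vtotal round-down path fired far too often
 * new: 48,000,000 <= 48,000,000 -> true only at the actual panel minimum
 */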
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 758a8aa31fbe..391209a3bf29 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -79,4 +79,6 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
bool fill_custom_backlight_caps(unsigned int config_no,
struct dm_acpi_atif_backlight_caps *caps);
void reset_replay_dsync_error_count(struct dc_link *link);
+void change_replay_to_psr(struct dc_link *link);
+void change_psr_to_replay(struct dc_link *link);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 11374a2cbab8..bfb446736ca8 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -396,6 +396,7 @@ enum amd_dpm_forced_level;
* (such as allocating any required memory)
* @suspend: handles IP specific hw/sw changes for suspend
* @resume: handles IP specific hw/sw changes for resume
+ * @complete: handles IP specific changes after resume
* @is_idle: returns current IP block idle status
* @wait_for_idle: poll for idle
* @check_soft_reset: check soft reset the IP block
@@ -427,6 +428,7 @@ struct amd_ip_funcs {
int (*prepare_suspend)(struct amdgpu_ip_block *ip_block);
int (*suspend)(struct amdgpu_ip_block *ip_block);
int (*resume)(struct amdgpu_ip_block *ip_block);
+ void (*complete)(struct amdgpu_ip_block *ip_block);
bool (*is_idle)(struct amdgpu_ip_block *ip_block);
int (*wait_for_idle)(struct amdgpu_ip_block *ip_block);
bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block);
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index f4d914dc731f..e2b1ea7467b0 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -108,6 +108,8 @@ enum pp_clock_type {
PP_VCLK1,
PP_DCLK,
PP_DCLK1,
+ PP_ISPICLK,
+ PP_ISPXCLK,
OD_SCLK,
OD_MCLK,
OD_VDDC_CURVE,
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 5c1cbdc122d2..71d986dd7a6e 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -98,6 +98,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
case AMD_IP_BLOCK_TYPE_VPE:
+ case AMD_IP_BLOCK_TYPE_ISP:
if (pp_funcs && pp_funcs->set_powergating_by_smu)
ret = (pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate, 0));
@@ -852,22 +853,16 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
uint32_t max)
{
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret = 0;
-
- if (type != PP_SCLK)
- return -EINVAL;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
- mutex_lock(&adev->pm.mutex);
- ret = smu_set_soft_freq_range(smu,
- SMU_SCLK,
+ guard(mutex)(&adev->pm.mutex);
+
+ return smu_set_soft_freq_range(smu,
+ type,
min,
max);
- mutex_unlock(&adev->pm.mutex);
-
- return ret;
}
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
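amdgpu_dpm_set_soft_freq_range() above swaps the manual mutex_lock()/mutex_unlock() pair for guard(mutex) from <linux/cleanup.h>, which releases the lock automatically at scope exit on every return path, and lifts the old PP_SCLK-only restriction by forwarding the clock type through. The general guard pattern, for reference:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int do_locked_work(struct mutex *lock)
{
	guard(mutex)(lock);	/* unlocked automatically when scope ends */

	if (!some_precondition())	/* illustrative helper, not from the patch */
		return -EINVAL;		/* early return: guard still unlocks */

	return 0;			/* normal return: guard unlocks here too */
}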
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index edd9895b46c0..4b64851fdb42 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1398,6 +1398,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
if (ret)
return -EINVAL;
parameter_size++;
+ if (!tmp_str)
+ break;
while (isspace(*tmp_str))
tmp_str++;
}
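This sysfs parser, and parse_input_od_command_lines() in the next hunk, tokenize with strsep()-style iteration: once the last token is consumed the cursor becomes NULL, so the trailing isspace(*tmp_str) whitespace skip dereferenced NULL for inputs without trailing spaces. The added `if (!tmp_str) break;` exits before that. A minimal reproduction of the loop shape, assuming strsep() is the tokenizer (the hunk context does not show the call itself):

char *sub_str, *tmp_str = input_buf;
long param;

while (tmp_str[0]) {
	sub_str = strsep(&tmp_str, " ");   /* sets tmp_str to NULL after last token */
	if (kstrtol(sub_str, 0, &param))
		return -EINVAL;
	parameter_size++;

	if (!tmp_str)                      /* the fix: nothing left to skip over */
		break;
	while (isspace(*tmp_str))
		tmp_str++;
}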
@@ -1890,7 +1892,7 @@ out:
static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states)
{
- if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_smart_shift(adev))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
@@ -1901,7 +1903,7 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
{
uint32_t ss_power;
- if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_smart_shift(adev))
*states = ATTR_STATE_UNSUPPORTED;
else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
(void *)&ss_power))
@@ -3645,6 +3647,9 @@ static int parse_input_od_command_lines(const char *buf,
return -EINVAL;
parameter_size++;
+ if (!tmp_str)
+ break;
+
while (isspace(*tmp_str))
tmp_str++;
}
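
Both parsing loops in this file gain the same guard: when tokenizing exhausts the input, strsep() sets the cursor to NULL, and the old code went on to dereference it in isspace(*tmp_str). A standalone illustration of the fixed loop shape — the buffer and names are hypothetical, but the strsep/NULL-check interplay is the point:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "1 2   3";
	char *tmp_str = buf;
	char *tok;
	long values[8];
	int n = 0;

	while ((tok = strsep(&tmp_str, " ")) != NULL && n < 8) {
		if (*tok == '\0')
			continue;                 /* collapsed delimiter */
		values[n++] = strtol(tok, NULL, 10);
		if (!tmp_str)                     /* end of input: strsep set it NULL */
			break;
		while (isspace((unsigned char)*tmp_str))
			tmp_str++;                /* safe only after the NULL check */
	}
	printf("parsed %d values\n", n);          /* -> parsed 3 values */
	return 0;
}
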
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 34e71727b27d..307ebf7e3226 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -1242,7 +1242,7 @@ static void kv_dpm_enable_bapm(void *handle, bool enable)
if (pi->bapm_enable) {
ret = amdgpu_kv_smc_bapm_enable(adev, enable);
if (ret)
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
}
}
@@ -1266,40 +1266,40 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
ret = kv_process_firmware_header(adev);
if (ret) {
- DRM_ERROR("kv_process_firmware_header failed\n");
+ drm_err(adev_to_drm(adev), "kv_process_firmware_header failed\n");
return ret;
}
kv_init_fps_limits(adev);
kv_init_graphics_levels(adev);
ret = kv_program_bootup_state(adev);
if (ret) {
- DRM_ERROR("kv_program_bootup_state failed\n");
+ drm_err(adev_to_drm(adev), "kv_program_bootup_state failed\n");
return ret;
}
kv_calculate_dfs_bypass_settings(adev);
ret = kv_upload_dpm_settings(adev);
if (ret) {
- DRM_ERROR("kv_upload_dpm_settings failed\n");
+ drm_err(adev_to_drm(adev), "kv_upload_dpm_settings failed\n");
return ret;
}
ret = kv_populate_uvd_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_uvd_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_uvd_table failed\n");
return ret;
}
ret = kv_populate_vce_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_vce_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_vce_table failed\n");
return ret;
}
ret = kv_populate_samu_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_samu_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_samu_table failed\n");
return ret;
}
ret = kv_populate_acp_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_acp_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_acp_table failed\n");
return ret;
}
kv_program_vc(adev);
@@ -1310,39 +1310,39 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
if (pi->enable_auto_thermal_throttling) {
ret = kv_enable_auto_thermal_throttling(adev);
if (ret) {
- DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_auto_thermal_throttling failed\n");
return ret;
}
}
ret = kv_enable_dpm_voltage_scaling(adev);
if (ret) {
- DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_dpm_voltage_scaling failed\n");
return ret;
}
ret = kv_set_dpm_interval(adev);
if (ret) {
- DRM_ERROR("kv_set_dpm_interval failed\n");
+ drm_err(adev_to_drm(adev), "kv_set_dpm_interval failed\n");
return ret;
}
ret = kv_set_dpm_boot_state(adev);
if (ret) {
- DRM_ERROR("kv_set_dpm_boot_state failed\n");
+ drm_err(adev_to_drm(adev), "kv_set_dpm_boot_state failed\n");
return ret;
}
ret = kv_enable_ulv(adev, true);
if (ret) {
- DRM_ERROR("kv_enable_ulv failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_ulv failed\n");
return ret;
}
kv_start_dpm(adev);
ret = kv_enable_didt(adev, true);
if (ret) {
- DRM_ERROR("kv_enable_didt failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_didt failed\n");
return ret;
}
ret = kv_enable_smc_cac(adev, true);
if (ret) {
- DRM_ERROR("kv_enable_smc_cac failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_smc_cac failed\n");
return ret;
}
@@ -1350,7 +1350,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
ret = amdgpu_kv_smc_bapm_enable(adev, false);
if (ret) {
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
return ret;
}
@@ -1358,7 +1358,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
if (ret) {
- DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+ drm_err(adev_to_drm(adev), "kv_set_thermal_temperature_range failed\n");
return ret;
}
amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
@@ -1382,7 +1382,7 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
err = amdgpu_kv_smc_bapm_enable(adev, false);
if (err)
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
if (adev->asic_type == CHIP_MULLINS)
kv_enable_nb_dpm(adev, false);
@@ -1920,7 +1920,7 @@ static int kv_dpm_set_power_state(void *handle)
if (pi->bapm_enable) {
ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
if (ret) {
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
return ret;
}
}
@@ -1931,7 +1931,7 @@ static int kv_dpm_set_power_state(void *handle)
kv_update_dfs_bypass_settings(adev, new_ps);
ret = kv_calculate_ds_divider(adev);
if (ret) {
- DRM_ERROR("kv_calculate_ds_divider failed\n");
+ drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n");
return ret;
}
kv_calculate_nbps_level_settings(adev);
@@ -1947,7 +1947,7 @@ static int kv_dpm_set_power_state(void *handle)
ret = kv_update_vce_dpm(adev, new_ps, old_ps);
if (ret) {
- DRM_ERROR("kv_update_vce_dpm failed\n");
+ drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n");
return ret;
}
kv_update_sclk_t(adev);
@@ -1960,7 +1960,7 @@ static int kv_dpm_set_power_state(void *handle)
kv_update_dfs_bypass_settings(adev, new_ps);
ret = kv_calculate_ds_divider(adev);
if (ret) {
- DRM_ERROR("kv_calculate_ds_divider failed\n");
+ drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n");
return ret;
}
kv_calculate_nbps_level_settings(adev);
@@ -1972,7 +1972,7 @@ static int kv_dpm_set_power_state(void *handle)
kv_set_enabled_levels(adev);
ret = kv_update_vce_dpm(adev, new_ps, old_ps);
if (ret) {
- DRM_ERROR("kv_update_vce_dpm failed\n");
+ drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n");
return ret;
}
kv_update_acp_boot_level(adev);
@@ -2521,7 +2521,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
if (high_temp > max_temp)
high_temp = max_temp;
if (high_temp < low_temp) {
- DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ drm_err(adev_to_drm(adev), "invalid thermal range: %d - %d\n", low_temp, high_temp);
return -EINVAL;
}
@@ -2563,7 +2563,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
data_offset);
if (crev != 8) {
- DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ drm_err(adev_to_drm(adev), "Unsupported IGP table: %d %d\n", frev, crev);
return -EINVAL;
}
pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
@@ -2579,7 +2579,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
else
pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
- DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+ drm_err(adev_to_drm(adev), "The htcTmpLmt should be larger than htcHystLmt.\n");
}
if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
@@ -2886,16 +2886,18 @@ kv_dpm_print_power_state(void *handle, void *request_ps)
struct kv_ps *ps = kv_get_ps(rps);
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
+ amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
+ drm_dbg(adev_to_drm(adev), "vclk: %d, dclk: %d\n",
+ rps->vclk, rps->dclk);
for (i = 0; i < ps->num_levels; i++) {
struct kv_pl *pl = &ps->levels[i];
- printk("\t\tpower level %d sclk: %u vddc: %u\n",
- i, pl->sclk,
- kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
+ drm_dbg(adev_to_drm(adev),
+ "power level %d sclk: %u vddc: %u\n",
+ i, pl->sclk,
+ kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
}
- amdgpu_dpm_print_ps_status(adev, rps);
+ amdgpu_dpm_dbg_print_ps_status(adev, rps);
}
static void kv_dpm_fini(struct amdgpu_device *adev)
@@ -3013,13 +3015,13 @@ static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block)
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
if (amdgpu_dpm == 1)
amdgpu_pm_print_power_states(adev);
- DRM_INFO("amdgpu: dpm initialized\n");
+ drm_info(adev_to_drm(adev), "dpm initialized\n");
return 0;
dpm_failed:
kv_dpm_fini(adev);
- DRM_ERROR("amdgpu: dpm initialization failed\n");
+ drm_err(adev_to_drm(adev), "dpm initialization failed: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index c7518b13e787..ea3ace882a10 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -47,7 +47,7 @@
#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
-void amdgpu_dpm_print_class_info(u32 class, u32 class2)
+void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2)
{
const char *s;
@@ -66,71 +66,45 @@ void amdgpu_dpm_print_class_info(u32 class, u32 class2)
s = "performance";
break;
}
- printk("\tui class: %s\n", s);
- printk("\tinternal class:");
+ drm_dbg(adev_to_drm(adev), "\tui class: %s\n", s);
if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
(class2 == 0))
- pr_cont(" none");
- else {
- if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
- pr_cont(" boot");
- if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- pr_cont(" thermal");
- if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
- pr_cont(" limited_pwr");
- if (class & ATOM_PPLIB_CLASSIFICATION_REST)
- pr_cont(" rest");
- if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
- pr_cont(" forced");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- pr_cont(" 3d_perf");
- if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
- pr_cont(" ovrdrv");
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- pr_cont(" uvd");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
- pr_cont(" 3d_low");
- if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- pr_cont(" acpi");
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- pr_cont(" uvd_hd2");
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- pr_cont(" uvd_hd");
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- pr_cont(" uvd_sd");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
- pr_cont(" limited_pwr2");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- pr_cont(" ulv");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- pr_cont(" uvd_mvc");
- }
- pr_cont("\n");
+ drm_dbg(adev_to_drm(adev), "\tinternal class: none\n");
+ else
+ drm_dbg(adev_to_drm(adev), "\tinternal class: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ (class & ATOM_PPLIB_CLASSIFICATION_BOOT) ? " boot" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) ? " thermal" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) ? " limited_pwr" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_REST) ? " rest" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_FORCED) ? " forced" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) ? " 3d_perf" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) ? " ovrdrv" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ? " uvd" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) ? " 3d_low" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_ACPI) ? " acpi" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) ? " uvd_hd2" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ? " uvd_hd" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ? " uvd_sd" : "",
+ (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) ? " limited_pwr2" : "",
+ (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) ? " ulv" : "",
+ (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) ? " uvd_mvc" : "");
}
-void amdgpu_dpm_print_cap_info(u32 caps)
+void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps)
{
- printk("\tcaps:");
- if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
- pr_cont(" single_disp");
- if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
- pr_cont(" video");
- if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
- pr_cont(" no_dc");
- pr_cont("\n");
+ drm_dbg(adev_to_drm(adev), "\tcaps: %s%s%s\n",
+ (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) ? " single_disp" : "",
+ (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) ? " video" : "",
+ (caps & ATOM_PPLIB_DISALLOW_ON_DC) ? " no_dc" : "");
}
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
- printk("\tstatus:");
- if (rps == adev->pm.dpm.current_ps)
- pr_cont(" c");
- if (rps == adev->pm.dpm.requested_ps)
- pr_cont(" r");
- if (rps == adev->pm.dpm.boot_ps)
- pr_cont(" b");
- pr_cont("\n");
+ drm_dbg(adev_to_drm(adev), "\tstatus:%s%s%s\n",
+ rps == adev->pm.dpm.current_ps ? " c" : "",
+ rps == adev->pm.dpm.requested_ps ? " r" : "",
+ rps == adev->pm.dpm.boot_ps ? " b" : "");
}
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -699,64 +673,64 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
}
if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_NI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_SI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_CI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_KV;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
- DRM_INFO("External GPIO thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "External GPIO thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
- DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "ADT7473 with internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
- DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "EMC2103 with internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
- DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ drm_info(adev_to_drm(adev), "Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
@@ -772,7 +746,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
}
} else {
- DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+ drm_info(adev_to_drm(adev), "Unknown thermal controller type %d at 0x%02x %s fan control\n",
controller->ucType,
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
@@ -943,9 +917,9 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
return -EINVAL;
if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
- printk("switching from power state:\n");
+ drm_dbg(adev_to_drm(adev), "switching from power state\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
- printk("switching to power state:\n");
+ drm_dbg(adev_to_drm(adev), "switching to power state\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
}
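
The printk/pr_cont sequences in this file collapse into single drm_dbg calls: pr_cont continuation fragments from concurrent CPUs can interleave in the log, while one format string with a (flag ? " name" : "") ternary per bit emits each record atomically. A minimal standalone sketch of that pattern (toy flag values, not the ATOM_PPLIB bits):

#include <stdio.h>

#define CLASS_BOOT    (1u << 0)
#define CLASS_THERMAL (1u << 1)
#define CLASS_FORCED  (1u << 2)

static void print_class(unsigned int class)
{
	if (!class) {
		printf("internal class: none\n");
		return;
	}
	/* One atomic line; empty strings drop the unset flags. */
	printf("internal class:%s%s%s\n",
	       (class & CLASS_BOOT)    ? " boot"    : "",
	       (class & CLASS_THERMAL) ? " thermal" : "",
	       (class & CLASS_FORCED)  ? " forced"  : "");
}

int main(void)
{
	print_class(CLASS_BOOT | CLASS_FORCED);   /* -> internal class: boot forced */
	print_class(0);                           /* -> internal class: none */
	return 0;
}
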
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
index 93bd3973330c..7120eef30509 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
@@ -23,10 +23,9 @@
#ifndef __LEGACY_DPM_H__
#define __LEGACY_DPM_H__
-void amdgpu_dpm_print_class_info(u32 class, u32 class2);
-void amdgpu_dpm_print_cap_info(u32 caps);
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps);
+void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2);
+void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps);
+void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps);
int amdgpu_get_platform_caps(struct amdgpu_device *adev);
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 4c0e976004ba..52e732be59e3 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7951,15 +7951,15 @@ static void si_dpm_print_power_state(void *handle,
struct rv7xx_pl *pl;
int i;
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
+ amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
+ drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
for (i = 0; i < ps->performance_level_count; i++) {
pl = &ps->performance_levels[i];
- DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+ drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
}
- amdgpu_dpm_print_ps_status(adev, rps);
+ amdgpu_dpm_dbg_print_ps_status(adev, rps);
}
static int si_dpm_early_init(struct amdgpu_ip_block *ip_block)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
index 79a566f3564a..c305ea4ec17d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
@@ -149,7 +149,7 @@ int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
}
cgs_write_register(hwmgr->device, indirect_port, index);
- return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+ return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
}
int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
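
The smu_helper change is a plain argument-order bugfix: phm_wait_on_register() takes (index, value, mask), but the indirect-register wrapper passed (mask, value), so the poll compared against the wrong pattern. A minimal sketch of the helper's contract, with a stubbed register read standing in for the hardware:

#include <stdint.h>
#include <stdio.h>

static uint32_t read_reg(uint32_t index) { (void)index; return 0x0000BEEF; }

/* Poll until (reg & mask) == (value & mask); returns 0 on match. */
static int wait_on_register(uint32_t index, uint32_t value, uint32_t mask)
{
	for (int tries = 0; tries < 1000; tries++)
		if ((read_reg(index) & mask) == (value & mask))
			return 0;
	return -1;
}

int main(void)
{
	/* value first, then mask — the order the fixed call site now uses;
	 * swapping them masks with the expected value and waits on garbage. */
	return wait_on_register(0x10, 0xBEEF, 0xFFFF);
}
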
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index d79a1d94661a..b47cb4a5f488 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -76,6 +76,10 @@ static void smu_power_profile_mode_get(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
+static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
+static int smu_od_edit_dpm_table(void *handle,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -134,12 +138,17 @@ int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
}
int smu_set_soft_freq_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
+ enum pp_clock_type type,
uint32_t min,
uint32_t max)
{
+ enum smu_clk_type clk_type;
int ret = 0;
+ clk_type = smu_convert_to_smuclk(type);
+ if (clk_type == SMU_CLK_COUNT)
+ return -EINVAL;
+
if (smu->ppt_funcs->set_soft_freq_limited_range)
ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
clk_type,
@@ -307,6 +316,26 @@ static int smu_dpm_set_vpe_enable(struct smu_context *smu,
return ret;
}
+static int smu_dpm_set_isp_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret;
+
+ if (!smu->ppt_funcs->dpm_set_isp_enable)
+ return 0;
+
+ if (atomic_read(&power_gate->isp_gated) ^ enable)
+ return 0;
+
+ ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable);
+ if (!ret)
+ atomic_set(&power_gate->isp_gated, !enable);
+
+ return ret;
+}
+
static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
bool enable)
{
@@ -408,6 +437,12 @@ static int smu_dpm_set_power_gate(void *handle,
dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
gate ? "gate" : "ungate");
break;
+ case AMD_IP_BLOCK_TYPE_ISP:
+ ret = smu_dpm_set_isp_enable(smu, !gate);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to power %s ISP!\n",
+ gate ? "gate" : "ungate");
+ break;
default:
dev_err(smu->adev->dev, "Unsupported block type!\n");
return -EINVAL;
@@ -1004,6 +1039,21 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
return 0;
}
+static void smu_update_gpu_addresses(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG;
+ struct smu_table *driver_table = &(smu_table->driver_table);
+ struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table;
+
+ if (pm_status_table->bo)
+ pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo);
+ if (driver_table->bo)
+ driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo);
+ if (dummy_read_1_table->bo)
+ dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo);
+}
+
/**
* smu_alloc_memory_pool - allocate memory pool in the system memory
*
@@ -1285,6 +1335,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
+ atomic_set(&smu->smu_power.power_gate.isp_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
smu_init_power_profile(smu);
@@ -1672,37 +1723,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
}
}
- ret = smu_system_features_control(smu, true);
- if (ret) {
- dev_err(adev->dev, "Failed to enable requested dpm features!\n");
- return ret;
- }
-
- smu_init_xgmi_plpd_mode(smu);
-
- ret = smu_feature_get_enabled_mask(smu, &features_supported);
- if (ret) {
- dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
- return ret;
- }
- bitmap_copy(feature->supported,
- (unsigned long *)&features_supported,
- feature->feature_num);
-
- if (!smu_is_dpm_running(smu))
- dev_info(adev->dev, "dpm has been disabled\n");
-
- /*
- * Set initialized values (get from vbios) to dpm tables context such as
- * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
- * type of clks.
- */
- ret = smu_set_default_dpm_table(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
- return ret;
- }
-
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
pcie_gen = 4;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
@@ -1738,6 +1758,37 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
+ ret = smu_system_features_control(smu, true);
+ if (ret) {
+ dev_err(adev->dev, "Failed to enable requested dpm features!\n");
+ return ret;
+ }
+
+ smu_init_xgmi_plpd_mode(smu);
+
+ ret = smu_feature_get_enabled_mask(smu, &features_supported);
+ if (ret) {
+ dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
+ return ret;
+ }
+ bitmap_copy(feature->supported,
+ (unsigned long *)&features_supported,
+ feature->feature_num);
+
+ if (!smu_is_dpm_running(smu))
+ dev_info(adev->dev, "dpm has been disabled\n");
+
+ /*
+ * Set initialized values (get from vbios) to dpm tables context such as
+ * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
+ * type of clks.
+ */
+ ret = smu_set_default_dpm_table(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
+ return ret;
+ }
+
ret = smu_get_thermal_temperature_range(smu);
if (ret) {
dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
@@ -1780,6 +1831,9 @@ static int smu_start_smc_engine(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev))
+ smu_update_gpu_addresses(smu);
+
smu->smc_fw_state = SMU_FW_INIT;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
@@ -2144,6 +2198,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
int ret;
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
@@ -2175,6 +2230,18 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
adev->pm.dpm_enabled = true;
+ if (smu->current_power_limit) {
+ ret = smu_set_power_limit(smu, smu->current_power_limit);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
dev_info(adev->dev, "SMU is resumed successfully!\n");
return 0;
@@ -2935,6 +3002,12 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
clk_type = SMU_DCLK; break;
case PP_DCLK1:
clk_type = SMU_DCLK1; break;
+ case PP_ISPICLK:
+ clk_type = SMU_ISPICLK;
+ break;
+ case PP_ISPXCLK:
+ clk_type = SMU_ISPXCLK;
+ break;
case OD_SCLK:
clk_type = SMU_OD_SCLK; break;
case OD_MCLK:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 9aacc7bc1c69..b52e194397e2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -402,6 +402,7 @@ struct smu_power_gate {
atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
atomic_t jpeg_gated;
atomic_t vpe_gated;
+ atomic_t isp_gated;
atomic_t umsch_mm_gated;
};
@@ -1436,6 +1437,12 @@ struct pptable_funcs {
int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable);
/**
+ * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power
+ * management.
+ */
+ int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable);
+
+ /**
* @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power
* management.
*/
@@ -1635,7 +1642,7 @@ int smu_write_watermarks_table(struct smu_context *smu);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max);
-int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type,
uint32_t min, uint32_t max);
int smu_set_gfx_power_up_by_imu(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 1bc30db22f9c..cd44f4254134 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -106,6 +106,7 @@ typedef struct {
#define NUM_FCLK_DPM_LEVELS 8
#define NUM_MEM_PSTATE_LEVELS 4
+#define ISP_ALL_TILES_MASK 0x7FF
typedef struct {
uint32_t UClk;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
index d7505cfc433a..0a2ca544f4e3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -86,8 +86,10 @@ typedef enum {
/*36*/ FEATURE_PIT = 36,
/*37*/ FEATURE_DVO = 37,
/*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38,
+/*39*/ FEATURE_GLOBAL_DPM = 39,
+/*40*/ FEATURE_NODE_POWER_MANAGER = 40,
-/*39*/ NUM_FEATURES = 39
+/*41*/ NUM_FEATURES = 41
} FEATURE_LIST_e;
//enum for MPIO PCIe gen speed msgs
@@ -133,7 +135,7 @@ typedef enum {
GFX_DVM_MARGIN_COUNT
} GFX_DVM_MARGIN_e;
-#define SMU_METRICS_TABLE_VERSION 0x12
+#define SMU_METRICS_TABLE_VERSION 0x13
typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccumulationCounter;
@@ -275,6 +277,16 @@ typedef struct {
//PSNs
uint64_t PublicSerialNumber_AID[4];
uint64_t PublicSerialNumber_XCD[8];
+
+ //XGMI
+ uint32_t MaxXgmiWidth;
+ uint32_t MaxXgmiBitrate;
+
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+
+ // General info
+ uint32_t pldmVersion[2];
} StaticMetricsTable_t;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index eefdaa0b5df6..d7a9e41820fa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -305,6 +305,8 @@ enum smu_clk_type {
SMU_MCLK,
SMU_PCIE,
SMU_LCLK,
+ SMU_ISPICLK,
+ SMU_ISPXCLK,
SMU_OD_CCLK,
SMU_OD_SCLK,
SMU_OD_MCLK,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 7fad5dfb39c4..aac202d0c30e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2444,7 +2444,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
/* lclk dpm table setup */
for (i = 0; i < MAX_PCIE_CONF; i++) {
@@ -2453,25 +2454,27 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
}
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- smu_pcie_arg = (i << 16) |
- ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
- (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
- pptable->PcieLaneCount[i] : pcie_width_cap);
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
-
- if (ret)
- return ret;
-
- if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
- if (pptable->PcieLaneCount[i] > pcie_width_cap)
- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ if (pptable->PcieGenSpeed[i] > pcie_gen_cap ||
+ pptable->PcieLaneCount[i] > pcie_width_cap) {
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
+ pptable->PcieGenSpeed[i] > pcie_gen_cap ?
+ pcie_gen_cap : pptable->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
+ pptable->PcieLaneCount[i] > pcie_width_cap ?
+ pcie_width_cap : pptable->PcieLaneCount[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_gen_cap << 8;
+ smu_pcie_arg |= pcie_width_cap;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
- return 0;
+ return ret;
}
static inline void navi10_dump_od_table(struct smu_context *smu,
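
Across the navi10, sienna_cichlid, and smu_v13_0 hunks the override message keeps the same packed argument — link level in the upper bits, gen-speed code in bits 15:8, lane-count code in bits 7:0 — and the rework only changes when it is sent: just for levels that exceed the platform caps, stopping on the first failure instead of always returning 0. A tiny pack/unpack sketch of the encoding (the code values are placeholders, not the PPTable enumeration):

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen, uint32_t lanes)
{
	return (level << 16) | (gen << 8) | lanes;
}

int main(void)
{
	uint32_t arg = pack_pcie_arg(2, 3, 5);  /* level 2, gen code 3, lane code 5 */

	printf("arg=0x%06x level=%u gen=%u lanes=%u\n",
	       arg, arg >> 16, (arg >> 8) & 0xff, arg & 0xff);
	return 0;
}
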
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 115e3fa456bc..d57591509aed 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2145,7 +2145,8 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint8_t min_gen_speed, max_gen_speed;
uint8_t min_lane_width, max_lane_width;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
@@ -2170,19 +2171,22 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- smu_pcie_arg = (i << 16 |
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) ||
+ table_member1[i] > pcie_gen_cap || table_member2[i] > pcie_width_cap) {
+ smu_pcie_arg = (i << 16 |
pcie_table->pcie_gen[i] << 8 |
pcie_table->pcie_lane[i]);
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
- return 0;
+ return ret;
}
static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index a55ea76d7399..2c9869feba61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -666,7 +666,6 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
@@ -682,31 +681,25 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
switch (clk_type) {
case SMU_OD_SCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
- (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
- (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
break;
case SMU_OD_CCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
- (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
- (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
break;
case SMU_OD_RANGE:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
- smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
- size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
- smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+ size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
+ smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
break;
case SMU_SOCCLK:
/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 9481f897432d..e97b0cf19197 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -497,7 +497,6 @@ static int renoir_print_clk_levels(struct smu_context *smu,
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
bool cur_value_match_level = false;
memset(&metrics, 0, sizeof(metrics));
@@ -510,28 +509,24 @@ static int renoir_print_clk_levels(struct smu_context *smu,
switch (clk_type) {
case SMU_OD_RANGE:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GetMinGfxclkFrequency,
- 0, &min);
- if (ret)
- return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GetMaxGfxclkFrequency,
- 0, &max);
- if (ret)
- return ret;
- size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMinGfxclkFrequency,
+ 0, &min);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxGfxclkFrequency,
+ 0, &max);
+ if (ret)
+ return ret;
+ size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
break;
case SMU_OD_SCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
- max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
- size += sysfs_emit_at(buf, size, "OD_SCLK\n");
- size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
- size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
- }
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+ size += sysfs_emit_at(buf, size, "OD_SCLK\n");
+ size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
+ size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
break;
case SMU_GFXCLK:
case SMU_SCLK:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 6de653d2ed62..c63d2e28954d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -342,6 +342,61 @@ static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
return 0;
}
+static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t min_clk, max_clk;
+
+ if (amdgpu_sriov_vf(smu->adev)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_SOCCLK:
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ min_clk = dpm_table->min;
+ max_clk = dpm_table->max;
+
+ if (min) {
+ if (!min_clk)
+ return -ENODATA;
+ *min = min_clk;
+ }
+ if (max) {
+ if (!max_clk)
+ return -ENODATA;
+ *max = max_clk;
+ }
+
+ } else {
+ return smu_v13_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+ }
+
+ return 0;
+}
+
static int aldebaran_set_default_dpm_table(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -2081,7 +2136,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
.get_bamaco_support = aldebaran_get_bamaco_support,
- .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = aldebaran_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
.set_df_cstate = aldebaran_set_df_cstate,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 1c7235935d14..1a1f2a6b2e52 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2386,7 +2386,8 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
&dpm_context->dpm_tables.pcie_table;
int num_of_levels = pcie_table->num_of_link_levels;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
if (!num_of_levels)
return 0;
@@ -2402,30 +2403,38 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
}
} else {
for (i = 0; i < num_of_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
}
- for (i = 0; i < num_of_levels; i++) {
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
- smu_pcie_arg |= pcie_table->pcie_lane[i];
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- return ret;
- }
-
- return 0;
+ return ret;
}
int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 5a9711e8cf68..e084ed99ec0e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -572,8 +572,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
struct smu_13_0_dpm_table *dpm_table;
- struct smu_13_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -689,24 +687,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -3150,6 +3130,90 @@ static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
+ uint8_t pcie_gen_cap,
+ uint8_t pcie_width_cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ int num_of_levels;
+ uint32_t smu_pcie_arg;
+ uint32_t link_level;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+
+ num_of_levels = pcie_table->num_of_link_levels;
+ if (!num_of_levels)
+ return 0;
+
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+ if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+ /* Force all levels to use the same settings */
+ for (i = 0; i < num_of_levels; i++) {
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ } else {
+ for (i = 0; i < num_of_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -3179,7 +3243,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_0_print_clk_levels,
.force_clk_levels = smu_v13_0_0_force_clk_levels,
- .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+ .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index e0d356f93ab0..02a455a31c25 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -187,8 +187,34 @@ int smu_v13_0_12_get_max_metrics_size(void)
return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
}
+static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint16_t max_speed;
+ uint8_t max_width;
+ int ret;
+
+ if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) {
+ max_width = (uint8_t)static_metrics->MaxXgmiWidth;
+ max_speed = (uint16_t)static_metrics->MaxXgmiBitrate;
+ ret = 0;
+ } else {
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
+ if (!ret) {
+ max_width = (uint8_t)metrics->XgmiWidth;
+ max_speed = (uint16_t)metrics->XgmiBitrate;
+ }
+ }
+ if (!ret)
+ amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
+}
+
int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
@@ -237,6 +263,18 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
if (ret)
return ret;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
+ if (!static_metrics->InputTelemetryVoltageInmV) {
+ dev_warn(smu->adev->dev, "Invalid board voltage %d\n",
+ static_metrics->InputTelemetryVoltageInmV);
+ }
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+ }
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) &&
+ static_metrics->pldmVersion[0] != 0xFFFFFFFF)
+ smu->adev->firmware.pldm_version =
+ static_metrics->pldmVersion[0];
+ smu_v13_0_12_init_xgmi_data(smu, static_metrics);
pptable->Init = true;
}
@@ -263,7 +301,6 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
struct amdgpu_device *adev = smu->adev;
- int ret = 0;
int xcc_id;
/* For clocks with multiple instances, only report the first one */
@@ -319,7 +356,7 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
break;
}
- return ret;
+ return 0;
}
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index f00ef7f3f355..9cc294f4708b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -345,6 +345,11 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
if (fw_ver >= 0x00562500)
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+
+ if (fw_ver >= 0x04560100) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -685,8 +690,8 @@ static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
return 0;
}
-static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
- void *metrics_table, bool bypass_cache)
+int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
+ bool bypass_cache)
{
struct smu_table_context *smu_table = &smu->smu_table;
uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
@@ -800,6 +805,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
int version = smu_v13_0_6_get_metrics_version(smu);
int ret, i, retry = 100;
uint32_t table_version;
+ uint16_t max_speed;
+ uint8_t max_width;
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
@@ -835,6 +842,9 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version));
pptable->MinGfxclkFrequency =
SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version));
+ max_width = (uint8_t)GET_METRIC_FIELD(XgmiWidth, version);
+ max_speed = (uint16_t)GET_METRIC_FIELD(XgmiBitrate, version);
+ amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
@@ -871,51 +881,51 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- uint32_t clock_limit = 0, param;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t min_clk, max_clk, param;
int ret = 0, clk_id = 0;
- if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+ /* Use dpm tables, if data is already fetched */
+ if (pptable->Init) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
- if (pptable->Init)
- clock_limit = pptable->UclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
break;
case SMU_GFXCLK:
case SMU_SCLK:
- if (pptable->Init)
- clock_limit = pptable->MinGfxclkFrequency;
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
break;
case SMU_SOCCLK:
- if (pptable->Init)
- clock_limit = pptable->SocclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.soc_table;
break;
case SMU_FCLK:
- if (pptable->Init)
- clock_limit = pptable->FclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
break;
case SMU_VCLK:
- if (pptable->Init)
- clock_limit = pptable->VclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
break;
case SMU_DCLK:
- if (pptable->Init)
- clock_limit = pptable->DclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
break;
default:
- break;
+ return -EINVAL;
}
- if (min)
- *min = clock_limit;
+ min_clk = dpm_table->min;
+ max_clk = dpm_table->max;
+ if (min)
+ *min = min_clk;
if (max)
- *max = clock_limit;
+ *max = max_clk;
- return 0;
+ if (min_clk && max_clk)
+ return 0;
}
if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
@@ -1377,8 +1387,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
return ret;
}
- min_clk = pstate_table->gfxclk_pstate.curr.min;
- max_clk = pstate_table->gfxclk_pstate.curr.max;
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ min_clk = single_dpm_table->min;
+ max_clk = single_dpm_table->max;
if (now < SMU_13_0_6_DSCLK_THRESHOLD) {
size += sysfs_emit_at(buf, size, "S: %uMhz *\n",
@@ -2682,7 +2693,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
bool per_inst;
metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
- ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true);
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
if (ret) {
kfree(metrics_v0);
return ret;
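
Note: the bypass_cache flag added above controls whether a previously fetched metrics table may be served from the driver's cache. A minimal sketch of the intended call pattern, with hypothetical helper names:

static int get_metrics_cached(struct smu_context *smu, void *table,
			      bool bypass_cache)
{
	/* Serve cached data unless the caller demands a fresh fetch. */
	if (!bypass_cache && metrics_cache_valid(smu))	/* hypothetical */
		return metrics_cache_copy(smu, table);	/* hypothetical */
	return metrics_fetch_from_smu(smu, table);	/* hypothetical */
}

Passing false, as the get_gpu_metrics hunk now does, lets repeated reads reuse a recently cached table instead of forcing an SMU transaction every time.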
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index d38d6d76b1e7..67b30674fd31 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -74,6 +74,8 @@ enum smu_v13_0_6_caps {
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu);
+int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
+ bool bypass_cache);
bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
int smu_v13_0_12_get_max_metrics_size(void);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c8f4f6fb4083..c96fa5e49ed6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -579,8 +579,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
SkuTable_t *skutable = &driver_ppt->SkuTable;
struct smu_13_0_dpm_table *dpm_table;
- struct smu_13_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -687,24 +685,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -2739,6 +2719,89 @@ static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
+ uint8_t pcie_gen_cap,
+ uint8_t pcie_width_cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ int num_of_levels;
+ int link_level;
+ uint32_t smu_pcie_arg;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+
+ num_of_levels = pcie_table->num_of_link_levels;
+ if (!num_of_levels)
+ return 0;
+
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+ if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+ /* Force all levels to use the same settings */
+ for (i = 0; i < num_of_levels; i++) {
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ } else {
+ for (i = 0; i < num_of_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2768,7 +2831,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_7_print_clk_levels,
.force_clk_levels = smu_v13_0_7_force_clk_levels,
- .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+ .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
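
Note: each SMU_MSG_OverridePcieParameters argument above packs the DPM link level, PCIe generation and lane count into a single 32-bit word. A standalone sketch of that encoding, assuming the layout implied by the shifts:

static inline uint32_t pack_pcie_arg(uint32_t level, uint8_t gen, uint8_t lanes)
{
	/* [31:16] link level, [15:8] PCIe gen, [7:0] lane count */
	return (level << 16) | ((uint32_t)gen << 8) | lanes;
}

For example, level 1 at gen 4 x16 encodes as 0x00010410.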
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 76c1adda83db..f9b0938c57ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -62,13 +62,14 @@ const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32};
MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
+MODULE_FIRMWARE("amdgpu/smu_14_0_3_kicker.bin");
#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
int smu_v14_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -79,8 +80,12 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
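
Note: the ucode request above only switches the filename stem for kicker parts; the rest of the load path is unchanged. Schematically, assuming the same prefix decode:

char fw_name[64];

snprintf(fw_name, sizeof(fw_name), "amdgpu/%s%s.bin", ucode_prefix,
	 amdgpu_is_kicker_fw(adev) ? "_kicker" : "");

which yields e.g. amdgpu/smu_14_0_3_kicker.bin on kicker hardware.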
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 84f9b007b59f..fe00c84b1cc6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1207,11 +1207,13 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ u32 min,
+ u32 max,
+ bool __always_unused automatic)
{
- enum smu_message_type msg_set_min, msg_set_max;
- int ret = 0;
+ enum smu_message_type msg_set_min = SMU_MSG_MAX_COUNT;
+ enum smu_message_type msg_set_max = SMU_MSG_MAX_COUNT;
+ int ret = -EINVAL;
if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type))
return -EINVAL;
@@ -1240,16 +1242,23 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
msg_set_min = SMU_MSG_SetHardMinVcn1;
msg_set_max = SMU_MSG_SetSoftMaxVcn1;
break;
+ case SMU_ISPICLK:
+ msg_set_min = SMU_MSG_SetHardMinIspiclkByFreq;
+ break;
+ case SMU_ISPXCLK:
+ msg_set_min = SMU_MSG_SetHardMinIspxclkByFreq;
+ break;
default:
return -EINVAL;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
- if (ret)
- return ret;
+ if (min && msg_set_min != SMU_MSG_MAX_COUNT)
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
+
+ if (max && msg_set_max != SMU_MSG_MAX_COUNT)
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL);
- return smu_cmn_send_smc_msg_with_param(smu, msg_set_max,
- max, NULL);
+ return ret;
}
static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
@@ -1278,7 +1287,7 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
if (ret)
break;
- ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
break;
default:
ret = -EINVAL;
@@ -1426,7 +1435,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_SCLK,
sclk_min,
- sclk_max);
+ sclk_max,
+ false);
if (ret)
return ret;
@@ -1438,7 +1448,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_FCLK,
fclk_min,
- fclk_max);
+ fclk_max,
+ false);
if (ret)
return ret;
}
@@ -1447,7 +1458,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_SOCCLK,
socclk_min,
- socclk_max);
+ socclk_max,
+ false);
if (ret)
return ret;
}
@@ -1456,7 +1468,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ false);
if (ret)
return ret;
}
@@ -1465,7 +1478,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_VCLK1,
vclk1_min,
- vclk1_max);
+ vclk1_max,
+ false);
if (ret)
return ret;
}
@@ -1474,7 +1488,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ false);
if (ret)
return ret;
}
@@ -1483,7 +1498,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_DCLK1,
dclk1_min,
- dclk1_max);
+ dclk1_max,
+ false);
if (ret)
return ret;
}
@@ -1533,6 +1549,14 @@ static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
0, NULL);
}
+static int smu_v14_0_0_set_isp_enable(struct smu_context *smu,
+ bool enable)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpIspByTile : SMU_MSG_PowerDownIspByTile,
+ ISP_ALL_TILES_MASK, NULL);
+}
+
static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
bool enable)
{
@@ -1662,6 +1686,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.gfx_off_control = smu_v14_0_gfx_off_control,
.mode2_reset = smu_v14_0_0_mode2_reset,
.get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v14_0_0_set_soft_freq_limited_range,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
@@ -1669,6 +1694,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
+ .dpm_set_isp_enable = smu_v14_0_0_set_isp_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
.set_mall_enable = smu_v14_0_common_set_mall_enable,
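
Note: the rewritten set_soft_freq_limited_range treats SMU_MSG_MAX_COUNT as a "no such message" sentinel, so clocks like ISPICLK and ISPXCLK, which define only a hard-min message, silently skip the soft-max write. The guard in isolation (a switch on clk_type assigns the message per clock, as above):

enum smu_message_type msg_set_min = SMU_MSG_MAX_COUNT;	/* sentinel */
int ret = -EINVAL;

if (min && msg_set_min != SMU_MSG_MAX_COUNT)
	ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);

One consequence, as the code is written, is that ret stays -EINVAL when neither message is sent, so passing zero for both limits returns an error rather than a silent no-op.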
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 82c2db972491..3aea32baea3d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -502,8 +502,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
struct smu_14_0_dpm_table *dpm_table;
- struct smu_14_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -619,27 +617,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
-
- if (link_level == 0)
- link_level++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -1487,10 +1464,31 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_14_0_pcie_table *pcie_table =
&dpm_context->dpm_tables.pcie_table;
- int num_of_levels = pcie_table->num_of_link_levels;
+ int num_of_levels;
uint32_t smu_pcie_arg;
- int ret, i;
+ uint32_t link_level;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+ num_of_levels = pcie_table->num_of_link_levels;
if (!num_of_levels)
return 0;
@@ -1505,30 +1503,40 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
}
} else {
for (i = 0; i < num_of_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
- pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
- pcie_table->pcie_lane[i] = pcie_width_cap;
- }
- }
-
- for (i = 0; i < num_of_levels; i++) {
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
- smu_pcie_arg |= pcie_table->pcie_lane[i];
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
- if (ret)
- return ret;
+ if (ret)
+ break;
+ }
+ }
}
- return 0;
+ return ret;
}
static const struct smu_temperature_range smu14_thermal_policy[] = {
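
Note: unlike the earlier code, which clamped every level and then sent one message per level unconditionally, the restructured loop above notifies the SMC only for levels that actually exceed the caps. The design in miniature, with a hypothetical send_override() wrapper:

for (i = 0; i < num_of_levels; i++) {
	if (gen[i] > gen_cap || lane[i] > width_cap) {
		gen[i] = min(gen[i], gen_cap);
		lane[i] = min(lane[i], width_cap);
		send_override(i, gen[i], lane[i]);	/* hypothetical */
	}
}

Levels already within the caps keep their table values and cost no SMC traffic.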
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 7eaf58fd7f9a..59f9abd0f7b8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -86,6 +86,7 @@ static void smu_cmn_read_arg(struct smu_context *smu,
#define SMU_RESP_BUSY_OTHER 0xFC
#define SMU_RESP_DEBUG_END 0xFB
+#define SMU_RESP_UNEXP (~0U)
/**
* __smu_cmn_poll_stat -- poll for a status from the SMU
* @smu: a pointer to SMU context
@@ -171,6 +172,15 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
dev_err_ratelimited(adev->dev,
"SMU: I'm debugging!");
break;
+ case SMU_RESP_UNEXP:
+ if (amdgpu_device_bus_status_check(smu->adev)) {
+ /* print error immediately if device is off the bus */
+ dev_err(adev->dev,
+ "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
+ reg_c2pmsg_90, msg_index, param, message);
+ break;
+ }
+ fallthrough;
default:
dev_err_ratelimited(adev->dev,
"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 7473672abd2a..a608cdbdada4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -40,28 +40,29 @@
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
-#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
- do { \
- typecheck(struct gpu_metrics_v##frev##_##crev, \
- typeof(*(ptr))); \
- struct metrics_table_header *header = \
- (struct metrics_table_header *)(ptr); \
- memset(header, 0xFF, sizeof(*(ptr))); \
- header->format_revision = frev; \
- header->content_revision = crev; \
- header->structure_size = sizeof(*(ptr)); \
+#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
+ do { \
+ typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
+ struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = frev; \
+ header->content_revision = crev; \
+ header->structure_size = sizeof(*tmp); \
} while (0)
-#define smu_cmn_init_partition_metrics(ptr, frev, crev) \
- do { \
- typecheck(struct amdgpu_partition_metrics_v##frev##_##crev, \
- typeof(*(ptr))); \
- struct metrics_table_header *header = \
- (struct metrics_table_header *)(ptr); \
- memset(header, 0xFF, sizeof(*(ptr))); \
- header->format_revision = frev; \
- header->content_revision = crev; \
- header->structure_size = sizeof(*(ptr)); \
+#define smu_cmn_init_partition_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
} while (0)
extern const int link_speed[];
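
Note: typecheck() forces a compile-time comparison between its two arguments' types. Checking the pointer type and then binding it to a local, as the rewritten macros do, also guarantees the ptr expression is evaluated exactly once. The pattern reduced to its core:

#define init_header(ptr, type)						\
	do {								\
		typecheck(type *, (ptr));	/* compile-time only */	\
		type *__tmp = (ptr);		/* evaluate once */	\
		memset(__tmp, 0xFF, sizeof(*__tmp));			\
	} while (0)

One plausible reason for the change: the previous form applied typecheck() to typeof(*(ptr)), which cannot work when a caller passes a void pointer, since dereferencing void * is invalid; checking the pointer type directly avoids that.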
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index df5da5a44755..901f938aefe0 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -157,6 +157,7 @@ komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb,
struct drm_framebuffer *
komeda_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct komeda_dev *mdev = dev->dev_private;
@@ -177,7 +178,7 @@ komeda_fb_create(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EINVAL);
}
- drm_helper_mode_fill_fb_struct(dev, &kfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &kfb->base, info, mode_cmd);
if (kfb->base.modifier)
ret = komeda_fb_afbc_size_check(kfb, file, mode_cmd);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
index c61ca98a3a63..02b2b8ae482a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
@@ -37,6 +37,7 @@ struct komeda_fb {
struct drm_framebuffer *
komeda_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
int komeda_fb_check_src_coords(const struct komeda_fb *kfb,
u32 src_x, u32 src_y, u32 src_w, u32 src_h);
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 3cfefadc7c9d..806da0aaedf7 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -11,8 +11,8 @@
#include <linux/clk.h>
#include <linux/of_graph.h>
-#include <linux/platform_data/simplefb.h>
+#include <video/pixel_format.h>
#include <video/videomode.h>
#include <drm/drm_atomic.h>
@@ -73,7 +73,17 @@ static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
.disable_vblank = hdlcd_crtc_disable_vblank,
};
-static struct simplefb_format supported_formats[] = SIMPLEFB_FORMATS;
+static const struct {
+ u32 fourcc;
+ struct pixel_format pixel;
+} supported_formats[] = {
+ { DRM_FORMAT_RGB565, PIXEL_FORMAT_RGB565 },
+ { DRM_FORMAT_XRGB1555, PIXEL_FORMAT_XRGB1555 },
+ { DRM_FORMAT_RGB888, PIXEL_FORMAT_RGB888 },
+ { DRM_FORMAT_XRGB8888, PIXEL_FORMAT_XRGB8888 },
+ { DRM_FORMAT_XBGR8888, PIXEL_FORMAT_XBGR8888 },
+ { DRM_FORMAT_XRGB2101010, PIXEL_FORMAT_XRGB2101010 },
+};
/*
* Setup the HDLCD registers for decoding the pixels out of the framebuffer
@@ -83,15 +93,12 @@ static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc)
unsigned int btpp;
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
const struct drm_framebuffer *fb = crtc->primary->state->fb;
- uint32_t pixel_format;
- struct simplefb_format *format = NULL;
+ const struct pixel_format *format = NULL;
int i;
- pixel_format = fb->format->format;
-
for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
- if (supported_formats[i].fourcc == pixel_format)
- format = &supported_formats[i];
+ if (supported_formats[i].fourcc == fb->format->format)
+ format = &supported_formats[i].pixel;
}
if (WARN_ON(!format))
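
Note: the driver now carries its own fourcc-to-pixel_format map instead of reusing simplefb's list; the lookup is a linear scan, which is entirely adequate for a six-entry table. The search reduced to a helper (a sketch, not part of the patch):

static const struct pixel_format *lookup_format(u32 fourcc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
		if (supported_formats[i].fourcc == fourcc)
			return &supported_formats[i].pixel;
	return NULL;	/* unsupported format */
}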
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index e083021e9e99..bc5f5e9798c3 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -306,10 +306,10 @@ malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
static bool
malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
int n_superblocks = 0;
- const struct drm_format_info *info;
struct drm_gem_object *objs = NULL;
u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
u32 afbc_superblock_width = 0, afbc_size = 0;
@@ -325,8 +325,6 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
return false;
}
- info = drm_get_format_info(dev, mode_cmd);
-
n_superblocks = (mode_cmd->width / afbc_superblock_width) *
(mode_cmd->height / afbc_superblock_height);
@@ -366,24 +364,26 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
static bool
malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd))
- return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd);
+ return malidp_verify_afbc_framebuffer_size(dev, file, info, mode_cmd);
return false;
}
static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (mode_cmd->modifier[0]) {
- if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd))
+ if (!malidp_verify_afbc_framebuffer(dev, file, info, mode_cmd))
return ERR_PTR(-EINVAL);
}
- return drm_gem_fb_create(dev, file, mode_cmd);
+ return drm_gem_fb_create(dev, file, info, mode_cmd);
}
static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
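
Note: this series threads the resolved struct drm_format_info through the .fb_create implementations, removing the per-driver drm_get_format_info() calls. Under the new prototype, a minimal GEM-based callback looks like:

static struct drm_framebuffer *
my_fb_create(struct drm_device *dev, struct drm_file *file,
	     const struct drm_format_info *info,
	     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	/* info already describes mode_cmd->pixel_format */
	return drm_gem_fb_create(dev, file, info, mode_cmd);
}

(my_fb_create is a placeholder name; the signature matches the calls visible in the malidp and armada hunks.)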
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index cf2e88218dc0..aa4289127086 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -18,7 +18,9 @@ static const struct drm_framebuffer_funcs armada_fb_funcs = {
};
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
+ const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode,
+ struct armada_gem_object *obj)
{
struct armada_framebuffer *dfb;
uint8_t format, config;
@@ -64,7 +66,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
dfb->mod = config;
dfb->fb.obj[0] = &obj->obj;
- drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);
+ drm_helper_mode_fill_fb_struct(dev, &dfb->fb, info, mode);
ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
if (ret) {
@@ -84,9 +86,9 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
}
struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
- struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode)
+ struct drm_file *dfile, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode)
{
- const struct drm_format_info *info = drm_get_format_info(dev, mode);
struct armada_gem_object *obj;
struct armada_framebuffer *dfb;
int ret;
@@ -122,7 +124,7 @@ struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
goto err_unref;
}
- dfb = armada_framebuffer_create(dev, mode, obj);
+ dfb = armada_framebuffer_create(dev, info, mode, obj);
if (IS_ERR(dfb)) {
ret = PTR_ERR(dfb);
goto err;
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
index c5bc53d7e0c4..f2b990f055a2 100644
--- a/drivers/gpu/drm/armada/armada_fb.h
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -17,7 +17,9 @@ struct armada_framebuffer {
#define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0])
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
- struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode);
+ struct drm_file *dfile, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode);
#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 6ee7ce04ee71..cb53cc91bafb 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -78,7 +78,10 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
return -ENOMEM;
}
- dfb = armada_framebuffer_create(dev, &mode, obj);
+ dfb = armada_framebuffer_create(dev,
+ drm_get_format_info(dev, mode.pixel_format,
+ mode.modifier[0]),
+ &mode, obj);
/*
* A reference is now held by the framebuffer object if
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 8d09ba5d5889..2547613155da 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -4,6 +4,11 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ast-y := \
+ ast_2000.o \
+ ast_2100.o \
+ ast_2300.o \
+ ast_2500.o \
+ ast_2600.o \
ast_cursor.o \
ast_ddc.o \
ast_dp501.o \
diff --git a/drivers/gpu/drm/ast/ast_2000.c b/drivers/gpu/drm/ast/ast_2000.c
new file mode 100644
index 000000000000..41c2aa1e425a
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2000.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+void ast_2000_set_def_ext_reg(struct ast_device *ast)
+{
+ static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x9f; i++)
+ ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
+
+ ext_reg_info = extreginfo;
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+ /* ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
+}
+
+static const struct ast_dramstruct ast2000_dram_table_data[] = {
+ { 0x0108, 0x00000000 },
+ { 0x0120, 0x00004a21 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0000, 0xFFFFFFFF },
+ AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000089),
+ { 0x0008, 0x22331353 },
+ { 0x000C, 0x0d07000b },
+ { 0x0010, 0x11113333 },
+ { 0x0020, 0x00110350 },
+ { 0x0028, 0x1e0828f0 },
+ { 0x0024, 0x00000001 },
+ { 0x001C, 0x00000000 },
+ { 0x0014, 0x00000003 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0018, 0x00000131 },
+ { 0x0014, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0018, 0x00000031 },
+ { 0x0014, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0028, 0x1e0828f1 },
+ { 0x0024, 0x00000003 },
+ { 0x002C, 0x1f0f28fb },
+ { 0x0030, 0xFFFFFE01 },
+ AST_DRAMSTRUCT_INVALID,
+};
+
+static void ast_post_chip_2000(struct ast_device *ast)
+{
+ u8 j;
+ u32 temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ dram_reg_info = ast2000_dram_table_data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10100, 0xa8);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10100) != 0xa8);
+
+ while (!AST_DRAMSTRUCT_IS(dram_reg_info, INVALID)) {
+ if (AST_DRAMSTRUCT_IS(dram_reg_info, UDELAY)) {
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else {
+ ast_write32(ast, 0x10000 + dram_reg_info->index,
+ dram_reg_info->data);
+ }
+ dram_reg_info++;
+ }
+
+ temp = ast_read32(ast, 0x10140);
+ ast_write32(ast, 0x10140, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
+int ast_2000_post(struct ast_device *ast)
+{
+ ast_2000_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2000(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
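
Note: the DRAM tables above are sentinel-terminated (index, data) pairs; the POST loop writes each data word to 0x10000 + index, with special index values interpreted as udelay steps, DRAM-type-dependent entries, or end-of-table. The assumed shape of an entry (the real definitions live in the driver headers):

struct ast_dramstruct {
	u16 index;	/* register offset, or a special marker value */
	u32 data;	/* value to write, or delay in microseconds */
};

AST_DRAMSTRUCT_IS(entry, UDELAY) and friends then reduce to comparing entry->index against the corresponding marker.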
diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c
new file mode 100644
index 000000000000..477ee15eff5d
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2100.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+static const struct ast_dramstruct ast1100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x000041f0 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00050000 },
+ AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000585),
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x22201724 },
+ { 0x0018, 0x1e29011a },
+ { 0x0020, 0x00c82222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x032aa02a },
+ { 0x0064, 0x002d3000 },
+ { 0x0068, 0x00000000 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x002C, 0x00000732 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000632 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00004c41 },
+ AST_DRAMSTRUCT_INVALID,
+};
+
+static const struct ast_dramstruct ast2100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x00004120 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00070000 },
+ AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000489),
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x32302926 },
+ { 0x0018, 0x274c0122 },
+ { 0x0020, 0x00ce2222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x0f2aa02a },
+ { 0x0064, 0x003f3005 },
+ { 0x0068, 0x02020202 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x002C, 0x00000942 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000842 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00005061 },
+ AST_DRAMSTRUCT_INVALID,
+};
+
+/*
+ * AST2100/2150 DLL CBR Setting
+ */
+#define CBR_SIZE_AST2150 ((16 << 10) - 1)
+#define CBR_PASSNUM_AST2150 5
+#define CBR_THRESHOLD_AST2150 10
+#define CBR_THRESHOLD2_AST2150 10
+#define TIMEOUT_AST2150 5000000
+
+#define CBR_PATNUM_AST2150 8
+
+static const u32 pattern_AST2150[14] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0xFFFE0001,
+ 0x683501FE,
+ 0x0F1929B0,
+ 0x2D0B4346,
+ 0x60767F02,
+ 0x6FBE36A6,
+ 0x3A253035,
+ 0x3019686D,
+ 0x41C6167E,
+ 0x620152BF,
+ 0x20F050E0
+};
+
+static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+static int cbrtest_ast2150(struct ast_device *ast)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (mmctestburst2_ast2150(ast, i))
+ return 0;
+ return 1;
+}
+
+static int cbrscan_ast2150(struct ast_device *ast, int busw)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
+ if (cbrtest_ast2150(ast))
+ break;
+ }
+ if (loop == CBR_PASSNUM_AST2150)
+ return 0;
+ }
+ return 1;
+}
+
+static void cbrdlli_ast2150(struct ast_device *ast, int busw)
+{
+ u32 dll_min[4], dll_max[4], dlli, data, passcnt;
+
+cbr_start:
+ dll_min[0] = 0xff;
+ dll_min[1] = 0xff;
+ dll_min[2] = 0xff;
+ dll_min[3] = 0xff;
+ dll_max[0] = 0x00;
+ dll_max[1] = 0x00;
+ dll_max[2] = 0x00;
+ dll_max[3] = 0x00;
+ passcnt = 0;
+
+ for (dlli = 0; dlli < 100; dlli++) {
+ ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+ data = cbrscan_ast2150(ast, busw);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dll_min[0] > dlli)
+ dll_min[0] = dlli;
+ if (dll_max[0] < dlli)
+ dll_max[0] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD_AST2150) {
+ goto cbr_start;
+ }
+ }
+ if (dll_max[0] == 0 || (dll_max[0] - dll_min[0]) < CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+
+ dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
+ ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+}
+
+static void ast_post_chip_2100(struct ast_device *ast)
+{
+ u8 j;
+ u32 data, temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ if (ast->chip == AST2100 || ast->chip == AST2200)
+ dram_reg_info = ast2100_dram_table_data;
+ else
+ dram_reg_info = ast1100_dram_table_data;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688A8A8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x01);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+
+ while (!AST_DRAMSTRUCT_IS(dram_reg_info, INVALID)) {
+ if (AST_DRAMSTRUCT_IS(dram_reg_info, UDELAY)) {
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else if (AST_DRAMSTRUCT_IS(dram_reg_info, DRAM_TYPE)) {
+ data = dram_reg_info->data;
+ if (ast->dram_type == AST_DRAM_1Gx16)
+ data = 0x00000d89;
+ else if (ast->dram_type == AST_DRAM_1Gx32)
+ data = 0x00000c8d;
+
+ temp = ast_read32(ast, 0x12070);
+ temp &= 0xc;
+ temp <<= 2;
+ ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
+ } else {
+ ast_write32(ast, 0x10000 + dram_reg_info->index,
+ dram_reg_info->data);
+ }
+ dram_reg_info++;
+ }
+
+ /* AST 2100/2150 DRAM calibration */
+ data = ast_read32(ast, 0x10120);
+ if (data == 0x5061) { /* 266Mhz */
+ data = ast_read32(ast, 0x10004);
+ if (data & 0x40)
+ cbrdlli_ast2150(ast, 16); /* 16 bits */
+ else
+ cbrdlli_ast2150(ast, 32); /* 32 bits */
+ }
+
+ temp = ast_read32(ast, 0x1200c);
+ ast_write32(ast, 0x1200c, temp & 0xfffffffd);
+ temp = ast_read32(ast, 0x12040);
+ ast_write32(ast, 0x12040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
+int ast_2100_post(struct ast_device *ast)
+{
+ ast_2000_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2100(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
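
Note: once a passing DLL window [dll_min, dll_max] is found, cbrdlli_ast2150() does not centre the setting but places it at 7/16 of the window, slightly below the midpoint. Worked through for a hypothetical window:

/* dll_min = 20, dll_max = 52: a 32-step passing window */
u32 dlli = 20 + (((52 - 20) * 7) >> 4);	/* 20 + (224 >> 4) = 34 */

The same value is then replicated into all four byte lanes of register 0x1e6e0068.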
diff --git a/drivers/gpu/drm/ast/ast_2300.c b/drivers/gpu/drm/ast/ast_2300.c
new file mode 100644
index 000000000000..dc2a32244689
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2300.c
@@ -0,0 +1,1328 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+void ast_2300_set_def_ext_reg(struct ast_device *ast)
+{
+ static const u8 extreginfo[] = { 0x0f, 0x04, 0x1f, 0xff };
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x9f; i++)
+ ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
+
+ ext_reg_info = extreginfo;
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+ /* ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ reg |= 0x20;
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
+}
+
+/* AST 2300 DRAM settings */
+#define AST_DDR3 0
+#define AST_DDR2 1
+
+struct ast2300_dram_param {
+ u32 dram_type;
+ u32 dram_chipid;
+ u32 dram_freq;
+ u32 vram_size;
+ u32 odt;
+ u32 wodt;
+ u32 rodt;
+ u32 dram_config;
+ u32 reg_PERIOD;
+ u32 reg_MADJ;
+ u32 reg_SADJ;
+ u32 reg_MRS;
+ u32 reg_EMRS;
+ u32 reg_AC1;
+ u32 reg_AC2;
+ u32 reg_DQSIC;
+ u32 reg_DRV;
+ u32 reg_IOZ;
+ u32 reg_DQIDLY;
+ u32 reg_FREQ;
+ u32 madj_max;
+ u32 dll2_finetune_step;
+};
+
+/*
+ * DQSI DLL CBR Setting
+ */
+#define CBR_SIZE0 ((1 << 10) - 1)
+#define CBR_SIZE1 ((4 << 10) - 1)
+#define CBR_SIZE2 ((64 << 10) - 1)
+#define CBR_PASSNUM 5
+#define CBR_PASSNUM2 5
+#define CBR_THRESHOLD 10
+#define CBR_THRESHOLD2 10
+#define TIMEOUT 5000000
+#define CBR_PATNUM 8
+
+static const u32 pattern[8] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0x88778877,
+ 0x92CC4D6E,
+ 0x543D3CDE,
+ 0xF1E843C7,
+ 0x7C61D253
+};
+
+static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl);
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = ast_mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test2(ast, datagen, 0x41);
+}
+
+static bool mmc_test_single(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test(ast, datagen, 0xc5);
+}
+
+static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test2(ast, datagen, 0x05);
+}
+
+static int cbr_test(struct ast_device *ast)
+{
+ u32 data;
+ int i;
+
+ data = mmc_test_single2(ast, 0);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ for (i = 0; i < 8; i++) {
+ data = mmc_test_burst2(ast, i);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ }
+ if (!data)
+ return 3;
+ else if (data & 0xff)
+ return 2;
+ return 1;
+}
+
+static int cbr_scan(struct ast_device *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 3;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ data = cbr_test(ast);
+ if (data != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static u32 cbr_test2(struct ast_device *ast)
+{
+ u32 data;
+
+ data = mmc_test_burst2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+ data |= mmc_test_single2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+
+ return ~data & 0xffff;
+}
+
+static u32 cbr_scan2(struct ast_device *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 0xffff;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ data = cbr_test2(ast);
+ if (data != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static bool cbr_test3(struct ast_device *ast)
+{
+ if (!mmc_test_burst(ast, 0))
+ return false;
+ if (!mmc_test_single(ast, 0))
+ return false;
+ return true;
+}
+
+static bool cbr_scan3(struct ast_device *ast)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < 2; loop++) {
+ if (cbr_test3(ast))
+ break;
+ }
+ if (loop == 2)
+ return false;
+ }
+ return true;
+}
+
+static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
+ bool status = false;
+FINETUNE_START:
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli)
+ dllmin[cnt] = dlli;
+ if (dllmax[cnt] < dlli)
+ dllmax[cnt] = dlli;
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ passcnt = 0;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) &&
+ ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ gold_sadj[0] += dllmin[cnt];
+ passcnt++;
+ }
+ }
+ if (retry++ > 10)
+ goto FINETUNE_DONE;
+ if (passcnt != 16)
+ goto FINETUNE_START;
+ status = true;
+FINETUNE_DONE:
+ gold_sadj[0] = gold_sadj[0] >> 4;
+ gold_sadj[1] = gold_sadj[0];
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) &&
+ ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[0] >= dlli) {
+ dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
+ if (dlli > 3)
+ dlli = 3;
+ } else {
+ dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
+ if (dlli > 4)
+ dlli = 4;
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) &&
+ ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[1] >= dlli) {
+ dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
+ if (dlli > 3)
+ dlli = 3;
+ else
+ dlli = (dlli - 1) & 0x7;
+ } else {
+ dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
+ dlli += 1;
+ if (dlli > 4)
+ dlli = 4;
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E0084, data);
+ return status;
+} /* finetuneDQI_L */
+
+static void finetuneDQSI(struct ast_device *ast)
+{
+ u32 dlli, dqsip, dqidly;
+ u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
+ u32 g_dqidly, g_dqsip, g_margin, g_side;
+ u16 pass[32][2][2];
+ char tag[2][76];
+
+ /* Disable DQI CBR */
+ reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
+ reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
+ reg_mcr18 &= 0x0000ffff;
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
+
+ for (dlli = 0; dlli < 76; dlli++) {
+ tag[0][dlli] = 0x0;
+ tag[1][dlli] = 0x0;
+ }
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ pass[dqidly][0][0] = 0xff;
+ pass[dqidly][0][1] = 0x0;
+ pass[dqidly][1][0] = 0xff;
+ pass[dqidly][1][1] = 0x0;
+ }
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ passcnt[0] = 0;
+ passcnt[1] = 0;
+ for (dqsip = 0; dqsip < 2; dqsip++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0);
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
+ ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068,
+ 0x00001300 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0070, 0);
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
+ if (cbr_scan3(ast)) {
+ if (dlli == 0)
+ break;
+ passcnt[dqsip]++;
+ tag[dqsip][dlli] = 'P';
+ if (dlli < pass[dqidly][dqsip][0])
+ pass[dqidly][dqsip][0] = (u16)dlli;
+ if (dlli > pass[dqidly][dqsip][1])
+ pass[dqidly][dqsip][1] = (u16)dlli;
+ } else if (passcnt[dqsip] >= 5) {
+ break;
+ } else {
+ pass[dqidly][dqsip][0] = 0xff;
+ pass[dqidly][dqsip][1] = 0x0;
+ }
+ }
+ }
+ if (passcnt[0] == 0 && passcnt[1] == 0)
+ dqidly++;
+ }
+ /* Search margin */
+ g_dqidly = 0;
+ g_dqsip = 0;
+ g_margin = 0;
+ g_side = 0;
+
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ for (dqsip = 0; dqsip < 2; dqsip++) {
+ if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
+ continue;
+ diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
+ if ((diff + 2) < g_margin)
+ continue;
+ passcnt[0] = 0;
+ passcnt[1] = 0;
+ for (dlli = pass[dqidly][dqsip][0];
+ dlli > 0 && tag[dqsip][dlli] != 0;
+ dlli--, passcnt[0]++) {
+ }
+ for (dlli = pass[dqidly][dqsip][1];
+ dlli < 76 && tag[dqsip][dlli] != 0;
+ dlli++, passcnt[1]++) {
+ }
+ if (passcnt[0] > passcnt[1])
+ passcnt[0] = passcnt[1];
+ passcnt[1] = 0;
+ if (passcnt[0] > g_side)
+ passcnt[1] = passcnt[0] - g_side;
+ if (diff > (g_margin + 1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
+ g_margin = diff;
+ g_dqidly = dqidly;
+ g_dqsip = dqsip;
+ g_side = passcnt[0];
+ } else if (passcnt[1] > 1 && g_side < 8) {
+ if (diff > g_margin)
+ g_margin = diff;
+ g_dqidly = dqidly;
+ g_dqsip = dqsip;
+ g_side = passcnt[0];
+ }
+ }
+ }
+ reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
+}
+
+static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
+ bool status = false;
+
+ finetuneDQSI(ast);
+ if (finetuneDQI_L(ast, param) == false)
+ return status;
+
+CBR_START2:
+ dllmin[0] = 0xff;
+ dllmin[1] = 0xff;
+ dllmax[0] = 0x0;
+ dllmax[1] = 0x0;
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan(ast);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dllmin[0] > dlli)
+ dllmin[0] = dlli;
+ if (dllmax[0] < dlli)
+ dllmax[0] = dlli;
+ }
+ if (data & 0x2) {
+ if (dllmin[1] > dlli)
+ dllmin[1] = dlli;
+ if (dllmax[1] < dlli)
+ dllmax[1] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ if (retry++ > 10)
+ goto CBR_DONE2;
+ if (dllmax[0] == 0 || (dllmax[0] - dllmin[0]) < CBR_THRESHOLD)
+ goto CBR_START2;
+ if (dllmax[1] == 0 || (dllmax[1] - dllmin[1]) < CBR_THRESHOLD)
+ goto CBR_START2;
+ status = true;
+CBR_DONE2:
+ dlli = (dllmin[1] + dllmax[1]) >> 1;
+ dlli <<= 8;
+ dlli += (dllmin[0] + dllmax[0]) >> 1;
+ ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
+ return status;
+} /* CBRDLL2 */
+
+static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = 0x00020000 + (trap << 16);
+ trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
+ trap_MRS = 0x00000010 + (trap << 4);
+ trap_MRS |= ((trap & 0x2) << 18);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 336:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 0;
+ param->reg_AC1 = 0x22202725;
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x04001400 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xAA00761C | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xAA007636 | trap_AC2;
+ break;
+ }
+ break;
+ default:
+ case 396:
+ ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x00005040;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+ break;
+
+ case 408:
+ ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xCD44961A;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00081830;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 4;
+ break;
+ case 504:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0270);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xDE44A61D;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x070000BB;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 4;
+ break;
+ case 528:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0290);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xEF44B61E;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000088;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302A37;
+ param->reg_AC2 = 0xEF56B61E;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 136;
+ param->dll2_finetune_step = 3;
+ break;
+ case 600:
+ ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xDF56B61F;
+ param->reg_DQSIC = 0x0000014D;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000058C0;
+ param->madj_max = 132;
+ param->dll2_finetune_step = 3;
+ break;
+ case 624:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0160);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xEF56B621;
+ param->reg_DQSIC = 0x0000015A;
+ param->reg_MRS = 0x02101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000059C0;
+ param->madj_max = 128;
+ param->dll2_finetune_step = 3;
+ break;
+ } /* switch freq */
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x130;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x131;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x132;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x133;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case SZ_8M:
+ param->dram_config |= 0x00;
+ break;
+ case SZ_16M:
+ param->dram_config |= 0x04;
+ break;
+ case SZ_32M:
+ param->dram_config |= 0x08;
+ break;
+ case SZ_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
+static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2, retry = 0;
+
+ddr3_init_start:
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+ ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+ ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+ ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
+ ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0054, 0);
+ ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
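+ /* Step MADJ/SADJ upward until the MCLK2X DLL reports a usable lock, capped at madj_max */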
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max)
+ break;
+ ast_moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000)
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ else
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ ast_moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
+ data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
+ udelay(50);
+ /* Mode Register Setting */
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
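+ /* Program on-die termination (ODT) according to the wodt/rodt settings */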
+ data = 0;
+ if (param->wodt)
+ data = 0x300;
+ if (param->rodt)
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
+
+ /* Calibrate the DQSI delay */
+ if (!cbr_dll2(ast, param) && (retry++ < 10))
+ goto ddr3_init_start;
+
+ ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+ /* ECC Memory Initialization */
+#ifdef ECC
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = ast_mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+}
+
+static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = (trap << 20) | (trap << 16);
+ trap_AC2 += 0x00110000;
+ trap_MRS = 0x00000040 | (trap << 4);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 264:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0130);
+ param->wodt = 0;
+ param->reg_AC1 = 0x11101513;
+ param->reg_AC2 = 0x78117011;
+ param->reg_DQSIC = 0x00000092;
+ param->reg_MRS = 0x00000842;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x000000F0;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x0000005A;
+ param->reg_FREQ = 0x00004AC0;
+ param->madj_max = 138;
+ param->dll2_finetune_step = 3;
+ break;
+ case 336:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 1;
+ param->reg_AC1 = 0x22202613;
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x00000A02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xAA009012 | trap_AC2;
+ break;
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xAA009023 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xAA00903B | trap_AC2;
+ break;
+ }
+ break;
+ default:
+ case 396:
+ ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x00005040;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+
+ case 408:
+ ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xCD44B01E;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 3;
+ break;
+ case 504:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0261);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xDE44C022;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 3;
+ break;
+ case 528:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0120);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xEF44D024;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F9;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A7;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 552:
+ ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E025;
+ param->reg_DQSIC = 0x00000132;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000AD;
+ param->reg_FREQ = 0x000056C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E027;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000B3;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ }
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x100;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x121;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x122;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x123;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case SZ_8M:
+ param->dram_config |= 0x00;
+ break;
+ case SZ_16M:
+ param->dram_config |= 0x04;
+ break;
+ case SZ_32M:
+ param->dram_config |= 0x08;
+ break;
+ case SZ_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
+static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2, retry = 0;
+
+ddr2_init_start:
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+ ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+ ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+ ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
+ ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
+ ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
+ ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0054, 0);
+ ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max)
+ break;
+ ast_moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000)
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ else
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ ast_moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
+ data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ udelay(50);
+ /* Mode Register Setting */
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
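+ /* Program on-die termination (ODT) according to the wodt/rodt settings */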
+ data = 0;
+ if (param->wodt)
+ data = 0x500;
+ if (param->rodt)
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
+ ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+
+ /* Calibrate the DQSI delay */
+ if (!cbr_dll2(ast, param) && (retry++ < 10))
+ goto ddr2_init_start;
+
+ /* ECC Memory Initialization */
+#ifdef ECC
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = ast_mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+}
+
+static void ast_post_chip_2300(struct ast_device *ast)
+{
+ struct ast2300_dram_param param;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ if ((reg & 0x80) == 0) { /* VGA only */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x1);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x1);
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
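+ /* Decode DRAM type, chip capacity and VRAM size from the 0x1e6e2070 strap register */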
+ param.dram_freq = 396;
+ param.dram_type = AST_DDR3;
+ temp = ast_mindwm(ast, 0x1e6e2070);
+ if (temp & 0x01000000)
+ param.dram_type = AST_DDR2;
+ switch (temp & 0x18000000) {
+ case 0:
+ param.dram_chipid = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 0x08000000:
+ param.dram_chipid = AST_DRAM_1Gx16;
+ break;
+ case 0x10000000:
+ param.dram_chipid = AST_DRAM_2Gx16;
+ break;
+ case 0x18000000:
+ param.dram_chipid = AST_DRAM_4Gx16;
+ break;
+ }
+ switch (temp & 0x0c) {
+ default:
+ case 0x00:
+ param.vram_size = SZ_8M;
+ break;
+ case 0x04:
+ param.vram_size = SZ_16M;
+ break;
+ case 0x08:
+ param.vram_size = SZ_32M;
+ break;
+ case 0x0c:
+ param.vram_size = SZ_64M;
+ break;
+ }
+
+ if (param.dram_type == AST_DDR3) {
+ get_ddr3_info(ast, &param);
+ ddr3_init(ast, &param);
+ } else {
+ get_ddr2_info(ast, &param);
+ ddr2_init(ast, &param);
+ }
+
+ temp = ast_mindwm(ast, 0x1e6e2040);
+ ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
+
+int ast_2300_post(struct ast_device *ast)
+{
+ ast_2300_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2300(ast);
+ ast_init_3rdtx(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_2500.c b/drivers/gpu/drm/ast/ast_2500.c
new file mode 100644
index 000000000000..1e541498ea67
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2500.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+/*
+ * AST2500 DRAM settings modules
+ */
+
+#define REGTBL_NUM 17
+#define REGIDX_010 0
+#define REGIDX_014 1
+#define REGIDX_018 2
+#define REGIDX_020 3
+#define REGIDX_024 4
+#define REGIDX_02C 5
+#define REGIDX_030 6
+#define REGIDX_214 7
+#define REGIDX_2E0 8
+#define REGIDX_2E4 9
+#define REGIDX_2E8 10
+#define REGIDX_2EC 11
+#define REGIDX_2F0 12
+#define REGIDX_2F4 13
+#define REGIDX_2F8 14
+#define REGIDX_RFC 15
+#define REGIDX_PLL 16
+
+static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = {
+ 0x64604D38, /* 0x010 */
+ 0x29690599, /* 0x014 */
+ 0x00000300, /* 0x018 */
+ 0x00000000, /* 0x020 */
+ 0x00000000, /* 0x024 */
+ 0x02181E70, /* 0x02C */
+ 0x00000040, /* 0x030 */
+ 0x00000024, /* 0x214 */
+ 0x02001300, /* 0x2E0 */
+ 0x0E0000A0, /* 0x2E4 */
+ 0x000E001B, /* 0x2E8 */
+ 0x35B8C105, /* 0x2EC */
+ 0x08090408, /* 0x2F0 */
+ 0x9B000800, /* 0x2F4 */
+ 0x0E400A00, /* 0x2F8 */
+ 0x9971452F, /* tRFC */
+ 0x000071C1 /* PLL */
+};
+
+static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = {
+ 0x63604E37, /* 0x010 */
+ 0xE97AFA99, /* 0x014 */
+ 0x00019000, /* 0x018 */
+ 0x08000000, /* 0x020 */
+ 0x00000400, /* 0x024 */
+ 0x00000410, /* 0x02C */
+ 0x00000101, /* 0x030 */
+ 0x00000024, /* 0x214 */
+ 0x03002900, /* 0x2E0 */
+ 0x0E0000A0, /* 0x2E4 */
+ 0x000E001C, /* 0x2E8 */
+ 0x35B8C106, /* 0x2EC */
+ 0x08080607, /* 0x2F0 */
+ 0x9B000900, /* 0x2F4 */
+ 0x0E400A00, /* 0x2F8 */
+ 0x99714545, /* tRFC */
+ 0x000071C1 /* PLL */
+};
+
+#define TIMEOUT 5000000
+
+void ast_2500_patch_ahb(void __iomem *regs)
+{
+ u32 data;
+
+ /* Clear bus lock condition */
+ __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
+ __ast_moutdwm(regs, 0x1e600084, 0x00010000);
+ __ast_moutdwm(regs, 0x1e600088, 0x00000000);
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+
+ data = __ast_mindwm(regs, 0x1e6e2070);
+ if (data & 0x08000000) { /* check fast reset */
+ /*
+ * If "Fast restet" is enabled for ARM-ICE debugger,
+ * then WDT needs to enable, that
+ * WDT04 is WDT#1 Reload reg.
+ * WDT08 is WDT#1 counter restart reg to avoid system deadlock
+ * WDT0C is WDT#1 control reg
+ * [6:5]:= 01:Full chip
+ * [4]:= 1:1MHz clock source
+ * [1]:= 1:WDT will be cleeared and disabled after timeout occurs
+ * [0]:= 1:WDT enable
+ */
+ __ast_moutdwm(regs, 0x1E785004, 0x00000010);
+ __ast_moutdwm(regs, 0x1E785008, 0x00004755);
+ __ast_moutdwm(regs, 0x1E78500c, 0x00000033);
+ udelay(1000);
+ }
+
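+ /* Write the SCU unlock key repeatedly until the register reads back 1 (unlocked) */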
+ do {
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+ data = __ast_mindwm(regs, 0x1e6e2000);
+ } while (data != 1);
+
+ __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
+}
+
+static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test(ast, datagen, 0x85);
+}
+
+static bool cbr_test_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
+ ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
+ if (!mmc_test_burst(ast, 0))
+ return false;
+ if (!mmc_test_single_2500(ast, 0))
+ return false;
+ return true;
+}
+
+static bool ddr_test_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
+ ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
+ if (!mmc_test_burst(ast, 0))
+ return false;
+ if (!mmc_test_burst(ast, 1))
+ return false;
+ if (!mmc_test_burst(ast, 2))
+ return false;
+ if (!mmc_test_burst(ast, 3))
+ return false;
+ if (!mmc_test_single_2500(ast, 0))
+ return false;
+ return true;
+}
+
+static void ddr_init_common_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
+ ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF);
+ ast_moutdwm(ast, 0x1E6E0040, 0x88448844);
+ ast_moutdwm(ast, 0x1E6E0044, 0x24422288);
+ ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E004C, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0208, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0218, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0220, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0228, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0230, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02A8, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02B0, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0240, 0x86000000);
+ ast_moutdwm(ast, 0x1E6E0244, 0x00008600);
+ ast_moutdwm(ast, 0x1E6E0248, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
+}
+
+static void ddr_phy_init_2500(struct ast_device *ast)
+{
+ u32 data, pass, timecnt;
+
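+ /* Trigger PHY init via reg 0x60 (bit 0 self-clears) and retry while reg 0x300 flags calibration errors */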
+ pass = 0;
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
+ while (!pass) {
+ for (timecnt = 0; timecnt < TIMEOUT; timecnt++) {
+ data = ast_mindwm(ast, 0x1E6E0060) & 0x1;
+ if (!data)
+ break;
+ }
+ if (timecnt != TIMEOUT) {
+ data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000;
+ if (!data)
+ pass = 1;
+ }
+ if (!pass) {
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ udelay(10); /* delay 10 us */
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
+ }
+ }
+
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000006);
+}
+
+/*
+ * Check DRAM Size
+ * 1Gb : 0x80000000 ~ 0x87FFFFFF
+ * 2Gb : 0x80000000 ~ 0x8FFFFFFF
+ * 4Gb : 0x80000000 ~ 0x9FFFFFFF
+ * 8Gb : 0x80000000 ~ 0xBFFFFFFF
+ */
+static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
+{
+ u32 reg_04, reg_14;
+
+ reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc;
+ reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00;
+
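+ /* Write distinct patterns at descending addresses; read-back aliasing reveals the installed size */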
+ ast_moutdwm(ast, 0xA0100000, 0x41424344);
+ ast_moutdwm(ast, 0x90100000, 0x35363738);
+ ast_moutdwm(ast, 0x88100000, 0x292A2B2C);
+ ast_moutdwm(ast, 0x80100000, 0x1D1E1F10);
+
+ /* Check 8Gbit */
+ if (ast_mindwm(ast, 0xA0100000) == 0x41424344) {
+ reg_04 |= 0x03;
+ reg_14 |= (tRFC >> 24) & 0xFF;
+ /* Check 4Gbit */
+ } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) {
+ reg_04 |= 0x02;
+ reg_14 |= (tRFC >> 16) & 0xFF;
+ /* Check 2Gbit */
+ } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) {
+ reg_04 |= 0x01;
+ reg_14 |= (tRFC >> 8) & 0xFF;
+ } else {
+ reg_14 |= tRFC & 0xFF;
+ }
+ ast_moutdwm(ast, 0x1E6E0004, reg_04);
+ ast_moutdwm(ast, 0x1E6E0014, reg_14);
+}
+
+static void enable_cache_2500(struct ast_device *ast)
+{
+ u32 reg_04, data;
+
+ reg_04 = ast_mindwm(ast, 0x1E6E0004);
+ ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000);
+
+ do {
+ data = ast_mindwm(ast, 0x1E6E0004);
+ } while (!(data & 0x80000));
+ ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
+}
+
+static void set_mpll_2500(struct ast_device *ast)
+{
+ u32 addr, data, param;
+
+ /* Reset MMC */
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
+ for (addr = 0x1e6e0004; addr < 0x1e6e0090;) {
+ ast_moutdwm(ast, addr, 0x0);
+ addr += 4;
+ }
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020000);
+
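+ /* Unlock the SCU, then choose MPLL parameters from the 25/24 MHz CLKIN strap */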
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+ data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000;
+ if (data) {
+ /* CLKIN = 25MHz */
+ param = 0x930023E0;
+ ast_moutdwm(ast, 0x1E6E2160, 0x00011320);
+ } else {
+ /* CLKIN = 24MHz */
+ param = 0x93002400;
+ }
+ ast_moutdwm(ast, 0x1E6E2020, param);
+ udelay(100);
+}
+
+static void reset_mmc_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E78505C, 0x00000004);
+ ast_moutdwm(ast, 0x1E785044, 0x00000001);
+ ast_moutdwm(ast, 0x1E785048, 0x00004755);
+ ast_moutdwm(ast, 0x1E78504C, 0x00000013);
+ mdelay(100);
+ ast_moutdwm(ast, 0x1E785054, 0x00000077);
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+}
+
+static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
+{
+ ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
+ ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
+ ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
+ ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
+ ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
+ ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
+ ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
+ ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
+
+ /* DDR PHY Setting */
+ ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE);
+ ast_moutdwm(ast, 0x1E6E0204, 0x00001001);
+ ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
+ ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
+ ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
+ ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
+ ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
+ ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
+ ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
+ ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
+ ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
+ ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
+ ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00000006);
+
+ /* Controller Setting */
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020091);
+
+ /* Wait DDR PHY init done */
+ ddr_phy_init_2500(ast);
+
+ ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
+ ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
+ ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
+
+ check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
+ enable_cache_2500(ast);
+ ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
+ ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
+}
+
+static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
+{
+ u32 data, data2, pass, retrycnt;
+ u32 ddr_vref, phy_vref;
+ u32 min_ddr_vref = 0, min_phy_vref = 0;
+ u32 max_ddr_vref = 0, max_phy_vref = 0;
+
+ ast_moutdwm(ast, 0x1E6E0004, 0x00000313);
+ ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
+ ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
+ ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
+ ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
+ ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
+ ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
+ ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
+
+ /* DDR PHY Setting */
+ ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE);
+ ast_moutdwm(ast, 0x1E6E0204, 0x09002000);
+ ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
+ ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
+ ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
+ ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
+ ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
+ ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
+ ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
+ ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
+ ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
+ ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
+ ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
+ ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C);
+ ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E);
+
+ /* Controller Setting */
+ ast_moutdwm(ast, 0x1E6E0034, 0x0001A991);
+
+ /* Train PHY Vref first */
+ pass = 0;
+
+ for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
+ max_phy_vref = 0x0;
+ pass = 0;
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06);
+ for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8));
+ /* Fire DFI Init */
+ ddr_phy_init_2500(ast);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
+ if (cbr_test_2500(ast)) {
+ pass++;
+ data = ast_mindwm(ast, 0x1E6E03D0);
+ data2 = data >> 8;
+ data = data & 0xff;
+ if (data > data2)
+ data = data2;
+ if (max_phy_vref < data) {
+ max_phy_vref = data;
+ min_phy_vref = phy_vref;
+ }
+ } else if (pass > 0) {
+ break;
+ }
+ }
+ }
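+ /* Keep the PHY Vref that produced the widest read margin */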
+ ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8));
+
+ /* Train DDR Vref next */
+ pass = 0;
+
+ for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
+ min_ddr_vref = 0xFF;
+ max_ddr_vref = 0x0;
+ pass = 0;
+ for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
+ /* Fire DFI Init */
+ ddr_phy_init_2500(ast);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
+ if (cbr_test_2500(ast)) {
+ pass++;
+ if (min_ddr_vref > ddr_vref)
+ min_ddr_vref = ddr_vref;
+ if (max_ddr_vref < ddr_vref)
+ max_ddr_vref = ddr_vref;
+ } else if (pass != 0) {
+ break;
+ }
+ }
+ }
+
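+ /* Center the DDR Vref within the passing window found above */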
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1;
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
+
+ /* Wait DDR PHY init done */
+ ddr_phy_init_2500(ast);
+
+ ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
+ ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
+ ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
+
+ check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
+ enable_cache_2500(ast);
+ ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
+ ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
+}
+
+static bool ast_dram_init_2500(struct ast_device *ast)
+{
+ u32 data;
+ u32 max_tries = 5;
+
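+ /* Rerun the MPLL/MMC/DRAM bring-up until the DDR test passes, up to max_tries attempts */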
+ do {
+ if (max_tries-- == 0)
+ return false;
+ set_mpll_2500(ast);
+ reset_mmc_2500(ast);
+ ddr_init_common_2500(ast);
+
+ data = ast_mindwm(ast, 0x1E6E2070);
+ if (data & 0x01000000)
+ ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table);
+ else
+ ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table);
+ } while (!ddr_test_2500(ast));
+
+ ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41);
+
+ /* Patch code */
+ data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF;
+ ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000);
+
+ return true;
+}
+
+static void ast_post_chip_2500(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) { /* VGA only */
+ /* Clear bus lock condition */
+ ast_2500_patch_ahb(ast->regs);
+
+ /* Disable watchdog */
+ ast_moutdwm(ast, 0x1E78502C, 0x00000000);
+ ast_moutdwm(ast, 0x1E78504C, 0x00000000);
+
+ /*
+ * Reset the USB port to work around the USB unknown-device issue.
+ * SCU90 is Multi-function Pin Control #5
+ * [29]:= 1:Enable USB2.0 Host port#1 (the port shared with the
+ * USB2.0 Hub).
+ * SCU94 is Multi-function Pin Control #6
+ * [14:13]:= 1x:USB2.0 Host2 controller
+ * SCU70 is Hardware Strap reg
+ * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by
+ * [18]: 0(24)/1(48) MHz)
+ * SCU7C is Write clear reg to SCU70
+ * [23]:= write 1 and then SCU70[23] will be cleared to 0b.
+ */
+ ast_moutdwm(ast, 0x1E6E2090, 0x20000000);
+ ast_moutdwm(ast, 0x1E6E2094, 0x00004000);
+ if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) {
+ ast_moutdwm(ast, 0x1E6E207C, 0x00800000);
+ mdelay(100);
+ ast_moutdwm(ast, 0x1E6E2070, 0x00800000);
+ }
+ /* Modify eSPI reset pin */
+ temp = ast_mindwm(ast, 0x1E6E2070);
+ if (temp & 0x02000000)
+ ast_moutdwm(ast, 0x1E6E207C, 0x00004000);
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
+ if (!ast_dram_init_2500(ast))
+ drm_err(dev, "DRAM init failed !\n");
+
+ temp = ast_mindwm(ast, 0x1e6e2040);
+ ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
+
+int ast_2500_post(struct ast_device *ast)
+{
+ ast_2300_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2500(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_2600.c b/drivers/gpu/drm/ast/ast_2600.c
new file mode 100644
index 000000000000..8d75a47444f5
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2600.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+int ast_2600_post(struct ast_device *ast)
+{
+ ast_2300_set_def_ext_reg(ast);
+
+ if (ast->tx_chip == AST_TX_ASTDP)
+ return ast_dp_launch(ast);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h
deleted file mode 100644
index 1e9ac9d6d26c..000000000000
--- a/drivers/gpu/drm/ast/ast_dram_tables.h
+++ /dev/null
@@ -1,207 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef AST_DRAM_TABLES_H
-#define AST_DRAM_TABLES_H
-
-/* DRAM timing tables */
-struct ast_dramstruct {
- u16 index;
- u32 data;
-};
-
-static const struct ast_dramstruct ast2000_dram_table_data[] = {
- { 0x0108, 0x00000000 },
- { 0x0120, 0x00004a21 },
- { 0xFF00, 0x00000043 },
- { 0x0000, 0xFFFFFFFF },
- { 0x0004, 0x00000089 },
- { 0x0008, 0x22331353 },
- { 0x000C, 0x0d07000b },
- { 0x0010, 0x11113333 },
- { 0x0020, 0x00110350 },
- { 0x0028, 0x1e0828f0 },
- { 0x0024, 0x00000001 },
- { 0x001C, 0x00000000 },
- { 0x0014, 0x00000003 },
- { 0xFF00, 0x00000043 },
- { 0x0018, 0x00000131 },
- { 0x0014, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x0018, 0x00000031 },
- { 0x0014, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x0028, 0x1e0828f1 },
- { 0x0024, 0x00000003 },
- { 0x002C, 0x1f0f28fb },
- { 0x0030, 0xFFFFFE01 },
- { 0xFFFF, 0xFFFFFFFF }
-};
-
-static const struct ast_dramstruct ast1100_dram_table_data[] = {
- { 0x2000, 0x1688a8a8 },
- { 0x2020, 0x000041f0 },
- { 0xFF00, 0x00000043 },
- { 0x0000, 0xfc600309 },
- { 0x006C, 0x00909090 },
- { 0x0064, 0x00050000 },
- { 0x0004, 0x00000585 },
- { 0x0008, 0x0011030f },
- { 0x0010, 0x22201724 },
- { 0x0018, 0x1e29011a },
- { 0x0020, 0x00c82222 },
- { 0x0014, 0x01001523 },
- { 0x001C, 0x1024010d },
- { 0x0024, 0x00cb2522 },
- { 0x0038, 0xffffff82 },
- { 0x003C, 0x00000000 },
- { 0x0040, 0x00000000 },
- { 0x0044, 0x00000000 },
- { 0x0048, 0x00000000 },
- { 0x004C, 0x00000000 },
- { 0x0050, 0x00000000 },
- { 0x0054, 0x00000000 },
- { 0x0058, 0x00000000 },
- { 0x005C, 0x00000000 },
- { 0x0060, 0x032aa02a },
- { 0x0064, 0x002d3000 },
- { 0x0068, 0x00000000 },
- { 0x0070, 0x00000000 },
- { 0x0074, 0x00000000 },
- { 0x0078, 0x00000000 },
- { 0x007C, 0x00000000 },
- { 0x0034, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x002C, 0x00000732 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000005 },
- { 0x0028, 0x00000007 },
- { 0x0028, 0x00000003 },
- { 0x0028, 0x00000001 },
- { 0x000C, 0x00005a08 },
- { 0x002C, 0x00000632 },
- { 0x0028, 0x00000001 },
- { 0x0030, 0x000003c0 },
- { 0x0028, 0x00000003 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000003 },
- { 0x000C, 0x00005a21 },
- { 0x0034, 0x00007c03 },
- { 0x0120, 0x00004c41 },
- { 0xffff, 0xffffffff },
-};
-
-static const struct ast_dramstruct ast2100_dram_table_data[] = {
- { 0x2000, 0x1688a8a8 },
- { 0x2020, 0x00004120 },
- { 0xFF00, 0x00000043 },
- { 0x0000, 0xfc600309 },
- { 0x006C, 0x00909090 },
- { 0x0064, 0x00070000 },
- { 0x0004, 0x00000489 },
- { 0x0008, 0x0011030f },
- { 0x0010, 0x32302926 },
- { 0x0018, 0x274c0122 },
- { 0x0020, 0x00ce2222 },
- { 0x0014, 0x01001523 },
- { 0x001C, 0x1024010d },
- { 0x0024, 0x00cb2522 },
- { 0x0038, 0xffffff82 },
- { 0x003C, 0x00000000 },
- { 0x0040, 0x00000000 },
- { 0x0044, 0x00000000 },
- { 0x0048, 0x00000000 },
- { 0x004C, 0x00000000 },
- { 0x0050, 0x00000000 },
- { 0x0054, 0x00000000 },
- { 0x0058, 0x00000000 },
- { 0x005C, 0x00000000 },
- { 0x0060, 0x0f2aa02a },
- { 0x0064, 0x003f3005 },
- { 0x0068, 0x02020202 },
- { 0x0070, 0x00000000 },
- { 0x0074, 0x00000000 },
- { 0x0078, 0x00000000 },
- { 0x007C, 0x00000000 },
- { 0x0034, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x002C, 0x00000942 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000005 },
- { 0x0028, 0x00000007 },
- { 0x0028, 0x00000003 },
- { 0x0028, 0x00000001 },
- { 0x000C, 0x00005a08 },
- { 0x002C, 0x00000842 },
- { 0x0028, 0x00000001 },
- { 0x0030, 0x000003c0 },
- { 0x0028, 0x00000003 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000003 },
- { 0x000C, 0x00005a21 },
- { 0x0034, 0x00007c03 },
- { 0x0120, 0x00005061 },
- { 0xffff, 0xffffffff },
-};
-
-/*
- * AST2500 DRAM settings modules
- */
-#define REGTBL_NUM 17
-#define REGIDX_010 0
-#define REGIDX_014 1
-#define REGIDX_018 2
-#define REGIDX_020 3
-#define REGIDX_024 4
-#define REGIDX_02C 5
-#define REGIDX_030 6
-#define REGIDX_214 7
-#define REGIDX_2E0 8
-#define REGIDX_2E4 9
-#define REGIDX_2E8 10
-#define REGIDX_2EC 11
-#define REGIDX_2F0 12
-#define REGIDX_2F4 13
-#define REGIDX_2F8 14
-#define REGIDX_RFC 15
-#define REGIDX_PLL 16
-
-static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = {
- 0x64604D38, /* 0x010 */
- 0x29690599, /* 0x014 */
- 0x00000300, /* 0x018 */
- 0x00000000, /* 0x020 */
- 0x00000000, /* 0x024 */
- 0x02181E70, /* 0x02C */
- 0x00000040, /* 0x030 */
- 0x00000024, /* 0x214 */
- 0x02001300, /* 0x2E0 */
- 0x0E0000A0, /* 0x2E4 */
- 0x000E001B, /* 0x2E8 */
- 0x35B8C105, /* 0x2EC */
- 0x08090408, /* 0x2F0 */
- 0x9B000800, /* 0x2F4 */
- 0x0E400A00, /* 0x2F8 */
- 0x9971452F, /* tRFC */
- 0x000071C1 /* PLL */
-};
-
-static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = {
- 0x63604E37, /* 0x010 */
- 0xE97AFA99, /* 0x014 */
- 0x00019000, /* 0x018 */
- 0x08000000, /* 0x020 */
- 0x00000400, /* 0x024 */
- 0x00000410, /* 0x02C */
- 0x00000101, /* 0x030 */
- 0x00000024, /* 0x214 */
- 0x03002900, /* 0x2E0 */
- 0x0E0000A0, /* 0x2E4 */
- 0x000E001C, /* 0x2E8 */
- 0x35B8C106, /* 0x2EC */
- 0x08080607, /* 0x2F0 */
- 0x9B000900, /* 0x2F4 */
- 0x0E400A00, /* 0x2F8 */
- 0x99714545, /* tRFC */
- 0x000071C1 /* PLL */
-};
-
-#endif
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 6fbf62a99c48..473faa92d08c 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -171,7 +171,7 @@ static int ast_detect_chip(struct pci_dev *pdev,
/* Patch AST2500/AST2510 */
if ((pdev->revision & 0xf0) == 0x40) {
if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK))
- ast_patch_ahb_2500(regs);
+ ast_2500_patch_ahb(regs);
}
/* Double check that it's actually working */
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 2ee402096cd9..e37a55295ed7 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -417,11 +417,26 @@ struct ast_crtc_state {
int ast_mm_init(struct ast_device *ast);
+/* ast_2000.c */
+int ast_2000_post(struct ast_device *ast);
+
+/* ast_2100.c */
+int ast_2100_post(struct ast_device *ast);
+
+/* ast_2300.c */
+int ast_2300_post(struct ast_device *ast);
+
+/* ast_2500.c */
+void ast_2500_patch_ahb(void __iomem *regs);
+int ast_2500_post(struct ast_device *ast);
+
+/* ast_2600.c */
+int ast_2600_post(struct ast_device *ast);
+
/* ast post */
int ast_post_gpu(struct ast_device *ast);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
-void ast_patch_ahb_2500(void __iomem *regs);
int ast_vga_output_init(struct ast_device *ast);
int ast_sil164_output_init(struct ast_device *ast);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 031980d8f3ab..b4e8edc7c767 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -33,6 +33,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_format_helper.h>
@@ -70,31 +71,44 @@ static unsigned long ast_fb_vram_size(struct ast_device *ast)
return cursor_offset - offset;
}
-static inline void ast_load_palette_index(struct ast_device *ast,
- u8 index, u8 red, u8 green,
- u8 blue)
+static void ast_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
{
- ast_io_write8(ast, AST_IO_VGADWR, index);
+ struct drm_device *dev = crtc->dev;
+ struct ast_device *ast = to_ast_device(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ ast_io_write8(ast, AST_IO_VGADWR, i8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, red);
+ ast_io_write8(ast, AST_IO_VGAPDR, r8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, green);
+ ast_io_write8(ast, AST_IO_VGAPDR, g8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, blue);
+ ast_io_write8(ast, AST_IO_VGAPDR, b8);
ast_io_read8(ast, AST_IO_VGASRI);
}
-static void ast_crtc_set_gamma_linear(struct ast_device *ast,
- const struct drm_format_info *format)
+static void ast_crtc_fill_gamma(struct ast_device *ast,
+ const struct drm_format_info *format)
{
- int i;
+ struct drm_crtc *crtc = &ast->crtc;
switch (format->format) {
- case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
+ case DRM_FORMAT_C8:
+ /* gamma table is used as color palette */
+ drm_crtc_fill_palette_8(crtc, ast_set_gamma_lut);
+ break;
case DRM_FORMAT_RGB565:
+ /* also uses 8-bit gamma ramp on low-color modes */
+ fallthrough;
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < AST_LUT_SIZE; i++)
- ast_load_palette_index(ast, i, i, i, i);
+ drm_crtc_fill_gamma_888(crtc, ast_set_gamma_lut);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
@@ -103,21 +117,22 @@ static void ast_crtc_set_gamma_linear(struct ast_device *ast,
}
}
-static void ast_crtc_set_gamma(struct ast_device *ast,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+static void ast_crtc_load_gamma(struct ast_device *ast,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
- int i;
+ struct drm_crtc *crtc = &ast->crtc;
switch (format->format) {
- case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
+ case DRM_FORMAT_C8:
+ /* gamma table is used as color palette */
+ drm_crtc_load_palette_8(crtc, lut, ast_set_gamma_lut);
+ break;
case DRM_FORMAT_RGB565:
+ /* also uses 8-bit gamma ramp on low-color modes */
+ fallthrough;
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < AST_LUT_SIZE; i++)
- ast_load_palette_index(ast, i,
- lut[i].red >> 8,
- lut[i].green >> 8,
- lut[i].blue >> 8);
+ drm_crtc_load_gamma_888(crtc, lut, ast_set_gamma_lut);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
@@ -810,11 +825,11 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
*/
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
if (crtc_state->gamma_lut)
- ast_crtc_set_gamma(ast,
- ast_crtc_state->format,
- crtc_state->gamma_lut->data);
+ ast_crtc_load_gamma(ast,
+ ast_crtc_state->format,
+ crtc_state->gamma_lut->data);
else
- ast_crtc_set_gamma_linear(ast, ast_crtc_state->format);
+ ast_crtc_fill_gamma(ast, ast_crtc_state->format);
}
}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 37568cf3822c..b72914dbed38 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -31,51 +31,10 @@
#include <drm/drm_print.h>
-#include "ast_dram_tables.h"
#include "ast_drv.h"
+#include "ast_post.h"
-static void ast_post_chip_2300(struct ast_device *ast);
-static void ast_post_chip_2500(struct ast_device *ast);
-
-static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
-static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
-
-static void ast_set_def_ext_reg(struct ast_device *ast)
-{
- u8 i, index, reg;
- const u8 *ext_reg_info;
-
- /* reset scratch */
- for (i = 0x81; i <= 0x9f; i++)
- ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
-
- if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
- ext_reg_info = extreginfo_ast2300;
- else
- ext_reg_info = extreginfo;
-
- index = 0xa0;
- while (*ext_reg_info != 0xff) {
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
- index++;
- ext_reg_info++;
- }
-
- /* disable standard IO/MEM decode if secondary */
- /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
-
- /* Set Ext. Default */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
-
- /* Enable RAMDAC for A1 */
- reg = 0x04;
- if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
- reg |= 0x20;
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
-}
-
-static u32 __ast_mindwm(void __iomem *regs, u32 r)
+u32 __ast_mindwm(void __iomem *regs, u32 r)
{
u32 data;
@@ -89,7 +48,7 @@ static u32 __ast_mindwm(void __iomem *regs, u32 r)
return __ast_read32(regs, 0x10000 + (r & 0x0000ffff));
}
-static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v)
+void __ast_moutdwm(void __iomem *regs, u32 r, u32 v)
{
u32 data;
@@ -113,332 +72,38 @@ void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
__ast_moutdwm(ast->regs, r, v);
}
-/*
- * AST2100/2150 DLL CBR Setting
- */
-#define CBR_SIZE_AST2150 ((16 << 10) - 1)
-#define CBR_PASSNUM_AST2150 5
-#define CBR_THRESHOLD_AST2150 10
-#define CBR_THRESHOLD2_AST2150 10
-#define TIMEOUT_AST2150 5000000
-
-#define CBR_PATNUM_AST2150 8
-
-static const u32 pattern_AST2150[14] = {
- 0xFF00FF00,
- 0xCC33CC33,
- 0xAA55AA55,
- 0xFFFE0001,
- 0x683501FE,
- 0x0F1929B0,
- 0x2D0B4346,
- 0x60767F02,
- 0x6FBE36A6,
- 0x3A253035,
- 0x3019686D,
- 0x41C6167E,
- 0x620152BF,
- 0x20F050E0
-};
-
-static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
-{
- u32 data, timeout;
-
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
- if (++timeout > TIMEOUT_AST2150) {
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return 0xffffffff;
- }
- } while (!data);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
- if (++timeout > TIMEOUT_AST2150) {
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return 0xffffffff;
- }
- } while (!data);
- data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return data;
-}
-
-#if 0 /* unused in DDX driver - here for completeness */
-static u32 mmctestsingle2_ast2150(struct ast_device *ast, u32 datagen)
-{
- u32 data, timeout;
-
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
- if (++timeout > TIMEOUT_AST2150) {
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return 0xffffffff;
- }
- } while (!data);
- data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return data;
-}
-#endif
-
-static int cbrtest_ast2150(struct ast_device *ast)
-{
- int i;
-
- for (i = 0; i < 8; i++)
- if (mmctestburst2_ast2150(ast, i))
- return 0;
- return 1;
-}
-
-static int cbrscan_ast2150(struct ast_device *ast, int busw)
-{
- u32 patcnt, loop;
-
- for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
- for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
- if (cbrtest_ast2150(ast))
- break;
- }
- if (loop == CBR_PASSNUM_AST2150)
- return 0;
- }
- return 1;
-}
-
-
-static void cbrdlli_ast2150(struct ast_device *ast, int busw)
-{
- u32 dll_min[4], dll_max[4], dlli, data, passcnt;
-
-cbr_start:
- dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
- dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
- passcnt = 0;
-
- for (dlli = 0; dlli < 100; dlli++) {
- ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
- data = cbrscan_ast2150(ast, busw);
- if (data != 0) {
- if (data & 0x1) {
- if (dll_min[0] > dlli)
- dll_min[0] = dlli;
- if (dll_max[0] < dlli)
- dll_max[0] = dlli;
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD_AST2150)
- goto cbr_start;
- }
- if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
- goto cbr_start;
-
- dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
- ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
-}
-
-
-
-static void ast_init_dram_reg(struct ast_device *ast)
-{
- u8 j;
- u32 data, temp, i;
- const struct ast_dramstruct *dram_reg_info;
-
- j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
-
- if ((j & 0x80) == 0) { /* VGA only */
- if (IS_AST_GEN1(ast)) {
- dram_reg_info = ast2000_dram_table_data;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- ast_write32(ast, 0x10100, 0xa8);
-
- do {
- ;
- } while (ast_read32(ast, 0x10100) != 0xa8);
- } else { /* GEN2/GEN3 */
- if (ast->chip == AST2100 || ast->chip == AST2200)
- dram_reg_info = ast2100_dram_table_data;
- else
- dram_reg_info = ast1100_dram_table_data;
-
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- ast_write32(ast, 0x12000, 0x1688A8A8);
- do {
- ;
- } while (ast_read32(ast, 0x12000) != 0x01);
-
- ast_write32(ast, 0x10000, 0xfc600309);
- do {
- ;
- } while (ast_read32(ast, 0x10000) != 0x01);
- }
-
- while (dram_reg_info->index != 0xffff) {
- if (dram_reg_info->index == 0xff00) {/* delay fn */
- for (i = 0; i < 15; i++)
- udelay(dram_reg_info->data);
- } else if (dram_reg_info->index == 0x4 && !IS_AST_GEN1(ast)) {
- data = dram_reg_info->data;
- if (ast->dram_type == AST_DRAM_1Gx16)
- data = 0x00000d89;
- else if (ast->dram_type == AST_DRAM_1Gx32)
- data = 0x00000c8d;
-
- temp = ast_read32(ast, 0x12070);
- temp &= 0xc;
- temp <<= 2;
- ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
- } else
- ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
- dram_reg_info++;
- }
-
- /* AST 2100/2150 DRAM calibration */
- data = ast_read32(ast, 0x10120);
- if (data == 0x5061) { /* 266Mhz */
- data = ast_read32(ast, 0x10004);
- if (data & 0x40)
- cbrdlli_ast2150(ast, 16); /* 16 bits */
- else
- cbrdlli_ast2150(ast, 32); /* 32 bits */
- }
-
- switch (AST_GEN(ast)) {
- case 1:
- temp = ast_read32(ast, 0x10140);
- ast_write32(ast, 0x10140, temp | 0x40);
- break;
- case 2:
- case 3:
- temp = ast_read32(ast, 0x1200c);
- ast_write32(ast, 0x1200c, temp & 0xfffffffd);
- temp = ast_read32(ast, 0x12040);
- ast_write32(ast, 0x12040, temp | 0x40);
- break;
- default:
- break;
- }
- }
-
- /* wait ready */
- do {
- j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- } while ((j & 0x40) == 0);
-}
-
int ast_post_gpu(struct ast_device *ast)
{
int ret;
- ast_set_def_ext_reg(ast);
-
if (AST_GEN(ast) >= 7) {
- if (ast->tx_chip == AST_TX_ASTDP) {
- ret = ast_dp_launch(ast);
- if (ret)
- return ret;
- }
+ ret = ast_2600_post(ast);
+ if (ret)
+ return ret;
} else if (AST_GEN(ast) >= 6) {
- if (ast->config_mode == ast_use_p2a) {
- ast_post_chip_2500(ast);
- } else {
- if (ast->tx_chip == AST_TX_SIL164) {
- /* Enable DVO */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
- }
- }
+ ret = ast_2500_post(ast);
+ if (ret)
+ return ret;
} else if (AST_GEN(ast) >= 4) {
- if (ast->config_mode == ast_use_p2a) {
- ast_post_chip_2300(ast);
- ast_init_3rdtx(ast);
- } else {
- if (ast->tx_chip == AST_TX_SIL164) {
- /* Enable DVO */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
- }
- }
+ ret = ast_2300_post(ast);
+ if (ret)
+ return ret;
+ } else if (AST_GEN(ast) >= 2) {
+ ret = ast_2100_post(ast);
+ if (ret)
+ return ret;
} else {
- if (ast->config_mode == ast_use_p2a) {
- ast_init_dram_reg(ast);
- } else {
- if (ast->tx_chip == AST_TX_SIL164) {
- /* Enable DVO */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
- }
- }
+ ret = ast_2000_post(ast);
+ if (ret)
+ return ret;
}
return 0;
}
-/* AST 2300 DRAM settings */
-#define AST_DDR3 0
-#define AST_DDR2 1
-
-struct ast2300_dram_param {
- u32 dram_type;
- u32 dram_chipid;
- u32 dram_freq;
- u32 vram_size;
- u32 odt;
- u32 wodt;
- u32 rodt;
- u32 dram_config;
- u32 reg_PERIOD;
- u32 reg_MADJ;
- u32 reg_SADJ;
- u32 reg_MRS;
- u32 reg_EMRS;
- u32 reg_AC1;
- u32 reg_AC2;
- u32 reg_DQSIC;
- u32 reg_DRV;
- u32 reg_IOZ;
- u32 reg_DQIDLY;
- u32 reg_FREQ;
- u32 madj_max;
- u32 dll2_finetune_step;
-};
-
-/*
- * DQSI DLL CBR Setting
- */
-#define CBR_SIZE0 ((1 << 10) - 1)
-#define CBR_SIZE1 ((4 << 10) - 1)
-#define CBR_SIZE2 ((64 << 10) - 1)
-#define CBR_PASSNUM 5
-#define CBR_PASSNUM2 5
-#define CBR_THRESHOLD 10
-#define CBR_THRESHOLD2 10
#define TIMEOUT 5000000
-#define CBR_PATNUM 8
-static const u32 pattern[8] = {
- 0xFF00FF00,
- 0xCC33CC33,
- 0xAA55AA55,
- 0x88778877,
- 0x92CC4D6E,
- 0x543D3CDE,
- 0xF1E843C7,
- 0x7C61D253
-};
-
-static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
+bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
@@ -458,1657 +123,7 @@ static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
return true;
}
-static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
-{
- u32 data, timeout;
-
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl);
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
- if (++timeout > TIMEOUT) {
- ast_moutdwm(ast, 0x1e6e0070, 0x0);
- return 0xffffffff;
- }
- } while (!data);
- data = ast_mindwm(ast, 0x1e6e0078);
- data = (data | (data >> 16)) & 0xffff;
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return data;
-}
-
-
-static bool mmc_test_burst(struct ast_device *ast, u32 datagen)
+bool mmc_test_burst(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc1);
}
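
With mmc_test() and mmc_test_burst() now exported (see the declarations added to ast_post.h below), other POST files can drive the MMC built-in test themselves. A minimal sketch of a caller that sweeps the eight CBR test patterns, loosely mirroring the removed cbr_scan3() minus its per-pattern retry; the helper name is illustrative and the pattern values are taken from the deleted pattern[] table:

	static bool example_cbr_scan(struct ast_device *ast)
	{
		static const u32 patterns[8] = {
			0xFF00FF00, 0xCC33CC33, 0xAA55AA55, 0x88778877,
			0x92CC4D6E, 0x543D3CDE, 0xF1E843C7, 0x7C61D253
		};
		u32 i;

		for (i = 0; i < 8; i++) {
			/* load the data-generator pattern, then run one burst test */
			ast_moutdwm(ast, 0x1e6e007c, patterns[i]);
			if (!mmc_test_burst(ast, 0))
				return false;
		}
		return true;
	}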
-
-static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
-{
- return mmc_test2(ast, datagen, 0x41);
-}
-
-static bool mmc_test_single(struct ast_device *ast, u32 datagen)
-{
- return mmc_test(ast, datagen, 0xc5);
-}
-
-static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
-{
- return mmc_test2(ast, datagen, 0x05);
-}
-
-static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
-{
- return mmc_test(ast, datagen, 0x85);
-}
-
-static int cbr_test(struct ast_device *ast)
-{
- u32 data;
- int i;
- data = mmc_test_single2(ast, 0);
- if ((data & 0xff) && (data & 0xff00))
- return 0;
- for (i = 0; i < 8; i++) {
- data = mmc_test_burst2(ast, i);
- if ((data & 0xff) && (data & 0xff00))
- return 0;
- }
- if (!data)
- return 3;
- else if (data & 0xff)
- return 2;
- return 1;
-}
-
-static int cbr_scan(struct ast_device *ast)
-{
- u32 data, data2, patcnt, loop;
-
- data2 = 3;
- for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
- for (loop = 0; loop < CBR_PASSNUM2; loop++) {
- if ((data = cbr_test(ast)) != 0) {
- data2 &= data;
- if (!data2)
- return 0;
- break;
- }
- }
- if (loop == CBR_PASSNUM2)
- return 0;
- }
- return data2;
-}
-
-static u32 cbr_test2(struct ast_device *ast)
-{
- u32 data;
-
- data = mmc_test_burst2(ast, 0);
- if (data == 0xffff)
- return 0;
- data |= mmc_test_single2(ast, 0);
- if (data == 0xffff)
- return 0;
-
- return ~data & 0xffff;
-}
-
-static u32 cbr_scan2(struct ast_device *ast)
-{
- u32 data, data2, patcnt, loop;
-
- data2 = 0xffff;
- for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
- for (loop = 0; loop < CBR_PASSNUM2; loop++) {
- if ((data = cbr_test2(ast)) != 0) {
- data2 &= data;
- if (!data2)
- return 0;
- break;
- }
- }
- if (loop == CBR_PASSNUM2)
- return 0;
- }
- return data2;
-}
-
-static bool cbr_test3(struct ast_device *ast)
-{
- if (!mmc_test_burst(ast, 0))
- return false;
- if (!mmc_test_single(ast, 0))
- return false;
- return true;
-}
-
-static bool cbr_scan3(struct ast_device *ast)
-{
- u32 patcnt, loop;
-
- for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
- for (loop = 0; loop < 2; loop++) {
- if (cbr_test3(ast))
- break;
- }
- if (loop == 2)
- return false;
- }
- return true;
-}
-
-static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
- bool status = false;
-FINETUNE_START:
- for (cnt = 0; cnt < 16; cnt++) {
- dllmin[cnt] = 0xff;
- dllmax[cnt] = 0x0;
- }
- passcnt = 0;
- for (dlli = 0; dlli < 76; dlli++) {
- ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
- ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
- data = cbr_scan2(ast);
- if (data != 0) {
- mask = 0x00010001;
- for (cnt = 0; cnt < 16; cnt++) {
- if (data & mask) {
- if (dllmin[cnt] > dlli) {
- dllmin[cnt] = dlli;
- }
- if (dllmax[cnt] < dlli) {
- dllmax[cnt] = dlli;
- }
- }
- mask <<= 1;
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD2) {
- break;
- }
- }
- gold_sadj[0] = 0x0;
- passcnt = 0;
- for (cnt = 0; cnt < 16; cnt++) {
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- gold_sadj[0] += dllmin[cnt];
- passcnt++;
- }
- }
- if (retry++ > 10)
- goto FINETUNE_DONE;
- if (passcnt != 16) {
- goto FINETUNE_START;
- }
- status = true;
-FINETUNE_DONE:
- gold_sadj[0] = gold_sadj[0] >> 4;
- gold_sadj[1] = gold_sadj[0];
-
- data = 0;
- for (cnt = 0; cnt < 8; cnt++) {
- data >>= 3;
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- dlli = dllmin[cnt];
- if (gold_sadj[0] >= dlli) {
- dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
- if (dlli > 3) {
- dlli = 3;
- }
- } else {
- dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
- if (dlli > 4) {
- dlli = 4;
- }
- dlli = (8 - dlli) & 0x7;
- }
- data |= dlli << 21;
- }
- }
- ast_moutdwm(ast, 0x1E6E0080, data);
-
- data = 0;
- for (cnt = 8; cnt < 16; cnt++) {
- data >>= 3;
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- dlli = dllmin[cnt];
- if (gold_sadj[1] >= dlli) {
- dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
- if (dlli > 3) {
- dlli = 3;
- } else {
- dlli = (dlli - 1) & 0x7;
- }
- } else {
- dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
- dlli += 1;
- if (dlli > 4) {
- dlli = 4;
- }
- dlli = (8 - dlli) & 0x7;
- }
- data |= dlli << 21;
- }
- }
- ast_moutdwm(ast, 0x1E6E0084, data);
- return status;
-} /* finetuneDQI_L */
-
-static void finetuneDQSI(struct ast_device *ast)
-{
- u32 dlli, dqsip, dqidly;
- u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
- u32 g_dqidly, g_dqsip, g_margin, g_side;
- u16 pass[32][2][2];
- char tag[2][76];
-
- /* Disable DQI CBR */
- reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
- reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
- reg_mcr18 &= 0x0000ffff;
- ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
-
- for (dlli = 0; dlli < 76; dlli++) {
- tag[0][dlli] = 0x0;
- tag[1][dlli] = 0x0;
- }
- for (dqidly = 0; dqidly < 32; dqidly++) {
- pass[dqidly][0][0] = 0xff;
- pass[dqidly][0][1] = 0x0;
- pass[dqidly][1][0] = 0xff;
- pass[dqidly][1][1] = 0x0;
- }
- for (dqidly = 0; dqidly < 32; dqidly++) {
- passcnt[0] = passcnt[1] = 0;
- for (dqsip = 0; dqsip < 2; dqsip++) {
- ast_moutdwm(ast, 0x1E6E000C, 0);
- ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
- ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
- for (dlli = 0; dlli < 76; dlli++) {
- ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
- ast_moutdwm(ast, 0x1E6E0070, 0);
- ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
- if (cbr_scan3(ast)) {
- if (dlli == 0)
- break;
- passcnt[dqsip]++;
- tag[dqsip][dlli] = 'P';
- if (dlli < pass[dqidly][dqsip][0])
- pass[dqidly][dqsip][0] = (u16) dlli;
- if (dlli > pass[dqidly][dqsip][1])
- pass[dqidly][dqsip][1] = (u16) dlli;
- } else if (passcnt[dqsip] >= 5)
- break;
- else {
- pass[dqidly][dqsip][0] = 0xff;
- pass[dqidly][dqsip][1] = 0x0;
- }
- }
- }
- if (passcnt[0] == 0 && passcnt[1] == 0)
- dqidly++;
- }
- /* Search margin */
- g_dqidly = g_dqsip = g_margin = g_side = 0;
-
- for (dqidly = 0; dqidly < 32; dqidly++) {
- for (dqsip = 0; dqsip < 2; dqsip++) {
- if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
- continue;
- diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
- if ((diff+2) < g_margin)
- continue;
- passcnt[0] = passcnt[1] = 0;
- for (dlli = pass[dqidly][dqsip][0]; dlli > 0 && tag[dqsip][dlli] != 0; dlli--, passcnt[0]++);
- for (dlli = pass[dqidly][dqsip][1]; dlli < 76 && tag[dqsip][dlli] != 0; dlli++, passcnt[1]++);
- if (passcnt[0] > passcnt[1])
- passcnt[0] = passcnt[1];
- passcnt[1] = 0;
- if (passcnt[0] > g_side)
- passcnt[1] = passcnt[0] - g_side;
- if (diff > (g_margin+1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
- g_margin = diff;
- g_dqidly = dqidly;
- g_dqsip = dqsip;
- g_side = passcnt[0];
- } else if (passcnt[1] > 1 && g_side < 8) {
- if (diff > g_margin)
- g_margin = diff;
- g_dqidly = dqidly;
- g_dqsip = dqsip;
- g_side = passcnt[0];
- }
- }
- }
- reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
- ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
-
-}
-static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
- bool status = false;
-
- finetuneDQSI(ast);
- if (finetuneDQI_L(ast, param) == false)
- return status;
-
-CBR_START2:
- dllmin[0] = dllmin[1] = 0xff;
- dllmax[0] = dllmax[1] = 0x0;
- passcnt = 0;
- for (dlli = 0; dlli < 76; dlli++) {
- ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
- ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
- data = cbr_scan(ast);
- if (data != 0) {
- if (data & 0x1) {
- if (dllmin[0] > dlli) {
- dllmin[0] = dlli;
- }
- if (dllmax[0] < dlli) {
- dllmax[0] = dlli;
- }
- }
- if (data & 0x2) {
- if (dllmin[1] > dlli) {
- dllmin[1] = dlli;
- }
- if (dllmax[1] < dlli) {
- dllmax[1] = dlli;
- }
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD) {
- break;
- }
- }
- if (retry++ > 10)
- goto CBR_DONE2;
- if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
- goto CBR_START2;
- }
- if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
- goto CBR_START2;
- }
- status = true;
-CBR_DONE2:
- dlli = (dllmin[1] + dllmax[1]) >> 1;
- dlli <<= 8;
- dlli += (dllmin[0] + dllmax[0]) >> 1;
- ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
- return status;
-} /* CBRDLL2 */
-
-static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 trap, trap_AC2, trap_MRS;
-
- ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
-
- /* Get trap info */
- trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
- trap_AC2 = 0x00020000 + (trap << 16);
- trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
- trap_MRS = 0x00000010 + (trap << 4);
- trap_MRS |= ((trap & 0x2) << 18);
-
- param->reg_MADJ = 0x00034C4C;
- param->reg_SADJ = 0x00001800;
- param->reg_DRV = 0x000000F0;
- param->reg_PERIOD = param->dram_freq;
- param->rodt = 0;
-
- switch (param->dram_freq) {
- case 336:
- ast_moutdwm(ast, 0x1E6E2020, 0x0190);
- param->wodt = 0;
- param->reg_AC1 = 0x22202725;
- param->reg_AC2 = 0xAA007613 | trap_AC2;
- param->reg_DQSIC = 0x000000BA;
- param->reg_MRS = 0x04001400 | trap_MRS;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000074;
- param->reg_FREQ = 0x00004DC0;
- param->madj_max = 96;
- param->dll2_finetune_step = 3;
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xAA007613 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xAA00761C | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xAA007636 | trap_AC2;
- break;
- }
- break;
- default:
- case 396:
- ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
- param->wodt = 1;
- param->reg_AC1 = 0x33302825;
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x04001600 | trap_MRS;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000034;
- param->reg_DRV = 0x000000FA;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x00005040;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC009622 | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00963F | trap_AC2;
- break;
- }
- break;
-
- case 408:
- ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
- param->wodt = 1;
- param->reg_AC1 = 0x33302825;
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x04001600 | trap_MRS;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000023;
- param->reg_DRV = 0x000000FA;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x000050C0;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC009622 | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00963F | trap_AC2;
- break;
- }
-
- break;
- case 456:
- ast_moutdwm(ast, 0x1E6E2020, 0x0230);
- param->wodt = 0;
- param->reg_AC1 = 0x33302926;
- param->reg_AC2 = 0xCD44961A;
- param->reg_DQSIC = 0x000000FC;
- param->reg_MRS = 0x00081830;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x00000097;
- param->reg_FREQ = 0x000052C0;
- param->madj_max = 88;
- param->dll2_finetune_step = 4;
- break;
- case 504:
- ast_moutdwm(ast, 0x1E6E2020, 0x0270);
- param->wodt = 1;
- param->reg_AC1 = 0x33302926;
- param->reg_AC2 = 0xDE44A61D;
- param->reg_DQSIC = 0x00000117;
- param->reg_MRS = 0x00081A30;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x070000BB;
- param->reg_DQIDLY = 0x000000A0;
- param->reg_FREQ = 0x000054C0;
- param->madj_max = 79;
- param->dll2_finetune_step = 4;
- break;
- case 528:
- ast_moutdwm(ast, 0x1E6E2020, 0x0290);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302926;
- param->reg_AC2 = 0xEF44B61E;
- param->reg_DQSIC = 0x00000125;
- param->reg_MRS = 0x00081A30;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000088;
- param->reg_FREQ = 0x000055C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- case 576:
- ast_moutdwm(ast, 0x1E6E2020, 0x0140);
- param->reg_MADJ = 0x00136868;
- param->reg_SADJ = 0x00004534;
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302A37;
- param->reg_AC2 = 0xEF56B61E;
- param->reg_DQSIC = 0x0000013F;
- param->reg_MRS = 0x00101A50;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000078;
- param->reg_FREQ = 0x000057C0;
- param->madj_max = 136;
- param->dll2_finetune_step = 3;
- break;
- case 600:
- ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
- param->reg_MADJ = 0x00136868;
- param->reg_SADJ = 0x00004534;
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x32302A37;
- param->reg_AC2 = 0xDF56B61F;
- param->reg_DQSIC = 0x0000014D;
- param->reg_MRS = 0x00101A50;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000078;
- param->reg_FREQ = 0x000058C0;
- param->madj_max = 132;
- param->dll2_finetune_step = 3;
- break;
- case 624:
- ast_moutdwm(ast, 0x1E6E2020, 0x0160);
- param->reg_MADJ = 0x00136868;
- param->reg_SADJ = 0x00004534;
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x32302A37;
- param->reg_AC2 = 0xEF56B621;
- param->reg_DQSIC = 0x0000015A;
- param->reg_MRS = 0x02101A50;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000078;
- param->reg_FREQ = 0x000059C0;
- param->madj_max = 128;
- param->dll2_finetune_step = 3;
- break;
- } /* switch freq */
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->dram_config = 0x130;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->dram_config = 0x131;
- break;
- case AST_DRAM_2Gx16:
- param->dram_config = 0x132;
- break;
- case AST_DRAM_4Gx16:
- param->dram_config = 0x133;
- break;
- } /* switch size */
-
- switch (param->vram_size) {
- default:
- case SZ_8M:
- param->dram_config |= 0x00;
- break;
- case SZ_16M:
- param->dram_config |= 0x04;
- break;
- case SZ_32M:
- param->dram_config |= 0x08;
- break;
- case SZ_64M:
- param->dram_config |= 0x0c;
- break;
- }
-
-}
-
-static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 data, data2, retry = 0;
-
-ddr3_init_start:
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
- ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
- ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
- ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
- udelay(10);
-
- ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
- ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
- ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
- ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
- ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
- ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
- ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
- ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
- ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
- ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
- ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
- ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0054, 0);
- ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
- ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
- /* Wait MCLK2X lock to MCLK */
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
- data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
- if ((data2 & 0xff) > param->madj_max) {
- break;
- }
- ast_moutdwm(ast, 0x1E6E0064, data2);
- if (data2 & 0x00100000) {
- data2 = ((data2 & 0xff) >> 3) + 3;
- } else {
- data2 = ((data2 & 0xff) >> 2) + 5;
- }
- data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
- data2 += data & 0xff;
- data = data | (data2 << 8);
- ast_moutdwm(ast, 0x1E6E0068, data);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
- udelay(10);
- data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
- ast_moutdwm(ast, 0x1E6E0018, data);
- data = data | 0x200;
- ast_moutdwm(ast, 0x1E6E0018, data);
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
-
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- }
- ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
- data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
- ast_moutdwm(ast, 0x1E6E0018, data);
-
- ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
- ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
- udelay(50);
- /* Mode Register Setting */
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
-
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
- data = 0;
- if (param->wodt) {
- data = 0x300;
- }
- if (param->rodt) {
- data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
- }
- ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
-
- /* Calibrate the DQSI delay */
- if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
- goto ddr3_init_start;
-
- ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
- /* ECC Memory Initialization */
-#ifdef ECC
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0070, 0x221);
- do {
- data = ast_mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
-#endif
-
-
-}
-
-static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 trap, trap_AC2, trap_MRS;
-
- ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
-
- /* Get trap info */
- trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
- trap_AC2 = (trap << 20) | (trap << 16);
- trap_AC2 += 0x00110000;
- trap_MRS = 0x00000040 | (trap << 4);
-
-
- param->reg_MADJ = 0x00034C4C;
- param->reg_SADJ = 0x00001800;
- param->reg_DRV = 0x000000F0;
- param->reg_PERIOD = param->dram_freq;
- param->rodt = 0;
-
- switch (param->dram_freq) {
- case 264:
- ast_moutdwm(ast, 0x1E6E2020, 0x0130);
- param->wodt = 0;
- param->reg_AC1 = 0x11101513;
- param->reg_AC2 = 0x78117011;
- param->reg_DQSIC = 0x00000092;
- param->reg_MRS = 0x00000842;
- param->reg_EMRS = 0x00000000;
- param->reg_DRV = 0x000000F0;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x0000005A;
- param->reg_FREQ = 0x00004AC0;
- param->madj_max = 138;
- param->dll2_finetune_step = 3;
- break;
- case 336:
- ast_moutdwm(ast, 0x1E6E2020, 0x0190);
- param->wodt = 1;
- param->reg_AC1 = 0x22202613;
- param->reg_AC2 = 0xAA009016 | trap_AC2;
- param->reg_DQSIC = 0x000000BA;
- param->reg_MRS = 0x00000A02 | trap_MRS;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000074;
- param->reg_FREQ = 0x00004DC0;
- param->madj_max = 96;
- param->dll2_finetune_step = 3;
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- param->reg_AC2 = 0xAA009012 | trap_AC2;
- break;
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xAA009016 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xAA009023 | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xAA00903B | trap_AC2;
- break;
- }
- break;
- default:
- case 396:
- ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
- param->wodt = 1;
- param->rodt = 0;
- param->reg_AC1 = 0x33302714;
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x00000C02 | trap_MRS;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x00005040;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->reg_AC2 = 0xCC00B016 | trap_AC2;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC00B02B | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00B03F | trap_AC2;
- break;
- }
-
- break;
-
- case 408:
- ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
- param->wodt = 1;
- param->rodt = 0;
- param->reg_AC1 = 0x33302714;
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x00000C02 | trap_MRS;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x000050C0;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->reg_AC2 = 0xCC00B016 | trap_AC2;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC00B02B | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00B03F | trap_AC2;
- break;
- }
-
- break;
- case 456:
- ast_moutdwm(ast, 0x1E6E2020, 0x0230);
- param->wodt = 0;
- param->reg_AC1 = 0x33302815;
- param->reg_AC2 = 0xCD44B01E;
- param->reg_DQSIC = 0x000000FC;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000000;
- param->reg_DRV = 0x00000000;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000097;
- param->reg_FREQ = 0x000052C0;
- param->madj_max = 88;
- param->dll2_finetune_step = 3;
- break;
- case 504:
- ast_moutdwm(ast, 0x1E6E2020, 0x0261);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302815;
- param->reg_AC2 = 0xDE44C022;
- param->reg_DQSIC = 0x00000117;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x0000000A;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000A0;
- param->reg_FREQ = 0x000054C0;
- param->madj_max = 79;
- param->dll2_finetune_step = 3;
- break;
- case 528:
- ast_moutdwm(ast, 0x1E6E2020, 0x0120);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302815;
- param->reg_AC2 = 0xEF44D024;
- param->reg_DQSIC = 0x00000125;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F9;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000A7;
- param->reg_FREQ = 0x000055C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- case 552:
- ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x43402915;
- param->reg_AC2 = 0xFF44E025;
- param->reg_DQSIC = 0x00000132;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x0000000A;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000AD;
- param->reg_FREQ = 0x000056C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- case 576:
- ast_moutdwm(ast, 0x1E6E2020, 0x0140);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x43402915;
- param->reg_AC2 = 0xFF44E027;
- param->reg_DQSIC = 0x0000013F;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000B3;
- param->reg_FREQ = 0x000057C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- }
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->dram_config = 0x100;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->dram_config = 0x121;
- break;
- case AST_DRAM_2Gx16:
- param->dram_config = 0x122;
- break;
- case AST_DRAM_4Gx16:
- param->dram_config = 0x123;
- break;
- } /* switch size */
-
- switch (param->vram_size) {
- default:
- case SZ_8M:
- param->dram_config |= 0x00;
- break;
- case SZ_16M:
- param->dram_config |= 0x04;
- break;
- case SZ_32M:
- param->dram_config |= 0x08;
- break;
- case SZ_64M:
- param->dram_config |= 0x0c;
- break;
- }
-}
-
-static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 data, data2, retry = 0;
-
-ddr2_init_start:
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
- ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
- ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
- ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
- udelay(10);
-
- ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
- ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
- ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
- ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
- ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
- ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
- ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
- ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
- ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
- ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
- ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
- ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0054, 0);
- ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
- ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
-
- /* Wait MCLK2X lock to MCLK */
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
- data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
- if ((data2 & 0xff) > param->madj_max) {
- break;
- }
- ast_moutdwm(ast, 0x1E6E0064, data2);
- if (data2 & 0x00100000) {
- data2 = ((data2 & 0xff) >> 3) + 3;
- } else {
- data2 = ((data2 & 0xff) >> 2) + 5;
- }
- data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
- data2 += data & 0xff;
- data = data | (data2 << 8);
- ast_moutdwm(ast, 0x1E6E0068, data);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
- udelay(10);
- data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
- ast_moutdwm(ast, 0x1E6E0018, data);
- data = data | 0x200;
- ast_moutdwm(ast, 0x1E6E0018, data);
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
-
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- }
- ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
- data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
- ast_moutdwm(ast, 0x1E6E0018, data);
-
- ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- udelay(50);
- /* Mode Register Setting */
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
-
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
-
- ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
- data = 0;
- if (param->wodt) {
- data = 0x500;
- }
- if (param->rodt) {
- data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
- }
- ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
- ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
-
- /* Calibrate the DQSI delay */
- if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
- goto ddr2_init_start;
-
- /* ECC Memory Initialization */
-#ifdef ECC
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0070, 0x221);
- do {
- data = ast_mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
-#endif
-
-}
-
-static void ast_post_chip_2300(struct ast_device *ast)
-{
- struct ast2300_dram_param param;
- u32 temp;
- u8 reg;
-
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- if ((reg & 0x80) == 0) {/* vga only */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- ast_write32(ast, 0x12000, 0x1688a8a8);
- do {
- ;
- } while (ast_read32(ast, 0x12000) != 0x1);
-
- ast_write32(ast, 0x10000, 0xfc600309);
- do {
- ;
- } while (ast_read32(ast, 0x10000) != 0x1);
-
- /* Slow down CPU/AHB CLK in VGA only mode */
- temp = ast_read32(ast, 0x12008);
- temp |= 0x73;
- ast_write32(ast, 0x12008, temp);
-
- param.dram_freq = 396;
- param.dram_type = AST_DDR3;
- temp = ast_mindwm(ast, 0x1e6e2070);
- if (temp & 0x01000000)
- param.dram_type = AST_DDR2;
- switch (temp & 0x18000000) {
- case 0:
- param.dram_chipid = AST_DRAM_512Mx16;
- break;
- default:
- case 0x08000000:
- param.dram_chipid = AST_DRAM_1Gx16;
- break;
- case 0x10000000:
- param.dram_chipid = AST_DRAM_2Gx16;
- break;
- case 0x18000000:
- param.dram_chipid = AST_DRAM_4Gx16;
- break;
- }
- switch (temp & 0x0c) {
- default:
- case 0x00:
- param.vram_size = SZ_8M;
- break;
-
- case 0x04:
- param.vram_size = SZ_16M;
- break;
-
- case 0x08:
- param.vram_size = SZ_32M;
- break;
-
- case 0x0c:
- param.vram_size = SZ_64M;
- break;
- }
-
- if (param.dram_type == AST_DDR3) {
- get_ddr3_info(ast, &param);
- ddr3_init(ast, &param);
- } else {
- get_ddr2_info(ast, &param);
- ddr2_init(ast, &param);
- }
-
- temp = ast_mindwm(ast, 0x1e6e2040);
- ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
- }
-
- /* wait ready */
- do {
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- } while ((reg & 0x40) == 0);
-}
-
-static bool cbr_test_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
- ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
- if (!mmc_test_burst(ast, 0))
- return false;
- if (!mmc_test_single_2500(ast, 0))
- return false;
- return true;
-}
-
-static bool ddr_test_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
- ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
- if (!mmc_test_burst(ast, 0))
- return false;
- if (!mmc_test_burst(ast, 1))
- return false;
- if (!mmc_test_burst(ast, 2))
- return false;
- if (!mmc_test_burst(ast, 3))
- return false;
- if (!mmc_test_single_2500(ast, 0))
- return false;
- return true;
-}
-
-static void ddr_init_common_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
- ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
- ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF);
- ast_moutdwm(ast, 0x1E6E0040, 0x88448844);
- ast_moutdwm(ast, 0x1E6E0044, 0x24422288);
- ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
- ast_moutdwm(ast, 0x1E6E004C, 0x22222222);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0208, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0218, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0220, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0228, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0230, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02A8, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02B0, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0240, 0x86000000);
- ast_moutdwm(ast, 0x1E6E0244, 0x00008600);
- ast_moutdwm(ast, 0x1E6E0248, 0x80000000);
- ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
-}
-
-static void ddr_phy_init_2500(struct ast_device *ast)
-{
- u32 data, pass, timecnt;
-
- pass = 0;
- ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
- while (!pass) {
- for (timecnt = 0; timecnt < TIMEOUT; timecnt++) {
- data = ast_mindwm(ast, 0x1E6E0060) & 0x1;
- if (!data)
- break;
- }
- if (timecnt != TIMEOUT) {
- data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000;
- if (!data)
- pass = 1;
- }
- if (!pass) {
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- udelay(10); /* delay 10 us */
- ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
- }
- }
-
- ast_moutdwm(ast, 0x1E6E0060, 0x00000006);
-}
-
-/*
- * Check DRAM Size
- * 1Gb : 0x80000000 ~ 0x87FFFFFF
- * 2Gb : 0x80000000 ~ 0x8FFFFFFF
- * 4Gb : 0x80000000 ~ 0x9FFFFFFF
- * 8Gb : 0x80000000 ~ 0xBFFFFFFF
- */
-static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
-{
- u32 reg_04, reg_14;
-
- reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc;
- reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00;
-
- ast_moutdwm(ast, 0xA0100000, 0x41424344);
- ast_moutdwm(ast, 0x90100000, 0x35363738);
- ast_moutdwm(ast, 0x88100000, 0x292A2B2C);
- ast_moutdwm(ast, 0x80100000, 0x1D1E1F10);
-
- /* Check 8Gbit */
- if (ast_mindwm(ast, 0xA0100000) == 0x41424344) {
- reg_04 |= 0x03;
- reg_14 |= (tRFC >> 24) & 0xFF;
- /* Check 4Gbit */
- } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) {
- reg_04 |= 0x02;
- reg_14 |= (tRFC >> 16) & 0xFF;
- /* Check 2Gbit */
- } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) {
- reg_04 |= 0x01;
- reg_14 |= (tRFC >> 8) & 0xFF;
- } else {
- reg_14 |= tRFC & 0xFF;
- }
- ast_moutdwm(ast, 0x1E6E0004, reg_04);
- ast_moutdwm(ast, 0x1E6E0014, reg_14);
-}
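
As the removed helper above shows, the caller hands check_dram_size_2500() a single tRFC word that packs one refresh-cycle byte per DRAM density, and the probe writes at the aliased addresses select which byte lands in MCR14. A hedged illustration of that encoding (the byte values here are invented for the example, not taken from the real timing tables):

	/*
	 * tRFC layout consumed by check_dram_size_2500():
	 *   [31:24] 8Gbit   [23:16] 4Gbit   [15:8] 2Gbit   [7:0] 1Gbit
	 */
	u32 trfc_example = (0x45 << 24) | (0x35 << 16) | (0x25 << 8) | 0x17;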
-
-static void enable_cache_2500(struct ast_device *ast)
-{
- u32 reg_04, data;
-
- reg_04 = ast_mindwm(ast, 0x1E6E0004);
- ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000);
-
- do
- data = ast_mindwm(ast, 0x1E6E0004);
- while (!(data & 0x80000));
- ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
-}
-
-static void set_mpll_2500(struct ast_device *ast)
-{
- u32 addr, data, param;
-
- /* Reset MMC */
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
- ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
- for (addr = 0x1e6e0004; addr < 0x1e6e0090;) {
- ast_moutdwm(ast, addr, 0x0);
- addr += 4;
- }
- ast_moutdwm(ast, 0x1E6E0034, 0x00020000);
-
- ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
- data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000;
- if (data) {
- /* CLKIN = 25MHz */
- param = 0x930023E0;
- ast_moutdwm(ast, 0x1E6E2160, 0x00011320);
- } else {
- /* CLKIN = 24MHz */
- param = 0x93002400;
- }
- ast_moutdwm(ast, 0x1E6E2020, param);
- udelay(100);
-}
-
-static void reset_mmc_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E78505C, 0x00000004);
- ast_moutdwm(ast, 0x1E785044, 0x00000001);
- ast_moutdwm(ast, 0x1E785048, 0x00004755);
- ast_moutdwm(ast, 0x1E78504C, 0x00000013);
- mdelay(100);
- ast_moutdwm(ast, 0x1E785054, 0x00000077);
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
-}
-
-static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
-{
-
- ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
- ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
- ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
- ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
- ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
- ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
- ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
- ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
-
- /* DDR PHY Setting */
- ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE);
- ast_moutdwm(ast, 0x1E6E0204, 0x00001001);
- ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
- ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
- ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
- ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
- ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
- ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
- ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
- ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
- ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
- ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
- ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
- ast_moutdwm(ast, 0x1E6E02C0, 0x00000006);
-
- /* Controller Setting */
- ast_moutdwm(ast, 0x1E6E0034, 0x00020091);
-
- /* Wait DDR PHY init done */
- ddr_phy_init_2500(ast);
-
- ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
- ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
- ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
-
- check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
- enable_cache_2500(ast);
- ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
- ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
-}
-
-static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
-{
- u32 data, data2, pass, retrycnt;
- u32 ddr_vref, phy_vref;
- u32 min_ddr_vref = 0, min_phy_vref = 0;
- u32 max_ddr_vref = 0, max_phy_vref = 0;
-
- ast_moutdwm(ast, 0x1E6E0004, 0x00000313);
- ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
- ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
- ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
- ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
- ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
- ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
- ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
-
- /* DDR PHY Setting */
- ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE);
- ast_moutdwm(ast, 0x1E6E0204, 0x09002000);
- ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
- ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
- ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
- ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
- ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
- ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
- ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
- ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
- ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
- ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
- ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
- ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C);
- ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E);
-
- /* Controller Setting */
- ast_moutdwm(ast, 0x1E6E0034, 0x0001A991);
-
- /* Train PHY Vref first */
- pass = 0;
-
- for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
- max_phy_vref = 0x0;
- pass = 0;
- ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06);
- for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) {
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8));
- /* Fire DFI Init */
- ddr_phy_init_2500(ast);
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
- if (cbr_test_2500(ast)) {
- pass++;
- data = ast_mindwm(ast, 0x1E6E03D0);
- data2 = data >> 8;
- data = data & 0xff;
- if (data > data2)
- data = data2;
- if (max_phy_vref < data) {
- max_phy_vref = data;
- min_phy_vref = phy_vref;
- }
- } else if (pass > 0)
- break;
- }
- }
- ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8));
-
- /* Train DDR Vref next */
- pass = 0;
-
- for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
- min_ddr_vref = 0xFF;
- max_ddr_vref = 0x0;
- pass = 0;
- for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) {
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
- /* Fire DFI Init */
- ddr_phy_init_2500(ast);
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
- if (cbr_test_2500(ast)) {
- pass++;
- if (min_ddr_vref > ddr_vref)
- min_ddr_vref = ddr_vref;
- if (max_ddr_vref < ddr_vref)
- max_ddr_vref = ddr_vref;
- } else if (pass != 0)
- break;
- }
- }
-
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1;
- ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
-
- /* Wait DDR PHY init done */
- ddr_phy_init_2500(ast);
-
- ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
- ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
- ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
-
- check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
- enable_cache_2500(ast);
- ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
- ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
-}
-
-static bool ast_dram_init_2500(struct ast_device *ast)
-{
- u32 data;
- u32 max_tries = 5;
-
- do {
- if (max_tries-- == 0)
- return false;
- set_mpll_2500(ast);
- reset_mmc_2500(ast);
- ddr_init_common_2500(ast);
-
- data = ast_mindwm(ast, 0x1E6E2070);
- if (data & 0x01000000)
- ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table);
- else
- ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table);
- } while (!ddr_test_2500(ast));
-
- ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41);
-
- /* Patch code */
- data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF;
- ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000);
-
- return true;
-}
-
-void ast_patch_ahb_2500(void __iomem *regs)
-{
- u32 data;
-
- /* Clear bus lock condition */
- __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
- __ast_moutdwm(regs, 0x1e600084, 0x00010000);
- __ast_moutdwm(regs, 0x1e600088, 0x00000000);
- __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
-
- data = __ast_mindwm(regs, 0x1e6e2070);
- if (data & 0x08000000) { /* check fast reset */
- /*
- * If "Fast restet" is enabled for ARM-ICE debugger,
- * then WDT needs to enable, that
- * WDT04 is WDT#1 Reload reg.
- * WDT08 is WDT#1 counter restart reg to avoid system deadlock
- * WDT0C is WDT#1 control reg
- * [6:5]:= 01:Full chip
- * [4]:= 1:1MHz clock source
- * [1]:= 1:WDT will be cleeared and disabled after timeout occurs
- * [0]:= 1:WDT enable
- */
- __ast_moutdwm(regs, 0x1E785004, 0x00000010);
- __ast_moutdwm(regs, 0x1E785008, 0x00004755);
- __ast_moutdwm(regs, 0x1E78500c, 0x00000033);
- udelay(1000);
- }
-
- do {
- __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
- data = __ast_mindwm(regs, 0x1e6e2000);
- } while (data != 1);
-
- __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
-}
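
(For reference, the value 0x00000033 written to WDT0C above decodes exactly per the bit-field comment: [6:5]=01 full-chip reset, [4]=1 for the 1MHz clock source, [1]=1 auto-clear after timeout, [0]=1 enable.)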
-
-void ast_post_chip_2500(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- u32 temp;
- u8 reg;
-
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
- /* Clear bus lock condition */
- ast_patch_ahb_2500(ast->regs);
-
- /* Disable watchdog */
- ast_moutdwm(ast, 0x1E78502C, 0x00000000);
- ast_moutdwm(ast, 0x1E78504C, 0x00000000);
-
- /*
- * Reset the USB port to work around the USB unknown-device issue.
- * SCU90 is Multi-function Pin Control #5
- * [29]:= 1:Enable USB2.0 Host port#1 (the mutually shared USB2.0 Hub
- * port).
- * SCU94 is Multi-function Pin Control #6
- * [14:13]:= 1x:USB2.0 Host2 controller
- * SCU70 is Hardware Strap reg
- * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by
- * [18]: 0(24)/1(48) MHz)
- * SCU7C is Write clear reg to SCU70
- * [23]:= write 1 and then SCU70[23] will be cleared to 0.
- */
- ast_moutdwm(ast, 0x1E6E2090, 0x20000000);
- ast_moutdwm(ast, 0x1E6E2094, 0x00004000);
- if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) {
- ast_moutdwm(ast, 0x1E6E207C, 0x00800000);
- mdelay(100);
- ast_moutdwm(ast, 0x1E6E2070, 0x00800000);
- }
- /* Modify eSPI reset pin */
- temp = ast_mindwm(ast, 0x1E6E2070);
- if (temp & 0x02000000)
- ast_moutdwm(ast, 0x1E6E207C, 0x00004000);
-
- /* Slow down CPU/AHB CLK in VGA only mode */
- temp = ast_read32(ast, 0x12008);
- temp |= 0x73;
- ast_write32(ast, 0x12008, temp);
-
- if (!ast_dram_init_2500(ast))
- drm_err(dev, "DRAM init failed !\n");
-
- temp = ast_mindwm(ast, 0x1e6e2040);
- ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
- }
-
- /* wait ready */
- do {
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- } while ((reg & 0x40) == 0);
-}
diff --git a/drivers/gpu/drm/ast/ast_post.h b/drivers/gpu/drm/ast/ast_post.h
new file mode 100644
index 000000000000..aa5d247bebe8
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_post.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef AST_POST_H
+#define AST_POST_H
+
+#include <linux/limits.h>
+#include <linux/types.h>
+
+struct ast_device;
+
+/* DRAM timing tables */
+struct ast_dramstruct {
+ u16 index;
+ u32 data;
+};
+
+/* hardware fields */
+#define __AST_DRAMSTRUCT_DRAM_TYPE 0x0004
+
+/* control commands */
+#define __AST_DRAMSTRUCT_UDELAY 0xff00
+#define __AST_DRAMSTRUCT_INVALID 0xffff
+
+#define __AST_DRAMSTRUCT_INDEX(_name) \
+ (__AST_DRAMSTRUCT_ ## _name)
+
+#define AST_DRAMSTRUCT_INIT(_name, _value) \
+ { __AST_DRAMSTRUCT_INDEX(_name), (_value) }
+
+#define AST_DRAMSTRUCT_UDELAY(_usecs) \
+ AST_DRAMSTRUCT_INIT(UDELAY, _usecs)
+#define AST_DRAMSTRUCT_INVALID \
+ AST_DRAMSTRUCT_INIT(INVALID, U32_MAX)
+
+#define AST_DRAMSTRUCT_IS(_entry, _name) \
+ ((_entry)->index == __AST_DRAMSTRUCT_INDEX(_name))
+
+u32 __ast_mindwm(void __iomem *regs, u32 r);
+void __ast_moutdwm(void __iomem *regs, u32 r, u32 v);
+
+bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl);
+bool mmc_test_burst(struct ast_device *ast, u32 datagen);
+
+/* ast_2000.c */
+void ast_2000_set_def_ext_reg(struct ast_device *ast);
+
+/* ast_2300.c */
+void ast_2300_set_def_ext_reg(struct ast_device *ast);
+
+#endif
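
A minimal sketch of how a timing table and its consumer might use the AST_DRAMSTRUCT helpers declared above. The register indices and data values are illustrative, not real AST timings, and the apply loop is simplified relative to the driver's, which additionally special-cases the DRAM_TYPE field and repeats each udelay():

	static const struct ast_dramstruct example_table[] = {
		AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000585),	/* hardware field 0x0004 */
		{ 0x0008, 0x0011030f },				/* plain register write */
		AST_DRAMSTRUCT_UDELAY(67),			/* control command 0xff00: delay 67 us */
		AST_DRAMSTRUCT_INVALID,				/* 0xffff terminator */
	};

	static void example_apply(struct ast_device *ast, const struct ast_dramstruct *entry)
	{
		for (; !AST_DRAMSTRUCT_IS(entry, INVALID); entry++) {
			if (AST_DRAMSTRUCT_IS(entry, UDELAY))
				udelay(entry->data);
			else
				ast_write32(ast, 0x10000 + entry->index, entry->data);
		}
	}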
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index f46a5e26b5dd..59a5256ce8a6 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -5,6 +5,9 @@ config DRM_I2C_ADV7511
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_MIPI_DSI
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_DISPLAY_HDMI_STATE_HELPER
help
Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
@@ -19,7 +22,7 @@ config DRM_I2C_ADV7511_AUDIO
config DRM_I2C_ADV7511_CEC
bool "ADV7511/33/35 HDMI CEC driver"
depends on DRM_I2C_ADV7511
- select CEC_CORE
+ select DRM_DISPLAY_HDMI_CEC_HELPER
default y
help
When selected the HDMI transmitter will support the CEC feature.
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index ec0b7f3d889c..85ebead9809c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -313,16 +313,11 @@ enum adv7511_csc_scaling {
* @csc_enable: Whether to enable color space conversion
* @csc_scaling_factor: Color space conversion scaling factor
* @csc_coefficents: Color space conversion coefficients
- * @hdmi_mode: Whether to use HDMI or DVI output mode
- * @avi_infoframe: HDMI infoframe
*/
struct adv7511_video_config {
bool csc_enable;
enum adv7511_csc_scaling csc_scaling_factor;
const uint16_t *csc_coefficents;
-
- bool hdmi_mode;
- struct hdmi_avi_infoframe avi_infoframe;
};
enum adv7511_type {
@@ -337,6 +332,7 @@ struct adv7511_chip_info {
enum adv7511_type type;
unsigned int max_mode_clock_khz;
unsigned int max_lane_freq_khz;
+ const char *name;
const char * const *supply_names;
unsigned int num_supplies;
unsigned int reg_cec_offset;
@@ -371,7 +367,7 @@ struct adv7511 {
struct work_struct hpd_work;
struct drm_bridge bridge;
- struct drm_connector connector;
+ struct drm_connector *cec_connector;
bool embedded_sync;
enum adv7511_sync_polarity vsync_polarity;
@@ -389,9 +385,7 @@ struct adv7511 {
bool use_timing_gen;
const struct adv7511_chip_info *info;
- struct platform_device *audio_pdev;
- struct cec_adapter *cec_adap;
u8 cec_addr[ADV7511_MAX_ADDRS];
u8 cec_valid_addrs;
bool cec_enabled_adap;
@@ -399,20 +393,29 @@ struct adv7511 {
u32 cec_clk_freq;
};
+static inline struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct adv7511, bridge);
+}
+
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
+int adv7511_cec_init(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+int adv7511_cec_enable(struct drm_bridge *bridge, bool enable);
+int adv7511_cec_log_addr(struct drm_bridge *bridge, u8 addr);
+int adv7511_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
#else
-static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
-{
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
- ADV7511_CEC_CTRL_POWER_DOWN);
- return 0;
-}
+#define adv7511_cec_init NULL
+#define adv7511_cec_enable NULL
+#define adv7511_cec_log_addr NULL
+#define adv7511_cec_transmit NULL
#endif
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
+void adv7533_dsi_config_timing_gen(struct adv7511 *adv);
enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
const struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
@@ -421,16 +424,18 @@ int adv7533_attach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
-int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
-void adv7511_audio_exit(struct adv7511 *adv7511);
+int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
#else /*CONFIG_DRM_I2C_ADV7511_AUDIO */
-static inline int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
-{
- return 0;
-}
-static inline void adv7511_audio_exit(struct adv7511 *adv7511)
-{
-}
+#define adv7511_hdmi_audio_startup NULL
+#define adv7511_hdmi_audio_shutdown NULL
+#define adv7511_hdmi_audio_prepare NULL
#endif /* CONFIG_DRM_I2C_ADV7511_AUDIO */
#endif /* __DRM_I2C_ADV7511_H__ */
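
The CEC and audio entry points above now follow the drm_bridge callback shape. A hedged sketch of how they are presumably wired into the bridge's ops table elsewhere in the series; the hdmi_cec_*/hdmi_audio_* field names are assumptions based on the DRM HDMI helper conversions and are not shown in this hunk:

	static const struct drm_bridge_funcs adv7511_bridge_funcs = {
		/* ... mode-set and detect hooks elided ... */
		.hdmi_audio_startup	= adv7511_hdmi_audio_startup,
		.hdmi_audio_prepare	= adv7511_hdmi_audio_prepare,
		.hdmi_audio_shutdown	= adv7511_hdmi_audio_shutdown,
		.hdmi_cec_init		= adv7511_cec_init,
		.hdmi_cec_enable	= adv7511_cec_enable,
		.hdmi_cec_log_addr	= adv7511_cec_log_addr,
		.hdmi_cec_transmit	= adv7511_cec_transmit,
	};

With this shape, the NULL fallbacks in the #else branches make the unconfigured case a plain no-op assignment rather than a stub function.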
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 1ff8c815ec79..766b1c96bc88 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -55,11 +55,12 @@ static int adv7511_update_cts_n(struct adv7511 *adv7511)
return 0;
}
-static int adv7511_hdmi_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *fmt,
- struct hdmi_codec_params *hparms)
+int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int audio_source, i2s_format = 0;
unsigned int invert_clock;
unsigned int rate;
@@ -167,9 +168,10 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
return 0;
}
-static int audio_startup(struct device *dev, void *data)
+int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
@@ -204,69 +206,12 @@ static int audio_startup(struct device *dev, void *data)
return 0;
}
-static void audio_shutdown(struct device *dev, void *data)
+void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
}
-
-static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint,
- void *data)
-{
- struct of_endpoint of_ep;
- int ret;
-
- ret = of_graph_parse_endpoint(endpoint, &of_ep);
- if (ret < 0)
- return ret;
-
- /*
- * HDMI sound should be located as reg = <2>
- * Then, it is sound port 0
- */
- if (of_ep.port == 2)
- return 0;
-
- return -EINVAL;
-}
-
-static const struct hdmi_codec_ops adv7511_codec_ops = {
- .hw_params = adv7511_hdmi_hw_params,
- .audio_shutdown = audio_shutdown,
- .audio_startup = audio_startup,
- .get_dai_id = adv7511_hdmi_i2s_get_dai_id,
-};
-
-static const struct hdmi_codec_pdata codec_data = {
- .ops = &adv7511_codec_ops,
- .i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |
- SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE),
- .max_i2s_channels = 2,
- .i2s = 1,
- .no_i2s_capture = 1,
- .spdif = 1,
- .no_spdif_capture = 1,
-};
-
-int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
-{
- adv7511->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(adv7511->audio_pdev);
-}
-
-void adv7511_audio_exit(struct adv7511 *adv7511)
-{
- if (adv7511->audio_pdev) {
- platform_device_unregister(adv7511->audio_pdev);
- adv7511->audio_pdev = NULL;
- }
-}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index 2e9c88a2b5ed..8ecbc25dc647 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -12,6 +12,8 @@
#include <media/cec.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
#include "adv7511.h"
static const u8 ADV7511_REG_CEC_RX_FRAME_HDR[] = {
@@ -44,8 +46,8 @@ static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
return;
if (tx_raw_status & ADV7511_INT1_CEC_TX_ARBIT_LOST) {
- cec_transmit_attempt_done(adv7511->cec_adap,
- CEC_TX_STATUS_ARB_LOST);
+ drm_connector_hdmi_cec_transmit_attempt_done(adv7511->cec_connector,
+ CEC_TX_STATUS_ARB_LOST);
return;
}
if (tx_raw_status & ADV7511_INT1_CEC_TX_RETRY_TIMEOUT) {
@@ -72,12 +74,14 @@ static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
if (low_drive_cnt)
status |= CEC_TX_STATUS_LOW_DRIVE;
}
- cec_transmit_done(adv7511->cec_adap, status,
- 0, nack_cnt, low_drive_cnt, err_cnt);
+ drm_connector_hdmi_cec_transmit_done(adv7511->cec_connector, status,
+ 0, nack_cnt, low_drive_cnt,
+ err_cnt);
return;
}
if (tx_raw_status & ADV7511_INT1_CEC_TX_READY) {
- cec_transmit_attempt_done(adv7511->cec_adap, CEC_TX_STATUS_OK);
+ drm_connector_hdmi_cec_transmit_attempt_done(adv7511->cec_connector,
+ CEC_TX_STATUS_OK);
return;
}
}
@@ -116,7 +120,7 @@ static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf)
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_RX_BUFFERS + offset, BIT(rx_buf), 0);
- cec_received_msg(adv7511->cec_adap, &msg);
+ drm_connector_hdmi_cec_received_msg(adv7511->cec_connector, &msg);
}
int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
@@ -179,9 +183,9 @@ int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
return IRQ_HANDLED;
}
-static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
+int adv7511_cec_enable(struct drm_bridge *bridge, bool enable)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
if (adv7511->i2c_cec == NULL)
@@ -225,9 +229,9 @@ static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
return 0;
}
-static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+int adv7511_cec_log_addr(struct drm_bridge *bridge, u8 addr)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
unsigned int i, free_idx = ADV7511_MAX_ADDRS;
@@ -293,10 +297,10 @@ static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
return 0;
}
-static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
- u32 signal_free_time, struct cec_msg *msg)
+int adv7511_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
u8 len = msg->len;
unsigned int i;
@@ -328,12 +332,6 @@ static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
return 0;
}
-static const struct cec_adap_ops adv7511_cec_adap_ops = {
- .adap_enable = adv7511_cec_adap_enable,
- .adap_log_addr = adv7511_cec_adap_log_addr,
- .adap_transmit = adv7511_cec_adap_transmit,
-};
-
static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
{
adv7511->cec_clk = devm_clk_get(dev, "cec");
@@ -348,20 +346,18 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
return 0;
}
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+int adv7511_cec_init(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+ struct device *dev = &adv7511->i2c_main->dev;
unsigned int offset = adv7511->info->reg_cec_offset;
int ret = adv7511_cec_parse_dt(dev, adv7511);
if (ret)
goto err_cec_parse_dt;
- adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
- adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
- if (IS_ERR(adv7511->cec_adap)) {
- ret = PTR_ERR(adv7511->cec_adap);
- goto err_cec_alloc;
- }
+ adv7511->cec_connector = connector;
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0);
/* cec soft reset */
@@ -378,17 +374,8 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
ADV7511_REG_CEC_CLK_DIV + offset,
((adv7511->cec_clk_freq / 750000) - 1) << 2);
- ret = cec_register_adapter(adv7511->cec_adap, dev);
- if (ret)
- goto err_cec_register;
return 0;
-err_cec_register:
- cec_delete_adapter(adv7511->cec_adap);
- adv7511->cec_adap = NULL;
-err_cec_alloc:
- dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
- ret);
err_cec_parse_dt:
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
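
With the adapter now created and owned by the DRM core (see DRM_BRIDGE_OP_HDMI_CEC_ADAPTER later in this series), the interrupt paths above report events through the connector helpers rather than a driver-held cec_adapter. A sketch of the forwarding calls, assuming the connector pointer cached by adv7511_cec_init(); the wrapper function is illustrative only:

#include <media/cec.h>
#include <drm/display/drm_hdmi_cec_helper.h>

/* Illustrative helper, not part of the patch: report a completed
 * transmit and a received message through the cached connector. */
static void my_report_cec_events(struct drm_connector *connector,
				 struct cec_msg *msg)
{
	drm_connector_hdmi_cec_transmit_attempt_done(connector,
						     CEC_TX_STATUS_OK);
	drm_connector_hdmi_cec_received_msg(connector, msg);
}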
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 1257009e850c..00d6417c177b 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -12,14 +12,17 @@
#include <linux/of.h>
#include <linux/slab.h>
-#include <media/cec.h>
+#include <sound/pcm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include "adv7511.h"
@@ -203,62 +206,37 @@ static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
static void adv7511_set_config_csc(struct adv7511 *adv7511,
struct drm_connector *connector,
- bool rgb, bool hdmi_mode)
+ bool rgb)
{
struct adv7511_video_config config;
bool output_format_422, output_format_ycbcr;
unsigned int mode;
- uint8_t infoframe[17];
-
- config.hdmi_mode = hdmi_mode;
-
- hdmi_avi_infoframe_init(&config.avi_infoframe);
-
- config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
if (rgb) {
config.csc_enable = false;
- config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ output_format_422 = false;
+ output_format_ycbcr = false;
} else {
config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
if ((connector->display_info.color_formats &
DRM_COLOR_FORMAT_YCBCR422) &&
- config.hdmi_mode) {
+ connector->display_info.is_hdmi) {
config.csc_enable = false;
- config.avi_infoframe.colorspace =
- HDMI_COLORSPACE_YUV422;
- } else {
- config.csc_enable = true;
- config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
- }
- }
-
- if (config.hdmi_mode) {
- mode = ADV7511_HDMI_CFG_MODE_HDMI;
-
- switch (config.avi_infoframe.colorspace) {
- case HDMI_COLORSPACE_YUV444:
- output_format_422 = false;
- output_format_ycbcr = true;
- break;
- case HDMI_COLORSPACE_YUV422:
output_format_422 = true;
output_format_ycbcr = true;
- break;
- default:
+ } else {
+ config.csc_enable = true;
output_format_422 = false;
output_format_ycbcr = false;
- break;
}
- } else {
- mode = ADV7511_HDMI_CFG_MODE_DVI;
- output_format_422 = false;
- output_format_ycbcr = false;
}
- adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ if (connector->display_info.is_hdmi)
+ mode = ADV7511_HDMI_CFG_MODE_HDMI;
+ else
+ mode = ADV7511_HDMI_CFG_MODE_DVI;
adv7511_set_colormap(adv7511, config.csc_enable,
config.csc_coefficents,
@@ -269,15 +247,6 @@ static void adv7511_set_config_csc(struct adv7511 *adv7511,
regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
ADV7511_HDMI_CFG_MODE_MASK, mode);
-
- hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
- sizeof(infoframe));
-
- /* The AVI infoframe id is not configurable */
- regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
- infoframe + 1, sizeof(infoframe) - 1);
-
- adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
}
static void adv7511_set_link_config(struct adv7511 *adv7511,
@@ -446,22 +415,16 @@ static void adv7511_hpd_work(struct work_struct *work)
* restore its state.
*/
if (status == connector_status_connected &&
- adv7511->connector.status == connector_status_disconnected &&
+ adv7511->status == connector_status_disconnected &&
adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
}
- if (adv7511->connector.status != status) {
- adv7511->connector.status = status;
+ if (adv7511->status != status) {
+ adv7511->status = status;
- if (adv7511->connector.dev) {
- if (status == connector_status_disconnected)
- cec_phys_addr_invalidate(adv7511->cec_adap);
- drm_kms_helper_hotplug_event(adv7511->connector.dev);
- } else {
- drm_bridge_hpd_notify(&adv7511->bridge, status);
- }
+ drm_bridge_hpd_notify(&adv7511->bridge, status);
}
}
@@ -636,45 +599,11 @@ static const struct drm_edid *adv7511_edid_read(struct adv7511 *adv7511,
if (!adv7511->powered)
__adv7511_power_off(adv7511);
- if (drm_edid) {
- /*
- * FIXME: The CEC physical address should be set using
- * cec_s_phys_addr(adap,
- * connector->display_info.source_physical_address, false) from
- * a path that has read the EDID and called
- * drm_edid_connector_update().
- */
- const struct edid *edid = drm_edid_raw(drm_edid);
-
- adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
- drm_detect_hdmi_monitor(edid));
-
- cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
- } else {
- cec_s_phys_addr_from_edid(adv7511->cec_adap, NULL);
- }
-
return drm_edid;
}
-static int adv7511_get_modes(struct adv7511 *adv7511,
- struct drm_connector *connector)
-{
- const struct drm_edid *drm_edid;
- unsigned int count;
-
- drm_edid = adv7511_edid_read(adv7511, connector);
-
- drm_edid_connector_update(connector, drm_edid);
- count = drm_edid_connector_add_modes(connector);
-
- drm_edid_free(drm_edid);
-
- return count;
-}
-
static enum drm_connector_status
-adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
+adv7511_detect(struct adv7511 *adv7511)
{
enum drm_connector_status status;
unsigned int val;
@@ -699,8 +628,6 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
if (status == connector_status_connected && hpd && adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
- if (connector)
- adv7511_get_modes(adv7511, connector);
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
} else {
@@ -719,17 +646,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
return status;
}
-static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
- const struct drm_display_mode *mode)
-{
- if (mode->clock > 165000)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
-}
-
static void adv7511_mode_set(struct adv7511 *adv7511,
- const struct drm_display_mode *mode,
const struct drm_display_mode *adj_mode)
{
unsigned int low_refresh_rate;
@@ -800,11 +717,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
vsync_polarity = 1;
}
- if (drm_mode_vrefresh(mode) <= 24)
+ if (drm_mode_vrefresh(adj_mode) <= 24)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
- else if (drm_mode_vrefresh(mode) <= 25)
+ else if (drm_mode_vrefresh(adj_mode) <= 25)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
- else if (drm_mode_vrefresh(mode) <= 30)
+ else if (drm_mode_vrefresh(adj_mode) <= 30)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
@@ -821,82 +738,30 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
drm_mode_copy(&adv7511->curr_mode, adj_mode);
+ /* Update horizontal/vertical porch params */
+ if (adv7511->info->has_dsi && adv7511->use_timing_gen)
+ adv7533_dsi_config_timing_gen(adv7511);
+
/*
* TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
* supposed to give better results.
*/
- adv7511->f_tmds = mode->clock;
-}
-
-/* -----------------------------------------------------------------------------
- * DRM Connector Operations
- */
-
-static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
-{
- return container_of(connector, struct adv7511, connector);
-}
-
-static int adv7511_connector_get_modes(struct drm_connector *connector)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_get_modes(adv, connector);
-}
-
-static enum drm_mode_status
-adv7511_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_mode_valid(adv, mode);
+ adv7511->f_tmds = adj_mode->clock;
}
-static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = {
- .get_modes = adv7511_connector_get_modes,
- .mode_valid = adv7511_connector_mode_valid,
-};
-
-static enum drm_connector_status
-adv7511_connector_detect(struct drm_connector *connector, bool force)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_detect(adv, connector);
-}
-
-static const struct drm_connector_funcs adv7511_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = adv7511_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
static int adv7511_connector_init(struct adv7511 *adv)
{
struct drm_bridge *bridge = &adv->bridge;
- int ret;
-
- if (adv->i2c_main->irq)
- adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
- else
- adv->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ struct drm_connector *connector;
- ret = drm_connector_init(bridge->dev, &adv->connector,
- &adv7511_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret < 0) {
+ connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+ if (IS_ERR(connector)) {
DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
+ return PTR_ERR(connector);
}
- drm_connector_helper_add(&adv->connector,
- &adv7511_connector_helper_funcs);
- drm_connector_attach_encoder(&adv->connector, bridge->encoder);
+
+ drm_connector_attach_encoder(connector, bridge->encoder);
return 0;
}
@@ -905,7 +770,7 @@ static int adv7511_connector_init(struct adv7511 *adv)
* DRM Bridge Operations
*/
-static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+static const struct adv7511 *bridge_to_adv7511_const(const struct drm_bridge *bridge)
{
return container_of(bridge, struct adv7511, bridge);
}
@@ -914,8 +779,29 @@ static void adv7511_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
adv7511_power_on(adv);
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ adv7511_set_config_csc(adv, connector, adv->rgb);
+
+ adv7511_mode_set(adv, &crtc_state->adjusted_mode);
+
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
}
static void adv7511_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -926,13 +812,17 @@ static void adv7511_bridge_atomic_disable(struct drm_bridge *bridge,
adv7511_power_off(adv);
}
-static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adj_mode)
+static enum drm_mode_status
+adv7511_bridge_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
{
- struct adv7511 *adv = bridge_to_adv7511(bridge);
+ const struct adv7511 *adv = bridge_to_adv7511_const(bridge);
- adv7511_mode_set(adv, mode, adj_mode);
+ if (tmds_rate > 1000ULL * adv->info->max_mode_clock_khz)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
}
static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
@@ -941,10 +831,10 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
- if (adv->info->has_dsi)
- return adv7533_mode_valid(adv, mode);
- else
- return adv7511_mode_valid(adv, mode);
+ if (!adv->info->has_dsi)
+ return MODE_OK;
+
+ return adv7533_mode_valid(adv, mode);
}
static int adv7511_bridge_attach(struct drm_bridge *bridge,
@@ -974,11 +864,12 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge,
return ret;
}
-static enum drm_connector_status adv7511_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+adv7511_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
- return adv7511_detect(adv, NULL);
+ return adv7511_detect(adv);
}
static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge,
@@ -989,28 +880,71 @@ static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge
return adv7511_edid_read(adv, connector);
}
-static void adv7511_bridge_hpd_notify(struct drm_bridge *bridge,
- enum drm_connector_status status)
+static int adv7511_bridge_hdmi_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
{
- struct adv7511 *adv = bridge_to_adv7511(bridge);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ break;
+ default:
+ drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
+ break;
+ }
- if (status == connector_status_disconnected)
- cec_phys_addr_invalidate(adv->cec_adap);
+ return 0;
+}
+
+static int adv7511_bridge_hdmi_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+
+ adv7511_bridge_hdmi_clear_infoframe(bridge, type);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ /* The AVI infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
+ buffer + 1, len - 1);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ break;
+ default:
+ drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
+ break;
+ }
+
+ return 0;
}
static const struct drm_bridge_funcs adv7511_bridge_funcs = {
- .mode_set = adv7511_bridge_mode_set,
.mode_valid = adv7511_bridge_mode_valid,
.attach = adv7511_bridge_attach,
.detect = adv7511_bridge_detect,
.edid_read = adv7511_bridge_edid_read,
- .hpd_notify = adv7511_bridge_hpd_notify,
.atomic_enable = adv7511_bridge_atomic_enable,
.atomic_disable = adv7511_bridge_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
+
+ .hdmi_tmds_char_rate_valid = adv7511_bridge_hdmi_tmds_char_rate_valid,
+ .hdmi_clear_infoframe = adv7511_bridge_hdmi_clear_infoframe,
+ .hdmi_write_infoframe = adv7511_bridge_hdmi_write_infoframe,
+
+ .hdmi_audio_startup = adv7511_hdmi_audio_startup,
+ .hdmi_audio_prepare = adv7511_hdmi_audio_prepare,
+ .hdmi_audio_shutdown = adv7511_hdmi_audio_shutdown,
+
+ .hdmi_cec_init = adv7511_cec_init,
+ .hdmi_cec_enable = adv7511_cec_enable,
+ .hdmi_cec_log_addr = adv7511_cec_log_addr,
+ .hdmi_cec_transmit = adv7511_cec_transmit,
};
/* -----------------------------------------------------------------------------
@@ -1224,9 +1158,10 @@ static int adv7511_probe(struct i2c_client *i2c)
if (!dev->of_node)
return -EINVAL;
- adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
- if (!adv7511)
- return -ENOMEM;
+ adv7511 = devm_drm_bridge_alloc(dev, struct adv7511, bridge,
+ &adv7511_bridge_funcs);
+ if (IS_ERR(adv7511))
+ return PTR_ERR(adv7511);
adv7511->i2c_main = i2c;
adv7511->powered = false;
@@ -1323,22 +1258,43 @@ static int adv7511_probe(struct i2c_client *i2c)
if (adv7511->info->link_config)
adv7511_set_link_config(adv7511, &link_config);
- ret = adv7511_cec_init(dev, adv7511);
- if (ret)
- goto err_unregister_cec;
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
+ ADV7511_CEC_CTRL_POWER_DOWN);
- adv7511->bridge.funcs = &adv7511_bridge_funcs;
- adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HDMI;
if (adv7511->i2c_main->irq)
adv7511->bridge.ops |= DRM_BRIDGE_OP_HPD;
+ adv7511->bridge.vendor = "Analog";
+ adv7511->bridge.product = adv7511->info->name;
+
+#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
+ adv7511->bridge.ops |= DRM_BRIDGE_OP_HDMI_AUDIO;
+ adv7511->bridge.hdmi_audio_dev = dev;
+ adv7511->bridge.hdmi_audio_max_i2s_playback_channels = 2;
+ adv7511->bridge.hdmi_audio_i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE);
+ adv7511->bridge.hdmi_audio_spdif_playback = 1;
+ adv7511->bridge.hdmi_audio_dai_port = 2;
+#endif
+
+#ifdef CONFIG_DRM_I2C_ADV7511_CEC
+ adv7511->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER;
+ adv7511->bridge.hdmi_cec_dev = dev;
+ adv7511->bridge.hdmi_cec_adapter_name = dev_name(dev);
+ adv7511->bridge.hdmi_cec_available_las = ADV7511_MAX_ADDRS;
+#endif
+
adv7511->bridge.of_node = dev->of_node;
adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&adv7511->bridge);
- adv7511_audio_init(dev, adv7511);
-
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
@@ -1360,10 +1316,7 @@ static int adv7511_probe(struct i2c_client *i2c)
return 0;
err_unregister_audio:
- adv7511_audio_exit(adv7511);
drm_bridge_remove(&adv7511->bridge);
-err_unregister_cec:
- cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_packet:
@@ -1388,9 +1341,6 @@ static void adv7511_remove(struct i2c_client *i2c)
drm_bridge_remove(&adv7511->bridge);
- adv7511_audio_exit(adv7511);
-
- cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
@@ -1400,6 +1350,8 @@ static void adv7511_remove(struct i2c_client *i2c)
static const struct adv7511_chip_info adv7511_chip_info = {
.type = ADV7511,
+ .name = "ADV7511",
+ .max_mode_clock_khz = 165000,
.supply_names = adv7511_supply_names,
.num_supplies = ARRAY_SIZE(adv7511_supply_names),
.link_config = true,
@@ -1407,6 +1359,7 @@ static const struct adv7511_chip_info adv7511_chip_info = {
static const struct adv7511_chip_info adv7533_chip_info = {
.type = ADV7533,
+ .name = "ADV7533",
.max_mode_clock_khz = 80000,
.max_lane_freq_khz = 800000,
.supply_names = adv7533_supply_names,
@@ -1417,6 +1370,7 @@ static const struct adv7511_chip_info adv7533_chip_info = {
static const struct adv7511_chip_info adv7535_chip_info = {
.type = ADV7535,
+ .name = "ADV7535",
.max_mode_clock_khz = 148500,
.max_lane_freq_khz = 891000,
.supply_names = adv7533_supply_names,
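
The new .hdmi_write_infoframe callback receives a frame already packed by the HDMI state helper; byte 0 of the buffer is the infoframe type, which the ADV7511 hardware fixes itself, hence the "buffer + 1, len - 1" register write above. A standalone sketch of producing such a buffer (the frame setup is illustrative):

#include <linux/hdmi.h>

static ssize_t my_pack_avi(u8 *buf, size_t size)
{
	struct hdmi_avi_infoframe frame;

	hdmi_avi_infoframe_init(&frame);
	frame.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;

	/* buf[0] = type (AVI), buf[1] = version, buf[2] = length, ... */
	return hdmi_avi_infoframe_pack(&frame, buf, size);
}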
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 122ad91e8a32..188c1093a66e 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -24,7 +24,7 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
{ 0x05, 0xc8 },
};
-static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
+void adv7533_dsi_config_timing_gen(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
struct drm_display_mode *mode = &adv->curr_mode;
@@ -67,9 +67,6 @@ void adv7533_dsi_power_on(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
- if (adv->use_timing_gen)
- adv7511_dsi_config_timing_gen(adv);
-
/* set number of dsi lanes */
regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
@@ -106,10 +103,6 @@ enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
struct mipi_dsi_device *dsi = adv->dsi;
u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- /* Check max clock for either 7533 or 7535 */
- if (mode->clock > adv->info->max_mode_clock_khz)
- return MODE_CLOCK_HIGH;
-
/* Check max clock for each lane */
if (mode->clock * bpp > adv->info->max_lane_freq_khz * adv->num_dsi_lanes)
return MODE_CLOCK_HIGH;
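
After this split the generic pixel-clock ceiling is enforced in .hdmi_tmds_char_rate_valid, which works in Hz (hence the 1000ULL scaling against max_mode_clock_khz), while adv7533_mode_valid() keeps only the per-lane DSI bandwidth check. A worked example with the ADV7533 limits from the chip info above (80000 kHz mode clock, 800000 kHz per lane; illustrative, not part of the patch):

#include <linux/types.h>

/* 720p60 on a 4-lane, 24bpp DSI link passes both checks. */
static bool adv7533_720p60_fits(void)
{
	unsigned long long tmds_rate = 74250000ULL;	/* Hz */
	bool rate_ok  = tmds_rate <= 1000ULL * 80000;	/* 74.25M <= 80M Hz */
	bool lanes_ok = 74250 * 24 <= 800000 * 4;	/* 1782000 <= 3200000 kHz */

	return rate_ok && lanes_ok;			/* true */
}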
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index a83020d6576f..ba0fc149a9e7 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -1193,9 +1193,10 @@ static int anx78xx_i2c_probe(struct i2c_client *client)
bool found = false;
int err;
- anx78xx = devm_kzalloc(&client->dev, sizeof(*anx78xx), GFP_KERNEL);
- if (!anx78xx)
- return -ENOMEM;
+ anx78xx = devm_drm_bridge_alloc(&client->dev, struct anx78xx, bridge,
+ &anx78xx_bridge_funcs);
+ if (IS_ERR(anx78xx))
+ return PTR_ERR(anx78xx);
pdata = &anx78xx->pdata;
@@ -1306,8 +1307,6 @@ static int anx78xx_i2c_probe(struct i2c_client *client)
goto err_poweroff;
}
- anx78xx->bridge.funcs = &anx78xx_bridge_funcs;
-
drm_bridge_add(&anx78xx->bridge);
/* If cable is pulled out, just poweroff and wait for HPD event */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
index b1e482994ffe..e8662168717d 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
@@ -5,6 +5,8 @@
* Based on anx7808 driver obtained from chromeos with copyright:
* Copyright(c) 2013, Google Inc.
*/
+
+#include <linux/export.h>
#include <linux/regmap.h>
#include <drm/display/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 505eec6b819b..ed35e567d117 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -1040,7 +1041,7 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_connector *connector = NULL;
int ret = 0;
@@ -1124,7 +1125,7 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -1179,7 +1180,7 @@ out_dp_init:
static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int timeout_loop = 0;
@@ -1216,7 +1217,7 @@ static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
@@ -1239,7 +1240,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *old_crtc, *new_crtc;
struct drm_crtc_state *old_crtc_state = NULL;
struct drm_crtc_state *new_crtc_state = NULL;
@@ -1277,7 +1278,7 @@ out:
static void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
int ret;
@@ -1299,7 +1300,7 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_display_info *display_info = &dp->connector.display_info;
struct video_info *video = &dp->video_info;
struct device_node *dp_node = dp->dev->of_node;
@@ -1384,25 +1385,6 @@ static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
.attach = analogix_dp_bridge_attach,
};
-static int analogix_dp_create_bridge(struct drm_device *drm_dev,
- struct analogix_dp_device *dp)
-{
- struct drm_bridge *bridge;
-
- bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge) {
- DRM_ERROR("failed to allocate for drm bridge\n");
- return -ENOMEM;
- }
-
- dp->bridge = bridge;
-
- bridge->driver_private = dp;
- bridge->funcs = &analogix_dp_bridge_funcs;
-
- return drm_bridge_attach(dp->encoder, bridge, NULL, 0);
-}
-
static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
{
struct device_node *dp_node = dp->dev->of_node;
@@ -1490,7 +1472,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
return ERR_PTR(-EINVAL);
}
- dp = devm_kzalloc(dev, sizeof(struct analogix_dp_device), GFP_KERNEL);
+ dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge,
+ &analogix_dp_bridge_funcs);
- if (!dp)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(dp))
+ return ERR_CAST(dp);
@@ -1642,7 +1625,7 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
return ret;
}
- ret = analogix_dp_create_bridge(drm_dev, dp);
+ ret = drm_bridge_attach(dp->encoder, &dp->bridge, NULL, 0);
if (ret) {
DRM_ERROR("failed to create bridge (%d)\n", ret);
goto err_unregister_aux;
@@ -1659,7 +1642,7 @@ EXPORT_SYMBOL_GPL(analogix_dp_bind);
void analogix_dp_unbind(struct analogix_dp_device *dp)
{
- analogix_dp_bridge_disable(dp->bridge);
+ analogix_dp_bridge_disable(&dp->bridge);
dp->connector.funcs->destroy(&dp->connector);
drm_panel_unprepare(dp->plat_data->panel);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 2b54120ba4a3..b86e93f30ed6 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -11,6 +11,7 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_bridge.h>
#define DP_TIMEOUT_LOOP_COUNT 100
#define MAX_CR_LOOP 5
@@ -154,7 +155,7 @@ struct analogix_dp_device {
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
- struct drm_bridge *bridge;
+ struct drm_bridge bridge;
struct drm_dp_aux aux;
struct clk *clock;
unsigned int irq;
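
Embedding struct drm_bridge in analogix_dp_device, rather than pointing at a separately allocated one, is what lets devm_drm_bridge_alloc() replace both allocations and makes the to_dp() container_of conversion possible. A sketch of the allocation change; note that unlike devm_kzalloc() the helper returns an ERR_PTR() on failure, never NULL, so the error check must change with it:

	/* before: bare allocation, funcs wired by hand later */
	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return ERR_PTR(-ENOMEM);

	/* after: one call allocates the container, sets bridge.funcs and
	 * ties the lifetime to @dev */
	dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge,
				   &analogix_dp_bridge_funcs);
	if (IS_ERR(dp))
		return ERR_CAST(dp);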
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 8a9079c2ed5c..c0ad8f59e483 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2448,7 +2448,7 @@ anx7625_audio_update_connector_status(struct anx7625_data *ctx,
enum drm_connector_status status);
static enum drm_connector_status
-anx7625_bridge_detect(struct drm_bridge *bridge)
+anx7625_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
@@ -2596,7 +2596,6 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux)
return ret;
}
- platform->bridge.funcs = &anx7625_bridge_funcs;
platform->bridge.of_node = dev->of_node;
if (!anx7625_of_panel_on_aux_bus(dev))
platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
@@ -2630,10 +2629,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
return -ENODEV;
}
- platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
- if (!platform) {
+ platform = devm_drm_bridge_alloc(dev, struct anx7625_data, bridge, &anx7625_bridge_funcs);
+ if (IS_ERR(platform)) {
DRM_DEV_ERROR(dev, "fail to allocate driver data\n");
- return -ENOMEM;
+ return PTR_ERR(platform);
}
pdata = &platform->pdata;
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index c179b86d208f..b3e4cdff61d6 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -5,6 +5,7 @@
* Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
*/
#include <linux/auxiliary_bus.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -17,6 +18,7 @@ static void drm_aux_bridge_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ of_node_put(dev->of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
@@ -64,6 +66,7 @@ int drm_aux_bridge_register(struct device *parent)
ret = auxiliary_device_init(adev);
if (ret) {
+ of_node_put(adev->dev.of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
return ret;
@@ -109,9 +112,10 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
{
struct drm_aux_bridge_data *data;
- data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ data = devm_drm_bridge_alloc(&auxdev->dev, struct drm_aux_bridge_data,
+ bridge, &drm_aux_bridge_funcs);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
data->dev = &auxdev->dev;
data->next_bridge = devm_drm_of_get_bridge(&auxdev->dev, auxdev->dev.of_node, 0, 0);
@@ -119,7 +123,6 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
return dev_err_probe(&auxdev->dev, PTR_ERR(data->next_bridge),
"failed to acquire drm_bridge\n");
- data->bridge.funcs = &drm_aux_bridge_funcs;
data->bridge.of_node = data->dev->of_node;
/* passthrough data, allow everything */
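
The two of_node_put() calls added above balance the reference the registration path takes on the parent's of_node (the matching of_node_get() lies outside these hunks and is inferred here): one drop on the normal release path, one on the auxiliary_device_init() error path, which never reaches release. A sketch of the pairing:

	adev->dev.of_node = of_node_get(parent->of_node);	/* inferred */

	ret = auxiliary_device_init(adev);
	if (ret) {
		of_node_put(adev->dev.of_node);	/* init failed: no release() */
		ida_free(&drm_aux_bridge_ida, adev->id);
		kfree(adev);
		return ret;
	}
	/* on success, the reference is dropped in drm_aux_bridge_release() */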
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index af6f79793407..2e9c702c7087 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -5,6 +5,7 @@
* Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
*/
#include <linux/auxiliary_bus.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -172,12 +173,13 @@ static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev,
{
struct drm_aux_hpd_bridge_data *data;
- data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ data = devm_drm_bridge_alloc(&auxdev->dev,
+ struct drm_aux_hpd_bridge_data, bridge,
+ &drm_aux_hpd_bridge_funcs);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
data->dev = &auxdev->dev;
- data->bridge.funcs = &drm_aux_hpd_bridge_funcs;
data->bridge.of_node = dev_get_platdata(data->dev);
data->bridge.ops = DRM_BRIDGE_OP_HPD;
data->bridge.type = id->driver_data;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index b022dd6e6b6e..a57ca8c3bdae 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -670,13 +670,28 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
u32 val;
+ /*
+ * The cdns-dsi controller needs to be disabled after its DPI source
+ * has stopped streaming. If this order is not followed, there is a brief
+ * window after the cdns-dsi controller has been disabled but before the
+ * DPI source stops, where the DPI stream is still on yet the cdns-dsi
+ * controller is no longer ready to accept the incoming signals. This
+ * is one of the reasons why a shift in pixel colors is observed on
+ * displays that have cdns-dsi as one of the bridges.
+ *
+ * To mitigate this, disable this bridge from the bridge post_disable()
+ * hook, instead of the bridge _disable() hook. The bridge post_disable()
+ * hook gets called after the CRTC disable, where often many DPI sources
+ * disable their streams.
+ */
+
val = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
val &= ~(IF_VID_SELECT_MASK | IF_VID_MODE | VID_EN | HOST_EOT_GEN |
DISP_EOT_GEN);
@@ -688,15 +703,6 @@ static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
if (dsi->platform_ops && dsi->platform_ops->disable)
dsi->platform_ops->disable(dsi);
- pm_runtime_put(dsi->base.dev);
-}
-
-static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
-{
- struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
- struct cdns_dsi *dsi = input_to_dsi(input);
-
dsi->phy_initialized = false;
dsi->link_initialized = false;
phy_power_off(dsi->dphy);
@@ -774,8 +780,8 @@ static void cdns_dsi_init_link(struct cdns_dsi *dsi)
dsi->link_initialized = true;
}
-static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -792,6 +798,21 @@ static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
u32 tmp, reg_wakeup, div, status;
int nlanes;
+ /*
+ * The cdns-dsi controller needs to be enabled before its DPI source
+ * has begun streaming. If this order is not followed, there is a brief window
+ * after DPI source enable and before cdns-dsi controller enable where
+ * the DPI stream is on, but the cdns-dsi controller is not ready to
+ * accept the incoming signals. This is one of the reasons why a shift
+ * in pixel colors is observed on displays that have cdns-dsi as one of
+ * the bridges.
+ *
+ * To mitigate this, enable this bridge from the bridge pre_enable()
+ * hook, instead of the bridge _enable() hook. The bridge pre_enable()
+ * hook gets called before the CRTC enable, where often many DPI sources
+ * enable their streams.
+ */
+
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
@@ -811,8 +832,8 @@ static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
mode = &crtc_state->adjusted_mode;
nlanes = output->dev->lanes;
- cdns_dsi_hs_init(dsi);
cdns_dsi_init_link(dsi);
+ cdns_dsi_hs_init(dsi);
/*
* Now that the DSI Link and DSI Phy are initialized,
@@ -941,19 +962,6 @@ static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
writel(tmp, dsi->regs + MCTL_MAIN_EN);
}
-static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
-{
- struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
- struct cdns_dsi *dsi = input_to_dsi(input);
-
- if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
- return;
-
- cdns_dsi_init_link(dsi);
- cdns_dsi_hs_init(dsi);
-}
-
static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
@@ -1048,9 +1056,7 @@ cdns_dsi_bridge_atomic_reset(struct drm_bridge *bridge)
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
.attach = cdns_dsi_bridge_attach,
.mode_valid = cdns_dsi_bridge_mode_valid,
- .atomic_disable = cdns_dsi_bridge_atomic_disable,
.atomic_pre_enable = cdns_dsi_bridge_atomic_pre_enable,
- .atomic_enable = cdns_dsi_bridge_atomic_enable,
.atomic_post_disable = cdns_dsi_bridge_atomic_post_disable,
.atomic_check = cdns_dsi_bridge_atomic_check,
.atomic_reset = cdns_dsi_bridge_atomic_reset,
@@ -1289,9 +1295,10 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
int ret, irq;
u32 val;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct cdns_dsi, input.bridge,
+ &cdns_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
@@ -1349,7 +1356,6 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
* CDNS_DPI_INPUT.
*/
input->id = CDNS_DPI_INPUT;
- input->bridge.funcs = &cdns_dsi_bridge_funcs;
input->bridge.of_node = pdev->dev.of_node;
/* Mask all interrupts before registering the IRQ handler. */
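
Per the comments added above, controller bring-up now happens entirely in a hook that runs while the DPI source is quiescent, and teardown likewise. The resulting shape of the combined pre-enable path, paraphrased from the hunks above with the register programming elided:

static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
					      struct drm_atomic_state *state)
{
	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
	struct cdns_dsi *dsi = input_to_dsi(input);

	if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
		return;

	cdns_dsi_init_link(dsi);	/* link first ... */
	cdns_dsi_hs_init(dsi);		/* ... then HS init (order swapped above) */

	/* ... video/command-mode programming, final MCTL_MAIN_EN write ... */
}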
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index b431e7efd1f0..a614d1384f71 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2143,7 +2143,8 @@ static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
return 0;
}
-static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+cdns_mhdp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
@@ -2389,9 +2390,10 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
int ret;
int irq;
- mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
- if (!mhdp)
- return -ENOMEM;
+ mhdp = devm_drm_bridge_alloc(dev, struct cdns_mhdp_device, bridge,
+ &cdns_mhdp_bridge_funcs);
+ if (IS_ERR(mhdp))
+ return PTR_ERR(mhdp);
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
@@ -2481,7 +2483,6 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
mhdp->display_fmt.bpc = 8;
mhdp->bridge.of_node = pdev->dev.of_node;
- mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD;
mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 634c5b030667..814713c5bea9 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -691,9 +691,10 @@ static int chipone_common_probe(struct device *dev, struct chipone **icnr)
struct chipone *icn;
int ret;
- icn = devm_kzalloc(dev, sizeof(struct chipone), GFP_KERNEL);
- if (!icn)
- return -ENOMEM;
+ icn = devm_drm_bridge_alloc(dev, struct chipone, bridge,
+ &chipone_bridge_funcs);
+ if (IS_ERR(icn))
+ return PTR_ERR(icn);
icn->dev = dev;
@@ -701,7 +702,6 @@ static int chipone_common_probe(struct device *dev, struct chipone **icnr)
if (ret)
return ret;
- icn->bridge.funcs = &chipone_bridge_funcs;
icn->bridge.type = DRM_MODE_CONNECTOR_DPI;
icn->bridge.of_node = dev->of_node;
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index 210c45c1efd4..54d49d4882c8 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -215,7 +215,7 @@ static enum drm_connector_status ch7033_connector_detect(
{
struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
- return drm_bridge_detect(priv->next_bridge);
+ return drm_bridge_detect(priv->next_bridge, connector);
}
static const struct drm_connector_funcs ch7033_connector_funcs = {
@@ -536,9 +536,10 @@ static int ch7033_probe(struct i2c_client *client)
unsigned int val;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct ch7033_priv, bridge,
+ &ch7033_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
@@ -575,7 +576,6 @@ static int ch7033_probe(struct i2c_client *client)
}
INIT_LIST_HEAD(&priv->bridge.list);
- priv->bridge.funcs = &ch7033_bridge_funcs;
priv->bridge.of_node = dev->of_node;
drm_bridge_add(&priv->bridge);
diff --git a/drivers/gpu/drm/bridge/cros-ec-anx7688.c b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
index c8abd9920fee..a35dae9b56e2 100644
--- a/drivers/gpu/drm/bridge/cros-ec-anx7688.c
+++ b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
@@ -103,9 +103,10 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
u8 buffer[4];
int ret;
- anx7688 = devm_kzalloc(dev, sizeof(*anx7688), GFP_KERNEL);
- if (!anx7688)
- return -ENOMEM;
+ anx7688 = devm_drm_bridge_alloc(dev, struct cros_ec_anx7688, bridge,
+ &cros_ec_anx7688_bridge_funcs);
+ if (IS_ERR(anx7688))
+ return PTR_ERR(anx7688);
anx7688->client = client;
i2c_set_clientdata(client, anx7688);
@@ -153,7 +154,6 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
DRM_WARN("Old ANX7688 FW version (0x%04x), not filtering\n",
fw_version);
- anx7688->bridge.funcs = &cros_ec_anx7688_bridge_funcs;
drm_bridge_add(&anx7688->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index badd2c7f91a1..52b7b5889e6f 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -40,8 +40,7 @@ static int display_connector_attach(struct drm_bridge *bridge,
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
}
-static enum drm_connector_status
-display_connector_detect(struct drm_bridge *bridge)
+static enum drm_connector_status display_connector_detect(struct drm_bridge *bridge)
{
struct display_connector *conn = to_display_connector(bridge);
@@ -82,6 +81,12 @@ display_connector_detect(struct drm_bridge *bridge)
}
}
+static enum drm_connector_status
+display_connector_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
+{
+ return display_connector_detect(bridge);
+}
+
static const struct drm_edid *display_connector_edid_read(struct drm_bridge *bridge,
struct drm_connector *connector)
{
@@ -172,7 +177,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
static const struct drm_bridge_funcs display_connector_bridge_funcs = {
.attach = display_connector_attach,
- .detect = display_connector_detect,
+ .detect = display_connector_bridge_detect,
.edid_read = display_connector_edid_read,
.atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts,
.atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts,
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 2cb6dfc7a6d3..5c3cf37200bc 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -298,16 +298,15 @@ static int fsl_ldb_probe(struct platform_device *pdev)
struct fsl_ldb *fsl_ldb;
int dual_link;
- fsl_ldb = devm_kzalloc(dev, sizeof(*fsl_ldb), GFP_KERNEL);
- if (!fsl_ldb)
- return -ENOMEM;
+ fsl_ldb = devm_drm_bridge_alloc(dev, struct fsl_ldb, bridge, &funcs);
+ if (IS_ERR(fsl_ldb))
+ return PTR_ERR(fsl_ldb);
fsl_ldb->devdata = of_device_get_match_data(dev);
if (!fsl_ldb->devdata)
return -EINVAL;
fsl_ldb->dev = &pdev->dev;
- fsl_ldb->bridge.funcs = &funcs;
fsl_ldb->bridge.of_node = dev->of_node;
fsl_ldb->clk = devm_clk_get(dev, "ldb");
diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
index f072c6ed39ef..0e31d5000e7c 100644
--- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
+++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
@@ -5,6 +5,8 @@
* bridge driver for legacy DT bindings, utilizing display-timings node
*/
+#include <linux/export.h>
+
#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
@@ -59,9 +61,10 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
struct imx_legacy_bridge *imx_bridge;
int ret;
- imx_bridge = devm_kzalloc(dev, sizeof(*imx_bridge), GFP_KERNEL);
- if (!imx_bridge)
- return ERR_PTR(-ENOMEM);
+ imx_bridge = devm_drm_bridge_alloc(dev, struct imx_legacy_bridge,
+ base, &imx_legacy_bridge_funcs);
+ if (IS_ERR(imx_bridge))
+ return ERR_CAST(imx_bridge);
ret = of_get_drm_display_mode(np,
&imx_bridge->mode,
@@ -72,7 +75,6 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
imx_bridge->mode.type |= DRM_MODE_TYPE_DRIVER;
- imx_bridge->base.funcs = &imx_legacy_bridge_funcs;
imx_bridge->base.of_node = np;
imx_bridge->base.ops = DRM_BRIDGE_OP_MODES;
imx_bridge->base.type = type;
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index 8a4fd7d77a8d..3a6f8587a257 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -140,9 +140,10 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
struct device_node *remote;
struct imx8mp_hdmi_pvi *pvi;
- pvi = devm_kzalloc(&pdev->dev, sizeof(*pvi), GFP_KERNEL);
- if (!pvi)
- return -ENOMEM;
+ pvi = devm_drm_bridge_alloc(&pdev->dev, struct imx8mp_hdmi_pvi,
+ bridge, &imx_hdmi_pvi_bridge_funcs);
+ if (IS_ERR(pvi))
+ return PTR_ERR(pvi);
platform_set_drvdata(pdev, pvi);
pvi->dev = &pdev->dev;
@@ -166,7 +167,6 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
/* Register the bridge. */
- pvi->bridge.funcs = &imx_hdmi_pvi_bridge_funcs;
pvi->bridge.of_node = pdev->dev.of_node;
pvi->bridge.timings = pvi->next_bridge->timings;
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 1f6fd488e703..8517b1c953d4 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -63,12 +63,11 @@ struct imx8qxp_pc_channel {
struct drm_bridge *next_bridge;
struct imx8qxp_pc *pc;
unsigned int stream_id;
- bool is_available;
};
struct imx8qxp_pc {
struct device *dev;
- struct imx8qxp_pc_channel ch[2];
+ struct imx8qxp_pc_channel *ch[2];
struct clk *clk_apb;
void __iomem *base;
};
@@ -307,7 +306,14 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
goto free_child;
}
- ch = &pc->ch[i];
+ ch = devm_drm_bridge_alloc(dev, struct imx8qxp_pc_channel, bridge,
+ &imx8qxp_pc_bridge_funcs);
+ if (IS_ERR(ch)) {
+ ret = PTR_ERR(ch);
+ goto free_child;
+ }
+
+ pc->ch[i] = ch;
ch->pc = pc;
ch->stream_id = i;
@@ -333,9 +339,7 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
of_node_put(remote);
ch->bridge.driver_private = ch;
- ch->bridge.funcs = &imx8qxp_pc_bridge_funcs;
ch->bridge.of_node = child;
- ch->is_available = true;
drm_bridge_add(&ch->bridge);
}
@@ -345,8 +349,8 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
free_child:
of_node_put(child);
- if (i == 1 && pc->ch[0].next_bridge)
- drm_bridge_remove(&pc->ch[0].bridge);
+ if (i == 1 && pc->ch[0]->next_bridge)
+ drm_bridge_remove(&pc->ch[0]->bridge);
pm_runtime_disable(dev);
return ret;
@@ -359,13 +363,10 @@ static void imx8qxp_pc_bridge_remove(struct platform_device *pdev)
int i;
for (i = 0; i < 2; i++) {
- ch = &pc->ch[i];
-
- if (!ch->is_available)
- continue;
+ ch = pc->ch[i];
- drm_bridge_remove(&ch->bridge);
- ch->is_available = false;
+ if (ch)
+ drm_bridge_remove(&ch->bridge);
}
pm_runtime_disable(&pdev->dev);
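
With each channel now allocated on demand via devm_drm_bridge_alloc(), a NULL entry in pc->ch[] replaces the old is_available flag as the "this channel was never brought up" marker. Condensed from the remove path above:

	for (i = 0; i < 2; i++) {
		struct imx8qxp_pc_channel *ch = pc->ch[i];

		if (ch)		/* NULL: channel was never allocated */
			drm_bridge_remove(&ch->bridge);
	}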
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index e092c9ea99b0..e5943506981d 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -327,9 +327,10 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- pl = devm_kzalloc(dev, sizeof(*pl), GFP_KERNEL);
- if (!pl)
- return -ENOMEM;
+ pl = devm_drm_bridge_alloc(dev, struct imx8qxp_pixel_link, bridge,
+ &imx8qxp_pixel_link_bridge_funcs);
+ if (IS_ERR(pl))
+ return PTR_ERR(pl);
ret = imx_scu_get_handle(&pl->ipc_handle);
if (ret) {
@@ -384,7 +385,6 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pl);
pl->bridge.driver_private = pl;
- pl->bridge.funcs = &imx8qxp_pixel_link_bridge_funcs;
pl->bridge.of_node = np;
drm_bridge_add(&pl->bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index da138ab51b3b..111310acab2c 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -392,9 +392,10 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- p2d = devm_kzalloc(dev, sizeof(*p2d), GFP_KERNEL);
- if (!p2d)
- return -ENOMEM;
+ p2d = devm_drm_bridge_alloc(dev, struct imx8qxp_pxl2dpi, bridge,
+ &imx8qxp_pxl2dpi_bridge_funcs);
+ if (IS_ERR(p2d))
+ return PTR_ERR(p2d);
p2d->regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(p2d->regmap)) {
@@ -441,7 +442,6 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
p2d->bridge.driver_private = p2d;
- p2d->bridge.funcs = &imx8qxp_pxl2dpi_bridge_funcs;
p2d->bridge.of_node = np;
drm_bridge_add(&p2d->bridge);
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index a3a63a977b0a..cf813672b4ff 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -693,7 +693,8 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
return 0;
}
-static enum drm_connector_status it6263_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+it6263_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct it6263 *it = bridge_to_it6263(bridge);
@@ -816,9 +817,10 @@ static int it6263_probe(struct i2c_client *client)
struct it6263 *it;
int ret;
- it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL);
- if (!it)
- return -ENOMEM;
+ it = devm_drm_bridge_alloc(dev, struct it6263, bridge,
+ &it6263_bridge_funcs);
+ if (IS_ERR(it))
+ return PTR_ERR(it);
it->dev = dev;
it->hdmi_i2c = client;
@@ -866,7 +868,6 @@ static int it6263_probe(struct i2c_client *client)
i2c_set_clientdata(client, it);
- it->bridge.funcs = &it6263_bridge_funcs;
it->bridge.of_node = dev->of_node;
/* IT6263 chip doesn't support HPD interrupt. */
it->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 1383d1e21afe..89649c17ffad 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -3238,7 +3238,7 @@ static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge,
}
static enum drm_connector_status
-it6505_bridge_detect(struct drm_bridge *bridge)
+it6505_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
@@ -3583,9 +3583,10 @@ static int it6505_i2c_probe(struct i2c_client *client)
struct extcon_dev *extcon;
int err;
- it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL);
- if (!it6505)
- return -ENOMEM;
+ it6505 = devm_drm_bridge_alloc(&client->dev, struct it6505, bridge,
+ &it6505_bridge_funcs);
+ if (IS_ERR(it6505))
+ return PTR_ERR(it6505);
mutex_init(&it6505->extcon_lock);
mutex_init(&it6505->mode_lock);
@@ -3660,7 +3661,6 @@ static int it6505_i2c_probe(struct i2c_client *client)
it6505->aux.transfer = it6505_aux_transfer;
drm_dp_aux_init(&it6505->aux);
- it6505->bridge.funcs = &it6505_bridge_funcs;
it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD;
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 7b110ae53291..aa7b1dcc5d70 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -843,7 +843,8 @@ static enum drm_mode_status it66121_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static enum drm_connector_status it66121_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+it66121_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
@@ -1516,9 +1517,10 @@ static int it66121_probe(struct i2c_client *client)
return -ENXIO;
}
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct it66121_ctx, bridge,
+ &it66121_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
if (!ep)
@@ -1577,7 +1579,6 @@ static int it66121_probe(struct i2c_client *client)
return -ENODEV;
}
- ctx->bridge.funcs = &it66121_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 3e49d855b364..342374cb8fc6 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -408,7 +408,7 @@ lt8912_connector_detect(struct drm_connector *connector, bool force)
struct lt8912 *lt = connector_to_lt8912(connector);
if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT)
- return drm_bridge_detect(lt->hdmi_port);
+ return drm_bridge_detect(lt->hdmi_port, connector);
return lt8912_check_cable_status(lt);
}
@@ -607,12 +607,12 @@ lt8912_bridge_mode_valid(struct drm_bridge *bridge,
}
static enum drm_connector_status
-lt8912_bridge_detect(struct drm_bridge *bridge)
+lt8912_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct lt8912 *lt = bridge_to_lt8912(bridge);
if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT)
- return drm_bridge_detect(lt->hdmi_port);
+ return drm_bridge_detect(lt->hdmi_port, connector);
return lt8912_check_cable_status(lt);
}
@@ -761,9 +761,10 @@ static int lt8912_probe(struct i2c_client *client)
int ret = 0;
struct device *dev = &client->dev;
- lt = devm_kzalloc(dev, sizeof(struct lt8912), GFP_KERNEL);
- if (!lt)
- return -ENOMEM;
+ lt = devm_drm_bridge_alloc(dev, struct lt8912, bridge,
+ &lt8912_bridge_funcs);
+ if (IS_ERR(lt))
+ return PTR_ERR(lt);
lt->dev = dev;
lt->i2c_client[0] = client;
@@ -778,7 +779,6 @@ static int lt8912_probe(struct i2c_client *client)
i2c_set_clientdata(client, lt);
- lt->bridge.funcs = &lt8912_bridge_funcs;
lt->bridge.of_node = dev->of_node;
lt->bridge.ops = (DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_DETECT);
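
The other recurring change, visible in the lt8912b hunks above, is the extra drm_connector argument on the .detect path: both drm_bridge_detect() callers and the bridge implementations gain the connector being probed. A hedged sketch of both sides of the new contract (hypothetical names):

#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>

/* Implementation side: the probing connector is now passed in. */
static enum drm_connector_status
mydrv_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
	/* read a hot-plug status register, GPIO, ... */
	return connector_status_connected;
}

/* Caller side, as in lt8912_connector_detect(): forward the connector
 * to the downstream bridge when it advertises DRM_BRIDGE_OP_DETECT. */
static enum drm_connector_status
mydrv_connector_detect(struct drm_connector *connector, bool force)
{
	struct drm_bridge *next = NULL;	/* driver-specific lookup elided */

	if (next && (next->ops & DRM_BRIDGE_OP_DETECT))
		return drm_bridge_detect(next, connector);
	return connector_status_unknown;
}
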
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 9b2dac9bd63c..399fa7eebd49 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -727,9 +727,9 @@ static int lt9211_probe(struct i2c_client *client)
struct lt9211 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct lt9211, bridge, &lt9211_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -755,7 +755,6 @@ static int lt9211_probe(struct i2c_client *client)
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &lt9211_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index a35a8b8ca89c..a2d032ee4744 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -543,7 +543,8 @@ static int lt9611_regulator_enable(struct lt9611 *lt9611)
return 0;
}
-static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+lt9611_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
unsigned int reg_val = 0;
@@ -936,8 +937,8 @@ lt9611_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
return MODE_OK;
}
-static int lt9611_hdmi_audio_startup(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static int lt9611_hdmi_audio_startup(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
@@ -952,8 +953,8 @@ static int lt9611_hdmi_audio_startup(struct drm_connector *connector,
return 0;
}
-static int lt9611_hdmi_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+static int lt9611_hdmi_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms)
{
@@ -974,8 +975,8 @@ static int lt9611_hdmi_audio_prepare(struct drm_connector *connector,
&hparms->cea);
}
-static void lt9611_hdmi_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static void lt9611_hdmi_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
@@ -1072,9 +1073,10 @@ static int lt9611_probe(struct i2c_client *client)
return -ENODEV;
}
- lt9611 = devm_kzalloc(dev, sizeof(*lt9611), GFP_KERNEL);
- if (!lt9611)
- return -ENOMEM;
+ lt9611 = devm_drm_bridge_alloc(dev, struct lt9611, bridge,
+ &lt9611_bridge_funcs);
+ if (IS_ERR(lt9611))
+ return PTR_ERR(lt9611);
lt9611->dev = dev;
lt9611->client = client;
@@ -1127,7 +1129,6 @@ static int lt9611_probe(struct i2c_client *client)
/* Disable Audio InfoFrame, enabled by default */
regmap_update_bits(lt9611->regmap, 0x843d, LT9611_INFOFRAME_AUDIO, 0);
- lt9611->bridge.funcs = &lt9611_bridge_funcs;
lt9611->bridge.of_node = client->dev.of_node;
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES |
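
lt9611 (and dw-hdmi-qp further down) additionally swap the HDMI audio callback arguments so the bridge comes first, matching every other drm_bridge_funcs operation. A sketch of the new ordering with hypothetical driver names; the hdmi_codec types come from <sound/hdmi-codec.h> as in the driver itself:

#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <sound/hdmi-codec.h>

static int mydrv_hdmi_audio_startup(struct drm_bridge *bridge,
				    struct drm_connector *connector)
{
	/* enable the audio datapath */
	return 0;
}

static int mydrv_hdmi_audio_prepare(struct drm_bridge *bridge,
				    struct drm_connector *connector,
				    struct hdmi_codec_daifmt *fmt,
				    struct hdmi_codec_params *hparms)
{
	/* program N/CTS, channel count, infoframe from fmt/hparms */
	return 0;
}

static void mydrv_hdmi_audio_shutdown(struct drm_bridge *bridge,
				      struct drm_connector *connector)
{
	/* mute and disable the audio datapath */
}
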
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 766da2cb45a7..38fb8776c0f4 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -353,7 +353,8 @@ static void lt9611uxc_bridge_mode_set(struct drm_bridge *bridge,
lt9611uxc_unlock(lt9611uxc);
}
-static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+lt9611uxc_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
unsigned int reg_val = 0;
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 1646e454e0b0..e6a7147e141b 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -118,9 +118,10 @@ static int lvds_codec_probe(struct platform_device *pdev)
u32 val;
int ret;
- lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
- if (!lvds_codec)
- return -ENOMEM;
+ lvds_codec = devm_drm_bridge_alloc(dev, struct lvds_codec, bridge,
+ &funcs);
+ if (IS_ERR(lvds_codec))
+ return PTR_ERR(lvds_codec);
lvds_codec->dev = &pdev->dev;
lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);
@@ -156,8 +157,6 @@ static int lvds_codec_probe(struct platform_device *pdev)
if (IS_ERR(lvds_codec->panel_bridge))
return PTR_ERR(lvds_codec->panel_bridge);
- lvds_codec->bridge.funcs = &funcs;
-
/*
* Decoder input LVDS format is a property of the decoder chip or even
* its strapping. Handle data-mapping the same way lvds-panel does. In
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 15a5a1f644fc..c9e6505cbd88 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -120,7 +120,8 @@ drm_connector_helper_funcs ge_b850v3_lvds_connector_helper_funcs = {
.get_modes = ge_b850v3_lvds_get_modes,
};
-static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct i2c_client *stdp4028_i2c =
ge_b850v3_lvds_ptr->stdp4028_i2c;
@@ -141,7 +142,7 @@ static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge
static enum drm_connector_status ge_b850v3_lvds_detect(struct drm_connector *connector,
bool force)
{
- return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge);
+ return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge, connector);
}
static const struct drm_connector_funcs ge_b850v3_lvds_connector_funcs = {
@@ -225,13 +226,11 @@ static int ge_b850v3_lvds_init(struct device *dev)
if (ge_b850v3_lvds_ptr)
goto success;
- ge_b850v3_lvds_ptr = devm_kzalloc(dev,
- sizeof(*ge_b850v3_lvds_ptr),
- GFP_KERNEL);
-
- if (!ge_b850v3_lvds_ptr) {
+ ge_b850v3_lvds_ptr = devm_drm_bridge_alloc(dev, struct ge_b850v3_lvds, bridge,
+ &ge_b850v3_lvds_funcs);
+ if (IS_ERR(ge_b850v3_lvds_ptr)) {
mutex_unlock(&ge_b850v3_lvds_dev_mutex);
- return -ENOMEM;
+ return PTR_ERR(ge_b850v3_lvds_ptr);
}
success:
@@ -264,7 +263,6 @@ static int ge_b850v3_register(void)
struct device *dev = &stdp4028_i2c->dev;
/* drm bridge initialization */
- ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
ge_b850v3_lvds_ptr->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID;
ge_b850v3_lvds_ptr->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
index 1d4ae0097df8..9f4ff82bc6b4 100644
--- a/drivers/gpu/drm/bridge/microchip-lvds.c
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -157,9 +157,10 @@ static int mchp_lvds_probe(struct platform_device *pdev)
if (!dev->of_node)
return -ENODEV;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(&pdev->dev, struct mchp_lvds, bridge,
+ &mchp_lvds_bridge_funcs);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = dev;
@@ -192,7 +193,6 @@ static int mchp_lvds_probe(struct platform_device *pdev)
lvds->bridge.of_node = dev->of_node;
lvds->bridge.type = DRM_MODE_CONNECTOR_LVDS;
- lvds->bridge.funcs = &mchp_lvds_bridge_funcs;
dev_set_drvdata(dev, lvds);
ret = devm_pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 55912ae11f46..2f7429b24fc2 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -1149,9 +1149,10 @@ static int nwl_dsi_probe(struct platform_device *pdev)
struct nwl_dsi *dsi;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct nwl_dsi, bridge,
+ &nwl_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->dev = dev;
@@ -1180,7 +1181,6 @@ static int nwl_dsi_probe(struct platform_device *pdev)
dsi->quirks = (uintptr_t)attr->data;
dsi->bridge.driver_private = dsi;
- dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.timings = &nwl_dsi_timings;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 25d7c415478b..7acb11f16dc1 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -261,10 +261,10 @@ static int ptn3460_probe(struct i2c_client *client)
struct drm_bridge *panel_bridge;
int ret;
- ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL);
- if (!ptn_bridge) {
- return -ENOMEM;
- }
+ ptn_bridge = devm_drm_bridge_alloc(dev, struct ptn3460_bridge, bridge,
+ &ptn3460_bridge_funcs);
+ if (IS_ERR(ptn_bridge))
+ return PTR_ERR(ptn_bridge);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(panel_bridge))
@@ -300,7 +300,6 @@ static int ptn3460_probe(struct i2c_client *client)
return ret;
}
- ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs;
ptn_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
ptn_bridge->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ptn_bridge->bridge.of_node = dev->of_node;
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 29b0358a7b6d..184a8b7049a7 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -5,6 +5,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -287,15 +288,14 @@ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
if (!panel)
return ERR_PTR(-EINVAL);
- panel_bridge = devm_kzalloc(panel->dev, sizeof(*panel_bridge),
- GFP_KERNEL);
- if (!panel_bridge)
- return ERR_PTR(-ENOMEM);
+ panel_bridge = devm_drm_bridge_alloc(panel->dev, struct panel_bridge, bridge,
+ &panel_bridge_bridge_funcs);
+ if (IS_ERR(panel_bridge))
+ return (void *)panel_bridge;
panel_bridge->connector_type = connector_type;
panel_bridge->panel = panel;
- panel_bridge->bridge.funcs = &panel_bridge_bridge_funcs;
panel_bridge->bridge.of_node = panel->dev->of_node;
panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES;
panel_bridge->bridge.type = connector_type;
@@ -328,7 +328,8 @@ void drm_panel_bridge_remove(struct drm_bridge *bridge)
panel_bridge = drm_bridge_to_panel_bridge(bridge);
drm_bridge_remove(bridge);
- devm_kfree(panel_bridge->panel->dev, bridge);
+ /* TODO remove this after reworking panel_bridge lifetime */
+ devm_drm_put_bridge(panel_bridge->panel->dev, bridge);
}
EXPORT_SYMBOL(drm_panel_bridge_remove);
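
drm_panel_bridge_add_typed() returns struct drm_bridge * while devm_drm_bridge_alloc() yields struct panel_bridge *, so the error pointer has to cross pointer types: the hunk above uses a (void *) cast, while dw_hdmi_qp_bind() and the dw-mipi-dsi probes below use ERR_CAST() for the same job. A self-contained illustration with hypothetical types:

#include <linux/err.h>

struct inner { int x; };
struct outer { struct inner member; };

static struct inner *make_inner(void)
{
	struct outer *o = ERR_PTR(-ENOMEM);	/* stand-in for a failed alloc */

	if (IS_ERR(o))
		return ERR_CAST(o);	/* same bits, different pointer type */
	return &o->member;
}
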
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 8726fefc5c65..f879a1df077d 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -449,9 +449,10 @@ static int ps8622_probe(struct i2c_client *client)
struct drm_bridge *panel_bridge;
int ret;
- ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL);
- if (!ps8622)
- return -ENOMEM;
+ ps8622 = devm_drm_bridge_alloc(dev, struct ps8622_bridge, bridge,
+ &ps8622_bridge_funcs);
+ if (IS_ERR(ps8622))
+ return PTR_ERR(ps8622);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(panel_bridge))
@@ -509,7 +510,6 @@ static int ps8622_probe(struct i2c_client *client)
ps8622->bl->props.brightness = PS8622_MAX_BRIGHTNESS;
}
- ps8622->bridge.funcs = &ps8622_bridge_funcs;
ps8622->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ps8622->bridge.of_node = dev->of_node;
drm_bridge_add(&ps8622->bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 2422ff68c104..825777a5758f 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -636,9 +636,10 @@ static int ps8640_probe(struct i2c_client *client)
int ret;
u32 i;
- ps_bridge = devm_kzalloc(dev, sizeof(*ps_bridge), GFP_KERNEL);
- if (!ps_bridge)
- return -ENOMEM;
+ ps_bridge = devm_drm_bridge_alloc(dev, struct ps8640, bridge,
+ &ps8640_bridge_funcs);
+ if (IS_ERR(ps_bridge))
+ return PTR_ERR(ps_bridge);
mutex_init(&ps_bridge->aux_lock);
@@ -662,7 +663,6 @@ static int ps8640_probe(struct i2c_client *client)
if (IS_ERR(ps_bridge->gpio_reset))
return PTR_ERR(ps_bridge->gpio_reset);
- ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
ps_bridge->bridge.of_node = dev->of_node;
ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP;
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index bccc88d25948..b5dd71f6a990 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -14,11 +14,13 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/irq.h>
#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/units.h>
#include <video/mipi_display.h>
@@ -557,10 +559,6 @@ static void samsung_dsim_reset(struct samsung_dsim *dsi)
samsung_dsim_write(dsi, DSIM_SWRST_REG, reset_val);
}
-#ifndef MHZ
-#define MHZ (1000 * 1000)
-#endif
-
static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
unsigned long fin,
unsigned long fout,
@@ -574,8 +572,8 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
u16 _m, best_m;
u8 _s, best_s;
- p_min = DIV_ROUND_UP(fin, (driver_data->pll_fin_max * MHZ));
- p_max = fin / (driver_data->pll_fin_min * MHZ);
+ p_min = DIV_ROUND_UP(fin, (driver_data->pll_fin_max * HZ_PER_MHZ));
+ p_max = fin / (driver_data->pll_fin_min * HZ_PER_MHZ);
for (_p = p_min; _p <= p_max; ++_p) {
for (_s = 0; _s <= 5; ++_s) {
@@ -590,8 +588,8 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
tmp = (u64)_m * fin;
do_div(tmp, _p);
- if (tmp < driver_data->min_freq * MHZ ||
- tmp > driver_data->max_freq * MHZ)
+ if (tmp < driver_data->min_freq * HZ_PER_MHZ ||
+ tmp > driver_data->max_freq * HZ_PER_MHZ)
continue;
tmp = (u64)_m * fin;
@@ -634,7 +632,7 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
* limit.
*/
fin = clk_get_rate(clk_get_parent(dsi->pll_clk));
- while (fin > driver_data->pll_fin_max * MHZ)
+ while (fin > driver_data->pll_fin_max * HZ_PER_MHZ)
fin /= 2;
clk_set_rate(dsi->pll_clk, fin);
@@ -660,10 +658,11 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
if (driver_data->has_freqband) {
static const unsigned long freq_bands[] = {
- 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
- 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
- 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
- 770 * MHZ, 870 * MHZ, 950 * MHZ,
+ 100 * HZ_PER_MHZ, 120 * HZ_PER_MHZ, 160 * HZ_PER_MHZ,
+ 200 * HZ_PER_MHZ, 270 * HZ_PER_MHZ, 320 * HZ_PER_MHZ,
+ 390 * HZ_PER_MHZ, 450 * HZ_PER_MHZ, 510 * HZ_PER_MHZ,
+ 560 * HZ_PER_MHZ, 640 * HZ_PER_MHZ, 690 * HZ_PER_MHZ,
+ 770 * HZ_PER_MHZ, 870 * HZ_PER_MHZ, 950 * HZ_PER_MHZ,
};
int band;
@@ -723,7 +722,7 @@ static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
esc_clk = byte_clk / esc_div;
- if (esc_clk > 20 * MHZ) {
+ if (esc_clk > 20 * HZ_PER_MHZ) {
++esc_div;
esc_clk = byte_clk / esc_div;
}
@@ -898,8 +897,6 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
* The user manual describes that following bits are ignored in
* command mode.
*/
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
- reg |= DSIM_MFLUSH_VS;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
reg |= DSIM_SYNC_INFORM;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
@@ -1235,43 +1232,34 @@ static void samsung_dsim_transfer_start(struct samsung_dsim *dsi)
{
unsigned long flags;
struct samsung_dsim_transfer *xfer;
- bool start = false;
-again:
spin_lock_irqsave(&dsi->transfer_lock, flags);
- if (list_empty(&dsi->transfer_list)) {
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- return;
- }
+ while (!list_empty(&dsi->transfer_list)) {
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list);
- xfer = list_first_entry(&dsi->transfer_list,
- struct samsung_dsim_transfer, list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- if (xfer->packet.payload_length &&
- xfer->tx_done == xfer->packet.payload_length)
- /* waiting for RX */
- return;
+ if (xfer->packet.payload_length &&
+ xfer->tx_done == xfer->packet.payload_length)
+ /* waiting for RX */
+ return;
- samsung_dsim_send_to_fifo(dsi, xfer);
+ samsung_dsim_send_to_fifo(dsi, xfer);
- if (xfer->packet.payload_length || xfer->rx_len)
- return;
+ if (xfer->packet.payload_length || xfer->rx_len)
+ return;
- xfer->result = 0;
- complete(&xfer->completed);
+ xfer->result = 0;
+ complete(&xfer->completed);
- spin_lock_irqsave(&dsi->transfer_lock, flags);
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
+ list_del_init(&xfer->list);
+ }
spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (start)
- goto again;
}
static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi)
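
Besides swapping the local MHZ macro for HZ_PER_MHZ from <linux/units.h> and dropping the DSIM_MFLUSH_VS handling, the samsung-dsim hunk above rewrites a goto-based retry as a while loop over the transfer list: the spinlock is dropped around the FIFO work and re-taken only to unlink a completed transfer before testing the list again. A minimal sketch of that locking shape, with placeholder types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct xfer {
	struct list_head list;
	/* ... payload bookkeeping ... */
};

static void process_all(spinlock_t *lock, struct list_head *queue)
{
	unsigned long flags;
	struct xfer *x;

	spin_lock_irqsave(lock, flags);
	while (!list_empty(queue)) {
		x = list_first_entry(queue, struct xfer, list);

		/* drop the lock while touching hardware */
		spin_unlock_irqrestore(lock, flags);

		/* ... feed the FIFO; may return early while waiting for RX ... */

		spin_lock_irqsave(lock, flags);
		list_del_init(&x->list);	/* done: unlink under the lock */
	}
	spin_unlock_irqrestore(lock, flags);
}
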
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 6de61d9fe064..d537b1d036fb 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -458,7 +458,8 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
return 0;
}
-static enum drm_connector_status sii902x_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+sii902x_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -1135,7 +1136,6 @@ static int sii902x_init(struct sii902x *sii902x)
if (ret)
goto err_unreg_audio;
- sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
@@ -1170,9 +1170,9 @@ static int sii902x_probe(struct i2c_client *client)
return -EIO;
}
- sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
- if (!sii902x)
- return -ENOMEM;
+ sii902x = devm_drm_bridge_alloc(dev, struct sii902x, bridge, &sii902x_bridge_funcs);
+ if (IS_ERR(sii902x))
+ return PTR_ERR(sii902x);
sii902x->i2c = client;
sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config);
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index cd7837c9a6e0..bb1bed03eb5b 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -888,9 +888,10 @@ static int sii9234_probe(struct i2c_client *client)
struct device *dev = &client->dev;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sii9234, bridge,
+ &sii9234_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
mutex_init(&ctx->lock);
@@ -921,7 +922,6 @@ static int sii9234_probe(struct i2c_client *client)
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sii9234_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 3af650dc92a1..9e48ad39e1cc 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2291,9 +2291,10 @@ static int sii8620_probe(struct i2c_client *client)
struct sii8620 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sii8620, bridge,
+ &sii8620_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
mutex_init(&ctx->lock);
@@ -2336,7 +2337,6 @@ static int sii8620_probe(struct i2c_client *client)
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sii8620_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index 70db5b99e5bb..3d15ddd39470 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -90,7 +90,7 @@ simple_bridge_connector_detect(struct drm_connector *connector, bool force)
{
struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
- return drm_bridge_detect(sbridge->next_bridge);
+ return drm_bridge_detect(sbridge->next_bridge, connector);
}
static const struct drm_connector_funcs simple_bridge_con_funcs = {
@@ -168,9 +168,10 @@ static int simple_bridge_probe(struct platform_device *pdev)
struct simple_bridge *sbridge;
struct device_node *remote;
- sbridge = devm_kzalloc(&pdev->dev, sizeof(*sbridge), GFP_KERNEL);
- if (!sbridge)
- return -ENOMEM;
+ sbridge = devm_drm_bridge_alloc(&pdev->dev, struct simple_bridge,
+ bridge, &simple_bridge_bridge_funcs);
+ if (IS_ERR(sbridge))
+ return PTR_ERR(sbridge);
sbridge->info = of_device_get_match_data(&pdev->dev);
@@ -204,7 +205,6 @@ static int simple_bridge_probe(struct platform_device *pdev)
"Unable to retrieve enable GPIO\n");
/* Register the bridge. */
- sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
sbridge->bridge.of_node = pdev->dev.of_node;
sbridge->bridge.timings = sbridge->info->timings;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 5e5f8c2f95be..39332c57f2c5 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -8,6 +8,7 @@
*/
#include <linux/completion.h>
+#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/module.h>
@@ -439,8 +440,8 @@ static void dw_hdmi_qp_set_sample_rate(struct dw_hdmi_qp *hdmi, unsigned long lo
dw_hdmi_qp_set_cts_n(hdmi, cts, n);
}
-static int dw_hdmi_qp_audio_enable(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static int dw_hdmi_qp_audio_enable(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
@@ -450,8 +451,8 @@ static int dw_hdmi_qp_audio_enable(struct drm_connector *connector,
return 0;
}
-static int dw_hdmi_qp_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+static int dw_hdmi_qp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms)
{
@@ -496,8 +497,8 @@ static void dw_hdmi_qp_audio_disable_regs(struct dw_hdmi_qp *hdmi)
AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE);
}
-static void dw_hdmi_qp_audio_disable(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static void dw_hdmi_qp_audio_disable(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
@@ -875,7 +876,7 @@ static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge,
}
static enum drm_connector_status
-dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge)
+dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
@@ -1045,9 +1046,10 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
return ERR_PTR(-ENODEV);
}
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return ERR_PTR(-ENOMEM);
+ hdmi = devm_drm_bridge_alloc(dev, struct dw_hdmi_qp, bridge,
+ &dw_hdmi_qp_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return ERR_CAST(hdmi);
hdmi->dev = dev;
@@ -1073,7 +1075,6 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
return ERR_PTR(ret);
hdmi->bridge.driver_private = hdmi;
- hdmi->bridge.funcs = &dw_hdmi_qp_bridge_funcs;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HDMI |
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 8791408dd1ff..206b099a35e9 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/irq.h>
@@ -2977,7 +2978,8 @@ static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
mutex_unlock(&hdmi->mutex);
}
-static enum drm_connector_status dw_hdmi_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+dw_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct dw_hdmi *hdmi = bridge->driver_private;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index b08ada920a50..8fc2e282ff11 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/media-bus-format.h>
@@ -1194,9 +1195,10 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
struct dw_mipi_dsi *dsi;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return ERR_PTR(-ENOMEM);
+ dsi = devm_drm_bridge_alloc(dev, struct dw_mipi_dsi, bridge,
+ &dw_mipi_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return ERR_CAST(dsi);
dsi->dev = dev;
dsi->plat_data = plat_data;
@@ -1265,7 +1267,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
}
dsi->bridge.driver_private = dsi;
- dsi->bridge.funcs = &dw_mipi_dsi_bridge_funcs;
dsi->bridge.of_node = pdev->dev.of_node;
return dsi;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
index c76f5f2e74d1..5926a3a05d79 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/export.h>
#include <linux/iopoll.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
@@ -914,9 +915,10 @@ __dw_mipi_dsi2_probe(struct platform_device *pdev,
struct dw_mipi_dsi2 *dsi2;
int ret;
- dsi2 = devm_kzalloc(dev, sizeof(*dsi2), GFP_KERNEL);
- if (!dsi2)
- return ERR_PTR(-ENOMEM);
+ dsi2 = devm_drm_bridge_alloc(dev, struct dw_mipi_dsi2, bridge,
+ &dw_mipi_dsi2_bridge_funcs);
+ if (IS_ERR(dsi2))
+ return ERR_CAST(dsi2);
dsi2->dev = dev;
dsi2->plat_data = plat_data;
@@ -981,7 +983,6 @@ __dw_mipi_dsi2_probe(struct platform_device *pdev,
}
dsi2->bridge.driver_private = dsi2;
- dsi2->bridge.funcs = &dw_mipi_dsi2_bridge_funcs;
dsi2->bridge.of_node = pdev->dev.of_node;
return dsi2;
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index edf01476f2ef..98df3e667d4a 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -265,9 +265,10 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
struct tc358762 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct tc358762), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct tc358762, bridge,
+ &tc358762_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -288,7 +289,6 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->bridge.funcs = &tc358762_bridge_funcs;
ctx->bridge.type = DRM_MODE_CONNECTOR_DPI;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 3f76c890fad9..084e9d898e22 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -347,9 +347,10 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
struct tc358764 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct tc358764), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct tc358764, bridge,
+ &tc358764_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -368,7 +369,6 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->bridge.funcs = &tc358764_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 7e5449fb86a3..4097fef4b86b 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -344,6 +344,14 @@
#define COLOR_BAR_MODE_BARS 2
#define PLL_DBG 0x0a04
+enum tc_mode {
+ mode_dpi_to_edp = BIT(1) | BIT(2),
+ mode_dpi_to_dp = BIT(1),
+ mode_dsi_to_edp = BIT(0) | BIT(2),
+ mode_dsi_to_dp = BIT(0),
+ mode_dsi_to_dpi = BIT(0) | BIT(1),
+};
+
static bool tc_test_pattern;
module_param_named(test, tc_test_pattern, bool, 0644);
@@ -1752,7 +1760,8 @@ static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
.get_modes = tc_connector_get_modes,
};
-static enum drm_connector_status tc_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+tc_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct tc_data *tc = bridge_to_tc(bridge);
bool conn;
@@ -1777,7 +1786,7 @@ tc_connector_detect(struct drm_connector *connector, bool force)
struct tc_data *tc = connector_to_tc(connector);
if (tc->hpd_pin >= 0)
- return tc_bridge_detect(&tc->bridge);
+ return tc_bridge_detect(&tc->bridge, connector);
if (tc->panel_bridge)
return connector_status_connected;
@@ -2327,7 +2336,6 @@ static int tc_probe_dpi_bridge_endpoint(struct tc_data *tc)
if (bridge) {
tc->panel_bridge = bridge;
tc->bridge.type = DRM_MODE_CONNECTOR_DPI;
- tc->bridge.funcs = &tc_dpi_bridge_funcs;
return 0;
}
@@ -2360,7 +2368,6 @@ static int tc_probe_edp_bridge_endpoint(struct tc_data *tc)
tc->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
}
- tc->bridge.funcs = &tc_edp_bridge_funcs;
if (tc->hpd_pin >= 0)
tc->bridge.ops |= DRM_BRIDGE_OP_DETECT;
tc->bridge.ops |= DRM_BRIDGE_OP_EDID;
@@ -2368,17 +2375,11 @@ static int tc_probe_edp_bridge_endpoint(struct tc_data *tc)
return 0;
}
-static int tc_probe_bridge_endpoint(struct tc_data *tc)
+static enum tc_mode tc_probe_get_mode(struct device *dev)
{
- struct device *dev = tc->dev;
struct of_endpoint endpoint;
struct device_node *node = NULL;
- const u8 mode_dpi_to_edp = BIT(1) | BIT(2);
- const u8 mode_dpi_to_dp = BIT(1);
- const u8 mode_dsi_to_edp = BIT(0) | BIT(2);
- const u8 mode_dsi_to_dp = BIT(0);
- const u8 mode_dsi_to_dpi = BIT(0) | BIT(1);
- u8 mode = 0;
+ enum tc_mode mode = 0;
/*
* Determine bridge configuration.
@@ -2401,7 +2402,28 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
return -EINVAL;
}
mode |= BIT(endpoint.port);
+ }
+
+ if (mode != mode_dpi_to_edp &&
+ mode != mode_dpi_to_dp &&
+ mode != mode_dsi_to_dpi &&
+ mode != mode_dsi_to_edp &&
+ mode != mode_dsi_to_dp) {
+ dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
+ return -EINVAL;
+ }
+
+ return mode;
+}
+
+static int tc_probe_bridge_endpoint(struct tc_data *tc, enum tc_mode mode)
+{
+ struct device *dev = tc->dev;
+ struct of_endpoint endpoint;
+ struct device_node *node = NULL;
+
+ for_each_endpoint_of_node(dev->of_node, node) {
+ of_graph_parse_endpoint(node, &endpoint);
if (endpoint.port == 2) {
of_property_read_u8_array(node, "toshiba,pre-emphasis",
tc->pre_emphasis,
@@ -2427,24 +2449,28 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
return tc_probe_edp_bridge_endpoint(tc);
}
- dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
-
+ /* Should never happen, mode was validated by tc_probe_get_mode() */
return -EINVAL;
}
static int tc_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
+ const struct drm_bridge_funcs *funcs;
struct tc_data *tc;
+ int mode;
int ret;
- tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
- if (!tc)
- return -ENOMEM;
+ mode = tc_probe_get_mode(dev);
+ funcs = (mode == mode_dsi_to_dpi) ? &tc_dpi_bridge_funcs : &tc_edp_bridge_funcs;
+
+ tc = devm_drm_bridge_alloc(dev, struct tc_data, bridge, funcs);
+ if (IS_ERR(tc))
+ return PTR_ERR(tc);
tc->dev = dev;
- ret = tc_probe_bridge_endpoint(tc);
+ ret = tc_probe_bridge_endpoint(tc, mode);
if (ret)
return ret;
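
The tc358767 refactor exists because devm_drm_bridge_alloc() needs the funcs table before tc_data is allocated: mode probing is split into tc_probe_get_mode(), whose return value encodes which OF graph ports carry endpoints (mode |= BIT(endpoint.port), with port 0 = DSI in, port 1 = DPI, port 2 = (e)DP out), and only the five combinations named in enum tc_mode are accepted. A self-contained restatement of that validity check (the driver does it inline; the helper name is illustrative):

#include <linux/bits.h>
#include <linux/types.h>

/* bit n set <=> an endpoint exists on OF graph port n */
static bool tc_mode_is_valid(u8 mode)
{
	return mode == (BIT(1) | BIT(2)) ||	/* mode_dpi_to_edp */
	       mode == BIT(1) ||		/* mode_dpi_to_dp  */
	       mode == (BIT(0) | BIT(2)) ||	/* mode_dsi_to_edp */
	       mode == BIT(0) ||		/* mode_dsi_to_dp  */
	       mode == (BIT(0) | BIT(1));	/* mode_dsi_to_dpi */
}
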
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 063f217a17b6..fbdc44e16229 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -1287,9 +1287,10 @@ static int tc358768_i2c_probe(struct i2c_client *client)
if (!np)
return -ENODEV;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct tc358768_priv, bridge,
+ &tc358768_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
priv->dev = dev;
@@ -1321,7 +1322,6 @@ static int tc358768_i2c_probe(struct i2c_client *client)
priv->dsi_host.dev = dev;
priv->dsi_host.ops = &tc358768_dsi_host_ops;
- priv->bridge.funcs = &tc358768_bridge_funcs;
priv->bridge.timings = &default_tc358768_timings;
priv->bridge.of_node = np;
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 1b10e6ee1724..366b12db0e7c 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -659,9 +659,10 @@ static int tc_probe(struct i2c_client *client)
struct tc_data *tc;
int ret;
- tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
- if (!tc)
- return -ENOMEM;
+ tc = devm_drm_bridge_alloc(dev, struct tc_data, bridge,
+ &tc_bridge_funcs);
+ if (IS_ERR(tc))
+ return PTR_ERR(tc);
tc->dev = dev;
tc->i2c = client;
@@ -701,7 +702,6 @@ static int tc_probe(struct i2c_client *client)
return ret;
}
- tc->bridge.funcs = &tc_bridge_funcs;
tc->bridge.of_node = dev->of_node;
tc->bridge.pre_enable_prev_first = true;
drm_bridge_add(&tc->bridge);
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index e2fc78adebcf..2cb7cd0c0608 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -181,9 +181,10 @@ static int thc63_probe(struct platform_device *pdev)
struct thc63_dev *thc63;
int ret;
- thc63 = devm_kzalloc(&pdev->dev, sizeof(*thc63), GFP_KERNEL);
- if (!thc63)
- return -ENOMEM;
+ thc63 = devm_drm_bridge_alloc(&pdev->dev, struct thc63_dev, bridge,
+ &thc63_bridge_func);
+ if (IS_ERR(thc63))
+ return PTR_ERR(thc63);
thc63->dev = &pdev->dev;
platform_set_drvdata(pdev, thc63);
@@ -208,7 +209,6 @@ static int thc63_probe(struct platform_device *pdev)
thc63->bridge.driver_private = thc63;
thc63->bridge.of_node = pdev->dev.of_node;
- thc63->bridge.funcs = &thc63_bridge_func;
thc63->bridge.timings = &thc63->timings;
drm_bridge_add(&thc63->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index 47638d1c96ec..b07f7c9d5890 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -348,9 +348,10 @@ static int dlpc3433_probe(struct i2c_client *client)
struct dlpc *dlpc;
int ret;
- dlpc = devm_kzalloc(dev, sizeof(*dlpc), GFP_KERNEL);
- if (!dlpc)
- return -ENOMEM;
+ dlpc = devm_drm_bridge_alloc(dev, struct dlpc, bridge,
+ &dlpc_bridge_funcs);
+ if (IS_ERR(dlpc))
+ return PTR_ERR(dlpc);
dlpc->dev = dev;
@@ -365,7 +366,6 @@ static int dlpc3433_probe(struct i2c_client *client)
dev_set_drvdata(dev, dlpc);
i2c_set_clientdata(client, dlpc);
- dlpc->bridge.funcs = &dlpc_bridge_funcs;
dlpc->bridge.of_node = dev->of_node;
drm_bridge_add(&dlpc->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 834b42a4d31f..464390372b34 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -453,23 +453,6 @@ DEFINE_SHOW_ATTRIBUTE(status);
* Auxiliary Devices (*not* AUX)
*/
-static void ti_sn65dsi86_uninit_aux(void *data)
-{
- auxiliary_device_uninit(data);
-}
-
-static void ti_sn65dsi86_delete_aux(void *data)
-{
- auxiliary_device_delete(data);
-}
-
-static void ti_sn65dsi86_aux_device_release(struct device *dev)
-{
- struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
-
- kfree(aux);
-}
-
static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
struct auxiliary_device **aux_out,
const char *name)
@@ -477,34 +460,16 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
struct device *dev = pdata->dev;
const struct i2c_client *client = to_i2c_client(dev);
struct auxiliary_device *aux;
- int ret;
+ int id;
- aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+ id = (client->adapter->nr << 10) | client->addr;
+ aux = __devm_auxiliary_device_create(dev, KBUILD_MODNAME, name,
+ NULL, id);
if (!aux)
- return -ENOMEM;
-
- aux->name = name;
- aux->id = (client->adapter->nr << 10) | client->addr;
- aux->dev.parent = dev;
- aux->dev.release = ti_sn65dsi86_aux_device_release;
- device_set_of_node_from_dev(&aux->dev, dev);
- ret = auxiliary_device_init(aux);
- if (ret) {
- kfree(aux);
- return ret;
- }
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
- if (ret)
- return ret;
-
- ret = auxiliary_device_add(aux);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
- if (!ret)
- *aux_out = aux;
+ return -ENODEV;
- return ret;
+ *aux_out = aux;
+ return 0;
}
/* -----------------------------------------------------------------------------
@@ -1196,7 +1161,8 @@ static void ti_sn_bridge_atomic_post_disable(struct drm_bridge *bridge,
pm_runtime_put_sync(pdata->dev);
}
-static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+ti_sn_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
int val = 0;
@@ -1758,24 +1724,15 @@ static int ti_sn_bridge_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(SN_GPIO_INPUT_SHIFT + offset));
}
-static void ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip);
- int ret;
-
- if (!test_bit(offset, pdata->gchip_output)) {
- dev_err(pdata->dev, "Ignoring GPIO set while input\n");
- return;
- }
val &= 1;
- ret = regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG,
- BIT(SN_GPIO_OUTPUT_SHIFT + offset),
- val << (SN_GPIO_OUTPUT_SHIFT + offset));
- if (ret)
- dev_warn(pdata->dev,
- "Failed to set bridge GPIO %u: %d\n", offset, ret);
+ return regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG,
+ BIT(SN_GPIO_OUTPUT_SHIFT + offset),
+ val << (SN_GPIO_OUTPUT_SHIFT + offset));
}
static int ti_sn_bridge_gpio_direction_input(struct gpio_chip *chip,
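
The ti-sn65dsi86 hunks collapse the open-coded auxiliary_device_init()/auxiliary_device_add() sequence and its three devm cleanup actions into a single call, and convert the gpio_chip .set callback to the value-returning form used across the GPIO subsystem. Assuming __devm_auxiliary_device_create() behaves as used here (a devres-managed create-and-register that returns NULL on failure), the aux-device side reduces to:

#include <linux/auxiliary_bus.h>
#include <linux/i2c.h>

static int add_aux_device(struct device *dev, const char *name,
			  struct auxiliary_device **out)
{
	const struct i2c_client *client = to_i2c_client(dev);
	struct auxiliary_device *aux;
	int id;

	/* pack bus number and address into a unique, stable id so several
	 * instances of the same chip can coexist */
	id = (client->adapter->nr << 10) | client->addr;

	/* alloc + init + add in one call, undone automatically on detach */
	aux = __devm_auxiliary_device_create(dev, KBUILD_MODNAME, name,
					     NULL, id);
	if (!aux)
		return -ENODEV;

	*out = aux;
	return 0;
}
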
diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c
index cca75443f012..27053d020df7 100644
--- a/drivers/gpu/drm/bridge/ti-tdp158.c
+++ b/drivers/gpu/drm/bridge/ti-tdp158.c
@@ -68,9 +68,10 @@ static int tdp158_probe(struct i2c_client *client)
struct tdp158 *tdp158;
struct device *dev = &client->dev;
- tdp158 = devm_kzalloc(dev, sizeof(*tdp158), GFP_KERNEL);
- if (!tdp158)
- return -ENOMEM;
+ tdp158 = devm_drm_bridge_alloc(dev, struct tdp158, bridge,
+ &tdp158_bridge_funcs);
+ if (IS_ERR(tdp158))
+ return PTR_ERR(tdp158);
tdp158->next = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
if (IS_ERR(tdp158->next))
@@ -89,7 +90,6 @@ static int tdp158_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(tdp158->enable), "enable");
tdp158->bridge.of_node = dev->of_node;
- tdp158->bridge.funcs = &tdp158_bridge_funcs;
tdp158->bridge.driver_private = tdp158;
tdp158->dev = dev;
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index e15d232ddbac..b80ee089f880 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -89,7 +89,7 @@ tfp410_connector_detect(struct drm_connector *connector, bool force)
{
struct tfp410 *dvi = drm_connector_to_tfp410(connector);
- return drm_bridge_detect(dvi->next_bridge);
+ return drm_bridge_detect(dvi->next_bridge, connector);
}
static const struct drm_connector_funcs tfp410_con_funcs = {
@@ -341,14 +341,14 @@ static int tfp410_init(struct device *dev, bool i2c)
return -ENXIO;
}
- dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
- if (!dvi)
- return -ENOMEM;
+ dvi = devm_drm_bridge_alloc(dev, struct tfp410, bridge,
+ &tfp410_bridge_funcs);
+ if (IS_ERR(dvi))
+ return PTR_ERR(dvi);
dvi->dev = dev;
dev_set_drvdata(dev, dvi);
- dvi->bridge.funcs = &tfp410_bridge_funcs;
dvi->bridge.of_node = dev->of_node;
dvi->bridge.timings = &dvi->timings;
dvi->bridge.type = DRM_MODE_CONNECTOR_DVID;
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index 1c289051a598..dcf686c4e73d 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -77,6 +77,12 @@ static enum drm_connector_status tpd12s015_detect(struct drm_bridge *bridge)
return connector_status_disconnected;
}
+static enum drm_connector_status
+tpd12s015_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
+{
+ return tpd12s015_detect(bridge);
+}
+
static void tpd12s015_hpd_enable(struct drm_bridge *bridge)
{
struct tpd12s015_device *tpd = to_tpd12s015(bridge);
@@ -94,7 +100,7 @@ static void tpd12s015_hpd_disable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs tpd12s015_bridge_funcs = {
.attach = tpd12s015_attach,
.detach = tpd12s015_detach,
- .detect = tpd12s015_detect,
+ .detect = tpd12s015_bridge_detect,
.hpd_enable = tpd12s015_hpd_enable,
.hpd_disable = tpd12s015_hpd_disable,
};
@@ -116,13 +122,13 @@ static int tpd12s015_probe(struct platform_device *pdev)
struct gpio_desc *gpio;
int ret;
- tpd = devm_kzalloc(&pdev->dev, sizeof(*tpd), GFP_KERNEL);
- if (!tpd)
- return -ENOMEM;
+ tpd = devm_drm_bridge_alloc(&pdev->dev, struct tpd12s015_device,
+ bridge, &tpd12s015_bridge_funcs);
+ if (IS_ERR(tpd))
+ return PTR_ERR(tpd);
platform_set_drvdata(pdev, tpd);
- tpd->bridge.funcs = &tpd12s015_bridge_funcs;
tpd->bridge.of_node = pdev->dev.of_node;
tpd->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
tpd->bridge.ops = DRM_BRIDGE_OP_DETECT;
diff --git a/drivers/gpu/drm/ci/build-igt.sh b/drivers/gpu/drm/ci/build-igt.sh
index caa2f4804ed5..eddb5f782a5e 100644
--- a/drivers/gpu/drm/ci/build-igt.sh
+++ b/drivers/gpu/drm/ci/build-igt.sh
@@ -71,4 +71,4 @@ tar -cf artifacts/igt.tar /igt
# Pass needed files to the test stage
S3_ARTIFACT_NAME="igt.tar.gz"
gzip -c artifacts/igt.tar > ${S3_ARTIFACT_NAME}
-s3_upload ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${KERNEL_ARCH}/
+ci-fairy s3cp --token-file "${S3_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${KERNEL_ARCH}/${S3_ARTIFACT_NAME}
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index 6fb74c51abe2..ac5e7ed195cf 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -113,17 +113,6 @@ mkdir -p install/modules/
INSTALL_MOD_PATH=install/modules/ make modules_install
if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
- make Image.lzma
- mkimage \
- -f auto \
- -A arm \
- -O linux \
- -d arch/arm64/boot/Image.lzma \
- -C lzma\
- -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
- /kernel/cheza-kernel
- KERNEL_IMAGE_NAME+=" cheza-kernel"
-
# Make a gzipped copy of the Image for db410c.
gzip -k /kernel/Image
KERNEL_IMAGE_NAME+=" Image.gz"
@@ -148,13 +137,13 @@ if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
ls -l "${S3_JWT_FILE}"
for f in $FILES_TO_UPLOAD; do
- s3_upload /kernel/$f \
- https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/
+ ci-fairy s3cp --token-file "${S3_JWT_FILE}" /kernel/$f \
+ https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/$f
done
S3_ARTIFACT_NAME="kernel-files.tar.zst"
tar --zstd -cf $S3_ARTIFACT_NAME install
- s3_upload ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/
+ ci-fairy s3cp --token-file "${S3_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/${S3_ARTIFACT_NAME}
echo "Download vmlinux.xz from https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/vmlinux.xz"
fi
diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
index 8eb56ebcf4aa..af27ff5de369 100644
--- a/drivers/gpu/drm/ci/build.yml
+++ b/drivers/gpu/drm/ci/build.yml
@@ -13,7 +13,7 @@
- .build
- .use-debian/arm64_build
tags:
- - aarch64
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
variables:
DEFCONFIG: "arch/arm/configs/multi_v7_defconfig"
KERNEL_IMAGE_NAME: "zImage"
@@ -24,7 +24,7 @@
- .build
- .use-debian/arm64_build
tags:
- - aarch64
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
variables:
DEFCONFIG: "arch/arm64/configs/defconfig"
KERNEL_IMAGE_NAME: "Image"
@@ -44,16 +44,22 @@
igt:arm32:
extends: .build:arm32
+ variables:
+ GIT_DEPTH: 10
script:
- FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
igt:arm64:
extends: .build:arm64
+ variables:
+ GIT_DEPTH: 10
script:
- FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
igt:x86_64:
extends: .build:x86_64
+ variables:
+ GIT_DEPTH: 10
script:
- FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
diff --git a/drivers/gpu/drm/ci/check-devicetrees.yml b/drivers/gpu/drm/ci/check-devicetrees.yml
new file mode 100644
index 000000000000..727bd56018b8
--- /dev/null
+++ b/drivers/gpu/drm/ci/check-devicetrees.yml
@@ -0,0 +1,50 @@
+.dt-check-base:
+ stage: static-checks
+ timeout: "30m"
+ variables:
+ GIT_DEPTH: 1
+ FF_USE_NEW_BASH_EVAL_STRATEGY: 'true'
+ SCHEMA: "display:gpu"
+ VENV_PATH: "/tmp/dtcheck-venv"
+ before_script:
+ - apt-get update -qq
+ # Minimum supported version of LLVM for building x86 kernels is 15.0.0.
+ # In mesa-ci containers, LLVM_VERSION is defined as a container-level property and is currently set to 19.
+ - apt-get install -y --no-install-recommends clang-${LLVM_VERSION} lld-${LLVM_VERSION} llvm-${LLVM_VERSION} python3-dev python3-venv python3-pip yamllint
+ - python3 -m venv "${VENV_PATH}"
+ - source "${VENV_PATH}/bin/activate"
+ - pip3 install dtschema
+ script:
+ - drivers/gpu/drm/ci/${SCRIPT_NAME}
+ artifacts:
+ when: on_failure
+ paths:
+ - ${ARTIFACT_FILE}
+ allow_failure:
+ exit_codes:
+ - 102
+
+dtbs-check:arm32:
+ extends:
+ - .build:arm32
+ - .dt-check-base
+ variables:
+ SCRIPT_NAME: "dtbs-check.sh"
+ ARTIFACT_FILE: "dtbs-check.log"
+
+dtbs-check:arm64:
+ extends:
+ - .build:arm64
+ - .dt-check-base
+ variables:
+ SCRIPT_NAME: "dtbs-check.sh"
+ ARTIFACT_FILE: "dtbs-check.log"
+
+dt-binding-check:
+ extends:
+ - .build
+ - .use-debian/x86_64_build
+ - .dt-check-base
+ variables:
+ SCRIPT_NAME: "dt-binding-check.sh"
+ ARTIFACT_FILE: "dt-binding-check.log"
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
index 56c95c2f91ae..5f90508578a3 100644
--- a/drivers/gpu/drm/ci/container.yml
+++ b/drivers/gpu/drm/ci/container.yml
@@ -20,31 +20,15 @@ debian/arm64_build:
EXTRA_LOCAL_PACKAGES: "jq libasound2 libcairo2 libdw1 libglib2.0-0 libjson-c5"
# Disable container jobs that we won't use
-alpine/x86_64_build:
- rules:
- - when: never
-
-debian/arm32_test-base:
- rules:
- - when: never
-
-debian/arm32_test-gl:
- rules:
- - when: never
-
-debian/arm32_test-vk:
- rules:
- - when: never
-
-debian/arm64_test-gl:
+debian/arm64_test-vk:
rules:
- when: never
-debian/arm64_test-vk:
+debian/baremetal_arm32_test-gl:
rules:
- when: never
-debian/baremetal_arm32_test:
+debian/baremetal_arm64_test-vk:
rules:
- when: never
@@ -64,19 +48,19 @@ debian/x86_64_test-android:
rules:
- when: never
-debian/x86_64_test-vk:
+debian/x86_64_test-video:
rules:
- when: never
-fedora/x86_64_build:
+debian/x86_64_test-vk:
rules:
- when: never
-debian/android_build:
+fedora/x86_64_build:
rules:
- when: never
-.debian/x86_64_test-android:
+debian/android_build:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/dt-binding-check.sh b/drivers/gpu/drm/ci/dt-binding-check.sh
new file mode 100755
index 000000000000..99e1c0df84b7
--- /dev/null
+++ b/drivers/gpu/drm/ci/dt-binding-check.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -euxo pipefail
+
+VENV_PATH="${VENV_PATH:-/tmp/dtschema-venv}"
+source "${VENV_PATH}/bin/activate"
+
+if ! make -j"${FDO_CI_CONCURRENT:-4}" dt_binding_check \
+ DT_SCHEMA_FILES="${SCHEMA:-}" 2>dt-binding-check.log; then
+ echo "ERROR: 'make dt_binding_check' failed. Please check dt-binding-check.log for details."
+ exit 1
+fi
+
+if [[ -s dt-binding-check.log ]]; then
+ echo "WARNING: dt_binding_check reported warnings. Please check dt-binding-check.log" \
+ "for details."
+ exit 102
+fi
diff --git a/drivers/gpu/drm/ci/dtbs-check.sh b/drivers/gpu/drm/ci/dtbs-check.sh
new file mode 100755
index 000000000000..57842c452439
--- /dev/null
+++ b/drivers/gpu/drm/ci/dtbs-check.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -euxo pipefail
+
+: "${KERNEL_ARCH:?ERROR: KERNEL_ARCH must be set}"
+: "${LLVM_VERSION:?ERROR: LLVM_VERSION must be set}"
+
+./drivers/gpu/drm/ci/setup-llvm-links.sh
+
+make LLVM=1 ARCH="${KERNEL_ARCH}" defconfig
+
+if ! make -j"${FDO_CI_CONCURRENT:-4}" ARCH="${KERNEL_ARCH}" LLVM=1 dtbs_check \
+ DT_SCHEMA_FILES="${SCHEMA:-}" 2>dtbs-check.log; then
+ echo "ERROR: 'make dtbs_check' failed. Please check dtbs-check.log for details."
+ exit 1
+fi
+
+if [[ -s dtbs-check.log ]]; then
+ echo "WARNING: dtbs_check reported warnings. Please check dtbs-check.log for details."
+ exit 102
+fi
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index ba75b3a7eca4..d502d146b177 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,17 +1,17 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha f73132f1215a37ce8ffc711a0136c90649aaf128
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 02337aec715c25dae7ff2479d986f831c77fe536
UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: 04bedb9238586b81d4d4ca62b02e584f6cfc77af
+ IGT_VERSION: 129d5b10baaadea1d6cd6377341c4cb42e7ee6fd
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.20.0
FDO_UPSTREAM_REPO: helen.fornazier/linux # The repo where the git-archive daily runs
- MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb
+ MESA_TEMPLATES_COMMIT: &ci-templates-commit c6aeb16f86e32525fa630fb99c66c4f3e62fc3cb
DRM_CI_PROJECT_URL: https://gitlab.freedesktop.org/${DRM_CI_PROJECT_PATH}
CI_PRE_CLONE_SCRIPT: |-
set -o xtrace
@@ -20,10 +20,8 @@ variables:
rm download-git-cache.sh
set +o xtrace
S3_JWT_FILE: /s3_jwt
- S3_JWT_HEADER_FILE: /s3_jwt_header
S3_JWT_FILE_SCRIPT: |-
echo -n '${S3_JWT}' > '${S3_JWT_FILE}' &&
- echo -n "Authorization: Bearer ${S3_JWT}" > '${S3_JWT_HEADER_FILE}' &&
unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables
S3_HOST: s3.freedesktop.org
# This bucket is used to fetch the kernel image
@@ -38,7 +36,11 @@ variables:
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG}
LAVA_TAGS: subset-1-gfx
- LAVA_JOB_PRIORITY: 30
+ # Default priority for non-merge pipelines
+ FDO_RUNNER_JOB_PRIORITY_TAG_X86_64: "" # Empty tags are ignored by gitlab
+ FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM: kvm
+ FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64: aarch64
+ JOB_PRIORITY: 30
ARTIFACTS_BASE_URL: https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts
# Python scripts for structured logger
PYTHONPATH: "$PYTHONPATH:$CI_PROJECT_DIR/install"
@@ -73,15 +75,12 @@ default:
include:
- project: 'freedesktop/ci-templates'
- ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
- file:
- - '/templates/ci-fairy.yml'
- - project: 'freedesktop/ci-templates'
ref: *ci-templates-commit
file:
- '/templates/alpine.yml'
- '/templates/debian.yml'
- '/templates/fedora.yml'
+ - '/templates/ci-fairy.yml'
- project: *drm-ci-project-path
ref: *drm-ci-commit-sha
file:
@@ -105,20 +104,26 @@ include:
- '/src/microsoft/ci/gitlab-ci-inc.yml'
- '/src/nouveau/ci/gitlab-ci-inc.yml'
- '/src/virtio/ci/gitlab-ci-inc.yml'
+ - 'docs/gitlab-ci.yml'
- drivers/gpu/drm/ci/image-tags.yml
- drivers/gpu/drm/ci/container.yml
- drivers/gpu/drm/ci/static-checks.yml
- drivers/gpu/drm/ci/build.yml
- drivers/gpu/drm/ci/test.yml
+ - drivers/gpu/drm/ci/check-devicetrees.yml
+ - drivers/gpu/drm/ci/kunit.yml
- 'https://gitlab.freedesktop.org/gfx-ci/lab-status/-/raw/main/lab-status.yml'
stages:
- sanity
- container
+ - deploy
- git-archive
- build-for-tests
- build-only
+ - static-checks
+ - kunit
- code-validation
- amdgpu
- i915
@@ -232,16 +237,20 @@ stages:
- _build/meson-logs/strace
+python-artifacts:
+ variables:
+ GIT_DEPTH: 10
+
+
# Git archive
-make git archive:
+make-git-archive:
extends:
- .fdo.ci-fairy
stage: git-archive
rules:
- !reference [.scheduled_pipeline-rules, rules]
- # ensure we are running on packet
tags:
- - packet.net
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
script:
# Remove drm-ci files we just added
- rm -rf .gitlab-ci.*
@@ -253,7 +262,7 @@ make git archive:
- tar -cvzf ../$CI_PROJECT_NAME.tar.gz .
# Use id_tokens for JWT auth
- - s3_upload ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/${S3_GITCACHE_BUCKET}/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/
+ - ci-fairy s3cp --token-file "${S3_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/${S3_GITCACHE_BUCKET}/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
# Sanity checks of MR settings and commit logs
@@ -261,6 +270,8 @@ sanity:
extends:
- .fdo.ci-fairy
stage: sanity
+ tags:
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
rules:
- if: *is-pre-merge
when: on_success
@@ -279,7 +290,6 @@ sanity:
DEBIAN_BUILD_TAG
DEBIAN_PYUTILS_TAG
DEBIAN_TEST_GL_TAG
- KERNEL_ROOTFS_TAG
KERNEL_TAG
PKG_REPO_REV
)
@@ -295,14 +305,14 @@ sanity:
when: on_failure
reports:
junit: check-*.xml
- tags:
- - placeholder-job
mr-label-maker-test:
extends:
- .fdo.ci-fairy
stage: sanity
+ tags:
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
rules:
- !reference [.mr-label-maker-rules, rules]
variables:
@@ -325,3 +335,15 @@ mr-label-maker-test:
optional: true
- job: toml-lint
optional: true
+
+deploy-docs:
+ rules:
+ - when: never
+
+linkcheck-docs:
+ rules:
+ - when: never
+
+test-docs:
+ rules:
+ - when: never
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index 2a0599f12c58..b24d4bc53cda 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -19,6 +19,7 @@ set +e
cat /sys/kernel/debug/dri/*/state
set -e
+mkdir -p /lib/modules
case "$DRIVER_NAME" in
amdgpu|vkms)
# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 53fe34b86578..7acc2e2a8eaa 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,24 +1,18 @@
variables:
- CONTAINER_TAG: "20250328-mesa-uprev"
- DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
+ CONTAINER_TAG: "20250502-mesa-uprev"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
-
- DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build"
DEBIAN_BUILD_TAG: "${CONTAINER_TAG}"
- KERNEL_ROOTFS_TAG: "${CONTAINER_TAG}"
+ DEBIAN_TEST_GL_TAG: "${CONTAINER_TAG}"
# default kernel for rootfs before injecting the current kernel tree
- KERNEL_TAG: "v6.13-rc4-mesa-5e77"
+ KERNEL_TAG: "v6.14-mesa-0bdd"
KERNEL_REPO: "gfx-ci/linux"
- PKG_REPO_REV: "bca9635d"
-
- DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
- DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"
- DEBIAN_TEST_GL_TAG: "${CONTAINER_TAG}"
+ PKG_REPO_REV: "95bf62c"
- DEBIAN_PYUTILS_IMAGE: "debian/x86_64_pyutils"
DEBIAN_PYUTILS_TAG: "${CONTAINER_TAG}"
+ ALPINE_X86_64_BUILD_TAG: "${CONTAINER_TAG}"
ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
- CONDITIONAL_BUILD_ANGLE_TAG: fec96cc945650c5fe9f7188cabe80d8a
+ CONDITIONAL_BUILD_ANGLE_TAG: 384145a4023315dae658259bee07c43a
+ CONDITIONAL_BUILD_PIGLIT_TAG: a19e424b8a3f020dbf1b9dd29f220a4f
diff --git a/drivers/gpu/drm/ci/kunit.sh b/drivers/gpu/drm/ci/kunit.sh
new file mode 100755
index 000000000000..7a1052fd3f17
--- /dev/null
+++ b/drivers/gpu/drm/ci/kunit.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -euxo pipefail
+
+: "${KERNEL_ARCH:?ERROR: KERNEL_ARCH must be set}"
+: "${LLVM_VERSION:?ERROR: LLVM_VERSION must be set}"
+
+./drivers/gpu/drm/ci/setup-llvm-links.sh
+
+export PATH="/usr/bin:$PATH"
+
+./tools/testing/kunit/kunit.py run \
+ --arch "${KERNEL_ARCH}" \
+ --make_options LLVM=1 \
+ --kunitconfig=drivers/gpu/drm/tests
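For reference, the invocation above can be reproduced outside CI. A minimal sketch, assuming it is run from the top of a kernel tree with an LLVM toolchain (15 or newer for x86) already on PATH; KERNEL_ARCH here stands in for the container-level variable:

    # Run the DRM KUnit suites, mirroring kunit.sh
    KERNEL_ARCH=x86_64
    ./tools/testing/kunit/kunit.py run \
        --arch "${KERNEL_ARCH}" \
        --make_options LLVM=1 \
        --kunitconfig=drivers/gpu/drm/tests

For a non-UML --arch such as x86_64, kunit.py boots the test kernel under the matching qemu-system binary, which is why each kunit:* job below installs its own qemu-system package.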
diff --git a/drivers/gpu/drm/ci/kunit.yml b/drivers/gpu/drm/ci/kunit.yml
new file mode 100644
index 000000000000..0d5b2c4433d2
--- /dev/null
+++ b/drivers/gpu/drm/ci/kunit.yml
@@ -0,0 +1,37 @@
+.kunit-packages: &kunit-packages
+ - apt-get update -qq
+ # Minimum supported version of LLVM for building x86 kernels is 15.0.0.
+ # In mesa-ci containers, LLVM_VERSION is defined as a container-level property and is currently set to 19.
+ - apt-get install -y --no-install-recommends clang-${LLVM_VERSION} lld-${LLVM_VERSION} llvm-${LLVM_VERSION}
+
+.kunit-base:
+ stage: kunit
+ timeout: "30m"
+ variables:
+ GIT_DEPTH: 1
+ script:
+ - drivers/gpu/drm/ci/kunit.sh
+
+kunit:arm32:
+ extends:
+ - .build:arm32
+ - .kunit-base
+ before_script:
+ - *kunit-packages
+ - apt-get install -y --no-install-recommends qemu-system-arm
+
+kunit:arm64:
+ extends:
+ - .build:arm64
+ - .kunit-base
+ before_script:
+ - *kunit-packages
+ - apt-get install -y --no-install-recommends qemu-system-aarch64
+
+kunit:x86_64:
+ extends:
+ - .build:x86_64
+ - .kunit-base
+ before_script:
+ - *kunit-packages
+ - apt-get install -y --no-install-recommends qemu-system-x86
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
index a1e8b34fb2d4..a295102c3468 100755
--- a/drivers/gpu/drm/ci/lava-submit.sh
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -41,7 +41,6 @@ section_start prepare_rootfs "Preparing root filesystem"
set -ex
-section_switch rootfs "Assembling root filesystem"
ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
[ $? != 1 ] || exit 1
@@ -54,7 +53,7 @@ cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
-s3_upload job-rootfs-overlay.tar.gz "https://${JOB_ARTIFACTS_BASE}"
+ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
# Prepare env vars for upload.
section_switch variables "Environment variables passed through to device:"
@@ -64,6 +63,9 @@ section_switch lava_submit "Submitting job for scheduling"
touch results/lava.log
tail -f results/lava.log &
+# Ensure that we are printing the commands that are being executed,
+# making it easier to debug the job in case it fails.
+set -x
PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
--farm "${FARM}" \
--device-type "${DEVICE_TYPE}" \
diff --git a/drivers/gpu/drm/ci/setup-llvm-links.sh b/drivers/gpu/drm/ci/setup-llvm-links.sh
new file mode 100755
index 000000000000..ace33af82a3f
--- /dev/null
+++ b/drivers/gpu/drm/ci/setup-llvm-links.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: MIT
+set -euo pipefail
+
+ln -svf "$(which clang++-${LLVM_VERSION})" /usr/bin/clang++
+ln -svf "$(which clang-${LLVM_VERSION})" /usr/bin/clang
+ln -svf "$(which ld.lld-${LLVM_VERSION})" /usr/bin/ld.lld
+ln -svf "$(which lld-${LLVM_VERSION})" /usr/bin/lld
+ln -svf "$(which llvm-ar-${LLVM_VERSION})" /usr/bin/llvm-ar
+ln -svf "$(which llvm-nm-${LLVM_VERSION})" /usr/bin/llvm-nm
+ln -svf "$(which llvm-objcopy-${LLVM_VERSION})" /usr/bin/llvm-objcopy
+ln -svf "$(which llvm-readelf-${LLVM_VERSION})" /usr/bin/llvm-readelf
+ln -svf "$(which llvm-strip-${LLVM_VERSION})" /usr/bin/llvm-strip
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 84a25f0e783b..81147e86bfd0 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -24,7 +24,7 @@
.lava-igt:arm32:
extends:
- - .lava-test:arm32
+ - .lava-arm32-test-gl
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
DEBIAN_ARCH: "armhf"
@@ -33,15 +33,14 @@
- testing:arm32
needs:
- alpine/x86_64_lava_ssh_client
- - kernel+rootfs_arm32
- - debian/x86_64_build
+ - debian/arm32_test-gl
- python-artifacts
- testing:arm32
- igt:arm32
.lava-igt:arm64:
extends:
- - .lava-test:arm64
+ - .lava-arm64-test-gl
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
DEBIAN_ARCH: "arm64"
@@ -50,15 +49,14 @@
- testing:arm64
needs:
- alpine/x86_64_lava_ssh_client
- - kernel+rootfs_arm64
- - debian/x86_64_build
+ - debian/arm64_test-gl
- python-artifacts
- testing:arm64
- igt:arm64
.lava-igt:x86_64:
extends:
- - .lava-test:x86_64
+ - .lava-x86_64-test-gl
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
DEBIAN_ARCH: "amd64"
@@ -67,16 +65,15 @@
- testing:x86_64
needs:
- alpine/x86_64_lava_ssh_client
- - kernel+rootfs_x86_64
- - debian/x86_64_build
+ - debian/x86_64_test-gl
- python-artifacts
- testing:x86_64
- igt:x86_64
.baremetal-igt-arm64:
extends:
- - .baremetal-test-arm64
- - .use-debian/baremetal_arm64_test
+ - .baremetal-test-arm64-gl
+ - .use-debian/baremetal_arm64_test-gl
- .allow_failure_lockdep
timeout: "1h30m"
rules:
@@ -91,7 +88,7 @@
BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
FARM: google
needs:
- - debian/baremetal_arm64_test
+ - debian/baremetal_arm64_test-gl
- job: testing:arm64
artifacts: false
- igt:arm64
@@ -101,19 +98,21 @@
.software-driver:
stage: software-driver
extends:
+ - .test-gl
- .allow_failure_lockdep
timeout: "1h30m"
rules:
- !reference [.scheduled_pipeline-rules, rules]
- when: on_success
- extends:
- - .test-gl
tags:
- - kvm
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM
+ before_script:
+ - !reference [default, before_script]
+ - rm -rf install
+ - tar -xf artifacts/install.tar
script:
- ln -sf $CI_PROJECT_DIR/install /install
- mv install/bzImage /kernel/bzImage
- - mkdir -p /lib/modules
- install/crosvm-runner.sh install/igt_runner.sh
needs:
- debian/x86_64_test-gl
@@ -180,20 +179,6 @@ msm:apq8096:
script:
- ./install/bare-metal/fastboot.sh || exit $?
-msm:sdm845:
- extends:
- - .baremetal-igt-arm64
- stage: msm
- parallel: 6
- variables:
- DEVICE_TYPE: sdm845-cheza-r3
- DRIVER_NAME: msm
- BM_KERNEL: https://${PIPELINE_ARTIFACTS_BASE}/arm64/cheza-kernel
- GPU_VERSION: sdm845
- RUNNER_TAG: google-freedreno-cheza
- script:
- - ./install/bare-metal/cros-servo.sh || exit $?
-
msm:sm8350-hdk:
extends:
- .lava-igt:arm64
@@ -323,7 +308,7 @@ i915:cml:
variables:
DEVICE_TYPE: asus-C436FA-Flip-hatch
GPU_VERSION: cml
- RUNNER_TAG: mesa-ci-x86-64-lava-asus-C436FA-flip-hatch
+ RUNNER_TAG: mesa-ci-x86-64-lava-asus-C436FA-Flip-hatch
i915:tgl:
extends:
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index d4b8ba3a54a9..154b047787b2 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -32,3 +32,8 @@ kms_display_modes@mst-extended-mode-negative
# It causes other tests to fail, so skip it.
kms_invalid_mode@overflow-vrefresh
+
+# sc7180 does not have APRIV, so memptrs is not protected.
+# (Preemption is not supported on devices that do not have
+# APRIV, so this is ok)
+msm/msm_mapping@memptrs
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index 022db559cc7d..a9bb3e1ad75c 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -23,3 +23,8 @@ core_hotunplug.*
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
kms_display_modes@extended-mode-basic
kms_display_modes@mst-extended-mode-negative
+
+# sc7180 does not have APRIV, so memptrs is not protected.
+# (Preemption is not supported on devices that do not have
+# APRIV, so this is ok)
+msm/msm_mapping@memptrs
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
deleted file mode 100644
index 7a2ab58b706f..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-kms_color@ctm-0-25,Fail
-kms_color@ctm-0-50,Fail
-kms_color@ctm-0-75,Fail
-kms_color@ctm-blue-to-red,Fail
-kms_color@ctm-green-to-red,Fail
-kms_color@ctm-negative,Fail
-kms_color@ctm-red-to-blue,Fail
-kms_color@ctm-signed,Fail
-kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
-kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
-kms_cursor_legacy@cursor-vs-flip-atomic,Fail
-kms_cursor_legacy@cursor-vs-flip-atomic-transitions,Fail
-kms_cursor_legacy@cursor-vs-flip-atomic-transitions-varying-size,Fail
-kms_cursor_legacy@cursor-vs-flip-legacy,Fail
-kms_cursor_legacy@cursor-vs-flip-toggle,Fail
-kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
-kms_cursor_legacy@flip-vs-cursor-atomic,Fail
-kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail
-kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
-kms_cursor_legacy@flip-vs-cursor-legacy,Fail
-kms_flip@flip-vs-modeset-vs-hang,Fail
-kms_flip@flip-vs-panning-vs-hang,Fail
-kms_lease@lease-uevent,Fail
-kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
-kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@coverage-7efc,Fail
-kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_cursor@overlay,Fail
-kms_plane_cursor@viewport,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
deleted file mode 100644
index e32d73c6c98e..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ /dev/null
@@ -1,139 +0,0 @@
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-after-cursor-atomic
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-after-cursor-legacy
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-after-cursor-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-before-cursor-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@flip-vs-cursor-atomic-transitions
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@flip-vs-cursor-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-after-cursor-atomic-transitions
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-after-cursor-toggle
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-msm/msm_shrink@copy-gpu-32
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-msm/msm_shrink@copy-gpu-oom-32
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-gf13702b8e
-# Linux Version: 6.10.0-rc5
-kms_cursor_legacy@short-flip-before-cursor-toggle
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-gf13702b8e
-# Linux Version: 6.10.0-rc5
-kms_cursor_legacy@flip-vs-cursor-toggle
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-gf13702b8e
-# Linux Version: 6.10.0-rc5
-msm/msm_shrink@copy-mmap-oom-8
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t
-# Failure Rate: 50
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.11.0-rc2
-kms_lease@page-flip-implicit-plane
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t
-# Failure Rate: 50
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.11.0-rc5
-kms_flip@flip-vs-expired-vblank
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/75
-# Failure Rate: 20
-# IGT Version: 1.30-g04bedb923
-# Linux Version: 6.14.0-rc4
-kms_flip@plain-flip-ts-check-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
deleted file mode 100644
index 6c86d1953e11..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ /dev/null
@@ -1,350 +0,0 @@
-# Hangs machine
-kms_bw.*
-
-# Failing due to a bootloader/fw issue. The workaround in mesa CI involves these two patches
-# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/4b49f902ec6f2bb382cbbf489870573f4b43371e
-# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/38cdf4c5559771e2474ae0fecef8469f65147bc1
-msm/msm_mapping@*
-
-# Skip driver specific tests
-^amdgpu.*
-nouveau_.*
-^panfrost.*
-^v3d.*
-^vc4.*
-^vmwgfx*
-
-# Skip intel specific tests
-gem_.*
-i915_.*
-tools_test.*
-kms_dp_link_training.*
-
-# Currently fails and causes coverage loss for other tests
-# since core_getversion also fails.
-core_hotunplug.*
-
-# Whole machine hangs
-kms_cursor_crc.*
-
-# IGT test crash
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.11.0-rc2
-kms_content_protection@uevent
-
-# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
-# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
-kms_display_modes@extended-mode-basic
-kms_display_modes@mst-extended-mode-negative
-
-# Kernel panic
-msm/msm_recovery@hangcheck
-# DEBUG - Begin test msm/msm_recovery@hangcheck
-# Console: switching to colour dummy device 80x25
-# [ 489.526286] [IGT] msm_recovery: executing
-# [ 489.531926] [IGT] msm_recovery: starting subtest hangcheck
-# [ 492.808574] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 492.820358] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 492.831154] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 493.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 493.844177] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 493.854971] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 494.824633] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 494.836237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 494.847034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 495.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 495.828170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 495.838966] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 496.804643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 496.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 496.827041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 497.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 497.844170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 497.854963] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 498.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 498.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 498.843024] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 499.816568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 499.828163] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 499.838958] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 500.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 500.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 500.830960] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 501.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 501.844175] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 501.854965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 502.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 502.836171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 502.846965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 503.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 503.828176] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 503.838969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 504.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 504.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 504.827033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 505.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 505.840247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 505.851043] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 506.820637] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 506.832233] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 506.843026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 507.816567] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 507.828171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 507.838965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 508.808568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 508.820173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 508.830969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 509.832568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 509.844173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 509.854967] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 510.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 510.836162] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 510.846954] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 511.816569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 511.828173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 511.838968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 512.804641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 512.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 512.827040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 513.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 513.840239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 513.851035] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 514.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 514.836164] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 514.846959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 515.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 515.824235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 515.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 515.912427] rcu: INFO: rcu_preempt self-detected stall on CPU
-# [ 515.918398] rcu: 0-....: (6452 ticks this GP) idle=6afc/1/0x4000000000000000 softirq=12492/12697 fqs=3179
-# [ 515.929296] rcu: (t=6505 jiffies g=36205 q=58 ncpus=8)
-# [ 515.934709] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1
-# [ 515.934727] Tainted: [W]=WARN
-# [ 515.934732] Hardware name: Google Cheza (rev3+) (DT)
-# [ 515.934739] pstate: 00400009 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
-# [ 515.934751] pc : rcu_core+0x59c/0xe68
-# [ 515.934769] lr : rcu_core+0x74/0xe68
-# [ 515.934781] sp : ffff800080003e50
-# [ 515.934785] x29: ffff800080003e50 x28: ffff225d038e9bc0 x27: 0000000000000002
-# [ 515.934805] x26: ffffc171a8ee6108 x25: ffffc171a85bc2c0 x24: ffff60ecd691e000
-# [ 515.934820] x23: ffffc171a85d15c0 x22: ffffc171a8f8d780 x21: ffff225e7eeef5c0
-# [ 515.934835] x20: ffffc171a8ef0e80 x19: ffffc171a85d15d1 x18: ffffc171a9461e70
-# [ 515.934850] x17: ffff60ecd691e000 x16: ffff800080000000 x15: 0000000000000000
-# [ 515.934866] x14: ffffc171a85d0780 x13: 0000000000000400 x12: 0000000000000000
-# [ 515.934880] x11: ffffc171a85ce900 x10: ffffc171a8ef5000 x9 : ffffc171a8ef0000
-# [ 515.934894] x8 : ffff800080003d88 x7 : ffffc171a8ee6100 x6 : ffff800080003de0
-# [ 515.934909] x5 : ffff800080003dc8 x4 : 0000000000000003 x3 : 0000000000000000
-# [ 515.934923] x2 : 0000000000000101 x1 : 0000000000000000 x0 : ffff225d038e9bc0
-# [ 515.934939] Call trace:
-# [ 515.934945] rcu_core+0x59c/0xe68 (P)
-# [ 515.934962] rcu_core_si+0x10/0x1c
-# [ 515.934976] handle_softirqs+0x118/0x4b8
-# [ 515.934994] __do_softirq+0x14/0x20
-# [ 515.935007] ____do_softirq+0x10/0x1c
-# [ 515.935021] call_on_irq_stack+0x24/0x4c
-# [ 515.935034] do_softirq_own_stack+0x1c/0x28
-# [ 515.935048] __irq_exit_rcu+0x174/0x1b4
-# [ 515.935063] irq_exit_rcu+0x10/0x38
-# [ 515.935077] el1_interrupt+0x38/0x64
-# [ 515.935092] el1h_64_irq_handler+0x18/0x24
-# [ 515.935104] el1h_64_irq+0x6c/0x70
-# [ 515.935115] lock_acquire+0x1e0/0x338 (P)
-# [ 515.935129] __mutex_lock+0xa8/0x4b8
-# [ 515.935144] mutex_lock_nested+0x24/0x30
-# [ 515.935159] _find_opp_table_unlocked+0x40/0xfc
-# [ 515.935174] _find_key+0x64/0x16c
-# [ 515.935184] dev_pm_opp_find_freq_exact+0x4c/0x74
-# [ 515.935197] qcom_cpufreq_hw_target_index+0xe8/0x128
-# [ 515.935211] __cpufreq_driver_target+0x144/0x29c
-# [ 515.935227] sugov_work+0x58/0x74
-# [ 515.935239] kthread_worker_fn+0xf4/0x324
-# [ 515.935254] kthread+0x12c/0x208
-# [ 515.935266] ret_from_fork+0x10/0x20
-# [ 516.808569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 516.820174] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 516.830968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 517.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 517.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 517.851032] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 518.820642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 518.832237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 518.843030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 519.812636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 519.824231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 519.835026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 520.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 520.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 520.830959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 521.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 521.840238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 521.851033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 522.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 522.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 522.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 523.812639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 523.824239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 523.835034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 524.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 524.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 524.827026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 525.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 525.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 525.851031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 526.820641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 526.832244] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 526.843041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 527.812642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 527.824242] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 527.835038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 528.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 528.816234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 528.827027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 529.832634] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 529.844231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 529.855017] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 530.820646] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 530.832270] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 530.843065] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 531.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 531.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 531.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 532.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 532.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 532.827031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 533.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 533.840243] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 533.851037] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 534.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 534.832245] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 534.843038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 535.812641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 535.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 535.835033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 536.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 536.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 536.827030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 537.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 537.840234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 537.851020] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 538.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 538.832235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 538.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 539.812644] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 539.824247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 539.835040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 540.124426] watchdog: BUG: soft lockup - CPU#0 stuck for 49s! [sugov:0:126]
-# [ 540.124439] Modules linked in:
-# [ 540.124448] irq event stamp: 9912389
-# [ 540.124453] hardirqs last enabled at (9912388): [<ffffc171a767a24c>] exit_to_kernel_mode+0x38/0x130
-# [ 540.124473] hardirqs last disabled at (9912389): [<ffffc171a767a368>] el1_interrupt+0x24/0x64
-# [ 540.124486] softirqs last enabled at (9898068): [<ffffc171a62bc290>] handle_softirqs+0x4a0/0x4b8
-# [ 540.124505] softirqs last disabled at (9898071): [<ffffc171a62105b0>] __do_softirq+0x14/0x20
-# [ 540.124525] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1
-# [ 540.124540] Tainted: [W]=WARN
-# [ 540.124544] Hardware name: Google Cheza (rev3+) (DT)
-# [ 540.124549] pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
-# [ 540.124560] pc : xhci_urb_enqueue+0xbc/0x32c
-# [ 540.124573] lr : xhci_urb_enqueue+0xb4/0x32c
-# [ 540.124581] sp : ffff800080003c20
-# [ 540.124586] x29: ffff800080003c20 x28: 0000000000000000 x27: ffff225d00b1e6a0
-# [ 540.124602] x26: ffff225d01c3d800 x25: 0000000000000001 x24: 0000000000000006
-# [ 540.124617] x23: ffff225d044dc000 x22: ffff225d044dc000 x21: 0000000000000001
-# [ 540.124632] x20: ffff225d002d7280 x19: ffff225d0573a780 x18: ffff225e7eff0f50
-# [ 540.124647] x17: 000000000000cab0 x16: 0000000000000000 x15: ffff225d0353a000
-# [ 540.124661] x14: 0000000000000000 x13: 0000000000000820 x12: 0000000000000000
-# [ 540.124674] x11: ffff800080003a30 x10: 0000000000000001 x9 : 0000000000000000
-# [ 540.124689] x8 : ffff225d002d7300 x7 : 0000000000000000 x6 : 000000000000003f
-# [ 540.124702] x5 : 00000000ffffffff x4 : 0000000000000920 x3 : 0000000000000080
-# [ 540.124716] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff225d002d7280
-# [ 540.124731] Call trace:
-# [ 540.124736] xhci_urb_enqueue+0xbc/0x32c (P)
-# [ 540.124751] usb_hcd_submit_urb+0x98/0x7fc
-# [ 540.124766] usb_submit_urb+0x294/0x560
-# [ 540.124780] intr_callback+0x78/0x1fc
-# [ 540.124798] __usb_hcd_giveback_urb+0x68/0x128
-# [ 540.124812] usb_giveback_urb_bh+0xa8/0x140
-# [ 540.124825] process_one_work+0x208/0x5e8
-# [ 540.124840] bh_worker+0x1a8/0x20c
-# [ 540.124853] workqueue_softirq_action+0x78/0x88
-# [ 540.124868] tasklet_hi_action+0x14/0x3c
-# [ 540.124883] handle_softirqs+0x118/0x4b8
-# [ 540.124897] __do_softirq+0x14/0x20
-# [ 540.124908] ____do_softirq+0x10/0x1c
-# [ 540.124922] call_on_irq_stack+0x24/0x4c
-# [ 540.124934] do_softirq_own_stack+0x1c/0x28
-# [ 540.124947] __irq_exit_rcu+0x174/0x1b4
-# [ 540.124961] irq_exit_rcu+0x10/0x38
-# [ 540.124976] el1_interrupt+0x38/0x64
-# [ 540.124987] el1h_64_irq_handler+0x18/0x24
-# [ 540.124998] el1h_64_irq+0x6c/0x70
-# [ 540.125009] lock_acquire+0x1e0/0x338 (P)
-# [ 540.125023] __mutex_lock+0xa8/0x4b8
-# [ 540.125038] mutex_lock_nested+0x24/0x30
-# [ 540.125052] _find_opp_table_unlocked+0x40/0xfc
-# [ 540.125067] _find_key+0x64/0x16c
-# [ 540.125078] dev_pm_opp_find_freq_exact+0x4c/0x74
-# [ 540.125090] qcom_cpufreq_hw_target_index+0xe8/0x128
-# [ 540.125105] __cpufreq_driver_target+0x144/0x29c
-# [ 540.125121] sugov_work+0x58/0x74
-# [ 540.125133] kthread_worker_fn+0xf4/0x324
-# [ 540.125148] kthread+0x12c/0x208
-# [ 540.125160] ret_from_fork+0x10/0x20
-# [ 540.125176] Kernel panic - not syncing: softlockup: hung tasks
-# [ 540.423567] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W L 6.14.0-rc4-gdddf15cff632 #1
-# [ 540.433411] Tainted: [W]=WARN, [L]=SOFTLOCKUP
-# [ 540.437901] Hardware name: Google Cheza (rev3+) (DT)
-# [ 540.443022] Call trace:
-# [ 540.445559] show_stack+0x18/0x24 (C)
-# [ 540.449357] dump_stack_lvl+0x38/0xd0
-# [ 540.453157] dump_stack+0x18/0x24
-# [ 540.456599] panic+0x3bc/0x41c
-# [ 540.459767] watchdog_timer_fn+0x254/0x2e4
-# [ 540.464005] __hrtimer_run_queues+0x3c4/0x440
-# [ 540.468508] hrtimer_interrupt+0xe4/0x244
-# [ 540.472662] arch_timer_handler_phys+0x2c/0x44
-# [ 540.477256] handle_percpu_devid_irq+0x90/0x1f0
-# [ 540.481943] handle_irq_desc+0x40/0x58
-# [ 540.485829] generic_handle_domain_irq+0x1c/0x28
-# [ 540.490604] gic_handle_irq+0x4c/0x11c
-# [ 540.494483] do_interrupt_handler+0x50/0x84
-# [ 540.498811] el1_interrupt+0x34/0x64
-# [ 540.502518] el1h_64_irq_handler+0x18/0x24
-# [ 540.506758] el1h_64_irq+0x6c/0x70
-# [ 540.510279] xhci_urb_enqueue+0xbc/0x32c (P)
-# [ 540.514693] usb_hcd_submit_urb+0x98/0x7fc
-# [ 540.518932] usb_submit_urb+0x294/0x560
-# [ 540.522901] intr_callback+0x78/0x1fc
-# [ 540.526700] __usb_hcd_giveback_urb+0x68/0x128
-# [ 540.531288] usb_giveback_urb_bh+0xa8/0x140
-# [ 540.535614] process_one_work+0x208/0x5e8
-# [ 540.539769] bh_worker+0x1a8/0x20c
-# [ 540.543293] workqueue_softirq_action+0x78/0x88
-# [ 540.547980] tasklet_hi_action+0x14/0x3c
-# [ 540.552038] handle_softirqs+0x118/0x4b8
-# [ 540.556096] __do_softirq+0x14/0x20
-# [ 540.559705] ____do_softirq+0x10/0x1c
-# [ 540.563500] call_on_irq_stack+0x24/0x4c
-# [ 540.567554] do_softirq_own_stack+0x1c/0x28
-# [ 540.571878] __irq_exit_rcu+0x174/0x1b4
-# [ 540.575849] irq_exit_rcu+0x10/0x38
-# [ 540.579462] el1_interrupt+0x38/0x64
-# [ 540.583158] el1h_64_irq_handler+0x18/0x24
-# [ 540.587397] el1h_64_irq+0x6c/0x70
-# [ 540.590918] lock_acquire+0x1e0/0x338 (P)
-# [ 540.595060] __mutex_lock+0xa8/0x4b8
-# [ 540.598760] mutex_lock_nested+0x24/0x30
-# [ 540.602818] _find_opp_table_unlocked+0x40/0xfc
-# [ 540.607503] _find_key+0x64/0x16c
-# [ 540.610940] dev_pm_opp_find_freq_exact+0x4c/0x74
-# [ 540.615798] qcom_cpufreq_hw_target_index+0xe8/0x128
-# [ 540.620924] __cpufreq_driver_target+0x144/0x29c
-# [ 540.625698] sugov_work+0x58/0x74
-# [ 540.629134] kthread_worker_fn+0xf4/0x324
-# [ 540.633278] kthread+0x12c/0x208
-# [ 540.636619] ret_from_fork+0x10/0x20
-# [ 540.640321] SMP: stopping secondary CPUs
-# [ 540.644518] Kernel Offset: 0x417126200000 from 0xffff800080000000
-# [ 540.650848] PHYS_OFFSET: 0xfff0dda400000000
-# [ 540.655170] CPU features: 0x000,00000100,00901250,8200721b
-# [ 540.660829] Memory Limit: none
-# [ 540.663999] ---[ end Kernel panic - not syncing: softlockup: hung tasks ]---
diff --git a/drivers/gpu/drm/clients/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c
index e17265039ca8..72480db1f00d 100644
--- a/drivers/gpu/drm/clients/drm_client_setup.c
+++ b/drivers/gpu/drm/clients/drm_client_setup.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
+
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
@@ -31,6 +34,10 @@ MODULE_PARM_DESC(active,
*/
void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format)
{
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_dbg(dev, "driver does not support mode-setting, skipping DRM clients\n");
+ return;
+ }
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (!strcmp(drm_client_default, "fbdev")) {
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 8d22b7627d41..df09cf9a8ca1 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -8,6 +8,7 @@ config DRM_DISPLAY_DP_AUX_BUS
config DRM_DISPLAY_HELPER
tristate
depends on DRM
+ select CEC_CORE if DRM_DISPLAY_DP_AUX_CEC || DRM_DISPLAY_HDMI_CEC_HELPER || CEC_NOTIFIER
help
DRM helpers for display adapters.
@@ -16,6 +17,7 @@ if DRM_DISPLAY_HELPER
config DRM_BRIDGE_CONNECTOR
bool
select DRM_DISPLAY_HDMI_AUDIO_HELPER
+ select DRM_DISPLAY_HDMI_CEC_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
help
DRM connector implementation terminating DRM bridge chains.
@@ -23,7 +25,6 @@ config DRM_BRIDGE_CONNECTOR
config DRM_DISPLAY_DP_AUX_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
select DRM_DISPLAY_DP_HELPER
- select CEC_CORE
help
Choose this option if you want to enable HDMI CEC support for
DisplayPort/USB-C to HDMI adapters.
@@ -82,6 +83,16 @@ config DRM_DISPLAY_HDMI_AUDIO_HELPER
DRM display helpers for HDMI Audio functionality (generic HDMI Codec
implementation).
+config DRM_DISPLAY_HDMI_CEC_HELPER
+ bool
+ help
+ DRM display helpers for the HDMI CEC implementation.
+
+config DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER
+ def_bool CEC_NOTIFIER
+ help
+ DRM display helpers for the HDMI CEC notifier implementation.
+
config DRM_DISPLAY_HDMI_HELPER
bool
help
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index b17879b957d5..0ff4a1ad0222 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -16,6 +16,10 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DSC_HELPER) += \
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_AUDIO_HELPER) += \
drm_hdmi_audio_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_CEC_HELPER) += \
+ drm_hdmi_cec_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER) += \
+ drm_hdmi_cec_notifier_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 262e93e07a28..5eb7e9bfe361 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -3,6 +3,7 @@
* Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -20,6 +21,7 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
@@ -113,6 +115,13 @@ struct drm_bridge_connector {
* &DRM_BRIDGE_OP_DP_AUDIO).
*/
struct drm_bridge *bridge_dp_audio;
+ /**
+ * @bridge_hdmi_cec:
+ *
+ * The bridge in the chain that implements CEC support, if any (see
+ * DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER).
+ */
+ struct drm_bridge *bridge_hdmi_cec;
};
#define to_drm_bridge_connector(x) \
@@ -201,7 +210,7 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
if (detect) {
- status = detect->funcs->detect(detect);
+ status = detect->funcs->detect(detect, connector);
if (hdmi)
drm_atomic_helper_connector_hdmi_hotplug(connector, status);
@@ -454,7 +463,7 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
if (!bridge->funcs->hdmi_audio_startup)
return 0;
- return bridge->funcs->hdmi_audio_startup(connector, bridge);
+ return bridge->funcs->hdmi_audio_startup(bridge, connector);
}
if (bridge_connector->bridge_dp_audio) {
@@ -463,7 +472,7 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
if (!bridge->funcs->dp_audio_startup)
return 0;
- return bridge->funcs->dp_audio_startup(connector, bridge);
+ return bridge->funcs->dp_audio_startup(bridge, connector);
}
return -EINVAL;
@@ -480,13 +489,13 @@ static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
if (bridge_connector->bridge_hdmi_audio) {
bridge = bridge_connector->bridge_hdmi_audio;
- return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+ return bridge->funcs->hdmi_audio_prepare(bridge, connector, fmt, hparms);
}
if (bridge_connector->bridge_dp_audio) {
bridge = bridge_connector->bridge_dp_audio;
- return bridge->funcs->dp_audio_prepare(connector, bridge, fmt, hparms);
+ return bridge->funcs->dp_audio_prepare(bridge, connector, fmt, hparms);
}
return -EINVAL;
@@ -500,12 +509,12 @@ static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
if (bridge_connector->bridge_hdmi_audio) {
bridge = bridge_connector->bridge_hdmi_audio;
- bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+ bridge->funcs->hdmi_audio_shutdown(bridge, connector);
}
if (bridge_connector->bridge_dp_audio) {
bridge = bridge_connector->bridge_dp_audio;
- bridge->funcs->dp_audio_shutdown(connector, bridge);
+ bridge->funcs->dp_audio_shutdown(bridge, connector);
}
}
@@ -522,7 +531,7 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connecto
if (!bridge->funcs->hdmi_audio_mute_stream)
return -ENOTSUPP;
- return bridge->funcs->hdmi_audio_mute_stream(connector, bridge,
+ return bridge->funcs->hdmi_audio_mute_stream(bridge, connector,
enable, direction);
}
@@ -532,7 +541,7 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connecto
if (!bridge->funcs->dp_audio_mute_stream)
return -ENOTSUPP;
- return bridge->funcs->dp_audio_mute_stream(connector, bridge,
+ return bridge->funcs->dp_audio_mute_stream(bridge, connector,
enable, direction);
}
@@ -546,6 +555,65 @@ static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_aud
.mute_stream = drm_bridge_connector_audio_mute_stream,
};
+static int drm_bridge_connector_hdmi_cec_enable(struct drm_connector *connector, bool enable)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_enable(bridge, enable);
+}
+
+static int drm_bridge_connector_hdmi_cec_log_addr(struct drm_connector *connector, u8 logical_addr)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_log_addr(bridge, logical_addr);
+}
+
+static int drm_bridge_connector_hdmi_cec_transmit(struct drm_connector *connector,
+ u8 attempts,
+ u32 signal_free_time,
+ struct cec_msg *msg)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_transmit(bridge, attempts,
+ signal_free_time,
+ msg);
+}
+
+static int drm_bridge_connector_hdmi_cec_init(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ if (!bridge->funcs->hdmi_cec_init)
+ return 0;
+
+ return bridge->funcs->hdmi_cec_init(bridge, connector);
+}
+
+static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_funcs = {
+ .init = drm_bridge_connector_hdmi_cec_init,
+ .enable = drm_bridge_connector_hdmi_cec_enable,
+ .log_addr = drm_bridge_connector_hdmi_cec_log_addr,
+ .transmit = drm_bridge_connector_hdmi_cec_transmit,
+};
+
/* -----------------------------------------------------------------------------
* Bridge Connector Initialisation
*/
@@ -662,6 +730,25 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
bridge_connector->bridge_dp_audio = bridge;
}
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ if (bridge_connector->bridge_hdmi_cec)
+ return ERR_PTR(-EBUSY);
+
+ bridge_connector->bridge_hdmi_cec = bridge;
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ if (bridge_connector->bridge_hdmi_cec)
+ return ERR_PTR(-EBUSY);
+
+ bridge_connector->bridge_hdmi_cec = bridge;
+
+ if (!bridge->funcs->hdmi_cec_enable ||
+ !bridge->funcs->hdmi_cec_log_addr ||
+ !bridge->funcs->hdmi_cec_transmit)
+ return ERR_PTR(-EINVAL);
+ }
+
if (!drm_bridge_get_next_bridge(bridge))
connector_type = bridge->type;
@@ -720,12 +807,33 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
ret = drm_connector_hdmi_audio_init(connector, dev,
&drm_bridge_connector_hdmi_audio_funcs,
bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_i2s_formats,
bridge->hdmi_audio_spdif_playback,
bridge->hdmi_audio_dai_port);
if (ret)
return ERR_PTR(ret);
}
+ if (bridge_connector->bridge_hdmi_cec &&
+ bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ ret = drmm_connector_hdmi_cec_notifier_register(connector,
+ NULL,
+ bridge->hdmi_cec_dev);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ if (bridge_connector->bridge_hdmi_cec &&
+ bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ ret = drmm_connector_hdmi_cec_register(connector,
+ &drm_bridge_connector_hdmi_cec_funcs,
+ bridge->hdmi_cec_adapter_name,
+ bridge->hdmi_cec_available_las,
+ bridge->hdmi_cec_dev);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
if (bridge_connector->bridge_hpd)
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index 718c9122bc3a..2d279e82922f 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -12,6 +12,7 @@
* to perform transactions on that bus.
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index ed31471bd0e2..3b50d817c839 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -5,6 +5,7 @@
* Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index ea78c6c8ca7a..1ecc3df7e316 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/dynamic_debug.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/iopoll.h>
@@ -692,6 +693,34 @@ void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered)
EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
/**
+ * drm_dp_dpcd_set_probe() - Set whether a probe is done before DPCD access
+ * @aux: DisplayPort AUX channel
+ * @enable: Enable the probing if required
+ */
+void drm_dp_dpcd_set_probe(struct drm_dp_aux *aux, bool enable)
+{
+ WRITE_ONCE(aux->dpcd_probe_disabled, !enable);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_set_probe);
+
+static bool dpcd_access_needs_probe(struct drm_dp_aux *aux)
+{
+ /*
+ * HP ZR24w corrupts the first DPCD access after entering power save
+ * mode. Eg. on a read, the entire buffer will be filled with the same
+ * byte. Do a throw away read to avoid corrupting anything we care
+ * about. Afterwards things will work correctly until the monitor
+ * gets woken up and subsequently re-enters power save mode.
+ *
+ * The user pressing any button on the monitor is enough to wake it
+ * up, so there is no particularly good place to do the workaround.
+ * We just have to do it before any DPCD access and hope that the
+ * monitor doesn't power down exactly after the throw away read.
+ */
+ return !aux->is_remote && !READ_ONCE(aux->dpcd_probe_disabled);
+}
+
+/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
@@ -712,19 +741,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
{
int ret;
- /*
- * HP ZR24w corrupts the first DPCD access after entering power save
- * mode. Eg. on a read, the entire buffer will be filled with the same
- * byte. Do a throw away read to avoid corrupting anything we care
- * about. Afterwards things will work correctly until the monitor
- * gets woken up and subsequently re-enters power save mode.
- *
- * The user pressing any button on the monitor is enough to wake it
- * up, so there is no particularly good place to do the workaround.
- * We just have to do it before any DPCD access and hope that the
- * monitor doesn't power down exactly after the throw away read.
- */
- if (!aux->is_remote) {
+ if (dpcd_access_needs_probe(aux)) {
ret = drm_dp_dpcd_probe(aux, DP_TRAINING_PATTERN_SET);
if (ret < 0)
return ret;
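
The new knob lets a driver skip that throw-away read when it knows the panel cannot hit the power-save corruption; a minimal sketch, assuming an eDP driver whose AUX channel is already set up:

	/*
	 * eDP panels are permanently connected and do not show the
	 * HP ZR24w power-save corruption, so drop the dummy read
	 * otherwise done before every DPCD access.
	 */
	drm_dp_dpcd_set_probe(aux, false);
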
@@ -3940,23 +3957,31 @@ EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
* Returns: %0 on success, negative error code on failure
*/
int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- u16 level)
+ u32 level)
{
int ret;
- u8 buf[2] = { 0 };
+ unsigned int offset = DP_EDP_BACKLIGHT_BRIGHTNESS_MSB;
+ u8 buf[3] = { 0 };
/* The panel uses the PWM for controlling brightness levels */
- if (!bl->aux_set)
+ if (!(bl->aux_set || bl->luminance_set))
return 0;
- if (bl->lsb_reg_used) {
+ if (bl->luminance_set) {
+ level = level * 1000;
+ level &= 0xffffff;
+ buf[0] = (level & 0x0000ff);
+ buf[1] = (level & 0x00ff00) >> 8;
+ buf[2] = (level & 0xff0000) >> 16;
+ offset = DP_EDP_PANEL_TARGET_LUMINANCE_VALUE;
+ } else if (bl->lsb_reg_used) {
buf[0] = (level & 0xff00) >> 8;
buf[1] = (level & 0x00ff);
} else {
buf[0] = level;
}
- ret = drm_dp_dpcd_write_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write_data(aux, offset, buf, sizeof(buf));
if (ret < 0) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
@@ -4019,7 +4044,7 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
* Returns: %0 on success, negative error code on failure.
*/
int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- const u16 level)
+ const u32 level)
{
int ret;
u8 dpcd_buf;
@@ -4029,6 +4054,9 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
else
dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM;
+ if (bl->luminance_set)
+ dpcd_buf |= DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;
+
if (bl->pwmgen_bit_count) {
ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
if (ret < 0)
@@ -4192,7 +4220,7 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
u8 *current_mode)
{
int ret;
- u8 buf[2];
+ u8 buf[3];
u8 mode_reg;
ret = drm_dp_dpcd_read_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
@@ -4209,17 +4237,37 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
int size = 1 + bl->lsb_reg_used;
- ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
- if (ret < 0) {
- drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n",
- aux->name, ret);
- return ret;
- }
+ if (bl->luminance_set) {
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
+ buf, sizeof(buf));
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev,
+ "%s: Failed to read backlight level: %d\n",
+ aux->name, ret);
+ return ret;
+ }
- if (bl->lsb_reg_used)
- return (buf[0] << 8) | buf[1];
- else
- return buf[0];
+ /*
+ * In case luminance is set, we want to send the value back in nits, but
+ * since DP_EDP_PANEL_TARGET_LUMINANCE stores values in millinits we
+ * need to divide by 1000.
+ */
+ return (buf[0] | buf[1] << 8 | buf[2] << 16) / 1000;
+ } else {
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+ buf, size);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev,
+ "%s: Failed to read backlight level: %d\n",
+ aux->name, ret);
+ return ret;
+ }
+
+ if (bl->lsb_reg_used)
+ return (buf[0] << 8) | buf[1];
+ else
+ return buf[0];
+ }
}
/*
@@ -4234,10 +4282,12 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
* interface.
* @aux: The DP aux device to use for probing
* @bl: The &drm_edp_backlight_info struct to fill out with information on the backlight
+ * @max_luminance: maximum luminance (in nits), used when @need_luminance is true
* @driver_pwm_freq_hz: Optional PWM frequency from the driver in hz
* @edp_dpcd: A cached copy of the eDP DPCD
* @current_level: Where to store the probed brightness level, if any
* @current_mode: Where to store the currently set backlight control mode
+ * @need_luminance: true if the backlight should be controlled using luminance values
*
* Initializes a &drm_edp_backlight_info struct by probing @aux for its backlight capabilities,
* along with the current and maximum supported brightness levels.
@@ -4249,8 +4299,9 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
*/
int
drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+ u32 max_luminance,
u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
- u16 *current_level, u8 *current_mode)
+ u32 *current_level, u8 *current_mode, bool need_luminance)
{
int ret;
@@ -4260,18 +4311,26 @@ drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl
bl->aux_set = true;
if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
bl->lsb_reg_used = true;
+ if ((edp_dpcd[0] & DP_EDP_15) && edp_dpcd[3] &
+ (DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) && need_luminance)
+ bl->luminance_set = true;
/* Sanity check caps */
- if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
+ if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP) &&
+ !bl->luminance_set) {
drm_dbg_kms(aux->drm_dev,
- "%s: Panel supports neither AUX or PWM brightness control? Aborting\n",
+ "%s: Panel does not support AUX, PWM or luminance-based brightness control. Aborting\n",
aux->name);
return -EINVAL;
}
- ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd);
- if (ret < 0)
- return ret;
+ if (bl->luminance_set) {
+ bl->max = max_luminance;
+ } else {
+ ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd);
+ if (ret < 0)
+ return ret;
+ }
ret = drm_edp_backlight_probe_state(aux, bl, current_mode);
if (ret < 0)
@@ -4350,7 +4409,7 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
{
struct dp_aux_backlight *bl;
struct backlight_properties props = { 0 };
- u16 current_level;
+ u32 current_level;
u8 current_mode;
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
int ret;
@@ -4374,8 +4433,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
bl->aux = aux;
- ret = drm_edp_backlight_init(aux, &bl->info, 0, edp_dpcd,
- &current_level, &current_mode);
+ ret = drm_edp_backlight_init(aux, &bl->info, 0, 0, edp_dpcd,
+ &current_level, &current_mode, false);
if (ret < 0)
return ret;
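
For callers opting in to the nits-based path, the updated init call takes the two new parameters; a sketch under the assumption that panel_max_luminance comes from the panel's EDID/DisplayID data (the variable name is hypothetical):

	u32 current_level;
	u8 current_mode;
	int ret;

	/* need_luminance = true requests nits-based control; it only
	 * sticks if the panel advertises DP_EDP_15 together with
	 * DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE.
	 */
	ret = drm_edp_backlight_init(aux, &bl->info, panel_max_luminance, 0,
				     edp_dpcd, &current_level, &current_mode,
				     true);
	if (ret < 0)
		return ret;
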
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index a89f38fd3218..64e5c176d5cc 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -23,6 +23,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 076edf161048..43f13a7c79b9 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/export.h>
#include <linux/ref_tracker.h>
#include <linux/types.h>
@@ -1920,7 +1921,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
}
#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
- ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun");
+ ref_tracker_dir_init(&mgr->ref_tracker, 16, "drm_dptun");
#endif
for (i = 0; i < max_group_count; i++) {
diff --git a/drivers/gpu/drm/display/drm_dsc_helper.c b/drivers/gpu/drm/display/drm_dsc_helper.c
index 6900f4dac520..05996c526a8a 100644
--- a/drivers/gpu/drm/display/drm_dsc_helper.c
+++ b/drivers/gpu/drm/display/drm_dsc_helper.c
@@ -6,6 +6,7 @@
* Manasi Navare <manasi.d.navare@intel.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
index ae8a0cf595fc..7d78b02c1446 100644
--- a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
@@ -3,6 +3,7 @@
* Copyright (c) 2024 Linaro Ltd
*/
+#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
@@ -143,6 +144,7 @@ static const struct hdmi_codec_ops drm_connector_hdmi_audio_ops = {
* @hdmi_codec_dev: device to be used as a parent for the HDMI Codec
* @funcs: callbacks for this HDMI Codec
* @max_i2s_playback_channels: maximum number of playback I2S channels
+ * @i2s_formats: set of I2S formats (use 0 for a bus-specific set)
* @spdif_playback: set if HDMI codec has S/PDIF playback port
* @dai_port: sound DAI port, -1 if it is not enabled
*
@@ -155,6 +157,7 @@ int drm_connector_hdmi_audio_init(struct drm_connector *connector,
struct device *hdmi_codec_dev,
const struct drm_connector_hdmi_audio_funcs *funcs,
unsigned int max_i2s_playback_channels,
+ u64 i2s_formats,
bool spdif_playback,
int dai_port)
{
@@ -162,6 +165,7 @@ int drm_connector_hdmi_audio_init(struct drm_connector *connector,
.ops = &drm_connector_hdmi_audio_ops,
.max_i2s_channels = max_i2s_playback_channels,
.i2s = !!max_i2s_playback_channels,
+ .i2s_formats = i2s_formats,
.spdif = spdif_playback,
.no_i2s_capture = true,
.no_spdif_capture = true,
diff --git a/drivers/gpu/drm/display/drm_hdmi_cec_helper.c b/drivers/gpu/drm/display/drm_hdmi_cec_helper.c
new file mode 100644
index 000000000000..3651ad0f76e0
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_cec_helper.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_managed.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+#include <media/cec.h>
+
+struct drm_connector_hdmi_cec_data {
+ struct cec_adapter *adapter;
+ const struct drm_connector_hdmi_cec_funcs *funcs;
+};
+
+static int drm_connector_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->enable(connector, enable);
+}
+
+static int drm_connector_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->log_addr(connector, logical_addr);
+}
+
+static int drm_connector_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->transmit(connector, attempts, signal_free_time, msg);
+}
+
+static const struct cec_adap_ops drm_connector_hdmi_cec_adap_ops = {
+ .adap_enable = drm_connector_hdmi_cec_adap_enable,
+ .adap_log_addr = drm_connector_hdmi_cec_adap_log_addr,
+ .adap_transmit = drm_connector_hdmi_cec_adap_transmit,
+};
+
+static void drm_connector_hdmi_cec_adapter_phys_addr_invalidate(struct drm_connector *connector)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_phys_addr_invalidate(data->adapter);
+}
+
+static void drm_connector_hdmi_cec_adapter_phys_addr_set(struct drm_connector *connector,
+ u16 addr)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_s_phys_addr(data->adapter, addr, false);
+}
+
+static void drm_connector_hdmi_cec_adapter_unregister(struct drm_device *dev, void *res)
+{
+ struct drm_connector *connector = res;
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_unregister_adapter(data->adapter);
+
+ if (data->funcs->uninit)
+ data->funcs->uninit(connector);
+
+ kfree(data);
+ connector->cec.data = NULL;
+}
+
+static struct drm_connector_cec_funcs drm_connector_hdmi_cec_adapter_funcs = {
+ .phys_addr_invalidate = drm_connector_hdmi_cec_adapter_phys_addr_invalidate,
+ .phys_addr_set = drm_connector_hdmi_cec_adapter_phys_addr_set,
+};
+
+int drmm_connector_hdmi_cec_register(struct drm_connector *connector,
+ const struct drm_connector_hdmi_cec_funcs *funcs,
+ const char *name,
+ u8 available_las,
+ struct device *dev)
+{
+ struct drm_connector_hdmi_cec_data *data;
+ struct cec_connector_info conn_info;
+ struct cec_adapter *cec_adap;
+ int ret;
+
+ if (!funcs->init || !funcs->enable || !funcs->log_addr || !funcs->transmit)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->funcs = funcs;
+
+ cec_adap = cec_allocate_adapter(&drm_connector_hdmi_cec_adap_ops, connector, name,
+ CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO,
+ available_las ? : CEC_MAX_LOG_ADDRS);
+ ret = PTR_ERR_OR_ZERO(cec_adap);
+ if (ret < 0)
+ goto err_free;
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+ cec_s_conn_info(cec_adap, &conn_info);
+
+ data->adapter = cec_adap;
+
+ mutex_lock(&connector->cec.mutex);
+
+ connector->cec.data = data;
+ connector->cec.funcs = &drm_connector_hdmi_cec_adapter_funcs;
+
+ ret = funcs->init(connector);
+ if (ret < 0)
+ goto err_delete_adapter;
+
+ /*
+ * NOTE: the CEC adapter will be unregistered by drmm cleanup from
+ * drm_managed_release(), which is called from drm_dev_release()
+ * during device unbind.
+ *
+ * However, the CEC framework cleans up the CEC adapter only when the
+ * last user has closed its file descriptor, so we don't need to handle
+ * it in DRM.
+ *
+	 * Before that, the CEC framework makes sure that even if userspace
+	 * still holds the CEC device open, all calls will be shortcut via
+ * cec_is_registered(), making sure that there is no access to the
+ * freed memory.
+ */
+ ret = cec_register_adapter(cec_adap, dev);
+ if (ret < 0)
+ goto err_delete_adapter;
+
+ mutex_unlock(&connector->cec.mutex);
+
+ return drmm_add_action_or_reset(connector->dev,
+ drm_connector_hdmi_cec_adapter_unregister,
+ connector);
+
+err_delete_adapter:
+ cec_delete_adapter(cec_adap);
+
+ connector->cec.data = NULL;
+
+ mutex_unlock(&connector->cec.mutex);
+
+err_free:
+ kfree(data);
+
+ return ret;
+}
+EXPORT_SYMBOL(drmm_connector_hdmi_cec_register);
+
+void drm_connector_hdmi_cec_received_msg(struct drm_connector *connector,
+ struct cec_msg *msg)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_received_msg(data->adapter, msg);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_received_msg);
+
+void drm_connector_hdmi_cec_transmit_attempt_done(struct drm_connector *connector,
+ u8 status)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_transmit_attempt_done(data->adapter, status);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_transmit_attempt_done);
+
+void drm_connector_hdmi_cec_transmit_done(struct drm_connector *connector,
+ u8 status,
+ u8 arb_lost_cnt, u8 nack_cnt,
+ u8 low_drive_cnt, u8 error_cnt)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_transmit_done(data->adapter, status,
+ arb_lost_cnt, nack_cnt, low_drive_cnt, error_cnt);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_transmit_done);
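
A CEC-capable connector driver wires the four mandatory callbacks into the new helper; a minimal sketch with hypothetical foo_* names (the bodies would talk to the actual CEC block):

static int foo_cec_init(struct drm_connector *connector)
{
	return 0;	/* power up / reset the CEC block */
}

static int foo_cec_enable(struct drm_connector *connector, bool enable)
{
	return 0;	/* gate the CEC engine on or off */
}

static int foo_cec_log_addr(struct drm_connector *connector, u8 logical_addr)
{
	return 0;	/* program the logical address filter */
}

static int foo_cec_transmit(struct drm_connector *connector, u8 attempts,
			    u32 signal_free_time, struct cec_msg *msg)
{
	return 0;	/* queue msg->msg (msg->len bytes) for transmission */
}

static const struct drm_connector_hdmi_cec_funcs foo_cec_funcs = {
	.init		= foo_cec_init,
	.enable		= foo_cec_enable,
	.log_addr	= foo_cec_log_addr,
	.transmit	= foo_cec_transmit,
};

	/* at connector init time; 0 available_las means CEC_MAX_LOG_ADDRS */
	ret = drmm_connector_hdmi_cec_register(connector, &foo_cec_funcs,
					       "foo-cec", 0, dev);
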
diff --git a/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c b/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c
new file mode 100644
index 000000000000..31b8e4a93e24
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_managed.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+static void drm_connector_hdmi_cec_notifier_phys_addr_invalidate(struct drm_connector *connector)
+{
+ cec_notifier_phys_addr_invalidate(connector->cec.data);
+}
+
+static void drm_connector_hdmi_cec_notifier_phys_addr_set(struct drm_connector *connector,
+ u16 addr)
+{
+ cec_notifier_set_phys_addr(connector->cec.data, addr);
+}
+
+static void drm_connector_hdmi_cec_notifier_unregister(struct drm_device *dev, void *res)
+{
+ struct drm_connector *connector = res;
+
+ cec_notifier_conn_unregister(connector->cec.data);
+ connector->cec.data = NULL;
+}
+
+static const struct drm_connector_cec_funcs drm_connector_cec_notifier_funcs = {
+ .phys_addr_invalidate = drm_connector_hdmi_cec_notifier_phys_addr_invalidate,
+ .phys_addr_set = drm_connector_hdmi_cec_notifier_phys_addr_set,
+};
+
+int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev)
+{
+ struct cec_connector_info conn_info;
+ struct cec_notifier *notifier;
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ notifier = cec_notifier_conn_register(dev, port_name, &conn_info);
+ if (!notifier)
+ return -ENOMEM;
+
+ mutex_lock(&connector->cec.mutex);
+
+ connector->cec.data = notifier;
+ connector->cec.funcs = &drm_connector_cec_notifier_funcs;
+
+ mutex_unlock(&connector->cec.mutex);
+
+ return drmm_add_action_or_reset(connector->dev,
+ drm_connector_hdmi_cec_notifier_unregister,
+ connector);
+}
+EXPORT_SYMBOL(drmm_connector_hdmi_cec_notifier_register);
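
For hardware whose CEC adapter lives in a separate driver, only the notifier needs registering; a sketch (a NULL port name assumes a single HDMI port, dev being the HDMI controller device):

	ret = drmm_connector_hdmi_cec_notifier_register(connector, NULL, dev);
	if (ret)
		return ret;
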
diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
index 855cb02b827d..a237dc55805d 100644
--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -44,7 +45,7 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
/* Sink EOTF is Bit map while infoframe is absolute values */
if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
- connector->hdr_sink_metadata.hdmi_type1.eotf))
+ connector->display_info.hdr_sink_metadata.hdmi_type1.eotf))
DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);
err = hdmi_drm_infoframe_init(frame);
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index d9d9948b29e9..a561f124be99 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -1,11 +1,15 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modes.h>
#include <drm/drm_print.h>
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
@@ -407,6 +411,11 @@ sink_supports_format_bpc(const struct drm_connector *connector,
return false;
}
+ if (drm_mode_is_420_only(info, mode) && format != HDMI_COLORSPACE_YUV420) {
+ drm_dbg_kms(dev, "Mode can be only supported in YUV420 format.\n");
+ return false;
+ }
+
switch (format) {
case HDMI_COLORSPACE_RGB:
drm_dbg_kms(dev, "RGB Format, checking the constraints.\n");
@@ -437,9 +446,36 @@ sink_supports_format_bpc(const struct drm_connector *connector,
return true;
case HDMI_COLORSPACE_YUV420:
- /* TODO: YUV420 is unsupported at the moment. */
- drm_dbg_kms(dev, "YUV420 format isn't supported yet.\n");
- return false;
+ drm_dbg_kms(dev, "YUV420 format, checking the constraints.\n");
+
+ if (!(info->color_formats & DRM_COLOR_FORMAT_YCBCR420)) {
+ drm_dbg_kms(dev, "Sink doesn't support YUV420.\n");
+ return false;
+ }
+
+ if (!drm_mode_is_420(info, mode)) {
+ drm_dbg_kms(dev, "Mode cannot be supported in YUV420 format.\n");
+ return false;
+ }
+
+ if (bpc == 10 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)) {
+ drm_dbg_kms(dev, "10 BPC but sink doesn't support Deep Color 30.\n");
+ return false;
+ }
+
+ if (bpc == 12 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)) {
+ drm_dbg_kms(dev, "12 BPC but sink doesn't support Deep Color 36.\n");
+ return false;
+ }
+
+ if (bpc == 16 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)) {
+ drm_dbg_kms(dev, "16 BPC but sink doesn't support Deep Color 48.\n");
+ return false;
+ }
+
+ drm_dbg_kms(dev, "YUV420 format supported in that configuration.\n");
+
+ return true;
case HDMI_COLORSPACE_YUV422:
drm_dbg_kms(dev, "YUV422 format, checking the constraints.\n");
@@ -545,8 +581,9 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
struct drm_device *dev = connector->dev;
int ret;
- drm_dbg_kms(dev, "Trying %s output format\n",
- drm_hdmi_connector_get_output_format_name(fmt));
+ drm_dbg_kms(dev, "Trying %s output format with %u bpc\n",
+ drm_hdmi_connector_get_output_format_name(fmt),
+ bpc);
if (!sink_supports_format_bpc(connector, info, mode, fmt, bpc)) {
drm_dbg_kms(dev, "%s output format not supported with %u bpc\n",
@@ -563,7 +600,7 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
return false;
}
- drm_dbg_kms(dev, "%s output format supported with %u (TMDS char rate: %llu Hz)\n",
+ drm_dbg_kms(dev, "%s output format supported with %u bpc (TMDS char rate: %llu Hz)\n",
drm_hdmi_connector_get_output_format_name(fmt),
bpc, conn_state->hdmi.tmds_char_rate);
@@ -571,23 +608,35 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
}
static int
-hdmi_compute_format(const struct drm_connector *connector,
- struct drm_connector_state *conn_state,
- const struct drm_display_mode *mode,
- unsigned int bpc)
+hdmi_compute_format_bpc(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state,
+ const struct drm_display_mode *mode,
+ unsigned int max_bpc, enum hdmi_colorspace fmt)
{
struct drm_device *dev = connector->dev;
+ unsigned int bpc;
+ int ret;
+
+ for (bpc = max_bpc; bpc >= 8; bpc -= 2) {
+ ret = hdmi_try_format_bpc(connector, conn_state, mode, bpc, fmt);
+ if (!ret)
+ continue;
+
+ conn_state->hdmi.output_bpc = bpc;
+ conn_state->hdmi.output_format = fmt;
+
+ drm_dbg_kms(dev,
+ "Mode %ux%u @ %uHz: Found configuration: bpc: %u, fmt: %s, clock: %llu\n",
+ mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
+ conn_state->hdmi.output_bpc,
+ drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
+ conn_state->hdmi.tmds_char_rate);
- /*
- * TODO: Add support for YCbCr420 output for HDMI 2.0 capable
- * devices, for modes that only support YCbCr420.
- */
- if (hdmi_try_format_bpc(connector, conn_state, mode, bpc, HDMI_COLORSPACE_RGB)) {
- conn_state->hdmi.output_format = HDMI_COLORSPACE_RGB;
return 0;
}
- drm_dbg_kms(dev, "Failed. No Format Supported for that bpc count.\n");
+ drm_dbg_kms(dev, "Failed. %s output format not supported for any bpc count.\n",
+ drm_hdmi_connector_get_output_format_name(fmt));
return -EINVAL;
}
@@ -597,33 +646,29 @@ hdmi_compute_config(const struct drm_connector *connector,
struct drm_connector_state *conn_state,
const struct drm_display_mode *mode)
{
- struct drm_device *dev = connector->dev;
unsigned int max_bpc = clamp_t(unsigned int,
conn_state->max_bpc,
8, connector->max_bpc);
- unsigned int bpc;
int ret;
- for (bpc = max_bpc; bpc >= 8; bpc -= 2) {
- drm_dbg_kms(dev, "Trying with a %d bpc output\n", bpc);
-
- ret = hdmi_compute_format(connector, conn_state, mode, bpc);
- if (ret)
- continue;
-
- conn_state->hdmi.output_bpc = bpc;
-
- drm_dbg_kms(dev,
- "Mode %ux%u @ %uHz: Found configuration: bpc: %u, fmt: %s, clock: %llu\n",
- mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
- conn_state->hdmi.output_bpc,
- drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
- conn_state->hdmi.tmds_char_rate);
-
- return 0;
+ ret = hdmi_compute_format_bpc(connector, conn_state, mode, max_bpc,
+ HDMI_COLORSPACE_RGB);
+ if (ret) {
+ if (connector->ycbcr_420_allowed) {
+ ret = hdmi_compute_format_bpc(connector, conn_state,
+ mode, max_bpc,
+ HDMI_COLORSPACE_YUV420);
+ if (ret)
+ drm_dbg_kms(connector->dev,
+ "YUV420 output format doesn't work.\n");
+ } else {
+ drm_dbg_kms(connector->dev,
+ "YUV420 output format not allowed for connector.\n");
+ ret = -EINVAL;
+ }
}
- return -EINVAL;
+ return ret;
}
static int hdmi_generate_avi_infoframe(const struct drm_connector *connector,
@@ -798,12 +843,12 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
if (!new_conn_state->crtc || !new_conn_state->best_encoder)
return 0;
- new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
-
ret = hdmi_compute_config(connector, new_conn_state, mode);
if (ret)
return ret;
+ new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
+
ret = hdmi_generate_infoframes(connector, new_conn_state);
if (ret)
return ret;
@@ -1081,9 +1126,10 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
const struct drm_edid *drm_edid;
if (status == connector_status_disconnected) {
- // TODO: also handle CEC and scramber, HDMI sink disconnected.
+ // TODO: also handle scrambler, HDMI sink disconnected.
drm_connector_hdmi_audio_plugged_notify(connector, false);
drm_edid_connector_update(connector, NULL);
+ drm_connector_cec_phys_addr_invalidate(connector);
return;
}
@@ -1097,8 +1143,9 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
drm_edid_free(drm_edid);
if (status == connector_status_connected) {
- // TODO: also handle CEC and scramber, HDMI sink is now connected.
+ // TODO: also handle scrambler, HDMI sink is now connected.
drm_connector_hdmi_audio_plugged_notify(connector, true);
+ drm_connector_cec_phys_addr_set(connector);
}
}
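
The YUV420 fallback only triggers for connectors that allow 4:2:0 output, so drivers wanting the new behaviour must opt in before registration; a one-line sketch:

	/* let the HDMI state helper fall back to YUV420 for 4:2:0-only modes */
	connector->ycbcr_420_allowed = true;
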
diff --git a/drivers/gpu/drm/display/drm_scdc_helper.c b/drivers/gpu/drm/display/drm_scdc_helper.c
index 6d2f244e5830..df878aad4a36 100644
--- a/drivers/gpu/drm/display/drm_scdc_helper.c
+++ b/drivers/gpu/drm/display/drm_scdc_helper.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 0138cf0b8b63..cd15cf52f0c9 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -26,7 +26,7 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
-
+#include <linux/export.h>
#include <linux/sync_file.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ee64ca1b1bec..ef56b474acf5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -25,6 +25,7 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
+#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/ktime.h>
@@ -1160,11 +1161,10 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
}
static void
-disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
+encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
- struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
@@ -1224,9 +1224,15 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
else if (funcs->dpms)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
-
- drm_atomic_bridge_chain_post_disable(bridge, state);
}
+}
+
+static void
+crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
@@ -1274,6 +1280,68 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
}
}
+static void
+encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ /*
+ * Shut down everything that's in the changeset and currently
+ * still on. So need to check the old, saved state.
+ */
+ if (!old_conn_state->crtc)
+ continue;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
+
+ if (new_conn_state->crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_conn_state->crtc);
+ else
+ new_crtc_state = NULL;
+
+ if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
+ !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
+ continue;
+
+ encoder = old_conn_state->best_encoder;
+
+ /*
+ * We shouldn't get this far if we didn't previously have
+	 * an encoder, but WARN_ON() rather than explode.
+ */
+ if (WARN_ON(!encoder))
+ continue;
+
+ drm_dbg_atomic(dev, "post-disabling bridges [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call disable hooks twice.
+ */
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_post_disable(bridge, state);
+ }
+}
+
+static void
+disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ encoder_bridge_disable(dev, state);
+
+ crtc_disable(dev, state);
+
+ encoder_bridge_post_disable(dev, state);
+}
+
/**
* drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
* @dev: DRM device
@@ -1483,28 +1551,44 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
}
}
-/**
- * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
- * @dev: DRM device
- * @state: atomic state object being committed
- *
- * This function enables all the outputs with the new configuration which had to
- * be turned off for the update.
- *
- * For compatibility with legacy CRTC helpers this should be called after
- * drm_atomic_helper_commit_planes(), which is what the default commit function
- * does. But drivers with different needs can group the modeset commits together
- * and do the plane commits at the end. This is useful for drivers doing runtime
- * PM since planes updates then only happen when the CRTC is actually enabled.
- */
-void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
- struct drm_atomic_state *state)
+static void
+encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
+
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ if (!new_conn_state->best_encoder)
+ continue;
+
+ if (!new_conn_state->crtc->state->active ||
+ !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
+ continue;
+
+ encoder = new_conn_state->best_encoder;
+
+ drm_dbg_atomic(dev, "pre-enabling bridges [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call enable hooks twice.
+ */
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_pre_enable(bridge, state);
+ }
+}
+
+static void
+crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
- struct drm_connector *connector;
- struct drm_connector_state *new_conn_state;
int i;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -1528,6 +1612,14 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
funcs->commit(crtc);
}
}
+}
+
+static void
+encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
@@ -1552,7 +1644,6 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
* it away), so we won't call enable hooks twice.
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
- drm_atomic_bridge_chain_pre_enable(bridge, state);
if (funcs) {
if (funcs->atomic_enable)
@@ -1565,6 +1656,30 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_atomic_bridge_chain_enable(bridge, state);
}
+}
+
+/**
+ * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This function enables all the outputs with the new configuration which had to
+ * be turned off for the update.
+ *
+ * For compatibility with legacy CRTC helpers this should be called after
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since plane updates then only happen when the CRTC is actually enabled.
+ */
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ encoder_bridge_pre_enable(dev, state);
+
+ crtc_enable(dev, state);
+
+ encoder_bridge_enable(dev, state);
drm_atomic_helper_commit_writebacks(dev, state);
}
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 519228eb1095..7142e163e618 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -37,6 +37,7 @@
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence.h>
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index c2726af6698e..ecc73d52bfae 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -36,6 +36,7 @@
#include <drm/drm_writeback.h>
#include <drm/drm_vblank.h>
+#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/uaccess.h>
#include <linux/sync_file.h>
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 22aa015df387..a2556d16bed6 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -28,6 +28,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <drm/drm_auth.h>
@@ -95,7 +96,7 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
struct drm_auth *auth = data;
int ret = 0;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
if (!file_priv->magic) {
ret = idr_alloc(&file_priv->master->magic_map, file_priv,
1, 0, GFP_KERNEL);
@@ -103,7 +104,6 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
file_priv->magic = ret;
}
auth->magic = file_priv->magic;
- mutex_unlock(&dev->master_mutex);
drm_dbg_core(dev, "%u\n", auth->magic);
@@ -118,13 +118,12 @@ int drm_authmagic(struct drm_device *dev, void *data,
drm_dbg_core(dev, "%u\n", auth->magic);
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
file = idr_find(&file_priv->master->magic_map, auth->magic);
if (file) {
file->authenticated = 1;
idr_replace(&file_priv->master->magic_map, NULL, auth->magic);
}
- mutex_unlock(&dev->master_mutex);
return file ? 0 : -EINVAL;
}
@@ -248,41 +247,33 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
{
int ret;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
ret = drm_master_check_perm(dev, file_priv);
if (ret)
- goto out_unlock;
+ return ret;
if (drm_is_current_master_locked(file_priv))
- goto out_unlock;
+ return ret;
- if (dev->master) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ if (dev->master)
+ return -EBUSY;
- if (!file_priv->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!file_priv->master)
+ return -EINVAL;
- if (!file_priv->is_master) {
- ret = drm_new_set_master(dev, file_priv);
- goto out_unlock;
- }
+ if (!file_priv->is_master)
+ return drm_new_set_master(dev, file_priv);
if (file_priv->master->lessor != NULL) {
drm_dbg_lease(dev,
"Attempt to set lessee %d as master\n",
file_priv->master->lessee_id);
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
drm_set_master(dev, file_priv, false);
-out_unlock:
- mutex_unlock(&dev->master_mutex);
+
return ret;
}
@@ -299,33 +290,27 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
{
int ret;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
ret = drm_master_check_perm(dev, file_priv);
if (ret)
- goto out_unlock;
+ return ret;
- if (!drm_is_current_master_locked(file_priv)) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!drm_is_current_master_locked(file_priv))
+ return -EINVAL;
- if (!dev->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!dev->master)
+ return -EINVAL;
if (file_priv->master->lessor != NULL) {
drm_dbg_lease(dev,
"Attempt to drop lessee %d as master\n",
file_priv->master->lessee_id);
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
drm_drop_master(dev, file_priv);
-out_unlock:
- mutex_unlock(&dev->master_mutex);
+
return ret;
}
@@ -337,7 +322,7 @@ int drm_master_open(struct drm_file *file_priv)
/* if there is no current master make this fd it, but do not create
* any master object for render clients
*/
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
if (!dev->master) {
ret = drm_new_set_master(dev, file_priv);
} else {
@@ -345,7 +330,6 @@ int drm_master_open(struct drm_file *file_priv)
file_priv->master = drm_master_get(dev->master);
spin_unlock(&file_priv->master_lookup_lock);
}
- mutex_unlock(&dev->master_mutex);
return ret;
}
@@ -355,7 +339,7 @@ void drm_master_release(struct drm_file *file_priv)
struct drm_device *dev = file_priv->minor->dev;
struct drm_master *master;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
master = file_priv->master;
if (file_priv->magic)
idr_remove(&file_priv->master->magic_map, file_priv->magic);
@@ -376,7 +360,6 @@ out:
/* drop the master reference held by the file priv */
if (file_priv->master)
drm_master_put(&file_priv->master);
- mutex_unlock(&dev->master_mutex);
}
/**
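
guard(mutex)() comes from <linux/cleanup.h> and releases the lock automatically when the scope exits, which is what allows the out_unlock labels above to go away; a minimal sketch of the pattern (foo_op is hypothetical):

#include <linux/cleanup.h>

static int foo_op(struct drm_device *dev)
{
	guard(mutex)(&dev->master_mutex);

	if (!dev->master)
		return -EINVAL;	/* mutex released here... */

	return 0;		/* ...and here, no unlock label needed */
}
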
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index b4c89ec01998..4bde00083047 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -23,6 +23,7 @@
#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -203,6 +204,8 @@ static void __drm_bridge_free(struct kref *kref)
{
struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
+ if (bridge->funcs->destroy)
+ bridge->funcs->destroy(bridge);
kfree(bridge->container);
}
@@ -292,6 +295,11 @@ EXPORT_SYMBOL(__devm_drm_bridge_alloc);
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
+ if (!bridge->container)
+ DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");
+
+ drm_bridge_get(bridge);
+
mutex_init(&bridge->hpd_mutex);
if (bridge->ops & DRM_BRIDGE_OP_HDMI)
@@ -339,6 +347,8 @@ void drm_bridge_remove(struct drm_bridge *bridge)
mutex_unlock(&bridge_lock);
mutex_destroy(&bridge->hpd_mutex);
+
+ drm_bridge_put(bridge);
}
EXPORT_SYMBOL(drm_bridge_remove);
@@ -404,11 +414,17 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
if (!encoder || !bridge)
return -EINVAL;
- if (previous && (!previous->dev || previous->encoder != encoder))
- return -EINVAL;
+ drm_bridge_get(bridge);
- if (bridge->dev)
- return -EBUSY;
+ if (previous && (!previous->dev || previous->encoder != encoder)) {
+ ret = -EINVAL;
+ goto err_put_bridge;
+ }
+
+ if (bridge->dev) {
+ ret = -EBUSY;
+ goto err_put_bridge;
+ }
bridge->dev = encoder->dev;
bridge->encoder = encoder;
@@ -457,6 +473,8 @@ err_reset_bridge:
"failed to attach bridge %pOF to encoder %s\n",
bridge->of_node, encoder->name);
+err_put_bridge:
+ drm_bridge_put(bridge);
return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);
@@ -477,6 +495,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
list_del(&bridge->chain_node);
bridge->dev = NULL;
+ drm_bridge_put(bridge);
}
/**
@@ -1208,6 +1227,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
/**
* drm_bridge_detect - check if anything is attached to the bridge output
* @bridge: bridge control structure
+ * @connector: attached connector
*
* If the bridge supports output detection, as reported by the
* DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
@@ -1218,12 +1238,13 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
* The detection status on success, or connector_status_unknown if the bridge
* doesn't support output detection.
*/
-enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
+enum drm_connector_status
+drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
return connector_status_unknown;
- return bridge->funcs->detect(bridge);
+ return bridge->funcs->detect(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_detect);
@@ -1392,6 +1413,23 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
+/**
+ * devm_drm_put_bridge - Release a bridge reference obtained via devm
+ * @dev: device that got the bridge via devm
+ * @bridge: pointer to a struct drm_bridge obtained via devm
+ *
+ * Same as drm_bridge_put() for bridge pointers obtained via devm functions
+ * such as devm_drm_bridge_alloc().
+ *
+ * This function is a temporary workaround and MUST NOT be used. Manual
+ * handling of bridge lifetime is inherently unsafe.
+ */
+void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge)
+{
+ devm_release_action(dev, drm_bridge_put_void, bridge);
+}
+EXPORT_SYMBOL(devm_drm_put_bridge);
+
static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
struct drm_bridge *bridge,
unsigned int idx)
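
Bridges implementing DRM_BRIDGE_OP_DETECT must adopt the new two-argument signature; a sketch with hypothetical foo_* helpers:

static enum drm_connector_status
foo_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
	/* the connector is now passed in, so drivers no longer
	 * need to cache a pointer to it at attach time
	 */
	return foo_hpd_asserted(bridge) ? connector_status_connected
					: connector_status_disconnected;
}

static const struct drm_bridge_funcs foo_bridge_funcs = {
	.detect = foo_bridge_detect,
	/* ... */
};
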
diff --git a/drivers/gpu/drm/drm_bridge_helper.c b/drivers/gpu/drm/drm_bridge_helper.c
index af80d2496194..420f29cf3e54 100644
--- a/drivers/gpu/drm/drm_bridge_helper.c
+++ b/drivers/gpu/drm/drm_bridge_helper.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 66aff35f8647..a94061f373de 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -5,6 +5,7 @@
#include <kunit/test-bug.h>
+#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index f1de7faf9fb4..3fa38d4ac70b 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -3,6 +3,7 @@
* Copyright 2018 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -303,34 +304,17 @@ EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
-int
-drm_client_buffer_vmap(struct drm_client_buffer *buffer,
- struct iosys_map *map_copy)
+int drm_client_buffer_vmap(struct drm_client_buffer *buffer,
+ struct iosys_map *map_copy)
{
- struct drm_gem_object *gem = buffer->gem;
- struct iosys_map *map = &buffer->map;
int ret;
- drm_gem_lock(gem);
-
- ret = drm_gem_pin_locked(gem);
- if (ret)
- goto err_drm_gem_pin_locked;
- ret = drm_gem_vmap_locked(gem, map);
+ ret = drm_gem_vmap(buffer->gem, &buffer->map);
if (ret)
- goto err_drm_gem_vmap;
-
- drm_gem_unlock(gem);
-
- *map_copy = *map;
+ return ret;
+ *map_copy = buffer->map;
return 0;
-
-err_drm_gem_vmap:
- drm_gem_unpin_locked(buffer->gem);
-err_drm_gem_pin_locked:
- drm_gem_unlock(gem);
- return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap);
@@ -344,13 +328,7 @@ EXPORT_SYMBOL(drm_client_buffer_vmap);
*/
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
- struct drm_gem_object *gem = buffer->gem;
- struct iosys_map *map = &buffer->map;
-
- drm_gem_lock(gem);
- drm_gem_vunmap_locked(gem, map);
- drm_gem_unpin_locked(gem);
- drm_gem_unlock(gem);
+ drm_gem_vunmap(buffer->gem, &buffer->map);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap);
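
With the open-coded lock/pin sequence gone, client-side mapping reduces to a plain pair of calls, assuming drm_gem_vmap() now takes care of locking and pinning internally as the refactoring implies:

	struct iosys_map map;
	int ret;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret)
		return ret;

	/* ... draw into the mapping ... */

	drm_client_buffer_vunmap(buffer);
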
diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c
index bd93cd93d519..c83196ad8b59 100644
--- a/drivers/gpu/drm/drm_client_event.c
+++ b/drivers/gpu/drm/drm_client_event.c
@@ -3,6 +3,7 @@
* Copyright 2018 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 0f9d5ba36c81..9c2c3b0c8c47 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -8,6 +8,8 @@
*/
#include "drm/drm_modeset_lock.h"
+
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 3969dc548cff..37a3270bc3c2 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_atomic.h>
@@ -28,6 +29,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
+#include <kunit/visibility.h>
#include "drm_crtc_internal.h"
@@ -494,6 +496,7 @@ const char *drm_get_color_encoding_name(enum drm_color_encoding encoding)
return color_encoding_name[encoding];
}
+EXPORT_SYMBOL_IF_KUNIT(drm_get_color_encoding_name);
/**
* drm_get_color_range_name - return a string for color range
@@ -509,6 +512,7 @@ const char *drm_get_color_range_name(enum drm_color_range range)
return color_range_name[range];
}
+EXPORT_SYMBOL_IF_KUNIT(drm_get_color_range_name);
/**
* drm_plane_create_color_properties - color encoding related plane properties
@@ -630,3 +634,209 @@ int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests)
return 0;
}
EXPORT_SYMBOL(drm_color_lut_check);
+
+/*
+ * Gamma-LUT programming
+ */
+
+/**
+ * drm_crtc_load_gamma_888 - Programs gamma ramp for RGB888-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component.
+ */
+void drm_crtc_load_gamma_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ set_gamma(crtc, i, lut[i].red, lut[i].green, lut[i].blue);
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_888);
+
+/**
+ * drm_crtc_load_gamma_565_from_888 - Programs gamma ramp for RGB565-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component. The helper interpolates
+ * the individual color components to reduce the number of entries to 5/6/5.
+ */
+void drm_crtc_load_gamma_565_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+ u16 r, g, b;
+
+ for (i = 0; i < 32; ++i) {
+ r = lut[i * 8 + i / 4].red;
+ g = lut[i * 4 + i / 16].green;
+ b = lut[i * 8 + i / 4].blue;
+ set_gamma(crtc, i, r, g, b);
+ }
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = 32; i < 64; ++i) {
+ g = lut[i * 4 + i / 16].green;
+ set_gamma(crtc, i, 0, g, 0);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_565_from_888);
+
+/**
+ * drm_crtc_load_gamma_555_from_888 - Programs gamma ramp for RGB555-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component. The helper resamples
+ * the ramp down to the 32 entries per component of a 5/5/5 format.
+ */
+void drm_crtc_load_gamma_555_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+ u16 r, g, b;
+
+ for (i = 0; i < 32; ++i) {
+ r = lut[i * 8 + i / 4].red;
+ g = lut[i * 8 + i / 4].green;
+ b = lut[i * 8 + i / 4].blue;
+ set_gamma(crtc, i, r, g, b);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_555_from_888);
+
+static void fill_gamma_888(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 8) | r;
+ g = (g << 8) | g;
+ b = (b << 8) | b;
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_888 - Programs a default gamma ramp for RGB888-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_888(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ fill_gamma_888(crtc, i, i, i, i, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_888);
+
+static void fill_gamma_565(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 11) | (r << 6) | (r << 1) | (r >> 4);
+ g = (g << 10) | (g << 4) | (g >> 2);
+ b = (b << 11) | (b << 6) | (b << 1) | (b >> 4);
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_565 - Programs a default gamma ramp for RGB565-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_565(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 32; ++i)
+ fill_gamma_565(crtc, i, i, i, i, set_gamma);
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = 32; i < 64; ++i)
+ fill_gamma_565(crtc, i, 0, i, 0, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_565);
+
+static void fill_gamma_555(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 11) | (r << 6) | (r << 1) | (r >> 4);
+ g = (g << 11) | (g << 6) | (g << 1) | (g >> 4);
+	b = (b << 11) | (b << 6) | (b << 1) | (b >> 4);
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_555 - Programs a default gamma ramp for RGB555-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_555(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 32; ++i)
+ fill_gamma_555(crtc, i, i, i, i, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_555);
+
+/*
+ * Color-LUT programming
+ */
+
+/**
+ * drm_crtc_load_palette_8 - Programs palette for C8-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The palette to program
+ * @set_palette: Callback for programming the hardware palette
+ *
+ * Programs the palette specified in @lut to hardware. The input palette
+ * must have 256 entries per color component.
+ */
+void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ set_palette(crtc, i, lut[i].red, lut[i].green, lut[i].blue);
+}
+EXPORT_SYMBOL(drm_crtc_load_palette_8);
+
+static void fill_palette_8(struct drm_crtc *crtc, unsigned int i,
+ drm_crtc_set_lut_func set_palette)
+{
+ u16 Y = (i << 8) | i; // relative luminance
+
+ set_palette(crtc, i, Y, Y, Y);
+}
+
+/**
+ * drm_crtc_fill_palette_8 - Programs a default palette for C8-like formats
+ * @crtc: The displaying CRTC
+ * @set_palette: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default palette to hardware.
+ */
+void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ fill_palette_8(crtc, i, set_palette);
+}
+EXPORT_SYMBOL(drm_crtc_fill_palette_8);
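
Drivers feed these helpers a per-entry callback of type drm_crtc_set_lut_func; a minimal sketch for an RGB888-like format (foo_write_lut is a hypothetical register accessor):

static void foo_set_gamma(struct drm_crtc *crtc, unsigned int index,
			  u16 red, u16 green, u16 blue)
{
	foo_write_lut(crtc, index, red, green, blue);	/* one HW LUT entry */
}

	/* in the CRTC's atomic_flush */
	if (crtc_state->gamma_lut)
		drm_crtc_load_gamma_888(crtc, crtc_state->gamma_lut->data,
					foo_set_gamma);
	else
		drm_crtc_fill_gamma_888(crtc, foo_set_gamma);
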
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 48b08c9611a7..272d6254ea47 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -33,6 +33,7 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/uaccess.h>
@@ -279,6 +280,7 @@ static int drm_connector_init_only(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
+ mutex_init(&connector->cec.mutex);
mutex_init(&connector->eld_mutex);
mutex_init(&connector->edid_override_mutex);
mutex_init(&connector->hdmi.infoframes.lock);
@@ -702,6 +704,46 @@ static void drm_mode_remove(struct drm_connector *connector,
}
/**
+ * drm_connector_cec_phys_addr_invalidate - invalidate CEC physical address
+ * @connector: connector undergoing CEC operation
+ *
+ * Invalidates the CEC physical address set for this DRM connector.
+ */
+void drm_connector_cec_phys_addr_invalidate(struct drm_connector *connector)
+{
+ mutex_lock(&connector->cec.mutex);
+
+ if (connector->cec.funcs &&
+ connector->cec.funcs->phys_addr_invalidate)
+ connector->cec.funcs->phys_addr_invalidate(connector);
+
+ mutex_unlock(&connector->cec.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cec_phys_addr_invalidate);
+
+/**
+ * drm_connector_cec_phys_addr_set - propagate CEC physical address
+ * @connector: connector undergoing CEC operation
+ *
+ * Propagate CEC physical address from the display_info to this DRM connector.
+ */
+void drm_connector_cec_phys_addr_set(struct drm_connector *connector)
+{
+ u16 addr;
+
+ mutex_lock(&connector->cec.mutex);
+
+ addr = connector->display_info.source_physical_address;
+
+ if (connector->cec.funcs &&
+ connector->cec.funcs->phys_addr_set)
+ connector->cec.funcs->phys_addr_set(connector, addr);
+
+ mutex_unlock(&connector->cec.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cec_phys_addr_set);
+
+/**
* drm_connector_cleanup - cleans up an initialised connector
* @connector: connector to cleanup
*
@@ -1645,7 +1687,7 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name);
* structure from userspace. This is received as blob and stored in
* &drm_connector_state.hdr_output_metadata. It parses EDID and saves the
* sink metadata in &struct hdr_sink_metadata, as
- * &drm_connector.hdr_sink_metadata. Driver uses
+ * &drm_connector.display_info.hdr_sink_metadata. Driver uses
* drm_hdmi_infoframe_set_hdr_metadata() helper to set the HDR metadata,
* hdmi_drm_infoframe_pack() to pack the infoframe as per spec, in case of
* HDMI encoder.
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index 44a5a36806e3..6a49e7a0ab84 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -30,6 +30,8 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 3dfd8b34dceb..365cf337529f 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -44,6 +44,9 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
+static struct dentry *accel_debugfs_root;
+static struct dentry *drm_debugfs_root;
+
/***************************************************
* Initialization, etc.
**************************************************/
@@ -77,14 +80,15 @@ static int drm_clients_info(struct seq_file *m, void *data)
kuid_t uid;
seq_printf(m,
- "%20s %5s %3s master a %5s %10s %*s\n",
+ "%20s %5s %3s master a %5s %10s %*s %20s\n",
"command",
"tgid",
"dev",
"uid",
"magic",
DRM_CLIENT_NAME_MAX_LEN,
- "name");
+ "name",
+ "id");
/* dev->filelist is sorted youngest first, but we want to present
* oldest first (i.e. kernel, servers, clients), so walk backwards.
@@ -100,7 +104,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
pid = rcu_dereference(priv->pid);
task = pid_task(pid, PIDTYPE_TGID);
uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
- seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s\n",
+ seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s %20llu\n",
task ? task->comm : "<unknown>",
pid_vnr(pid),
priv->minor->index,
@@ -109,7 +113,8 @@ static int drm_clients_info(struct seq_file *m, void *data)
from_kuid_munged(seq_user_ns(m), uid),
priv->magic,
DRM_CLIENT_NAME_MAX_LEN,
- priv->client_name ? priv->client_name : "<unset>");
+ priv->client_name ? priv->client_name : "<unset>",
+ priv->client_id);
rcu_read_unlock();
mutex_unlock(&priv->client_name_lock);
}
@@ -285,16 +290,120 @@ int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
}
EXPORT_SYMBOL(drm_debugfs_remove_files);
+void drm_debugfs_bridge_params(void)
+{
+ drm_bridge_debugfs_params(drm_debugfs_root);
+}
+
+void drm_debugfs_init_root(void)
+{
+ drm_debugfs_root = debugfs_create_dir("dri", NULL);
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+ accel_debugfs_root = debugfs_create_dir("accel", NULL);
+#endif
+}
+
+void drm_debugfs_remove_root(void)
+{
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+ debugfs_remove(accel_debugfs_root);
+#endif
+ debugfs_remove(drm_debugfs_root);
+}
+
+static int drm_debugfs_proc_info_show(struct seq_file *m, void *unused)
+{
+ struct pid *pid;
+ struct task_struct *task;
+ struct drm_file *file = m->private;
+
+ if (!file)
+ return -EINVAL;
+
+ rcu_read_lock();
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+
+ seq_printf(m, "pid: %d\n", task ? task->pid : 0);
+ seq_printf(m, "comm: %s\n", task ? task->comm : "Unset");
+ rcu_read_unlock();
+ return 0;
+}
+
+static int drm_debugfs_proc_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, drm_debugfs_proc_info_show, inode->i_private);
+}
+
+static const struct file_operations drm_debugfs_proc_info_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_debugfs_proc_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * drm_debugfs_clients_add - Add a per client debugfs directory
+ * @file: drm_file for a client
+ *
+ * Create the debugfs directory for each client. This will be used to populate
+ * driver specific data for each client.
+ *
+ * Also add the process information debugfs file for each client to tag
+ * which client belongs to which process.
+ */
+void drm_debugfs_clients_add(struct drm_file *file)
+{
+ char *client;
+
+ client = kasprintf(GFP_KERNEL, "client-%llu", file->client_id);
+ if (!client)
+ return;
+
+ /* Create a debugfs directory for the client under the drm debugfs root */
+ file->debugfs_client = debugfs_create_dir(client, drm_debugfs_root);
+ kfree(client);
+
+ debugfs_create_file("proc_info", 0444, file->debugfs_client, file,
+ &drm_debugfs_proc_info_fops);
+
+ client = kasprintf(GFP_KERNEL, "../%s", file->minor->dev->unique);
+ if (!client)
+ return;
+
+ /* Create a link from client_id to the drm device this client id belongs to */
+ debugfs_create_symlink("device", file->debugfs_client, client);
+ kfree(client);
+}
+
+/**
+ * drm_debugfs_clients_remove - removes all debugfs directories and files
+ * @file: drm_file for a client
+ *
+ * Removes the debugfs directories recursively from the client directory.
+ *
+ * Note that some of these debugfs files may still be open when the drm_file
+ * is released.
+ */
+void drm_debugfs_clients_remove(struct drm_file *file)
+{
+ debugfs_remove_recursive(file->debugfs_client);
+ file->debugfs_client = NULL;
+}
+
/**
* drm_debugfs_dev_init - create debugfs directory for the device
* @dev: the device which we want to create the directory for
- * @root: the parent directory depending on the device type
*
* Creates the debugfs directory for the device under the given root directory.
*/
-void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root)
+void drm_debugfs_dev_init(struct drm_device *dev)
{
- dev->debugfs_root = debugfs_create_dir(dev->unique, root);
+ if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ dev->debugfs_root = debugfs_create_dir(dev->unique, accel_debugfs_root);
+ else
+ dev->debugfs_root = debugfs_create_dir(dev->unique, drm_debugfs_root);
}
/**
@@ -321,14 +430,13 @@ void drm_debugfs_dev_register(struct drm_device *dev)
drm_atomic_debugfs_init(dev);
}
-int drm_debugfs_register(struct drm_minor *minor, int minor_id,
- struct dentry *root)
+int drm_debugfs_register(struct drm_minor *minor, int minor_id)
{
struct drm_device *dev = minor->dev;
char name[64];
sprintf(name, "%d", minor_id);
- minor->debugfs_symlink = debugfs_create_symlink(name, root,
+ minor->debugfs_symlink = debugfs_create_symlink(name, drm_debugfs_root,
dev->unique);
/* TODO: Only for compatibility with drivers */
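
A usage sketch: once drm_debugfs_clients_add() has created the client-<id>/ directory, a driver can publish extra per-client files under file->debugfs_client. The to_my_file() accessor and ctx_count field are hypothetical; debugfs_create_u32() is the standard debugfs helper.

/* Hypothetical driver hook adding a stat file to the per-client directory. */
static void my_driver_client_debugfs(struct drm_file *file)
{
	struct my_file_priv *fpriv = to_my_file(file);

	/* debugfs helpers tolerate a NULL or error parent, so no check needed */
	debugfs_create_u32("ctx_count", 0444, file->debugfs_client,
			   &fpriv->ctx_count);
}
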
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index bbc3bc4ba844..6b43b1cf2327 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -29,6 +29,7 @@
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 56dd61f8e05a..cdd591b11488 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -29,11 +29,13 @@
#include <linux/bitops.h>
#include <linux/cgroup_dmem.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sprintf.h>
#include <linux/srcu.h>
@@ -70,8 +72,6 @@ DEFINE_XARRAY_ALLOC(drm_minors_xa);
*/
static bool drm_core_init_complete;
-static struct dentry *drm_debugfs_root;
-
DEFINE_STATIC_SRCU(drm_unplug_srcu);
/*
@@ -184,8 +184,7 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
return 0;
if (minor->type != DRM_MINOR_ACCEL) {
- ret = drm_debugfs_register(minor, minor->index,
- drm_debugfs_root);
+ ret = drm_debugfs_register(minor, minor->index);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
goto err_debugfs;
@@ -538,10 +537,15 @@ static const char *drm_get_wedge_recovery(unsigned int opt)
}
}
+#define WEDGE_STR_LEN 32
+#define PID_STR_LEN 15
+#define COMM_STR_LEN (TASK_COMM_LEN + 5)
+
/**
* drm_dev_wedged_event - generate a device wedged uevent
* @dev: DRM device
* @method: method(s) to be used for recovery
+ * @info: optional information about the guilty task
*
* This generates a device wedged uevent for the DRM device specified by @dev.
* Recovery @method\(s) of choice will be sent in the uevent environment as
@@ -554,13 +558,13 @@ static const char *drm_get_wedge_recovery(unsigned int opt)
*
* Returns: 0 on success, negative error code otherwise.
*/
-int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
+int drm_dev_wedged_event(struct drm_device *dev, unsigned long method,
+ struct drm_wedge_task_info *info)
{
+ char event_string[WEDGE_STR_LEN], pid_string[PID_STR_LEN], comm_string[COMM_STR_LEN];
+ char *envp[] = { event_string, NULL, NULL, NULL };
const char *recovery = NULL;
unsigned int len, opt;
- /* Event string length up to 28+ characters with available methods */
- char event_string[32];
- char *envp[] = { event_string, NULL };
len = scnprintf(event_string, sizeof(event_string), "%s", "WEDGED=");
@@ -582,6 +586,13 @@ int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
drm_info(dev, "device wedged, %s\n", method == DRM_WEDGE_RECOVERY_NONE ?
"but recovered through reset" : "needs recovery");
+ if (info && (info->comm[0] != '\0') && (info->pid >= 0)) {
+ snprintf(pid_string, sizeof(pid_string), "PID=%u", info->pid);
+ snprintf(comm_string, sizeof(comm_string), "TASK=%s", info->comm);
+ envp[1] = pid_string;
+ envp[2] = comm_string;
+ }
+
return kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_dev_wedged_event);
@@ -773,10 +784,7 @@ static int drm_dev_init(struct drm_device *dev,
goto err;
}
- if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
- accel_debugfs_init(dev);
- else
- drm_debugfs_dev_init(dev, drm_debugfs_root);
+ drm_debugfs_dev_init(dev);
return 0;
@@ -1216,7 +1224,7 @@ static void drm_core_exit(void)
drm_panic_exit();
accel_core_exit();
unregister_chrdev(DRM_MAJOR, "drm");
- debugfs_remove(drm_debugfs_root);
+ drm_debugfs_remove_root();
drm_sysfs_destroy();
WARN_ON(!xa_empty(&drm_minors_xa));
drm_connector_ida_destroy();
@@ -1235,8 +1243,8 @@ static int __init drm_core_init(void)
goto error;
}
- drm_debugfs_root = debugfs_create_dir("dri", NULL);
- drm_bridge_debugfs_params(drm_debugfs_root);
+ drm_debugfs_init_root();
+ drm_debugfs_bridge_params();
ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
if (ret < 0)
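
A caller-side sketch of the extended wedged event, assuming the reporting driver has already resolved the guilty task. The comm/pid members of struct drm_wedge_task_info are taken from their use above; my_handle_hang() is hypothetical, and DRM_WEDGE_RECOVERY_REBIND is one of the pre-existing recovery method flags.

/* Hypothetical hang handler tagging the guilty task in the uevent. */
static void my_handle_hang(struct drm_device *dev, struct task_struct *task)
{
	struct drm_wedge_task_info info = {};

	info.pid = task->pid;
	strscpy(info.comm, task->comm, sizeof(info.comm));

	drm_dev_wedged_event(dev, DRM_WEDGE_RECOVERY_REBIND, &info);
}
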
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 74e77742b2bd..e2e85345aa9a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -31,6 +31,7 @@
#include <linux/bitfield.h>
#include <linux/byteorder/generic.h>
#include <linux/cec.h>
+#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
@@ -66,34 +67,36 @@ static int oui(u8 first, u8 second, u8 third)
* on as many displays as possible).
*/
-/* First detailed mode wrong, use largest 60Hz mode */
-#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
-/* Reported 135MHz pixel clock is too high, needs adjustment */
-#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
-/* Prefer the largest mode at 75 Hz */
-#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
-/* Detail timing is in cm not mm */
-#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
-/* Detailed timing descriptors have bogus size values, so just take the
- * maximum size and use that.
- */
-#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
-/* use +hsync +vsync for detailed mode */
-#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
-/* Force reduced-blanking timings for detailed modes */
-#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
-/* Force 8bpc */
-#define EDID_QUIRK_FORCE_8BPC (1 << 8)
-/* Force 12bpc */
-#define EDID_QUIRK_FORCE_12BPC (1 << 9)
-/* Force 6bpc */
-#define EDID_QUIRK_FORCE_6BPC (1 << 10)
-/* Force 10bpc */
-#define EDID_QUIRK_FORCE_10BPC (1 << 11)
-/* Non desktop display (i.e. HMD) */
-#define EDID_QUIRK_NON_DESKTOP (1 << 12)
-/* Cap the DSC target bitrate to 15bpp */
-#define EDID_QUIRK_CAP_DSC_15BPP (1 << 13)
+enum drm_edid_internal_quirk {
+ /* First detailed mode wrong, use largest 60Hz mode */
+ EDID_QUIRK_PREFER_LARGE_60 = DRM_EDID_QUIRK_NUM,
+ /* Reported 135MHz pixel clock is too high, needs adjustment */
+ EDID_QUIRK_135_CLOCK_TOO_HIGH,
+ /* Prefer the largest mode at 75 Hz */
+ EDID_QUIRK_PREFER_LARGE_75,
+ /* Detail timing is in cm not mm */
+ EDID_QUIRK_DETAILED_IN_CM,
+ /* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+ EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE,
+ /* use +hsync +vsync for detailed mode */
+ EDID_QUIRK_DETAILED_SYNC_PP,
+ /* Force reduced-blanking timings for detailed modes */
+ EDID_QUIRK_FORCE_REDUCED_BLANKING,
+ /* Force 8bpc */
+ EDID_QUIRK_FORCE_8BPC,
+ /* Force 12bpc */
+ EDID_QUIRK_FORCE_12BPC,
+ /* Force 6bpc */
+ EDID_QUIRK_FORCE_6BPC,
+ /* Force 10bpc */
+ EDID_QUIRK_FORCE_10BPC,
+ /* Non desktop display (i.e. HMD) */
+ EDID_QUIRK_NON_DESKTOP,
+ /* Cap the DSC target bitrate to 15bpp */
+ EDID_QUIRK_CAP_DSC_15BPP,
+};
#define MICROSOFT_IEEE_OUI 0xca125c
@@ -128,124 +131,132 @@ static const struct edid_quirk {
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
- EDID_QUIRK('A', 'C', 'R', 44358, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('A', 'C', 'R', 44358, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Acer F51 */
- EDID_QUIRK('A', 'P', 'I', 0x7602, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('A', 'P', 'I', 0x7602, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('A', 'E', 'O', 0, BIT(EDID_QUIRK_FORCE_6BPC)),
/* BenQ GW2765 */
- EDID_QUIRK('B', 'N', 'Q', 0x78d6, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('B', 'N', 'Q', 0x78d6, BIT(EDID_QUIRK_FORCE_8BPC)),
/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('B', 'O', 'E', 0x78b, BIT(EDID_QUIRK_FORCE_6BPC)),
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('C', 'P', 'T', 0x17df, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('C', 'P', 'T', 0x17df, BIT(EDID_QUIRK_FORCE_6BPC)),
/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('S', 'D', 'C', 0x3652, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('S', 'D', 'C', 0x3652, BIT(EDID_QUIRK_FORCE_6BPC)),
/* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('B', 'O', 'E', 0x0771, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('B', 'O', 'E', 0x0771, BIT(EDID_QUIRK_FORCE_6BPC)),
/* Belinea 10 15 55 */
- EDID_QUIRK('M', 'A', 'X', 1516, EDID_QUIRK_PREFER_LARGE_60),
- EDID_QUIRK('M', 'A', 'X', 0x77e, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('M', 'A', 'X', 1516, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+ EDID_QUIRK('M', 'A', 'X', 0x77e, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Envision Peripherals, Inc. EN-7100e */
- EDID_QUIRK('E', 'P', 'I', 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH),
+ EDID_QUIRK('E', 'P', 'I', 59264, BIT(EDID_QUIRK_135_CLOCK_TOO_HIGH)),
/* Envision EN2028 */
- EDID_QUIRK('E', 'P', 'I', 8232, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('E', 'P', 'I', 8232, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Funai Electronics PM36B */
- EDID_QUIRK('F', 'C', 'M', 13600, EDID_QUIRK_PREFER_LARGE_75 |
- EDID_QUIRK_DETAILED_IN_CM),
+ EDID_QUIRK('F', 'C', 'M', 13600, BIT(EDID_QUIRK_PREFER_LARGE_75) |
+ BIT(EDID_QUIRK_DETAILED_IN_CM)),
/* LG 27GP950 */
- EDID_QUIRK('G', 'S', 'M', 0x5bbf, EDID_QUIRK_CAP_DSC_15BPP),
+ EDID_QUIRK('G', 'S', 'M', 0x5bbf, BIT(EDID_QUIRK_CAP_DSC_15BPP)),
/* LG 27GN950 */
- EDID_QUIRK('G', 'S', 'M', 0x5b9a, EDID_QUIRK_CAP_DSC_15BPP),
+ EDID_QUIRK('G', 'S', 'M', 0x5b9a, BIT(EDID_QUIRK_CAP_DSC_15BPP)),
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
- EDID_QUIRK('L', 'G', 'D', 764, EDID_QUIRK_FORCE_10BPC),
+ EDID_QUIRK('L', 'G', 'D', 764, BIT(EDID_QUIRK_FORCE_10BPC)),
/* LG Philips LCD LP154W01-A5 */
- EDID_QUIRK('L', 'P', 'L', 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
- EDID_QUIRK('L', 'P', 'L', 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
+ EDID_QUIRK('L', 'P', 'L', 0, BIT(EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)),
+ EDID_QUIRK('L', 'P', 'L', 0x2a00, BIT(EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)),
/* Samsung SyncMaster 205BW. Note: irony */
- EDID_QUIRK('S', 'A', 'M', 541, EDID_QUIRK_DETAILED_SYNC_PP),
+ EDID_QUIRK('S', 'A', 'M', 541, BIT(EDID_QUIRK_DETAILED_SYNC_PP)),
/* Samsung SyncMaster 22[5-6]BW */
- EDID_QUIRK('S', 'A', 'M', 596, EDID_QUIRK_PREFER_LARGE_60),
- EDID_QUIRK('S', 'A', 'M', 638, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('S', 'A', 'M', 596, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+ EDID_QUIRK('S', 'A', 'M', 638, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
- EDID_QUIRK('S', 'N', 'Y', 0x2541, EDID_QUIRK_FORCE_12BPC),
+ EDID_QUIRK('S', 'N', 'Y', 0x2541, BIT(EDID_QUIRK_FORCE_12BPC)),
/* ViewSonic VA2026w */
- EDID_QUIRK('V', 'S', 'C', 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING),
+ EDID_QUIRK('V', 'S', 'C', 5020, BIT(EDID_QUIRK_FORCE_REDUCED_BLANKING)),
/* Medion MD 30217 PG */
- EDID_QUIRK('M', 'E', 'D', 0x7b8, EDID_QUIRK_PREFER_LARGE_75),
+ EDID_QUIRK('M', 'E', 'D', 0x7b8, BIT(EDID_QUIRK_PREFER_LARGE_75)),
/* Lenovo G50 */
- EDID_QUIRK('S', 'D', 'C', 18514, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('S', 'D', 'C', 18514, BIT(EDID_QUIRK_FORCE_6BPC)),
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
- EDID_QUIRK('S', 'E', 'C', 0xd033, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('S', 'E', 'C', 0xd033, BIT(EDID_QUIRK_FORCE_8BPC)),
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
- EDID_QUIRK('E', 'T', 'R', 13896, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('E', 'T', 'R', 13896, BIT(EDID_QUIRK_FORCE_8BPC)),
/* Valve Index Headset */
- EDID_QUIRK('V', 'L', 'V', 0x91a8, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b0, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b1, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b2, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b3, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b4, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b5, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b6, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b7, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b8, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b9, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91ba, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bb, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bc, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bd, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91be, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bf, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91a8, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b0, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b1, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b2, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b3, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b4, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b5, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b6, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b7, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b8, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b9, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91ba, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bb, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bc, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bd, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91be, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bf, BIT(EDID_QUIRK_NON_DESKTOP)),
/* HTC Vive and Vive Pro VR Headsets */
- EDID_QUIRK('H', 'V', 'R', 0xaa01, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('H', 'V', 'R', 0xaa02, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('H', 'V', 'R', 0xaa01, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('H', 'V', 'R', 0xaa02, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
- EDID_QUIRK('O', 'V', 'R', 0x0001, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('O', 'V', 'R', 0x0003, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('O', 'V', 'R', 0x0004, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('O', 'V', 'R', 0x0012, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0001, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('O', 'V', 'R', 0x0003, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('O', 'V', 'R', 0x0004, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('O', 'V', 'R', 0x0012, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Windows Mixed Reality Headsets */
- EDID_QUIRK('A', 'C', 'R', 0x7fce, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('L', 'E', 'N', 0x0408, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('F', 'U', 'J', 0x1970, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('D', 'E', 'L', 0x7fce, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('S', 'E', 'C', 0x144a, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('A', 'U', 'S', 0xc102, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('A', 'C', 'R', 0x7fce, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('L', 'E', 'N', 0x0408, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('F', 'U', 'J', 0x1970, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('D', 'E', 'L', 0x7fce, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('S', 'E', 'C', 0x144a, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('A', 'U', 'S', 0xc102, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Sony PlayStation VR Headset */
- EDID_QUIRK('S', 'N', 'Y', 0x0704, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'N', 'Y', 0x0704, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Sensics VR Headsets */
- EDID_QUIRK('S', 'E', 'N', 0x1019, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'E', 'N', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)),
/* OSVR HDK and HDK2 VR Headsets */
- EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('A', 'U', 'O', 0x1111, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'V', 'R', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('A', 'U', 'O', 0x1111, BIT(EDID_QUIRK_NON_DESKTOP)),
+
+ /*
+ * @drm_edid_internal_quirk entries end here, followed by the
+ * @drm_edid_quirk entries.
+ */
+
+ /* HP ZR24w DP AUX DPCD access requires probing to prevent corruption. */
+ EDID_QUIRK('H', 'W', 'P', 0x2869, BIT(DRM_EDID_QUIRK_DP_DPCD_PROBE)),
};
/*
@@ -2951,6 +2962,18 @@ static u32 edid_get_quirks(const struct drm_edid *drm_edid)
return 0;
}
+static bool drm_edid_has_internal_quirk(struct drm_connector *connector,
+ enum drm_edid_internal_quirk quirk)
+{
+ return connector->display_info.quirks & BIT(quirk);
+}
+
+bool drm_edid_has_quirk(struct drm_connector *connector, enum drm_edid_quirk quirk)
+{
+ return connector->display_info.quirks & BIT(quirk);
+}
+EXPORT_SYMBOL(drm_edid_has_quirk);
+
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
@@ -2960,7 +2983,6 @@ static u32 edid_get_quirks(const struct drm_edid *drm_edid)
*/
static void edid_fixup_preferred(struct drm_connector *connector)
{
- const struct drm_display_info *info = &connector->display_info;
struct drm_display_mode *t, *cur_mode, *preferred_mode;
int target_refresh = 0;
int cur_vrefresh, preferred_vrefresh;
@@ -2968,9 +2990,9 @@ static void edid_fixup_preferred(struct drm_connector *connector)
if (list_empty(&connector->probed_modes))
return;
- if (info->quirks & EDID_QUIRK_PREFER_LARGE_60)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_60))
target_refresh = 60;
- if (info->quirks & EDID_QUIRK_PREFER_LARGE_75)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_75))
target_refresh = 75;
preferred_mode = list_first_entry(&connector->probed_modes,
@@ -3474,7 +3496,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
{
- const struct drm_display_info *info = &connector->display_info;
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
const struct detailed_pixel_timing *pt = &timing->data.pixel_data;
@@ -3508,7 +3529,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
return NULL;
}
- if (info->quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_REDUCED_BLANKING)) {
mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
if (!mode)
return NULL;
@@ -3520,7 +3541,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
if (!mode)
return NULL;
- if (info->quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_135_CLOCK_TOO_HIGH))
mode->clock = 1088 * 10;
else
mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
@@ -3551,7 +3572,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
drm_mode_do_interlace_quirk(mode, pt);
- if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_SYNC_PP)) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
@@ -3564,12 +3585,12 @@ set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
- if (info->quirks & EDID_QUIRK_DETAILED_IN_CM) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_IN_CM)) {
mode->width_mm *= 10;
mode->height_mm *= 10;
}
- if (info->quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)) {
mode->width_mm = drm_edid->edid->width_cm * 10;
mode->height_mm = drm_edid->edid->height_cm * 10;
}
@@ -5373,7 +5394,8 @@ static void fixup_detailed_cea_mode_clock(struct drm_connector *connector,
static void drm_calculate_luminance_range(struct drm_connector *connector)
{
- struct hdr_static_metadata *hdr_metadata = &connector->hdr_sink_metadata.hdmi_type1;
+ const struct hdr_static_metadata *hdr_metadata =
+ &connector->display_info.hdr_sink_metadata.hdmi_type1;
struct drm_luminance_range_info *luminance_range =
&connector->display_info.luminance_range;
static const u8 pre_computed_values[] = {
@@ -5434,21 +5456,21 @@ static uint8_t hdr_metadata_type(const u8 *edid_ext)
static void
drm_parse_hdr_metadata_block(struct drm_connector *connector, const u8 *db)
{
+ struct hdr_static_metadata *hdr_metadata =
+ &connector->display_info.hdr_sink_metadata.hdmi_type1;
u16 len;
len = cea_db_payload_len(db);
- connector->hdr_sink_metadata.hdmi_type1.eotf =
- eotf_supported(db);
- connector->hdr_sink_metadata.hdmi_type1.metadata_type =
- hdr_metadata_type(db);
+ hdr_metadata->eotf = eotf_supported(db);
+ hdr_metadata->metadata_type = hdr_metadata_type(db);
if (len >= 4)
- connector->hdr_sink_metadata.hdmi_type1.max_cll = db[4];
+ hdr_metadata->max_cll = db[4];
if (len >= 5)
- connector->hdr_sink_metadata.hdmi_type1.max_fall = db[5];
+ hdr_metadata->max_fall = db[5];
if (len >= 6) {
- connector->hdr_sink_metadata.hdmi_type1.min_cll = db[6];
+ hdr_metadata->min_cll = db[6];
/* Calculate only when all values are available */
drm_calculate_luminance_range(connector);
@@ -6596,7 +6618,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
- memset(&connector->hdr_sink_metadata, 0, sizeof(connector->hdr_sink_metadata));
+ memset(&info->hdr_sink_metadata, 0, sizeof(info->hdr_sink_metadata));
info->edid_hdmi_rgb444_dc_modes = 0;
info->edid_hdmi_ycbcr444_dc_modes = 0;
@@ -6734,26 +6756,26 @@ static void update_display_info(struct drm_connector *connector,
drm_update_mso(connector, drm_edid);
out:
- if (info->quirks & EDID_QUIRK_NON_DESKTOP) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_NON_DESKTOP)) {
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Non-desktop display%s\n",
connector->base.id, connector->name,
info->non_desktop ? " (redundant quirk)" : "");
info->non_desktop = true;
}
- if (info->quirks & EDID_QUIRK_CAP_DSC_15BPP)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_CAP_DSC_15BPP))
info->max_dsc_bpp = 15;
- if (info->quirks & EDID_QUIRK_FORCE_6BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_6BPC))
info->bpc = 6;
- if (info->quirks & EDID_QUIRK_FORCE_8BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_8BPC))
info->bpc = 8;
- if (info->quirks & EDID_QUIRK_FORCE_10BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_10BPC))
info->bpc = 10;
- if (info->quirks & EDID_QUIRK_FORCE_12BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_12BPC))
info->bpc = 12;
/* Depends on info->cea_rev set by drm_parse_cea_ext() above */
@@ -6918,7 +6940,6 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
static int _drm_edid_connector_add_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- const struct drm_display_info *info = &connector->display_info;
int num_modes = 0;
if (!drm_edid)
@@ -6948,7 +6969,8 @@ static int _drm_edid_connector_add_modes(struct drm_connector *connector,
if (drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ)
num_modes += add_inferred_modes(connector, drm_edid);
- if (info->quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_60) ||
+ drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector);
return num_modes;
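
A usage sketch for the new public quirk API: a DP helper can gate AUX behaviour on the quirk without seeing the internal quirk list. Only drm_edid_has_quirk() and DRM_EDID_QUIRK_DP_DPCD_PROBE come from this patch; the wrapper below is illustrative.

/* Hypothetical caller: probe DPCD before bulk access only when quirked. */
static bool my_dp_needs_dpcd_probe(struct drm_connector *connector)
{
	return drm_edid_has_quirk(connector, DRM_EDID_QUIRK_DP_DPCD_PROBE);
}
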
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index 18e366cc4993..8d0601400182 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -2,7 +2,9 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
+
#include <linux/dma-resv.h>
+#include <linux/export.h>
/**
* DOC: Overview
diff --git a/drivers/gpu/drm/drm_fb_dma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c
index 2c4dc7ebc0c3..fd71969d2fb1 100644
--- a/drivers/gpu/drm/drm_fb_dma_helper.c
+++ b/drivers/gpu/drm/drm_fb_dma_helper.c
@@ -17,7 +17,9 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_plane.h>
+
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/module.h>
/**
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 937c3939e502..11a5b60cb9ce 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/sysrq.h>
#include <linux/vga_switcheroo.h>
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index 02a516e77192..8bd626ef16c7 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>
diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c
index f824369baacd..1e827bf8b815 100644
--- a/drivers/gpu/drm/drm_fbdev_shmem.c
+++ b/drivers/gpu/drm/drm_fbdev_shmem.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/fb.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c
index 73d35d59590c..85feb55bba11 100644
--- a/drivers/gpu/drm/drm_fbdev_ttm.c
+++ b/drivers/gpu/drm/drm_fbdev_ttm.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 246cf845e2c9..eebd1a05ee97 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -33,6 +33,7 @@
#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -45,6 +46,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
+#include <drm/drm_debugfs.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -167,6 +169,9 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
drm_prime_init_file_private(&file->prime);
+ if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ drm_debugfs_clients_add(file);
+
if (dev->driver->open) {
ret = dev->driver->open(dev, file);
if (ret < 0)
@@ -181,6 +186,10 @@ out_prime_destroy:
drm_syncobj_release(file);
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file);
+
+ if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ drm_debugfs_clients_remove(file);
+
put_pid(rcu_access_pointer(file->pid));
kfree(file);
@@ -235,6 +244,9 @@ void drm_file_free(struct drm_file *file)
(long)old_encode_dev(file->minor->kdev->devt),
atomic_read(&dev->open_count));
+ if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ drm_debugfs_clients_remove(file);
+
drm_events_release(file);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -1017,8 +1029,10 @@ void drm_file_err(struct drm_file *file_priv, const char *fmt, ...)
pid = rcu_dereference(file_priv->pid);
task = pid_task(pid, PIDTYPE_TGID);
- drm_err(dev, "comm: %s pid: %d client: %s ... %pV", task ? task->comm : "Unset",
- task ? task->pid : 0, file_priv->client_name ?: "Unset", &vaf);
+ drm_err(dev, "comm: %s pid: %d client-id:%llu client: %s ... %pV",
+ task ? task->comm : "Unset",
+ task ? task->pid : 0, file_priv->client_id,
+ file_priv->client_name ?: "Unset", &vaf);
va_end(args);
rcu_read_unlock();
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index 8c6090a90d56..f5889dd8e7aa 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -21,6 +21,7 @@
* SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <drm/drm_flip_work.h>
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index d36e6cacc575..8f3daf38ca63 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -8,6 +8,7 @@
* (at your option) any later version.
*/
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
@@ -558,18 +559,6 @@ static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigne
drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565);
}
-static __always_inline u32 drm_xrgb8888_to_rgb565_swab(u32 pix)
-{
- return swab16(drm_pixel_xrgb8888_to_rgb565(pix));
-}
-
-/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */
-static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
- unsigned int pixels)
-{
- drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_xrgb8888_to_rgb565_swab);
-}
-
/**
* drm_fb_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
* @dst: Array of RGB565 destination buffers
@@ -579,7 +568,6 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
* @state: Transform and conversion state
- * @swab: Swap bytes
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -594,23 +582,56 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
*/
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, struct drm_format_conv_state *state,
- bool swab)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
- void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels);
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_rgb565_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
- if (swab)
- xfrm_line = drm_fb_xrgb8888_to_rgb565_swab_line;
- else
- xfrm_line = drm_fb_xrgb8888_to_rgb565_line;
+static void drm_fb_xrgb8888_to_rgb565be_line(void *dbuf, const void *sbuf,
+ unsigned int pixels)
+{
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565be);
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgb565be - Convert XRGB8888 to RGB565|DRM_FORMAT_BIG_ENDIAN clip buffer
+ * @dst: Array of RGB565BE destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for RGB565BE devices that don't support XRGB8888 natively.
+ */
+void drm_fb_xrgb8888_to_rgb565be(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, xfrm_line);
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_rgb565be_line);
}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565be);
static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
@@ -857,11 +878,33 @@ static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsig
drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
}
-static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip,
- struct drm_format_conv_state *state)
+/**
+ * drm_fb_xrgb8888_to_abgr8888 - Convert XRGB8888 to ABGR8888 clip buffer
+ * @dst: Array of ABGR8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ABGR8888 devices that don't support XRGB8888
+ * natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
@@ -870,17 +913,40 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_abgr8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_abgr8888);
static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
}
-static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip,
- struct drm_format_conv_state *state)
+/**
+ * drm_fb_xrgb8888_to_xbgr8888 - Convert XRGB8888 to XBGR8888 clip buffer
+ * @dst: Array of XBGR8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for XBGR8888 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
@@ -889,6 +955,49 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_xbgr8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xbgr8888);
+
+static void drm_fb_xrgb8888_to_bgrx8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgrx8888);
+}
+
+/**
+ * drm_fb_xrgb8888_to_bgrx8888 - Convert XRGB8888 to BGRX8888 clip buffer
+ * @dst: Array of BGRX8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for BGRX8888 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_bgrx8888_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgrx8888);
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
@@ -1099,7 +1208,7 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
return 0;
} else if (fb_format == DRM_FORMAT_XRGB8888) {
if (dst_format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state, false);
+ drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_XRGB1555) {
drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state);
@@ -1250,141 +1359,3 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
}
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
-
-static uint32_t drm_fb_nonalpha_fourcc(uint32_t fourcc)
-{
- /* only handle formats with depth != 0 and alpha channel */
- switch (fourcc) {
- case DRM_FORMAT_ARGB1555:
- return DRM_FORMAT_XRGB1555;
- case DRM_FORMAT_ABGR1555:
- return DRM_FORMAT_XBGR1555;
- case DRM_FORMAT_RGBA5551:
- return DRM_FORMAT_RGBX5551;
- case DRM_FORMAT_BGRA5551:
- return DRM_FORMAT_BGRX5551;
- case DRM_FORMAT_ARGB8888:
- return DRM_FORMAT_XRGB8888;
- case DRM_FORMAT_ABGR8888:
- return DRM_FORMAT_XBGR8888;
- case DRM_FORMAT_RGBA8888:
- return DRM_FORMAT_RGBX8888;
- case DRM_FORMAT_BGRA8888:
- return DRM_FORMAT_BGRX8888;
- case DRM_FORMAT_ARGB2101010:
- return DRM_FORMAT_XRGB2101010;
- case DRM_FORMAT_ABGR2101010:
- return DRM_FORMAT_XBGR2101010;
- case DRM_FORMAT_RGBA1010102:
- return DRM_FORMAT_RGBX1010102;
- case DRM_FORMAT_BGRA1010102:
- return DRM_FORMAT_BGRX1010102;
- }
-
- return fourcc;
-}
-
-static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t fourcc)
-{
- const uint32_t *fourccs_end = fourccs + nfourccs;
-
- while (fourccs < fourccs_end) {
- if (*fourccs == fourcc)
- return true;
- ++fourccs;
- }
- return false;
-}
-
-/**
- * drm_fb_build_fourcc_list - Filters a list of supported color formats against
- * the device's native formats
- * @dev: DRM device
- * @native_fourccs: 4CC codes of natively supported color formats
- * @native_nfourccs: The number of entries in @native_fourccs
- * @fourccs_out: Returns 4CC codes of supported color formats
- * @nfourccs_out: The number of available entries in @fourccs_out
- *
- * This function create a list of supported color format from natively
- * supported formats and additional emulated formats.
- * At a minimum, most userspace programs expect at least support for
- * XRGB8888 on the primary plane. Devices that have to emulate the
- * format, and possibly others, can use drm_fb_build_fourcc_list() to
- * create a list of supported color formats. The returned list can
- * be handed over to drm_universal_plane_init() et al. Native formats
- * will go before emulated formats. Native formats with alpha channel
- * will be replaced by such without, as primary planes usually don't
- * support alpha. Other heuristics might be applied
- * to optimize the order. Formats near the beginning of the list are
- * usually preferred over formats near the end of the list.
- *
- * Returns:
- * The number of color-formats 4CC codes returned in @fourccs_out.
- */
-size_t drm_fb_build_fourcc_list(struct drm_device *dev,
- const u32 *native_fourccs, size_t native_nfourccs,
- u32 *fourccs_out, size_t nfourccs_out)
-{
- /*
- * XRGB8888 is the default fallback format for most of userspace
- * and it's currently the only format that should be emulated for
- * the primary plane. Only if there's ever another default fallback,
- * it should be added here.
- */
- static const uint32_t extra_fourccs[] = {
- DRM_FORMAT_XRGB8888,
- };
- static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
-
- u32 *fourccs = fourccs_out;
- const u32 *fourccs_end = fourccs_out + nfourccs_out;
- size_t i;
-
- /*
- * The device's native formats go first.
- */
-
- for (i = 0; i < native_nfourccs; ++i) {
- /*
- * Several DTs, boot loaders and firmware report native
- * alpha formats that are non-alpha formats instead. So
- * replace alpha formats by non-alpha formats.
- */
- u32 fourcc = drm_fb_nonalpha_fourcc(native_fourccs[i]);
-
- if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
- continue; /* skip duplicate entries */
- } else if (fourccs == fourccs_end) {
- drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc);
- continue; /* end of available output buffer */
- }
-
- drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
-
- *fourccs = fourcc;
- ++fourccs;
- }
-
- /*
- * The extra formats, emulated by the driver, go second.
- */
-
- for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
- u32 fourcc = extra_fourccs[i];
-
- if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
- continue; /* skip duplicate and native entries */
- } else if (fourccs == fourccs_end) {
- drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
- continue; /* end of available output buffer */
- }
-
- drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
-
- *fourccs = fourcc;
- ++fourccs;
- }
-
- return fourccs - fourccs_out;
-}
-EXPORT_SYMBOL(drm_fb_build_fourcc_list);
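
A usage sketch for the new big-endian helper, which replaces the removed swab=true path of drm_fb_xrgb8888_to_rgb565(): callers that previously passed swab=true now call the dedicated function. my_flush_rect() is illustrative.

/* Hypothetical damage-flush path for an RGB565 big-endian scanout buffer. */
static void my_flush_rect(struct iosys_map *dst, const struct iosys_map *src,
			  const struct drm_framebuffer *fb,
			  const struct drm_rect *clip,
			  struct drm_format_conv_state *state)
{
	/* was: drm_fb_xrgb8888_to_rgb565(dst, NULL, src, fb, clip, state, true) */
	drm_fb_xrgb8888_to_rgb565be(dst, NULL, src, fb, clip, state);
}
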
diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
index 9f857bfa368d..ce29dd05bcc5 100644
--- a/drivers/gpu/drm/drm_format_internal.h
+++ b/drivers/gpu/drm/drm_format_internal.h
@@ -5,6 +5,7 @@
#include <linux/bits.h>
#include <linux/types.h>
+#include <linux/swab.h>
/*
* Each pixel-format conversion helper takes a raw pixel in a
@@ -42,7 +43,7 @@ static inline u32 drm_pixel_xrgb8888_to_r8_bt601(u32 pix)
u32 b = pix & 0x000000ff;
/* ITU-R BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- return (3 * r + 6 * g + b) / 10;
+ return (77 * r + 150 * g + 29 * b) / 256;
}
static inline u32 drm_pixel_xrgb8888_to_rgb332(u32 pix)
@@ -59,6 +60,11 @@ static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix)
((pix & 0x000000f8) >> 3);
}
+static inline u32 drm_pixel_xrgb8888_to_rgb565be(u32 pix)
+{
+ return swab16(drm_pixel_xrgb8888_to_rgb565(pix));
+}
+
static inline u32 drm_pixel_xrgb8888_to_rgbx5551(u32 pix)
{
return ((pix & 0x00f80000) >> 8) |
@@ -111,6 +117,14 @@ static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
((pix & 0x000000ff) << 16);
}
+static inline u32 drm_pixel_xrgb8888_to_bgrx8888(u32 pix)
+{
+ return ((pix & 0xff000000) >> 24) | /* also copy filler bits */
+ ((pix & 0x00ff0000) >> 8) |
+ ((pix & 0x0000ff00) << 8) |
+ ((pix & 0x000000ff) << 24);
+}
+
static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
{
return GENMASK(31, 24) | /* fill alpha bits */
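
On the BT.601 change above: the new coefficients track the standard weights far more closely (77/256 ≈ 0.3008 vs 0.299, 150/256 ≈ 0.5859 vs 0.587, 29/256 ≈ 0.1133 vs 0.114) than the old 3/6/1 over 10 split, and they still sum to exactly 256, so white maps to full scale. A compile-time sanity check, illustrative only:

/* Hypothetical self-check: white (r = g = b = 255) must map to Y = 255. */
static inline void check_bt601_white(void)
{
	BUILD_BUG_ON((77 * 255 + 150 * 255 + 29 * 255) / 256 != 255);
}
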
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 3a94ca211f9c..e0d533611040 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -238,6 +238,14 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_RGB161616, .depth = 0,
+ .num_planes = 1, .char_per_block = { 6, 0, 0 },
+ .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = false },
+ { .format = DRM_FORMAT_BGR161616, .depth = 0,
+ .num_planes = 1, .char_per_block = { 6, 0, 0 },
+ .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = false },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
@@ -346,6 +354,33 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2,
.char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S010, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S210, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S410, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S012, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S212, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S412, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S016, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S216, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S416, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
};
unsigned int i;
@@ -382,7 +417,8 @@ EXPORT_SYMBOL(drm_format_info);
/**
* drm_get_format_info - query information for a given framebuffer configuration
* @dev: DRM device
- * @mode_cmd: metadata from the userspace fb creation request
+ * @pixel_format: pixel format (DRM_FORMAT_*)
+ * @modifier: modifier
*
* Returns:
* The instance of struct drm_format_info that describes the pixel format, or
@@ -390,15 +426,16 @@ EXPORT_SYMBOL(drm_format_info);
*/
const struct drm_format_info *
drm_get_format_info(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd)
+ u32 pixel_format, u64 modifier)
{
const struct drm_format_info *info = NULL;
if (dev->mode_config.funcs->get_format_info)
- info = dev->mode_config.funcs->get_format_info(mode_cmd);
+ info = dev->mode_config.funcs->get_format_info(pixel_format,
+ modifier);
if (!info)
- info = drm_format_info(mode_cmd->pixel_format);
+ info = drm_format_info(pixel_format);
return info;
}
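
A caller-side sketch of the reworked lookup: the pixel format and modifier are now passed directly instead of via struct drm_mode_fb_cmd2. my_check_format() is illustrative; drm_format_info() can still return NULL for unknown formats, so callers keep their NULL check.

/* Hypothetical caller of the new two-argument lookup. */
static int my_check_format(struct drm_device *dev, u32 format, u64 modifier)
{
	const struct drm_format_info *info;

	info = drm_get_format_info(dev, format, modifier);
	if (!info)
		return -EINVAL;

	return info->num_planes;
}
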
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 63a70f285cce..adbb73f00d68 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -153,18 +153,11 @@ int drm_mode_addfb_ioctl(struct drm_device *dev,
}
static int framebuffer_check(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *r)
{
- const struct drm_format_info *info;
int i;
- /* check if the format is supported at all */
- if (!__drm_format_info(r->pixel_format)) {
- drm_dbg_kms(dev, "bad framebuffer format %p4cc\n",
- &r->pixel_format);
- return -EINVAL;
- }
-
if (r->width == 0) {
drm_dbg_kms(dev, "bad framebuffer width %u\n", r->width);
return -EINVAL;
@@ -175,9 +168,6 @@ static int framebuffer_check(struct drm_device *dev,
return -EINVAL;
}
- /* now let the driver pick its own format info */
- info = drm_get_format_info(dev, r);
-
for (i = 0; i < info->num_planes; i++) {
unsigned int width = drm_format_info_plane_width(info, r->width, i);
unsigned int height = drm_format_info_plane_height(info, r->height, i);
@@ -272,6 +262,7 @@ drm_internal_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_mode_config *config = &dev->mode_config;
+ const struct drm_format_info *info;
struct drm_framebuffer *fb;
int ret;
@@ -297,11 +288,21 @@ drm_internal_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- ret = framebuffer_check(dev, r);
+ /* check if the format is supported at all */
+ if (!__drm_format_info(r->pixel_format)) {
+ drm_dbg_kms(dev, "bad framebuffer format %p4cc\n",
+ &r->pixel_format);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* now let the driver pick its own format info */
+ info = drm_get_format_info(dev, r->pixel_format, r->modifier[0]);
+
+ ret = framebuffer_check(dev, info, r);
if (ret)
return ERR_PTR(ret);
- fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, info, r);
if (IS_ERR(fb)) {
drm_dbg_kms(dev, "could not create framebuffer\n");
return fb;
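
A driver-side sketch of the matching .fb_create change: implementations now receive the format info the core has already resolved instead of looking it up again. my_fb_create() is illustrative and assumes drm_gem_fb_create() gains the same info parameter elsewhere in this series.

/* Hypothetical .fb_create forwarding the pre-resolved format info. */
static struct drm_framebuffer *
my_fb_create(struct drm_device *dev, struct drm_file *file,
	     const struct drm_format_info *info,
	     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	return drm_gem_fb_create(dev, file, info, mode_cmd);
}
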
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ac0524595bd6..6a44351e58b7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -26,6 +26,7 @@
*/
#include <linux/dma-buf.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
@@ -1238,38 +1239,6 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
obj->funcs->print_info(p, indent, obj);
}
-int drm_gem_pin_locked(struct drm_gem_object *obj)
-{
- if (obj->funcs->pin)
- return obj->funcs->pin(obj);
-
- return 0;
-}
-
-void drm_gem_unpin_locked(struct drm_gem_object *obj)
-{
- if (obj->funcs->unpin)
- obj->funcs->unpin(obj);
-}
-
-int drm_gem_pin(struct drm_gem_object *obj)
-{
- int ret;
-
- dma_resv_lock(obj->resv, NULL);
- ret = drm_gem_pin_locked(obj);
- dma_resv_unlock(obj->resv);
-
- return ret;
-}
-
-void drm_gem_unpin(struct drm_gem_object *obj)
-{
- dma_resv_lock(obj->resv, NULL);
- drm_gem_unpin_locked(obj);
- dma_resv_unlock(obj->resv);
-}
-
int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
@@ -1514,12 +1483,14 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
* @nr_to_scan: The number of pages to try to reclaim
* @remaining: The number of pages left to reclaim, should be initialized by caller
* @shrink: Callback to try to shrink/reclaim the object.
+ * @ticket: Optional ww_acquire_ctx context to use for locking
*/
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
unsigned int nr_to_scan,
unsigned long *remaining,
- bool (*shrink)(struct drm_gem_object *obj))
+ bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
+ struct ww_acquire_ctx *ticket)
{
struct drm_gem_lru still_in_lru;
struct drm_gem_object *obj;
@@ -1552,17 +1523,20 @@ drm_gem_lru_scan(struct drm_gem_lru *lru,
*/
mutex_unlock(lru->lock);
+ if (ticket)
+ ww_acquire_init(ticket, &reservation_ww_class);
+
/*
* Note that this still needs to be trylock, since we can
* hit shrinker in response to trying to get backing pages
* for this obj (i.e. while its lock is already held)
*/
- if (!dma_resv_trylock(obj->resv)) {
+ if (!ww_mutex_trylock(&obj->resv->lock, ticket)) {
*remaining += obj->size >> PAGE_SHIFT;
goto tail;
}
- if (shrink(obj)) {
+ if (shrink(obj, ticket)) {
freed += obj->size >> PAGE_SHIFT;
/*
@@ -1576,6 +1550,9 @@ drm_gem_lru_scan(struct drm_gem_lru *lru,
dma_resv_unlock(obj->resv);
+ if (ticket)
+ ww_acquire_fini(ticket);
+
tail:
drm_gem_object_put(obj);
mutex_lock(lru->lock);
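
A hedged sketch of how a driver shrinker might adopt the new
drm_gem_lru_scan() calling convention; foo_device, foo->lru and
foo_evict_pages() are hypothetical, while shrinker->private_data and
SHRINK_STOP are standard shrinker interfaces:

static bool foo_shrink(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
	/* obj->resv was already trylocked by drm_gem_lru_scan() under @ticket. */
	return foo_evict_pages(obj, ticket); /* hypothetical eviction helper */
}

static unsigned long foo_shrinker_scan(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	struct foo_device *foo = shrinker->private_data;
	struct ww_acquire_ctx ticket;
	unsigned long remaining = 0;
	unsigned long freed;

	/* Passing a ticket lets the shrink callback lock additional buffers. */
	freed = drm_gem_lru_scan(&foo->lru, sc->nr_to_scan, &remaining,
				 foo_shrink, &ticket);

	return freed ?: SHRINK_STOP;
}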
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index 93337543aac3..ebf305fb24f0 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -2,6 +2,7 @@
#include <linux/dma-resv.h>
#include <linux/dma-fence-chain.h>
+#include <linux/export.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 6ff22e04029e..4bc89d33df59 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -5,6 +5,7 @@
* Copyright (C) 2017 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -67,6 +68,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
static int
drm_gem_fb_init(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes,
const struct drm_framebuffer_funcs *funcs)
@@ -74,7 +76,7 @@ drm_gem_fb_init(struct drm_device *dev,
unsigned int i;
int ret;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
for (i = 0; i < num_planes; i++)
fb->obj[i] = obj[i];
@@ -135,6 +137,7 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
* @dev: DRM device
* @fb: framebuffer object
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
@@ -151,20 +154,14 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
int drm_gem_fb_init_with_funcs(struct drm_device *dev,
struct drm_framebuffer *fb,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
- const struct drm_format_info *info;
struct drm_gem_object *objs[DRM_FORMAT_MAX_PLANES];
unsigned int i;
int ret;
- info = drm_get_format_info(dev, mode_cmd);
- if (!info) {
- drm_dbg_kms(dev, "Failed to get FB format info\n");
- return -EINVAL;
- }
-
if (drm_drv_uses_atomic_modeset(dev) &&
!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -199,7 +196,7 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev,
}
}
- ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs);
+ ret = drm_gem_fb_init(dev, fb, info, mode_cmd, objs, i, funcs);
if (ret)
goto err_gem_object_put;
@@ -220,6 +217,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
* callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
@@ -232,6 +230,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
*/
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
@@ -242,7 +241,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
if (!fb)
return ERR_PTR(-ENOMEM);
- ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs);
+ ret = drm_gem_fb_init_with_funcs(dev, fb, file, info, mode_cmd, funcs);
if (ret) {
kfree(fb);
return ERR_PTR(ret);
@@ -262,6 +261,7 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
* &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
*
* This function creates a new framebuffer object described by
@@ -281,9 +281,10 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
*/
struct drm_framebuffer *
drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+ return drm_gem_fb_create_with_funcs(dev, file, info, mode_cmd,
&drm_gem_fb_funcs);
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create);
@@ -299,6 +300,7 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs_dirtyfb = {
* &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
*
* This function creates a new framebuffer object described by
@@ -319,9 +321,10 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs_dirtyfb = {
*/
struct drm_framebuffer *
drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+ return drm_gem_fb_create_with_funcs(dev, file, info, mode_cmd,
&drm_gem_fb_funcs_dirtyfb);
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
@@ -501,12 +504,9 @@ EXPORT_SYMBOL(drm_gem_fb_end_cpu_access);
// TODO Drop this function and replace by drm_format_info_bpp() once all
// DRM_FORMAT_* provide proper block info in drivers/gpu/drm/drm_fourcc.c
static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info;
-
- info = drm_get_format_info(dev, mode_cmd);
-
switch (info->format) {
case DRM_FORMAT_YUV420_8BIT:
return 12;
@@ -520,6 +520,7 @@ static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
}
static int drm_gem_afbc_min_size(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb)
{
@@ -560,7 +561,7 @@ static int drm_gem_afbc_min_size(struct drm_device *dev,
afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment);
afbc_fb->offset = mode_cmd->offsets[0];
- bpp = drm_gem_afbc_get_bpp(dev, mode_cmd);
+ bpp = drm_gem_afbc_get_bpp(dev, info, mode_cmd);
if (!bpp) {
drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp);
return -EINVAL;
@@ -582,6 +583,7 @@ static int drm_gem_afbc_min_size(struct drm_device *dev,
*
* @dev: DRM device
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @afbc_fb: afbc framebuffer
*
@@ -595,24 +597,24 @@ static int drm_gem_afbc_min_size(struct drm_device *dev,
* Zero on success or a negative error value on failure.
*/
int drm_gem_fb_afbc_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb)
{
- const struct drm_format_info *info;
struct drm_gem_object **objs;
int ret;
objs = afbc_fb->base.obj;
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return -EINVAL;
- ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb);
+ ret = drm_gem_afbc_min_size(dev, info, mode_cmd, afbc_fb);
if (ret < 0)
return ret;
- if (objs[0]->size < afbc_fb->afbc_size)
+ if (objs[0]->size < afbc_fb->afbc_size) {
+ drm_dbg_kms(dev, "GEM object size (%zu) smaller than minimum afbc size (%u)\n",
+ objs[0]->size, afbc_fb->afbc_size);
return -EINVAL;
+ }
return 0;
}
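
To illustrate the plumbing above, a hedged sketch of an AFBC-capable
fb_create callback under the new signature; foo_fb_funcs and the foo naming
are hypothetical, and error unwinding is abbreviated:

static struct drm_framebuffer *
foo_fb_create(struct drm_device *dev, struct drm_file *file,
	      const struct drm_format_info *info,
	      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_afbc_framebuffer *afbc_fb;
	int ret;

	afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
	if (!afbc_fb)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, info,
					 mode_cmd, &foo_fb_funcs);
	if (ret) {
		kfree(afbc_fb);
		return ERR_PTR(ret);
	}

	if (drm_is_afbc(mode_cmd->modifier[0])) {
		/* info is passed through; the helper no longer re-resolves it. */
		ret = drm_gem_fb_afbc_init(dev, info, mode_cmd, afbc_fb);
		if (ret) {
			drm_framebuffer_cleanup(&afbc_fb->base);
			kfree(afbc_fb); /* GEM refs also need dropping here */
			return ERR_PTR(ret);
		}
	}

	return &afbc_fb->base;
}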
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index a5dbee6974ab..5d1349c34afd 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -348,6 +348,8 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
+ dma_resv_assert_held(obj->resv);
+
if (drm_gem_is_imported(obj)) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
} else {
@@ -408,6 +410,8 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
{
struct drm_gem_object *obj = &shmem->base;
+ dma_resv_assert_held(obj->resv);
+
if (drm_gem_is_imported(obj)) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
@@ -800,6 +804,63 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
+/**
+ * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
+ * @dev: Device to import into
+ * @dma_buf: dma-buf object to import
+ *
+ * Drivers that use the shmem helpers but also want to import dma-bufs without
+ * mapping their sg_table can use this as their &drm_driver.gem_prime_import
+ * implementation.
+ */
+struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ size_t size;
+ int ret;
+
+ if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ obj = dma_buf->priv;
+ drm_gem_object_get(obj);
+ return obj;
+ }
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
+
+ size = PAGE_ALIGN(attach->dmabuf->size);
+
+ shmem = __drm_gem_shmem_create(dev, size, true, NULL);
+ if (IS_ERR(shmem)) {
+ ret = PTR_ERR(shmem);
+ goto fail_detach;
+ }
+
+ drm_dbg_prime(dev, "size = %zu\n", size);
+
+ shmem->base.import_attach = attach;
+ shmem->base.resv = dma_buf->resv;
+
+ return &shmem->base;
+
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
+
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL v2");
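
A possible hookup for the new import helper, assuming a shmem-based driver;
everything except the import hook itself is illustrative:

static const struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops = &foo_fops, /* hypothetical file_operations */
	DRM_GEM_SHMEM_DRIVER_OPS,
	/* Leave imported dma-bufs unmapped until the driver needs them. */
	.gem_prime_import = drm_gem_shmem_prime_import_no_map,
};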
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index 3734aa2d1c5b..257cca4cb97a 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/drm_gem_ttm_helper.h>
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 22b1fe9c03b8..b04cde4a60e7 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
@@ -88,11 +89,6 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
* drmm_vram_helper_init() is a managed interface that installs a
* clean-up handler to run during the DRM device's release.
*
- * For drawing or scanout operations, rsp. buffer objects have to be pinned
- * in video RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
- * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
- * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
- *
* A buffer object that is pinned in video RAM has a fixed address within that
* memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
* it's used to program the hardware's scanout engine for framebuffers, set
@@ -299,30 +295,7 @@ out:
return 0;
}
-/**
- * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
- * @gbo: the GEM VRAM object
- * @pl_flag: a bitmask of possible memory regions
- *
- * Pinning a buffer object ensures that it is not evicted from
- * a memory region. A pinned buffer object has to be unpinned before
- * it can be pinned to another region. If the pl_flag argument is 0,
- * the buffer is pinned at its current location (video RAM or system
- * memory).
- *
- * Small buffer objects, such as cursor images, can lead to memory
- * fragmentation if they are pinned in the middle of video RAM. This
- * is especially a problem on devices with only a small amount of
- * video RAM. Fragmentation can prevent the primary framebuffer from
- * fitting in, even though there's enough memory overall. The modifier
- * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
- * at the high end of the memory region to avoid fragmentation.
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
+static int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
int ret;
@@ -334,7 +307,6 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
return ret;
}
-EXPORT_SYMBOL(drm_gem_vram_pin);
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
@@ -343,15 +315,7 @@ static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
ttm_bo_unpin(&gbo->bo);
}
-/**
- * drm_gem_vram_unpin() - Unpins a GEM VRAM object
- * @gbo: the GEM VRAM object
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+static int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
int ret;
@@ -364,7 +328,6 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
return 0;
}
-EXPORT_SYMBOL(drm_gem_vram_unpin);
/**
* drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
@@ -690,41 +653,6 @@ EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
*/
/**
- * drm_gem_vram_object_pin() - Implements &struct drm_gem_object_funcs.pin
- * @gem: The GEM object to pin
- *
- * Returns:
- * 0 on success, or
- * a negative errno code otherwise.
- */
-static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
-{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
-
- /*
- * Fbdev console emulation is the use case of these PRIME
- * helpers. This may involve updating a hardware buffer from
- * a shadow FB. We pin the buffer to it's current location
- * (either video RAM or system memory) to prevent it from
- * being relocated during the update operation. If you require
- * the buffer to be pinned to VRAM, implement a callback that
- * sets the flags accordingly.
- */
- return drm_gem_vram_pin_locked(gbo, 0);
-}
-
-/**
- * drm_gem_vram_object_unpin() - Implements &struct drm_gem_object_funcs.unpin
- * @gem: The GEM object to unpin
- */
-static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
-{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
-
- drm_gem_vram_unpin_locked(gbo);
-}
-
-/**
* drm_gem_vram_object_vmap() -
* Implements &struct drm_gem_object_funcs.vmap
* @gem: The GEM object to map
@@ -762,8 +690,6 @@ static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
.free = drm_gem_vram_object_free,
- .pin = drm_gem_vram_object_pin,
- .unpin = drm_gem_vram_object_unpin,
.vmap = drm_gem_vram_object_vmap,
.vunmap = drm_gem_vram_object_vunmap,
.mmap = drm_gem_ttm_mmap,
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 735bfdf4322f..5bb4c77db2c3 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -7,11 +7,11 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/hmm.h>
+#include <linux/hugetlb_inline.h>
#include <linux/memremap.h>
-#include <linux/migrate.h>
#include <linux/mm_types.h>
-#include <linux/pagemap.h>
#include <linux/slab.h>
#include <drm/drm_device.h>
@@ -108,21 +108,6 @@
*/
/**
- * DOC: Migration
- *
- * The migration support is quite simple, allowing migration between RAM and
- * device memory at the range granularity. For example, GPU SVM currently does
- * not support mixing RAM and device memory pages within a range. This means
- * that upon GPU fault, the entire range can be migrated to device memory, and
- * upon CPU fault, the entire range is migrated to RAM. Mixed RAM and device
- * memory storage within a range could be added in the future if required.
- *
- * The reasoning for only supporting range granularity is as follows: it
- * simplifies the implementation, and range sizes are driver-defined and should
- * be relatively small.
- */
-
-/**
* DOC: Partial Unmapping of Ranges
*
* Partial unmapping of ranges (e.g., 1M out of 2M is unmapped by CPU resulting
@@ -191,12 +176,9 @@
* }
*
* if (driver_migration_policy(range)) {
- * mmap_read_lock(mm);
- * devmem = driver_alloc_devmem();
- * err = drm_gpusvm_migrate_to_devmem(gpusvm, range,
- * devmem_allocation,
- * &ctx);
- * mmap_read_unlock(mm);
+ * err = drm_pagemap_populate_mm(driver_choose_drm_pagemap(),
+ * gpuva_start, gpuva_end, gpusvm->mm,
+ * ctx->timeslice_ms);
* if (err) // CPU mappings may have changed
* goto retry;
* }
@@ -289,97 +271,6 @@ npages_in_range(unsigned long start, unsigned long end)
}
/**
- * struct drm_gpusvm_zdd - GPU SVM zone device data
- *
- * @refcount: Reference count for the zdd
- * @devmem_allocation: device memory allocation
- * @device_private_page_owner: Device private pages owner
- *
- * This structure serves as a generic wrapper installed in
- * page->zone_device_data. It provides infrastructure for looking up a device
- * memory allocation upon CPU page fault and asynchronously releasing device
- * memory once the CPU has no page references. Asynchronous release is useful
- * because CPU page references can be dropped in IRQ contexts, while releasing
- * device memory likely requires sleeping locks.
- */
-struct drm_gpusvm_zdd {
- struct kref refcount;
- struct drm_gpusvm_devmem *devmem_allocation;
- void *device_private_page_owner;
-};
-
-/**
- * drm_gpusvm_zdd_alloc() - Allocate a zdd structure.
- * @device_private_page_owner: Device private pages owner
- *
- * This function allocates and initializes a new zdd structure. It sets up the
- * reference count and initializes the destroy work.
- *
- * Return: Pointer to the allocated zdd on success, ERR_PTR() on failure.
- */
-static struct drm_gpusvm_zdd *
-drm_gpusvm_zdd_alloc(void *device_private_page_owner)
-{
- struct drm_gpusvm_zdd *zdd;
-
- zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
- if (!zdd)
- return NULL;
-
- kref_init(&zdd->refcount);
- zdd->devmem_allocation = NULL;
- zdd->device_private_page_owner = device_private_page_owner;
-
- return zdd;
-}
-
-/**
- * drm_gpusvm_zdd_get() - Get a reference to a zdd structure.
- * @zdd: Pointer to the zdd structure.
- *
- * This function increments the reference count of the provided zdd structure.
- *
- * Return: Pointer to the zdd structure.
- */
-static struct drm_gpusvm_zdd *drm_gpusvm_zdd_get(struct drm_gpusvm_zdd *zdd)
-{
- kref_get(&zdd->refcount);
- return zdd;
-}
-
-/**
- * drm_gpusvm_zdd_destroy() - Destroy a zdd structure.
- * @ref: Pointer to the reference count structure.
- *
- * This function queues the destroy_work of the zdd for asynchronous destruction.
- */
-static void drm_gpusvm_zdd_destroy(struct kref *ref)
-{
- struct drm_gpusvm_zdd *zdd =
- container_of(ref, struct drm_gpusvm_zdd, refcount);
- struct drm_gpusvm_devmem *devmem = zdd->devmem_allocation;
-
- if (devmem) {
- complete_all(&devmem->detached);
- if (devmem->ops->devmem_release)
- devmem->ops->devmem_release(devmem);
- }
- kfree(zdd);
-}
-
-/**
- * drm_gpusvm_zdd_put() - Put a zdd reference.
- * @zdd: Pointer to the zdd structure.
- *
- * This function decrements the reference count of the provided zdd structure
- * and schedules its destruction if the count drops to zero.
- */
-static void drm_gpusvm_zdd_put(struct drm_gpusvm_zdd *zdd)
-{
- kref_put(&zdd->refcount, drm_gpusvm_zdd_destroy);
-}
-
-/**
* drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
* @notifier: Pointer to the GPU SVM notifier structure.
* @start: Start address of the range
@@ -945,7 +836,7 @@ retry:
* process-many-malloc' fails. In the failure case, each process
* mallocs 16k but the CPU VMA is ~128k which results in 64k SVM
* ranges. When migrating the SVM ranges, some processes fail in
- * drm_gpusvm_migrate_to_devmem with 'migrate.cpages != npages'
+ * drm_pagemap_migrate_to_devmem with 'migrate.cpages != npages'
* and then upon drm_gpusvm_range_get_pages device pages from
* other processes are collected + faulted in which creates all
* sorts of problems. Unsure exactly how this is happening; also
@@ -981,6 +872,40 @@ static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
#endif
/**
+ * drm_gpusvm_find_vma_start() - Find start address for first VMA in range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @start: The inclusive start user address.
+ * @end: The exclusive end user address.
+ *
+ * Returns: The start address of the first VMA within the provided range,
+ * or ULONG_MAX if no VMA is found. Assumes start < end.
+ */
+unsigned long
+drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = gpusvm->mm;
+ struct vm_area_struct *vma;
+ unsigned long addr = ULONG_MAX;
+
+ if (!mmget_not_zero(mm))
+ return addr;
+
+ mmap_read_lock(mm);
+
+ vma = find_vma_intersection(mm, start, end);
+ if (vma)
+ addr = vma->vm_start;
+
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return addr;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_find_vma_start);
+
+/**
* drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @fault_addr: Fault address
@@ -1329,7 +1254,7 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
.dev_private_owner = gpusvm->device_private_page_owner,
};
struct mm_struct *mm = gpusvm->mm;
- struct drm_gpusvm_zdd *zdd;
+ void *zdd;
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
unsigned long i, j;
@@ -1412,6 +1337,7 @@ map_pages:
}
zdd = NULL;
+ pagemap = NULL;
num_dma_mapped = 0;
for (i = 0, j = 0; i < npages; ++j) {
struct page *page = hmm_pfn_to_page(pfns[i]);
@@ -1431,7 +1357,7 @@ map_pages:
}
pagemap = page_pgmap(page);
- dpagemap = zdd->devmem_allocation->dpagemap;
+ dpagemap = drm_pagemap_page_to_dpagemap(page);
if (drm_WARN_ON(gpusvm->drm, !dpagemap)) {
/*
* Raced. This is not supposed to happen
@@ -1455,7 +1381,7 @@ map_pages:
} else {
dma_addr_t addr;
- if (is_zone_device_page(page) || zdd) {
+ if (is_zone_device_page(page) || pagemap) {
err = -EOPNOTSUPP;
goto err_unmap;
}
@@ -1483,7 +1409,7 @@ map_pages:
flags.has_dma_mapping = true;
}
- if (zdd) {
+ if (pagemap) {
flags.has_devmem_pages = true;
range->dpagemap = dpagemap;
}
@@ -1511,6 +1437,7 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
/**
* drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @range: Pointer to the GPU SVM range structure
* @ctx: GPU SVM context
@@ -1541,562 +1468,11 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);
/**
- * drm_gpusvm_migration_unlock_put_page() - Put a migration page
- * @page: Pointer to the page to put
- *
- * This function unlocks and puts a page.
- */
-static void drm_gpusvm_migration_unlock_put_page(struct page *page)
-{
- unlock_page(page);
- put_page(page);
-}
-
-/**
- * drm_gpusvm_migration_unlock_put_pages() - Put migration pages
- * @npages: Number of pages
- * @migrate_pfn: Array of migrate page frame numbers
- *
- * This function unlocks and puts an array of pages.
- */
-static void drm_gpusvm_migration_unlock_put_pages(unsigned long npages,
- unsigned long *migrate_pfn)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i) {
- struct page *page;
-
- if (!migrate_pfn[i])
- continue;
-
- page = migrate_pfn_to_page(migrate_pfn[i]);
- drm_gpusvm_migration_unlock_put_page(page);
- migrate_pfn[i] = 0;
- }
-}
-
-/**
- * drm_gpusvm_get_devmem_page() - Get a reference to a device memory page
- * @page: Pointer to the page
- * @zdd: Pointer to the GPU SVM zone device data
- *
- * This function associates the given page with the specified GPU SVM zone
- * device data and initializes it for zone device usage.
- */
-static void drm_gpusvm_get_devmem_page(struct page *page,
- struct drm_gpusvm_zdd *zdd)
-{
- page->zone_device_data = drm_gpusvm_zdd_get(zdd);
- zone_device_page_init(page);
-}
-
-/**
- * drm_gpusvm_migrate_map_pages() - Map migration pages for GPU SVM migration
- * @dev: The device for which the pages are being mapped
- * @dma_addr: Array to store DMA addresses corresponding to mapped pages
- * @migrate_pfn: Array of migrate page frame numbers to map
- * @npages: Number of pages to map
- * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
- *
- * This function maps pages of memory for migration usage in GPU SVM. It
- * iterates over each page frame number provided in @migrate_pfn, maps the
- * corresponding page, and stores the DMA address in the provided @dma_addr
- * array.
- *
- * Return: 0 on success, -EFAULT if an error occurs during mapping.
- */
-static int drm_gpusvm_migrate_map_pages(struct device *dev,
- dma_addr_t *dma_addr,
- unsigned long *migrate_pfn,
- unsigned long npages,
- enum dma_data_direction dir)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i) {
- struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
-
- if (!page)
- continue;
-
- if (WARN_ON_ONCE(is_zone_device_page(page)))
- return -EFAULT;
-
- dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
- if (dma_mapping_error(dev, dma_addr[i]))
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * drm_gpusvm_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
- * @dev: The device for which the pages were mapped
- * @dma_addr: Array of DMA addresses corresponding to mapped pages
- * @npages: Number of pages to unmap
- * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
- *
- * This function unmaps previously mapped pages of memory for GPU Shared Virtual
- * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks
- * if it's valid and not already unmapped, and unmaps the corresponding page.
- */
-static void drm_gpusvm_migrate_unmap_pages(struct device *dev,
- dma_addr_t *dma_addr,
- unsigned long npages,
- enum dma_data_direction dir)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i) {
- if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
- continue;
-
- dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
- }
-}
-
-/**
- * drm_gpusvm_migrate_to_devmem() - Migrate GPU SVM range to device memory
+ * drm_gpusvm_range_evict() - Evict GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
- * @range: Pointer to the GPU SVM range structure
- * @devmem_allocation: Pointer to the device memory allocation. The caller
- * should hold a reference to the device memory allocation,
- * which should be dropped via ops->devmem_release or upon
- * the failure of this function.
- * @ctx: GPU SVM context
- *
- * This function migrates the specified GPU SVM range to device memory. It
- * performs the necessary setup and invokes the driver-specific operations for
- * migration to device memory. Upon successful return, @devmem_allocation can
- * safely reference @range until ops->devmem_release is called which only upon
- * successful return. Expected to be called while holding the mmap lock in read
- * mode.
- *
- * Return: 0 on success, negative error code on failure.
- */
-int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range,
- struct drm_gpusvm_devmem *devmem_allocation,
- const struct drm_gpusvm_ctx *ctx)
-{
- const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
- unsigned long start = drm_gpusvm_range_start(range),
- end = drm_gpusvm_range_end(range);
- struct migrate_vma migrate = {
- .start = start,
- .end = end,
- .pgmap_owner = gpusvm->device_private_page_owner,
- .flags = MIGRATE_VMA_SELECT_SYSTEM,
- };
- struct mm_struct *mm = gpusvm->mm;
- unsigned long i, npages = npages_in_range(start, end);
- struct vm_area_struct *vas;
- struct drm_gpusvm_zdd *zdd = NULL;
- struct page **pages;
- dma_addr_t *dma_addr;
- void *buf;
- int err;
-
- mmap_assert_locked(gpusvm->mm);
-
- if (!range->flags.migrate_devmem)
- return -EINVAL;
-
- if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
- !ops->copy_to_ram)
- return -EOPNOTSUPP;
-
- vas = vma_lookup(mm, start);
- if (!vas) {
- err = -ENOENT;
- goto err_out;
- }
-
- if (end > vas->vm_end || start < vas->vm_start) {
- err = -EINVAL;
- goto err_out;
- }
-
- if (!vma_is_anonymous(vas)) {
- err = -EBUSY;
- goto err_out;
- }
-
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
- sizeof(*pages), GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
-
- zdd = drm_gpusvm_zdd_alloc(gpusvm->device_private_page_owner);
- if (!zdd) {
- err = -ENOMEM;
- goto err_free;
- }
-
- migrate.vma = vas;
- migrate.src = buf;
- migrate.dst = migrate.src + npages;
-
- err = migrate_vma_setup(&migrate);
- if (err)
- goto err_free;
-
- if (!migrate.cpages) {
- err = -EFAULT;
- goto err_free;
- }
-
- if (migrate.cpages != npages) {
- err = -EBUSY;
- goto err_finalize;
- }
-
- err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
- if (err)
- goto err_finalize;
-
- err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
- migrate.src, npages, DMA_TO_DEVICE);
- if (err)
- goto err_finalize;
-
- for (i = 0; i < npages; ++i) {
- struct page *page = pfn_to_page(migrate.dst[i]);
-
- pages[i] = page;
- migrate.dst[i] = migrate_pfn(migrate.dst[i]);
- drm_gpusvm_get_devmem_page(page, zdd);
- }
-
- err = ops->copy_to_devmem(pages, dma_addr, npages);
- if (err)
- goto err_finalize;
-
- /* Upon success bind devmem allocation to range and zdd */
- devmem_allocation->timeslice_expiration = get_jiffies_64() +
- msecs_to_jiffies(ctx->timeslice_ms);
- zdd->devmem_allocation = devmem_allocation; /* Owns ref */
-
-err_finalize:
- if (err)
- drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
- migrate_vma_pages(&migrate);
- migrate_vma_finalize(&migrate);
- drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
- DMA_TO_DEVICE);
-err_free:
- if (zdd)
- drm_gpusvm_zdd_put(zdd);
- kvfree(buf);
-err_out:
- return err;
-}
-EXPORT_SYMBOL_GPL(drm_gpusvm_migrate_to_devmem);
-
-/**
- * drm_gpusvm_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
- * @vas: Pointer to the VM area structure, can be NULL
- * @fault_page: Fault page
- * @npages: Number of pages to populate
- * @mpages: Number of pages to migrate
- * @src_mpfn: Source array of migrate PFNs
- * @mpfn: Array of migrate PFNs to populate
- * @addr: Start address for PFN allocation
- *
- * This function populates the RAM migrate page frame numbers (PFNs) for the
- * specified VM area structure. It allocates and locks pages in the VM area for
- * RAM usage. If vas is non-NULL use alloc_page_vma for allocation, if NULL use
- * alloc_page for allocation.
- *
- * Return: 0 on success, negative error code on failure.
- */
-static int drm_gpusvm_migrate_populate_ram_pfn(struct vm_area_struct *vas,
- struct page *fault_page,
- unsigned long npages,
- unsigned long *mpages,
- unsigned long *src_mpfn,
- unsigned long *mpfn,
- unsigned long addr)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
- struct page *page, *src_page;
-
- if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- src_page = migrate_pfn_to_page(src_mpfn[i]);
- if (!src_page)
- continue;
-
- if (fault_page) {
- if (src_page->zone_device_data !=
- fault_page->zone_device_data)
- continue;
- }
-
- if (vas)
- page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
- else
- page = alloc_page(GFP_HIGHUSER);
-
- if (!page)
- goto free_pages;
-
- mpfn[i] = migrate_pfn(page_to_pfn(page));
- }
-
- for (i = 0; i < npages; ++i) {
- struct page *page = migrate_pfn_to_page(mpfn[i]);
-
- if (!page)
- continue;
-
- WARN_ON_ONCE(!trylock_page(page));
- ++*mpages;
- }
-
- return 0;
-
-free_pages:
- for (i = 0; i < npages; ++i) {
- struct page *page = migrate_pfn_to_page(mpfn[i]);
-
- if (!page)
- continue;
-
- put_page(page);
- mpfn[i] = 0;
- }
- return -ENOMEM;
-}
-
-/**
- * drm_gpusvm_evict_to_ram() - Evict GPU SVM range to RAM
- * @devmem_allocation: Pointer to the device memory allocation
- *
- * Similar to __drm_gpusvm_migrate_to_ram but does not require mmap lock and
- * migration done via migrate_device_* functions.
- *
- * Return: 0 on success, negative error code on failure.
- */
-int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation)
-{
- const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
- unsigned long npages, mpages = 0;
- struct page **pages;
- unsigned long *src, *dst;
- dma_addr_t *dma_addr;
- void *buf;
- int i, err = 0;
- unsigned int retry_count = 2;
-
- npages = devmem_allocation->size >> PAGE_SHIFT;
-
-retry:
- if (!mmget_not_zero(devmem_allocation->mm))
- return -EFAULT;
-
- buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
- sizeof(*pages), GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- src = buf;
- dst = buf + (sizeof(*src) * npages);
- dma_addr = buf + (2 * sizeof(*src) * npages);
- pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
-
- err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
- if (err)
- goto err_free;
-
- err = migrate_device_pfns(src, npages);
- if (err)
- goto err_free;
-
- err = drm_gpusvm_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
- src, dst, 0);
- if (err || !mpages)
- goto err_finalize;
-
- err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
- dst, npages, DMA_FROM_DEVICE);
- if (err)
- goto err_finalize;
-
- for (i = 0; i < npages; ++i)
- pages[i] = migrate_pfn_to_page(src[i]);
-
- err = ops->copy_to_ram(pages, dma_addr, npages);
- if (err)
- goto err_finalize;
-
-err_finalize:
- if (err)
- drm_gpusvm_migration_unlock_put_pages(npages, dst);
- migrate_device_pages(src, dst, npages);
- migrate_device_finalize(src, dst, npages);
- drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
- DMA_FROM_DEVICE);
-err_free:
- kvfree(buf);
-err_out:
- mmput_async(devmem_allocation->mm);
-
- if (completion_done(&devmem_allocation->detached))
- return 0;
-
- if (retry_count--) {
- cond_resched();
- goto retry;
- }
-
- return err ?: -EBUSY;
-}
-EXPORT_SYMBOL_GPL(drm_gpusvm_evict_to_ram);
-
-/**
- * __drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
- * @vas: Pointer to the VM area structure
- * @device_private_page_owner: Device private pages owner
- * @page: Pointer to the page for fault handling (can be NULL)
- * @fault_addr: Fault address
- * @size: Size of migration
- *
- * This internal function performs the migration of the specified GPU SVM range
- * to RAM. It sets up the migration, populates + dma maps RAM PFNs, and
- * invokes the driver-specific operations for migration to RAM.
- *
- * Return: 0 on success, negative error code on failure.
- */
-static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas,
- void *device_private_page_owner,
- struct page *page,
- unsigned long fault_addr,
- unsigned long size)
-{
- struct migrate_vma migrate = {
- .vma = vas,
- .pgmap_owner = device_private_page_owner,
- .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
- MIGRATE_VMA_SELECT_DEVICE_COHERENT,
- .fault_page = page,
- };
- struct drm_gpusvm_zdd *zdd;
- const struct drm_gpusvm_devmem_ops *ops;
- struct device *dev = NULL;
- unsigned long npages, mpages = 0;
- struct page **pages;
- dma_addr_t *dma_addr;
- unsigned long start, end;
- void *buf;
- int i, err = 0;
-
- if (page) {
- zdd = page->zone_device_data;
- if (time_before64(get_jiffies_64(),
- zdd->devmem_allocation->timeslice_expiration))
- return 0;
- }
-
- start = ALIGN_DOWN(fault_addr, size);
- end = ALIGN(fault_addr + 1, size);
-
- /* Corner where VMA area struct has been partially unmapped */
- if (start < vas->vm_start)
- start = vas->vm_start;
- if (end > vas->vm_end)
- end = vas->vm_end;
-
- migrate.start = start;
- migrate.end = end;
- npages = npages_in_range(start, end);
-
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
- sizeof(*pages), GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
-
- migrate.vma = vas;
- migrate.src = buf;
- migrate.dst = migrate.src + npages;
-
- err = migrate_vma_setup(&migrate);
- if (err)
- goto err_free;
-
- /* Raced with another CPU fault, nothing to do */
- if (!migrate.cpages)
- goto err_free;
-
- if (!page) {
- for (i = 0; i < npages; ++i) {
- if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- page = migrate_pfn_to_page(migrate.src[i]);
- break;
- }
-
- if (!page)
- goto err_finalize;
- }
- zdd = page->zone_device_data;
- ops = zdd->devmem_allocation->ops;
- dev = zdd->devmem_allocation->dev;
-
- err = drm_gpusvm_migrate_populate_ram_pfn(vas, page, npages, &mpages,
- migrate.src, migrate.dst,
- start);
- if (err)
- goto err_finalize;
-
- err = drm_gpusvm_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
- DMA_FROM_DEVICE);
- if (err)
- goto err_finalize;
-
- for (i = 0; i < npages; ++i)
- pages[i] = migrate_pfn_to_page(migrate.src[i]);
-
- err = ops->copy_to_ram(pages, dma_addr, npages);
- if (err)
- goto err_finalize;
-
-err_finalize:
- if (err)
- drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
- migrate_vma_pages(&migrate);
- migrate_vma_finalize(&migrate);
- if (dev)
- drm_gpusvm_migrate_unmap_pages(dev, dma_addr, npages,
- DMA_FROM_DEVICE);
-err_free:
- kvfree(buf);
-err_out:
-
- return err;
-}
-
-/**
- * drm_gpusvm_range_evict - Evict GPU SVM range
* @range: Pointer to the GPU SVM range to be removed
*
- * This function evicts the specified GPU SVM range. This function will not
- * evict coherent pages.
+ * This function evicts the specified GPU SVM range.
*
* Return: 0 on success, a negative error code on failure.
*/
@@ -2149,60 +1525,6 @@ int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
EXPORT_SYMBOL_GPL(drm_gpusvm_range_evict);
/**
- * drm_gpusvm_page_free() - Put GPU SVM zone device data associated with a page
- * @page: Pointer to the page
- *
- * This function is a callback used to put the GPU SVM zone device data
- * associated with a page when it is being released.
- */
-static void drm_gpusvm_page_free(struct page *page)
-{
- drm_gpusvm_zdd_put(page->zone_device_data);
-}
-
-/**
- * drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (page fault handler)
- * @vmf: Pointer to the fault information structure
- *
- * This function is a page fault handler used to migrate a GPU SVM range to RAM.
- * It retrieves the GPU SVM range information from the faulting page and invokes
- * the internal migration function to migrate the range back to RAM.
- *
- * Return: VM_FAULT_SIGBUS on failure, 0 on success.
- */
-static vm_fault_t drm_gpusvm_migrate_to_ram(struct vm_fault *vmf)
-{
- struct drm_gpusvm_zdd *zdd = vmf->page->zone_device_data;
- int err;
-
- err = __drm_gpusvm_migrate_to_ram(vmf->vma,
- zdd->device_private_page_owner,
- vmf->page, vmf->address,
- zdd->devmem_allocation->size);
-
- return err ? VM_FAULT_SIGBUS : 0;
-}
-
-/*
- * drm_gpusvm_pagemap_ops - Device page map operations for GPU SVM
- */
-static const struct dev_pagemap_ops drm_gpusvm_pagemap_ops = {
- .page_free = drm_gpusvm_page_free,
- .migrate_to_ram = drm_gpusvm_migrate_to_ram,
-};
-
-/**
- * drm_gpusvm_pagemap_ops_get() - Retrieve GPU SVM device page map operations
- *
- * Return: Pointer to the GPU SVM device page map operations structure.
- */
-const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void)
-{
- return &drm_gpusvm_pagemap_ops;
-}
-EXPORT_SYMBOL_GPL(drm_gpusvm_pagemap_ops_get);
-
-/**
* drm_gpusvm_has_mapping() - Check if GPU SVM has mapping for the given address range
* @gpusvm: Pointer to the GPU SVM structure.
* @start: Start address
@@ -2246,28 +1568,5 @@ void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped);
-/**
- * drm_gpusvm_devmem_init() - Initialize a GPU SVM device memory allocation
- *
- * @dev: Pointer to the device structure which device memory allocation belongs to
- * @mm: Pointer to the mm_struct for the address space
- * @ops: Pointer to the operations structure for GPU SVM device memory
- * @dpagemap: The struct drm_pagemap we're allocating from.
- * @size: Size of device memory allocation
- */
-void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
- struct device *dev, struct mm_struct *mm,
- const struct drm_gpusvm_devmem_ops *ops,
- struct drm_pagemap *dpagemap, size_t size)
-{
- init_completion(&devmem_allocation->detached);
- devmem_allocation->dev = dev;
- devmem_allocation->mm = mm;
- devmem_allocation->ops = ops;
- devmem_allocation->dpagemap = dpagemap;
- devmem_allocation->size = size;
-}
-EXPORT_SYMBOL_GPL(drm_gpusvm_devmem_init);
-
MODULE_DESCRIPTION("DRM GPUSVM");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index f9eb56f24bef..bbc7fecb6f4a 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -27,6 +27,7 @@
#include <drm/drm_gpuvm.h>
+#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/mm.h>
@@ -2299,13 +2300,13 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
}
/**
- * drm_gpuvm_sm_map() - creates the &drm_gpuva_op split/merge steps
+ * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
* @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @priv: pointer to a driver private data structure
* @req_addr: the start address of the new mapping
* @req_range: the range of the new mapping
* @req_obj: the &drm_gem_object to map
* @req_offset: the offset within the &drm_gem_object
- * @priv: pointer to a driver private data structure
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2349,7 +2350,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
/**
- * drm_gpuvm_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
+ * drm_gpuvm_sm_unmap() - calls the &drm_gpuva_ops to split on unmap
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
* @req_addr: the start address of the range to unmap
@@ -2390,6 +2391,132 @@ drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
+static int
+drm_gpuva_sm_step_lock(struct drm_gpuva_op *op, void *priv)
+{
+ struct drm_exec *exec = priv;
+
+ switch (op->op) {
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.unmap->va->gem.obj)
+ return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj);
+ return 0;
+ case DRM_GPUVA_OP_UNMAP:
+ if (op->unmap.va->gem.obj)
+ return drm_exec_lock_obj(exec, op->unmap.va->gem.obj);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static const struct drm_gpuvm_ops lock_ops = {
+ .sm_step_map = drm_gpuva_sm_step_lock,
+ .sm_step_remap = drm_gpuva_sm_step_lock,
+ .sm_step_unmap = drm_gpuva_sm_step_lock,
+};
+
+/**
+ * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by a drm_gpuvm_sm_map()
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @exec: the &drm_exec locking context
+ * @num_fences: for newly mapped objects, the # of fences to reserve
+ * @req_addr: the start address of the new mapping
+ * @req_range: the range of the new mapping
+ * @req_obj: the &drm_gem_object to map
+ * @req_offset: the offset within the &drm_gem_object
+ *
+ * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
+ * remapped, and locks+prepares (drm_exec_prepare_obj()) objects that
+ * will be newly mapped.
+ *
+ * The expected usage is:
+ *
+ * vm_bind {
+ * struct drm_exec exec;
+ *
+ * // IGNORE_DUPLICATES is required, INTERRUPTIBLE_WAIT is recommended:
+ * drm_exec_init(&exec, IGNORE_DUPLICATES | INTERRUPTIBLE_WAIT, 0);
+ *
+ * drm_exec_until_all_locked (&exec) {
+ * for_each_vm_bind_operation {
+ * switch (op->op) {
+ * case DRIVER_OP_UNMAP:
+ * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
+ * break;
+ * case DRIVER_OP_MAP:
+ * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
+ * op->addr, op->range,
+ * obj, op->obj_offset);
+ * break;
+ * }
+ *
+ * drm_exec_retry_on_contention(&exec);
+ * if (ret)
+ * return ret;
+ * }
+ * }
+ * }
+ *
+ * This enables all locking to be performed before the driver begins modifying
+ * the VM. This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs,
+ * where an earlier op can alter the sequence of steps generated for a later
+ * op, because the later altered step will involve the same GEM object(s)
+ * already seen in the earlier locking step. For example:
+ *
+ * 1) An earlier driver DRIVER_OP_UNMAP op removes the need for a
+ * DRM_GPUVA_OP_REMAP/UNMAP step. This is safe because we've already
+ * locked the GEM object in the earlier DRIVER_OP_UNMAP op.
+ *
+ * 2) An earlier DRIVER_OP_MAP op overlaps with a later DRIVER_OP_MAP/UNMAP
+ * op, introducing a DRM_GPUVA_OP_REMAP/UNMAP that wouldn't have been
+ * required without the earlier DRIVER_OP_MAP. This is safe because we've
+ * already locked the GEM object in the earlier DRIVER_OP_MAP step.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec, unsigned int num_fences,
+ u64 req_addr, u64 req_range,
+ struct drm_gem_object *req_obj, u64 req_offset)
+{
+ if (req_obj) {
+ int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
+ if (ret)
+ return ret;
+ }
+
+ return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
+ req_addr, req_range,
+ req_obj, req_offset);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
+
+/**
+ * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap()
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @exec: the &drm_exec locking context
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
+ * remapped by drm_gpuvm_sm_unmap().
+ *
+ * See drm_gpuvm_sm_map_exec_lock() for expected usage.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+ u64 req_addr, u64 req_range)
+{
+ return __drm_gpuvm_sm_unmap(gpuvm, &lock_ops, exec,
+ req_addr, req_range);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_exec_lock);
+
static struct drm_gpuva_op *
gpuva_op_alloc(struct drm_gpuvm *gpuvm)
{
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 60c282881958..e79c3c623c9a 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -177,10 +177,6 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj);
-int drm_gem_pin_locked(struct drm_gem_object *obj);
-void drm_gem_unpin_locked(struct drm_gem_object *obj);
-int drm_gem_pin(struct drm_gem_object *obj);
-void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
@@ -188,8 +184,7 @@ void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
#if defined(CONFIG_DEBUG_FS)
void drm_debugfs_dev_fini(struct drm_device *dev);
void drm_debugfs_dev_register(struct drm_device *dev);
-int drm_debugfs_register(struct drm_minor *minor, int minor_id,
- struct dentry *root);
+int drm_debugfs_register(struct drm_minor *minor, int minor_id);
void drm_debugfs_unregister(struct drm_minor *minor);
void drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
@@ -207,8 +202,7 @@ static inline void drm_debugfs_dev_register(struct drm_device *dev)
{
}
-static inline int drm_debugfs_register(struct drm_minor *minor, int minor_id,
- struct dentry *root)
+static inline int drm_debugfs_register(struct drm_minor *minor, int minor_id)
{
return 0;
}
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index cc4c463daae7..247f468731de 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -7,6 +7,7 @@
#include <drm/drm_managed.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index a4cd476f9b30..e33c78fc8fbd 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -8,6 +8,7 @@
#include <linux/backlight.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
@@ -229,7 +230,13 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *
case DRM_FORMAT_XRGB8888:
switch (dbidev->pixel_format) {
case DRM_FORMAT_RGB565:
- drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, fmtcnv_state, swap);
+ if (swap) {
+ drm_fb_xrgb8888_to_rgb565be(&dst_map, NULL, src, fb, clip,
+ fmtcnv_state);
+ } else {
+ drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip,
+ fmtcnv_state);
+ }
break;
case DRM_FORMAT_RGB888:
drm_fb_xrgb8888_to_rgb888(&dst_map, NULL, src, fb, clip, fmtcnv_state);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 21fd647f8ce1..3a9b3278a6e3 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -26,6 +26,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index b4239fd04e9d..25f376869b3a 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 5565464c1734..988735560570 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -20,6 +20,8 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_fourcc.h>
@@ -72,6 +74,7 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
* drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
* @dev: DRM device
* @fb: drm_framebuffer object to fill out
+ * @info: pixel format information
* @mode_cmd: metadata from the userspace fb creation request
*
* This helper can be used in a driver's fb_create callback to pre-fill the fb's
@@ -79,12 +82,13 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
*/
void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
int i;
fb->dev = dev;
- fb->format = drm_get_format_info(dev, mode_cmd);
+ fb->format = info;
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
for (i = 0; i < 4; i++) {
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 7694b85e75e3..beb91a13a312 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -21,6 +21,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
new file mode 100644
index 000000000000..1da55322af12
--- /dev/null
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright © 2024-2025 Intel Corporation
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/migrate.h>
+#include <linux/pagemap.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_pagemap.h>
+
+/**
+ * DOC: Overview
+ *
+ * The DRM pagemap layer is intended to augment the dev_pagemap functionality by
+ * providing a way to populate a struct mm_struct virtual range with device
+ * private pages and to provide helpers to abstract device memory allocations,
+ * to migrate memory back and forth between device memory and system RAM and
+ * to handle access (and in the future migration) between devices implementing
+ * a fast interconnect that is not necessarily visible to the rest of the
+ * system.
+ *
+ * Typically the DRM pagemap receives requests from one or more DRM GPU SVM
+ * instances to populate struct mm_struct virtual ranges with memory; the
+ * migration is best effort only and may thus fail. The implementation should
+ * also handle device unbinding by blocking (returning an -ENODEV error) any
+ * new population requests and, after that, migrating all device pages to
+ * system RAM.
+ */
+
+/**
+ * DOC: Migration
+ *
+ * Migration granularity typically follows the GPU SVM range requests. If
+ * clashes occur, because of races or because multiple GPU SVM instances
+ * have different views of the ranges used, parts of a requested range may
+ * already be present in the requested device memory. The implementation then
+ * has a variety of options: it can fail, it can populate only the part of
+ * the range that isn't already in device memory, or it can evict the range
+ * to system memory before trying to migrate. Ideally an
+ * implementation would just try to migrate the missing part of the range and
+ * allocate just enough memory to do so.
+ *
+ * When migrating to system memory in response to a CPU fault or a device
+ * memory eviction request, currently the full device memory allocation is
+ * migrated back to system RAM. Moving forward this might need improvement for
+ * situations where a single page needs bouncing between system memory and
+ * device memory due to, for example, atomic operations.
+ *
+ * Key DRM pagemap components:
+ *
+ * - Device Memory Allocations:
+ * Embedded structure containing enough information for the drm_pagemap to
+ * migrate to / from device memory.
+ *
+ * - Device Memory Operations:
+ * Define the interface for driver-specific device memory operations to
+ * release memory, populate pfns, and copy to / from device memory.
+ */
+
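
As a hedged illustration of the ops contract sketched in the component list
above, a driver-side skeleton might look as follows; the callback signatures
are inferred from how this file invokes them, and all foo_* bodies are
hypothetical stubs:

static void foo_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
{
	/* Release the driver allocation backing @devmem_allocation. */
}

static int foo_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
				   unsigned long npages, unsigned long *pfn)
{
	/* Fill @pfn with the device pages backing the allocation. */
	return 0;
}

static int foo_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
			      unsigned long npages)
{
	/* Copy from the DMA-mapped system pages into device memory. */
	return 0;
}

static int foo_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
			   unsigned long npages)
{
	/* Copy from device memory back into the DMA-mapped system pages. */
	return 0;
}

static const struct drm_pagemap_devmem_ops foo_devmem_ops = {
	.devmem_release = foo_devmem_release,
	.populate_devmem_pfn = foo_populate_devmem_pfn,
	.copy_to_devmem = foo_copy_to_devmem,
	.copy_to_ram = foo_copy_to_ram,
};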
+/**
+ * struct drm_pagemap_zdd - GPU SVM zone device data
+ *
+ * @refcount: Reference count for the zdd
+ * @devmem_allocation: device memory allocation
+ * @device_private_page_owner: Device private pages owner
+ *
+ * This structure serves as a generic wrapper installed in
+ * page->zone_device_data. It provides infrastructure for looking up a device
+ * memory allocation upon CPU page fault and asynchronously releasing device
+ * memory once the CPU has no page references. Asynchronous release is useful
+ * because CPU page references can be dropped in IRQ contexts, while releasing
+ * device memory likely requires sleeping locks.
+ */
+struct drm_pagemap_zdd {
+ struct kref refcount;
+ struct drm_pagemap_devmem *devmem_allocation;
+ void *device_private_page_owner;
+};
+
+/**
+ * drm_pagemap_zdd_alloc() - Allocate a zdd structure.
+ * @device_private_page_owner: Device private pages owner
+ *
+ * This function allocates and initializes a new zdd structure. It sets up the
+ * reference count and initializes the destroy work.
+ *
+ * Return: Pointer to the allocated zdd on success, ERR_PTR() on failure.
+ */
+static struct drm_pagemap_zdd *
+drm_pagemap_zdd_alloc(void *device_private_page_owner)
+{
+ struct drm_pagemap_zdd *zdd;
+
+ zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
+ if (!zdd)
+ return NULL;
+
+ kref_init(&zdd->refcount);
+ zdd->devmem_allocation = NULL;
+ zdd->device_private_page_owner = device_private_page_owner;
+
+ return zdd;
+}
+
+/**
+ * drm_pagemap_zdd_get() - Get a reference to a zdd structure.
+ * @zdd: Pointer to the zdd structure.
+ *
+ * This function increments the reference count of the provided zdd structure.
+ *
+ * Return: Pointer to the zdd structure.
+ */
+static struct drm_pagemap_zdd *drm_pagemap_zdd_get(struct drm_pagemap_zdd *zdd)
+{
+ kref_get(&zdd->refcount);
+ return zdd;
+}
+
+/**
+ * drm_pagemap_zdd_destroy() - Destroy a zdd structure.
+ * @ref: Pointer to the reference count structure.
+ *
+ * This function releases the zdd's device memory allocation, if any, and
+ * frees the zdd structure.
+ */
+static void drm_pagemap_zdd_destroy(struct kref *ref)
+{
+ struct drm_pagemap_zdd *zdd =
+ container_of(ref, struct drm_pagemap_zdd, refcount);
+ struct drm_pagemap_devmem *devmem = zdd->devmem_allocation;
+
+ if (devmem) {
+ complete_all(&devmem->detached);
+ if (devmem->ops->devmem_release)
+ devmem->ops->devmem_release(devmem);
+ }
+ kfree(zdd);
+}
+
+/**
+ * drm_pagemap_zdd_put() - Put a zdd reference.
+ * @zdd: Pointer to the zdd structure.
+ *
+ * This function decrements the reference count of the provided zdd structure
+ * and destroys it once the count drops to zero.
+ */
+static void drm_pagemap_zdd_put(struct drm_pagemap_zdd *zdd)
+{
+ kref_put(&zdd->refcount, drm_pagemap_zdd_destroy);
+}
+
+/**
+ * drm_pagemap_migration_unlock_put_page() - Put a migration page
+ * @page: Pointer to the page to put
+ *
+ * This function unlocks and puts a page.
+ */
+static void drm_pagemap_migration_unlock_put_page(struct page *page)
+{
+ unlock_page(page);
+ put_page(page);
+}
+
+/**
+ * drm_pagemap_migration_unlock_put_pages() - Put migration pages
+ * @npages: Number of pages
+ * @migrate_pfn: Array of migrate page frame numbers
+ *
+ * This function unlocks and puts an array of pages.
+ */
+static void drm_pagemap_migration_unlock_put_pages(unsigned long npages,
+ unsigned long *migrate_pfn)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page;
+
+ if (!migrate_pfn[i])
+ continue;
+
+ page = migrate_pfn_to_page(migrate_pfn[i]);
+ drm_pagemap_migration_unlock_put_page(page);
+ migrate_pfn[i] = 0;
+ }
+}
+
+/**
+ * drm_pagemap_get_devmem_page() - Get a reference to a device memory page
+ * @page: Pointer to the page
+ * @zdd: Pointer to the GPU SVM zone device data
+ *
+ * This function associates the given page with the specified GPU SVM zone
+ * device data and initializes it for zone device usage.
+ */
+static void drm_pagemap_get_devmem_page(struct page *page,
+ struct drm_pagemap_zdd *zdd)
+{
+ page->zone_device_data = drm_pagemap_zdd_get(zdd);
+ zone_device_page_init(page);
+}
+
+/**
+ * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
+ * @dev: The device for which the pages are being mapped
+ * @dma_addr: Array to store DMA addresses corresponding to mapped pages
+ * @migrate_pfn: Array of migrate page frame numbers to map
+ * @npages: Number of pages to map
+ * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ *
+ * This function maps pages of memory for migration usage in GPU SVM. It
+ * iterates over each page frame number provided in @migrate_pfn, maps the
+ * corresponding page, and stores the DMA address in the provided @dma_addr
+ * array.
+ *
+ * Returns: 0 on success, -EFAULT if an error occurs during mapping.
+ */
+static int drm_pagemap_migrate_map_pages(struct device *dev,
+ dma_addr_t *dma_addr,
+ unsigned long *migrate_pfn,
+ unsigned long npages,
+ enum dma_data_direction dir)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
+
+ if (!page)
+ continue;
+
+ if (WARN_ON_ONCE(is_zone_device_page(page)))
+ return -EFAULT;
+
+ dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+ if (dma_mapping_error(dev, dma_addr[i]))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
+ * @dev: The device for which the pages were mapped
+ * @dma_addr: Array of DMA addresses corresponding to mapped pages
+ * @npages: Number of pages to unmap
+ * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ *
+ * This function unmaps previously mapped pages of memory for GPU Shared Virtual
+ * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks
+ * if it's valid and not already unmapped, and unmaps the corresponding page.
+ */
+static void drm_pagemap_migrate_unmap_pages(struct device *dev,
+ dma_addr_t *dma_addr,
+ unsigned long npages,
+ enum dma_data_direction dir)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
+ continue;
+
+ dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
+ }
+}
+
+static unsigned long
+npages_in_range(unsigned long start, unsigned long end)
+{
+ return (end - start) >> PAGE_SHIFT;
+}
+
+/**
+ * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
+ * @devmem_allocation: The device memory allocation to migrate to.
+ * The caller should hold a reference to the device memory allocation,
+ * and the reference is consumed by this function unless it returns with
+ * an error.
+ * @mm: Pointer to the struct mm_struct.
+ * @start: Start of the virtual address range to migrate.
+ * @end: End of the virtual address range to migrate.
+ * @timeslice_ms: The time requested for the migrated pagemap pages to
+ * be present in @mm before being allowed to be migrated back.
+ * @pgmap_owner: Not used currently, since only system memory is considered.
+ *
+ * This function migrates the specified virtual address range to device memory.
+ * It performs the necessary setup and invokes the driver-specific operations for
+ * migration to device memory. Expected to be called while holding the mmap lock in
+ * at least read mode.
+ *
+ * Note: The @timeslice_ms parameter can typically be used to force data to
+ * remain in pagemap pages long enough for a GPU to perform a task and to prevent
+ * a migration livelock. One alternative would be for the GPU driver to block
+ * in a mmu_notifier for the specified amount of time, but adding the
+ * functionality to the pagemap is likely nicer to the system as a whole.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ unsigned long timeslice_ms,
+ void *pgmap_owner)
+{
+ const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
+ struct migrate_vma migrate = {
+ .start = start,
+ .end = end,
+ .pgmap_owner = pgmap_owner,
+ .flags = MIGRATE_VMA_SELECT_SYSTEM,
+ };
+ unsigned long i, npages = npages_in_range(start, end);
+ struct vm_area_struct *vas;
+ struct drm_pagemap_zdd *zdd = NULL;
+ struct page **pages;
+ dma_addr_t *dma_addr;
+ void *buf;
+ int err;
+
+ mmap_assert_locked(mm);
+
+ if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
+ !ops->copy_to_ram)
+ return -EOPNOTSUPP;
+
+ vas = vma_lookup(mm, start);
+ if (!vas) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ if (end > vas->vm_end || start < vas->vm_start) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (!vma_is_anonymous(vas)) {
+ err = -EBUSY;
+ goto err_out;
+ }
+
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+
+ zdd = drm_pagemap_zdd_alloc(pgmap_owner);
+ if (!zdd) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ migrate.vma = vas;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+
+ err = migrate_vma_setup(&migrate);
+ if (err)
+ goto err_free;
+
+ if (!migrate.cpages) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ if (migrate.cpages != npages) {
+ err = -EBUSY;
+ goto err_finalize;
+ }
+
+ err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
+ if (err)
+ goto err_finalize;
+
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ migrate.src, npages, DMA_TO_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = pfn_to_page(migrate.dst[i]);
+
+ pages[i] = page;
+ migrate.dst[i] = migrate_pfn(migrate.dst[i]);
+ drm_pagemap_get_devmem_page(page, zdd);
+ }
+
+ err = ops->copy_to_devmem(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+ /* Upon success bind devmem allocation to range and zdd */
+ devmem_allocation->timeslice_expiration = get_jiffies_64() +
+ msecs_to_jiffies(timeslice_ms);
+ zdd->devmem_allocation = devmem_allocation; /* Owns ref */
+
+err_finalize:
+ if (err)
+ drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ DMA_TO_DEVICE);
+err_free:
+ if (zdd)
+ drm_pagemap_zdd_put(zdd);
+ kvfree(buf);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem);
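+
+/*
+ * Usage sketch (hypothetical caller; helper names are illustrative): the
+ * function must be called with the mmap lock held in at least read mode, and
+ * on success it consumes the caller's reference to the devmem allocation:
+ *
+ *	mmap_read_lock(mm);
+ *	err = drm_pagemap_migrate_to_devmem(devmem, mm, start, end,
+ *					    timeslice_ms, pgmap_owner);
+ *	mmap_read_unlock(mm);
+ *	if (err)
+ *		my_devmem_put(devmem);	// reference not consumed on error
+ */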
+
+/**
+ * drm_pagemap_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
+ * @vas: Pointer to the VM area structure, can be NULL
+ * @fault_page: Fault page
+ * @npages: Number of pages to populate
+ * @mpages: Output pointer for the number of pages populated
+ * @src_mpfn: Source array of migrate PFNs
+ * @mpfn: Array of migrate PFNs to populate
+ * @addr: Start address for PFN allocation
+ *
+ * This function populates the RAM migrate page frame numbers (PFNs) for the
+ * specified VM area structure. It allocates and locks pages in the VM area for
+ * RAM usage. If @vas is non-NULL, alloc_page_vma() is used for allocation;
+ * otherwise alloc_page() is used.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
+ struct page *fault_page,
+ unsigned long npages,
+ unsigned long *mpages,
+ unsigned long *src_mpfn,
+ unsigned long *mpfn,
+ unsigned long addr)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
+ struct page *page, *src_page;
+
+ if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ src_page = migrate_pfn_to_page(src_mpfn[i]);
+ if (!src_page)
+ continue;
+
+ if (fault_page) {
+ if (src_page->zone_device_data !=
+ fault_page->zone_device_data)
+ continue;
+ }
+
+ if (vas)
+ page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
+ else
+ page = alloc_page(GFP_HIGHUSER);
+
+ if (!page)
+ goto free_pages;
+
+ mpfn[i] = migrate_pfn(page_to_pfn(page));
+ }
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(mpfn[i]);
+
+ if (!page)
+ continue;
+
+ WARN_ON_ONCE(!trylock_page(page));
+ ++*mpages;
+ }
+
+ return 0;
+
+free_pages:
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(mpfn[i]);
+
+ if (!page)
+ continue;
+
+ put_page(page);
+ mpfn[i] = 0;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * drm_pagemap_evict_to_ram() - Evict GPU SVM range to RAM
+ * @devmem_allocation: Pointer to the device memory allocation
+ *
+ * Similar to __drm_pagemap_migrate_to_ram() but does not require the mmap
+ * lock, and migration is done via the migrate_device_* functions.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
+{
+ const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
+ unsigned long npages, mpages = 0;
+ struct page **pages;
+ unsigned long *src, *dst;
+ dma_addr_t *dma_addr;
+ void *buf;
+ int i, err = 0;
+ unsigned int retry_count = 2;
+
+ npages = devmem_allocation->size >> PAGE_SHIFT;
+
+retry:
+ if (!mmget_not_zero(devmem_allocation->mm))
+ return -EFAULT;
+
+ buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ src = buf;
+ dst = buf + (sizeof(*src) * npages);
+ dma_addr = buf + (2 * sizeof(*src) * npages);
+ pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
+
+ err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
+ if (err)
+ goto err_free;
+
+ err = migrate_device_pfns(src, npages);
+ if (err)
+ goto err_free;
+
+ err = drm_pagemap_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
+ src, dst, 0);
+ if (err || !mpages)
+ goto err_finalize;
+
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ dst, npages, DMA_FROM_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(src[i]);
+
+ err = ops->copy_to_ram(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+err_finalize:
+ if (err)
+ drm_pagemap_migration_unlock_put_pages(npages, dst);
+ migrate_device_pages(src, dst, npages);
+ migrate_device_finalize(src, dst, npages);
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ DMA_FROM_DEVICE);
+err_free:
+ kvfree(buf);
+err_out:
+ mmput_async(devmem_allocation->mm);
+
+ if (completion_done(&devmem_allocation->detached))
+ return 0;
+
+ if (retry_count--) {
+ cond_resched();
+ goto retry;
+ }
+
+ return err ?: -EBUSY;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram);
+
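+/*
+ * A sketch of intended use (caller context hypothetical): drivers would call
+ * this from paths such as memory shrinking or device unbind, where all pages
+ * of an allocation must be forced back to system RAM without the mmap lock:
+ *
+ *	err = drm_pagemap_evict_to_ram(devmem);
+ *	// eviction is retried internally; -EBUSY is returned if the
+ *	// allocation could not be fully detached
+ */
+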
+/**
+ * __drm_pagemap_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
+ * @vas: Pointer to the VM area structure
+ * @device_private_page_owner: Device private pages owner
+ * @page: Pointer to the page for fault handling (can be NULL)
+ * @fault_addr: Fault address
+ * @size: Size of migration
+ *
+ * This internal function performs the migration of the specified GPU SVM range
+ * to RAM. It sets up the migration, populates and DMA-maps RAM PFNs, and
+ * invokes the driver-specific operations for migration to RAM.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
+ void *device_private_page_owner,
+ struct page *page,
+ unsigned long fault_addr,
+ unsigned long size)
+{
+ struct migrate_vma migrate = {
+ .vma = vas,
+ .pgmap_owner = device_private_page_owner,
+ .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT,
+ .fault_page = page,
+ };
+ struct drm_pagemap_zdd *zdd;
+ const struct drm_pagemap_devmem_ops *ops;
+ struct device *dev = NULL;
+ unsigned long npages, mpages = 0;
+ struct page **pages;
+ dma_addr_t *dma_addr;
+ unsigned long start, end;
+ void *buf;
+ int i, err = 0;
+
+ if (page) {
+ zdd = page->zone_device_data;
+ if (time_before64(get_jiffies_64(),
+ zdd->devmem_allocation->timeslice_expiration))
+ return 0;
+ }
+
+ start = ALIGN_DOWN(fault_addr, size);
+ end = ALIGN(fault_addr + 1, size);
+
+ /* Corner case where the VMA has been partially unmapped */
+ if (start < vas->vm_start)
+ start = vas->vm_start;
+ if (end > vas->vm_end)
+ end = vas->vm_end;
+
+ migrate.start = start;
+ migrate.end = end;
+ npages = npages_in_range(start, end);
+
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+
+ migrate.vma = vas;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+
+ err = migrate_vma_setup(&migrate);
+ if (err)
+ goto err_free;
+
+ /* Raced with another CPU fault, nothing to do */
+ if (!migrate.cpages)
+ goto err_free;
+
+ if (!page) {
+ for (i = 0; i < npages; ++i) {
+ if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ page = migrate_pfn_to_page(migrate.src[i]);
+ break;
+ }
+
+ if (!page)
+ goto err_finalize;
+ }
+ zdd = page->zone_device_data;
+ ops = zdd->devmem_allocation->ops;
+ dev = zdd->devmem_allocation->dev;
+
+ err = drm_pagemap_migrate_populate_ram_pfn(vas, page, npages, &mpages,
+ migrate.src, migrate.dst,
+ start);
+ if (err)
+ goto err_finalize;
+
+ err = drm_pagemap_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
+ DMA_FROM_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(migrate.src[i]);
+
+ err = ops->copy_to_ram(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+err_finalize:
+ if (err)
+ drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+ if (dev)
+ drm_pagemap_migrate_unmap_pages(dev, dma_addr, npages,
+ DMA_FROM_DEVICE);
+err_free:
+ kvfree(buf);
+err_out:
+
+ return err;
+}
+
+/**
+ * drm_pagemap_page_free() - Put GPU SVM zone device data associated with a page
+ * @page: Pointer to the page
+ *
+ * This function is a callback used to put the GPU SVM zone device data
+ * associated with a page when it is being released.
+ */
+static void drm_pagemap_page_free(struct page *page)
+{
+ drm_pagemap_zdd_put(page->zone_device_data);
+}
+
+/**
+ * drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (page fault handler)
+ * @vmf: Pointer to the fault information structure
+ *
+ * This function is a page fault handler used to migrate a virtual range
+ * to RAM. The device memory allocation in which the device page is found is
+ * migrated in its entirety.
+ *
+ * Returns:
+ * VM_FAULT_SIGBUS on failure, 0 on success.
+ */
+static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
+{
+ struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
+ int err;
+
+ err = __drm_pagemap_migrate_to_ram(vmf->vma,
+ zdd->device_private_page_owner,
+ vmf->page, vmf->address,
+ zdd->devmem_allocation->size);
+
+ return err ? VM_FAULT_SIGBUS : 0;
+}
+
+static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
+ .page_free = drm_pagemap_page_free,
+ .migrate_to_ram = drm_pagemap_migrate_to_ram,
+};
+
+/**
+ * drm_pagemap_pagemap_ops_get() - Retrieve GPU SVM device page map operations
+ *
+ * Returns:
+ * Pointer to the GPU SVM device page map operations structure.
+ */
+const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void)
+{
+ return &drm_pagemap_pagemap_ops;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
+
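+/*
+ * A minimal sketch of how a driver might plug these operations into its
+ * device-private struct dev_pagemap (the owner value is an assumption for
+ * illustration):
+ *
+ *	pagemap->type = MEMORY_DEVICE_PRIVATE;
+ *	pagemap->ops = drm_pagemap_pagemap_ops_get();
+ *	pagemap->owner = my_owner;	// must match the page owner used above
+ */
+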
+/**
+ * drm_pagemap_devmem_init() - Initialize a drm_pagemap device memory allocation
+ *
+ * @devmem_allocation: The struct drm_pagemap_devmem to initialize.
+ * @dev: Pointer to the device structure to which the device memory allocation belongs
+ * @mm: Pointer to the mm_struct for the address space
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap we're allocating from.
+ * @size: Size of device memory allocation
+ */
+void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_pagemap_devmem_ops *ops,
+ struct drm_pagemap *dpagemap, size_t size)
+{
+ init_completion(&devmem_allocation->detached);
+ devmem_allocation->dev = dev;
+ devmem_allocation->mm = mm;
+ devmem_allocation->ops = ops;
+ devmem_allocation->dpagemap = dpagemap;
+ devmem_allocation->size = size;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
+
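+/*
+ * For illustration, a driver embedding struct drm_pagemap_devmem in its own
+ * allocation structure might initialize it like this (struct and variable
+ * names hypothetical):
+ *
+ *	struct my_devmem {
+ *		struct drm_pagemap_devmem base;
+ *		...
+ *	};
+ *
+ *	drm_pagemap_devmem_init(&alloc->base, dev, mm, &my_devmem_ops,
+ *				dpagemap, size);
+ */
+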
+/**
+ * drm_pagemap_page_to_dpagemap() - Return a pointer to the drm_pagemap of a page
+ * @page: The struct page.
+ *
+ * Return: A pointer to the struct drm_pagemap of a device private page that
+ * was populated from the struct drm_pagemap. If the page was *not* populated
+ * from a struct drm_pagemap, the result is undefined and the function call
+ * may result in dereferencing an invalid address.
+ */
+struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
+{
+ struct drm_pagemap_zdd *zdd = page->zone_device_data;
+
+ return zdd->devmem_allocation->dpagemap;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap);
+
+/**
+ * drm_pagemap_populate_mm() - Populate a virtual range with device memory pages
+ * @dpagemap: Pointer to the drm_pagemap managing the device memory
+ * @start: Start of the virtual range to populate.
+ * @end: End of the virtual range to populate.
+ * @mm: Pointer to the virtual address space.
+ * @timeslice_ms: The time requested for the migrated pagemap pages to
+ * be present in @mm before being allowed to be migrated back.
+ *
+ * Attempt to populate a virtual range with device memory pages,
+ * clearing them or migrating data from the existing pages if necessary.
+ * The function is best effort only, and implementations may vary
+ * in how hard they try to satisfy the request.
+ *
+ * Return: %0 on success, negative error code on error. If the hardware
+ * device was removed / unbound the function will return %-ENODEV.
+ */
+int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms)
+{
+ int err;
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+ mmap_read_lock(mm);
+ err = dpagemap->ops->populate_mm(dpagemap, start, end, mm,
+ timeslice_ms);
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return err;
+}
+EXPORT_SYMBOL(drm_pagemap_populate_mm);
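+
+/*
+ * Sketch (hypothetical caller): a GPU SVM fault handler could request device
+ * memory for a faulting range like this. Since population is best effort,
+ * failures other than -ENODEV can simply fall back to system memory:
+ *
+ *	err = drm_pagemap_populate_mm(dpagemap, start, end, mm, timeslice_ms);
+ *	if (err == -ENODEV)
+ *		...	// device unbound; use system memory from now on
+ */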
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 650de4da0853..c8bb28dccdc1 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -23,6 +23,7 @@
#include <linux/backlight.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -473,13 +474,51 @@ int of_drm_get_panel_orientation(const struct device_node *np,
EXPORT_SYMBOL(of_drm_get_panel_orientation);
#endif
+/* Find panel by fwnode. This should be identical to of_drm_find_panel(). */
+static struct drm_panel *find_panel_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ struct drm_panel *panel;
+
+ if (!fwnode_device_is_available(fwnode))
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&panel_lock);
+
+ list_for_each_entry(panel, &panel_list, list) {
+ if (dev_fwnode(panel->dev) == fwnode) {
+ mutex_unlock(&panel_lock);
+ return panel;
+ }
+ }
+
+ mutex_unlock(&panel_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+/* Find panel by follower device */
+static struct drm_panel *find_panel_by_dev(struct device *follower_dev)
+{
+ struct fwnode_handle *fwnode;
+ struct drm_panel *panel;
+
+ fwnode = fwnode_find_reference(dev_fwnode(follower_dev), "panel", 0);
+ if (IS_ERR(fwnode))
+ return ERR_PTR(-ENODEV);
+
+ panel = find_panel_by_fwnode(fwnode);
+ fwnode_handle_put(fwnode);
+
+ return panel;
+}
+
/**
* drm_is_panel_follower() - Check if the device is a panel follower
* @dev: The 'struct device' to check
*
* This checks to see if a device needs to be power sequenced together with
* a panel using the panel follower API.
- * At the moment panels can only be followed on device tree enabled systems.
+ *
* The "panel" property of the follower points to the panel to be followed.
*
* Return: true if we should be power sequenced with a panel; false otherwise.
@@ -491,7 +530,7 @@ bool drm_is_panel_follower(struct device *dev)
* don't bother trying to parse it here. We just need to know if the
* property is there.
*/
- return of_property_present(dev->of_node, "panel");
+ return device_property_present(dev, "panel");
}
EXPORT_SYMBOL(drm_is_panel_follower);
@@ -508,7 +547,6 @@ EXPORT_SYMBOL(drm_is_panel_follower);
* If a follower is added to a panel that's already been turned on, the
* follower's prepare callback is called right away.
*
- * At the moment panels can only be followed on device tree enabled systems.
* The "panel" property of the follower points to the panel to be followed.
*
* Return: 0 or an error code. Note that -ENODEV means that we detected that
@@ -518,16 +556,10 @@ EXPORT_SYMBOL(drm_is_panel_follower);
int drm_panel_add_follower(struct device *follower_dev,
struct drm_panel_follower *follower)
{
- struct device_node *panel_np;
struct drm_panel *panel;
int ret;
- panel_np = of_parse_phandle(follower_dev->of_node, "panel", 0);
- if (!panel_np)
- return -ENODEV;
-
- panel = of_drm_find_panel(panel_np);
- of_node_put(panel_np);
+ panel = find_panel_by_dev(follower_dev);
if (IS_ERR(panel))
return PTR_ERR(panel);
diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
index c477d98ade2b..598f812b7cb3 100644
--- a/drivers/gpu/drm/drm_panel_backlight_quirks.c
+++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
@@ -2,6 +2,7 @@
#include <linux/array_size.h>
#include <linux/dmi.h>
+#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 7ac0fd5391fe..3a218fb592ce 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -9,6 +9,7 @@
*/
#include <linux/dmi.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/drm_connector.h>
#include <drm/drm_utils.h>
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index b4de79583805..1d6312fa1429 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -6,6 +6,7 @@
* Tux Ascii art taken from cowsay written by Tony Monroe
*/
+#include <linux/export.h>
#include <linux/font.h>
#include <linux/highmem.h>
#include <linux/init.h>
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index 18492daae4b3..09a9b452e8b7 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -404,7 +404,7 @@ impl DecFifo {
let mut out = 0;
let mut exp = 1;
for i in 0..poplen {
- out += self.decimals[self.len + i] as u16 * exp;
+ out += u16::from(self.decimals[self.len + i]) * exp;
exp *= 10;
}
Some((out, NUM_CHARS_BITS[poplen]))
@@ -425,7 +425,7 @@ impl Iterator for SegmentIterator<'_> {
match self.segment {
Segment::Binary(data) => {
if self.offset < data.len() {
- let byte = data[self.offset] as u16;
+ let byte = u16::from(data[self.offset]);
self.offset += 1;
Some((byte, 8))
} else {
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index c585f1e8803e..cb0f68d7f8ea 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -23,7 +23,6 @@
*/
#include <linux/dma-mapping.h>
-#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 04992dfd4c79..38f82391bfda 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 7982be4b0306..747d248aaf02 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -23,6 +23,7 @@
* SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/list.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index a0a5d725eab0..a23fc712a8b7 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -605,6 +605,7 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
+ int ret;
/*
* drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
@@ -614,7 +615,16 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
!obj->funcs->get_sg_table)
return -ENOSYS;
- return drm_gem_pin(obj);
+ if (!obj->funcs->pin)
+ return 0;
+
+ ret = dma_resv_lock(obj->resv, NULL);
+ if (ret)
+ return ret;
+ ret = obj->funcs->pin(obj);
+ dma_resv_unlock(obj->resv);
+
+ return ret;
}
EXPORT_SYMBOL(drm_gem_map_attach);
@@ -631,8 +641,16 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
+ int ret;
+
+ if (!obj->funcs->unpin)
+ return;
- drm_gem_unpin(obj);
+ ret = dma_resv_lock(obj->resv, NULL);
+ if (drm_WARN_ON(obj->dev, ret))
+ return;
+ obj->funcs->unpin(obj);
+ dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_map_detach);
@@ -916,6 +934,26 @@ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
}
EXPORT_SYMBOL(drm_gem_prime_export);
+
+/**
+ * drm_gem_is_prime_exported_dma_buf -
+ * checks if the DMA-BUF was exported from a GEM object belonging to @dev.
+ * @dev: drm_device to check against
+ * @dma_buf: dma-buf object to check
+ *
+ * Return: true if the DMA-BUF was exported from a GEM object belonging
+ * to @dev, false otherwise.
+ */
+bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ return (dma_buf->ops == &drm_gem_prime_dmabuf_ops) && (obj->dev == dev);
+}
+EXPORT_SYMBOL(drm_gem_is_prime_exported_dma_buf);
+
/**
* drm_gem_prime_import_dev - core implementation of the import callback
* @dev: drm_device to import into
@@ -939,16 +977,14 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct drm_gem_object *obj;
int ret;
- if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
+ if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
obj = dma_buf->priv;
- if (obj->dev == dev) {
- /*
- * Importing dmabuf exported from our own gem increases
- * refcount on gem itself instead of f_count of dmabuf.
- */
- drm_gem_object_get(obj);
- return obj;
- }
+ drm_gem_object_get(obj);
+ return obj;
}
if (!dev->driver->gem_prime_import_sg_table)
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 79517bd4418f..ded9461df5f2 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -25,6 +25,7 @@
#include <linux/debugfs.h>
#include <linux/dynamic_debug.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c
index 6cc39e30781f..8959f7084e0b 100644
--- a/drivers/gpu/drm/drm_privacy_screen.c
+++ b/drivers/gpu/drm/drm_privacy_screen.c
@@ -7,6 +7,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c
index dd33fec5aabd..c0948586b7fd 100644
--- a/drivers/gpu/drm/drm_self_refresh_helper.c
+++ b/drivers/gpu/drm/drm_self_refresh_helper.c
@@ -7,6 +7,7 @@
*/
#include <linux/average.h>
#include <linux/bitops.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 250819fbc5ce..fcbcaaa36b5f 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -3,6 +3,7 @@
* Copyright (C) 2016 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/drm_suballoc.c b/drivers/gpu/drm/drm_suballoc.c
index 38cc7a123819..879ea33dbbc4 100644
--- a/drivers/gpu/drm/drm_suballoc.c
+++ b/drivers/gpu/drm/drm_suballoc.c
@@ -42,6 +42,8 @@
#include <drm/drm_suballoc.h>
#include <drm/drm_print.h>
+
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 636cd83ca29e..e1b0fa4000cd 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -195,6 +195,7 @@
#include <linux/anon_inodes.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/eventfd.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
index 9cc71120246f..e4e1873f0e1e 100644
--- a/drivers/gpu/drm/drm_vblank_work.c
+++ b/drivers/gpu/drm/drm_vblank_work.c
@@ -2,6 +2,8 @@
#include <uapi/linux/sched/types.h>
+#include <linux/export.h>
+
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 83229a031af0..58659c16874c 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -23,6 +23,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index d983ee85cf13..95b8a2e4bda6 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -10,6 +10,7 @@
*/
#include <linux/dma-fence.h>
+#include <linux/export.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 3c0a5c3e0e3d..76c742328edb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -534,7 +534,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&submit->sched_job,
&ctx->sched_entity[args->pipe],
- 1, submit->ctx);
+ 1, submit->ctx, file->client_id);
if (ret)
goto err_submit_put;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 71e2e6b9d713..df4232d7e135 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -35,17 +35,16 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
*sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
- struct drm_gpu_scheduler *sched = sched_job->sched;
struct etnaviv_gpu *gpu = submit->gpu;
u32 dma_addr, primid = 0;
int change;
/*
- * If the GPU managed to complete this jobs fence, the timout is
- * spurious. Bail out.
+ * If the GPU managed to complete this job's fence, the timeout has
+ * fired before the free-job worker. The timeout is spurious, so bail out.
*/
if (dma_fence_is_signaled(submit->out_fence))
- goto out_no_timeout;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
/*
* If the GPU is still making forward progress on the front-end (which
@@ -71,7 +70,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
gpu->hangcheck_dma_addr = dma_addr;
gpu->hangcheck_primid = primid;
gpu->hangcheck_fence = gpu->completed_fence;
- goto out_no_timeout;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
/* block scheduler */
@@ -87,13 +86,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
drm_sched_resubmit_jobs(&gpu->sched);
drm_sched_start(&gpu->sched, 0);
- return DRM_GPU_SCHED_STAT_NOMINAL;
-
-out_no_timeout:
- spin_lock(&sched->job_list_lock);
- list_add(&sched_job->list, &sched->pending_list);
- spin_unlock(&sched->job_list_lock);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index fc1c5608db96..ddd73e7f26a3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -56,6 +56,7 @@ static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct exynos_drm_gem **exynos_gem,
int count)
@@ -76,7 +77,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
fb->obj[i] = &exynos_gem[i]->base;
}
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
if (ret < 0) {
@@ -94,9 +95,9 @@ err:
static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
struct drm_framebuffer *fb;
int i;
@@ -124,7 +125,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
+ fb = exynos_drm_framebuffer_init(dev, info, mode_cmd, exynos_gem, i);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto err;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 2f841bbdddc5..fdc6cb40cc9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -14,6 +14,7 @@
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct exynos_drm_gem **exynos_gem,
int count);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 9526a25e90ac..93de25b77e68 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -116,7 +116,10 @@ int exynos_drm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
return PTR_ERR(exynos_gem);
helper->fb =
- exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1);
+ exynos_drm_framebuffer_init(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, &exynos_gem, 1);
if (IS_ERR(helper->fb)) {
DRM_DEV_ERROR(dev->dev, "failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d44401a695e2..e3fbb45f37a2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -7,7 +7,6 @@
#include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 8edefea2ef59..4a37136f90f4 100644
--- a/drivers/gpu/drm/gma500/fbdev.c
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -6,7 +6,6 @@
**************************************************************************/
#include <linux/fb.h>
-#include <linux/pfn_t.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -33,7 +32,7 @@ static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
for (i = 0; i < page_num; ++i) {
- err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ err = vmf_insert_mixed(vma, address, pfn);
if (unlikely(err & VM_FAULT_ERROR))
break;
address += PAGE_SIZE;
@@ -203,7 +202,10 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
return PTR_ERR(backing);
obj = &backing->base;
- fb = psb_framebuffer_create(dev, &mode_cmd, obj);
+ fb = psb_framebuffer_create(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto err_drm_gem_object_put;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1a374702b696..e69b537ded6b 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -29,24 +29,23 @@ static const struct drm_framebuffer_funcs psb_fb_funcs = {
*/
static int psb_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
- const struct drm_format_info *info;
int ret;
/*
* Reject unknown formats, YUV formats, and formats with more than
* 4 bytes per pixel.
*/
- info = drm_get_format_info(dev, mode_cmd);
- if (!info || !info->depth || info->cpp[0] > 4)
+ if (!info->depth || info->cpp[0] > 4)
return -EINVAL;
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
fb->obj[0] = obj;
ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
if (ret) {
@@ -59,6 +58,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
/**
* psb_framebuffer_create - create a framebuffer backed by gt
* @dev: our DRM device
+ * @info: pixel format information
* @mode_cmd: the description of the requested mode
* @obj: the backing object
*
@@ -68,6 +68,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
* TODO: review object references
*/
struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
@@ -78,7 +79,7 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
if (!fb)
return ERR_PTR(-ENOMEM);
- ret = psb_framebuffer_init(dev, fb, mode_cmd, obj);
+ ret = psb_framebuffer_init(dev, fb, info, mode_cmd, obj);
if (ret) {
kfree(fb);
return ERR_PTR(ret);
@@ -96,6 +97,7 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
*/
static struct drm_framebuffer *psb_user_framebuffer_create
(struct drm_device *dev, struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd)
{
struct drm_gem_object *obj;
@@ -110,7 +112,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
return ERR_PTR(-ENOENT);
/* Let the core code do all the work */
- fb = psb_framebuffer_create(dev, cmd, obj);
+ fb = psb_framebuffer_create(dev, info, cmd, obj);
if (IS_ERR(fb))
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 7f77cb2b2751..0b27112ec46f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -594,6 +594,7 @@ extern void psb_modeset_cleanup(struct drm_device *dev);
/* framebuffer */
struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index adadd526641d..8d548d08f127 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -188,8 +188,13 @@ retry:
} else if (format->format == DRM_FORMAT_RGB332) {
drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect, fmtcnv_state);
} else if (format->format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, fmtcnv_state,
- gud_is_big_endian());
+ if (gud_is_big_endian()) {
+ drm_fb_xrgb8888_to_rgb565be(&dst, NULL, src, fb, rect,
+ fmtcnv_state);
+ } else {
+ drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
+ fmtcnv_state);
+ }
} else if (format->format == DRM_FORMAT_RGB888) {
drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect, fmtcnv_state);
} else {
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm.h b/drivers/gpu/drm/hyperv/hyperv_drm.h
index d2d8582b36df..9e776112c03e 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm.h
+++ b/drivers/gpu/drm/hyperv/hyperv_drm.h
@@ -11,7 +11,9 @@
struct hyperv_drm_device {
/* drm */
struct drm_device dev;
- struct drm_simple_display_pipe pipe;
+ struct drm_plane plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct drm_connector connector;
/* mode */
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index 6c6b57298797..945b9482bcb3 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -5,6 +5,8 @@
#include <linux/hyperv.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
@@ -15,7 +17,8 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_plane.h>
#include "hyperv_drm.h"
@@ -38,18 +41,6 @@ static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
return 0;
}
-static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb,
- const struct iosys_map *map)
-{
- struct drm_rect fullscreen = {
- .x1 = 0,
- .x2 = fb->width,
- .y1 = 0,
- .y2 = fb->height,
- };
- return hyperv_blit_to_vram_rect(fb, map, &fullscreen);
-}
-
static int hyperv_connector_get_modes(struct drm_connector *connector)
{
struct hyperv_drm_device *hv = to_hv(connector->dev);
@@ -98,30 +89,66 @@ static int hyperv_check_size(struct hyperv_drm_device *hv, int w, int h,
return 0;
}
-static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static const uint32_t hyperv_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t hyperv_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static void hyperv_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct hyperv_drm_device *hv = to_hv(crtc->dev);
+ struct drm_plane *plane = &hv->plane;
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_crtc_state *crtc_state = crtc->state;
hyperv_hide_hw_ptr(hv->hdev);
hyperv_update_situation(hv->hdev, 1, hv->screen_depth,
crtc_state->mode.hdisplay,
crtc_state->mode.vdisplay,
plane_state->fb->pitches[0]);
- hyperv_blit_to_vram_fullscreen(plane_state->fb, &shadow_plane_state->data[0]);
}
-static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
+static const struct drm_crtc_helper_funcs hyperv_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check,
+ .atomic_enable = hyperv_crtc_helper_atomic_enable,
+};
+
+static const struct drm_crtc_funcs hyperv_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int hyperv_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state = NULL;
+ int ret;
- if (fb->format->format != DRM_FORMAT_XRGB8888)
- return -EINVAL;
+ if (crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+
+ if (!plane_state->visible)
+ return 0;
if (fb->pitches[0] * fb->height > hv->fb_size) {
drm_err(&hv->dev, "fb size requested by %s for %dX%d (pitch %d) greater than %ld\n",
@@ -132,53 +159,120 @@ static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
return 0;
}
-static void hyperv_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+static void hyperv_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
- struct drm_plane_state *state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_rect rect;
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
+ struct drm_rect damage;
+ struct drm_rect dst_clip;
+ struct drm_atomic_helper_damage_iter iter;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ dst_clip = new_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ hyperv_blit_to_vram_rect(new_state->fb, &shadow_plane_state->data[0], &damage);
+ hyperv_update_dirt(hv->hdev, &damage);
+ }
+}
- if (drm_atomic_helper_damage_merged(old_state, state, &rect)) {
- hyperv_blit_to_vram_rect(state->fb, &shadow_plane_state->data[0], &rect);
- hyperv_update_dirt(hv->hdev, &rect);
+static int hyperv_plane_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(hv->vram);
+
+ if (plane->state && plane->state->fb) {
+ sb->format = plane->state->fb->format;
+ sb->width = plane->state->fb->width;
+ sb->height = plane->state->fb->height;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ sb->map[0] = map;
+ return 0;
}
+ return -ENODEV;
}
-static const struct drm_simple_display_pipe_funcs hyperv_pipe_funcs = {
- .enable = hyperv_pipe_enable,
- .check = hyperv_pipe_check,
- .update = hyperv_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+static void hyperv_plane_panic_flush(struct drm_plane *plane)
+{
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct drm_rect rect;
+
+ if (!plane->state || !plane->state->fb)
+ return;
+
+ rect.x1 = 0;
+ rect.y1 = 0;
+ rect.x2 = plane->state->fb->width;
+ rect.y2 = plane->state->fb->height;
+
+ hyperv_update_dirt(hv->hdev, &rect);
+}
+
+static const struct drm_plane_helper_funcs hyperv_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = hyperv_plane_atomic_check,
+ .atomic_update = hyperv_plane_atomic_update,
+ .get_scanout_buffer = hyperv_plane_get_scanout_buffer,
+ .panic_flush = hyperv_plane_panic_flush,
};
-static const uint32_t hyperv_formats[] = {
- DRM_FORMAT_XRGB8888,
+static const struct drm_plane_funcs hyperv_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static const uint64_t hyperv_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const struct drm_encoder_funcs hyperv_drm_simple_encoder_funcs_cleanup = {
+ .destroy = drm_encoder_cleanup,
};
static inline int hyperv_pipe_init(struct hyperv_drm_device *hv)
{
+ struct drm_device *dev = &hv->dev;
+ struct drm_encoder *encoder = &hv->encoder;
+ struct drm_plane *plane = &hv->plane;
+ struct drm_crtc *crtc = &hv->crtc;
+ struct drm_connector *connector = &hv->connector;
int ret;
- ret = drm_simple_display_pipe_init(&hv->dev,
- &hv->pipe,
- &hyperv_pipe_funcs,
- hyperv_formats,
- ARRAY_SIZE(hyperv_formats),
- hyperv_modifiers,
- &hv->connector);
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &hyperv_plane_funcs,
+ hyperv_formats, ARRAY_SIZE(hyperv_formats),
+ hyperv_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
+ drm_plane_helper_add(plane, &hyperv_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(plane);
- drm_plane_enable_fb_damage_clips(&hv->pipe.plane);
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &hyperv_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+ drm_crtc_helper_add(crtc, &hyperv_crtc_helper_funcs);
- return 0;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder,
+ &hyperv_drm_simple_encoder_funcs_cleanup,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+
+ ret = hyperv_conn_init(hv);
+ if (ret) {
+ drm_err(dev, "Failed to initialized connector.\n");
+ return ret;
+ }
+
+ return drm_connector_attach_encoder(connector, encoder);
}
static enum drm_mode_status
@@ -221,12 +315,6 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv)
dev->mode_config.funcs = &hyperv_mode_config_funcs;
- ret = hyperv_conn_init(hv);
- if (ret) {
- drm_err(dev, "Failed to initialized connector.\n");
- return ret;
- }
-
ret = hyperv_pipe_init(hv);
if (ret) {
drm_err(dev, "Failed to initialized pipe.\n");
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e153686256c9..853543443072 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,12 +40,11 @@ i915-y += \
intel_pcode.o \
intel_region_ttm.o \
intel_runtime_pm.o \
- intel_sbi.o \
intel_step.o \
intel_uncore.o \
intel_uncore_trace.o \
intel_wakeref.o \
- vlv_sideband.o \
+ vlv_iosf_sb.o \
vlv_suspend.o
# core peripheral code
@@ -219,12 +218,11 @@ i915-$(CONFIG_HWMON) += \
# modesetting core code
i915-y += \
display/hsw_ips.o \
- display/i9xx_plane.o \
display/i9xx_display_sr.o \
+ display/i9xx_plane.o \
display/i9xx_wm.o \
display/intel_alpm.o \
display/intel_atomic.o \
- display/intel_atomic_plane.o \
display/intel_audio.o \
display/intel_bios.o \
display/intel_bo.o \
@@ -266,6 +264,7 @@ i915-y += \
display/intel_fbc.o \
display/intel_fdi.o \
display/intel_fifo_underrun.o \
+ display/intel_flipq.o \
display/intel_frontbuffer.o \
display/intel_global_state.o \
display/intel_hdcp.o \
@@ -284,10 +283,12 @@ i915-y += \
display/intel_pch.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
+ display/intel_plane.o \
display/intel_plane_initial.o \
display/intel_pmdemand.o \
display/intel_psr.o \
display/intel_quirks.o \
+ display/intel_sbi.o \
display/intel_sprite.o \
display/intel_sprite_uapi.o \
display/intel_tc.o \
@@ -296,7 +297,8 @@ i915-y += \
display/intel_wm.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
- display/skl_watermark.o
+ display/skl_watermark.o \
+ display/vlv_sideband.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index e0a98e6fd6d1..87f6b9602b16 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -18,6 +18,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 1d252432d729..2610f5702fb9 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -15,6 +15,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_dpio_phy.h"
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 4307e2ed03d9..927fe56aec77 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -5,11 +5,13 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "hsw_ips.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_pcode.h"
@@ -17,8 +19,6 @@
static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 val;
if (!crtc_state->ips_enabled)
@@ -39,8 +39,8 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
- snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
- val | IPS_PCODE_CONTROL));
+ intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL,
+ val | IPS_PCODE_CONTROL));
/*
* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
@@ -65,8 +65,6 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bool need_vblank_wait = false;
if (!crtc_state->ips_enabled)
@@ -74,7 +72,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
- snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
+ intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
@@ -267,7 +265,7 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
return PTR_ERR(cdclk_state);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
+ if (crtc_state->pixel_rate > intel_cdclk_logical(cdclk_state) * 95 / 100)
return 0;
}
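
The hunks above replace the uncore-based snb_pcode_write() calls with a
drm_device-based intel_pcode_write(). A minimal sketch of how such a
wrapper could be layered over the old helper (an assumption for
illustration; the real helper lives in intel_pcode.c and may differ):

	/* sketch only: resolve the drm_device back to i915 and forward */
	static inline int intel_pcode_write_sketch(struct drm_device *drm,
						   u32 mbox, u32 val)
	{
		struct drm_i915_private *i915 = to_i915(drm);

		return snb_pcode_write(&i915->uncore, mbox, val);
	}
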
diff --git a/drivers/gpu/drm/i915/display/i9xx_display_sr.c b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
index 32abe9743014..935419441709 100644
--- a/drivers/gpu/drm/i915/display/i9xx_display_sr.c
+++ b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
@@ -5,10 +5,10 @@
#include <drm/drm_device.h>
-#include "i915_reg.h"
#include "i9xx_display_sr.h"
#include "i9xx_wm_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_gmbus.h"
#include "intel_pci_config.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index a2a6d52be0a5..f291ced989dc 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -2,6 +2,7 @@
/*
* Copyright © 2020 Intel Corporation
*/
+
#include <linux/kernel.h>
#include <drm/drm_atomic_helper.h>
@@ -14,13 +15,15 @@
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
+#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_sprite.h"
/* Primary plane formats for gen <= 3 */
@@ -334,10 +337,10 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- i9xx_plane_has_windowing(plane));
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ i9xx_plane_has_windowing(plane));
if (ret)
return ret;
@@ -903,6 +906,27 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
.format_mod_supported_async = intel_plane_format_mod_supported_async,
};
+static void i9xx_disable_tiling(struct intel_plane *plane)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 dspcntr;
+ u32 reg;
+
+ dspcntr = intel_de_read_fw(display, DSPCNTR(display, i9xx_plane));
+ dspcntr &= ~DISP_TILED;
+ intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
+
+ if (DISPLAY_VER(display) >= 4) {
+ reg = intel_de_read_fw(display, DSPSURF(display, i9xx_plane));
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), reg);
+
+ } else {
+ reg = intel_de_read_fw(display, DSPADDR(display, i9xx_plane));
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane), reg);
+ }
+}
+
struct intel_plane *
intel_primary_plane_create(struct intel_display *display, enum pipe pipe)
{
@@ -1045,6 +1069,8 @@ intel_primary_plane_create(struct intel_display *display, enum pipe pipe)
}
}
+ plane->disable_tiling = i9xx_disable_tiling;
+
modifiers = intel_fb_plane_get_modifiers(display, INTEL_PLANE_CAP_TILING_X);
if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
@@ -1149,7 +1175,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
drm_WARN_ON(display->drm, pipe != crtc->pipe);
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ intel_fb = intel_bo_alloc_framebuffer();
if (!intel_fb) {
drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
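
The new disable_tiling hook follows the usual i9xx plane-update pattern:
clear DISP_TILED in DSPCNTR, then rewrite the surface address register
(DSPSURF on display ver 4+, DSPADDR before that) so the change is
latched on the next scanout. A hedged sketch of a caller, assuming an
error/panic path wants a linear scanout (force_linear_scanout is a
hypothetical name):

	/* sketch only: hypothetical caller of the new plane hook */
	static void force_linear_scanout(struct intel_plane *plane)
	{
		if (plane->disable_tiling)
			plane->disable_tiling(plane);
	}
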
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 77876ef735b7..1f9db5118777 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -11,6 +11,7 @@
#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_fb.h"
#include "intel_mchbar_regs.h"
@@ -107,43 +108,41 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *dis
static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2);
if (enable)
val &= ~FORCE_DDR_HIGH_FREQ;
else
val |= FORCE_DDR_HIGH_FREQ;
val &= ~FORCE_DDR_LOW_FREQ;
val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+ vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
drm_err(display->drm,
"timed out waiting for Punit DDR DVFS request\n");
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
static void chv_set_memory_pm5(struct intel_display *display, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
if (enable)
val |= DSP_MAXFIFO_PM5_ENABLE;
else
val &= ~DSP_MAXFIFO_PM5_ENABLE;
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
#define FW_WM(value, plane) \
@@ -3900,7 +3899,6 @@ static void g4x_wm_sanitize(struct intel_display *display)
static void vlv_wm_get_hw_state(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct vlv_wm_values *wm = &display->wm.vlv;
struct intel_crtc *crtc;
u32 val;
@@ -3911,9 +3909,9 @@ static void vlv_wm_get_hw_state(struct intel_display *display)
wm->level = VLV_WM_LEVEL_PM2;
if (display->platform.cherryview) {
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
if (val & DSP_MAXFIFO_PM5_ENABLE)
wm->level = VLV_WM_LEVEL_PM5;
@@ -3926,23 +3924,23 @@ static void vlv_wm_get_hw_state(struct intel_display *display)
* HIGH/LOW bits so that we don't actually change
* the current state.
*/
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2);
val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+ vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
drm_dbg_kms(display->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
display->wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
} else {
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
wm->level = VLV_WM_LEVEL_DDR_DVFS;
}
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
for_each_intel_crtc(display->drm, crtc) {
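
All vlv/chv sideband accessors in this file now take the drm_device
directly instead of drm_i915_private. The read-modify-write pattern
used throughout the hunks above, as a minimal sketch (the punit lock
must be held across the read and the write):

	/* sketch only: locked punit read-modify-write */
	static void punit_rmw_sketch(struct drm_device *drm, u32 reg,
				     u32 clear, u32 set)
	{
		u32 val;

		vlv_punit_get(drm);
		val = vlv_punit_read(drm, reg);
		val &= ~clear;
		val |= set;
		vlv_punit_write(drm, reg, val);
		vlv_punit_put(drm);
	}
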
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index ca7033251e91..8d9cb73a93a7 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -45,6 +45,7 @@
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
@@ -192,12 +193,12 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
else
tmp &= ~PAYLOAD_PRESENT;
- tmp &= ~VBLANK_FENCE;
+ tmp &= ~(VBLANK_FENCE | LP_DATA_TRANSFER | PIPELINE_FLUSH);
if (enable_lpdt)
tmp |= LP_DATA_TRANSFER;
else
- tmp &= ~LP_DATA_TRANSFER;
+ tmp |= PIPELINE_FLUSH;
tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT);
@@ -658,7 +659,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy;
u32 val;
@@ -1276,6 +1277,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+ intel_panel_prepare(crtc_state, conn_state);
+
intel_crtc_vblank_on(crtc_state);
}
@@ -1409,6 +1412,8 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ intel_panel_unprepare(old_conn_state);
+
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_backlight_disable(old_conn_state);
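
The dsi_send_pkt_hdr() change makes LP_DATA_TRANSFER and the new
PIPELINE_FLUSH bit mutually exclusive: low-power packets request the LP
transfer mode, while high-speed packets now request a pipeline flush.
Restated as a two-line sketch:

	tmp &= ~(VBLANK_FENCE | LP_DATA_TRANSFER | PIPELINE_FLUSH);
	tmp |= enable_lpdt ? LP_DATA_TRANSFER : PIPELINE_FLUSH;
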
diff --git a/drivers/gpu/drm/i915/display/icl_dsi_regs.h b/drivers/gpu/drm/i915/display/icl_dsi_regs.h
index d4845ac65acc..b601b7632339 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi_regs.h
+++ b/drivers/gpu/drm/i915/display/icl_dsi_regs.h
@@ -272,6 +272,7 @@
#define PAYLOAD_PRESENT (1 << 31)
#define LP_DATA_TRANSFER (1 << 30)
#define VBLANK_FENCE (1 << 29)
+#define PIPELINE_FLUSH (1 << 28)
#define PARAM_WC_MASK (0xffff << 8)
#define PARAM_WC_LOWER_SHIFT 8
#define PARAM_WC_UPPER_SHIFT 16
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index c176bdbc19a3..dfdde8e4eabe 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -26,6 +26,13 @@ bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp)
return intel_dp->alpm_dpcd & DP_ALPM_AUX_LESS_CAP;
}
+bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return intel_psr_needs_alpm_aux_less(intel_dp, crtc_state) ||
+ (crtc_state->has_lobf && intel_alpm_aux_less_wake_supported(intel_dp));
+}
+
void intel_alpm_init(struct intel_dp *intel_dp)
{
u8 dpcd;
@@ -329,7 +336,6 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- enum port port = dp_to_dig_port(intel_dp)->base.port;
u32 alpm_ctl;
if (DISPLAY_VER(display) < 20 || (!intel_psr_needs_alpm(intel_dp, crtc_state) &&
@@ -341,30 +347,26 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
* Panel Replay on eDP is always using ALPM aux less. I.e. no need to
* check panel support at this point.
*/
- if ((crtc_state->has_panel_replay && intel_dp_is_edp(intel_dp)) ||
- (crtc_state->has_lobf && intel_alpm_aux_less_wake_supported(intel_dp))) {
+ if (intel_alpm_is_alpm_aux_less(intel_dp, crtc_state)) {
alpm_ctl = ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE |
ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS |
ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines);
- intel_de_write(display,
- PORT_ALPM_CTL(port),
- PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
- PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
- PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
- PORT_ALPM_CTL_SILENCE_PERIOD(
- intel_dp->alpm_parameters.silence_period_sym_clocks));
-
- intel_de_write(display,
- PORT_ALPM_LFPS_CTL(port),
- PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
- PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
- PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
- PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms));
+ if (intel_dp->as_sdp_supported) {
+ u32 pr_alpm_ctl = PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1;
+
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
+ DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP)
+ pr_alpm_ctl |= PR_ALPM_CTL_ALLOW_LINK_OFF_BETWEEN_AS_SDP_AND_SU;
+ if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
+ DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR))
+ pr_alpm_ctl |= PR_ALPM_CTL_AS_SDP_TRANSMISSION_IN_ACTIVE_DISABLE;
+
+ intel_de_write(display, PR_ALPM_CTL(display, cpu_transcoder),
+ pr_alpm_ctl);
+ }
+
} else {
alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
ALPM_CTL_EXTENDED_FAST_WAKE_TIME(intel_dp->alpm_parameters.fast_wake_lines);
@@ -388,6 +390,36 @@ void intel_alpm_configure(struct intel_dp *intel_dp,
intel_dp->alpm_parameters.transcoder = crtc_state->cpu_transcoder;
}
+void intel_alpm_port_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+ u32 alpm_ctl_val = 0, lfps_ctl_val = 0;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ if (intel_alpm_is_alpm_aux_less(intel_dp, crtc_state)) {
+ alpm_ctl_val = PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
+ PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
+ PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
+ PORT_ALPM_CTL_SILENCE_PERIOD(
+ intel_dp->alpm_parameters.silence_period_sym_clocks);
+ lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
+ PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
+ intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
+ intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
+ intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms);
+ }
+
+ intel_de_write(display, PORT_ALPM_CTL(port), alpm_ctl_val);
+
+ intel_de_write(display, PORT_ALPM_LFPS_CTL(port), lfps_ctl_val);
+}
+
void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
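
The PORT_ALPM_CTL/PORT_ALPM_LFPS_CTL programming moves out of
lnl_alpm_configure() into the new intel_alpm_port_configure(), which
also clears both registers when aux-less ALPM is not in use. A hedged
sketch of the intended call site (an assumption; the actual caller is
outside these hunks):

	/* sketch only: port-level ALPM programmed from the encoder path */
	intel_alpm_port_configure(intel_dp, crtc_state);
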
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h
index c9fe21e3e72c..a861c20b5d79 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.h
+++ b/drivers/gpu/drm/i915/display/intel_alpm.h
@@ -27,11 +27,15 @@ void intel_alpm_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+void intel_alpm_port_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_alpm_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector);
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp);
bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp);
+bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_alpm_disable(struct intel_dp *intel_dp);
bool intel_alpm_get_error(struct intel_dp *intel_dp);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index e83feca5c9c9..348b1655435e 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -26,7 +26,7 @@
*
* The functions here implement the state management and hardware programming
* dispatch required by the atomic modeset infrastructure.
- * See intel_atomic_plane.c for the plane-specific atomic functionality.
+ * See intel_plane.c for the plane-specific atomic functionality.
*/
#include <drm/display/drm_dp_tunnel.h>
@@ -274,7 +274,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->do_async_flip = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
- crtc_state->dsb_color_vblank = NULL;
+ crtc_state->dsb_color = NULL;
crtc_state->dsb_commit = NULL;
crtc_state->use_dsb = false;
@@ -310,7 +310,7 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
{
struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
- drm_WARN_ON(crtc->dev, crtc_state->dsb_color_vblank);
+ drm_WARN_ON(crtc->dev, crtc_state->dsb_color);
drm_WARN_ON(crtc->dev, crtc_state->dsb_commit);
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 55af3a553c58..5bdaef38f13d 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -951,7 +951,7 @@ static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
if (IS_ERR(cdclk_state))
return PTR_ERR(cdclk_state);
- cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0;
+ intel_cdclk_force_min_cdclk(cdclk_state, enable ? 2 * 96000 : 0);
return drm_atomic_commit(&state->base);
}
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 5827da586003..e007380e9a63 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/pwm.h>
#include <linux/string_helpers.h>
-
#include <acpi/video.h>
#include <drm/drm_file.h>
@@ -19,6 +18,7 @@
#include "intel_backlight_regs.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 166ee11831ab..9c268bed091d 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -37,6 +37,7 @@
#include "i915_drv.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c
index fbd16d7b58d9..65d64f79a4bd 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_bo.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
+#include <drm/drm_panic.h>
+#include "display/intel_display_types.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_object_frontbuffer.h"
@@ -57,3 +59,18 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
i915_debugfs_describe_obj(m, to_intel_bo(obj));
}
+
+struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
+{
+ return i915_gem_object_alloc_framebuffer();
+}
+
+int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
+{
+ return i915_gem_object_panic_setup(sb);
+}
+
+void intel_bo_panic_finish(struct intel_framebuffer *fb)
+{
+ return i915_gem_object_panic_finish(fb);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h
index ea7a2253aaa5..97087a64d23b 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_bo.h
@@ -7,6 +7,8 @@
#include <linux/types.h>
struct drm_gem_object;
+struct drm_scanout_buffer;
+struct intel_framebuffer;
struct seq_file;
struct vm_area_struct;
@@ -23,5 +25,8 @@ struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
struct intel_frontbuffer *front);
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
+struct intel_framebuffer *intel_bo_alloc_framebuffer(void);
+int intel_bo_panic_setup(struct drm_scanout_buffer *sb);
+void intel_bo_panic_finish(struct intel_framebuffer *fb);
#endif /* __INTEL_BO__ */
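
intel_bo.[ch] keeps the display code from reaching into GEM directly;
each new entry point is a thin forwarder to an i915_gem_object_*
implementation. A minimal pairing sketch for the panic helpers,
assuming the drm_panic scanout path calls setup before drawing and
finish afterwards (panic_draw_sketch is a hypothetical name):

	/* sketch only: expected setup/finish pairing in a panic path */
	static int panic_draw_sketch(struct drm_scanout_buffer *sb,
				     struct intel_framebuffer *fb)
	{
		int ret;

		ret = intel_bo_panic_setup(sb);
		if (ret)
			return ret;

		/* ... draw the panic screen into sb ... */

		intel_bo_panic_finish(fb);
		return 0;
	}
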
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index a5dd2932b852..d29a755612de 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -5,6 +5,8 @@
#include <drm/drm_atomic_state_helper.h>
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -12,10 +14,47 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_core.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
-#include "skl_watermark.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "intel_uncore.h"
+#include "skl_watermark.h"
+
+struct intel_dbuf_bw {
+ unsigned int max_bw[I915_MAX_DBUF_SLICES];
+ u8 active_planes[I915_MAX_DBUF_SLICES];
+};
+
+struct intel_bw_state {
+ struct intel_global_state base;
+ struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
+
+ /*
+ * Bit mask used to determine whether the corresponding
+ * pipe allows SAGV or not.
+ */
+ u8 pipe_sagv_reject;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
+ /*
+ * From MTL onwards, to lock a QGV point, punit expects the peak BW of
+ * the selected QGV point as the parameter in multiples of 100MB/s
+ */
+ u16 qgv_point_peakbw;
+
+ /*
+ * Current QGV points mask, which restricts
+ * some particular SAGV states; not to be confused
+ * with pipe_sagv_mask.
+ */
+ u16 qgv_points_mask;
+
+ unsigned int data_rate[I915_MAX_PIPES];
+ u8 num_active_planes[I915_MAX_PIPES];
+};
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@@ -79,14 +118,13 @@ static int icl_pcode_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = 0, val2 = 0;
u16 dclk;
int ret;
- ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
- &val, &val2);
+ ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
+ &val, &val2);
if (ret)
return ret;
@@ -107,13 +145,12 @@ static int icl_pcode_read_qgv_point_info(struct intel_display *display,
static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
struct intel_psf_gv_point *points)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = 0;
int ret;
int i;
- ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
+ ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -151,21 +188,20 @@ static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
ICL_PCODE_REQ_QGV_PT_MASK);
}
-int icl_pcode_restrict_qgv_points(struct intel_display *display,
- u32 points_mask)
+static int icl_pcode_restrict_qgv_points(struct intel_display *display,
+ u32 points_mask)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (DISPLAY_VER(display) >= 14)
return 0;
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&i915->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
- points_mask,
- ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
- ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
- 1);
+ ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ points_mask,
+ ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
+ ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
+ 1);
if (ret < 0) {
drm_err(display->drm,
@@ -218,11 +254,10 @@ intel_read_qgv_point_info(struct intel_display *display,
}
static int icl_get_qgv_points(struct intel_display *display,
+ const struct dram_info *dram_info,
struct intel_qgv_info *qi,
bool is_y_tile)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- const struct dram_info *dram_info = &i915->dram_info;
int i, ret;
qi->num_points = dram_info->num_qgv_points;
@@ -418,19 +453,27 @@ static const struct intel_sa_info xe3lpd_sa_info = {
.derating = 10,
};
-static int icl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
+static const struct intel_sa_info xe3lpd_3002_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 22, /* GB/s */
+ .displayrtids = 256,
+ .derating = 10,
+};
+
+static int icl_get_bw_info(struct intel_display *display,
+ const struct dram_info *dram_info,
+ const struct intel_sa_info *sa)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, i915->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, dram_info->num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(display, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
if (ret) {
drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
@@ -488,11 +531,11 @@ static int icl_get_bw_info(struct intel_display *display, const struct intel_sa_
return 0;
}
-static int tgl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
+static int tgl_get_bw_info(struct intel_display *display,
+ const struct dram_info *dram_info,
+ const struct intel_sa_info *sa)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
- const struct dram_info *dram_info = &i915->dram_info;
bool is_y_tile = true; /* assume y tile may be used */
int num_channels = max_t(u8, 1, dram_info->num_channels);
int ipqdepth, ipqdepthpch = 16;
@@ -502,7 +545,7 @@ static int tgl_get_bw_info(struct intel_display *display, const struct intel_sa_
int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(display, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
if (ret) {
drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
@@ -632,15 +675,15 @@ static void dg2_get_bw_info(struct intel_display *display)
}
static int xe2_hpd_get_bw_info(struct intel_display *display,
+ const struct dram_info *dram_info,
const struct intel_sa_info *sa)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
- int num_channels = i915->dram_info.num_channels;
+ int num_channels = dram_info->num_channels;
int peakbw, maxdebw;
int ret, i;
- ret = icl_get_qgv_points(display, &qi, true);
+ ret = icl_get_qgv_points(display, dram_info, &qi, true);
if (ret) {
drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
@@ -763,32 +806,34 @@ static unsigned int icl_qgv_bw(struct intel_display *display,
void intel_bw_init_hw(struct intel_display *display)
{
- const struct dram_info *dram_info = &to_i915(display->drm)->dram_info;
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VER(display) >= 30)
- tgl_get_bw_info(display, &xe3lpd_sa_info);
+ if (DISPLAY_VERx100(display) >= 3002)
+ tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
+ else if (DISPLAY_VER(display) >= 30)
+ tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx &&
dram_info->type == INTEL_DRAM_GDDR_ECC)
- xe2_hpd_get_bw_info(display, &xe2_hpd_ecc_sa_info);
+ xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx)
- xe2_hpd_get_bw_info(display, &xe2_hpd_sa_info);
+ xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
else if (DISPLAY_VER(display) >= 14)
- tgl_get_bw_info(display, &mtl_sa_info);
+ tgl_get_bw_info(display, dram_info, &mtl_sa_info);
else if (display->platform.dg2)
dg2_get_bw_info(display);
else if (display->platform.alderlake_p)
- tgl_get_bw_info(display, &adlp_sa_info);
+ tgl_get_bw_info(display, dram_info, &adlp_sa_info);
else if (display->platform.alderlake_s)
- tgl_get_bw_info(display, &adls_sa_info);
+ tgl_get_bw_info(display, dram_info, &adls_sa_info);
else if (display->platform.rocketlake)
- tgl_get_bw_info(display, &rkl_sa_info);
+ tgl_get_bw_info(display, dram_info, &rkl_sa_info);
else if (DISPLAY_VER(display) == 12)
- tgl_get_bw_info(display, &tgl_sa_info);
+ tgl_get_bw_info(display, dram_info, &tgl_sa_info);
else if (DISPLAY_VER(display) == 11)
- icl_get_bw_info(display, &icl_sa_info);
+ icl_get_bw_info(display, dram_info, &icl_sa_info);
}
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
@@ -862,6 +907,11 @@ static unsigned int intel_bw_data_rate(struct intel_display *display,
return data_rate;
}
+struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state)
+{
+ return container_of(obj_state, struct intel_bw_state, base);
+}
+
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
@@ -971,6 +1021,70 @@ static void icl_force_disable_sagv(struct intel_display *display,
icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
}
+void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask;
+ new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Restrict required qgv points before updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(display, new_mask);
+}
+
+void icl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+ new_mask = new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(display, new_mask);
+}
+
static int mtl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
@@ -991,7 +1105,7 @@ static int mtl_find_qgv_points(struct intel_display *display,
* for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is
* not enabled. PM Demand code will clamp the value for the register
*/
- if (!intel_can_enable_sagv(display, new_bw_state)) {
+ if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
new_bw_state->qgv_point_peakbw = U16_MAX;
drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw.");
return 0;
@@ -1104,7 +1218,7 @@ static int icl_find_qgv_points(struct intel_display *display,
* we can't enable SAGV due to the increased memory latency it may
* cause.
*/
- if (!intel_can_enable_sagv(display, new_bw_state)) {
+ if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
qgv_points);
@@ -1354,12 +1468,12 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
* requirements. This can reduce back and forth
* display blinking due to constant cdclk changes.
*/
- if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
+ if (new_min_cdclk <= intel_cdclk_bw_min_cdclk(cdclk_state))
return 0;
drm_dbg_kms(display->drm,
"new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
- new_min_cdclk, cdclk_state->bw_min_cdclk);
+ new_min_cdclk, intel_cdclk_bw_min_cdclk(cdclk_state));
*need_cdclk_calc = true;
return 0;
@@ -1471,8 +1585,8 @@ static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
if (!new_bw_state)
return 0;
- if (intel_can_enable_sagv(display, new_bw_state) !=
- intel_can_enable_sagv(display, old_bw_state)) {
+ if (intel_bw_can_enable_sagv(display, new_bw_state) !=
+ intel_bw_can_enable_sagv(display, old_bw_state)) {
ret = intel_atomic_serialize_global_state(&new_bw_state->base);
if (ret)
return ret;
@@ -1518,8 +1632,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- intel_can_enable_sagv(display, old_bw_state) !=
- intel_can_enable_sagv(display, new_bw_state))
+ intel_bw_can_enable_sagv(display, old_bw_state) !=
+ intel_bw_can_enable_sagv(display, new_bw_state))
changed = true;
/*
@@ -1641,3 +1755,32 @@ int intel_bw_init(struct intel_display *display)
return 0;
}
+
+bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state)
+{
+ const struct intel_bw_state *new_bw_state, *old_bw_state;
+
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (new_bw_state &&
+ new_bw_state->qgv_point_peakbw != old_bw_state->qgv_point_peakbw)
+ return true;
+
+ return false;
+}
+
+bool intel_bw_can_enable_sagv(struct intel_display *display,
+ const struct intel_bw_state *bw_state)
+{
+ if (DISPLAY_VER(display) < 11 &&
+ bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
+ return false;
+
+ return bw_state->pipe_sagv_reject == 0;
+}
+
+int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state)
+{
+ return bw_state->qgv_point_peakbw;
+}
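
With struct intel_bw_state now private to intel_bw.c, other files must
go through the accessors added above instead of dereferencing the
struct. A minimal consumer sketch using only the new helpers
(peakbw_sketch is a hypothetical name):

	/* sketch only: peak BW consumer going through the accessors */
	static int peakbw_sketch(struct intel_display *display,
				 struct intel_atomic_state *state)
	{
		const struct intel_bw_state *bw_state =
			intel_atomic_get_new_bw_state(state);

		if (bw_state && intel_bw_can_enable_sagv(display, bw_state))
			return intel_bw_qgv_point_peakbw(bw_state);

		return 0;
	}
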
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index eb2cc883e9c1..d51f50c9d302 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -8,52 +8,14 @@
#include <drm/drm_atomic.h>
-#include "intel_display_limits.h"
-#include "intel_display_power.h"
-#include "intel_global_state.h"
-
struct intel_atomic_state;
+struct intel_bw_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
+struct intel_global_state;
-struct intel_dbuf_bw {
- unsigned int max_bw[I915_MAX_DBUF_SLICES];
- u8 active_planes[I915_MAX_DBUF_SLICES];
-};
-
-struct intel_bw_state {
- struct intel_global_state base;
- struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
-
- /*
- * Contains a bit mask, used to determine, whether correspondent
- * pipe allows SAGV or not.
- */
- u8 pipe_sagv_reject;
-
- /* bitmask of active pipes */
- u8 active_pipes;
-
- /*
- * From MTL onwards, to lock a QGV point, punit expects the peak BW of
- * the selected QGV point as the parameter in multiples of 100MB/s
- */
- u16 qgv_point_peakbw;
-
- /*
- * Current QGV points mask, which restricts
- * some particular SAGV states, not to confuse
- * with pipe_sagv_mask.
- */
- u16 qgv_points_mask;
-
- unsigned int data_rate[I915_MAX_PIPES];
- u8 num_active_planes[I915_MAX_PIPES];
-};
-
-#define to_intel_bw_state(global_state) \
- container_of_const((global_state), struct intel_bw_state, base)
+struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state);
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
@@ -67,8 +29,6 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state);
void intel_bw_init_hw(struct intel_display *display);
int intel_bw_init(struct intel_display *display);
int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms);
-int icl_pcode_restrict_qgv_points(struct intel_display *display,
- u32 points_mask);
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc);
int intel_bw_min_cdclk(struct intel_display *display,
@@ -76,4 +36,11 @@ int intel_bw_min_cdclk(struct intel_display *display,
void intel_bw_update_hw_state(struct intel_display *display);
void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
+bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state);
+bool intel_bw_can_enable_sagv(struct intel_display *display,
+ const struct intel_bw_state *bw_state);
+void icl_sagv_pre_plane_update(struct intel_atomic_state *state);
+void icl_sagv_post_plane_update(struct intel_atomic_state *state);
+int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state);
+
#endif /* __INTEL_BW_H__ */
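
The icl_sagv_pre/post_plane_update() pair declared above encodes the
BSpec rule that QGV points may only be restricted before the plane
update and relaxed after it, never both in one request. The transition
masks, as a sketch (sagv_transition_mask is a hypothetical helper):

	/* sketch only: mask used at each phase of the commit */
	static u16 sagv_transition_mask(u16 old_mask, u16 new_mask, bool pre)
	{
		return pre ? (old_mask | new_mask) : new_mask;
	}
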
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index b1718b491ffd..228aa64c1349 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -32,16 +32,17 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_vdsc.h"
#include "skl_watermark.h"
@@ -113,6 +114,42 @@
* dividers can be programmed correctly.
*/
+struct intel_cdclk_state {
+ struct intel_global_state base;
+
+ /*
+ * Logical configuration of cdclk (used for all scaling,
+ * watermark, etc. calculations and checks). This is
+ * computed as if all enabled crtcs were active.
+ */
+ struct intel_cdclk_config logical;
+
+ /*
+ * Actual configuration of cdclk, can be different from the
+ * logical configuration only when all crtc's are DPMS off.
+ */
+ struct intel_cdclk_config actual;
+
+ /* minimum acceptable cdclk to satisfy bandwidth requirements */
+ int bw_min_cdclk;
+ /* minimum acceptable cdclk for each pipe */
+ int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
+
+ /* pipe to which cd2x update is synchronized */
+ enum pipe pipe;
+
+ /* forced minimum cdclk for glk+ audio w/a */
+ int force_min_cdclk;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
+ /* update cdclk with pipes disabled */
+ bool disable_pipes;
+};
+
struct intel_cdclk_funcs {
void (*get_cdclk)(struct intel_display *display,
struct intel_cdclk_config *cdclk_config);
@@ -567,20 +604,18 @@ static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
static void vlv_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
- vlv_iosf_sb_get(dev_priv,
- BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+ vlv_iosf_sb_get(display->drm, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
- cdclk_config->vco = vlv_get_hpll_vco(dev_priv);
- cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
+ cdclk_config->vco = vlv_get_hpll_vco(display->drm);
+ cdclk_config->cdclk = vlv_get_cck_clock(display->drm, "cdclk",
CCK_DISPLAY_CLOCK_CONTROL,
cdclk_config->vco);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
- vlv_iosf_sb_put(dev_priv,
+ vlv_iosf_sb_put(display->drm,
BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
if (display->platform.valleyview)
@@ -658,16 +693,16 @@ static void vlv_set_cdclk(struct intel_display *display,
*/
wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
- vlv_iosf_sb_get(dev_priv,
+ vlv_iosf_sb_get(display->drm,
BIT(VLV_IOSF_SB_CCK) |
BIT(VLV_IOSF_SB_BUNIT) |
BIT(VLV_IOSF_SB_PUNIT));
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
50)) {
drm_err(display->drm,
@@ -681,12 +716,12 @@ static void vlv_set_cdclk(struct intel_display *display,
cdclk) - 1;
/* adjust cdclk divider */
- val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ val = vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL);
val &= ~CCK_FREQUENCY_VALUES;
val |= divider;
- vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+ vlv_cck_write(display->drm, CCK_DISPLAY_CLOCK_CONTROL, val);
- if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+ if (wait_for((vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL) &
CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
50))
drm_err(display->drm,
@@ -694,7 +729,7 @@ static void vlv_set_cdclk(struct intel_display *display,
}
/* adjust self-refresh exit latency value */
- val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+ val = vlv_bunit_read(display->drm, BUNIT_REG_BISOC);
val &= ~0x7f;
/*
@@ -705,9 +740,9 @@ static void vlv_set_cdclk(struct intel_display *display,
val |= 4500 / 250; /* 4.5 usec */
else
val |= 3000 / 250; /* 3.0 usec */
- vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+ vlv_bunit_write(display->drm, BUNIT_REG_BISOC, val);
- vlv_iosf_sb_put(dev_priv,
+ vlv_iosf_sb_put(display->drm,
BIT(VLV_IOSF_SB_CCK) |
BIT(VLV_IOSF_SB_BUNIT) |
BIT(VLV_IOSF_SB_PUNIT));
@@ -723,7 +758,6 @@ static void chv_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
@@ -747,19 +781,19 @@ static void chv_set_cdclk(struct intel_display *display,
*/
wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
- vlv_punit_get(dev_priv);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ vlv_punit_get(display->drm);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
50)) {
drm_err(display->drm,
"timed out waiting for CDclk change\n");
}
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
intel_update_cdclk(display);
@@ -843,7 +877,6 @@ static void bdw_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int ret;
@@ -856,7 +889,7 @@ static void bdw_set_cdclk(struct intel_display *display,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ ret = intel_pcode_write(display->drm, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
drm_err(display->drm,
"failed to inform pcode about cdclk change\n");
@@ -884,8 +917,8 @@ static void bdw_set_cdclk(struct intel_display *display,
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
drm_err(display->drm, "Switching back to LCPLL failed\n");
- snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_config->voltage_level);
+ intel_pcode_write(display->drm, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level);
intel_de_write(display, CDCLK_FREQ,
DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
@@ -1125,7 +1158,6 @@ static void skl_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
u32 freq_select, cdclk_ctl;
@@ -1142,10 +1174,10 @@ static void skl_set_cdclk(struct intel_display *display,
drm_WARN_ON_ONCE(display->drm,
display->platform.skylake && vco == 8640000);
- ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret) {
drm_err(display->drm,
"Failed to inform PCU about cdclk change (%d)\n", ret);
@@ -1188,8 +1220,8 @@ static void skl_set_cdclk(struct intel_display *display,
intel_de_posting_read(display, CDCLK_CTL);
/* inform PCU of the change */
- snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- cdclk_config->voltage_level);
+ intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
intel_update_cdclk(display);
}
@@ -2125,7 +2157,6 @@ static void bxt_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_config mid_cdclk_config;
int cdclk = cdclk_config->cdclk;
int ret = 0;
@@ -2139,18 +2170,18 @@ static void bxt_set_cdclk(struct intel_display *display,
if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
; /* NOOP */
else if (DISPLAY_VER(display) >= 11)
- ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
else
/*
* BSpec requires us to wait up to 150usec, but that leads to
* timeouts; the 2ms used here is based on experiment.
*/
- ret = snb_pcode_write_timeout(&dev_priv->uncore,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000, 150, 2);
+ ret = intel_pcode_write_timeout(display->drm,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 2);
if (ret) {
drm_err(display->drm,
@@ -2179,8 +2210,8 @@ static void bxt_set_cdclk(struct intel_display *display,
* Display versions 14 and beyond
*/;
else if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
- ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- cdclk_config->voltage_level);
+ ret = intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
if (DISPLAY_VER(display) < 11) {
/*
* The timeout isn't specified, the 2ms used here is based on
@@ -2188,10 +2219,9 @@ static void bxt_set_cdclk(struct intel_display *display,
* FIXME: Waiting for the request completion could be delayed
* until the next PCODE request based on BSpec.
*/
- ret = snb_pcode_write_timeout(&dev_priv->uncore,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_config->voltage_level,
- 150, 2);
+ ret = intel_pcode_write_timeout(display->drm,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level, 2);
}
if (ret) {
drm_err(display->drm,
@@ -2477,7 +2507,6 @@ static void intel_pcode_notify(struct intel_display *display,
bool cdclk_update_valid,
bool pipe_count_update_valid)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
u32 update_mask = 0;
@@ -2492,11 +2521,11 @@ static void intel_pcode_notify(struct intel_display *display,
if (pipe_count_update_valid)
update_mask |= DISPLAY_TO_PCODE_PIPE_COUNT_VALID;
- ret = skl_pcode_request(&i915->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE |
- update_mask,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE |
+ update_mask,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret)
drm_err(display->drm,
"Failed to inform PCU about display config (err %d)\n",
@@ -3388,7 +3417,9 @@ static int intel_compute_max_dotclk(struct intel_display *display)
*/
void intel_update_max_cdclk(struct intel_display *display)
{
- if (DISPLAY_VER(display) >= 30) {
+ if (DISPLAY_VERx100(display) >= 3002) {
+ display->cdclk.max_cdclk_freq = 480000;
+ } else if (DISPLAY_VER(display) >= 30) {
display->cdclk.max_cdclk_freq = 691200;
} else if (display->platform.jasperlake || display->platform.elkhartlake) {
if (display->cdclk.hw.ref == 24000)
@@ -3528,10 +3559,8 @@ static int pch_rawclk(struct intel_display *display)
static int vlv_hrawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
/* RAWCLK_FREQ_VLV register updated from power well code */
- return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
+ return vlv_get_cck_clock_hpll(display->drm, "hrawclk",
CCK_DISPLAY_REF_CLOCK_CONTROL);
}
@@ -3841,3 +3870,60 @@ void intel_init_cdclk_hooks(struct intel_display *display)
"Unknown platform. Assuming i830\n"))
display->funcs.cdclk = &i830_cdclk_funcs;
}
+
+int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->logical.cdclk;
+}
+
+int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->actual.cdclk;
+}
+
+int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->actual.voltage_level;
+}
+
+int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe)
+{
+ return cdclk_state->min_cdclk[pipe];
+}
+
+int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->bw_min_cdclk;
+}
+
+bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state)
+{
+ const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
+
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+ if (new_cdclk_state &&
+ (new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk ||
+ new_cdclk_state->actual.voltage_level != old_cdclk_state->actual.voltage_level))
+ return true;
+
+ return false;
+}
+
+void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk)
+{
+ cdclk_state->force_min_cdclk = force_min_cdclk;
+}
+
+void intel_cdclk_read_hw(struct intel_display *display)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
+
+ intel_update_cdclk(display);
+ intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
+ cdclk_state->actual = display->cdclk.hw;
+ cdclk_state->logical = display->cdclk.hw;
+}
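
intel_cdclk_pmdemand_needs_update() mirrors the intel_bw helper added
earlier in this series, so pmdemand code can query both global states
without seeing their internals. A hedged consumer sketch
(pmdemand_update_sketch is a hypothetical name):

	/* sketch only: one pmdemand check covering both global states */
	static bool pmdemand_update_sketch(struct intel_atomic_state *state)
	{
		return intel_bw_pmdemand_needs_update(state) ||
		       intel_cdclk_pmdemand_needs_update(state);
	}
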
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index a1cefd455d92..cacee598af0e 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -8,10 +8,9 @@
#include <linux/types.h>
-#include "intel_display_limits.h"
-#include "intel_global_state.h"
-
+enum pipe;
struct intel_atomic_state;
+struct intel_cdclk_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
@@ -23,42 +22,6 @@ struct intel_cdclk_config {
bool joined_mbus;
};
-struct intel_cdclk_state {
- struct intel_global_state base;
-
- /*
- * Logical configuration of cdclk (used for all scaling,
- * watermark, etc. calculations and checks). This is
- * computed as if all enabled crtcs were active.
- */
- struct intel_cdclk_config logical;
-
- /*
- * Actual configuration of cdclk, can be different from the
- * logical configuration only when all crtc's are DPMS off.
- */
- struct intel_cdclk_config actual;
-
- /* minimum acceptable cdclk to satisfy bandwidth requirements */
- int bw_min_cdclk;
- /* minimum acceptable cdclk for each pipe */
- int min_cdclk[I915_MAX_PIPES];
- /* minimum acceptable voltage level for each pipe */
- u8 min_voltage_level[I915_MAX_PIPES];
-
- /* pipe to which cd2x update is synchronized */
- enum pipe pipe;
-
- /* forced minimum cdclk for glk+ audio w/a */
- int force_min_cdclk;
-
- /* bitmask of active pipes */
- u8 active_pipes;
-
- /* update cdclk with pipes disabled */
- bool disable_pipes;
-};
-
void intel_cdclk_init_hw(struct intel_display *display);
void intel_cdclk_uninit_hw(struct intel_display *display);
void intel_init_cdclk_hooks(struct intel_display *display);
@@ -97,4 +60,13 @@ void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc);
int intel_cdclk_init(struct intel_display *display);
void intel_cdclk_debugfs_register(struct intel_display *display);
+int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state);
+int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state);
+int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state);
+int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe);
+int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state);
+bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state);
+void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk);
+void intel_cdclk_read_hw(struct intel_display *display);
+
#endif /* __INTEL_CDCLK_H__ */
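
A minimal consumer sketch for the accessors declared above, mirroring
the intel_bw_calc_min_cdclk() check seen earlier in this series
(cdclk_needs_bump_sketch is a hypothetical name):

	/* sketch only: compare a new bandwidth min cdclk to the stored one */
	static bool cdclk_needs_bump_sketch(const struct intel_cdclk_state *cdclk_state,
					    int new_min_cdclk)
	{
		return new_min_cdclk > intel_cdclk_bw_min_cdclk(cdclk_state);
	}
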
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg.c b/drivers/gpu/drm/i915/display/intel_cmtg.c
index 82606ebae1de..165138b95cb2 100644
--- a/drivers/gpu/drm/i915/display/intel_cmtg.c
+++ b/drivers/gpu/drm/i915/display/intel_cmtg.c
@@ -9,13 +9,13 @@
#include <drm/drm_device.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
-#include "intel_crtc.h"
#include "intel_cmtg.h"
#include "intel_cmtg_regs.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_device.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
/**
* DOC: Common Primary Timing Generator (CMTG)
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg_regs.h b/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
index 668e41d65e86..945a35578284 100644
--- a/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_CMTG_REGS_H__
#define __INTEL_CMTG_REGS_H__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define CMTG_CLK_SEL _MMIO(0x46160)
#define CMTG_CLK_SEL_A_MASK REG_GENMASK(31, 29)
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 98dddf72c0eb..671db6926e4c 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1339,8 +1339,8 @@ static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
{
struct intel_display *display = to_intel_display(crtc_state);
- if (crtc_state->dsb_color_vblank)
- intel_dsb_reg_write(crtc_state->dsb_color_vblank, reg, val);
+ if (crtc_state->dsb_color)
+ intel_dsb_reg_write(crtc_state->dsb_color, reg, val);
else
intel_de_write_fw(display, reg, val);
}
@@ -1350,8 +1350,8 @@ static void ilk_lut_write_indexed(const struct intel_crtc_state *crtc_state,
{
struct intel_display *display = to_intel_display(crtc_state);
- if (crtc_state->dsb_color_vblank)
- intel_dsb_reg_write_indexed(crtc_state->dsb_color_vblank, reg, val);
+ if (crtc_state->dsb_color)
+ intel_dsb_reg_write_indexed(crtc_state->dsb_color, reg, val);
else
intel_de_write_fw(display, reg, val);
}
@@ -1389,7 +1389,7 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
for (i = 0; i < 256; i++) {
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
- if (crtc_state->dsb_color_vblank)
+ if (crtc_state->dsb_color)
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
}
@@ -1917,7 +1917,7 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- if (crtc_state->dsb_color_vblank)
+ if (crtc_state->dsb_color)
return;
display->funcs.color->load_luts(crtc_state);
@@ -1965,6 +1965,25 @@ void intel_color_modeset(const struct intel_crtc_state *crtc_state)
}
}
+bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->dsb_color;
+}
+
+bool intel_color_uses_chained_dsb(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return crtc_state->dsb_color && !HAS_DOUBLE_BUFFERED_LUT(display);
+}
+
+bool intel_color_uses_gosub_dsb(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return crtc_state->dsb_color && HAS_DOUBLE_BUFFERED_LUT(display);
+}
+
void intel_color_prepare_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -1982,47 +2001,53 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut)
return;
- crtc_state->dsb_color_vblank = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024);
- if (!crtc_state->dsb_color_vblank)
+ if (HAS_DOUBLE_BUFFERED_LUT(display))
+ crtc_state->dsb_color = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 1024);
+ else
+ crtc_state->dsb_color = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024);
+
+ if (!intel_color_uses_dsb(crtc_state))
return;
display->funcs.color->load_luts(crtc_state);
- if (crtc_state->use_dsb) {
- intel_vrr_send_push(crtc_state->dsb_color_vblank, crtc_state);
- intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color_vblank);
- intel_vrr_check_push_sent(crtc_state->dsb_color_vblank, crtc_state);
- intel_dsb_interrupt(crtc_state->dsb_color_vblank);
+ if (crtc_state->use_dsb && intel_color_uses_chained_dsb(crtc_state)) {
+ intel_vrr_send_push(crtc_state->dsb_color, crtc_state);
+ intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color);
+ intel_vrr_check_push_sent(crtc_state->dsb_color, crtc_state);
+ intel_dsb_interrupt(crtc_state->dsb_color);
}
- intel_dsb_finish(crtc_state->dsb_color_vblank);
+ if (intel_color_uses_gosub_dsb(crtc_state))
+ intel_dsb_gosub_finish(crtc_state->dsb_color);
+ else
+ intel_dsb_finish(crtc_state->dsb_color);
}
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
{
- if (crtc_state->dsb_color_vblank) {
- intel_dsb_cleanup(crtc_state->dsb_color_vblank);
- crtc_state->dsb_color_vblank = NULL;
+ if (crtc_state->dsb_color) {
+ intel_dsb_cleanup(crtc_state->dsb_color);
+ crtc_state->dsb_color = NULL;
}
}
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state)
{
- if (crtc_state->dsb_color_vblank)
- intel_dsb_wait(crtc_state->dsb_color_vblank);
-}
-
-bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state)
-{
- return crtc_state->dsb_color_vblank;
+ if (crtc_state->dsb_color)
+ intel_dsb_wait(crtc_state->dsb_color);
}
static bool intel_can_preload_luts(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
+ if (HAS_DOUBLE_BUFFERED_LUT(display))
+ return false;
+
return !old_crtc_state->post_csc_lut &&
!old_crtc_state->pre_csc_lut;
}
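
A minimal user-space sketch (not part of the patch) of the three-way split the intel_color.c hunks above introduce: the struct and names below are simplified stand-ins for the real crtc state and the HAS_DOUBLE_BUFFERED_LUT() check.

#include <stdbool.h>
#include <stdio.h>

struct fake_crtc_state {
        bool dsb_color;            /* stands in for crtc_state->dsb_color != NULL */
        bool double_buffered_lut;  /* stands in for HAS_DOUBLE_BUFFERED_LUT(display) */
};

/* mirrors intel_color_uses_chained_dsb(): no double-buffered LUT, chain DSB_1 */
static bool uses_chained_dsb(const struct fake_crtc_state *s)
{
        return s->dsb_color && !s->double_buffered_lut;
}

/* mirrors intel_color_uses_gosub_dsb(): double-buffered LUT, subroutine on DSB_0 */
static bool uses_gosub_dsb(const struct fake_crtc_state *s)
{
        return s->dsb_color && s->double_buffered_lut;
}

int main(void)
{
        struct fake_crtc_state s = { .dsb_color = true, .double_buffered_lut = true };

        printf("engine=%s finish=%s\n",
               s.double_buffered_lut ? "DSB_0" : "DSB_1",
               uses_gosub_dsb(&s) ? "gosub" :
               uses_chained_dsb(&s) ? "chained" : "direct");
        return 0;
}
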
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 9d66457c1e89..bf7a12ce9df0 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -24,6 +24,8 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state);
+bool intel_color_uses_chained_dsb(const struct intel_crtc_state *crtc_state);
+bool intel_color_uses_gosub_dsb(const struct intel_crtc_state *crtc_state);
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state);
void intel_color_commit_noarm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index f5cc38dbe559..112749f97c26 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -5,11 +5,11 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#define for_each_combo_phy(__display, __phy) \
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index ee41acdccf4e..3694f95376c2 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_COMBO_PHY_REGS__
#define __INTEL_COMBO_PHY_REGS__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define _ICL_COMBOPHY_A 0x162000
#define _ICL_COMBOPHY_B 0x6C000
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 6c81c9f2fd09..42c923f416b3 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -32,7 +32,6 @@
#include "i915_drv.h"
#include "i915_utils.h"
-#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
@@ -65,10 +64,10 @@ static void intel_connector_modeset_retry_work_fn(struct work_struct *work)
void intel_connector_queue_modeset_retry_work(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work))
+ if (!queue_work(display->wq.unordered, &connector->modeset_retry_work))
drm_connector_put(&connector->base);
}
@@ -153,36 +152,36 @@ void intel_connector_destroy(struct drm_connector *connector)
kfree(connector);
}
-int intel_connector_register(struct drm_connector *connector)
+int intel_connector_register(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_i915_private *i915 = to_i915(_connector->dev);
int ret;
- ret = intel_backlight_device_register(intel_connector);
+ ret = intel_panel_register(connector);
if (ret)
goto err;
if (i915_inject_probe_failure(i915)) {
ret = -EFAULT;
- goto err_backlight;
+ goto err_panel;
}
- intel_connector_debugfs_add(intel_connector);
+ intel_connector_debugfs_add(connector);
return 0;
-err_backlight:
- intel_backlight_device_unregister(intel_connector);
+err_panel:
+ intel_panel_unregister(connector);
err:
return ret;
}
-void intel_connector_unregister(struct drm_connector *connector)
+void intel_connector_unregister(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
- intel_backlight_device_unregister(intel_connector);
+ intel_panel_unregister(connector);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
@@ -209,8 +208,7 @@ enum pipe intel_connector_get_pipe(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
- drm_WARN_ON(display->drm,
- !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
+ drm_modeset_lock_assert_held(&display->drm->mode_config.connection_mutex);
if (!connector->base.state->crtc)
return INVALID_PIPE;
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 38b50a779b6b..898c5d9e8f7a 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -34,8 +34,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_irq.h"
-#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crt.h"
#include "intel_crt_regs.h"
@@ -44,6 +42,7 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 29cfc38f12e0..a187db6df2d3 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -17,7 +17,6 @@
#include "i9xx_plane.h"
#include "icl_dsi.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_cursor.h"
@@ -29,6 +28,7 @@
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_pipe_crc.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_vblank.h"
@@ -417,10 +417,13 @@ int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
+
return crtc_state->hw.active &&
!crtc_state->preload_luts &&
!intel_crtc_needs_modeset(crtc_state) &&
- intel_crtc_needs_color_update(crtc_state) &&
+ (intel_crtc_needs_color_update(crtc_state) &&
+ !HAS_DOUBLE_BUFFERED_LUT(display)) &&
!intel_color_uses_dsb(crtc_state) &&
!crtc_state->use_dsb;
}
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 2fec5ba58373..198e69efe9ac 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -12,10 +12,8 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_de.h"
@@ -24,6 +22,7 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_vblank.h"
@@ -159,10 +158,10 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index a82b93cbc81d..801235a5bc0a 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -8,8 +8,8 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
+#include "intel_alpm.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -39,7 +39,13 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- if (display->platform.pantherlake && phy == PHY_A)
+ /*
+ * PTL doesn't have a PHY connected to PORT B; as such,
+ * there will never be a case where PTL uses PHY B.
+ * WCL uses PORT A and B with the C10 PHY.
+ * Reusing the condition for WCL and extending it to PORT B
+ * should not cause any issues for PTL.
+ */
+ if (display->platform.pantherlake && phy < PHY_C)
return true;
if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
@@ -3224,6 +3230,46 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder,
intel_cx0pll_enable(encoder, crtc_state);
}
+/*
+ * According to the HAS, we need to enable MAC Transmitting LFPS in the
+ * "PHY Common Control 0" PIPE register in case AUX-Less ALPM is going to
+ * be used. This function does that and is called from the link retrain
+ * sequence.
+ */
+void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ intel_wakeref_t wakeref;
+ int i;
+ u8 owned_lane_mask;
+
+ if (DISPLAY_VER(display) < 20 ||
+ !intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder), crtc_state))
+ return;
+
+ owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
+
+ wakeref = intel_cx0_phy_transaction_begin(encoder);
+
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1), 0,
+ C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
+
+ for (i = 0; i < 4; i++) {
+ int tx = i % 2 + 1;
+ u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
+
+ if (!(owned_lane_mask & lane_mask))
+ continue;
+
+ intel_cx0_rmw(encoder, lane_mask, PHY_CMN1_CONTROL(tx, 0),
+ CONTROL0_MAC_TRANSMIT_LFPS,
+ CONTROL0_MAC_TRANSMIT_LFPS, MB_WRITE_COMMITTED);
+ }
+
+ intel_cx0_phy_transaction_end(encoder, wakeref);
+}
+
static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
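
The lane walk in intel_lnl_mac_transmit_lfps() above maps four transmitters onto two message-bus lanes. A small stand-alone sketch of that indexing follows; the mask values are illustrative, not the real INTEL_CX0_LANE* defines.

#include <stdio.h>

#define FAKE_LANE0 0x1 /* illustrative; not the real INTEL_CX0_LANE0 */
#define FAKE_LANE1 0x2 /* illustrative; not the real INTEL_CX0_LANE1 */

int main(void)
{
        unsigned int owned_lane_mask = FAKE_LANE0 | FAKE_LANE1;
        int i;

        for (i = 0; i < 4; i++) {
                int tx = i % 2 + 1;                 /* TX1 or TX2 within a lane */
                unsigned int lane_mask = i < 2 ? FAKE_LANE0 : FAKE_LANE1;

                if (!(owned_lane_mask & lane_mask))
                        continue;       /* skip transmitters the PHY doesn't own */

                printf("rmw PHY_CMN1_CONTROL(%d, 0) on lane mask 0x%x\n",
                       tx, lane_mask);
        }
        return 0;
}
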
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index a8f811ca5e7b..c5a7b529955b 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -43,5 +43,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
void intel_cx0_pll_power_save_wa(struct intel_display *display);
+void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 59c22beaf1de..77eae1d845f7 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -6,8 +6,8 @@
#ifndef __INTEL_CX0_PHY_REGS_H__
#define __INTEL_CX0_PHY_REGS_H__
-#include "i915_reg_defs.h"
#include "intel_display_limits.h"
+#include "intel_display_reg_defs.h"
/* DDI Buffer Control */
#define _DDI_CLK_VALFREQ_A 0x64030
@@ -285,6 +285,9 @@
#define PHY_CX0_TX_CONTROL(tx, control) (0x400 + ((tx) - 1) * 0x200 + (control))
#define CONTROL2_DISABLE_SINGLE_TX REG_BIT(6)
+#define PHY_CMN1_CONTROL(tx, control) (0x800 + ((tx) - 1) * 0x200 + (control))
+#define CONTROL0_MAC_TRANSMIT_LFPS REG_BIT(1)
+
/* C20 Registers */
#define PHY_C20_WR_ADDRESS_L 0xC02
#define PHY_C20_WR_ADDRESS_H 0xC03
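
The new PHY_CMN1_CONTROL() macro is plain offset arithmetic; this sketch shows the addresses it produces for the two transmitters (CONTROL0_MAC_TRANSMIT_LFPS is REG_BIT(1), i.e. bit 1, in that register).

#include <stdio.h>

/* same arithmetic as the PHY_CMN1_CONTROL(tx, control) macro above */
static unsigned int phy_cmn1_control(int tx, int control)
{
        return 0x800 + (tx - 1) * 0x200 + control;
}

int main(void)
{
        printf("TX1 CONTROL0: 0x%x\n", phy_cmn1_control(1, 0));  /* 0x800 */
        printf("TX2 CONTROL0: 0x%x\n", phy_cmn1_control(2, 0));  /* 0xa00 */
        printf("MAC_TRANSMIT_LFPS bit: 0x%x\n", 1u << 1);        /* REG_BIT(1) */
        return 0;
}
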
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index d58f8fc37326..0405396c7750 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -50,6 +50,7 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
@@ -72,11 +73,13 @@
#include "intel_lspcon.h"
#include "intel_mg_phy_regs.h"
#include "intel_modeset_lock.h"
+#include "intel_panel.h"
#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_snps_phy.h"
+#include "intel_step.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
@@ -236,7 +239,7 @@ static void intel_wait_ddi_buf_active(struct intel_encoder *encoder)
port_name(port));
}
-static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
+static u32 hsw_pll_to_ddi_pll_sel(const struct intel_dpll *pll)
{
switch (pll->info->id) {
case DPLL_ID_WRPLL1:
@@ -260,7 +263,7 @@ static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
int clock = crtc_state->port_clock;
const enum intel_dpll_id id = pll->info->id;
@@ -1393,6 +1396,21 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
for (ln = 0; ln < 2; ln++) {
int level;
+ /* Wa_16011342517:adl-p */
+ if (display->platform.alderlake_p &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_D0)) {
+ if ((intel_encoder_is_hdmi(encoder) &&
+ crtc_state->port_clock == 594000) ||
+ (intel_encoder_is_dp(encoder) &&
+ crtc_state->port_clock == 162000)) {
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln),
+ LOADGEN_SHARING_PMD_DISABLE, 1);
+ } else {
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln),
+ LOADGEN_SHARING_PMD_DISABLE, 0);
+ }
+ }
+
intel_dkl_phy_write(display, DKL_TX_PMD_LANE_SUS(tc_port, ln), 0);
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
@@ -1561,7 +1579,7 @@ static bool _icl_ddi_is_clock_enabled(struct intel_display *display, i915_reg_t
return !(intel_de_read(display, reg) & clk_off);
}
-static struct intel_shared_dpll *
+static struct intel_dpll *
_icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel_shift)
{
@@ -1569,14 +1587,14 @@ _icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg,
id = (intel_de_read(display, reg) & clk_sel_mask) >> clk_sel_shift;
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
static void adls_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(display->drm, !pll))
@@ -1606,7 +1624,7 @@ static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -1620,7 +1638,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(display->drm, !pll))
@@ -1650,7 +1668,7 @@ static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -1664,7 +1682,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(display->drm, !pll))
@@ -1703,7 +1721,7 @@ static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -1723,14 +1741,14 @@ static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
if (phy >= PHY_C)
id += DPLL_ID_DG1_DPLL2;
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(display->drm, !pll))
@@ -1760,7 +1778,7 @@ static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
+struct intel_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -1774,7 +1792,7 @@ static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum port port = encoder->port;
if (drm_WARN_ON(display->drm, !pll))
@@ -1817,7 +1835,7 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
@@ -1868,7 +1886,7 @@ static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
}
-static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
@@ -1895,10 +1913,10 @@ static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encode
return NULL;
}
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
-static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder->base.dev);
enum intel_dpll_id id;
@@ -1918,14 +1936,14 @@ static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
return NULL;
}
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
static void skl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum port port = encoder->port;
if (drm_WARN_ON(display->drm, !pll))
@@ -1967,7 +1985,7 @@ static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
return !(intel_de_read(display, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
}
-static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -1986,14 +2004,14 @@ static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
id = (tmp & DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum port port = encoder->port;
if (drm_WARN_ON(display->drm, !pll))
@@ -2018,7 +2036,7 @@ bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder)
return intel_de_read(display, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
}
-static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -2053,7 +2071,7 @@ static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
return NULL;
}
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
void intel_ddi_enable_clock(struct intel_encoder *encoder,
@@ -2760,7 +2778,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 4. Enable the port PLL.
*
* The PLL enabling itself was already done before this function by
- * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only
+ * hsw_crtc_enable()->intel_enable_dpll(). We need only
* configure the PLL to port mapping here.
*/
intel_ddi_enable_clock(encoder, crtc_state);
@@ -3354,6 +3372,8 @@ static void intel_ddi_enable_dp(struct intel_atomic_state *state,
drm_connector_update_privacy_screen(conn_state);
intel_edp_backlight_on(crtc_state, conn_state);
+ intel_panel_prepare(crtc_state, conn_state);
+
if (!intel_lspcon_active(dig_port) || intel_dp_has_hdmi_sink(&dig_port->dp))
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
@@ -3551,6 +3571,7 @@ static void intel_ddi_disable_dp(struct intel_atomic_state *state,
intel_dp->link.active = false;
+ intel_panel_unprepare(old_conn_state);
intel_psr_disable(intel_dp, old_crtc_state);
intel_alpm_disable(intel_dp);
intel_edp_backlight_off(old_conn_state);
@@ -3647,7 +3668,7 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(crtc_state))
- intel_update_active_dpll(state, pipe_crtc, encoder);
+ intel_dpll_update_active(state, pipe_crtc, encoder);
}
/*
@@ -3740,6 +3761,18 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
intel_ddi_buf_enable(encoder, intel_dp->DP);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+
+ /*
+ * 6.k If AUX-Less ALPM is going to be enabled:
+ * i. Configure PORT_ALPM_CTL and PORT_ALPM_LFPS_CTL here
+ */
+ intel_alpm_port_configure(intel_dp, crtc_state);
+
+ /*
+ * ii. Enable MAC Transmits LFPS in the "PHY Common Control 0" PIPE
+ * register
+ */
+ intel_lnl_mac_transmit_lfps(encoder, crtc_state);
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
@@ -4184,7 +4217,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
void intel_ddi_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
struct intel_display *display = to_intel_display(encoder);
enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
@@ -4200,7 +4233,7 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->shared_dpll,
+ crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->intel_dpll,
&crtc_state->dpll_hw_state);
}
@@ -4254,7 +4287,7 @@ static void icl_ddi_combo_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, crtc_state);
}
-static bool icl_ddi_tc_pll_is_tbt(const struct intel_shared_dpll *pll)
+static bool icl_ddi_tc_pll_is_tbt(const struct intel_dpll *pll)
{
return pll->info->id == DPLL_ID_ICL_TBTPLL;
}
@@ -4264,7 +4297,7 @@ icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
if (drm_WARN_ON(display->drm, !pll))
return ICL_PORT_DPLL_DEFAULT;
@@ -4287,7 +4320,7 @@ intel_ddi_port_pll_type(struct intel_encoder *encoder,
static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
struct intel_display *display = to_intel_display(encoder);
enum icl_port_dpll_id port_dpll_id;
@@ -4310,10 +4343,10 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll))
+ if (icl_ddi_tc_pll_is_tbt(crtc_state->intel_dpll))
crtc_state->port_clock = icl_calc_tbt_pll_link(display, encoder->port);
else
- crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->shared_dpll,
+ crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->intel_dpll,
&crtc_state->dpll_hw_state);
}
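
A condensed restatement of the Wa_16011342517 gate added to tgl_dkl_phy_set_signal_levels() above: LOADGEN_SHARING_PMD_DISABLE is set only for HDMI at 594 MHz or DP at 162 MHz on the affected ADL-P steppings. The booleans stand in for the platform, stepping, and encoder-type checks.

#include <stdbool.h>
#include <stdio.h>

/* booleans stand in for the ADL-P stepping, encoder-type and clock checks */
static bool wa_16011342517_disable_sharing(bool adlp_step_a0_d0, bool is_hdmi,
                                           bool is_dp, int port_clock_khz)
{
        if (!adlp_step_a0_d0)
                return false;

        return (is_hdmi && port_clock_khz == 594000) ||
               (is_dp && port_clock_khz == 162000);
}

int main(void)
{
        printf("HDMI 594 MHz: %d\n",
               wa_16011342517_disable_sharing(true, true, false, 594000));
        printf("DP 270 MHz:   %d\n",
               wa_16011342517_disable_sharing(true, false, true, 270000));
        return 0;
}
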
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 353eb04079e9..f6f511bb0431 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -16,9 +16,9 @@ struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
struct intel_dp;
+struct intel_dpll;
struct intel_dpll_hw_state;
struct intel_encoder;
-struct intel_shared_dpll;
enum pipe;
enum port;
enum transcoder;
@@ -40,7 +40,7 @@ void intel_ddi_enable_clock(struct intel_encoder *encoder,
void intel_ddi_disable_clock(struct intel_encoder *encoder);
void intel_ddi_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- struct intel_shared_dpll *pll);
+ struct intel_dpll *pll);
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_disable_clock(struct intel_encoder *encoder);
@@ -50,7 +50,7 @@ intel_ddi_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
-struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
+struct intel_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_wait_ddi_buf_idle(struct intel_display *display, enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index 54ce3e4f8fd9..9ecdcf6b73e4 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -107,10 +107,10 @@ intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
static inline int
__intel_de_wait_for_register_nowl(struct intel_display *display,
i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+ u32 mask, u32 value, unsigned int timeout_ms)
{
return intel_wait_for_register(__to_uncore(display), reg, mask,
- value, timeout);
+ value, timeout_ms);
}
static inline int
@@ -125,14 +125,14 @@ __intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
static inline int
intel_de_wait(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+ u32 mask, u32 value, unsigned int timeout_ms)
{
int ret;
intel_dmc_wl_get(display, reg);
ret = __intel_de_wait_for_register_nowl(display, reg, mask, value,
- timeout);
+ timeout_ms);
intel_dmc_wl_put(display, reg);
@@ -141,14 +141,14 @@ intel_de_wait(struct intel_display *display, i915_reg_t reg,
static inline int
intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+ u32 mask, u32 value, unsigned int timeout_ms, u32 *out_value)
{
int ret;
intel_dmc_wl_get(display, reg);
ret = intel_wait_for_register_fw(__to_uncore(display), reg, mask,
- value, timeout);
+ value, timeout_ms, out_value);
intel_dmc_wl_put(display, reg);
@@ -176,16 +176,16 @@ intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
static inline int
intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+ u32 mask, unsigned int timeout_ms)
{
- return intel_de_wait(display, reg, mask, mask, timeout);
+ return intel_de_wait(display, reg, mask, mask, timeout_ms);
}
static inline int
intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+ u32 mask, unsigned int timeout_ms)
{
- return intel_de_wait(display, reg, mask, 0, timeout);
+ return intel_de_wait(display, reg, mask, 0, timeout_ms);
}
/*
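
The intel_de.h hunks only rename the timeout parameter to timeout_ms and thread a new out_value parameter through intel_de_wait_fw(). A simplified sketch of that wait shape, with the MMIO read faked:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mmio_read(void)
{
        return 0x8000; /* stands in for the real register read */
}

/* shape of the renamed wait: the timeout is explicitly milliseconds, and
 * the _fw variant now reports the final register value via *out_value */
static int wait_for_register(uint32_t mask, uint32_t value,
                             unsigned int timeout_ms, uint32_t *out_value)
{
        uint32_t reg = fake_mmio_read();

        (void)timeout_ms; /* a real implementation polls until this expires */
        if (out_value)
                *out_value = reg;
        return (reg & mask) == value ? 0 : -110 /* -ETIMEDOUT */;
}

int main(void)
{
        uint32_t val;
        int ret = wait_for_register(0x8000, 0x8000, 10, &val);

        printf("ret=%d val=0x%x\n", ret, val);
        return 0;
}
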
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 43aa1f97378b..7035c1fc9033 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -57,7 +57,6 @@
#include "i9xx_wm.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bo.h"
#include "intel_bw.h"
@@ -67,13 +66,14 @@
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
+#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_cx0_phy.h"
-#include "intel_cursor.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
@@ -93,6 +93,7 @@
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
+#include "intel_flipq.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
@@ -105,9 +106,9 @@
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
-#include "intel_pcode.h"
#include "intel_pfit.h"
#include "intel_pipe_crc.h"
+#include "intel_plane.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
@@ -140,46 +141,47 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
/* returns HPLL frequency in kHz */
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
+int vlv_get_hpll_vco(struct drm_device *drm)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
/* Obtain SKU information */
- hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+ hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) &
CCK_FUSE_HPLL_FREQ_MASK;
return vco_freq[hpll_freq] * 1000;
}
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+int vlv_get_cck_clock(struct drm_device *drm,
const char *name, u32 reg, int ref_freq)
{
u32 val;
int divider;
- val = vlv_cck_read(dev_priv, reg);
+ val = vlv_cck_read(drm, reg);
divider = val & CCK_FREQUENCY_VALUES;
- drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
+ drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) !=
(divider << CCK_FREQUENCY_STATUS_SHIFT),
"%s change in progress\n", name);
return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+int vlv_get_cck_clock_hpll(struct drm_device *drm,
const char *name, u32 reg)
{
+ struct drm_i915_private *dev_priv = to_i915(drm);
int hpll;
- vlv_cck_get(dev_priv);
+ vlv_cck_get(drm);
if (dev_priv->hpll_freq == 0)
- dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
+ dev_priv->hpll_freq = vlv_get_hpll_vco(drm);
- hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
+ hpll = vlv_get_cck_clock(drm, name, reg, dev_priv->hpll_freq);
- vlv_cck_put(dev_priv);
+ vlv_cck_put(drm);
return hpll;
}
@@ -191,7 +193,7 @@ void intel_update_czclk(struct intel_display *display)
if (!display->platform.valleyview && !display->platform.cherryview)
return;
- dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
+ dev_priv->czclk_freq = vlv_get_cck_clock_hpll(display->drm, "czclk",
CCK_CZ_CLOCK_CONTROL);
drm_dbg_kms(display->drm, "CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
@@ -1325,7 +1327,7 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
if (intel_crtc_needs_modeset(new_crtc_state))
continue;
- new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
+ new_crtc_state->intel_dpll = old_crtc_state->intel_dpll;
new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
}
}
@@ -1658,13 +1660,17 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (drm_WARN_ON(display->drm, crtc->active))
return;
- for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i)
- intel_dmc_enable_pipe(display, pipe_crtc->pipe);
+ for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
+ const struct intel_crtc_state *new_pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+
+ intel_dmc_enable_pipe(new_pipe_crtc_state);
+ }
intel_encoders_pre_pll_enable(state, crtc);
- if (new_crtc_state->shared_dpll)
- intel_enable_shared_dpll(new_crtc_state);
+ if (new_crtc_state->intel_dpll)
+ intel_dpll_enable(new_crtc_state);
intel_encoders_pre_enable(state, crtc);
@@ -1793,12 +1799,16 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
- intel_disable_shared_dpll(old_crtc_state);
+ intel_dpll_disable(old_crtc_state);
intel_encoders_post_pll_disable(state, crtc);
- for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i)
- intel_dmc_disable_pipe(display, pipe_crtc->pipe);
+ for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
+
+ intel_dmc_disable_pipe(old_pipe_crtc_state);
+ }
}
/* Prefer intel_encoder_is_combo() */
@@ -1959,7 +1969,7 @@ static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
if (HAS_DDI(display) && crtc_state->has_audio)
set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
- if (crtc_state->shared_dpll)
+ if (crtc_state->intel_dpll)
set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
if (crtc_state->dsc.compression_enable)
@@ -4159,7 +4169,7 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
return 0;
linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
- cdclk_state->logical.cdclk);
+ intel_cdclk_logical(cdclk_state));
return min(linetime_wm, 0x1ff);
}
@@ -4225,7 +4235,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
crtc_state->update_wm_post = true;
if (intel_crtc_needs_modeset(crtc_state)) {
- ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
+ ret = intel_dpll_crtc_get_dpll(state, crtc);
if (ret)
return ret;
}
@@ -4318,6 +4328,22 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
return 0;
}
+int intel_display_min_pipe_bpp(void)
+{
+ return 6 * 3;
+}
+
+int intel_display_max_pipe_bpp(struct intel_display *display)
+{
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview)
+ return 10*3;
+ else if (DISPLAY_VER(display) >= 5)
+ return 12*3;
+ else
+ return 8*3;
+}
+
static int
compute_baseline_pipe_bpp(struct intel_atomic_state *state,
struct intel_crtc *crtc)
@@ -4327,17 +4353,9 @@ compute_baseline_pipe_bpp(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int bpp, i;
-
- if (display->platform.g4x || display->platform.valleyview ||
- display->platform.cherryview)
- bpp = 10*3;
- else if (DISPLAY_VER(display) >= 5)
- bpp = 12*3;
- else
- bpp = 8*3;
+ int i;
- crtc_state->pipe_bpp = bpp;
+ crtc_state->pipe_bpp = intel_display_max_pipe_bpp(display);
/* Clamp display bpp to connector max bpp */
for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
@@ -4501,7 +4519,7 @@ copy_joiner_crtc_state_modeset(struct intel_atomic_state *state,
/* preserve some things from the slave's original crtc state */
saved_state->uapi = secondary_crtc_state->uapi;
saved_state->scaler_state = secondary_crtc_state->scaler_state;
- saved_state->shared_dpll = secondary_crtc_state->shared_dpll;
+ saved_state->intel_dpll = secondary_crtc_state->intel_dpll;
saved_state->crc_enabled = secondary_crtc_state->crc_enabled;
intel_crtc_free_hw_state(secondary_crtc_state);
@@ -4564,7 +4582,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
saved_state->uapi = crtc_state->uapi;
saved_state->inherited = crtc_state->inherited;
saved_state->scaler_state = crtc_state->scaler_state;
- saved_state->shared_dpll = crtc_state->shared_dpll;
+ saved_state->intel_dpll = crtc_state->intel_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
sizeof(saved_state->icl_port_dplls));
@@ -5318,7 +5336,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(double_wide);
if (display->dpll.mgr)
- PIPE_CONF_CHECK_P(shared_dpll);
+ PIPE_CONF_CHECK_P(intel_dpll);
/* FIXME convert everything over the dpll_mgr */
if (display->dpll.mgr || HAS_GMCH(display))
@@ -5470,7 +5488,7 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
if (ret)
return ret;
- ret = intel_atomic_add_affected_planes(state, crtc);
+ ret = intel_plane_add_affected(state, crtc);
if (ret)
return ret;
@@ -6186,7 +6204,7 @@ static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = intel_atomic_add_affected_planes(state, crtc);
+ ret = intel_plane_add_affected(state, crtc);
if (ret)
return ret;
}
@@ -6428,7 +6446,7 @@ int intel_atomic_check(struct drm_device *dev,
any_ms = true;
- intel_release_shared_dplls(state, crtc);
+ intel_dpll_release(state, crtc);
}
if (any_ms && !check_digital_port_conflicts(state)) {
@@ -6438,7 +6456,7 @@ int intel_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = intel_atomic_check_planes(state);
+ ret = intel_plane_atomic_check(state);
if (ret)
goto fail;
@@ -6602,7 +6620,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
bool modeset = intel_crtc_needs_modeset(new_crtc_state);
- drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq);
/*
* During modesets pipe configuration was programmed as the
@@ -6630,18 +6648,24 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
- drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq);
/*
* Disable the scaler(s) after the plane(s) so that we don't
* get a catastrophic underrun even if the two operations
* end up happening in two different frames.
*/
- if (DISPLAY_VER(display) >= 9 &&
- !intel_crtc_needs_modeset(new_crtc_state))
+ if (DISPLAY_VER(display) >= 9 && !modeset)
skl_detach_scalers(NULL, new_crtc_state);
+ if (!modeset &&
+ intel_crtc_needs_color_update(new_crtc_state) &&
+ !intel_color_uses_dsb(new_crtc_state) &&
+ HAS_DOUBLE_BUFFERED_LUT(display))
+ intel_color_load_luts(new_crtc_state);
+
if (intel_crtc_vrr_enabling(state, crtc))
intel_vrr_enable(new_crtc_state);
}
@@ -6715,10 +6739,10 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
if (!modeset &&
intel_crtc_needs_color_update(new_crtc_state) &&
- !new_crtc_state->use_dsb)
+ !new_crtc_state->use_dsb && !new_crtc_state->use_flipq)
intel_color_commit_noarm(NULL, new_crtc_state);
- if (!new_crtc_state->use_dsb)
+ if (!new_crtc_state->use_dsb && !new_crtc_state->use_flipq)
intel_crtc_planes_update_noarm(NULL, state, crtc);
}
@@ -6730,16 +6754,23 @@ static void intel_update_crtc(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (new_crtc_state->use_dsb) {
+ if (new_crtc_state->use_flipq) {
+ intel_flipq_enable(new_crtc_state);
+
+ intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->flipq_event);
+
+ intel_flipq_add(crtc, INTEL_FLIPQ_PLANE_1, 0, INTEL_DSB_0,
+ new_crtc_state->dsb_commit);
+ } else if (new_crtc_state->use_dsb) {
intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event);
- intel_dsb_commit(new_crtc_state->dsb_commit, false);
+ intel_dsb_commit(new_crtc_state->dsb_commit);
} else {
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(state, crtc);
if (new_crtc_state->dsb_commit)
- intel_dsb_commit(new_crtc_state->dsb_commit, false);
+ intel_dsb_commit(new_crtc_state->dsb_commit);
commit_pipe_pre_planes(state, crtc);
@@ -7169,7 +7200,17 @@ static void intel_atomic_dsb_prepare(struct intel_atomic_state *state,
return;
/* FIXME deal with everything */
+ new_crtc_state->use_flipq =
+ intel_flipq_supported(display) &&
+ !new_crtc_state->do_async_flip &&
+ !new_crtc_state->vrr.enable &&
+ !new_crtc_state->has_psr &&
+ !intel_crtc_needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_fastset(new_crtc_state) &&
+ !intel_crtc_needs_color_update(new_crtc_state);
+
new_crtc_state->use_dsb =
+ !new_crtc_state->use_flipq &&
!new_crtc_state->do_async_flip &&
(DISPLAY_VER(display) >= 20 || !new_crtc_state->has_psr) &&
!intel_crtc_needs_modeset(new_crtc_state) &&
@@ -7185,7 +7226,9 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (!new_crtc_state->use_dsb && !new_crtc_state->dsb_color_vblank)
+ if (!new_crtc_state->use_flipq &&
+ !new_crtc_state->use_dsb &&
+ !new_crtc_state->dsb_color)
return;
/*
@@ -7194,14 +7237,20 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
* Double that for pipe stuff and other overhead.
*/
new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
- new_crtc_state->use_dsb ? 1024 : 16);
+ new_crtc_state->use_dsb ||
+ new_crtc_state->use_flipq ? 1024 : 16);
if (!new_crtc_state->dsb_commit) {
+ new_crtc_state->use_flipq = false;
new_crtc_state->use_dsb = false;
intel_color_cleanup_commit(new_crtc_state);
return;
}
- if (new_crtc_state->use_dsb) {
+ if (new_crtc_state->use_flipq || new_crtc_state->use_dsb) {
+ /* Wa_18034343758 */
+ if (new_crtc_state->use_flipq)
+ intel_flipq_wait_dmc_halt(new_crtc_state->dsb_commit, crtc);
+
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_noarm(new_crtc_state->dsb_commit,
new_crtc_state);
@@ -7216,7 +7265,8 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
state, crtc);
- intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
+ if (new_crtc_state->use_dsb)
+ intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_arm(new_crtc_state->dsb_commit,
@@ -7232,19 +7282,27 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
skl_detach_scalers(new_crtc_state->dsb_commit,
new_crtc_state);
- if (!new_crtc_state->dsb_color_vblank) {
- intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
-
- intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
- intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
- intel_vrr_check_push_sent(new_crtc_state->dsb_commit, new_crtc_state);
- intel_dsb_interrupt(new_crtc_state->dsb_commit);
- }
+ /* Wa_18034343758 */
+ if (new_crtc_state->use_flipq)
+ intel_flipq_unhalt_dmc(new_crtc_state->dsb_commit, crtc);
}
- if (new_crtc_state->dsb_color_vblank)
+ if (intel_color_uses_chained_dsb(new_crtc_state))
intel_dsb_chain(state, new_crtc_state->dsb_commit,
- new_crtc_state->dsb_color_vblank, true);
+ new_crtc_state->dsb_color, true);
+ else if (intel_color_uses_gosub_dsb(new_crtc_state))
+ intel_dsb_gosub(new_crtc_state->dsb_commit,
+ new_crtc_state->dsb_color);
+
+ if (new_crtc_state->use_dsb && !intel_color_uses_chained_dsb(new_crtc_state)) {
+ intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
+
+ intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
+ intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
+ intel_vrr_check_push_sent(new_crtc_state->dsb_commit,
+ new_crtc_state);
+ intel_dsb_interrupt(new_crtc_state->dsb_commit);
+ }
intel_dsb_finish(new_crtc_state->dsb_commit);
}
@@ -7367,6 +7425,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
display->funcs.display->commit_modeset_enables(state);
+ /* FIXME probably need to sequence this properly */
intel_program_dpkgc_latency(state);
intel_wait_for_vblank_workers(state);
@@ -7390,6 +7449,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (!state->base.legacy_cursor_update && !new_crtc_state->use_dsb)
intel_vrr_check_push_sent(NULL, new_crtc_state);
+
+ if (new_crtc_state->use_flipq)
+ intel_flipq_disable(new_crtc_state);
}
/*
@@ -7433,7 +7495,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*
* FIXME get rid of this funny new->old swapping
*/
- old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank);
+ old_crtc_state->dsb_color = fetch_and_zero(&new_crtc_state->dsb_color);
old_crtc_state->dsb_commit = fetch_and_zero(&new_crtc_state->dsb_commit);
}
@@ -7526,7 +7588,7 @@ static int intel_atomic_swap_state(struct intel_atomic_state *state)
intel_atomic_swap_global_state(state);
- intel_shared_dpll_swap_state(state);
+ intel_dpll_swap_state(state);
intel_atomic_track_fbs(state);
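
A stand-alone restatement of the commit-path selection the intel_display.c hunks introduce: flip queue when supported and the update is simple enough, else DSB, else plain MMIO. Field names are simplified and the use_dsb condition is condensed to the terms visible above.

#include <stdbool.h>
#include <stdio.h>

struct fake_update {
        bool flipq_supported, async_flip, vrr_enable, has_psr;
        bool modeset, fastset, color_update;
        int display_ver;
};

static const char *commit_path(const struct fake_update *u)
{
        bool use_flipq = u->flipq_supported && !u->async_flip &&
                         !u->vrr_enable && !u->has_psr &&
                         !u->modeset && !u->fastset && !u->color_update;
        bool use_dsb = !use_flipq && !u->async_flip &&
                       (u->display_ver >= 20 || !u->has_psr) &&
                       !u->modeset && !u->fastset;

        return use_flipq ? "flip queue" : use_dsb ? "DSB" : "MMIO";
}

int main(void)
{
        struct fake_update u = { .flipq_supported = true, .display_ver = 30 };

        printf("simple page flip commits via: %s\n", commit_path(&u));
        return 0;
}
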
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 3b54a62c290a..37e2ab301a80 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -30,38 +30,21 @@
#include "i915_reg_defs.h"
#include "intel_display_limits.h"
-enum drm_scaling_filter;
-struct dpll;
struct drm_atomic_state;
-struct drm_connector;
struct drm_device;
struct drm_display_mode;
struct drm_encoder;
-struct drm_file;
-struct drm_format_info;
-struct drm_framebuffer;
-struct drm_i915_private;
-struct drm_mode_fb_cmd2;
struct drm_modeset_acquire_ctx;
-struct drm_plane;
-struct drm_plane_state;
-struct i915_address_space;
-struct i915_gtt_view;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_display;
-struct intel_dp;
struct intel_encoder;
-struct intel_initial_plane_config;
struct intel_link_m_n;
struct intel_plane;
struct intel_plane_state;
struct intel_power_domain_mask;
-struct pci_dev;
-struct work_struct;
-
#define pipe_name(p) ((p) + 'A')
@@ -452,10 +435,10 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct intel_display *display, enum pipe pipe);
void i830_disable_pipe(struct intel_display *display, enum pipe pipe);
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+int vlv_get_hpll_vco(struct drm_device *drm);
+int vlv_get_cck_clock(struct drm_device *drm,
const char *name, u32 reg, int ref_freq);
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+int vlv_get_cck_clock_hpll(struct drm_device *drm,
const char *name, u32 reg);
bool intel_has_pending_fb_unpin(struct intel_display *display);
void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -524,6 +507,9 @@ void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state);
bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_display_min_pipe_bpp(void);
+int intel_display_max_pipe_bpp(struct intel_display *display);
+
/* modesetting */
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
const char *reason, u8 pipe_mask);
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c
index 0578b68404da..4d565935e2cc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.c
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c
@@ -2,10 +2,11 @@
/* Copyright © 2024 Intel Corporation */
#include "i915_drv.h"
+#include "intel_display_conversion.h"
struct intel_display *__i915_to_display(struct drm_i915_private *i915)
{
- return &i915->display;
+ return i915->display;
}
struct intel_display *__drm_to_display(struct drm_device *drm)
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index b4937e102360..8c226406c5cd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -42,7 +42,7 @@ struct intel_color_funcs;
struct intel_crtc;
struct intel_crtc_state;
struct intel_dmc;
-struct intel_dpll_funcs;
+struct intel_dpll_global_funcs;
struct intel_dpll_mgr;
struct intel_fbdev;
struct intel_fdi_funcs;
@@ -122,11 +122,11 @@ struct intel_audio {
* intel_{prepare,enable,disable}_shared_dpll. Must be global rather than per
* dpll, because on some platforms plls share registers.
*/
-struct intel_dpll {
+struct intel_dpll_global {
struct mutex lock;
- int num_shared_dpll;
- struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ int num_dpll;
+ struct intel_dpll dplls[I915_NUM_PLLS];
const struct intel_dpll_mgr *mgr;
struct {
@@ -300,7 +300,7 @@ struct intel_display {
const struct intel_cdclk_funcs *cdclk;
/* Display pll funcs */
- const struct intel_dpll_funcs *dpll;
+ const struct intel_dpll_global_funcs *dpll;
/* irq display functions */
const struct intel_hotplug_funcs *hotplug;
@@ -480,6 +480,12 @@ struct intel_display {
} irq;
struct {
+ /* protected by wm.wm_mutex */
+ u16 linetime[I915_MAX_PIPES];
+ bool disable[I915_MAX_PIPES];
+ } pkgc;
+
+ struct {
wait_queue_head_t waitqueue;
/* mutex to protect pmdemand programming sequence */
@@ -539,6 +545,11 @@ struct intel_display {
} sagv;
struct {
+ /* LPT/WPT IOSF sideband protection */
+ struct mutex lock;
+ } sbi;
+
+ struct {
/*
* DG2: Mask of PHYs that were not calibrated by the firmware
* and should not be used.
@@ -565,12 +576,15 @@ struct intel_display {
/* hipri wq for commit cleanups */
struct workqueue_struct *cleanup;
+
+ /* unordered workqueue for all display unordered work */
+ struct workqueue_struct *unordered;
} wq;
/* Grouping using named structs. Keep sorted. */
struct drm_dp_tunnel_mgr *dp_tunnel_mgr;
struct intel_audio audio;
- struct intel_dpll dpll;
+ struct intel_dpll_global dpll;
struct intel_fbc *fbc[I915_MAX_FBCS];
struct intel_frontbuffer_tracking fb_tracking;
struct intel_hotplug hotplug;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 8d0a1779dd19..ce3f9810c42d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -4,6 +4,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/string_choices.h>
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
@@ -13,7 +14,6 @@
#include <drm/drm_fourcc.h>
#include "hsw_ips.h"
-#include "i915_irq.h"
#include "i915_reg.h"
#include "i9xx_wm_regs.h"
#include "intel_alpm.h"
@@ -25,6 +25,7 @@
#include "intel_display_debugfs_params.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
@@ -39,6 +40,7 @@
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
+#include "intel_link_bw.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
@@ -618,7 +620,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
drm_modeset_lock_all(display->drm);
@@ -627,7 +629,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
display->dpll.ref_clks.nssc,
display->dpll.ref_clks.ssc);
- for_each_shared_dpll(display, pll, i) {
+ for_each_dpll(display, pll, i) {
drm_printf(&p, "DPLL%i: %s, id: %i\n", pll->index,
pll->info->name, pll->info->id);
drm_printf(&p, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
@@ -972,7 +974,7 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
return ret;
drm_dbg(display->drm, "Got %s for DSC Enable\n",
- (dsc_enable) ? "true" : "false");
+ str_true_false(dsc_enable));
intel_dp->force_dsc_en = dsc_enable;
*offp += len;
@@ -1183,7 +1185,7 @@ static ssize_t i915_dsc_fractional_bpp_write(struct file *file,
return ret;
drm_dbg(display->drm, "Got %s for DSC Fractional BPP Enable\n",
- (dsc_fractional_bpp_enable) ? "true" : "false");
+ str_true_false(dsc_fractional_bpp_enable));
intel_dp->force_dsc_fractional_bpp_en = dsc_fractional_bpp_enable;
*offp += len;
@@ -1325,6 +1327,7 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
intel_psr_connector_debugfs_add(connector);
intel_alpm_lobf_debugfs_add(connector);
intel_dp_link_training_debugfs_add(connector);
+ intel_link_bw_connector_debugfs_add(connector);
if (DISPLAY_VER(display) >= 11 &&
((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst.dp) ||
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 90d714598664..089cffabbad5 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -18,6 +18,7 @@
#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_step.h"
@@ -1479,6 +1480,7 @@ static const struct {
{ 14, 1, &xe2_hpd_display },
{ 20, 0, &xe2_lpd_display },
{ 30, 0, &xe2_lpd_display },
+ { 30, 2, &xe2_lpd_display },
};
static const struct intel_display_device_info *
@@ -1621,13 +1623,17 @@ static void display_platforms_or(struct intel_display_platforms *dst,
struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
{
- struct intel_display *display = to_intel_display(pdev);
+ struct intel_display *display;
const struct intel_display_device_info *info;
struct intel_display_ip_ver ip_ver = {};
const struct platform_desc *desc;
const struct subplatform_desc *subdesc;
enum intel_step step;
+ display = kzalloc(sizeof(*display), GFP_KERNEL);
+ if (!display)
+ return ERR_PTR(-ENOMEM);
+
/* Add drm device backpointer as early as possible. */
display->drm = pci_get_drvdata(pdev);
@@ -1708,7 +1714,11 @@ no_display:
void intel_display_device_remove(struct intel_display *display)
{
+ if (!display)
+ return;
+
intel_display_params_free(&display->params);
+ kfree(display);
}
static void __intel_display_device_info_runtime_init(struct intel_display *display)
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 87c666792c0d..4308822f0415 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -157,6 +157,7 @@ struct intel_display_platforms {
#define HAS_DMC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dmc)
#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
+#define HAS_DOUBLE_BUFFERED_LUT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13)
@@ -172,6 +173,7 @@ struct intel_display_platforms {
#define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake)
#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch)
+#define HAS_FDI(__display) (IS_DISPLAY_VER((__display), 5, 8) && !HAS_GMCH(__display))
#define HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx)
#define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc)
@@ -181,6 +183,7 @@ struct intel_display_platforms {
#define HAS_MBUS_JOINING(__display) ((__display)->platform.alderlake_p || DISPLAY_VER(__display) >= 14)
#define HAS_MSO(__display) (DISPLAY_VER(__display) >= 12)
#define HAS_OVERLAY(__display) (DISPLAY_INFO(__display)->has_overlay)
+#define HAS_PIPEDMC(__display) (DISPLAY_VER(__display) >= 12)
#define HAS_PSR(__display) (DISPLAY_INFO(__display)->has_psr)
#define HAS_PSR_HW_TRACKING(__display) (DISPLAY_INFO(__display)->has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(__display) (DISPLAY_VER(__display) >= 12)
@@ -189,9 +192,8 @@ struct intel_display_platforms {
#define HAS_TRANSCODER(__display, trans) ((DISPLAY_RUNTIME_INFO(__display)->cpu_transcoder_mask & \
BIT(trans)) != 0)
#define HAS_UNCOMPRESSED_JOINER(__display) (DISPLAY_VER(__display) >= 13)
-#define HAS_ULTRAJOINER(__display) ((DISPLAY_VER(__display) >= 20 || \
- ((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \
- HAS_DSC(__display))
+#define HAS_ULTRAJOINER(__display) (((__display)->platform.dgfx && \
+ DISPLAY_VER(__display) == 14) && HAS_DSC(__display))
#define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11)
#define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask))
#define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical)
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 411fe7b918a7..8586ba102605 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -27,6 +27,7 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
@@ -43,6 +44,7 @@
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
+#include "intel_flipq.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -83,16 +85,10 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev)
void intel_display_driver_init_hw(struct intel_display *display)
{
- struct intel_cdclk_state *cdclk_state;
-
if (!HAS_DISPLAY(display))
return;
- cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
-
- intel_update_cdclk(display);
- intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
- cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;
+ intel_cdclk_read_hw(display);
intel_display_wa_apply(display);
}
@@ -241,12 +237,16 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
if (!HAS_DISPLAY(display))
return 0;
- intel_dmc_init(display);
+ display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
+ if (!display->hotplug.dp_wq) {
+ ret = -ENOMEM;
+ goto cleanup_vga_client_pw_domain_dmc;
+ }
display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
if (!display->wq.modeset) {
ret = -ENOMEM;
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_dp;
}
display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
@@ -262,27 +262,35 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
goto cleanup_wq_flip;
}
+ display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
+ if (!display->wq.unordered) {
+ ret = -ENOMEM;
+ goto cleanup_wq_cleanup;
+ }
+
+ intel_dmc_init(display);
+
intel_mode_config_init(display);
ret = intel_cdclk_init(display);
if (ret)
- goto cleanup_wq_cleanup;
+ goto cleanup_wq_unordered;
ret = intel_color_init(display);
if (ret)
- goto cleanup_wq_cleanup;
+ goto cleanup_wq_unordered;
ret = intel_dbuf_init(display);
if (ret)
- goto cleanup_wq_cleanup;
+ goto cleanup_wq_unordered;
ret = intel_bw_init(display);
if (ret)
- goto cleanup_wq_cleanup;
+ goto cleanup_wq_unordered;
ret = intel_pmdemand_init(display);
if (ret)
- goto cleanup_wq_cleanup;
+ goto cleanup_wq_unordered;
intel_init_quirks(display);
@@ -290,12 +298,16 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
return 0;
+cleanup_wq_unordered:
+ destroy_workqueue(display->wq.unordered);
cleanup_wq_cleanup:
destroy_workqueue(display->wq.cleanup);
cleanup_wq_flip:
destroy_workqueue(display->wq.flip);
cleanup_wq_modeset:
destroy_workqueue(display->wq.modeset);
+cleanup_wq_dp:
+ destroy_workqueue(display->hotplug.dp_wq);
cleanup_vga_client_pw_domain_dmc:
intel_dmc_fini(display);
intel_power_domains_driver_remove(display);
@@ -466,7 +478,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
}
intel_plane_possible_crtcs_init(display);
- intel_shared_dpll_init(display);
+ intel_dpll_init(display);
intel_fdi_pll_freq_update(display);
intel_update_czclk(display);
@@ -526,6 +538,8 @@ int intel_display_driver_probe(struct intel_display *display)
*/
intel_hdcp_component_init(display);
+ intel_flipq_init(display);
+
/*
* Force all active planes to recompute their states. So that on
* mode_setcrtc after probe, all the intel_plane_state variables
@@ -591,6 +605,7 @@ void intel_display_driver_remove(struct intel_display *display)
flush_workqueue(display->wq.flip);
flush_workqueue(display->wq.modeset);
flush_workqueue(display->wq.cleanup);
+ flush_workqueue(display->wq.unordered);
/*
* MST topology needs to be suspended so we don't have any calls to
@@ -603,8 +618,6 @@ void intel_display_driver_remove(struct intel_display *display)
/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!HAS_DISPLAY(display))
return;
@@ -619,7 +632,7 @@ void intel_display_driver_remove_noirq(struct intel_display *display)
intel_unregister_dsm_handler();
/* flush any delayed tasks or pending work */
- flush_workqueue(i915->unordered_wq);
+ flush_workqueue(display->wq.unordered);
intel_hdcp_component_fini(display);
@@ -631,9 +644,11 @@ void intel_display_driver_remove_noirq(struct intel_display *display)
intel_gmbus_teardown(display);
+ destroy_workqueue(display->hotplug.dp_wq);
destroy_workqueue(display->wq.flip);
destroy_workqueue(display->wq.modeset);
destroy_workqueue(display->wq.cleanup);
+ destroy_workqueue(display->wq.unordered);
intel_fbc_cleanup(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 3e73832e5e81..fb25ec8adae3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -9,14 +9,15 @@
#include "i915_irq.h"
#include "i915_reg.h"
#include "icl_dsi_regs.h"
-#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dmc_wl.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
@@ -25,6 +26,7 @@
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
#include "intel_pipe_crc_regs.h"
+#include "intel_plane.h"
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
@@ -1016,7 +1018,15 @@ static u32 gen8_de_port_aux_mask(struct intel_display *display)
static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
- if (DISPLAY_VER(display) >= 14)
+ if (DISPLAY_VER(display) >= 20)
+ return MTL_PLANE_ATS_FAULT |
+ GEN9_PIPE_CURSOR_FAULT |
+ GEN11_PIPE_PLANE5_FAULT |
+ GEN9_PIPE_PLANE4_FAULT |
+ GEN9_PIPE_PLANE3_FAULT |
+ GEN9_PIPE_PLANE2_FAULT |
+ GEN9_PIPE_PLANE1_FAULT;
+ else if (DISPLAY_VER(display) >= 14)
return MTL_PIPEDMC_ATS_FAULT |
MTL_PLANE_ATS_FAULT |
GEN12_PIPEDMC_FAULT |
@@ -1418,7 +1428,8 @@ void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
drm_err_ratelimited(display->drm,
- "The master control interrupt lied (DE PIPE)!\n");
+ "The master control interrupt lied (DE PIPE %c)!\n",
+ pipe_name(pipe));
continue;
}
@@ -1441,6 +1452,9 @@ void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
}
+ if (HAS_PIPEDMC(display) && iir & GEN12_PIPEDMC_INTERRUPT)
+ intel_pipedmc_irq_handler(display, pipe);
+
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(display, pipe);
@@ -2258,6 +2272,10 @@ void gen8_de_irq_postinstall(struct intel_display *display)
GEN12_DSB_INT(INTEL_DSB_1) |
GEN12_DSB_INT(INTEL_DSB_2);
+ /* TODO: figure out PIPEDMC interrupts for pre-LNL */
+ if (DISPLAY_VER(display) >= 20)
+ de_pipe_masked |= GEN12_PIPEDMC_INTERRUPT;
+
de_pipe_enables = de_pipe_masked |
GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
gen8_de_pipe_flip_done_mask(display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index c4f1ab43fc0c..75316247ee8a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -62,6 +62,9 @@ intel_display_param_named_unsafe(enable_dpt, bool, 0400,
intel_display_param_named_unsafe(enable_dsb, bool, 0400,
"Enable display state buffer (DSB) (default: true)");
+intel_display_param_named_unsafe(enable_flipq, bool, 0400,
+ "Enable DMC flip queue (default: false)");
+
intel_display_param_named_unsafe(enable_sagv, bool, 0400,
"Enable system agent voltage/frequency scaling (SAGV) (default: true)");
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index 5317138e6044..784e6bae8615 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -31,6 +31,7 @@ struct drm_printer;
param(int, enable_dc, -1, 0400) \
param(bool, enable_dpt, true, 0400) \
param(bool, enable_dsb, true, 0600) \
+ param(bool, enable_flipq, false, 0600) \
param(bool, enable_sagv, true, 0600) \
param(int, disable_power_well, -1, 0400) \
param(bool, enable_ips, true, 0600) \
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 16356523816f..273054c22325 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -5,6 +5,8 @@
#include <linux/string_helpers.h>
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -16,6 +18,7 @@
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
@@ -1254,10 +1257,8 @@ static u32 hsw_read_dcomp(struct intel_display *display)
static void hsw_write_dcomp(struct intel_display *display, u32 val)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (display->platform.haswell) {
- if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
+ if (intel_pcode_write(display->drm, GEN6_PCODE_WRITE_D_COMP, val))
drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
} else {
intel_de_write(display, D_COMP_BDW, val);
@@ -1604,9 +1605,7 @@ static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
static void tgl_bw_buddy_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- enum intel_dram_type type = dev_priv->dram_info.type;
- u8 num_channels = dev_priv->dram_info.num_channels;
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
const struct buddy_page_mask *table;
unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask;
int config, i;
@@ -1623,8 +1622,8 @@ static void tgl_bw_buddy_init(struct intel_display *display)
table = tgl_buddy_page_masks;
for (config = 0; table[config].page_mask != 0; config++)
- if (table[config].num_channels == num_channels &&
- table[config].type == type)
+ if (table[config].num_channels == dram_info->num_channels &&
+ table[config].type == dram_info->type)
break;
if (table[config].page_mask == 0) {
@@ -1883,12 +1882,11 @@ static void vlv_cmnlane_wa(struct intel_display *display)
static bool vlv_punit_is_power_gated(struct intel_display *display, u32 reg0)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
bool ret;
- vlv_punit_get(dev_priv);
- ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
- vlv_punit_put(dev_priv);
+ vlv_punit_get(display->drm);
+ ret = (vlv_punit_read(display->drm, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+ vlv_punit_put(display->drm);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index ab1163744bc5..77268802b55e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -5,12 +5,12 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_display_core.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
-#include "vlv_sideband_reg.h"
+#include "vlv_iosf_sb_reg.h"
#define __LIST_INLINE_ELEMS(__elem_type, ...) \
((__elem_type[]) { __VA_ARGS__ })
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index b104bce0e14d..48cac225a809 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -13,6 +13,7 @@
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
@@ -30,8 +31,20 @@
#include "intel_vga.h"
#include "skl_watermark.h"
#include "vlv_dpio_phy_regs.h"
+#include "vlv_iosf_sb_reg.h"
#include "vlv_sideband.h"
-#include "vlv_sideband_reg.h"
+
+/*
+ * PG0 is HW controlled, so it doesn't have a corresponding power well control knob.
+ *
+ * {ICL,SKL}_DISP_PW1_IDX..{ICL,SKL}_DISP_PW4_IDX -> PG1..PG4
+ */
+static enum skl_power_gate pw_idx_to_pg(struct intel_display *display, int pw_idx)
+{
+ int pw1_idx = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_PW_1 : SKL_PW_CTL_IDX_PW_1;
+
+ return pw_idx - pw1_idx + SKL_PG1;
+}
struct i915_power_well_regs {
i915_reg_t bios;
@@ -307,8 +320,8 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- bool disabled;
u32 reqs;
+ int ret;
/*
* Bspec doesn't require waiting for PWs to get disabled, but still do
@@ -319,12 +332,18 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display,
* Skip the wait in case any of the request bits are set and print a
* diagnostic message.
*/
- wait_for((disabled = !(intel_de_read(display, regs->driver) &
- HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
- (reqs = hsw_power_well_requesters(display, regs, pw_idx)), 1);
- if (disabled)
+ reqs = hsw_power_well_requesters(display, regs, pw_idx);
+
+ ret = intel_de_wait_for_clear(display, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ reqs ? 0 : 1);
+ if (!ret)
return;
+ /* Refresh requesters in case they popped up during the wait. */
+ if (!reqs)
+ reqs = hsw_power_well_requesters(display, regs, pw_idx);
+
drm_dbg_kms(display->drm,
"%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
intel_power_well_name(power_well),
@@ -349,8 +368,7 @@ static void hsw_power_well_enable(struct intel_display *display,
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ pg = pw_idx_to_pg(display, pw_idx);
/* Wa_16013190616:adlp */
if (display->platform.alderlake_p && pg == SKL_PG1)
@@ -374,8 +392,8 @@ static void hsw_power_well_enable(struct intel_display *display,
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ pg = pw_idx_to_pg(display, pw_idx);
+
gen9_wait_for_power_well_fuses(display, pg);
}
@@ -485,8 +503,7 @@ static void icl_tc_cold_exit(struct intel_display *display)
int ret, tries = 0;
while (1) {
- ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
- 250, 1);
+ ret = intel_pcode_write(display->drm, ICL_PCODE_EXIT_TCCOLD, 0);
if (ret != -EAGAIN || ++tries == 3)
break;
msleep(1);
@@ -809,7 +826,6 @@ static void tgl_disable_dc3co(struct intel_display *display)
static void assert_can_enable_dc5(struct intel_display *display)
{
- struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
enum i915_power_well_id high_pg;
/* Power wells at this level and above must be disabled for DC5 entry */
@@ -829,7 +845,7 @@ static void assert_can_enable_dc5(struct intel_display *display)
assert_display_rpm_held(display);
- assert_dmc_loaded(display);
+ assert_main_dmc_loaded(display);
}
void gen9_enable_dc5(struct intel_display *display)
@@ -860,7 +876,7 @@ static void assert_can_enable_dc6(struct intel_display *display)
DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be enabled.\n");
- assert_dmc_loaded(display);
+ assert_main_dmc_loaded(display);
}
void skl_enable_dc6(struct intel_display *display)
@@ -1102,7 +1118,6 @@ static void i830_pipes_power_well_sync_hw(struct intel_display *display,
static void vlv_set_power_well(struct intel_display *display,
struct i915_power_well *power_well, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
u32 mask;
u32 state;
@@ -1112,29 +1127,29 @@ static void vlv_set_power_well(struct intel_display *display,
state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
PUNIT_PWRGT_PWR_GATE(pw_idx);
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+ ((vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask) == state)
if (COND)
goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL);
ctrl &= ~mask;
ctrl |= state;
- vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+ vlv_punit_write(display->drm, PUNIT_REG_PWRGT_CTRL, ctrl);
if (wait_for(COND, 100))
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+ vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL));
#undef COND
out:
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
static void vlv_power_well_enable(struct intel_display *display,
@@ -1152,7 +1167,6 @@ static void vlv_power_well_disable(struct intel_display *display,
static bool vlv_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
bool enabled = false;
u32 mask;
@@ -1162,9 +1176,9 @@ static bool vlv_power_well_enabled(struct intel_display *display,
mask = PUNIT_PWRGT_MASK(pw_idx);
ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ state = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask;
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
@@ -1178,10 +1192,10 @@ static bool vlv_power_well_enabled(struct intel_display *display,
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL) & mask;
drm_WARN_ON(display->drm, ctrl != state);
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
return enabled;
}
@@ -1437,7 +1451,6 @@ static void assert_chv_phy_status(struct intel_display *display)
static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
u32 tmp;
@@ -1461,30 +1474,30 @@ static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
drm_err(display->drm, "Display PHY %d is not power up\n",
phy);
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Enable dynamic power down */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28);
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW28);
tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW28, tmp);
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW6_CH1);
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW6_CH1, tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW6_CH1, tmp);
} else {
/*
* Force the non-existing CL2 off. BXT does this
* too, so maybe it saves some power even though
* CL2 doesn't exist?
*/
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30);
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW30);
tmp |= DPIO_CL2_LDOFUSE_PWRENB;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW30, tmp);
}
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
intel_de_write(display, DISPLAY_PHY_CONTROL,
@@ -1535,7 +1548,6 @@ static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override, unsigned int mask)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, val, expected, actual;
/*
@@ -1553,9 +1565,9 @@ static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_ph
else
reg = CHV_CMN_DW6_CH1;
- vlv_dpio_get(dev_priv);
- val = vlv_dpio_read(dev_priv, phy, reg);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ val = vlv_dpio_read(display->drm, phy, reg);
+ vlv_dpio_put(display->drm);
/*
* This assumes !override is only used when the port is disabled.
@@ -1665,14 +1677,13 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
static bool chv_pipe_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
bool enabled;
u32 state, ctrl;
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
+ state = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
@@ -1685,10 +1696,10 @@ static bool chv_pipe_power_well_enabled(struct intel_display *display,
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
drm_WARN_ON(display->drm, ctrl << 16 != state);
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
return enabled;
}
@@ -1697,36 +1708,35 @@ static void chv_set_pipe_power_well(struct intel_display *display,
struct i915_power_well *power_well,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
u32 state;
u32 ctrl;
state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
+ ((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
if (COND)
goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
ctrl &= ~DP_SSC_MASK(pipe);
ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+ vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM));
#undef COND
out:
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
static void chv_pipe_power_well_sync_hw(struct intel_display *display,
@@ -1772,7 +1782,7 @@ tgl_tc_cold_request(struct intel_display *display, bool block)
* Spec states that we should timeout the request after 200us
* but the function below will timeout after 500us
*/
- ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
+ ret = intel_pcode_read(display->drm, TGL_PCODE_TCCOLD, &low_val, &high_val);
if (ret == 0) {
if (block &&
(low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
diff --git a/drivers/gpu/drm/i915/display/intel_display_regs.h b/drivers/gpu/drm/i915/display/intel_display_regs.h
new file mode 100644
index 000000000000..7bd09d981cd2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_regs.h
@@ -0,0 +1,2932 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_REGS_H__
+#define __INTEL_DISPLAY_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
+#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
+#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
+
+#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
+#define DPIO_MODSEL1 (1 << 3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1 << 2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1 << 1)
+#define DPIO_CMNRST (1 << 0)
+
+#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
+#define MIPIO_RST_CTRL (1 << 2)
+
+#define _BXT_PHY_CTL_DDI_A 0x64C00
+#define _BXT_PHY_CTL_DDI_B 0x64C10
+#define _BXT_PHY_CTL_DDI_C 0x64C20
+#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10)
+#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9)
+#define BXT_PHY_LANE_ENABLED (1 << 8)
+#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
+ _BXT_PHY_CTL_DDI_B)
+
+#define _PHY_CTL_FAMILY_DDI 0x64C90
+#define _PHY_CTL_FAMILY_EDP 0x64C80
+#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
+#define COMMON_RESET_DIS (1 << 31)
+#define BXT_PHY_CTL_FAMILY(phy) \
+ _MMIO(_PICK_EVEN_2RANGES(phy, 1, \
+ _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
+
+/* UAIMI scratch pad register 1 */
+#define UAIMI_SPR1 _MMIO(0x4F074)
+/* SKL VccIO mask */
+#define SKL_VCCIO_MASK 0x1
+/* SKL balance leg register */
+#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
+/* I_boost values */
+#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port))
+#define BALANCE_LEG_MASK(port) (7 << (8 + 3 * (port)))
+/* Balance leg disable bits */
+#define BALANCE_LEG_DISABLE_SHIFT 23
+#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
+
+#define ILK_GTT_FAULT _MMIO(0x44040) /* ilk/snb */
+#define GTT_FAULT_INVALID_GTT_PTE (1 << 7)
+#define GTT_FAULT_INVALID_PTE_DATA (1 << 6)
+#define GTT_FAULT_CURSOR_B_FAULT (1 << 5)
+#define GTT_FAULT_CURSOR_A_FAULT (1 << 4)
+#define GTT_FAULT_SPRITE_B_FAULT (1 << 3)
+#define GTT_FAULT_SPRITE_A_FAULT (1 << 2)
+#define GTT_FAULT_PRIMARY_B_FAULT (1 << 1)
+#define GTT_FAULT_PRIMARY_A_FAULT (1 << 0)
+
+#define DERRMR _MMIO(0x44050)
+/* Note that HBLANK events are reserved on bdw+ */
+#define DERRMR_PIPEA_SCANLINE (1 << 0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1 << 1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1 << 2)
+#define DERRMR_PIPEA_VBLANK (1 << 3)
+#define DERRMR_PIPEA_HBLANK (1 << 5)
+#define DERRMR_PIPEB_SCANLINE (1 << 8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1 << 9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1 << 10)
+#define DERRMR_PIPEB_VBLANK (1 << 11)
+#define DERRMR_PIPEB_HBLANK (1 << 13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define DERRMR_PIPEC_SCANLINE (1 << 14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1 << 15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1 << 20)
+#define DERRMR_PIPEC_VBLANK (1 << 21)
+#define DERRMR_PIPEC_HBLANK (1 << 22)
+
+#define VLV_IRQ_REGS I915_IRQ_REGS(VLV_IMR, \
+ VLV_IER, \
+ VLV_IIR)
+
+#define VLV_EIR _MMIO(VLV_DISPLAY_BASE + 0x20b0)
+#define VLV_EMR _MMIO(VLV_DISPLAY_BASE + 0x20b4)
+#define VLV_ESR _MMIO(VLV_DISPLAY_BASE + 0x20b8)
+#define VLV_ERROR_GUNIT_TLB_DATA (1 << 6)
+#define VLV_ERROR_GUNIT_TLB_PTE (1 << 5)
+#define VLV_ERROR_PAGE_TABLE (1 << 4)
+#define VLV_ERROR_CLAIM (1 << 0)
+
+#define VLV_ERROR_REGS I915_ERROR_REGS(VLV_EMR, VLV_EIR)
+
+#define _MBUS_ABOX0_CTL 0x45038
+#define _MBUS_ABOX1_CTL 0x45048
+#define _MBUS_ABOX2_CTL 0x4504C
+#define MBUS_ABOX_CTL(x) \
+ _MMIO(_PICK_EVEN_2RANGES(x, 2, \
+ _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \
+ _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL))
+
+#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
+#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
+#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
+#define MBUS_ABOX_B_CREDIT(x) ((x) << 16)
+#define MBUS_ABOX_BT_CREDIT_POOL2_MASK (0x1F << 8)
+#define MBUS_ABOX_BT_CREDIT_POOL2(x) ((x) << 8)
+#define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0)
+#define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0)
+
+#define IPS_CTL _MMIO(0x43408)
+#define IPS_ENABLE REG_BIT(31)
+#define IPS_FALSE_COLOR REG_BIT(4)
+
+/*
+ * Clock control & power management
+ */
+#define _DPLL_A 0x6014
+#define _DPLL_B 0x6018
+#define _CHV_DPLL_C 0x6030
+#define DPLL(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
+
+#define VGA0 _MMIO(0x6000)
+#define VGA1 _MMIO(0x6004)
+#define VGA_PD _MMIO(0x6010)
+#define VGA0_PD_P2_DIV_4 (1 << 7)
+#define VGA0_PD_P1_DIV_2 (1 << 5)
+#define VGA0_PD_P1_SHIFT 0
+#define VGA0_PD_P1_MASK (0x1f << 0)
+#define VGA1_PD_P2_DIV_4 (1 << 15)
+#define VGA1_PD_P1_DIV_2 (1 << 13)
+#define VGA1_PD_P1_SHIFT 8
+#define VGA1_PD_P1_MASK (0x1f << 8)
+#define DPLL_VCO_ENABLE (1 << 31)
+#define DPLL_SDVO_HIGH_SPEED (1 << 30)
+#define DPLL_DVO_2X_MODE (1 << 30)
+#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_REF_CLK_ENABLE_VLV (1 << 29)
+#define DPLL_VGA_MODE_DIS (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+#define DPLL_MODE_MASK (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_LOCK_VLV (1 << 15)
+#define DPLL_INTEGRATED_CRI_CLK_VLV (1 << 14)
+#define DPLL_INTEGRATED_REF_CLK_VLV (1 << 13)
+#define DPLL_SSC_REF_CLK_CHV (1 << 13)
+#define DPLL_PORTC_READY_MASK (0xf << 4)
+#define DPLL_PORTB_READY_MASK (0xf)
+
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+
+/* Additional CHV pll/phy registers */
+#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
+#define DPLL_PORTD_READY_MASK (0xf)
+#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
+#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2 * (phy) + (ch) + 27))
+#define PHY_LDO_DELAY_0NS 0x0
+#define PHY_LDO_DELAY_200NS 0x1
+#define PHY_LDO_DELAY_600NS 0x2
+#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2 * (phy) + 23))
+#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8 * (phy) + 4 * (ch) + 11))
+#define PHY_CH_SU_PSR 0x1
+#define PHY_CH_DEEP_PSR 0x7
+#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6 * (phy) + 3 * (ch) + 2))
+#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
+#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
+#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
+#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6 - (6 * (phy) + 3 * (ch))))
+#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
+
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
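/*
 * Illustrative sketch, not part of the patch: with the one-hot encoding
 * described above, an i830 LVDS P1 post divider of p1 (assumed here to
 * count from 1) sets bit (p1 - 1) within the field; the helper name is
 * hypothetical.
 */
static inline u32 i830_lvds_p1_bits(int p1)
{
	return (1 << (p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
}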
+/* i830, required in DVO non-gang */
+#define PLL_P2_DIVIDE_BY_4 (1 << 23)
+#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+#define PLL_REF_INPUT_DREFCLK (0 << 13)
+#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define PLL_REF_INPUT_MASK (3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT 9
+/* Ironlake */
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x) - 1) << 9)
+# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
+# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
+
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
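/*
 * Illustrative sketch, not part of the patch: insert a load pulse phase
 * (legal range 4..13, default 6, per the comment above) into a DPLL
 * control value; the helper name is hypothetical.
 */
static inline u32 pll_load_pulse_phase(u32 dpll, unsigned int phase)
{
	return (dpll & ~PLL_LOAD_PULSE_PHASE_MASK) |
	       (phase << PLL_LOAD_PULSE_PHASE_SHIFT);
}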
+#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ */
+#define SDVO_MULTIPLIER_MASK 0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES 4
+#define SDVO_MULTIPLIER_SHIFT_VGA 0
+
+#define _DPLL_A_MD 0x601c
+#define _DPLL_B_MD 0x6020
+#define _CHV_DPLL_C_MD 0x603c
+#define DPLL_MD(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
+
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT 24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are filler.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
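/*
 * Illustrative sketch, not part of the patch: pick the smallest multiplier
 * that lifts the 10x bus clock into the 1-2 GHz window described above,
 * encoded as "factor - 1" for this field; the helper name is hypothetical.
 */
static inline u32 sdvo_udi_multiplier_bits(unsigned int dotclock_khz)
{
	unsigned int mult;

	for (mult = 1; mult <= 5; mult++) {
		/* bus clock = 10 * DPLL clock = 10 * dotclock * mult */
		unsigned long long bus_khz = 10ull * dotclock_khz * mult;

		if (bus_khz >= 1000000 && bus_khz <= 2000000)
			return (mult - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
	}

	return 0; /* no legal multiplier; such a mode would be rejected */
}

/* e.g. a 65 MHz dotclock (65000 kHz) yields mult = 2, matching the example above */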
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This best be set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+
+#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
+
+#define _FPA0 0x6040
+#define _FPA1 0x6044
+#define _FPB0 0x6048
+#define _FPB1 0x604c
+#define FP0(pipe) _MMIO_PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _MMIO_PIPE(pipe, _FPA1, _FPB1)
+#define FP_N_DIV_MASK 0x003f0000
+#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
+#define FP_N_DIV_SHIFT 16
+#define FP_M1_DIV_MASK 0x00003f00
+#define FP_M1_DIV_SHIFT 8
+#define FP_M2_DIV_MASK 0x0000003f
+#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
+#define FP_M2_DIV_SHIFT 0
+
+#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
+#define FW_CSPWRDWNEN (1 << 15)
+
+#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
+
+#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508)
+#define CDCLK_FREQ_SHIFT 4
+#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
+#define CZCLK_FREQ_MASK 0xf
+
+#define GCI_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x650C)
+#define PFI_CREDIT_63 (9 << 28) /* chv only */
+#define PFI_CREDIT_31 (8 << 28) /* chv only */
+#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
+#define PFI_CREDIT_RESEND (1 << 27)
+#define VGA_FAST_MODE_DISABLE (1 << 14)
+
+#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
+
+#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
+
+/*
+ * Overlay regs
+ */
+#define OVADD _MMIO(0x30000)
+#define DOVSTA _MMIO(0x30008)
+#define OC_BUF (0x3 << 20)
+#define OGAMC5 _MMIO(0x30010)
+#define OGAMC4 _MMIO(0x30014)
+#define OGAMC3 _MMIO(0x30018)
+#define OGAMC2 _MMIO(0x3001c)
+#define OGAMC1 _MMIO(0x30020)
+#define OGAMC0 _MMIO(0x30024)
+
+#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
+#define BXT_GMBUS_GATING_DIS (1 << 14)
+#define DG2_DPFC_GATING_DIS REG_BIT(31)
+
+#define GEN9_CLKGATE_DIS_5 _MMIO(0x46540)
+#define DPCE_GATING_DIS REG_BIT(17)
+
+#define _CLKGATE_DIS_PSL_A 0x46520
+#define _CLKGATE_DIS_PSL_B 0x46524
+#define _CLKGATE_DIS_PSL_C 0x46528
+#define DUPS1_GATING_DIS (1 << 15)
+#define DUPS2_GATING_DIS (1 << 19)
+#define DUPS3_GATING_DIS (1 << 23)
+#define CURSOR_GATING_DIS REG_BIT(28)
+#define DPF_GATING_DIS (1 << 10)
+#define DPF_RAM_GATING_DIS (1 << 9)
+#define DPFR_GATING_DIS (1 << 8)
+
+#define CLKGATE_DIS_PSL(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
+
+#define _CLKGATE_DIS_PSL_EXT_A 0x4654C
+#define _CLKGATE_DIS_PSL_EXT_B 0x46550
+#define PIPEDMC_GATING_DIS REG_BIT(12)
+
+#define CLKGATE_DIS_PSL_EXT(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
+
+/*
+ * Display engine regs
+ */
+/* Pipe/transcoder A timing regs */
+#define _TRANS_HTOTAL_A 0x60000
+#define _TRANS_HTOTAL_B 0x61000
+#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
+#define HTOTAL_MASK REG_GENMASK(31, 16)
+#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal))
+#define HACTIVE_MASK REG_GENMASK(15, 0)
+#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay))
+
+#define _TRANS_HBLANK_A 0x60004
+#define _TRANS_HBLANK_B 0x61004
+#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A)
+#define HBLANK_END_MASK REG_GENMASK(31, 16)
+#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end))
+#define HBLANK_START_MASK REG_GENMASK(15, 0)
+#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start))
+
+#define _TRANS_HSYNC_A 0x60008
+#define _TRANS_HSYNC_B 0x61008
+#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A)
+#define HSYNC_END_MASK REG_GENMASK(31, 16)
+#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end))
+#define HSYNC_START_MASK REG_GENMASK(15, 0)
+#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start))
+
+#define _TRANS_VTOTAL_A 0x6000c
+#define _TRANS_VTOTAL_B 0x6100c
+#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A)
+#define VTOTAL_MASK REG_GENMASK(31, 16)
+#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal))
+#define VACTIVE_MASK REG_GENMASK(15, 0)
+#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay))
+
+#define _TRANS_VBLANK_A 0x60010
+#define _TRANS_VBLANK_B 0x61010
+#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A)
+#define VBLANK_END_MASK REG_GENMASK(31, 16)
+#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end))
+#define VBLANK_START_MASK REG_GENMASK(15, 0)
+#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start))
+
+#define _TRANS_VSYNC_A 0x60014
+#define _TRANS_VSYNC_B 0x61014
+#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A)
+#define VSYNC_END_MASK REG_GENMASK(31, 16)
+#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end))
+#define VSYNC_START_MASK REG_GENMASK(15, 0)
+#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start))
+
+#define _PIPEASRC 0x6001c
+#define _PIPEBSRC 0x6101c
+#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC)
+#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16)
+#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w))
+#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0)
+#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h))
+
+#define _BCLRPAT_A 0x60020
+#define _BCLRPAT_B 0x61020
+#define BCLRPAT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A)
+
+#define _TRANS_VSYNCSHIFT_A 0x60028
+#define _TRANS_VSYNCSHIFT_B 0x61028
+#define TRANS_VSYNCSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A)
+
+#define _TRANS_MULT_A 0x6002c
+#define _TRANS_MULT_B 0x6102c
+#define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
+
+/* Hotplug control (945+ only) */
+#define PORT_HOTPLUG_EN(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
+#define PORTB_HOTPLUG_INT_EN (1 << 29)
+#define PORTC_HOTPLUG_INT_EN (1 << 28)
+#define PORTD_HOTPLUG_INT_EN (1 << 27)
+#define SDVOB_HOTPLUG_INT_EN (1 << 26)
+#define SDVOC_HOTPLUG_INT_EN (1 << 25)
+#define TV_HOTPLUG_INT_EN (1 << 18)
+#define CRT_HOTPLUG_INT_EN (1 << 9)
+#define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \
+ PORTC_HOTPLUG_INT_EN | \
+ PORTD_HOTPLUG_INT_EN | \
+ SDVOC_HOTPLUG_INT_EN | \
+ SDVOB_HOTPLUG_INT_EN | \
+ CRT_HOTPLUG_INT_EN)
+#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
+
+#define PORT_HOTPLUG_STAT(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
+/* HDMI/DP bits are g4x+ */
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
+#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
+#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
+#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
+#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
+#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
+#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
+#define PORTB_HOTPLUG_INT_SHORT_PLUSE (1 << 17)
+/* CRT/TV common between gen3+ */
+#define CRT_HOTPLUG_INT_STATUS (1 << 11)
+#define TV_HOTPLUG_INT_STATUS (1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+#define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6)
+#define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5)
+#define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4)
+#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4)
+
+/* SDVO is different across gen3/4 */
+#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
+#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
+/*
+ * Bspec seems to be seriously misled about the SDVO hpd bits on i965g/gm,
+ * since reality corroborates that they're the same as on gen3. But keep these
+ * bits here (and the comment!) to help any other lost wanderers back onto the
+ * right tracks.
+ */
+#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
+#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
+#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
+#define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_G4X | \
+ SDVOC_HOTPLUG_INT_STATUS_G4X | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_I915 | \
+ SDVOC_HOTPLUG_INT_STATUS_I915 | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+/* SDVO and HDMI port control.
+ * The same register may be used for SDVO or HDMI */
+#define _GEN3_SDVOB 0x61140
+#define _GEN3_SDVOC 0x61160
+#define GEN3_SDVOB _MMIO(_GEN3_SDVOB)
+#define GEN3_SDVOC _MMIO(_GEN3_SDVOC)
+#define GEN4_HDMIB GEN3_SDVOB
+#define GEN4_HDMIC GEN3_SDVOC
+#define VLV_HDMIB _MMIO(VLV_DISPLAY_BASE + 0x61140)
+#define VLV_HDMIC _MMIO(VLV_DISPLAY_BASE + 0x61160)
+#define CHV_HDMID _MMIO(VLV_DISPLAY_BASE + 0x6116C)
+#define PCH_SDVOB _MMIO(0xe1140)
+#define PCH_HDMIB PCH_SDVOB
+#define PCH_HDMIC _MMIO(0xe1150)
+#define PCH_HDMID _MMIO(0xe1160)
+
+#define PORT_DFT_I9XX _MMIO(0x61150)
+#define DC_BALANCE_RESET (1 << 25)
+#define PORT_DFT2_G4X(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
+#define DC_BALANCE_RESET_VLV (1 << 31)
+#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
+#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */
+#define PIPE_B_SCRAMBLE_RESET REG_BIT(1)
+#define PIPE_A_SCRAMBLE_RESET REG_BIT(0)
+
+/* Gen 3 SDVO bits: */
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_SEL_SHIFT 30
+#define SDVO_PIPE_SEL_MASK (1 << 30)
+#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+/*
+ * 915G/GM SDVO pixel multiplier.
+ * Programmed value is multiplier - 1, up to 5x.
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT 23
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16) /* Port C only */
+#define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */
+#define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */
+#define SDVO_DETECTED (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
+ SDVO_INTERRUPT_ENABLE)
+#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
+
+/* Gen 4 SDVO/HDMI bits: */
+#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
+#define SDVO_COLOR_FORMAT_MASK (7 << 26)
+#define SDVO_ENCODING_SDVO (0 << 10)
+#define SDVO_ENCODING_HDMI (2 << 10)
+#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
+#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
+#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
+#define HDMI_AUDIO_ENABLE (1 << 6) /* HDMI only */
+/* VSYNC/HSYNC bits new with 965, default is to be set */
+#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
+#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
+
+/* Gen 5 (IBX) SDVO/HDMI bits: */
+#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */
+#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
+
+/* Gen 6 (CPT) SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_SHIFT_CPT 29
+#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
+
+/* CHV SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_SHIFT_CHV 24
+#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
+#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
+
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA _MMIO(0x61178)
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_ASYNC_DATA_SIZE 36
+#define VIDEO_DIP_GMP_DATA_SIZE 36
+#define VIDEO_DIP_VSC_DATA_SIZE 36
+#define VIDEO_DIP_PPS_DATA_SIZE 132
+#define VIDEO_DIP_CTL _MMIO(0x61170)
+/* Pre HSW: */
+#define VIDEO_DIP_ENABLE (1 << 31)
+#define VIDEO_DIP_PORT(port) ((port) << 29)
+#define VIDEO_DIP_PORT_MASK (3 << 29)
+#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */
+#define VIDEO_DIP_ENABLE_AVI (1 << 21)
+#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
+#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */
+#define VIDEO_DIP_ENABLE_SPD (8 << 21)
+#define VIDEO_DIP_SELECT_AVI (0 << 19)
+#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
+#define VIDEO_DIP_SELECT_GAMUT (2 << 19)
+#define VIDEO_DIP_SELECT_SPD (3 << 19)
+#define VIDEO_DIP_SELECT_MASK (3 << 19)
+#define VIDEO_DIP_FREQ_ONCE (0 << 16)
+#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
+#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+#define VIDEO_DIP_FREQ_MASK (3 << 16)
+/* HSW and later: */
+#define VIDEO_DIP_ENABLE_DRM_GLK (1 << 28)
+#define PSR_VSC_BIT_7_SET (1 << 27)
+#define VSC_SELECT_MASK (0x3 << 25)
+#define VSC_SELECT_SHIFT 25
+#define VSC_DIP_HW_HEA_DATA (0 << 25)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
+#define VSC_DIP_SW_HEA_DATA (3 << 25)
+#define VDIP_ENABLE_PPS (1 << 24)
+#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
+#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
+#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
+#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
+#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+/* ADL and later: */
+#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23)
+
+#define PCH_GTC_CTL _MMIO(0xe7000)
+#define PCH_GTC_ENABLE (1 << 31)
+
+/* Display Port */
+#define DP_A _MMIO(0x64000) /* eDP */
+#define DP_B _MMIO(0x64100)
+#define DP_C _MMIO(0x64200)
+#define DP_D _MMIO(0x64300)
+#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
+#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
+#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
+#define DP_PORT_EN REG_BIT(31)
+#define DP_PIPE_SEL_MASK REG_GENMASK(30, 30)
+#define DP_PIPE_SEL(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK, (pipe))
+#define DP_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29)
+#define DP_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_IVB, (pipe))
+#define DP_PIPE_SEL_SHIFT_CHV 16
+#define DP_PIPE_SEL_MASK_CHV REG_GENMASK(17, 16)
+#define DP_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_CHV, (pipe))
+#define DP_LINK_TRAIN_MASK REG_GENMASK(29, 28)
+#define DP_LINK_TRAIN_PAT_1 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 0)
+#define DP_LINK_TRAIN_PAT_2 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 1)
+#define DP_LINK_TRAIN_PAT_IDLE REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 2)
+#define DP_LINK_TRAIN_OFF REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 3)
+#define DP_LINK_TRAIN_MASK_CPT REG_GENMASK(10, 8)
+#define DP_LINK_TRAIN_PAT_1_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 0)
+#define DP_LINK_TRAIN_PAT_2_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 1)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 2)
+#define DP_LINK_TRAIN_OFF_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 3)
+#define DP_VOLTAGE_MASK REG_GENMASK(27, 25)
+#define DP_VOLTAGE_0_4 REG_FIELD_PREP(DP_VOLTAGE_MASK, 0)
+#define DP_VOLTAGE_0_6 REG_FIELD_PREP(DP_VOLTAGE_MASK, 1)
+#define DP_VOLTAGE_0_8 REG_FIELD_PREP(DP_VOLTAGE_MASK, 2)
+#define DP_VOLTAGE_1_2 REG_FIELD_PREP(DP_VOLTAGE_MASK, 3)
+#define DP_PRE_EMPHASIS_MASK REG_GENMASK(24, 22)
+#define DP_PRE_EMPHASIS_0 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 0)
+#define DP_PRE_EMPHASIS_3_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 1)
+#define DP_PRE_EMPHASIS_6 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 2)
+#define DP_PRE_EMPHASIS_9_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 3)
+#define DP_PORT_WIDTH_MASK REG_GENMASK(21, 19)
+#define DP_PORT_WIDTH(width) REG_FIELD_PREP(DP_PORT_WIDTH_MASK, (width) - 1)
+#define DP_ENHANCED_FRAMING REG_BIT(18)
+#define EDP_PLL_FREQ_MASK REG_GENMASK(17, 16)
+#define EDP_PLL_FREQ_270MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 0)
+#define EDP_PLL_FREQ_162MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 1)
+#define DP_PORT_REVERSAL REG_BIT(15)
+#define EDP_PLL_ENABLE REG_BIT(14)
+#define DP_CLOCK_OUTPUT_ENABLE REG_BIT(13)
+#define DP_SCRAMBLING_DISABLE REG_BIT(12)
+#define DP_SCRAMBLING_DISABLE_ILK REG_BIT(7)
+#define DP_COLOR_RANGE_16_235 REG_BIT(8)
+#define DP_AUDIO_OUTPUT_ENABLE REG_BIT(6)
+#define DP_SYNC_VS_HIGH REG_BIT(4)
+#define DP_SYNC_HS_HIGH REG_BIT(3)
+#define DP_DETECTED REG_BIT(2)
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
+ *
+ * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+#define _PIPEA_DATA_M_G4X 0x70050
+#define _PIPEB_DATA_M_G4X 0x71050
+#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define TU_SIZE_MASK REG_GENMASK(30, 25)
+#define TU_SIZE(x) REG_FIELD_PREP(TU_SIZE_MASK, (x) - 1) /* default size 64 */
+#define DATA_LINK_M_N_MASK REG_GENMASK(23, 0)
+#define DATA_LINK_N_MAX (0x800000)
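/*
 * Illustrative sketch, not part of the patch: the ratio from the GMCH M/N
 * comment above, scaled down so both values fit in DATA_LINK_M_N_MASK.
 * The driver's real helper (intel_link_compute_m_n()) adds rounding rules
 * beyond this bare arithmetic; the function name here is hypothetical.
 */
static inline void example_data_m_n(u32 bytes_per_pixel, u32 nlanes,
				    u32 pixel_clock_khz, u32 link_clock_khz,
				    u32 *data_m, u32 *data_n)
{
	u64 m = (u64)pixel_clock_khz * bytes_per_pixel; /* dot clock * bytes/pixel */
	u64 n = (u64)link_clock_khz * nlanes;           /* ls_clk * lane count */

	/* halve both until they fit the 24-bit M/N fields */
	while (m > DATA_LINK_M_N_MASK || n > DATA_LINK_M_N_MASK) {
		m >>= 1;
		n >>= 1;
	}

	*data_m = m;
	*data_n = n;
}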
+
+#define _PIPEA_DATA_N_G4X 0x70054
+#define _PIPEB_DATA_N_G4X 0x71054
+#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
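/*
 * Worked example, not part of the patch (assumed numbers): for a
 * 148500 kHz pixel clock on a 2.7 GHz link, the driver works in link
 * symbol clock units (270000 kHz), so Link M/N = 148500/270000. The
 * same 24-bit scaling used for the data M/N values applies before
 * these are programmed into the registers below.
 */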
+#define _PIPEA_LINK_M_G4X 0x70060
+#define _PIPEB_LINK_M_G4X 0x71060
+#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
+
+#define _PIPEA_LINK_N_G4X 0x70064
+#define _PIPEB_LINK_N_G4X 0x71064
+#define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
+
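+/*
+ * Illustrative sketch of the M/N computation described above (the
+ * function name is made up for the example; min_t(),
+ * roundup_pow_of_two(), mul_u32_u32() and div_u64() are assumed to be
+ * available here). For the data M/N pair, m = dot clock * bytes per
+ * pixel and n = ls_clk * # of lanes; for the link M/N pair,
+ * m = pixel_clock and n = ls_clk. Both results must fit in
+ * DATA_LINK_M_N_MASK.
+ */
+static inline void example_compute_m_n(u32 m, u32 n, u32 *ret_m, u32 *ret_n)
+{
+	/* pick the largest N that still fits in the 24-bit field */
+	*ret_n = min_t(u32, roundup_pow_of_two(n), DATA_LINK_N_MAX);
+	/* scale M by the same factor, rounding down */
+	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
+}
+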
+/* Pipe A */
+#define _PIPEADSL 0x70000
+#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL)
+#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */
+#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0)
+
+#define _TRANSACONF 0x70008
+#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF)
+#define TRANSCONF_ENABLE REG_BIT(31)
+#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
+#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */
+#define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
+#define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
+#define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
+#define TRANSCONF_PIPE_LOCKED REG_BIT(25)
+#define TRANSCONF_FORCE_BORDER REG_BIT(25)
+#define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
+#define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_8BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 0) /* gmch,ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_10BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 1) /* gmch,ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
+#define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
+#define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
+#define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0)
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6)
+#define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */
+/*
+ * ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display,
+ * DBL=power saving pixel doubling, PF-ID* requires panel fitter
+ */
+#define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
+#define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
+#define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0)
+#define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1)
+#define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3)
+#define TRANSCONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
+#define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
+#define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
+#define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
+#define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x))
+#define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16)
+#define TRANSCONF_WGC_ENABLE REG_BIT(15) /* vlv/chv only */
+#define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
+#define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13)
+#define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
+#define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
+#define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0)
+#define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1)
+#define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2)
+#define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3)
+#define TRANSCONF_DITHER_EN REG_BIT(4)
+#define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0)
+#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
+#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
+#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
+#define TRANSCONF_PIXEL_COUNT_SCALING_MASK REG_GENMASK(1, 0)
+#define TRANSCONF_PIXEL_COUNT_SCALING_X4 1
+
+#define _PIPEASTAT 0x70024
+#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT)
+#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
+#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
+#define PIPE_CRC_ERROR_ENABLE (1UL << 29)
+#define PIPE_CRC_DONE_ENABLE (1UL << 28)
+#define PERF_COUNTER2_INTERRUPT_EN (1UL << 27)
+#define PIPE_GMBUS_EVENT_ENABLE (1UL << 27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL << 26)
+#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL << 26)
+#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL << 25)
+#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL << 24)
+#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
+#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL << 21)
+#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL << 20)
+#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL << 19)
+#define PERF_COUNTER_INTERRUPT_EN (1UL << 19)
+#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL << 18) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL << 17)
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL << 16)
+#define PIPE_OVERLAY_UPDATED_ENABLE (1UL << 16)
+#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL << 15)
+#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL << 14)
+#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL << 13)
+#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL << 12)
+#define PERF_COUNTER2_INTERRUPT_STATUS (1UL << 11)
+#define PIPE_GMBUS_INTERRUPT_STATUS (1UL << 11)
+#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL << 10)
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
+#define PIPE_VSYNC_INTERRUPT_STATUS (1UL << 9)
+#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL << 8)
+#define PIPE_DPST_EVENT_STATUS (1UL << 7)
+#define PIPE_A_PSR_STATUS_VLV (1UL << 6)
+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL << 6)
+#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL << 5)
+#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL << 4)
+#define PIPE_B_PSR_STATUS_VLV (1UL << 3)
+#define PERF_COUNTER_INTERRUPT_STATUS (1UL << 3)
+#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL << 2) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_HBLANK_INT_STATUS (1UL << 0)
+#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0)
+#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
+#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
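+/*
+ * Sketch of the enable/status split above (illustrative; read_reg()/
+ * write_reg() stand in for the driver's MMIO accessors): each event
+ * has an enable bit in the high 16 bits and a status bit in the low
+ * 16 bits, and the status bits are cleared by writing them back as 1,
+ * so writing the value just read acks the events while preserving the
+ * enables:
+ *
+ * u32 val = read_reg(PIPESTAT(dev_priv, pipe));
+ * if (val & PIPE_VBLANK_INTERRUPT_STATUS)
+ * handle_vblank();
+ * write_reg(PIPESTAT(dev_priv, pipe), val);
+ */
+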
+
+#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
+#define PIPE_ARB_CTL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A)
+#define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13)
+
+#define _PIPE_MISC_A 0x70030
+#define _PIPE_MISC_B 0x71030
+#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
+#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
+#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
+#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
+#define PIPE_MISC_PSR_MASK_PRIMARY_FLIP REG_BIT(23) /* bdw */
+#define PIPE_MISC_PSR_MASK_SPRITE_ENABLE REG_BIT(22) /* bdw */
+#define PIPE_MISC_PSR_MASK_PIPE_REG_WRITE REG_BIT(21) /* skl+ */
+#define PIPE_MISC_PSR_MASK_CURSOR_MOVE REG_BIT(21) /* bdw */
+#define PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT REG_BIT(20)
+#define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
+#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
+/*
+ * On display version < 13, bits 5-7 of PIPE_MISC represent DITHER BPC,
+ * with valid values of 6, 8 and 10 BPC.
+ * On ADL-P and later, bits 5-7 represent PORT OUTPUT BPC, with valid
+ * values of 6, 8, 10 and 12 BPC.
+ */
+#define PIPE_MISC_BPC_MASK REG_GENMASK(7, 5)
+#define PIPE_MISC_BPC_8 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 0)
+#define PIPE_MISC_BPC_10 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 1)
+#define PIPE_MISC_BPC_6 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 2)
+#define PIPE_MISC_BPC_12_ADLP REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 4) /* adlp+ */
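+/*
+ * Illustrative bpc -> field-value mapping for the above (sketch only;
+ * the function name is made up):
+ */
+static inline u32 example_pipe_misc_bpc(int bpc)
+{
+	switch (bpc) {
+	case 6:
+		return PIPE_MISC_BPC_6;
+	case 10:
+		return PIPE_MISC_BPC_10;
+	case 12:
+		return PIPE_MISC_BPC_12_ADLP; /* adlp+ only */
+	case 8:
+	default:
+		return PIPE_MISC_BPC_8;
+	}
+}
+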
+#define PIPE_MISC_DITHER_ENABLE REG_BIT(4)
+#define PIPE_MISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define PIPE_MISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 0)
+#define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1)
+#define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2)
+#define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3)
+
+#define _PIPE_MISC2_A 0x7002C
+#define _PIPE_MISC2_B 0x7102C
+#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
+#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24)
+#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80)
+#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20)
+#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */
+#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id))
+
+#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
+#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16)
+#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16)
+#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27)
+#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26)
+#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25)
+#define CURSORC_INVALID_GTT_INT_EN REG_BIT(24)
+#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23)
+#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22)
+#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21)
+#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20)
+#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19)
+#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18)
+#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17)
+#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16)
+#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0)
+#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0)
+#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11)
+#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10)
+#define PLANEC_INVALID_GTT_STATUS REG_BIT(9)
+#define CURSORC_INVALID_GTT_STATUS REG_BIT(8)
+#define CURSORB_INVALID_GTT_STATUS REG_BIT(7)
+#define CURSORA_INVALID_GTT_STATUS REG_BIT(6)
+#define SPRITED_INVALID_GTT_STATUS REG_BIT(5)
+#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4)
+#define PLANEB_INVALID_GTT_STATUS REG_BIT(3)
+#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2)
+#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
+#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
+
+#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
+#define CBR_PND_DEADLINE_DISABLE (1 << 31)
+#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
+
+#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
+#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
+
+/*
+ * The two pipe frame counter registers are not synchronized, so
+ * reading a stable value is somewhat tricky. The following code
+ * should work:
+ *
+ * do {
+ * high1 = (INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ * PIPE_FRAME_HIGH_SHIFT;
+ * low1 = (INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+ * PIPE_FRAME_LOW_SHIFT;
+ * high2 = (INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ * PIPE_FRAME_HIGH_SHIFT;
+ * } while (high1 != high2);
+ * frame = (high1 << 8) | low1;
+ */
+#define _PIPEAFRAMEHIGH 0x70040
+#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH)
+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT 0
+
+#define _PIPEAFRAMEPIXEL 0x70044
+#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL)
+#define PIPE_FRAME_LOW_MASK 0xff000000
+#define PIPE_FRAME_LOW_SHIFT 24
+#define PIPE_PIXEL_MASK 0x00ffffff
+#define PIPE_PIXEL_SHIFT 0
+
+/* GM45+ just has to be different */
+#define _PIPEA_FRMCOUNT_G4X 0x70040
+#define PIPE_FRMCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X)
+
+#define _PIPEA_FLIPCOUNT_G4X 0x70044
+#define PIPE_FLIPCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X)
+
+/* CHV pipe B blender */
+#define _CHV_BLEND_A 0x60a00
+#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A)
+#define CHV_BLEND_MASK REG_GENMASK(31, 30)
+#define CHV_BLEND_LEGACY REG_FIELD_PREP(CHV_BLEND_MASK, 0)
+#define CHV_BLEND_ANDROID REG_FIELD_PREP(CHV_BLEND_MASK, 1)
+#define CHV_BLEND_MPO REG_FIELD_PREP(CHV_BLEND_MASK, 2)
+
+#define _CHV_CANVAS_A 0x60a04
+#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A)
+#define CHV_CANVAS_RED_MASK REG_GENMASK(29, 20)
+#define CHV_CANVAS_GREEN_MASK REG_GENMASK(19, 10)
+#define CHV_CANVAS_BLUE_MASK REG_GENMASK(9, 0)
+
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK (0xfffff000)
+#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
+
+/*
+ * VBIOS flags
+ * gen2:
+ * [00:06] alm,mgm
+ * [10:16] all
+ * [30:32] alm,mgm
+ * gen3+:
+ * [00:0f] all
+ * [10:1f] all
+ * [30:32] all
+ */
+#define SWF0(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
+#define SWF1(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
+#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
+#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
+
+#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
+#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
+#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_6ms (2 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_100ms (3 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_MASK (3 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_HOTPLUG_STATUS_MASK (3 << 0)
+#define DIGITAL_PORTA_HOTPLUG_NO_DETECT (0 << 0)
+#define DIGITAL_PORTA_HOTPLUG_SHORT_DETECT (1 << 0)
+#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)
+
+/* refresh rate hardware control */
+#define RR_HW_CTL _MMIO(0x45300)
+#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
+#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
+
+#define _PIPEA_DATA_M1 0x60030
+#define _PIPEB_DATA_M1 0x61030
+#define PIPE_DATA_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1)
+
+#define _PIPEA_DATA_N1 0x60034
+#define _PIPEB_DATA_N1 0x61034
+#define PIPE_DATA_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1)
+
+#define _PIPEA_DATA_M2 0x60038
+#define _PIPEB_DATA_M2 0x61038
+#define PIPE_DATA_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2)
+
+#define _PIPEA_DATA_N2 0x6003c
+#define _PIPEB_DATA_N2 0x6103c
+#define PIPE_DATA_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2)
+
+#define _PIPEA_LINK_M1 0x60040
+#define _PIPEB_LINK_M1 0x61040
+#define PIPE_LINK_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1)
+
+#define _PIPEA_LINK_N1 0x60044
+#define _PIPEB_LINK_N1 0x61044
+#define PIPE_LINK_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1)
+
+#define _PIPEA_LINK_M2 0x60048
+#define _PIPEB_LINK_M2 0x61048
+#define PIPE_LINK_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2)
+
+#define _PIPEA_LINK_N2 0x6004c
+#define _PIPEB_LINK_N2 0x6104c
+#define PIPE_LINK_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
+
+/*
+ * Skylake scalers
+ */
+#define _ID(id, a, b) _PICK_EVEN(id, a, b)
+#define _PS_1A_CTRL 0x68180
+#define _PS_2A_CTRL 0x68280
+#define _PS_1B_CTRL 0x68980
+#define _PS_2B_CTRL 0x68A80
+#define _PS_1C_CTRL 0x69180
+#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
+ _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
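+/*
+ * How the picking above resolves, assuming the usual even-spacing
+ * behaviour of _MMIO_PIPE()/_PICK_EVEN() (addresses from the
+ * _PS_*_CTRL defines; pipe C extrapolates from the A/B spacing):
+ *
+ * SKL_PS_CTRL(PIPE_A, 0) -> 0x68180 (_PS_1A_CTRL)
+ * SKL_PS_CTRL(PIPE_A, 1) -> 0x68280 (_PS_2A_CTRL)
+ * SKL_PS_CTRL(PIPE_B, 0) -> 0x68980 (_PS_1B_CTRL)
+ * SKL_PS_CTRL(PIPE_B, 1) -> 0x68A80 (_PS_2B_CTRL)
+ * SKL_PS_CTRL(PIPE_C, 0) -> 0x69180 (_PS_1C_CTRL)
+ */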
+#define PS_SCALER_EN REG_BIT(31)
+#define PS_SCALER_TYPE_MASK REG_BIT(30) /* icl+ */
+#define PS_SCALER_TYPE_NON_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 0)
+#define PS_SCALER_TYPE_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 1)
+#define SKL_PS_SCALER_MODE_MASK REG_GENMASK(29, 28) /* skl/bxt */
+#define SKL_PS_SCALER_MODE_DYN REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 0)
+#define SKL_PS_SCALER_MODE_HQ REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 1)
+#define SKL_PS_SCALER_MODE_NV12 REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 2)
+#define PS_SCALER_MODE_MASK REG_BIT(29) /* glk-tgl */
+#define PS_SCALER_MODE_NORMAL REG_FIELD_PREP(PS_SCALER_MODE_MASK, 0)
+#define PS_SCALER_MODE_PLANAR REG_FIELD_PREP(PS_SCALER_MODE_MASK, 1)
+#define PS_ADAPTIVE_FILTERING_EN REG_BIT(28) /* icl+ */
+#define PS_BINDING_MASK REG_GENMASK(27, 25)
+#define PS_BINDING_PIPE REG_FIELD_PREP(PS_BINDING_MASK, 0)
+#define PS_BINDING_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_MASK, (plane_id) + 1)
+#define PS_FILTER_MASK REG_GENMASK(24, 23)
+#define PS_FILTER_MEDIUM REG_FIELD_PREP(PS_FILTER_MASK, 0)
+#define PS_FILTER_PROGRAMMED REG_FIELD_PREP(PS_FILTER_MASK, 1)
+#define PS_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_FILTER_MASK, 2)
+#define PS_FILTER_BILINEAR REG_FIELD_PREP(PS_FILTER_MASK, 3)
+#define PS_ADAPTIVE_FILTER_MASK REG_BIT(22) /* icl+ */
+#define PS_ADAPTIVE_FILTER_MEDIUM REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 0)
+#define PS_ADAPTIVE_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 1)
+#define PS_PIPE_SCALER_LOC_MASK REG_BIT(21) /* icl+ */
+#define PS_PIPE_SCALER_LOC_AFTER_OUTPUT_CSC REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 0) /* non-linear */
+#define PS_PIPE_SCALER_LOC_AFTER_CSC REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 1) /* linear */
+#define PS_VERT3TAP REG_BIT(21) /* skl/bxt */
+#define PS_VERT_INT_INVERT_FIELD REG_BIT(20)
+#define PS_PROG_SCALE_FACTOR REG_BIT(19) /* tgl+ */
+#define PS_PWRUP_PROGRESS REG_BIT(17)
+#define PS_V_FILTER_BYPASS REG_BIT(8)
+#define PS_VADAPT_EN REG_BIT(7) /* skl/bxt */
+#define PS_VADAPT_MODE_MASK REG_GENMASK(6, 5) /* skl/bxt */
+#define PS_VADAPT_MODE_LEAST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 0)
+#define PS_VADAPT_MODE_MOD_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 1)
+#define PS_VADAPT_MODE_MOST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 3)
+#define PS_BINDING_Y_MASK REG_GENMASK(7, 5) /* icl-tgl */
+#define PS_BINDING_Y_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_Y_MASK, (plane_id) + 1)
+#define PS_Y_VERT_FILTER_SELECT_MASK REG_BIT(4) /* glk+ */
+#define PS_Y_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_VERT_FILTER_SELECT_MASK, (set))
+#define PS_Y_HORZ_FILTER_SELECT_MASK REG_BIT(3) /* glk+ */
+#define PS_Y_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_HORZ_FILTER_SELECT_MASK, (set))
+#define PS_UV_VERT_FILTER_SELECT_MASK REG_BIT(2) /* glk+ */
+#define PS_UV_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_VERT_FILTER_SELECT_MASK, (set))
+#define PS_UV_HORZ_FILTER_SELECT_MASK REG_BIT(1) /* glk+ */
+#define PS_UV_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_HORZ_FILTER_SELECT_MASK, (set))
+
+#define _PS_PWR_GATE_1A 0x68160
+#define _PS_PWR_GATE_2A 0x68260
+#define _PS_PWR_GATE_1B 0x68960
+#define _PS_PWR_GATE_2B 0x68A60
+#define _PS_PWR_GATE_1C 0x69160
+#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
+ _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
+#define PS_PWR_GATE_DIS_OVERRIDE REG_BIT(31)
+#define PS_PWR_GATE_SETTLING_TIME_MASK REG_GENMASK(4, 3)
+#define PS_PWR_GATE_SETTLING_TIME_32 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 0)
+#define PS_PWR_GATE_SETTLING_TIME_64 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 1)
+#define PS_PWR_GATE_SETTLING_TIME_96 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 2)
+#define PS_PWR_GATE_SETTLING_TIME_128 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 3)
+#define PS_PWR_GATE_SLPEN_MASK REG_GENMASK(1, 0)
+#define PS_PWR_GATE_SLPEN_8 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 0)
+#define PS_PWR_GATE_SLPEN_16 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 1)
+#define PS_PWR_GATE_SLPEN_24 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 2)
+#define PS_PWR_GATE_SLPEN_32 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 3)
+
+#define _PS_WIN_POS_1A 0x68170
+#define _PS_WIN_POS_2A 0x68270
+#define _PS_WIN_POS_1B 0x68970
+#define _PS_WIN_POS_2B 0x68A70
+#define _PS_WIN_POS_1C 0x69170
+#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
+ _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
+#define PS_WIN_XPOS_MASK REG_GENMASK(31, 16)
+#define PS_WIN_XPOS(x) REG_FIELD_PREP(PS_WIN_XPOS_MASK, (x))
+#define PS_WIN_YPOS_MASK REG_GENMASK(15, 0)
+#define PS_WIN_YPOS(y) REG_FIELD_PREP(PS_WIN_YPOS_MASK, (y))
+
+#define _PS_WIN_SZ_1A 0x68174
+#define _PS_WIN_SZ_2A 0x68274
+#define _PS_WIN_SZ_1B 0x68974
+#define _PS_WIN_SZ_2B 0x68A74
+#define _PS_WIN_SZ_1C 0x69174
+#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
+ _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
+#define PS_WIN_XSIZE_MASK REG_GENMASK(31, 16)
+#define PS_WIN_XSIZE(w) REG_FIELD_PREP(PS_WIN_XSIZE_MASK, (w))
+#define PS_WIN_YSIZE_MASK REG_GENMASK(15, 0)
+#define PS_WIN_YSIZE(h) REG_FIELD_PREP(PS_WIN_YSIZE_MASK, (h))
+
+#define _PS_VSCALE_1A 0x68184
+#define _PS_VSCALE_2A 0x68284
+#define _PS_VSCALE_1B 0x68984
+#define _PS_VSCALE_2B 0x68A84
+#define _PS_VSCALE_1C 0x69184
+#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
+ _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
+
+#define _PS_HSCALE_1A 0x68190
+#define _PS_HSCALE_2A 0x68290
+#define _PS_HSCALE_1B 0x68990
+#define _PS_HSCALE_2B 0x68A90
+#define _PS_HSCALE_1C 0x69190
+#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
+ _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
+
+#define _PS_VPHASE_1A 0x68188
+#define _PS_VPHASE_2A 0x68288
+#define _PS_VPHASE_1B 0x68988
+#define _PS_VPHASE_2B 0x68A88
+#define _PS_VPHASE_1C 0x69188
+#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
+ _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
+#define PS_Y_PHASE_MASK REG_GENMASK(31, 16)
+#define PS_Y_PHASE(x) REG_FIELD_PREP(PS_Y_PHASE_MASK, (x))
+#define PS_UV_RGB_PHASE_MASK REG_GENMASK(15, 0)
+#define PS_UV_RGB_PHASE(x) REG_FIELD_PREP(PS_UV_RGB_PHASE_MASK, (x))
+#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
+#define PS_PHASE_TRIP (1 << 0)
+
+#define _PS_HPHASE_1A 0x68194
+#define _PS_HPHASE_2A 0x68294
+#define _PS_HPHASE_1B 0x68994
+#define _PS_HPHASE_2B 0x68A94
+#define _PS_HPHASE_1C 0x69194
+#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
+ _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
+
+#define _PS_ECC_STAT_1A 0x681D0
+#define _PS_ECC_STAT_2A 0x682D0
+#define _PS_ECC_STAT_1B 0x689D0
+#define _PS_ECC_STAT_2B 0x68AD0
+#define _PS_ECC_STAT_1C 0x691D0
+#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
+ _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
+
+#define _PS_COEF_SET0_INDEX_1A 0x68198
+#define _PS_COEF_SET0_INDEX_2A 0x68298
+#define _PS_COEF_SET0_INDEX_1B 0x68998
+#define _PS_COEF_SET0_INDEX_2B 0x68A98
+#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
+ _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
+#define PS_COEF_INDEX_AUTO_INC REG_BIT(10)
+
+#define _PS_COEF_SET0_DATA_1A 0x6819C
+#define _PS_COEF_SET0_DATA_2A 0x6829C
+#define _PS_COEF_SET0_DATA_1B 0x6899C
+#define _PS_COEF_SET0_DATA_2B 0x68A9C
+#define GLK_PS_COEF_DATA_SET(pipe, id, set) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \
+ _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8)
+
+/* More Ivybridge lolz */
+#define DE_ERR_INT_IVB (1 << 30)
+#define DE_GSE_IVB (1 << 29)
+#define DE_PCH_EVENT_IVB (1 << 28)
+#define DE_DP_A_HOTPLUG_IVB (1 << 27)
+#define DE_AUX_CHANNEL_A_IVB (1 << 26)
+#define DE_EDP_PSR_INT_HSW (1 << 19)
+#define DE_SPRITEC_FLIP_DONE_IVB (1 << 14)
+#define DE_PLANEC_FLIP_DONE_IVB (1 << 13)
+#define DE_PIPEC_VBLANK_IVB (1 << 10)
+#define DE_SPRITEB_FLIP_DONE_IVB (1 << 9)
+#define DE_PLANEB_FLIP_DONE_IVB (1 << 8)
+#define DE_PIPEB_VBLANK_IVB (1 << 5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1 << 4)
+#define DE_PLANEA_FLIP_DONE_IVB (1 << 3)
+#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5 * (plane)))
+#define DE_PIPEA_VBLANK_IVB (1 << 0)
+#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
+
+#define XELPD_DISPLAY_ERR_FATAL_MASK _MMIO(0x4421c)
+
+#define GEN8_DE_PIPE_ISR(pipe) _MMIO(0x44400 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IMR(pipe) _MMIO(0x44404 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IIR(pipe) _MMIO(0x44408 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IER(pipe) _MMIO(0x4440c + (0x10 * (pipe)))
+#define GEN8_PIPE_FIFO_UNDERRUN REG_BIT(31)
+#define GEN8_PIPE_CDCLK_CRC_ERROR REG_BIT(29)
+#define GEN8_PIPE_CDCLK_CRC_DONE REG_BIT(28)
+#define GEN12_PIPEDMC_INTERRUPT REG_BIT(26) /* tgl+ */
+#define GEN12_PIPEDMC_FAULT REG_BIT(25) /* tgl-mtl */
+#define MTL_PIPEDMC_ATS_FAULT REG_BIT(24) /* mtl */
+#define GEN12_PIPEDMC_FLIPQ_DONE REG_BIT(24) /* tgl-adl */
+#define GEN11_PIPE_PLANE7_FAULT REG_BIT(22) /* icl/tgl */
+#define GEN11_PIPE_PLANE6_FAULT REG_BIT(21) /* icl/tgl */
+#define GEN11_PIPE_PLANE5_FAULT REG_BIT(20) /* icl+ */
+#define GEN12_PIPE_VBLANK_UNMOD REG_BIT(19) /* tgl+ */
+#define MTL_PLANE_ATS_FAULT REG_BIT(18) /* mtl+ */
+#define GEN11_PIPE_PLANE7_FLIP_DONE REG_BIT(18) /* icl/tgl */
+#define MTL_PIPEDMC_FLIPQ_DONE REG_BIT(17) /* mtl */
+#define GEN11_PIPE_PLANE6_FLIP_DONE REG_BIT(17) /* icl/tgl */
+#define GEN11_PIPE_PLANE5_FLIP_DONE REG_BIT(16) /* icl+ */
+#define GEN12_DSB_2_INT REG_BIT(15) /* tgl+ */
+#define GEN12_DSB_1_INT REG_BIT(14) /* tgl+ */
+#define GEN12_DSB_0_INT REG_BIT(13) /* tgl+ */
+#define GEN12_DSB_INT(dsb_id) REG_BIT(13 + (dsb_id))
+#define GEN9_PIPE_CURSOR_FAULT REG_BIT(11) /* skl+ */
+#define GEN9_PIPE_PLANE4_FAULT REG_BIT(10) /* skl+ */
+#define GEN8_PIPE_CURSOR_FAULT REG_BIT(10) /* bdw */
+#define GEN9_PIPE_PLANE3_FAULT REG_BIT(9) /* skl+ */
+#define GEN8_PIPE_SPRITE_FAULT REG_BIT(9) /* bdw */
+#define GEN9_PIPE_PLANE2_FAULT REG_BIT(8) /* skl+ */
+#define GEN8_PIPE_PRIMARY_FAULT REG_BIT(8) /* bdw */
+#define GEN9_PIPE_PLANE1_FAULT REG_BIT(7) /* skl+ */
+#define GEN9_PIPE_PLANE4_FLIP_DONE REG_BIT(6) /* skl+ */
+#define GEN9_PIPE_PLANE3_FLIP_DONE REG_BIT(5) /* skl+ */
+#define GEN8_PIPE_SPRITE_FLIP_DONE REG_BIT(5) /* bdw */
+#define GEN9_PIPE_PLANE2_FLIP_DONE REG_BIT(4) /* skl+ */
+#define GEN8_PIPE_PRIMARY_FLIP_DONE REG_BIT(4) /* bdw */
+#define GEN9_PIPE_PLANE1_FLIP_DONE REG_BIT(3) /* skl+ */
+#define GEN9_PIPE_PLANE_FLIP_DONE(plane_id) \
+ REG_BIT(((plane_id) >= PLANE_5 ? 16 - PLANE_5 : 3 - PLANE_1) + (plane_id)) /* skl+ */
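+/*
+ * i.e. PLANE_1..PLANE_4 map to bits 3..6 and PLANE_5..PLANE_7 map to
+ * bits 16..18, matching the per-plane FLIP_DONE bits above.
+ */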
+#define GEN8_PIPE_SCAN_LINE_EVENT REG_BIT(2)
+#define GEN8_PIPE_VSYNC REG_BIT(1)
+#define GEN8_PIPE_VBLANK REG_BIT(0)
+
+#define GEN8_DE_PIPE_IRQ_REGS(pipe) I915_IRQ_REGS(GEN8_DE_PIPE_IMR(pipe), \
+ GEN8_DE_PIPE_IER(pipe), \
+ GEN8_DE_PIPE_IIR(pipe))
+
+#define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A)
+#define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1)
+
+#define GEN8_DE_PORT_ISR _MMIO(0x44440)
+#define GEN8_DE_PORT_IMR _MMIO(0x44444)
+#define GEN8_DE_PORT_IIR _MMIO(0x44448)
+#define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define DSI1_NON_TE (1 << 31)
+#define DSI0_NON_TE (1 << 30)
+#define ICL_AUX_CHANNEL_E (1 << 29)
+#define ICL_AUX_CHANNEL_F (1 << 28)
+#define GEN9_AUX_CHANNEL_D (1 << 27)
+#define GEN9_AUX_CHANNEL_C (1 << 26)
+#define GEN9_AUX_CHANNEL_B (1 << 25)
+#define DSI1_TE (1 << 24)
+#define DSI0_TE (1 << 23)
+#define GEN8_DE_PORT_HOTPLUG(hpd_pin) REG_BIT(3 + _HPD_PIN_DDI(hpd_pin))
+#define BXT_DE_PORT_HOTPLUG_MASK (GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | \
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | \
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_C))
+#define BDW_DE_PORT_HOTPLUG_MASK GEN8_DE_PORT_HOTPLUG(HPD_PORT_A)
+#define BXT_DE_PORT_GMBUS (1 << 1)
+#define GEN8_AUX_CHANNEL_A (1 << 0)
+#define TGL_DE_PORT_AUX_USBC6 REG_BIT(13)
+#define XELPD_DE_PORT_AUX_DDIE REG_BIT(13)
+#define TGL_DE_PORT_AUX_USBC5 REG_BIT(12)
+#define XELPD_DE_PORT_AUX_DDID REG_BIT(12)
+#define TGL_DE_PORT_AUX_USBC4 REG_BIT(11)
+#define TGL_DE_PORT_AUX_USBC3 REG_BIT(10)
+#define TGL_DE_PORT_AUX_USBC2 REG_BIT(9)
+#define TGL_DE_PORT_AUX_USBC1 REG_BIT(8)
+#define TGL_DE_PORT_AUX_DDIC REG_BIT(2)
+#define TGL_DE_PORT_AUX_DDIB REG_BIT(1)
+#define TGL_DE_PORT_AUX_DDIA REG_BIT(0)
+
+#define GEN8_DE_PORT_IRQ_REGS I915_IRQ_REGS(GEN8_DE_PORT_IMR, \
+ GEN8_DE_PORT_IER, \
+ GEN8_DE_PORT_IIR)
+
+#define GEN8_DE_MISC_ISR _MMIO(0x44460)
+#define GEN8_DE_MISC_IMR _MMIO(0x44464)
+#define GEN8_DE_MISC_IIR _MMIO(0x44468)
+#define GEN8_DE_MISC_IER _MMIO(0x4446c)
+#define XELPDP_RM_TIMEOUT REG_BIT(29)
+#define XELPDP_PMDEMAND_RSPTOUT_ERR REG_BIT(27)
+#define GEN8_DE_MISC_GSE REG_BIT(27)
+#define GEN8_DE_EDP_PSR REG_BIT(19)
+#define XELPDP_PMDEMAND_RSP REG_BIT(3)
+#define XE2LPD_DBUF_OVERLAP_DETECTED REG_BIT(1)
+
+#define GEN8_DE_MISC_IRQ_REGS I915_IRQ_REGS(GEN8_DE_MISC_IMR, \
+ GEN8_DE_MISC_IER, \
+ GEN8_DE_MISC_IIR)
+
+#define GEN11_DISPLAY_INT_CTL _MMIO(0x44200)
+#define GEN11_DISPLAY_IRQ_ENABLE (1 << 31)
+#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
+#define GEN11_DE_PCH_IRQ (1 << 23)
+#define GEN11_DE_MISC_IRQ (1 << 22)
+#define GEN11_DE_HPD_IRQ (1 << 21)
+#define GEN11_DE_PORT_IRQ (1 << 20)
+#define GEN11_DE_PIPE_C (1 << 18)
+#define GEN11_DE_PIPE_B (1 << 17)
+#define GEN11_DE_PIPE_A (1 << 16)
+
+#define GEN11_DE_HPD_ISR _MMIO(0x44470)
+#define GEN11_DE_HPD_IMR _MMIO(0x44474)
+#define GEN11_DE_HPD_IIR _MMIO(0x44478)
+#define GEN11_DE_HPD_IER _MMIO(0x4447c)
+#define GEN11_TC_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
+#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC_HOTPLUG(HPD_PORT_TC6) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC5) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC4) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC3) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC2) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC1))
+#define GEN11_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
+#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT_HOTPLUG(HPD_PORT_TC6) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC5) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC4) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC3) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC1))
+
+#define GEN11_DE_HPD_IRQ_REGS I915_IRQ_REGS(GEN11_DE_HPD_IMR, \
+ GEN11_DE_HPD_IER, \
+ GEN11_DE_HPD_IIR)
+
+#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
+#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
+#define GEN11_HOTPLUG_CTL_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define GEN11_HOTPLUG_CTL_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin) (0 << (_HPD_PIN_TC(hpd_pin) * 4))
+
+#define PICAINTERRUPT_ISR _MMIO(0x16FE50)
+#define PICAINTERRUPT_IMR _MMIO(0x16FE54)
+#define PICAINTERRUPT_IIR _MMIO(0x16FE58)
+#define PICAINTERRUPT_IER _MMIO(0x16FE5C)
+#define XELPDP_DP_ALT_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
+#define XELPDP_DP_ALT_HOTPLUG_MASK REG_GENMASK(19, 16)
+#define XELPDP_AUX_TC(hpd_pin) REG_BIT(8 + _HPD_PIN_TC(hpd_pin))
+#define XELPDP_AUX_TC_MASK REG_GENMASK(11, 8)
+#define XE2LPD_AUX_DDI(hpd_pin) REG_BIT(6 + _HPD_PIN_DDI(hpd_pin))
+#define XE2LPD_AUX_DDI_MASK REG_GENMASK(7, 6)
+#define XELPDP_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
+#define XELPDP_TBT_HOTPLUG_MASK REG_GENMASK(3, 0)
+
+#define PICAINTERRUPT_IRQ_REGS I915_IRQ_REGS(PICAINTERRUPT_IMR, \
+ PICAINTERRUPT_IER, \
+ PICAINTERRUPT_IIR)
+
+#define XELPDP_PORT_HOTPLUG_CTL(hpd_pin) _MMIO(0x16F270 + (_HPD_PIN_TC(hpd_pin) * 0x200))
+#define XELPDP_TBT_HOTPLUG_ENABLE REG_BIT(6)
+#define XELPDP_TBT_HPD_LONG_DETECT REG_BIT(5)
+#define XELPDP_TBT_HPD_SHORT_DETECT REG_BIT(4)
+#define XELPDP_DP_ALT_HOTPLUG_ENABLE REG_BIT(2)
+#define XELPDP_DP_ALT_HPD_LONG_DETECT REG_BIT(1)
+#define XELPDP_DP_ALT_HPD_SHORT_DETECT REG_BIT(0)
+
+#define XELPDP_INITIATE_PMDEMAND_REQUEST(dword) _MMIO(0x45230 + 4 * (dword))
+#define XELPDP_PMDEMAND_QCLK_GV_BW_MASK REG_GENMASK(31, 16)
+#define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK REG_GENMASK(14, 12)
+#define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK REG_GENMASK(11, 8)
+#define XE3_PMDEMAND_PIPES_MASK REG_GENMASK(7, 4)
+#define XELPDP_PMDEMAND_PIPES_MASK REG_GENMASK(7, 6)
+#define XELPDP_PMDEMAND_DBUFS_MASK REG_GENMASK(5, 4)
+#define XELPDP_PMDEMAND_PHYS_MASK REG_GENMASK(2, 0)
+
+#define XELPDP_PMDEMAND_REQ_ENABLE REG_BIT(31)
+#define XELPDP_PMDEMAND_CDCLK_FREQ_MASK REG_GENMASK(30, 20)
+#define XELPDP_PMDEMAND_DDICLK_FREQ_MASK REG_GENMASK(18, 8)
+#define XELPDP_PMDEMAND_SCALERS_MASK REG_GENMASK(6, 4)
+#define XELPDP_PMDEMAND_PLLS_MASK REG_GENMASK(2, 0)
+
+#define GEN12_DCPR_STATUS_1 _MMIO(0x46440)
+#define XELPDP_PMDEMAND_INFLIGHT_STATUS REG_BIT(26)
+
+#define FUSE_STRAP _MMIO(0x42014)
+#define ILK_INTERNAL_GRAPHICS_DISABLE REG_BIT(31)
+#define ILK_INTERNAL_DISPLAY_DISABLE REG_BIT(30)
+#define ILK_DISPLAY_DEBUG_DISABLE REG_BIT(29)
+#define IVB_PIPE_C_DISABLE REG_BIT(28)
+#define ILK_HDCP_DISABLE REG_BIT(25)
+#define ILK_eDP_A_DISABLE REG_BIT(24)
+#define HSW_CDCLK_LIMIT REG_BIT(24)
+#define ILK_DESKTOP REG_BIT(23)
+#define HSW_CPU_SSC_ENABLE REG_BIT(21)
+
+#define FUSE_STRAP3 _MMIO(0x42020)
+#define HSW_REF_CLK_SELECT REG_BIT(1)
+
+#define CHICKEN_MISC_2 _MMIO(0x42084)
+#define CHICKEN_MISC_DISABLE_DPT REG_BIT(30) /* adl,dg2 */
+#define BMG_DARB_HALF_BLK_END_BURST REG_BIT(27)
+#define KBL_ARB_FILL_SPARE_14 REG_BIT(14)
+#define KBL_ARB_FILL_SPARE_13 REG_BIT(13)
+#define GLK_CL2_PWR_DOWN REG_BIT(12)
+#define GLK_CL1_PWR_DOWN REG_BIT(11)
+#define GLK_CL0_PWR_DOWN REG_BIT(10)
+
+#define CHICKEN_MISC_3 _MMIO(0x42088)
+#define DP_MST_DPT_DPTP_ALIGN_WA(trans) REG_BIT(9 + (trans) - TRANSCODER_A)
+#define DP_MST_SHORT_HBLANK_WA(trans) REG_BIT(5 + (trans) - TRANSCODER_A)
+#define DP_MST_FEC_BS_JITTER_WA(trans) REG_BIT(0 + (trans) - TRANSCODER_A)
+
+#define CHICKEN_MISC_4 _MMIO(0x4208c)
+#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
+#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
+#define CHICKEN_FBC_STRIDE(x) REG_FIELD_PREP(CHICKEN_FBC_STRIDE_MASK, (x))
+
+#define _CHICKEN_TRANS_A 0x420c0
+#define _CHICKEN_TRANS_B 0x420c4
+#define _CHICKEN_TRANS_C 0x420c8
+#define _CHICKEN_TRANS_EDP 0x420cc
+#define _CHICKEN_TRANS_D 0x420d8
+#define _CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
+ [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
+ [TRANSCODER_A] = _CHICKEN_TRANS_A, \
+ [TRANSCODER_B] = _CHICKEN_TRANS_B, \
+ [TRANSCODER_C] = _CHICKEN_TRANS_C, \
+ [TRANSCODER_D] = _CHICKEN_TRANS_D))
+#define _MTL_CHICKEN_TRANS_A 0x604e0
+#define _MTL_CHICKEN_TRANS_B 0x614e0
+#define _MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
+ _MTL_CHICKEN_TRANS_A, \
+ _MTL_CHICKEN_TRANS_B)
+#define CHICKEN_TRANS(display, trans) (DISPLAY_VER(display) >= 14 ? _MTL_CHICKEN_TRANS(trans) : _CHICKEN_TRANS(trans))
+#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
+#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
+#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
+#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, (x))
+#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
+#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23)
+#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19)
+#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18)
+#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18)
+#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
+#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
+#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
+#define DP_FEC_BS_JITTER_WA REG_BIT(15)
+#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12)
+#define DP_DSC_INSERT_SF_AT_EOL_WA REG_BIT(4)
+#define HDCP_LINE_REKEY_DISABLE REG_BIT(0)
+
+#define DISP_ARB_CTL2 _MMIO(0x45004)
+#define DISP_DATA_PARTITION_5_6 REG_BIT(6)
+#define DISP_IPC_ENABLE REG_BIT(3)
+
+#define GEN7_MSG_CTL _MMIO(0x45010)
+#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
+#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
+
+#define _BW_BUDDY0_CTL 0x45130
+#define _BW_BUDDY1_CTL 0x45140
+#define BW_BUDDY_CTL(x) _MMIO(_PICK_EVEN(x, \
+ _BW_BUDDY0_CTL, \
+ _BW_BUDDY1_CTL))
+#define BW_BUDDY_DISABLE REG_BIT(31)
+#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
+#define BW_BUDDY_TLB_REQ_TIMER(x) REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, (x))
+
+#define _BW_BUDDY0_PAGE_MASK 0x45134
+#define _BW_BUDDY1_PAGE_MASK 0x45144
+#define BW_BUDDY_PAGE_MASK(x) _MMIO(_PICK_EVEN(x, \
+ _BW_BUDDY0_PAGE_MASK, \
+ _BW_BUDDY1_PAGE_MASK))
+
+#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
+#define MTL_RESET_PICA_HANDSHAKE_EN REG_BIT(6)
+#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
+
+#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
+#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
+#define DCPR_MASK_LPMODE REG_BIT(26)
+#define DCPR_SEND_RESP_IMM REG_BIT(25)
+#define DCPR_CLEAR_MEMSTAT_DIS REG_BIT(24)
+
+#define XELPD_CHICKEN_DCPR_3 _MMIO(0x46438)
+#define DMD_RSP_TIMEOUT_DISABLE REG_BIT(19)
+
+#define SKL_DFSM _MMIO(0x51000)
+#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
+#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
+#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
+#define ICL_DFSM_DMC_DISABLE (1 << 23)
+#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
+#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
+#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
+#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
+#define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
+#define XE2LPD_DFSM_DBUF_OVERLAP_DISABLE (1 << 3)
+
+#define XE2LPD_DE_CAP _MMIO(0x41100)
+#define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30)
+#define XE2LPD_DE_CAP_DSC_MASK REG_GENMASK(29, 28)
+#define XE2LPD_DE_CAP_DSC_REMOVED 1
+#define XE2LPD_DE_CAP_SCALER_MASK REG_GENMASK(27, 26)
+#define XE2LPD_DE_CAP_SCALER_SINGLE 1
+
+#define SKL_DSSM _MMIO(0x51004)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_MASK (7 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz (0 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
+
+/* GEN11 chicken */
+#define _PIPEA_CHICKEN 0x70038
+#define _PIPEB_CHICKEN 0x71038
+#define _PIPEC_CHICKEN 0x72038
+#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
+ _PIPEB_CHICKEN)
+#define UNDERRUN_RECOVERY_DISABLE_ADLP REG_BIT(30)
+#define UNDERRUN_RECOVERY_ENABLE_DG2 REG_BIT(30)
+#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU REG_BIT(15)
+#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12)
+#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7)
+
+#define PCH_DISPLAY_BASE 0xc0000u
+
+/* south display engine interrupt: IBX */
+#define SDE_AUDIO_POWER_D (1 << 27)
+#define SDE_AUDIO_POWER_C (1 << 26)
+#define SDE_AUDIO_POWER_B (1 << 25)
+#define SDE_AUDIO_POWER_SHIFT (25)
+#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS (1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
+#define SDE_AUDIO_HDCP_MASK (3 << 22)
+#define SDE_AUDIO_TRANSB (1 << 21)
+#define SDE_AUDIO_TRANSA (1 << 20)
+#define SDE_AUDIO_TRANS_MASK (3 << 20)
+#define SDE_POISON (1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB (1 << 17)
+#define SDE_FDI_RXA (1 << 16)
+#define SDE_FDI_MASK (3 << 16)
+#define SDE_AUXD (1 << 15)
+#define SDE_AUXC (1 << 14)
+#define SDE_AUXB (1 << 13)
+#define SDE_AUX_MASK (7 << 13)
+/* 12 reserved */
+#define SDE_CRT_HOTPLUG (1 << 11)
+#define SDE_PORTD_HOTPLUG (1 << 10)
+#define SDE_PORTC_HOTPLUG (1 << 9)
+#define SDE_PORTB_HOTPLUG (1 << 8)
+#define SDE_SDVOB_HOTPLUG (1 << 6)
+#define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \
+ SDE_SDVOB_HOTPLUG | \
+ SDE_PORTB_HOTPLUG | \
+ SDE_PORTC_HOTPLUG | \
+ SDE_PORTD_HOTPLUG)
+#define SDE_TRANSB_CRC_DONE (1 << 5)
+#define SDE_TRANSB_CRC_ERR (1 << 4)
+#define SDE_TRANSB_FIFO_UNDER (1 << 3)
+#define SDE_TRANSA_CRC_DONE (1 << 2)
+#define SDE_TRANSA_CRC_ERR (1 << 1)
+#define SDE_TRANSA_FIFO_UNDER (1 << 0)
+#define SDE_TRANS_MASK (0x3f)
+
+/* south display engine interrupt: CPT - CNP */
+#define SDE_AUDIO_POWER_D_CPT (1 << 31)
+#define SDE_AUDIO_POWER_C_CPT (1 << 30)
+#define SDE_AUDIO_POWER_B_CPT (1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT 29
+#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
+#define SDE_AUXD_CPT (1 << 27)
+#define SDE_AUXC_CPT (1 << 26)
+#define SDE_AUXB_CPT (1 << 25)
+#define SDE_AUX_MASK_CPT (7 << 25)
+#define SDE_PORTE_HOTPLUG_SPT (1 << 25)
+#define SDE_PORTA_HOTPLUG_SPT (1 << 24)
+#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_SDVOB_HOTPLUG_CPT (1 << 18)
+#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
+ SDE_SDVOB_HOTPLUG_CPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT)
+#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT | \
+ SDE_PORTA_HOTPLUG_SPT)
+#define SDE_GMBUS_CPT (1 << 17)
+#define SDE_ERROR_CPT (1 << 16)
+#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
+#define SDE_FDI_RXC_CPT (1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
+#define SDE_FDI_RXB_CPT (1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
+#define SDE_FDI_RXA_CPT (1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
+ SDE_AUDIO_CP_REQ_B_CPT | \
+ SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
+ SDE_AUDIO_CP_CHG_B_CPT | \
+ SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
+ SDE_FDI_RXB_CPT | \
+ SDE_FDI_RXA_CPT)
+
+/* south display engine interrupt: ICP/TGP/MTP */
+#define SDE_PICAINTERRUPT REG_BIT(31)
+#define SDE_GMBUS_ICP (1 << 23)
+#define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin))
+#define SDE_TC_HOTPLUG_DG2(hpd_pin) REG_BIT(25 + _HPD_PIN_TC(hpd_pin)) /* sigh */
+#define SDE_DDI_HOTPLUG_ICP(hpd_pin) REG_BIT(16 + _HPD_PIN_DDI(hpd_pin))
+#define SDE_DDI_HOTPLUG_MASK_ICP (SDE_DDI_HOTPLUG_ICP(HPD_PORT_D) | \
+ SDE_DDI_HOTPLUG_ICP(HPD_PORT_C) | \
+ SDE_DDI_HOTPLUG_ICP(HPD_PORT_B) | \
+ SDE_DDI_HOTPLUG_ICP(HPD_PORT_A))
+#define SDE_TC_HOTPLUG_MASK_ICP (SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1))
+
+#define SDE_IRQ_REGS I915_IRQ_REGS(SDEIMR, \
+ SDEIER, \
+ SDEIIR)
+
+#define SERR_INT _MMIO(0xc4040)
+#define SERR_INT_POISON (1 << 31)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
+
+/* digital port hotplug */
+#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
+#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
+#define BXT_DDIA_HPD_INVERT (1 << 27)
+#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
+#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
+#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
+#define PORTA_HOTPLUG_LONG_DETECT (2 << 24) /* SPT+ & BXT */
+#define PORTD_HOTPLUG_ENABLE (1 << 20)
+#define PORTD_PULSE_DURATION_2ms (0 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_4_5ms (1 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_6ms (2 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_100ms (3 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_MASK (3 << 18) /* pre-LPT */
+#define PORTD_HOTPLUG_STATUS_MASK (3 << 16)
+#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
+#define PORTC_HOTPLUG_ENABLE (1 << 12)
+#define BXT_DDIC_HPD_INVERT (1 << 11)
+#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_100ms (3 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_MASK (3 << 10) /* pre-LPT */
+#define PORTC_HOTPLUG_STATUS_MASK (3 << 8)
+#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
+#define PORTB_HOTPLUG_ENABLE (1 << 4)
+#define BXT_DDIB_HPD_INVERT (1 << 3)
+#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_100ms (3 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_MASK (3 << 2) /* pre-LPT */
+#define PORTB_HOTPLUG_STATUS_MASK (3 << 0)
+#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
+#define BXT_DDI_HPD_INVERT_MASK (BXT_DDIA_HPD_INVERT | \
+ BXT_DDIB_HPD_INVERT | \
+ BXT_DDIC_HPD_INVERT)
+
+#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
+#define PORTE_HOTPLUG_ENABLE (1 << 4)
+#define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
+#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
+
+/*
+ * This register is a reuse of the PCH_PORT_HOTPLUG register. The
+ * functionality covered by PCH_PORT_HOTPLUG is split into
+ * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
+ */
+#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin) (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin) (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(hpd_pin) (0x2 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
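+/*
+ * Illustrative long-pulse decode using the fields above (sketch; the
+ * function name is made up and enum hpd_pin is assumed to be in
+ * scope, as used by _HPD_PIN_DDI()):
+ */
+static inline bool example_ddi_hpd_long_detect(enum hpd_pin hpd_pin, u32 val)
+{
+	/* val is a SHOTPLUG_CTL_DDI read; the long-pulse bit sits in the pin's nibble */
+	return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(hpd_pin);
+}
+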
+
+#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
+#define ICP_TC_HPD_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define ICP_TC_HPD_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define ICP_TC_HPD_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
+
+#define SHPD_FILTER_CNT _MMIO(0xc4038)
+#define SHPD_FILTER_CNT_500_ADJ 0x001D9
+#define SHPD_FILTER_CNT_250 0x000F8
+
+#define _PCH_DPLL_A 0xc6014
+#define _PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+
+#define _PCH_FPA0 0xc6040
+#define _PCH_FPB0 0xc6048
+#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define FP_CB_TUNE (0x3 << 22)
+
+#define _PCH_FPA1 0xc6044
+#define _PCH_FPB1 0xc604c
+#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
+
+#define PCH_DPLL_TEST _MMIO(0xc606c)
+
+#define PCH_DREF_CONTROL _MMIO(0xC6200)
+#define DREF_CONTROL_MASK 0x7fc3
+#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_MASK (3 << 13)
+#define DREF_SSC_SOURCE_DISABLE (0 << 11)
+#define DREF_SSC_SOURCE_ENABLE (2 << 11)
+#define DREF_SSC_SOURCE_MASK (3 << 11)
+#define DREF_NONSPREAD_SOURCE_DISABLE (0 << 9)
+#define DREF_NONSPREAD_CK505_ENABLE (1 << 9)
+#define DREF_NONSPREAD_SOURCE_ENABLE (2 << 9)
+#define DREF_NONSPREAD_SOURCE_MASK (3 << 9)
+#define DREF_SUPERSPREAD_SOURCE_DISABLE (0 << 7)
+#define DREF_SUPERSPREAD_SOURCE_ENABLE (2 << 7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3 << 7)
+#define DREF_SSC4_DOWNSPREAD (0 << 6)
+#define DREF_SSC4_CENTERSPREAD (1 << 6)
+#define DREF_SSC1_DISABLE (0 << 1)
+#define DREF_SSC1_ENABLE (1 << 1)
+#define DREF_SSC4_DISABLE (0)
+#define DREF_SSC4_ENABLE (1)
+
+#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
+#define FDL_TP1_TIMER_SHIFT 12
+#define FDL_TP1_TIMER_MASK (3 << 12)
+#define FDL_TP2_TIMER_SHIFT 10
+#define FDL_TP2_TIMER_MASK (3 << 10)
+#define RAWCLK_FREQ_MASK 0x3ff
+#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
+#define CNP_RAWCLK_DIV(div) ((div) << 16)
+#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
+#define CNP_RAWCLK_DEN(den) ((den) << 26)
+#define ICP_RAWCLK_NUM(num) ((num) << 11)
+
+#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
+
+#define PCH_SSC4_PARMS _MMIO(0xc6210)
+#define PCH_SSC4_AUX_PARMS _MMIO(0xc6214)
+
+#define PCH_DPLL_SEL _MMIO(0xc7000)
+#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
+#define TRANS_DPLLA_SEL(pipe) 0
+#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
+
+/* transcoder */
+#define _PCH_TRANS_HTOTAL_A 0xe0000
+#define _PCH_TRANS_HTOTAL_B 0xe1000
+#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
+#define TRANS_HTOTAL_SHIFT 16
+#define TRANS_HACTIVE_SHIFT 0
+
+#define _PCH_TRANS_HBLANK_A 0xe0004
+#define _PCH_TRANS_HBLANK_B 0xe1004
+#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
+#define TRANS_HBLANK_END_SHIFT 16
+#define TRANS_HBLANK_START_SHIFT 0
+
+#define _PCH_TRANS_HSYNC_A 0xe0008
+#define _PCH_TRANS_HSYNC_B 0xe1008
+#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
+#define TRANS_HSYNC_END_SHIFT 16
+#define TRANS_HSYNC_START_SHIFT 0
+
+#define _PCH_TRANS_VTOTAL_A 0xe000c
+#define _PCH_TRANS_VTOTAL_B 0xe100c
+#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
+#define TRANS_VTOTAL_SHIFT 16
+#define TRANS_VACTIVE_SHIFT 0
+
+#define _PCH_TRANS_VBLANK_A 0xe0010
+#define _PCH_TRANS_VBLANK_B 0xe1010
+#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
+#define TRANS_VBLANK_END_SHIFT 16
+#define TRANS_VBLANK_START_SHIFT 0
+
+#define _PCH_TRANS_VSYNC_A 0xe0014
+#define _PCH_TRANS_VSYNC_B 0xe1014
+#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
+#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_START_SHIFT 0
+
+#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
+#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
+#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B)
+
+#define _PCH_TRANSA_DATA_M1 0xe0030
+#define _PCH_TRANSB_DATA_M1 0xe1030
+#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
+
+#define _PCH_TRANSA_DATA_N1 0xe0034
+#define _PCH_TRANSB_DATA_N1 0xe1034
+#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
+
+#define _PCH_TRANSA_DATA_M2 0xe0038
+#define _PCH_TRANSB_DATA_M2 0xe1038
+#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
+
+#define _PCH_TRANSA_DATA_N2 0xe003c
+#define _PCH_TRANSB_DATA_N2 0xe103c
+#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
+
+#define _PCH_TRANSA_LINK_M1 0xe0040
+#define _PCH_TRANSB_LINK_M1 0xe1040
+#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
+
+#define _PCH_TRANSA_LINK_N1 0xe0044
+#define _PCH_TRANSB_LINK_N1 0xe1044
+#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
+
+#define _PCH_TRANSA_LINK_M2 0xe0048
+#define _PCH_TRANSB_LINK_M2 0xe1048
+#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
+
+#define _PCH_TRANSA_LINK_N2 0xe004c
+#define _PCH_TRANSB_LINK_N2 0xe104c
+#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
+
+/* Per-transcoder DIP controls (PCH) */
+#define _VIDEO_DIP_CTL_A 0xe0200
+#define _VIDEO_DIP_CTL_B 0xe1200
+#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+
+#define _VIDEO_DIP_DATA_A 0xe0208
+#define _VIDEO_DIP_DATA_B 0xe1208
+#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+
+#define _VIDEO_DIP_GCP_A 0xe0210
+#define _VIDEO_DIP_GCP_B 0xe1210
+#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+#define GCP_COLOR_INDICATION (1 << 2)
+#define GCP_DEFAULT_PHASE_ENABLE (1 << 1)
+#define GCP_AV_MUTE (1 << 0)
+
+/* Per-transcoder DIP controls (VLV) */
+#define _VLV_VIDEO_DIP_CTL_A 0x60200
+#define _VLV_VIDEO_DIP_CTL_B 0x61170
+#define _CHV_VIDEO_DIP_CTL_C 0x611f0
+#define VLV_TVIDEO_DIP_CTL(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_CTL_A, \
+ _VLV_VIDEO_DIP_CTL_B, \
+ _CHV_VIDEO_DIP_CTL_C)
+
+#define _VLV_VIDEO_DIP_DATA_A 0x60208
+#define _VLV_VIDEO_DIP_DATA_B 0x61174
+#define _CHV_VIDEO_DIP_DATA_C 0x611f4
+#define VLV_TVIDEO_DIP_DATA(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_DATA_A, \
+ _VLV_VIDEO_DIP_DATA_B, \
+ _CHV_VIDEO_DIP_DATA_C)
+
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C 0x611f8
+#define VLV_TVIDEO_DIP_GCP(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, \
+ _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
+
+/* Haswell DIP controls */
+#define _HSW_VIDEO_DIP_CTL_A 0x60200
+#define _HSW_VIDEO_DIP_CTL_B 0x61200
+#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A)
+
+#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220
+#define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260
+#define _HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
+#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
+#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+
+/* ADLP and later: */
+#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484
+#define _ADL_VIDEO_DIP_AS_DATA_B 0x61484
+#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\
+ _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4)
+
+#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
+#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
+#define GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
+#define _HSW_VIDEO_DIP_AVI_ECC_B 0x61240
+#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
+#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
+#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300
+#define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344
+#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+
+#define _HSW_VIDEO_DIP_GCP_A 0x60210
+#define _HSW_VIDEO_DIP_GCP_B 0x61210
+#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A)
+
+#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350
+#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350
+#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+
+#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
+#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
+#define ICL_VIDEO_DIP_PPS_ECC(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
+
+#define _HSW_STEREO_3D_CTL_A 0x70020
+#define _HSW_STEREO_3D_CTL_B 0x71020
+#define HSW_STEREO_3D_CTL(dev_priv, trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A)
+#define S3D_ENABLE (1 << 31)
+
+#define _PCH_TRANSACONF 0xf0008
+#define _PCH_TRANSBCONF 0xf1008
+#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
+#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
+#define TRANS_ENABLE REG_BIT(31)
+#define TRANS_STATE_ENABLE REG_BIT(30)
+#define TRANS_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* ibx */
+#define TRANS_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_FRAME_START_DELAY_MASK, (x)) /* ibx: 0-3 */
+#define TRANS_INTERLACE_MASK REG_GENMASK(23, 21)
+#define TRANS_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANS_INTERLACE_MASK, 0)
+#define TRANS_INTERLACE_LEGACY_VSYNC_IBX REG_FIELD_PREP(TRANS_INTERLACE_MASK, 2) /* ibx */
+#define TRANS_INTERLACE_INTERLACED REG_FIELD_PREP(TRANS_INTERLACE_MASK, 3)
+#define TRANS_BPC_MASK REG_GENMASK(7, 5) /* ibx */
+#define TRANS_BPC_8 REG_FIELD_PREP(TRANS_BPC_MASK, 0)
+#define TRANS_BPC_10 REG_FIELD_PREP(TRANS_BPC_MASK, 1)
+#define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2)
+#define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3)
+
+#define PCH_DP_B _MMIO(0xe4100)
+#define PCH_DP_C _MMIO(0xe4200)
+#define PCH_DP_D _MMIO(0xe4300)
+
+/* CPT */
+#define _TRANS_DP_CTL_A 0xe0300
+#define _TRANS_DP_CTL_B 0xe1300
+#define _TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
+#define TRANS_DP_OUTPUT_ENABLE REG_BIT(31)
+#define TRANS_DP_PORT_SEL_MASK REG_GENMASK(30, 29)
+#define TRANS_DP_PORT_SEL_NONE REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, 3)
+#define TRANS_DP_PORT_SEL(port) REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, (port) - PORT_B)
+#define TRANS_DP_AUDIO_ONLY REG_BIT(26)
+#define TRANS_DP_ENH_FRAMING REG_BIT(18)
+#define TRANS_DP_BPC_MASK REG_GENMASK(10, 9)
+#define TRANS_DP_BPC_8 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 0)
+#define TRANS_DP_BPC_10 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 1)
+#define TRANS_DP_BPC_6 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 2)
+#define TRANS_DP_BPC_12 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 3)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH REG_BIT(4)
+#define TRANS_DP_HSYNC_ACTIVE_HIGH REG_BIT(3)
+
+#define _TRANS_DP2_CTL_A 0x600a0
+#define _TRANS_DP2_CTL_B 0x610a0
+#define _TRANS_DP2_CTL_C 0x620a0
+#define _TRANS_DP2_CTL_D 0x630a0
+#define TRANS_DP2_CTL(trans) _MMIO_TRANS(trans, _TRANS_DP2_CTL_A, _TRANS_DP2_CTL_B)
+#define TRANS_DP2_128B132B_CHANNEL_CODING REG_BIT(31)
+#define TRANS_DP2_PANEL_REPLAY_ENABLE REG_BIT(30)
+#define TRANS_DP2_DEBUG_ENABLE REG_BIT(23)
+
+#define _TRANS_DP2_VFREQHIGH_A 0x600a4
+#define _TRANS_DP2_VFREQHIGH_B 0x610a4
+#define _TRANS_DP2_VFREQHIGH_C 0x620a4
+#define _TRANS_DP2_VFREQHIGH_D 0x630a4
+#define TRANS_DP2_VFREQHIGH(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQHIGH_A, _TRANS_DP2_VFREQHIGH_B)
+#define TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK REG_GENMASK(31, 8)
+#define TRANS_DP2_VFREQ_PIXEL_CLOCK(clk_hz) REG_FIELD_PREP(TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK, (clk_hz))
+
+#define _TRANS_DP2_VFREQLOW_A 0x600a8
+#define _TRANS_DP2_VFREQLOW_B 0x610a8
+#define _TRANS_DP2_VFREQLOW_C 0x620a8
+#define _TRANS_DP2_VFREQLOW_D 0x630a8
+#define TRANS_DP2_VFREQLOW(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQLOW_A, _TRANS_DP2_VFREQLOW_B)
+
+#define _DP_MIN_HBLANK_CTL_A 0x600ac
+#define _DP_MIN_HBLANK_CTL_B 0x610ac
+#define DP_MIN_HBLANK_CTL(trans) _MMIO_TRANS(trans, _DP_MIN_HBLANK_CTL_A, _DP_MIN_HBLANK_CTL_B)
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
+/* SNB B-stepping */
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1 << 22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a << 22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39 << 22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38 << 22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f << 22)
+
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f << 22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 << 22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e << 22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 << 22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 << 22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 << 22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 << 22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 << 22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f << 22)
+
+#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
+#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
+#define PIXEL_OVERLAP_CNT_SHIFT 30
+
+/*
+ * HSW - ICL power wells
+ *
+ * Platforms have up to 3 power well control register sets, each set
+ * controlling up to 16 power wells via a request/status HW flag tuple:
+ * - main (HSW_PWR_WELL_CTL[1-4])
+ * - AUX (ICL_PWR_WELL_CTL_AUX[1-4])
+ * - DDI (ICL_PWR_WELL_CTL_DDI[1-4])
+ * Each control register set consists of up to 4 registers used by different
+ * sources that can request a power well to be enabled:
+ * - BIOS (HSW_PWR_WELL_CTL1/ICL_PWR_WELL_CTL_AUX1/ICL_PWR_WELL_CTL_DDI1)
+ * - DRIVER (HSW_PWR_WELL_CTL2/ICL_PWR_WELL_CTL_AUX2/ICL_PWR_WELL_CTL_DDI2)
+ * - KVMR (HSW_PWR_WELL_CTL3) (only in the main register set)
+ * - DEBUG (HSW_PWR_WELL_CTL4/ICL_PWR_WELL_CTL_AUX4/ICL_PWR_WELL_CTL_DDI4)
+ */
+#define HSW_PWR_WELL_CTL1 _MMIO(0x45400)
+#define HSW_PWR_WELL_CTL2 _MMIO(0x45404)
+#define HSW_PWR_WELL_CTL3 _MMIO(0x45408)
+#define HSW_PWR_WELL_CTL4 _MMIO(0x4540C)
+#define HSW_PWR_WELL_CTL_REQ(pw_idx) (0x2 << ((pw_idx) * 2))
+#define HSW_PWR_WELL_CTL_STATE(pw_idx) (0x1 << ((pw_idx) * 2))
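
Each power well thus occupies a two-bit slot in its CTL register: a request
flag at pw_idx * 2 + 1 and a hardware status flag at pw_idx * 2. A minimal
sketch of the enable handshake, assuming the intel_de_rmw() and
intel_de_wait_for_set() helpers used elsewhere in i915 (the function name
here is illustrative, not the driver's):

static void example_power_well_enable(struct intel_display *display, int pw_idx)
{
	/* raise the driver's (CTL2) request bit for this well */
	intel_de_rmw(display, HSW_PWR_WELL_CTL2, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));

	/* then poll the paired status bit until the hardware acks */
	intel_de_wait_for_set(display, HSW_PWR_WELL_CTL2,
			      HSW_PWR_WELL_CTL_STATE(pw_idx), 1);
}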
+
+/* HSW/BDW power well */
+#define HSW_PW_CTL_IDX_GLOBAL 15
+
+/* SKL/BXT/GLK power wells */
+#define SKL_PW_CTL_IDX_PW_2 15
+#define SKL_PW_CTL_IDX_PW_1 14
+#define GLK_PW_CTL_IDX_AUX_C 10
+#define GLK_PW_CTL_IDX_AUX_B 9
+#define GLK_PW_CTL_IDX_AUX_A 8
+#define SKL_PW_CTL_IDX_DDI_D 4
+#define SKL_PW_CTL_IDX_DDI_C 3
+#define SKL_PW_CTL_IDX_DDI_B 2
+#define SKL_PW_CTL_IDX_DDI_A_E 1
+#define GLK_PW_CTL_IDX_DDI_A 1
+#define SKL_PW_CTL_IDX_MISC_IO 0
+
+/* ICL/TGL - power wells */
+#define TGL_PW_CTL_IDX_PW_5 4
+#define ICL_PW_CTL_IDX_PW_4 3
+#define ICL_PW_CTL_IDX_PW_3 2
+#define ICL_PW_CTL_IDX_PW_2 1
+#define ICL_PW_CTL_IDX_PW_1 0
+
+/* XE_LPD - power wells */
+#define XELPD_PW_CTL_IDX_PW_D 8
+#define XELPD_PW_CTL_IDX_PW_C 7
+#define XELPD_PW_CTL_IDX_PW_B 6
+#define XELPD_PW_CTL_IDX_PW_A 5
+
+#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440)
+#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444)
+#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C)
+#define TGL_PW_CTL_IDX_AUX_TBT6 14
+#define TGL_PW_CTL_IDX_AUX_TBT5 13
+#define TGL_PW_CTL_IDX_AUX_TBT4 12
+#define ICL_PW_CTL_IDX_AUX_TBT4 11
+#define TGL_PW_CTL_IDX_AUX_TBT3 11
+#define ICL_PW_CTL_IDX_AUX_TBT3 10
+#define TGL_PW_CTL_IDX_AUX_TBT2 10
+#define ICL_PW_CTL_IDX_AUX_TBT2 9
+#define TGL_PW_CTL_IDX_AUX_TBT1 9
+#define ICL_PW_CTL_IDX_AUX_TBT1 8
+#define TGL_PW_CTL_IDX_AUX_TC6 8
+#define XELPD_PW_CTL_IDX_AUX_E 8
+#define TGL_PW_CTL_IDX_AUX_TC5 7
+#define XELPD_PW_CTL_IDX_AUX_D 7
+#define TGL_PW_CTL_IDX_AUX_TC4 6
+#define ICL_PW_CTL_IDX_AUX_F 5
+#define TGL_PW_CTL_IDX_AUX_TC3 5
+#define ICL_PW_CTL_IDX_AUX_E 4
+#define TGL_PW_CTL_IDX_AUX_TC2 4
+#define ICL_PW_CTL_IDX_AUX_D 3
+#define TGL_PW_CTL_IDX_AUX_TC1 3
+#define ICL_PW_CTL_IDX_AUX_C 2
+#define ICL_PW_CTL_IDX_AUX_B 1
+#define ICL_PW_CTL_IDX_AUX_A 0
+
+#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450)
+#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454)
+#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C)
+#define XELPD_PW_CTL_IDX_DDI_E 8
+#define TGL_PW_CTL_IDX_DDI_TC6 8
+#define XELPD_PW_CTL_IDX_DDI_D 7
+#define TGL_PW_CTL_IDX_DDI_TC5 7
+#define TGL_PW_CTL_IDX_DDI_TC4 6
+#define ICL_PW_CTL_IDX_DDI_F 5
+#define TGL_PW_CTL_IDX_DDI_TC3 5
+#define ICL_PW_CTL_IDX_DDI_E 4
+#define TGL_PW_CTL_IDX_DDI_TC2 4
+#define ICL_PW_CTL_IDX_DDI_D 3
+#define TGL_PW_CTL_IDX_DDI_TC1 3
+#define ICL_PW_CTL_IDX_DDI_C 2
+#define ICL_PW_CTL_IDX_DDI_B 1
+#define ICL_PW_CTL_IDX_DDI_A 0
+
+/* HSW - power well misc debug registers */
+#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20)
+#define HSW_PWR_WELL_FORCE_ON (1 << 19)
+#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
+
+/* SKL Fuse Status */
+enum skl_power_gate {
+ SKL_PG0,
+ SKL_PG1,
+ SKL_PG2,
+ ICL_PG3,
+ ICL_PG4,
+};
+
+#define SKL_FUSE_STATUS _MMIO(0x42000)
+#define SKL_FUSE_DOWNLOAD_STATUS (1 << 31)
+#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
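
The fuse PG distribution status bits run downward from bit 27, so the
power-gate enum above indexes them in reverse. Worked out (illustration only):

/*
 * SKL_FUSE_PG_DIST_STATUS(SKL_PG0) -> bit 27
 * SKL_FUSE_PG_DIST_STATUS(SKL_PG1) -> bit 26
 * SKL_FUSE_PG_DIST_STATUS(SKL_PG2) -> bit 25
 * SKL_FUSE_PG_DIST_STATUS(ICL_PG4) -> bit 23
 */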
+
+/* Per-pipe DDI Function Control */
+#define _TRANS_DDI_FUNC_CTL_A 0x60400
+#define _TRANS_DDI_FUNC_CTL_B 0x61400
+#define _TRANS_DDI_FUNC_CTL_C 0x62400
+#define _TRANS_DDI_FUNC_CTL_D 0x63400
+#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
+#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
+#define TRANS_DDI_FUNC_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL_A)
+
+#define TRANS_DDI_FUNC_ENABLE (1 << 31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define TRANS_DDI_PORT_SHIFT 28
+#define TGL_TRANS_DDI_PORT_SHIFT 27
+#define TRANS_DDI_PORT_MASK (7 << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
+#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24)
+#define TRANS_DDI_MODE_SELECT_FDI_OR_128B132B (4 << 24)
+#define TRANS_DDI_BPC_MASK (7 << 20)
+#define TRANS_DDI_BPC_8 (0 << 20)
+#define TRANS_DDI_BPC_10 (1 << 20)
+#define TRANS_DDI_BPC_6 (2 << 20)
+#define TRANS_DDI_BPC_12 (3 << 20)
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK REG_GENMASK(19, 18)
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT(x) REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
+#define TRANS_DDI_PVSYNC (1 << 17)
+#define TRANS_DDI_PHSYNC (1 << 16)
+#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15)
+#define XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(15)
+#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_EDP_INPUT_D_ONOFF (7 << 12)
+#define TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(12)
+#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
+#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
+ REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
+#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
+#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
+#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
+#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define TRANS_DDI_HDCP_SELECT REG_BIT(5)
+#define TRANS_DDI_BFI_ENABLE (1 << 4)
+#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
+#define TRANS_DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
+#define TRANS_DDI_PORT_WIDTH(width) REG_FIELD_PREP(TRANS_DDI_PORT_WIDTH_MASK, (width) - 1)
+#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
+#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
+ | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
+ | TRANS_DDI_HDMI_SCRAMBLING)
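
The two port-select encodings above differ in more than the shift: pre-TGL
writes the port number into bits 30:28 directly, while TGL+ stores port + 1
in bits 30:27 so that 0 can mean "no port attached". A hedged decode sketch
(the helper name is illustrative, not the driver's):

static enum port example_ddi_func_ctl_to_port(struct intel_display *display, u32 ctl)
{
	if (DISPLAY_VER(display) >= 12) {
		u32 val = (ctl & TGL_TRANS_DDI_PORT_MASK) >> TGL_TRANS_DDI_PORT_SHIFT;

		/* TGL+ biases the field by one so 0 is "none" */
		return val == 0 ? PORT_NONE : PORT_A + val - 1;
	}

	/* pre-TGL stores the port number directly */
	return PORT_A + ((ctl & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT);
}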
+
+#define _TRANS_DDI_FUNC_CTL2_A 0x60404
+#define _TRANS_DDI_FUNC_CTL2_B 0x61404
+#define _TRANS_DDI_FUNC_CTL2_C 0x62404
+#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
+#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
+#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
+#define TRANS_DDI_FUNC_CTL2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A)
+#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
+#define CMTG_SECONDARY_MODE REG_BIT(3)
+#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
+
+#define TRANS_CMTG_CHICKEN _MMIO(0x6fa90)
+#define DISABLE_DPT_CLK_GATING REG_BIT(1)
+
+/* DisplayPort Transport Control */
+#define _DP_TP_CTL_A 0x64040
+#define _DP_TP_CTL_B 0x64140
+#define _TGL_DP_TP_CTL_A 0x60540
+#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
+#define TGL_DP_TP_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
+#define DP_TP_CTL_ENABLE REG_BIT(31)
+#define DP_TP_CTL_FEC_ENABLE REG_BIT(30)
+#define DP_TP_CTL_MODE_MASK REG_BIT(27)
+#define DP_TP_CTL_MODE_SST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 0)
+#define DP_TP_CTL_MODE_MST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 1)
+#define DP_TP_CTL_FORCE_ACT REG_BIT(25)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK REG_GENMASK(20, 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 0)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 1)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 2)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE REG_BIT(18)
+#define DP_TP_CTL_FDI_AUTOTRAIN REG_BIT(15)
+#define DP_TP_CTL_LINK_TRAIN_MASK REG_GENMASK(10, 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 0)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 1)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 4)
+#define DP_TP_CTL_LINK_TRAIN_PAT4 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 5)
+#define DP_TP_CTL_LINK_TRAIN_IDLE REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 2)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 3)
+#define DP_TP_CTL_SCRAMBLE_DISABLE REG_BIT(7)
+
+/* DisplayPort Transport Status */
+#define _DP_TP_STATUS_A 0x64044
+#define _DP_TP_STATUS_B 0x64144
+#define _TGL_DP_TP_STATUS_A 0x60544
+#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define TGL_DP_TP_STATUS(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
+#define DP_TP_STATUS_FEC_ENABLE_LIVE REG_BIT(28)
+#define DP_TP_STATUS_IDLE_DONE REG_BIT(25)
+#define DP_TP_STATUS_ACT_SENT REG_BIT(24)
+#define DP_TP_STATUS_MODE_STATUS_MST REG_BIT(23)
+#define DP_TP_STATUS_STREAMS_ENABLED_MASK REG_GENMASK(18, 16) /* 17:16 on hsw but bit 18 mbz */
+#define DP_TP_STATUS_AUTOTRAIN_DONE REG_BIT(12)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2_MASK REG_GENMASK(9, 8)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1_MASK REG_GENMASK(5, 4)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0_MASK REG_GENMASK(1, 0)
+
+/* DDI Buffer Control */
+#define _DDI_BUF_CTL_A 0x64000
+#define _DDI_BUF_CTL_B 0x64100
+/* Known as DDI_CTL_DE in MTL+ */
+#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE REG_BIT(31)
+#define XE2LPD_DDI_BUF_D2D_LINK_ENABLE REG_BIT(29)
+#define XE2LPD_DDI_BUF_D2D_LINK_STATE REG_BIT(28)
+#define DDI_BUF_EMP_MASK REG_GENMASK(27, 24)
+#define DDI_BUF_TRANS_SELECT(n) REG_FIELD_PREP(DDI_BUF_EMP_MASK, (n))
+#define DDI_BUF_PHY_LINK_RATE_MASK REG_GENMASK(23, 20)
+#define DDI_BUF_PHY_LINK_RATE(r) REG_FIELD_PREP(DDI_BUF_PHY_LINK_RATE_MASK, (r))
+#define DDI_BUF_PORT_DATA_MASK REG_GENMASK(19, 18)
+#define DDI_BUF_PORT_DATA_10BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 0)
+#define DDI_BUF_PORT_DATA_20BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 1)
+#define DDI_BUF_PORT_DATA_40BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 2)
+#define DDI_BUF_PORT_REVERSAL REG_BIT(16)
+#define DDI_BUF_LANE_STAGGER_DELAY_MASK REG_GENMASK(15, 8)
+#define DDI_BUF_LANE_STAGGER_DELAY(symbols) REG_FIELD_PREP(DDI_BUF_LANE_STAGGER_DELAY_MASK, \
+ (symbols))
+#define DDI_BUF_IS_IDLE REG_BIT(7)
+#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
+#define DDI_A_4_LANES REG_BIT(4)
+#define DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
+#define DDI_PORT_WIDTH(width) REG_FIELD_PREP(DDI_PORT_WIDTH_MASK, \
+ ((width) == 3 ? 4 : (width) - 1))
+#define DDI_PORT_WIDTH_SHIFT 1
+#define DDI_INIT_DISPLAY_DETECTED REG_BIT(0)
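
Note the x3 special case in DDI_PORT_WIDTH(): the field is a plain width - 1
for 1, 2 and 4 lanes, but a 3-lane configuration encodes as 4. Worked out
(illustration only; field value 2, which width - 1 would have produced for
x3, is never emitted by this macro):

/*
 * DDI_PORT_WIDTH(1) -> 0  (x1)
 * DDI_PORT_WIDTH(2) -> 1  (x2)
 * DDI_PORT_WIDTH(4) -> 3  (x4)
 * DDI_PORT_WIDTH(3) -> 4  (x3, the exception)
 */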
+
+/* DDI Buffer Translations */
+#define _DDI_BUF_TRANS_A 0x64E00
+#define _DDI_BUF_TRANS_B 0x64E60
+#define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
+#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31)
+#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
+
+/* DDI DP Compliance Control */
+#define _DDI_DP_COMP_CTL_A 0x605F0
+#define _DDI_DP_COMP_CTL_B 0x615F0
+#define DDI_DP_COMP_CTL(pipe) _MMIO_PIPE(pipe, _DDI_DP_COMP_CTL_A, _DDI_DP_COMP_CTL_B)
+#define DDI_DP_COMP_CTL_ENABLE (1 << 31)
+#define DDI_DP_COMP_CTL_D10_2 (0 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_0 (1 << 28)
+#define DDI_DP_COMP_CTL_PRBS7 (2 << 28)
+#define DDI_DP_COMP_CTL_CUSTOM80 (3 << 28)
+#define DDI_DP_COMP_CTL_HBR2 (4 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_1 (5 << 28)
+#define DDI_DP_COMP_CTL_HBR2_RESET (0xFC << 0)
+
+/* DDI DP Compliance Pattern */
+#define _DDI_DP_COMP_PAT_A 0x605F4
+#define _DDI_DP_COMP_PAT_B 0x615F4
+#define DDI_DP_COMP_PAT(pipe, i) _MMIO(_PIPE(pipe, _DDI_DP_COMP_PAT_A, _DDI_DP_COMP_PAT_B) + (i) * 4)
+
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE _MMIO(0xC6020)
+#define PIXCLK_GATE_UNGATE (1 << 0)
+#define PIXCLK_GATE_GATE (0 << 0)
+
+/* SPLL */
+#define SPLL_CTL _MMIO(0x46020)
+#define SPLL_PLL_ENABLE (1 << 31)
+#define SPLL_REF_BCLK (0 << 28)
+#define SPLL_REF_MUXED_SSC (1 << 28) /* CPU SSC if fused enabled, PCH SSC otherwise */
+#define SPLL_REF_NON_SSC_HSW (2 << 28)
+#define SPLL_REF_PCH_SSC_BDW (2 << 28)
+#define SPLL_REF_LCPLL (3 << 28)
+#define SPLL_REF_MASK (3 << 28)
+#define SPLL_FREQ_810MHz (0 << 26)
+#define SPLL_FREQ_1350MHz (1 << 26)
+#define SPLL_FREQ_2700MHz (2 << 26)
+#define SPLL_FREQ_MASK (3 << 26)
+
+/* WRPLL */
+#define _WRPLL_CTL1 0x46040
+#define _WRPLL_CTL2 0x46060
+#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
+#define WRPLL_PLL_ENABLE (1 << 31)
+#define WRPLL_REF_BCLK (0 << 28)
+#define WRPLL_REF_PCH_SSC (1 << 28)
+#define WRPLL_REF_MUXED_SSC_BDW (2 << 28) /* CPU SSC if fused enabled, PCH SSC otherwise */
+#define WRPLL_REF_SPECIAL_HSW (2 << 28) /* muxed SSC (ULT), non-SSC (non-ULT) */
+#define WRPLL_REF_LCPLL (3 << 28)
+#define WRPLL_REF_MASK (3 << 28)
+/* WRPLL divider programming */
+#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0)
+#define WRPLL_DIVIDER_REF_MASK (0xff)
+#define WRPLL_DIVIDER_POST(x) ((x) << 8)
+#define WRPLL_DIVIDER_POST_MASK (0x3f << 8)
+#define WRPLL_DIVIDER_POST_SHIFT 8
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x) << 16)
+#define WRPLL_DIVIDER_FB_SHIFT 16
+#define WRPLL_DIVIDER_FB_MASK (0xff << 16)
+
+/* Port clock selection */
+#define _PORT_CLK_SEL_A 0x46100
+#define _PORT_CLK_SEL_B 0x46104
+#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
+#define PORT_CLK_SEL_MASK REG_GENMASK(31, 29)
+#define PORT_CLK_SEL_LCPLL_2700 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 0)
+#define PORT_CLK_SEL_LCPLL_1350 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 1)
+#define PORT_CLK_SEL_LCPLL_810 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 2)
+#define PORT_CLK_SEL_SPLL REG_FIELD_PREP(PORT_CLK_SEL_MASK, 3)
+#define PORT_CLK_SEL_WRPLL(pll) REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4 + (pll))
+#define PORT_CLK_SEL_WRPLL1 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4)
+#define PORT_CLK_SEL_WRPLL2 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 5)
+#define PORT_CLK_SEL_NONE REG_FIELD_PREP(PORT_CLK_SEL_MASK, 7)
+
+/* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
+#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
+#define DDI_CLK_SEL_MASK REG_GENMASK(31, 28)
+#define DDI_CLK_SEL_NONE REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x0)
+#define DDI_CLK_SEL_MG REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x8)
+#define DDI_CLK_SEL_TBT_162 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xC)
+#define DDI_CLK_SEL_TBT_270 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xD)
+#define DDI_CLK_SEL_TBT_540 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xE)
+#define DDI_CLK_SEL_TBT_810 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xF)
+
+/* Transcoder clock selection */
+#define _TRANS_CLK_SEL_A 0x46140
+#define _TRANS_CLK_SEL_B 0x46144
+#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
+#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
+#define TGL_TRANS_CLK_SEL_DISABLED (0x0 << 28)
+#define TGL_TRANS_CLK_SEL_PORT(x) (((x) + 1) << 28)
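
Because field value 0 means "clock disabled", the port number is stored
biased by one. A minimal sketch for the pre-TGL bit 29 field, assuming the
intel_de_write() helper (the function name is illustrative):

static void example_route_port_clock(struct intel_display *display,
				     enum transcoder trans, enum port port)
{
	/* writes PORT_x + 1; writing 0 would leave the transcoder clock off */
	intel_de_write(display, TRANS_CLK_SEL(trans), TRANS_CLK_SEL_PORT(port));
}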
+
+#define CDCLK_FREQ _MMIO(0x46200)
+
+#define _TRANSA_MSA_MISC 0x60410
+#define _TRANSB_MSA_MISC 0x61410
+#define _TRANSC_MSA_MISC 0x62410
+#define _TRANS_EDP_MSA_MISC 0x6f410
+#define TRANS_MSA_MISC(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANSA_MSA_MISC)
+/* See DP_MSA_MISC_* for the bit definitions */
+
+#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C
+#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C
+#define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C
+#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C
+#define TRANS_SET_CONTEXT_LATENCY(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_A_SET_CONTEXT_LATENCY)
+#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0)
+#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
+
+/* LCPLL Control */
+#define LCPLL_CTL _MMIO(0x130040)
+#define LCPLL_PLL_DISABLE (1 << 31)
+#define LCPLL_PLL_LOCK (1 << 30)
+#define LCPLL_REF_NON_SSC (0 << 28)
+#define LCPLL_REF_BCLK (2 << 28)
+#define LCPLL_REF_PCH_SSC (3 << 28)
+#define LCPLL_REF_MASK (3 << 28)
+#define LCPLL_CLK_FREQ_MASK (3 << 26)
+#define LCPLL_CLK_FREQ_450 (0 << 26)
+#define LCPLL_CLK_FREQ_54O_BDW (1 << 26)
+#define LCPLL_CLK_FREQ_337_5_BDW (2 << 26)
+#define LCPLL_CLK_FREQ_675_BDW (3 << 26)
+#define LCPLL_CD_CLOCK_DISABLE (1 << 25)
+#define LCPLL_ROOT_CD_CLOCK_DISABLE (1 << 24)
+#define LCPLL_CD2X_CLOCK_DISABLE (1 << 23)
+#define LCPLL_POWER_DOWN_ALLOW (1 << 22)
+#define LCPLL_CD_SOURCE_FCLK (1 << 21)
+#define LCPLL_CD_SOURCE_FCLK_DONE (1 << 19)
+
+/*
+ * SKL Clocks
+ */
+/* CDCLK_CTL */
+#define CDCLK_CTL _MMIO(0x46000)
+#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
+#define CDCLK_FREQ_450_432 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 0)
+#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
+#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
+#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
+#define MDCLK_SOURCE_SEL_MASK REG_GENMASK(25, 25)
+#define MDCLK_SOURCE_SEL_CD2XCLK REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 0)
+#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 1)
+#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
+#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
+#define BXT_CDCLK_CD2X_DIV_SEL_2 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 2)
+#define BXT_CDCLK_CD2X_DIV_SEL_4 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 3)
+#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
+#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
+#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
+#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19)
+#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
+#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe)
+#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE
+#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
+#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
+
+/* CDCLK_SQUASH_CTL */
+#define CDCLK_SQUASH_CTL _MMIO(0x46008)
+#define CDCLK_SQUASH_ENABLE REG_BIT(31)
+#define CDCLK_SQUASH_WINDOW_SIZE_MASK REG_GENMASK(27, 24)
+#define CDCLK_SQUASH_WINDOW_SIZE(x) REG_FIELD_PREP(CDCLK_SQUASH_WINDOW_SIZE_MASK, (x))
+#define CDCLK_SQUASH_WAVEFORM_MASK REG_GENMASK(15, 0)
+#define CDCLK_SQUASH_WAVEFORM(x) REG_FIELD_PREP(CDCLK_SQUASH_WAVEFORM_MASK, (x))
+
+/* LCPLL_CTL */
+#define LCPLL1_CTL _MMIO(0x46010)
+#define LCPLL2_CTL _MMIO(0x46014)
+#define LCPLL_PLL_ENABLE (1 << 31)
+
+/* DPLL control1 */
+#define DPLL_CTRL1 _MMIO(0x6C058)
+#define DPLL_CTRL1_HDMI_MODE(id) (1 << ((id) * 6 + 5))
+#define DPLL_CTRL1_SSC(id) (1 << ((id) * 6 + 4))
+#define DPLL_CTRL1_LINK_RATE_MASK(id) (7 << ((id) * 6 + 1))
+#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id) * 6 + 1)
+#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate) << ((id) * 6 + 1))
+#define DPLL_CTRL1_OVERRIDE(id) (1 << ((id) * 6))
+#define DPLL_CTRL1_LINK_RATE_2700 0
+#define DPLL_CTRL1_LINK_RATE_1350 1
+#define DPLL_CTRL1_LINK_RATE_810 2
+#define DPLL_CTRL1_LINK_RATE_1620 3
+#define DPLL_CTRL1_LINK_RATE_1080 4
+#define DPLL_CTRL1_LINK_RATE_2160 5
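
Each DPLL id owns a six-bit slice of DPLL_CTRL1 (override, 3-bit link rate,
SSC and HDMI-mode flags), so reprogramming one PLL is a read-modify-write
confined to that slice. A hedged sketch, assuming the intel_de_rmw() helper
(the function name is illustrative):

static void example_skl_dpll_set_hbr(struct intel_display *display, int id)
{
	/* clear this id's slice, then select the 1350 MHz link frequency */
	intel_de_rmw(display, DPLL_CTRL1,
		     DPLL_CTRL1_LINK_RATE_MASK(id) | DPLL_CTRL1_SSC(id) |
		     DPLL_CTRL1_HDMI_MODE(id),
		     DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, id) |
		     DPLL_CTRL1_OVERRIDE(id));
}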
+
+/* DPLL control2 */
+#define DPLL_CTRL2 _MMIO(0x6C05C)
+#define DPLL_CTRL2_DDI_CLK_OFF(port) (1 << ((port) + 15))
+#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3 << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port) * 3 + 1)
+#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk) << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1 << ((port) * 3))
+
+/* DPLL Status */
+#define DPLL_STATUS _MMIO(0x6C060)
+#define DPLL_LOCK(id) (1 << ((id) * 8))
+
+/* DPLL cfg */
+#define _DPLL1_CFGCR1 0x6C040
+#define _DPLL2_CFGCR1 0x6C048
+#define _DPLL3_CFGCR1 0x6C050
+#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
+#define DPLL_CFGCR1_FREQ_ENABLE (1 << 31)
+#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
+#define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9)
+#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
+
+#define _DPLL1_CFGCR2 0x6C044
+#define _DPLL2_CFGCR2 0x6C04C
+#define _DPLL3_CFGCR2 0x6C054
+#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
+#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8)
+#define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8)
+#define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7)
+#define DPLL_CFGCR2_KDIV_MASK (3 << 5)
+#define DPLL_CFGCR2_KDIV(x) ((x) << 5)
+#define DPLL_CFGCR2_KDIV_5 (0 << 5)
+#define DPLL_CFGCR2_KDIV_2 (1 << 5)
+#define DPLL_CFGCR2_KDIV_3 (2 << 5)
+#define DPLL_CFGCR2_KDIV_1 (3 << 5)
+#define DPLL_CFGCR2_PDIV_MASK (7 << 2)
+#define DPLL_CFGCR2_PDIV(x) ((x) << 2)
+#define DPLL_CFGCR2_PDIV_1 (0 << 2)
+#define DPLL_CFGCR2_PDIV_2 (1 << 2)
+#define DPLL_CFGCR2_PDIV_3 (2 << 2)
+#define DPLL_CFGCR2_PDIV_7 (4 << 2)
+#define DPLL_CFGCR2_PDIV_7_INVALID (5 << 2)
+#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
+
+/* ICL Clocks */
+#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
+#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? \
+ (tc_port) + 12 : \
+ (tc_port) - TC_PORT_4 + 21))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) _PICK(phy, 0, 2, 4, 27)
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) \
+ (3 << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) \
+ ((pll) << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+
+/*
+ * DG1 Clocks
+ * The first register controls phys A and B, while the second register
+ * controls phys C and D. The bits in these registers are the same, but
+ * refer to different phys.
+ */
+#define _DG1_DPCLKA_CFGCR0 0x164280
+#define _DG1_DPCLKA1_CFGCR0 0x16C280
+#define _DG1_DPCLKA_PHY_IDX(phy) ((phy) % 2)
+#define _DG1_DPCLKA_PLL_IDX(pll) ((pll) % 2)
+#define DG1_DPCLKA_CFGCR0(phy) _MMIO_PHY((phy) / 2, \
+ _DG1_DPCLKA_CFGCR0, \
+ _DG1_DPCLKA1_CFGCR0)
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT(_DG1_DPCLKA_PHY_IDX(phy) + 10)
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) (_DG1_DPCLKA_PHY_IDX(phy) * 2)
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) (_DG1_DPCLKA_PLL_IDX(pll) << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (0x3 << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
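
A worked example of the indexing above (illustration only):

/*
 * PHY_C (index 2): 2 / 2 = 1 -> _DG1_DPCLKA1_CFGCR0,
 *                  2 % 2 = 0 -> clock-off bit 10, clock-select bits 1:0
 * PHY_D (index 3): same register, clock-off bit 11, clock-select bits 3:2
 */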
+
+/* ADLS Clocks */
+#define _ADLS_DPCLKA_CFGCR0 0x164280
+#define _ADLS_DPCLKA_CFGCR1 0x1642BC
+#define ADLS_DPCLKA_CFGCR(phy) _MMIO_PHY((phy) / 3, \
+ _ADLS_DPCLKA_CFGCR0, \
+ _ADLS_DPCLKA_CFGCR1)
+#define ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy) (((phy) % 3) * 2)
+/* ADLS DPCLKA_CFGCR0 DDI mask */
+#define ADLS_DPCLKA_DDII_SEL_MASK REG_GENMASK(5, 4)
+#define ADLS_DPCLKA_DDIB_SEL_MASK REG_GENMASK(3, 2)
+#define ADLS_DPCLKA_DDIA_SEL_MASK REG_GENMASK(1, 0)
+/* ADLS DPCLKA_CFGCR1 DDI mask */
+#define ADLS_DPCLKA_DDIK_SEL_MASK REG_GENMASK(3, 2)
+#define ADLS_DPCLKA_DDIJ_SEL_MASK REG_GENMASK(1, 0)
+#define ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy) _PICK((phy), \
+ ADLS_DPCLKA_DDIA_SEL_MASK, \
+ ADLS_DPCLKA_DDIB_SEL_MASK, \
+ ADLS_DPCLKA_DDII_SEL_MASK, \
+ ADLS_DPCLKA_DDIJ_SEL_MASK, \
+ ADLS_DPCLKA_DDIK_SEL_MASK)
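
A worked example of the _PICK() based lookup (illustration only, assuming
the usual PHY_A = 0 numbering): phy index 3 ("DDI J") gives 3 / 3 = 1, i.e.
_ADLS_DPCLKA_CFGCR1, and _PICK(3, ...) selects ADLS_DPCLKA_DDIJ_SEL_MASK,
i.e. bits 1:0 of that register, matching ADLS_DPCLKA_CFGCR_DDI_SHIFT(3) = 0.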
+
+/* ICL PLL */
+#define _DPLL0_ENABLE 0x46010
+#define _DPLL1_ENABLE 0x46014
+#define _ADLS_DPLL2_ENABLE 0x46018
+#define _ADLS_DPLL3_ENABLE 0x46030
+#define PLL_ENABLE REG_BIT(31)
+#define PLL_LOCK REG_BIT(30)
+#define PLL_POWER_ENABLE REG_BIT(27)
+#define PLL_POWER_STATE REG_BIT(26)
+#define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE))
+
+#define _DG2_PLL3_ENABLE 0x4601C
+
+#define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE))
+
+#define TBT_PLL_ENABLE _MMIO(0x46020)
+
+#define _MG_PLL1_ENABLE 0x46030
+#define _MG_PLL2_ENABLE 0x46034
+#define _MG_PLL3_ENABLE 0x46038
+#define _MG_PLL4_ENABLE 0x4603C
+/* Bits are the same as _DPLL0_ENABLE */
+#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
+ _MG_PLL2_ENABLE)
+
+/* DG1 PLL */
+#define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _MG_PLL1_ENABLE, _MG_PLL2_ENABLE))
+
+/* ADL-P Type C PLL */
+#define PORTTC1_PLL_ENABLE 0x46038
+#define PORTTC2_PLL_ENABLE 0x46040
+#define ADLP_PORTTC_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), \
+ PORTTC1_PLL_ENABLE, \
+ PORTTC2_PLL_ENABLE)
+
+#define _ICL_DPLL0_CFGCR0 0x164000
+#define _ICL_DPLL1_CFGCR0 0x164080
+#define ICL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \
+ _ICL_DPLL1_CFGCR0)
+#define DPLL_CFGCR0_HDMI_MODE (1 << 30)
+#define DPLL_CFGCR0_SSC_ENABLE (1 << 29)
+#define DPLL_CFGCR0_SSC_ENABLE_ICL (1 << 25)
+#define DPLL_CFGCR0_LINK_RATE_MASK (0xf << 25)
+#define DPLL_CFGCR0_LINK_RATE_2700 (0 << 25)
+#define DPLL_CFGCR0_LINK_RATE_1350 (1 << 25)
+#define DPLL_CFGCR0_LINK_RATE_810 (2 << 25)
+#define DPLL_CFGCR0_LINK_RATE_1620 (3 << 25)
+#define DPLL_CFGCR0_LINK_RATE_1080 (4 << 25)
+#define DPLL_CFGCR0_LINK_RATE_2160 (5 << 25)
+#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
+#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
+#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
+#define DPLL_CFGCR0_DCO_FRACTION_SHIFT (10)
+#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
+#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
+
+#define _ICL_DPLL0_CFGCR1 0x164004
+#define _ICL_DPLL1_CFGCR1 0x164084
+#define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
+ _ICL_DPLL1_CFGCR1)
+#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
+#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
+#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
+#define DPLL_CFGCR1_QDIV_MODE_SHIFT (9)
+#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
+#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
+#define DPLL_CFGCR1_KDIV_SHIFT (6)
+#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
+#define DPLL_CFGCR1_KDIV_1 (1 << 6)
+#define DPLL_CFGCR1_KDIV_2 (2 << 6)
+#define DPLL_CFGCR1_KDIV_3 (4 << 6)
+#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
+#define DPLL_CFGCR1_PDIV_SHIFT (2)
+#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
+#define DPLL_CFGCR1_PDIV_2 (1 << 2)
+#define DPLL_CFGCR1_PDIV_3 (2 << 2)
+#define DPLL_CFGCR1_PDIV_5 (4 << 2)
+#define DPLL_CFGCR1_PDIV_7 (8 << 2)
+#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
+#define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0)
+#define TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0)
+
+#define _TGL_DPLL0_CFGCR0 0x164284
+#define _TGL_DPLL1_CFGCR0 0x16428C
+#define _TGL_TBTPLL_CFGCR0 0x16429C
+#define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0))
+#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
+ _TGL_DPLL1_CFGCR0)
+
+#define _TGL_DPLL0_DIV0 0x164B00
+#define _TGL_DPLL1_DIV0 0x164C00
+#define TGL_DPLL0_DIV0(pll) _MMIO_PLL(pll, _TGL_DPLL0_DIV0, _TGL_DPLL1_DIV0)
+#define TGL_DPLL0_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
+#define TGL_DPLL0_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(TGL_DPLL0_DIV0_AFC_STARTUP_MASK, (val))
+
+#define _TGL_DPLL0_CFGCR1 0x164288
+#define _TGL_DPLL1_CFGCR1 0x164290
+#define _TGL_TBTPLL_CFGCR1 0x1642A0
+#define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1))
+#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
+ _TGL_DPLL1_CFGCR1)
+
+#define _DG1_DPLL2_CFGCR0 0x16C284
+#define _DG1_DPLL3_CFGCR0 0x16C28C
+#define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0))
+
+#define _DG1_DPLL2_CFGCR1 0x16C288
+#define _DG1_DPLL3_CFGCR1 0x16C290
+#define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1))
+
+/* On ADL-S, DPLL4_CFGCR0/1 are used to control DPLL2 */
+#define _ADLS_DPLL4_CFGCR0 0x164294
+#define _ADLS_DPLL3_CFGCR0 0x1642C0
+#define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0))
+
+#define _ADLS_DPLL4_CFGCR1 0x164298
+#define _ADLS_DPLL3_CFGCR1 0x1642C4
+#define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1))
+
+/* BXT display engine PLL */
+#define BXT_DE_PLL_CTL _MMIO(0x6d000)
+#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
+#define BXT_DE_PLL_RATIO_MASK 0xff
+
+#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
+#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
+#define BXT_DE_PLL_LOCK (1 << 30)
+#define BXT_DE_PLL_FREQ_REQ (1 << 23)
+#define BXT_DE_PLL_FREQ_REQ_ACK (1 << 22)
+#define ICL_CDCLK_PLL_RATIO(x) (x)
+#define ICL_CDCLK_PLL_RATIO_MASK 0xff
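
The ratio fields are plain multipliers of the 19.2 MHz reference. A worked
example of the arithmetic (illustration only; the divider path is the usual
CD2X arrangement):

/*
 * BXT_DE_PLL_RATIO(60): DE PLL = 60 * 19.2 MHz = 1152 MHz;
 * with a CD2X divider of 1, CDCLK = 1152 / 2 = 576 MHz.
 */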
+
+/* GEN9 DC */
+#define DC_STATE_EN _MMIO(0x45504)
+#define DC_STATE_DISABLE 0
+#define DC_STATE_EN_DC3CO REG_BIT(30)
+#define DC_STATE_DC3CO_STATUS REG_BIT(29)
+#define HOLD_PHY_CLKREQ_PG1_LATCH REG_BIT(21)
+#define HOLD_PHY_PG1_LATCH REG_BIT(20)
+#define DC_STATE_EN_UPTO_DC5 (1 << 0)
+#define DC_STATE_EN_DC9 (1 << 3)
+#define DC_STATE_EN_UPTO_DC6 (2 << 0)
+#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
+
+#define DC_STATE_DEBUG _MMIO(0x45520)
+#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
+#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
+
+#define D_COMP_BDW _MMIO(0x138144)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define _WM_LINETIME_A 0x45270
+#define _WM_LINETIME_B 0x45274
+#define WM_LINETIME(pipe) _MMIO_PIPE(pipe, _WM_LINETIME_A, _WM_LINETIME_B)
+#define HSW_LINETIME_MASK REG_GENMASK(8, 0)
+#define HSW_LINETIME(x) REG_FIELD_PREP(HSW_LINETIME_MASK, (x))
+#define HSW_IPS_LINETIME_MASK REG_GENMASK(24, 16)
+#define HSW_IPS_LINETIME(x) REG_FIELD_PREP(HSW_IPS_LINETIME_MASK, (x))
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP _MMIO(0xc2014)
+#define SFUSE_STRAP_FUSE_LOCK (1 << 13)
+#define SFUSE_STRAP_RAW_FREQUENCY (1 << 8)
+#define SFUSE_STRAP_DISPLAY_DISABLED (1 << 7)
+#define SFUSE_STRAP_CRT_DISABLED (1 << 6)
+#define SFUSE_STRAP_DDIF_DETECTED (1 << 3)
+#define SFUSE_STRAP_DDIB_DETECTED (1 << 2)
+#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
+#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
+
+/* Gen4+ Timestamp and Pipe Frame time stamp registers */
+#define GEN4_TIMESTAMP _MMIO(0x2358)
+#define ILK_TIMESTAMP_HI _MMIO(0x70070)
+#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
+
+/* g4x+, except vlv/chv! */
+#define _PIPE_FRMTMSTMP_A 0x70048
+#define _PIPE_FRMTMSTMP_B 0x71048
+#define PIPE_FRMTMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FRMTMSTMP_A, _PIPE_FRMTMSTMP_B)
+
+/* g4x+, except vlv/chv! */
+#define _PIPE_FLIPTMSTMP_A 0x7004C
+#define _PIPE_FLIPTMSTMP_B 0x7104C
+#define PIPE_FLIPTMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FLIPTMSTMP_A, _PIPE_FLIPTMSTMP_B)
+
+/* tgl+ */
+#define _PIPE_FLIPDONETMSTMP_A 0x70054
+#define _PIPE_FLIPDONETMSTMP_B 0x71054
+#define PIPE_FLIPDONETIMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B)
+
+#define _VLV_PIPE_MSA_MISC_A 0x70048
+#define VLV_PIPE_MSA_MISC(__display, pipe) \
+ _MMIO_PIPE2(__display, pipe, _VLV_PIPE_MSA_MISC_A)
+#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
+#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
+
+#define _ICL_PHY_MISC_A 0x64C00
+#define _ICL_PHY_MISC_B 0x64C04
+#define _DG2_PHY_MISC_TC1 0x64C14 /* TC1="PHY E" but offset as if "PHY F" */
+#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, _ICL_PHY_MISC_B)
+#define DG2_PHY_MISC(port) ((port) == PHY_E ? _MMIO(_DG2_PHY_MISC_TC1) : \
+ ICL_PHY_MISC(port))
+#define ICL_PHY_MISC_MUX_DDID (1 << 28)
+#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
+#define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20)
+
+#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
+#define MODULAR_FIA_MASK (1 << 4)
+#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
+#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5))
+#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8)
+#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8))
+#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8))
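
Each type-C connector instance owns an 8-bit slice of DFLEXDPSP: live-state
flags in the upper bits and a 4-bit lane-assignment nibble at the bottom. A
hedged read-side sketch, assuming the intel_de_read() helper (the function
name and parameter meanings are illustrative):

static u32 example_tc_lane_assignment(struct intel_display *display,
				      int fia, int idx)
{
	u32 val = intel_de_read(display, PORT_TX_DFLEXDPSP(fia));

	/* extract this connector's 4-bit lane assignment nibble */
	return (val & DP_LANE_ASSIGNMENT_MASK(idx)) >> DP_LANE_ASSIGNMENT_SHIFT(idx);
}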
+
+#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
+#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
+#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880)
+#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
+#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
+#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+
+#define _TCSS_DDI_STATUS_1 0x161500
+#define _TCSS_DDI_STATUS_2 0x161504
+#define TCSS_DDI_STATUS(tc) _MMIO(_PICK_EVEN(tc, \
+ _TCSS_DDI_STATUS_1, \
+ _TCSS_DDI_STATUS_2))
+#define TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK REG_GENMASK(28, 25)
+#define TCSS_DDI_STATUS_READY REG_BIT(2)
+#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1)
+#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0)
+
+#define CLKREQ_POLICY _MMIO(0x101038)
+#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
+
+#define CLKGATE_DIS_MISC _MMIO(0x46534)
+#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
+
+#define _MTL_CLKGATE_DIS_TRANS_A 0x604E8
+#define _MTL_CLKGATE_DIS_TRANS_B 0x614E8
+#define MTL_CLKGATE_DIS_TRANS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
+#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
+
+#define _MTL_PIPE_CLKGATE_DIS2_A 0x60114
+#define _MTL_PIPE_CLKGATE_DIS2_B 0x61114
+#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B)
+#define MTL_DPFC_GATING_DIS REG_BIT(6)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_OFFSET 0x45710
+#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8)
+#define MTL_TRCD_MASK REG_GENMASK(31, 24)
+#define MTL_TRP_MASK REG_GENMASK(23, 16)
+#define MTL_DCLK_MASK REG_GENMASK(15, 0)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8 + 4)
+#define MTL_TRAS_MASK REG_GENMASK(16, 8)
+#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
+
+#endif /* __INTEL_DISPLAY_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 1dbd3e841df3..f5f38dca14d7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -8,6 +8,7 @@
#include "i915_drv.h"
#include "intel_clock_gating.h"
#include "intel_cx0_phy.h"
+#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_reset.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c
index 48da67dd0136..56c4024201c1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rpm.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c
@@ -2,6 +2,7 @@
/* Copyright © 2025 Intel Corporation */
#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_runtime_pm.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
index 678b24115951..82ea1ec482e4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -9,6 +9,7 @@
#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_display_core.h"
#include "intel_display_irq.h"
#include "intel_display_rps.h"
#include "intel_display_types.h"
@@ -45,12 +46,13 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence)
{
+ struct intel_display *display = to_intel_display(crtc->dev);
struct wait_rps_boost *wait;
if (!dma_fence_is_i915(fence))
return;
- if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
+ if (DISPLAY_VER(display) < 6)
return;
if (drm_crtc_vblank_get(crtc))
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index d6d0440dcee9..ce45261c4a8f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -37,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_panel.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank_work.h>
#include <drm/intel/i915_hdcp_interface.h>
@@ -145,6 +146,8 @@ struct intel_framebuffer {
unsigned int min_alignment;
unsigned int vtd_guard;
+
+ unsigned int (*panic_tiling)(unsigned int x, unsigned int y, unsigned int width);
};
enum intel_hotplug_state {
@@ -384,6 +387,9 @@ struct intel_vbt_panel_data {
};
struct intel_panel {
+ /* Simple drm_panel */
+ struct drm_panel *base;
+
/* Fixed EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
const struct drm_edid *fixed_edid;
@@ -550,6 +556,10 @@ struct intel_connector {
struct intel_dp *dp;
} mst;
+ struct {
+ int force_bpp_x16;
+ } link;
+
/* Work struct to schedule a uevent on link train failure */
struct work_struct modeset_retry_work;
@@ -591,7 +601,7 @@ struct intel_atomic_state {
bool dpll_set, modeset;
- struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
+ struct intel_dpll_state dpll_state[I915_NUM_PLLS];
struct intel_dp_tunnel_inherited_state *inherited_dp_tunnels;
@@ -1075,8 +1085,8 @@ struct intel_crtc_state {
* haswell. */
struct dpll dpll;
- /* Selected dpll when shared or NULL. */
- struct intel_shared_dpll *shared_dpll;
+ /* Selected dpll or NULL. */
+ struct intel_dpll *intel_dpll;
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
@@ -1086,7 +1096,7 @@ struct intel_crtc_state {
* setting shared_dpll and dpll_hw_state to one of these reserved ones.
*/
struct icl_port_dpll {
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
struct intel_dpll_hw_state hw_state;
} icl_port_dplls[ICL_PORT_DPLL_COUNT];
@@ -1293,8 +1303,9 @@ struct intel_crtc_state {
enum transcoder mst_master_transcoder;
/* For DSB based pipe updates */
- struct intel_dsb *dsb_color_vblank, *dsb_commit;
+ struct intel_dsb *dsb_color, *dsb_commit;
bool use_dsb;
+ bool use_flipq;
u32 psr2_man_track_ctl;
@@ -1361,6 +1372,21 @@ struct intel_pipe_crc {
enum intel_pipe_crc_source source;
};
+enum intel_flipq_id {
+ INTEL_FLIPQ_PLANE_1,
+ INTEL_FLIPQ_PLANE_2,
+ INTEL_FLIPQ_PLANE_3,
+ INTEL_FLIPQ_GENERAL,
+ INTEL_FLIPQ_FAST,
+ MAX_INTEL_FLIPQ,
+};
+
+struct intel_flipq {
+ u32 start_mmioaddr;
+ enum intel_flipq_id flipq_id;
+ u8 tail;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -1387,11 +1413,15 @@ struct intel_crtc {
struct drm_pending_vblank_event *flip_done_event;
/* armed event for DSB based updates */
struct drm_pending_vblank_event *dsb_event;
+ /* armed event for flip queue based updates */
+ struct drm_pending_vblank_event *flipq_event;
/* Access to these should be protected by display->irq.lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
+ struct intel_flipq flipq[MAX_INTEL_FLIPQ];
+
/* per-pipe watermark state */
struct {
/* watermarks currently being used */
@@ -1513,6 +1543,8 @@ struct intel_plane {
bool async_flip);
void (*enable_flip_done)(struct intel_plane *plane);
void (*disable_flip_done)(struct intel_plane *plane);
+ /* For drm_panic */
+ void (*disable_tiling)(struct intel_plane *plane);
};
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
@@ -1665,7 +1697,9 @@ struct intel_dp {
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
- u8 pr_dpcd;
+ u8 pr_dpcd[DP_PANEL_REPLAY_CAP_SIZE];
+#define INTEL_PR_DPCD_INDEX(pr_dpcd_register) ((pr_dpcd_register) - DP_PANEL_REPLAY_CAP_SUPPORT)
+
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index da429c332914..f57280e9d041 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -6,6 +6,7 @@
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_core.h"
+#include "intel_display_regs.h"
#include "intel_display_wa.h"
static void gen11_display_wa_apply(struct intel_display *display)
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
index dad7192132ad..35e919eae369 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
@@ -25,7 +25,9 @@ dkl_phy_set_hip_idx(struct intel_display *display, struct intel_dkl_phy_reg reg)
{
enum tc_port tc_port = DKL_REG_TC_PORT(reg);
- drm_WARN_ON(display->drm, tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS);
+ if (drm_WARN_ON(display->drm,
+ tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS))
+ return;
intel_de_write(display,
HIP_INDEX_REG(tc_port),
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h b/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h
index 56085b32956d..f8ffeec29e93 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#include "intel_display_reg_defs.h"
+
struct intel_dkl_phy_reg {
u32 reg:24;
u32 bank_idx:4;
@@ -151,6 +153,7 @@ struct intel_dkl_phy_reg {
#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK, (val))
#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK REG_GENMASK(6, 5)
#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, (val))
+#define LOADGEN_SHARING_PMD_DISABLE REG_BIT(12)
#define _DKL_TX_FW_CALIB_LN0 0x02F8
#define _DKL_TX_FW_CALIB_LN1 0x12F8
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index b58189d24e7e..744f51c0eab8 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -24,14 +24,22 @@
#include <linux/debugfs.h>
#include <linux/firmware.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
+#include "intel_crtc.h"
#include "intel_de.h"
-#include "intel_display_rpm.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
+#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
+#include "intel_flipq.h"
#include "intel_step.h"
/**
@@ -176,7 +184,8 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
const char *fw_path = NULL;
u32 max_fw_size = 0;
- if (DISPLAY_VERx100(display) == 3000) {
+ if (DISPLAY_VERx100(display) == 3002 ||
+ DISPLAY_VERx100(display) == 3000) {
fw_path = XE3LPD_DMC_PATH;
max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VERx100(display) == 2000) {
@@ -425,29 +434,26 @@ static void disable_event_handler(struct intel_display *display,
REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
DMC_EVT_CTL_TYPE_EDGE_0_1) |
REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_FALSE));
+ DMC_EVENT_FALSE));
intel_de_write(display, htp_reg, 0);
}
-static void disable_all_event_handlers(struct intel_display *display)
+static void disable_all_event_handlers(struct intel_display *display,
+ enum intel_dmc_id dmc_id)
{
- enum intel_dmc_id dmc_id;
+ int handler;
/* TODO: disable the event handlers on pre-GEN12 platforms as well */
if (DISPLAY_VER(display) < 12)
return;
- for_each_dmc_id(dmc_id) {
- int handler;
-
- if (!has_dmc_id_fw(display, dmc_id))
- continue;
+ if (!has_dmc_id_fw(display, dmc_id))
+ return;
- for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
- disable_event_handler(display,
- DMC_EVT_CTL(display, dmc_id, handler),
- DMC_EVT_HTP(display, dmc_id, handler));
- }
+ for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
+ disable_event_handler(display,
+ DMC_EVT_CTL(display, dmc_id, handler),
+ DMC_EVT_HTP(display, dmc_id, handler));
}
static void adlp_pipedmc_clock_gating_wa(struct intel_display *display, bool enable)
@@ -479,89 +485,36 @@ static void mtl_pipedmc_clock_gating_wa(struct intel_display *display)
* for pipe A and B.
*/
intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0,
- MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
+ MTL_PIPEDMC_GATING_DIS(PIPE_A) |
+ MTL_PIPEDMC_GATING_DIS(PIPE_B));
}
static void pipedmc_clock_gating_wa(struct intel_display *display, bool enable)
{
- if (DISPLAY_VER(display) >= 14 && enable)
+ if (display->platform.meteorlake && enable)
mtl_pipedmc_clock_gating_wa(display);
else if (DISPLAY_VER(display) == 13)
adlp_pipedmc_clock_gating_wa(display, enable);
}
-void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe)
+static u32 pipedmc_interrupt_mask(struct intel_display *display)
{
- enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
-
- if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
- return;
-
- if (DISPLAY_VER(display) >= 14)
- intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
- else
- intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
-}
-
-void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe)
-{
- enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
-
- if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
- return;
-
- if (DISPLAY_VER(display) >= 14)
- intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
- else
- intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
-}
-
-/**
- * intel_dmc_block_pkgc() - block PKG C-state
- * @display: display instance
- * @pipe: pipe which register use to block
- * @block: block/unblock
- *
- * This interface is target for Wa_16025596647 usage. I.e. to set/clear
- * PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS bit in PIPEDMC_BLOCK_PKGC_SW register.
- */
-void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
- bool block)
-{
- intel_de_rmw(display, PIPEDMC_BLOCK_PKGC_SW(pipe),
- PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS, block ?
- PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS : 0);
+ /*
+ * FIXME PIPEDMC_ERROR not enabled for now due to LNL pipe B
+ * triggering it during the first DC state transition. Figure
+ * out what is going on...
+ */
+ return PIPEDMC_FLIPQ_PROG_DONE |
+ PIPEDMC_GTT_FAULT |
+ PIPEDMC_ATS_FAULT;
}
-/**
- * intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank() - start of PKG
- * C-state exit
- * @display: display instance
- * @pipe: pipe which register use to block
- * @enable: enable/disable
- *
- * This interface is target for Wa_16025596647 usage. I.e. start the package C
- * exit at the start of the undelayed vblank
- */
-void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
- enum pipe pipe, bool enable)
+static u32 dmc_evt_ctl_disable(void)
{
- u32 val;
-
- if (enable)
- val = DMC_EVT_CTL_ENABLE | DMC_EVT_CTL_RECURRING |
- REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
- REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_VBLANK_A);
- else
- val = REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_FALSE) |
- REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1);
-
- intel_de_write(display, MTL_PIPEDMC_EVT_CTL_4(pipe),
- val);
+ return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVENT_FALSE);
}
static bool is_dmc_evt_ctl_reg(struct intel_display *display,
@@ -584,6 +537,15 @@ static bool is_dmc_evt_htp_reg(struct intel_display *display,
return offset >= start && offset < end;
}
+static bool is_event_handler(struct intel_display *display,
+ enum intel_dmc_id dmc_id,
+ unsigned int event_id,
+ i915_reg_t reg, u32 data)
+{
+ return is_dmc_evt_ctl_reg(display, dmc_id, reg) &&
+ REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
+}
+
static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
@@ -597,12 +559,12 @@ static bool disable_dmc_evt(struct intel_display *display,
/* also disable the flip queue event on the main DMC on TGL */
if (display->platform.tigerlake &&
- REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC)
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_CLK_MSEC, reg, data))
return true;
/* also disable the HRR event on the main DMC on TGL/ADLS */
if ((display->platform.tigerlake || display->platform.alderlake_s) &&
- REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A)
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg, data))
return true;
return false;
@@ -615,14 +577,267 @@ static u32 dmc_mmiodata(struct intel_display *display,
if (disable_dmc_evt(display, dmc_id,
dmc->dmc_info[dmc_id].mmioaddr[i],
dmc->dmc_info[dmc_id].mmiodata[i]))
- return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
- REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_FALSE);
+ return dmc_evt_ctl_disable();
else
return dmc->dmc_info[dmc_id].mmiodata[i];
}
+static void dmc_load_mmio(struct intel_display *display, enum intel_dmc_id dmc_id)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ int i;
+
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i],
+ dmc_mmiodata(display, dmc, dmc_id, i));
+ }
+}
+
+static void dmc_load_program(struct intel_display *display, enum intel_dmc_id dmc_id)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ int i;
+
+ disable_all_event_handlers(display, dmc_id);
+
+ preempt_disable();
+
+ for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
+ intel_de_write_fw(display,
+ DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
+ dmc->dmc_info[dmc_id].payload[i]);
+ }
+
+ preempt_enable();
+
+ dmc_load_mmio(display, dmc_id);
+}
+
+static void assert_dmc_loaded(struct intel_display *display,
+ enum intel_dmc_id dmc_id)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ u32 expected, found;
+ int i;
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
+ return;
+
+ found = intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, 0));
+ expected = dmc->dmc_info[dmc_id].payload[0];
+
+ drm_WARN(display->drm, found != expected,
+ "DMC %d program storage start incorrect (expected 0x%x, current 0x%x)\n",
+ dmc_id, expected, found);
+
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];
+
+ found = intel_de_read(display, reg);
+ expected = dmc_mmiodata(display, dmc, dmc_id, i);
+
+ /* once set DMC_EVT_CTL_ENABLE can't be cleared :/ */
+ if (is_dmc_evt_ctl_reg(display, dmc_id, reg)) {
+ found &= ~DMC_EVT_CTL_ENABLE;
+ expected &= ~DMC_EVT_CTL_ENABLE;
+ }
+
+ drm_WARN(display->drm, found != expected,
+ "DMC %d mmio[%d]/0x%x incorrect (expected 0x%x, current 0x%x)\n",
+ dmc_id, i, i915_mmio_reg_offset(reg), expected, found);
+ }
+}
+
+void assert_main_dmc_loaded(struct intel_display *display)
+{
+ assert_dmc_loaded(display, DMC_FW_MAIN);
+}
+
+static bool need_pipedmc_load_program(struct intel_display *display)
+{
+ /* On TGL/derivatives pipe DMC state is lost when PG1 is disabled */
+ return DISPLAY_VER(display) == 12;
+}
+
+static bool need_pipedmc_load_mmio(struct intel_display *display, enum pipe pipe)
+{
+ /*
+ * PTL:
+ * - pipe A/B DMC doesn't need save/restore
+ * - pipe C/D DMC is in PG0, needs manual save/restore
+ */
+ if (DISPLAY_VER(display) == 30)
+ return pipe >= PIPE_C;
+
+ /*
+ * FIXME LNL unclear, main DMC firmware has the pipe DMC A/B PG0
+ * save/restore, but so far unable to see the loss of pipe DMC state
+ * in action. Are we just failing to turn off PG0 due to some other
+ * SoC level stuff?
+ */
+ if (DISPLAY_VER(display) == 20)
+ return false;
+
+ /*
+ * FIXME BMG untested, main DMC firmware has the
+ * pipe DMC A/B PG0 save/restore...
+ */
+ if (display->platform.battlemage)
+ return false;
+
+ /*
+ * DG2:
+ * - Pipe DMCs presumably in PG0?
+ * - No DC6, and even DC9 doesn't seem to result
+ * in loss of DMC state for whatever reason
+ */
+ if (display->platform.dg2)
+ return false;
+
+ /*
+ * ADL/MTL:
+ * - pipe A/B DMC is in PG0, saved/restored by the main DMC
+ * - pipe C/D DMC is in PG0, needs manual save/restore
+ */
+ if (IS_DISPLAY_VER(display, 13, 14))
+ return pipe >= PIPE_C;
+
+ return false;
+}
+
+static bool can_enable_pipedmc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ /*
+ * On TGL/derivatives pipe DMC state is lost when PG1 is disabled.
+ * Do not even enable the pipe DMC when that can happen outside
+ * of driver control (PSR+DC5/6).
+ */
+ if (DISPLAY_VER(display) == 12 && crtc_state->has_psr)
+ return false;
+
+ return true;
+}
+
+void intel_dmc_enable_pipe(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
+ return;
+
+ if (!can_enable_pipedmc(crtc_state)) {
+ intel_dmc_disable_pipe(crtc_state);
+ return;
+ }
+
+ if (need_pipedmc_load_program(display))
+ dmc_load_program(display, dmc_id);
+ else if (need_pipedmc_load_mmio(display, pipe))
+ dmc_load_mmio(display, dmc_id);
+
+ assert_dmc_loaded(display, dmc_id);
+
+ if (DISPLAY_VER(display) >= 20) {
+ intel_flipq_reset(display, pipe);
+
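+		/*
+		 * Ack any stale interrupts (presumably write-1-to-clear, as in
+		 * the disable path), then unmask them, before the DMC itself
+		 * is enabled below so no early events are lost.
+		 */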
+ intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display));
+ intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~pipedmc_interrupt_mask(display));
+ }
+
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
+ else
+ intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
+}
+
+void intel_dmc_disable_pipe(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
+ return;
+
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
+ else
+ intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
+
+ if (DISPLAY_VER(display) >= 20) {
+ intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~0);
+ intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display));
+
+ intel_flipq_reset(display, pipe);
+ }
+}
+
+static void dmc_configure_event(struct intel_display *display,
+ enum intel_dmc_id dmc_id,
+ unsigned int event_id,
+ bool enable)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ int num_handlers = 0;
+ int i;
+
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];
+ u32 data = dmc->dmc_info[dmc_id].mmiodata[i];
+
+ if (!is_event_handler(display, dmc_id, event_id, reg, data))
+ continue;
+
+ intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable());
+ num_handlers++;
+ }
+
+ drm_WARN_ONCE(display->drm, num_handlers != 1,
+ "DMC %d has %d handlers for event 0x%x\n",
+ dmc_id, num_handlers, event_id);
+}
+
+/**
+ * intel_dmc_block_pkgc() - block PKG C-state
+ * @display: display instance
+ * @pipe: pipe whose register is used for blocking
+ * @block: block/unblock
+ *
+ * This interface is intended for Wa_16025596647 usage, i.e. to set/clear the
+ * PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS bit in the PIPEDMC_BLOCK_PKGC_SW register.
+ */
+void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
+ bool block)
+{
+ intel_de_rmw(display, PIPEDMC_BLOCK_PKGC_SW(pipe),
+ PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS, block ?
+ PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS : 0);
+}
+
+/**
+ * intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank() - start of PKG
+ * C-state exit
+ * @display: display instance
+ * @pipe: pipe whose DMC handles the event
+ * @enable: enable/disable
+ *
+ * This interface is intended for Wa_16025596647 usage, i.e. to start the
+ * package C exit at the start of the undelayed vblank.
+ */
+void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
+ enum pipe pipe, bool enable)
+{
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ dmc_configure_event(display, dmc_id, PIPEDMC_EVENT_VBLANK, enable);
+}
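+
+/*
+ * Minimal usage sketch for the two Wa_16025596647 helpers above
+ * (hypothetical caller and ordering, not taken from this patch):
+ *
+ *	intel_dmc_block_pkgc(display, pipe, true);
+ *	intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display, pipe, true);
+ *	... workaround-protected window ...
+ *	intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display, pipe, false);
+ *	intel_dmc_block_pkgc(display, pipe, false);
+ */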
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @display: display instance
@@ -634,37 +849,26 @@ static u32 dmc_mmiodata(struct intel_display *display,
void intel_dmc_load_program(struct intel_display *display)
{
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_dmc *dmc = display_to_dmc(display);
enum intel_dmc_id dmc_id;
- u32 i;
if (!intel_dmc_has_payload(display))
return;
- pipedmc_clock_gating_wa(display, true);
-
- disable_all_event_handlers(display);
-
assert_display_rpm_held(display);
- preempt_disable();
+ pipedmc_clock_gating_wa(display, true);
for_each_dmc_id(dmc_id) {
- for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
- intel_de_write_fw(display,
- DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
- dmc->dmc_info[dmc_id].payload[i]);
- }
+ dmc_load_program(display, dmc_id);
+ assert_dmc_loaded(display, dmc_id);
}
- preempt_enable();
-
- for_each_dmc_id(dmc_id) {
- for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
- intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i],
- dmc_mmiodata(display, dmc, dmc_id, i));
- }
- }
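+ /* Point each pipe DMC's W2 PTS config select at its own pipe (1:1 mapping). */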
+ if (DISPLAY_VER(display) >= 20)
+ intel_de_write(display, DMC_FQ_W2_PTS_CFG_SEL,
+ PIPE_D_DMC_W2_PTS_CONFIG_SELECT(PIPE_D) |
+ PIPE_C_DMC_W2_PTS_CONFIG_SELECT(PIPE_C) |
+ PIPE_B_DMC_W2_PTS_CONFIG_SELECT(PIPE_B) |
+ PIPE_A_DMC_W2_PTS_CONFIG_SELECT(PIPE_A));
power_domains->dc_state = 0;
@@ -682,26 +886,17 @@ void intel_dmc_load_program(struct intel_display *display)
*/
void intel_dmc_disable_program(struct intel_display *display)
{
+ enum intel_dmc_id dmc_id;
+
if (!intel_dmc_has_payload(display))
return;
pipedmc_clock_gating_wa(display, true);
- disable_all_event_handlers(display);
- pipedmc_clock_gating_wa(display, false);
-}
-void assert_dmc_loaded(struct intel_display *display)
-{
- struct intel_dmc *dmc = display_to_dmc(display);
+ for_each_dmc_id(dmc_id)
+ disable_all_event_handlers(display, dmc_id);
- drm_WARN_ONCE(display->drm, !dmc, "DMC not initialized\n");
- drm_WARN_ONCE(display->drm, dmc &&
- !intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
- "DMC program storage start is NULL\n");
- drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_SSP_BASE),
- "DMC SSP Base Not fine\n");
- drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_HTP_SKL),
- "DMC HTP Not fine\n");
+ pipedmc_clock_gating_wa(display, false);
}
static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
@@ -1120,7 +1315,6 @@ out:
*/
void intel_dmc_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_dmc *dmc;
if (!HAS_DMC(display))
@@ -1163,7 +1357,7 @@ void intel_dmc_init(struct intel_display *display)
display->dmc.dmc = dmc;
drm_dbg_kms(display->drm, "Loading %s\n", dmc->fw_path);
- queue_work(i915->unordered_wq, &dmc->work);
+ queue_work(display->wq.unordered, &dmc->work);
return;
@@ -1194,6 +1388,17 @@ void intel_dmc_suspend(struct intel_display *display)
intel_dmc_runtime_pm_put(display);
}
+void intel_dmc_wait_fw_load(struct intel_display *display)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+
+ if (!HAS_DMC(display))
+ return;
+
+ if (dmc)
+ flush_work(&dmc->work);
+}
+
/**
* intel_dmc_resume() - init DMC firmware during system resume
* @display: display instance
@@ -1403,3 +1608,73 @@ void intel_dmc_debugfs_register(struct intel_display *display)
debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
display, &intel_dmc_debugfs_status_fops);
}
+
+void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
+ u32 tmp = 0, int_vector;
+
+ if (DISPLAY_VER(display) >= 20) {
+ tmp = intel_de_read(display, PIPEDMC_INTERRUPT(pipe));
+ intel_de_write(display, PIPEDMC_INTERRUPT(pipe), tmp);
+
+ if (tmp & PIPEDMC_FLIPQ_PROG_DONE) {
+ spin_lock(&display->drm->event_lock);
+
+ if (crtc->flipq_event) {
+ /*
+ * Update vblank counter/timestamp in case it
+ * hasn't been done yet for this frame.
+ */
+ drm_crtc_accurate_vblank_count(&crtc->base);
+
+ drm_crtc_send_vblank_event(&crtc->base, crtc->flipq_event);
+ crtc->flipq_event = NULL;
+ }
+
+ spin_unlock(&display->drm->event_lock);
+ }
+
+ if (tmp & PIPEDMC_ATS_FAULT)
+ drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC ATS fault\n",
+ crtc->base.base.id, crtc->base.name);
+ if (tmp & PIPEDMC_GTT_FAULT)
+ drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC GTT fault\n",
+ crtc->base.base.id, crtc->base.name);
+ if (tmp & PIPEDMC_ERROR)
+ drm_err(display->drm, "[CRTC:%d:%s]] PIPEDMC error\n",
+ crtc->base.base.id, crtc->base.name);
+ }
+
+ int_vector = intel_de_read(display, PIPEDMC_STATUS(pipe)) & PIPEDMC_INT_VECTOR_MASK;
+ if (tmp == 0 && int_vector != 0)
+ drm_err(display->drm, "[CRTC:%d:%s]] PIPEDMC interrupt vector 0x%x\n",
+ crtc->base.base.id, crtc->base.name, tmp);
+}
+
+void intel_pipedmc_enable_event(struct intel_crtc *crtc,
+ enum pipedmc_event_id event)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
+
+ dmc_configure_event(display, dmc_id, event, true);
+}
+
+void intel_pipedmc_disable_event(struct intel_crtc *crtc,
+ enum pipedmc_event_id event)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
+
+ dmc_configure_event(display, dmc_id, event, false);
+}
+
+u32 intel_pipedmc_start_mmioaddr(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_dmc *dmc = display_to_dmc(display);
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
+
+ return dmc ? dmc->dmc_info[dmc_id].start_mmioaddr : 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index bd1c459b0075..40e9dcb033cc 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -9,15 +9,19 @@
#include <linux/types.h>
enum pipe;
+enum pipedmc_event_id;
struct drm_printer;
+struct intel_crtc;
+struct intel_crtc_state;
struct intel_display;
struct intel_dmc_snapshot;
void intel_dmc_init(struct intel_display *display);
void intel_dmc_load_program(struct intel_display *display);
+void intel_dmc_wait_fw_load(struct intel_display *display);
void intel_dmc_disable_program(struct intel_display *display);
-void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe);
-void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe);
+void intel_dmc_enable_pipe(const struct intel_crtc_state *crtc_state);
+void intel_dmc_disable_pipe(const struct intel_crtc_state *crtc_state);
void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
bool block);
void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
@@ -32,6 +36,16 @@ struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *disp
void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p);
void intel_dmc_update_dc6_allowed_count(struct intel_display *display, bool start_tracking);
-void assert_dmc_loaded(struct intel_display *display);
+void assert_main_dmc_loaded(struct intel_display *display);
+
+void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe);
+
+u32 intel_pipedmc_start_mmioaddr(struct intel_crtc *crtc);
+void intel_pipedmc_enable_event(struct intel_crtc *crtc,
+ enum pipedmc_event_id event);
+void intel_pipedmc_disable_event(struct intel_crtc *crtc,
+ enum pipedmc_event_id event);
+
#endif /* __INTEL_DMC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index e16ea3f16ed8..c5aa49921cb9 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -6,7 +6,273 @@
#ifndef __INTEL_DMC_REGS_H__
#define __INTEL_DMC_REGS_H__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
+
+enum dmc_event_id {
+ DMC_EVENT_TRUE = 0x0,
+ DMC_EVENT_FALSE = 0x1,
+};
+
+enum maindmc_event_id {
+ MAINDMC_EVENT_CMP_ZERO = 0x8,
+ MAINDMC_EVENT_CMP_ODD = 0x9,
+ MAINDMC_EVENT_CMP_NEG = 0xa,
+ MAINDMC_EVENT_CMP_CARRY = 0xb,
+
+ MAINDMC_EVENT_TMR0_DONE = 0x14,
+ MAINDMC_EVENT_TMR1_DONE = 0x15,
+ MAINDMC_EVENT_TMR2_DONE = 0x16,
+ MAINDMC_EVENT_COUNT0_DONE = 0x17,
+ MAINDMC_EVENT_COUNT1_DONE = 0x18,
+ MAINDMC_EVENT_PERF_CNTR_DARBF = 0x19,
+
+ MAINDMC_EVENT_SCANLINE_INRANGE_FQ_A_TRIGGER = 0x22,
+ MAINDMC_EVENT_SCANLINE_INRANGE_FQ_B_TRIGGER = 0x23,
+ MAINDMC_EVENT_SCANLINE_INRANGE_FQ_C_TRIGGER = 0x24,
+ MAINDMC_EVENT_SCANLINE_INRANGE_FQ_D_TRIGGER = 0x25,
+ MAINDMC_EVENT_1KHZ_FQ_A_TRIGGER = 0x26,
+ MAINDMC_EVENT_1KHZ_FQ_B_TRIGGER = 0x27,
+ MAINDMC_EVENT_1KHZ_FQ_C_TRIGGER = 0x28,
+ MAINDMC_EVENT_1KHZ_FQ_D_TRIGGER = 0x29,
+ MAINDMC_EVENT_SCANLINE_COMP_A = 0x2a,
+ MAINDMC_EVENT_SCANLINE_COMP_B = 0x2b,
+ MAINDMC_EVENT_SCANLINE_COMP_C = 0x2c,
+ MAINDMC_EVENT_SCANLINE_COMP_D = 0x2d,
+ MAINDMC_EVENT_VBLANK_DELAYED_A = 0x2e,
+ MAINDMC_EVENT_VBLANK_DELAYED_B = 0x2f,
+ MAINDMC_EVENT_VBLANK_DELAYED_C = 0x30,
+ MAINDMC_EVENT_VBLANK_DELAYED_D = 0x31,
+ MAINDMC_EVENT_VBLANK_A = 0x32,
+ MAINDMC_EVENT_VBLANK_B = 0x33,
+ MAINDMC_EVENT_VBLANK_C = 0x34,
+ MAINDMC_EVENT_VBLANK_D = 0x35,
+ MAINDMC_EVENT_HBLANK_A = 0x36,
+ MAINDMC_EVENT_HBLANK_B = 0x37,
+ MAINDMC_EVENT_HBLANK_C = 0x38,
+ MAINDMC_EVENT_HBLANK_D = 0x39,
+ MAINDMC_EVENT_VSYNC_A = 0x3a,
+ MAINDMC_EVENT_VSYNC_B = 0x3b,
+ MAINDMC_EVENT_VSYNC_C = 0x3c,
+ MAINDMC_EVENT_VSYNC_D = 0x3d,
+ MAINDMC_EVENT_SCANLINE_A = 0x3e,
+ MAINDMC_EVENT_SCANLINE_B = 0x3f,
+ MAINDMC_EVENT_SCANLINE_C = 0x40,
+ MAINDMC_EVENT_SCANLINE_D = 0x41,
+
+ MAINDMC_EVENT_PLANE1_FLIP_A = 0x42,
+ MAINDMC_EVENT_PLANE2_FLIP_A = 0x43,
+ MAINDMC_EVENT_PLANE3_FLIP_A = 0x44,
+ MAINDMC_EVENT_PLANE4_FLIP_A = 0x45,
+ MAINDMC_EVENT_PLANE5_FLIP_A = 0x46,
+ MAINDMC_EVENT_PLANE6_FLIP_A = 0x47,
+ MAINDMC_EVENT_PLANE7_FLIP_A = 0x48,
+ MAINDMC_EVENT_PLANE1_FLIP_B = 0x49,
+ MAINDMC_EVENT_PLANE2_FLIP_B = 0x4a,
+ MAINDMC_EVENT_PLANE3_FLIP_B = 0x4b,
+ MAINDMC_EVENT_PLANE4_FLIP_B = 0x4c,
+ MAINDMC_EVENT_PLANE5_FLIP_B = 0x4d,
+ MAINDMC_EVENT_PLANE6_FLIP_B = 0x4e,
+ MAINDMC_EVENT_PLANE7_FLIP_B = 0x4f,
+ MAINDMC_EVENT_PLANE1_FLIP_C = 0x50,
+ MAINDMC_EVENT_PLANE2_FLIP_C = 0x51,
+ MAINDMC_EVENT_PLANE3_FLIP_C = 0x52,
+ MAINDMC_EVENT_PLANE4_FLIP_C = 0x53,
+ MAINDMC_EVENT_PLANE5_FLIP_C = 0x54,
+ MAINDMC_EVENT_PLANE6_FLIP_C = 0x55,
+ MAINDMC_EVENT_PLANE7_FLIP_C = 0x56,
+ MAINDMC_EVENT_PLANE1_FLIP_D = 0x57,
+ MAINDMC_EVENT_PLANE2_FLIP_D = 0x58,
+ MAINDMC_EVENT_PLANE3_FLIP_D = 0x59,
+ MAINDMC_EVENT_PLANE4_FLIP_D = 0x5a,
+ MAINDMC_EVENT_PLANE5_FLIP_D = 0x5b,
+ MAINDMC_EVENT_PLANE6_FLIP_D = 0x5c,
+ MAINDMC_EVENT_PLANE7_FLIP_D = 0x5d,
+ MAINDMC_EVENT_PLANE1_FLIP_DONE_A = 0x5e,
+ MAINDMC_EVENT_PLANE2_FLIP_DONE_A = 0x5f,
+ MAINDMC_EVENT_PLANE3_FLIP_DONE_A = 0x60,
+ MAINDMC_EVENT_PLANE4_FLIP_DONE_A = 0x61,
+ MAINDMC_EVENT_PLANE5_FLIP_DONE_A = 0x62,
+ MAINDMC_EVENT_PLANE6_FLIP_DONE_A = 0x63,
+ MAINDMC_EVENT_PLANE7_FLIP_DONE_A = 0x64,
+ MAINDMC_EVENT_PLANE1_FLIP_DONE_B = 0x65,
+ MAINDMC_EVENT_PLANE2_FLIP_DONE_B = 0x66,
+ MAINDMC_EVENT_PLANE3_FLIP_DONE_B = 0x67,
+ MAINDMC_EVENT_PLANE4_FLIP_DONE_B = 0x68,
+ MAINDMC_EVENT_PLANE5_FLIP_DONE_B = 0x69,
+ MAINDMC_EVENT_PLANE6_FLIP_DONE_B = 0x6a,
+ MAINDMC_EVENT_PLANE7_FLIP_DONE_B = 0x6b,
+ MAINDMC_EVENT_PLANE1_FLIP_DONE_C = 0x6c,
+ MAINDMC_EVENT_PLANE2_FLIP_DONE_C = 0x6d,
+ MAINDMC_EVENT_PLANE3_FLIP_DONE_C = 0x6e,
+ MAINDMC_EVENT_PLANE4_FLIP_DONE_C = 0x6f,
+ MAINDMC_EVENT_PLANE5_FLIP_DONE_C = 0x70,
+ MAINDMC_EVENT_PLANE6_FLIP_DONE_C = 0x71,
+ MAINDMC_EVENT_PLANE7_FLIP_DONE_C = 0x72,
+ MAINDMC_EVENT_PLANE1_FLIP_DONE_D = 0x73,
+ MAINDMC_EVENT_PLANE2_FLIP_DONE_D = 0x74,
+ MAINDMC_EVENT_PLANE3_FLIP_DONE_D = 0x75,
+ MAINDMC_EVENT_PLANE4_FLIP_DONE_D = 0x76,
+ MAINDMC_EVENT_PLANE5_FLIP_DONE_D = 0x77,
+ MAINDMC_EVENT_PLANE6_FLIP_DONE_D = 0x78,
+ MAINDMC_EVENT_PLANE7_FLIP_DONE_D = 0x79,
+
+ MAINDMC_EVENT_WIDI_GTT_FAULT_SL1 = 0x7d,
+ MAINDMC_EVENT_WIDI_GTT_FAULT_SL2 = 0x7e,
+ MAINDMC_EVENT_WIDI_CAP_ACTIVE_SL1 = 0x7f,
+ MAINDMC_EVENT_WIDI_CAP_ACTIVE_SL2 = 0x80,
+
+ MAINDMC_EVENT_RENUKE_A = 0x85,
+ MAINDMC_EVENT_RENUKE_B = 0x86,
+ MAINDMC_EVENT_RENUKE_C = 0x87,
+ MAINDMC_EVENT_RENUKE_D = 0x88,
+ MAINDMC_EVENT_DPFC_FIFO_FULL_A = 0x89,
+ MAINDMC_EVENT_DPFC_FIFO_FULL_B = 0x8a,
+ MAINDMC_EVENT_DPFC_FIFO_FULL_C = 0x8b,
+ MAINDMC_EVENT_DPFC_FIFO_FULL_D = 0x8c,
+ MAINDMC_EVENT_DPFC_PIXEL_CNT_MISMATCH_A = 0x8d,
+ MAINDMC_EVENT_DPFC_PIXEL_CNT_MISMATCH_B = 0x8e,
+ MAINDMC_EVENT_DPFC_PIXEL_CNT_MISMATCH_C = 0x8f,
+ MAINDMC_EVENT_DPFC_PIXEL_CNT_MISMATCH_D = 0x90,
+ MAINDMC_EVENT_DPFC_COMPTAG_UNDERRUN_A = 0x91,
+ MAINDMC_EVENT_DPFC_COMPTAG_UNDERRUN_B = 0x92,
+ MAINDMC_EVENT_DPFC_COMPTAG_UNDERRUN_C = 0x93,
+ MAINDMC_EVENT_DPFC_COMPTAG_UNDERRUN_D = 0x94,
+ MAINDMC_EVENT_DPFC_FIFO_NOT_EMPTY_A = 0x95,
+ MAINDMC_EVENT_DPFC_FIFO_NOT_EMPTY_B = 0x96,
+ MAINDMC_EVENT_DPFC_FIFO_NOT_EMPTY_C = 0x97,
+ MAINDMC_EVENT_DPFC_FIFO_NOT_EMPTY_D = 0x98,
+ MAINDMC_EVENT_DPFC_COMPTAG_MISMATCH_A = 0x99,
+ MAINDMC_EVENT_DPFC_COMPTAG_MISMATCH_B = 0x9a,
+ MAINDMC_EVENT_DPFC_COMPTAG_MISMATCH_C = 0x9b,
+ MAINDMC_EVENT_DPFC_COMPTAG_MISMATCH_D = 0x9c,
+ MAINDMC_EVENT_DISP_PCH_INT = 0x9d,
+ MAINDMC_EVENT_GTT_ERR = 0x9e,
+ MAINDMC_EVENT_VTD_ERR = 0x9f,
+ MAINDMC_EVENT_FULL_FQ_WAKE_TRIGGER_A = 0xa0,
+ MAINDMC_EVENT_FULL_FQ_WAKE_TRIGGER_B = 0xa1,
+ MAINDMC_EVENT_FULL_FQ_WAKE_TRIGGER_C = 0xa2,
+ MAINDMC_EVENT_FULL_FQ_WAKE_TRIGGER_D = 0xa3,
+ MAINDMC_EVENT_PIPEDMC_CHICKEN_FW_EVENT_A = 0xa4,
+ MAINDMC_EVENT_PIPEDMC_CHICKEN_FW_EVENT_B = 0xa5,
+ MAINDMC_EVENT_PIPEDMC_CHICKEN_FW_EVENT_C = 0xa6,
+ MAINDMC_EVENT_PIPEDMC_CHICKEN_FW_EVENT_D = 0xa7,
+
+ MAINDMC_EVENT_DC_CLOCK_OFF_START_EDP = 0xb2,
+ MAINDMC_EVENT_DC_CLOCK_OFF_START_DSI = 0xb3,
+ MAINDMC_EVENT_DCPR_DMC_CSR_START = 0xb4,
+ MAINDMC_EVENT_IN_PSR = 0xb5,
+
+ MAINDMC_EVENT_IN_MEMUP = 0xb7,
+ MAINDMC_EVENT_IN_VGA = 0xb8,
+
+ MAINDMC_EVENT_IN_KVM_SESSION = 0xba,
+ MAINDMC_EVENT_DEWAKE = 0xbb,
+
+ MAINDMC_EVENT_TRAP_HIT = 0xbd,
+ MAINDMC_EVENT_CLK_USEC = 0xbe,
+ MAINDMC_EVENT_CLK_MSEC = 0xbf,
+
+ MAINDMC_EVENT_CHICKEN1 = 0xc8,
+ MAINDMC_EVENT_CHICKEN2 = 0xc9,
+ MAINDMC_EVENT_CHICKEN3 = 0xca,
+ MAINDMC_EVENT_DDT_UBP = 0xcb,
+
+ MAINDMC_EVENT_HP_LATENCY = 0xcd,
+ MAINDMC_EVENT_LP_LATENCY = 0xce,
+ MAINDMC_EVENT_WIDI_LP_REQ_SL1 = 0xcf,
+ MAINDMC_EVENT_WIDI_LP_REQ_SL2 = 0xd0,
+
+ MAINDMC_EVENT_DG_DMC_EVT_0 = 0xd3,
+ MAINDMC_EVENT_DG_DMC_EVT_1 = 0xd4,
+ MAINDMC_EVENT_DG_DMC_EVT_2 = 0xd5,
+ MAINDMC_EVENT_DG_DMC_EVT_3 = 0xd6,
+ MAINDMC_EVENT_DG_DMC_EVT_4 = 0xd7,
+ MAINDMC_EVENT_DACFE_CLK_STOP = 0xd8,
+ MAINDMC_EVENT_DACFE_AZILIA_SDI_WAKE = 0xd9,
+ MAINDMC_EVENT_AUDIO_DOUBLE_FUNC_GRP_RST = 0xda,
+ MAINDMC_EVENT_AUDIO_CMD_VALID = 0xdb,
+ MAINDMC_EVENT_AUDIO_FRM_SYNC_BCLK = 0xdc,
+ MAINDMC_EVENT_AUDIO_FRM_SYNC_CDCLK = 0xdd,
+ MAINDMC_EVENT_AUDIO_PRESENCE_DETECT_A = 0xde,
+ MAINDMC_EVENT_AUDIO_PRESENCE_DETECT_B = 0xdf,
+ MAINDMC_EVENT_AUDIO_PRESENCE_DETECT_C = 0xe0,
+ MAINDMC_EVENT_AUDIO_PRESENCE_DETECT_E = 0xe1,
+ MAINDMC_EVENT_CMTG_SCANLINE_IN_GB_DC6v = 0xe2,
+ MAINDMC_EVENT_DCPR_CMTG_SCANLINE_OUTSIDE_GB = 0xe3,
+ MAINDMC_EVENT_DC6v_BACKWARD_COMPAT = 0xe4,
+ MAINDMC_EVENT_DPMA_PM_ABORT = 0xe5,
+
+ MAINDMC_EVENT_STACK_OVF = 0xfc,
+ MAINDMC_EVENT_NO_CLAIM = 0xfd,
+ MAINDMC_EVENT_UNK_CMD = 0xfe,
+ MAINDMC_EVENT_HTP_MOD = 0xff,
+};
+
+enum pipedmc_event_id {
+ PIPEDMC_EVENT_TMR0_DONE = 0x14,
+ PIPEDMC_EVENT_TMR1_DONE = 0x15,
+ PIPEDMC_EVENT_TMR2_DONE = 0x16,
+ PIPEDMC_EVENT_COUNT0_DONE = 0x17,
+ PIPEDMC_EVENT_COUNT1_DONE = 0x18,
+ PIPEDMC_EVENT_PGA_PGB_RESTORE_DONE = 0x19,
+ PIPEDMC_EVENT_PG1_PG2_RESTORE_DONE = 0x1a,
+ PIPEDMC_EVENT_PGA_PGB_SAVE_DONE = 0x1b,
+ PIPEDMC_EVENT_PG1_PG2_SAVE_DONE = 0x1c,
+
+ PIPEDMC_EVENT_FULL_FQ_WAKE_TRIGGER = 0x2b,
+ PIPEDMC_EVENT_1KHZ_FQ_TRIGGER = 0x2c,
+ PIPEDMC_EVENT_SCANLINE_INRANGE_FQ_TRIGGER = 0x2d,
+ PIPEDMC_EVENT_SCANLINE_INRANGE = 0x2e,
+ PIPEDMC_EVENT_SCANLINE_OUTRANGE = 0x2f,
+ PIPEDMC_EVENT_SCANLINE_EQUAL = 0x30,
+ PIPEDMC_EVENT_DELAYED_VBLANK = 0x31,
+ PIPEDMC_EVENT_VBLANK = 0x32,
+ PIPEDMC_EVENT_HBLANK = 0x33,
+ PIPEDMC_EVENT_VSYNC = 0x34,
+ PIPEDMC_EVENT_SCANLINE_FROM_DMUX = 0x35,
+ PIPEDMC_EVENT_PLANE1_FLIP = 0x36,
+ PIPEDMC_EVENT_PLANE2_FLIP = 0x37,
+ PIPEDMC_EVENT_PLANE3_FLIP = 0x38,
+ PIPEDMC_EVENT_PLANE4_FLIP = 0x39,
+ PIPEDMC_EVENT_PLANE5_FLIP = 0x3a,
+ PIPEDMC_EVENT_PLANE6_FLIP = 0x3b,
+ PIPEDMC_EVENT_PLANE7_FLIP = 0x3c,
+ PIPEDMC_EVENT_ADAPTIVE_DCB_TRIGGER = 0x3d,
+
+ PIPEDMC_EVENT_PLANE1_FLIP_DONE = 0x56,
+ PIPEDMC_EVENT_PLANE2_FLIP_DONE = 0x57,
+ PIPEDMC_EVENT_PLANE3_FLIP_DONE = 0x58,
+ PIPEDMC_EVENT_PLANE4_FLIP_DONE = 0x59,
+ PIPEDMC_EVENT_PLANE5_FLIP_DONE = 0x5a,
+ PIPEDMC_EVENT_PLANE6_FLIP_DONE = 0x5b,
+ PIPEDMC_EVENT_PLANE7_FLIP_DONE = 0x5c,
+
+ PIPEDMC_EVENT_GTT_ERR = 0x9b,
+
+ PIPEDMC_EVENT_IN_PSR = 0xb5,
+ PIPEDMC_EVENT_DSI_DMC_IDLE = 0xb6,
+ PIPEDMC_EVENT_PSR2_DMC_IDLE = 0xb7,
+ PIPEDMC_EVENT_IN_VGA = 0xb8,
+
+ PIPEDMC_EVENT_TRAP_HIT = 0xbd,
+ PIPEDMC_EVENT_CLK_USEC = 0xbe,
+ PIPEDMC_EVENT_CLK_MSEC = 0xbf,
+
+ PIPEDMC_EVENT_CHICKEN1 = 0xc8,
+ PIPEDMC_EVENT_CHICKEN2 = 0xc9,
+ PIPEDMC_EVENT_CHICKEN3 = 0xca,
+ PIPEDMC_EVENT_DDT_UBP = 0xcb,
+
+ PIPEDMC_EVENT_LP_LATENCY = 0xce,
+
+ PIPEDMC_EVENT_LACE_PART_A_HIST_TRIGGER = 0xdf,
+ PIPEDMC_EVENT_LACE_PART_B_HIST_TRIGGER = 0xe0,
+
+ PIPEDMC_EVENT_STACK_OVF = 0xfc,
+ PIPEDMC_EVENT_NO_CLAIM = 0xfd,
+ PIPEDMC_EVENT_UNK_CMD = 0xfe,
+ PIPEDMC_EVENT_HTP_MOD = 0xff,
+};
#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
@@ -21,11 +287,170 @@
#define MTL_PIPEDMC_CONTROL _MMIO(0x45250)
#define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4)
-#define _MTL_PIPEDMC_EVT_CTL_4_A 0x5f044
-#define _MTL_PIPEDMC_EVT_CTL_4_B 0x5f444
-#define MTL_PIPEDMC_EVT_CTL_4(pipe) _MMIO_PIPE(pipe, \
- _MTL_PIPEDMC_EVT_CTL_4_A, \
- _MTL_PIPEDMC_EVT_CTL_4_B)
+#define _PIPEDMC_LOAD_HTP_A 0x5f000
+#define _PIPEDMC_LOAD_HTP_B 0x5f400
+#define PIPEDMC_LOAD_HTP(pipe) _MMIO_PIPE((pipe), _PIPEDMC_LOAD_HTP_A, _PIPEDMC_LOAD_HTP_B)
+
+#define _PIPEDMC_CTL_A 0x5f064
+#define _PIPEDMC_CTL_B 0x5f464
+#define PIPEDMC_CTL(pipe) _MMIO_PIPE((pipe), _PIPEDMC_CTL_A, _PIPEDMC_CTL_B)
+#define PIPEDMC_HALT REG_BIT(31)
+#define PIPEDMC_STEP REG_BIT(27)
+#define PIPEDMC_CLOCKGATE REG_BIT(23)
+
+#define _PIPEDMC_STATUS_A 0x5f06c
+#define _PIPEDMC_STATUS_B 0x5f46c
+#define PIPEDMC_STATUS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_STATUS_A, _PIPEDMC_STATUS_B)
+#define PIPEDMC_SSP REG_GENMASK(31, 16)
+#define PIPEDMC_INT_VECTOR_MASK REG_GENMASK(15, 8)
+/* PIPEDMC_INT_VECTOR values defined by firmware */
+#define PIPEDMC_INT_VECTOR_SCANLINE_COMP_ERROR REG_FIELD_PREP(PIPEDMC_INT_VECTOR_MASK, 0x1)
+#define PIPEDMC_INT_VECTOR_DC6V_FLIPQ_OVERLAP_ERROR REG_FIELD_PREP(PIPEDMC_INT_VECTOR_MASK, 0x2)
+#define PIPEDMC_INT_VECTOR_FLIPQ_PROG_DONE REG_FIELD_PREP(PIPEDMC_INT_VECTOR_MASK, 0xff) /* Wa_16018781658:lnl[a0] */
+#define PIPEDMC_EVT_PENDING REG_GENMASK(7, 0)
+
+#define _PIPEDMC_FQ_CTRL_A 0x5f078
+#define _PIPEDMC_FQ_CTRL_B 0x5f478
+#define PIPEDMC_FQ_CTRL(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FQ_CTRL_A, _PIPEDMC_FQ_CTRL_B)
+#define PIPEDMC_FQ_CTRL_ENABLE REG_BIT(31)
+#define PIPEDMC_FQ_CTRL_ASYNC REG_BIT(29)
+#define PIPEDMC_FQ_CTRL_PREEMPT REG_BIT(0)
+
+#define _PIPEDMC_FQ_STATUS_A 0x5f098
+#define _PIPEDMC_FQ_STATUS_B 0x5f498
+#define PIPEDMC_FQ_STATUS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FQ_STATUS_A, _PIPEDMC_FQ_STATUS_B)
+#define PIPEDMC_FQ_STATUS_BUSY REG_BIT(31)
+#define PIPEDMC_FQ_STATUS_W2_LIVE_STATUS REG_BIT(1)
+#define PIPEDMC_FQ_STATUS_W1_LIVE_STATUS REG_BIT(0)
+
+#define _PIPEDMC_FPQ_ATOMIC_TP_A 0x5f0a0
+#define _PIPEDMC_FPQ_ATOMIC_TP_B 0x5f4a0
+#define PIPEDMC_FPQ_ATOMIC_TP(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_ATOMIC_TP_A, _PIPEDMC_FPQ_ATOMIC_TP_B)
+#define PIPEDMC_FPQ_PLANEQ_3_TP_MASK REG_GENMASK(31, 26)
+#define PIPEDMC_FPQ_PLANEQ_3_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_PLANEQ_3_TP_MASK, (tail))
+#define PIPEDMC_FPQ_PLANEQ_2_TP_MASK REG_GENMASK(24, 19)
+#define PIPEDMC_FPQ_PLANEQ_2_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_PLANEQ_2_TP_MASK, (tail))
+#define PIPEDMC_FPQ_PLANEQ_1_TP_MASK REG_GENMASK(17, 12)
+#define PIPEDMC_FPQ_PLANEQ_1_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_PLANEQ_1_TP_MASK, (tail))
+#define PIPEDMC_FPQ_FASTQ_TP_MASK REG_GENMASK(10, 6)
+#define PIPEDMC_FPQ_FASTQ_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_FASTQ_TP_MASK, (tail))
+#define PIPEDMC_FPQ_GENERALQ_TP_MASK REG_GENMASK(4, 0)
+#define PIPEDMC_FPQ_GENERALQ_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_GENERALQ_TP_MASK, (tail))
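+/*
+ * Illustrative only (not from this patch; queue tail variables are
+ * hypothetical): judging by the register name, all flip queue tail
+ * pointers can seemingly be updated in one atomic write, e.g.
+ *
+ *	intel_de_write(display, PIPEDMC_FPQ_ATOMIC_TP(pipe),
+ *		       PIPEDMC_FPQ_GENERALQ_TP(gq_tail) |
+ *		       PIPEDMC_FPQ_FASTQ_TP(fq_tail) |
+ *		       PIPEDMC_FPQ_PLANEQ_1_TP(p1_tail));
+ */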
+
+#define _PIPEDMC_FPQ_LINES_TO_W1_A 0x5f0a4
+#define _PIPEDMC_FPQ_LINES_TO_W1_B 0x5f4a4
+#define PIPEDMC_FPQ_LINES_TO_W1(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_LINES_TO_W1_A, _PIPEDMC_FPQ_LINES_TO_W1_B)
+
+#define _PIPEDMC_FPQ_LINES_TO_W2_A 0x5f0a8
+#define _PIPEDMC_FPQ_LINES_TO_W2_B 0x5f4a8
+#define PIPEDMC_FPQ_LINES_TO_W2(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_LINES_TO_W2_A, _PIPEDMC_FPQ_LINES_TO_W2_B)
+
+#define _PIPEDMC_SCANLINECMP_A 0x5f11c
+#define _PIPEDMC_SCANLINECMP_B 0x5f51c
+#define PIPEDMC_SCANLINECMP(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINECMP_A, _PIPEDMC_SCANLINECMP_B)
+#define PIPEDMC_SCANLINECMP_EN REG_BIT(31)
+#define PIPEDMC_SCANLINE_NUMBER REG_GENMASK(20, 0)
+
+#define _PIPEDMC_SCANLINECMPLOWER_A 0x5f120
+#define _PIPEDMC_SCANLINECMPLOWER_B 0x5f520
+#define PIPEDMC_SCANLINECMPLOWER(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINECMPLOWER_A, _PIPEDMC_SCANLINECMPLOWER_B)
+#define PIPEDMC_SCANLINEINRANGECMP_EN REG_BIT(31)
+#define PIPEDMC_SCANLINEOUTRANGECMP_EN REG_BIT(30)
+#define PIPEDMC_SCANLINE_LOWER_MASK REG_GENMASK(20, 0)
+#define PIPEDMC_SCANLINE_LOWER(scanline) REG_FIELD_PREP(PIPEDMC_SCANLINE_LOWER_MASK, (scanline))
+
+#define _PIPEDMC_SCANLINECMPUPPER_A 0x5f124
+#define _PIPEDMC_SCANLINECMPUPPER_B 0x5f524
+#define PIPEDMC_SCANLINECMPUPPER(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINECMPUPPER_A, _PIPEDMC_SCANLINECMPUPPER_B)
+#define PIPEDMC_SCANLINE_UPPER_MASK REG_GENMASK(20, 0)
+#define PIPEDMC_SCANLINE_UPPER(scanline) REG_FIELD_PREP(PIPEDMC_SCANLINE_UPPER_MASK, (scanline))
+
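+/*
+ * The per-pipe flip queue registers live in two address ranges with
+ * different strides; _PICK_EVEN_2RANGES() below selects the range based
+ * on whether fq_id is below INTEL_FLIPQ_PLANE_3.
+ */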
+#define _MMIO_PIPEDMC_FPQ(pipe, fq_id, \
+ reg_fpq1_a, reg_fpq2_a, reg_fpq3_a, reg_fpq4_a, \
+ reg_fpq1_b, reg_fpq2_b, reg_fpq3_b, reg_fpq4_b) \
+ _MMIO(_PICK_EVEN_2RANGES((fq_id), INTEL_FLIPQ_PLANE_3, \
+ _PIPE((pipe), (reg_fpq1_a), (reg_fpq1_b)), \
+ _PIPE((pipe), (reg_fpq2_a), (reg_fpq2_b)), \
+ _PIPE((pipe), (reg_fpq3_a), (reg_fpq3_b)), \
+ _PIPE((pipe), (reg_fpq4_a), (reg_fpq4_b))))
+
+#define _PIPEDMC_FPQ1_HP_A 0x5f128
+#define _PIPEDMC_FPQ2_HP_A 0x5f138
+#define _PIPEDMC_FPQ3_HP_A 0x5f168
+#define _PIPEDMC_FPQ4_HP_A 0x5f174
+#define _PIPEDMC_FPQ5_HP_A 0x5f180
+#define _PIPEDMC_FPQ1_HP_B 0x5f528
+#define _PIPEDMC_FPQ2_HP_B 0x5f538
+#define _PIPEDMC_FPQ3_HP_B 0x5f568
+#define _PIPEDMC_FPQ4_HP_B 0x5f574
+#define _PIPEDMC_FPQ5_HP_B 0x5f580
+#define PIPEDMC_FPQ_HP(pipe, fq_id) _MMIO_PIPEDMC_FPQ((pipe), (fq_id), \
+ _PIPEDMC_FPQ1_HP_A, _PIPEDMC_FPQ2_HP_A, \
+ _PIPEDMC_FPQ3_HP_A, _PIPEDMC_FPQ4_HP_A, \
+ _PIPEDMC_FPQ1_HP_B, _PIPEDMC_FPQ2_HP_B, \
+ _PIPEDMC_FPQ3_HP_B, _PIPEDMC_FPQ4_HP_B)
+
+#define _PIPEDMC_FPQ1_TP_A 0x5f12c
+#define _PIPEDMC_FPQ2_TP_A 0x5f13c
+#define _PIPEDMC_FPQ3_TP_A 0x5f16c
+#define _PIPEDMC_FPQ4_TP_A 0x5f178
+#define _PIPEDMC_FPQ5_TP_A 0x5f184
+#define _PIPEDMC_FPQ1_TP_B 0x5f52c
+#define _PIPEDMC_FPQ2_TP_B 0x5f53c
+#define _PIPEDMC_FPQ3_TP_B 0x5f56c
+#define _PIPEDMC_FPQ4_TP_B 0x5f578
+#define _PIPEDMC_FPQ5_TP_B 0x5f584
+#define PIPEDMC_FPQ_TP(pipe, fq_id) _MMIO_PIPEDMC_FPQ((pipe), (fq_id), \
+ _PIPEDMC_FPQ1_TP_A, _PIPEDMC_FPQ2_TP_A, \
+ _PIPEDMC_FPQ3_TP_A, _PIPEDMC_FPQ4_TP_A, \
+ _PIPEDMC_FPQ1_TP_B, _PIPEDMC_FPQ2_TP_B, \
+ _PIPEDMC_FPQ3_TP_B, _PIPEDMC_FPQ4_TP_B)
+
+#define _PIPEDMC_FPQ1_CHP_A 0x5f130
+#define _PIPEDMC_FPQ2_CHP_A 0x5f140
+#define _PIPEDMC_FPQ3_CHP_A 0x5f170
+#define _PIPEDMC_FPQ4_CHP_A 0x5f17c
+#define _PIPEDMC_FPQ5_CHP_A 0x5f188
+#define _PIPEDMC_FPQ1_CHP_B 0x5f530
+#define _PIPEDMC_FPQ2_CHP_B 0x5f540
+#define _PIPEDMC_FPQ3_CHP_B 0x5f570
+#define _PIPEDMC_FPQ4_CHP_B 0x5f57c
+#define _PIPEDMC_FPQ5_CHP_B 0x5f588
+#define PIPEDMC_FPQ_CHP(pipe, fq_id) _MMIO_PIPEDMC_FPQ((pipe), (fq_id), \
+ _PIPEDMC_FPQ1_CHP_A, _PIPEDMC_FPQ2_CHP_A, \
+ _PIPEDMC_FPQ3_CHP_A, _PIPEDMC_FPQ4_CHP_A, \
+ _PIPEDMC_FPQ1_CHP_B, _PIPEDMC_FPQ2_CHP_B, \
+ _PIPEDMC_FPQ3_CHP_B, _PIPEDMC_FPQ4_CHP_B)
+
+#define _PIPEDMC_FPQ_TS_A 0x5f134
+#define _PIPEDMC_FPQ_TS_B 0x5f534
+#define PIPEDMC_FPQ_TS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_TS_A, _PIPEDMC_FPQ_TS_B)
+
+#define _PIPEDMC_SCANLINE_RO_A 0x5f144
+#define _PIPEDMC_SCANLINE_RO_B 0x5f544
+#define PIPEDMC_SCANLINE_RO(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINE_RO_A, _PIPEDMC_SCANLINE_RO_B)
+
+#define _PIPEDMC_FPQ_CTL1_A 0x5f160
+#define _PIPEDMC_FPQ_CTL1_B 0x5f560
+#define PIPEDMC_FPQ_CTL1(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_CTL1_A, _PIPEDMC_FPQ_CTL1_B)
+#define PIPEDMC_SW_DMC_WAKE REG_BIT(0)
+
+#define _PIPEDMC_FPQ_CTL2_A 0x5f164
+#define _PIPEDMC_FPQ_CTL2_B 0x5f564
+#define PIPEDMC_FPQ_CTL2(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_CTL2_A, _PIPEDMC_FPQ_CTL2_B)
+#define PIPEDMC_DMC_INT_AT_DELAYED_VBLANK REG_BIT(1)
+#define PIPEDMC_W1_DMC_WAKE REG_BIT(0)
+
+#define _PIPEDMC_INTERRUPT_A 0x5f190 /* lnl+ */
+#define _PIPEDMC_INTERRUPT_B 0x5f590 /* lnl+ */
+#define PIPEDMC_INTERRUPT(pipe) _MMIO_PIPE((pipe), _PIPEDMC_INTERRUPT_A, _PIPEDMC_INTERRUPT_B)
+#define _PIPEDMC_INTERRUPT_MASK_A 0x5f194 /* lnl+ */
+#define _PIPEDMC_INTERRUPT_MASK_B 0x5f594 /* lnl+ */
+#define PIPEDMC_INTERRUPT_MASK(pipe) _MMIO_PIPE((pipe), _PIPEDMC_INTERRUPT_MASK_A, _PIPEDMC_INTERRUPT_MASK_B)
+#define PIPEDMC_FLIPQ_PROG_DONE REG_BIT(3)
+#define PIPEDMC_ERROR REG_BIT(2)
+#define PIPEDMC_GTT_FAULT REG_BIT(1)
+#define PIPEDMC_ATS_FAULT REG_BIT(0)
#define PIPEDMC_BLOCK_PKGC_SW_A 0x5f1d0
#define PIPEDMC_BLOCK_PKGC_SW_B 0x5F5d0
@@ -71,12 +496,7 @@
#define DMC_EVT_CTL_TYPE_LEVEL_1 1
#define DMC_EVT_CTL_TYPE_EDGE_1_0 2
#define DMC_EVT_CTL_TYPE_EDGE_0_1 3
-
#define DMC_EVT_CTL_EVENT_ID_MASK REG_GENMASK(15, 8)
-#define DMC_EVT_CTL_EVENT_ID_FALSE 0x01
-#define DMC_EVT_CTL_EVENT_ID_VBLANK_A 0x32 /* main DMC */
-/* An event handler scheduled to run at a 1 kHz frequency. */
-#define DMC_EVT_CTL_EVENT_ID_CLK_MSEC 0xbf
#define DMC_HTP_ADDR_SKL 0x00500034
#define DMC_SSP_BASE _MMIO(0x8F074)
@@ -117,4 +537,51 @@
#define DMC_WAKELOCK_CTL_REQ REG_BIT(31)
#define DMC_WAKELOCK_CTL_ACK REG_BIT(15)
+#define DMC_FQ_W2_PTS_CFG_SEL _MMIO(0x8f240)
+#define PIPE_D_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(26, 24)
+#define PIPE_D_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_D_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe))
+#define PIPE_C_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(18, 16)
+#define PIPE_C_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_C_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe))
+#define PIPE_B_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(10, 8)
+#define PIPE_B_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_B_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe))
+#define PIPE_A_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(2, 0)
+#define PIPE_A_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_A_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe))
+
+/* plane/general flip queue entries */
+#define PIPEDMC_FQ_RAM(start_mmioaddr, i) _MMIO((start_mmioaddr) + (i) * 4)
+/* LNL */
+/* DW0 pts */
+/* DW1 head */
+/* DW2 size/etc. */
+#define LNL_FQ_INTERRUPT REG_BIT(31)
+#define LNL_FQ_DSB_ID_MASK REG_GENMASK(30, 29)
+#define LNL_FQ_DSB_ID(dsb_id) REG_FIELD_PREP(LNL_FQ_DSB_ID_MASK, (dsb_id))
+#define LNL_FQ_EXECUTED REG_BIT(28)
+#define LNL_FQ_DSB_SIZE_MASK REG_GENMASK(15, 0)
+#define LNL_FQ_DSB_SIZE(size) REG_FIELD_PREP(LNL_FQ_DSB_SIZE_MASK, (size))
+/* DW3 reserved (plane queues) */
+/* DW3 second DSB head (general queue) */
+/* DW4 second DSB size/etc. (general queue) */
+/* DW5 reserved (general queue) */
+
+/* PTL+ */
+/* DW0 pts */
+/* DW1 reserved */
+/* DW2 size/etc. */
+#define PTL_FQ_INTERRUPT REG_BIT(31)
+#define PTL_FQ_NEED_PUSH REG_BIT(30)
+#define PTL_FQ_BLOCK_PUSH REG_BIT(29)
+#define PTL_FQ_EXECUTED REG_BIT(28)
+#define PTL_FQ_DSB_ID_MASK REG_GENMASK(25, 24)
+#define PTL_FQ_DSB_ID(dsb_id) REG_FIELD_PREP(PTL_FQ_DSB_ID_MASK, (dsb_id))
+#define PTL_FQ_DSB_SIZE_MASK REG_GENMASK(15, 0)
+#define PTL_FQ_DSB_SIZE(size) REG_FIELD_PREP(PTL_FQ_DSB_SIZE_MASK, (size))
+/* DW3 head */
+/* DW4 second DSB size/etc. (general queue) */
+/* DW5 second DSB head (general queue) */
+
+/* undocumented magic DMC variables */
+#define PTL_PIPEDMC_EXEC_TIME_LINES(start_mmioaddr) _MMIO((start_mmioaddr) + 0x6b8)
+#define PTL_PIPEDMC_END_OF_EXEC_GB(start_mmioaddr) _MMIO((start_mmioaddr) + 0x6c0)
+
#endif /* __INTEL_DMC_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index 7e2ce0c2f6c3..b3bb89ba34f9 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -7,9 +7,8 @@
#include <drm/drm_print.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"
@@ -155,12 +154,11 @@ static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
static void __intel_dmc_wl_release(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_dmc_wl *wl = &display->wl;
WARN_ON(refcount_read(&wl->refcount));
- queue_delayed_work(i915->unordered_wq, &wl->work,
+ queue_delayed_work(display->wq.unordered, &wl->work,
msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 724de7ed3c04..7976fec88606 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -27,6 +27,8 @@
#include <linux/export.h>
#include <linux/i2c.h>
+#include <linux/log2.h>
+#include <linux/math.h>
#include <linux/notifier.h>
#include <linux/seq_buf.h>
#include <linux/slab.h>
@@ -34,7 +36,6 @@
#include <linux/string_helpers.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
-
#include <asm/byteorder.h>
#include <drm/display/drm_dp_helper.h>
@@ -49,8 +50,6 @@
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
-#include "i915_irq.h"
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
@@ -64,6 +63,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -847,7 +847,7 @@ small_joiner_ram_size_bits(struct intel_display *display)
return 6144 * 8;
}
-u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp)
+static u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp)
{
u32 bits_per_pixel = bpp;
int i;
@@ -939,6 +939,7 @@ static u32 ultrajoiner_ram_max_bpp(u32 mode_hdisplay)
return ultrajoiner_ram_bits() / mode_hdisplay;
}
+/* TODO: return a bpp_x16 value */
static
u32 get_max_compressed_bpp_with_joiner(struct intel_display *display,
u32 mode_clock, u32 mode_hdisplay,
@@ -955,6 +956,7 @@ u32 get_max_compressed_bpp_with_joiner(struct intel_display *display,
return max_bpp;
}
+/* TODO: return a bpp_x16 value */
u16 intel_dp_dsc_get_max_compressed_bpp(struct intel_display *display,
u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay,
@@ -1195,7 +1197,7 @@ intel_dp_output_format(struct intel_connector *connector,
int intel_dp_min_bpp(enum intel_output_format output_format)
{
if (output_format == INTEL_OUTPUT_FORMAT_RGB)
- return 6 * 3;
+ return intel_display_min_pipe_bpp();
else
return 8 * 3;
}
@@ -2073,7 +2075,7 @@ int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector
pipe_config, bpc) >> 4;
}
-static int dsc_src_min_compressed_bpp(void)
+int intel_dp_dsc_min_src_compressed_bpp(void)
{
/* Min Compressed bpp supported by source is 8 */
return 8;
@@ -2105,7 +2107,7 @@ static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
/*
* Note: for pre-13 display you still need to check the validity of each step.
*/
-static int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector)
+int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
u8 incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
@@ -2113,12 +2115,19 @@ static int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector)
if (DISPLAY_VER(display) < 14 || !incr)
return fxp_q4_from_int(1);
+ if (connector->mst.dp &&
+ !connector->link.force_bpp_x16 && !connector->mst.dp->force_dsc_fractional_bpp_en)
+ return fxp_q4_from_int(1);
+
/* fxp q4 */
return fxp_q4_from_int(1) / incr;
}
-/* Note: This is not universally usable! */
-static bool intel_dp_dsc_valid_bpp(struct intel_dp *intel_dp, int bpp_x16)
+/*
+ * Note: for bpp_x16 to be valid it must be also within the source/sink's
+ * min..max bpp capability range.
+ */
+bool intel_dp_dsc_valid_compressed_bpp(struct intel_dp *intel_dp, int bpp_x16)
{
struct intel_display *display = to_intel_display(intel_dp);
int i;
@@ -2156,24 +2165,16 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
const struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int output_bpp;
- int dsc_min_bpp;
- int dsc_max_bpp;
int min_bpp_x16, max_bpp_x16, bpp_step_x16;
int dsc_joiner_max_bpp;
int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
int bpp_x16;
int ret;
- dsc_min_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
-
dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(display, adjusted_mode->clock,
adjusted_mode->hdisplay,
num_joined_pipes);
- dsc_max_bpp = min(dsc_joiner_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
-
- /* FIXME: remove the round trip via integers */
- min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);
- max_bpp_x16 = fxp_q4_from_int(dsc_max_bpp);
+ max_bpp_x16 = min(fxp_q4_from_int(dsc_joiner_max_bpp), limits->link.max_bpp_x16);
bpp_step_x16 = intel_dp_dsc_bpp_step_x16(connector);
@@ -2181,8 +2182,12 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
output_bpp = intel_dp_output_bpp(pipe_config->output_format, pipe_bpp);
max_bpp_x16 = min(max_bpp_x16, fxp_q4_from_int(output_bpp) - bpp_step_x16);
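+ /*
+ * Worked example: a sink bpp increment of 1/16 bpp yields bpp_step_x16 == 1,
+ * making the rounding below a no-op; an increment of 1/2 bpp yields
+ * bpp_step_x16 == 8, aligning the limits to half-bpp boundaries.
+ */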
+ drm_WARN_ON(display->drm, !is_power_of_2(bpp_step_x16));
+ min_bpp_x16 = round_up(limits->link.min_bpp_x16, bpp_step_x16);
+ max_bpp_x16 = round_down(max_bpp_x16, bpp_step_x16);
+
for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
- if (!intel_dp_dsc_valid_bpp(intel_dp, bpp_x16))
+ if (!intel_dp_dsc_valid_compressed_bpp(intel_dp, bpp_x16))
continue;
ret = dsc_compute_link_config(intel_dp,
@@ -2485,7 +2490,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
- dsc_src_min_bpp = dsc_src_min_compressed_bpp();
+ dsc_src_min_bpp = intel_dp_dsc_min_src_compressed_bpp();
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
limits->link.min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);
@@ -3727,6 +3732,9 @@ static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
+ if (!drm_dp_is_branch(intel_dp->dpcd))
+ return;
+
if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
intel_dp->pcon_dsc_dpcd,
sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
@@ -5791,6 +5799,28 @@ intel_dp_detect_sdp_caps(struct intel_dp *intel_dp)
drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
}
+static bool intel_dp_needs_dpcd_probe(struct intel_dp *intel_dp, bool force_on_external)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ if (intel_dp_is_edp(intel_dp))
+ return false;
+
+ if (force_on_external)
+ return true;
+
+ if (intel_dp->is_mst)
+ return false;
+
+ return drm_edid_has_quirk(&connector->base, DRM_EDID_QUIRK_DP_DPCD_PROBE);
+}
+
+void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external)
+{
+ drm_dp_dpcd_set_probe(&intel_dp->aux,
+ intel_dp_needs_dpcd_probe(intel_dp, force_on_external));
+}
+
static int
intel_dp_detect(struct drm_connector *_connector,
struct drm_modeset_acquire_ctx *ctx,
@@ -5919,6 +5949,8 @@ out_unset_edid:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
+ intel_dp_dpcd_set_probe(intel_dp, false);
+
if (!intel_dp_is_edp(intel_dp))
drm_dp_set_subconnector_property(&connector->base,
status,
@@ -5949,6 +5981,8 @@ intel_dp_force(struct drm_connector *_connector)
return;
intel_dp_set_edid(intel_dp);
+
+ intel_dp_dpcd_set_probe(intel_dp, false);
}
static int intel_dp_get_modes(struct drm_connector *_connector)
@@ -6321,10 +6355,11 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
* complete the DP tunnel BW request for the latter connector/encoder
* waiting for this encoder's DPRX read, perform a dummy read here.
*/
- if (long_hpd)
+ if (long_hpd) {
+ intel_dp_dpcd_set_probe(intel_dp, true);
+
intel_dp_read_dprx_caps(intel_dp, dpcd);
- if (long_hpd) {
intel_dp->reset_link_params = true;
intel_dp_invalidate_source_oui(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 742ae26ac4a9..0657f5681196 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -147,6 +147,7 @@ int intel_dp_dsc_sink_min_compressed_bpp(const struct intel_crtc_state *pipe_con
int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
const struct intel_crtc_state *pipe_config,
int bpc);
+bool intel_dp_dsc_valid_compressed_bpp(struct intel_dp *intel_dp, int bpp_x16);
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
int num_joined_pipes);
@@ -173,8 +174,6 @@ bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state);
-u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp);
-
void intel_ddi_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -209,7 +208,11 @@ bool intel_dp_has_connector(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state);
int intel_dp_dsc_max_src_input_bpc(struct intel_display *display);
int intel_dp_dsc_min_src_input_bpc(void);
+int intel_dp_dsc_min_src_compressed_bpp(void);
int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
+int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector);
+void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external);
+
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index bf8e8e0cc19c..829a7c0fbe4f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -5,7 +5,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -835,6 +834,8 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux.transfer = intel_dp_aux_transfer;
cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ intel_dp_dpcd_set_probe(intel_dp, true);
}
static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 271b27c9de51..41228478b21c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -145,7 +145,7 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
* ranges for such panels.
*/
if (display->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
- !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
+ !(connector->base.display_info.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(display->drm,
"[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d.\n",
@@ -475,31 +475,6 @@ static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, en
return connector->panel.backlight.level;
}
-static int
-intel_dp_aux_vesa_set_luminance(struct intel_connector *connector, u32 level)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- u8 buf[3];
- int ret;
-
- level = level * 1000;
- level &= 0xffffff;
- buf[0] = (level & 0x0000ff);
- buf[1] = (level & 0x00ff00) >> 8;
- buf[2] = (level & 0xff0000) >> 16;
-
- ret = drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
- buf, sizeof(buf));
- if (ret != sizeof(buf)) {
- drm_err(intel_dp->aux.drm_dev,
- "%s: Failed to set VESA Aux Luminance: %d\n",
- intel_dp->aux.name, ret);
- return -EINVAL;
- } else {
- return 0;
- }
-}
-
static void
intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
@@ -507,11 +482,6 @@ intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u3
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- if (panel->backlight.edp.vesa.luminance_control_support) {
- if (!intel_dp_aux_vesa_set_luminance(connector, level))
- return;
- }
-
if (!panel->backlight.edp.vesa.info.aux_set) {
const u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
@@ -528,18 +498,6 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- int ret;
-
- if (panel->backlight.edp.vesa.luminance_control_support) {
- ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
- DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE);
-
- if (ret == 1)
- return;
-
- if (!intel_dp_aux_vesa_set_luminance(connector, level))
- return;
- }
if (!panel->backlight.edp.vesa.info.aux_enable) {
u32 pwm_level;
@@ -580,13 +538,41 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
&connector->base.display_info.luminance_range;
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_panel *panel = &connector->panel;
- u16 current_level;
+ u32 current_level;
u8 current_mode;
int ret;
- if (panel->backlight.edp.vesa.luminance_control_support) {
+ ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
+ luminance_range->max_luminance,
+ panel->vbt.backlight.pwm_freq_hz,
+ intel_dp->edp_dpcd, &current_level, &current_mode,
+ false);
+ if (ret < 0)
+ return ret;
+
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
+
+ if (!panel->backlight.edp.vesa.info.aux_set ||
+ !panel->backlight.edp.vesa.info.aux_enable) {
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ if (ret < 0) {
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
+ connector->base.base.id, connector->base.name, ret);
+ return ret;
+ }
+ }
+
+ if (panel->backlight.edp.vesa.info.luminance_set) {
if (luminance_range->max_luminance) {
- panel->backlight.max = luminance_range->max_luminance;
+ panel->backlight.max = panel->backlight.edp.vesa.info.max;
panel->backlight.min = luminance_range->min_luminance;
} else {
panel->backlight.max = 512;
@@ -597,54 +583,26 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] AUX VESA Nits backlight level is controlled through DPCD\n",
connector->base.base.id, connector->base.name);
- } else {
- ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
- panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
- &current_level, &current_mode);
- if (ret < 0)
- return ret;
-
- drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
- connector->base.base.id, connector->base.name,
- dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
- drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
- connector->base.base.id, connector->base.name,
- dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
-
- if (!panel->backlight.edp.vesa.info.aux_set ||
- !panel->backlight.edp.vesa.info.aux_enable) {
- ret = panel->backlight.pwm_funcs->setup(connector, pipe);
- if (ret < 0) {
- drm_err(display->drm,
- "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
- connector->base.base.id, connector->base.name, ret);
- return ret;
- }
+ } else if (panel->backlight.edp.vesa.info.aux_set) {
+ panel->backlight.max = panel->backlight.edp.vesa.info.max;
+ panel->backlight.min = 0;
+ if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
+ panel->backlight.level = current_level;
+ panel->backlight.enabled = panel->backlight.level != 0;
+ } else {
+ panel->backlight.level = panel->backlight.max;
+ panel->backlight.enabled = false;
}
-
- if (panel->backlight.edp.vesa.info.aux_set) {
- panel->backlight.max = panel->backlight.edp.vesa.info.max;
- panel->backlight.min = 0;
- if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
- panel->backlight.level = current_level;
- panel->backlight.enabled = panel->backlight.level != 0;
- } else {
- panel->backlight.level = panel->backlight.max;
- panel->backlight.enabled = false;
- }
+ } else {
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) {
+ panel->backlight.level =
+ panel->backlight.pwm_funcs->get(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
} else {
- panel->backlight.max = panel->backlight.pwm_level_max;
- panel->backlight.min = panel->backlight.pwm_level_min;
- if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) {
- panel->backlight.level =
- panel->backlight.pwm_funcs->get(connector, pipe);
- panel->backlight.enabled = panel->backlight.pwm_enabled;
- } else {
- panel->backlight.level = panel->backlight.max;
- panel->backlight.enabled = false;
- }
+ panel->backlight.level = panel->backlight.max;
+ panel->backlight.enabled = false;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index cc312596fb77..bd757db85927 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -11,9 +11,9 @@
#include <drm/display/drm_hdcp_helper.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
@@ -805,10 +805,16 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
enum pipe pipe = (enum pipe)cpu_transcoder;
enum port port = dig_port->base.port;
int ret;
-
- drm_WARN_ON(display->drm, enable &&
- !!(intel_de_read(display, HDCP2_AUTH_STREAM(display, cpu_transcoder, port))
- & AUTH_STREAM_TYPE) != data->streams[0].stream_type);
+ u32 val;
+ u8 stream_type;
+
+ if (DISPLAY_VER(display) < 30) {
+ val = intel_de_read(display,
+ HDCP2_AUTH_STREAM(display, cpu_transcoder, port));
+ stream_type = REG_FIELD_GET(AUTH_STREAM_TYPE_MASK, val);
+ drm_WARN_ON(display->drm, enable &&
+ stream_type != data->streams[0].stream_type);
+ }
ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
if (ret)
@@ -824,6 +830,14 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
return -ETIMEDOUT;
}
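+	/*
+	 * On display version 30+ the stream type is verified from the
+	 * post-enable stream status below instead of the auth register above.
+	 */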
+ if (DISPLAY_VER(display) >= 30) {
+ val = intel_de_read(display,
+ HDCP2_STREAM_STATUS(display, cpu_transcoder, port));
+ stream_type = REG_FIELD_GET(STREAM_TYPE_STATUS_MASK, val);
+ drm_WARN_ON(display->drm, enable &&
+ stream_type != data->streams[0].stream_type);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index e1501b13f08f..74497c9a0554 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/log2.h>
+#include <linux/math.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
@@ -30,7 +33,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -39,6 +41,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
@@ -135,6 +138,7 @@ static bool intel_dp_mst_inc_active_streams(struct intel_dp *intel_dp)
return intel_dp->mst.active_streams++ == 0;
}
+/* TODO: return a bpp_x16 value */
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
bool dsc)
{
@@ -241,6 +245,15 @@ static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connec
num_joined_pipes);
}
+static void mst_stream_update_slots(const struct intel_crtc_state *crtc_state,
+ struct drm_dp_mst_topology_state *topology_state)
+{
+ u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
+ DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
+
+ drm_dp_mst_update_slots(topology_state, link_coding_cap);
+}
+
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
@@ -263,6 +276,12 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
fxp_q4_to_frac(max_bpp_x16) ||
fxp_q4_to_frac(bpp_step_x16)));
+ if (!bpp_step_x16) {
+ /* Allow a zero step only to indicate a single try for a given bpp. */
+ drm_WARN_ON(display->drm, min_bpp_x16 != max_bpp_x16);
+ bpp_step_x16 = 1;
+ }
+
if (is_mst) {
mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst.mgr);
if (IS_ERR(mst_state))
@@ -270,6 +289,8 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
crtc_state->lane_count);
+
+ mst_stream_update_slots(crtc_state, mst_state);
}
if (dsc) {
@@ -298,12 +319,20 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
}
}
+ drm_WARN_ON(display->drm, min_bpp_x16 % bpp_step_x16 || max_bpp_x16 % bpp_step_x16);
+
for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
int local_bw_overhead;
int link_bpp_x16;
drm_dbg_kms(display->drm, "Trying bpp " FXP_Q4_FMT "\n", FXP_Q4_ARGS(bpp_x16));
+ if (dsc && !intel_dp_dsc_valid_compressed_bpp(intel_dp, bpp_x16)) {
+ /* SST must have validated the single bpp tried here already earlier. */
+ drm_WARN_ON(display->drm, !is_mst);
+ continue;
+ }
+
link_bpp_x16 = dsc ? bpp_x16 :
fxp_q4_from_int(intel_dp_output_bpp(crtc_state->output_format,
fxp_q4_to_int(bpp_x16)));
@@ -367,6 +396,10 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst.mgr,
connector->mst.port,
dfixed_trunc(pbn));
+
+ /* TODO: Check this already in drm_dp_atomic_find_time_slots(). */
+ if (slots > mst_state->total_avail_slots)
+ slots = -EINVAL;
} else {
/* Same as above for remote_tu */
crtc_state->dp_m_n.tu = ALIGN(crtc_state->dp_m_n.tu,
@@ -386,10 +419,6 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
break;
}
-
- /* Allow using zero step to indicate one try */
- if (!bpp_step_x16)
- break;
}
if (slots < 0) {
@@ -437,7 +466,8 @@ static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
int num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
- int min_compressed_bpp, max_compressed_bpp;
+ int min_compressed_bpp_x16, max_compressed_bpp_x16;
+ int bpp_step_x16;
max_bpp = limits->pipe.max_bpp;
min_bpp = limits->pipe.min_bpp;
@@ -462,46 +492,28 @@ static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
crtc_state->pipe_bpp = max_bpp;
- max_compressed_bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
- min_compressed_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
+ min_compressed_bpp_x16 = limits->link.min_bpp_x16;
+ max_compressed_bpp_x16 = limits->link.max_bpp_x16;
- drm_dbg_kms(display->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
- min_compressed_bpp, max_compressed_bpp);
+ drm_dbg_kms(display->drm,
+ "DSC Sink supported compressed min bpp " FXP_Q4_FMT " compressed max bpp " FXP_Q4_FMT "\n",
+ FXP_Q4_ARGS(min_compressed_bpp_x16), FXP_Q4_ARGS(max_compressed_bpp_x16));
- /* Align compressed bpps according to our own constraints */
- max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, max_compressed_bpp,
- crtc_state->pipe_bpp);
- min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, min_compressed_bpp,
- crtc_state->pipe_bpp);
+ bpp_step_x16 = intel_dp_dsc_bpp_step_x16(connector);
+
+ max_compressed_bpp_x16 = min(max_compressed_bpp_x16, fxp_q4_from_int(crtc_state->pipe_bpp) - bpp_step_x16);
+
+ drm_WARN_ON(display->drm, !is_power_of_2(bpp_step_x16));
+ min_compressed_bpp_x16 = round_up(min_compressed_bpp_x16, bpp_step_x16);
+ max_compressed_bpp_x16 = round_down(max_compressed_bpp_x16, bpp_step_x16);
crtc_state->lane_count = limits->max_lane_count;
crtc_state->port_clock = limits->max_rate;
return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
- fxp_q4_from_int(min_compressed_bpp),
- fxp_q4_from_int(max_compressed_bpp),
- fxp_q4_from_int(1), true);
-}
-
-static int mst_stream_update_slots(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct intel_display *display = to_intel_display(intel_dp);
- struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr;
- struct drm_dp_mst_topology_state *topology_state;
- u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
- DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
-
- topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
- if (IS_ERR(topology_state)) {
- drm_dbg_kms(display->drm, "slot update failed\n");
- return PTR_ERR(topology_state);
- }
-
- drm_dp_mst_update_slots(topology_state, link_coding_cap);
-
- return 0;
+ min_compressed_bpp_x16,
+ max_compressed_bpp_x16,
+ bpp_step_x16, true);
}
static int mode_hblank_period_ns(const struct drm_display_mode *mode)
@@ -709,10 +721,6 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = mst_stream_update_slots(intel_dp, pipe_config, conn_state);
- if (ret)
- return ret;
-
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
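
The intel_dp_mst.c changes above move the compressed-bpp search onto a .4 binary fixed-point grid ("bpp_x16", i.e. bpp * 16) and require bpp_step_x16 to be a power of two, so that the link limits can be snapped onto the step grid with round_up()/round_down() before the search loop runs. A minimal, self-contained sketch of that alignment invariant, with simplified stand-ins for the kernel's round_up()/round_down() helpers and made-up example values:

#include <assert.h>
#include <stdio.h>

/* .4 binary fixed point: bpp_x16 == bpp * 16, so 10.5 bpp is 168. */
#define Q4_TO_INT(v)	((v) >> 4)

/*
 * Simplified stand-ins for the kernel's round_up()/round_down():
 * correct only for a power-of-two step, which the added
 * drm_WARN_ON(!is_power_of_2(bpp_step_x16)) guarantees.
 */
static int round_up_p2(int val, int step)
{
	return (val + step - 1) & ~(step - 1);
}

static int round_down_p2(int val, int step)
{
	return val & ~(step - 1);
}

int main(void)
{
	int bpp_step_x16 = 2;				/* 1/8 bpp granularity */
	int min_bpp_x16 = 135, max_bpp_x16 = 301;	/* deliberately unaligned */
	int bpp_x16;

	min_bpp_x16 = round_up_p2(min_bpp_x16, bpp_step_x16);	/* -> 136 */
	max_bpp_x16 = round_down_p2(max_bpp_x16, bpp_step_x16);	/* -> 300 */

	/* Mirrors the search loop: every candidate is now step aligned. */
	for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16)
		assert(bpp_x16 % bpp_step_x16 == 0);

	printf("aligned range: %d..%d bpp_x16 (%d..%d integer bpp)\n",
	       min_bpp_x16, max_bpp_x16,
	       Q4_TO_INT(min_bpp_x16), Q4_TO_INT(max_bpp_x16));
	return 0;
}

Pre-aligning the limits this way is what lets the loop decrement by bpp_step_x16 without ever producing an unaligned candidate, which the added drm_WARN_ON(min_bpp_x16 % bpp_step_x16 || max_bpp_x16 % bpp_step_x16) asserts.
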
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c
index bd61f3c3ec91..6ed5012c5fac 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_test.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.c
@@ -10,9 +10,9 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 69f242139420..3f77ad92c156 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -21,13 +21,15 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <drm/drm_print.h>
+
#include "bxt_dpio_phy_regs.h"
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -426,7 +428,7 @@ static void _bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
* use 1ms due to occasional timeouts observed with that.
*/
if (intel_de_wait_fw(display, BXT_PORT_CL1CM_DW0(phy),
- PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1))
+ PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1, NULL))
drm_err(display->drm, "timeout during PHY%d power on\n",
phy);
@@ -715,53 +717,53 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
u32 val;
int i;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Clear calc init */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW10(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW10(ch), val);
}
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW9(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW9(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW9(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW9(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW9(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW9(ch), val);
}
/* Program swing deemph */
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
+ val = vlv_dpio_read(display->drm, phy, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= DPIO_SWING_DEEMPH9P5(deemph_reg_value);
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
+ vlv_dpio_write(display->drm, phy, CHV_TX_DW4(ch, i), val);
}
/* Program swing margin */
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));
+ val = vlv_dpio_read(display->drm, phy, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= DPIO_SWING_MARGIN000(margin_reg_value);
@@ -774,7 +776,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
val &= ~DPIO_UNIQ_TRANS_SCALE_MASK;
val |= DPIO_UNIQ_TRANS_SCALE(0x9a);
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
+ vlv_dpio_write(display->drm, phy, CHV_TX_DW2(ch, i), val);
}
/*
@@ -784,70 +786,70 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
* 27 for ch0 and ch1.
*/
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW3(ch, i));
+ val = vlv_dpio_read(display->drm, phy, CHV_TX_DW3(ch, i));
if (uniq_trans_scale)
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
else
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW3(ch, i), val);
+ vlv_dpio_write(display->drm, phy, CHV_TX_DW3(ch, i), val);
}
/* Start swing calculation */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW10(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW10(ch), val);
}
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
static void __chv_data_lane_soft_reset(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
bool reset)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
u32 val;
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW0(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW0(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW0(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW0(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW0(ch), val);
}
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW1(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW1(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW1(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW1(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW1(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW1(ch), val);
}
}
@@ -855,11 +857,11 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
bool reset)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- vlv_dpio_get(i915);
+ vlv_dpio_get(display->drm);
__chv_data_lane_soft_reset(encoder, crtc_state, reset);
- vlv_dpio_put(i915);
+ vlv_dpio_put(display->drm);
}
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
@@ -867,7 +869,6 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
@@ -886,47 +887,47 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
chv_phy_powergate_lanes(encoder, true, lane_mask);
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Assert data lane reset */
__chv_data_lane_soft_reset(encoder, crtc_state, true);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW8(ch));
val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe == PIPE_B)
val |= DPIO_PCS_USEDCLKCHANNEL;
else
val &= ~DPIO_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW8(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW8(ch));
val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe == PIPE_B)
val |= DPIO_PCS_USEDCLKCHANNEL;
else
val &= ~DPIO_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW8(ch), val);
}
/*
@@ -934,38 +935,38 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW19(ch));
if (pipe == PIPE_B)
val |= CHV_CMN_USEDCLKCHANNEL;
else
val &= ~CHV_CMN_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW19(ch), val);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
int data, i, stagger;
u32 val;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* allow hardware to manage TX FIFO reset source */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW11(ch), val);
}
/* Program Tx lane latency optimal setting */
@@ -975,7 +976,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
data = 0;
else
data = (i == 1) ? 0 : DPIO_UPAR;
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), data);
+ vlv_dpio_write(display->drm, phy, CHV_TX_DW14(ch, i), data);
}
/* Data lane stagger programming */
@@ -990,17 +991,17 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
else
stagger = 0x2;
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW11(ch), val);
}
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW12(ch),
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -1008,7 +1009,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
DPIO_TX2_STAGGER_MULT(0));
if (crtc_state->lane_count > 2) {
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW12(ch),
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -1019,7 +1020,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
/* Deassert data lane reset */
__chv_data_lane_soft_reset(encoder, crtc_state, false);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
@@ -1036,25 +1037,25 @@ void chv_phy_release_cl2_override(struct intel_encoder *encoder)
void chv_phy_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
u32 val;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW1_CH1, val);
}
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
/*
* Leave the power down bit cleared for at least one
@@ -1073,97 +1074,97 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
u32 demph_reg_value, u32 preemph_reg_value,
u32 uniqtranscale_reg_value, u32 tx3_demph)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), 0x00000000);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW4_GRP(ch), demph_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW2_GRP(ch),
- uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW3_GRP(ch), 0x0C782040);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW5_GRP(ch), 0x00000000);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW4_GRP(ch), demph_reg_value);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW2_GRP(ch),
+ uniqtranscale_reg_value);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW3_GRP(ch), 0x0C782040);
if (tx3_demph)
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(ch, 3), tx3_demph);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW4(ch, 3), tx3_demph);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11_GRP(ch), 0x00030000);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW11_GRP(ch), 0x00030000);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
/* Program Tx lane resets to default */
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch),
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW0_GRP(ch),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch),
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW1_GRP(ch),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
DPIO_PCS_CLK_DATAWIDTH_8_10 |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12_GRP(ch), 0x00750f00);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW11_GRP(ch), 0x00001500);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW14_GRP(ch), 0x40400000);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW12_GRP(ch), 0x00750f00);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW11_GRP(ch), 0x00001500);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW14_GRP(ch), 0x40400000);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
enum pipe pipe = crtc->pipe;
u32 val;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Enable clock channels for this port */
val = DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe == PIPE_B)
val |= DPIO_PCS_USEDCLKCHANNEL;
val |= 0xc4;
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8_GRP(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW8_GRP(ch), val);
/* Program lane clock */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14_GRP(ch), 0x00760018);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23_GRP(ch), 0x00400888);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW14_GRP(ch), 0x00760018);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW23_GRP(ch), 0x00400888);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
- vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch), 0x00000000);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW0_GRP(ch), 0x00000000);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
+ vlv_dpio_put(display->drm);
}
void vlv_wait_port_ready(struct intel_encoder *encoder,
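
The intel_dpio_phy.c hunks above are a mechanical handle conversion: each function now derives struct intel_display via to_intel_display() and passes display->drm to the vlv_dpio_*() accessors instead of a struct drm_i915_private pointer, leaving the register sequences themselves untouched. Those sequences all share one shape: take the sideband lock once, perform a series of read-modify-write dword updates, release the lock. A toy user-space model of that pattern (hypothetical register and field names, with a pthread mutex standing in for the DPIO sideband lock):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the DPIO sideband accessors; register contents live
 * in an array and the sideband bus lock is a plain mutex. */
static pthread_mutex_t dpio_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t dpio_regs[16];

static void dpio_get(void) { pthread_mutex_lock(&dpio_lock); }
static void dpio_put(void) { pthread_mutex_unlock(&dpio_lock); }
static uint32_t dpio_read(unsigned int reg) { return dpio_regs[reg]; }
static void dpio_write(unsigned int reg, uint32_t val) { dpio_regs[reg] = val; }

/* Hypothetical per-lane de-emphasis field, 8 bits at bit 24. */
#define DEEMPH_MASK	(0xffu << 24)
#define DEEMPH(x)	(((uint32_t)(x) << 24) & DEEMPH_MASK)

static void set_deemph(unsigned int first_reg, int lane_count, unsigned int level)
{
	int i;

	dpio_get();			/* one lock for the whole sequence */
	for (i = 0; i < lane_count; i++) {
		uint32_t val = dpio_read(first_reg + i);

		val &= ~DEEMPH_MASK;	/* clear the field... */
		val |= DEEMPH(level);	/* ...then program the new value */
		dpio_write(first_reg + i, val);
	}
	dpio_put();
}

int main(void)
{
	set_deemph(0, 4, 0x2a);
	printf("lane 0 dword: 0x%08x\n", dpio_regs[0]);
	return 0;
}

Batching all the dword updates under a single get/put keeps the sideband transaction overhead down and makes each mask-then-set update atomic with respect to other PHY users.
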
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index a9e9b98d0bf9..f969c5399a51 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -6,13 +6,14 @@
#include <linux/kernel.h>
#include <linux/string_helpers.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
@@ -24,11 +25,11 @@
#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
-struct intel_dpll_funcs {
+struct intel_dpll_global_funcs {
int (*crtc_compute_clock)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
- int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+ int (*crtc_get_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
};
struct intel_limit {
@@ -513,8 +514,8 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -526,9 +527,9 @@ void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
return;
- vlv_dpio_get(dev_priv);
- tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch));
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW3(ch));
+ vlv_dpio_put(display->drm);
clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
@@ -541,8 +542,8 @@ void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -554,13 +555,13 @@ void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
return;
- vlv_dpio_get(dev_priv);
- cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch));
- pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch));
- pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch));
- pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch));
- pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ cmn_dw13 = vlv_dpio_read(display->drm, phy, CHV_CMN_DW13(ch));
+ pll_dw0 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW0(ch));
+ pll_dw1 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW1(ch));
+ pll_dw2 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW2(ch));
+ pll_dw3 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
+ vlv_dpio_put(display->drm);
clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
@@ -1161,7 +1162,7 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- ret = intel_compute_shared_dplls(state, crtc, encoder);
+ ret = intel_dpll_compute(state, crtc, encoder);
if (ret)
return ret;
@@ -1176,8 +1177,8 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
-static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int hsw_crtc_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
@@ -1189,7 +1190,7 @@ static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- return intel_reserve_shared_dplls(state, crtc, encoder);
+ return intel_dpll_reserve(state, crtc, encoder);
}
static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
@@ -1223,7 +1224,7 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
if (ret)
return ret;
- /* TODO: Do the readback via intel_compute_shared_dplls() */
+ /* TODO: Do the readback via intel_dpll_compute() */
crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
@@ -1394,7 +1395,7 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
ilk_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
- ret = intel_compute_shared_dplls(state, crtc, NULL);
+ ret = intel_dpll_compute(state, crtc, NULL);
if (ret)
return ret;
@@ -1404,8 +1405,8 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
return ret;
}
-static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int ilk_crtc_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -1414,7 +1415,7 @@ static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
if (!crtc_state->has_pch_encoder)
return 0;
- return intel_reserve_shared_dplls(state, crtc, NULL);
+ return intel_dpll_reserve(state, crtc, NULL);
}
static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
@@ -1690,45 +1691,45 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
-static const struct intel_dpll_funcs mtl_dpll_funcs = {
+static const struct intel_dpll_global_funcs mtl_dpll_funcs = {
.crtc_compute_clock = mtl_crtc_compute_clock,
};
-static const struct intel_dpll_funcs dg2_dpll_funcs = {
+static const struct intel_dpll_global_funcs dg2_dpll_funcs = {
.crtc_compute_clock = dg2_crtc_compute_clock,
};
-static const struct intel_dpll_funcs hsw_dpll_funcs = {
+static const struct intel_dpll_global_funcs hsw_dpll_funcs = {
.crtc_compute_clock = hsw_crtc_compute_clock,
- .crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
+ .crtc_get_dpll = hsw_crtc_get_dpll,
};
-static const struct intel_dpll_funcs ilk_dpll_funcs = {
+static const struct intel_dpll_global_funcs ilk_dpll_funcs = {
.crtc_compute_clock = ilk_crtc_compute_clock,
- .crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
+ .crtc_get_dpll = ilk_crtc_get_dpll,
};
-static const struct intel_dpll_funcs chv_dpll_funcs = {
+static const struct intel_dpll_global_funcs chv_dpll_funcs = {
.crtc_compute_clock = chv_crtc_compute_clock,
};
-static const struct intel_dpll_funcs vlv_dpll_funcs = {
+static const struct intel_dpll_global_funcs vlv_dpll_funcs = {
.crtc_compute_clock = vlv_crtc_compute_clock,
};
-static const struct intel_dpll_funcs g4x_dpll_funcs = {
+static const struct intel_dpll_global_funcs g4x_dpll_funcs = {
.crtc_compute_clock = g4x_crtc_compute_clock,
};
-static const struct intel_dpll_funcs pnv_dpll_funcs = {
+static const struct intel_dpll_global_funcs pnv_dpll_funcs = {
.crtc_compute_clock = pnv_crtc_compute_clock,
};
-static const struct intel_dpll_funcs i9xx_dpll_funcs = {
+static const struct intel_dpll_global_funcs i9xx_dpll_funcs = {
.crtc_compute_clock = i9xx_crtc_compute_clock,
};
-static const struct intel_dpll_funcs i8xx_dpll_funcs = {
+static const struct intel_dpll_global_funcs i8xx_dpll_funcs = {
.crtc_compute_clock = i8xx_crtc_compute_clock,
};
@@ -1758,8 +1759,8 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
-int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
@@ -1767,15 +1768,15 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
int ret;
drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
- drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
+ drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->intel_dpll);
- if (!crtc_state->hw.enable || crtc_state->shared_dpll)
+ if (!crtc_state->hw.enable || crtc_state->intel_dpll)
return 0;
- if (!display->funcs.dpll->crtc_get_shared_dpll)
+ if (!display->funcs.dpll->crtc_get_dpll)
return 0;
- ret = display->funcs.dpll->crtc_get_shared_dpll(state, crtc);
+ ret = display->funcs.dpll->crtc_get_dpll(state, crtc);
if (ret) {
drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
crtc->base.base.id, crtc->base.name);
@@ -1871,45 +1872,43 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
static void vlv_pllb_recal_opamp(struct intel_display *display,
enum dpio_phy phy, enum dpio_channel ch)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
/*
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
- tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
+ tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
tmp &= 0xffffff00;
tmp |= 0x00000030;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);
- tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
+ tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
tmp &= 0x00ffffff;
tmp |= 0x8c000000;
- vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
+ vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);
- tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
+ tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
tmp &= 0xffffff00;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);
- tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
+ tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
tmp &= 0x00ffffff;
tmp |= 0xb0000000;
- vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
+ vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);
}
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct dpll *clock = &crtc_state->dpll;
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
u32 tmp, coreclk;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* See eDP HDMI DPIO driver vbios notes doc */
@@ -1918,15 +1917,15 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
vlv_pllb_recal_opamp(display, phy, ch);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch));
+ tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW16(ch));
tmp &= 0x00ffffff;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW16(ch), tmp);
/* Disable fast lock */
- vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
+ vlv_dpio_write(display->drm, phy, VLV_CMN_DW0, 0x610);
/* Set idtafcrecal before PLL is enabled */
tmp = DPIO_M1_DIV(clock->m1) |
@@ -1942,48 +1941,42 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
* Note: don't use the DAC post divider as it seems unstable.
*/
tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);
tmp |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);
/* Set HBR and RBR LPF coefficients */
if (crtc_state->port_clock == 162000 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
- 0x009f0003);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x009f0003);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
- 0x00d0000f);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x00d0000f);
if (intel_crtc_has_dp_encoder(crtc_state)) {
/* Use SSC source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
- 0x0df40000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
- 0x0df70000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
- 0x0df70000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
- 0x0df40000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch));
+ coreclk = vlv_dpio_read(display->drm, phy, VLV_PLL_DW7(ch));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_crtc_has_dp_encoder(crtc_state))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW7(ch), coreclk);
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW19(ch), 0x87871000);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
@@ -2028,8 +2021,8 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct dpll *clock = &crtc_state->dpll;
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
@@ -2038,44 +2031,44 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
m2_frac = clock->m2 & 0x3fffff;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* p1 and p2 divider */
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch),
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW13(ch),
DPIO_CHV_S1_DIV(5) |
DPIO_CHV_P1_DIV(clock->p1) |
DPIO_CHV_P2_DIV(clock->p2) |
DPIO_CHV_K_DIV(1));
/* Feedback post-divider - m2 */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch),
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW0(ch),
DPIO_CHV_M2_DIV(clock->m2 >> 22));
/* Feedback refclk divider - n and m1 */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch),
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW1(ch),
DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
DPIO_CHV_N_DIV(1));
/* M2 fraction division */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch),
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW2(ch),
DPIO_CHV_M2_FRAC_DIV(m2_frac));
/* M2 fraction division enable */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
+ tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
if (m2_frac)
tmp |= DPIO_CHV_FRAC_DIV_EN;
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp);
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW3(ch), tmp);
/* Program digital lock detect threshold */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch));
+ tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW9(ch));
tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
if (!m2_frac)
tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp);
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW9(ch), tmp);
/* Loop filter */
if (clock->vco == 5400000) {
@@ -2100,40 +2093,39 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
DPIO_CHV_GAIN_CTRL(0x3);
tribuf_calcntr = 0;
}
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter);
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW6(ch), loopfilter);
- tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch));
+ tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW8(ch));
tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp);
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW8(ch), tmp);
/* AFC Recal */
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch),
- vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) |
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch),
+ vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch)) |
DPIO_AFC_RECAL);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
u32 tmp;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), tmp);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
/*
* Need to wait > 100ns between dclkp clock enable bit and PLL enable.
@@ -2252,7 +2244,6 @@ void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
void chv_disable_pll(struct intel_display *display, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
@@ -2268,14 +2259,14 @@ void chv_disable_pll(struct intel_display *display, enum pipe pipe)
intel_de_write(display, DPLL(display, pipe), val);
intel_de_posting_read(display, DPLL(display, pipe));
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Disable 10bit clock to display controller */
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
val &= ~DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), val);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 280e90a57c87..3444a2dd3166 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -19,8 +19,8 @@ struct intel_dpll_hw_state;
void intel_dpll_init_clock_hook(struct intel_display *display);
int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc);
-int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
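
The intel_dpll.c/.h changes above rename the per-platform clock vtable to struct intel_dpll_global_funcs and its second hook to crtc_get_dpll; the hook remains optional, and intel_dpll_crtc_get_dpll() still returns early when a platform leaves it NULL (the mtl/dg2/chv/vlv/g4x/pnv/i9xx/i8xx tables only set .crtc_compute_clock). A stripped-down sketch of that optional-hook dispatch, with hypothetical type and function names modeled on the code above:

#include <stdio.h>

struct atomic_state;	/* opaque stand-in for struct intel_atomic_state */

/* Per-platform vtable: compute_clock is mandatory, get_dpll optional. */
struct dpll_global_funcs {
	int (*crtc_compute_clock)(struct atomic_state *state);
	int (*crtc_get_dpll)(struct atomic_state *state);
};

static int hsw_compute_clock(struct atomic_state *state) { (void)state; return 0; }
static int hsw_get_dpll(struct atomic_state *state) { (void)state; return 0; }
static int mtl_compute_clock(struct atomic_state *state) { (void)state; return 0; }

static const struct dpll_global_funcs hsw_funcs = {
	.crtc_compute_clock = hsw_compute_clock,
	.crtc_get_dpll = hsw_get_dpll,
};

/* Platforms whose PLLs are not a reservable resource omit the hook. */
static const struct dpll_global_funcs mtl_funcs = {
	.crtc_compute_clock = mtl_compute_clock,
};

static int crtc_get_dpll(const struct dpll_global_funcs *funcs,
			 struct atomic_state *state)
{
	if (!funcs->crtc_get_dpll)	/* absent hook: nothing to reserve */
		return 0;
	return funcs->crtc_get_dpll(state);
}

int main(void)
{
	printf("hsw: %d, mtl: %d\n",
	       crtc_get_dpll(&hsw_funcs, NULL),
	       crtc_get_dpll(&mtl_funcs, NULL));
	return 0;
}

Treating a NULL hook as "nothing to reserve" avoids per-platform stub functions for hardware whose PLLs are not a shared, reservable resource.
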
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 9da051a3f455..33e0398120c8 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -27,10 +27,10 @@
#include <drm/drm_print.h>
#include "bxt_dpio_phy_regs.h"
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
@@ -52,34 +52,34 @@
* share a PLL if their configurations match.
*
* This file provides an abstraction over display PLLs. The function
- * intel_shared_dpll_init() initializes the PLLs for the given platform. The
+ * intel_dpll_init() initializes the PLLs for the given platform. The
* users of a PLL are tracked and that tracking is integrated with the atomic
* modset interface. During an atomic operation, required PLLs can be reserved
* for a given CRTC and encoder configuration by calling
- * intel_reserve_shared_dplls() and previously reserved PLLs can be released
- * with intel_release_shared_dplls().
+ * intel_dpll_reserve() and previously reserved PLLs can be released
+ * with intel_dpll_release().
* Changes to the users are first staged in the atomic state, and then made
- * effective by calling intel_shared_dpll_swap_state() during the atomic
+ * effective by calling intel_dpll_swap_state() during the atomic
* commit phase.
*/
/* platform specific hooks for managing DPLLs */
-struct intel_shared_dpll_funcs {
+struct intel_dpll_funcs {
/*
- * Hook for enabling the pll, called from intel_enable_shared_dpll() if
+ * Hook for enabling the pll, called from intel_enable_dpll() if
* the pll is not already enabled.
*/
void (*enable)(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
/*
- * Hook for disabling the pll, called from intel_disable_shared_dpll()
+ * Hook for disabling the pll, called from intel_disable_dpll()
* only when it is safe to disable the pll, i.e., there are no more
* tracked users for it.
*/
void (*disable)(struct intel_display *display,
- struct intel_shared_dpll *pll);
+ struct intel_dpll *pll);
/*
* Hook for reading the values currently programmed to the DPLL
@@ -87,7 +87,7 @@ struct intel_shared_dpll_funcs {
* verification after a mode set.
*/
bool (*get_hw_state)(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state);
/*
@@ -95,7 +95,7 @@ struct intel_shared_dpll_funcs {
* in state.
*/
int (*get_freq)(struct intel_display *i915,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
};
@@ -122,18 +122,18 @@ struct intel_dpll_mgr {
static void
intel_atomic_duplicate_dpll_state(struct intel_display *display,
- struct intel_shared_dpll_state *shared_dpll)
+ struct intel_dpll_state *dpll_state)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- /* Copy shared dpll state */
- for_each_shared_dpll(display, pll, i)
- shared_dpll[pll->index] = pll->state;
+ /* Copy dpll state */
+ for_each_dpll(display, pll, i)
+ dpll_state[pll->index] = pll->state;
}
-static struct intel_shared_dpll_state *
-intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+static struct intel_dpll_state *
+intel_atomic_get_dpll_state(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
struct intel_display *display = to_intel_display(state);
@@ -144,28 +144,28 @@ intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
state->dpll_set = true;
intel_atomic_duplicate_dpll_state(display,
- state->shared_dpll);
+ state->dpll_state);
}
- return state->shared_dpll;
+ return state->dpll_state;
}
/**
- * intel_get_shared_dpll_by_id - get a DPLL given its id
+ * intel_get_dpll_by_id - get a DPLL given its id
* @display: intel_display device instance
* @id: pll id
*
* Returns:
* A pointer to the DPLL with @id
*/
-struct intel_shared_dpll *
-intel_get_shared_dpll_by_id(struct intel_display *display,
- enum intel_dpll_id id)
+struct intel_dpll *
+intel_get_dpll_by_id(struct intel_display *display,
+ enum intel_dpll_id id)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- for_each_shared_dpll(display, pll, i) {
+ for_each_dpll(display, pll, i) {
if (pll->info->id == id)
return pll;
}
@@ -175,9 +175,9 @@ intel_get_shared_dpll_by_id(struct intel_display *display,
}
/* For ILK+ */
-void assert_shared_dpll(struct intel_display *display,
- struct intel_shared_dpll *pll,
- bool state)
+void assert_dpll(struct intel_display *display,
+ struct intel_dpll *pll,
+ bool state)
{
bool cur_state;
struct intel_dpll_hw_state hw_state;
@@ -205,7 +205,7 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
static i915_reg_t
intel_combo_pll_enable_reg(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
if (display->platform.dg1)
return DG1_DPLL_ENABLE(pll->info->id);
@@ -218,7 +218,7 @@ intel_combo_pll_enable_reg(struct intel_display *display,
static i915_reg_t
intel_tc_pll_enable_reg(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
@@ -230,7 +230,7 @@ intel_tc_pll_enable_reg(struct intel_display *display,
}
static void _intel_enable_shared_dpll(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
if (pll->info->power_domain)
pll->wakeref = intel_display_power_get(display, pll->info->power_domain);
@@ -240,7 +240,7 @@ static void _intel_enable_shared_dpll(struct intel_display *display,
}
static void _intel_disable_shared_dpll(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
pll->info->funcs->disable(display, pll);
pll->on = false;
@@ -250,16 +250,16 @@ static void _intel_disable_shared_dpll(struct intel_display *display,
}
/**
- * intel_enable_shared_dpll - enable a CRTC's shared DPLL
- * @crtc_state: CRTC, and its state, which has a shared DPLL
+ * intel_dpll_enable - enable a CRTC's DPLL
+ * @crtc_state: CRTC, and its state, which has a DPLL
*
- * Enable the shared DPLL used by @crtc.
+ * Enable the DPLL used by @crtc.
*/
-void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
+void intel_dpll_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_dpll *pll = crtc_state->intel_dpll;
unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
unsigned int old_mask;
@@ -282,7 +282,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (old_mask) {
drm_WARN_ON(display->drm, !pll->on);
- assert_shared_dpll_enabled(display, pll);
+ assert_dpll_enabled(display, pll);
goto out;
}
drm_WARN_ON(display->drm, pll->on);
@@ -296,16 +296,16 @@ out:
}
/**
- * intel_disable_shared_dpll - disable a CRTC's shared DPLL
+ * intel_dpll_disable - disable a CRTC's DPLL
* @crtc_state: CRTC, and its state, which has a shared DPLL
*
- * Disable the shared DPLL used by @crtc.
+ * Disable the DPLL used by @crtc.
*/
-void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
+void intel_dpll_disable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_dpll *pll = crtc_state->intel_dpll;
unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
/* PCH only available on ILK+ */
@@ -326,7 +326,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->info->name, pll->active_mask, pll->on,
crtc->base.base.id, crtc->base.name);
- assert_shared_dpll_enabled(display, pll);
+ assert_dpll_enabled(display, pll);
drm_WARN_ON(display->drm, !pll->on);
pll->active_mask &= ~pipe_mask;
@@ -344,11 +344,11 @@ out:
static unsigned long
intel_dpll_mask_all(struct intel_display *display)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
unsigned long dpll_mask = 0;
int i;
- for_each_shared_dpll(display, pll, i) {
+ for_each_dpll(display, pll, i) {
drm_WARN_ON(display->drm, dpll_mask & BIT(pll->info->id));
dpll_mask |= BIT(pll->info->id);
@@ -357,44 +357,44 @@ intel_dpll_mask_all(struct intel_display *display)
return dpll_mask;
}
-static struct intel_shared_dpll *
-intel_find_shared_dpll(struct intel_atomic_state *state,
- const struct intel_crtc *crtc,
- const struct intel_dpll_hw_state *dpll_hw_state,
- unsigned long dpll_mask)
+static struct intel_dpll *
+intel_find_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll_hw_state *dpll_hw_state,
+ unsigned long dpll_mask)
{
struct intel_display *display = to_intel_display(crtc);
unsigned long dpll_mask_all = intel_dpll_mask_all(display);
- struct intel_shared_dpll_state *shared_dpll;
- struct intel_shared_dpll *unused_pll = NULL;
+ struct intel_dpll_state *dpll_state;
+ struct intel_dpll *unused_pll = NULL;
enum intel_dpll_id id;
- shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ dpll_state = intel_atomic_get_dpll_state(&state->base);
drm_WARN_ON(display->drm, dpll_mask & ~dpll_mask_all);
for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
- pll = intel_get_shared_dpll_by_id(display, id);
+ pll = intel_get_dpll_by_id(display, id);
if (!pll)
continue;
/* Only want to check enabled timings first */
- if (shared_dpll[pll->index].pipe_mask == 0) {
+ if (dpll_state[pll->index].pipe_mask == 0) {
if (!unused_pll)
unused_pll = pll;
continue;
}
if (memcmp(dpll_hw_state,
- &shared_dpll[pll->index].hw_state,
+ &dpll_state[pll->index].hw_state,
sizeof(*dpll_hw_state)) == 0) {
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
crtc->base.base.id, crtc->base.name,
pll->info->name,
- shared_dpll[pll->index].pipe_mask,
+ dpll_state[pll->index].pipe_mask,
pll->active_mask);
return pll;
}
@@ -412,76 +412,76 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
}
/**
- * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
+ * intel_dpll_crtc_get - Get a DPLL reference for a CRTC
* @crtc: CRTC on which behalf the reference is taken
* @pll: DPLL for which the reference is taken
- * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
+ * @dpll_state: the DPLL atomic state in which the reference is tracked
*
* Take a reference for @pll tracking the use of it by @crtc.
*/
static void
-intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll,
- struct intel_shared_dpll_state *shared_dpll_state)
+intel_dpll_crtc_get(const struct intel_crtc *crtc,
+ const struct intel_dpll *pll,
+ struct intel_dpll_state *dpll_state)
{
struct intel_display *display = to_intel_display(crtc);
- drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
+ drm_WARN_ON(display->drm, (dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
- shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
+ dpll_state->pipe_mask |= BIT(crtc->pipe);
drm_dbg_kms(display->drm, "[CRTC:%d:%s] reserving %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
}
static void
-intel_reference_shared_dpll(struct intel_atomic_state *state,
- const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *dpll_hw_state)
+intel_reference_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- struct intel_shared_dpll_state *shared_dpll;
+ struct intel_dpll_state *dpll_state;
- shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ dpll_state = intel_atomic_get_dpll_state(&state->base);
- if (shared_dpll[pll->index].pipe_mask == 0)
- shared_dpll[pll->index].hw_state = *dpll_hw_state;
+ if (dpll_state[pll->index].pipe_mask == 0)
+ dpll_state[pll->index].hw_state = *dpll_hw_state;
- intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
+ intel_dpll_crtc_get(crtc, pll, &dpll_state[pll->index]);
}
/**
- * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
+ * intel_dpll_crtc_put - Drop a DPLL reference for a CRTC
* @crtc: CRTC on which behalf the reference is dropped
* @pll: DPLL for which the reference is dropped
- * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
+ * @dpll_state: the DPLL atomic state in which the reference is tracked
*
* Drop a reference for @pll tracking the end of use of it by @crtc.
*/
void
-intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll,
- struct intel_shared_dpll_state *shared_dpll_state)
+intel_dpll_crtc_put(const struct intel_crtc *crtc,
+ const struct intel_dpll *pll,
+ struct intel_dpll_state *dpll_state)
{
struct intel_display *display = to_intel_display(crtc);
- drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
+ drm_WARN_ON(display->drm, (dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
- shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
+ dpll_state->pipe_mask &= ~BIT(crtc->pipe);
drm_dbg_kms(display->drm, "[CRTC:%d:%s] releasing %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
}
-static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
- const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll)
+static void intel_unreference_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll *pll)
{
- struct intel_shared_dpll_state *shared_dpll;
+ struct intel_dpll_state *dpll_state;
- shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ dpll_state = intel_atomic_get_dpll_state(&state->base);
- intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
+ intel_dpll_crtc_put(crtc, pll, &dpll_state[pll->index]);
}
static void intel_put_dpll(struct intel_atomic_state *state,
@@ -492,16 +492,16 @@ static void intel_put_dpll(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- new_crtc_state->shared_dpll = NULL;
+ new_crtc_state->intel_dpll = NULL;
- if (!old_crtc_state->shared_dpll)
+ if (!old_crtc_state->intel_dpll)
return;
- intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
+ intel_unreference_dpll(state, crtc, old_crtc_state->intel_dpll);
}
/**
- * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
+ * intel_dpll_swap_state - make atomic DPLL configuration effective
* @state: atomic state
*
* This is the dpll version of drm_atomic_helper_swap_state() since the
@@ -511,22 +511,22 @@ static void intel_put_dpll(struct intel_atomic_state *state,
* i.e. it also puts the current state into @state, even though there is no
* need for that at this moment.
*/
-void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
+void intel_dpll_swap_state(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
- struct intel_shared_dpll *pll;
+ struct intel_dpll_state *dpll_state = state->dpll_state;
+ struct intel_dpll *pll;
int i;
if (!state->dpll_set)
return;
- for_each_shared_dpll(display, pll, i)
- swap(pll->state, shared_dpll[pll->index]);
+ for_each_dpll(display, pll, i)
+ swap(pll->state, dpll_state[pll->index]);
}
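
intel_dpll_swap_state() exchanges each PLL's committed state with the candidate state kept in the atomic-state array, so the previous state remains available in @state afterwards. A hedged, standalone sketch with simplified types (SWAP stands in for the kernel's swap()):

#include <stdio.h>

#define SWAP(a, b) do { typeof(a) __t = (a); (a) = (b); (b) = __t; } while (0)

struct dpll_state { unsigned int pipe_mask; };
struct dpll { struct dpll_state state; int index; };

int main(void)
{
	struct dpll plls[2] = { { { 0x1 }, 0 }, { { 0x2 }, 1 } };
	struct dpll_state atomic_state[2] = { { 0x3 }, { 0x0 } };
	int i;

	for (i = 0; i < 2; i++)
		SWAP(plls[i].state, atomic_state[plls[i].index]);

	/* committed: 0x3 0x0; the old state now sits in atomic_state[] */
	printf("%x %x | %x %x\n",
	       plls[0].state.pipe_mask, plls[1].state.pipe_mask,
	       atomic_state[0].pipe_mask, atomic_state[1].pipe_mask);
	return 0;
}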
static bool ibx_pch_dpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
@@ -562,7 +562,7 @@ static void ibx_assert_pch_refclk_enabled(struct intel_display *display)
}
static void ibx_pch_dpll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
@@ -591,7 +591,7 @@ static void ibx_pch_dpll_enable(struct intel_display *display,
}
static void ibx_pch_dpll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
@@ -614,33 +614,33 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
enum intel_dpll_id id;
if (HAS_PCH_IBX(display)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
id = (enum intel_dpll_id) crtc->pipe;
- pll = intel_get_shared_dpll_by_id(display, id);
+ pll = intel_get_dpll_by_id(display, id);
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name,
pll->info->name);
} else {
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_PCH_PLL_B) |
- BIT(DPLL_ID_PCH_PLL_A));
+ pll = intel_find_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_PCH_PLL_B) |
+ BIT(DPLL_ID_PCH_PLL_A));
}
if (!pll)
return -EINVAL;
/* reference the pll */
- intel_reference_shared_dpll(state, crtc,
- pll, &crtc_state->dpll_hw_state);
+ intel_reference_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- crtc_state->shared_dpll = pll;
+ crtc_state->intel_dpll = pll;
return 0;
}
@@ -670,7 +670,7 @@ static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->fp1 == b->fp1;
}
-static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
+static const struct intel_dpll_funcs ibx_pch_dpll_funcs = {
.enable = ibx_pch_dpll_enable,
.disable = ibx_pch_dpll_disable,
.get_hw_state = ibx_pch_dpll_get_hw_state,
@@ -692,7 +692,7 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
};
static void hsw_ddi_wrpll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -704,7 +704,7 @@ static void hsw_ddi_wrpll_enable(struct intel_display *display,
}
static void hsw_ddi_spll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -715,7 +715,7 @@ static void hsw_ddi_spll_enable(struct intel_display *display,
}
static void hsw_ddi_wrpll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
@@ -731,7 +731,7 @@ static void hsw_ddi_wrpll_disable(struct intel_display *display,
}
static void hsw_ddi_spll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
enum intel_dpll_id id = pll->info->id;
@@ -747,7 +747,7 @@ static void hsw_ddi_spll_disable(struct intel_display *display,
}
static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -769,7 +769,7 @@ static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
}
static bool hsw_ddi_spll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -996,7 +996,7 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
}
static int hsw_ddi_wrpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -1059,14 +1059,14 @@ hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
return 0;
}
-static struct intel_shared_dpll *
+static struct intel_dpll *
hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- return intel_find_shared_dpll(state, crtc,
+ return intel_find_dpll(state, crtc,
&crtc_state->dpll_hw_state,
BIT(DPLL_ID_WRPLL2) |
BIT(DPLL_ID_WRPLL1));
@@ -1090,11 +1090,11 @@ hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
}
}
-static struct intel_shared_dpll *
+static struct intel_dpll *
hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
enum intel_dpll_id pll_id;
int clock = crtc_state->port_clock;
@@ -1113,7 +1113,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
return NULL;
}
- pll = intel_get_shared_dpll_by_id(display, pll_id);
+ pll = intel_get_dpll_by_id(display, pll_id);
if (!pll)
return NULL;
@@ -1122,7 +1122,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
}
static int hsw_ddi_lcpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
int link_clock = 0;
@@ -1162,19 +1162,19 @@ hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
return 0;
}
-static struct intel_shared_dpll *
+static struct intel_dpll *
hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
+ return intel_find_dpll(state, crtc, &crtc_state->dpll_hw_state,
BIT(DPLL_ID_SPLL));
}
static int hsw_ddi_spll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -1221,7 +1221,7 @@ static int hsw_get_dpll(struct intel_atomic_state *state,
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll = NULL;
+ struct intel_dpll *pll = NULL;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
pll = hsw_ddi_wrpll_get_dpll(state, crtc);
@@ -1233,10 +1233,10 @@ static int hsw_get_dpll(struct intel_atomic_state *state,
if (!pll)
return -EINVAL;
- intel_reference_shared_dpll(state, crtc,
- pll, &crtc_state->dpll_hw_state);
+ intel_reference_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- crtc_state->shared_dpll = pll;
+ crtc_state->intel_dpll = pll;
return 0;
}
@@ -1270,14 +1270,14 @@ static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->spll == b->spll;
}
-static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
+static const struct intel_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
.get_hw_state = hsw_ddi_wrpll_get_hw_state,
.get_freq = hsw_ddi_wrpll_get_freq,
};
-static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
+static const struct intel_dpll_funcs hsw_ddi_spll_funcs = {
.enable = hsw_ddi_spll_enable,
.disable = hsw_ddi_spll_disable,
.get_hw_state = hsw_ddi_spll_get_hw_state,
@@ -1285,24 +1285,24 @@ static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
};
static void hsw_ddi_lcpll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *hw_state)
{
}
static void hsw_ddi_lcpll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
}
static bool hsw_ddi_lcpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
return true;
}
-static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
+static const struct intel_dpll_funcs hsw_ddi_lcpll_funcs = {
.enable = hsw_ddi_lcpll_enable,
.disable = hsw_ddi_lcpll_disable,
.get_hw_state = hsw_ddi_lcpll_get_hw_state,
@@ -1364,7 +1364,7 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = {
};
static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct skl_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
@@ -1378,7 +1378,7 @@ static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
}
static void skl_ddi_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1400,7 +1400,7 @@ static void skl_ddi_pll_enable(struct intel_display *display,
}
static void skl_ddi_dpll0_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1409,7 +1409,7 @@ static void skl_ddi_dpll0_enable(struct intel_display *display,
}
static void skl_ddi_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
@@ -1420,12 +1420,12 @@ static void skl_ddi_pll_disable(struct intel_display *display,
}
static void skl_ddi_dpll0_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
}
static bool skl_ddi_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1463,7 +1463,7 @@ out:
}
static bool skl_ddi_dpll0_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1736,7 +1736,7 @@ skip_remaining_dividers:
}
static int skl_ddi_wrpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1884,7 +1884,7 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
}
static int skl_ddi_lcpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1939,31 +1939,31 @@ static int skl_get_dpll(struct intel_atomic_state *state,
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_SKL_DPLL0));
+ pll = intel_find_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SKL_DPLL0));
else
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_SKL_DPLL3) |
- BIT(DPLL_ID_SKL_DPLL2) |
- BIT(DPLL_ID_SKL_DPLL1));
+ pll = intel_find_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SKL_DPLL3) |
+ BIT(DPLL_ID_SKL_DPLL2) |
+ BIT(DPLL_ID_SKL_DPLL1));
if (!pll)
return -EINVAL;
- intel_reference_shared_dpll(state, crtc,
- pll, &crtc_state->dpll_hw_state);
+ intel_reference_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- crtc_state->shared_dpll = pll;
+ crtc_state->intel_dpll = pll;
return 0;
}
static int skl_ddi_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -2004,14 +2004,14 @@ static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->cfgcr2 == b->cfgcr2;
}
-static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
+static const struct intel_dpll_funcs skl_ddi_pll_funcs = {
.enable = skl_ddi_pll_enable,
.disable = skl_ddi_pll_disable,
.get_hw_state = skl_ddi_pll_get_hw_state,
.get_freq = skl_ddi_pll_get_freq,
};
-static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
+static const struct intel_dpll_funcs skl_ddi_dpll0_funcs = {
.enable = skl_ddi_dpll0_enable,
.disable = skl_ddi_dpll0_disable,
.get_hw_state = skl_ddi_dpll0_get_hw_state,
@@ -2038,7 +2038,7 @@ static const struct intel_dpll_mgr skl_pll_mgr = {
};
static void bxt_ddi_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
@@ -2141,7 +2141,7 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
}
static void bxt_ddi_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
@@ -2160,7 +2160,7 @@ static void bxt_ddi_pll_disable(struct intel_display *display,
}
static bool bxt_ddi_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
@@ -2360,7 +2360,7 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
}
static int bxt_ddi_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
@@ -2429,20 +2429,20 @@ static int bxt_get_dpll(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
enum intel_dpll_id id;
/* 1:1 mapping between ports and PLLs */
id = (enum intel_dpll_id) encoder->port;
- pll = intel_get_shared_dpll_by_id(display, id);
+ pll = intel_get_dpll_by_id(display, id);
drm_dbg_kms(display->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
- intel_reference_shared_dpll(state, crtc,
- pll, &crtc_state->dpll_hw_state);
+ intel_reference_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- crtc_state->shared_dpll = pll;
+ crtc_state->intel_dpll = pll;
return 0;
}
@@ -2486,7 +2486,7 @@ static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->pcsdw12 == b->pcsdw12;
}
-static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
+static const struct intel_dpll_funcs bxt_ddi_pll_funcs = {
.enable = bxt_ddi_pll_enable,
.disable = bxt_ddi_pll_disable,
.get_hw_state = bxt_ddi_pll_get_hw_state,
@@ -2755,7 +2755,7 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
}
static int icl_ddi_tbt_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
/*
@@ -2826,7 +2826,7 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state,
}
static int icl_ddi_combo_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3199,7 +3199,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
static int icl_ddi_mg_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3285,7 +3285,7 @@ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[port_dpll_id];
- crtc_state->shared_dpll = port_dpll->pll;
+ crtc_state->intel_dpll = port_dpll->pll;
crtc_state->dpll_hw_state = port_dpll->hw_state;
}
@@ -3388,14 +3388,14 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
/* Eliminate DPLLs from consideration if reserved by HTI */
dpll_mask &= ~intel_hti_dpll_mask(display);
- port_dpll->pll = intel_find_shared_dpll(state, crtc,
- &port_dpll->hw_state,
- dpll_mask);
+ port_dpll->pll = intel_find_dpll(state, crtc,
+ &port_dpll->hw_state,
+ dpll_mask);
if (!port_dpll->pll)
return -EINVAL;
- intel_reference_shared_dpll(state, crtc,
- port_dpll->pll, &port_dpll->hw_state);
+ intel_reference_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
icl_update_active_dpll(state, crtc, encoder);
@@ -3428,8 +3428,8 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
return ret;
/* this is mainly for the fastset check */
- if (old_crtc_state->shared_dpll &&
- old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
+ if (old_crtc_state->intel_dpll &&
+ old_crtc_state->intel_dpll->info->id == DPLL_ID_ICL_TBTPLL)
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
else
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
@@ -3452,26 +3452,25 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
int ret;
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
- port_dpll->pll = intel_find_shared_dpll(state, crtc,
- &port_dpll->hw_state,
- BIT(DPLL_ID_ICL_TBTPLL));
+ port_dpll->pll = intel_find_dpll(state, crtc,
+ &port_dpll->hw_state,
+ BIT(DPLL_ID_ICL_TBTPLL));
if (!port_dpll->pll)
return -EINVAL;
- intel_reference_shared_dpll(state, crtc,
- port_dpll->pll, &port_dpll->hw_state);
-
+ intel_reference_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
- port_dpll->pll = intel_find_shared_dpll(state, crtc,
- &port_dpll->hw_state,
- BIT(dpll_id));
+ port_dpll->pll = intel_find_dpll(state, crtc,
+ &port_dpll->hw_state,
+ BIT(dpll_id));
if (!port_dpll->pll) {
ret = -EINVAL;
goto err_unreference_tbt_pll;
}
- intel_reference_shared_dpll(state, crtc,
- port_dpll->pll, &port_dpll->hw_state);
+ intel_reference_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
icl_update_active_dpll(state, crtc, encoder);
@@ -3479,7 +3478,7 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
err_unreference_tbt_pll:
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
- intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
+ intel_unreference_dpll(state, crtc, port_dpll->pll);
return ret;
}
@@ -3521,7 +3520,7 @@ static void icl_put_dplls(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
enum icl_port_dpll_id id;
- new_crtc_state->shared_dpll = NULL;
+ new_crtc_state->intel_dpll = NULL;
for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
const struct icl_port_dpll *old_port_dpll =
@@ -3534,12 +3533,12 @@ static void icl_put_dplls(struct intel_atomic_state *state,
if (!old_port_dpll->pll)
continue;
- intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
+ intel_unreference_dpll(state, crtc, old_port_dpll->pll);
}
}
static bool mg_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3606,7 +3605,7 @@ out:
}
static bool dkl_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3678,7 +3677,7 @@ out:
}
static bool icl_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state,
i915_reg_t enable_reg)
{
@@ -3739,7 +3738,7 @@ out:
}
static bool combo_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
@@ -3748,14 +3747,14 @@ static bool combo_pll_get_hw_state(struct intel_display *display,
}
static bool tbt_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
return icl_pll_get_hw_state(display, pll, dpll_hw_state, TBT_PLL_ENABLE);
}
static void icl_dpll_write(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct icl_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
@@ -3797,7 +3796,7 @@ static void icl_dpll_write(struct intel_display *display,
}
static void icl_mg_pll_write(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct icl_dpll_hw_state *hw_state)
{
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
@@ -3840,7 +3839,7 @@ static void icl_mg_pll_write(struct intel_display *display,
}
static void dkl_pll_write(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct icl_dpll_hw_state *hw_state)
{
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
@@ -3905,7 +3904,7 @@ static void dkl_pll_write(struct intel_display *display,
}
static void icl_pll_power_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
i915_reg_t enable_reg)
{
intel_de_rmw(display, enable_reg, 0, PLL_POWER_ENABLE);
@@ -3920,7 +3919,7 @@ static void icl_pll_power_enable(struct intel_display *display,
}
static void icl_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
i915_reg_t enable_reg)
{
intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);
@@ -3930,7 +3929,7 @@ static void icl_pll_enable(struct intel_display *display,
drm_err(display->drm, "PLL %d not locked\n", pll->info->id);
}
-static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_shared_dpll *pll)
+static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_dpll *pll)
{
u32 val;
@@ -3955,7 +3954,7 @@ static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct inte
}
static void combo_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3979,7 +3978,7 @@ static void combo_pll_enable(struct intel_display *display,
}
static void tbt_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -4000,7 +3999,7 @@ static void tbt_pll_enable(struct intel_display *display,
}
static void mg_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -4025,7 +4024,7 @@ static void mg_pll_enable(struct intel_display *display,
}
static void icl_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
i915_reg_t enable_reg)
{
/* The first steps are done by intel_ddi_post_disable(). */
@@ -4056,7 +4055,7 @@ static void icl_pll_disable(struct intel_display *display,
}
static void combo_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
@@ -4064,13 +4063,13 @@ static void combo_pll_disable(struct intel_display *display,
}
static void tbt_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
icl_pll_disable(display, pll, TBT_PLL_ENABLE);
}
static void mg_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);
@@ -4129,21 +4128,21 @@ static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
}
-static const struct intel_shared_dpll_funcs combo_pll_funcs = {
+static const struct intel_dpll_funcs combo_pll_funcs = {
.enable = combo_pll_enable,
.disable = combo_pll_disable,
.get_hw_state = combo_pll_get_hw_state,
.get_freq = icl_ddi_combo_pll_get_freq,
};
-static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
+static const struct intel_dpll_funcs tbt_pll_funcs = {
.enable = tbt_pll_enable,
.disable = tbt_pll_disable,
.get_hw_state = tbt_pll_get_hw_state,
.get_freq = icl_ddi_tbt_pll_get_freq,
};
-static const struct intel_shared_dpll_funcs mg_pll_funcs = {
+static const struct intel_dpll_funcs mg_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = mg_pll_get_hw_state,
@@ -4191,7 +4190,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.compare_hw_state = icl_compare_hw_state,
};
-static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
+static const struct intel_dpll_funcs dkl_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = dkl_pll_get_hw_state,
@@ -4300,12 +4299,12 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
};
/**
- * intel_shared_dpll_init - Initialize shared DPLLs
+ * intel_dpll_init - Initialize DPLLs
* @display: intel_display device
*
- * Initialize shared DPLLs for @display.
+ * Initialize DPLLs for @display.
*/
-void intel_shared_dpll_init(struct intel_display *display)
+void intel_dpll_init(struct intel_display *display)
{
const struct intel_dpll_mgr *dpll_mgr = NULL;
const struct dpll_info *dpll_info;
@@ -4346,23 +4345,23 @@ void intel_shared_dpll_init(struct intel_display *display)
for (i = 0; dpll_info[i].name; i++) {
if (drm_WARN_ON(display->drm,
- i >= ARRAY_SIZE(display->dpll.shared_dplls)))
+ i >= ARRAY_SIZE(display->dpll.dplls)))
break;
/* must fit into unsigned long bitmask on 32bit */
if (drm_WARN_ON(display->drm, dpll_info[i].id >= 32))
break;
- display->dpll.shared_dplls[i].info = &dpll_info[i];
- display->dpll.shared_dplls[i].index = i;
+ display->dpll.dplls[i].info = &dpll_info[i];
+ display->dpll.dplls[i].index = i;
}
display->dpll.mgr = dpll_mgr;
- display->dpll.num_shared_dpll = i;
+ display->dpll.num_dpll = i;
}
/**
- * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
+ * intel_dpll_compute - compute DPLL state for CRTC and encoder combination
* @state: atomic state
* @crtc: CRTC to compute DPLLs for
* @encoder: encoder
@@ -4370,14 +4369,14 @@ void intel_shared_dpll_init(struct intel_display *display)
* This function computes the DPLL state for the given CRTC and encoder.
*
* The new configuration in the atomic commit @state is made effective by
- * calling intel_shared_dpll_swap_state().
+ * calling intel_dpll_swap_state().
*
* Returns:
* 0 on success, negative error code on failure.
*/
-int intel_compute_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+int intel_dpll_compute(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(state);
const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
@@ -4389,7 +4388,7 @@ int intel_compute_shared_dplls(struct intel_atomic_state *state,
}
/**
- * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
+ * intel_dpll_reserve - reserve DPLLs for CRTC and encoder combination
* @state: atomic state
* @crtc: CRTC to reserve DPLLs for
* @encoder: encoder
@@ -4399,18 +4398,18 @@ int intel_compute_shared_dplls(struct intel_atomic_state *state,
* state.
*
* The new configuration in the atomic commit @state is made effective by
- * calling intel_shared_dpll_swap_state().
+ * calling intel_dpll_swap_state().
*
* The reserved DPLLs should be released by calling
- * intel_release_shared_dplls().
+ * intel_dpll_release().
*
* Returns:
* 0 if all required DPLLs were successfully reserved,
* negative error code otherwise.
*/
-int intel_reserve_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+int intel_dpll_reserve(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(state);
const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
@@ -4422,18 +4421,18 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state,
}
/**
- * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
+ * intel_dpll_release - end use of DPLLs by CRTC in atomic state
* @state: atomic state
* @crtc: crtc from which the DPLLs are to be released
*
- * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
+ * This function releases all DPLLs reserved by intel_dpll_reserve()
* from the current atomic commit @state and the old @crtc atomic state.
*
* The new configuration in the atomic commit @state is made effective by
- * calling intel_shared_dpll_swap_state().
+ * calling intel_dpll_swap_state().
*/
-void intel_release_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void intel_dpll_release(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
@@ -4441,7 +4440,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
/*
* FIXME: this function is called for every platform having a
* compute_clock hook, even though the platform doesn't yet support
- * the shared DPLL framework and intel_reserve_shared_dplls() is not
+ * the DPLL framework and intel_dpll_reserve() is not
* called on those.
*/
if (!dpll_mgr)
@@ -4451,16 +4450,16 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
}
/**
- * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
+ * intel_dpll_update_active - update the active DPLL for a CRTC/encoder
* @state: atomic state
* @crtc: the CRTC for which to update the active DPLL
* @encoder: encoder determining the type of port DPLL
*
* Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
- * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
+ * from the port DPLLs reserved previously by intel_dpll_reserve(). The
* DPLL selected will be based on the current mode of the encoder's port.
*/
-void intel_update_active_dpll(struct intel_atomic_state *state,
+void intel_dpll_update_active(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
@@ -4482,7 +4481,7 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
* Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
*/
int intel_dpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
if (drm_WARN_ON(display->drm, !pll->info->funcs->get_freq))
@@ -4500,14 +4499,14 @@ int intel_dpll_get_freq(struct intel_display *display,
* Read out @pll's hardware state into @dpll_hw_state.
*/
bool intel_dpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
return pll->info->funcs->get_hw_state(display, pll, dpll_hw_state);
}
static void readout_dpll_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
struct intel_crtc *crtc;
@@ -4521,8 +4520,8 @@ static void readout_dpll_hw_state(struct intel_display *display,
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
- intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
+ if (crtc_state->hw.active && crtc_state->intel_dpll == pll)
+ intel_dpll_crtc_get(crtc, pll, &pll->state);
}
pll->active_mask = pll->state.pipe_mask;
@@ -4539,15 +4538,15 @@ void intel_dpll_update_ref_clks(struct intel_display *display)
void intel_dpll_readout_hw_state(struct intel_display *display)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- for_each_shared_dpll(display, pll, i)
+ for_each_dpll(display, pll, i)
readout_dpll_hw_state(display, pll);
}
static void sanitize_dpll_state(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
if (!pll->on)
return;
@@ -4566,12 +4565,12 @@ static void sanitize_dpll_state(struct intel_display *display,
void intel_dpll_sanitize_state(struct intel_display *display)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
intel_cx0_pll_power_save_wa(display);
- for_each_shared_dpll(display, pll, i)
+ for_each_dpll(display, pll, i)
sanitize_dpll_state(display, pll);
}
@@ -4623,7 +4622,7 @@ bool intel_dpll_compare_hw_state(struct intel_display *display,
static void
verify_single_dpll_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_crtc *crtc,
const struct intel_crtc_state *new_crtc_state)
{
@@ -4676,15 +4675,15 @@ verify_single_dpll_state(struct intel_display *display,
pll->info->name);
}
-static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
- const struct intel_shared_dpll *new_pll)
+static bool has_alt_port_dpll(const struct intel_dpll *old_pll,
+ const struct intel_dpll *new_pll)
{
return old_pll && new_pll && old_pll != new_pll &&
(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
}
-void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void intel_dpll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
@@ -4692,34 +4691,34 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (new_crtc_state->shared_dpll)
- verify_single_dpll_state(display, new_crtc_state->shared_dpll,
+ if (new_crtc_state->intel_dpll)
+ verify_single_dpll_state(display, new_crtc_state->intel_dpll,
crtc, new_crtc_state);
- if (old_crtc_state->shared_dpll &&
- old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
+ if (old_crtc_state->intel_dpll &&
+ old_crtc_state->intel_dpll != new_crtc_state->intel_dpll) {
u8 pipe_mask = BIT(crtc->pipe);
- struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
+ struct intel_dpll *pll = old_crtc_state->intel_dpll;
INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
"%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
- INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll,
- new_crtc_state->shared_dpll) &&
+ INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->intel_dpll,
+ new_crtc_state->intel_dpll) &&
pll->state.pipe_mask & pipe_mask,
"%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
}
}
-void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
+void intel_dpll_verify_disabled(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- for_each_shared_dpll(display, pll, i)
+ for_each_dpll(display, pll, i)
verify_single_dpll_state(display, pll, NULL, NULL);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index caffb084830c..f131bdd1c975 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -30,18 +30,18 @@
#include "intel_display_power.h"
#include "intel_wakeref.h"
-#define for_each_shared_dpll(__display, __pll, __i) \
- for ((__i) = 0; (__i) < (__display)->dpll.num_shared_dpll && \
- ((__pll) = &(__display)->dpll.shared_dplls[(__i)]) ; (__i)++)
+#define for_each_dpll(__display, __pll, __i) \
+ for ((__i) = 0; (__i) < (__display)->dpll.num_dpll && \
+ ((__pll) = &(__display)->dpll.dplls[(__i)]) ; (__i)++)
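
As a usage sketch, the renamed iterator can be exercised standalone; struct display below is a simplified stand-in for the driver's display->dpll bookkeeping, not the real intel_display layout:

#include <stdio.h>

struct intel_dpll { const char *name; };

struct display {
	struct {
		struct intel_dpll dplls[4];
		int num_dpll;
	} dpll;
};

#define for_each_dpll(__display, __pll, __i) \
	for ((__i) = 0; (__i) < (__display)->dpll.num_dpll && \
	     ((__pll) = &(__display)->dpll.dplls[(__i)]); (__i)++)

int main(void)
{
	struct display d = {
		.dpll = { .dplls = { { "DPLL0" }, { "DPLL1" } }, .num_dpll = 2 },
	};
	struct intel_dpll *pll;
	int i;

	for_each_dpll(&d, pll, i)
		printf("%d: %s\n", i, pll->name);	/* visits only valid slots */
	return 0;
}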
enum tc_port;
struct drm_printer;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dpll_funcs;
struct intel_encoder;
struct intel_shared_dpll;
-struct intel_shared_dpll_funcs;
/**
* enum intel_dpll_id - possible DPLL ids
@@ -280,7 +280,7 @@ struct intel_dpll_hw_state {
};
/**
- * struct intel_shared_dpll_state - hold the DPLL atomic state
+ * struct intel_dpll_state - hold the DPLL atomic state
*
* This structure holds an atomic state for the DPLL, that can represent
* either its current state (in struct &intel_shared_dpll) or a desired
@@ -289,7 +289,7 @@ struct intel_dpll_hw_state {
*
* See also intel_reserve_shared_dplls() and intel_release_shared_dplls().
*/
-struct intel_shared_dpll_state {
+struct intel_dpll_state {
/**
* @pipe_mask: mask of pipes using this DPLL, active or not
*/
@@ -314,7 +314,7 @@ struct dpll_info {
/**
* @funcs: platform specific hooks
*/
- const struct intel_shared_dpll_funcs *funcs;
+ const struct intel_dpll_funcs *funcs;
/**
* @id: unique identifier for this DPLL
@@ -344,16 +344,16 @@ struct dpll_info {
};
/**
- * struct intel_shared_dpll - display PLL with tracked state and users
+ * struct intel_dpll - display PLL with tracked state and users
*/
-struct intel_shared_dpll {
+struct intel_dpll {
/**
* @state:
*
* Store the state for the pll, including its hw state
* and CRTCs using it.
*/
- struct intel_shared_dpll_state state;
+ struct intel_dpll_state state;
/**
* @index: index for atomic state
@@ -387,41 +387,41 @@ struct intel_shared_dpll {
#define SKL_DPLL2 2
#define SKL_DPLL3 3
-/* shared dpll functions */
-struct intel_shared_dpll *
-intel_get_shared_dpll_by_id(struct intel_display *display,
- enum intel_dpll_id id);
-void assert_shared_dpll(struct intel_display *display,
- struct intel_shared_dpll *pll,
- bool state);
-#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
-#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-int intel_compute_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
-int intel_reserve_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
-void intel_release_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll,
- struct intel_shared_dpll_state *shared_dpll_state);
+/* dpll functions */
+struct intel_dpll *
+intel_get_dpll_by_id(struct intel_display *display,
+ enum intel_dpll_id id);
+void assert_dpll(struct intel_display *display,
+ struct intel_dpll *pll,
+ bool state);
+#define assert_dpll_enabled(d, p) assert_dpll(d, p, true)
+#define assert_dpll_disabled(d, p) assert_dpll(d, p, false)
+int intel_dpll_compute(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+int intel_dpll_reserve(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+void intel_dpll_release(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_dpll_crtc_put(const struct intel_crtc *crtc,
+ const struct intel_dpll *pll,
+ struct intel_dpll_state *shared_dpll_state);
void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
enum icl_port_dpll_id port_dpll_id);
-void intel_update_active_dpll(struct intel_atomic_state *state,
+void intel_dpll_update_active(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
int intel_dpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
bool intel_dpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state);
-void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
-void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
-void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
-void intel_shared_dpll_init(struct intel_display *display);
+void intel_dpll_enable(const struct intel_crtc_state *crtc_state);
+void intel_dpll_disable(const struct intel_crtc_state *crtc_state);
+void intel_dpll_swap_state(struct intel_atomic_state *state);
+void intel_dpll_init(struct intel_display *display);
void intel_dpll_update_ref_clks(struct intel_display *display);
void intel_dpll_readout_hw_state(struct intel_display *display);
void intel_dpll_sanitize_state(struct intel_display *display);
@@ -435,8 +435,8 @@ bool intel_dpll_compare_hw_state(struct intel_display *display,
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
-void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state);
+void intel_dpll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_dpll_verify_disabled(struct intel_atomic_state *state);
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 43bd97e4f589..aea249e2699f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -9,6 +9,7 @@
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
@@ -126,7 +127,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
unsigned int alignment)
{
struct drm_i915_private *i915 = vm->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
struct ref_tracker *wakeref;
struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
index ce5aa0ca0fa5..5eb88d51dba1 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -3,8 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpt_common.h"
#include "skl_universal_plane_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 05cd0f6e6d71..0fdb32ef241c 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -5,10 +5,11 @@
#include <linux/debugfs.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
#include "intel_atomic.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_frontbuffer.h"
@@ -123,9 +124,9 @@ static void intel_drrs_set_state(struct intel_crtc *crtc,
static void intel_drrs_schedule_work(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- mod_delayed_work(i915->unordered_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
+ mod_delayed_work(display->wq.unordered, &crtc->drrs.work, msecs_to_jiffies(1000));
}
static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 481488d1fe67..53d8ae3a70e9 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -7,11 +7,10 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_irq.h"
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
@@ -94,6 +93,10 @@ struct intel_dsb {
/* see DSB_REG_VALUE_MASK */
#define DSB_OPCODE_POLL 0xA
/* see DSB_REG_VALUE_MASK */
+#define DSB_OPCODE_GOSUB 0xC /* ptl+ */
+#define DSB_GOSUB_HEAD_SHIFT 26
+#define DSB_GOSUB_TAIL_SHIFT 0
+#define DSB_GOSUB_CONVERT_ADDR(x) ((x) >> 6)
static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
struct intel_crtc *crtc)
@@ -205,6 +208,15 @@ static bool assert_dsb_has_room(struct intel_dsb *dsb)
crtc->base.base.id, crtc->base.name, dsb->id);
}
+static bool assert_dsb_tail_is_aligned(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ struct intel_display *display = to_intel_display(crtc->base.dev);
+
+ return !drm_WARN_ON(display->drm,
+ !IS_ALIGNED(dsb->free_pos * 4, CACHELINE_BYTES));
+}
+
static void intel_dsb_dump(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
@@ -229,13 +241,40 @@ static bool is_dsb_busy(struct intel_display *display, enum pipe pipe,
return intel_de_read_fw(display, DSB_CTRL(pipe, dsb_id)) & DSB_STATUS_BUSY;
}
+unsigned int intel_dsb_size(struct intel_dsb *dsb)
+{
+ return dsb->free_pos * 4;
+}
+
+unsigned int intel_dsb_head(struct intel_dsb *dsb)
+{
+ return intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
+}
+
+static unsigned int intel_dsb_tail(struct intel_dsb *dsb)
+{
+ return intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + intel_dsb_size(dsb);
+}
+
+static void intel_dsb_ins_align(struct intel_dsb *dsb)
+{
+ /*
+ * Every instruction should be 8 byte aligned.
+ *
+ * The only way to get unaligned free_pos is via
+ * intel_dsb_reg_write_indexed(), which already
+ * makes sure the next dword is zeroed, so no need
+ * to clear it here.
+ */
+ dsb->free_pos = ALIGN(dsb->free_pos, 2);
+}
+
static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
if (!assert_dsb_has_room(dsb))
return;
- /* Every instruction should be 8 byte aligned. */
- dsb->free_pos = ALIGN(dsb->free_pos, 2);
+ intel_dsb_ins_align(dsb);
dsb->ins_start_offset = dsb->free_pos;
dsb->ins[0] = ldw;
@@ -493,6 +532,8 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
u32 aligned_tail, tail;
+ intel_dsb_ins_align(dsb);
+
tail = dsb->free_pos * 4;
aligned_tail = ALIGN(tail, CACHELINE_BYTES);
@@ -503,20 +544,90 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
dsb->free_pos = aligned_tail / 4;
}
-void intel_dsb_finish(struct intel_dsb *dsb)
+static void intel_dsb_gosub_align(struct intel_dsb *dsb)
+{
+ u32 aligned_tail, tail;
+
+ intel_dsb_ins_align(dsb);
+
+ tail = dsb->free_pos * 4;
+ aligned_tail = ALIGN(tail, CACHELINE_BYTES);
+
+ /*
+ * Wa_16024917128
+ * "Ensure GOSUB is not placed in cacheline QW slot 6 or 7 (numbered 0-7)"
+ */
+ if (aligned_tail - tail <= 2 * 8)
+ intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
+ aligned_tail - tail);
+
+ dsb->free_pos = aligned_tail / 4;
+}
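
The slot check above works because tail and aligned_tail differ by exactly the padding needed to reach the next 64-byte cacheline: a difference of 16 bytes or less means the write position is in quadword slot 6 or 7 (or already aligned), so the GOSUB gets pushed to the next cacheline. A standalone arithmetic sketch (byte offsets instead of the driver's dword free_pos):

#include <stdio.h>

#define CACHELINE 64

static unsigned int gosub_align(unsigned int tail)
{
	unsigned int aligned = (tail + CACHELINE - 1) & ~(CACHELINE - 1);

	/* slot = (tail % 64) / 8; slots 6 and 7 are the last 16 bytes */
	if (aligned - tail <= 2 * 8)
		return aligned;	/* pad with NOPs up to the next cacheline */
	return tail;
}

int main(void)
{
	unsigned int t;

	/* offsets 48 and 56 (slots 6 and 7) are pushed out to 64 */
	for (t = 0; t < 64; t += 8)
		printf("tail %2u (slot %u) -> %2u\n", t, t / 8, gosub_align(t));
	return 0;
}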
+
+void intel_dsb_gosub(struct intel_dsb *dsb,
+ struct intel_dsb *sub_dsb)
{
struct intel_crtc *crtc = dsb->crtc;
+ struct intel_display *display = to_intel_display(crtc->base.dev);
+ unsigned int head, tail;
+ u64 head_tail;
+
+ if (drm_WARN_ON(display->drm, dsb->id != sub_dsb->id))
+ return;
+
+ if (!assert_dsb_tail_is_aligned(sub_dsb))
+ return;
+
+ intel_dsb_gosub_align(dsb);
+
+ head = intel_dsb_head(sub_dsb);
+ tail = intel_dsb_tail(sub_dsb);
/*
- * DSB_FORCE_DEWAKE remains active even after DSB is
- * disabled, so make sure to clear it (if set during
- * intel_dsb_commit()). And clear DSB_ENABLE_DEWAKE as
- * well for good measure.
+ * The GOSUB instruction has the following memory layout.
+ *
+ * +------------------------------------------------------------+
+ * | Opcode | Rsvd | Head Ptr | Tail Ptr |
+ * | 0x0c | | | |
+ * +------------------------------------------------------------+
+ * |<- 8bits->|<- 4bits ->|<-- 26bits -->|<-- 26bits -->|
+ *
+ * We have only 26 bits each to represent the head and tail
+ * pointers even though the addresses themselves are 32 bits wide.
+ * This is not a problem because the addresses are cacheline (64 byte)
+ * aligned, so their low 6 bits are always zero. We therefore shift
+ * each address right by 6 before embedding it in the GOSUB
+ * instruction.
*/
- intel_dsb_reg_write(dsb, DSB_PMCTRL(crtc->pipe, dsb->id), 0);
- intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
- DSB_FORCE_DEWAKE, 0);
+ head_tail = ((u64)(DSB_GOSUB_CONVERT_ADDR(head)) << DSB_GOSUB_HEAD_SHIFT) |
+ ((u64)(DSB_GOSUB_CONVERT_ADDR(tail)) << DSB_GOSUB_TAIL_SHIFT);
+
+ intel_dsb_emit(dsb, lower_32_bits(head_tail),
+ (DSB_OPCODE_GOSUB << DSB_OPCODE_SHIFT) |
+ upper_32_bits(head_tail));
+
+ /*
+ * "NOTE: the instructions within the cacheline
+ * FOLLOWING the GOSUB instruction must be NOPs."
+ */
+ intel_dsb_align_tail(dsb);
+}
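
The packing described in the layout comment above is easy to check in isolation: both pointers are cacheline aligned, so dropping the six zero low bits leaves at most 26 significant bits per field, and shifting back restores the byte addresses. A standalone sketch (constants mirror the DSB_GOSUB_* definitions above):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GOSUB_HEAD_SHIFT	26
#define GOSUB_TAIL_SHIFT	0
#define GOSUB_ADDR(x)		((uint64_t)((x) >> 6))

static uint64_t gosub_pack(uint32_t head, uint32_t tail)
{
	assert((head & 0x3f) == 0 && (tail & 0x3f) == 0);	/* 64-byte aligned */
	return (GOSUB_ADDR(head) << GOSUB_HEAD_SHIFT) |
	       (GOSUB_ADDR(tail) << GOSUB_TAIL_SHIFT);
}

int main(void)
{
	uint64_t ht = gosub_pack(0x00100000, 0x00100040);

	/* unpacking the 26-bit fields restores the original addresses */
	printf("head %#x tail %#x\n",
	       (uint32_t)((ht >> GOSUB_HEAD_SHIFT) & 0x3ffffff) << 6,
	       (uint32_t)((ht >> GOSUB_TAIL_SHIFT) & 0x3ffffff) << 6);
	return 0;
}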
+
+void intel_dsb_gosub_finish(struct intel_dsb *dsb)
+{
+ intel_dsb_align_tail(dsb);
+
+ /*
+ * Wa_16024917128
+ * "Ensure that all subroutines called by GOSUB end with a cacheline of NOPs"
+ */
+ intel_dsb_noop(dsb, 8);
+
+ intel_dsb_buffer_flush_map(&dsb->dsb_buf);
+}
+
+void intel_dsb_finish(struct intel_dsb *dsb)
+{
intel_dsb_align_tail(dsb);
intel_dsb_buffer_flush_map(&dsb->dsb_buf);
@@ -539,6 +650,9 @@ static u32 dsb_error_int_status(struct intel_display *display)
if (DISPLAY_VER(display) >= 14)
errors |= DSB_ATS_FAULT_INT_STATUS;
+ if (DISPLAY_VER(display) >= 30)
+ errors |= DSB_GOSUB_INT_STATUS;
+
return errors;
}
@@ -553,17 +667,46 @@ static u32 dsb_error_int_en(struct intel_display *display)
if (DISPLAY_VER(display) >= 14)
errors |= DSB_ATS_FAULT_INT_EN;
+ /*
+ * Wa_16024917128
+ * "Disable nested GOSUB interrupt (DSB_INTERRUPT bit 21)"
+ */
+ if (0 && DISPLAY_VER(display) >= 30)
+ errors |= DSB_GOSUB_INT_EN;
+
return errors;
}
+/*
+ * FIXME calibrate these sensibly, ideally compute based on
+ * the number of registers to be written. But that requires
+ * measuring the actual DSB execution speed on each platform
+ * (and the speed also depends on CDCLK and memory clock)...
+ */
+static int intel_dsb_noarm_exec_time_us(void)
+{
+ return 80;
+}
+
+static int intel_dsb_arm_exec_time_us(void)
+{
+ return 20;
+}
+
+int intel_dsb_exec_time_us(void)
+{
+ return intel_dsb_noarm_exec_time_us() +
+ intel_dsb_arm_exec_time_us();
+}
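
Since the exec-time estimate above feeds intel_dsb_vblank_evade() as a scanline count, a hedged conversion sketch may help; the formula follows the driver's intel_usecs_to_scanlines() (usecs times the pixel clock in kHz, divided by 1000, gives pixels, and dividing by htotal gives lines), though treat that exact helper as an assumption here:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int usecs_to_scanlines(int crtc_clock_khz, int htotal, int usecs)
{
	return DIV_ROUND_UP(usecs * crtc_clock_khz, 1000 * htotal);
}

int main(void)
{
	/* hypothetical 594 MHz dot clock with htotal 4400: 20 us ~= 3 lines */
	printf("%d scanlines\n", usecs_to_scanlines(594000, 4400, 20));
	return 0;
}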
+
void intel_dsb_vblank_evade(struct intel_atomic_state *state,
struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
const struct intel_crtc_state *crtc_state =
intel_pre_commit_crtc_state(state, crtc);
- /* FIXME calibrate sensibly */
- int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 20);
+ int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
+ intel_dsb_arm_exec_time_us());
int start, end;
/*
@@ -605,13 +748,11 @@ static void _intel_dsb_chain(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state->base.dev);
struct intel_crtc *crtc = dsb->crtc;
enum pipe pipe = crtc->pipe;
- u32 tail;
if (drm_WARN_ON(display->drm, dsb->id == chained_dsb->id))
return;
- tail = chained_dsb->free_pos * 4;
- if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
+ if (!assert_dsb_tail_is_aligned(chained_dsb))
return;
intel_dsb_reg_write(dsb, DSB_CTRL(pipe, chained_dsb->id),
@@ -631,13 +772,15 @@ static void _intel_dsb_chain(struct intel_atomic_state *state,
intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id),
DSB_ENABLE_DEWAKE |
DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
+ } else {
+ intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id), 0);
}
intel_dsb_reg_write(dsb, DSB_HEAD(pipe, chained_dsb->id),
- intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf));
+ intel_dsb_head(chained_dsb));
intel_dsb_reg_write(dsb, DSB_TAIL(pipe, chained_dsb->id),
- intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf) + tail);
+ intel_dsb_tail(chained_dsb));
if (ctrl & DSB_WAIT_FOR_VBLANK) {
/*
@@ -652,6 +795,13 @@ static void _intel_dsb_chain(struct intel_atomic_state *state,
intel_dsb_wait_scanline_out(state, dsb,
dsb_dewake_scanline_start(state, crtc),
dsb_dewake_scanline_end(state, crtc));
+
+ /*
+ * DSB_FORCE_DEWAKE remains active even after DSB is
+ * disabled, so make sure to clear it.
+ */
+ intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
+ DSB_FORCE_DEWAKE, 0);
}
}
@@ -676,16 +826,19 @@ void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
intel_dsb_wait_usec(dsb, usecs);
}
-static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
- int hw_dewake_scanline)
+/**
+ * intel_dsb_commit() - Trigger workload execution of DSB.
+ * @dsb: DSB context
+ *
+ * This function is used to do actual write to hardware using DSB.
+ */
+void intel_dsb_commit(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
struct intel_display *display = to_intel_display(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- u32 tail;
- tail = dsb->free_pos * 4;
- if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
+ if (!assert_dsb_tail_is_aligned(dsb))
return;
if (is_dsb_busy(display, pipe, dsb->id)) {
@@ -695,7 +848,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
}
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
- ctrl | DSB_ENABLE);
+ DSB_ENABLE);
intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id),
dsb->chicken);
@@ -704,45 +857,13 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
dsb_error_int_en(display) | DSB_PROG_INT_EN);
- intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
- intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
-
- if (hw_dewake_scanline >= 0) {
- int diff, position;
+ intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id), 0);
- intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id),
- DSB_ENABLE_DEWAKE |
- DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
-
- /*
- * Force DEwake immediately if we're already past
- * or close to racing past the target scanline.
- */
- position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;
-
- diff = hw_dewake_scanline - position;
- intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id),
- (diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) |
- DSB_BLOCK_DEWAKE_EXTENSION);
- }
+ intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
+ intel_dsb_head(dsb));
intel_de_write_fw(display, DSB_TAIL(pipe, dsb->id),
- intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
-}
-
-/**
- * intel_dsb_commit() - Trigger workload execution of DSB.
- * @dsb: DSB context
- * @wait_for_vblank: wait for vblank before executing
- *
- * This function is used to do actual write to hardware using DSB.
- */
-void intel_dsb_commit(struct intel_dsb *dsb,
- bool wait_for_vblank)
-{
- _intel_dsb_commit(dsb,
- wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0,
- wait_for_vblank ? dsb->hw_dewake_scanline : -1);
+ intel_dsb_tail(dsb));
}
void intel_dsb_wait(struct intel_dsb *dsb)
@@ -895,4 +1016,7 @@ void intel_dsb_irq_handler(struct intel_display *display,
if (errors & DSB_POLL_ERR_INT_STATUS)
drm_err(display->drm, "[CRTC:%d:%s] DSB %d poll error\n",
crtc->base.base.id, crtc->base.name, dsb_id);
+ if (errors & DSB_GOSUB_INT_STATUS)
+ drm_err(display->drm, "[CRTC:%d:%s] DSB %d GOSUB programming error\n",
+ crtc->base.base.id, crtc->base.name, dsb_id);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index e843c52bf97c..c8f4499916eb 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -26,12 +26,16 @@ enum intel_dsb_id {
I915_MAX_DSBS,
};
+unsigned int intel_dsb_size(struct intel_dsb *dsb);
+unsigned int intel_dsb_head(struct intel_dsb *dsb);
struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
struct intel_crtc *crtc,
enum intel_dsb_id dsb_id,
unsigned int max_cmds);
void intel_dsb_finish(struct intel_dsb *dsb);
+void intel_dsb_gosub_finish(struct intel_dsb *dsb);
void intel_dsb_cleanup(struct intel_dsb *dsb);
+int intel_dsb_exec_time_us(void);
void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val);
void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
@@ -57,13 +61,14 @@ void intel_dsb_vblank_evade(struct intel_atomic_state *state,
void intel_dsb_poll(struct intel_dsb *dsb,
i915_reg_t reg, u32 mask, u32 val,
int wait_us, int count);
+void intel_dsb_gosub(struct intel_dsb *dsb,
+ struct intel_dsb *sub_dsb);
void intel_dsb_chain(struct intel_atomic_state *state,
struct intel_dsb *dsb,
struct intel_dsb *chained_dsb,
bool wait_for_vblank);
-void intel_dsb_commit(struct intel_dsb *dsb,
- bool wait_for_vblank);
+void intel_dsb_commit(struct intel_dsb *dsb);
void intel_dsb_wait(struct intel_dsb *dsb);
void intel_dsb_irq_handler(struct intel_display *display,
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_regs.h b/drivers/gpu/drm/i915/display/intel_dsb_regs.h
index cb6e0e5624a6..230104f36145 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb_regs.h
@@ -51,11 +51,13 @@
#define DSB_RESET_SM_STATE_MASK REG_GENMASK(5, 4)
#define DSB_RUN_SM_STATE_MASK REG_GENMASK(2, 0)
#define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x28)
+#define DSB_GOSUB_INT_EN REG_BIT(21) /* ptl+ */
#define DSB_ATS_FAULT_INT_EN REG_BIT(20) /* mtl+ */
#define DSB_GTT_FAULT_INT_EN REG_BIT(19)
#define DSB_RSPTIMEOUT_INT_EN REG_BIT(18)
#define DSB_POLL_ERR_INT_EN REG_BIT(17)
#define DSB_PROG_INT_EN REG_BIT(16)
+#define DSB_GOSUB_INT_STATUS REG_BIT(5) /* ptl+ */
#define DSB_ATS_FAULT_INT_STATUS REG_BIT(4) /* mtl+ */
#define DSB_GTT_FAULT_INT_STATUS REG_BIT(3)
#define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2)
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 29c920983413..e6a851d276f8 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -36,12 +36,11 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
-
#include <video/mipi_display.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index b61520353c92..08b48e36aca6 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -34,11 +34,11 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/intel_encoder.c b/drivers/gpu/drm/i915/display/intel_encoder.c
index 21d638535497..0b7bd26f4339 100644
--- a/drivers/gpu/drm/i915/display/intel_encoder.c
+++ b/drivers/gpu/drm/i915/display/intel_encoder.c
@@ -5,8 +5,7 @@
#include <linux/workqueue.h>
-#include "i915_drv.h"
-
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_encoder.h"
@@ -32,9 +31,9 @@ void intel_encoder_link_check_flush_work(struct intel_encoder *encoder)
void intel_encoder_link_check_queue_work(struct intel_encoder *encoder, int delay_ms)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- mod_delayed_work(i915->unordered_wq,
+ mod_delayed_work(display->wq.unordered,
&encoder->link_check_work, msecs_to_jiffies(delay_ms));
}
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 05393bd60c98..0da842bd2f2f 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -11,14 +11,15 @@
#include <drm/drm_modeset_helper.h>
#include "i915_drv.h"
-#include "intel_atomic_plane.h"
#include "intel_bo.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#define check_array_bounds(display, a, i) drm_WARN_ON((display)->drm, (i) >= ARRAY_SIZE(a))
@@ -421,21 +422,22 @@ unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
/**
* intel_fb_get_format_info: Get a modifier specific format information
- * @cmd: FB add command structure
+ * @pixel_format: pixel format
+ * @modifier: modifier
*
* Returns:
- * Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0],
+ * Returns the format information for @pixel_format specific to @modifier,
* or %NULL if the modifier doesn't override the format.
*/
const struct drm_format_info *
-intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+intel_fb_get_format_info(u32 pixel_format, u64 modifier)
{
- const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]);
+ const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier);
if (!md || !md->formats)
return NULL;
- return lookup_format_info(md->formats, md->format_count, cmd->pixel_format);
+ return lookup_format_info(md->formats, md->format_count, pixel_format);
}
static bool plane_caps_contain_any(u8 caps, u8 mask)
@@ -1285,10 +1287,10 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- return DISPLAY_VER(dev_priv) < 4 ||
+ return DISPLAY_VER(display) < 4 ||
(plane->fbc && !plane_state->no_fbc_reason &&
plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}
@@ -2205,6 +2207,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct drm_gem_object *obj,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct intel_display *display = to_intel_display(obj->dev);
@@ -2252,7 +2255,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err_frontbuffer_put;
}
- drm_helper_mode_fill_fb_struct(display->drm, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(display->drm, fb, info, mode_cmd);
for (i = 0; i < fb->format->num_planes; i++) {
unsigned int stride_alignment;
@@ -2322,6 +2325,7 @@ err:
struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
struct drm_framebuffer *fb;
@@ -2332,7 +2336,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (IS_ERR(obj))
return ERR_CAST(obj);
- fb = intel_framebuffer_create(obj, &mode_cmd);
+ fb = intel_framebuffer_create(obj, info, &mode_cmd);
drm_gem_object_put(obj);
return fb;
@@ -2340,16 +2344,17 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *
intel_framebuffer_create(struct drm_gem_object *obj,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct intel_framebuffer *intel_fb;
int ret;
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ intel_fb = intel_bo_alloc_framebuffer();
if (!intel_fb)
return ERR_PTR(-ENOMEM);
- ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
+ ret = intel_framebuffer_init(intel_fb, obj, info, mode_cmd);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index bdd76b372957..403b8b63721a 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -47,7 +47,7 @@ u64 *intel_fb_plane_get_modifiers(struct intel_display *display,
bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier);
const struct drm_format_info *
-intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+intel_fb_get_format_info(u32 pixel_format, u64 modifier);
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
@@ -102,13 +102,16 @@ void intel_add_fb_offsets(int *x, int *y,
int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_gem_object *obj,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_gem_object *obj,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *user_mode_cmd);
bool intel_fb_modifier_uses_dpt(struct intel_display *display, u64 modifier);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
index 3d338a728354..b0e8b89f7ce8 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_bo.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index c648ab8a93d7..5a0151775a3a 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -11,12 +11,13 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
-#include "intel_atomic_plane.h"
+#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
+#include "intel_plane.h"
static struct i915_vma *
intel_fb_pin_to_dpt(const struct drm_framebuffer *fb,
@@ -333,3 +334,8 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
intel_dpt_unpin_from_ggtt(fb->dpt_vm);
}
}
+
+void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map)
+{
+ iosys_map_set_vaddr_iomem(map, i915_vma_get_iomap(vma));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h
index 01770dbba2e0..81ab79da1af7 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h
@@ -12,6 +12,7 @@ struct drm_framebuffer;
struct i915_vma;
struct intel_plane_state;
struct i915_gtt_view;
+struct iosys_map;
struct i915_vma *
intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
@@ -27,5 +28,6 @@ void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags);
int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
const struct intel_plane_state *old_plane_state);
void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
+void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index bed2bba20b55..685ac98bd001 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -45,9 +45,10 @@
#include <drm/drm_fourcc.h>
#include "gem/i915_gem_stolen.h"
+
#include "gt/intel_gt_types.h"
+
#include "i915_drv.h"
-#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "i915_vma.h"
@@ -55,6 +56,7 @@
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_device.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
@@ -550,10 +552,6 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc)
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
-
- /* wa_18038517565 Enable DPFC clock gating after FBC disable */
- if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
- fbc_compressor_clkgate_disable_wa(fbc, false);
}
}
@@ -1574,7 +1572,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (IS_ERR(cdclk_state))
return PTR_ERR(cdclk_state);
- if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
+ if (crtc_state->pixel_rate >= intel_cdclk_logical(cdclk_state) * 95 / 100) {
plane_state->no_fbc_reason = "pixel rate too high";
return 0;
}
@@ -1708,6 +1706,10 @@ static void __intel_fbc_disable(struct intel_fbc *fbc)
__intel_fbc_cleanup_cfb(fbc);
+ /* wa_18038517565 Enable DPFC clock gating after FBC disable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, false);
+
fbc->state.plane = NULL;
fbc->flip_pending = false;
fbc->busy_bits = 0;
@@ -2009,7 +2011,7 @@ void intel_fbc_reset_underrun(struct intel_display *display)
static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = to_i915(fbc->display->drm);
+ struct intel_display *display = fbc->display;
/*
* There's no guarantee that underrun_detected won't be set to true
@@ -2022,7 +2024,7 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
if (READ_ONCE(fbc->underrun_detected))
return;
- queue_work(i915->unordered_wq, &fbc->underrun_work);
+ queue_work(display->wq.unordered, &fbc->underrun_work);
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 2dc4029d71ed..7c4709d58aa3 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -512,3 +512,8 @@ struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev)
{
return fbdev ? fbdev->vma : NULL;
}
+
+void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map)
+{
+ intel_fb_get_map(fbdev->vma, map);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index a15e3e222a0c..150cc5f45bb3 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -13,6 +13,7 @@ struct drm_fb_helper_surface_size;
struct intel_display;
struct intel_fbdev;
struct intel_framebuffer;
+struct iosys_map;
#ifdef CONFIG_DRM_FBDEV_EMULATION
int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
@@ -22,7 +23,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
void intel_fbdev_setup(struct intel_display *display);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev);
-
+void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map);
#else
#define INTEL_FBDEV_DRIVER_OPS \
.fbdev_probe = NULL
@@ -39,6 +40,9 @@ static inline struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev
return NULL;
}
+static inline void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map)
+{
+}
#endif
#endif /* __INTEL_FBDEV_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index 5f4cb3328265..210aee9ae88b 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fbdev_fb.h"
@@ -61,7 +62,11 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
return ERR_PTR(-ENOMEM);
}
- fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj), &mode_cmd);
+ fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj),
+ drm_get_format_info(display->drm,
+ mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd);
i915_gem_object_put(obj);
return to_intel_framebuffer(fb);
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 169bbe154b5c..8039a84671cc 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -14,6 +14,7 @@
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_fdi.h"
@@ -910,7 +911,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
/* Configure Port Clock Select */
- drm_WARN_ON(display->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
+ drm_WARN_ON(display->drm, crtc_state->intel_dpll->info->id != DPLL_ID_SPLL);
intel_ddi_enable_clock(encoder, crtc_state);
/* Start the training iterating through available voltages and emphasis,
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 2a787897b2d3..c2ce8461ac9e 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -30,6 +30,7 @@
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
diff --git a/drivers/gpu/drm/i915/display/intel_flipq.c b/drivers/gpu/drm/i915/display/intel_flipq.c
new file mode 100644
index 000000000000..6ab2272ab2df
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_flipq.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/pci.h>
+
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
+#include "intel_step.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "intel_flipq.h"
+#include "intel_dmc.h"
+#include "intel_dmc_regs.h"
+#include "intel_dsb.h"
+#include "intel_vblank.h"
+#include "intel_vrr.h"
+
+/**
+ * DOC: DMC Flip Queue
+ *
+ * A flip queue is a ring buffer implemented by the pipe DMC firmware.
+ * The driver inserts entries into the queues to be executed by the
+ * pipe DMC at a specified presentation timestamp (PTS).
+ *
+ * Each pipe DMC provides several queues:
+ *
+ * - 1 general queue (two DSB buffers executed per entry)
+ * - 3 plane queues (one DSB buffer executed per entry)
+ * - 1 fast queue (deprecated)
+ */
+
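+/*
+ * Queues are consumed ring-buffer style: the driver advances a software
+ * tail (intel_flipq_write_tail()) while the pipe DMC advances the
+ * hardware head (PIPEDMC_FPQ_CHP) as entries execute.
+ */
+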
+#define for_each_flipq(flipq_id) \
+ for ((flipq_id) = INTEL_FLIPQ_PLANE_1; (flipq_id) < MAX_INTEL_FLIPQ; (flipq_id)++)
+
+static int intel_flipq_offset(enum intel_flipq_id flipq_id)
+{
+ switch (flipq_id) {
+ case INTEL_FLIPQ_PLANE_1:
+ return 0x008;
+ case INTEL_FLIPQ_PLANE_2:
+ return 0x108;
+ case INTEL_FLIPQ_PLANE_3:
+ return 0x208;
+ case INTEL_FLIPQ_GENERAL:
+ return 0x308;
+ case INTEL_FLIPQ_FAST:
+ return 0x3c8;
+ default:
+ MISSING_CASE(flipq_id);
+ return 0;
+ }
+}
+
+static int intel_flipq_size_dw(enum intel_flipq_id flipq_id)
+{
+ switch (flipq_id) {
+ case INTEL_FLIPQ_PLANE_1:
+ case INTEL_FLIPQ_PLANE_2:
+ case INTEL_FLIPQ_PLANE_3:
+ return 64;
+ case INTEL_FLIPQ_GENERAL:
+ case INTEL_FLIPQ_FAST:
+ return 48;
+ default:
+ MISSING_CASE(flipq_id);
+ return 1;
+ }
+}
+
+static int intel_flipq_elem_size_dw(enum intel_flipq_id flipq_id)
+{
+ switch (flipq_id) {
+ case INTEL_FLIPQ_PLANE_1:
+ case INTEL_FLIPQ_PLANE_2:
+ case INTEL_FLIPQ_PLANE_3:
+ return 4;
+ case INTEL_FLIPQ_GENERAL:
+ case INTEL_FLIPQ_FAST:
+ return 6;
+ default:
+ MISSING_CASE(flipq_id);
+ return 1;
+ }
+}
+
+static int intel_flipq_size_entries(enum intel_flipq_id flipq_id)
+{
+ return intel_flipq_size_dw(flipq_id) / intel_flipq_elem_size_dw(flipq_id);
+}
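+
+/*
+ * Derived capacities: each plane queue holds 64 / 4 = 16 entries, the
+ * general and fast queues 48 / 6 = 8 entries.
+ */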
+
+static void intel_flipq_crtc_init(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ enum intel_flipq_id flipq_id;
+
+ for_each_flipq(flipq_id) {
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+
+ flipq->start_mmioaddr = intel_pipedmc_start_mmioaddr(crtc) + intel_flipq_offset(flipq_id);
+ flipq->flipq_id = flipq_id;
+
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] FQ %d: start 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ flipq_id, flipq->start_mmioaddr);
+ }
+}
+
+bool intel_flipq_supported(struct intel_display *display)
+{
+ if (!display->params.enable_flipq)
+ return false;
+
+ if (!display->dmc.dmc)
+ return false;
+
+ if (DISPLAY_VER(display) == 20)
+ return true;
+
+ /* DMC firmware expects VRR timing generator to be used */
+ return DISPLAY_VER(display) >= 30 && intel_vrr_always_use_vrr_tg(display);
+}
+
+void intel_flipq_init(struct intel_display *display)
+{
+ struct intel_crtc *crtc;
+
+ intel_dmc_wait_fw_load(display);
+
+ for_each_intel_crtc(display->drm, crtc)
+ intel_flipq_crtc_init(crtc);
+}
+
+static int cdclk_factor(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 30)
+ return 120;
+ else
+ return 280;
+}
+
+int intel_flipq_exec_time_us(struct intel_display *display)
+{
+ return intel_dsb_exec_time_us() +
+ DIV_ROUND_UP(display->cdclk.hw.cdclk * cdclk_factor(display), 540000) +
+ display->sagv.block_time_us;
+}
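+
+/*
+ * e.g. (hypothetical numbers) at a 540 MHz CDCLK on a display version 30
+ * platform: 100 us of DSB execution time plus
+ * DIV_ROUND_UP(540000 * 120, 540000) = 120 us plus the SAGV block time.
+ */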
+
+static int intel_flipq_preempt_timeout_ms(struct intel_display *display)
+{
+ return DIV_ROUND_UP(intel_flipq_exec_time_us(display), 1000);
+}
+
+static void intel_flipq_preempt(struct intel_crtc *crtc, bool preempt)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_de_rmw(display, PIPEDMC_FQ_CTRL(crtc->pipe),
+ PIPEDMC_FQ_CTRL_PREEMPT, preempt ? PIPEDMC_FQ_CTRL_PREEMPT : 0);
+
+ if (preempt &&
+ intel_de_wait_for_clear(display,
+ PIPEDMC_FQ_STATUS(crtc->pipe),
+ PIPEDMC_FQ_STATUS_BUSY,
+ intel_flipq_preempt_timeout_ms(display)))
+ drm_err(display->drm, "[CRTC:%d:%s] flip queue preempt timeout\n",
+ crtc->base.base.id, crtc->base.name);
+}
+
+static int intel_flipq_current_head(struct intel_crtc *crtc, enum intel_flipq_id flipq_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ return intel_de_read(display, PIPEDMC_FPQ_CHP(crtc->pipe, flipq_id));
+}
+
+static void intel_flipq_write_tail(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_de_write(display, PIPEDMC_FPQ_ATOMIC_TP(crtc->pipe),
+ PIPEDMC_FPQ_PLANEQ_3_TP(crtc->flipq[INTEL_FLIPQ_PLANE_3].tail) |
+ PIPEDMC_FPQ_PLANEQ_2_TP(crtc->flipq[INTEL_FLIPQ_PLANE_2].tail) |
+ PIPEDMC_FPQ_PLANEQ_1_TP(crtc->flipq[INTEL_FLIPQ_PLANE_1].tail) |
+ PIPEDMC_FPQ_FASTQ_TP(crtc->flipq[INTEL_FLIPQ_FAST].tail) |
+ PIPEDMC_FPQ_GENERALQ_TP(crtc->flipq[INTEL_FLIPQ_GENERAL].tail));
+}
+
+static void intel_flipq_sw_dmc_wake(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_de_write(display, PIPEDMC_FPQ_CTL1(crtc->pipe), PIPEDMC_SW_DMC_WAKE);
+}
+
+static int intel_flipq_exec_time_lines(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
+ intel_flipq_exec_time_us(display));
+}
+
+void intel_flipq_dump(struct intel_crtc *crtc,
+ enum intel_flipq_id flipq_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+ u32 tmp;
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] FQ %d @ 0x%x: ",
+ crtc->base.base.id, crtc->base.name, flipq_id,
+ flipq->start_mmioaddr);
+ for (int i = 0; i < intel_flipq_size_dw(flipq_id); i++) {
+ printk(KERN_CONT " 0x%08x",
+ intel_de_read(display, PIPEDMC_FQ_RAM(flipq->start_mmioaddr, i)));
+ if (i % intel_flipq_elem_size_dw(flipq_id) == intel_flipq_elem_size_dw(flipq_id) - 1)
+ printk(KERN_CONT "\n");
+ }
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] FQ %d: chp=0x%x, hp=0x%x\n",
+ crtc->base.base.id, crtc->base.name, flipq_id,
+ intel_de_read(display, PIPEDMC_FPQ_CHP(crtc->pipe, flipq_id)),
+ intel_de_read(display, PIPEDMC_FPQ_HP(crtc->pipe, flipq_id)));
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] FQ %d: current head %d\n",
+ crtc->base.base.id, crtc->base.name, flipq_id,
+ intel_flipq_current_head(crtc, flipq_id));
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] flip queue timestamp: 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ intel_de_read(display, PIPEDMC_FPQ_TS(crtc->pipe)));
+
+ tmp = intel_de_read(display, PIPEDMC_FPQ_ATOMIC_TP(crtc->pipe));
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] flip queue atomic tails: P3 %d, P2 %d, P1 %d, G %d, F %d\n",
+ crtc->base.base.id, crtc->base.name,
+ REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_3_TP_MASK, tmp),
+ REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_2_TP_MASK, tmp),
+ REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_1_TP_MASK, tmp),
+ REG_FIELD_GET(PIPEDMC_FPQ_GENERALQ_TP_MASK, tmp),
+ REG_FIELD_GET(PIPEDMC_FPQ_FASTQ_TP_MASK, tmp));
+}
+
+void intel_flipq_reset(struct intel_display *display, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
+ enum intel_flipq_id flipq_id;
+
+ intel_de_write(display, PIPEDMC_FQ_CTRL(pipe), 0);
+
+ intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(pipe), 0);
+ intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(pipe), 0);
+
+ for_each_flipq(flipq_id) {
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+
+ intel_de_write(display, PIPEDMC_FPQ_HP(pipe, flipq_id), 0);
+ intel_de_write(display, PIPEDMC_FPQ_CHP(pipe, flipq_id), 0);
+
+ flipq->tail = 0;
+ }
+
+ intel_de_write(display, PIPEDMC_FPQ_ATOMIC_TP(pipe), 0);
+}
+
+static enum pipedmc_event_id flipq_event_id(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 30)
+ return PIPEDMC_EVENT_FULL_FQ_WAKE_TRIGGER;
+ else
+ return PIPEDMC_EVENT_SCANLINE_INRANGE_FQ_TRIGGER;
+}
+
+void intel_flipq_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ /* FIXME what to do with VRR? */
+ int scanline = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) -
+ intel_flipq_exec_time_lines(crtc_state);
+
+ if (DISPLAY_VER(display) >= 30) {
+ u32 start_mmioaddr = intel_pipedmc_start_mmioaddr(crtc);
+
+ /* undocumented magic DMC variables */
+ intel_de_write(display, PTL_PIPEDMC_EXEC_TIME_LINES(start_mmioaddr),
+ intel_flipq_exec_time_lines(crtc_state));
+ intel_de_write(display, PTL_PIPEDMC_END_OF_EXEC_GB(start_mmioaddr),
+ 100);
+ }
+
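+ /*
+ * Program a small scanline window (lower = scanline - 2, upper =
+ * scanline) in which the DMC wake/flip queue event fires.
+ */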
+ intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(crtc->pipe),
+ PIPEDMC_SCANLINE_UPPER(scanline));
+ intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(crtc->pipe),
+ PIPEDMC_SCANLINEINRANGECMP_EN |
+ PIPEDMC_SCANLINE_LOWER(scanline - 2));
+
+ intel_pipedmc_enable_event(crtc, flipq_event_id(display));
+
+ intel_de_write(display, PIPEDMC_FQ_CTRL(crtc->pipe), PIPEDMC_FQ_CTRL_ENABLE);
+}
+
+void intel_flipq_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ intel_flipq_preempt(crtc, true);
+
+ intel_de_write(display, PIPEDMC_FQ_CTRL(crtc->pipe), 0);
+
+ intel_pipedmc_disable_event(crtc, flipq_event_id(display));
+
+ intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(crtc->pipe), 0);
+ intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(crtc->pipe), 0);
+}
+
+static bool assert_flipq_has_room(struct intel_crtc *crtc,
+ enum intel_flipq_id flipq_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+ int head, size = intel_flipq_size_entries(flipq_id);
+
+ head = intel_flipq_current_head(crtc, flipq_id);
+
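+ /*
+ * Ring occupancy is (tail + size - head) % size; one slot is kept
+ * unused so a full queue (occupancy == size - 1) is distinguishable
+ * from an empty one (occupancy == 0).
+ */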
+ return !drm_WARN(display->drm,
+ (flipq->tail + size - head) % size >= size - 1,
+ "[CRTC:%d:%s] FQ %d overflow (head %d, tail %d, size %d)\n",
+ crtc->base.base.id, crtc->base.name, flipq_id,
+ head, flipq->tail, size);
+}
+
+static void intel_flipq_write(struct intel_display *display,
+ struct intel_flipq *flipq, u32 data, int i)
+{
+ intel_de_write(display, PIPEDMC_FQ_RAM(flipq->start_mmioaddr, flipq->tail *
+ intel_flipq_elem_size_dw(flipq->flipq_id) + i), data);
+}
+
+static void lnl_flipq_add(struct intel_display *display,
+ struct intel_flipq *flipq,
+ unsigned int pts,
+ enum intel_dsb_id dsb_id,
+ struct intel_dsb *dsb)
+{
+ int i = 0;
+
+ switch (flipq->flipq_id) {
+ case INTEL_FLIPQ_GENERAL:
+ intel_flipq_write(display, flipq, pts, i++);
+ intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
+ intel_flipq_write(display, flipq, LNL_FQ_INTERRUPT |
+ LNL_FQ_DSB_ID(dsb_id) |
+ LNL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
+ intel_flipq_write(display, flipq, 0, i++);
+ intel_flipq_write(display, flipq, 0, i++); /* head for second DSB */
+ intel_flipq_write(display, flipq, 0, i++); /* DSB engine + size for second DSB */
+ break;
+ case INTEL_FLIPQ_PLANE_1:
+ case INTEL_FLIPQ_PLANE_2:
+ case INTEL_FLIPQ_PLANE_3:
+ intel_flipq_write(display, flipq, pts, i++);
+ intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
+ intel_flipq_write(display, flipq, LNL_FQ_INTERRUPT |
+ LNL_FQ_DSB_ID(dsb_id) |
+ LNL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
+ intel_flipq_write(display, flipq, 0, i++);
+ break;
+ default:
+ MISSING_CASE(flipq->flipq_id);
+ return;
+ }
+}
+
+static void ptl_flipq_add(struct intel_display *display,
+ struct intel_flipq *flipq,
+ unsigned int pts,
+ enum intel_dsb_id dsb_id,
+ struct intel_dsb *dsb)
+{
+ int i = 0;
+
+ switch (flipq->flipq_id) {
+ case INTEL_FLIPQ_GENERAL:
+ intel_flipq_write(display, flipq, pts, i++);
+ intel_flipq_write(display, flipq, 0, i++);
+ intel_flipq_write(display, flipq, PTL_FQ_INTERRUPT |
+ PTL_FQ_DSB_ID(dsb_id) |
+ PTL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
+ intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
+ intel_flipq_write(display, flipq, 0, i++); /* DSB engine + size for second DSB */
+ intel_flipq_write(display, flipq, 0, i++); /* head for second DSB */
+ break;
+ case INTEL_FLIPQ_PLANE_1:
+ case INTEL_FLIPQ_PLANE_2:
+ case INTEL_FLIPQ_PLANE_3:
+ intel_flipq_write(display, flipq, pts, i++);
+ intel_flipq_write(display, flipq, 0, i++);
+ intel_flipq_write(display, flipq, PTL_FQ_INTERRUPT |
+ PTL_FQ_DSB_ID(dsb_id) |
+ PTL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
+ intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
+ break;
+ default:
+ MISSING_CASE(flipq->flipq_id);
+ return;
+ }
+}
+
+void intel_flipq_add(struct intel_crtc *crtc,
+ enum intel_flipq_id flipq_id,
+ unsigned int pts,
+ enum intel_dsb_id dsb_id,
+ struct intel_dsb *dsb)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+
+ if (!assert_flipq_has_room(crtc, flipq_id))
+ return;
+
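+ /* make the caller-relative PTS absolute w.r.t. the flip queue timestamp */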
+ pts += intel_de_read(display, PIPEDMC_FPQ_TS(crtc->pipe));
+
+ intel_flipq_preempt(crtc, true);
+
+ if (DISPLAY_VER(display) >= 30)
+ ptl_flipq_add(display, flipq, pts, dsb_id, dsb);
+ else
+ lnl_flipq_add(display, flipq, pts, dsb_id, dsb);
+
+ flipq->tail = (flipq->tail + 1) % intel_flipq_size_entries(flipq->flipq_id);
+ intel_flipq_write_tail(crtc);
+
+ intel_flipq_preempt(crtc, false);
+
+ intel_flipq_sw_dmc_wake(crtc);
+}
+
+/* Wa_18034343758 */
+static bool need_dmc_halt_wa(struct intel_display *display)
+{
+ return DISPLAY_VER(display) == 20 ||
+ (display->platform.pantherlake &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_B0));
+}
+
+void intel_flipq_wait_dmc_halt(struct intel_dsb *dsb, struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ if (need_dmc_halt_wa(display))
+ intel_dsb_wait_usec(dsb, 2);
+}
+
+void intel_flipq_unhalt_dmc(struct intel_dsb *dsb, struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ if (need_dmc_halt_wa(display))
+ intel_dsb_reg_write(dsb, PIPEDMC_CTL(crtc->pipe), 0);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_flipq.h b/drivers/gpu/drm/i915/display/intel_flipq.h
new file mode 100644
index 000000000000..012e3e9a6bcb
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_flipq.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_FLIPQ_H__
+#define __INTEL_FLIPQ_H__
+
+#include <linux/types.h>
+
+enum intel_dsb_id;
+enum intel_flipq_id;
+enum pipe;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_display;
+struct intel_dsb;
+
+bool intel_flipq_supported(struct intel_display *display);
+void intel_flipq_init(struct intel_display *display);
+void intel_flipq_reset(struct intel_display *display, enum pipe pipe);
+
+void intel_flipq_enable(const struct intel_crtc_state *crtc_state);
+void intel_flipq_disable(const struct intel_crtc_state *old_crtc_state);
+
+void intel_flipq_add(struct intel_crtc *crtc,
+ enum intel_flipq_id flip_queue_id,
+ unsigned int pts,
+ enum intel_dsb_id dsb_id,
+ struct intel_dsb *dsb);
+int intel_flipq_exec_time_us(struct intel_display *display);
+void intel_flipq_wait_dmc_halt(struct intel_dsb *dsb, struct intel_crtc *crtc);
+void intel_flipq_unhalt_dmc(struct intel_dsb *dsb, struct intel_crtc *crtc);
+void intel_flipq_dump(struct intel_crtc *crtc,
+ enum intel_flipq_id flip_queue_id);
+
+#endif /* __INTEL_FLIPQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index d55cc77650b7..0d73f32fe7f1 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -37,6 +37,7 @@
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
#include "intel_gmbus_regs.h"
@@ -414,7 +415,7 @@ gmbus_wait_idle(struct intel_display *display)
add_wait_queue(&display->gmbus.wait_queue, &wait);
intel_de_write_fw(display, GMBUS4(display), irq_enable);
- ret = intel_de_wait_fw(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10);
+ ret = intel_de_wait_fw(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10, NULL);
intel_de_write_fw(display, GMBUS4(display), 0);
remove_wait_queue(&display->gmbus.wait_queue, &wait);
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
index 59bad1dda6d6..ab750562566b 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_GMBUS_REGS_H__
#define __INTEL_GMBUS_REGS_H__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define __GMBUS_MMIO_BASE(__display) ((__display)->gmbus.mmio_base)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 3e3038f4ee1f..42202c8bb066 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -14,14 +14,16 @@
#include <linux/random.h>
#include <drm/display/drm_hdcp_helper.h>
+#include <drm/drm_print.h>
#include <drm/intel/i915_component.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
@@ -31,6 +33,7 @@
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_pcode.h"
+#include "intel_step.h"
#define USE_HDCP_GSC(__display) (DISPLAY_VER(__display) >= 14)
@@ -373,7 +376,6 @@ static void intel_hdcp_clear_keys(struct intel_display *display)
static int intel_hdcp_load_keys(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
u32 val;
@@ -398,7 +400,7 @@ static int intel_hdcp_load_keys(struct intel_display *display)
* Mailbox interface.
*/
if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
- ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
+ ret = intel_pcode_write(display->drm, SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
drm_err(display->drm,
"Failed to initiate HDCP key load (%d)\n",
@@ -1088,7 +1090,6 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
u64 value, bool update_property)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -1109,7 +1110,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
hdcp->value = value;
if (update_property) {
drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ if (!queue_work(display->wq.unordered, &hdcp->prop_work))
drm_connector_put(&connector->base);
}
}
@@ -2236,16 +2237,15 @@ static void intel_hdcp_check_work(struct work_struct *work)
check_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (drm_connector_is_unregistered(&connector->base))
return;
if (!intel_hdcp2_check_link(connector))
- queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ queue_delayed_work(display->wq.unordered, &hdcp->check_work,
DRM_HDCP2_CHECK_PERIOD_MS);
else if (!intel_hdcp_check_link(connector))
- queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ queue_delayed_work(display->wq.unordered, &hdcp->check_work,
DRM_HDCP_CHECK_PERIOD_MS);
}
@@ -2436,7 +2436,6 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -2495,7 +2494,7 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
}
if (!ret) {
- queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ queue_delayed_work(display->wq.unordered, &hdcp->check_work,
check_link_interval);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_ENABLED,
@@ -2566,7 +2565,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
to_intel_connector(conn_state->connector);
struct intel_hdcp *hdcp = &connector->hdcp;
bool content_protection_type_changed, desired_and_not_enabled = false;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (!connector->hdcp.shim)
return;
@@ -2593,7 +2592,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ if (!queue_work(display->wq.unordered, &hdcp->prop_work))
drm_connector_put(&connector->base);
mutex_unlock(&hdcp->mutex);
}
@@ -2611,7 +2610,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
*/
if (!desired_and_not_enabled && !content_protection_type_changed) {
drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ if (!queue_work(display->wq.unordered, &hdcp->prop_work))
drm_connector_put(&connector->base);
}
@@ -2735,7 +2734,6 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (!hdcp->shim)
return;
@@ -2743,7 +2741,7 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
atomic_inc(&connector->hdcp.cp_irq_count);
wake_up_all(&connector->hdcp.cp_irq_queue);
- queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
+ queue_delayed_work(display->wq.unordered, &hdcp->check_work, 0);
}
static void __intel_hdcp_info(struct seq_file *m, struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
index f590d7f48ba7..112ce8c896d6 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
@@ -247,7 +247,7 @@
_TRANSA_HDCP2_STREAM_STATUS, \
_TRANSB_HDCP2_STREAM_STATUS)
#define STREAM_ENCRYPTION_STATUS REG_BIT(31)
-#define STREAM_TYPE_STATUS REG_BIT(30)
+#define STREAM_TYPE_STATUS_MASK REG_GENMASK(30, 30)
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_STREAM_STATUS(trans) : \
@@ -263,7 +263,7 @@
#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
_TRANSA_HDCP2_AUTH_STREAM, \
_TRANSB_HDCP2_AUTH_STREAM)
-#define AUTH_STREAM_TYPE REG_BIT(31)
+#define AUTH_STREAM_TYPE_MASK REG_GENMASK(31, 31)
#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_AUTH_STREAM(trans) : \
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 98033471902c..9961ff259298 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -41,11 +41,9 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/intel/intel_lpe_audio.h>
-
#include <media/cec-notifier.h>
#include "g4x_hdmi.h"
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -54,6 +52,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_gmbus.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index fc5d8928c37e..265aa97fcc75 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -30,8 +30,10 @@
#include "i915_irq.h"
#include "intel_connector.h"
#include "intel_display_power.h"
+#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"
@@ -191,40 +193,34 @@ static bool detection_work_enabled(struct intel_display *display)
static bool
mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
lockdep_assert_held(&display->irq.lock);
if (!detection_work_enabled(display))
return false;
- return mod_delayed_work(i915->unordered_wq, work, delay);
+ return mod_delayed_work(display->wq.unordered, work, delay);
}
static bool
queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
lockdep_assert_held(&display->irq.lock);
if (!detection_work_enabled(display))
return false;
- return queue_delayed_work(i915->unordered_wq, work, delay);
+ return queue_delayed_work(display->wq.unordered, work, delay);
}
static bool
queue_detection_work(struct intel_display *display, struct work_struct *work)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
lockdep_assert_held(&display->irq.lock);
if (!detection_work_enabled(display))
return false;
- return queue_work(i915->unordered_wq, work);
+ return queue_work(display->wq.unordered, work);
}
static void
@@ -905,9 +901,14 @@ void intel_hpd_poll_enable(struct intel_display *display)
*/
void intel_hpd_poll_disable(struct intel_display *display)
{
+ struct intel_encoder *encoder;
+
if (!HAS_DISPLAY(display))
return;
+ for_each_intel_dp(display->drm, encoder)
+ intel_dp_dpcd_set_probe(enc_to_intel_dp(encoder), true);
+
WRITE_ONCE(display->hotplug.poll_enabled, false);
spin_lock_irq(&display->irq.lock);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index c024b42369c8..43aee70597bf 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -9,6 +9,7 @@
#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_gmbus.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hti_regs.h b/drivers/gpu/drm/i915/display/intel_hti_regs.h
index e206f2837fc8..39c046bd351c 100644
--- a/drivers/gpu/drm/i915/display/intel_hti_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_hti_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_HTI_REGS_H__
#define __INTEL_HTI_REGS_H__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define HDPORT_STATE _MMIO(0x45050)
#define HDPORT_DPLL_USED_MASK REG_GENMASK(15, 12)
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index a10cd3992607..3caef7f9c7c4 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -3,6 +3,11 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/int_log.h>
+#include <linux/math.h>
+
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
@@ -10,11 +15,33 @@
#include "intel_crtc.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_fdi.h"
#include "intel_link_bw.h"
+static int get_forced_link_bpp_x16(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc)
+{
+ struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ int force_bpp_x16 = INT_MAX;
+ int i;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->base.crtc != &crtc->base)
+ continue;
+
+ if (!connector->link.force_bpp_x16)
+ continue;
+
+ force_bpp_x16 = min(force_bpp_x16, connector->link.force_bpp_x16);
+ }
+
+ return force_bpp_x16 < INT_MAX ? force_bpp_x16 : 0;
+}
+
/**
* intel_link_bw_init_limits - initialize BW limits
* @state: Atomic state
@@ -31,9 +58,10 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
limits->force_fec_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
for_each_pipe(display, pipe) {
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state,
- intel_crtc_for_pipe(display, pipe));
+ intel_atomic_get_new_crtc_state(state, crtc);
+ int forced_bpp_x16 = get_forced_link_bpp_x16(state, crtc);
if (state->base.duplicated && crtc_state) {
limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16;
@@ -42,15 +70,19 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
} else {
limits->max_bpp_x16[pipe] = INT_MAX;
}
+
+ if (forced_bpp_x16)
+ limits->max_bpp_x16[pipe] = min(limits->max_bpp_x16[pipe], forced_bpp_x16);
}
}
/**
- * intel_link_bw_reduce_bpp - reduce maximum link bpp for a selected pipe
+ * __intel_link_bw_reduce_bpp - reduce maximum link bpp for a selected pipe
* @state: atomic state
* @limits: link BW limits
* @pipe_mask: mask of pipes to select from
* @reason: explanation of why bpp reduction is needed
+ * @reduce_forced_bpp: allow reducing bpps below their forced link bpp
*
* Select the pipe from @pipe_mask with the biggest link bpp value and set the
* maximum of link bpp in @limits below this value. Modeset the selected pipe,
@@ -64,10 +96,11 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
* - %-ENOSPC if no pipe can further reduce its link bpp
* - Other negative error, if modesetting the selected pipe failed
*/
-int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
- struct intel_link_bw_limits *limits,
- u8 pipe_mask,
- const char *reason)
+static int __intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits,
+ u8 pipe_mask,
+ const char *reason,
+ bool reduce_forced_bpp)
{
struct intel_display *display = to_intel_display(state);
enum pipe max_bpp_pipe = INVALID_PIPE;
@@ -97,6 +130,10 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
*/
link_bpp_x16 = fxp_q4_from_int(crtc_state->pipe_bpp);
+ if (!reduce_forced_bpp &&
+ link_bpp_x16 <= get_forced_link_bpp_x16(state, crtc))
+ continue;
+
if (link_bpp_x16 > max_bpp_x16) {
max_bpp_x16 = link_bpp_x16;
max_bpp_pipe = crtc->pipe;
@@ -112,6 +149,21 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
BIT(max_bpp_pipe));
}
+int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits,
+ u8 pipe_mask,
+ const char *reason)
+{
+ int ret;
+
+ /* Try to keep any forced link BPP. */
+ ret = __intel_link_bw_reduce_bpp(state, limits, pipe_mask, reason, false);
+ if (ret == -ENOSPC)
+ ret = __intel_link_bw_reduce_bpp(state, limits, pipe_mask, reason, true);
+
+ return ret;
+}
+
/**
* intel_link_bw_set_bpp_limit_for_pipe - set link bpp limit for a pipe to its minimum
* @state: atomic state
@@ -245,3 +297,176 @@ int intel_link_bw_atomic_check(struct intel_atomic_state *state,
return -EAGAIN;
}
+
+static int force_link_bpp_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+
+ seq_printf(m, FXP_Q4_FMT "\n", FXP_Q4_ARGS(connector->link.force_bpp_x16));
+
+ return 0;
+}
+
+static int str_to_fxp_q4_nonneg_int(const char *str, int *val_x16)
+{
+ unsigned int val;
+ int err;
+
+ err = kstrtouint(str, 10, &val);
+ if (err)
+ return err;
+
+ if (val > INT_MAX >> 4)
+ return -ERANGE;
+
+ *val_x16 = fxp_q4_from_int(val);
+
+ return 0;
+}
+
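+/*
+ * Example: "18.5" parses as 18 << 4 = 288 for the integer part plus
+ * DIV_ROUND_CLOSEST(5 << 4, 10) = 8 for the fraction, i.e. 296 in .4
+ * fixed point.
+ */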
+/* modifies str */
+static int str_to_fxp_q4_nonneg(char *str, int *val_x16)
+{
+ const char *int_str;
+ char *frac_str;
+ int frac_digits;
+ int frac_val;
+ int err;
+
+ int_str = strim(str);
+ frac_str = strchr(int_str, '.');
+
+ if (frac_str)
+ *frac_str++ = '\0';
+
+ err = str_to_fxp_q4_nonneg_int(int_str, val_x16);
+ if (err)
+ return err;
+
+ if (!frac_str)
+ return 0;
+
+ /* reject a leading sign: the fractional part must start with a digit */
+ if (!isdigit(*frac_str))
+ return -EINVAL;
+
+ err = str_to_fxp_q4_nonneg_int(frac_str, &frac_val);
+ if (err)
+ return err;
+
+ frac_digits = strlen(frac_str);
+ if (frac_digits > intlog10(INT_MAX) >> 24 ||
+ frac_val > INT_MAX - int_pow(10, frac_digits) / 2)
+ return -ERANGE;
+
+ frac_val = DIV_ROUND_CLOSEST(frac_val, (int)int_pow(10, frac_digits));
+
+ if (*val_x16 > INT_MAX - frac_val)
+ return -ERANGE;
+
+ *val_x16 += frac_val;
+
+ return 0;
+}
+
+static int user_str_to_fxp_q4_nonneg(const char __user *ubuf, size_t len, int *val_x16)
+{
+ char *kbuf;
+ int err;
+
+ kbuf = memdup_user_nul(ubuf, len);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+
+ err = str_to_fxp_q4_nonneg(kbuf, val_x16);
+
+ kfree(kbuf);
+
+ return err;
+}
+
+static bool connector_supports_dsc(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_eDP:
+ return intel_dp_has_dsc(connector);
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ if (connector->mst.dp)
+ return HAS_DSC_MST(display);
+
+ return HAS_DSC(display);
+ default:
+ return false;
+ }
+}
+
+static ssize_t
+force_link_bpp_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_display *display = to_intel_display(connector);
+ int min_bpp;
+ int bpp_x16;
+ int err;
+
+ err = user_str_to_fxp_q4_nonneg(ubuf, len, &bpp_x16);
+ if (err)
+ return err;
+
+ /* TODO: Make the non-DSC min_bpp value connector specific. */
+ if (connector_supports_dsc(connector))
+ min_bpp = intel_dp_dsc_min_src_compressed_bpp();
+ else
+ min_bpp = intel_display_min_pipe_bpp();
+
+ if (bpp_x16 &&
+ (bpp_x16 < fxp_q4_from_int(min_bpp) ||
+ bpp_x16 > fxp_q4_from_int(intel_display_max_pipe_bpp(display))))
+ return -EINVAL;
+
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
+ if (err)
+ return err;
+
+ connector->link.force_bpp_x16 = bpp_x16;
+
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
+
+ *offp += len;
+
+ return len;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(force_link_bpp);
+
+void intel_link_bw_connector_debugfs_add(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct dentry *root = connector->base.debugfs_entry;
+
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ break;
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_DVID:
+ if (HAS_FDI(display))
+ break;
+
+ return;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (HAS_FDI(display) && !HAS_DDI(display))
+ break;
+
+ return;
+ default:
+ return;
+ }
+
+ debugfs_create_file("intel_force_link_bpp", 0644, root,
+ connector, &force_link_bpp_fops);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index e69049cf178f..b499042e62b1 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -11,6 +11,7 @@
#include "intel_display_limits.h"
struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc_state;
struct intel_link_bw_limits {
@@ -32,5 +33,6 @@ bool intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state,
enum pipe pipe);
int intel_link_bw_atomic_check(struct intel_atomic_state *state,
struct intel_link_bw_limits *new_limits);
+void intel_link_bw_connector_debugfs_add(struct intel_connector *connector);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index f94b7eeae20f..abc4b562083d 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -29,9 +29,9 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_hdmi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 8ce7c630da52..7e48a235c99f 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -40,7 +40,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -249,7 +248,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
if (HAS_PCH_SPLIT(display)) {
assert_fdi_rx_pll_disabled(display, pipe);
- assert_shared_dpll_disabled(display, crtc_state->shared_dpll);
+ assert_dpll_disabled(display, crtc_state->intel_dpll);
} else {
assert_pll_disabled(display, pipe);
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 0325b0c9506d..8415f3d703ed 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -23,6 +23,7 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_fifo_underrun.h"
@@ -92,10 +93,10 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
crtc->active = false;
crtc->base.enabled = false;
- if (crtc_state->shared_dpll)
- intel_unreference_shared_dpll_crtc(crtc,
- crtc_state->shared_dpll,
- &crtc_state->shared_dpll->state);
+ if (crtc_state->intel_dpll)
+ intel_dpll_crtc_put(crtc,
+ crtc_state->intel_dpll,
+ &crtc_state->intel_dpll->state);
}
static void set_encoder_for_connector(struct intel_connector *connector,
@@ -565,7 +566,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
*/
return display->platform.sandybridge &&
crtc_state->hw.active &&
- crtc_state->shared_dpll &&
+ crtc_state->intel_dpll &&
crtc_state->port_clock == 0;
}
@@ -960,7 +961,7 @@ void intel_modeset_setup_hw_state(struct intel_display *display,
drm_crtc_vblank_reset(&crtc->base);
if (crtc_state->hw.active) {
- intel_dmc_enable_pipe(display, crtc->pipe);
+ intel_dmc_enable_pipe(crtc_state);
intel_crtc_vblank_on(crtc_state);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index 766a9983665a..f2f6b9d9afa1 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -243,7 +243,7 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state,
intel_wm_state_verify(state, crtc);
verify_connector_state(state, crtc);
verify_crtc_state(state, crtc);
- intel_shared_dpll_state_verify(state, crtc);
+ intel_dpll_state_verify(state, crtc);
intel_mpllb_state_verify(state, crtc);
intel_cx0pll_state_verify(state, crtc);
}
@@ -252,5 +252,5 @@ void intel_modeset_verify_disabled(struct intel_atomic_state *state)
{
verify_encoder_state(state);
verify_connector_state(state, NULL);
- intel_shared_dpll_verify_disabled(state);
+ intel_dpll_verify_disabled(state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 0eaa6cd6fe80..81efdb17fc0c 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -31,10 +31,13 @@
#include <acpi/video.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_acpi.h"
#include "intel_backlight.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_opregion.h"
#include "intel_pci_config.h"
@@ -664,11 +667,10 @@ bool intel_opregion_asle_present(struct intel_display *display)
void intel_opregion_asle_intr(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_opregion *opregion = display->opregion;
if (opregion && opregion->asle)
- queue_work(i915->unordered_wq, &opregion->asle_work);
+ queue_work(display->wq.unordered, &opregion->asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 12308495afa5..159a5f998ea0 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -31,6 +31,7 @@
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_object_frontbuffer.h"
#include "gem/i915_gem_pm.h"
+
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
@@ -38,6 +39,7 @@
#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_overlay.h"
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index f5c972880391..2a20aaaaac39 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -462,3 +462,135 @@ void intel_panel_fini(struct intel_connector *connector)
drm_mode_destroy(connector->base.dev, fixed_mode);
}
}
+
+/*
+ * If the panel was already enabled at probe, and we took over the state, the
+ * panel prepared state is out of sync, and the panel followers won't be
+ * notified. We need to call drm_panel_prepare() on enabled panels.
+ *
+ * It would be natural to handle this e.g. in the connector ->sync_state hook at
+ * intel_modeset_readout_hw_state(), but that's unfortunately too early. We
+ * don't have drm_connector::kdev at that time. For now, figure out the state at
+ * ->late_register, and sync there.
+ */
+static void intel_panel_sync_state(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct drm_connector_state *conn_state;
+ struct intel_crtc *crtc;
+ int ret;
+
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
+ if (ret)
+ return;
+
+ conn_state = connector->base.state;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active) {
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Panel prepare\n",
+ connector->base.base.id, connector->base.name);
+ intel_panel_prepare(crtc_state, conn_state);
+ }
+ }
+
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
+}
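+
+/*
+ * Registration order this relies on (sketch of the flow described in the
+ * comments above, not new behavior):
+ *
+ *   drm_sysfs_connector_add()          creates drm_connector::kdev
+ *     -> connector .late_register()
+ *       -> intel_panel_register()      allocates and adds the drm_panel
+ *         -> intel_panel_sync_state()  notifies followers of enabled panels
+ */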
+
+static const struct drm_panel_funcs dummy_panel_funcs = {
+};
+
+int intel_panel_register(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+
+ ret = intel_backlight_device_register(connector);
+ if (ret)
+ return ret;
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ struct device *dev = connector->base.kdev;
+ struct drm_panel *base;
+
+ /* Sanity check. */
+ if (drm_WARN_ON(display->drm, !dev))
+ goto out;
+
+ /*
+ * We need drm_connector::kdev for allocating the panel, to make
+ * drm_panel_add_follower() lookups work. The kdev is
+ * initialized in drm_sysfs_connector_add(), just before the
+ * connector .late_register() hooks. So we can't allocate the
+ * panel at connector init time, and can't allocate struct
+ * intel_panel with a drm_panel sub-struct. For now, use
+ * __devm_drm_panel_alloc() directly.
+ *
+ * The lookups also depend on drm_connector::fwnode being set in
+ * intel_acpi_assign_connector_fwnodes(). However, if that's
+ * missing, it will gracefully lead to -EPROBE_DEFER in
+ * drm_panel_add_follower().
+ */
+ base = __devm_drm_panel_alloc(dev, sizeof(*base), 0,
+ &dummy_panel_funcs,
+ connector->base.connector_type);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err;
+ }
+
+ panel->base = base;
+
+ drm_panel_add(panel->base);
+
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Registered panel device '%s', has fwnode: %s\n",
+ connector->base.base.id, connector->base.name,
+ dev_name(dev), str_yes_no(dev_fwnode(dev)));
+
+ intel_panel_sync_state(connector);
+ }
+
+out:
+ return 0;
+
+err:
+ intel_backlight_device_unregister(connector);
+
+ return ret;
+}
+
+void intel_panel_unregister(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->base)
+ drm_panel_remove(panel->base);
+
+ intel_backlight_device_unregister(connector);
+}
+
+/* Notify followers, if any, about power being up. */
+void intel_panel_prepare(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_panel_prepare(panel->base);
+}
+
+/* Notify followers, if any, about power going down. */
+void intel_panel_unprepare(const struct drm_connector_state *old_conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_panel_unprepare(panel->base);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index b60d12322e5d..56a6412cf0fb 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -23,6 +23,8 @@ void intel_panel_init_alloc(struct intel_connector *connector);
int intel_panel_init(struct intel_connector *connector,
const struct drm_edid *fixed_edid);
void intel_panel_fini(struct intel_connector *connector);
+int intel_panel_register(struct intel_connector *connector);
+void intel_panel_unregister(struct intel_connector *connector);
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force);
bool intel_panel_use_ssc(struct intel_display *display);
@@ -51,4 +53,8 @@ void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector);
void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
struct intel_encoder *encoder);
+void intel_panel_prepare(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_panel_unprepare(const struct drm_connector_state *old_conn_state);
+
#endif /* __INTEL_PANEL_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 1743ebf551cb..3456c794e0e7 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -10,6 +10,7 @@
#include "intel_crt.h"
#include "intel_crt_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_fdi.h"
@@ -251,7 +252,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
u32 val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
- assert_shared_dpll_enabled(display, crtc_state->shared_dpll);
+ assert_dpll_enabled(display, crtc_state->intel_dpll);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(display, pipe);
@@ -381,8 +382,8 @@ void ilk_pch_enable(struct intel_atomic_state *state,
temp = intel_de_read(display, PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
- if (crtc_state->shared_dpll ==
- intel_get_shared_dpll_by_id(display, DPLL_ID_PCH_PLL_B))
+ if (crtc_state->intel_dpll ==
+ intel_get_dpll_by_id(display, DPLL_ID_PCH_PLL_B))
temp |= sel;
else
temp &= ~sel;
@@ -394,11 +395,11 @@ void ilk_pch_enable(struct intel_atomic_state *state,
* transcoder, and we actually should do this to not upset any PCH
* transcoder that already use the clock when we share it.
*
- * Note that enable_shared_dpll tries to do the right thing, but
- * get_shared_dpll unconditionally resets the pll - we need that
+ * Note that dpll_enable tries to do the right thing, but
+ * get_dpll unconditionally resets the pll - we need that
* to have the right LVDS enable sequence.
*/
- intel_enable_shared_dpll(crtc_state);
+ intel_dpll_enable(crtc_state);
/* set transcoder timing, panel must allow it */
assert_pps_unlocked(display, pipe);
@@ -472,7 +473,7 @@ void ilk_pch_post_disable(struct intel_atomic_state *state,
ilk_fdi_pll_disable(crtc);
- intel_disable_shared_dpll(old_crtc_state);
+ intel_dpll_disable(old_crtc_state);
}
static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
@@ -496,7 +497,7 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
enum pipe pipe = crtc->pipe;
enum intel_dpll_id pll_id;
bool pll_active;
@@ -528,8 +529,8 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
pll_id = DPLL_ID_PCH_PLL_A;
}
- crtc_state->shared_dpll = intel_get_shared_dpll_by_id(display, pll_id);
- pll = crtc_state->shared_dpll;
+ crtc_state->intel_dpll = intel_get_dpll_by_id(display, pll_id);
+ pll = crtc_state->intel_dpll;
pll_active = intel_dpll_get_hw_state(display, pll,
&crtc_state->dpll_hw_state);
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 693b90e3dfc3..d3c5255bf1a8 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -3,13 +3,17 @@
* Copyright © 2021 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"
+#include "intel_sbi_regs.h"
static void lpt_fdi_reset_mphy(struct intel_display *display)
{
@@ -29,95 +33,93 @@ static void lpt_fdi_reset_mphy(struct intel_display *display)
/* WaMPhyProgramming:hsw */
static void lpt_fdi_program_mphy(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
lpt_fdi_reset_mphy(display);
- tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
tmp |= (0x12 << 24);
- intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x8008, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2008, SBI_MPHY);
tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2008, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2108, SBI_MPHY);
tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2108, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x206C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x206C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x216C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x216C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2080, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2080, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2180, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2180, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x208C, SBI_MPHY);
tmp &= ~0xFF;
tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x208C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x218C, SBI_MPHY);
tmp &= ~0xFF;
tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x218C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2098, SBI_MPHY);
tmp &= ~(0xFF << 16);
tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2098, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2198, SBI_MPHY);
tmp &= ~(0xFF << 16);
tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2198, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x20C4, SBI_MPHY);
tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x20C4, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x21C4, SBI_MPHY);
tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x21C4, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x20EC, SBI_MPHY);
tmp &= ~(0xF << 28);
tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x20EC, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x21EC, SBI_MPHY);
tmp &= ~(0xF << 28);
tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x21EC, tmp, SBI_MPHY);
}
void lpt_disable_iclkip(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 temp;
intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_GATE);
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCCTL6, SBI_ICLK);
temp |= SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL6, temp, SBI_ICLK);
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
}
struct iclkip_params {
@@ -178,8 +180,6 @@ int lpt_iclkip(const struct intel_crtc_state *crtc_state)
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int clock = crtc_state->hw.adjusted_mode.crtc_clock;
struct iclkip_params p;
u32 temp;
@@ -199,30 +199,30 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
/* Program SSCDIVINTPHASE6 */
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc);
temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
- intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
/* Program SSCAUXDIV */
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCAUXDIV6, SBI_ICLK);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv);
- intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCAUXDIV6, temp, SBI_ICLK);
/* Enable modulator and associated divider */
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCCTL6, SBI_ICLK);
temp &= ~SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL6, temp, SBI_ICLK);
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
/* Wait for initialization time */
udelay(24);
@@ -232,7 +232,6 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
int lpt_get_iclkip(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct iclkip_params p;
u32 temp;
@@ -241,25 +240,25 @@ int lpt_get_iclkip(struct intel_display *display)
iclkip_params_init(&p);
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCCTL6, SBI_ICLK);
if (temp & SBI_SSCCTL_DISABLE) {
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
return 0;
}
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCDIVINTPHASE6, SBI_ICLK);
p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCAUXDIV6, SBI_ICLK);
p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;
@@ -275,7 +274,6 @@ int lpt_get_iclkip(struct intel_display *display)
static void lpt_enable_clkout_dp(struct intel_display *display,
bool with_spread, bool with_fdi)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, tmp;
if (drm_WARN(display->drm, with_fdi && !with_spread,
@@ -285,57 +283,56 @@ static void lpt_enable_clkout_dp(struct intel_display *display,
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp = intel_sbi_read(display, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL, tmp, SBI_ICLK);
udelay(24);
if (with_spread) {
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp = intel_sbi_read(display, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL, tmp, SBI_ICLK);
if (with_fdi)
lpt_fdi_program_mphy(display);
}
reg = HAS_PCH_LPT_LP(display) ? SBI_GEN0 : SBI_DBUFF0;
- tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp = intel_sbi_read(display, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
- intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+ intel_sbi_write(display, reg, tmp, SBI_ICLK);
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
}
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, tmp;
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
reg = HAS_PCH_LPT_LP(display) ? SBI_GEN0 : SBI_DBUFF0;
- tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp = intel_sbi_read(display, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
- intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+ intel_sbi_write(display, reg, tmp, SBI_ICLK);
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp = intel_sbi_read(display, SBI_SSCCTL, SBI_ICLK);
if (!(tmp & SBI_SSCCTL_DISABLE)) {
if (!(tmp & SBI_SSCCTL_PATHALT)) {
tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL, tmp, SBI_ICLK);
udelay(32);
}
tmp |= SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL, tmp, SBI_ICLK);
}
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
}
#define BEND_IDX(steps) ((50 + (steps)) / 5)
@@ -372,7 +369,6 @@ static const u16 sscdivintphase[] = {
*/
static void lpt_bend_clkout_dp(struct intel_display *display, int steps)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
int idx = BEND_IDX(steps);
@@ -382,20 +378,20 @@ static void lpt_bend_clkout_dp(struct intel_display *display, int steps)
if (drm_WARN_ON(display->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
if (steps % 10 != 0)
tmp = 0xAAAAAAAB;
else
tmp = 0x00000000;
- intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
- tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
+ tmp = intel_sbi_read(display, SBI_SSCDIVINTPHASE, SBI_ICLK);
tmp &= 0xffff0000;
tmp |= sscdivintphase[idx];
- intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
}
#undef BEND_IDX
@@ -499,7 +495,7 @@ static void lpt_init_pch_refclk(struct intel_display *display)
static void ilk_init_pch_refclk(struct intel_display *display)
{
struct intel_encoder *encoder;
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
u32 val, final;
bool has_lvds = false;
@@ -535,7 +531,7 @@ static void ilk_init_pch_refclk(struct intel_display *display)
}
/* Check if any DPLLs are using the SSC source */
- for_each_shared_dpll(display, pll, i) {
+ for_each_dpll(display, pll, i) {
u32 temp;
temp = intel_de_read(display, PCH_DPLL(pll->info->id));
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
index 3c3ecf288570..13541be4d6df 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.c
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -5,11 +5,11 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_lvds_regs.h"
#include "intel_pfit.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 6182f484b5bd..c2b4b2254190 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -30,10 +30,10 @@
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_pipe_crc.h"
#include "intel_pipe_crc_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c
index 1bcfa5f4fd63..36fb07471deb 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_plane.c
@@ -33,20 +33,22 @@
#include <linux/dma-fence-chain.h>
#include <linux/dma-resv.h>
+#include <linux/iosys-map.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_cache.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_panic.h>
#include "gem/i915_gem_object.h"
-#include "i915_config.h"
#include "i915_scheduler_types.h"
#include "i915_vma.h"
#include "i9xx_plane_regs.h"
-#include "intel_atomic_plane.h"
+#include "intel_bo.h"
#include "intel_cdclk.h"
#include "intel_cursor.h"
#include "intel_display_rps.h"
@@ -54,6 +56,9 @@
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
+#include "intel_fbdev.h"
+#include "intel_plane.h"
+#include "intel_psr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
@@ -334,7 +339,7 @@ int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
* display blinking due to constant cdclk changes.
*/
if (new_crtc_state->min_cdclk[plane->id] <=
- cdclk_state->min_cdclk[crtc->pipe])
+ intel_cdclk_min_cdclk(cdclk_state, crtc->pipe))
return 0;
drm_dbg_kms(display->drm,
@@ -342,7 +347,7 @@ int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
plane->base.base.id, plane->base.name,
new_crtc_state->min_cdclk[plane->id],
crtc->base.base.id, crtc->base.name,
- cdclk_state->min_cdclk[crtc->pipe]);
+ intel_cdclk_min_cdclk(cdclk_state, crtc->pipe));
*need_cdclk_calc = true;
return 0;
@@ -735,8 +740,8 @@ intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id)
return NULL;
}
-int intel_plane_atomic_check(struct intel_atomic_state *state,
- struct intel_plane *plane)
+static int plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(state);
struct intel_plane_state *new_plane_state =
@@ -984,10 +989,10 @@ void intel_crtc_planes_update_arm(struct intel_dsb *dsb,
i9xx_crtc_planes_update_arm(dsb, state, crtc);
}
-int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
- struct intel_crtc_state *crtc_state,
- int min_scale, int max_scale,
- bool can_position)
+int intel_plane_check_clipping(struct intel_plane_state *plane_state,
+ struct intel_crtc_state *crtc_state,
+ int min_scale, int max_scale,
+ bool can_position)
{
struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
@@ -1086,7 +1091,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
/* Wa_16023981245 */
if ((DISPLAY_VERx100(display) == 2000 ||
- DISPLAY_VERx100(display) == 3000) &&
+ DISPLAY_VERx100(display) == 3000 ||
+ DISPLAY_VERx100(display) == 3002) &&
src_x % 2 != 0)
hsub = 2;
} else {
@@ -1267,14 +1273,176 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
intel_plane_unpin_fb(old_plane_state);
}
+/* Handle Y-tiling, but only if DPT is enabled (otherwise disabling tiling is easier).
+ * All DPT hardware uses 128-byte-wide tiles, so the Y-tile dimension is 32x32
+ * pixels for 32-bit pixels.
+ */
+#define YTILE_WIDTH 32
+#define YTILE_HEIGHT 32
+#define YTILE_SIZE (YTILE_WIDTH * YTILE_HEIGHT * 4)
+
+static unsigned int intel_ytile_get_offset(unsigned int width, unsigned int x, unsigned int y)
+{
+ u32 offset;
+ unsigned int swizzle;
+ unsigned int width_in_blocks = DIV_ROUND_UP(width, YTILE_WIDTH);
+
+ /* Block offset */
+ offset = ((y / YTILE_HEIGHT) * width_in_blocks + (x / YTILE_WIDTH)) * YTILE_SIZE;
+
+ x = x % YTILE_WIDTH;
+ y = y % YTILE_HEIGHT;
+
+ /* bit order inside a block is x4 x3 x2 y4 y3 y2 y1 y0 x1 x0 */
+ swizzle = (x & 3) | ((y & 0x1f) << 2) | ((x & 0x1c) << 5);
+ offset += swizzle * 4;
+ return offset;
+}
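+
+/*
+ * Worked example of the Y-tile swizzle above (illustration only): for
+ * x = 5 (00101) and y = 3 (00011) within a tile,
+ *
+ *   (x & 3)         = 0b01       bits 1:0
+ *   (y & 0x1f) << 2 = 0b0001100  bits 6:2
+ *   (x & 0x1c) << 5 = 0b10000000 bits 9:7
+ *
+ * so swizzle = 141 and the dword lives 141 * 4 = 564 bytes into the tile.
+ */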
+
+static unsigned int intel_4tile_get_offset(unsigned int width, unsigned int x, unsigned int y)
+{
+ u32 offset;
+ unsigned int swizzle;
+ unsigned int width_in_blocks = DIV_ROUND_UP(width, YTILE_WIDTH);
+
+ /* Block offset */
+ offset = ((y / YTILE_HEIGHT) * width_in_blocks + (x / YTILE_WIDTH)) * YTILE_SIZE;
+
+ x = x % YTILE_WIDTH;
+ y = y % YTILE_HEIGHT;
+
+ /* bit order inside a block is y4 y3 x4 y2 x3 x2 y1 y0 x1 x0 */
+ swizzle = (x & 3) | ((y & 3) << 2) | ((x & 0xc) << 2) | (y & 4) << 4 |
+ ((x & 0x10) << 3) | ((y & 0x18) << 5);
+ offset += swizzle * 4;
+ return offset;
+}
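+
+/*
+ * The same x = 5, y = 3 example under the Tile-4 bit order (illustration
+ * only): (x & 3) = 1, ((y & 3) << 2) = 12, ((x & 0xc) << 2) = 16, and the
+ * remaining terms are zero, so swizzle = 29, i.e. a 29 * 4 = 116 byte
+ * offset into the tile.
+ */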
+
+static void intel_panic_flush(struct drm_plane *plane)
+{
+ struct intel_plane_state *plane_state = to_intel_plane_state(plane->state);
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(plane->state->crtc->state);
+ struct intel_plane *iplane = to_intel_plane(plane);
+ struct intel_display *display = to_intel_display(iplane);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ intel_bo_panic_finish(intel_fb);
+
+ if (crtc_state->enable_psr2_sel_fetch) {
+ /* Force a full update for psr2 */
+ intel_psr2_panic_force_full_update(display, crtc_state);
+ }
+
+ /* Flush the cache and don't disable tiling if it's the fbdev framebuffer. */
+ if (intel_fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
+ struct iosys_map map;
+
+ intel_fbdev_get_map(display->fbdev.fbdev, &map);
+ drm_clflush_virt_range(map.vaddr, fb->pitches[0] * fb->height);
+ return;
+ }
+
+ if (fb->modifier && iplane->disable_tiling)
+ iplane->disable_tiling(iplane);
+}
+
+static unsigned int (*intel_get_tiling_func(u64 fb_modifier))(unsigned int width,
+ unsigned int x,
+ unsigned int y)
+{
+ switch (fb_modifier) {
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return intel_ytile_get_offset;
+ case I915_FORMAT_MOD_4_TILED:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
+ return intel_4tile_get_offset;
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ default:
+ /* Not supported yet */
+ return NULL;
+ }
+}
+
+static int intel_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct intel_plane_state *plane_state;
+ struct drm_gem_object *obj;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct intel_display *display = to_intel_display(plane->dev);
+
+ if (!plane->state || !plane->state->fb || !plane->state->visible)
+ return -ENODEV;
+
+ plane_state = to_intel_plane_state(plane->state);
+ fb = plane_state->hw.fb;
+ intel_fb = to_intel_framebuffer(fb);
+
+ obj = intel_fb_bo(fb);
+ if (!obj)
+ return -ENODEV;
+
+ if (intel_fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
+ intel_fbdev_get_map(display->fbdev.fbdev, &sb->map[0]);
+ } else {
+ int ret;
+ /* Can't disable tiling if DPT is in use */
+ if (intel_fb_uses_dpt(fb)) {
+ if (fb->format->cpp[0] != 4)
+ return -EOPNOTSUPP;
+ intel_fb->panic_tiling = intel_get_tiling_func(fb->modifier);
+ if (!intel_fb->panic_tiling)
+ return -EOPNOTSUPP;
+ }
+ sb->private = intel_fb;
+ ret = intel_bo_panic_setup(sb);
+ if (ret)
+ return ret;
+ }
+ sb->width = fb->width;
+ sb->height = fb->height;
+ /* Use the generic linear format, because tiling, RC, CCS, CC
+ * will be disabled in disable_tiling()
+ */
+ sb->format = drm_format_info(fb->format->format);
+ sb->pitch[0] = fb->pitches[0];
+
+ return 0;
+}
+
static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
};
+static const struct drm_plane_helper_funcs intel_primary_plane_helper_funcs = {
+ .prepare_fb = intel_prepare_plane_fb,
+ .cleanup_fb = intel_cleanup_plane_fb,
+ .get_scanout_buffer = intel_get_scanout_buffer,
+ .panic_flush = intel_panic_flush,
+};
+
void intel_plane_helper_add(struct intel_plane *plane)
{
- drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+ if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(&plane->base, &intel_primary_plane_helper_funcs);
+ else
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
}
void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_state,
@@ -1434,8 +1602,8 @@ static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
return 0;
}
-int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+int intel_plane_add_affected(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -1529,7 +1697,7 @@ static int intel_add_affected_planes(struct intel_atomic_state *state)
return 0;
}
-int intel_atomic_check_planes(struct intel_atomic_state *state)
+int intel_plane_atomic_check(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -1543,7 +1711,7 @@ int intel_atomic_check_planes(struct intel_atomic_state *state)
return ret;
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- ret = intel_plane_atomic_check(state, plane);
+ ret = plane_atomic_check(state, plane);
if (ret) {
drm_dbg_atomic(display->drm,
"[PLANE:%d:%s] atomic driver check failed\n",
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_plane.h
index 317320c32285..4ef012c08fa4 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_plane.h
@@ -3,8 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
-#ifndef __INTEL_ATOMIC_PLANE_H__
-#define __INTEL_ATOMIC_PLANE_H__
+#ifndef __INTEL_PLANE_H__
+#define __INTEL_PLANE_H__
#include <linux/types.h>
@@ -69,15 +69,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
-int intel_plane_atomic_check(struct intel_atomic_state *state,
- struct intel_plane *plane);
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
struct intel_plane *plane,
bool *need_cdclk_calc);
-int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
- struct intel_crtc_state *crtc_state,
- int min_scale, int max_scale,
- bool can_position);
+int intel_plane_check_clipping(struct intel_plane_state *plane_state,
+ struct intel_crtc_state *crtc_state,
+ int min_scale, int max_scale,
+ bool can_position);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
@@ -85,13 +83,13 @@ void intel_plane_helper_add(struct intel_plane *plane);
bool intel_plane_needs_physical(struct intel_plane *plane);
void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state);
-int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-int intel_atomic_check_planes(struct intel_atomic_state *state);
+int intel_plane_add_affected(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_plane_atomic_check(struct intel_atomic_state *state);
u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state);
bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
u32 format,
u64 modifier);
-#endif /* __INTEL_ATOMIC_PLANE_H__ */
+#endif /* __INTEL_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index c00d9184c586..4246173ed311 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -6,12 +6,13 @@
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
-#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_plane_initial.h"
void intel_plane_initial_vblank_wait(struct intel_crtc *crtc)
@@ -288,7 +289,8 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
if (intel_framebuffer_init(to_intel_framebuffer(fb),
- intel_bo_to_drm_bo(vma->obj), &mode_cmd)) {
+ intel_bo_to_drm_bo(vma->obj),
+ fb->format, &mode_cmd)) {
drm_dbg_kms(display->drm, "intel fb init failed\n");
goto err_vma;
}
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 1253376c7654..d806c15db7ce 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -7,12 +7,12 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
@@ -294,40 +294,17 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
- struct intel_display *display = to_intel_display(state);
- const struct intel_bw_state *new_bw_state, *old_bw_state;
- const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
- const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
struct intel_crtc *crtc;
int i;
- new_bw_state = intel_atomic_get_new_bw_state(state);
- old_bw_state = intel_atomic_get_old_bw_state(state);
- if (new_bw_state && new_bw_state->qgv_point_peakbw !=
- old_bw_state->qgv_point_peakbw)
+ if (intel_bw_pmdemand_needs_update(state))
return true;
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- if (new_dbuf_state &&
- new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
+ if (intel_dbuf_pmdemand_needs_update(state))
return true;
- if (DISPLAY_VER(display) < 30) {
- if (new_dbuf_state &&
- new_dbuf_state->enabled_slices !=
- old_dbuf_state->enabled_slices)
- return true;
- }
-
- new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
- old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- if (new_cdclk_state &&
- (new_cdclk_state->actual.cdclk !=
- old_cdclk_state->actual.cdclk ||
- new_cdclk_state->actual.voltage_level !=
- old_cdclk_state->actual.voltage_level))
+ if (intel_cdclk_pmdemand_needs_update(state))
return true;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -362,7 +339,7 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
/* firmware will calculate the qclk_gv_index, requirement is set to 0 */
new_pmdemand_state->params.qclk_gv_index = 0;
- new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;
+ new_pmdemand_state->params.qclk_gv_bw = intel_bw_qgv_point_peakbw(new_bw_state);
new_dbuf_state = intel_atomic_get_dbuf_state(state);
if (IS_ERR(new_dbuf_state))
@@ -370,12 +347,12 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
if (DISPLAY_VER(display) < 30) {
new_pmdemand_state->params.active_dbufs =
- min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
+ min_t(u8, intel_dbuf_num_enabled_slices(new_dbuf_state), 3);
new_pmdemand_state->params.active_pipes =
- min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
+ min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), 3);
} else {
new_pmdemand_state->params.active_pipes =
- min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(display));
+ min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), INTEL_NUM_PIPES(display));
}
new_cdclk_state = intel_atomic_get_cdclk_state(state);
@@ -383,9 +360,9 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
return PTR_ERR(new_cdclk_state);
new_pmdemand_state->params.voltage_index =
- new_cdclk_state->actual.voltage_level;
+ intel_cdclk_actual_voltage_level(new_cdclk_state);
new_pmdemand_state->params.cdclk_freq_mhz =
- DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);
+ DIV_ROUND_UP(intel_cdclk_actual(new_cdclk_state), 1000);
intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 05e1e5c7e8b7..b64d0b30f5b1 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -5,11 +5,14 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -891,7 +894,6 @@ static void edp_panel_vdd_work(struct work_struct *__work)
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long delay;
/*
@@ -907,7 +909,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
* operations.
*/
delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
- queue_delayed_work(i915->unordered_wq,
+ queue_delayed_work(display->wq.unordered,
&intel_dp->pps.panel_vdd_work, delay);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pps_regs.h b/drivers/gpu/drm/i915/display/intel_pps_regs.h
index 8f9dbfab9523..2f014d929d32 100644
--- a/drivers/gpu/drm/i915/display/intel_pps_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_pps_regs.h
@@ -6,7 +6,6 @@
#ifndef __INTEL_PPS_REGS_H__
#define __INTEL_PPS_REGS_H__
-#include "intel_display_conversion.h"
#include "intel_display_reg_defs.h"
/* Panel power sequencing */
@@ -14,11 +13,11 @@
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
#define PCH_PPS_BASE 0xC7200
-#define _MMIO_PPS(dev_priv, pps_idx, reg) \
- _MMIO(__to_intel_display(dev_priv)->pps.mmio_base - PPS_BASE + (reg) + (pps_idx) * 0x100)
+#define _MMIO_PPS(display, pps_idx, reg) \
+ _MMIO((display)->pps.mmio_base - PPS_BASE + (reg) + (pps_idx) * 0x100)
#define _PP_STATUS 0x61200
-#define PP_STATUS(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_STATUS)
+#define PP_STATUS(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_STATUS)
#define PP_ON REG_BIT(31)
/*
* Indicates that all dependencies of the panel are on:
@@ -45,7 +44,7 @@
#define PP_SEQUENCE_STATE_RESET REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf)
#define _PP_CONTROL 0x61204
-#define PP_CONTROL(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_CONTROL)
+#define PP_CONTROL(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_CONTROL)
#define PANEL_UNLOCK_MASK REG_GENMASK(31, 16)
#define PANEL_UNLOCK_REGS REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd)
#define BXT_POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
@@ -55,7 +54,7 @@
#define PANEL_POWER_ON REG_BIT(0)
#define _PP_ON_DELAYS 0x61208
-#define PP_ON_DELAYS(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_ON_DELAYS)
+#define PP_ON_DELAYS(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_ON_DELAYS)
#define PANEL_PORT_SELECT_MASK REG_GENMASK(31, 30)
#define PANEL_PORT_SELECT_LVDS REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0)
#define PANEL_PORT_SELECT_DPA REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1)
@@ -66,12 +65,12 @@
#define PANEL_LIGHT_ON_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_OFF_DELAYS 0x6120C
-#define PP_OFF_DELAYS(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_OFF_DELAYS)
+#define PP_OFF_DELAYS(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_OFF_DELAYS)
#define PANEL_POWER_DOWN_DELAY_MASK REG_GENMASK(28, 16)
#define PANEL_LIGHT_OFF_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_DIVISOR 0x61210
-#define PP_DIVISOR(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_DIVISOR)
+#define PP_DIVISOR(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_DIVISOR)
#define PP_REFERENCE_DIVIDER_MASK REG_GENMASK(31, 8)
#define PANEL_POWER_CYCLE_DELAY_MASK REG_GENMASK(4, 0)
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 430ad4ef7146..41988e193a41 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -28,7 +28,6 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
@@ -37,6 +36,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
@@ -47,6 +47,7 @@
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
+#include "intel_step.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"
@@ -447,7 +448,6 @@ static void psr_event_print(struct intel_display *display,
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
ktime_t time_ns = ktime_get();
@@ -492,7 +492,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
0, psr_irq_psr_error_bit_get(intel_dp));
- queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
+ queue_work(display->wq.unordered, &intel_dp->psr.work);
}
}
@@ -516,7 +516,7 @@ static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
if (intel_dp->psr.sink_panel_replay_su_support)
drm_dp_dpcd_readb(&intel_dp->aux,
- DP_PANEL_PANEL_REPLAY_CAPABILITY,
+ DP_PANEL_REPLAY_CAP_CAPABILITY,
&su_capability);
else
su_capability = intel_dp->psr_dpcd[1];
@@ -528,7 +528,7 @@ static unsigned int
intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
{
return intel_dp->psr.sink_panel_replay_su_support ?
- DP_PANEL_PANEL_REPLAY_X_GRANULARITY :
+ DP_PANEL_REPLAY_CAP_X_GRANULARITY :
DP_PSR2_SU_X_GRANULARITY;
}
@@ -536,7 +536,7 @@ static unsigned int
intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
{
return intel_dp->psr.sink_panel_replay_su_support ?
- DP_PANEL_PANEL_REPLAY_Y_GRANULARITY :
+ DP_PANEL_REPLAY_CAP_Y_GRANULARITY :
DP_PSR2_SU_Y_GRANULARITY;
}
@@ -608,7 +608,8 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
return;
}
- if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
+ if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
drm_dbg_kms(display->drm,
"Panel doesn't support early transport, eDP Panel Replay not possible\n");
return;
@@ -617,7 +618,8 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
intel_dp->psr.sink_panel_replay_support = true;
- if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_SU_SUPPORT)
intel_dp->psr.sink_panel_replay_su_support = true;
drm_dbg_kms(display->drm,
@@ -676,10 +678,12 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
- drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP,
- &intel_dp->pr_dpcd);
- if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SUPPORT)
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
+ &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
+
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_SUPPORT)
_panel_replay_init_dpcd(intel_dp);
if (intel_dp->psr_dpcd[0])
@@ -736,7 +740,8 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay
return false;
return panel_replay ?
- intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
+ intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
psr2_su_region_et_global_enabled(intel_dp);
}
@@ -1574,6 +1579,12 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
if (!CAN_PSR(intel_dp))
return false;
+ /*
+ * Currently PSR doesn't work reliably with VRR enabled.
+ */
+ if (crtc_state->vrr.enable)
+ return false;
+
entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
if (entry_setup_frames >= 0) {
@@ -1691,12 +1702,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- /*
- * Currently PSR/PR doesn't work reliably with VRR enabled.
- */
- if (crtc_state->vrr.enable)
- return;
-
crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
crtc_state,
conn_state);
@@ -2883,6 +2888,26 @@ skip_sel_fetch_set_loop:
return 0;
}
+void intel_psr2_panic_force_full_update(struct intel_display *display,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val = man_trk_ctl_enable_bit_get(display);
+
+ /* SF partial frame enable has to be set even on full update */
+ val |= man_trk_ctl_partial_frame_bit_get(display);
+ val |= man_trk_ctl_continuos_full_frame(display);
+
+ /* Directly write the register */
+ intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val);
+
+ if (!crtc_state->enable_psr2_su_region_et)
+ return;
+
+ intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0);
+}
+
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -3250,7 +3275,9 @@ static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
- if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
intel_psr_configure_full_frame_update(intel_dp);
@@ -3314,7 +3341,6 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
!intel_dp->psr.active)
@@ -3329,16 +3355,15 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
return;
tgl_psr2_enable_dc3co(intel_dp);
- mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
+ mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work,
intel_dp->psr.dc3co_exit_delay);
}
static void _psr_flush_handle(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
if (intel_dp->psr.busy_frontbuffer_bits == 0)
@@ -3355,13 +3380,15 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
* existing SU configuration
*/
intel_psr_configure_full_frame_update(intel_dp);
- }
- intel_psr_force_update(intel_dp);
+ intel_psr_force_update(intel_dp);
+ } else {
+ intel_psr_exit(intel_dp);
+ }
- if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active &&
+ if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) &&
!intel_dp->psr.busy_frontbuffer_bits)
- queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
+ queue_work(display->wq.unordered, &intel_dp->psr.work);
}
/**
@@ -3916,7 +3943,8 @@ static void intel_psr_sink_capability(struct intel_dp *intel_dp,
seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
seq_printf(m, ", Panel Replay Selective Update = %s",
str_yes_no(psr->sink_panel_replay_su_support));
- if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
seq_printf(m, " (Early Transport)");
seq_printf(m, "\n");
}
@@ -4021,24 +4049,30 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
int frame;
/*
- * Reading all 3 registers before hand to minimize crossing a
- * frame boundary between register reads
+ * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P
+ * (it returns zeros only) and it has been removed on Xe2_LPD.
*/
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = intel_de_read(display,
- PSR2_SU_STATUS(display, cpu_transcoder, frame));
- su_frames_val[frame / 3] = val;
- }
+ if (DISPLAY_VER(display) < 13) {
+ /*
+ * Reading all 3 registers beforehand to minimize crossing a
+ * frame boundary between register reads
+ */
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = intel_de_read(display,
+ PSR2_SU_STATUS(display, cpu_transcoder, frame));
+ su_frames_val[frame / 3] = val;
+ }
- seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+ seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
- u32 su_blocks;
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+ u32 su_blocks;
- su_blocks = su_frames_val[frame / 3] &
- PSR2_SU_STATUS_MASK(frame);
- su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
- seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ su_blocks = su_frames_val[frame / 3] &
+ PSR2_SU_STATUS_MASK(frame);
+ su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+ seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ }
}
seq_printf(m, "PSR2 selective fetch: %s\n",
@@ -4234,3 +4268,9 @@ bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_sta
return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
crtc_state->has_panel_replay);
}
+
+bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
+}
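
Net effect of the DISPLAY_VER() checks added above: on Xe2+ (display
version 20 and newer) the selective-fetch continuous-full-frame (CFF)
bookkeeping is skipped entirely, and invalidate/flush fall through to
the plain PSR exit/work path. A condensed sketch of the resulting flush
logic (illustrative only, not the literal driver code; psr is a local
alias for intel_dp->psr):

	if (DISPLAY_VER(display) < 20 && psr->psr2_sel_fetch_enabled) {
		/* legacy path: manage continuous full frame fetch */
		intel_psr_configure_full_frame_update(intel_dp);
		intel_psr_force_update(intel_dp);
	} else {
		intel_psr_exit(intel_dp);	/* Xe2+: simply exit PSR */
	}

	if ((!psr->psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) &&
	    !psr->busy_frontbuffer_bits)
		queue_work(display->wq.unordered, &psr->work);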
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 73c3fa40844b..9b061a22361f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -57,6 +57,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
+void intel_psr2_panic_force_full_update(struct intel_display *display,
+ struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state);
@@ -77,5 +79,7 @@ int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
void intel_psr_debugfs_register(struct intel_display *display);
bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state);
+bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index 248136456048..8afbf5a38335 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -266,6 +266,16 @@
#define _PIPE_SRCSZ_ERLY_TPT_B 0x71074
#define PIPE_SRCSZ_ERLY_TPT(pipe) _MMIO_PIPE((pipe), _PIPE_SRCSZ_ERLY_TPT_A, _PIPE_SRCSZ_ERLY_TPT_B)
+#define _PR_ALPM_CTL_A 0x60948
+#define PR_ALPM_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PR_ALPM_CTL_A)
+#define PR_ALPM_CTL_ALLOW_LINK_OFF_BETWEEN_AS_SDP_AND_SU BIT(6)
+#define PR_ALPM_CTL_RFB_UPDATE_CONTROL BIT(5)
+#define PR_ALPM_CTL_AS_SDP_TRANSMISSION_IN_ACTIVE_DISABLE BIT(4)
+#define PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_MASK REG_GENMASK(1, 0)
+#define PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1_OR_T2 REG_FIELD_PREP(PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_MASK, 0)
+#define PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1 REG_FIELD_PREP(PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_MASK, 1)
+#define PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T2 REG_FIELD_PREP(PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_MASK, 2)
+
#define _ALPM_CTL_A 0x60950
#define ALPM_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _ALPM_CTL_A)
#define ALPM_CTL_ALPM_ENABLE REG_BIT(31)
diff --git a/drivers/gpu/drm/i915/display/intel_sbi.c b/drivers/gpu/drm/i915/display/intel_sbi.c
new file mode 100644
index 000000000000..dfcff924f0ed
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sbi.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ *
+ * LPT/WPT IOSF sideband.
+ */
+
+#include <drm/drm_print.h>
+
+#include "intel_de.h"
+#include "intel_display_core.h"
+#include "intel_sbi.h"
+#include "intel_sbi_regs.h"
+
+/* SBI access */
+static int intel_sbi_rw(struct intel_display *display, u16 reg,
+ enum intel_sbi_destination destination,
+ u32 *val, bool is_read)
+{
+ u32 cmd;
+
+ lockdep_assert_held(&display->sbi.lock);
+
+ if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, NULL)) {
+ drm_err(display->drm, "timeout waiting for SBI to become ready\n");
+ return -EBUSY;
+ }
+
+ intel_de_write_fw(display, SBI_ADDR, SBI_ADDR_VALUE(reg));
+ intel_de_write_fw(display, SBI_DATA, is_read ? 0 : *val);
+
+ if (destination == SBI_ICLK)
+ cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ if (!is_read)
+ cmd |= SBI_CTL_OP_WR;
+ intel_de_write_fw(display, SBI_CTL_STAT, cmd | SBI_STATUS_BUSY);
+
+ if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, &cmd)) {
+ drm_err(display->drm, "timeout waiting for SBI to complete read\n");
+ return -ETIMEDOUT;
+ }
+
+ if (cmd & SBI_RESPONSE_FAIL) {
+ drm_err(display->drm, "error during SBI read of reg %x\n", reg);
+ return -ENXIO;
+ }
+
+ if (is_read)
+ *val = intel_de_read_fw(display, SBI_DATA);
+
+ return 0;
+}
+
+void intel_sbi_lock(struct intel_display *display)
+{
+ mutex_lock(&display->sbi.lock);
+}
+
+void intel_sbi_unlock(struct intel_display *display)
+{
+ mutex_unlock(&display->sbi.lock);
+}
+
+u32 intel_sbi_read(struct intel_display *display, u16 reg,
+ enum intel_sbi_destination destination)
+{
+ u32 result = 0;
+
+ intel_sbi_rw(display, reg, destination, &result, true);
+
+ return result;
+}
+
+void intel_sbi_write(struct intel_display *display, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
+{
+ intel_sbi_rw(display, reg, destination, &value, false);
+}
+
+void intel_sbi_init(struct intel_display *display)
+{
+ mutex_init(&display->sbi.lock);
+}
+
+void intel_sbi_fini(struct intel_display *display)
+{
+ mutex_destroy(&display->sbi.lock);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_sbi.h b/drivers/gpu/drm/i915/display/intel_sbi.h
new file mode 100644
index 000000000000..841f77a142a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sbi.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _INTEL_SBI_H_
+#define _INTEL_SBI_H_
+
+#include <linux/types.h>
+
+struct intel_display;
+
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
+void intel_sbi_init(struct intel_display *display);
+void intel_sbi_fini(struct intel_display *display);
+void intel_sbi_lock(struct intel_display *display);
+void intel_sbi_unlock(struct intel_display *display);
+u32 intel_sbi_read(struct intel_display *display, u16 reg,
+ enum intel_sbi_destination destination);
+void intel_sbi_write(struct intel_display *display, u16 reg, u32 value,
+ enum intel_sbi_destination destination);
+
+#endif /* _INTEL_SBI_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_sbi_regs.h b/drivers/gpu/drm/i915/display/intel_sbi_regs.h
new file mode 100644
index 000000000000..ec76652de02d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sbi_regs.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_SBI_REGS_H__
+#define __INTEL_SBI_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/*
+ * The Sideband Interface (SBI) is programmed indirectly via SBI_ADDR,
+ * which holds the register offset, and SBI_DATA, which holds the payload.
+ */
+#define SBI_ADDR _MMIO(0xC6000)
+#define SBI_ADDR_MASK REG_GENMASK(31, 16)
+#define SBI_ADDR_VALUE(addr) REG_FIELD_PREP(SBI_ADDR_MASK, (addr))
+
+#define SBI_DATA _MMIO(0xC6004)
+
+#define SBI_CTL_STAT _MMIO(0xC6008)
+#define SBI_CTL_DEST_MASK REG_GENMASK(16, 16)
+#define SBI_CTL_DEST_ICLK REG_FIELD_PREP(SBI_CTL_DEST_MASK, 0)
+#define SBI_CTL_DEST_MPHY REG_FIELD_PREP(SBI_CTL_DEST_MASK, 1)
+#define SBI_CTL_OP_MASK REG_GENMASK(15, 8)
+#define SBI_CTL_OP_IORD REG_FIELD_PREP(SBI_CTL_OP_MASK, 2)
+#define SBI_CTL_OP_IOWR REG_FIELD_PREP(SBI_CTL_OP_MASK, 3)
+#define SBI_CTL_OP_CRRD REG_FIELD_PREP(SBI_CTL_OP_MASK, 6)
+#define SBI_CTL_OP_CRWR REG_FIELD_PREP(SBI_CTL_OP_MASK, 7)
+#define SBI_CTL_OP_WR REG_BIT(8)
+#define SBI_RESPONSE_MASK REG_GENMASK(2, 1)
+#define SBI_RESPONSE_FAIL REG_FIELD_PREP(SBI_RESPONSE_MASK, 1)
+#define SBI_RESPONSE_SUCCESS REG_FIELD_PREP(SBI_RESPONSE_MASK, 0)
+#define SBI_STATUS_MASK REG_GENMASK(0, 0)
+#define SBI_STATUS_BUSY REG_FIELD_PREP(SBI_STATUS_MASK, 1)
+#define SBI_STATUS_READY REG_FIELD_PREP(SBI_STATUS_MASK, 0)
+
+/* SBI offsets */
+#define SBI_SSCDIVINTPHASE 0x0200
+
+#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f << 1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x) << 1)
+#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f << 8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x) << 8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x) << 15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1 << 0)
+
+#define SBI_SSCDITHPHASE 0x0204
+#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1 << 3)
+#define SBI_SSCCTL_DISABLE (1 << 0)
+
+#define SBI_SSCAUXDIV6 0x0610
+#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4
+#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1 << 4)
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x) << 4)
+
+#define SBI_DBUFF0 0x2a00
+
+#define SBI_GEN0 0x1f00
+#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1 << 0)
+
+#endif /* __INTEL_SBI_REGS_H__ */
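
The new helpers are intended to be called with the SBI lock held across
a whole transaction. A minimal hypothetical caller (not part of this
series) doing a locked read-modify-write of SBI_SSCCTL6 over the iCLK
destination might look like:

	static void example_disable_iclk_ssc(struct intel_display *display)
	{
		u32 tmp;

		intel_sbi_lock(display);

		/* read, set the disable bit, write back */
		tmp = intel_sbi_read(display, SBI_SSCCTL6, SBI_ICLK);
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(display, SBI_SSCCTL6, tmp, SBI_ICLK);

		intel_sbi_unlock(display);
	}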
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 8a38df2c0283..87aff2754f69 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -39,13 +39,13 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
index 5111bdc3075b..7fe6b4a18213 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
@@ -106,7 +106,7 @@ static void get_ana_cp_int_prop(u64 vco_clk,
DIV64_U64_ROUND_CLOSEST(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
CURVE2_MULTIPLIER);
- *ana_cp_int = max(1, min(ana_cp_int_temp, 127));
+ *ana_cp_int = clamp(ana_cp_int_temp, 1, 127);
curve_2_scaled_int = curve_2_scaled1 * (*ana_cp_int);
@@ -125,7 +125,7 @@ static void get_ana_cp_int_prop(u64 vco_clk,
curve_1_interpolated);
*ana_cp_prop = DIV64_U64_ROUND_UP(adjusted_vco_clk2, curve_2_scaled2);
- *ana_cp_prop = max(1, min(*ana_cp_prop, 127));
+ *ana_cp_prop = clamp(*ana_cp_prop, 1, 127);
}
static void compute_hdmi_tmds_pll(u64 pixel_clock, u32 refclk,
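
The max(1, min(x, 127)) to clamp(x, 1, 127) conversions above are
behavior-preserving; clamp() from <linux/minmax.h> bounds a value to a
closed interval. For example:

	clamp(200, 1, 127);	/* -> 127 (clipped to the upper bound) */
	clamp(-5, 1, 127);	/* ->   1 (clipped to the lower bound) */
	clamp(64, 1, 127);	/* ->  64 (already in range) */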
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 2b53ac9f4935..b2dd69a11124 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -7,11 +7,11 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_snps_hdmi_pll.h"
#include "intel_snps_phy.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index fd92e6b89b43..e6844df837af 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -41,11 +41,11 @@
#include "i915_utils.h"
#include "i9xx_plane.h"
-#include "intel_atomic_plane.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_sprite.h"
#include "intel_sprite_regs.h"
@@ -1366,8 +1366,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
}
}
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- min_scale, max_scale, true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ min_scale, max_scale, true);
if (ret)
return ret;
@@ -1421,10 +1421,10 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index c1014e74791f..3bc57579fe53 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -14,6 +14,7 @@
#include "intel_display.h"
#include "intel_display_driver.h"
#include "intel_display_power_map.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index acf0b3733908..e3ab49815a3c 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -36,12 +36,12 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 139fa5deba80..70ba7aa26bf4 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -6,10 +6,10 @@
#include <drm/drm_vblank.h>
#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index e9b809568cd4..92c04811aa28 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -446,7 +446,7 @@ enum vbt_gmbus_ddi {
* basically any of the fields to ensure the correct interpretation for the BDB
* version in question.
*
- * When we copy the child device configs to dev_priv->display.vbt.child_dev, we
+ * When we copy the child device configs to display->vbt.child_dev, we
* reserve space for the full structure below, and initialize the tail not
* actually present in VBT to zeros. Accessing those fields is fine, as long as
* the default zero is taken into account, again according to the BDB version.
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index 05d140c8032d..6e125564db34 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -13,7 +13,6 @@
#include "soc/intel_gmch.h"
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_vga.h"
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index c6565baf815a..3eed37f271b0 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -6,8 +6,8 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_vrr.h"
@@ -576,6 +576,25 @@ bool intel_vrr_always_use_vrr_tg(struct intel_display *display)
return false;
}
+static
+void intel_vrr_set_db_point_and_transmission_line(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ /*
+ * For BMG and LNL+ onwards, EMP_AS_SDP_TL is used to program the
+ * double buffering point and transmission line for VRR packets for
+ * HDMI2.1/DP/eDP/DP->HDMI2.1 PCON.
+ * Since we currently support VRR only for DP/eDP, this is programmed
+ * for the Adaptive Sync SDP to Vsync start.
+ */
+ if (DISPLAY_VERx100(display) == 1401 || DISPLAY_VER(display) >= 20)
+ intel_de_write(display,
+ EMP_AS_SDP_TL(display, cpu_transcoder),
+ EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start));
+}
+
void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -595,6 +614,8 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
TRANS_PUSH_EN);
if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_vrr_set_db_point_and_transmission_line(crtc_state);
+
if (crtc_state->cmrr.enable) {
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
@@ -646,6 +667,8 @@ void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN);
+ intel_vrr_set_db_point_and_transmission_line(crtc_state);
+
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr_regs.h b/drivers/gpu/drm/i915/display/intel_vrr_regs.h
index 6ed0e0dc97e7..ba9b9215dc11 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr_regs.h
@@ -8,120 +8,119 @@
#include "intel_display_reg_defs.h"
-/* VRR registers */
#define _TRANS_VRR_CTL_A 0x60420
#define _TRANS_VRR_CTL_B 0x61420
#define _TRANS_VRR_CTL_C 0x62420
#define _TRANS_VRR_CTL_D 0x63420
-#define TRANS_VRR_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_CTL_A)
-#define VRR_CTL_VRR_ENABLE REG_BIT(31)
-#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
-#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
-#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3)
-#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
-#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0)
-#define XELPD_VRR_CTL_VRR_GUARDBAND_MASK REG_GENMASK(15, 0)
-#define XELPD_VRR_CTL_VRR_GUARDBAND(x) REG_FIELD_PREP(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, (x))
+#define TRANS_VRR_CTL(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_CTL_A)
+#define VRR_CTL_VRR_ENABLE REG_BIT(31)
+#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
+#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
+#define VRR_CTL_CMRR_ENABLE REG_BIT(27)
+#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3)
+#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
+#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0)
+#define XELPD_VRR_CTL_VRR_GUARDBAND_MASK REG_GENMASK(15, 0)
+#define XELPD_VRR_CTL_VRR_GUARDBAND(x) REG_FIELD_PREP(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, (x))
#define _TRANS_VRR_VMAX_A 0x60424
#define _TRANS_VRR_VMAX_B 0x61424
#define _TRANS_VRR_VMAX_C 0x62424
#define _TRANS_VRR_VMAX_D 0x63424
-#define TRANS_VRR_VMAX(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMAX_A)
-#define VRR_VMAX_MASK REG_GENMASK(19, 0)
+#define TRANS_VRR_VMAX(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VMAX_A)
+#define VRR_VMAX_MASK REG_GENMASK(19, 0)
#define _TRANS_VRR_VMIN_A 0x60434
#define _TRANS_VRR_VMIN_B 0x61434
#define _TRANS_VRR_VMIN_C 0x62434
#define _TRANS_VRR_VMIN_D 0x63434
-#define TRANS_VRR_VMIN(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMIN_A)
-#define VRR_VMIN_MASK REG_GENMASK(15, 0)
+#define TRANS_VRR_VMIN(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VMIN_A)
+#define VRR_VMIN_MASK REG_GENMASK(15, 0)
#define _TRANS_VRR_VMAXSHIFT_A 0x60428
#define _TRANS_VRR_VMAXSHIFT_B 0x61428
#define _TRANS_VRR_VMAXSHIFT_C 0x62428
#define _TRANS_VRR_VMAXSHIFT_D 0x63428
-#define TRANS_VRR_VMAXSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, \
- _TRANS_VRR_VMAXSHIFT_A)
-#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16)
-#define VRR_VMAXSHIFT_DEC REG_BIT(16)
-#define VRR_VMAXSHIFT_INC_MASK REG_GENMASK(12, 0)
+#define TRANS_VRR_VMAXSHIFT(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VMAXSHIFT_A)
+#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16)
+#define VRR_VMAXSHIFT_DEC REG_BIT(16)
+#define VRR_VMAXSHIFT_INC_MASK REG_GENMASK(12, 0)
#define _TRANS_VRR_STATUS_A 0x6042c
#define _TRANS_VRR_STATUS_B 0x6142c
#define _TRANS_VRR_STATUS_C 0x6242c
#define _TRANS_VRR_STATUS_D 0x6342c
-#define TRANS_VRR_STATUS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS_A)
-#define VRR_STATUS_VMAX_REACHED REG_BIT(31)
-#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30)
-#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29)
-#define VRR_STATUS_NO_FLIP_FRAME REG_BIT(28)
-#define VRR_STATUS_VRR_EN_LIVE REG_BIT(27)
-#define VRR_STATUS_FLIPS_SERVICED REG_BIT(26)
-#define VRR_STATUS_VBLANK_MASK REG_GENMASK(22, 20)
-#define STATUS_FSM_IDLE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0)
-#define STATUS_FSM_WAIT_TILL_FDB REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1)
-#define STATUS_FSM_WAIT_TILL_FS REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2)
-#define STATUS_FSM_WAIT_TILL_FLIP REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3)
-#define STATUS_FSM_PIPELINE_FILL REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4)
-#define STATUS_FSM_ACTIVE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5)
-#define STATUS_FSM_LEGACY_VBLANK REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6)
+#define TRANS_VRR_STATUS(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_STATUS_A)
+#define VRR_STATUS_VMAX_REACHED REG_BIT(31)
+#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30)
+#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29)
+#define VRR_STATUS_NO_FLIP_FRAME REG_BIT(28)
+#define VRR_STATUS_VRR_EN_LIVE REG_BIT(27)
+#define VRR_STATUS_FLIPS_SERVICED REG_BIT(26)
+#define VRR_STATUS_VBLANK_MASK REG_GENMASK(22, 20)
+#define STATUS_FSM_IDLE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0)
+#define STATUS_FSM_WAIT_TILL_FDB REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1)
+#define STATUS_FSM_WAIT_TILL_FS REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2)
+#define STATUS_FSM_WAIT_TILL_FLIP REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3)
+#define STATUS_FSM_PIPELINE_FILL REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4)
+#define STATUS_FSM_ACTIVE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5)
+#define STATUS_FSM_LEGACY_VBLANK REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6)
#define _TRANS_VRR_VTOTAL_PREV_A 0x60480
#define _TRANS_VRR_VTOTAL_PREV_B 0x61480
#define _TRANS_VRR_VTOTAL_PREV_C 0x62480
#define _TRANS_VRR_VTOTAL_PREV_D 0x63480
-#define TRANS_VRR_VTOTAL_PREV(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, \
- _TRANS_VRR_VTOTAL_PREV_A)
-#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31)
-#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30)
-#define VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29)
-#define VRR_VTOTAL_PREV_FRAME_MASK REG_GENMASK(19, 0)
+#define TRANS_VRR_VTOTAL_PREV(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VTOTAL_PREV_A)
+#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31)
+#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30)
+#define VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29)
+#define VRR_VTOTAL_PREV_FRAME_MASK REG_GENMASK(19, 0)
#define _TRANS_VRR_FLIPLINE_A 0x60438
#define _TRANS_VRR_FLIPLINE_B 0x61438
#define _TRANS_VRR_FLIPLINE_C 0x62438
#define _TRANS_VRR_FLIPLINE_D 0x63438
-#define TRANS_VRR_FLIPLINE(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, \
- _TRANS_VRR_FLIPLINE_A)
-#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0)
+#define TRANS_VRR_FLIPLINE(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_FLIPLINE_A)
+#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0)
#define _TRANS_VRR_STATUS2_A 0x6043c
#define _TRANS_VRR_STATUS2_B 0x6143c
#define _TRANS_VRR_STATUS2_C 0x6243c
#define _TRANS_VRR_STATUS2_D 0x6343c
-#define TRANS_VRR_STATUS2(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS2_A)
-#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
+#define TRANS_VRR_STATUS2(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_STATUS2_A)
+#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
#define _TRANS_PUSH_A 0x60a70
#define _TRANS_PUSH_B 0x61a70
#define _TRANS_PUSH_C 0x62a70
#define _TRANS_PUSH_D 0x63a70
-#define TRANS_PUSH(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_PUSH_A)
-#define TRANS_PUSH_EN REG_BIT(31)
-#define TRANS_PUSH_SEND REG_BIT(30)
+#define TRANS_PUSH(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_PUSH_A)
+#define TRANS_PUSH_EN REG_BIT(31)
+#define TRANS_PUSH_SEND REG_BIT(30)
#define _TRANS_VRR_VSYNC_A 0x60078
-#define TRANS_VRR_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VSYNC_A)
-#define VRR_VSYNC_END_MASK REG_GENMASK(28, 16)
-#define VRR_VSYNC_END(vsync_end) REG_FIELD_PREP(VRR_VSYNC_END_MASK, (vsync_end))
-#define VRR_VSYNC_START_MASK REG_GENMASK(12, 0)
-#define VRR_VSYNC_START(vsync_start) REG_FIELD_PREP(VRR_VSYNC_START_MASK, (vsync_start))
-
-/*CMRR Registers*/
+#define TRANS_VRR_VSYNC(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VSYNC_A)
+#define VRR_VSYNC_END_MASK REG_GENMASK(28, 16)
+#define VRR_VSYNC_END(vsync_end) REG_FIELD_PREP(VRR_VSYNC_END_MASK, (vsync_end))
+#define VRR_VSYNC_START_MASK REG_GENMASK(12, 0)
+#define VRR_VSYNC_START(vsync_start) REG_FIELD_PREP(VRR_VSYNC_START_MASK, (vsync_start))
+
+/* Common register for HDMI EMP and DP AS SDP */
+#define _EMP_AS_SDP_TL_A 0x60204
+#define EMP_AS_SDP_TL(display, trans) _MMIO_TRANS2((display), (trans), _EMP_AS_SDP_TL_A)
+#define EMP_AS_SDP_DB_TL_MASK REG_GENMASK(12, 0)
+#define EMP_AS_SDP_DB_TL(db_transmit_line) REG_FIELD_PREP(EMP_AS_SDP_DB_TL_MASK, (db_transmit_line))
#define _TRANS_CMRR_M_LO_A 0x604F0
-#define TRANS_CMRR_M_LO(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_M_LO_A)
+#define TRANS_CMRR_M_LO(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_M_LO_A)
#define _TRANS_CMRR_M_HI_A 0x604F4
-#define TRANS_CMRR_M_HI(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_M_HI_A)
+#define TRANS_CMRR_M_HI(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_M_HI_A)
#define _TRANS_CMRR_N_LO_A 0x604F8
-#define TRANS_CMRR_N_LO(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_N_LO_A)
+#define TRANS_CMRR_N_LO(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_N_LO_A)
#define _TRANS_CMRR_N_HI_A 0x604FC
-#define TRANS_CMRR_N_HI(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_N_HI_A)
-
-#define VRR_CTL_CMRR_ENABLE REG_BIT(27)
+#define TRANS_CMRR_N_HI(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_N_HI_A)
#endif /* __INTEL_VRR_REGS__ */
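
For reference, the new EMP_AS_SDP_DB_TL() field follows the usual
REG_FIELD_PREP() pattern; with a hypothetical vsync start of 1080 lines:

	u32 val = EMP_AS_SDP_DB_TL(1080);
	/* == REG_FIELD_PREP(REG_GENMASK(12, 0), 1080) == 0x438 */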
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index c855426544cf..d77798499c57 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -5,9 +5,9 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fb.h"
@@ -695,15 +695,14 @@ static void glk_program_nearest_filter_coefs(struct intel_display *display,
GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
-static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
+static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter)
{
- if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
+ if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR)
return (PS_FILTER_PROGRAMMED |
- PS_Y_VERT_FILTER_SELECT(set) |
- PS_Y_HORZ_FILTER_SELECT(set) |
- PS_UV_VERT_FILTER_SELECT(set) |
- PS_UV_HORZ_FILTER_SELECT(set));
- }
+ PS_Y_VERT_FILTER_SELECT(0) |
+ PS_Y_HORZ_FILTER_SELECT(0) |
+ PS_UV_VERT_FILTER_SELECT(0) |
+ PS_UV_HORZ_FILTER_SELECT(0));
return PS_FILTER_MEDIUM;
}
@@ -761,7 +760,7 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
id = scaler_state->scaler_id;
ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
- skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
+ skl_scaler_get_filter_select(crtc_state->hw.scaling_filter);
trace_intel_pipe_scaler_update_arm(crtc, id, x, y, width, height);
@@ -827,7 +826,7 @@ skl_program_plane_scaler(struct intel_dsb *dsb,
}
ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode |
- skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
+ skl_scaler_get_filter_select(plane_state->hw.scaling_filter);
trace_intel_plane_scaler_update_arm(plane, scaler_id,
crtc_x, crtc_y, crtc_w, crtc_h);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index c7b336359a5e..e20972ddfa09 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -8,24 +8,24 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include "pxp/intel_pxp.h"
#include "i915_drv.h"
-#include "i915_reg.h"
-#include "intel_atomic_plane.h"
#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
-#include "pxp/intel_pxp.h"
static const u32 skl_plane_formats[] = {
DRM_FORMAT_C8,
@@ -2327,8 +2327,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
max_scale = skl_plane_max_scale(display, fb);
}
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- min_scale, max_scale, true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ min_scale, max_scale, true);
if (ret)
return ret;
@@ -2791,6 +2791,32 @@ static u8 tgl_plane_caps(struct intel_display *display,
return caps;
}
+static void skl_disable_tiling(struct intel_plane *plane)
+{
+ struct intel_plane_state *state = to_intel_plane_state(plane->base.state);
+ struct intel_display *display = to_intel_display(plane);
+ const struct drm_framebuffer *fb = state->hw.fb;
+ u32 plane_ctl;
+
+ plane_ctl = intel_de_read(display, PLANE_CTL(plane->pipe, plane->id));
+
+ if (intel_fb_uses_dpt(fb)) {
+ /* if DPT is enabled, keep tiling, but disable compression */
+ plane_ctl &= ~PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ } else {
+ /* if DPT is not supported, disable tiling, and update stride */
+ u32 stride = state->view.color_plane[0].scanout_stride / 64;
+
+ plane_ctl &= ~PLANE_CTL_TILED_MASK;
+ intel_de_write_fw(display, PLANE_STRIDE(plane->pipe, plane->id),
+ PLANE_STRIDE_(stride));
+ }
+ intel_de_write_fw(display, PLANE_CTL(plane->pipe, plane->id), plane_ctl);
+
+ intel_de_write_fw(display, PLANE_SURF(plane->pipe, plane->id),
+ skl_plane_surf(state, 0));
+}
+
struct intel_plane *
skl_universal_plane_create(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
@@ -2837,6 +2863,7 @@ skl_universal_plane_create(struct intel_display *display,
plane->max_height = skl_plane_max_height;
plane->min_cdclk = skl_plane_min_cdclk;
}
+ plane->disable_tiling = skl_disable_tiling;
if (DISPLAY_VER(display) >= 13)
plane->max_stride = adl_plane_max_stride;
@@ -3009,7 +3036,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
return;
}
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ intel_fb = intel_bo_alloc_framebuffer();
if (!intel_fb) {
drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
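
A note on the stride computation in skl_disable_tiling() above: the
scanout stride is divided by 64 on the assumption that, for linear
surfaces, PLANE_STRIDE is programmed in 64-byte units. A worked example
with a hypothetical 4096-byte linear stride:

	u32 stride = 4096 / 64;	/* -> PLANE_STRIDE_(64) */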
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 8080f777910a..222c069fdadb 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -6,12 +6,14 @@
#include <linux/debugfs.h>
#include <drm/drm_blend.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
+#include "soc/intel_dram.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
@@ -19,21 +21,38 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fixed.h"
+#include "intel_flipq.h"
#include "intel_pcode.h"
+#include "intel_plane.h"
#include "intel_wm.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
-/*It is expected that DSB can do posted writes to every register in
- * the pipe and planes within 100us. For flip queue use case, the
- * recommended DSB execution time is 100us + one SAGV block time.
- */
-#define DSB_EXE_TIME 100
+struct intel_dbuf_state {
+ struct intel_global_state base;
+
+ struct skl_ddb_entry ddb[I915_MAX_PIPES];
+ unsigned int weight[I915_MAX_PIPES];
+ u8 slices[I915_MAX_PIPES];
+ u8 enabled_slices;
+ u8 active_pipes;
+ u8 mdclk_cdclk_ratio;
+ bool joined_mbus;
+};
+
+#define to_intel_dbuf_state(global_state) \
+ container_of_const((global_state), struct intel_dbuf_state, base)
+
+#define intel_atomic_get_old_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
+#define intel_atomic_get_new_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
static void skl_sagv_disable(struct intel_display *display);
@@ -84,8 +103,6 @@ intel_has_sagv(struct intel_display *display)
static u32
intel_sagv_block_time(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 14) {
u32 val;
@@ -96,9 +113,9 @@ intel_sagv_block_time(struct intel_display *display)
u32 val = 0;
int ret;
- ret = snb_pcode_read(&i915->uncore,
- GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
- &val, NULL);
+ ret = intel_pcode_read(display->drm,
+ GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
if (ret) {
drm_dbg_kms(display->drm, "Couldn't read SAGV block time!\n");
return 0;
@@ -156,7 +173,6 @@ static void intel_sagv_init(struct intel_display *display)
*/
static void skl_sagv_enable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (!intel_has_sagv(display))
@@ -166,8 +182,8 @@ static void skl_sagv_enable(struct intel_display *display)
return;
drm_dbg_kms(display->drm, "Enabling SAGV\n");
- ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_ENABLE);
+ ret = intel_pcode_write(display->drm, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_ENABLE);
/* We don't need to wait for SAGV when enabling */
@@ -189,7 +205,6 @@ static void skl_sagv_enable(struct intel_display *display)
static void skl_sagv_disable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (!intel_has_sagv(display))
@@ -200,10 +215,9 @@ static void skl_sagv_disable(struct intel_display *display)
drm_dbg_kms(display->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_DISABLE,
- GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
- 1);
+ ret = intel_pcode_request(display->drm, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 1);
/*
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
@@ -229,7 +243,7 @@ static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
if (!new_bw_state)
return;
- if (!intel_can_enable_sagv(display, new_bw_state))
+ if (!intel_bw_can_enable_sagv(display, new_bw_state))
skl_sagv_disable(display);
}
@@ -242,74 +256,10 @@ static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
if (!new_bw_state)
return;
- if (intel_can_enable_sagv(display, new_bw_state))
+ if (intel_bw_can_enable_sagv(display, new_bw_state))
skl_sagv_enable(display);
}
-static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
-{
- struct intel_display *display = to_intel_display(state);
- const struct intel_bw_state *old_bw_state =
- intel_atomic_get_old_bw_state(state);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
- u16 old_mask, new_mask;
-
- if (!new_bw_state)
- return;
-
- old_mask = old_bw_state->qgv_points_mask;
- new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
-
- if (old_mask == new_mask)
- return;
-
- WARN_ON(!new_bw_state->base.changed);
-
- drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
- old_mask, new_mask);
-
- /*
- * Restrict required qgv points before updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(display, new_mask);
-}
-
-static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
-{
- struct intel_display *display = to_intel_display(state);
- const struct intel_bw_state *old_bw_state =
- intel_atomic_get_old_bw_state(state);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
- u16 old_mask, new_mask;
-
- if (!new_bw_state)
- return;
-
- old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
- new_mask = new_bw_state->qgv_points_mask;
-
- if (old_mask == new_mask)
- return;
-
- WARN_ON(!new_bw_state->base.changed);
-
- drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
- old_mask, new_mask);
-
- /*
- * Allow required qgv points after updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(display, new_mask);
-}
-
void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
@@ -443,16 +393,6 @@ bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
return skl_crtc_can_enable_sagv(crtc_state);
}
-bool intel_can_enable_sagv(struct intel_display *display,
- const struct intel_bw_state *bw_state)
-{
- if (DISPLAY_VER(display) < 11 &&
- bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
- return false;
-
- return bw_state->pipe_sagv_reject == 0;
-}
-
static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
u16 start, u16 end)
{
@@ -2233,7 +2173,7 @@ cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
}
return min(1, DIV_ROUND_UP(crtc_state->pixel_rate,
- 2 * cdclk_state->logical.cdclk));
+ 2 * intel_cdclk_logical(cdclk_state)));
}
static int
@@ -2677,6 +2617,97 @@ static char enast(bool enable)
return enable ? '*' : ' ';
}
+static noinline_for_stack void
+skl_print_plane_changes(struct intel_display *display,
+ struct intel_plane *plane,
+ const struct skl_plane_wm *old_wm,
+ const struct skl_plane_wm *new_wm)
+{
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
+ enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
+ enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
+ enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
+ enast(old_wm->trans_wm.enable),
+ enast(old_wm->sagv.wm0.enable),
+ enast(old_wm->sagv.trans_wm.enable),
+ enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
+ enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
+ enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
+ enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
+ enast(new_wm->trans_wm.enable),
+ enast(new_wm->sagv.wm0.enable),
+ enast(new_wm->sagv.trans_wm.enable));
+
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
+ enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
+ enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
+ enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
+ enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
+
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].blocks, old_wm->wm[1].blocks,
+ old_wm->wm[2].blocks, old_wm->wm[3].blocks,
+ old_wm->wm[4].blocks, old_wm->wm[5].blocks,
+ old_wm->wm[6].blocks, old_wm->wm[7].blocks,
+ old_wm->trans_wm.blocks,
+ old_wm->sagv.wm0.blocks,
+ old_wm->sagv.trans_wm.blocks,
+ new_wm->wm[0].blocks, new_wm->wm[1].blocks,
+ new_wm->wm[2].blocks, new_wm->wm[3].blocks,
+ new_wm->wm[4].blocks, new_wm->wm[5].blocks,
+ new_wm->wm[6].blocks, new_wm->wm[7].blocks,
+ new_wm->trans_wm.blocks,
+ new_wm->sagv.wm0.blocks,
+ new_wm->sagv.trans_wm.blocks);
+
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ old_wm->sagv.wm0.min_ddb_alloc,
+ old_wm->sagv.trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc,
+ new_wm->sagv.wm0.min_ddb_alloc,
+ new_wm->sagv.trans_wm.min_ddb_alloc);
+}
+
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
@@ -2706,7 +2737,6 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
-
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
plane->base.base.id, plane->base.name,
@@ -2724,89 +2754,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_plane_wm_equals(display, old_wm, new_wm))
continue;
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
- enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
- enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
- enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
- enast(old_wm->trans_wm.enable),
- enast(old_wm->sagv.wm0.enable),
- enast(old_wm->sagv.trans_wm.enable),
- enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
- enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
- enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
- enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
- enast(new_wm->trans_wm.enable),
- enast(new_wm->sagv.wm0.enable),
- enast(new_wm->sagv.trans_wm.enable));
-
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
- " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
- enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
- enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
- enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
- enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
- enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
- enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
- enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
- enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
- enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
- enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
- enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
- enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
- enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
- enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
- enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
- enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
- enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
- enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
- enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
- enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
- enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
-
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].blocks, old_wm->wm[1].blocks,
- old_wm->wm[2].blocks, old_wm->wm[3].blocks,
- old_wm->wm[4].blocks, old_wm->wm[5].blocks,
- old_wm->wm[6].blocks, old_wm->wm[7].blocks,
- old_wm->trans_wm.blocks,
- old_wm->sagv.wm0.blocks,
- old_wm->sagv.trans_wm.blocks,
- new_wm->wm[0].blocks, new_wm->wm[1].blocks,
- new_wm->wm[2].blocks, new_wm->wm[3].blocks,
- new_wm->wm[4].blocks, new_wm->wm[5].blocks,
- new_wm->wm[6].blocks, new_wm->wm[7].blocks,
- new_wm->trans_wm.blocks,
- new_wm->sagv.wm0.blocks,
- new_wm->sagv.trans_wm.blocks);
-
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
- old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
- old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
- old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
- old_wm->trans_wm.min_ddb_alloc,
- old_wm->sagv.wm0.min_ddb_alloc,
- old_wm->sagv.trans_wm.min_ddb_alloc,
- new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
- new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
- new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
- new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
- new_wm->trans_wm.min_ddb_alloc,
- new_wm->sagv.wm0.min_ddb_alloc,
- new_wm->sagv.trans_wm.min_ddb_alloc);
+ skl_print_plane_changes(display, plane, old_wm, new_wm);
}
}
}
@@ -2910,67 +2858,79 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
return 0;
}
-/*
- * If Fixed Refresh Rate or For VRR case Vmin = Vmax = Flipline:
- * Program DEEP PKG_C_LATENCY Pkg C with highest valid latency from
- * watermark level1 and up and above. If watermark level 1 is
- * invalid program it with all 1's.
- * Program PKG_C_LATENCY Added Wake Time = DSB execution time
- * If Variable Refresh Rate where Vmin != Vmax != Flipline:
- * Program DEEP PKG_C_LATENCY Pkg C with all 1's.
- * Program PKG_C_LATENCY Added Wake Time = 0
- */
+static int pkgc_max_linetime(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i, max_linetime;
+
+ /*
+ * Apparently the hardware uses WM_LINETIME internally for
+ * this stuff, compute everything based on that.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ display->pkgc.disable[crtc->pipe] = crtc_state->vrr.enable;
+ display->pkgc.linetime[crtc->pipe] = DIV_ROUND_UP(crtc_state->linetime, 8);
+ }
+
+ max_linetime = 0;
+ for_each_intel_crtc(display->drm, crtc) {
+ if (display->pkgc.disable[crtc->pipe])
+ return 0;
+
+ max_linetime = max(display->pkgc.linetime[crtc->pipe], max_linetime);
+ }
+
+ return max_linetime;
+}
+
void
intel_program_dpkgc_latency(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- u32 latency = LNL_PKG_C_LATENCY_MASK;
- u32 added_wake_time = 0;
- u32 max_linetime = 0;
- u32 clear, val;
- bool fixed_refresh_rate = false;
- int i;
+ int max_linetime, latency, added_wake_time = 0;
if (DISPLAY_VER(display) < 20)
return;
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!new_crtc_state->vrr.enable ||
- (new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
- new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline))
- fixed_refresh_rate = true;
+ mutex_lock(&display->wm.wm_mutex);
- max_linetime = max(new_crtc_state->linetime, max_linetime);
- }
+ latency = skl_watermark_max_latency(display, 1);
- if (fixed_refresh_rate) {
- added_wake_time = DSB_EXE_TIME +
- display->sagv.block_time_us;
+ /* FIXME runtime changes to enable_flipq are racy */
+ if (display->params.enable_flipq)
+ added_wake_time = intel_flipq_exec_time_us(display);
- latency = skl_watermark_max_latency(display, 1);
+ /*
+ * Wa_22020432604
+ * "PKG_C_LATENCY Added Wake Time field is not working"
+ */
+ if (latency && IS_DISPLAY_VER(display, 20, 30)) {
+ latency += added_wake_time;
+ added_wake_time = 0;
+ }
- /* Wa_22020432604 */
- if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) {
- latency += added_wake_time;
- added_wake_time = 0;
- }
+ max_linetime = pkgc_max_linetime(state);
- /* Wa_22020299601 */
- if ((latency && max_linetime) &&
- (DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30)) {
- latency = max_linetime * DIV_ROUND_UP(latency, max_linetime);
- } else if (!latency) {
- latency = LNL_PKG_C_LATENCY_MASK;
- }
+ if (max_linetime == 0 || latency == 0) {
+ latency = REG_FIELD_GET(LNL_PKG_C_LATENCY_MASK,
+ LNL_PKG_C_LATENCY_MASK);
+ added_wake_time = 0;
+ } else {
+ /*
+ * Wa_22020299601
+ * "Increase the latency programmed in PKG_C_LATENCY Pkg C Latency to be a
+ * multiple of the pipeline time from WM_LINETIME"
+ */
+ latency = roundup(latency, max_linetime);
}
- clear = LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
- val = REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency) |
- REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
+ intel_de_write(display, LNL_PKG_C_LATENCY,
+ REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time) |
+ REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency));
- intel_de_rmw(display, LNL_PKG_C_LATENCY, clear, val);
+ mutex_unlock(&display->wm.wm_mutex);
}
static int
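
Worked example of the Wa_22020299601 rounding above (numbers
hypothetical): with a 30 us worst-case memory latency and an 8 us line
time, the programmed latency becomes the next multiple of the line time:

	roundup(30, 8);	/* -> 32 */
	roundup(32, 8);	/* -> 32 (already a multiple) */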
@@ -3008,7 +2968,7 @@ skl_compute_wm(struct intel_atomic_state *state)
* drm_atomic_check_only() gets upset if we pull more crtcs
* into the state, so we have to calculate this based on the
* individual intel_crtc_can_enable_sagv() rather than
- * the overall intel_can_enable_sagv(). Otherwise the
+ * the overall intel_bw_can_enable_sagv(). Otherwise the
* crtcs not included in the commit would not switch to the
* SAGV watermarks when we are about to enable SAGV, and that
* would lead to underruns. This does mean extra power draw
@@ -3184,8 +3144,6 @@ void skl_watermark_ipc_update(struct intel_display *display)
static bool skl_watermark_ipc_can_enable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
/* Display WA #0477 WaDisableIPC: skl */
if (display->platform.skylake)
return false;
@@ -3193,8 +3151,11 @@ static bool skl_watermark_ipc_can_enable(struct intel_display *display)
/* Display WA #1141: SKL:all KBL:all CFL */
if (display->platform.kabylake ||
display->platform.coffeelake ||
- display->platform.cometlake)
- return i915->dram_info.symmetric_memory;
+ display->platform.cometlake) {
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
+
+ return dram_info->symmetric_memory;
+ }
return true;
}
@@ -3213,8 +3174,7 @@ static void
adjust_wm_latency(struct intel_display *display,
u16 wm[], int num_levels, int read_latency)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
int i, level;
/*
@@ -3250,7 +3210,7 @@ adjust_wm_latency(struct intel_display *display,
* any underrun. If not able to get Dimm info assume 16GB dimm
* to avoid any underrun.
*/
- if (wm_lv_0_adjust_needed)
+ if (!display->platform.dg2 && dram_info->wm_lv_0_adjust_needed)
wm[0] += 1;
}
@@ -3276,7 +3236,6 @@ static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int num_levels = display->wm.num_levels;
int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2;
int mult = display->platform.dg2 ? 2 : 1;
@@ -3285,7 +3244,7 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
- ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
@@ -3298,7 +3257,7 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
- ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
@@ -3690,6 +3649,38 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
gen9_dbuf_slices_update(display, new_slices);
}
+int intel_dbuf_num_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+ return hweight8(dbuf_state->enabled_slices);
+}
+
+int intel_dbuf_num_active_pipes(const struct intel_dbuf_state *dbuf_state)
+{
+ return hweight8(dbuf_state->active_pipes);
+}
+
+bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+
+ if (new_dbuf_state &&
+ new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
+ return true;
+
+ if (DISPLAY_VER(display) < 30) {
+ if (new_dbuf_state &&
+ new_dbuf_state->enabled_slices !=
+ old_dbuf_state->enabled_slices)
+ return true;
+ }
+
+ return false;
+}
+
static void skl_mbus_sanitize(struct intel_display *display)
{
struct intel_dbuf_state *dbuf_state =
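
With struct intel_dbuf_state now private to skl_watermark.c, outside
code must go through the new accessors. A hypothetical caller counting
enabled DBUF slices (hweight8() counts the set bits in the mask):

	/* enabled_slices == 0b0011 -> 2 */
	int num_slices = intel_dbuf_num_enabled_slices(dbuf_state);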
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index 95b0b599d5c3..62790816f030 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -8,17 +8,15 @@
#include <linux/types.h>
-#include "intel_display_limits.h"
-#include "intel_global_state.h"
-#include "intel_wm_types.h"
-
+enum plane_id;
struct intel_atomic_state;
-struct intel_bw_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dbuf_state;
struct intel_display;
struct intel_plane;
struct intel_plane_state;
+struct skl_ddb_entry;
struct skl_pipe_wm;
struct skl_wm_level;
@@ -27,8 +25,6 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state);
-bool intel_can_enable_sagv(struct intel_display *display,
- const struct intel_bw_state *bw_state);
bool intel_has_sagv(struct intel_display *display);
u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
@@ -63,28 +59,11 @@ unsigned int skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_st
struct intel_plane *plane, int width,
int height, int cpp);
-struct intel_dbuf_state {
- struct intel_global_state base;
-
- struct skl_ddb_entry ddb[I915_MAX_PIPES];
- unsigned int weight[I915_MAX_PIPES];
- u8 slices[I915_MAX_PIPES];
- u8 enabled_slices;
- u8 active_pipes;
- u8 mdclk_cdclk_ratio;
- bool joined_mbus;
-};
-
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
-#define to_intel_dbuf_state(global_state) \
- container_of_const((global_state), struct intel_dbuf_state, base)
-
-#define intel_atomic_get_old_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
-#define intel_atomic_get_new_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
+int intel_dbuf_num_enabled_slices(const struct intel_dbuf_state *dbuf_state);
+int intel_dbuf_num_active_pipes(const struct intel_dbuf_state *dbuf_state);
int intel_dbuf_init(struct intel_display *display);
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
@@ -98,5 +77,7 @@ void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
void intel_program_dpkgc_latency(struct intel_atomic_state *state);
+bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state);
+
#endif /* __SKL_WATERMARK_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 2007bb9d974d..6d9f3312de7e 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -30,15 +30,17 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
@@ -253,18 +255,16 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
static void band_gap_reset(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ vlv_flisdsi_get(display->drm);
- vlv_flisdsi_get(dev_priv);
-
- vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
- vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
- vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
+ vlv_flisdsi_write(display->drm, 0x08, 0x0001);
+ vlv_flisdsi_write(display->drm, 0x0F, 0x0005);
+ vlv_flisdsi_write(display->drm, 0x0F, 0x0025);
udelay(150);
- vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
- vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
+ vlv_flisdsi_write(display->drm, 0x0F, 0x0000);
+ vlv_flisdsi_write(display->drm, 0x08, 0x0000);
- vlv_flisdsi_put(dev_priv);
+ vlv_flisdsi_put(display->drm);
}
static int intel_dsi_compute_config(struct intel_encoder *encoder,
@@ -457,17 +457,16 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
- vlv_flisdsi_get(dev_priv);
+ vlv_flisdsi_get(display->drm);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
* needed every time after power gate */
- vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
- vlv_flisdsi_put(dev_priv);
+ vlv_flisdsi_write(display->drm, 0x04, 0x0004);
+ vlv_flisdsi_put(display->drm);
/* bandgap reset is needed every time we do a power gate */
band_gap_reset(display);
@@ -1020,7 +1019,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
enum port port;
- u16 hactive, hfp, hsync, hbp, vfp, vsync;
+ u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
u16 hfp_sw, hsync_sw, hbp_sw;
u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
crtc_hblank_start_sw, crtc_hblank_end_sw;
@@ -1084,6 +1083,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
/* vertical values are in terms of lines */
vfp = intel_de_read(display, MIPI_VFP_COUNT(display, port));
+ vbp = intel_de_read(display, MIPI_VBP_COUNT(display, port));
vsync = intel_de_read(display, MIPI_VSYNC_PADDING_COUNT(display, port));
adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
@@ -1092,6 +1092,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
+ drm_WARN_ON(display->drm, adjusted_mode->crtc_vdisplay +
+ vfp + vsync + vbp != adjusted_mode->crtc_vtotal);
adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay;
adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start;
adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
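
For a concrete sanity check of the new drm_WARN_ON() above: with standard CEA 1080p timings, crtc_vdisplay = 1080, vfp = 4, vsync = 5, vbp = 36, and indeed 1080 + 4 + 5 + 36 = 1125 = crtc_vtotal, so the warning stays silent; any firmware programming that violates this identity now gets flagged. (The numbers illustrate the check; they are not taken from the patch.)
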
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 7ce924a5ef90..d42b61e6f076 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -28,7 +28,9 @@
#include <linux/kernel.h>
#include <linux/string_helpers.h>
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
@@ -214,15 +216,14 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
drm_dbg_kms(display->drm, "\n");
- vlv_cck_get(dev_priv);
+ vlv_cck_get(display->drm);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, 0);
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL,
config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);
/* wait at least 0.5 us after ungating before enabling VCO,
@@ -230,16 +231,16 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
*/
usleep_range(10, 50);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
- if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
+ if (wait_for(vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL) &
DSI_PLL_LOCK, 20)) {
- vlv_cck_put(dev_priv);
+ vlv_cck_put(display->drm);
drm_err(display->drm, "DSI PLL lock failed\n");
return;
}
- vlv_cck_put(dev_priv);
+ vlv_cck_put(display->drm);
drm_dbg_kms(display->drm, "DSI PLL locked\n");
}
@@ -247,19 +248,18 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
void vlv_dsi_pll_disable(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
drm_dbg_kms(display->drm, "\n");
- vlv_cck_get(dev_priv);
+ vlv_cck_get(display->drm);
- tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ tmp = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL);
tmp &= ~DSI_PLL_VCO_EN;
tmp |= DSI_PLL_LDO_GATE;
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, tmp);
- vlv_cck_put(dev_priv);
+ vlv_cck_put(display->drm);
}
bool bxt_dsi_pll_is_enabled(struct intel_display *display)
@@ -323,15 +323,14 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pll_ctl, pll_div;
drm_dbg_kms(display->drm, "\n");
- vlv_cck_get(dev_priv);
- pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
- pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
- vlv_cck_put(dev_priv);
+ vlv_cck_get(display->drm);
+ pll_ctl = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL);
+ pll_div = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_DIVIDER);
+ vlv_cck_put(display->drm);
config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
config->dsi_pll.div = pll_div;
@@ -592,12 +591,11 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
static void assert_dsi_pll(struct intel_display *display, bool state)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
bool cur_state;
- vlv_cck_get(i915);
- cur_state = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN;
- vlv_cck_put(i915);
+ vlv_cck_get(display->drm);
+ cur_state = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN;
+ vlv_cck_put(display->drm);
INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
diff --git a/drivers/gpu/drm/i915/display/vlv_sideband.c b/drivers/gpu/drm/i915/display/vlv_sideband.c
new file mode 100644
index 000000000000..e18045f2b89d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_sideband.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_print.h>
+
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "intel_dpio_phy.h"
+#include "vlv_sideband.h"
+
+static enum vlv_iosf_sb_unit vlv_dpio_phy_to_unit(struct intel_display *display,
+ enum dpio_phy phy)
+{
+ /*
+ * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
+ * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
+ */
+ if (display->platform.cherryview)
+ return phy == DPIO_PHY0 ? VLV_IOSF_SB_DPIO_2 : VLV_IOSF_SB_DPIO;
+ else
+ return VLV_IOSF_SB_DPIO;
+}
+
+u32 vlv_dpio_read(struct drm_device *drm, enum dpio_phy phy, int reg)
+{
+ struct intel_display *display = to_intel_display(drm);
+ enum vlv_iosf_sb_unit unit = vlv_dpio_phy_to_unit(display, phy);
+ u32 val;
+
+ val = vlv_iosf_sb_read(drm, unit, reg);
+
+ /*
+ * FIXME: There might be some registers where all 1's is a valid value,
+ * so ideally we should check the register offset instead...
+ */
+ drm_WARN(display->drm, val == 0xffffffff,
+ "DPIO PHY%d read reg 0x%x == 0x%x\n",
+ phy, reg, val);
+
+ return val;
+}
+
+void vlv_dpio_write(struct drm_device *drm,
+ enum dpio_phy phy, int reg, u32 val)
+{
+ struct intel_display *display = to_intel_display(drm);
+ enum vlv_iosf_sb_unit unit = vlv_dpio_phy_to_unit(display, phy);
+
+ vlv_iosf_sb_write(drm, unit, reg, val);
+}
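
A hedged usage sketch of the new drm_device-based DPIO entry points, showing the usual read-modify-write shape; EXAMPLE_DPIO_REG is a placeholder offset, not a real register:

static void example_dpio_rmw(struct drm_device *drm, enum dpio_phy phy)
{
	u32 val;

	vlv_dpio_get(drm);	/* acquires both DPIO IOSF units, see vlv_sideband.h */
	val = vlv_dpio_read(drm, phy, EXAMPLE_DPIO_REG);
	val |= BIT(0);
	vlv_dpio_write(drm, phy, EXAMPLE_DPIO_REG, val);
	vlv_dpio_put(drm);
}
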
diff --git a/drivers/gpu/drm/i915/display/vlv_sideband.h b/drivers/gpu/drm/i915/display/vlv_sideband.h
new file mode 100644
index 000000000000..2c240d81fead
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_sideband.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef _VLV_SIDEBAND_H_
+#define _VLV_SIDEBAND_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+#include "vlv_iosf_sb.h"
+#include "vlv_iosf_sb_reg.h"
+
+enum dpio_phy;
+struct drm_device;
+
+static inline void vlv_bunit_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_BUNIT));
+}
+
+static inline u32 vlv_bunit_read(struct drm_device *drm, u32 reg)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_BUNIT, reg);
+}
+
+static inline void vlv_bunit_write(struct drm_device *drm, u32 reg, u32 val)
+{
+ vlv_iosf_sb_write(drm, VLV_IOSF_SB_BUNIT, reg, val);
+}
+
+static inline void vlv_bunit_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_BUNIT));
+}
+
+static inline void vlv_cck_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_CCK));
+}
+
+static inline u32 vlv_cck_read(struct drm_device *drm, u32 reg)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_CCK, reg);
+}
+
+static inline void vlv_cck_write(struct drm_device *drm, u32 reg, u32 val)
+{
+ vlv_iosf_sb_write(drm, VLV_IOSF_SB_CCK, reg, val);
+}
+
+static inline void vlv_cck_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_CCK));
+}
+
+static inline void vlv_ccu_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_CCU));
+}
+
+static inline u32 vlv_ccu_read(struct drm_device *drm, u32 reg)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_CCU, reg);
+}
+
+static inline void vlv_ccu_write(struct drm_device *drm, u32 reg, u32 val)
+{
+ vlv_iosf_sb_write(drm, VLV_IOSF_SB_CCU, reg, val);
+}
+
+static inline void vlv_ccu_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_CCU));
+}
+
+static inline void vlv_dpio_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_DPIO) | BIT(VLV_IOSF_SB_DPIO_2));
+}
+
+#ifdef I915
+u32 vlv_dpio_read(struct drm_device *drm, enum dpio_phy phy, int reg);
+void vlv_dpio_write(struct drm_device *drm,
+ enum dpio_phy phy, int reg, u32 val);
+#else
+static inline u32 vlv_dpio_read(struct drm_device *drm, int phy, int reg)
+{
+ return 0;
+}
+static inline void vlv_dpio_write(struct drm_device *drm,
+ int phy, int reg, u32 val)
+{
+}
+#endif
+
+static inline void vlv_dpio_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_DPIO) | BIT(VLV_IOSF_SB_DPIO_2));
+}
+
+static inline void vlv_flisdsi_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_FLISDSI));
+}
+
+static inline u32 vlv_flisdsi_read(struct drm_device *drm, u32 reg)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_FLISDSI, reg);
+}
+
+static inline void vlv_flisdsi_write(struct drm_device *drm, u32 reg, u32 val)
+{
+ vlv_iosf_sb_write(drm, VLV_IOSF_SB_FLISDSI, reg, val);
+}
+
+static inline void vlv_flisdsi_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_FLISDSI));
+}
+
+static inline void vlv_nc_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_NC));
+}
+
+static inline u32 vlv_nc_read(struct drm_device *drm, u8 addr)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_NC, addr);
+}
+
+static inline void vlv_nc_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_NC));
+}
+
+static inline void vlv_punit_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_PUNIT));
+}
+
+static inline u32 vlv_punit_read(struct drm_device *drm, u32 addr)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_PUNIT, addr);
+}
+
+static inline int vlv_punit_write(struct drm_device *drm, u32 addr, u32 val)
+{
+ return vlv_iosf_sb_write(drm, VLV_IOSF_SB_PUNIT, addr, val);
+}
+
+static inline void vlv_punit_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_PUNIT));
+}
+
+#endif /* _VLV_SIDEBAND_H_ */
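
To make the wrapper pattern concrete: each get/put pair brackets one or more read/write calls on the same unit, serializing sideband access for the duration. A minimal sketch (PUNIT_REG_GPU_FREQ_STS is used elsewhere in this series; the helper itself is hypothetical):

u32 example_read_punit_status(struct drm_device *drm)
{
	u32 freq_sts;

	vlv_punit_get(drm);
	freq_sts = vlv_punit_read(drm, PUNIT_REG_GPU_FREQ_STS);
	vlv_punit_put(drm);

	return freq_sts;
}
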
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 7a0cc51923b3..ef3b14ae2e0d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -3,7 +3,6 @@
* Copyright © 2014-2016 Intel Corporation
*/
-#include "display/intel_display.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index f6d37dff320d..75f5b0e871ef 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -5,7 +5,6 @@
#include <linux/anon_inodes.h>
#include <linux/mman.h>
-#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <drm/drm_cache.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index c34f41605b46..565f8fa330db 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -16,7 +16,9 @@
#include "i915_gem_ww.h"
#include "i915_vma_types.h"
+struct drm_scanout_buffer;
enum intel_region_id;
+struct intel_framebuffer;
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
@@ -691,6 +693,10 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void);
+int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb);
+void i915_gem_object_panic_finish(struct intel_framebuffer *fb);
+
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj: the object to map into kernel address space
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
index 9fbf14867a2a..b6dc3d1b9bb1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
@@ -77,7 +77,7 @@ i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
* Set object's frontbuffer pointer. If frontbuffer is already set for the
* object, keep it and return its pointer to the caller. Please note that RCU
* mechanism is used to handle e.g. ongoing removal of frontbuffer pointer. This
- * function is protected by i915->display.fb_tracking.lock
+ * function is protected by i915->display->fb_tracking.lock
*
* Return: pointer to frontbuffer which was set.
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 7f83f8bdc8fb..c16a57160b26 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -4,8 +4,11 @@
*/
#include <drm/drm_cache.h>
+#include <drm/drm_panic.h>
#include <linux/vmalloc.h>
+#include "display/intel_fb.h"
+#include "display/intel_display_types.h"
#include "gt/intel_gt.h"
#include "gt/intel_tlb.h"
@@ -354,6 +357,145 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
return vaddr ?: ERR_PTR(-ENOMEM);
}
+struct i915_panic_data {
+ struct page **pages;
+ int page;
+ void *vaddr;
+};
+
+struct i915_framebuffer {
+ struct intel_framebuffer base;
+ struct i915_panic_data panic;
+};
+
+static inline struct i915_panic_data *to_i915_panic_data(struct intel_framebuffer *fb)
+{
+ return &container_of_const(fb, struct i915_framebuffer, base)->panic;
+}
+
+static void i915_panic_kunmap(struct i915_panic_data *panic)
+{
+ if (panic->vaddr) {
+ drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
+ kunmap_local(panic->vaddr);
+ panic->vaddr = NULL;
+ }
+}
+
+static struct page **i915_gem_object_panic_pages(struct drm_i915_gem_object *obj)
+{
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
+ struct page *page;
+ struct page **pages;
+ struct sgt_iter iter;
+
+ /* For a 3840x2160 32-bit framebuffer, this should require ~64K */
+ pages = kmalloc_array(n_pages, sizeof(*pages), GFP_ATOMIC);
+ if (!pages)
+ return NULL;
+
+ i = 0;
+ for_each_sgt_page(page, iter, obj->mm.pages)
+ pages[i++] = page;
+ return pages;
+}
+
+static void i915_gem_object_panic_map_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ unsigned int offset = fb->panic_tiling(sb->width, x, y);
+
+ iosys_map_wr(&sb->map[0], offset, u32, color);
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page and write the pixel.
+ * Keep the mapping from the previous pixel when possible, to avoid
+ * excessive map/unmap churn.
+ */
+static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ unsigned int new_page;
+ unsigned int offset;
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct i915_panic_data *panic = to_i915_panic_data(fb);
+
+ if (fb->panic_tiling)
+ offset = fb->panic_tiling(sb->width, x, y);
+ else
+ offset = y * sb->pitch[0] + x * sb->format->cpp[0];
+
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != panic->page) {
+ i915_panic_kunmap(panic);
+ panic->page = new_page;
+ panic->vaddr =
+ kmap_local_page_try_from_panic(panic->pages[panic->page]);
+ }
+ if (panic->vaddr) {
+ u32 *pix = panic->vaddr + offset;
+ *pix = color;
+ }
+}
+
+struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void)
+{
+ struct i915_framebuffer *i915_fb;
+
+ i915_fb = kzalloc(sizeof(*i915_fb), GFP_KERNEL);
+ if (i915_fb)
+ return &i915_fb->base;
+ return NULL;
+}
+
+/*
+ * Set up the GEM framebuffer for drm_panic access.
+ * Use the current vaddr if it exists, or set up a list of pages.
+ * pfn is not supported yet.
+ */
+int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
+{
+ enum i915_map_type has_type;
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct i915_panic_data *panic = to_i915_panic_data(fb);
+ struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
+ void *ptr;
+
+ ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ if (ptr) {
+ if (i915_gem_object_has_iomem(obj))
+ iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)ptr);
+ else
+ iosys_map_set_vaddr(&sb->map[0], ptr);
+
+ if (fb->panic_tiling)
+ sb->set_pixel = i915_gem_object_panic_map_set_pixel;
+ return 0;
+ }
+ if (i915_gem_object_has_struct_page(obj)) {
+ panic->pages = i915_gem_object_panic_pages(obj);
+ if (!panic->pages)
+ return -ENOMEM;
+ panic->page = -1;
+ sb->set_pixel = i915_gem_object_panic_page_set_pixel;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+void i915_gem_object_panic_finish(struct intel_framebuffer *fb)
+{
+ struct i915_panic_data *panic = to_i915_panic_data(fb);
+
+ i915_panic_kunmap(panic);
+ panic->page = -1;
+ kfree(panic->pages);
+ panic->pages = NULL;
+}
+
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
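
A hedged sketch of the drm_panic flow these helpers slot into (the real caller lives in the drm panic handler; this standalone function is illustrative only and assumes sb->private points at the framebuffer, as the setup path expects):

static void example_panic_draw(struct drm_scanout_buffer *sb,
			       struct intel_framebuffer *fb)
{
	if (i915_gem_object_panic_setup(sb))
		return;

	/* setup picked either the map-based or the per-page set_pixel */
	sb->set_pixel(sb, 0, 0, 0xffffffff);

	i915_gem_object_panic_finish(fb);
}
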
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 9cbb0f68a5bb..e3d188455f67 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -303,7 +303,6 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
.nr_to_write = SWAP_CLUSTER_MAX,
.range_start = 0,
.range_end = LLONG_MAX,
- .for_reclaim = 1,
};
struct folio *folio = NULL;
int error = 0;
@@ -318,7 +317,7 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
if (folio_mapped(folio))
folio_redirty_for_writepage(&wbc, folio);
else
- error = shmem_writeout(folio, &wbc);
+ error = shmem_writeout(folio, NULL, NULL);
}
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 7127e90c1a8f..991666fd9f85 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -106,11 +106,6 @@ static void fence_set_priority(struct dma_fence *fence,
rcu_read_unlock();
}
-static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
-{
- return fence->ops == &dma_fence_chain_ops;
-}
-
void i915_gem_fence_wait_priority(struct dma_fence *fence,
const struct i915_sched_attr *attr)
{
@@ -126,7 +121,7 @@ void i915_gem_fence_wait_priority(struct dma_fence *fence,
for (i = 0; i < array->num_fences; i++)
fence_set_priority(array->fences[i], attr);
- } else if (__dma_fence_is_chain(fence)) {
+ } else if (dma_fence_is_chain(fence)) {
struct dma_fence *iter;
/* The chain is ordered; if we boost the last, we boost all */
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index bac15196b4d2..86d9d2fcb6a6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -5,6 +5,7 @@
#include "i915_selftest.h"
+#include "display/intel_display_core.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 325da0414d94..f6a98cf1e5a5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -79,6 +79,29 @@ struct lock_class_key;
#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
+#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
+#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
+
+#define __ENGINE_INSTANCES_MASK(mask, first, count) ({ \
+ unsigned int first__ = (first); \
+ unsigned int count__ = (count); \
+ ((mask) & GENMASK(first__ + count__ - 1, first__)) >> first__; \
+})
+
+#define ENGINE_INSTANCES_MASK(gt, first, count) \
+ __ENGINE_INSTANCES_MASK((gt)->info.engine_mask, first, count)
+
+#define RCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
+#define BCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
+#define VDBOX_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
+#define VEBOX_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
+#define CCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
+
#define GEN6_RING_FAULT_REG_READ(engine__) \
intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))
@@ -355,4 +378,12 @@ u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value);
u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value);
u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value);
+#define rb_to_uabi_engine(rb) \
+ rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
+
+#define for_each_uabi_engine(engine__, i915__) \
+ for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
+ (engine__); \
+ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
+
#endif /* _INTEL_RINGBUFFER_H_ */
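
For illustration of the relocated helpers: ENGINE_INSTANCES_MASK() shifts a class's bits down to zero-based instance positions, so instance N always tests as BIT(N) regardless of where the class sits in the global engine mask, and for_each_uabi_engine() walks the uabi rb-tree in order. A hypothetical sketch:

static bool example_has_vcs2(struct intel_gt *gt)
{
	/* an engine_mask with VCS0 and VCS2 set yields VDBOX_MASK(gt) == 0b101 */
	return VDBOX_MASK(gt) & BIT(2);
}

static void example_walk_engines(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915)
		;	/* engine->uabi_class / uabi_instance available here */
}
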
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 0c723e7c71a2..889e61843ff3 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -328,7 +328,7 @@ static bool fence_is_active(const struct i915_fence_reg *fence)
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
- struct intel_display *display = &ggtt->vm.i915->display;
+ struct intel_display *display = ggtt->vm.i915->display;
struct i915_fence_reg *active = NULL;
struct i915_fence_reg *fence, *fn;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 3182f19b9837..c7f59d60fac6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -6,6 +6,8 @@
#include <linux/string_helpers.h>
#include <linux/suspend.h>
+#include "display/intel_display_power.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_params.h"
@@ -70,7 +72,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
struct drm_i915_private *i915 = gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
GT_TRACE(gt, "\n");
@@ -104,7 +106,7 @@ static int __gt_park(struct intel_wakeref *wf)
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
struct drm_i915_private *i915 = gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
GT_TRACE(gt, "\n");
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index b635aa2820d9..87ef85483bae 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -22,7 +22,7 @@
#include "intel_rps.h"
#include "intel_runtime_pm.h"
#include "intel_uncore.h"
-#include "vlv_sideband.h"
+#include "vlv_iosf_sb.h"
void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
@@ -366,9 +366,9 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
drm_printf(p, "SW control enabled: %s\n",
str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));
- vlv_punit_get(i915);
- freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ freq_sts = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index d1a382dfaa1d..93298820bee2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -250,11 +250,17 @@ void intel_gt_watchdog_work(struct work_struct *work)
llist_for_each_entry_safe(rq, rn, first, watchdog.link) {
if (!i915_request_completed(rq)) {
struct dma_fence *f = &rq->fence;
+ const char __rcu *timeline;
+ const char __rcu *driver;
+ rcu_read_lock();
+ driver = dma_fence_driver_name(f);
+ timeline = dma_fence_timeline_name(f);
pr_notice("Fence expiration time out i915-%s:%s:%llx!\n",
- f->ops->get_driver_name(f),
- f->ops->get_timeline_name(f),
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
f->seqno);
+ rcu_read_unlock();
i915_request_cancel(rq, -EINTR);
}
i915_request_put(rq);
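
The hunk above is the canonical pattern for the new RCU-protected fence-name accessors; distilled into a standalone (hypothetical) helper for reference:

static void example_log_fence(struct dma_fence *f)
{
	const char __rcu *driver;
	const char __rcu *timeline;

	/* the returned strings are only valid under rcu_read_lock() */
	rcu_read_lock();
	driver = dma_fence_driver_name(f);
	timeline = dma_fence_timeline_name(f);
	pr_info("%s:%s:%llx\n",
		rcu_dereference(driver), rcu_dereference(timeline), f->seqno);
	rcu_read_unlock();
}
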
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index dbdcfe130ad4..4a1675dea1c7 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1205,7 +1205,7 @@ void intel_gt_reset(struct intel_gt *gt,
intel_engine_mask_t stalled_mask,
const char *reason)
{
- struct intel_display *display = &gt->i915->display;
+ struct intel_display *display = gt->i915->display;
intel_engine_mask_t awake;
int ret;
@@ -1423,7 +1423,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
/* Use a watchdog to ensure that our reset completes */
intel_wedge_on_timeout(&w, gt, 60 * HZ) {
struct drm_i915_private *i915 = gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
bool need_display_reset;
bool reset_display;
@@ -1448,7 +1448,8 @@ static void intel_gt_reset_global(struct intel_gt *gt,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
else
drm_dev_wedged_event(&gt->i915->drm,
- DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
+ NULL);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index eb89948cc112..0b35fdd461d4 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -23,7 +23,7 @@
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rps.h"
-#include "vlv_sideband.h"
+#include "vlv_iosf_sb.h"
#include "../../../platform/x86/intel_ips.h"
#define BUSY_MAX_EI 20u /* ms */
@@ -550,7 +550,7 @@ static unsigned int init_emon(struct intel_uncore *uncore)
static bool gen5_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u8 fstart, vstart;
u32 rgvmodectl;
@@ -620,7 +620,7 @@ static bool gen5_rps_enable(struct intel_rps *rps)
static void gen5_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
@@ -820,9 +820,9 @@ static int vlv_rps_set(struct intel_rps *rps, u8 val)
struct drm_i915_private *i915 = rps_to_i915(rps);
int err;
- vlv_punit_get(i915);
- err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
- vlv_punit_put(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ err = vlv_iosf_sb_write(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_REQ, val);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
val, intel_gpu_freq(rps, val));
@@ -1280,7 +1280,7 @@ static int chv_rps_max_freq(struct intel_rps *rps)
struct intel_gt *gt = rps_to_gt(rps);
u32 val;
- val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, FB_GFX_FMAX_AT_VMAX_FUSE);
switch (gt->info.sseu.eu_total) {
case 8:
@@ -1307,7 +1307,7 @@ static int chv_rps_rpe_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
- val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_GPU_DUTYCYCLE_REG);
val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;
return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
@@ -1318,7 +1318,7 @@ static int chv_rps_guar_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
- val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, FB_GFX_FMAX_AT_VMAX_FUSE);
return val & FB_GFX_FREQ_FUSE_MASK;
}
@@ -1328,7 +1328,7 @@ static u32 chv_rps_min_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
- val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, FB_GFX_FMIN_AT_VMIN_FUSE);
val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;
return val & FB_GFX_FREQ_FUSE_MASK;
@@ -1362,14 +1362,14 @@ static bool chv_rps_enable(struct intel_rps *rps)
GEN6_PM_RP_DOWN_TIMEOUT);
/* Setting Fixed Bias */
- vlv_punit_get(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
- vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+ vlv_iosf_sb_write(&i915->drm, VLV_IOSF_SB_PUNIT, VLV_TURBO_SOC_OVERRIDE, val);
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(i915);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
/* RPS code assumes GPLL is used */
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
@@ -1387,7 +1387,7 @@ static int vlv_rps_guar_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val, rp1;
- val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FREQ_FUSE);
rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
@@ -1400,7 +1400,7 @@ static int vlv_rps_max_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val, rp0;
- val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FREQ_FUSE);
rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
/* Clamp to max */
@@ -1414,9 +1414,9 @@ static int vlv_rps_rpe_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val, rpe;
- val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
- val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
return rpe;
@@ -1427,7 +1427,7 @@ static int vlv_rps_min_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
- val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_LFM) & 0xff;
/*
* According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
* for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
@@ -1463,15 +1463,15 @@ static bool vlv_rps_enable(struct intel_rps *rps)
/* WaGsvRC0ResidencyMethod:vlv */
rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
- vlv_punit_get(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
/* Setting Fixed Bias */
val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
- vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+ vlv_iosf_sb_write(&i915->drm, VLV_IOSF_SB_PUNIT, VLV_TURBO_SOC_OVERRIDE, val);
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(i915);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
/* RPS code assumes GPLL is used */
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
@@ -1684,7 +1684,7 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
rps->gpll_ref_freq =
- vlv_get_cck_clock(i915, "GPLL ref",
+ vlv_get_cck_clock(&i915->drm, "GPLL ref",
CCK_GPLL_CLOCK_CONTROL,
i915->czclk_freq);
@@ -1696,7 +1696,7 @@ static void vlv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- vlv_iosf_sb_get(i915,
+ vlv_iosf_sb_get(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
@@ -1720,7 +1720,7 @@ static void vlv_rps_init(struct intel_rps *rps)
drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
- vlv_iosf_sb_put(i915,
+ vlv_iosf_sb_put(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
@@ -1730,7 +1730,7 @@ static void chv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- vlv_iosf_sb_get(i915,
+ vlv_iosf_sb_get(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
@@ -1754,7 +1754,7 @@ static void chv_rps_init(struct intel_rps *rps)
drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
- vlv_iosf_sb_put(i915,
+ vlv_iosf_sb_put(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
@@ -2119,9 +2119,9 @@ static u32 __read_cagf(struct intel_rps *rps, bool take_fw)
} else if (GRAPHICS_VER(i915) >= 12) {
r = GEN12_RPSTAT1;
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- vlv_punit_get(i915);
- freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ freq = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
} else if (GRAPHICS_VER(i915) >= 6) {
r = GEN6_RPSTAT1;
} else {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 9df80c325fc1..f360f020d8f1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -313,8 +313,13 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
*
* The same WA bit is used for both and 22011391025 is applicable to
* all DG2.
+ *
+ * Platforms after DG2 prevent this issue in hardware by stalling
+ * submissions. With this flag, GuC schedules so as to avoid such
+ * stalls.
*/
- if (IS_DG2(gt->i915))
+ if (IS_DG2(gt->i915) ||
+ (CCS_MASK(gt) && GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)))
flags |= GUC_WA_DUAL_QUEUE;
/* Wa_22011802037: graphics version 11/12 */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index e8a04e476c57..09a64f224c49 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -220,8 +220,7 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable,
*/
static int subbuf_start_callback(struct rchan_buf *buf,
void *subbuf,
- void *prev_subbuf,
- size_t prev_padding)
+ void *prev_subbuf)
{
/*
* Use no-overwrite mode by default, where relay will stop accepting
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index ec33ad942115..e848a04a80dc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -1116,7 +1116,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
_MASKED_BIT_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
- ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
+ ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100, NULL);
if (ret)
gt_err(gt, "DMA for %s fw failed, DMA_CTRL=%u\n",
intel_uc_fw_type_repr(uc_fw->type),
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index f25ee2953baf..a91e23c22ea1 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -38,6 +38,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"
@@ -50,6 +51,7 @@
#include "trace.h"
#include "display/i9xx_plane_regs.h"
+#include "display/intel_display_core.h"
#include "display/intel_sprite_regs.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
@@ -1286,7 +1288,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1333,7 +1335,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1421,7 +1423,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 1e1af5e545a4..74197e337585 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -36,6 +36,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "display/bxt_dpio_phy_regs.h"
@@ -43,6 +44,7 @@
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display.h"
+#include "display/intel_display_core.h"
#include "display/intel_dpio_phy.h"
#include "display/intel_sprite_regs.h"
@@ -69,7 +71,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
if (!(vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_EDP)) & TRANSCONF_ENABLE))
return 0;
@@ -82,7 +84,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
if (drm_WARN_ON(&dev_priv->drm,
pipe < PIPE_A || pipe >= I915_MAX_PIPES))
@@ -183,7 +185,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
int pipe;
if (IS_BROXTON(dev_priv)) {
@@ -634,7 +636,7 @@ void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon)
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
@@ -664,7 +666,7 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
int pipe;
mutex_lock(&vgpu->vgpu_lock);
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 8090bc53c7e1..bc7f05f9a271 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -63,19 +63,6 @@ struct intel_vgpu;
#define AUX_BURST_SIZE 20
-#define SBI_RESPONSE_MASK 0x3
-#define SBI_RESPONSE_SHIFT 0x1
-#define SBI_STAT_MASK 0x1
-#define SBI_STAT_SHIFT 0x0
-#define SBI_OPCODE_SHIFT 8
-#define SBI_OPCODE_MASK (0xff << SBI_OPCODE_SHIFT)
-#define SBI_CMD_IORD 2
-#define SBI_CMD_IOWR 3
-#define SBI_CMD_CRRD 6
-#define SBI_CMD_CRWR 7
-#define SBI_ADDR_OFFSET_SHIFT 16
-#define SBI_ADDR_OFFSET_MASK (0xffff << SBI_ADDR_OFFSET_SHIFT)
-
struct intel_vgpu_sbi_register {
unsigned int offset;
u32 value;
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 89147d33168c..2031b97de2b7 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -35,6 +35,7 @@
#include <drm/display/drm_dp.h>
#include "display/intel_dp_aux_regs.h"
+#include "display/intel_gmbus.h"
#include "display/intel_gmbus_regs.h"
#include "gvt.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index f9f7ef131371..a8079cfa8e1d 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -39,9 +39,11 @@
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "display/i9xx_plane_regs.h"
#include "display/intel_cursor_regs.h"
+#include "display/intel_display_core.h"
#include "display/intel_sprite_regs.h"
#include "display/skl_universal_plane_regs.h"
@@ -154,7 +156,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 tiled, int stride_mask, int bpp)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(display, pipe)) & stride_mask;
u32 stride = stride_reg;
@@ -211,7 +213,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
u32 val, fmt;
int pipe;
@@ -342,7 +344,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
u32 val, mode, index;
u32 alpha_plane, alpha_force;
int pipe;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1344e6d20a34..f446f73f0fe2 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -40,6 +40,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "intel_mchbar_regs.h"
@@ -47,6 +48,7 @@
#include "display/i9xx_plane_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
+#include "display/intel_display_core.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
@@ -55,6 +57,7 @@
#include "display/intel_fdi_regs.h"
#include "display/intel_pps_regs.h"
#include "display/intel_psr_regs.h"
+#include "display/intel_sbi_regs.h"
#include "display/intel_sprite_regs.h"
#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
@@ -658,7 +661,7 @@ static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
enum port port;
u32 dp_br, link_m, link_n, htotal, vtotal;
@@ -1022,7 +1025,7 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
u32 pipe = DSPSURF_TO_PIPE(display, offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
@@ -1064,7 +1067,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
enum pipe pipe = REG_50080_TO_PIPE(offset);
enum plane_id plane = REG_50080_TO_PLANE(offset);
int event = SKL_FLIP_EVENT(pipe, plane);
@@ -1412,12 +1415,12 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
- SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
- unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
- SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
- vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
- sbi_offset);
+ if ((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRRD) {
+ unsigned int sbi_offset;
+
+ sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK, vgpu_vreg_t(vgpu, SBI_ADDR));
+
+ vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu, sbi_offset);
}
read_vreg(vgpu, offset, p_data, bytes);
return 0;
@@ -1431,21 +1434,20 @@ static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
- data |= SBI_READY;
+ data &= ~SBI_STATUS_MASK;
+ data |= SBI_STATUS_READY;
- data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
+ data &= ~SBI_RESPONSE_MASK;
data |= SBI_RESPONSE_SUCCESS;
vgpu_vreg(vgpu, offset) = data;
- if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
- SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
- unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
- SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+ if ((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRWR) {
+ unsigned int sbi_offset;
+
+ sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK, vgpu_vreg_t(vgpu, SBI_ADDR));
- write_virtual_sbi_register(vgpu, sbi_offset,
- vgpu_vreg_t(vgpu, SBI_DATA));
+ write_virtual_sbi_register(vgpu, sbi_offset, vgpu_vreg_t(vgpu, SBI_DATA));
}
return 0;
}
@@ -2200,7 +2202,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 336d079c4207..a956da68e6bd 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -33,6 +33,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index e16e0d4c9534..da1135fa7cda 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -36,6 +36,7 @@
#include <linux/vmalloc.h>
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "display/bxt_dpio_phy_regs.h"
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 273bc43468a0..c6263c6d3384 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -51,6 +51,7 @@
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_crtc.h"
+#include "display/intel_display_core.h"
#include "display/intel_display_driver.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
@@ -58,9 +59,11 @@
#include "display/intel_encoder.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
+#include "display/intel_opregion.h"
#include "display/intel_overlay.h"
#include "display/intel_pch_refclk.h"
#include "display/intel_pps.h"
+#include "display/intel_sbi.h"
#include "display/intel_sprite_uapi.h"
#include "display/skl_watermark.h"
@@ -107,8 +110,7 @@
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_region_ttm.h"
-#include "intel_sbi.h"
-#include "vlv_sideband.h"
+#include "vlv_iosf_sb.h"
#include "vlv_suspend.h"
static const struct drm_driver i915_drm_driver;
@@ -133,10 +135,6 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
if (dev_priv->wq == NULL)
goto out_err;
- dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
- if (dev_priv->display.hotplug.dp_wq == NULL)
- goto out_free_wq;
-
/*
* The unordered i915 workqueue should be used for all work
* scheduling that do not require running in order, which used
@@ -145,12 +143,10 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
*/
dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
if (dev_priv->unordered_wq == NULL)
- goto out_free_dp_wq;
+ goto out_free_wq;
return 0;
-out_free_dp_wq:
- destroy_workqueue(dev_priv->display.hotplug.dp_wq);
out_free_wq:
destroy_workqueue(dev_priv->wq);
out_err:
@@ -162,7 +158,6 @@ out_err:
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
destroy_workqueue(dev_priv->unordered_wq);
- destroy_workqueue(dev_priv->display.hotplug.dp_wq);
destroy_workqueue(dev_priv->wq);
}
@@ -222,7 +217,7 @@ static void sanitize_gpu(struct drm_i915_private *i915)
*/
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
int ret = 0;
if (i915_inject_probe_failure(dev_priv))
@@ -236,7 +231,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->gpu_error.lock);
- intel_sbi_init(dev_priv);
+ intel_sbi_init(display);
vlv_iosf_sb_init(dev_priv);
mutex_init(&dev_priv->sb_lock);
@@ -285,7 +280,7 @@ err_workqueues:
*/
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
intel_irq_fini(dev_priv);
intel_power_domains_cleanup(display);
@@ -297,9 +292,11 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
mutex_destroy(&dev_priv->sb_lock);
vlv_iosf_sb_fini(dev_priv);
- intel_sbi_fini(dev_priv);
+ intel_sbi_fini(display);
i915_params_free(&dev_priv->params);
+
+ intel_display_device_remove(display);
}
/**
@@ -313,7 +310,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
*/
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt;
int ret, i;
@@ -460,7 +457,7 @@ static int i915_pcode_init(struct drm_i915_private *i915)
*/
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
@@ -571,7 +568,9 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
* Fill the dram structure to get the system dram info. This will be
* used for memory latency calculation.
*/
- intel_dram_detect(dev_priv);
+ ret = intel_dram_detect(dev_priv);
+ if (ret)
+ goto err_opregion;
intel_bw_init_hw(display);
@@ -599,7 +598,7 @@ err_perf:
*/
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
i915_perf_fini(dev_priv);
@@ -619,7 +618,7 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
*/
static int i915_driver_register(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt;
unsigned int i;
int ret;
@@ -670,7 +669,7 @@ static int i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt;
unsigned int i;
@@ -742,6 +741,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
struct drm_i915_private *i915;
+ struct intel_display *display;
i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
struct drm_i915_private, drm);
@@ -756,7 +756,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, match_info);
- intel_display_device_probe(pdev);
+ display = intel_display_device_probe(pdev);
+ if (IS_ERR(display))
+ return ERR_CAST(display);
+
+ i915->display = display;
return i915;
}
@@ -790,7 +794,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
}
- display = &i915->display;
+ display = i915->display;
ret = i915_driver_early_probe(i915);
if (ret < 0)
@@ -882,7 +886,7 @@ out_pci_disable:
void i915_driver_remove(struct drm_i915_private *i915)
{
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
@@ -915,7 +919,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
static void i915_driver_release(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t wakeref;
@@ -938,8 +941,6 @@ static void i915_driver_release(struct drm_device *dev)
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
-
- intel_display_device_remove(display);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -969,7 +970,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
disable_rpm_wakeref_asserts(&i915->runtime_pm);
intel_runtime_pm_disable(&i915->runtime_pm);
@@ -991,10 +992,10 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
if (HAS_DISPLAY(i915))
intel_display_driver_suspend_access(display);
- intel_encoder_suspend_all(&i915->display);
- intel_encoder_shutdown_all(&i915->display);
+ intel_encoder_suspend_all(display);
+ intel_encoder_shutdown_all(display);
- intel_dmc_suspend(&i915->display);
+ intel_dmc_suspend(display);
i915_gem_suspend(i915);
@@ -1049,7 +1050,7 @@ static int i915_drm_prepare(struct drm_device *dev)
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
pci_power_t opregion_target_state;
@@ -1074,7 +1075,7 @@ static int i915_drm_suspend(struct drm_device *dev)
if (HAS_DISPLAY(dev_priv))
intel_display_driver_suspend_access(display);
- intel_encoder_suspend_all(&dev_priv->display);
+ intel_encoder_suspend_all(display);
/* Must be called before GGTT is suspended. */
intel_dpt_suspend(display);
@@ -1099,7 +1100,7 @@ static int i915_drm_suspend(struct drm_device *dev)
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct intel_gt *gt;
@@ -1171,7 +1172,7 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt;
int ret, i;
@@ -1256,7 +1257,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_gt *gt;
int ret, i;
@@ -1488,7 +1489,7 @@ static int i915_pm_restore(struct device *kdev)
static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
@@ -1587,7 +1588,7 @@ static int intel_runtime_suspend(struct device *kdev)
static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
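The i915_driver.c hunks carry the core of the restructuring: struct intel_display stops being embedded in struct drm_i915_private and becomes a separately allocated object. intel_display_device_probe() now returns the display pointer (or an ERR_PTR on failure), i915_driver_create() stores it in i915->display, and every "&dev_priv->display" user becomes "dev_priv->display". The matching teardown, intel_display_device_remove(), moves from i915_driver_release() into i915_driver_late_release() so it pairs with the early-probe path. Along the way the dedicated "i915-dp" workqueue is dropped from i915_workqueues_init() (and its error-unwind labels renamed to match), intel_sbi_init()/intel_sbi_fini() switch to taking the display, and intel_dram_detect() gains error propagation. Below is a minimal userspace sketch of the embedded-struct-to-pointer probe pattern; the toy ERR_PTR helpers stand in for the kernel's <linux/err.h>, and all names are illustrative rather than the i915 API.

    /*
     * Sketch: the probe helper allocates the sub-object and reports
     * failure ERR_PTR-style; the owner stores a pointer instead of
     * embedding the struct.
     */
    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }

    struct display { int num_pipes; };

    struct device_priv {
            struct display *display;        /* was: struct display display; */
    };

    static struct display *display_probe(void)
    {
            struct display *d = calloc(1, sizeof(*d));

            if (!d)
                    return ERR_PTR(-ENOMEM);
            d->num_pipes = 3;
            return d;
    }

    static int driver_create(struct device_priv *priv)
    {
            struct display *d = display_probe();

            if (IS_ERR(d))
                    return (int)PTR_ERR(d); /* nothing else to unwind yet */
            priv->display = d;
            return 0;
    }

    int main(void)
    {
            struct device_priv priv = { 0 };

            if (driver_create(&priv))
                    return 1;
            printf("pipes: %d\n", priv.display->num_pipes);
            free(priv.display);
            return 0;
    }

The ERR_CAST() in the real hunk converts the display error pointer into an error pointer of the caller's return type while keeping the same errno, which is why i915_driver_create() can hand it straight back.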
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d0e1980dcba2..4e4e89746aa6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,13 +32,11 @@
#include <uapi/drm/i915_drm.h>
+#include <linux/pci.h>
#include <linux/pm_qos.h>
#include <drm/ttm/ttm_device.h>
-#include "display/intel_display_limits.h"
-#include "display/intel_display_core.h"
-
#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
@@ -62,11 +60,11 @@
#include "intel_step.h"
#include "intel_uncore.h"
+struct dram_info;
struct drm_i915_clock_gating_funcs;
-struct vlv_s0ix_state;
+struct intel_display;
struct intel_pxp;
-
-#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
+struct vlv_s0ix_state;
/* Data Stolen Memory (DSM) aka "i915 stolen memory" */
struct i915_dsm {
@@ -177,7 +175,7 @@ struct i915_selftest_stash {
struct drm_i915_private {
struct drm_device drm;
- struct intel_display display;
+ struct intel_display *display;
/* FIXME: Device release actions should all be moved to drmm_ */
bool do_release;
@@ -224,12 +222,10 @@ struct drm_i915_private {
bool irqs_enabled;
- /* LPT/WPT IOSF sideband protection */
- struct mutex sbi_lock;
-
/* VLV/CHV IOSF sideband */
struct {
struct mutex lock; /* protect sideband access */
+ unsigned long locked_unit_mask;
struct pm_qos_request qos;
} vlv_iosf_sb;
@@ -285,25 +281,7 @@ struct drm_i915_private {
u32 suspend_count;
struct vlv_s0ix_state *vlv_s0ix_state;
- struct dram_info {
- bool wm_lv_0_adjust_needed;
- u8 num_channels;
- bool symmetric_memory;
- enum intel_dram_type {
- INTEL_DRAM_UNKNOWN,
- INTEL_DRAM_DDR3,
- INTEL_DRAM_DDR4,
- INTEL_DRAM_LPDDR3,
- INTEL_DRAM_LPDDR4,
- INTEL_DRAM_DDR5,
- INTEL_DRAM_LPDDR5,
- INTEL_DRAM_GDDR,
- INTEL_DRAM_GDDR_ECC,
- __INTEL_DRAM_TYPE_MAX,
- } type;
- u8 num_qgv_points;
- u8 num_psf_gv_points;
- } dram_info;
+ const struct dram_info *dram_info;
struct intel_runtime_pm runtime_pm;
@@ -374,14 +352,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
return i915->gt[0];
}
-#define rb_to_uabi_engine(rb) \
- rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
-
-#define for_each_uabi_engine(engine__, i915__) \
- for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
- (engine__); \
- (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-
#define INTEL_INFO(i915) ((i915)->__info)
#define RUNTIME_INFO(i915) (&(i915)->__runtime)
#define DRIVER_CAPS(i915) (&(i915)->caps)
@@ -590,29 +560,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_GEN9_LP(i915) (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_GEN9_LP(i915))
-#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
-#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
-
-#define __ENGINE_INSTANCES_MASK(mask, first, count) ({ \
- unsigned int first__ = (first); \
- unsigned int count__ = (count); \
- ((mask) & GENMASK(first__ + count__ - 1, first__)) >> first__; \
-})
-
-#define ENGINE_INSTANCES_MASK(gt, first, count) \
- __ENGINE_INSTANCES_MASK((gt)->info.engine_mask, first, count)
-
-#define RCS_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
-#define BCS_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
-#define VDBOX_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
-#define VEBOX_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
-#define CCS_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
-
#define HAS_MEDIA_RATIO_MODE(i915) (INTEL_INFO(i915)->has_media_ratio_mode)
/*
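In i915_drv.h the theme is shrinking header dependencies: the display headers are no longer included (struct intel_display is only used as a pointer here, so a forward declaration suffices), the open-coded dram_info definition collapses to a const struct dram_info * behind a forward declaration, and the uabi-engine and engine-mask helpers move out of this catch-all header. A minimal sketch of the forward-declaration idiom, with illustrative names (both "files" are shown in one translation unit so the sketch stays runnable):

    /* --- dram.h: users only ever see an opaque pointer ---------------- */
    struct dram_info;                          /* forward declaration only */
    const struct dram_info *dram_detect(void);
    int dram_num_channels(const struct dram_info *di);

    /* --- dram.c: the full definition stays private to one .c file ----- */
    struct dram_info {
            int num_channels;
    };

    const struct dram_info *dram_detect(void)
    {
            static const struct dram_info di = { .num_channels = 2 };
            return &di;                /* detection stubbed for the sketch */
    }

    int dram_num_channels(const struct dram_info *di)
    {
            return di ? di->num_channels : 0;
    }

    /* --- caller: compiles against the header without the definition --- */
    #include <stdio.h>

    int main(void)
    {
            printf("%d channels\n", dram_num_channels(dram_detect()));
            return 0;
    }

Because callers can no longer poke at the struct layout, the definition (and the intel_dram_type enum that was inlined here) can move out of this header entirely, and touching it stops rebuilding everything that includes i915_drv.h.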
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 82e9d289398c..20b3cb29cfff 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -134,4 +134,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
+#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
+
#endif /* __I915_GEM_H__ */
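GEM_QUIRK_PIN_SWIZZLED_PAGES simply migrates here from i915_drv.h, keeping the GEM quirk bit with the rest of the GEM declarations as part of the same header diet.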
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index be8149e46281..6fcda6d7b5b7 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -16,7 +16,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *i915 = to_i915(dev);
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct pci_dev *pdev = to_pci_dev(dev->dev);
const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
drm_i915_getparam_t *param = data;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 568525d49428..0e4b832dff84 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -2067,7 +2067,7 @@ static struct i915_gpu_coredump *
__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
struct drm_i915_private *i915 = gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct i915_gpu_coredump *error;
/* Check if GPU capture has been disabled */
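i915_getparam.c and i915_gpu_error.c need only the mechanical "&i915->display" to "i915->display" conversion that follows from the pointer change above.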
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 95042879bec4..191ed8bb1d9c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -33,6 +33,7 @@
#include <drm/drm_drv.h>
+#include "display/intel_display_core.h"
#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
@@ -230,7 +231,7 @@ out:
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -324,7 +325,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -418,7 +419,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -507,7 +508,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
u32 master_ctl;
@@ -558,7 +559,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
struct intel_gt *gt = to_gt(i915);
u32 master_ctl;
@@ -616,7 +617,7 @@ static inline void dg1_master_intr_enable(void __iomem * const regs)
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = arg;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct intel_gt *gt = to_gt(i915);
void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 master_tile_ctl, master_ctl;
@@ -660,7 +661,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen2_irq_reset(uncore, DE_IRQ_REGS);
@@ -681,7 +682,7 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
@@ -693,7 +694,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen8_master_intr_disable(intel_uncore_regs(uncore));
@@ -705,7 +706,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
@@ -720,7 +721,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_gt *gt;
unsigned int i;
@@ -740,7 +741,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
@@ -755,7 +756,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
gen5_gt_irq_postinstall(to_gt(dev_priv));
@@ -764,7 +765,7 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
gen5_gt_irq_postinstall(to_gt(dev_priv));
@@ -776,7 +777,7 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
gen8_gt_irq_postinstall(to_gt(dev_priv));
gen8_de_irq_postinstall(display);
@@ -786,7 +787,7 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
@@ -802,7 +803,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
struct intel_gt *gt;
@@ -821,7 +822,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
gen8_gt_irq_postinstall(to_gt(dev_priv));
@@ -894,7 +895,7 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
i9xx_display_irq_reset(display);
@@ -906,7 +907,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -941,7 +942,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -996,7 +997,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
i9xx_display_irq_reset(display);
@@ -1027,7 +1028,7 @@ static u32 i965_error_mask(struct drm_i915_private *i915)
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -1059,7 +1060,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1152,71 +1153,62 @@ void intel_irq_fini(struct drm_i915_private *i915)
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv))
- return cherryview_irq_handler;
- else if (IS_VALLEYVIEW(dev_priv))
- return valleyview_irq_handler;
- else if (GRAPHICS_VER(dev_priv) == 4)
- return i965_irq_handler;
- else
- return i915_irq_handler;
- } else {
- if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
- return dg1_irq_handler;
- else if (GRAPHICS_VER(dev_priv) >= 11)
- return gen11_irq_handler;
- else if (GRAPHICS_VER(dev_priv) >= 8)
- return gen8_irq_handler;
- else
- return ilk_irq_handler;
- }
+ if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+ return dg1_irq_handler;
+ else if (GRAPHICS_VER(dev_priv) >= 11)
+ return gen11_irq_handler;
+ else if (IS_CHERRYVIEW(dev_priv))
+ return cherryview_irq_handler;
+ else if (GRAPHICS_VER(dev_priv) >= 8)
+ return gen8_irq_handler;
+ else if (IS_VALLEYVIEW(dev_priv))
+ return valleyview_irq_handler;
+ else if (GRAPHICS_VER(dev_priv) >= 5)
+ return ilk_irq_handler;
+ else if (GRAPHICS_VER(dev_priv) == 4)
+ return i965_irq_handler;
+ else
+ return i915_irq_handler;
}
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_irq_reset(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_irq_reset(dev_priv);
- else if (GRAPHICS_VER(dev_priv) == 4)
- i965_irq_reset(dev_priv);
- else
- i915_irq_reset(dev_priv);
- } else {
- if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
- dg1_irq_reset(dev_priv);
- else if (GRAPHICS_VER(dev_priv) >= 11)
- gen11_irq_reset(dev_priv);
- else if (GRAPHICS_VER(dev_priv) >= 8)
- gen8_irq_reset(dev_priv);
- else
- ilk_irq_reset(dev_priv);
- }
+ if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+ dg1_irq_reset(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 11)
+ gen11_irq_reset(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
+ cherryview_irq_reset(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 8)
+ gen8_irq_reset(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_irq_reset(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 5)
+ ilk_irq_reset(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) == 4)
+ i965_irq_reset(dev_priv);
+ else
+ i915_irq_reset(dev_priv);
}
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_irq_postinstall(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_irq_postinstall(dev_priv);
- else if (GRAPHICS_VER(dev_priv) == 4)
- i965_irq_postinstall(dev_priv);
- else
- i915_irq_postinstall(dev_priv);
- } else {
- if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
- dg1_irq_postinstall(dev_priv);
- else if (GRAPHICS_VER(dev_priv) >= 11)
- gen11_irq_postinstall(dev_priv);
- else if (GRAPHICS_VER(dev_priv) >= 8)
- gen8_irq_postinstall(dev_priv);
- else
- ilk_irq_postinstall(dev_priv);
- }
+ if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+ dg1_irq_postinstall(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 11)
+ gen11_irq_postinstall(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
+ cherryview_irq_postinstall(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 8)
+ gen8_irq_postinstall(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_irq_postinstall(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 5)
+ ilk_irq_postinstall(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) == 4)
+ i965_irq_postinstall(dev_priv);
+ else
+ i915_irq_postinstall(dev_priv);
}
/**
@@ -1265,7 +1257,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
int irq = to_pci_dev(dev_priv->drm.dev)->irq;
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
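The last three i915_irq.c hunks flatten the nested HAS_GMCH() selection into one ordered if/else ladder, applied identically to the handler lookup, reset, and postinstall paths. The ordering carries the old semantics: each platform check has to sit above any generic GRAPHICS_VER threshold it would also satisfy, so Cherryview (graphics version 8, but a GMCH-style platform) is tested before "ver >= 8" and Valleyview before "ver >= 5". A toy sketch of why that ordering matters, with illustrative types (the real ladder also has a dg1-style full-version rung at the top):

    #include <stdbool.h>
    #include <stdio.h>

    struct dev { int ver; bool is_chv, is_vlv; };

    static const char *pick_handler(const struct dev *d)
    {
            if (d->ver >= 11)
                    return "gen11";
            else if (d->is_chv)            /* must precede the ver >= 8 rung */
                    return "cherryview";
            else if (d->ver >= 8)
                    return "gen8";
            else if (d->is_vlv)            /* must precede the ver >= 5 rung */
                    return "valleyview";
            else if (d->ver >= 5)
                    return "ilk";
            else if (d->ver == 4)
                    return "i965";
            else
                    return "i915";
    }

    int main(void)
    {
            struct dev chv = { .ver = 8, .is_chv = true };
            struct dev bdw = { .ver = 8 };

            /* prints "cherryview gen8": same version, different handlers */
            printf("%s %s\n", pick_handler(&chv), pick_handler(&bdw));
            return 0;
    }

Writing it as one ladder removes the duplicated structure across the three functions and makes the precedence explicit instead of hiding half of it behind HAS_GMCH().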
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 21006c7f615c..b2e311f4791a 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -663,7 +663,6 @@ static const struct intel_device_info dg1_info = {
DGFX_FEATURES,
.__runtime.graphics.ip.rel = 10,
PLATFORM(INTEL_DG1),
- .require_force_probe = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
BIT(VCS0) | BIT(VCS2),
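Dropping .require_force_probe from dg1_info takes DG1 out from behind the i915.force_probe module parameter, so the driver now binds to DG1 devices by default.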
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2e4190da3e0d..03b895897f60 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -144,10 +144,6 @@
#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
#define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20)
-#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
-#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
-#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
-
/*
* Reset registers
*/
@@ -187,46 +183,6 @@
/* DPIO registers */
#define DPIO_DEVFN 0
-#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
-#define DPIO_MODSEL1 (1 << 3) /* if ref clk b == 27 */
-#define DPIO_MODSEL0 (1 << 2) /* if ref clk a == 27 */
-#define DPIO_SFR_BYPASS (1 << 1)
-#define DPIO_CMNRST (1 << 0)
-
-#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
-#define MIPIO_RST_CTRL (1 << 2)
-
-#define _BXT_PHY_CTL_DDI_A 0x64C00
-#define _BXT_PHY_CTL_DDI_B 0x64C10
-#define _BXT_PHY_CTL_DDI_C 0x64C20
-#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10)
-#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9)
-#define BXT_PHY_LANE_ENABLED (1 << 8)
-#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
- _BXT_PHY_CTL_DDI_B)
-
-#define _PHY_CTL_FAMILY_DDI 0x64C90
-#define _PHY_CTL_FAMILY_EDP 0x64C80
-#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
-#define COMMON_RESET_DIS (1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy) \
- _MMIO(_PICK_EVEN_2RANGES(phy, 1, \
- _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
- _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
-
-/* UAIMI scratch pad register 1 */
-#define UAIMI_SPR1 _MMIO(0x4F074)
-/* SKL VccIO mask */
-#define SKL_VCCIO_MASK 0x1
-/* SKL balance leg register */
-#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
-/* I_boost values */
-#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port))
-#define BALANCE_LEG_MASK(port) (7 << (8 + 3 * (port)))
-/* Balance leg disable bits */
-#define BALANCE_LEG_DISABLE_SHIFT 23
-#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
-
/*
* Fence registers
* [0-7] @ 0x2000 gen2,gen3
@@ -372,16 +328,6 @@
#define GEN7_MEDIA_MAX_REQ_COUNT _MMIO(0x4070)
#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)
-#define ILK_GTT_FAULT _MMIO(0x44040) /* ilk/snb */
-#define GTT_FAULT_INVALID_GTT_PTE (1 << 7)
-#define GTT_FAULT_INVALID_PTE_DATA (1 << 6)
-#define GTT_FAULT_CURSOR_B_FAULT (1 << 5)
-#define GTT_FAULT_CURSOR_A_FAULT (1 << 4)
-#define GTT_FAULT_SPRITE_B_FAULT (1 << 3)
-#define GTT_FAULT_SPRITE_A_FAULT (1 << 2)
-#define GTT_FAULT_PRIMARY_B_FAULT (1 << 1)
-#define GTT_FAULT_PRIMARY_A_FAULT (1 << 0)
-
#define GEN7_ERR_INT _MMIO(0x44040)
#define ERR_INT_POISON (1 << 31)
#define ERR_INT_INVALID_GTT_PTE (1 << 29)
@@ -413,25 +359,6 @@
#define CLAIM_ER_OVERFLOW REG_BIT(16)
#define CLAIM_ER_CTR_MASK REG_GENMASK(15, 0)
-#define DERRMR _MMIO(0x44050)
-/* Note that HBLANK events are reserved on bdw+ */
-#define DERRMR_PIPEA_SCANLINE (1 << 0)
-#define DERRMR_PIPEA_PRI_FLIP_DONE (1 << 1)
-#define DERRMR_PIPEA_SPR_FLIP_DONE (1 << 2)
-#define DERRMR_PIPEA_VBLANK (1 << 3)
-#define DERRMR_PIPEA_HBLANK (1 << 5)
-#define DERRMR_PIPEB_SCANLINE (1 << 8)
-#define DERRMR_PIPEB_PRI_FLIP_DONE (1 << 9)
-#define DERRMR_PIPEB_SPR_FLIP_DONE (1 << 10)
-#define DERRMR_PIPEB_VBLANK (1 << 11)
-#define DERRMR_PIPEB_HBLANK (1 << 13)
-/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
-#define DERRMR_PIPEC_SCANLINE (1 << 14)
-#define DERRMR_PIPEC_PRI_FLIP_DONE (1 << 15)
-#define DERRMR_PIPEC_SPR_FLIP_DONE (1 << 20)
-#define DERRMR_PIPEC_VBLANK (1 << 21)
-#define DERRMR_PIPEC_HBLANK (1 << 22)
-
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
@@ -458,11 +385,6 @@
#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
#define VLV_PCBR_ADDR_SHIFT 12
-#define VLV_IRQ_REGS I915_IRQ_REGS(VLV_IMR, \
- VLV_IER, \
- VLV_IIR)
-
-#define DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */
#define EIR _MMIO(0x20b0)
#define EMR _MMIO(0x20b4)
#define ESR _MMIO(0x20b8)
@@ -475,16 +397,6 @@
#define GEN2_ERROR_REGS I915_ERROR_REGS(EMR, EIR)
-#define VLV_EIR _MMIO(VLV_DISPLAY_BASE + 0x20b0)
-#define VLV_EMR _MMIO(VLV_DISPLAY_BASE + 0x20b4)
-#define VLV_ESR _MMIO(VLV_DISPLAY_BASE + 0x20b8)
-#define VLV_ERROR_GUNIT_TLB_DATA (1 << 6)
-#define VLV_ERROR_GUNIT_TLB_PTE (1 << 5)
-#define VLV_ERROR_PAGE_TABLE (1 << 4)
-#define VLV_ERROR_CLAIM (1 << 0)
-
-#define VLV_ERROR_REGS I915_ERROR_REGS(VLV_EMR, VLV_EIR)
-
#define INSTPM _MMIO(0x20c0)
#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
@@ -509,23 +421,6 @@
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
-#define _MBUS_ABOX0_CTL 0x45038
-#define _MBUS_ABOX1_CTL 0x45048
-#define _MBUS_ABOX2_CTL 0x4504C
-#define MBUS_ABOX_CTL(x) \
- _MMIO(_PICK_EVEN_2RANGES(x, 2, \
- _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \
- _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL))
-
-#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
-#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
-#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
-#define MBUS_ABOX_B_CREDIT(x) ((x) << 16)
-#define MBUS_ABOX_BT_CREDIT_POOL2_MASK (0x1F << 8)
-#define MBUS_ABOX_BT_CREDIT_POOL2(x) ((x) << 8)
-#define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0)
-#define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0)
-
/*
* Make render/texture TLB fetches lower priority than associated data
* fetches. This is not turned on by default.
@@ -700,173 +595,6 @@
#define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2)
#define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3)
-#define IPS_CTL _MMIO(0x43408)
-#define IPS_ENABLE REG_BIT(31)
-#define IPS_FALSE_COLOR REG_BIT(4)
-
-/*
- * Clock control & power management
- */
-#define _DPLL_A 0x6014
-#define _DPLL_B 0x6018
-#define _CHV_DPLL_C 0x6030
-#define DPLL(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
- (pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
-
-#define VGA0 _MMIO(0x6000)
-#define VGA1 _MMIO(0x6004)
-#define VGA_PD _MMIO(0x6010)
-#define VGA0_PD_P2_DIV_4 (1 << 7)
-#define VGA0_PD_P1_DIV_2 (1 << 5)
-#define VGA0_PD_P1_SHIFT 0
-#define VGA0_PD_P1_MASK (0x1f << 0)
-#define VGA1_PD_P2_DIV_4 (1 << 15)
-#define VGA1_PD_P1_DIV_2 (1 << 13)
-#define VGA1_PD_P1_SHIFT 8
-#define VGA1_PD_P1_MASK (0x1f << 8)
-#define DPLL_VCO_ENABLE (1 << 31)
-#define DPLL_SDVO_HIGH_SPEED (1 << 30)
-#define DPLL_DVO_2X_MODE (1 << 30)
-#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
-#define DPLL_SYNCLOCK_ENABLE (1 << 29)
-#define DPLL_REF_CLK_ENABLE_VLV (1 << 29)
-#define DPLL_VGA_MODE_DIS (1 << 28)
-#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
-#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
-#define DPLL_MODE_MASK (3 << 26)
-#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
-#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
-#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
-#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
-#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
-#define DPLL_LOCK_VLV (1 << 15)
-#define DPLL_INTEGRATED_CRI_CLK_VLV (1 << 14)
-#define DPLL_INTEGRATED_REF_CLK_VLV (1 << 13)
-#define DPLL_SSC_REF_CLK_CHV (1 << 13)
-#define DPLL_PORTC_READY_MASK (0xf << 4)
-#define DPLL_PORTB_READY_MASK (0xf)
-
-#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
-
-/* Additional CHV pll/phy registers */
-#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
-#define DPLL_PORTD_READY_MASK (0xf)
-#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
-#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2 * (phy) + (ch) + 27))
-#define PHY_LDO_DELAY_0NS 0x0
-#define PHY_LDO_DELAY_200NS 0x1
-#define PHY_LDO_DELAY_600NS 0x2
-#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2 * (phy) + 23))
-#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8 * (phy) + 4 * (ch) + 11))
-#define PHY_CH_SU_PSR 0x1
-#define PHY_CH_DEEP_PSR 0x7
-#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6 * (phy) + 3 * (ch) + 2))
-#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
-#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
-#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
-#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6 - (6 * (phy) + 3 * (ch))))
-#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
-
-/*
- * The i830 generation, in LVDS mode, defines P1 as the bit number set within
- * this field (only one bit may be set).
- */
-#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
-#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
-#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
-/* i830, required in DVO non-gang */
-#define PLL_P2_DIVIDE_BY_4 (1 << 23)
-#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
-#define PLL_REF_INPUT_DREFCLK (0 << 13)
-#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
-#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
-#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
-#define PLL_REF_INPUT_MASK (3 << 13)
-#define PLL_LOAD_PULSE_PHASE_SHIFT 9
-/* Ironlake */
-# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
-# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
-# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x) - 1) << 9)
-# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
-# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
-
-/*
- * Parallel to Serial Load Pulse phase selection.
- * Selects the phase for the 10X DPLL clock for the PCIe
- * digital display port. The range is 4 to 13; 10 or more
- * is just a flip delay. The default is 6
- */
-#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
-#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
-/*
- * SDVO multiplier for 945G/GM. Not used on 965.
- */
-#define SDVO_MULTIPLIER_MASK 0x000000ff
-#define SDVO_MULTIPLIER_SHIFT_HIRES 4
-#define SDVO_MULTIPLIER_SHIFT_VGA 0
-
-#define _DPLL_A_MD 0x601c
-#define _DPLL_B_MD 0x6020
-#define _CHV_DPLL_C_MD 0x603c
-#define DPLL_MD(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
- (pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
-
-/*
- * UDI pixel divider, controlling how many pixels are stuffed into a packet.
- *
- * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
- */
-#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
-#define DPLL_MD_UDI_DIVIDER_SHIFT 24
-/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
-#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
-#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
-/*
- * SDVO/UDI pixel multiplier.
- *
- * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
- * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
- * modes, the bus rate would be below the limits, so SDVO allows for stuffing
- * dummy bytes in the datastream at an increased clock rate, with both sides of
- * the link knowing how many bytes are fill.
- *
- * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
- * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
- * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
- * through an SDVO command.
- *
- * This register field has values of multiplication factor minus 1, with
- * a maximum multiplier of 5 for SDVO.
- */
-#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
-#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
-/*
- * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
- * This best be set to the default value (3) or the CRT won't work. No,
- * I don't entirely understand what this does...
- */
-#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
-#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-
-#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
-
-#define _FPA0 0x6040
-#define _FPA1 0x6044
-#define _FPB0 0x6048
-#define _FPB1 0x604c
-#define FP0(pipe) _MMIO_PIPE(pipe, _FPA0, _FPB0)
-#define FP1(pipe) _MMIO_PIPE(pipe, _FPA1, _FPB1)
-#define FP_N_DIV_MASK 0x003f0000
-#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
-#define FP_N_DIV_SHIFT 16
-#define FP_M1_DIV_MASK 0x00003f00
-#define FP_M1_DIV_SHIFT 8
-#define FP_M2_DIV_MASK 0x0000003f
-#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
-#define FP_M2_DIV_SHIFT 0
-
#define DPLL_TEST _MMIO(0x606c)
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
@@ -1000,27 +728,6 @@
#define RAMCLK_GATE_D _MMIO(0x6210) /* CRL only */
#define DEUC _MMIO(0x6214) /* CRL only */
-#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
-#define FW_CSPWRDWNEN (1 << 15)
-
-#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
-
-#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508)
-#define CDCLK_FREQ_SHIFT 4
-#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
-#define CZCLK_FREQ_MASK 0xf
-
-#define GCI_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x650C)
-#define PFI_CREDIT_63 (9 << 28) /* chv only */
-#define PFI_CREDIT_31 (8 << 28) /* chv only */
-#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
-#define PFI_CREDIT_RESEND (1 << 27)
-#define VGA_FAST_MODE_DISABLE (1 << 14)
-
-#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
-
-#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
-
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
@@ -1051,25 +758,11 @@
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
/*
- * Overlay regs
- */
-#define OVADD _MMIO(0x30000)
-#define DOVSTA _MMIO(0x30008)
-#define OC_BUF (0x3 << 20)
-#define OGAMC5 _MMIO(0x30010)
-#define OGAMC4 _MMIO(0x30014)
-#define OGAMC3 _MMIO(0x30018)
-#define OGAMC2 _MMIO(0x3001c)
-#define OGAMC1 _MMIO(0x30020)
-#define OGAMC0 _MMIO(0x30024)
-
-/*
* GEN9 clock gating regs
*/
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
#define DARBF_GATING_DIS REG_BIT(27)
-#define MTL_PIPEDMC_GATING_DIS_A REG_BIT(15)
-#define MTL_PIPEDMC_GATING_DIS_B REG_BIT(14)
+#define MTL_PIPEDMC_GATING_DIS(pipe) REG_BIT(15 - (pipe))
#define PWM2_GATING_DIS REG_BIT(14)
#define PWM1_GATING_DIS REG_BIT(13)
@@ -1077,577 +770,6 @@
#define TGL_VRH_GATING_DIS REG_BIT(31)
#define DPT_GATING_DIS REG_BIT(22)
-#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
-#define BXT_GMBUS_GATING_DIS (1 << 14)
-#define DG2_DPFC_GATING_DIS REG_BIT(31)
-
-#define GEN9_CLKGATE_DIS_5 _MMIO(0x46540)
-#define DPCE_GATING_DIS REG_BIT(17)
-
-#define _CLKGATE_DIS_PSL_A 0x46520
-#define _CLKGATE_DIS_PSL_B 0x46524
-#define _CLKGATE_DIS_PSL_C 0x46528
-#define DUPS1_GATING_DIS (1 << 15)
-#define DUPS2_GATING_DIS (1 << 19)
-#define DUPS3_GATING_DIS (1 << 23)
-#define CURSOR_GATING_DIS REG_BIT(28)
-#define DPF_GATING_DIS (1 << 10)
-#define DPF_RAM_GATING_DIS (1 << 9)
-#define DPFR_GATING_DIS (1 << 8)
-
-#define CLKGATE_DIS_PSL(pipe) \
- _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
-
-#define _CLKGATE_DIS_PSL_EXT_A 0x4654C
-#define _CLKGATE_DIS_PSL_EXT_B 0x46550
-#define PIPEDMC_GATING_DIS REG_BIT(12)
-
-#define CLKGATE_DIS_PSL_EXT(pipe) \
- _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
-
-/*
- * Display engine regs
- */
-/* Pipe/transcoder A timing regs */
-#define _TRANS_HTOTAL_A 0x60000
-#define _TRANS_HTOTAL_B 0x61000
-#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
-#define HTOTAL_MASK REG_GENMASK(31, 16)
-#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal))
-#define HACTIVE_MASK REG_GENMASK(15, 0)
-#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay))
-
-#define _TRANS_HBLANK_A 0x60004
-#define _TRANS_HBLANK_B 0x61004
-#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A)
-#define HBLANK_END_MASK REG_GENMASK(31, 16)
-#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end))
-#define HBLANK_START_MASK REG_GENMASK(15, 0)
-#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start))
-
-#define _TRANS_HSYNC_A 0x60008
-#define _TRANS_HSYNC_B 0x61008
-#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A)
-#define HSYNC_END_MASK REG_GENMASK(31, 16)
-#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end))
-#define HSYNC_START_MASK REG_GENMASK(15, 0)
-#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start))
-
-#define _TRANS_VTOTAL_A 0x6000c
-#define _TRANS_VTOTAL_B 0x6100c
-#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A)
-#define VTOTAL_MASK REG_GENMASK(31, 16)
-#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal))
-#define VACTIVE_MASK REG_GENMASK(15, 0)
-#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay))
-
-#define _TRANS_VBLANK_A 0x60010
-#define _TRANS_VBLANK_B 0x61010
-#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A)
-#define VBLANK_END_MASK REG_GENMASK(31, 16)
-#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end))
-#define VBLANK_START_MASK REG_GENMASK(15, 0)
-#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start))
-
-#define _TRANS_VSYNC_A 0x60014
-#define _TRANS_VSYNC_B 0x61014
-#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A)
-#define VSYNC_END_MASK REG_GENMASK(31, 16)
-#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end))
-#define VSYNC_START_MASK REG_GENMASK(15, 0)
-#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start))
-
-#define _PIPEASRC 0x6001c
-#define _PIPEBSRC 0x6101c
-#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC)
-#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16)
-#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w))
-#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0)
-#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h))
-
-#define _BCLRPAT_A 0x60020
-#define _BCLRPAT_B 0x61020
-#define BCLRPAT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A)
-
-#define _TRANS_VSYNCSHIFT_A 0x60028
-#define _TRANS_VSYNCSHIFT_B 0x61028
-#define TRANS_VSYNCSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A)
-
-#define _TRANS_MULT_A 0x6002c
-#define _TRANS_MULT_B 0x6102c
-#define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
-
-/* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
-#define PORTB_HOTPLUG_INT_EN (1 << 29)
-#define PORTC_HOTPLUG_INT_EN (1 << 28)
-#define PORTD_HOTPLUG_INT_EN (1 << 27)
-#define SDVOB_HOTPLUG_INT_EN (1 << 26)
-#define SDVOC_HOTPLUG_INT_EN (1 << 25)
-#define TV_HOTPLUG_INT_EN (1 << 18)
-#define CRT_HOTPLUG_INT_EN (1 << 9)
-#define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \
- PORTC_HOTPLUG_INT_EN | \
- PORTD_HOTPLUG_INT_EN | \
- SDVOC_HOTPLUG_INT_EN | \
- SDVOB_HOTPLUG_INT_EN | \
- CRT_HOTPLUG_INT_EN)
-#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
-#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
-/* must use period 64 on GM45 according to docs */
-#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
-#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
-#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
-#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
-#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
-#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
-#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
-
-#define PORT_HOTPLUG_STAT(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
-/* HDMI/DP bits are g4x+ */
-#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
-#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
-#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
-#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
-#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
-#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
-#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
-#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
-#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
-#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
-#define PORTB_HOTPLUG_INT_SHORT_PLUSE (1 << 17)
-/* CRT/TV common between gen3+ */
-#define CRT_HOTPLUG_INT_STATUS (1 << 11)
-#define TV_HOTPLUG_INT_STATUS (1 << 10)
-#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
-#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
-#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
-#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
-#define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6)
-#define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5)
-#define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4)
-#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4)
-
-/* SDVO is different across gen3/4 */
-#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
-#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
-/*
- * Bspec seems to be seriously misleaded about the SDVO hpd bits on i965g/gm,
- * since reality corrobates that they're the same as on gen3. But keep these
- * bits here (and the comment!) to help any other lost wanderers back onto the
- * right tracks.
- */
-#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
-#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
-#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
-#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
-#define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \
- SDVOB_HOTPLUG_INT_STATUS_G4X | \
- SDVOC_HOTPLUG_INT_STATUS_G4X | \
- PORTB_HOTPLUG_INT_STATUS | \
- PORTC_HOTPLUG_INT_STATUS | \
- PORTD_HOTPLUG_INT_STATUS)
-
-#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
- SDVOB_HOTPLUG_INT_STATUS_I915 | \
- SDVOC_HOTPLUG_INT_STATUS_I915 | \
- PORTB_HOTPLUG_INT_STATUS | \
- PORTC_HOTPLUG_INT_STATUS | \
- PORTD_HOTPLUG_INT_STATUS)
-
-/* SDVO and HDMI port control.
- * The same register may be used for SDVO or HDMI */
-#define _GEN3_SDVOB 0x61140
-#define _GEN3_SDVOC 0x61160
-#define GEN3_SDVOB _MMIO(_GEN3_SDVOB)
-#define GEN3_SDVOC _MMIO(_GEN3_SDVOC)
-#define GEN4_HDMIB GEN3_SDVOB
-#define GEN4_HDMIC GEN3_SDVOC
-#define VLV_HDMIB _MMIO(VLV_DISPLAY_BASE + 0x61140)
-#define VLV_HDMIC _MMIO(VLV_DISPLAY_BASE + 0x61160)
-#define CHV_HDMID _MMIO(VLV_DISPLAY_BASE + 0x6116C)
-#define PCH_SDVOB _MMIO(0xe1140)
-#define PCH_HDMIB PCH_SDVOB
-#define PCH_HDMIC _MMIO(0xe1150)
-#define PCH_HDMID _MMIO(0xe1160)
-
-#define PORT_DFT_I9XX _MMIO(0x61150)
-#define DC_BALANCE_RESET (1 << 25)
-#define PORT_DFT2_G4X(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
-#define DC_BALANCE_RESET_VLV (1 << 31)
-#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
-#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */
-#define PIPE_B_SCRAMBLE_RESET REG_BIT(1)
-#define PIPE_A_SCRAMBLE_RESET REG_BIT(0)
-
-/* Gen 3 SDVO bits: */
-#define SDVO_ENABLE (1 << 31)
-#define SDVO_PIPE_SEL_SHIFT 30
-#define SDVO_PIPE_SEL_MASK (1 << 30)
-#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
-#define SDVO_STALL_SELECT (1 << 29)
-#define SDVO_INTERRUPT_ENABLE (1 << 26)
-/*
- * 915G/GM SDVO pixel multiplier.
- * Programmed value is multiplier - 1, up to 5x.
- * \sa DPLL_MD_UDI_MULTIPLIER_MASK
- */
-#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
-#define SDVO_PORT_MULTIPLY_SHIFT 23
-#define SDVO_PHASE_SELECT_MASK (15 << 19)
-#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
-#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
-#define SDVOC_GANG_MODE (1 << 16) /* Port C only */
-#define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */
-#define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */
-#define SDVO_DETECTED (1 << 2)
-/* Bits to be preserved when writing */
-#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
- SDVO_INTERRUPT_ENABLE)
-#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
-
-/* Gen 4 SDVO/HDMI bits: */
-#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
-#define SDVO_COLOR_FORMAT_MASK (7 << 26)
-#define SDVO_ENCODING_SDVO (0 << 10)
-#define SDVO_ENCODING_HDMI (2 << 10)
-#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
-#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
-#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
-#define HDMI_AUDIO_ENABLE (1 << 6) /* HDMI only */
-/* VSYNC/HSYNC bits new with 965, default is to be set */
-#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
-#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
-
-/* Gen 5 (IBX) SDVO/HDMI bits: */
-#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */
-#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
-
-/* Gen 6 (CPT) SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_SHIFT_CPT 29
-#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
-#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
-
-/* CHV SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_SHIFT_CHV 24
-#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
-#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
-
-/* Video Data Island Packet control */
-#define VIDEO_DIP_DATA _MMIO(0x61178)
-/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
- * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
- * of the infoframe structure specified by CEA-861. */
-#define VIDEO_DIP_DATA_SIZE 32
-#define VIDEO_DIP_ASYNC_DATA_SIZE 36
-#define VIDEO_DIP_GMP_DATA_SIZE 36
-#define VIDEO_DIP_VSC_DATA_SIZE 36
-#define VIDEO_DIP_PPS_DATA_SIZE 132
-#define VIDEO_DIP_CTL _MMIO(0x61170)
-/* Pre HSW: */
-#define VIDEO_DIP_ENABLE (1 << 31)
-#define VIDEO_DIP_PORT(port) ((port) << 29)
-#define VIDEO_DIP_PORT_MASK (3 << 29)
-#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */
-#define VIDEO_DIP_ENABLE_AVI (1 << 21)
-#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
-#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */
-#define VIDEO_DIP_ENABLE_SPD (8 << 21)
-#define VIDEO_DIP_SELECT_AVI (0 << 19)
-#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
-#define VIDEO_DIP_SELECT_GAMUT (2 << 19)
-#define VIDEO_DIP_SELECT_SPD (3 << 19)
-#define VIDEO_DIP_SELECT_MASK (3 << 19)
-#define VIDEO_DIP_FREQ_ONCE (0 << 16)
-#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
-#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
-#define VIDEO_DIP_FREQ_MASK (3 << 16)
-/* HSW and later: */
-#define VIDEO_DIP_ENABLE_DRM_GLK (1 << 28)
-#define PSR_VSC_BIT_7_SET (1 << 27)
-#define VSC_SELECT_MASK (0x3 << 25)
-#define VSC_SELECT_SHIFT 25
-#define VSC_DIP_HW_HEA_DATA (0 << 25)
-#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
-#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
-#define VSC_DIP_SW_HEA_DATA (3 << 25)
-#define VDIP_ENABLE_PPS (1 << 24)
-#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
-#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
-#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
-#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
-#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
-#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
-/* ADL and later: */
-#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23)
-
-#define PCH_GTC_CTL _MMIO(0xe7000)
-#define PCH_GTC_ENABLE (1 << 31)
-
-/* Display Port */
-#define DP_A _MMIO(0x64000) /* eDP */
-#define DP_B _MMIO(0x64100)
-#define DP_C _MMIO(0x64200)
-#define DP_D _MMIO(0x64300)
-#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
-#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
-#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
-#define DP_PORT_EN REG_BIT(31)
-#define DP_PIPE_SEL_MASK REG_GENMASK(30, 30)
-#define DP_PIPE_SEL(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK, (pipe))
-#define DP_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29)
-#define DP_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_IVB, (pipe))
-#define DP_PIPE_SEL_SHIFT_CHV 16
-#define DP_PIPE_SEL_MASK_CHV REG_GENMASK(17, 16)
-#define DP_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_CHV, (pipe))
-#define DP_LINK_TRAIN_MASK REG_GENMASK(29, 28)
-#define DP_LINK_TRAIN_PAT_1 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 0)
-#define DP_LINK_TRAIN_PAT_2 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 1)
-#define DP_LINK_TRAIN_PAT_IDLE REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 2)
-#define DP_LINK_TRAIN_OFF REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 3)
-#define DP_LINK_TRAIN_MASK_CPT REG_GENMASK(10, 8)
-#define DP_LINK_TRAIN_PAT_1_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 0)
-#define DP_LINK_TRAIN_PAT_2_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 1)
-#define DP_LINK_TRAIN_PAT_IDLE_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 2)
-#define DP_LINK_TRAIN_OFF_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 3)
-#define DP_VOLTAGE_MASK REG_GENMASK(27, 25)
-#define DP_VOLTAGE_0_4 REG_FIELD_PREP(DP_VOLTAGE_MASK, 0)
-#define DP_VOLTAGE_0_6 REG_FIELD_PREP(DP_VOLTAGE_MASK, 1)
-#define DP_VOLTAGE_0_8 REG_FIELD_PREP(DP_VOLTAGE_MASK, 2)
-#define DP_VOLTAGE_1_2 REG_FIELD_PREP(DP_VOLTAGE_MASK, 3)
-#define DP_PRE_EMPHASIS_MASK REG_GENMASK(24, 22)
-#define DP_PRE_EMPHASIS_0 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 0)
-#define DP_PRE_EMPHASIS_3_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 1)
-#define DP_PRE_EMPHASIS_6 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 2)
-#define DP_PRE_EMPHASIS_9_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 3)
-#define DP_PORT_WIDTH_MASK REG_GENMASK(21, 19)
-#define DP_PORT_WIDTH(width) REG_FIELD_PREP(DP_PORT_WIDTH_MASK, (width) - 1)
-#define DP_ENHANCED_FRAMING REG_BIT(18)
-#define EDP_PLL_FREQ_MASK REG_GENMASK(17, 16)
-#define EDP_PLL_FREQ_270MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 0)
-#define EDP_PLL_FREQ_162MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 1)
-#define DP_PORT_REVERSAL REG_BIT(15)
-#define EDP_PLL_ENABLE REG_BIT(14)
-#define DP_CLOCK_OUTPUT_ENABLE REG_BIT(13)
-#define DP_SCRAMBLING_DISABLE REG_BIT(12)
-#define DP_SCRAMBLING_DISABLE_ILK REG_BIT(7)
-#define DP_COLOR_RANGE_16_235 REG_BIT(8)
-#define DP_AUDIO_OUTPUT_ENABLE REG_BIT(6)
-#define DP_SYNC_VS_HIGH REG_BIT(4)
-#define DP_SYNC_HS_HIGH REG_BIT(3)
-#define DP_DETECTED REG_BIT(2)
-
-/*
- * Computing GMCH M and N values for the Display Port link
- *
- * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
- *
- * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
- *
- * The GMCH value is used internally
- *
- * bytes_per_pixel is the number of bytes coming out of the plane,
- * which is after the LUTs, so we want the bytes for our color format.
- * For our current usage, this is always 3, one byte for R, G and B.
- */
-#define _PIPEA_DATA_M_G4X 0x70050
-#define _PIPEB_DATA_M_G4X 0x71050
-#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
-/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
-#define TU_SIZE_MASK REG_GENMASK(30, 25)
-#define TU_SIZE(x) REG_FIELD_PREP(TU_SIZE_MASK, (x) - 1) /* default size 64 */
-#define DATA_LINK_M_N_MASK REG_GENMASK(23, 0)
-#define DATA_LINK_N_MAX (0x800000)
-
-#define _PIPEA_DATA_N_G4X 0x70054
-#define _PIPEB_DATA_N_G4X 0x71054
-#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
-
-/*
- * Computing Link M and N values for the Display Port link
- *
- * Link M / N = pixel_clock / ls_clk
- *
- * (the DP spec calls pixel_clock the 'strm_clk')
- *
- * The Link value is transmitted in the Main Stream
- * Attributes and VB-ID.
- */
-#define _PIPEA_LINK_M_G4X 0x70060
-#define _PIPEB_LINK_M_G4X 0x71060
-#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
-
-#define _PIPEA_LINK_N_G4X 0x70064
-#define _PIPEB_LINK_N_G4X 0x71064
-#define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
-
-/* Pipe A */
-#define _PIPEADSL 0x70000
-#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL)
-#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */
-#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0)
-
-#define _TRANSACONF 0x70008
-#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF)
-#define TRANSCONF_ENABLE REG_BIT(31)
-#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
-#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */
-#define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
-#define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
-#define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
-#define TRANSCONF_PIPE_LOCKED REG_BIT(25)
-#define TRANSCONF_FORCE_BORDER REG_BIT(25)
-#define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
-#define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
-#define TRANSCONF_GAMMA_MODE_8BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 0)
-#define TRANSCONF_GAMMA_MODE_10BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 1)
-#define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
-#define TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
-#define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
-#define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
-#define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0)
-#define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */
-#define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */
-#define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6)
-#define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */
-/*
- * ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display,
- * DBL=power saving pixel doubling, PF-ID* requires panel fitter
- */
-#define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
-#define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
-#define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0)
-#define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1)
-#define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3)
-#define TRANSCONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
-#define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
-#define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
-#define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
-#define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x))
-#define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16)
-#define TRANSCONF_WGC_ENABLE REG_BIT(15) /* vlv/chv only */
-#define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
-#define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13)
-#define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
-#define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
-#define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
-#define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
-#define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
-#define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
-#define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0)
-#define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1)
-#define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2)
-#define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3)
-#define TRANSCONF_DITHER_EN REG_BIT(4)
-#define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
-#define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0)
-#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
-#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
-#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
-#define TRANSCONF_PIXEL_COUNT_SCALING_MASK REG_GENMASK(1, 0)
-#define TRANSCONF_PIXEL_COUNT_SCALING_X4 1
-
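
The TRANSCONF fields above are built with the REG_* helpers; a compact
model of how they compose (the macro bodies below are simplified stand-ins
for the kernel's real i915_reg_defs.h definitions, which add compile-time
checks):

#include <stdint.h>
#include <stdio.h>

#define REG_BIT(b)		(1U << (b))
#define REG_GENMASK(h, l)	((~0U >> (31 - (h))) & (~0U << (l)))
#define REG_FIELD_PREP(mask, v)	(((v) << __builtin_ctz(mask)) & (mask))

#define TRANSCONF_ENABLE	REG_BIT(31)
#define TRANSCONF_BPC_MASK	REG_GENMASK(7, 5)
#define TRANSCONF_BPC_6		REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2)

int main(void)
{
	uint32_t val = TRANSCONF_ENABLE | TRANSCONF_BPC_6;

	printf("TRANSCONF = 0x%08x\n", val);	/* 0x80000040 */
	return 0;
}
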
-#define _PIPEASTAT 0x70024
-#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT)
-#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
-#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
-#define PIPE_CRC_ERROR_ENABLE (1UL << 29)
-#define PIPE_CRC_DONE_ENABLE (1UL << 28)
-#define PERF_COUNTER2_INTERRUPT_EN (1UL << 27)
-#define PIPE_GMBUS_EVENT_ENABLE (1UL << 27)
-#define PLANE_FLIP_DONE_INT_EN_VLV (1UL << 26)
-#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL << 26)
-#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL << 25)
-#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL << 24)
-#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
-#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL << 22)
-#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
-#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL << 21)
-#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL << 20)
-#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL << 19)
-#define PERF_COUNTER_INTERRUPT_EN (1UL << 19)
-#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL << 18) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL << 17)
-#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
-#define PIPEA_HBLANK_INT_EN_VLV (1UL << 16)
-#define PIPE_OVERLAY_UPDATED_ENABLE (1UL << 16)
-#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL << 15)
-#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL << 14)
-#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL << 13)
-#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL << 12)
-#define PERF_COUNTER2_INTERRUPT_STATUS (1UL << 11)
-#define PIPE_GMBUS_INTERRUPT_STATUS (1UL << 11)
-#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL << 10)
-#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
-#define PIPE_VSYNC_INTERRUPT_STATUS (1UL << 9)
-#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL << 8)
-#define PIPE_DPST_EVENT_STATUS (1UL << 7)
-#define PIPE_A_PSR_STATUS_VLV (1UL << 6)
-#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL << 6)
-#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL << 5)
-#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL << 4)
-#define PIPE_B_PSR_STATUS_VLV (1UL << 3)
-#define PERF_COUNTER_INTERRUPT_STATUS (1UL << 3)
-#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL << 2) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL << 1)
-#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
-#define PIPE_HBLANK_INT_STATUS (1UL << 0)
-#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0)
-#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
-#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
-
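
Note the layout: enable bits occupy the upper 16 bits and the matching
status bits the lower 16, with status being write-1-to-clear. A toy model
of acknowledging an event (fake_pipestat merely simulates the register;
this is not the driver's interrupt code):

#include <stdint.h>
#include <stdio.h>

#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
#define PIPESTAT_INT_STATUS_MASK 0x0000ffff

/* Simulated register: vblank enable (bit 17) with its status (bit 1) latched. */
static uint32_t fake_pipestat = (1U << 17) | (1U << 1);

static uint32_t pipestat_ack(void)
{
	uint32_t status = fake_pipestat & PIPESTAT_INT_STATUS_MASK;

	/* Writing the latched status back clears it; enables are preserved. */
	fake_pipestat &= ~status;
	return status;
}

int main(void)
{
	uint32_t status = pipestat_ack();

	printf("acked 0x%04x, register now 0x%08x\n", status, fake_pipestat);
	return 0;
}
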
-#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
-#define PIPE_ARB_CTL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A)
-#define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13)
-
-#define _PIPE_MISC_A 0x70030
-#define _PIPE_MISC_B 0x71030
-#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
-#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
-#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
-#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
-#define PIPE_MISC_PSR_MASK_PRIMARY_FLIP REG_BIT(23) /* bdw */
-#define PIPE_MISC_PSR_MASK_SPRITE_ENABLE REG_BIT(22) /* bdw */
-#define PIPE_MISC_PSR_MASK_PIPE_REG_WRITE REG_BIT(21) /* skl+ */
-#define PIPE_MISC_PSR_MASK_CURSOR_MOVE REG_BIT(21) /* bdw */
-#define PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT REG_BIT(20)
-#define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
-#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
-/*
- * Before display version 13, bits 5-7 of PIPE_MISC hold the DITHER BPC,
- * with valid values of 6, 8 and 10 BPC.
- * From ADL-P onwards, bits 5-7 hold the PORT OUTPUT BPC instead, with
- * valid values of 6, 8, 10 and 12 BPC.
- */
-#define PIPE_MISC_BPC_MASK REG_GENMASK(7, 5)
-#define PIPE_MISC_BPC_8 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 0)
-#define PIPE_MISC_BPC_10 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 1)
-#define PIPE_MISC_BPC_6 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 2)
-#define PIPE_MISC_BPC_12_ADLP REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 4) /* adlp+ */
-#define PIPE_MISC_DITHER_ENABLE REG_BIT(4)
-#define PIPE_MISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
-#define PIPE_MISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 0)
-#define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1)
-#define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2)
-#define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3)
-
-#define _PIPE_MISC2_A 0x7002C
-#define _PIPE_MISC2_B 0x7102C
-#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
-#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24)
-#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80)
-#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20)
-#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */
-#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id))
-
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29)
#define PIPEB_HLINE_INT_EN REG_BIT(28)
@@ -1669,129 +791,6 @@
#define SPRITEE_FLIPDONE_INT_EN REG_BIT(9)
#define PLANEC_FLIPDONE_INT_EN REG_BIT(8)
-#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
-#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16)
-#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16)
-#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27)
-#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26)
-#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25)
-#define CURSORC_INVALID_GTT_INT_EN REG_BIT(24)
-#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23)
-#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22)
-#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21)
-#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20)
-#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19)
-#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18)
-#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17)
-#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16)
-#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0)
-#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0)
-#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11)
-#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10)
-#define PLANEC_INVALID_GTT_STATUS REG_BIT(9)
-#define CURSORC_INVALID_GTT_STATUS REG_BIT(8)
-#define CURSORB_INVALID_GTT_STATUS REG_BIT(7)
-#define CURSORA_INVALID_GTT_STATUS REG_BIT(6)
-#define SPRITED_INVALID_GTT_STATUS REG_BIT(5)
-#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4)
-#define PLANEB_INVALID_GTT_STATUS REG_BIT(3)
-#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2)
-#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
-#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
-
-#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
-#define CBR_PND_DEADLINE_DISABLE (1 << 31)
-#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
-
-#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
-#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
-
-/*
- * The two pipe frame counter registers are not synchronized, so
- * reading a stable value is somewhat tricky. The following code
- * should work:
- *
- * do {
- * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
- * PIPE_FRAME_HIGH_SHIFT);
- * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
- * PIPE_FRAME_LOW_SHIFT);
- * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
- * PIPE_FRAME_HIGH_SHIFT);
- * } while (high1 != high2);
- * frame = (high1 << 8) | low1;
- */
-#define _PIPEAFRAMEHIGH 0x70040
-#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH)
-#define PIPE_FRAME_HIGH_MASK 0x0000ffff
-#define PIPE_FRAME_HIGH_SHIFT 0
-
-#define _PIPEAFRAMEPIXEL 0x70044
-#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL)
-#define PIPE_FRAME_LOW_MASK 0xff000000
-#define PIPE_FRAME_LOW_SHIFT 24
-#define PIPE_PIXEL_MASK 0x00ffffff
-#define PIPE_PIXEL_SHIFT 0
-
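
The same loop in compilable form (INREG is declared only as a stand-in
for a 32-bit register read; register addresses are passed in directly
rather than resolved through the _MMIO_PIPE2 machinery):

#include <stdint.h>

#define PIPE_FRAME_HIGH_MASK	0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT	0
#define PIPE_FRAME_LOW_MASK	0xff000000
#define PIPE_FRAME_LOW_SHIFT	24

extern uint32_t INREG(uint32_t reg);

static uint32_t read_frame_count(uint32_t frame_high, uint32_t frame_pixel)
{
	uint32_t high1, high2, low;

	/* The two registers are not latched together: re-read the high
	 * half until it is stable across the low read. */
	do {
		high1 = (INREG(frame_high) & PIPE_FRAME_HIGH_MASK) >>
			PIPE_FRAME_HIGH_SHIFT;
		low = (INREG(frame_pixel) & PIPE_FRAME_LOW_MASK) >>
			PIPE_FRAME_LOW_SHIFT;
		high2 = (INREG(frame_high) & PIPE_FRAME_HIGH_MASK) >>
			PIPE_FRAME_HIGH_SHIFT;
	} while (high1 != high2);

	return (high1 << 8) | low;
}
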
-/* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_G4X 0x70040
-#define PIPE_FRMCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X)
-
-#define _PIPEA_FLIPCOUNT_G4X 0x70044
-#define PIPE_FLIPCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X)
-
-/* CHV pipe B blender */
-#define _CHV_BLEND_A 0x60a00
-#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A)
-#define CHV_BLEND_MASK REG_GENMASK(31, 30)
-#define CHV_BLEND_LEGACY REG_FIELD_PREP(CHV_BLEND_MASK, 0)
-#define CHV_BLEND_ANDROID REG_FIELD_PREP(CHV_BLEND_MASK, 1)
-#define CHV_BLEND_MPO REG_FIELD_PREP(CHV_BLEND_MASK, 2)
-
-#define _CHV_CANVAS_A 0x60a04
-#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A)
-#define CHV_CANVAS_RED_MASK REG_GENMASK(29, 20)
-#define CHV_CANVAS_GREEN_MASK REG_GENMASK(19, 10)
-#define CHV_CANVAS_BLUE_MASK REG_GENMASK(9, 0)
-
-/* Display/Sprite base address macros */
-#define DISP_BASEADDR_MASK (0xfffff000)
-#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
-#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
-
-/*
- * VBIOS flags
- * gen2:
- * [00:06] alm,mgm
- * [10:16] all
- * [30:32] alm,mgm
- * gen3+:
- * [00:0f] all
- * [10:1f] all
- * [30:32] all
- */
-#define SWF0(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
-#define SWF1(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
-#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
-#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
-
-#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
-#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
-#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_PULSE_DURATION_6ms (2 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_PULSE_DURATION_100ms (3 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_PULSE_DURATION_MASK (3 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_HOTPLUG_STATUS_MASK (3 << 0)
-#define DIGITAL_PORTA_HOTPLUG_NO_DETECT (0 << 0)
-#define DIGITAL_PORTA_HOTPLUG_SHORT_DETECT (1 << 0)
-#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)
-
-/* refresh rate hardware control */
-#define RR_HW_CTL _MMIO(0x45300)
-#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
-#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
-
#define PCH_3DCGDIS0 _MMIO(0x46020)
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -1799,211 +798,6 @@
#define PCH_3DCGDIS1 _MMIO(0x46024)
# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
-#define _PIPEA_DATA_M1 0x60030
-#define _PIPEB_DATA_M1 0x61030
-#define PIPE_DATA_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1)
-
-#define _PIPEA_DATA_N1 0x60034
-#define _PIPEB_DATA_N1 0x61034
-#define PIPE_DATA_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1)
-
-#define _PIPEA_DATA_M2 0x60038
-#define _PIPEB_DATA_M2 0x61038
-#define PIPE_DATA_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2)
-
-#define _PIPEA_DATA_N2 0x6003c
-#define _PIPEB_DATA_N2 0x6103c
-#define PIPE_DATA_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2)
-
-#define _PIPEA_LINK_M1 0x60040
-#define _PIPEB_LINK_M1 0x61040
-#define PIPE_LINK_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1)
-
-#define _PIPEA_LINK_N1 0x60044
-#define _PIPEB_LINK_N1 0x61044
-#define PIPE_LINK_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1)
-
-#define _PIPEA_LINK_M2 0x60048
-#define _PIPEB_LINK_M2 0x61048
-#define PIPE_LINK_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2)
-
-#define _PIPEA_LINK_N2 0x6004c
-#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_LINK_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
-
-/*
- * Skylake scalers
- */
-#define _ID(id, a, b) _PICK_EVEN(id, a, b)
-#define _PS_1A_CTRL 0x68180
-#define _PS_2A_CTRL 0x68280
-#define _PS_1B_CTRL 0x68980
-#define _PS_2B_CTRL 0x68A80
-#define _PS_1C_CTRL 0x69180
-#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
- _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
-#define PS_SCALER_EN REG_BIT(31)
-#define PS_SCALER_TYPE_MASK REG_BIT(30) /* icl+ */
-#define PS_SCALER_TYPE_NON_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 0)
-#define PS_SCALER_TYPE_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 1)
-#define SKL_PS_SCALER_MODE_MASK REG_GENMASK(29, 28) /* skl/bxt */
-#define SKL_PS_SCALER_MODE_DYN REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 0)
-#define SKL_PS_SCALER_MODE_HQ REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 1)
-#define SKL_PS_SCALER_MODE_NV12 REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 2)
-#define PS_SCALER_MODE_MASK REG_BIT(29) /* glk-tgl */
-#define PS_SCALER_MODE_NORMAL REG_FIELD_PREP(PS_SCALER_MODE_MASK, 0)
-#define PS_SCALER_MODE_PLANAR REG_FIELD_PREP(PS_SCALER_MODE_MASK, 1)
-#define PS_ADAPTIVE_FILTERING_EN REG_BIT(28) /* icl+ */
-#define PS_BINDING_MASK REG_GENMASK(27, 25)
-#define PS_BINDING_PIPE REG_FIELD_PREP(PS_BINDING_MASK, 0)
-#define PS_BINDING_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_MASK, (plane_id) + 1)
-#define PS_FILTER_MASK REG_GENMASK(24, 23)
-#define PS_FILTER_MEDIUM REG_FIELD_PREP(PS_FILTER_MASK, 0)
-#define PS_FILTER_PROGRAMMED REG_FIELD_PREP(PS_FILTER_MASK, 1)
-#define PS_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_FILTER_MASK, 2)
-#define PS_FILTER_BILINEAR REG_FIELD_PREP(PS_FILTER_MASK, 3)
-#define PS_ADAPTIVE_FILTER_MASK REG_BIT(22) /* icl+ */
-#define PS_ADAPTIVE_FILTER_MEDIUM REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 0)
-#define PS_ADAPTIVE_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 1)
-#define PS_PIPE_SCALER_LOC_MASK REG_BIT(21) /* icl+ */
-#define PS_PIPE_SCALER_LOC_AFTER_OUTPUT_CSC REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 0) /* non-linear */
-#define PS_PIPE_SCALER_LOC_AFTER_CSC REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 1) /* linear */
-#define PS_VERT3TAP REG_BIT(21) /* skl/bxt */
-#define PS_VERT_INT_INVERT_FIELD REG_BIT(20)
-#define PS_PROG_SCALE_FACTOR REG_BIT(19) /* tgl+ */
-#define PS_PWRUP_PROGRESS REG_BIT(17)
-#define PS_V_FILTER_BYPASS REG_BIT(8)
-#define PS_VADAPT_EN REG_BIT(7) /* skl/bxt */
-#define PS_VADAPT_MODE_MASK REG_GENMASK(6, 5) /* skl/bxt */
-#define PS_VADAPT_MODE_LEAST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 0)
-#define PS_VADAPT_MODE_MOD_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 1)
-#define PS_VADAPT_MODE_MOST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 3)
-#define PS_BINDING_Y_MASK REG_GENMASK(7, 5) /* icl-tgl */
-#define PS_BINDING_Y_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_Y_MASK, (plane_id) + 1)
-#define PS_Y_VERT_FILTER_SELECT_MASK REG_BIT(4) /* glk+ */
-#define PS_Y_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_VERT_FILTER_SELECT_MASK, (set))
-#define PS_Y_HORZ_FILTER_SELECT_MASK REG_BIT(3) /* glk+ */
-#define PS_Y_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_HORZ_FILTER_SELECT_MASK, (set))
-#define PS_UV_VERT_FILTER_SELECT_MASK REG_BIT(2) /* glk+ */
-#define PS_UV_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_VERT_FILTER_SELECT_MASK, (set))
-#define PS_UV_HORZ_FILTER_SELECT_MASK REG_BIT(1) /* glk+ */
-#define PS_UV_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_HORZ_FILTER_SELECT_MASK, (set))
-
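
The pipe B address for scaler 1 falls out of the _ID arithmetic; assuming
the usual _PICK_EVEN definition (base plus index times stride), reduced
here to plain offsets without the _MMIO_PIPE wrapping:

#include <stdio.h>

#define _PICK_EVEN(id, a, b)	((a) + (id) * ((b) - (a)))
#define _ID(id, a, b)		_PICK_EVEN(id, a, b)

#define _PS_1B_CTRL 0x68980
#define _PS_2B_CTRL 0x68A80

int main(void)
{
	/* Scaler id 1 on pipe B: one 0x100 stride past _PS_1B_CTRL. */
	printf("PS_CTRL = 0x%x\n", _ID(1, _PS_1B_CTRL, _PS_2B_CTRL));
	return 0;
}
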
-#define _PS_PWR_GATE_1A 0x68160
-#define _PS_PWR_GATE_2A 0x68260
-#define _PS_PWR_GATE_1B 0x68960
-#define _PS_PWR_GATE_2B 0x68A60
-#define _PS_PWR_GATE_1C 0x69160
-#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
- _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
-#define PS_PWR_GATE_DIS_OVERRIDE REG_BIT(31)
-#define PS_PWR_GATE_SETTLING_TIME_MASK REG_GENMASK(4, 3)
-#define PS_PWR_GATE_SETTLING_TIME_32 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 0)
-#define PS_PWR_GATE_SETTLING_TIME_64 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 1)
-#define PS_PWR_GATE_SETTLING_TIME_96 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 2)
-#define PS_PWR_GATE_SETTLING_TIME_128 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 3)
-#define PS_PWR_GATE_SLPEN_MASK REG_GENMASK(1, 0)
-#define PS_PWR_GATE_SLPEN_8 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 0)
-#define PS_PWR_GATE_SLPEN_16 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 1)
-#define PS_PWR_GATE_SLPEN_24 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 2)
-#define PS_PWR_GATE_SLPEN_32 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 3)
-
-#define _PS_WIN_POS_1A 0x68170
-#define _PS_WIN_POS_2A 0x68270
-#define _PS_WIN_POS_1B 0x68970
-#define _PS_WIN_POS_2B 0x68A70
-#define _PS_WIN_POS_1C 0x69170
-#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
- _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
-#define PS_WIN_XPOS_MASK REG_GENMASK(31, 16)
-#define PS_WIN_XPOS(x) REG_FIELD_PREP(PS_WIN_XPOS_MASK, (x))
-#define PS_WIN_YPOS_MASK REG_GENMASK(15, 0)
-#define PS_WIN_YPOS(y) REG_FIELD_PREP(PS_WIN_YPOS_MASK, (y))
-
-#define _PS_WIN_SZ_1A 0x68174
-#define _PS_WIN_SZ_2A 0x68274
-#define _PS_WIN_SZ_1B 0x68974
-#define _PS_WIN_SZ_2B 0x68A74
-#define _PS_WIN_SZ_1C 0x69174
-#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
- _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
-#define PS_WIN_XSIZE_MASK REG_GENMASK(31, 16)
-#define PS_WIN_XSIZE(w) REG_FIELD_PREP(PS_WIN_XSIZE_MASK, (w))
-#define PS_WIN_YSIZE_MASK REG_GENMASK(15, 0)
-#define PS_WIN_YSIZE(h) REG_FIELD_PREP(PS_WIN_YSIZE_MASK, (h))
-
-#define _PS_VSCALE_1A 0x68184
-#define _PS_VSCALE_2A 0x68284
-#define _PS_VSCALE_1B 0x68984
-#define _PS_VSCALE_2B 0x68A84
-#define _PS_VSCALE_1C 0x69184
-#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
- _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
-
-#define _PS_HSCALE_1A 0x68190
-#define _PS_HSCALE_2A 0x68290
-#define _PS_HSCALE_1B 0x68990
-#define _PS_HSCALE_2B 0x68A90
-#define _PS_HSCALE_1C 0x69190
-#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
- _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
-
-#define _PS_VPHASE_1A 0x68188
-#define _PS_VPHASE_2A 0x68288
-#define _PS_VPHASE_1B 0x68988
-#define _PS_VPHASE_2B 0x68A88
-#define _PS_VPHASE_1C 0x69188
-#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
- _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
-#define PS_Y_PHASE_MASK REG_GENMASK(31, 16)
-#define PS_Y_PHASE(x) REG_FIELD_PREP(PS_Y_PHASE_MASK, (x))
-#define PS_UV_RGB_PHASE_MASK REG_GENMASK(15, 0)
-#define PS_UV_RGB_PHASE(x) REG_FIELD_PREP(PS_UV_RGB_PHASE_MASK, (x))
-#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
-#define PS_PHASE_TRIP (1 << 0)
-
-#define _PS_HPHASE_1A 0x68194
-#define _PS_HPHASE_2A 0x68294
-#define _PS_HPHASE_1B 0x68994
-#define _PS_HPHASE_2B 0x68A94
-#define _PS_HPHASE_1C 0x69194
-#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
- _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
-
-#define _PS_ECC_STAT_1A 0x681D0
-#define _PS_ECC_STAT_2A 0x682D0
-#define _PS_ECC_STAT_1B 0x689D0
-#define _PS_ECC_STAT_2B 0x68AD0
-#define _PS_ECC_STAT_1C 0x691D0
-#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
- _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
-
-#define _PS_COEF_SET0_INDEX_1A 0x68198
-#define _PS_COEF_SET0_INDEX_2A 0x68298
-#define _PS_COEF_SET0_INDEX_1B 0x68998
-#define _PS_COEF_SET0_INDEX_2B 0x68A98
-#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \
- _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
- _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
-#define PS_COEF_INDEX_AUTO_INC REG_BIT(10)
-
-#define _PS_COEF_SET0_DATA_1A 0x6819C
-#define _PS_COEF_SET0_DATA_2A 0x6829C
-#define _PS_COEF_SET0_DATA_1B 0x6899C
-#define _PS_COEF_SET0_DATA_2B 0x68A9C
-#define GLK_PS_COEF_DATA_SET(pipe, id, set) _MMIO_PIPE(pipe, \
- _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \
- _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8)
-
/* Display Internal Timeout Register */
#define RM_TIMEOUT _MMIO(0x42060)
#define RM_TIMEOUT_REG_CAPTURE _MMIO(0x420E0)
@@ -2042,25 +836,6 @@
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8 * (pipe)))
-/* More Ivybridge lolz */
-#define DE_ERR_INT_IVB (1 << 30)
-#define DE_GSE_IVB (1 << 29)
-#define DE_PCH_EVENT_IVB (1 << 28)
-#define DE_DP_A_HOTPLUG_IVB (1 << 27)
-#define DE_AUX_CHANNEL_A_IVB (1 << 26)
-#define DE_EDP_PSR_INT_HSW (1 << 19)
-#define DE_SPRITEC_FLIP_DONE_IVB (1 << 14)
-#define DE_PLANEC_FLIP_DONE_IVB (1 << 13)
-#define DE_PIPEC_VBLANK_IVB (1 << 10)
-#define DE_SPRITEB_FLIP_DONE_IVB (1 << 9)
-#define DE_PLANEB_FLIP_DONE_IVB (1 << 8)
-#define DE_PIPEB_VBLANK_IVB (1 << 5)
-#define DE_SPRITEA_FLIP_DONE_IVB (1 << 4)
-#define DE_PLANEA_FLIP_DONE_IVB (1 << 3)
-#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5 * (plane)))
-#define DE_PIPEA_VBLANK_IVB (1 << 0)
-#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
-
#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1 << 31)
@@ -2100,8 +875,6 @@
#define GEN8_GT_BCS_IRQ (1 << 1)
#define GEN8_GT_RCS_IRQ (1 << 0)
-#define XELPD_DISPLAY_ERR_FATAL_MASK _MMIO(0x4421c)
-
#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
@@ -2118,106 +891,6 @@
#define GEN8_VECS_IRQ_SHIFT 0
#define GEN8_WD_IRQ_SHIFT 16
-#define GEN8_DE_PIPE_ISR(pipe) _MMIO(0x44400 + (0x10 * (pipe)))
-#define GEN8_DE_PIPE_IMR(pipe) _MMIO(0x44404 + (0x10 * (pipe)))
-#define GEN8_DE_PIPE_IIR(pipe) _MMIO(0x44408 + (0x10 * (pipe)))
-#define GEN8_DE_PIPE_IER(pipe) _MMIO(0x4440c + (0x10 * (pipe)))
-#define GEN8_PIPE_FIFO_UNDERRUN REG_BIT(31)
-#define GEN8_PIPE_CDCLK_CRC_ERROR REG_BIT(29)
-#define GEN8_PIPE_CDCLK_CRC_DONE REG_BIT(28)
-#define GEN12_PIPEDMC_INTERRUPT REG_BIT(26) /* tgl+ */
-#define GEN12_PIPEDMC_FAULT REG_BIT(25) /* tgl+ */
-#define MTL_PIPEDMC_ATS_FAULT REG_BIT(24) /* mtl+ */
-#define GEN11_PIPE_PLANE7_FAULT REG_BIT(22) /* icl/tgl */
-#define GEN11_PIPE_PLANE6_FAULT REG_BIT(21) /* icl/tgl */
-#define GEN11_PIPE_PLANE5_FAULT REG_BIT(20) /* icl+ */
-#define GEN12_PIPE_VBLANK_UNMOD REG_BIT(19) /* tgl+ */
-#define MTL_PLANE_ATS_FAULT REG_BIT(18) /* mtl+ */
-#define GEN11_PIPE_PLANE7_FLIP_DONE REG_BIT(18) /* icl/tgl */
-#define GEN11_PIPE_PLANE6_FLIP_DONE REG_BIT(17) /* icl/tgl */
-#define GEN11_PIPE_PLANE5_FLIP_DONE REG_BIT(16) /* icl+ */
-#define GEN12_DSB_2_INT REG_BIT(15) /* tgl+ */
-#define GEN12_DSB_1_INT REG_BIT(14) /* tgl+ */
-#define GEN12_DSB_0_INT REG_BIT(13) /* tgl+ */
-#define GEN12_DSB_INT(dsb_id) REG_BIT(13 + (dsb_id))
-#define GEN9_PIPE_CURSOR_FAULT REG_BIT(11) /* skl+ */
-#define GEN9_PIPE_PLANE4_FAULT REG_BIT(10) /* skl+ */
-#define GEN8_PIPE_CURSOR_FAULT REG_BIT(10) /* bdw */
-#define GEN9_PIPE_PLANE3_FAULT REG_BIT(9) /* skl+ */
-#define GEN8_PIPE_SPRITE_FAULT REG_BIT(9) /* bdw */
-#define GEN9_PIPE_PLANE2_FAULT REG_BIT(8) /* skl+ */
-#define GEN8_PIPE_PRIMARY_FAULT REG_BIT(8) /* bdw */
-#define GEN9_PIPE_PLANE1_FAULT REG_BIT(7) /* skl+ */
-#define GEN9_PIPE_PLANE4_FLIP_DONE REG_BIT(6) /* skl+ */
-#define GEN9_PIPE_PLANE3_FLIP_DONE REG_BIT(5) /* skl+ */
-#define GEN8_PIPE_SPRITE_FLIP_DONE REG_BIT(5) /* bdw */
-#define GEN9_PIPE_PLANE2_FLIP_DONE REG_BIT(4) /* skl+ */
-#define GEN8_PIPE_PRIMARY_FLIP_DONE REG_BIT(4) /* bdw */
-#define GEN9_PIPE_PLANE1_FLIP_DONE REG_BIT(3) /* skl+ */
-#define GEN9_PIPE_PLANE_FLIP_DONE(plane_id) \
- REG_BIT(((plane_id) >= PLANE_5 ? 16 - PLANE_5 : 3 - PLANE_1) + (plane_id)) /* skl+ */
-#define GEN8_PIPE_SCAN_LINE_EVENT REG_BIT(2)
-#define GEN8_PIPE_VSYNC REG_BIT(1)
-#define GEN8_PIPE_VBLANK REG_BIT(0)
-
-#define GEN8_DE_PIPE_IRQ_REGS(pipe) I915_IRQ_REGS(GEN8_DE_PIPE_IMR(pipe), \
- GEN8_DE_PIPE_IER(pipe), \
- GEN8_DE_PIPE_IIR(pipe))
-
-#define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A)
-#define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1)
-
-#define GEN8_DE_PORT_ISR _MMIO(0x44440)
-#define GEN8_DE_PORT_IMR _MMIO(0x44444)
-#define GEN8_DE_PORT_IIR _MMIO(0x44448)
-#define GEN8_DE_PORT_IER _MMIO(0x4444c)
-#define DSI1_NON_TE (1 << 31)
-#define DSI0_NON_TE (1 << 30)
-#define ICL_AUX_CHANNEL_E (1 << 29)
-#define ICL_AUX_CHANNEL_F (1 << 28)
-#define GEN9_AUX_CHANNEL_D (1 << 27)
-#define GEN9_AUX_CHANNEL_C (1 << 26)
-#define GEN9_AUX_CHANNEL_B (1 << 25)
-#define DSI1_TE (1 << 24)
-#define DSI0_TE (1 << 23)
-#define GEN8_DE_PORT_HOTPLUG(hpd_pin) REG_BIT(3 + _HPD_PIN_DDI(hpd_pin))
-#define BXT_DE_PORT_HOTPLUG_MASK (GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | \
- GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | \
- GEN8_DE_PORT_HOTPLUG(HPD_PORT_C))
-#define BDW_DE_PORT_HOTPLUG_MASK GEN8_DE_PORT_HOTPLUG(HPD_PORT_A)
-#define BXT_DE_PORT_GMBUS (1 << 1)
-#define GEN8_AUX_CHANNEL_A (1 << 0)
-#define TGL_DE_PORT_AUX_USBC6 REG_BIT(13)
-#define XELPD_DE_PORT_AUX_DDIE REG_BIT(13)
-#define TGL_DE_PORT_AUX_USBC5 REG_BIT(12)
-#define XELPD_DE_PORT_AUX_DDID REG_BIT(12)
-#define TGL_DE_PORT_AUX_USBC4 REG_BIT(11)
-#define TGL_DE_PORT_AUX_USBC3 REG_BIT(10)
-#define TGL_DE_PORT_AUX_USBC2 REG_BIT(9)
-#define TGL_DE_PORT_AUX_USBC1 REG_BIT(8)
-#define TGL_DE_PORT_AUX_DDIC REG_BIT(2)
-#define TGL_DE_PORT_AUX_DDIB REG_BIT(1)
-#define TGL_DE_PORT_AUX_DDIA REG_BIT(0)
-
-#define GEN8_DE_PORT_IRQ_REGS I915_IRQ_REGS(GEN8_DE_PORT_IMR, \
- GEN8_DE_PORT_IER, \
- GEN8_DE_PORT_IIR)
-
-#define GEN8_DE_MISC_ISR _MMIO(0x44460)
-#define GEN8_DE_MISC_IMR _MMIO(0x44464)
-#define GEN8_DE_MISC_IIR _MMIO(0x44468)
-#define GEN8_DE_MISC_IER _MMIO(0x4446c)
-#define XELPDP_RM_TIMEOUT REG_BIT(29)
-#define XELPDP_PMDEMAND_RSPTOUT_ERR REG_BIT(27)
-#define GEN8_DE_MISC_GSE REG_BIT(27)
-#define GEN8_DE_EDP_PSR REG_BIT(19)
-#define XELPDP_PMDEMAND_RSP REG_BIT(3)
-#define XE2LPD_DBUF_OVERLAP_DETECTED REG_BIT(1)
-
-#define GEN8_DE_MISC_IRQ_REGS I915_IRQ_REGS(GEN8_DE_MISC_IMR, \
- GEN8_DE_MISC_IER, \
- GEN8_DE_MISC_IIR)
-
#define GEN8_PCU_ISR _MMIO(0x444e0)
#define GEN8_PCU_IMR _MMIO(0x444e4)
#define GEN8_PCU_IIR _MMIO(0x444e8)
@@ -2250,110 +923,12 @@
#define DG1_MSTR_IRQ REG_BIT(31)
#define DG1_MSTR_TILE(t) REG_BIT(t)
-#define GEN11_DISPLAY_INT_CTL _MMIO(0x44200)
-#define GEN11_DISPLAY_IRQ_ENABLE (1 << 31)
-#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
-#define GEN11_DE_PCH_IRQ (1 << 23)
-#define GEN11_DE_MISC_IRQ (1 << 22)
-#define GEN11_DE_HPD_IRQ (1 << 21)
-#define GEN11_DE_PORT_IRQ (1 << 20)
-#define GEN11_DE_PIPE_C (1 << 18)
-#define GEN11_DE_PIPE_B (1 << 17)
-#define GEN11_DE_PIPE_A (1 << 16)
-
-#define GEN11_DE_HPD_ISR _MMIO(0x44470)
-#define GEN11_DE_HPD_IMR _MMIO(0x44474)
-#define GEN11_DE_HPD_IIR _MMIO(0x44478)
-#define GEN11_DE_HPD_IER _MMIO(0x4447c)
-#define GEN11_TC_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
-#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC_HOTPLUG(HPD_PORT_TC6) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC5) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC4) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC3) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC2) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC1))
-#define GEN11_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
-#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT_HOTPLUG(HPD_PORT_TC6) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC5) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC4) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC3) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC1))
-
-#define GEN11_DE_HPD_IRQ_REGS I915_IRQ_REGS(GEN11_DE_HPD_IMR, \
- GEN11_DE_HPD_IER, \
- GEN11_DE_HPD_IIR)
-
-#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
-#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
-#define GEN11_HOTPLUG_CTL_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define GEN11_HOTPLUG_CTL_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin) (0 << (_HPD_PIN_TC(hpd_pin) * 4))
-
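
Each Type-C pin thus owns one 4-bit nibble of GEN11_TC_HOTPLUG_CTL: bit 3
of the nibble enables detection and bits 1:0 latch long/short pulses. A
decoding sketch using a plain 0-based pin index rather than the driver's
enum hpd_pin:

#include <stdint.h>
#include <stdio.h>

static int tc_long_detect(uint32_t ctl, unsigned int pin)
{
	return !!(ctl & (2U << (pin * 4)));
}

static int tc_short_detect(uint32_t ctl, unsigned int pin)
{
	return !!(ctl & (1U << (pin * 4)));
}

int main(void)
{
	uint32_t ctl = 0xA8;	/* pins 0 and 1 enabled, long pulse latched on pin 1 */

	printf("pin 1: long=%d short=%d\n",
	       tc_long_detect(ctl, 1), tc_short_detect(ctl, 1));
	return 0;
}
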
-#define PICAINTERRUPT_ISR _MMIO(0x16FE50)
-#define PICAINTERRUPT_IMR _MMIO(0x16FE54)
-#define PICAINTERRUPT_IIR _MMIO(0x16FE58)
-#define PICAINTERRUPT_IER _MMIO(0x16FE5C)
-#define XELPDP_DP_ALT_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
-#define XELPDP_DP_ALT_HOTPLUG_MASK REG_GENMASK(19, 16)
-#define XELPDP_AUX_TC(hpd_pin) REG_BIT(8 + _HPD_PIN_TC(hpd_pin))
-#define XELPDP_AUX_TC_MASK REG_GENMASK(11, 8)
-#define XE2LPD_AUX_DDI(hpd_pin) REG_BIT(6 + _HPD_PIN_DDI(hpd_pin))
-#define XE2LPD_AUX_DDI_MASK REG_GENMASK(7, 6)
-#define XELPDP_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
-#define XELPDP_TBT_HOTPLUG_MASK REG_GENMASK(3, 0)
-
-#define PICAINTERRUPT_IRQ_REGS I915_IRQ_REGS(PICAINTERRUPT_IMR, \
- PICAINTERRUPT_IER, \
- PICAINTERRUPT_IIR)
-
-#define XELPDP_PORT_HOTPLUG_CTL(hpd_pin) _MMIO(0x16F270 + (_HPD_PIN_TC(hpd_pin) * 0x200))
-#define XELPDP_TBT_HOTPLUG_ENABLE REG_BIT(6)
-#define XELPDP_TBT_HPD_LONG_DETECT REG_BIT(5)
-#define XELPDP_TBT_HPD_SHORT_DETECT REG_BIT(4)
-#define XELPDP_DP_ALT_HOTPLUG_ENABLE REG_BIT(2)
-#define XELPDP_DP_ALT_HPD_LONG_DETECT REG_BIT(1)
-#define XELPDP_DP_ALT_HPD_SHORT_DETECT REG_BIT(0)
-
-#define XELPDP_INITIATE_PMDEMAND_REQUEST(dword) _MMIO(0x45230 + 4 * (dword))
-#define XELPDP_PMDEMAND_QCLK_GV_BW_MASK REG_GENMASK(31, 16)
-#define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK REG_GENMASK(14, 12)
-#define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK REG_GENMASK(11, 8)
-#define XE3_PMDEMAND_PIPES_MASK REG_GENMASK(7, 4)
-#define XELPDP_PMDEMAND_PIPES_MASK REG_GENMASK(7, 6)
-#define XELPDP_PMDEMAND_DBUFS_MASK REG_GENMASK(5, 4)
-#define XELPDP_PMDEMAND_PHYS_MASK REG_GENMASK(2, 0)
-
-#define XELPDP_PMDEMAND_REQ_ENABLE REG_BIT(31)
-#define XELPDP_PMDEMAND_CDCLK_FREQ_MASK REG_GENMASK(30, 20)
-#define XELPDP_PMDEMAND_DDICLK_FREQ_MASK REG_GENMASK(18, 8)
-#define XELPDP_PMDEMAND_SCALERS_MASK REG_GENMASK(6, 4)
-#define XELPDP_PMDEMAND_PLLS_MASK REG_GENMASK(2, 0)
-
-#define GEN12_DCPR_STATUS_1 _MMIO(0x46440)
-#define XELPDP_PMDEMAND_INFLIGHT_STATUS REG_BIT(26)
-
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT REG_BIT(25)
#define ILK_DPARB_GATE REG_BIT(22)
#define ILK_VSDPFD_FULL REG_BIT(21)
-#define FUSE_STRAP _MMIO(0x42014)
-#define ILK_INTERNAL_GRAPHICS_DISABLE REG_BIT(31)
-#define ILK_INTERNAL_DISPLAY_DISABLE REG_BIT(30)
-#define ILK_DISPLAY_DEBUG_DISABLE REG_BIT(29)
-#define IVB_PIPE_C_DISABLE REG_BIT(28)
-#define ILK_HDCP_DISABLE REG_BIT(25)
-#define ILK_eDP_A_DISABLE REG_BIT(24)
-#define HSW_CDCLK_LIMIT REG_BIT(24)
-#define ILK_DESKTOP REG_BIT(23)
-#define HSW_CPU_SSC_ENABLE REG_BIT(21)
-
-#define FUSE_STRAP3 _MMIO(0x42020)
-#define HSW_REF_CLK_SELECT REG_BIT(1)
-
#define ILK_DSPCLK_GATE_D _MMIO(0x42020)
#define ILK_VRHUNIT_CLOCK_GATE_DISABLE REG_BIT(28)
#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE REG_BIT(9)
@@ -2378,25 +953,6 @@
#define CHICKEN_PAR2_1 _MMIO(0x42090)
#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT REG_BIT(14)
-#define CHICKEN_MISC_2 _MMIO(0x42084)
-#define CHICKEN_MISC_DISABLE_DPT REG_BIT(30) /* adl,dg2 */
-#define BMG_DARB_HALF_BLK_END_BURST REG_BIT(27)
-#define KBL_ARB_FILL_SPARE_14 REG_BIT(14)
-#define KBL_ARB_FILL_SPARE_13 REG_BIT(13)
-#define GLK_CL2_PWR_DOWN REG_BIT(12)
-#define GLK_CL1_PWR_DOWN REG_BIT(11)
-#define GLK_CL0_PWR_DOWN REG_BIT(10)
-
-#define CHICKEN_MISC_3 _MMIO(0x42088)
-#define DP_MST_DPT_DPTP_ALIGN_WA(trans) REG_BIT(9 + (trans) - TRANSCODER_A)
-#define DP_MST_SHORT_HBLANK_WA(trans) REG_BIT(5 + (trans) - TRANSCODER_A)
-#define DP_MST_FEC_BS_JITTER_WA(trans) REG_BIT(0 + (trans) - TRANSCODER_A)
-
-#define CHICKEN_MISC_4 _MMIO(0x4208c)
-#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
-#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
-#define CHICKEN_FBC_STRIDE(x) REG_FIELD_PREP(CHICKEN_FBC_STRIDE_MASK, (x))
-
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
@@ -2420,72 +976,11 @@
#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
#define BDW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(0) /* bdw */
-#define _CHICKEN_TRANS_A 0x420c0
-#define _CHICKEN_TRANS_B 0x420c4
-#define _CHICKEN_TRANS_C 0x420c8
-#define _CHICKEN_TRANS_EDP 0x420cc
-#define _CHICKEN_TRANS_D 0x420d8
-#define _CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
- [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
- [TRANSCODER_A] = _CHICKEN_TRANS_A, \
- [TRANSCODER_B] = _CHICKEN_TRANS_B, \
- [TRANSCODER_C] = _CHICKEN_TRANS_C, \
- [TRANSCODER_D] = _CHICKEN_TRANS_D))
-#define _MTL_CHICKEN_TRANS_A 0x604e0
-#define _MTL_CHICKEN_TRANS_B 0x614e0
-#define _MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
- _MTL_CHICKEN_TRANS_A, \
- _MTL_CHICKEN_TRANS_B)
-#define CHICKEN_TRANS(display, trans) (DISPLAY_VER(display) >= 14 ? _MTL_CHICKEN_TRANS(trans) : _CHICKEN_TRANS(trans))
-#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
-#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
-#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
-#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, (x))
-#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
-#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23)
-#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19)
-#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18)
-#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18)
-#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
-#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
-#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
-#define DP_FEC_BS_JITTER_WA REG_BIT(15)
-#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12)
-#define DP_DSC_INSERT_SF_AT_EOL_WA REG_BIT(4)
-#define HDCP_LINE_REKEY_DISABLE REG_BIT(0)
-
#define DISP_ARB_CTL _MMIO(0x45000)
#define DISP_FBC_MEMORY_WAKE REG_BIT(31)
#define DISP_TILE_SURFACE_SWIZZLING REG_BIT(13)
#define DISP_FBC_WM_DIS REG_BIT(15)
-#define DISP_ARB_CTL2 _MMIO(0x45004)
-#define DISP_DATA_PARTITION_5_6 REG_BIT(6)
-#define DISP_IPC_ENABLE REG_BIT(3)
-
-#define GEN7_MSG_CTL _MMIO(0x45010)
-#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
-#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
-
-#define _BW_BUDDY0_CTL 0x45130
-#define _BW_BUDDY1_CTL 0x45140
-#define BW_BUDDY_CTL(x) _MMIO(_PICK_EVEN(x, \
- _BW_BUDDY0_CTL, \
- _BW_BUDDY1_CTL))
-#define BW_BUDDY_DISABLE REG_BIT(31)
-#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
-#define BW_BUDDY_TLB_REQ_TIMER(x) REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, (x))
-
-#define _BW_BUDDY0_PAGE_MASK 0x45134
-#define _BW_BUDDY1_PAGE_MASK 0x45144
-#define BW_BUDDY_PAGE_MASK(x) _MMIO(_PICK_EVEN(x, \
- _BW_BUDDY0_PAGE_MASK, \
- _BW_BUDDY1_PAGE_MASK))
-
-#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
-#define MTL_RESET_PICA_HANDSHAKE_EN REG_BIT(6)
-#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
-
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define _LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
@@ -2502,518 +997,24 @@
#define MASK_WAKEMEM REG_BIT(13)
#define DDI_CLOCK_REG_ACCESS REG_BIT(7)
-#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
-#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
-#define DCPR_MASK_LPMODE REG_BIT(26)
-#define DCPR_SEND_RESP_IMM REG_BIT(25)
-#define DCPR_CLEAR_MEMSTAT_DIS REG_BIT(24)
-
-#define XELPD_CHICKEN_DCPR_3 _MMIO(0x46438)
-#define DMD_RSP_TIMEOUT_DISABLE REG_BIT(19)
-
-#define SKL_DFSM _MMIO(0x51000)
-#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
-#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
-#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
-#define ICL_DFSM_DMC_DISABLE (1 << 23)
-#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
-#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
-#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
-#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
-#define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
-#define XE2LPD_DFSM_DBUF_OVERLAP_DISABLE (1 << 3)
-
-#define XE2LPD_DE_CAP _MMIO(0x41100)
-#define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30)
-#define XE2LPD_DE_CAP_DSC_MASK REG_GENMASK(29, 28)
-#define XE2LPD_DE_CAP_DSC_REMOVED 1
-#define XE2LPD_DE_CAP_SCALER_MASK REG_GENMASK(27, 26)
-#define XE2LPD_DE_CAP_SCALER_SINGLE 1
-
-#define SKL_DSSM _MMIO(0x51004)
-#define ICL_DSSM_CDCLK_PLL_REFCLK_MASK (7 << 29)
-#define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz (0 << 29)
-#define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29)
-#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
-
#define GMD_ID_DISPLAY _MMIO(0x510a0)
#define GMD_ID_ARCH_MASK REG_GENMASK(31, 22)
#define GMD_ID_RELEASE_MASK REG_GENMASK(21, 14)
#define GMD_ID_STEP REG_GENMASK(5, 0)
-/* GEN11 chicken */
-#define _PIPEA_CHICKEN 0x70038
-#define _PIPEB_CHICKEN 0x71038
-#define _PIPEC_CHICKEN 0x72038
-#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
- _PIPEB_CHICKEN)
-#define UNDERRUN_RECOVERY_DISABLE_ADLP REG_BIT(30)
-#define UNDERRUN_RECOVERY_ENABLE_DG2 REG_BIT(30)
-#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU REG_BIT(15)
-#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12)
-#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7)
-
/* PCH */
-#define PCH_DISPLAY_BASE 0xc0000u
-
-/* south display engine interrupt: IBX */
-#define SDE_AUDIO_POWER_D (1 << 27)
-#define SDE_AUDIO_POWER_C (1 << 26)
-#define SDE_AUDIO_POWER_B (1 << 25)
-#define SDE_AUDIO_POWER_SHIFT (25)
-#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
-#define SDE_GMBUS (1 << 24)
-#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
-#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
-#define SDE_AUDIO_HDCP_MASK (3 << 22)
-#define SDE_AUDIO_TRANSB (1 << 21)
-#define SDE_AUDIO_TRANSA (1 << 20)
-#define SDE_AUDIO_TRANS_MASK (3 << 20)
-#define SDE_POISON (1 << 19)
-/* 18 reserved */
-#define SDE_FDI_RXB (1 << 17)
-#define SDE_FDI_RXA (1 << 16)
-#define SDE_FDI_MASK (3 << 16)
-#define SDE_AUXD (1 << 15)
-#define SDE_AUXC (1 << 14)
-#define SDE_AUXB (1 << 13)
-#define SDE_AUX_MASK (7 << 13)
-/* 12 reserved */
-#define SDE_CRT_HOTPLUG (1 << 11)
-#define SDE_PORTD_HOTPLUG (1 << 10)
-#define SDE_PORTC_HOTPLUG (1 << 9)
-#define SDE_PORTB_HOTPLUG (1 << 8)
-#define SDE_SDVOB_HOTPLUG (1 << 6)
-#define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \
- SDE_SDVOB_HOTPLUG | \
- SDE_PORTB_HOTPLUG | \
- SDE_PORTC_HOTPLUG | \
- SDE_PORTD_HOTPLUG)
-#define SDE_TRANSB_CRC_DONE (1 << 5)
-#define SDE_TRANSB_CRC_ERR (1 << 4)
-#define SDE_TRANSB_FIFO_UNDER (1 << 3)
-#define SDE_TRANSA_CRC_DONE (1 << 2)
-#define SDE_TRANSA_CRC_ERR (1 << 1)
-#define SDE_TRANSA_FIFO_UNDER (1 << 0)
-#define SDE_TRANS_MASK (0x3f)
-
-/* south display engine interrupt: CPT - CNP */
-#define SDE_AUDIO_POWER_D_CPT (1 << 31)
-#define SDE_AUDIO_POWER_C_CPT (1 << 30)
-#define SDE_AUDIO_POWER_B_CPT (1 << 29)
-#define SDE_AUDIO_POWER_SHIFT_CPT 29
-#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
-#define SDE_AUXD_CPT (1 << 27)
-#define SDE_AUXC_CPT (1 << 26)
-#define SDE_AUXB_CPT (1 << 25)
-#define SDE_AUX_MASK_CPT (7 << 25)
-#define SDE_PORTE_HOTPLUG_SPT (1 << 25)
-#define SDE_PORTA_HOTPLUG_SPT (1 << 24)
-#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
-#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
-#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
-#define SDE_CRT_HOTPLUG_CPT (1 << 19)
-#define SDE_SDVOB_HOTPLUG_CPT (1 << 18)
-#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
- SDE_SDVOB_HOTPLUG_CPT | \
- SDE_PORTD_HOTPLUG_CPT | \
- SDE_PORTC_HOTPLUG_CPT | \
- SDE_PORTB_HOTPLUG_CPT)
-#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
- SDE_PORTD_HOTPLUG_CPT | \
- SDE_PORTC_HOTPLUG_CPT | \
- SDE_PORTB_HOTPLUG_CPT | \
- SDE_PORTA_HOTPLUG_SPT)
-#define SDE_GMBUS_CPT (1 << 17)
-#define SDE_ERROR_CPT (1 << 16)
-#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
-#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
-#define SDE_FDI_RXC_CPT (1 << 8)
-#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
-#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
-#define SDE_FDI_RXB_CPT (1 << 4)
-#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
-#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
-#define SDE_FDI_RXA_CPT (1 << 0)
-#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
- SDE_AUDIO_CP_REQ_B_CPT | \
- SDE_AUDIO_CP_REQ_A_CPT)
-#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
- SDE_AUDIO_CP_CHG_B_CPT | \
- SDE_AUDIO_CP_CHG_A_CPT)
-#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
- SDE_FDI_RXB_CPT | \
- SDE_FDI_RXA_CPT)
-
-/* south display engine interrupt: ICP/TGP/MTP */
-#define SDE_PICAINTERRUPT REG_BIT(31)
-#define SDE_GMBUS_ICP (1 << 23)
-#define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin))
-#define SDE_TC_HOTPLUG_DG2(hpd_pin) REG_BIT(25 + _HPD_PIN_TC(hpd_pin)) /* sigh */
-#define SDE_DDI_HOTPLUG_ICP(hpd_pin) REG_BIT(16 + _HPD_PIN_DDI(hpd_pin))
-#define SDE_DDI_HOTPLUG_MASK_ICP (SDE_DDI_HOTPLUG_ICP(HPD_PORT_D) | \
- SDE_DDI_HOTPLUG_ICP(HPD_PORT_C) | \
- SDE_DDI_HOTPLUG_ICP(HPD_PORT_B) | \
- SDE_DDI_HOTPLUG_ICP(HPD_PORT_A))
-#define SDE_TC_HOTPLUG_MASK_ICP (SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1))
-
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
#define SDEIIR _MMIO(0xc4008)
#define SDEIER _MMIO(0xc400c)
-#define SDE_IRQ_REGS I915_IRQ_REGS(SDEIMR, \
- SDEIER, \
- SDEIIR)
-
-#define SERR_INT _MMIO(0xc4040)
-#define SERR_INT_POISON (1 << 31)
-#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
-
-/* digital port hotplug */
-#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
-#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
-#define BXT_DDIA_HPD_INVERT (1 << 27)
-#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
-#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
-#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
-#define PORTA_HOTPLUG_LONG_DETECT (2 << 24) /* SPT+ & BXT */
-#define PORTD_HOTPLUG_ENABLE (1 << 20)
-#define PORTD_PULSE_DURATION_2ms (0 << 18) /* pre-LPT */
-#define PORTD_PULSE_DURATION_4_5ms (1 << 18) /* pre-LPT */
-#define PORTD_PULSE_DURATION_6ms (2 << 18) /* pre-LPT */
-#define PORTD_PULSE_DURATION_100ms (3 << 18) /* pre-LPT */
-#define PORTD_PULSE_DURATION_MASK (3 << 18) /* pre-LPT */
-#define PORTD_HOTPLUG_STATUS_MASK (3 << 16)
-#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
-#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
-#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
-#define PORTC_HOTPLUG_ENABLE (1 << 12)
-#define BXT_DDIC_HPD_INVERT (1 << 11)
-#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
-#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
-#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
-#define PORTC_PULSE_DURATION_100ms (3 << 10) /* pre-LPT */
-#define PORTC_PULSE_DURATION_MASK (3 << 10) /* pre-LPT */
-#define PORTC_HOTPLUG_STATUS_MASK (3 << 8)
-#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
-#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
-#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
-#define PORTB_HOTPLUG_ENABLE (1 << 4)
-#define BXT_DDIB_HPD_INVERT (1 << 3)
-#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
-#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
-#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
-#define PORTB_PULSE_DURATION_100ms (3 << 2) /* pre-LPT */
-#define PORTB_PULSE_DURATION_MASK (3 << 2) /* pre-LPT */
-#define PORTB_HOTPLUG_STATUS_MASK (3 << 0)
-#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
-#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
-#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
-#define BXT_DDI_HPD_INVERT_MASK (BXT_DDIA_HPD_INVERT | \
- BXT_DDIB_HPD_INVERT | \
- BXT_DDIC_HPD_INVERT)
-
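
Decoding a latched pulse from this register is a simple mask-and-compare;
a sketch for port D (regval is a sample value, not a real MMIO read):

#include <stdint.h>
#include <stdio.h>

#define PORTD_HOTPLUG_STATUS_MASK	(3 << 16)
#define PORTD_HOTPLUG_NO_DETECT		(0 << 16)
#define PORTD_HOTPLUG_SHORT_DETECT	(1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT	(2 << 16)

int main(void)
{
	uint32_t regval = PORTD_HOTPLUG_LONG_DETECT;

	switch (regval & PORTD_HOTPLUG_STATUS_MASK) {
	case PORTD_HOTPLUG_SHORT_DETECT:
		puts("port D: short pulse (e.g. a DP sink IRQ)");
		break;
	case PORTD_HOTPLUG_LONG_DETECT:
		puts("port D: long pulse (plug or unplug)");
		break;
	default:
		puts("port D: nothing detected");
	}
	return 0;
}
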
-#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
-#define PORTE_HOTPLUG_ENABLE (1 << 4)
-#define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
-#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
-#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
-#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
-
-/* This register reuses the PCH_PORT_HOTPLUG address. The
- * functionality previously covered by PCH_PORT_HOTPLUG is now split
- * between SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
- */
-#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
-#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin) (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin) (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(hpd_pin) (0x2 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
-
-#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
-#define ICP_TC_HPD_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define ICP_TC_HPD_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define ICP_TC_HPD_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
-
-#define SHPD_FILTER_CNT _MMIO(0xc4038)
-#define SHPD_FILTER_CNT_500_ADJ 0x001D9
-#define SHPD_FILTER_CNT_250 0x000F8
-
-#define _PCH_DPLL_A 0xc6014
-#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
-
-#define _PCH_FPA0 0xc6040
-#define _PCH_FPB0 0xc6048
-#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define FP_CB_TUNE (0x3 << 22)
-
-#define _PCH_FPA1 0xc6044
-#define _PCH_FPB1 0xc604c
-#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
-
-#define PCH_DPLL_TEST _MMIO(0xc606c)
-
-#define PCH_DREF_CONTROL _MMIO(0xC6200)
-#define DREF_CONTROL_MASK 0x7fc3
-#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0 << 13)
-#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2 << 13)
-#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3 << 13)
-#define DREF_CPU_SOURCE_OUTPUT_MASK (3 << 13)
-#define DREF_SSC_SOURCE_DISABLE (0 << 11)
-#define DREF_SSC_SOURCE_ENABLE (2 << 11)
-#define DREF_SSC_SOURCE_MASK (3 << 11)
-#define DREF_NONSPREAD_SOURCE_DISABLE (0 << 9)
-#define DREF_NONSPREAD_CK505_ENABLE (1 << 9)
-#define DREF_NONSPREAD_SOURCE_ENABLE (2 << 9)
-#define DREF_NONSPREAD_SOURCE_MASK (3 << 9)
-#define DREF_SUPERSPREAD_SOURCE_DISABLE (0 << 7)
-#define DREF_SUPERSPREAD_SOURCE_ENABLE (2 << 7)
-#define DREF_SUPERSPREAD_SOURCE_MASK (3 << 7)
-#define DREF_SSC4_DOWNSPREAD (0 << 6)
-#define DREF_SSC4_CENTERSPREAD (1 << 6)
-#define DREF_SSC1_DISABLE (0 << 1)
-#define DREF_SSC1_ENABLE (1 << 1)
-#define DREF_SSC4_DISABLE (0)
-#define DREF_SSC4_ENABLE (1)
-
-#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
-#define FDL_TP1_TIMER_SHIFT 12
-#define FDL_TP1_TIMER_MASK (3 << 12)
-#define FDL_TP2_TIMER_SHIFT 10
-#define FDL_TP2_TIMER_MASK (3 << 10)
-#define RAWCLK_FREQ_MASK 0x3ff
-#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
-#define CNP_RAWCLK_DIV(div) ((div) << 16)
-#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
-#define CNP_RAWCLK_DEN(den) ((den) << 26)
-#define ICP_RAWCLK_NUM(num) ((num) << 11)
-
-#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
-
-#define PCH_SSC4_PARMS _MMIO(0xc6210)
-#define PCH_SSC4_AUX_PARMS _MMIO(0xc6214)
-
-#define PCH_DPLL_SEL _MMIO(0xc7000)
-#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
-#define TRANS_DPLLA_SEL(pipe) 0
-#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
-
-/* transcoder */
-#define _PCH_TRANS_HTOTAL_A 0xe0000
-#define _PCH_TRANS_HTOTAL_B 0xe1000
-#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
-#define TRANS_HTOTAL_SHIFT 16
-#define TRANS_HACTIVE_SHIFT 0
-
-#define _PCH_TRANS_HBLANK_A 0xe0004
-#define _PCH_TRANS_HBLANK_B 0xe1004
-#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
-#define TRANS_HBLANK_END_SHIFT 16
-#define TRANS_HBLANK_START_SHIFT 0
-
-#define _PCH_TRANS_HSYNC_A 0xe0008
-#define _PCH_TRANS_HSYNC_B 0xe1008
-#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
-#define TRANS_HSYNC_END_SHIFT 16
-#define TRANS_HSYNC_START_SHIFT 0
-
-#define _PCH_TRANS_VTOTAL_A 0xe000c
-#define _PCH_TRANS_VTOTAL_B 0xe100c
-#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
-#define TRANS_VTOTAL_SHIFT 16
-#define TRANS_VACTIVE_SHIFT 0
-
-#define _PCH_TRANS_VBLANK_A 0xe0010
-#define _PCH_TRANS_VBLANK_B 0xe1010
-#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
-#define TRANS_VBLANK_END_SHIFT 16
-#define TRANS_VBLANK_START_SHIFT 0
-
-#define _PCH_TRANS_VSYNC_A 0xe0014
-#define _PCH_TRANS_VSYNC_B 0xe1014
-#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
-#define TRANS_VSYNC_END_SHIFT 16
-#define TRANS_VSYNC_START_SHIFT 0
-
-#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
-#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
-#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B)
-
-#define _PCH_TRANSA_DATA_M1 0xe0030
-#define _PCH_TRANSB_DATA_M1 0xe1030
-#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
-
-#define _PCH_TRANSA_DATA_N1 0xe0034
-#define _PCH_TRANSB_DATA_N1 0xe1034
-#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
-
-#define _PCH_TRANSA_DATA_M2 0xe0038
-#define _PCH_TRANSB_DATA_M2 0xe1038
-#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
-
-#define _PCH_TRANSA_DATA_N2 0xe003c
-#define _PCH_TRANSB_DATA_N2 0xe103c
-#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
-
-#define _PCH_TRANSA_LINK_M1 0xe0040
-#define _PCH_TRANSB_LINK_M1 0xe1040
-#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
-
-#define _PCH_TRANSA_LINK_N1 0xe0044
-#define _PCH_TRANSB_LINK_N1 0xe1044
-#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
-
-#define _PCH_TRANSA_LINK_M2 0xe0048
-#define _PCH_TRANSB_LINK_M2 0xe1048
-#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
-
-#define _PCH_TRANSA_LINK_N2 0xe004c
-#define _PCH_TRANSB_LINK_N2 0xe104c
-#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
-
-/* Per-transcoder DIP controls (PCH) */
-#define _VIDEO_DIP_CTL_A 0xe0200
-#define _VIDEO_DIP_CTL_B 0xe1200
-#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
-
-#define _VIDEO_DIP_DATA_A 0xe0208
-#define _VIDEO_DIP_DATA_B 0xe1208
-#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
-
-#define _VIDEO_DIP_GCP_A 0xe0210
-#define _VIDEO_DIP_GCP_B 0xe1210
-#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-#define GCP_COLOR_INDICATION (1 << 2)
-#define GCP_DEFAULT_PHASE_ENABLE (1 << 1)
-#define GCP_AV_MUTE (1 << 0)
-
-/* Per-transcoder DIP controls (VLV) */
-#define _VLV_VIDEO_DIP_CTL_A 0x60200
-#define _VLV_VIDEO_DIP_CTL_B 0x61170
-#define _CHV_VIDEO_DIP_CTL_C 0x611f0
-#define VLV_TVIDEO_DIP_CTL(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
- _VLV_VIDEO_DIP_CTL_A, \
- _VLV_VIDEO_DIP_CTL_B, \
- _CHV_VIDEO_DIP_CTL_C)
-
-#define _VLV_VIDEO_DIP_DATA_A 0x60208
-#define _VLV_VIDEO_DIP_DATA_B 0x61174
-#define _CHV_VIDEO_DIP_DATA_C 0x611f4
-#define VLV_TVIDEO_DIP_DATA(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
- _VLV_VIDEO_DIP_DATA_A, \
- _VLV_VIDEO_DIP_DATA_B, \
- _CHV_VIDEO_DIP_DATA_C)
-
-#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
-#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
-#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C 0x611f8
-#define VLV_TVIDEO_DIP_GCP(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
- _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
- _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, \
- _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
-
-/* Haswell DIP controls */
-#define _HSW_VIDEO_DIP_CTL_A 0x60200
-#define _HSW_VIDEO_DIP_CTL_B 0x61200
-#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A)
-
-#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220
-#define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220
-#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260
-#define _HSW_VIDEO_DIP_VS_DATA_B 0x61260
-#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
-#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
-#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
-#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
-#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
-#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
-#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
-
-/* ADLP and later: */
-#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484
-#define _ADL_VIDEO_DIP_AS_DATA_B 0x61484
-#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\
- _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4)
-
-#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
-#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
-#define GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
-#define _HSW_VIDEO_DIP_AVI_ECC_B 0x61240
-#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
-#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
-#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
-#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
-#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300
-#define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300
-#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344
-#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
-
-#define _HSW_VIDEO_DIP_GCP_A 0x60210
-#define _HSW_VIDEO_DIP_GCP_B 0x61210
-#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A)
-
/* Icelake PPS_DATA and _ECC DIP Registers.
* These are available for transcoders B,C and eDP.
* Adding the _A so as to reuse the _MMIO_TRANS2
* definition, with which it offsets to the right location.
*/
-#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350
-#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350
-#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
-
-#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
-#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
-#define ICL_VIDEO_DIP_PPS_ECC(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
-
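As the comment above notes, only the `_A` variant is defined so that `_MMIO_TRANS2` can rebase it onto transcoders B, C, and eDP. A minimal standalone sketch of that offset arithmetic; the `trans_offset` table here is hypothetical, as the real values come from the per-device display MMIO tables:

```c
#include <stdint.h>
#include <stdio.h>

#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350

/* hypothetical transcoder offset table for transcoders A..D */
static const uint32_t trans_offset[] = { 0x60000, 0x61000, 0x62000, 0x63000 };

/* simplified _MMIO_TRANS2: rebase the "_A" register onto another transcoder */
static uint32_t mmio_trans2(unsigned int trans, uint32_t reg_a)
{
	return reg_a - trans_offset[0] + trans_offset[trans];
}

int main(void)
{
	/* PPS DATA dword 2 of transcoder B: 0x61350 + 8 = 0x61358 */
	printf("0x%x\n", mmio_trans2(1, _ICL_VIDEO_DIP_PPS_DATA_A + 2 * 4));
	return 0;
}
```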
-#define _HSW_STEREO_3D_CTL_A 0x70020
-#define _HSW_STEREO_3D_CTL_B 0x71020
-#define HSW_STEREO_3D_CTL(dev_priv, trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A)
-#define S3D_ENABLE (1 << 31)
-
-#define _PCH_TRANSACONF 0xf0008
-#define _PCH_TRANSBCONF 0xf1008
-#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
-#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
-#define TRANS_ENABLE REG_BIT(31)
-#define TRANS_STATE_ENABLE REG_BIT(30)
-#define TRANS_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* ibx */
-#define TRANS_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_FRAME_START_DELAY_MASK, (x)) /* ibx: 0-3 */
-#define TRANS_INTERLACE_MASK REG_GENMASK(23, 21)
-#define TRANS_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANS_INTERLACE_MASK, 0)
-#define TRANS_INTERLACE_LEGACY_VSYNC_IBX REG_FIELD_PREP(TRANS_INTERLACE_MASK, 2) /* ibx */
-#define TRANS_INTERLACE_INTERLACED REG_FIELD_PREP(TRANS_INTERLACE_MASK, 3)
-#define TRANS_BPC_MASK REG_GENMASK(7, 5) /* ibx */
-#define TRANS_BPC_8 REG_FIELD_PREP(TRANS_BPC_MASK, 0)
-#define TRANS_BPC_10 REG_FIELD_PREP(TRANS_BPC_MASK, 1)
-#define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2)
-#define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3)
-
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
@@ -3066,88 +1067,6 @@
#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
-#define PCH_DP_B _MMIO(0xe4100)
-#define PCH_DP_C _MMIO(0xe4200)
-#define PCH_DP_D _MMIO(0xe4300)
-
-/* CPT */
-#define _TRANS_DP_CTL_A 0xe0300
-#define _TRANS_DP_CTL_B 0xe1300
-#define _TRANS_DP_CTL_C 0xe2300
-#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
-#define TRANS_DP_OUTPUT_ENABLE REG_BIT(31)
-#define TRANS_DP_PORT_SEL_MASK REG_GENMASK(30, 29)
-#define TRANS_DP_PORT_SEL_NONE REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, 3)
-#define TRANS_DP_PORT_SEL(port) REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, (port) - PORT_B)
-#define TRANS_DP_AUDIO_ONLY REG_BIT(26)
-#define TRANS_DP_ENH_FRAMING REG_BIT(18)
-#define TRANS_DP_BPC_MASK REG_GENMASK(10, 9)
-#define TRANS_DP_BPC_8 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 0)
-#define TRANS_DP_BPC_10 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 1)
-#define TRANS_DP_BPC_6 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 2)
-#define TRANS_DP_BPC_12 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 3)
-#define TRANS_DP_VSYNC_ACTIVE_HIGH REG_BIT(4)
-#define TRANS_DP_HSYNC_ACTIVE_HIGH REG_BIT(3)
-
-#define _TRANS_DP2_CTL_A 0x600a0
-#define _TRANS_DP2_CTL_B 0x610a0
-#define _TRANS_DP2_CTL_C 0x620a0
-#define _TRANS_DP2_CTL_D 0x630a0
-#define TRANS_DP2_CTL(trans) _MMIO_TRANS(trans, _TRANS_DP2_CTL_A, _TRANS_DP2_CTL_B)
-#define TRANS_DP2_128B132B_CHANNEL_CODING REG_BIT(31)
-#define TRANS_DP2_PANEL_REPLAY_ENABLE REG_BIT(30)
-#define TRANS_DP2_DEBUG_ENABLE REG_BIT(23)
-
-#define _TRANS_DP2_VFREQHIGH_A 0x600a4
-#define _TRANS_DP2_VFREQHIGH_B 0x610a4
-#define _TRANS_DP2_VFREQHIGH_C 0x620a4
-#define _TRANS_DP2_VFREQHIGH_D 0x630a4
-#define TRANS_DP2_VFREQHIGH(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQHIGH_A, _TRANS_DP2_VFREQHIGH_B)
-#define TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK REG_GENMASK(31, 8)
-#define TRANS_DP2_VFREQ_PIXEL_CLOCK(clk_hz) REG_FIELD_PREP(TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK, (clk_hz))
-
-#define _TRANS_DP2_VFREQLOW_A 0x600a8
-#define _TRANS_DP2_VFREQLOW_B 0x610a8
-#define _TRANS_DP2_VFREQLOW_C 0x620a8
-#define _TRANS_DP2_VFREQLOW_D 0x630a8
-#define TRANS_DP2_VFREQLOW(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQLOW_A, _TRANS_DP2_VFREQLOW_B)
-
-#define _DP_MIN_HBLANK_CTL_A 0x600ac
-#define _DP_MIN_HBLANK_CTL_B 0x610ac
-#define DP_MIN_HBLANK_CTL(trans) _MMIO_TRANS(trans, _DP_MIN_HBLANK_CTL_A, _DP_MIN_HBLANK_CTL_B)
-
-/* SNB eDP training params */
-/* SNB A-stepping */
-#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
-#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
-#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
-/* SNB B-stepping */
-#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0 << 22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1 << 22)
-#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a << 22)
-#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39 << 22)
-#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38 << 22)
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f << 22)
-
-/* IVB */
-#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 << 22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a << 22)
-#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f << 22)
-#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 << 22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 << 22)
-#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 << 22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e << 22)
-
-/* legacy values */
-#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 << 22)
-#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 << 22)
-#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 << 22)
-#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 << 22)
-#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 << 22)
-
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f << 22)
-
#define VLV_PMWGICZ _MMIO(0x1300a4)
#define HSW_EDRAM_CAP _MMIO(0x120010)
@@ -3156,10 +1075,6 @@
#define EDRAM_WAYS_IDX(cap) (((cap) >> 5) & 0x7)
#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
-#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
-#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
-#define PIXEL_OVERLAP_CNT_SHIFT 30
-
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
#define GEN6_PCODE_READY (1 << 31)
#define GEN6_PCODE_MB_PARAM2 REG_GENMASK(23, 16)
@@ -3288,837 +1203,12 @@
*/
#define GEN7_SO_WRITE_OFFSET(n) _MMIO(0x5280 + (n) * 4)
-/*
- * HSW - ICL power wells
- *
- * Platforms have up to 3 power well control register sets, each set
- * controlling up to 16 power wells via a request/status HW flag tuple:
- * - main (HSW_PWR_WELL_CTL[1-4])
- * - AUX (ICL_PWR_WELL_CTL_AUX[1-4])
- * - DDI (ICL_PWR_WELL_CTL_DDI[1-4])
- * Each control register set consists of up to 4 registers used by different
- * sources that can request a power well to be enabled:
- * - BIOS (HSW_PWR_WELL_CTL1/ICL_PWR_WELL_CTL_AUX1/ICL_PWR_WELL_CTL_DDI1)
- * - DRIVER (HSW_PWR_WELL_CTL2/ICL_PWR_WELL_CTL_AUX2/ICL_PWR_WELL_CTL_DDI2)
- * - KVMR (HSW_PWR_WELL_CTL3) (only in the main register set)
- * - DEBUG (HSW_PWR_WELL_CTL4/ICL_PWR_WELL_CTL_AUX4/ICL_PWR_WELL_CTL_DDI4)
- */
-#define HSW_PWR_WELL_CTL1 _MMIO(0x45400)
-#define HSW_PWR_WELL_CTL2 _MMIO(0x45404)
-#define HSW_PWR_WELL_CTL3 _MMIO(0x45408)
-#define HSW_PWR_WELL_CTL4 _MMIO(0x4540C)
-#define HSW_PWR_WELL_CTL_REQ(pw_idx) (0x2 << ((pw_idx) * 2))
-#define HSW_PWR_WELL_CTL_STATE(pw_idx) (0x1 << ((pw_idx) * 2))
-
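The comment block above describes the request/status tuple: each power well owns a 2-bit pair in its control register, so enabling a well means setting the REQ bit (for the driver, in HSW_PWR_WELL_CTL2) and then polling the STATE bit. A small standalone sketch of the bit math, using the macros exactly as defined above:

```c
#include <stdio.h>

#define HSW_PWR_WELL_CTL_REQ(pw_idx)   (0x2 << ((pw_idx) * 2))
#define HSW_PWR_WELL_CTL_STATE(pw_idx) (0x1 << ((pw_idx) * 2))

int main(void)
{
	unsigned int pw_idx = 14; /* SKL_PW_CTL_IDX_PW_1 */

	/* each well owns a {request, status} pair at bits 2*idx+1 / 2*idx */
	printf("req mask:   0x%08x\n",
	       (unsigned int)HSW_PWR_WELL_CTL_REQ(pw_idx));
	printf("state mask: 0x%08x\n",
	       (unsigned int)HSW_PWR_WELL_CTL_STATE(pw_idx));
	return 0;
}
```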
-/* HSW/BDW power well */
-#define HSW_PW_CTL_IDX_GLOBAL 15
-
-/* SKL/BXT/GLK power wells */
-#define SKL_PW_CTL_IDX_PW_2 15
-#define SKL_PW_CTL_IDX_PW_1 14
-#define GLK_PW_CTL_IDX_AUX_C 10
-#define GLK_PW_CTL_IDX_AUX_B 9
-#define GLK_PW_CTL_IDX_AUX_A 8
-#define SKL_PW_CTL_IDX_DDI_D 4
-#define SKL_PW_CTL_IDX_DDI_C 3
-#define SKL_PW_CTL_IDX_DDI_B 2
-#define SKL_PW_CTL_IDX_DDI_A_E 1
-#define GLK_PW_CTL_IDX_DDI_A 1
-#define SKL_PW_CTL_IDX_MISC_IO 0
-
-/* ICL/TGL - power wells */
-#define TGL_PW_CTL_IDX_PW_5 4
-#define ICL_PW_CTL_IDX_PW_4 3
-#define ICL_PW_CTL_IDX_PW_3 2
-#define ICL_PW_CTL_IDX_PW_2 1
-#define ICL_PW_CTL_IDX_PW_1 0
-
-/* XE_LPD - power wells */
-#define XELPD_PW_CTL_IDX_PW_D 8
-#define XELPD_PW_CTL_IDX_PW_C 7
-#define XELPD_PW_CTL_IDX_PW_B 6
-#define XELPD_PW_CTL_IDX_PW_A 5
-
-#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440)
-#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444)
-#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C)
-#define TGL_PW_CTL_IDX_AUX_TBT6 14
-#define TGL_PW_CTL_IDX_AUX_TBT5 13
-#define TGL_PW_CTL_IDX_AUX_TBT4 12
-#define ICL_PW_CTL_IDX_AUX_TBT4 11
-#define TGL_PW_CTL_IDX_AUX_TBT3 11
-#define ICL_PW_CTL_IDX_AUX_TBT3 10
-#define TGL_PW_CTL_IDX_AUX_TBT2 10
-#define ICL_PW_CTL_IDX_AUX_TBT2 9
-#define TGL_PW_CTL_IDX_AUX_TBT1 9
-#define ICL_PW_CTL_IDX_AUX_TBT1 8
-#define TGL_PW_CTL_IDX_AUX_TC6 8
-#define XELPD_PW_CTL_IDX_AUX_E 8
-#define TGL_PW_CTL_IDX_AUX_TC5 7
-#define XELPD_PW_CTL_IDX_AUX_D 7
-#define TGL_PW_CTL_IDX_AUX_TC4 6
-#define ICL_PW_CTL_IDX_AUX_F 5
-#define TGL_PW_CTL_IDX_AUX_TC3 5
-#define ICL_PW_CTL_IDX_AUX_E 4
-#define TGL_PW_CTL_IDX_AUX_TC2 4
-#define ICL_PW_CTL_IDX_AUX_D 3
-#define TGL_PW_CTL_IDX_AUX_TC1 3
-#define ICL_PW_CTL_IDX_AUX_C 2
-#define ICL_PW_CTL_IDX_AUX_B 1
-#define ICL_PW_CTL_IDX_AUX_A 0
-
-#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450)
-#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454)
-#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C)
-#define XELPD_PW_CTL_IDX_DDI_E 8
-#define TGL_PW_CTL_IDX_DDI_TC6 8
-#define XELPD_PW_CTL_IDX_DDI_D 7
-#define TGL_PW_CTL_IDX_DDI_TC5 7
-#define TGL_PW_CTL_IDX_DDI_TC4 6
-#define ICL_PW_CTL_IDX_DDI_F 5
-#define TGL_PW_CTL_IDX_DDI_TC3 5
-#define ICL_PW_CTL_IDX_DDI_E 4
-#define TGL_PW_CTL_IDX_DDI_TC2 4
-#define ICL_PW_CTL_IDX_DDI_D 3
-#define TGL_PW_CTL_IDX_DDI_TC1 3
-#define ICL_PW_CTL_IDX_DDI_C 2
-#define ICL_PW_CTL_IDX_DDI_B 1
-#define ICL_PW_CTL_IDX_DDI_A 0
-
-/* HSW - power well misc debug registers */
-#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
-#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31)
-#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20)
-#define HSW_PWR_WELL_FORCE_ON (1 << 19)
-#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
-
-/* SKL Fuse Status */
-enum skl_power_gate {
- SKL_PG0,
- SKL_PG1,
- SKL_PG2,
- ICL_PG3,
- ICL_PG4,
-};
-
-#define SKL_FUSE_STATUS _MMIO(0x42000)
-#define SKL_FUSE_DOWNLOAD_STATUS (1 << 31)
-/*
- * PG0 is HW controlled, so doesn't have a corresponding power well control knob
- * SKL_DISP_PW1_IDX..SKL_DISP_PW2_IDX -> PG1..PG2
- */
-#define SKL_PW_CTL_IDX_TO_PG(pw_idx) \
- ((pw_idx) - SKL_PW_CTL_IDX_PW_1 + SKL_PG1)
-/*
- * PG0 is HW controlled, so doesn't have a corresponding power well control knob
- * ICL_DISP_PW1_IDX..ICL_DISP_PW4_IDX -> PG1..PG4
- */
-#define ICL_PW_CTL_IDX_TO_PG(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1)
-#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
-
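The two `*_PW_CTL_IDX_TO_PG()` helpers above translate a power-well control index into a fuse power-gate number, which in turn selects a distribution-status bit in SKL_FUSE_STATUS. A sketch of that chain, with the constants copied from the defines above:

```c
#include <stdio.h>

#define SKL_PW_CTL_IDX_PW_1 14
#define SKL_PG1 1 /* from enum skl_power_gate */
#define SKL_PW_CTL_IDX_TO_PG(pw_idx) \
	((pw_idx) - SKL_PW_CTL_IDX_PW_1 + SKL_PG1)
#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))

int main(void)
{
	int pw_idx = 15; /* SKL_PW_CTL_IDX_PW_2 */
	int pg = SKL_PW_CTL_IDX_TO_PG(pw_idx); /* -> PG2 */

	printf("PG%d dist status mask: 0x%08x\n",
	       pg, (unsigned int)SKL_FUSE_PG_DIST_STATUS(pg));
	return 0;
}
```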
-/* Per-pipe DDI Function Control */
-#define _TRANS_DDI_FUNC_CTL_A 0x60400
-#define _TRANS_DDI_FUNC_CTL_B 0x61400
-#define _TRANS_DDI_FUNC_CTL_C 0x62400
-#define _TRANS_DDI_FUNC_CTL_D 0x63400
-#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
-#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
-#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
-#define TRANS_DDI_FUNC_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL_A)
-
-#define TRANS_DDI_FUNC_ENABLE (1 << 31)
-/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define TRANS_DDI_PORT_SHIFT 28
-#define TGL_TRANS_DDI_PORT_SHIFT 27
-#define TRANS_DDI_PORT_MASK (7 << TRANS_DDI_PORT_SHIFT)
-#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
-#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
-#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
-#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
-#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
-#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
-#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24)
-#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24)
-#define TRANS_DDI_MODE_SELECT_FDI_OR_128B132B (4 << 24)
-#define TRANS_DDI_BPC_MASK (7 << 20)
-#define TRANS_DDI_BPC_8 (0 << 20)
-#define TRANS_DDI_BPC_10 (1 << 20)
-#define TRANS_DDI_BPC_6 (2 << 20)
-#define TRANS_DDI_BPC_12 (3 << 20)
-#define TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK REG_GENMASK(19, 18)
-#define TRANS_DDI_PORT_SYNC_MASTER_SELECT(x) REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
-#define TRANS_DDI_PVSYNC (1 << 17)
-#define TRANS_DDI_PHSYNC (1 << 16)
-#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15)
-#define XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(15)
-#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
-#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
-#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
-#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
-#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
-#define TRANS_DDI_EDP_INPUT_D_ONOFF (7 << 12)
-#define TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(12)
-#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
-#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
- REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
-#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
-#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
-#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
-#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
-#define TRANS_DDI_HDCP_SELECT REG_BIT(5)
-#define TRANS_DDI_BFI_ENABLE (1 << 4)
-#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
-#define TRANS_DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
-#define TRANS_DDI_PORT_WIDTH(width) REG_FIELD_PREP(TRANS_DDI_PORT_WIDTH_MASK, (width) - 1)
-#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
-#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
- | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
- | TRANS_DDI_HDMI_SCRAMBLING)
-
-#define _TRANS_DDI_FUNC_CTL2_A 0x60404
-#define _TRANS_DDI_FUNC_CTL2_B 0x61404
-#define _TRANS_DDI_FUNC_CTL2_C 0x62404
-#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
-#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
-#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
-#define TRANS_DDI_FUNC_CTL2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A)
-#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
-#define CMTG_SECONDARY_MODE REG_BIT(3)
-#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
-#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
-
-#define TRANS_CMTG_CHICKEN _MMIO(0x6fa90)
-#define DISABLE_DPT_CLK_GATING REG_BIT(1)
-
-/* DisplayPort Transport Control */
-#define _DP_TP_CTL_A 0x64040
-#define _DP_TP_CTL_B 0x64140
-#define _TGL_DP_TP_CTL_A 0x60540
-#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
-#define TGL_DP_TP_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
-#define DP_TP_CTL_ENABLE REG_BIT(31)
-#define DP_TP_CTL_FEC_ENABLE REG_BIT(30)
-#define DP_TP_CTL_MODE_MASK REG_BIT(27)
-#define DP_TP_CTL_MODE_SST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 0)
-#define DP_TP_CTL_MODE_MST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 1)
-#define DP_TP_CTL_FORCE_ACT REG_BIT(25)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK REG_GENMASK(20, 19)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 0)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 1)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 2)
-#define DP_TP_CTL_ENHANCED_FRAME_ENABLE REG_BIT(18)
-#define DP_TP_CTL_FDI_AUTOTRAIN REG_BIT(15)
-#define DP_TP_CTL_LINK_TRAIN_MASK REG_GENMASK(10, 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT1 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 0)
-#define DP_TP_CTL_LINK_TRAIN_PAT2 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 1)
-#define DP_TP_CTL_LINK_TRAIN_PAT3 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 4)
-#define DP_TP_CTL_LINK_TRAIN_PAT4 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 5)
-#define DP_TP_CTL_LINK_TRAIN_IDLE REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 2)
-#define DP_TP_CTL_LINK_TRAIN_NORMAL REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 3)
-#define DP_TP_CTL_SCRAMBLE_DISABLE REG_BIT(7)
-
-/* DisplayPort Transport Status */
-#define _DP_TP_STATUS_A 0x64044
-#define _DP_TP_STATUS_B 0x64144
-#define _TGL_DP_TP_STATUS_A 0x60544
-#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
-#define TGL_DP_TP_STATUS(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
-#define DP_TP_STATUS_FEC_ENABLE_LIVE REG_BIT(28)
-#define DP_TP_STATUS_IDLE_DONE REG_BIT(25)
-#define DP_TP_STATUS_ACT_SENT REG_BIT(24)
-#define DP_TP_STATUS_MODE_STATUS_MST REG_BIT(23)
-#define DP_TP_STATUS_STREAMS_ENABLED_MASK REG_GENMASK(18, 16) /* 17:16 on hsw but bit 18 mbz */
-#define DP_TP_STATUS_AUTOTRAIN_DONE REG_BIT(12)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2_MASK REG_GENMASK(9, 8)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1_MASK REG_GENMASK(5, 4)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0_MASK REG_GENMASK(1, 0)
-
-/* DDI Buffer Control */
-#define _DDI_BUF_CTL_A 0x64000
-#define _DDI_BUF_CTL_B 0x64100
-/* Known as DDI_CTL_DE in MTL+ */
-#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
-#define DDI_BUF_CTL_ENABLE REG_BIT(31)
-#define XE2LPD_DDI_BUF_D2D_LINK_ENABLE REG_BIT(29)
-#define XE2LPD_DDI_BUF_D2D_LINK_STATE REG_BIT(28)
-#define DDI_BUF_EMP_MASK REG_GENMASK(27, 24)
-#define DDI_BUF_TRANS_SELECT(n) REG_FIELD_PREP(DDI_BUF_EMP_MASK, (n))
-#define DDI_BUF_PHY_LINK_RATE_MASK REG_GENMASK(23, 20)
-#define DDI_BUF_PHY_LINK_RATE(r) REG_FIELD_PREP(DDI_BUF_PHY_LINK_RATE_MASK, (r))
-#define DDI_BUF_PORT_DATA_MASK REG_GENMASK(19, 18)
-#define DDI_BUF_PORT_DATA_10BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 0)
-#define DDI_BUF_PORT_DATA_20BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 1)
-#define DDI_BUF_PORT_DATA_40BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 2)
-#define DDI_BUF_PORT_REVERSAL REG_BIT(16)
-#define DDI_BUF_LANE_STAGGER_DELAY_MASK REG_GENMASK(15, 8)
-#define DDI_BUF_LANE_STAGGER_DELAY(symbols) REG_FIELD_PREP(DDI_BUF_LANE_STAGGER_DELAY_MASK, \
- (symbols))
-#define DDI_BUF_IS_IDLE REG_BIT(7)
-#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
-#define DDI_A_4_LANES REG_BIT(4)
-#define DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
-#define DDI_PORT_WIDTH(width) REG_FIELD_PREP(DDI_PORT_WIDTH_MASK, \
- ((width) == 3 ? 4 : (width) - 1))
-#define DDI_PORT_WIDTH_SHIFT 1
-#define DDI_INIT_DISPLAY_DETECTED REG_BIT(0)
-
-/* DDI Buffer Translations */
-#define _DDI_BUF_TRANS_A 0x64E00
-#define _DDI_BUF_TRANS_B 0x64E60
-#define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
-#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31)
-#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
-
-/* DDI DP Compliance Control */
-#define _DDI_DP_COMP_CTL_A 0x605F0
-#define _DDI_DP_COMP_CTL_B 0x615F0
-#define DDI_DP_COMP_CTL(pipe) _MMIO_PIPE(pipe, _DDI_DP_COMP_CTL_A, _DDI_DP_COMP_CTL_B)
-#define DDI_DP_COMP_CTL_ENABLE (1 << 31)
-#define DDI_DP_COMP_CTL_D10_2 (0 << 28)
-#define DDI_DP_COMP_CTL_SCRAMBLED_0 (1 << 28)
-#define DDI_DP_COMP_CTL_PRBS7 (2 << 28)
-#define DDI_DP_COMP_CTL_CUSTOM80 (3 << 28)
-#define DDI_DP_COMP_CTL_HBR2 (4 << 28)
-#define DDI_DP_COMP_CTL_SCRAMBLED_1 (5 << 28)
-#define DDI_DP_COMP_CTL_HBR2_RESET (0xFC << 0)
-
-/* DDI DP Compliance Pattern */
-#define _DDI_DP_COMP_PAT_A 0x605F4
-#define _DDI_DP_COMP_PAT_B 0x615F4
-#define DDI_DP_COMP_PAT(pipe, i) _MMIO(_PIPE(pipe, _DDI_DP_COMP_PAT_A, _DDI_DP_COMP_PAT_B) + (i) * 4)
-
-/* Sideband Interface (SBI) is programmed indirectly, via
- * SBI_ADDR, which contains the register offset, and SBI_DATA,
- * which contains the payload. */
-#define SBI_ADDR _MMIO(0xC6000)
-#define SBI_DATA _MMIO(0xC6004)
-#define SBI_CTL_STAT _MMIO(0xC6008)
-#define SBI_CTL_DEST_ICLK (0x0 << 16)
-#define SBI_CTL_DEST_MPHY (0x1 << 16)
-#define SBI_CTL_OP_IORD (0x2 << 8)
-#define SBI_CTL_OP_IOWR (0x3 << 8)
-#define SBI_CTL_OP_CRRD (0x6 << 8)
-#define SBI_CTL_OP_CRWR (0x7 << 8)
-#define SBI_RESPONSE_FAIL (0x1 << 1)
-#define SBI_RESPONSE_SUCCESS (0x0 << 1)
-#define SBI_BUSY (0x1 << 0)
-#define SBI_READY (0x0 << 0)
-
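The comment above only hints at the protocol; the full sequence (wait for idle, program SBI_ADDR and SBI_DATA, kick SBI_CTL_STAT with SBI_BUSY, poll, check SBI_RESPONSE_FAIL) is visible in the intel_sbi.c being deleted further down in this patch. A condensed sketch of an iCLK read, with `mmio_read()`/`mmio_write()` as stand-ins for the uncore accessors, and without the real code's timeouts and locking:

```c
#include <stdint.h>

#define SBI_ADDR          0xC6000
#define SBI_DATA          0xC6004
#define SBI_CTL_STAT      0xC6008
#define SBI_CTL_DEST_ICLK (0x0 << 16)
#define SBI_CTL_OP_CRRD   (0x6 << 8)
#define SBI_RESPONSE_FAIL (0x1 << 1)
#define SBI_BUSY          (0x1 << 0)

extern uint32_t mmio_read(uint32_t reg);
extern void mmio_write(uint32_t reg, uint32_t val);

/* read one iCLK register over the sideband; returns 0 on success */
static int sbi_read_iclk(uint16_t offset, uint32_t *val)
{
	uint32_t stat;

	while (mmio_read(SBI_CTL_STAT) & SBI_BUSY)
		; /* wait for the interface to go idle */

	mmio_write(SBI_ADDR, (uint32_t)offset << 16); /* offset in bits 31:16 */
	mmio_write(SBI_DATA, 0);                      /* no payload on reads */
	mmio_write(SBI_CTL_STAT,
		   SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD | SBI_BUSY); /* kick */

	while ((stat = mmio_read(SBI_CTL_STAT)) & SBI_BUSY)
		; /* wait for completion */

	if (stat & SBI_RESPONSE_FAIL)
		return -1;

	*val = mmio_read(SBI_DATA);
	return 0;
}
```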
-/* SBI offsets */
-#define SBI_SSCDIVINTPHASE 0x0200
-#define SBI_SSCDIVINTPHASE6 0x0600
-#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1
-#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f << 1)
-#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x) << 1)
-#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8
-#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f << 8)
-#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x) << 8)
-#define SBI_SSCDIVINTPHASE_DIR(x) ((x) << 15)
-#define SBI_SSCDIVINTPHASE_PROPAGATE (1 << 0)
-#define SBI_SSCDITHPHASE 0x0204
-#define SBI_SSCCTL 0x020c
-#define SBI_SSCCTL6 0x060C
-#define SBI_SSCCTL_PATHALT (1 << 3)
-#define SBI_SSCCTL_DISABLE (1 << 0)
-#define SBI_SSCAUXDIV6 0x0610
-#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4
-#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1 << 4)
-#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x) << 4)
-#define SBI_DBUFF0 0x2a00
-#define SBI_GEN0 0x1f00
-#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1 << 0)
-
-/* LPT PIXCLK_GATE */
-#define PIXCLK_GATE _MMIO(0xC6020)
-#define PIXCLK_GATE_UNGATE (1 << 0)
-#define PIXCLK_GATE_GATE (0 << 0)
-
-/* SPLL */
-#define SPLL_CTL _MMIO(0x46020)
-#define SPLL_PLL_ENABLE (1 << 31)
-#define SPLL_REF_BCLK (0 << 28)
-#define SPLL_REF_MUXED_SSC (1 << 28) /* CPU SSC if fuse enabled, PCH SSC otherwise */
-#define SPLL_REF_NON_SSC_HSW (2 << 28)
-#define SPLL_REF_PCH_SSC_BDW (2 << 28)
-#define SPLL_REF_LCPLL (3 << 28)
-#define SPLL_REF_MASK (3 << 28)
-#define SPLL_FREQ_810MHz (0 << 26)
-#define SPLL_FREQ_1350MHz (1 << 26)
-#define SPLL_FREQ_2700MHz (2 << 26)
-#define SPLL_FREQ_MASK (3 << 26)
-
-/* WRPLL */
-#define _WRPLL_CTL1 0x46040
-#define _WRPLL_CTL2 0x46060
-#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
-#define WRPLL_PLL_ENABLE (1 << 31)
-#define WRPLL_REF_BCLK (0 << 28)
-#define WRPLL_REF_PCH_SSC (1 << 28)
-#define WRPLL_REF_MUXED_SSC_BDW (2 << 28) /* CPU SSC if fuse enabled, PCH SSC otherwise */
-#define WRPLL_REF_SPECIAL_HSW (2 << 28) /* muxed SSC (ULT), non-SSC (non-ULT) */
-#define WRPLL_REF_LCPLL (3 << 28)
-#define WRPLL_REF_MASK (3 << 28)
-/* WRPLL divider programming */
-#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0)
-#define WRPLL_DIVIDER_REF_MASK (0xff)
-#define WRPLL_DIVIDER_POST(x) ((x) << 8)
-#define WRPLL_DIVIDER_POST_MASK (0x3f << 8)
-#define WRPLL_DIVIDER_POST_SHIFT 8
-#define WRPLL_DIVIDER_FEEDBACK(x) ((x) << 16)
-#define WRPLL_DIVIDER_FB_SHIFT 16
-#define WRPLL_DIVIDER_FB_MASK (0xff << 16)
-
-/* Port clock selection */
-#define _PORT_CLK_SEL_A 0x46100
-#define _PORT_CLK_SEL_B 0x46104
-#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
-#define PORT_CLK_SEL_MASK REG_GENMASK(31, 29)
-#define PORT_CLK_SEL_LCPLL_2700 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 0)
-#define PORT_CLK_SEL_LCPLL_1350 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 1)
-#define PORT_CLK_SEL_LCPLL_810 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 2)
-#define PORT_CLK_SEL_SPLL REG_FIELD_PREP(PORT_CLK_SEL_MASK, 3)
-#define PORT_CLK_SEL_WRPLL(pll) REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4 + (pll))
-#define PORT_CLK_SEL_WRPLL1 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4)
-#define PORT_CLK_SEL_WRPLL2 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 5)
-#define PORT_CLK_SEL_NONE REG_FIELD_PREP(PORT_CLK_SEL_MASK, 7)
-
-/* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
-#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
-#define DDI_CLK_SEL_MASK REG_GENMASK(31, 28)
-#define DDI_CLK_SEL_NONE REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x0)
-#define DDI_CLK_SEL_MG REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x8)
-#define DDI_CLK_SEL_TBT_162 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xC)
-#define DDI_CLK_SEL_TBT_270 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xD)
-#define DDI_CLK_SEL_TBT_540 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xE)
-#define DDI_CLK_SEL_TBT_810 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xF)
-
-/* Transcoder clock selection */
-#define _TRANS_CLK_SEL_A 0x46140
-#define _TRANS_CLK_SEL_B 0x46144
-#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
-/* For each transcoder, we need to select the corresponding port clock */
-#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
-#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
-#define TGL_TRANS_CLK_SEL_DISABLED (0x0 << 28)
-#define TGL_TRANS_CLK_SEL_PORT(x) (((x) + 1) << 28)
-
-
-#define CDCLK_FREQ _MMIO(0x46200)
-
-#define _TRANSA_MSA_MISC 0x60410
-#define _TRANSB_MSA_MISC 0x61410
-#define _TRANSC_MSA_MISC 0x62410
-#define _TRANS_EDP_MSA_MISC 0x6f410
-#define TRANS_MSA_MISC(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANSA_MSA_MISC)
-/* See DP_MSA_MISC_* for the bit definitions */
-
-#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C
-#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C
-#define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C
-#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C
-#define TRANS_SET_CONTEXT_LATENCY(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_A_SET_CONTEXT_LATENCY)
-#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0)
-#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
-
-/* LCPLL Control */
-#define LCPLL_CTL _MMIO(0x130040)
-#define LCPLL_PLL_DISABLE (1 << 31)
-#define LCPLL_PLL_LOCK (1 << 30)
-#define LCPLL_REF_NON_SSC (0 << 28)
-#define LCPLL_REF_BCLK (2 << 28)
-#define LCPLL_REF_PCH_SSC (3 << 28)
-#define LCPLL_REF_MASK (3 << 28)
-#define LCPLL_CLK_FREQ_MASK (3 << 26)
-#define LCPLL_CLK_FREQ_450 (0 << 26)
-#define LCPLL_CLK_FREQ_54O_BDW (1 << 26)
-#define LCPLL_CLK_FREQ_337_5_BDW (2 << 26)
-#define LCPLL_CLK_FREQ_675_BDW (3 << 26)
-#define LCPLL_CD_CLOCK_DISABLE (1 << 25)
-#define LCPLL_ROOT_CD_CLOCK_DISABLE (1 << 24)
-#define LCPLL_CD2X_CLOCK_DISABLE (1 << 23)
-#define LCPLL_POWER_DOWN_ALLOW (1 << 22)
-#define LCPLL_CD_SOURCE_FCLK (1 << 21)
-#define LCPLL_CD_SOURCE_FCLK_DONE (1 << 19)
-
-/*
- * SKL Clocks
- */
-/* CDCLK_CTL */
-#define CDCLK_CTL _MMIO(0x46000)
-#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
-#define CDCLK_FREQ_450_432 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 0)
-#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
-#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
-#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
-#define MDCLK_SOURCE_SEL_MASK REG_GENMASK(25, 25)
-#define MDCLK_SOURCE_SEL_CD2XCLK REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 0)
-#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 1)
-#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
-#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
-#define BXT_CDCLK_CD2X_DIV_SEL_2 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 2)
-#define BXT_CDCLK_CD2X_DIV_SEL_4 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 3)
-#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
-#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
-#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
-#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19)
-#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
-#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe)
-#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE
-#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
-#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
-
-/* CDCLK_SQUASH_CTL */
-#define CDCLK_SQUASH_CTL _MMIO(0x46008)
-#define CDCLK_SQUASH_ENABLE REG_BIT(31)
-#define CDCLK_SQUASH_WINDOW_SIZE_MASK REG_GENMASK(27, 24)
-#define CDCLK_SQUASH_WINDOW_SIZE(x) REG_FIELD_PREP(CDCLK_SQUASH_WINDOW_SIZE_MASK, (x))
-#define CDCLK_SQUASH_WAVEFORM_MASK REG_GENMASK(15, 0)
-#define CDCLK_SQUASH_WAVEFORM(x) REG_FIELD_PREP(CDCLK_SQUASH_WAVEFORM_MASK, (x))
-
-/* LCPLL_CTL */
-#define LCPLL1_CTL _MMIO(0x46010)
-#define LCPLL2_CTL _MMIO(0x46014)
-#define LCPLL_PLL_ENABLE (1 << 31)
-
-/* DPLL control1 */
-#define DPLL_CTRL1 _MMIO(0x6C058)
-#define DPLL_CTRL1_HDMI_MODE(id) (1 << ((id) * 6 + 5))
-#define DPLL_CTRL1_SSC(id) (1 << ((id) * 6 + 4))
-#define DPLL_CTRL1_LINK_RATE_MASK(id) (7 << ((id) * 6 + 1))
-#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id) * 6 + 1)
-#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate) << ((id) * 6 + 1))
-#define DPLL_CTRL1_OVERRIDE(id) (1 << ((id) * 6))
-#define DPLL_CTRL1_LINK_RATE_2700 0
-#define DPLL_CTRL1_LINK_RATE_1350 1
-#define DPLL_CTRL1_LINK_RATE_810 2
-#define DPLL_CTRL1_LINK_RATE_1620 3
-#define DPLL_CTRL1_LINK_RATE_1080 4
-#define DPLL_CTRL1_LINK_RATE_2160 5
-
-/* DPLL control2 */
-#define DPLL_CTRL2 _MMIO(0x6C05C)
-#define DPLL_CTRL2_DDI_CLK_OFF(port) (1 << ((port) + 15))
-#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3 << ((port) * 3 + 1))
-#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port) * 3 + 1)
-#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk) << ((port) * 3 + 1))
-#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1 << ((port) * 3))
-
-/* DPLL Status */
-#define DPLL_STATUS _MMIO(0x6C060)
-#define DPLL_LOCK(id) (1 << ((id) * 8))
-
-/* DPLL cfg */
-#define _DPLL1_CFGCR1 0x6C040
-#define _DPLL2_CFGCR1 0x6C048
-#define _DPLL3_CFGCR1 0x6C050
-#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
-#define DPLL_CFGCR1_FREQ_ENABLE (1 << 31)
-#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
-#define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9)
-#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
-
-#define _DPLL1_CFGCR2 0x6C044
-#define _DPLL2_CFGCR2 0x6C04C
-#define _DPLL3_CFGCR2 0x6C054
-#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
-#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8)
-#define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8)
-#define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7)
-#define DPLL_CFGCR2_KDIV_MASK (3 << 5)
-#define DPLL_CFGCR2_KDIV(x) ((x) << 5)
-#define DPLL_CFGCR2_KDIV_5 (0 << 5)
-#define DPLL_CFGCR2_KDIV_2 (1 << 5)
-#define DPLL_CFGCR2_KDIV_3 (2 << 5)
-#define DPLL_CFGCR2_KDIV_1 (3 << 5)
-#define DPLL_CFGCR2_PDIV_MASK (7 << 2)
-#define DPLL_CFGCR2_PDIV(x) ((x) << 2)
-#define DPLL_CFGCR2_PDIV_1 (0 << 2)
-#define DPLL_CFGCR2_PDIV_2 (1 << 2)
-#define DPLL_CFGCR2_PDIV_3 (2 << 2)
-#define DPLL_CFGCR2_PDIV_7 (4 << 2)
-#define DPLL_CFGCR2_PDIV_7_INVALID (5 << 2)
-#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
-
-/* ICL Clocks */
-#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5))
-#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
-#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? \
- (tc_port) + 12 : \
- (tc_port) - TC_PORT_4 + 21))
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) _PICK(phy, 0, 2, 4, 27)
-#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) \
- (3 << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) \
- ((pll) << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-
-/*
- * DG1 Clocks
- * The first register controls phys A and B, while the second register
- * controls phys C and D. The bits in these registers are the
- * same, but refer to different phys.
- */
-#define _DG1_DPCLKA_CFGCR0 0x164280
-#define _DG1_DPCLKA1_CFGCR0 0x16C280
-#define _DG1_DPCLKA_PHY_IDX(phy) ((phy) % 2)
-#define _DG1_DPCLKA_PLL_IDX(pll) ((pll) % 2)
-#define DG1_DPCLKA_CFGCR0(phy) _MMIO_PHY((phy) / 2, \
- _DG1_DPCLKA_CFGCR0, \
- _DG1_DPCLKA1_CFGCR0)
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT(_DG1_DPCLKA_PHY_IDX(phy) + 10)
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) (_DG1_DPCLKA_PHY_IDX(phy) * 2)
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) (_DG1_DPCLKA_PLL_IDX(pll) << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (0x3 << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-
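So a given phy selects its register by `phy / 2` and its bitfield position by `phy % 2`, as the `_DG1_DPCLKA_PHY_IDX()` helper above encodes. A standalone sketch of that selection, using the offsets from the defines above:

```c
#include <stdio.h>

#define _DG1_DPCLKA_CFGCR0  0x164280
#define _DG1_DPCLKA1_CFGCR0 0x16C280
#define _DG1_DPCLKA_PHY_IDX(phy) ((phy) % 2)

int main(void)
{
	for (int phy = 0; phy < 4; phy++) { /* phys A..D */
		unsigned int reg = (phy / 2) ? _DG1_DPCLKA1_CFGCR0
					     : _DG1_DPCLKA_CFGCR0;

		/* A/B land in the first register, C/D in the second */
		printf("phy %c: reg 0x%x, clk_sel shift %d\n",
		       'A' + phy, reg, _DG1_DPCLKA_PHY_IDX(phy) * 2);
	}
	return 0;
}
```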
-/* ADLS Clocks */
-#define _ADLS_DPCLKA_CFGCR0 0x164280
-#define _ADLS_DPCLKA_CFGCR1 0x1642BC
-#define ADLS_DPCLKA_CFGCR(phy) _MMIO_PHY((phy) / 3, \
- _ADLS_DPCLKA_CFGCR0, \
- _ADLS_DPCLKA_CFGCR1)
-#define ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy) (((phy) % 3) * 2)
-/* ADLS DPCLKA_CFGCR0 DDI mask */
-#define ADLS_DPCLKA_DDII_SEL_MASK REG_GENMASK(5, 4)
-#define ADLS_DPCLKA_DDIB_SEL_MASK REG_GENMASK(3, 2)
-#define ADLS_DPCLKA_DDIA_SEL_MASK REG_GENMASK(1, 0)
-/* ADLS DPCLKA_CFGCR1 DDI mask */
-#define ADLS_DPCLKA_DDIK_SEL_MASK REG_GENMASK(3, 2)
-#define ADLS_DPCLKA_DDIJ_SEL_MASK REG_GENMASK(1, 0)
-#define ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy) _PICK((phy), \
- ADLS_DPCLKA_DDIA_SEL_MASK, \
- ADLS_DPCLKA_DDIB_SEL_MASK, \
- ADLS_DPCLKA_DDII_SEL_MASK, \
- ADLS_DPCLKA_DDIJ_SEL_MASK, \
- ADLS_DPCLKA_DDIK_SEL_MASK)
-
-/* ICL PLL */
-#define _DPLL0_ENABLE 0x46010
-#define _DPLL1_ENABLE 0x46014
-#define _ADLS_DPLL2_ENABLE 0x46018
-#define _ADLS_DPLL3_ENABLE 0x46030
-#define PLL_ENABLE REG_BIT(31)
-#define PLL_LOCK REG_BIT(30)
-#define PLL_POWER_ENABLE REG_BIT(27)
-#define PLL_POWER_STATE REG_BIT(26)
-#define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
- _DPLL0_ENABLE, _DPLL1_ENABLE, \
- _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE))
-
-#define _DG2_PLL3_ENABLE 0x4601C
-
-#define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
- _DPLL0_ENABLE, _DPLL1_ENABLE, \
- _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE))
-
-#define TBT_PLL_ENABLE _MMIO(0x46020)
-
-#define _MG_PLL1_ENABLE 0x46030
-#define _MG_PLL2_ENABLE 0x46034
-#define _MG_PLL3_ENABLE 0x46038
-#define _MG_PLL4_ENABLE 0x4603C
-/* Bits are the same as _DPLL0_ENABLE */
-#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
- _MG_PLL2_ENABLE)
-
-/* DG1 PLL */
-#define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _DPLL0_ENABLE, _DPLL1_ENABLE, \
- _MG_PLL1_ENABLE, _MG_PLL2_ENABLE))
-
-/* ADL-P Type C PLL */
-#define PORTTC1_PLL_ENABLE 0x46038
-#define PORTTC2_PLL_ENABLE 0x46040
-#define ADLP_PORTTC_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), \
- PORTTC1_PLL_ENABLE, \
- PORTTC2_PLL_ENABLE)
-
-#define _ICL_DPLL0_CFGCR0 0x164000
-#define _ICL_DPLL1_CFGCR0 0x164080
-#define ICL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \
- _ICL_DPLL1_CFGCR0)
-#define DPLL_CFGCR0_HDMI_MODE (1 << 30)
-#define DPLL_CFGCR0_SSC_ENABLE (1 << 29)
-#define DPLL_CFGCR0_SSC_ENABLE_ICL (1 << 25)
-#define DPLL_CFGCR0_LINK_RATE_MASK (0xf << 25)
-#define DPLL_CFGCR0_LINK_RATE_2700 (0 << 25)
-#define DPLL_CFGCR0_LINK_RATE_1350 (1 << 25)
-#define DPLL_CFGCR0_LINK_RATE_810 (2 << 25)
-#define DPLL_CFGCR0_LINK_RATE_1620 (3 << 25)
-#define DPLL_CFGCR0_LINK_RATE_1080 (4 << 25)
-#define DPLL_CFGCR0_LINK_RATE_2160 (5 << 25)
-#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
-#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
-#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
-#define DPLL_CFGCR0_DCO_FRACTION_SHIFT (10)
-#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
-#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
-
-#define _ICL_DPLL0_CFGCR1 0x164004
-#define _ICL_DPLL1_CFGCR1 0x164084
-#define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
- _ICL_DPLL1_CFGCR1)
-#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
-#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
-#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
-#define DPLL_CFGCR1_QDIV_MODE_SHIFT (9)
-#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
-#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
-#define DPLL_CFGCR1_KDIV_SHIFT (6)
-#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
-#define DPLL_CFGCR1_KDIV_1 (1 << 6)
-#define DPLL_CFGCR1_KDIV_2 (2 << 6)
-#define DPLL_CFGCR1_KDIV_3 (4 << 6)
-#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
-#define DPLL_CFGCR1_PDIV_SHIFT (2)
-#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
-#define DPLL_CFGCR1_PDIV_2 (1 << 2)
-#define DPLL_CFGCR1_PDIV_3 (2 << 2)
-#define DPLL_CFGCR1_PDIV_5 (4 << 2)
-#define DPLL_CFGCR1_PDIV_7 (8 << 2)
-#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
-#define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0)
-#define TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0)
-
-#define _TGL_DPLL0_CFGCR0 0x164284
-#define _TGL_DPLL1_CFGCR0 0x16428C
-#define _TGL_TBTPLL_CFGCR0 0x16429C
-#define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
- _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0))
-#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
- _TGL_DPLL1_CFGCR0)
-
-#define _TGL_DPLL0_DIV0 0x164B00
-#define _TGL_DPLL1_DIV0 0x164C00
-#define TGL_DPLL0_DIV0(pll) _MMIO_PLL(pll, _TGL_DPLL0_DIV0, _TGL_DPLL1_DIV0)
-#define TGL_DPLL0_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
-#define TGL_DPLL0_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(TGL_DPLL0_DIV0_AFC_STARTUP_MASK, (val))
-
-#define _TGL_DPLL0_CFGCR1 0x164288
-#define _TGL_DPLL1_CFGCR1 0x164290
-#define _TGL_TBTPLL_CFGCR1 0x1642A0
-#define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
- _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1))
-#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
- _TGL_DPLL1_CFGCR1)
-
-#define _DG1_DPLL2_CFGCR0 0x16C284
-#define _DG1_DPLL3_CFGCR0 0x16C28C
-#define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
- _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0))
-
-#define _DG1_DPLL2_CFGCR1 0x16C288
-#define _DG1_DPLL3_CFGCR1 0x16C290
-#define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
- _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1))
-
-/* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */
-#define _ADLS_DPLL4_CFGCR0 0x164294
-#define _ADLS_DPLL3_CFGCR0 0x1642C0
-#define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
- _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0))
-
-#define _ADLS_DPLL4_CFGCR1 0x164298
-#define _ADLS_DPLL3_CFGCR1 0x1642C4
-#define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
- _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1))
-
-/* BXT display engine PLL */
-#define BXT_DE_PLL_CTL _MMIO(0x6d000)
-#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
-#define BXT_DE_PLL_RATIO_MASK 0xff
-
-#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
-#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
-#define BXT_DE_PLL_LOCK (1 << 30)
-#define BXT_DE_PLL_FREQ_REQ (1 << 23)
-#define BXT_DE_PLL_FREQ_REQ_ACK (1 << 22)
-#define ICL_CDCLK_PLL_RATIO(x) (x)
-#define ICL_CDCLK_PLL_RATIO_MASK 0xff
-
-/* GEN9 DC */
-#define DC_STATE_EN _MMIO(0x45504)
-#define DC_STATE_DISABLE 0
-#define DC_STATE_EN_DC3CO REG_BIT(30)
-#define DC_STATE_DC3CO_STATUS REG_BIT(29)
-#define HOLD_PHY_CLKREQ_PG1_LATCH REG_BIT(21)
-#define HOLD_PHY_PG1_LATCH REG_BIT(20)
-#define DC_STATE_EN_UPTO_DC5 (1 << 0)
-#define DC_STATE_EN_DC9 (1 << 3)
-#define DC_STATE_EN_UPTO_DC6 (2 << 0)
-#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
-
-#define DC_STATE_DEBUG _MMIO(0x45520)
-#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
-#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
-
-#define D_COMP_BDW _MMIO(0x138144)
-
-/* Pipe WM_LINETIME - watermark line time */
-#define _WM_LINETIME_A 0x45270
-#define _WM_LINETIME_B 0x45274
-#define WM_LINETIME(pipe) _MMIO_PIPE(pipe, _WM_LINETIME_A, _WM_LINETIME_B)
-#define HSW_LINETIME_MASK REG_GENMASK(8, 0)
-#define HSW_LINETIME(x) REG_FIELD_PREP(HSW_LINETIME_MASK, (x))
-#define HSW_IPS_LINETIME_MASK REG_GENMASK(24, 16)
-#define HSW_IPS_LINETIME(x) REG_FIELD_PREP(HSW_IPS_LINETIME_MASK, (x))
-
-/* SFUSE_STRAP */
-#define SFUSE_STRAP _MMIO(0xc2014)
-#define SFUSE_STRAP_FUSE_LOCK (1 << 13)
-#define SFUSE_STRAP_RAW_FREQUENCY (1 << 8)
-#define SFUSE_STRAP_DISPLAY_DISABLED (1 << 7)
-#define SFUSE_STRAP_CRT_DISABLED (1 << 6)
-#define SFUSE_STRAP_DDIF_DETECTED (1 << 3)
-#define SFUSE_STRAP_DDIB_DETECTED (1 << 2)
-#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
-#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
-
-/* Gen4+ Timestamp and Pipe Frame time stamp registers */
-#define GEN4_TIMESTAMP _MMIO(0x2358)
-#define ILK_TIMESTAMP_HI _MMIO(0x70070)
-#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
-
#define GEN9_TIMESTAMP_OVERRIDE _MMIO(0x44074)
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT 0
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK 0x3ff
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT 12
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK (0xf << 12)
-/* g4x+, except vlv/chv! */
-#define _PIPE_FRMTMSTMP_A 0x70048
-#define _PIPE_FRMTMSTMP_B 0x71048
-#define PIPE_FRMTMSTMP(pipe) \
- _MMIO_PIPE(pipe, _PIPE_FRMTMSTMP_A, _PIPE_FRMTMSTMP_B)
-
-/* g4x+, except vlv/chv! */
-#define _PIPE_FLIPTMSTMP_A 0x7004C
-#define _PIPE_FLIPTMSTMP_B 0x7104C
-#define PIPE_FLIPTMSTMP(pipe) \
- _MMIO_PIPE(pipe, _PIPE_FLIPTMSTMP_A, _PIPE_FLIPTMSTMP_B)
-
-/* tgl+ */
-#define _PIPE_FLIPDONETMSTMP_A 0x70054
-#define _PIPE_FLIPDONETMSTMP_B 0x71054
-#define PIPE_FLIPDONETIMSTMP(pipe) \
- _MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B)
-
-#define _VLV_PIPE_MSA_MISC_A 0x70048
-#define VLV_PIPE_MSA_MISC(__display, pipe) \
- _MMIO_PIPE2(__display, pipe, _VLV_PIPE_MSA_MISC_A)
-#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
-#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
-
#define GGC _MMIO(0x108040)
#define GMS_MASK REG_GENMASK(15, 8)
#define GGMS_MASK REG_GENMASK(7, 6)
@@ -4133,45 +1223,6 @@ enum skl_power_gate {
#define SGGI_DIS REG_BIT(15)
#define SGR_DIS REG_BIT(13)
-#define _ICL_PHY_MISC_A 0x64C00
-#define _ICL_PHY_MISC_B 0x64C04
-#define _DG2_PHY_MISC_TC1 0x64C14 /* TC1="PHY E" but offset as if "PHY F" */
-#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, _ICL_PHY_MISC_B)
-#define DG2_PHY_MISC(port) ((port) == PHY_E ? _MMIO(_DG2_PHY_MISC_TC1) : \
- ICL_PHY_MISC(port))
-#define ICL_PHY_MISC_MUX_DDID (1 << 28)
-#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
-#define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20)
-
-#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
-#define MODULAR_FIA_MASK (1 << 4)
-#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
-#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5))
-#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8)
-#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8))
-#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8))
-
-#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
-#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx))
-
-#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
-#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx))
-
-#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880)
-#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
-#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
-#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
-
-#define _TCSS_DDI_STATUS_1 0x161500
-#define _TCSS_DDI_STATUS_2 0x161504
-#define TCSS_DDI_STATUS(tc) _MMIO(_PICK_EVEN(tc, \
- _TCSS_DDI_STATUS_1, \
- _TCSS_DDI_STATUS_2))
-#define TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK REG_GENMASK(28, 25)
-#define TCSS_DDI_STATUS_READY REG_BIT(2)
-#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1)
-#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0)
-
#define PRIMARY_SPI_TRIGGER _MMIO(0x102040)
#define PRIMARY_SPI_ADDRESS _MMIO(0x102080)
#define PRIMARY_SPI_REGIONID _MMIO(0x102084)
@@ -4180,37 +1231,11 @@ enum skl_power_gate {
#define OROM_OFFSET _MMIO(0x1020c0)
#define OROM_OFFSET_MASK REG_GENMASK(20, 16)
-#define CLKREQ_POLICY _MMIO(0x101038)
-#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
-
-#define CLKGATE_DIS_MISC _MMIO(0x46534)
-#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
-
-#define _MTL_CLKGATE_DIS_TRANS_A 0x604E8
-#define _MTL_CLKGATE_DIS_TRANS_B 0x614E8
-#define MTL_CLKGATE_DIS_TRANS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
-#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
-
-#define _MTL_PIPE_CLKGATE_DIS2_A 0x60114
-#define _MTL_PIPE_CLKGATE_DIS2_B 0x61114
-#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B)
-#define MTL_DPFC_GATING_DIS REG_BIT(6)
-
#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0)
-#define MTL_MEM_SS_INFO_QGV_POINT_OFFSET 0x45710
-#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8)
-#define MTL_TRCD_MASK REG_GENMASK(31, 24)
-#define MTL_TRP_MASK REG_GENMASK(23, 16)
-#define MTL_DCLK_MASK REG_GENMASK(15, 0)
-
-#define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8 + 4)
-#define MTL_TRAS_MASK REG_GENMASK(16, 8)
-#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
-
#define MTL_MEDIA_GSI_BASE 0x380000
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c3d27eadc0a7..b9a2b2194c8f 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -2184,7 +2184,7 @@ void i915_request_show(struct drm_printer *m,
const char *prefix,
int indent)
{
- const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
+ const char __rcu *timeline;
char buf[80] = "";
int x = 0;
@@ -2220,6 +2220,8 @@ void i915_request_show(struct drm_printer *m,
x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
+ rcu_read_lock();
+ timeline = dma_fence_timeline_name((struct dma_fence *)&rq->fence);
drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
prefix, indent, " ",
queue_status(rq),
@@ -2228,7 +2230,8 @@ void i915_request_show(struct drm_printer *m,
fence_status(rq),
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
- name);
+ rcu_dereference(timeline));
+ rcu_read_unlock();
}
static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
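The hunk above drops the direct `ops->get_timeline_name()` call in favor of `dma_fence_timeline_name()`, whose return value is annotated `__rcu`: the string may be freed once its timeline dies, so it is only valid inside an RCU read-side section. A minimal sketch of the pattern (the `show_fence()` helper is hypothetical; the accessor is the one the hunk switches to):

```c
#include <linux/dma-fence.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

static void show_fence(struct dma_fence *fence)
{
	const char __rcu *timeline;

	rcu_read_lock();
	timeline = dma_fence_timeline_name(fence);
	/* the name is only guaranteed to stay alive inside this section */
	pr_info("fence %llu on %s\n", fence->seqno, rcu_dereference(timeline));
	rcu_read_unlock();
}
```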
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index a4902ee08b6e..73e89b168fc3 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -430,16 +430,22 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
struct i915_sw_dma_fence_cb_timer *cb = timer_container_of(cb, t,
timer);
struct i915_sw_fence *fence;
+ const char __rcu *timeline;
+ const char __rcu *driver;
fence = xchg(&cb->base.fence, NULL);
if (!fence)
return;
+ rcu_read_lock();
+ driver = dma_fence_driver_name(cb->dma);
+ timeline = dma_fence_timeline_name(cb->dma);
pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%ps)\n",
- cb->dma->ops->get_driver_name(cb->dma),
- cb->dma->ops->get_timeline_name(cb->dma),
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
cb->dma->seqno,
i915_sw_fence_debug_hint(fence));
+ rcu_read_unlock();
i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
i915_sw_fence_complete(fence);
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index f45bd6b6cede..4c02a04be681 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -5,6 +5,8 @@
#include <linux/vga_switcheroo.h>
+#include "display/intel_display_core.h"
+
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_switcheroo.h"
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 632e316f8b05..25e97031d76e 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1607,6 +1607,26 @@ err_rpm:
return err;
}
+int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(vma->obj, &ww);
+ if (!err)
+ err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ return err;
+}
+
static void flush_idle_contexts(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
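The out-of-lined `i915_vma_pin()` above follows the driver's standard ww-mutex transaction shape: on -EDEADLK, drop every held lock via the backoff helper, then rerun the whole sequence. The generic form of that loop, with `lock_objects()` and `do_work()` as hypothetical placeholders for the locking and the actual operation:

```c
/* generic form of the ww backoff loop used by i915_vma_pin() above */
static int ww_transaction(void)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true); /* true: interruptible waits */
retry:
	err = lock_objects(&ww);
	if (!err)
		err = do_work(&ww);
	if (err == -EDEADLK) {
		/* drop every held lock, wait on the contended one, rerun */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return err;
}
```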
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 6a6be8048aa8..0f9eee6d18d2 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -289,26 +289,8 @@ int __must_check
i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- struct i915_gem_ww_ctx ww;
- int err;
-
- i915_gem_ww_ctx_init(&ww, true);
-retry:
- err = i915_gem_object_lock(vma->obj, &ww);
- if (!err)
- err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
- if (err == -EDEADLK) {
- err = i915_gem_ww_ctx_backoff(&ww);
- if (!err)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
-
- return err;
-}
+int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u32 align, unsigned int flags);
@@ -353,6 +335,11 @@ static inline bool i915_node_color_differs(const struct drm_mm_node *node,
return drm_mm_node_allocated(node) && node->color != color;
}
+static inline void __iomem *i915_vma_get_iomap(struct i915_vma *vma)
+{
+ return READ_ONCE(vma->iomap);
+}
+
/**
* i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
* @vma: VMA to iomap
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index 387b26400169..f86a3629ae9e 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -27,6 +27,7 @@
#include "display/i9xx_plane_regs.h"
#include "display/intel_display.h"
+#include "display/intel_display_core.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
@@ -37,7 +38,7 @@
#include "i915_reg.h"
#include "intel_clock_gating.h"
#include "intel_mchbar_regs.h"
-#include "vlv_sideband.h"
+#include "vlv_iosf_sb.h"
struct drm_i915_clock_gating_funcs {
void (*init_clock_gating)(struct drm_i915_private *i915);
@@ -201,6 +202,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *i915)
static void cpt_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
enum pipe pipe;
u32 val;
@@ -220,7 +222,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *i915)
val = intel_uncore_read(&i915->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
- if (i915->display.vbt.fdi_rx_polarity_inverted)
+ if (display->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index d581a9d2c063..87ac4446d306 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -11,6 +11,7 @@
#include "display/intel_color_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
+#include "display/intel_display_core.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
@@ -20,6 +21,7 @@
#include "display/intel_lvds_regs.h"
#include "display/intel_pfit_regs.h"
#include "display/intel_psr_regs.h"
+#include "display/intel_sbi_regs.h"
#include "display/intel_sprite_regs.h"
#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
@@ -32,6 +34,7 @@
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "intel_gvt.h"
#include "intel_mchbar_regs.h"
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index 3db2ba439bb5..81da75108c60 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -110,13 +110,12 @@ int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
}
int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
- int fast_timeout_us, int slow_timeout_ms)
+ int timeout_ms)
{
int err;
mutex_lock(&uncore->i915->sb_lock);
- err = __snb_pcode_rw(uncore, mbox, &val, NULL,
- fast_timeout_us, slow_timeout_ms, false);
+ err = __snb_pcode_rw(uncore, mbox, &val, NULL, 250, timeout_ms, false);
mutex_unlock(&uncore->i915->sb_lock);
if (err) {
@@ -273,3 +272,27 @@ int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u3
return err;
}
+
+/* Helpers with drm device */
+int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return snb_pcode_read(&i915->uncore, mbox, val, val1);
+}
+
+int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return snb_pcode_write_timeout(&i915->uncore, mbox, val, timeout_ms);
+}
+
+int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return skl_pcode_request(&i915->uncore, mbox, request, reply_mask, reply,
+ timeout_base_ms);
+}
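The new wrappers let display code issue pcode requests through a plain `struct drm_device` instead of reaching into `drm_i915_private`. A hedged sketch of a call site; `MBOX_EXAMPLE` and the bit being poked are made up for illustration:

```c
#include <linux/bits.h>

#include "intel_pcode.h"

/* hypothetical caller of the new drm_device-based pcode helpers */
static int example_pcode_poke(struct drm_device *drm)
{
	u32 val;
	int err;

	err = intel_pcode_read(drm, MBOX_EXAMPLE, &val, NULL);
	if (err)
		return err;

	/* 100 ms timeout instead of intel_pcode_write()'s 1 ms default */
	return intel_pcode_write_timeout(drm, MBOX_EXAMPLE, val | BIT(0), 100);
}
```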
diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h
index 8d2198e29422..c91a821a88d4 100644
--- a/drivers/gpu/drm/i915/intel_pcode.h
+++ b/drivers/gpu/drm/i915/intel_pcode.h
@@ -8,13 +8,13 @@
#include <linux/types.h>
+struct drm_device;
struct intel_uncore;
int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1);
-int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
- int fast_timeout_us, int slow_timeout_ms);
+int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val, int timeout_ms);
#define snb_pcode_write(uncore, mbox, val) \
- snb_pcode_write_timeout(uncore, mbox, val, 500, 0)
+ snb_pcode_write_timeout((uncore), (mbox), (val), 1)
int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms);
@@ -27,4 +27,13 @@ int intel_pcode_init(struct intel_uncore *uncore);
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val);
int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val);
+/* Helpers with drm device */
+int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
+int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
+#define intel_pcode_write(drm, mbox, val) \
+ intel_pcode_write_timeout((drm), (mbox), (val), 1)
+
+int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+
#endif /* _INTEL_PCODE_H */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8d9f4c410546..7ce3e6de0c19 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -59,7 +59,9 @@ static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev));
+ if (!rpm->debug.class)
+ ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT,
+ "intel_runtime_pm");
}
static intel_wakeref_t
diff --git a/drivers/gpu/drm/i915/intel_sbi.c b/drivers/gpu/drm/i915/intel_sbi.c
deleted file mode 100644
index 41e85ac773dc..000000000000
--- a/drivers/gpu/drm/i915/intel_sbi.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2013-2021 Intel Corporation
- *
- * LPT/WPT IOSF sideband.
- */
-
-#include "i915_drv.h"
-#include "intel_sbi.h"
-#include "i915_reg.h"
-
-/* SBI access */
-static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination,
- u32 *val, bool is_read)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u32 cmd;
-
- lockdep_assert_held(&i915->sbi_lock);
-
- if (intel_wait_for_register_fw(uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100)) {
- drm_err(&i915->drm,
- "timeout waiting for SBI to become ready\n");
- return -EBUSY;
- }
-
- intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
- intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);
-
- if (destination == SBI_ICLK)
- cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
- else
- cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
- if (!is_read)
- cmd |= BIT(8);
- intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);
-
- if (__intel_wait_for_register_fw(uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100, 100, &cmd)) {
- drm_err(&i915->drm,
- "timeout waiting for SBI to complete read\n");
- return -ETIMEDOUT;
- }
-
- if (cmd & SBI_RESPONSE_FAIL) {
- drm_err(&i915->drm, "error during SBI read of reg %x\n", reg);
- return -ENXIO;
- }
-
- if (is_read)
- *val = intel_uncore_read_fw(uncore, SBI_DATA);
-
- return 0;
-}
-
-void intel_sbi_lock(struct drm_i915_private *i915)
-{
- mutex_lock(&i915->sbi_lock);
-}
-
-void intel_sbi_unlock(struct drm_i915_private *i915)
-{
- mutex_unlock(&i915->sbi_lock);
-}
-
-u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination)
-{
- u32 result = 0;
-
- intel_sbi_rw(i915, reg, destination, &result, true);
-
- return result;
-}
-
-void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
- enum intel_sbi_destination destination)
-{
- intel_sbi_rw(i915, reg, destination, &value, false);
-}
-
-void intel_sbi_init(struct drm_i915_private *i915)
-{
- mutex_init(&i915->sbi_lock);
-}
-
-void intel_sbi_fini(struct drm_i915_private *i915)
-{
- mutex_destroy(&i915->sbi_lock);
-}
diff --git a/drivers/gpu/drm/i915/intel_sbi.h b/drivers/gpu/drm/i915/intel_sbi.h
deleted file mode 100644
index 85161a4f13b8..000000000000
--- a/drivers/gpu/drm/i915/intel_sbi.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2013-2021 Intel Corporation
- */
-
-#ifndef _INTEL_SBI_H_
-#define _INTEL_SBI_H_
-
-#include <linux/types.h>
-
-struct drm_i915_private;
-
-enum intel_sbi_destination {
- SBI_ICLK,
- SBI_MPHY,
-};
-
-void intel_sbi_init(struct drm_i915_private *i915);
-void intel_sbi_fini(struct drm_i915_private *i915);
-void intel_sbi_lock(struct drm_i915_private *i915);
-void intel_sbi_unlock(struct drm_i915_private *i915);
-u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination);
-void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
- enum intel_sbi_destination destination);
-
-#endif /* _INTEL_SBI_H_ */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 48a10ff80148..c8e29fd72290 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -24,6 +24,8 @@
#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>
+#include "display/intel_display_core.h"
+
#include "gt/intel_gt.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"
@@ -2642,7 +2644,7 @@ static void driver_initiated_flr(struct intel_uncore *uncore)
* is still pending (unless the HW is totally dead), but better to be
* safe in case something unexpected happens
*/
- ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms);
+ ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms, NULL);
if (ret) {
drm_err(&i915->drm,
"Failed to wait for Driver-FLR bit to clear! %d\n",
@@ -2657,7 +2659,7 @@ static void driver_initiated_flr(struct intel_uncore *uncore)
/* Wait for hardware teardown to complete */
ret = intel_wait_for_register_fw(uncore, GU_CNTL,
DRIVERFLR, 0,
- flr_timeout_ms);
+ flr_timeout_ms, NULL);
if (ret) {
drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
return;
@@ -2666,7 +2668,7 @@ static void driver_initiated_flr(struct intel_uncore *uncore)
/* Wait for hardware/firmware re-init to complete */
ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
DRIVERFLR_STATUS, DRIVERFLR_STATUS,
- flr_timeout_ms);
+ flr_timeout_ms, NULL);
if (ret) {
drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
return;
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index e39582950627..6048b99b96cb 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -313,10 +313,11 @@ intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
- unsigned int timeout_ms)
+ unsigned int timeout_ms,
+ u32 *out_value)
{
return __intel_wait_for_register_fw(uncore, reg, mask, value,
- 2, timeout_ms, NULL);
+ 2, timeout_ms, out_value);
}
#define IS_GSI_REG(reg) ((reg) < 0x40000)
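With the new out_value parameter above, existing callers pass NULL (as the driver_initiated_flr() hunks do), while new callers can capture the final register value; a sketch, with the register and busy mask left as parameters:

	static int example_wait_idle(struct intel_uncore *uncore,
				     i915_reg_t reg, u32 busy_bit)
	{
		u32 val;
		int ret;

		/* Wait up to 50 ms for busy_bit to clear, keeping the readback. */
		ret = intel_wait_for_register_fw(uncore, reg, busy_bit, 0,
						 50, &val);
		if (ret)
			return ret;

		/* val holds the last value read; pass NULL when it's not needed. */
		return 0;
	}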
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 51561b190b93..7fa194de5d35 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -114,7 +114,8 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
"wakeref.work", &key->work, 0);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
- ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, name);
+ if (!wf->debug.class)
+ ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, "intel_wakeref");
#endif
}
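This hunk and the intel_runtime_pm.c one earlier apply the same init-once guard, keyed off the class member that ref_tracker_dir_init() fills in; in isolation the pattern is simply:

	/* Skip re-initialization if a previous probe already set up the dir. */
	if (!dir->class)
		ref_tracker_dir_init(dir, INTEL_REFTRACK_DEAD_COUNT, "intel_wakeref");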
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index f08f6674911e..7b856b5090f9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -413,15 +413,8 @@ static int igt_mock_splintered_region(void *arg)
close_objects(mem, &objects);
- /*
- * While we should be able allocate everything without any flag
- * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are
- * actually limited to the largest power-of-two for the region size i.e
- * max_order, due to the inner workings of the buddy allocator. So make
- * sure that does indeed hold true.
- */
-
- obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
+ obj = igt_object_create(mem, &objects, roundup_pow_of_two(size),
+ I915_BO_ALLOC_CONTIGUOUS);
if (!IS_ERR(obj)) {
pr_err("%s too large contiguous allocation was not rejected\n",
__func__);
@@ -429,8 +422,7 @@ static int igt_mock_splintered_region(void *arg)
goto out_close;
}
- obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
- I915_BO_ALLOC_CONTIGUOUS);
+ obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
if (IS_ERR(obj)) {
pr_err("%s largest possible contiguous allocation failed\n",
__func__);
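Worked numbers for the updated expectation, with a hypothetical region size: if size is 0xF0 pages (not a power of two), roundup_pow_of_two(size) is 0x100 pages, which exceeds the region and must be rejected, while a contiguous allocation of the full 0xF0 pages is now expected to succeed, since the allocator is no longer limited to the largest power-of-two block for contiguous requests.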
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index a77e5b26542c..fb8751bd5df0 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -28,6 +28,8 @@
#include <drm/drm_managed.h>
+#include "display/intel_display_device.h"
+
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
@@ -141,6 +143,7 @@ struct drm_i915_private *mock_gem_device(void)
static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
#endif
struct drm_i915_private *i915;
+ struct intel_display *display;
struct pci_dev *pdev;
int ret;
@@ -180,7 +183,11 @@ struct drm_i915_private *mock_gem_device(void)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, &mock_info);
- intel_display_device_probe(pdev);
+ display = intel_display_device_probe(pdev);
+ if (IS_ERR(display))
+ goto err_device;
+
+ i915->display = display;
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
@@ -257,6 +264,7 @@ err_ttm:
intel_gt_driver_late_release_all(i915);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
+err_device:
mock_destroy_device(i915);
return NULL;
@@ -266,6 +274,8 @@ void mock_destroy_device(struct drm_i915_private *i915)
{
struct device *dev = i915->drm.dev;
+ intel_display_device_remove(i915->display);
+
devres_release_group(dev, NULL);
put_device(dev);
}
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index eee5c4f45a43..deb159548a09 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -5,12 +5,17 @@
#include <linux/string_helpers.h>
+#include <drm/drm_managed.h>
+
+#include "../display/intel_display_core.h" /* FIXME */
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
-#include "vlv_sideband.h"
+#include "intel_uncore.h"
+#include "vlv_iosf_sb.h"
struct dram_dimm_info {
u16 size;
@@ -97,9 +102,9 @@ static unsigned int chv_mem_freq(struct drm_i915_private *i915)
{
u32 val;
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
- val = vlv_cck_read(i915, CCK_FUSE_REG);
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_CCK));
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_CCK, CCK_FUSE_REG);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_CCK));
switch ((val >> 2) & 0x7) {
case 3:
@@ -113,9 +118,9 @@ static unsigned int vlv_mem_freq(struct drm_i915_private *i915)
{
u32 val;
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
switch ((val >> 6) & 3) {
case 0:
@@ -381,9 +386,8 @@ intel_is_dram_symmetric(const struct dram_channel_info *ch0,
}
static int
-skl_dram_get_channels_info(struct drm_i915_private *i915)
+skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- struct dram_info *dram_info = &i915->dram_info;
struct dram_channel_info ch0 = {}, ch1 = {};
u32 val;
int ret;
@@ -444,14 +448,13 @@ skl_get_dram_type(struct drm_i915_private *i915)
}
static int
-skl_get_dram_info(struct drm_i915_private *i915)
+skl_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- struct dram_info *dram_info = &i915->dram_info;
int ret;
dram_info->type = skl_get_dram_type(i915);
- ret = skl_dram_get_channels_info(i915);
+ ret = skl_dram_get_channels_info(i915, dram_info);
if (ret)
return ret;
@@ -536,9 +539,8 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm);
}
-static int bxt_get_dram_info(struct drm_i915_private *i915)
+static int bxt_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- struct dram_info *dram_info = &i915->dram_info;
u32 val;
u8 valid_ranks = 0;
int i;
@@ -583,14 +585,14 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
return 0;
}
-static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
+ struct dram_info *dram_info)
{
- struct dram_info *dram_info = &dev_priv->dram_info;
u32 val = 0;
int ret;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
+ ret = intel_pcode_read(&dev_priv->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
if (ret)
return ret;
@@ -645,27 +647,26 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
return 0;
}
-static int gen11_get_dram_info(struct drm_i915_private *i915)
+static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- int ret = skl_get_dram_info(i915);
+ int ret = skl_get_dram_info(i915, dram_info);
if (ret)
return ret;
- return icl_pcode_read_mem_global_info(i915);
+ return icl_pcode_read_mem_global_info(i915, dram_info);
}
-static int gen12_get_dram_info(struct drm_i915_private *i915)
+static int gen12_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- i915->dram_info.wm_lv_0_adjust_needed = false;
+ dram_info->wm_lv_0_adjust_needed = false;
- return icl_pcode_read_mem_global_info(i915);
+ return icl_pcode_read_mem_global_info(i915, dram_info);
}
-static int xelpdp_get_dram_info(struct drm_i915_private *i915)
+static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
- struct dram_info *dram_info = &i915->dram_info;
switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) {
case 0:
@@ -706,16 +707,22 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915)
return 0;
}
-void intel_dram_detect(struct drm_i915_private *i915)
+int intel_dram_detect(struct drm_i915_private *i915)
{
- struct dram_info *dram_info = &i915->dram_info;
+ struct dram_info *dram_info;
int ret;
detect_fsb_freq(i915);
detect_mem_freq(i915);
if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
- return;
+ return 0;
+
+ dram_info = drmm_kzalloc(&i915->drm, sizeof(*dram_info), GFP_KERNEL);
+ if (!dram_info)
+ return -ENOMEM;
+
+ i915->dram_info = dram_info;
/*
* Assume level 0 watermark latency adjustment is needed until proven
@@ -724,21 +731,22 @@ void intel_dram_detect(struct drm_i915_private *i915)
dram_info->wm_lv_0_adjust_needed = !IS_BROXTON(i915) && !IS_GEMINILAKE(i915);
if (DISPLAY_VER(i915) >= 14)
- ret = xelpdp_get_dram_info(i915);
+ ret = xelpdp_get_dram_info(i915, dram_info);
else if (GRAPHICS_VER(i915) >= 12)
- ret = gen12_get_dram_info(i915);
+ ret = gen12_get_dram_info(i915, dram_info);
else if (GRAPHICS_VER(i915) >= 11)
- ret = gen11_get_dram_info(i915);
+ ret = gen11_get_dram_info(i915, dram_info);
else if (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
- ret = bxt_get_dram_info(i915);
+ ret = bxt_get_dram_info(i915, dram_info);
else
- ret = skl_get_dram_info(i915);
+ ret = skl_get_dram_info(i915, dram_info);
drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
intel_dram_type_str(dram_info->type));
+ /* TODO: Do we want to abort probe on dram detection failures? */
if (ret)
- return;
+ return 0;
drm_dbg_kms(&i915->drm, "Num qgv points %u\n", dram_info->num_qgv_points);
@@ -746,6 +754,20 @@ void intel_dram_detect(struct drm_i915_private *i915)
drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
str_yes_no(dram_info->wm_lv_0_adjust_needed));
+
+ return 0;
+}
+
+/*
+ * Returns NULL for platforms that don't have dram info. Avoid overzealous NULL
+ * checks, and prefer not dereferencing on platforms that shouldn't look at dram
+ * info, to catch accidental and incorrect dram info checks.
+ */
+const struct dram_info *intel_dram_info(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return i915->dram_info;
}
static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.h b/drivers/gpu/drm/i915/soc/intel_dram.h
index a10136eda674..2a696e03aad4 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.h
+++ b/drivers/gpu/drm/i915/soc/intel_dram.h
@@ -6,10 +6,34 @@
#ifndef __INTEL_DRAM_H__
#define __INTEL_DRAM_H__
+#include <linux/types.h>
+
struct drm_i915_private;
+struct drm_device;
+
+struct dram_info {
+ bool wm_lv_0_adjust_needed;
+ u8 num_channels;
+ bool symmetric_memory;
+ enum intel_dram_type {
+ INTEL_DRAM_UNKNOWN,
+ INTEL_DRAM_DDR3,
+ INTEL_DRAM_DDR4,
+ INTEL_DRAM_LPDDR3,
+ INTEL_DRAM_LPDDR4,
+ INTEL_DRAM_DDR5,
+ INTEL_DRAM_LPDDR5,
+ INTEL_DRAM_GDDR,
+ INTEL_DRAM_GDDR_ECC,
+ __INTEL_DRAM_TYPE_MAX,
+ } type;
+ u8 num_qgv_points;
+ u8 num_psf_gv_points;
+};
void intel_dram_edram_detect(struct drm_i915_private *i915);
-void intel_dram_detect(struct drm_i915_private *i915);
+int intel_dram_detect(struct drm_i915_private *i915);
unsigned int i9xx_fsb_freq(struct drm_i915_private *i915);
+const struct dram_info *intel_dram_info(struct drm_device *drm);
#endif /* __INTEL_DRAM_H__ */
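A sketch of a consumer honoring the NULL semantics documented in intel_dram.c above; the function is hypothetical, the accessor and field come from this patch:

	static bool example_needs_wm_adjust(struct drm_device *drm)
	{
		const struct dram_info *dram_info = intel_dram_info(drm);

		/*
		 * Deliberately no NULL check: platforms that shouldn't look
		 * at DRAM info are meant to oops here rather than silently
		 * take the wrong path.
		 */
		return dram_info->wm_lv_0_adjust_needed;
	}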
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
index 734e9f2801ea..5346b8dda79a 100644
--- a/drivers/gpu/drm/i915/soc/intel_gmch.c
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -10,6 +10,8 @@
#include <drm/drm_managed.h>
#include <drm/intel/i915_drm.h>
+#include "../display/intel_display_core.h" /* FIXME */
+
#include "i915_drv.h"
#include "intel_gmch.h"
#include "intel_pci_config.h"
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_iosf_sb.c
index 114ae8eb9cd5..f4b386933141 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_iosf_sb.c
@@ -6,9 +6,7 @@
#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
-#include "vlv_sideband.h"
-
-#include "display/intel_dpio_phy.h"
+#include "vlv_iosf_sb.h"
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
@@ -57,19 +55,29 @@ static void __vlv_punit_put(struct drm_i915_private *i915)
iosf_mbi_punit_release();
}
-void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
+void vlv_iosf_sb_get(struct drm_device *drm, unsigned long unit_mask)
{
- if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ if (unit_mask & BIT(VLV_IOSF_SB_PUNIT))
__vlv_punit_get(i915);
mutex_lock(&i915->vlv_iosf_sb.lock);
+
+ i915->vlv_iosf_sb.locked_unit_mask |= unit_mask;
}
-void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
+void vlv_iosf_sb_put(struct drm_device *drm, unsigned long unit_mask)
{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ i915->vlv_iosf_sb.locked_unit_mask &= ~unit_mask;
+
+ drm_WARN_ON(drm, i915->vlv_iosf_sb.locked_unit_mask);
+
mutex_unlock(&i915->vlv_iosf_sb.lock);
- if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ if (unit_mask & BIT(VLV_IOSF_SB_PUNIT))
__vlv_punit_put(i915);
}
@@ -123,131 +131,83 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
return err;
}
-u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRRDDA_NP, addr, &val);
-
- return val;
-}
-
-int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
-{
- return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRWRDA_NP, addr, &val);
-}
-
-u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
+static u32 unit_to_devfn(enum vlv_iosf_sb_unit unit)
{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
- SB_CRWRDA_NP, reg, &val);
-}
-
-u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
- SB_CRRDDA_NP, addr, &val);
-
- return val;
-}
-
-u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
+ if (unit == VLV_IOSF_SB_DPIO || unit == VLV_IOSF_SB_DPIO_2 ||
+ unit == VLV_IOSF_SB_FLISDSI)
+ return DPIO_DEVFN;
+ else
+ return PCI_DEVFN(0, 0);
}
-void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
+static u32 unit_to_port(enum vlv_iosf_sb_unit unit)
{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
- SB_CRWRDA_NP, reg, &val);
+ switch (unit) {
+ case VLV_IOSF_SB_BUNIT:
+ return IOSF_PORT_BUNIT;
+ case VLV_IOSF_SB_CCK:
+ return IOSF_PORT_CCK;
+ case VLV_IOSF_SB_CCU:
+ return IOSF_PORT_CCU;
+ case VLV_IOSF_SB_DPIO:
+ return IOSF_PORT_DPIO;
+ case VLV_IOSF_SB_DPIO_2:
+ return IOSF_PORT_DPIO_2;
+ case VLV_IOSF_SB_FLISDSI:
+ return IOSF_PORT_FLISDSI;
+ case VLV_IOSF_SB_GPIO:
+ return 0; /* FIXME: unused */
+ case VLV_IOSF_SB_NC:
+ return IOSF_PORT_NC;
+ case VLV_IOSF_SB_PUNIT:
+ return IOSF_PORT_PUNIT;
+ default:
+ return 0;
+ }
}
-u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
+static u32 unit_to_opcode(enum vlv_iosf_sb_unit unit, bool write)
{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
+ if (unit == VLV_IOSF_SB_DPIO || unit == VLV_IOSF_SB_DPIO_2)
+ return write ? SB_MWR_NP : SB_MRD_NP;
+ else
+ return write ? SB_CRWRDA_NP : SB_CRRDDA_NP;
}
-void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
+u32 vlv_iosf_sb_read(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr)
{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
- SB_CRWRDA_NP, reg, &val);
-}
+ struct drm_i915_private *i915 = to_i915(drm);
+ u32 devfn, port, opcode, val = 0;
-static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy)
-{
- /*
- * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
- * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
- */
- if (IS_CHERRYVIEW(i915))
- return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO;
- else
- return IOSF_PORT_DPIO;
-}
+ devfn = unit_to_devfn(unit);
+ port = unit_to_port(unit);
+ opcode = unit_to_opcode(unit, false);
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg)
-{
- u32 port = vlv_dpio_phy_iosf_port(i915, phy);
- u32 val = 0;
+ if (drm_WARN_ONCE(&i915->drm, !port, "invalid unit %d\n", unit))
+ return 0;
- vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
+ drm_WARN_ON(&i915->drm, !(i915->vlv_iosf_sb.locked_unit_mask & BIT(unit)));
- /*
- * FIXME: There might be some registers where all 1's is a valid value,
- * so ideally we should check the register offset instead...
- */
- drm_WARN(&i915->drm, val == 0xffffffff,
- "DPIO PHY%d read reg 0x%x == 0x%x\n",
- phy, reg, val);
+ vlv_sideband_rw(i915, devfn, port, opcode, addr, &val);
return val;
}
-void vlv_dpio_write(struct drm_i915_private *i915,
- enum dpio_phy phy, int reg, u32 val)
+int vlv_iosf_sb_write(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr, u32 val)
{
- u32 port = vlv_dpio_phy_iosf_port(i915, phy);
+ struct drm_i915_private *i915 = to_i915(drm);
+ u32 devfn, port, opcode;
- vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
-}
+ devfn = unit_to_devfn(unit);
+ port = unit_to_port(unit);
+ opcode = unit_to_opcode(unit, true);
-u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
+ if (drm_WARN_ONCE(&i915->drm, !port, "invalid unit %d\n", unit))
+ return -EINVAL;
- vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
- reg, &val);
- return val;
-}
+ drm_WARN_ON(&i915->drm, !(i915->vlv_iosf_sb.locked_unit_mask & BIT(unit)));
-void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
- reg, &val);
+ return vlv_sideband_rw(i915, devfn, port, opcode, addr, &val);
}
void vlv_iosf_sb_init(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/vlv_iosf_sb.h b/drivers/gpu/drm/i915/vlv_iosf_sb.h
new file mode 100644
index 000000000000..e2fea29a30ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_iosf_sb.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _VLV_IOSF_SB_H_
+#define _VLV_IOSF_SB_H_
+
+#include <linux/types.h>
+
+#include "vlv_iosf_sb_reg.h"
+
+struct drm_device;
+struct drm_i915_private;
+
+enum vlv_iosf_sb_unit {
+ VLV_IOSF_SB_BUNIT,
+ VLV_IOSF_SB_CCK,
+ VLV_IOSF_SB_CCU,
+ VLV_IOSF_SB_DPIO,
+ VLV_IOSF_SB_DPIO_2,
+ VLV_IOSF_SB_FLISDSI,
+ VLV_IOSF_SB_GPIO,
+ VLV_IOSF_SB_NC,
+ VLV_IOSF_SB_PUNIT,
+};
+
+void vlv_iosf_sb_init(struct drm_i915_private *i915);
+void vlv_iosf_sb_fini(struct drm_i915_private *i915);
+
+void vlv_iosf_sb_get(struct drm_device *drm, unsigned long unit_mask);
+void vlv_iosf_sb_put(struct drm_device *drm, unsigned long unit_mask);
+
+u32 vlv_iosf_sb_read(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr);
+int vlv_iosf_sb_write(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr, u32 val);
+
+#endif /* _VLV_IOSF_SB_H_ */
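The unified API above replaces the per-unit vlv_*_read()/vlv_*_write() wrappers deleted below; usage follows the pattern already visible in the intel_dram.c hunks (a sketch, with the register offset as a parameter):

	static u32 example_read_cck(struct drm_device *drm, u32 reg)
	{
		u32 val;

		/*
		 * Lock the CCK unit, read, then release; accessing a unit
		 * that has not been locked now trips the drm_WARN_ONs in
		 * vlv_iosf_sb.c.
		 */
		vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_CCK));
		val = vlv_iosf_sb_read(drm, VLV_IOSF_SB_CCK, reg);
		vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_CCK));

		return val;
	}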
diff --git a/drivers/gpu/drm/i915/vlv_sideband_reg.h b/drivers/gpu/drm/i915/vlv_iosf_sb_reg.h
index b7fbff3d0409..f977fb3b6e17 100644
--- a/drivers/gpu/drm/i915/vlv_sideband_reg.h
+++ b/drivers/gpu/drm/i915/vlv_iosf_sb_reg.h
@@ -3,8 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
-#ifndef _VLV_SIDEBAND_REG_H_
-#define _VLV_SIDEBAND_REG_H_
+#ifndef _VLV_IOSF_SB_REG_H_
+#define _VLV_IOSF_SB_REG_H_
/* See configdb bunit SB addr map */
#define BUNIT_REG_BISOC 0x11
@@ -177,4 +177,4 @@
#define CCK_FREQUENCY_STATUS_SHIFT 8
#define CCK_FREQUENCY_VALUES (0x1f << 0)
-#endif /* _VLV_SIDEBAND_REG_H_ */
+#endif /* _VLV_IOSF_SB_REG_H_ */
diff --git a/drivers/gpu/drm/i915/vlv_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h
deleted file mode 100644
index 31813e07c56f..000000000000
--- a/drivers/gpu/drm/i915/vlv_sideband.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2013-2021 Intel Corporation
- */
-
-#ifndef _VLV_SIDEBAND_H_
-#define _VLV_SIDEBAND_H_
-
-#include <linux/bitops.h>
-#include <linux/types.h>
-
-#include "vlv_sideband_reg.h"
-
-enum dpio_phy;
-struct drm_i915_private;
-
-enum {
- VLV_IOSF_SB_BUNIT,
- VLV_IOSF_SB_CCK,
- VLV_IOSF_SB_CCU,
- VLV_IOSF_SB_DPIO,
- VLV_IOSF_SB_FLISDSI,
- VLV_IOSF_SB_GPIO,
- VLV_IOSF_SB_NC,
- VLV_IOSF_SB_PUNIT,
-};
-
-void vlv_iosf_sb_init(struct drm_i915_private *i915);
-void vlv_iosf_sb_fini(struct drm_i915_private *i915);
-
-void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports);
-void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports);
-
-static inline void vlv_bunit_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_BUNIT));
-}
-
-u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg);
-void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val);
-
-static inline void vlv_bunit_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_BUNIT));
-}
-
-static inline void vlv_cck_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
-}
-
-u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg);
-void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val);
-
-static inline void vlv_cck_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
-}
-
-static inline void vlv_ccu_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCU));
-}
-
-u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg);
-void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val);
-
-static inline void vlv_ccu_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCU));
-}
-
-static inline void vlv_dpio_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_DPIO));
-}
-
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg);
-void vlv_dpio_write(struct drm_i915_private *i915,
- enum dpio_phy phy, int reg, u32 val);
-
-static inline void vlv_dpio_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_DPIO));
-}
-
-static inline void vlv_flisdsi_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_FLISDSI));
-}
-
-u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg);
-void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val);
-
-static inline void vlv_flisdsi_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_FLISDSI));
-}
-
-static inline void vlv_nc_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_NC));
-}
-
-u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr);
-
-static inline void vlv_nc_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_NC));
-}
-
-static inline void vlv_punit_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
-}
-
-u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr);
-int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val);
-
-static inline void vlv_punit_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
-}
-
-#endif /* _VLV_SIDEBAND_H_ */
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 59b334d094fa..7564b0f21b42 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -446,7 +446,7 @@ create_job(struct pvr_device *pvr_dev,
if (err)
goto err_put_job;
- err = pvr_queue_job_init(job);
+ err = pvr_queue_job_init(job, pvr_file->file->client_id);
if (err)
goto err_put_job;
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
index 3e349d039fc0..187a07e0bd9a 100644
--- a/drivers/gpu/drm/imagination/pvr_power.c
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -340,6 +340,63 @@ pvr_power_device_idle(struct device *dev)
return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY;
}
+static int
+pvr_power_clear_error(struct pvr_device *pvr_dev)
+{
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+ int err;
+
+ /* Ensure the device state is known and nothing is happening past this point */
+ pm_runtime_disable(dev);
+
+ /* Attempt to clear the runtime PM error by setting the current state again */
+ if (pm_runtime_status_suspended(dev))
+ err = pm_runtime_set_suspended(dev);
+ else
+ err = pm_runtime_set_active(dev);
+
+ if (err) {
+ drm_err(from_pvr_device(pvr_dev),
+ "%s: Failed to clear runtime PM error (new error %d)\n",
+ __func__, err);
+ }
+
+ pm_runtime_enable(dev);
+
+ return err;
+}
+
+/**
+ * pvr_power_get_clear() - Acquire a power reference, correcting any errors
+ * @pvr_dev: Device pointer
+ *
+ * Attempt to acquire a power reference on the device. If the runtime PM
+ * is in error state, attempt to clear the error and retry.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_power_get() or the runtime PM API.
+ */
+static int
+pvr_power_get_clear(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = pvr_power_get(pvr_dev);
+ if (err == 0)
+ return err;
+
+ drm_warn(from_pvr_device(pvr_dev),
+ "%s: pvr_power_get returned error %d, attempting recovery\n",
+ __func__, err);
+
+ err = pvr_power_clear_error(pvr_dev);
+ if (err)
+ return err;
+
+ return pvr_power_get(pvr_dev);
+}
+
/**
* pvr_power_reset() - Reset the GPU
* @pvr_dev: Device pointer
@@ -364,7 +421,7 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
* Take a power reference during the reset. This should prevent any interference with the
* power state during reset.
*/
- WARN_ON(pvr_power_get(pvr_dev));
+ WARN_ON(pvr_power_get_clear(pvr_dev));
down_write(&pvr_dev->reset_sem);
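The recovery added above leans on a runtime PM detail: re-asserting the current status through pm_runtime_set_active() or pm_runtime_set_suspended() clears the stored runtime PM error, after which power references can be acquired again. Reduced to its core (a sketch, not code from this patch):

	pm_runtime_disable(dev);		/* freeze the state machine */
	err = pm_runtime_set_active(dev);	/* or pm_runtime_set_suspended() */
	pm_runtime_enable(dev);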
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index 5e9bc0992824..fc415dd0d7a7 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -803,7 +803,7 @@ static void pvr_queue_start(struct pvr_queue *queue)
* the scheduler, and re-assign parent fences in the middle.
*
* Return:
- * * DRM_GPU_SCHED_STAT_NOMINAL.
+ * * DRM_GPU_SCHED_STAT_RESET.
*/
static enum drm_gpu_sched_stat
pvr_queue_timedout_job(struct drm_sched_job *s_job)
@@ -854,7 +854,7 @@ pvr_queue_timedout_job(struct drm_sched_job *s_job)
drm_sched_start(sched, 0);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
/**
@@ -1073,6 +1073,7 @@ static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue)
/**
* pvr_queue_job_init() - Initialize queue related fields in a pvr_job object.
* @job: The job to initialize.
+ * @drm_client_id: The drm_file.client_id of the file submitting the job
*
* Bind the job to a queue and allocate memory to guarantee pvr_queue_job_arm()
* and pvr_queue_job_push() can't fail. We also make sure the context type is
@@ -1082,7 +1083,7 @@ static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue)
* * 0 on success, or
* * An error code if something failed.
*/
-int pvr_queue_job_init(struct pvr_job *job)
+int pvr_queue_job_init(struct pvr_job *job, u64 drm_client_id)
{
/* Fragment jobs need at least one native fence wait on the geometry job fence. */
u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0;
@@ -1099,7 +1100,7 @@ int pvr_queue_job_init(struct pvr_job *job)
if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count)))
return -E2BIG;
- err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE);
+ err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE, drm_client_id);
if (err)
return err;
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
index 93fe9ac9f58c..fc1986d73fc8 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.h
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -143,7 +143,7 @@ struct pvr_queue {
bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f);
-int pvr_queue_job_init(struct pvr_job *job);
+int pvr_queue_job_init(struct pvr_job *job, u64 drm_client_id);
void pvr_queue_job_cleanup(struct pvr_job *job);
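End to end, the new parameter flows as: create_job() forwards pvr_file->file->client_id to pvr_queue_job_init(), which hands it to drm_sched_job_init(); the scheduler presumably records the submitting DRM client (e.g. for tracepoints). The updated call from the pvr_queue.c hunk:

	err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE,
				 drm_client_id);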
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 03535a15dd8f..3e8c6edbc17c 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+source "drivers/gpu/drm/imx/dc/Kconfig"
source "drivers/gpu/drm/imx/dcss/Kconfig"
source "drivers/gpu/drm/imx/ipuv3/Kconfig"
source "drivers/gpu/drm/imx/lcdc/Kconfig"
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index 86f38e7c7422..c7b317640d71 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DRM_IMX8_DC) += dc/
obj-$(CONFIG_DRM_IMX_DCSS) += dcss/
obj-$(CONFIG_DRM_IMX) += ipuv3/
obj-$(CONFIG_DRM_IMX_LCDC) += lcdc/
diff --git a/drivers/gpu/drm/imx/dc/Kconfig b/drivers/gpu/drm/imx/dc/Kconfig
new file mode 100644
index 000000000000..415993207f2e
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/Kconfig
@@ -0,0 +1,13 @@
+config DRM_IMX8_DC
+ tristate "Freescale i.MX8 Display Controller Graphics"
+ depends on DRM && COMMON_CLK && OF && (ARCH_MXC || COMPILE_TEST)
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_DMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select GENERIC_IRQ_CHIP
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ Enable Freescale i.MX8 Display Controller (DC) graphics support.
diff --git a/drivers/gpu/drm/imx/dc/Makefile b/drivers/gpu/drm/imx/dc/Makefile
new file mode 100644
index 000000000000..b9d33c074984
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+imx8-dc-drm-objs := dc-cf.o dc-crtc.o dc-de.o dc-drv.o dc-ed.o dc-fg.o dc-fl.o \
+ dc-fu.o dc-fw.o dc-ic.o dc-kms.o dc-lb.o dc-pe.o \
+ dc-plane.o dc-tc.o
+
+obj-$(CONFIG_DRM_IMX8_DC) += imx8-dc-drm.o
diff --git a/drivers/gpu/drm/imx/dc/dc-cf.c b/drivers/gpu/drm/imx/dc/dc-cf.c
new file mode 100644
index 000000000000..2f077161e912
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-cf.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define STATICCONTROL 0x8
+
+#define FRAMEDIMENSIONS 0xc
+#define HEIGHT(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define WIDTH(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define CONSTANTCOLOR 0x10
+#define BLUE(x) FIELD_PREP(GENMASK(15, 8), (x))
+
+static const struct dc_subdev_info dc_cf_info[] = {
+ { .reg_start = 0x56180960, .id = 0, },
+ { .reg_start = 0x561809e0, .id = 1, },
+ { .reg_start = 0x561809a0, .id = 4, },
+ { .reg_start = 0x56180a20, .id = 5, },
+};
+
+static const struct regmap_range dc_cf_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, CONSTANTCOLOR),
+};
+
+static const struct regmap_access_table dc_cf_regmap_access_table = {
+ .yes_ranges = dc_cf_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_cf_regmap_ranges),
+};
+
+static const struct regmap_config dc_cf_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_cf_regmap_access_table,
+ .rd_table = &dc_cf_regmap_access_table,
+ .max_register = CONSTANTCOLOR,
+};
+
+static inline void dc_cf_enable_shden(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, STATICCONTROL, SHDEN);
+}
+
+enum dc_link_id dc_cf_get_link_id(struct dc_cf *cf)
+{
+ return cf->link;
+}
+
+void dc_cf_framedimensions(struct dc_cf *cf, unsigned int w,
+ unsigned int h)
+{
+ regmap_write(cf->reg_cfg, FRAMEDIMENSIONS, WIDTH(w) | HEIGHT(h));
+}
+
+void dc_cf_constantcolor_black(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, CONSTANTCOLOR, 0);
+}
+
+void dc_cf_constantcolor_blue(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, CONSTANTCOLOR, BLUE(0xff));
+}
+
+void dc_cf_init(struct dc_cf *cf)
+{
+ dc_cf_enable_shden(cf);
+}
+
+static int dc_cf_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_cfg;
+ struct dc_cf *cf;
+ int id;
+
+ cf = devm_kzalloc(dev, sizeof(*cf), GFP_KERNEL);
+ if (!cf)
+ return -ENOMEM;
+
+ res_pec = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ cf->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_cf_cfg_regmap_config);
+ if (IS_ERR(cf->reg_cfg))
+ return PTR_ERR(cf->reg_cfg);
+
+ id = dc_subdev_get_id(dc_cf_info, ARRAY_SIZE(dc_cf_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ switch (id) {
+ case 0:
+ cf->link = LINK_ID_CONSTFRAME0;
+ dc_drm->cf_cont[0] = cf;
+ break;
+ case 1:
+ cf->link = LINK_ID_CONSTFRAME1;
+ dc_drm->cf_cont[1] = cf;
+ break;
+ case 4:
+ cf->link = LINK_ID_CONSTFRAME4;
+ dc_drm->cf_safe[0] = cf;
+ break;
+ case 5:
+ cf->link = LINK_ID_CONSTFRAME5;
+ dc_drm->cf_safe[1] = cf;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct component_ops dc_cf_ops = {
+ .bind = dc_cf_bind,
+};
+
+static int dc_cf_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_cf_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_cf_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_cf_ops);
+}
+
+static const struct of_device_id dc_cf_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-constframe" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_cf_dt_ids);
+
+struct platform_driver dc_cf_driver = {
+ .probe = dc_cf_probe,
+ .remove = dc_cf_remove,
+ .driver = {
+ .name = "imx8-dc-constframe",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_cf_dt_ids,
+ },
+};
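dc_cf_bind() maps a device instance to its ConstFrame link by looking up the MMIO base in dc_cf_info[]; dc_subdev_get_id() itself lives in dc-drv.c and is not part of this hunk, so the matching logic sketched below is a presumption:

	/* Presumed lookup: match the resource start against the info table. */
	for (i = 0; i < ARRAY_SIZE(dc_cf_info); i++)
		if (dc_cf_info[i].reg_start == res_pec->start)
			return dc_cf_info[i].id;
	return -EINVAL;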
diff --git a/drivers/gpu/drm/imx/dc/dc-crtc.c b/drivers/gpu/drm/imx/dc/dc-crtc.c
new file mode 100644
index 000000000000..31d3a982deaf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-crtc.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-kms.h"
+#include "dc-pe.h"
+
+#define dc_crtc_dbg(crtc, fmt, ...) \
+do { \
+ struct drm_crtc *_crtc = (crtc); \
+ drm_dbg_kms(_crtc->dev, "[CRTC:%d:%s] " fmt, \
+ _crtc->base.id, _crtc->name, ##__VA_ARGS__); \
+} while (0)
+
+#define dc_crtc_err(crtc, fmt, ...) \
+do { \
+ struct drm_crtc *_crtc = (crtc); \
+ drm_err(_crtc->dev, "[CRTC:%d:%s] " fmt, \
+ _crtc->base.id, _crtc->name, ##__VA_ARGS__); \
+} while (0)
+
+#define DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(c) \
+do { \
+ unsigned long ret; \
+ ret = wait_for_completion_timeout(&dc_crtc->c, HZ); \
+ if (ret == 0) \
+ dc_crtc_err(crtc, "%s: wait for " #c " timeout\n", \
+ __func__); \
+} while (0)
+
+#define DC_CRTC_CHECK_FRAMEGEN_FIFO(fg) \
+do { \
+ struct dc_fg *_fg = (fg); \
+ if (dc_fg_secondary_requests_to_read_empty_fifo(_fg)) { \
+ dc_fg_secondary_clear_channel_status(_fg); \
+ dc_crtc_err(crtc, "%s: FrameGen FIFO empty\n", \
+ __func__); \
+ } \
+} while (0)
+
+#define DC_CRTC_WAIT_FOR_FRAMEGEN_SECONDARY_SYNCUP(fg) \
+do { \
+ if (dc_fg_wait_for_secondary_syncup(fg)) \
+ dc_crtc_err(crtc, \
+ "%s: FrameGen secondary channel isn't syncup\n",\
+ __func__); \
+} while (0)
+
+static inline struct dc_crtc *to_dc_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct dc_crtc, base);
+}
+
+static u32 dc_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ return dc_fg_get_frame_index(dc_crtc->fg);
+}
+
+static int dc_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ enable_irq(dc_crtc->irq_dec_framecomplete);
+
+ return 0;
+}
+
+static void dc_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ /* nosync due to atomic context */
+ disable_irq_nosync(dc_crtc->irq_dec_framecomplete);
+}
+
+static const struct drm_crtc_funcs dc_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .get_vblank_counter = dc_crtc_get_vblank_counter,
+ .enable_vblank = dc_crtc_enable_vblank,
+ .disable_vblank = dc_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
+static void dc_crtc_queue_state_event(struct drm_crtc_state *crtc_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc_state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc));
+ WARN_ON(dc_crtc->event);
+ dc_crtc->event = crtc_state->event;
+ crtc_state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static inline enum drm_mode_status
+dc_crtc_check_clock(struct dc_crtc *dc_crtc, int clk_khz)
+{
+ return dc_fg_check_clock(dc_crtc->fg, clk_khz);
+}
+
+static enum drm_mode_status
+dc_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum drm_mode_status status;
+
+ status = dc_crtc_check_clock(dc_crtc, mode->clock);
+ if (status != MODE_OK)
+ return status;
+
+ if (mode->crtc_clock > DC_FRAMEGEN_MAX_CLOCK_KHZ)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static int
+dc_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adj = &new_crtc_state->adjusted_mode;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum drm_mode_status status;
+
+ status = dc_crtc_check_clock(dc_crtc, adj->clock);
+ if (status != MODE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+dc_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(crtc->dev);
+ int idx, ret;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+ !new_crtc_state->active)
+ return;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ return;
+
+ /* request pixel engine power-on when CRTC starts to be active */
+ ret = pm_runtime_resume_and_get(dc_drm->pe->dev);
+ if (ret)
+ dc_crtc_err(crtc, "failed to get DC pixel engine RPM: %d\n",
+ ret);
+
+ drm_dev_exit(idx);
+}
+
+static void
+dc_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int idx;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+ (!old_crtc_state->active && !new_crtc_state->active))
+ return;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ enable_irq(dc_crtc->irq_ed_cont_shdload);
+
+ /* flush plane update out to display */
+ dc_ed_pec_sync_trigger(dc_crtc->ed_cont);
+
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_cont_shdload_done);
+
+ disable_irq(dc_crtc->irq_ed_cont_shdload);
+
+ DC_CRTC_CHECK_FRAMEGEN_FIFO(dc_crtc->fg);
+
+ drm_dev_exit(idx);
+
+out:
+ dc_crtc_queue_state_event(new_crtc_state);
+}
+
+static void
+dc_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adj = &new_crtc_state->adjusted_mode;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum dc_link_id cf_link;
+ int idx, ret;
+
+ dc_crtc_dbg(crtc, "mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(adj));
+
+ drm_crtc_vblank_on(crtc);
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ /* request display engine power-on when CRTC is enabled */
+ ret = pm_runtime_resume_and_get(dc_crtc->de->dev);
+ if (ret < 0)
+ dc_crtc_err(crtc, "failed to get DC display engine RPM: %d\n",
+ ret);
+
+ enable_irq(dc_crtc->irq_dec_shdload);
+ enable_irq(dc_crtc->irq_ed_cont_shdload);
+ enable_irq(dc_crtc->irq_ed_safe_shdload);
+
+ dc_fg_cfg_videomode(dc_crtc->fg, adj);
+
+ dc_cf_framedimensions(dc_crtc->cf_cont,
+ adj->crtc_hdisplay, adj->crtc_vdisplay);
+ dc_cf_framedimensions(dc_crtc->cf_safe,
+ adj->crtc_hdisplay, adj->crtc_vdisplay);
+
+ /* constframe in safety stream shows blue frame */
+ dc_cf_constantcolor_blue(dc_crtc->cf_safe);
+ cf_link = dc_cf_get_link_id(dc_crtc->cf_safe);
+ dc_ed_pec_src_sel(dc_crtc->ed_safe, cf_link);
+
+ /* show CRTC background if no plane is enabled */
+ if (new_crtc_state->plane_mask == 0) {
+ /* constframe in content stream shows black frame */
+ dc_cf_constantcolor_black(dc_crtc->cf_cont);
+
+ cf_link = dc_cf_get_link_id(dc_crtc->cf_cont);
+ dc_ed_pec_src_sel(dc_crtc->ed_cont, cf_link);
+ }
+
+ dc_fg_enable_clock(dc_crtc->fg);
+ dc_ed_pec_sync_trigger(dc_crtc->ed_cont);
+ dc_ed_pec_sync_trigger(dc_crtc->ed_safe);
+ dc_fg_shdtokgen(dc_crtc->fg);
+ dc_fg_enable(dc_crtc->fg);
+
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_safe_shdload_done);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_cont_shdload_done);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(dec_shdload_done);
+
+ disable_irq(dc_crtc->irq_ed_safe_shdload);
+ disable_irq(dc_crtc->irq_ed_cont_shdload);
+ disable_irq(dc_crtc->irq_dec_shdload);
+
+ DC_CRTC_WAIT_FOR_FRAMEGEN_SECONDARY_SYNCUP(dc_crtc->fg);
+
+ DC_CRTC_CHECK_FRAMEGEN_FIFO(dc_crtc->fg);
+
+ drm_dev_exit(idx);
+
+out:
+ dc_crtc_queue_state_event(new_crtc_state);
+}
+
+static void
+dc_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(crtc->dev);
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int idx, ret;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ enable_irq(dc_crtc->irq_dec_seqcomplete);
+ dc_fg_disable(dc_crtc->fg);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(dec_seqcomplete_done);
+ disable_irq(dc_crtc->irq_dec_seqcomplete);
+
+ dc_fg_disable_clock(dc_crtc->fg);
+
+ /* request pixel engine power-off as plane is off too */
+ ret = pm_runtime_put(dc_drm->pe->dev);
+ if (ret)
+ dc_crtc_err(crtc, "failed to put DC pixel engine RPM: %d\n",
+ ret);
+
+ /* request display engine power-off when CRTC is disabled */
+ ret = pm_runtime_put(dc_crtc->de->dev);
+ if (ret < 0)
+ dc_crtc_err(crtc, "failed to put DC display engine RPM: %d\n",
+ ret);
+
+ drm_dev_exit(idx);
+
+out:
+ drm_crtc_vblank_off(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (new_crtc_state->event && !new_crtc_state->active) {
+ drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static bool dc_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int vdisplay = mode->crtc_vdisplay;
+ int vtotal = mode->crtc_vtotal;
+ bool reliable;
+ int line;
+ int idx;
+
+ if (stime)
+ *stime = ktime_get();
+
+ if (!drm_dev_enter(crtc->dev, &idx)) {
+ reliable = false;
+ *vpos = 0;
+ *hpos = 0;
+ goto out;
+ }
+
+ /* line index starts with 0 for the first active output line */
+ line = dc_fg_get_line_index(dc_crtc->fg);
+
+ if (line < vdisplay)
+ /* active scanout area - positive */
+ *vpos = line + 1;
+ else
+ /* inside vblank - negative */
+ *vpos = line - (vtotal - 1);
+
+ *hpos = 0;
+
+ reliable = true;
+
+ drm_dev_exit(idx);
+out:
+ if (etime)
+ *etime = ktime_get();
+
+ return reliable;
+}
+
+static const struct drm_crtc_helper_funcs dc_helper_funcs = {
+ .mode_valid = dc_crtc_mode_valid,
+ .atomic_check = dc_crtc_atomic_check,
+ .atomic_begin = dc_crtc_atomic_begin,
+ .atomic_flush = dc_crtc_atomic_flush,
+ .atomic_enable = dc_crtc_atomic_enable,
+ .atomic_disable = dc_crtc_atomic_disable,
+ .get_scanout_position = dc_crtc_get_scanout_position,
+};
+
+static irqreturn_t dc_crtc_irq_handler_dec_framecomplete(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+ struct drm_crtc *crtc = &dc_crtc->base;
+ unsigned long flags;
+
+ drm_crtc_handle_vblank(crtc);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (dc_crtc->event) {
+ drm_crtc_send_vblank_event(crtc, dc_crtc->event);
+ dc_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_dec_seqcomplete_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->dec_seqcomplete_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dc_crtc_irq_handler_dec_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->dec_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_ed_cont_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->ed_cont_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_ed_safe_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->ed_safe_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static int dc_crtc_request_irqs(struct drm_device *drm, struct dc_crtc *dc_crtc)
+{
+ struct {
+ struct device *dev;
+ unsigned int irq;
+ irqreturn_t (*irq_handler)(int irq, void *dev_id);
+ } irqs[DC_CRTC_IRQS] = {
+ {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_framecomplete,
+ dc_crtc_irq_handler_dec_framecomplete,
+ }, {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_seqcomplete,
+ dc_crtc_irq_handler_dec_seqcomplete_done,
+ }, {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_shdload,
+ dc_crtc_irq_handler_dec_shdload_done,
+ }, {
+ dc_crtc->ed_cont->dev,
+ dc_crtc->irq_ed_cont_shdload,
+ dc_crtc_irq_handler_ed_cont_shdload_done,
+ }, {
+ dc_crtc->ed_safe->dev,
+ dc_crtc->irq_ed_safe_shdload,
+ dc_crtc_irq_handler_ed_safe_shdload_done,
+ },
+ };
+ int i, ret;
+
+ for (i = 0; i < DC_CRTC_IRQS; i++) {
+ struct dc_crtc_irq *irq = &dc_crtc->irqs[i];
+
+ ret = devm_request_irq(irqs[i].dev, irqs[i].irq,
+ irqs[i].irq_handler, IRQF_NO_AUTOEN,
+ dev_name(irqs[i].dev), dc_crtc);
+ if (ret) {
+ dev_err(irqs[i].dev, "failed to request irq(%u): %d\n",
+ irqs[i].irq, ret);
+ return ret;
+ }
+
+ irq->dc_crtc = dc_crtc;
+ irq->irq = irqs[i].irq;
+ }
+
+ return 0;
+}
+
+int dc_crtc_init(struct dc_drm_device *dc_drm, int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+ struct dc_de *de = dc_drm->de[crtc_index];
+ struct dc_pe *pe = dc_drm->pe;
+ struct dc_plane *dc_primary;
+ int ret;
+
+ dc_crtc->de = de;
+
+ init_completion(&dc_crtc->dec_seqcomplete_done);
+ init_completion(&dc_crtc->dec_shdload_done);
+ init_completion(&dc_crtc->ed_cont_shdload_done);
+ init_completion(&dc_crtc->ed_safe_shdload_done);
+
+ dc_crtc->cf_cont = pe->cf_cont[crtc_index];
+ dc_crtc->cf_safe = pe->cf_safe[crtc_index];
+ dc_crtc->ed_cont = pe->ed_cont[crtc_index];
+ dc_crtc->ed_safe = pe->ed_safe[crtc_index];
+ dc_crtc->fg = de->fg;
+
+ dc_crtc->irq_dec_framecomplete = de->irq_framecomplete;
+ dc_crtc->irq_dec_seqcomplete = de->irq_seqcomplete;
+ dc_crtc->irq_dec_shdload = de->irq_shdload;
+ dc_crtc->irq_ed_safe_shdload = dc_crtc->ed_safe->irq_shdload;
+ dc_crtc->irq_ed_cont_shdload = dc_crtc->ed_cont->irq_shdload;
+
+ dc_primary = &dc_drm->dc_primary[crtc_index];
+ ret = dc_plane_init(dc_drm, dc_primary);
+ if (ret) {
+ dev_err(de->dev, "failed to initialize primary plane: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&dc_crtc->base, &dc_helper_funcs);
+
+ ret = drm_crtc_init_with_planes(drm, &dc_crtc->base, &dc_primary->base,
+ NULL, &dc_crtc_funcs, NULL);
+ if (ret)
+ dev_err(de->dev, "failed to add CRTC: %d\n", ret);
+
+ return ret;
+}
+
+int dc_crtc_post_init(struct dc_drm_device *dc_drm, int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+
+ return dc_crtc_request_irqs(drm, dc_crtc);
+}
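Worked numbers for the scanout-position convention in dc_crtc_get_scanout_position(), using hypothetical timings: with crtc_vdisplay = 1080 and crtc_vtotal = 1125, active lines 0..1079 report vpos = line + 1 (1..1080), while vblank lines 1080..1124 report vpos = line - 1124 (-44..0).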
diff --git a/drivers/gpu/drm/imx/dc/dc-de.c b/drivers/gpu/drm/imx/dc/dc-de.c
new file mode 100644
index 000000000000..5a3125596fdf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-de.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+
+#define POLARITYCTRL 0xc
+#define POLEN_HIGH BIT(2)
+
+static const struct dc_subdev_info dc_de_info[] = {
+ { .reg_start = 0x5618b400, .id = 0, },
+ { .reg_start = 0x5618b420, .id = 1, },
+};
+
+static const struct regmap_range dc_de_regmap_ranges[] = {
+ regmap_reg_range(POLARITYCTRL, POLARITYCTRL),
+};
+
+static const struct regmap_access_table dc_de_regmap_access_table = {
+ .yes_ranges = dc_de_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_de_regmap_ranges),
+};
+
+static const struct regmap_config dc_de_top_regmap_config = {
+ .name = "top",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_de_regmap_access_table,
+ .rd_table = &dc_de_regmap_access_table,
+ .max_register = POLARITYCTRL,
+};
+
+static inline void dc_dec_init(struct dc_de *de)
+{
+ regmap_write_bits(de->reg_top, POLARITYCTRL, POLARITYCTRL, POLEN_HIGH);
+}
+
+static int dc_de_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_top;
+ void __iomem *base_top;
+ struct dc_de *de;
+ int ret, id;
+
+ de = devm_kzalloc(dev, sizeof(*de), GFP_KERNEL);
+ if (!de)
+ return -ENOMEM;
+
+ base_top = devm_platform_get_and_ioremap_resource(pdev, 0, &res_top);
+ if (IS_ERR(base_top))
+ return PTR_ERR(base_top);
+
+ de->reg_top = devm_regmap_init_mmio(dev, base_top,
+ &dc_de_top_regmap_config);
+ if (IS_ERR(de->reg_top))
+ return PTR_ERR(de->reg_top);
+
+ de->irq_shdload = platform_get_irq_byname(pdev, "shdload");
+ if (de->irq_shdload < 0)
+ return de->irq_shdload;
+
+ de->irq_framecomplete = platform_get_irq_byname(pdev, "framecomplete");
+ if (de->irq_framecomplete < 0)
+ return de->irq_framecomplete;
+
+ de->irq_seqcomplete = platform_get_irq_byname(pdev, "seqcomplete");
+ if (de->irq_seqcomplete < 0)
+ return de->irq_seqcomplete;
+
+ de->dev = dev;
+
+ dev_set_drvdata(dev, de);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ id = dc_subdev_get_id(dc_de_info, ARRAY_SIZE(dc_de_info), res_top);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ dc_drm->de[id] = de;
+
+ return 0;
+}
+
+/*
+ * It's possible to get the child device pointers from the child component
+ * bind callbacks, but that would rely on the component helper binding the
+ * display engine component first. To avoid that ordering dependency, fetch
+ * the pointers from dc_drm after all components have been bound.
+ */
+void dc_de_post_bind(struct dc_drm_device *dc_drm)
+{
+ struct dc_de *de;
+ int i;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ de = dc_drm->de[i];
+ de->fg = dc_drm->fg[i];
+ de->tc = dc_drm->tc[i];
+ }
+}
+
+static const struct component_ops dc_de_ops = {
+ .bind = dc_de_bind,
+};
+
+static int dc_de_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = component_add(&pdev->dev, &dc_de_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_de_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_de_ops);
+}
+
+static int dc_de_runtime_resume(struct device *dev)
+{
+ struct dc_de *de = dev_get_drvdata(dev);
+
+ dc_dec_init(de);
+ dc_fg_init(de->fg);
+ dc_tc_init(de->tc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dc_de_pm_ops = {
+ RUNTIME_PM_OPS(NULL, dc_de_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_de_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-display-engine" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_de_dt_ids);
+
+struct platform_driver dc_de_driver = {
+ .probe = dc_de_probe,
+ .remove = dc_de_remove,
+ .driver = {
+ .name = "imx8-dc-display-engine",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_de_dt_ids,
+ .pm = pm_sleep_ptr(&dc_de_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-de.h b/drivers/gpu/drm/imx/dc/dc-de.h
new file mode 100644
index 000000000000..211f3fcc1a9a
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-de.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_DISPLAY_ENGINE_H__
+#define __DC_DISPLAY_ENGINE_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <drm/drm_modes.h>
+
+#define DC_DISPLAYS 2
+
+#define DC_FRAMEGEN_MAX_FRAME_INDEX 0x3ffff
+#define DC_FRAMEGEN_MAX_CLOCK_KHZ 300000
+
+struct dc_fg {
+ struct device *dev;
+ struct regmap *reg;
+ struct clk *clk_disp;
+};
+
+struct dc_tc {
+ struct device *dev;
+ struct regmap *reg;
+};
+
+struct dc_de {
+ struct device *dev;
+ struct regmap *reg_top;
+ struct dc_fg *fg;
+ struct dc_tc *tc;
+ int irq_shdload;
+ int irq_framecomplete;
+ int irq_seqcomplete;
+};
+
+/* Frame Generator Unit */
+void dc_fg_cfg_videomode(struct dc_fg *fg, struct drm_display_mode *m);
+void dc_fg_enable(struct dc_fg *fg);
+void dc_fg_disable(struct dc_fg *fg);
+void dc_fg_shdtokgen(struct dc_fg *fg);
+u32 dc_fg_get_frame_index(struct dc_fg *fg);
+u32 dc_fg_get_line_index(struct dc_fg *fg);
+bool dc_fg_wait_for_frame_index_moving(struct dc_fg *fg);
+bool dc_fg_secondary_requests_to_read_empty_fifo(struct dc_fg *fg);
+void dc_fg_secondary_clear_channel_status(struct dc_fg *fg);
+int dc_fg_wait_for_secondary_syncup(struct dc_fg *fg);
+void dc_fg_enable_clock(struct dc_fg *fg);
+void dc_fg_disable_clock(struct dc_fg *fg);
+enum drm_mode_status dc_fg_check_clock(struct dc_fg *fg, int clk_khz);
+void dc_fg_init(struct dc_fg *fg);
+
+/* Timing Controller Unit */
+void dc_tc_init(struct dc_tc *tc);
+
+#endif /* __DC_DISPLAY_ENGINE_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-drv.c b/drivers/gpu/drm/imx/dc/dc-drv.c
new file mode 100644
index 000000000000..04f021d2d6cf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-drv.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_of.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+struct dc_priv {
+ struct drm_device *drm;
+ struct clk *clk_cfg;
+};
+
+DEFINE_DRM_GEM_DMA_FOPS(dc_drm_driver_fops);
+
+static const struct drm_driver dc_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
+ .fops = &dc_drm_driver_fops,
+ .name = "imx8-dc",
+ .desc = "i.MX8 DC DRM graphics",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+};
+
+static void
+dc_add_components(struct device *dev, struct component_match **matchptr)
+{
+ struct device_node *child, *grandchild;
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* The interrupt controller is not a component. */
+ if (of_device_is_compatible(child, "fsl,imx8qxp-dc-intc"))
+ continue;
+
+ drm_of_component_match_add(dev, matchptr, component_compare_of,
+ child);
+
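+		/* Match the subunits below each child (e.g. pixel engine units). */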
+ for_each_available_child_of_node(child, grandchild)
+ drm_of_component_match_add(dev, matchptr,
+ component_compare_of,
+ grandchild);
+ }
+}
+
+static int dc_drm_component_bind_all(struct dc_drm_device *dc_drm)
+{
+ struct drm_device *drm = &dc_drm->base;
+ int ret;
+
+ ret = component_bind_all(drm->dev, dc_drm);
+ if (ret)
+ return ret;
+
+ dc_de_post_bind(dc_drm);
+ dc_pe_post_bind(dc_drm);
+
+ return 0;
+}
+
+static void dc_drm_component_unbind_all(void *ptr)
+{
+ struct dc_drm_device *dc_drm = ptr;
+ struct drm_device *drm = &dc_drm->base;
+
+ component_unbind_all(drm->dev, dc_drm);
+}
+
+static int dc_drm_bind(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ struct dc_drm_device *dc_drm;
+ struct drm_device *drm;
+ int ret;
+
+ dc_drm = devm_drm_dev_alloc(dev, &dc_drm_driver, struct dc_drm_device,
+ base);
+ if (IS_ERR(dc_drm))
+ return PTR_ERR(dc_drm);
+
+ drm = &dc_drm->base;
+
+ ret = dc_drm_component_bind_all(dc_drm);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, dc_drm_component_unbind_all,
+ dc_drm);
+ if (ret)
+ return ret;
+
+ ret = dc_kms_init(dc_drm);
+ if (ret)
+ return ret;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret) {
+ dev_err(dev, "failed to register drm device: %d\n", ret);
+ goto err;
+ }
+
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_XRGB8888);
+
+ priv->drm = drm;
+
+ return 0;
+
+err:
+ dc_kms_uninit(dc_drm);
+
+ return ret;
+}
+
+static void dc_drm_unbind(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(priv->drm);
+ struct drm_device *drm = &dc_drm->base;
+
+ priv->drm = NULL;
+ drm_dev_unplug(drm);
+ dc_kms_uninit(dc_drm);
+ drm_atomic_helper_shutdown(drm);
+}
+
+static const struct component_master_ops dc_drm_ops = {
+ .bind = dc_drm_bind,
+ .unbind = dc_drm_unbind,
+};
+
+static int dc_probe(struct platform_device *pdev)
+{
+ struct component_match *match = NULL;
+ struct dc_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk_cfg = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk_cfg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk_cfg),
+ "failed to get cfg clock\n");
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret)
+ return ret;
+
+ dc_add_components(&pdev->dev, &match);
+
+ ret = component_master_add_with_match(&pdev->dev, &dc_drm_ops, match);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component master\n");
+
+ return 0;
+}
+
+static void dc_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &dc_drm_ops);
+}
+
+static int dc_runtime_suspend(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk_cfg);
+
+ return 0;
+}
+
+static int dc_runtime_resume(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk_cfg);
+ if (ret)
+ dev_err(dev, "failed to enable cfg clock: %d\n", ret);
+
+ return ret;
+}
+
+static int dc_suspend(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(priv->drm);
+}
+
+static int dc_resume(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(priv->drm);
+}
+
+static void dc_shutdown(struct platform_device *pdev)
+{
+ struct dc_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ drm_atomic_helper_shutdown(priv->drm);
+}
+
+static const struct dev_pm_ops dc_pm_ops = {
+ RUNTIME_PM_OPS(dc_runtime_suspend, dc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dc_suspend, dc_resume)
+};
+
+static const struct of_device_id dc_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_dt_ids);
+
+static struct platform_driver dc_driver = {
+ .probe = dc_probe,
+ .remove = dc_remove,
+ .shutdown = dc_shutdown,
+ .driver = {
+ .name = "imx8-dc",
+ .of_match_table = dc_dt_ids,
+ .pm = pm_sleep_ptr(&dc_pm_ops),
+ },
+};
+
+static struct platform_driver * const dc_drivers[] = {
+ &dc_cf_driver,
+ &dc_de_driver,
+ &dc_ed_driver,
+ &dc_fg_driver,
+ &dc_fl_driver,
+ &dc_fw_driver,
+ &dc_ic_driver,
+ &dc_lb_driver,
+ &dc_pe_driver,
+ &dc_tc_driver,
+ &dc_driver,
+};
+
+static int __init dc_drm_init(void)
+{
+ return platform_register_drivers(dc_drivers, ARRAY_SIZE(dc_drivers));
+}
+
+static void __exit dc_drm_exit(void)
+{
+ platform_unregister_drivers(dc_drivers, ARRAY_SIZE(dc_drivers));
+}
+
+module_init(dc_drm_init);
+module_exit(dc_drm_exit);
+
+MODULE_DESCRIPTION("i.MX8 Display Controller DRM Driver");
+MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/imx/dc/dc-drv.h b/drivers/gpu/drm/imx/dc/dc-drv.h
new file mode 100644
index 000000000000..eb61b8c76269
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-drv.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_DRV_H__
+#define __DC_DRV_H__
+
+#include <linux/container_of.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+
+#include "dc-de.h"
+#include "dc-kms.h"
+#include "dc-pe.h"
+
+/**
+ * struct dc_drm_device - DC specific drm_device
+ */
+struct dc_drm_device {
+ /** @base: base drm_device structure */
+ struct drm_device base;
+ /** @dc_crtc: DC specific CRTC list */
+ struct dc_crtc dc_crtc[DC_DISPLAYS];
+ /** @dc_primary: DC specific primary plane list */
+ struct dc_plane dc_primary[DC_DISPLAYS];
+ /** @encoder: encoder list */
+ struct drm_encoder encoder[DC_DISPLAYS];
+	/** @cf_safe: constframe list (safety stream) */
+	struct dc_cf *cf_safe[DC_DISPLAYS];
+	/** @cf_cont: constframe list (content stream) */
+	struct dc_cf *cf_cont[DC_DISPLAYS];
+	/** @de: display engine list */
+	struct dc_de *de[DC_DISPLAYS];
+	/** @ed_safe: extdst list (safety stream) */
+	struct dc_ed *ed_safe[DC_DISPLAYS];
+	/** @ed_cont: extdst list (content stream) */
+	struct dc_ed *ed_cont[DC_DISPLAYS];
+	/** @fg: framegen list */
+	struct dc_fg *fg[DC_DISPLAYS];
+	/** @fu_disp: fetchunit list (used by display engine) */
+	struct dc_fu *fu_disp[DC_DISP_FU_CNT];
+ /** @lb: layerblend list */
+ struct dc_lb *lb[DC_LB_CNT];
+ /** @pe: pixel engine */
+ struct dc_pe *pe;
+ /** @tc: tcon list */
+ struct dc_tc *tc[DC_DISPLAYS];
+};
+
+struct dc_subdev_info {
+ resource_size_t reg_start;
+ int id;
+};
+
+static inline struct dc_drm_device *to_dc_drm_device(struct drm_device *drm)
+{
+ return container_of(drm, struct dc_drm_device, base);
+}
+
+int dc_crtc_init(struct dc_drm_device *dc_drm, int crtc_index);
+int dc_crtc_post_init(struct dc_drm_device *dc_drm, int crtc_index);
+
+int dc_kms_init(struct dc_drm_device *dc_drm);
+void dc_kms_uninit(struct dc_drm_device *dc_drm);
+
+int dc_plane_init(struct dc_drm_device *dc_drm, struct dc_plane *dc_plane);
+
+extern struct platform_driver dc_cf_driver;
+extern struct platform_driver dc_de_driver;
+extern struct platform_driver dc_ed_driver;
+extern struct platform_driver dc_fg_driver;
+extern struct platform_driver dc_fl_driver;
+extern struct platform_driver dc_fw_driver;
+extern struct platform_driver dc_ic_driver;
+extern struct platform_driver dc_lb_driver;
+extern struct platform_driver dc_pe_driver;
+extern struct platform_driver dc_tc_driver;
+
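+/*
+ * Map a subunit's register base address to its fixed instance index, as
+ * listed in the per-unit dc_subdev_info tables.
+ */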
+static inline int dc_subdev_get_id(const struct dc_subdev_info *info,
+ int info_cnt, struct resource *res)
+{
+ int i;
+
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < info_cnt; i++)
+ if (info[i].reg_start == res->start)
+ return info[i].id;
+
+ return -EINVAL;
+}
+
+void dc_de_post_bind(struct dc_drm_device *dc_drm);
+void dc_pe_post_bind(struct dc_drm_device *dc_drm);
+
+#endif /* __DC_DRV_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-ed.c b/drivers/gpu/drm/imx/dc/dc-ed.c
new file mode 100644
index 000000000000..86ecc22d0a55
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-ed.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define PIXENGCFG_STATIC 0x8
+#define POWERDOWN BIT(4)
+#define SYNC_MODE BIT(8)
+#define SINGLE 0
+#define DIV_MASK GENMASK(23, 16)
+#define DIV(x) FIELD_PREP(DIV_MASK, (x))
+#define DIV_RESET 0x80
+
+#define PIXENGCFG_DYNAMIC 0xc
+
+#define PIXENGCFG_TRIGGER 0x14
+#define SYNC_TRIGGER BIT(0)
+
+#define STATICCONTROL 0x8
+#define KICK_MODE BIT(8)
+#define EXTERNAL BIT(8)
+#define PERFCOUNTMODE BIT(12)
+
+#define CONTROL 0xc
+#define GAMMAAPPLYENABLE BIT(0)
+
+static const struct dc_subdev_info dc_ed_info[] = {
+ { .reg_start = 0x56180980, .id = 0, },
+ { .reg_start = 0x56180a00, .id = 1, },
+ { .reg_start = 0x561809c0, .id = 4, },
+ { .reg_start = 0x56180a40, .id = 5, },
+};
+
+static const struct regmap_range dc_ed_pec_regmap_write_ranges[] = {
+ regmap_reg_range(PIXENGCFG_STATIC, PIXENGCFG_STATIC),
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+ regmap_reg_range(PIXENGCFG_TRIGGER, PIXENGCFG_TRIGGER),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_write_table = {
+ .yes_ranges = dc_ed_pec_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_write_ranges),
+};
+
+static const struct regmap_range dc_ed_pec_regmap_read_ranges[] = {
+ regmap_reg_range(PIXENGCFG_STATIC, PIXENGCFG_STATIC),
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_read_table = {
+ .yes_ranges = dc_ed_pec_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_read_ranges),
+};
+
+static const struct regmap_range dc_ed_pec_regmap_volatile_ranges[] = {
+ regmap_reg_range(PIXENGCFG_TRIGGER, PIXENGCFG_TRIGGER),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_volatile_table = {
+ .yes_ranges = dc_ed_pec_regmap_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_volatile_ranges),
+};
+
+static const struct regmap_config dc_ed_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ed_pec_regmap_write_table,
+ .rd_table = &dc_ed_pec_regmap_read_table,
+ .volatile_table = &dc_ed_pec_regmap_volatile_table,
+ .max_register = PIXENGCFG_TRIGGER,
+};
+
+static const struct regmap_range dc_ed_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, STATICCONTROL),
+ regmap_reg_range(CONTROL, CONTROL),
+};
+
+static const struct regmap_access_table dc_ed_regmap_access_table = {
+ .yes_ranges = dc_ed_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_regmap_ranges),
+};
+
+static const struct regmap_config dc_ed_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ed_regmap_access_table,
+ .rd_table = &dc_ed_regmap_access_table,
+ .max_register = CONTROL,
+};
+
+static const enum dc_link_id src_sels[] = {
+ LINK_ID_NONE,
+ LINK_ID_CONSTFRAME0,
+ LINK_ID_CONSTFRAME1,
+ LINK_ID_CONSTFRAME4,
+ LINK_ID_CONSTFRAME5,
+ LINK_ID_LAYERBLEND3,
+ LINK_ID_LAYERBLEND2,
+ LINK_ID_LAYERBLEND1,
+ LINK_ID_LAYERBLEND0,
+};
+
+static inline void dc_ed_pec_enable_shden(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, SHDEN, SHDEN);
+}
+
+static inline void dc_ed_pec_poweron(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, POWERDOWN, 0);
+}
+
+static inline void dc_ed_pec_sync_mode_single(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, SYNC_MODE, SINGLE);
+}
+
+static inline void dc_ed_pec_div_reset(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, DIV_MASK,
+ DIV(DIV_RESET));
+}
+
+void dc_ed_pec_src_sel(struct dc_ed *ed, enum dc_link_id src)
+{
+ int i;
+
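+	/* Only program a source that this ExtDst unit can actually link to. */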
+ for (i = 0; i < ARRAY_SIZE(src_sels); i++) {
+ if (src_sels[i] == src) {
+ regmap_write(ed->reg_pec, PIXENGCFG_DYNAMIC, src);
+ return;
+ }
+ }
+}
+
+void dc_ed_pec_sync_trigger(struct dc_ed *ed)
+{
+ regmap_write(ed->reg_pec, PIXENGCFG_TRIGGER, SYNC_TRIGGER);
+}
+
+static inline void dc_ed_enable_shden(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_ed_kick_mode_external(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, KICK_MODE, EXTERNAL);
+}
+
+static inline void dc_ed_disable_perfcountmode(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, PERFCOUNTMODE, 0);
+}
+
+static inline void dc_ed_disable_gamma_apply(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, CONTROL, GAMMAAPPLYENABLE, 0);
+}
+
+void dc_ed_init(struct dc_ed *ed)
+{
+ dc_ed_pec_src_sel(ed, LINK_ID_NONE);
+ dc_ed_pec_enable_shden(ed);
+ dc_ed_pec_poweron(ed);
+ dc_ed_pec_sync_mode_single(ed);
+ dc_ed_pec_div_reset(ed);
+ dc_ed_enable_shden(ed);
+ dc_ed_disable_perfcountmode(ed);
+ dc_ed_kick_mode_external(ed);
+ dc_ed_disable_gamma_apply(ed);
+}
+
+static int dc_ed_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_ed *ed;
+ int id;
+
+ ed = devm_kzalloc(dev, sizeof(*ed), GFP_KERNEL);
+ if (!ed)
+ return -ENOMEM;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ ed->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_ed_pec_regmap_config);
+ if (IS_ERR(ed->reg_pec))
+ return PTR_ERR(ed->reg_pec);
+
+ ed->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_ed_cfg_regmap_config);
+ if (IS_ERR(ed->reg_cfg))
+ return PTR_ERR(ed->reg_cfg);
+
+ ed->irq_shdload = platform_get_irq_byname(pdev, "shdload");
+ if (ed->irq_shdload < 0)
+ return ed->irq_shdload;
+
+ ed->dev = dev;
+
+ id = dc_subdev_get_id(dc_ed_info, ARRAY_SIZE(dc_ed_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ switch (id) {
+ case 0:
+ dc_drm->ed_cont[0] = ed;
+ break;
+ case 1:
+ dc_drm->ed_cont[1] = ed;
+ break;
+ case 4:
+ dc_drm->ed_safe[0] = ed;
+ break;
+ case 5:
+ dc_drm->ed_safe[1] = ed;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct component_ops dc_ed_ops = {
+ .bind = dc_ed_bind,
+};
+
+static int dc_ed_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_ed_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_ed_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_ed_ops);
+}
+
+static const struct of_device_id dc_ed_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-extdst" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_ed_dt_ids);
+
+struct platform_driver dc_ed_driver = {
+ .probe = dc_ed_probe,
+ .remove = dc_ed_remove,
+ .driver = {
+ .name = "imx8-dc-extdst",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_ed_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fg.c b/drivers/gpu/drm/imx/dc/dc-fg.c
new file mode 100644
index 000000000000..7f6c1852bf72
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fg.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#include <drm/drm_modes.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+
+#define FGSTCTRL 0x8
+#define FGSYNCMODE_MASK GENMASK(2, 1)
+#define FGSYNCMODE(x) FIELD_PREP(FGSYNCMODE_MASK, (x))
+#define SHDEN BIT(0)
+
+#define HTCFG1 0xc
+#define HTOTAL(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define HACT(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define HTCFG2 0x10
+#define HSEN BIT(31)
+#define HSBP(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define HSYNC(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define VTCFG1 0x14
+#define VTOTAL(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define VACT(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define VTCFG2 0x18
+#define VSEN BIT(31)
+#define VSBP(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define VSYNC(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define PKICKCONFIG 0x2c
+#define SKICKCONFIG 0x30
+#define EN BIT(31)
+#define ROW(x) FIELD_PREP(GENMASK(29, 16), (x))
+#define COL(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define PACFG 0x54
+#define SACFG 0x58
+#define STARTY(x) FIELD_PREP(GENMASK(29, 16), ((x) + 1))
+#define STARTX(x) FIELD_PREP(GENMASK(13, 0), ((x) + 1))
+
+#define FGINCTRL 0x5c
+#define FGINCTRLPANIC 0x60
+#define FGDM_MASK GENMASK(2, 0)
+#define ENPRIMALPHA BIT(3)
+#define ENSECALPHA BIT(4)
+
+#define FGCCR 0x64
+#define CCGREEN(x) FIELD_PREP(GENMASK(19, 10), (x))
+
+#define FGENABLE 0x68
+#define FGEN BIT(0)
+
+#define FGSLR 0x6c
+#define SHDTOKGEN BIT(0)
+
+#define FGTIMESTAMP 0x74
+#define FRAMEINDEX(x) FIELD_GET(GENMASK(31, 14), (x))
+#define LINEINDEX(x) FIELD_GET(GENMASK(13, 0), (x))
+
+#define FGCHSTAT 0x78
+#define SECSYNCSTAT BIT(24)
+#define SFIFOEMPTY BIT(16)
+
+#define FGCHSTATCLR 0x7c
+#define CLRSECSTAT BIT(16)
+
+enum dc_fg_syncmode {
+ FG_SYNCMODE_OFF, /* No side-by-side synchronization. */
+};
+
+enum dc_fg_dm {
+ FG_DM_CONSTCOL = 0x1, /* Constant Color Background is shown. */
+ FG_DM_SEC_ON_TOP = 0x5, /* Both inputs overlaid with secondary on top. */
+};
+
+static const struct dc_subdev_info dc_fg_info[] = {
+ { .reg_start = 0x5618b800, .id = 0, },
+ { .reg_start = 0x5618d400, .id = 1, },
+};
+
+static const struct regmap_range dc_fg_regmap_write_ranges[] = {
+ regmap_reg_range(FGSTCTRL, VTCFG2),
+ regmap_reg_range(PKICKCONFIG, SKICKCONFIG),
+ regmap_reg_range(PACFG, FGSLR),
+ regmap_reg_range(FGCHSTATCLR, FGCHSTATCLR),
+};
+
+static const struct regmap_range dc_fg_regmap_read_ranges[] = {
+ regmap_reg_range(FGSTCTRL, VTCFG2),
+ regmap_reg_range(PKICKCONFIG, SKICKCONFIG),
+ regmap_reg_range(PACFG, FGENABLE),
+ regmap_reg_range(FGTIMESTAMP, FGCHSTAT),
+};
+
+static const struct regmap_access_table dc_fg_regmap_write_table = {
+ .yes_ranges = dc_fg_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fg_regmap_write_ranges),
+};
+
+static const struct regmap_access_table dc_fg_regmap_read_table = {
+ .yes_ranges = dc_fg_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fg_regmap_read_ranges),
+};
+
+static const struct regmap_config dc_fg_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fg_regmap_write_table,
+ .rd_table = &dc_fg_regmap_read_table,
+ .max_register = FGCHSTATCLR,
+};
+
+static inline void dc_fg_enable_shden(struct dc_fg *fg)
+{
+ regmap_write_bits(fg->reg, FGSTCTRL, SHDEN, SHDEN);
+}
+
+static inline void dc_fg_syncmode(struct dc_fg *fg, enum dc_fg_syncmode mode)
+{
+ regmap_write_bits(fg->reg, FGSTCTRL, FGSYNCMODE_MASK, FGSYNCMODE(mode));
+}
+
+void dc_fg_cfg_videomode(struct dc_fg *fg, struct drm_display_mode *m)
+{
+ u32 hact, htotal, hsync, hsbp;
+ u32 vact, vtotal, vsync, vsbp;
+ u32 kick_row, kick_col;
+ int ret;
+
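+	/*
+	 * The sync-to-active intervals (hsbp/vsbp) span from sync start to
+	 * active start, i.e. sync length plus back porch.
+	 */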
+ hact = m->crtc_hdisplay;
+ htotal = m->crtc_htotal;
+ hsync = m->crtc_hsync_end - m->crtc_hsync_start;
+ hsbp = m->crtc_htotal - m->crtc_hsync_start;
+
+ vact = m->crtc_vdisplay;
+ vtotal = m->crtc_vtotal;
+ vsync = m->crtc_vsync_end - m->crtc_vsync_start;
+ vsbp = m->crtc_vtotal - m->crtc_vsync_start;
+
+ /* video mode */
+ regmap_write(fg->reg, HTCFG1, HACT(hact) | HTOTAL(htotal));
+ regmap_write(fg->reg, HTCFG2, HSYNC(hsync) | HSBP(hsbp) | HSEN);
+ regmap_write(fg->reg, VTCFG1, VACT(vact) | VTOTAL(vtotal));
+ regmap_write(fg->reg, VTCFG2, VSYNC(vsync) | VSBP(vsbp) | VSEN);
+
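+	/* Trigger the frame kick just outside the active area. */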
+ kick_col = hact + 1;
+ kick_row = vact;
+
+ /* pkickconfig */
+ regmap_write(fg->reg, PKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
+
+	/* skickconfig */
+ regmap_write(fg->reg, SKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
+
+ /* primary and secondary area position configuration */
+ regmap_write(fg->reg, PACFG, STARTX(0) | STARTY(0));
+ regmap_write(fg->reg, SACFG, STARTX(0) | STARTY(0));
+
+ /* alpha */
+ regmap_write_bits(fg->reg, FGINCTRL, ENPRIMALPHA | ENSECALPHA, 0);
+ regmap_write_bits(fg->reg, FGINCTRLPANIC, ENPRIMALPHA | ENSECALPHA, 0);
+
+	/* constant color is green (used in panic mode) */
+ regmap_write(fg->reg, FGCCR, CCGREEN(0x3ff));
+
+ ret = clk_set_rate(fg->clk_disp, m->clock * HZ_PER_KHZ);
+ if (ret < 0)
+ dev_err(fg->dev, "failed to set display clock rate: %d\n", ret);
+}
+
+static inline void dc_fg_displaymode(struct dc_fg *fg, enum dc_fg_dm mode)
+{
+ regmap_write_bits(fg->reg, FGINCTRL, FGDM_MASK, mode);
+}
+
+static inline void dc_fg_panic_displaymode(struct dc_fg *fg, enum dc_fg_dm mode)
+{
+ regmap_write_bits(fg->reg, FGINCTRLPANIC, FGDM_MASK, mode);
+}
+
+void dc_fg_enable(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGENABLE, FGEN);
+}
+
+void dc_fg_disable(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGENABLE, 0);
+}
+
+void dc_fg_shdtokgen(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGSLR, SHDTOKGEN);
+}
+
+u32 dc_fg_get_frame_index(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGTIMESTAMP, &val);
+
+ return FRAMEINDEX(val);
+}
+
+u32 dc_fg_get_line_index(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGTIMESTAMP, &val);
+
+ return LINEINDEX(val);
+}
+
+bool dc_fg_wait_for_frame_index_moving(struct dc_fg *fg)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u32 frame_index, last_frame_index;
+
+ frame_index = dc_fg_get_frame_index(fg);
+ do {
+ last_frame_index = frame_index;
+ frame_index = dc_fg_get_frame_index(fg);
+ } while (last_frame_index == frame_index &&
+ time_before(jiffies, timeout));
+
+ return last_frame_index != frame_index;
+}
+
+bool dc_fg_secondary_requests_to_read_empty_fifo(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGCHSTAT, &val);
+
+ return !!(val & SFIFOEMPTY);
+}
+
+void dc_fg_secondary_clear_channel_status(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGCHSTATCLR, CLRSECSTAT);
+}
+
+int dc_fg_wait_for_secondary_syncup(struct dc_fg *fg)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(fg->reg, FGCHSTAT, val,
+ val & SECSYNCSTAT, 5, 100000);
+}
+
+void dc_fg_enable_clock(struct dc_fg *fg)
+{
+ int ret;
+
+ ret = clk_prepare_enable(fg->clk_disp);
+ if (ret)
+ dev_err(fg->dev, "failed to enable display clock: %d\n", ret);
+}
+
+void dc_fg_disable_clock(struct dc_fg *fg)
+{
+ clk_disable_unprepare(fg->clk_disp);
+}
+
+enum drm_mode_status dc_fg_check_clock(struct dc_fg *fg, int clk_khz)
+{
+ unsigned long rounded_rate;
+
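+	/* The mode is only usable if its pixel clock can be matched exactly. */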
+ rounded_rate = clk_round_rate(fg->clk_disp, clk_khz * HZ_PER_KHZ);
+
+ if (rounded_rate != clk_khz * HZ_PER_KHZ)
+ return MODE_NOCLOCK;
+
+ return MODE_OK;
+}
+
+void dc_fg_init(struct dc_fg *fg)
+{
+ dc_fg_enable_shden(fg);
+ dc_fg_syncmode(fg, FG_SYNCMODE_OFF);
+ dc_fg_displaymode(fg, FG_DM_SEC_ON_TOP);
+ dc_fg_panic_displaymode(fg, FG_DM_CONSTCOL);
+}
+
+static int dc_fg_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res;
+ void __iomem *base;
+ struct dc_fg *fg;
+ int id;
+
+ fg = devm_kzalloc(dev, sizeof(*fg), GFP_KERNEL);
+ if (!fg)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ fg->reg = devm_regmap_init_mmio(dev, base, &dc_fg_regmap_config);
+ if (IS_ERR(fg->reg))
+ return PTR_ERR(fg->reg);
+
+ fg->clk_disp = devm_clk_get(dev, NULL);
+ if (IS_ERR(fg->clk_disp))
+ return dev_err_probe(dev, PTR_ERR(fg->clk_disp),
+ "failed to get display clock\n");
+
+ id = dc_subdev_get_id(dc_fg_info, ARRAY_SIZE(dc_fg_info), res);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fg->dev = dev;
+ dc_drm->fg[id] = fg;
+
+ return 0;
+}
+
+static const struct component_ops dc_fg_ops = {
+ .bind = dc_fg_bind,
+};
+
+static int dc_fg_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fg_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fg_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fg_ops);
+}
+
+static const struct of_device_id dc_fg_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-framegen" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fg_dt_ids);
+
+struct platform_driver dc_fg_driver = {
+ .probe = dc_fg_probe,
+ .remove = dc_fg_remove,
+ .driver = {
+ .name = "imx8-dc-framegen",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fg_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fl.c b/drivers/gpu/drm/imx/dc/dc-fl.c
new file mode 100644
index 000000000000..3ce24c72aa13
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fl.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+
+#define BASEADDRESS(x) (0x10 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERATTRIBUTES(x) (0x14 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERDIMENSION(x) (0x18 + FRAC_OFFSET * (x))
+#define COLORCOMPONENTBITS(x) (0x1c + FRAC_OFFSET * (x))
+#define COLORCOMPONENTSHIFT(x) (0x20 + FRAC_OFFSET * (x))
+#define LAYEROFFSET(x) (0x24 + FRAC_OFFSET * (x))
+#define CLIPWINDOWOFFSET(x) (0x28 + FRAC_OFFSET * (x))
+#define CLIPWINDOWDIMENSIONS(x) (0x2c + FRAC_OFFSET * (x))
+#define CONSTANTCOLOR(x) (0x30 + FRAC_OFFSET * (x))
+#define LAYERPROPERTY(x) (0x34 + FRAC_OFFSET * (x))
+#define FRAMEDIMENSIONS 0x150
+
+struct dc_fl {
+ struct dc_fu fu;
+};
+
+static const struct dc_subdev_info dc_fl_info[] = {
+ { .reg_start = 0x56180ac0, .id = 0, },
+};
+
+static const struct regmap_range dc_fl_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, FRAMEDIMENSIONS),
+};
+
+static const struct regmap_access_table dc_fl_regmap_access_table = {
+ .yes_ranges = dc_fl_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fl_regmap_ranges),
+};
+
+static const struct regmap_config dc_fl_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fl_regmap_access_table,
+ .rd_table = &dc_fl_regmap_access_table,
+ .max_register = FRAMEDIMENSIONS,
+};
+
+static void dc_fl_set_fmt(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format)
+{
+ u32 bits = 0, shifts = 0;
+
+ dc_fu_set_src_bpp(fu, frac, format->cpp[0] * 8);
+
+ regmap_write_bits(fu->reg_cfg, LAYERPROPERTY(frac),
+ YUVCONVERSIONMODE_MASK,
+ YUVCONVERSIONMODE(YUVCONVERSIONMODE_OFF));
+
+ dc_fu_get_pixel_format_bits(fu, format->format, &bits);
+ dc_fu_get_pixel_format_shifts(fu, format->format, &shifts);
+
+ regmap_write(fu->reg_cfg, COLORCOMPONENTBITS(frac), bits);
+ regmap_write(fu->reg_cfg, COLORCOMPONENTSHIFT(frac), shifts);
+}
+
+static void dc_fl_set_framedimensions(struct dc_fu *fu, int w, int h)
+{
+ regmap_write(fu->reg_cfg, FRAMEDIMENSIONS,
+ FRAMEWIDTH(w) | FRAMEHEIGHT(h));
+}
+
+static void dc_fl_init(struct dc_fu *fu)
+{
+ dc_fu_common_hw_init(fu);
+ dc_fu_shdldreq_sticky(fu, 0xff);
+}
+
+static void dc_fl_set_ops(struct dc_fu *fu)
+{
+ memcpy(&fu->ops, &dc_fu_common_ops, sizeof(dc_fu_common_ops));
+ fu->ops.init = dc_fl_init;
+ fu->ops.set_fmt = dc_fl_set_fmt;
+ fu->ops.set_framedimensions = dc_fl_set_framedimensions;
+}
+
+static int dc_fl_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_cfg;
+ struct dc_fl *fl;
+ struct dc_fu *fu;
+ int i, id;
+
+ fl = devm_kzalloc(dev, sizeof(*fl), GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
+ fu = &fl->fu;
+
+ res_pec = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ fu->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_fl_cfg_regmap_config);
+ if (IS_ERR(fu->reg_cfg))
+ return PTR_ERR(fu->reg_cfg);
+
+ id = dc_subdev_get_id(dc_fl_info, ARRAY_SIZE(dc_fl_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fu->link_id = LINK_ID_FETCHLAYER0;
+ fu->id = DC_FETCHUNIT_FL0;
+ for (i = 0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ fu->reg_baseaddr[i] = BASEADDRESS(i);
+ fu->reg_sourcebufferattributes[i] = SOURCEBUFFERATTRIBUTES(i);
+ fu->reg_sourcebufferdimension[i] = SOURCEBUFFERDIMENSION(i);
+ fu->reg_layeroffset[i] = LAYEROFFSET(i);
+ fu->reg_clipwindowoffset[i] = CLIPWINDOWOFFSET(i);
+ fu->reg_clipwindowdimensions[i] = CLIPWINDOWDIMENSIONS(i);
+ fu->reg_constantcolor[i] = CONSTANTCOLOR(i);
+ fu->reg_layerproperty[i] = LAYERPROPERTY(i);
+ }
+ snprintf(fu->name, sizeof(fu->name), "FetchLayer%d", id);
+
+ dc_fl_set_ops(fu);
+
+ dc_drm->fu_disp[fu->id] = fu;
+
+ return 0;
+}
+
+static const struct component_ops dc_fl_ops = {
+ .bind = dc_fl_bind,
+};
+
+static int dc_fl_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fl_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fl_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fl_ops);
+}
+
+static const struct of_device_id dc_fl_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-fetchlayer" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fl_dt_ids);
+
+struct platform_driver dc_fl_driver = {
+ .probe = dc_fl_probe,
+ .remove = dc_fl_remove,
+ .driver = {
+ .name = "imx8-dc-fetchlayer",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fl_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.c b/drivers/gpu/drm/imx/dc/dc-fu.c
new file mode 100644
index 000000000000..f94c591c8158
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fu.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/math.h>
+
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+/* STATICCONTROL */
+#define SHDLDREQSTICKY_MASK GENMASK(31, 24)
+#define SHDLDREQSTICKY(x) FIELD_PREP(SHDLDREQSTICKY_MASK, (x))
+#define BASEADDRESSAUTOUPDATE_MASK GENMASK(23, 16)
+#define BASEADDRESSAUTOUPDATE(x) FIELD_PREP(BASEADDRESSAUTOUPDATE_MASK, (x))
+
+/* BURSTBUFFERMANAGEMENT */
+#define SETBURSTLENGTH_MASK GENMASK(12, 8)
+#define SETBURSTLENGTH(x) FIELD_PREP(SETBURSTLENGTH_MASK, (x))
+#define SETNUMBUFFERS_MASK GENMASK(7, 0)
+#define SETNUMBUFFERS(x) FIELD_PREP(SETNUMBUFFERS_MASK, (x))
+#define LINEMODE_MASK BIT(31)
+
+/* SOURCEBUFFERATTRIBUTES */
+#define BITSPERPIXEL_MASK GENMASK(21, 16)
+#define BITSPERPIXEL(x) FIELD_PREP(BITSPERPIXEL_MASK, (x))
+#define STRIDE_MASK GENMASK(15, 0)
+#define STRIDE(x) FIELD_PREP(STRIDE_MASK, (x) - 1)
+
+/* SOURCEBUFFERDIMENSION */
+#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
+#define LINECOUNT(x) FIELD_PREP(GENMASK(29, 16), (x))
+
+/* LAYEROFFSET */
+#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
+#define LAYERYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+
+/* CLIPWINDOWOFFSET */
+#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
+#define CLIPWINDOWYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+
+/* CLIPWINDOWDIMENSIONS */
+#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1)
+#define CLIPWINDOWHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x) - 1)
+
+enum dc_linemode {
+ /*
+ * Mandatory setting for operation in the Display Controller.
+ * Works also for Blit Engine with marginal performance impact.
+ */
+ LINEMODE_DISPLAY = 0,
+};
+
+struct dc_fu_pixel_format {
+ u32 pixel_format;
+ u32 bits;
+ u32 shifts;
+};
+
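+/* Color component widths and shifts per DRM format; only XRGB8888 for now. */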
+static const struct dc_fu_pixel_format pixel_formats[] = {
+ {
+ DRM_FORMAT_XRGB8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
+ R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0),
+ },
+};
+
+void dc_fu_get_pixel_format_bits(struct dc_fu *fu, u32 format, u32 *bits)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ if (pixel_formats[i].pixel_format == format) {
+ *bits = pixel_formats[i].bits;
+ return;
+ }
+ }
+}
+
+void
+dc_fu_get_pixel_format_shifts(struct dc_fu *fu, u32 format, u32 *shifts)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ if (pixel_formats[i].pixel_format == format) {
+ *shifts = pixel_formats[i].shifts;
+ return;
+ }
+ }
+}
+
+static inline void dc_fu_enable_shden(struct dc_fu *fu)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_fu_baddr_autoupdate(struct dc_fu *fu, u8 layer_mask)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL,
+ BASEADDRESSAUTOUPDATE_MASK,
+ BASEADDRESSAUTOUPDATE(layer_mask));
+}
+
+void dc_fu_shdldreq_sticky(struct dc_fu *fu, u8 layer_mask)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL, SHDLDREQSTICKY_MASK,
+ SHDLDREQSTICKY(layer_mask));
+}
+
+static inline void dc_fu_set_linemode(struct dc_fu *fu, enum dc_linemode mode)
+{
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT, LINEMODE_MASK,
+ mode);
+}
+
+static inline void dc_fu_set_numbuffers(struct dc_fu *fu, unsigned int num)
+{
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT,
+ SETNUMBUFFERS_MASK, SETNUMBUFFERS(num));
+}
+
+static void dc_fu_set_burstlength(struct dc_fu *fu, dma_addr_t baddr)
+{
+ unsigned int burst_size, burst_length;
+
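+	/*
+	 * Use the largest burst the base address alignment allows, clamped
+	 * to the 8..128 byte range; the field takes the length in 8-byte units.
+	 */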
+ burst_size = 1 << __ffs(baddr);
+ burst_size = round_up(burst_size, 8);
+ burst_size = min(burst_size, 128U);
+ burst_length = burst_size / 8;
+
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT,
+ SETBURSTLENGTH_MASK, SETBURSTLENGTH(burst_length));
+}
+
+static void dc_fu_set_baseaddress(struct dc_fu *fu, enum dc_fu_frac frac,
+ dma_addr_t baddr)
+{
+ regmap_write(fu->reg_cfg, fu->reg_baseaddr[frac], baddr);
+}
+
+void dc_fu_set_src_bpp(struct dc_fu *fu, enum dc_fu_frac frac, unsigned int bpp)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_sourcebufferattributes[frac],
+ BITSPERPIXEL_MASK, BITSPERPIXEL(bpp));
+}
+
+static void dc_fu_set_src_stride(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int stride)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_sourcebufferattributes[frac],
+ STRIDE_MASK, STRIDE(stride));
+}
+
+static void dc_fu_set_src_buf_dimensions(struct dc_fu *fu, enum dc_fu_frac frac,
+ int w, int h)
+{
+ regmap_write(fu->reg_cfg, fu->reg_sourcebufferdimension[frac],
+ LINEWIDTH(w) | LINECOUNT(h));
+}
+
+static inline void dc_fu_layeroffset(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int x, unsigned int y)
+{
+ regmap_write(fu->reg_cfg, fu->reg_layeroffset[frac],
+ LAYERXOFFSET(x) | LAYERYOFFSET(y));
+}
+
+static inline void dc_fu_clipoffset(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int x, unsigned int y)
+{
+ regmap_write(fu->reg_cfg, fu->reg_clipwindowoffset[frac],
+ CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y));
+}
+
+static inline void dc_fu_clipdimensions(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int w, unsigned int h)
+{
+ regmap_write(fu->reg_cfg, fu->reg_clipwindowdimensions[frac],
+ CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h));
+}
+
+static inline void
+dc_fu_set_pixel_blend_mode(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write(fu->reg_cfg, fu->reg_layerproperty[frac], 0);
+ regmap_write(fu->reg_cfg, fu->reg_constantcolor[frac], 0);
+}
+
+static void dc_fu_enable_src_buf(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_layerproperty[frac],
+ SOURCEBUFFERENABLE, SOURCEBUFFERENABLE);
+}
+
+static void dc_fu_disable_src_buf(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_layerproperty[frac],
+ SOURCEBUFFERENABLE, 0);
+
+ if (fu->lb) {
+ dc_lb_pec_clken(fu->lb, CLKEN_DISABLE);
+ dc_lb_mode(fu->lb, LB_NEUTRAL);
+ }
+}
+
+static void dc_fu_set_layerblend(struct dc_fu *fu, struct dc_lb *lb)
+{
+ fu->lb = lb;
+}
+
+static enum dc_link_id dc_fu_get_link_id(struct dc_fu *fu)
+{
+ return fu->link_id;
+}
+
+static const char *dc_fu_get_name(struct dc_fu *fu)
+{
+ return fu->name;
+}
+
+const struct dc_fu_ops dc_fu_common_ops = {
+ .set_burstlength = dc_fu_set_burstlength,
+ .set_baseaddress = dc_fu_set_baseaddress,
+ .set_src_stride = dc_fu_set_src_stride,
+ .set_src_buf_dimensions = dc_fu_set_src_buf_dimensions,
+ .enable_src_buf = dc_fu_enable_src_buf,
+ .disable_src_buf = dc_fu_disable_src_buf,
+ .set_layerblend = dc_fu_set_layerblend,
+ .get_link_id = dc_fu_get_link_id,
+ .get_name = dc_fu_get_name,
+};
+
+const struct dc_fu_ops *dc_fu_get_ops(struct dc_fu *fu)
+{
+ return &fu->ops;
+}
+
+void dc_fu_common_hw_init(struct dc_fu *fu)
+{
+ enum dc_fu_frac i;
+
+ dc_fu_baddr_autoupdate(fu, 0x0);
+ dc_fu_enable_shden(fu);
+ dc_fu_set_linemode(fu, LINEMODE_DISPLAY);
+ dc_fu_set_numbuffers(fu, 16);
+
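+	/* Leave all fractional sub-units disabled and neutrally configured. */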
+ for (i = DC_FETCHUNIT_FRAC0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ dc_fu_layeroffset(fu, i, 0, 0);
+ dc_fu_clipoffset(fu, i, 0, 0);
+ dc_fu_clipdimensions(fu, i, 1, 1);
+ dc_fu_disable_src_buf(fu, i);
+ dc_fu_set_pixel_blend_mode(fu, i);
+ }
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.h b/drivers/gpu/drm/imx/dc/dc-fu.h
new file mode 100644
index 000000000000..e016e1ea5b4e
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fu.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_FETCHUNIT_H__
+#define __DC_FETCHUNIT_H__
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-pe.h"
+
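+/* Register stride between two consecutive fractional register sets */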
+#define FRAC_OFFSET 0x28
+
+#define STATICCONTROL 0x8
+#define BURSTBUFFERMANAGEMENT 0xc
+
+/* COLORCOMPONENTBITS */
+#define R_BITS(x) FIELD_PREP_CONST(GENMASK(27, 24), (x))
+#define G_BITS(x) FIELD_PREP_CONST(GENMASK(19, 16), (x))
+#define B_BITS(x) FIELD_PREP_CONST(GENMASK(11, 8), (x))
+#define A_BITS(x) FIELD_PREP_CONST(GENMASK(3, 0), (x))
+
+/* COLORCOMPONENTSHIFT */
+#define R_SHIFT(x) FIELD_PREP_CONST(GENMASK(28, 24), (x))
+#define G_SHIFT(x) FIELD_PREP_CONST(GENMASK(20, 16), (x))
+#define B_SHIFT(x) FIELD_PREP_CONST(GENMASK(12, 8), (x))
+#define A_SHIFT(x) FIELD_PREP_CONST(GENMASK(4, 0), (x))
+
+/* LAYERPROPERTY */
+#define YUVCONVERSIONMODE_MASK GENMASK(18, 17)
+#define YUVCONVERSIONMODE(x) FIELD_PREP(YUVCONVERSIONMODE_MASK, (x))
+#define SOURCEBUFFERENABLE BIT(31)
+
+/* FRAMEDIMENSIONS */
+#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
+#define FRAMEHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x))
+
+/* CONTROL */
+#define INPUTSELECT_MASK GENMASK(4, 3)
+#define INPUTSELECT(x) FIELD_PREP(INPUTSELECT_MASK, (x))
+#define RASTERMODE_MASK GENMASK(2, 0)
+#define RASTERMODE(x) FIELD_PREP(RASTERMODE_MASK, (x))
+
+enum dc_yuvconversionmode {
+ YUVCONVERSIONMODE_OFF,
+};
+
+enum dc_inputselect {
+ INPUTSELECT_INACTIVE,
+};
+
+enum dc_rastermode {
+ RASTERMODE_NORMAL,
+};
+
+enum {
+ DC_FETCHUNIT_FL0,
+ DC_FETCHUNIT_FW2,
+};
+
+enum dc_fu_frac {
+ DC_FETCHUNIT_FRAC0,
+ DC_FETCHUNIT_FRAC1,
+ DC_FETCHUNIT_FRAC2,
+ DC_FETCHUNIT_FRAC3,
+ DC_FETCHUNIT_FRAC4,
+ DC_FETCHUNIT_FRAC5,
+ DC_FETCHUNIT_FRAC6,
+ DC_FETCHUNIT_FRAC7,
+ DC_FETCHUNIT_FRAC_NUM
+};
+
+struct dc_fu;
+struct dc_lb;
+
+struct dc_fu_ops {
+ void (*init)(struct dc_fu *fu);
+ void (*set_burstlength)(struct dc_fu *fu, dma_addr_t baddr);
+ void (*set_baseaddress)(struct dc_fu *fu, enum dc_fu_frac frac,
+ dma_addr_t baddr);
+ void (*set_src_stride)(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int stride);
+ void (*set_src_buf_dimensions)(struct dc_fu *fu, enum dc_fu_frac frac,
+ int w, int h);
+ void (*set_fmt)(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format);
+ void (*enable_src_buf)(struct dc_fu *fu, enum dc_fu_frac frac);
+ void (*disable_src_buf)(struct dc_fu *fu, enum dc_fu_frac frac);
+ void (*set_framedimensions)(struct dc_fu *fu, int w, int h);
+ void (*set_layerblend)(struct dc_fu *fu, struct dc_lb *lb);
+ enum dc_link_id (*get_link_id)(struct dc_fu *fu);
+ const char *(*get_name)(struct dc_fu *fu);
+};
+
+struct dc_fu {
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ char name[21];
+ u32 reg_baseaddr[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_sourcebufferattributes[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_sourcebufferdimension[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_layeroffset[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_clipwindowoffset[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_clipwindowdimensions[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_constantcolor[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_layerproperty[DC_FETCHUNIT_FRAC_NUM];
+ unsigned int id;
+ enum dc_link_id link_id;
+ struct dc_fu_ops ops;
+ struct dc_lb *lb;
+};
+
+extern const struct dc_fu_ops dc_fu_common_ops;
+
+void dc_fu_get_pixel_format_bits(struct dc_fu *fu, u32 format, u32 *bits);
+void dc_fu_get_pixel_format_shifts(struct dc_fu *fu, u32 format, u32 *shifts);
+void dc_fu_shdldreq_sticky(struct dc_fu *fu, u8 layer_mask);
+void dc_fu_set_src_bpp(struct dc_fu *fu, enum dc_fu_frac frac, unsigned int bpp);
+void dc_fu_common_hw_init(struct dc_fu *fu);
+
+const struct dc_fu_ops *dc_fu_get_ops(struct dc_fu *fu);
+
+#endif /* __DC_FETCHUNIT_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-fw.c b/drivers/gpu/drm/imx/dc/dc-fw.c
new file mode 100644
index 000000000000..acb2d4d9e2ec
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fw.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+
+#define BASEADDRESS(x) (0x10 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERATTRIBUTES(x) (0x14 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERDIMENSION(x) (0x18 + FRAC_OFFSET * (x))
+#define COLORCOMPONENTBITS(x) (0x1c + FRAC_OFFSET * (x))
+#define COLORCOMPONENTSHIFT(x) (0x20 + FRAC_OFFSET * (x))
+#define LAYEROFFSET(x) (0x24 + FRAC_OFFSET * (x))
+#define CLIPWINDOWOFFSET(x) (0x28 + FRAC_OFFSET * (x))
+#define CLIPWINDOWDIMENSIONS(x) (0x2c + FRAC_OFFSET * (x))
+#define CONSTANTCOLOR(x) (0x30 + FRAC_OFFSET * (x))
+#define LAYERPROPERTY(x) (0x34 + FRAC_OFFSET * (x))
+#define FRAMEDIMENSIONS 0x150
+#define CONTROL 0x170
+
+struct dc_fw {
+ struct dc_fu fu;
+};
+
+static const struct dc_subdev_info dc_fw_info[] = {
+ { .reg_start = 0x56180a60, .id = 2, },
+};
+
+static const struct regmap_range dc_fw_pec_regmap_access_ranges[] = {
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_fw_pec_regmap_access_table = {
+ .yes_ranges = dc_fw_pec_regmap_access_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fw_pec_regmap_access_ranges),
+};
+
+static const struct regmap_config dc_fw_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fw_pec_regmap_access_table,
+ .rd_table = &dc_fw_pec_regmap_access_table,
+ .max_register = PIXENGCFG_DYNAMIC,
+};
+
+static const struct regmap_range dc_fw_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, FRAMEDIMENSIONS),
+ regmap_reg_range(CONTROL, CONTROL),
+};
+
+static const struct regmap_access_table dc_fw_regmap_access_table = {
+ .yes_ranges = dc_fw_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fw_regmap_ranges),
+};
+
+static const struct regmap_config dc_fw_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fw_regmap_access_table,
+ .rd_table = &dc_fw_regmap_access_table,
+ .max_register = CONTROL,
+};
+
+static void dc_fw_set_fmt(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format)
+{
+ u32 bits = 0, shifts = 0;
+
+ dc_fu_set_src_bpp(fu, frac, format->cpp[0] * 8);
+
+ regmap_write_bits(fu->reg_cfg, CONTROL, INPUTSELECT_MASK,
+ INPUTSELECT(INPUTSELECT_INACTIVE));
+ regmap_write_bits(fu->reg_cfg, CONTROL, RASTERMODE_MASK,
+ RASTERMODE(RASTERMODE_NORMAL));
+
+ regmap_write_bits(fu->reg_cfg, LAYERPROPERTY(frac),
+ YUVCONVERSIONMODE_MASK,
+ YUVCONVERSIONMODE(YUVCONVERSIONMODE_OFF));
+
+ dc_fu_get_pixel_format_bits(fu, format->format, &bits);
+ dc_fu_get_pixel_format_shifts(fu, format->format, &shifts);
+
+ regmap_write(fu->reg_cfg, COLORCOMPONENTBITS(frac), bits);
+ regmap_write(fu->reg_cfg, COLORCOMPONENTSHIFT(frac), shifts);
+}
+
+static void dc_fw_set_framedimensions(struct dc_fu *fu, int w, int h)
+{
+ regmap_write(fu->reg_cfg, FRAMEDIMENSIONS,
+ FRAMEWIDTH(w) | FRAMEHEIGHT(h));
+}
+
+static void dc_fw_init(struct dc_fu *fu)
+{
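+	/* Detach the unit from any upstream source before common init. */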
+ regmap_write(fu->reg_pec, PIXENGCFG_DYNAMIC, LINK_ID_NONE);
+ dc_fu_common_hw_init(fu);
+ dc_fu_shdldreq_sticky(fu, 0xff);
+}
+
+static void dc_fw_set_ops(struct dc_fu *fu)
+{
+ memcpy(&fu->ops, &dc_fu_common_ops, sizeof(dc_fu_common_ops));
+ fu->ops.init = dc_fw_init;
+ fu->ops.set_fmt = dc_fw_set_fmt;
+ fu->ops.set_framedimensions = dc_fw_set_framedimensions;
+}
+
+static int dc_fw_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_fw *fw;
+ struct dc_fu *fu;
+ int i, id;
+
+ fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
+
+ fu = &fw->fu;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ fu->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_fw_pec_regmap_config);
+ if (IS_ERR(fu->reg_pec))
+ return PTR_ERR(fu->reg_pec);
+
+ fu->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_fw_cfg_regmap_config);
+ if (IS_ERR(fu->reg_cfg))
+ return PTR_ERR(fu->reg_cfg);
+
+ id = dc_subdev_get_id(dc_fw_info, ARRAY_SIZE(dc_fw_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fu->link_id = LINK_ID_FETCHWARP2;
+ fu->id = DC_FETCHUNIT_FW2;
+ for (i = 0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ fu->reg_baseaddr[i] = BASEADDRESS(i);
+ fu->reg_sourcebufferattributes[i] = SOURCEBUFFERATTRIBUTES(i);
+ fu->reg_sourcebufferdimension[i] = SOURCEBUFFERDIMENSION(i);
+ fu->reg_layeroffset[i] = LAYEROFFSET(i);
+ fu->reg_clipwindowoffset[i] = CLIPWINDOWOFFSET(i);
+ fu->reg_clipwindowdimensions[i] = CLIPWINDOWDIMENSIONS(i);
+ fu->reg_constantcolor[i] = CONSTANTCOLOR(i);
+ fu->reg_layerproperty[i] = LAYERPROPERTY(i);
+ }
+ snprintf(fu->name, sizeof(fu->name), "FetchWarp%d", id);
+
+ dc_fw_set_ops(fu);
+
+ dc_drm->fu_disp[fu->id] = fu;
+
+ return 0;
+}
+
+static const struct component_ops dc_fw_ops = {
+ .bind = dc_fw_bind,
+};
+
+static int dc_fw_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fw_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fw_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fw_ops);
+}
+
+static const struct of_device_id dc_fw_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-fetchwarp" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fw_dt_ids);
+
+struct platform_driver dc_fw_driver = {
+ .probe = dc_fw_probe,
+ .remove = dc_fw_remove,
+ .driver = {
+ .name = "imx8-dc-fetchwarp",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fw_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-ic.c b/drivers/gpu/drm/imx/dc/dc-ic.c
new file mode 100644
index 000000000000..a270ae4030cd
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-ic.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define USERINTERRUPTMASK(n) (0x8 + 4 * (n))
+#define INTERRUPTENABLE(n) (0x10 + 4 * (n))
+#define INTERRUPTPRESET(n) (0x18 + 4 * (n))
+#define INTERRUPTCLEAR(n) (0x20 + 4 * (n))
+#define INTERRUPTSTATUS(n) (0x28 + 4 * (n))
+#define USERINTERRUPTENABLE(n) (0x40 + 4 * (n))
+#define USERINTERRUPTPRESET(n) (0x48 + 4 * (n))
+#define USERINTERRUPTCLEAR(n) (0x50 + 4 * (n))
+#define USERINTERRUPTSTATUS(n) (0x58 + 4 * (n))
+
+#define IRQ_COUNT 49
+#define IRQ_RESERVED 35
+#define REG_NUM 2
+
+struct dc_ic_data {
+ struct regmap *regs;
+ struct clk *clk_axi;
+ int irq[IRQ_COUNT];
+ struct irq_domain *domain;
+};
+
+struct dc_ic_entry {
+ struct dc_ic_data *data;
+ int irq;
+};
+
+static const struct regmap_range dc_ic_regmap_write_ranges[] = {
+ regmap_reg_range(USERINTERRUPTMASK(0), INTERRUPTCLEAR(1)),
+ regmap_reg_range(USERINTERRUPTENABLE(0), USERINTERRUPTCLEAR(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_write_table = {
+ .yes_ranges = dc_ic_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_write_ranges),
+};
+
+static const struct regmap_range dc_ic_regmap_read_ranges[] = {
+ regmap_reg_range(USERINTERRUPTMASK(0), INTERRUPTENABLE(1)),
+ regmap_reg_range(INTERRUPTSTATUS(0), INTERRUPTSTATUS(1)),
+ regmap_reg_range(USERINTERRUPTENABLE(0), USERINTERRUPTENABLE(1)),
+ regmap_reg_range(USERINTERRUPTSTATUS(0), USERINTERRUPTSTATUS(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_read_table = {
+ .yes_ranges = dc_ic_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_read_ranges),
+};
+
+static const struct regmap_range dc_ic_regmap_volatile_ranges[] = {
+ regmap_reg_range(INTERRUPTPRESET(0), INTERRUPTCLEAR(1)),
+ regmap_reg_range(USERINTERRUPTPRESET(0), USERINTERRUPTCLEAR(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_volatile_table = {
+ .yes_ranges = dc_ic_regmap_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_volatile_ranges),
+};
+
+static const struct regmap_config dc_ic_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ic_regmap_write_table,
+ .rd_table = &dc_ic_regmap_read_table,
+ .volatile_table = &dc_ic_regmap_volatile_table,
+ .max_register = USERINTERRUPTSTATUS(1),
+};
+
+static void dc_ic_irq_handler(struct irq_desc *desc)
+{
+ struct dc_ic_entry *entry = irq_desc_get_handler_data(desc);
+ struct dc_ic_data *data = entry->data;
+ unsigned int status, enable;
+ unsigned int virq;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
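+	/* Each chained parent line maps 1:1 onto a user interrupt status bit. */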
+ regmap_read(data->regs, USERINTERRUPTSTATUS(entry->irq / 32), &status);
+ regmap_read(data->regs, USERINTERRUPTENABLE(entry->irq / 32), &enable);
+
+ status &= enable;
+
+ if (status & BIT(entry->irq % 32)) {
+ virq = irq_find_mapping(data->domain, entry->irq);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
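+/* Word 1: bit 3 is the reserved IRQ 35, bits 17..31 lie beyond IRQ_COUNT. */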
+static const unsigned long unused_irq[REG_NUM] = {0x00000000, 0xfffe0008};
+
+static int dc_ic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct irq_chip_generic *gc;
+ struct dc_ic_entry *entry;
+ struct irq_chip_type *ct;
+ struct dc_ic_data *data;
+ void __iomem *base;
+ int i, ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ entry = devm_kcalloc(dev, IRQ_COUNT, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ dev_err(dev, "failed to initialize reg\n");
+ return PTR_ERR(base);
+ }
+
+ data->regs = devm_regmap_init_mmio(dev, base, &dc_ic_regmap_config);
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+
+ data->clk_axi = devm_clk_get(dev, NULL);
+ if (IS_ERR(data->clk_axi))
+ return dev_err_probe(dev, PTR_ERR(data->clk_axi),
+ "failed to get AXI clock\n");
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ /* skip the reserved IRQ */
+ if (i == IRQ_RESERVED)
+ continue;
+
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ dev_set_drvdata(dev, data);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get runtime PM sync: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < REG_NUM; i++) {
+ /* mask and clear all interrupts */
+ regmap_write(data->regs, USERINTERRUPTENABLE(i), 0x0);
+ regmap_write(data->regs, INTERRUPTENABLE(i), 0x0);
+ regmap_write(data->regs, USERINTERRUPTCLEAR(i), 0xffffffff);
+ regmap_write(data->regs, INTERRUPTCLEAR(i), 0xffffffff);
+
+ /* set all interrupts to user mode */
+ regmap_write(data->regs, USERINTERRUPTMASK(i), 0xffffffff);
+ }
+
+ data->domain = irq_domain_add_linear(dev->of_node, IRQ_COUNT,
+ &irq_generic_chip_ops, data);
+ if (!data->domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ pm_runtime_put(dev);
+ return -ENOMEM;
+ }
+ irq_domain_set_pm_device(data->domain, dev);
+
+ ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, "DC",
+ handle_level_irq, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed to alloc generic IRQ chips: %d\n", ret);
+ irq_domain_remove(data->domain);
+ pm_runtime_put(dev);
+ return ret;
+ }
+
+ for (i = 0; i < IRQ_COUNT; i += 32) {
+ gc = irq_get_domain_generic_chip(data->domain, i);
+ gc->reg_base = base;
+ gc->unused = unused_irq[i / 32];
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.ack = USERINTERRUPTCLEAR(i / 32);
+ ct->regs.mask = USERINTERRUPTENABLE(i / 32);
+ }
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ /* skip the reserved IRQ */
+ if (i == IRQ_RESERVED)
+ continue;
+
+ data->irq[i] = irq_of_parse_and_map(dev->of_node, i);
+
+ entry[i].data = data;
+ entry[i].irq = i;
+
+ irq_set_chained_handler_and_data(data->irq[i],
+ dc_ic_irq_handler, &entry[i]);
+ }
+
+ return 0;
+}
+
+static void dc_ic_remove(struct platform_device *pdev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ if (i == IRQ_RESERVED)
+ continue;
+
+ irq_set_chained_handler_and_data(data->irq[i], NULL, NULL);
+ }
+
+ irq_domain_remove(data->domain);
+
+ pm_runtime_put_sync(&pdev->dev);
+}
+
+static int dc_ic_runtime_suspend(struct device *dev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(data->clk_axi);
+
+ return 0;
+}
+
+static int dc_ic_runtime_resume(struct device *dev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(data->clk_axi);
+ if (ret)
+ dev_err(dev, "failed to enable AXI clock: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops dc_ic_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(dc_ic_runtime_suspend, dc_ic_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_ic_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-intc", },
+ { /* sentinel */ }
+};
+
+struct platform_driver dc_ic_driver = {
+ .probe = dc_ic_probe,
+ .remove = dc_ic_remove,
+ .driver = {
+ .name = "imx8-dc-intc",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_ic_dt_ids,
+ .pm = pm_sleep_ptr(&dc_ic_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-kms.c b/drivers/gpu/drm/imx/dc/dc-kms.c
new file mode 100644
index 000000000000..2b18aa37a4a8
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-kms.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/of.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-kms.h"
+
+static const struct drm_mode_config_funcs dc_drm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int dc_kms_init_encoder_per_crtc(struct dc_drm_device *dc_drm,
+ int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+ struct drm_crtc *crtc = &dc_crtc->base;
+ struct drm_connector *connector;
+ struct device *dev = drm->dev;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+ int ret;
+
+ bridge = devm_drm_of_get_bridge(dev, dc_crtc->de->tc->dev->of_node,
+ 0, 0);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret,
+ "failed to find bridge for CRTC%u\n",
+ crtc->index);
+ }
+
+ encoder = &dc_drm->encoder[crtc_index];
+ ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
+ if (ret) {
+ dev_err(dev, "failed to initialize encoder for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = drm_bridge_attach(encoder, bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(dev,
+ "failed to attach bridge to encoder for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ dev_err(dev, "failed to init bridge connector for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ dev_err(dev,
+ "failed to attach encoder to connector for CRTC%u: %d\n",
+ crtc->index, ret);
+
+ return ret;
+}
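+
+/*
+ * Note that a -ENODEV from devm_drm_of_get_bridge() is treated above as
+ * "no bridge described for this CRTC" rather than as an error, presumably
+ * so that a device tree wiring up only one of the displays still probes.
+ */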
+
+int dc_kms_init(struct dc_drm_device *dc_drm)
+{
+ struct drm_device *drm = &dc_drm->base;
+ int ret, i;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
+ drm->mode_config.min_width = 60;
+ drm->mode_config.min_height = 60;
+ drm->mode_config.max_width = 8192;
+ drm->mode_config.max_height = 8192;
+ drm->mode_config.funcs = &dc_drm_mode_config_funcs;
+
+ drm->vblank_disable_immediate = true;
+ drm->max_vblank_count = DC_FRAMEGEN_MAX_FRAME_INDEX;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ ret = dc_crtc_init(dc_drm, i);
+ if (ret)
+ return ret;
+
+ ret = dc_kms_init_encoder_per_crtc(dc_drm, i);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ ret = dc_crtc_post_init(dc_drm, i);
+ if (ret)
+ return ret;
+ }
+
+ ret = drm_vblank_init(drm, DC_DISPLAYS);
+ if (ret) {
+ dev_err(drm->dev, "failed to init vblank support: %d\n", ret);
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ drm_kms_helper_poll_init(drm);
+
+ return 0;
+}
+
+void dc_kms_uninit(struct dc_drm_device *dc_drm)
+{
+ drm_kms_helper_poll_fini(&dc_drm->base);
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-kms.h b/drivers/gpu/drm/imx/dc/dc-kms.h
new file mode 100644
index 000000000000..cd7860eff986
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-kms.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_KMS_H__
+#define __DC_KMS_H__
+
+#include <linux/completion.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+#define DC_CRTC_IRQS 5
+
+struct dc_crtc_irq {
+ struct dc_crtc *dc_crtc;
+ unsigned int irq;
+};
+
+/**
+ * struct dc_crtc - DC specific drm_crtc
+ *
+ * Each display controller contains one content stream and one safety stream.
+ * In general, the two streams have the same functionality. One stream is
+ * overlaid on the other by @fg. This driver chooses to generate a black
+ * constant color from the content stream as the background color, to build
+ * plane(s) on the content stream by using layerblend(s), and to always
+ * generate a constant color from the safety stream. Note that, thanks to
+ * the decoupled timing, the safety stream keeps showing the constant color
+ * properly even when the content stream has completely hung due to a
+ * malfunction of this driver.
+ */
+struct dc_crtc {
+ /** @base: base drm_crtc structure */
+ struct drm_crtc base;
+ /** @de: display engine */
+ struct dc_de *de;
+ /** @cf_cont: content stream constframe */
+ struct dc_cf *cf_cont;
+ /** @cf_safe: safety stream constframe */
+ struct dc_cf *cf_safe;
+ /** @ed_cont: content stream extdst */
+ struct dc_ed *ed_cont;
+ /** @ed_safe: safety stream extdst */
+ struct dc_ed *ed_safe;
+ /** @fg: framegen */
+ struct dc_fg *fg;
+ /**
+ * @irq_dec_framecomplete:
+ *
+ * display engine configuration frame complete interrupt
+ */
+ unsigned int irq_dec_framecomplete;
+ /**
+ * @irq_dec_seqcomplete:
+ *
+ * display engine configuration sequence complete interrupt
+ */
+ unsigned int irq_dec_seqcomplete;
+ /**
+ * @irq_dec_shdload:
+ *
+ * display engine configuration shadow load interrupt
+ */
+ unsigned int irq_dec_shdload;
+ /**
+ * @irq_ed_cont_shdload:
+ *
+ * content stream extdst shadow load interrupt
+ */
+ unsigned int irq_ed_cont_shdload;
+ /**
+ * @irq_ed_safe_shdload:
+ *
+ * safety stream extdst shadow load interrupt
+ */
+ unsigned int irq_ed_safe_shdload;
+ /**
+ * @dec_seqcomplete_done:
+ *
+ * display engine configuration sequence completion
+ */
+ struct completion dec_seqcomplete_done;
+ /**
+ * @dec_shdload_done:
+ *
+ * display engine configuration shadow load completion
+ */
+ struct completion dec_shdload_done;
+ /**
+ * @ed_cont_shdload_done:
+ *
+ * content stream extdst shadow load completion
+ */
+ struct completion ed_cont_shdload_done;
+ /**
+ * @ed_safe_shdload_done:
+ *
+ * safety stream extdst shadow load completion
+ */
+ struct completion ed_safe_shdload_done;
+ /** @event: cached pending vblank event */
+ struct drm_pending_vblank_event *event;
+ /** @irqs: interrupt list */
+ struct dc_crtc_irq irqs[DC_CRTC_IRQS];
+};
+
+/**
+ * struct dc_plane - DC specific drm_plane
+ *
+ * Build a plane on content stream with a fetchunit and a layerblend.
+ */
+struct dc_plane {
+ /** @base: base drm_plane structure */
+ struct drm_plane base;
+ /** @fu: fetchunit */
+ struct dc_fu *fu;
+ /** @cf: content stream constframe */
+ struct dc_cf *cf;
+ /** @lb: layerblend */
+ struct dc_lb *lb;
+ /** @ed: content stream extdst */
+ struct dc_ed *ed;
+};
+
+#endif /* __DC_KMS_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-lb.c b/drivers/gpu/drm/imx/dc/dc-lb.c
new file mode 100644
index 000000000000..38f966625d38
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-lb.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_blend.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0)
+#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \
+ FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x))
+#define PIXENGCFG_DYNAMIC_SEC_SEL_MASK GENMASK(13, 8)
+#define PIXENGCFG_DYNAMIC_SEC_SEL(x) \
+ FIELD_PREP(PIXENGCFG_DYNAMIC_SEC_SEL_MASK, (x))
+
+#define STATICCONTROL 0x8
+#define SHDTOKSEL_MASK GENMASK(4, 3)
+#define SHDTOKSEL(x) FIELD_PREP(SHDTOKSEL_MASK, (x))
+#define SHDLDSEL_MASK GENMASK(2, 1)
+#define SHDLDSEL(x) FIELD_PREP(SHDLDSEL_MASK, (x))
+
+#define CONTROL 0xc
+#define CTRL_MODE_MASK BIT(0)
+#define CTRL_MODE(x) FIELD_PREP(CTRL_MODE_MASK, (x))
+
+#define BLENDCONTROL 0x10
+#define ALPHA_MASK GENMASK(23, 16)
+#define ALPHA(x) FIELD_PREP(ALPHA_MASK, (x))
+#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0)
+#define PRIM_C_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x))
+#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4)
+#define SEC_C_BLD_FUNC(x) \
+ FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x))
+#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8)
+#define PRIM_A_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x))
+#define SEC_A_BLD_FUNC_MASK GENMASK(14, 12)
+#define SEC_A_BLD_FUNC(x) \
+ FIELD_PREP(SEC_A_BLD_FUNC_MASK, (x))
+
+#define POSITION 0x14
+#define XPOS_MASK GENMASK(15, 0)
+#define XPOS(x) FIELD_PREP(XPOS_MASK, (x))
+#define YPOS_MASK GENMASK(31, 16)
+#define YPOS(x) FIELD_PREP(YPOS_MASK, (x))
+
+enum dc_lb_blend_func {
+ DC_LAYERBLEND_BLEND_ZERO,
+ DC_LAYERBLEND_BLEND_ONE,
+ DC_LAYERBLEND_BLEND_PRIM_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_PRIM_ALPHA,
+ DC_LAYERBLEND_BLEND_SEC_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_SEC_ALPHA,
+ DC_LAYERBLEND_BLEND_CONST_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_CONST_ALPHA,
+};
+
+enum dc_lb_shadow_sel {
+ BOTH = 0x2,
+};
+
+static const struct dc_subdev_info dc_lb_info[] = {
+ { .reg_start = 0x56180ba0, .id = 0, },
+ { .reg_start = 0x56180bc0, .id = 1, },
+ { .reg_start = 0x56180be0, .id = 2, },
+ { .reg_start = 0x56180c00, .id = 3, },
+};
+
+static const struct regmap_range dc_lb_pec_regmap_access_ranges[] = {
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_lb_pec_regmap_access_table = {
+ .yes_ranges = dc_lb_pec_regmap_access_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_lb_pec_regmap_access_ranges),
+};
+
+static const struct regmap_config dc_lb_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_lb_pec_regmap_access_table,
+ .rd_table = &dc_lb_pec_regmap_access_table,
+ .max_register = PIXENGCFG_DYNAMIC,
+};
+
+static const struct regmap_range dc_lb_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, POSITION),
+};
+
+static const struct regmap_access_table dc_lb_regmap_access_table = {
+ .yes_ranges = dc_lb_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_lb_regmap_ranges),
+};
+
+static const struct regmap_config dc_lb_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_lb_regmap_access_table,
+ .rd_table = &dc_lb_regmap_access_table,
+ .max_register = POSITION,
+};
+
+static const enum dc_link_id prim_sels[] = {
+ /* common options */
+ LINK_ID_NONE,
+ LINK_ID_CONSTFRAME0,
+ LINK_ID_CONSTFRAME1,
+ LINK_ID_CONSTFRAME4,
+ LINK_ID_CONSTFRAME5,
+ /*
+ * special options:
+ * layerblend(n) has n special options,
+ * layerblend0 through layerblend(n - 1);
+ * e.g., layerblend3 has the 3 special
+ * options layerblend0/1/2.
+ */
+ LINK_ID_LAYERBLEND0,
+ LINK_ID_LAYERBLEND1,
+ LINK_ID_LAYERBLEND2,
+ LINK_ID_LAYERBLEND3,
+};
+
+static const enum dc_link_id sec_sels[] = {
+ LINK_ID_NONE,
+ LINK_ID_FETCHWARP2,
+ LINK_ID_FETCHLAYER0,
+};
+
+enum dc_link_id dc_lb_get_link_id(struct dc_lb *lb)
+{
+ return lb->link;
+}
+
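+/*
+ * Only the first (5 common + lb->id special) entries of prim_sels[] are
+ * valid for a given layerblend instance; fixed_sels_num below recovers
+ * the count of common entries (5) by subtracting the 4 special
+ * layerblend entries from the array size.
+ */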
+void dc_lb_pec_dynamic_prim_sel(struct dc_lb *lb, enum dc_link_id prim)
+{
+ int fixed_sels_num = ARRAY_SIZE(prim_sels) - 4;
+ int i;
+
+ for (i = 0; i < fixed_sels_num + lb->id; i++) {
+ if (prim_sels[i] == prim) {
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC,
+ PIXENGCFG_DYNAMIC_PRIM_SEL_MASK,
+ PIXENGCFG_DYNAMIC_PRIM_SEL(prim));
+ return;
+ }
+ }
+
+ dev_warn(lb->dev, "invalid primary input selection:%d\n", prim);
+}
+
+void dc_lb_pec_dynamic_sec_sel(struct dc_lb *lb, enum dc_link_id sec)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sec_sels); i++) {
+ if (sec_sels[i] == sec) {
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC,
+ PIXENGCFG_DYNAMIC_SEC_SEL_MASK,
+ PIXENGCFG_DYNAMIC_SEC_SEL(sec));
+ return;
+ }
+ }
+
+ dev_warn(lb->dev, "invalid secondary input selection:%d\n", sec);
+}
+
+void dc_lb_pec_clken(struct dc_lb *lb, enum dc_pec_clken clken)
+{
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC, CLKEN_MASK,
+ CLKEN(clken));
+}
+
+static inline void dc_lb_enable_shden(struct dc_lb *lb)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_lb_shdtoksel(struct dc_lb *lb, enum dc_lb_shadow_sel sel)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDTOKSEL_MASK,
+ SHDTOKSEL(sel));
+}
+
+static inline void dc_lb_shdldsel(struct dc_lb *lb, enum dc_lb_shadow_sel sel)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDLDSEL_MASK,
+ SHDLDSEL(sel));
+}
+
+void dc_lb_mode(struct dc_lb *lb, enum dc_lb_mode mode)
+{
+ regmap_write_bits(lb->reg_cfg, CONTROL, CTRL_MODE_MASK, mode);
+}
+
+static inline void dc_lb_blendcontrol(struct dc_lb *lb)
+{
+ u32 val = PRIM_A_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ SEC_A_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ PRIM_C_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ SEC_C_BLD_FUNC(DC_LAYERBLEND_BLEND_CONST_ALPHA) |
+ ALPHA(DRM_BLEND_ALPHA_OPAQUE >> 8);
+
+ regmap_write(lb->reg_cfg, BLENDCONTROL, val);
+}
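+
+/*
+ * With the fixed blend setup above, both alpha terms and the primary
+ * color term use the ZERO function while the secondary color is weighted
+ * by the constant alpha, programmed fully opaque; the pixels from the
+ * secondary (fetchunit) input thus effectively replace the primary input.
+ */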
+
+void dc_lb_position(struct dc_lb *lb, int x, int y)
+{
+ regmap_write(lb->reg_cfg, POSITION, XPOS(x) | YPOS(y));
+}
+
+int dc_lb_get_id(struct dc_lb *lb)
+{
+ return lb->id;
+}
+
+void dc_lb_init(struct dc_lb *lb)
+{
+ dc_lb_pec_dynamic_prim_sel(lb, LINK_ID_NONE);
+ dc_lb_pec_dynamic_sec_sel(lb, LINK_ID_NONE);
+ dc_lb_pec_clken(lb, CLKEN_DISABLE);
+ dc_lb_shdldsel(lb, BOTH);
+ dc_lb_shdtoksel(lb, BOTH);
+ dc_lb_blendcontrol(lb);
+ dc_lb_enable_shden(lb);
+}
+
+static int dc_lb_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_lb *lb;
+
+ lb = devm_kzalloc(dev, sizeof(*lb), GFP_KERNEL);
+ if (!lb)
+ return -ENOMEM;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ lb->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_lb_pec_regmap_config);
+ if (IS_ERR(lb->reg_pec))
+ return PTR_ERR(lb->reg_pec);
+
+ lb->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_lb_cfg_regmap_config);
+ if (IS_ERR(lb->reg_cfg))
+ return PTR_ERR(lb->reg_cfg);
+
+ lb->id = dc_subdev_get_id(dc_lb_info, ARRAY_SIZE(dc_lb_info), res_pec);
+ if (lb->id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", lb->id);
+ return lb->id;
+ }
+
+ lb->dev = dev;
+ lb->link = LINK_ID_LAYERBLEND0 + lb->id;
+
+ dc_drm->lb[lb->id] = lb;
+
+ return 0;
+}
+
+static const struct component_ops dc_lb_ops = {
+ .bind = dc_lb_bind,
+};
+
+static int dc_lb_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_lb_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_lb_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_lb_ops);
+}
+
+static const struct of_device_id dc_lb_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-layerblend" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_lb_dt_ids);
+
+struct platform_driver dc_lb_driver = {
+ .probe = dc_lb_probe,
+ .remove = dc_lb_remove,
+ .driver = {
+ .name = "imx8-dc-layerblend",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_lb_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-pe.c b/drivers/gpu/drm/imx/dc/dc-pe.c
new file mode 100644
index 000000000000..6676c22f3f45
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-pe.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+static int dc_pe_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dc_drm_device *dc_drm = data;
+ struct dc_pe *pe;
+ int ret;
+
+ pe = devm_kzalloc(dev, sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return -ENOMEM;
+
+ pe->clk_axi = devm_clk_get(dev, NULL);
+ if (IS_ERR(pe->clk_axi))
+ return dev_err_probe(dev, PTR_ERR(pe->clk_axi),
+ "failed to get AXI clock\n");
+
+ pe->dev = dev;
+
+ dev_set_drvdata(dev, pe);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ dc_drm->pe = pe;
+
+ return 0;
+}
+
+/*
+ * It's possible to get the child device pointers from the child component
+ * bind callbacks, but that would rely on the component helper binding the
+ * pixel engine component first. To avoid that ordering dependency, this
+ * post-bind step fetches the pointers from dc_drm instead.
+ */
+void dc_pe_post_bind(struct dc_drm_device *dc_drm)
+{
+ struct dc_pe *pe = dc_drm->pe;
+ int i;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ pe->cf_safe[i] = dc_drm->cf_safe[i];
+ pe->cf_cont[i] = dc_drm->cf_cont[i];
+ pe->ed_safe[i] = dc_drm->ed_safe[i];
+ pe->ed_cont[i] = dc_drm->ed_cont[i];
+ }
+
+ for (i = 0; i < DC_DISP_FU_CNT; i++)
+ pe->fu_disp[i] = dc_drm->fu_disp[i];
+
+ for (i = 0; i < DC_LB_CNT; i++)
+ pe->lb[i] = dc_drm->lb[i];
+}
+
+static const struct component_ops dc_pe_ops = {
+ .bind = dc_pe_bind,
+};
+
+static int dc_pe_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = component_add(&pdev->dev, &dc_pe_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_pe_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_pe_ops);
+}
+
+static int dc_pe_runtime_suspend(struct device *dev)
+{
+ struct dc_pe *pe = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pe->clk_axi);
+
+ return 0;
+}
+
+static int dc_pe_runtime_resume(struct device *dev)
+{
+ struct dc_pe *pe = dev_get_drvdata(dev);
+ int i, ret;
+
+ ret = clk_prepare_enable(pe->clk_axi);
+ if (ret) {
+ dev_err(dev, "failed to enable AXI clock: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pe->cf_safe); i++)
+ dc_cf_init(pe->cf_safe[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->cf_cont); i++)
+ dc_cf_init(pe->cf_cont[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->ed_safe); i++)
+ dc_ed_init(pe->ed_safe[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->ed_cont); i++)
+ dc_ed_init(pe->ed_cont[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->fu_disp); i++)
+ pe->fu_disp[i]->ops.init(pe->fu_disp[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->lb); i++)
+ dc_lb_init(pe->lb[i]);
+
+ return 0;
+}
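+
+/*
+ * The re-initialization above assumes the pixel engine loses its register
+ * state while runtime suspended (e.g. when its power domain is gated), so
+ * every constframe, extdst, fetchunit and layerblend is reprogrammed to
+ * its default setup on resume.
+ */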
+
+static const struct dev_pm_ops dc_pe_pm_ops = {
+ RUNTIME_PM_OPS(dc_pe_runtime_suspend, dc_pe_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_pe_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-pixel-engine", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_pe_dt_ids);
+
+struct platform_driver dc_pe_driver = {
+ .probe = dc_pe_probe,
+ .remove = dc_pe_remove,
+ .driver = {
+ .name = "imx8-dc-pixel-engine",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_pe_dt_ids,
+ .pm = pm_sleep_ptr(&dc_pe_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-pe.h b/drivers/gpu/drm/imx/dc/dc-pe.h
new file mode 100644
index 000000000000..f5e01a6eb9e9
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-pe.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_PIXEL_ENGINE_H__
+#define __DC_PIXEL_ENGINE_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "dc-de.h"
+
+#define SHDEN BIT(0)
+
+#define CLKEN_MASK_SHIFT 24
+#define CLKEN_MASK (0x3 << CLKEN_MASK_SHIFT)
+#define CLKEN(n) ((n) << CLKEN_MASK_SHIFT)
+
+#define DC_DISP_FU_CNT 2
+#define DC_LB_CNT 4
+
+enum dc_link_id {
+ LINK_ID_NONE = 0x00,
+ LINK_ID_CONSTFRAME0 = 0x0c,
+ LINK_ID_CONSTFRAME4 = 0x0e,
+ LINK_ID_CONSTFRAME1 = 0x10,
+ LINK_ID_CONSTFRAME5 = 0x12,
+ LINK_ID_FETCHWARP2 = 0x14,
+ LINK_ID_FETCHLAYER0 = 0x1a,
+ LINK_ID_LAYERBLEND0 = 0x21,
+ LINK_ID_LAYERBLEND1 = 0x22,
+ LINK_ID_LAYERBLEND2 = 0x23,
+ LINK_ID_LAYERBLEND3 = 0x24,
+};
+
+enum dc_lb_mode {
+ LB_NEUTRAL, /* Output is the same as the primary input. */
+ LB_BLEND,
+};
+
+enum dc_pec_clken {
+ CLKEN_DISABLE,
+ CLKEN_AUTOMATIC,
+};
+
+struct dc_cf {
+ struct regmap *reg_cfg;
+ enum dc_link_id link;
+};
+
+struct dc_ed {
+ struct device *dev;
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ int irq_shdload;
+};
+
+struct dc_lb {
+ struct device *dev;
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ int id;
+ enum dc_link_id link;
+};
+
+struct dc_pe {
+ struct device *dev;
+ struct clk *clk_axi;
+ struct dc_cf *cf_safe[DC_DISPLAYS];
+ struct dc_cf *cf_cont[DC_DISPLAYS];
+ struct dc_ed *ed_safe[DC_DISPLAYS];
+ struct dc_ed *ed_cont[DC_DISPLAYS];
+ struct dc_fu *fu_disp[DC_DISP_FU_CNT];
+ struct dc_lb *lb[DC_LB_CNT];
+};
+
+/* Constant Frame Unit */
+enum dc_link_id dc_cf_get_link_id(struct dc_cf *cf);
+void dc_cf_framedimensions(struct dc_cf *cf, unsigned int w, unsigned int h);
+void dc_cf_constantcolor_black(struct dc_cf *cf);
+void dc_cf_constantcolor_blue(struct dc_cf *cf);
+void dc_cf_init(struct dc_cf *cf);
+
+/* External Destination Unit */
+void dc_ed_pec_src_sel(struct dc_ed *ed, enum dc_link_id src);
+void dc_ed_pec_sync_trigger(struct dc_ed *ed);
+void dc_ed_init(struct dc_ed *ed);
+
+/* Layer Blend Unit */
+enum dc_link_id dc_lb_get_link_id(struct dc_lb *lb);
+void dc_lb_pec_dynamic_prim_sel(struct dc_lb *lb, enum dc_link_id prim);
+void dc_lb_pec_dynamic_sec_sel(struct dc_lb *lb, enum dc_link_id sec);
+void dc_lb_pec_clken(struct dc_lb *lb, enum dc_pec_clken clken);
+void dc_lb_mode(struct dc_lb *lb, enum dc_lb_mode mode);
+void dc_lb_position(struct dc_lb *lb, int x, int y);
+int dc_lb_get_id(struct dc_lb *lb);
+void dc_lb_init(struct dc_lb *lb);
+
+#endif /* __DC_PIXEL_ENGINE_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-plane.c b/drivers/gpu/drm/imx/dc/dc-plane.c
new file mode 100644
index 000000000000..d8b946fb90de
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-plane.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/container_of.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+#include "dc-kms.h"
+
+#define DC_PLANE_MAX_PITCH 0x10000
+#define DC_PLANE_MAX_PIX_CNT 8192
+
+#define dc_plane_dbg(plane, fmt, ...) \
+do { \
+ struct drm_plane *_plane = (plane); \
+ drm_dbg_kms(_plane->dev, "[PLANE:%d:%s] " fmt, \
+ _plane->base.id, _plane->name, ##__VA_ARGS__); \
+} while (0)
+
+static const uint32_t dc_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const struct drm_plane_funcs dc_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static inline struct dc_plane *to_dc_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct dc_plane, base);
+}
+
+static int dc_plane_check_max_source_resolution(struct drm_plane_state *state)
+{
+ int src_h = drm_rect_height(&state->src) >> 16;
+ int src_w = drm_rect_width(&state->src) >> 16;
+
+ if (src_w > DC_PLANE_MAX_PIX_CNT || src_h > DC_PLANE_MAX_PIX_CNT) {
+ dc_plane_dbg(state->plane, "invalid source resolution\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dc_plane_check_fb(struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ dma_addr_t baseaddr = drm_fb_dma_get_gem_addr(fb, state, 0);
+
+ /* base address alignment */
+ if (baseaddr & 0x3) {
+ dc_plane_dbg(state->plane, "fb bad baddr alignment\n");
+ return -EINVAL;
+ }
+
+ /* pitches[0] range */
+ if (fb->pitches[0] > DC_PLANE_MAX_PITCH) {
+ dc_plane_dbg(state->plane, "fb pitches[0] is out of range\n");
+ return -EINVAL;
+ }
+
+ /* pitches[0] alignment */
+ if (fb->pitches[0] & 0x3) {
+ dc_plane_dbg(state->plane, "fb bad pitches[0] alignment\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dc_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ /* ok to disable */
+ if (!plane_state->fb)
+ return 0;
+
+ if (!plane_state->crtc) {
+ dc_plane_dbg(plane, "no CRTC in plane state\n");
+ return -EINVAL;
+ }
+
+ crtc_state =
+ drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, false);
+ if (ret) {
+ dc_plane_dbg(plane, "failed to check plane state: %d\n", ret);
+ return ret;
+ }
+
+ ret = dc_plane_check_max_source_resolution(plane_state);
+ if (ret)
+ return ret;
+
+ return dc_plane_check_fb(plane_state);
+}
+
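+/*
+ * Program the per-plane pipeline from front to back: the fetchunit scans
+ * the framebuffer out of memory, the layerblend blends it over the
+ * constframe background, and the content-stream extdst is pointed at the
+ * layerblend output.
+ */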
+static void
+dc_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct dc_plane *dplane = to_dc_plane(plane);
+ struct drm_framebuffer *fb = new_state->fb;
+ const struct dc_fu_ops *fu_ops;
+ struct dc_lb *lb = dplane->lb;
+ struct dc_fu *fu = dplane->fu;
+ dma_addr_t baseaddr;
+ int src_w, src_h;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
+
+ src_w = drm_rect_width(&new_state->src) >> 16;
+ src_h = drm_rect_height(&new_state->src) >> 16;
+
+ baseaddr = drm_fb_dma_get_gem_addr(fb, new_state, 0);
+
+ fu_ops = dc_fu_get_ops(dplane->fu);
+
+ fu_ops->set_layerblend(fu, lb);
+ fu_ops->set_burstlength(fu, baseaddr);
+ fu_ops->set_src_stride(fu, DC_FETCHUNIT_FRAC0, fb->pitches[0]);
+ fu_ops->set_src_buf_dimensions(fu, DC_FETCHUNIT_FRAC0, src_w, src_h);
+ fu_ops->set_fmt(fu, DC_FETCHUNIT_FRAC0, fb->format);
+ fu_ops->set_framedimensions(fu, src_w, src_h);
+ fu_ops->set_baseaddress(fu, DC_FETCHUNIT_FRAC0, baseaddr);
+ fu_ops->enable_src_buf(fu, DC_FETCHUNIT_FRAC0);
+
+ dc_plane_dbg(plane, "uses %s\n", fu_ops->get_name(fu));
+
+ dc_lb_pec_dynamic_prim_sel(lb, dc_cf_get_link_id(dplane->cf));
+ dc_lb_pec_dynamic_sec_sel(lb, fu_ops->get_link_id(fu));
+ dc_lb_mode(lb, LB_BLEND);
+ dc_lb_position(lb, new_state->dst.x1, new_state->dst.y1);
+ dc_lb_pec_clken(lb, CLKEN_AUTOMATIC);
+
+ dc_plane_dbg(plane, "uses LayerBlend%d\n", dc_lb_get_id(lb));
+
+ /* set ExtDst's source to LayerBlend */
+ dc_ed_pec_src_sel(dplane->ed, dc_lb_get_link_id(lb));
+
+ drm_dev_exit(idx);
+}
+
+static void dc_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct dc_plane *dplane = to_dc_plane(plane);
+ const struct dc_fu_ops *fu_ops;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
+
+ /* disable fetchunit in shadow */
+ fu_ops = dc_fu_get_ops(dplane->fu);
+ fu_ops->disable_src_buf(dplane->fu, DC_FETCHUNIT_FRAC0);
+
+ /* set ExtDst's source to ConstFrame */
+ dc_ed_pec_src_sel(dplane->ed, dc_cf_get_link_id(dplane->cf));
+
+ drm_dev_exit(idx);
+}
+
+static const struct drm_plane_helper_funcs dc_plane_helper_funcs = {
+ .atomic_check = dc_plane_atomic_check,
+ .atomic_update = dc_plane_atomic_update,
+ .atomic_disable = dc_plane_atomic_disable,
+};
+
+int dc_plane_init(struct dc_drm_device *dc_drm, struct dc_plane *dc_plane)
+{
+ struct drm_plane *plane = &dc_plane->base;
+ int ret;
+
+ ret = drm_universal_plane_init(&dc_drm->base, plane, 0, &dc_plane_funcs,
+ dc_plane_formats,
+ ARRAY_SIZE(dc_plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(plane, &dc_plane_helper_funcs);
+
+ dc_plane->fu = dc_drm->pe->fu_disp[plane->index];
+ dc_plane->cf = dc_drm->pe->cf_cont[plane->index];
+ dc_plane->lb = dc_drm->pe->lb[plane->index];
+ dc_plane->ed = dc_drm->pe->ed_cont[plane->index];
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-tc.c b/drivers/gpu/drm/imx/dc/dc-tc.c
new file mode 100644
index 000000000000..0bfd381b2cea
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-tc.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-de.h"
+
+#define TCON_CTRL 0x410
+#define CTRL_RST_VAL 0x01401408
+
+/* red: MAPBIT 29-20, green: MAPBIT 19-10, blue: MAPBIT 9-0 */
+#define MAPBIT3_0 0x418
+#define MAPBIT7_4 0x41c
+#define MAPBIT11_8 0x420
+#define MAPBIT15_12 0x424
+#define MAPBIT19_16 0x428
+#define MAPBIT23_20 0x42c
+#define MAPBIT27_24 0x430
+#define MAPBIT31_28 0x434
+
+static const struct dc_subdev_info dc_tc_info[] = {
+ { .reg_start = 0x5618c800, .id = 0, },
+ { .reg_start = 0x5618e400, .id = 1, },
+};
+
+static const struct regmap_range dc_tc_regmap_ranges[] = {
+ regmap_reg_range(TCON_CTRL, TCON_CTRL),
+ regmap_reg_range(MAPBIT3_0, MAPBIT31_28),
+};
+
+static const struct regmap_access_table dc_tc_regmap_access_table = {
+ .yes_ranges = dc_tc_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_tc_regmap_ranges),
+};
+
+static const struct regmap_config dc_tc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_tc_regmap_access_table,
+ .rd_table = &dc_tc_regmap_access_table,
+ .max_register = MAPBIT31_28,
+};
+
+/*
+ * The pixels that reach the TCON are always in 30-bit BGR format.
+ * The first bridge always receives pixels in 30-bit RGB format.
+ * So, map the format to MEDIA_BUS_FMT_RGB101010_1X30.
+ */
+static const u32 dc_tc_mapbit[] = {
+ 0x17161514, 0x1b1a1918, 0x0b0a1d1c, 0x0f0e0d0c,
+ 0x13121110, 0x03020100, 0x07060504, 0x00000908,
+};
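+
+/*
+ * Each MAPBIT register appears to hold four byte-sized fields, one per
+ * output bit, each selecting the input bit routed to that position; the
+ * table above swaps the 10-bit red and blue lanes so the internal 30-bit
+ * BGR ordering comes out in the RGB ordering the first bridge expects.
+ */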
+
+void dc_tc_init(struct dc_tc *tc)
+{
+ /* reset TCON_CTRL to POR default so that TCON works in bypass mode */
+ regmap_write(tc->reg, TCON_CTRL, CTRL_RST_VAL);
+
+ /* set format */
+ regmap_bulk_write(tc->reg, MAPBIT3_0, dc_tc_mapbit,
+ ARRAY_SIZE(dc_tc_mapbit));
+}
+
+static int dc_tc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res;
+ void __iomem *base;
+ struct dc_tc *tc;
+ int id;
+
+ tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
+ if (!tc)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ tc->reg = devm_regmap_init_mmio(dev, base, &dc_tc_regmap_config);
+ if (IS_ERR(tc->reg))
+ return PTR_ERR(tc->reg);
+
+ id = dc_subdev_get_id(dc_tc_info, ARRAY_SIZE(dc_tc_info), res);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ tc->dev = dev;
+ dc_drm->tc[id] = tc;
+
+ return 0;
+}
+
+static const struct component_ops dc_tc_ops = {
+ .bind = dc_tc_bind,
+};
+
+static int dc_tc_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_tc_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_tc_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_tc_ops);
+}
+
+static const struct of_device_id dc_tc_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-tcon" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_tc_dt_ids);
+
+struct platform_driver dc_tc_driver = {
+ .probe = dc_tc_probe,
+ .remove = dc_tc_remove,
+ .driver = {
+ .name = "imx8-dc-tcon",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_tc_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index f851e9ffdb28..9db1ceaed518 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -901,14 +901,15 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc)
static struct drm_framebuffer *
ingenic_drm_gem_fb_create(struct drm_device *drm, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct ingenic_drm *priv = drm_device_get_priv(drm);
if (priv->soc_info->map_noncoherent)
- return drm_gem_fb_create_with_dirty(drm, file, mode_cmd);
+ return drm_gem_fb_create_with_dirty(drm, file, info, mode_cmd);
- return drm_gem_fb_create(drm, file, mode_cmd);
+ return drm_gem_fb_create(drm, file, info, mode_cmd);
}
static struct drm_gem_object *
diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c
index 31b5a3e21911..0e9dba1ef4af 100644
--- a/drivers/gpu/drm/lib/drm_random.c
+++ b/drivers/gpu/drm/lib/drm_random.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 5deec673c11e..9722b847a539 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -341,7 +341,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
err = lima_sched_task_init(
submit->task, submit->ctx->context + submit->pipe,
- bos, submit->nr_bos, vm);
+ bos, submit->nr_bos, vm, file->client_id);
if (err)
goto err_out1;
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 7934098e651b..739e8c6c6d90 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -113,7 +113,8 @@ static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sch
int lima_sched_task_init(struct lima_sched_task *task,
struct lima_sched_context *context,
struct lima_bo **bos, int num_bos,
- struct lima_vm *vm)
+ struct lima_vm *vm,
+ u64 drm_client_id)
{
int err, i;
@@ -124,7 +125,8 @@ int lima_sched_task_init(struct lima_sched_task *task,
for (i = 0; i < num_bos; i++)
drm_gem_object_get(&bos[i]->base.base);
- err = drm_sched_job_init(&task->base, &context->base, 1, vm);
+ err = drm_sched_job_init(&task->base, &context->base, 1, vm,
+ drm_client_id);
if (err) {
kfree(task->bos);
return err;
@@ -410,7 +412,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
*/
if (dma_fence_is_signaled(task->fence)) {
DRM_WARN("%s spurious timeout\n", lima_ip_name(ip));
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
/*
@@ -427,7 +429,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
if (dma_fence_is_signaled(task->fence)) {
DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
/*
@@ -465,7 +467,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
drm_sched_resubmit_jobs(&pipe->base);
drm_sched_start(&pipe->base, 0);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void lima_sched_free_job(struct drm_sched_job *job)
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 85b23ba901d5..1a08faf8a529 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -88,7 +88,8 @@ struct lima_sched_pipe {
int lima_sched_task_init(struct lima_sched_task *task,
struct lima_sched_context *context,
struct lima_bo **bos, int num_bos,
- struct lima_vm *vm);
+ struct lima_vm *vm,
+ u64 drm_client_id);
void lima_sched_task_fini(struct lima_sched_task *task);
int lima_sched_context_init(struct lima_sched_pipe *pipe,
diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h
index 3a349d10304e..98a7fb2fa00e 100644
--- a/drivers/gpu/drm/lima/lima_trace.h
+++ b/drivers/gpu/drm/lima/lima_trace.h
@@ -14,21 +14,19 @@ DECLARE_EVENT_CLASS(lima_task,
TP_PROTO(struct lima_sched_task *task),
TP_ARGS(task),
TP_STRUCT__entry(
- __field(uint64_t, task_id)
__field(unsigned int, context)
__field(unsigned int, seqno)
__string(pipe, task->base.sched->name)
),
TP_fast_assign(
- __entry->task_id = task->base.id;
__entry->context = task->base.s_fence->finished.context;
__entry->seqno = task->base.s_fence->finished.seqno;
__assign_str(pipe);
),
- TP_printk("task=%llu, context=%u seqno=%u pipe=%s",
- __entry->task_id, __entry->context, __entry->seqno,
+ TP_printk("context=%u seqno=%u pipe=%s",
+ __entry->context, __entry->seqno,
__get_str(pipe))
);
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 58279ddaab3c..bef6eeb30d3e 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2118,7 +2118,8 @@ static void mtk_dp_update_plugged_status(struct mtk_dp *mtk_dp)
mutex_unlock(&mtk_dp->update_plugged_status_lock);
}
-static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+mtk_dp_bdg_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
enum drm_connector_status ret = connector_status_disconnected;
@@ -2725,9 +2726,10 @@ static int mtk_dp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
- mtk_dp = devm_kzalloc(dev, sizeof(*mtk_dp), GFP_KERNEL);
- if (!mtk_dp)
- return -ENOMEM;
+ mtk_dp = devm_drm_bridge_alloc(dev, struct mtk_dp, bridge,
+ &mtk_dp_bridge_funcs);
+ if (IS_ERR(mtk_dp))
+ return PTR_ERR(mtk_dp);
mtk_dp->dev = dev;
mtk_dp->data = (struct mtk_dp_data *)of_device_get_match_data(dev);
@@ -2785,7 +2787,6 @@ static int mtk_dp_probe(struct platform_device *pdev)
if (ret)
return ret;
- mtk_dp->bridge.funcs = &mtk_dp_bridge_funcs;
mtk_dp->bridge.of_node = dev->of_node;
mtk_dp->bridge.type = mtk_dp->data->bridge_type;
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index a2fdceadf209..61cab32e213a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -1266,9 +1266,10 @@ static int mtk_dpi_probe(struct platform_device *pdev)
struct mtk_dpi *dpi;
int ret;
- dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
- if (!dpi)
- return -ENOMEM;
+ dpi = devm_drm_bridge_alloc(dev, struct mtk_dpi, bridge,
+ &mtk_dpi_bridge_funcs);
+ if (IS_ERR(dpi))
+ return PTR_ERR(dpi);
dpi->dev = dev;
dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
@@ -1320,7 +1321,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dpi);
- dpi->bridge.funcs = &mtk_dpi_bridge_funcs;
dpi->bridge.of_node = dev->of_node;
dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 7c0c12dde488..d5e6bab36414 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -43,14 +43,13 @@ static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = {
static struct drm_framebuffer *
mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev, cmd);
-
if (info->num_planes != 1)
return ERR_PTR(-EINVAL);
- return drm_gem_fb_create(dev, file, cmd);
+ return drm_gem_fb_create(dev, file, info, cmd);
}
static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 4fe1f38a3c4b..d7726091819c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1196,9 +1196,10 @@ static int mtk_dsi_probe(struct platform_device *pdev)
int irq_num;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct mtk_dsi, bridge,
+ &mtk_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->driver_data = of_device_get_match_data(dev);
@@ -1246,7 +1247,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dsi);
- dsi->bridge.funcs = &mtk_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 8803cd4a8bc9..845fd8aa43c3 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1174,7 +1174,8 @@ static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
* Bridge callbacks
*/
-static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+mtk_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1642,9 +1643,10 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(dev, struct mtk_hdmi, bridge,
+ &mtk_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
hdmi->dev = dev;
hdmi->conf = of_device_get_match_data(dev);
@@ -1666,7 +1668,6 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"Failed to register audio driver\n");
- hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index c9678dc68fa1..dc374bfc5951 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -227,9 +227,12 @@ int meson_encoder_cvbs_probe(struct meson_drm *priv)
struct device_node *remote;
int ret;
- meson_encoder_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_encoder_cvbs), GFP_KERNEL);
- if (!meson_encoder_cvbs)
- return -ENOMEM;
+ meson_encoder_cvbs = devm_drm_bridge_alloc(priv->dev,
+ struct meson_encoder_cvbs,
+ bridge,
+ &meson_encoder_cvbs_bridge_funcs);
+ if (IS_ERR(meson_encoder_cvbs))
+ return PTR_ERR(meson_encoder_cvbs);
/* CVBS Connector Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0);
@@ -245,7 +248,6 @@ int meson_encoder_cvbs_probe(struct meson_drm *priv)
"Failed to find CVBS Connector bridge\n");
/* CVBS Encoder Bridge */
- meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs;
meson_encoder_cvbs->bridge.of_node = priv->dev->of_node;
meson_encoder_cvbs->bridge.type = DRM_MODE_CONNECTOR_Composite;
meson_encoder_cvbs->bridge.ops = DRM_BRIDGE_OP_MODES;
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index 3db518e5f95d..6c6624f9ba24 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -106,9 +106,12 @@ int meson_encoder_dsi_probe(struct meson_drm *priv)
struct device_node *remote;
int ret;
- meson_encoder_dsi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_dsi), GFP_KERNEL);
- if (!meson_encoder_dsi)
- return -ENOMEM;
+ meson_encoder_dsi = devm_drm_bridge_alloc(priv->dev,
+ struct meson_encoder_dsi,
+ bridge,
+ &meson_encoder_dsi_bridge_funcs);
+ if (IS_ERR(meson_encoder_dsi))
+ return PTR_ERR(meson_encoder_dsi);
/* DSI Transceiver Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 2, 0);
@@ -123,7 +126,6 @@ int meson_encoder_dsi_probe(struct meson_drm *priv)
"Failed to find DSI transceiver bridge\n");
/* DSI Encoder Bridge */
- meson_encoder_dsi->bridge.funcs = &meson_encoder_dsi_bridge_funcs;
meson_encoder_dsi->bridge.of_node = priv->dev->of_node;
meson_encoder_dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index ab08d690d882..8205ee56a691 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -376,9 +376,12 @@ int meson_encoder_hdmi_probe(struct meson_drm *priv)
struct device_node *remote;
int ret;
- meson_encoder_hdmi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_hdmi), GFP_KERNEL);
- if (!meson_encoder_hdmi)
- return -ENOMEM;
+ meson_encoder_hdmi = devm_drm_bridge_alloc(priv->dev,
+ struct meson_encoder_hdmi,
+ bridge,
+ &meson_encoder_hdmi_bridge_funcs);
+ if (IS_ERR(meson_encoder_hdmi))
+ return PTR_ERR(meson_encoder_hdmi);
/* HDMI Transceiver Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 1, 0);
@@ -395,7 +398,6 @@ int meson_encoder_hdmi_probe(struct meson_drm *priv)
}
/* HDMI Encoder Bridge */
- meson_encoder_hdmi->bridge.funcs = &meson_encoder_hdmi_bridge_funcs;
meson_encoder_hdmi->bridge.of_node = priv->dev->of_node;
meson_encoder_hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
meson_encoder_hdmi->bridge.interlace_allowed = true;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 819a7e9381e3..f4bf40cd7c88 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -382,10 +382,10 @@ int mgag200_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
.destroy = drm_plane_cleanup, \
DRM_GEM_SHADOW_PLANE_FUNCS
-void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, const struct drm_format_info *format);
-void mgag200_crtc_set_gamma(struct mga_device *mdev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut);
+void mgag200_crtc_fill_gamma(struct mga_device *mdev, const struct drm_format_info *format);
+void mgag200_crtc_load_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut);
enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index c20ed0ab50ec..23debc70dc54 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -200,9 +200,9 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200er_reset_tagfifo(mdev);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index 78be964eb97c..f8796e2b7a0f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -201,9 +201,9 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200ev_set_hiprilvl(mdev);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index 7a32d3b1d226..e80da12ba1fe 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -332,9 +332,9 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 6067d08aeee3..951d715dea30 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
@@ -30,35 +31,37 @@
* This file contains setup code for the CRTC.
*/
-void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
- const struct drm_format_info *format)
+static void mgag200_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
{
- int i;
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ WREG8(DAC_INDEX + MGA1064_INDEX, i8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, r8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, g8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, b8);
+}
- WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+void mgag200_crtc_fill_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format)
+{
+ struct drm_crtc *crtc = &mdev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
- /* Use better interpolation, to take 32 values from 0 to 255 */
- for (i = 0; i < MGAG200_LUT_SIZE / 8; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 8 + i / 4);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 4 + i / 16);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 8 + i / 4);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = MGAG200_LUT_SIZE / 8; i < MGAG200_LUT_SIZE / 4; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 4 + i / 16);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- }
+ drm_crtc_fill_gamma_565(crtc, mgag200_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < MGAG200_LUT_SIZE; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
- }
+ drm_crtc_fill_gamma_888(crtc, mgag200_set_gamma_lut);
break;
default:
drm_warn_once(&mdev->base, "Unsupported format %p4cc for gamma correction\n",
@@ -67,36 +70,19 @@ void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
}
}
-void mgag200_crtc_set_gamma(struct mga_device *mdev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+void mgag200_crtc_load_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
- int i;
-
- WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+ struct drm_crtc *crtc = &mdev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
- /* Use better interpolation, to take 32 values from lut[0] to lut[255] */
- for (i = 0; i < MGAG200_LUT_SIZE / 8; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 8 + i / 4].red >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 4 + i / 16].green >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 8 + i / 4].blue >> 8);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = MGAG200_LUT_SIZE / 8; i < MGAG200_LUT_SIZE / 4; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 4 + i / 16].green >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- }
+ drm_crtc_load_gamma_565_from_888(crtc, lut, mgag200_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < MGAG200_LUT_SIZE; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].red >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].green >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].blue >> 8);
- }
+ drm_crtc_load_gamma_888(crtc, lut, mgag200_set_gamma_lut);
break;
default:
drm_warn_once(&mdev->base, "Unsupported format %p4cc for gamma correction\n",
@@ -642,9 +628,9 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
const struct drm_format_info *format = mgag200_crtc_state->format;
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
}
}
@@ -665,9 +651,9 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
funcs->pixpllc_atomic_update(crtc, old_state);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
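The mgag200 hunks above replace two hand-rolled palette loops with shared DRM gamma helpers that take a per-entry callback (mgag200_set_gamma_lut writes the index, then R/G/B, to the DAC). A minimal standalone model of the callback contract those helpers imply — helper names come from the diff, the exact kernel prototypes are assumed:

#include <stdint.h>
#include <stdio.h>

/*
 * The fill/load helpers iterate the LUT and hand each entry to a
 * driver callback as 16-bit components; the driver truncates to the
 * DAC's 8-bit palette width. Illustrative only.
 */
typedef void (*set_lut_fn)(unsigned int index,
                           uint16_t red, uint16_t green, uint16_t blue);

static void fill_gamma_888(set_lut_fn set)
{
    /* Identity ramp: entry i maps to component value i (8-bit). */
    for (unsigned int i = 0; i < 256; i++) {
        uint16_t c = (uint16_t)(i << 8 | i);   /* widen 8 -> 16 bit */
        set(i, c, c, c);
    }
}

static void print_lut_entry(unsigned int index,
                            uint16_t red, uint16_t green, uint16_t blue)
{
    /* Stand-in for the WREG8() palette writes: take the high byte. */
    printf("%3u: %02x %02x %02x\n", index, red >> 8, green >> 8, blue >> 8);
}

int main(void)
{
    fill_gamma_888(print_lut_entry);
    return 0;
}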
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 7f127e2ae442..250246f81ea9 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -15,21 +15,13 @@ config DRM_MSM
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
- select DRM_CLIENT_SELECTION
- select DRM_DISPLAY_DP_AUX_BUS
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_BRIDGE_CONNECTOR
select DRM_EXEC
- select DRM_KMS_HELPER
- select DRM_PANEL
- select DRM_BRIDGE
- select DRM_PANEL_BRIDGE
+ select DRM_GPUVM
select DRM_SCHED
- select FB_SYSMEM_HELPERS if DRM_FBDEV_EMULATION
select SHMEM
select TMPFS
select QCOM_SCM
+ select QCOM_UBWC_CONFIG
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
@@ -66,6 +58,22 @@ config DRM_MSM_VALIDATE_XML
Validate XML files with register definitions against rules-fd schema.
This option is mostly targeting DRM MSM developers. If unsure, say N.
+config DRM_MSM_KMS
+ def_bool n
+ depends on DRM_MSM
+ select DRM_BRIDGE
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_CLIENT_SELECTION
+ select DRM_DISPLAY_HELPER
+ select DRM_KMS_HELPER
+ select DRM_PANEL
+ select DRM_PANEL_BRIDGE
+
+config DRM_MSM_KMS_FBDEV
+ def_bool DRM_FBDEV_EMULATION
+ depends on DRM_MSM_KMS
+ select FB_SYSMEM_HELPERS
+
config DRM_MSM_MDSS
bool
depends on DRM_MSM
@@ -74,6 +82,7 @@ config DRM_MSM_MDSS
config DRM_MSM_MDP4
bool "Enable MDP4 support in MSM DRM driver"
depends on DRM_MSM
+ select DRM_MSM_KMS
default y
help
Compile in support for the Mobile Display Processor v4 (MDP4) in
@@ -84,6 +93,7 @@ config DRM_MSM_MDP5
bool "Enable MDP5 support in MSM DRM driver"
depends on DRM_MSM
select DRM_MSM_MDSS
+ select DRM_MSM_KMS
default y
help
Compile in support for the Mobile Display Processor v5 (MDP5) in
@@ -94,6 +104,7 @@ config DRM_MSM_DPU
bool "Enable DPU support in MSM DRM driver"
depends on DRM_MSM
select DRM_MSM_MDSS
+ select DRM_MSM_KMS
select DRM_DISPLAY_DSC_HELPER
default y
help
@@ -104,8 +115,11 @@ config DRM_MSM_DPU
config DRM_MSM_DP
bool "Enable DisplayPort support in MSM DRM driver"
depends on DRM_MSM
+ depends on DRM_MSM_KMS
select DRM_DISPLAY_HDMI_AUDIO_HELPER
select RATIONAL
+ select DRM_DISPLAY_DP_AUX_BUS
+ select DRM_DISPLAY_DP_HELPER
default y
help
Compile in support for DP driver in MSM DRM driver. DP external
@@ -115,6 +129,7 @@ config DRM_MSM_DP
config DRM_MSM_DSI
bool "Enable DSI support in MSM DRM driver"
depends on DRM_MSM
+ depends on DRM_MSM_KMS
select DRM_PANEL
select DRM_MIPI_DSI
select DRM_DISPLAY_DSC_HELPER
@@ -170,6 +185,7 @@ config DRM_MSM_DSI_7NM_PHY
config DRM_MSM_HDMI
bool "Enable HDMI support in MSM DRM driver"
depends on DRM_MSM
+ depends on DRM_MSM_KMS
default y
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
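The new DRM_MSM_KMS symbol is a hidden def_bool: the modesetting front-ends (MDP4/MDP5/DPU) select it, and DP/DSI/HDMI depend on it, so a GPU-only configuration drops every display helper. Splits like this are usually paired with static-inline stubs so GPU-only builds still link; a hedged sketch of that common pattern (the function name here is hypothetical, not taken from this series):

#ifdef CONFIG_DRM_MSM_KMS
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv);
#else
static inline int msm_drm_kms_init(struct device *dev,
                                   const struct drm_driver *drv)
{
    return -ENODEV; /* display support compiled out */
}
#endif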
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 7a2ada6e2d74..0c0dfb25f01b 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -100,18 +100,15 @@ msm-display-$(CONFIG_DRM_MSM_DPU) += \
msm-display-$(CONFIG_DRM_MSM_MDSS) += \
msm_mdss.o \
-msm-display-y += \
+msm-display-$(CONFIG_DRM_MSM_KMS) += \
disp/mdp_format.o \
disp/mdp_kms.o \
disp/msm_disp_snapshot.o \
disp/msm_disp_snapshot_util.o \
msm-y += \
- msm_atomic.o \
- msm_atomic_tracepoints.o \
msm_debugfs.o \
msm_drv.o \
- msm_fb.o \
msm_fence.o \
msm_gem.o \
msm_gem_prime.o \
@@ -122,21 +119,24 @@ msm-y += \
msm_gpu_devfreq.o \
msm_io_utils.o \
msm_iommu.o \
- msm_kms.o \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \
msm_submitqueue.o \
+ msm_syncobj.o \
msm_gpu_tracepoints.o \
-msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
+msm-$(CONFIG_DRM_MSM_KMS) += \
+ msm_atomic.o \
+ msm_atomic_tracepoints.o \
+ msm_fb.o \
+ msm_kms.o \
-msm-display-$(CONFIG_DEBUG_FS) += \
- dp/dp_debug.o
+msm-$(CONFIG_DRM_MSM_KMS_FBDEV) += msm_fbdev.o
msm-display-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
- dp/dp_catalog.o \
dp/dp_ctrl.o \
+ dp/dp_debug.o \
dp/dp_display.o \
dp/dp_drm.o \
dp/dp_link.o \
@@ -159,7 +159,8 @@ msm-display-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
msm-display-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
msm-display-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
-msm-y += $(adreno-y) $(msm-display-y)
+msm-y += $(adreno-y)
+msm-$(CONFIG_DRM_MSM_KMS) += $(msm-display-y)
obj-$(CONFIG_DRM_MSM) += msm.o
@@ -195,6 +196,11 @@ ADRENO_HEADERS = \
generated/a4xx.xml.h \
generated/a5xx.xml.h \
generated/a6xx.xml.h \
+ generated/a6xx_descriptors.xml.h \
+ generated/a6xx_enums.xml.h \
+ generated/a6xx_perfcntrs.xml.h \
+ generated/a7xx_enums.xml.h \
+ generated/a7xx_perfcntrs.xml.h \
generated/a6xx_gmu.xml.h \
generated/adreno_common.xml.h \
generated/adreno_pm4.xml.h \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 379a3d346c30..ec38db45d8a3 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -113,7 +113,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
uint32_t *ptr, len;
int i, ret;
- a2xx_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+ a2xx_gpummu_params(to_msm_vm(gpu->vm)->mmu, &pt_base, &tran_error);
DBG("%s", gpu->name);
@@ -466,19 +466,18 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
return state;
}
-static struct msm_gem_address_space *
-a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+static struct drm_gpuvm *
+a2xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu);
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
- aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
- 0xfff * SZ_64K);
+ vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", SZ_16M, 0xfff * SZ_64K, true);
- if (IS_ERR(aspace) && !IS_ERR(mmu))
+ if (IS_ERR(vm) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
- return aspace;
+ return vm;
}
static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
@@ -504,7 +503,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a2xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = a2xx_create_address_space,
+ .create_vm = a2xx_create_vm,
.get_rptr = a2xx_get_rptr,
},
};
@@ -551,14 +550,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
else
adreno_gpu->registers = a220_registers;
- if (!gpu->aspace) {
- dev_err(dev->dev, "No memory protection without MMU\n");
- if (!allow_vram_carveout) {
- ret = -ENXIO;
- goto fail;
- }
- }
-
return gpu;
fail:
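a2xx_create_vm() shows the ownership rule the new msm_gem_vm_create() follows: the VM takes over the MMU only on success, so the caller destroys the MMU itself when VM creation failed but MMU creation succeeded. The trailing true is inferred from a6xx_create_private_vm()'s kernel_managed parameter (kernel-allocated VA, as opposed to userspace-managed VM_BIND VMs). A standalone model of that contract:

#include <stdio.h>
#include <stdlib.h>

/* Toy model: vm_create() takes ownership of mmu only on success. */
struct mmu { const char *name; };
struct vm  { struct mmu *mmu; };

static struct vm *vm_create(struct mmu *mmu, int fail)
{
    if (fail)
        return NULL;        /* ownership NOT transferred */
    struct vm *vm = malloc(sizeof(*vm));
    vm->mmu = mmu;          /* ownership transferred */
    return vm;
}

int main(void)
{
    struct mmu *mmu = malloc(sizeof(*mmu));
    struct vm *vm = vm_create(mmu, 1);
    if (!vm)
        free(mmu);  /* mirrors: if (IS_ERR(vm) && !IS_ERR(mmu)) destroy */
    return 0;
}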
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
index 4280f71e472a..0407c9bc8c1b 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
@@ -29,13 +29,16 @@ static void a2xx_gpummu_detach(struct msm_mmu *mmu)
}
static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
struct sg_dma_page_iter dma_iter;
unsigned prot_bits = 0;
+ WARN_ON(off != 0);
+
if (prot & IOMMU_WRITE)
prot_bits |= 1;
if (prot & IOMMU_READ)
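The msm_mmu map op gains a size_t off because VM_BIND-style partial mappings can start part-way into a buffer's backing pages; the a2xx gpummu cannot honor that, hence the WARN_ON(off != 0). A standalone model of what an offset-aware mapper has to do — purely illustrative, not the msm_iommu implementation:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Skip 'off' bytes of backing pages before installing PTEs at iova. */
static void map_range(const unsigned long *pages, size_t npages,
                      unsigned long iova, size_t off, size_t len)
{
    size_t first = off / PAGE_SIZE;   /* whole pages of offset */

    for (size_t i = first; i < npages && len; i++, len -= PAGE_SIZE) {
        printf("PTE %#lx -> page %#lx\n", iova, pages[i]);
        iova += PAGE_SIZE;
    }
}

int main(void)
{
    unsigned long pages[] = { 0x1000, 0x2000, 0x3000, 0x4000 };
    /* Map 2 pages of VA, starting 2 pages into the buffer. */
    map_range(pages, 4, 0x100000, 2 * PAGE_SIZE, 2 * PAGE_SIZE);
    return 0;
}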
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b6df115bb567..a956cd79195e 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -526,7 +526,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a3xx_gpu_busy,
.gpu_state_get = a3xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = adreno_create_address_space,
+ .create_vm = adreno_create_vm,
.get_rptr = a3xx_get_rptr,
},
};
@@ -581,21 +581,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail;
}
- if (!gpu->aspace) {
- /* TODO we think it is possible to configure the GPU to
- * restrict access to VRAM carveout. But the required
- * registers are unknown. For now just bail out and
- * limp along with just modesetting. If it turns out
- * to not be possible to restrict access, then we must
- * implement a cmdstream validator.
- */
- DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- if (!allow_vram_carveout) {
- ret = -ENXIO;
- goto fail;
- }
- }
-
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
if (IS_ERR(icc_path)) {
ret = PTR_ERR(icc_path);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index f1b18a6663f7..83f6329accba 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -645,7 +645,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a4xx_gpu_busy,
.gpu_state_get = a4xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = adreno_create_address_space,
+ .create_vm = adreno_create_vm,
.get_rptr = a4xx_get_rptr,
},
.get_timestamp = a4xx_get_timestamp,
@@ -695,21 +695,6 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->uche_trap_base = 0xffff0000ffff0000ull;
- if (!gpu->aspace) {
- /* TODO we think it is possible to configure the GPU to
- * restrict access to VRAM carveout. But the required
- * registers are unknown. For now just bail out and
- * limp along with just modesetting. If it turns out
- * to not be possible to restrict access, then we must
- * implement a cmdstream validator.
- */
- DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- if (!allow_vram_carveout) {
- ret = -ENXIO;
- goto fail;
- }
- }
-
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
if (IS_ERR(icc_path)) {
ret = PTR_ERR(icc_path);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 169b8fe688f8..625a4e787d8f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -116,13 +116,13 @@ reset_set(void *data, u64 val)
adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
if (a5xx_gpu->pm4_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 60aef0796236..4a04dc43a8e6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -622,7 +622,7 @@ static int a5xx_ucode_load(struct msm_gpu *gpu)
a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
MSM_BO_WC | MSM_BO_MAP_PRIV,
- gpu->aspace, &a5xx_gpu->shadow_bo,
+ gpu->vm, &a5xx_gpu->shadow_bo,
&a5xx_gpu->shadow_iova);
if (IS_ERR(a5xx_gpu->shadow))
@@ -835,8 +835,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
- BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
- hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
+ BUG_ON(adreno_gpu->ubwc_config->highest_bank_bit < 13);
+ hbb = adreno_gpu->ubwc_config->highest_bank_bit - 13;
gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, hbb << 7);
gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, hbb << 1);
@@ -1042,22 +1042,22 @@ static void a5xx_destroy(struct msm_gpu *gpu)
a5xx_preempt_fini(gpu);
if (a5xx_gpu->pm4_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
- msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->gpmu_bo);
}
if (a5xx_gpu->shadow_bo) {
- msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->shadow_bo);
}
@@ -1457,7 +1457,7 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
dumper->ptr = msm_gem_kernel_new(gpu->dev,
- SZ_1M, MSM_BO_WC, gpu->aspace,
+ SZ_1M, MSM_BO_WC, gpu->vm,
&dumper->bo, &dumper->iova);
if (!IS_ERR(dumper->ptr))
@@ -1557,7 +1557,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
if (a5xx_crashdumper_run(gpu, &dumper)) {
kfree(a5xx_state->hlsqregs);
- msm_gem_kernel_put(dumper.bo, gpu->aspace);
+ msm_gem_kernel_put(dumper.bo, gpu->vm);
return;
}
@@ -1565,7 +1565,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
count * sizeof(u32));
- msm_gem_kernel_put(dumper.bo, gpu->aspace);
+ msm_gem_kernel_put(dumper.bo, gpu->vm);
}
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1713,7 +1713,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a5xx_gpu_busy,
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
- .create_address_space = adreno_create_address_space,
+ .create_vm = adreno_create_vm,
.get_rptr = a5xx_get_rptr,
},
.get_timestamp = a5xx_get_timestamp,
@@ -1756,6 +1756,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct adreno_platform_config *config = pdev->dev.platform_data;
+ const struct qcom_ubwc_cfg_data *common_cfg;
struct a5xx_gpu *a5xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
@@ -1786,21 +1787,20 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
return ERR_PTR(ret);
}
- if (gpu->aspace)
- msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+ a5xx_fault_handler);
/* Set up the preemption specific bits and pieces for each ringbuffer */
a5xx_preempt_init(gpu);
- /* Set the highest bank bit */
- if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
- adreno_gpu->ubwc_config.highest_bank_bit = 15;
- else
- adreno_gpu->ubwc_config.highest_bank_bit = 14;
+ /* Inherit the common config and make some necessary fixups */
+ common_cfg = qcom_ubwc_config_get_data();
+ if (IS_ERR(common_cfg))
+ return ERR_CAST(common_cfg);
- /* a5xx only supports UBWC 1.0, these are not configurable */
- adreno_gpu->ubwc_config.macrotile_mode = 0;
- adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
+ /* Copy the data into the internal struct to drop the const qualifier (temporarily) */
+ adreno_gpu->_ubwc_config = *common_cfg;
+ adreno_gpu->ubwc_config = &adreno_gpu->_ubwc_config;
adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull;
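The a5xx init change introduces the pattern used throughout this series for the central UBWC config db: the driver reads the SoC-wide defaults through a const pointer, but first copies them by value into a private struct so it can apply per-GPU fixups before publishing. A standalone model of that copy-then-patch idiom (the struct layout is illustrative, not the kernel's qcom_ubwc_cfg_data):

#include <stdio.h>

struct ubwc_cfg {
    unsigned highest_bank_bit;
    unsigned ubwc_swizzle;
};

struct gpu {
    const struct ubwc_cfg *ubwc_config; /* what the rest of the driver reads */
    struct ubwc_cfg _ubwc_config;       /* private, mutable copy */
};

static const struct ubwc_cfg soc_defaults = {
    .highest_bank_bit = 15,
    .ubwc_swizzle = 0x6,
};

static void inherit_and_fix_up(struct gpu *gpu, int is_a610)
{
    gpu->_ubwc_config = soc_defaults;   /* the copy drops the const */
    if (is_a610)
        gpu->_ubwc_config.highest_bank_bit = 13; /* per-GPU fixup */
    gpu->ubwc_config = &gpu->_ubwc_config;       /* publish */
}

int main(void)
{
    struct gpu g;
    inherit_and_fix_up(&g, 1);
    printf("hbb=%u swizzle=%#x\n", g.ubwc_config->highest_bank_bit,
           g.ubwc_config->ubwc_swizzle);
    return 0;
}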
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 6b91e0bd1514..d6da7351cfbb 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -363,7 +363,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
ptr = msm_gem_kernel_new(drm, bosize,
- MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm,
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
if (IS_ERR(ptr))
return;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index b5f9d40687d5..e4924b5e1c48 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -255,7 +255,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
- MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+ MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->vm, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -263,9 +263,9 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
/* The buffer to store counters needs to be unprivileged */
counters = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_COUNTER_SIZE,
- MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
+ MSM_BO_WC, gpu->vm, &counters_bo, &counters_iova);
if (IS_ERR(counters)) {
- msm_gem_kernel_put(bo, gpu->aspace);
+ msm_gem_kernel_put(bo, gpu->vm);
return PTR_ERR(counters);
}
@@ -296,8 +296,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
int i;
for (i = 0; i < gpu->nr_rings; i++) {
- msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace);
- msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace);
+ msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->vm);
+ msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->vm);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 70f7ad806c34..00e1afd46b81 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -1335,7 +1335,7 @@ static const uint32_t a7xx_pwrup_reglist_regs[] = {
REG_A6XX_RB_NC_MODE_CNTL,
REG_A6XX_RB_CMP_DBG_ECO_CNTL,
REG_A7XX_GRAS_NC_MODE_CNTL,
- REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
+ REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE,
REG_A6XX_UCHE_GBIF_GX_CONFIG,
REG_A6XX_UCHE_CLIENT_PF,
REG_A6XX_TPL1_DBG_ECO_CNTL1,
@@ -1442,6 +1442,13 @@ static const struct adreno_info a7xx_gpus[] = {
.gmu_cgc_mode = 0x00020202,
},
.preempt_record_size = 4192 * SZ_1K,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 59, 1 },
+ { 7, 2 },
+ { 232, 3 },
+ { 146, 4 },
+ ),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */
.family = ADRENO_7XX_GEN3,
@@ -1474,6 +1481,45 @@ static const struct adreno_info a7xx_gpus[] = {
},
},
.preempt_record_size = 3572 * SZ_1K,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x43030c00),
+ .family = ADRENO_7XX_GEN2,
+ .fw = {
+ [ADRENO_FW_SQE] = "gen71500_sqe.fw",
+ [ADRENO_FW_GMU] = "gen71500_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV |
+ ADRENO_QUIRK_PREEMPTION,
+ .init = a6xx_gpu_init,
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a740_hwcg,
+ .protect = &a730_protect,
+ .pwrup_reglist = &a7xx_pwrup_reglist,
+ .gmu_chipid = 0x70f0000,
+ .gmu_cgc_mode = 0x00020222,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ .perfmode_bw = 16500000,
+ },
+ { /* sentinel */ },
+ },
+ },
+ .preempt_record_size = 4192 * SZ_1K,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 294, 1 },
+ { 263, 2 },
+ { 233, 3 },
+ { 141, 4 },
+ ),
}
};
DECLARE_ADRENO_GPULIST(a7xx);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 38c0f8ef85c3..28e6705c6da6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1259,15 +1259,17 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
- msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
-
- gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
- msm_gem_address_space_put(gmu->aspace);
+ struct msm_mmu *mmu = to_msm_vm(gmu->vm)->mmu;
+
+ msm_gem_kernel_put(gmu->hfi.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->debug.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->icache.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->dcache.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->dummy.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->log.obj, gmu->vm);
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(gmu->vm);
}
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
@@ -1296,7 +1298,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
if (IS_ERR(bo->obj))
return PTR_ERR(bo->obj);
- ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+ ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->vm, &bo->iova,
range_start, range_end);
if (ret) {
drm_gem_object_put(bo->obj);
@@ -1311,7 +1313,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
return 0;
}
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu)
{
struct msm_mmu *mmu;
@@ -1321,9 +1323,9 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
if (IS_ERR(mmu))
return PTR_ERR(mmu);
- gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
- if (IS_ERR(gmu->aspace))
- return PTR_ERR(gmu->aspace);
+ gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true);
+ if (IS_ERR(gmu->vm))
+ return PTR_ERR(gmu->vm);
return 0;
}
@@ -1940,7 +1942,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
goto err_put_device;
- ret = a6xx_gmu_memory_probe(gmu);
+ ret = a6xx_gmu_memory_probe(adreno_gpu->base.dev, gmu);
if (ret)
goto err_put_device;
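a6xx_gmu_memory_free() also illustrates the lifetime change: the old msm_gem_address_space had a dedicated put, while drm_gpuvm is a refcounted object shared with its mappings. GEM objects must therefore be released first (each drops its VM reference), the MMU detached next, and only the final drm_gpuvm_put() actually frees the VM. A toy refcount model of that ordering:

#include <stdio.h>

struct vm { int refs; };

static void vm_put(struct vm *vm)
{
    if (--vm->refs == 0)
        printf("VM freed (MMU already detached)\n");
}

int main(void)
{
    struct vm vm = { .refs = 3 };   /* GMU + two mapped BOs */
    vm_put(&vm);                    /* BO 1 released */
    vm_put(&vm);                    /* BO 2 released */
    vm_put(&vm);                    /* GMU's final reference */
    return 0;
}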
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index b2d4489b4024..d1ce11131ba6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -62,7 +62,7 @@ struct a6xx_gmu {
/* For serializing communication with the GMU: */
struct mutex lock;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
void __iomem *mmio;
void __iomem *rscc;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 491fde0083a2..45dd5fd1c2bf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -111,7 +111,8 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
struct msm_ringbuffer *ring, struct msm_gem_submit *submit)
{
bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
- struct msm_file_private *ctx = submit->queue->ctx;
+ struct msm_context *ctx = submit->queue->ctx;
+ struct drm_gpuvm *vm = msm_context_vm(submit->dev, ctx);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
phys_addr_t ttbr;
u32 asid;
@@ -120,7 +121,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
if (ctx->seqno == ring->cur_ctx_seqno)
return;
- if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
+ if (msm_iommu_pagetable_params(to_msm_vm(vm)->mmu, &ttbr, &asid))
return;
if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) {
@@ -603,117 +604,118 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_CP_PROTECT(protect->count_max - 1), protect->regs[i]);
}
-static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
+static int a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
{
- gpu->ubwc_config.rgb565_predicator = 0;
- gpu->ubwc_config.uavflagprd_inv = 0;
- gpu->ubwc_config.min_acc_len = 0;
- gpu->ubwc_config.ubwc_swizzle = 0x6;
- gpu->ubwc_config.macrotile_mode = 0;
- gpu->ubwc_config.highest_bank_bit = 15;
+ const struct qcom_ubwc_cfg_data *common_cfg;
+ struct qcom_ubwc_cfg_data *cfg = &gpu->_ubwc_config;
+
+ /* Inherit the common config and make some necessary fixups */
+ common_cfg = qcom_ubwc_config_get_data();
+ if (IS_ERR(common_cfg))
+ return PTR_ERR(common_cfg);
+
+ /* Copy the data into the internal struct to drop the const qualifier (temporarily) */
+ *cfg = *common_cfg;
+
+ cfg->ubwc_swizzle = 0x6;
+ cfg->highest_bank_bit = 15;
if (adreno_is_a610(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 13;
- gpu->ubwc_config.min_acc_len = 1;
- gpu->ubwc_config.ubwc_swizzle = 0x7;
+ cfg->highest_bank_bit = 13;
+ cfg->ubwc_swizzle = 0x7;
}
if (adreno_is_a618(gpu))
- gpu->ubwc_config.highest_bank_bit = 14;
+ cfg->highest_bank_bit = 14;
if (adreno_is_a619(gpu))
/* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
- gpu->ubwc_config.highest_bank_bit = 13;
+ cfg->highest_bank_bit = 13;
if (adreno_is_a619_holi(gpu))
- gpu->ubwc_config.highest_bank_bit = 13;
+ cfg->highest_bank_bit = 13;
- if (adreno_is_a621(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 13;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- }
-
- if (adreno_is_a623(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 16;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- gpu->ubwc_config.macrotile_mode = 1;
- }
+ if (adreno_is_a621(gpu))
+ cfg->highest_bank_bit = 13;
- if (adreno_is_a640_family(gpu))
- gpu->ubwc_config.amsbc = 1;
-
- if (adreno_is_a680(gpu))
- gpu->ubwc_config.macrotile_mode = 1;
+ if (adreno_is_a623(gpu))
+ cfg->highest_bank_bit = 16;
if (adreno_is_a650(gpu) ||
adreno_is_a660(gpu) ||
adreno_is_a690(gpu) ||
adreno_is_a730(gpu) ||
adreno_is_a740_family(gpu)) {
- /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
- gpu->ubwc_config.highest_bank_bit = 16;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- gpu->ubwc_config.macrotile_mode = 1;
+ /* TODO: get ddr type from bootloader and use 15 for LPDDR4 */
+ cfg->highest_bank_bit = 16;
}
if (adreno_is_a663(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 13;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- gpu->ubwc_config.macrotile_mode = 1;
- gpu->ubwc_config.ubwc_swizzle = 0x4;
+ cfg->highest_bank_bit = 13;
+ cfg->ubwc_swizzle = 0x4;
}
- if (adreno_is_7c3(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 14;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- gpu->ubwc_config.macrotile_mode = 1;
- }
+ if (adreno_is_7c3(gpu))
+ cfg->highest_bank_bit = 14;
- if (adreno_is_a702(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 14;
- gpu->ubwc_config.min_acc_len = 1;
- }
+ if (adreno_is_a702(gpu))
+ cfg->highest_bank_bit = 14;
+
+ if (cfg->highest_bank_bit != common_cfg->highest_bank_bit)
+ DRM_WARN_ONCE("Inconclusive highest_bank_bit value: %u (GPU) vs %u (UBWC_CFG)\n",
+ cfg->highest_bank_bit, common_cfg->highest_bank_bit);
+
+ if (cfg->ubwc_swizzle != common_cfg->ubwc_swizzle)
+ DRM_WARN_ONCE("Inconclusive ubwc_swizzle value: %u (GPU) vs %u (UBWC_CFG)\n",
+ cfg->ubwc_swizzle, common_cfg->ubwc_swizzle);
+
+ gpu->ubwc_config = &gpu->_ubwc_config;
+
+ return 0;
}
static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct qcom_ubwc_cfg_data *cfg = adreno_gpu->ubwc_config;
/*
* We subtract 13 from the highest bank bit (13 is the minimum value
* allowed by hw) and write the lowest two bits of the remaining value
* as hbb_lo and the one above it as hbb_hi to the hardware.
*/
- BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
- u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
+ BUG_ON(cfg->highest_bank_bit < 13);
+ u32 hbb = cfg->highest_bank_bit - 13;
+ bool rgb565_predicator = cfg->ubwc_enc_version >= UBWC_4_0;
+ u32 level2_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL2);
+ bool ubwc_mode = qcom_ubwc_get_ubwc_mode(cfg);
+ bool amsbc = cfg->ubwc_enc_version >= UBWC_3_0;
+ bool min_acc_len_64b = false;
+ u8 uavflagprd_inv = 0;
u32 hbb_hi = hbb >> 2;
u32 hbb_lo = hbb & 3;
- u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1;
- u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2);
+
+ if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu))
+ uavflagprd_inv = 2;
+
+ if (adreno_is_a610(adreno_gpu) || adreno_is_a702(adreno_gpu))
+ min_acc_len_64b = true;
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
level2_swizzling_dis << 12 |
- adreno_gpu->ubwc_config.rgb565_predicator << 11 |
- hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 |
- adreno_gpu->ubwc_config.min_acc_len << 3 |
+ rgb565_predicator << 11 |
+ hbb_hi << 10 | amsbc << 4 |
+ min_acc_len_64b << 3 |
hbb_lo << 1 | ubwc_mode);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
level2_swizzling_dis << 6 | hbb_hi << 4 |
- adreno_gpu->ubwc_config.min_acc_len << 3 |
+ min_acc_len_64b << 3 |
hbb_lo << 1 | ubwc_mode);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
level2_swizzling_dis << 12 | hbb_hi << 10 |
- adreno_gpu->ubwc_config.uavflagprd_inv << 4 |
- adreno_gpu->ubwc_config.min_acc_len << 3 |
+ uavflagprd_inv << 4 |
+ min_acc_len_64b << 3 |
hbb_lo << 1 | ubwc_mode);
if (adreno_is_a7xx(adreno_gpu))
@@ -721,10 +723,10 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
FIELD_PREP(GENMASK(8, 5), hbb_lo));
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
- adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21);
+ min_acc_len_64b << 23 | hbb_lo << 21);
gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL,
- adreno_gpu->ubwc_config.macrotile_mode);
+ cfg->macrotile_mode);
}
static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
@@ -970,7 +972,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
- msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->vm);
drm_gem_object_put(a6xx_gpu->sqe_bo);
a6xx_gpu->sqe_bo = NULL;
@@ -987,7 +989,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
MSM_BO_WC | MSM_BO_MAP_PRIV,
- gpu->aspace, &a6xx_gpu->shadow_bo,
+ gpu->vm, &a6xx_gpu->shadow_bo,
&a6xx_gpu->shadow_iova);
if (IS_ERR(a6xx_gpu->shadow))
@@ -998,7 +1000,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE,
MSM_BO_WC | MSM_BO_MAP_PRIV,
- gpu->aspace, &a6xx_gpu->pwrup_reglist_bo,
+ gpu->vm, &a6xx_gpu->pwrup_reglist_bo,
&a6xx_gpu->pwrup_reglist_iova);
if (IS_ERR(a6xx_gpu->pwrup_reglist_ptr))
@@ -2211,12 +2213,12 @@ static void a6xx_destroy(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (a6xx_gpu->sqe_bo) {
- msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->vm);
drm_gem_object_put(a6xx_gpu->sqe_bo);
}
if (a6xx_gpu->shadow_bo) {
- msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->vm);
drm_gem_object_put(a6xx_gpu->shadow_bo);
}
@@ -2256,8 +2258,8 @@ static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
mutex_unlock(&a6xx_gpu->gmu.lock);
}
-static struct msm_gem_address_space *
-a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+static struct drm_gpuvm *
+a6xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -2271,22 +2273,21 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
!device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY))
quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- return adreno_iommu_create_address_space(gpu, pdev, quirks);
+ return adreno_iommu_create_vm(gpu, pdev, quirks);
}
-static struct msm_gem_address_space *
-a6xx_create_private_address_space(struct msm_gpu *gpu)
+static struct drm_gpuvm *
+a6xx_create_private_vm(struct msm_gpu *gpu, bool kernel_managed)
{
struct msm_mmu *mmu;
- mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
+ mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu, kernel_managed);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
- return msm_gem_address_space_create(mmu,
- "gpu", ADRENO_VM_START,
- adreno_private_address_space_size(gpu));
+ return msm_gem_vm_create(gpu->dev, mmu, "gpu", ADRENO_VM_START,
+ adreno_private_vm_size(gpu), kernel_managed);
}
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
@@ -2403,8 +2404,8 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = a6xx_create_address_space,
- .create_private_address_space = a6xx_create_private_address_space,
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
@@ -2432,8 +2433,8 @@ static const struct adreno_gpu_funcs funcs_gmuwrapper = {
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = a6xx_create_address_space,
- .create_private_address_space = a6xx_create_private_address_space,
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
@@ -2463,8 +2464,8 @@ static const struct adreno_gpu_funcs funcs_a7xx = {
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = a6xx_create_address_space,
- .create_private_address_space = a6xx_create_private_address_space,
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
@@ -2560,11 +2561,15 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
- if (gpu->aspace)
- msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
- a6xx_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+ a6xx_fault_handler);
+
+ ret = a6xx_calc_ubwc_config(adreno_gpu);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
- a6xx_calc_ubwc_config(adreno_gpu);
/* Set up the preemption specific bits and pieces for each ringbuffer */
a6xx_preempt_init(gpu);
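The comment above a6xx_set_ubwc_config() describes the highest-bank-bit encoding: hbb = highest_bank_bit - 13, with the low two bits written as hbb_lo (register bits 2:1) and the remainder as hbb_hi (bit 10). A runnable worked example of that packing, using the shifts from the diff (RB_NC_MODE_CNTL's other fields left zero; HBB=17 is included only to exercise the hi bit):

#include <stdio.h>

int main(void)
{
    for (unsigned hbb_bit = 13; hbb_bit <= 17; hbb_bit++) {
        unsigned hbb = hbb_bit - 13;
        unsigned hbb_hi = hbb >> 2;
        unsigned hbb_lo = hbb & 3;
        unsigned reg = hbb_hi << 10 | hbb_lo << 1;
        printf("HBB=%u -> hbb_hi=%u hbb_lo=%u RB_NC_MODE_CNTL=%#06x\n",
               hbb_bit, hbb_hi, hbb_lo, reg);
    }
    return 0;
}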
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 9201a53dd341..6e71f617fc3d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -6,6 +6,10 @@
#include "adreno_gpu.h"
+#include "a6xx_enums.xml.h"
+#include "a7xx_enums.xml.h"
+#include "a6xx_perfcntrs.xml.h"
+#include "a7xx_perfcntrs.xml.h"
#include "a6xx.xml.h"
#include "a6xx_gmu.h"
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 341a72a67401..faca2a0243ab 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -132,7 +132,7 @@ static int a6xx_crashdumper_init(struct msm_gpu *gpu,
struct a6xx_crashdumper *dumper)
{
dumper->ptr = msm_gem_kernel_new(gpu->dev,
- SZ_1M, MSM_BO_WC, gpu->aspace,
+ SZ_1M, MSM_BO_WC, gpu->vm,
&dumper->bo, &dumper->iova);
if (!IS_ERR(dumper->ptr))
@@ -158,7 +158,7 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
/* Make sure all pending memory writes are posted */
wmb();
- gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE, dumper->iova);
+ gpu_write64(gpu, REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE, dumper->iova);
gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
@@ -1619,7 +1619,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
a7xx_get_clusters(gpu, a6xx_state, dumper);
a7xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
- msm_gem_kernel_put(dumper->bo, gpu->aspace);
+ msm_gem_kernel_put(dumper->bo, gpu->vm);
}
a7xx_get_post_crashdumper_registers(gpu, a6xx_state);
@@ -1631,7 +1631,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
a6xx_get_clusters(gpu, a6xx_state, dumper);
a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
- msm_gem_kernel_put(dumper->bo, gpu->aspace);
+ msm_gem_kernel_put(dumper->bo, gpu->vm);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index e545106c70be..95d93ac6812a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -212,7 +212,7 @@ static const struct a6xx_shader_block {
SHADER(A6XX_SP_LB_5_DATA, 0x200),
SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
- SHADER(A6XX_SP_UAV_DATA, 0x80),
+ SHADER(A6XX_SP_GFX_UAV_BASE_DATA, 0x80),
SHADER(A6XX_SP_INST_TAG, 0x80),
SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
index 3b17fd2dba89..6a12a35dabff 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -210,7 +210,7 @@ void a6xx_preempt_hw_init(struct msm_gpu *gpu)
gpu_write64(gpu, REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, 0);
/* Enable the GMEM save/restore feature for preemption */
- gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x1);
+ gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 0x1);
/* Reset the preemption state */
set_preempt_state(a6xx_gpu, PREEMPT_NONE);
@@ -344,7 +344,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
ptr = msm_gem_kernel_new(gpu->dev,
PREEMPT_RECORD_SIZE(adreno_gpu),
- MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+ MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->vm, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -362,7 +362,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
ptr = msm_gem_kernel_new(gpu->dev,
PREEMPT_SMMU_INFO_SIZE,
MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY,
- gpu->aspace, &bo, &iova);
+ gpu->vm, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -377,7 +377,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
struct a7xx_cp_smmu_info *smmu_info_ptr = ptr;
- msm_iommu_pagetable_params(gpu->aspace->mmu, &ttbr, &asid);
+ msm_iommu_pagetable_params(to_msm_vm(gpu->vm)->mmu, &ttbr, &asid);
smmu_info_ptr->magic = GEN7_CP_SMMU_INFO_MAGIC;
smmu_info_ptr->ttbr0 = ttbr;
@@ -405,7 +405,7 @@ void a6xx_preempt_fini(struct msm_gpu *gpu)
int i;
for (i = 0; i < gpu->nr_rings; i++)
- msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->aspace);
+ msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->vm);
}
void a6xx_preempt_init(struct msm_gpu *gpu)
@@ -431,7 +431,7 @@ void a6xx_preempt_init(struct msm_gpu *gpu)
a6xx_gpu->preempt_postamble_ptr = msm_gem_kernel_new(gpu->dev,
PAGE_SIZE,
MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY,
- gpu->aspace, &a6xx_gpu->preempt_postamble_bo,
+ gpu->vm, &a6xx_gpu->preempt_postamble_bo,
&a6xx_gpu->preempt_postamble_iova);
preempt_prepare_postamble(a6xx_gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 16e7ac444efd..50945bfe9b49 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -16,10 +16,6 @@ bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
-bool allow_vram_carveout = false;
-MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
-module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
-
int enable_preemption = -1;
MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))");
module_param(enable_preemption, int, 0600);
@@ -264,42 +260,23 @@ static const struct component_ops a3xx_ops = {
.unbind = adreno_unbind,
};
-static void adreno_device_register_headless(void)
-{
- /* on imx5, we don't have a top-level mdp/dpu node
- * this creates a dummy node for the driver for that case
- */
- struct platform_device_info dummy_info = {
- .parent = NULL,
- .name = "msm",
- .id = -1,
- .res = NULL,
- .num_res = 0,
- .data = NULL,
- .size_data = 0,
- .dma_mask = ~0,
- };
- platform_device_register_full(&dummy_info);
-}
-
static int adreno_probe(struct platform_device *pdev)
{
+ if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon") ||
+ msm_gpu_no_components())
+ return msm_gpu_probe(pdev, &a3xx_ops);
- int ret;
-
- ret = component_add(&pdev->dev, &a3xx_ops);
- if (ret)
- return ret;
-
- if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
- adreno_device_register_headless();
-
- return 0;
+ return component_add(&pdev->dev, &a3xx_ops);
}
static void adreno_remove(struct platform_device *pdev)
{
- component_del(&pdev->dev, &a3xx_ops);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+
+ if (priv->kms_init)
+ component_del(&pdev->dev, &a3xx_ops);
+ else
+ msm_gpu_remove(pdev, &a3xx_ops);
}
static void adreno_shutdown(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
index 9a327d543f27..e02cabb39f19 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -1311,8 +1311,8 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x08000},
{ "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
REG_A7XX_CP_BV_SQE_STAT_DATA, 0x00040},
- { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TBL_DBG_ADDR,
- REG_A7XX_CP_RESOURCE_TBL_DBG_DATA, 0x04100},
+ { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
+ REG_A7XX_CP_RESOURCE_TABLE_DBG_DATA, 0x04100},
{ "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x00200},
{ "CP_LPAC_ROQ", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 86bff915c3e7..f1230465bf0d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -191,25 +191,27 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
-struct msm_gem_address_space *
-adreno_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev)
+struct drm_gpuvm *
+adreno_create_vm(struct msm_gpu *gpu,
+ struct platform_device *pdev)
{
- return adreno_iommu_create_address_space(gpu, pdev, 0);
+ return adreno_iommu_create_vm(gpu, pdev, 0);
}
-struct msm_gem_address_space *
-adreno_iommu_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev,
- unsigned long quirks)
+struct drm_gpuvm *
+adreno_iommu_create_vm(struct msm_gpu *gpu,
+ struct platform_device *pdev,
+ unsigned long quirks)
{
struct iommu_domain_geometry *geometry;
struct msm_mmu *mmu;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
u64 start, size;
mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
- if (IS_ERR_OR_NULL(mmu))
+ if (!mmu)
+ return ERR_PTR(-ENODEV);
+ else if (IS_ERR_OR_NULL(mmu))
return ERR_CAST(mmu);
geometry = msm_iommu_get_geometry(mmu);
@@ -224,16 +226,16 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
start = max_t(u64, SZ_16M, geometry->aperture_start);
size = geometry->aperture_end - start + 1;
- aspace = msm_gem_address_space_create(mmu, "gpu",
- start & GENMASK_ULL(48, 0), size);
+ vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", start & GENMASK_ULL(48, 0),
+ size, true);
- if (IS_ERR(aspace) && !IS_ERR(mmu))
+ if (IS_ERR(vm) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
- return aspace;
+ return vm;
}
-u64 adreno_private_address_space_size(struct msm_gpu *gpu)
+u64 adreno_private_vm_size(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
@@ -273,9 +275,11 @@ void adreno_check_and_reenable_stall(struct adreno_gpu *adreno_gpu)
if (!priv->stall_enabled &&
ktime_after(ktime_get(), priv->stall_reenable_time) &&
!READ_ONCE(gpu->crashstate)) {
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
+
priv->stall_enabled = true;
- gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, true);
+ mmu->funcs->set_stall(mmu, true);
}
spin_unlock_irqrestore(&priv->fault_stall_lock, flags);
}
@@ -290,6 +294,7 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
u32 scratch[4])
{
struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
const char *type = "UNKNOWN";
bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) &&
!READ_ONCE(gpu->crashstate);
@@ -303,8 +308,9 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
if (priv->stall_enabled) {
priv->stall_enabled = false;
- gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, false);
+ mmu->funcs->set_stall(mmu, false);
}
+
priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
@@ -351,11 +357,20 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
return 0;
}
-int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+static bool
+adreno_smmu_has_prr(struct msm_gpu *gpu)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+ return adreno_smmu && adreno_smmu->set_prr_addr;
+}
+
+int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *drm = gpu->dev;
+ /* Note ctx can be NULL when called from rd_open(): */
+ struct drm_gpuvm *vm = ctx ? msm_context_vm(drm, ctx) : NULL;
/* No pointer params yet */
if (*len != 0)
@@ -401,8 +416,8 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
*value = 0;
return 0;
case MSM_PARAM_FAULTS:
- if (ctx->aspace)
- *value = gpu->global_faults + ctx->aspace->faults;
+ if (vm)
+ *value = gpu->global_faults + to_msm_vm(vm)->faults;
else
*value = gpu->global_faults;
return 0;
@@ -410,36 +425,39 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
*value = gpu->suspend_count;
return 0;
case MSM_PARAM_VA_START:
- if (ctx->aspace == gpu->aspace)
+ if (vm == gpu->vm)
return UERR(EINVAL, drm, "requires per-process pgtables");
- *value = ctx->aspace->va_start;
+ *value = vm->mm_start;
return 0;
case MSM_PARAM_VA_SIZE:
- if (ctx->aspace == gpu->aspace)
+ if (vm == gpu->vm)
return UERR(EINVAL, drm, "requires per-process pgtables");
- *value = ctx->aspace->va_size;
+ *value = vm->mm_range;
return 0;
case MSM_PARAM_HIGHEST_BANK_BIT:
- *value = adreno_gpu->ubwc_config.highest_bank_bit;
+ *value = adreno_gpu->ubwc_config->highest_bank_bit;
return 0;
case MSM_PARAM_RAYTRACING:
*value = adreno_gpu->has_ray_tracing;
return 0;
case MSM_PARAM_UBWC_SWIZZLE:
- *value = adreno_gpu->ubwc_config.ubwc_swizzle;
+ *value = adreno_gpu->ubwc_config->ubwc_swizzle;
return 0;
case MSM_PARAM_MACROTILE_MODE:
- *value = adreno_gpu->ubwc_config.macrotile_mode;
+ *value = adreno_gpu->ubwc_config->macrotile_mode;
return 0;
case MSM_PARAM_UCHE_TRAP_BASE:
*value = adreno_gpu->uche_trap_base;
return 0;
+ case MSM_PARAM_HAS_PRR:
+ *value = adreno_smmu_has_prr(gpu);
+ return 0;
default:
return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
}
-int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t value, uint32_t len)
{
struct drm_device *drm = gpu->dev;
@@ -485,7 +503,22 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_SYSPROF:
if (!capable(CAP_SYS_ADMIN))
return UERR(EPERM, drm, "invalid permissions");
- return msm_file_private_set_sysprof(ctx, gpu, value);
+ return msm_context_set_sysprof(ctx, gpu, value);
+ case MSM_PARAM_EN_VM_BIND:
+ /* We can only support VM_BIND with per-process pgtables: */
+ if (ctx->vm == gpu->vm)
+ return UERR(EINVAL, drm, "requires per-process pgtables");
+
+ /*
+ * We can only switch to VM_BIND mode if the VM has not yet
+ * been created:
+ */
+ if (ctx->vm)
+ return UERR(EBUSY, drm, "VM already created");
+
+ ctx->userspace_managed_vm = value;
+
+ return 0;
default:
return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
@@ -607,7 +640,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
void *ptr;
ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
- MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm, &bo, iova);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
@@ -800,6 +833,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state)
for (i = 0; state->bos && i < state->nr_bos; i++)
kvfree(state->bos[i].data);
+ kfree(state->vm_logs);
kfree(state->bos);
kfree(state->comm);
kfree(state->cmd);
@@ -940,6 +974,16 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]);
}
+ if (state->vm_logs) {
+ drm_puts(p, "vm-log:\n");
+ for (i = 0; i < state->nr_vm_logs; i++) {
+ struct msm_gem_vm_log_entry *e = &state->vm_logs[i];
+ drm_printf(p, " - %s:%d: 0x%016llx-0x%016llx\n",
+ e->op, e->queue_id, e->iova,
+ e->iova + e->range);
+ }
+ }
+
drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
drm_puts(p, "ringbuffer:\n");
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index bc063594a359..9dc93c247196 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -12,13 +12,14 @@
#include <linux/firmware.h>
#include <linux/iopoll.h>
+#include <linux/soc/qcom/ubwc.h>
+
#include "msm_gpu.h"
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
extern bool snapshot_debugbus;
-extern bool allow_vram_carveout;
enum {
ADRENO_FW_PM4 = 0,
@@ -205,44 +206,12 @@ struct adreno_gpu {
/* firmware: */
const struct firmware *fw[ADRENO_FW_MAX];
- struct {
- /**
- * @rgb565_predicator: Unknown, introduced with A650 family,
- * related to UBWC mode/ver 4
- */
- u32 rgb565_predicator;
- /** @uavflagprd_inv: Unknown, introduced with A650 family */
- u32 uavflagprd_inv;
- /** @min_acc_len: Whether the minimum access length is 64 bits */
- u32 min_acc_len;
- /**
- * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
- *
- * UBWC 1.0 always enables all three levels.
- * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
- * UBWC 4.0 adds the optional ability to disable levels 2 & 3.
- *
- * This is a bitmask where BIT(0) enables level 1, BIT(1)
- * controls level 2, and BIT(2) enables level 3.
- */
- u32 ubwc_swizzle;
- /**
- * @highest_bank_bit: Highest Bank Bit
- *
- * The Highest Bank Bit value represents the bit of the highest
- * DDR bank. This should ideally use DRAM type detection.
- */
- u32 highest_bank_bit;
- u32 amsbc;
- /**
- * @macrotile_mode: Macrotile Mode
- *
- * Whether to use 4-channel macrotiling mode or the newer
- * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is
- * 4-channel and 1 is 8-channel.
- */
- u32 macrotile_mode;
- } ubwc_config;
+ /*
+ * The migration to the central UBWC config db is still in flight - keep
+ * a copy containing some local fixups until that's done.
+ */
+ const struct qcom_ubwc_cfg_data *ubwc_config;
+ struct qcom_ubwc_cfg_data _ubwc_config;
/*
* Register offsets are different between some GPUs.
@@ -580,10 +549,10 @@ static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
/* Put vm_start above 32b to catch issues with not setting xyz_BASE_HI */
#define ADRENO_VM_START 0x100000000ULL
-u64 adreno_private_address_space_size(struct msm_gpu *gpu);
-int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+u64 adreno_private_vm_size(struct msm_gpu *gpu);
+int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len);
-int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t value, uint32_t len);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
@@ -623,14 +592,14 @@ void adreno_show_object(struct drm_printer *p, void **ptr, int len,
* Common helper function to initialize the default address space for arm-smmu
* attached targets
*/
-struct msm_gem_address_space *
-adreno_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev);
-
-struct msm_gem_address_space *
-adreno_iommu_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev,
- unsigned long quirks);
+struct drm_gpuvm *
+adreno_create_vm(struct msm_gpu *gpu,
+ struct platform_device *pdev);
+
+struct drm_gpuvm *
+adreno_iommu_create_vm(struct msm_gpu *gpu,
+ struct platform_device *pdev,
+ unsigned long quirks);
int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
struct adreno_smmu_fault_info *info, const char *block,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index ffc4d4257ae5..56d3c38c8778 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -21,7 +21,6 @@ static const struct dpu_caps sm8650_dpu_caps = {
static const struct dpu_mdp_cfg sm8650_mdp = {
.name = "top_0",
.base = 0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
@@ -31,32 +30,26 @@ static const struct dpu_ctl_cfg sm8650_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -139,7 +132,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -147,7 +140,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -155,7 +148,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -163,7 +156,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -171,14 +164,14 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -189,22 +182,18 @@ static const struct dpu_dspp_cfg sm8650_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -213,67 +202,57 @@ static const struct dpu_pingpong_cfg sm8650_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_2", .id = PINGPONG_CWB_2,
.base = 0x7e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_4,
}, {
.name = "pingpong_cwb_3", .id = PINGPONG_CWB_3,
.base = 0x7e400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_4,
},
@@ -307,32 +286,30 @@ static const struct dpu_dsc_cfg sm8650_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_2_0", .id = DSC_4,
.base = 0x82000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_2_1", .id = DSC_5,
.base = 0x82000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
},
};
@@ -341,7 +318,7 @@ static const struct dpu_wb_cfg sm8650_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
@@ -374,7 +351,6 @@ static const struct dpu_intf_cfg sm8650_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -383,7 +359,6 @@ static const struct dpu_intf_cfg sm8650_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -393,7 +368,6 @@ static const struct dpu_intf_cfg sm8650_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -403,7 +377,6 @@ static const struct dpu_intf_cfg sm8650_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h
new file mode 100644
index 000000000000..db8cc2d0112c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h
@@ -0,0 +1,494 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Linaro Limited
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_12_0_SM8750_H
+#define _DPU_12_0_SM8750_H
+
+static const struct dpu_caps sm8750_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 8192,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm8750_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+static const struct dpu_ctl_cfg sm8750_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm8750_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg sm8750_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ }, {
+ .name = "lm_6", .id = LM_6,
+ .base = 0x4a000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_7,
+ .pingpong = PINGPONG_6,
+ }, {
+ .name = "lm_7", .id = LM_7,
+ .base = 0x4b000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_6,
+ .pingpong = PINGPONG_7,
+ },
+};
+
+static const struct dpu_dspp_cfg sm8750_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm8750_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x6f000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 20),
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x70000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 21),
+ }, {
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
+ .base = 0x66000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ }, {
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
+ .base = 0x66400, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ }, {
+ .name = "pingpong_cwb_2", .id = PINGPONG_CWB_2,
+ .base = 0x7e000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_5,
+ }, {
+ .name = "pingpong_cwb_3", .id = PINGPONG_CWB_3,
+ .base = 0x7e400, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_5,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sm8750_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x51000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_4", .id = MERGE_3D_4,
+ .base = 0x66700, .len = 0x1c,
+ }, {
+ .name = "merge_3d_5", .id = MERGE_3D_5,
+ .base = 0x7e700, .len = 0x1c,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains two hard-slice
+ * DSC encoders, so both share the same base address but have their
+ * own distinct sub-block addresses.
+ */
+static const struct dpu_dsc_cfg sm8750_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ }, {
+ .name = "dce_2_0", .id = DSC_4,
+ .base = 0x82000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_2_1", .id = DSC_5,
+ .base = 0x82000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ }, {
+ .name = "dce_3_0", .id = DSC_6,
+ .base = 0x83000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_3_1", .id = DSC_7,
+ .base = 0x83000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sm8750_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_cwb_cfg sm8750_cwb[] = {
+ {
+ .name = "cwb_0", .id = CWB_0,
+ .base = 0x66200, .len = 0x20,
+ },
+ {
+ .name = "cwb_1", .id = CWB_1,
+ .base = 0x66600, .len = 0x20,
+ },
+ {
+ .name = "cwb_2", .id = CWB_2,
+ .base = 0x7e200, .len = 0x20,
+ },
+ {
+ .name = "cwb_3", .id = CWB_3,
+ .base = 0x7e600, .len = 0x20,
+ },
+};
+
+static const struct dpu_intf_cfg sm8750_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x4bc,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x4bc,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x4bc,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x4bc,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sm8750_perf_data = {
+ .max_bw_low = 18900000,
+ .max_bw_high = 28500000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm8750_mdss_ver = {
+ .core_major_ver = 12,
+ .core_minor_ver = 0,
+};
+
+const struct dpu_mdss_cfg dpu_sm8750_cfg = {
+ .mdss_ver = &sm8750_mdss_ver,
+ .caps = &sm8750_dpu_caps,
+ .mdp = &sm8750_mdp,
+ .cdm = &dpu_cdm_5_x,
+ .ctl_count = ARRAY_SIZE(sm8750_ctl),
+ .ctl = sm8750_ctl,
+ .sspp_count = ARRAY_SIZE(sm8750_sspp),
+ .sspp = sm8750_sspp,
+ .mixer_count = ARRAY_SIZE(sm8750_lm),
+ .mixer = sm8750_lm,
+ .dspp_count = ARRAY_SIZE(sm8750_dspp),
+ .dspp = sm8750_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8750_pp),
+ .pingpong = sm8750_pp,
+ .dsc_count = ARRAY_SIZE(sm8750_dsc),
+ .dsc = sm8750_dsc,
+ .merge_3d_count = ARRAY_SIZE(sm8750_merge_3d),
+ .merge_3d = sm8750_merge_3d,
+ .wb_count = ARRAY_SIZE(sm8750_wb),
+ .wb = sm8750_wb,
+ .cwb_count = ARRAY_SIZE(sm8750_cwb),
+ .cwb = sm8750_cwb,
+ .intf_count = ARRAY_SIZE(sm8750_intf),
+ .intf = sm8750_intf,
+ .vbif_count = ARRAY_SIZE(sm8650_vbif),
+ .vbif = sm8650_vbif,
+ .perf = &sm8750_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
index 39027a21c6fe..29e0eba91930 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -19,7 +19,6 @@ static const struct dpu_mdp_cfg msm8937_mdp[] = {
{
.name = "top_0",
.base = 0x0, .len = 0x454,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
@@ -116,7 +115,6 @@ static const struct dpu_dspp_cfg msm8937_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
index 8d1b43ea1663..cb1ee4b63f9f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -19,7 +19,6 @@ static const struct dpu_mdp_cfg msm8917_mdp[] = {
{
.name = "top_0",
.base = 0x0, .len = 0x454,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
@@ -103,7 +102,6 @@ static const struct dpu_dspp_cfg msm8917_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
index 16c12499b24b..b44d02b48418 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -19,7 +19,6 @@ static const struct dpu_mdp_cfg msm8953_mdp[] = {
{
.name = "top_0",
.base = 0x0, .len = 0x454,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
@@ -116,7 +115,6 @@ static const struct dpu_dspp_cfg msm8953_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
index 91f514d28ac6..8af63db315b4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -22,7 +22,6 @@ static const struct dpu_mdp_cfg msm8996_mdp[] = {
{
.name = "top_0",
.base = 0x0, .len = 0x454,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -181,28 +180,24 @@ static const struct dpu_pingpong_cfg msm8996_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
@@ -223,12 +218,10 @@ static const struct dpu_dspp_cfg msm8996_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 413cd59dc0c4..f91220496082 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -23,7 +23,6 @@ static const struct dpu_caps msm8998_dpu_caps = {
static const struct dpu_mdp_cfg msm8998_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x458,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -170,28 +169,24 @@ static const struct dpu_pingpong_cfg msm8998_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
@@ -212,12 +207,10 @@ static const struct dpu_dspp_cfg msm8998_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
index b2eb7ca699e3..8f9a097147c0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -22,7 +22,6 @@ static const struct dpu_caps sdm660_dpu_caps = {
static const struct dpu_mdp_cfg sdm660_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x458,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -141,28 +140,24 @@ static const struct dpu_pingpong_cfg sdm660_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
@@ -183,12 +178,10 @@ static const struct dpu_dspp_cfg sdm660_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
index 85e121ad84a0..0ad18bd273ff 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -22,7 +22,6 @@ static const struct dpu_caps sdm630_dpu_caps = {
static const struct dpu_mdp_cfg sdm630_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x458,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
@@ -115,14 +114,12 @@ static const struct dpu_pingpong_cfg sdm630_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
@@ -133,7 +130,6 @@ static const struct dpu_dspp_cfg sdm630_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index 49363d7d5b93..5cc9f55d542b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -23,7 +23,6 @@ static const struct dpu_caps sdm845_dpu_caps = {
static const struct dpu_mdp_cfg sdm845_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT) | BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -134,7 +133,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -142,7 +141,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -150,7 +149,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_2,
@@ -158,7 +157,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -170,22 +169,18 @@ static const struct dpu_dspp_cfg sdm845_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -194,28 +189,24 @@ static const struct dpu_pingpong_cfg sdm845_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
index c2fde980fb52..0f5e9babdeea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
@@ -11,7 +11,6 @@
static const struct dpu_mdp_cfg sdm670_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -69,7 +68,7 @@ static const struct dpu_lm_cfg sdm670_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -77,7 +76,7 @@ static const struct dpu_lm_cfg sdm670_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -85,14 +84,14 @@ static const struct dpu_lm_cfg sdm670_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_2,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -103,12 +102,10 @@ static const struct dpu_dspp_cfg sdm670_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 08d38e1d420c..ae1b2ed96e9f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -23,7 +23,6 @@ static const struct dpu_caps sm8150_dpu_caps = {
static const struct dpu_mdp_cfg sm8150_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -41,32 +40,26 @@ static const struct dpu_ctl_cfg sm8150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -143,7 +136,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -151,7 +144,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -159,7 +152,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -167,7 +160,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -175,14 +168,14 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -193,22 +186,18 @@ static const struct dpu_dspp_cfg sm8150_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -217,42 +206,36 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x72000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x72800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -276,19 +259,15 @@ static const struct dpu_dsc_cfg sm8150_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_2", .id = DSC_2,
.base = 0x80800, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_3", .id = DSC_3,
.base = 0x80c00, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -296,7 +275,7 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -311,7 +290,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -320,7 +298,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -330,7 +307,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -340,7 +316,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index d6f8b1030c68..b572cfa7ed35 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -23,7 +23,6 @@ static const struct dpu_caps sc8180x_dpu_caps = {
static const struct dpu_mdp_cfg sc8180x_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -41,32 +40,26 @@ static const struct dpu_ctl_cfg sc8180x_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -143,7 +136,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -151,7 +144,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -159,7 +152,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -167,7 +160,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -175,14 +168,14 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -193,22 +186,18 @@ static const struct dpu_dspp_cfg sc8180x_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -217,42 +206,36 @@ static const struct dpu_pingpong_cfg sc8180x_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x72000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x72800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -276,27 +259,21 @@ static const struct dpu_dsc_cfg sc8180x_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_2", .id = DSC_2,
.base = 0x80800, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_3", .id = DSC_3,
.base = 0x80c00, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_4", .id = DSC_4,
.base = 0x81000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_5", .id = DSC_5,
.base = 0x81400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -304,7 +281,7 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -319,7 +296,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -328,7 +304,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -338,7 +313,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -350,7 +324,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
{
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = 999,
.prog_fetch_lines_worst_case = 24,
@@ -359,7 +332,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x6c000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -368,7 +340,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x6c800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
@@ -383,6 +354,7 @@ static const struct dpu_perf_cfg sc8180x_perf_data = {
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
+ .min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
.safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
.qos_lut_tbl = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
index 71ba48b05656..a56c288ac10c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
@@ -23,7 +23,6 @@ static const struct dpu_caps sm7150_dpu_caps = {
static const struct dpu_mdp_cfg sm7150_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -38,32 +37,26 @@ static const struct dpu_ctl_cfg sm7150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -116,7 +109,7 @@ static const struct dpu_lm_cfg sm7150_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -124,7 +117,7 @@ static const struct dpu_lm_cfg sm7150_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -132,14 +125,14 @@ static const struct dpu_lm_cfg sm7150_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -150,12 +143,10 @@ static const struct dpu_dspp_cfg sm7150_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -164,28 +155,24 @@ static const struct dpu_pingpong_cfg sm7150_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
@@ -206,11 +193,9 @@ static const struct dpu_dsc_cfg sm7150_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -218,7 +203,6 @@ static const struct dpu_intf_cfg sm7150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -227,7 +211,6 @@ static const struct dpu_intf_cfg sm7150_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -237,7 +220,6 @@ static const struct dpu_intf_cfg sm7150_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -247,7 +229,6 @@ static const struct dpu_intf_cfg sm7150_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -260,7 +241,7 @@ static const struct dpu_wb_cfg sm7150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
index da11830d4407..26883f6b66b3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
@@ -20,7 +20,6 @@ static const struct dpu_caps sm6150_dpu_caps = {
static const struct dpu_mdp_cfg sm6150_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = 0,
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
@@ -35,32 +34,26 @@ static const struct dpu_ctl_cfg sm6150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -113,20 +106,17 @@ static const struct dpu_lm_cfg sm6150_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_1,
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_2,
},
@@ -136,7 +126,6 @@ static const struct dpu_dspp_cfg sm6150_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -145,19 +134,16 @@ static const struct dpu_pingpong_cfg sm6150_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
},
@@ -167,7 +153,7 @@ static const struct dpu_wb_cfg sm6150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -182,7 +168,6 @@ static const struct dpu_intf_cfg sm6150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -191,7 +176,6 @@ static const struct dpu_intf_cfg sm6150_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -201,7 +185,6 @@ static const struct dpu_intf_cfg sm6150_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index fcfb3774f7a1..fbf50f279e66 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -22,7 +22,6 @@ static const struct dpu_caps sm6125_dpu_caps = {
static const struct dpu_mdp_cfg sm6125_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = 0,
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
@@ -35,32 +34,26 @@ static const struct dpu_ctl_cfg sm6125_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -97,7 +90,6 @@ static const struct dpu_lm_cfg sm6125_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
@@ -105,7 +97,6 @@ static const struct dpu_lm_cfg sm6125_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_1,
.dspp = 0,
@@ -117,7 +108,6 @@ static const struct dpu_dspp_cfg sm6125_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -126,14 +116,12 @@ static const struct dpu_pingpong_cfg sm6125_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.merge_3d = 0,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.merge_3d = 0,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
@@ -144,7 +132,7 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -159,7 +147,6 @@ static const struct dpu_intf_cfg sm6125_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -168,7 +155,6 @@ static const struct dpu_intf_cfg sm6125_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = 0,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index a86fdb33ebdd..7b8b7a1c2d76 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -39,32 +39,26 @@ static const struct dpu_ctl_cfg sm8250_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -141,7 +135,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -149,7 +143,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -157,7 +151,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -165,7 +159,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -173,14 +167,14 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -191,22 +185,18 @@ static const struct dpu_dspp_cfg sm8250_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -215,42 +205,36 @@ static const struct dpu_pingpong_cfg sm8250_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x72000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x72800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -274,19 +258,15 @@ static const struct dpu_dsc_cfg sm8250_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_2", .id = DSC_2,
.base = 0x80800, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_3", .id = DSC_3,
.base = 0x80c00, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -294,7 +274,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -303,7 +282,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -313,7 +291,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -323,7 +300,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -336,7 +312,7 @@ static const struct dpu_wb_cfg sm8250_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 842fcc5887fe..c990ba3b5db0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -32,17 +32,14 @@ static const struct dpu_ctl_cfg sc7180_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
},
};
@@ -87,7 +84,7 @@ static const struct dpu_lm_cfg sc7180_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -95,7 +92,7 @@ static const struct dpu_lm_cfg sc7180_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -106,7 +103,6 @@ static const struct dpu_dspp_cfg sc7180_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -115,14 +111,12 @@ static const struct dpu_pingpong_cfg sc7180_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
@@ -133,7 +127,6 @@ static const struct dpu_intf_cfg sc7180_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -142,7 +135,6 @@ static const struct dpu_intf_cfg sc7180_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -156,7 +148,7 @@ static const struct dpu_wb_cfg sc7180_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
index c5fd89dd7c89..343ff5482382 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
@@ -29,7 +29,6 @@ static const struct dpu_ctl_cfg sm6115_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
};
@@ -58,7 +57,6 @@ static const struct dpu_lm_cfg sm6115_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &qcm2290_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
@@ -69,7 +67,6 @@ static const struct dpu_dspp_cfg sm6115_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -78,7 +75,6 @@ static const struct dpu_pingpong_cfg sm6115_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
@@ -89,7 +85,6 @@ static const struct dpu_intf_cfg sm6115_intf[] = {
{
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index a234bb289d24..093d16bdc450 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -35,22 +35,18 @@ static const struct dpu_ctl_cfg sm6350_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
},
};
@@ -95,7 +91,7 @@ static const struct dpu_lm_cfg sm6350_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -103,7 +99,7 @@ static const struct dpu_lm_cfg sm6350_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -115,7 +111,6 @@ static const struct dpu_dspp_cfg sm6350_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -124,14 +119,12 @@ static struct dpu_pingpong_cfg sm6350_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
@@ -142,7 +135,6 @@ static const struct dpu_dsc_cfg sm6350_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -150,7 +142,7 @@ static const struct dpu_wb_cfg sm6350_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -165,7 +157,6 @@ static const struct dpu_intf_cfg sm6350_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 35,
@@ -174,7 +165,6 @@ static const struct dpu_intf_cfg sm6350_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 35,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
index 53f3be28f6f6..47053bf9b0a2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
@@ -29,7 +29,6 @@ static const struct dpu_ctl_cfg qcm2290_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
};
@@ -58,7 +57,6 @@ static const struct dpu_lm_cfg qcm2290_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &qcm2290_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
@@ -69,7 +67,6 @@ static const struct dpu_dspp_cfg qcm2290_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -78,7 +75,6 @@ static const struct dpu_pingpong_cfg qcm2290_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
@@ -89,7 +85,6 @@ static const struct dpu_intf_cfg qcm2290_intf[] = {
{
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
index 3a3bc8e429be..98190ee7ec7a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
@@ -30,7 +30,6 @@ static const struct dpu_ctl_cfg sm6375_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
};
@@ -59,7 +58,6 @@ static const struct dpu_lm_cfg sm6375_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &qcm2290_lm_sblk,
.lm_pair = 0,
.pingpong = PINGPONG_0,
@@ -71,7 +69,6 @@ static const struct dpu_dspp_cfg sm6375_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -80,7 +77,6 @@ static const struct dpu_pingpong_cfg sm6375_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
@@ -91,7 +87,6 @@ static const struct dpu_dsc_cfg sm6375_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -99,7 +94,6 @@ static const struct dpu_intf_cfg sm6375_intf[] = {
{
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index 90e86063a372..85aae40c210f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -39,32 +39,26 @@ static const struct dpu_ctl_cfg sm8350_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -141,7 +135,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -149,7 +143,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -157,7 +151,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -165,7 +159,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -173,14 +167,14 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -191,22 +185,18 @@ static const struct dpu_dspp_cfg sm8350_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -215,42 +205,36 @@ static const struct dpu_pingpong_cfg sm8350_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -279,22 +263,20 @@ static const struct dpu_dsc_cfg sm8350_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -303,7 +285,7 @@ static const struct dpu_wb_cfg sm8350_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -318,7 +300,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -327,7 +308,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x2c4,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -337,7 +317,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x2c4,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -347,7 +326,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index e9625c48c567..8f978b9c3452 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -32,22 +32,18 @@ static const struct dpu_ctl_cfg sc7280_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
},
};
@@ -92,21 +88,21 @@ static const struct dpu_lm_cfg sc7280_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -117,7 +113,6 @@ static const struct dpu_dspp_cfg sc7280_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -126,28 +121,24 @@ static const struct dpu_pingpong_cfg sc7280_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
@@ -159,7 +150,7 @@ static const struct dpu_dsc_cfg sc7280_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
},
};
@@ -168,7 +159,7 @@ static const struct dpu_wb_cfg sc7280_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -183,7 +174,6 @@ static const struct dpu_intf_cfg sc7280_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -192,7 +182,6 @@ static const struct dpu_intf_cfg sc7280_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x2c4,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -202,7 +191,6 @@ static const struct dpu_intf_cfg sc7280_intf[] = {
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x39000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index 139f11321fea..303d33dc7783 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -21,7 +21,6 @@ static const struct dpu_caps sc8280xp_dpu_caps = {
static const struct dpu_mdp_cfg sc8280xp_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -39,32 +38,26 @@ static const struct dpu_ctl_cfg sc8280xp_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -141,7 +134,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -149,7 +142,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -157,7 +150,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -165,7 +158,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -173,14 +166,14 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -191,22 +184,18 @@ static const struct dpu_dspp_cfg sc8280xp_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -215,42 +204,36 @@ static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -279,32 +262,28 @@ static const struct dpu_dsc_cfg sc8280xp_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_2_0", .id = DSC_4,
.base = 0x82000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_2_1", .id = DSC_5,
.base = 0x82000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
},
};
@@ -314,7 +293,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -323,7 +301,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -333,7 +310,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -343,7 +319,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -352,7 +327,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x38000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -361,7 +335,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x39000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_3,
.prog_fetch_lines_worst_case = 24,
@@ -370,7 +343,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_6", .id = INTF_6,
.base = 0x3a000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
@@ -379,7 +351,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
@@ -388,7 +359,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_8", .id = INTF_8,
.base = 0x3c000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 461294143a90..b09a6af4c474 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -21,7 +21,6 @@ static const struct dpu_caps sm8450_dpu_caps = {
static const struct dpu_mdp_cfg sm8450_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -40,32 +39,26 @@ static const struct dpu_ctl_cfg sm8450_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -142,7 +135,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -150,7 +143,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -158,7 +151,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -166,7 +159,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -174,14 +167,14 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -192,22 +185,18 @@ static const struct dpu_dspp_cfg sm8450_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -216,55 +205,47 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -295,22 +276,20 @@ static const struct dpu_dsc_cfg sm8450_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -319,7 +298,7 @@ static const struct dpu_wb_cfg sm8450_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -334,7 +313,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -343,7 +321,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -353,7 +330,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -363,7 +339,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index c248b3b55c41..0f7b4a224e4c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -20,7 +20,6 @@ static const struct dpu_caps sa8775p_dpu_caps = {
static const struct dpu_mdp_cfg sa8775p_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -39,32 +38,26 @@ static const struct dpu_ctl_cfg sa8775p_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -141,7 +134,7 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -149,7 +142,7 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -157,7 +150,7 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -165,7 +158,7 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -173,14 +166,14 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -191,22 +184,18 @@ static const struct dpu_dspp_cfg sa8775p_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -215,55 +204,47 @@ static const struct dpu_pingpong_cfg sa8775p_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_6", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_7", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -294,32 +275,28 @@ static const struct dpu_dsc_cfg sa8775p_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_2_0", .id = DSC_4,
.base = 0x82000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_2_1", .id = DSC_5,
.base = 0x82000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
},
};
@@ -328,7 +305,7 @@ static const struct dpu_wb_cfg sa8775p_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -344,7 +321,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -353,7 +329,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -363,7 +338,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -373,7 +347,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -382,7 +355,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x38000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -391,7 +363,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_6", .id = INTF_6,
.base = 0x3A000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -400,7 +371,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -409,7 +379,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_8", .id = INTF_8,
.base = 0x3c000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 59c7fdf28e89..465b6460f875 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -21,7 +21,6 @@ static const struct dpu_caps sm8550_dpu_caps = {
static const struct dpu_mdp_cfg sm8550_mdp = {
.name = "top_0",
.base = 0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
@@ -31,32 +30,26 @@ static const struct dpu_ctl_cfg sm8550_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -139,7 +132,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -147,7 +140,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -155,7 +148,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -163,7 +156,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -171,14 +164,14 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -189,22 +182,18 @@ static const struct dpu_dspp_cfg sm8550_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -212,55 +201,47 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -291,22 +272,20 @@ static const struct dpu_dsc_cfg sm8550_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -315,7 +294,7 @@ static const struct dpu_wb_cfg sm8550_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
@@ -329,7 +308,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -338,7 +316,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -348,7 +325,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -358,7 +334,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
index 5667d055fbd1..6caa7d40f368 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
@@ -21,7 +21,6 @@ static const struct dpu_caps sar2130p_dpu_caps = {
static const struct dpu_mdp_cfg sar2130p_mdp = {
.name = "top_0",
.base = 0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
@@ -31,32 +30,26 @@ static const struct dpu_ctl_cfg sar2130p_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -139,7 +132,7 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -147,7 +140,7 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -155,7 +148,7 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -163,7 +156,7 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -171,14 +164,14 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -189,22 +182,18 @@ static const struct dpu_dspp_cfg sar2130p_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -212,55 +201,47 @@ static const struct dpu_pingpong_cfg sar2130p_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -291,22 +272,20 @@ static const struct dpu_dsc_cfg sar2130p_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -315,7 +294,7 @@ static const struct dpu_wb_cfg sar2130p_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
@@ -329,7 +308,6 @@ static const struct dpu_intf_cfg sar2130p_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -338,7 +316,6 @@ static const struct dpu_intf_cfg sar2130p_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -348,7 +325,6 @@ static const struct dpu_intf_cfg sar2130p_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -358,7 +334,6 @@ static const struct dpu_intf_cfg sar2130p_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 52cc10aec1f9..7243eebb85f3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -20,7 +20,6 @@ static const struct dpu_caps x1e80100_dpu_caps = {
static const struct dpu_mdp_cfg x1e80100_mdp = {
.name = "top_0",
.base = 0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
@@ -30,32 +29,26 @@ static const struct dpu_ctl_cfg x1e80100_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -138,7 +131,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -146,7 +139,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -154,7 +147,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -162,7 +155,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -170,14 +163,14 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -188,22 +181,18 @@ static const struct dpu_dspp_cfg x1e80100_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -212,55 +201,47 @@ static const struct dpu_pingpong_cfg x1e80100_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -291,22 +272,20 @@ static const struct dpu_dsc_cfg x1e80100_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -315,7 +294,7 @@ static const struct dpu_wb_cfg x1e80100_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
@@ -330,7 +309,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -339,7 +317,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -349,7 +326,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -359,7 +335,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -368,7 +343,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x38000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -377,7 +351,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x39000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_3,
.prog_fetch_lines_worst_case = 24,
@@ -386,7 +359,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_6", .id = INTF_6,
.base = 0x3A000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
@@ -395,7 +367,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_2, /* pair with intf_6 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -404,7 +375,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_8", .id = INTF_8,
.base = 0x3c000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
.prog_fetch_lines_worst_case = 24,
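
All of the catalog hunks above follow one pattern: per-instance .features masks that merely restated the DPU core revision (CTL_SM8550_MASK, INTF_SC7280_MASK, DSPP_SC7180_MASK, the pingpong DITHER bit, DSC HW_REV_1_2) are dropped, while masks that still encode real per-SoC differences are narrowed (MIXER_SDM845_MASK becomes MIXER_MSM8998_MASK, WB_SM8250_MASK becomes WB_SDM845_MASK). A minimal sketch of the replacement idiom; dpu_has_ctl_active_cfg() is an illustrative name, not from the patch:

/*
 * Sketch only: derive a capability from the DPU core revision instead
 * of a per-catalog feature bit.  struct dpu_mdss_version and
 * core_major_ver are real; the helper itself is hypothetical.
 */
static inline bool dpu_has_ctl_active_cfg(const struct dpu_mdss_version *v)
{
	/* every catalog from DPU 5.0 onwards set DPU_CTL_ACTIVE_CFG */
	return v->core_major_ver >= 5;
}
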
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index a4b0fe0d9899..d4b545448d74 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -320,14 +320,22 @@ static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
}
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
- struct dpu_plane_state *pstate, const struct msm_format *format)
+ struct dpu_plane_state *pstate,
+ const struct msm_format *format,
+ const struct dpu_mdss_version *mdss_ver)
{
struct dpu_hw_mixer *lm = mixer->hw_lm;
- uint32_t blend_op;
- uint32_t fg_alpha, bg_alpha;
+ u32 blend_op;
+ u32 fg_alpha, bg_alpha, max_alpha;
- fg_alpha = pstate->base.alpha >> 8;
- bg_alpha = 0xff - fg_alpha;
+ if (mdss_ver->core_major_ver < 12) {
+ max_alpha = 0xff;
+ fg_alpha = pstate->base.alpha >> 8;
+ } else {
+ max_alpha = 0x3ff;
+ fg_alpha = pstate->base.alpha >> 6;
+ }
+ bg_alpha = max_alpha - fg_alpha;
/* default to opaque blending */
if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
@@ -337,7 +345,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
DPU_BLEND_BG_ALPHA_FG_PIXEL;
- if (fg_alpha != 0xff) {
+ if (fg_alpha != max_alpha) {
bg_alpha = fg_alpha;
blend_op |= DPU_BLEND_BG_MOD_ALPHA |
DPU_BLEND_BG_INV_MOD_ALPHA;
@@ -348,7 +356,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
/* coverage blending */
blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
DPU_BLEND_BG_ALPHA_FG_PIXEL;
- if (fg_alpha != 0xff) {
+ if (fg_alpha != max_alpha) {
bg_alpha = fg_alpha;
blend_op |= DPU_BLEND_FG_MOD_ALPHA |
DPU_BLEND_FG_INV_MOD_ALPHA |
@@ -402,7 +410,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
struct dpu_hw_stage_cfg *stage_cfg
)
{
- uint32_t lm_idx;
+ u32 lm_idx;
enum dpu_sspp sspp_idx;
struct drm_plane_state *state;
@@ -442,12 +450,13 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct dpu_plane_state *pstate = NULL;
const struct msm_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
-
- uint32_t lm_idx;
+ u32 lm_idx;
bool bg_alpha_enable = false;
DECLARE_BITMAP(active_fetch, SSPP_MAX);
+ DECLARE_BITMAP(active_pipes, SSPP_MAX);
memset(active_fetch, 0, sizeof(active_fetch));
+ memset(active_pipes, 0, sizeof(active_pipes));
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
if (!state)
@@ -465,6 +474,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
bg_alpha_enable = true;
set_bit(pstate->pipe.sspp->idx, active_fetch);
+ set_bit(pstate->pipe.sspp->idx, active_pipes);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
@@ -473,6 +483,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
if (pstate->r_pipe.sspp) {
set_bit(pstate->r_pipe.sspp->idx, active_fetch);
+ set_bit(pstate->r_pipe.sspp->idx, active_pipes);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
@@ -482,7 +493,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
/* blend config update */
for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
- _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format);
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format,
+ ctl->mdss_ver);
if (bg_alpha_enable && !format->alpha_enable)
mixer[lm_idx].mixer_op_mode = 0;
@@ -495,6 +507,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
if (ctl->ops.set_active_fetch_pipes)
ctl->ops.set_active_fetch_pipes(ctl, active_fetch);
+ if (ctl->ops.set_active_pipes)
+ ctl->ops.set_active_pipes(ctl, active_pipes);
+
_dpu_crtc_program_lm_output_roi(crtc);
}
@@ -510,6 +525,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
struct dpu_hw_ctl *ctl;
struct dpu_hw_mixer *lm;
struct dpu_hw_stage_cfg stage_cfg;
+ DECLARE_BITMAP(active_lms, LM_MAX);
int i;
DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
@@ -521,10 +537,16 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
mixer[i].lm_ctl);
if (mixer[i].lm_ctl->ops.set_active_fetch_pipes)
mixer[i].lm_ctl->ops.set_active_fetch_pipes(mixer[i].lm_ctl, NULL);
+ if (mixer[i].lm_ctl->ops.set_active_pipes)
+ mixer[i].lm_ctl->ops.set_active_pipes(mixer[i].lm_ctl, NULL);
+
+ if (mixer[i].hw_lm->ops.clear_all_blendstages)
+ mixer[i].hw_lm->ops.clear_all_blendstages(mixer[i].hw_lm);
}
/* initialize stage cfg */
memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+ memset(active_lms, 0, sizeof(active_lms));
_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);
@@ -538,13 +560,22 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
ctl->ops.update_pending_flush_mixer(ctl,
mixer[i].hw_lm->idx);
+ set_bit(lm->idx, active_lms);
+ if (ctl->ops.set_active_lms)
+ ctl->ops.set_active_lms(ctl, active_lms);
+
DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
mixer[i].hw_lm->idx - LM_0,
mixer[i].mixer_op_mode,
ctl->idx - CTL_0);
- ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
- &stage_cfg);
+ if (ctl->ops.setup_blendstage)
+ ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+ &stage_cfg);
+
+ if (lm->ops.setup_blendstage)
+ lm->ops.setup_blendstage(lm, mixer[i].hw_lm->idx,
+ &stage_cfg);
}
}
@@ -711,7 +742,7 @@ void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event)
fevent->event = event;
fevent->crtc = crtc;
fevent->ts = ktime_get();
- kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
+ kthread_queue_work(priv->kms->event_thread[crtc_id].worker, &fevent->work);
}
/**
@@ -880,7 +911,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
dev = crtc->dev;
priv = dev->dev_private;
- if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ if (crtc->index >= ARRAY_SIZE(priv->kms->event_thread)) {
DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
return;
}
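
The blend hunk above widens the mixer alpha from 8 to 10 bits on DPU >= 12. The DRM plane alpha property is 16 bits wide, so the shift selects the hardware width: >> 8 gives 0..0xff, >> 6 gives 0..0x3ff, and bg_alpha stays the complement against the matching maximum. A worked sketch of just that scaling, using an illustrative helper rather than the driver's exact code:

/* Sketch: scale the 16-bit DRM plane alpha to the mixer's width. */
static u32 scale_fg_alpha(u16 drm_alpha, u32 core_major_ver)
{
	if (core_major_ver < 12)
		return drm_alpha >> 8;	/* 8-bit HW alpha, max 0xff */

	return drm_alpha >> 6;		/* 10-bit HW alpha, max 0x3ff */
}

/* fully opaque 0xffff maps to 0xff (DPU < 12) or 0x3ff (DPU >= 12) */
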
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index c0ed110a7d30..05e5f3463e30 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -264,7 +264,7 @@ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
mode = &phys_enc->cached_mode;
return phys_enc->hw_intf->cap->type == INTF_DP &&
- msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode);
+ msm_dp_needs_periph_flush(priv->kms->dp[disp_info->h_tile_instance[0]], mode);
}
/**
@@ -283,9 +283,9 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
index = disp_info->h_tile_instance[0];
if (disp_info->intf_type == INTF_DP)
- return msm_dp_wide_bus_available(priv->dp[index]);
+ return msm_dp_wide_bus_available(priv->kms->dp[index]);
else if (disp_info->intf_type == INTF_DSI)
- return msm_dsi_wide_bus_enabled(priv->dsi[index]);
+ return msm_dsi_wide_bus_enabled(priv->kms->dsi[index]);
return false;
}
@@ -647,7 +647,7 @@ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
int index = dpu_enc->disp_info.h_tile_instance[0];
if (dpu_enc->disp_info.intf_type == INTF_DSI)
- return msm_dsi_get_dsc_config(priv->dsi[index]);
+ return msm_dsi_get_dsc_config(priv->kms->dsi[index]);
return NULL;
}
@@ -709,7 +709,8 @@ void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
topology->num_cdm++;
} else if (disp_info->intf_type == INTF_DP) {
- if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
+ if (msm_dp_is_yuv_420_enabled(priv->kms->dp[disp_info->h_tile_instance[0]],
+ adj_mode))
topology->num_cdm++;
}
}
@@ -980,7 +981,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
return 0;
}
- queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
+ queue_delayed_work(priv->kms->wq, &dpu_enc->delayed_off_work,
msecs_to_jiffies(dpu_enc->idle_timeout));
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
@@ -2195,8 +2196,17 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
if (ctl->ops.setup_blendstage)
ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+ if (hw_mixer[i]->ops.clear_all_blendstages)
+ hw_mixer[i]->ops.clear_all_blendstages(hw_mixer[i]);
+
+ if (ctl->ops.set_active_lms)
+ ctl->ops.set_active_lms(ctl, NULL);
+
if (ctl->ops.set_active_fetch_pipes)
ctl->ops.set_active_fetch_pipes(ctl, NULL);
+
+ if (ctl->ops.set_active_pipes)
+ ctl->ops.set_active_pipes(ctl, NULL);
}
}
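
Throughout this file the encoder now reaches the DP/DSI handles, the workqueue and the event threads through priv->kms instead of priv itself, implying those fields moved from struct msm_drm_private into the kms object. An assumed shape of that container after the move; the field names come from the hunks, the array bounds are illustrative:

struct msm_kms {
	/* ... existing fields ... */
	struct msm_dsi *dsi[2];				/* bound illustrative */
	struct msm_dp *dp[4];				/* bound illustrative */
	struct workqueue_struct *wq;
	struct msm_drm_thread event_thread[MAX_CRTCS];	/* assumption */
};
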
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index a0ba55ab3c89..0ec6d67c7c70 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -69,7 +69,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
/* setup which pp blk will connect to this intf */
- if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
+ if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5 &&
+ phys_enc->hw_intf->ops.bind_pingpong_blk)
phys_enc->hw_intf->ops.bind_pingpong_blk(
phys_enc->hw_intf,
phys_enc->hw_pp->idx);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 1c468ca5d692..0ba777bda253 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -313,8 +313,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
- &timing_params, fmt,
- phys_enc->dpu_kms->catalog->mdss_ver);
+ &timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
/* setup which pp blk will connect to this intf */
@@ -378,7 +377,7 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg)
static bool dpu_encoder_phys_vid_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
- return !(phys_enc->hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG)) &&
+ return !(phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5) &&
phys_enc->split_role != ENC_ROLE_SOLO;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 849fea580a4c..56a5b596554d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -218,7 +218,6 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
- struct dpu_hw_ctl *ctl;
struct dpu_hw_cdm *hw_cdm;
if (!phys_enc) {
@@ -227,10 +226,9 @@ static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
}
hw_wb = phys_enc->hw_wb;
- ctl = phys_enc->hw_ctl;
hw_cdm = phys_enc->hw_cdm;
- if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) &&
+ if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5 &&
(phys_enc->hw_ctl &&
phys_enc->hw_ctl->ops.setup_intf_cfg)) {
struct dpu_hw_intf_cfg intf_cfg = {0};
@@ -534,7 +532,6 @@ static void dpu_encoder_phys_wb_enable(struct dpu_encoder_phys *phys_enc)
static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
- struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
@@ -556,7 +553,7 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
* WB support is added to those targets will need to add
* the legacy teardown sequence as well.
*/
- if (hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG))
+ if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5)
dpu_encoder_helper_phys_cleanup(phys_enc);
phys_enc->enable_state = DPU_ENC_DISABLED;
@@ -566,7 +563,6 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
struct drm_writeback_job *job)
{
const struct msm_format *format;
- struct msm_gem_address_space *aspace;
struct dpu_hw_wb_cfg *wb_cfg;
int ret;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
@@ -576,13 +572,12 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
wb_enc->wb_job = job;
wb_enc->wb_conn = job->connector;
- aspace = phys_enc->dpu_kms->base.aspace;
wb_cfg = &wb_enc->wb_cfg;
memset(wb_cfg, 0, sizeof(struct dpu_hw_wb_cfg));
- ret = msm_framebuffer_prepare(job->fb, aspace, false);
+ ret = msm_framebuffer_prepare(job->fb, false);
if (ret) {
DPU_ERROR("prep fb failed, %d\n", ret);
return;
@@ -596,7 +591,7 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
return;
}
- dpu_format_populate_addrs(aspace, job->fb, &wb_cfg->dest);
+ dpu_format_populate_addrs(job->fb, &wb_cfg->dest);
wb_cfg->dest.width = job->fb->width;
wb_cfg->dest.height = job->fb->height;
@@ -619,14 +614,11 @@ static void dpu_encoder_phys_wb_cleanup_wb_job(struct dpu_encoder_phys *phys_enc
struct drm_writeback_job *job)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
- struct msm_gem_address_space *aspace;
if (!job->fb)
return;
- aspace = phys_enc->dpu_kms->base.aspace;
-
- msm_framebuffer_cleanup(job->fb, aspace, false);
+ msm_framebuffer_cleanup(job->fb, false);
wb_enc->wb_job = NULL;
wb_enc->wb_conn = NULL;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 59c9427da7dd..b0d585c5315c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -274,15 +274,14 @@ int dpu_format_populate_plane_sizes(
return _dpu_format_populate_plane_sizes_linear(fmt, fb, layout);
}
-static void _dpu_format_populate_addrs_ubwc(struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
+static void _dpu_format_populate_addrs_ubwc(struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
const struct msm_format *fmt;
uint32_t base_addr = 0;
bool meta;
- base_addr = msm_framebuffer_iova(fb, aspace, 0);
+ base_addr = msm_framebuffer_iova(fb, 0);
fmt = msm_framebuffer_format(fb);
meta = MSM_FORMAT_IS_UBWC(fmt);
@@ -355,26 +354,23 @@ static void _dpu_format_populate_addrs_ubwc(struct msm_gem_address_space *aspace
}
}
-static void _dpu_format_populate_addrs_linear(struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
+static void _dpu_format_populate_addrs_linear(struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
unsigned int i;
/* Populate addresses for simple formats here */
for (i = 0; i < layout->num_planes; ++i)
- layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
-}
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, i);
+}
/**
* dpu_format_populate_addrs - populate buffer addresses based on
* mmu, fb, and format found in the fb
- * @aspace: address space pointer
* @fb: framebuffer pointer
* @layout: format layout structure to populate
*/
-void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
+void dpu_format_populate_addrs(struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
const struct msm_format *fmt;
@@ -384,7 +380,7 @@ void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
/* Populate the addresses given the fb */
if (MSM_FORMAT_IS_UBWC(fmt) ||
MSM_FORMAT_IS_TILE(fmt))
- _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+ _dpu_format_populate_addrs_ubwc(fb, layout);
else
- _dpu_format_populate_addrs_linear(aspace, fb, layout);
+ _dpu_format_populate_addrs_linear(fb, layout);
}
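
With the address-space parameter gone, the framebuffer alone resolves the per-plane iovas. A caller-side sketch restricted to names the hunks above confirm:

/* Sketch of a caller after the change. */
static void sketch_prepare_layout(struct drm_framebuffer *fb)
{
	struct dpu_hw_fmt_layout layout = { };

	/* picks the UBWC/tiled or linear path from the fb's format */
	dpu_format_populate_addrs(fb, &layout);
}
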
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index c6145d43aa3f..dc03f522e616 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -31,8 +31,7 @@ static inline bool dpu_find_format(u32 format, const u32 *supported_formats,
return false;
}
-void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
+void dpu_format_populate_addrs(struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout);
int dpu_format_populate_plane_sizes(
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index c878fe196aeb..e824cd64fd3f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -35,12 +35,12 @@
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK_NO_SDMA \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK_SDMA \
(VIG_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
-#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
+#define VIG_QCM2290_MASK (VIG_BASE_MASK)
#define DMA_MSM8953_MASK \
(BIT(DPU_SSPP_QOS))
@@ -60,7 +60,7 @@
(VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_SDM845_MASK_NO_SDMA \
- (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+ (BIT(DPU_SSPP_QOS) | \
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
@@ -89,39 +89,6 @@
#define MIXER_MSM8998_MASK \
(BIT(DPU_MIXER_SOURCESPLIT))
-#define MIXER_SDM845_MASK \
- (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
-
-#define MIXER_QCM2290_MASK \
- (BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
-
-#define PINGPONG_MSM8996_MASK \
- (BIT(DPU_PINGPONG_DSC))
-
-#define PINGPONG_SDM845_MASK \
- (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
-
-#define PINGPONG_SM8150_MASK \
- (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
-
-#define CTL_SC7280_MASK \
- (BIT(DPU_CTL_ACTIVE_CFG) | \
- BIT(DPU_CTL_FETCH_ACTIVE) | \
- BIT(DPU_CTL_VM_CFG) | \
- BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
-
-#define CTL_SM8550_MASK \
- (CTL_SC7280_MASK | BIT(DPU_CTL_HAS_LAYER_EXT4))
-
-#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
-
-#define INTF_SC7180_MASK \
- (BIT(DPU_INTF_INPUT_CTRL) | \
- BIT(DPU_INTF_STATUS_SUPPORTED) | \
- BIT(DPU_DATA_HCTL_EN))
-
-#define INTF_SC7280_MASK (INTF_SC7180_MASK)
-
#define WB_SDM845_MASK (BIT(DPU_WB_LINE_MODE) | \
BIT(DPU_WB_UBWC) | \
BIT(DPU_WB_YUV_CONFIG) | \
@@ -131,9 +98,6 @@
BIT(DPU_WB_QOS_8LVL) | \
BIT(DPU_WB_CDP))
-#define WB_SM8250_MASK (WB_SDM845_MASK | \
- BIT(DPU_WB_INPUT_CTRL))
-
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
@@ -362,6 +326,9 @@ static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 =
static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 =
_VIG_SBLK(SSPP_SCALER_VER(3, 3));
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_4 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 4));
+
static const struct dpu_sspp_sub_blks dpu_rgb_sblk = _RGB_SBLK();
static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
@@ -396,6 +363,16 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
},
};
+static const struct dpu_lm_sub_blks sm8750_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 11, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ /* 0x40 + n*0x30 */
+ 0x40, 0x70, 0xa0, 0xd0, 0x100, 0x130, 0x160, 0x190, 0x1c0,
+ 0x1f0, 0x220
+ },
+};
+
static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
.maxwidth = DEFAULT_DPU_LINE_WIDTH,
.maxblendstages = 4, /* excluding base layer */
@@ -417,6 +394,11 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
.len = 0x90, .version = 0x40000},
};
+static const struct dpu_dspp_sub_blks sm8750_dspp_sblk = {
+ .pcc = {.name = "pcc", .base = 0x1700,
+ .len = 0x90, .version = 0x60000},
+};
+
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
@@ -448,6 +430,16 @@ static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
.ctl = {.name = "ctl", .base = 0xF80, .len = 0x10},
};
+static const struct dpu_dsc_sub_blks sm8750_dsc_sblk_0 = {
+ .enc = {.name = "enc", .base = 0x100, .len = 0x100},
+ .ctl = {.name = "ctl", .base = 0xF00, .len = 0x24},
+};
+
+static const struct dpu_dsc_sub_blks sm8750_dsc_sblk_1 = {
+ .enc = {.name = "enc", .base = 0x200, .len = 0x100},
+ .ctl = {.name = "ctl", .base = 0xF80, .len = 0x24},
+};
+
/*************************************************************
* CDM block config
*************************************************************/
@@ -738,3 +730,4 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_9_2_x1e80100.h"
#include "catalog/dpu_10_0_sm8650.h"
+#include "catalog/dpu_12_0_sm8750.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 01dd6e65f777..a78bb2c334e3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -29,26 +29,6 @@
#define MAX_XIN_COUNT 16
/**
- * MDP TOP BLOCK features
- * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
- * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
- * @DPU_MDP_PERIPH_0_REMOVED Indicates that access to periph top0 block results
- * in a failure
- * @DPU_MDP_VSYNC_SEL Enables vsync source selection via MDP_VSYNC_SEL register
- * (moved into INTF block since DPU 5.0.0)
- * @DPU_MDP_MAX Maximum value
-
- */
-enum {
- DPU_MDP_PANIC_PER_PIPE = 0x1,
- DPU_MDP_10BIT_SUPPORT,
- DPU_MDP_AUDIO_SELECT,
- DPU_MDP_PERIPH_0_REMOVED,
- DPU_MDP_VSYNC_SEL,
- DPU_MDP_MAX
-};
-
-/**
* SSPP sub-blocks/features
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
* @DPU_SSPP_SCALER_QSEED3_COMPATIBLE, QSEED3-compatible algorithm support (includes QSEED3, QSEED3LITE and QSEED4)
@@ -57,7 +37,6 @@ enum {
* @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
* @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer
* @DPU_SSPP_QOS, SSPP support QoS control, danger/safe/creq
- * @DPU_SSPP_QOS_8LVL, SSPP support 8-level QoS control
* @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect
* @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
* @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
@@ -75,7 +54,6 @@ enum {
DPU_SSPP_CSC_10BIT,
DPU_SSPP_CURSOR,
DPU_SSPP_QOS,
- DPU_SSPP_QOS_8LVL,
DPU_SSPP_EXCL_RECT,
DPU_SSPP_SMART_DMA_V1,
DPU_SSPP_SMART_DMA_V2,
@@ -88,20 +66,12 @@ enum {
/*
* MIXER sub-blocks/features
- * @DPU_MIXER_LAYER Layer mixer layer blend configuration,
* @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
- * @DPU_MIXER_GC Gamma correction block
- * @DPU_DIM_LAYER Layer mixer supports dim layer
- * @DPU_MIXER_COMBINED_ALPHA Layer mixer has combined alpha register
* @DPU_MIXER_MAX maximum value
*/
enum {
- DPU_MIXER_LAYER = 0x1,
- DPU_MIXER_SOURCESPLIT,
- DPU_MIXER_GC,
- DPU_DIM_LAYER,
- DPU_MIXER_COMBINED_ALPHA,
- DPU_MIXER_MAX
+ DPU_MIXER_SOURCESPLIT = 0x1,
+ DPU_MIXER_MAX,
};
/**
@@ -114,57 +84,16 @@ enum {
};
/**
- * PINGPONG sub-blocks
- * @DPU_PINGPONG_SPLIT PP block supports split fifo
- * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
- * @DPU_PINGPONG_DITHER Dither blocks
- * @DPU_PINGPONG_DSC PP block supports DSC
- * @DPU_PINGPONG_MAX
- */
-enum {
- DPU_PINGPONG_SPLIT = 0x1,
- DPU_PINGPONG_SLAVE,
- DPU_PINGPONG_DITHER,
- DPU_PINGPONG_DSC,
- DPU_PINGPONG_MAX
-};
-
-/**
* CTL sub-blocks
* @DPU_CTL_SPLIT_DISPLAY: CTL supports video mode split display
- * @DPU_CTL_FETCH_ACTIVE: Active CTL for fetch HW (SSPPs)
- * @DPU_CTL_VM_CFG: CTL config to support multiple VMs
- * @DPU_CTL_HAS_LAYER_EXT4: CTL has the CTL_LAYER_EXT4 register
- * @DPU_CTL_DSPP_BLOCK_FLUSH: CTL config to support dspp sub-block flush
* @DPU_CTL_MAX
*/
enum {
DPU_CTL_SPLIT_DISPLAY = 0x1,
- DPU_CTL_ACTIVE_CFG,
- DPU_CTL_FETCH_ACTIVE,
- DPU_CTL_VM_CFG,
- DPU_CTL_HAS_LAYER_EXT4,
- DPU_CTL_DSPP_SUB_BLOCK_FLUSH,
DPU_CTL_MAX
};
/**
- * INTF sub-blocks
- * @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which
- * pixel data arrives to this INTF
- * @DPU_DATA_HCTL_EN Allows data to be transferred at different rate
- * than video timing
- * @DPU_INTF_STATUS_SUPPORTED INTF block has INTF_STATUS register
- * @DPU_INTF_MAX
- */
-enum {
- DPU_INTF_INPUT_CTRL = 0x1,
- DPU_DATA_HCTL_EN,
- DPU_INTF_STATUS_SUPPORTED,
- DPU_INTF_MAX
-};
-
-/**
* WB sub-blocks and features
* @DPU_WB_LINE_MODE Writeback module supports line/linear mode
* @DPU_WB_BLOCK_MODE Writeback module supports block mode read
@@ -180,8 +109,6 @@ enum {
* @DPU_WB_QOS, Writeback supports QoS control, danger/safe/creq
* @DPU_WB_QOS_8LVL, Writeback supports 8-level QoS control
* @DPU_WB_CDP Writeback supports client driven prefetch
- * @DPU_WB_INPUT_CTRL Writeback supports from which pp block input pixel
- * data arrives.
* @DPU_WB_CROP CWB supports cropping
* @DPU_WB_MAX maximum value
*/
@@ -195,7 +122,6 @@ enum {
DPU_WB_QOS,
DPU_WB_QOS_8LVL,
DPU_WB_CDP,
- DPU_WB_INPUT_CTRL,
DPU_WB_CROP,
DPU_WB_MAX
};
@@ -214,16 +140,11 @@ enum {
/**
* DSC sub-blocks/features
- * @DPU_DSC_OUTPUT_CTRL Configure which PINGPONG block gets
- * the pixel output from this DSC.
- * @DPU_DSC_HW_REV_1_2 DSC block supports DSC 1.1 and 1.2
* @DPU_DSC_NATIVE_42x_EN Supports NATIVE_422_EN and NATIVE_420_EN encoding
* @DPU_DSC_MAX
*/
enum {
- DPU_DSC_OUTPUT_CTRL = 0x1,
- DPU_DSC_HW_REV_1_2,
- DPU_DSC_NATIVE_42x_EN,
+ DPU_DSC_NATIVE_42x_EN = 0x1,
DPU_DSC_MAX
};
@@ -233,14 +154,12 @@ enum {
* @id: enum identifying this block
* @base: register base offset to mdss
* @len: length of hardware block
- * @features bit mask identifying sub-blocks/features
*/
#define DPU_HW_BLK_INFO \
char name[DPU_HW_BLK_NAME_LEN]; \
u32 id; \
u32 base; \
- u32 len; \
- unsigned long features
+ u32 len
/**
* struct dpu_scaler_blk: Scaler information
@@ -455,7 +374,6 @@ struct dpu_clk_ctrl_reg {
/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
* @id: index identifying this block
* @base: register base offset to mdss
- * @features bit mask identifying sub-blocks/features
* @clk_ctrls clock control register definition
*/
struct dpu_mdp_cfg {
@@ -471,6 +389,7 @@ struct dpu_mdp_cfg {
*/
struct dpu_ctl_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
unsigned int intr_start;
};
@@ -486,6 +405,7 @@ struct dpu_ctl_cfg {
*/
struct dpu_sspp_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
const struct dpu_sspp_sub_blks *sblk;
u32 xin_id;
enum dpu_clk_ctrl_type clk_ctrl;
@@ -503,6 +423,7 @@ struct dpu_sspp_cfg {
*/
struct dpu_lm_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
const struct dpu_lm_sub_blks *sblk;
u32 pingpong;
u32 dspp;
@@ -513,7 +434,6 @@ struct dpu_lm_cfg {
* struct dpu_dspp_cfg - information of DSPP blocks
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
* supported by this block
* @sblk sub-blocks information
*/
@@ -526,7 +446,6 @@ struct dpu_dspp_cfg {
* struct dpu_pingpong_cfg - information of PING-PONG blocks
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
* @intr_done: index for PINGPONG done interrupt
* @intr_rdptr: index for PINGPONG readpointer done interrupt
* @sblk sub-blocks information
@@ -543,8 +462,6 @@ struct dpu_pingpong_cfg {
* struct dpu_merge_3d_cfg - information of DSPP blocks
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
- * supported by this block
* @sblk sub-blocks information
*/
struct dpu_merge_3d_cfg {
@@ -562,6 +479,7 @@ struct dpu_merge_3d_cfg {
*/
struct dpu_dsc_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
const struct dpu_dsc_sub_blks *sblk;
};
@@ -569,7 +487,6 @@ struct dpu_dsc_cfg {
* struct dpu_intf_cfg - information of timing engine blocks
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
* @type: Interface type(DSI, DP, HDMI)
* @controller_id: Controller Instance ID in case of multiple of intf type
* @prog_fetch_lines_worst_case Worst case latency num lines needed to prefetch
@@ -600,6 +517,7 @@ struct dpu_intf_cfg {
*/
struct dpu_wb_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
u8 vbif_idx;
u32 maxlinewidth;
u32 xin_id;
@@ -668,6 +586,7 @@ struct dpu_vbif_qos_tbl {
*/
struct dpu_vbif_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
u32 default_ot_rd_limit;
u32 default_ot_wr_limit;
u32 xin_halt_timeout;
@@ -685,7 +604,6 @@ struct dpu_vbif_cfg {
* @name string name for debug purposes
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
*/
struct dpu_cdm_cfg {
DPU_HW_BLK_INFO;
@@ -860,6 +778,7 @@ extern const struct dpu_mdss_cfg dpu_sm8450_cfg;
extern const struct dpu_mdss_cfg dpu_sa8775p_cfg;
extern const struct dpu_mdss_cfg dpu_sm8550_cfg;
extern const struct dpu_mdss_cfg dpu_sm8650_cfg;
+extern const struct dpu_mdss_cfg dpu_sm8750_cfg;
extern const struct dpu_mdss_cfg dpu_x1e80100_cfg;
#endif /* _DPU_HW_CATALOG_H */
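
Net effect of this header: DPU_HW_BLK_INFO now carries only name, id, base and len, and the block types with genuine per-instance capability differences (CTL, SSPP, LM, DSC, WB, VBIF) re-declare an explicit unsigned long features, while MDP top, DSPP, pingpong, merge_3d, INTF and CDM lose the field entirely. Side by side, as the hunks above leave them:

struct dpu_dspp_cfg {
	DPU_HW_BLK_INFO;		/* name/id/base/len, no features */
	const struct dpu_dspp_sub_blks *sblk;
};

struct dpu_ctl_cfg {
	DPU_HW_BLK_INFO;
	unsigned long features;		/* opted back in per block type */
	unsigned int intr_start;
};
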
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 573e42b06ad0..ac834db2e4c1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -42,6 +42,8 @@
#define CTL_INTF_FLUSH 0x110
#define CTL_CDM_FLUSH 0x114
#define CTL_PERIPH_FLUSH 0x128
+#define CTL_PIPE_ACTIVE 0x12c
+#define CTL_LAYER_ACTIVE 0x130
#define CTL_INTF_MASTER 0x134
#define CTL_DSPP_n_FLUSH(n) ((0x13C) + ((n) * 4))
@@ -64,6 +66,8 @@ static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
1, 2, 3, 4, 5};
+static const u32 lm_tbl[LM_MAX] = {CTL_INVALID_BIT, 0, 1, 2, 3, 4, 5, 6, 7};
+
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
enum dpu_lm lm)
{
@@ -555,7 +559,7 @@ exit:
DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
- if ((test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features)))
+ if (ctx->mdss_ver->core_major_ver >= 9)
DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}
@@ -575,7 +579,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
* per VM. Explicitly disable it until VM support is
* added in SW. Power on reset value is not disable.
*/
- if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
+ if (ctx->mdss_ver->core_major_ver >= 7)
mode_sel = CTL_DEFAULT_GROUP_ID << 28;
if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
@@ -676,11 +680,18 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
merge3d_active);
}
- dpu_hw_ctl_clear_all_blendstages(ctx);
+ if (ctx->ops.clear_all_blendstages)
+ ctx->ops.clear_all_blendstages(ctx);
+
+ if (ctx->ops.set_active_lms)
+ ctx->ops.set_active_lms(ctx, NULL);
if (ctx->ops.set_active_fetch_pipes)
ctx->ops.set_active_fetch_pipes(ctx, NULL);
+ if (ctx->ops.set_active_pipes)
+ ctx->ops.set_active_pipes(ctx, NULL);
+
if (cfg->intf) {
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
intf_active &= ~BIT(cfg->intf - INTF_0);
@@ -737,55 +748,39 @@ static void dpu_hw_ctl_set_active_fetch_pipes(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}
-static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
- unsigned long cap)
+static void dpu_hw_ctl_set_active_pipes(struct dpu_hw_ctl *ctx,
+ unsigned long *active_pipes)
{
- if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
- ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
- ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
- ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
- ops->update_pending_flush_intf =
- dpu_hw_ctl_update_pending_flush_intf_v1;
+ int i;
+ u32 val = 0;
- ops->update_pending_flush_periph =
- dpu_hw_ctl_update_pending_flush_periph_v1;
+ if (active_pipes) {
+ for (i = 0; i < SSPP_MAX; i++) {
+ if (test_bit(i, active_pipes) &&
+ fetch_tbl[i] != CTL_INVALID_BIT)
+ val |= BIT(fetch_tbl[i]);
+ }
+ }
- ops->update_pending_flush_merge_3d =
- dpu_hw_ctl_update_pending_flush_merge_3d_v1;
- ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
- ops->update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
- ops->update_pending_flush_dsc =
- dpu_hw_ctl_update_pending_flush_dsc_v1;
- ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
- } else {
- ops->trigger_flush = dpu_hw_ctl_trigger_flush;
- ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
- ops->update_pending_flush_intf =
- dpu_hw_ctl_update_pending_flush_intf;
- ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
- ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
+ DPU_REG_WRITE(&ctx->hw, CTL_PIPE_ACTIVE, val);
+}
+
+static void dpu_hw_ctl_set_active_lms(struct dpu_hw_ctl *ctx,
+ unsigned long *active_lms)
+{
+ int i;
+ u32 val = 0;
+
+ if (active_lms) {
+ for (i = LM_0; i < LM_MAX; i++) {
+ if (test_bit(i, active_lms) &&
+ lm_tbl[i] != CTL_INVALID_BIT)
+ val |= BIT(lm_tbl[i]);
+ }
}
- ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
- ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
- ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
- ops->get_flush_register = dpu_hw_ctl_get_flush_register;
- ops->trigger_start = dpu_hw_ctl_trigger_start;
- ops->is_started = dpu_hw_ctl_is_started;
- ops->trigger_pending = dpu_hw_ctl_trigger_pending;
- ops->reset = dpu_hw_ctl_reset_control;
- ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
- ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
- ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
- ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
- ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
- if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
- ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
- else
- ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
- if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
- ops->set_active_fetch_pipes = dpu_hw_ctl_set_active_fetch_pipes;
-};
+ DPU_REG_WRITE(&ctx->hw, CTL_LAYER_ACTIVE, val);
+}
/**
* dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
@@ -793,12 +788,14 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
* @dev: Corresponding device for devres management
* @cfg: ctl_path catalog entry for which driver object is required
* @addr: mapped register io address of MDP
+ * @mdss_ver: dpu core's major and minor versions
* @mixer_count: Number of mixers in @mixer
* @mixer: Pointer to an array of Layer Mixers defined in the catalog
*/
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
const struct dpu_ctl_cfg *cfg,
void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver,
u32 mixer_count,
const struct dpu_lm_cfg *mixer)
{
@@ -812,7 +809,59 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
c->hw.log_mask = DPU_DBG_MASK_CTL;
c->caps = cfg;
- _setup_ctl_ops(&c->ops, c->caps->features);
+ c->mdss_ver = mdss_ver;
+
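+ /* DPU >= 5.0 (active CTL configuration) uses the v1 flush and interface ops */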
+ if (mdss_ver->core_major_ver >= 5) {
+ c->ops.trigger_flush = dpu_hw_ctl_trigger_flush_v1;
+ c->ops.setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
+ c->ops.reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
+ c->ops.update_pending_flush_intf =
+ dpu_hw_ctl_update_pending_flush_intf_v1;
+
+ c->ops.update_pending_flush_periph =
+ dpu_hw_ctl_update_pending_flush_periph_v1;
+
+ c->ops.update_pending_flush_merge_3d =
+ dpu_hw_ctl_update_pending_flush_merge_3d_v1;
+ c->ops.update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
+ c->ops.update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
+ c->ops.update_pending_flush_dsc =
+ dpu_hw_ctl_update_pending_flush_dsc_v1;
+ c->ops.update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
+ } else {
+ c->ops.trigger_flush = dpu_hw_ctl_trigger_flush;
+ c->ops.setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ c->ops.update_pending_flush_intf =
+ dpu_hw_ctl_update_pending_flush_intf;
+ c->ops.update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
+ c->ops.update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
+ }
+ c->ops.clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+ c->ops.update_pending_flush = dpu_hw_ctl_update_pending_flush;
+ c->ops.get_pending_flush = dpu_hw_ctl_get_pending_flush;
+ c->ops.get_flush_register = dpu_hw_ctl_get_flush_register;
+ c->ops.trigger_start = dpu_hw_ctl_trigger_start;
+ c->ops.is_started = dpu_hw_ctl_is_started;
+ c->ops.trigger_pending = dpu_hw_ctl_trigger_pending;
+ c->ops.reset = dpu_hw_ctl_reset_control;
+ c->ops.wait_reset_status = dpu_hw_ctl_wait_reset_status;
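+ /* from DPU v12, blend stages are programmed through the LM block, so CTL tracks active pipes/LMs instead */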
+ if (mdss_ver->core_major_ver < 12) {
+ c->ops.clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+ c->ops.setup_blendstage = dpu_hw_ctl_setup_blendstage;
+ } else {
+ c->ops.set_active_pipes = dpu_hw_ctl_set_active_pipes;
+ c->ops.set_active_lms = dpu_hw_ctl_set_active_lms;
+ }
+ c->ops.update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
+ c->ops.update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
+ if (mdss_ver->core_major_ver >= 7)
+ c->ops.update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
+ else
+ c->ops.update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
+
+ if (mdss_ver->core_major_ver >= 7)
+ c->ops.set_active_fetch_pipes = dpu_hw_ctl_set_active_fetch_pipes;
+
c->idx = cfg->id;
c->mixer_count = mixer_count;
c->mixer_hw_caps = mixer;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index feb09590bc8f..15931b22ec94 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -258,6 +258,23 @@ struct dpu_hw_ctl_ops {
void (*set_active_fetch_pipes)(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active);
+
+ /**
+ * Set active pipes attached to this CTL
+ * @ctx: ctl path ctx pointer
+ * @active_pipes: bitmap of enum dpu_sspp
+ */
+ void (*set_active_pipes)(struct dpu_hw_ctl *ctx,
+ unsigned long *active_pipes);
+
+ /**
+ * Set active layer mixers attached to this CTL
+ * @ctx: ctl path ctx pointer
+ * @active_lms: bitmap of enum dpu_lm
+ */
+ void (*set_active_lms)(struct dpu_hw_ctl *ctx,
+ unsigned long *active_lms);
+
};
/**
@@ -274,6 +291,7 @@ struct dpu_hw_ctl_ops {
* @pending_cwb_flush_mask: pending CWB flush
* @pending_dsc_flush_mask: pending DSC flush
* @pending_cdm_flush_mask: pending CDM flush
+ * @mdss_ver: MDSS revision information
* @ops: operation list
*/
struct dpu_hw_ctl {
@@ -295,6 +313,8 @@ struct dpu_hw_ctl {
u32 pending_dsc_flush_mask;
u32 pending_cdm_flush_mask;
+ const struct dpu_mdss_version *mdss_ver;
+
/* ops */
struct dpu_hw_ctl_ops ops;
};
@@ -312,6 +332,7 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
const struct dpu_ctl_cfg *cfg,
void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver,
u32 mixer_count,
const struct dpu_lm_cfg *mixer);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index cec6d4e8baec..3a149caa7ff4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -181,26 +181,18 @@ static void dpu_hw_dsc_bind_pingpong_blk(
DPU_REG_WRITE(c, dsc_ctl_offset, mux_cfg);
}
-static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
- unsigned long cap)
-{
- ops->dsc_disable = dpu_hw_dsc_disable;
- ops->dsc_config = dpu_hw_dsc_config;
- ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
- if (cap & BIT(DPU_DSC_OUTPUT_CTRL))
- ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
-};
-
/**
* dpu_hw_dsc_init() - Initializes the DSC hw driver object.
* @dev: Corresponding device for devres management
* @cfg: DSC catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
+ * @mdss_ver: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_dsc context
*/
struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
- void __iomem *addr)
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver)
{
struct dpu_hw_dsc *c;
@@ -213,7 +205,12 @@ struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
c->idx = cfg->id;
c->caps = cfg;
- _setup_dsc_ops(&c->ops, c->caps->features);
+
+ c->ops.dsc_disable = dpu_hw_dsc_disable;
+ c->ops.dsc_config = dpu_hw_dsc_config;
+ c->ops.dsc_config_thresh = dpu_hw_dsc_config_thresh;
+ if (mdss_ver->core_major_ver >= 5)
+ c->ops.dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index fc171bdeca48..b7013c9822d2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -64,7 +64,8 @@ struct dpu_hw_dsc {
struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
- void __iomem *addr);
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver);
struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
index b9c433567262..b3395e9c34a1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
@@ -360,8 +360,7 @@ static void dpu_hw_dsc_bind_pingpong_blk_1_2(struct dpu_hw_dsc *hw_dsc,
DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CTL, mux_cfg);
}
-static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops,
- const unsigned long features)
+static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops)
{
ops->dsc_disable = dpu_hw_dsc_disable_1_2;
ops->dsc_config = dpu_hw_dsc_config_1_2;
@@ -391,7 +390,7 @@ struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
c->idx = cfg->id;
c->caps = cfg;
- _setup_dcs_ops_1_2(&c->ops, c->caps->features);
+ _setup_dcs_ops_1_2(&c->ops);
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index 829ca272873e..11fb1bc54fa9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -63,13 +63,6 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
DPU_REG_WRITE(&ctx->hw, base, PCC_EN);
}
-static void _setup_dspp_ops(struct dpu_hw_dspp *c,
- unsigned long features)
-{
- if (test_bit(DPU_DSPP_PCC, &features))
- c->ops.setup_pcc = dpu_setup_dspp_pcc;
-}
-
/**
* dpu_hw_dspp_init() - Initializes the DSPP hw driver object.
* should be called once before accessing every DSPP.
@@ -97,7 +90,8 @@ struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
/* Assign ops */
c->idx = cfg->id;
c->cap = cfg;
- _setup_dspp_ops(c, c->cap->features);
+ if (c->cap->sblk->pcc.base)
+ c->ops.setup_pcc = dpu_setup_dspp_pcc;
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index fb1d25baa518..a80ac82a9625 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -98,8 +98,7 @@
static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_timing_params *p,
- const struct msm_format *fmt,
- const struct dpu_mdss_version *mdss_ver)
+ const struct msm_format *fmt)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
u32 hsync_period, vsync_period;
@@ -180,7 +179,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf,
/* TODO: handle DSC+DP case, we only handle DSC+DSI case so far */
if (p->compression_en && !dp_intf &&
- mdss_ver->core_major_ver >= 7)
+ intf->mdss_ver->core_major_ver >= 7)
intf_cfg2 |= INTF_CFG2_DCE_DATA_COMPRESS;
hsync_data_start_x = hsync_start_x;
@@ -238,7 +237,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf,
DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
- if (intf->cap->features & BIT(DPU_DATA_HCTL_EN)) {
+ if (intf->mdss_ver->core_major_ver >= 5) {
/*
* DATA_HCTL_EN controls data timing which can be different from
* video timing. It is recommended to enable it for all cases, except
@@ -309,9 +308,8 @@ static void dpu_hw_intf_get_status(
struct dpu_hw_intf_status *s)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
- unsigned long cap = intf->cap->features;
- if (cap & BIT(DPU_INTF_STATUS_SUPPORTED))
+ if (intf->mdss_ver->core_major_ver >= 5)
s->is_en = DPU_REG_READ(c, INTF_STATUS) & BIT(0);
else
s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
@@ -580,6 +578,8 @@ struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
c->idx = cfg->id;
c->cap = cfg;
+ c->mdss_ver = mdss_rev;
+
c->ops.setup_timing_gen = dpu_hw_intf_setup_timing_engine;
c->ops.setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
c->ops.get_status = dpu_hw_intf_get_status;
@@ -588,7 +588,7 @@ struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
c->ops.setup_misr = dpu_hw_intf_setup_misr;
c->ops.collect_misr = dpu_hw_intf_collect_misr;
- if (cfg->features & BIT(DPU_INTF_INPUT_CTRL))
+ if (mdss_rev->core_major_ver >= 5)
c->ops.bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
/* INTF TE is only for DSI interfaces */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 114be272ac0a..f31067a9aaf1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -81,8 +81,7 @@ struct dpu_hw_intf_cmd_mode_cfg {
struct dpu_hw_intf_ops {
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_timing_params *p,
- const struct msm_format *fmt,
- const struct dpu_mdss_version *mdss_ver);
+ const struct msm_format *fmt);
void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_prog_fetch *fetch);
@@ -126,6 +125,8 @@ struct dpu_hw_intf {
enum dpu_intf idx;
const struct dpu_intf_cfg *cap;
+ const struct dpu_mdss_version *mdss_ver;
+
/* ops */
struct dpu_hw_intf_ops ops;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 81b56f066519..e8a76d5192c2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -19,12 +19,28 @@
/* These registers are offsets from mixer base + stage base */
#define LM_BLEND0_OP 0x00
+
+/* <v12 DPU with offset to mixer base + stage base */
#define LM_BLEND0_CONST_ALPHA 0x04
#define LM_FG_COLOR_FILL_COLOR_0 0x08
#define LM_FG_COLOR_FILL_COLOR_1 0x0C
#define LM_FG_COLOR_FILL_SIZE 0x10
#define LM_FG_COLOR_FILL_XY 0x14
+/* >= v12 DPU */
+#define LM_BG_SRC_SEL_V12 0x14
+#define LM_BG_SRC_SEL_V12_RESET_VALUE 0x0000c0c0
+#define LM_BORDER_COLOR_0_V12 0x1c
+#define LM_BORDER_COLOR_1_V12 0x20
+
+/* >= v12 DPU with offset to mixer base + stage base */
+#define LM_BLEND0_FG_SRC_SEL_V12 0x04
+#define LM_BLEND0_CONST_ALPHA_V12 0x08
+#define LM_FG_COLOR_FILL_COLOR_0_V12 0x0c
+#define LM_FG_COLOR_FILL_COLOR_1_V12 0x10
+#define LM_FG_COLOR_FILL_SIZE_V12 0x14
+#define LM_FG_COLOR_FILL_XY_V12 0x18
+
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
@@ -83,6 +99,22 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
}
}
+static void dpu_hw_lm_setup_border_color_v12(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ if (border_en) {
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_0_V12,
+ (color->color_0 & 0x3ff) |
+ ((color->color_1 & 0x3ff) << 16));
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_1_V12,
+ (color->color_2 & 0x3ff) |
+ ((color->color_3 & 0x3ff) << 16));
+ }
+}
+
static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx)
{
dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, 0x0);
@@ -112,6 +144,27 @@ static void dpu_hw_lm_setup_blend_config_combined_alpha(struct dpu_hw_mixer *ctx
DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
}
+static void
+dpu_hw_lm_setup_blend_config_combined_alpha_v12(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha,
+ u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+ u32 const_alpha;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
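+ /* v12 packs 10-bit constant alphas: bg in bits 9:0, fg in bits 25:16 */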
+ const_alpha = (bg_alpha & 0x3ff) | ((fg_alpha & 0x3ff) << 16);
+ DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA_V12 + stage_off, const_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
@@ -144,18 +197,146 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
-static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
- unsigned long features)
+static void dpu_hw_lm_setup_color3_v12(struct dpu_hw_mixer *ctx,
+ uint32_t mixer_op_mode)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int op_mode, stages, stage_off, i;
+
+ stages = ctx->cap->sblk->maxblendstages;
+ if (stages <= 0)
+ return;
+
+ for (i = DPU_STAGE_0; i <= stages; i++) {
+ stage_off = _stage_offset(ctx, i);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ /* set color_out3 bit in blend0_op when enabled in mixer_op_mode */
+ op_mode = DPU_REG_READ(c, LM_BLEND0_OP + stage_off);
+ if (mixer_op_mode & BIT(i))
+ op_mode |= BIT(30);
+ else
+ op_mode &= ~BIT(30);
+
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, op_mode);
+ }
+}
+
+static int _set_staged_sspp(u32 stage, struct dpu_hw_stage_cfg *stage_cfg,
+ int pipes_per_stage, u32 *value)
{
- ops->setup_mixer_out = dpu_hw_lm_setup_out;
- if (test_bit(DPU_MIXER_COMBINED_ALPHA, &features))
- ops->setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha;
+ int i;
+ u32 pipe_type = 0, pipe_id = 0, rec_id = 0;
+ u32 src_sel[PIPES_PER_STAGE];
+
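+ /* default: both rects un-staged, which selects border color output */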
+ *value = LM_BG_SRC_SEL_V12_RESET_VALUE;
+ if (!stage_cfg || !pipes_per_stage)
+ return 0;
+
+ for (i = 0; i < pipes_per_stage; i++) {
+ enum dpu_sspp pipe = stage_cfg->stage[stage][i];
+ enum dpu_sspp_multirect_index rect_index = stage_cfg->multirect_index[stage][i];
+
+ src_sel[i] = LM_BG_SRC_SEL_V12_RESET_VALUE;
+
+ if (!pipe)
+ continue;
+
+ /* translate pipe data to SWI pipe_type, pipe_id */
+ if (pipe >= SSPP_DMA0 && pipe <= SSPP_DMA5) {
+ pipe_type = 0;
+ pipe_id = pipe - SSPP_DMA0;
+ } else if (pipe >= SSPP_VIG0 && pipe <= SSPP_VIG3) {
+ pipe_type = 1;
+ pipe_id = pipe - SSPP_VIG0;
+ } else {
+ DPU_ERROR("invalid rec-%d pipe:%d\n", i, pipe);
+ return -EINVAL;
+ }
+
+ /* translate rec data to SWI rec_id */
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+ rec_id = 0;
+ } else if (rect_index == DPU_SSPP_RECT_1) {
+ rec_id = 1;
+ } else {
+ DPU_ERROR("invalid rec-%d rect_index:%d\n", i, rect_index);
+ rec_id = 0;
+ }
+
+ /* calculate SWI value for rec-0 and rec-1 and store it in a temporary buffer */
+ src_sel[i] = (((pipe_type & 0x3) << 6) | ((rec_id & 0x3) << 4) | (pipe_id & 0xf));
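+ /* e.g. VIG1 rec-1: type 1, rec 1, id 1 -> src_sel 0x51 */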
+ }
+
+ /* calculate final SWI register value for rec-0 and rec-1 */
+ *value = 0;
+ for (i = 0; i < pipes_per_stage; i++)
+ *value |= src_sel[i] << (i * 8);
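+ /* rec-0 lands in byte 0, rec-1 in byte 1 of the SRC_SEL value */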
+
+ return 0;
+}
+
+static int dpu_hw_lm_setup_blendstage(struct dpu_hw_mixer *ctx, enum dpu_lm lm,
+ struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i, ret, stages, stage_off, pipes_per_stage;
+ u32 value;
+
+ stages = ctx->cap->sblk->maxblendstages;
+ if (stages <= 0)
+ return -EINVAL;
+
+ if (test_bit(DPU_MIXER_SOURCESPLIT, &ctx->cap->features))
+ pipes_per_stage = PIPES_PER_STAGE;
else
- ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
- ops->setup_alpha_out = dpu_hw_lm_setup_color3;
- ops->setup_border_color = dpu_hw_lm_setup_border_color;
- ops->setup_misr = dpu_hw_lm_setup_misr;
- ops->collect_misr = dpu_hw_lm_collect_misr;
+ pipes_per_stage = 1;
+
+ /*
+ * When stage configuration is empty, we can enable the
+ * border color by setting the corresponding LAYER_ACTIVE bit
+ * and un-staging all the pipes from the layer mixer.
+ */
+ if (!stage_cfg)
+ DPU_REG_WRITE(c, LM_BG_SRC_SEL_V12, LM_BG_SRC_SEL_V12_RESET_VALUE);
+
+ for (i = DPU_STAGE_0; i <= stages; i++) {
+ stage_off = _stage_offset(ctx, i);
+ if (stage_off < 0)
+ return stage_off;
+
+ ret = _set_staged_sspp(i, stage_cfg, pipes_per_stage, &value);
+ if (ret)
+ return ret;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_SRC_SEL_V12 + stage_off, value);
+ }
+
+ return 0;
+}
+
+static int dpu_hw_lm_clear_all_blendstages(struct dpu_hw_mixer *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i, stages, stage_off;
+
+ stages = ctx->cap->sblk->maxblendstages;
+ if (stages <= 0)
+ return -EINVAL;
+
+ DPU_REG_WRITE(c, LM_BG_SRC_SEL_V12, LM_BG_SRC_SEL_V12_RESET_VALUE);
+
+ for (i = DPU_STAGE_0; i <= stages; i++) {
+ stage_off = _stage_offset(ctx, i);
+ if (stage_off < 0)
+ return stage_off;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_SRC_SEL_V12 + stage_off,
+ LM_BG_SRC_SEL_V12_RESET_VALUE);
+ }
+
+ return 0;
}
/**
@@ -164,10 +345,12 @@ static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
* @dev: Corresponding device for devres management
* @cfg: mixer catalog entry for which driver object is required
* @addr: mapped register io address of MDP
+ * @mdss_ver: DPU core's major and minor versions
*/
struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
const struct dpu_lm_cfg *cfg,
- void __iomem *addr)
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver)
{
struct dpu_hw_mixer *c;
@@ -186,7 +369,24 @@ struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
/* Assign ops */
c->idx = cfg->id;
c->cap = cfg;
- _setup_mixer_ops(&c->ops, c->cap->features);
+ c->ops.setup_mixer_out = dpu_hw_lm_setup_out;
+ if (mdss_ver->core_major_ver >= 12)
+ c->ops.setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha_v12;
+ else if (mdss_ver->core_major_ver >= 4)
+ c->ops.setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha;
+ else
+ c->ops.setup_blend_config = dpu_hw_lm_setup_blend_config;
+ if (mdss_ver->core_major_ver < 12) {
+ c->ops.setup_alpha_out = dpu_hw_lm_setup_color3;
+ c->ops.setup_border_color = dpu_hw_lm_setup_border_color;
+ } else {
+ c->ops.setup_alpha_out = dpu_hw_lm_setup_color3_v12;
+ c->ops.setup_blendstage = dpu_hw_lm_setup_blendstage;
+ c->ops.clear_all_blendstages = dpu_hw_lm_clear_all_blendstages;
+ c->ops.setup_border_color = dpu_hw_lm_setup_border_color_v12;
+ }
+ c->ops.setup_misr = dpu_hw_lm_setup_misr;
+ c->ops.collect_misr = dpu_hw_lm_collect_misr;
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 6f60fa9b3cd7..1b9ecd082d7f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -11,6 +11,7 @@
#include "dpu_hw_util.h"
struct dpu_hw_mixer;
+struct dpu_hw_stage_cfg;
struct dpu_hw_mixer_cfg {
u32 out_width;
@@ -49,6 +50,23 @@ struct dpu_hw_lm_ops {
void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
/**
+ * Clear the layer-mixer-to-pipe staging configuration
+ * @ctx : mixer ctx pointer
+ * Returns: 0 on success or -error
+ */
+ int (*clear_all_blendstages)(struct dpu_hw_mixer *ctx);
+
+ /**
+ * Set up the layer-mixer-to-pipe staging configuration
+ * @ctx : mixer ctx pointer
+ * @lm : layer mixer enumeration
+ * @stage_cfg : blend stage configuration
+ * Returns: 0 on success or -error
+ */
+ int (*setup_blendstage)(struct dpu_hw_mixer *ctx, enum dpu_lm lm,
+ struct dpu_hw_stage_cfg *stage_cfg);
+
+ /**
* setup_border_color : enable/disable border color
*/
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
@@ -95,6 +113,7 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
const struct dpu_lm_cfg *cfg,
- void __iomem *addr);
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver);
#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
index 0b3325f9c870..83b1dbecddd2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
@@ -33,8 +33,7 @@ static void dpu_hw_merge_3d_setup_3d_mode(struct dpu_hw_merge_3d *merge_3d,
}
}
-static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
- unsigned long features)
+static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c)
{
c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
};
@@ -62,7 +61,7 @@ struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
c->idx = cfg->id;
c->caps = cfg;
- _setup_merge_3d_ops(c, c->caps->features);
+ _setup_merge_3d_ops(c);
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 36c0ec775b92..138071be5649 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -319,13 +319,13 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
c->ops.disable_autorefresh = dpu_hw_pp_disable_autorefresh;
}
- if (test_bit(DPU_PINGPONG_DSC, &cfg->features)) {
+ if (mdss_rev->core_major_ver < 7) {
c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
}
- if (test_bit(DPU_PINGPONG_DITHER, &cfg->features))
+ if (mdss_rev->core_major_ver >= 3)
c->ops.setup_dither = dpu_hw_pp_setup_dither;
return c;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 32c7c8084553..6f1fc790ad6d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -10,11 +10,11 @@
#include "dpu_hw_sspp.h"
#include "dpu_kms.h"
-#include "msm_mdss.h"
-
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
+#include <linux/soc/qcom/ubwc.h>
+
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
/* SSPP registers */
@@ -543,7 +543,7 @@ static void dpu_hw_sspp_setup_qos_lut(struct dpu_hw_sspp *ctx,
return;
_dpu_hw_setup_qos_lut(&ctx->hw, SSPP_DANGER_LUT,
- test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features),
+ ctx->mdss_ver->core_major_ver >= 4,
cfg);
}
@@ -684,7 +684,7 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
const struct dpu_sspp_cfg *cfg,
void __iomem *addr,
- const struct msm_mdss_data *mdss_data,
+ const struct qcom_ubwc_cfg_data *mdss_data,
const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_sspp *hw_pipe;
@@ -703,6 +703,9 @@ struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
hw_pipe->ubwc = mdss_data;
hw_pipe->idx = cfg->id;
hw_pipe->cap = cfg;
+
+ hw_pipe->mdss_ver = mdss_rev;
+
_setup_layer_ops(hw_pipe, hw_pipe->cap->features, mdss_rev);
return hw_pipe;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 56a0edf2a57c..bdac5c04bf79 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -308,12 +308,14 @@ struct dpu_hw_sspp_ops {
struct dpu_hw_sspp {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
- const struct msm_mdss_data *ubwc;
+ const struct qcom_ubwc_cfg_data *ubwc;
/* Pipe */
enum dpu_sspp idx;
const struct dpu_sspp_cfg *cap;
+ const struct dpu_mdss_version *mdss_ver;
+
/* Ops */
struct dpu_hw_sspp_ops ops;
};
@@ -323,7 +325,7 @@ struct dpu_kms;
struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
const struct dpu_sspp_cfg *cfg,
void __iomem *addr,
- const struct msm_mdss_data *mdss_data,
+ const struct qcom_ubwc_cfg_data *mdss_data,
const struct dpu_mdss_version *mdss_rev);
int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index 562a3f4c5238..96dc10589bee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -264,15 +264,15 @@ static void dpu_hw_dp_phy_intf_sel(struct dpu_hw_mdp *mdp,
}
static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
- unsigned long cap, const struct dpu_mdss_version *mdss_rev)
+ const struct dpu_mdss_version *mdss_rev)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
ops->get_danger_status = dpu_hw_get_danger_status;
- if (cap & BIT(DPU_MDP_VSYNC_SEL))
+ if (mdss_rev->core_major_ver < 5)
ops->setup_vsync_source = dpu_hw_setup_vsync_sel;
- else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED)))
+ else if (mdss_rev->core_major_ver < 8)
ops->setup_vsync_source = dpu_hw_setup_wd_timer;
ops->get_safe_status = dpu_hw_get_safe_status;
@@ -280,7 +280,8 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
if (mdss_rev->core_major_ver >= 5)
ops->dp_phy_intf_sel = dpu_hw_dp_phy_intf_sel;
- if (cap & BIT(DPU_MDP_AUDIO_SELECT))
+ if (mdss_rev->core_major_ver == 4 ||
+ mdss_rev->core_major_ver == 5)
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
@@ -312,7 +313,7 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
* Assign ops
*/
mdp->caps = cfg;
- _setup_mdp_ops(&mdp->ops, mdp->caps->features, mdss_rev);
+ _setup_mdp_ops(&mdp->ops, mdss_rev);
return mdp;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index 4853e516c487..478a091aeccf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -208,7 +208,7 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
if (test_bit(DPU_WB_CDP, &features))
ops->setup_cdp = dpu_hw_wb_setup_cdp;
- if (test_bit(DPU_WB_INPUT_CTRL, &features))
+ if (mdss_rev->core_major_ver >= 5)
ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
if (mdss_rev->core_major_ver >= 9)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 1fd82b6747e9..12dcb32b4724 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -20,9 +20,10 @@
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
+#include <linux/soc/qcom/ubwc.h>
+
#include "msm_drv.h"
#include "msm_mmu.h"
-#include "msm_mdss.h"
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"
@@ -582,7 +583,7 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct msm_display_info info;
int i, rc = 0;
- if (!(priv->dsi[0] || priv->dsi[1]))
+ if (!(priv->kms->dsi[0] || priv->kms->dsi[1]))
return rc;
/*
@@ -593,26 +594,26 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
*
* TODO: Support swapping DSI0 and DSI1 in the bonded setup.
*/
- for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ for (i = 0; i < ARRAY_SIZE(priv->kms->dsi); i++) {
int other = (i + 1) % 2;
- if (!priv->dsi[i])
+ if (!priv->kms->dsi[i])
continue;
- if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
- !msm_dsi_is_master_dsi(priv->dsi[i]))
+ if (msm_dsi_is_bonded_dsi(priv->kms->dsi[i]) &&
+ !msm_dsi_is_master_dsi(priv->kms->dsi[i]))
continue;
memset(&info, 0, sizeof(info));
info.intf_type = INTF_DSI;
info.h_tile_instance[info.num_of_h_tiles++] = i;
- if (msm_dsi_is_bonded_dsi(priv->dsi[i]))
+ if (msm_dsi_is_bonded_dsi(priv->kms->dsi[i]))
info.h_tile_instance[info.num_of_h_tiles++] = other;
- info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);
+ info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->kms->dsi[i]);
- rc = dpu_kms_dsi_set_te_source(&info, priv->dsi[i]);
+ rc = dpu_kms_dsi_set_te_source(&info, priv->kms->dsi[i]);
if (rc) {
DPU_ERROR("failed to identify TE source for dsi display\n");
return rc;
@@ -624,15 +625,15 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
return PTR_ERR(encoder);
}
- rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+ rc = msm_dsi_modeset_init(priv->kms->dsi[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
i, rc);
break;
}
- if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
- rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
+ if (msm_dsi_is_bonded_dsi(priv->kms->dsi[i]) && priv->kms->dsi[other]) {
+ rc = msm_dsi_modeset_init(priv->kms->dsi[other], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
other, rc);
@@ -654,8 +655,8 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
int rc;
int i;
- for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
- if (!priv->dp[i])
+ for (i = 0; i < ARRAY_SIZE(priv->kms->dp); i++) {
+ if (!priv->kms->dp[i])
continue;
memset(&info, 0, sizeof(info));
@@ -670,7 +671,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
}
yuv_supported = !!dpu_kms->catalog->cdm;
- rc = msm_dp_modeset_init(priv->dp[i], dev, encoder, yuv_supported);
+ rc = msm_dp_modeset_init(priv->kms->dp[i], dev, encoder, yuv_supported);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
return rc;
@@ -688,7 +689,7 @@ static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
struct msm_display_info info;
int rc;
- if (!priv->hdmi)
+ if (!priv->kms->hdmi)
return 0;
memset(&info, 0, sizeof(info));
@@ -702,7 +703,7 @@ static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
return PTR_ERR(encoder);
}
- rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ rc = msm_hdmi_modeset_init(priv->kms->hdmi, dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
return rc;
@@ -874,12 +875,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
ret = PTR_ERR(crtc);
return ret;
}
- priv->num_crtcs++;
}
/* All CRTCs are compatible with all encoders */
drm_for_each_encoder(encoder, dev)
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ encoder->possible_crtcs = (1 << dev->mode_config.num_crtc) - 1;
return 0;
}
@@ -1022,7 +1022,7 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
dpu_kms->mmio + cat->wb[i].base, "%s",
cat->wb[i].name);
- if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
+ if (dpu_kms->catalog->mdss_ver->core_major_ver >= 8) {
msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
dpu_kms->mmio + cat->mdp[0].base, "top");
msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len - MDP_PERIPH_TOP0_END,
@@ -1043,7 +1043,7 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base,
"%s", cat->dsc[i].name);
- if (cat->dsc[i].features & BIT(DPU_DSC_HW_REV_1_2)) {
+ if (cat->mdss_ver->core_major_ver >= 7) {
struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
struct dpu_dsc_blk ctl = cat->dsc[i].sblk->ctl;
@@ -1095,26 +1095,26 @@ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
struct msm_mmu *mmu;
- if (!dpu_kms->base.aspace)
+ if (!dpu_kms->base.vm)
return;
- mmu = dpu_kms->base.aspace->mmu;
+ mmu = to_msm_vm(dpu_kms->base.vm)->mmu;
mmu->funcs->detach(mmu);
- msm_gem_address_space_put(dpu_kms->base.aspace);
+ drm_gpuvm_put(dpu_kms->base.vm);
- dpu_kms->base.aspace = NULL;
+ dpu_kms->base.vm = NULL;
}
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
- aspace = msm_kms_init_aspace(dpu_kms->dev);
- if (IS_ERR(aspace))
- return PTR_ERR(aspace);
+ vm = msm_kms_init_vm(dpu_kms->dev);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
- dpu_kms->base.aspace = aspace;
+ dpu_kms->base.vm = vm;
return 0;
}
@@ -1189,10 +1189,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto err_pm_put;
}
- dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent);
+ dpu_kms->mdss = qcom_ubwc_config_get_data();
if (IS_ERR(dpu_kms->mdss)) {
rc = PTR_ERR(dpu_kms->mdss);
- DPU_ERROR("failed to get MDSS data: %d\n", rc);
+ DPU_ERROR("failed to get UBWC config data: %d\n", rc);
goto err_pm_put;
}
@@ -1533,6 +1533,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
{ .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, },
+ { .compatible = "qcom,sm8750-dpu", .data = &dpu_sm8750_cfg, },
{ .compatible = "qcom,x1e80100-dpu", .data = &dpu_x1e80100_cfg, },
{}
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index a57ec2ec1060..993cf512f8c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -60,7 +60,7 @@ struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
const struct dpu_mdss_cfg *catalog;
- const struct msm_mdss_data *mdss;
+ const struct qcom_ubwc_cfg_data *mdss;
/* io/register spaces: */
void __iomem *mmio, *vbif[VBIF_MAX];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 421138bc3cb7..01171c535a27 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -17,8 +17,9 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <linux/soc/qcom/ubwc.h>
+
#include "msm_drv.h"
-#include "msm_mdss.h"
#include "dpu_kms.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_util.h"
@@ -71,7 +72,7 @@ static const uint32_t qcom_compressed_supported_formats[] = {
/*
* struct dpu_plane - local dpu plane structure
- * @aspace: address space pointer
+ * @vm: address space pointer
* @csc_ptr: Points to dpu_csc_cfg structure to use for current
* @catalog: Points to dpu catalog structure
* @revalidate: force revalidation of all the plane properties
@@ -646,7 +647,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = new_state->fb;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
- struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
if (!new_state->fb)
@@ -654,9 +654,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
- /* cache aspace */
- pstate->aspace = kms->base.aspace;
-
/*
* TODO: Need to sort out the msm_framebuffer_prepare() call below so
* we can use msm_atomic_prepare_fb() instead of doing the
@@ -664,13 +661,10 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
*/
drm_gem_plane_helper_prepare_fb(plane, new_state);
- if (pstate->aspace) {
- ret = msm_framebuffer_prepare(new_state->fb,
- pstate->aspace, pstate->needs_dirtyfb);
- if (ret) {
- DPU_ERROR("failed to prepare framebuffer\n");
- return ret;
- }
+ ret = msm_framebuffer_prepare(new_state->fb, pstate->needs_dirtyfb);
+ if (ret) {
+ DPU_ERROR("failed to prepare framebuffer\n");
+ return ret;
}
return 0;
@@ -689,8 +683,7 @@ static void dpu_plane_cleanup_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
- msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace,
- old_pstate->needs_dirtyfb);
+ msm_framebuffer_cleanup(old_state->fb, old_pstate->needs_dirtyfb);
}
static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
@@ -1457,7 +1450,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane,
pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe);
pdpu->is_rt_pipe = is_rt_pipe;
- dpu_format_populate_addrs(pstate->aspace, new_state->fb, &pstate->layout);
+ dpu_format_populate_addrs(new_state->fb, &pstate->layout);
DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
", %p4cc ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index acd5725175cd..a3a6e9028333 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -17,7 +17,6 @@
/**
* struct dpu_plane_state: Define dpu extension of drm plane state object
* @base: base drm plane state object
- * @aspace: pointer to address space for input/output buffers
* @pipe: software pipe description
* @r_pipe: software pipe description of the second pipe
* @pipe_cfg: software pipe configuration
@@ -34,7 +33,6 @@
*/
struct dpu_plane_state {
struct drm_plane_state base;
- struct msm_gem_address_space *aspace;
struct dpu_sw_pipe pipe;
struct dpu_sw_pipe r_pipe;
struct dpu_sw_pipe_cfg pipe_cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 2e296f79cba1..25382120cb1a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -40,7 +40,7 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
int dpu_rm_init(struct drm_device *dev,
struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
- const struct msm_mdss_data *mdss_data,
+ const struct qcom_ubwc_cfg_data *mdss_data,
void __iomem *mmio)
{
int rc, i;
@@ -60,7 +60,7 @@ int dpu_rm_init(struct drm_device *dev,
struct dpu_hw_mixer *hw;
const struct dpu_lm_cfg *lm = &cat->mixer[i];
- hw = dpu_hw_lm_init(dev, lm, mmio);
+ hw = dpu_hw_lm_init(dev, lm, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed lm object creation: err %d\n", rc);
@@ -142,7 +142,7 @@ int dpu_rm_init(struct drm_device *dev,
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
- hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mixer_count, cat->mixer);
+ hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mdss_ver, cat->mixer_count, cat->mixer);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed ctl object creation: err %d\n", rc);
@@ -168,10 +168,10 @@ int dpu_rm_init(struct drm_device *dev,
struct dpu_hw_dsc *hw;
const struct dpu_dsc_cfg *dsc = &cat->dsc[i];
- if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
+ if (cat->mdss_ver->core_major_ver >= 7)
hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio);
else
- hw = dpu_hw_dsc_init(dev, dsc, mmio);
+ hw = dpu_hw_dsc_init(dev, dsc, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index aa62966056d4..ccd64404f12d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -69,7 +69,7 @@ struct msm_display_topology {
int dpu_rm_init(struct drm_device *dev,
struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
- const struct msm_mdss_data *mdss_data,
+ const struct qcom_ubwc_cfg_data *mdss_data,
void __iomem *mmio);
int dpu_rm_reserve(struct dpu_rm *rm,
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index b8610aa806ea..da53ca88251e 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -17,7 +17,6 @@
struct mdp4_crtc {
struct drm_crtc base;
char name[8];
- int id;
int ovlp;
enum mdp4_dma dma;
bool enabled;
@@ -120,7 +119,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
struct msm_kms *kms = &mdp4_kms->base.base;
- msm_gem_unpin_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->vm);
drm_gem_object_put(val);
}
@@ -369,7 +368,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_get(next_bo);
- msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
+ msm_gem_get_and_pin_iova(next_bo, kms->vm, &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -427,7 +426,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
- ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->vm, &iova);
if (ret)
goto fail;
} else {
@@ -511,7 +510,7 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
if (pending & PENDING_CURSOR) {
update_cursor(crtc);
- drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+ drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->kms->wq);
}
}
@@ -539,7 +538,7 @@ static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
mdp4_crtc->flushed_mask),
msecs_to_jiffies(50));
if (ret <= 0)
- dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id);
+ dev_warn(dev->dev, "vblank time out, crtc=%s\n", mdp4_crtc->base.name);
mdp4_crtc->flushed_mask = 0;
@@ -624,7 +623,7 @@ static void mdp4_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, int id, int ovlp_id,
+ struct drm_plane *plane, int ovlp_id,
enum mdp4_dma dma_id)
{
struct drm_crtc *crtc = NULL;
@@ -639,8 +638,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
crtc = &mdp4_crtc->base;
- mdp4_crtc->id = id;
-
mdp4_crtc->ovlp = ovlp_id;
mdp4_crtc->dma = dma_id;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 7e942c1337b3..0952c7f18abd 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -122,15 +122,16 @@ static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct device *dev = mdp4_kms->dev->dev;
- struct msm_gem_address_space *aspace = kms->aspace;
if (mdp4_kms->blank_cursor_iova)
- msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+ msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->vm);
drm_gem_object_put(mdp4_kms->blank_cursor_bo);
- if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu);
- msm_gem_address_space_put(aspace);
+ if (kms->vm) {
+ struct msm_mmu *mmu = to_msm_vm(kms->vm)->mmu;
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(kms->vm);
}
if (mdp4_kms->rpm_enabled)
@@ -249,9 +250,9 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* DTV can be hooked to DMA_E: */
encoder->possible_crtcs = 1 << 1;
- if (priv->hdmi) {
+ if (priv->kms->hdmi) {
/* Construct bridge/connector for HDMI: */
- ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ ret = msm_hdmi_modeset_init(priv->kms->hdmi, dev, encoder);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
return ret;
@@ -263,7 +264,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* only DSI1 supported for now */
dsi_id = 0;
- if (!priv->dsi[dsi_id])
+ if (!priv->kms->dsi[dsi_id])
break;
encoder = mdp4_dsi_encoder_init(dev);
@@ -277,7 +278,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* TODO: Add DMA_S later? */
encoder->possible_crtcs = 1 << DMA_P;
- ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+ ret = msm_dsi_modeset_init(priv->kms->dsi[dsi_id], dev, encoder);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
ret);
@@ -296,7 +297,6 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
static int modeset_init(struct mdp4_kms *mdp4_kms)
{
struct drm_device *dev = mdp4_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
@@ -338,7 +338,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
goto fail;
}
- crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
+ crtc = mdp4_crtc_init(dev, plane, i,
mdp4_crtcs[i]);
if (IS_ERR(crtc)) {
DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
@@ -346,8 +346,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
ret = PTR_ERR(crtc);
goto fail;
}
-
- priv->num_crtcs++;
}
/*
@@ -398,7 +396,7 @@ static int mdp4_kms_init(struct drm_device *dev)
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(priv->kms));
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
int ret;
u32 major, minor;
unsigned long max_clk;
@@ -467,19 +465,20 @@ static int mdp4_kms_init(struct drm_device *dev)
} else if (!mmu) {
DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
- aspace = NULL;
+ vm = NULL;
} else {
- aspace = msm_gem_address_space_create(mmu,
- "mdp4", 0x1000, 0x100000000 - 0x1000);
+ vm = msm_gem_vm_create(dev, mmu, "mdp4",
+ 0x1000, 0x100000000 - 0x1000,
+ true);
- if (IS_ERR(aspace)) {
+ if (IS_ERR(vm)) {
if (!IS_ERR(mmu))
mmu->funcs->destroy(mmu);
- ret = PTR_ERR(aspace);
+ ret = PTR_ERR(vm);
goto fail;
}
- kms->aspace = aspace;
+ kms->vm = vm;
}
ret = modeset_init(mdp4_kms);
@@ -496,7 +495,7 @@ static int mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->vm,
&mdp4_kms->blank_cursor_iova);
if (ret) {
DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index f9d988076337..fb348583dc84 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -185,7 +185,7 @@ void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc);
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, int id, int ovlp_id,
+ struct drm_plane *plane, int ovlp_id,
enum mdp4_dma dma_id);
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 3fefb2088008..098c3b5ff2b2 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -79,30 +79,25 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
static int mdp4_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct msm_drm_private *priv = plane->dev->dev_private;
- struct msm_kms *kms = priv->kms;
-
if (!new_state->fb)
return 0;
drm_gem_plane_helper_prepare_fb(plane, new_state);
- return msm_framebuffer_prepare(new_state->fb, kms->aspace, false);
+ return msm_framebuffer_prepare(new_state->fb, false);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- struct mdp4_kms *mdp4_kms = get_kms(plane);
- struct msm_kms *kms = &mdp4_kms->base.base;
struct drm_framebuffer *fb = old_state->fb;
if (!fb)
return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, kms->aspace, false);
+ msm_framebuffer_cleanup(fb, false);
}
@@ -141,7 +136,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
- struct msm_kms *kms = &mdp4_kms->base.base;
enum mdp4_pipe pipe = mdp4_plane->pipe;
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
@@ -153,13 +147,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 0));
+ msm_framebuffer_iova(fb, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 1));
+ msm_framebuffer_iova(fb, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 2));
+ msm_framebuffer_iova(fb, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 3));
+ msm_framebuffer_iova(fb, 3));
}
static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 0f653e62b4a0..4c4900a7beda 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -169,7 +169,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
struct msm_kms *kms = &mdp5_kms->base.base;
- msm_gem_unpin_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->vm);
drm_gem_object_put(val);
}
@@ -993,7 +993,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->vm,
&mdp5_crtc->cursor.iova);
if (ret) {
drm_gem_object_put(cursor_bo);
@@ -1196,7 +1196,7 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
}
if (pending & PENDING_CURSOR)
- drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
+ drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->kms->wq);
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 3fcca7a3d82e..5b6ca8dd929e 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -198,11 +198,12 @@ static void mdp5_destroy(struct mdp5_kms *mdp5_kms);
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_gem_address_space *aspace = kms->aspace;
- if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu);
- msm_gem_address_space_put(aspace);
+ if (kms->vm) {
+ struct msm_mmu *mmu = to_msm_vm(kms->vm)->mmu;
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(kms->vm);
}
mdp_kms_destroy(&mdp5_kms->base);
@@ -311,7 +312,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
break;
case INTF_HDMI:
- if (!priv->hdmi)
+ if (!priv->kms->hdmi)
break;
ctl = mdp5_ctlm_request(ctlm, intf->num);
@@ -326,7 +327,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
break;
}
- ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ ret = msm_hdmi_modeset_init(priv->kms->hdmi, dev, encoder);
break;
case INTF_DSI:
{
@@ -334,14 +335,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
mdp5_cfg_get_hw_config(mdp5_kms->cfg);
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
- if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
+ if ((dsi_id >= ARRAY_SIZE(priv->kms->dsi)) || (dsi_id < 0)) {
DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
intf->num);
ret = -EINVAL;
break;
}
- if (!priv->dsi[dsi_id])
+ if (!priv->kms->dsi[dsi_id])
break;
ctl = mdp5_ctlm_request(ctlm, intf->num);
@@ -356,9 +357,10 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
break;
}
- ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+ ret = msm_dsi_modeset_init(priv->kms->dsi[dsi_id], dev, encoder);
if (!ret)
- mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));
+ mdp5_encoder_set_intf_mode(encoder,
+ msm_dsi_is_cmd_mode(priv->kms->dsi[dsi_id]));
break;
}
@@ -374,7 +376,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
struct drm_device *dev = mdp5_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
unsigned int num_crtcs;
int i, ret, pi = 0, ci = 0;
struct drm_plane *primary[MAX_BASES] = { NULL };
@@ -442,7 +443,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
- priv->num_crtcs++;
}
/*
@@ -450,7 +450,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
* crtcs for the encoders
*/
drm_for_each_encoder(encoder, dev)
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ encoder->possible_crtcs = (1 << dev->mode_config.num_crtc) - 1;
return 0;
@@ -500,7 +500,7 @@ static int mdp5_kms_init(struct drm_device *dev)
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
struct msm_kms *kms = priv->kms;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
int i, ret;
ret = mdp5_init(to_platform_device(dev->dev), dev);
@@ -534,13 +534,13 @@ static int mdp5_kms_init(struct drm_device *dev)
}
mdelay(16);
- aspace = msm_kms_init_aspace(mdp5_kms->dev);
- if (IS_ERR(aspace)) {
- ret = PTR_ERR(aspace);
+ vm = msm_kms_init_vm(mdp5_kms->dev);
+ if (IS_ERR(vm)) {
+ ret = PTR_ERR(vm);
goto fail;
}
- kms->aspace = aspace;
+ kms->vm = vm;
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index bb1601921938..7c790406d533 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -135,8 +135,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct msm_drm_private *priv = plane->dev->dev_private;
- struct msm_kms *kms = priv->kms;
bool needs_dirtyfb = to_mdp5_plane_state(new_state)->needs_dirtyfb;
if (!new_state->fb)
@@ -144,14 +142,12 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
drm_gem_plane_helper_prepare_fb(plane, new_state);
- return msm_framebuffer_prepare(new_state->fb, kms->aspace, needs_dirtyfb);
+ return msm_framebuffer_prepare(new_state->fb, needs_dirtyfb);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct msm_kms *kms = &mdp5_kms->base.base;
struct drm_framebuffer *fb = old_state->fb;
bool needed_dirtyfb = to_mdp5_plane_state(old_state)->needs_dirtyfb;
@@ -159,7 +155,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, kms->aspace, needed_dirtyfb);
+ msm_framebuffer_cleanup(fb, needed_dirtyfb);
}
static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
@@ -467,8 +463,6 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
enum mdp5_pipe pipe,
struct drm_framebuffer *fb)
{
- struct msm_kms *kms = &mdp5_kms->base.base;
-
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
@@ -478,13 +472,13 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 0));
+ msm_framebuffer_iova(fb, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 1));
+ msm_framebuffer_iova(fb, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 2));
+ msm_framebuffer_iova(fb, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 3));
+ msm_framebuffer_iova(fb, 3));
}
/* Note: mdp5_plane->pipe_lock must be locked */
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index 07a2c1e87219..071bcdea80f7 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -127,18 +127,18 @@ void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state)
priv = drm_dev->dev_private;
kms = priv->kms;
- for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
- if (!priv->dp[i])
+ for (i = 0; i < ARRAY_SIZE(kms->dp); i++) {
+ if (!kms->dp[i])
continue;
- msm_dp_snapshot(disp_state, priv->dp[i]);
+ msm_dp_snapshot(disp_state, kms->dp[i]);
}
- for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
- if (!priv->dsi[i])
+ for (i = 0; i < ARRAY_SIZE(kms->dsi); i++) {
+ if (!kms->dsi[i])
continue;
- msm_dsi_snapshot(disp_state, priv->dsi[i]);
+ msm_dsi_snapshot(disp_state, kms->dsi[i]);
}
if (kms->funcs->snapshot)
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index f8bfb908f9b4..41018e82efa1 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -11,7 +11,6 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>
-#include "dp_catalog.h"
#include "dp_audio.h"
#include "dp_drm.h"
#include "dp_panel.h"
@@ -22,13 +21,28 @@
struct msm_dp_audio_private {
struct platform_device *pdev;
struct drm_device *drm_dev;
- struct msm_dp_catalog *catalog;
+ void __iomem *link_base;
u32 channels;
struct msm_dp_audio msm_dp_audio;
};
+static inline u32 msm_dp_read_link(struct msm_dp_audio_private *audio, u32 offset)
+{
+ return readl_relaxed(audio->link_base + offset);
+}
+
+static inline void msm_dp_write_link(struct msm_dp_audio_private *audio,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure link reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, audio->link_base + offset);
+}
+
static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio)
{
struct dp_sdp_header sdp_hdr = {
@@ -37,8 +51,12 @@ static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x00,
.HB3 = audio->channels - 1,
};
+ u32 header[2];
- msm_dp_catalog_write_audio_stream(audio->catalog, &sdp_hdr);
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_STREAM_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_STREAM_1, header[1]);
}
static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio)
@@ -49,8 +67,12 @@ static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x17,
.HB3 = 0x0 | (0x11 << 2),
};
+ u32 header[2];
+
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
- msm_dp_catalog_write_audio_timestamp(audio->catalog, &sdp_hdr);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_TIMESTAMP_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_TIMESTAMP_1, header[1]);
}
static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio)
@@ -61,8 +83,12 @@ static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x1b,
.HB3 = 0x0 | (0x11 << 2),
};
+ u32 header[2];
- msm_dp_catalog_write_audio_infoframe(audio->catalog, &sdp_hdr);
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_INFOFRAME_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_INFOFRAME_1, header[1]);
}
static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio)
@@ -73,8 +99,12 @@ static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x0f,
.HB3 = 0x00,
};
+ u32 header[2];
+
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
- msm_dp_catalog_write_audio_copy_mgmt(audio->catalog, &sdp_hdr);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_COPYMANAGEMENT_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_COPYMANAGEMENT_1, header[1]);
}
static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio)
@@ -85,13 +115,53 @@ static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x0f,
.HB3 = 0x00,
};
+ u32 header[2];
+ u32 reg;
+
+ /* XXX: is it necessary to preserve this field? */
+ reg = msm_dp_read_link(audio, MMSS_DP_AUDIO_ISRC_1);
+ sdp_hdr.HB3 = FIELD_GET(HEADER_3_MASK, reg);
+
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_ISRC_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_ISRC_1, header[1]);
+}
+
+static void msm_dp_audio_config_sdp(struct msm_dp_audio_private *audio)
+{
+ u32 sdp_cfg, sdp_cfg2;
+
+ sdp_cfg = msm_dp_read_link(audio, MMSS_DP_SDP_CFG);
+ /* AUDIO_TIMESTAMP_SDP_EN */
+ sdp_cfg |= BIT(1);
+ /* AUDIO_STREAM_SDP_EN */
+ sdp_cfg |= BIT(2);
+ /* AUDIO_COPY_MANAGEMENT_SDP_EN */
+ sdp_cfg |= BIT(5);
+ /* AUDIO_ISRC_SDP_EN */
+ sdp_cfg |= BIT(6);
+ /* AUDIO_INFOFRAME_SDP_EN */
+ sdp_cfg |= BIT(20);
+
+ drm_dbg_dp(audio->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg);
+
+ msm_dp_write_link(audio, MMSS_DP_SDP_CFG, sdp_cfg);
+
+ sdp_cfg2 = msm_dp_read_link(audio, MMSS_DP_SDP_CFG2);
+ /* IFRM_REGSRC -> Do not use reg values */
+ sdp_cfg2 &= ~BIT(0);
+ /* AUDIO_STREAM_HB3_REGSRC -> Do not use reg values */
+ sdp_cfg2 &= ~BIT(1);
+
+ drm_dbg_dp(audio->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2);
- msm_dp_catalog_write_audio_isrc(audio->catalog, &sdp_hdr);
+ msm_dp_write_link(audio, MMSS_DP_SDP_CFG2, sdp_cfg2);
}
static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio)
{
- msm_dp_catalog_audio_config_sdp(audio->catalog);
+ msm_dp_audio_config_sdp(audio);
msm_dp_audio_stream_sdp(audio);
msm_dp_audio_timestamp_sdp(audio);
@@ -102,8 +172,7 @@ static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio)
static void msm_dp_audio_setup_acr(struct msm_dp_audio_private *audio)
{
- u32 select = 0;
- struct msm_dp_catalog *catalog = audio->catalog;
+ u32 select, acr_ctrl;
switch (audio->msm_dp_audio.bw_code) {
case DP_LINK_BW_1_62:
@@ -124,13 +193,17 @@ static void msm_dp_audio_setup_acr(struct msm_dp_audio_private *audio)
break;
}
- msm_dp_catalog_audio_config_acr(catalog, select);
+ acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
+
+ drm_dbg_dp(audio->drm_dev, "select: %#x, acr_ctrl: %#x\n",
+ select, acr_ctrl);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
}
static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 safe_to_exit_level = 0;
+ u32 safe_to_exit_level, mainlink_levels;
switch (audio->msm_dp_audio.lane_count) {
case 1:
@@ -150,14 +223,33 @@ static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio)
break;
}
- msm_dp_catalog_audio_sfe_level(catalog, safe_to_exit_level);
+ mainlink_levels = msm_dp_read_link(audio, REG_DP_MAINLINK_LEVELS);
+ mainlink_levels &= 0xFE0;
+ mainlink_levels |= safe_to_exit_level;
+
+ drm_dbg_dp(audio->drm_dev,
+ "mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
+ mainlink_levels, safe_to_exit_level);
+
+ msm_dp_write_link(audio, REG_DP_MAINLINK_LEVELS, mainlink_levels);
}
static void msm_dp_audio_enable(struct msm_dp_audio_private *audio, bool enable)
{
- struct msm_dp_catalog *catalog = audio->catalog;
+ u32 audio_ctrl;
+
+ audio_ctrl = msm_dp_read_link(audio, MMSS_DP_AUDIO_CFG);
+
+ if (enable)
+ audio_ctrl |= BIT(0);
+ else
+ audio_ctrl &= ~BIT(0);
+
+ drm_dbg_dp(audio->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl);
- msm_dp_catalog_audio_enable(catalog, enable);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_CFG, audio_ctrl);
+ /* ensure the audio enable/disable write is ordered before what follows */
+ wmb();
}
static struct msm_dp_audio_private *msm_dp_audio_get_data(struct msm_dp *msm_dp_display)
@@ -173,8 +265,8 @@ static struct msm_dp_audio_private *msm_dp_audio_get_data(struct msm_dp *msm_dp_
return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
}
-int msm_dp_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+int msm_dp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
@@ -216,8 +308,8 @@ end:
return rc;
}
-void msm_dp_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge)
+void msm_dp_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct msm_dp_audio_private *audio;
struct msm_dp *msm_dp_display;
@@ -246,13 +338,13 @@ void msm_dp_audio_shutdown(struct drm_connector *connector,
}
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
- struct msm_dp_catalog *catalog)
+ void __iomem *link_base)
{
int rc = 0;
struct msm_dp_audio_private *audio;
struct msm_dp_audio *msm_dp_audio;
- if (!pdev || !catalog) {
+ if (!pdev) {
DRM_ERROR("invalid input\n");
rc = -EINVAL;
goto error;
@@ -265,7 +357,7 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
}
audio->pdev = pdev;
- audio->catalog = catalog;
+ audio->link_base = link_base;
msm_dp_audio = &audio->msm_dp_audio;
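msm_dp_audio_get_data() above recovers the driver-private state from the public handle via container_of(): struct msm_dp_audio is embedded by value inside struct msm_dp_audio_private, so the address of the member is enough to find the enclosing object. A minimal sketch of that embedding pattern, using hypothetical pub/priv names:

    #include <linux/container_of.h>
    #include <linux/types.h>

    struct pub {                    /* hypothetical public handle */
            int lane_count;
    };

    struct priv {                   /* hypothetical private state */
            void __iomem *base;
            struct pub pub;         /* public part embedded by value */
    };

    static struct priv *to_priv(struct pub *p)
    {
            /* recover the enclosing private object from the embedded member */
            return container_of(p, struct priv, pub);
    }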
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index 58fc14693e48..ce2342856adb 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -8,9 +8,10 @@
#include <linux/platform_device.h>
-#include "dp_catalog.h"
#include <sound/hdmi-codec.h>
+struct drm_bridge;
+
/**
* struct msm_dp_audio
* @lane_count: number of lanes configured in current session
@@ -27,13 +28,13 @@ struct msm_dp_audio {
* Creates an instance of dp audio.
*
* @pdev: caller's platform device instance.
- * @catalog: an instance of msm_dp_catalog module.
+ * @link_base: base address of the mapped DP link register block.
*
* Returns an error code in case of failure, otherwise
* an instance of the newly created msm_dp_audio.
*/
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
- struct msm_dp_catalog *catalog);
+ void __iomem *link_base);
/**
* msm_dp_audio_put()
@@ -44,12 +45,12 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
*/
void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio);
-int msm_dp_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+int msm_dp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params);
-void msm_dp_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge);
+void msm_dp_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector);
#endif /* _DP_AUDIO_H_ */
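Across this series the msm_dp_catalog indirection is replaced by thin per-block MMIO accessors like msm_dp_read_link()/msm_dp_write_link() above: reads use readl_relaxed() since no ordering is needed, while writes use writel() so the store is ordered ahead of whatever follows. A minimal sketch of the pattern, with hypothetical REG_FOO/FOO_ENABLE names standing in for real registers:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define REG_FOO         0x10    /* hypothetical register offset */
    #define FOO_ENABLE      BIT(0)  /* hypothetical enable bit */

    static inline u32 blk_read(void __iomem *base, u32 offset)
    {
            return readl_relaxed(base + offset);    /* plain status read, no barrier */
    }

    static inline void blk_write(void __iomem *base, u32 offset, u32 data)
    {
            /* writel() orders the store against whatever the caller does next */
            writel(data, base + offset);
    }

    static void blk_enable(void __iomem *base, bool enable)
    {
            u32 val = blk_read(base, REG_FOO);      /* read-modify-write, as in the hunks above */

            if (enable)
                    val |= FOO_ENABLE;
            else
                    val &= ~FOO_ENABLE;

            blk_write(base, REG_FOO, val);
    }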
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index bc8d46abfc61..3825a2fb48e2 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -4,6 +4,7 @@
*/
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <drm/drm_print.h>
@@ -22,7 +23,7 @@ enum msm_dp_aux_err {
struct msm_dp_aux_private {
struct device *dev;
- struct msm_dp_catalog *catalog;
+ void __iomem *aux_base;
struct phy *phy;
@@ -45,6 +46,80 @@ struct msm_dp_aux_private {
struct drm_dp_aux msm_dp_aux;
};
+static inline u32 msm_dp_read_aux(struct msm_dp_aux_private *aux, u32 offset)
+{
+ return readl_relaxed(aux->aux_base + offset);
+}
+
+static inline void msm_dp_write_aux(struct msm_dp_aux_private *aux,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure aux reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, aux->aux_base + offset);
+}
+
+static void msm_dp_aux_clear_hw_interrupts(struct msm_dp_aux_private *aux)
+{
+ msm_dp_read_aux(aux, REG_DP_PHY_AUX_INTERRUPT_STATUS);
+ msm_dp_write_aux(aux, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+ msm_dp_write_aux(aux, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+ msm_dp_write_aux(aux, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+}
+
+/*
+ * NOTE: resetting the AUX controller will also clear any pending HPD-related interrupts
+ */
+static void msm_dp_aux_reset(struct msm_dp_aux_private *aux)
+{
+ u32 aux_ctrl;
+
+ aux_ctrl = msm_dp_read_aux(aux, REG_DP_AUX_CTRL);
+
+ aux_ctrl |= DP_AUX_CTRL_RESET;
+ msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+
+ aux_ctrl &= ~DP_AUX_CTRL_RESET;
+ msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+static void msm_dp_aux_enable(struct msm_dp_aux_private *aux)
+{
+ u32 aux_ctrl;
+
+ aux_ctrl = msm_dp_read_aux(aux, REG_DP_AUX_CTRL);
+
+ msm_dp_write_aux(aux, REG_DP_TIMEOUT_COUNT, 0xffff);
+ msm_dp_write_aux(aux, REG_DP_AUX_LIMITS, 0xffff);
+
+ aux_ctrl |= DP_AUX_CTRL_ENABLE;
+ msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+static void msm_dp_aux_disable(struct msm_dp_aux_private *aux)
+{
+ u32 aux_ctrl;
+
+ aux_ctrl = msm_dp_read_aux(aux, REG_DP_AUX_CTRL);
+ aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
+ msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+static int msm_dp_aux_wait_for_hpd_connect_state(struct msm_dp_aux_private *aux,
+ unsigned long wait_us)
+{
+ u32 state;
+
+ /* poll for hpd connected status every 2ms and timeout after wait_us */
+ return readl_poll_timeout(aux->aux_base +
+ REG_DP_DP_HPD_INT_STATUS,
+ state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
+ min(wait_us, 2000), wait_us);
+}
+
#define MAX_AUX_RETRIES 5
static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux,
@@ -88,11 +163,11 @@ static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux,
/* index = 0, write */
if (i == 0)
reg |= DP_AUX_DATA_INDEX_WRITE;
- msm_dp_catalog_aux_write_data(aux->catalog, reg);
+ msm_dp_write_aux(aux, REG_DP_AUX_DATA, reg);
}
- msm_dp_catalog_aux_clear_trans(aux->catalog, false);
- msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ msm_dp_write_aux(aux, REG_DP_AUX_TRANS_CTRL, 0);
+ msm_dp_aux_clear_hw_interrupts(aux);
reg = 0; /* Transaction number == 1 */
if (!aux->native) { /* i2c */
@@ -106,7 +181,7 @@ static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux,
}
reg |= DP_AUX_TRANS_CTRL_GO;
- msm_dp_catalog_aux_write_trans(aux->catalog, reg);
+ msm_dp_write_aux(aux, REG_DP_AUX_TRANS_CTRL, reg);
return len;
}
@@ -139,20 +214,22 @@ static ssize_t msm_dp_aux_cmd_fifo_rx(struct msm_dp_aux_private *aux,
u32 i, actual_i;
u32 len = msg->size;
- msm_dp_catalog_aux_clear_trans(aux->catalog, true);
+ data = msm_dp_read_aux(aux, REG_DP_AUX_TRANS_CTRL);
+ data &= ~DP_AUX_TRANS_CTRL_GO;
+ msm_dp_write_aux(aux, REG_DP_AUX_TRANS_CTRL, data);
data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
data |= DP_AUX_DATA_READ; /* read */
- msm_dp_catalog_aux_write_data(aux->catalog, data);
+ msm_dp_write_aux(aux, REG_DP_AUX_DATA, data);
dp = msg->buffer;
/* discard first byte */
- data = msm_dp_catalog_aux_read_data(aux->catalog);
+ data = msm_dp_read_aux(aux, REG_DP_AUX_DATA);
for (i = 0; i < len; i++) {
- data = msm_dp_catalog_aux_read_data(aux->catalog);
+ data = msm_dp_read_aux(aux, REG_DP_AUX_DATA);
*dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
@@ -335,8 +412,8 @@ static ssize_t msm_dp_aux_transfer(struct drm_dp_aux *msm_dp_aux,
phy_calibrate(aux->phy);
}
/* reset aux if link is in connected state */
- if (msm_dp_catalog_link_is_connected(aux->catalog))
- msm_dp_catalog_aux_reset(aux->catalog);
+ if (msm_dp_aux_is_link_connected(msm_dp_aux))
+ msm_dp_aux_reset(aux);
} else {
aux->retry_cnt = 0;
switch (aux->aux_error_num) {
@@ -369,9 +446,8 @@ exit:
return ret;
}
-irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux)
+irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux, u32 isr)
{
- u32 isr;
struct msm_dp_aux_private *aux;
if (!msm_dp_aux) {
@@ -381,12 +457,6 @@ irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux)
aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
- isr = msm_dp_catalog_aux_get_irq(aux->catalog);
-
- /* no interrupts pending, return immediately */
- if (!isr)
- return IRQ_NONE;
-
if (!aux->cmd_busy) {
DRM_ERROR("Unexpected DP AUX IRQ %#010x when not busy\n", isr);
return IRQ_NONE;
@@ -403,7 +473,7 @@ irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux)
if (isr & DP_INTR_AUX_ERROR) {
aux->aux_error_num = DP_AUX_ERR_PHY;
- msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ msm_dp_aux_clear_hw_interrupts(aux);
} else if (isr & DP_INTR_NACK_DEFER) {
aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
} else if (isr & DP_INTR_WRONG_ADDR) {
@@ -444,7 +514,7 @@ void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux)
aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
phy_calibrate(aux->phy);
- msm_dp_catalog_aux_reset(aux->catalog);
+ msm_dp_aux_reset(aux);
}
void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux)
@@ -460,7 +530,7 @@ void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux)
mutex_lock(&aux->mutex);
- msm_dp_catalog_aux_enable(aux->catalog, true);
+ msm_dp_aux_enable(aux);
aux->retry_cnt = 0;
aux->initted = true;
@@ -476,7 +546,7 @@ void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux)
mutex_lock(&aux->mutex);
aux->initted = false;
- msm_dp_catalog_aux_enable(aux->catalog, false);
+ msm_dp_aux_disable(aux);
mutex_unlock(&aux->mutex);
}
@@ -517,23 +587,105 @@ static int msm_dp_wait_hpd_asserted(struct drm_dp_aux *msm_dp_aux,
if (ret)
return ret;
- ret = msm_dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us);
+ ret = msm_dp_aux_wait_for_hpd_connect_state(aux, wait_us);
pm_runtime_put_sync(aux->dev);
return ret;
}
-struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog,
+void msm_dp_aux_hpd_enable(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 reg;
+
+ /* Configure REFTIMER and enable it */
+ reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_REFTIMER);
+ reg |= DP_DP_HPD_REFTIMER_ENABLE;
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_REFTIMER, reg);
+
+ /* Enable HPD */
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
+}
+
+void msm_dp_aux_hpd_disable(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 reg;
+
+ reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_REFTIMER);
+ reg &= ~DP_DP_HPD_REFTIMER_ENABLE;
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_REFTIMER, reg);
+
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_CTRL, 0);
+}
+
+void msm_dp_aux_hpd_intr_enable(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 reg;
+
+ reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_MASK);
+ reg |= DP_DP_HPD_INT_MASK;
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_INT_MASK,
+ reg & DP_DP_HPD_INT_MASK);
+}
+
+void msm_dp_aux_hpd_intr_disable(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 reg;
+
+ reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_MASK);
+ reg &= ~DP_DP_HPD_INT_MASK;
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_INT_MASK,
+ reg & DP_DP_HPD_INT_MASK);
+}
+
+u32 msm_dp_aux_get_hpd_intr_status(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ int isr, mask;
+
+ isr = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_STATUS);
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_INT_ACK,
+ (isr & DP_DP_HPD_INT_MASK));
+ mask = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_MASK);
+
+ /*
+ * We only want to return interrupts that are unmasked to the caller.
+ * However, the interrupt status field also contains other
+ * informational bits about the HPD state status, so we only mask
+ * out the part of the register that tells us about which interrupts
+ * are pending.
+ */
+ return isr & (mask | ~DP_DP_HPD_INT_MASK);
+}
+
+u32 msm_dp_aux_is_link_connected(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 status;
+
+ status = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_STATUS);
+ status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
+ status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
+
+ return status;
+}
+
+struct drm_dp_aux *msm_dp_aux_get(struct device *dev,
struct phy *phy,
- bool is_edp)
+ bool is_edp,
+ void __iomem *aux_base)
{
struct msm_dp_aux_private *aux;
- if (!catalog) {
- DRM_ERROR("invalid input\n");
- return ERR_PTR(-ENODEV);
- }
-
aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
if (!aux)
return ERR_PTR(-ENOMEM);
@@ -544,9 +696,9 @@ struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *cat
mutex_init(&aux->mutex);
aux->dev = dev;
- aux->catalog = catalog;
aux->phy = phy;
aux->retry_cnt = 0;
+ aux->aux_base = aux_base;
/*
* Use the drm_dp_aux_init() to use the aux adapter
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 39c5b4c8596a..4be02e8b4d0b 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -6,21 +6,28 @@
#ifndef _DP_AUX_H_
#define _DP_AUX_H_
-#include "dp_catalog.h"
#include <drm/display/drm_dp_helper.h>
int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux);
void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux);
-irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux);
+irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux, u32 isr);
void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled);
void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux);
void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux);
void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_hpd_enable(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_hpd_disable(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_hpd_intr_enable(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_hpd_intr_disable(struct drm_dp_aux *msm_dp_aux);
+u32 msm_dp_aux_get_hpd_intr_status(struct drm_dp_aux *msm_dp_aux);
+u32 msm_dp_aux_is_link_connected(struct drm_dp_aux *msm_dp_aux);
+
struct phy;
-struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog,
+struct drm_dp_aux *msm_dp_aux_get(struct device *dev,
struct phy *phy,
- bool is_edp);
+ bool is_edp,
+ void __iomem *aux_base);
void msm_dp_aux_put(struct drm_dp_aux *aux);
#endif /*__DP_AUX_H_*/
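msm_dp_aux_wait_for_hpd_connect_state() above (together with the new <linux/iopoll.h> include in dp_aux.c) is built on readl_poll_timeout(), which re-reads a register until a condition holds or the time budget expires. A minimal sketch of the same shape, with hypothetical REG_STATUS/STATUS_READY names; the real helper polls REG_DP_DP_HPD_INT_STATUS for the CONNECTED state:

    #include <linux/bits.h>
    #include <linux/iopoll.h>
    #include <linux/minmax.h>
    #include <linux/types.h>

    #define REG_STATUS      0x20    /* hypothetical status register offset */
    #define STATUS_READY    BIT(3)  /* hypothetical "ready" bit */

    static int wait_ready_sketch(void __iomem *base, unsigned long timeout_us)
    {
            u32 val;

            /*
             * Re-read REG_STATUS every 2ms (never longer than the total
             * budget) until STATUS_READY is set; returns 0 on success or
             * -ETIMEDOUT once timeout_us has elapsed.
             */
            return readl_poll_timeout(base + REG_STATUS, val,
                                      val & STATUS_READY,
                                      min(timeout_us, 2000UL), timeout_us);
    }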
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
deleted file mode 100644
index 7b7eadb2f83b..000000000000
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ /dev/null
@@ -1,1298 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
-
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include <linux/platform_device.h>
-#include <linux/rational.h>
-#include <drm/display/drm_dp_helper.h>
-#include <drm/drm_print.h>
-
-#include "dp_catalog.h"
-#include "dp_reg.h"
-
-#define POLLING_SLEEP_US 1000
-#define POLLING_TIMEOUT_US 10000
-
-#define SCRAMBLER_RESET_COUNT_VALUE 0xFC
-
-#define DP_INTERRUPT_STATUS_ACK_SHIFT 1
-#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
-
-#define DP_INTF_CONFIG_DATABUS_WIDEN BIT(4)
-
-#define DP_INTERRUPT_STATUS1 \
- (DP_INTR_AUX_XFER_DONE| \
- DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
- DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
- DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
- DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
-
-#define DP_INTERRUPT_STATUS1_ACK \
- (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT)
-#define DP_INTERRUPT_STATUS1_MASK \
- (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT)
-
-#define DP_INTERRUPT_STATUS2 \
- (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
- DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
-
-#define DP_INTERRUPT_STATUS2_ACK \
- (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT)
-#define DP_INTERRUPT_STATUS2_MASK \
- (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT)
-
-#define DP_INTERRUPT_STATUS4 \
- (PSR_UPDATE_INT | PSR_CAPTURE_INT | PSR_EXIT_INT | \
- PSR_UPDATE_ERROR_INT | PSR_WAKE_ERROR_INT)
-
-#define DP_INTERRUPT_MASK4 \
- (PSR_UPDATE_MASK | PSR_CAPTURE_MASK | PSR_EXIT_MASK | \
- PSR_UPDATE_ERROR_MASK | PSR_WAKE_ERROR_MASK)
-
-#define DP_DEFAULT_AHB_OFFSET 0x0000
-#define DP_DEFAULT_AHB_SIZE 0x0200
-#define DP_DEFAULT_AUX_OFFSET 0x0200
-#define DP_DEFAULT_AUX_SIZE 0x0200
-#define DP_DEFAULT_LINK_OFFSET 0x0400
-#define DP_DEFAULT_LINK_SIZE 0x0C00
-#define DP_DEFAULT_P0_OFFSET 0x1000
-#define DP_DEFAULT_P0_SIZE 0x0400
-
-struct dss_io_region {
- size_t len;
- void __iomem *base;
-};
-
-struct dss_io_data {
- struct dss_io_region ahb;
- struct dss_io_region aux;
- struct dss_io_region link;
- struct dss_io_region p0;
-};
-
-struct msm_dp_catalog_private {
- struct device *dev;
- struct drm_device *drm_dev;
- struct dss_io_data io;
- struct msm_dp_catalog msm_dp_catalog;
-};
-
-void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- struct dss_io_data *dss = &catalog->io;
-
- msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb");
- msm_disp_snapshot_add_block(disp_state, dss->aux.len, dss->aux.base, "dp_aux");
- msm_disp_snapshot_add_block(disp_state, dss->link.len, dss->link.base, "dp_link");
- msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0");
-}
-
-static inline u32 msm_dp_read_aux(struct msm_dp_catalog_private *catalog, u32 offset)
-{
- return readl_relaxed(catalog->io.aux.base + offset);
-}
-
-static inline void msm_dp_write_aux(struct msm_dp_catalog_private *catalog,
- u32 offset, u32 data)
-{
- /*
- * To make sure aux reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- writel(data, catalog->io.aux.base + offset);
-}
-
-static inline u32 msm_dp_read_ahb(const struct msm_dp_catalog_private *catalog, u32 offset)
-{
- return readl_relaxed(catalog->io.ahb.base + offset);
-}
-
-static inline void msm_dp_write_ahb(struct msm_dp_catalog_private *catalog,
- u32 offset, u32 data)
-{
- /*
- * To make sure phy reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- writel(data, catalog->io.ahb.base + offset);
-}
-
-static inline void msm_dp_write_p0(struct msm_dp_catalog_private *catalog,
- u32 offset, u32 data)
-{
- /*
- * To make sure interface reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- writel(data, catalog->io.p0.base + offset);
-}
-
-static inline u32 msm_dp_read_p0(struct msm_dp_catalog_private *catalog,
- u32 offset)
-{
- /*
- * To make sure interface reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- return readl_relaxed(catalog->io.p0.base + offset);
-}
-
-static inline u32 msm_dp_read_link(struct msm_dp_catalog_private *catalog, u32 offset)
-{
- return readl_relaxed(catalog->io.link.base + offset);
-}
-
-static inline void msm_dp_write_link(struct msm_dp_catalog_private *catalog,
- u32 offset, u32 data)
-{
- /*
- * To make sure link reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- writel(data, catalog->io.link.base + offset);
-}
-
-/* aux related catalog functions */
-u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- return msm_dp_read_aux(catalog, REG_DP_AUX_DATA);
-}
-
-int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_aux(catalog, REG_DP_AUX_DATA, data);
- return 0;
-}
-
-int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
- return 0;
-}
-
-int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read)
-{
- u32 data;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- if (read) {
- data = msm_dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL);
- data &= ~DP_AUX_TRANS_CTRL_GO;
- msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
- } else {
- msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0);
- }
- return 0;
-}
-
-int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS);
- msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
- msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
- msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
- return 0;
-}
-
-/**
- * msm_dp_catalog_aux_reset() - reset AUX controller
- *
- * @msm_dp_catalog: DP catalog structure
- *
- * return: void
- *
- * This function reset AUX controller
- *
- * NOTE: reset AUX controller will also clear any pending HPD related interrupts
- *
- */
-void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog)
-{
- u32 aux_ctrl;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL);
-
- aux_ctrl |= DP_AUX_CTRL_RESET;
- msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
- usleep_range(1000, 1100); /* h/w recommended delay */
-
- aux_ctrl &= ~DP_AUX_CTRL_RESET;
- msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
-}
-
-void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable)
-{
- u32 aux_ctrl;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL);
-
- if (enable) {
- msm_dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff);
- msm_dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff);
- aux_ctrl |= DP_AUX_CTRL_ENABLE;
- } else {
- aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
- }
-
- msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
-}
-
-int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog,
- unsigned long wait_us)
-{
- u32 state;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- /* poll for hpd connected status every 2ms and timeout after wait_us */
- return readl_poll_timeout(catalog->io.aux.base +
- REG_DP_DP_HPD_INT_STATUS,
- state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
- min(wait_us, 2000), wait_us);
-}
-
-u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 intr, intr_ack;
-
- intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS);
- intr &= ~DP_INTERRUPT_STATUS1_MASK;
- intr_ack = (intr & DP_INTERRUPT_STATUS1)
- << DP_INTERRUPT_STATUS_ACK_SHIFT;
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack |
- DP_INTERRUPT_STATUS1_MASK);
-
- return intr;
-
-}
-
-/* controller related catalog functions */
-void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog,
- u32 msm_dp_tu, u32 valid_boundary,
- u32 valid_boundary2)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary);
- msm_dp_write_link(catalog, REG_DP_TU, msm_dp_tu);
- msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
-}
-
-void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL, state);
-}
-
-void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 cfg)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- drm_dbg_dp(catalog->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", cfg);
-
- msm_dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
-}
-
-void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
- u32 ln_mapping;
-
- ln_mapping = ln_0 << LANE0_MAPPING_SHIFT;
- ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT;
- ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
- ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
-
- msm_dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
- ln_mapping);
-}
-
-void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog,
- bool enable)
-{
- u32 val;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- val = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
-
- if (enable)
- val |= DP_MAINLINK_CTRL_ENABLE;
- else
- val &= ~DP_MAINLINK_CTRL_ENABLE;
-
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val);
-}
-
-void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog,
- bool enable)
-{
- u32 mainlink_ctrl;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- drm_dbg_dp(catalog->drm_dev, "enable=%d\n", enable);
- if (enable) {
- /*
- * To make sure link reg writes happens before other operation,
- * msm_dp_write_link() function uses writel()
- */
- mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
-
- mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
- DP_MAINLINK_CTRL_ENABLE);
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
-
- mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
-
- mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
-
- mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
- DP_MAINLINK_FB_BOUNDARY_SEL);
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
- } else {
- mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
- mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
- }
-}
-
-void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog,
- u32 colorimetry_cfg,
- u32 test_bits_depth)
-{
- u32 misc_val;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- misc_val = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
-
- /* clear bpp bits */
- misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
- misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT;
- misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT;
- /* Configure clock to synchronous mode */
- misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
-
- drm_dbg_dp(catalog->drm_dev, "misc settings = 0x%x\n", misc_val);
- msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
-}
-
-void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog)
-{
- u32 mainlink_ctrl, hw_revision;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
-
- hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog);
- if (hw_revision >= DP_HW_VERSION_1_2)
- mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE;
- else
- mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_UPDATE_SDP;
-
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
-}
-
-void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog,
- u32 rate, u32 stream_rate_khz,
- bool is_ycbcr_420)
-{
- u32 pixel_m, pixel_n;
- u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
- u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE;
- u32 const link_rate_hbr2 = 540000;
- u32 const link_rate_hbr3 = 810000;
- unsigned long den, num;
-
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- if (rate == link_rate_hbr3)
- pixel_div = 6;
- else if (rate == 162000 || rate == 270000)
- pixel_div = 2;
- else if (rate == link_rate_hbr2)
- pixel_div = 4;
- else
- DRM_ERROR("Invalid pixel mux divider\n");
-
- dispcc_input_rate = (rate * 10) / pixel_div;
-
- rational_best_approximation(dispcc_input_rate, stream_rate_khz,
- (unsigned long)(1 << 16) - 1,
- (unsigned long)(1 << 16) - 1, &den, &num);
-
- den = ~(den - num);
- den = den & 0xFFFF;
- pixel_m = num;
- pixel_n = den;
-
- mvid = (pixel_m & 0xFFFF) * 5;
- nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
-
- if (nvid < nvid_fixed) {
- u32 temp;
-
- temp = (nvid_fixed / nvid) * nvid;
- mvid = (nvid_fixed / nvid) * mvid;
- nvid = temp;
- }
-
- if (is_ycbcr_420)
- mvid /= 2;
-
- if (link_rate_hbr2 == rate)
- nvid *= 2;
-
- if (link_rate_hbr3 == rate)
- nvid *= 3;
-
- drm_dbg_dp(catalog->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid);
- msm_dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
- msm_dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
- msm_dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
-}
-
-int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog,
- u32 state_bit)
-{
- int bit, ret;
- u32 data;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- bit = BIT(state_bit - 1);
- drm_dbg_dp(catalog->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit);
- msm_dp_catalog_ctrl_state_ctrl(msm_dp_catalog, bit);
-
- bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
-
- /* Poll for mainlink ready status */
- ret = readx_poll_timeout(readl, catalog->io.link.base +
- REG_DP_MAINLINK_READY,
- data, data & bit,
- POLLING_SLEEP_US, POLLING_TIMEOUT_US);
- if (ret < 0) {
- DRM_ERROR("set state_bit for link_train=%d failed\n", state_bit);
- return ret;
- }
- return 0;
-}
-
-/**
- * msm_dp_catalog_hw_revision() - retrieve DP hw revision
- *
- * @msm_dp_catalog: DP catalog structure
- *
- * Return: DP controller hw revision
- *
- */
-u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog)
-{
- const struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- return msm_dp_read_ahb(catalog, REG_DP_HW_VERSION);
-}
-
-/**
- * msm_dp_catalog_ctrl_reset() - reset DP controller
- *
- * @msm_dp_catalog: DP catalog structure
- *
- * return: void
- *
- * This function reset the DP controller
- *
- * NOTE: reset DP controller will also clear any pending HPD related interrupts
- *
- */
-void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog)
-{
- u32 sw_reset;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- sw_reset = msm_dp_read_ahb(catalog, REG_DP_SW_RESET);
-
- sw_reset |= DP_SW_RESET;
- msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
- usleep_range(1000, 1100); /* h/w recommended delay */
-
- sw_reset &= ~DP_SW_RESET;
- msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
-}
-
-bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog)
-{
- u32 data;
- int ret;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- /* Poll for mainlink ready status */
- ret = readl_poll_timeout(catalog->io.link.base +
- REG_DP_MAINLINK_READY,
- data, data & DP_MAINLINK_READY_FOR_VIDEO,
- POLLING_SLEEP_US, POLLING_TIMEOUT_US);
- if (ret < 0) {
- DRM_ERROR("mainlink not ready\n");
- return false;
- }
-
- return true;
-}
-
-void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog,
- bool enable)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- if (enable) {
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS,
- DP_INTERRUPT_STATUS1_MASK);
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
- DP_INTERRUPT_STATUS2_MASK);
- } else {
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00);
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00);
- }
-}
-
-void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog,
- u32 intr_mask, bool en)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- u32 config = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
-
- config = (en ? config | intr_mask : config & ~intr_mask);
-
- drm_dbg_dp(catalog->drm_dev, "intr_mask=%#x config=%#x\n",
- intr_mask, config);
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
- config & DP_DP_HPD_INT_MASK);
-}
-
-void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
-
- /* Configure REFTIMER and enable it */
- reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
-
- /* Enable HPD */
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
-}
-
-void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
-
- reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE;
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
-
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0);
-}
-
-static void msm_dp_catalog_enable_sdp(struct msm_dp_catalog_private *catalog)
-{
- /* trigger sdp */
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP);
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0);
-}
-
-void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 config;
-
- /* enable PSR1 function */
- config = msm_dp_read_link(catalog, REG_PSR_CONFIG);
- config |= PSR1_SUPPORTED;
- msm_dp_write_link(catalog, REG_PSR_CONFIG, config);
-
- msm_dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4);
- msm_dp_catalog_enable_sdp(catalog);
-}
-
-void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 cmd;
-
- cmd = msm_dp_read_link(catalog, REG_PSR_CMD);
-
- cmd &= ~(PSR_ENTER | PSR_EXIT);
-
- if (enter)
- cmd |= PSR_ENTER;
- else
- cmd |= PSR_EXIT;
-
- msm_dp_catalog_enable_sdp(catalog);
- msm_dp_write_link(catalog, REG_PSR_CMD, cmd);
-}
-
-u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 status;
-
- status = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
- drm_dbg_dp(catalog->drm_dev, "aux status: %#x\n", status);
- status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
- status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
-
- return status;
-}
-
-u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- int isr, mask;
-
- isr = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
- (isr & DP_DP_HPD_INT_MASK));
- mask = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
-
- /*
- * We only want to return interrupts that are unmasked to the caller.
- * However, the interrupt status field also contains other
- * informational bits about the HPD state status, so we only mask
- * out the part of the register that tells us about which interrupts
- * are pending.
- */
- return isr & (mask | ~DP_DP_HPD_INT_MASK);
-}
-
-u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 intr, intr_ack;
-
- intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS4);
- intr_ack = (intr & DP_INTERRUPT_STATUS4)
- << DP_INTERRUPT_STATUS_ACK_SHIFT;
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack);
-
- return intr;
-}
-
-int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 intr, intr_ack;
-
- intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS2);
- intr &= ~DP_INTERRUPT_STATUS2_MASK;
- intr_ack = (intr & DP_INTERRUPT_STATUS2)
- << DP_INTERRUPT_STATUS_ACK_SHIFT;
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
- intr_ack | DP_INTERRUPT_STATUS2_MASK);
-
- return intr;
-}
-
-void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL,
- DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
- usleep_range(1000, 1100); /* h/w recommended delay */
- msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
-}
-
-void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog,
- u32 pattern)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 value = 0x0;
-
- /* Make sure to clear the current pattern before starting a new one */
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
-
- drm_dbg_dp(catalog->drm_dev, "pattern: %#x\n", pattern);
- switch (pattern) {
- case DP_PHY_TEST_PATTERN_D10_2:
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
- break;
- case DP_PHY_TEST_PATTERN_ERROR_COUNT:
- value &= ~(1 << 16);
- msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
- value);
- value |= SCRAMBLER_RESET_COUNT_VALUE;
- msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
- value);
- msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
- DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
- break;
- case DP_PHY_TEST_PATTERN_PRBS7:
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_PRBS7);
- break;
- case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
- /* 00111110000011111000001111100000 */
- msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
- 0x3E0F83E0);
- /* 00001111100000111110000011111000 */
- msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
- 0x0F83E0F8);
- /* 1111100000111110 */
- msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
- 0x0000F83E);
- break;
- case DP_PHY_TEST_PATTERN_CP2520:
- value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
- value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
-
- value = DP_HBR2_ERM_PATTERN;
- msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
- value);
- value |= SCRAMBLER_RESET_COUNT_VALUE;
- msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
- value);
- msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
- DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
- value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
- value |= DP_MAINLINK_CTRL_ENABLE;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
- break;
- case DP_PHY_TEST_PATTERN_SEL_MASK:
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL,
- DP_MAINLINK_CTRL_ENABLE);
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
- break;
- default:
- drm_dbg_dp(catalog->drm_dev,
- "No valid test pattern requested: %#x\n", pattern);
- break;
- }
-}
-
-u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- return msm_dp_read_link(catalog, REG_DP_MAINLINK_READY);
-}
-
-/* panel related catalog functions */
-int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total,
- u32 sync_start, u32 width_blanking, u32 msm_dp_active)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 reg;
-
- msm_dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total);
- msm_dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start);
- msm_dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking);
- msm_dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, msm_dp_active);
-
- reg = msm_dp_read_p0(catalog, MMSS_DP_INTF_CONFIG);
-
- if (msm_dp_catalog->wide_bus_en)
- reg |= DP_INTF_CONFIG_DATABUS_WIDEN;
- else
- reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN;
-
-
- DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", msm_dp_catalog->wide_bus_en, reg);
-
- msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg);
- return 0;
-}
-
-static void msm_dp_catalog_panel_send_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp)
-{
- struct msm_dp_catalog_private *catalog;
- u32 header[2];
- u32 val;
- int i;
-
- catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header);
-
- msm_dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]);
-
- for (i = 0; i < sizeof(vsc_sdp->db); i += 4) {
- val = ((vsc_sdp->db[i]) | (vsc_sdp->db[i + 1] << 8) | (vsc_sdp->db[i + 2] << 16) |
- (vsc_sdp->db[i + 3] << 24));
- msm_dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val);
- }
-}
-
-static void msm_dp_catalog_panel_update_sdp(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog;
- u32 hw_revision;
-
- catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
-
- hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog);
- if (hw_revision < DP_HW_VERSION_1_2 && hw_revision >= DP_HW_VERSION_1_0) {
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01);
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00);
- }
-}
-
-void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp)
-{
- struct msm_dp_catalog_private *catalog;
- u32 cfg, cfg2, misc;
-
- catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
-
- cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
- cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
-
- cfg |= GEN0_SDP_EN;
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
-
- cfg2 |= GENERIC0_SDPSIZE_VALID;
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
-
- msm_dp_catalog_panel_send_vsc_sdp(msm_dp_catalog, vsc_sdp);
-
- /* indicates presence of VSC (BIT(6) of MISC1) */
- misc |= DP_MISC1_VSC_SDP;
-
- drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=1\n");
-
- pr_debug("misc settings = 0x%x\n", misc);
- msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
-
- msm_dp_catalog_panel_update_sdp(msm_dp_catalog);
-}
-
-void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog;
- u32 cfg, cfg2, misc;
-
- catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
-
- cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
- cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
-
- cfg &= ~GEN0_SDP_EN;
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
-
- cfg2 &= ~GENERIC0_SDPSIZE_VALID;
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
-
- /* switch back to MSA */
- misc &= ~DP_MISC1_VSC_SDP;
-
- drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=0\n");
-
- pr_debug("misc settings = 0x%x\n", misc);
- msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
-
- msm_dp_catalog_panel_update_sdp(msm_dp_catalog);
-}
-
-void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
- struct drm_display_mode *drm_mode)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 hsync_period, vsync_period;
- u32 display_v_start, display_v_end;
- u32 hsync_start_x, hsync_end_x;
- u32 v_sync_width;
- u32 hsync_ctl;
- u32 display_hctl;
-
- /* TPG config parameters*/
- hsync_period = drm_mode->htotal;
- vsync_period = drm_mode->vtotal;
-
- display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) *
- hsync_period);
- display_v_end = ((vsync_period - (drm_mode->vsync_start -
- drm_mode->vdisplay))
- * hsync_period) - 1;
-
- display_v_start += drm_mode->htotal - drm_mode->hsync_start;
- display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay);
-
- hsync_start_x = drm_mode->htotal - drm_mode->hsync_start;
- hsync_end_x = hsync_period - (drm_mode->hsync_start -
- drm_mode->hdisplay) - 1;
-
- v_sync_width = drm_mode->vsync_end - drm_mode->vsync_start;
-
- hsync_ctl = (hsync_period << 16) |
- (drm_mode->hsync_end - drm_mode->hsync_start);
- display_hctl = (hsync_end_x << 16) | hsync_start_x;
-
-
- msm_dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
- hsync_period);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
- hsync_period);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0);
- msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
- msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0);
-
- msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL,
- DP_TPG_CHECKERED_RECT_PATTERN);
- msm_dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG,
- DP_TPG_VIDEO_CONFIG_BPP_8BIT |
- DP_TPG_VIDEO_CONFIG_RGB);
- msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE,
- DP_BIST_ENABLE_DPBIST_EN);
- msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
- DP_TIMING_ENGINE_EN_EN);
- drm_dbg_dp(catalog->drm_dev, "%s: enabled tpg\n", __func__);
-}
-
-void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
- msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0);
- msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
-}
-
-static void __iomem *msm_dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
-{
- struct resource *res;
- void __iomem *base;
-
- base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
- if (!IS_ERR(base))
- *len = resource_size(res);
-
- return base;
-}
-
-static int msm_dp_catalog_get_io(struct msm_dp_catalog_private *catalog)
-{
- struct platform_device *pdev = to_platform_device(catalog->dev);
- struct dss_io_data *dss = &catalog->io;
-
- dss->ahb.base = msm_dp_ioremap(pdev, 0, &dss->ahb.len);
- if (IS_ERR(dss->ahb.base))
- return PTR_ERR(dss->ahb.base);
-
- dss->aux.base = msm_dp_ioremap(pdev, 1, &dss->aux.len);
- if (IS_ERR(dss->aux.base)) {
- /*
- * The initial binding had a single reg, but in order to
- * support variation in the sub-region sizes this was split.
- * msm_dp_ioremap() will fail with -EINVAL here if only a single
- * reg is specified, so fill in the sub-region offsets and
- * lengths based on this single region.
- */
- if (PTR_ERR(dss->aux.base) == -EINVAL) {
- if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
- DRM_ERROR("legacy memory region not large enough\n");
- return -EINVAL;
- }
-
- dss->ahb.len = DP_DEFAULT_AHB_SIZE;
- dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET;
- dss->aux.len = DP_DEFAULT_AUX_SIZE;
- dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET;
- dss->link.len = DP_DEFAULT_LINK_SIZE;
- dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET;
- dss->p0.len = DP_DEFAULT_P0_SIZE;
- } else {
- DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base);
- return PTR_ERR(dss->aux.base);
- }
- } else {
- dss->link.base = msm_dp_ioremap(pdev, 2, &dss->link.len);
- if (IS_ERR(dss->link.base)) {
- DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
- return PTR_ERR(dss->link.base);
- }
-
- dss->p0.base = msm_dp_ioremap(pdev, 3, &dss->p0.len);
- if (IS_ERR(dss->p0.base)) {
- DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
- return PTR_ERR(dss->p0.base);
- }
- }
-
- return 0;
-}
-
-struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev)
-{
- struct msm_dp_catalog_private *catalog;
- int ret;
-
- catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
- if (!catalog)
- return ERR_PTR(-ENOMEM);
-
- catalog->dev = dev;
-
- ret = msm_dp_catalog_get_io(catalog);
- if (ret)
- return ERR_PTR(ret);
-
- return &catalog->msm_dp_catalog;
-}
-
-void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 header[2];
-
- msm_dp_utils_pack_sdp_header(sdp_hdr, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_1, header[1]);
-}
-
-void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 header[2];
-
- msm_dp_utils_pack_sdp_header(sdp_hdr, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_1, header[1]);
-}
-
-void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 header[2];
-
- msm_dp_utils_pack_sdp_header(sdp_hdr, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_1, header[1]);
-}
-
-void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 header[2];
-
- msm_dp_utils_pack_sdp_header(sdp_hdr, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_1, header[1]);
-}
-
-void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- struct dp_sdp_header tmp = *sdp_hdr;
- u32 header[2];
- u32 reg;
-
- /* XXX: is it necessary to preserve this field? */
- reg = msm_dp_read_link(catalog, MMSS_DP_AUDIO_ISRC_1);
- tmp.HB3 = FIELD_GET(HEADER_3_MASK, reg);
-
- msm_dp_utils_pack_sdp_header(&tmp, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_1, header[1]);
-}
-
-void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *msm_dp_catalog, u32 select)
-{
- struct msm_dp_catalog_private *catalog;
- u32 acr_ctrl;
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
-
- drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n",
- select, acr_ctrl);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
-}
-
-void msm_dp_catalog_audio_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable)
-{
- struct msm_dp_catalog_private *catalog;
- u32 audio_ctrl;
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- audio_ctrl = msm_dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
-
- if (enable)
- audio_ctrl |= BIT(0);
- else
- audio_ctrl &= ~BIT(0);
-
- drm_dbg_dp(catalog->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
- /* make sure audio engine is disabled */
- wmb();
-}
-
-void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog;
- u32 sdp_cfg = 0;
- u32 sdp_cfg2 = 0;
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- sdp_cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
- /* AUDIO_TIMESTAMP_SDP_EN */
- sdp_cfg |= BIT(1);
- /* AUDIO_STREAM_SDP_EN */
- sdp_cfg |= BIT(2);
- /* AUDIO_COPY_MANAGEMENT_SDP_EN */
- sdp_cfg |= BIT(5);
- /* AUDIO_ISRC_SDP_EN */
- sdp_cfg |= BIT(6);
- /* AUDIO_INFOFRAME_SDP_EN */
- sdp_cfg |= BIT(20);
-
- drm_dbg_dp(catalog->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg);
-
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
-
- sdp_cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- /* IFRM_REGSRC -> Do not use reg values */
- sdp_cfg2 &= ~BIT(0);
- /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
- sdp_cfg2 &= ~BIT(1);
-
- drm_dbg_dp(catalog->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2);
-
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
-}
-
-void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *msm_dp_catalog, u32 safe_to_exit_level)
-{
- struct msm_dp_catalog_private *catalog;
- u32 mainlink_levels;
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- mainlink_levels = msm_dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
- mainlink_levels &= 0xFE0;
- mainlink_levels |= safe_to_exit_level;
-
- drm_dbg_dp(catalog->drm_dev,
- "mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
- mainlink_levels, safe_to_exit_level);
-
- msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
deleted file mode 100644
index 6678b0ac9a67..000000000000
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_CATALOG_H_
-#define _DP_CATALOG_H_
-
-#include <drm/drm_modes.h>
-
-#include "dp_utils.h"
-#include "disp/msm_disp_snapshot.h"
-
-/* interrupts */
-#define DP_INTR_HPD BIT(0)
-#define DP_INTR_AUX_XFER_DONE BIT(3)
-#define DP_INTR_WRONG_ADDR BIT(6)
-#define DP_INTR_TIMEOUT BIT(9)
-#define DP_INTR_NACK_DEFER BIT(12)
-#define DP_INTR_WRONG_DATA_CNT BIT(15)
-#define DP_INTR_I2C_NACK BIT(18)
-#define DP_INTR_I2C_DEFER BIT(21)
-#define DP_INTR_PLL_UNLOCKED BIT(24)
-#define DP_INTR_AUX_ERROR BIT(27)
-
-#define DP_INTR_READY_FOR_VIDEO BIT(0)
-#define DP_INTR_IDLE_PATTERN_SENT BIT(3)
-#define DP_INTR_FRAME_END BIT(6)
-#define DP_INTR_CRC_UPDATED BIT(9)
-
-#define DP_HW_VERSION_1_0 0x10000000
-#define DP_HW_VERSION_1_2 0x10020000
-
-struct msm_dp_catalog {
- bool wide_bus_en;
-};
-
-/* Debug module */
-void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state);
-
-/* AUX APIs */
-u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog);
-int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data);
-int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data);
-int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read);
-int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable);
-int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog,
- unsigned long wait_us);
-u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog);
-
-/* DP Controller APIs */
-void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state);
-void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 config);
-void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog, bool enable);
-void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable);
-void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog, u32 cc, u32 tb);
-void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog, u32 rate,
- u32 stream_rate_khz, bool is_ycbcr_420);
-int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog, u32 pattern);
-u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog);
-bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog, bool enable);
-void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog,
- u32 intr_mask, bool en);
-void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter);
-u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog);
-u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog);
-int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog);
-u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog,
- u32 msm_dp_tu, u32 valid_boundary,
- u32 valid_boundary2);
-void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog,
- u32 pattern);
-u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog);
-
-/* DP Panel APIs */
-int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total,
- u32 sync_start, u32 width_blanking, u32 msm_dp_active);
-void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp);
-void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
- struct drm_display_mode *drm_mode);
-void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog);
-
-struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev);
-
-/* DP Audio APIs */
-void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *catalog, u32 select);
-void msm_dp_catalog_audio_enable(struct msm_dp_catalog *catalog, bool enable);
-void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *catalog);
-void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *catalog, u32 safe_to_exit_level);
-
-#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index a50bfafbb4ea..c42fd2c17a32 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -6,14 +6,18 @@
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/types.h>
+#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/pm_opp.h>
+#include <linux/rational.h>
#include <linux/string_choices.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
@@ -21,11 +25,46 @@
#include "dp_ctrl.h"
#include "dp_link.h"
+#define POLLING_SLEEP_US 1000
+#define POLLING_TIMEOUT_US 10000
+
#define DP_KHZ_TO_HZ 1000
#define IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES (30 * HZ / 1000) /* 30 ms */
#define PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES (300 * HZ / 1000) /* 300 ms */
#define WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES (HZ / 2)
+#define DP_INTERRUPT_STATUS_ACK_SHIFT 1
+#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
+
+#define DP_INTERRUPT_STATUS1 \
+ (DP_INTR_AUX_XFER_DONE | \
+ DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
+ DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
+ DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
+ DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
+
+#define DP_INTERRUPT_STATUS1_ACK \
+ (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS1_MASK \
+ (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT)
+
+#define DP_INTERRUPT_STATUS2 \
+ (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
+ DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
+
+#define DP_INTERRUPT_STATUS2_ACK \
+ (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS2_MASK \
+ (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT)
+
+#define DP_INTERRUPT_STATUS4 \
+ (PSR_UPDATE_INT | PSR_CAPTURE_INT | PSR_EXIT_INT | \
+ PSR_UPDATE_ERROR_INT | PSR_WAKE_ERROR_INT)
+
+#define DP_INTERRUPT_MASK4 \
+ (PSR_UPDATE_MASK | PSR_CAPTURE_MASK | PSR_EXIT_MASK | \
+ PSR_UPDATE_ERROR_MASK | PSR_WAKE_ERROR_MASK)
+
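The STATUS groups above pack three views of each interrupt into adjacent register bits: the raw status bit, an ack bit one position to the left, and a mask (enable) bit two positions to the left. Acknowledging therefore means shifting the observed status bits left by DP_INTERRUPT_STATUS_ACK_SHIFT and writing them back together with the mask bits, as msm_dp_ctrl_get_aux_interrupt() below does. A minimal userspace sketch of that arithmetic (interrupt bits copied from this patch; not kernel code):

  #include <stdio.h>
  #include <stdint.h>

  #define BIT(n) (1u << (n))

  /* two of the AUX status bits defined in this patch */
  #define DP_INTR_AUX_XFER_DONE		BIT(3)
  #define DP_INTR_WRONG_ADDR		BIT(6)

  #define DP_INTERRUPT_STATUS_ACK_SHIFT	1
  #define DP_INTERRUPT_STATUS_MASK_SHIFT	2

  int main(void)
  {
  	uint32_t status = DP_INTR_AUX_XFER_DONE | DP_INTR_WRONG_ADDR;

  	/* each status bit has its ack bit at <<1 and its mask bit at <<2 */
  	uint32_t mask = status << DP_INTERRUPT_STATUS_MASK_SHIFT;
  	uint32_t intr = status | mask;	/* pretend REG_DP_INTR_STATUS read */

  	/* mirrors msm_dp_ctrl_get_aux_interrupt(): ack what fired, keep mask */
  	uint32_t intr_ack = (intr & status) << DP_INTERRUPT_STATUS_ACK_SHIFT;

  	/* status=0x48 mask=0x120 write-back=0x1b0 */
  	printf("status=%#x mask=%#x write-back=%#x\n",
  	       status, mask, intr_ack | mask);
  	return 0;
  }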
#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0)
#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3)
@@ -77,7 +116,8 @@ struct msm_dp_ctrl_private {
struct drm_dp_aux *aux;
struct msm_dp_panel *panel;
struct msm_dp_link *link;
- struct msm_dp_catalog *catalog;
+ void __iomem *ahb_base;
+ void __iomem *link_base;
struct phy *phy;
@@ -95,11 +135,43 @@ struct msm_dp_ctrl_private {
struct completion psr_op_comp;
struct completion video_comp;
+ u32 hw_revision;
+
bool core_clks_on;
bool link_clks_on;
bool stream_clks_on;
};
+static inline u32 msm_dp_read_ahb(const struct msm_dp_ctrl_private *ctrl, u32 offset)
+{
+ return readl_relaxed(ctrl->ahb_base + offset);
+}
+
+static inline void msm_dp_write_ahb(struct msm_dp_ctrl_private *ctrl,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure phy reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, ctrl->ahb_base + offset);
+}
+
+static inline u32 msm_dp_read_link(struct msm_dp_ctrl_private *ctrl, u32 offset)
+{
+ return readl_relaxed(ctrl->link_base + offset);
+}
+
+static inline void msm_dp_write_link(struct msm_dp_ctrl_private *ctrl,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure link reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, ctrl->link_base + offset);
+}
+
static int msm_dp_aux_link_configure(struct drm_dp_aux *aux,
struct msm_dp_link_info *link)
{
@@ -119,6 +191,179 @@ static int msm_dp_aux_link_configure(struct drm_dp_aux *aux,
return 0;
}
+/*
+ * NOTE: resetting DP controller will also clear any pending HPD related interrupts
+ */
+void msm_dp_ctrl_reset(struct msm_dp_ctrl *msm_dp_ctrl)
+{
+ struct msm_dp_ctrl_private *ctrl =
+ container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
+ u32 sw_reset;
+
+ sw_reset = msm_dp_read_ahb(ctrl, REG_DP_SW_RESET);
+
+ sw_reset |= DP_SW_RESET;
+ msm_dp_write_ahb(ctrl, REG_DP_SW_RESET, sw_reset);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+
+ sw_reset &= ~DP_SW_RESET;
+ msm_dp_write_ahb(ctrl, REG_DP_SW_RESET, sw_reset);
+
+ if (!ctrl->hw_revision) {
+ ctrl->hw_revision = msm_dp_read_ahb(ctrl, REG_DP_HW_VERSION);
+ ctrl->panel->hw_revision = ctrl->hw_revision;
+ }
+}
+
+static u32 msm_dp_ctrl_get_aux_interrupt(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 intr, intr_ack;
+
+ intr = msm_dp_read_ahb(ctrl, REG_DP_INTR_STATUS);
+ intr &= ~DP_INTERRUPT_STATUS1_MASK;
+ intr_ack = (intr & DP_INTERRUPT_STATUS1)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS,
+ intr_ack | DP_INTERRUPT_STATUS1_MASK);
+
+ return intr;
+}
+
+static u32 msm_dp_ctrl_get_interrupt(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 intr, intr_ack;
+
+ intr = msm_dp_read_ahb(ctrl, REG_DP_INTR_STATUS2);
+ intr &= ~DP_INTERRUPT_STATUS2_MASK;
+ intr_ack = (intr & DP_INTERRUPT_STATUS2)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS2,
+ intr_ack | DP_INTERRUPT_STATUS2_MASK);
+
+ return intr;
+}
+
+void msm_dp_ctrl_enable_irq(struct msm_dp_ctrl *msm_dp_ctrl)
+{
+ struct msm_dp_ctrl_private *ctrl =
+ container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
+
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS,
+ DP_INTERRUPT_STATUS1_MASK);
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS2,
+ DP_INTERRUPT_STATUS2_MASK);
+}
+
+void msm_dp_ctrl_disable_irq(struct msm_dp_ctrl *msm_dp_ctrl)
+{
+ struct msm_dp_ctrl_private *ctrl =
+ container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
+
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS, 0x00);
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS2, 0x00);
+}
+
+static u32 msm_dp_ctrl_get_psr_interrupt(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 intr, intr_ack;
+
+ intr = msm_dp_read_ahb(ctrl, REG_DP_INTR_STATUS4);
+ intr_ack = (intr & DP_INTERRUPT_STATUS4)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS4, intr_ack);
+
+ return intr;
+}
+
+static void msm_dp_ctrl_config_psr_interrupt(struct msm_dp_ctrl_private *ctrl)
+{
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4);
+}
+
+static void msm_dp_ctrl_psr_mainlink_enable(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 val;
+
+ val = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ val |= DP_MAINLINK_CTRL_ENABLE;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, val);
+}
+
+static void msm_dp_ctrl_psr_mainlink_disable(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 val;
+
+ val = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ val &= ~DP_MAINLINK_CTRL_ENABLE;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, val);
+}
+
+static void msm_dp_ctrl_mainlink_enable(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 mainlink_ctrl;
+
+ drm_dbg_dp(ctrl->drm_dev, "enable\n");
+
+ mainlink_ctrl = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+
+ mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
+ DP_MAINLINK_CTRL_ENABLE);
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
+ DP_MAINLINK_FB_BOUNDARY_SEL);
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+}
+
+static void msm_dp_ctrl_mainlink_disable(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 mainlink_ctrl;
+
+ drm_dbg_dp(ctrl->drm_dev, "disable\n");
+
+ mainlink_ctrl = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+}
+
+static void msm_dp_setup_peripheral_flush(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 mainlink_ctrl;
+
+ mainlink_ctrl = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+
+ if (ctrl->hw_revision >= DP_HW_VERSION_1_2)
+ mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE;
+ else
+ mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_UPDATE_SDP;
+
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+}
+
+static bool msm_dp_ctrl_mainlink_ready(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 data;
+ int ret;
+
+ /* Poll for mainlink ready status */
+ ret = readl_poll_timeout(ctrl->link_base + REG_DP_MAINLINK_READY,
+ data, data & DP_MAINLINK_READY_FOR_VIDEO,
+ POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("mainlink not ready\n");
+ return false;
+ }
+
+ return true;
+}
+
void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl)
{
struct msm_dp_ctrl_private *ctrl;
@@ -126,7 +371,7 @@ void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
reinit_completion(&ctrl->idle_comp);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, DP_STATE_CTRL_PUSH_IDLE);
if (!wait_for_completion_timeout(&ctrl->idle_comp,
IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
@@ -171,23 +416,50 @@ static void msm_dp_ctrl_config_ctrl(struct msm_dp_ctrl_private *ctrl)
if (ctrl->panel->psr_cap.version)
config |= DP_CONFIGURATION_CTRL_SEND_VSC;
- msm_dp_catalog_ctrl_config_ctrl(ctrl->catalog, config);
+ drm_dbg_dp(ctrl->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", config);
+
+ msm_dp_write_link(ctrl, REG_DP_CONFIGURATION_CTRL, config);
+}
+
+static void msm_dp_ctrl_lane_mapping(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
+ u32 ln_mapping;
+
+ ln_mapping = ln_0 << LANE0_MAPPING_SHIFT;
+ ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT;
+ ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
+ ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
+
+ msm_dp_write_link(ctrl, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
+ ln_mapping);
}
static void msm_dp_ctrl_configure_source_params(struct msm_dp_ctrl_private *ctrl)
{
- u32 cc, tb;
+ u32 colorimetry_cfg, test_bits_depth, misc_val;
- msm_dp_catalog_ctrl_lane_mapping(ctrl->catalog);
- msm_dp_catalog_setup_peripheral_flush(ctrl->catalog);
+ msm_dp_ctrl_lane_mapping(ctrl);
+ msm_dp_setup_peripheral_flush(ctrl);
msm_dp_ctrl_config_ctrl(ctrl);
- tb = msm_dp_link_get_test_bits_depth(ctrl->link,
- ctrl->panel->msm_dp_mode.bpp);
- cc = msm_dp_link_get_colorimetry_config(ctrl->link);
- msm_dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb);
- msm_dp_panel_timing_cfg(ctrl->panel);
+ test_bits_depth = msm_dp_link_get_test_bits_depth(ctrl->link, ctrl->panel->msm_dp_mode.bpp);
+ colorimetry_cfg = msm_dp_link_get_colorimetry_config(ctrl->link);
+
+ misc_val = msm_dp_read_link(ctrl, REG_DP_MISC1_MISC0);
+
+ /* clear bpp bits */
+ misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
+ misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT;
+ misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT;
+ /* Configure clock to synchronous mode */
+ misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
+
+ drm_dbg_dp(ctrl->drm_dev, "misc settings = 0x%x\n", misc_val);
+ msm_dp_write_link(ctrl, REG_DP_MISC1_MISC0, misc_val);
+
+ msm_dp_panel_timing_cfg(ctrl->panel, ctrl->msm_dp_ctrl.wide_bus_en);
}
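For reference, a worked example of the MISC0 packing above. The bit positions are assumptions based on dp_reg.h (synchronous clock at bit 0, colorimetry at bits 1-4, test bits depth at bits 5-7), and the colorimetry/bpc codes are hypothetical:

  #include <stdio.h>
  #include <stdint.h>

  /* assumed REG_DP_MISC1_MISC0 layout, see dp_reg.h */
  #define DP_MISC0_SYNCHRONOUS_CLK	(1u << 0)
  #define DP_MISC0_COLORIMETRY_CFG_SHIFT	1
  #define DP_MISC0_TEST_BITS_DEPTH_SHIFT	5

  int main(void)
  {
  	uint32_t misc_val = 0;		/* pretend register read */
  	uint32_t colorimetry_cfg = 0x4;	/* hypothetical colorimetry code */
  	uint32_t test_bits_depth = 0x1;	/* hypothetical 8 bpc code */

  	misc_val &= ~(0x07u << DP_MISC0_TEST_BITS_DEPTH_SHIFT); /* clear bpp bits */
  	misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT;
  	misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT;
  	misc_val |= DP_MISC0_SYNCHRONOUS_CLK;

  	printf("misc settings = 0x%x\n", misc_val);	/* 0x29 here */
  	return 0;
  }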
/*
@@ -1003,8 +1275,9 @@ static void msm_dp_ctrl_setup_tr_unit(struct msm_dp_ctrl_private *ctrl)
pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
msm_dp_tu, valid_boundary, valid_boundary2);
- msm_dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
- msm_dp_tu, valid_boundary, valid_boundary2);
+ msm_dp_write_link(ctrl, REG_DP_VALID_BOUNDARY, valid_boundary);
+ msm_dp_write_link(ctrl, REG_DP_TU, msm_dp_tu);
+ msm_dp_write_link(ctrl, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
}
static int msm_dp_ctrl_wait4video_ready(struct msm_dp_ctrl_private *ctrl)
@@ -1113,6 +1386,30 @@ static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
return ret == 1;
}
+static int msm_dp_ctrl_set_pattern_state_bit(struct msm_dp_ctrl_private *ctrl,
+ u32 state_bit)
+{
+ int bit, ret;
+ u32 data;
+
+ bit = BIT(state_bit - 1);
+ drm_dbg_dp(ctrl->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, bit);
+
+ bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
+
+ /* Poll for mainlink ready status */
+ ret = readx_poll_timeout(readl, ctrl->link_base + REG_DP_MAINLINK_READY,
+ data, data & bit,
+ POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("set state_bit for link_train=%d failed\n", state_bit);
+ return ret;
+ }
+
+ return 0;
+}
+
static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
int *training_step, enum drm_dp_phy dp_phy)
{
@@ -1124,11 +1421,11 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
delay_us = drm_dp_read_clock_recovery_delay(ctrl->aux,
ctrl->panel->dpcd, dp_phy, false);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
*training_step = DP_TRAINING_1;
- ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1);
+ ret = msm_dp_ctrl_set_pattern_state_bit(ctrl, 1);
if (ret)
return ret;
msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
@@ -1242,7 +1539,7 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
delay_us = drm_dp_read_channel_eq_delay(ctrl->aux,
ctrl->panel->dpcd, dp_phy, false);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
*training_step = DP_TRAINING_2;
@@ -1257,7 +1554,7 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
state_ctrl_bit = 2;
}
- ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit);
+ ret = msm_dp_ctrl_set_pattern_state_bit(ctrl, state_ctrl_bit);
if (ret)
return ret;
@@ -1359,7 +1656,7 @@ static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
}
end:
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
return ret;
}
@@ -1369,7 +1666,7 @@ static int msm_dp_ctrl_setup_main_link(struct msm_dp_ctrl_private *ctrl,
{
int ret = 0;
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+ msm_dp_ctrl_mainlink_enable(ctrl);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
return ret;
@@ -1502,33 +1799,55 @@ static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl)
return ret;
}
-void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable)
+static void msm_dp_ctrl_enable_sdp(struct msm_dp_ctrl_private *ctrl)
{
- struct msm_dp_ctrl_private *ctrl;
+ /* trigger sdp */
+ msm_dp_write_link(ctrl, MMSS_DP_SDP_CFG3, UPDATE_SDP);
+ msm_dp_write_link(ctrl, MMSS_DP_SDP_CFG3, 0x0);
+}
- ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
+static void msm_dp_ctrl_psr_enter(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 cmd;
- msm_dp_catalog_ctrl_reset(ctrl->catalog);
+ cmd = msm_dp_read_link(ctrl, REG_PSR_CMD);
- /*
- * all dp controller programmable registers will not
- * be reset to default value after DP_SW_RESET
- * therefore interrupt mask bits have to be updated
- * to enable/disable interrupts
- */
- msm_dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
+ cmd &= ~(PSR_ENTER | PSR_EXIT);
+ cmd |= PSR_ENTER;
+
+ msm_dp_ctrl_enable_sdp(ctrl);
+ msm_dp_write_link(ctrl, REG_PSR_CMD, cmd);
+}
+
+static void msm_dp_ctrl_psr_exit(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 cmd;
+
+ cmd = msm_dp_read_link(ctrl, REG_PSR_CMD);
+
+ cmd &= ~(PSR_ENTER | PSR_EXIT);
+ cmd |= PSR_EXIT;
+
+ msm_dp_ctrl_enable_sdp(ctrl);
+ msm_dp_write_link(ctrl, REG_PSR_CMD, cmd);
}
void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl)
{
- u8 cfg;
struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl,
struct msm_dp_ctrl_private, msm_dp_ctrl);
+ u32 cfg;
if (!ctrl->panel->psr_cap.version)
return;
- msm_dp_catalog_ctrl_config_psr(ctrl->catalog);
+ /* enable PSR1 function */
+ cfg = msm_dp_read_link(ctrl, REG_PSR_CONFIG);
+ cfg |= PSR1_SUPPORTED;
+ msm_dp_write_link(ctrl, REG_PSR_CONFIG, cfg);
+
+ msm_dp_ctrl_config_psr_interrupt(ctrl);
+ msm_dp_ctrl_enable_sdp(ctrl);
cfg = DP_PSR_ENABLE;
drm_dp_dpcd_write(ctrl->aux, DP_PSR_EN_CFG, &cfg, 1);
@@ -1554,29 +1873,37 @@ void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enter)
*/
if (enter) {
reinit_completion(&ctrl->psr_op_comp);
- msm_dp_catalog_ctrl_set_psr(ctrl->catalog, true);
+ msm_dp_ctrl_psr_enter(ctrl);
if (!wait_for_completion_timeout(&ctrl->psr_op_comp,
PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES)) {
DRM_ERROR("PSR_ENTRY timedout\n");
- msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false);
+ msm_dp_ctrl_psr_exit(ctrl);
return;
}
msm_dp_ctrl_push_idle(msm_dp_ctrl);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
- msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false);
+ msm_dp_ctrl_psr_mainlink_disable(ctrl);
} else {
- msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true);
+ msm_dp_ctrl_psr_mainlink_enable(ctrl);
- msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_ctrl_psr_exit(ctrl);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, DP_STATE_CTRL_SEND_VIDEO);
msm_dp_ctrl_wait4video_ready(ctrl);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
}
}
+static void msm_dp_ctrl_phy_reset(struct msm_dp_ctrl_private *ctrl)
+{
+ msm_dp_write_ahb(ctrl, REG_DP_PHY_CTRL,
+ DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+ msm_dp_write_ahb(ctrl, REG_DP_PHY_CTRL, 0x0);
+}
+
void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl)
{
struct msm_dp_ctrl_private *ctrl;
@@ -1585,7 +1912,7 @@ void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ msm_dp_ctrl_phy_reset(ctrl);
phy_init(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
@@ -1600,7 +1927,7 @@ void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ msm_dp_ctrl_phy_reset(ctrl);
phy_exit(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
@@ -1611,7 +1938,7 @@ static int msm_dp_ctrl_reinitialize_mainlink(struct msm_dp_ctrl_private *ctrl)
struct phy *phy = ctrl->phy;
int ret = 0;
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
ctrl->phy_opts.dp.lanes = ctrl->link->link_params.num_lanes;
phy_configure(phy, &ctrl->phy_opts);
/*
@@ -1642,9 +1969,9 @@ static int msm_dp_ctrl_deinitialize_mainlink(struct msm_dp_ctrl_private *ctrl)
phy = ctrl->phy;
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
- msm_dp_catalog_ctrl_reset(ctrl->catalog);
+ msm_dp_ctrl_reset(&ctrl->msm_dp_ctrl);
dev_pm_opp_set_rate(ctrl->dev, 0);
msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
@@ -1676,13 +2003,96 @@ static int msm_dp_ctrl_link_maintenance(struct msm_dp_ctrl_private *ctrl)
msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, DP_STATE_CTRL_SEND_VIDEO);
ret = msm_dp_ctrl_wait4video_ready(ctrl);
end:
return ret;
}
+#define SCRAMBLER_RESET_COUNT_VALUE 0xFC
+
+static void msm_dp_ctrl_send_phy_pattern(struct msm_dp_ctrl_private *ctrl,
+ u32 pattern)
+{
+ u32 value = 0x0;
+
+ /* Make sure to clear the current pattern before starting a new one */
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0x0);
+
+ drm_dbg_dp(ctrl->drm_dev, "pattern: %#x\n", pattern);
+ switch (pattern) {
+ case DP_PHY_TEST_PATTERN_D10_2:
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
+ break;
+
+ case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ value &= ~(1 << 16);
+ msm_dp_write_link(ctrl, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ value |= SCRAMBLER_RESET_COUNT_VALUE;
+ msm_dp_write_link(ctrl, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_LEVELS,
+ DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+ break;
+
+ case DP_PHY_TEST_PATTERN_PRBS7:
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_PRBS7);
+ break;
+
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
+ /* 00111110000011111000001111100000 */
+ msm_dp_write_link(ctrl, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
+ 0x3E0F83E0);
+ /* 00001111100000111110000011111000 */
+ msm_dp_write_link(ctrl, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
+ 0x0F83E0F8);
+ /* 1111100000111110 */
+ msm_dp_write_link(ctrl, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
+ 0x0000F83E);
+ break;
+
+ case DP_PHY_TEST_PATTERN_CP2520:
+ value = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, value);
+
+ value = DP_HBR2_ERM_PATTERN;
+ msm_dp_write_link(ctrl, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ value |= SCRAMBLER_RESET_COUNT_VALUE;
+ msm_dp_write_link(ctrl, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_LEVELS,
+ DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+ value = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ value |= DP_MAINLINK_CTRL_ENABLE;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, value);
+ break;
+
+ case DP_PHY_TEST_PATTERN_SEL_MASK:
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL,
+ DP_MAINLINK_CTRL_ENABLE);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
+ break;
+
+ default:
+ drm_dbg_dp(ctrl->drm_dev,
+ "No valid test pattern requested: %#x\n", pattern);
+ break;
+ }
+}
+
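The three CUSTOM_PATTERN writes above carry the 80-bit PHY test pattern, a repeating run of five ones and five zeros. The sketch below reproduces the register values under the assumption, inferred from the values rather than stated in the code, that the sequence is packed LSB-first starting at bit 0 of REG0:

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
  	uint32_t reg[3] = { 0, 0, 0 };

  	/* 80-bit sequence: bits 0..4 of each decade low, bits 5..9 high */
  	for (int i = 0; i < 80; i++) {
  		uint32_t bit = (i % 10) >= 5;
  		reg[i / 32] |= bit << (i % 32);
  	}

  	/* prints 0x3e0f83e0 0x0f83e0f8 0x0000f83e, matching the writes above */
  	printf("REG0=%#010x REG1=%#010x REG2=%#010x\n", reg[0], reg[1], reg[2]);
  	return 0;
  }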
static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl)
{
bool success = false;
@@ -1697,11 +2107,11 @@ static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl)
DRM_ERROR("Failed to set v/p levels\n");
return false;
}
- msm_dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
+ msm_dp_ctrl_send_phy_pattern(ctrl, pattern_requested);
msm_dp_ctrl_update_phy_vx_px(ctrl, DP_PHY_DPRX);
msm_dp_link_send_test_response(ctrl->link);
- pattern_sent = msm_dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
+ pattern_sent = msm_dp_read_link(ctrl, REG_DP_MAINLINK_READY);
switch (pattern_sent) {
case MR_LINK_TRAINING1:
@@ -1898,7 +2308,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
break;
} else if (training_step == DP_TRAINING_1) {
/* link train_1 failed */
- if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
+ if (!msm_dp_aux_is_link_connected(ctrl->aux))
break;
drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
@@ -1923,7 +2333,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
}
} else if (training_step == DP_TRAINING_2) {
/* link train_2 failed */
- if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
+ if (!msm_dp_aux_is_link_connected(ctrl->aux))
break;
drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
@@ -1980,6 +2390,62 @@ static int msm_dp_ctrl_link_retrain(struct msm_dp_ctrl_private *ctrl)
return msm_dp_ctrl_setup_main_link(ctrl, &training_step);
}
+static void msm_dp_ctrl_config_msa(struct msm_dp_ctrl_private *ctrl,
+ u32 rate, u32 stream_rate_khz,
+ bool is_ycbcr_420)
+{
+ u32 pixel_m, pixel_n;
+ u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
+ u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE;
+ u32 const link_rate_hbr2 = 540000;
+ u32 const link_rate_hbr3 = 810000;
+ unsigned long den, num;
+
+ if (rate == link_rate_hbr3)
+ pixel_div = 6;
+ else if (rate == 162000 || rate == 270000)
+ pixel_div = 2;
+ else if (rate == link_rate_hbr2)
+ pixel_div = 4;
+ else
+ DRM_ERROR("Invalid pixel mux divider\n");
+
+ dispcc_input_rate = (rate * 10) / pixel_div;
+
+ rational_best_approximation(dispcc_input_rate, stream_rate_khz,
+ (unsigned long)(1 << 16) - 1,
+ (unsigned long)(1 << 16) - 1, &den, &num);
+
+ den = ~(den - num);
+ den = den & 0xFFFF;
+ pixel_m = num;
+ pixel_n = den;
+
+ mvid = (pixel_m & 0xFFFF) * 5;
+ nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+
+ if (nvid < nvid_fixed) {
+ u32 temp;
+
+ temp = (nvid_fixed / nvid) * nvid;
+ mvid = (nvid_fixed / nvid) * mvid;
+ nvid = temp;
+ }
+
+ if (is_ycbcr_420)
+ mvid /= 2;
+
+ if (link_rate_hbr2 == rate)
+ nvid *= 2;
+
+ if (link_rate_hbr3 == rate)
+ nvid *= 3;
+
+ drm_dbg_dp(ctrl->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+ msm_dp_write_link(ctrl, REG_DP_SOFTWARE_MVID, mvid);
+ msm_dp_write_link(ctrl, REG_DP_SOFTWARE_NVID, nvid);
+}
+
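A worked example of the MVID/NVID arithmetic above, for a hypothetical HBR2 link (540000 kHz) carrying a 148.5 MHz stream. Plain gcd reduction stands in for the kernel's rational_best_approximation(), which returns the exact reduced fraction whenever it fits the 16-bit limits, as it does here:

  #include <stdio.h>

  static unsigned long gcd_ul(unsigned long a, unsigned long b)
  {
  	while (b) { unsigned long t = a % b; a = b; b = t; }
  	return a;
  }

  int main(void)
  {
  	unsigned long rate = 540000;		/* HBR2, kHz */
  	unsigned long stream_rate_khz = 148500;	/* hypothetical pixel clock */
  	unsigned long pixel_div = 4;		/* HBR2 divider, as above */
  	unsigned long dispcc_input_rate = (rate * 10) / pixel_div;

  	/* stand-in for rational_best_approximation(): 100/11 fits 16 bits */
  	unsigned long g = gcd_ul(dispcc_input_rate, stream_rate_khz);
  	unsigned long den = dispcc_input_rate / g;	/* 100 */
  	unsigned long num = stream_rate_khz / g;	/* 11 */

  	den = ~(den - num) & 0xFFFF;
  	unsigned long pixel_m = num, pixel_n = den;

  	unsigned long mvid = (pixel_m & 0xFFFF) * 5;
  	unsigned long nvid = (0xFFFF & ~pixel_n) + (pixel_m & 0xFFFF);

  	/* scale up until nvid >= DP_LINK_CONSTANT_N_VALUE (0x8000) */
  	if (nvid < 0x8000) {
  		unsigned long k = 0x8000 / nvid;
  		mvid *= k;
  		nvid *= k;
  	}
  	nvid *= 2;	/* HBR2 doubling, as above */

  	printf("mvid=0x%lx, nvid=0x%lx\n", mvid, nvid);	/* 0x4641, 0xff78 */
  	return 0;
  }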
int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train)
{
int ret = 0;
@@ -2045,20 +2511,22 @@ int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train
msm_dp_ctrl_configure_source_params(ctrl);
- msm_dp_catalog_ctrl_config_msa(ctrl->catalog,
+ msm_dp_ctrl_config_msa(ctrl,
ctrl->link->link_params.rate,
pixel_rate_orig,
ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420);
+ msm_dp_panel_clear_dsc_dto(ctrl->panel);
+
msm_dp_ctrl_setup_tr_unit(ctrl);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, DP_STATE_CTRL_SEND_VIDEO);
ret = msm_dp_ctrl_wait4video_ready(ctrl);
if (ret)
return ret;
- mainlink_ready = msm_dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
+ mainlink_ready = msm_dp_ctrl_mainlink_ready(ctrl);
drm_dbg_dp(ctrl->drm_dev,
"mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
@@ -2074,12 +2542,12 @@ void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
+ msm_dp_panel_disable_vsc_sdp(ctrl->panel);
/* set dongle to D3 (power off) mode */
msm_dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
if (ctrl->stream_clks_on) {
clk_disable_unprepare(ctrl->pixel_clk);
@@ -2107,7 +2575,7 @@ void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
dev_pm_opp_set_rate(ctrl->dev, 0);
msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
@@ -2129,11 +2597,11 @@ void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
+ msm_dp_panel_disable_vsc_sdp(ctrl->panel);
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
- msm_dp_catalog_ctrl_reset(ctrl->catalog);
+ msm_dp_ctrl_reset(&ctrl->msm_dp_ctrl);
if (ctrl->stream_clks_on) {
clk_disable_unprepare(ctrl->pixel_clk);
@@ -2160,7 +2628,7 @@ irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
if (ctrl->panel->psr_cap.version) {
- isr = msm_dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog);
+ isr = msm_dp_ctrl_get_psr_interrupt(ctrl);
if (isr)
complete(&ctrl->psr_op_comp);
@@ -2175,8 +2643,7 @@ irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "PSR frame capture done\n");
}
- isr = msm_dp_catalog_ctrl_get_interrupt(ctrl->catalog);
-
+ isr = msm_dp_ctrl_get_interrupt(ctrl);
if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) {
drm_dbg_dp(ctrl->drm_dev, "dp_video_ready\n");
@@ -2190,6 +2657,11 @@ irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl)
ret = IRQ_HANDLED;
}
+ /* DP aux isr */
+ isr = msm_dp_ctrl_get_aux_interrupt(ctrl);
+ if (isr)
+ ret |= msm_dp_aux_isr(ctrl->aux, isr);
+
return ret;
}
@@ -2245,14 +2717,14 @@ static int msm_dp_ctrl_clk_init(struct msm_dp_ctrl *msm_dp_ctrl)
struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link,
struct msm_dp_panel *panel, struct drm_dp_aux *aux,
- struct msm_dp_catalog *catalog,
- struct phy *phy)
+ struct phy *phy,
+ void __iomem *ahb_base,
+ void __iomem *link_base)
{
struct msm_dp_ctrl_private *ctrl;
int ret;
- if (!dev || !panel || !aux ||
- !link || !catalog) {
+ if (!dev || !panel || !aux || !link) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
@@ -2283,9 +2755,10 @@ struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link
ctrl->panel = panel;
ctrl->aux = aux;
ctrl->link = link;
- ctrl->catalog = catalog;
ctrl->dev = dev;
ctrl->phy = phy;
+ ctrl->ahb_base = ahb_base;
+ ctrl->link_base = link_base;
ret = msm_dp_ctrl_clk_init(&ctrl->msm_dp_ctrl);
if (ret) {
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index b7abfedbf574..124b9b21bb7f 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -9,7 +9,6 @@
#include "dp_aux.h"
#include "dp_panel.h"
#include "dp_link.h"
-#include "dp_catalog.h"
struct msm_dp_ctrl {
bool wide_bus_en;
@@ -25,12 +24,15 @@ void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl);
irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl);
-struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link,
- struct msm_dp_panel *panel, struct drm_dp_aux *aux,
- struct msm_dp_catalog *catalog,
- struct phy *phy);
-
-void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable);
+struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev,
+ struct msm_dp_link *link,
+ struct msm_dp_panel *panel,
+ struct drm_dp_aux *aux,
+ struct phy *phy,
+ void __iomem *ahb_base,
+ void __iomem *link_base);
+
+void msm_dp_ctrl_reset(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_irq_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl);
@@ -41,4 +43,7 @@ void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl);
int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_enable_irq(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_disable_irq(struct msm_dp_ctrl *msm_dp_ctrl);
+
#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 22fd946ee201..cf3838fcd154 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -5,11 +5,12 @@
#define pr_fmt(fmt)"[drm-dp] %s: " fmt, __func__
+#ifdef CONFIG_DEBUG_FS
+
#include <linux/debugfs.h>
#include <drm/drm_connector.h>
#include <drm/drm_file.h>
-#include "dp_catalog.h"
#include "dp_aux.h"
#include "dp_ctrl.h"
#include "dp_debug.h"
@@ -235,3 +236,5 @@ int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel,
return 0;
}
+
+#endif
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a48e6db4f156..d87d47cc7ec3 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -19,7 +19,6 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "dp_ctrl.h"
-#include "dp_catalog.h"
#include "dp_aux.h"
#include "dp_reg.h"
#include "dp_link.h"
@@ -87,7 +86,6 @@ struct msm_dp_display_private {
struct drm_device *drm_dev;
- struct msm_dp_catalog *catalog;
struct drm_dp_aux *aux;
struct msm_dp_link *link;
struct msm_dp_panel *panel;
@@ -112,6 +110,18 @@ struct msm_dp_display_private {
bool wide_bus_supported;
struct msm_dp_audio *audio;
+
+ void __iomem *ahb_base;
+ size_t ahb_len;
+
+ void __iomem *aux_base;
+ size_t aux_len;
+
+ void __iomem *link_base;
+ size_t link_len;
+
+ void __iomem *p0_base;
+ size_t p0_len;
};
struct msm_dp_desc {
@@ -282,9 +292,7 @@ static int msm_dp_display_bind(struct device *dev, struct device *master,
struct drm_device *drm = priv->dev;
dp->msm_dp_display.drm_dev = drm;
- priv->dp[dp->id] = &dp->msm_dp_display;
-
-
+ priv->kms->dp[dp->id] = &dp->msm_dp_display;
dp->drm_dev = drm;
dp->aux->drm_dev = drm;
@@ -318,7 +326,7 @@ static void msm_dp_display_unbind(struct device *dev, struct device *master,
msm_dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
dp->aux->drm_dev = NULL;
- priv->dp[dp->id] = NULL;
+ priv->kms->dp[dp->id] = NULL;
}
static const struct component_ops msm_dp_display_comp_ops = {
@@ -462,7 +470,8 @@ static void msm_dp_display_host_init(struct msm_dp_display_private *dp)
dp->phy_initialized);
msm_dp_ctrl_core_clk_enable(dp->ctrl);
- msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
+ msm_dp_ctrl_reset(dp->ctrl);
+ msm_dp_ctrl_enable_irq(dp->ctrl);
msm_dp_aux_init(dp->aux);
dp->core_initialized = true;
}
@@ -473,7 +482,8 @@ static void msm_dp_display_host_deinit(struct msm_dp_display_private *dp)
dp->msm_dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
- msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
+ msm_dp_ctrl_reset(dp->ctrl);
+ msm_dp_ctrl_disable_irq(dp->ctrl);
msm_dp_aux_deinit(dp->aux);
msm_dp_ctrl_core_clk_disable(dp->ctrl);
dp->core_initialized = false;
@@ -754,21 +764,10 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
dp->msm_dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP);
if (rc) {
DRM_ERROR("failed to set phy submode, rc = %d\n", rc);
- dp->catalog = NULL;
goto error;
}
- dp->catalog = msm_dp_catalog_get(dev);
- if (IS_ERR(dp->catalog)) {
- rc = PTR_ERR(dp->catalog);
- DRM_ERROR("failed to initialize catalog, rc = %d\n", rc);
- dp->catalog = NULL;
- goto error;
- }
-
- dp->aux = msm_dp_aux_get(dev, dp->catalog,
- phy,
- dp->msm_dp_display.is_edp);
+ dp->aux = msm_dp_aux_get(dev, phy, dp->msm_dp_display.is_edp, dp->aux_base);
if (IS_ERR(dp->aux)) {
rc = PTR_ERR(dp->aux);
DRM_ERROR("failed to initialize aux, rc = %d\n", rc);
@@ -784,7 +783,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
goto error_link;
}
- dp->panel = msm_dp_panel_get(dev, dp->aux, dp->link, dp->catalog);
+ dp->panel = msm_dp_panel_get(dev, dp->aux, dp->link, dp->link_base, dp->p0_base);
if (IS_ERR(dp->panel)) {
rc = PTR_ERR(dp->panel);
DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
@@ -793,8 +792,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
}
dp->ctrl = msm_dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
- dp->catalog,
- phy);
+ phy, dp->ahb_base, dp->link_base);
if (IS_ERR(dp->ctrl)) {
rc = PTR_ERR(dp->ctrl);
DRM_ERROR("failed to initialize ctrl, rc = %d\n", rc);
@@ -802,7 +800,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
goto error_ctrl;
}
- dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->catalog);
+ dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->link_base);
if (IS_ERR(dp->audio)) {
rc = PTR_ERR(dp->audio);
pr_err("failed to initialize audio, rc = %d\n", rc);
@@ -1025,7 +1023,14 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
return;
}
- msm_dp_catalog_snapshot(msm_dp_display->catalog, disp_state);
+ msm_disp_snapshot_add_block(disp_state, msm_dp_display->ahb_len,
+ msm_dp_display->ahb_base, "dp_ahb");
+ msm_disp_snapshot_add_block(disp_state, msm_dp_display->aux_len,
+ msm_dp_display->aux_base, "dp_aux");
+ msm_disp_snapshot_add_block(disp_state, msm_dp_display->link_len,
+ msm_dp_display->link_base, "dp_link");
+ msm_disp_snapshot_add_block(disp_state, msm_dp_display->p0_len,
+ msm_dp_display->p0_base, "dp_p0");
mutex_unlock(&msm_dp_display->event_mutex);
}
@@ -1148,7 +1153,7 @@ static irqreturn_t msm_dp_display_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- hpd_isr_status = msm_dp_catalog_hpd_get_intr_status(dp->catalog);
+ hpd_isr_status = msm_dp_aux_get_hpd_intr_status(dp->aux);
if (hpd_isr_status & 0x0F) {
drm_dbg_dp(dp->drm_dev, "type=%d isr=0x%x\n",
@@ -1175,9 +1180,6 @@ static irqreturn_t msm_dp_display_irq_handler(int irq, void *dev_id)
/* DP controller isr */
ret |= msm_dp_ctrl_isr(dp->ctrl);
- /* DP aux isr */
- ret |= msm_dp_aux_isr(dp->aux);
-
return ret;
}
@@ -1275,6 +1277,80 @@ static int msm_dp_display_get_connector_type(struct platform_device *pdev,
return connector_type;
}
+static void __iomem *msm_dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
+{
+ struct resource *res;
+ void __iomem *base;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
+ if (!IS_ERR(base))
+ *len = resource_size(res);
+
+ return base;
+}
+
+#define DP_DEFAULT_AHB_OFFSET 0x0000
+#define DP_DEFAULT_AHB_SIZE 0x0200
+#define DP_DEFAULT_AUX_OFFSET 0x0200
+#define DP_DEFAULT_AUX_SIZE 0x0200
+#define DP_DEFAULT_LINK_OFFSET 0x0400
+#define DP_DEFAULT_LINK_SIZE 0x0C00
+#define DP_DEFAULT_P0_OFFSET 0x1000
+#define DP_DEFAULT_P0_SIZE 0x0400
+
+static int msm_dp_display_get_io(struct msm_dp_display_private *display)
+{
+ struct platform_device *pdev = display->msm_dp_display.pdev;
+
+ display->ahb_base = msm_dp_ioremap(pdev, 0, &display->ahb_len);
+ if (IS_ERR(display->ahb_base))
+ return PTR_ERR(display->ahb_base);
+
+ display->aux_base = msm_dp_ioremap(pdev, 1, &display->aux_len);
+ if (IS_ERR(display->aux_base)) {
+ if (display->aux_base != ERR_PTR(-EINVAL)) {
+ DRM_ERROR("unable to remap aux region: %pe\n", display->aux_base);
+ return PTR_ERR(display->aux_base);
+ }
+
+ /*
+ * The initial binding had a single reg, but in order to
+ * support variation in the sub-region sizes this was split.
+ * msm_dp_ioremap() will fail with -EINVAL here if only a single
+ * reg is specified, so fill in the sub-region offsets and
+ * lengths based on this single region.
+ */
+ if (display->ahb_len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
+ DRM_ERROR("legacy memory region not large enough\n");
+ return -EINVAL;
+ }
+
+ display->ahb_len = DP_DEFAULT_AHB_SIZE;
+ display->aux_base = display->ahb_base + DP_DEFAULT_AUX_OFFSET;
+ display->aux_len = DP_DEFAULT_AUX_SIZE;
+ display->link_base = display->ahb_base + DP_DEFAULT_LINK_OFFSET;
+ display->link_len = DP_DEFAULT_LINK_SIZE;
+ display->p0_base = display->ahb_base + DP_DEFAULT_P0_OFFSET;
+ display->p0_len = DP_DEFAULT_P0_SIZE;
+
+ return 0;
+ }
+
+ display->link_base = msm_dp_ioremap(pdev, 2, &display->link_len);
+ if (IS_ERR(display->link_base)) {
+ DRM_ERROR("unable to remap link region: %pe\n", display->link_base);
+ return PTR_ERR(display->link_base);
+ }
+
+ display->p0_base = msm_dp_ioremap(pdev, 3, &display->p0_len);
+ if (IS_ERR(display->p0_base)) {
+ DRM_ERROR("unable to remap p0 region: %pe\n", display->p0_base);
+ return PTR_ERR(display->p0_base);
+ }
+
+ return 0;
+}
+
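The fallback branch above carves the legacy single reg region into four sub-regions using the DP_DEFAULT_* constants; the 0x1400-byte minimum it checks is DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE. A small sketch of the resulting layout (offsets relative to the legacy base):

  #include <stdio.h>

  /* constants copied from dp_display.c above */
  #define DP_DEFAULT_AHB_OFFSET	0x0000
  #define DP_DEFAULT_AHB_SIZE	0x0200
  #define DP_DEFAULT_AUX_OFFSET	0x0200
  #define DP_DEFAULT_AUX_SIZE	0x0200
  #define DP_DEFAULT_LINK_OFFSET	0x0400
  #define DP_DEFAULT_LINK_SIZE	0x0C00
  #define DP_DEFAULT_P0_OFFSET	0x1000
  #define DP_DEFAULT_P0_SIZE	0x0400

  int main(void)
  {
  	const struct { const char *name; unsigned int off, len; } map[] = {
  		{ "ahb",  DP_DEFAULT_AHB_OFFSET,  DP_DEFAULT_AHB_SIZE  },
  		{ "aux",  DP_DEFAULT_AUX_OFFSET,  DP_DEFAULT_AUX_SIZE  },
  		{ "link", DP_DEFAULT_LINK_OFFSET, DP_DEFAULT_LINK_SIZE },
  		{ "p0",   DP_DEFAULT_P0_OFFSET,   DP_DEFAULT_P0_SIZE   },
  	};

  	for (int i = 0; i < 4; i++)
  		printf("%-4s base+%#06x len %#06x\n",
  		       map[i].name, map[i].off, map[i].len);
  	return 0;
  }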
static int msm_dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
@@ -1301,6 +1377,10 @@ static int msm_dp_display_probe(struct platform_device *pdev)
dp->msm_dp_display.is_edp =
(dp->msm_dp_display.connector_type == DRM_MODE_CONNECTOR_eDP);
+ rc = msm_dp_display_get_io(dp);
+ if (rc)
+ return rc;
+
rc = msm_dp_init_sub_modules(dp);
if (rc) {
DRM_ERROR("init sub module failed\n");
@@ -1363,7 +1443,7 @@ static int msm_dp_pm_runtime_suspend(struct device *dev)
if (dp->msm_dp_display.is_edp) {
msm_dp_display_host_phy_exit(dp);
- msm_dp_catalog_ctrl_hpd_disable(dp->catalog);
+ msm_dp_aux_hpd_disable(dp->aux);
}
msm_dp_display_host_deinit(dp);
@@ -1384,7 +1464,7 @@ static int msm_dp_pm_runtime_resume(struct device *dev)
*/
msm_dp_display_host_init(dp);
if (dp->msm_dp_display.is_edp) {
- msm_dp_catalog_ctrl_hpd_enable(dp->catalog);
+ msm_dp_aux_hpd_enable(dp->aux);
msm_dp_display_host_phy_init(dp);
}
@@ -1646,8 +1726,6 @@ void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge,
/* populate wide_bus_support to different layers */
msm_dp_display->ctrl->wide_bus_en =
msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported;
- msm_dp_display->catalog->wide_bus_en =
- msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported;
}
void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge)
@@ -1671,10 +1749,8 @@ void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge)
return;
}
- msm_dp_catalog_ctrl_hpd_enable(dp->catalog);
-
- /* enable HDP interrupts */
- msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true);
+ msm_dp_aux_hpd_enable(dp->aux);
+ msm_dp_aux_hpd_intr_enable(dp->aux);
msm_dp_display->internal_hpd = true;
mutex_unlock(&dp->event_mutex);
@@ -1687,9 +1763,9 @@ void msm_dp_bridge_hpd_disable(struct drm_bridge *bridge)
struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
mutex_lock(&dp->event_mutex);
- /* disable HDP interrupts */
- msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
- msm_dp_catalog_ctrl_hpd_disable(dp->catalog);
+
+ msm_dp_aux_hpd_intr_disable(dp->aux);
+ msm_dp_aux_hpd_disable(dp->aux);
msm_dp_display->internal_hpd = false;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index f222d7ccaa88..9a461ab2f32f 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -20,7 +20,8 @@
* @bridge: Pointer to drm bridge structure
* Returns: Bridge's 'is connected' status
*/
-static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+msm_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct msm_dp *dp;
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 92a9077959b3..66e1bbd80db3 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include "dp_reg.h"
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 4e8ab75c771b..15b7f6c7146e 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -4,6 +4,7 @@
*/
#include "dp_panel.h"
+#include "dp_reg.h"
#include "dp_utils.h"
#include <drm/drm_connector.h>
@@ -11,6 +12,10 @@
#include <drm/drm_of.h>
#include <drm/drm_print.h>
+#include <linux/io.h>
+
+#define DP_INTF_CONFIG_DATABUS_WIDEN BIT(4)
+
#define DP_MAX_NUM_DP_LANES 4
#define DP_LINK_RATE_HBR2 540000 /* kbytes */
@@ -20,10 +25,46 @@ struct msm_dp_panel_private {
struct msm_dp_panel msm_dp_panel;
struct drm_dp_aux *aux;
struct msm_dp_link *link;
- struct msm_dp_catalog *catalog;
+ void __iomem *link_base;
+ void __iomem *p0_base;
bool panel_on;
};
+static inline u32 msm_dp_read_link(struct msm_dp_panel_private *panel, u32 offset)
+{
+ return readl_relaxed(panel->link_base + offset);
+}
+
+static inline void msm_dp_write_link(struct msm_dp_panel_private *panel,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure link reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, panel->link_base + offset);
+}
+
+static inline void msm_dp_write_p0(struct msm_dp_panel_private *panel,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure interface reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, panel->p0_base + offset);
+}
+
+static inline u32 msm_dp_read_p0(struct msm_dp_panel_private *panel,
+ u32 offset)
+{
+ /*
+ * Register reads here do not need the write-ordering guarantee above,
+ * so this function uses readl_relaxed()
+ */
+ return readl_relaxed(panel->p0_base + offset);
+}
+
static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel)
{
ssize_t rlen;
@@ -172,7 +213,7 @@ int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
if (!msm_dp_panel->drm_edid) {
DRM_ERROR("panel edid read failed\n");
/* check edid read fail is due to unplug */
- if (!msm_dp_catalog_link_is_connected(panel->catalog)) {
+ if (!msm_dp_aux_is_link_connected(panel->aux)) {
rc = -ETIMEDOUT;
goto end;
}
@@ -252,9 +293,85 @@ void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel)
}
}
+static void msm_dp_panel_tpg_enable(struct msm_dp_panel *msm_dp_panel,
+ struct drm_display_mode *drm_mode)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 v_sync_width;
+ u32 hsync_ctl;
+ u32 display_hctl;
+
+ /* TPG config parameters */
+ hsync_period = drm_mode->htotal;
+ vsync_period = drm_mode->vtotal;
+
+ display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) *
+ hsync_period);
+ display_v_end = ((vsync_period - (drm_mode->vsync_start -
+ drm_mode->vdisplay))
+ * hsync_period) - 1;
+
+ display_v_start += drm_mode->htotal - drm_mode->hsync_start;
+ display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay);
+
+ hsync_start_x = drm_mode->htotal - drm_mode->hsync_start;
+ hsync_end_x = hsync_period - (drm_mode->hsync_start -
+ drm_mode->hdisplay) - 1;
+
+ v_sync_width = drm_mode->vsync_end - drm_mode->vsync_start;
+
+ hsync_ctl = (hsync_period << 16) |
+ (drm_mode->hsync_end - drm_mode->hsync_start);
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ msm_dp_write_p0(panel, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
+ hsync_period);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
+ hsync_period);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_HCTL, 0);
+ msm_dp_write_p0(panel, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
+ msm_dp_write_p0(panel, MMSS_INTF_DISPLAY_V_START_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_POLARITY_CTL, 0);
+
+ msm_dp_write_p0(panel, MMSS_DP_TPG_MAIN_CONTROL,
+ DP_TPG_CHECKERED_RECT_PATTERN);
+ msm_dp_write_p0(panel, MMSS_DP_TPG_VIDEO_CONFIG,
+ DP_TPG_VIDEO_CONFIG_BPP_8BIT |
+ DP_TPG_VIDEO_CONFIG_RGB);
+ msm_dp_write_p0(panel, MMSS_DP_BIST_ENABLE,
+ DP_BIST_ENABLE_DPBIST_EN);
+ msm_dp_write_p0(panel, MMSS_DP_TIMING_ENGINE_EN,
+ DP_TIMING_ENGINE_EN_EN);
+ drm_dbg_dp(panel->drm_dev, "%s: enabled tpg\n", __func__);
+}
+
+static void msm_dp_panel_tpg_disable(struct msm_dp_panel *msm_dp_panel)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+
+ msm_dp_write_p0(panel, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
+ msm_dp_write_p0(panel, MMSS_DP_BIST_ENABLE, 0x0);
+ msm_dp_write_p0(panel, MMSS_DP_TIMING_ENGINE_EN, 0x0);
+}
+
void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable)
{
- struct msm_dp_catalog *catalog;
struct msm_dp_panel_private *panel;
if (!msm_dp_panel) {
@@ -263,7 +380,6 @@ void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable)
}
panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- catalog = panel->catalog;
if (!panel->panel_on) {
drm_dbg_dp(panel->drm_dev,
@@ -272,18 +388,109 @@ void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable)
}
if (!enable) {
- msm_dp_catalog_panel_tpg_disable(catalog);
+ msm_dp_panel_tpg_disable(msm_dp_panel);
return;
}
- drm_dbg_dp(panel->drm_dev, "calling catalog tpg_enable\n");
- msm_dp_catalog_panel_tpg_enable(catalog, &panel->msm_dp_panel.msm_dp_mode.drm_mode);
+ drm_dbg_dp(panel->drm_dev, "calling panel's tpg_enable\n");
+ msm_dp_panel_tpg_enable(msm_dp_panel, &panel->msm_dp_panel.msm_dp_mode.drm_mode);
+}
+
+void msm_dp_panel_clear_dsc_dto(struct msm_dp_panel *msm_dp_panel)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+
+ msm_dp_write_p0(panel, MMSS_DP_DSC_DTO, 0x0);
+}
+
+static void msm_dp_panel_send_vsc_sdp(struct msm_dp_panel_private *panel, struct dp_sdp *vsc_sdp)
+{
+ u32 header[2];
+ u32 val;
+ int i;
+
+ msm_dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header);
+
+ msm_dp_write_link(panel, MMSS_DP_GENERIC0_0, header[0]);
+ msm_dp_write_link(panel, MMSS_DP_GENERIC0_1, header[1]);
+
+ for (i = 0; i < sizeof(vsc_sdp->db); i += 4) {
+ val = ((vsc_sdp->db[i]) | (vsc_sdp->db[i + 1] << 8) | (vsc_sdp->db[i + 2] << 16) |
+ (vsc_sdp->db[i + 3] << 24));
+ msm_dp_write_link(panel, MMSS_DP_GENERIC0_2 + i, val);
+ }
+}
+
+static void msm_dp_panel_update_sdp(struct msm_dp_panel_private *panel)
+{
+ u32 hw_revision = panel->msm_dp_panel.hw_revision;
+
+ if (hw_revision >= DP_HW_VERSION_1_0 &&
+ hw_revision < DP_HW_VERSION_1_2) {
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG3, UPDATE_SDP);
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG3, 0x0);
+ }
+}
+
+void msm_dp_panel_enable_vsc_sdp(struct msm_dp_panel *msm_dp_panel, struct dp_sdp *vsc_sdp)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+ u32 cfg, cfg2, misc;
+
+ cfg = msm_dp_read_link(panel, MMSS_DP_SDP_CFG);
+ cfg2 = msm_dp_read_link(panel, MMSS_DP_SDP_CFG2);
+ misc = msm_dp_read_link(panel, REG_DP_MISC1_MISC0);
+
+ cfg |= GEN0_SDP_EN;
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG, cfg);
+
+ cfg2 |= GENERIC0_SDPSIZE_VALID;
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG2, cfg2);
+
+ msm_dp_panel_send_vsc_sdp(panel, vsc_sdp);
+
+ /* indicates presence of VSC (BIT(6) of MISC1) */
+ misc |= DP_MISC1_VSC_SDP;
+
+ drm_dbg_dp(panel->drm_dev, "vsc sdp enable=1\n");
+
+ pr_debug("misc settings = 0x%x\n", misc);
+ msm_dp_write_link(panel, REG_DP_MISC1_MISC0, misc);
+
+ msm_dp_panel_update_sdp(panel);
+}
+
+void msm_dp_panel_disable_vsc_sdp(struct msm_dp_panel *msm_dp_panel)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+ u32 cfg, cfg2, misc;
+
+ cfg = msm_dp_read_link(panel, MMSS_DP_SDP_CFG);
+ cfg2 = msm_dp_read_link(panel, MMSS_DP_SDP_CFG2);
+ misc = msm_dp_read_link(panel, REG_DP_MISC1_MISC0);
+
+ cfg &= ~GEN0_SDP_EN;
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG, cfg);
+
+ cfg2 &= ~GENERIC0_SDPSIZE_VALID;
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG2, cfg2);
+
+ /* switch back to MSA */
+ misc &= ~DP_MISC1_VSC_SDP;
+
+ drm_dbg_dp(panel->drm_dev, "vsc sdp enable=0\n");
+
+ pr_debug("misc settings = 0x%x\n", misc);
+ msm_dp_write_link(panel, REG_DP_MISC1_MISC0, misc);
+
+ msm_dp_panel_update_sdp(panel);
}
static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
{
- struct msm_dp_catalog *catalog;
- struct msm_dp_panel_private *panel;
struct msm_dp_display_mode *msm_dp_mode;
struct drm_dp_vsc_sdp vsc_sdp_data;
struct dp_sdp vsc_sdp;
@@ -294,8 +501,6 @@ static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
return -EINVAL;
}
- panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- catalog = panel->catalog;
msm_dp_mode = &msm_dp_panel->msm_dp_mode;
memset(&vsc_sdp_data, 0, sizeof(vsc_sdp_data));
@@ -322,24 +527,23 @@ static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
return len;
}
- msm_dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp);
+ msm_dp_panel_enable_vsc_sdp(msm_dp_panel, &vsc_sdp);
return 0;
}
-int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel)
+int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel, bool wide_bus_en)
{
u32 data, total_ver, total_hor;
- struct msm_dp_catalog *catalog;
struct msm_dp_panel_private *panel;
struct drm_display_mode *drm_mode;
u32 width_blanking;
u32 sync_start;
u32 msm_dp_active;
u32 total;
+ u32 reg;
panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- catalog = panel->catalog;
drm_mode = &panel->msm_dp_panel.msm_dp_mode.drm_mode;
drm_dbg_dp(panel->drm_dev, "width=%d hporch= %d %d %d\n",
@@ -382,7 +586,20 @@ int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel)
msm_dp_active = data;
- msm_dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, msm_dp_active);
+ msm_dp_write_link(panel, REG_DP_TOTAL_HOR_VER, total);
+ msm_dp_write_link(panel, REG_DP_START_HOR_VER_FROM_SYNC, sync_start);
+ msm_dp_write_link(panel, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking);
+ msm_dp_write_link(panel, REG_DP_ACTIVE_HOR_VER, msm_dp_active);
+
+ reg = msm_dp_read_p0(panel, MMSS_DP_INTF_CONFIG);
+ if (wide_bus_en)
+ reg |= DP_INTF_CONFIG_DATABUS_WIDEN;
+ else
+ reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN;
+
+ drm_dbg_dp(panel->drm_dev, "wide_bus_en=%d reg=%#x\n", wide_bus_en, reg);
+
+ msm_dp_write_p0(panel, MMSS_DP_INTF_CONFIG, reg);
if (msm_dp_panel->msm_dp_mode.out_fmt_is_yuv_420)
msm_dp_panel_setup_vsc_sdp_yuv_420(msm_dp_panel);
@@ -486,13 +703,15 @@ static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel)
}
struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
- struct msm_dp_link *link, struct msm_dp_catalog *catalog)
+ struct msm_dp_link *link,
+ void __iomem *link_base,
+ void __iomem *p0_base)
{
struct msm_dp_panel_private *panel;
struct msm_dp_panel *msm_dp_panel;
int ret;
- if (!dev || !catalog || !aux || !link) {
+ if (!dev || !aux || !link) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
@@ -503,8 +722,9 @@ struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux
panel->dev = dev;
panel->aux = aux;
- panel->catalog = catalog;
panel->link = link;
+ panel->link_base = link_base;
+ panel->p0_base = p0_base;
msm_dp_panel = &panel->msm_dp_panel;
msm_dp_panel->max_bw_code = DP_LINK_BW_8_1;
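The new dp_panel MMIO helpers above pair readl_relaxed() on the read path
with fully ordered writel() on the write path: writel() includes the barrier
that guarantees a register write reaches the device before any subsequent
memory operation. A minimal stand-alone sketch of the same pattern, using
hypothetical names:

	#include <linux/io.h>
	#include <linux/types.h>

	struct mmio_block {
		void __iomem *base;
	};

	/* Relaxed read: no barrier; fine when only the CPU consumes the
	 * value and nothing depends on read ordering. */
	static inline u32 blk_read(struct mmio_block *b, u32 offset)
	{
		return readl_relaxed(b->base + offset);
	}

	/* Ordered write: writel() issues the barrier, so the device
	 * observes this write before anything the CPU does next. */
	static inline void blk_write(struct mmio_block *b, u32 offset, u32 val)
	{
		writel(val, b->base + offset);
	}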
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 4906f4f09f24..d2cf401506dc 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -6,6 +6,7 @@
#ifndef _DP_PANEL_H_
#define _DP_PANEL_H_
+#include <drm/drm_modes.h>
#include <drm/msm_drm.h>
#include "dp_aux.h"
@@ -38,6 +39,7 @@ struct msm_dp_panel {
struct msm_dp_panel_psr psr_cap;
bool video_test;
bool vsc_sdp_supported;
+ u32 hw_revision;
u32 max_dp_lanes;
u32 max_dp_link_rate;
@@ -47,7 +49,7 @@ struct msm_dp_panel {
int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_deinit(struct msm_dp_panel *msm_dp_panel);
-int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel);
+int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel, bool wide_bus_en);
int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector);
u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_max_bpp,
@@ -57,6 +59,11 @@ int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel,
void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel);
void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable);
+void msm_dp_panel_clear_dsc_dto(struct msm_dp_panel *msm_dp_panel);
+
+void msm_dp_panel_enable_vsc_sdp(struct msm_dp_panel *msm_dp_panel, struct dp_sdp *vsc_sdp);
+void msm_dp_panel_disable_vsc_sdp(struct msm_dp_panel *msm_dp_panel);
+
/**
* is_link_rate_valid() - validates the link rate
* @lane_rate: link rate requested by the sink
@@ -85,6 +92,8 @@ static inline bool is_lane_count_valid(u32 lane_count)
}
struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
- struct msm_dp_link *link, struct msm_dp_catalog *catalog);
+ struct msm_dp_link *link,
+ void __iomem *link_base,
+ void __iomem *p0_base);
void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel);
#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 3835c7f5cb98..7c44d4e2cf13 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -11,6 +11,8 @@
/* DP_TX Registers */
#define REG_DP_HW_VERSION (0x00000000)
+#define DP_HW_VERSION_1_0 0x10000000
+#define DP_HW_VERSION_1_2 0x10020000
#define REG_DP_SW_RESET (0x00000010)
#define DP_SW_RESET (0x00000001)
@@ -21,8 +23,25 @@
#define REG_DP_CLK_CTRL (0x00000018)
#define REG_DP_CLK_ACTIVE (0x0000001C)
+
#define REG_DP_INTR_STATUS (0x00000020)
+#define DP_INTR_HPD BIT(0)
+#define DP_INTR_AUX_XFER_DONE BIT(3)
+#define DP_INTR_WRONG_ADDR BIT(6)
+#define DP_INTR_TIMEOUT BIT(9)
+#define DP_INTR_NACK_DEFER BIT(12)
+#define DP_INTR_WRONG_DATA_CNT BIT(15)
+#define DP_INTR_I2C_NACK BIT(18)
+#define DP_INTR_I2C_DEFER BIT(21)
+#define DP_INTR_PLL_UNLOCKED BIT(24)
+#define DP_INTR_AUX_ERROR BIT(27)
+
#define REG_DP_INTR_STATUS2 (0x00000024)
+#define DP_INTR_READY_FOR_VIDEO BIT(0)
+#define DP_INTR_IDLE_PATTERN_SENT BIT(3)
+#define DP_INTR_FRAME_END BIT(6)
+#define DP_INTR_CRC_UPDATED BIT(9)
+
#define REG_DP_INTR_STATUS3 (0x00000028)
#define REG_DP_INTR_STATUS4 (0x0000002C)
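The DP_INTR_* status bits above advance in steps of three. That spacing
matches an interrupt layout where each source also owns an ack bit at +1 and
a mask bit at +2; a sketch under that assumption, with hypothetical helper
names:

	/* Assumption: each interrupt source owns a 3-bit group in the
	 * interrupt registers -- status bit, ack at +1, mask at +2. */
	#define DP_INTR_ACK_SHIFT	1
	#define DP_INTR_MASK_SHIFT	2

	static inline u32 dp_intr_ack_bits(u32 status)
	{
		return status << DP_INTR_ACK_SHIFT;
	}

	static inline u32 dp_intr_mask_bits(u32 status)
	{
		return status << DP_INTR_MASK_SHIFT;
	}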
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 296215877613..d8bb40ef820e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -136,7 +136,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
msm_dsi->next_bridge = ext_bridge;
}
- priv->dsi[msm_dsi->id] = msm_dsi;
+ priv->kms->dsi[msm_dsi->id] = msm_dsi;
return 0;
}
@@ -148,7 +148,7 @@ static void dsi_unbind(struct device *dev, struct device *master,
struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
msm_dsi_tx_buf_free(msm_dsi->host);
- priv->dsi[msm_dsi->id] = NULL;
+ priv->kms->dsi[msm_dsi->id] = NULL;
}
static const struct component_ops dsi_ops = {
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 87496db203d6..93c028a122f3 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -98,6 +98,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);
int msm_dsi_runtime_suspend(struct device *dev);
int msm_dsi_runtime_resume(struct device *dev);
int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_set_rate_6g_v2_9(struct msm_dsi_host *msm_host);
int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
@@ -115,6 +116,7 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
+int dsi_clk_init_6g_v2_9(struct msm_dsi_host *msm_host);
int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 7675558ae2e5..fed8e9b67011 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -273,6 +273,18 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
.calc_clk_rate = dsi_calc_clk_rate_6g,
};
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_9_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_6g_v2_9,
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = dsi_clk_init_6g_v2_9,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064,
&apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
@@ -318,6 +330,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sm8550_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_8_0,
&sm8650_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_9_0,
+ &sm8650_dsi_cfg, &msm_dsi_6g_v2_9_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
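msm_dsi_cfg_get() resolves a (major, minor) pair against the
dsi_cfg_handlers[] table that the new V2_9_0 entry extends. A hedged sketch
of that lookup, assuming the handler struct carries major/minor fields as
the positional initializers suggest:

	static const struct msm_dsi_cfg_handler *
	lookup_cfg(u32 major, u32 minor)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(dsi_cfg_handlers); i++) {
			if (dsi_cfg_handlers[i].major == major &&
			    dsi_cfg_handlers[i].minor == minor)
				return &dsi_cfg_handlers[i];
		}

		return NULL;	/* unknown DSI block revision */
	}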
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 65b0705fac0e..38f303f2ed04 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -31,6 +31,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000
#define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000
#define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000
+#define MSM_DSI_6G_VER_MINOR_V2_9_0 0x20090000
#define MSM_DSI_V2_VER_MINOR_8064 0x0
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4d75529c0e85..e0de545d4077 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -119,6 +119,15 @@ struct msm_dsi_host {
struct clk *pixel_clk;
struct clk *byte_intf_clk;
+ /*
+ * Clocks which need to be properly parented between DISPCC and DSI PHY
+ * PLL:
+ */
+ struct clk *byte_src_clk;
+ struct clk *pixel_src_clk;
+ struct clk *dsi_pll_byte_clk;
+ struct clk *dsi_pll_pixel_clk;
+
unsigned long byte_clk_rate;
unsigned long byte_intf_clk_rate;
unsigned long pixel_clk_rate;
@@ -143,7 +152,7 @@ struct msm_dsi_host {
/* DSI 6G TX buffer */
struct drm_gem_object *tx_gem_obj;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
/* DSI v2 TX buffer */
void *tx_buf;
@@ -269,6 +278,38 @@ int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
return ret;
}
+int dsi_clk_init_6g_v2_9(struct msm_dsi_host *msm_host)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ int ret;
+
+ ret = dsi_clk_init_6g_v2(msm_host);
+ if (ret)
+ return ret;
+
+ msm_host->byte_src_clk = devm_clk_get(dev, "byte_src");
+ if (IS_ERR(msm_host->byte_src_clk))
+ return dev_err_probe(dev, PTR_ERR(msm_host->byte_src_clk),
+ "can't get byte_src clock\n");
+
+ msm_host->dsi_pll_byte_clk = devm_clk_get(dev, "dsi_pll_byte");
+ if (IS_ERR(msm_host->dsi_pll_byte_clk))
+ return dev_err_probe(dev, PTR_ERR(msm_host->dsi_pll_byte_clk),
+ "can't get dsi_pll_byte clock\n");
+
+ msm_host->pixel_src_clk = devm_clk_get(dev, "pixel_src");
+ if (IS_ERR(msm_host->pixel_src_clk))
+ return dev_err_probe(dev, PTR_ERR(msm_host->pixel_src_clk),
+ "can't get pixel_src clock\n");
+
+ msm_host->dsi_pll_pixel_clk = devm_clk_get(dev, "dsi_pll_pixel");
+ if (IS_ERR(msm_host->dsi_pll_pixel_clk))
+ return dev_err_probe(dev, PTR_ERR(msm_host->dsi_pll_pixel_clk),
+ "can't get dsi_pll_pixel clock\n");
+
+ return 0;
+}
+
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
@@ -370,6 +411,26 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
return 0;
}
+int dsi_link_clk_set_rate_6g_v2_9(struct msm_dsi_host *msm_host)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ int ret;
+
+ /*
+ * DSI PHY PLLs have to be enabled to allow reparenting to them, so we
+ * cannot use assigned-clock-parents.
+ */
+ ret = clk_set_parent(msm_host->byte_src_clk, msm_host->dsi_pll_byte_clk);
+ if (ret)
+ dev_err(dev, "Failed to parent byte_src -> dsi_pll_byte: %d\n", ret);
+
+ ret = clk_set_parent(msm_host->pixel_src_clk, msm_host->dsi_pll_pixel_clk);
+ if (ret)
+ dev_err(dev, "Failed to parent pixel_src -> dsi_pll_pixel: %d\n", ret);
+
+ return dsi_link_clk_set_rate_6g(msm_host);
+}
+
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -1146,10 +1207,10 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
uint64_t iova;
u8 *data;
- msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);
+ msm_host->vm = drm_gpuvm_get(priv->kms->vm);
data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
- msm_host->aspace,
+ msm_host->vm,
&msm_host->tx_gem_obj, &iova);
if (IS_ERR(data)) {
@@ -1193,10 +1254,10 @@ void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
return;
if (msm_host->tx_gem_obj) {
- msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
- msm_gem_address_space_put(msm_host->aspace);
+ msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->vm);
+ drm_gpuvm_put(msm_host->vm);
msm_host->tx_gem_obj = NULL;
- msm_host->aspace = NULL;
+ msm_host->vm = NULL;
}
if (msm_host->tx_buf)
@@ -1327,7 +1388,7 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
return -EINVAL;
return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, dma_base);
+ priv->kms->vm, dma_base);
}
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
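dsi_link_clk_set_rate_6g_v2_9() above encodes an ordering constraint: the
PHY PLL must already be running before the DISPCC mux may select it, so
reparenting happens at set-rate time rather than via DT
assigned-clock-parents (which are applied too early, while the PLL is still
off). A sketch of that step with hypothetical names:

	#include <linux/clk.h>

	/* Switch both link-clock muxes over to the PHY PLL outputs.
	 * Callers must ensure the PLLs are enabled first. */
	static int reparent_link_clocks(struct clk *byte_mux, struct clk *byte_pll,
					struct clk *pixel_mux, struct clk *pixel_pll)
	{
		int ret;

		ret = clk_set_parent(byte_mux, byte_pll);
		if (ret)
			return ret;

		return clk_set_parent(pixel_mux, pixel_pll);
	}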
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 5973d7325699..221f12db5f8b 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -597,6 +597,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_4nm_8550_cfgs },
{ .compatible = "qcom,sm8650-dsi-phy-4nm",
.data = &dsi_phy_4nm_8650_cfgs },
+ { .compatible = "qcom,sm8750-dsi-phy-3nm",
+ .data = &dsi_phy_3nm_8750_cfgs },
#endif
{}
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 7ea608f620fe..c558f8df1684 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -63,6 +63,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_3nm_8750_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_zero;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index c19890358b74..8c98f91a5930 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -51,6 +51,8 @@
#define DSI_PHY_7NM_QUIRK_V4_3 BIT(3)
/* Hardware is V5.2 */
#define DSI_PHY_7NM_QUIRK_V5_2 BIT(4)
+/* Hardware is V7.0 */
+#define DSI_PHY_7NM_QUIRK_V7_0 BIT(5)
struct dsi_pll_config {
bool enable_ssc;
@@ -129,9 +131,30 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
dec_multiple = div_u64(pll_freq * multiplier, divider);
dec = div_u64_rem(dec_multiple, multiplier, &frac);
- if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)
+ if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) {
config->pll_clock_inverters = 0x28;
- else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ if (pll_freq < 163000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq < 175000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq < 325000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq < 350000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq < 650000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq < 700000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq < 1300000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq < 2500000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq < 4000000000ULL)
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
+ } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (pll_freq <= 1300000000ULL)
config->pll_clock_inverters = 0xa0;
else if (pll_freq <= 2500000000ULL)
@@ -250,7 +273,8 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
vco_config_1 = 0x01;
}
- if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
if (pll->vco_current_rate < 1557000000ULL)
vco_config_1 = 0x08;
else
@@ -620,6 +644,7 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+ void __iomem *base = phy->base;
u32 data = 0x0; /* internal PLL */
DBG("DSI PLL%d", pll_7nm->phy->id);
@@ -629,6 +654,9 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
break;
case MSM_DSI_PHY_MASTER:
pll_7nm->slave = pll_7nm_list[(pll_7nm->phy->id + 1) % DSI_MAX];
+ /* v7.0: Enable ATB_EN0 and alternate clock output to external phy */
+ if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)
+ writel(0x07, base + REG_DSI_7nm_PHY_CMN_CTRL_5);
break;
case MSM_DSI_PHY_SLAVE:
data = 0x1; /* external PLL */
@@ -907,7 +935,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* Request for REFGEN READY */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
- (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
writel(0x1, phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10);
udelay(500);
}
@@ -941,7 +970,20 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
lane_ctrl0 = 0x1f;
}
- if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ if (phy->cphy_mode) {
+ /* TODO: different for second phy */
+ vreg_ctrl_0 = 0x57;
+ vreg_ctrl_1 = 0x41;
+ glbl_rescode_top_ctrl = 0x3d;
+ glbl_rescode_bot_ctrl = 0x38;
+ } else {
+ vreg_ctrl_0 = 0x56;
+ vreg_ctrl_1 = 0x19;
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3c : 0x03;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3c;
+ }
+ } else if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (phy->cphy_mode) {
vreg_ctrl_0 = 0x45;
vreg_ctrl_1 = 0x41;
@@ -1003,6 +1045,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* program CMN_CTRL_4 for minor_ver 2 chipsets */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0) ||
(readl(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0) & (0xf0)) == 0x20)
writel(0x04, base + REG_DSI_7nm_PHY_CMN_CTRL_4);
@@ -1117,7 +1160,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
/* Turn off REFGEN Vote */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
- (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
writel(0x0, base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10);
wmb();
/* Delay to ensure HW removes vote before PHY shut down */
@@ -1384,3 +1428,26 @@ const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs = {
.num_dsi_phy = 2,
.quirks = DSI_PHY_7NM_QUIRK_V5_2,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_3nm_8750_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_98000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V7_0,
+};
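The v7.0 pll_clock_inverters ladder added to dsi_pll_calc_dec_frac() above
is a pure frequency-band lookup; an equivalent table-driven form (a sketch,
not the patch's code) makes the bands easier to audit:

	static const struct {
		u64 max_freq;	/* exclusive upper bound */
		u8 inverters;
	} v7_0_inverter_bands[] = {
		{  163000000ULL, 0xa0 },
		{  175000000ULL, 0x20 },
		{  325000000ULL, 0xa0 },
		{  350000000ULL, 0x20 },
		{  650000000ULL, 0xa0 },
		{  700000000ULL, 0x20 },
		{ 1300000000ULL, 0xa0 },
		{ 2500000000ULL, 0x20 },
		{ 4000000000ULL, 0x00 },
	};

	static u8 v7_0_pll_clock_inverters(u64 pll_freq)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(v7_0_inverter_bands); i++)
			if (pll_freq < v7_0_inverter_bands[i].max_freq)
				return v7_0_inverter_bands[i].inverters;

		return 0x40;	/* >= 4 GHz */
	}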
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 2fd388b892dc..5afac09c0d33 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -15,6 +15,7 @@
#include <drm/drm_of.h>
#include <drm/display/drm_hdmi_state_helper.h>
+#include "msm_kms.h"
#include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -244,7 +245,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
err = msm_hdmi_init(hdmi);
if (err)
return err;
- priv->hdmi = hdmi;
+ priv->kms->hdmi = hdmi;
return 0;
}
@@ -254,9 +255,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
{
struct msm_drm_private *priv = dev_get_drvdata(master);
- if (priv->hdmi) {
- msm_hdmi_destroy(priv->hdmi);
- priv->hdmi = NULL;
+ if (priv->kms->hdmi) {
+ msm_hdmi_destroy(priv->kms->hdmi);
+ priv->kms->hdmi = NULL;
}
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index d5e572d10d6a..02cfd46df594 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -200,12 +200,12 @@ struct hdmi_codec_daifmt;
struct hdmi_codec_params;
int msm_hdmi_audio_update(struct hdmi *hdmi);
-int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+int msm_hdmi_bridge_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params);
-void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge);
+void msm_hdmi_bridge_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/*
* hdmi bridge:
@@ -215,7 +215,7 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi);
void msm_hdmi_hpd_irq(struct drm_bridge *bridge);
enum drm_connector_status msm_hdmi_bridge_detect(
- struct drm_bridge *bridge);
+ struct drm_bridge *bridge, struct drm_connector *connector);
void msm_hdmi_hpd_enable(struct drm_bridge *bridge);
void msm_hdmi_hpd_disable(struct drm_bridge *bridge);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index b9ec14ef2c20..d9a8dc9dae8f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -122,8 +122,8 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
return 0;
}
-int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+int msm_hdmi_bridge_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
@@ -163,8 +163,8 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
return msm_hdmi_audio_update(hdmi);
}
-void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge)
+void msm_hdmi_bridge_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 53a7ce8cc7bc..46fd58646d32 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -475,7 +475,7 @@ msm_hdmi_hotplug_work(struct work_struct *work)
container_of(work, struct hdmi_bridge, hpd_work);
struct drm_bridge *bridge = &hdmi_bridge->base;
- drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge));
+ drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge, hdmi_bridge->hdmi->connector));
}
/* initialize bridge */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
index 407e6c449ee0..114b0d507700 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
@@ -177,8 +177,8 @@ static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
connector_status_disconnected;
}
-enum drm_connector_status msm_hdmi_bridge_detect(
- struct drm_bridge *bridge)
+enum drm_connector_status
+msm_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 6af72162cda4..bbda865addae 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -117,6 +117,36 @@ static const struct file_operations msm_gpu_fops = {
.release = msm_gpu_release,
};
+#ifdef CONFIG_DRM_MSM_KMS
+static int msm_fb_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_framebuffer *fb, *fbdev_fb = NULL;
+
+ if (dev->fb_helper && dev->fb_helper->fb) {
+ seq_puts(m, "fbcon ");
+ fbdev_fb = dev->fb_helper->fb;
+ msm_framebuffer_describe(fbdev_fb, m);
+ }
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ if (fb == fbdev_fb)
+ continue;
+
+ seq_puts(m, "user ");
+ msm_framebuffer_describe(fb, m);
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static struct drm_info_list msm_kms_debugfs_list[] = {
+ { "fb", msm_fb_show },
+};
+
/*
* Display Snapshot:
*/
@@ -180,6 +210,27 @@ static const struct file_operations msm_kms_fops = {
.release = msm_kms_release,
};
+static void msm_debugfs_kms_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ drm_debugfs_create_files(msm_kms_debugfs_list,
+ ARRAY_SIZE(msm_kms_debugfs_list),
+ minor->debugfs_root, minor);
+ debugfs_create_file("kms", 0400, minor->debugfs_root,
+ dev, &msm_kms_fops);
+
+ if (priv->kms->funcs->debugfs_init)
+ priv->kms->funcs->debugfs_init(priv->kms, minor);
+}
+#else /* ! CONFIG_DRM_MSM_KMS */
+static void msm_debugfs_kms_init(struct drm_minor *minor)
+{
+}
+#endif
+
/*
* Other debugfs:
*/
@@ -267,47 +318,23 @@ static int msm_mm_show(struct seq_file *m, void *arg)
return 0;
}
-static int msm_fb_show(struct seq_file *m, void *arg)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_framebuffer *fb, *fbdev_fb = NULL;
-
- if (dev->fb_helper && dev->fb_helper->fb) {
- seq_printf(m, "fbcon ");
- fbdev_fb = dev->fb_helper->fb;
- msm_framebuffer_describe(fbdev_fb, m);
- }
-
- mutex_lock(&dev->mode_config.fb_lock);
- list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
- if (fb == fbdev_fb)
- continue;
-
- seq_printf(m, "user ");
- msm_framebuffer_describe(fb, m);
- }
- mutex_unlock(&dev->mode_config.fb_lock);
-
- return 0;
-}
-
static struct drm_info_list msm_debugfs_list[] = {
{"gem", msm_gem_show},
{ "mm", msm_mm_show },
};
-static struct drm_info_list msm_kms_debugfs_list[] = {
- { "fb", msm_fb_show },
-};
-
static int late_init_minor(struct drm_minor *minor)
{
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
int ret;
if (!minor)
return 0;
+ dev = minor->dev;
+ priv = dev->dev_private;
+
+ if (!priv->gpu_pdev)
+ return 0;
+
ret = msm_rd_debugfs_init(minor);
if (ret) {
DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
@@ -375,20 +402,12 @@ void msm_debugfs_init(struct drm_minor *minor)
if (priv->gpu_pdev)
msm_debugfs_gpu_init(minor);
- if (priv->kms) {
- drm_debugfs_create_files(msm_kms_debugfs_list,
- ARRAY_SIZE(msm_kms_debugfs_list),
- minor->debugfs_root, minor);
- debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
- dev, &msm_kms_fops);
- }
+ if (priv->kms)
+ msm_debugfs_kms_init(minor);
debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
dev, &shrink_fops);
- if (priv->kms && priv->kms->funcs->debugfs_init)
- priv->kms->funcs->debugfs_init(priv->kms, minor);
-
fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
&fail_gem_alloc);
fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d007687c2446..9dcc7a596a11 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -11,7 +11,6 @@
#include <linux/of_address.h>
#include <linux/uaccess.h>
-#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -41,17 +40,12 @@
* - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
* - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
* - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA
+ * - 1.13.0 - Add VM_BIND
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 12
+#define MSM_VERSION_MINOR 13
#define MSM_VERSION_PATCHLEVEL 0
-static void msm_deinit_vram(struct drm_device *ddev);
-
-static char *vram = "16m";
-MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
-module_param(vram, charp, 0);
-
bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);
@@ -60,10 +54,19 @@ static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
+static bool separate_gpu_kms;
+MODULE_PARM_DESC(separate_gpu_kms, "Use separate DRM device for the GPU (0=single DRM device for both GPU and display (default), 1=two DRM devices)");
+module_param(separate_gpu_kms, bool, 0400);
+
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
-static int msm_drm_uninit(struct device *dev)
+bool msm_gpu_no_components(void)
+{
+ return separate_gpu_kms;
+}
+
+static int msm_drm_uninit(struct device *dev, const struct component_ops *gpu_ops)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_drm_private *priv = platform_get_drvdata(pdev);
@@ -79,16 +82,9 @@ static int msm_drm_uninit(struct device *dev)
if (ddev->registered) {
drm_dev_unregister(ddev);
if (priv->kms)
- drm_atomic_helper_shutdown(ddev);
+ msm_drm_kms_unregister(dev);
}
- /* We must cancel and cleanup any pending vblank enable/disable
- * work before msm_irq_uninstall() to avoid work re-enabling an
- * irq after uninstall has disabled it.
- */
-
- flush_workqueue(priv->wq);
-
msm_gem_shrinker_cleanup(ddev);
msm_perf_debugfs_cleanup(priv);
@@ -97,120 +93,19 @@ static int msm_drm_uninit(struct device *dev)
if (priv->kms)
msm_drm_kms_uninit(dev);
- msm_deinit_vram(ddev);
-
- component_unbind_all(dev, ddev);
+ if (gpu_ops)
+ gpu_ops->unbind(dev, dev, NULL);
+ else
+ component_unbind_all(dev, ddev);
ddev->dev_private = NULL;
drm_dev_put(ddev);
- destroy_workqueue(priv->wq);
-
return 0;
}
-bool msm_use_mmu(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
-
- /*
- * a2xx comes with its own MMU
- * On other platforms IOMMU can be declared specified either for the
- * MDP/DPU device or for its parent, MDSS device.
- */
- return priv->is_a2xx ||
- device_iommu_mapped(dev->dev) ||
- device_iommu_mapped(dev->dev->parent);
-}
-
-static int msm_init_vram(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct device_node *node;
- unsigned long size = 0;
- int ret = 0;
-
- /* In the device-tree world, we could have a 'memory-region'
- * phandle, which gives us a link to our "vram". Allocating
- * is all nicely abstracted behind the dma api, but we need
- * to know the entire size to allocate it all in one go. There
- * are two cases:
- * 1) device with no IOMMU, in which case we need exclusive
- * access to a VRAM carveout big enough for all gpu
- * buffers
- * 2) device with IOMMU, but where the bootloader puts up
- * a splash screen. In this case, the VRAM carveout
- * need only be large enough for fbdev fb. But we need
- * exclusive access to the buffer to avoid the kernel
- * using those pages for other purposes (which appears
- * as corruption on screen before we have a chance to
- * load and do initial modeset)
- */
-
- node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
- if (node) {
- struct resource r;
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
- if (ret)
- return ret;
- size = r.end - r.start + 1;
- DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
-
- /* if we have no IOMMU, then we need to use carveout allocator.
- * Grab the entire DMA chunk carved out in early startup in
- * mach-msm:
- */
- } else if (!msm_use_mmu(dev)) {
- DRM_INFO("using %s VRAM carveout\n", vram);
- size = memparse(vram, NULL);
- }
-
- if (size) {
- unsigned long attrs = 0;
- void *p;
-
- priv->vram.size = size;
-
- drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
- spin_lock_init(&priv->vram.lock);
-
- attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
- attrs |= DMA_ATTR_WRITE_COMBINE;
-
- /* note that for no-kernel-mapping, the vaddr returned
- * is bogus, but non-null if allocation succeeded:
- */
- p = dma_alloc_attrs(dev->dev, size,
- &priv->vram.paddr, GFP_KERNEL, attrs);
- if (!p) {
- DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
- priv->vram.paddr = 0;
- return -ENOMEM;
- }
-
- DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
- (uint32_t)priv->vram.paddr,
- (uint32_t)(priv->vram.paddr + size));
- }
-
- return ret;
-}
-
-static void msm_deinit_vram(struct drm_device *ddev)
-{
- struct msm_drm_private *priv = ddev->dev_private;
- unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
-
- if (!priv->vram.paddr)
- return;
-
- drm_mm_takedown(&priv->vram.mm);
- dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
- attrs);
-}
-
-static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+static int msm_drm_init(struct device *dev, const struct drm_driver *drv,
+ const struct component_ops *gpu_ops)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev;
@@ -227,12 +122,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ddev->dev_private = priv;
priv->dev = ddev;
- priv->wq = alloc_ordered_workqueue("msm", 0);
- if (!priv->wq) {
- ret = -ENOMEM;
- goto err_put_dev;
- }
-
INIT_LIST_HEAD(&priv->objects);
mutex_init(&priv->obj_lock);
@@ -257,19 +146,18 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (priv->kms_init) {
ret = drmm_mode_config_init(ddev);
if (ret)
- goto err_destroy_wq;
+ goto err_put_dev;
}
- ret = msm_init_vram(ddev);
- if (ret)
- goto err_destroy_wq;
-
dma_set_max_seg_size(dev, UINT_MAX);
/* Bind all our sub-components: */
- ret = component_bind_all(dev, ddev);
+ if (gpu_ops)
+ ret = gpu_ops->bind(dev, dev, NULL);
+ else
+ ret = component_bind_all(dev, ddev);
if (ret)
- goto err_deinit_vram;
+ goto err_put_dev;
ret = msm_gem_shrinker_init(ddev);
if (ret)
@@ -279,11 +167,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ret = msm_drm_kms_init(dev, drv);
if (ret)
goto err_msm_uninit;
- } else {
- /* valid only for the dummy headless case, where of_node=NULL */
- WARN_ON(dev->of_node);
- ddev->driver_features &= ~DRIVER_MODESET;
- ddev->driver_features &= ~DRIVER_ATOMIC;
}
ret = drm_dev_register(ddev, 0);
@@ -294,22 +177,16 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
- if (priv->kms_init) {
- drm_kms_helper_poll_init(ddev);
- drm_client_setup(ddev, NULL);
- }
+ if (priv->kms_init)
+ msm_drm_kms_post_init(dev);
return 0;
err_msm_uninit:
- msm_drm_uninit(dev);
+ msm_drm_uninit(dev, gpu_ops);
return ret;
-err_deinit_vram:
- msm_deinit_vram(ddev);
-err_destroy_wq:
- destroy_workqueue(priv->wq);
err_put_dev:
drm_dev_put(ddev);
@@ -333,11 +210,42 @@ static void load_gpu(struct drm_device *dev)
mutex_unlock(&init_lock);
}
+/**
+ * msm_context_vm - lazily create the context's VM
+ *
+ * @dev: the drm device
+ * @ctx: the context
+ *
+ * The VM is lazily created, so that userspace has a chance to opt-in to having
+ * a userspace managed VM before the VM is created.
+ *
+ * Note that this does not return a reference to the VM. Once the VM is created,
+ * it exists for the lifetime of the context.
+ */
+struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx)
+{
+ static DEFINE_MUTEX(init_lock);
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* Once ctx->vm is created it is valid for the lifetime of the context: */
+ if (ctx->vm)
+ return ctx->vm;
+
+ mutex_lock(&init_lock);
+ if (!ctx->vm) {
+ ctx->vm = msm_gpu_create_private_vm(
+ priv->gpu, current, !ctx->userspace_managed_vm);
+ }
+ mutex_unlock(&init_lock);
+
+ return ctx->vm;
+}
+
static int context_init(struct drm_device *dev, struct drm_file *file)
{
static atomic_t ident = ATOMIC_INIT(0);
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx;
+ struct msm_context *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -349,7 +257,6 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
kref_init(&ctx->ref);
msm_submitqueue_init(dev, ctx);
- ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
file->driver_priv = ctx;
ctx->seqno = atomic_inc_return(&ident);
@@ -367,23 +274,24 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
return context_init(dev, file);
}
-static void context_close(struct msm_file_private *ctx)
+static void context_close(struct msm_context *ctx)
{
+ ctx->closed = true;
msm_submitqueue_close(ctx);
- msm_file_private_put(ctx);
+ msm_context_put(ctx);
}
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
/*
* It is not possible to set sysprof param to non-zero if gpu
* is not initialized:
*/
if (priv->gpu)
- msm_file_private_set_sysprof(ctx, priv->gpu, 0);
+ msm_context_set_sysprof(ctx, priv->gpu, 0);
context_close(ctx);
}
@@ -515,11 +423,14 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
uint64_t *iova)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
if (!priv->gpu)
return -EINVAL;
+ if (msm_context_is_vmbind(ctx))
+ return UERR(EINVAL, dev, "VM_BIND is enabled");
+
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
@@ -527,7 +438,7 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
* Don't pin the memory here - just get an address so that userspace can
* be productive
*/
- return msm_gem_get_iova(obj, ctx->aspace, iova);
+ return msm_gem_get_iova(obj, msm_context_vm(dev, ctx), iova);
}
static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
@@ -535,19 +446,23 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
uint64_t iova)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_gpuvm *vm = msm_context_vm(dev, ctx);
if (!priv->gpu)
return -EINVAL;
+ if (msm_context_is_vmbind(ctx))
+ return UERR(EINVAL, dev, "VM_BIND is enabled");
+
/* Only supported if per-process address space is supported: */
- if (priv->gpu->aspace == ctx->aspace)
+ if (priv->gpu->vm == vm)
return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
- return msm_gem_set_iova(obj, ctx->aspace, iova);
+ return msm_gem_set_iova(obj, vm, iova);
}
static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
@@ -555,6 +470,7 @@ static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
u32 metadata_size)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ void *new_metadata;
void *buf;
int ret;
@@ -572,8 +488,14 @@ static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
if (ret)
goto out;
- msm_obj->metadata =
+ new_metadata =
krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);
+ if (!new_metadata) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ msm_obj->metadata = new_metadata;
msm_obj->metadata_size = metadata_size;
memcpy(msm_obj->metadata, buf, metadata_size);
@@ -872,6 +794,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_VM_BIND, msm_ioctl_vm_bind, DRM_RENDER_ALLOW),
};
static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file)
@@ -893,13 +816,45 @@ static const struct file_operations fops = {
.show_fdinfo = drm_show_fdinfo,
};
+#define DRIVER_FEATURES_GPU ( \
+ DRIVER_GEM | \
+ DRIVER_GEM_GPUVA | \
+ DRIVER_RENDER | \
+ DRIVER_SYNCOBJ | \
+ DRIVER_SYNCOBJ_TIMELINE | \
+ 0 )
+
+#define DRIVER_FEATURES_KMS ( \
+ DRIVER_GEM | \
+ DRIVER_ATOMIC | \
+ DRIVER_MODESET | \
+ 0 )
+
static const struct drm_driver msm_driver = {
- .driver_features = DRIVER_GEM |
- DRIVER_RENDER |
- DRIVER_ATOMIC |
- DRIVER_MODESET |
- DRIVER_SYNCOBJ_TIMELINE |
- DRIVER_SYNCOBJ,
+ .driver_features = DRIVER_FEATURES_GPU | DRIVER_FEATURES_KMS,
+ .open = msm_open,
+ .postclose = msm_postclose,
+ .dumb_create = msm_gem_dumb_create,
+ .dumb_map_offset = msm_gem_dumb_map_offset,
+ .gem_prime_import = msm_gem_prime_import,
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = msm_debugfs_init,
+#endif
+ MSM_FBDEV_DRIVER_OPS,
+ .show_fdinfo = msm_show_fdinfo,
+ .ioctls = msm_ioctls,
+ .num_ioctls = ARRAY_SIZE(msm_ioctls),
+ .fops = &fops,
+ .name = "msm",
+ .desc = "MSM Snapdragon DRM",
+ .major = MSM_VERSION_MAJOR,
+ .minor = MSM_VERSION_MINOR,
+ .patchlevel = MSM_VERSION_PATCHLEVEL,
+};
+
+static const struct drm_driver msm_kms_driver = {
+ .driver_features = DRIVER_FEATURES_KMS,
.open = msm_open,
.postclose = msm_postclose,
.dumb_create = msm_gem_dumb_create,
@@ -910,6 +865,23 @@ static const struct drm_driver msm_driver = {
#endif
MSM_FBDEV_DRIVER_OPS,
.show_fdinfo = msm_show_fdinfo,
+ .fops = &fops,
+ .name = "msm-kms",
+ .desc = "MSM Snapdragon DRM",
+ .major = MSM_VERSION_MAJOR,
+ .minor = MSM_VERSION_MINOR,
+ .patchlevel = MSM_VERSION_PATCHLEVEL,
+};
+
+static const struct drm_driver msm_gpu_driver = {
+ .driver_features = DRIVER_FEATURES_GPU,
+ .open = msm_open,
+ .postclose = msm_postclose,
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = msm_debugfs_init,
+#endif
+ .show_fdinfo = msm_show_fdinfo,
.ioctls = msm_ioctls,
.num_ioctls = ARRAY_SIZE(msm_ioctls),
.fops = &fops,
@@ -1044,12 +1016,16 @@ static int add_gpu_components(struct device *dev,
static int msm_drm_bind(struct device *dev)
{
- return msm_drm_init(dev, &msm_driver);
+ return msm_drm_init(dev,
+ msm_gpu_no_components() ?
+ &msm_kms_driver :
+ &msm_driver,
+ NULL);
}
static void msm_drm_unbind(struct device *dev)
{
- msm_drm_uninit(dev);
+ msm_drm_uninit(dev, NULL);
}
const struct component_master_ops msm_drm_ops = {
@@ -1080,9 +1056,11 @@ int msm_drv_probe(struct device *master_dev,
return ret;
}
- ret = add_gpu_components(master_dev, &match);
- if (ret)
- return ret;
+ if (!msm_gpu_no_components()) {
+ ret = add_gpu_components(master_dev, &match);
+ if (ret)
+ return ret;
+ }
/* on all devices that I am aware of, iommu's which can map
* any address the cpu can see are used:
@@ -1098,29 +1076,34 @@ int msm_drv_probe(struct device *master_dev,
return 0;
}
-/*
- * Platform driver:
- * Used only for headlesss GPU instances
- */
-
-static int msm_pdev_probe(struct platform_device *pdev)
+int msm_gpu_probe(struct platform_device *pdev,
+ const struct component_ops *ops)
{
- return msm_drv_probe(&pdev->dev, NULL, NULL);
+ struct msm_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ /* on all devices that I am aware of, iommu's which can map
+ * any address the cpu can see are used:
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+ if (ret)
+ return ret;
+
+ return msm_drm_init(&pdev->dev, &msm_gpu_driver, ops);
}
-static void msm_pdev_remove(struct platform_device *pdev)
+void msm_gpu_remove(struct platform_device *pdev,
+ const struct component_ops *ops)
{
- component_master_del(&pdev->dev, &msm_drm_ops);
+ msm_drm_uninit(&pdev->dev, ops);
}
-static struct platform_driver msm_platform_driver = {
- .probe = msm_pdev_probe,
- .remove = msm_pdev_remove,
- .driver = {
- .name = "msm",
- },
-};
-
static int __init msm_drm_register(void)
{
if (!modeset)
@@ -1135,13 +1118,13 @@ static int __init msm_drm_register(void)
adreno_register();
msm_mdp4_register();
msm_mdss_register();
- return platform_driver_register(&msm_platform_driver);
+
+ return 0;
}
static void __exit msm_drm_unregister(void)
{
DBG("fini");
- platform_driver_unregister(&msm_platform_driver);
msm_mdss_unregister();
msm_mdp4_unregister();
msm_dp_unregister();
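msm_context_vm() above lazily creates the per-context VM using the classic
check / lock / re-check sequence: an unlocked fast path, then a second check
under the mutex so only one thread performs the creation. A generic sketch
of the pattern, with hypothetical types and names:

	#include <linux/mutex.h>

	struct obj;

	struct holder {
		struct obj *obj;	/* created once, lives as long as the holder */
	};

	struct obj *create_obj(void);

	struct obj *get_lazily(struct holder *h)
	{
		static DEFINE_MUTEX(init_lock);

		/* Fast path: once set, the pointer never changes, so the
		 * unlocked read is safe. */
		if (h->obj)
			return h->obj;

		mutex_lock(&init_lock);
		if (!h->obj)		/* re-check under the lock */
			h->obj = create_obj();
		mutex_unlock(&init_lock);

		return h->obj;
	}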
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c8afb1ea6040..985db9febd98 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -48,8 +48,6 @@ struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
-struct msm_gem_address_space;
-struct msm_gem_vma;
struct msm_disp_state;
#define MAX_CRTCS 8
@@ -72,12 +70,6 @@ enum msm_dsi_controller {
#define MSM_GPU_MAX_RINGS 4
-/* Commit/Event thread specific structure */
-struct msm_drm_thread {
- struct drm_device *dev;
- struct kthread_worker *worker;
-};
-
struct msm_drm_private {
struct drm_device *dev;
@@ -88,16 +80,6 @@ struct msm_drm_private {
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
- /* possibly this should be in the kms component, but it is
- * shared by both mdp4 and mdp5..
- */
- struct hdmi *hdmi;
-
- /* DSI is shared by mdp4 and mdp5 */
- struct msm_dsi *dsi[MSM_DSI_CONTROLLER_COUNT];
-
- struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];
-
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
@@ -177,23 +159,6 @@ struct msm_drm_private {
struct mutex lock;
} lru;
- struct workqueue_struct *wq;
-
- unsigned int num_crtcs;
-
- struct msm_drm_thread event_thread[MAX_CRTCS];
-
- /* VRAM carveout, used when no IOMMU: */
- struct {
- unsigned long size;
- dma_addr_t paddr;
- /* NOTE: mm managed at the page level, size is in # of pages
- * and position mm_node->start is in # of pages:
- */
- struct drm_mm mm;
- spinlock_t lock; /* Protects drm_mm node allocation/removal */
- } vram;
-
struct notifier_block vmap_notifier;
struct shrinker *shrinker;
@@ -264,11 +229,13 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev);
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev);
bool msm_use_mmu(struct drm_device *dev);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
- struct drm_file *file);
+ struct drm_file *file);
+int msm_ioctl_vm_bind(struct drm_device *dev, void *data,
+ struct drm_file *file);
#ifdef CONFIG_DEBUG_FS
unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
@@ -280,25 +247,25 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev, struct dma_buf *buf);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
+struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
-int msm_framebuffer_prepare(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace, bool needs_dirtyfb);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace, bool needed_dirtyfb);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+ struct drm_file *file, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
int w, int h, int p, uint32_t format);
-#ifdef CONFIG_DRM_FBDEV_EMULATION
+#ifdef CONFIG_DRM_MSM_KMS_FBDEV
int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
#define MSM_FBDEV_DRIVER_OPS \
@@ -383,6 +350,7 @@ static inline const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi)
}
#endif
+struct msm_dp;
#ifdef CONFIG_DRM_MSM_DP
int __init msm_dp_register(void);
void __exit msm_dp_unregister(void);
@@ -576,6 +544,10 @@ extern const struct component_master_ops msm_drm_ops;
int msm_kms_pm_prepare(struct device *dev);
void msm_kms_pm_complete(struct device *dev);
+int msm_gpu_probe(struct platform_device *pdev,
+ const struct component_ops *ops);
+void msm_gpu_remove(struct platform_device *pdev,
+ const struct component_ops *ops);
int msm_drv_probe(struct device *dev,
int (*kms_init)(struct drm_device *dev),
struct msm_kms *kms);
@@ -583,4 +555,6 @@ void msm_kms_shutdown(struct platform_device *pdev);
bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver);
+bool msm_gpu_no_components(void);
+
#endif /* __MSM_DRV_H__ */
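A minimal caller-migration sketch for the msm_drv.h prototype changes above; the call site is hypothetical, assuming an existing framebuffer 'fb' and the old address-space pointer 'aspace':

	/* before this series: address space passed explicitly */
	ret = msm_framebuffer_prepare(fb, aspace, needs_dirtyfb);

	/* after: the kms->vm is resolved internally from fb->dev */
	ret = msm_framebuffer_prepare(fb, needs_dirtyfb);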
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 09268e416843..1eff615ff9bf 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -30,6 +30,7 @@ struct msm_framebuffer {
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
static int msm_framebuffer_dirtyfb(struct drm_framebuffer *fb,
@@ -75,20 +76,22 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
/* prepare/pin all the fb's bo's for scanout.
*/
-int msm_framebuffer_prepare(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace,
- bool needs_dirtyfb)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb)
{
+ struct msm_drm_private *priv = fb->dev->dev_private;
+ struct drm_gpuvm *vm = priv->kms->vm;
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
if (needs_dirtyfb)
refcount_inc(&msm_fb->dirtyfb);
- atomic_inc(&msm_fb->prepare_count);
+ if (atomic_inc_return(&msm_fb->prepare_count) > 1)
+ return 0;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
+ msm_gem_vma_get(fb->obj[i]);
+ ret = msm_gem_get_and_pin_iova(fb->obj[i], vm, &msm_fb->iova[i]);
drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
fb->base.id, i, msm_fb->iova[i], ret);
if (ret)
@@ -98,25 +101,28 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
return 0;
}
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace,
- bool needed_dirtyfb)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb)
{
+ struct msm_drm_private *priv = fb->dev->dev_private;
+ struct drm_gpuvm *vm = priv->kms->vm;
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
if (needed_dirtyfb)
refcount_dec(&msm_fb->dirtyfb);
- for (i = 0; i < n; i++)
- msm_gem_unpin_iova(fb->obj[i], aspace);
+ if (atomic_dec_return(&msm_fb->prepare_count))
+ return;
+
+ memset(msm_fb->iova, 0, sizeof(msm_fb->iova));
- if (!atomic_dec_return(&msm_fb->prepare_count))
- memset(msm_fb->iova, 0, sizeof(msm_fb->iova));
+ for (i = 0; i < n; i++) {
+ msm_gem_unpin_iova(fb->obj[i], vm);
+ msm_gem_vma_put(fb->obj[i]);
+ }
}
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace, int plane)
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
return msm_fb->iova[plane] + fb->offsets[plane];
@@ -134,10 +140,9 @@ const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
}
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
+ struct drm_file *file, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev,
- mode_cmd);
struct drm_gem_object *bos[4] = {0};
struct drm_framebuffer *fb;
int ret, i, n = info->num_planes;
@@ -150,7 +155,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
}
}
- fb = msm_framebuffer_init(dev, mode_cmd, bos);
+ fb = msm_framebuffer_init(dev, info, mode_cmd, bos);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto out_unref;
@@ -165,10 +170,9 @@ out_unref:
}
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
- const struct drm_format_info *info = drm_get_format_info(dev,
- mode_cmd);
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_framebuffer *msm_fb = NULL;
@@ -222,7 +226,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->base.obj[i] = bos[i];
}
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
if (ret) {
@@ -271,7 +275,10 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
msm_gem_object_set_name(bo, "stolenfb");
- fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
+ fb = msm_framebuffer_init(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, &bo);
if (IS_ERR(fb)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
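The msm_fb.c hunks above make prepare_count a real pin refcount: only the 0->1 transition pins the plane iovas, and only the final cleanup clears the cached iovas and drops the pins plus VMA references. A minimal lifecycle sketch, assuming a framebuffer 'fb' that is not tracked for dirtyfb:

	msm_framebuffer_prepare(fb, false);	/* 0 -> 1: vma_get + get_and_pin per plane */
	msm_framebuffer_prepare(fb, false);	/* 1 -> 2: early return, already pinned */
	msm_framebuffer_cleanup(fb, false);	/* 2 -> 1: still pinned */
	msm_framebuffer_cleanup(fb, false);	/* 1 -> 0: iovas cleared, unpin + vma_put */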
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c62249b1ab3d..b5969374d53f 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -122,7 +122,7 @@ int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
+ ret = msm_gem_get_and_pin_iova(bo, priv->kms->vm, &paddr);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2995e80fec3b..7ff994d4f91a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -9,7 +9,6 @@
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
#include <drm/drm_prime.h>
#include <drm/drm_file.h>
@@ -17,24 +16,9 @@
#include <trace/events/gpu_mem.h>
#include "msm_drv.h"
-#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
-#include "msm_mmu.h"
-
-static dma_addr_t physaddr(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
- return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
- priv->vram.paddr;
-}
-
-static bool use_pages(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- return !msm_obj->vram_node;
-}
+#include "msm_kms.h"
static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
{
@@ -44,7 +28,7 @@ static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
static void update_ctx_mem(struct drm_file *file, ssize_t size)
{
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
rcu_read_lock(); /* Locks file->pid! */
@@ -55,13 +39,73 @@ static void update_ctx_mem(struct drm_file *file, ssize_t size)
static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
{
+ msm_gem_vma_get(obj);
update_ctx_mem(file, obj->size);
return 0;
}
+static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ bool close, const char *reason);
+
static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
{
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_exec exec;
+
update_ctx_mem(file, -obj->size);
+ msm_gem_vma_put(obj);
+
+ /*
+ * If VM isn't created yet, nothing to clean up. And in fact calling
+ * put_iova_spaces() with vm=NULL would be bad, in that it will tear-
+ * down the mappings of shared buffers in other contexts.
+ */
+ if (!ctx->vm)
+ return;
+
+ /*
+ * VM_BIND does not depend on implicit teardown of VMAs on handle
+ * close, but instead on implicit teardown of the VM when the device
+ * is closed (see msm_gem_vm_close())
+ */
+ if (msm_context_is_vmbind(ctx))
+ return;
+
+ /*
+ * TODO we might need to kick this to a queue to avoid blocking
+ * in CLOSE ioctl
+ */
+ dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ MAX_SCHEDULE_TIMEOUT);
+
+ msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
+ put_iova_spaces(obj, ctx->vm, true, "close");
+ drm_exec_fini(&exec); /* drop locks */
+}
+
+/*
+ * Get/put for kms->vm VMA
+ */
+
+void msm_gem_vma_get(struct drm_gem_object *obj)
+{
+ atomic_inc(&to_msm_bo(obj)->vma_ref);
+}
+
+void msm_gem_vma_put(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct drm_exec exec;
+
+ if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
+ return;
+
+ if (!priv->kms)
+ return;
+
+ msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
+ put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
+ drm_exec_fini(&exec); /* drop locks */
}
/*
@@ -135,36 +179,6 @@ static void update_lru(struct drm_gem_object *obj)
mutex_unlock(&priv->lru.lock);
}
-/* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
- dma_addr_t paddr;
- struct page **p;
- int ret, i;
-
- p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- spin_lock(&priv->vram.lock);
- ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
- spin_unlock(&priv->vram.lock);
- if (ret) {
- kvfree(p);
- return ERR_PTR(ret);
- }
-
- paddr = physaddr(obj);
- for (i = 0; i < npages; i++) {
- p[i] = pfn_to_page(__phys_to_pfn(paddr));
- paddr += PAGE_SIZE;
- }
-
- return p;
-}
-
static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -176,10 +190,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
struct page **p;
int npages = obj->size >> PAGE_SHIFT;
- if (use_pages(obj))
- p = drm_gem_get_pages(obj);
- else
- p = get_pages_vram(obj, npages);
+ p = drm_gem_get_pages(obj);
if (IS_ERR(p)) {
DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
@@ -212,22 +223,17 @@ static struct page **get_pages(struct drm_gem_object *obj)
return msm_obj->pages;
}
-static void put_pages_vram(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
-
- spin_lock(&priv->vram.lock);
- drm_mm_remove_node(msm_obj->vram_node);
- spin_unlock(&priv->vram.lock);
-
- kvfree(msm_obj->pages);
-}
-
static void put_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ /*
+ * Skip gpuvm in the object free path to avoid a WARN_ON() splat.
+ * See explanation in msm_gem_assert_locked()
+ */
+ if (kref_read(&obj->refcount))
+ drm_gpuvm_bo_gem_evict(obj, true);
+
if (msm_obj->pages) {
if (msm_obj->sgt) {
/* For non-cached buffers, ensure the new
@@ -244,18 +250,14 @@ static void put_pages(struct drm_gem_object *obj)
update_device_mem(obj->dev->dev_private, -obj->size);
- if (use_pages(obj))
- drm_gem_put_pages(obj, msm_obj->pages, true, false);
- else
- put_pages_vram(obj);
+ drm_gem_put_pages(obj, msm_obj->pages, true, false);
msm_obj->pages = NULL;
update_lru(obj);
}
}
-static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
- unsigned madv)
+struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -397,48 +399,31 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
-static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ struct drm_gpuvm_bo *vm_bo;
msm_gem_assert_locked(obj);
- vma = msm_gem_vma_new(aspace);
- if (!vma)
- return ERR_PTR(-ENOMEM);
-
- list_add_tail(&vma->list, &msm_obj->vmas);
-
- return vma;
-}
-
-static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct drm_gpuva *vma;
- msm_gem_assert_locked(obj);
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ if (vma->vm == vm) {
+ /* lookup_vma() should only be used in paths
+ * with at most one vma per vm
+ */
+ GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- if (vma->aspace == aspace)
- return vma;
+ return vma;
+ }
+ }
}
return NULL;
}
-static void del_vma(struct msm_gem_vma *vma)
-{
- if (!vma)
- return;
-
- list_del(&vma->list);
- kfree(vma);
-}
-
/*
* If close is true, this also closes the VMA (releasing the allocated
* iova range) in addition to removing the iommu mapping. In the eviction
@@ -446,71 +431,54 @@ static void del_vma(struct msm_gem_vma *vma)
* mapping.
*/
static void
-put_iova_spaces(struct drm_gem_object *obj, bool close)
+put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ bool close, const char *reason)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ struct drm_gpuvm_bo *vm_bo, *tmp;
msm_gem_assert_locked(obj);
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- if (vma->aspace) {
- msm_gem_vma_purge(vma);
+ drm_gem_for_each_gpuvm_bo_safe (vm_bo, tmp, obj) {
+ struct drm_gpuva *vma, *vmatmp;
+
+ if (vm && vm_bo->vm != vm)
+ continue;
+
+ drm_gpuvm_bo_get(vm_bo);
+
+ drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
+ msm_gem_vma_unmap(vma, reason);
if (close)
msm_gem_vma_close(vma);
}
- }
-}
-/* Called with msm_obj locked */
-static void
-put_iova_vmas(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma, *tmp;
-
- msm_gem_assert_locked(obj);
-
- list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- del_vma(vma);
+ drm_gpuvm_bo_put(vm_bo);
}
}
-static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace,
- u64 range_start, u64 range_end)
+static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm, u64 range_start,
+ u64 range_end)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
msm_gem_assert_locked(obj);
- vma = lookup_vma(obj, aspace);
+ vma = lookup_vma(obj, vm);
if (!vma) {
- int ret;
-
- vma = add_vma(obj, aspace);
- if (IS_ERR(vma))
- return vma;
-
- ret = msm_gem_vma_init(vma, obj->size,
- range_start, range_end);
- if (ret) {
- del_vma(vma);
- return ERR_PTR(ret);
- }
+ vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
} else {
- GEM_WARN_ON(vma->iova < range_start);
- GEM_WARN_ON((vma->iova + obj->size) > range_end);
+ GEM_WARN_ON(vma->va.addr < range_start);
+ GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
}
return vma;
}
-int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+int msm_gem_prot(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct page **pages;
int prot = IOMMU_READ;
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
@@ -522,13 +490,22 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
prot |= IOMMU_CACHE;
+ return prot;
+}
+
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ int prot = msm_gem_prot(obj);
+
msm_gem_assert_locked(obj);
pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (IS_ERR(pages))
return PTR_ERR(pages);
- return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
+ return msm_gem_vma_map(vma, prot, msm_obj->sgt);
}
void msm_gem_unpin_locked(struct drm_gem_object *obj)
@@ -560,28 +537,31 @@ void msm_gem_unpin_active(struct drm_gem_object *obj)
update_lru_active(obj);
}
-struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
{
- return get_vma_locked(obj, aspace, 0, U64_MAX);
+ return get_vma_locked(obj, vm, 0, U64_MAX);
}
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end)
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
int ret;
msm_gem_assert_locked(obj);
- vma = get_vma_locked(obj, aspace, range_start, range_end);
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return -EINVAL;
+
+ vma = get_vma_locked(obj, vm, range_start, range_end);
if (IS_ERR(vma))
return PTR_ERR(vma);
ret = msm_gem_pin_vma_locked(obj, vma);
if (!ret) {
- *iova = vma->iova;
+ *iova = vma->va.addr;
pin_obj_locked(obj);
}
@@ -593,58 +573,59 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
* limits iova to specified range (in pages)
*/
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end)
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
+ struct drm_exec exec;
int ret;
- msm_gem_lock(obj);
- ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
- msm_gem_unlock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
/* get iova and pin it. Should have a matching put */
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova)
{
- return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
+ return msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX);
}
/*
* Get an iova but don't pin it. Doesn't need a put because iovas are currently
* valid for the life of the object
*/
-int msm_gem_get_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
+ struct drm_exec exec;
int ret = 0;
- msm_gem_lock(obj);
- vma = get_vma_locked(obj, aspace, 0, U64_MAX);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ vma = get_vma_locked(obj, vm, 0, U64_MAX);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
} else {
- *iova = vma->iova;
+ *iova = vma->va.addr;
}
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
static int clear_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+ struct drm_gpuvm *vm)
{
- struct msm_gem_vma *vma = lookup_vma(obj, aspace);
+ struct drm_gpuva *vma = lookup_vma(obj, vm);
if (!vma)
return 0;
- msm_gem_vma_purge(vma);
+ msm_gem_vma_unmap(vma, NULL);
msm_gem_vma_close(vma);
- del_vma(vma);
return 0;
}
@@ -657,44 +638,54 @@ static int clear_iova(struct drm_gem_object *obj,
* Setting an iova of zero will clear the vma.
*/
int msm_gem_set_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t iova)
+ struct drm_gpuvm *vm, uint64_t iova)
{
+ struct drm_exec exec;
int ret = 0;
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
if (!iova) {
- ret = clear_iova(obj, aspace);
+ ret = clear_iova(obj, vm);
} else {
- struct msm_gem_vma *vma;
- vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
+ struct drm_gpuva *vma;
+ vma = get_vma_locked(obj, vm, iova, iova + obj->size);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- } else if (GEM_WARN_ON(vma->iova != iova)) {
- clear_iova(obj, aspace);
+ } else if (GEM_WARN_ON(vma->va.addr != iova)) {
+ clear_iova(obj, vm);
ret = -EBUSY;
}
}
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
+static bool is_kms_vm(struct drm_gpuvm *vm)
+{
+ struct msm_drm_private *priv = vm->drm->dev_private;
+
+ return priv->kms && (priv->kms->vm == vm);
+}
+
/*
* Unpin a iova by updating the reference counts. The memory isn't actually
* purged until something else (shrinker, mm_notifier, destroy, etc) decides
* to get rid of it
*/
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
+ struct drm_exec exec;
- msm_gem_lock(obj);
- vma = lookup_vma(obj, aspace);
- if (!GEM_WARN_ON(!vma)) {
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ vma = lookup_vma(obj, vm);
+ if (vma) {
msm_gem_unpin_locked(obj);
}
- msm_gem_unlock(obj);
+ if (!is_kms_vm(vm))
+ put_iova_spaces(obj, vm, true, "close");
+ drm_exec_fini(&exec); /* drop locks */
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -853,7 +844,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
- put_iova_spaces(obj, true);
+ put_iova_spaces(obj, NULL, false, "purge");
msm_gem_vunmap(obj);
@@ -861,8 +852,6 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_pages(obj);
- put_iova_vmas(obj);
-
mutex_lock(&priv->lru.lock);
/* A one-way transition: */
msm_obj->madv = __MSM_MADV_PURGED;
@@ -893,7 +882,7 @@ void msm_gem_evict(struct drm_gem_object *obj)
GEM_WARN_ON(is_unevictable(msm_obj));
/* Get rid of any iommu mapping(s): */
- put_iova_spaces(obj, false);
+ put_iova_spaces(obj, NULL, false, "evict");
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
@@ -920,7 +909,7 @@ bool msm_gem_active(struct drm_gem_object *obj)
if (to_msm_bo(obj)->pin_count)
return true;
- return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
+ return !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@ -959,11 +948,11 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_resv *robj = obj->resv;
- struct msm_gem_vma *vma;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
- msm_gem_lock(obj);
+ if (!msm_gem_trylock(obj))
+ return;
stats->all.count++;
stats->all.size += obj->size;
@@ -1002,31 +991,33 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
- if (!list_empty(&msm_obj->vmas)) {
+ if (!list_empty(&obj->gpuva.list)) {
+ struct drm_gpuvm_bo *vm_bo;
seq_puts(m, " vmas:");
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- const char *name, *comm;
- if (vma->aspace) {
- struct msm_gem_address_space *aspace = vma->aspace;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct drm_gpuva *vma;
+
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ const char *name, *comm;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
struct task_struct *task =
- get_pid_task(aspace->pid, PIDTYPE_PID);
+ get_pid_task(vm->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
put_task_struct(task);
} else {
comm = NULL;
}
- name = aspace->name;
- } else {
- name = comm = NULL;
+ name = vm->base.name;
+
+ seq_printf(m, " [%s%s%s: vm=%p, %08llx, %smapped]",
+ name, comm ? ":" : "", comm ? comm : "",
+ vma->vm, vma->va.addr,
+ to_msm_vma(vma)->mapped ? "" : "un");
+ kfree(comm);
}
- seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
- name, comm ? ":" : "", comm ? comm : "",
- vma->aspace, vma->iova,
- vma->mapped ? "mapped" : "unmapped");
- kfree(comm);
}
seq_puts(m, "\n");
@@ -1067,12 +1058,46 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct drm_exec exec;
mutex_lock(&priv->obj_lock);
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
- put_iova_spaces(obj, true);
+ /*
+ * We need to lock any VMs the object is still attached to, but not
+ * the object itself (see explanation in msm_gem_assert_locked()),
+ * so just open-code this special case.
+ *
+ * Note that we skip the dance if we aren't attached to any VM. This
+ * is load bearing. The driver needs to support two usage models:
+ *
+ * 1. Legacy kernel managed VM: Userspace expects the VMAs to be
+ * implicitly torn down when the object is freed; the VMAs do
+ * not hold a hard reference to the BO.
+ *
+ * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the
+ * BO. This can be dropped when the VM is closed and its associated
+ * VMAs are torn down (see msm_gem_vm_close()).
+ *
+ * In the latter case the last reference to a BO can be dropped while
+ * we already have the VM locked. It would have already been removed
+ * from the gpuva list, but lockdep doesn't know that, or understand
+ * the differences between the two usage models.
+ */
+ if (!list_empty(&obj->gpuva.list)) {
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked (&exec) {
+ struct drm_gpuvm_bo *vm_bo;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ drm_exec_lock_obj(&exec,
+ drm_gpuvm_resv_obj(vm_bo->vm));
+ drm_exec_retry_on_contention(&exec);
+ }
+ }
+ put_iova_spaces(obj, NULL, true, "free");
+ drm_exec_fini(&exec); /* drop locks */
+ }
if (drm_gem_is_imported(obj)) {
GEM_WARN_ON(msm_obj->vaddr);
@@ -1082,13 +1107,18 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
*/
kvfree(msm_obj->pages);
- put_iova_vmas(obj);
-
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
msm_gem_vunmap(obj);
put_pages(obj);
- put_iova_vmas(obj);
+ }
+
+ if (msm_obj->flags & MSM_BO_NO_SHARE) {
+ struct drm_gem_object *r_obj =
+ container_of(obj->resv, struct drm_gem_object, _resv);
+
+ /* Drop reference we hold to shared resv obj: */
+ drm_gem_object_put(r_obj);
}
drm_gem_object_release(obj);
@@ -1123,6 +1153,15 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
if (name)
msm_gem_object_set_name(obj, "%s", name);
+ if (flags & MSM_BO_NO_SHARE) {
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
+
+ drm_gem_object_get(r_obj);
+
+ obj->resv = r_obj->resv;
+ }
+
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
@@ -1155,6 +1194,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.free = msm_gem_free_object,
.open = msm_gem_open,
.close = msm_gem_close,
+ .export = msm_gem_prime_export,
.pin = msm_gem_prime_pin,
.unpin = msm_gem_prime_unpin,
.get_sg_table = msm_gem_prime_get_sg_table,
@@ -1194,7 +1234,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->madv = MSM_MADV_WILLNEED;
INIT_LIST_HEAD(&msm_obj->node);
- INIT_LIST_HEAD(&msm_obj->vmas);
*obj = &msm_obj->base;
(*obj)->funcs = &msm_gem_object_funcs;
@@ -1207,19 +1246,10 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
- bool use_vram = false;
int ret;
size = PAGE_ALIGN(size);
- if (!msm_use_mmu(dev))
- use_vram = true;
- else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
- use_vram = true;
-
- if (GEM_WARN_ON(use_vram && !priv->vram.size))
- return ERR_PTR(-EINVAL);
-
/* Disallow zero sized objects as they make the underlying
* infrastructure grumpy
*/
@@ -1232,44 +1262,16 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
msm_obj = to_msm_bo(obj);
- if (use_vram) {
- struct msm_gem_vma *vma;
- struct page **pages;
-
- drm_gem_private_object_init(dev, obj, size);
-
- msm_gem_lock(obj);
-
- vma = add_vma(obj, NULL);
- msm_gem_unlock(obj);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto fail;
- }
-
- to_msm_bo(obj)->vram_node = &vma->node;
-
- msm_gem_lock(obj);
- pages = get_pages(obj);
- msm_gem_unlock(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
- }
-
- vma->iova = physaddr(obj);
- } else {
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto fail;
- /*
- * Our buffers are kept pinned, so allocating them from the
- * MOVABLE zone is a really bad idea, and conflicts with CMA.
- * See comments above new_inode() why this is required _and_
- * expected if you're going to pin these pages.
- */
- mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
- }
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the
+ * MOVABLE zone is a really bad idea, and conflicts with CMA.
+ * See comments above new_inode() why this is required _and_
+ * expected if you're going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
@@ -1297,12 +1299,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size;
int ret, npages;
- /* if we don't have IOMMU, don't bother pretending we can import: */
- if (!msm_use_mmu(dev)) {
- DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
- return ERR_PTR(-EINVAL);
- }
-
size = PAGE_ALIGN(dmabuf->size);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
@@ -1348,9 +1344,9 @@ fail:
return ERR_PTR(ret);
}
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova)
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+ struct drm_gpuvm *vm, struct drm_gem_object **bo,
+ uint64_t *iova)
{
void *vaddr;
struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
@@ -1360,14 +1356,14 @@ void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
return ERR_CAST(obj);
if (iova) {
- ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+ ret = msm_gem_get_and_pin_iova(obj, vm, iova);
if (ret)
goto err;
}
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
- msm_gem_unpin_iova(obj, aspace);
+ msm_gem_unpin_iova(obj, vm);
ret = PTR_ERR(vaddr);
goto err;
}
@@ -1383,14 +1379,13 @@ err:
}
-void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_address_space *aspace)
+void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm)
{
if (IS_ERR_OR_NULL(bo))
return;
msm_gem_put_vaddr(bo);
- msm_gem_unpin_iova(bo, aspace);
+ msm_gem_unpin_iova(bo, vm);
drm_gem_object_put(bo);
}
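The kernel-BO convenience helpers keep their shape across the conversion above, just taking a struct drm_gpuvm now. A minimal usage sketch, assuming a valid 'dev' and a 'vm' created with msm_gem_vm_create():

	struct drm_gem_object *bo;
	uint64_t iova;
	void *vaddr;

	vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, vm, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	/* ... CPU access via vaddr, device access via iova ... */
	msm_gem_kernel_put(bo, vm);	/* put vaddr, unpin iova, drop BO ref */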
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index ba5c4ff76292..88239da1cd72 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -7,9 +7,11 @@
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
+#include "msm_mmu.h"
#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "drm/drm_exec.h"
+#include "drm/drm_gpuvm.h"
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"
@@ -22,56 +24,173 @@
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */
-struct msm_gem_address_space {
- const char *name;
- /* NOTE: mm managed at the page level, size is in # of pages
- * and position mm_node->start is in # of pages:
+/**
+ * struct msm_gem_vm_log_entry - An entry in the VM log
+ *
+ * For userspace managed VMs, a log of recent VM updates is tracked and
+ * captured in GPU devcore dumps, to aid debugging issues caused by (for
+ * example) incorrectly synchronized VM updates
+ */
+struct msm_gem_vm_log_entry {
+ const char *op;
+ uint64_t iova;
+ uint64_t range;
+ int queue_id;
+};
+
+/**
+ * struct msm_gem_vm - VM object
+ *
+ * A VM object representing a GPU (or display or GMU or ...) virtual address
+ * space.
+ *
+ * In the case of GPU, if per-process address spaces are supported, the address
+ * space is split into two VMs, which map to TTBR0 and TTBR1 in the SMMU. TTBR0
+ * is used for userspace objects, and is unique per msm_context/drm_file, while
+ * TTBR1 is the same for all processes. (The kernel controlled ringbuffer and
+ * a few other kernel controlled buffers live in TTBR1.)
+ *
+ * The GPU TTBR0 vm can be managed by userspace or by the kernel, depending on
+ * whether userspace supports VM_BIND. All other VMs are managed by the kernel.
+ * (Managed by kernel means the kernel is responsible for VA allocation.)
+ *
+ * Note that because VM_BIND allows a given BO to be mapped multiple times in
+ * a VM, and therefore have multiple VMA's in a VM, there is an extra object
+ * provided by drm_gpuvm infrastructure.. the drm_gpuvm_bo, which is not
+ * embedded in any larger driver structure. The GEM object holds a list of
+ * drm_gpuvm_bo, which in turn holds a list of msm_gem_vma. A linked vma
+ * holds a reference to the vm_bo, and drops it when the vma is unlinked.
+ * So we just need to call drm_gpuvm_bo_obtain() to return a ref to an
+ * existing vm_bo, or create a new one. Once the vma is linked, the ref
+ * to the vm_bo can be dropped (since the vma is holding one).
+ */
+struct msm_gem_vm {
+ /** @base: Inherit from drm_gpuvm. */
+ struct drm_gpuvm base;
+
+ /**
+ * @sched: Scheduler used for asynchronous VM_BIND request.
+ *
+ * Unused for kernel managed VMs (where all operations are synchronous).
+ */
+ struct drm_gpu_scheduler sched;
+
+ /**
+ * @prealloc_throttle: Used to throttle VM_BIND ops if too much pre-
+ * allocated memory is in flight.
+ *
+ * Because we have to pre-allocate pgtable pages for the worst case
+ * (ie. new mappings do not share any PTEs with existing mappings)
+ * we could end up consuming a lot of resources transiently. The
+ * prealloc_throttle puts an upper bound on that.
+ */
+ struct {
+ /** @wait: Notified when preallocated resources are released */
+ wait_queue_head_t wait;
+
+ /**
+ * @in_flight: The # of preallocated pgtable pages in-flight
+ * for queued VM_BIND jobs.
+ */
+ atomic_t in_flight;
+ } prealloc_throttle;
+
+ /**
+ * @mm: Memory management for kernel managed VA allocations
+ *
+ * Only used for kernel managed VMs, unused for user managed VMs.
+ *
+ * Protected by @mm_lock.
*/
struct drm_mm mm;
- spinlock_t lock; /* Protects drm_mm node allocation/removal */
+
+ /** @mmu: The mmu object which manages the pgtables */
struct msm_mmu *mmu;
- struct kref kref;
- /* For address spaces associated with a specific process, this
+ /** @mmu_lock: Protects access to the mmu */
+ struct mutex mmu_lock;
+
+ /**
+ * @pid: For address spaces associated with a specific process, this
* will be non-NULL:
*/
struct pid *pid;
- /* @faults: the number of GPU hangs associated with this address space */
+ /** @last_fence: Fence for last pending work scheduled on the VM */
+ struct dma_fence *last_fence;
+
+ /** @log: A log of recent VM updates */
+ struct msm_gem_vm_log_entry *log;
+
+ /** @log_shift: length of @log is (1 << @log_shift) */
+ uint32_t log_shift;
+
+ /** @log_idx: index of next @log entry to write */
+ uint32_t log_idx;
+
+ /** @faults: the number of GPU hangs associated with this address space */
int faults;
- /** @va_start: lowest possible address to allocate */
- uint64_t va_start;
+ /** @managed: is this a kernel managed VM? */
+ bool managed;
- /** @va_size: the size of the address space (in bytes) */
- uint64_t va_size;
+ /**
+ * @unusable: True if the VM has turned unusable because something
+ * bad happened during an asynchronous request.
+ *
+ * We don't try to recover from such failures, because this implies
+ * informing userspace about the specific operation that failed, and
+ * hoping the userspace driver can replay things from there. This all
+ * sounds very complicated for little gain.
+ *
+ * Instead, we should just flag the VM as unusable, and fail any
+ * further request targeting this VM.
+ *
+ * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
+ * situation, where the logical device needs to be re-created.
+ */
+ bool unusable;
};
+#define to_msm_vm(x) container_of(x, struct msm_gem_vm, base)
-struct msm_gem_address_space *
-msm_gem_address_space_get(struct msm_gem_address_space *aspace);
-
-void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+struct drm_gpuvm *
+msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 va_size, bool managed);
-struct msm_gem_address_space *
-msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
- u64 va_start, u64 size);
+void msm_gem_vm_close(struct drm_gpuvm *gpuvm);
+void msm_gem_vm_unusable(struct drm_gpuvm *gpuvm);
struct msm_fence_context;
+#define MSM_VMA_DUMP (DRM_GPUVA_USERBITS << 0)
+
+/**
+ * struct msm_gem_vma - a VMA mapping
+ *
+ * Represents a combination of a GEM object plus a VM.
+ */
struct msm_gem_vma {
+ /** @base: inherit from drm_gpuva */
+ struct drm_gpuva base;
+
+ /**
+ * @node: mm node for VA allocation
+ *
+ * Only used by kernel managed VMs
+ */
struct drm_mm_node node;
- uint64_t iova;
- struct msm_gem_address_space *aspace;
- struct list_head list; /* node in msm_gem_object::vmas */
+
+ /** @mapped: Is this VMA mapped? */
bool mapped;
};
+#define to_msm_vma(x) container_of(x, struct msm_gem_vma, base)
-struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace);
-int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
- u64 range_start, u64 range_end);
-void msm_gem_vma_purge(struct msm_gem_vma *vma);
-int msm_gem_vma_map(struct msm_gem_vma *vma, int prot, struct sg_table *sgt, int size);
-void msm_gem_vma_close(struct msm_gem_vma *vma);
+struct drm_gpuva *
+msm_gem_vma_new(struct drm_gpuvm *vm, struct drm_gem_object *obj,
+ u64 offset, u64 range_start, u64 range_end);
+void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason);
+int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt);
+void msm_gem_vma_close(struct drm_gpuva *vma);
struct msm_gem_object {
struct drm_gem_object base;
@@ -100,13 +219,6 @@ struct msm_gem_object {
struct sg_table *sgt;
void *vaddr;
- struct list_head vmas; /* list of msm_gem_vma */
-
- /* For physically contiguous buffers. Used when we don't have
- * an IOMMU. Also used for stolen/splashscreen buffer.
- */
- struct drm_mm_node *vram_node;
-
char name[32]; /* Identifier to print for the debugfs files */
/* userspace metadata backchannel */
@@ -119,27 +231,56 @@ struct msm_gem_object {
* Protected by LRU lock.
*/
int pin_count;
+
+ /**
+ * @vma_ref: Reference count of VMA users.
+ *
+ * With the vm_bo/vma holding a reference to the GEM object, we'd
+ * otherwise have to actively tear down a VMA when, for example,
+ * a buffer is unpinned for scanout, vs. the pre-drm_gpuvm approach
+ * where a VMA did not hold a reference to the BO, but instead was
+ * implicitly torn down when the BO was freed.
+ *
+ * To regain the lazy VMA teardown, we use the @vma_ref. It is
+ * incremented for any of the following:
+ *
+ * 1) the BO is exported as a dma_buf
+ * 2) the BO has an open userspace handle
+ *
+ * All of those conditions will hold a reference to the BO,
+ * preventing it from being freed. So lazily keeping around the
+ * VMA will not prevent the BO from being freed. (Or rather, the
+ * reference loop is harmless in this case.)
+ *
+ * When the @vma_ref drops to zero, the kms->vm VMA will be
+ * torn down.
+ */
+ atomic_t vma_ref;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
+void msm_gem_vma_get(struct drm_gem_object *obj);
+void msm_gem_vma_put(struct drm_gem_object *obj);
+
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+int msm_gem_prot(struct drm_gem_object *obj);
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma);
void msm_gem_unpin_locked(struct drm_gem_object *obj);
void msm_gem_unpin_active(struct drm_gem_object *obj);
-struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
-int msm_gem_get_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova);
-int msm_gem_set_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t iova);
+struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm);
+int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova);
+int msm_gem_set_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end);
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova);
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova);
+void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm);
void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
+struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv);
struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj);
void msm_gem_unpin_pages_locked(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -159,11 +300,10 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova);
-void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_address_space *aspace);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+ struct drm_gpuvm *vm, struct drm_gem_object **bo,
+ uint64_t *iova);
+void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
@@ -188,6 +328,12 @@ msm_gem_lock(struct drm_gem_object *obj)
dma_resv_lock(obj->resv, NULL);
}
+static inline bool __must_check
+msm_gem_trylock(struct drm_gem_object *obj)
+{
+ return dma_resv_trylock(obj->resv);
+}
+
static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
@@ -200,6 +346,37 @@ msm_gem_unlock(struct drm_gem_object *obj)
dma_resv_unlock(obj->resv);
}
+/**
+ * msm_gem_lock_vm_and_obj() - Helper to lock an obj + VM
+ * @exec: the exec context helper which will be initalized
+ * @obj: the GEM object to lock
+ * @vm: the VM to lock
+ *
+ * Operations which modify a VM frequently need to lock both the VM and
+ * the object being mapped/unmapped/etc. This helper uses drm_exec to
+ * acquire both locks, dealing with potential deadlock/backoff scenarios
+ * which arise when multiple locks are involved.
+ */
+static inline int
+msm_gem_lock_vm_and_obj(struct drm_exec *exec,
+ struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
+{
+ int ret = 0;
+
+ drm_exec_init(exec, 0, 2);
+ drm_exec_until_all_locked (exec) {
+ ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(vm));
+ if (!ret && (obj->resv != drm_gpuvm_resv(vm)))
+ ret = drm_exec_lock_obj(exec, obj);
+ drm_exec_retry_on_contention(exec);
+ if (GEM_WARN_ON(ret))
+ break;
+ }
+
+ return ret;
+}
+
static inline void
msm_gem_assert_locked(struct drm_gem_object *obj)
{
@@ -257,7 +434,7 @@ struct msm_gem_submit {
struct kref ref;
struct drm_device *dev;
struct msm_gpu *gpu;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
struct list_head node; /* node in ring submit list */
struct drm_exec exec;
uint32_t seqno; /* Sequence number of the submit on the ring */
@@ -297,6 +474,7 @@ struct msm_gem_submit {
struct drm_gem_object *obj;
uint32_t handle;
};
+ struct drm_gpuvm_bo *vm_bo;
uint64_t iova;
} bos[];
};
@@ -320,14 +498,4 @@ static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
void msm_submit_retire(struct msm_gem_submit *submit);
-/* helper to determine of a buffer in submit should be dumped, used for both
- * devcoredump and debugfs cmdstream dumping:
- */
-static inline bool
-should_dump(struct msm_gem_submit *submit, int idx)
-{
- extern bool rd_full;
- return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
-}
-
#endif /* __MSM_GEM_H__ */
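The struct msm_gem_vm kernel-doc above describes the drm_gpuvm_bo as the ref-counted link between a GEM object and a VM: obtain it, link a VMA to it, then drop the obtain reference because the linked VMA holds its own. A condensed sketch of that lifetime using the generic drm_gpuvm API (error handling trimmed; 'vm', 'obj' and 'vma' are assumed):

	struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_obtain(vm, obj);

	if (IS_ERR(vm_bo))
		return PTR_ERR(vm_bo);
	drm_gpuva_link(vma, vm_bo);	/* vma now holds a vm_bo reference */
	drm_gpuvm_bo_put(vm_bo);	/* drop the obtain reference */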
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 2e37913d5a6a..c0a33ac839cb 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -6,6 +6,7 @@
#include <linux/dma-buf.h>
+#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include "msm_drv.h"
@@ -16,6 +17,9 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int npages = obj->size >> PAGE_SHIFT;
+ if (msm_obj->flags & MSM_BO_NO_SHARE)
+ return ERR_PTR(-EINVAL);
+
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
return ERR_PTR(-ENOMEM);
@@ -39,12 +43,70 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
msm_gem_put_vaddr_locked(obj);
}
+static void msm_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ msm_gem_vma_put(obj);
+ drm_gem_dmabuf_release(dma_buf);
+}
+
+static const struct dma_buf_ops msm_gem_prime_dmabuf_ops = {
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = msm_gem_dmabuf_release,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buf)
+{
+ if (buf->ops == &msm_gem_prime_dmabuf_ops) {
+ struct drm_gem_object *obj = buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ drm_gem_object_get(obj);
+ return obj;
+ }
+ }
+
+ return drm_gem_prime_import(dev, buf);
+}
+
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
return msm_gem_import(dev, attach->dmabuf, sg);
}
+struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags)
+{
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return ERR_PTR(-EPERM);
+
+ msm_gem_vma_get(obj);
+
+ struct drm_device *dev = obj->dev;
+ struct dma_buf_export_info exp_info = {
+ .exp_name = KBUILD_MODNAME, /* white lie for debug */
+ .owner = dev->driver->fops->owner,
+ .ops = &msm_gem_prime_dmabuf_ops,
+ .size = obj->size,
+ .flags = flags,
+ .priv = obj,
+ .resv = obj->resv,
+ };
+
+ return drm_gem_dmabuf_export(dev, &exp_info);
+}
+
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
struct page **pages;
@@ -53,6 +115,9 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
if (drm_gem_is_imported(obj))
return 0;
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return -EINVAL;
+
pages = msm_gem_pin_pages_locked(obj);
if (IS_ERR(pages))
ret = PTR_ERR(pages);
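The prime hooks above pair with the vma_ref counting documented in msm_gem.h: exporting takes a VMA reference and the custom dma-buf release hook drops it, so kms->vm mappings of an exported buffer are torn down lazily, once the last dma-buf reference (and any other vma_ref holder) is gone. Condensed, with both calls taken from the hunks above:

	/* PRIME export path -> msm_gem_prime_export(): */
	msm_gem_vma_get(obj);

	/* final dma-buf put -> msm_gem_dmabuf_release(): */
	msm_gem_vma_put(obj);	/* last ref: kms->vm VMAs are torn down */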
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 07ca4ddfe4e3..1039e3c0a47b 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -44,7 +44,76 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
}
static bool
-purge(struct drm_gem_object *obj)
+with_vm_locks(struct ww_acquire_ctx *ticket,
+ void (*fn)(struct drm_gem_object *obj),
+ struct drm_gem_object *obj)
+{
+ /*
+ * Track last locked entry for unwinding locks in error and
+ * success paths
+ */
+ struct drm_gpuvm_bo *vm_bo, *last_locked = NULL;
+ int ret = 0;
+
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm);
+
+ if (resv == obj->resv)
+ continue;
+
+ ret = dma_resv_lock(resv, ticket);
+
+ /*
+ * Since we already skip the case when the VM and obj
+ * share a resv (ie. _NO_SHARE objs), we don't expect
+ * to hit a double-locking scenario... which the lock
+ * unwinding cannot really cope with.
+ */
+ WARN_ON(ret == -EALREADY);
+
+ /*
+ * Don't bother with the slow-lock / backoff / retry sequence;
+ * if we can't get the lock, just give up and move on to
+ * the next object.
+ */
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * Hold a ref to prevent the vm_bo from being freed
+ * and removed from the obj's gpuva list, as that
+ * would result in missing the unlock below
+ */
+ drm_gpuvm_bo_get(vm_bo);
+
+ last_locked = vm_bo;
+ }
+
+ fn(obj);
+
+out_unlock:
+ if (last_locked) {
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm);
+
+ if (resv == obj->resv)
+ continue;
+
+ dma_resv_unlock(resv);
+
+ /* Drop the ref taken while locking: */
+ drm_gpuvm_bo_put(vm_bo);
+
+ if (last_locked == vm_bo)
+ break;
+ }
+ }
+
+ return ret == 0;
+}
+
+static bool
+purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (!is_purgeable(to_msm_bo(obj)))
return false;
@@ -52,13 +121,11 @@ purge(struct drm_gem_object *obj)
if (msm_gem_active(obj))
return false;
- msm_gem_purge(obj);
-
- return true;
+ return with_vm_locks(ticket, msm_gem_purge, obj);
}
static bool
-evict(struct drm_gem_object *obj)
+evict(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (is_unevictable(to_msm_bo(obj)))
return false;
@@ -66,43 +133,42 @@ evict(struct drm_gem_object *obj)
if (msm_gem_active(obj))
return false;
- msm_gem_evict(obj);
-
- return true;
+ return with_vm_locks(ticket, msm_gem_evict, obj);
}
static bool
wait_for_idle(struct drm_gem_object *obj)
{
- enum dma_resv_usage usage = dma_resv_usage_rw(true);
+ enum dma_resv_usage usage = DMA_RESV_USAGE_BOOKKEEP;
return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
}
static bool
-active_purge(struct drm_gem_object *obj)
+active_purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (!wait_for_idle(obj))
return false;
- return purge(obj);
+ return purge(obj, ticket);
}
static bool
-active_evict(struct drm_gem_object *obj)
+active_evict(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (!wait_for_idle(obj))
return false;
- return evict(obj);
+ return evict(obj, ticket);
}
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv = shrinker->private_data;
+ struct ww_acquire_ctx ticket;
struct {
struct drm_gem_lru *lru;
- bool (*shrink)(struct drm_gem_object *obj);
+ bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket);
bool cond;
unsigned long freed;
unsigned long remaining;
@@ -122,8 +188,9 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
continue;
stages[i].freed =
drm_gem_lru_scan(stages[i].lru, nr,
- &stages[i].remaining,
- stages[i].shrink);
+ &stages[i].remaining,
+ stages[i].shrink,
+ &ticket);
nr -= stages[i].freed;
freed += stages[i].freed;
remaining += stages[i].remaining;
@@ -164,7 +231,7 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
static const int vmap_shrink_limit = 15;
static bool
-vmap_shrink(struct drm_gem_object *obj)
+vmap_shrink(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (!is_vunmapable(to_msm_bo(obj)))
return false;
@@ -192,7 +259,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
unmapped += drm_gem_lru_scan(lrus[idx],
vmap_shrink_limit - unmapped,
&remaining,
- vmap_shrink);
+ vmap_shrink,
+ NULL);
}
*(unsigned long *)ptr += unmapped;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index d4f71bb54e84..5f8e939a5906 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -4,6 +4,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/dma-fence-unwrap.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -16,6 +17,7 @@
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
+#include "msm_syncobj.h"
/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
* error msgs for debugging, but we don't spam dmesg by default
@@ -30,7 +32,7 @@
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
- uint32_t nr_cmds)
+ uint32_t nr_cmds, u64 drm_client_id)
{
static atomic_t ident = ATOMIC_INIT(0);
struct msm_gem_submit *submit;
@@ -54,7 +56,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return ERR_PTR(ret);
}
- ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue);
+ ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue,
+ drm_client_id);
if (ret) {
kfree(submit->hw_fence);
kfree(submit);
@@ -63,7 +66,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
kref_init(&submit->ref);
submit->dev = dev;
- submit->aspace = queue->ctx->aspace;
+ submit->vm = msm_context_vm(dev, queue->ctx);
submit->gpu = gpu;
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
@@ -191,6 +194,7 @@ out:
static int submit_lookup_cmds(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
+ struct msm_context *ctx = file->driver_priv;
unsigned i;
size_t sz;
int ret = 0;
@@ -222,6 +226,20 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
goto out;
}
+ if (msm_context_is_vmbind(ctx)) {
+ if (submit_cmd.nr_relocs) {
+ ret = SUBMIT_ERROR(EINVAL, submit, "nr_relocs must be zero");
+ goto out;
+ }
+
+ if (submit_cmd.submit_idx || submit_cmd.submit_offset) {
+ ret = SUBMIT_ERROR(EINVAL, submit, "submit_idx/offset must be zero");
+ goto out;
+ }
+
+ submit->cmd[i].iova = submit_cmd.iova;
+ }
+
submit->cmd[i].type = submit_cmd.type;
submit->cmd[i].size = submit_cmd.size / 4;
submit->cmd[i].offset = submit_cmd.submit_offset / 4;
@@ -256,24 +274,48 @@ out:
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
+ unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ struct drm_exec *exec = &submit->exec;
int ret;
- drm_exec_init(&submit->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, submit->nr_bos);
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ flags |= DRM_EXEC_IGNORE_DUPLICATES;
+
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
+
+ drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ return ret;
+
+ ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_exec_lock_obj(&submit->exec,
+ drm_gpuvm_resv_obj(submit->vm));
+ drm_exec_retry_on_contention(&submit->exec);
+ if (ret)
+ return ret;
for (unsigned i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
drm_exec_retry_on_contention(&submit->exec);
if (ret)
- goto error;
+ return ret;
}
}
return 0;
-
-error:
- return ret;
}
static int submit_fence_sync(struct msm_gem_submit *submit)
@@ -308,10 +350,10 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
/* if locking succeeded, pin bo: */
- vma = msm_gem_get_vma_locked(obj, submit->aspace);
+ vma = msm_gem_get_vma_locked(obj, submit->vm);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
break;
@@ -321,7 +363,8 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
if (ret)
break;
- submit->bos[i].iova = vma->iova;
+ submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->vm_bo);
+ submit->bos[i].iova = vma->va.addr;
}
/*
@@ -358,9 +401,18 @@ static void submit_unpin_objects(struct msm_gem_submit *submit)
static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
- int i;
+ struct msm_gem_vm *vm = to_msm_vm(submit->vm);
+ struct dma_fence *last_fence;
+
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ drm_gpuvm_resv_add_fence(submit->vm, &submit->exec,
+ submit->user_fence,
+ DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_BOOKKEEP);
+ return;
+ }
- for (i = 0; i < submit->nr_bos; i++) {
+ for (unsigned i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
@@ -370,6 +422,10 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
dma_resv_add_fence(obj->resv, submit->user_fence,
DMA_RESV_USAGE_READ);
}
+
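+ /*
+ * Also accumulate a merged fence on the VM itself, covering all
+ * submits against it; msm_gem_vm_free() drops it, and VM teardown
+ * can wait on it before tearing down mappings.
+ */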
+ last_fence = vm->last_fence;
+ vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
+ dma_fence_put(last_fence);
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
@@ -458,14 +514,14 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
+ if (submit->exec.objects)
+ drm_exec_fini(&submit->exec);
+
if (error) {
submit_unpin_objects(submit);
/* job wasn't enqueued to scheduler, so early retirement: */
msm_submit_retire(submit);
}
-
- if (submit->exec.objects)
- drm_exec_fini(&submit->exec);
}
void msm_submit_retire(struct msm_gem_submit *submit)
@@ -474,191 +530,29 @@ void msm_submit_retire(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
+ struct drm_gpuvm_bo *vm_bo = submit->bos[i].vm_bo;
+ msm_gem_lock(obj);
+ drm_gpuvm_bo_put(vm_bo);
+ msm_gem_unlock(obj);
drm_gem_object_put(obj);
}
}
-struct msm_submit_post_dep {
- struct drm_syncobj *syncobj;
- uint64_t point;
- struct dma_fence_chain *chain;
-};
-
-static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
- struct drm_file *file,
- uint64_t in_syncobjs_addr,
- uint32_t nr_in_syncobjs,
- size_t syncobj_stride)
-{
- struct drm_syncobj **syncobjs = NULL;
- struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
- int ret = 0;
- uint32_t i, j;
-
- syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (!syncobjs)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < nr_in_syncobjs; ++i) {
- uint64_t address = in_syncobjs_addr + i * syncobj_stride;
-
- if (copy_from_user(&syncobj_desc,
- u64_to_user_ptr(address),
- min(syncobj_stride, sizeof(syncobj_desc)))) {
- ret = -EFAULT;
- break;
- }
-
- if (syncobj_desc.point &&
- !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
- ret = SUBMIT_ERROR(EOPNOTSUPP, submit, "syncobj timeline unsupported");
- break;
- }
-
- if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
- ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
- break;
- }
-
- ret = drm_sched_job_add_syncobj_dependency(&submit->base, file,
- syncobj_desc.handle, syncobj_desc.point);
- if (ret)
- break;
-
- if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
- syncobjs[i] =
- drm_syncobj_find(file, syncobj_desc.handle);
- if (!syncobjs[i]) {
- ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj handle: %u", i);
- break;
- }
- }
- }
-
- if (ret) {
- for (j = 0; j <= i; ++j) {
- if (syncobjs[j])
- drm_syncobj_put(syncobjs[j]);
- }
- kfree(syncobjs);
- return ERR_PTR(ret);
- }
- return syncobjs;
-}
-
-static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
- uint32_t nr_syncobjs)
-{
- uint32_t i;
-
- for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
- if (syncobjs[i])
- drm_syncobj_replace_fence(syncobjs[i], NULL);
- }
-}
-
-static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
- struct drm_file *file,
- uint64_t syncobjs_addr,
- uint32_t nr_syncobjs,
- size_t syncobj_stride)
-{
- struct msm_submit_post_dep *post_deps;
- struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
- int ret = 0;
- uint32_t i, j;
-
- post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (!post_deps)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < nr_syncobjs; ++i) {
- uint64_t address = syncobjs_addr + i * syncobj_stride;
-
- if (copy_from_user(&syncobj_desc,
- u64_to_user_ptr(address),
- min(syncobj_stride, sizeof(syncobj_desc)))) {
- ret = -EFAULT;
- break;
- }
-
- post_deps[i].point = syncobj_desc.point;
-
- if (syncobj_desc.flags) {
- ret = UERR(EINVAL, dev, "invalid syncobj flags");
- break;
- }
-
- if (syncobj_desc.point) {
- if (!drm_core_check_feature(dev,
- DRIVER_SYNCOBJ_TIMELINE)) {
- ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
- break;
- }
-
- post_deps[i].chain = dma_fence_chain_alloc();
- if (!post_deps[i].chain) {
- ret = -ENOMEM;
- break;
- }
- }
-
- post_deps[i].syncobj =
- drm_syncobj_find(file, syncobj_desc.handle);
- if (!post_deps[i].syncobj) {
- ret = UERR(EINVAL, dev, "invalid syncobj handle");
- break;
- }
- }
-
- if (ret) {
- for (j = 0; j <= i; ++j) {
- dma_fence_chain_free(post_deps[j].chain);
- if (post_deps[j].syncobj)
- drm_syncobj_put(post_deps[j].syncobj);
- }
-
- kfree(post_deps);
- return ERR_PTR(ret);
- }
-
- return post_deps;
-}
-
-static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
- uint32_t count, struct dma_fence *fence)
-{
- uint32_t i;
-
- for (i = 0; post_deps && i < count; ++i) {
- if (post_deps[i].chain) {
- drm_syncobj_add_point(post_deps[i].syncobj,
- post_deps[i].chain,
- fence, post_deps[i].point);
- post_deps[i].chain = NULL;
- } else {
- drm_syncobj_replace_fence(post_deps[i].syncobj,
- fence);
- }
- }
-}
-
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
struct msm_gem_submit *submit = NULL;
struct msm_gpu *gpu = priv->gpu;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
- struct msm_submit_post_dep *post_deps = NULL;
+ struct msm_syncobj_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
struct sync_file *sync_file = NULL;
+ unsigned cmds_to_parse;
int out_fence_fd = -1;
unsigned i;
int ret;
@@ -669,10 +563,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
- DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
- return -EPERM;
- }
+ if (to_msm_vm(ctx->vm)->unusable)
+ return UERR(EPIPE, dev, "context is unusable");
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
@@ -693,6 +585,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!queue)
return -ENOENT;
+ if (queue->flags & MSM_SUBMITQUEUE_VM_BIND) {
+ ret = UERR(EINVAL, dev, "Invalid queue type");
+ goto out_post_unlock;
+ }
+
ring = gpu->rb[queue->ring_nr];
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
@@ -703,7 +600,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
- submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
+ submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds,
+ file->client_id);
if (IS_ERR(submit)) {
ret = PTR_ERR(submit);
goto out_post_unlock;
@@ -735,10 +633,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
- syncobjs_to_reset = msm_parse_deps(submit, file,
- args->in_syncobjs,
- args->nr_in_syncobjs,
- args->syncobj_stride);
+ syncobjs_to_reset = msm_syncobj_parse_deps(dev, &submit->base,
+ file, args->in_syncobjs,
+ args->nr_in_syncobjs,
+ args->syncobj_stride);
if (IS_ERR(syncobjs_to_reset)) {
ret = PTR_ERR(syncobjs_to_reset);
goto out_unlock;
@@ -746,10 +644,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
- post_deps = msm_parse_post_deps(dev, file,
- args->out_syncobjs,
- args->nr_out_syncobjs,
- args->syncobj_stride);
+ post_deps = msm_syncobj_parse_post_deps(dev, file,
+ args->out_syncobjs,
+ args->nr_out_syncobjs,
+ args->syncobj_stride);
if (IS_ERR(post_deps)) {
ret = PTR_ERR(post_deps);
goto out_unlock;
@@ -779,7 +677,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- for (i = 0; i < args->nr_cmds; i++) {
+ cmds_to_parse = msm_context_is_vmbind(ctx) ? 0 : args->nr_cmds;
+
+ for (i = 0; i < cmds_to_parse; i++) {
struct drm_gem_object *obj;
uint64_t iova;
@@ -810,7 +710,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
}
- submit->nr_cmds = i;
+ submit->nr_cmds = args->nr_cmds;
idr_preload(GFP_KERNEL);
@@ -882,6 +782,18 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit_attach_object_fences(submit);
+ if (msm_context_is_vmbind(ctx)) {
+ /*
+ * If we are not using VM_BIND, submit_pin_objects() will validate
+ * just the BOs attached to the submit. In that case we don't
+ * need to validate the _entire_ vm, because userspace tracked
+ * what BOs are associated with the submit.
+ */
+ ret = drm_gpuvm_validate(submit->vm, &submit->exec);
+ if (ret)
+ goto out;
+ }
+
/* The scheduler owns a ref now: */
msm_gem_submit_get(submit);
@@ -892,10 +804,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->fence = submit->fence_id;
queue->last_fence = submit->fence_id;
- msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
- msm_process_post_deps(post_deps, args->nr_out_syncobjs,
- submit->user_fence);
-
+ msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs);
+ msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, submit->user_fence);
out:
submit_cleanup(submit, !!ret);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 11e842dda73c..3cd8562a5109 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -4,73 +4,319 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include "drm/drm_file.h"
+#include "drm/msm_drm.h"
+#include "linux/file.h"
+#include "linux/sync_file.h"
+
#include "msm_drv.h"
-#include "msm_fence.h"
#include "msm_gem.h"
+#include "msm_gpu.h"
#include "msm_mmu.h"
+#include "msm_syncobj.h"
+
+#define vm_dbg(fmt, ...) pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
+
+static uint vm_log_shift = 0;
+MODULE_PARM_DESC(vm_log_shift, "Log2 of VM op log length");
+module_param_named(vm_log_shift, vm_log_shift, uint, 0600);
+
+/**
+ * struct msm_vm_map_op - create new pgtable mapping
+ */
+struct msm_vm_map_op {
+ /** @iova: start address for mapping */
+ uint64_t iova;
+ /** @range: size of the region to map */
+ uint64_t range;
+ /** @offset: offset into @sgt to map */
+ uint64_t offset;
+ /** @sgt: pages to map, or NULL for a PRR mapping */
+ struct sg_table *sgt;
+ /** @prot: the mapping protection flags */
+ int prot;
+
+ /**
+ * @queue_id: The id of the submitqueue the operation is performed
+ * on, or zero for (in particular) UNMAP ops triggered outside of
+ * a submitqueue (ie. process cleanup)
+ */
+ int queue_id;
+};
+
+/**
+ * struct msm_vm_unmap_op - unmap a range of pages from pgtable
+ */
+struct msm_vm_unmap_op {
+ /** @iova: start address for unmap */
+ uint64_t iova;
+ /** @range: size of region to unmap */
+ uint64_t range;
+
+ /** @reason: The reason for the unmap */
+ const char *reason;
+
+ /**
+ * @queue_id: The id of the submitqueue the operation is performed
+ * on, or zero for (in particular) UNMAP ops triggered outside of
+ * a submitqueue (ie. process cleanup)
+ */
+ int queue_id;
+};
+
+/**
+ * struct msm_vm_op - A MAP or UNMAP operation
+ */
+struct msm_vm_op {
+ /** @op: The operation type */
+ enum {
+ MSM_VM_OP_MAP = 1,
+ MSM_VM_OP_UNMAP,
+ } op;
+ union {
+ /** @map: Parameters used if op == MSM_VM_OP_MAP */
+ struct msm_vm_map_op map;
+ /** @unmap: Parameters used if op == MSM_VM_OP_UNMAP */
+ struct msm_vm_unmap_op unmap;
+ };
+ /** @node: list head in msm_vm_bind_job::vm_ops */
+ struct list_head node;
+
+ /**
+ * @obj: backing object for pages to be mapped/unmapped
+ *
+ * Async unmap ops, in particular, must hold a reference to the
+ * original GEM object backing the mapping that will be unmapped.
+ * But the same can be required in the map path, for example if
+ * there is not a corresponding unmap op, such as process exit.
+ *
+ * This ensures that the pages backing the mapping are not freed
+ * before the mapping is torn down.
+ */
+ struct drm_gem_object *obj;
+};
+
+/**
+ * struct msm_vm_bind_job - Tracking for a VM_BIND ioctl
+ *
+ * A table of userspace requested VM updates (MSM_VM_BIND_OP_UNMAP/MAP/MAP_NULL)
+ * gets applied to the vm, generating a list of VM ops (MSM_VM_OP_MAP/UNMAP)
+ * which are applied to the pgtables asynchronously. For example a userspace
+ * requested MSM_VM_BIND_OP_MAP could end up generating both an MSM_VM_OP_UNMAP
+ * to unmap an existing mapping, and a MSM_VM_OP_MAP to apply the new mapping.
+ */
+struct msm_vm_bind_job {
+ /** @base: base class for drm_sched jobs */
+ struct drm_sched_job base;
+ /** @vm: The VM being operated on */
+ struct drm_gpuvm *vm;
+ /** @fence: The fence that is signaled when job completes */
+ struct dma_fence *fence;
+ /** @queue: The queue that the job runs on */
+ struct msm_gpu_submitqueue *queue;
+ /** @prealloc: Tracking for pre-allocated MMU pgtable pages */
+ struct msm_mmu_prealloc prealloc;
+ /** @vm_ops: a list of struct msm_vm_op */
+ struct list_head vm_ops;
+ /** @bos_pinned: are the GEM objects being bound pinned? */
+ bool bos_pinned;
+ /** @nr_ops: the number of userspace requested ops */
+ unsigned int nr_ops;
+ /**
+ * @ops: the userspace requested ops
+ *
+ * The userspace requested ops are copied/parsed and validated
+ * before we start applying the updates to try to do as much up-
+ * front error checking as possible, to avoid the VM being in an
+ * undefined state due to partially executed VM_BIND.
+ *
+ * This table also serves to hold a reference to the backing GEM
+ * objects.
+ */
+ struct msm_vm_bind_op {
+ uint32_t op;
+ uint32_t flags;
+ union {
+ struct drm_gem_object *obj;
+ uint32_t handle;
+ };
+ uint64_t obj_offset;
+ uint64_t iova;
+ uint64_t range;
+ } ops[];
+};
+
+#define job_foreach_bo(obj, _job) \
+ for (unsigned i = 0; i < (_job)->nr_ops; i++) \
+ if ((obj = (_job)->ops[i].obj))
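+
+/*
+ * Note the for+if shape: the if both skips ops with no backing GEM
+ * object (UNMAP/MAP_NULL, where the handle must be zero) and binds
+ * @obj for the loop body, so users of the macro only iterate over
+ * ops that reference a BO.
+ */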
+
+static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job)
+{
+ return container_of(job, struct msm_vm_bind_job, base);
+}
static void
-msm_gem_address_space_destroy(struct kref *kref)
+msm_gem_vm_free(struct drm_gpuvm *gpuvm)
{
- struct msm_gem_address_space *aspace = container_of(kref,
- struct msm_gem_address_space, kref);
+ struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base);
- drm_mm_takedown(&aspace->mm);
- if (aspace->mmu)
- aspace->mmu->funcs->destroy(aspace->mmu);
- put_pid(aspace->pid);
- kfree(aspace);
+ drm_mm_takedown(&vm->mm);
+ if (vm->mmu)
+ vm->mmu->funcs->destroy(vm->mmu);
+ dma_fence_put(vm->last_fence);
+ put_pid(vm->pid);
+ kfree(vm->log);
+ kfree(vm);
}
+/**
+ * msm_gem_vm_unusable() - Mark a VM as unusable
+ * @gpuvm: the VM to mark unusable
+ */
+void
+msm_gem_vm_unusable(struct drm_gpuvm *gpuvm)
+{
+ struct msm_gem_vm *vm = to_msm_vm(gpuvm);
+ uint32_t vm_log_len = (1 << vm->log_shift);
+ uint32_t vm_log_mask = vm_log_len - 1;
+ uint32_t nr_vm_logs;
+ int first;
+
+ vm->unusable = true;
+
+ /* Bail if no log, or empty log: */
+ if (!vm->log || !vm->log[0].op)
+ return;
+
+ mutex_lock(&vm->mmu_lock);
+
+ /*
+ * log_idx is the next entry to overwrite, meaning it is the oldest, or
+ * first, entry (other than the special case handled below where the
+ * log hasn't wrapped around yet)
+ */
+ first = vm->log_idx;
+
+ if (!vm->log[first].op) {
+ /*
+ * If the next log entry has not been written yet, then only
+ * entries 0 to idx-1 are valid (ie. we haven't wrapped around
+ * yet)
+ */
+ nr_vm_logs = first;
+ first = 0;
+ } else {
+ nr_vm_logs = vm_log_len;
+ }
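+
+ /*
+ * e.g. with a 16 entry log that has wrapped, log_idx points at
+ * the oldest entry, so all 16 entries are printed starting there
+ * and masked back into range; an unwrapped log prints 0..idx-1.
+ */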
+
+ pr_err("vm-log:\n");
+ for (int i = 0; i < nr_vm_logs; i++) {
+ int idx = (i + first) & vm_log_mask;
+ struct msm_gem_vm_log_entry *e = &vm->log[idx];
+ pr_err(" - %s:%d: 0x%016llx-0x%016llx\n",
+ e->op, e->queue_id, e->iova,
+ e->iova + e->range);
+ }
+
+ mutex_unlock(&vm->mmu_lock);
+}
-void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
+static void
+vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id)
{
- if (aspace)
- kref_put(&aspace->kref, msm_gem_address_space_destroy);
+ int idx;
+
+ if (!vm->managed)
+ lockdep_assert_held(&vm->mmu_lock);
+
+ vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range);
+
+ if (!vm->log)
+ return;
+
+ idx = vm->log_idx;
+ vm->log[idx].op = op;
+ vm->log[idx].iova = iova;
+ vm->log[idx].range = range;
+ vm->log[idx].queue_id = queue_id;
+ vm->log_idx = (vm->log_idx + 1) & ((1 << vm->log_shift) - 1);
+}
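+
+/*
+ * The log is a power-of-two ring buffer: e.g. with vm_log_shift=4 it
+ * holds 16 entries, and the mask above wraps log_idx from 15 back to
+ * 0 so that the oldest entries are overwritten first.
+ */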
+
+static void
+vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
+{
+ const char *reason = op->reason;
+
+ if (!reason)
+ reason = "unmap";
+
+ vm_log(vm, reason, op->iova, op->range, op->queue_id);
+
+ vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
}
-struct msm_gem_address_space *
-msm_gem_address_space_get(struct msm_gem_address_space *aspace)
+static int
+vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op)
{
- if (!IS_ERR_OR_NULL(aspace))
- kref_get(&aspace->kref);
+ vm_log(vm, "map", op->iova, op->range, op->queue_id);
- return aspace;
+ return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset,
+ op->range, op->prot);
}
/* Actually unmap memory for the vma */
-void msm_gem_vma_purge(struct msm_gem_vma *vma)
+void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason)
{
- struct msm_gem_address_space *aspace = vma->aspace;
- unsigned size = vma->node.size;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
/* Don't do anything if the memory isn't mapped */
- if (!vma->mapped)
+ if (!msm_vma->mapped)
return;
- aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
+ /*
+ * The mmu_lock is only needed when preallocation is used. But
+ * in that case we don't need to worry about recursion into
+ * shrinker
+ */
+ if (!vm->managed)
+ mutex_lock(&vm->mmu_lock);
- vma->mapped = false;
+ vm_unmap_op(vm, &(struct msm_vm_unmap_op){
+ .iova = vma->va.addr,
+ .range = vma->va.range,
+ .reason = reason,
+ });
+
+ if (!vm->managed)
+ mutex_unlock(&vm->mmu_lock);
+
+ msm_vma->mapped = false;
}
/* Map and pin vma: */
int
-msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
- struct sg_table *sgt, int size)
+msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
{
- struct msm_gem_address_space *aspace = vma->aspace;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
int ret;
- if (GEM_WARN_ON(!vma->iova))
+ if (GEM_WARN_ON(!vma->va.addr))
return -EINVAL;
- if (vma->mapped)
+ if (msm_vma->mapped)
return 0;
- vma->mapped = true;
+ msm_vma->mapped = true;
- if (!aspace)
- return 0;
+ /*
+ * The mmu_lock is only needed when preallocation is used. But
+ * in that case we don't need to worry about recursion into
+ * shrinker
+ */
+ if (!vm->managed)
+ mutex_lock(&vm->mmu_lock);
/*
* NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
@@ -81,97 +327,1205 @@ msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
* Revisit this if we can come up with a scheme to pre-alloc pages
* for the pgtable in map/unmap ops.
*/
- ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot);
+ ret = vm_map_op(vm, &(struct msm_vm_map_op){
+ .iova = vma->va.addr,
+ .range = vma->va.range,
+ .offset = vma->gem.offset,
+ .sgt = sgt,
+ .prot = prot,
+ });
- if (ret) {
- vma->mapped = false;
- }
+ if (!vm->managed)
+ mutex_unlock(&vm->mmu_lock);
+
+ if (ret)
+ msm_vma->mapped = false;
return ret;
}
/* Close an iova. Warn if it is still in use */
-void msm_gem_vma_close(struct msm_gem_vma *vma)
+void msm_gem_vma_close(struct drm_gpuva *vma)
{
- struct msm_gem_address_space *aspace = vma->aspace;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
- GEM_WARN_ON(vma->mapped);
+ GEM_WARN_ON(msm_vma->mapped);
- spin_lock(&aspace->lock);
- if (vma->iova)
- drm_mm_remove_node(&vma->node);
- spin_unlock(&aspace->lock);
+ drm_gpuvm_resv_assert_held(&vm->base);
+
+ if (vma->gem.obj)
+ msm_gem_assert_locked(vma->gem.obj);
- vma->iova = 0;
+ if (vma->va.addr && vm->managed)
+ drm_mm_remove_node(&msm_vma->node);
- msm_gem_address_space_put(aspace);
+ drm_gpuva_remove(vma);
+ drm_gpuva_unlink(vma);
+
+ kfree(vma);
}
-struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
+/* Create a new vma and allocate an iova for it */
+struct drm_gpuva *
+msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
+ u64 offset, u64 range_start, u64 range_end)
{
+ struct msm_gem_vm *vm = to_msm_vm(gpuvm);
+ struct drm_gpuvm_bo *vm_bo;
struct msm_gem_vma *vma;
+ int ret;
+
+ drm_gpuvm_resv_assert_held(&vm->base);
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+
+ if (vm->managed) {
+ BUG_ON(offset != 0);
+ BUG_ON(!obj); /* NULL mappings not valid for kernel managed VM */
+ ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
+ obj->size, PAGE_SIZE, 0,
+ range_start, range_end, 0);
+
+ if (ret)
+ goto err_free_vma;
+
+ range_start = vma->node.start;
+ range_end = range_start + obj->size;
+ }
+
+ if (obj)
+ GEM_WARN_ON((range_end - range_start) > obj->size);
+
+ drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
+ vma->mapped = false;
+
+ ret = drm_gpuva_insert(&vm->base, &vma->base);
+ if (ret)
+ goto err_free_range;
- vma->aspace = aspace;
+ if (!obj)
+ return &vma->base;
- return vma;
+ vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj);
+ if (IS_ERR(vm_bo)) {
+ ret = PTR_ERR(vm_bo);
+ goto err_va_remove;
+ }
+
+ drm_gpuvm_bo_extobj_add(vm_bo);
+ drm_gpuva_link(&vma->base, vm_bo);
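+ /*
+ * drm_gpuva_link() takes its own vm_bo reference, so dropping
+ * the drm_gpuvm_bo_obtain() reference here should never be the
+ * last one; the GEM_WARN_ON fires if the vm_bo was freed.
+ */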
+ GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo));
+
+ return &vma->base;
+
+err_va_remove:
+ drm_gpuva_remove(&vma->base);
+err_free_range:
+ if (vm->managed)
+ drm_mm_remove_node(&vma->node);
+err_free_vma:
+ kfree(vma);
+ return ERR_PTR(ret);
}
-/* Initialize a new vma and allocate an iova for it */
-int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
- u64 range_start, u64 range_end)
+static int
+msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{
- struct msm_gem_address_space *aspace = vma->aspace;
+ struct drm_gem_object *obj = vm_bo->obj;
+ struct drm_gpuva *vma;
int ret;
- if (GEM_WARN_ON(!aspace))
- return -EINVAL;
+ vm_dbg("validate: %p", obj);
+
+ msm_gem_assert_locked(obj);
- if (GEM_WARN_ON(vma->iova))
- return -EBUSY;
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ ret = msm_gem_pin_vma_locked(obj, vma);
+ if (ret)
+ return ret;
+ }
- spin_lock(&aspace->lock);
- ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
- size, PAGE_SIZE, 0,
- range_start, range_end, 0);
- spin_unlock(&aspace->lock);
+ return 0;
+}
- if (ret)
- return ret;
+struct op_arg {
+ unsigned flags;
+ struct msm_vm_bind_job *job;
+};
- vma->iova = vma->node.start;
- vma->mapped = false;
+static void
+vm_op_enqueue(struct op_arg *arg, struct msm_vm_op _op)
+{
+ struct msm_vm_op *op = kmalloc(sizeof(*op), GFP_KERNEL);
+ *op = _op;
+ list_add_tail(&op->node, &arg->job->vm_ops);
+
+ if (op->obj)
+ drm_gem_object_get(op->obj);
+}
+
+static struct drm_gpuva *
+vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
+{
+ return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset,
+ op->va.addr, op->va.addr + op->va.range);
+}
+
+static int
+msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg)
+{
+ struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct drm_gem_object *obj = op->map.gem.obj;
+ struct drm_gpuva *vma;
+ struct sg_table *sgt;
+ unsigned prot;
+
+ vma = vma_from_op(arg, &op->map);
+ if (WARN_ON(IS_ERR(vma)))
+ return PTR_ERR(vma);
+
+ vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
+ vma->va.addr, vma->va.range);
+
+ vma->flags = ((struct op_arg *)arg)->flags;
+
+ if (obj) {
+ sgt = to_msm_bo(obj)->sgt;
+ prot = msm_gem_prot(obj);
+ } else {
+ sgt = NULL;
+ prot = IOMMU_READ | IOMMU_WRITE;
+ }
- kref_get(&aspace->kref);
+ vm_op_enqueue(arg, (struct msm_vm_op){
+ .op = MSM_VM_OP_MAP,
+ .map = {
+ .sgt = sgt,
+ .iova = vma->va.addr,
+ .range = vma->va.range,
+ .offset = vma->gem.offset,
+ .prot = prot,
+ .queue_id = job->queue->id,
+ },
+ .obj = vma->gem.obj,
+ });
+
+ to_msm_vma(vma)->mapped = true;
+
+ return 0;
+}
+
+static int
+msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
+{
+ struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct drm_gpuvm *vm = job->vm;
+ struct drm_gpuva *orig_vma = op->remap.unmap->va;
+ struct drm_gpuva *prev_vma = NULL, *next_vma = NULL;
+ struct drm_gpuvm_bo *vm_bo = orig_vma->vm_bo;
+ bool mapped = to_msm_vma(orig_vma)->mapped;
+ unsigned flags;
+
+ vm_dbg("orig_vma: %p:%p:%p: %016llx %016llx", vm, orig_vma,
+ orig_vma->gem.obj, orig_vma->va.addr, orig_vma->va.range);
+
+ if (mapped) {
+ uint64_t unmap_start, unmap_range;
+
+ drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
+
+ vm_op_enqueue(arg, (struct msm_vm_op){
+ .op = MSM_VM_OP_UNMAP,
+ .unmap = {
+ .iova = unmap_start,
+ .range = unmap_range,
+ .queue_id = job->queue->id,
+ },
+ .obj = orig_vma->gem.obj,
+ });
+
+ /*
+ * Part of this GEM obj is still mapped, but we're going to kill the
+ * existing VMA and replace it with one or two new ones (ie. two if
+ * the unmapped range is in the middle of the existing (unmap) VMA).
+ * So just set the state to unmapped:
+ */
+ to_msm_vma(orig_vma)->mapped = false;
+ }
+
+ /*
+ * Hold a ref to the vm_bo between the msm_gem_vma_close() and the
+ * creation of the new prev/next vma's, in case the vm_bo is tracked
+ * in the VM's evict list:
+ */
+ if (vm_bo)
+ drm_gpuvm_bo_get(vm_bo);
+
+ /*
+ * The prev_vma and/or next_vma are replacing the unmapped vma, and
+ * therefore should preserve its flags:
+ */
+ flags = orig_vma->flags;
+
+ msm_gem_vma_close(orig_vma);
+
+ if (op->remap.prev) {
+ prev_vma = vma_from_op(arg, op->remap.prev);
+ if (WARN_ON(IS_ERR(prev_vma)))
+ return PTR_ERR(prev_vma);
+
+ vm_dbg("prev_vma: %p:%p: %016llx %016llx", vm, prev_vma, prev_vma->va.addr, prev_vma->va.range);
+ to_msm_vma(prev_vma)->mapped = mapped;
+ prev_vma->flags = flags;
+ }
+
+ if (op->remap.next) {
+ next_vma = vma_from_op(arg, op->remap.next);
+ if (WARN_ON(IS_ERR(next_vma)))
+ return PTR_ERR(next_vma);
+
+ vm_dbg("next_vma: %p:%p: %016llx %016llx", vm, next_vma, next_vma->va.addr, next_vma->va.range);
+ to_msm_vma(next_vma)->mapped = mapped;
+ next_vma->flags = flags;
+ }
+
+ if (!mapped)
+ drm_gpuvm_bo_evict(vm_bo, true);
+
+ /* Drop the previous ref: */
+ drm_gpuvm_bo_put(vm_bo);
return 0;
}
-struct msm_gem_address_space *
-msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
- u64 va_start, u64 size)
+static int
+msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg)
+{
+ struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct drm_gpuva *vma = op->unmap.va;
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
+
+ vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
+ vma->va.addr, vma->va.range);
+
+ if (!msm_vma->mapped)
+ goto out_close;
+
+ vm_op_enqueue(arg, (struct msm_vm_op){
+ .op = MSM_VM_OP_UNMAP,
+ .unmap = {
+ .iova = vma->va.addr,
+ .range = vma->va.range,
+ .queue_id = job->queue->id,
+ },
+ .obj = vma->gem.obj,
+ });
+
+ msm_vma->mapped = false;
+
+out_close:
+ msm_gem_vma_close(vma);
+
+ return 0;
+}
+
+static const struct drm_gpuvm_ops msm_gpuvm_ops = {
+ .vm_free = msm_gem_vm_free,
+ .vm_bo_validate = msm_gem_vm_bo_validate,
+ .sm_step_map = msm_gem_vm_sm_step_map,
+ .sm_step_remap = msm_gem_vm_sm_step_remap,
+ .sm_step_unmap = msm_gem_vm_sm_step_unmap,
+};
+
+static struct dma_fence *
+msm_vma_job_run(struct drm_sched_job *_job)
+{
+ struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ struct drm_gem_object *obj;
+ int ret = vm->unusable ? -EINVAL : 0;
+
+ vm_dbg("");
+
+ mutex_lock(&vm->mmu_lock);
+ vm->mmu->prealloc = &job->prealloc;
+
+ while (!list_empty(&job->vm_ops)) {
+ struct msm_vm_op *op =
+ list_first_entry(&job->vm_ops, struct msm_vm_op, node);
+
+ switch (op->op) {
+ case MSM_VM_OP_MAP:
+ /*
+ * On error, stop trying to map new things.. but we
+ * still want to process the unmaps (or in particular,
+ * the drm_gem_object_put()s)
+ */
+ if (!ret)
+ ret = vm_map_op(vm, &op->map);
+ break;
+ case MSM_VM_OP_UNMAP:
+ vm_unmap_op(vm, &op->unmap);
+ break;
+ }
+ drm_gem_object_put(op->obj);
+ list_del(&op->node);
+ kfree(op);
+ }
+
+ vm->mmu->prealloc = NULL;
+ mutex_unlock(&vm->mmu_lock);
+
+ /*
+ * We failed to perform at least _some_ of the pgtable updates, so
+ * now the VM is in an undefined state. Game over!
+ */
+ if (ret)
+ msm_gem_vm_unusable(job->vm);
+
+ job_foreach_bo (obj, job) {
+ msm_gem_lock(obj);
+ msm_gem_unpin_locked(obj);
+ msm_gem_unlock(obj);
+ }
+
+ /* VM_BIND ops are synchronous, so no fence to wait on: */
+ return NULL;
+}
+
+static void
+msm_vma_job_free(struct drm_sched_job *_job)
{
- struct msm_gem_address_space *aspace;
+ struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ struct drm_gem_object *obj;
+
+ vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc);
+
+ atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight);
+
+ drm_sched_job_cleanup(_job);
+
+ job_foreach_bo (obj, job)
+ drm_gem_object_put(obj);
+
+ msm_submitqueue_put(job->queue);
+ dma_fence_put(job->fence);
+
+ /* In error paths, we could have unexecuted ops: */
+ while (!list_empty(&job->vm_ops)) {
+ struct msm_vm_op *op =
+ list_first_entry(&job->vm_ops, struct msm_vm_op, node);
+ list_del(&op->node);
+ kfree(op);
+ }
+
+ wake_up(&vm->prealloc_throttle.wait);
+
+ kfree(job);
+}
+
+static const struct drm_sched_backend_ops msm_vm_bind_ops = {
+ .run_job = msm_vma_job_run,
+ .free_job = msm_vma_job_free
+};
+
+/**
+ * msm_gem_vm_create() - Create and initialize a &msm_gem_vm
+ * @drm: the drm device
+ * @mmu: the backing MMU objects handling mapping/unmapping
+ * @name: the name of the VM
+ * @va_start: the start offset of the VA space
+ * @va_size: the size of the VA space
+ * @managed: is it a kernel managed VM?
+ *
+ * In a kernel managed VM, the kernel handles address allocation, and only
+ * synchronous operations are supported. In a user managed VM, userspace
+ * handles virtual address allocation, and both async and sync operations
+ * are supported.
+ */
+struct drm_gpuvm *
+msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 va_size, bool managed)
+{
+ /*
+ * We mostly want to use DRM_GPUVM_RESV_PROTECTED, except that
+ * makes drm_gpuvm_bo_evict() a no-op for extobjs (ie. we lose
+ * tracking that an extobj is evicted) :facepalm:
+ */
+ enum drm_gpuvm_flags flags = 0;
+ struct msm_gem_vm *vm;
+ struct drm_gem_object *dummy_gem;
+ int ret = 0;
if (IS_ERR(mmu))
return ERR_CAST(mmu);
- aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
- if (!aspace)
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
return ERR_PTR(-ENOMEM);
- spin_lock_init(&aspace->lock);
- aspace->name = name;
- aspace->mmu = mmu;
- aspace->va_start = va_start;
- aspace->va_size = size;
+ dummy_gem = drm_gpuvm_resv_object_alloc(drm);
+ if (!dummy_gem) {
+ ret = -ENOMEM;
+ goto err_free_vm;
+ }
+
+ if (!managed) {
+ struct drm_sched_init_args args = {
+ .ops = &msm_vm_bind_ops,
+ .num_rqs = 1,
+ .credit_limit = 1,
+ .timeout = MAX_SCHEDULE_TIMEOUT,
+ .name = "msm-vm-bind",
+ .dev = drm->dev,
+ };
+
+ ret = drm_sched_init(&vm->sched, &args);
+ if (ret)
+ goto err_free_dummy;
+
+ init_waitqueue_head(&vm->prealloc_throttle.wait);
+ }
+
+ drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem,
+ va_start, va_size, 0, 0, &msm_gpuvm_ops);
+ drm_gem_object_put(dummy_gem);
+
+ vm->mmu = mmu;
+ mutex_init(&vm->mmu_lock);
+ vm->managed = managed;
+
+ drm_mm_init(&vm->mm, va_start, va_size);
+
+ /*
+ * We don't really need vm log for kernel managed VMs, as the kernel
+ * is responsible for ensuring that GEM objs are mapped if they are
+ * used by a submit. Furthermore we piggyback on mmu_lock to serialize
+ * access to the log.
+ *
+ * Limit the max log_shift to 8 to prevent userspace from asking us
+ * for an unreasonable log size.
+ */
+ if (!managed)
+ vm->log_shift = MIN(vm_log_shift, 8);
+
+ if (vm->log_shift) {
+ vm->log = kmalloc_array(1 << vm->log_shift, sizeof(vm->log[0]),
+ GFP_KERNEL | __GFP_ZERO);
+ }
+
+ return &vm->base;
+
+err_free_dummy:
+ drm_gem_object_put(dummy_gem);
+
+err_free_vm:
+ kfree(vm);
+ return ERR_PTR(ret);
+}
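+
+/*
+ * Illustrative call (the range values are made up, not from a real
+ * target): the global GPU VM is created kernel managed, while a
+ * VM_BIND context passes managed=false to get the VM_BIND scheduler
+ * and the VM log set up above:
+ *
+ *   vm = msm_gem_vm_create(drm, mmu, "gpu", SZ_16M, SZ_4G - SZ_16M, true);
+ */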
+
+/**
+ * msm_gem_vm_close() - Close a VM
+ * @gpuvm: The VM to close
+ *
+ * Called when the drm device file is closed, to tear down VM related resources
+ * (which will drop refcounts to GEM objects that were still mapped into the
+ * VM at the time).
+ */
+void
+msm_gem_vm_close(struct drm_gpuvm *gpuvm)
+{
+ struct msm_gem_vm *vm = to_msm_vm(gpuvm);
+ struct drm_gpuva *vma, *tmp;
+ struct drm_exec exec;
+
+ /*
+ * For kernel managed VMs, the VMAs are torn down when the handle is
+ * closed, so nothing more to do.
+ */
+ if (vm->managed)
+ return;
- drm_mm_init(&aspace->mm, va_start, size);
+ if (vm->last_fence)
+ dma_fence_wait(vm->last_fence, false);
+
+ /* Kill the scheduler now, so we aren't racing with it for cleanup: */
+ drm_sched_stop(&vm->sched, NULL);
+ drm_sched_fini(&vm->sched);
+
+ /* Tear down any remaining mappings: */
+ drm_exec_init(&exec, 0, 2);
+ drm_exec_until_all_locked (&exec) {
+ drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(gpuvm));
+ drm_exec_retry_on_contention(&exec);
+
+ drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) {
+ struct drm_gem_object *obj = vma->gem.obj;
+
+ /*
+ * MSM_BO_NO_SHARE objects share the same resv as the
+ * VM, in which case the obj is already locked:
+ */
+ if (obj && (obj->resv == drm_gpuvm_resv(gpuvm)))
+ obj = NULL;
+
+ if (obj) {
+ drm_exec_lock_obj(&exec, obj);
+ drm_exec_retry_on_contention(&exec);
+ }
+
+ msm_gem_vma_unmap(vma, "close");
+ msm_gem_vma_close(vma);
+
+ if (obj) {
+ drm_exec_unlock_obj(&exec, obj);
+ }
+ }
+ }
+ drm_exec_fini(&exec);
+}
+
+static struct msm_vm_bind_job *
+vm_bind_job_create(struct drm_device *dev, struct drm_file *file,
+ struct msm_gpu_submitqueue *queue, uint32_t nr_ops)
+{
+ struct msm_vm_bind_job *job;
+ uint64_t sz;
+ int ret;
- kref_init(&aspace->kref);
+ sz = struct_size(job, ops, nr_ops);
- return aspace;
+ if (sz > SIZE_MAX)
+ return ERR_PTR(-ENOMEM);
+
+ job = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_sched_job_init(&job->base, queue->entity, 1, queue,
+ file->client_id);
+ if (ret) {
+ kfree(job);
+ return ERR_PTR(ret);
+ }
+
+ job->vm = msm_context_vm(dev, queue->ctx);
+ job->queue = queue;
+ INIT_LIST_HEAD(&job->vm_ops);
+
+ return job;
+}
+
+static bool invalid_alignment(uint64_t addr)
+{
+ /*
+ * Technically this is about GPU alignment, not CPU alignment. But
+ * I've not seen any qcom SoC where the SMMU does not support the
+ * CPU's smallest page size.
+ */
+ return !PAGE_ALIGNED(addr);
+}
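+
+/*
+ * e.g. with 4K pages an iova of 0x101000 is accepted but 0x101800 is
+ * rejected; iova, range, and obj_offset must all be page aligned.
+ */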
+
+static int
+lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
+{
+ struct drm_device *dev = job->vm->drm;
+ int i = job->nr_ops++;
+ int ret = 0;
+
+ job->ops[i].op = op->op;
+ job->ops[i].handle = op->handle;
+ job->ops[i].obj_offset = op->obj_offset;
+ job->ops[i].iova = op->iova;
+ job->ops[i].range = op->range;
+ job->ops[i].flags = op->flags;
+
+ if (op->flags & ~MSM_VM_BIND_OP_FLAGS)
+ ret = UERR(EINVAL, dev, "invalid flags: %x\n", op->flags);
+
+ if (invalid_alignment(op->iova))
+ ret = UERR(EINVAL, dev, "invalid address: %016llx\n", op->iova);
+
+ if (invalid_alignment(op->obj_offset))
+ ret = UERR(EINVAL, dev, "invalid bo_offset: %016llx\n", op->obj_offset);
+
+ if (invalid_alignment(op->range))
+ ret = UERR(EINVAL, dev, "invalid range: %016llx\n", op->range);
+
+ if (!drm_gpuvm_range_valid(job->vm, op->iova, op->range))
+ ret = UERR(EINVAL, dev, "invalid range: %016llx, %016llx\n", op->iova, op->range);
+
+ /*
+ * MAP must specify a valid handle. But the handle MBZ for
+ * UNMAP or MAP_NULL.
+ */
+ if (op->op == MSM_VM_BIND_OP_MAP) {
+ if (!op->handle)
+ ret = UERR(EINVAL, dev, "invalid handle\n");
+ } else if (op->handle) {
+ ret = UERR(EINVAL, dev, "handle must be zero\n");
+ }
+
+ switch (op->op) {
+ case MSM_VM_BIND_OP_MAP:
+ case MSM_VM_BIND_OP_MAP_NULL:
+ case MSM_VM_BIND_OP_UNMAP:
+ break;
+ default:
+ ret = UERR(EINVAL, dev, "invalid op: %u\n", op->op);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * ioctl parsing, parameter validation, and GEM handle lookup
+ */
+static int
+vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args,
+ struct drm_file *file, int *nr_bos)
+{
+ struct drm_device *dev = job->vm->drm;
+ int ret = 0;
+ int cnt = 0;
+
+ if (args->nr_ops == 1) {
+ /* Single op case, the op is inlined: */
+ ret = lookup_op(job, &args->op);
+ } else {
+ for (unsigned i = 0; i < args->nr_ops; i++) {
+ struct drm_msm_vm_bind_op op;
+ void __user *userptr =
+ u64_to_user_ptr(args->ops + (i * sizeof(op)));
+
+ /* make sure we don't have garbage flags, in case we hit
+ * error path before flags is initialized:
+ */
+ job->ops[i].flags = 0;
+
+ if (copy_from_user(&op, userptr, sizeof(op))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = lookup_op(job, &op);
+ if (ret)
+ break;
+ }
+ }
+
+ if (ret) {
+ job->nr_ops = 0;
+ goto out;
+ }
+
+ spin_lock(&file->table_lock);
+
+ for (unsigned i = 0; i < args->nr_ops; i++) {
+ struct drm_gem_object *obj;
+
+ if (!job->ops[i].handle) {
+ job->ops[i].obj = NULL;
+ continue;
+ }
+
+ /*
+ * normally use drm_gem_object_lookup(), but for bulk lookup
+ * all under single table_lock just hit object_idr directly:
+ */
+ obj = idr_find(&file->object_idr, job->ops[i].handle);
+ if (!obj) {
+ ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", job->ops[i].handle, i);
+ goto out_unlock;
+ }
+
+ drm_gem_object_get(obj);
+
+ job->ops[i].obj = obj;
+ cnt++;
+ }
+
+ *nr_bos = cnt;
+
+out_unlock:
+ spin_unlock(&file->table_lock);
+
+out:
+ return ret;
+}
+
+static void
+prealloc_count(struct msm_vm_bind_job *job,
+ struct msm_vm_bind_op *first,
+ struct msm_vm_bind_op *last)
+{
+ struct msm_mmu *mmu = to_msm_vm(job->vm)->mmu;
+
+ if (!first)
+ return;
+
+ uint64_t start_iova = first->iova;
+ uint64_t end_iova = last->iova + last->range;
+
+ mmu->funcs->prealloc_count(mmu, &job->prealloc, start_iova, end_iova - start_iova);
+}
+
+static bool
+ops_are_same_pte(struct msm_vm_bind_op *first, struct msm_vm_bind_op *next)
+{
+ /*
+ * A last-level pte covers 2MB, so we should merge two ops, from
+ * the PoV of figuring out how many pgtable pages to pre-allocate,
+ * if they land in the same 2MB range:
+ */
+ uint64_t pte_mask = ~(SZ_2M - 1);
+ return ((first->iova + first->range) & pte_mask) == (next->iova & pte_mask);
+}
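+
+/*
+ * e.g. MAP [0x101000, +0x1000) followed by MAP [0x102000, +0x1000):
+ * both 0x102000 and 0x102000 & ~(SZ_2M - 1) land in the 2MB block at
+ * 0x0, so the two ops are counted as one range for preallocation.
+ */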
+
+/*
+ * Determine the amount of memory to prealloc for pgtables. For sparse images,
+ * in particular, userspace plays some tricks with the order of page mappings
+ * to get the desired swizzle pattern, resulting in a large # of tiny MAP ops.
+ * So detect when multiple MAP operations are physically contiguous, and count
+ * them as a single mapping. Otherwise the prealloc_count() will not realize
+ * they can share pagetable pages and vastly overcount.
+ */
+static int
+vm_bind_prealloc_count(struct msm_vm_bind_job *job)
+{
+ struct msm_vm_bind_op *first = NULL, *last = NULL;
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ int ret;
+
+ for (int i = 0; i < job->nr_ops; i++) {
+ struct msm_vm_bind_op *op = &job->ops[i];
+
+ /* We only care about MAP/MAP_NULL: */
+ if (op->op == MSM_VM_BIND_OP_UNMAP)
+ continue;
+
+ /*
+ * If op is contiguous with last in the current range, then
+ * it becomes the new last in the range and we continue
+ * looping:
+ */
+ if (last && ops_are_same_pte(last, op)) {
+ last = op;
+ continue;
+ }
+
+ /*
+ * If op is not contiguous with the current range, flush
+ * the current range and start anew:
+ */
+ prealloc_count(job, first, last);
+ first = last = op;
+ }
+
+ /* Flush the remaining range: */
+ prealloc_count(job, first, last);
+
+ /*
+ * Now that we know the needed amount to pre-alloc, throttle on pending
+ * VM_BIND jobs if we already have too much pre-alloc memory in flight
+ */
+ ret = wait_event_interruptible(
+ vm->prealloc_throttle.wait,
+ atomic_read(&vm->prealloc_throttle.in_flight) <= 1024);
+ if (ret)
+ return ret;
+
+ atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight);
+
+ return 0;
+}
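+
+/*
+ * The 1024 bound above is in units of prealloc.count, ie. pre-allocated
+ * pgtable pages as counted by prealloc_count(), so (assuming 4K pages)
+ * roughly 4MB of pgtable memory can be in flight across pending VM_BIND
+ * jobs before new jobs block in the ioctl.
+ */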
+
+/*
+ * Lock VM and GEM objects
+ */
+static int
+vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
+{
+ int ret;
+
+ /* Lock VM and objects: */
+ drm_exec_until_all_locked (exec) {
+ ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(job->vm));
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ return ret;
+
+ for (unsigned i = 0; i < job->nr_ops; i++) {
+ const struct msm_vm_bind_op *op = &job->ops[i];
+
+ switch (op->op) {
+ case MSM_VM_BIND_OP_UNMAP:
+ ret = drm_gpuvm_sm_unmap_exec_lock(job->vm, exec,
+ op->iova,
+ op->range);
+ break;
+ case MSM_VM_BIND_OP_MAP:
+ case MSM_VM_BIND_OP_MAP_NULL:
+ ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
+ op->iova, op->range,
+ op->obj, op->obj_offset);
+ break;
+ default:
+ /*
+ * lookup_op() should have already thrown an error for
+ * invalid ops
+ */
+ WARN_ON("unreachable");
+ }
+
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Pin GEM objects, ensuring that we have backing pages. Pinning will move
+ * the object to the pinned LRU so that the shrinker knows to first consider
+ * other objects for evicting.
+ */
+static int
+vm_bind_job_pin_objects(struct msm_vm_bind_job *job)
+{
+ struct drm_gem_object *obj;
+
+ /*
+ * First loop, before holding the LRU lock, avoids holding the
+ * LRU lock while calling msm_gem_pin_vma_locked (which could
+ * trigger get_pages())
+ */
+ job_foreach_bo (obj, job) {
+ struct page **pages;
+
+ pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+ }
+
+ struct msm_drm_private *priv = job->vm->drm->dev_private;
+
+ /*
+ * A second loop while holding the LRU lock (a) avoids acquiring/dropping
+ * the LRU lock for each individual bo, while (b) avoiding holding the
+ * LRU lock while calling msm_gem_pin_vma_locked() (which could trigger
+ * get_pages() which could trigger reclaim.. and if we held the LRU lock
+ * could trigger deadlock with the shrinker).
+ */
+ mutex_lock(&priv->lru.lock);
+ job_foreach_bo (obj, job)
+ msm_gem_pin_obj_locked(obj);
+ mutex_unlock(&priv->lru.lock);
+
+ job->bos_pinned = true;
+
+ return 0;
+}
+
+/*
+ * Unpin GEM objects. Normally this is done after the bind job is run.
+ */
+static void
+vm_bind_job_unpin_objects(struct msm_vm_bind_job *job)
+{
+ struct drm_gem_object *obj;
+
+ if (!job->bos_pinned)
+ return;
+
+ job_foreach_bo (obj, job)
+ msm_gem_unpin_locked(obj);
+
+ job->bos_pinned = false;
+}
+
+/*
+ * Pre-allocate pgtable memory, and translate the VM bind requests into a
+ * sequence of pgtable updates to be applied asynchronously.
+ */
+static int
+vm_bind_job_prepare(struct msm_vm_bind_job *job)
+{
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ struct msm_mmu *mmu = vm->mmu;
+ int ret;
+
+ ret = mmu->funcs->prealloc_allocate(mmu, &job->prealloc);
+ if (ret)
+ return ret;
+
+ for (unsigned i = 0; i < job->nr_ops; i++) {
+ const struct msm_vm_bind_op *op = &job->ops[i];
+ struct op_arg arg = {
+ .job = job,
+ };
+
+ switch (op->op) {
+ case MSM_VM_BIND_OP_UNMAP:
+ ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova,
+ op->range);
+ break;
+ case MSM_VM_BIND_OP_MAP:
+ if (op->flags & MSM_VM_BIND_OP_DUMP)
+ arg.flags |= MSM_VMA_DUMP;
+ fallthrough;
+ case MSM_VM_BIND_OP_MAP_NULL:
+ ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
+ op->range, op->obj, op->obj_offset);
+ break;
+ default:
+ /*
+ * lookup_op() should have already thrown an error for
+ * invalid ops
+ */
+ BUG_ON("unreachable");
+ }
+
+ if (ret) {
+ /*
+ * If we've already started modifying the vm, we can't
+ * adequately describe to userspace the intermediate
+ * state the vm is in. So throw up our hands!
+ */
+ if (i > 0)
+ msm_gem_vm_unusable(job->vm);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Attach fences to the GEM objects being bound. This will signify to
+ * the shrinker that they are busy even after dropping the locks (ie.
+ * drm_exec_fini())
+ */
+static void
+vm_bind_job_attach_fences(struct msm_vm_bind_job *job)
+{
+ for (unsigned i = 0; i < job->nr_ops; i++) {
+ struct drm_gem_object *obj = job->ops[i].obj;
+
+ if (!obj)
+ continue;
+
+ dma_resv_add_fence(obj->resv, job->fence,
+ DMA_RESV_USAGE_KERNEL);
+ }
+}
+
+int
+msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_vm_bind *args = data;
+ struct msm_context *ctx = file->driver_priv;
+ struct msm_vm_bind_job *job = NULL;
+ struct msm_gpu *gpu = priv->gpu;
+ struct msm_gpu_submitqueue *queue;
+ struct msm_syncobj_post_dep *post_deps = NULL;
+ struct drm_syncobj **syncobjs_to_reset = NULL;
+ struct sync_file *sync_file = NULL;
+ struct dma_fence *fence;
+ int out_fence_fd = -1;
+ int ret, nr_bos = 0;
+ unsigned i;
+
+ if (!gpu)
+ return -ENXIO;
+
+ /*
+ * Maybe we could allow just UNMAP ops? OTOH userspace should just
+ * immediately close the device file and all will be torn down.
+ */
+ if (to_msm_vm(ctx->vm)->unusable)
+ return UERR(EPIPE, dev, "context is unusable");
+
+ /*
+ * Technically, you cannot create a VM_BIND submitqueue in the first
+ * place, if you haven't opted in to VM_BIND context. But it is
+ * cleaner / less confusing, to check this case directly.
+ */
+ if (!msm_context_is_vmbind(ctx))
+ return UERR(EINVAL, dev, "context does not support vmbind");
+
+ if (args->flags & ~MSM_VM_BIND_FLAGS)
+ return UERR(EINVAL, dev, "invalid flags");
+
+ queue = msm_submitqueue_get(ctx, args->queue_id);
+ if (!queue)
+ return -ENOENT;
+
+ if (!(queue->flags & MSM_SUBMITQUEUE_VM_BIND)) {
+ ret = UERR(EINVAL, dev, "Invalid queue type");
+ goto out_post_unlock;
+ }
+
+ if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto out_post_unlock;
+ }
+ }
+
+ job = vm_bind_job_create(dev, file, queue, args->nr_ops);
+ if (IS_ERR(job)) {
+ ret = PTR_ERR(job);
+ goto out_post_unlock;
+ }
+
+ ret = mutex_lock_interruptible(&queue->lock);
+ if (ret)
+ goto out_post_unlock;
+
+ if (args->flags & MSM_VM_BIND_FENCE_FD_IN) {
+ struct dma_fence *in_fence;
+
+ in_fence = sync_file_get_fence(args->fence_fd);
+
+ if (!in_fence) {
+ ret = UERR(EINVAL, dev, "invalid in-fence");
+ goto out_unlock;
+ }
+
+ ret = drm_sched_job_add_dependency(&job->base, in_fence);
+ if (ret)
+ goto out_unlock;
+ }
+
+ if (args->in_syncobjs > 0) {
+ syncobjs_to_reset = msm_syncobj_parse_deps(dev, &job->base,
+ file, args->in_syncobjs,
+ args->nr_in_syncobjs,
+ args->syncobj_stride);
+ if (IS_ERR(syncobjs_to_reset)) {
+ ret = PTR_ERR(syncobjs_to_reset);
+ goto out_unlock;
+ }
+ }
+
+ if (args->out_syncobjs > 0) {
+ post_deps = msm_syncobj_parse_post_deps(dev, file,
+ args->out_syncobjs,
+ args->nr_out_syncobjs,
+ args->syncobj_stride);
+ if (IS_ERR(post_deps)) {
+ ret = PTR_ERR(post_deps);
+ goto out_unlock;
+ }
+ }
+
+ ret = vm_bind_job_lookup_ops(job, args, file, &nr_bos);
+ if (ret)
+ goto out_unlock;
+
+ ret = vm_bind_prealloc_count(job);
+ if (ret)
+ goto out_unlock;
+
+ struct drm_exec exec;
+ unsigned flags = DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT;
+ drm_exec_init(&exec, flags, nr_bos + 1);
+
+ ret = vm_bind_job_lock_objects(job, &exec);
+ if (ret)
+ goto out;
+
+ ret = vm_bind_job_pin_objects(job);
+ if (ret)
+ goto out;
+
+ ret = vm_bind_job_prepare(job);
+ if (ret)
+ goto out;
+
+ drm_sched_job_arm(&job->base);
+
+ job->fence = dma_fence_get(&job->base.s_fence->finished);
+
+ if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
+ sync_file = sync_file_create(job->fence);
+ if (!sync_file) {
+ ret = -ENOMEM;
+ } else {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
+ }
+ }
+
+ if (ret)
+ goto out;
+
+ vm_bind_job_attach_fences(job);
+
+ /*
+ * The job can be free'd (and fence unref'd) at any point after
+ * drm_sched_entity_push_job(), so we need to hold our own ref
+ */
+ fence = dma_fence_get(job->fence);
+
+ drm_sched_entity_push_job(&job->base);
+
+ msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs);
+ msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, fence);
+
+ dma_fence_put(fence);
+
+out:
+ if (ret)
+ vm_bind_job_unpin_objects(job);
+
+ drm_exec_fini(&exec);
+out_unlock:
+ mutex_unlock(&queue->lock);
+out_post_unlock:
+ if (ret && (out_fence_fd >= 0)) {
+ put_unused_fd(out_fence_fd);
+ if (sync_file)
+ fput(sync_file->file);
+ }
+
+ if (!IS_ERR_OR_NULL(job)) {
+ if (ret)
+ msm_vma_job_free(&job->base);
+ } else {
+ /*
+ * If the job hasn't yet taken ownership of the queue
+ * then we need to drop the reference ourself:
+ */
+ msm_submitqueue_put(queue);
+ }
+
+ if (!IS_ERR_OR_NULL(post_deps)) {
+ for (i = 0; i < args->nr_out_syncobjs; ++i) {
+ kfree(post_deps[i].chain);
+ drm_syncobj_put(post_deps[i].syncobj);
+ }
+ kfree(post_deps);
+ }
+
+ if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
+ for (i = 0; i < args->nr_in_syncobjs; ++i) {
+ if (syncobjs_to_reset[i])
+ drm_syncobj_put(syncobjs_to_reset[i]);
+ }
+ kfree(syncobjs_to_reset);
+ }
+
+ return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3947f7ba1421..c317b25a8162 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -148,7 +148,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
return 0;
}
-void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
struct drm_printer *p)
{
drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
@@ -219,13 +219,14 @@ static void msm_gpu_devcoredump_free(void *data)
}
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
- struct drm_gem_object *obj, u64 iova, bool full)
+ struct drm_gem_object *obj, u64 iova,
+ bool full, size_t offset, size_t size)
{
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
struct msm_gem_object *msm_obj = to_msm_bo(obj);
/* Don't record write only objects */
- state_bo->size = obj->size;
+ state_bo->size = size;
state_bo->flags = msm_obj->flags;
state_bo->iova = iova;
@@ -236,26 +237,126 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
if (full) {
void *ptr;
- state_bo->data = kvmalloc(obj->size, GFP_KERNEL);
+ state_bo->data = kvmalloc(size, GFP_KERNEL);
if (!state_bo->data)
goto out;
- msm_gem_lock(obj);
ptr = msm_gem_get_vaddr_active(obj);
- msm_gem_unlock(obj);
if (IS_ERR(ptr)) {
kvfree(state_bo->data);
state_bo->data = NULL;
goto out;
}
- memcpy(state_bo->data, ptr, obj->size);
- msm_gem_put_vaddr(obj);
+ memcpy(state_bo->data, ptr + offset, size);
+ msm_gem_put_vaddr_locked(obj);
}
out:
state->nr_bos++;
}
+static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit)
+{
+ extern bool rd_full;
+
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ struct drm_exec exec;
+ struct drm_gpuva *vma;
+ unsigned cnt = 0;
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ cnt = 0;
+
+ drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm));
+ drm_exec_retry_on_contention(&exec);
+
+ drm_gpuvm_for_each_va (vma, submit->vm) {
+ if (!vma->gem.obj)
+ continue;
+
+ cnt++;
+ drm_exec_lock_obj(&exec, vma->gem.obj);
+ drm_exec_retry_on_contention(&exec);
+ }
+
+ }
+
+ state->bos = kcalloc(cnt, sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ drm_gpuvm_for_each_va (vma, submit->vm) {
+ bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);
+
+ /* Skip MAP_NULL/PRR VMAs: */
+ if (!vma->gem.obj)
+ continue;
+
+ msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr,
+ dump, vma->gem.offset, vma->va.range);
+ }
+
+ drm_exec_fini(&exec);
+ } else {
+ state->bos = kcalloc(submit->nr_bos,
+ sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ for (int i = 0; state->bos && i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
+
+ msm_gem_lock(obj);
+ msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova,
+ dump, 0, obj->size);
+ msm_gem_unlock(obj);
+ }
+ }
+}
+
+static void crashstate_get_vm_logs(struct msm_gpu_state *state, struct msm_gem_vm *vm)
+{
+ uint32_t vm_log_len = (1 << vm->log_shift);
+ uint32_t vm_log_mask = vm_log_len - 1;
+ int first;
+
+ /* Bail if no log, or empty log: */
+ if (!vm->log || !vm->log[0].op)
+ return;
+
+ mutex_lock(&vm->mmu_lock);
+
+ /*
+ * log_idx is the next entry to overwrite, meaning it is the oldest, or
+ * first, entry (other than the special case handled below where the
+ * log hasn't wrapped around yet)
+ */
+ first = vm->log_idx;
+
+ if (!vm->log[first].op) {
+ /*
+ * If the next log entry has not been written yet, then only
+ * entries 0 to idx-1 are valid (ie. we haven't wrapped around
+ * yet)
+ */
+ state->nr_vm_logs = first;
+ first = 0;
+ } else {
+ state->nr_vm_logs = vm_log_len;
+ }
+
+ state->vm_logs = kmalloc_array(
+ state->nr_vm_logs, sizeof(vm->log[0]), GFP_KERNEL);
+ if (!state->vm_logs) {
+ state->nr_vm_logs = 0;
+ mutex_unlock(&vm->mmu_lock);
+ return;
+ }
+
+ for (int i = 0; i < state->nr_vm_logs; i++) {
+ int idx = (i + first) & vm_log_mask;
+
+ state->vm_logs[i] = vm->log[idx];
+ }
+
+ mutex_unlock(&vm->mmu_lock);
+}
+
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
char *comm, char *cmd)
@@ -280,26 +381,18 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
if (fault_info)
state->fault_info = *fault_info;
- if (submit) {
- int i;
-
- if (state->fault_info.ttbr0) {
- struct msm_gpu_fault_info *info = &state->fault_info;
- struct msm_mmu *mmu = submit->aspace->mmu;
+ if (submit && state->fault_info.ttbr0) {
+ struct msm_gpu_fault_info *info = &state->fault_info;
+ struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;
- msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
- &info->asid);
- msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
- }
-
- state->bos = kcalloc(submit->nr_bos,
- sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+ msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
+ &info->asid);
+ msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
+ }
- for (i = 0; state->bos && i < submit->nr_bos; i++) {
- msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
- submit->bos[i].iova,
- should_dump(submit, i));
- }
+ if (submit) {
+ crashstate_get_vm_logs(state, to_msm_vm(submit->vm));
+ crashstate_get_bos(state, submit);
}
/* Set the active crash state to be dumped on failure */
@@ -342,7 +435,7 @@ static void retire_submits(struct msm_gpu *gpu);
static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
- struct msm_file_private *ctx = submit->queue->ctx;
+ struct msm_context *ctx = submit->queue->ctx;
struct task_struct *task;
WARN_ON(!mutex_is_locked(&submit->gpu->lock));
@@ -389,8 +482,20 @@ static void recover_worker(struct kthread_work *work)
/* Increment the fault counts */
submit->queue->faults++;
- if (submit->aspace)
- submit->aspace->faults++;
+ if (submit->vm) {
+ struct msm_gem_vm *vm = to_msm_vm(submit->vm);
+
+ vm->faults++;
+
+ /*
+ * If userspace has opted-in to VM_BIND (and therefore userspace
+ * management of the VM), faults mark the VM as unusable. This
+ * matches vulkan expectations (vulkan is the main target for
+ * VM_BIND)
+ */
+ if (!vm->managed)
+ msm_gem_vm_unusable(submit->vm);
+ }
get_comm_cmdline(submit, &comm, &cmd);
@@ -828,10 +933,12 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
}
/* Return a new address space for a msm_drm_private instance */
-struct msm_gem_address_space *
-msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
+struct drm_gpuvm *
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+ bool kernel_managed)
{
- struct msm_gem_address_space *aspace = NULL;
+ struct drm_gpuvm *vm = NULL;
+
if (!gpu)
return NULL;
@@ -839,16 +946,16 @@ msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *ta
* If the target doesn't support private address spaces then return
* the global one
*/
- if (gpu->funcs->create_private_address_space) {
- aspace = gpu->funcs->create_private_address_space(gpu);
- if (!IS_ERR(aspace))
- aspace->pid = get_pid(task_pid(task));
+ if (gpu->funcs->create_private_vm) {
+ vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
+ if (!IS_ERR(vm))
+ to_msm_vm(vm)->pid = get_pid(task_pid(task));
}
- if (IS_ERR_OR_NULL(aspace))
- aspace = msm_gem_address_space_get(gpu->aspace);
+ if (IS_ERR_OR_NULL(vm))
+ vm = drm_gpuvm_get(gpu->vm);
- return aspace;
+ return vm;
}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
@@ -942,19 +1049,15 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
msm_devfreq_init(gpu);
-
- gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
-
- if (gpu->aspace == NULL)
- DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
- else if (IS_ERR(gpu->aspace)) {
- ret = PTR_ERR(gpu->aspace);
+ gpu->vm = gpu->funcs->create_vm(gpu, pdev);
+ if (IS_ERR(gpu->vm)) {
+ ret = PTR_ERR(gpu->vm);
goto fail;
}
memptrs = msm_gem_kernel_new(drm,
sizeof(struct msm_rbmemptrs) * nr_rings,
- check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
+ check_apriv(gpu, MSM_BO_WC), gpu->vm, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
@@ -998,7 +1101,7 @@ fail:
gpu->rb[i] = NULL;
}
- msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);
platform_set_drvdata(pdev, NULL);
return ret;
@@ -1015,11 +1118,12 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
gpu->rb[i] = NULL;
}
- msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);
- if (!IS_ERR_OR_NULL(gpu->aspace)) {
- gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
- msm_gem_address_space_put(gpu->aspace);
+ if (!IS_ERR_OR_NULL(gpu->vm)) {
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(gpu->vm);
}
if (gpu->worker) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 5bf7cd985b9c..b2a96544f92a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -20,9 +20,10 @@
#include "msm_gem.h"
struct msm_gem_submit;
+struct msm_gem_vm_log_entry;
struct msm_gpu_perfcntr;
struct msm_gpu_state;
-struct msm_file_private;
+struct msm_context;
struct msm_gpu_config {
const char *ioname;
@@ -44,9 +45,9 @@ struct msm_gpu_config {
* + z180_gpu
*/
struct msm_gpu_funcs {
- int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ int (*get_param)(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len);
- int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ int (*set_param)(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t value, uint32_t len);
int (*hw_init)(struct msm_gpu *gpu);
@@ -78,10 +79,8 @@ struct msm_gpu_funcs {
/* note: gpu_set_freq() can assume that we have been pm_resumed */
void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
bool suspended);
- struct msm_gem_address_space *(*create_address_space)
- (struct msm_gpu *gpu, struct platform_device *pdev);
- struct msm_gem_address_space *(*create_private_address_space)
- (struct msm_gpu *gpu);
+ struct drm_gpuvm *(*create_vm)(struct msm_gpu *gpu, struct platform_device *pdev);
+ struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu, bool kernel_managed);
uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
/**
@@ -236,7 +235,7 @@ struct msm_gpu {
void __iomem *mmio;
int irq;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
@@ -341,26 +340,61 @@ struct msm_gpu_perfcntr {
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
/**
- * struct msm_file_private - per-drm_file context
- *
- * @queuelock: synchronizes access to submitqueues list
- * @submitqueues: list of &msm_gpu_submitqueue created by userspace
- * @queueid: counter incremented each time a submitqueue is created,
- * used to assign &msm_gpu_submitqueue.id
- * @aspace: the per-process GPU address-space
- * @ref: reference count
- * @seqno: unique per process seqno
+ * struct msm_context - per-drm_file context
*/
-struct msm_file_private {
+struct msm_context {
+ /** @queuelock: synchronizes access to submitqueues list */
rwlock_t queuelock;
+
+ /** @submitqueues: list of &msm_gpu_submitqueue created by userspace */
struct list_head submitqueues;
+
+ /**
+ * @queueid:
+ *
+ * Counter incremented each time a submitqueue is created, used to
+ * assign &msm_gpu_submitqueue.id
+ */
int queueid;
- struct msm_gem_address_space *aspace;
+
+ /**
+ * @closed: The device file associated with this context has been closed.
+ *
+ * Once the device is closed, any submits that have not been written
+ * to the ring buffer are no-op'd.
+ */
+ bool closed;
+
+ /**
+ * @userspace_managed_vm:
+ *
+ * Has userspace opted-in to userspace managed VM (ie. VM_BIND) via
+ * MSM_PARAM_EN_VM_BIND?
+ */
+ bool userspace_managed_vm;
+
+ /**
+ * @vm:
+ *
+ * The per-process GPU address-space. Do not access directly, use
+ * msm_context_vm().
+ */
+ struct drm_gpuvm *vm;
+
+ /** @ref: the reference count */
struct kref ref;
+
+ /**
+ * @seqno:
+ *
+ * A unique per-process sequence number. Used to detect context
+ * switches without relying on keeping a potentially dangling
+ * pointer to the previous context.
+ */
int seqno;
/**
- * sysprof:
+ * @sysprof:
*
* The value of MSM_PARAM_SYSPROF set by userspace. This is
* intended to be used by system profiling tools like Mesa's
@@ -378,21 +412,21 @@ struct msm_file_private {
int sysprof;
/**
- * comm: Overridden task comm, see MSM_PARAM_COMM
+ * @comm: Overridden task comm, see MSM_PARAM_COMM
*
* Accessed under msm_gpu::lock
*/
char *comm;
/**
- * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
+ * @cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
*
* Accessed under msm_gpu::lock
*/
char *cmdline;
/**
- * elapsed:
+ * @elapsed:
*
* The total (cumulative) elapsed time GPU was busy with rendering
* from this context in ns.
@@ -400,7 +434,7 @@ struct msm_file_private {
uint64_t elapsed_ns;
/**
- * cycles:
+ * @cycles:
*
* The total (cumulative) GPU cycles elapsed attributed to this
* context.
@@ -408,7 +442,7 @@ struct msm_file_private {
uint64_t cycles;
/**
- * entities:
+ * @entities:
*
* Table of per-priority-level sched entities used by submitqueues
* associated with this &drm_file. Because some userspace apps
@@ -421,7 +455,7 @@ struct msm_file_private {
struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
/**
- * ctx_mem:
+ * @ctx_mem:
*
* Total amount of memory of GEM buffers with handles attached for
* this context.
@@ -429,6 +463,24 @@ struct msm_file_private {
atomic64_t ctx_mem;
};
+struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx);
+
+/**
+ * msm_context_is_vmbind() - has userspace opted in to VM_BIND?
+ *
+ * @ctx: the drm_file context
+ *
+ * See MSM_PARAM_EN_VM_BIND. If userspace is managing the VM, it can
+ * do sparse binding including having multiple, potentially partial,
+ * mappings in the VM. Therefore certain legacy uabi (ie. GET_IOVA,
+ * SET_IOVA) are rejected because they don't have a sensible meaning.
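+ *
+ * A legacy path could gate on this helper roughly like this
+ * (illustrative sketch, not a call site added by this patch):
+ *
+ * if (msm_context_is_vmbind(ctx))
+ * return UERR(EINVAL, dev, "not valid on VM_BIND contexts");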
+ */
+static inline bool
+msm_context_is_vmbind(struct msm_context *ctx)
+{
+ return ctx->userspace_managed_vm;
+}
+
/**
* msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
*
@@ -506,13 +558,16 @@ struct msm_gpu_submitqueue {
u32 ring_nr;
int faults;
uint32_t last_fence;
- struct msm_file_private *ctx;
+ struct msm_context *ctx;
struct list_head node;
struct idr fence_idr;
struct spinlock idr_lock;
struct mutex lock;
struct kref ref;
struct drm_sched_entity *entity;
+
+ /** @_vm_bind_entity: used for @entity pointer for VM_BIND queues */
+ struct drm_sched_entity _vm_bind_entity[0];
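+
+ /*
+ * Note: a queue is a VM_BIND queue exactly when @entity aliases this
+ * embedded storage (queue->entity == &queue->_vm_bind_entity[0]);
+ * the submitqueue teardown paths use that identity test.
+ */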
};
struct msm_gpu_state_bo {
@@ -549,6 +604,9 @@ struct msm_gpu_state {
struct msm_gpu_fault_info fault_info;
+ int nr_vm_logs;
+ struct msm_gem_vm_log_entry *vm_logs;
+
int nr_bos;
struct msm_gpu_state_bo *bos;
};
@@ -602,33 +660,32 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
-void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
struct drm_printer *p);
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+int msm_submitqueue_init(struct drm_device *drm, struct msm_context *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_context *ctx,
u32 id);
int msm_submitqueue_create(struct drm_device *drm,
- struct msm_file_private *ctx,
+ struct msm_context *ctx,
u32 prio, u32 flags, u32 *id);
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_query(struct drm_device *drm, struct msm_context *ctx,
struct drm_msm_submitqueue_query *args);
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
-void msm_submitqueue_close(struct msm_file_private *ctx);
+int msm_submitqueue_remove(struct msm_context *ctx, u32 id);
+void msm_submitqueue_close(struct msm_context *ctx);
void msm_submitqueue_destroy(struct kref *kref);
-int msm_file_private_set_sysprof(struct msm_file_private *ctx,
- struct msm_gpu *gpu, int sysprof);
-void __msm_file_private_destroy(struct kref *kref);
+int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sysprof);
+void __msm_context_destroy(struct kref *kref);
-static inline void msm_file_private_put(struct msm_file_private *ctx)
+static inline void msm_context_put(struct msm_context *ctx)
{
- kref_put(&ctx->ref, __msm_file_private_destroy);
+ kref_put(&ctx->ref, __msm_context_destroy);
}
-static inline struct msm_file_private *msm_file_private_get(
- struct msm_file_private *ctx)
+static inline struct msm_context *msm_context_get(
+ struct msm_context *ctx)
{
kref_get(&ctx->ref);
return ctx;
@@ -656,8 +713,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config);
-struct msm_gem_address_space *
-msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
+struct drm_gpuvm *
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+ bool kernel_managed);
void msm_gpu_cleanup(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index 7f863282db0d..781bbe5540bd 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -205,6 +205,20 @@ TRACE_EVENT(msm_gpu_preemption_irq,
TP_printk("preempted to %u", __entry->ring_id)
);
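+
+/*
+ * Emitted when pre-allocated pgtable pages are returned to the cache;
+ * eg. "count=4, remaining=3" means 4 pages were reserved for a VM
+ * operation and only one was consumed (illustrative values).
+ */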
+TRACE_EVENT(msm_mmu_prealloc_cleanup,
+ TP_PROTO(u32 count, u32 remaining),
+ TP_ARGS(count, remaining),
+ TP_STRUCT__entry(
+ __field(u32, count)
+ __field(u32, remaining)
+ ),
+ TP_fast_assign(
+ __entry->count = count;
+ __entry->remaining = remaining;
+ ),
+ TP_printk("count=%u, remaining=%u", __entry->count, __entry->remaining)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 739ce2c283a4..55c29f49b788 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -6,13 +6,18 @@
#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
+#include <linux/kmemleak.h>
#include "msm_drv.h"
+#include "msm_gpu_trace.h"
#include "msm_mmu.h"
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
atomic_t pagetables;
+ struct page *prr_page;
+
+ struct kmem_cache *pt_cache;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
@@ -26,6 +31,9 @@ struct msm_iommu_pagetable {
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
+
+ /** @root_page_table: Stores the root page table pointer. */
+ void *root_page_table;
};
static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
@@ -93,15 +101,24 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+ int ret = 0;
while (size) {
- size_t unmapped, pgsize, count;
+ size_t pgsize, count;
+ ssize_t unmapped;
pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
- if (!unmapped)
- break;
+ if (unmapped <= 0) {
+ ret = -EINVAL;
+ /*
+ * Continue attempting to unmap the remainder of the
+ * range, so we don't end up with dangling
+ * mapped pages.
+ */
+ unmapped = PAGE_SIZE;
+ }
iova += unmapped;
size -= unmapped;
@@ -109,11 +126,42 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
- return (size == 0) ? 0 : -EINVAL;
+ return ret;
+}
+
+static int msm_iommu_pagetable_map_prr(struct msm_mmu *mmu, u64 iova, size_t len, int prot)
+{
+ struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+ struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+ struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
+ phys_addr_t phys = page_to_phys(iommu->prr_page);
+ u64 addr = iova;
+
+ while (len) {
+ size_t mapped = 0;
+ size_t size = PAGE_SIZE;
+ int ret;
+
+ ret = ops->map_pages(ops, addr, phys, size, 1, prot, GFP_KERNEL, &mapped);
+
+ /*
+ * map_pages could fail after mapping some of the pages,
+ * so update the counters before error handling.
+ */
+ addr += mapped;
+ len -= mapped;
+
+ if (ret) {
+ msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
}
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
@@ -121,10 +169,26 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
u64 addr = iova;
unsigned int i;
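+ /*
+ * A NULL sgt requests a PRR mapping (eg. MAP_NULL / sparse ranges),
+ * with every page backed by the single shared PRR page:
+ */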
+ if (!sgt)
+ return msm_iommu_pagetable_map_prr(mmu, iova, len, prot);
+
for_each_sgtable_sg(sgt, sg, i) {
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
+ if (!len)
+ break;
+
+ if (size <= off) {
+ off -= size;
+ continue;
+ }
+
+ phys += off;
+ size -= off;
+ size = min_t(size_t, size, len);
+ off = 0;
+
while (size) {
size_t pgsize, count, mapped = 0;
int ret;
@@ -140,6 +204,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
phys += mapped;
addr += mapped;
size -= mapped;
+ len -= mapped;
if (ret) {
msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
@@ -162,9 +227,16 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
* If this is the last attached pagetable for the parent,
* disable TTBR0 in the arm-smmu driver
*/
- if (atomic_dec_return(&iommu->pagetables) == 0)
+ if (atomic_dec_return(&iommu->pagetables) == 0) {
adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
+ if (adreno_smmu->set_prr_bit) {
+ adreno_smmu->set_prr_bit(adreno_smmu->cookie, false);
+ __free_page(iommu->prr_page);
+ iommu->prr_page = NULL;
+ }
+ }
+
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
}
@@ -217,7 +289,148 @@ msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[
return 0;
}
+static void
+msm_iommu_pagetable_prealloc_count(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
+ uint64_t iova, size_t len)
+{
+ u64 pt_count;
+
+ /*
+ * L1, L2 and L3 page tables.
+ *
+ * We could optimize L3 allocation by iterating over the sgt and merging
+ * 2M contiguous blocks, but it's simpler to over-provision and return
+ * the pages if they're not used.
+ *
+ * The first level descriptor (v8 / v7-lpae page table format) encodes
+ * 30 bits of address. The second level encodes 29. For the 3rd it is
+ * 39.
+ *
+ * https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/Virtual-Memory-System-Architecture--VMSA-/Long-descriptor-translation-table-format/Long-descriptor-translation-table-format-descriptors?lang=en#BEIHEFFB
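+ *
+ * Worked example (illustrative numbers): iova = 0x100000000 (4 GiB)
+ * and len = 4 MiB covers one 1ull << 39 span (1 L1 table), one
+ * 1ull << 30 span (1 L2 table) and two 1ull << 21 spans (2 L3
+ * tables), so p->count grows by 4.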
+ */
+ pt_count = ((ALIGN(iova + len, 1ull << 39) - ALIGN_DOWN(iova, 1ull << 39)) >> 39) +
+ ((ALIGN(iova + len, 1ull << 30) - ALIGN_DOWN(iova, 1ull << 30)) >> 30) +
+ ((ALIGN(iova + len, 1ull << 21) - ALIGN_DOWN(iova, 1ull << 21)) >> 21);
+
+ p->count += pt_count;
+}
+
+static struct kmem_cache *
+get_pt_cache(struct msm_mmu *mmu)
+{
+ struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+ return to_msm_iommu(pagetable->parent)->pt_cache;
+}
+
+static int
+msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
+{
+ struct kmem_cache *pt_cache = get_pt_cache(mmu);
+ int ret;
+
+ p->pages = kvmalloc_array(p->count, sizeof(*p->pages), GFP_KERNEL);
+ if (!p->pages)
+ return -ENOMEM;
+
+ ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
+ if (ret != p->count) {
+ p->count = ret;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
+{
+ struct kmem_cache *pt_cache = get_pt_cache(mmu);
+ uint32_t remaining_pt_count = p->count - p->ptr;
+
+ if (p->count > 0)
+ trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);
+
+ kmem_cache_free_bulk(pt_cache, remaining_pt_count, &p->pages[p->ptr]);
+ kvfree(p->pages);
+}
+
+/**
+ * msm_iommu_pagetable_alloc_pt() - Custom page table allocator
+ * @cookie: Cookie passed at page table allocation time.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ * @gfp: GFP flags.
+ *
+ * We want a custom allocator so we can use a cache for page table
+ * allocations and amortize the cost of the over-reservation that's
+ * done to allow asynchronous VM operations.
+ *
+ * Return: non-NULL on success, NULL if the allocation failed for any
+ * reason.
+ */
+static void *
+msm_iommu_pagetable_alloc_pt(void *cookie, size_t size, gfp_t gfp)
+{
+ struct msm_iommu_pagetable *pagetable = cookie;
+ struct msm_mmu_prealloc *p = pagetable->base.prealloc;
+ void *page;
+
+ /* Allocation of the root page table happens during init. */
+ if (unlikely(!pagetable->root_page_table)) {
+ struct page *p;
+
+ p = alloc_pages_node(dev_to_node(pagetable->iommu_dev),
+ gfp | __GFP_ZERO, get_order(size));
+ page = p ? page_address(p) : NULL;
+ pagetable->root_page_table = page;
+ return page;
+ }
+
+ if (WARN_ON(!p) || WARN_ON(p->ptr >= p->count))
+ return NULL;
+
+ page = p->pages[p->ptr++];
+ memset(page, 0, size);
+
+ /*
+ * Page table entries don't use virtual addresses, which trips out
+ * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
+ * are mixed with other fields, and I fear kmemleak won't detect that
+ * either.
+ *
+ * Let's just ignore memory passed to the page-table driver for now.
+ */
+ kmemleak_ignore(page);
+
+ return page;
+}
+
+/**
+ * msm_iommu_pagetable_free_pt() - Custom page table free function
+ * @cookie: Cookie passed at page table allocation time.
+ * @data: Page table to free.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ */
+static void
+msm_iommu_pagetable_free_pt(void *cookie, void *data, size_t size)
+{
+ struct msm_iommu_pagetable *pagetable = cookie;
+
+ if (unlikely(pagetable->root_page_table == data)) {
+ free_pages((unsigned long)data, get_order(size));
+ pagetable->root_page_table = NULL;
+ return;
+ }
+
+ kmem_cache_free(get_pt_cache(&pagetable->base), data);
+}
+
static const struct msm_mmu_funcs pagetable_funcs = {
+ .prealloc_count = msm_iommu_pagetable_prealloc_count,
+ .prealloc_allocate = msm_iommu_pagetable_prealloc_allocate,
+ .prealloc_cleanup = msm_iommu_pagetable_prealloc_cleanup,
.map = msm_iommu_pagetable_map,
.unmap = msm_iommu_pagetable_unmap,
.destroy = msm_iommu_pagetable_destroy,
@@ -268,7 +481,18 @@ static const struct iommu_flush_ops tlb_ops = {
static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg);
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+static size_t get_tblsz(const struct io_pgtable_cfg *cfg)
+{
+ int pg_shift, bits_per_level;
+
+ pg_shift = __ffs(cfg->pgsize_bitmap);
+ /* arm_lpae_iopte is u64: */
+ bits_per_level = pg_shift - ilog2(sizeof(u64));
+
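+ /*
+ * e.g. a 4 KiB granule gives pg_shift = 12 and bits_per_level = 9,
+ * so each table is 8 << 9 = 4096 bytes (one page).
+ */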
+ return sizeof(u64) << bits_per_level;
+}
+
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
struct msm_iommu *iommu = to_msm_iommu(parent);
@@ -302,6 +526,36 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
ttbr0_cfg.tlb = &tlb_ops;
+ if (!kernel_managed) {
+ ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN;
+
+ /*
+ * With userspace managed VM (aka VM_BIND), we need to pre-
+ * allocate pages ahead of time for map/unmap operations,
+ * handing them to io-pgtable via custom alloc/free ops as
+ * needed:
+ */
+ ttbr0_cfg.alloc = msm_iommu_pagetable_alloc_pt;
+ ttbr0_cfg.free = msm_iommu_pagetable_free_pt;
+
+ /*
+ * Restrict to single page granules. Otherwise we may run
+ * into a situation where userspace wants to unmap/remap
+ * only a part of a larger block mapping, which is not
+ * possible without unmapping the entire block. Which in
+ * turn could cause faults if the GPU is accessing other
+ * parts of the block mapping.
+ *
+ * Note that prior to commit 33729a5fc0ca ("iommu/io-pgtable-arm:
+ * Remove split on unmap behavior") this was handled in
+ * io-pgtable-arm. But this apparently does not work
+ * correctly on SMMUv3.
+ */
+ WARN_ON(!(ttbr0_cfg.pgsize_bitmap & PAGE_SIZE));
+ ttbr0_cfg.pgsize_bitmap = PAGE_SIZE;
+ }
+
+ pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
&ttbr0_cfg, pagetable);
@@ -321,12 +575,30 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
kfree(pagetable);
return ERR_PTR(ret);
}
+
+ BUG_ON(iommu->prr_page);
+ if (adreno_smmu->set_prr_bit) {
+ /*
+ * We need a zero'd page for two reasons:
+ *
+ * 1) Reserve a known physical address to use when
+ * mapping NULL / sparsely resident regions
+ * 2) Read back zero
+ *
+ * It appears the hw drops writes to the PRR region
+ * on the floor, but reads actually return whatever
+ * is in the PRR page.
+ */
+ iommu->prr_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ adreno_smmu->set_prr_addr(adreno_smmu->cookie,
+ page_to_phys(iommu->prr_page));
+ adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);
+ }
}
/* Needed later for TLB flush */
pagetable->parent = parent;
pagetable->tlb = ttbr1_cfg->tlb;
- pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
@@ -388,11 +660,14 @@ static void msm_iommu_detach(struct msm_mmu *mmu)
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
+ WARN_ON(off != 0);
+
/* The arm-smmu driver expects the addresses to be sign extended */
if (iova & BIT_ULL(48))
iova |= GENMASK_ULL(63, 49);
@@ -419,6 +694,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
iommu_domain_free(iommu->domain);
+ kmem_cache_destroy(iommu->pt_cache);
kfree(iommu);
}
@@ -492,6 +768,14 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
return mmu;
iommu = to_msm_iommu(mmu);
+ if (adreno_smmu && adreno_smmu->cookie) {
+ const struct io_pgtable_cfg *cfg =
+ adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+ size_t tblsz = get_tblsz(cfg);
+
+ iommu->pt_cache =
+ kmem_cache_create("msm-mmu-pt", tblsz, tblsz, 0, NULL);
+ }
iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);
/* Enable stall on iommu fault: */
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 35d5397e73b4..6889f1c1e721 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -13,6 +13,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_vblank.h>
+#include <drm/clients/drm_client_setup.h>
#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
@@ -137,7 +138,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
vbl_work->enable = enable;
vbl_work->priv = priv;
- queue_work(priv->wq, &vbl_work->work);
+ queue_work(priv->kms->wq, &vbl_work->work);
return 0;
}
@@ -176,9 +177,9 @@ static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void
return -ENOSYS;
}
-struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev)
{
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
struct device *mdss_dev = mdp_dev->parent;
@@ -204,17 +205,26 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
return NULL;
}
- aspace = msm_gem_address_space_create(mmu, "mdp_kms",
- 0x1000, 0x100000000 - 0x1000);
- if (IS_ERR(aspace)) {
- dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
+ vm = msm_gem_vm_create(dev, mmu, "mdp_kms",
+ 0x1000, 0x100000000 - 0x1000, true);
+ if (IS_ERR(vm)) {
+ dev_err(mdp_dev, "vm create, error %pe\n", vm);
mmu->funcs->destroy(mmu);
- return aspace;
+ return vm;
}
- msm_mmu_set_fault_handler(aspace->mmu, kms, msm_kms_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(vm)->mmu, kms, msm_kms_fault_handler);
- return aspace;
+ return vm;
+}
+
+void msm_drm_kms_unregister(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *ddev = priv->dev;
+
+ drm_atomic_helper_shutdown(ddev);
}
void msm_drm_kms_uninit(struct device *dev)
@@ -227,10 +237,17 @@ void msm_drm_kms_uninit(struct device *dev)
BUG_ON(!kms);
+ /*
+ * We must cancel and clean up any pending vblank enable/disable
+ * work before msm_irq_uninstall() to avoid work re-enabling an
+ * irq after uninstall has disabled it.
+ */
+
+ flush_workqueue(kms->wq);
+
/* clean up event worker threads */
- for (i = 0; i < priv->num_crtcs; i++) {
- if (priv->event_thread[i].worker)
- kthread_destroy_worker(priv->event_thread[i].worker);
+ for (i = 0; i < MAX_CRTCS; i++) {
+ if (kms->event_thread[i].worker)
+ kthread_destroy_worker(kms->event_thread[i].worker);
}
drm_kms_helper_poll_fini(ddev);
@@ -261,7 +278,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
- return ret;
+ goto err_msm_uninit;
}
/* Enable normalization of plane zpos */
@@ -283,7 +300,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
struct msm_drm_thread *ev_thread;
/* initialize event thread */
- ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
+ ev_thread = &kms->event_thread[drm_crtc_index(crtc)];
ev_thread->dev = ddev;
ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
if (IS_ERR(ev_thread->worker)) {
@@ -296,7 +313,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
sched_set_fifo(ev_thread->worker->task);
}
- ret = drm_vblank_init(ddev, priv->num_crtcs);
+ ret = drm_vblank_init(ddev, ddev->mode_config.num_crtc);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
goto err_msm_uninit;
@@ -359,3 +376,13 @@ void msm_kms_shutdown(struct platform_device *pdev)
if (drm && drm->registered && priv->kms)
drm_atomic_helper_shutdown(drm);
}
+
+void msm_drm_kms_post_init(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *ddev = priv->dev;
+
+ drm_kms_helper_poll_init(ddev);
+ drm_client_setup(ddev, NULL);
+}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 43b58d052ee6..8a7be7b854de 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -13,6 +13,8 @@
#include "msm_drv.h"
+#ifdef CONFIG_DRM_MSM_KMS
+
#define MAX_PLANE 4
/* As there are different display controller blocks depending on the
@@ -127,10 +129,22 @@ struct msm_pending_timer {
unsigned crtc_idx;
};
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
+ struct drm_device *dev;
+ struct kthread_worker *worker;
+};
+
struct msm_kms {
const struct msm_kms_funcs *funcs;
struct drm_device *dev;
+ struct hdmi *hdmi;
+
+ struct msm_dsi *dsi[MSM_DSI_CONTROLLER_COUNT];
+
+ struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];
+
/* irq number to be passed on to msm_irq_install */
int irq;
bool irq_requested;
@@ -139,7 +153,7 @@ struct msm_kms {
atomic_t fault_snapshot_capture;
/* mapper-id used to request GEM buffer mapped for scanout: */
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
/* disp snapshot support */
struct kthread_worker *dump_worker;
@@ -153,6 +167,9 @@ struct msm_kms {
struct mutex commit_lock[MAX_CRTCS];
unsigned pending_crtc_mask;
struct msm_pending_timer pending_timers[MAX_CRTCS];
+
+ struct workqueue_struct *wq;
+ struct msm_drm_thread event_thread[MAX_CRTCS];
};
static inline int msm_kms_init(struct msm_kms *kms,
@@ -165,6 +182,10 @@ static inline int msm_kms_init(struct msm_kms *kms,
kms->funcs = funcs;
+ kms->wq = alloc_ordered_workqueue("msm", 0);
+ if (!kms->wq)
+ return -ENOMEM;
+
for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) {
ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
if (ret) {
@@ -181,6 +202,8 @@ static inline void msm_kms_destroy(struct msm_kms *kms)
for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
+
+ destroy_workqueue(kms->wq);
}
#define for_each_crtc_mask(dev, crtc, crtc_mask) \
@@ -192,6 +215,29 @@ static inline void msm_kms_destroy(struct msm_kms *kms)
for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv);
+void msm_drm_kms_post_init(struct device *dev);
+void msm_drm_kms_unregister(struct device *dev);
void msm_drm_kms_uninit(struct device *dev);
+#else /* ! CONFIG_DRM_MSM_KMS */
+
+static inline int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
+{
+ return -ENODEV;
+}
+
+static inline void msm_drm_kms_post_init(struct device *dev)
+{
+}
+
+static inline void msm_drm_kms_unregister(struct device *dev)
+{
+}
+
+static inline void msm_drm_kms_uninit(struct device *dev)
+{
+}
+
+#endif
+
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 709979fcfab6..1f5fe7811e01 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -16,14 +16,17 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include "msm_mdss.h"
+#include <linux/soc/qcom/ubwc.h>
+
#include "msm_kms.h"
#include <generated/mdss.xml.h>
#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
-#define DEFAULT_REG_BW 153600 /* Used in mdss fbdev driver */
+struct msm_mdss_data {
+ u32 reg_bus_bw;
+};
struct msm_mdss {
struct device *dev;
@@ -36,7 +39,8 @@ struct msm_mdss {
unsigned long enabled_mask;
struct irq_domain *domain;
} irq_controller;
- const struct msm_mdss_data *mdss_data;
+ const struct qcom_ubwc_cfg_data *mdss_data;
+ u32 reg_bus_bw;
struct icc_path *mdp_path[2];
u32 num_mdp_paths;
struct icc_path *reg_bus_path;
@@ -165,9 +169,9 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
- const struct msm_mdss_data *data = msm_mdss->mdss_data;
+ const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
- MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);
if (data->ubwc_bank_spread)
value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
@@ -180,9 +184,9 @@ static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
- const struct msm_mdss_data *data = msm_mdss->mdss_data;
+ const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle & 0x1) |
- MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);
if (data->macrotile_mode)
value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
@@ -198,9 +202,9 @@ static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
- const struct msm_mdss_data *data = msm_mdss->mdss_data;
+ const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
- MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);
if (data->ubwc_bank_spread)
value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
@@ -222,67 +226,22 @@ static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
}
}
-#define MDSS_HW_MAJ_MIN \
- (MDSS_HW_VERSION_MAJOR__MASK | MDSS_HW_VERSION_MINOR__MASK)
-
-#define MDSS_HW_MSM8996 0x1007
-#define MDSS_HW_MSM8937 0x100e
-#define MDSS_HW_MSM8953 0x1010
-#define MDSS_HW_MSM8998 0x3000
-#define MDSS_HW_SDM660 0x3002
-#define MDSS_HW_SDM630 0x3003
-
-/*
- * MDP5 platforms use generic qcom,mdp5 compat string, so we have to generate this data
- */
-static const struct msm_mdss_data *msm_mdss_generate_mdp5_mdss_data(struct msm_mdss *mdss)
+static void msm_mdss_setup_ubwc_dec_50(struct msm_mdss *msm_mdss)
{
- struct msm_mdss_data *data;
- u32 hw_rev;
-
- data = devm_kzalloc(mdss->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return NULL;
-
- hw_rev = readl_relaxed(mdss->mmio + REG_MDSS_HW_VERSION);
- hw_rev = FIELD_GET(MDSS_HW_MAJ_MIN, hw_rev);
-
- if (hw_rev == MDSS_HW_MSM8996 ||
- hw_rev == MDSS_HW_MSM8937 ||
- hw_rev == MDSS_HW_MSM8953 ||
- hw_rev == MDSS_HW_MSM8998 ||
- hw_rev == MDSS_HW_SDM660 ||
- hw_rev == MDSS_HW_SDM630) {
- data->ubwc_dec_version = UBWC_1_0;
- data->ubwc_enc_version = UBWC_1_0;
- }
-
- if (hw_rev == MDSS_HW_MSM8996 ||
- hw_rev == MDSS_HW_MSM8998)
- data->highest_bank_bit = 2;
- else
- data->highest_bank_bit = 1;
-
- return data;
-}
-
-const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev)
-{
- struct msm_mdss *mdss;
+ const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
+ u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
- if (!dev)
- return ERR_PTR(-EINVAL);
+ if (data->ubwc_bank_spread)
+ value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
- mdss = dev_get_drvdata(dev);
+ if (data->macrotile_mode)
+ value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
- /*
- * We could not do it at the probe time, since hw revision register was
- * not readable. Fill data structure now for the MDP5 platforms.
- */
- if (!mdss->mdss_data && mdss->is_mdp5)
- mdss->mdss_data = msm_mdss_generate_mdp5_mdss_data(mdss);
+ writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
- return mdss->mdss_data;
+ writel_relaxed(4, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
+ writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
}
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
@@ -297,12 +256,8 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
for (i = 0; i < msm_mdss->num_mdp_paths; i++)
icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW));
- if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw)
- icc_set_bw(msm_mdss->reg_bus_path, 0,
- msm_mdss->mdss_data->reg_bus_bw);
- else
- icc_set_bw(msm_mdss->reg_bus_path, 0,
- DEFAULT_REG_BW);
+ icc_set_bw(msm_mdss->reg_bus_path, 0,
+ msm_mdss->reg_bus_bw);
ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
if (ret) {
@@ -339,6 +294,9 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
case UBWC_4_3:
msm_mdss_setup_ubwc_dec_40(msm_mdss);
break;
+ case UBWC_5_0:
+ msm_mdss_setup_ubwc_dec_50(msm_mdss);
+ break;
default:
dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n",
msm_mdss->mdss_data->ubwc_dec_version);
@@ -438,6 +396,7 @@ static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_d
static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
+ const struct msm_mdss_data *mdss_data;
struct msm_mdss *msm_mdss;
int ret;
int irq;
@@ -450,7 +409,15 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
if (!msm_mdss)
return ERR_PTR(-ENOMEM);
- msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev);
+ msm_mdss->mdss_data = qcom_ubwc_config_get_data();
+ if (IS_ERR(msm_mdss->mdss_data))
+ return ERR_CAST(msm_mdss->mdss_data);
+
+ mdss_data = of_device_get_match_data(&pdev->dev);
+ if (!mdss_data)
+ return ERR_PTR(-EINVAL);
+
+ msm_mdss->reg_bus_bw = mdss_data->reg_bus_bw;
msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
if (IS_ERR(msm_mdss->mmio))
@@ -569,205 +536,49 @@ static void mdss_remove(struct platform_device *pdev)
msm_mdss_destroy(mdss);
}
-static const struct msm_mdss_data msm8998_data = {
- .ubwc_enc_version = UBWC_1_0,
- .ubwc_dec_version = UBWC_1_0,
- .highest_bank_bit = 2,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data qcm2290_data = {
- /* no UBWC */
- .highest_bank_bit = 0x2,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sa8775p_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 4,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 0,
- .macrotile_mode = true,
- .reg_bus_bw = 74000,
-};
-
-static const struct msm_mdss_data sar2130p_data = {
- .ubwc_enc_version = UBWC_3_0, /* 4.0.2 in hw */
- .ubwc_dec_version = UBWC_4_3,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 0,
- .macrotile_mode = 1,
- .reg_bus_bw = 74000,
-};
-
-static const struct msm_mdss_data sc7180_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 0x1,
- .reg_bus_bw = 76800,
+static const struct msm_mdss_data data_57k = {
+ .reg_bus_bw = 57000,
};
-static const struct msm_mdss_data sc7280_data = {
- .ubwc_enc_version = UBWC_3_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 1,
- .macrotile_mode = true,
+static const struct msm_mdss_data data_74k = {
.reg_bus_bw = 74000,
};
-static const struct msm_mdss_data sc8180x_data = {
- .ubwc_enc_version = UBWC_3_0,
- .ubwc_dec_version = UBWC_3_0,
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sc8280xp_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sdm670_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .highest_bank_bit = 1,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sdm845_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .highest_bank_bit = 2,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm6350_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 1,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm7150_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .highest_bank_bit = 1,
+static const struct msm_mdss_data data_76k8 = {
.reg_bus_bw = 76800,
};
-static const struct msm_mdss_data sm8150_data = {
- .ubwc_enc_version = UBWC_3_0,
- .ubwc_dec_version = UBWC_3_0,
- .highest_bank_bit = 2,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm6115_data = {
- .ubwc_enc_version = UBWC_1_0,
- .ubwc_dec_version = UBWC_2_0,
- .ubwc_swizzle = 7,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 0x1,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm6125_data = {
- .ubwc_enc_version = UBWC_1_0,
- .ubwc_dec_version = UBWC_3_0,
- .ubwc_swizzle = 1,
- .highest_bank_bit = 1,
-};
-
-static const struct msm_mdss_data sm6150_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .highest_bank_bit = 1,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm8250_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- /* TODO: highest_bank_bit = 2 for LP_DDR4 */
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm8350_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- /* TODO: highest_bank_bit = 2 for LP_DDR4 */
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 74000,
-};
-
-static const struct msm_mdss_data sm8550_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_3,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- /* TODO: highest_bank_bit = 2 for LP_DDR4 */
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 57000,
-};
-
-static const struct msm_mdss_data x1e80100_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_3,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- /* TODO: highest_bank_bit = 2 for LP_DDR4 */
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- /* TODO: Add reg_bus_bw with real value */
+static const struct msm_mdss_data data_153k6 = {
+ .reg_bus_bw = 153600,
};
static const struct of_device_id mdss_dt_match[] = {
- { .compatible = "qcom,mdss" },
- { .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
- { .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
- { .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data },
- { .compatible = "qcom,sar2130p-mdss", .data = &sar2130p_data },
- { .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
- { .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
- { .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
- { .compatible = "qcom,sc7280-mdss", .data = &sc7280_data },
- { .compatible = "qcom,sc8180x-mdss", .data = &sc8180x_data },
- { .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
- { .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
- { .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
- { .compatible = "qcom,sm6150-mdss", .data = &sm6150_data },
- { .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
- { .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
- { .compatible = "qcom,sm7150-mdss", .data = &sm7150_data },
- { .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
- { .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
- { .compatible = "qcom,sm8350-mdss", .data = &sm8350_data },
- { .compatible = "qcom,sm8450-mdss", .data = &sm8350_data },
- { .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
- { .compatible = "qcom,sm8650-mdss", .data = &sm8550_data},
- { .compatible = "qcom,x1e80100-mdss", .data = &x1e80100_data},
+ { .compatible = "qcom,mdss", .data = &data_153k6 },
+ { .compatible = "qcom,msm8998-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,qcm2290-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sa8775p-mdss", .data = &data_74k },
+ { .compatible = "qcom,sar2130p-mdss", .data = &data_74k },
+ { .compatible = "qcom,sdm670-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sdm845-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sc7180-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sc7280-mdss", .data = &data_74k },
+ { .compatible = "qcom,sc8180x-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sc8280xp-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6115-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6125-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6150-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6350-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6375-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm7150-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm8150-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm8250-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm8350-mdss", .data = &data_74k },
+ { .compatible = "qcom,sm8450-mdss", .data = &data_74k },
+ { .compatible = "qcom,sm8550-mdss", .data = &data_57k },
+ { .compatible = "qcom,sm8650-mdss", .data = &data_57k },
+ { .compatible = "qcom,sm8750-mdss", .data = &data_57k },
+ /* TODO: x1e8: Add reg_bus_bw with real value */
+ { .compatible = "qcom,x1e80100-mdss", .data = &data_153k6 },
{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);
diff --git a/drivers/gpu/drm/msm/msm_mdss.h b/drivers/gpu/drm/msm/msm_mdss.h
deleted file mode 100644
index 14dc53704314..000000000000
--- a/drivers/gpu/drm/msm/msm_mdss.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2018, The Linux Foundation
- */
-
-#ifndef __MSM_MDSS_H__
-#define __MSM_MDSS_H__
-
-struct msm_mdss_data {
- u32 ubwc_enc_version;
- /* can be read from register 0x58 */
- u32 ubwc_dec_version;
- u32 ubwc_swizzle;
- u32 highest_bank_bit;
- bool ubwc_bank_spread;
- bool macrotile_mode;
- u32 reg_bus_bw;
-};
-
-#define UBWC_1_0 0x10000000
-#define UBWC_2_0 0x20000000
-#define UBWC_3_0 0x30000000
-#define UBWC_4_0 0x40000000
-#define UBWC_4_3 0x40030000
-
-const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev);
-
-#endif /* __MSM_MDSS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 0c694907140d..8915662fbd4d 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -9,10 +9,18 @@
#include <linux/iommu.h>
+struct msm_mmu_prealloc;
+struct msm_mmu;
+struct msm_gpu;
+
struct msm_mmu_funcs {
void (*detach)(struct msm_mmu *mmu);
+ void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
+ uint64_t iova, size_t len);
+ int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
+ void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
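+ /*
+ * @map maps a window of @sgt: @off bytes into the sg_table, @len
+ * bytes long; eg. map(mmu, iova, sgt, SZ_1M, SZ_64K, prot) would map
+ * a 64 KiB window starting 1 MiB into the BO (illustrative values;
+ * the kernel-managed msm_iommu_map() path still expects @off == 0).
+ */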
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- size_t len, int prot);
+ size_t off, size_t len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
void (*set_stall)(struct msm_mmu *mmu, bool enable);
@@ -24,12 +32,38 @@ enum msm_mmu_type {
MSM_MMU_IOMMU_PAGETABLE,
};
+/**
+ * struct msm_mmu_prealloc - Tracking for pre-allocated pages for MMU updates.
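+ *
+ * The hooks are expected to be driven in a count -> allocate ->
+ * map/unmap -> cleanup sequence by the VM_BIND job path (a sketch
+ * under that assumption; the real call sites are not in this header):
+ *
+ * struct msm_mmu_prealloc p = {};
+ *
+ * mmu->funcs->prealloc_count(mmu, &p, iova, len);
+ * if (!mmu->funcs->prealloc_allocate(mmu, &p)) {
+ * mmu->prealloc = &p;
+ * mmu->funcs->map(mmu, iova, sgt, 0, len, prot);
+ * mmu->prealloc = NULL;
+ * mmu->funcs->prealloc_cleanup(mmu, &p);
+ * }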
+ */
+struct msm_mmu_prealloc {
+ /** @count: Number of pages reserved. */
+ uint32_t count;
+ /** @ptr: Index of first unused page in @pages */
+ uint32_t ptr;
+ /**
+ * @pages: Array of pages preallocated for MMU table updates.
+ *
+ * After a VM operation, there might be free pages remaining in this
+ * array (since the amount allocated is a worst-case). These are
+ * returned to the pt_cache at mmu->prealloc_cleanup().
+ */
+ void **pages;
+};
+
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
struct device *dev;
int (*handler)(void *arg, unsigned long iova, int flags, void *data);
void *arg;
enum msm_mmu_type type;
+
+ /**
+ * @prealloc: pre-allocated pages for pgtable
+ *
+ * Set while a VM_BIND job is running, serialized under
+ * msm_gem_vm::mmu_lock.
+ */
+ struct msm_mmu_prealloc *prealloc;
};
static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
@@ -51,7 +85,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
mmu->handler = handler;
}
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed);
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
int *asid);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 39138e190cb9..54493a94dcb7 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -308,21 +308,11 @@ void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
priv->hangrd = NULL;
}
-static void snapshot_buf(struct msm_rd_state *rd,
- struct msm_gem_submit *submit, int idx,
- uint64_t iova, uint32_t size, bool full)
+static void snapshot_buf(struct msm_rd_state *rd, struct drm_gem_object *obj,
+ uint64_t iova, bool full, size_t offset, size_t size)
{
- struct drm_gem_object *obj = submit->bos[idx].obj;
- unsigned offset = 0;
const char *buf;
- if (iova) {
- offset = iova - submit->bos[idx].iova;
- } else {
- iova = submit->bos[idx].iova;
- size = obj->size;
- }
-
/*
* Always write the GPUADDR header so can get a complete list of all the
* buffers in the cmd
@@ -333,10 +323,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
if (!full)
return;
- /* But only dump the contents of buffers marked READ */
- if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
- return;
-
buf = msm_gem_get_vaddr_active(obj);
if (IS_ERR(buf))
return;
@@ -352,6 +338,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
{
+ extern bool rd_full;
struct task_struct *task;
char msg[256];
int i, n;
@@ -385,16 +372,43 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
- for (i = 0; i < submit->nr_bos; i++)
- snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i));
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ struct drm_gpuva *vma;
- for (i = 0; i < submit->nr_cmds; i++) {
- uint32_t szd = submit->cmd[i].size; /* in dwords */
+ drm_gpuvm_resv_assert_held(submit->vm);
+
+ drm_gpuvm_for_each_va (vma, submit->vm) {
+ bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);
+
+ /* Skip MAP_NULL/PRR VMAs: */
+ if (!vma->gem.obj)
+ continue;
+
+ snapshot_buf(rd, vma->gem.obj, vma->va.addr, dump,
+ vma->gem.offset, vma->va.range);
+ }
+
+ } else {
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
+
+ snapshot_buf(rd, obj, submit->bos[i].iova, dump, 0, obj->size);
+ }
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ uint32_t szd = submit->cmd[i].size; /* in dwords */
+ int idx = submit->cmd[i].idx;
+ bool dump = rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+
+ /* snapshot cmdstream bo's (if we haven't already): */
+ if (!dump) {
+ struct drm_gem_object *obj = submit->bos[idx].obj;
+ size_t offset = submit->cmd[i].iova - submit->bos[idx].iova;
- /* snapshot cmdstream bo's (if we haven't already): */
- if (!should_dump(submit, i)) {
- snapshot_buf(rd, submit, submit->cmd[i].idx,
- submit->cmd[i].iova, szd * 4, true);
+ snapshot_buf(rd, obj, submit->cmd[i].iova, true,
+ offset, szd * 4);
+ }
}
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 89dce15eed3b..b2f612e5dc79 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -17,6 +17,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
struct msm_fence_context *fctx = submit->ring->fctx;
struct msm_gpu *gpu = submit->gpu;
struct msm_drm_private *priv = gpu->dev->dev_private;
+ unsigned nr_cmds = submit->nr_cmds;
int i;
msm_fence_init(submit->hw_fence, fctx);
@@ -36,8 +37,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
/* TODO move submit path over to using a per-ring lock.. */
mutex_lock(&gpu->lock);
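+ /*
+ * If the context was closed, no-op the submit by writing zero
+ * commands, restoring nr_cmds afterwards so later bookkeeping still
+ * sees the real value:
+ */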
+ if (submit->queue->ctx->closed)
+ submit->nr_cmds = 0;
+
msm_gpu_submit(gpu, submit);
+ submit->nr_cmds = nr_cmds;
+
mutex_unlock(&gpu->lock);
return dma_fence_get(submit->hw_fence);
@@ -84,7 +90,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
- gpu->aspace, &ring->bo, &ring->iova);
+ gpu->vm, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
@@ -131,7 +137,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
msm_fence_context_free(ring->fctx);
- msm_gem_kernel_put(ring->bo, ring->gpu->aspace);
+ msm_gem_kernel_put(ring->bo, ring->gpu->vm);
kfree(ring);
}
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 7fed1de63b5d..8617a82cd6b3 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -7,8 +7,7 @@
#include "msm_gpu.h"
-int msm_file_private_set_sysprof(struct msm_file_private *ctx,
- struct msm_gpu *gpu, int sysprof)
+int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sysprof)
{
/*
* Since pm_runtime and sysprof_active are both refcounts, we
@@ -46,10 +45,10 @@ int msm_file_private_set_sysprof(struct msm_file_private *ctx,
return 0;
}
-void __msm_file_private_destroy(struct kref *kref)
+void __msm_context_destroy(struct kref *kref)
{
- struct msm_file_private *ctx = container_of(kref,
- struct msm_file_private, ref);
+ struct msm_context *ctx = container_of(kref,
+ struct msm_context, ref);
int i;
for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
@@ -60,7 +59,7 @@ void __msm_file_private_destroy(struct kref *kref)
kfree(ctx->entities[i]);
}
- msm_gem_address_space_put(ctx->aspace);
+ drm_gpuvm_put(ctx->vm);
kfree(ctx->comm);
kfree(ctx->cmdline);
kfree(ctx);
@@ -73,12 +72,15 @@ void msm_submitqueue_destroy(struct kref *kref)
idr_destroy(&queue->fence_idr);
- msm_file_private_put(queue->ctx);
+ if (queue->entity == &queue->_vm_bind_entity[0])
+ drm_sched_entity_destroy(queue->entity);
+
+ msm_context_put(queue->ctx);
kfree(queue);
}
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_context *ctx,
u32 id)
{
struct msm_gpu_submitqueue *entry;
@@ -101,9 +103,9 @@ struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
return NULL;
}
-void msm_submitqueue_close(struct msm_file_private *ctx)
+void msm_submitqueue_close(struct msm_context *ctx)
{
- struct msm_gpu_submitqueue *entry, *tmp;
+ struct msm_gpu_submitqueue *queue, *tmp;
if (!ctx)
return;
@@ -112,14 +114,21 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
* No lock needed in close and there won't
* be any more user ioctls coming our way
*/
- list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
- list_del(&entry->node);
- msm_submitqueue_put(entry);
+ list_for_each_entry_safe(queue, tmp, &ctx->submitqueues, node) {
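+ /* Flush VM_BIND queues so in-flight VM ops drain before VM close: */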
+ if (queue->entity == &queue->_vm_bind_entity[0])
+ drm_sched_entity_flush(queue->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+ list_del(&queue->node);
+ msm_submitqueue_put(queue);
}
+
+ if (!ctx->vm)
+ return;
+
+ msm_gem_vm_close(ctx->vm);
}
static struct drm_sched_entity *
-get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
+get_sched_entity(struct msm_context *ctx, struct msm_ringbuffer *ring,
unsigned ring_nr, enum drm_sched_priority sched_prio)
{
static DEFINE_MUTEX(entity_lock);
@@ -155,14 +164,12 @@ get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
return ctx->entities[idx];
}
-int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx,
u32 prio, u32 flags, u32 *id)
{
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu_submitqueue *queue;
enum drm_sched_priority sched_prio;
- extern int enable_preemption;
- bool preemption_supported;
unsigned ring_nr;
int ret;
@@ -172,26 +179,53 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
if (!priv->gpu)
return -ENODEV;
- preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0;
+ if (flags & MSM_SUBMITQUEUE_VM_BIND) {
+ unsigned sz;
- if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
- return -EINVAL;
+ /* Not allowed for kernel managed VMs (ie. kernel allocs VA) */
+ if (!msm_context_is_vmbind(ctx))
+ return -EINVAL;
- ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
- if (ret)
- return ret;
+ if (prio)
+ return -EINVAL;
+
+ sz = struct_size(queue, _vm_bind_entity, 1);
+ queue = kzalloc(sz, GFP_KERNEL);
+ } else {
+ extern int enable_preemption;
+ bool preemption_supported =
+ priv->gpu->nr_rings == 1 && enable_preemption != 0;
+
+ if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
+ return -EINVAL;
- queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
+ if (ret)
+ return ret;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ }
if (!queue)
return -ENOMEM;
kref_init(&queue->ref);
queue->flags = flags;
- queue->ring_nr = ring_nr;
- queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
- ring_nr, sched_prio);
+ if (flags & MSM_SUBMITQUEUE_VM_BIND) {
+ struct drm_gpu_scheduler *sched = &to_msm_vm(msm_context_vm(drm, ctx))->sched;
+
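+ /* VM_BIND jobs run on the VM's own scheduler, via the embedded entity: */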
+ queue->entity = &queue->_vm_bind_entity[0];
+
+ drm_sched_entity_init(queue->entity, DRM_SCHED_PRIORITY_KERNEL,
+ &sched, 1, NULL);
+ } else {
+ queue->ring_nr = ring_nr;
+
+ queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+ ring_nr, sched_prio);
+ }
+
if (IS_ERR(queue->entity)) {
ret = PTR_ERR(queue->entity);
kfree(queue);
@@ -200,7 +234,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
write_lock(&ctx->queuelock);
- queue->ctx = msm_file_private_get(ctx);
+ queue->ctx = msm_context_get(ctx);
queue->id = ctx->queueid++;
if (id)
@@ -221,7 +255,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
* Create the default submit-queue (id==0), used for backwards compatibility
* for userspace that pre-dates the introduction of submitqueues.
*/
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
+int msm_submitqueue_init(struct drm_device *drm, struct msm_context *ctx)
{
struct msm_drm_private *priv = drm->dev_private;
int default_prio, max_priority;
@@ -261,7 +295,7 @@ static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
return ret ? -EFAULT : 0;
}
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_query(struct drm_device *drm, struct msm_context *ctx,
struct drm_msm_submitqueue_query *args)
{
struct msm_gpu_submitqueue *queue;
@@ -282,7 +316,7 @@ int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
return ret;
}
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
+int msm_submitqueue_remove(struct msm_context *ctx, u32 id)
{
struct msm_gpu_submitqueue *entry;
diff --git a/drivers/gpu/drm/msm/msm_syncobj.c b/drivers/gpu/drm/msm/msm_syncobj.c
new file mode 100644
index 000000000000..4baa9f522c54
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_syncobj.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Google, Inc */
+
+#include "drm/drm_drv.h"
+
+#include "msm_drv.h"
+#include "msm_syncobj.h"
+
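+/*
+ * msm_syncobj_parse_deps() - import userspace syncobj waits
+ *
+ * Copies an array of struct drm_msm_syncobj from userspace and adds each
+ * wait (binary, or timeline point) as a dependency of @job.  Handles
+ * flagged with MSM_SYNCOBJ_RESET are remembered and returned, so that the
+ * caller can clear their fences once the job has been queued.
+ */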
+struct drm_syncobj **
+msm_syncobj_parse_deps(struct drm_device *dev,
+ struct drm_sched_job *job,
+ struct drm_file *file,
+ uint64_t in_syncobjs_addr,
+ uint32_t nr_in_syncobjs,
+ size_t syncobj_stride)
+{
+ struct drm_syncobj **syncobjs = NULL;
+ struct drm_msm_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!syncobjs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_in_syncobjs; ++i) {
+ uint64_t address = in_syncobjs_addr + i * syncobj_stride;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (syncobj_desc.point &&
+ !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
+ break;
+ }
+
+ if (syncobj_desc.flags & ~MSM_SYNCOBJ_FLAGS) {
+ ret = UERR(EINVAL, dev, "invalid syncobj flags: %x", syncobj_desc.flags);
+ break;
+ }
+
+ ret = drm_sched_job_add_syncobj_dependency(job, file,
+ syncobj_desc.handle,
+ syncobj_desc.point);
+ if (ret)
+ break;
+
+ if (syncobj_desc.flags & MSM_SYNCOBJ_RESET) {
+ syncobjs[i] = drm_syncobj_find(file, syncobj_desc.handle);
+ if (!syncobjs[i]) {
+ ret = UERR(EINVAL, dev, "invalid syncobj handle: %u", i);
+ break;
+ }
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ if (syncobjs[j])
+ drm_syncobj_put(syncobjs[j]);
+ }
+ kfree(syncobjs);
+ return ERR_PTR(ret);
+ }
+ return syncobjs;
+}
+
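+/* Clear the fences of the syncobjs that were collected with MSM_SYNCOBJ_RESET: */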
+void
+msm_syncobj_reset(struct drm_syncobj **syncobjs, uint32_t nr_syncobjs)
+{
+ uint32_t i;
+
+ for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
+ if (syncobjs[i])
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+ }
+}
+
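+/*
+ * Collect the syncobjs to signal on completion.  For timeline points, the
+ * dma_fence_chain node is preallocated here so that the signaling path in
+ * msm_syncobj_process_post_deps() cannot fail with -ENOMEM.
+ */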
+struct msm_syncobj_post_dep *
+msm_syncobj_parse_post_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t syncobjs_addr,
+ uint32_t nr_syncobjs,
+ size_t syncobj_stride)
+{
+ struct msm_syncobj_post_dep *post_deps;
+ struct drm_msm_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!post_deps)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_syncobjs; ++i) {
+ uint64_t address = syncobjs_addr + i * syncobj_stride;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ post_deps[i].point = syncobj_desc.point;
+
+ if (syncobj_desc.flags) {
+ ret = UERR(EINVAL, dev, "invalid syncobj flags");
+ break;
+ }
+
+ if (syncobj_desc.point) {
+ if (!drm_core_check_feature(dev,
+ DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
+ break;
+ }
+
+ post_deps[i].chain = dma_fence_chain_alloc();
+ if (!post_deps[i].chain) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ post_deps[i].syncobj =
+ drm_syncobj_find(file, syncobj_desc.handle);
+ if (!post_deps[i].syncobj) {
+ ret = UERR(EINVAL, dev, "invalid syncobj handle");
+ break;
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ dma_fence_chain_free(post_deps[j].chain);
+ if (post_deps[j].syncobj)
+ drm_syncobj_put(post_deps[j].syncobj);
+ }
+
+ kfree(post_deps);
+ return ERR_PTR(ret);
+ }
+
+ return post_deps;
+}
+
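+/* Attach @fence to each post-dep, as a timeline point where a chain was preallocated: */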
+void
+msm_syncobj_process_post_deps(struct msm_syncobj_post_dep *post_deps,
+ uint32_t count, struct dma_fence *fence)
+{
+ uint32_t i;
+
+ for (i = 0; post_deps && i < count; ++i) {
+ if (post_deps[i].chain) {
+ drm_syncobj_add_point(post_deps[i].syncobj,
+ post_deps[i].chain,
+ fence, post_deps[i].point);
+ post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(post_deps[i].syncobj,
+ fence);
+ }
+ }
+}
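+
+/*
+ * Typical usage from a submit path (an illustrative sketch; the
+ * surrounding variable names are assumptions, not part of this patch):
+ *
+ *	syncobjs = msm_syncobj_parse_deps(dev, &submit->base, file,
+ *			args->in_syncobjs, args->nr_in_syncobjs,
+ *			args->syncobj_stride);
+ *	post_deps = msm_syncobj_parse_post_deps(dev, file,
+ *			args->out_syncobjs, args->nr_out_syncobjs,
+ *			args->syncobj_stride);
+ *
+ *	... queue the job and obtain its fence ...
+ *
+ *	msm_syncobj_reset(syncobjs, args->nr_in_syncobjs);
+ *	msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, fence);
+ */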
diff --git a/drivers/gpu/drm/msm/msm_syncobj.h b/drivers/gpu/drm/msm/msm_syncobj.h
new file mode 100644
index 000000000000..bcaa15d01da0
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_syncobj.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2020 Google, Inc */
+
+#ifndef __MSM_GEM_SYNCOBJ_H__
+#define __MSM_GEM_SYNCOBJ_H__
+
+#include "drm/drm_device.h"
+#include "drm/drm_syncobj.h"
+#include "drm/gpu_scheduler.h"
+
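+/*
+ * A syncobj to signal when the submit completes; 'chain' is preallocated
+ * when a timeline 'point' was requested, so signaling cannot fail.
+ */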
+struct msm_syncobj_post_dep {
+ struct drm_syncobj *syncobj;
+ uint64_t point;
+ struct dma_fence_chain *chain;
+};
+
+struct drm_syncobj **
+msm_syncobj_parse_deps(struct drm_device *dev,
+ struct drm_sched_job *job,
+ struct drm_file *file,
+ uint64_t in_syncobjs_addr,
+ uint32_t nr_in_syncobjs,
+ size_t syncobj_stride);
+
+void msm_syncobj_reset(struct drm_syncobj **syncobjs, uint32_t nr_syncobjs);
+
+struct msm_syncobj_post_dep *
+msm_syncobj_parse_post_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t syncobjs_addr,
+ uint32_t nr_syncobjs,
+ size_t syncobj_stride);
+
+void msm_syncobj_process_post_deps(struct msm_syncobj_post_dep *post_deps,
+ uint32_t count, struct dma_fence *fence);
+
+#endif /* __MSM_GEM_SYNCOBJ_H__ */
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index 2db425abf0f3..d860fd94feae 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -5,6 +5,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<import file="freedreno_copyright.xml"/>
<import file="adreno/adreno_common.xml"/>
<import file="adreno/adreno_pm4.xml"/>
+<import file="adreno/a6xx_enums.xml"/>
+<import file="adreno/a7xx_enums.xml"/>
+<import file="adreno/a6xx_perfcntrs.xml"/>
+<import file="adreno/a7xx_perfcntrs.xml"/>
+<import file="adreno/a6xx_descriptors.xml"/>
<!--
Each register that is actually being used by driver should have "usage" defined,
@@ -20,2205 +25,6 @@ is either overwritten by renderpass/blit (ib2) or not used if not overwritten
by a particular renderpass/blit.
-->
-<!-- these might be same as a5xx -->
-<enum name="a6xx_tile_mode">
- <value name="TILE6_LINEAR" value="0"/>
- <value name="TILE6_2" value="2"/>
- <value name="TILE6_3" value="3"/>
-</enum>
-
-<enum name="a6xx_format">
- <value value="0x02" name="FMT6_A8_UNORM"/>
- <value value="0x03" name="FMT6_8_UNORM"/>
- <value value="0x04" name="FMT6_8_SNORM"/>
- <value value="0x05" name="FMT6_8_UINT"/>
- <value value="0x06" name="FMT6_8_SINT"/>
-
- <value value="0x08" name="FMT6_4_4_4_4_UNORM"/>
- <value value="0x0a" name="FMT6_5_5_5_1_UNORM"/>
- <value value="0x0c" name="FMT6_1_5_5_5_UNORM"/> <!-- read only -->
- <value value="0x0e" name="FMT6_5_6_5_UNORM"/>
-
- <value value="0x0f" name="FMT6_8_8_UNORM"/>
- <value value="0x10" name="FMT6_8_8_SNORM"/>
- <value value="0x11" name="FMT6_8_8_UINT"/>
- <value value="0x12" name="FMT6_8_8_SINT"/>
- <value value="0x13" name="FMT6_L8_A8_UNORM"/>
-
- <value value="0x15" name="FMT6_16_UNORM"/>
- <value value="0x16" name="FMT6_16_SNORM"/>
- <value value="0x17" name="FMT6_16_FLOAT"/>
- <value value="0x18" name="FMT6_16_UINT"/>
- <value value="0x19" name="FMT6_16_SINT"/>
-
- <value value="0x21" name="FMT6_8_8_8_UNORM"/>
- <value value="0x22" name="FMT6_8_8_8_SNORM"/>
- <value value="0x23" name="FMT6_8_8_8_UINT"/>
- <value value="0x24" name="FMT6_8_8_8_SINT"/>
-
- <value value="0x30" name="FMT6_8_8_8_8_UNORM"/>
- <value value="0x31" name="FMT6_8_8_8_X8_UNORM"/> <!-- samples 1 for alpha -->
- <value value="0x32" name="FMT6_8_8_8_8_SNORM"/>
- <value value="0x33" name="FMT6_8_8_8_8_UINT"/>
- <value value="0x34" name="FMT6_8_8_8_8_SINT"/>
-
- <value value="0x35" name="FMT6_9_9_9_E5_FLOAT"/>
-
- <value value="0x36" name="FMT6_10_10_10_2_UNORM"/>
- <value value="0x37" name="FMT6_10_10_10_2_UNORM_DEST"/>
- <value value="0x39" name="FMT6_10_10_10_2_SNORM"/>
- <value value="0x3a" name="FMT6_10_10_10_2_UINT"/>
- <value value="0x3b" name="FMT6_10_10_10_2_SINT"/>
-
- <value value="0x42" name="FMT6_11_11_10_FLOAT"/>
-
- <value value="0x43" name="FMT6_16_16_UNORM"/>
- <value value="0x44" name="FMT6_16_16_SNORM"/>
- <value value="0x45" name="FMT6_16_16_FLOAT"/>
- <value value="0x46" name="FMT6_16_16_UINT"/>
- <value value="0x47" name="FMT6_16_16_SINT"/>
-
- <value value="0x48" name="FMT6_32_UNORM"/>
- <value value="0x49" name="FMT6_32_SNORM"/>
- <value value="0x4a" name="FMT6_32_FLOAT"/>
- <value value="0x4b" name="FMT6_32_UINT"/>
- <value value="0x4c" name="FMT6_32_SINT"/>
- <value value="0x4d" name="FMT6_32_FIXED"/>
-
- <value value="0x58" name="FMT6_16_16_16_UNORM"/>
- <value value="0x59" name="FMT6_16_16_16_SNORM"/>
- <value value="0x5a" name="FMT6_16_16_16_FLOAT"/>
- <value value="0x5b" name="FMT6_16_16_16_UINT"/>
- <value value="0x5c" name="FMT6_16_16_16_SINT"/>
-
- <value value="0x60" name="FMT6_16_16_16_16_UNORM"/>
- <value value="0x61" name="FMT6_16_16_16_16_SNORM"/>
- <value value="0x62" name="FMT6_16_16_16_16_FLOAT"/>
- <value value="0x63" name="FMT6_16_16_16_16_UINT"/>
- <value value="0x64" name="FMT6_16_16_16_16_SINT"/>
-
- <value value="0x65" name="FMT6_32_32_UNORM"/>
- <value value="0x66" name="FMT6_32_32_SNORM"/>
- <value value="0x67" name="FMT6_32_32_FLOAT"/>
- <value value="0x68" name="FMT6_32_32_UINT"/>
- <value value="0x69" name="FMT6_32_32_SINT"/>
- <value value="0x6a" name="FMT6_32_32_FIXED"/>
-
- <value value="0x70" name="FMT6_32_32_32_UNORM"/>
- <value value="0x71" name="FMT6_32_32_32_SNORM"/>
- <value value="0x72" name="FMT6_32_32_32_UINT"/>
- <value value="0x73" name="FMT6_32_32_32_SINT"/>
- <value value="0x74" name="FMT6_32_32_32_FLOAT"/>
- <value value="0x75" name="FMT6_32_32_32_FIXED"/>
-
- <value value="0x80" name="FMT6_32_32_32_32_UNORM"/>
- <value value="0x81" name="FMT6_32_32_32_32_SNORM"/>
- <value value="0x82" name="FMT6_32_32_32_32_FLOAT"/>
- <value value="0x83" name="FMT6_32_32_32_32_UINT"/>
- <value value="0x84" name="FMT6_32_32_32_32_SINT"/>
- <value value="0x85" name="FMT6_32_32_32_32_FIXED"/>
-
- <value value="0x8c" name="FMT6_G8R8B8R8_422_UNORM"/> <!-- UYVY -->
- <value value="0x8d" name="FMT6_R8G8R8B8_422_UNORM"/> <!-- YUYV -->
- <value value="0x8e" name="FMT6_R8_G8B8_2PLANE_420_UNORM"/> <!-- NV12 -->
- <value value="0x8f" name="FMT6_NV21"/>
- <value value="0x90" name="FMT6_R8_G8_B8_3PLANE_420_UNORM"/> <!-- YV12 -->
-
- <value value="0x91" name="FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8"/>
-
- <!-- Note: tiling/UBWC for these may be different from equivalent formats
- For example FMT6_NV12_Y is not compatible with FMT6_8_UNORM
- -->
- <value value="0x94" name="FMT6_NV12_Y"/>
- <value value="0x95" name="FMT6_NV12_UV"/>
- <value value="0x96" name="FMT6_NV12_VU"/>
- <value value="0x97" name="FMT6_NV12_4R"/>
- <value value="0x98" name="FMT6_NV12_4R_Y"/>
- <value value="0x99" name="FMT6_NV12_4R_UV"/>
- <value value="0x9a" name="FMT6_P010"/>
- <value value="0x9b" name="FMT6_P010_Y"/>
- <value value="0x9c" name="FMT6_P010_UV"/>
- <value value="0x9d" name="FMT6_TP10"/>
- <value value="0x9e" name="FMT6_TP10_Y"/>
- <value value="0x9f" name="FMT6_TP10_UV"/>
-
- <value value="0xa0" name="FMT6_Z24_UNORM_S8_UINT"/>
-
- <value value="0xab" name="FMT6_ETC2_RG11_UNORM"/>
- <value value="0xac" name="FMT6_ETC2_RG11_SNORM"/>
- <value value="0xad" name="FMT6_ETC2_R11_UNORM"/>
- <value value="0xae" name="FMT6_ETC2_R11_SNORM"/>
- <value value="0xaf" name="FMT6_ETC1"/>
- <value value="0xb0" name="FMT6_ETC2_RGB8"/>
- <value value="0xb1" name="FMT6_ETC2_RGBA8"/>
- <value value="0xb2" name="FMT6_ETC2_RGB8A1"/>
- <value value="0xb3" name="FMT6_DXT1"/>
- <value value="0xb4" name="FMT6_DXT3"/>
- <value value="0xb5" name="FMT6_DXT5"/>
- <value value="0xb7" name="FMT6_RGTC1_UNORM"/>
- <value value="0xb8" name="FMT6_RGTC1_SNORM"/>
- <value value="0xbb" name="FMT6_RGTC2_UNORM"/>
- <value value="0xbc" name="FMT6_RGTC2_SNORM"/>
- <value value="0xbe" name="FMT6_BPTC_UFLOAT"/>
- <value value="0xbf" name="FMT6_BPTC_FLOAT"/>
- <value value="0xc0" name="FMT6_BPTC"/>
- <value value="0xc1" name="FMT6_ASTC_4x4"/>
- <value value="0xc2" name="FMT6_ASTC_5x4"/>
- <value value="0xc3" name="FMT6_ASTC_5x5"/>
- <value value="0xc4" name="FMT6_ASTC_6x5"/>
- <value value="0xc5" name="FMT6_ASTC_6x6"/>
- <value value="0xc6" name="FMT6_ASTC_8x5"/>
- <value value="0xc7" name="FMT6_ASTC_8x6"/>
- <value value="0xc8" name="FMT6_ASTC_8x8"/>
- <value value="0xc9" name="FMT6_ASTC_10x5"/>
- <value value="0xca" name="FMT6_ASTC_10x6"/>
- <value value="0xcb" name="FMT6_ASTC_10x8"/>
- <value value="0xcc" name="FMT6_ASTC_10x10"/>
- <value value="0xcd" name="FMT6_ASTC_12x10"/>
- <value value="0xce" name="FMT6_ASTC_12x12"/>
-
- <!-- for sampling stencil (integer, 2nd channel), not available on a630 -->
- <value value="0xea" name="FMT6_Z24_UINT_S8_UINT"/>
-
- <!-- Not a hw enum, used internally in driver -->
- <value value="0xff" name="FMT6_NONE"/>
-
-</enum>
-
-<!-- probably same as a5xx -->
-<enum name="a6xx_polygon_mode">
- <value name="POLYMODE6_POINTS" value="1"/>
- <value name="POLYMODE6_LINES" value="2"/>
- <value name="POLYMODE6_TRIANGLES" value="3"/>
-</enum>
-
-<enum name="a6xx_depth_format">
- <value name="DEPTH6_NONE" value="0"/>
- <value name="DEPTH6_16" value="1"/>
- <value name="DEPTH6_24_8" value="2"/>
- <value name="DEPTH6_32" value="4"/>
-</enum>
-
-<bitset name="a6x_cp_protect" inline="yes">
- <bitfield name="BASE_ADDR" low="0" high="17"/>
- <bitfield name="MASK_LEN" low="18" high="30"/>
- <bitfield name="READ" pos="31" type="boolean"/>
-</bitset>
-
-<enum name="a6xx_shader_id">
- <value value="0x9" name="A6XX_TP0_TMO_DATA"/>
- <value value="0xa" name="A6XX_TP0_SMO_DATA"/>
- <value value="0xb" name="A6XX_TP0_MIPMAP_BASE_DATA"/>
- <value value="0x19" name="A6XX_TP1_TMO_DATA"/>
- <value value="0x1a" name="A6XX_TP1_SMO_DATA"/>
- <value value="0x1b" name="A6XX_TP1_MIPMAP_BASE_DATA"/>
- <value value="0x29" name="A6XX_SP_INST_DATA"/>
- <value value="0x2a" name="A6XX_SP_LB_0_DATA"/>
- <value value="0x2b" name="A6XX_SP_LB_1_DATA"/>
- <value value="0x2c" name="A6XX_SP_LB_2_DATA"/>
- <value value="0x2d" name="A6XX_SP_LB_3_DATA"/>
- <value value="0x2e" name="A6XX_SP_LB_4_DATA"/>
- <value value="0x2f" name="A6XX_SP_LB_5_DATA"/>
- <value value="0x30" name="A6XX_SP_CB_BINDLESS_DATA"/>
- <value value="0x31" name="A6XX_SP_CB_LEGACY_DATA"/>
- <value value="0x32" name="A6XX_SP_UAV_DATA"/>
- <value value="0x33" name="A6XX_SP_INST_TAG"/>
- <value value="0x34" name="A6XX_SP_CB_BINDLESS_TAG"/>
- <value value="0x35" name="A6XX_SP_TMO_UMO_TAG"/>
- <value value="0x36" name="A6XX_SP_SMO_TAG"/>
- <value value="0x37" name="A6XX_SP_STATE_DATA"/>
- <value value="0x49" name="A6XX_HLSQ_CHUNK_CVS_RAM"/>
- <value value="0x4a" name="A6XX_HLSQ_CHUNK_CPS_RAM"/>
- <value value="0x4b" name="A6XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
- <value value="0x4c" name="A6XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
- <value value="0x4d" name="A6XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
- <value value="0x4e" name="A6XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
- <value value="0x50" name="A6XX_HLSQ_CVS_MISC_RAM"/>
- <value value="0x51" name="A6XX_HLSQ_CPS_MISC_RAM"/>
- <value value="0x52" name="A6XX_HLSQ_INST_RAM"/>
- <value value="0x53" name="A6XX_HLSQ_GFX_CVS_CONST_RAM"/>
- <value value="0x54" name="A6XX_HLSQ_GFX_CPS_CONST_RAM"/>
- <value value="0x55" name="A6XX_HLSQ_CVS_MISC_RAM_TAG"/>
- <value value="0x56" name="A6XX_HLSQ_CPS_MISC_RAM_TAG"/>
- <value value="0x57" name="A6XX_HLSQ_INST_RAM_TAG"/>
- <value value="0x58" name="A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
- <value value="0x59" name="A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
- <value value="0x5a" name="A6XX_HLSQ_PWR_REST_RAM"/>
- <value value="0x5b" name="A6XX_HLSQ_PWR_REST_TAG"/>
- <value value="0x60" name="A6XX_HLSQ_DATAPATH_META"/>
- <value value="0x61" name="A6XX_HLSQ_FRONTEND_META"/>
- <value value="0x62" name="A6XX_HLSQ_INDIRECT_META"/>
- <value value="0x63" name="A6XX_HLSQ_BACKEND_META"/>
- <value value="0x70" name="A6XX_SP_LB_6_DATA"/>
- <value value="0x71" name="A6XX_SP_LB_7_DATA"/>
- <value value="0x73" name="A6XX_HLSQ_INST_RAM_1"/>
-</enum>
-
-<enum name="a7xx_statetype_id">
- <value value="0" name="A7XX_TP0_NCTX_REG"/>
- <value value="1" name="A7XX_TP0_CTX0_3D_CVS_REG"/>
- <value value="2" name="A7XX_TP0_CTX0_3D_CPS_REG"/>
- <value value="3" name="A7XX_TP0_CTX1_3D_CVS_REG"/>
- <value value="4" name="A7XX_TP0_CTX1_3D_CPS_REG"/>
- <value value="5" name="A7XX_TP0_CTX2_3D_CPS_REG"/>
- <value value="6" name="A7XX_TP0_CTX3_3D_CPS_REG"/>
- <value value="9" name="A7XX_TP0_TMO_DATA"/>
- <value value="10" name="A7XX_TP0_SMO_DATA"/>
- <value value="11" name="A7XX_TP0_MIPMAP_BASE_DATA"/>
- <value value="32" name="A7XX_SP_NCTX_REG"/>
- <value value="33" name="A7XX_SP_CTX0_3D_CVS_REG"/>
- <value value="34" name="A7XX_SP_CTX0_3D_CPS_REG"/>
- <value value="35" name="A7XX_SP_CTX1_3D_CVS_REG"/>
- <value value="36" name="A7XX_SP_CTX1_3D_CPS_REG"/>
- <value value="37" name="A7XX_SP_CTX2_3D_CPS_REG"/>
- <value value="38" name="A7XX_SP_CTX3_3D_CPS_REG"/>
- <value value="39" name="A7XX_SP_INST_DATA"/>
- <value value="40" name="A7XX_SP_INST_DATA_1"/>
- <value value="41" name="A7XX_SP_LB_0_DATA"/>
- <value value="42" name="A7XX_SP_LB_1_DATA"/>
- <value value="43" name="A7XX_SP_LB_2_DATA"/>
- <value value="44" name="A7XX_SP_LB_3_DATA"/>
- <value value="45" name="A7XX_SP_LB_4_DATA"/>
- <value value="46" name="A7XX_SP_LB_5_DATA"/>
- <value value="47" name="A7XX_SP_LB_6_DATA"/>
- <value value="48" name="A7XX_SP_LB_7_DATA"/>
- <value value="49" name="A7XX_SP_CB_RAM"/>
- <value value="50" name="A7XX_SP_LB_13_DATA"/>
- <value value="51" name="A7XX_SP_LB_14_DATA"/>
- <value value="52" name="A7XX_SP_INST_TAG"/>
- <value value="53" name="A7XX_SP_INST_DATA_2"/>
- <value value="54" name="A7XX_SP_TMO_TAG"/>
- <value value="55" name="A7XX_SP_SMO_TAG"/>
- <value value="56" name="A7XX_SP_STATE_DATA"/>
- <value value="57" name="A7XX_SP_HWAVE_RAM"/>
- <value value="58" name="A7XX_SP_L0_INST_BUF"/>
- <value value="59" name="A7XX_SP_LB_8_DATA"/>
- <value value="60" name="A7XX_SP_LB_9_DATA"/>
- <value value="61" name="A7XX_SP_LB_10_DATA"/>
- <value value="62" name="A7XX_SP_LB_11_DATA"/>
- <value value="63" name="A7XX_SP_LB_12_DATA"/>
- <value value="64" name="A7XX_HLSQ_DATAPATH_DSTR_META"/>
- <value value="67" name="A7XX_HLSQ_L2STC_TAG_RAM"/>
- <value value="68" name="A7XX_HLSQ_L2STC_INFO_CMD"/>
- <value value="69" name="A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG"/>
- <value value="70" name="A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG"/>
- <value value="71" name="A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM"/>
- <value value="72" name="A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM"/>
- <value value="73" name="A7XX_HLSQ_CHUNK_CVS_RAM"/>
- <value value="74" name="A7XX_HLSQ_CHUNK_CPS_RAM"/>
- <value value="75" name="A7XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
- <value value="76" name="A7XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
- <value value="77" name="A7XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
- <value value="78" name="A7XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
- <value value="79" name="A7XX_HLSQ_CVS_MISC_RAM"/>
- <value value="80" name="A7XX_HLSQ_CPS_MISC_RAM"/>
- <value value="81" name="A7XX_HLSQ_CPS_MISC_RAM_1"/>
- <value value="82" name="A7XX_HLSQ_INST_RAM"/>
- <value value="83" name="A7XX_HLSQ_GFX_CVS_CONST_RAM"/>
- <value value="84" name="A7XX_HLSQ_GFX_CPS_CONST_RAM"/>
- <value value="85" name="A7XX_HLSQ_CVS_MISC_RAM_TAG"/>
- <value value="86" name="A7XX_HLSQ_CPS_MISC_RAM_TAG"/>
- <value value="87" name="A7XX_HLSQ_INST_RAM_TAG"/>
- <value value="88" name="A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
- <value value="89" name="A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
- <value value="90" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM"/>
- <value value="91" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG"/>
- <value value="92" name="A7XX_HLSQ_INST_RAM_1"/>
- <value value="93" name="A7XX_HLSQ_STPROC_META"/>
- <value value="94" name="A7XX_HLSQ_BV_BE_META"/>
- <value value="95" name="A7XX_HLSQ_INST_RAM_2"/>
- <value value="96" name="A7XX_HLSQ_DATAPATH_META"/>
- <value value="97" name="A7XX_HLSQ_FRONTEND_META"/>
- <value value="98" name="A7XX_HLSQ_INDIRECT_META"/>
- <value value="99" name="A7XX_HLSQ_BACKEND_META"/>
-</enum>
-
-<enum name="a6xx_debugbus_id">
- <value value="0x1" name="A6XX_DBGBUS_CP"/>
- <value value="0x2" name="A6XX_DBGBUS_RBBM"/>
- <value value="0x3" name="A6XX_DBGBUS_VBIF"/>
- <value value="0x4" name="A6XX_DBGBUS_HLSQ"/>
- <value value="0x5" name="A6XX_DBGBUS_UCHE"/>
- <value value="0x6" name="A6XX_DBGBUS_DPM"/>
- <value value="0x7" name="A6XX_DBGBUS_TESS"/>
- <value value="0x8" name="A6XX_DBGBUS_PC"/>
- <value value="0x9" name="A6XX_DBGBUS_VFDP"/>
- <value value="0xa" name="A6XX_DBGBUS_VPC"/>
- <value value="0xb" name="A6XX_DBGBUS_TSE"/>
- <value value="0xc" name="A6XX_DBGBUS_RAS"/>
- <value value="0xd" name="A6XX_DBGBUS_VSC"/>
- <value value="0xe" name="A6XX_DBGBUS_COM"/>
- <value value="0x10" name="A6XX_DBGBUS_LRZ"/>
- <value value="0x11" name="A6XX_DBGBUS_A2D"/>
- <value value="0x12" name="A6XX_DBGBUS_CCUFCHE"/>
- <value value="0x13" name="A6XX_DBGBUS_GMU_CX"/>
- <value value="0x14" name="A6XX_DBGBUS_RBP"/>
- <value value="0x15" name="A6XX_DBGBUS_DCS"/>
- <value value="0x16" name="A6XX_DBGBUS_DBGC"/>
- <value value="0x17" name="A6XX_DBGBUS_CX"/>
- <value value="0x18" name="A6XX_DBGBUS_GMU_GX"/>
- <value value="0x19" name="A6XX_DBGBUS_TPFCHE"/>
- <value value="0x1a" name="A6XX_DBGBUS_GBIF_GX"/>
- <value value="0x1d" name="A6XX_DBGBUS_GPC"/>
- <value value="0x1e" name="A6XX_DBGBUS_LARC"/>
- <value value="0x1f" name="A6XX_DBGBUS_HLSQ_SPTP"/>
- <value value="0x20" name="A6XX_DBGBUS_RB_0"/>
- <value value="0x21" name="A6XX_DBGBUS_RB_1"/>
- <value value="0x22" name="A6XX_DBGBUS_RB_2"/>
- <value value="0x24" name="A6XX_DBGBUS_UCHE_WRAPPER"/>
- <value value="0x28" name="A6XX_DBGBUS_CCU_0"/>
- <value value="0x29" name="A6XX_DBGBUS_CCU_1"/>
- <value value="0x2a" name="A6XX_DBGBUS_CCU_2"/>
- <value value="0x38" name="A6XX_DBGBUS_VFD_0"/>
- <value value="0x39" name="A6XX_DBGBUS_VFD_1"/>
- <value value="0x3a" name="A6XX_DBGBUS_VFD_2"/>
- <value value="0x3b" name="A6XX_DBGBUS_VFD_3"/>
- <value value="0x3c" name="A6XX_DBGBUS_VFD_4"/>
- <value value="0x3d" name="A6XX_DBGBUS_VFD_5"/>
- <value value="0x40" name="A6XX_DBGBUS_SP_0"/>
- <value value="0x41" name="A6XX_DBGBUS_SP_1"/>
- <value value="0x42" name="A6XX_DBGBUS_SP_2"/>
- <value value="0x48" name="A6XX_DBGBUS_TPL1_0"/>
- <value value="0x49" name="A6XX_DBGBUS_TPL1_1"/>
- <value value="0x4a" name="A6XX_DBGBUS_TPL1_2"/>
- <value value="0x4b" name="A6XX_DBGBUS_TPL1_3"/>
- <value value="0x4c" name="A6XX_DBGBUS_TPL1_4"/>
- <value value="0x4d" name="A6XX_DBGBUS_TPL1_5"/>
- <value value="0x58" name="A6XX_DBGBUS_SPTP_0"/>
- <value value="0x59" name="A6XX_DBGBUS_SPTP_1"/>
- <value value="0x5a" name="A6XX_DBGBUS_SPTP_2"/>
- <value value="0x5b" name="A6XX_DBGBUS_SPTP_3"/>
- <value value="0x5c" name="A6XX_DBGBUS_SPTP_4"/>
- <value value="0x5d" name="A6XX_DBGBUS_SPTP_5"/>
-</enum>
-
-<enum name="a7xx_state_location">
- <value value="0" name="A7XX_HLSQ_STATE"/>
- <value value="1" name="A7XX_HLSQ_DP"/>
- <value value="2" name="A7XX_SP_TOP"/>
- <value value="3" name="A7XX_USPTP"/>
- <value value="4" name="A7XX_HLSQ_DP_STR"/>
-</enum>
-
-<enum name="a7xx_pipe">
- <value value="0" name="A7XX_PIPE_NONE"/>
- <value value="1" name="A7XX_PIPE_BR"/>
- <value value="2" name="A7XX_PIPE_BV"/>
- <value value="3" name="A7XX_PIPE_LPAC"/>
-</enum>
-
-<enum name="a7xx_cluster">
- <value value="0" name="A7XX_CLUSTER_NONE"/>
- <value value="1" name="A7XX_CLUSTER_FE"/>
- <value value="2" name="A7XX_CLUSTER_SP_VS"/>
- <value value="3" name="A7XX_CLUSTER_PC_VS"/>
- <value value="4" name="A7XX_CLUSTER_GRAS"/>
- <value value="5" name="A7XX_CLUSTER_SP_PS"/>
- <value value="6" name="A7XX_CLUSTER_VPC_PS"/>
- <value value="7" name="A7XX_CLUSTER_PS"/>
-</enum>
-
-<enum name="a7xx_debugbus_id">
- <value value="1" name="A7XX_DBGBUS_CP_0_0"/>
- <value value="2" name="A7XX_DBGBUS_CP_0_1"/>
- <value value="3" name="A7XX_DBGBUS_RBBM"/>
- <value value="5" name="A7XX_DBGBUS_GBIF_GX"/>
- <value value="6" name="A7XX_DBGBUS_GBIF_CX"/>
- <value value="7" name="A7XX_DBGBUS_HLSQ"/>
- <value value="9" name="A7XX_DBGBUS_UCHE_0"/>
- <value value="10" name="A7XX_DBGBUS_UCHE_1"/>
- <value value="13" name="A7XX_DBGBUS_TESS_BR"/>
- <value value="14" name="A7XX_DBGBUS_TESS_BV"/>
- <value value="17" name="A7XX_DBGBUS_PC_BR"/>
- <value value="18" name="A7XX_DBGBUS_PC_BV"/>
- <value value="21" name="A7XX_DBGBUS_VFDP_BR"/>
- <value value="22" name="A7XX_DBGBUS_VFDP_BV"/>
- <value value="25" name="A7XX_DBGBUS_VPC_BR"/>
- <value value="26" name="A7XX_DBGBUS_VPC_BV"/>
- <value value="29" name="A7XX_DBGBUS_TSE_BR"/>
- <value value="30" name="A7XX_DBGBUS_TSE_BV"/>
- <value value="33" name="A7XX_DBGBUS_RAS_BR"/>
- <value value="34" name="A7XX_DBGBUS_RAS_BV"/>
- <value value="37" name="A7XX_DBGBUS_VSC"/>
- <value value="39" name="A7XX_DBGBUS_COM_0"/>
- <value value="43" name="A7XX_DBGBUS_LRZ_BR"/>
- <value value="44" name="A7XX_DBGBUS_LRZ_BV"/>
- <value value="47" name="A7XX_DBGBUS_UFC_0"/>
- <value value="48" name="A7XX_DBGBUS_UFC_1"/>
- <value value="55" name="A7XX_DBGBUS_GMU_GX"/>
- <value value="59" name="A7XX_DBGBUS_DBGC"/>
- <value value="60" name="A7XX_DBGBUS_CX"/>
- <value value="61" name="A7XX_DBGBUS_GMU_CX"/>
- <value value="62" name="A7XX_DBGBUS_GPC_BR"/>
- <value value="63" name="A7XX_DBGBUS_GPC_BV"/>
- <value value="66" name="A7XX_DBGBUS_LARC"/>
- <value value="68" name="A7XX_DBGBUS_HLSQ_SPTP"/>
- <value value="70" name="A7XX_DBGBUS_RB_0"/>
- <value value="71" name="A7XX_DBGBUS_RB_1"/>
- <value value="72" name="A7XX_DBGBUS_RB_2"/>
- <value value="73" name="A7XX_DBGBUS_RB_3"/>
- <value value="74" name="A7XX_DBGBUS_RB_4"/>
- <value value="75" name="A7XX_DBGBUS_RB_5"/>
- <value value="102" name="A7XX_DBGBUS_UCHE_WRAPPER"/>
- <value value="106" name="A7XX_DBGBUS_CCU_0"/>
- <value value="107" name="A7XX_DBGBUS_CCU_1"/>
- <value value="108" name="A7XX_DBGBUS_CCU_2"/>
- <value value="109" name="A7XX_DBGBUS_CCU_3"/>
- <value value="110" name="A7XX_DBGBUS_CCU_4"/>
- <value value="111" name="A7XX_DBGBUS_CCU_5"/>
- <value value="138" name="A7XX_DBGBUS_VFD_BR_0"/>
- <value value="139" name="A7XX_DBGBUS_VFD_BR_1"/>
- <value value="140" name="A7XX_DBGBUS_VFD_BR_2"/>
- <value value="141" name="A7XX_DBGBUS_VFD_BR_3"/>
- <value value="142" name="A7XX_DBGBUS_VFD_BR_4"/>
- <value value="143" name="A7XX_DBGBUS_VFD_BR_5"/>
- <value value="144" name="A7XX_DBGBUS_VFD_BR_6"/>
- <value value="145" name="A7XX_DBGBUS_VFD_BR_7"/>
- <value value="202" name="A7XX_DBGBUS_VFD_BV_0"/>
- <value value="203" name="A7XX_DBGBUS_VFD_BV_1"/>
- <value value="204" name="A7XX_DBGBUS_VFD_BV_2"/>
- <value value="205" name="A7XX_DBGBUS_VFD_BV_3"/>
- <value value="234" name="A7XX_DBGBUS_USP_0"/>
- <value value="235" name="A7XX_DBGBUS_USP_1"/>
- <value value="236" name="A7XX_DBGBUS_USP_2"/>
- <value value="237" name="A7XX_DBGBUS_USP_3"/>
- <value value="238" name="A7XX_DBGBUS_USP_4"/>
- <value value="239" name="A7XX_DBGBUS_USP_5"/>
- <value value="266" name="A7XX_DBGBUS_TP_0"/>
- <value value="267" name="A7XX_DBGBUS_TP_1"/>
- <value value="268" name="A7XX_DBGBUS_TP_2"/>
- <value value="269" name="A7XX_DBGBUS_TP_3"/>
- <value value="270" name="A7XX_DBGBUS_TP_4"/>
- <value value="271" name="A7XX_DBGBUS_TP_5"/>
- <value value="272" name="A7XX_DBGBUS_TP_6"/>
- <value value="273" name="A7XX_DBGBUS_TP_7"/>
- <value value="274" name="A7XX_DBGBUS_TP_8"/>
- <value value="275" name="A7XX_DBGBUS_TP_9"/>
- <value value="276" name="A7XX_DBGBUS_TP_10"/>
- <value value="277" name="A7XX_DBGBUS_TP_11"/>
- <value value="330" name="A7XX_DBGBUS_USPTP_0"/>
- <value value="331" name="A7XX_DBGBUS_USPTP_1"/>
- <value value="332" name="A7XX_DBGBUS_USPTP_2"/>
- <value value="333" name="A7XX_DBGBUS_USPTP_3"/>
- <value value="334" name="A7XX_DBGBUS_USPTP_4"/>
- <value value="335" name="A7XX_DBGBUS_USPTP_5"/>
- <value value="336" name="A7XX_DBGBUS_USPTP_6"/>
- <value value="337" name="A7XX_DBGBUS_USPTP_7"/>
- <value value="338" name="A7XX_DBGBUS_USPTP_8"/>
- <value value="339" name="A7XX_DBGBUS_USPTP_9"/>
- <value value="340" name="A7XX_DBGBUS_USPTP_10"/>
- <value value="341" name="A7XX_DBGBUS_USPTP_11"/>
- <value value="396" name="A7XX_DBGBUS_CCHE_0"/>
- <value value="397" name="A7XX_DBGBUS_CCHE_1"/>
- <value value="398" name="A7XX_DBGBUS_CCHE_2"/>
- <value value="408" name="A7XX_DBGBUS_VPC_DSTR_0"/>
- <value value="409" name="A7XX_DBGBUS_VPC_DSTR_1"/>
- <value value="410" name="A7XX_DBGBUS_VPC_DSTR_2"/>
- <value value="411" name="A7XX_DBGBUS_HLSQ_DP_STR_0"/>
- <value value="412" name="A7XX_DBGBUS_HLSQ_DP_STR_1"/>
- <value value="413" name="A7XX_DBGBUS_HLSQ_DP_STR_2"/>
- <value value="414" name="A7XX_DBGBUS_HLSQ_DP_STR_3"/>
- <value value="415" name="A7XX_DBGBUS_HLSQ_DP_STR_4"/>
- <value value="416" name="A7XX_DBGBUS_HLSQ_DP_STR_5"/>
- <value value="443" name="A7XX_DBGBUS_UFC_DSTR_0"/>
- <value value="444" name="A7XX_DBGBUS_UFC_DSTR_1"/>
- <value value="445" name="A7XX_DBGBUS_UFC_DSTR_2"/>
- <value value="446" name="A7XX_DBGBUS_CGC_SUBCORE"/>
- <value value="447" name="A7XX_DBGBUS_CGC_CORE"/>
-</enum>
-
-<enum name="a6xx_cp_perfcounter_select">
- <value value="0" name="PERF_CP_ALWAYS_COUNT"/>
- <value value="1" name="PERF_CP_BUSY_GFX_CORE_IDLE"/>
- <value value="2" name="PERF_CP_BUSY_CYCLES"/>
- <value value="3" name="PERF_CP_NUM_PREEMPTIONS"/>
- <value value="4" name="PERF_CP_PREEMPTION_REACTION_DELAY"/>
- <value value="5" name="PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
- <value value="6" name="PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
- <value value="7" name="PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
- <value value="8" name="PERF_CP_PREDICATED_DRAWS_KILLED"/>
- <value value="9" name="PERF_CP_MODE_SWITCH"/>
- <value value="10" name="PERF_CP_ZPASS_DONE"/>
- <value value="11" name="PERF_CP_CONTEXT_DONE"/>
- <value value="12" name="PERF_CP_CACHE_FLUSH"/>
- <value value="13" name="PERF_CP_LONG_PREEMPTIONS"/>
- <value value="14" name="PERF_CP_SQE_I_CACHE_STARVE"/>
- <value value="15" name="PERF_CP_SQE_IDLE"/>
- <value value="16" name="PERF_CP_SQE_PM4_STARVE_RB_IB"/>
- <value value="17" name="PERF_CP_SQE_PM4_STARVE_SDS"/>
- <value value="18" name="PERF_CP_SQE_MRB_STARVE"/>
- <value value="19" name="PERF_CP_SQE_RRB_STARVE"/>
- <value value="20" name="PERF_CP_SQE_VSD_STARVE"/>
- <value value="21" name="PERF_CP_VSD_DECODE_STARVE"/>
- <value value="22" name="PERF_CP_SQE_PIPE_OUT_STALL"/>
- <value value="23" name="PERF_CP_SQE_SYNC_STALL"/>
- <value value="24" name="PERF_CP_SQE_PM4_WFI_STALL"/>
- <value value="25" name="PERF_CP_SQE_SYS_WFI_STALL"/>
- <value value="26" name="PERF_CP_SQE_T4_EXEC"/>
- <value value="27" name="PERF_CP_SQE_LOAD_STATE_EXEC"/>
- <value value="28" name="PERF_CP_SQE_SAVE_SDS_STATE"/>
- <value value="29" name="PERF_CP_SQE_DRAW_EXEC"/>
- <value value="30" name="PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
- <value value="31" name="PERF_CP_SQE_EXEC_PROFILED"/>
- <value value="32" name="PERF_CP_MEMORY_POOL_EMPTY"/>
- <value value="33" name="PERF_CP_MEMORY_POOL_SYNC_STALL"/>
- <value value="34" name="PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
- <value value="35" name="PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
- <value value="36" name="PERF_CP_AHB_STALL_SQE_GMU"/>
- <value value="37" name="PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
- <value value="38" name="PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
- <value value="39" name="PERF_CP_CLUSTER0_EMPTY"/>
- <value value="40" name="PERF_CP_CLUSTER1_EMPTY"/>
- <value value="41" name="PERF_CP_CLUSTER2_EMPTY"/>
- <value value="42" name="PERF_CP_CLUSTER3_EMPTY"/>
- <value value="43" name="PERF_CP_CLUSTER4_EMPTY"/>
- <value value="44" name="PERF_CP_CLUSTER5_EMPTY"/>
- <value value="45" name="PERF_CP_PM4_DATA"/>
- <value value="46" name="PERF_CP_PM4_HEADERS"/>
- <value value="47" name="PERF_CP_VBIF_READ_BEATS"/>
- <value value="48" name="PERF_CP_VBIF_WRITE_BEATS"/>
- <value value="49" name="PERF_CP_SQE_INSTR_COUNTER"/>
-</enum>
-
-<enum name="a6xx_rbbm_perfcounter_select">
- <value value="0" name="PERF_RBBM_ALWAYS_COUNT"/>
- <value value="1" name="PERF_RBBM_ALWAYS_ON"/>
- <value value="2" name="PERF_RBBM_TSE_BUSY"/>
- <value value="3" name="PERF_RBBM_RAS_BUSY"/>
- <value value="4" name="PERF_RBBM_PC_DCALL_BUSY"/>
- <value value="5" name="PERF_RBBM_PC_VSD_BUSY"/>
- <value value="6" name="PERF_RBBM_STATUS_MASKED"/>
- <value value="7" name="PERF_RBBM_COM_BUSY"/>
- <value value="8" name="PERF_RBBM_DCOM_BUSY"/>
- <value value="9" name="PERF_RBBM_VBIF_BUSY"/>
- <value value="10" name="PERF_RBBM_VSC_BUSY"/>
- <value value="11" name="PERF_RBBM_TESS_BUSY"/>
- <value value="12" name="PERF_RBBM_UCHE_BUSY"/>
- <value value="13" name="PERF_RBBM_HLSQ_BUSY"/>
-</enum>
-
-<enum name="a6xx_pc_perfcounter_select">
- <value value="0" name="PERF_PC_BUSY_CYCLES"/>
- <value value="1" name="PERF_PC_WORKING_CYCLES"/>
- <value value="2" name="PERF_PC_STALL_CYCLES_VFD"/>
- <value value="3" name="PERF_PC_STALL_CYCLES_TSE"/>
- <value value="4" name="PERF_PC_STALL_CYCLES_VPC"/>
- <value value="5" name="PERF_PC_STALL_CYCLES_UCHE"/>
- <value value="6" name="PERF_PC_STALL_CYCLES_TESS"/>
- <value value="7" name="PERF_PC_STALL_CYCLES_TSE_ONLY"/>
- <value value="8" name="PERF_PC_STALL_CYCLES_VPC_ONLY"/>
- <value value="9" name="PERF_PC_PASS1_TF_STALL_CYCLES"/>
- <value value="10" name="PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
- <value value="11" name="PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
- <value value="12" name="PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
- <value value="13" name="PERF_PC_STARVE_CYCLES_FOR_POSITION"/>
- <value value="14" name="PERF_PC_STARVE_CYCLES_DI"/>
- <value value="15" name="PERF_PC_VIS_STREAMS_LOADED"/>
- <value value="16" name="PERF_PC_INSTANCES"/>
- <value value="17" name="PERF_PC_VPC_PRIMITIVES"/>
- <value value="18" name="PERF_PC_DEAD_PRIM"/>
- <value value="19" name="PERF_PC_LIVE_PRIM"/>
- <value value="20" name="PERF_PC_VERTEX_HITS"/>
- <value value="21" name="PERF_PC_IA_VERTICES"/>
- <value value="22" name="PERF_PC_IA_PRIMITIVES"/>
- <value value="23" name="PERF_PC_GS_PRIMITIVES"/>
- <value value="24" name="PERF_PC_HS_INVOCATIONS"/>
- <value value="25" name="PERF_PC_DS_INVOCATIONS"/>
- <value value="26" name="PERF_PC_VS_INVOCATIONS"/>
- <value value="27" name="PERF_PC_GS_INVOCATIONS"/>
- <value value="28" name="PERF_PC_DS_PRIMITIVES"/>
- <value value="29" name="PERF_PC_VPC_POS_DATA_TRANSACTION"/>
- <value value="30" name="PERF_PC_3D_DRAWCALLS"/>
- <value value="31" name="PERF_PC_2D_DRAWCALLS"/>
- <value value="32" name="PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
- <value value="33" name="PERF_TESS_BUSY_CYCLES"/>
- <value value="34" name="PERF_TESS_WORKING_CYCLES"/>
- <value value="35" name="PERF_TESS_STALL_CYCLES_PC"/>
- <value value="36" name="PERF_TESS_STARVE_CYCLES_PC"/>
- <value value="37" name="PERF_PC_TSE_TRANSACTION"/>
- <value value="38" name="PERF_PC_TSE_VERTEX"/>
- <value value="39" name="PERF_PC_TESS_PC_UV_TRANS"/>
- <value value="40" name="PERF_PC_TESS_PC_UV_PATCHES"/>
- <value value="41" name="PERF_PC_TESS_FACTOR_TRANS"/>
-</enum>
-
-<enum name="a6xx_vfd_perfcounter_select">
- <value value="0" name="PERF_VFD_BUSY_CYCLES"/>
- <value value="1" name="PERF_VFD_STALL_CYCLES_UCHE"/>
- <value value="2" name="PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
- <value value="3" name="PERF_VFD_STALL_CYCLES_SP_INFO"/>
- <value value="4" name="PERF_VFD_STALL_CYCLES_SP_ATTR"/>
- <value value="5" name="PERF_VFD_STARVE_CYCLES_UCHE"/>
- <value value="6" name="PERF_VFD_RBUFFER_FULL"/>
- <value value="7" name="PERF_VFD_ATTR_INFO_FIFO_FULL"/>
- <value value="8" name="PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
- <value value="9" name="PERF_VFD_NUM_ATTRIBUTES"/>
- <value value="10" name="PERF_VFD_UPPER_SHADER_FIBERS"/>
- <value value="11" name="PERF_VFD_LOWER_SHADER_FIBERS"/>
- <value value="12" name="PERF_VFD_MODE_0_FIBERS"/>
- <value value="13" name="PERF_VFD_MODE_1_FIBERS"/>
- <value value="14" name="PERF_VFD_MODE_2_FIBERS"/>
- <value value="15" name="PERF_VFD_MODE_3_FIBERS"/>
- <value value="16" name="PERF_VFD_MODE_4_FIBERS"/>
- <value value="17" name="PERF_VFD_TOTAL_VERTICES"/>
- <value value="18" name="PERF_VFDP_STALL_CYCLES_VFD"/>
- <value value="19" name="PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
- <value value="20" name="PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
- <value value="21" name="PERF_VFDP_STARVE_CYCLES_PC"/>
- <value value="22" name="PERF_VFDP_VS_STAGE_WAVES"/>
-</enum>
-
-<enum name="a6xx_hlsq_perfcounter_select">
- <value value="0" name="PERF_HLSQ_BUSY_CYCLES"/>
- <value value="1" name="PERF_HLSQ_STALL_CYCLES_UCHE"/>
- <value value="2" name="PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
- <value value="3" name="PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
- <value value="4" name="PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
- <value value="5" name="PERF_HLSQ_UCHE_LATENCY_COUNT"/>
- <value value="6" name="PERF_HLSQ_FS_STAGE_1X_WAVES"/>
- <value value="7" name="PERF_HLSQ_FS_STAGE_2X_WAVES"/>
- <value value="8" name="PERF_HLSQ_QUADS"/>
- <value value="9" name="PERF_HLSQ_CS_INVOCATIONS"/>
- <value value="10" name="PERF_HLSQ_COMPUTE_DRAWCALLS"/>
- <value value="11" name="PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
- <value value="12" name="PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
- <value value="13" name="PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
- <value value="14" name="PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
- <value value="15" name="PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
- <value value="16" name="PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
- <value value="17" name="PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
- <value value="18" name="PERF_HLSQ_STALL_CYCLES_VPC"/>
- <value value="19" name="PERF_HLSQ_PIXELS"/>
- <value value="20" name="PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
-</enum>
-
-<enum name="a6xx_vpc_perfcounter_select">
- <value value="0" name="PERF_VPC_BUSY_CYCLES"/>
- <value value="1" name="PERF_VPC_WORKING_CYCLES"/>
- <value value="2" name="PERF_VPC_STALL_CYCLES_UCHE"/>
- <value value="3" name="PERF_VPC_STALL_CYCLES_VFD_WACK"/>
- <value value="4" name="PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
- <value value="5" name="PERF_VPC_STALL_CYCLES_PC"/>
- <value value="6" name="PERF_VPC_STALL_CYCLES_SP_LM"/>
- <value value="7" name="PERF_VPC_STARVE_CYCLES_SP"/>
- <value value="8" name="PERF_VPC_STARVE_CYCLES_LRZ"/>
- <value value="9" name="PERF_VPC_PC_PRIMITIVES"/>
- <value value="10" name="PERF_VPC_SP_COMPONENTS"/>
- <value value="11" name="PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
- <value value="12" name="PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
- <value value="13" name="PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
- <value value="14" name="PERF_VPC_LM_TRANSACTION"/>
- <value value="15" name="PERF_VPC_STREAMOUT_TRANSACTION"/>
- <value value="16" name="PERF_VPC_VS_BUSY_CYCLES"/>
- <value value="17" name="PERF_VPC_PS_BUSY_CYCLES"/>
- <value value="18" name="PERF_VPC_VS_WORKING_CYCLES"/>
- <value value="19" name="PERF_VPC_PS_WORKING_CYCLES"/>
- <value value="20" name="PERF_VPC_STARVE_CYCLES_RB"/>
- <value value="21" name="PERF_VPC_NUM_VPCRAM_READ_POS"/>
- <value value="22" name="PERF_VPC_WIT_FULL_CYCLES"/>
- <value value="23" name="PERF_VPC_VPCRAM_FULL_CYCLES"/>
- <value value="24" name="PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
- <value value="25" name="PERF_VPC_NUM_VPCRAM_WRITE"/>
- <value value="26" name="PERF_VPC_NUM_VPCRAM_READ_SO"/>
- <value value="27" name="PERF_VPC_NUM_ATTR_REQ_LM"/>
-</enum>
-
-<enum name="a6xx_tse_perfcounter_select">
- <value value="0" name="PERF_TSE_BUSY_CYCLES"/>
- <value value="1" name="PERF_TSE_CLIPPING_CYCLES"/>
- <value value="2" name="PERF_TSE_STALL_CYCLES_RAS"/>
- <value value="3" name="PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
- <value value="4" name="PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
- <value value="5" name="PERF_TSE_STARVE_CYCLES_PC"/>
- <value value="6" name="PERF_TSE_INPUT_PRIM"/>
- <value value="7" name="PERF_TSE_INPUT_NULL_PRIM"/>
- <value value="8" name="PERF_TSE_TRIVAL_REJ_PRIM"/>
- <value value="9" name="PERF_TSE_CLIPPED_PRIM"/>
- <value value="10" name="PERF_TSE_ZERO_AREA_PRIM"/>
- <value value="11" name="PERF_TSE_FACENESS_CULLED_PRIM"/>
- <value value="12" name="PERF_TSE_ZERO_PIXEL_PRIM"/>
- <value value="13" name="PERF_TSE_OUTPUT_NULL_PRIM"/>
- <value value="14" name="PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
- <value value="15" name="PERF_TSE_CINVOCATION"/>
- <value value="16" name="PERF_TSE_CPRIMITIVES"/>
- <value value="17" name="PERF_TSE_2D_INPUT_PRIM"/>
- <value value="18" name="PERF_TSE_2D_ALIVE_CYCLES"/>
- <value value="19" name="PERF_TSE_CLIP_PLANES"/>
-</enum>
-
-<enum name="a6xx_ras_perfcounter_select">
- <value value="0" name="PERF_RAS_BUSY_CYCLES"/>
- <value value="1" name="PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
- <value value="2" name="PERF_RAS_STALL_CYCLES_LRZ"/>
- <value value="3" name="PERF_RAS_STARVE_CYCLES_TSE"/>
- <value value="4" name="PERF_RAS_SUPER_TILES"/>
- <value value="5" name="PERF_RAS_8X4_TILES"/>
- <value value="6" name="PERF_RAS_MASKGEN_ACTIVE"/>
- <value value="7" name="PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
- <value value="8" name="PERF_RAS_FULLY_COVERED_8X4_TILES"/>
- <value value="9" name="PERF_RAS_PRIM_KILLED_INVISILBE"/>
- <value value="10" name="PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
- <value value="11" name="PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
- <value value="12" name="PERF_RAS_BLOCKS"/>
-</enum>
-
-<enum name="a6xx_uche_perfcounter_select">
- <value value="0" name="PERF_UCHE_BUSY_CYCLES"/>
- <value value="1" name="PERF_UCHE_STALL_CYCLES_ARBITER"/>
- <value value="2" name="PERF_UCHE_VBIF_LATENCY_CYCLES"/>
- <value value="3" name="PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
- <value value="4" name="PERF_UCHE_VBIF_READ_BEATS_TP"/>
- <value value="5" name="PERF_UCHE_VBIF_READ_BEATS_VFD"/>
- <value value="6" name="PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
- <value value="7" name="PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
- <value value="8" name="PERF_UCHE_VBIF_READ_BEATS_SP"/>
- <value value="9" name="PERF_UCHE_READ_REQUESTS_TP"/>
- <value value="10" name="PERF_UCHE_READ_REQUESTS_VFD"/>
- <value value="11" name="PERF_UCHE_READ_REQUESTS_HLSQ"/>
- <value value="12" name="PERF_UCHE_READ_REQUESTS_LRZ"/>
- <value value="13" name="PERF_UCHE_READ_REQUESTS_SP"/>
- <value value="14" name="PERF_UCHE_WRITE_REQUESTS_LRZ"/>
- <value value="15" name="PERF_UCHE_WRITE_REQUESTS_SP"/>
- <value value="16" name="PERF_UCHE_WRITE_REQUESTS_VPC"/>
- <value value="17" name="PERF_UCHE_WRITE_REQUESTS_VSC"/>
- <value value="18" name="PERF_UCHE_EVICTS"/>
- <value value="19" name="PERF_UCHE_BANK_REQ0"/>
- <value value="20" name="PERF_UCHE_BANK_REQ1"/>
- <value value="21" name="PERF_UCHE_BANK_REQ2"/>
- <value value="22" name="PERF_UCHE_BANK_REQ3"/>
- <value value="23" name="PERF_UCHE_BANK_REQ4"/>
- <value value="24" name="PERF_UCHE_BANK_REQ5"/>
- <value value="25" name="PERF_UCHE_BANK_REQ6"/>
- <value value="26" name="PERF_UCHE_BANK_REQ7"/>
- <value value="27" name="PERF_UCHE_VBIF_READ_BEATS_CH0"/>
- <value value="28" name="PERF_UCHE_VBIF_READ_BEATS_CH1"/>
- <value value="29" name="PERF_UCHE_GMEM_READ_BEATS"/>
- <value value="30" name="PERF_UCHE_TPH_REF_FULL"/>
- <value value="31" name="PERF_UCHE_TPH_VICTIM_FULL"/>
- <value value="32" name="PERF_UCHE_TPH_EXT_FULL"/>
- <value value="33" name="PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
- <value value="34" name="PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
- <value value="35" name="PERF_UCHE_DCMP_LATENCY_CYCLES"/>
- <value value="36" name="PERF_UCHE_VBIF_READ_BEATS_PC"/>
- <value value="37" name="PERF_UCHE_READ_REQUESTS_PC"/>
- <value value="38" name="PERF_UCHE_RAM_READ_REQ"/>
- <value value="39" name="PERF_UCHE_RAM_WRITE_REQ"/>
-</enum>
-
-<enum name="a6xx_tp_perfcounter_select">
- <value value="0" name="PERF_TP_BUSY_CYCLES"/>
- <value value="1" name="PERF_TP_STALL_CYCLES_UCHE"/>
- <value value="2" name="PERF_TP_LATENCY_CYCLES"/>
- <value value="3" name="PERF_TP_LATENCY_TRANS"/>
- <value value="4" name="PERF_TP_FLAG_CACHE_REQUEST_SAMPLES"/>
- <value value="5" name="PERF_TP_FLAG_CACHE_REQUEST_LATENCY"/>
- <value value="6" name="PERF_TP_L1_CACHELINE_REQUESTS"/>
- <value value="7" name="PERF_TP_L1_CACHELINE_MISSES"/>
- <value value="8" name="PERF_TP_SP_TP_TRANS"/>
- <value value="9" name="PERF_TP_TP_SP_TRANS"/>
- <value value="10" name="PERF_TP_OUTPUT_PIXELS"/>
- <value value="11" name="PERF_TP_FILTER_WORKLOAD_16BIT"/>
- <value value="12" name="PERF_TP_FILTER_WORKLOAD_32BIT"/>
- <value value="13" name="PERF_TP_QUADS_RECEIVED"/>
- <value value="14" name="PERF_TP_QUADS_OFFSET"/>
- <value value="15" name="PERF_TP_QUADS_SHADOW"/>
- <value value="16" name="PERF_TP_QUADS_ARRAY"/>
- <value value="17" name="PERF_TP_QUADS_GRADIENT"/>
- <value value="18" name="PERF_TP_QUADS_1D"/>
- <value value="19" name="PERF_TP_QUADS_2D"/>
- <value value="20" name="PERF_TP_QUADS_BUFFER"/>
- <value value="21" name="PERF_TP_QUADS_3D"/>
- <value value="22" name="PERF_TP_QUADS_CUBE"/>
- <value value="23" name="PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
- <value value="24" name="PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
- <value value="25" name="PERF_TP_OUTPUT_PIXELS_POINT"/>
- <value value="26" name="PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
- <value value="27" name="PERF_TP_OUTPUT_PIXELS_MIP"/>
- <value value="28" name="PERF_TP_OUTPUT_PIXELS_ANISO"/>
- <value value="29" name="PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
- <value value="30" name="PERF_TP_FLAG_CACHE_REQUESTS"/>
- <value value="31" name="PERF_TP_FLAG_CACHE_MISSES"/>
- <value value="32" name="PERF_TP_L1_5_L2_REQUESTS"/>
- <value value="33" name="PERF_TP_2D_OUTPUT_PIXELS"/>
- <value value="34" name="PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
- <value value="35" name="PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
- <value value="36" name="PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
- <value value="37" name="PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
- <value value="38" name="PERF_TP_TPA2TPC_TRANS"/>
- <value value="39" name="PERF_TP_L1_MISSES_ASTC_1TILE"/>
- <value value="40" name="PERF_TP_L1_MISSES_ASTC_2TILE"/>
- <value value="41" name="PERF_TP_L1_MISSES_ASTC_4TILE"/>
- <value value="42" name="PERF_TP_L1_5_L2_COMPRESS_REQS"/>
- <value value="43" name="PERF_TP_L1_5_L2_COMPRESS_MISS"/>
- <value value="44" name="PERF_TP_L1_BANK_CONFLICT"/>
- <value value="45" name="PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
- <value value="46" name="PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
- <value value="47" name="PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
- <value value="48" name="PERF_TP_FRONTEND_WORKING_CYCLES"/>
- <value value="49" name="PERF_TP_L1_TAG_WORKING_CYCLES"/>
- <value value="50" name="PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
- <value value="51" name="PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
- <value value="52" name="PERF_TP_BACKEND_WORKING_CYCLES"/>
- <value value="53" name="PERF_TP_FLAG_CACHE_WORKING_CYCLES"/>
- <value value="54" name="PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
- <value value="55" name="PERF_TP_STARVE_CYCLES_SP"/>
- <value value="56" name="PERF_TP_STARVE_CYCLES_UCHE"/>
-</enum>
-
-<enum name="a6xx_sp_perfcounter_select">
- <value value="0" name="PERF_SP_BUSY_CYCLES"/>
- <value value="1" name="PERF_SP_ALU_WORKING_CYCLES"/>
- <value value="2" name="PERF_SP_EFU_WORKING_CYCLES"/>
- <value value="3" name="PERF_SP_STALL_CYCLES_VPC"/>
- <value value="4" name="PERF_SP_STALL_CYCLES_TP"/>
- <value value="5" name="PERF_SP_STALL_CYCLES_UCHE"/>
- <value value="6" name="PERF_SP_STALL_CYCLES_RB"/>
- <value value="7" name="PERF_SP_NON_EXECUTION_CYCLES"/>
- <value value="8" name="PERF_SP_WAVE_CONTEXTS"/>
- <value value="9" name="PERF_SP_WAVE_CONTEXT_CYCLES"/>
- <value value="10" name="PERF_SP_FS_STAGE_WAVE_CYCLES"/>
- <value value="11" name="PERF_SP_FS_STAGE_WAVE_SAMPLES"/>
- <value value="12" name="PERF_SP_VS_STAGE_WAVE_CYCLES"/>
- <value value="13" name="PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
- <value value="14" name="PERF_SP_FS_STAGE_DURATION_CYCLES"/>
- <value value="15" name="PERF_SP_VS_STAGE_DURATION_CYCLES"/>
- <value value="16" name="PERF_SP_WAVE_CTRL_CYCLES"/>
- <value value="17" name="PERF_SP_WAVE_LOAD_CYCLES"/>
- <value value="18" name="PERF_SP_WAVE_EMIT_CYCLES"/>
- <value value="19" name="PERF_SP_WAVE_NOP_CYCLES"/>
- <value value="20" name="PERF_SP_WAVE_WAIT_CYCLES"/>
- <value value="21" name="PERF_SP_WAVE_FETCH_CYCLES"/>
- <value value="22" name="PERF_SP_WAVE_IDLE_CYCLES"/>
- <value value="23" name="PERF_SP_WAVE_END_CYCLES"/>
- <value value="24" name="PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
- <value value="25" name="PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
- <value value="26" name="PERF_SP_WAVE_JOIN_CYCLES"/>
- <value value="27" name="PERF_SP_LM_LOAD_INSTRUCTIONS"/>
- <value value="28" name="PERF_SP_LM_STORE_INSTRUCTIONS"/>
- <value value="29" name="PERF_SP_LM_ATOMICS"/>
- <value value="30" name="PERF_SP_GM_LOAD_INSTRUCTIONS"/>
- <value value="31" name="PERF_SP_GM_STORE_INSTRUCTIONS"/>
- <value value="32" name="PERF_SP_GM_ATOMICS"/>
- <value value="33" name="PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
- <value value="34" name="PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
- <value value="35" name="PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
- <value value="36" name="PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
- <value value="37" name="PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
- <value value="38" name="PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
- <value value="39" name="PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
- <value value="40" name="PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
- <value value="41" name="PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
- <value value="42" name="PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
- <value value="43" name="PERF_SP_VS_INSTRUCTIONS"/>
- <value value="44" name="PERF_SP_FS_INSTRUCTIONS"/>
- <value value="45" name="PERF_SP_ADDR_LOCK_COUNT"/>
- <value value="46" name="PERF_SP_UCHE_READ_TRANS"/>
- <value value="47" name="PERF_SP_UCHE_WRITE_TRANS"/>
- <value value="48" name="PERF_SP_EXPORT_VPC_TRANS"/>
- <value value="49" name="PERF_SP_EXPORT_RB_TRANS"/>
- <value value="50" name="PERF_SP_PIXELS_KILLED"/>
- <value value="51" name="PERF_SP_ICL1_REQUESTS"/>
- <value value="52" name="PERF_SP_ICL1_MISSES"/>
- <value value="53" name="PERF_SP_HS_INSTRUCTIONS"/>
- <value value="54" name="PERF_SP_DS_INSTRUCTIONS"/>
- <value value="55" name="PERF_SP_GS_INSTRUCTIONS"/>
- <value value="56" name="PERF_SP_CS_INSTRUCTIONS"/>
- <value value="57" name="PERF_SP_GPR_READ"/>
- <value value="58" name="PERF_SP_GPR_WRITE"/>
- <value value="59" name="PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
- <value value="60" name="PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
- <value value="61" name="PERF_SP_LM_BANK_CONFLICTS"/>
- <value value="62" name="PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
- <value value="63" name="PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
- <value value="64" name="PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
- <value value="65" name="PERF_SP_LM_WORKING_CYCLES"/>
- <value value="66" name="PERF_SP_DISPATCHER_WORKING_CYCLES"/>
- <value value="67" name="PERF_SP_SEQUENCER_WORKING_CYCLES"/>
- <value value="68" name="PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
- <value value="69" name="PERF_SP_STARVE_CYCLES_HLSQ"/>
- <value value="70" name="PERF_SP_NON_EXECUTION_LS_CYCLES"/>
- <value value="71" name="PERF_SP_WORKING_EU"/>
- <value value="72" name="PERF_SP_ANY_EU_WORKING"/>
- <value value="73" name="PERF_SP_WORKING_EU_FS_STAGE"/>
- <value value="74" name="PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
- <value value="75" name="PERF_SP_WORKING_EU_VS_STAGE"/>
- <value value="76" name="PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
- <value value="77" name="PERF_SP_WORKING_EU_CS_STAGE"/>
- <value value="78" name="PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
- <value value="79" name="PERF_SP_GPR_READ_PREFETCH"/>
- <value value="80" name="PERF_SP_GPR_READ_CONFLICT"/>
- <value value="81" name="PERF_SP_GPR_WRITE_CONFLICT"/>
- <value value="82" name="PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
- <value value="83" name="PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
- <value value="84" name="PERF_SP_EXECUTABLE_WAVES"/>
-</enum>
-
-<enum name="a6xx_rb_perfcounter_select">
- <value value="0" name="PERF_RB_BUSY_CYCLES"/>
- <value value="1" name="PERF_RB_STALL_CYCLES_HLSQ"/>
- <value value="2" name="PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
- <value value="3" name="PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
- <value value="4" name="PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
- <value value="5" name="PERF_RB_STARVE_CYCLES_SP"/>
- <value value="6" name="PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
- <value value="7" name="PERF_RB_STARVE_CYCLES_CCU"/>
- <value value="8" name="PERF_RB_STARVE_CYCLES_Z_PLANE"/>
- <value value="9" name="PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
- <value value="10" name="PERF_RB_Z_WORKLOAD"/>
- <value value="11" name="PERF_RB_HLSQ_ACTIVE"/>
- <value value="12" name="PERF_RB_Z_READ"/>
- <value value="13" name="PERF_RB_Z_WRITE"/>
- <value value="14" name="PERF_RB_C_READ"/>
- <value value="15" name="PERF_RB_C_WRITE"/>
- <value value="16" name="PERF_RB_TOTAL_PASS"/>
- <value value="17" name="PERF_RB_Z_PASS"/>
- <value value="18" name="PERF_RB_Z_FAIL"/>
- <value value="19" name="PERF_RB_S_FAIL"/>
- <value value="20" name="PERF_RB_BLENDED_FXP_COMPONENTS"/>
- <value value="21" name="PERF_RB_BLENDED_FP16_COMPONENTS"/>
- <value value="22" name="PERF_RB_PS_INVOCATIONS"/>
- <value value="23" name="PERF_RB_2D_ALIVE_CYCLES"/>
- <value value="24" name="PERF_RB_2D_STALL_CYCLES_A2D"/>
- <value value="25" name="PERF_RB_2D_STARVE_CYCLES_SRC"/>
- <value value="26" name="PERF_RB_2D_STARVE_CYCLES_SP"/>
- <value value="27" name="PERF_RB_2D_STARVE_CYCLES_DST"/>
- <value value="28" name="PERF_RB_2D_VALID_PIXELS"/>
- <value value="29" name="PERF_RB_3D_PIXELS"/>
- <value value="30" name="PERF_RB_BLENDER_WORKING_CYCLES"/>
- <value value="31" name="PERF_RB_ZPROC_WORKING_CYCLES"/>
- <value value="32" name="PERF_RB_CPROC_WORKING_CYCLES"/>
- <value value="33" name="PERF_RB_SAMPLER_WORKING_CYCLES"/>
- <value value="34" name="PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
- <value value="35" name="PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
- <value value="36" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
- <value value="37" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
- <value value="38" name="PERF_RB_STALL_CYCLES_VPC"/>
- <value value="39" name="PERF_RB_2D_INPUT_TRANS"/>
- <value value="40" name="PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
- <value value="41" name="PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
- <value value="42" name="PERF_RB_BLENDED_FP32_COMPONENTS"/>
- <value value="43" name="PERF_RB_COLOR_PIX_TILES"/>
- <value value="44" name="PERF_RB_STALL_CYCLES_CCU"/>
- <value value="45" name="PERF_RB_EARLY_Z_ARB3_GRANT"/>
- <value value="46" name="PERF_RB_LATE_Z_ARB3_GRANT"/>
- <value value="47" name="PERF_RB_EARLY_Z_SKIP_GRANT"/>
-</enum>
-
-<enum name="a6xx_vsc_perfcounter_select">
- <value value="0" name="PERF_VSC_BUSY_CYCLES"/>
- <value value="1" name="PERF_VSC_WORKING_CYCLES"/>
- <value value="2" name="PERF_VSC_STALL_CYCLES_UCHE"/>
- <value value="3" name="PERF_VSC_EOT_NUM"/>
- <value value="4" name="PERF_VSC_INPUT_TILES"/>
-</enum>
-
-<enum name="a6xx_ccu_perfcounter_select">
- <value value="0" name="PERF_CCU_BUSY_CYCLES"/>
- <value value="1" name="PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
- <value value="2" name="PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
- <value value="3" name="PERF_CCU_STARVE_CYCLES_FLAG_RETURN"/>
- <value value="4" name="PERF_CCU_DEPTH_BLOCKS"/>
- <value value="5" name="PERF_CCU_COLOR_BLOCKS"/>
- <value value="6" name="PERF_CCU_DEPTH_BLOCK_HIT"/>
- <value value="7" name="PERF_CCU_COLOR_BLOCK_HIT"/>
- <value value="8" name="PERF_CCU_PARTIAL_BLOCK_READ"/>
- <value value="9" name="PERF_CCU_GMEM_READ"/>
- <value value="10" name="PERF_CCU_GMEM_WRITE"/>
- <value value="11" name="PERF_CCU_DEPTH_READ_FLAG0_COUNT"/>
- <value value="12" name="PERF_CCU_DEPTH_READ_FLAG1_COUNT"/>
- <value value="13" name="PERF_CCU_DEPTH_READ_FLAG2_COUNT"/>
- <value value="14" name="PERF_CCU_DEPTH_READ_FLAG3_COUNT"/>
- <value value="15" name="PERF_CCU_DEPTH_READ_FLAG4_COUNT"/>
- <value value="16" name="PERF_CCU_DEPTH_READ_FLAG5_COUNT"/>
- <value value="17" name="PERF_CCU_DEPTH_READ_FLAG6_COUNT"/>
- <value value="18" name="PERF_CCU_DEPTH_READ_FLAG8_COUNT"/>
- <value value="19" name="PERF_CCU_COLOR_READ_FLAG0_COUNT"/>
- <value value="20" name="PERF_CCU_COLOR_READ_FLAG1_COUNT"/>
- <value value="21" name="PERF_CCU_COLOR_READ_FLAG2_COUNT"/>
- <value value="22" name="PERF_CCU_COLOR_READ_FLAG3_COUNT"/>
- <value value="23" name="PERF_CCU_COLOR_READ_FLAG4_COUNT"/>
- <value value="24" name="PERF_CCU_COLOR_READ_FLAG5_COUNT"/>
- <value value="25" name="PERF_CCU_COLOR_READ_FLAG6_COUNT"/>
- <value value="26" name="PERF_CCU_COLOR_READ_FLAG8_COUNT"/>
- <value value="27" name="PERF_CCU_2D_RD_REQ"/>
- <value value="28" name="PERF_CCU_2D_WR_REQ"/>
-</enum>
-
-<enum name="a6xx_lrz_perfcounter_select">
- <value value="0" name="PERF_LRZ_BUSY_CYCLES"/>
- <value value="1" name="PERF_LRZ_STARVE_CYCLES_RAS"/>
- <value value="2" name="PERF_LRZ_STALL_CYCLES_RB"/>
- <value value="3" name="PERF_LRZ_STALL_CYCLES_VSC"/>
- <value value="4" name="PERF_LRZ_STALL_CYCLES_VPC"/>
- <value value="5" name="PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
- <value value="6" name="PERF_LRZ_STALL_CYCLES_UCHE"/>
- <value value="7" name="PERF_LRZ_LRZ_READ"/>
- <value value="8" name="PERF_LRZ_LRZ_WRITE"/>
- <value value="9" name="PERF_LRZ_READ_LATENCY"/>
- <value value="10" name="PERF_LRZ_MERGE_CACHE_UPDATING"/>
- <value value="11" name="PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
- <value value="12" name="PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
- <value value="13" name="PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
- <value value="14" name="PERF_LRZ_FULL_8X8_TILES"/>
- <value value="15" name="PERF_LRZ_PARTIAL_8X8_TILES"/>
- <value value="16" name="PERF_LRZ_TILE_KILLED"/>
- <value value="17" name="PERF_LRZ_TOTAL_PIXEL"/>
- <value value="18" name="PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
- <value value="19" name="PERF_LRZ_FULLY_COVERED_TILES"/>
- <value value="20" name="PERF_LRZ_PARTIAL_COVERED_TILES"/>
- <value value="21" name="PERF_LRZ_FEEDBACK_ACCEPT"/>
- <value value="22" name="PERF_LRZ_FEEDBACK_DISCARD"/>
- <value value="23" name="PERF_LRZ_FEEDBACK_STALL"/>
- <value value="24" name="PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
- <value value="25" name="PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
- <value value="26" name="PERF_LRZ_STALL_CYCLES_VC"/>
- <value value="27" name="PERF_LRZ_RAS_MASK_TRANS"/>
-</enum>
-
-<enum name="a6xx_cmp_perfcounter_select">
- <value value="0" name="PERF_CMPDECMP_STALL_CYCLES_ARB"/>
- <value value="1" name="PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
- <value value="2" name="PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
- <value value="3" name="PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
- <value value="4" name="PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
- <value value="5" name="PERF_CMPDECMP_VBIF_READ_REQUEST"/>
- <value value="6" name="PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
- <value value="7" name="PERF_CMPDECMP_VBIF_READ_DATA"/>
- <value value="8" name="PERF_CMPDECMP_VBIF_WRITE_DATA"/>
- <value value="9" name="PERF_CMPDECMP_FLAG_FETCH_CYCLES"/>
- <value value="10" name="PERF_CMPDECMP_FLAG_FETCH_SAMPLES"/>
- <value value="11" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
- <value value="12" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
- <value value="13" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
- <value value="14" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
- <value value="15" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
- <value value="16" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
- <value value="17" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
- <value value="18" name="PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
- <value value="19" name="PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
- <value value="20" name="PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
- <value value="21" name="PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
- <value value="22" name="PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
- <value value="23" name="PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
- <value value="24" name="PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
- <value value="25" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ"/>
- <value value="26" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR"/>
- <value value="27" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN"/>
- <value value="28" name="PERF_CMPDECMP_2D_RD_DATA"/>
- <value value="29" name="PERF_CMPDECMP_2D_WR_DATA"/>
- <value value="30" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
- <value value="31" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
- <value value="32" name="PERF_CMPDECMP_2D_OUTPUT_TRANS"/>
- <value value="33" name="PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
- <value value="34" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
- <value value="35" name="PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
- <value value="36" name="PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
- <value value="37" name="PERF_CMPDECMP_2D_BUSY_CYCLES"/>
- <value value="38" name="PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES"/>
- <value value="39" name="PERF_CMPDECMP_2D_PIXELS"/>
-</enum>
-
-<!--
-Used in a6xx_2d_blit_cntl. The value mostly correlates with the
-component type/size, so it presumably relates to the internal format
-used for blending. The one exception is that 16b unorm and 32b float
-use the same value; maybe 16b unorm is uncommon enough that it was
-just easier to upconvert to 32b float internally?
-
- 8b unorm: 0x10 (sometimes 0; is the high bit part of something else?)
-16b unorm: 4
-
-32b int: 7
-16b int: 6
- 8b int: 5
-
-32b float: 4
-16b float: 3
- -->
-<enum name="a6xx_2d_ifmt">
- <value value="0x10" name="R2D_UNORM8"/>
- <value value="0x7" name="R2D_INT32"/>
- <value value="0x6" name="R2D_INT16"/>
- <value value="0x5" name="R2D_INT8"/>
- <value value="0x4" name="R2D_FLOAT32"/>
- <value value="0x3" name="R2D_FLOAT16"/>
- <value value="0x1" name="R2D_UNORM8_SRGB"/>
- <value value="0x0" name="R2D_RAW"/>
-</enum>
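A driver picking one of these values would typically key off the component
type and width of the blit format. The C sketch below is only an
illustration of the mapping described in the comment above, including the
16b-unorm-shares-FLOAT32 quirk; the `comp_type` enum and `r2d_ifmt_for()`
helper are hypothetical names, and only the `a6xx_2d_ifmt`/`R2D_*`
identifiers come from the enum itself (via the generated headers):

/* Hypothetical helper: choose the 2D blit internal format from a
 * component description.  Not the actual mesa/kernel API; it just
 * encodes the table in the comment above. */
enum comp_type { COMP_UNORM, COMP_UNORM_SRGB, COMP_INT, COMP_FLOAT, COMP_RAW };

static enum a6xx_2d_ifmt r2d_ifmt_for(enum comp_type type, unsigned bits)
{
	switch (type) {
	case COMP_UNORM_SRGB:
		return R2D_UNORM8_SRGB;               /* 0x1 */
	case COMP_UNORM:
		/* 16b unorm shares 0x4 with 32b float, presumably
		 * upconverted to float32 internally. */
		return bits == 8 ? R2D_UNORM8 : R2D_FLOAT32;
	case COMP_INT:
		return bits == 32 ? R2D_INT32 :
		       bits == 16 ? R2D_INT16 : R2D_INT8;
	case COMP_FLOAT:
		return bits == 16 ? R2D_FLOAT16 : R2D_FLOAT32;
	default:
		return R2D_RAW;                       /* untyped copies */
	}
}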
-
-<enum name="a6xx_ztest_mode">
- <doc>Allow early z-test and early-lrz (if applicable)</doc>
- <value value="0x0" name="A6XX_EARLY_Z"/>
- <doc>Disable early z-test and early-lrz test (if applicable)</doc>
- <value value="0x1" name="A6XX_LATE_Z"/>
- <doc>
- A special mode that allows early-lrz test but disables
- early-z test. This might sound a bit funny, since
- lrz-test happens before z-test, but as long as a couple
- of conditions are maintained it allows using lrz-test in
- cases where the fragment shader has kill/discard:
-
- 1) Disable lrz-write in cases where it is uncertain during
- the binning pass that a fragment will pass, i.e. if the frag
- shader has kill, writes z, or alpha/stencil test is
- enabled. (For correctness, lrz-write must also be disabled
- when blend is enabled.) This is analogous to how a
- z-prepass works.
-
- 2) Disable lrz-write and lrz-test if a depth-test direction
- reversal is detected. Due to condition (1), the contents
- of the lrz buffer are a conservative estimate of the
- depth buffer during the draw pass, meaning that geometry
- that we know for certain will not be visible will not pass
- lrz-test, while geometry that may be visible (or that
- contributes to blend) will pass the lrz-test.
-
- This allows us to keep early-lrz-test in cases where the frag
- shader does not write z (i.e. we know the z-value before FS)
- and does not have side effects (image/ssbo writes, etc.), but
- does have kill/discard. This turns out to be a common
- enough case that it is useful to keep early-lrz test against
- the conservative lrz buffer to discard fragments that we
- know will definitely not be visible.
- </doc>
- <value value="0x2" name="A6XX_EARLY_LRZ_LATE_Z"/>
- <doc>Not a real hw value, used internally by mesa</doc>
- <value value="0x3" name="A6XX_INVALID_ZTEST"/>
-</enum>
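A hedged sketch of how a driver might choose among these modes, following
the rules in the doc text above. The `fs_*` state flags and
`pick_ztest_mode()` helper are illustrative names, not the actual mesa
heuristic; only the `a6xx_ztest_mode` values come from the enum (via the
generated headers). Whether lrz-write is enabled is a separate decision,
per condition (1) above:

/* Illustrative mode selection per the doc text; inputs are
 * hypothetical per-draw shader/state flags. */
static enum a6xx_ztest_mode pick_ztest_mode(bool fs_writes_z,
					    bool fs_has_kill,
					    bool fs_has_side_effects)
{
	if (fs_writes_z || fs_has_side_effects)
		return A6XX_LATE_Z;    /* z unknown before FS runs */
	if (fs_has_kill)
		/* lrz-test against the conservative lrz buffer is
		 * still safe, but early-z is not. */
		return A6XX_EARLY_LRZ_LATE_Z;
	return A6XX_EARLY_Z;
}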
-
-<enum name="a6xx_tess_spacing">
- <value value="0x0" name="TESS_EQUAL"/>
- <value value="0x2" name="TESS_FRACTIONAL_ODD"/>
- <value value="0x3" name="TESS_FRACTIONAL_EVEN"/>
-</enum>
-<enum name="a6xx_tess_output">
- <value value="0x0" name="TESS_POINTS"/>
- <value value="0x1" name="TESS_LINES"/>
- <value value="0x2" name="TESS_CW_TRIS"/>
- <value value="0x3" name="TESS_CCW_TRIS"/>
-</enum>
-
-<enum name="a7xx_cp_perfcounter_select">
- <value value="0" name="A7XX_PERF_CP_ALWAYS_COUNT"/>
- <value value="1" name="A7XX_PERF_CP_BUSY_GFX_CORE_IDLE"/>
- <value value="2" name="A7XX_PERF_CP_BUSY_CYCLES"/>
- <value value="3" name="A7XX_PERF_CP_NUM_PREEMPTIONS"/>
- <value value="4" name="A7XX_PERF_CP_PREEMPTION_REACTION_DELAY"/>
- <value value="5" name="A7XX_PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
- <value value="6" name="A7XX_PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
- <value value="7" name="A7XX_PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
- <value value="8" name="A7XX_PERF_CP_PREDICATED_DRAWS_KILLED"/>
- <value value="9" name="A7XX_PERF_CP_MODE_SWITCH"/>
- <value value="10" name="A7XX_PERF_CP_ZPASS_DONE"/>
- <value value="11" name="A7XX_PERF_CP_CONTEXT_DONE"/>
- <value value="12" name="A7XX_PERF_CP_CACHE_FLUSH"/>
- <value value="13" name="A7XX_PERF_CP_LONG_PREEMPTIONS"/>
- <value value="14" name="A7XX_PERF_CP_SQE_I_CACHE_STARVE"/>
- <value value="15" name="A7XX_PERF_CP_SQE_IDLE"/>
- <value value="16" name="A7XX_PERF_CP_SQE_PM4_STARVE_RB_IB"/>
- <value value="17" name="A7XX_PERF_CP_SQE_PM4_STARVE_SDS"/>
- <value value="18" name="A7XX_PERF_CP_SQE_MRB_STARVE"/>
- <value value="19" name="A7XX_PERF_CP_SQE_RRB_STARVE"/>
- <value value="20" name="A7XX_PERF_CP_SQE_VSD_STARVE"/>
- <value value="21" name="A7XX_PERF_CP_VSD_DECODE_STARVE"/>
- <value value="22" name="A7XX_PERF_CP_SQE_PIPE_OUT_STALL"/>
- <value value="23" name="A7XX_PERF_CP_SQE_SYNC_STALL"/>
- <value value="24" name="A7XX_PERF_CP_SQE_PM4_WFI_STALL"/>
- <value value="25" name="A7XX_PERF_CP_SQE_SYS_WFI_STALL"/>
- <value value="26" name="A7XX_PERF_CP_SQE_T4_EXEC"/>
- <value value="27" name="A7XX_PERF_CP_SQE_LOAD_STATE_EXEC"/>
- <value value="28" name="A7XX_PERF_CP_SQE_SAVE_SDS_STATE"/>
- <value value="29" name="A7XX_PERF_CP_SQE_DRAW_EXEC"/>
- <value value="30" name="A7XX_PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
- <value value="31" name="A7XX_PERF_CP_SQE_EXEC_PROFILED"/>
- <value value="32" name="A7XX_PERF_CP_MEMORY_POOL_EMPTY"/>
- <value value="33" name="A7XX_PERF_CP_MEMORY_POOL_SYNC_STALL"/>
- <value value="34" name="A7XX_PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
- <value value="35" name="A7XX_PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
- <value value="36" name="A7XX_PERF_CP_AHB_STALL_SQE_GMU"/>
- <value value="37" name="A7XX_PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
- <value value="38" name="A7XX_PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
- <value value="39" name="A7XX_PERF_CP_CLUSTER0_EMPTY"/>
- <value value="40" name="A7XX_PERF_CP_CLUSTER1_EMPTY"/>
- <value value="41" name="A7XX_PERF_CP_CLUSTER2_EMPTY"/>
- <value value="42" name="A7XX_PERF_CP_CLUSTER3_EMPTY"/>
- <value value="43" name="A7XX_PERF_CP_CLUSTER4_EMPTY"/>
- <value value="44" name="A7XX_PERF_CP_CLUSTER5_EMPTY"/>
- <value value="45" name="A7XX_PERF_CP_PM4_DATA"/>
- <value value="46" name="A7XX_PERF_CP_PM4_HEADERS"/>
- <value value="47" name="A7XX_PERF_CP_VBIF_READ_BEATS"/>
- <value value="48" name="A7XX_PERF_CP_VBIF_WRITE_BEATS"/>
- <value value="49" name="A7XX_PERF_CP_SQE_INSTR_COUNTER"/>
- <value value="50" name="A7XX_PERF_CP_RESERVED_50"/>
- <value value="51" name="A7XX_PERF_CP_RESERVED_51"/>
- <value value="52" name="A7XX_PERF_CP_RESERVED_52"/>
- <value value="53" name="A7XX_PERF_CP_RESERVED_53"/>
- <value value="54" name="A7XX_PERF_CP_RESERVED_54"/>
- <value value="55" name="A7XX_PERF_CP_RESERVED_55"/>
- <value value="56" name="A7XX_PERF_CP_RESERVED_56"/>
- <value value="57" name="A7XX_PERF_CP_RESERVED_57"/>
- <value value="58" name="A7XX_PERF_CP_RESERVED_58"/>
- <value value="59" name="A7XX_PERF_CP_RESERVED_59"/>
- <value value="60" name="A7XX_PERF_CP_CLUSTER0_FULL"/>
- <value value="61" name="A7XX_PERF_CP_CLUSTER1_FULL"/>
- <value value="62" name="A7XX_PERF_CP_CLUSTER2_FULL"/>
- <value value="63" name="A7XX_PERF_CP_CLUSTER3_FULL"/>
- <value value="64" name="A7XX_PERF_CP_CLUSTER4_FULL"/>
- <value value="65" name="A7XX_PERF_CP_CLUSTER5_FULL"/>
- <value value="66" name="A7XX_PERF_CP_CLUSTER6_FULL"/>
- <value value="67" name="A7XX_PERF_CP_CLUSTER6_EMPTY"/>
- <value value="68" name="A7XX_PERF_CP_ICACHE_MISSES"/>
- <value value="69" name="A7XX_PERF_CP_ICACHE_HITS"/>
- <value value="70" name="A7XX_PERF_CP_ICACHE_STALL"/>
- <value value="71" name="A7XX_PERF_CP_DCACHE_MISSES"/>
- <value value="72" name="A7XX_PERF_CP_DCACHE_HITS"/>
- <value value="73" name="A7XX_PERF_CP_DCACHE_STALLS"/>
- <value value="74" name="A7XX_PERF_CP_AQE_SQE_STALL"/>
- <value value="75" name="A7XX_PERF_CP_SQE_AQE_STARVE"/>
- <value value="76" name="A7XX_PERF_CP_PREEMPT_LATENCY"/>
- <value value="77" name="A7XX_PERF_CP_SQE_MD8_STALL_CYCLES"/>
- <value value="78" name="A7XX_PERF_CP_SQE_MESH_EXEC_CYCLES"/>
- <value value="79" name="A7XX_PERF_CP_AQE_NUM_AS_CHUNKS"/>
- <value value="80" name="A7XX_PERF_CP_AQE_NUM_MS_CHUNKS"/>
-</enum>
-
-<enum name="a7xx_rbbm_perfcounter_select">
- <value value="0" name="A7XX_PERF_RBBM_ALWAYS_COUNT"/>
- <value value="1" name="A7XX_PERF_RBBM_ALWAYS_ON"/>
- <value value="2" name="A7XX_PERF_RBBM_TSE_BUSY"/>
- <value value="3" name="A7XX_PERF_RBBM_RAS_BUSY"/>
- <value value="4" name="A7XX_PERF_RBBM_PC_DCALL_BUSY"/>
- <value value="5" name="A7XX_PERF_RBBM_PC_VSD_BUSY"/>
- <value value="6" name="A7XX_PERF_RBBM_STATUS_MASKED"/>
- <value value="7" name="A7XX_PERF_RBBM_COM_BUSY"/>
- <value value="8" name="A7XX_PERF_RBBM_DCOM_BUSY"/>
- <value value="9" name="A7XX_PERF_RBBM_VBIF_BUSY"/>
- <value value="10" name="A7XX_PERF_RBBM_VSC_BUSY"/>
- <value value="11" name="A7XX_PERF_RBBM_TESS_BUSY"/>
- <value value="12" name="A7XX_PERF_RBBM_UCHE_BUSY"/>
- <value value="13" name="A7XX_PERF_RBBM_HLSQ_BUSY"/>
-</enum>
-
-<enum name="a7xx_pc_perfcounter_select">
- <value value="0" name="A7XX_PERF_PC_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_PC_WORKING_CYCLES"/>
- <value value="2" name="A7XX_PERF_PC_STALL_CYCLES_VFD"/>
- <value value="3" name="A7XX_PERF_PC_RESERVED"/>
- <value value="4" name="A7XX_PERF_PC_STALL_CYCLES_VPC"/>
- <value value="5" name="A7XX_PERF_PC_STALL_CYCLES_UCHE"/>
- <value value="6" name="A7XX_PERF_PC_STALL_CYCLES_TESS"/>
- <value value="7" name="A7XX_PERF_PC_STALL_CYCLES_VFD_ONLY"/>
- <value value="8" name="A7XX_PERF_PC_STALL_CYCLES_VPC_ONLY"/>
- <value value="9" name="A7XX_PERF_PC_PASS1_TF_STALL_CYCLES"/>
- <value value="10" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
- <value value="11" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
- <value value="12" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
- <value value="13" name="A7XX_PERF_PC_STARVE_CYCLES_DI"/>
- <value value="14" name="A7XX_PERF_PC_VIS_STREAMS_LOADED"/>
- <value value="15" name="A7XX_PERF_PC_INSTANCES"/>
- <value value="16" name="A7XX_PERF_PC_VPC_PRIMITIVES"/>
- <value value="17" name="A7XX_PERF_PC_DEAD_PRIM"/>
- <value value="18" name="A7XX_PERF_PC_LIVE_PRIM"/>
- <value value="19" name="A7XX_PERF_PC_VERTEX_HITS"/>
- <value value="20" name="A7XX_PERF_PC_IA_VERTICES"/>
- <value value="21" name="A7XX_PERF_PC_IA_PRIMITIVES"/>
- <value value="22" name="A7XX_PERF_PC_RESERVED_22"/>
- <value value="23" name="A7XX_PERF_PC_HS_INVOCATIONS"/>
- <value value="24" name="A7XX_PERF_PC_DS_INVOCATIONS"/>
- <value value="25" name="A7XX_PERF_PC_VS_INVOCATIONS"/>
- <value value="26" name="A7XX_PERF_PC_GS_INVOCATIONS"/>
- <value value="27" name="A7XX_PERF_PC_DS_PRIMITIVES"/>
- <value value="28" name="A7XX_PERF_PC_3D_DRAWCALLS"/>
- <value value="29" name="A7XX_PERF_PC_2D_DRAWCALLS"/>
- <value value="30" name="A7XX_PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
- <value value="31" name="A7XX_PERF_PC_TESS_BUSY_CYCLES"/>
- <value value="32" name="A7XX_PERF_PC_TESS_WORKING_CYCLES"/>
- <value value="33" name="A7XX_PERF_PC_TESS_STALL_CYCLES_PC"/>
- <value value="34" name="A7XX_PERF_PC_TESS_STARVE_CYCLES_PC"/>
- <value value="35" name="A7XX_PERF_PC_TESS_SINGLE_PRIM_CYCLES"/>
- <value value="36" name="A7XX_PERF_PC_TESS_PC_UV_TRANS"/>
- <value value="37" name="A7XX_PERF_PC_TESS_PC_UV_PATCHES"/>
- <value value="38" name="A7XX_PERF_PC_TESS_FACTOR_TRANS"/>
- <value value="39" name="A7XX_PERF_PC_TAG_CHECKED_VERTICES"/>
- <value value="40" name="A7XX_PERF_PC_MESH_VS_WAVES"/>
- <value value="41" name="A7XX_PERF_PC_MESH_DRAWS"/>
- <value value="42" name="A7XX_PERF_PC_MESH_DEAD_DRAWS"/>
- <value value="43" name="A7XX_PERF_PC_MESH_MVIS_EN_DRAWS"/>
- <value value="44" name="A7XX_PERF_PC_MESH_DEAD_PRIM"/>
- <value value="45" name="A7XX_PERF_PC_MESH_LIVE_PRIM"/>
- <value value="46" name="A7XX_PERF_PC_MESH_PA_EN_PRIM"/>
- <value value="47" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_MVIS_STREAM"/>
- <value value="48" name="A7XX_PERF_PC_STARVE_CYCLES_PREDRAW"/>
- <value value="49" name="A7XX_PERF_PC_STALL_CYCLES_COMPUTE_GFX"/>
- <value value="50" name="A7XX_PERF_PC_STALL_CYCLES_GFX_COMPUTE"/>
- <value value="51" name="A7XX_PERF_PC_TESS_PC_MULTI_PATCH_TRANS"/>
-</enum>
-
-<enum name="a7xx_vfd_perfcounter_select">
- <value value="0" name="A7XX_PERF_VFD_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_VFD_STALL_CYCLES_UCHE"/>
- <value value="2" name="A7XX_PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
- <value value="3" name="A7XX_PERF_VFD_STALL_CYCLES_SP_INFO"/>
- <value value="4" name="A7XX_PERF_VFD_STALL_CYCLES_SP_ATTR"/>
- <value value="5" name="A7XX_PERF_VFD_STARVE_CYCLES_UCHE"/>
- <value value="6" name="A7XX_PERF_VFD_RBUFFER_FULL"/>
- <value value="7" name="A7XX_PERF_VFD_ATTR_INFO_FIFO_FULL"/>
- <value value="8" name="A7XX_PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
- <value value="9" name="A7XX_PERF_VFD_NUM_ATTRIBUTES"/>
- <value value="10" name="A7XX_PERF_VFD_UPPER_SHADER_FIBERS"/>
- <value value="11" name="A7XX_PERF_VFD_LOWER_SHADER_FIBERS"/>
- <value value="12" name="A7XX_PERF_VFD_MODE_0_FIBERS"/>
- <value value="13" name="A7XX_PERF_VFD_MODE_1_FIBERS"/>
- <value value="14" name="A7XX_PERF_VFD_MODE_2_FIBERS"/>
- <value value="15" name="A7XX_PERF_VFD_MODE_3_FIBERS"/>
- <value value="16" name="A7XX_PERF_VFD_MODE_4_FIBERS"/>
- <value value="17" name="A7XX_PERF_VFD_TOTAL_VERTICES"/>
- <value value="18" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD"/>
- <value value="19" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
- <value value="20" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
- <value value="21" name="A7XX_PERF_VFDP_STARVE_CYCLES_PC"/>
- <value value="22" name="A7XX_PERF_VFDP_VS_STAGE_WAVES"/>
- <value value="23" name="A7XX_PERF_VFD_STALL_CYCLES_PRG_END_FE"/>
- <value value="24" name="A7XX_PERF_VFD_STALL_CYCLES_CBSYNC"/>
-</enum>
-
-<enum name="a7xx_hlsq_perfcounter_select">
- <value value="0" name="A7XX_PERF_HLSQ_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_HLSQ_STALL_CYCLES_UCHE"/>
- <value value="2" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
- <value value="3" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
- <value value="4" name="A7XX_PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
- <value value="5" name="A7XX_PERF_HLSQ_UCHE_LATENCY_COUNT"/>
- <value value="6" name="A7XX_PERF_HLSQ_RESERVED_6"/>
- <value value="7" name="A7XX_PERF_HLSQ_RESERVED_7"/>
- <value value="8" name="A7XX_PERF_HLSQ_RESERVED_8"/>
- <value value="9" name="A7XX_PERF_HLSQ_RESERVED_9"/>
- <value value="10" name="A7XX_PERF_HLSQ_COMPUTE_DRAWCALLS"/>
- <value value="11" name="A7XX_PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
- <value value="12" name="A7XX_PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
- <value value="13" name="A7XX_PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
- <value value="14" name="A7XX_PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
- <value value="15" name="A7XX_PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
- <value value="16" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
- <value value="17" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
- <value value="18" name="A7XX_PERF_HLSQ_STALL_CYCLES_VPC"/>
- <value value="19" name="A7XX_PERF_HLSQ_RESERVED_19"/>
- <value value="20" name="A7XX_PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
- <value value="21" name="A7XX_PERF_HLSQ_VSBR_STALL_CYCLES"/>
- <value value="22" name="A7XX_PERF_HLSQ_FS_STALL_CYCLES"/>
- <value value="23" name="A7XX_PERF_HLSQ_LPAC_STALL_CYCLES"/>
- <value value="24" name="A7XX_PERF_HLSQ_BV_STALL_CYCLES"/>
- <value value="25" name="A7XX_PERF_HLSQ_VSBR_DEREF_CYCLES"/>
- <value value="26" name="A7XX_PERF_HLSQ_FS_DEREF_CYCLES"/>
- <value value="27" name="A7XX_PERF_HLSQ_LPAC_DEREF_CYCLES"/>
- <value value="28" name="A7XX_PERF_HLSQ_BV_DEREF_CYCLES"/>
- <value value="29" name="A7XX_PERF_HLSQ_VSBR_S2W_CYCLES"/>
- <value value="30" name="A7XX_PERF_HLSQ_FS_S2W_CYCLES"/>
- <value value="31" name="A7XX_PERF_HLSQ_LPAC_S2W_CYCLES"/>
- <value value="32" name="A7XX_PERF_HLSQ_BV_S2W_CYCLES"/>
- <value value="33" name="A7XX_PERF_HLSQ_VSBR_WAIT_FS_S2W"/>
- <value value="34" name="A7XX_PERF_HLSQ_FS_WAIT_VS_S2W"/>
- <value value="35" name="A7XX_PERF_HLSQ_LPAC_WAIT_VS_S2W"/>
- <value value="36" name="A7XX_PERF_HLSQ_BV_WAIT_FS_S2W"/>
- <value value="37" name="A7XX_PERF_HLSQ_VS_WAIT_CONST_RESOURCE"/>
- <value value="38" name="A7XX_PERF_HLSQ_FS_WAIT_SAME_VS_S2W"/>
- <value value="39" name="A7XX_PERF_HLSQ_FS_STARVING_SP"/>
- <value value="40" name="A7XX_PERF_HLSQ_VS_DATA_WAIT_PROGRAMMING"/>
- <value value="41" name="A7XX_PERF_HLSQ_BV_DATA_WAIT_PROGRAMMING"/>
- <value value="42" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_VS"/>
- <value value="43" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_VS"/>
- <value value="44" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_FS"/>
- <value value="45" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_FS"/>
- <value value="46" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_BV"/>
- <value value="47" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_BV"/>
- <value value="48" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_LPAC"/>
- <value value="49" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_LPAC"/>
- <value value="50" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_VS"/>
- <value value="51" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_FS"/>
- <value value="52" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_BV"/>
- <value value="53" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_LPAC"/>
- <value value="54" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_VS"/>
- <value value="55" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_FS"/>
- <value value="56" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_BV"/>
- <value value="57" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_LPAC"/>
-</enum>
-
-<enum name="a7xx_vpc_perfcounter_select">
- <value value="0" name="A7XX_PERF_VPC_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_VPC_WORKING_CYCLES"/>
- <value value="2" name="A7XX_PERF_VPC_STALL_CYCLES_UCHE"/>
- <value value="3" name="A7XX_PERF_VPC_STALL_CYCLES_VFD_WACK"/>
- <value value="4" name="A7XX_PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
- <value value="5" name="A7XX_PERF_VPC_RESERVED_5"/>
- <value value="6" name="A7XX_PERF_VPC_STALL_CYCLES_SP_LM"/>
- <value value="7" name="A7XX_PERF_VPC_STARVE_CYCLES_SP"/>
- <value value="8" name="A7XX_PERF_VPC_STARVE_CYCLES_LRZ"/>
- <value value="9" name="A7XX_PERF_VPC_PC_PRIMITIVES"/>
- <value value="10" name="A7XX_PERF_VPC_SP_COMPONENTS"/>
- <value value="11" name="A7XX_PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
- <value value="12" name="A7XX_PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
- <value value="13" name="A7XX_PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
- <value value="14" name="A7XX_PERF_VPC_LM_TRANSACTION"/>
- <value value="15" name="A7XX_PERF_VPC_STREAMOUT_TRANSACTION"/>
- <value value="16" name="A7XX_PERF_VPC_VS_BUSY_CYCLES"/>
- <value value="17" name="A7XX_PERF_VPC_PS_BUSY_CYCLES"/>
- <value value="18" name="A7XX_PERF_VPC_VS_WORKING_CYCLES"/>
- <value value="19" name="A7XX_PERF_VPC_PS_WORKING_CYCLES"/>
- <value value="20" name="A7XX_PERF_VPC_STARVE_CYCLES_RB"/>
- <value value="21" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_POS"/>
- <value value="22" name="A7XX_PERF_VPC_WIT_FULL_CYCLES"/>
- <value value="23" name="A7XX_PERF_VPC_VPCRAM_FULL_CYCLES"/>
- <value value="24" name="A7XX_PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
- <value value="25" name="A7XX_PERF_VPC_NUM_VPCRAM_WRITE"/>
- <value value="26" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_SO"/>
- <value value="27" name="A7XX_PERF_VPC_NUM_ATTR_REQ_LM"/>
- <value value="28" name="A7XX_PERF_VPC_STALL_CYCLE_TSE"/>
- <value value="29" name="A7XX_PERF_VPC_TSE_PRIMITIVES"/>
- <value value="30" name="A7XX_PERF_VPC_GS_PRIMITIVES"/>
- <value value="31" name="A7XX_PERF_VPC_TSE_TRANSACTIONS"/>
- <value value="32" name="A7XX_PERF_VPC_STALL_CYCLES_CCU"/>
- <value value="33" name="A7XX_PERF_VPC_NUM_WM_HIT"/>
- <value value="34" name="A7XX_PERF_VPC_STALL_DQ_WACK"/>
- <value value="35" name="A7XX_PERF_VPC_STALL_CYCLES_CCHE"/>
- <value value="36" name="A7XX_PERF_VPC_STARVE_CYCLES_CCHE"/>
- <value value="37" name="A7XX_PERF_VPC_NUM_PA_REQ"/>
- <value value="38" name="A7XX_PERF_VPC_NUM_LM_REQ_HIT"/>
- <value value="39" name="A7XX_PERF_VPC_CCHE_REQBUF_FULL"/>
- <value value="40" name="A7XX_PERF_VPC_STALL_CYCLES_LM_ACK"/>
- <value value="41" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_FE"/>
- <value value="42" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_PCVS"/>
- <value value="43" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_VPCPS"/>
-</enum>
-
-<enum name="a7xx_tse_perfcounter_select">
- <value value="0" name="A7XX_PERF_TSE_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_TSE_CLIPPING_CYCLES"/>
- <value value="2" name="A7XX_PERF_TSE_STALL_CYCLES_RAS"/>
- <value value="3" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
- <value value="4" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
- <value value="5" name="A7XX_PERF_TSE_STARVE_CYCLES_PC"/>
- <value value="6" name="A7XX_PERF_TSE_INPUT_PRIM"/>
- <value value="7" name="A7XX_PERF_TSE_INPUT_NULL_PRIM"/>
- <value value="8" name="A7XX_PERF_TSE_TRIVAL_REJ_PRIM"/>
- <value value="9" name="A7XX_PERF_TSE_CLIPPED_PRIM"/>
- <value value="10" name="A7XX_PERF_TSE_ZERO_AREA_PRIM"/>
- <value value="11" name="A7XX_PERF_TSE_FACENESS_CULLED_PRIM"/>
- <value value="12" name="A7XX_PERF_TSE_ZERO_PIXEL_PRIM"/>
- <value value="13" name="A7XX_PERF_TSE_OUTPUT_NULL_PRIM"/>
- <value value="14" name="A7XX_PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
- <value value="15" name="A7XX_PERF_TSE_CINVOCATION"/>
- <value value="16" name="A7XX_PERF_TSE_CPRIMITIVES"/>
- <value value="17" name="A7XX_PERF_TSE_2D_INPUT_PRIM"/>
- <value value="18" name="A7XX_PERF_TSE_2D_ALIVE_CYCLES"/>
- <value value="19" name="A7XX_PERF_TSE_CLIP_PLANES"/>
-</enum>
-
-<enum name="a7xx_ras_perfcounter_select">
- <value value="0" name="A7XX_PERF_RAS_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
- <value value="2" name="A7XX_PERF_RAS_STALL_CYCLES_LRZ"/>
- <value value="3" name="A7XX_PERF_RAS_STARVE_CYCLES_TSE"/>
- <value value="4" name="A7XX_PERF_RAS_SUPER_TILES"/>
- <value value="5" name="A7XX_PERF_RAS_8X4_TILES"/>
- <value value="6" name="A7XX_PERF_RAS_MASKGEN_ACTIVE"/>
- <value value="7" name="A7XX_PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
- <value value="8" name="A7XX_PERF_RAS_FULLY_COVERED_8X4_TILES"/>
- <value value="9" name="A7XX_PERF_RAS_PRIM_KILLED_INVISILBE"/>
- <value value="10" name="A7XX_PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
- <value value="11" name="A7XX_PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
- <value value="12" name="A7XX_PERF_RAS_BLOCKS"/>
- <value value="13" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_0_WORKING_CC_l2"/>
- <value value="14" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_1_WORKING_CC_l2"/>
- <value value="15" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_2_WORKING_CC_l2"/>
- <value value="16" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_3_WORKING_CC_l2"/>
- <value value="17" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_4_WORKING_CC_l2"/>
- <value value="18" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_5_WORKING_CC_l2"/>
- <value value="19" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_6_WORKING_CC_l2"/>
- <value value="20" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_7_WORKING_CC_l2"/>
- <value value="21" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_8_WORKING_CC_l2"/>
- <value value="22" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_9_WORKING_CC_l2"/>
- <value value="23" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_10_WORKING_CC_l2"/>
- <value value="24" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_11_WORKING_CC_l2"/>
- <value value="25" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_12_WORKING_CC_l2"/>
- <value value="26" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_13_WORKING_CC_l2"/>
- <value value="27" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_14_WORKING_CC_l2"/>
- <value value="28" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_15_WORKING_CC_l2"/>
- <value value="29" name="A7XX_PERF_RAS_FALSE_PARTIAL_STILE"/>
-</enum>
-
-<enum name="a7xx_uche_perfcounter_select">
- <value value="0" name="A7XX_PERF_UCHE_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_UCHE_STALL_CYCLES_ARBITER"/>
- <value value="2" name="A7XX_PERF_UCHE_VBIF_LATENCY_CYCLES"/>
- <value value="3" name="A7XX_PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
- <value value="4" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_TP"/>
- <value value="5" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_VFD"/>
- <value value="6" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
- <value value="7" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
- <value value="8" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_SP"/>
- <value value="9" name="A7XX_PERF_UCHE_READ_REQUESTS_TP"/>
- <value value="10" name="A7XX_PERF_UCHE_READ_REQUESTS_VFD"/>
- <value value="11" name="A7XX_PERF_UCHE_READ_REQUESTS_HLSQ"/>
- <value value="12" name="A7XX_PERF_UCHE_READ_REQUESTS_LRZ"/>
- <value value="13" name="A7XX_PERF_UCHE_READ_REQUESTS_SP"/>
- <value value="14" name="A7XX_PERF_UCHE_WRITE_REQUESTS_LRZ"/>
- <value value="15" name="A7XX_PERF_UCHE_WRITE_REQUESTS_SP"/>
- <value value="16" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VPC"/>
- <value value="17" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VSC"/>
- <value value="18" name="A7XX_PERF_UCHE_EVICTS"/>
- <value value="19" name="A7XX_PERF_UCHE_BANK_REQ0"/>
- <value value="20" name="A7XX_PERF_UCHE_BANK_REQ1"/>
- <value value="21" name="A7XX_PERF_UCHE_BANK_REQ2"/>
- <value value="22" name="A7XX_PERF_UCHE_BANK_REQ3"/>
- <value value="23" name="A7XX_PERF_UCHE_BANK_REQ4"/>
- <value value="24" name="A7XX_PERF_UCHE_BANK_REQ5"/>
- <value value="25" name="A7XX_PERF_UCHE_BANK_REQ6"/>
- <value value="26" name="A7XX_PERF_UCHE_BANK_REQ7"/>
- <value value="27" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH0"/>
- <value value="28" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH1"/>
- <value value="29" name="A7XX_PERF_UCHE_GMEM_READ_BEATS"/>
- <value value="30" name="A7XX_PERF_UCHE_TPH_REF_FULL"/>
- <value value="31" name="A7XX_PERF_UCHE_TPH_VICTIM_FULL"/>
- <value value="32" name="A7XX_PERF_UCHE_TPH_EXT_FULL"/>
- <value value="33" name="A7XX_PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
- <value value="34" name="A7XX_PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
- <value value="35" name="A7XX_PERF_UCHE_DCMP_LATENCY_CYCLES"/>
- <value value="36" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_PC"/>
- <value value="37" name="A7XX_PERF_UCHE_READ_REQUESTS_PC"/>
- <value value="38" name="A7XX_PERF_UCHE_RAM_READ_REQ"/>
- <value value="39" name="A7XX_PERF_UCHE_RAM_WRITE_REQ"/>
- <value value="40" name="A7XX_PERF_UCHE_STARVED_CYCLES_VBIF_DECMP"/>
- <value value="41" name="A7XX_PERF_UCHE_STALL_CYCLES_DECMP"/>
- <value value="42" name="A7XX_PERF_UCHE_ARBITER_STALL_CYCLES_VBIF"/>
- <value value="43" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_UBWC"/>
- <value value="44" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_NONUBWC"/>
- <value value="45" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_GMEM"/>
- <value value="46" name="A7XX_PERF_UCHE_LONG_LINE_ALL_EVICTS_KAILUA"/>
- <value value="47" name="A7XX_PERF_UCHE_LONG_LINE_PARTIAL_EVICTS_KAILUA"/>
- <value value="48" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_CCHE"/>
- <value value="49" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_OTHER_KAILUA"/>
- <value value="50" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_CCHE"/>
- <value value="51" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_OTHER_CLIENTS"/>
- <value value="52" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH0"/>
- <value value="53" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH1"/>
- <value value="54" name="A7XX_PERF_UCHE_CCHE_TPH_QUEUE_FULL"/>
- <value value="55" name="A7XX_PERF_UCHE_CCHE_DPH_QUEUE_FULL"/>
- <value value="56" name="A7XX_PERF_UCHE_GMEM_WRITE_BEATS"/>
- <value value="57" name="A7XX_PERF_UCHE_UBWC_READ_BEATS"/>
- <value value="58" name="A7XX_PERF_UCHE_UBWC_WRITE_BEATS"/>
-</enum>
-
-<enum name="a7xx_tp_perfcounter_select">
- <value value="0" name="A7XX_PERF_TP_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_TP_STALL_CYCLES_UCHE"/>
- <value value="2" name="A7XX_PERF_TP_LATENCY_CYCLES"/>
- <value value="3" name="A7XX_PERF_TP_LATENCY_TRANS"/>
- <value value="4" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_SAMPLES"/>
- <value value="5" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_CYCLES"/>
- <value value="6" name="A7XX_PERF_TP_L1_CACHELINE_REQUESTS"/>
- <value value="7" name="A7XX_PERF_TP_L1_CACHELINE_MISSES"/>
- <value value="8" name="A7XX_PERF_TP_SP_TP_TRANS"/>
- <value value="9" name="A7XX_PERF_TP_TP_SP_TRANS"/>
- <value value="10" name="A7XX_PERF_TP_OUTPUT_PIXELS"/>
- <value value="11" name="A7XX_PERF_TP_FILTER_WORKLOAD_16BIT"/>
- <value value="12" name="A7XX_PERF_TP_FILTER_WORKLOAD_32BIT"/>
- <value value="13" name="A7XX_PERF_TP_QUADS_RECEIVED"/>
- <value value="14" name="A7XX_PERF_TP_QUADS_OFFSET"/>
- <value value="15" name="A7XX_PERF_TP_QUADS_SHADOW"/>
- <value value="16" name="A7XX_PERF_TP_QUADS_ARRAY"/>
- <value value="17" name="A7XX_PERF_TP_QUADS_GRADIENT"/>
- <value value="18" name="A7XX_PERF_TP_QUADS_1D"/>
- <value value="19" name="A7XX_PERF_TP_QUADS_2D"/>
- <value value="20" name="A7XX_PERF_TP_QUADS_BUFFER"/>
- <value value="21" name="A7XX_PERF_TP_QUADS_3D"/>
- <value value="22" name="A7XX_PERF_TP_QUADS_CUBE"/>
- <value value="23" name="A7XX_PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
- <value value="24" name="A7XX_PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
- <value value="25" name="A7XX_PERF_TP_OUTPUT_PIXELS_POINT"/>
- <value value="26" name="A7XX_PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
- <value value="27" name="A7XX_PERF_TP_OUTPUT_PIXELS_MIP"/>
- <value value="28" name="A7XX_PERF_TP_OUTPUT_PIXELS_ANISO"/>
- <value value="29" name="A7XX_PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
- <value value="30" name="A7XX_PERF_TP_FLAG_CACHE_REQUESTS"/>
- <value value="31" name="A7XX_PERF_TP_FLAG_CACHE_MISSES"/>
- <value value="32" name="A7XX_PERF_TP_L1_5_L2_REQUESTS"/>
- <value value="33" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS"/>
- <value value="34" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
- <value value="35" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
- <value value="36" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
- <value value="37" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
- <value value="38" name="A7XX_PERF_TP_TPA2TPC_TRANS"/>
- <value value="39" name="A7XX_PERF_TP_L1_MISSES_ASTC_1TILE"/>
- <value value="40" name="A7XX_PERF_TP_L1_MISSES_ASTC_2TILE"/>
- <value value="41" name="A7XX_PERF_TP_L1_MISSES_ASTC_4TILE"/>
- <value value="42" name="A7XX_PERF_TP_L1_5_COMPRESS_REQS"/>
- <value value="43" name="A7XX_PERF_TP_L1_5_L2_COMPRESS_MISS"/>
- <value value="44" name="A7XX_PERF_TP_L1_BANK_CONFLICT"/>
- <value value="45" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
- <value value="46" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
- <value value="47" name="A7XX_PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
- <value value="48" name="A7XX_PERF_TP_FRONTEND_WORKING_CYCLES"/>
- <value value="49" name="A7XX_PERF_TP_L1_TAG_WORKING_CYCLES"/>
- <value value="50" name="A7XX_PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
- <value value="51" name="A7XX_PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
- <value value="52" name="A7XX_PERF_TP_BACKEND_WORKING_CYCLES"/>
- <value value="53" name="A7XX_PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
- <value value="54" name="A7XX_PERF_TP_STARVE_CYCLES_SP"/>
- <value value="55" name="A7XX_PERF_TP_STARVE_CYCLES_UCHE"/>
- <value value="56" name="A7XX_PERF_TP_STALL_CYCLES_UFC"/>
- <value value="57" name="A7XX_PERF_TP_FORMAT_DECOMP"/>
- <value value="58" name="A7XX_PERF_TP_FILTER_POINT_FP16"/>
- <value value="59" name="A7XX_PERF_TP_FILTER_POINT_FP32"/>
- <value value="60" name="A7XX_PERF_TP_LATENCY_FIFO_FULL"/>
- <value value="61" name="A7XX_PERF_TP_RESERVED_61"/>
- <value value="62" name="A7XX_PERF_TP_RESERVED_62"/>
- <value value="63" name="A7XX_PERF_TP_RESERVED_63"/>
- <value value="64" name="A7XX_PERF_TP_RESERVED_64"/>
- <value value="65" name="A7XX_PERF_TP_RESERVED_65"/>
- <value value="66" name="A7XX_PERF_TP_RESERVED_66"/>
- <value value="67" name="A7XX_PERF_TP_RESERVED_67"/>
- <value value="68" name="A7XX_PERF_TP_RESERVED_68"/>
- <value value="69" name="A7XX_PERF_TP_RESERVED_69"/>
- <value value="70" name="A7XX_PERF_TP_RESERVED_70"/>
- <value value="71" name="A7XX_PERF_TP_RESERVED_71"/>
- <value value="72" name="A7XX_PERF_TP_RESERVED_72"/>
- <value value="73" name="A7XX_PERF_TP_RESERVED_73"/>
- <value value="74" name="A7XX_PERF_TP_RESERVED_74"/>
- <value value="75" name="A7XX_PERF_TP_RESERVED_75"/>
- <value value="76" name="A7XX_PERF_TP_RESERVED_76"/>
- <value value="77" name="A7XX_PERF_TP_RESERVED_77"/>
- <value value="78" name="A7XX_PERF_TP_RESERVED_78"/>
- <value value="79" name="A7XX_PERF_TP_RESERVED_79"/>
- <value value="80" name="A7XX_PERF_TP_RESERVED_80"/>
- <value value="81" name="A7XX_PERF_TP_RESERVED_81"/>
- <value value="82" name="A7XX_PERF_TP_RESERVED_82"/>
- <value value="83" name="A7XX_PERF_TP_RESERVED_83"/>
- <value value="84" name="A7XX_PERF_TP_RESERVED_84"/>
- <value value="85" name="A7XX_PERF_TP_RESERVED_85"/>
- <value value="86" name="A7XX_PERF_TP_RESERVED_86"/>
- <value value="87" name="A7XX_PERF_TP_RESERVED_87"/>
- <value value="88" name="A7XX_PERF_TP_RESERVED_88"/>
- <value value="89" name="A7XX_PERF_TP_RESERVED_89"/>
- <value value="90" name="A7XX_PERF_TP_RESERVED_90"/>
- <value value="91" name="A7XX_PERF_TP_RESERVED_91"/>
- <value value="92" name="A7XX_PERF_TP_RESERVED_92"/>
- <value value="93" name="A7XX_PERF_TP_RESERVED_93"/>
- <value value="94" name="A7XX_PERF_TP_RESERVED_94"/>
- <value value="95" name="A7XX_PERF_TP_RESERVED_95"/>
- <value value="96" name="A7XX_PERF_TP_RESERVED_96"/>
- <value value="97" name="A7XX_PERF_TP_RESERVED_97"/>
- <value value="98" name="A7XX_PERF_TP_RESERVED_98"/>
- <value value="99" name="A7XX_PERF_TP_RESERVED_99"/>
- <value value="100" name="A7XX_PERF_TP_RESERVED_100"/>
- <value value="101" name="A7XX_PERF_TP_RESERVED_101"/>
- <value value="102" name="A7XX_PERF_TP_RESERVED_102"/>
- <value value="103" name="A7XX_PERF_TP_RESERVED_103"/>
- <value value="104" name="A7XX_PERF_TP_RESERVED_104"/>
- <value value="105" name="A7XX_PERF_TP_RESERVED_105"/>
- <value value="106" name="A7XX_PERF_TP_RESERVED_106"/>
- <value value="107" name="A7XX_PERF_TP_RESERVED_107"/>
- <value value="108" name="A7XX_PERF_TP_RESERVED_108"/>
- <value value="109" name="A7XX_PERF_TP_RESERVED_109"/>
- <value value="110" name="A7XX_PERF_TP_RESERVED_110"/>
- <value value="111" name="A7XX_PERF_TP_RESERVED_111"/>
- <value value="112" name="A7XX_PERF_TP_RESERVED_112"/>
- <value value="113" name="A7XX_PERF_TP_RESERVED_113"/>
- <value value="114" name="A7XX_PERF_TP_RESERVED_114"/>
- <value value="115" name="A7XX_PERF_TP_RESERVED_115"/>
- <value value="116" name="A7XX_PERF_TP_RESERVED_116"/>
- <value value="117" name="A7XX_PERF_TP_RESERVED_117"/>
- <value value="118" name="A7XX_PERF_TP_RESERVED_118"/>
- <value value="119" name="A7XX_PERF_TP_RESERVED_119"/>
- <value value="120" name="A7XX_PERF_TP_RESERVED_120"/>
- <value value="121" name="A7XX_PERF_TP_RESERVED_121"/>
- <value value="122" name="A7XX_PERF_TP_RESERVED_122"/>
- <value value="123" name="A7XX_PERF_TP_RESERVED_123"/>
- <value value="124" name="A7XX_PERF_TP_RESERVED_124"/>
- <value value="125" name="A7XX_PERF_TP_RESERVED_125"/>
- <value value="126" name="A7XX_PERF_TP_RESERVED_126"/>
- <value value="127" name="A7XX_PERF_TP_RESERVED_127"/>
- <value value="128" name="A7XX_PERF_TP_FORMAT_DECOMP_BILINEAR"/>
- <value value="129" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP16"/>
- <value value="130" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP16"/>
- <value value="131" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP32"/>
- <value value="132" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP32"/>
-</enum>
-
-<enum name="a7xx_sp_perfcounter_select">
- <value value="0" name="A7XX_PERF_SP_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_SP_ALU_WORKING_CYCLES"/>
- <value value="2" name="A7XX_PERF_SP_EFU_WORKING_CYCLES"/>
- <value value="3" name="A7XX_PERF_SP_STALL_CYCLES_VPC"/>
- <value value="4" name="A7XX_PERF_SP_STALL_CYCLES_TP"/>
- <value value="5" name="A7XX_PERF_SP_STALL_CYCLES_UCHE"/>
- <value value="6" name="A7XX_PERF_SP_STALL_CYCLES_RB"/>
- <value value="7" name="A7XX_PERF_SP_NON_EXECUTION_CYCLES"/>
- <value value="8" name="A7XX_PERF_SP_WAVE_CONTEXTS"/>
- <value value="9" name="A7XX_PERF_SP_WAVE_CONTEXT_CYCLES"/>
- <value value="10" name="A7XX_PERF_SP_STAGE_WAVE_CYCLES"/>
- <value value="11" name="A7XX_PERF_SP_STAGE_WAVE_SAMPLES"/>
- <value value="12" name="A7XX_PERF_SP_VS_STAGE_WAVE_CYCLES"/>
- <value value="13" name="A7XX_PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
- <value value="14" name="A7XX_PERF_SP_FS_STAGE_DURATION_CYCLES"/>
- <value value="15" name="A7XX_PERF_SP_VS_STAGE_DURATION_CYCLES"/>
- <value value="16" name="A7XX_PERF_SP_WAVE_CTRL_CYCLES"/>
- <value value="17" name="A7XX_PERF_SP_WAVE_LOAD_CYCLES"/>
- <value value="18" name="A7XX_PERF_SP_WAVE_EMIT_CYCLES"/>
- <value value="19" name="A7XX_PERF_SP_WAVE_NOP_CYCLES"/>
- <value value="20" name="A7XX_PERF_SP_WAVE_WAIT_CYCLES"/>
- <value value="21" name="A7XX_PERF_SP_WAVE_FETCH_CYCLES"/>
- <value value="22" name="A7XX_PERF_SP_WAVE_IDLE_CYCLES"/>
- <value value="23" name="A7XX_PERF_SP_WAVE_END_CYCLES"/>
- <value value="24" name="A7XX_PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
- <value value="25" name="A7XX_PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
- <value value="26" name="A7XX_PERF_SP_WAVE_JOIN_CYCLES"/>
- <value value="27" name="A7XX_PERF_SP_LM_LOAD_INSTRUCTIONS"/>
- <value value="28" name="A7XX_PERF_SP_LM_STORE_INSTRUCTIONS"/>
- <value value="29" name="A7XX_PERF_SP_LM_ATOMICS"/>
- <value value="30" name="A7XX_PERF_SP_GM_LOAD_INSTRUCTIONS"/>
- <value value="31" name="A7XX_PERF_SP_GM_STORE_INSTRUCTIONS"/>
- <value value="32" name="A7XX_PERF_SP_GM_ATOMICS"/>
- <value value="33" name="A7XX_PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
- <value value="34" name="A7XX_PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
- <value value="35" name="A7XX_PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
- <value value="36" name="A7XX_PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
- <value value="37" name="A7XX_PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
- <value value="38" name="A7XX_PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
- <value value="39" name="A7XX_PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
- <value value="40" name="A7XX_PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
- <value value="41" name="A7XX_PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
- <value value="42" name="A7XX_PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
- <value value="43" name="A7XX_PERF_SP_VS_INSTRUCTIONS"/>
- <value value="44" name="A7XX_PERF_SP_FS_INSTRUCTIONS"/>
- <value value="45" name="A7XX_PERF_SP_ADDR_LOCK_COUNT"/>
- <value value="46" name="A7XX_PERF_SP_UCHE_READ_TRANS"/>
- <value value="47" name="A7XX_PERF_SP_UCHE_WRITE_TRANS"/>
- <value value="48" name="A7XX_PERF_SP_EXPORT_VPC_TRANS"/>
- <value value="49" name="A7XX_PERF_SP_EXPORT_RB_TRANS"/>
- <value value="50" name="A7XX_PERF_SP_PIXELS_KILLED"/>
- <value value="51" name="A7XX_PERF_SP_ICL1_REQUESTS"/>
- <value value="52" name="A7XX_PERF_SP_ICL1_MISSES"/>
- <value value="53" name="A7XX_PERF_SP_HS_INSTRUCTIONS"/>
- <value value="54" name="A7XX_PERF_SP_DS_INSTRUCTIONS"/>
- <value value="55" name="A7XX_PERF_SP_GS_INSTRUCTIONS"/>
- <value value="56" name="A7XX_PERF_SP_CS_INSTRUCTIONS"/>
- <value value="57" name="A7XX_PERF_SP_GPR_READ"/>
- <value value="58" name="A7XX_PERF_SP_GPR_WRITE"/>
- <value value="59" name="A7XX_PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
- <value value="60" name="A7XX_PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
- <value value="61" name="A7XX_PERF_SP_LM_BANK_CONFLICTS"/>
- <value value="62" name="A7XX_PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
- <value value="63" name="A7XX_PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
- <value value="64" name="A7XX_PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
- <value value="65" name="A7XX_PERF_SP_LM_WORKING_CYCLES"/>
- <value value="66" name="A7XX_PERF_SP_DISPATCHER_WORKING_CYCLES"/>
- <value value="67" name="A7XX_PERF_SP_SEQUENCER_WORKING_CYCLES"/>
- <value value="68" name="A7XX_PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
- <value value="69" name="A7XX_PERF_SP_STARVE_CYCLES_HLSQ"/>
- <value value="70" name="A7XX_PERF_SP_NON_EXECUTION_LS_CYCLES"/>
- <value value="71" name="A7XX_PERF_SP_WORKING_EU"/>
- <value value="72" name="A7XX_PERF_SP_ANY_EU_WORKING"/>
- <value value="73" name="A7XX_PERF_SP_WORKING_EU_FS_STAGE"/>
- <value value="74" name="A7XX_PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
- <value value="75" name="A7XX_PERF_SP_WORKING_EU_VS_STAGE"/>
- <value value="76" name="A7XX_PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
- <value value="77" name="A7XX_PERF_SP_WORKING_EU_CS_STAGE"/>
- <value value="78" name="A7XX_PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
- <value value="79" name="A7XX_PERF_SP_GPR_READ_PREFETCH"/>
- <value value="80" name="A7XX_PERF_SP_GPR_READ_CONFLICT"/>
- <value value="81" name="A7XX_PERF_SP_GPR_WRITE_CONFLICT"/>
- <value value="82" name="A7XX_PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
- <value value="83" name="A7XX_PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
- <value value="84" name="A7XX_PERF_SP_EXECUTABLE_WAVES"/>
- <value value="85" name="A7XX_PERF_SP_ICL1_MISS_FETCH_CYCLES"/>
- <value value="86" name="A7XX_PERF_SP_WORKING_EU_LPAC"/>
- <value value="87" name="A7XX_PERF_SP_BYPASS_BUSY_CYCLES"/>
- <value value="88" name="A7XX_PERF_SP_ANY_EU_WORKING_LPAC"/>
- <value value="89" name="A7XX_PERF_SP_WAVE_ALU_CYCLES"/>
- <value value="90" name="A7XX_PERF_SP_WAVE_EFU_CYCLES"/>
- <value value="91" name="A7XX_PERF_SP_WAVE_INT_CYCLES"/>
- <value value="92" name="A7XX_PERF_SP_WAVE_CSP_CYCLES"/>
- <value value="93" name="A7XX_PERF_SP_EWAVE_CONTEXTS"/>
- <value value="94" name="A7XX_PERF_SP_EWAVE_CONTEXT_CYCLES"/>
- <value value="95" name="A7XX_PERF_SP_LPAC_BUSY_CYCLES"/>
- <value value="96" name="A7XX_PERF_SP_LPAC_INSTRUCTIONS"/>
- <value value="97" name="A7XX_PERF_SP_FS_STAGE_1X_WAVES"/>
- <value value="98" name="A7XX_PERF_SP_FS_STAGE_2X_WAVES"/>
- <value value="99" name="A7XX_PERF_SP_QUADS"/>
- <value value="100" name="A7XX_PERF_SP_CS_INVOCATIONS"/>
- <value value="101" name="A7XX_PERF_SP_PIXELS"/>
- <value value="102" name="A7XX_PERF_SP_LPAC_DRAWCALLS"/>
- <value value="103" name="A7XX_PERF_SP_PI_WORKING_CYCLES"/>
- <value value="104" name="A7XX_PERF_SP_WAVE_INPUT_CYCLES"/>
- <value value="105" name="A7XX_PERF_SP_WAVE_OUTPUT_CYCLES"/>
- <value value="106" name="A7XX_PERF_SP_WAVE_HWAVE_WAIT_CYCLES"/>
- <value value="107" name="A7XX_PERF_SP_WAVE_HWAVE_SYNC"/>
- <value value="108" name="A7XX_PERF_SP_OUTPUT_3D_PIXELS"/>
- <value value="109" name="A7XX_PERF_SP_FULL_ALU_MAD_INSTRUCTIONS"/>
- <value value="110" name="A7XX_PERF_SP_HALF_ALU_MAD_INSTRUCTIONS"/>
- <value value="111" name="A7XX_PERF_SP_FULL_ALU_MUL_INSTRUCTIONS"/>
- <value value="112" name="A7XX_PERF_SP_HALF_ALU_MUL_INSTRUCTIONS"/>
- <value value="113" name="A7XX_PERF_SP_FULL_ALU_ADD_INSTRUCTIONS"/>
- <value value="114" name="A7XX_PERF_SP_HALF_ALU_ADD_INSTRUCTIONS"/>
- <value value="115" name="A7XX_PERF_SP_BARY_FP32_INSTRUCTIONS"/>
- <value value="116" name="A7XX_PERF_SP_ALU_GPR_READ_CYCLES"/>
- <value value="117" name="A7XX_PERF_SP_ALU_DATA_FORWARDING_CYCLES"/>
- <value value="118" name="A7XX_PERF_SP_LM_FULL_CYCLES"/>
- <value value="119" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_CYCLES"/>
- <value value="120" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_SAMPLES"/>
- <value value="121" name="A7XX_PERF_SP_FS_STAGE_PI_TEX_INSTRUCTION"/>
- <value value="122" name="A7XX_PERF_SP_RAY_QUERY_INSTRUCTIONS"/>
- <value value="123" name="A7XX_PERF_SP_RBRT_KICKOFF_FIBERS"/>
- <value value="124" name="A7XX_PERF_SP_RBRT_KICKOFF_DQUADS"/>
- <value value="125" name="A7XX_PERF_SP_RTU_BUSY_CYCLES"/>
- <value value="126" name="A7XX_PERF_SP_RTU_L0_HITS"/>
- <value value="127" name="A7XX_PERF_SP_RTU_L0_MISSES"/>
- <value value="128" name="A7XX_PERF_SP_RTU_L0_HIT_ON_MISS"/>
- <value value="129" name="A7XX_PERF_SP_RTU_STALL_CYCLES_WAVE_QUEUE"/>
- <value value="130" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_HIT_QUEUE"/>
- <value value="131" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_MISS_QUEUE"/>
- <value value="132" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0D_IDX_QUEUE"/>
- <value value="133" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0DATA"/>
- <value value="134" name="A7XX_PERF_SP_RTU_STALL_CYCLES_REPLACE_CNT"/>
- <value value="135" name="A7XX_PERF_SP_RTU_STALL_CYCLES_MRG_CNT"/>
- <value value="136" name="A7XX_PERF_SP_RTU_STALL_CYCLES_UCHE"/>
- <value value="137" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_L0"/>
- <value value="138" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_INS_FIFO"/>
- <value value="139" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_CYCLES"/>
- <value value="140" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_SAMPLES"/>
- <value value="141" name="A7XX_PERF_SP_STCHE_MISS_INC_VS"/>
- <value value="142" name="A7XX_PERF_SP_STCHE_MISS_INC_FS"/>
- <value value="143" name="A7XX_PERF_SP_STCHE_MISS_INC_BV"/>
- <value value="144" name="A7XX_PERF_SP_STCHE_MISS_INC_LPAC"/>
- <value value="145" name="A7XX_PERF_SP_VGPR_ACTIVE_CONTEXTS"/>
- <value value="146" name="A7XX_PERF_SP_PGPR_ALLOC_CONTEXTS"/>
- <value value="147" name="A7XX_PERF_SP_VGPR_ALLOC_CONTEXTS"/>
- <value value="148" name="A7XX_PERF_SP_RTU_RAY_BOX_INTERSECTIONS"/>
- <value value="149" name="A7XX_PERF_SP_RTU_RAY_TRIANGLE_INTERSECTIONS"/>
- <value value="150" name="A7XX_PERF_SP_SCH_STALL_CYCLES_RTU"/>
-</enum>
-
-<enum name="a7xx_rb_perfcounter_select">
- <value value="0" name="A7XX_PERF_RB_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_RB_STALL_CYCLES_HLSQ"/>
- <value value="2" name="A7XX_PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
- <value value="3" name="A7XX_PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
- <value value="4" name="A7XX_PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
- <value value="5" name="A7XX_PERF_RB_STARVE_CYCLES_SP"/>
- <value value="6" name="A7XX_PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
- <value value="7" name="A7XX_PERF_RB_STARVE_CYCLES_CCU"/>
- <value value="8" name="A7XX_PERF_RB_STARVE_CYCLES_Z_PLANE"/>
- <value value="9" name="A7XX_PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
- <value value="10" name="A7XX_PERF_RB_Z_WORKLOAD"/>
- <value value="11" name="A7XX_PERF_RB_HLSQ_ACTIVE"/>
- <value value="12" name="A7XX_PERF_RB_Z_READ"/>
- <value value="13" name="A7XX_PERF_RB_Z_WRITE"/>
- <value value="14" name="A7XX_PERF_RB_C_READ"/>
- <value value="15" name="A7XX_PERF_RB_C_WRITE"/>
- <value value="16" name="A7XX_PERF_RB_TOTAL_PASS"/>
- <value value="17" name="A7XX_PERF_RB_Z_PASS"/>
- <value value="18" name="A7XX_PERF_RB_Z_FAIL"/>
- <value value="19" name="A7XX_PERF_RB_S_FAIL"/>
- <value value="20" name="A7XX_PERF_RB_BLENDED_FXP_COMPONENTS"/>
- <value value="21" name="A7XX_PERF_RB_BLENDED_FP16_COMPONENTS"/>
- <value value="22" name="A7XX_PERF_RB_PS_INVOCATIONS"/>
- <value value="23" name="A7XX_PERF_RB_2D_ALIVE_CYCLES"/>
- <value value="24" name="A7XX_PERF_RB_2D_STALL_CYCLES_A2D"/>
- <value value="25" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SRC"/>
- <value value="26" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SP"/>
- <value value="27" name="A7XX_PERF_RB_2D_STARVE_CYCLES_DST"/>
- <value value="28" name="A7XX_PERF_RB_2D_VALID_PIXELS"/>
- <value value="29" name="A7XX_PERF_RB_3D_PIXELS"/>
- <value value="30" name="A7XX_PERF_RB_BLENDER_WORKING_CYCLES"/>
- <value value="31" name="A7XX_PERF_RB_ZPROC_WORKING_CYCLES"/>
- <value value="32" name="A7XX_PERF_RB_CPROC_WORKING_CYCLES"/>
- <value value="33" name="A7XX_PERF_RB_SAMPLER_WORKING_CYCLES"/>
- <value value="34" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
- <value value="35" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
- <value value="36" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
- <value value="37" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
- <value value="38" name="A7XX_PERF_RB_STALL_CYCLES_VPC"/>
- <value value="39" name="A7XX_PERF_RB_2D_INPUT_TRANS"/>
- <value value="40" name="A7XX_PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
- <value value="41" name="A7XX_PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
- <value value="42" name="A7XX_PERF_RB_BLENDED_FP32_COMPONENTS"/>
- <value value="43" name="A7XX_PERF_RB_COLOR_PIX_TILES"/>
- <value value="44" name="A7XX_PERF_RB_STALL_CYCLES_CCU"/>
- <value value="45" name="A7XX_PERF_RB_EARLY_Z_ARB3_GRANT"/>
- <value value="46" name="A7XX_PERF_RB_LATE_Z_ARB3_GRANT"/>
- <value value="47" name="A7XX_PERF_RB_EARLY_Z_SKIP_GRANT"/>
- <value value="48" name="A7XX_PERF_RB_VRS_1x1_QUADS"/>
- <value value="49" name="A7XX_PERF_RB_VRS_2x1_QUADS"/>
- <value value="50" name="A7XX_PERF_RB_VRS_1x2_QUADS"/>
- <value value="51" name="A7XX_PERF_RB_VRS_2x2_QUADS"/>
- <value value="52" name="A7XX_PERF_RB_VRS_4x2_QUADS"/>
- <value value="53" name="A7XX_PERF_RB_VRS_4x4_QUADS"/>
-</enum>
-
-<enum name="a7xx_vsc_perfcounter_select">
- <value value="0" name="A7XX_PERF_VSC_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_VSC_WORKING_CYCLES"/>
- <value value="2" name="A7XX_PERF_VSC_STALL_CYCLES_UCHE"/>
- <value value="3" name="A7XX_PERF_VSC_EOT_NUM"/>
- <value value="4" name="A7XX_PERF_VSC_INPUT_TILES"/>
-</enum>
-
-<enum name="a7xx_ccu_perfcounter_select">
- <value value="0" name="A7XX_PERF_CCU_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
- <value value="2" name="A7XX_PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
- <value value="3" name="A7XX_PERF_CCU_DEPTH_BLOCKS"/>
- <value value="4" name="A7XX_PERF_CCU_COLOR_BLOCKS"/>
- <value value="5" name="A7XX_PERF_CCU_DEPTH_BLOCK_HIT"/>
- <value value="6" name="A7XX_PERF_CCU_COLOR_BLOCK_HIT"/>
- <value value="7" name="A7XX_PERF_CCU_PARTIAL_BLOCK_READ"/>
- <value value="8" name="A7XX_PERF_CCU_GMEM_READ"/>
- <value value="9" name="A7XX_PERF_CCU_GMEM_WRITE"/>
- <value value="10" name="A7XX_PERF_CCU_2D_RD_REQ"/>
- <value value="11" name="A7XX_PERF_CCU_2D_WR_REQ"/>
- <value value="12" name="A7XX_PERF_CCU_UBWC_COLOR_BLOCKS_CONCURRENT"/>
- <value value="13" name="A7XX_PERF_CCU_UBWC_DEPTH_BLOCKS_CONCURRENT"/>
- <value value="14" name="A7XX_PERF_CCU_COLOR_RESOLVE_DROPPED"/>
- <value value="15" name="A7XX_PERF_CCU_DEPTH_RESOLVE_DROPPED"/>
- <value value="16" name="A7XX_PERF_CCU_COLOR_RENDER_CONCURRENT"/>
- <value value="17" name="A7XX_PERF_CCU_DEPTH_RENDER_CONCURRENT"/>
- <value value="18" name="A7XX_PERF_CCU_COLOR_RESOLVE_AFTER_RENDER"/>
- <value value="19" name="A7XX_PERF_CCU_DEPTH_RESOLVE_AFTER_RENDER"/>
- <value value="20" name="A7XX_PERF_CCU_GMEM_EXTRA_DEPTH_READ"/>
- <value value="21" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA"/>
- <value value="22" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA_FULL"/>
-</enum>
-
-<enum name="a7xx_lrz_perfcounter_select">
- <value value="0" name="A7XX_PERF_LRZ_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_LRZ_STARVE_CYCLES_RAS"/>
- <value value="2" name="A7XX_PERF_LRZ_STALL_CYCLES_RB"/>
- <value value="3" name="A7XX_PERF_LRZ_STALL_CYCLES_VSC"/>
- <value value="4" name="A7XX_PERF_LRZ_STALL_CYCLES_VPC"/>
- <value value="5" name="A7XX_PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
- <value value="6" name="A7XX_PERF_LRZ_STALL_CYCLES_UCHE"/>
- <value value="7" name="A7XX_PERF_LRZ_LRZ_READ"/>
- <value value="8" name="A7XX_PERF_LRZ_LRZ_WRITE"/>
- <value value="9" name="A7XX_PERF_LRZ_READ_LATENCY"/>
- <value value="10" name="A7XX_PERF_LRZ_MERGE_CACHE_UPDATING"/>
- <value value="11" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
- <value value="12" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
- <value value="13" name="A7XX_PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
- <value value="14" name="A7XX_PERF_LRZ_FULL_8X8_TILES"/>
- <value value="15" name="A7XX_PERF_LRZ_PARTIAL_8X8_TILES"/>
- <value value="16" name="A7XX_PERF_LRZ_TILE_KILLED"/>
- <value value="17" name="A7XX_PERF_LRZ_TOTAL_PIXEL"/>
- <value value="18" name="A7XX_PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
- <value value="19" name="A7XX_PERF_LRZ_FEEDBACK_ACCEPT"/>
- <value value="20" name="A7XX_PERF_LRZ_FEEDBACK_DISCARD"/>
- <value value="21" name="A7XX_PERF_LRZ_FEEDBACK_STALL"/>
- <value value="22" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
- <value value="23" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
- <value value="24" name="A7XX_PERF_LRZ_RAS_MASK_TRANS"/>
- <value value="25" name="A7XX_PERF_LRZ_STALL_CYCLES_MVC"/>
- <value value="26" name="A7XX_PERF_LRZ_TILE_KILLED_BY_IMAGE_VRS"/>
- <value value="27" name="A7XX_PERF_LRZ_TILE_KILLED_BY_Z"/>
-</enum>
-
-<enum name="a7xx_cmp_perfcounter_select">
- <value value="0" name="A7XX_PERF_CMPDECMP_STALL_CYCLES_ARB"/>
- <value value="1" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
- <value value="2" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
- <value value="3" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
- <value value="4" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
- <value value="5" name="A7XX_PERF_CMPDECMP_VBIF_READ_REQUEST"/>
- <value value="6" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
- <value value="7" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA"/>
- <value value="8" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA"/>
- <value value="9" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
- <value value="10" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
- <value value="11" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
- <value value="12" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
- <value value="13" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
- <value value="14" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
- <value value="15" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
- <value value="16" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
- <value value="17" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
- <value value="18" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
- <value value="19" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
- <value value="20" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
- <value value="21" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
- <value value="22" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
- <value value="23" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
- <value value="24" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
- <value value="25" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
- <value value="26" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
- <value value="27" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
- <value value="28" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
- <value value="29" name="A7XX_PERF_CMPDECMP_RESOLVE_EVENTS"/>
- <value value="30" name="A7XX_PERF_CMPDECMP_CONCURRENT_RESOLVE_EVENTS"/>
- <value value="31" name="A7XX_PERF_CMPDECMP_DROPPED_CLEAR_EVENTS"/>
- <value value="32" name="A7XX_PERF_CMPDECMP_ST_BLOCKS_CONCURRENT"/>
- <value value="33" name="A7XX_PERF_CMPDECMP_LRZ_ST_BLOCKS_CONCURRENT"/>
- <value value="34" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG0_COUNT"/>
- <value value="35" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG1_COUNT"/>
- <value value="36" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG2_COUNT"/>
- <value value="37" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG3_COUNT"/>
- <value value="38" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG4_COUNT"/>
- <value value="39" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG5_COUNT"/>
- <value value="40" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG6_COUNT"/>
- <value value="41" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG8_COUNT"/>
- <value value="42" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG0_COUNT"/>
- <value value="43" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG1_COUNT"/>
- <value value="44" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG2_COUNT"/>
- <value value="45" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG3_COUNT"/>
- <value value="46" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG4_COUNT"/>
- <value value="47" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG5_COUNT"/>
- <value value="48" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG6_COUNT"/>
- <value value="49" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG8_COUNT"/>
-</enum>
-
-<enum name="a7xx_gbif_perfcounter_select">
- <value value="0" name="A7XX_PERF_GBIF_RESERVED_0"/>
- <value value="1" name="A7XX_PERF_GBIF_RESERVED_1"/>
- <value value="2" name="A7XX_PERF_GBIF_RESERVED_2"/>
- <value value="3" name="A7XX_PERF_GBIF_RESERVED_3"/>
- <value value="4" name="A7XX_PERF_GBIF_RESERVED_4"/>
- <value value="5" name="A7XX_PERF_GBIF_RESERVED_5"/>
- <value value="6" name="A7XX_PERF_GBIF_RESERVED_6"/>
- <value value="7" name="A7XX_PERF_GBIF_RESERVED_7"/>
- <value value="8" name="A7XX_PERF_GBIF_RESERVED_8"/>
- <value value="9" name="A7XX_PERF_GBIF_RESERVED_9"/>
- <value value="10" name="A7XX_PERF_GBIF_AXI0_READ_REQUESTS_TOTAL"/>
- <value value="11" name="A7XX_PERF_GBIF_AXI1_READ_REQUESTS_TOTAL"/>
- <value value="12" name="A7XX_PERF_GBIF_RESERVED_12"/>
- <value value="13" name="A7XX_PERF_GBIF_RESERVED_13"/>
- <value value="14" name="A7XX_PERF_GBIF_RESERVED_14"/>
- <value value="15" name="A7XX_PERF_GBIF_RESERVED_15"/>
- <value value="16" name="A7XX_PERF_GBIF_RESERVED_16"/>
- <value value="17" name="A7XX_PERF_GBIF_RESERVED_17"/>
- <value value="18" name="A7XX_PERF_GBIF_RESERVED_18"/>
- <value value="19" name="A7XX_PERF_GBIF_RESERVED_19"/>
- <value value="20" name="A7XX_PERF_GBIF_RESERVED_20"/>
- <value value="21" name="A7XX_PERF_GBIF_RESERVED_21"/>
- <value value="22" name="A7XX_PERF_GBIF_AXI0_WRITE_REQUESTS_TOTAL"/>
- <value value="23" name="A7XX_PERF_GBIF_AXI1_WRITE_REQUESTS_TOTAL"/>
- <value value="24" name="A7XX_PERF_GBIF_RESERVED_24"/>
- <value value="25" name="A7XX_PERF_GBIF_RESERVED_25"/>
- <value value="26" name="A7XX_PERF_GBIF_RESERVED_26"/>
- <value value="27" name="A7XX_PERF_GBIF_RESERVED_27"/>
- <value value="28" name="A7XX_PERF_GBIF_RESERVED_28"/>
- <value value="29" name="A7XX_PERF_GBIF_RESERVED_29"/>
- <value value="30" name="A7XX_PERF_GBIF_RESERVED_30"/>
- <value value="31" name="A7XX_PERF_GBIF_RESERVED_31"/>
- <value value="32" name="A7XX_PERF_GBIF_RESERVED_32"/>
- <value value="33" name="A7XX_PERF_GBIF_RESERVED_33"/>
- <value value="34" name="A7XX_PERF_GBIF_AXI0_READ_DATA_BEATS_TOTAL"/>
- <value value="35" name="A7XX_PERF_GBIF_AXI1_READ_DATA_BEATS_TOTAL"/>
- <value value="36" name="A7XX_PERF_GBIF_RESERVED_36"/>
- <value value="37" name="A7XX_PERF_GBIF_RESERVED_37"/>
- <value value="38" name="A7XX_PERF_GBIF_RESERVED_38"/>
- <value value="39" name="A7XX_PERF_GBIF_RESERVED_39"/>
- <value value="40" name="A7XX_PERF_GBIF_RESERVED_40"/>
- <value value="41" name="A7XX_PERF_GBIF_RESERVED_41"/>
- <value value="42" name="A7XX_PERF_GBIF_RESERVED_42"/>
- <value value="43" name="A7XX_PERF_GBIF_RESERVED_43"/>
- <value value="44" name="A7XX_PERF_GBIF_RESERVED_44"/>
- <value value="45" name="A7XX_PERF_GBIF_RESERVED_45"/>
- <value value="46" name="A7XX_PERF_GBIF_AXI0_WRITE_DATA_BEATS_TOTAL"/>
- <value value="47" name="A7XX_PERF_GBIF_AXI1_WRITE_DATA_BEATS_TOTAL"/>
- <value value="48" name="A7XX_PERF_GBIF_RESERVED_48"/>
- <value value="49" name="A7XX_PERF_GBIF_RESERVED_49"/>
- <value value="50" name="A7XX_PERF_GBIF_RESERVED_50"/>
- <value value="51" name="A7XX_PERF_GBIF_RESERVED_51"/>
- <value value="52" name="A7XX_PERF_GBIF_RESERVED_52"/>
- <value value="53" name="A7XX_PERF_GBIF_RESERVED_53"/>
- <value value="54" name="A7XX_PERF_GBIF_RESERVED_54"/>
- <value value="55" name="A7XX_PERF_GBIF_RESERVED_55"/>
- <value value="56" name="A7XX_PERF_GBIF_RESERVED_56"/>
- <value value="57" name="A7XX_PERF_GBIF_RESERVED_57"/>
- <value value="58" name="A7XX_PERF_GBIF_RESERVED_58"/>
- <value value="59" name="A7XX_PERF_GBIF_RESERVED_59"/>
- <value value="60" name="A7XX_PERF_GBIF_RESERVED_60"/>
- <value value="61" name="A7XX_PERF_GBIF_RESERVED_61"/>
- <value value="62" name="A7XX_PERF_GBIF_RESERVED_62"/>
- <value value="63" name="A7XX_PERF_GBIF_RESERVED_63"/>
- <value value="64" name="A7XX_PERF_GBIF_RESERVED_64"/>
- <value value="65" name="A7XX_PERF_GBIF_RESERVED_65"/>
- <value value="66" name="A7XX_PERF_GBIF_RESERVED_66"/>
- <value value="67" name="A7XX_PERF_GBIF_RESERVED_67"/>
- <value value="68" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_RD_ALL"/>
- <value value="69" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_RD_ALL"/>
- <value value="70" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_WR_ALL"/>
- <value value="71" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_WR_ALL"/>
- <value value="72" name="A7XX_PERF_GBIF_AXI_CH0_REQUEST_HELD_OFF"/>
- <value value="73" name="A7XX_PERF_GBIF_AXI_CH1_REQUEST_HELD_OFF"/>
- <value value="74" name="A7XX_PERF_GBIF_AXI_REQUEST_HELD_OFF"/>
- <value value="75" name="A7XX_PERF_GBIF_AXI_CH0_WRITE_DATA_HELD_OFF"/>
- <value value="76" name="A7XX_PERF_GBIF_AXI_CH1_WRITE_DATA_HELD_OFF"/>
- <value value="77" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_DATA_HELD_OFF"/>
- <value value="78" name="A7XX_PERF_GBIF_AXI_ALL_READ_BEATS"/>
- <value value="79" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_BEATS"/>
- <value value="80" name="A7XX_PERF_GBIF_AXI_ALL_BEATS"/>
-</enum>
-
-<enum name="a7xx_ufc_perfcounter_select">
- <value value="0" name="A7XX_PERF_UFC_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_UFC_READ_DATA_VBIF"/>
- <value value="2" name="A7XX_PERF_UFC_WRITE_DATA_VBIF"/>
- <value value="3" name="A7XX_PERF_UFC_READ_REQUEST_VBIF"/>
- <value value="4" name="A7XX_PERF_UFC_WRITE_REQUEST_VBIF"/>
- <value value="5" name="A7XX_PERF_UFC_LRZ_FILTER_HIT"/>
- <value value="6" name="A7XX_PERF_UFC_LRZ_FILTER_MISS"/>
- <value value="7" name="A7XX_PERF_UFC_CRE_FILTER_HIT"/>
- <value value="8" name="A7XX_PERF_UFC_CRE_FILTER_MISS"/>
- <value value="9" name="A7XX_PERF_UFC_SP_FILTER_HIT"/>
- <value value="10" name="A7XX_PERF_UFC_SP_FILTER_MISS"/>
- <value value="11" name="A7XX_PERF_UFC_SP_REQUESTS"/>
- <value value="12" name="A7XX_PERF_UFC_TP_FILTER_HIT"/>
- <value value="13" name="A7XX_PERF_UFC_TP_FILTER_MISS"/>
- <value value="14" name="A7XX_PERF_UFC_TP_REQUESTS"/>
- <value value="15" name="A7XX_PERF_UFC_MAIN_HIT_LRZ_PREFETCH"/>
- <value value="16" name="A7XX_PERF_UFC_MAIN_HIT_CRE_PREFETCH"/>
- <value value="17" name="A7XX_PERF_UFC_MAIN_HIT_SP_PREFETCH"/>
- <value value="18" name="A7XX_PERF_UFC_MAIN_HIT_TP_PREFETCH"/>
- <value value="19" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_READ"/>
- <value value="20" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_WRITE"/>
- <value value="21" name="A7XX_PERF_UFC_MAIN_MISS_LRZ_PREFETCH"/>
- <value value="22" name="A7XX_PERF_UFC_MAIN_MISS_CRE_PREFETCH"/>
- <value value="23" name="A7XX_PERF_UFC_MAIN_MISS_SP_PREFETCH"/>
- <value value="24" name="A7XX_PERF_UFC_MAIN_MISS_TP_PREFETCH"/>
- <value value="25" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_READ"/>
- <value value="26" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_WRITE"/>
- <value value="27" name="A7XX_PERF_UFC_UBWC_READ_UFC_TRANS"/>
- <value value="28" name="A7XX_PERF_UFC_UBWC_WRITE_UFC_TRANS"/>
- <value value="29" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_CMD"/>
- <value value="30" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_RDATA"/>
- <value value="31" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_WDATA"/>
- <value value="32" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_WR_FLAG"/>
- <value value="33" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_FLAG_RTN"/>
- <value value="34" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_EVENT"/>
- <value value="35" name="A7XX_PERF_UFC_LRZ_PREFETCH_STALLED_CYCLES"/>
- <value value="36" name="A7XX_PERF_UFC_CRE_PREFETCH_STALLED_CYCLES"/>
- <value value="37" name="A7XX_PERF_UFC_SPTP_PREFETCH_STALLED_CYCLES"/>
- <value value="38" name="A7XX_PERF_UFC_UBWC_RD_STALLED_CYCLES"/>
- <value value="39" name="A7XX_PERF_UFC_UBWC_WR_STALLED_CYCLES"/>
- <value value="40" name="A7XX_PERF_UFC_PREFETCH_STALLED_CYCLES"/>
- <value value="41" name="A7XX_PERF_UFC_EVICTION_STALLED_CYCLES"/>
- <value value="42" name="A7XX_PERF_UFC_LOCK_STALLED_CYCLES"/>
- <value value="43" name="A7XX_PERF_UFC_MISS_LATENCY_CYCLES"/>
- <value value="44" name="A7XX_PERF_UFC_MISS_LATENCY_SAMPLES"/>
- <value value="45" name="A7XX_PERF_UFC_UBWC_REQ_STALLED_CYCLES"/>
- <value value="46" name="A7XX_PERF_UFC_TP_HINT_TAG_MISS"/>
- <value value="47" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_RDY"/>
- <value value="48" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_NRDY"/>
- <value value="49" name="A7XX_PERF_UFC_TP_HINT_IS_FCLEAR"/>
- <value value="50" name="A7XX_PERF_UFC_TP_HINT_IS_ALPHA0"/>
- <value value="51" name="A7XX_PERF_UFC_SP_L1_FILTER_HIT"/>
- <value value="52" name="A7XX_PERF_UFC_SP_L1_FILTER_MISS"/>
- <value value="53" name="A7XX_PERF_UFC_SP_L1_FILTER_REQUESTS"/>
- <value value="54" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_RDY"/>
- <value value="55" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_NRDY"/>
- <value value="56" name="A7XX_PERF_UFC_TP_L1_TAG_MISS"/>
- <value value="57" name="A7XX_PERF_UFC_TP_L1_FILTER_REQUESTS"/>
-</enum>
-
<domain name="A6XX" width="32" prefix="variant" varset="chip">
<bitset name="A6XX_RBBM_INT_0_MASK" inline="no" varset="chip">
<bitfield name="RBBM_GPU_IDLE" pos="0" type="boolean"/>
@@ -2371,7 +177,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x08ab" name="CP_CONTEXT_SWITCH_LEVEL_STATUS" variants="A7XX-"/>
<array offset="0x08D0" name="CP_PERFCTR_CP_SEL" stride="1" length="14"/>
<array offset="0x08e0" name="CP_BV_PERFCTR_CP_SEL" stride="1" length="7" variants="A7XX-"/>
- <reg64 offset="0x0900" name="CP_CRASH_SCRIPT_BASE"/>
+ <reg64 offset="0x0900" name="CP_CRASH_DUMP_SCRIPT_BASE"/>
<reg32 offset="0x0902" name="CP_CRASH_DUMP_CNTL"/>
<reg32 offset="0x0903" name="CP_CRASH_DUMP_STATUS"/>
<reg32 offset="0x0908" name="CP_SQE_STAT_ADDR"/>
@@ -2400,22 +206,22 @@ to upconvert to 32b float internally?
-->
<reg64 offset="0x0934" name="CP_VSD_BASE"/>
- <bitset name="a6xx_roq_stat" inline="yes">
+ <bitset name="a6xx_roq_status" inline="yes">
<bitfield name="RPTR" low="0" high="9"/>
<bitfield name="WPTR" low="16" high="25"/>
</bitset>
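	<!-- A minimal C sketch of interpreting an a6xx_roq_status value as
	     ring-buffer occupancy: RPTR/WPTR are the 10-bit read/write indices
	     defined above, so the number of queued entries is their difference
	     modulo 2^10. The "entries" reading is an inference from the field
	     names, not something the XML states.

	     #include <stdint.h>

	     static uint32_t roq_entries(uint32_t status)
	     {
	         uint32_t rptr = status & 0x3ff;          /* RPTR: bits 0..9   */
	         uint32_t wptr = (status >> 16) & 0x3ff;  /* WPTR: bits 16..25 */
	         return (wptr - rptr) & 0x3ff;            /* modulo wrap */
	     }
	-->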
- <reg32 offset="0x0939" name="CP_ROQ_RB_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093a" name="CP_ROQ_IB1_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093b" name="CP_ROQ_IB2_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093c" name="CP_ROQ_SDS_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093d" name="CP_ROQ_MRB_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093e" name="CP_ROQ_VSD_STAT" type="a6xx_roq_stat"/>
-
- <reg32 offset="0x0943" name="CP_IB1_DWORDS"/>
- <reg32 offset="0x0944" name="CP_IB2_DWORDS"/>
- <reg32 offset="0x0945" name="CP_SDS_DWORDS"/>
- <reg32 offset="0x0946" name="CP_MRB_DWORDS"/>
- <reg32 offset="0x0947" name="CP_VSD_DWORDS"/>
+ <reg32 offset="0x0939" name="CP_ROQ_RB_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093a" name="CP_ROQ_IB1_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093b" name="CP_ROQ_IB2_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093c" name="CP_ROQ_SDS_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093d" name="CP_ROQ_MRB_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093e" name="CP_ROQ_VSD_STATUS" type="a6xx_roq_status"/>
+
+ <reg32 offset="0x0943" name="CP_IB1_INIT_SIZE"/>
+ <reg32 offset="0x0944" name="CP_IB2_INIT_SIZE"/>
+ <reg32 offset="0x0945" name="CP_SDS_INIT_SIZE"/>
+ <reg32 offset="0x0946" name="CP_MRB_INIT_SIZE"/>
+ <reg32 offset="0x0947" name="CP_VSD_INIT_SIZE"/>
<reg32 offset="0x0948" name="CP_ROQ_AVAIL_RB">
<doc>number of remaining dwords incl current dword being consumed?</doc>
@@ -2451,6 +257,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x098D" name="CP_AHB_CNTL"/>
<reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" variants="A6XX"/>
<reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" type="a7xx_aperture_cntl" variants="A7XX-"/>
+ <reg32 offset="0x0A01" name="CP_APERTURE_CNTL_SQE" variants="A6XX"/>
<reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" variants="A6XX"/>
<reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" type="a7xx_aperture_cntl" variants="A7XX-"/>
@@ -2468,8 +275,8 @@ to upconvert to 32b float internally?
<reg32 offset="0x0a97" name="CP_BV_MEM_POOL_DBG_DATA" variants="A7XX-"/>
<reg64 offset="0x0a98" name="CP_BV_RB_RPTR_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0a9a" name="CP_RESOURCE_TBL_DBG_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0a9b" name="CP_RESOURCE_TBL_DBG_DATA" variants="A7XX-"/>
+ <reg32 offset="0x0a9a" name="CP_RESOURCE_TABLE_DBG_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0a9b" name="CP_RESOURCE_TABLE_DBG_DATA" variants="A7XX-"/>
<reg32 offset="0x0ad0" name="CP_BV_APRIV_CNTL" variants="A7XX-"/>
<reg32 offset="0x0ada" name="CP_BV_CHICKEN_DBG" variants="A7XX-"/>
@@ -2619,28 +426,17 @@ to upconvert to 32b float internally?
 vertices in, number of primitives assembled etc.
-->
- <reg32 offset="0x0540" name="RBBM_PRIMCTR_0_LO"/> <!-- vs vertices in -->
- <reg32 offset="0x0541" name="RBBM_PRIMCTR_0_HI"/>
- <reg32 offset="0x0542" name="RBBM_PRIMCTR_1_LO"/> <!-- vs primitives out -->
- <reg32 offset="0x0543" name="RBBM_PRIMCTR_1_HI"/>
- <reg32 offset="0x0544" name="RBBM_PRIMCTR_2_LO"/> <!-- hs vertices in -->
- <reg32 offset="0x0545" name="RBBM_PRIMCTR_2_HI"/>
- <reg32 offset="0x0546" name="RBBM_PRIMCTR_3_LO"/> <!-- hs patches out -->
- <reg32 offset="0x0547" name="RBBM_PRIMCTR_3_HI"/>
- <reg32 offset="0x0548" name="RBBM_PRIMCTR_4_LO"/> <!-- dss vertices in -->
- <reg32 offset="0x0549" name="RBBM_PRIMCTR_4_HI"/>
- <reg32 offset="0x054a" name="RBBM_PRIMCTR_5_LO"/> <!-- ds primitives out -->
- <reg32 offset="0x054b" name="RBBM_PRIMCTR_5_HI"/>
- <reg32 offset="0x054c" name="RBBM_PRIMCTR_6_LO"/> <!-- gs primitives in -->
- <reg32 offset="0x054d" name="RBBM_PRIMCTR_6_HI"/>
- <reg32 offset="0x054e" name="RBBM_PRIMCTR_7_LO"/> <!-- gs primitives out -->
- <reg32 offset="0x054f" name="RBBM_PRIMCTR_7_HI"/>
- <reg32 offset="0x0550" name="RBBM_PRIMCTR_8_LO"/> <!-- gs primitives out -->
- <reg32 offset="0x0551" name="RBBM_PRIMCTR_8_HI"/>
- <reg32 offset="0x0552" name="RBBM_PRIMCTR_9_LO"/> <!-- raster primitives in -->
- <reg32 offset="0x0553" name="RBBM_PRIMCTR_9_HI"/>
- <reg32 offset="0x0554" name="RBBM_PRIMCTR_10_LO"/>
- <reg32 offset="0x0555" name="RBBM_PRIMCTR_10_HI"/>
+ <reg64 offset="0x0540" name="RBBM_PIPESTAT_IAVERTICES"/>
+ <reg64 offset="0x0542" name="RBBM_PIPESTAT_IAPRIMITIVES"/>
+ <reg64 offset="0x0544" name="RBBM_PIPESTAT_VSINVOCATIONS"/>
+ <reg64 offset="0x0546" name="RBBM_PIPESTAT_HSINVOCATIONS"/>
+ <reg64 offset="0x0548" name="RBBM_PIPESTAT_DSINVOCATIONS"/>
+ <reg64 offset="0x054a" name="RBBM_PIPESTAT_GSINVOCATIONS"/>
+ <reg64 offset="0x054c" name="RBBM_PIPESTAT_GSPRIMITIVES"/>
+ <reg64 offset="0x054e" name="RBBM_PIPESTAT_CINVOCATIONS"/>
+ <reg64 offset="0x0550" name="RBBM_PIPESTAT_CPRIMITIVES"/>
+ <reg64 offset="0x0552" name="RBBM_PIPESTAT_PSINVOCATIONS"/>
+ <reg64 offset="0x0554" name="RBBM_PIPESTAT_CSINVOCATIONS"/>
<reg32 offset="0xF400" name="RBBM_SECVID_TRUST_CNTL"/>
<reg64 offset="0xF800" name="RBBM_SECVID_TSB_TRUSTED_BASE"/>
@@ -2779,7 +575,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x0011f" name="RBBM_CGC_P2S_TRIG_CMD" variants="A7XX-"/>
<reg32 offset="0x00120" name="RBBM_CLOCK_CNTL_TEX_FCHE"/>
<reg32 offset="0x00121" name="RBBM_CLOCK_DELAY_TEX_FCHE"/>
- <reg32 offset="0x00122" name="RBBM_CLOCK_HYST_TEX_FCHE"/>
+ <reg32 offset="0x00122" name="RBBM_CLOCK_HYST_TEX_FCHE" variants="A6XX"/>
<reg32 offset="0x00122" name="RBBM_CGC_P2S_STATUS" variants="A7XX-">
<bitfield name="TXDONE" pos="0" type="boolean"/>
</reg32>
@@ -2840,7 +636,7 @@ to upconvert to 32b float internally?
</reg32>
<reg32 offset="0x062f" name="DBGC_CFG_DBGBUS_TRACE_BUF1"/>
<reg32 offset="0x0630" name="DBGC_CFG_DBGBUS_TRACE_BUF2"/>
- <array offset="0x0CD8" name="VSC_PERFCTR_VSC_SEL" stride="1" length="2"/>
+ <array offset="0x0CD8" name="VSC_PERFCTR_VSC_SEL" stride="1" length="2" variants="A6XX"/>
<reg32 offset="0x0CD8" name="VSC_UNKNOWN_0CD8" variants="A7XX">
<doc>
Set to true when binning, isn't changed afterwards
@@ -2936,8 +732,8 @@ to upconvert to 32b float internally?
<bitfield name="WIDTH" low="0" high="7" shr="5" type="uint"/>
<bitfield name="HEIGHT" low="8" high="16" shr="4" type="uint"/>
</reg32>
- <reg64 offset="0x0c03" name="VSC_DRAW_STRM_SIZE_ADDRESS" type="waddress" usage="cmd"/>
- <reg32 offset="0x0c06" name="VSC_BIN_COUNT" usage="rp_blit">
+ <reg64 offset="0x0c03" name="VSC_SIZE_BASE" type="waddress" usage="cmd"/>
+ <reg32 offset="0x0c06" name="VSC_EXPANDED_BIN_CNTL" usage="rp_blit">
<bitfield name="NX" low="1" high="10" type="uint"/>
<bitfield name="NY" low="11" high="20" type="uint"/>
</reg32>
@@ -2967,14 +763,14 @@ to upconvert to 32b float internally?
LIMIT is set to PITCH - 64, to make room for a bit of overflow
-->
- <reg64 offset="0x0c30" name="VSC_PRIM_STRM_ADDRESS" type="waddress" usage="cmd"/>
- <reg32 offset="0x0c32" name="VSC_PRIM_STRM_PITCH" usage="cmd"/>
- <reg32 offset="0x0c33" name="VSC_PRIM_STRM_LIMIT" usage="cmd"/>
- <reg64 offset="0x0c34" name="VSC_DRAW_STRM_ADDRESS" type="waddress" usage="cmd"/>
- <reg32 offset="0x0c36" name="VSC_DRAW_STRM_PITCH" usage="cmd"/>
- <reg32 offset="0x0c37" name="VSC_DRAW_STRM_LIMIT" usage="cmd"/>
-
- <array offset="0x0c38" name="VSC_STATE" stride="1" length="32" usage="rp_blit">
+ <reg64 offset="0x0c30" name="VSC_PIPE_DATA_PRIM_BASE" type="waddress" usage="cmd"/>
+ <reg32 offset="0x0c32" name="VSC_PIPE_DATA_PRIM_STRIDE" usage="cmd"/>
+ <reg32 offset="0x0c33" name="VSC_PIPE_DATA_PRIM_LENGTH" usage="cmd"/>
+ <reg64 offset="0x0c34" name="VSC_PIPE_DATA_DRAW_BASE" type="waddress" usage="cmd"/>
+ <reg32 offset="0x0c36" name="VSC_PIPE_DATA_DRAW_STRIDE" usage="cmd"/>
+ <reg32 offset="0x0c37" name="VSC_PIPE_DATA_DRAW_LENGTH" usage="cmd"/>
+
+ <array offset="0x0c38" name="VSC_CHANNEL_VISIBILITY" stride="1" length="32" usage="rp_blit">
<doc>
Seems to be a bitmap of which tiles mapped to the VSC
pipe contain geometry.
@@ -2985,7 +781,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x0" name="REG"/>
</array>
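	<!-- A minimal C sketch of consuming VSC_CHANNEL_VISIBILITY, assuming
	     the bitmap interpretation in the doc above (one bit per tile
	     mapped to the pipe). read_vsc_channel_visibility() is a
	     hypothetical readback helper, not a real driver API.

	     #include <stdbool.h>
	     #include <stdint.h>

	     extern uint32_t read_vsc_channel_visibility(unsigned pipe); /* hypothetical */

	     static bool vsc_tile_has_geometry(unsigned pipe, unsigned tile_slot)
	     {
	         /* 32 pipes, one 32-bit bitmap each; tile_slot selects the bit */
	         return (read_vsc_channel_visibility(pipe) >> tile_slot) & 1;
	     }
	-->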
- <array offset="0x0c58" name="VSC_PRIM_STRM_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
+ <array offset="0x0c58" name="VSC_PIPE_DATA_PRIM_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
<doc>
Has the size of data written to corresponding VSC_PRIM_STRM
buffer.
@@ -2993,10 +789,10 @@ to upconvert to 32b float internally?
<reg32 offset="0x0" name="REG"/>
</array>
- <array offset="0x0c78" name="VSC_DRAW_STRM_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
+ <array offset="0x0c78" name="VSC_PIPE_DATA_DRAW_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
<doc>
Has the size of data written to corresponding VSC pipe, ie.
- same thing that is written out to VSC_DRAW_STRM_SIZE_ADDRESS_LO/HI
+ same thing that is written out to VSC_SIZE_BASE
</doc>
<reg32 offset="0x0" name="REG"/>
</array>
@@ -3028,17 +824,17 @@ to upconvert to 32b float internally?
<bitfield name="PERSP_DIVISION_DISABLE" pos="9" type="boolean"/>
</reg32>
- <bitset name="a6xx_gras_xs_cl_cntl" inline="yes">
+ <bitset name="a6xx_gras_xs_clip_cull_distance" inline="yes">
<bitfield name="CLIP_MASK" low="0" high="7"/>
<bitfield name="CULL_MASK" low="8" high="15"/>
</bitset>
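	<!-- A hedged sketch of filling a6xx_gras_xs_clip_cull_distance from
	     the number of clip/cull distances a shader writes; treating each
	     mask bit as one distance slot is an assumption consistent with
	     the field names, not confirmed by the XML.

	     #include <stdint.h>

	     static uint32_t pack_clip_cull(unsigned num_clip, unsigned num_cull)
	     {
	         uint32_t clip_mask = (1u << num_clip) - 1; /* CLIP_MASK: bits 0..7  */
	         uint32_t cull_mask = (1u << num_cull) - 1; /* CULL_MASK: bits 8..15 */
	         return clip_mask | (cull_mask << 8);
	     }
	-->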
- <reg32 offset="0x8001" name="GRAS_VS_CL_CNTL" type="a6xx_gras_xs_cl_cntl" usage="rp_blit"/>
- <reg32 offset="0x8002" name="GRAS_DS_CL_CNTL" type="a6xx_gras_xs_cl_cntl" usage="rp_blit"/>
- <reg32 offset="0x8003" name="GRAS_GS_CL_CNTL" type="a6xx_gras_xs_cl_cntl" usage="rp_blit"/>
- <reg32 offset="0x8004" name="GRAS_MAX_LAYER_INDEX" low="0" high="10" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8001" name="GRAS_CL_VS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
+ <reg32 offset="0x8002" name="GRAS_CL_DS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
+ <reg32 offset="0x8003" name="GRAS_CL_GS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
+ <reg32 offset="0x8004" name="GRAS_CL_ARRAY_SIZE" low="0" high="10" type="uint" usage="rp_blit"/>
- <reg32 offset="0x8005" name="GRAS_CNTL" usage="rp_blit">
- <!-- see also RB_RENDER_CONTROL0 -->
+ <reg32 offset="0x8005" name="GRAS_CL_INTERP_CNTL" usage="rp_blit">
+ <!-- see also RB_INTERP_CNTL -->
<bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
<bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
<bitfield name="IJ_PERSP_SAMPLE" pos="2" type="boolean"/>
@@ -3067,7 +863,7 @@ to upconvert to 32b float internally?
<!-- <reg32 offset="0x80f0" name="GRAS_UNKNOWN_80F0" type="a6xx_reg_xy"/> -->
<!-- 0x8006-0x800f invalid -->
- <array offset="0x8010" name="GRAS_CL_VPORT" stride="6" length="16" usage="rp_blit">
+ <array offset="0x8010" name="GRAS_CL_VIEWPORT" stride="6" length="16" usage="rp_blit">
<reg32 offset="0" name="XOFFSET" type="float"/>
<reg32 offset="1" name="XSCALE" type="float"/>
<reg32 offset="2" name="YOFFSET" type="float"/>
@@ -3075,7 +871,7 @@ to upconvert to 32b float internally?
<reg32 offset="4" name="ZOFFSET" type="float"/>
<reg32 offset="5" name="ZSCALE" type="float"/>
</array>
- <array offset="0x8070" name="GRAS_CL_Z_CLAMP" stride="2" length="16" usage="rp_blit">
+ <array offset="0x8070" name="GRAS_CL_VIEWPORT_ZCLAMP" stride="2" length="16" usage="rp_blit">
<reg32 offset="0" name="MIN" type="float"/>
<reg32 offset="1" name="MAX" type="float"/>
</array>
@@ -3124,7 +920,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x8099" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" usage="cmd">
<bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/>
- <bitfield name="SHIFTAMOUNT" low="1" high="2"/>
+ <enum name="a6xx_shift_amount">
+ <value value="0" name="NO_SHIFT"/>
+ <value value="1" name="HALF_PIXEL_SHIFT"/>
+ <value value="2" name="FULL_PIXEL_SHIFT"/>
+ </enum>
+ <bitfield name="SHIFTAMOUNT" low="1" high="2" type="a6xx_shift_amount"/>
<bitfield name="INNERCONSERVATIVERASEN" pos="3" type="boolean"/>
<bitfield name="UNK4" low="4" high="5"/>
</reg32>
@@ -3133,13 +934,13 @@ to upconvert to 32b float internally?
<bitfield name="LINELENGTHEN" pos="1" type="boolean"/>
</reg32>
- <bitset name="a6xx_gras_layer_cntl" inline="yes">
+ <bitset name="a6xx_gras_us_xs_siv_cntl" inline="yes">
<bitfield name="WRITES_LAYER" pos="0" type="boolean"/>
<bitfield name="WRITES_VIEW" pos="1" type="boolean"/>
</bitset>
- <reg32 offset="0x809b" name="GRAS_VS_LAYER_CNTL" type="a6xx_gras_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x809c" name="GRAS_GS_LAYER_CNTL" type="a6xx_gras_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x809d" name="GRAS_DS_LAYER_CNTL" type="a6xx_gras_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x809b" name="GRAS_SU_VS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x809c" name="GRAS_SU_GS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x809d" name="GRAS_SU_DS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
<!-- 0x809e/0x809f invalid -->
<enum name="a6xx_sequenced_thread_dist">
@@ -3213,13 +1014,13 @@ to upconvert to 32b float internally?
<enum name="a6xx_lrz_feedback_mask">
<value value="0x0" name="LRZ_FEEDBACK_NONE"/>
<value value="0x1" name="LRZ_FEEDBACK_EARLY_Z"/>
- <value value="0x2" name="LRZ_FEEDBACK_EARLY_LRZ_LATE_Z"/>
+ <value value="0x2" name="LRZ_FEEDBACK_EARLY_Z_LATE_Z"/>
<!-- We don't have a flag type and this flags combination is often used -->
- <value value="0x3" name="LRZ_FEEDBACK_EARLY_Z_OR_EARLY_LRZ_LATE_Z"/>
+ <value value="0x3" name="LRZ_FEEDBACK_EARLY_Z_OR_EARLY_Z_LATE_Z"/>
<value value="0x4" name="LRZ_FEEDBACK_LATE_Z"/>
</enum>
- <reg32 offset="0x80a1" name="GRAS_BIN_CONTROL" usage="rp_blit">
+ <reg32 offset="0x80a1" name="GRAS_SC_BIN_CNTL" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
@@ -3235,22 +1036,22 @@ to upconvert to 32b float internally?
<bitfield name="UNK27" pos="27"/>
</reg32>
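	<!-- A worked C example of the shr encoding used by GRAS_SC_BIN_CNTL
	     above: a field with shr="N" stores the value right-shifted by N,
	     so BINW holds width >> 5 (32-pixel units) and BINH holds
	     height >> 4 (16-pixel units). The pack helper is illustrative
	     only; field positions come straight from the XML.

	     #include <assert.h>
	     #include <stdint.h>

	     static uint32_t pack_gras_sc_bin_cntl(uint32_t binw, uint32_t binh)
	     {
	         assert(binw % 32 == 0 && binh % 16 == 0); /* must be unit-aligned */
	         return ((binw >> 5) & 0x3f) |         /* BINW: bits 0..5,  shr=5 */
	                (((binh >> 4) & 0x7f) << 8);   /* BINH: bits 8..14, shr=4 */
	     }

	     /* e.g. a 256x128 bin packs as (256 >> 5) | ((128 >> 4) << 8) = 0x808 */
	-->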
- <reg32 offset="0x80a2" name="GRAS_RAS_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0x80a2" name="GRAS_SC_RAS_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="UNK2" pos="2"/>
<bitfield name="UNK3" pos="3"/>
</reg32>
- <reg32 offset="0x80a3" name="GRAS_DEST_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0x80a3" name="GRAS_SC_DEST_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
</reg32>
- <bitset name="a6xx_sample_config" inline="yes">
+ <bitset name="a6xx_msaa_sample_pos_cntl" inline="yes">
<bitfield name="UNK0" pos="0"/>
<bitfield name="LOCATION_ENABLE" pos="1" type="boolean"/>
</bitset>
- <bitset name="a6xx_sample_locations" inline="yes">
+ <bitset name="a6xx_programmable_msaa_pos" inline="yes">
<bitfield name="SAMPLE_0_X" low="0" high="3" radix="4" type="fixed"/>
<bitfield name="SAMPLE_0_Y" low="4" high="7" radix="4" type="fixed"/>
<bitfield name="SAMPLE_1_X" low="8" high="11" radix="4" type="fixed"/>
@@ -3261,9 +1062,9 @@ to upconvert to 32b float internally?
<bitfield name="SAMPLE_3_Y" low="28" high="31" radix="4" type="fixed"/>
</bitset>
- <reg32 offset="0x80a4" name="GRAS_SAMPLE_CONFIG" type="a6xx_sample_config" usage="rp_blit"/>
- <reg32 offset="0x80a5" name="GRAS_SAMPLE_LOCATION_0" type="a6xx_sample_locations" usage="rp_blit"/>
- <reg32 offset="0x80a6" name="GRAS_SAMPLE_LOCATION_1" type="a6xx_sample_locations" usage="rp_blit"/>
+ <reg32 offset="0x80a4" name="GRAS_SC_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
+ <reg32 offset="0x80a5" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0x80a6" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
<reg32 offset="0x80a7" name="GRAS_UNKNOWN_80A7" variants="A7XX-" usage="cmd"/>
@@ -3286,13 +1087,36 @@ to upconvert to 32b float internally?
<reg32 offset="0x80f0" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
<reg32 offset="0x80f1" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
- <!-- 0x80f4 - 0x80fa are used for VK_KHR_fragment_shading_rate -->
- <reg64 offset="0x80f4" name="GRAS_UNKNOWN_80F4" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80f5" name="GRAS_UNKNOWN_80F5" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80f6" name="GRAS_UNKNOWN_80F6" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80f8" name="GRAS_UNKNOWN_80F8" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80f9" name="GRAS_UNKNOWN_80F9" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80fa" name="GRAS_UNKNOWN_80FA" variants="A7XX-" usage="cmd"/>
+ <enum name="a6xx_fsr_combiner">
+ <value value="0" name="FSR_COMBINER_OP_KEEP"/>
+ <value value="1" name="FSR_COMBINER_OP_REPLACE"/>
+ <value value="2" name="FSR_COMBINER_OP_MIN"/>
+ <value value="3" name="FSR_COMBINER_OP_MAX"/>
+ <value value="4" name="FSR_COMBINER_OP_MUL"/>
+ </enum>
+
+ <reg32 offset="0x80f4" name="GRAS_VRS_CONFIG" variants="A7XX-" usage="rp_blit">
+ <bitfield name="PIPELINE_FSR_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="FRAG_SIZE_X" low="1" high="2" type="uint"/>
+ <bitfield name="FRAG_SIZE_Y" low="3" high="4" type="uint"/>
+ <bitfield name="COMBINER_OP_1" low="5" high="7" type="a6xx_fsr_combiner"/>
+ <bitfield name="COMBINER_OP_2" low="8" high="10" type="a6xx_fsr_combiner"/>
+ <bitfield name="ATTACHMENT_FSR_ENABLE" pos="13" type="boolean"/>
+ <bitfield name="PRIMITIVE_FSR_ENABLE" pos="20" type="boolean"/>
+ </reg32>
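	<!-- An illustrative packing of GRAS_VRS_CONFIG. Field positions come
	     from the XML above; encoding the fragment size as log2 (1 -> 0,
	     2 -> 1, 4 -> 2) is an assumption based on how fragment shading
	     rates are commonly encoded, not something the XML states.

	     #include <stdint.h>

	     static uint32_t pack_gras_vrs_config(unsigned frag_w, unsigned frag_h,
	                                          unsigned comb1, unsigned comb2)
	     {
	         unsigned log2w = frag_w == 4 ? 2 : frag_w == 2 ? 1 : 0;
	         unsigned log2h = frag_h == 4 ? 2 : frag_h == 2 ? 1 : 0;
	         return 1u |                   /* PIPELINE_FSR_ENABLE: bit 0            */
	                (log2w << 1) |         /* FRAG_SIZE_X: bits 1..2 (assumed log2) */
	                (log2h << 3) |         /* FRAG_SIZE_Y: bits 3..4 (assumed log2) */
	                ((comb1 & 7u) << 5) |  /* COMBINER_OP_1: bits 5..7              */
	                ((comb2 & 7u) << 8);   /* COMBINER_OP_2: bits 8..10             */
	     }
	-->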
+ <reg32 offset="0x80f5" name="GRAS_QUALITY_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
+ <bitfield name="LAYERED" pos="0" type="boolean"/>
+ <bitfield name="TILE_MODE" low="1" high="2" type="a6xx_tile_mode"/>
+ </reg32>
+ <reg32 offset="0x80f6" name="GRAS_QUALITY_BUFFER_DIMENSION" variants="A7XX-" usage="rp_blit">
+ <bitfield name="WIDTH" low="0" high="15" type="uint"/>
+ <bitfield name="HEIGHT" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg64 offset="0x80f8" name="GRAS_QUALITY_BUFFER_BASE" variants="A7XX-" type="waddress" usage="rp_blit"/>
+ <reg32 offset="0x80fa" name="GRAS_QUALITY_BUFFER_PITCH" variants="A7XX-" usage="rp_blit">
+ <bitfield name="PITCH" shr="6" low="0" high="7" type="uint"/>
+ <bitfield name="ARRAY_PITCH" shr="6" low="10" high="28" type="uint"/>
+ </reg32>
<enum name="a6xx_lrz_dir_status">
<value value="0x1" name="LRZ_DIR_LE"/>
@@ -3313,7 +1137,7 @@ to upconvert to 32b float internally?
</doc>
<bitfield name="FC_ENABLE" pos="3" type="boolean" variants="A6XX"/>
<!-- set when depth-test + depth-write enabled -->
- <bitfield name="Z_TEST_ENABLE" pos="4" type="boolean"/>
+ <bitfield name="Z_WRITE_ENABLE" pos="4" type="boolean"/>
<bitfield name="Z_BOUNDS_ENABLE" pos="5" type="boolean"/>
<bitfield name="DIR" low="6" high="7" type="a6xx_lrz_dir_status"/>
<doc>
@@ -3339,14 +1163,13 @@ to upconvert to 32b float internally?
<bitfield name="FRAGCOORDSAMPLEMODE" low="1" high="2" type="a6xx_fragcoord_sample_mode"/>
</reg32>
- <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUF_INFO_0" usage="rp_blit">
+ <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUFFER_INFO_0" usage="rp_blit">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
</reg32>
<reg64 offset="0x8103" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit"/>
<reg32 offset="0x8105" name="GRAS_LRZ_BUFFER_PITCH" usage="rp_blit">
- <!-- TODO: fix the shr fields -->
<bitfield name="PITCH" low="0" high="7" shr="5" type="uint"/>
- <bitfield name="ARRAY_PITCH" low="10" high="28" shr="4" type="uint"/>
+ <bitfield name="ARRAY_PITCH" low="10" high="28" shr="8" type="uint"/>
</reg32>
<!--
@@ -3381,18 +1204,18 @@ to upconvert to 32b float internally?
-->
<reg64 offset="0x8106" name="GRAS_LRZ_FAST_CLEAR_BUFFER_BASE" align="64" type="waddress" usage="rp_blit"/>
<!-- 0x8108 invalid -->
- <reg32 offset="0x8109" name="GRAS_SAMPLE_CNTL" usage="rp_blit">
+ <reg32 offset="0x8109" name="GRAS_LRZ_PS_SAMPLEFREQ_CNTL" usage="rp_blit">
<bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/>
</reg32>
<!--
LRZ buffer represents a single array layer + mip level, and there is
a single buffer per depth image. Thus to reuse LRZ between renderpasses
it is necessary to track the depth view used in the past renderpass, which
- GRAS_LRZ_DEPTH_VIEW is for.
- GRAS_LRZ_CNTL checks if current value of GRAS_LRZ_DEPTH_VIEW is equal to
+ GRAS_LRZ_VIEW_INFO is for.
+ GRAS_LRZ_CNTL checks if current value of GRAS_LRZ_VIEW_INFO is equal to
the value stored in the LRZ buffer, if not - LRZ is disabled.
-->
- <reg32 offset="0x810a" name="GRAS_LRZ_DEPTH_VIEW" usage="cmd">
+ <reg32 offset="0x810a" name="GRAS_LRZ_VIEW_INFO" usage="cmd">
<bitfield name="BASE_LAYER" low="0" high="10" type="uint"/>
<bitfield name="LAYER_COUNT" low="16" high="26" type="uint"/>
<bitfield name="BASE_MIP_LEVEL" low="28" high="31" type="uint"/>
@@ -3408,7 +1231,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x8110" name="GRAS_UNKNOWN_8110" low="0" high="1" usage="cmd"/>
<!-- A bit tentative but it's a color and it is followed by LRZ_CLEAR -->
- <reg32 offset="0x8111" name="GRAS_LRZ_CLEAR_DEPTH_F32" type="float" variants="A7XX-"/>
+ <reg32 offset="0x8111" name="GRAS_LRZ_DEPTH_CLEAR" type="float" variants="A7XX-"/>
<reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
<bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
@@ -3430,7 +1253,7 @@ to upconvert to 32b float internally?
<value value="0x5" name="ROTATE_VFLIP"/>
</enum>
- <bitset name="a6xx_2d_blit_cntl" inline="yes">
+ <bitset name="a6xx_a2d_bit_cntl" inline="yes">
<bitfield name="ROTATE" low="0" high="2" type="a6xx_rotation"/>
<bitfield name="OVERWRITEEN" pos="3" type="boolean"/>
<bitfield name="UNK4" low="4" high="6"/>
@@ -3447,22 +1270,22 @@ to upconvert to 32b float internally?
<bitfield name="UNK30" pos="30" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x8400" name="GRAS_2D_BLIT_CNTL" type="a6xx_2d_blit_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8400" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" usage="rp_blit"/>
<!-- note: the low 8 bits for src coords are valid, probably fixed point
it would be a bit weird though, since we subtract 1 from BR coords
apparently signed, gallium driver uses negative coords and it works?
-->
- <reg32 offset="0x8401" name="GRAS_2D_SRC_TL_X" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8402" name="GRAS_2D_SRC_BR_X" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8403" name="GRAS_2D_SRC_TL_Y" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8404" name="GRAS_2D_SRC_BR_Y" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8405" name="GRAS_2D_DST_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x8406" name="GRAS_2D_DST_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x8401" name="GRAS_A2D_SRC_XMIN" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8402" name="GRAS_A2D_SRC_XMAX" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8403" name="GRAS_A2D_SRC_YMIN" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8404" name="GRAS_A2D_SRC_YMAX" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8405" name="GRAS_A2D_DEST_TL" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x8406" name="GRAS_A2D_DEST_BR" type="a6xx_reg_xy" usage="rp_blit"/>
<reg32 offset="0x8407" name="GRAS_2D_UNKNOWN_8407" low="0" high="31"/>
<reg32 offset="0x8408" name="GRAS_2D_UNKNOWN_8408" low="0" high="31"/>
<reg32 offset="0x8409" name="GRAS_2D_UNKNOWN_8409" low="0" high="31"/>
- <reg32 offset="0x840a" name="GRAS_2D_RESOLVE_CNTL_1" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x840b" name="GRAS_2D_RESOLVE_CNTL_2" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x840a" name="GRAS_A2D_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x840b" name="GRAS_A2D_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
<!-- 0x840c-0x85ff invalid -->
<!-- always 0x880 ? (and 0 in a640/a650 traces?) -->
@@ -3481,7 +1304,7 @@ to upconvert to 32b float internally?
-->
<!-- same as GRAS_BIN_CONTROL, but without bit 27: -->
- <reg32 offset="0x8800" name="RB_BIN_CONTROL" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0x8800" name="RB_CNTL" variants="A6XX" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
@@ -3490,7 +1313,7 @@ to upconvert to 32b float internally?
<bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
</reg32>
- <reg32 offset="0x8800" name="RB_BIN_CONTROL" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x8800" name="RB_CNTL" variants="A7XX-" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
@@ -3501,8 +1324,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A6XX" usage="rp_blit">
<bitfield name="CCUSINGLECACHELINESIZE" low="3" high="5"/>
<bitfield name="EARLYVIZOUTEN" pos="6" type="boolean"/>
- <!-- set during binning pass: -->
- <bitfield name="BINNING" pos="7" type="boolean"/>
+ <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
<bitfield name="UNK8" low="8" high="10"/>
<bitfield name="RASTER_MODE" pos="8" type="a6xx_raster_mode"/>
<bitfield name="RASTER_DIRECTION" low="9" high="10" type="a6xx_raster_direction"/>
@@ -3515,15 +1337,14 @@ to upconvert to 32b float internally?
</reg32>
<reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
<bitfield name="EARLYVIZOUTEN" pos="6" type="boolean"/>
- <!-- set during binning pass: -->
- <bitfield name="BINNING" pos="7" type="boolean"/>
+ <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
<bitfield name="RASTER_MODE" pos="8" type="a6xx_raster_mode"/>
<bitfield name="RASTER_DIRECTION" low="9" high="10" type="a6xx_raster_direction"/>
<bitfield name="CONSERVATIVERASEN" pos="11" type="boolean"/>
<bitfield name="INNERCONSERVATIVERASEN" pos="12" type="boolean"/>
</reg32>
<reg32 offset="0x8116" name="GRAS_SU_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
- <bitfield name="BINNING" pos="7" type="boolean"/>
+ <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
</reg32>
<reg32 offset="0x8802" name="RB_RAS_MSAA_CNTL" usage="rp_blit">
@@ -3536,16 +1357,16 @@ to upconvert to 32b float internally?
<bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
</reg32>
- <reg32 offset="0x8804" name="RB_SAMPLE_CONFIG" type="a6xx_sample_config" usage="rp_blit"/>
- <reg32 offset="0x8805" name="RB_SAMPLE_LOCATION_0" type="a6xx_sample_locations" usage="rp_blit"/>
- <reg32 offset="0x8806" name="RB_SAMPLE_LOCATION_1" type="a6xx_sample_locations" usage="rp_blit"/>
+ <reg32 offset="0x8804" name="RB_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8805" name="RB_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0x8806" name="RB_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
<!-- 0x8807-0x8808 invalid -->
<!--
note: maybe not actually called RB_RENDER_CONTROLn (since RB_RENDER_CNTL
name comes from kernel and is probably right)
-->
- <reg32 offset="0x8809" name="RB_RENDER_CONTROL0" usage="rp_blit">
- <!-- see also GRAS_CNTL -->
+ <reg32 offset="0x8809" name="RB_INTERP_CNTL" usage="rp_blit">
+ <!-- see also GRAS_CL_INTERP_CNTL -->
<bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
<bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
<bitfield name="IJ_PERSP_SAMPLE" pos="2" type="boolean"/>
@@ -3555,7 +1376,7 @@ to upconvert to 32b float internally?
<bitfield name="COORD_MASK" low="6" high="9" type="hex"/>
<bitfield name="UNK10" pos="10" type="boolean"/>
</reg32>
- <reg32 offset="0x880a" name="RB_RENDER_CONTROL1" usage="rp_blit">
+ <reg32 offset="0x880a" name="RB_PS_INPUT_CNTL" usage="rp_blit">
<!-- enable bits for various FS sysvalue regs: -->
<bitfield name="SAMPLEMASK" pos="0" type="boolean"/>
<bitfield name="POSTDEPTHCOVERAGE" pos="1" type="boolean"/>
@@ -3567,16 +1388,16 @@ to upconvert to 32b float internally?
<bitfield name="FOVEATION" pos="8" type="boolean"/>
</reg32>
- <reg32 offset="0x880b" name="RB_FS_OUTPUT_CNTL0" usage="rp_blit">
+ <reg32 offset="0x880b" name="RB_PS_OUTPUT_CNTL" usage="rp_blit">
<bitfield name="DUAL_COLOR_IN_ENABLE" pos="0" type="boolean"/>
<bitfield name="FRAG_WRITES_Z" pos="1" type="boolean"/>
<bitfield name="FRAG_WRITES_SAMPMASK" pos="2" type="boolean"/>
<bitfield name="FRAG_WRITES_STENCILREF" pos="3" type="boolean"/>
</reg32>
- <reg32 offset="0x880c" name="RB_FS_OUTPUT_CNTL1" usage="rp_blit">
+ <reg32 offset="0x880c" name="RB_PS_MRT_CNTL" usage="rp_blit">
<bitfield name="MRT" low="0" high="3" type="uint"/>
</reg32>
- <reg32 offset="0x880d" name="RB_RENDER_COMPONENTS" usage="rp_blit">
+ <reg32 offset="0x880d" name="RB_PS_OUTPUT_MASK" usage="rp_blit">
<bitfield name="RT0" low="0" high="3"/>
<bitfield name="RT1" low="4" high="7"/>
<bitfield name="RT2" low="8" high="11"/>
@@ -3608,7 +1429,7 @@ to upconvert to 32b float internally?
<bitfield name="SRGB_MRT7" pos="7" type="boolean"/>
</reg32>
- <reg32 offset="0x8810" name="RB_SAMPLE_CNTL" usage="rp_blit">
+ <reg32 offset="0x8810" name="RB_PS_SAMPLEFREQ_CNTL" usage="rp_blit">
<bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/>
</reg32>
<reg32 offset="0x8811" name="RB_UNKNOWN_8811" low="4" high="6" usage="cmd"/>
@@ -3672,18 +1493,18 @@ to upconvert to 32b float internally?
<reg32 offset="0x7" name="BASE_GMEM" low="12" high="31" shr="12"/>
</array>
- <reg32 offset="0x8860" name="RB_BLEND_RED_F32" type="float" usage="rp_blit"/>
- <reg32 offset="0x8861" name="RB_BLEND_GREEN_F32" type="float" usage="rp_blit"/>
- <reg32 offset="0x8862" name="RB_BLEND_BLUE_F32" type="float" usage="rp_blit"/>
- <reg32 offset="0x8863" name="RB_BLEND_ALPHA_F32" type="float" usage="rp_blit"/>
- <reg32 offset="0x8864" name="RB_ALPHA_CONTROL" usage="cmd">
+ <reg32 offset="0x8860" name="RB_BLEND_CONSTANT_RED_FP32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8861" name="RB_BLEND_CONSTANT_GREEN_FP32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8862" name="RB_BLEND_CONSTANT_BLUE_FP32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8863" name="RB_BLEND_CONSTANT_ALPHA_FP32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8864" name="RB_ALPHA_TEST_CNTL" usage="cmd">
<bitfield name="ALPHA_REF" low="0" high="7" type="hex"/>
<bitfield name="ALPHA_TEST" pos="8" type="boolean"/>
<bitfield name="ALPHA_TEST_FUNC" low="9" high="11" type="adreno_compare_func"/>
</reg32>
<reg32 offset="0x8865" name="RB_BLEND_CNTL" usage="rp_blit">
<!-- per-mrt enable bit -->
- <bitfield name="ENABLE_BLEND" low="0" high="7"/>
+ <bitfield name="BLEND_READS_DEST" low="0" high="7"/>
<bitfield name="INDEPENDENT_BLEND" pos="8" type="boolean"/>
<bitfield name="DUAL_COLOR_IN_ENABLE" pos="9" type="boolean"/>
<bitfield name="ALPHA_TO_COVERAGE" pos="10" type="boolean"/>
@@ -3726,12 +1547,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x8873" name="RB_DEPTH_BUFFER_PITCH" low="0" high="13" shr="6" type="uint" usage="rp_blit"/>
<reg32 offset="0x8874" name="RB_DEPTH_BUFFER_ARRAY_PITCH" low="0" high="27" shr="6" type="uint" usage="rp_blit"/>
<reg64 offset="0x8875" name="RB_DEPTH_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8877" name="RB_DEPTH_BUFFER_BASE_GMEM" low="12" high="31" shr="12" usage="rp_blit"/>
+ <reg32 offset="0x8877" name="RB_DEPTH_GMEM_BASE" low="12" high="31" shr="12" usage="rp_blit"/>
- <reg32 offset="0x8878" name="RB_Z_BOUNDS_MIN" type="float" usage="rp_blit"/>
- <reg32 offset="0x8879" name="RB_Z_BOUNDS_MAX" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8878" name="RB_DEPTH_BOUND_MIN" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8879" name="RB_DEPTH_BOUND_MAX" type="float" usage="rp_blit"/>
<!-- 0x887a-0x887f invalid -->
- <reg32 offset="0x8880" name="RB_STENCIL_CONTROL" usage="rp_blit">
+ <reg32 offset="0x8880" name="RB_STENCIL_CNTL" usage="rp_blit">
<bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
<bitfield name="STENCIL_ENABLE_BF" pos="1" type="boolean"/>
<!--
@@ -3753,11 +1574,11 @@ to upconvert to 32b float internally?
<reg32 offset="0x8115" name="GRAS_SU_STENCIL_CNTL" usage="rp_blit">
<bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0x8881" name="RB_STENCIL_INFO" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0x8881" name="RB_STENCIL_BUFFER_INFO" variants="A6XX" usage="rp_blit">
<bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
<bitfield name="UNK1" pos="1" type="boolean"/>
</reg32>
- <reg32 offset="0x8881" name="RB_STENCIL_INFO" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x8881" name="RB_STENCIL_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
<bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
<bitfield name="UNK1" pos="1" type="boolean"/>
<bitfield name="TILEMODE" low="2" high="3" type="a6xx_tile_mode"/>
@@ -3765,22 +1586,22 @@ to upconvert to 32b float internally?
<reg32 offset="0x8882" name="RB_STENCIL_BUFFER_PITCH" low="0" high="11" shr="6" type="uint" usage="rp_blit"/>
<reg32 offset="0x8883" name="RB_STENCIL_BUFFER_ARRAY_PITCH" low="0" high="23" shr="6" type="uint" usage="rp_blit"/>
<reg64 offset="0x8884" name="RB_STENCIL_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8886" name="RB_STENCIL_BUFFER_BASE_GMEM" low="12" high="31" shr="12" usage="rp_blit"/>
- <reg32 offset="0x8887" name="RB_STENCILREF" usage="rp_blit">
+ <reg32 offset="0x8886" name="RB_STENCIL_GMEM_BASE" low="12" high="31" shr="12" usage="rp_blit"/>
+ <reg32 offset="0x8887" name="RB_STENCIL_REF_CNTL" usage="rp_blit">
<bitfield name="REF" low="0" high="7"/>
<bitfield name="BFREF" low="8" high="15"/>
</reg32>
- <reg32 offset="0x8888" name="RB_STENCILMASK" usage="rp_blit">
+ <reg32 offset="0x8888" name="RB_STENCIL_MASK" usage="rp_blit">
<bitfield name="MASK" low="0" high="7"/>
<bitfield name="BFMASK" low="8" high="15"/>
</reg32>
- <reg32 offset="0x8889" name="RB_STENCILWRMASK" usage="rp_blit">
+ <reg32 offset="0x8889" name="RB_STENCIL_WRITE_MASK" usage="rp_blit">
<bitfield name="WRMASK" low="0" high="7"/>
<bitfield name="BFWRMASK" low="8" high="15"/>
</reg32>
<!-- 0x888a-0x888f invalid -->
<reg32 offset="0x8890" name="RB_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x8891" name="RB_SAMPLE_COUNT_CONTROL" usage="cmd">
+ <reg32 offset="0x8891" name="RB_SAMPLE_COUNTER_CNTL" usage="cmd">
<bitfield name="DISABLE" pos="0" type="boolean"/>
<bitfield name="COPY" pos="1" type="boolean"/>
</reg32>
@@ -3791,27 +1612,27 @@ to upconvert to 32b float internally?
<reg32 offset="0x8899" name="RB_UNKNOWN_8899" variants="A7XX-" usage="cmd"/>
<!-- 0x8899-0x88bf invalid -->
<!-- clamps depth value for depth test/write -->
- <reg32 offset="0x88c0" name="RB_Z_CLAMP_MIN" type="float" usage="rp_blit"/>
- <reg32 offset="0x88c1" name="RB_Z_CLAMP_MAX" type="float" usage="rp_blit"/>
+ <reg32 offset="0x88c0" name="RB_VIEWPORT_ZCLAMP_MIN" type="float" usage="rp_blit"/>
+ <reg32 offset="0x88c1" name="RB_VIEWPORT_ZCLAMP_MAX" type="float" usage="rp_blit"/>
<!-- 0x88c2-0x88cf invalid-->
- <reg32 offset="0x88d0" name="RB_UNKNOWN_88D0" usage="rp_blit">
+ <reg32 offset="0x88d0" name="RB_RESOLVE_CNTL_0" usage="rp_blit">
<bitfield name="UNK0" low="0" high="12"/>
<bitfield name="UNK16" low="16" high="26"/>
</reg32>
- <reg32 offset="0x88d1" name="RB_BLIT_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x88d2" name="RB_BLIT_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x88d1" name="RB_RESOLVE_CNTL_1" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x88d2" name="RB_RESOLVE_CNTL_2" type="a6xx_reg_xy" usage="rp_blit"/>
<!-- weird to duplicate other regs from same block?? -->
- <reg32 offset="0x88d3" name="RB_BIN_CONTROL2" usage="rp_blit">
+ <reg32 offset="0x88d3" name="RB_RESOLVE_CNTL_3" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
</reg32>
- <reg32 offset="0x88d4" name="RB_WINDOW_OFFSET2" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x88d5" name="RB_BLIT_GMEM_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0x88d4" name="RB_RESOLVE_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x88d5" name="RB_RESOLVE_GMEM_BUFFER_INFO" usage="rp_blit">
<bitfield name="SAMPLES" low="3" high="4" type="a3xx_msaa_samples"/>
</reg32>
- <reg32 offset="0x88d6" name="RB_BLIT_BASE_GMEM" low="12" high="31" shr="12" usage="rp_blit"/>
+ <reg32 offset="0x88d6" name="RB_RESOLVE_GMEM_BUFFER_BASE" low="12" high="31" shr="12" usage="rp_blit"/>
<!-- s/DST_FORMAT/DST_INFO/ probably: -->
- <reg32 offset="0x88d7" name="RB_BLIT_DST_INFO" usage="rp_blit">
+ <reg32 offset="0x88d7" name="RB_RESOLVE_SYSTEM_BUFFER_INFO" usage="rp_blit">
<bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
<bitfield name="FLAGS" pos="2" type="boolean"/>
<bitfield name="SAMPLES" low="3" high="4" type="a3xx_msaa_samples"/>
@@ -3820,25 +1641,31 @@ to upconvert to 32b float internally?
<bitfield name="UNK15" pos="15" type="boolean"/>
<bitfield name="MUTABLEEN" pos="16" type="boolean" variants="A7XX-"/>
</reg32>
- <reg64 offset="0x88d8" name="RB_BLIT_DST" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x88da" name="RB_BLIT_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x88d8" name="RB_RESOLVE_SYSTEM_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x88da" name="RB_RESOLVE_SYSTEM_BUFFER_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
<!-- array-pitch is size of layer -->
- <reg32 offset="0x88db" name="RB_BLIT_DST_ARRAY_PITCH" low="0" high="28" shr="6" type="uint" usage="rp_blit"/>
- <reg64 offset="0x88dc" name="RB_BLIT_FLAG_DST" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x88de" name="RB_BLIT_FLAG_DST_PITCH" usage="rp_blit">
+ <reg32 offset="0x88db" name="RB_RESOLVE_SYSTEM_BUFFER_ARRAY_PITCH" low="0" high="28" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x88dc" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x88de" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH" usage="rp_blit">
<bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
<bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
</reg32>
- <reg32 offset="0x88df" name="RB_BLIT_CLEAR_COLOR_DW0" usage="rp_blit"/>
- <reg32 offset="0x88e0" name="RB_BLIT_CLEAR_COLOR_DW1" usage="rp_blit"/>
- <reg32 offset="0x88e1" name="RB_BLIT_CLEAR_COLOR_DW2" usage="rp_blit"/>
- <reg32 offset="0x88e2" name="RB_BLIT_CLEAR_COLOR_DW3" usage="rp_blit"/>
+ <reg32 offset="0x88df" name="RB_RESOLVE_CLEAR_COLOR_DW0" usage="rp_blit"/>
+ <reg32 offset="0x88e0" name="RB_RESOLVE_CLEAR_COLOR_DW1" usage="rp_blit"/>
+ <reg32 offset="0x88e1" name="RB_RESOLVE_CLEAR_COLOR_DW2" usage="rp_blit"/>
+ <reg32 offset="0x88e2" name="RB_RESOLVE_CLEAR_COLOR_DW3" usage="rp_blit"/>
+
+ <enum name="a6xx_blit_event_type">
+ <value value="0x0" name="BLIT_EVENT_STORE"/>
+ <value value="0x1" name="BLIT_EVENT_STORE_AND_CLEAR"/>
+ <value value="0x2" name="BLIT_EVENT_CLEAR"/>
+ <value value="0x3" name="BLIT_EVENT_LOAD"/>
+ </enum>
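	<!-- A hedged sketch of how a tiler might pick a6xx_blit_event_type
	     per attachment, mirroring classic load/store ops: LOAD or CLEAR
	     into GMEM at renderpass start, STORE back to system memory at
	     the end. The mapping to Vulkan-style load ops is an assumption
	     for illustration; enum values match the XML in declaration order.

	     #include <stdbool.h>

	     enum a6xx_blit_event_type { BLIT_EVENT_STORE, BLIT_EVENT_STORE_AND_CLEAR,
	                                 BLIT_EVENT_CLEAR, BLIT_EVENT_LOAD };
	     enum load_op { LOAD_OP_LOAD, LOAD_OP_CLEAR, LOAD_OP_DONT_CARE };

	     static bool start_of_pass_event(enum load_op op,
	                                     enum a6xx_blit_event_type *out)
	     {
	         switch (op) {
	         case LOAD_OP_LOAD:  *out = BLIT_EVENT_LOAD;  return true; /* sysmem to GMEM */
	         case LOAD_OP_CLEAR: *out = BLIT_EVENT_CLEAR; return true; /* clear in GMEM  */
	         default:            return false;  /* contents undefined: no event needed */
	         }
	     }
	-->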
<!-- seems somewhat similar to what we called RB_CLEAR_CNTL on a5xx: -->
- <reg32 offset="0x88e3" name="RB_BLIT_INFO" usage="rp_blit">
- <bitfield name="UNK0" pos="0" type="boolean"/> <!-- s8 stencil restore/clear? But also color restore? -->
- <bitfield name="GMEM" pos="1" type="boolean"/> <!-- set for restore and clear to gmem? -->
+ <reg32 offset="0x88e3" name="RB_RESOLVE_OPERATION" usage="rp_blit">
+ <bitfield name="TYPE" low="0" high="1" type="a6xx_blit_event_type"/>
<bitfield name="SAMPLE_0" pos="2" type="boolean"/> <!-- takes sample 0 instead of averaging -->
<bitfield name="DEPTH" pos="3" type="boolean"/> <!-- z16/z32/z24s8/x24x8 clear or resolve? -->
<doc>
@@ -3853,16 +1680,20 @@ to upconvert to 32b float internally?
<!-- set when this is the last resolve on a650+ -->
<bitfield name="LAST" low="8" high="9"/>
<!--
- a618 GLES: color render target number being resolved for RM6_RESOLVE, 0x8 for depth, 0x9 for separate stencil.
- a618 VK: 0x8 for depth RM6_RESOLVE, 0x9 for separate stencil, 0 otherwise.
-
- We believe this is related to concurrent resolves
+ a618 GLES: color render target number being resolved for CCU_RESOLVE, 0x8 for depth, 0x9 for separate stencil.
+ a618 VK: 0x8 for depth CCU_RESOLVE, 0x9 for separate stencil, 0 otherwise.
+ a7xx VK: 0x8 for depth, 0x9 for separate stencil, 0x0 to 0x7 used for concurrent resolves of color render
+ targets inside a given resolve group.
-->
<bitfield name="BUFFER_ID" low="12" high="15"/>
</reg32>
- <reg32 offset="0x88e4" name="RB_UNKNOWN_88E4" variants="A7XX-" usage="rp_blit">
- <!-- Value conditioned based on predicate, changed before blits -->
- <bitfield name="UNK0" pos="0" type="boolean"/>
+
+ <enum name="a7xx_blit_clear_mode">
+ <value value="0x0" name="CLEAR_MODE_SYSMEM"/>
+ <value value="0x1" name="CLEAR_MODE_GMEM"/>
+ </enum>
+ <reg32 offset="0x88e4" name="RB_CLEAR_TARGET" variants="A7XX-" usage="rp_blit">
+ <bitfield name="CLEAR_MODE" pos="0" type="a7xx_blit_clear_mode"/>
</reg32>
<enum name="a6xx_ccu_cache_size">
@@ -3871,7 +1702,7 @@ to upconvert to 32b float internally?
<value value="0x2" name="CCU_CACHE_SIZE_QUARTER"/>
<value value="0x3" name="CCU_CACHE_SIZE_EIGHTH"/>
</enum>
- <reg32 offset="0x88e5" name="RB_CCU_CNTL2" variants="A7XX-" usage="cmd">
+ <reg32 offset="0x88e5" name="RB_CCU_CACHE_CNTL" variants="A7XX-" usage="cmd">
<bitfield name="DEPTH_OFFSET_HI" pos="0" type="hex"/>
<bitfield name="COLOR_OFFSET_HI" pos="2" type="hex"/>
<bitfield name="DEPTH_CACHE_SIZE" low="10" high="11" type="a6xx_ccu_cache_size"/>
@@ -3895,7 +1726,13 @@ to upconvert to 32b float internally?
<bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
<bitfield name="ARRAY_PITCH" low="11" high="23" shr="7" type="uint"/>
</reg32>
- <reg32 offset="0x88f4" name="RB_UNKNOWN_88F4" low="0" high="2"/>
+
+ <reg32 offset="0x88f4" name="RB_VRS_CONFIG" usage="rp_blit">
+ <bitfield name="UNK2" pos="2" type="boolean"/>
+ <bitfield name="PIPELINE_FSR_ENABLE" pos="4" type="boolean"/>
+ <bitfield name="ATTACHMENT_FSR_ENABLE" pos="5" type="boolean"/>
+ <bitfield name="PRIMITIVE_FSR_ENABLE" pos="18" type="boolean"/>
+ </reg32>
<!-- Connected to VK_EXT_fragment_density_map? -->
<reg32 offset="0x88f5" name="RB_UNKNOWN_88F5" variants="A7XX-"/>
<!-- 0x88f6-0x88ff invalid -->
@@ -3906,7 +1743,7 @@ to upconvert to 32b float internally?
<bitfield name="UNK8" low="8" high="10"/>
<bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
</reg32>
- <array offset="0x8903" name="RB_MRT_FLAG_BUFFER" stride="3" length="8" usage="rp_blit">
+ <array offset="0x8903" name="RB_COLOR_FLAG_BUFFER" stride="3" length="8" usage="rp_blit">
<reg64 offset="0" name="ADDR" type="waddress" align="64"/>
<reg32 offset="2" name="PITCH">
<bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
@@ -3915,10 +1752,10 @@ to upconvert to 32b float internally?
</array>
<!-- 0x891b-0x8926 invalid -->
<doc>
- RB_SAMPLE_COUNT_ADDR register is used up to (and including) a730. After that
+ RB_SAMPLE_COUNTER_BASE register is used up to (and including) a730. After that
the address is specified through CP_EVENT_WRITE7::WRITE_SAMPLE_COUNT.
</doc>
- <reg64 offset="0x8927" name="RB_SAMPLE_COUNT_ADDR" type="waddress" align="16" usage="cmd"/>
+ <reg64 offset="0x8927" name="RB_SAMPLE_COUNTER_BASE" type="waddress" align="16" usage="cmd"/>
<!-- 0x8929-0x89ff invalid -->
<!-- TODO: there are some registers in the 0x8a00-0x8bff range -->
@@ -3932,10 +1769,10 @@ to upconvert to 32b float internally?
<reg32 offset="0x8a20" name="RB_UNKNOWN_8A20" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0x8a30" name="RB_UNKNOWN_8A30" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0x8c00" name="RB_2D_BLIT_CNTL" type="a6xx_2d_blit_cntl" usage="rp_blit"/>
- <reg32 offset="0x8c01" name="RB_2D_UNKNOWN_8C01" low="0" high="31" usage="rp_blit"/>
+ <reg32 offset="0x8c00" name="RB_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8c01" name="RB_A2D_PIXEL_CNTL" low="0" high="31" usage="rp_blit"/>
- <bitset name="a6xx_2d_src_surf_info" inline="yes">
+ <bitset name="a6xx_a2d_src_texture_info" inline="yes">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
<bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
<bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
@@ -3954,7 +1791,7 @@ to upconvert to 32b float internally?
<bitfield name="MUTABLEEN" pos="29" type="boolean" variants="A7XX-"/>
</bitset>
- <bitset name="a6xx_2d_dst_surf_info" inline="yes">
+ <bitset name="a6xx_a2d_dest_buffer_info" inline="yes">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
<bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
<bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
@@ -3965,26 +1802,26 @@ to upconvert to 32b float internally?
</bitset>
<!-- 0x8c02-0x8c16 invalid -->
- <reg32 offset="0x8c17" name="RB_2D_DST_INFO" type="a6xx_2d_dst_surf_info" usage="rp_blit"/>
- <reg64 offset="0x8c18" name="RB_2D_DST" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c1a" name="RB_2D_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8c17" name="RB_A2D_DEST_BUFFER_INFO" type="a6xx_a2d_dest_buffer_info" usage="rp_blit"/>
+ <reg64 offset="0x8c18" name="RB_A2D_DEST_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c1a" name="RB_A2D_DEST_BUFFER_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
<!-- this is a guess but seems likely (for NV12/IYUV): -->
- <reg64 offset="0x8c1b" name="RB_2D_DST_PLANE1" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c1d" name="RB_2D_DST_PLANE_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
- <reg64 offset="0x8c1e" name="RB_2D_DST_PLANE2" type="waddress" align="64" usage="rp_blit"/>
+ <reg64 offset="0x8c1b" name="RB_A2D_DEST_BUFFER_BASE_1" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c1d" name="RB_A2D_DEST_BUFFER_PITCH_1" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x8c1e" name="RB_A2D_DEST_BUFFER_BASE_2" type="waddress" align="64" usage="rp_blit"/>
- <reg64 offset="0x8c20" name="RB_2D_DST_FLAGS" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c22" name="RB_2D_DST_FLAGS_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x8c20" name="RB_A2D_DEST_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c22" name="RB_A2D_DEST_FLAG_BUFFER_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
<!-- this is a guess but seems likely (for NV12 with UBWC): -->
- <reg64 offset="0x8c23" name="RB_2D_DST_FLAGS_PLANE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c25" name="RB_2D_DST_FLAGS_PLANE_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x8c23" name="RB_A2D_DEST_FLAG_BUFFER_BASE_1" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c25" name="RB_A2D_DEST_FLAG_BUFFER_PITCH_1" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
<!-- TODO: 0x8c26-0x8c33 are all full 32-bit registers -->
<!-- unlike a5xx, these are per channel values rather than packed -->
- <reg32 offset="0x8c2c" name="RB_2D_SRC_SOLID_C0" usage="rp_blit"/>
- <reg32 offset="0x8c2d" name="RB_2D_SRC_SOLID_C1" usage="rp_blit"/>
- <reg32 offset="0x8c2e" name="RB_2D_SRC_SOLID_C2" usage="rp_blit"/>
- <reg32 offset="0x8c2f" name="RB_2D_SRC_SOLID_C3" usage="rp_blit"/>
+ <reg32 offset="0x8c2c" name="RB_A2D_CLEAR_COLOR_DW0" usage="rp_blit"/>
+ <reg32 offset="0x8c2d" name="RB_A2D_CLEAR_COLOR_DW1" usage="rp_blit"/>
+ <reg32 offset="0x8c2e" name="RB_A2D_CLEAR_COLOR_DW2" usage="rp_blit"/>
+ <reg32 offset="0x8c2f" name="RB_A2D_CLEAR_COLOR_DW3" usage="rp_blit"/>
<reg32 offset="0x8c34" name="RB_UNKNOWN_8C34" variants="A7XX-" usage="cmd"/>
@@ -3996,7 +1833,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x8e04" name="RB_DBG_ECO_CNTL" usage="cmd"/> <!-- TODO: valid mask 0xfffffeff -->
<reg32 offset="0x8e05" name="RB_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/>
<!-- 0x02080000 in GMEM, zero otherwise? -->
- <reg32 offset="0x8e06" name="RB_UNKNOWN_8E06" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x8e06" name="RB_CCU_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0x8e07" name="RB_CCU_CNTL" usage="cmd" variants="A6XX">
<bitfield name="GMEM_FAST_CLEAR_DISABLE" pos="0" type="boolean"/>
@@ -4017,10 +1854,21 @@ to upconvert to 32b float internally?
<bitfield name="COLOR_OFFSET" low="23" high="31" shr="12" type="hex"/>
<!--TODO: valid mask 0xfffffc1f -->
</reg32>
+ <enum name="a7xx_concurrent_resolve_mode">
+ <value value="0x0" name="CONCURRENT_RESOLVE_MODE_DISABLED"/>
+ <value value="0x1" name="CONCURRENT_RESOLVE_MODE_1"/>
+ <value value="0x2" name="CONCURRENT_RESOLVE_MODE_2"/>
+ </enum>
+ <enum name="a7xx_concurrent_unresolve_mode">
+ <value value="0x0" name="CONCURRENT_UNRESOLVE_MODE_DISABLED"/>
+ <value value="0x1" name="CONCURRENT_UNRESOLVE_MODE_PARTIAL"/>
+ <value value="0x3" name="CONCURRENT_UNRESOLVE_MODE_FULL"/>
+ </enum>
<reg32 offset="0x8e07" name="RB_CCU_CNTL" usage="cmd" variants="A7XX-">
<bitfield name="GMEM_FAST_CLEAR_DISABLE" pos="0" type="boolean"/>
- <bitfield name="CONCURRENT_RESOLVE" pos="2" type="boolean"/>
- <!-- rest of the bits were moved to RB_CCU_CNTL2 -->
+ <bitfield name="CONCURRENT_RESOLVE_MODE" low="2" high="3" type="a7xx_concurrent_resolve_mode"/>
+ <bitfield name="CONCURRENT_UNRESOLVE_MODE" low="5" high="6" type="a7xx_concurrent_unresolve_mode"/>
+ <!-- rest of the bits were moved to RB_CCU_CACHE_CNTL -->
</reg32>
<reg32 offset="0x8e08" name="RB_NC_MODE_CNTL">
<bitfield name="MODE" pos="0" type="boolean"/>
@@ -4046,9 +1894,9 @@ to upconvert to 32b float internally?
<reg32 offset="0x8e3d" name="RB_RB_SUB_BLOCK_SEL_CNTL_CD"/>
<!-- 0x8e3e-0x8e4f invalid -->
<!-- GMEM save/restore for preemption: -->
- <reg32 offset="0x8e50" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE" pos="0" type="boolean"/>
+ <reg32 offset="0x8e50" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE" pos="0" type="boolean"/>
<!-- address for GMEM save/restore? -->
- <reg32 offset="0x8e51" name="RB_UNKNOWN_8E51" type="waddress" align="1"/>
+ <reg32 offset="0x8e51" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ADDR" type="waddress" align="1"/>
<!-- 0x8e53-0x8e7f invalid -->
<reg32 offset="0x8e79" name="RB_UNKNOWN_8E79" variants="A7XX-" usage="cmd"/>
<!-- 0x8e80-0x8e83 are valid -->
@@ -4069,38 +1917,38 @@ to upconvert to 32b float internally?
<bitfield name="CLIP_DIST_03_LOC" low="8" high="15" type="uint"/>
<bitfield name="CLIP_DIST_47_LOC" low="16" high="23" type="uint"/>
</bitset>
- <reg32 offset="0x9101" name="VPC_VS_CLIP_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9102" name="VPC_GS_CLIP_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9103" name="VPC_DS_CLIP_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9101" name="VPC_VS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9102" name="VPC_GS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9103" name="VPC_DS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9311" name="VPC_VS_CLIP_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9312" name="VPC_GS_CLIP_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9313" name="VPC_DS_CLIP_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9311" name="VPC_VS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9312" name="VPC_GS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9313" name="VPC_DS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <bitset name="a6xx_vpc_xs_layer_cntl" inline="yes">
+ <bitset name="a6xx_vpc_xs_siv_cntl" inline="yes">
<bitfield name="LAYERLOC" low="0" high="7" type="uint"/>
<bitfield name="VIEWLOC" low="8" high="15" type="uint"/>
<bitfield name="SHADINGRATELOC" low="16" high="23" type="uint" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x9104" name="VPC_VS_LAYER_CNTL" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x9105" name="VPC_GS_LAYER_CNTL" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x9106" name="VPC_DS_LAYER_CNTL" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9104" name="VPC_VS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9105" name="VPC_GS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9106" name="VPC_DS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9314" name="VPC_VS_LAYER_CNTL_V2" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x9315" name="VPC_GS_LAYER_CNTL_V2" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x9316" name="VPC_DS_LAYER_CNTL_V2" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9314" name="VPC_VS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9315" name="VPC_GS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9316" name="VPC_DS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
<reg32 offset="0x9107" name="VPC_UNKNOWN_9107" variants="A6XX" usage="rp_blit">
- <!-- this mirrors PC_RASTER_CNTL::DISCARD, although it seems it's unused -->
+ <!-- this mirrors VPC_RAST_STREAM_CNTL::DISCARD, although it appears to be unused -->

<bitfield name="RASTER_DISCARD" pos="0" type="boolean"/>
<bitfield name="UNK2" pos="2" type="boolean"/>
</reg32>
- <reg32 offset="0x9108" name="VPC_POLYGON_MODE" usage="rp_blit">
+ <reg32 offset="0x9108" name="VPC_RAST_CNTL" usage="rp_blit">
<bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
</reg32>
- <bitset name="a6xx_primitive_cntl_0" inline="yes">
+ <bitset name="a6xx_pc_cntl" inline="yes">
<bitfield name="PRIMITIVE_RESTART" pos="0" type="boolean"/>
<bitfield name="PROVOKING_VTX_LAST" pos="1" type="boolean"/>
<bitfield name="D3D_VERTEX_ORDERING" pos="2" type="boolean">
@@ -4113,7 +1961,7 @@ to upconvert to 32b float internally?
<bitfield name="UNK3" pos="3" type="boolean"/>
</bitset>
- <bitset name="a6xx_primitive_cntl_5" inline="yes">
+ <bitset name="a6xx_gs_param_0" inline="yes">
<doc>
geometry shader
</doc>
@@ -4125,7 +1973,7 @@ to upconvert to 32b float internally?
<bitfield name="UNK18" pos="18"/>
</bitset>
- <bitset name="a6xx_multiview_cntl" inline="yes">
+ <bitset name="a6xx_stereo_rendering_cntl" inline="yes">
<bitfield name="ENABLE" pos="0" type="boolean"/>
<bitfield name="DISABLEMULTIPOS" pos="1" type="boolean">
<doc>
@@ -4139,10 +1987,10 @@ to upconvert to 32b float internally?
<bitfield name="VIEWS" low="2" high="6" type="uint"/>
</bitset>
- <reg32 offset="0x9109" name="VPC_PRIMITIVE_CNTL_0" type="a6xx_primitive_cntl_0" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910a" name="VPC_PRIMITIVE_CNTL_5" type="a6xx_primitive_cntl_5" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910b" name="VPC_MULTIVIEW_MASK" type="hex" low="0" high="15" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910c" name="VPC_MULTIVIEW_CNTL" type="a6xx_multiview_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x9109" name="VPC_PC_CNTL" type="a6xx_pc_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x910a" name="VPC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x910b" name="VPC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x910c" name="VPC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A7XX-" usage="rp_blit"/>
<enum name="a6xx_varying_interp_mode">
<value value="0" name="INTERP_SMOOTH"/>
@@ -4159,11 +2007,11 @@ to upconvert to 32b float internally?
</enum>
<!-- 0x9109-0x91ff invalid -->
- <array offset="0x9200" name="VPC_VARYING_INTERP" stride="1" length="8" usage="rp_blit">
+ <array offset="0x9200" name="VPC_VARYING_INTERP_MODE" stride="1" length="8" usage="rp_blit">
<doc>Packed array of a6xx_varying_interp_mode</doc>
<reg32 offset="0x0" name="MODE"/>
</array>
- <array offset="0x9208" name="VPC_VARYING_PS_REPL" stride="1" length="8" usage="rp_blit">
+ <array offset="0x9208" name="VPC_VARYING_REPLACE_MODE_0" stride="1" length="8" usage="rp_blit">
<doc>Packed array of a6xx_varying_ps_repl_mode</doc>
<reg32 offset="0x0" name="MODE"/>
</array>
@@ -4172,12 +2020,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x9210" name="VPC_UNKNOWN_9210" low="0" high="31" variants="A6XX" usage="cmd"/>
<reg32 offset="0x9211" name="VPC_UNKNOWN_9211" low="0" high="31" variants="A6XX" usage="cmd"/>
- <array offset="0x9212" name="VPC_VAR" stride="1" length="4" usage="rp_blit">
+ <array offset="0x9212" name="VPC_VARYING_LM_TRANSFER_CNTL_0" stride="1" length="4" usage="rp_blit">
<!-- one bit per varying component: -->
<reg32 offset="0" name="DISABLE"/>
</array>
- <reg32 offset="0x9216" name="VPC_SO_CNTL" usage="rp_blit">
+ <reg32 offset="0x9216" name="VPC_SO_MAPPING_WPTR" usage="rp_blit">
<!--
Choose which DWORD to write to. There is an array of
(4 * 64) DWORDs, dumped in the devcoredump at
@@ -4198,7 +2046,7 @@ to upconvert to 32b float internally?
When EmitStreamVertex(N) happens, the HW goes to DWORD
64 * N and then "executes" the next 64 DWORDs.
- This field is auto-incremented when VPC_SO_PROG is
+ This field is auto-incremented when VPC_SO_MAPPING_PORT is
written to.
-->
<bitfield name="ADDR" low="0" high="7" type="hex"/>
@@ -4206,7 +2054,7 @@ to upconvert to 32b float internally?
<bitfield name="RESET" pos="16" type="boolean"/>
</reg32>
<!-- special register, write multiple times to load SO program (not readable) -->
- <reg32 offset="0x9217" name="VPC_SO_PROG" usage="rp_blit">
+ <reg32 offset="0x9217" name="VPC_SO_MAPPING_PORT" usage="rp_blit">
<bitfield name="A_BUF" low="0" high="1" type="uint"/>
<bitfield name="A_OFF" low="2" high="10" shr="2" type="uint"/>
<bitfield name="A_EN" pos="11" type="boolean"/>
@@ -4215,7 +2063,7 @@ to upconvert to 32b float internally?
<bitfield name="B_EN" pos="23" type="boolean"/>
</reg32>
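Putting the two registers together: per the docs, a driver points VPC_SO_MAPPING_WPTR at a DWORD slot (64 slots per stream) and then streams entries into VPC_SO_MAPPING_PORT, which auto-increments the write pointer. A minimal sketch under those assumptions (the packing macros and the write_reg stub are illustrative):

    #include <stdint.h>

    #define REG_VPC_SO_MAPPING_WPTR 0x9216
    #define REG_VPC_SO_MAPPING_PORT 0x9217

    /* Field packing per the XML above. */
    #define WPTR_ADDR(x)      ((uint32_t)(x) & 0xff)
    #define PORT_A_BUF(x)     ((uint32_t)(x) & 0x3)
    #define PORT_A_OFF(bytes) (((((uint32_t)(bytes)) >> 2) & 0x1ff) << 2) /* shr=2 */
    #define PORT_A_EN         (1u << 11)

    static void write_reg(uint32_t reg, uint32_t val)
    { (void)reg; (void)val; /* stub emitter */ }

    /* Load a trivial SO program for stream N: one entry writing to
     * buffer 0 at byte offset 0. Each PORT write advances the WPTR. */
    static void load_so_program(unsigned stream)
    {
        write_reg(REG_VPC_SO_MAPPING_WPTR, WPTR_ADDR(64 * stream));
        write_reg(REG_VPC_SO_MAPPING_PORT,
                  PORT_A_BUF(0) | PORT_A_OFF(0) | PORT_A_EN);
        /* ...further PORT writes fill out the rest of the entry. */
    }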
- <reg64 offset="0x9218" name="VPC_SO_STREAM_COUNTS" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9218" name="VPC_SO_QUERY_BASE" type="waddress" align="32" usage="cmd"/>
<array offset="0x921a" name="VPC_SO" stride="7" length="4" usage="cmd">
<reg64 offset="0" name="BUFFER_BASE" type="waddress" align="32"/>
@@ -4225,14 +2073,14 @@ to upconvert to 32b float internally?
<reg64 offset="5" name="FLUSH_BASE" type="waddress" align="32"/>
</array>
- <reg32 offset="0x9236" name="VPC_POINT_COORD_INVERT" usage="cmd">
+ <reg32 offset="0x9236" name="VPC_REPLACE_MODE_CNTL" usage="cmd">
<bitfield name="INVERT" pos="0" type="boolean"/>
</reg32>
<!-- 0x9237-0x92ff invalid -->
<!-- always 0x0 ? -->
<reg32 offset="0x9300" name="VPC_UNKNOWN_9300" low="0" high="2" usage="cmd"/>
- <bitset name="a6xx_vpc_xs_pack" inline="yes">
+ <bitset name="a6xx_vpc_xs_cntl" inline="yes">
<doc>
num of varyings plus four for gl_Position (plus one if gl_PointSize)
plus # of transform-feedback (streamout) varyings if using the
@@ -4249,11 +2097,11 @@ to upconvert to 32b float internally?
</doc>
</bitfield>
</bitset>
- <reg32 offset="0x9301" name="VPC_VS_PACK" type="a6xx_vpc_xs_pack" usage="rp_blit"/>
- <reg32 offset="0x9302" name="VPC_GS_PACK" type="a6xx_vpc_xs_pack" usage="rp_blit"/>
- <reg32 offset="0x9303" name="VPC_DS_PACK" type="a6xx_vpc_xs_pack" usage="rp_blit"/>
+ <reg32 offset="0x9301" name="VPC_VS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9302" name="VPC_GS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9303" name="VPC_DS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9304" name="VPC_CNTL_0" usage="rp_blit">
+ <reg32 offset="0x9304" name="VPC_PS_CNTL" usage="rp_blit">
<bitfield name="NUMNONPOSVAR" low="0" high="7" type="uint"/>
<!-- for fixed-function (i.e. no GS) gl_PrimitiveID in FS -->
<bitfield name="PRIMIDLOC" low="8" high="15" type="uint"/>
@@ -4272,7 +2120,7 @@ to upconvert to 32b float internally?
</bitfield>
</reg32>
- <reg32 offset="0x9305" name="VPC_SO_STREAM_CNTL" usage="rp_blit">
+ <reg32 offset="0x9305" name="VPC_SO_CNTL" usage="rp_blit">
<!--
It's offset by 1, and 0 means "disabled"
-->
@@ -4282,19 +2130,19 @@ to upconvert to 32b float internally?
<bitfield name="BUF3_STREAM" low="9" high="11" type="uint"/>
<bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
</reg32>
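Since BUFn_STREAM is biased by one (0 means "disabled"), the value written is stream + 1, and STREAM_ENABLE is a mask over streams with at least one buffer bound. A sketch assuming only those two facts from the comments above:

    #include <stdint.h>

    /* Compose VPC_SO_CNTL: buf_stream[i] is the API stream feeding
     * buffer i, or -1 if the buffer is unused. */
    static uint32_t vpc_so_cntl(const int buf_stream[4])
    {
        static const unsigned shift[4] = { 0, 3, 6, 9 }; /* BUF0..BUF3_STREAM */
        uint32_t v = 0, enable = 0;
        for (int i = 0; i < 4; i++) {
            if (buf_stream[i] < 0)
                continue;                     /* field stays 0: disabled */
            v |= (uint32_t)(buf_stream[i] + 1) << shift[i];
            enable |= 1u << buf_stream[i];    /* one bit per active stream */
        }
        return v | (enable << 15);            /* STREAM_ENABLE low=15 */
    }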
- <reg32 offset="0x9306" name="VPC_SO_DISABLE" usage="rp_blit">
+ <reg32 offset="0x9306" name="VPC_SO_OVERRIDE" usage="rp_blit">
<bitfield name="DISABLE" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0x9307" name="VPC_POLYGON_MODE2" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9307" name="VPC_PS_RAST_CNTL" variants="A6XX-" usage="rp_blit"> <!-- A702 + A7xx -->
<bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
</reg32>
- <reg32 offset="0x9308" name="VPC_ATTR_BUF_SIZE_GMEM" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9308" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A7XX-" usage="rp_blit">
<bitfield name="SIZE_GMEM" low="0" high="31"/>
</reg32>
- <reg32 offset="0x9309" name="VPC_ATTR_BUF_BASE_GMEM" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9309" name="VPC_ATTR_BUF_GMEM_BASE" variants="A7XX-" usage="rp_blit">
<bitfield name="BASE_GMEM" low="0" high="31"/>
</reg32>
- <reg32 offset="0x9b09" name="PC_ATTR_BUF_SIZE_GMEM" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9b09" name="PC_ATTR_BUF_GMEM_SIZE" variants="A7XX-" usage="rp_blit">
<bitfield name="SIZE_GMEM" low="0" high="31"/>
</reg32>
@@ -4311,15 +2159,15 @@ to upconvert to 32b float internally?
<!-- TODO: regs from 0x9624-0x963a -->
<!-- 0x963b-0x97ff invalid -->
- <reg32 offset="0x9800" name="PC_TESS_NUM_VERTEX" low="0" high="5" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x9800" name="PC_HS_PARAM_0" low="0" high="5" type="uint" usage="rp_blit"/>
<!-- always 0x0 ? -->
- <reg32 offset="0x9801" name="PC_HS_INPUT_SIZE" usage="rp_blit">
+ <reg32 offset="0x9801" name="PC_HS_PARAM_1" usage="rp_blit">
<bitfield name="SIZE" low="0" high="10" type="uint"/>
<bitfield name="UNK13" pos="13"/>
</reg32>
- <reg32 offset="0x9802" name="PC_TESS_CNTL" usage="rp_blit">
+ <reg32 offset="0x9802" name="PC_DS_PARAM" usage="rp_blit">
<bitfield name="SPACING" low="0" high="1" type="a6xx_tess_spacing"/>
<bitfield name="OUTPUT" low="2" high="3" type="a6xx_tess_output"/>
</reg32>
@@ -4334,7 +2182,7 @@ to upconvert to 32b float internally?
</reg32>
<!-- New in a6xx gen3+ -->
- <reg32 offset="0x9808" name="PC_SO_STREAM_CNTL" usage="rp_blit">
+ <reg32 offset="0x9808" name="PC_DGEN_SO_CNTL" usage="rp_blit">
<bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
</reg32>
@@ -4344,15 +2192,15 @@ to upconvert to 32b float internally?
<!-- 0x980b-0x983f invalid -->
<!-- 0x9840 - 0x9842 are not readable -->
- <reg32 offset="0x9840" name="PC_DRAW_CMD">
+ <reg32 offset="0x9840" name="PC_DRAW_INITIATOR">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0x9841" name="PC_DISPATCH_CMD">
+ <reg32 offset="0x9841" name="PC_KERNEL_INITIATOR">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0x9842" name="PC_EVENT_CMD">
+ <reg32 offset="0x9842" name="PC_EVENT_INITIATOR">
<!-- I think only the low bit is actually used? -->
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
@@ -4367,27 +2215,27 @@ to upconvert to 32b float internally?
<!-- 0x9843-0x997f invalid -->
- <reg32 offset="0x9981" name="PC_POLYGON_MODE" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0x9981" name="PC_DGEN_RAST_CNTL" variants="A6XX" usage="rp_blit">
<bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
</reg32>
- <reg32 offset="0x9809" name="PC_POLYGON_MODE" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9809" name="PC_DGEN_RAST_CNTL" variants="A7XX-" usage="rp_blit">
<bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
</reg32>
- <reg32 offset="0x9980" name="PC_RASTER_CNTL" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0x9980" name="VPC_RAST_STREAM_CNTL" variants="A6XX" usage="rp_blit">
<!-- which stream to send to GRAS -->
<bitfield name="STREAM" low="0" high="1" type="uint"/>
<!-- discard primitives before rasterization -->
<bitfield name="DISCARD" pos="2" type="boolean"/>
</reg32>
- <!-- VPC_RASTER_CNTL -->
- <reg32 offset="0x9107" name="PC_RASTER_CNTL" variants="A7XX-" usage="rp_blit">
+ <!-- VPC_RAST_STREAM_CNTL -->
+ <reg32 offset="0x9107" name="VPC_RAST_STREAM_CNTL" variants="A7XX-" usage="rp_blit">
<!-- which stream to send to GRAS -->
<bitfield name="STREAM" low="0" high="1" type="uint"/>
<!-- discard primitives before rasterization -->
<bitfield name="DISCARD" pos="2" type="boolean"/>
</reg32>
- <reg32 offset="0x9317" name="PC_RASTER_CNTL_V2" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9317" name="VPC_RAST_STREAM_CNTL_V2" variants="A7XX-" usage="rp_blit">
<!-- which stream to send to GRAS -->
<bitfield name="STREAM" low="0" high="1" type="uint"/>
<!-- discard primitives before rasterization -->
@@ -4397,17 +2245,17 @@ to upconvert to 32b float internally?
<!-- Both are a750+.
Probably needed to correctly overlap execution of several draws.
-->
- <reg32 offset="0x9885" name="PC_TESS_PARAM_SIZE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9885" name="PC_HS_BUFFER_SIZE" variants="A7XX-" usage="cmd"/>
<!-- Blob adds a bit more space {0x10, 0x20, 0x30, 0x40} bytes, but the meaning of
this additional space is not known.
-->
- <reg32 offset="0x9886" name="PC_TESS_FACTOR_SIZE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9886" name="PC_TF_BUFFER_SIZE" variants="A7XX-" usage="cmd"/>
<!-- 0x9982-0x9aff invalid -->
- <reg32 offset="0x9b00" name="PC_PRIMITIVE_CNTL_0" type="a6xx_primitive_cntl_0" usage="rp_blit"/>
+ <reg32 offset="0x9b00" name="PC_CNTL" type="a6xx_pc_cntl" usage="rp_blit"/>
- <bitset name="a6xx_xs_out_cntl" inline="yes">
+ <bitset name="a6xx_pc_xs_cntl" inline="yes">
<doc>
num of varyings plus four for gl_Position (plus one if gl_PointSize)
plus # of transform-feedback (streamout) varyings if using the
@@ -4417,19 +2265,19 @@ to upconvert to 32b float internally?
<bitfield name="PSIZE" pos="8" type="boolean"/>
<bitfield name="LAYER" pos="9" type="boolean"/>
<bitfield name="VIEW" pos="10" type="boolean"/>
- <!-- note: PC_VS_OUT_CNTL doesn't have the PRIMITIVE_ID bit -->
+ <!-- note: PC_VS_CNTL doesn't have the PRIMITIVE_ID bit -->
<bitfield name="PRIMITIVE_ID" pos="11" type="boolean"/>
<bitfield name="CLIP_MASK" low="16" high="23" type="uint"/>
<bitfield name="SHADINGRATE" pos="24" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x9b01" name="PC_VS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b02" name="PC_GS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b01" name="PC_VS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b02" name="PC_GS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
<!-- since HS can't output anything, only PRIMITIVE_ID is valid -->
- <reg32 offset="0x9b03" name="PC_HS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b04" name="PC_DS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b03" name="PC_HS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b04" name="PC_DS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b05" name="PC_PRIMITIVE_CNTL_5" type="a6xx_primitive_cntl_5" usage="rp_blit"/>
+ <reg32 offset="0x9b05" name="PC_GS_PARAM_0" type="a6xx_gs_param_0" usage="rp_blit"/>
<reg32 offset="0x9b06" name="PC_PRIMITIVE_CNTL_6" variants="A6XX" usage="rp_blit">
<doc>
@@ -4438,9 +2286,9 @@ to upconvert to 32b float internally?
<bitfield name="STRIDE_IN_VPC" low="0" high="10" type="uint"/>
</reg32>
- <reg32 offset="0x9b07" name="PC_MULTIVIEW_CNTL" type="a6xx_multiview_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b07" name="PC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" usage="rp_blit"/>
<!-- mask of enabled views, doesn't exist on A630 -->
- <reg32 offset="0x9b08" name="PC_MULTIVIEW_MASK" type="hex" low="0" high="15" usage="rp_blit"/>
+ <reg32 offset="0x9b08" name="PC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" usage="rp_blit"/>
<!-- 0x9b09-0x9bff invalid -->
<reg32 offset="0x9c00" name="PC_2D_EVENT_CMD">
<!-- special register (but note first 8 bits can be written/read) -->
@@ -4451,31 +2299,31 @@ to upconvert to 32b float internally?
<!-- TODO: 0x9e00-0xa000 range incomplete -->
<reg32 offset="0x9e00" name="PC_DBG_ECO_CNTL"/>
<reg32 offset="0x9e01" name="PC_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
- <reg64 offset="0x9e04" name="PC_DRAW_INDX_BASE"/>
- <reg32 offset="0x9e06" name="PC_DRAW_FIRST_INDX" type="uint"/>
- <reg32 offset="0x9e07" name="PC_DRAW_MAX_INDICES" type="uint"/>
- <reg64 offset="0x9e08" name="PC_TESSFACTOR_ADDR" variants="A6XX" type="waddress" align="32" usage="cmd"/>
- <reg64 offset="0x9810" name="PC_TESSFACTOR_ADDR" variants="A7XX-" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9e04" name="PC_DMA_BASE"/>
+ <reg32 offset="0x9e06" name="PC_DMA_OFFSET" type="uint"/>
+ <reg32 offset="0x9e07" name="PC_DMA_SIZE" type="uint"/>
+ <reg64 offset="0x9e08" name="PC_TESS_BASE" variants="A6XX" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9810" name="PC_TESS_BASE" variants="A7XX-" type="waddress" align="32" usage="cmd"/>
- <reg32 offset="0x9e0b" name="PC_DRAW_INITIATOR" type="vgt_draw_initiator_a4xx">
+ <reg32 offset="0x9e0b" name="PC_DRAWCALL_CNTL" type="vgt_draw_initiator_a4xx">
<doc>
Possibly not really "initiating" the draw but the layout is similar
to VGT_DRAW_INITIATOR on older gens
</doc>
</reg32>
- <reg32 offset="0x9e0c" name="PC_DRAW_NUM_INSTANCES" type="uint"/>
- <reg32 offset="0x9e0d" name="PC_DRAW_NUM_INDICES" type="uint"/>
+ <reg32 offset="0x9e0c" name="PC_DRAWCALL_INSTANCE_NUM" type="uint"/>
+ <reg32 offset="0x9e0d" name="PC_DRAWCALL_SIZE" type="uint"/>
<!-- These match the contents of CP_SET_BIN_DATA (not written directly) -->
- <reg32 offset="0x9e11" name="PC_VSTREAM_CONTROL">
+ <reg32 offset="0x9e11" name="PC_VIS_STREAM_CNTL">
<bitfield name="UNK0" low="0" high="15"/>
<bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
<bitfield name="VSC_N" low="22" high="26" type="uint"/>
</reg32>
- <reg64 offset="0x9e12" name="PC_BIN_PRIM_STRM" type="waddress" align="32"/>
- <reg64 offset="0x9e14" name="PC_BIN_DRAW_STRM" type="waddress" align="32"/>
+ <reg64 offset="0x9e12" name="PC_PVIS_STREAM_BIN_BASE" type="waddress" align="32"/>
+ <reg64 offset="0x9e14" name="PC_DVIS_STREAM_BIN_BASE" type="waddress" align="32"/>
- <reg32 offset="0x9e1c" name="PC_VISIBILITY_OVERRIDE">
+ <reg32 offset="0x9e1c" name="PC_DRAWCALL_CNTL_OVERRIDE">
<doc>Written by CP_SET_VISIBILITY_OVERRIDE handler</doc>
<bitfield name="OVERRIDE" pos="0" type="boolean"/>
</reg32>
@@ -4488,18 +2336,18 @@ to upconvert to 32b float internally?
<!-- always 0x0 -->
<reg32 offset="0x9e72" name="PC_UNKNOWN_9E72" usage="cmd"/>
- <reg32 offset="0xa000" name="VFD_CONTROL_0" usage="rp_blit">
+ <reg32 offset="0xa000" name="VFD_CNTL_0" usage="rp_blit">
<bitfield name="FETCH_CNT" low="0" high="5" type="uint"/>
<bitfield name="DECODE_CNT" low="8" high="13" type="uint"/>
</reg32>
- <reg32 offset="0xa001" name="VFD_CONTROL_1" usage="rp_blit">
+ <reg32 offset="0xa001" name="VFD_CNTL_1" usage="rp_blit">
<bitfield name="REGID4VTX" low="0" high="7" type="a3xx_regid"/>
<bitfield name="REGID4INST" low="8" high="15" type="a3xx_regid"/>
<bitfield name="REGID4PRIMID" low="16" high="23" type="a3xx_regid"/>
<!-- only used for VS in non-multi-position-output case -->
<bitfield name="REGID4VIEWID" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa002" name="VFD_CONTROL_2" usage="rp_blit">
+ <reg32 offset="0xa002" name="VFD_CNTL_2" usage="rp_blit">
<bitfield name="REGID_HSRELPATCHID" low="0" high="7" type="a3xx_regid">
<doc>
This is the ID of the current patch within the
@@ -4512,32 +2360,32 @@ to upconvert to 32b float internally?
</bitfield>
<bitfield name="REGID_INVOCATIONID" low="8" high="15" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa003" name="VFD_CONTROL_3" usage="rp_blit">
+ <reg32 offset="0xa003" name="VFD_CNTL_3" usage="rp_blit">
<bitfield name="REGID_DSPRIMID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="REGID_DSRELPATCHID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="REGID_TESSX" low="16" high="23" type="a3xx_regid"/>
<bitfield name="REGID_TESSY" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa004" name="VFD_CONTROL_4" usage="rp_blit">
+ <reg32 offset="0xa004" name="VFD_CNTL_4" usage="rp_blit">
<bitfield name="UNK0" low="0" high="7" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa005" name="VFD_CONTROL_5" usage="rp_blit">
+ <reg32 offset="0xa005" name="VFD_CNTL_5" usage="rp_blit">
<bitfield name="REGID_GSHEADER" low="0" high="7" type="a3xx_regid"/>
<bitfield name="UNK8" low="8" high="15" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa006" name="VFD_CONTROL_6" usage="rp_blit">
+ <reg32 offset="0xa006" name="VFD_CNTL_6" usage="rp_blit">
<!--
True if gl_PrimitiveID is read via the FS
-->
<bitfield name="PRIMID4PSEN" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0xa007" name="VFD_MODE_CNTL" usage="cmd">
+ <reg32 offset="0xa007" name="VFD_RENDER_MODE" usage="cmd">
<bitfield name="RENDER_MODE" low="0" high="2" type="a6xx_render_mode"/>
</reg32>
- <reg32 offset="0xa008" name="VFD_MULTIVIEW_CNTL" type="a6xx_multiview_cntl" usage="rp_blit"/>
- <reg32 offset="0xa009" name="VFD_ADD_OFFSET" usage="cmd">
+ <reg32 offset="0xa008" name="VFD_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" usage="rp_blit"/>
+ <reg32 offset="0xa009" name="VFD_MODE_CNTL" usage="cmd">
<!-- add VFD_INDEX_OFFSET to REGID4VTX -->
<bitfield name="VERTEX" pos="0" type="boolean"/>
<!-- add VFD_INSTANCE_START_OFFSET to REGID4INST -->
@@ -4546,14 +2394,14 @@ to upconvert to 32b float internally?
<reg32 offset="0xa00e" name="VFD_INDEX_OFFSET" usage="rp_blit"/>
<reg32 offset="0xa00f" name="VFD_INSTANCE_START_OFFSET" usage="rp_blit"/>
- <array offset="0xa010" name="VFD_FETCH" stride="4" length="32" usage="rp_blit">
+ <array offset="0xa010" name="VFD_VERTEX_BUFFER" stride="4" length="32" usage="rp_blit">
<reg64 offset="0x0" name="BASE" type="address" align="1"/>
<reg32 offset="0x2" name="SIZE" type="uint"/>
<reg32 offset="0x3" name="STRIDE" low="0" high="11" type="uint"/>
</array>
- <array offset="0xa090" name="VFD_DECODE" stride="2" length="32" usage="rp_blit">
+ <array offset="0xa090" name="VFD_FETCH_INSTR" stride="2" length="32" usage="rp_blit">
<reg32 offset="0x0" name="INSTR">
- <!-- IDX and byte OFFSET into VFD_FETCH -->
+ <!-- IDX and byte OFFSET into VFD_VERTEX_BUFFER -->
<bitfield name="IDX" low="0" high="4" type="uint"/>
<bitfield name="OFFSET" low="5" high="16"/>
<bitfield name="INSTANCED" pos="17" type="boolean"/>
@@ -4573,7 +2421,7 @@ to upconvert to 32b float internally?
<reg32 offset="0xa0f8" name="VFD_POWER_CNTL" low="0" high="2" usage="rp_blit"/>
- <reg32 offset="0xa600" name="VFD_UNKNOWN_A600" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa600" name="VFD_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xa601" name="VFD_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
<array offset="0xa610" name="VFD_PERFCTR_VFD_SEL" stride="1" length="8" variants="A6XX"/>
@@ -4588,7 +2436,7 @@ to upconvert to 32b float internally?
<value value="1" name="THREAD128"/>
</enum>
- <bitset name="a6xx_sp_xs_ctrl_reg0" inline="yes">
+ <bitset name="a6xx_sp_xs_cntl_0" inline="yes">
<!-- if set to SINGLE, only use 1 concurrent wave on each SP -->
<bitfield name="THREADMODE" pos="0" type="a3xx_threadmode"/>
<!--
@@ -4620,7 +2468,7 @@ to upconvert to 32b float internally?
-->
<bitfield name="BINDLESS_TEX" pos="0" type="boolean"/>
<bitfield name="BINDLESS_SAMP" pos="1" type="boolean"/>
- <bitfield name="BINDLESS_IBO" pos="2" type="boolean"/>
+ <bitfield name="BINDLESS_UAV" pos="2" type="boolean"/>
<bitfield name="BINDLESS_UBO" pos="3" type="boolean"/>
<bitfield name="ENABLED" pos="8" type="boolean"/>
@@ -4630,17 +2478,17 @@ to upconvert to 32b float internally?
-->
<bitfield name="NTEX" low="9" high="16" type="uint"/>
<bitfield name="NSAMP" low="17" high="21" type="uint"/>
- <bitfield name="NIBO" low="22" high="28" type="uint"/>
+ <bitfield name="NUAV" low="22" high="28" type="uint"/>
</bitset>
- <bitset name="a6xx_sp_xs_prim_cntl" inline="yes">
+ <bitset name="a6xx_sp_xs_output_cntl" inline="yes">
<!-- # of VS outputs including pos/psize -->
<bitfield name="OUT" low="0" high="5" type="uint"/>
<!-- FLAGS_REGID only for GS -->
<bitfield name="FLAGS_REGID" low="6" high="13" type="a3xx_regid"/>
</bitset>
- <reg32 offset="0xa800" name="SP_VS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa800" name="SP_VS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<!--
This field actually controls all geometry stages. TCS, TES, and
GS must have the same mergedregs setting as VS.
@@ -4665,10 +2513,10 @@ to upconvert to 32b float internally?
</reg32>
<!-- bitmask of true/false conditions for VS brac.N instructions,
bit N corresponds to brac.N -->
- <reg32 offset="0xa801" name="SP_VS_BRANCH_COND" type="hex"/>
+ <reg32 offset="0xa801" name="SP_VS_BOOLEAN_CF_MASK" type="hex"/>
<!-- # of VS outputs including pos/psize -->
- <reg32 offset="0xa802" name="SP_VS_PRIMITIVE_CNTL" type="a6xx_sp_xs_prim_cntl" usage="rp_blit"/>
- <array offset="0xa803" name="SP_VS_OUT" stride="1" length="16" usage="rp_blit">
+ <reg32 offset="0xa802" name="SP_VS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit"/>
+ <array offset="0xa803" name="SP_VS_OUTPUT" stride="1" length="16" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
@@ -4678,12 +2526,12 @@ to upconvert to 32b float internally?
</array>
<!--
Starting with a5xx, position/psize outputs from shader end up in the
- SP_VS_OUT map, with highest OUTLOCn position. (Generally they are
+ SP_VS_OUTPUT map, with highest OUTLOCn position. (Generally they are
the last entries too, except when gl_PointCoord is used: the blob
inserts an extra varying after, but with a lower OUTLOC position.)
If present, psize is last, preceded by position.
-->
- <array offset="0xa813" name="SP_VS_VPC_DST" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa813" name="SP_VS_VPC_DEST" stride="1" length="8" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
<bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
@@ -4752,7 +2600,7 @@ to upconvert to 32b float internally?
</bitfield>
</bitset>
- <bitset name="a6xx_sp_xs_pvt_mem_hw_stack_offset" inline="yes">
+ <bitset name="a6xx_sp_xs_pvt_mem_stack_offset" inline="yes">
<doc>
This seems to be the equivalent of HWSTACKOFFSET in
a3xx. The ldp/stp offset formula above isn't affected by
@@ -4763,18 +2611,18 @@ to upconvert to 32b float internally?
<bitfield name="OFFSET" low="0" high="18" shr="11"/>
</bitset>
- <reg32 offset="0xa81b" name="SP_VS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa81c" name="SP_VS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa81b" name="SP_VS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa81c" name="SP_VS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa81e" name="SP_VS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa81f" name="SP_VS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg64 offset="0xa81f" name="SP_VS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa821" name="SP_VS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa822" name="SP_VS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa822" name="SP_VS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa823" name="SP_VS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xa824" name="SP_VS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa825" name="SP_VS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa82d" name="SP_VS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa824" name="SP_VS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa825" name="SP_VS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa82d" name="SP_VS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xa830" name="SP_HS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa830" name="SP_HS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<!-- There is no mergedregs bit, that comes from the VS. -->
<bitfield name="EARLYPREAMBLE" pos="20" type="boolean"/>
</reg32>
@@ -4782,32 +2630,32 @@ to upconvert to 32b float internally?
Total size of local storage in dwords divided by the wave size.
The maximum value is 64. With the wave size being always 64 for HS,
the maximum size of local storage should be:
- 64 (wavesize) * 64 (SP_HS_WAVE_INPUT_SIZE) * 4 = 16k
+ 64 (wavesize) * 64 (SP_HS_CNTL_1) * 4 = 16k
-->
- <reg32 offset="0xa831" name="SP_HS_WAVE_INPUT_SIZE" low="0" high="7" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa832" name="SP_HS_BRANCH_COND" type="hex" usage="rp_blit"/>
+ <reg32 offset="0xa831" name="SP_HS_CNTL_1" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa832" name="SP_HS_BOOLEAN_CF_MASK" type="hex" usage="rp_blit"/>
<!-- TODO: exact same layout as 0xa81b-0xa825 -->
- <reg32 offset="0xa833" name="SP_HS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa834" name="SP_HS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa833" name="SP_HS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa834" name="SP_HS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa836" name="SP_HS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa837" name="SP_HS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg64 offset="0xa837" name="SP_HS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa839" name="SP_HS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa83a" name="SP_HS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa83a" name="SP_HS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa83b" name="SP_HS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xa83c" name="SP_HS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa83d" name="SP_HS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa82f" name="SP_HS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa83c" name="SP_HS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa83d" name="SP_HS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa82f" name="SP_HS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xa840" name="SP_DS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa840" name="SP_DS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<!-- There is no mergedregs bit, that comes from the VS. -->
<bitfield name="EARLYPREAMBLE" pos="20" type="boolean"/>
</reg32>
- <reg32 offset="0xa841" name="SP_DS_BRANCH_COND" type="hex"/>
+ <reg32 offset="0xa841" name="SP_DS_BOOLEAN_CF_MASK" type="hex"/>
<!-- TODO: exact same layout as 0xa802-0xa81a -->
- <reg32 offset="0xa842" name="SP_DS_PRIMITIVE_CNTL" type="a6xx_sp_xs_prim_cntl" usage="rp_blit"/>
- <array offset="0xa843" name="SP_DS_OUT" stride="1" length="16" usage="rp_blit">
+ <reg32 offset="0xa842" name="SP_DS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit"/>
+ <array offset="0xa843" name="SP_DS_OUTPUT" stride="1" length="16" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
@@ -4815,7 +2663,7 @@ to upconvert to 32b float internally?
<bitfield name="B_COMPMASK" low="24" high="27" type="hex"/>
</reg32>
</array>
- <array offset="0xa853" name="SP_DS_VPC_DST" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa853" name="SP_DS_VPC_DEST" stride="1" length="8" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
<bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
@@ -4825,22 +2673,22 @@ to upconvert to 32b float internally?
</array>
<!-- TODO: exact same layout as 0xa81b-0xa825 -->
- <reg32 offset="0xa85b" name="SP_DS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa85c" name="SP_DS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa85b" name="SP_DS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa85c" name="SP_DS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa85e" name="SP_DS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa85f" name="SP_DS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg64 offset="0xa85f" name="SP_DS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa861" name="SP_DS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa862" name="SP_DS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa862" name="SP_DS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa863" name="SP_DS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xa864" name="SP_DS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa865" name="SP_DS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa868" name="SP_DS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa864" name="SP_DS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa865" name="SP_DS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa868" name="SP_DS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xa870" name="SP_GS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa870" name="SP_GS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<!-- There is no mergedregs bit, that comes from the VS. -->
<bitfield name="EARLYPREAMBLE" pos="20" type="boolean"/>
</reg32>
- <reg32 offset="0xa871" name="SP_GS_PRIM_SIZE" low="0" high="7" type="uint" usage="rp_blit">
+ <reg32 offset="0xa871" name="SP_GS_CNTL_1" low="0" high="7" type="uint" usage="rp_blit">
<doc>
Normally the size of the output of the last stage in
dwords. It should be programmed as follows:
@@ -4854,11 +2702,11 @@ to upconvert to 32b float internally?
doesn't matter in practice.
</doc>
</reg32>
- <reg32 offset="0xa872" name="SP_GS_BRANCH_COND" type="hex" usage="rp_blit"/>
+ <reg32 offset="0xa872" name="SP_GS_BOOLEAN_CF_MASK" type="hex" usage="rp_blit"/>
<!-- TODO: exact same layout as 0xa802-0xa81a -->
- <reg32 offset="0xa873" name="SP_GS_PRIMITIVE_CNTL" type="a6xx_sp_xs_prim_cntl" usage="rp_blit"/>
- <array offset="0xa874" name="SP_GS_OUT" stride="1" length="16" usage="rp_blit">
+ <reg32 offset="0xa873" name="SP_GS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit"/>
+ <array offset="0xa874" name="SP_GS_OUTPUT" stride="1" length="16" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
@@ -4867,7 +2715,7 @@ to upconvert to 32b float internally?
</reg32>
</array>
- <array offset="0xa884" name="SP_GS_VPC_DST" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa884" name="SP_GS_VPC_DEST" stride="1" length="8" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
<bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
@@ -4877,29 +2725,29 @@ to upconvert to 32b float internally?
</array>
<!-- TODO: exact same layout as 0xa81b-0xa825 -->
- <reg32 offset="0xa88c" name="SP_GS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa88d" name="SP_GS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa88c" name="SP_GS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa88d" name="SP_GS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa88f" name="SP_GS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa890" name="SP_GS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg64 offset="0xa890" name="SP_GS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa892" name="SP_GS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa893" name="SP_GS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa893" name="SP_GS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa894" name="SP_GS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xa895" name="SP_GS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa896" name="SP_GS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa899" name="SP_GS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
-
- <reg64 offset="0xa8a0" name="SP_VS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa8a2" name="SP_HS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa8a4" name="SP_DS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa8a6" name="SP_GS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa8a8" name="SP_VS_TEX_CONST" type="address" align="64" usage="cmd"/>
- <reg64 offset="0xa8aa" name="SP_HS_TEX_CONST" type="address" align="64" usage="cmd"/>
- <reg64 offset="0xa8ac" name="SP_DS_TEX_CONST" type="address" align="64" usage="cmd"/>
- <reg64 offset="0xa8ae" name="SP_GS_TEX_CONST" type="address" align="64" usage="cmd"/>
+ <reg32 offset="0xa895" name="SP_GS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa896" name="SP_GS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa899" name="SP_GS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
+
+ <reg64 offset="0xa8a0" name="SP_VS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a2" name="SP_HS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a4" name="SP_DS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a6" name="SP_GS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a8" name="SP_VS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa8aa" name="SP_HS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa8ac" name="SP_DS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa8ae" name="SP_GS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
<!-- TODO: 4 unknown bool registers 0xa8c0-0xa8c3 -->
- <reg32 offset="0xa980" name="SP_FS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa980" name="SP_PS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<bitfield name="THREADSIZE" pos="20" type="a6xx_threadsize"/>
<bitfield name="UNK21" pos="21" type="boolean"/>
<bitfield name="VARYING" pos="22" type="boolean"/>
@@ -4909,8 +2757,7 @@ to upconvert to 32b float internally?
fine derivatives and quad subgroup ops.
</doc>
</bitfield>
- <!-- note: vk blob uses bit24 -->
- <bitfield name="UNK24" pos="24" type="boolean"/>
+ <bitfield name="INOUTREGOVERLAP" pos="24" type="boolean"/>
<bitfield name="UNK25" pos="25" type="boolean"/>
<bitfield name="PIXLODENABLE" pos="26" type="boolean">
<doc>
@@ -4923,12 +2770,12 @@ to upconvert to 32b float internally?
<bitfield name="EARLYPREAMBLE" pos="28" type="boolean"/>
<bitfield name="MERGEDREGS" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="0xa981" name="SP_FS_BRANCH_COND" type="hex"/>
- <reg32 offset="0xa982" name="SP_FS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa983" name="SP_FS_OBJ_START" type="address" align="32" usage="rp_blit"/>
- <reg32 offset="0xa985" name="SP_FS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa986" name="SP_FS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
- <reg32 offset="0xa988" name="SP_FS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
+ <reg32 offset="0xa981" name="SP_PS_BOOLEAN_CF_MASK" type="hex"/>
+ <reg32 offset="0xa982" name="SP_PS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa983" name="SP_PS_BASE" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa985" name="SP_PS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
+ <reg64 offset="0xa986" name="SP_PS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa988" name="SP_PS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
<reg32 offset="0xa989" name="SP_BLEND_CNTL" usage="rp_blit">
<!-- per-mrt enable bit -->
@@ -4948,7 +2795,7 @@ to upconvert to 32b float internally?
<bitfield name="SRGB_MRT6" pos="6" type="boolean"/>
<bitfield name="SRGB_MRT7" pos="7" type="boolean"/>
</reg32>
- <reg32 offset="0xa98b" name="SP_FS_RENDER_COMPONENTS" usage="rp_blit">
+ <reg32 offset="0xa98b" name="SP_PS_OUTPUT_MASK" usage="rp_blit">
<bitfield name="RT0" low="0" high="3"/>
<bitfield name="RT1" low="4" high="7"/>
<bitfield name="RT2" low="8" high="11"/>
@@ -4958,17 +2805,17 @@ to upconvert to 32b float internally?
<bitfield name="RT6" low="24" high="27"/>
<bitfield name="RT7" low="28" high="31"/>
</reg32>
- <reg32 offset="0xa98c" name="SP_FS_OUTPUT_CNTL0" usage="rp_blit">
+ <reg32 offset="0xa98c" name="SP_PS_OUTPUT_CNTL" usage="rp_blit">
<bitfield name="DUAL_COLOR_IN_ENABLE" pos="0" type="boolean"/>
<bitfield name="DEPTH_REGID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="SAMPMASK_REGID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="STENCILREF_REGID" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa98d" name="SP_FS_OUTPUT_CNTL1" usage="rp_blit">
+ <reg32 offset="0xa98d" name="SP_PS_MRT_CNTL" usage="rp_blit">
<bitfield name="MRT" low="0" high="3" type="uint"/>
</reg32>
- <array offset="0xa98e" name="SP_FS_OUTPUT" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa98e" name="SP_PS_OUTPUT" stride="1" length="8" usage="rp_blit">
<doc>per MRT</doc>
<reg32 offset="0x0" name="REG">
<bitfield name="REGID" low="0" high="7" type="a3xx_regid"/>
@@ -4976,7 +2823,7 @@ to upconvert to 32b float internally?
</reg32>
</array>
- <array offset="0xa996" name="SP_FS_MRT" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa996" name="SP_PS_MRT" stride="1" length="8" usage="rp_blit">
<reg32 offset="0" name="REG">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
<bitfield name="COLOR_SINT" pos="8" type="boolean"/>
@@ -4985,7 +2832,7 @@ to upconvert to 32b float internally?
</reg32>
</array>
- <reg32 offset="0xa99e" name="SP_FS_PREFETCH_CNTL" usage="rp_blit">
+ <reg32 offset="0xa99e" name="SP_PS_INITIAL_TEX_LOAD_CNTL" usage="rp_blit">
<bitfield name="COUNT" low="0" high="2" type="uint"/>
<bitfield name="IJ_WRITE_DISABLE" pos="3" type="boolean"/>
<doc>
@@ -5002,7 +2849,7 @@ to upconvert to 32b float internally?
<!-- Blob never uses it -->
<bitfield name="CONSTSLOTID4COORD" low="16" high="24" type="uint" variants="A7XX-"/>
</reg32>
- <array offset="0xa99f" name="SP_FS_PREFETCH" stride="1" length="4" variants="A6XX" usage="rp_blit">
+ <array offset="0xa99f" name="SP_PS_INITIAL_TEX_LOAD" stride="1" length="4" variants="A6XX" usage="rp_blit">
<reg32 offset="0" name="CMD" variants="A6XX">
<bitfield name="SRC" low="0" high="6" type="uint"/>
<bitfield name="SAMP_ID" low="7" high="10" type="uint"/>
@@ -5016,7 +2863,7 @@ to upconvert to 32b float internally?
<bitfield name="CMD" low="29" high="31" type="a6xx_tex_prefetch_cmd"/>
</reg32>
</array>
- <array offset="0xa99f" name="SP_FS_PREFETCH" stride="1" length="4" variants="A7XX-" usage="rp_blit">
+ <array offset="0xa99f" name="SP_PS_INITIAL_TEX_LOAD" stride="1" length="4" variants="A7XX-" usage="rp_blit">
<reg32 offset="0" name="CMD" variants="A7XX-">
<bitfield name="SRC" low="0" high="6" type="uint"/>
<bitfield name="SAMP_ID" low="7" high="9" type="uint"/>
@@ -5028,22 +2875,23 @@ to upconvert to 32b float internally?
<bitfield name="CMD" low="26" high="29" type="a6xx_tex_prefetch_cmd"/>
</reg32>
</array>
- <array offset="0xa9a3" name="SP_FS_BINDLESS_PREFETCH" stride="1" length="4" usage="rp_blit">
+ <array offset="0xa9a3" name="SP_PS_INITIAL_TEX_INDEX" stride="1" length="4" usage="rp_blit">
<reg32 offset="0" name="CMD">
<bitfield name="SAMP_ID" low="0" high="15" type="uint"/>
<bitfield name="TEX_ID" low="16" high="31" type="uint"/>
</reg32>
</array>
- <reg32 offset="0xa9a7" name="SP_FS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa9a7" name="SP_PS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa9a8" name="SP_UNKNOWN_A9A8" low="0" high="16" usage="cmd"/> <!-- always 0x0 ? -->
- <reg32 offset="0xa9a9" name="SP_FS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa9a9" name="SP_PS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa9ab" name="SP_PS_UNKNOWN_A9AB" variants="A7XX-" usage="cmd"/>
<!-- TODO: unknown bool register at 0xa9aa, likely same as 0xa8c0-0xa8c3 but for FS -->
- <reg32 offset="0xa9b0" name="SP_CS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="cmd">
+ <reg32 offset="0xa9b0" name="SP_CS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="cmd">
<bitfield name="THREADSIZE" pos="20" type="a6xx_threadsize"/>
<!-- seems to make SP use fewer concurrent threads when possible? -->
<bitfield name="UNK21" pos="21" type="boolean"/>
@@ -5053,8 +2901,15 @@ to upconvert to 32b float internally?
<bitfield name="MERGEDREGS" pos="31" type="boolean"/>
</reg32>
+ <enum name="a6xx_const_ram_mode">
+ <value value="0x0" name="CONSTLEN_128"/>
+ <value value="0x1" name="CONSTLEN_192"/>
+ <value value="0x2" name="CONSTLEN_256"/>
+ <value value="0x3" name="CONSTLEN_512"/> <!-- a7xx only -->
+ </enum>
+
<!-- set for compute shaders -->
- <reg32 offset="0xa9b1" name="SP_CS_UNKNOWN_A9B1" usage="cmd">
+ <reg32 offset="0xa9b1" name="SP_CS_CNTL_1" usage="cmd">
<bitfield name="SHARED_SIZE" low="0" high="4" type="uint">
<doc>
If 0 - all 32k of shared storage is enabled, otherwise
@@ -5065,32 +2920,36 @@ to upconvert to 32b float internally?
always return 0)
</doc>
</bitfield>
- <bitfield name="UNK5" pos="5" type="boolean"/>
- <!-- always 1 ? -->
- <bitfield name="UNK6" pos="6" type="boolean"/>
+ <bitfield name="CONSTANTRAMMODE" low="5" high="6" type="a6xx_const_ram_mode">
+ <doc>
+ This defines the split between consts and local
+ memory in the Local Buffer. The programmed value
+ must be at least the actual CONSTLEN.
+ </doc>
+ </bitfield>
</reg32>
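A sketch of composing SP_CS_CNTL_1 from the two documented fields. Per the doc, CONSTANTRAMMODE only has to be at least the shader's actual CONSTLEN, so the natural choice is the smallest mode that fits; the helper names and the units (matching the enum value names) are assumptions:

    #include <stdint.h>

    enum const_ram_mode { CONSTLEN_128, CONSTLEN_192, CONSTLEN_256, CONSTLEN_512 };

    /* Smallest Local Buffer split that still covers CONSTLEN
     * (CONSTLEN_512 is a7xx-only, per the enum above). */
    static enum const_ram_mode pick_const_ram_mode(unsigned constlen, int is_a7xx)
    {
        if (constlen <= 128) return CONSTLEN_128;
        if (constlen <= 192) return CONSTLEN_192;
        if (constlen <= 256 || !is_a7xx) return CONSTLEN_256;
        return CONSTLEN_512;
    }

    static uint32_t sp_cs_cntl_1(uint32_t shared_size_field, enum const_ram_mode m)
    {
        /* SHARED_SIZE in bits 0..4, CONSTANTRAMMODE in bits 5..6. */
        return (shared_size_field & 0x1f) | ((uint32_t)m << 5);
    }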
- <reg32 offset="0xa9b2" name="SP_CS_BRANCH_COND" type="hex" usage="cmd"/>
- <reg32 offset="0xa9b3" name="SP_CS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="cmd"/>
- <reg64 offset="0xa9b4" name="SP_CS_OBJ_START" type="address" align="32" usage="cmd"/>
+ <reg32 offset="0xa9b2" name="SP_CS_BOOLEAN_CF_MASK" type="hex" usage="cmd"/>
+ <reg32 offset="0xa9b3" name="SP_CS_PROGRAM_COUNTER_OFFSET" type="uint" usage="cmd"/>
+ <reg64 offset="0xa9b4" name="SP_CS_BASE" type="address" align="32" usage="cmd"/>
<reg32 offset="0xa9b6" name="SP_CS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="cmd"/>
- <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_ADDR" align="32" usage="cmd"/>
+ <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_BASE" align="32" usage="cmd"/>
<reg32 offset="0xa9b9" name="SP_CS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="cmd"/>
- <reg32 offset="0xa9ba" name="SP_CS_TEX_COUNT" low="0" high="7" type="uint" usage="cmd"/>
+ <reg32 offset="0xa9ba" name="SP_CS_TSIZE" low="0" high="7" type="uint" usage="cmd"/>
<reg32 offset="0xa9bb" name="SP_CS_CONFIG" type="a6xx_sp_xs_config" usage="cmd"/>
- <reg32 offset="0xa9bc" name="SP_CS_INSTRLEN" low="0" high="27" type="uint" usage="cmd"/>
- <reg32 offset="0xa9bd" name="SP_CS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="cmd"/>
+ <reg32 offset="0xa9bc" name="SP_CS_INSTR_SIZE" low="0" high="27" type="uint" usage="cmd"/>
+ <reg32 offset="0xa9bd" name="SP_CS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="cmd"/>
<reg32 offset="0xa9be" name="SP_CS_UNKNOWN_A9BE" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xa9c5" name="SP_CS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9c5" name="SP_CS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
- <!-- new in a6xx gen4, matches HLSQ_CS_CNTL_0 -->
- <reg32 offset="0xa9c2" name="SP_CS_CNTL_0" usage="cmd">
+ <!-- new in a6xx gen4, matches SP_CS_CONST_CONFIG_0 -->
+ <reg32 offset="0xa9c2" name="SP_CS_WIE_CNTL_0" usage="cmd">
<bitfield name="WGIDCONSTID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="WGSIZECONSTID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="WGOFFSETCONSTID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="LOCALIDREGID" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <!-- new in a6xx gen4, matches HLSQ_CS_CNTL_1 -->
- <reg32 offset="0xa9c3" name="SP_CS_CNTL_1" variants="A6XX" usage="cmd">
+ <!-- new in a6xx gen4, matches SP_CS_WGE_CNTL -->
+ <reg32 offset="0xa9c3" name="SP_CS_WIE_CNTL_1" variants="A6XX" usage="cmd">
<!-- gl_LocalInvocationIndex -->
<bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
<!-- a650 has 6 "SP cores" (but 3 "SP"). this makes it use only
@@ -5102,7 +2961,18 @@ to upconvert to 32b float internally?
<bitfield name="THREADSIZE_SCALAR" pos="10" type="boolean"/>
</reg32>
- <reg32 offset="0xa9c3" name="SP_CS_CNTL_1" variants="A7XX-" usage="cmd">
+ <enum name="a7xx_workitem_rast_order">
+ <value value="0x0" name="WORKITEMRASTORDER_LINEAR"/>
+ <doc>
+ This is a fixed tiling, with 4x4 invocation outer tiles
+ containing 2x2 invocation inner tiles. The intent is to
+ improve cache locality with textures and images accessed
+ using gl_LocalInvocationID.
+ </doc>
+ <value value="0x1" name="WORKITEMRASTORDER_TILED"/>
+ </enum>
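To make the tiled order concrete, here is a hedged C sketch of one plausible
decode of an invocation index under WORKITEMRASTORDER_TILED; only the
4x4-outer/2x2-inner structure comes from the doc above, so the exact bit
assignments below are an assumption for illustration:

    /* Assumed decode: bits 0..1 pick the position inside a 2x2 inner tile,
     * bits 2..3 pick the inner tile inside a 4x4 outer tile, and the
     * remaining bits walk the outer tiles linearly across the workgroup
     * (wg_width is assumed to be a multiple of 4). */
    static void tiled_invocation_xy(unsigned idx, unsigned wg_width,
                                    unsigned *x, unsigned *y)
    {
        unsigned inner   = idx & 0x3;
        unsigned mid     = (idx >> 2) & 0x3;
        unsigned outer   = idx >> 4;
        unsigned tiles_x = wg_width / 4;

        *x = (outer % tiles_x) * 4 + (mid & 1) * 2 + (inner & 1);
        *y = (outer / tiles_x) * 4 + (mid >> 1) * 2 + (inner >> 1);
    }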
+
+ <reg32 offset="0xa9c3" name="SP_CS_WIE_CNTL_1" variants="A7XX-" usage="cmd">
<!-- gl_LocalInvocationIndex -->
<bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
<!-- Must match SP_CS_CTRL -->
@@ -5110,18 +2980,16 @@ to upconvert to 32b float internally?
<!-- 1 thread per wave (would hang if THREAD128 is also set) -->
<bitfield name="THREADSIZE_SCALAR" pos="9" type="boolean"/>
- <!-- Affects getone. If enabled, getone sometimes executed 1? less times
- than there are subgroups.
- -->
- <bitfield name="UNK15" pos="15" type="boolean"/>
+ <doc>How invocations/fibers within a workgroup are tiled.</doc>
+ <bitfield name="WORKITEMRASTORDER" pos="15" type="a7xx_workitem_rast_order"/>
</reg32>
<!-- TODO: two 64kb aligned addresses at a9d0/a9d2 -->
- <reg64 offset="0xa9e0" name="SP_FS_TEX_SAMP" type="address" align="16" usage="rp_blit"/>
- <reg64 offset="0xa9e2" name="SP_CS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa9e4" name="SP_FS_TEX_CONST" type="address" align="64" usage="rp_blit"/>
- <reg64 offset="0xa9e6" name="SP_CS_TEX_CONST" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa9e0" name="SP_PS_SAMPLER_BASE" type="address" align="16" usage="rp_blit"/>
+ <reg64 offset="0xa9e2" name="SP_CS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa9e4" name="SP_PS_TEXMEMOBJ_BASE" type="address" align="64" usage="rp_blit"/>
+ <reg64 offset="0xa9e6" name="SP_CS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
<enum name="a6xx_bindless_descriptor_size">
<doc>
@@ -5146,18 +3014,19 @@ to upconvert to 32b float internally?
</array>
<!--
- IBO state for compute shader:
+ UAV state for compute shader:
-->
- <reg64 offset="0xa9f2" name="SP_CS_IBO" type="address" align="16"/>
- <reg32 offset="0xaa00" name="SP_CS_IBO_COUNT" low="0" high="6" type="uint"/>
+ <reg64 offset="0xa9f2" name="SP_CS_UAV_BASE" type="address" align="16" variants="A6XX"/>
+ <reg64 offset="0xa9f8" name="SP_CS_UAV_BASE" type="address" align="16" variants="A7XX"/>
+ <reg32 offset="0xaa00" name="SP_CS_USIZE" low="0" high="6" type="uint"/>
<!-- Correlated with avgs/uvgs usage in FS -->
- <reg32 offset="0xaa01" name="SP_FS_VGPR_CONFIG" type="uint" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xaa01" name="SP_PS_VGS_CNTL" type="uint" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xaa02" name="SP_PS_ALIASED_COMPONENTS_CONTROL" variants="A7XX-" usage="cmd">
+ <reg32 offset="0xaa02" name="SP_PS_OUTPUT_CONST_CNTL" variants="A7XX-" usage="cmd">
<bitfield name="ENABLED" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0xaa03" name="SP_PS_ALIASED_COMPONENTS" variants="A7XX-" usage="cmd">
+ <reg32 offset="0xaa03" name="SP_PS_OUTPUT_CONST_MASK" variants="A7XX-" usage="cmd">
<doc>
Specify for which components the output color should be read
from alias, e.g. for:
@@ -5167,7 +3036,7 @@ to upconvert to 32b float internally?
alias.1.b32.0 r1.x, c4.x
alias.1.b32.0 r0.x, c0.x
- the SP_PS_ALIASED_COMPONENTS would be 0x00001111
+ the SP_PS_OUTPUT_CONST_MASK would be 0x00001111
</doc>
<bitfield name="RT0" low="0" high="3"/>
@@ -5193,7 +3062,7 @@ to upconvert to 32b float internally?
<value value="0x2" name="ISAMMODE_GL"/>
</enum>
- <reg32 offset="0xab00" name="SP_MODE_CONTROL" usage="rp_blit">
+ <reg32 offset="0xab00" name="SP_MODE_CNTL" usage="rp_blit">
<!--
When set, half register loads from the constant file will
load a 32-bit value (so hc0.y loads the same value as c0.y)
@@ -5210,16 +3079,16 @@ to upconvert to 32b float internally?
<reg32 offset="0xab01" name="SP_UNKNOWN_AB01" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xab02" name="SP_UNKNOWN_AB02" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xab04" name="SP_FS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xab05" name="SP_FS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xab04" name="SP_PS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
+ <reg32 offset="0xab05" name="SP_PS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
- <array offset="0xab10" name="SP_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="rp_blit">
+ <array offset="0xab10" name="SP_GFX_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="rp_blit">
<reg64 offset="0" name="DESCRIPTOR" variants="A6XX">
<bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
<bitfield name="ADDR" low="2" high="63" shr="2" type="address"/>
</reg64>
</array>
- <array offset="0xab0a" name="SP_BINDLESS_BASE" stride="2" length="8" variants="A7XX-" usage="rp_blit">
+ <array offset="0xab0a" name="SP_GFX_BINDLESS_BASE" stride="2" length="8" variants="A7XX-" usage="rp_blit">
<reg64 offset="0" name="DESCRIPTOR" variants="A7XX-">
<bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
<bitfield name="ADDR" low="2" high="63" shr="2" type="address"/>
@@ -5227,15 +3096,15 @@ to upconvert to 32b float internally?
</array>
<!--
- Combined IBO state for 3d pipe, used for Image and SSBO write/atomic
- instructions VS/HS/DS/GS/FS. See SP_CS_IBO_* for compute shaders.
+ Combined UAV state for 3d pipe, used for Image and SSBO write/atomic
+ instructions in VS/HS/DS/GS/FS. See SP_CS_UAV_BASE_* for compute shaders.
-->
- <reg64 offset="0xab1a" name="SP_IBO" type="address" align="16" usage="cmd"/>
- <reg32 offset="0xab20" name="SP_IBO_COUNT" low="0" high="6" type="uint" usage="cmd"/>
+ <reg64 offset="0xab1a" name="SP_GFX_UAV_BASE" type="address" align="16" usage="cmd"/>
+ <reg32 offset="0xab20" name="SP_GFX_USIZE" low="0" high="6" type="uint" usage="cmd"/>
<reg32 offset="0xab22" name="SP_UNKNOWN_AB22" variants="A7XX-" usage="cmd"/>
- <bitset name="a6xx_sp_2d_dst_format" inline="yes">
+ <bitset name="a6xx_sp_a2d_output_info" inline="yes">
<bitfield name="NORM" pos="0" type="boolean"/>
<bitfield name="SINT" pos="1" type="boolean"/>
<bitfield name="UINT" pos="2" type="boolean"/>
@@ -5248,8 +3117,8 @@ to upconvert to 32b float internally?
<bitfield name="MASK" low="12" high="15"/>
</bitset>
- <reg32 offset="0xacc0" name="SP_2D_DST_FORMAT" type="a6xx_sp_2d_dst_format" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xa9bf" name="SP_2D_DST_FORMAT" type="a6xx_sp_2d_dst_format" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xacc0" name="SP_A2D_OUTPUT_INFO" type="a6xx_sp_a2d_output_info" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xa9bf" name="SP_A2D_OUTPUT_INFO" type="a6xx_sp_a2d_output_info" variants="A7XX-" usage="rp_blit"/>
<reg32 offset="0xae00" name="SP_DBG_ECO_CNTL" usage="cmd"/>
<reg32 offset="0xae01" name="SP_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/>
@@ -5257,16 +3126,16 @@ to upconvert to 32b float internally?
<!-- TODO: valid bits 0x3c3f, see kernel -->
</reg32>
<reg32 offset="0xae03" name="SP_CHICKEN_BITS" usage="cmd"/>
- <reg32 offset="0xae04" name="SP_FLOAT_CNTL" usage="cmd">
+ <reg32 offset="0xae04" name="SP_NC_MODE_CNTL_2" usage="cmd">
<bitfield name="F16_NO_INF" pos="3" type="boolean"/>
</reg32>
<reg32 offset="0xae06" name="SP_UNKNOWN_AE06" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae08" name="SP_UNKNOWN_AE08" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae09" name="SP_UNKNOWN_AE09" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae0a" name="SP_UNKNOWN_AE0A" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae08" name="SP_CHICKEN_BITS_1" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae09" name="SP_CHICKEN_BITS_2" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae0a" name="SP_CHICKEN_BITS_3" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae0f" name="SP_PERFCTR_ENABLE" usage="cmd">
+ <reg32 offset="0xae0f" name="SP_PERFCTR_SHADER_MASK" usage="cmd">
<!-- some perfcntrs are affected by a per-stage enable bit
(PERF_SP_ALU_WORKING_CYCLES for example)
TODO: verify position of HS/DS/GS bits -->
@@ -5281,7 +3150,7 @@ to upconvert to 32b float internally?
<array offset="0xae60" name="SP_PERFCTR_HLSQ_SEL" stride="1" length="6" variants="A7XX-"/>
<reg32 offset="0xae6a" name="SP_UNKNOWN_AE6A" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xae6b" name="SP_UNKNOWN_AE6B" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae6c" name="SP_UNKNOWN_AE6C" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae6c" name="SP_HLSQ_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xae6d" name="SP_READ_SEL" variants="A7XX-">
<bitfield name="LOCATION" low="18" high="19" type="a7xx_state_location"/>
<bitfield name="PIPE" low="16" high="17" type="a7xx_pipe"/>
@@ -5301,33 +3170,44 @@ to upconvert to 32b float internally?
"a6xx_sp_ps_tp_cluster" but this actually specifies the border
color base for compute shaders.
-->
- <reg64 offset="0xb180" name="SP_PS_TP_BORDER_COLOR_BASE_ADDR" type="address" align="128" usage="cmd"/>
+ <reg64 offset="0xb180" name="TPL1_CS_BORDER_COLOR_BASE" type="address" align="128" usage="cmd"/>
<reg32 offset="0xb182" name="SP_UNKNOWN_B182" low="0" high="2" usage="cmd"/>
<reg32 offset="0xb183" name="SP_UNKNOWN_B183" low="0" high="23" usage="cmd"/>
<reg32 offset="0xb190" name="SP_UNKNOWN_B190"/>
<reg32 offset="0xb191" name="SP_UNKNOWN_B191"/>
- <!-- could be all the stuff below here is actually TPL1?? -->
-
- <reg32 offset="0xb300" name="SP_TP_RAS_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0xb300" name="TPL1_RAS_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="UNK2" low="2" high="3"/>
</reg32>
- <reg32 offset="0xb301" name="SP_TP_DEST_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0xb301" name="TPL1_DEST_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
</reg32>
<!-- looks to work in the same way as a5xx: -->
- <reg64 offset="0xb302" name="SP_TP_BORDER_COLOR_BASE_ADDR" type="address" align="128" usage="cmd"/>
- <reg32 offset="0xb304" name="SP_TP_SAMPLE_CONFIG" type="a6xx_sample_config" usage="rp_blit"/>
- <reg32 offset="0xb305" name="SP_TP_SAMPLE_LOCATION_0" type="a6xx_sample_locations" usage="rp_blit"/>
- <reg32 offset="0xb306" name="SP_TP_SAMPLE_LOCATION_1" type="a6xx_sample_locations" usage="rp_blit"/>
- <reg32 offset="0xb307" name="SP_TP_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0xb309" name="SP_TP_MODE_CNTL" usage="cmd">
+ <reg64 offset="0xb302" name="TPL1_GFX_BORDER_COLOR_BASE" type="address" align="128" usage="cmd"/>
+ <reg32 offset="0xb304" name="TPL1_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
+ <reg32 offset="0xb305" name="TPL1_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0xb306" name="TPL1_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0xb307" name="TPL1_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
+
+ <enum name="a6xx_coord_round">
+ <value value="0" name="COORD_TRUNCATE"/>
+ <value value="1" name="COORD_ROUND_NEAREST_EVEN"/>
+ </enum>
+
+ <enum name="a6xx_nearest_mode">
+ <value value="0" name="ROUND_CLAMP_TRUNCATE"/>
+ <value value="1" name="CLAMP_ROUND_TRUNCATE"/>
+ </enum>
+
+ <reg32 offset="0xb309" name="TPL1_MODE_CNTL" usage="cmd">
<bitfield name="ISAMMODE" low="0" high="1" type="a6xx_isam_mode"/>
- <bitfield name="UNK3" low="2" high="7"/>
+ <bitfield name="TEXCOORDROUNDMODE" pos="2" type="a6xx_coord_round"/>
+ <bitfield name="NEARESTMIPSNAP" pos="5" type="a6xx_nearest_mode"/>
+ <bitfield name="DESTDATATYPEOVERRIDE" pos="7" type="boolean"/>
</reg32>
<reg32 offset="0xb310" name="SP_UNKNOWN_B310" variants="A7XX-" usage="cmd"/>
@@ -5336,42 +3216,45 @@ to upconvert to 32b float internally?
badly named or the functionality moved in a6xx. But downstream kernel
calls this "a6xx_sp_ps_tp_2d_cluster"
-->
- <reg32 offset="0xb4c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_src_surf_info" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb4c1" name="SP_PS_2D_SRC_SIZE" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb4c0" name="TPL1_A2D_SRC_TEXTURE_INFO" type="a6xx_a2d_src_texture_info" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4c1" name="TPL1_A2D_SRC_TEXTURE_SIZE" variants="A6XX" usage="rp_blit">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
</reg32>
- <reg64 offset="0xb4c2" name="SP_PS_2D_SRC" type="address" align="16" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb4c4" name="SP_PS_2D_SRC_PITCH" variants="A6XX" usage="rp_blit">
+ <reg64 offset="0xb4c2" name="TPL1_A2D_SRC_TEXTURE_BASE" type="address" align="16" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4c4" name="TPL1_A2D_SRC_TEXTURE_PITCH" variants="A6XX" usage="rp_blit">
<bitfield name="UNK0" low="0" high="8"/>
<bitfield name="PITCH" low="9" high="23" shr="6" type="uint"/>
</reg32>
- <reg32 offset="0xb2c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_src_surf_info" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2c1" name="SP_PS_2D_SRC_SIZE" variants="A7XX">
+ <reg32 offset="0xb2c0" name="TPL1_A2D_SRC_TEXTURE_INFO" type="a6xx_a2d_src_texture_info" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2c1" name="TPL1_A2D_SRC_TEXTURE_SIZE" variants="A7XX">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
</reg32>
- <reg64 offset="0xb2c2" name="SP_PS_2D_SRC" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2c4" name="SP_PS_2D_SRC_PITCH" variants="A7XX">
- <bitfield name="UNK0" low="0" high="8"/>
- <bitfield name="PITCH" low="9" high="23" shr="6" type="uint"/>
+ <reg64 offset="0xb2c2" name="TPL1_A2D_SRC_TEXTURE_BASE" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2c4" name="TPL1_A2D_SRC_TEXTURE_PITCH" variants="A7XX">
+ <!--
+ Bits 3..9 must be zero unless 'TPL1_A2D_BLT_CNTL::TYPE'
+ is A6XX_TEX_IMG_BUFFER, which allows for lower alignment.
+ -->
+ <bitfield name="PITCH" low="3" high="23" type="uint"/>
</reg32>
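A small sketch of the alignment rule from the comment above; the helper is
hypothetical and only encodes the stated constraint that register bits 3..9
must be zero unless the source type is A6XX_TEX_IMG_BUFFER:

    #include <stdbool.h>
    #include <stdint.h>

    /* Bits 3..9 of the packed register (the low bits of PITCH) must be
     * zero unless the blit source type allows the lower alignment. */
    static bool a2d_src_pitch_valid(uint32_t packed_reg, bool is_img_buffer)
    {
        return is_img_buffer || (packed_reg & (0x7fu << 3)) == 0;
    }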
<!-- planes for NV12, etc. (TODO: not tested) -->
- <reg64 offset="0xb4c5" name="SP_PS_2D_SRC_PLANE1" type="address" align="16" variants="A6XX"/>
- <reg32 offset="0xb4c7" name="SP_PS_2D_SRC_PLANE_PITCH" low="0" high="11" shr="6" type="uint" variants="A6XX"/>
- <reg64 offset="0xb4c8" name="SP_PS_2D_SRC_PLANE2" type="address" align="16" variants="A6XX"/>
+ <reg64 offset="0xb4c5" name="TPL1_A2D_SRC_TEXTURE_BASE_1" type="address" align="16" variants="A6XX"/>
+ <reg32 offset="0xb4c7" name="TPL1_A2D_SRC_TEXTURE_PITCH_1" low="0" high="11" shr="6" type="uint" variants="A6XX"/>
+ <reg64 offset="0xb4c8" name="TPL1_A2D_SRC_TEXTURE_BASE_2" type="address" align="16" variants="A6XX"/>
- <reg64 offset="0xb2c5" name="SP_PS_2D_SRC_PLANE1" type="address" align="16" variants="A7XX-"/>
- <reg32 offset="0xb2c7" name="SP_PS_2D_SRC_PLANE_PITCH" low="0" high="11" shr="6" type="uint" variants="A7XX-"/>
- <reg64 offset="0xb2c8" name="SP_PS_2D_SRC_PLANE2" type="address" align="16" variants="A7XX-"/>
+ <reg64 offset="0xb2c5" name="TPL1_A2D_SRC_TEXTURE_BASE_1" type="address" align="16" variants="A7XX-"/>
+ <reg32 offset="0xb2c7" name="TPL1_A2D_SRC_TEXTURE_PITCH_1" low="0" high="11" shr="6" type="uint" variants="A7XX-"/>
+ <reg64 offset="0xb2c8" name="TPL1_A2D_SRC_TEXTURE_BASE_2" type="address" align="16" variants="A7XX-"/>
- <reg64 offset="0xb4ca" name="SP_PS_2D_SRC_FLAGS" type="address" align="16" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb4cc" name="SP_PS_2D_SRC_FLAGS_PITCH" low="0" high="7" shr="6" type="uint" variants="A6XX" usage="rp_blit"/>
+ <reg64 offset="0xb4ca" name="TPL1_A2D_SRC_TEXTURE_FLAG_BASE" type="address" align="16" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4cc" name="TPL1_A2D_SRC_TEXTURE_FLAG_PITCH" low="0" high="7" shr="6" type="uint" variants="A6XX" usage="rp_blit"/>
- <reg64 offset="0xb2ca" name="SP_PS_2D_SRC_FLAGS" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2cc" name="SP_PS_2D_SRC_FLAGS_PITCH" low="0" high="7" shr="6" type="uint" variants="A7XX-" usage="rp_blit"/>
+ <reg64 offset="0xb2ca" name="TPL1_A2D_SRC_TEXTURE_FLAG_BASE" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2cc" name="TPL1_A2D_SRC_TEXTURE_FLAG_PITCH" low="0" high="7" shr="6" type="uint" variants="A7XX-" usage="rp_blit"/>
<reg32 offset="0xb4cd" name="SP_PS_UNKNOWN_B4CD" low="6" high="31" variants="A6XX"/>
<reg32 offset="0xb4ce" name="SP_PS_UNKNOWN_B4CE" low="0" high="31" variants="A6XX"/>
@@ -5383,8 +3266,12 @@ to upconvert to 32b float internally?
<reg32 offset="0xb2ce" name="SP_PS_UNKNOWN_B4CE" low="0" high="31" variants="A7XX"/>
<reg32 offset="0xb2cf" name="SP_PS_UNKNOWN_B4CF" low="0" high="30" variants="A7XX"/>
<reg32 offset="0xb2d0" name="SP_PS_UNKNOWN_B4D0" low="0" high="29" variants="A7XX"/>
- <reg32 offset="0xb2d1" name="SP_PS_2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX"/>
- <reg32 offset="0xb2d2" name="SP_PS_UNKNOWN_B2D2" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2d1" name="TPL1_A2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX"/>
+ <reg32 offset="0xb2d2" name="TPL1_A2D_BLT_CNTL" variants="A7XX-" usage="rp_blit">
+ <bitfield name="RAW_COPY" pos="0" type="boolean"/>
+ <bitfield name="START_OFFSET_TEXELS" low="16" high="21"/>
+ <bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
+ </reg32>
<reg32 offset="0xab21" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX-" usage="rp_blit"/>
<!-- always 0x100000 or 0x1000000? -->
@@ -5422,34 +3309,44 @@ to upconvert to 32b float internally?
<!-- TODO: 4 more perfcntr sel at 0xb620 ? -->
- <bitset name="a6xx_hlsq_xs_cntl" inline="yes">
+ <bitset name="a6xx_xs_const_config" inline="yes">
<bitfield name="CONSTLEN" low="0" high="7" shr="2" type="uint"/>
<bitfield name="ENABLED" pos="8" type="boolean"/>
<bitfield name="READ_IMM_SHARED_CONSTS" pos="9" type="boolean" variants="A7XX-"/>
</bitset>
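The shr="2" on CONSTLEN above means the register stores the length already
shifted right by two; a minimal sketch of packing this bitset, with the helper
name hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    /* CONSTLEN sits in bits 0..7 and is stored as constlen >> 2 (shr="2");
     * ENABLED is bit 8. READ_IMM_SHARED_CONSTS (bit 9, a7xx) is omitted. */
    static uint32_t pack_xs_const_config(uint32_t constlen, bool enabled)
    {
        return ((constlen >> 2) & 0xff) | ((uint32_t)enabled << 8);
    }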
- <reg32 offset="0xb800" name="HLSQ_VS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb801" name="HLSQ_HS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb802" name="HLSQ_DS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb803" name="HLSQ_GS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb800" name="SP_VS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb801" name="SP_HS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb802" name="SP_DS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb803" name="SP_GS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xa827" name="HLSQ_VS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa83f" name="HLSQ_HS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa867" name="HLSQ_DS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa898" name="HLSQ_GS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa827" name="SP_VS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa83f" name="SP_HS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa867" name="SP_DS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa898" name="SP_GS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9aa" name="HLSQ_FS_UNKNOWN_A9AA" variants="A7XX-" usage="rp_blit">
- <!-- Tentatively named, appears to disable consts being loaded via CP_LOAD_STATE6_FRAG -->
- <bitfield name="CONSTS_LOAD_DISABLE" pos="0" type="boolean"/>
+ <reg32 offset="0xa9aa" name="SP_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
+ <bitfield name="FS_DISABLE" pos="0" type="boolean"/>
</reg32>
- <!-- Always 0 -->
- <reg32 offset="0xa9ac" name="HLSQ_UNKNOWN_A9AC" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9ac" name="SP_DITHER_CNTL" variants="A7XX-" usage="cmd">
+ <bitfield name="DITHER_MODE_MRT0" low="0" high="1" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT1" low="2" high="3" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT2" low="4" high="5" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT3" low="6" high="7" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT4" low="8" high="9" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT5" low="10" high="11" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT6" low="12" high="13" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT7" low="14" high="15" type="adreno_rb_dither_mode"/>
+ </reg32>
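Packing the per-MRT dither modes is uniform across the eight 2-bit fields; a
minimal sketch, with the helper name hypothetical:

    #include <stdint.h>

    /* MRT0 occupies bits 0..1, MRT1 bits 2..3, ... MRT7 bits 14..15. */
    static uint32_t pack_sp_dither_cntl(const uint8_t modes[8])
    {
        uint32_t v = 0;
        for (int i = 0; i < 8; i++)
            v |= (uint32_t)(modes[i] & 0x3) << (2 * i);
        return v;
    }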
- <!-- Used in VK_KHR_fragment_shading_rate -->
- <reg32 offset="0xa9ad" name="HLSQ_UNKNOWN_A9AD" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9ad" name="SP_VRS_CONFIG" variants="A7XX-" usage="rp_blit">
+ <bitfield name="PIPELINE_FSR_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="ATTACHMENT_FSR_ENABLE" pos="1" type="boolean"/>
+ <bitfield name="PRIMITIVE_FSR_ENABLE" pos="3" type="boolean"/>
+ </reg32>
- <reg32 offset="0xa9ae" name="HLSQ_UNKNOWN_A9AE" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9ae" name="SP_PS_CNTL_1" variants="A7XX-" usage="rp_blit">
<bitfield name="SYSVAL_REGS_COUNT" low="0" high="7" type="uint"/>
<!-- UNK8 is set on a730/a740 -->
<bitfield name="UNK8" pos="8" type="boolean"/>
@@ -5462,94 +3359,94 @@ to upconvert to 32b float internally?
<reg32 offset="0xb823" name="HLSQ_LOAD_STATE_GEOM_DATA"/>
- <bitset name="a6xx_hlsq_fs_cntl_0" inline="yes">
+ <bitset name="a6xx_sp_ps_wave_cntl" inline="yes">
<!-- must match SP_FS_CTRL -->
<bitfield name="THREADSIZE" pos="0" type="a6xx_threadsize"/>
<bitfield name="VARYINGS" pos="1" type="boolean"/>
<bitfield name="UNK2" low="2" high="11"/>
</bitset>
- <bitset name="a6xx_hlsq_control_3_reg" inline="yes">
+ <bitset name="a6xx_sp_reg_prog_id_1" inline="yes">
<!-- register loaded with position (bary.f) -->
<bitfield name="IJ_PERSP_PIXEL" low="0" high="7" type="a3xx_regid"/>
<bitfield name="IJ_LINEAR_PIXEL" low="8" high="15" type="a3xx_regid"/>
<bitfield name="IJ_PERSP_CENTROID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="IJ_LINEAR_CENTROID" low="24" high="31" type="a3xx_regid"/>
</bitset>
- <bitset name="a6xx_hlsq_control_4_reg" inline="yes">
+ <bitset name="a6xx_sp_reg_prog_id_2" inline="yes">
<bitfield name="IJ_PERSP_SAMPLE" low="0" high="7" type="a3xx_regid"/>
<bitfield name="IJ_LINEAR_SAMPLE" low="8" high="15" type="a3xx_regid"/>
<bitfield name="XYCOORDREGID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="ZWCOORDREGID" low="24" high="31" type="a3xx_regid"/>
</bitset>
- <bitset name="a6xx_hlsq_control_5_reg" inline="yes">
+ <bitset name="a6xx_sp_reg_prog_id_3" inline="yes">
<bitfield name="LINELENGTHREGID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="FOVEATIONQUALITYREGID" low="8" high="15" type="a3xx_regid"/>
</bitset>
- <reg32 offset="0xb980" type="a6xx_hlsq_fs_cntl_0" name="HLSQ_FS_CNTL_0" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb980" type="a6xx_sp_ps_wave_cntl" name="SP_PS_WAVE_CNTL" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0xb981" name="HLSQ_UNKNOWN_B981" pos="0" type="boolean" variants="A6XX"/> <!-- never used by blob -->
- <reg32 offset="0xb982" name="HLSQ_CONTROL_1_REG" low="0" high="2" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb982" name="SP_LB_PARAM_LIMIT" low="0" high="2" variants="A6XX" usage="rp_blit">
<!-- Sets the maximum number of primitives allowed in one FS wave minus one, similarly to the
A3xx field, except that it's not necessary to set it to anything but the maximum, since
the hardware will simply emit smaller waves when it runs out of space. -->
<bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/>
</reg32>
- <reg32 offset="0xb983" name="HLSQ_CONTROL_2_REG" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb983" name="SP_REG_PROG_ID_0" variants="A6XX" usage="rp_blit">
<bitfield name="FACEREGID" low="0" high="7" type="a3xx_regid"/>
<!-- SAMPLEID is loaded into a half-precision register: -->
<bitfield name="SAMPLEID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="SAMPLEMASK" low="16" high="23" type="a3xx_regid"/>
<bitfield name="CENTERRHW" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xb984" type="a6xx_hlsq_control_3_reg" name="HLSQ_CONTROL_3_REG" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb985" type="a6xx_hlsq_control_4_reg" name="HLSQ_CONTROL_4_REG" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb986" type="a6xx_hlsq_control_5_reg" name="HLSQ_CONTROL_5_REG" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb987" name="HLSQ_CS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="cmd"/>
- <reg32 offset="0xa9c6" type="a6xx_hlsq_fs_cntl_0" name="HLSQ_FS_CNTL_0" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9c7" name="HLSQ_CONTROL_1_REG" low="0" high="2" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xb984" type="a6xx_sp_reg_prog_id_1" name="SP_REG_PROG_ID_1" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb985" type="a6xx_sp_reg_prog_id_2" name="SP_REG_PROG_ID_2" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb986" type="a6xx_sp_reg_prog_id_3" name="SP_REG_PROG_ID_3" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb987" name="SP_CS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="cmd"/>
+ <reg32 offset="0xa9c6" type="a6xx_sp_ps_wave_cntl" name="SP_PS_WAVE_CNTL" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9c7" name="SP_LB_PARAM_LIMIT" low="0" high="2" variants="A7XX-" usage="rp_blit">
<bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/>
</reg32>
- <reg32 offset="0xa9c8" name="HLSQ_CONTROL_2_REG" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9c8" name="SP_REG_PROG_ID_0" variants="A7XX-" usage="rp_blit">
<bitfield name="FACEREGID" low="0" high="7" type="a3xx_regid"/>
<!-- SAMPLEID is loaded into a half-precision register: -->
<bitfield name="SAMPLEID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="SAMPLEMASK" low="16" high="23" type="a3xx_regid"/>
<bitfield name="CENTERRHW" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa9c9" type="a6xx_hlsq_control_3_reg" name="HLSQ_CONTROL_3_REG" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9ca" type="a6xx_hlsq_control_4_reg" name="HLSQ_CONTROL_4_REG" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9cb" type="a6xx_hlsq_control_5_reg" name="HLSQ_CONTROL_5_REG" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9cd" name="HLSQ_CS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9c9" type="a6xx_sp_reg_prog_id_1" name="SP_REG_PROG_ID_1" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9ca" type="a6xx_sp_reg_prog_id_2" name="SP_REG_PROG_ID_2" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9cb" type="a6xx_sp_reg_prog_id_3" name="SP_REG_PROG_ID_3" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9cd" name="SP_CS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="cmd"/>
<!-- TODO: what does KERNELDIM do exactly (blob sets it differently from turnip) -->
- <reg32 offset="0xb990" name="HLSQ_CS_NDRANGE_0" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb990" name="SP_CS_NDRANGE_0" variants="A6XX" usage="rp_blit">
<bitfield name="KERNELDIM" low="0" high="1" type="uint"/>
<!-- localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
<bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
<bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
</reg32>
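Since the LOCALSIZE* fields hold the size minus one, packing the compute local
size is mechanical; a minimal sketch with a hypothetical helper name (sizes are
assumed to already be validated as non-zero and in range):

    #include <stdint.h>

    static uint32_t pack_cs_ndrange_0(uint32_t kerneldim,
                                      uint32_t lx, uint32_t ly, uint32_t lz)
    {
        return (kerneldim & 0x3)   |   /* KERNELDIM,  bits 0..1   */
               ((lx - 1) << 2)     |   /* LOCALSIZEX, bits 2..11  */
               ((ly - 1) << 12)    |   /* LOCALSIZEY, bits 12..21 */
               ((lz - 1) << 22);       /* LOCALSIZEZ, bits 22..31 */
    }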
- <reg32 offset="0xb991" name="HLSQ_CS_NDRANGE_1" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb991" name="SP_CS_NDRANGE_1" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALSIZE_X" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb992" name="HLSQ_CS_NDRANGE_2" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb992" name="SP_CS_NDRANGE_2" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALOFF_X" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb993" name="HLSQ_CS_NDRANGE_3" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb993" name="SP_CS_NDRANGE_3" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALSIZE_Y" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb994" name="HLSQ_CS_NDRANGE_4" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb994" name="SP_CS_NDRANGE_4" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALOFF_Y" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb995" name="HLSQ_CS_NDRANGE_5" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb995" name="SP_CS_NDRANGE_5" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALSIZE_Z" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb996" name="HLSQ_CS_NDRANGE_6" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb996" name="SP_CS_NDRANGE_6" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALOFF_Z" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb997" name="HLSQ_CS_CNTL_0" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb997" name="SP_CS_CONST_CONFIG_0" variants="A6XX" usage="rp_blit">
<!-- these are all vec3. first 3 need to be high regs
- WGSIZECONSTID is the local size (from HLSQ_CS_NDRANGE_0)
+ WGSIZECONSTID is the local size (from SP_CS_NDRANGE_0)
WGOFFSETCONSTID is WGIDCONSTID*WGSIZECONSTID
-->
<bitfield name="WGIDCONSTID" low="0" high="7" type="a3xx_regid"/>
@@ -5557,7 +3454,7 @@ to upconvert to 32b float internally?
<bitfield name="WGOFFSETCONSTID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="LOCALIDREGID" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xb998" name="HLSQ_CS_CNTL_1" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb998" name="SP_CS_WGE_CNTL" variants="A6XX" usage="rp_blit">
<!-- gl_LocalInvocationIndex -->
<bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
<!-- a650 has 6 "SP cores" (but 3 "SP"). this makes it use only
@@ -5569,40 +3466,40 @@ to upconvert to 32b float internally?
<bitfield name="THREADSIZE_SCALAR" pos="10" type="boolean"/>
</reg32>
<!--note: vulkan blob doesn't use these -->
- <reg32 offset="0xb999" name="HLSQ_CS_KERNEL_GROUP_X" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb99a" name="HLSQ_CS_KERNEL_GROUP_Y" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb99b" name="HLSQ_CS_KERNEL_GROUP_Z" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb999" name="SP_CS_KERNEL_GROUP_X" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb99a" name="SP_CS_KERNEL_GROUP_Y" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb99b" name="SP_CS_KERNEL_GROUP_Z" variants="A6XX" usage="rp_blit"/>
<!-- TODO: what does KERNELDIM do exactly (blob sets it differently from turnip) -->
- <reg32 offset="0xa9d4" name="HLSQ_CS_NDRANGE_0" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d4" name="SP_CS_NDRANGE_0" variants="A7XX-" usage="rp_blit">
<bitfield name="KERNELDIM" low="0" high="1" type="uint"/>
<!-- localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
<bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
<bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d5" name="HLSQ_CS_NDRANGE_1" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d5" name="SP_CS_NDRANGE_1" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALSIZE_X" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d6" name="HLSQ_CS_NDRANGE_2" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d6" name="SP_CS_NDRANGE_2" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALOFF_X" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d7" name="HLSQ_CS_NDRANGE_3" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d7" name="SP_CS_NDRANGE_3" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALSIZE_Y" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d8" name="HLSQ_CS_NDRANGE_4" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d8" name="SP_CS_NDRANGE_4" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALOFF_Y" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d9" name="HLSQ_CS_NDRANGE_5" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d9" name="SP_CS_NDRANGE_5" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALSIZE_Z" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9da" name="HLSQ_CS_NDRANGE_6" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9da" name="SP_CS_NDRANGE_6" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALOFF_Z" low="0" high="31" type="uint"/>
</reg32>
<!--note: vulkan blob doesn't use these -->
- <reg32 offset="0xa9dc" name="HLSQ_CS_KERNEL_GROUP_X" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9dd" name="HLSQ_CS_KERNEL_GROUP_Y" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9de" name="HLSQ_CS_KERNEL_GROUP_Z" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9dc" name="SP_CS_KERNEL_GROUP_X" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9dd" name="SP_CS_KERNEL_GROUP_Y" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9de" name="SP_CS_KERNEL_GROUP_Z" variants="A7XX-" usage="rp_blit"/>
<enum name="a7xx_cs_yalign">
<value name="CS_YALIGN_1" value="8"/>
@@ -5611,19 +3508,29 @@ to upconvert to 32b float internally?
<value name="CS_YALIGN_8" value="1"/>
</enum>
- <reg32 offset="0xa9db" name="HLSQ_CS_CNTL_1" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9db" name="SP_CS_WGE_CNTL" variants="A7XX-" usage="rp_blit">
<!-- gl_LocalInvocationIndex -->
<bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
<!-- Must match SP_CS_CTRL -->
<bitfield name="THREADSIZE" pos="9" type="a6xx_threadsize"/>
- <bitfield name="UNK11" pos="11" type="boolean"/>
- <bitfield name="UNK22" pos="22" type="boolean"/>
- <bitfield name="UNK26" pos="26" type="boolean"/>
- <bitfield name="YALIGN" low="27" high="30" type="a7xx_cs_yalign"/>
+ <doc>
+ When this bit is enabled, the dispatch order interleaves
+ the z coordinate instead of launching all workgroups
+ with z=0, then all with z=1 and so on.
+ </doc>
+ <bitfield name="WORKGROUPRASTORDERZFIRSTEN" pos="11" type="boolean"/>
+ <doc>
+ When both fields are non-zero, the dispatcher uses
+ these tile sizes to launch workgroups in a tiled
+ manner when the x and y workgroup counts are
+ both more than 1.
+ </doc>
+ <bitfield name="WGTILEWIDTH" low="20" high="25"/>
+ <bitfield name="WGTILEHEIGHT" low="26" high="31"/>
</reg32>
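The workgroup tiling described above can be pictured with a short sketch; only
the "tiled when both fields are non-zero" condition comes from the doc, and the
exact walk order within and between tiles is an assumption for illustration:

    extern void dispatch_workgroup(unsigned x, unsigned y); /* hypothetical */

    /* Assumed tile-by-tile launch order for WGTILEWIDTH/WGTILEHEIGHT. */
    static void launch_tiled(unsigned count_x, unsigned count_y,
                             unsigned tile_w, unsigned tile_h)
    {
        for (unsigned ty = 0; ty < count_y; ty += tile_h)
            for (unsigned tx = 0; tx < count_x; tx += tile_w)
                for (unsigned y = ty; y < ty + tile_h && y < count_y; y++)
                    for (unsigned x = tx; x < tx + tile_w && x < count_x; x++)
                        dispatch_workgroup(x, y);
    }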
- <reg32 offset="0xa9df" name="HLSQ_CS_LOCAL_SIZE" variants="A7XX-" usage="cmd">
- <!-- localsize is value minus one: -->
+ <reg32 offset="0xa9df" name="SP_CS_NDRANGE_7" variants="A7XX-" usage="cmd">
+ <!-- The size of the last workgroup. localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
<bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
<bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
@@ -5641,29 +3548,27 @@ to upconvert to 32b float internally?
</reg64>
</array>
- <!-- new in a6xx gen4, mirror of SP_CS_UNKNOWN_A9B1? -->
- <reg32 offset="0xb9d0" name="HLSQ_CS_UNKNOWN_B9D0" variants="A6XX" usage="cmd">
+ <!-- new in a6xx gen4, mirror of SP_CS_CNTL_1? -->
+ <reg32 offset="0xb9d0" name="HLSQ_CS_CTRL_REG1" variants="A6XX" usage="cmd">
<bitfield name="SHARED_SIZE" low="0" high="4" type="uint"/>
- <bitfield name="UNK5" pos="5" type="boolean"/>
- <!-- always 1 ? -->
- <bitfield name="UNK6" pos="6" type="boolean"/>
+ <bitfield name="CONSTANTRAMMODE" low="5" high="6" type="a6xx_const_ram_mode"/>
</reg32>
- <reg32 offset="0xbb00" name="HLSQ_DRAW_CMD" variants="A6XX">
+ <reg32 offset="0xbb00" name="SP_DRAW_INITIATOR" variants="A6XX">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xbb01" name="HLSQ_DISPATCH_CMD" variants="A6XX">
+ <reg32 offset="0xbb01" name="SP_KERNEL_INITIATOR" variants="A6XX">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xbb02" name="HLSQ_EVENT_CMD" variants="A6XX">
+ <reg32 offset="0xbb02" name="SP_EVENT_INITIATOR" variants="A6XX">
<!-- I think only the low bit is actually used? -->
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
</reg32>
- <reg32 offset="0xbb08" name="HLSQ_INVALIDATE_CMD" variants="A6XX" usage="cmd">
+ <reg32 offset="0xbb08" name="SP_UPDATE_CNTL" variants="A6XX" usage="cmd">
<doc>
This register clears pending loads queued up by
CP_LOAD_STATE6. Each bit resets a particular kind(s) of
@@ -5678,8 +3583,8 @@ to upconvert to 32b float internally?
<bitfield name="FS_STATE" pos="4" type="boolean"/>
<bitfield name="CS_STATE" pos="5" type="boolean"/>
- <bitfield name="CS_IBO" pos="6" type="boolean"/>
- <bitfield name="GFX_IBO" pos="7" type="boolean"/>
+ <bitfield name="CS_UAV" pos="6" type="boolean"/>
+ <bitfield name="GFX_UAV" pos="7" type="boolean"/>
<!-- Note: these only do something when HLSQ_SHARED_CONSTS is set to 1 -->
<bitfield name="CS_SHARED_CONST" pos="19" type="boolean"/>
@@ -5690,20 +3595,20 @@ to upconvert to 32b float internally?
<bitfield name="GFX_BINDLESS" low="14" high="18" type="hex"/>
</reg32>
- <reg32 offset="0xab1c" name="HLSQ_DRAW_CMD" variants="A7XX-">
+ <reg32 offset="0xab1c" name="SP_DRAW_INITIATOR" variants="A7XX-">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xab1d" name="HLSQ_DISPATCH_CMD" variants="A7XX-">
+ <reg32 offset="0xab1d" name="SP_KERNEL_INITIATOR" variants="A7XX-">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xab1e" name="HLSQ_EVENT_CMD" variants="A7XX-">
+ <reg32 offset="0xab1e" name="SP_EVENT_INITIATOR" variants="A7XX-">
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
</reg32>
- <reg32 offset="0xab1f" name="HLSQ_INVALIDATE_CMD" variants="A7XX-" usage="cmd">
+ <reg32 offset="0xab1f" name="SP_UPDATE_CNTL" variants="A7XX-" usage="cmd">
<doc>
This register clears pending loads queued up by
CP_LOAD_STATE6. Each bit resets a particular kind(s) of
@@ -5718,18 +3623,18 @@ to upconvert to 32b float internally?
<bitfield name="FS_STATE" pos="4" type="boolean"/>
<bitfield name="CS_STATE" pos="5" type="boolean"/>
- <bitfield name="CS_IBO" pos="6" type="boolean"/>
- <bitfield name="GFX_IBO" pos="7" type="boolean"/>
+ <bitfield name="CS_UAV" pos="6" type="boolean"/>
+ <bitfield name="GFX_UAV" pos="7" type="boolean"/>
<!-- SS6_BINDLESS: one bit per bindless base -->
<bitfield name="CS_BINDLESS" low="9" high="16" type="hex"/>
<bitfield name="GFX_BINDLESS" low="17" high="24" type="hex"/>
</reg32>
- <reg32 offset="0xbb10" name="HLSQ_FS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xab03" name="HLSQ_FS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xbb10" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xab03" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
- <array offset="0xab40" name="HLSQ_SHARED_CONSTS_IMM" stride="1" length="64" variants="A7XX-"/>
+ <array offset="0xab40" name="SP_SHARED_CONSTANT_GFX_0" stride="1" length="64" variants="A7XX-"/>
<reg32 offset="0xbb11" name="HLSQ_SHARED_CONSTS" variants="A6XX" usage="cmd">
<doc>
@@ -5738,7 +3643,7 @@ to upconvert to 32b float internally?
const pool and 16 in the geometry const pool although
only 8 are actually used (why?) and they are mapped to
c504-c511 in each stage. Both VS and FS shared consts
- are written using ST6_CONSTANTS/SB6_IBO, so that both
+ are written using ST6_CONSTANTS/SB6_UAV, so that both
the geometry and FS shared consts can be written at once
by using CP_LOAD_STATE6 rather than
CP_LOAD_STATE6_FRAG/CP_LOAD_STATE6_GEOM. In addition
@@ -5747,13 +3652,13 @@ to upconvert to 32b float internally?
There is also a separate shared constant pool for CS,
which is loaded through CP_LOAD_STATE6_FRAG with
- ST6_UBO/ST6_IBO. However the only real difference for CS
+ ST6_UBO/ST6_UAV. However the only real difference for CS
is the dword units.
</doc>
<bitfield name="ENABLE" pos="0" type="boolean"/>
</reg32>
- <!-- mirror of SP_BINDLESS_BASE -->
+ <!-- mirror of SP_GFX_BINDLESS_BASE -->
<array offset="0xbb20" name="HLSQ_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="cmd">
<reg64 offset="0" name="DESCRIPTOR">
<bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
@@ -5788,10 +3693,10 @@ to upconvert to 32b float internally?
sequence. The sequence used internally for an event looks like:
- write EVENT_CMD pipe register
- write CP_EVENT_START
- - write HLSQ_EVENT_CMD with event or HLSQ_DRAW_CMD
- - write PC_EVENT_CMD with event or PC_DRAW_CMD
- - write HLSQ_EVENT_CMD(CONTEXT_DONE)
- - write PC_EVENT_CMD(CONTEXT_DONE)
+ - write SP_EVENT_INITIATOR with event or SP_DRAW_INITIATOR
+ - write PC_EVENT_INITIATOR with event or PC_DRAW_INITIATOR
+ - write SP_EVENT_INITIATOR(CONTEXT_DONE)
+ - write PC_EVENT_INITIATOR(CONTEXT_DONE)
- write CP_EVENT_END
Writing to CP_EVENT_END seems to actually trigger the context roll
-->
@@ -5809,193 +3714,6 @@ to upconvert to 32b float internally?
</reg32>
</domain>
-<!-- Seems basically the same as a5xx, maybe move to common.xml.. -->
-<domain name="A6XX_TEX_SAMP" width="32">
- <doc>Texture sampler dwords</doc>
- <enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_NEAREST" value="0"/>
- <value name="A6XX_TEX_LINEAR" value="1"/>
- <value name="A6XX_TEX_ANISO" value="2"/>
- <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
- </enum>
- <enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_REPEAT" value="0"/>
- <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
- <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
- <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
- <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
- </enum>
- <enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_ANISO_1" value="0"/>
- <value name="A6XX_TEX_ANISO_2" value="1"/>
- <value name="A6XX_TEX_ANISO_4" value="2"/>
- <value name="A6XX_TEX_ANISO_8" value="3"/>
- <value name="A6XX_TEX_ANISO_16" value="4"/>
- </enum>
- <enum name="a6xx_reduction_mode">
- <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
- <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
- <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
- </enum>
-
- <reg32 offset="0" name="0">
- <bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
- <bitfield name="XY_MAG" low="1" high="2" type="a6xx_tex_filter"/>
- <bitfield name="XY_MIN" low="3" high="4" type="a6xx_tex_filter"/>
- <bitfield name="WRAP_S" low="5" high="7" type="a6xx_tex_clamp"/>
- <bitfield name="WRAP_T" low="8" high="10" type="a6xx_tex_clamp"/>
- <bitfield name="WRAP_R" low="11" high="13" type="a6xx_tex_clamp"/>
- <bitfield name="ANISO" low="14" high="16" type="a6xx_tex_aniso"/>
- <bitfield name="LOD_BIAS" low="19" high="31" type="fixed" radix="8"/><!-- no idea how many bits for real -->
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="CLAMPENABLE" pos="0" type="boolean">
- <doc>
- clamp result to [0, 1] if the format is unorm or
- [-1, 1] if the format is snorm, *after*
- filtering. Has no effect for other formats.
- </doc>
- </bitfield>
- <bitfield name="COMPARE_FUNC" low="1" high="3" type="adreno_compare_func"/>
- <bitfield name="CUBEMAPSEAMLESSFILTOFF" pos="4" type="boolean"/>
- <bitfield name="UNNORM_COORDS" pos="5" type="boolean"/>
- <bitfield name="MIPFILTER_LINEAR_FAR" pos="6" type="boolean"/>
- <bitfield name="MAX_LOD" low="8" high="19" type="ufixed" radix="8"/>
- <bitfield name="MIN_LOD" low="20" high="31" type="ufixed" radix="8"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="REDUCTION_MODE" low="0" high="1" type="a6xx_reduction_mode"/>
- <bitfield name="CHROMA_LINEAR" pos="5" type="boolean"/>
- <bitfield name="BCOLOR" low="7" high="31"/>
- </reg32>
- <reg32 offset="3" name="3"/>
-</domain>
-
-<domain name="A6XX_TEX_CONST" width="32" varset="chip">
- <doc>Texture constant dwords</doc>
- <enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_X" value="0"/>
- <value name="A6XX_TEX_Y" value="1"/>
- <value name="A6XX_TEX_Z" value="2"/>
- <value name="A6XX_TEX_W" value="3"/>
- <value name="A6XX_TEX_ZERO" value="4"/>
- <value name="A6XX_TEX_ONE" value="5"/>
- </enum>
- <enum name="a6xx_tex_type"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_1D" value="0"/>
- <value name="A6XX_TEX_2D" value="1"/>
- <value name="A6XX_TEX_CUBE" value="2"/>
- <value name="A6XX_TEX_3D" value="3"/>
- <value name="A6XX_TEX_BUFFER" value="4"/>
- </enum>
- <reg32 offset="0" name="0">
- <bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
- <bitfield name="SRGB" pos="2" type="boolean"/>
- <bitfield name="SWIZ_X" low="4" high="6" type="a6xx_tex_swiz"/>
- <bitfield name="SWIZ_Y" low="7" high="9" type="a6xx_tex_swiz"/>
- <bitfield name="SWIZ_Z" low="10" high="12" type="a6xx_tex_swiz"/>
- <bitfield name="SWIZ_W" low="13" high="15" type="a6xx_tex_swiz"/>
- <bitfield name="MIPLVLS" low="16" high="19" type="uint"/>
- <!-- overlaps with MIPLVLS -->
- <bitfield name="CHROMA_MIDPOINT_X" pos="16" type="boolean"/>
- <bitfield name="CHROMA_MIDPOINT_Y" pos="18" type="boolean"/>
- <bitfield name="SAMPLES" low="20" high="21" type="a3xx_msaa_samples"/>
- <bitfield name="FMT" low="22" high="29" type="a6xx_format"/>
- <!--
- Why is the swap needed in addition to SWIZ_*? The swap
- is performed before border color replacement, while the
- swizzle is applied after after it.
- -->
- <bitfield name="SWAP" low="30" high="31" type="a3xx_color_swap"/>
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="WIDTH" low="0" high="14" type="uint"/>
- <bitfield name="HEIGHT" low="15" high="29" type="uint"/>
- <bitfield name="MUTABLEEN" pos="31" type="boolean" variants="A7XX-"/>
- </reg32>
- <reg32 offset="2" name="2">
- <!--
- These fields overlap PITCH, and are used instead of
- PITCH/PITCHALIGN when TYPE is A6XX_TEX_BUFFER.
- -->
- <doc> probably for D3D structured UAVs, normally set to 1 </doc>
- <bitfield name="STRUCTSIZETEXELS" low="4" high="15" type="uint"/>
- <bitfield name="STARTOFFSETTEXELS" low="16" high="21" type="uint"/>
-
- <!-- minimum pitch (for mipmap levels): log2(pitchalign / 64) -->
- <bitfield name="PITCHALIGN" low="0" high="3" type="uint"/>
- <doc>Pitch in bytes (so actually stride)</doc>
- <bitfield name="PITCH" low="7" high="28" type="uint"/>
- <bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
- </reg32>
- <reg32 offset="3" name="3">
- <!--
- ARRAY_PITCH is basically LAYERSZ for the first mipmap level, and
- for 3d textures (laid out mipmap level first) MIN_LAYERSZ is the
- layer size at the point that it stops being reduced moving to
- higher (smaller) mipmap levels
- -->
- <bitfield name="ARRAY_PITCH" low="0" high="22" shr="12" type="uint"/>
- <bitfield name="MIN_LAYERSZ" low="23" high="26" shr="12"/>
- <!--
- by default levels with w < 16 are linear
- TILE_ALL makes all levels have tiling
- seems required when using UBWC, since all levels have UBWC (can possibly be disabled?)
- -->
- <bitfield name="TILE_ALL" pos="27" type="boolean"/>
- <bitfield name="FLAG" pos="28" type="boolean"/>
- </reg32>
- <!-- for 2-3 plane format, BASE is flag buffer address (if enabled)
- the address of the non-flag base buffer is determined automatically,
- and must follow the flag buffer
- -->
- <reg32 offset="4" name="4">
- <bitfield name="BASE_LO" low="5" high="31" shr="5"/>
- </reg32>
- <reg32 offset="5" name="5">
- <bitfield name="BASE_HI" low="0" high="16"/>
- <bitfield name="DEPTH" low="17" high="29" type="uint"/>
- </reg32>
- <reg32 offset="6" name="6">
- <!-- overlaps with PLANE_PITCH -->
- <bitfield name="MIN_LOD_CLAMP" low="0" high="11" type="ufixed" radix="8"/>
- <!-- pitch for plane 2 / plane 3 -->
- <bitfield name="PLANE_PITCH" low="8" high="31" type="uint"/>
- </reg32>
- <!-- 7/8 is plane 2 address for planar formats -->
- <reg32 offset="7" name="7">
- <bitfield name="FLAG_LO" low="5" high="31" shr="5"/>
- </reg32>
- <reg32 offset="8" name="8">
- <bitfield name="FLAG_HI" low="0" high="16"/>
- </reg32>
- <!-- 9/10 is plane 3 address for planar formats -->
- <reg32 offset="9" name="9">
- <bitfield name="FLAG_BUFFER_ARRAY_PITCH" low="0" high="16" shr="4" type="uint"/>
- </reg32>
- <reg32 offset="10" name="10">
- <bitfield name="FLAG_BUFFER_PITCH" low="0" high="6" shr="6" type="uint"/>
- <!-- log2 size of the first level, required for mipmapping -->
- <bitfield name="FLAG_BUFFER_LOGW" low="8" high="11" type="uint"/>
- <bitfield name="FLAG_BUFFER_LOGH" low="12" high="15" type="uint"/>
- </reg32>
- <reg32 offset="11" name="11"/>
- <reg32 offset="12" name="12"/>
- <reg32 offset="13" name="13"/>
- <reg32 offset="14" name="14"/>
- <reg32 offset="15" name="15"/>
-</domain>
-
-<domain name="A6XX_UBO" width="32">
- <reg32 offset="0" name="0">
- <bitfield name="BASE_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="BASE_HI" low="0" high="16"/>
- <bitfield name="SIZE" low="17" high="31"/> <!-- size in vec4 (4xDWORD) units -->
- </reg32>
-</domain>
-
<domain name="A6XX_PDC" width="32">
<reg32 offset="0x1140" name="GPU_ENABLE_PDC"/>
<reg32 offset="0x1148" name="GPU_SEQ_START_ADDR"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
new file mode 100644
index 000000000000..307d43dda8a2
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
@@ -0,0 +1,198 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+<import file="adreno/a6xx_enums.xml"/>
+
+<domain name="A6XX_TEX_SAMP" width="32">
+ <doc>Texture sampler dwords</doc>
+ <enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_NEAREST" value="0"/>
+ <value name="A6XX_TEX_LINEAR" value="1"/>
+ <value name="A6XX_TEX_ANISO" value="2"/>
+ <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
+ </enum>
+ <enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_REPEAT" value="0"/>
+ <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
+ <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
+ <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
+ <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
+ </enum>
+ <enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_ANISO_1" value="0"/>
+ <value name="A6XX_TEX_ANISO_2" value="1"/>
+ <value name="A6XX_TEX_ANISO_4" value="2"/>
+ <value name="A6XX_TEX_ANISO_8" value="3"/>
+ <value name="A6XX_TEX_ANISO_16" value="4"/>
+ </enum>
+ <enum name="a6xx_reduction_mode">
+ <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
+ <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
+ <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
+ </enum>
+ <enum name="a6xx_fast_border_color">
+ <!-- R B G A -->
+ <value name="A6XX_BORDER_COLOR_0_0_0_0" value="0"/>
+ <value name="A6XX_BORDER_COLOR_0_0_0_1" value="1"/>
+ <value name="A6XX_BORDER_COLOR_1_1_1_0" value="2"/>
+ <value name="A6XX_BORDER_COLOR_1_1_1_1" value="3"/>
+ </enum>
+
+ <reg32 offset="0" name="0">
+ <bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
+ <bitfield name="XY_MAG" low="1" high="2" type="a6xx_tex_filter"/>
+ <bitfield name="XY_MIN" low="3" high="4" type="a6xx_tex_filter"/>
+ <bitfield name="WRAP_S" low="5" high="7" type="a6xx_tex_clamp"/>
+ <bitfield name="WRAP_T" low="8" high="10" type="a6xx_tex_clamp"/>
+ <bitfield name="WRAP_R" low="11" high="13" type="a6xx_tex_clamp"/>
+ <bitfield name="ANISO" low="14" high="16" type="a6xx_tex_aniso"/>
+ <bitfield name="LOD_BIAS" low="19" high="31" type="fixed" radix="8"/><!-- no idea how many bits for real -->
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="CLAMPENABLE" pos="0" type="boolean">
+ <doc>
+ clamp result to [0, 1] if the format is unorm or
+ [-1, 1] if the format is snorm, *after*
+ filtering. Has no effect for other formats.
+ </doc>
+ </bitfield>
+ <bitfield name="COMPARE_FUNC" low="1" high="3" type="adreno_compare_func"/>
+ <bitfield name="CUBEMAPSEAMLESSFILTOFF" pos="4" type="boolean"/>
+ <bitfield name="UNNORM_COORDS" pos="5" type="boolean"/>
+ <bitfield name="MIPFILTER_LINEAR_FAR" pos="6" type="boolean"/>
+ <bitfield name="MAX_LOD" low="8" high="19" type="ufixed" radix="8"/>
+ <bitfield name="MIN_LOD" low="20" high="31" type="ufixed" radix="8"/>
+ </reg32>
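The LOD fields are fixed point with radix="8" (256 steps per LOD unit), signed
for LOD_BIAS and unsigned for MIN_LOD/MAX_LOD; a minimal conversion sketch,
with the field widths taken from the definitions above:

    #include <stdint.h>

    /* LOD_BIAS: 13-bit signed fixed point, 8 fractional bits. */
    static uint32_t pack_lod_bias(float bias)
    {
        int32_t v = (int32_t)(bias * 256.0f);
        return (uint32_t)v & 0x1fff;        /* bits 19..31 of dword 0 */
    }

    /* MIN_LOD/MAX_LOD: 12-bit unsigned fixed point, 8 fractional bits. */
    static uint32_t pack_ulod(float lod)
    {
        if (lod < 0.0f)
            lod = 0.0f;
        uint32_t v = (uint32_t)(lod * 256.0f);
        return v > 0xfff ? 0xfff : v;
    }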
+ <reg32 offset="2" name="2">
+ <bitfield name="REDUCTION_MODE" low="0" high="1" type="a6xx_reduction_mode"/>
+ <bitfield name="FASTBORDERCOLOR" low="2" high="3" type="a6xx_fast_border_color"/>
+ <bitfield name="FASTBORDERCOLOREN" pos="4" type="boolean"/>
+ <bitfield name="CHROMA_LINEAR" pos="5" type="boolean"/>
+ <bitfield name="BCOLOR" low="7" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3"/>
+</domain>
+
+<domain name="A6XX_TEX_CONST" width="32" varset="chip">
+ <doc>Texture constant dwords</doc>
+ <enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_X" value="0"/>
+ <value name="A6XX_TEX_Y" value="1"/>
+ <value name="A6XX_TEX_Z" value="2"/>
+ <value name="A6XX_TEX_W" value="3"/>
+ <value name="A6XX_TEX_ZERO" value="4"/>
+ <value name="A6XX_TEX_ONE" value="5"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
+ <bitfield name="SRGB" pos="2" type="boolean"/>
+ <bitfield name="SWIZ_X" low="4" high="6" type="a6xx_tex_swiz"/>
+ <bitfield name="SWIZ_Y" low="7" high="9" type="a6xx_tex_swiz"/>
+ <bitfield name="SWIZ_Z" low="10" high="12" type="a6xx_tex_swiz"/>
+ <bitfield name="SWIZ_W" low="13" high="15" type="a6xx_tex_swiz"/>
+ <bitfield name="MIPLVLS" low="16" high="19" type="uint"/>
+ <!-- overlaps with MIPLVLS -->
+ <bitfield name="CHROMA_MIDPOINT_X" pos="16" type="boolean"/>
+ <bitfield name="CHROMA_MIDPOINT_Y" pos="18" type="boolean"/>
+ <bitfield name="SAMPLES" low="20" high="21" type="a3xx_msaa_samples"/>
+ <bitfield name="FMT" low="22" high="29" type="a6xx_format"/>
+ <!--
+ Why is the swap needed in addition to SWIZ_*? The swap
+ is performed before border color replacement, while the
+ swizzle is applied after it.
+ -->
+ <bitfield name="SWAP" low="30" high="31" type="a3xx_color_swap"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="WIDTH" low="0" high="14" type="uint"/>
+ <bitfield name="HEIGHT" low="15" high="29" type="uint"/>
+ <bitfield name="MUTABLEEN" pos="31" type="boolean" variants="A7XX-"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <!--
+ These fields overlap PITCH, and are used instead of
+ PITCH/PITCHALIGN when TYPE is A6XX_TEX_BUFFER.
+ -->
+ <doc> probably for D3D structured UAVs, normally set to 1 </doc>
+ <bitfield name="STRUCTSIZETEXELS" low="4" high="15" type="uint"/>
+ <bitfield name="STARTOFFSETTEXELS" low="16" high="21" type="uint"/>
+
+ <!-- minimum pitch (for mipmap levels): log2(pitchalign / 64) -->
+ <bitfield name="PITCHALIGN" low="0" high="3" type="uint"/>
+ <doc>Pitch in bytes (so actually stride)</doc>
+ <bitfield name="PITCH" low="7" high="28" type="uint"/>
+ <bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
+ </reg32>
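+ <!--
+ Worked example from the log2 note above: if the smaller mipmap levels
+ must keep a 256-byte pitch alignment, PITCHALIGN = log2(256 / 64) = 2,
+ while PITCH holds the level 0 stride in bytes directly.
+ -->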
+ <reg32 offset="3" name="3">
+ <!--
+ ARRAY_PITCH is basically LAYERSZ for the first mipmap level; for
+ 3d textures (laid out mipmap level first), MIN_LAYERSZ is the
+ layer size at the point where it stops being reduced when moving
+ to higher (smaller) mipmap levels.
+ -->
+ <bitfield name="ARRAY_PITCH" low="0" high="22" shr="12" type="uint"/>
+ <bitfield name="MIN_LAYERSZ" low="23" high="26" shr="12"/>
+ <!--
+ By default, levels with w < 16 are linear. TILE_ALL makes all
+ levels tiled, and seems required when using UBWC, since all
+ levels have UBWC (can possibly be disabled?).
+ -->
+ <bitfield name="TILE_ALL" pos="27" type="boolean"/>
+ <bitfield name="FLAG" pos="28" type="boolean"/>
+ </reg32>
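+ <!--
+ shr="12" means the field stores the value right-shifted by 12, i.e. in
+ 4 KiB units: a per-layer size of 0x20000 bytes would be programmed as
+ ARRAY_PITCH = 0x20 (sketch, assuming the usual shr semantics of this
+ database).
+ -->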
+ <!-- For 2-3 plane formats, BASE is the flag buffer address (if
+ enabled); the address of the non-flag base buffer is determined
+ automatically and must follow the flag buffer.
+ -->
+ <reg32 offset="4" name="4">
+ <bitfield name="BASE_LO" low="5" high="31" shr="5"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="BASE_HI" low="0" high="16"/>
+ <bitfield name="DEPTH" low="17" high="29" type="uint"/>
+ </reg32>
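+ <!--
+ Minimal C sketch of reassembling the base iova from the two dwords,
+ assuming the usual shr semantics (BASE_LO is the extracted field value):
+   uint64_t base = ((uint64_t)BASE_HI << 32) | ((uint64_t)BASE_LO << 5);
+ which implies the buffer must be 32-byte aligned.
+ -->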
+ <reg32 offset="6" name="6">
+ <!-- overlaps with PLANE_PITCH -->
+ <bitfield name="MIN_LOD_CLAMP" low="0" high="11" type="ufixed" radix="8"/>
+ <!-- pitch for plane 2 / plane 3 -->
+ <bitfield name="PLANE_PITCH" low="8" high="31" type="uint"/>
+ </reg32>
+ <!-- 7/8 is plane 2 address for planar formats -->
+ <reg32 offset="7" name="7">
+ <bitfield name="FLAG_LO" low="5" high="31" shr="5"/>
+ </reg32>
+ <reg32 offset="8" name="8">
+ <bitfield name="FLAG_HI" low="0" high="16"/>
+ </reg32>
+ <!-- 9/10 is plane 3 address for planar formats -->
+ <reg32 offset="9" name="9">
+ <bitfield name="FLAG_BUFFER_ARRAY_PITCH" low="0" high="16" shr="4" type="uint"/>
+ </reg32>
+ <reg32 offset="10" name="10">
+ <bitfield name="FLAG_BUFFER_PITCH" low="0" high="6" shr="6" type="uint"/>
+ <!-- log2 size of the first level, required for mipmapping -->
+ <bitfield name="FLAG_BUFFER_LOGW" low="8" high="11" type="uint"/>
+ <bitfield name="FLAG_BUFFER_LOGH" low="12" high="15" type="uint"/>
+ </reg32>
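+ <!--
+ Example (assumption): for a 1024x512 level 0 flag buffer the driver
+ would program FLAG_BUFFER_LOGW = log2(1024) = 10 and FLAG_BUFFER_LOGH =
+ log2(512) = 9, presumably rounding non-power-of-two sizes up.
+ -->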
+ <reg32 offset="11" name="11"/>
+ <reg32 offset="12" name="12"/>
+ <reg32 offset="13" name="13"/>
+ <reg32 offset="14" name="14"/>
+ <reg32 offset="15" name="15"/>
+</domain>
+
+<domain name="A6XX_UBO" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="BASE_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="BASE_HI" low="0" high="16"/>
+ <bitfield name="SIZE" low="17" high="31"/> <!-- size in vec4 (4xDWORD) units -->
+ </reg32>
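+ <!--
+ Example from the unit note above: a 4 KiB UBO would be programmed as
+ SIZE = 4096 / 16 = 256, since SIZE counts vec4 (16-byte) units.
+ -->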
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
new file mode 100644
index 000000000000..665539b098c6
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
@@ -0,0 +1,383 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a6xx_tile_mode">
+ <value name="TILE6_LINEAR" value="0"/>
+ <value name="TILE6_2" value="2"/>
+ <value name="TILE6_3" value="3"/>
+</enum>
+
+<enum name="a6xx_format">
+ <value value="0x02" name="FMT6_A8_UNORM"/>
+ <value value="0x03" name="FMT6_8_UNORM"/>
+ <value value="0x04" name="FMT6_8_SNORM"/>
+ <value value="0x05" name="FMT6_8_UINT"/>
+ <value value="0x06" name="FMT6_8_SINT"/>
+
+ <value value="0x08" name="FMT6_4_4_4_4_UNORM"/>
+ <value value="0x0a" name="FMT6_5_5_5_1_UNORM"/>
+ <value value="0x0c" name="FMT6_1_5_5_5_UNORM"/> <!-- read only -->
+ <value value="0x0e" name="FMT6_5_6_5_UNORM"/>
+
+ <value value="0x0f" name="FMT6_8_8_UNORM"/>
+ <value value="0x10" name="FMT6_8_8_SNORM"/>
+ <value value="0x11" name="FMT6_8_8_UINT"/>
+ <value value="0x12" name="FMT6_8_8_SINT"/>
+ <value value="0x13" name="FMT6_L8_A8_UNORM"/>
+
+ <value value="0x15" name="FMT6_16_UNORM"/>
+ <value value="0x16" name="FMT6_16_SNORM"/>
+ <value value="0x17" name="FMT6_16_FLOAT"/>
+ <value value="0x18" name="FMT6_16_UINT"/>
+ <value value="0x19" name="FMT6_16_SINT"/>
+
+ <value value="0x21" name="FMT6_8_8_8_UNORM"/>
+ <value value="0x22" name="FMT6_8_8_8_SNORM"/>
+ <value value="0x23" name="FMT6_8_8_8_UINT"/>
+ <value value="0x24" name="FMT6_8_8_8_SINT"/>
+
+ <value value="0x30" name="FMT6_8_8_8_8_UNORM"/>
+ <value value="0x31" name="FMT6_8_8_8_X8_UNORM"/> <!-- samples 1 for alpha -->
+ <value value="0x32" name="FMT6_8_8_8_8_SNORM"/>
+ <value value="0x33" name="FMT6_8_8_8_8_UINT"/>
+ <value value="0x34" name="FMT6_8_8_8_8_SINT"/>
+
+ <value value="0x35" name="FMT6_9_9_9_E5_FLOAT"/>
+
+ <value value="0x36" name="FMT6_10_10_10_2_UNORM"/>
+ <value value="0x37" name="FMT6_10_10_10_2_UNORM_DEST"/>
+ <value value="0x39" name="FMT6_10_10_10_2_SNORM"/>
+ <value value="0x3a" name="FMT6_10_10_10_2_UINT"/>
+ <value value="0x3b" name="FMT6_10_10_10_2_SINT"/>
+
+ <value value="0x42" name="FMT6_11_11_10_FLOAT"/>
+
+ <value value="0x43" name="FMT6_16_16_UNORM"/>
+ <value value="0x44" name="FMT6_16_16_SNORM"/>
+ <value value="0x45" name="FMT6_16_16_FLOAT"/>
+ <value value="0x46" name="FMT6_16_16_UINT"/>
+ <value value="0x47" name="FMT6_16_16_SINT"/>
+
+ <value value="0x48" name="FMT6_32_UNORM"/>
+ <value value="0x49" name="FMT6_32_SNORM"/>
+ <value value="0x4a" name="FMT6_32_FLOAT"/>
+ <value value="0x4b" name="FMT6_32_UINT"/>
+ <value value="0x4c" name="FMT6_32_SINT"/>
+ <value value="0x4d" name="FMT6_32_FIXED"/>
+
+ <value value="0x58" name="FMT6_16_16_16_UNORM"/>
+ <value value="0x59" name="FMT6_16_16_16_SNORM"/>
+ <value value="0x5a" name="FMT6_16_16_16_FLOAT"/>
+ <value value="0x5b" name="FMT6_16_16_16_UINT"/>
+ <value value="0x5c" name="FMT6_16_16_16_SINT"/>
+
+ <value value="0x60" name="FMT6_16_16_16_16_UNORM"/>
+ <value value="0x61" name="FMT6_16_16_16_16_SNORM"/>
+ <value value="0x62" name="FMT6_16_16_16_16_FLOAT"/>
+ <value value="0x63" name="FMT6_16_16_16_16_UINT"/>
+ <value value="0x64" name="FMT6_16_16_16_16_SINT"/>
+
+ <value value="0x65" name="FMT6_32_32_UNORM"/>
+ <value value="0x66" name="FMT6_32_32_SNORM"/>
+ <value value="0x67" name="FMT6_32_32_FLOAT"/>
+ <value value="0x68" name="FMT6_32_32_UINT"/>
+ <value value="0x69" name="FMT6_32_32_SINT"/>
+ <value value="0x6a" name="FMT6_32_32_FIXED"/>
+
+ <value value="0x70" name="FMT6_32_32_32_UNORM"/>
+ <value value="0x71" name="FMT6_32_32_32_SNORM"/>
+ <value value="0x72" name="FMT6_32_32_32_UINT"/>
+ <value value="0x73" name="FMT6_32_32_32_SINT"/>
+ <value value="0x74" name="FMT6_32_32_32_FLOAT"/>
+ <value value="0x75" name="FMT6_32_32_32_FIXED"/>
+
+ <value value="0x80" name="FMT6_32_32_32_32_UNORM"/>
+ <value value="0x81" name="FMT6_32_32_32_32_SNORM"/>
+ <value value="0x82" name="FMT6_32_32_32_32_FLOAT"/>
+ <value value="0x83" name="FMT6_32_32_32_32_UINT"/>
+ <value value="0x84" name="FMT6_32_32_32_32_SINT"/>
+ <value value="0x85" name="FMT6_32_32_32_32_FIXED"/>
+
+ <value value="0x8c" name="FMT6_G8R8B8R8_422_UNORM"/> <!-- UYVY -->
+ <value value="0x8d" name="FMT6_R8G8R8B8_422_UNORM"/> <!-- YUYV -->
+ <value value="0x8e" name="FMT6_R8_G8B8_2PLANE_420_UNORM"/> <!-- NV12 -->
+ <value value="0x8f" name="FMT6_NV21"/>
+ <value value="0x90" name="FMT6_R8_G8_B8_3PLANE_420_UNORM"/> <!-- YV12 -->
+
+ <value value="0x91" name="FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8"/>
+
+ <!-- Note: tiling/UBWC for these may be different from equivalent formats.
+ For example, FMT6_NV12_Y is not compatible with FMT6_8_UNORM.
+ -->
+ <value value="0x94" name="FMT6_NV12_Y"/>
+ <value value="0x95" name="FMT6_NV12_UV"/>
+ <value value="0x96" name="FMT6_NV12_VU"/>
+ <value value="0x97" name="FMT6_NV12_4R"/>
+ <value value="0x98" name="FMT6_NV12_4R_Y"/>
+ <value value="0x99" name="FMT6_NV12_4R_UV"/>
+ <value value="0x9a" name="FMT6_P010"/>
+ <value value="0x9b" name="FMT6_P010_Y"/>
+ <value value="0x9c" name="FMT6_P010_UV"/>
+ <value value="0x9d" name="FMT6_TP10"/>
+ <value value="0x9e" name="FMT6_TP10_Y"/>
+ <value value="0x9f" name="FMT6_TP10_UV"/>
+
+ <value value="0xa0" name="FMT6_Z24_UNORM_S8_UINT"/>
+
+ <value value="0xab" name="FMT6_ETC2_RG11_UNORM"/>
+ <value value="0xac" name="FMT6_ETC2_RG11_SNORM"/>
+ <value value="0xad" name="FMT6_ETC2_R11_UNORM"/>
+ <value value="0xae" name="FMT6_ETC2_R11_SNORM"/>
+ <value value="0xaf" name="FMT6_ETC1"/>
+ <value value="0xb0" name="FMT6_ETC2_RGB8"/>
+ <value value="0xb1" name="FMT6_ETC2_RGBA8"/>
+ <value value="0xb2" name="FMT6_ETC2_RGB8A1"/>
+ <value value="0xb3" name="FMT6_DXT1"/>
+ <value value="0xb4" name="FMT6_DXT3"/>
+ <value value="0xb5" name="FMT6_DXT5"/>
+ <value value="0xb6" name="FMT6_RGTC1_UNORM"/>
+ <value value="0xb7" name="FMT6_RGTC1_UNORM_FAST"/>
+ <value value="0xb8" name="FMT6_RGTC1_SNORM"/>
+ <value value="0xb9" name="FMT6_RGTC1_SNORM_FAST"/>
+ <value value="0xba" name="FMT6_RGTC2_UNORM"/>
+ <value value="0xbb" name="FMT6_RGTC2_UNORM_FAST"/>
+ <value value="0xbc" name="FMT6_RGTC2_SNORM"/>
+ <value value="0xbd" name="FMT6_RGTC2_SNORM_FAST"/>
+ <value value="0xbe" name="FMT6_BPTC_UFLOAT"/>
+ <value value="0xbf" name="FMT6_BPTC_FLOAT"/>
+ <value value="0xc0" name="FMT6_BPTC"/>
+ <value value="0xc1" name="FMT6_ASTC_4x4"/>
+ <value value="0xc2" name="FMT6_ASTC_5x4"/>
+ <value value="0xc3" name="FMT6_ASTC_5x5"/>
+ <value value="0xc4" name="FMT6_ASTC_6x5"/>
+ <value value="0xc5" name="FMT6_ASTC_6x6"/>
+ <value value="0xc6" name="FMT6_ASTC_8x5"/>
+ <value value="0xc7" name="FMT6_ASTC_8x6"/>
+ <value value="0xc8" name="FMT6_ASTC_8x8"/>
+ <value value="0xc9" name="FMT6_ASTC_10x5"/>
+ <value value="0xca" name="FMT6_ASTC_10x6"/>
+ <value value="0xcb" name="FMT6_ASTC_10x8"/>
+ <value value="0xcc" name="FMT6_ASTC_10x10"/>
+ <value value="0xcd" name="FMT6_ASTC_12x10"/>
+ <value value="0xce" name="FMT6_ASTC_12x12"/>
+
+ <!-- for sampling stencil (integer, 2nd channel), not available on a630 -->
+ <value value="0xea" name="FMT6_Z24_UINT_S8_UINT"/>
+
+ <!-- Not a hw enum, used internally in driver -->
+ <value value="0xff" name="FMT6_NONE"/>
+
+</enum>
+
+<!-- probably same as a5xx -->
+<enum name="a6xx_polygon_mode">
+ <value name="POLYMODE6_POINTS" value="1"/>
+ <value name="POLYMODE6_LINES" value="2"/>
+ <value name="POLYMODE6_TRIANGLES" value="3"/>
+</enum>
+
+<enum name="a6xx_depth_format">
+ <value name="DEPTH6_NONE" value="0"/>
+ <value name="DEPTH6_16" value="1"/>
+ <value name="DEPTH6_24_8" value="2"/>
+ <value name="DEPTH6_32" value="4"/>
+</enum>
+
+<bitset name="a6x_cp_protect" inline="yes">
+ <bitfield name="BASE_ADDR" low="0" high="17"/>
+ <bitfield name="MASK_LEN" low="18" high="30"/>
+ <bitfield name="READ" pos="31" type="boolean"/>
+</bitset>
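+<!--
+Assumption, not confirmed here: each a6x_cp_protect entry above appears to
+describe a CP-protected register range starting at BASE_ADDR, with MASK_LEN
+encoding the range length and READ controlling read access to it.
+-->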
+
+<enum name="a6xx_shader_id">
+ <value value="0x9" name="A6XX_TP0_TMO_DATA"/>
+ <value value="0xa" name="A6XX_TP0_SMO_DATA"/>
+ <value value="0xb" name="A6XX_TP0_MIPMAP_BASE_DATA"/>
+ <value value="0x19" name="A6XX_TP1_TMO_DATA"/>
+ <value value="0x1a" name="A6XX_TP1_SMO_DATA"/>
+ <value value="0x1b" name="A6XX_TP1_MIPMAP_BASE_DATA"/>
+ <value value="0x29" name="A6XX_SP_INST_DATA"/>
+ <value value="0x2a" name="A6XX_SP_LB_0_DATA"/>
+ <value value="0x2b" name="A6XX_SP_LB_1_DATA"/>
+ <value value="0x2c" name="A6XX_SP_LB_2_DATA"/>
+ <value value="0x2d" name="A6XX_SP_LB_3_DATA"/>
+ <value value="0x2e" name="A6XX_SP_LB_4_DATA"/>
+ <value value="0x2f" name="A6XX_SP_LB_5_DATA"/>
+ <value value="0x30" name="A6XX_SP_CB_BINDLESS_DATA"/>
+ <value value="0x31" name="A6XX_SP_CB_LEGACY_DATA"/>
+ <value value="0x32" name="A6XX_SP_GFX_UAV_BASE_DATA"/>
+ <value value="0x33" name="A6XX_SP_INST_TAG"/>
+ <value value="0x34" name="A6XX_SP_CB_BINDLESS_TAG"/>
+ <value value="0x35" name="A6XX_SP_TMO_UMO_TAG"/>
+ <value value="0x36" name="A6XX_SP_SMO_TAG"/>
+ <value value="0x37" name="A6XX_SP_STATE_DATA"/>
+ <value value="0x49" name="A6XX_HLSQ_CHUNK_CVS_RAM"/>
+ <value value="0x4a" name="A6XX_HLSQ_CHUNK_CPS_RAM"/>
+ <value value="0x4b" name="A6XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
+ <value value="0x4c" name="A6XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
+ <value value="0x4d" name="A6XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
+ <value value="0x4e" name="A6XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
+ <value value="0x50" name="A6XX_HLSQ_CVS_MISC_RAM"/>
+ <value value="0x51" name="A6XX_HLSQ_CPS_MISC_RAM"/>
+ <value value="0x52" name="A6XX_HLSQ_INST_RAM"/>
+ <value value="0x53" name="A6XX_HLSQ_GFX_CVS_CONST_RAM"/>
+ <value value="0x54" name="A6XX_HLSQ_GFX_CPS_CONST_RAM"/>
+ <value value="0x55" name="A6XX_HLSQ_CVS_MISC_RAM_TAG"/>
+ <value value="0x56" name="A6XX_HLSQ_CPS_MISC_RAM_TAG"/>
+ <value value="0x57" name="A6XX_HLSQ_INST_RAM_TAG"/>
+ <value value="0x58" name="A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
+ <value value="0x59" name="A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
+ <value value="0x5a" name="A6XX_HLSQ_PWR_REST_RAM"/>
+ <value value="0x5b" name="A6XX_HLSQ_PWR_REST_TAG"/>
+ <value value="0x60" name="A6XX_HLSQ_DATAPATH_META"/>
+ <value value="0x61" name="A6XX_HLSQ_FRONTEND_META"/>
+ <value value="0x62" name="A6XX_HLSQ_INDIRECT_META"/>
+ <value value="0x63" name="A6XX_HLSQ_BACKEND_META"/>
+ <value value="0x70" name="A6XX_SP_LB_6_DATA"/>
+ <value value="0x71" name="A6XX_SP_LB_7_DATA"/>
+ <value value="0x73" name="A6XX_HLSQ_INST_RAM_1"/>
+</enum>
+
+<enum name="a6xx_debugbus_id">
+ <value value="0x1" name="A6XX_DBGBUS_CP"/>
+ <value value="0x2" name="A6XX_DBGBUS_RBBM"/>
+ <value value="0x3" name="A6XX_DBGBUS_VBIF"/>
+ <value value="0x4" name="A6XX_DBGBUS_HLSQ"/>
+ <value value="0x5" name="A6XX_DBGBUS_UCHE"/>
+ <value value="0x6" name="A6XX_DBGBUS_DPM"/>
+ <value value="0x7" name="A6XX_DBGBUS_TESS"/>
+ <value value="0x8" name="A6XX_DBGBUS_PC"/>
+ <value value="0x9" name="A6XX_DBGBUS_VFDP"/>
+ <value value="0xa" name="A6XX_DBGBUS_VPC"/>
+ <value value="0xb" name="A6XX_DBGBUS_TSE"/>
+ <value value="0xc" name="A6XX_DBGBUS_RAS"/>
+ <value value="0xd" name="A6XX_DBGBUS_VSC"/>
+ <value value="0xe" name="A6XX_DBGBUS_COM"/>
+ <value value="0x10" name="A6XX_DBGBUS_LRZ"/>
+ <value value="0x11" name="A6XX_DBGBUS_A2D"/>
+ <value value="0x12" name="A6XX_DBGBUS_CCUFCHE"/>
+ <value value="0x13" name="A6XX_DBGBUS_GMU_CX"/>
+ <value value="0x14" name="A6XX_DBGBUS_RBP"/>
+ <value value="0x15" name="A6XX_DBGBUS_DCS"/>
+ <value value="0x16" name="A6XX_DBGBUS_DBGC"/>
+ <value value="0x17" name="A6XX_DBGBUS_CX"/>
+ <value value="0x18" name="A6XX_DBGBUS_GMU_GX"/>
+ <value value="0x19" name="A6XX_DBGBUS_TPFCHE"/>
+ <value value="0x1a" name="A6XX_DBGBUS_GBIF_GX"/>
+ <value value="0x1d" name="A6XX_DBGBUS_GPC"/>
+ <value value="0x1e" name="A6XX_DBGBUS_LARC"/>
+ <value value="0x1f" name="A6XX_DBGBUS_HLSQ_SPTP"/>
+ <value value="0x20" name="A6XX_DBGBUS_RB_0"/>
+ <value value="0x21" name="A6XX_DBGBUS_RB_1"/>
+ <value value="0x22" name="A6XX_DBGBUS_RB_2"/>
+ <value value="0x24" name="A6XX_DBGBUS_UCHE_WRAPPER"/>
+ <value value="0x28" name="A6XX_DBGBUS_CCU_0"/>
+ <value value="0x29" name="A6XX_DBGBUS_CCU_1"/>
+ <value value="0x2a" name="A6XX_DBGBUS_CCU_2"/>
+ <value value="0x38" name="A6XX_DBGBUS_VFD_0"/>
+ <value value="0x39" name="A6XX_DBGBUS_VFD_1"/>
+ <value value="0x3a" name="A6XX_DBGBUS_VFD_2"/>
+ <value value="0x3b" name="A6XX_DBGBUS_VFD_3"/>
+ <value value="0x3c" name="A6XX_DBGBUS_VFD_4"/>
+ <value value="0x3d" name="A6XX_DBGBUS_VFD_5"/>
+ <value value="0x40" name="A6XX_DBGBUS_SP_0"/>
+ <value value="0x41" name="A6XX_DBGBUS_SP_1"/>
+ <value value="0x42" name="A6XX_DBGBUS_SP_2"/>
+ <value value="0x48" name="A6XX_DBGBUS_TPL1_0"/>
+ <value value="0x49" name="A6XX_DBGBUS_TPL1_1"/>
+ <value value="0x4a" name="A6XX_DBGBUS_TPL1_2"/>
+ <value value="0x4b" name="A6XX_DBGBUS_TPL1_3"/>
+ <value value="0x4c" name="A6XX_DBGBUS_TPL1_4"/>
+ <value value="0x4d" name="A6XX_DBGBUS_TPL1_5"/>
+ <value value="0x58" name="A6XX_DBGBUS_SPTP_0"/>
+ <value value="0x59" name="A6XX_DBGBUS_SPTP_1"/>
+ <value value="0x5a" name="A6XX_DBGBUS_SPTP_2"/>
+ <value value="0x5b" name="A6XX_DBGBUS_SPTP_3"/>
+ <value value="0x5c" name="A6XX_DBGBUS_SPTP_4"/>
+ <value value="0x5d" name="A6XX_DBGBUS_SPTP_5"/>
+</enum>
+
+<!--
+Used in a6xx_a2d_bit_cntl. The value mostly seems to correlate to the
+component type/size, so I think it relates to the internal format used
+for blending? The one exception is that 16b unorm and 32b float use the
+same value... maybe 16b unorm is uncommon enough that it was just easier
+to upconvert to 32b float internally?
+
+ 8b unorm: 10 (sometimes 0, is the high bit part of something else?)
+16b unorm: 4
+
+32b int: 7
+16b int: 6
+ 8b int: 5
+
+32b float: 4
+16b float: 3
+ -->
+<enum name="a6xx_2d_ifmt">
+ <value value="0x10" name="R2D_UNORM8"/>
+ <value value="0x7" name="R2D_INT32"/>
+ <value value="0x6" name="R2D_INT16"/>
+ <value value="0x5" name="R2D_INT8"/>
+ <value value="0x4" name="R2D_FLOAT32"/>
+ <value value="0x3" name="R2D_FLOAT16"/>
+ <value value="0x1" name="R2D_UNORM8_SRGB"/>
+ <value value="0x0" name="R2D_RAW"/>
+</enum>
+
+<enum name="a6xx_tex_type">
+ <value name="A6XX_TEX_1D" value="0"/>
+ <value name="A6XX_TEX_2D" value="1"/>
+ <value name="A6XX_TEX_CUBE" value="2"/>
+ <value name="A6XX_TEX_3D" value="3"/>
+ <value name="A6XX_TEX_BUFFER" value="4"/>
+ <doc>
+ A special buffer type for use as the source for buffer-to-image
+ copies, with lower alignment requirements than A6XX_TEX_2D;
+ available since A7XX.
+ </doc>
+ <value name="A6XX_TEX_IMG_BUFFER" value="5"/>
+</enum>
+
+<enum name="a6xx_ztest_mode">
+ <doc>Allow early z-test and early-lrz (if applicable)</doc>
+ <value value="0x0" name="A6XX_EARLY_Z"/>
+ <doc>Disable early z-test and early-lrz test (if applicable)</doc>
+ <value value="0x1" name="A6XX_LATE_Z"/>
+ <doc>
+ A special mode that allows early-lrz (if applicable) or early-z
+ tests, but also does late-z tests, at which point it writes depth.
+
+ This mode is used when a fragment can be killed (via discard or
+ a sample mask) after the early-z test and also writes depth. In
+ that case depth can be written only at the late-z stage, but it's
+ ok to use early-z to discard fragments.
+
+ However, this mode is not compatible with:
+ - Lack of D/S attachment
+ - Stencil writes on stencil or depth test failures
+ - Per-sample shading
+ </doc>
+ <value value="0x2" name="A6XX_EARLY_Z_LATE_Z"/>
+ <doc>Not a real hw value, used internally by mesa</doc>
+ <value value="0x3" name="A6XX_INVALID_ZTEST"/>
+</enum>
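+<!--
+Hypothetical driver-side selection implied by the docs above (a sketch,
+not an authoritative rule set):
+  if (shader writes depth)                      use A6XX_LATE_Z;
+  else if (fragment may be killed after early-z
+           and depth is written)                use A6XX_EARLY_Z_LATE_Z;
+  else                                          use A6XX_EARLY_Z;
+falling back from A6XX_EARLY_Z_LATE_Z to A6XX_LATE_Z when there is no D/S
+attachment, stencil writes on test failure, or per-sample shading.
+-->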
+
+<enum name="a6xx_tess_spacing">
+ <value value="0x0" name="TESS_EQUAL"/>
+ <value value="0x2" name="TESS_FRACTIONAL_ODD"/>
+ <value value="0x3" name="TESS_FRACTIONAL_EVEN"/>
+</enum>
+<enum name="a6xx_tess_output">
+ <value value="0x0" name="TESS_POINTS"/>
+ <value value="0x1" name="TESS_LINES"/>
+ <value value="0x2" name="TESS_CW_TRIS"/>
+ <value value="0x3" name="TESS_CCW_TRIS"/>
+</enum>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml
new file mode 100644
index 000000000000..c446a2eb1120
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml
@@ -0,0 +1,600 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a6xx_cp_perfcounter_select">
+ <value value="0" name="PERF_CP_ALWAYS_COUNT"/>
+ <value value="1" name="PERF_CP_BUSY_GFX_CORE_IDLE"/>
+ <value value="2" name="PERF_CP_BUSY_CYCLES"/>
+ <value value="3" name="PERF_CP_NUM_PREEMPTIONS"/>
+ <value value="4" name="PERF_CP_PREEMPTION_REACTION_DELAY"/>
+ <value value="5" name="PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
+ <value value="6" name="PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
+ <value value="7" name="PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
+ <value value="8" name="PERF_CP_PREDICATED_DRAWS_KILLED"/>
+ <value value="9" name="PERF_CP_MODE_SWITCH"/>
+ <value value="10" name="PERF_CP_ZPASS_DONE"/>
+ <value value="11" name="PERF_CP_CONTEXT_DONE"/>
+ <value value="12" name="PERF_CP_CACHE_FLUSH"/>
+ <value value="13" name="PERF_CP_LONG_PREEMPTIONS"/>
+ <value value="14" name="PERF_CP_SQE_I_CACHE_STARVE"/>
+ <value value="15" name="PERF_CP_SQE_IDLE"/>
+ <value value="16" name="PERF_CP_SQE_PM4_STARVE_RB_IB"/>
+ <value value="17" name="PERF_CP_SQE_PM4_STARVE_SDS"/>
+ <value value="18" name="PERF_CP_SQE_MRB_STARVE"/>
+ <value value="19" name="PERF_CP_SQE_RRB_STARVE"/>
+ <value value="20" name="PERF_CP_SQE_VSD_STARVE"/>
+ <value value="21" name="PERF_CP_VSD_DECODE_STARVE"/>
+ <value value="22" name="PERF_CP_SQE_PIPE_OUT_STALL"/>
+ <value value="23" name="PERF_CP_SQE_SYNC_STALL"/>
+ <value value="24" name="PERF_CP_SQE_PM4_WFI_STALL"/>
+ <value value="25" name="PERF_CP_SQE_SYS_WFI_STALL"/>
+ <value value="26" name="PERF_CP_SQE_T4_EXEC"/>
+ <value value="27" name="PERF_CP_SQE_LOAD_STATE_EXEC"/>
+ <value value="28" name="PERF_CP_SQE_SAVE_SDS_STATE"/>
+ <value value="29" name="PERF_CP_SQE_DRAW_EXEC"/>
+ <value value="30" name="PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
+ <value value="31" name="PERF_CP_SQE_EXEC_PROFILED"/>
+ <value value="32" name="PERF_CP_MEMORY_POOL_EMPTY"/>
+ <value value="33" name="PERF_CP_MEMORY_POOL_SYNC_STALL"/>
+ <value value="34" name="PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
+ <value value="35" name="PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
+ <value value="36" name="PERF_CP_AHB_STALL_SQE_GMU"/>
+ <value value="37" name="PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
+ <value value="38" name="PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
+ <value value="39" name="PERF_CP_CLUSTER0_EMPTY"/>
+ <value value="40" name="PERF_CP_CLUSTER1_EMPTY"/>
+ <value value="41" name="PERF_CP_CLUSTER2_EMPTY"/>
+ <value value="42" name="PERF_CP_CLUSTER3_EMPTY"/>
+ <value value="43" name="PERF_CP_CLUSTER4_EMPTY"/>
+ <value value="44" name="PERF_CP_CLUSTER5_EMPTY"/>
+ <value value="45" name="PERF_CP_PM4_DATA"/>
+ <value value="46" name="PERF_CP_PM4_HEADERS"/>
+ <value value="47" name="PERF_CP_VBIF_READ_BEATS"/>
+ <value value="48" name="PERF_CP_VBIF_WRITE_BEATS"/>
+ <value value="49" name="PERF_CP_SQE_INSTR_COUNTER"/>
+</enum>
+
+<enum name="a6xx_rbbm_perfcounter_select">
+ <value value="0" name="PERF_RBBM_ALWAYS_COUNT"/>
+ <value value="1" name="PERF_RBBM_ALWAYS_ON"/>
+ <value value="2" name="PERF_RBBM_TSE_BUSY"/>
+ <value value="3" name="PERF_RBBM_RAS_BUSY"/>
+ <value value="4" name="PERF_RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="PERF_RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="PERF_RBBM_STATUS_MASKED"/>
+ <value value="7" name="PERF_RBBM_COM_BUSY"/>
+ <value value="8" name="PERF_RBBM_DCOM_BUSY"/>
+ <value value="9" name="PERF_RBBM_VBIF_BUSY"/>
+ <value value="10" name="PERF_RBBM_VSC_BUSY"/>
+ <value value="11" name="PERF_RBBM_TESS_BUSY"/>
+ <value value="12" name="PERF_RBBM_UCHE_BUSY"/>
+ <value value="13" name="PERF_RBBM_HLSQ_BUSY"/>
+</enum>
+
+<enum name="a6xx_pc_perfcounter_select">
+ <value value="0" name="PERF_PC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_PC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_PC_STALL_CYCLES_VFD"/>
+ <value value="3" name="PERF_PC_STALL_CYCLES_TSE"/>
+ <value value="4" name="PERF_PC_STALL_CYCLES_VPC"/>
+ <value value="5" name="PERF_PC_STALL_CYCLES_UCHE"/>
+ <value value="6" name="PERF_PC_STALL_CYCLES_TESS"/>
+ <value value="7" name="PERF_PC_STALL_CYCLES_TSE_ONLY"/>
+ <value value="8" name="PERF_PC_STALL_CYCLES_VPC_ONLY"/>
+ <value value="9" name="PERF_PC_PASS1_TF_STALL_CYCLES"/>
+ <value value="10" name="PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
+ <value value="11" name="PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
+ <value value="12" name="PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
+ <value value="13" name="PERF_PC_STARVE_CYCLES_FOR_POSITION"/>
+ <value value="14" name="PERF_PC_STARVE_CYCLES_DI"/>
+ <value value="15" name="PERF_PC_VIS_STREAMS_LOADED"/>
+ <value value="16" name="PERF_PC_INSTANCES"/>
+ <value value="17" name="PERF_PC_VPC_PRIMITIVES"/>
+ <value value="18" name="PERF_PC_DEAD_PRIM"/>
+ <value value="19" name="PERF_PC_LIVE_PRIM"/>
+ <value value="20" name="PERF_PC_VERTEX_HITS"/>
+ <value value="21" name="PERF_PC_IA_VERTICES"/>
+ <value value="22" name="PERF_PC_IA_PRIMITIVES"/>
+ <value value="23" name="PERF_PC_GS_PRIMITIVES"/>
+ <value value="24" name="PERF_PC_HS_INVOCATIONS"/>
+ <value value="25" name="PERF_PC_DS_INVOCATIONS"/>
+ <value value="26" name="PERF_PC_VS_INVOCATIONS"/>
+ <value value="27" name="PERF_PC_GS_INVOCATIONS"/>
+ <value value="28" name="PERF_PC_DS_PRIMITIVES"/>
+ <value value="29" name="PERF_PC_VPC_POS_DATA_TRANSACTION"/>
+ <value value="30" name="PERF_PC_3D_DRAWCALLS"/>
+ <value value="31" name="PERF_PC_2D_DRAWCALLS"/>
+ <value value="32" name="PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
+ <value value="33" name="PERF_TESS_BUSY_CYCLES"/>
+ <value value="34" name="PERF_TESS_WORKING_CYCLES"/>
+ <value value="35" name="PERF_TESS_STALL_CYCLES_PC"/>
+ <value value="36" name="PERF_TESS_STARVE_CYCLES_PC"/>
+ <value value="37" name="PERF_PC_TSE_TRANSACTION"/>
+ <value value="38" name="PERF_PC_TSE_VERTEX"/>
+ <value value="39" name="PERF_PC_TESS_PC_UV_TRANS"/>
+ <value value="40" name="PERF_PC_TESS_PC_UV_PATCHES"/>
+ <value value="41" name="PERF_PC_TESS_FACTOR_TRANS"/>
+</enum>
+
+<enum name="a6xx_vfd_perfcounter_select">
+ <value value="0" name="PERF_VFD_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VFD_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
+ <value value="3" name="PERF_VFD_STALL_CYCLES_SP_INFO"/>
+ <value value="4" name="PERF_VFD_STALL_CYCLES_SP_ATTR"/>
+ <value value="5" name="PERF_VFD_STARVE_CYCLES_UCHE"/>
+ <value value="6" name="PERF_VFD_RBUFFER_FULL"/>
+ <value value="7" name="PERF_VFD_ATTR_INFO_FIFO_FULL"/>
+ <value value="8" name="PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
+ <value value="9" name="PERF_VFD_NUM_ATTRIBUTES"/>
+ <value value="10" name="PERF_VFD_UPPER_SHADER_FIBERS"/>
+ <value value="11" name="PERF_VFD_LOWER_SHADER_FIBERS"/>
+ <value value="12" name="PERF_VFD_MODE_0_FIBERS"/>
+ <value value="13" name="PERF_VFD_MODE_1_FIBERS"/>
+ <value value="14" name="PERF_VFD_MODE_2_FIBERS"/>
+ <value value="15" name="PERF_VFD_MODE_3_FIBERS"/>
+ <value value="16" name="PERF_VFD_MODE_4_FIBERS"/>
+ <value value="17" name="PERF_VFD_TOTAL_VERTICES"/>
+ <value value="18" name="PERF_VFDP_STALL_CYCLES_VFD"/>
+ <value value="19" name="PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
+ <value value="20" name="PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
+ <value value="21" name="PERF_VFDP_STARVE_CYCLES_PC"/>
+ <value value="22" name="PERF_VFDP_VS_STAGE_WAVES"/>
+</enum>
+
+<enum name="a6xx_hlsq_perfcounter_select">
+ <value value="0" name="PERF_HLSQ_BUSY_CYCLES"/>
+ <value value="1" name="PERF_HLSQ_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
+ <value value="3" name="PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
+ <value value="4" name="PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
+ <value value="5" name="PERF_HLSQ_UCHE_LATENCY_COUNT"/>
+ <value value="6" name="PERF_HLSQ_FS_STAGE_1X_WAVES"/>
+ <value value="7" name="PERF_HLSQ_FS_STAGE_2X_WAVES"/>
+ <value value="8" name="PERF_HLSQ_QUADS"/>
+ <value value="9" name="PERF_HLSQ_CS_INVOCATIONS"/>
+ <value value="10" name="PERF_HLSQ_COMPUTE_DRAWCALLS"/>
+ <value value="11" name="PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
+ <value value="12" name="PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
+ <value value="13" name="PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
+ <value value="14" name="PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
+ <value value="15" name="PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
+ <value value="16" name="PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
+ <value value="17" name="PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
+ <value value="18" name="PERF_HLSQ_STALL_CYCLES_VPC"/>
+ <value value="19" name="PERF_HLSQ_PIXELS"/>
+ <value value="20" name="PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
+</enum>
+
+<enum name="a6xx_vpc_perfcounter_select">
+ <value value="0" name="PERF_VPC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VPC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_VPC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="PERF_VPC_STALL_CYCLES_VFD_WACK"/>
+ <value value="4" name="PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
+ <value value="5" name="PERF_VPC_STALL_CYCLES_PC"/>
+ <value value="6" name="PERF_VPC_STALL_CYCLES_SP_LM"/>
+ <value value="7" name="PERF_VPC_STARVE_CYCLES_SP"/>
+ <value value="8" name="PERF_VPC_STARVE_CYCLES_LRZ"/>
+ <value value="9" name="PERF_VPC_PC_PRIMITIVES"/>
+ <value value="10" name="PERF_VPC_SP_COMPONENTS"/>
+ <value value="11" name="PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
+ <value value="12" name="PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
+ <value value="13" name="PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
+ <value value="14" name="PERF_VPC_LM_TRANSACTION"/>
+ <value value="15" name="PERF_VPC_STREAMOUT_TRANSACTION"/>
+ <value value="16" name="PERF_VPC_VS_BUSY_CYCLES"/>
+ <value value="17" name="PERF_VPC_PS_BUSY_CYCLES"/>
+ <value value="18" name="PERF_VPC_VS_WORKING_CYCLES"/>
+ <value value="19" name="PERF_VPC_PS_WORKING_CYCLES"/>
+ <value value="20" name="PERF_VPC_STARVE_CYCLES_RB"/>
+ <value value="21" name="PERF_VPC_NUM_VPCRAM_READ_POS"/>
+ <value value="22" name="PERF_VPC_WIT_FULL_CYCLES"/>
+ <value value="23" name="PERF_VPC_VPCRAM_FULL_CYCLES"/>
+ <value value="24" name="PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
+ <value value="25" name="PERF_VPC_NUM_VPCRAM_WRITE"/>
+ <value value="26" name="PERF_VPC_NUM_VPCRAM_READ_SO"/>
+ <value value="27" name="PERF_VPC_NUM_ATTR_REQ_LM"/>
+</enum>
+
+<enum name="a6xx_tse_perfcounter_select">
+ <value value="0" name="PERF_TSE_BUSY_CYCLES"/>
+ <value value="1" name="PERF_TSE_CLIPPING_CYCLES"/>
+ <value value="2" name="PERF_TSE_STALL_CYCLES_RAS"/>
+ <value value="3" name="PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
+ <value value="4" name="PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
+ <value value="5" name="PERF_TSE_STARVE_CYCLES_PC"/>
+ <value value="6" name="PERF_TSE_INPUT_PRIM"/>
+ <value value="7" name="PERF_TSE_INPUT_NULL_PRIM"/>
+ <value value="8" name="PERF_TSE_TRIVAL_REJ_PRIM"/>
+ <value value="9" name="PERF_TSE_CLIPPED_PRIM"/>
+ <value value="10" name="PERF_TSE_ZERO_AREA_PRIM"/>
+ <value value="11" name="PERF_TSE_FACENESS_CULLED_PRIM"/>
+ <value value="12" name="PERF_TSE_ZERO_PIXEL_PRIM"/>
+ <value value="13" name="PERF_TSE_OUTPUT_NULL_PRIM"/>
+ <value value="14" name="PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
+ <value value="15" name="PERF_TSE_CINVOCATION"/>
+ <value value="16" name="PERF_TSE_CPRIMITIVES"/>
+ <value value="17" name="PERF_TSE_2D_INPUT_PRIM"/>
+ <value value="18" name="PERF_TSE_2D_ALIVE_CYCLES"/>
+ <value value="19" name="PERF_TSE_CLIP_PLANES"/>
+</enum>
+
+<enum name="a6xx_ras_perfcounter_select">
+ <value value="0" name="PERF_RAS_BUSY_CYCLES"/>
+ <value value="1" name="PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
+ <value value="2" name="PERF_RAS_STALL_CYCLES_LRZ"/>
+ <value value="3" name="PERF_RAS_STARVE_CYCLES_TSE"/>
+ <value value="4" name="PERF_RAS_SUPER_TILES"/>
+ <value value="5" name="PERF_RAS_8X4_TILES"/>
+ <value value="6" name="PERF_RAS_MASKGEN_ACTIVE"/>
+ <value value="7" name="PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
+ <value value="8" name="PERF_RAS_FULLY_COVERED_8X4_TILES"/>
+ <value value="9" name="PERF_RAS_PRIM_KILLED_INVISILBE"/>
+ <value value="10" name="PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
+ <value value="11" name="PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
+ <value value="12" name="PERF_RAS_BLOCKS"/>
+</enum>
+
+<enum name="a6xx_uche_perfcounter_select">
+ <value value="0" name="PERF_UCHE_BUSY_CYCLES"/>
+ <value value="1" name="PERF_UCHE_STALL_CYCLES_ARBITER"/>
+ <value value="2" name="PERF_UCHE_VBIF_LATENCY_CYCLES"/>
+ <value value="3" name="PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
+ <value value="4" name="PERF_UCHE_VBIF_READ_BEATS_TP"/>
+ <value value="5" name="PERF_UCHE_VBIF_READ_BEATS_VFD"/>
+ <value value="6" name="PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
+ <value value="7" name="PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
+ <value value="8" name="PERF_UCHE_VBIF_READ_BEATS_SP"/>
+ <value value="9" name="PERF_UCHE_READ_REQUESTS_TP"/>
+ <value value="10" name="PERF_UCHE_READ_REQUESTS_VFD"/>
+ <value value="11" name="PERF_UCHE_READ_REQUESTS_HLSQ"/>
+ <value value="12" name="PERF_UCHE_READ_REQUESTS_LRZ"/>
+ <value value="13" name="PERF_UCHE_READ_REQUESTS_SP"/>
+ <value value="14" name="PERF_UCHE_WRITE_REQUESTS_LRZ"/>
+ <value value="15" name="PERF_UCHE_WRITE_REQUESTS_SP"/>
+ <value value="16" name="PERF_UCHE_WRITE_REQUESTS_VPC"/>
+ <value value="17" name="PERF_UCHE_WRITE_REQUESTS_VSC"/>
+ <value value="18" name="PERF_UCHE_EVICTS"/>
+ <value value="19" name="PERF_UCHE_BANK_REQ0"/>
+ <value value="20" name="PERF_UCHE_BANK_REQ1"/>
+ <value value="21" name="PERF_UCHE_BANK_REQ2"/>
+ <value value="22" name="PERF_UCHE_BANK_REQ3"/>
+ <value value="23" name="PERF_UCHE_BANK_REQ4"/>
+ <value value="24" name="PERF_UCHE_BANK_REQ5"/>
+ <value value="25" name="PERF_UCHE_BANK_REQ6"/>
+ <value value="26" name="PERF_UCHE_BANK_REQ7"/>
+ <value value="27" name="PERF_UCHE_VBIF_READ_BEATS_CH0"/>
+ <value value="28" name="PERF_UCHE_VBIF_READ_BEATS_CH1"/>
+ <value value="29" name="PERF_UCHE_GMEM_READ_BEATS"/>
+ <value value="30" name="PERF_UCHE_TPH_REF_FULL"/>
+ <value value="31" name="PERF_UCHE_TPH_VICTIM_FULL"/>
+ <value value="32" name="PERF_UCHE_TPH_EXT_FULL"/>
+ <value value="33" name="PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
+ <value value="34" name="PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
+ <value value="35" name="PERF_UCHE_DCMP_LATENCY_CYCLES"/>
+ <value value="36" name="PERF_UCHE_VBIF_READ_BEATS_PC"/>
+ <value value="37" name="PERF_UCHE_READ_REQUESTS_PC"/>
+ <value value="38" name="PERF_UCHE_RAM_READ_REQ"/>
+ <value value="39" name="PERF_UCHE_RAM_WRITE_REQ"/>
+</enum>
+
+<enum name="a6xx_tp_perfcounter_select">
+ <value value="0" name="PERF_TP_BUSY_CYCLES"/>
+ <value value="1" name="PERF_TP_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_TP_LATENCY_CYCLES"/>
+ <value value="3" name="PERF_TP_LATENCY_TRANS"/>
+ <value value="4" name="PERF_TP_FLAG_CACHE_REQUEST_SAMPLES"/>
+ <value value="5" name="PERF_TP_FLAG_CACHE_REQUEST_LATENCY"/>
+ <value value="6" name="PERF_TP_L1_CACHELINE_REQUESTS"/>
+ <value value="7" name="PERF_TP_L1_CACHELINE_MISSES"/>
+ <value value="8" name="PERF_TP_SP_TP_TRANS"/>
+ <value value="9" name="PERF_TP_TP_SP_TRANS"/>
+ <value value="10" name="PERF_TP_OUTPUT_PIXELS"/>
+ <value value="11" name="PERF_TP_FILTER_WORKLOAD_16BIT"/>
+ <value value="12" name="PERF_TP_FILTER_WORKLOAD_32BIT"/>
+ <value value="13" name="PERF_TP_QUADS_RECEIVED"/>
+ <value value="14" name="PERF_TP_QUADS_OFFSET"/>
+ <value value="15" name="PERF_TP_QUADS_SHADOW"/>
+ <value value="16" name="PERF_TP_QUADS_ARRAY"/>
+ <value value="17" name="PERF_TP_QUADS_GRADIENT"/>
+ <value value="18" name="PERF_TP_QUADS_1D"/>
+ <value value="19" name="PERF_TP_QUADS_2D"/>
+ <value value="20" name="PERF_TP_QUADS_BUFFER"/>
+ <value value="21" name="PERF_TP_QUADS_3D"/>
+ <value value="22" name="PERF_TP_QUADS_CUBE"/>
+ <value value="23" name="PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
+ <value value="24" name="PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
+ <value value="25" name="PERF_TP_OUTPUT_PIXELS_POINT"/>
+ <value value="26" name="PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="27" name="PERF_TP_OUTPUT_PIXELS_MIP"/>
+ <value value="28" name="PERF_TP_OUTPUT_PIXELS_ANISO"/>
+ <value value="29" name="PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
+ <value value="30" name="PERF_TP_FLAG_CACHE_REQUESTS"/>
+ <value value="31" name="PERF_TP_FLAG_CACHE_MISSES"/>
+ <value value="32" name="PERF_TP_L1_5_L2_REQUESTS"/>
+ <value value="33" name="PERF_TP_2D_OUTPUT_PIXELS"/>
+ <value value="34" name="PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
+ <value value="35" name="PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="36" name="PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
+ <value value="37" name="PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
+ <value value="38" name="PERF_TP_TPA2TPC_TRANS"/>
+ <value value="39" name="PERF_TP_L1_MISSES_ASTC_1TILE"/>
+ <value value="40" name="PERF_TP_L1_MISSES_ASTC_2TILE"/>
+ <value value="41" name="PERF_TP_L1_MISSES_ASTC_4TILE"/>
+ <value value="42" name="PERF_TP_L1_5_L2_COMPRESS_REQS"/>
+ <value value="43" name="PERF_TP_L1_5_L2_COMPRESS_MISS"/>
+ <value value="44" name="PERF_TP_L1_BANK_CONFLICT"/>
+ <value value="45" name="PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
+ <value value="46" name="PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
+ <value value="47" name="PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
+ <value value="48" name="PERF_TP_FRONTEND_WORKING_CYCLES"/>
+ <value value="49" name="PERF_TP_L1_TAG_WORKING_CYCLES"/>
+ <value value="50" name="PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
+ <value value="51" name="PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
+ <value value="52" name="PERF_TP_BACKEND_WORKING_CYCLES"/>
+ <value value="53" name="PERF_TP_FLAG_CACHE_WORKING_CYCLES"/>
+ <value value="54" name="PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
+ <value value="55" name="PERF_TP_STARVE_CYCLES_SP"/>
+ <value value="56" name="PERF_TP_STARVE_CYCLES_UCHE"/>
+</enum>
+
+<enum name="a6xx_sp_perfcounter_select">
+ <value value="0" name="PERF_SP_BUSY_CYCLES"/>
+ <value value="1" name="PERF_SP_ALU_WORKING_CYCLES"/>
+ <value value="2" name="PERF_SP_EFU_WORKING_CYCLES"/>
+ <value value="3" name="PERF_SP_STALL_CYCLES_VPC"/>
+ <value value="4" name="PERF_SP_STALL_CYCLES_TP"/>
+ <value value="5" name="PERF_SP_STALL_CYCLES_UCHE"/>
+ <value value="6" name="PERF_SP_STALL_CYCLES_RB"/>
+ <value value="7" name="PERF_SP_NON_EXECUTION_CYCLES"/>
+ <value value="8" name="PERF_SP_WAVE_CONTEXTS"/>
+ <value value="9" name="PERF_SP_WAVE_CONTEXT_CYCLES"/>
+ <value value="10" name="PERF_SP_FS_STAGE_WAVE_CYCLES"/>
+ <value value="11" name="PERF_SP_FS_STAGE_WAVE_SAMPLES"/>
+ <value value="12" name="PERF_SP_VS_STAGE_WAVE_CYCLES"/>
+ <value value="13" name="PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
+ <value value="14" name="PERF_SP_FS_STAGE_DURATION_CYCLES"/>
+ <value value="15" name="PERF_SP_VS_STAGE_DURATION_CYCLES"/>
+ <value value="16" name="PERF_SP_WAVE_CTRL_CYCLES"/>
+ <value value="17" name="PERF_SP_WAVE_LOAD_CYCLES"/>
+ <value value="18" name="PERF_SP_WAVE_EMIT_CYCLES"/>
+ <value value="19" name="PERF_SP_WAVE_NOP_CYCLES"/>
+ <value value="20" name="PERF_SP_WAVE_WAIT_CYCLES"/>
+ <value value="21" name="PERF_SP_WAVE_FETCH_CYCLES"/>
+ <value value="22" name="PERF_SP_WAVE_IDLE_CYCLES"/>
+ <value value="23" name="PERF_SP_WAVE_END_CYCLES"/>
+ <value value="24" name="PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
+ <value value="25" name="PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
+ <value value="26" name="PERF_SP_WAVE_JOIN_CYCLES"/>
+ <value value="27" name="PERF_SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="28" name="PERF_SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="29" name="PERF_SP_LM_ATOMICS"/>
+ <value value="30" name="PERF_SP_GM_LOAD_INSTRUCTIONS"/>
+ <value value="31" name="PERF_SP_GM_STORE_INSTRUCTIONS"/>
+ <value value="32" name="PERF_SP_GM_ATOMICS"/>
+ <value value="33" name="PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="34" name="PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="35" name="PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="36" name="PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="37" name="PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="38" name="PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="39" name="PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="40" name="PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="41" name="PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="42" name="PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
+ <value value="43" name="PERF_SP_VS_INSTRUCTIONS"/>
+ <value value="44" name="PERF_SP_FS_INSTRUCTIONS"/>
+ <value value="45" name="PERF_SP_ADDR_LOCK_COUNT"/>
+ <value value="46" name="PERF_SP_UCHE_READ_TRANS"/>
+ <value value="47" name="PERF_SP_UCHE_WRITE_TRANS"/>
+ <value value="48" name="PERF_SP_EXPORT_VPC_TRANS"/>
+ <value value="49" name="PERF_SP_EXPORT_RB_TRANS"/>
+ <value value="50" name="PERF_SP_PIXELS_KILLED"/>
+ <value value="51" name="PERF_SP_ICL1_REQUESTS"/>
+ <value value="52" name="PERF_SP_ICL1_MISSES"/>
+ <value value="53" name="PERF_SP_HS_INSTRUCTIONS"/>
+ <value value="54" name="PERF_SP_DS_INSTRUCTIONS"/>
+ <value value="55" name="PERF_SP_GS_INSTRUCTIONS"/>
+ <value value="56" name="PERF_SP_CS_INSTRUCTIONS"/>
+ <value value="57" name="PERF_SP_GPR_READ"/>
+ <value value="58" name="PERF_SP_GPR_WRITE"/>
+ <value value="59" name="PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="60" name="PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="61" name="PERF_SP_LM_BANK_CONFLICTS"/>
+ <value value="62" name="PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
+ <value value="63" name="PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
+ <value value="64" name="PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
+ <value value="65" name="PERF_SP_LM_WORKING_CYCLES"/>
+ <value value="66" name="PERF_SP_DISPATCHER_WORKING_CYCLES"/>
+ <value value="67" name="PERF_SP_SEQUENCER_WORKING_CYCLES"/>
+ <value value="68" name="PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
+ <value value="69" name="PERF_SP_STARVE_CYCLES_HLSQ"/>
+ <value value="70" name="PERF_SP_NON_EXECUTION_LS_CYCLES"/>
+ <value value="71" name="PERF_SP_WORKING_EU"/>
+ <value value="72" name="PERF_SP_ANY_EU_WORKING"/>
+ <value value="73" name="PERF_SP_WORKING_EU_FS_STAGE"/>
+ <value value="74" name="PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
+ <value value="75" name="PERF_SP_WORKING_EU_VS_STAGE"/>
+ <value value="76" name="PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
+ <value value="77" name="PERF_SP_WORKING_EU_CS_STAGE"/>
+ <value value="78" name="PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
+ <value value="79" name="PERF_SP_GPR_READ_PREFETCH"/>
+ <value value="80" name="PERF_SP_GPR_READ_CONFLICT"/>
+ <value value="81" name="PERF_SP_GPR_WRITE_CONFLICT"/>
+ <value value="82" name="PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
+ <value value="83" name="PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
+ <value value="84" name="PERF_SP_EXECUTABLE_WAVES"/>
+</enum>
+
+<enum name="a6xx_rb_perfcounter_select">
+ <value value="0" name="PERF_RB_BUSY_CYCLES"/>
+ <value value="1" name="PERF_RB_STALL_CYCLES_HLSQ"/>
+ <value value="2" name="PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
+ <value value="3" name="PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
+ <value value="4" name="PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
+ <value value="5" name="PERF_RB_STARVE_CYCLES_SP"/>
+ <value value="6" name="PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
+ <value value="7" name="PERF_RB_STARVE_CYCLES_CCU"/>
+ <value value="8" name="PERF_RB_STARVE_CYCLES_Z_PLANE"/>
+ <value value="9" name="PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
+ <value value="10" name="PERF_RB_Z_WORKLOAD"/>
+ <value value="11" name="PERF_RB_HLSQ_ACTIVE"/>
+ <value value="12" name="PERF_RB_Z_READ"/>
+ <value value="13" name="PERF_RB_Z_WRITE"/>
+ <value value="14" name="PERF_RB_C_READ"/>
+ <value value="15" name="PERF_RB_C_WRITE"/>
+ <value value="16" name="PERF_RB_TOTAL_PASS"/>
+ <value value="17" name="PERF_RB_Z_PASS"/>
+ <value value="18" name="PERF_RB_Z_FAIL"/>
+ <value value="19" name="PERF_RB_S_FAIL"/>
+ <value value="20" name="PERF_RB_BLENDED_FXP_COMPONENTS"/>
+ <value value="21" name="PERF_RB_BLENDED_FP16_COMPONENTS"/>
+ <value value="22" name="PERF_RB_PS_INVOCATIONS"/>
+ <value value="23" name="PERF_RB_2D_ALIVE_CYCLES"/>
+ <value value="24" name="PERF_RB_2D_STALL_CYCLES_A2D"/>
+ <value value="25" name="PERF_RB_2D_STARVE_CYCLES_SRC"/>
+ <value value="26" name="PERF_RB_2D_STARVE_CYCLES_SP"/>
+ <value value="27" name="PERF_RB_2D_STARVE_CYCLES_DST"/>
+ <value value="28" name="PERF_RB_2D_VALID_PIXELS"/>
+ <value value="29" name="PERF_RB_3D_PIXELS"/>
+ <value value="30" name="PERF_RB_BLENDER_WORKING_CYCLES"/>
+ <value value="31" name="PERF_RB_ZPROC_WORKING_CYCLES"/>
+ <value value="32" name="PERF_RB_CPROC_WORKING_CYCLES"/>
+ <value value="33" name="PERF_RB_SAMPLER_WORKING_CYCLES"/>
+ <value value="34" name="PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
+ <value value="35" name="PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
+ <value value="36" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
+ <value value="37" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
+ <value value="38" name="PERF_RB_STALL_CYCLES_VPC"/>
+ <value value="39" name="PERF_RB_2D_INPUT_TRANS"/>
+ <value value="40" name="PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
+ <value value="41" name="PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
+ <value value="42" name="PERF_RB_BLENDED_FP32_COMPONENTS"/>
+ <value value="43" name="PERF_RB_COLOR_PIX_TILES"/>
+ <value value="44" name="PERF_RB_STALL_CYCLES_CCU"/>
+ <value value="45" name="PERF_RB_EARLY_Z_ARB3_GRANT"/>
+ <value value="46" name="PERF_RB_LATE_Z_ARB3_GRANT"/>
+ <value value="47" name="PERF_RB_EARLY_Z_SKIP_GRANT"/>
+</enum>
+
+<enum name="a6xx_vsc_perfcounter_select">
+ <value value="0" name="PERF_VSC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VSC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_VSC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="PERF_VSC_EOT_NUM"/>
+ <value value="4" name="PERF_VSC_INPUT_TILES"/>
+</enum>
+
+<enum name="a6xx_ccu_perfcounter_select">
+ <value value="0" name="PERF_CCU_BUSY_CYCLES"/>
+ <value value="1" name="PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
+ <value value="2" name="PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
+ <value value="3" name="PERF_CCU_STARVE_CYCLES_FLAG_RETURN"/>
+ <value value="4" name="PERF_CCU_DEPTH_BLOCKS"/>
+ <value value="5" name="PERF_CCU_COLOR_BLOCKS"/>
+ <value value="6" name="PERF_CCU_DEPTH_BLOCK_HIT"/>
+ <value value="7" name="PERF_CCU_COLOR_BLOCK_HIT"/>
+ <value value="8" name="PERF_CCU_PARTIAL_BLOCK_READ"/>
+ <value value="9" name="PERF_CCU_GMEM_READ"/>
+ <value value="10" name="PERF_CCU_GMEM_WRITE"/>
+ <value value="11" name="PERF_CCU_DEPTH_READ_FLAG0_COUNT"/>
+ <value value="12" name="PERF_CCU_DEPTH_READ_FLAG1_COUNT"/>
+ <value value="13" name="PERF_CCU_DEPTH_READ_FLAG2_COUNT"/>
+ <value value="14" name="PERF_CCU_DEPTH_READ_FLAG3_COUNT"/>
+ <value value="15" name="PERF_CCU_DEPTH_READ_FLAG4_COUNT"/>
+ <value value="16" name="PERF_CCU_DEPTH_READ_FLAG5_COUNT"/>
+ <value value="17" name="PERF_CCU_DEPTH_READ_FLAG6_COUNT"/>
+ <value value="18" name="PERF_CCU_DEPTH_READ_FLAG8_COUNT"/>
+ <value value="19" name="PERF_CCU_COLOR_READ_FLAG0_COUNT"/>
+ <value value="20" name="PERF_CCU_COLOR_READ_FLAG1_COUNT"/>
+ <value value="21" name="PERF_CCU_COLOR_READ_FLAG2_COUNT"/>
+ <value value="22" name="PERF_CCU_COLOR_READ_FLAG3_COUNT"/>
+ <value value="23" name="PERF_CCU_COLOR_READ_FLAG4_COUNT"/>
+ <value value="24" name="PERF_CCU_COLOR_READ_FLAG5_COUNT"/>
+ <value value="25" name="PERF_CCU_COLOR_READ_FLAG6_COUNT"/>
+ <value value="26" name="PERF_CCU_COLOR_READ_FLAG8_COUNT"/>
+ <value value="27" name="PERF_CCU_2D_RD_REQ"/>
+ <value value="28" name="PERF_CCU_2D_WR_REQ"/>
+</enum>
+
+<enum name="a6xx_lrz_perfcounter_select">
+ <value value="0" name="PERF_LRZ_BUSY_CYCLES"/>
+ <value value="1" name="PERF_LRZ_STARVE_CYCLES_RAS"/>
+ <value value="2" name="PERF_LRZ_STALL_CYCLES_RB"/>
+ <value value="3" name="PERF_LRZ_STALL_CYCLES_VSC"/>
+ <value value="4" name="PERF_LRZ_STALL_CYCLES_VPC"/>
+ <value value="5" name="PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
+ <value value="6" name="PERF_LRZ_STALL_CYCLES_UCHE"/>
+ <value value="7" name="PERF_LRZ_LRZ_READ"/>
+ <value value="8" name="PERF_LRZ_LRZ_WRITE"/>
+ <value value="9" name="PERF_LRZ_READ_LATENCY"/>
+ <value value="10" name="PERF_LRZ_MERGE_CACHE_UPDATING"/>
+ <value value="11" name="PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
+ <value value="12" name="PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
+ <value value="13" name="PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
+ <value value="14" name="PERF_LRZ_FULL_8X8_TILES"/>
+ <value value="15" name="PERF_LRZ_PARTIAL_8X8_TILES"/>
+ <value value="16" name="PERF_LRZ_TILE_KILLED"/>
+ <value value="17" name="PERF_LRZ_TOTAL_PIXEL"/>
+ <value value="18" name="PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
+ <value value="19" name="PERF_LRZ_FULLY_COVERED_TILES"/>
+ <value value="20" name="PERF_LRZ_PARTIAL_COVERED_TILES"/>
+ <value value="21" name="PERF_LRZ_FEEDBACK_ACCEPT"/>
+ <value value="22" name="PERF_LRZ_FEEDBACK_DISCARD"/>
+ <value value="23" name="PERF_LRZ_FEEDBACK_STALL"/>
+ <value value="24" name="PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
+ <value value="25" name="PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
+ <value value="26" name="PERF_LRZ_STALL_CYCLES_VC"/>
+ <value value="27" name="PERF_LRZ_RAS_MASK_TRANS"/>
+</enum>
+
+<enum name="a6xx_cmp_perfcounter_select">
+ <value value="0" name="PERF_CMPDECMP_STALL_CYCLES_ARB"/>
+ <value value="1" name="PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
+ <value value="2" name="PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
+ <value value="3" name="PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
+ <value value="4" name="PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
+ <value value="5" name="PERF_CMPDECMP_VBIF_READ_REQUEST"/>
+ <value value="6" name="PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
+ <value value="7" name="PERF_CMPDECMP_VBIF_READ_DATA"/>
+ <value value="8" name="PERF_CMPDECMP_VBIF_WRITE_DATA"/>
+ <value value="9" name="PERF_CMPDECMP_FLAG_FETCH_CYCLES"/>
+ <value value="10" name="PERF_CMPDECMP_FLAG_FETCH_SAMPLES"/>
+ <value value="11" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
+ <value value="12" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
+ <value value="13" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
+ <value value="14" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
+ <value value="15" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
+ <value value="16" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
+ <value value="17" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
+ <value value="18" name="PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
+ <value value="19" name="PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
+ <value value="20" name="PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
+ <value value="21" name="PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
+ <value value="22" name="PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
+ <value value="23" name="PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
+ <value value="24" name="PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
+ <value value="25" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ"/>
+ <value value="26" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR"/>
+ <value value="27" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN"/>
+ <value value="28" name="PERF_CMPDECMP_2D_RD_DATA"/>
+ <value value="29" name="PERF_CMPDECMP_2D_WR_DATA"/>
+ <value value="30" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
+ <value value="31" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
+ <value value="32" name="PERF_CMPDECMP_2D_OUTPUT_TRANS"/>
+ <value value="33" name="PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
+ <value value="34" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
+ <value value="35" name="PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
+ <value value="36" name="PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
+ <value value="37" name="PERF_CMPDECMP_2D_BUSY_CYCLES"/>
+ <value value="38" name="PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES"/>
+ <value value="39" name="PERF_CMPDECMP_2D_PIXELS"/>
+</enum>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml
new file mode 100644
index 000000000000..661b0dd0f675
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml
@@ -0,0 +1,223 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a7xx_statetype_id">
+ <value value="0" name="A7XX_TP0_NCTX_REG"/>
+ <value value="1" name="A7XX_TP0_CTX0_3D_CVS_REG"/>
+ <value value="2" name="A7XX_TP0_CTX0_3D_CPS_REG"/>
+ <value value="3" name="A7XX_TP0_CTX1_3D_CVS_REG"/>
+ <value value="4" name="A7XX_TP0_CTX1_3D_CPS_REG"/>
+ <value value="5" name="A7XX_TP0_CTX2_3D_CPS_REG"/>
+ <value value="6" name="A7XX_TP0_CTX3_3D_CPS_REG"/>
+ <value value="9" name="A7XX_TP0_TMO_DATA"/>
+ <value value="10" name="A7XX_TP0_SMO_DATA"/>
+ <value value="11" name="A7XX_TP0_MIPMAP_BASE_DATA"/>
+ <value value="32" name="A7XX_SP_NCTX_REG"/>
+ <value value="33" name="A7XX_SP_CTX0_3D_CVS_REG"/>
+ <value value="34" name="A7XX_SP_CTX0_3D_CPS_REG"/>
+ <value value="35" name="A7XX_SP_CTX1_3D_CVS_REG"/>
+ <value value="36" name="A7XX_SP_CTX1_3D_CPS_REG"/>
+ <value value="37" name="A7XX_SP_CTX2_3D_CPS_REG"/>
+ <value value="38" name="A7XX_SP_CTX3_3D_CPS_REG"/>
+ <value value="39" name="A7XX_SP_INST_DATA"/>
+ <value value="40" name="A7XX_SP_INST_DATA_1"/>
+ <value value="41" name="A7XX_SP_LB_0_DATA"/>
+ <value value="42" name="A7XX_SP_LB_1_DATA"/>
+ <value value="43" name="A7XX_SP_LB_2_DATA"/>
+ <value value="44" name="A7XX_SP_LB_3_DATA"/>
+ <value value="45" name="A7XX_SP_LB_4_DATA"/>
+ <value value="46" name="A7XX_SP_LB_5_DATA"/>
+ <value value="47" name="A7XX_SP_LB_6_DATA"/>
+ <value value="48" name="A7XX_SP_LB_7_DATA"/>
+ <value value="49" name="A7XX_SP_CB_RAM"/>
+ <value value="50" name="A7XX_SP_LB_13_DATA"/>
+ <value value="51" name="A7XX_SP_LB_14_DATA"/>
+ <value value="52" name="A7XX_SP_INST_TAG"/>
+ <value value="53" name="A7XX_SP_INST_DATA_2"/>
+ <value value="54" name="A7XX_SP_TMO_TAG"/>
+ <value value="55" name="A7XX_SP_SMO_TAG"/>
+ <value value="56" name="A7XX_SP_STATE_DATA"/>
+ <value value="57" name="A7XX_SP_HWAVE_RAM"/>
+ <value value="58" name="A7XX_SP_L0_INST_BUF"/>
+ <value value="59" name="A7XX_SP_LB_8_DATA"/>
+ <value value="60" name="A7XX_SP_LB_9_DATA"/>
+ <value value="61" name="A7XX_SP_LB_10_DATA"/>
+ <value value="62" name="A7XX_SP_LB_11_DATA"/>
+ <value value="63" name="A7XX_SP_LB_12_DATA"/>
+ <value value="64" name="A7XX_HLSQ_DATAPATH_DSTR_META"/>
+ <value value="67" name="A7XX_HLSQ_L2STC_TAG_RAM"/>
+ <value value="68" name="A7XX_HLSQ_L2STC_INFO_CMD"/>
+ <value value="69" name="A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG"/>
+ <value value="70" name="A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG"/>
+ <value value="71" name="A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM"/>
+ <value value="72" name="A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM"/>
+ <value value="73" name="A7XX_HLSQ_CHUNK_CVS_RAM"/>
+ <value value="74" name="A7XX_HLSQ_CHUNK_CPS_RAM"/>
+ <value value="75" name="A7XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
+ <value value="76" name="A7XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
+ <value value="77" name="A7XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
+ <value value="78" name="A7XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
+ <value value="79" name="A7XX_HLSQ_CVS_MISC_RAM"/>
+ <value value="80" name="A7XX_HLSQ_CPS_MISC_RAM"/>
+ <value value="81" name="A7XX_HLSQ_CPS_MISC_RAM_1"/>
+ <value value="82" name="A7XX_HLSQ_INST_RAM"/>
+ <value value="83" name="A7XX_HLSQ_GFX_CVS_CONST_RAM"/>
+ <value value="84" name="A7XX_HLSQ_GFX_CPS_CONST_RAM"/>
+ <value value="85" name="A7XX_HLSQ_CVS_MISC_RAM_TAG"/>
+ <value value="86" name="A7XX_HLSQ_CPS_MISC_RAM_TAG"/>
+ <value value="87" name="A7XX_HLSQ_INST_RAM_TAG"/>
+ <value value="88" name="A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
+ <value value="89" name="A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
+ <value value="90" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM"/>
+ <value value="91" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG"/>
+ <value value="92" name="A7XX_HLSQ_INST_RAM_1"/>
+ <value value="93" name="A7XX_HLSQ_STPROC_META"/>
+ <value value="94" name="A7XX_HLSQ_BV_BE_META"/>
+ <value value="95" name="A7XX_HLSQ_INST_RAM_2"/>
+ <value value="96" name="A7XX_HLSQ_DATAPATH_META"/>
+ <value value="97" name="A7XX_HLSQ_FRONTEND_META"/>
+ <value value="98" name="A7XX_HLSQ_INDIRECT_META"/>
+ <value value="99" name="A7XX_HLSQ_BACKEND_META"/>
+</enum>
+
+<enum name="a7xx_state_location">
+ <value value="0" name="A7XX_HLSQ_STATE"/>
+ <value value="1" name="A7XX_HLSQ_DP"/>
+ <value value="2" name="A7XX_SP_TOP"/>
+ <value value="3" name="A7XX_USPTP"/>
+ <value value="4" name="A7XX_HLSQ_DP_STR"/>
+</enum>
+
+<enum name="a7xx_pipe">
+ <value value="0" name="A7XX_PIPE_NONE"/>
+ <value value="1" name="A7XX_PIPE_BR"/>
+ <value value="2" name="A7XX_PIPE_BV"/>
+ <value value="3" name="A7XX_PIPE_LPAC"/>
+</enum>
+
+<enum name="a7xx_cluster">
+ <value value="0" name="A7XX_CLUSTER_NONE"/>
+ <value value="1" name="A7XX_CLUSTER_FE"/>
+ <value value="2" name="A7XX_CLUSTER_SP_VS"/>
+ <value value="3" name="A7XX_CLUSTER_PC_VS"/>
+ <value value="4" name="A7XX_CLUSTER_GRAS"/>
+ <value value="5" name="A7XX_CLUSTER_SP_PS"/>
+ <value value="6" name="A7XX_CLUSTER_VPC_PS"/>
+ <value value="7" name="A7XX_CLUSTER_PS"/>
+</enum>
+
+<enum name="a7xx_debugbus_id">
+ <value value="1" name="A7XX_DBGBUS_CP_0_0"/>
+ <value value="2" name="A7XX_DBGBUS_CP_0_1"/>
+ <value value="3" name="A7XX_DBGBUS_RBBM"/>
+ <value value="5" name="A7XX_DBGBUS_GBIF_GX"/>
+ <value value="6" name="A7XX_DBGBUS_GBIF_CX"/>
+ <value value="7" name="A7XX_DBGBUS_HLSQ"/>
+ <value value="9" name="A7XX_DBGBUS_UCHE_0"/>
+ <value value="10" name="A7XX_DBGBUS_UCHE_1"/>
+ <value value="13" name="A7XX_DBGBUS_TESS_BR"/>
+ <value value="14" name="A7XX_DBGBUS_TESS_BV"/>
+ <value value="17" name="A7XX_DBGBUS_PC_BR"/>
+ <value value="18" name="A7XX_DBGBUS_PC_BV"/>
+ <value value="21" name="A7XX_DBGBUS_VFDP_BR"/>
+ <value value="22" name="A7XX_DBGBUS_VFDP_BV"/>
+ <value value="25" name="A7XX_DBGBUS_VPC_BR"/>
+ <value value="26" name="A7XX_DBGBUS_VPC_BV"/>
+ <value value="29" name="A7XX_DBGBUS_TSE_BR"/>
+ <value value="30" name="A7XX_DBGBUS_TSE_BV"/>
+ <value value="33" name="A7XX_DBGBUS_RAS_BR"/>
+ <value value="34" name="A7XX_DBGBUS_RAS_BV"/>
+ <value value="37" name="A7XX_DBGBUS_VSC"/>
+ <value value="39" name="A7XX_DBGBUS_COM_0"/>
+ <value value="43" name="A7XX_DBGBUS_LRZ_BR"/>
+ <value value="44" name="A7XX_DBGBUS_LRZ_BV"/>
+ <value value="47" name="A7XX_DBGBUS_UFC_0"/>
+ <value value="48" name="A7XX_DBGBUS_UFC_1"/>
+ <value value="55" name="A7XX_DBGBUS_GMU_GX"/>
+ <value value="59" name="A7XX_DBGBUS_DBGC"/>
+ <value value="60" name="A7XX_DBGBUS_CX"/>
+ <value value="61" name="A7XX_DBGBUS_GMU_CX"/>
+ <value value="62" name="A7XX_DBGBUS_GPC_BR"/>
+ <value value="63" name="A7XX_DBGBUS_GPC_BV"/>
+ <value value="66" name="A7XX_DBGBUS_LARC"/>
+ <value value="68" name="A7XX_DBGBUS_HLSQ_SPTP"/>
+ <value value="70" name="A7XX_DBGBUS_RB_0"/>
+ <value value="71" name="A7XX_DBGBUS_RB_1"/>
+ <value value="72" name="A7XX_DBGBUS_RB_2"/>
+ <value value="73" name="A7XX_DBGBUS_RB_3"/>
+ <value value="74" name="A7XX_DBGBUS_RB_4"/>
+ <value value="75" name="A7XX_DBGBUS_RB_5"/>
+ <value value="102" name="A7XX_DBGBUS_UCHE_WRAPPER"/>
+ <value value="106" name="A7XX_DBGBUS_CCU_0"/>
+ <value value="107" name="A7XX_DBGBUS_CCU_1"/>
+ <value value="108" name="A7XX_DBGBUS_CCU_2"/>
+ <value value="109" name="A7XX_DBGBUS_CCU_3"/>
+ <value value="110" name="A7XX_DBGBUS_CCU_4"/>
+ <value value="111" name="A7XX_DBGBUS_CCU_5"/>
+ <value value="138" name="A7XX_DBGBUS_VFD_BR_0"/>
+ <value value="139" name="A7XX_DBGBUS_VFD_BR_1"/>
+ <value value="140" name="A7XX_DBGBUS_VFD_BR_2"/>
+ <value value="141" name="A7XX_DBGBUS_VFD_BR_3"/>
+ <value value="142" name="A7XX_DBGBUS_VFD_BR_4"/>
+ <value value="143" name="A7XX_DBGBUS_VFD_BR_5"/>
+ <value value="144" name="A7XX_DBGBUS_VFD_BR_6"/>
+ <value value="145" name="A7XX_DBGBUS_VFD_BR_7"/>
+ <value value="202" name="A7XX_DBGBUS_VFD_BV_0"/>
+ <value value="203" name="A7XX_DBGBUS_VFD_BV_1"/>
+ <value value="204" name="A7XX_DBGBUS_VFD_BV_2"/>
+ <value value="205" name="A7XX_DBGBUS_VFD_BV_3"/>
+ <value value="234" name="A7XX_DBGBUS_USP_0"/>
+ <value value="235" name="A7XX_DBGBUS_USP_1"/>
+ <value value="236" name="A7XX_DBGBUS_USP_2"/>
+ <value value="237" name="A7XX_DBGBUS_USP_3"/>
+ <value value="238" name="A7XX_DBGBUS_USP_4"/>
+ <value value="239" name="A7XX_DBGBUS_USP_5"/>
+ <value value="266" name="A7XX_DBGBUS_TP_0"/>
+ <value value="267" name="A7XX_DBGBUS_TP_1"/>
+ <value value="268" name="A7XX_DBGBUS_TP_2"/>
+ <value value="269" name="A7XX_DBGBUS_TP_3"/>
+ <value value="270" name="A7XX_DBGBUS_TP_4"/>
+ <value value="271" name="A7XX_DBGBUS_TP_5"/>
+ <value value="272" name="A7XX_DBGBUS_TP_6"/>
+ <value value="273" name="A7XX_DBGBUS_TP_7"/>
+ <value value="274" name="A7XX_DBGBUS_TP_8"/>
+ <value value="275" name="A7XX_DBGBUS_TP_9"/>
+ <value value="276" name="A7XX_DBGBUS_TP_10"/>
+ <value value="277" name="A7XX_DBGBUS_TP_11"/>
+ <value value="330" name="A7XX_DBGBUS_USPTP_0"/>
+ <value value="331" name="A7XX_DBGBUS_USPTP_1"/>
+ <value value="332" name="A7XX_DBGBUS_USPTP_2"/>
+ <value value="333" name="A7XX_DBGBUS_USPTP_3"/>
+ <value value="334" name="A7XX_DBGBUS_USPTP_4"/>
+ <value value="335" name="A7XX_DBGBUS_USPTP_5"/>
+ <value value="336" name="A7XX_DBGBUS_USPTP_6"/>
+ <value value="337" name="A7XX_DBGBUS_USPTP_7"/>
+ <value value="338" name="A7XX_DBGBUS_USPTP_8"/>
+ <value value="339" name="A7XX_DBGBUS_USPTP_9"/>
+ <value value="340" name="A7XX_DBGBUS_USPTP_10"/>
+ <value value="341" name="A7XX_DBGBUS_USPTP_11"/>
+ <value value="396" name="A7XX_DBGBUS_CCHE_0"/>
+ <value value="397" name="A7XX_DBGBUS_CCHE_1"/>
+ <value value="398" name="A7XX_DBGBUS_CCHE_2"/>
+ <value value="408" name="A7XX_DBGBUS_VPC_DSTR_0"/>
+ <value value="409" name="A7XX_DBGBUS_VPC_DSTR_1"/>
+ <value value="410" name="A7XX_DBGBUS_VPC_DSTR_2"/>
+ <value value="411" name="A7XX_DBGBUS_HLSQ_DP_STR_0"/>
+ <value value="412" name="A7XX_DBGBUS_HLSQ_DP_STR_1"/>
+ <value value="413" name="A7XX_DBGBUS_HLSQ_DP_STR_2"/>
+ <value value="414" name="A7XX_DBGBUS_HLSQ_DP_STR_3"/>
+ <value value="415" name="A7XX_DBGBUS_HLSQ_DP_STR_4"/>
+ <value value="416" name="A7XX_DBGBUS_HLSQ_DP_STR_5"/>
+ <value value="443" name="A7XX_DBGBUS_UFC_DSTR_0"/>
+ <value value="444" name="A7XX_DBGBUS_UFC_DSTR_1"/>
+ <value value="445" name="A7XX_DBGBUS_UFC_DSTR_2"/>
+ <value value="446" name="A7XX_DBGBUS_CGC_SUBCORE"/>
+ <value value="447" name="A7XX_DBGBUS_CGC_CORE"/>
+</enum>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml b/drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml
new file mode 100644
index 000000000000..9bf78b0a854b
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml
@@ -0,0 +1,1030 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a7xx_cp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CP_ALWAYS_COUNT"/>
+ <value value="1" name="A7XX_PERF_CP_BUSY_GFX_CORE_IDLE"/>
+ <value value="2" name="A7XX_PERF_CP_BUSY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_CP_NUM_PREEMPTIONS"/>
+ <value value="4" name="A7XX_PERF_CP_PREEMPTION_REACTION_DELAY"/>
+ <value value="5" name="A7XX_PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
+ <value value="6" name="A7XX_PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
+ <value value="7" name="A7XX_PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
+ <value value="8" name="A7XX_PERF_CP_PREDICATED_DRAWS_KILLED"/>
+ <value value="9" name="A7XX_PERF_CP_MODE_SWITCH"/>
+ <value value="10" name="A7XX_PERF_CP_ZPASS_DONE"/>
+ <value value="11" name="A7XX_PERF_CP_CONTEXT_DONE"/>
+ <value value="12" name="A7XX_PERF_CP_CACHE_FLUSH"/>
+ <value value="13" name="A7XX_PERF_CP_LONG_PREEMPTIONS"/>
+ <value value="14" name="A7XX_PERF_CP_SQE_I_CACHE_STARVE"/>
+ <value value="15" name="A7XX_PERF_CP_SQE_IDLE"/>
+ <value value="16" name="A7XX_PERF_CP_SQE_PM4_STARVE_RB_IB"/>
+ <value value="17" name="A7XX_PERF_CP_SQE_PM4_STARVE_SDS"/>
+ <value value="18" name="A7XX_PERF_CP_SQE_MRB_STARVE"/>
+ <value value="19" name="A7XX_PERF_CP_SQE_RRB_STARVE"/>
+ <value value="20" name="A7XX_PERF_CP_SQE_VSD_STARVE"/>
+ <value value="21" name="A7XX_PERF_CP_VSD_DECODE_STARVE"/>
+ <value value="22" name="A7XX_PERF_CP_SQE_PIPE_OUT_STALL"/>
+ <value value="23" name="A7XX_PERF_CP_SQE_SYNC_STALL"/>
+ <value value="24" name="A7XX_PERF_CP_SQE_PM4_WFI_STALL"/>
+ <value value="25" name="A7XX_PERF_CP_SQE_SYS_WFI_STALL"/>
+ <value value="26" name="A7XX_PERF_CP_SQE_T4_EXEC"/>
+ <value value="27" name="A7XX_PERF_CP_SQE_LOAD_STATE_EXEC"/>
+ <value value="28" name="A7XX_PERF_CP_SQE_SAVE_SDS_STATE"/>
+ <value value="29" name="A7XX_PERF_CP_SQE_DRAW_EXEC"/>
+ <value value="30" name="A7XX_PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
+ <value value="31" name="A7XX_PERF_CP_SQE_EXEC_PROFILED"/>
+ <value value="32" name="A7XX_PERF_CP_MEMORY_POOL_EMPTY"/>
+ <value value="33" name="A7XX_PERF_CP_MEMORY_POOL_SYNC_STALL"/>
+ <value value="34" name="A7XX_PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
+ <value value="35" name="A7XX_PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
+ <value value="36" name="A7XX_PERF_CP_AHB_STALL_SQE_GMU"/>
+ <value value="37" name="A7XX_PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
+ <value value="38" name="A7XX_PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
+ <value value="39" name="A7XX_PERF_CP_CLUSTER0_EMPTY"/>
+ <value value="40" name="A7XX_PERF_CP_CLUSTER1_EMPTY"/>
+ <value value="41" name="A7XX_PERF_CP_CLUSTER2_EMPTY"/>
+ <value value="42" name="A7XX_PERF_CP_CLUSTER3_EMPTY"/>
+ <value value="43" name="A7XX_PERF_CP_CLUSTER4_EMPTY"/>
+ <value value="44" name="A7XX_PERF_CP_CLUSTER5_EMPTY"/>
+ <value value="45" name="A7XX_PERF_CP_PM4_DATA"/>
+ <value value="46" name="A7XX_PERF_CP_PM4_HEADERS"/>
+ <value value="47" name="A7XX_PERF_CP_VBIF_READ_BEATS"/>
+ <value value="48" name="A7XX_PERF_CP_VBIF_WRITE_BEATS"/>
+ <value value="49" name="A7XX_PERF_CP_SQE_INSTR_COUNTER"/>
+ <value value="50" name="A7XX_PERF_CP_RESERVED_50"/>
+ <value value="51" name="A7XX_PERF_CP_RESERVED_51"/>
+ <value value="52" name="A7XX_PERF_CP_RESERVED_52"/>
+ <value value="53" name="A7XX_PERF_CP_RESERVED_53"/>
+ <value value="54" name="A7XX_PERF_CP_RESERVED_54"/>
+ <value value="55" name="A7XX_PERF_CP_RESERVED_55"/>
+ <value value="56" name="A7XX_PERF_CP_RESERVED_56"/>
+ <value value="57" name="A7XX_PERF_CP_RESERVED_57"/>
+ <value value="58" name="A7XX_PERF_CP_RESERVED_58"/>
+ <value value="59" name="A7XX_PERF_CP_RESERVED_59"/>
+ <value value="60" name="A7XX_PERF_CP_CLUSTER0_FULL"/>
+ <value value="61" name="A7XX_PERF_CP_CLUSTER1_FULL"/>
+ <value value="62" name="A7XX_PERF_CP_CLUSTER2_FULL"/>
+ <value value="63" name="A7XX_PERF_CP_CLUSTER3_FULL"/>
+ <value value="64" name="A7XX_PERF_CP_CLUSTER4_FULL"/>
+ <value value="65" name="A7XX_PERF_CP_CLUSTER5_FULL"/>
+ <value value="66" name="A7XX_PERF_CP_CLUSTER6_FULL"/>
+ <value value="67" name="A7XX_PERF_CP_CLUSTER6_EMPTY"/>
+ <value value="68" name="A7XX_PERF_CP_ICACHE_MISSES"/>
+ <value value="69" name="A7XX_PERF_CP_ICACHE_HITS"/>
+ <value value="70" name="A7XX_PERF_CP_ICACHE_STALL"/>
+ <value value="71" name="A7XX_PERF_CP_DCACHE_MISSES"/>
+ <value value="72" name="A7XX_PERF_CP_DCACHE_HITS"/>
+ <value value="73" name="A7XX_PERF_CP_DCACHE_STALLS"/>
+ <value value="74" name="A7XX_PERF_CP_AQE_SQE_STALL"/>
+ <value value="75" name="A7XX_PERF_CP_SQE_AQE_STARVE"/>
+ <value value="76" name="A7XX_PERF_CP_PREEMPT_LATENCY"/>
+ <value value="77" name="A7XX_PERF_CP_SQE_MD8_STALL_CYCLES"/>
+ <value value="78" name="A7XX_PERF_CP_SQE_MESH_EXEC_CYCLES"/>
+ <value value="79" name="A7XX_PERF_CP_AQE_NUM_AS_CHUNKS"/>
+ <value value="80" name="A7XX_PERF_CP_AQE_NUM_MS_CHUNKS"/>
+</enum>
+
+<enum name="a7xx_rbbm_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RBBM_ALWAYS_COUNT"/>
+ <value value="1" name="A7XX_PERF_RBBM_ALWAYS_ON"/>
+ <value value="2" name="A7XX_PERF_RBBM_TSE_BUSY"/>
+ <value value="3" name="A7XX_PERF_RBBM_RAS_BUSY"/>
+ <value value="4" name="A7XX_PERF_RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="A7XX_PERF_RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="A7XX_PERF_RBBM_STATUS_MASKED"/>
+ <value value="7" name="A7XX_PERF_RBBM_COM_BUSY"/>
+ <value value="8" name="A7XX_PERF_RBBM_DCOM_BUSY"/>
+ <value value="9" name="A7XX_PERF_RBBM_VBIF_BUSY"/>
+ <value value="10" name="A7XX_PERF_RBBM_VSC_BUSY"/>
+ <value value="11" name="A7XX_PERF_RBBM_TESS_BUSY"/>
+ <value value="12" name="A7XX_PERF_RBBM_UCHE_BUSY"/>
+ <value value="13" name="A7XX_PERF_RBBM_HLSQ_BUSY"/>
+</enum>
+
+<enum name="a7xx_pc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_PC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_PC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_PC_STALL_CYCLES_VFD"/>
+ <value value="3" name="A7XX_PERF_PC_RESERVED"/>
+ <value value="4" name="A7XX_PERF_PC_STALL_CYCLES_VPC"/>
+ <value value="5" name="A7XX_PERF_PC_STALL_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_PC_STALL_CYCLES_TESS"/>
+ <value value="7" name="A7XX_PERF_PC_STALL_CYCLES_VFD_ONLY"/>
+ <value value="8" name="A7XX_PERF_PC_STALL_CYCLES_VPC_ONLY"/>
+ <value value="9" name="A7XX_PERF_PC_PASS1_TF_STALL_CYCLES"/>
+ <value value="10" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
+ <value value="11" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
+ <value value="12" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
+ <value value="13" name="A7XX_PERF_PC_STARVE_CYCLES_DI"/>
+ <value value="14" name="A7XX_PERF_PC_VIS_STREAMS_LOADED"/>
+ <value value="15" name="A7XX_PERF_PC_INSTANCES"/>
+ <value value="16" name="A7XX_PERF_PC_VPC_PRIMITIVES"/>
+ <value value="17" name="A7XX_PERF_PC_DEAD_PRIM"/>
+ <value value="18" name="A7XX_PERF_PC_LIVE_PRIM"/>
+ <value value="19" name="A7XX_PERF_PC_VERTEX_HITS"/>
+ <value value="20" name="A7XX_PERF_PC_IA_VERTICES"/>
+ <value value="21" name="A7XX_PERF_PC_IA_PRIMITIVES"/>
+ <value value="22" name="A7XX_PERF_PC_RESERVED_22"/>
+ <value value="23" name="A7XX_PERF_PC_HS_INVOCATIONS"/>
+ <value value="24" name="A7XX_PERF_PC_DS_INVOCATIONS"/>
+ <value value="25" name="A7XX_PERF_PC_VS_INVOCATIONS"/>
+ <value value="26" name="A7XX_PERF_PC_GS_INVOCATIONS"/>
+ <value value="27" name="A7XX_PERF_PC_DS_PRIMITIVES"/>
+ <value value="28" name="A7XX_PERF_PC_3D_DRAWCALLS"/>
+ <value value="29" name="A7XX_PERF_PC_2D_DRAWCALLS"/>
+ <value value="30" name="A7XX_PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
+ <value value="31" name="A7XX_PERF_PC_TESS_BUSY_CYCLES"/>
+ <value value="32" name="A7XX_PERF_PC_TESS_WORKING_CYCLES"/>
+ <value value="33" name="A7XX_PERF_PC_TESS_STALL_CYCLES_PC"/>
+ <value value="34" name="A7XX_PERF_PC_TESS_STARVE_CYCLES_PC"/>
+ <value value="35" name="A7XX_PERF_PC_TESS_SINGLE_PRIM_CYCLES"/>
+ <value value="36" name="A7XX_PERF_PC_TESS_PC_UV_TRANS"/>
+ <value value="37" name="A7XX_PERF_PC_TESS_PC_UV_PATCHES"/>
+ <value value="38" name="A7XX_PERF_PC_TESS_FACTOR_TRANS"/>
+ <value value="39" name="A7XX_PERF_PC_TAG_CHECKED_VERTICES"/>
+ <value value="40" name="A7XX_PERF_PC_MESH_VS_WAVES"/>
+ <value value="41" name="A7XX_PERF_PC_MESH_DRAWS"/>
+ <value value="42" name="A7XX_PERF_PC_MESH_DEAD_DRAWS"/>
+ <value value="43" name="A7XX_PERF_PC_MESH_MVIS_EN_DRAWS"/>
+ <value value="44" name="A7XX_PERF_PC_MESH_DEAD_PRIM"/>
+ <value value="45" name="A7XX_PERF_PC_MESH_LIVE_PRIM"/>
+ <value value="46" name="A7XX_PERF_PC_MESH_PA_EN_PRIM"/>
+ <value value="47" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_MVIS_STREAM"/>
+ <value value="48" name="A7XX_PERF_PC_STARVE_CYCLES_PREDRAW"/>
+ <value value="49" name="A7XX_PERF_PC_STALL_CYCLES_COMPUTE_GFX"/>
+ <value value="50" name="A7XX_PERF_PC_STALL_CYCLES_GFX_COMPUTE"/>
+ <value value="51" name="A7XX_PERF_PC_TESS_PC_MULTI_PATCH_TRANS"/>
+</enum>
+
+<enum name="a7xx_vfd_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VFD_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VFD_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
+ <value value="3" name="A7XX_PERF_VFD_STALL_CYCLES_SP_INFO"/>
+ <value value="4" name="A7XX_PERF_VFD_STALL_CYCLES_SP_ATTR"/>
+ <value value="5" name="A7XX_PERF_VFD_STARVE_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_VFD_RBUFFER_FULL"/>
+ <value value="7" name="A7XX_PERF_VFD_ATTR_INFO_FIFO_FULL"/>
+ <value value="8" name="A7XX_PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
+ <value value="9" name="A7XX_PERF_VFD_NUM_ATTRIBUTES"/>
+ <value value="10" name="A7XX_PERF_VFD_UPPER_SHADER_FIBERS"/>
+ <value value="11" name="A7XX_PERF_VFD_LOWER_SHADER_FIBERS"/>
+ <value value="12" name="A7XX_PERF_VFD_MODE_0_FIBERS"/>
+ <value value="13" name="A7XX_PERF_VFD_MODE_1_FIBERS"/>
+ <value value="14" name="A7XX_PERF_VFD_MODE_2_FIBERS"/>
+ <value value="15" name="A7XX_PERF_VFD_MODE_3_FIBERS"/>
+ <value value="16" name="A7XX_PERF_VFD_MODE_4_FIBERS"/>
+ <value value="17" name="A7XX_PERF_VFD_TOTAL_VERTICES"/>
+ <value value="18" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD"/>
+ <value value="19" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
+ <value value="20" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
+ <value value="21" name="A7XX_PERF_VFDP_STARVE_CYCLES_PC"/>
+ <value value="22" name="A7XX_PERF_VFDP_VS_STAGE_WAVES"/>
+ <value value="23" name="A7XX_PERF_VFD_STALL_CYCLES_PRG_END_FE"/>
+ <value value="24" name="A7XX_PERF_VFD_STALL_CYCLES_CBSYNC"/>
+</enum>
+
+<enum name="a7xx_hlsq_perfcounter_select">
+ <value value="0" name="A7XX_PERF_HLSQ_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_HLSQ_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
+ <value value="3" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
+ <value value="4" name="A7XX_PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
+ <value value="5" name="A7XX_PERF_HLSQ_UCHE_LATENCY_COUNT"/>
+ <value value="6" name="A7XX_PERF_HLSQ_RESERVED_6"/>
+ <value value="7" name="A7XX_PERF_HLSQ_RESERVED_7"/>
+ <value value="8" name="A7XX_PERF_HLSQ_RESERVED_8"/>
+ <value value="9" name="A7XX_PERF_HLSQ_RESERVED_9"/>
+ <value value="10" name="A7XX_PERF_HLSQ_COMPUTE_DRAWCALLS"/>
+ <value value="11" name="A7XX_PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
+ <value value="12" name="A7XX_PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
+ <value value="13" name="A7XX_PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
+ <value value="14" name="A7XX_PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
+ <value value="15" name="A7XX_PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
+ <value value="16" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
+ <value value="17" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
+ <value value="18" name="A7XX_PERF_HLSQ_STALL_CYCLES_VPC"/>
+ <value value="19" name="A7XX_PERF_HLSQ_RESERVED_19"/>
+ <value value="20" name="A7XX_PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
+ <value value="21" name="A7XX_PERF_HLSQ_VSBR_STALL_CYCLES"/>
+ <value value="22" name="A7XX_PERF_HLSQ_FS_STALL_CYCLES"/>
+ <value value="23" name="A7XX_PERF_HLSQ_LPAC_STALL_CYCLES"/>
+ <value value="24" name="A7XX_PERF_HLSQ_BV_STALL_CYCLES"/>
+ <value value="25" name="A7XX_PERF_HLSQ_VSBR_DEREF_CYCLES"/>
+ <value value="26" name="A7XX_PERF_HLSQ_FS_DEREF_CYCLES"/>
+ <value value="27" name="A7XX_PERF_HLSQ_LPAC_DEREF_CYCLES"/>
+ <value value="28" name="A7XX_PERF_HLSQ_BV_DEREF_CYCLES"/>
+ <value value="29" name="A7XX_PERF_HLSQ_VSBR_S2W_CYCLES"/>
+ <value value="30" name="A7XX_PERF_HLSQ_FS_S2W_CYCLES"/>
+ <value value="31" name="A7XX_PERF_HLSQ_LPAC_S2W_CYCLES"/>
+ <value value="32" name="A7XX_PERF_HLSQ_BV_S2W_CYCLES"/>
+ <value value="33" name="A7XX_PERF_HLSQ_VSBR_WAIT_FS_S2W"/>
+ <value value="34" name="A7XX_PERF_HLSQ_FS_WAIT_VS_S2W"/>
+ <value value="35" name="A7XX_PERF_HLSQ_LPAC_WAIT_VS_S2W"/>
+ <value value="36" name="A7XX_PERF_HLSQ_BV_WAIT_FS_S2W"/>
+ <value value="37" name="A7XX_PERF_HLSQ_VS_WAIT_CONST_RESOURCE"/>
+ <value value="38" name="A7XX_PERF_HLSQ_FS_WAIT_SAME_VS_S2W"/>
+ <value value="39" name="A7XX_PERF_HLSQ_FS_STARVING_SP"/>
+ <value value="40" name="A7XX_PERF_HLSQ_VS_DATA_WAIT_PROGRAMMING"/>
+ <value value="41" name="A7XX_PERF_HLSQ_BV_DATA_WAIT_PROGRAMMING"/>
+ <value value="42" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_VS"/>
+ <value value="43" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_VS"/>
+ <value value="44" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_FS"/>
+ <value value="45" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_FS"/>
+ <value value="46" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_BV"/>
+ <value value="47" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_BV"/>
+ <value value="48" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_LPAC"/>
+ <value value="49" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_LPAC"/>
+ <value value="50" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_VS"/>
+ <value value="51" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_FS"/>
+ <value value="52" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_BV"/>
+ <value value="53" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_LPAC"/>
+ <value value="54" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_VS"/>
+ <value value="55" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_FS"/>
+ <value value="56" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_BV"/>
+ <value value="57" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_LPAC"/>
+</enum>
+
+<enum name="a7xx_vpc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VPC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VPC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_VPC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="A7XX_PERF_VPC_STALL_CYCLES_VFD_WACK"/>
+ <value value="4" name="A7XX_PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
+ <value value="5" name="A7XX_PERF_VPC_RESERVED_5"/>
+ <value value="6" name="A7XX_PERF_VPC_STALL_CYCLES_SP_LM"/>
+ <value value="7" name="A7XX_PERF_VPC_STARVE_CYCLES_SP"/>
+ <value value="8" name="A7XX_PERF_VPC_STARVE_CYCLES_LRZ"/>
+ <value value="9" name="A7XX_PERF_VPC_PC_PRIMITIVES"/>
+ <value value="10" name="A7XX_PERF_VPC_SP_COMPONENTS"/>
+ <value value="11" name="A7XX_PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
+ <value value="12" name="A7XX_PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
+ <value value="13" name="A7XX_PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
+ <value value="14" name="A7XX_PERF_VPC_LM_TRANSACTION"/>
+ <value value="15" name="A7XX_PERF_VPC_STREAMOUT_TRANSACTION"/>
+ <value value="16" name="A7XX_PERF_VPC_VS_BUSY_CYCLES"/>
+ <value value="17" name="A7XX_PERF_VPC_PS_BUSY_CYCLES"/>
+ <value value="18" name="A7XX_PERF_VPC_VS_WORKING_CYCLES"/>
+ <value value="19" name="A7XX_PERF_VPC_PS_WORKING_CYCLES"/>
+ <value value="20" name="A7XX_PERF_VPC_STARVE_CYCLES_RB"/>
+ <value value="21" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_POS"/>
+ <value value="22" name="A7XX_PERF_VPC_WIT_FULL_CYCLES"/>
+ <value value="23" name="A7XX_PERF_VPC_VPCRAM_FULL_CYCLES"/>
+ <value value="24" name="A7XX_PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
+ <value value="25" name="A7XX_PERF_VPC_NUM_VPCRAM_WRITE"/>
+ <value value="26" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_SO"/>
+ <value value="27" name="A7XX_PERF_VPC_NUM_ATTR_REQ_LM"/>
+ <value value="28" name="A7XX_PERF_VPC_STALL_CYCLE_TSE"/>
+ <value value="29" name="A7XX_PERF_VPC_TSE_PRIMITIVES"/>
+ <value value="30" name="A7XX_PERF_VPC_GS_PRIMITIVES"/>
+ <value value="31" name="A7XX_PERF_VPC_TSE_TRANSACTIONS"/>
+ <value value="32" name="A7XX_PERF_VPC_STALL_CYCLES_CCU"/>
+ <value value="33" name="A7XX_PERF_VPC_NUM_WM_HIT"/>
+ <value value="34" name="A7XX_PERF_VPC_STALL_DQ_WACK"/>
+ <value value="35" name="A7XX_PERF_VPC_STALL_CYCLES_CCHE"/>
+ <value value="36" name="A7XX_PERF_VPC_STARVE_CYCLES_CCHE"/>
+ <value value="37" name="A7XX_PERF_VPC_NUM_PA_REQ"/>
+ <value value="38" name="A7XX_PERF_VPC_NUM_LM_REQ_HIT"/>
+ <value value="39" name="A7XX_PERF_VPC_CCHE_REQBUF_FULL"/>
+ <value value="40" name="A7XX_PERF_VPC_STALL_CYCLES_LM_ACK"/>
+ <value value="41" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_FE"/>
+ <value value="42" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_PCVS"/>
+ <value value="43" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_VPCPS"/>
+</enum>
+
+<enum name="a7xx_tse_perfcounter_select">
+ <value value="0" name="A7XX_PERF_TSE_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_TSE_CLIPPING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_TSE_STALL_CYCLES_RAS"/>
+ <value value="3" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
+ <value value="4" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
+ <value value="5" name="A7XX_PERF_TSE_STARVE_CYCLES_PC"/>
+ <value value="6" name="A7XX_PERF_TSE_INPUT_PRIM"/>
+ <value value="7" name="A7XX_PERF_TSE_INPUT_NULL_PRIM"/>
+ <value value="8" name="A7XX_PERF_TSE_TRIVAL_REJ_PRIM"/>
+ <value value="9" name="A7XX_PERF_TSE_CLIPPED_PRIM"/>
+ <value value="10" name="A7XX_PERF_TSE_ZERO_AREA_PRIM"/>
+ <value value="11" name="A7XX_PERF_TSE_FACENESS_CULLED_PRIM"/>
+ <value value="12" name="A7XX_PERF_TSE_ZERO_PIXEL_PRIM"/>
+ <value value="13" name="A7XX_PERF_TSE_OUTPUT_NULL_PRIM"/>
+ <value value="14" name="A7XX_PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
+ <value value="15" name="A7XX_PERF_TSE_CINVOCATION"/>
+ <value value="16" name="A7XX_PERF_TSE_CPRIMITIVES"/>
+ <value value="17" name="A7XX_PERF_TSE_2D_INPUT_PRIM"/>
+ <value value="18" name="A7XX_PERF_TSE_2D_ALIVE_CYCLES"/>
+ <value value="19" name="A7XX_PERF_TSE_CLIP_PLANES"/>
+</enum>
+
+<enum name="a7xx_ras_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RAS_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
+ <value value="2" name="A7XX_PERF_RAS_STALL_CYCLES_LRZ"/>
+ <value value="3" name="A7XX_PERF_RAS_STARVE_CYCLES_TSE"/>
+ <value value="4" name="A7XX_PERF_RAS_SUPER_TILES"/>
+ <value value="5" name="A7XX_PERF_RAS_8X4_TILES"/>
+ <value value="6" name="A7XX_PERF_RAS_MASKGEN_ACTIVE"/>
+ <value value="7" name="A7XX_PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
+ <value value="8" name="A7XX_PERF_RAS_FULLY_COVERED_8X4_TILES"/>
+ <value value="9" name="A7XX_PERF_RAS_PRIM_KILLED_INVISILBE"/>
+ <value value="10" name="A7XX_PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
+ <value value="11" name="A7XX_PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
+ <value value="12" name="A7XX_PERF_RAS_BLOCKS"/>
+ <value value="13" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_0_WORKING_CC_l2"/>
+ <value value="14" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_1_WORKING_CC_l2"/>
+ <value value="15" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_2_WORKING_CC_l2"/>
+ <value value="16" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_3_WORKING_CC_l2"/>
+ <value value="17" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_4_WORKING_CC_l2"/>
+ <value value="18" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_5_WORKING_CC_l2"/>
+ <value value="19" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_6_WORKING_CC_l2"/>
+ <value value="20" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_7_WORKING_CC_l2"/>
+ <value value="21" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_8_WORKING_CC_l2"/>
+ <value value="22" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_9_WORKING_CC_l2"/>
+ <value value="23" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_10_WORKING_CC_l2"/>
+ <value value="24" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_11_WORKING_CC_l2"/>
+ <value value="25" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_12_WORKING_CC_l2"/>
+ <value value="26" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_13_WORKING_CC_l2"/>
+ <value value="27" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_14_WORKING_CC_l2"/>
+ <value value="28" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_15_WORKING_CC_l2"/>
+ <value value="29" name="A7XX_PERF_RAS_FALSE_PARTIAL_STILE"/>
+</enum>
+
+<enum name="a7xx_uche_perfcounter_select">
+ <value value="0" name="A7XX_PERF_UCHE_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_UCHE_STALL_CYCLES_ARBITER"/>
+ <value value="2" name="A7XX_PERF_UCHE_VBIF_LATENCY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
+ <value value="4" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_TP"/>
+ <value value="5" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_VFD"/>
+ <value value="6" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
+ <value value="7" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
+ <value value="8" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_SP"/>
+ <value value="9" name="A7XX_PERF_UCHE_READ_REQUESTS_TP"/>
+ <value value="10" name="A7XX_PERF_UCHE_READ_REQUESTS_VFD"/>
+ <value value="11" name="A7XX_PERF_UCHE_READ_REQUESTS_HLSQ"/>
+ <value value="12" name="A7XX_PERF_UCHE_READ_REQUESTS_LRZ"/>
+ <value value="13" name="A7XX_PERF_UCHE_READ_REQUESTS_SP"/>
+ <value value="14" name="A7XX_PERF_UCHE_WRITE_REQUESTS_LRZ"/>
+ <value value="15" name="A7XX_PERF_UCHE_WRITE_REQUESTS_SP"/>
+ <value value="16" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VPC"/>
+ <value value="17" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VSC"/>
+ <value value="18" name="A7XX_PERF_UCHE_EVICTS"/>
+ <value value="19" name="A7XX_PERF_UCHE_BANK_REQ0"/>
+ <value value="20" name="A7XX_PERF_UCHE_BANK_REQ1"/>
+ <value value="21" name="A7XX_PERF_UCHE_BANK_REQ2"/>
+ <value value="22" name="A7XX_PERF_UCHE_BANK_REQ3"/>
+ <value value="23" name="A7XX_PERF_UCHE_BANK_REQ4"/>
+ <value value="24" name="A7XX_PERF_UCHE_BANK_REQ5"/>
+ <value value="25" name="A7XX_PERF_UCHE_BANK_REQ6"/>
+ <value value="26" name="A7XX_PERF_UCHE_BANK_REQ7"/>
+ <value value="27" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH0"/>
+ <value value="28" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH1"/>
+ <value value="29" name="A7XX_PERF_UCHE_GMEM_READ_BEATS"/>
+ <value value="30" name="A7XX_PERF_UCHE_TPH_REF_FULL"/>
+ <value value="31" name="A7XX_PERF_UCHE_TPH_VICTIM_FULL"/>
+ <value value="32" name="A7XX_PERF_UCHE_TPH_EXT_FULL"/>
+ <value value="33" name="A7XX_PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
+ <value value="34" name="A7XX_PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
+ <value value="35" name="A7XX_PERF_UCHE_DCMP_LATENCY_CYCLES"/>
+ <value value="36" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_PC"/>
+ <value value="37" name="A7XX_PERF_UCHE_READ_REQUESTS_PC"/>
+ <value value="38" name="A7XX_PERF_UCHE_RAM_READ_REQ"/>
+ <value value="39" name="A7XX_PERF_UCHE_RAM_WRITE_REQ"/>
+ <value value="40" name="A7XX_PERF_UCHE_STARVED_CYCLES_VBIF_DECMP"/>
+ <value value="41" name="A7XX_PERF_UCHE_STALL_CYCLES_DECMP"/>
+ <value value="42" name="A7XX_PERF_UCHE_ARBITER_STALL_CYCLES_VBIF"/>
+ <value value="43" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_UBWC"/>
+ <value value="44" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_NONUBWC"/>
+ <value value="45" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_GMEM"/>
+ <value value="46" name="A7XX_PERF_UCHE_LONG_LINE_ALL_EVICTS_KAILUA"/>
+ <value value="47" name="A7XX_PERF_UCHE_LONG_LINE_PARTIAL_EVICTS_KAILUA"/>
+ <value value="48" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_CCHE"/>
+ <value value="49" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_OTHER_KAILUA"/>
+ <value value="50" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_CCHE"/>
+ <value value="51" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_OTHER_CLIENTS"/>
+ <value value="52" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH0"/>
+ <value value="53" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH1"/>
+ <value value="54" name="A7XX_PERF_UCHE_CCHE_TPH_QUEUE_FULL"/>
+ <value value="55" name="A7XX_PERF_UCHE_CCHE_DPH_QUEUE_FULL"/>
+ <value value="56" name="A7XX_PERF_UCHE_GMEM_WRITE_BEATS"/>
+ <value value="57" name="A7XX_PERF_UCHE_UBWC_READ_BEATS"/>
+ <value value="58" name="A7XX_PERF_UCHE_UBWC_WRITE_BEATS"/>
+</enum>
+
+<enum name="a7xx_tp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_TP_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_TP_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_TP_LATENCY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_TP_LATENCY_TRANS"/>
+ <value value="4" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_SAMPLES"/>
+ <value value="5" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_CYCLES"/>
+ <value value="6" name="A7XX_PERF_TP_L1_CACHELINE_REQUESTS"/>
+ <value value="7" name="A7XX_PERF_TP_L1_CACHELINE_MISSES"/>
+ <value value="8" name="A7XX_PERF_TP_SP_TP_TRANS"/>
+ <value value="9" name="A7XX_PERF_TP_TP_SP_TRANS"/>
+ <value value="10" name="A7XX_PERF_TP_OUTPUT_PIXELS"/>
+ <value value="11" name="A7XX_PERF_TP_FILTER_WORKLOAD_16BIT"/>
+ <value value="12" name="A7XX_PERF_TP_FILTER_WORKLOAD_32BIT"/>
+ <value value="13" name="A7XX_PERF_TP_QUADS_RECEIVED"/>
+ <value value="14" name="A7XX_PERF_TP_QUADS_OFFSET"/>
+ <value value="15" name="A7XX_PERF_TP_QUADS_SHADOW"/>
+ <value value="16" name="A7XX_PERF_TP_QUADS_ARRAY"/>
+ <value value="17" name="A7XX_PERF_TP_QUADS_GRADIENT"/>
+ <value value="18" name="A7XX_PERF_TP_QUADS_1D"/>
+ <value value="19" name="A7XX_PERF_TP_QUADS_2D"/>
+ <value value="20" name="A7XX_PERF_TP_QUADS_BUFFER"/>
+ <value value="21" name="A7XX_PERF_TP_QUADS_3D"/>
+ <value value="22" name="A7XX_PERF_TP_QUADS_CUBE"/>
+ <value value="23" name="A7XX_PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
+ <value value="24" name="A7XX_PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
+ <value value="25" name="A7XX_PERF_TP_OUTPUT_PIXELS_POINT"/>
+ <value value="26" name="A7XX_PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="27" name="A7XX_PERF_TP_OUTPUT_PIXELS_MIP"/>
+ <value value="28" name="A7XX_PERF_TP_OUTPUT_PIXELS_ANISO"/>
+ <value value="29" name="A7XX_PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
+ <value value="30" name="A7XX_PERF_TP_FLAG_CACHE_REQUESTS"/>
+ <value value="31" name="A7XX_PERF_TP_FLAG_CACHE_MISSES"/>
+ <value value="32" name="A7XX_PERF_TP_L1_5_L2_REQUESTS"/>
+ <value value="33" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS"/>
+ <value value="34" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
+ <value value="35" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="36" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
+ <value value="37" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
+ <value value="38" name="A7XX_PERF_TP_TPA2TPC_TRANS"/>
+ <value value="39" name="A7XX_PERF_TP_L1_MISSES_ASTC_1TILE"/>
+ <value value="40" name="A7XX_PERF_TP_L1_MISSES_ASTC_2TILE"/>
+ <value value="41" name="A7XX_PERF_TP_L1_MISSES_ASTC_4TILE"/>
+ <value value="42" name="A7XX_PERF_TP_L1_5_COMPRESS_REQS"/>
+ <value value="43" name="A7XX_PERF_TP_L1_5_L2_COMPRESS_MISS"/>
+ <value value="44" name="A7XX_PERF_TP_L1_BANK_CONFLICT"/>
+ <value value="45" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
+ <value value="46" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
+ <value value="47" name="A7XX_PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
+ <value value="48" name="A7XX_PERF_TP_FRONTEND_WORKING_CYCLES"/>
+ <value value="49" name="A7XX_PERF_TP_L1_TAG_WORKING_CYCLES"/>
+ <value value="50" name="A7XX_PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
+ <value value="51" name="A7XX_PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
+ <value value="52" name="A7XX_PERF_TP_BACKEND_WORKING_CYCLES"/>
+ <value value="53" name="A7XX_PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
+ <value value="54" name="A7XX_PERF_TP_STARVE_CYCLES_SP"/>
+ <value value="55" name="A7XX_PERF_TP_STARVE_CYCLES_UCHE"/>
+ <value value="56" name="A7XX_PERF_TP_STALL_CYCLES_UFC"/>
+ <value value="57" name="A7XX_PERF_TP_FORMAT_DECOMP"/>
+ <value value="58" name="A7XX_PERF_TP_FILTER_POINT_FP16"/>
+ <value value="59" name="A7XX_PERF_TP_FILTER_POINT_FP32"/>
+ <value value="60" name="A7XX_PERF_TP_LATENCY_FIFO_FULL"/>
+ <value value="61" name="A7XX_PERF_TP_RESERVED_61"/>
+ <value value="62" name="A7XX_PERF_TP_RESERVED_62"/>
+ <value value="63" name="A7XX_PERF_TP_RESERVED_63"/>
+ <value value="64" name="A7XX_PERF_TP_RESERVED_64"/>
+ <value value="65" name="A7XX_PERF_TP_RESERVED_65"/>
+ <value value="66" name="A7XX_PERF_TP_RESERVED_66"/>
+ <value value="67" name="A7XX_PERF_TP_RESERVED_67"/>
+ <value value="68" name="A7XX_PERF_TP_RESERVED_68"/>
+ <value value="69" name="A7XX_PERF_TP_RESERVED_69"/>
+ <value value="70" name="A7XX_PERF_TP_RESERVED_70"/>
+ <value value="71" name="A7XX_PERF_TP_RESERVED_71"/>
+ <value value="72" name="A7XX_PERF_TP_RESERVED_72"/>
+ <value value="73" name="A7XX_PERF_TP_RESERVED_73"/>
+ <value value="74" name="A7XX_PERF_TP_RESERVED_74"/>
+ <value value="75" name="A7XX_PERF_TP_RESERVED_75"/>
+ <value value="76" name="A7XX_PERF_TP_RESERVED_76"/>
+ <value value="77" name="A7XX_PERF_TP_RESERVED_77"/>
+ <value value="78" name="A7XX_PERF_TP_RESERVED_78"/>
+ <value value="79" name="A7XX_PERF_TP_RESERVED_79"/>
+ <value value="80" name="A7XX_PERF_TP_RESERVED_80"/>
+ <value value="81" name="A7XX_PERF_TP_RESERVED_81"/>
+ <value value="82" name="A7XX_PERF_TP_RESERVED_82"/>
+ <value value="83" name="A7XX_PERF_TP_RESERVED_83"/>
+ <value value="84" name="A7XX_PERF_TP_RESERVED_84"/>
+ <value value="85" name="A7XX_PERF_TP_RESERVED_85"/>
+ <value value="86" name="A7XX_PERF_TP_RESERVED_86"/>
+ <value value="87" name="A7XX_PERF_TP_RESERVED_87"/>
+ <value value="88" name="A7XX_PERF_TP_RESERVED_88"/>
+ <value value="89" name="A7XX_PERF_TP_RESERVED_89"/>
+ <value value="90" name="A7XX_PERF_TP_RESERVED_90"/>
+ <value value="91" name="A7XX_PERF_TP_RESERVED_91"/>
+ <value value="92" name="A7XX_PERF_TP_RESERVED_92"/>
+ <value value="93" name="A7XX_PERF_TP_RESERVED_93"/>
+ <value value="94" name="A7XX_PERF_TP_RESERVED_94"/>
+ <value value="95" name="A7XX_PERF_TP_RESERVED_95"/>
+ <value value="96" name="A7XX_PERF_TP_RESERVED_96"/>
+ <value value="97" name="A7XX_PERF_TP_RESERVED_97"/>
+ <value value="98" name="A7XX_PERF_TP_RESERVED_98"/>
+ <value value="99" name="A7XX_PERF_TP_RESERVED_99"/>
+ <value value="100" name="A7XX_PERF_TP_RESERVED_100"/>
+ <value value="101" name="A7XX_PERF_TP_RESERVED_101"/>
+ <value value="102" name="A7XX_PERF_TP_RESERVED_102"/>
+ <value value="103" name="A7XX_PERF_TP_RESERVED_103"/>
+ <value value="104" name="A7XX_PERF_TP_RESERVED_104"/>
+ <value value="105" name="A7XX_PERF_TP_RESERVED_105"/>
+ <value value="106" name="A7XX_PERF_TP_RESERVED_106"/>
+ <value value="107" name="A7XX_PERF_TP_RESERVED_107"/>
+ <value value="108" name="A7XX_PERF_TP_RESERVED_108"/>
+ <value value="109" name="A7XX_PERF_TP_RESERVED_109"/>
+ <value value="110" name="A7XX_PERF_TP_RESERVED_110"/>
+ <value value="111" name="A7XX_PERF_TP_RESERVED_111"/>
+ <value value="112" name="A7XX_PERF_TP_RESERVED_112"/>
+ <value value="113" name="A7XX_PERF_TP_RESERVED_113"/>
+ <value value="114" name="A7XX_PERF_TP_RESERVED_114"/>
+ <value value="115" name="A7XX_PERF_TP_RESERVED_115"/>
+ <value value="116" name="A7XX_PERF_TP_RESERVED_116"/>
+ <value value="117" name="A7XX_PERF_TP_RESERVED_117"/>
+ <value value="118" name="A7XX_PERF_TP_RESERVED_118"/>
+ <value value="119" name="A7XX_PERF_TP_RESERVED_119"/>
+ <value value="120" name="A7XX_PERF_TP_RESERVED_120"/>
+ <value value="121" name="A7XX_PERF_TP_RESERVED_121"/>
+ <value value="122" name="A7XX_PERF_TP_RESERVED_122"/>
+ <value value="123" name="A7XX_PERF_TP_RESERVED_123"/>
+ <value value="124" name="A7XX_PERF_TP_RESERVED_124"/>
+ <value value="125" name="A7XX_PERF_TP_RESERVED_125"/>
+ <value value="126" name="A7XX_PERF_TP_RESERVED_126"/>
+ <value value="127" name="A7XX_PERF_TP_RESERVED_127"/>
+ <value value="128" name="A7XX_PERF_TP_FORMAT_DECOMP_BILINEAR"/>
+ <value value="129" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP16"/>
+ <value value="130" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP16"/>
+ <value value="131" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP32"/>
+ <value value="132" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP32"/>
+</enum>
+
+<enum name="a7xx_sp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_SP_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_SP_ALU_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_SP_EFU_WORKING_CYCLES"/>
+ <value value="3" name="A7XX_PERF_SP_STALL_CYCLES_VPC"/>
+ <value value="4" name="A7XX_PERF_SP_STALL_CYCLES_TP"/>
+ <value value="5" name="A7XX_PERF_SP_STALL_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_SP_STALL_CYCLES_RB"/>
+ <value value="7" name="A7XX_PERF_SP_NON_EXECUTION_CYCLES"/>
+ <value value="8" name="A7XX_PERF_SP_WAVE_CONTEXTS"/>
+ <value value="9" name="A7XX_PERF_SP_WAVE_CONTEXT_CYCLES"/>
+ <value value="10" name="A7XX_PERF_SP_STAGE_WAVE_CYCLES"/>
+ <value value="11" name="A7XX_PERF_SP_STAGE_WAVE_SAMPLES"/>
+ <value value="12" name="A7XX_PERF_SP_VS_STAGE_WAVE_CYCLES"/>
+ <value value="13" name="A7XX_PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
+ <value value="14" name="A7XX_PERF_SP_FS_STAGE_DURATION_CYCLES"/>
+ <value value="15" name="A7XX_PERF_SP_VS_STAGE_DURATION_CYCLES"/>
+ <value value="16" name="A7XX_PERF_SP_WAVE_CTRL_CYCLES"/>
+ <value value="17" name="A7XX_PERF_SP_WAVE_LOAD_CYCLES"/>
+ <value value="18" name="A7XX_PERF_SP_WAVE_EMIT_CYCLES"/>
+ <value value="19" name="A7XX_PERF_SP_WAVE_NOP_CYCLES"/>
+ <value value="20" name="A7XX_PERF_SP_WAVE_WAIT_CYCLES"/>
+ <value value="21" name="A7XX_PERF_SP_WAVE_FETCH_CYCLES"/>
+ <value value="22" name="A7XX_PERF_SP_WAVE_IDLE_CYCLES"/>
+ <value value="23" name="A7XX_PERF_SP_WAVE_END_CYCLES"/>
+ <value value="24" name="A7XX_PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
+ <value value="25" name="A7XX_PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
+ <value value="26" name="A7XX_PERF_SP_WAVE_JOIN_CYCLES"/>
+ <value value="27" name="A7XX_PERF_SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="28" name="A7XX_PERF_SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="29" name="A7XX_PERF_SP_LM_ATOMICS"/>
+ <value value="30" name="A7XX_PERF_SP_GM_LOAD_INSTRUCTIONS"/>
+ <value value="31" name="A7XX_PERF_SP_GM_STORE_INSTRUCTIONS"/>
+ <value value="32" name="A7XX_PERF_SP_GM_ATOMICS"/>
+ <value value="33" name="A7XX_PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="34" name="A7XX_PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="35" name="A7XX_PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="36" name="A7XX_PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="37" name="A7XX_PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="38" name="A7XX_PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="39" name="A7XX_PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="40" name="A7XX_PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="41" name="A7XX_PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="42" name="A7XX_PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
+ <value value="43" name="A7XX_PERF_SP_VS_INSTRUCTIONS"/>
+ <value value="44" name="A7XX_PERF_SP_FS_INSTRUCTIONS"/>
+ <value value="45" name="A7XX_PERF_SP_ADDR_LOCK_COUNT"/>
+ <value value="46" name="A7XX_PERF_SP_UCHE_READ_TRANS"/>
+ <value value="47" name="A7XX_PERF_SP_UCHE_WRITE_TRANS"/>
+ <value value="48" name="A7XX_PERF_SP_EXPORT_VPC_TRANS"/>
+ <value value="49" name="A7XX_PERF_SP_EXPORT_RB_TRANS"/>
+ <value value="50" name="A7XX_PERF_SP_PIXELS_KILLED"/>
+ <value value="51" name="A7XX_PERF_SP_ICL1_REQUESTS"/>
+ <value value="52" name="A7XX_PERF_SP_ICL1_MISSES"/>
+ <value value="53" name="A7XX_PERF_SP_HS_INSTRUCTIONS"/>
+ <value value="54" name="A7XX_PERF_SP_DS_INSTRUCTIONS"/>
+ <value value="55" name="A7XX_PERF_SP_GS_INSTRUCTIONS"/>
+ <value value="56" name="A7XX_PERF_SP_CS_INSTRUCTIONS"/>
+ <value value="57" name="A7XX_PERF_SP_GPR_READ"/>
+ <value value="58" name="A7XX_PERF_SP_GPR_WRITE"/>
+ <value value="59" name="A7XX_PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="60" name="A7XX_PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="61" name="A7XX_PERF_SP_LM_BANK_CONFLICTS"/>
+ <value value="62" name="A7XX_PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
+ <value value="63" name="A7XX_PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
+ <value value="64" name="A7XX_PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
+ <value value="65" name="A7XX_PERF_SP_LM_WORKING_CYCLES"/>
+ <value value="66" name="A7XX_PERF_SP_DISPATCHER_WORKING_CYCLES"/>
+ <value value="67" name="A7XX_PERF_SP_SEQUENCER_WORKING_CYCLES"/>
+ <value value="68" name="A7XX_PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
+ <value value="69" name="A7XX_PERF_SP_STARVE_CYCLES_HLSQ"/>
+ <value value="70" name="A7XX_PERF_SP_NON_EXECUTION_LS_CYCLES"/>
+ <value value="71" name="A7XX_PERF_SP_WORKING_EU"/>
+ <value value="72" name="A7XX_PERF_SP_ANY_EU_WORKING"/>
+ <value value="73" name="A7XX_PERF_SP_WORKING_EU_FS_STAGE"/>
+ <value value="74" name="A7XX_PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
+ <value value="75" name="A7XX_PERF_SP_WORKING_EU_VS_STAGE"/>
+ <value value="76" name="A7XX_PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
+ <value value="77" name="A7XX_PERF_SP_WORKING_EU_CS_STAGE"/>
+ <value value="78" name="A7XX_PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
+ <value value="79" name="A7XX_PERF_SP_GPR_READ_PREFETCH"/>
+ <value value="80" name="A7XX_PERF_SP_GPR_READ_CONFLICT"/>
+ <value value="81" name="A7XX_PERF_SP_GPR_WRITE_CONFLICT"/>
+ <value value="82" name="A7XX_PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
+ <value value="83" name="A7XX_PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
+ <value value="84" name="A7XX_PERF_SP_EXECUTABLE_WAVES"/>
+ <value value="85" name="A7XX_PERF_SP_ICL1_MISS_FETCH_CYCLES"/>
+ <value value="86" name="A7XX_PERF_SP_WORKING_EU_LPAC"/>
+ <value value="87" name="A7XX_PERF_SP_BYPASS_BUSY_CYCLES"/>
+ <value value="88" name="A7XX_PERF_SP_ANY_EU_WORKING_LPAC"/>
+ <value value="89" name="A7XX_PERF_SP_WAVE_ALU_CYCLES"/>
+ <value value="90" name="A7XX_PERF_SP_WAVE_EFU_CYCLES"/>
+ <value value="91" name="A7XX_PERF_SP_WAVE_INT_CYCLES"/>
+ <value value="92" name="A7XX_PERF_SP_WAVE_CSP_CYCLES"/>
+ <value value="93" name="A7XX_PERF_SP_EWAVE_CONTEXTS"/>
+ <value value="94" name="A7XX_PERF_SP_EWAVE_CONTEXT_CYCLES"/>
+ <value value="95" name="A7XX_PERF_SP_LPAC_BUSY_CYCLES"/>
+ <value value="96" name="A7XX_PERF_SP_LPAC_INSTRUCTIONS"/>
+ <value value="97" name="A7XX_PERF_SP_FS_STAGE_1X_WAVES"/>
+ <value value="98" name="A7XX_PERF_SP_FS_STAGE_2X_WAVES"/>
+ <value value="99" name="A7XX_PERF_SP_QUADS"/>
+ <value value="100" name="A7XX_PERF_SP_CS_INVOCATIONS"/>
+ <value value="101" name="A7XX_PERF_SP_PIXELS"/>
+ <value value="102" name="A7XX_PERF_SP_LPAC_DRAWCALLS"/>
+ <value value="103" name="A7XX_PERF_SP_PI_WORKING_CYCLES"/>
+ <value value="104" name="A7XX_PERF_SP_WAVE_INPUT_CYCLES"/>
+ <value value="105" name="A7XX_PERF_SP_WAVE_OUTPUT_CYCLES"/>
+ <value value="106" name="A7XX_PERF_SP_WAVE_HWAVE_WAIT_CYCLES"/>
+ <value value="107" name="A7XX_PERF_SP_WAVE_HWAVE_SYNC"/>
+ <value value="108" name="A7XX_PERF_SP_OUTPUT_3D_PIXELS"/>
+ <value value="109" name="A7XX_PERF_SP_FULL_ALU_MAD_INSTRUCTIONS"/>
+ <value value="110" name="A7XX_PERF_SP_HALF_ALU_MAD_INSTRUCTIONS"/>
+ <value value="111" name="A7XX_PERF_SP_FULL_ALU_MUL_INSTRUCTIONS"/>
+ <value value="112" name="A7XX_PERF_SP_HALF_ALU_MUL_INSTRUCTIONS"/>
+ <value value="113" name="A7XX_PERF_SP_FULL_ALU_ADD_INSTRUCTIONS"/>
+ <value value="114" name="A7XX_PERF_SP_HALF_ALU_ADD_INSTRUCTIONS"/>
+ <value value="115" name="A7XX_PERF_SP_BARY_FP32_INSTRUCTIONS"/>
+ <value value="116" name="A7XX_PERF_SP_ALU_GPR_READ_CYCLES"/>
+ <value value="117" name="A7XX_PERF_SP_ALU_DATA_FORWARDING_CYCLES"/>
+ <value value="118" name="A7XX_PERF_SP_LM_FULL_CYCLES"/>
+ <value value="119" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_CYCLES"/>
+ <value value="120" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_SAMPLES"/>
+ <value value="121" name="A7XX_PERF_SP_FS_STAGE_PI_TEX_INSTRUCTION"/>
+ <value value="122" name="A7XX_PERF_SP_RAY_QUERY_INSTRUCTIONS"/>
+ <value value="123" name="A7XX_PERF_SP_RBRT_KICKOFF_FIBERS"/>
+ <value value="124" name="A7XX_PERF_SP_RBRT_KICKOFF_DQUADS"/>
+ <value value="125" name="A7XX_PERF_SP_RTU_BUSY_CYCLES"/>
+ <value value="126" name="A7XX_PERF_SP_RTU_L0_HITS"/>
+ <value value="127" name="A7XX_PERF_SP_RTU_L0_MISSES"/>
+ <value value="128" name="A7XX_PERF_SP_RTU_L0_HIT_ON_MISS"/>
+ <value value="129" name="A7XX_PERF_SP_RTU_STALL_CYCLES_WAVE_QUEUE"/>
+ <value value="130" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_HIT_QUEUE"/>
+ <value value="131" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_MISS_QUEUE"/>
+ <value value="132" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0D_IDX_QUEUE"/>
+ <value value="133" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0DATA"/>
+ <value value="134" name="A7XX_PERF_SP_RTU_STALL_CYCLES_REPLACE_CNT"/>
+ <value value="135" name="A7XX_PERF_SP_RTU_STALL_CYCLES_MRG_CNT"/>
+ <value value="136" name="A7XX_PERF_SP_RTU_STALL_CYCLES_UCHE"/>
+ <value value="137" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_L0"/>
+ <value value="138" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_INS_FIFO"/>
+ <value value="139" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_CYCLES"/>
+ <value value="140" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_SAMPLES"/>
+ <value value="141" name="A7XX_PERF_SP_STCHE_MISS_INC_VS"/>
+ <value value="142" name="A7XX_PERF_SP_STCHE_MISS_INC_FS"/>
+ <value value="143" name="A7XX_PERF_SP_STCHE_MISS_INC_BV"/>
+ <value value="144" name="A7XX_PERF_SP_STCHE_MISS_INC_LPAC"/>
+ <value value="145" name="A7XX_PERF_SP_VGPR_ACTIVE_CONTEXTS"/>
+ <value value="146" name="A7XX_PERF_SP_PGPR_ALLOC_CONTEXTS"/>
+ <value value="147" name="A7XX_PERF_SP_VGPR_ALLOC_CONTEXTS"/>
+ <value value="148" name="A7XX_PERF_SP_RTU_RAY_BOX_INTERSECTIONS"/>
+ <value value="149" name="A7XX_PERF_SP_RTU_RAY_TRIANGLE_INTERSECTIONS"/>
+ <value value="150" name="A7XX_PERF_SP_SCH_STALL_CYCLES_RTU"/>
+</enum>
+
+<enum name="a7xx_rb_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RB_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_RB_STALL_CYCLES_HLSQ"/>
+ <value value="2" name="A7XX_PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
+ <value value="3" name="A7XX_PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
+ <value value="4" name="A7XX_PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
+ <value value="5" name="A7XX_PERF_RB_STARVE_CYCLES_SP"/>
+ <value value="6" name="A7XX_PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
+ <value value="7" name="A7XX_PERF_RB_STARVE_CYCLES_CCU"/>
+ <value value="8" name="A7XX_PERF_RB_STARVE_CYCLES_Z_PLANE"/>
+ <value value="9" name="A7XX_PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
+ <value value="10" name="A7XX_PERF_RB_Z_WORKLOAD"/>
+ <value value="11" name="A7XX_PERF_RB_HLSQ_ACTIVE"/>
+ <value value="12" name="A7XX_PERF_RB_Z_READ"/>
+ <value value="13" name="A7XX_PERF_RB_Z_WRITE"/>
+ <value value="14" name="A7XX_PERF_RB_C_READ"/>
+ <value value="15" name="A7XX_PERF_RB_C_WRITE"/>
+ <value value="16" name="A7XX_PERF_RB_TOTAL_PASS"/>
+ <value value="17" name="A7XX_PERF_RB_Z_PASS"/>
+ <value value="18" name="A7XX_PERF_RB_Z_FAIL"/>
+ <value value="19" name="A7XX_PERF_RB_S_FAIL"/>
+ <value value="20" name="A7XX_PERF_RB_BLENDED_FXP_COMPONENTS"/>
+ <value value="21" name="A7XX_PERF_RB_BLENDED_FP16_COMPONENTS"/>
+ <value value="22" name="A7XX_PERF_RB_PS_INVOCATIONS"/>
+ <value value="23" name="A7XX_PERF_RB_2D_ALIVE_CYCLES"/>
+ <value value="24" name="A7XX_PERF_RB_2D_STALL_CYCLES_A2D"/>
+ <value value="25" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SRC"/>
+ <value value="26" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SP"/>
+ <value value="27" name="A7XX_PERF_RB_2D_STARVE_CYCLES_DST"/>
+ <value value="28" name="A7XX_PERF_RB_2D_VALID_PIXELS"/>
+ <value value="29" name="A7XX_PERF_RB_3D_PIXELS"/>
+ <value value="30" name="A7XX_PERF_RB_BLENDER_WORKING_CYCLES"/>
+ <value value="31" name="A7XX_PERF_RB_ZPROC_WORKING_CYCLES"/>
+ <value value="32" name="A7XX_PERF_RB_CPROC_WORKING_CYCLES"/>
+ <value value="33" name="A7XX_PERF_RB_SAMPLER_WORKING_CYCLES"/>
+ <value value="34" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
+ <value value="35" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
+ <value value="36" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
+ <value value="37" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
+ <value value="38" name="A7XX_PERF_RB_STALL_CYCLES_VPC"/>
+ <value value="39" name="A7XX_PERF_RB_2D_INPUT_TRANS"/>
+ <value value="40" name="A7XX_PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
+ <value value="41" name="A7XX_PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
+ <value value="42" name="A7XX_PERF_RB_BLENDED_FP32_COMPONENTS"/>
+ <value value="43" name="A7XX_PERF_RB_COLOR_PIX_TILES"/>
+ <value value="44" name="A7XX_PERF_RB_STALL_CYCLES_CCU"/>
+ <value value="45" name="A7XX_PERF_RB_EARLY_Z_ARB3_GRANT"/>
+ <value value="46" name="A7XX_PERF_RB_LATE_Z_ARB3_GRANT"/>
+ <value value="47" name="A7XX_PERF_RB_EARLY_Z_SKIP_GRANT"/>
+ <value value="48" name="A7XX_PERF_RB_VRS_1x1_QUADS"/>
+ <value value="49" name="A7XX_PERF_RB_VRS_2x1_QUADS"/>
+ <value value="50" name="A7XX_PERF_RB_VRS_1x2_QUADS"/>
+ <value value="51" name="A7XX_PERF_RB_VRS_2x2_QUADS"/>
+ <value value="52" name="A7XX_PERF_RB_VRS_4x2_QUADS"/>
+ <value value="53" name="A7XX_PERF_RB_VRS_4x4_QUADS"/>
+</enum>
+
+<enum name="a7xx_vsc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VSC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VSC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_VSC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="A7XX_PERF_VSC_EOT_NUM"/>
+ <value value="4" name="A7XX_PERF_VSC_INPUT_TILES"/>
+</enum>
+
+<enum name="a7xx_ccu_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CCU_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
+ <value value="2" name="A7XX_PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
+ <value value="3" name="A7XX_PERF_CCU_DEPTH_BLOCKS"/>
+ <value value="4" name="A7XX_PERF_CCU_COLOR_BLOCKS"/>
+ <value value="5" name="A7XX_PERF_CCU_DEPTH_BLOCK_HIT"/>
+ <value value="6" name="A7XX_PERF_CCU_COLOR_BLOCK_HIT"/>
+ <value value="7" name="A7XX_PERF_CCU_PARTIAL_BLOCK_READ"/>
+ <value value="8" name="A7XX_PERF_CCU_GMEM_READ"/>
+ <value value="9" name="A7XX_PERF_CCU_GMEM_WRITE"/>
+ <value value="10" name="A7XX_PERF_CCU_2D_RD_REQ"/>
+ <value value="11" name="A7XX_PERF_CCU_2D_WR_REQ"/>
+ <value value="12" name="A7XX_PERF_CCU_UBWC_COLOR_BLOCKS_CONCURRENT"/>
+ <value value="13" name="A7XX_PERF_CCU_UBWC_DEPTH_BLOCKS_CONCURRENT"/>
+ <value value="14" name="A7XX_PERF_CCU_COLOR_RESOLVE_DROPPED"/>
+ <value value="15" name="A7XX_PERF_CCU_DEPTH_RESOLVE_DROPPED"/>
+ <value value="16" name="A7XX_PERF_CCU_COLOR_RENDER_CONCURRENT"/>
+ <value value="17" name="A7XX_PERF_CCU_DEPTH_RENDER_CONCURRENT"/>
+ <value value="18" name="A7XX_PERF_CCU_COLOR_RESOLVE_AFTER_RENDER"/>
+ <value value="19" name="A7XX_PERF_CCU_DEPTH_RESOLVE_AFTER_RENDER"/>
+ <value value="20" name="A7XX_PERF_CCU_GMEM_EXTRA_DEPTH_READ"/>
+ <value value="21" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA"/>
+ <value value="22" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA_FULL"/>
+</enum>
+
+<enum name="a7xx_lrz_perfcounter_select">
+ <value value="0" name="A7XX_PERF_LRZ_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_LRZ_STARVE_CYCLES_RAS"/>
+ <value value="2" name="A7XX_PERF_LRZ_STALL_CYCLES_RB"/>
+ <value value="3" name="A7XX_PERF_LRZ_STALL_CYCLES_VSC"/>
+ <value value="4" name="A7XX_PERF_LRZ_STALL_CYCLES_VPC"/>
+ <value value="5" name="A7XX_PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
+ <value value="6" name="A7XX_PERF_LRZ_STALL_CYCLES_UCHE"/>
+ <value value="7" name="A7XX_PERF_LRZ_LRZ_READ"/>
+ <value value="8" name="A7XX_PERF_LRZ_LRZ_WRITE"/>
+ <value value="9" name="A7XX_PERF_LRZ_READ_LATENCY"/>
+ <value value="10" name="A7XX_PERF_LRZ_MERGE_CACHE_UPDATING"/>
+ <value value="11" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
+ <value value="12" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
+ <value value="13" name="A7XX_PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
+ <value value="14" name="A7XX_PERF_LRZ_FULL_8X8_TILES"/>
+ <value value="15" name="A7XX_PERF_LRZ_PARTIAL_8X8_TILES"/>
+ <value value="16" name="A7XX_PERF_LRZ_TILE_KILLED"/>
+ <value value="17" name="A7XX_PERF_LRZ_TOTAL_PIXEL"/>
+ <value value="18" name="A7XX_PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
+ <value value="19" name="A7XX_PERF_LRZ_FEEDBACK_ACCEPT"/>
+ <value value="20" name="A7XX_PERF_LRZ_FEEDBACK_DISCARD"/>
+ <value value="21" name="A7XX_PERF_LRZ_FEEDBACK_STALL"/>
+ <value value="22" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
+ <value value="23" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
+ <value value="24" name="A7XX_PERF_LRZ_RAS_MASK_TRANS"/>
+ <value value="25" name="A7XX_PERF_LRZ_STALL_CYCLES_MVC"/>
+ <value value="26" name="A7XX_PERF_LRZ_TILE_KILLED_BY_IMAGE_VRS"/>
+ <value value="27" name="A7XX_PERF_LRZ_TILE_KILLED_BY_Z"/>
+</enum>
+
+<enum name="a7xx_cmp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CMPDECMP_STALL_CYCLES_ARB"/>
+ <value value="1" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
+ <value value="2" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
+ <value value="3" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
+ <value value="4" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
+ <value value="5" name="A7XX_PERF_CMPDECMP_VBIF_READ_REQUEST"/>
+ <value value="6" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
+ <value value="7" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA"/>
+ <value value="8" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA"/>
+ <value value="9" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
+ <value value="10" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
+ <value value="11" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
+ <value value="12" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
+ <value value="13" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
+ <value value="14" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
+ <value value="15" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
+ <value value="16" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
+ <value value="17" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
+ <value value="18" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
+ <value value="19" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
+ <value value="20" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
+ <value value="21" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
+ <value value="22" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
+ <value value="23" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
+ <value value="24" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
+ <value value="25" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
+ <value value="26" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
+ <value value="27" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
+ <value value="28" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
+ <value value="29" name="A7XX_PERF_CMPDECMP_RESOLVE_EVENTS"/>
+ <value value="30" name="A7XX_PERF_CMPDECMP_CONCURRENT_RESOLVE_EVENTS"/>
+ <value value="31" name="A7XX_PERF_CMPDECMP_DROPPED_CLEAR_EVENTS"/>
+ <value value="32" name="A7XX_PERF_CMPDECMP_ST_BLOCKS_CONCURRENT"/>
+ <value value="33" name="A7XX_PERF_CMPDECMP_LRZ_ST_BLOCKS_CONCURRENT"/>
+ <value value="34" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG0_COUNT"/>
+ <value value="35" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG1_COUNT"/>
+ <value value="36" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG2_COUNT"/>
+ <value value="37" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG3_COUNT"/>
+ <value value="38" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG4_COUNT"/>
+ <value value="39" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG5_COUNT"/>
+ <value value="40" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG6_COUNT"/>
+ <value value="41" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG8_COUNT"/>
+ <value value="42" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG0_COUNT"/>
+ <value value="43" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG1_COUNT"/>
+ <value value="44" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG2_COUNT"/>
+ <value value="45" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG3_COUNT"/>
+ <value value="46" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG4_COUNT"/>
+ <value value="47" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG5_COUNT"/>
+ <value value="48" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG6_COUNT"/>
+ <value value="49" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG8_COUNT"/>
+</enum>
+
+<enum name="a7xx_gbif_perfcounter_select">
+ <value value="0" name="A7XX_PERF_GBIF_RESERVED_0"/>
+ <value value="1" name="A7XX_PERF_GBIF_RESERVED_1"/>
+ <value value="2" name="A7XX_PERF_GBIF_RESERVED_2"/>
+ <value value="3" name="A7XX_PERF_GBIF_RESERVED_3"/>
+ <value value="4" name="A7XX_PERF_GBIF_RESERVED_4"/>
+ <value value="5" name="A7XX_PERF_GBIF_RESERVED_5"/>
+ <value value="6" name="A7XX_PERF_GBIF_RESERVED_6"/>
+ <value value="7" name="A7XX_PERF_GBIF_RESERVED_7"/>
+ <value value="8" name="A7XX_PERF_GBIF_RESERVED_8"/>
+ <value value="9" name="A7XX_PERF_GBIF_RESERVED_9"/>
+ <value value="10" name="A7XX_PERF_GBIF_AXI0_READ_REQUESTS_TOTAL"/>
+ <value value="11" name="A7XX_PERF_GBIF_AXI1_READ_REQUESTS_TOTAL"/>
+ <value value="12" name="A7XX_PERF_GBIF_RESERVED_12"/>
+ <value value="13" name="A7XX_PERF_GBIF_RESERVED_13"/>
+ <value value="14" name="A7XX_PERF_GBIF_RESERVED_14"/>
+ <value value="15" name="A7XX_PERF_GBIF_RESERVED_15"/>
+ <value value="16" name="A7XX_PERF_GBIF_RESERVED_16"/>
+ <value value="17" name="A7XX_PERF_GBIF_RESERVED_17"/>
+ <value value="18" name="A7XX_PERF_GBIF_RESERVED_18"/>
+ <value value="19" name="A7XX_PERF_GBIF_RESERVED_19"/>
+ <value value="20" name="A7XX_PERF_GBIF_RESERVED_20"/>
+ <value value="21" name="A7XX_PERF_GBIF_RESERVED_21"/>
+ <value value="22" name="A7XX_PERF_GBIF_AXI0_WRITE_REQUESTS_TOTAL"/>
+ <value value="23" name="A7XX_PERF_GBIF_AXI1_WRITE_REQUESTS_TOTAL"/>
+ <value value="24" name="A7XX_PERF_GBIF_RESERVED_24"/>
+ <value value="25" name="A7XX_PERF_GBIF_RESERVED_25"/>
+ <value value="26" name="A7XX_PERF_GBIF_RESERVED_26"/>
+ <value value="27" name="A7XX_PERF_GBIF_RESERVED_27"/>
+ <value value="28" name="A7XX_PERF_GBIF_RESERVED_28"/>
+ <value value="29" name="A7XX_PERF_GBIF_RESERVED_29"/>
+ <value value="30" name="A7XX_PERF_GBIF_RESERVED_30"/>
+ <value value="31" name="A7XX_PERF_GBIF_RESERVED_31"/>
+ <value value="32" name="A7XX_PERF_GBIF_RESERVED_32"/>
+ <value value="33" name="A7XX_PERF_GBIF_RESERVED_33"/>
+ <value value="34" name="A7XX_PERF_GBIF_AXI0_READ_DATA_BEATS_TOTAL"/>
+ <value value="35" name="A7XX_PERF_GBIF_AXI1_READ_DATA_BEATS_TOTAL"/>
+ <value value="36" name="A7XX_PERF_GBIF_RESERVED_36"/>
+ <value value="37" name="A7XX_PERF_GBIF_RESERVED_37"/>
+ <value value="38" name="A7XX_PERF_GBIF_RESERVED_38"/>
+ <value value="39" name="A7XX_PERF_GBIF_RESERVED_39"/>
+ <value value="40" name="A7XX_PERF_GBIF_RESERVED_40"/>
+ <value value="41" name="A7XX_PERF_GBIF_RESERVED_41"/>
+ <value value="42" name="A7XX_PERF_GBIF_RESERVED_42"/>
+ <value value="43" name="A7XX_PERF_GBIF_RESERVED_43"/>
+ <value value="44" name="A7XX_PERF_GBIF_RESERVED_44"/>
+ <value value="45" name="A7XX_PERF_GBIF_RESERVED_45"/>
+ <value value="46" name="A7XX_PERF_GBIF_AXI0_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="47" name="A7XX_PERF_GBIF_AXI1_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="48" name="A7XX_PERF_GBIF_RESERVED_48"/>
+ <value value="49" name="A7XX_PERF_GBIF_RESERVED_49"/>
+ <value value="50" name="A7XX_PERF_GBIF_RESERVED_50"/>
+ <value value="51" name="A7XX_PERF_GBIF_RESERVED_51"/>
+ <value value="52" name="A7XX_PERF_GBIF_RESERVED_52"/>
+ <value value="53" name="A7XX_PERF_GBIF_RESERVED_53"/>
+ <value value="54" name="A7XX_PERF_GBIF_RESERVED_54"/>
+ <value value="55" name="A7XX_PERF_GBIF_RESERVED_55"/>
+ <value value="56" name="A7XX_PERF_GBIF_RESERVED_56"/>
+ <value value="57" name="A7XX_PERF_GBIF_RESERVED_57"/>
+ <value value="58" name="A7XX_PERF_GBIF_RESERVED_58"/>
+ <value value="59" name="A7XX_PERF_GBIF_RESERVED_59"/>
+ <value value="60" name="A7XX_PERF_GBIF_RESERVED_60"/>
+ <value value="61" name="A7XX_PERF_GBIF_RESERVED_61"/>
+ <value value="62" name="A7XX_PERF_GBIF_RESERVED_62"/>
+ <value value="63" name="A7XX_PERF_GBIF_RESERVED_63"/>
+ <value value="64" name="A7XX_PERF_GBIF_RESERVED_64"/>
+ <value value="65" name="A7XX_PERF_GBIF_RESERVED_65"/>
+ <value value="66" name="A7XX_PERF_GBIF_RESERVED_66"/>
+ <value value="67" name="A7XX_PERF_GBIF_RESERVED_67"/>
+ <value value="68" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_RD_ALL"/>
+ <value value="69" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_RD_ALL"/>
+ <value value="70" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_WR_ALL"/>
+ <value value="71" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_WR_ALL"/>
+ <value value="72" name="A7XX_PERF_GBIF_AXI_CH0_REQUEST_HELD_OFF"/>
+ <value value="73" name="A7XX_PERF_GBIF_AXI_CH1_REQUEST_HELD_OFF"/>
+ <value value="74" name="A7XX_PERF_GBIF_AXI_REQUEST_HELD_OFF"/>
+ <value value="75" name="A7XX_PERF_GBIF_AXI_CH0_WRITE_DATA_HELD_OFF"/>
+ <value value="76" name="A7XX_PERF_GBIF_AXI_CH1_WRITE_DATA_HELD_OFF"/>
+ <value value="77" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_DATA_HELD_OFF"/>
+ <value value="78" name="A7XX_PERF_GBIF_AXI_ALL_READ_BEATS"/>
+ <value value="79" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_BEATS"/>
+ <value value="80" name="A7XX_PERF_GBIF_AXI_ALL_BEATS"/>
+</enum>
+
+<enum name="a7xx_ufc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_UFC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_UFC_READ_DATA_VBIF"/>
+ <value value="2" name="A7XX_PERF_UFC_WRITE_DATA_VBIF"/>
+ <value value="3" name="A7XX_PERF_UFC_READ_REQUEST_VBIF"/>
+ <value value="4" name="A7XX_PERF_UFC_WRITE_REQUEST_VBIF"/>
+ <value value="5" name="A7XX_PERF_UFC_LRZ_FILTER_HIT"/>
+ <value value="6" name="A7XX_PERF_UFC_LRZ_FILTER_MISS"/>
+ <value value="7" name="A7XX_PERF_UFC_CRE_FILTER_HIT"/>
+ <value value="8" name="A7XX_PERF_UFC_CRE_FILTER_MISS"/>
+ <value value="9" name="A7XX_PERF_UFC_SP_FILTER_HIT"/>
+ <value value="10" name="A7XX_PERF_UFC_SP_FILTER_MISS"/>
+ <value value="11" name="A7XX_PERF_UFC_SP_REQUESTS"/>
+ <value value="12" name="A7XX_PERF_UFC_TP_FILTER_HIT"/>
+ <value value="13" name="A7XX_PERF_UFC_TP_FILTER_MISS"/>
+ <value value="14" name="A7XX_PERF_UFC_TP_REQUESTS"/>
+ <value value="15" name="A7XX_PERF_UFC_MAIN_HIT_LRZ_PREFETCH"/>
+ <value value="16" name="A7XX_PERF_UFC_MAIN_HIT_CRE_PREFETCH"/>
+ <value value="17" name="A7XX_PERF_UFC_MAIN_HIT_SP_PREFETCH"/>
+ <value value="18" name="A7XX_PERF_UFC_MAIN_HIT_TP_PREFETCH"/>
+ <value value="19" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_READ"/>
+ <value value="20" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_WRITE"/>
+ <value value="21" name="A7XX_PERF_UFC_MAIN_MISS_LRZ_PREFETCH"/>
+ <value value="22" name="A7XX_PERF_UFC_MAIN_MISS_CRE_PREFETCH"/>
+ <value value="23" name="A7XX_PERF_UFC_MAIN_MISS_SP_PREFETCH"/>
+ <value value="24" name="A7XX_PERF_UFC_MAIN_MISS_TP_PREFETCH"/>
+ <value value="25" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_READ"/>
+ <value value="26" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_WRITE"/>
+ <value value="27" name="A7XX_PERF_UFC_UBWC_READ_UFC_TRANS"/>
+ <value value="28" name="A7XX_PERF_UFC_UBWC_WRITE_UFC_TRANS"/>
+ <value value="29" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_CMD"/>
+ <value value="30" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_RDATA"/>
+ <value value="31" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_WDATA"/>
+ <value value="32" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_WR_FLAG"/>
+ <value value="33" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_FLAG_RTN"/>
+ <value value="34" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_EVENT"/>
+ <value value="35" name="A7XX_PERF_UFC_LRZ_PREFETCH_STALLED_CYCLES"/>
+ <value value="36" name="A7XX_PERF_UFC_CRE_PREFETCH_STALLED_CYCLES"/>
+ <value value="37" name="A7XX_PERF_UFC_SPTP_PREFETCH_STALLED_CYCLES"/>
+ <value value="38" name="A7XX_PERF_UFC_UBWC_RD_STALLED_CYCLES"/>
+ <value value="39" name="A7XX_PERF_UFC_UBWC_WR_STALLED_CYCLES"/>
+ <value value="40" name="A7XX_PERF_UFC_PREFETCH_STALLED_CYCLES"/>
+ <value value="41" name="A7XX_PERF_UFC_EVICTION_STALLED_CYCLES"/>
+ <value value="42" name="A7XX_PERF_UFC_LOCK_STALLED_CYCLES"/>
+ <value value="43" name="A7XX_PERF_UFC_MISS_LATENCY_CYCLES"/>
+ <value value="44" name="A7XX_PERF_UFC_MISS_LATENCY_SAMPLES"/>
+ <value value="45" name="A7XX_PERF_UFC_UBWC_REQ_STALLED_CYCLES"/>
+ <value value="46" name="A7XX_PERF_UFC_TP_HINT_TAG_MISS"/>
+ <value value="47" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_RDY"/>
+ <value value="48" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_NRDY"/>
+ <value value="49" name="A7XX_PERF_UFC_TP_HINT_IS_FCLEAR"/>
+ <value value="50" name="A7XX_PERF_UFC_TP_HINT_IS_ALPHA0"/>
+ <value value="51" name="A7XX_PERF_UFC_SP_L1_FILTER_HIT"/>
+ <value value="52" name="A7XX_PERF_UFC_SP_L1_FILTER_MISS"/>
+ <value value="53" name="A7XX_PERF_UFC_SP_L1_FILTER_REQUESTS"/>
+ <value value="54" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_RDY"/>
+ <value value="55" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_NRDY"/>
+ <value value="56" name="A7XX_PERF_UFC_TP_L1_TAG_MISS"/>
+ <value value="57" name="A7XX_PERF_UFC_TP_L1_FILTER_REQUESTS"/>
+</enum>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index 462713401622..7abc08635495 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -21,9 +21,9 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="HLSQ_FLUSH" value="7" variants="A3XX-A4XX"/>
<value name="VIZQUERY_END" value="8" variants="A2XX"/>
<value name="SC_WAIT_WC" value="9" variants="A2XX"/>
- <value name="WRITE_PRIMITIVE_COUNTS" value="9" variants="A6XX"/>
- <value name="START_PRIMITIVE_CTRS" value="11" variants="A6XX"/>
- <value name="STOP_PRIMITIVE_CTRS" value="12" variants="A6XX"/>
+ <value name="WRITE_PRIMITIVE_COUNTS" value="9" variants="A6XX-"/>
+ <value name="START_PRIMITIVE_CTRS" value="11" variants="A6XX-"/>
+ <value name="STOP_PRIMITIVE_CTRS" value="12" variants="A6XX-"/>
<!-- Not sure that these 4 events don't have the same meaning as on A5XX+ -->
<value name="RST_PIX_CNT" value="13" variants="A2XX-A4XX"/>
<value name="RST_VTX_CNT" value="14" variants="A2XX-A4XX"/>
@@ -31,8 +31,8 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="STAT_EVENT" value="16" variants="A2XX-A4XX"/>
<value name="CACHE_FLUSH_AND_INV_TS_EVENT" value="20" variants="A2XX-A4XX"/>
<doc>
- If A6XX_RB_SAMPLE_COUNT_CONTROL.copy is true, writes OQ Z passed
- sample counts to RB_SAMPLE_COUNT_ADDR. This writes to main
+ If A6XX_RB_SAMPLE_COUNTER_CNTL.copy is true, writes OQ Z passed
+ sample counts to RB_SAMPLE_COUNTER_BASE. This writes to main
memory, skipping UCHE.
</doc>
<value name="ZPASS_DONE" value="21"/>
@@ -98,6 +98,13 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="BLIT" value="30" variants="A5XX-"/>
<doc>
+ Flip between the primary and secondary LRZ buffers. This is used
+ for concurrent binning, so that BV can write to one buffer while
+ BR reads from the other.
+ </doc>
+ <value name="LRZ_FLIP_BUFFER" value="36" variants="A7XX"/>
+
+ <doc>
Clears based on GRAS_LRZ_CNTL configuration; it can clear the
fast-clear buffer or the LRZ direction.
LRZ direction is stored at lrz_fc_offset + 0x200, has 1 byte which
@@ -114,6 +121,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="BLIT_OP_FILL_2D" value="39" variants="A5XX-"/>
<value name="BLIT_OP_COPY_2D" value="40" variants="A5XX-A6XX"/>
<value name="UNK_40" value="40" variants="A7XX"/>
+ <value name="LRZ_Q_CACHE_INVALIDATE" value="41" variants="A7XX"/>
<value name="BLIT_OP_SCALE_2D" value="42" variants="A5XX-"/>
<value name="CONTEXT_DONE_2D" value="43" variants="A5XX-"/>
<value name="UNK_2C" value="44" variants="A5XX-"/>
@@ -372,7 +380,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="CP_LOAD_STATE" value="0x30" variants="A3XX"/>
<value name="CP_LOAD_STATE4" value="0x30" variants="A4XX-A5XX"/>
<doc>Conditionally load an IB based on a flag, prefetch enabled</doc>
- <value name="CP_COND_INDIRECT_BUFFER_PFE" value="0x3a"/>
+ <value name="CP_COND_INDIRECT_BUFFER_PFE" value="0x3a" variants="A3XX-A5XX"/>
<doc>Conditionally load an IB based on a flag, prefetch disabled</doc>
<value name="CP_COND_INDIRECT_BUFFER_PFD" value="0x32" variants="A3XX"/>
<doc>Load a buffer with pre-fetch enabled</doc>
@@ -538,7 +546,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="CP_LOAD_STATE6_GEOM" value="0x32" variants="A6XX-"/>
<value name="CP_LOAD_STATE6_FRAG" value="0x34" variants="A6XX-"/>
<!--
- Note: For IBO state (Image/SSBOs) which have shared state across
+ Note: For UAV state (Image/SSBOs) which have shared state across
shader stages, for 3d pipeline CP_LOAD_STATE6 is used. But for
compute shaders, CP_LOAD_STATE6_FRAG is used. Possibly they are
interchangeable.
@@ -567,7 +575,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="IN_PREEMPT" value="0x0f" variants="A6XX-"/>
<!-- TODO do these exist on A5xx? -->
- <value name="CP_SCRATCH_WRITE" value="0x4c" variants="A6XX"/>
+ <value name="CP_SCRATCH_WRITE" value="0x4c" variants="A6XX-"/>
<value name="CP_REG_TO_MEM_OFFSET_MEM" value="0x74" variants="A6XX-"/>
<value name="CP_REG_TO_MEM_OFFSET_REG" value="0x72" variants="A6XX-"/>
<value name="CP_WAIT_MEM_GTE" value="0x14" variants="A6XX"/>
@@ -650,6 +658,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<doc>Reset various on-chip state used for synchronization</doc>
<value name="CP_RESET_CONTEXT_STATE" value="0x1f" variants="A7XX-"/>
+
+ <doc>Invalidates the "CCHE" introduced on a740</doc>
+ <value name="CP_CCHE_INVALIDATE" value="0x3a" variants="A7XX-"/>
+
+ <value name="CP_SCOPE_CNTL" value="0x6c" variants="A7XX-"/>
</enum>
@@ -792,14 +805,14 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<value name="SB6_GS_SHADER" value="0xb"/>
<value name="SB6_FS_SHADER" value="0xc"/>
<value name="SB6_CS_SHADER" value="0xd"/>
- <value name="SB6_IBO" value="0xe"/>
- <value name="SB6_CS_IBO" value="0xf"/>
+ <value name="SB6_UAV" value="0xe"/>
+ <value name="SB6_CS_UAV" value="0xf"/>
</enum>
<enum name="a6xx_state_type">
<value name="ST6_SHADER" value="0"/>
<value name="ST6_CONSTANTS" value="1"/>
<value name="ST6_UBO" value="2"/>
- <value name="ST6_IBO" value="3"/>
+ <value name="ST6_UAV" value="3"/>
</enum>
<enum name="a6xx_state_src">
<value name="SS6_DIRECT" value="0"/>
@@ -1121,39 +1134,93 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
+<enum name="a7xx_abs_mask_mode">
+ <value name="ABS_MASK" value="0x1"/>
+ <value name="NO_ABS_MASK" value="0x0"/>
+</enum>
+
<domain name="CP_SET_BIN_DATA5" width="32">
<reg32 offset="0" name="0">
+ <bitfield name="VSC_MASK" low="0" high="15" type="hex">
+ <doc>
+ A mask of bins, starting at VSC_N, whose
+ visibility is OR'd together. A value of 0 is
+ interpreted as 1 (i.e. just use VSC_N for
+ visbility) for backwards compatibility. Only
+ exists on a7xx.
+ </doc>
+ </bitfield>
<!-- equiv to PC_VSTREAM_CONTROL.SIZE on a3xx/a4xx: -->
<bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
<!-- equiv to PC_VSTREAM_CONTROL.N on a3xx/a4xx: -->
<bitfield name="VSC_N" low="22" high="26" type="uint"/>
+ <bitfield name="ABS_MASK" pos="28" type="a7xx_abs_mask_mode" addvariant="yes">
+ <doc>
+ If this field is 1, VSC_MASK and VSC_N are
+ ignored and instead a new ordinal immediately
+ after specifies the full 32-bit mask of bins
+ to use. The mask is "absolute" instead of
+ relative to VSC_N (see the sketch below).
+ </doc>
+ </bitfield>
</reg32>
- <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
- <reg32 offset="1" name="1">
- <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
- <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
- <reg32 offset="3" name="3">
- <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="4" name="4">
- <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
- </reg32>
- <!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
- <reg32 offset="5" name="5">
- <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="6" name="6">
- <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
- </reg32>
- <!--
- a7xx adds a few more addresses to the end of the pkt
- -->
- <reg64 offset="7" name="7"/>
- <reg64 offset="9" name="9"/>
+ <stripe varset="a7xx_abs_mask_mode" variants="NO_ABS_MASK">
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="1" name="1">
+ <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
+ </reg32>
+ <!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
+ <reg32 offset="5" name="5">
+ <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="6" name="6">
+ <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
+ </reg32>
+ <!--
+ a7xx adds a few more addresses to the end of the pkt
+ -->
+ <reg64 offset="7" name="7"/>
+ <reg64 offset="9" name="9"/>
+ </stripe>
+ <stripe varset="a7xx_abs_mask_mode" variants="ABS_MASK">
+ <reg32 offset="1" name="ABS_MASK"/>
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="4" name="4">
+ <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
+ </reg32>
+ <!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
+ <reg32 offset="6" name="6">
+ <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="7" name="7">
+ <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
+ </reg32>
+ <!--
+ a7xx adds a few more addresses to the end of the pkt
+ -->
+ <reg64 offset="8" name="8"/>
+ <reg64 offset="10" name="10"/>
+ </stripe>
</domain>
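
A minimal sketch, not taken from the patch itself, of how the first dword
of CP_SET_BIN_DATA5 could be packed for the two layouts modeled above.
The helper is illustrative; only the bit positions come from the bitfield
definitions:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t set_bin_data5_hdr(uint16_t vsc_mask, uint32_t vsc_size,
                                      uint32_t vsc_n, bool abs_mask)
    {
        uint32_t dw = (vsc_size & 0x3f) << 16;  /* VSC_SIZE, b16-b21 */

        if (abs_mask) {
            /* VSC_MASK and VSC_N are ignored; the full 32-bit mask
             * follows as the next ordinal. */
            dw |= 1u << 28;                     /* ABS_MASK */
        } else {
            dw |= vsc_mask;                     /* VSC_MASK, b0-b15; 0 acts as 1 */
            dw |= (vsc_n & 0x1f) << 22;         /* VSC_N, b22-b26 */
        }
        return dw;
    }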
<domain name="CP_SET_BIN_DATA5_OFFSET" width="32">
@@ -1164,23 +1231,42 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
stream is recorded.
</doc>
<reg32 offset="0" name="0">
+ <bitfield name="VSC_MASK" low="0" high="15" type="hex"/>
<!-- equiv to PC_VSTREAM_CONTROL.SIZE on a3xx/a4xx: -->
<bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
<!-- equiv to PC_VSTREAM_CONTROL.N on a3xx/a4xx: -->
<bitfield name="VSC_N" low="22" high="26" type="uint"/>
+ <bitfield name="ABS_MASK" pos="28" type="a7xx_abs_mask_mode" addvariant="yes"/>
</reg32>
- <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
- <reg32 offset="1" name="1">
- <bitfield name="BIN_DATA_OFFSET" low="0" high="31" type="uint"/>
- </reg32>
- <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
- <reg32 offset="2" name="2">
- <bitfield name="BIN_SIZE_OFFSET" low="0" high="31" type="uint"/>
- </reg32>
- <!-- BIN_DATA2_ADDR -> VSC_PIPE[p].DATA2_ADDRESS -->
- <reg32 offset="3" name="3">
- <bitfield name="BIN_DATA2_OFFSET" low="0" high="31" type="uint"/>
- </reg32>
+ <stripe varset="a7xx_abs_mask_mode" variants="NO_ABS_MASK">
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="1" name="1">
+ <bitfield name="BIN_DATA_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_SIZE_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_DATA2_ADDR -> VSC_PIPE[p].DATA2_ADDRESS -->
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_DATA2_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ </stripe>
+ <stripe varset="a7xx_abs_mask_mode" variants="ABS_MASK">
+ <reg32 offset="1" name="ABS_MASK"/>
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_DATA_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_SIZE_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_DATA2_ADDR -> VSC_PIPE[p].DATA2_ADDRESS -->
+ <reg32 offset="4" name="4">
+ <bitfield name="BIN_DATA2_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ </stripe>
</domain>
<domain name="CP_REG_RMW" width="32">
@@ -1198,6 +1284,9 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</doc>
<reg32 offset="0" name="0">
<bitfield name="DST_REG" low="0" high="17" type="hex"/>
+ <bitfield name="DST_SCRATCH" pos="19" type="boolean" varset="chip" variants="A7XX-"/>
+ <!-- skip implied CP_WAIT_FOR_IDLE + CP_WAIT_FOR_ME -->
+ <bitfield name="SKIP_WAIT_FOR_ME" pos="23" type="boolean" varset="chip" variants="A7XX-"/>
<bitfield name="ROTATE" low="24" high="28" type="uint"/>
<bitfield name="SRC1_ADD" pos="29" type="boolean"/>
<bitfield name="SRC1_IS_REG" pos="30" type="boolean"/>
@@ -1348,6 +1437,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="SCRATCH" low="20" high="22" type="uint"/>
<!-- number of registers/dwords copied is CNT + 1. -->
<bitfield name="CNT" low="24" high="26" type="uint"/>
+ <!-- skip implied CP_WAIT_FOR_IDLE + CP_WAIT_FOR_ME -->
+ <bitfield name="SKIP_WAIT_FOR_ME" pos="27" type="boolean" varset="chip" variants="A7XX-"/>
</reg32>
</domain>
@@ -1655,8 +1746,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="WRITE_SAMPLE_COUNT" pos="12" type="boolean"/>
<!-- Write sample count at (iova + 16) -->
<bitfield name="SAMPLE_COUNT_END_OFFSET" pos="13" type="boolean"/>
- <!-- *(iova + 8) = *(iova + 16) - *iova -->
- <bitfield name="WRITE_SAMPLE_COUNT_DIFF" pos="14" type="boolean"/>
+ <!-- *(iova + 8) += *(iova + 16) - *iova; see the sketch below -->
+ <bitfield name="WRITE_ACCUM_SAMPLE_COUNT_DIFF" pos="14" type="boolean"/>
<!-- Next 4 flags are valid to set only when concurrent binning is enabled -->
<!-- Increment 16b BV counter. Valid only in BV pipe -->
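
The WRITE_ACCUM_SAMPLE_COUNT_DIFF rename above reflects that the value is
accumulated rather than stored. As C over the same addresses it reads
(a sketch assuming iova points at the sample-count block; not code from
the patch):

    /* begin count at iova, accumulator at iova + 8, end count at iova + 16 */
    uint64_t *q = (uint64_t *)iova;

    q[1] += q[2] - q[0];    /* accumulate the diff instead of overwriting it */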
@@ -1670,15 +1761,11 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="WRITE_DST" pos="24" type="event_write_dst" addvariant="yes"/>
<!-- Writes into WRITE_DST from WRITE_SRC. RB_DONE_TS requires WRITE_ENABLED. -->
<bitfield name="WRITE_ENABLED" pos="27" type="boolean"/>
+ <bitfield name="IRQ" pos="31" type="boolean"/>
</reg32>
<stripe varset="event_write_dst" variants="EV_DST_RAM">
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_0_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="ADDR_0_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="1" name="1" type="waddress"/>
<reg32 offset="3" name="3">
<bitfield name="PAYLOAD_0" low="0" high="31"/>
</reg32>
@@ -1773,13 +1860,23 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<domain name="CP_SET_MARKER" width="32" varset="chip" prefix="chip" variants="A6XX-">
<doc>Tell CP the current operation mode, indicates save and restore procedure</doc>
+ <enum name="set_marker_mode">
+ <value value="0" name="SET_RENDER_MODE"/>
+ <!-- IFPC - inter-frame power collapse -->
+ <value value="1" name="SET_IFPC_MODE"/>
+ </enum>
+ <enum name="a6xx_ifpc_mode">
+ <value value="0" name="IFPC_ENABLE"/>
+ <value value="1" name="IFPC_DISABLE"/>
+ </enum>
<enum name="a6xx_marker">
- <value value="1" name="RM6_BYPASS"/>
- <value value="2" name="RM6_BINNING"/>
- <value value="4" name="RM6_GMEM"/>
- <value value="5" name="RM6_ENDVIS"/>
- <value value="6" name="RM6_RESOLVE"/>
- <value value="7" name="RM6_YIELD"/>
+ <value value="1" name="RM6_DIRECT_RENDER"/>
+ <value value="2" name="RM6_BIN_VISIBILITY"/>
+ <value value="3" name="RM6_BIN_DIRECT"/>
+ <value value="4" name="RM6_BIN_RENDER_START"/>
+ <value value="5" name="RM6_BIN_END_OF_DRAWS"/>
+ <value value="6" name="RM6_BIN_RESOLVE"/>
+ <value value="7" name="RM6_BIN_RENDER_END"/>
<value value="8" name="RM6_COMPUTE"/>
<value value="0xc" name="RM6_BLIT2DSCALE"/> <!-- no-op (at least on current sqe fw) -->
@@ -1789,23 +1886,40 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
-->
<value value="0xd" name="RM6_IB1LIST_START"/>
<value value="0xe" name="RM6_IB1LIST_END"/>
- <!-- IFPC - inter-frame power collapse -->
- <value value="0x100" name="RM6_IFPC_ENABLE"/>
- <value value="0x101" name="RM6_IFPC_DISABLE"/>
</enum>
<reg32 offset="0" name="0">
+ <!-- if b8 is set, the low bits are interpreted differently (and b4 ignored) -->
+ <bitfield name="MARKER_MODE" pos="8" type="set_marker_mode" addvariant="yes"/>
+
+ <bitfield name="MODE" low="0" high="3" type="a6xx_marker" varset="set_marker_mode" variants="SET_RENDER_MODE"/>
+ <!-- used by preemption to determine if GMEM needs to be saved or not -->
+ <bitfield name="USES_GMEM" pos="4" type="boolean" varset="set_marker_mode" variants="SET_RENDER_MODE"/>
+
+ <bitfield name="IFPC_MODE" pos="0" type="a6xx_ifpc_mode" varset="set_marker_mode" variants="SET_IFPC_MODE"/>
+
<!--
- NOTE: blob driver and some versions of freedreno/turnip set
- b4, which is unused (at least by current sqe fw), but interferes
- with parsing if we extend the size of the bitfield to include
- b8 (only sent by kernel mode driver). Really, the way the
- parsing works in the firmware, only b0-b3 are considered, but
- if b8 is set, the low bits are interpreted differently. To
- model this, without getting confused by spurious b4, this is
- described as two overlapping bitfields:
- -->
- <bitfield name="MODE" low="0" high="8" type="a6xx_marker"/>
- <bitfield name="MARKER" low="0" high="3" type="a6xx_marker"/>
+ CP_SET_MARKER is used with these bits to create a
+ critical section around a workaround for ray tracing.
+ The workaround runs after BVH building and appears to
+ invalidate the RTU's BVH node cache. By hooking the
+ subsequent CP_EVENT_WRITE/CP_DRAW_*/CP_EXEC_CS, it
+ ensures that only one of BR/BV/LPAC executes the
+ workaround at a time, and that no RT draws execute on
+ BV/LPAC while the workaround runs on BR (nor on BV/BR
+ while it runs on LPAC).
+ The blob usage is:
+
+ CP_SET_MARKER(RT_WA_START)
+ ... workaround here ...
+ CP_SET_MARKER(RT_WA_END)
+ ...
+ CP_SET_MARKER(SHADER_USES_RT)
+ CP_DRAW_INDX(...) or CP_EXEC_CS(...)
+ -->
+ <bitfield name="SHADER_USES_RT" pos="9" type="boolean" variants="A7XX-"/>
+ <bitfield name="RT_WA_START" pos="10" type="boolean" variants="A7XX-"/>
+ <bitfield name="RT_WA_END" pos="11" type="boolean" variants="A7XX-"/>
</reg32>
</domain>
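
With the MARKER_MODE split modeled above, the two CP_SET_MARKER payload
encodings reduce to something like the following sketch (helper names are
illustrative; the bit positions and enum values come from this domain):

    /* MARKER_MODE (b8) = SET_RENDER_MODE (0): a6xx_marker in b0-b3,
     * USES_GMEM in b4 */
    static uint32_t set_marker_render(uint32_t rm6_mode, bool uses_gmem)
    {
        return (rm6_mode & 0xf) | ((uint32_t)uses_gmem << 4);
    }

    /* MARKER_MODE (b8) = SET_IFPC_MODE (1): IFPC_MODE in b0
     * (IFPC_ENABLE = 0, IFPC_DISABLE = 1) */
    static uint32_t set_marker_ifpc(bool disable)
    {
        return (1u << 8) | (disable ? 1u : 0u);
    }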
@@ -1832,9 +1946,9 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
If concurrent binning is disabled then BR also does binning so it will also
write the "real" registers in BR.
-->
- <value value="8" name="DRAW_STRM_ADDRESS"/>
- <value value="9" name="DRAW_STRM_SIZE_ADDRESS"/>
- <value value="10" name="PRIM_STRM_ADDRESS"/>
+ <value value="8" name="VSC_PIPE_DATA_DRAW_BASE"/>
+ <value value="9" name="VSC_SIZE_BASE"/>
+ <value value="10" name="VSC_PIPE_DATA_PRIM_BASE"/>
<value value="11" name="UNK_STRM_ADDRESS"/>
<value value="12" name="UNK_STRM_SIZE_ADDRESS"/>
@@ -1935,11 +2049,11 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
a bitmask of which modes pass the test.
-->
- <!-- RM6_BINNING -->
+ <!-- RM6_BIN_VISIBILITY -->
<bitfield name="BINNING" pos="25" variants="RENDER_MODE" type="boolean"/>
<!-- all others -->
<bitfield name="GMEM" pos="26" variants="RENDER_MODE" type="boolean"/>
- <!-- RM6_BYPASS -->
+ <!-- RM6_DIRECT_RENDER -->
<bitfield name="SYSMEM" pos="27" variants="RENDER_MODE" type="boolean"/>
<bitfield name="BV" pos="25" variants="THREAD_MODE" type="boolean"/>
@@ -2014,10 +2128,10 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<domain name="CP_SET_AMBLE" width="32">
<doc>
- Used by the userspace and kernel drivers to set various IB's
- which are executed during context save/restore for handling
- state that isn't restored by the context switch routine itself.
- </doc>
+ Used by the userspace and kernel drivers to set various IBs
+ that are executed during context save/restore to handle
+ state that isn't restored by the context switch routine itself.
+ </doc>
<enum name="amble_type">
<value name="PREAMBLE_AMBLE_TYPE" value="0">
<doc>Executed unconditionally when switching back to the context.</doc>
@@ -2087,12 +2201,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<value name="UNK_EVENT_WRITE" value="0x4"/>
<doc>
Tracks GRAS_LRZ_CNTL::GREATER, GRAS_LRZ_CNTL::DIR, and
- GRAS_LRZ_DEPTH_VIEW with previous values, and if one of
+ GRAS_LRZ_VIEW_INFO with previous values, and if one of
the following is true:
- GRAS_LRZ_CNTL::GREATER has changed
- GRAS_LRZ_CNTL::DIR has changed, the old value is not
CUR_DIR_GE, and the new value is not CUR_DIR_DISABLED
- - GRAS_LRZ_DEPTH_VIEW has changed
+ - GRAS_LRZ_VIEW_INFO has changed
then it does a LRZ_FLUSH with GRAS_LRZ_CNTL::ENABLE
forced to 1.
Only exists in a650_sqe.fw.
@@ -2207,7 +2321,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<domain name="CP_MEM_TO_SCRATCH_MEM" width="32">
<doc>
- Best guess is that it is a faster way to fetch all the VSC_STATE registers
+ Best guess is that it is a faster way to fetch all the VSC_CHANNEL_VISIBILITY registers
and keep them in local scratch memory instead of fetching them each
time IBs are skipped.
</doc>
@@ -2260,6 +2374,16 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
+<domain name="CP_SCOPE_CNTL" width="32">
+ <enum name="cp_scope">
+ <value value="0" name="INTERRUPTS"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="DISABLE_PREEMPTION" pos="0" type="boolean"/>
+ <bitfield low="28" high="31" name="SCOPE" type="cp_scope"/>
+ </reg32>
+</domain>
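
As modeled above, the single CP_SCOPE_CNTL dword could be packed as in
this sketch (the scope value is the INTERRUPTS enum from this domain):

    /* SCOPE in b28-b31 (INTERRUPTS = 0), DISABLE_PREEMPTION in b0 */
    uint32_t dw = (0u << 28) | (disable_preemption ? 1u : 0u);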
+
<domain name="CP_INDIRECT_BUFFER" width="32" varset="chip" prefix="chip" variants="A5XX-">
<reg64 offset="0" name="IB_BASE" type="address"/>
<reg32 offset="2" name="2">
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index d2c8c46bb041..4e5ac0f25dea 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -26,6 +26,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00028" name="CTRL_1"/>
<reg32 offset="0x0002c" name="CTRL_2"/>
<reg32 offset="0x00030" name="CTRL_3"/>
+ <reg32 offset="0x001b0" name="CTRL_5"/>
<reg32 offset="0x00034" name="LANE_CFG0"/>
<reg32 offset="0x00038" name="LANE_CFG1"/>
<reg32 offset="0x0003c" name="PLL_CNTRL"/>
@@ -191,11 +192,24 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x01b0" name="COMMON_STATUS_ONE"/>
<reg32 offset="0x01b4" name="COMMON_STATUS_TWO"/>
<reg32 offset="0x01b8" name="BAND_SEL_CAL"/>
+ <!--
+ Starting with SM8750, this offset moved from 0x01bc to 0x01cc; however,
+ we keep only one register map. That is not a problem so far, because
+ this register is unused. The register map should be split once the
+ register is actually needed. The entries are commented out to prevent
+ any misuse due to the change in offset.
<reg32 offset="0x01bc" name="ICODE_ACCUM_STATUS_LOW"/>
+ <reg32 offset="0x01cc" name="ICODE_ACCUM_STATUS_LOW"/>
+ -->
<reg32 offset="0x01c0" name="ICODE_ACCUM_STATUS_HIGH"/>
<reg32 offset="0x01c4" name="FD_OUT_LOW"/>
<reg32 offset="0x01c8" name="FD_OUT_HIGH"/>
+ <!--
+ Starting with SM8750, this offset moved from 0x01cc to 0x01bc; however,
+ we keep only one register map. See the comment above.
<reg32 offset="0x01cc" name="ALOG_OBSV_BUS_STATUS_1"/>
+ <reg32 offset="0x01bc" name="ALOG_OBSV_BUS_STATUS_1"/>
+ -->
<reg32 offset="0x01d0" name="PLL_MISC_CONFIG"/>
<reg32 offset="0x01d4" name="FLL_CONFIG"/>
<reg32 offset="0x01d8" name="FLL_FREQ_ACQ_TIME"/>
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index c183b1112bc4..0b756da2fec2 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -91,20 +91,15 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
static struct drm_framebuffer *
mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info;
-
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return ERR_PTR(-EINVAL);
-
if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
return ERR_PTR(-EINVAL);
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index e5d37eee4301..e97e39abf3a2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1839,7 +1839,7 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
backlight = nv_connector->backlight;
if (backlight && backlight->uses_dpcd)
drm_edp_backlight_enable(&nv_connector->aux, &backlight->edp_info,
- (u16)backlight->dev->props.brightness);
+ backlight->dev->props.brightness);
#endif
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 9aae26eb7d8f..4a75d146a171 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -245,7 +245,7 @@ nv50_backlight_init(struct nouveau_backlight *bl,
if (nv_conn->type == DCB_CONNECTOR_eDP) {
int ret;
- u16 current_level;
+ u32 current_level;
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 current_mode;
@@ -261,8 +261,9 @@ nv50_backlight_init(struct nouveau_backlight *bl,
NV_DEBUG(drm, "DPCD backlight controls supported on %s\n",
nv_conn->base.name);
- ret = drm_edp_backlight_init(&nv_conn->aux, &bl->edp_info, 0, edp_dpcd,
- &current_level, &current_mode);
+ ret = drm_edp_backlight_init(&nv_conn->aux, &bl->edp_info,
+ 0, 0, edp_dpcd,
+ &current_level, &current_mode, false);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index add006fc8d81..805d0a87aa54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -253,6 +253,7 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
int
nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb)
@@ -260,7 +261,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct drm_framebuffer *fb;
- const struct drm_format_info *info;
unsigned int height, i;
uint32_t tile_mode;
uint8_t kind;
@@ -295,8 +295,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
kind = nvbo->kind;
}
- info = drm_get_format_info(dev, mode_cmd);
-
for (i = 0; i < info->num_planes; i++) {
height = drm_format_info_plane_height(info,
mode_cmd->height,
@@ -320,7 +318,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
fb->obj[0] = gem;
ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
@@ -332,6 +330,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_framebuffer *fb;
@@ -342,7 +341,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
if (!gem)
return ERR_PTR(-ENOENT);
- ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
+ ret = nouveau_framebuffer_new(dev, info, mode_cmd, gem, &fb);
if (ret == 0)
return fb;
@@ -495,7 +494,7 @@ nouveau_display_hpd_work(struct work_struct *work)
if (first_changed_connector)
drm_connector_put(first_changed_connector);
- pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
noop:
pm_runtime_put_autosuspend(dev->dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 1f506f8b289c..470e0910d484 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -8,8 +8,11 @@
#include <drm/drm_framebuffer.h>
+struct drm_format_info;
+
int
nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb);
@@ -67,5 +70,6 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb, uint32_t *tile_mode,
struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
+ const struct drm_format_info *,
const struct drm_mode_fb_cmd2 *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index 41b7c608c905..edbbda78bac9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -189,7 +189,7 @@ nouveau_exec_job_timeout(struct nouveau_job *job)
NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
chan->chid);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static const struct nouveau_job_ops nouveau_exec_job_ops = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 6ded8c2b6d3b..9f345a008717 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -38,22 +38,16 @@
static const struct dma_fence_ops nouveau_fence_ops_uevent;
static const struct dma_fence_ops nouveau_fence_ops_legacy;
-static inline struct nouveau_fence *
-from_fence(struct dma_fence *fence)
-{
- return container_of(fence, struct nouveau_fence, base);
-}
-
static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}
-static int
+static bool
nouveau_fence_signal(struct nouveau_fence *fence)
{
- int drop = 0;
+ bool drop = false;
dma_fence_signal_locked(&fence->base);
list_del(&fence->head);
@@ -63,7 +57,7 @@ nouveau_fence_signal(struct nouveau_fence *fence)
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
if (!--fctx->notify_ref)
- drop = 1;
+ drop = true;
}
dma_fence_put(&fence->base);
@@ -77,19 +71,17 @@ nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
fence->ops != &nouveau_fence_ops_uevent)
return NULL;
- return from_fence(fence);
+ return to_nouveau_fence(fence);
}
void
nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
- struct nouveau_fence *fence;
+ struct nouveau_fence *fence, *tmp;
unsigned long flags;
spin_lock_irqsave(&fctx->lock, flags);
- while (!list_empty(&fctx->pending)) {
- fence = list_entry(fctx->pending.next, typeof(*fence), head);
-
+ list_for_each_entry_safe(fence, tmp, &fctx->pending, head) {
if (error && !dma_fence_is_signaled_locked(&fence->base))
dma_fence_set_error(&fence->base, error);
@@ -127,23 +119,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}
-static int
+static void
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
- struct nouveau_fence *fence;
- int drop = 0;
+ struct nouveau_fence *fence, *tmp;
+ bool drop = false;
u32 seq = fctx->read(chan);
- while (!list_empty(&fctx->pending)) {
- fence = list_entry(fctx->pending.next, typeof(*fence), head);
-
+ list_for_each_entry_safe(fence, tmp, &fctx->pending, head) {
if ((int)(seq - fence->base.seqno) < 0)
break;
- drop |= nouveau_fence_signal(fence);
+ if (nouveau_fence_signal(fence))
+ drop = true;
}
- return drop;
+ if (drop)
+ nvif_event_block(&fctx->event);
}
static void
@@ -151,22 +143,16 @@ nouveau_fence_uevent_work(struct work_struct *work)
{
struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
uevent_work);
+ struct nouveau_channel *chan;
+ struct nouveau_fence *fence;
unsigned long flags;
- int drop = 0;
spin_lock_irqsave(&fctx->lock, flags);
- if (!list_empty(&fctx->pending)) {
- struct nouveau_fence *fence;
- struct nouveau_channel *chan;
-
- fence = list_entry(fctx->pending.next, typeof(*fence), head);
+ fence = list_first_entry_or_null(&fctx->pending, typeof(*fence), head);
+ if (fence) {
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
- if (nouveau_fence_update(chan, fctx))
- drop = 1;
+ nouveau_fence_update(chan, fctx);
}
- if (drop)
- nvif_event_block(&fctx->event);
-
spin_unlock_irqrestore(&fctx->lock, flags);
}
@@ -246,9 +232,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
return -ENODEV;
}
- if (nouveau_fence_update(chan, fctx))
- nvif_event_block(&fctx->event);
-
+ nouveau_fence_update(chan, fctx);
list_add_tail(&fence->head, &fctx->pending);
spin_unlock_irq(&fctx->lock);
}
@@ -256,31 +240,44 @@ nouveau_fence_emit(struct nouveau_fence *fence)
return ret;
}
+void
+nouveau_fence_cancel(struct nouveau_fence *fence)
+{
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fctx->lock, flags);
+ if (!dma_fence_is_signaled_locked(&fence->base)) {
+ dma_fence_set_error(&fence->base, -ECANCELED);
+ if (nouveau_fence_signal(fence))
+ nvif_event_block(&fctx->event);
+ }
+ spin_unlock_irqrestore(&fctx->lock, flags);
+}
+
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
- if (fence->base.ops == &nouveau_fence_ops_legacy ||
- fence->base.ops == &nouveau_fence_ops_uevent) {
- struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- struct nouveau_channel *chan;
- unsigned long flags;
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ struct nouveau_channel *chan;
+ unsigned long flags;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
- return true;
+ if (dma_fence_is_signaled(&fence->base))
+ return true;
+
+ spin_lock_irqsave(&fctx->lock, flags);
+ chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+ if (chan)
+ nouveau_fence_update(chan, fctx);
+ spin_unlock_irqrestore(&fctx->lock, flags);
- spin_lock_irqsave(&fctx->lock, flags);
- chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
- if (chan && nouveau_fence_update(chan, fctx))
- nvif_event_block(&fctx->event);
- spin_unlock_irqrestore(&fctx->lock, flags);
- }
return dma_fence_is_signaled(&fence->base);
}
static long
nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
unsigned long t = jiffies, timeout = t + wait;
@@ -460,7 +457,7 @@ static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
return !fctx->dead ? fctx->name : "dead channel";
@@ -474,7 +471,7 @@ static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
*/
static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
struct nouveau_channel *chan;
bool ret = false;
@@ -490,7 +487,7 @@ static bool nouveau_fence_is_signaled(struct dma_fence *f)
static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
/*
* caller should have a reference on the fence,
@@ -515,7 +512,7 @@ static bool nouveau_fence_no_signaling(struct dma_fence *f)
static void nouveau_fence_release(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
@@ -533,7 +530,7 @@ static const struct dma_fence_ops nouveau_fence_ops_legacy = {
static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
bool ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 6a983dd9f7b9..9957a919bd38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -17,12 +17,19 @@ struct nouveau_fence {
unsigned long timeout;
};
+static inline struct nouveau_fence *
+to_nouveau_fence(struct dma_fence *fence)
+{
+ return container_of(fence, struct nouveau_fence, base);
+}
+
int nouveau_fence_create(struct nouveau_fence **, struct nouveau_channel *);
int nouveau_fence_new(struct nouveau_fence **, struct nouveau_channel *);
void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *);
bool nouveau_fence_done(struct nouveau_fence *);
+void nouveau_fence_cancel(struct nouveau_fence *fence);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index d326e55d2d24..0cc0bc9f9952 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -11,6 +11,7 @@
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_sched.h"
+#include "nouveau_chan.h"
#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000
@@ -87,7 +88,8 @@ nouveau_job_init(struct nouveau_job *job,
}
ret = drm_sched_job_init(&job->base, &sched->entity,
- args->credits, NULL);
+ args->credits, NULL,
+ job->file_priv->client_id);
if (ret)
goto err_free_chains;
@@ -120,11 +122,9 @@ nouveau_job_done(struct nouveau_job *job)
{
struct nouveau_sched *sched = job->sched;
- spin_lock(&sched->job.list.lock);
+ spin_lock(&sched->job_list.lock);
list_del(&job->entry);
- spin_unlock(&sched->job.list.lock);
-
- wake_up(&sched->job.wq);
+ spin_unlock(&sched->job_list.lock);
}
void
@@ -305,9 +305,9 @@ nouveau_job_submit(struct nouveau_job *job)
}
/* Submit was successful; add the job to the schedulers job list. */
- spin_lock(&sched->job.list.lock);
- list_add(&job->entry, &sched->job.list.head);
- spin_unlock(&sched->job.list.lock);
+ spin_lock(&sched->job_list.lock);
+ list_add(&job->entry, &sched->job_list.head);
+ spin_unlock(&sched->job_list.lock);
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
@@ -370,7 +370,7 @@ nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
{
struct drm_gpu_scheduler *sched = sched_job->sched;
struct nouveau_job *job = to_nouveau_job(sched_job);
- enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_NOMINAL;
+ enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_RESET;
drm_sched_stop(sched, sched_job);
@@ -392,10 +392,23 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job)
nouveau_job_fini(job);
}
+static void
+nouveau_sched_cancel_job(struct drm_sched_job *sched_job)
+{
+ struct nouveau_fence *fence;
+ struct nouveau_job *job;
+
+ job = to_nouveau_job(sched_job);
+ fence = to_nouveau_fence(job->done_fence);
+
+ nouveau_fence_cancel(fence);
+}
+
static const struct drm_sched_backend_ops nouveau_sched_ops = {
.run_job = nouveau_sched_run_job,
.timedout_job = nouveau_sched_timedout_job,
.free_job = nouveau_sched_free_job,
+ .cancel_job = nouveau_sched_cancel_job,
};
static int
@@ -445,9 +458,8 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
goto fail_sched;
mutex_init(&sched->mutex);
- spin_lock_init(&sched->job.list.lock);
- INIT_LIST_HEAD(&sched->job.list.head);
- init_waitqueue_head(&sched->job.wq);
+ spin_lock_init(&sched->job_list.lock);
+ INIT_LIST_HEAD(&sched->job_list.head);
return 0;
@@ -481,16 +493,12 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
return 0;
}
-
static void
nouveau_sched_fini(struct nouveau_sched *sched)
{
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
- rmb(); /* for list_empty to work without lock */
- wait_event(sched->job.wq, list_empty(&sched->job.list.head));
-
drm_sched_entity_fini(entity);
drm_sched_fini(drm_sched);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index 20cd1da8db73..b98c3f0bef30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -103,12 +103,9 @@ struct nouveau_sched {
struct mutex mutex;
struct {
- struct {
- struct list_head head;
- spinlock_t lock;
- } list;
- struct wait_queue_head wq;
- } job;
+ struct list_head head;
+ spinlock_t lock;
+ } job_list;
};
int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 48f105239f42..ddfc46bc1b3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1019,8 +1019,8 @@ bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
u64 end = addr + range;
again:
- spin_lock(&sched->job.list.lock);
- list_for_each_entry(__job, &sched->job.list.head, entry) {
+ spin_lock(&sched->job_list.lock);
+ list_for_each_entry(__job, &sched->job_list.head, entry) {
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);
list_for_each_op(op, &bind_job->ops) {
@@ -1030,7 +1030,7 @@ again:
if (!(end <= op_addr || addr >= op_end)) {
nouveau_uvmm_bind_job_get(bind_job);
- spin_unlock(&sched->job.list.lock);
+ spin_unlock(&sched->job_list.lock);
wait_for_completion(&bind_job->complete);
nouveau_uvmm_bind_job_put(bind_job);
goto again;
@@ -1038,7 +1038,7 @@ again:
}
}
}
- spin_unlock(&sched->job.list.lock);
+ spin_unlock(&sched->job_list.lock);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index 4e09985424b6..e5bbd8563007 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -104,7 +104,7 @@ nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_c
if (cctx) {
refcount_inc(&cctx->refs);
*pcctx = cctx;
- mutex_unlock(&chan->cgrp->mutex);
+ mutex_unlock(&cgrp->mutex);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
index 7e9e2d3564da..6e63df816d85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
@@ -648,7 +648,7 @@ r535_conn_new(struct nvkm_disp *disp, u32 id)
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl));
if (IS_ERR(ctrl))
- return (void *)ctrl;
+ return ERR_CAST(ctrl);
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(id);
diff --git a/drivers/gpu/drm/nova/nova.rs b/drivers/gpu/drm/nova/nova.rs
index 902876aa14d1..64fd670e99e1 100644
--- a/drivers/gpu/drm/nova/nova.rs
+++ b/drivers/gpu/drm/nova/nova.rs
@@ -12,7 +12,7 @@ use crate::driver::NovaDriver;
kernel::module_auxiliary_driver! {
type: NovaDriver,
name: "Nova",
- author: "Danilo Krummrich",
+ authors: ["Danilo Krummrich"],
description: "Nova GPU driver",
license: "GPL v2",
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 533f70e8a4a6..cf055815077c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -524,7 +524,7 @@ static void dispc_save_context(struct dispc_device *dispc)
DSSDBG("context saved\n");
}
-static void dispc_restore_context(struct dispc_device *dispc)
+static noinline_for_stack void dispc_restore_context(struct dispc_device *dispc)
{
int i, j;
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 6eff97a09160..9f86db774c39 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -562,7 +562,6 @@ static const struct drm_bridge_funcs dpi_bridge_funcs = {
static void dpi_bridge_init(struct dpi_data *dpi)
{
- dpi->bridge.funcs = &dpi_bridge_funcs;
dpi->bridge.of_node = dpi->pdev->dev.of_node;
dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
@@ -707,9 +706,9 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
u32 datalines;
int r;
- dpi = devm_kzalloc(&pdev->dev, sizeof(*dpi), GFP_KERNEL);
- if (!dpi)
- return -ENOMEM;
+ dpi = devm_drm_bridge_alloc(&pdev->dev, struct dpi_data, bridge, &dpi_bridge_funcs);
+ if (IS_ERR(dpi))
+ return PTR_ERR(dpi);
ep = of_graph_get_next_port_endpoint(port, NULL);
if (!ep)
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 91ee63bfe0bc..b129e5a8d791 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4701,7 +4701,6 @@ static const struct drm_bridge_funcs dsi_bridge_funcs = {
static void dsi_bridge_init(struct dsi_data *dsi)
{
- dsi->bridge.funcs = &dsi_bridge_funcs;
dsi->bridge.of_node = dsi->host.dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
@@ -4894,9 +4893,9 @@ static int dsi_probe(struct platform_device *pdev)
unsigned int i;
int r;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct dsi_data, bridge, &dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->dev = dev;
dev_set_drvdata(dev, dsi);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index a3b22952fdc3..3cd612af2449 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -505,7 +505,6 @@ static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
static void hdmi4_bridge_init(struct omap_hdmi *hdmi)
{
- hdmi->bridge.funcs = &hdmi4_bridge_funcs;
hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
@@ -761,9 +760,9 @@ static int hdmi4_probe(struct platform_device *pdev)
int irq;
int r;
- hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(&pdev->dev, struct omap_hdmi, bridge, &hdmi4_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
hdmi->pdev = pdev;
@@ -774,25 +773,24 @@ static int hdmi4_probe(struct platform_device *pdev)
r = hdmi4_probe_of(hdmi);
if (r)
- goto err_free;
+ return r;
r = hdmi_wp_init(pdev, &hdmi->wp, 4);
if (r)
- goto err_free;
+ return r;
r = hdmi_phy_init(pdev, &hdmi->phy, 4);
if (r)
- goto err_free;
+ return r;
r = hdmi4_core_init(pdev, &hdmi->core);
if (r)
- goto err_free;
+ return r;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
- r = -ENODEV;
- goto err_free;
+ return -ENODEV;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -800,7 +798,7 @@ static int hdmi4_probe(struct platform_device *pdev)
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err_free;
+ return r;
}
hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
@@ -808,7 +806,7 @@ static int hdmi4_probe(struct platform_device *pdev)
r = PTR_ERR(hdmi->vdda_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA regulator\n");
- goto err_free;
+ return r;
}
pm_runtime_enable(&pdev->dev);
@@ -827,8 +825,6 @@ err_uninit_output:
hdmi4_uninit_output(hdmi);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
-err_free:
- kfree(hdmi);
return r;
}
@@ -841,8 +837,6 @@ static void hdmi4_remove(struct platform_device *pdev)
hdmi4_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
-
- kfree(hdmi);
}
static const struct of_device_id hdmi_of_match[] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 0c98444d39a9..5636b3dfec1c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -480,7 +480,6 @@ static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
static void hdmi5_bridge_init(struct omap_hdmi *hdmi)
{
- hdmi->bridge.funcs = &hdmi5_bridge_funcs;
hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
@@ -727,9 +726,9 @@ static int hdmi5_probe(struct platform_device *pdev)
int irq;
int r;
- hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(&pdev->dev, struct omap_hdmi, bridge, &hdmi5_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
hdmi->pdev = pdev;
@@ -740,25 +739,24 @@ static int hdmi5_probe(struct platform_device *pdev)
r = hdmi5_probe_of(hdmi);
if (r)
- goto err_free;
+ return r;
r = hdmi_wp_init(pdev, &hdmi->wp, 5);
if (r)
- goto err_free;
+ return r;
r = hdmi_phy_init(pdev, &hdmi->phy, 5);
if (r)
- goto err_free;
+ return r;
r = hdmi5_core_init(pdev, &hdmi->core);
if (r)
- goto err_free;
+ return r;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
- r = -ENODEV;
- goto err_free;
+ return -ENODEV;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -766,7 +764,7 @@ static int hdmi5_probe(struct platform_device *pdev)
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err_free;
+ return r;
}
hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
@@ -774,7 +772,7 @@ static int hdmi5_probe(struct platform_device *pdev)
r = PTR_ERR(hdmi->vdda_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA regulator\n");
- goto err_free;
+ return r;
}
pm_runtime_enable(&pdev->dev);
@@ -793,8 +791,6 @@ err_uninit_output:
hdmi5_uninit_output(hdmi);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
-err_free:
- kfree(hdmi);
return r;
}
@@ -807,8 +803,6 @@ static void hdmi5_remove(struct platform_device *pdev)
hdmi5_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
-
- kfree(hdmi);
}
static const struct of_device_id hdmi_of_match[] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index e78826e4b560..df4cbc683e2c 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -284,7 +284,6 @@ static const struct drm_bridge_funcs sdi_bridge_funcs = {
static void sdi_bridge_init(struct sdi_device *sdi)
{
- sdi->bridge.funcs = &sdi_bridge_funcs;
sdi->bridge.of_node = sdi->pdev->dev.of_node;
sdi->bridge.type = DRM_MODE_CONNECTOR_LVDS;
@@ -344,21 +343,19 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
u32 datapairs;
int r;
- sdi = kzalloc(sizeof(*sdi), GFP_KERNEL);
- if (!sdi)
- return -ENOMEM;
+ sdi = devm_drm_bridge_alloc(&pdev->dev, struct sdi_device, bridge, &sdi_bridge_funcs);
+ if (IS_ERR(sdi))
+ return PTR_ERR(sdi);
ep = of_graph_get_next_port_endpoint(port, NULL);
- if (!ep) {
- r = 0;
- goto err_free;
- }
+ if (!ep)
+ return 0;
r = of_property_read_u32(ep, "datapairs", &datapairs);
of_node_put(ep);
if (r) {
DSSERR("failed to parse datapairs\n");
- goto err_free;
+ return r;
}
sdi->datapairs = datapairs;
@@ -372,19 +369,14 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
r = PTR_ERR(sdi->vdds_sdi_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDS_SDI regulator\n");
- goto err_free;
+ return r;
}
r = sdi_init_output(sdi);
if (r)
- goto err_free;
+ return r;
return 0;
-
-err_free:
- kfree(sdi);
-
- return r;
}
void sdi_uninit_port(struct device_node *port)
@@ -395,5 +387,4 @@ void sdi_uninit_port(struct device_node *port)
return;
sdi_uninit_output(sdi);
- kfree(sdi);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 50349518eda1..9b5d53dc361e 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -664,7 +664,6 @@ static const struct drm_bridge_funcs venc_bridge_funcs = {
static void venc_bridge_init(struct venc_device *venc)
{
- venc->bridge.funcs = &venc_bridge_funcs;
venc->bridge.of_node = venc->pdev->dev.of_node;
venc->bridge.ops = DRM_BRIDGE_OP_MODES;
venc->bridge.type = DRM_MODE_CONNECTOR_SVIDEO;
@@ -809,9 +808,9 @@ static int venc_probe(struct platform_device *pdev)
struct venc_device *venc;
int r;
- venc = kzalloc(sizeof(*venc), GFP_KERNEL);
- if (!venc)
- return -ENOMEM;
+ venc = devm_drm_bridge_alloc(&pdev->dev, struct venc_device, bridge, &venc_bridge_funcs);
+ if (IS_ERR(venc))
+ return PTR_ERR(venc);
venc->pdev = pdev;
@@ -824,26 +823,24 @@ static int venc_probe(struct platform_device *pdev)
venc->config = &venc_config_pal_trm;
venc->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(venc->base)) {
- r = PTR_ERR(venc->base);
- goto err_free;
- }
+ if (IS_ERR(venc->base))
+ return PTR_ERR(venc->base);
venc->vdda_dac_reg = devm_regulator_get(&pdev->dev, "vdda");
if (IS_ERR(venc->vdda_dac_reg)) {
r = PTR_ERR(venc->vdda_dac_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA_DAC regulator\n");
- goto err_free;
+ return r;
}
r = venc_get_clocks(venc);
if (r)
- goto err_free;
+ return r;
r = venc_probe_of(venc);
if (r)
- goto err_free;
+ return r;
pm_runtime_enable(&pdev->dev);
@@ -861,8 +858,6 @@ err_uninit_output:
venc_uninit_output(venc);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
-err_free:
- kfree(venc);
return r;
}
@@ -875,8 +870,6 @@ static void venc_remove(struct platform_device *pdev)
venc_uninit_output(venc);
pm_runtime_disable(&pdev->dev);
-
- kfree(venc);
}
static __maybe_unused int venc_runtime_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 449d521c78fe..bb3105556f19 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -335,10 +335,9 @@ void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
#endif
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
+ struct drm_file *file, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev,
- mode_cmd);
unsigned int num_planes = info->num_planes;
struct drm_gem_object *bos[4];
struct drm_framebuffer *fb;
@@ -352,7 +351,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
}
}
- fb = omap_framebuffer_init(dev, mode_cmd, bos);
+ fb = omap_framebuffer_init(dev, info, mode_cmd, bos);
if (IS_ERR(fb))
goto error;
@@ -366,9 +365,9 @@ error:
}
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
- const struct drm_format_info *format = NULL;
struct omap_framebuffer *omap_fb = NULL;
struct drm_framebuffer *fb = NULL;
unsigned int pitch = mode_cmd->pitches[0];
@@ -378,14 +377,12 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
- format = drm_get_format_info(dev, mode_cmd);
-
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i] == mode_cmd->pixel_format)
break;
}
- if (!format || i == ARRAY_SIZE(formats)) {
+ if (i == ARRAY_SIZE(formats)) {
dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
@@ -399,7 +396,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
}
fb = &omap_fb->base;
- omap_fb->format = format;
+ omap_fb->format = info;
mutex_init(&omap_fb->lock);
/*
@@ -407,23 +404,23 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
* that the two planes of multiplane formats need the same number of
* bytes per pixel.
*/
- if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
+ if (info->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n");
ret = -EINVAL;
goto fail;
}
- if (pitch % format->cpp[0]) {
+ if (pitch % info->cpp[0]) {
dev_dbg(dev->dev,
"buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n",
- pitch, format->cpp[0]);
+ pitch, info->cpp[0]);
ret = -EINVAL;
goto fail;
}
- for (i = 0; i < format->num_planes; i++) {
+ for (i = 0; i < info->num_planes; i++) {
struct plane *plane = &omap_fb->planes[i];
- unsigned int vsub = i == 0 ? 1 : format->vsub;
+ unsigned int vsub = i == 0 ? 1 : info->vsub;
unsigned int size;
size = pitch * mode_cmd->height / vsub;
@@ -440,7 +437,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
plane->dma_addr = 0;
}
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
if (ret) {
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
index b75f0b5ef1d8..e6010302a22b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.h
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -13,6 +13,7 @@ struct drm_connector;
struct drm_device;
struct drm_file;
struct drm_framebuffer;
+struct drm_format_info;
struct drm_gem_object;
struct drm_mode_fb_cmd2;
struct drm_plane_state;
@@ -20,8 +21,10 @@ struct omap_overlay_info;
struct seq_file;
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+ struct drm_file *file, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 7b6396890681..948af7ec1130 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -197,7 +197,10 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
goto fail;
}
- fb = omap_framebuffer_init(dev, &mode_cmd, &bo);
+ fb = omap_framebuffer_init(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, &bo);
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
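
The omap_fb changes track a core API update: the caller now looks up the format description once and threads it through, and drm_get_format_info() takes the fourcc and modifier directly instead of the whole drm_mode_fb_cmd2. A sketch of the lookup as the fbdev path above uses it, assuming mode_cmd is already filled in:

    const struct drm_format_info *info;

    info = drm_get_format_info(dev, mode_cmd.pixel_format,
                               mode_cmd.modifier[0]);
    fb = omap_framebuffer_init(dev, info, &mode_cmd, &bo);
    if (IS_ERR(fb))
            return PTR_ERR(fb);
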
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index b9c67e4ca360..381552bfb409 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -8,7 +8,6 @@
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
-#include <linux/pfn_t.h>
#include <linux/vmalloc.h>
#include <drm/drm_prime.h>
@@ -371,8 +370,7 @@ static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- return vmf_insert_mixed(vma, vmf->address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ return vmf_insert_mixed(vma, vmf->address, pfn);
}
/* Special handling for the case of faulting in 2d tiled buffers */
@@ -467,8 +465,7 @@ static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
pfn, pfn << PAGE_SHIFT);
for (i = n; i > 0; i--) {
- ret = vmf_insert_mixed(vma,
- vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vmf_insert_mixed(vma, vaddr, pfn);
if (ret & VM_FAULT_ERROR)
break;
pfn += priv->usergart[fmt].stride_pfn;
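
The omap_gem hunks follow the removal of the pfn_t wrapper type: vmf_insert_mixed() now takes a bare unsigned long pfn, so the __pfn_to_pfn_t(pfn, PFN_DEV) conversion and the <linux/pfn_t.h> include go away. A sketch of a one-page fault handler in the new style (foo_gem_fault and base_pfn are illustrative):

    static vm_fault_t foo_gem_fault(struct vm_fault *vmf, unsigned long base_pfn)
    {
            /* page offset of the fault within the mapping */
            pgoff_t pgoff = (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;

            return vmf_insert_mixed(vmf->vma, vmf->address, base_pfn + pgoff);
    }
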
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index cfebb08e8a62..09b9f7ff9340 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -193,6 +193,16 @@ config DRM_PANEL_HIMAX_HX83112A
Say Y here if you want to enable support for Himax HX83112A-based
display panels, such as the one found in the Fairphone 4 smartphone.
+config DRM_PANEL_HIMAX_HX83112B
+ tristate "Himax HX83112B-based DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_KMS_HELPER
+ help
+ Say Y here if you want to enable support for Himax HX83112B-based
+ display panels, such as the one found in the Fairphone 3 smartphone.
+
config DRM_PANEL_HIMAX_HX8394
tristate "HIMAX HX8394 MIPI-DSI LCD panels"
depends on OF
@@ -647,6 +657,32 @@ config DRM_PANEL_RAYDIUM_RM69380
This panel controller can be found in the Lenovo Xiaoxin Pad Pro 2021
in combination with an EDO OLED panel.
+config DRM_PANEL_RENESAS_R61307
+ tristate "Renesas R61307 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the KOE tx13d100vm0eaa
+ IPS-LCD module with the Renesas R61307 IC. The panel has a 1024x768
+ resolution and uses 24 bit RGB per pixel.
+
+ This panel controller can be found in the LG Optimus Vu P895 smartphone
+ in combination with an LCD panel.
+
+config DRM_PANEL_RENESAS_R69328
+ tristate "Renesas R69328 720x1280 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the JDI dx12d100vm0eaa
+ IPS-LCD module with the Renesas R69328 IC. The panel has a 720x1280
+ resolution and uses 24 bit RGB per pixel.
+
+ This panel controller can be found in the LG Optimus 4X P880 smartphone
+ in combination with an LCD panel.
+
config DRM_PANEL_RONBO_RB070D30
tristate "Ronbo Electronics RB070D30 panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 714cbac830e3..957555b49996 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8279) += panel-himax-hx8279.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o
+obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112B) += panel-himax-hx83112b.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
@@ -65,6 +66,8 @@ obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67200) += panel-raydium-rm67200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o
+obj-$(CONFIG_DRM_PANEL_RENESAS_R61307) += panel-renesas-r61307.o
+obj-$(CONFIG_DRM_PANEL_RENESAS_R69328) += panel-renesas-r69328.o
obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS581VF01) += panel-samsung-ams581vf01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS639RQ08) += panel-samsung-ams639rq08.o
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
index df746baae301..4a8560b4b899 100644
--- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -847,9 +847,6 @@ static int panel_add(struct panel_info *pinfo)
"failed to get enable gpio\n");
}
- drm_panel_init(&pinfo->base, dev, &panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&pinfo->base);
if (ret)
return ret;
@@ -865,9 +862,11 @@ static int panel_probe(struct mipi_dsi_device *dsi)
const struct panel_desc *desc;
int err;
- pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
+ pinfo = devm_drm_panel_alloc(&dsi->dev, __typeof(*pinfo), base,
+ &panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(pinfo))
+ return PTR_ERR(pinfo);
desc = of_device_get_match_data(&dsi->dev);
dsi->mode_flags = desc->mode_flags;
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 3e5b0d8636d0..d5fe105bdbdd 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1720,8 +1720,6 @@ static int boe_panel_add(struct boe_panel *boe)
boe->base.prepare_prev_first = true;
- drm_panel_init(&boe->base, dev, &boe_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
err = of_drm_get_panel_orientation(dev->of_node, &boe->orientation);
if (err < 0) {
dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
@@ -1746,9 +1744,11 @@ static int boe_panel_probe(struct mipi_dsi_device *dsi)
int ret;
const struct panel_desc *desc;
- boe = devm_kzalloc(&dsi->dev, sizeof(*boe), GFP_KERNEL);
- if (!boe)
- return -ENOMEM;
+ boe = devm_drm_panel_alloc(&dsi->dev, __typeof(*boe), base,
+ &boe_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(boe))
+ return PTR_ERR(boe);
desc = of_device_get_match_data(&dsi->dev);
dsi->lanes = desc->lanes;
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 90e8c154a978..9a56e208cbdd 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -113,7 +113,7 @@ struct panel_delay {
* // do fixed enable delay
* // enforce prepare_to_enable min time
*
- * This is not specified in a standard way on eDP timing diagrams.
+ * This is usually (T4+T5+T6+T8)-min on eDP timing diagrams.
* It is effectively the time from HPD going high till you can
* turn on the backlight.
*/
@@ -1869,6 +1869,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x30ed, &delay_200_500_e50, "G156HAN03.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAN04.0"),
EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
@@ -1923,6 +1924,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0951, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0964, &delay_200_500_e50, "NV133WUM-N61"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x096e, &delay_200_500_e50_po2e200, "NV116WHM-T07 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
@@ -1937,6 +1939,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b1e, &delay_200_500_e80, "NE140QDM-N6A"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
@@ -1965,6 +1968,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1160, &delay_200_500_e80_d50, "N116BCJ-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1161, &delay_200_500_e80, "N116BCP-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1163, &delay_200_500_e80_d50, "N116BCJ-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
@@ -1973,6 +1977,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"),
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_p2e200, "MNE007JA1-2"),
@@ -2005,6 +2010,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x1220, &delay_200_500_e50, "KD116N3730A05"),
EDP_PANEL_ENTRY('L', 'G', 'D', 0x0000, &delay_200_500_e200_d200, "Unknown"),
EDP_PANEL_ENTRY('L', 'G', 'D', 0x048d, &delay_200_500_e200_d200, "Unknown"),
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index b904d5437444..1f177834d629 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -206,9 +206,10 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
struct kd35t133 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct kd35t133, panel,
+ &kd35t133_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
@@ -248,9 +249,6 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
index 986e3e192881..6225501cb174 100644
--- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -443,9 +443,11 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
unsigned int i;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct k101_im2ba02, panel,
+ &k101_im2ba02_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
@@ -463,9 +465,6 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
"Couldn't get our reset GPIO\n");
- drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index 48e3acaecdf3..4f8d6d8c07e4 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -189,16 +189,14 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
struct feiyang *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct feiyang, panel,
+ &feiyang_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel, &dsi->dev, &feiyang_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd))
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->dvdd),
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c
index 66abfc44e424..4c432d207634 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83102.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c
@@ -989,8 +989,6 @@ static int hx83102_panel_add(struct hx83102 *ctx)
ctx->base.prepare_prev_first = true;
- drm_panel_init(&ctx->base, dev, &hx83102_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
err = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
if (err < 0)
return dev_err_probe(dev, err, "failed to get orientation\n");
@@ -1013,9 +1011,11 @@ static int hx83102_probe(struct mipi_dsi_device *dsi)
int ret;
const struct hx83102_panel_desc *desc;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, __typeof(*ctx), base,
+ &hx83102_drm_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
desc = of_device_get_match_data(&dsi->dev);
dsi->lanes = 4;
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112a.c b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
index 47bce087e339..142cb1cc067a 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83112a.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
@@ -269,9 +269,11 @@ static int hx83112a_probe(struct mipi_dsi_device *dsi)
struct hx83112a_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct hx83112a_panel, panel,
+ &hx83112a_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd1";
ctx->supplies[1].supply = "vsn";
@@ -295,8 +297,6 @@ static int hx83112a_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &hx83112a_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112b.c b/drivers/gpu/drm/panel/panel-himax-hx83112b.c
new file mode 100644
index 000000000000..263f79a967de
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-himax-hx83112b.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree.
+ * Copyright (c) 2025 Luca Weiss <luca@lucaweiss.eu>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+/* Manufacturer specific DSI commands */
+#define HX83112B_SETPOWER1 0xb1
+#define HX83112B_SETDISP 0xb2
+#define HX83112B_SETDRV 0xb4
+#define HX83112B_SETEXTC 0xb9
+#define HX83112B_SETBANK 0xbd
+#define HX83112B_SETDGCLUT 0xc1
+#define HX83112B_SETDISMO 0xc2
+#define HX83112B_UNKNOWN1 0xc6
+#define HX83112B_SETPANEL 0xcc
+#define HX83112B_UNKNOWN2 0xd1
+#define HX83112B_SETPOWER2 0xd2
+#define HX83112B_SETGIP0 0xd3
+#define HX83112B_SETGIP1 0xd5
+#define HX83112B_SETGIP2 0xd6
+#define HX83112B_SETGIP3 0xd8
+#define HX83112B_SETIDLE 0xdd
+#define HX83112B_UNKNOWN3 0xe7
+#define HX83112B_UNKNOWN4 0xe9
+
+struct hx83112b_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data *supplies;
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct regulator_bulk_data hx83112b_supplies[] = {
+ { .supply = "iovcc" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline struct hx83112b_panel *to_hx83112b_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct hx83112b_panel, panel);
+}
+
+static void hx83112b_reset(struct hx83112b_panel *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int hx83112b_on(struct hx83112b_panel *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETEXTC, 0x83, 0x11, 0x2b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISMO, 0x08, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, 0x04, 0x38, 0x08, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETPOWER1,
+ 0xf8, 0x27, 0x27, 0x00, 0x00, 0x0b, 0x0e,
+ 0x0b, 0x0e, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETPOWER2, 0x2d, 0x2d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP,
+ 0x80, 0x02, 0x18, 0x80, 0x70, 0x00, 0x08,
+ 0x1c, 0x08, 0x11, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xd1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, 0x00, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, 0xb5, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETIDLE,
+ 0x00, 0x00, 0x08, 0x1c, 0x08, 0x34, 0x34,
+ 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDRV,
+ 0x65, 0x6b, 0x00, 0x00, 0xd0, 0xd4, 0x36,
+ 0xcf, 0x06, 0xce, 0x00, 0xce, 0x00, 0x00,
+ 0x00, 0x07, 0x00, 0x2a, 0x07, 0x01, 0x07,
+ 0x00, 0x00, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDRV, 0x01, 0x67, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT,
+ 0xff, 0xfb, 0xf9, 0xf6, 0xf4, 0xf1, 0xef,
+ 0xea, 0xe7, 0xe5, 0xe2, 0xdf, 0xdd, 0xda,
+ 0xd8, 0xd5, 0xd2, 0xcf, 0xcc, 0xc5, 0xbe,
+ 0xb7, 0xb0, 0xa8, 0xa0, 0x98, 0x8e, 0x85,
+ 0x7b, 0x72, 0x69, 0x5e, 0x53, 0x48, 0x3e,
+ 0x35, 0x2b, 0x22, 0x17, 0x0d, 0x09, 0x07,
+ 0x05, 0x01, 0x00, 0x26, 0xf0, 0x86, 0x25,
+ 0x6e, 0xb6, 0xdd, 0xf3, 0xd8, 0xcc, 0x9b,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT,
+ 0xff, 0xfb, 0xf9, 0xf6, 0xf4, 0xf1, 0xef,
+ 0xea, 0xe7, 0xe5, 0xe2, 0xdf, 0xdd, 0xda,
+ 0xd8, 0xd5, 0xd2, 0xcf, 0xcc, 0xc5, 0xbe,
+ 0xb7, 0xb0, 0xa8, 0xa0, 0x98, 0x8e, 0x85,
+ 0x7b, 0x72, 0x69, 0x5e, 0x53, 0x48, 0x3e,
+ 0x35, 0x2b, 0x22, 0x17, 0x0d, 0x09, 0x07,
+ 0x05, 0x01, 0x00, 0x26, 0xf0, 0x86, 0x25,
+ 0x6e, 0xb6, 0xdd, 0xf3, 0xd8, 0xcc, 0x9b,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT,
+ 0xff, 0xfb, 0xf9, 0xf6, 0xf4, 0xf1, 0xef,
+ 0xea, 0xe7, 0xe5, 0xe2, 0xdf, 0xdd, 0xda,
+ 0xd8, 0xd5, 0xd2, 0xcf, 0xcc, 0xc5, 0xbe,
+ 0xb7, 0xb0, 0xa8, 0xa0, 0x98, 0x8e, 0x85,
+ 0x7b, 0x72, 0x69, 0x5e, 0x53, 0x48, 0x3e,
+ 0x35, 0x2b, 0x22, 0x17, 0x0d, 0x09, 0x07,
+ 0x05, 0x01, 0x00, 0x26, 0xf0, 0x86, 0x25,
+ 0x6e, 0xb6, 0xdd, 0xf3, 0xd8, 0xcc, 0x9b,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISMO, 0xc8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETPANEL, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP0,
+ 0x81, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x04, 0x00, 0x01, 0x13, 0x40, 0x04, 0x09,
+ 0x09, 0x0b, 0x0b, 0x32, 0x10, 0x08, 0x00,
+ 0x08, 0x32, 0x10, 0x08, 0x00, 0x08, 0x32,
+ 0x10, 0x08, 0x00, 0x08, 0x00, 0x00, 0x0a,
+ 0x08, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN1, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xd4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN1, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xef);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP0, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP0, 0xa1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP1,
+ 0x18, 0x18, 0x19, 0x18, 0x18, 0x20, 0x18,
+ 0x18, 0x18, 0x10, 0x10, 0x18, 0x18, 0x00,
+ 0x00, 0x18, 0x18, 0x01, 0x01, 0x18, 0x18,
+ 0x28, 0x28, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x35,
+ 0x35, 0x36, 0x36, 0x37, 0x37, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xfc,
+ 0xfc, 0x00, 0x00, 0xfc, 0xfc, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP2,
+ 0x18, 0x18, 0x19, 0x18, 0x18, 0x20, 0x19,
+ 0x18, 0x18, 0x10, 0x10, 0x18, 0x18, 0x00,
+ 0x00, 0x18, 0x18, 0x01, 0x01, 0x18, 0x18,
+ 0x28, 0x28, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x35,
+ 0x35, 0x36, 0x36, 0x37, 0x37, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3,
+ 0xaa, 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa, 0xaa,
+ 0xab, 0xaf, 0xef, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaf, 0xea, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3,
+ 0xaa, 0xaa, 0xab, 0xaf, 0xea, 0xaa, 0xaa,
+ 0xaa, 0xae, 0xaf, 0xea, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3,
+ 0xaa, 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaf, 0xea, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3,
+ 0xba, 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xba, 0xaa,
+ 0xaa, 0xaf, 0xea, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaf, 0xea, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xe4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3, 0x17, 0x69);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3,
+ 0x09, 0x09, 0x00, 0x07, 0xe8, 0x00, 0x26,
+ 0x00, 0x07, 0x00, 0x00, 0xe8, 0x32, 0x00,
+ 0xe9, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x01,
+ 0x01, 0x00, 0x12, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3,
+ 0x02, 0x00, 0x01, 0x20, 0x01, 0x18, 0x08,
+ 0xa8, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3, 0x20, 0x20, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3,
+ 0x00, 0xdc, 0x11, 0x70, 0x00, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3,
+ 0x2a, 0xce, 0x02, 0x70, 0x01, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN2, 0x27);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x24);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx83112b_off(struct hx83112b_panel *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx83112b_prepare(struct drm_panel *panel)
+{
+ struct hx83112b_panel *ctx = to_hx83112b_panel(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hx83112b_supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ hx83112b_reset(ctx);
+
+ ret = hx83112b_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(hx83112b_supplies), ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hx83112b_unprepare(struct drm_panel *panel)
+{
+ struct hx83112b_panel *ctx = to_hx83112b_panel(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = hx83112b_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(hx83112b_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode hx83112b_mode = {
+ .clock = (1080 + 40 + 4 + 12) * (2160 + 32 + 2 + 2) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 40,
+ .hsync_end = 1080 + 40 + 4,
+ .htotal = 1080 + 40 + 4 + 12,
+ .vdisplay = 2160,
+ .vsync_start = 2160 + 32,
+ .vsync_end = 2160 + 32 + 2,
+ .vtotal = 2160 + 32 + 2 + 2,
+ .width_mm = 65,
+ .height_mm = 128,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int hx83112b_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &hx83112b_mode);
+}
+
+static const struct drm_panel_funcs hx83112b_panel_funcs = {
+ .prepare = hx83112b_prepare,
+ .unprepare = hx83112b_unprepare,
+ .get_modes = hx83112b_get_modes,
+};
+
+static int hx83112b_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops hx83112b_bl_ops = {
+ .update_status = hx83112b_bl_update_status,
+};
+
+static struct backlight_device *
+hx83112b_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 4095,
+ .max_brightness = 4095,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &hx83112b_bl_ops, &props);
+}
+
+static int hx83112b_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct hx83112b_panel *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct hx83112b_panel, panel,
+ &hx83112b_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(hx83112b_supplies),
+ hx83112b_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_LPM;
+
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = hx83112b_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void hx83112b_remove(struct mipi_dsi_device *dsi)
+{
+ struct hx83112b_panel *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id hx83112b_of_match[] = {
+ { .compatible = "djn,98-03057-6598b-i" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hx83112b_of_match);
+
+static struct mipi_dsi_driver hx83112b_driver = {
+ .probe = hx83112b_probe,
+ .remove = hx83112b_remove,
+ .driver = {
+ .name = "panel-himax-hx83112b",
+ .of_match_table = hx83112b_of_match,
+ },
+};
+module_mipi_dsi_driver(hx83112b_driver);
+
+MODULE_DESCRIPTION("DRM driver for hx83112b-equipped DSI panels");
+MODULE_LICENSE("GPL");
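
The init sequence above relies on the mipi_dsi_multi_context helpers: each *_multi() call checks dsi_ctx.accum_err first and becomes a no-op once an earlier call has failed, so a long command list needs a single error check at the end instead of one per write. The skeleton of the pattern, with the first SETEXTC bytes taken from the sequence above:

    struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

    mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x83, 0x11, 0x2b);
    mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
    mipi_dsi_msleep(&dsi_ctx, 120);    /* also skipped if accum_err is set */
    mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

    return dsi_ctx.accum_err;          /* first error seen, or 0 on success */
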
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index ff994bf0e3cc..c4d3e09a228d 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -477,6 +477,147 @@ static const struct hx8394_panel_desc mchp_ac40t08a_desc = {
.init_sequence = mchp_ac40t08a_init_sequence,
};
+/*
+ * HL055FHAV028C is based on the Himax HX8399, so its datasheet pages
+ * differ slightly from those of HX8394-based panels.
+ */
+static void hl055fhav028c_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
+{
+ /* 6.3.6 SETEXTC: Set extension command (B9h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x99);
+
+ /* 6.3.17 SETOFFSET: Set offset voltage (D2h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETOFFSET,
+ 0x77);
+
+ /* 6.3.1 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x02, 0x04, 0x74, 0x94, 0x01, 0x32,
+ 0x33, 0x11, 0x11, 0xab, 0x4d, 0x56,
+ 0x73, 0x02, 0x02);
+
+ /* 6.3.2 SETDISP: Set display related register (B2h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x80, 0xae, 0x05, 0x07,
+ 0x5a, 0x11, 0x00, 0x00, 0x10, 0x1e,
+ 0x70, 0x03, 0xd4);
+
+ /* 6.3.3 SETCYC: Set display waveform cycles (B4h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x00, 0xff, 0x02, 0xc0, 0x02, 0xc0,
+ 0x00, 0x00, 0x08, 0x00, 0x04, 0x06,
+ 0x00, 0x32, 0x04, 0x0a, 0x08, 0x21,
+ 0x03, 0x01, 0x00, 0x0f, 0xb8, 0x8b,
+ 0x02, 0xc0, 0x02, 0xc0, 0x00, 0x00,
+ 0x08, 0x00, 0x04, 0x06, 0x00, 0x32,
+ 0x04, 0x0a, 0x08, 0x01, 0x00, 0x0f,
+ 0xb8, 0x01);
+
+ /* 6.3.18 SETGIP0: Set GIP Option0 (D3h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x10, 0x04, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x05, 0x05, 0x07, 0x00, 0x00,
+ 0x00, 0x05, 0x40);
+
+ /* 6.3.19 Set GIP Option1 (D5h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x18, 0x18, 0x19, 0x19, 0x18, 0x18,
+ 0x21, 0x20, 0x01, 0x00, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x2f, 0x2f,
+ 0x30, 0x30, 0x31, 0x31, 0x18, 0x18,
+ 0x18, 0x18);
+
+ /* 6.3.20 Set GIP Option2 (D6h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x18, 0x18, 0x19, 0x19, 0x40, 0x40,
+ 0x20, 0x21, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x00, 0x01, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x2f, 0x2f,
+ 0x30, 0x30, 0x31, 0x31, 0x40, 0x40,
+ 0x40, 0x40);
+
+ /* 6.3.21 Set GIP Option3 (D8h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xa2, 0xaa, 0x02, 0xa0, 0xa2, 0xa8,
+ 0x02, 0xa0, 0xb0, 0x00, 0x00, 0x00,
+ 0xb0, 0x00, 0x00, 0x00);
+
+ /* 6.3.9 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
+
+ /* 6.3.21 Set GIP Option3 (D8h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xb0, 0x00, 0x00, 0x00, 0xb0, 0x00,
+ 0x00, 0x00, 0xe2, 0xaa, 0x03, 0xf0,
+ 0xe2, 0xaa, 0x03, 0xf0);
+
+ /* 6.3.9 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
+
+ /* 6.3.21 Set GIP Option3 (D8h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xe2, 0xaa, 0x03, 0xf0, 0xe2, 0xaa,
+ 0x03, 0xf0);
+
+ /* 6.3.9 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* 6.3.4 SETVCOM: Set VCOM voltage (B6h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x7a, 0x7a);
+
+ /* 6.3.26 SETGAMMA: Set gamma curve related setting (E0h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x18, 0x27, 0x24, 0x5a, 0x68,
+ 0x79, 0x78, 0x81, 0x8a, 0x92, 0x99,
+ 0x9e, 0xa7, 0xaf, 0xb4, 0xb9, 0xc3,
+ 0xc7, 0xd1, 0xc6, 0xd4, 0xd5, 0x6c,
+ 0x67, 0x71, 0x77, 0x00, 0x00, 0x18,
+ 0x27, 0x24, 0x5a, 0x68, 0x79, 0x78,
+ 0x81, 0x8a, 0x92, 0x99, 0x9e, 0xa7,
+ 0xaf, 0xb4, 0xb9, 0xc3, 0xc7, 0xd1,
+ 0xc6, 0xd4, 0xd5, 0x6c, 0x67, 0x77);
+
+ /* Unknown command, not listed in the HX8399-C datasheet (C6h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xff, 0xf9);
+
+ /* 6.3.16 SETPANEL (CCh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x08);
+}
+
+static const struct drm_display_mode hl055fhav028c_mode = {
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 32,
+ .hsync_end = 1080 + 32 + 8,
+ .htotal = 1080 + 32 + 8 + 32,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 16,
+ .vsync_end = 1920 + 16 + 2,
+ .vtotal = 1920 + 16 + 2 + 14,
+ .clock = 134920,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 70,
+ .height_mm = 127,
+};
+
+static const struct hx8394_panel_desc hl055fhav028c_desc = {
+ .mode = &hl055fhav028c_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = hl055fhav028c_init_sequence,
+};
+
static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
@@ -611,9 +752,11 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
struct hx8394 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct hx8394, panel,
+ &hx8394_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -645,9 +788,6 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
"Failed to request iovcc regulator\n");
- drm_panel_init(&ctx->panel, dev, &hx8394_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
@@ -683,6 +823,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id hx8394_of_match[] = {
{ .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
+ { .compatible = "huiling,hl055fhav028c", .data = &hl055fhav028c_desc },
{ .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc },
{ .compatible = "microchip,ac40t08a-mipi-panel", .data = &mchp_ac40t08a_desc },
{ /* sentinel */ }
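
The panel drivers in the remainder of this series get the same treatment as the bridges earlier: devm_drm_panel_alloc() folds the devm_kzalloc() and drm_panel_init() pair into one device-managed call that also records the funcs and connector type. A sketch, with struct foo_ctx and foo_drm_funcs as illustrative names:

    /* before: zeroed allocation plus a separate drm_panel_init() */
    ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
    if (!ctx)
            return -ENOMEM;
    drm_panel_init(&ctx->panel, dev, &foo_drm_funcs, DRM_MODE_CONNECTOR_DSI);

    /* after: returns the container, or ERR_PTR() on allocation failure */
    ctx = devm_drm_panel_alloc(dev, struct foo_ctx, panel,
                               &foo_drm_funcs, DRM_MODE_CONNECTOR_DSI);
    if (IS_ERR(ctx))
            return PTR_ERR(ctx);
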
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 94b7dfef3b5e..6ed544a83bdd 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -722,9 +722,10 @@ static int ili9322_probe(struct spi_device *spi)
int ret;
int i;
- ili = devm_kzalloc(dev, sizeof(struct ili9322), GFP_KERNEL);
- if (!ili)
- return -ENOMEM;
+ ili = devm_drm_panel_alloc(dev, struct ili9322, panel,
+ &ili9322_drm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ili))
+ return PTR_ERR(ili);
spi_set_drvdata(spi, ili);
@@ -883,9 +884,6 @@ static int ili9322_probe(struct spi_device *spi)
ili->input = ili->conf->input;
}
- drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&ili->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index ff39f5dd4097..f7425dfaa50d 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -173,7 +173,6 @@ struct ili9341_config {
};
struct ili9341 {
- struct device *dev;
const struct ili9341_config *conf;
struct drm_panel panel;
struct gpio_desc *reset_gpio;
@@ -490,9 +489,11 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
struct ili9341 *ili;
int ret;
- ili = devm_kzalloc(dev, sizeof(struct ili9341), GFP_KERNEL);
- if (!ili)
- return -ENOMEM;
+ ili = devm_drm_panel_alloc(dev, struct ili9341, panel,
+ &ili9341_dpi_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ili))
+ return PTR_ERR(ili);
ili->dbi = devm_kzalloc(dev, sizeof(struct mipi_dbi),
GFP_KERNEL);
@@ -526,8 +527,6 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
}
ili->max_spi_speed = ili->conf->max_spi_speed;
- drm_panel_init(&ili->panel, dev, &ili9341_dpi_funcs,
- DRM_MODE_CONNECTOR_DPI);
drm_panel_add(&ili->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9805.c b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
index 1cbc25758bd2..e6c483851f1f 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
@@ -307,9 +307,12 @@ static int ili9805_dsi_probe(struct mipi_dsi_device *dsi)
struct ili9805 *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct ili9805, panel,
+ &ili9805_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
ctx->desc = of_device_get_match_data(&dsi->dev);
@@ -320,9 +323,6 @@ static int ili9805_dsi_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_NO_EOT_PACKET;
dsi->lanes = 2;
- drm_panel_init(&ctx->panel, &dsi->dev, &ili9805_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd))
return PTR_ERR(ctx->dvdd);
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
index a3c79ad99d0b..18aa6222b0c5 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
@@ -166,9 +166,10 @@ static int ili9806e_dsi_probe(struct mipi_dsi_device *dsi)
struct ili9806e_panel *ctx;
int i, ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ili9806e_panel, panel, &ili9806e_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->desc = device_get_match_data(dev);
@@ -192,9 +193,6 @@ static int ili9806e_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = ctx->desc->format;
dsi->lanes = ctx->desc->lanes;
- drm_panel_init(&ctx->panel, dev, &ili9806e_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
if (ret)
return dev_err_probe(dev, ret, "Failed to get orientation\n");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 28cd7560e5db..ac433345a179 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -43,6 +43,7 @@ struct ili9881c_desc {
const struct drm_display_mode *mode;
const unsigned long mode_flags;
u8 default_address_mode;
+ unsigned int lanes;
};
struct ili9881c {
@@ -1223,6 +1224,199 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00),
};
+static const struct ili9881c_instr rpi_7inch_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0x00),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x07, 0x00),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x61),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x61),
+ ILI9881C_COMMAND_INSTR(0x10, 0x61),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x06),
+ ILI9881C_COMMAND_INSTR(0x21, 0x01),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x10),
+ ILI9881C_COMMAND_INSTR(0x51, 0x32),
+ ILI9881C_COMMAND_INSTR(0x52, 0x54),
+ ILI9881C_COMMAND_INSTR(0x53, 0x76),
+ ILI9881C_COMMAND_INSTR(0x54, 0x98),
+ ILI9881C_COMMAND_INSTR(0x55, 0xba),
+ ILI9881C_COMMAND_INSTR(0x56, 0x10),
+ ILI9881C_COMMAND_INSTR(0x57, 0x32),
+ ILI9881C_COMMAND_INSTR(0x58, 0x54),
+ ILI9881C_COMMAND_INSTR(0x59, 0x76),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x98),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xba),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xdc),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xfe),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x60, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x63, 0x06),
+ ILI9881C_COMMAND_INSTR(0x64, 0x07),
+ ILI9881C_COMMAND_INSTR(0x65, 0x02),
+ ILI9881C_COMMAND_INSTR(0x66, 0x02),
+ ILI9881C_COMMAND_INSTR(0x67, 0x02),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x01),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x14),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x76, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x79, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x01),
+ ILI9881C_COMMAND_INSTR(0x80, 0x00),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x14),
+ ILI9881C_COMMAND_INSTR(0x83, 0x15),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6C, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6E, 0x2A),
+ ILI9881C_COMMAND_INSTR(0x6F, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3B, 0x98),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x94),
+ ILI9881C_COMMAND_INSTR(0x8D, 0x14),
+ ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_COMMAND_INSTR(0xB5, 0x06),
+ ILI9881C_COMMAND_INSTR(0x38, 0x01),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x53, 0x7d),
+ ILI9881C_COMMAND_INSTR(0x55, 0x8f),
+ ILI9881C_COMMAND_INSTR(0x40, 0x33),
+ ILI9881C_COMMAND_INSTR(0x50, 0x96),
+ ILI9881C_COMMAND_INSTR(0x51, 0x96),
+ ILI9881C_COMMAND_INSTR(0x60, 0x23),
+ ILI9881C_COMMAND_INSTR(0xA0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x10),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xA8, 0x7e),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xAB, 0x6b),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x20),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x50),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xC0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x10),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xC8, 0x7e),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xCB, 0x6b),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x20),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x50),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x39),
+};
+
static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
{
return container_of(panel, struct ili9881c, panel);
@@ -1449,6 +1643,23 @@ static const struct drm_display_mode am8001280g_default_mode = {
.height_mm = 151,
};
+static const struct drm_display_mode rpi_7inch_default_mode = {
+ .clock = 83330,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 239,
+ .hsync_end = 720 + 239 + 33,
+ .htotal = 720 + 239 + 33 + 50,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 2,
+ .vtotal = 1280 + 20 + 2 + 30,
+
+ .width_mm = 90,
+ .height_mm = 151,
+};
+
static int ili9881c_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -1506,16 +1717,15 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
struct ili9881c *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct ili9881c, panel, &ili9881c_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
ctx->desc = of_device_get_match_data(&dsi->dev);
- drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->power = devm_regulator_get(&dsi->dev, "power");
if (IS_ERR(ctx->power))
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->power),
@@ -1549,7 +1759,7 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = ctx->desc->mode_flags;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->lanes = 4;
+ dsi->lanes = ctx->desc->lanes;
return mipi_dsi_attach(dsi);
}
@@ -1567,6 +1777,7 @@ static const struct ili9881c_desc lhr050h41_desc = {
.init_length = ARRAY_SIZE(lhr050h41_init),
.mode = &lhr050h41_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .lanes = 4,
};
static const struct ili9881c_desc k101_im2byl02_desc = {
@@ -1574,6 +1785,7 @@ static const struct ili9881c_desc k101_im2byl02_desc = {
.init_length = ARRAY_SIZE(k101_im2byl02_init),
.mode = &k101_im2byl02_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .lanes = 4,
};
static const struct ili9881c_desc kd050hdfia020_desc = {
@@ -1599,6 +1811,7 @@ static const struct ili9881c_desc w552946aba_desc = {
.mode = &w552946aba_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+ .lanes = 4,
};
static const struct ili9881c_desc am8001280g_desc = {
@@ -1609,6 +1822,14 @@ static const struct ili9881c_desc am8001280g_desc = {
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
};
+static const struct ili9881c_desc rpi_7inch_desc = {
+ .init = rpi_7inch_init,
+ .init_length = ARRAY_SIZE(rpi_7inch_init),
+ .mode = &rpi_7inch_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM,
+ .lanes = 2,
+};
+
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
@@ -1616,6 +1837,7 @@ static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
{ .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
+ { .compatible = "raspberrypi,dsi-7inch", &rpi_7inch_desc },
{ }
};
MODULE_DEVICE_TABLE(of, ili9881c_of_match);
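
Every conversion in the hunks that follow has the same shape: a devm_kzalloc() plus drm_panel_init() pair is replaced by a single devm_drm_panel_alloc() call, which allocates the driver structure around its embedded struct drm_panel and initialises the panel in one step. A minimal sketch of the pattern, using hypothetical my_panel/my_panel_funcs names rather than any driver from this series:

	struct my_panel {
		struct drm_panel panel;		/* named by the 'member' argument */
		struct mipi_dsi_device *dsi;
	};

	static int my_panel_probe(struct mipi_dsi_device *dsi)
	{
		struct my_panel *ctx;

		ctx = devm_drm_panel_alloc(&dsi->dev, struct my_panel, panel,
					   &my_panel_funcs,
					   DRM_MODE_CONNECTOR_DSI);
		if (IS_ERR(ctx))	/* returns ERR_PTR() on failure, not NULL */
			return PTR_ERR(ctx);

		ctx->dsi = dsi;
		return 0;
	}

Note that the error check changes from a NULL test to IS_ERR()/PTR_ERR(), which is why each hunk also rewrites the former -ENOMEM path.
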
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
index 3c24a63b6be8..85c7059be214 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
@@ -614,8 +614,6 @@ static int ili9882t_add(struct ili9882t *ili)
gpiod_set_value(ili->enable_gpio, 0);
- drm_panel_init(&ili->base, dev, &ili9882t_funcs,
- DRM_MODE_CONNECTOR_DSI);
err = of_drm_get_panel_orientation(dev->of_node, &ili->orientation);
if (err < 0) {
dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
@@ -640,9 +638,11 @@ static int ili9882t_probe(struct mipi_dsi_device *dsi)
int ret;
const struct panel_desc *desc;
- ili = devm_kzalloc(&dsi->dev, sizeof(*ili), GFP_KERNEL);
- if (!ili)
- return -ENOMEM;
+ ili = devm_drm_panel_alloc(&dsi->dev, __typeof(*ili), base,
+ &ili9882t_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(ili))
+ return PTR_ERR(ili);
desc = of_device_get_match_data(&dsi->dev);
dsi->lanes = desc->lanes;
diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
index f85b7a4cbb42..b2309900873b 100644
--- a/drivers/gpu/drm/panel/panel-innolux-ej030na.c
+++ b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
@@ -204,9 +204,11 @@ static int ej030na_probe(struct spi_device *spi)
struct ej030na *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct ej030na, panel,
+ &ej030na_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -231,9 +233,6 @@ static int ej030na_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
"Failed to get reset GPIO\n");
- drm_panel_init(&priv->panel, dev, &ej030na_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index d95c0d4f3e35..80afeeab9475 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -382,9 +382,11 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
struct device *dev = &dsi->dev;
int err, i;
- innolux = devm_kzalloc(dev, sizeof(*innolux), GFP_KERNEL);
- if (!innolux)
- return -ENOMEM;
+ innolux = devm_drm_panel_alloc(dev, struct innolux_panel, base,
+ &innolux_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(innolux))
+ return PTR_ERR(innolux);
innolux->desc = desc;
@@ -410,9 +412,6 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
innolux->enable_gpio = NULL;
}
- drm_panel_init(&innolux->base, dev, &innolux_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&innolux->base);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index eb0f8373258c..5c2530598ddb 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -1120,9 +1120,10 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
struct jadard *jadard;
int ret;
- jadard = devm_kzalloc(&dsi->dev, sizeof(*jadard), GFP_KERNEL);
- if (!jadard)
- return -ENOMEM;
+ jadard = devm_drm_panel_alloc(dev, struct jadard, panel, &jadard_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(jadard))
+ return PTR_ERR(jadard);
desc = of_device_get_match_data(dev);
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
@@ -1148,9 +1149,6 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(jadard->vccio);
}
- drm_panel_init(&jadard->panel, dev, &jadard_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = of_drm_get_panel_orientation(dev->of_node, &jadard->orientation);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to get orientation\n");
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index 4eb71e85e9e9..cbe354b51bce 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -175,9 +175,11 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
struct jdi_fhd_r63452 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct jdi_fhd_r63452, panel,
+ &jdi_fhd_r63452_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -192,8 +194,6 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &jdi_fhd_r63452_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
index 5b5082efb282..5f897e143758 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
@@ -435,9 +435,6 @@ static int jdi_panel_add(struct jdi_panel *jdi)
return dev_err_probe(dev, PTR_ERR(jdi->backlight),
"failed to create backlight\n");
- drm_panel_init(&jdi->base, &jdi->link1->dev, &jdi_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&jdi->base);
return 0;
@@ -475,10 +472,13 @@ static int jdi_panel_dsi_probe(struct mipi_dsi_device *dsi)
/* register a panel for only the DSI-LINK1 interface */
if (secondary) {
- jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL);
- if (!jdi) {
+ jdi = devm_drm_panel_alloc(&dsi->dev, __typeof(*jdi),
+ base, &jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(jdi)) {
put_device(&secondary->dev);
- return -ENOMEM;
+ return PTR_ERR(jdi);
}
mipi_dsi_set_drvdata(dsi, jdi);
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index b1ce186de261..3513e5c4dd8c 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -402,9 +402,6 @@ static int jdi_panel_add(struct jdi_panel *jdi)
return dev_err_probe(dev, PTR_ERR(jdi->backlight),
"failed to register backlight %d\n", ret);
- drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&jdi->base);
return 0;
@@ -426,9 +423,11 @@ static int jdi_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL);
- if (!jdi)
- return -ENOMEM;
+ jdi = devm_drm_panel_alloc(&dsi->dev, __typeof(*jdi), base,
+ &jdi_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(jdi))
+ return PTR_ERR(jdi);
mipi_dsi_set_drvdata(dsi, jdi);
diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c
index 0e5e8e57bd1e..67ca055f06f3 100644
--- a/drivers/gpu/drm/panel/panel-khadas-ts050.c
+++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c
@@ -821,9 +821,6 @@ static int khadas_ts050_panel_add(struct khadas_ts050_panel *khadas_ts050)
return dev_err_probe(dev, PTR_ERR(khadas_ts050->enable_gpio),
"failed to get enable gpio");
- drm_panel_init(&khadas_ts050->base, &khadas_ts050->link->dev,
- &khadas_ts050_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&khadas_ts050->base);
if (err)
return err;
@@ -850,10 +847,12 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- khadas_ts050 = devm_kzalloc(&dsi->dev, sizeof(*khadas_ts050),
- GFP_KERNEL);
- if (!khadas_ts050)
- return -ENOMEM;
+ khadas_ts050 = devm_drm_panel_alloc(&dsi->dev, __typeof(*khadas_ts050),
+ base, &khadas_ts050_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(khadas_ts050))
+ return PTR_ERR(khadas_ts050);
khadas_ts050->panel_data = (struct khadas_ts050_panel_data *)data;
mipi_dsi_set_drvdata(dsi, khadas_ts050);
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index d6b912277196..2fc7b0779b37 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -337,9 +337,6 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
kingdisplay->enable_gpio = NULL;
}
- drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev,
- &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&kingdisplay->base);
if (err)
return err;
@@ -364,9 +361,12 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- kingdisplay = devm_kzalloc(&dsi->dev, sizeof(*kingdisplay), GFP_KERNEL);
- if (!kingdisplay)
- return -ENOMEM;
+ kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,
+ &kingdisplay_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(kingdisplay))
+ return PTR_ERR(kingdisplay);
mipi_dsi_set_drvdata(dsi, kingdisplay);
kingdisplay->link = dsi;
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 77f74e6c467e..0856df5a6ee2 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -548,9 +548,11 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
struct ltk050h3146w *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ltk050h3146w, panel,
+ &ltk050h3146w_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->panel_desc = of_device_get_match_data(dev);
if (!ctx->panel_desc)
@@ -577,9 +579,6 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = ctx->panel_desc->mode_flags;
- drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
index 6b18cf00fd4a..7f19fd5b8060 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -604,9 +604,11 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
struct device *dev = &dsi->dev;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ltk500hd1829, panel,
+ &ltk500hd1829_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->panel_desc = of_device_get_match_data(dev);
if (!ctx->panel_desc)
@@ -643,9 +645,6 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&ctx->panel, &dsi->dev, &ltk500hd1829_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index 9d0d4faa3f58..b2be6727bf73 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -178,9 +178,10 @@ static int lb035q02_probe(struct spi_device *spi)
struct lb035q02_device *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct lb035q02_device, panel,
+ &lb035q02_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -195,9 +196,6 @@ static int lb035q02_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index cf246d15b7b6..dec619902c15 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -243,9 +243,11 @@ static int lg4573_probe(struct spi_device *spi)
struct lg4573 *ctx;
int ret;
- ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&spi->dev, struct lg4573, panel,
+ &lg4573_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->spi = spi;
@@ -258,9 +260,6 @@ static int lg4573_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&ctx->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lg-sw43408.c b/drivers/gpu/drm/panel/panel-lg-sw43408.c
index f3dcc39670ea..46a56ea92ad9 100644
--- a/drivers/gpu/drm/panel/panel-lg-sw43408.c
+++ b/drivers/gpu/drm/panel/panel-lg-sw43408.c
@@ -246,8 +246,6 @@ static int sw43408_add(struct sw43408_panel *ctx)
ctx->base.prepare_prev_first = true;
- drm_panel_init(&ctx->base, dev, &sw43408_funcs, DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&ctx->base);
return ret;
}
@@ -257,9 +255,11 @@ static int sw43408_probe(struct mipi_dsi_device *dsi)
struct sw43408_panel *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, __typeof(*ctx), base,
+ &sw43408_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
dsi->mode_flags = MIPI_DSI_MODE_LPM;
dsi->format = MIPI_DSI_FMT_RGB888;
diff --git a/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c b/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c
index 032c542aab0f..24b34443ace0 100644
--- a/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c
+++ b/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c
@@ -190,9 +190,11 @@ static int lincoln_lcd197_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = (MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_BURST);
- lcd = devm_kzalloc(&dsi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(dev, struct lincoln_lcd197_panel, panel,
+ &lincoln_lcd197_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
mipi_dsi_set_drvdata(dsi, lcd);
lcd->dsi = dsi;
@@ -214,9 +216,6 @@ static int lincoln_lcd197_panel_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(lcd->reset_gpio),
"failed to get reset gpio");
- drm_panel_init(&lcd->panel, dev,
- &lincoln_lcd197_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&lcd->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index ba6c015aabba..23fd535d8f47 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -164,9 +164,11 @@ static int panel_lvds_probe(struct platform_device *pdev)
struct panel_lvds *lvds;
int ret;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_panel_alloc(&pdev->dev, struct panel_lvds, panel,
+ &panel_lvds_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = &pdev->dev;
@@ -214,10 +216,6 @@ static int panel_lvds_probe(struct platform_device *pdev)
* driver.
*/
- /* Register the panel. */
- drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs,
- DRM_MODE_CONNECTOR_LVDS);
-
ret = drm_panel_of_backlight(&lvds->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
index 799c2161fc85..cde168ec631c 100644
--- a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
+++ b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
@@ -370,9 +370,11 @@ static int d53e6ea8966_probe(struct spi_device *spi)
.node = NULL,
};
- db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- return -ENOMEM;
+ db = devm_drm_panel_alloc(dev, struct d53e6ea8966, panel,
+ &d53e6ea8966_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(db))
+ return PTR_ERR(db);
spi_set_drvdata(spi, db);
@@ -425,9 +427,6 @@ static int d53e6ea8966_probe(struct spi_device *spi)
db->dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&db->panel, dev, &d53e6ea8966_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
if (db->panel_info->backlight_register) {
ret = db->panel_info->backlight_register(db);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 4db852ffb0f6..55664f5d5aa5 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -234,9 +234,11 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
struct mantix *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct mantix, panel, &mantix_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
ctx->default_mode = of_device_get_match_data(dev);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
@@ -271,9 +273,6 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->vddi))
return dev_err_probe(dev, PTR_ERR(ctx->vddi), "Failed to request vddi regulator\n");
- drm_panel_init(&ctx->panel, dev, &mantix_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index 81c5c541a351..d5c7210de4af 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -178,9 +178,10 @@ static int nl8048_probe(struct spi_device *spi)
struct nl8048_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct nl8048_panel, panel,
+ &nl8048_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -204,9 +205,6 @@ static int nl8048_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index b6429795e8f5..22560384e48e 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -361,9 +361,11 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
struct panel_nv3051d *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct panel_nv3051d, panel,
+ &panel_nv3051d_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -391,9 +393,6 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = ctx->panel_info->mode_flags;
- drm_panel_init(&ctx->panel, &dsi->dev, &panel_nv3051d_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
index 06e16a7c14a7..0db9cadd868e 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
@@ -777,9 +777,10 @@ static int nv3052c_probe(struct spi_device *spi)
struct nv3052c *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct nv3052c, panel, &nv3052c_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->dev = dev;
@@ -803,9 +804,6 @@ static int nv3052c_probe(struct spi_device *spi)
spi_set_drvdata(spi, priv);
- drm_panel_init(&priv->panel, dev, &nv3052c_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return dev_err_probe(dev, err, "Failed to attach backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index 549b86f2cc28..3189d89c7ca0 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -1087,9 +1087,12 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
struct nt35510 *nt;
int ret;
- nt = devm_kzalloc(dev, sizeof(struct nt35510), GFP_KERNEL);
- if (!nt)
- return -ENOMEM;
+ nt = devm_drm_panel_alloc(dev, struct nt35510, panel,
+ &nt35510_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(nt))
+ return PTR_ERR(nt);
+
mipi_dsi_set_drvdata(dsi, nt);
nt->dev = dev;
@@ -1142,9 +1145,6 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(nt->reset_gpio);
}
- drm_panel_init(&nt->panel, dev, &nt35510_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
/*
* First, try to locate an external backlight (such as on GPIO)
* if this fails, assume we will want to use the internal backlight
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
index 5bbea734123b..98f0782c8411 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -456,9 +456,12 @@ static int nt35560_probe(struct mipi_dsi_device *dsi)
struct nt35560 *nt;
int ret;
- nt = devm_kzalloc(dev, sizeof(struct nt35560), GFP_KERNEL);
- if (!nt)
- return -ENOMEM;
+ nt = devm_drm_panel_alloc(dev, struct nt35560, panel,
+ &nt35560_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(nt))
+ return PTR_ERR(nt);
+
nt->video_mode = of_property_read_bool(dev->of_node,
"enforce-video-mode");
@@ -502,9 +505,6 @@ static int nt35560_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(nt->reset_gpio),
"failed to request GPIO\n");
- drm_panel_init(&nt->panel, dev, &nt35560_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
nt->panel.backlight = devm_backlight_device_register(dev, "nt35560", dev, nt,
&nt35560_bl_ops, &nt35560_bl_props);
if (IS_ERR(nt->panel.backlight))
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 08b22b592ab0..94aa6489d99f 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -449,9 +449,10 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
const struct mipi_dsi_device_info *info;
int i, num_dsis = 1, ret;
- nt = devm_kzalloc(dev, sizeof(*nt), GFP_KERNEL);
- if (!nt)
- return -ENOMEM;
+ nt = devm_drm_panel_alloc(dev, struct nt35950, panel, &nt35950_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(nt))
+ return PTR_ERR(nt);
ret = nt35950_sharp_init_vregs(nt, dev);
if (ret)
@@ -491,9 +492,6 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
nt->dsi[0] = dsi;
mipi_dsi_set_drvdata(dsi, nt);
- drm_panel_init(&nt->panel, dev, &nt35950_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&nt->panel);
if (ret) {
if (num_dsis == 2)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 116d67bfa114..32cf64c7c18b 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -1171,9 +1171,11 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
const struct mipi_dsi_device_info *info;
int i, ret;
- pinfo = devm_kzalloc(dev, sizeof(*pinfo), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
+ pinfo = devm_drm_panel_alloc(dev, struct panel_info, panel,
+ &nt36523_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(pinfo))
+ return PTR_ERR(pinfo);
pinfo->vddio = devm_regulator_get(dev, "vddio");
if (IS_ERR(pinfo->vddio))
@@ -1211,7 +1213,6 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
pinfo->dsi[0] = dsi;
mipi_dsi_set_drvdata(dsi, pinfo);
- drm_panel_init(&pinfo->panel, dev, &nt36523_panel_funcs, DRM_MODE_CONNECTOR_DSI);
ret = of_drm_get_panel_orientation(dev->of_node, &pinfo->orientation);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
index c2abd20e0734..29e1f6aea480 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -608,8 +608,6 @@ static int nt36672a_panel_add(struct nt36672a_panel *pinfo)
return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio),
"failed to get reset gpio from DT\n");
- drm_panel_init(&pinfo->base, dev, &panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&pinfo->base);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
@@ -625,9 +623,11 @@ static int nt36672a_panel_probe(struct mipi_dsi_device *dsi)
const struct nt36672a_panel_desc *desc;
int err;
- pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
+ pinfo = devm_drm_panel_alloc(&dsi->dev, __typeof(*pinfo), base,
+ &panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(pinfo))
+ return PTR_ERR(pinfo);
desc = of_device_get_match_data(&dsi->dev);
dsi->mode_flags = desc->mode_flags;
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
index 8c9e04207ba9..c5e00eb55722 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -522,9 +522,11 @@ static int nt36672e_panel_probe(struct mipi_dsi_device *dsi)
struct nt36672e_panel *ctx;
int i, ret = 0;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct nt36672e_panel, panel,
+ &nt36672e_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->desc = of_device_get_match_data(dev);
if (!ctx->desc) {
@@ -553,8 +555,6 @@ static int nt36672e_panel_probe(struct mipi_dsi_device *dsi)
dsi->format = ctx->desc->format;
dsi->mode_flags = ctx->desc->mode_flags;
- drm_panel_init(&ctx->panel, dev, &nt36672e_drm_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index 9fa7654e2b67..a629976bae54 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -246,9 +246,10 @@ static int nt39016_probe(struct spi_device *spi)
struct nt39016 *panel;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct nt39016, drm_panel, &nt39016_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
spi_set_drvdata(spi, panel);
@@ -279,9 +280,6 @@ static int nt39016_probe(struct spi_device *spi)
return PTR_ERR(panel->map);
}
- drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&panel->drm_panel);
if (err)
return dev_err_probe(dev, err, "Failed to get backlight handle\n");
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 94ae8c8270b8..66f99982f360 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -175,9 +175,11 @@ static int lcd_olinuxino_probe(struct i2c_client *client)
I2C_FUNC_SMBUS_READ_I2C_BLOCK))
return -ENODEV;
- lcd = devm_kzalloc(dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(dev, struct lcd_olinuxino, panel,
+ &lcd_olinuxino_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
i2c_set_clientdata(client, lcd);
lcd->dev = dev;
@@ -234,9 +236,6 @@ static int lcd_olinuxino_probe(struct i2c_client *client)
if (IS_ERR(lcd->enable_gpio))
return PTR_ERR(lcd->enable_gpio);
- drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&lcd->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
index fc87f61d4400..3231e84dc66c 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
@@ -237,9 +237,11 @@ static int ota5601a_probe(struct spi_device *spi)
struct ota5601a *panel;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct ota5601a, drm_panel,
+ &ota5601a_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
spi_set_drvdata(spi, panel);
@@ -273,9 +275,6 @@ static int ota5601a_probe(struct spi_device *spi)
return PTR_ERR(panel->map);
}
- drm_panel_init(&panel->drm_panel, dev, &ota5601a_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&panel->drm_panel);
if (err) {
if (err != -EPROBE_DEFER)
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 87bbb25d119a..a0f58c3b73f6 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -424,9 +424,11 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
struct otm8009a *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct otm8009a, panel,
+ &otm8009a_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
@@ -451,9 +453,6 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
dev, ctx,
&otm8009a_backlight_ops,
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index dbea84f51514..2334b77f348c 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -132,9 +132,6 @@ static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
if (IS_ERR(osd101t2587->supply))
return PTR_ERR(osd101t2587->supply);
- drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev,
- &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&osd101t2587->base);
if (ret)
return ret;
@@ -161,9 +158,12 @@ static int osd101t2587_panel_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_NO_EOT_PACKET;
- osd101t2587 = devm_kzalloc(&dsi->dev, sizeof(*osd101t2587), GFP_KERNEL);
- if (!osd101t2587)
- return -ENOMEM;
+ osd101t2587 = devm_drm_panel_alloc(&dsi->dev, __typeof(*osd101t2587), base,
+ &osd101t2587_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(osd101t2587))
+ return PTR_ERR(osd101t2587);
mipi_dsi_set_drvdata(dsi, osd101t2587);
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index d1c5c9bc3c56..3c3308fc55df 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -166,9 +166,6 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
if (IS_ERR(wuxga_nt->supply))
return PTR_ERR(wuxga_nt->supply);
- drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev,
- &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&wuxga_nt->base);
if (ret)
return ret;
@@ -196,9 +193,12 @@ static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS |
MIPI_DSI_MODE_LPM;
- wuxga_nt = devm_kzalloc(&dsi->dev, sizeof(*wuxga_nt), GFP_KERNEL);
- if (!wuxga_nt)
- return -ENOMEM;
+ wuxga_nt = devm_drm_panel_alloc(&dsi->dev, __typeof(*wuxga_nt), base,
+ &wuxga_nt_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(wuxga_nt))
+ return PTR_ERR(wuxga_nt);
mipi_dsi_set_drvdata(dsi, wuxga_nt);
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index e10e469aa7a6..dc4bb8ad9131 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -373,9 +373,12 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c)
.node = NULL,
};
- ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
- if (!ts)
- return -ENOMEM;
+ ts = devm_drm_panel_alloc(dev, __typeof(*ts), base,
+ &rpi_touchscreen_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(ts))
+ return PTR_ERR(ts);
i2c_set_clientdata(i2c, ts);
@@ -428,9 +431,6 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c)
return PTR_ERR(ts->dsi);
}
- drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
/* This appears last, as it's what will unblock the DSI host
* driver's component bind function.
*/
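
Two equivalent spellings of the type argument appear in this series: the explicit struct name, and GCC's __typeof keyword applied to the pointer being assigned, as in the hunk above. Assuming the driver's structure is named rpi_touchscreen (as in the file above), the two forms expand to the same allocation:

	ts = devm_drm_panel_alloc(dev, __typeof(*ts), base,
				  &rpi_touchscreen_funcs, DRM_MODE_CONNECTOR_DSI);

	ts = devm_drm_panel_alloc(dev, struct rpi_touchscreen, base,
				  &rpi_touchscreen_funcs, DRM_MODE_CONNECTOR_DSI);

__typeof(*ts) simply avoids repeating the struct tag.
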
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index b2029e035635..2af6aa47a551 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -527,9 +527,11 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
int ret;
u32 video_mode;
- panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct rad_panel, panel,
+ &rad_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
mipi_dsi_set_drvdata(dsi, panel);
@@ -586,8 +588,6 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- drm_panel_init(&panel->panel, dev, &rad_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
dev_set_drvdata(dev, panel);
drm_panel_add(&panel->panel);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67200.c b/drivers/gpu/drm/panel/panel-raydium-rm67200.c
index 64b685dc11f6..333faed62da7 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67200.c
@@ -36,12 +36,14 @@ static inline struct raydium_rm67200 *to_raydium_rm67200(struct drm_panel *panel
static void raydium_rm67200_reset(struct raydium_rm67200 *ctx)
{
- gpiod_set_value_cansleep(ctx->reset_gpio, 0);
- msleep(60);
- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- msleep(60);
- gpiod_set_value_cansleep(ctx->reset_gpio, 0);
- msleep(60);
+ if (ctx->reset_gpio) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(60);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(60);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(60);
+ }
}
static void raydium_rm67200_write(struct mipi_dsi_multi_context *ctx,
@@ -318,6 +320,7 @@ static void w552793baa_setup(struct mipi_dsi_multi_context *ctx)
static int raydium_rm67200_prepare(struct drm_panel *panel)
{
struct raydium_rm67200 *ctx = to_raydium_rm67200(panel);
+ struct mipi_dsi_multi_context mctx = { .dsi = ctx->dsi };
int ret;
ret = regulator_bulk_enable(ctx->num_supplies, ctx->supplies);
@@ -328,6 +331,12 @@ static int raydium_rm67200_prepare(struct drm_panel *panel)
msleep(60);
+ ctx->panel_info->panel_setup(&mctx);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&mctx);
+ mipi_dsi_msleep(&mctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&mctx);
+ mipi_dsi_msleep(&mctx, 30);
+
return 0;
}
@@ -343,20 +352,6 @@ static int raydium_rm67200_unprepare(struct drm_panel *panel)
return 0;
}
-static int raydium_rm67200_enable(struct drm_panel *panel)
-{
- struct raydium_rm67200 *rm67200 = to_raydium_rm67200(panel);
- struct mipi_dsi_multi_context ctx = { .dsi = rm67200->dsi };
-
- rm67200->panel_info->panel_setup(&ctx);
- mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
- mipi_dsi_msleep(&ctx, 120);
- mipi_dsi_dcs_set_display_on_multi(&ctx);
- mipi_dsi_msleep(&ctx, 30);
-
- return ctx.accum_err;
-}
-
static int raydium_rm67200_disable(struct drm_panel *panel)
{
struct raydium_rm67200 *rm67200 = to_raydium_rm67200(panel);
@@ -381,7 +376,6 @@ static const struct drm_panel_funcs raydium_rm67200_funcs = {
.prepare = raydium_rm67200_prepare,
.unprepare = raydium_rm67200_unprepare,
.get_modes = raydium_rm67200_get_modes,
- .enable = raydium_rm67200_enable,
.disable = raydium_rm67200_disable,
};
@@ -391,9 +385,11 @@ static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
struct raydium_rm67200 *ctx;
int ret = 0;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct raydium_rm67200, panel,
+ &raydium_rm67200_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->panel_info = device_get_match_data(dev);
if (!ctx->panel_info)
@@ -407,7 +403,7 @@ static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio))
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"Failed to get reset-gpios\n");
@@ -421,9 +417,6 @@ static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_LPM;
ctx->panel.prepare_prev_first = true;
- drm_panel_init(&ctx->panel, dev, &raydium_rm67200_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
@@ -471,6 +464,7 @@ static const struct raydium_rm67200_panel_info w552793baa_info = {
.vtotal = 1952,
.width_mm = 68, /* 68.04mm */
.height_mm = 121, /* 120.96mm */
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
.type = DRM_MODE_TYPE_DRIVER,
},
.regulators = w552793baa_regulators,
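
The rm67200 change above folds the panel programming that used to live in .enable into .prepare, reusing a mipi_dsi_multi_context started there. The multi-context helpers accumulate errors: once ctx.accum_err is non-zero, every subsequent *_multi() call becomes a no-op, so a whole init sequence can be written without per-call checks and the first failure read back at the end. A hedged sketch of that model (illustration only, not a hunk from this patch):

	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);	/* skipped if a prior call failed */
	mipi_dsi_msleep(&ctx, 120);
	mipi_dsi_dcs_set_display_on_multi(&ctx);

	return ctx.accum_err;	/* 0, or the first error encountered */

The reset handler is likewise wrapped in an if (ctx->reset_gpio) so the three 60 ms delays are skipped entirely on boards where the now-optional reset line is absent.
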
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 7b7fe987e292..669b5f5c1ad9 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -327,9 +327,11 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
struct rm68200 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct rm68200, panel,
+ &rm68200_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
@@ -355,9 +357,6 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm692e5.c b/drivers/gpu/drm/panel/panel-raydium-rm692e5.c
index ea1b728e85a2..8e9484768657 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm692e5.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm692e5.c
@@ -281,9 +281,11 @@ static int rm692e5_probe(struct mipi_dsi_device *dsi)
struct rm692e5_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct rm692e5_panel, panel,
+ &rm692e5_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vddio";
ctx->supplies[1].supply = "dvdd";
@@ -306,8 +308,6 @@ static int rm692e5_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &rm692e5_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = rm692e5_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm69380.c b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
index d3071c01aaea..86769cadec97 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm69380.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
@@ -208,9 +208,11 @@ static int rm69380_probe(struct mipi_dsi_device *dsi)
struct device_node *dsi_sec;
int ret, i;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct rm69380_panel, panel,
+ &rm69380_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vddio";
ctx->supplies[1].supply = "avdd";
@@ -248,8 +250,6 @@ static int rm69380_probe(struct mipi_dsi_device *dsi)
ctx->dsi[0] = dsi;
mipi_dsi_set_drvdata(dsi, ctx);
- drm_panel_init(&ctx->panel, dev, &rm69380_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = rm69380_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-renesas-r61307.c b/drivers/gpu/drm/panel/panel-renesas-r61307.c
new file mode 100644
index 000000000000..319415194839
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-renesas-r61307.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define R61307_MACP 0xb0 /* Manufacturer CMD Protect */
+#define R61307_MACP_ON 0x03
+#define R61307_MACP_OFF 0x04
+
+#define R61307_INVERSION 0xc1
+#define R61307_GAMMA_SET_A 0xc8 /* Gamma Setting A */
+#define R61307_GAMMA_SET_B 0xc9 /* Gamma Setting B */
+#define R61307_GAMMA_SET_C 0xca /* Gamma Setting C */
+#define R61307_CONTRAST_SET 0xcc
+
+struct renesas_r61307 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator *vcc_supply;
+ struct regulator *iovcc_supply;
+
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+
+ bool dig_cont_adj;
+ bool inversion;
+ u32 gamma;
+};
+
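+/*
+ * Indexed by the "renesas,gamma" DT property: entry 0 is a sentinel that
+ * leaves the panel's power-on gamma untouched; entries 1..3 are
+ * vendor-provided curves, each led by the R61307_GAMMA_SET_A opcode.
+ */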
+static const u8 gamma_setting[][25] = {
+ { /* sentinel */ },
+ {
+ R61307_GAMMA_SET_A,
+ 0x00, 0x06, 0x0a, 0x0f,
+ 0x14, 0x1f, 0x1f, 0x17,
+ 0x12, 0x0c, 0x09, 0x06,
+ 0x00, 0x06, 0x0a, 0x0f,
+ 0x14, 0x1f, 0x1f, 0x17,
+ 0x12, 0x0c, 0x09, 0x06
+ },
+ {
+ R61307_GAMMA_SET_A,
+ 0x00, 0x05, 0x0b, 0x0f,
+ 0x11, 0x1d, 0x20, 0x18,
+ 0x18, 0x09, 0x07, 0x06,
+ 0x00, 0x05, 0x0b, 0x0f,
+ 0x11, 0x1d, 0x20, 0x18,
+ 0x18, 0x09, 0x07, 0x06
+ },
+ {
+ R61307_GAMMA_SET_A,
+ 0x0b, 0x0d, 0x10, 0x14,
+ 0x13, 0x1d, 0x20, 0x18,
+ 0x12, 0x09, 0x07, 0x06,
+ 0x0a, 0x0c, 0x10, 0x14,
+ 0x13, 0x1d, 0x20, 0x18,
+ 0x12, 0x09, 0x07, 0x06
+ },
+};
+
+static inline struct renesas_r61307 *to_renesas_r61307(struct drm_panel *panel)
+{
+ return container_of(panel, struct renesas_r61307, panel);
+}
+
+static void renesas_r61307_reset(struct renesas_r61307 *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(2000, 3000);
+}
+
+static int renesas_r61307_prepare(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+ struct device *dev = &priv->dsi->dev;
+ int ret;
+
+ if (priv->prepared)
+ return 0;
+
+ ret = regulator_enable(priv->vcc_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vcc power supply\n");
+ return ret;
+ }
+
+ usleep_range(2000, 3000);
+
+ ret = regulator_enable(priv->iovcc_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable iovcc power supply\n");
+ return ret;
+ }
+
+ usleep_range(2000, 3000);
+
+ renesas_r61307_reset(priv);
+
+ priv->prepared = true;
+ return 0;
+}
+
+static int renesas_r61307_enable(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 80);
+
+ mipi_dsi_dcs_write_seq_multi(&ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_msleep(&ctx, 20);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&ctx, MIPI_DCS_PIXEL_FMT_24BIT << 4);
+
+ /* MACP Off */
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_MACP, R61307_MACP_OFF);
+
+ if (priv->dig_cont_adj)
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_CONTRAST_SET,
+ 0xdc, 0xb4, 0xff);
+
+ if (priv->gamma)
+ mipi_dsi_generic_write_multi(&ctx, gamma_setting[priv->gamma],
+ sizeof(gamma_setting[priv->gamma]));
+
+ if (priv->inversion)
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_INVERSION,
+ 0x00, 0x50, 0x03, 0x22,
+ 0x16, 0x06, 0x60, 0x11);
+ else
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_INVERSION,
+ 0x00, 0x10, 0x03, 0x22,
+ 0x16, 0x06, 0x60, 0x01);
+
+ /* MACP On */
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_MACP, R61307_MACP_ON);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 50);
+
+ return 0;
+}
+
+static int renesas_r61307_disable(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 100);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+
+ return 0;
+}
+
+static int renesas_r61307_unprepare(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+
+ if (!priv->prepared)
+ return 0;
+
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ regulator_disable(priv->iovcc_supply);
+ usleep_range(2000, 3000);
+ regulator_disable(priv->vcc_supply);
+
+ priv->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode renesas_r61307_mode = {
+ .clock = (768 + 116 + 81 + 5) * (1024 + 24 + 8 + 2) * 60 / 1000,
+ .hdisplay = 768,
+ .hsync_start = 768 + 116,
+ .hsync_end = 768 + 116 + 81,
+ .htotal = 768 + 116 + 81 + 5,
+ .vdisplay = 1024,
+ .vsync_start = 1024 + 24,
+ .vsync_end = 1024 + 24 + 8,
+ .vtotal = 1024 + 24 + 8 + 2,
+ .width_mm = 76,
+ .height_mm = 101,
+};
+
+static int renesas_r61307_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &renesas_r61307_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs renesas_r61307_panel_funcs = {
+ .prepare = renesas_r61307_prepare,
+ .enable = renesas_r61307_enable,
+ .disable = renesas_r61307_disable,
+ .unprepare = renesas_r61307_unprepare,
+ .get_modes = renesas_r61307_get_modes,
+};
+
+static int renesas_r61307_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct renesas_r61307 *priv;
+ int ret;
+
+ priv = devm_drm_panel_alloc(dev, struct renesas_r61307, panel,
+ &renesas_r61307_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ priv->vcc_supply = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(priv->vcc_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vcc_supply),
+ "Failed to get vcc-supply\n");
+
+ priv->iovcc_supply = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(priv->iovcc_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->iovcc_supply),
+ "Failed to get iovcc-supply\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "Failed to get reset gpios\n");
+
+ if (device_property_read_bool(dev, "renesas,inversion"))
+ priv->inversion = true;
+
+ if (device_property_read_bool(dev, "renesas,contrast"))
+ priv->dig_cont_adj = true;
+
+ priv->gamma = 0;
+ device_property_read_u32(dev, "renesas,gamma", &priv->gamma);
+
+ priv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, priv);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&priv->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&priv->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ drm_panel_remove(&priv->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void renesas_r61307_remove(struct mipi_dsi_device *dsi)
+{
+ struct renesas_r61307 *priv = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&priv->panel);
+}
+
+static const struct of_device_id renesas_r61307_of_match[] = {
+ { .compatible = "hit,tx13d100vm0eaa" },
+ { .compatible = "koe,tx13d100vm0eaa" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_r61307_of_match);
+
+static struct mipi_dsi_driver renesas_r61307_driver = {
+ .probe = renesas_r61307_probe,
+ .remove = renesas_r61307_remove,
+ .driver = {
+ .name = "panel-renesas-r61307",
+ .of_match_table = renesas_r61307_of_match,
+ },
+};
+module_mipi_dsi_driver(renesas_r61307_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Renesas R61307-based panel driver");
+MODULE_LICENSE("GPL");
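
Both new Renesas drivers bracket their manufacturer-command sequences with the MACP (manufacturer command protect) register at 0xb0: writing 0x04 unlocks the vendor command space and 0x03 locks it again. A minimal sketch of the bracket, using the names from the r69328 file that follows:

	mipi_dsi_generic_write_seq_multi(&ctx, R69328_MACP, R69328_MACP_OFF);	/* unlock */
	/* ... vendor-specific gamma / power programming ... */
	mipi_dsi_generic_write_seq_multi(&ctx, R69328_MACP, R69328_MACP_ON);	/* lock */

Standard DCS commands outside the bracket (sleep-out, display-on) need no unlock.
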
diff --git a/drivers/gpu/drm/panel/panel-renesas-r69328.c b/drivers/gpu/drm/panel/panel-renesas-r69328.c
new file mode 100644
index 000000000000..46287ab04c30
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-renesas-r69328.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define R69328_MACP 0xb0 /* Manufacturer Access CMD Protect */
+#define R69328_MACP_ON 0x03
+#define R69328_MACP_OFF 0x04
+
+#define R69328_GAMMA_SET_A 0xc8 /* Gamma Setting A */
+#define R69328_GAMMA_SET_B 0xc9 /* Gamma Setting B */
+#define R69328_GAMMA_SET_C 0xca /* Gamma Setting C */
+
+#define R69328_POWER_SET 0xd1
+
+struct renesas_r69328 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator *vdd_supply;
+ struct regulator *vddio_supply;
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+};
+
+static inline struct renesas_r69328 *to_renesas_r69328(struct drm_panel *panel)
+{
+ return container_of(panel, struct renesas_r69328, panel);
+}
+
+static void renesas_r69328_reset(struct renesas_r69328 *priv)
+{
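+ /* Pulse reset: assert for ~10 ms, then release and let the panel settle */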
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(2000, 3000);
+}
+
+static int renesas_r69328_prepare(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+ struct device *dev = &priv->dsi->dev;
+ int ret;
+
+ if (priv->prepared)
+ return 0;
+
+ ret = regulator_enable(priv->vdd_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vdd power supply\n");
+ return ret;
+ }
+
+ usleep_range(10000, 11000);
+
+ ret = regulator_enable(priv->vddio_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vddio power supply\n");
+ return ret;
+ }
+
+ usleep_range(10000, 11000);
+
+ renesas_r69328_reset(priv);
+
+ priv->prepared = true;
+ return 0;
+}
+
+static int renesas_r69328_enable(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ /* Set address mode */
+ mipi_dsi_dcs_write_seq_multi(&ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
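+ /* 24 bpp on the DPI interface (DCS pixel format, bits [6:4]) */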
+ mipi_dsi_dcs_set_pixel_format_multi(&ctx, MIPI_DCS_PIXEL_FMT_24BIT << 4);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+
+ mipi_dsi_msleep(&ctx, 100);
+
+ /* MACP Off */
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_MACP, R69328_MACP_OFF);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_POWER_SET, 0x14, 0x1d,
+ 0x21, 0x67, 0x11, 0x9a);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_GAMMA_SET_A, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_GAMMA_SET_B, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_GAMMA_SET_C, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00);
+
+ /* MACP On */
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_MACP, R69328_MACP_ON);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 50);
+
+ return 0;
+}
+
+static int renesas_r69328_disable(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 60);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+
+ return 0;
+}
+
+static int renesas_r69328_unprepare(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+
+ if (!priv->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+
+ usleep_range(5000, 6000);
+
+ regulator_disable(priv->vddio_supply);
+ regulator_disable(priv->vdd_supply);
+
+ priv->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode renesas_r69328_mode = {
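+ /* Pixel clock in kHz: htotal * vtotal * 60 Hz refresh / 1000 */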
+ .clock = (720 + 92 + 62 + 4) * (1280 + 6 + 3 + 1) * 60 / 1000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 92,
+ .hsync_end = 720 + 92 + 62,
+ .htotal = 720 + 92 + 62 + 4,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 6,
+ .vsync_end = 1280 + 6 + 3,
+ .vtotal = 1280 + 6 + 3 + 1,
+ .width_mm = 59,
+ .height_mm = 105,
+};
+
+static int renesas_r69328_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &renesas_r69328_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs renesas_r69328_panel_funcs = {
+ .prepare = renesas_r69328_prepare,
+ .enable = renesas_r69328_enable,
+ .disable = renesas_r69328_disable,
+ .unprepare = renesas_r69328_unprepare,
+ .get_modes = renesas_r69328_get_modes,
+};
+
+static int renesas_r69328_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct renesas_r69328 *priv;
+ int ret;
+
+ priv = devm_drm_panel_alloc(dev, struct renesas_r69328, panel,
+ &renesas_r69328_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ priv->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(priv->vdd_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vdd_supply),
+ "Failed to get vdd-supply\n");
+
+ priv->vddio_supply = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(priv->vddio_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vddio_supply),
+ "Failed to get vddio-supply\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ priv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, priv);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&priv->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&priv->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ drm_panel_remove(&priv->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void renesas_r69328_remove(struct mipi_dsi_device *dsi)
+{
+ struct renesas_r69328 *priv = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&priv->panel);
+}
+
+static const struct of_device_id renesas_r69328_of_match[] = {
+ { .compatible = "jdi,dx12d100vm0eaa" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_r69328_of_match);
+
+static struct mipi_dsi_driver renesas_r69328_driver = {
+ .probe = renesas_r69328_probe,
+ .remove = renesas_r69328_remove,
+ .driver = {
+ .name = "panel-renesas-r69328",
+ .of_match_table = renesas_r69328_of_match,
+ },
+};
+module_mipi_dsi_driver(renesas_r69328_driver);
+
+MODULE_AUTHOR("Maxim Schwalm <maxim.schwalm@gmail.com>");
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Renesas R69328-based panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 2ef5ea5eaeeb..ad35d0fb0a16 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -143,9 +143,11 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
struct rb070d30_panel *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct rb070d30_panel, panel,
+ &rb070d30_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supply = devm_regulator_get(&dsi->dev, "vcc-lcd");
if (IS_ERR(ctx->supply))
@@ -154,9 +156,6 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel, &dsi->dev, &rb070d30_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.reset)) {
dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
diff --git a/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
index cf6186312252..188dd7cf0297 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
@@ -211,9 +211,11 @@ static int ams581vf01_probe(struct mipi_dsi_device *dsi)
struct ams581vf01 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct ams581vf01, panel,
+ &ams581vf01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(ams581vf01_supplies),
@@ -235,8 +237,6 @@ static int ams581vf01_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &ams581vf01_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = ams581vf01_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
index 817365cb5e46..f8ebbd4a530b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
@@ -257,9 +257,11 @@ static int ams639rq08_probe(struct mipi_dsi_device *dsi)
struct ams639rq08 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ams639rq08, panel,
+ &ams639rq08_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(ams639rq08_supplies),
@@ -281,8 +283,6 @@ static int ams639rq08_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &ams639rq08_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = ams639rq08_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
index 9a482a744b8c..20ec27d2d6c2 100644
--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
@@ -266,9 +266,12 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
struct device *dev = &aux_ep->dev;
int ret;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct atana33xc20_panel, base,
+ &atana33xc20_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+
dev_set_drvdata(dev, panel);
panel->aux = aux_ep->aux;
@@ -301,8 +304,6 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
if (ret)
return ret;
- drm_panel_init(&panel->base, dev, &atana33xc20_funcs, DRM_MODE_CONNECTOR_eDP);
-
pm_runtime_get_sync(dev);
ret = drm_panel_dp_aux_backlight(&panel->base, aux_ep->aux);
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/gpu/drm/panel/panel-samsung-db7430.c b/drivers/gpu/drm/panel/panel-samsung-db7430.c
index 14c6700e37b3..a97182f3c990 100644
--- a/drivers/gpu/drm/panel/panel-samsung-db7430.c
+++ b/drivers/gpu/drm/panel/panel-samsung-db7430.c
@@ -267,9 +267,11 @@ static int db7430_probe(struct spi_device *spi)
struct db7430 *db;
int ret;
- db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- return -ENOMEM;
+ db = devm_drm_panel_alloc(dev, struct db7430, panel, &db7430_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(db))
+ return PTR_ERR(db);
+
db->dev = dev;
/*
@@ -294,9 +296,6 @@ static int db7430_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
- drm_panel_init(&db->panel, dev, &db7430_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
/* FIXME: if no external backlight, use internal backlight */
ret = drm_panel_of_backlight(&db->panel);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 9f438683a6f6..c7f2241523a0 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -339,9 +339,11 @@ static int ld9040_probe(struct spi_device *spi)
struct ld9040 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct ld9040), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ld9040, panel,
+ &ld9040_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
spi_set_drvdata(spi, ctx);
@@ -373,9 +375,6 @@ static int ld9040_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
bldev = devm_backlight_device_register(dev, dev_name(dev), dev,
ctx, &ld9040_bl_ops,
&ld9040_bl_props);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index 79f611963c61..ba1a02000bb9 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -166,9 +166,11 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
struct s6d16d0 *s6;
int ret;
- s6 = devm_kzalloc(dev, sizeof(struct s6d16d0), GFP_KERNEL);
- if (!s6)
- return -ENOMEM;
+ s6 = devm_drm_panel_alloc(dev, struct s6d16d0, panel,
+ &s6d16d0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(s6))
+ return PTR_ERR(s6);
mipi_dsi_set_drvdata(dsi, s6);
s6->dev = dev;
@@ -200,9 +202,6 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&s6->panel);
ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
index 2adb223a895c..300dc19bd9d1 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
@@ -247,9 +247,11 @@ static int s6d27a1_probe(struct spi_device *spi)
struct s6d27a1 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6d27a1, panel,
+ &s6d27a1_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -277,9 +279,6 @@ static int s6d27a1_probe(struct spi_device *spi)
ctx->dbi.read_commands = s6d27a1_dbi_read_commands;
- drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "failed to add backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
index 93f11e2e9398..692020081524 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
@@ -244,7 +244,7 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
.init_func = s6d7aa0_lsl080al02_init,
.off_func = s6d7aa0_lsl080al02_off,
.drm_mode = &s6d7aa0_lsl080al02_mode,
- .mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP,
+ .mode_flags = MIPI_DSI_MODE_VIDEO_NO_HFP,
.bus_flags = 0,
.has_backlight = false,
@@ -392,9 +392,11 @@ static int s6d7aa0_probe(struct mipi_dsi_device *dsi)
struct s6d7aa0 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6d7aa0, panel,
+ &s6d7aa0_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->desc = of_device_get_match_data(dev);
if (!ctx->desc)
@@ -420,8 +422,6 @@ static int s6d7aa0_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
| ctx->desc->mode_flags;
- drm_panel_init(&ctx->panel, dev, &s6d7aa0_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
index 27a059b55ae5..f4d75eca3cdf 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
@@ -185,9 +185,11 @@ static int s6e3fa7_panel_probe(struct mipi_dsi_device *dsi)
struct s6e3fa7_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e3fa7_panel, panel,
+ &s6e3fa7_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -202,8 +204,6 @@ static int s6e3fa7_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &s6e3fa7_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = s6e3fa7_panel_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index ab8b58545284..1db0c63b1131 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -681,9 +681,11 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
struct s6e3ha2 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e3ha2, panel,
+ &s6e3ha2_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -731,8 +733,6 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
ctx->bl_dev->props.power = BACKLIGHT_POWER_OFF;
- drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
index 64c6f7d45bed..550e9ef9bb71 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
@@ -253,9 +253,11 @@ static int s6e3ha8_amb577px01_wqhd_probe(struct mipi_dsi_device *dsi)
struct s6e3ha8 *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct s6e3ha8, panel,
+ &s6e3ha8_amb577px01_wqhd_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(s6e3ha8_supplies),
s6e3ha8_supplies,
@@ -279,8 +281,6 @@ static int s6e3ha8_amb577px01_wqhd_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&priv->panel, dev, &s6e3ha8_amb577px01_wqhd_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
priv->panel.prepare_prev_first = true;
drm_panel_add(&priv->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index 364f1c9a16d9..6f3d39556f92 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -437,9 +437,11 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
struct s6e63j0x03 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct s6e63j0x03), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e63j0x03, panel,
+ &s6e63j0x03_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -462,8 +464,6 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"cannot get reset-gpio\n");
- drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 6917ffda5b2b..ea241c89593b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -13,6 +13,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
index e92e95158d1f..e91f50662997 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
@@ -687,9 +687,11 @@ static int s6e88a0_ams427ap24_probe(struct mipi_dsi_device *dsi)
struct s6e88a0_ams427ap24 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e88a0_ams427ap24, panel,
+ &s6e88a0_ams427ap24_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(dev,
ARRAY_SIZE(s6e88a0_ams427ap24_supplies),
@@ -711,8 +713,6 @@ static int s6e88a0_ams427ap24_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_NO_HFP;
- drm_panel_init(&ctx->panel, dev, &s6e88a0_ams427ap24_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->flip_horizontal = device_property_read_bool(dev, "flip-horizontal");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index 57b1a899bbdc..ca5cad41ff1d 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -165,9 +165,11 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
struct s6e88a0_ams452ef01 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e88a0_ams452ef01, panel,
+ &s6e88a0_ams452ef01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd3";
ctx->supplies[1].supply = "vci";
@@ -192,9 +194,6 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
- drm_panel_init(&ctx->panel, dev, &s6e88a0_ams452ef01_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index c51d07ec1529..1b5c500d4f4e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -979,9 +979,11 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
struct s6e8aa0 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct s6e8aa0), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e8aa0, panel,
+ &s6e8aa0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -990,7 +992,7 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
- | MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
+ | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
ret = s6e8aa0_parse_dt(ctx);
if (ret < 0)
@@ -1014,8 +1016,6 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
ctx->brightness = GAMMA_LEVEL_NUM - 1;
- drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index d92ae6b6100f..064258217d50 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -191,9 +191,11 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
struct sofef00_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct sofef00_panel, panel,
+ &sofef00_panel_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(ctx->supply))
@@ -211,9 +213,6 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
- drm_panel_init(&ctx->panel, dev, &sofef00_panel_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = sofef00_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 7d1b421ea9dd..0935d83ee2db 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -204,9 +204,11 @@ static int seiko_panel_probe(struct device *dev,
struct seiko_panel *panel;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct seiko_panel, base,
+ &seiko_panel_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->desc = desc;
@@ -224,9 +226,6 @@ static int seiko_panel_probe(struct device *dev,
return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
"failed to request GPIO\n");
- drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&panel->base);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index a0d76d588da1..d159b0e4fdb6 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -279,9 +279,6 @@ static int sharp_panel_add(struct sharp_panel *sharp)
if (IS_ERR(sharp->supply))
return PTR_ERR(sharp->supply);
- drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&sharp->base);
if (ret)
return ret;
@@ -323,10 +320,12 @@ static int sharp_panel_probe(struct mipi_dsi_device *dsi)
/* register a panel for only the DSI-LINK1 interface */
if (secondary) {
- sharp = devm_kzalloc(&dsi->dev, sizeof(*sharp), GFP_KERNEL);
- if (!sharp) {
+ sharp = devm_drm_panel_alloc(&dsi->dev, struct sharp_panel, base,
+ &sharp_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(sharp)) {
put_device(&secondary->dev);
- return -ENOMEM;
+ return PTR_ERR(sharp);
}
mipi_dsi_set_drvdata(dsi, sharp);
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index a9673a52b861..938beac4655d 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -138,9 +138,10 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
{
struct ls037v7dw01_panel *lcd;
- lcd = devm_kzalloc(&pdev->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&pdev->dev, struct ls037v7dw01_panel, panel,
+ &ls037v7dw01_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
platform_set_drvdata(pdev, lcd);
lcd->pdev = pdev;
@@ -181,9 +182,6 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
return PTR_ERR(lcd->ud_gpio);
}
- drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
index 0b4e0983639b..0456f3d705e7 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
@@ -193,9 +193,11 @@ static int sharp_ls060_probe(struct mipi_dsi_device *dsi)
struct sharp_ls060 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct sharp_ls060, panel,
+ &sharp_ls060_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->vddi_supply = devm_regulator_get(dev, "vddi");
if (IS_ERR(ctx->vddi_supply))
@@ -227,9 +229,6 @@ static int sharp_ls060_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &sharp_ls060_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9f81fa960b46..3333d4a07504 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1305,6 +1305,30 @@ static const struct panel_desc auo_g190ean01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing auo_p238han01_timings = {
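+ /* Each timing entry is { min, typical, max } */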
+ .pixelclock = { 107400000, 142400000, 180000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 30, 70, 650 },
+ .hback_porch = { 30, 70, 650 },
+ .hsync_len = { 20, 40, 136 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 5, 19, 318 },
+ .vback_porch = { 5, 19, 318 },
+ .vsync_len = { 4, 12, 120 },
+};
+
+static const struct panel_desc auo_p238han01 = {
+ .timings = &auo_p238han01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 527,
+ .height = 296,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_p320hvn03_timings = {
.pixelclock = { 106000000, 148500000, 164000000 },
.hactive = { 1920, 1920, 1920 },
@@ -4976,6 +5000,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g190ean01",
.data = &auo_g190ean01,
}, {
+ .compatible = "auo,p238han01",
+ .data = &auo_p238han01,
+ }, {
.compatible = "auo,p320hvn03",
.data = &auo_p320hvn03,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 1f72ef7ca74c..2f79ec4a2063 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -520,6 +520,28 @@ static void rg28xx_gip_sequence(struct st7701 *st7701)
st7701_switch_cmd_bkx(st7701, false, 0);
}
+static void wf40eswaa6mnn0_gip_sequence(struct st7701 *st7701)
+{
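+ /* Vendor-supplied GIP (gate-in-panel) init sequence; register values are opaque magic */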
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x28, 0x02);
+ ST7701_WRITE(st7701, 0xE1, 0x08, 0xA0, 0x00, 0x00, 0x07, 0xA0, 0x00,
+ 0x00, 0x00, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE2, 0x11, 0x11, 0x44, 0x44, 0xED, 0xA0, 0x00,
+ 0x00, 0xEC, 0xA0, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xE3, 0x00, 0x00, 0x11, 0x11);
+ ST7701_WRITE(st7701, 0xE4, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE5, 0x0A, 0xE9, 0xD8, 0xA0, 0x0C, 0xEB, 0xD8,
+ 0xA0, 0x0E, 0xED, 0xD8, 0xA0, 0x10, 0xEF, 0xD8, 0xA0);
+ ST7701_WRITE(st7701, 0xE6, 0x00, 0x00, 0x11, 0x11);
+ ST7701_WRITE(st7701, 0xE7, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE8, 0x09, 0xE8, 0xD8, 0xA0, 0x0B, 0xEA, 0xD8,
+ 0xA0, 0x0D, 0xEC, 0xD8, 0xA0, 0x0F, 0xEE, 0xD8, 0xA0);
+ ST7701_WRITE(st7701, 0xEB, 0x00, 0x00, 0xE4, 0xE4, 0x88, 0x00, 0x40);
+ ST7701_WRITE(st7701, 0xEC, 0x3C, 0x00);
+ ST7701_WRITE(st7701, 0xED, 0xAB, 0x89, 0x76, 0x54, 0x02, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x20, 0x45, 0x67, 0x98, 0xBA);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_ADDRESS_MODE, 0);
+}
+
static int st7701_prepare(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
@@ -1135,6 +1157,107 @@ static const struct st7701_panel_desc rg28xx_desc = {
.gip_sequence = rg28xx_gip_sequence,
};
+static const struct drm_display_mode wf40eswaa6mnn0_mode = {
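+ /* 540 * 565 * 60 Hz gives an 18.306 MHz pixel clock */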
+ .clock = 18306,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 45,
+ .htotal = 480 + 2 + 45 + 13,
+
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+ .vsync_end = 480 + 2 + 70,
+ .vtotal = 480 + 2 + 70 + 13,
+
+ .width_mm = 72,
+ .height_mm = 70,
+
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct st7701_panel_desc wf40eswaa6mnn0_desc = {
+ .mode = &wf40eswaa6mnn0_mode,
+ .lanes = 2,
+ .format = MIPI_DSI_FMT_RGB888,
+ .panel_sleep_delay = 0,
+
+ .pv_gamma = {
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0x1),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x08),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0c),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x08),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x0c),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x08),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x04),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x14),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x12),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0xb3),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3a),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x13),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x19),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0f),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x14),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x07),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x07),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x08),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x07),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x02),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x0f),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x0f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0xa3),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x0d)
+ },
+ .nlinv = 3,
+ .vop_uv = 4737500,
+ .vcom_uv = 662500,
+ .vgh_mv = 15000,
+ .vgl_mv = -10170,
+ .avdd_mv = 6600,
+ .avcl_mv = -4600,
+ .gamma_op_bias = OP_BIAS_MIDDLE,
+ .input_op_bias = OP_BIAS_MIDDLE,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = wf40eswaa6mnn0_gip_sequence,
+};
+
static void st7701_cleanup(void *data)
{
struct st7701 *st7701 = (struct st7701 *)data;
@@ -1150,9 +1273,10 @@ static int st7701_probe(struct device *dev, int connector_type)
struct st7701 *st7701;
int ret;
- st7701 = devm_kzalloc(dev, sizeof(*st7701), GFP_KERNEL);
- if (!st7701)
- return -ENOMEM;
+ st7701 = devm_drm_panel_alloc(dev, struct st7701, panel, &st7701_funcs,
+ connector_type);
+ if (IS_ERR(st7701))
+ return PTR_ERR(st7701);
desc = of_device_get_match_data(dev);
if (!desc)
@@ -1176,7 +1300,6 @@ static int st7701_probe(struct device *dev, int connector_type)
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to get orientation\n");
- drm_panel_init(&st7701->panel, dev, &st7701_funcs, connector_type);
st7701->panel.prepare_prev_first = true;
/**
@@ -1265,6 +1388,7 @@ static const struct of_device_id st7701_dsi_of_match[] = {
{ .compatible = "densitron,dmt028vghmcmi-1a", .data = &dmt028vghmcmi_1a_desc },
{ .compatible = "elida,kd50t048a", .data = &kd50t048a_desc },
{ .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
+ { .compatible = "winstar,wf40eswaa6mnn0", .data = &wf40eswaa6mnn0_desc },
{ }
};
MODULE_DEVICE_TABLE(of, st7701_dsi_of_match);
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 67e8e45498cb..1a007a244d84 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -846,9 +846,11 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
struct st7703 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct st7703, panel,
+ &st7703_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio))
@@ -876,9 +878,6 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return dev_err_probe(&dsi->dev, ret, "Failed to get orientation\n");
- drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 28bfc48a9127..04d91929eedd 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -612,9 +612,10 @@ static int st7789v_probe(struct spi_device *spi)
struct st7789v *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct st7789v, panel,
+ &st7789v_drm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
spi_set_drvdata(spi, ctx);
ctx->spi = spi;
@@ -626,9 +627,6 @@ static int st7789v_probe(struct spi_device *spi)
ctx->info = device_get_match_data(&spi->dev);
- drm_panel_init(&ctx->panel, dev, &st7789v_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ctx->power = devm_regulator_get(dev, "power");
ret = PTR_ERR_OR_ZERO(ctx->power);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index d437f5c84f5f..fe043de791b0 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -607,9 +607,10 @@ static int acx565akm_probe(struct spi_device *spi)
struct acx565akm_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct acx565akm_panel, panel,
+ &acx565akm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
spi->mode = SPI_MODE_3;
@@ -635,9 +636,6 @@ static int acx565akm_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
index 97f4bb4e1029..7c989b70ab51 100644
--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
@@ -175,9 +175,11 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
struct sony_td4353_jdi *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct sony_td4353_jdi, panel,
+ &sony_td4353_jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->type = (uintptr_t)of_device_get_match_data(dev);
@@ -206,9 +208,6 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &sony_td4353_jdi_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
index 104b2290560e..216a6ad8696e 100644
--- a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -433,9 +433,11 @@ static int truly_nt35521_probe(struct mipi_dsi_device *dsi)
struct truly_nt35521 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct truly_nt35521, panel,
+ &truly_nt35521_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "positive5";
ctx->supplies[1].supply = "negative5";
@@ -465,9 +467,6 @@ static int truly_nt35521_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &truly_nt35521_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = truly_nt35521_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-summit.c b/drivers/gpu/drm/panel/panel-summit.c
index e780faee1857..4854437e2899 100644
--- a/drivers/gpu/drm/panel/panel-summit.c
+++ b/drivers/gpu/drm/panel/panel-summit.c
@@ -68,9 +68,11 @@ static int summit_probe(struct mipi_dsi_device *dsi)
struct summit_data *s_data;
int ret;
- s_data = devm_kzalloc(dev, sizeof(*s_data), GFP_KERNEL);
- if (!s_data)
- return -ENOMEM;
+ s_data = devm_drm_panel_alloc(dev, struct summit_data, panel,
+ &summit_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(s_data))
+ return PTR_ERR(s_data);
mipi_dsi_set_drvdata(dsi, s_data);
s_data->dsi = dsi;
@@ -85,8 +87,6 @@ static int summit_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(s_data->bl))
return PTR_ERR(s_data->bl);
- drm_panel_init(&s_data->panel, dev, &summit_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&s_data->panel);
return mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
index b148e6cba9bd..3a74d48753d9 100644
--- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c
+++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
@@ -229,9 +229,11 @@ static int r63353_panel_probe(struct mipi_dsi_device *dsi)
struct device *dev = &dsi->dev;
struct r63353_panel *panel;
- panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct r63353_panel, base,
+ &r63353_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
mipi_dsi_set_drvdata(dsi, panel);
panel->dsi = dsi;
@@ -258,9 +260,6 @@ static int r63353_panel_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(panel->reset_gpio);
}
- drm_panel_init(&panel->base, dev, &r63353_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
panel->base.prepare_prev_first = true;
ret = drm_panel_of_backlight(&panel->base);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index 11d460d2ea19..ee86ff20c1bd 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -318,9 +318,11 @@ static int td028ttec1_probe(struct spi_device *spi)
struct td028ttec1_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct td028ttec1_panel, panel,
+ &td028ttec1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -334,9 +336,6 @@ static int td028ttec1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&lcd->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index cf4609bb9b1d..b18af526b54c 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -421,9 +421,10 @@ static int td043mtea1_probe(struct spi_device *spi)
struct td043mtea1_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (lcd == NULL)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct td043mtea1_panel, panel,
+ &td043mtea1_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -455,9 +456,6 @@ static int td043mtea1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index f6a212e542cb..0beba5c08956 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -405,9 +405,11 @@ static int tpg110_probe(struct spi_device *spi)
struct tpg110 *tpg;
int ret;
- tpg = devm_kzalloc(dev, sizeof(*tpg), GFP_KERNEL);
- if (!tpg)
- return -ENOMEM;
+ tpg = devm_drm_panel_alloc(dev, struct tpg110, panel,
+ &tpg110_drm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(tpg))
+ return PTR_ERR(tpg);
+
tpg->dev = dev;
/* We get the physical display dimensions from the DT */
@@ -438,9 +440,6 @@ static int tpg110_probe(struct spi_device *spi)
if (ret)
return ret;
- drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&tpg->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-visionox-r66451.c b/drivers/gpu/drm/panel/panel-visionox-r66451.c
index 3ea0a86f6e69..690cccedd438 100644
--- a/drivers/gpu/drm/panel/panel-visionox-r66451.c
+++ b/drivers/gpu/drm/panel/panel-visionox-r66451.c
@@ -255,9 +255,11 @@ static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
struct drm_dsc_config *dsc;
int ret = 0;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_r66451, panel,
+ &visionox_r66451_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
dsc = devm_kzalloc(dev, sizeof(*dsc), GFP_KERNEL);
if (!dsc)
@@ -297,7 +299,6 @@ static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
ctx->panel.prepare_prev_first = true;
- drm_panel_init(&ctx->panel, dev, &visionox_r66451_funcs, DRM_MODE_CONNECTOR_DSI);
ctx->panel.backlight = visionox_r66451_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index be3a9797fbce..909c280eab1f 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/mod_devicetable.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
@@ -15,11 +16,138 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+struct visionox_rm69299_panel_desc {
+ const struct drm_display_mode *mode;
+ const u8 *init_seq;
+ unsigned int init_seq_len;
+};
+
struct visionox_rm69299 {
struct drm_panel panel;
- struct regulator_bulk_data supplies[2];
+ struct regulator_bulk_data *supplies;
struct gpio_desc *reset_gpio;
struct mipi_dsi_device *dsi;
+ const struct visionox_rm69299_panel_desc *desc;
+};
+
+static const struct regulator_bulk_data visionox_rm69299_supplies[] = {
+ { .supply = "vdda", .init_load_uA = 32000 },
+ { .supply = "vdd3p3", .init_load_uA = 13200 },
+};
+
+static const u8 visionox_rm69299_1080x2248_60hz_init_seq[][2] = {
+ { 0xfe, 0x00 }, { 0xc2, 0x08 }, { 0x35, 0x00 }, { 0x51, 0xff },
+};
+
+static const u8 visionox_rm69299_1080x2160_60hz_init_seq[][2] = {
+ { 0xfe, 0x40 }, { 0x05, 0x04 }, { 0x06, 0x08 }, { 0x08, 0x04 },
+ { 0x09, 0x08 }, { 0x0a, 0x07 }, { 0x0b, 0xcc }, { 0x0c, 0x07 },
+ { 0x0d, 0x90 }, { 0x0f, 0x87 }, { 0x20, 0x8d }, { 0x21, 0x8d },
+ { 0x24, 0x05 }, { 0x26, 0x05 }, { 0x28, 0x05 }, { 0x2a, 0x05 },
+ { 0x2d, 0x28 }, { 0x2f, 0x28 }, { 0x30, 0x32 }, { 0x31, 0x32 },
+ { 0x37, 0x80 }, { 0x38, 0x30 }, { 0x39, 0xa8 }, { 0x46, 0x48 },
+ { 0x47, 0x48 }, { 0x6b, 0x10 }, { 0x6f, 0x02 }, { 0x74, 0x2b },
+ { 0x80, 0x1a }, { 0xfe, 0x40 }, { 0x93, 0x10 }, { 0x16, 0x00 },
+ { 0x85, 0x07 }, { 0x84, 0x01 }, { 0x86, 0x0f }, { 0x87, 0x05 },
+ { 0x8c, 0x00 }, { 0x88, 0x2e }, { 0x89, 0x2e }, { 0x8b, 0x09 },
+ { 0x95, 0x00 }, { 0x91, 0x00 }, { 0x90, 0x00 }, { 0x8d, 0xd0 },
+ { 0x8a, 0x03 }, { 0xfe, 0xa0 }, { 0x13, 0x00 }, { 0x33, 0x00 },
+ { 0x0b, 0x33 }, { 0x36, 0x1e }, { 0x31, 0x88 }, { 0x32, 0x88 },
+ { 0x37, 0xf1 }, { 0xfe, 0x50 }, { 0x00, 0x00 }, { 0x01, 0x00 },
+ { 0x02, 0x00 }, { 0x03, 0xe9 }, { 0x04, 0x00 }, { 0x05, 0xf6 },
+ { 0x06, 0x01 }, { 0x07, 0x2c }, { 0x08, 0x01 }, { 0x09, 0x62 },
+ { 0x0a, 0x01 }, { 0x0b, 0x98 }, { 0x0c, 0x01 }, { 0x0d, 0xbf },
+ { 0x0e, 0x01 }, { 0x0f, 0xf6 }, { 0x10, 0x02 }, { 0x11, 0x24 },
+ { 0x12, 0x02 }, { 0x13, 0x4e }, { 0x14, 0x02 }, { 0x15, 0x70 },
+ { 0x16, 0x02 }, { 0x17, 0xaf }, { 0x18, 0x02 }, { 0x19, 0xe2 },
+ { 0x1a, 0x03 }, { 0x1b, 0x1f }, { 0x1c, 0x03 }, { 0x1d, 0x52 },
+ { 0x1e, 0x03 }, { 0x1f, 0x82 }, { 0x20, 0x03 }, { 0x21, 0xb6 },
+ { 0x22, 0x03 }, { 0x23, 0xf0 }, { 0x24, 0x04 }, { 0x25, 0x1f },
+ { 0x26, 0x04 }, { 0x27, 0x37 }, { 0x28, 0x04 }, { 0x29, 0x59 },
+ { 0x2a, 0x04 }, { 0x2b, 0x68 }, { 0x30, 0x04 }, { 0x31, 0x85 },
+ { 0x32, 0x04 }, { 0x33, 0xa2 }, { 0x34, 0x04 }, { 0x35, 0xbc },
+ { 0x36, 0x04 }, { 0x37, 0xd8 }, { 0x38, 0x04 }, { 0x39, 0xf4 },
+ { 0x3a, 0x05 }, { 0x3b, 0x0e }, { 0x40, 0x05 }, { 0x41, 0x13 },
+ { 0x42, 0x05 }, { 0x43, 0x1f }, { 0x44, 0x05 }, { 0x45, 0x1f },
+ { 0x46, 0x00 }, { 0x47, 0x00 }, { 0x48, 0x01 }, { 0x49, 0x43 },
+ { 0x4a, 0x01 }, { 0x4b, 0x4c }, { 0x4c, 0x01 }, { 0x4d, 0x6f },
+ { 0x4e, 0x01 }, { 0x4f, 0x92 }, { 0x50, 0x01 }, { 0x51, 0xb5 },
+ { 0x52, 0x01 }, { 0x53, 0xd4 }, { 0x58, 0x02 }, { 0x59, 0x06 },
+ { 0x5a, 0x02 }, { 0x5b, 0x33 }, { 0x5c, 0x02 }, { 0x5d, 0x59 },
+ { 0x5e, 0x02 }, { 0x5f, 0x7d }, { 0x60, 0x02 }, { 0x61, 0xbd },
+ { 0x62, 0x02 }, { 0x63, 0xf7 }, { 0x64, 0x03 }, { 0x65, 0x31 },
+ { 0x66, 0x03 }, { 0x67, 0x63 }, { 0x68, 0x03 }, { 0x69, 0x9d },
+ { 0x6a, 0x03 }, { 0x6b, 0xd2 }, { 0x6c, 0x04 }, { 0x6d, 0x05 },
+ { 0x6e, 0x04 }, { 0x6f, 0x38 }, { 0x70, 0x04 }, { 0x71, 0x51 },
+ { 0x72, 0x04 }, { 0x73, 0x70 }, { 0x74, 0x04 }, { 0x75, 0x85 },
+ { 0x76, 0x04 }, { 0x77, 0xa1 }, { 0x78, 0x04 }, { 0x79, 0xc0 },
+ { 0x7a, 0x04 }, { 0x7b, 0xd8 }, { 0x7c, 0x04 }, { 0x7d, 0xf2 },
+ { 0x7e, 0x05 }, { 0x7f, 0x10 }, { 0x80, 0x05 }, { 0x81, 0x21 },
+ { 0x82, 0x05 }, { 0x83, 0x2e }, { 0x84, 0x05 }, { 0x85, 0x3a },
+ { 0x86, 0x05 }, { 0x87, 0x3e }, { 0x88, 0x00 }, { 0x89, 0x00 },
+ { 0x8a, 0x01 }, { 0x8b, 0x86 }, { 0x8c, 0x01 }, { 0x8d, 0x8f },
+ { 0x8e, 0x01 }, { 0x8f, 0xb3 }, { 0x90, 0x01 }, { 0x91, 0xd7 },
+ { 0x92, 0x01 }, { 0x93, 0xfb }, { 0x94, 0x02 }, { 0x95, 0x18 },
+ { 0x96, 0x02 }, { 0x97, 0x4f }, { 0x98, 0x02 }, { 0x99, 0x7e },
+ { 0x9a, 0x02 }, { 0x9b, 0xa6 }, { 0x9c, 0x02 }, { 0x9d, 0xcf },
+ { 0x9e, 0x03 }, { 0x9f, 0x14 }, { 0xa4, 0x03 }, { 0xa5, 0x52 },
+ { 0xa6, 0x03 }, { 0xa7, 0x93 }, { 0xac, 0x03 }, { 0xad, 0xcf },
+ { 0xae, 0x04 }, { 0xaf, 0x08 }, { 0xb0, 0x04 }, { 0xb1, 0x42 },
+ { 0xb2, 0x04 }, { 0xb3, 0x7f }, { 0xb4, 0x04 }, { 0xb5, 0xb4 },
+ { 0xb6, 0x04 }, { 0xb7, 0xcc }, { 0xb8, 0x04 }, { 0xb9, 0xf2 },
+ { 0xba, 0x05 }, { 0xbb, 0x0c }, { 0xbc, 0x05 }, { 0xbd, 0x26 },
+ { 0xbe, 0x05 }, { 0xbf, 0x4b }, { 0xc0, 0x05 }, { 0xc1, 0x64 },
+ { 0xc2, 0x05 }, { 0xc3, 0x83 }, { 0xc4, 0x05 }, { 0xc5, 0xa1 },
+ { 0xc6, 0x05 }, { 0xc7, 0xba }, { 0xc8, 0x05 }, { 0xc9, 0xc4 },
+ { 0xca, 0x05 }, { 0xcb, 0xd5 }, { 0xcc, 0x05 }, { 0xcd, 0xd5 },
+ { 0xce, 0x00 }, { 0xcf, 0xce }, { 0xd0, 0x00 }, { 0xd1, 0xdb },
+ { 0xd2, 0x01 }, { 0xd3, 0x32 }, { 0xd4, 0x01 }, { 0xd5, 0x3b },
+ { 0xd6, 0x01 }, { 0xd7, 0x74 }, { 0xd8, 0x01 }, { 0xd9, 0x7d },
+ { 0xfe, 0x60 }, { 0x00, 0xcc }, { 0x01, 0x0f }, { 0x02, 0xff },
+ { 0x03, 0x01 }, { 0x04, 0x00 }, { 0x05, 0x02 }, { 0x06, 0x00 },
+ { 0x07, 0x00 }, { 0x09, 0xc4 }, { 0x0a, 0x00 }, { 0x0b, 0x04 },
+ { 0x0c, 0x01 }, { 0x0d, 0x00 }, { 0x0e, 0x04 }, { 0x0f, 0x00 },
+ { 0x10, 0x71 }, { 0x12, 0xc4 }, { 0x13, 0x00 }, { 0x14, 0x04 },
+ { 0x15, 0x01 }, { 0x16, 0x00 }, { 0x17, 0x06 }, { 0x18, 0x00 },
+ { 0x19, 0x71 }, { 0x1b, 0xc4 }, { 0x1c, 0x00 }, { 0x1d, 0x02 },
+ { 0x1e, 0x00 }, { 0x1f, 0x00 }, { 0x20, 0x08 }, { 0x21, 0x66 },
+ { 0x22, 0xb4 }, { 0x24, 0xc4 }, { 0x25, 0x00 }, { 0x26, 0x02 },
+ { 0x27, 0x00 }, { 0x28, 0x00 }, { 0x29, 0x07 }, { 0x2a, 0x66 },
+ { 0x2b, 0xb4 }, { 0x2f, 0xc4 }, { 0x30, 0x00 }, { 0x31, 0x04 },
+ { 0x32, 0x01 }, { 0x33, 0x00 }, { 0x34, 0x03 }, { 0x35, 0x00 },
+ { 0x36, 0x71 }, { 0x38, 0xc4 }, { 0x39, 0x00 }, { 0x3a, 0x04 },
+ { 0x3b, 0x01 }, { 0x3d, 0x00 }, { 0x3f, 0x05 }, { 0x40, 0x00 },
+ { 0x41, 0x71 }, { 0x83, 0xce }, { 0x84, 0x02 }, { 0x85, 0x20 },
+ { 0x86, 0xdc }, { 0x87, 0x00 }, { 0x88, 0x04 }, { 0x89, 0x00 },
+ { 0x8a, 0xbb }, { 0x8b, 0x80 }, { 0xc7, 0x0e }, { 0xc8, 0x05 },
+ { 0xc9, 0x1f }, { 0xca, 0x06 }, { 0xcb, 0x00 }, { 0xcc, 0x03 },
+ { 0xcd, 0x04 }, { 0xce, 0x1f }, { 0xcf, 0x1f }, { 0xd0, 0x1f },
+ { 0xd1, 0x1f }, { 0xd2, 0x1f }, { 0xd3, 0x1f }, { 0xd4, 0x1f },
+ { 0xd5, 0x1f }, { 0xd6, 0x1f }, { 0xd7, 0x17 }, { 0xd8, 0x1f },
+ { 0xd9, 0x16 }, { 0xda, 0x1f }, { 0xdb, 0x0e }, { 0xdc, 0x01 },
+ { 0xdd, 0x1f }, { 0xde, 0x02 }, { 0xdf, 0x00 }, { 0xe0, 0x03 },
+ { 0xe1, 0x04 }, { 0xe2, 0x1f }, { 0xe3, 0x1f }, { 0xe4, 0x1f },
+ { 0xe5, 0x1f }, { 0xe6, 0x1f }, { 0xe7, 0x1f }, { 0xe8, 0x1f },
+ { 0xe9, 0x1f }, { 0xea, 0x1f }, { 0xeb, 0x17 }, { 0xec, 0x1f },
+ { 0xed, 0x16 }, { 0xee, 0x1f }, { 0xef, 0x03 }, { 0xfe, 0x70 },
+ { 0x5a, 0x0b }, { 0x5b, 0x0b }, { 0x5c, 0x55 }, { 0x5d, 0x24 },
+ { 0xfe, 0x90 }, { 0x12, 0x24 }, { 0x13, 0x49 }, { 0x14, 0x92 },
+ { 0x15, 0x86 }, { 0x16, 0x61 }, { 0x17, 0x18 }, { 0x18, 0x24 },
+ { 0x19, 0x49 }, { 0x1a, 0x92 }, { 0x1b, 0x86 }, { 0x1c, 0x61 },
+ { 0x1d, 0x18 }, { 0x1e, 0x24 }, { 0x1f, 0x49 }, { 0x20, 0x92 },
+ { 0x21, 0x86 }, { 0x22, 0x61 }, { 0x23, 0x18 }, { 0xfe, 0x40 },
+ { 0x0e, 0x10 }, { 0xfe, 0xa0 }, { 0x04, 0x80 }, { 0x16, 0x00 },
+ { 0x26, 0x10 }, { 0x2f, 0x37 }, { 0xfe, 0xd0 }, { 0x06, 0x0f },
+ { 0x4b, 0x00 }, { 0x56, 0x4a }, { 0xfe, 0x00 }, { 0xc2, 0x09 },
+ { 0x35, 0x00 }, { 0xfe, 0x70 }, { 0x7d, 0x61 }, { 0x7f, 0x00 },
+ { 0x7e, 0x4e }, { 0x52, 0x2c }, { 0x49, 0x00 }, { 0x4a, 0x00 },
+ { 0x4b, 0x00 }, { 0x4c, 0x00 }, { 0x4d, 0xe8 }, { 0x4e, 0x25 },
+ { 0x4f, 0x6e }, { 0x50, 0xae }, { 0x51, 0x2f }, { 0xad, 0xf4 },
+ { 0xae, 0x8f }, { 0xaf, 0x00 }, { 0xb0, 0x54 }, { 0xb1, 0x3a },
+ { 0xb2, 0x00 }, { 0xb3, 0x00 }, { 0xb4, 0x00 }, { 0xb5, 0x00 },
+ { 0xb6, 0x18 }, { 0xb7, 0x30 }, { 0xb8, 0x4a }, { 0xb9, 0x98 },
+ { 0xba, 0x30 }, { 0xbb, 0x60 }, { 0xbc, 0x50 }, { 0xbd, 0x00 },
+ { 0xbe, 0x00 }, { 0xbf, 0x39 }, { 0xfe, 0x00 }, { 0x51, 0x66 },
};
static inline struct visionox_rm69299 *panel_to_ctx(struct drm_panel *panel)
@@ -31,7 +159,8 @@ static int visionox_rm69299_power_on(struct visionox_rm69299 *ctx)
{
int ret;
- ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ ret = regulator_bulk_enable(ARRAY_SIZE(visionox_rm69299_supplies),
+ ctx->supplies);
if (ret < 0)
return ret;
@@ -54,37 +183,32 @@ static int visionox_rm69299_power_off(struct visionox_rm69299 *ctx)
{
gpiod_set_value(ctx->reset_gpio, 0);
- return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return regulator_bulk_disable(ARRAY_SIZE(visionox_rm69299_supplies),
+ ctx->supplies);
}
static int visionox_rm69299_unprepare(struct drm_panel *panel)
{
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
ctx->dsi->mode_flags = 0;
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
- if (ret < 0)
- dev_err(ctx->panel.dev, "set_display_off cmd failed ret = %d\n", ret);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
/* 120ms delay required here as per DCS spec */
- msleep(120);
-
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "enter_sleep cmd failed ret = %d\n", ret);
- }
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = visionox_rm69299_power_off(ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return ret;
+ return visionox_rm69299_power_off(ctx);
}
static int visionox_rm69299_prepare(struct drm_panel *panel)
{
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+ int ret, i;
ret = visionox_rm69299_power_on(ctx);
if (ret < 0)
@@ -92,52 +216,20 @@ static int visionox_rm69299_prepare(struct drm_panel *panel)
ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xfe, 0x00 }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 0 failed, ret = %d\n", ret);
- goto power_off;
- }
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xc2, 0x08 }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 1 failed, ret = %d\n", ret);
- goto power_off;
- }
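+ /*
+ * init_seq is a flat array of { register, value } byte pairs; each
+ * pair is sent as one 2-byte DCS write, with any error accumulated
+ * in dsi_ctx.accum_err by the _multi() helper.
+ */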
+ for (i = 0; i < ctx->desc->init_seq_len; i++)
+ mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, &ctx->desc->init_seq[i * 2], 2);
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x35, 0x00 }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 2 failed, ret = %d\n", ret);
- goto power_off;
- }
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x51, 0xff }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 3 failed, ret = %d\n", ret);
- goto power_off;
- }
-
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "exit_sleep_mode cmd failed ret = %d\n", ret);
- goto power_off;
- }
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
/* Per DSI spec wait 120ms after sending exit sleep DCS command */
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "set_display_on cmd failed ret = %d\n", ret);
- goto power_off;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
/* Per DSI spec wait 120ms after sending set_display_on DCS command */
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- return 0;
-
-power_off:
- return ret;
+ return dsi_ctx.accum_err;
}
static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
@@ -154,14 +246,26 @@ static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
.flags = 0,
};
+static const struct drm_display_mode visionox_rm69299_1080x2160_60hz = {
+ .clock = 158695,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 2,
+ .htotal = 1080 + 26 + 2 + 36,
+ .vdisplay = 2160,
+ .vsync_start = 2160 + 8,
+ .vsync_end = 2160 + 8 + 4,
+ .vtotal = 2160 + 8 + 4 + 4,
+ .flags = 0,
+};
+
static int visionox_rm69299_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev,
- &visionox_rm69299_1080x2248_60hz);
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
if (!mode) {
dev_err(ctx->panel.dev, "failed to create a new display mode\n");
return 0;
@@ -187,20 +291,22 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_rm69299, panel,
+ &visionox_rm69299_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ctx->desc = device_get_match_data(dev);
+ if (!ctx->desc)
+ return -EINVAL;
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- ctx->supplies[0].supply = "vdda";
- ctx->supplies[0].init_load_uA = 32000;
- ctx->supplies[1].supply = "vdd3p3";
- ctx->supplies[1].init_load_uA = 13200;
-
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(visionox_rm69299_supplies),
+ visionox_rm69299_supplies, &ctx->supplies);
if (ret < 0)
return ret;
@@ -210,8 +316,6 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
- drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
dsi->lanes = 4;
@@ -239,8 +343,23 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
drm_panel_remove(&ctx->panel);
}
+const struct visionox_rm69299_panel_desc visionox_rm69299_1080p_display_desc = {
+ .mode = &visionox_rm69299_1080x2248_60hz,
+ .init_seq = (const u8 *)visionox_rm69299_1080x2248_60hz_init_seq,
+ .init_seq_len = ARRAY_SIZE(visionox_rm69299_1080x2248_60hz_init_seq),
+};
+
+const struct visionox_rm69299_panel_desc visionox_rm69299_shift_desc = {
+ .mode = &visionox_rm69299_1080x2160_60hz,
+ .init_seq = (const u8 *)visionox_rm69299_1080x2160_60hz_init_seq,
+ .init_seq_len = ARRAY_SIZE(visionox_rm69299_1080x2160_60hz_init_seq),
+};
+
static const struct of_device_id visionox_rm69299_of_match[] = {
- { .compatible = "visionox,rm69299-1080p-display", },
+ { .compatible = "visionox,rm69299-1080p-display",
+ .data = &visionox_rm69299_1080p_display_desc },
+ { .compatible = "visionox,rm69299-shift",
+ .data = &visionox_rm69299_shift_desc },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, visionox_rm69299_of_match);
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm692e5.c b/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
index 4db7fa8d74c4..e53645d59413 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
@@ -360,9 +360,11 @@ static int visionox_rm692e5_probe(struct mipi_dsi_device *dsi)
struct visionox_rm692e5 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_rm692e5, panel,
+ &visionox_rm692e5_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(visionox_rm692e5_supplies),
@@ -383,8 +385,6 @@ static int visionox_rm692e5_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &visionox_rm692e5_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = visionox_rm692e5_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
index 17b8defe79c1..97a79411e1ec 100644
--- a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
+++ b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
@@ -248,9 +248,11 @@ static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
struct visionox_vtdr6130 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_vtdr6130, panel,
+ &visionox_vtdr6130_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(visionox_vtdr6130_supplies),
@@ -273,9 +275,6 @@ static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS;
ctx->panel.prepare_prev_first = true;
- drm_panel_init(&ctx->panel, dev, &visionox_vtdr6130_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = visionox_vtdr6130_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-widechips-ws2401.c b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
index 2591ff8f0d4e..dd74610bd2eb 100644
--- a/drivers/gpu/drm/panel/panel-widechips-ws2401.c
+++ b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
@@ -347,9 +347,11 @@ static int ws2401_probe(struct spi_device *spi)
struct ws2401 *ws;
int ret;
- ws = devm_kzalloc(dev, sizeof(*ws), GFP_KERNEL);
- if (!ws)
- return -ENOMEM;
+ ws = devm_drm_panel_alloc(dev, struct ws2401, panel, &ws2401_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ws))
+ return PTR_ERR(ws);
+
ws->dev = dev;
/*
@@ -379,9 +381,6 @@ static int ws2401_probe(struct spi_device *spi)
ws2401_read_mtp_id(ws);
ws2401_power_off(ws);
- drm_panel_init(&ws->panel, dev, &ws2401_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&ws->panel);
if (ret)
return dev_err_probe(dev, ret,
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index 2b91414c2829..fc6516373b5d 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -241,9 +241,10 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
struct xpp055c272 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct xpp055c272, panel,
+ &xpp055c272_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio))
@@ -269,9 +270,6 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&ctx->panel, &dsi->dev, &xpp055c272_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 3385fd3ef41a..5d0dce10336b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -29,7 +29,7 @@ static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfr
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct panfrost_device *ptdev = dev_get_drvdata(dev);
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
struct dev_pm_opp *opp;
int err;
@@ -40,7 +40,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
err = dev_pm_opp_set_rate(dev, *freq);
if (!err)
- ptdev->pfdevfreq.current_frequency = *freq;
+ pfdev->pfdevfreq.current_frequency = *freq;
return err;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 5d35076b2e6d..04bec27449cb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -209,6 +209,11 @@ int panfrost_device_init(struct panfrost_device *pfdev)
spin_lock_init(&pfdev->cycle_counter.lock);
+#ifdef CONFIG_DEBUG_FS
+ mutex_init(&pfdev->debugfs.gems_lock);
+ INIT_LIST_HEAD(&pfdev->debugfs.gems_list);
+#endif
+
err = panfrost_pm_domain_init(pfdev);
if (err)
return err;
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index dcff70f905cd..077525a3ad68 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -111,6 +111,17 @@ struct panfrost_compatible {
u8 gpu_quirks;
};
+/**
+ * struct panfrost_device_debugfs - Device-wide DebugFS tracking structures
+ */
+struct panfrost_device_debugfs {
+ /** @gems_list: Device-wide list of GEM objects owned by at least one file. */
+ struct list_head gems_list;
+
+ /** @gems_lock: Serializes access to the device-wide list of GEM objects. */
+ struct mutex gems_lock;
+};
+
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -164,6 +175,10 @@ struct panfrost_device {
atomic_t use_count;
spinlock_t lock;
} cycle_counter;
+
+#ifdef CONFIG_DEBUG_FS
+ struct panfrost_device_debugfs debugfs;
+#endif
};
struct panfrost_mmu {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index f1ec3b02f15a..1ea6c509a5d5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
@@ -312,7 +313,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&job->base,
&file_priv->sched_entity[slot],
- 1, NULL);
+ 1, NULL, file->client_id);
if (ret)
goto out_put_job;
@@ -495,6 +496,46 @@ out_put_object:
return ret;
}
+static int panfrost_ioctl_set_label_bo(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_panfrost_set_label_bo *args = data;
+ struct drm_gem_object *obj;
+ const char *label = NULL;
+ int ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->label) {
+ label = strndup_user(u64_to_user_ptr(args->label),
+ PANFROST_BO_LABEL_MAXLEN);
+ if (IS_ERR(label)) {
+ ret = PTR_ERR(label);
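+ /*
+ * strndup_user() returns -EINVAL when the string is not
+ * terminated within the maximum length; report that to
+ * userspace as -E2BIG ("label too long").
+ */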
+ if (ret == -EINVAL)
+ ret = -E2BIG;
+ goto err_put_obj;
+ }
+ }
+
+ /*
+ * We treat passing a label of length 0 and passing a NULL label
+ * differently, because even though they might seem conceptually
+ * similar, future uses of the BO label might expect a different
+ * behaviour in each case.
+ */
+ panfrost_gem_set_label(obj, label);
+
+err_put_obj:
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
int panfrost_unstable_ioctl_check(void)
{
if (!unstable_ioctls)
@@ -561,6 +602,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(SET_LABEL_BO, set_label_bo, DRM_RENDER_ALLOW),
};
static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
@@ -618,6 +660,37 @@ static const struct file_operations panfrost_drm_driver_fops = {
.show_fdinfo = drm_show_fdinfo,
};
+#ifdef CONFIG_DEBUG_FS
+static int panfrost_gems_show(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct panfrost_device *pfdev = dev->dev_private;
+
+ panfrost_gem_debugfs_print_bos(pfdev, m);
+
+ return 0;
+}
+
+static struct drm_info_list panfrost_debugfs_list[] = {
+ {"gems", panfrost_gems_show, 0, NULL},
+};
+
+static int panfrost_gems_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panfrost_debugfs_list,
+ ARRAY_SIZE(panfrost_debugfs_list),
+ minor->debugfs_root, minor);
+
+ return 0;
+}
+
+static void panfrost_debugfs_init(struct drm_minor *minor)
+{
+ panfrost_gems_debugfs_init(minor);
+}
+#endif
+
/*
* Panfrost driver version:
* - 1.0 - initial interface
@@ -625,6 +698,7 @@ static const struct file_operations panfrost_drm_driver_fops = {
* - 1.2 - adds AFBC_FEATURES query
* - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT
* - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries
+ * - 1.4 - adds SET_LABEL_BO
*/
static const struct drm_driver panfrost_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -637,10 +711,13 @@ static const struct drm_driver panfrost_drm_driver = {
.name = "panfrost",
.desc = "panfrost DRM",
.major = 1,
- .minor = 3,
+ .minor = 4,
.gem_create_object = panfrost_gem_create_object,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = panfrost_debugfs_init,
+#endif
};
static int panfrost_probe(struct platform_device *pdev)
@@ -789,6 +866,8 @@ static const struct panfrost_compatible amlogic_data = {
.vendor_quirk = panfrost_gpu_amlogic_quirk,
};
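+/*
+ * Shared pool of MediaTek power-domain names; each SoC entry below uses
+ * the first num_pm_domains names from this array.
+ */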
+static const char * const mediatek_pm_domains[] = { "core0", "core1", "core2",
+ "core3", "core4" };
/*
* The old data with two power supplies for MT8183 is here only to
* keep retro-compatibility with older devicetrees, as DVFS will
@@ -797,51 +876,53 @@ static const struct panfrost_compatible amlogic_data = {
* On new devicetrees please use the _b variant with a single and
* coupled regulators instead.
*/
-static const char * const mediatek_mt8183_supplies[] = { "mali", "sram", NULL };
-static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
+static const char * const legacy_supplies[] = { "mali", "sram", NULL };
static const struct panfrost_compatible mediatek_mt8183_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies) - 1,
- .supply_names = mediatek_mt8183_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
- .pm_domain_names = mediatek_mt8183_pm_domains,
+ .num_supplies = ARRAY_SIZE(legacy_supplies) - 1,
+ .supply_names = legacy_supplies,
+ .num_pm_domains = 3,
+ .pm_domain_names = mediatek_pm_domains,
};
-static const char * const mediatek_mt8183_b_supplies[] = { "mali", NULL };
static const struct panfrost_compatible mediatek_mt8183_b_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
- .supply_names = mediatek_mt8183_b_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
- .pm_domain_names = mediatek_mt8183_pm_domains,
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 3,
+ .pm_domain_names = mediatek_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
-static const char * const mediatek_mt8186_pm_domains[] = { "core0", "core1" };
static const struct panfrost_compatible mediatek_mt8186_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
- .supply_names = mediatek_mt8183_b_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8186_pm_domains),
- .pm_domain_names = mediatek_mt8186_pm_domains,
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 2,
+ .pm_domain_names = mediatek_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
-/* MT8188 uses the same power domains and power supplies as MT8183 */
static const struct panfrost_compatible mediatek_mt8188_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
- .supply_names = mediatek_mt8183_b_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
- .pm_domain_names = mediatek_mt8183_pm_domains,
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 3,
+ .pm_domain_names = mediatek_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
.gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
-static const char * const mediatek_mt8192_supplies[] = { "mali", NULL };
-static const char * const mediatek_mt8192_pm_domains[] = { "core0", "core1", "core2",
- "core3", "core4" };
static const struct panfrost_compatible mediatek_mt8192_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8192_supplies) - 1,
- .supply_names = mediatek_mt8192_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains),
- .pm_domain_names = mediatek_mt8192_pm_domains,
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 5,
+ .pm_domain_names = mediatek_pm_domains,
+ .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
+};
+
+static const struct panfrost_compatible mediatek_mt8370_data = {
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 2,
+ .pm_domain_names = mediatek_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
.gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
@@ -868,6 +949,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data },
{ .compatible = "mediatek,mt8188-mali", .data = &mediatek_mt8188_data },
{ .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data },
+ { .compatible = "mediatek,mt8370-mali", .data = &mediatek_mt8370_data },
{ .compatible = "allwinner,sun50i-h616-mali", .data = &allwinner_h616_data },
{}
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 963f04ba2de6..85d6289a6eda 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
@@ -11,6 +12,36 @@
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
+#ifdef CONFIG_DEBUG_FS
+static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
+ struct panfrost_gem_object *bo)
+{
+ bo->debugfs.creator.tgid = current->group_leader->pid;
+ get_task_comm(bo->debugfs.creator.process_name, current->group_leader);
+
+ mutex_lock(&pfdev->debugfs.gems_lock);
+ list_add_tail(&bo->debugfs.node, &pfdev->debugfs.gems_list);
+ mutex_unlock(&pfdev->debugfs.gems_lock);
+}
+
+static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo)
+{
+ struct panfrost_device *pfdev = bo->base.base.dev->dev_private;
+
+ if (list_empty(&bo->debugfs.node))
+ return;
+
+ mutex_lock(&pfdev->debugfs.gems_lock);
+ list_del_init(&bo->debugfs.node);
+ mutex_unlock(&pfdev->debugfs.gems_lock);
+}
+#else
+static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
+ struct panfrost_gem_object *bo)
+{}
+static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) {}
+#endif
+
/* Called by DRM core on the last userspace/kernel unreference of the
* BO.
*/
@@ -35,6 +66,10 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
*/
WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+ kfree_const(bo->label.str);
+ panfrost_gem_debugfs_bo_rm(bo);
+ mutex_destroy(&bo->label.lock);
+
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
@@ -260,6 +295,9 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
obj->base.map_wc = !pfdev->coherent;
+ mutex_init(&obj->label.lock);
+
+ panfrost_gem_debugfs_bo_add(pfdev, obj);
return &obj->base.base;
}
@@ -300,5 +338,153 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
bo = to_panfrost_bo(obj);
bo->noexec = true;
+ /*
+ * We assign this generic label because this function cannot
+ * be reached through any of the Panfrost UM driver-specific
+ * code paths, unless one is given by explicitly calling the
+ * SET_LABEL_BO ioctl. It is therefore preferable to have a
+ * blanket BO tag that tells us the object was imported from
+ * another driver rather than no label at all.
+ */
+ panfrost_gem_internal_set_label(obj, "GEM PRIME buffer");
+
return obj;
}
+
+void
+panfrost_gem_set_label(struct drm_gem_object *obj, const char *label)
+{
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ const char *old_label;
+
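+ /* Swap in the new label under the lock; free the old one outside it. */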
+ scoped_guard(mutex, &bo->label.lock) {
+ old_label = bo->label.str;
+ bo->label.str = label;
+ }
+
+ kfree_const(old_label);
+}
+
+void
+panfrost_gem_internal_set_label(struct drm_gem_object *obj, const char *label)
+{
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ const char *str;
+
+ /* We should never attempt labelling a UM-exposed GEM object */
+ if (drm_WARN_ON(bo->base.base.dev, bo->base.base.handle_count > 0))
+ return;
+
+ if (!label)
+ return;
+
+ str = kstrdup_const(label, GFP_KERNEL);
+ if (!str) {
+ /* Failing to allocate memory for a label isn't a fatal condition */
+ drm_warn(bo->base.base.dev, "Not enough memory to allocate BO label");
+ return;
+ }
+
+ panfrost_gem_set_label(obj, str);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct gem_size_totals {
+ size_t size;
+ size_t resident;
+ size_t reclaimable;
+};
+
+struct flag_def {
+ u32 flag;
+ const char *name;
+};
+
+static void panfrost_gem_debugfs_print_flag_names(struct seq_file *m)
+{
+ int len;
+ int i;
+
+ static const struct flag_def gem_state_flags_names[] = {
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED, "imported"},
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED, "exported"},
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED, "purged"},
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE, "purgeable"},
+ };
+
+ seq_puts(m, "GEM state flags: ");
+ for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
+ seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i].name,
+ gem_state_flags_names[i].flag, (i < len - 1) ? ", " : "\n\n");
+ }
+}
+
+static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo,
+ struct seq_file *m,
+ struct gem_size_totals *totals)
+{
+ unsigned int refcount = kref_read(&bo->base.base.refcount);
+ char creator_info[32] = {};
+ size_t resident_size;
+ u32 gem_state_flags = 0;
+
+ /* Skip BOs being destroyed. */
+ if (!refcount)
+ return;
+
+ resident_size = panfrost_gem_rss(&bo->base.base);
+
+ snprintf(creator_info, sizeof(creator_info),
+ "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
+ seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
+ creator_info,
+ bo->base.base.name,
+ refcount,
+ bo->base.base.size,
+ resident_size,
+ drm_vma_node_start(&bo->base.base.vma_node));
+
+ if (bo->base.base.import_attach)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
+ if (bo->base.base.dma_buf)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED;
+
+ if (bo->base.madv < 0)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED;
+ else if (bo->base.madv > 0)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE;
+
+ seq_printf(m, "0x%-10x", gem_state_flags);
+
+ scoped_guard(mutex, &bo->label.lock) {
+ seq_printf(m, "%s\n", bo->label.str ? : "");
+ }
+
+ totals->size += bo->base.base.size;
+ totals->resident += resident_size;
+ if (bo->base.madv > 0)
+ totals->reclaimable += resident_size;
+}
+
+void panfrost_gem_debugfs_print_bos(struct panfrost_device *pfdev,
+ struct seq_file *m)
+{
+ struct gem_size_totals totals = {0};
+ struct panfrost_gem_object *bo;
+
+ panfrost_gem_debugfs_print_flag_names(m);
+
+ seq_puts(m, "created-by global-name refcount size resident-size file-offset state label\n");
+ seq_puts(m, "-----------------------------------------------------------------------------------------------------------------------------------\n");
+
+ scoped_guard(mutex, &pfdev->debugfs.gems_lock) {
+ list_for_each_entry(bo, &pfdev->debugfs.gems_list, debugfs.node) {
+ panfrost_gem_debugfs_bo_print(bo, m, &totals);
+ }
+ }
+
+ seq_puts(m, "===================================================================================================================================\n");
+ seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
+ totals.size, totals.resident, totals.reclaimable);
+}
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 7516b7ecf7fe..8de3e76f2717 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -8,6 +8,46 @@
#include <drm/drm_mm.h>
struct panfrost_mmu;
+struct panfrost_device;
+
+#define PANFROST_BO_LABEL_MAXLEN 4096
+
+enum panfrost_debugfs_gem_state_flags {
+ /** @PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED: GEM BO is PRIME imported. */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED = BIT(0),
+
+ /** @PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED: GEM BO is PRIME exported. */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED = BIT(1),
+
+ /** @PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED: GEM BO was reclaimed by the shrinker. */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED = BIT(2),
+
+ /**
+ * @PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE: GEM BO pages were marked as no longer
+ * needed by UM and can be reclaimed by the shrinker.
+ */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE = BIT(3),
+};
+
+/**
+ * struct panfrost_gem_debugfs - GEM object's DebugFS list information
+ */
+struct panfrost_gem_debugfs {
+ /**
+ * @node: Node used to insert the object in the device-wide list of
+ * GEM objects, to display information about it through a DebugFS file.
+ */
+ struct list_head node;
+
+ /** @creator: Information about the UM process which created the GEM. */
+ struct {
+ /** @creator.process_name: Comm of the creating process's group leader. */
+ char process_name[TASK_COMM_LEN];
+
+ /** @creator.tgid: TGID of the creating process (its group leader's PID). */
+ pid_t tgid;
+ } creator;
+};
struct panfrost_gem_object {
struct drm_gem_shmem_object base;
@@ -41,8 +81,26 @@ struct panfrost_gem_object {
*/
size_t heap_rss_size;
+ /**
+ * @label: BO tagging fields. The label can be assigned within the
+ * driver itself or through a specific IOCTL.
+ */
+ struct {
+ /**
+ * @label.str: Pointer to a NULL-terminated label string.
+ */
+ const char *str;
+
+ /** @label.lock: Protects access to the @label.str field. */
+ struct mutex lock;
+ } label;
+
bool noexec :1;
bool is_heap :1;
+
+#ifdef CONFIG_DEBUG_FS
+ struct panfrost_gem_debugfs debugfs;
+#endif
};
struct panfrost_gem_mapping {
@@ -89,4 +147,12 @@ void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
int panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
+void panfrost_gem_set_label(struct drm_gem_object *obj, const char *label);
+void panfrost_gem_internal_set_label(struct drm_gem_object *obj, const char *label);
+
+#ifdef CONFIG_DEBUG_FS
+void panfrost_gem_debugfs_print_bos(struct panfrost_device *pfdev,
+ struct seq_file *m);
+#endif
+
#endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 15e2d505550f..82acabb21b27 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -751,11 +751,11 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
int js = panfrost_job_get_slot(job);
/*
- * If the GPU managed to complete this jobs fence, the timeout is
- * spurious. Bail out.
+ * If the GPU managed to complete this job's fence, the timeout fired
+ * before the free-job worker ran. The timeout is spurious, so bail out.
*/
if (dma_fence_is_signaled(job->done_fence))
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
/*
* Panfrost IRQ handler may take a long time to process an interrupt
@@ -770,7 +770,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
if (dma_fence_is_signaled(job->done_fence)) {
dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n");
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
@@ -786,7 +786,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
atomic_set(&pfdev->reset.pending, 1);
panfrost_reset(pfdev, sched_job);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void panfrost_reset_work(struct work_struct *work)
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 52befead08c6..563f16bae543 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -111,6 +111,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_put_mapping;
perfcnt->buf = map.vaddr;
+ panfrost_gem_internal_set_label(&bo->base, "Perfcnt sample buffer");
+
/*
* Invalidate the cache and clear the counters to start from a fresh
* state.
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index 465d3ab1b79e..4fc7cf2aeed5 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -230,6 +230,24 @@ struct panthor_file {
/** @ptdev: Device attached to this file. */
struct panthor_device *ptdev;
+ /** @user_mmio: User MMIO related fields. */
+ struct {
+ /**
+ * @offset: Offset used for user MMIO mappings.
+ *
+ * This offset should not be used to check the type of mapping
+ * except in panthor_mmap(). After that point, MMIO mapping
+ * offsets have been adjusted to match
+ * DRM_PANTHOR_USER_MMIO_OFFSET and that macro should be used
+ * instead.
+ * Make sure this rule is followed at all times, because
+ * userspace is in control of the offset, and can change the
+ * value behind our back. Otherwise the kernel could take the
+ * wrong mapping branch.
+ */
+ u64 offset;
+ } user_mmio;
+
/** @vms: VM pool attached to this file. */
struct panthor_vm_pool *vms;
@@ -437,4 +455,75 @@ static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev, \
extern struct workqueue_struct *panthor_cleanup_wq;
+static inline void gpu_write(struct panthor_device *ptdev, u32 reg, u32 data)
+{
+ writel(data, ptdev->iomem + reg);
+}
+
+static inline u32 gpu_read(struct panthor_device *ptdev, u32 reg)
+{
+ return readl(ptdev->iomem + reg);
+}
+
+static inline u32 gpu_read_relaxed(struct panthor_device *ptdev, u32 reg)
+{
+ return readl_relaxed(ptdev->iomem + reg);
+}
+
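+/*
+ * 64-bit GPU registers are laid out as LO/HI 32-bit pairs; the helpers
+ * below issue two 32-bit accesses, so a 64-bit access is not atomic.
+ */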
+static inline void gpu_write64(struct panthor_device *ptdev, u32 reg, u64 data)
+{
+ gpu_write(ptdev, reg, lower_32_bits(data));
+ gpu_write(ptdev, reg + 4, upper_32_bits(data));
+}
+
+static inline u64 gpu_read64(struct panthor_device *ptdev, u32 reg)
+{
+ return (gpu_read(ptdev, reg) | ((u64)gpu_read(ptdev, reg + 4) << 32));
+}
+
+static inline u64 gpu_read64_relaxed(struct panthor_device *ptdev, u32 reg)
+{
+ return (gpu_read_relaxed(ptdev, reg) |
+ ((u64)gpu_read_relaxed(ptdev, reg + 4) << 32));
+}
+
+static inline u64 gpu_read64_counter(struct panthor_device *ptdev, u32 reg)
+{
+ u32 lo, hi1, hi2;
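+
+ /*
+ * The 64-bit counter is read as two 32-bit halves and may tick
+ * between the accesses; re-read the high word until it is stable
+ * so both halves are taken from the same counter value.
+ */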
+ do {
+ hi1 = gpu_read(ptdev, reg + 4);
+ lo = gpu_read(ptdev, reg);
+ hi2 = gpu_read(ptdev, reg + 4);
+ } while (hi1 != hi2);
+ return lo | ((u64)hi2 << 32);
+}
+
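+/*
+ * Poll a GPU register with the gpu_read*() accessors above until @cond is
+ * true; the trailing 'false' argument of read_poll_timeout() means no
+ * sleep/delay is inserted before the first read.
+ */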
+#define gpu_read_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout(gpu_read, val, cond, delay_us, timeout_us, false, \
+ dev, reg)
+
+#define gpu_read_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout_atomic(gpu_read, val, cond, delay_us, timeout_us, \
+ false, dev, reg)
+
+#define gpu_read64_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout(gpu_read64, val, cond, delay_us, timeout_us, false, \
+ dev, reg)
+
+#define gpu_read64_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout_atomic(gpu_read64, val, cond, delay_us, timeout_us, \
+ false, dev, reg)
+
+#define gpu_read_relaxed_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout_atomic(gpu_read_relaxed, val, cond, delay_us, \
+ timeout_us, false, dev, reg)
+
+#define gpu_read64_relaxed_poll_timeout(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout(gpu_read64_relaxed, val, cond, delay_us, timeout_us, \
+ false, dev, reg)
+
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 6200cad22563..1116f2d2826e 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -772,8 +772,8 @@ static int panthor_query_timestamp_info(struct panthor_device *ptdev,
#else
arg->timestamp_frequency = 0;
#endif
- arg->current_timestamp = panthor_gpu_read_timestamp(ptdev);
- arg->timestamp_offset = panthor_gpu_read_timestamp_offset(ptdev);
+ arg->current_timestamp = gpu_read64_counter(ptdev, GPU_TIMESTAMP);
+ arg->timestamp_offset = gpu_read64(ptdev, GPU_TIMESTAMP_OFFSET);
pm_runtime_put(ptdev->base.dev);
return 0;
@@ -996,7 +996,8 @@ static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data,
const struct drm_panthor_queue_submit *qsubmit = &jobs_args[i];
struct drm_sched_job *job;
- job = panthor_job_create(pfile, args->group_handle, qsubmit);
+ job = panthor_job_create(pfile, args->group_handle, qsubmit,
+ file->client_id);
if (IS_ERR(job)) {
ret = PTR_ERR(job);
goto out_cleanup_submit_ctx;
@@ -1378,6 +1379,20 @@ err_put_obj:
return ret;
}
+static int panthor_ioctl_set_user_mmio_offset(struct drm_device *ddev,
+ void *data, struct drm_file *file)
+{
+ struct drm_panthor_set_user_mmio_offset *args = data;
+ struct panthor_file *pfile = file->driver_priv;
+
+ if (args->offset != DRM_PANTHOR_USER_MMIO_OFFSET_32BIT &&
+ args->offset != DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
+ return -EINVAL;
+
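+ /* Pairs with the READ_ONCE() of user_mmio.offset in panthor_mmap(). */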
+ WRITE_ONCE(pfile->user_mmio.offset, args->offset);
+ return 0;
+}
+
static int
panthor_open(struct drm_device *ddev, struct drm_file *file)
{
@@ -1395,6 +1410,18 @@ panthor_open(struct drm_device *ddev, struct drm_file *file)
}
pfile->ptdev = ptdev;
+ pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET;
+
+#ifdef CONFIG_ARM64
+ /*
+ * With 32-bit systems being limited by the 32-bit representation of
+ * mmap2's pgoffset field, we need to make the MMIO offset arch
+ * specific.
+ */
+ if (test_tsk_thread_flag(current, TIF_32BIT))
+ pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
+#endif
+
ret = panthor_vm_pool_create(pfile);
if (ret)
@@ -1448,6 +1475,7 @@ static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(BO_SET_LABEL, bo_set_label, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(SET_USER_MMIO_OFFSET, set_user_mmio_offset, DRM_RENDER_ALLOW),
};
static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1456,30 +1484,26 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
struct panthor_file *pfile = file->driver_priv;
struct panthor_device *ptdev = pfile->ptdev;
u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
+ u64 user_mmio_offset;
int ret, cookie;
if (!drm_dev_enter(file->minor->dev, &cookie))
return -ENODEV;
-#ifdef CONFIG_ARM64
- /*
- * With 32-bit systems being limited by the 32-bit representation of
- * mmap2's pgoffset field, we need to make the MMIO offset arch
- * specific. This converts a user MMIO offset into something the kernel
- * driver understands.
+ /* Adjust the user MMIO offset to match the offset used by the kernel.
+ * We use a local variable with a READ_ONCE() here to make sure
+ * the user_mmio_offset we use for the is_user_mmio_mapping() check
+ * hasn't changed when we do the offset adjustment.
*/
- if (test_tsk_thread_flag(current, TIF_32BIT) &&
- offset >= DRM_PANTHOR_USER_MMIO_OFFSET_32BIT) {
- offset += DRM_PANTHOR_USER_MMIO_OFFSET_64BIT -
- DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
+ user_mmio_offset = READ_ONCE(pfile->user_mmio.offset);
+ if (offset >= user_mmio_offset) {
+ offset -= user_mmio_offset;
+ offset += DRM_PANTHOR_USER_MMIO_OFFSET;
vma->vm_pgoff = offset >> PAGE_SHIFT;
- }
-#endif
-
- if (offset >= DRM_PANTHOR_USER_MMIO_OFFSET)
ret = panthor_device_mmap_io(ptdev, vma);
- else
+ } else {
ret = drm_gem_mmap(filp, vma);
+ }
drm_dev_exit(cookie);
return ret;
@@ -1583,6 +1607,7 @@ static void panthor_debugfs_init(struct drm_minor *minor)
* - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
* - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag
* - 1.4 - adds DRM_IOCTL_PANTHOR_BO_SET_LABEL ioctl
+ * - 1.5 - adds DRM_PANTHOR_SET_USER_MMIO_OFFSET ioctl
*/
static const struct drm_driver panthor_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
@@ -1596,7 +1621,7 @@ static const struct drm_driver panthor_drm_driver = {
.name = "panthor",
.desc = "Panthor DRM driver",
.major = 1,
- .minor = 4,
+ .minor = 5,
.gem_create_object = panthor_gem_create_object,
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 7bc38e635329..36f1034839c2 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -1063,8 +1063,8 @@ static void panthor_fw_stop(struct panthor_device *ptdev)
u32 status;
gpu_write(ptdev, MCU_CONTROL, MCU_CONTROL_DISABLE);
- if (readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
- status == MCU_STATUS_DISABLED, 10, 100000))
+ if (gpu_read_poll_timeout(ptdev, MCU_STATUS, status,
+ status == MCU_STATUS_DISABLED, 10, 100000))
drm_err(&ptdev->base, "Failed to stop MCU");
}
@@ -1089,8 +1089,9 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
- if (!readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
- status == MCU_STATUS_HALT, 10, 100000)) {
+ if (!gpu_read_poll_timeout(ptdev, MCU_STATUS, status,
+ status == MCU_STATUS_HALT, 10,
+ 100000)) {
ptdev->reset.fast = true;
} else {
drm_warn(&ptdev->base, "Failed to cleanly suspend MCU");
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 7c00fd77758b..a123bc740ba1 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -16,10 +16,15 @@
#include "panthor_mmu.h"
#ifdef CONFIG_DEBUG_FS
-static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev,
- struct panthor_gem_object *bo)
+static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo)
{
INIT_LIST_HEAD(&bo->debugfs.node);
+}
+
+static void panthor_gem_debugfs_bo_add(struct panthor_gem_object *bo)
+{
+ struct panthor_device *ptdev = container_of(bo->base.base.dev,
+ struct panthor_device, base);
bo->debugfs.creator.tgid = current->group_leader->pid;
get_task_comm(bo->debugfs.creator.process_name, current->group_leader);
@@ -44,14 +49,13 @@ static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo)
static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags)
{
- bo->debugfs.flags = usage_flags | PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED;
+ bo->debugfs.flags = usage_flags;
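+ /* List insertion at this point replaces the old INITIALIZED flag check. */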
+ panthor_gem_debugfs_bo_add(bo);
}
#else
-static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev,
- struct panthor_gem_object *bo)
-{}
static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {}
static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {}
+static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo) {}
#endif
static void panthor_gem_free_object(struct drm_gem_object *obj)
@@ -246,7 +250,7 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
mutex_init(&obj->label.lock);
- panthor_gem_debugfs_bo_add(ptdev, obj);
+ panthor_gem_debugfs_bo_init(obj);
return &obj->base.base;
}
@@ -285,6 +289,8 @@ panthor_gem_create_with_handle(struct drm_file *file,
bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
}
+ panthor_gem_debugfs_set_usage_flags(bo, 0);
+
/*
* Allocate an id of idr table where the obj is registered
* and handle has the id what user can see.
@@ -296,12 +302,6 @@ panthor_gem_create_with_handle(struct drm_file *file,
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&shmem->base);
- /*
- * No explicit flags are needed in the call below, since the
- * function internally sets the INITIALIZED bit for us.
- */
- panthor_gem_debugfs_set_usage_flags(bo, 0);
-
return ret;
}
@@ -387,7 +387,7 @@ static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo,
unsigned int refcount = kref_read(&bo->base.base.refcount);
char creator_info[32] = {};
size_t resident_size;
- u32 gem_usage_flags = bo->debugfs.flags & (u32)~PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED;
+ u32 gem_usage_flags = bo->debugfs.flags;
u32 gem_state_flags = 0;
/* Skip BOs being destroyed. */
@@ -436,8 +436,7 @@ void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
scoped_guard(mutex, &ptdev->gems.lock) {
list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) {
- if (bo->debugfs.flags & PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED)
- panthor_gem_debugfs_bo_print(bo, m, &totals);
+ panthor_gem_debugfs_bo_print(bo, m, &totals);
}
}
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 4dd732dcd59f..8fc7215e9b90 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -35,9 +35,6 @@ enum panthor_debugfs_gem_usage_flags {
/** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED: BO is mapped on the FW VM. */
PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED = BIT(PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT),
-
- /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED: BO is ready for DebugFS display. */
- PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED = BIT(31),
};
/**
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index 32d678a0114e..cb7a335e07d7 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -108,14 +108,9 @@ static void panthor_gpu_init_info(struct panthor_device *ptdev)
ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
- ptdev->gpu_info.shader_present = gpu_read(ptdev, GPU_SHADER_PRESENT_LO);
- ptdev->gpu_info.shader_present |= (u64)gpu_read(ptdev, GPU_SHADER_PRESENT_HI) << 32;
-
- ptdev->gpu_info.tiler_present = gpu_read(ptdev, GPU_TILER_PRESENT_LO);
- ptdev->gpu_info.tiler_present |= (u64)gpu_read(ptdev, GPU_TILER_PRESENT_HI) << 32;
-
- ptdev->gpu_info.l2_present = gpu_read(ptdev, GPU_L2_PRESENT_LO);
- ptdev->gpu_info.l2_present |= (u64)gpu_read(ptdev, GPU_L2_PRESENT_HI) << 32;
+ ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
+ ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
+ ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
@@ -154,8 +149,7 @@ static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
if (status & GPU_IRQ_FAULT) {
u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
- u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) |
- gpu_read(ptdev, GPU_FAULT_ADDR_LO);
+ u64 address = gpu_read64(ptdev, GPU_FAULT_ADDR);
drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
@@ -246,45 +240,27 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
u32 pwroff_reg, u32 pwrtrans_reg,
u64 mask, u32 timeout_us)
{
- u32 val, i;
+ u32 val;
int ret;
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
-
- ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
- val, !(mask32 & val),
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
- blk_name, mask);
- return ret;
- }
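+ /* Wait for any pending power transition on the block to finish. */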
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s:%llx power transition", blk_name,
+ mask);
+ return ret;
}
- if (mask & GENMASK(31, 0))
- gpu_write(ptdev, pwroff_reg, mask);
+ gpu_write64(ptdev, pwroff_reg, mask);
- if (mask >> 32)
- gpu_write(ptdev, pwroff_reg + 4, mask >> 32);
-
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
-
- ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
- val, !(mask32 & val),
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s:%llx power transition", blk_name,
+ mask);
+ return ret;
}
return 0;
@@ -307,45 +283,27 @@ int panthor_gpu_block_power_on(struct panthor_device *ptdev,
u32 pwron_reg, u32 pwrtrans_reg,
u32 rdy_reg, u64 mask, u32 timeout_us)
{
- u32 val, i;
+ u32 val;
int ret;
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
-
- ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
- val, !(mask32 & val),
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s:%llx power transition", blk_name,
+ mask);
+ return ret;
}
- if (mask & GENMASK(31, 0))
- gpu_write(ptdev, pwron_reg, mask);
-
- if (mask >> 32)
- gpu_write(ptdev, pwron_reg + 4, mask >> 32);
-
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
+ gpu_write64(ptdev, pwron_reg, mask);
- ret = readl_relaxed_poll_timeout(ptdev->iomem + rdy_reg + (i * 4),
- val, (mask32 & val) == mask32,
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, rdy_reg, val,
+ (mask & val) == mask,
+ 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
+ blk_name, mask);
+ return ret;
}
return 0;
@@ -494,49 +452,3 @@ void panthor_gpu_resume(struct panthor_device *ptdev)
panthor_gpu_l2_power_on(ptdev);
}
-/**
- * panthor_gpu_read_64bit_counter() - Read a 64-bit counter at a given offset.
- * @ptdev: Device.
- * @reg: The offset of the register to read.
- *
- * Return: The counter value.
- */
-static u64
-panthor_gpu_read_64bit_counter(struct panthor_device *ptdev, u32 reg)
-{
- u32 hi, lo;
-
- do {
- hi = gpu_read(ptdev, reg + 0x4);
- lo = gpu_read(ptdev, reg);
- } while (hi != gpu_read(ptdev, reg + 0x4));
-
- return ((u64)hi << 32) | lo;
-}
-
-/**
- * panthor_gpu_read_timestamp() - Read the timestamp register.
- * @ptdev: Device.
- *
- * Return: The GPU timestamp value.
- */
-u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev)
-{
- return panthor_gpu_read_64bit_counter(ptdev, GPU_TIMESTAMP_LO);
-}
-
-/**
- * panthor_gpu_read_timestamp_offset() - Read the timestamp offset register.
- * @ptdev: Device.
- *
- * Return: The GPU timestamp offset value.
- */
-u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev)
-{
- u32 hi, lo;
-
- hi = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_HI);
- lo = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_LO);
-
- return ((u64)hi << 32) | lo;
-}
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h
index 7f6133a66127..7c17a8c06858 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.h
+++ b/drivers/gpu/drm/panthor/panthor_gpu.h
@@ -30,9 +30,9 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
*/
#define panthor_gpu_power_on(ptdev, type, mask, timeout_us) \
panthor_gpu_block_power_on(ptdev, #type, \
- type ## _PWRON_LO, \
- type ## _PWRTRANS_LO, \
- type ## _READY_LO, \
+ type ## _PWRON, \
+ type ## _PWRTRANS, \
+ type ## _READY, \
mask, timeout_us)
/**
@@ -42,15 +42,13 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
*/
#define panthor_gpu_power_off(ptdev, type, mask, timeout_us) \
panthor_gpu_block_power_off(ptdev, #type, \
- type ## _PWROFF_LO, \
- type ## _PWRTRANS_LO, \
+ type ## _PWROFF, \
+ type ## _PWRTRANS, \
mask, timeout_us)
int panthor_gpu_l2_power_on(struct panthor_device *ptdev);
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
u32 l2, u32 lsc, u32 other);
int panthor_gpu_soft_reset(struct panthor_device *ptdev);
-u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev);
-u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev);
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 6ca9a2642a4e..4140f697ba5a 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -510,9 +510,9 @@ static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
/* Wait for the MMU status to indicate there is no active command, in
* case one is pending.
*/
- ret = readl_relaxed_poll_timeout_atomic(ptdev->iomem + AS_STATUS(as_nr),
- val, !(val & AS_STATUS_AS_ACTIVE),
- 10, 100000);
+ ret = gpu_read_relaxed_poll_timeout_atomic(ptdev, AS_STATUS(as_nr), val,
+ !(val & AS_STATUS_AS_ACTIVE),
+ 10, 100000);
if (ret) {
panthor_device_schedule_reset(ptdev);
@@ -564,8 +564,7 @@ static void lock_region(struct panthor_device *ptdev, u32 as_nr,
region = region_width | region_start;
/* Lock the region that needs to be updated */
- gpu_write(ptdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
- gpu_write(ptdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
+ gpu_write64(ptdev, AS_LOCKADDR(as_nr), region);
write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
}
@@ -615,14 +614,9 @@ static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
if (ret)
return ret;
- gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
- gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
-
- gpu_write(ptdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
- gpu_write(ptdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
-
- gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
- gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
+ gpu_write64(ptdev, AS_TRANSTAB(as_nr), transtab);
+ gpu_write64(ptdev, AS_MEMATTR(as_nr), memattr);
+ gpu_write64(ptdev, AS_TRANSCFG(as_nr), transcfg);
return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -635,14 +629,9 @@ static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr)
if (ret)
return ret;
- gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), 0);
- gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), 0);
-
- gpu_write(ptdev, AS_MEMATTR_LO(as_nr), 0);
- gpu_write(ptdev, AS_MEMATTR_HI(as_nr), 0);
-
- gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
- gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), 0);
+ gpu_write64(ptdev, AS_TRANSTAB(as_nr), 0);
+ gpu_write64(ptdev, AS_MEMATTR(as_nr), 0);
+ gpu_write64(ptdev, AS_TRANSCFG(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -896,17 +885,6 @@ static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
return ret;
}
-/**
- * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS
- * @vm: VM whose cache to flush
- *
- * Return: 0 on success, a negative error code if flush failed.
- */
-int panthor_vm_flush_all(struct panthor_vm *vm)
-{
- return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range);
-}
-
static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
{
struct panthor_device *ptdev = vm->ptdev;
@@ -1681,8 +1659,7 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
u32 source_id;
fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as));
- addr = gpu_read(ptdev, AS_FAULTADDRESS_LO(as));
- addr |= (u64)gpu_read(ptdev, AS_FAULTADDRESS_HI(as)) << 32;
+ addr = gpu_read64(ptdev, AS_FAULTADDRESS(as));
/* decode the fault status */
exception_type = fault_status & 0xFF;
@@ -2282,7 +2259,7 @@ static enum drm_gpu_sched_stat
panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
{
WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!");
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
@@ -2523,7 +2500,7 @@ panthor_vm_bind_job_create(struct drm_file *file,
kref_init(&job->refcount);
job->vm = panthor_vm_get(vm);
- ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm);
+ ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm, file->client_id);
if (ret)
goto err_put_job;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index fc274637114e..0e268fdfdb2f 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -33,7 +33,6 @@ int panthor_vm_active(struct panthor_vm *vm);
void panthor_vm_idle(struct panthor_vm *vm);
u32 panthor_vm_page_size(struct panthor_vm *vm);
int panthor_vm_as(struct panthor_vm *vm);
-int panthor_vm_flush_all(struct panthor_vm *vm);
struct panthor_heap_pool *
panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
index a7a323dc5cf9..48bbfd40138c 100644
--- a/drivers/gpu/drm/panthor/panthor_regs.h
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -63,20 +63,16 @@
#define GPU_STATUS_DBG_ENABLED BIT(8)
#define GPU_FAULT_STATUS 0x3C
-#define GPU_FAULT_ADDR_LO 0x40
-#define GPU_FAULT_ADDR_HI 0x44
+#define GPU_FAULT_ADDR 0x40
#define GPU_PWR_KEY 0x50
#define GPU_PWR_KEY_UNLOCK 0x2968A819
#define GPU_PWR_OVERRIDE0 0x54
#define GPU_PWR_OVERRIDE1 0x58
-#define GPU_TIMESTAMP_OFFSET_LO 0x88
-#define GPU_TIMESTAMP_OFFSET_HI 0x8C
-#define GPU_CYCLE_COUNT_LO 0x90
-#define GPU_CYCLE_COUNT_HI 0x94
-#define GPU_TIMESTAMP_LO 0x98
-#define GPU_TIMESTAMP_HI 0x9C
+#define GPU_TIMESTAMP_OFFSET 0x88
+#define GPU_CYCLE_COUNT 0x90
+#define GPU_TIMESTAMP 0x98
#define GPU_THREAD_MAX_THREADS 0xA0
#define GPU_THREAD_MAX_WORKGROUP_SIZE 0xA4
@@ -85,47 +81,29 @@
#define GPU_TEXTURE_FEATURES(n) (0xB0 + ((n) * 4))
-#define GPU_SHADER_PRESENT_LO 0x100
-#define GPU_SHADER_PRESENT_HI 0x104
-#define GPU_TILER_PRESENT_LO 0x110
-#define GPU_TILER_PRESENT_HI 0x114
-#define GPU_L2_PRESENT_LO 0x120
-#define GPU_L2_PRESENT_HI 0x124
-
-#define SHADER_READY_LO 0x140
-#define SHADER_READY_HI 0x144
-#define TILER_READY_LO 0x150
-#define TILER_READY_HI 0x154
-#define L2_READY_LO 0x160
-#define L2_READY_HI 0x164
-
-#define SHADER_PWRON_LO 0x180
-#define SHADER_PWRON_HI 0x184
-#define TILER_PWRON_LO 0x190
-#define TILER_PWRON_HI 0x194
-#define L2_PWRON_LO 0x1A0
-#define L2_PWRON_HI 0x1A4
-
-#define SHADER_PWROFF_LO 0x1C0
-#define SHADER_PWROFF_HI 0x1C4
-#define TILER_PWROFF_LO 0x1D0
-#define TILER_PWROFF_HI 0x1D4
-#define L2_PWROFF_LO 0x1E0
-#define L2_PWROFF_HI 0x1E4
-
-#define SHADER_PWRTRANS_LO 0x200
-#define SHADER_PWRTRANS_HI 0x204
-#define TILER_PWRTRANS_LO 0x210
-#define TILER_PWRTRANS_HI 0x214
-#define L2_PWRTRANS_LO 0x220
-#define L2_PWRTRANS_HI 0x224
-
-#define SHADER_PWRACTIVE_LO 0x240
-#define SHADER_PWRACTIVE_HI 0x244
-#define TILER_PWRACTIVE_LO 0x250
-#define TILER_PWRACTIVE_HI 0x254
-#define L2_PWRACTIVE_LO 0x260
-#define L2_PWRACTIVE_HI 0x264
+#define GPU_SHADER_PRESENT 0x100
+#define GPU_TILER_PRESENT 0x110
+#define GPU_L2_PRESENT 0x120
+
+#define SHADER_READY 0x140
+#define TILER_READY 0x150
+#define L2_READY 0x160
+
+#define SHADER_PWRON 0x180
+#define TILER_PWRON 0x190
+#define L2_PWRON 0x1A0
+
+#define SHADER_PWROFF 0x1C0
+#define TILER_PWROFF 0x1D0
+#define L2_PWROFF 0x1E0
+
+#define SHADER_PWRTRANS 0x200
+#define TILER_PWRTRANS 0x210
+#define L2_PWRTRANS 0x220
+
+#define SHADER_PWRACTIVE 0x240
+#define TILER_PWRACTIVE 0x250
+#define L2_PWRACTIVE 0x260
#define GPU_REVID 0x280
@@ -168,10 +146,8 @@
#define MMU_AS_SHIFT 6
#define MMU_AS(as) (MMU_BASE + ((as) << MMU_AS_SHIFT))
-#define AS_TRANSTAB_LO(as) (MMU_AS(as) + 0x0)
-#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x4)
-#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x8)
-#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0xC)
+#define AS_TRANSTAB(as) (MMU_AS(as) + 0x0)
+#define AS_MEMATTR(as) (MMU_AS(as) + 0x8)
#define AS_MEMATTR_AARCH64_INNER_ALLOC_IMPL (2 << 2)
#define AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(w, r) ((3 << 2) | \
((w) ? BIT(0) : 0) | \
@@ -183,8 +159,7 @@
#define AS_MEMATTR_AARCH64_INNER_OUTER_NC (1 << 6)
#define AS_MEMATTR_AARCH64_INNER_OUTER_WB (2 << 6)
#define AS_MEMATTR_AARCH64_FAULT (3 << 6)
-#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10)
-#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14)
+#define AS_LOCKADDR(as) (MMU_AS(as) + 0x10)
#define AS_COMMAND(as) (MMU_AS(as) + 0x18)
#define AS_COMMAND_NOP 0
#define AS_COMMAND_UPDATE 1
@@ -199,12 +174,10 @@
#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
-#define AS_FAULTADDRESS_LO(as) (MMU_AS(as) + 0x20)
-#define AS_FAULTADDRESS_HI(as) (MMU_AS(as) + 0x24)
+#define AS_FAULTADDRESS(as) (MMU_AS(as) + 0x20)
#define AS_STATUS(as) (MMU_AS(as) + 0x28)
#define AS_STATUS_AS_ACTIVE BIT(0)
-#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30)
-#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34)
+#define AS_TRANSCFG(as) (MMU_AS(as) + 0x30)
#define AS_TRANSCFG_ADRMODE_UNMAPPED (1 << 0)
#define AS_TRANSCFG_ADRMODE_IDENTITY (2 << 0)
#define AS_TRANSCFG_ADRMODE_AARCH64_4K (6 << 0)
@@ -222,18 +195,11 @@
#define AS_TRANSCFG_DISABLE_AF_FAULT BIT(34)
#define AS_TRANSCFG_WXN BIT(35)
#define AS_TRANSCFG_XREADABLE BIT(36)
-#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38)
-#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C)
+#define AS_FAULTEXTRA(as) (MMU_AS(as) + 0x38)
#define CSF_GPU_LATEST_FLUSH_ID 0x10000
#define CSF_DOORBELL(i) (0x80000 + ((i) * 0x10000))
#define CSF_GLB_DOORBELL_ID 0
-#define gpu_write(dev, reg, data) \
- writel(data, (dev)->iomem + (reg))
-
-#define gpu_read(dev, reg) \
- readl((dev)->iomem + (reg))
-
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 43ee57728de5..8f17394cc82a 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -3241,7 +3241,7 @@ queue_timedout_job(struct drm_sched_job *sched_job)
queue_start(queue);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void queue_free_job(struct drm_sched_job *sched_job)
@@ -3732,7 +3732,8 @@ struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
struct drm_sched_job *
panthor_job_create(struct panthor_file *pfile,
u16 group_handle,
- const struct drm_panthor_queue_submit *qsubmit)
+ const struct drm_panthor_queue_submit *qsubmit,
+ u64 drm_client_id)
{
struct panthor_group_pool *gpool = pfile->groups;
struct panthor_job *job;
@@ -3804,7 +3805,7 @@ panthor_job_create(struct panthor_file *pfile,
ret = drm_sched_job_init(&job->base,
&job->group->queues[job->queue_idx]->entity,
- credits, job->group);
+ credits, job->group, drm_client_id);
if (ret)
goto err_put_job;
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index e650a445cf50..742b0b4ff3a3 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -29,7 +29,8 @@ int panthor_group_get_state(struct panthor_file *pfile,
struct drm_sched_job *
panthor_job_create(struct panthor_file *pfile,
u16 group_handle,
- const struct drm_panthor_queue_submit *qsubmit);
+ const struct drm_panthor_queue_submit *qsubmit,
+ u64 drm_client_id);
struct drm_sched_job *panthor_job_get(struct drm_sched_job *job);
struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job);
void panthor_job_put(struct drm_sched_job *job);
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 70aff64ced87..ae7e572b1b4a 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -1176,9 +1176,10 @@ err_drm_connector_cleanup:
static struct drm_framebuffer *
qxl_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd,
+ return drm_gem_fb_create_with_funcs(dev, file_priv, info, mode_cmd,
&qxl_fb_funcs);
}
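The same signature change is threaded through every .fb_create implementation touched by this series: the core resolves the format info once and passes it down, rather than each driver re-deriving it from mode_cmd->pixel_format. Sketch of the updated hook, assumed to match drm_mode_config_funcs:

struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
				     struct drm_file *file_priv,
				     const struct drm_format_info *info,
				     const struct drm_mode_fb_cmd2 *mode_cmd);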
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index a46613283393..266c57733136 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -211,7 +211,7 @@ static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
surf->base_align = track->group_size;
surf->palign = palign;
surf->halign = 1;
- if (surf->nbx & (palign - 1)) {
+ if ((surf->nbx & (palign - 1)) && !(palign == 64 && surf->nbx == 32)) {
if (prefix) {
dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
__func__, __LINE__, prefix, surf->nbx, palign);
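Restated as a hypothetical predicate, the relaxed check accepts a 32-texel pitch as a special case when 64-texel alignment is required (presumably because such narrow linear surfaces are padded by the hardware; the rationale is not spelled out in the hunk):

static bool evergreen_pitch_ok(u32 nbx, u32 palign)
{
	return !(nbx & (palign - 1)) || (palign == 64 && nbx == 32);
}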
@@ -2661,6 +2661,95 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
break;
}
+ case PACKET3_COND_EXEC:
+ {
+ u64 offset;
+
+ if (pkt->count != 2) {
+ DRM_ERROR("bad COND_EXEC (invalid count)\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad COND_EXEC (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx + 0);
+ offset += ((u64)(radeon_get_ib_value(p, idx + 1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad COND_EXEC (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COND_EXEC bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->gpu_offset;
+ ib[idx + 0] = offset;
+ ib[idx + 1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
+ case PACKET3_COND_WRITE:
+ if (pkt->count != 7) {
+ DRM_ERROR("bad COND_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ if (idx_value & 0x10) {
+ u64 offset;
+ /* POLL is memory. */
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad COND_WRITE (missing src reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx + 1);
+ offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32;
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->gpu_offset;
+ ib[idx + 1] = offset;
+ ib[idx + 2] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* POLL is a reg. */
+ reg = radeon_get_ib_value(p, idx + 1) << 2;
+ if (!evergreen_is_safe_reg(p, reg)) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 1);
+ return -EINVAL;
+ }
+ }
+ if (idx_value & 0x100) {
+ u64 offset;
+ /* WRITE is memory. */
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad COND_WRITE (missing dst reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx + 5);
+ offset += ((u64)(radeon_get_ib_value(p, idx + 6) & 0xff)) << 32;
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->gpu_offset;
+ ib[idx + 5] = offset;
+ ib[idx + 6] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* WRITE is a reg. */
+ reg = radeon_get_ib_value(p, idx + 5) << 2;
+ if (!evergreen_is_safe_reg(p, reg)) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 5);
+ return -EINVAL;
+ }
+ }
+ break;
case PACKET3_NOP:
break;
default:
@@ -3406,7 +3495,12 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
case CAYMAN_PACKET3_DEALLOC_STATE:
break;
case PACKET3_COND_WRITE:
- if (idx_value & 0x100) {
+ if (!(idx_value & 0x10)) {
+ reg = ib[idx + 1] * 4;
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ if (!(idx_value & 0x100)) {
reg = ib[idx + 5] * 4;
if (!evergreen_vm_reg_valid(reg))
return -EINVAL;
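Both new packet checkers rebuild a GPU address from two IB dwords, a full low dword plus the low 8 bits of the next, i.e. a 40-bit address. Hypothetical helpers mirroring the round trip performed inline above:

static u64 ib_unpack_addr40(u32 lo, u32 hi)
{
	return (u64)lo | (((u64)hi & 0xff) << 32);
}

static void ib_pack_addr40(u64 addr, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(addr);
	*hi = upper_32_bits(addr) & 0xff;
}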
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8f5f8abcb1b4..4dc77c398617 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1297,12 +1297,13 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
int
radeon_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
fb->obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs);
if (ret) {
fb->obj[0] = NULL;
@@ -1314,6 +1315,7 @@ radeon_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
@@ -1340,7 +1342,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj);
+ ret = radeon_framebuffer_init(dev, fb, info, mode_cmd, obj);
if (ret) {
kfree(fb);
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 267f082bc430..88e821d67af7 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -110,9 +110,10 @@
* 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
* 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
* 2.50.0 - Allows unaligned shader loads on CIK. (needed by OpenGL)
+ * 2.51.0 - Add evergreen/cayman OpenGL 4.6 compatibility
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 50
+#define KMS_DRIVER_MINOR 51
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_no_wb;
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index d4a58bd679db..dc81b0c2dbff 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -53,10 +53,10 @@ static void radeon_fbdev_destroy_pinned_object(struct drm_gem_object *gobj)
}
static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
- const struct drm_format_info *info;
struct radeon_device *rdev = fb_helper->dev->dev_private;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
@@ -67,7 +67,6 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
int height = mode_cmd->height;
u32 cpp;
- info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd);
cpp = info->cpp[0];
/* need to align pitch with crtc limits */
@@ -205,6 +204,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct radeon_device *rdev = fb_helper->dev->dev_private;
+ const struct drm_format_info *format_info;
struct drm_mode_fb_cmd2 mode_cmd = { };
struct fb_info *info;
struct drm_gem_object *gobj;
@@ -223,7 +223,9 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
- ret = radeon_fbdev_create_pinned_object(fb_helper, &mode_cmd, &gobj);
+ format_info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd.pixel_format,
+ mode_cmd.modifier[0]);
+ ret = radeon_fbdev_create_pinned_object(fb_helper, format_info, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon object %d\n", ret);
return ret;
@@ -235,7 +237,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
ret = -ENOMEM;
goto err_radeon_fbdev_destroy_pinned_object;
}
- ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, &mode_cmd, gobj);
+ ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, format_info, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto err_kfree;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 3102f6c2d055..9e34da2cacef 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -40,6 +40,7 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
+struct drm_format_info;
struct edid;
struct drm_edid;
@@ -890,6 +891,7 @@ extern void
radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
int radeon_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *rfb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
index 4c8fe83dd610..216219accfd9 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
@@ -426,6 +426,7 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
@@ -490,7 +491,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
index f9893d7d6dfc..e9e59c5e70d5 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
@@ -16,7 +16,7 @@ struct rcar_du_format_info;
struct rcar_du_group;
/*
- * The RCAR DU has 8 hardware planes, shared between primary and overlay planes.
+ * The R-Car DU has 8 hardware planes, shared between primary and overlay planes.
* As using overlay planes requires at least one of the CRTCs being enabled, no
* more than 7 overlay planes can be available. We thus create 1 primary plane
* per CRTC and 7 overlay planes, for a total of up to 9 KMS planes.
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index a9145253294f..af58b814e588 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -878,9 +878,10 @@ static int rcar_lvds_probe(struct platform_device *pdev)
struct rcar_lvds *lvds;
int ret;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (lvds == NULL)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(&pdev->dev, struct rcar_lvds, bridge,
+ &rcar_lvds_bridge_ops);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
platform_set_drvdata(pdev, lvds);
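devm_drm_bridge_alloc() replaces the devm_kzalloc() plus manual 'bridge.funcs =' pattern here and in the other bridge drivers converted below. Its assumed shape (see include/drm/drm_bridge.h for the real macro) is roughly:

#define devm_drm_bridge_alloc(dev, type, member, funcs) \
	((type *)__devm_drm_bridge_alloc(dev, sizeof(type), \
					 offsetof(type, member), funcs))

It returns an ERR_PTR() on failure, hence IS_ERR()/PTR_ERR() replacing the NULL check.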
@@ -895,7 +896,6 @@ static int rcar_lvds_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- lvds->bridge.funcs = &rcar_lvds_bridge_ops;
lvds->bridge.of_node = pdev->dev.of_node;
lvds->mmio = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 7ab8be46c7f6..1af4c73f7a88 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -918,7 +918,6 @@ static int rcar_mipi_dsi_host_attach(struct mipi_dsi_host *host,
}
/* Initialize the DRM bridge. */
- dsi->bridge.funcs = &rcar_mipi_dsi_bridge_ops;
dsi->bridge.of_node = dsi->dev->of_node;
drm_bridge_add(&dsi->bridge);
@@ -1004,9 +1003,10 @@ static int rcar_mipi_dsi_probe(struct platform_device *pdev)
struct rcar_mipi_dsi *dsi;
int ret;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (dsi == NULL)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct rcar_mipi_dsi, bridge,
+ &rcar_mipi_dsi_bridge_ops);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index 5e40f0c1e7b0..e1aa6a719529 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -50,9 +50,20 @@ static const struct rzg2l_du_device_info rzg2l_du_r9a07g044_info = {
}
};
+static const struct rzg2l_du_device_info rzg2l_du_r9a09g057_info = {
+ .channels_mask = BIT(0),
+ .routes = {
+ [RZG2L_DU_OUTPUT_DSI0] = {
+ .possible_outputs = BIT(0),
+ .port = 0,
+ },
+ },
+};
+
static const struct of_device_id rzg2l_du_of_table[] = {
{ .compatible = "renesas,r9a07g043u-du", .data = &rzg2l_du_r9a07g043u_info },
{ .compatible = "renesas,r9a07g044-du", .data = &rzg2l_du_r9a07g044_info },
+ { .compatible = "renesas,r9a09g057-du", .data = &rzg2l_du_r9a09g057_info },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
index 564ab4cb3d37..5e6dd16705e6 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
@@ -22,6 +22,26 @@
* Encoder
*/
+static unsigned int rzg2l_du_encoder_count_ports(struct device_node *node)
+{
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int num_ports = 0;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = of_node_get(node);
+
+ for_each_child_of_node(ports, port) {
+ if (of_node_name_eq(port, "port"))
+ num_ports++;
+ }
+
+ of_node_put(ports);
+
+ return num_ports;
+}
+
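This helper feeds the heuristic in rzg2l_du_encoder_init() below: a DPAD endpoint whose node carries exactly one port is assumed to describe a panel. Hypothetical device-tree fragments on either side of the check:

/*
 * panel {			bridge {
 *	port { ... };			ports {
 * };					       port@0 { ... };
 *					       port@1 { ... };
 *				        };
 *				};
 *
 * one port:  of_drm_find_panel() + devm_drm_panel_bridge_add_typed()
 * otherwise: of_drm_find_bridge()
 */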
static const struct drm_encoder_funcs rzg2l_du_encoder_funcs = {
};
@@ -50,10 +70,26 @@ int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
struct drm_bridge *bridge;
int ret;
- /* Locate the DRM bridge from the DT node. */
- bridge = of_drm_find_bridge(enc_node);
- if (!bridge)
- return -EPROBE_DEFER;
+ /*
+ * Locate the DRM bridge from the DT node. For the DPAD outputs, if the
+ * DT node has a single port, assume that it describes a panel and
+ * create a panel bridge.
+ */
+ if (output == RZG2L_DU_OUTPUT_DPAD0 && rzg2l_du_encoder_count_ports(enc_node) == 1) {
+ struct drm_panel *panel = of_drm_find_panel(enc_node);
+
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+
+ bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ } else {
+ bridge = of_drm_find_bridge(enc_node);
+ if (!bridge)
+ return -EPROBE_DEFER;
+ }
dev_dbg(rcdu->dev, "initializing encoder %pOF for output %s\n",
enc_node, rzg2l_du_output_name(output));
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
index 55a97691e9b2..87f171145a23 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -191,6 +191,7 @@ int rzg2l_du_dumb_create(struct drm_file *file, struct drm_device *dev,
static struct drm_framebuffer *
rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct rzg2l_du_format_info *format;
@@ -214,7 +215,7 @@ rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index dc6ab012cdb6..f87337c3cbb5 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -4,10 +4,14 @@
*
* Copyright (C) 2022 Renesas Electronics Corporation
*/
+
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -15,6 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/units.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -23,13 +28,37 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <video/mipi_display.h>
#include "rzg2l_mipi_dsi_regs.h"
+#define RZG2L_DCS_BUF_SIZE 128 /* Maximum DCS buffer size in external memory. */
+
+#define RZ_MIPI_DSI_FEATURE_16BPP BIT(0)
+
+struct rzg2l_mipi_dsi;
+
+struct rzg2l_mipi_dsi_hw_info {
+ int (*dphy_init)(struct rzg2l_mipi_dsi *dsi, u64 hsfreq_millihz);
+ void (*dphy_startup_late_init)(struct rzg2l_mipi_dsi *dsi);
+ void (*dphy_exit)(struct rzg2l_mipi_dsi *dsi);
+ int (*dphy_conf_clks)(struct rzg2l_mipi_dsi *dsi, unsigned long mode_freq,
+ u64 *hsfreq_millihz);
+ unsigned int (*dphy_mode_clk_check)(struct rzg2l_mipi_dsi *dsi,
+ unsigned long mode_freq);
+ u32 phy_reg_offset;
+ u32 link_reg_offset;
+ unsigned long min_dclk;
+ unsigned long max_dclk;
+ u8 features;
+};
+
struct rzg2l_mipi_dsi {
struct device *dev;
void __iomem *mmio;
+ const struct rzg2l_mipi_dsi_hw_info *info;
+
struct reset_control *rstc;
struct reset_control *arstc;
struct reset_control *prstc;
@@ -44,6 +73,10 @@ struct rzg2l_mipi_dsi {
unsigned int num_data_lanes;
unsigned int lanes;
unsigned long mode_flags;
+
+ /* DCS buffer pointers when using external memory. */
+ dma_addr_t dcs_buf_phys;
+ u8 *dcs_buf_virt;
};
static inline struct rzg2l_mipi_dsi *
@@ -75,7 +108,7 @@ struct rzg2l_mipi_dsi_timings {
static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
{
- .hsfreq_max = 80000,
+ .hsfreq_max = 80000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 13,
@@ -89,7 +122,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 125000,
+ .hsfreq_max = 125000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 12,
@@ -103,7 +136,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 250000,
+ .hsfreq_max = 250000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 12,
@@ -117,7 +150,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 360000,
+ .hsfreq_max = 360000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 10,
@@ -131,7 +164,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 720000,
+ .hsfreq_max = 720000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 9,
@@ -145,7 +178,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 1500000,
+ .hsfreq_max = 1500000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 9,
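The hsfreq_max bounds switch units from kHz to Hz (80000 kHz becomes 80000000 Hz, and so on), matching the millihertz plumbing introduced below. A sketch of the lookup these bounds serve, assuming the table stays sorted ascending:

for (i = 0; i < ARRAY_SIZE(rzg2l_mipi_dsi_global_timings); i++) {
	dphy_timings = &rzg2l_mipi_dsi_global_timings[i];
	if (hsfreq <= dphy_timings->hsfreq_max)	/* hsfreq now in Hz */
		break;
}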
@@ -162,22 +195,22 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
static void rzg2l_mipi_dsi_phy_write(struct rzg2l_mipi_dsi *dsi, u32 reg, u32 data)
{
- iowrite32(data, dsi->mmio + reg);
+ iowrite32(data, dsi->mmio + dsi->info->phy_reg_offset + reg);
}
static void rzg2l_mipi_dsi_link_write(struct rzg2l_mipi_dsi *dsi, u32 reg, u32 data)
{
- iowrite32(data, dsi->mmio + LINK_REG_OFFSET + reg);
+ iowrite32(data, dsi->mmio + dsi->info->link_reg_offset + reg);
}
static u32 rzg2l_mipi_dsi_phy_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
{
- return ioread32(dsi->mmio + reg);
+ return ioread32(dsi->mmio + dsi->info->phy_reg_offset + reg);
}
static u32 rzg2l_mipi_dsi_link_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
{
- return ioread32(dsi->mmio + LINK_REG_OFFSET + reg);
+ return ioread32(dsi->mmio + dsi->info->link_reg_offset + reg);
}
/* -----------------------------------------------------------------------------
@@ -185,8 +218,9 @@ static u32 rzg2l_mipi_dsi_link_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
*/
static int rzg2l_mipi_dsi_dphy_init(struct rzg2l_mipi_dsi *dsi,
- unsigned long hsfreq)
+ u64 hsfreq_millihz)
{
+ unsigned long hsfreq = DIV_ROUND_CLOSEST_ULL(hsfreq_millihz, MILLI);
const struct rzg2l_mipi_dsi_timings *dphy_timings;
unsigned int i;
u32 dphyctrl0;
@@ -255,20 +289,17 @@ static void rzg2l_mipi_dsi_dphy_exit(struct rzg2l_mipi_dsi *dsi)
reset_control_assert(dsi->rstc);
}
-static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
- const struct drm_display_mode *mode)
+static int rzg2l_dphy_conf_clks(struct rzg2l_mipi_dsi *dsi, unsigned long mode_freq,
+ u64 *hsfreq_millihz)
{
- unsigned long hsfreq;
+ unsigned long vclk_rate;
unsigned int bpp;
- u32 txsetr;
- u32 clstptsetr;
- u32 lptrnstsetr;
- u32 clkkpt;
- u32 clkbfht;
- u32 clkstpt;
- u32 golpbkt;
- int ret;
+ clk_set_rate(dsi->vclk, mode_freq * KILO);
+ vclk_rate = clk_get_rate(dsi->vclk);
+ if (vclk_rate != mode_freq * KILO)
+ dev_dbg(dsi->dev, "Requested vclk rate %lu, actual %lu mismatch\n",
+ mode_freq * KILO, vclk_rate);
/*
* Relationship between hsclk and vclk must follow
* vclk * bpp = hsclk * 8 * lanes
@@ -277,18 +308,39 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
* hsclk: DSI HS Byte clock frequency (Hz)
* lanes: number of data lanes
*
- * hsclk(bit) = hsclk(byte) * 8
+ * hsclk(bit) = hsclk(byte) * 8 = hsfreq
*/
bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- hsfreq = (mode->clock * bpp * 8) / (8 * dsi->lanes);
+ *hsfreq_millihz = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(vclk_rate, bpp * MILLI),
+ dsi->lanes);
+
+ return 0;
+}
+
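+ /*
+ * Worked instance of the vclk/hsclk relation above (illustrative
+ * numbers): 148.5 MHz pixel clock, RGB888 (bpp = 24), 4 lanes:
+ *
+ *   hsfreq(bit) = vclk * bpp / lanes
+ *               = 148500000 * 24 / 4 = 891000000
+ *
+ * which is the 891 Mbps ceiling quoted further down. In millihertz,
+ * as rzg2l_dphy_conf_clks() computes it:
+ *
+ *   DIV_ROUND_CLOSEST_ULL(mul_u32_u32(148500000, 24 * MILLI), 4)
+ */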
+static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
+ const struct drm_display_mode *mode)
+{
+ unsigned long hsfreq;
+ u64 hsfreq_millihz;
+ u32 txsetr;
+ u32 clstptsetr;
+ u32 lptrnstsetr;
+ u32 clkkpt;
+ u32 clkbfht;
+ u32 clkstpt;
+ u32 golpbkt;
+ u32 dsisetr;
+ int ret;
ret = pm_runtime_resume_and_get(dsi->dev);
if (ret < 0)
return ret;
- clk_set_rate(dsi->vclk, mode->clock * 1000);
+ ret = dsi->info->dphy_conf_clks(dsi, mode->clock, &hsfreq_millihz);
+ if (ret < 0)
+ goto err_phy;
- ret = rzg2l_mipi_dsi_dphy_init(dsi, hsfreq);
+ ret = dsi->info->dphy_init(dsi, hsfreq_millihz);
if (ret < 0)
goto err_phy;
@@ -296,6 +348,10 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
txsetr = TXSETR_DLEN | TXSETR_NUMLANEUSE(dsi->lanes - 1) | TXSETR_CLEN;
rzg2l_mipi_dsi_link_write(dsi, TXSETR, txsetr);
+ if (dsi->info->dphy_startup_late_init)
+ dsi->info->dphy_startup_late_init(dsi);
+
+ hsfreq = DIV_ROUND_CLOSEST_ULL(hsfreq_millihz, MILLI);
/*
* Global timings characteristic depends on high speed Clock Frequency
* Currently MIPI DSI-IF just supports maximum FHD@60 with:
@@ -304,12 +360,12 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
* - data lanes: maximum 4 lanes
* Therefore maximum hsclk will be 891 Mbps.
*/
- if (hsfreq > 445500) {
+ if (hsfreq > 445500000) {
clkkpt = 12;
clkbfht = 15;
clkstpt = 48;
golpbkt = 75;
- } else if (hsfreq > 250000) {
+ } else if (hsfreq > 250000000) {
clkkpt = 7;
clkbfht = 8;
clkstpt = 27;
@@ -328,10 +384,19 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
lptrnstsetr = LPTRNSTSETR_GOLPBKT(golpbkt);
rzg2l_mipi_dsi_link_write(dsi, LPTRNSTSETR, lptrnstsetr);
+ /*
+ * Increase MRPSZ as the default value of 1 will result in long read
+ * command payloads not being saved to memory.
+ */
+ dsisetr = rzg2l_mipi_dsi_link_read(dsi, DSISETR);
+ dsisetr &= ~DSISETR_MRPSZ;
+ dsisetr |= FIELD_PREP(DSISETR_MRPSZ, RZG2L_DCS_BUF_SIZE);
+ rzg2l_mipi_dsi_link_write(dsi, DSISETR, dsisetr);
+
return 0;
err_phy:
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
return ret;
@@ -339,7 +404,7 @@ err_phy:
static void rzg2l_mipi_dsi_stop(struct rzg2l_mipi_dsi *dsi)
{
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
}
@@ -532,8 +597,8 @@ static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge,
flags);
}
-static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void rzg2l_mipi_dsi_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
const struct drm_display_mode *mode;
@@ -550,6 +615,13 @@ static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
return;
rzg2l_mipi_dsi_set_display_timing(dsi, mode);
+}
+
+static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
+ int ret;
ret = rzg2l_mipi_dsi_start_hs_clock(dsi);
if (ret < 0)
@@ -582,9 +654,22 @@ rzg2l_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
- if (mode->clock > 148500)
+ struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
+
+ if (mode->clock > dsi->info->max_dclk)
return MODE_CLOCK_HIGH;
+ if (mode->clock < dsi->info->min_dclk)
+ return MODE_CLOCK_LOW;
+
+ if (dsi->info->dphy_mode_clk_check) {
+ enum drm_mode_status status;
+
+ status = dsi->info->dphy_mode_clk_check(dsi, mode->clock);
+ if (status != MODE_OK)
+ return status;
+ }
+
return MODE_OK;
}
@@ -593,6 +678,7 @@ static const struct drm_bridge_funcs rzg2l_mipi_dsi_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = rzg2l_mipi_dsi_atomic_pre_enable,
.atomic_enable = rzg2l_mipi_dsi_atomic_enable,
.atomic_disable = rzg2l_mipi_dsi_atomic_disable,
.mode_valid = rzg2l_mipi_dsi_bridge_mode_valid,
@@ -617,8 +703,16 @@ static int rzg2l_mipi_dsi_host_attach(struct mipi_dsi_host *host,
switch (mipi_dsi_pixel_format_to_bpp(device->format)) {
case 24:
+ break;
case 18:
break;
+ case 16:
+ if (!(dsi->info->features & RZ_MIPI_DSI_FEATURE_16BPP)) {
+ dev_err(dsi->dev, "Unsupported format 0x%04x\n",
+ device->format);
+ return -EINVAL;
+ }
+ break;
default:
dev_err(dsi->dev, "Unsupported format 0x%04x\n", device->format);
return -EINVAL;
@@ -651,9 +745,168 @@ static int rzg2l_mipi_dsi_host_detach(struct mipi_dsi_host *host,
return 0;
}
+static ssize_t rzg2l_mipi_dsi_read_response(struct rzg2l_mipi_dsi *dsi,
+ const struct mipi_dsi_msg *msg)
+{
+ u8 *msg_rx = msg->rx_buf;
+ u8 datatype;
+ u32 result;
+ u16 size;
+
+ result = rzg2l_mipi_dsi_link_read(dsi, RXRSS0R);
+ if (result & RXRSS0R_RXPKTDFAIL) {
+ dev_err(dsi->dev, "packet rx data did not save correctly\n");
+ return -EPROTO;
+ }
+
+ if (result & RXRSS0R_RXFAIL) {
+ dev_err(dsi->dev, "packet rx failure\n");
+ return -EPROTO;
+ }
+
+ if (!(result & RXRSS0R_RXSUC))
+ return -EPROTO;
+
+ datatype = FIELD_GET(RXRSS0R_DT, result);
+
+ switch (datatype) {
+ case 0:
+ dev_dbg(dsi->dev, "ACK\n");
+ return 0;
+ case MIPI_DSI_RX_END_OF_TRANSMISSION:
+ dev_dbg(dsi->dev, "EoTp\n");
+ return 0;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ dev_dbg(dsi->dev, "Acknowledge and error report: $%02x%02x\n",
+ (u8)FIELD_GET(RXRSS0R_DATA1, result),
+ (u8)FIELD_GET(RXRSS0R_DATA0, result));
+ return 0;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ msg_rx[0] = FIELD_GET(RXRSS0R_DATA0, result);
+ return 1;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ msg_rx[0] = FIELD_GET(RXRSS0R_DATA0, result);
+ msg_rx[1] = FIELD_GET(RXRSS0R_DATA1, result);
+ return 2;
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ size = FIELD_GET(RXRSS0R_WC, result);
+
+ if (size > msg->rx_len) {
+ dev_err(dsi->dev, "rx buffer too small");
+ return -ENOSPC;
+ }
+
+ memcpy(msg_rx, dsi->dcs_buf_virt, size);
+ return size;
+ default:
+ dev_err(dsi->dev, "unhandled response type: %02x\n", datatype);
+ return -EPROTO;
+ }
+}
+
+static ssize_t rzg2l_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct rzg2l_mipi_dsi *dsi = host_to_rzg2l_mipi_dsi(host);
+ struct mipi_dsi_packet packet;
+ bool need_bta;
+ u32 value;
+ int ret;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret < 0)
+ return ret;
+
+ /* Terminate operation after this descriptor is finished */
+ value = SQCH0DSC0AR_NXACT_TERM;
+
+ if (msg->flags & MIPI_DSI_MSG_REQ_ACK) {
+ need_bta = true; /* Message with explicitly requested ACK */
+ value |= FIELD_PREP(SQCH0DSC0AR_BTA, SQCH0DSC0AR_BTA_NON_READ);
+ } else if (msg->rx_buf && msg->rx_len > 0) {
+ need_bta = true; /* Read request */
+ value |= FIELD_PREP(SQCH0DSC0AR_BTA, SQCH0DSC0AR_BTA_READ);
+ } else {
+ need_bta = false;
+ value |= FIELD_PREP(SQCH0DSC0AR_BTA, SQCH0DSC0AR_BTA_NONE);
+ }
+
+ /* Set transmission speed */
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+ value |= SQCH0DSC0AR_SPD_LOW;
+ else
+ value |= SQCH0DSC0AR_SPD_HIGH;
+
+ /* Write TX packet header */
+ value |= FIELD_PREP(SQCH0DSC0AR_DT, packet.header[0]) |
+ FIELD_PREP(SQCH0DSC0AR_DATA0, packet.header[1]) |
+ FIELD_PREP(SQCH0DSC0AR_DATA1, packet.header[2]);
+
+ if (mipi_dsi_packet_format_is_long(msg->type)) {
+ value |= SQCH0DSC0AR_FMT_LONG;
+
+ if (packet.payload_length > RZG2L_DCS_BUF_SIZE) {
+ dev_err(dsi->dev, "Packet Tx payload size (%d) too large",
+ (unsigned int)packet.payload_length);
+ return -ENOSPC;
+ }
+
+ /* Copy TX packet payload data to memory space */
+ memcpy(dsi->dcs_buf_virt, packet.payload, packet.payload_length);
+ } else {
+ value |= SQCH0DSC0AR_FMT_SHORT;
+ }
+
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0AR, value);
+
+ /*
+ * Write: specify payload data source location, only used for
+ * long packet.
+ * Read: specify payload data storage location of response
+ * packet. Note: a read packet is always a short packet.
+ * If the response packet is a short packet or a long packet
+ * with WC = 0 (no payload), DTSEL is meaningless.
+ */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0BR, SQCH0DSC0BR_DTSEL_MEM_SPACE);
+
+ /*
+ * Set SQCHxSR.AACTFIN bit when descriptor actions are finished.
+ * Read: set Rx result save slot number to 0 (ACTCODE).
+ */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0CR, SQCH0DSC0CR_FINACT);
+
+ /* Set rx/tx payload data address, only relevant for long packet. */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0DR, (u32)dsi->dcs_buf_phys);
+
+ /* Start sequence 0 operation */
+ value = rzg2l_mipi_dsi_link_read(dsi, SQCH0SET0R);
+ value |= SQCH0SET0R_START;
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0SET0R, value);
+
+ /* Wait for operation to finish */
+ ret = read_poll_timeout(rzg2l_mipi_dsi_link_read,
+ value, value & SQCH0SR_ADESFIN,
+ 2000, 20000, false, dsi, SQCH0SR);
+ if (ret == 0) {
+ /* Success: clear status bit */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0SCR, SQCH0SCR_ADESFIN);
+
+ if (need_bta)
+ ret = rzg2l_mipi_dsi_read_response(dsi, msg);
+ else
+ ret = packet.payload_length;
+ }
+
+ return ret;
+}
+
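With the new ->transfer hook wired up, attached panels can use the standard DCS helpers. A hypothetical consumer exercising both the write path and the BTA_READ/RXRSS0R decode above (dsi_device being some attached mipi_dsi_device):

u8 id[3];
ssize_t n;

/* short write, no response expected */
n = mipi_dsi_dcs_write(dsi_device, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);

/* read, goes through SQCH0DSC0AR_BTA_READ and the response decode */
n = mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_ID, id, sizeof(id));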
static const struct mipi_dsi_host_ops rzg2l_mipi_dsi_host_ops = {
.attach = rzg2l_mipi_dsi_host_attach,
.detach = rzg2l_mipi_dsi_host_detach,
+ .transfer = rzg2l_mipi_dsi_host_transfer,
};
/* -----------------------------------------------------------------------------
@@ -701,13 +954,16 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
u32 txsetr;
int ret;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct rzg2l_mipi_dsi, bridge,
+ &rzg2l_mipi_dsi_bridge_ops);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
dsi->dev = &pdev->dev;
+ dsi->info = of_device_get_match_data(&pdev->dev);
+
ret = drm_of_get_data_lanes_count_ep(dsi->dev->of_node, 1, 0, 1, 4);
if (ret < 0)
return dev_err_probe(dsi->dev, ret,
@@ -723,7 +979,7 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->vclk))
return PTR_ERR(dsi->vclk);
- dsi->rstc = devm_reset_control_get_exclusive(dsi->dev, "rst");
+ dsi->rstc = devm_reset_control_get_optional_exclusive(dsi->dev, "rst");
if (IS_ERR(dsi->rstc))
return dev_err_probe(dsi->dev, PTR_ERR(dsi->rstc),
"failed to get rst\n");
@@ -751,17 +1007,16 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
* mode->clock and format are not available. So initialize DPHY with
* timing parameters for 80Mbps.
*/
- ret = rzg2l_mipi_dsi_dphy_init(dsi, 80000);
+ ret = dsi->info->dphy_init(dsi, 80000000ULL * MILLI);
if (ret < 0)
goto err_phy;
txsetr = rzg2l_mipi_dsi_link_read(dsi, TXSETR);
dsi->num_data_lanes = min(((txsetr >> 16) & 3) + 1, num_data_lanes);
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
/* Initialize the DRM bridge. */
- dsi->bridge.funcs = &rzg2l_mipi_dsi_bridge_ops;
dsi->bridge.of_node = dsi->dev->of_node;
/* Init host device */
@@ -771,10 +1026,15 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm_disable;
+ dsi->dcs_buf_virt = dma_alloc_coherent(dsi->host.dev, RZG2L_DCS_BUF_SIZE,
+ &dsi->dcs_buf_phys, GFP_KERNEL);
+ if (!dsi->dcs_buf_virt)
+ return -ENOMEM;
+
return 0;
err_phy:
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
err_pm_disable:
pm_runtime_disable(dsi->dev);
@@ -785,12 +1045,23 @@ static void rzg2l_mipi_dsi_remove(struct platform_device *pdev)
{
struct rzg2l_mipi_dsi *dsi = platform_get_drvdata(pdev);
+ dma_free_coherent(dsi->host.dev, RZG2L_DCS_BUF_SIZE, dsi->dcs_buf_virt,
+ dsi->dcs_buf_phys);
mipi_dsi_host_unregister(&dsi->host);
pm_runtime_disable(&pdev->dev);
}
+static const struct rzg2l_mipi_dsi_hw_info rzg2l_mipi_dsi_info = {
+ .dphy_init = rzg2l_mipi_dsi_dphy_init,
+ .dphy_exit = rzg2l_mipi_dsi_dphy_exit,
+ .dphy_conf_clks = rzg2l_dphy_conf_clks,
+ .link_reg_offset = 0x10000,
+ .min_dclk = 5803,
+ .max_dclk = 148500,
+};
+
static const struct of_device_id rzg2l_mipi_dsi_of_table[] = {
- { .compatible = "renesas,rzg2l-mipi-dsi" },
+ { .compatible = "renesas,rzg2l-mipi-dsi", .data = &rzg2l_mipi_dsi_info, },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
index 1dbc16ec64a4..d8082a87d874 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
@@ -41,8 +41,6 @@
#define DSIDPHYTIM3_THS_ZERO(x) ((x) << 0)
/* --------------------------------------------------------*/
-/* Link Registers */
-#define LINK_REG_OFFSET 0x10000
/* Link Status Register */
#define LINKSR 0x10
@@ -81,6 +79,20 @@
#define RSTSR_SWRSTLP (1 << 1)
#define RSTSR_SWRSTHS (1 << 0)
+/* DSI Set Register */
+#define DSISETR 0x120
+#define DSISETR_MRPSZ GENMASK(15, 0)
+
+/* Rx Result Save Slot 0 Register */
+#define RXRSS0R 0x240
+#define RXRSS0R_RXPKTDFAIL BIT(28)
+#define RXRSS0R_RXFAIL BIT(27)
+#define RXRSS0R_RXSUC BIT(25)
+#define RXRSS0R_DT GENMASK(21, 16)
+#define RXRSS0R_DATA1 GENMASK(15, 8)
+#define RXRSS0R_DATA0 GENMASK(7, 0)
+#define RXRSS0R_WC GENMASK(15, 0) /* Word count for long packet. */
+
/* Clock Lane Stop Time Set Register */
#define CLSTPTSETR 0x314
#define CLSTPTSETR_CLKKPT(x) ((x) << 24)
@@ -148,4 +160,44 @@
#define VICH1HPSETR_HFP(x) (((x) & 0x1fff) << 16)
#define VICH1HPSETR_HBP(x) (((x) & 0x1fff) << 0)
+/* Sequence Channel 0 Set 0 Register */
+#define SQCH0SET0R 0x5c0
+#define SQCH0SET0R_START BIT(0)
+
+/* Sequence Channel 0 Status Register */
+#define SQCH0SR 0x5d0
+#define SQCH0SR_ADESFIN BIT(8)
+
+/* Sequence Channel 0 Status Clear Register */
+#define SQCH0SCR 0x5d4
+#define SQCH0SCR_ADESFIN BIT(8)
+
+/* Sequence Channel 0 Descriptor 0-A Register */
+#define SQCH0DSC0AR 0x780
+#define SQCH0DSC0AR_NXACT_TERM 0 /* Bit 28 */
+#define SQCH0DSC0AR_BTA GENMASK(27, 26)
+#define SQCH0DSC0AR_BTA_NONE 0
+#define SQCH0DSC0AR_BTA_NON_READ 1
+#define SQCH0DSC0AR_BTA_READ 2
+#define SQCH0DSC0AR_BTA_ONLY 3
+#define SQCH0DSC0AR_SPD_HIGH 0
+#define SQCH0DSC0AR_SPD_LOW BIT(25)
+#define SQCH0DSC0AR_FMT_SHORT 0
+#define SQCH0DSC0AR_FMT_LONG BIT(24)
+#define SQCH0DSC0AR_DT GENMASK(21, 16)
+#define SQCH0DSC0AR_DATA1 GENMASK(15, 8)
+#define SQCH0DSC0AR_DATA0 GENMASK(7, 0)
+
+/* Sequence Channel 0 Descriptor 0-B Register */
+#define SQCH0DSC0BR 0x784
+#define SQCH0DSC0BR_DTSEL_MEM_SPACE BIT(24) /* Use external memory */
+
+/* Sequence Channel 0 Descriptor 0-C Register */
+#define SQCH0DSC0CR 0x788
+#define SQCH0DSC0CR_FINACT BIT(0)
+#define SQCH0DSC0CR_AUXOP BIT(22)
+
+/* Sequence Channel 0 Descriptor 0-D Register */
+#define SQCH0DSC0DR 0x78c
+
#endif /* __RZG2L_MIPI_DSI_REGS_H__ */
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
index 4202ab00fb0c..fd9460da1789 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
@@ -117,6 +117,7 @@ const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc)
static struct drm_framebuffer *
shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct shmob_drm_format_info *format;
@@ -144,7 +145,7 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 292c31de18f1..b7e3f5dcf8d5 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -16,7 +16,9 @@
#include <sound/hdmi-codec.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -25,9 +27,9 @@
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
-static inline struct cdn_dp_device *connector_to_dp(struct drm_connector *connector)
+static inline struct cdn_dp_device *bridge_to_dp(struct drm_bridge *bridge)
{
- return container_of(connector, struct cdn_dp_device, connector);
+ return container_of(bridge, struct cdn_dp_device, bridge);
}
static inline struct cdn_dp_device *encoder_to_dp(struct drm_encoder *encoder)
@@ -231,9 +233,9 @@ static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
}
static enum drm_connector_status
-cdn_dp_connector_detect(struct drm_connector *connector, bool force)
+cdn_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
- struct cdn_dp_device *dp = connector_to_dp(connector);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
enum drm_connector_status status = connector_status_disconnected;
mutex_lock(&dp->lock);
@@ -244,41 +246,25 @@ cdn_dp_connector_detect(struct drm_connector *connector, bool force)
return status;
}
-static void cdn_dp_connector_destroy(struct drm_connector *connector)
+static const struct drm_edid *
+cdn_dp_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector)
{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
- .detect = cdn_dp_connector_detect,
- .destroy = cdn_dp_connector_destroy,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int cdn_dp_connector_get_modes(struct drm_connector *connector)
-{
- struct cdn_dp_device *dp = connector_to_dp(connector);
- int ret = 0;
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
+ const struct drm_edid *drm_edid;
mutex_lock(&dp->lock);
-
- ret = drm_edid_connector_add_modes(connector);
-
+ drm_edid = drm_edid_read_custom(connector, cdn_dp_get_edid_block, dp);
mutex_unlock(&dp->lock);
- return ret;
+ return drm_edid;
}
static enum drm_mode_status
-cdn_dp_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
+cdn_dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *display_info,
+ const struct drm_display_mode *mode)
{
- struct cdn_dp_device *dp = connector_to_dp(connector);
- struct drm_display_info *display_info = &dp->connector.display_info;
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
u32 requested, actual, rate, sink_max, source_max = 0;
u8 lanes, bpc;
@@ -323,11 +309,6 @@ cdn_dp_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
- .get_modes = cdn_dp_connector_get_modes,
- .mode_valid = cdn_dp_connector_mode_valid,
-};
-
static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
int ret;
@@ -360,7 +341,6 @@ static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
- const struct drm_display_info *info = &dp->connector.display_info;
int ret;
if (!cdn_dp_check_sink_connection(dp))
@@ -373,17 +353,6 @@ static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
return ret;
}
- drm_edid_free(dp->drm_edid);
- dp->drm_edid = drm_edid_read_custom(&dp->connector,
- cdn_dp_get_edid_block, dp);
- drm_edid_connector_update(&dp->connector, dp->drm_edid);
-
- dp->sink_has_audio = info->has_audio;
-
- if (dp->drm_edid)
- DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
- info->width_mm / 10, info->height_mm / 10);
-
return 0;
}
@@ -488,10 +457,6 @@ static int cdn_dp_disable(struct cdn_dp_device *dp)
dp->active = false;
dp->max_lanes = 0;
dp->max_rate = 0;
- if (!dp->connected) {
- drm_edid_free(dp->drm_edid);
- dp->drm_edid = NULL;
- }
return 0;
}
@@ -546,26 +511,13 @@ err_clk_disable:
return ret;
}
-static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
+static void cdn_dp_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted)
{
- struct cdn_dp_device *dp = encoder_to_dp(encoder);
- struct drm_display_info *display_info = &dp->connector.display_info;
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
struct video_info *video = &dp->video_info;
- switch (display_info->bpc) {
- case 10:
- video->color_depth = 10;
- break;
- case 6:
- video->color_depth = 6;
- break;
- default:
- video->color_depth = 8;
- break;
- }
-
video->color_fmt = PXL_RGB;
video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
@@ -592,19 +544,37 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}
-static void cdn_dp_audio_handle_plugged_change(struct cdn_dp_device *dp,
- bool plugged)
+static void cdn_dp_display_info_update(struct cdn_dp_device *dp,
+ struct drm_display_info *display_info)
{
- if (dp->codec_dev)
- dp->plugged_cb(dp->codec_dev, plugged);
+ struct video_info *video = &dp->video_info;
+
+ switch (display_info->bpc) {
+ case 10:
+ video->color_depth = 10;
+ break;
+ case 6:
+ video->color_depth = 6;
+ break;
+ default:
+ video->color_depth = 8;
+ break;
+ }
}
-static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
+static void cdn_dp_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_atomic_state *state)
{
- struct cdn_dp_device *dp = encoder_to_dp(encoder);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
+ struct drm_connector *connector;
int ret, val;
- ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (!connector)
+ return;
+
+ cdn_dp_display_info_update(dp, &connector->display_info);
+
+ ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, &dp->encoder.encoder);
if (ret < 0) {
DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
return;
@@ -625,7 +595,7 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
ret = cdn_dp_enable(dp);
if (ret) {
- DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
+ DRM_DEV_ERROR(dp->dev, "Failed to enable bridge %d\n",
ret);
goto out;
}
@@ -655,24 +625,21 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
goto out;
}
- cdn_dp_audio_handle_plugged_change(dp, true);
-
out:
mutex_unlock(&dp->lock);
}
-static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
+static void cdn_dp_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_atomic_state *state)
{
- struct cdn_dp_device *dp = encoder_to_dp(encoder);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
int ret;
mutex_lock(&dp->lock);
- cdn_dp_audio_handle_plugged_change(dp, false);
if (dp->active) {
ret = cdn_dp_disable(dp);
if (ret) {
- DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
+ DRM_DEV_ERROR(dp->dev, "Failed to disable bridge %d\n",
ret);
}
}
@@ -704,9 +671,6 @@ static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
- .mode_set = cdn_dp_encoder_mode_set,
- .enable = cdn_dp_encoder_enable,
- .disable = cdn_dp_encoder_disable,
.atomic_check = cdn_dp_encoder_atomic_check,
};
@@ -779,11 +743,12 @@ static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
return 0;
}
-static int cdn_dp_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
+static int cdn_dp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
struct audio_info audio = {
.sample_width = params->sample_width,
.sample_rate = params->sample_rate,
@@ -805,7 +770,7 @@ static int cdn_dp_audio_hw_params(struct device *dev, void *data,
audio.format = AFMT_SPDIF;
break;
default:
- DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
+ drm_err(bridge->dev, "Invalid format %d\n", daifmt->fmt);
ret = -EINVAL;
goto out;
}
@@ -819,9 +784,10 @@ out:
return ret;
}
-static void cdn_dp_audio_shutdown(struct device *dev, void *data)
+static void cdn_dp_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
int ret;
mutex_lock(&dp->lock);
@@ -835,10 +801,11 @@ out:
mutex_unlock(&dp->lock);
}
-static int cdn_dp_audio_mute_stream(struct device *dev, void *data,
+static int cdn_dp_audio_mute_stream(struct drm_bridge *bridge,
+ struct drm_connector *connector,
bool enable, int direction)
{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
int ret;
mutex_lock(&dp->lock);
@@ -854,57 +821,22 @@ out:
return ret;
}
-static int cdn_dp_audio_get_eld(struct device *dev, void *data,
- u8 *buf, size_t len)
-{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
-
- memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));
-
- return 0;
-}
-
-static int cdn_dp_audio_hook_plugged_cb(struct device *dev, void *data,
- hdmi_codec_plugged_cb fn,
- struct device *codec_dev)
-{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
-
- mutex_lock(&dp->lock);
- dp->plugged_cb = fn;
- dp->codec_dev = codec_dev;
- cdn_dp_audio_handle_plugged_change(dp, dp->connected);
- mutex_unlock(&dp->lock);
-
- return 0;
-}
-
-static const struct hdmi_codec_ops audio_codec_ops = {
- .hw_params = cdn_dp_audio_hw_params,
- .audio_shutdown = cdn_dp_audio_shutdown,
- .mute_stream = cdn_dp_audio_mute_stream,
- .get_eld = cdn_dp_audio_get_eld,
- .hook_plugged_cb = cdn_dp_audio_hook_plugged_cb,
+static const struct drm_bridge_funcs cdn_dp_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .detect = cdn_dp_bridge_detect,
+ .edid_read = cdn_dp_bridge_edid_read,
+ .atomic_enable = cdn_dp_bridge_atomic_enable,
+ .atomic_disable = cdn_dp_bridge_atomic_disable,
+ .mode_valid = cdn_dp_bridge_mode_valid,
+ .mode_set = cdn_dp_bridge_mode_set,
+
+ .dp_audio_prepare = cdn_dp_audio_prepare,
+ .dp_audio_mute_stream = cdn_dp_audio_mute_stream,
+ .dp_audio_shutdown = cdn_dp_audio_shutdown,
};
-static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
- struct device *dev)
-{
- struct hdmi_codec_pdata codec_data = {
- .i2s = 1,
- .spdif = 1,
- .ops = &audio_codec_ops,
- .max_i2s_channels = 8,
- .no_capture_mute = 1,
- };
-
- dp->audio_pdev = platform_device_register_data(
- dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
- &codec_data, sizeof(codec_data));
-
- return PTR_ERR_OR_ZERO(dp->audio_pdev);
-}
-
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
int ret;
@@ -1006,7 +938,9 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
out:
mutex_unlock(&dp->lock);
- drm_connector_helper_hpd_irq_event(&dp->connector);
+ drm_bridge_hpd_notify(&dp->bridge,
+ dp->connected ? connector_status_connected
+ : connector_status_disconnected);
}
static int cdn_dp_pd_event(struct notifier_block *nb,
@@ -1062,26 +996,35 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);
- connector = &dp->connector;
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- connector->dpms = DRM_MODE_DPMS_OFF;
-
- ret = drm_connector_init(drm_dev, connector,
- &cdn_dp_atomic_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
- if (ret) {
- DRM_ERROR("failed to initialize connector with drm\n");
- goto err_free_encoder;
- }
+ dp->bridge.ops =
+ DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HPD |
+ DRM_BRIDGE_OP_DP_AUDIO;
+ dp->bridge.of_node = dp->dev->of_node;
+ dp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+ dp->bridge.hdmi_audio_dev = dp->dev;
+ dp->bridge.hdmi_audio_max_i2s_playback_channels = 8;
+ dp->bridge.hdmi_audio_spdif_playback = 1;
+ dp->bridge.hdmi_audio_dai_port = -1;
+
+ ret = devm_drm_bridge_add(dev, &dp->bridge);
+ if (ret)
+ return ret;
- drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);
+ ret = drm_bridge_attach(encoder, &dp->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret) {
- DRM_ERROR("failed to attach connector and encoder\n");
- goto err_free_connector;
+ connector = drm_bridge_connector_init(drm_dev, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ dev_err(dp->dev, "failed to init bridge connector: %d\n", ret);
+ return ret;
}
+ drm_connector_attach_encoder(connector, encoder);
+
for (i = 0; i < dp->ports; i++) {
port = dp->port[i];
@@ -1092,7 +1035,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
if (ret) {
DRM_DEV_ERROR(dev,
"register EXTCON_DISP_DP notifier err\n");
- goto err_free_connector;
+ return ret;
}
}
@@ -1101,30 +1044,19 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
schedule_work(&dp->event_work);
return 0;
-
-err_free_connector:
- drm_connector_cleanup(connector);
-err_free_encoder:
- drm_encoder_cleanup(encoder);
- return ret;
}
static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
struct drm_encoder *encoder = &dp->encoder.encoder;
- struct drm_connector *connector = &dp->connector;
cancel_work_sync(&dp->event_work);
- cdn_dp_encoder_disable(encoder);
encoder->funcs->destroy(encoder);
- connector->funcs->destroy(connector);
pm_runtime_disable(dev);
if (dp->fw_loaded)
release_firmware(dp->fw);
- drm_edid_free(dp->drm_edid);
- dp->drm_edid = NULL;
}
static const struct component_ops cdn_dp_component_ops = {
@@ -1171,9 +1103,10 @@ static int cdn_dp_probe(struct platform_device *pdev)
int ret;
int i;
- dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
+ dp = devm_drm_bridge_alloc(dev, struct cdn_dp_device, bridge,
+ &cdn_dp_bridge_funcs);
+ if (IS_ERR(dp))
+ return PTR_ERR(dp);
dp->dev = dev;
match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
@@ -1209,19 +1142,11 @@ static int cdn_dp_probe(struct platform_device *pdev)
mutex_init(&dp->lock);
dev_set_drvdata(dev, dp);
- ret = cdn_dp_audio_codec_init(dp, dev);
- if (ret)
- return ret;
-
ret = component_add(dev, &cdn_dp_component_ops);
if (ret)
- goto err_audio_deinit;
+ return ret;
return 0;
-
-err_audio_deinit:
- platform_device_unregister(dp->audio_pdev);
- return ret;
}
static void cdn_dp_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 17498f576ce7..e9c30b9fd543 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -8,6 +8,7 @@
#define _CDN_DP_CORE_H
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <sound/hdmi-codec.h>
@@ -65,12 +66,11 @@ struct cdn_dp_port {
struct cdn_dp_device {
struct device *dev;
struct drm_device *drm_dev;
- struct drm_connector connector;
+ struct drm_bridge bridge;
struct rockchip_encoder encoder;
struct drm_display_mode mode;
struct platform_device *audio_pdev;
struct work_struct event_work;
- const struct drm_edid *drm_edid;
struct mutex lock;
bool connected;
@@ -101,9 +101,5 @@ struct cdn_dp_device {
int active_port;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
- bool sink_has_audio;
-
- hdmi_codec_plugged_cb plugged_cb;
- struct device *codec_dev;
};
#endif /* _CDN_DP_CORE_H */
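[Editor's note] The probe-side change above relies on devm_drm_bridge_alloc(), which allocates the driver structure together with its embedded bridge and wires up the funcs in one step. A minimal sketch of the pattern; my_dp, my_probe and my_bridge_funcs are illustrative names, not part of the patch:

#include <linux/platform_device.h>
#include <drm/drm_bridge.h>

struct my_dp {
	struct device *dev;
	struct drm_bridge bridge;	/* embedded, as in cdn_dp_device */
};

extern const struct drm_bridge_funcs my_bridge_funcs;

static int my_probe(struct platform_device *pdev)
{
	struct my_dp *dp;

	/* Allocates my_dp, initializes dp->bridge.funcs, devres-managed. */
	dp = devm_drm_bridge_alloc(&pdev->dev, struct my_dp, bridge,
				   &my_bridge_funcs);
	if (IS_ERR(dp))
		return PTR_ERR(dp);

	dp->dev = &pdev->dev;
	return 0;
}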
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index f737e7d46e66..acb59b25d928 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -213,17 +213,13 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
if (IS_ERR(hdmi->ref_clk)) {
ret = PTR_ERR(hdmi->ref_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "failed to get reference clock\n");
- return ret;
+ return dev_err_probe(hdmi->dev, ret, "failed to get reference clock\n");
}
hdmi->grf_clk = devm_clk_get_optional(hdmi->dev, "grf");
if (IS_ERR(hdmi->grf_clk)) {
ret = PTR_ERR(hdmi->grf_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "failed to get grf clock\n");
- return ret;
+ return dev_err_probe(hdmi->dev, ret, "failed to get grf clock\n");
}
ret = devm_regulator_get_enable(hdmi->dev, "avdd-0v9");
@@ -573,17 +569,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
ret = rockchip_hdmi_parse_dt(hdmi);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "Unable to parse OF data\n");
- return ret;
+ return dev_err_probe(hdmi->dev, ret, "Unable to parse OF data\n");
}
hdmi->phy = devm_phy_optional_get(dev, "hdmi");
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "failed to get phy\n");
- return ret;
+ return dev_err_probe(hdmi->dev, ret, "failed to get phy\n");
}
if (hdmi->phy) {
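[Editor's note] All three hunks above collapse the same open-coded idiom into dev_err_probe(), which returns the error code, logs only when the error is not -EPROBE_DEFER, and records the deferral reason in debugfs otherwise. A sketch of the before/after shape (the clock name is illustrative):

	/* Before: three lines per resource, silent on probe deferral. */
	clk = devm_clk_get(dev, "ref");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get reference clock\n");
		return ret;
	}

	/* After: one statement; -EPROBE_DEFER is additionally noted in
	 * /sys/kernel/debug/devices_deferred. */
	clk = devm_clk_get(dev, "ref");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get reference clock\n");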
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index db4b4038e51d..1ab3ad4bde9e 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -29,11 +29,360 @@
#include "rockchip_drm_drv.h"
-#include "inno_hdmi.h"
+#define INNO_HDMI_MIN_TMDS_CLOCK 25000000U
-#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
+#define DDC_SEGMENT_ADDR 0x30
+
+#define HDMI_SCL_RATE (100 * 1000)
+
+#define DDC_BUS_FREQ_L 0x4b
+#define DDC_BUS_FREQ_H 0x4c
+
+#define HDMI_SYS_CTRL 0x00
+#define m_RST_ANALOG BIT(6)
+#define v_RST_ANALOG (0 << 6)
+#define v_NOT_RST_ANALOG BIT(6)
+#define m_RST_DIGITAL BIT(5)
+#define v_RST_DIGITAL (0 << 5)
+#define v_NOT_RST_DIGITAL BIT(5)
+#define m_REG_CLK_INV BIT(4)
+#define v_REG_CLK_NOT_INV (0 << 4)
+#define v_REG_CLK_INV BIT(4)
+#define m_VCLK_INV BIT(3)
+#define v_VCLK_NOT_INV (0 << 3)
+#define v_VCLK_INV BIT(3)
+#define m_REG_CLK_SOURCE BIT(2)
+#define v_REG_CLK_SOURCE_TMDS (0 << 2)
+#define v_REG_CLK_SOURCE_SYS BIT(2)
+#define m_POWER BIT(1)
+#define v_PWR_ON (0 << 1)
+#define v_PWR_OFF BIT(1)
+#define m_INT_POL BIT(0)
+#define v_INT_POL_HIGH 1
+#define v_INT_POL_LOW 0
+
+#define HDMI_VIDEO_CONTRL1 0x01
+#define m_VIDEO_INPUT_FORMAT (7 << 1)
+#define m_DE_SOURCE BIT(0)
+#define v_VIDEO_INPUT_FORMAT(n) ((n) << 1)
+#define v_DE_EXTERNAL 1
+#define v_DE_INTERNAL 0
+enum {
+ VIDEO_INPUT_SDR_RGB444 = 0,
+ VIDEO_INPUT_DDR_RGB444 = 5,
+ VIDEO_INPUT_DDR_YCBCR422 = 6
+};
-#define INNO_HDMI_MIN_TMDS_CLOCK 25000000U
+#define HDMI_VIDEO_CONTRL2 0x02
+#define m_VIDEO_OUTPUT_COLOR (3 << 6)
+#define m_VIDEO_INPUT_BITS (3 << 4)
+#define m_VIDEO_INPUT_CSP BIT(0)
+#define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6)
+#define v_VIDEO_INPUT_BITS(n) ((n) << 4)
+#define v_VIDEO_INPUT_CSP(n) ((n) << 0)
+enum {
+ VIDEO_INPUT_12BITS = 0,
+ VIDEO_INPUT_10BITS = 1,
+ VIDEO_INPUT_REVERT = 2,
+ VIDEO_INPUT_8BITS = 3,
+};
+
+#define HDMI_VIDEO_CONTRL 0x03
+#define m_VIDEO_AUTO_CSC BIT(7)
+#define v_VIDEO_AUTO_CSC(n) ((n) << 7)
+#define m_VIDEO_C0_C2_SWAP BIT(0)
+#define v_VIDEO_C0_C2_SWAP(n) ((n) << 0)
+enum {
+ C0_C2_CHANGE_ENABLE = 0,
+ C0_C2_CHANGE_DISABLE = 1,
+ AUTO_CSC_DISABLE = 0,
+ AUTO_CSC_ENABLE = 1,
+};
+
+#define HDMI_VIDEO_CONTRL3 0x04
+#define m_COLOR_DEPTH_NOT_INDICATED BIT(4)
+#define m_SOF BIT(3)
+#define m_COLOR_RANGE BIT(2)
+#define m_CSC BIT(0)
+#define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4)
+#define v_SOF_ENABLE (0 << 3)
+#define v_SOF_DISABLE BIT(3)
+#define v_COLOR_RANGE_FULL BIT(2)
+#define v_COLOR_RANGE_LIMITED (0 << 2)
+#define v_CSC_ENABLE 1
+#define v_CSC_DISABLE 0
+
+#define HDMI_AV_MUTE 0x05
+#define m_AVMUTE_CLEAR BIT(7)
+#define m_AVMUTE_ENABLE BIT(6)
+#define m_AUDIO_MUTE BIT(1)
+#define m_VIDEO_BLACK BIT(0)
+#define v_AVMUTE_CLEAR(n) ((n) << 7)
+#define v_AVMUTE_ENABLE(n) ((n) << 6)
+#define v_AUDIO_MUTE(n) ((n) << 1)
+#define v_VIDEO_MUTE(n) ((n) << 0)
+
+#define HDMI_VIDEO_TIMING_CTL 0x08
+#define v_HSYNC_POLARITY(n) ((n) << 3)
+#define v_VSYNC_POLARITY(n) ((n) << 2)
+#define v_INETLACE(n) ((n) << 1)
+#define v_EXTERANL_VIDEO(n) ((n) << 0)
+
+#define HDMI_VIDEO_EXT_HTOTAL_L 0x09
+#define HDMI_VIDEO_EXT_HTOTAL_H 0x0a
+#define HDMI_VIDEO_EXT_HBLANK_L 0x0b
+#define HDMI_VIDEO_EXT_HBLANK_H 0x0c
+#define HDMI_VIDEO_EXT_HDELAY_L 0x0d
+#define HDMI_VIDEO_EXT_HDELAY_H 0x0e
+#define HDMI_VIDEO_EXT_HDURATION_L 0x0f
+#define HDMI_VIDEO_EXT_HDURATION_H 0x10
+#define HDMI_VIDEO_EXT_VTOTAL_L 0x11
+#define HDMI_VIDEO_EXT_VTOTAL_H 0x12
+#define HDMI_VIDEO_EXT_VBLANK 0x13
+#define HDMI_VIDEO_EXT_VDELAY 0x14
+#define HDMI_VIDEO_EXT_VDURATION 0x15
+
+#define HDMI_VIDEO_CSC_COEF 0x18
+
+#define HDMI_AUDIO_CTRL1 0x35
+enum {
+ CTS_SOURCE_INTERNAL = 0,
+ CTS_SOURCE_EXTERNAL = 1,
+};
+
+#define v_CTS_SOURCE(n) ((n) << 7)
+
+enum {
+ DOWNSAMPLE_DISABLE = 0,
+ DOWNSAMPLE_1_2 = 1,
+ DOWNSAMPLE_1_4 = 2,
+};
+
+#define v_DOWN_SAMPLE(n) ((n) << 5)
+
+enum {
+ AUDIO_SOURCE_IIS = 0,
+ AUDIO_SOURCE_SPDIF = 1,
+};
+
+#define v_AUDIO_SOURCE(n) ((n) << 3)
+
+#define v_MCLK_ENABLE(n) ((n) << 2)
+
+enum {
+ MCLK_128FS = 0,
+ MCLK_256FS = 1,
+ MCLK_384FS = 2,
+ MCLK_512FS = 3,
+};
+
+#define v_MCLK_RATIO(n) (n)
+
+#define AUDIO_SAMPLE_RATE 0x37
+
+enum {
+ AUDIO_32K = 0x3,
+ AUDIO_441K = 0x0,
+ AUDIO_48K = 0x2,
+ AUDIO_882K = 0x8,
+ AUDIO_96K = 0xa,
+ AUDIO_1764K = 0xc,
+ AUDIO_192K = 0xe,
+};
+
+#define AUDIO_I2S_MODE 0x38
+
+enum {
+ I2S_CHANNEL_1_2 = 1,
+ I2S_CHANNEL_3_4 = 3,
+ I2S_CHANNEL_5_6 = 7,
+ I2S_CHANNEL_7_8 = 0xf
+};
+
+#define v_I2S_CHANNEL(n) ((n) << 2)
+
+enum {
+ I2S_STANDARD = 0,
+ I2S_LEFT_JUSTIFIED = 1,
+ I2S_RIGHT_JUSTIFIED = 2,
+};
+
+#define v_I2S_MODE(n) (n)
+
+#define AUDIO_I2S_MAP 0x39
+#define AUDIO_I2S_SWAPS_SPDIF 0x3a
+#define v_SPIDF_FREQ(n) (n)
+
+#define N_32K 0x1000
+#define N_441K 0x1880
+#define N_882K 0x3100
+#define N_1764K 0x6200
+#define N_48K 0x1800
+#define N_96K 0x3000
+#define N_192K 0x6000
+
+#define HDMI_AUDIO_CHANNEL_STATUS 0x3e
+#define m_AUDIO_STATUS_NLPCM BIT(7)
+#define m_AUDIO_STATUS_USE BIT(6)
+#define m_AUDIO_STATUS_COPYRIGHT BIT(5)
+#define m_AUDIO_STATUS_ADDITION (3 << 2)
+#define m_AUDIO_STATUS_CLK_ACCURACY (2 << 0)
+#define v_AUDIO_STATUS_NLPCM(n) (((n) & 1) << 7)
+#define AUDIO_N_H 0x3f
+#define AUDIO_N_M 0x40
+#define AUDIO_N_L 0x41
+
+#define HDMI_AUDIO_CTS_H 0x45
+#define HDMI_AUDIO_CTS_M 0x46
+#define HDMI_AUDIO_CTS_L 0x47
+
+#define HDMI_DDC_CLK_L 0x4b
+#define HDMI_DDC_CLK_H 0x4c
+
+#define HDMI_EDID_SEGMENT_POINTER 0x4d
+#define HDMI_EDID_WORD_ADDR 0x4e
+#define HDMI_EDID_FIFO_OFFSET 0x4f
+#define HDMI_EDID_FIFO_ADDR 0x50
+
+#define HDMI_PACKET_SEND_MANUAL 0x9c
+#define HDMI_PACKET_SEND_AUTO 0x9d
+#define m_PACKET_GCP_EN BIT(7)
+#define m_PACKET_MSI_EN BIT(6)
+#define m_PACKET_SDI_EN BIT(5)
+#define m_PACKET_VSI_EN BIT(4)
+#define v_PACKET_GCP_EN(n) (((n) & 1) << 7)
+#define v_PACKET_MSI_EN(n) (((n) & 1) << 6)
+#define v_PACKET_SDI_EN(n) (((n) & 1) << 5)
+#define v_PACKET_VSI_EN(n) (((n) & 1) << 4)
+
+#define HDMI_CONTROL_PACKET_BUF_INDEX 0x9f
+
+enum {
+ INFOFRAME_VSI = 0x05,
+ INFOFRAME_AVI = 0x06,
+ INFOFRAME_AAI = 0x08,
+};
+
+#define HDMI_CONTROL_PACKET_ADDR 0xa0
+#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11
+
+enum {
+ AVI_COLOR_MODE_RGB = 0,
+ AVI_COLOR_MODE_YCBCR422 = 1,
+ AVI_COLOR_MODE_YCBCR444 = 2,
+ AVI_COLORIMETRY_NO_DATA = 0,
+
+ AVI_COLORIMETRY_SMPTE_170M = 1,
+ AVI_COLORIMETRY_ITU709 = 2,
+ AVI_COLORIMETRY_EXTENDED = 3,
+
+ AVI_CODED_FRAME_ASPECT_NO_DATA = 0,
+ AVI_CODED_FRAME_ASPECT_4_3 = 1,
+ AVI_CODED_FRAME_ASPECT_16_9 = 2,
+
+ ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08,
+ ACTIVE_ASPECT_RATE_4_3 = 0x09,
+ ACTIVE_ASPECT_RATE_16_9 = 0x0A,
+ ACTIVE_ASPECT_RATE_14_9 = 0x0B,
+};
+
+#define HDMI_HDCP_CTRL 0x52
+#define m_HDMI_DVI BIT(1)
+#define v_HDMI_DVI(n) ((n) << 1)
+
+#define HDMI_INTERRUPT_MASK1 0xc0
+#define HDMI_INTERRUPT_STATUS1 0xc1
+#define m_INT_ACTIVE_VSYNC BIT(5)
+#define m_INT_EDID_READY BIT(2)
+
+#define HDMI_INTERRUPT_MASK2 0xc2
+#define HDMI_INTERRUPT_STATUS2 0xc3
+#define m_INT_HDCP_ERR BIT(7)
+#define m_INT_BKSV_FLAG BIT(6)
+#define m_INT_HDCP_OK BIT(4)
+
+#define HDMI_STATUS 0xc8
+#define m_HOTPLUG BIT(7)
+#define m_MASK_INT_HOTPLUG BIT(5)
+#define m_INT_HOTPLUG BIT(1)
+#define v_MASK_INT_HOTPLUG(n) (((n) & 0x1) << 5)
+
+#define HDMI_COLORBAR 0xc9
+
+#define HDMI_PHY_SYNC 0xce
+#define HDMI_PHY_SYS_CTL 0xe0
+#define m_TMDS_CLK_SOURCE BIT(5)
+#define v_TMDS_FROM_PLL (0 << 5)
+#define v_TMDS_FROM_GEN BIT(5)
+#define m_PHASE_CLK BIT(4)
+#define v_DEFAULT_PHASE (0 << 4)
+#define v_SYNC_PHASE BIT(4)
+#define m_TMDS_CURRENT_PWR BIT(3)
+#define v_TURN_ON_CURRENT (0 << 3)
+#define v_CAT_OFF_CURRENT BIT(3)
+#define m_BANDGAP_PWR BIT(2)
+#define v_BANDGAP_PWR_UP (0 << 2)
+#define v_BANDGAP_PWR_DOWN BIT(2)
+#define m_PLL_PWR BIT(1)
+#define v_PLL_PWR_UP (0 << 1)
+#define v_PLL_PWR_DOWN BIT(1)
+#define m_TMDS_CHG_PWR BIT(0)
+#define v_TMDS_CHG_PWR_UP (0 << 0)
+#define v_TMDS_CHG_PWR_DOWN BIT(0)
+
+#define HDMI_PHY_CHG_PWR 0xe1
+#define v_CLK_CHG_PWR(n) (((n) & 1) << 3)
+#define v_DATA_CHG_PWR(n) (((n) & 7) << 0)
+
+#define HDMI_PHY_DRIVER 0xe2
+#define v_CLK_MAIN_DRIVER(n) ((n) << 4)
+#define v_DATA_MAIN_DRIVER(n) ((n) << 0)
+
+#define HDMI_PHY_PRE_EMPHASIS 0xe3
+#define v_PRE_EMPHASIS(n) (((n) & 7) << 4)
+#define v_CLK_PRE_DRIVER(n) (((n) & 3) << 2)
+#define v_DATA_PRE_DRIVER(n) (((n) & 3) << 0)
+
+#define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW 0xe7
+#define v_FEEDBACK_DIV_LOW(n) ((n) & 0xff)
+#define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH 0xe8
+#define v_FEEDBACK_DIV_HIGH(n) ((n) & 1)
+
+#define HDMI_PHY_PRE_DIV_RATIO 0xed
+#define v_PRE_DIV_RATIO(n) ((n) & 0x1f)
+
+#define HDMI_CEC_CTRL 0xd0
+#define m_ADJUST_FOR_HISENSE BIT(6)
+#define m_REJECT_RX_BROADCAST BIT(5)
+#define m_BUSFREETIME_ENABLE BIT(2)
+#define m_REJECT_RX BIT(1)
+#define m_START_TX BIT(0)
+
+#define HDMI_CEC_DATA 0xd1
+#define HDMI_CEC_TX_OFFSET 0xd2
+#define HDMI_CEC_RX_OFFSET 0xd3
+#define HDMI_CEC_CLK_H 0xd4
+#define HDMI_CEC_CLK_L 0xd5
+#define HDMI_CEC_TX_LENGTH 0xd6
+#define HDMI_CEC_RX_LENGTH 0xd7
+#define HDMI_CEC_TX_INT_MASK 0xd8
+#define m_TX_DONE BIT(3)
+#define m_TX_NOACK BIT(2)
+#define m_TX_BROADCAST_REJ BIT(1)
+#define m_TX_BUSNOTFREE BIT(0)
+
+#define HDMI_CEC_RX_INT_MASK 0xd9
+#define m_RX_LA_ERR BIT(4)
+#define m_RX_GLITCH BIT(3)
+#define m_RX_DONE BIT(0)
+
+#define HDMI_CEC_TX_INT 0xda
+#define HDMI_CEC_RX_INT 0xdb
+#define HDMI_CEC_BUSFREETIME_L 0xdc
+#define HDMI_CEC_BUSFREETIME_H 0xdd
+#define HDMI_CEC_LOGICADDR 0xde
+
+#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
#define RK3036_GRF_SOC_CON2 0x148
#define RK3036_HDMI_PHSYNC BIT(4)
@@ -255,22 +604,37 @@ static void inno_hdmi_power_up(struct inno_hdmi *hdmi,
inno_hdmi_sys_power(hdmi, true);
};
-static void inno_hdmi_reset(struct inno_hdmi *hdmi)
+static void inno_hdmi_init_hw(struct inno_hdmi *hdmi)
{
u32 val;
u32 msk;
hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_DIGITAL, v_NOT_RST_DIGITAL);
- udelay(100);
+ usleep_range(100, 150);
hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_ANALOG, v_NOT_RST_ANALOG);
- udelay(100);
+ usleep_range(100, 150);
msk = m_REG_CLK_INV | m_REG_CLK_SOURCE | m_POWER | m_INT_POL;
val = v_REG_CLK_INV | v_REG_CLK_SOURCE_SYS | v_PWR_ON | v_INT_POL_HIGH;
hdmi_modb(hdmi, HDMI_SYS_CTRL, msk, val);
inno_hdmi_standby(hdmi);
+
+ /*
+ * When the controller isn't configured to an accurate
+ * video timing and there is no reference clock available,
+ * then the TMDS clock source would be switched to PCLK_HDMI,
+ * so we need to init the TMDS rate to PCLK rate, and
+ * reconfigure the DDC clock.
+ */
+ if (hdmi->refclk)
+ inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->refclk));
+ else
+ inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->pclk));
+
+ /* Unmute hotplug interrupt */
+ hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1));
}
static int inno_hdmi_disable_frame(struct drm_connector *connector,
@@ -775,8 +1139,7 @@ static int inno_hdmi_i2c_write(struct inno_hdmi *hdmi, struct i2c_msg *msgs)
* we assume that each word write to this i2c adapter
* should be the offset of EDID word address.
*/
- if ((msgs->len != 1) ||
- ((msgs->addr != DDC_ADDR) && (msgs->addr != DDC_SEGMENT_ADDR)))
+ if (msgs->len != 1 || (msgs->addr != DDC_ADDR && msgs->addr != DDC_SEGMENT_ADDR))
return -EINVAL;
reinit_completion(&hdmi->i2c->cmp);
@@ -867,10 +1230,9 @@ static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi)
strscpy(adap->name, "Inno HDMI", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
- ret = i2c_add_adapter(adap);
+ ret = devm_i2c_add_adapter(hdmi->dev, adap);
if (ret) {
dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name);
- devm_kfree(hdmi->dev, i2c);
return ERR_PTR(ret);
}
@@ -907,71 +1269,37 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
- hdmi->pclk = devm_clk_get(hdmi->dev, "pclk");
+ hdmi->pclk = devm_clk_get_enabled(hdmi->dev, "pclk");
if (IS_ERR(hdmi->pclk))
return dev_err_probe(dev, PTR_ERR(hdmi->pclk), "Unable to get HDMI pclk\n");
- ret = clk_prepare_enable(hdmi->pclk);
- if (ret)
- return dev_err_probe(dev, ret, "Cannot enable HDMI pclk: %d\n", ret);
-
- hdmi->refclk = devm_clk_get_optional(hdmi->dev, "ref");
- if (IS_ERR(hdmi->refclk)) {
- ret = dev_err_probe(dev, PTR_ERR(hdmi->refclk), "Unable to get HDMI refclk\n");
- goto err_disable_pclk;
- }
-
- ret = clk_prepare_enable(hdmi->refclk);
- if (ret) {
- ret = dev_err_probe(dev, ret, "Cannot enable HDMI refclk: %d\n", ret);
- goto err_disable_pclk;
- }
+ hdmi->refclk = devm_clk_get_optional_enabled(hdmi->dev, "ref");
+ if (IS_ERR(hdmi->refclk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->refclk), "Unable to get HDMI refclk\n");
if (hdmi->variant->dev_type == RK3036_HDMI) {
hdmi->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
- if (IS_ERR(hdmi->grf)) {
- ret = dev_err_probe(dev, PTR_ERR(hdmi->grf),
- "Unable to get rockchip,grf\n");
- goto err_disable_clk;
- }
+ if (IS_ERR(hdmi->grf))
+ return dev_err_probe(dev,
+ PTR_ERR(hdmi->grf), "Unable to get rockchip,grf\n");
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_disable_clk;
- }
+ if (irq < 0)
+ return irq;
- inno_hdmi_reset(hdmi);
+ inno_hdmi_init_hw(hdmi);
hdmi->ddc = inno_hdmi_i2c_adapter(hdmi);
- if (IS_ERR(hdmi->ddc)) {
- ret = PTR_ERR(hdmi->ddc);
- hdmi->ddc = NULL;
- goto err_disable_clk;
- }
-
- /*
- * When the controller isn't configured to an accurate
- * video timing and there is no reference clock available,
- * then the TMDS clock source would be switched to PCLK_HDMI,
- * so we need to init the TMDS rate to PCLK rate, and
- * reconfigure the DDC clock.
- */
- if (hdmi->refclk)
- inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->refclk));
- else
- inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->pclk));
+ if (IS_ERR(hdmi->ddc))
+ return PTR_ERR(hdmi->ddc);
ret = inno_hdmi_register(drm, hdmi);
if (ret)
- goto err_put_adapter;
+ return ret;
dev_set_drvdata(dev, hdmi);
- /* Unmute hotplug interrupt */
- hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1));
-
ret = devm_request_threaded_irq(dev, irq, inno_hdmi_hardirq,
inno_hdmi_irq, IRQF_SHARED,
dev_name(dev), hdmi);
@@ -982,12 +1310,6 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
err_cleanup_hdmi:
hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
-err_put_adapter:
- i2c_put_adapter(hdmi->ddc);
-err_disable_clk:
- clk_disable_unprepare(hdmi->refclk);
-err_disable_pclk:
- clk_disable_unprepare(hdmi->pclk);
return ret;
}
@@ -998,10 +1320,6 @@ static void inno_hdmi_unbind(struct device *dev, struct device *master,
hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
-
- i2c_put_adapter(hdmi->ddc);
- clk_disable_unprepare(hdmi->refclk);
- clk_disable_unprepare(hdmi->pclk);
}
static const struct component_ops inno_hdmi_ops = {
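[Editor's note] The register macros moved in from the deleted header (next hunk) follow a mask/value naming convention: m_FOO is the field mask and v_FOO (or v_FOO(n)) encodes a value into that field. They are consumed through the driver's read-modify-write helper; both calls below appear in the patch itself:

	/* Power the controller down: clear the m_POWER field in
	 * HDMI_SYS_CTRL, then set it to the v_PWR_OFF encoding. */
	hdmi_modb(hdmi, HDMI_SYS_CTRL, m_POWER, v_PWR_OFF);

	/* Multi-bit or parameterised fields take the value through
	 * the v_*() macro. */
	hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG,
		  v_MASK_INT_HOTPLUG(1));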
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.h b/drivers/gpu/drm/rockchip/inno_hdmi.h
deleted file mode 100644
index 8b7ef3fac485..000000000000
--- a/drivers/gpu/drm/rockchip/inno_hdmi.h
+++ /dev/null
@@ -1,349 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) Rockchip Electronics Co., Ltd.
- * Zheng Yang <zhengyang@rock-chips.com>
- * Yakir Yang <ykk@rock-chips.com>
- */
-
-#ifndef __INNO_HDMI_H__
-#define __INNO_HDMI_H__
-
-#define DDC_SEGMENT_ADDR 0x30
-
-#define HDMI_SCL_RATE (100*1000)
-#define DDC_BUS_FREQ_L 0x4b
-#define DDC_BUS_FREQ_H 0x4c
-
-#define HDMI_SYS_CTRL 0x00
-#define m_RST_ANALOG (1 << 6)
-#define v_RST_ANALOG (0 << 6)
-#define v_NOT_RST_ANALOG (1 << 6)
-#define m_RST_DIGITAL (1 << 5)
-#define v_RST_DIGITAL (0 << 5)
-#define v_NOT_RST_DIGITAL (1 << 5)
-#define m_REG_CLK_INV (1 << 4)
-#define v_REG_CLK_NOT_INV (0 << 4)
-#define v_REG_CLK_INV (1 << 4)
-#define m_VCLK_INV (1 << 3)
-#define v_VCLK_NOT_INV (0 << 3)
-#define v_VCLK_INV (1 << 3)
-#define m_REG_CLK_SOURCE (1 << 2)
-#define v_REG_CLK_SOURCE_TMDS (0 << 2)
-#define v_REG_CLK_SOURCE_SYS (1 << 2)
-#define m_POWER (1 << 1)
-#define v_PWR_ON (0 << 1)
-#define v_PWR_OFF (1 << 1)
-#define m_INT_POL (1 << 0)
-#define v_INT_POL_HIGH 1
-#define v_INT_POL_LOW 0
-
-#define HDMI_VIDEO_CONTRL1 0x01
-#define m_VIDEO_INPUT_FORMAT (7 << 1)
-#define m_DE_SOURCE (1 << 0)
-#define v_VIDEO_INPUT_FORMAT(n) (n << 1)
-#define v_DE_EXTERNAL 1
-#define v_DE_INTERNAL 0
-enum {
- VIDEO_INPUT_SDR_RGB444 = 0,
- VIDEO_INPUT_DDR_RGB444 = 5,
- VIDEO_INPUT_DDR_YCBCR422 = 6
-};
-
-#define HDMI_VIDEO_CONTRL2 0x02
-#define m_VIDEO_OUTPUT_COLOR (3 << 6)
-#define m_VIDEO_INPUT_BITS (3 << 4)
-#define m_VIDEO_INPUT_CSP (1 << 0)
-#define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6)
-#define v_VIDEO_INPUT_BITS(n) (n << 4)
-#define v_VIDEO_INPUT_CSP(n) (n << 0)
-enum {
- VIDEO_INPUT_12BITS = 0,
- VIDEO_INPUT_10BITS = 1,
- VIDEO_INPUT_REVERT = 2,
- VIDEO_INPUT_8BITS = 3,
-};
-
-#define HDMI_VIDEO_CONTRL 0x03
-#define m_VIDEO_AUTO_CSC (1 << 7)
-#define v_VIDEO_AUTO_CSC(n) (n << 7)
-#define m_VIDEO_C0_C2_SWAP (1 << 0)
-#define v_VIDEO_C0_C2_SWAP(n) (n << 0)
-enum {
- C0_C2_CHANGE_ENABLE = 0,
- C0_C2_CHANGE_DISABLE = 1,
- AUTO_CSC_DISABLE = 0,
- AUTO_CSC_ENABLE = 1,
-};
-
-#define HDMI_VIDEO_CONTRL3 0x04
-#define m_COLOR_DEPTH_NOT_INDICATED (1 << 4)
-#define m_SOF (1 << 3)
-#define m_COLOR_RANGE (1 << 2)
-#define m_CSC (1 << 0)
-#define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4)
-#define v_SOF_ENABLE (0 << 3)
-#define v_SOF_DISABLE (1 << 3)
-#define v_COLOR_RANGE_FULL (1 << 2)
-#define v_COLOR_RANGE_LIMITED (0 << 2)
-#define v_CSC_ENABLE 1
-#define v_CSC_DISABLE 0
-
-#define HDMI_AV_MUTE 0x05
-#define m_AVMUTE_CLEAR (1 << 7)
-#define m_AVMUTE_ENABLE (1 << 6)
-#define m_AUDIO_MUTE (1 << 1)
-#define m_VIDEO_BLACK (1 << 0)
-#define v_AVMUTE_CLEAR(n) (n << 7)
-#define v_AVMUTE_ENABLE(n) (n << 6)
-#define v_AUDIO_MUTE(n) (n << 1)
-#define v_VIDEO_MUTE(n) (n << 0)
-
-#define HDMI_VIDEO_TIMING_CTL 0x08
-#define v_HSYNC_POLARITY(n) (n << 3)
-#define v_VSYNC_POLARITY(n) (n << 2)
-#define v_INETLACE(n) (n << 1)
-#define v_EXTERANL_VIDEO(n) (n << 0)
-
-#define HDMI_VIDEO_EXT_HTOTAL_L 0x09
-#define HDMI_VIDEO_EXT_HTOTAL_H 0x0a
-#define HDMI_VIDEO_EXT_HBLANK_L 0x0b
-#define HDMI_VIDEO_EXT_HBLANK_H 0x0c
-#define HDMI_VIDEO_EXT_HDELAY_L 0x0d
-#define HDMI_VIDEO_EXT_HDELAY_H 0x0e
-#define HDMI_VIDEO_EXT_HDURATION_L 0x0f
-#define HDMI_VIDEO_EXT_HDURATION_H 0x10
-#define HDMI_VIDEO_EXT_VTOTAL_L 0x11
-#define HDMI_VIDEO_EXT_VTOTAL_H 0x12
-#define HDMI_VIDEO_EXT_VBLANK 0x13
-#define HDMI_VIDEO_EXT_VDELAY 0x14
-#define HDMI_VIDEO_EXT_VDURATION 0x15
-
-#define HDMI_VIDEO_CSC_COEF 0x18
-
-#define HDMI_AUDIO_CTRL1 0x35
-enum {
- CTS_SOURCE_INTERNAL = 0,
- CTS_SOURCE_EXTERNAL = 1,
-};
-#define v_CTS_SOURCE(n) (n << 7)
-
-enum {
- DOWNSAMPLE_DISABLE = 0,
- DOWNSAMPLE_1_2 = 1,
- DOWNSAMPLE_1_4 = 2,
-};
-#define v_DOWN_SAMPLE(n) (n << 5)
-
-enum {
- AUDIO_SOURCE_IIS = 0,
- AUDIO_SOURCE_SPDIF = 1,
-};
-#define v_AUDIO_SOURCE(n) (n << 3)
-
-#define v_MCLK_ENABLE(n) (n << 2)
-enum {
- MCLK_128FS = 0,
- MCLK_256FS = 1,
- MCLK_384FS = 2,
- MCLK_512FS = 3,
-};
-#define v_MCLK_RATIO(n) (n)
-
-#define AUDIO_SAMPLE_RATE 0x37
-enum {
- AUDIO_32K = 0x3,
- AUDIO_441K = 0x0,
- AUDIO_48K = 0x2,
- AUDIO_882K = 0x8,
- AUDIO_96K = 0xa,
- AUDIO_1764K = 0xc,
- AUDIO_192K = 0xe,
-};
-
-#define AUDIO_I2S_MODE 0x38
-enum {
- I2S_CHANNEL_1_2 = 1,
- I2S_CHANNEL_3_4 = 3,
- I2S_CHANNEL_5_6 = 7,
- I2S_CHANNEL_7_8 = 0xf
-};
-#define v_I2S_CHANNEL(n) ((n) << 2)
-enum {
- I2S_STANDARD = 0,
- I2S_LEFT_JUSTIFIED = 1,
- I2S_RIGHT_JUSTIFIED = 2,
-};
-#define v_I2S_MODE(n) (n)
-
-#define AUDIO_I2S_MAP 0x39
-#define AUDIO_I2S_SWAPS_SPDIF 0x3a
-#define v_SPIDF_FREQ(n) (n)
-
-#define N_32K 0x1000
-#define N_441K 0x1880
-#define N_882K 0x3100
-#define N_1764K 0x6200
-#define N_48K 0x1800
-#define N_96K 0x3000
-#define N_192K 0x6000
-
-#define HDMI_AUDIO_CHANNEL_STATUS 0x3e
-#define m_AUDIO_STATUS_NLPCM (1 << 7)
-#define m_AUDIO_STATUS_USE (1 << 6)
-#define m_AUDIO_STATUS_COPYRIGHT (1 << 5)
-#define m_AUDIO_STATUS_ADDITION (3 << 2)
-#define m_AUDIO_STATUS_CLK_ACCURACY (2 << 0)
-#define v_AUDIO_STATUS_NLPCM(n) ((n & 1) << 7)
-#define AUDIO_N_H 0x3f
-#define AUDIO_N_M 0x40
-#define AUDIO_N_L 0x41
-
-#define HDMI_AUDIO_CTS_H 0x45
-#define HDMI_AUDIO_CTS_M 0x46
-#define HDMI_AUDIO_CTS_L 0x47
-
-#define HDMI_DDC_CLK_L 0x4b
-#define HDMI_DDC_CLK_H 0x4c
-
-#define HDMI_EDID_SEGMENT_POINTER 0x4d
-#define HDMI_EDID_WORD_ADDR 0x4e
-#define HDMI_EDID_FIFO_OFFSET 0x4f
-#define HDMI_EDID_FIFO_ADDR 0x50
-
-#define HDMI_PACKET_SEND_MANUAL 0x9c
-#define HDMI_PACKET_SEND_AUTO 0x9d
-#define m_PACKET_GCP_EN (1 << 7)
-#define m_PACKET_MSI_EN (1 << 6)
-#define m_PACKET_SDI_EN (1 << 5)
-#define m_PACKET_VSI_EN (1 << 4)
-#define v_PACKET_GCP_EN(n) ((n & 1) << 7)
-#define v_PACKET_MSI_EN(n) ((n & 1) << 6)
-#define v_PACKET_SDI_EN(n) ((n & 1) << 5)
-#define v_PACKET_VSI_EN(n) ((n & 1) << 4)
-
-#define HDMI_CONTROL_PACKET_BUF_INDEX 0x9f
-enum {
- INFOFRAME_VSI = 0x05,
- INFOFRAME_AVI = 0x06,
- INFOFRAME_AAI = 0x08,
-};
-
-#define HDMI_CONTROL_PACKET_ADDR 0xa0
-#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11
-enum {
- AVI_COLOR_MODE_RGB = 0,
- AVI_COLOR_MODE_YCBCR422 = 1,
- AVI_COLOR_MODE_YCBCR444 = 2,
- AVI_COLORIMETRY_NO_DATA = 0,
-
- AVI_COLORIMETRY_SMPTE_170M = 1,
- AVI_COLORIMETRY_ITU709 = 2,
- AVI_COLORIMETRY_EXTENDED = 3,
-
- AVI_CODED_FRAME_ASPECT_NO_DATA = 0,
- AVI_CODED_FRAME_ASPECT_4_3 = 1,
- AVI_CODED_FRAME_ASPECT_16_9 = 2,
-
- ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08,
- ACTIVE_ASPECT_RATE_4_3 = 0x09,
- ACTIVE_ASPECT_RATE_16_9 = 0x0A,
- ACTIVE_ASPECT_RATE_14_9 = 0x0B,
-};
-
-#define HDMI_HDCP_CTRL 0x52
-#define m_HDMI_DVI (1 << 1)
-#define v_HDMI_DVI(n) (n << 1)
-
-#define HDMI_INTERRUPT_MASK1 0xc0
-#define HDMI_INTERRUPT_STATUS1 0xc1
-#define m_INT_ACTIVE_VSYNC (1 << 5)
-#define m_INT_EDID_READY (1 << 2)
-
-#define HDMI_INTERRUPT_MASK2 0xc2
-#define HDMI_INTERRUPT_STATUS2 0xc3
-#define m_INT_HDCP_ERR (1 << 7)
-#define m_INT_BKSV_FLAG (1 << 6)
-#define m_INT_HDCP_OK (1 << 4)
-
-#define HDMI_STATUS 0xc8
-#define m_HOTPLUG (1 << 7)
-#define m_MASK_INT_HOTPLUG (1 << 5)
-#define m_INT_HOTPLUG (1 << 1)
-#define v_MASK_INT_HOTPLUG(n) ((n & 0x1) << 5)
-
-#define HDMI_COLORBAR 0xc9
-
-#define HDMI_PHY_SYNC 0xce
-#define HDMI_PHY_SYS_CTL 0xe0
-#define m_TMDS_CLK_SOURCE (1 << 5)
-#define v_TMDS_FROM_PLL (0 << 5)
-#define v_TMDS_FROM_GEN (1 << 5)
-#define m_PHASE_CLK (1 << 4)
-#define v_DEFAULT_PHASE (0 << 4)
-#define v_SYNC_PHASE (1 << 4)
-#define m_TMDS_CURRENT_PWR (1 << 3)
-#define v_TURN_ON_CURRENT (0 << 3)
-#define v_CAT_OFF_CURRENT (1 << 3)
-#define m_BANDGAP_PWR (1 << 2)
-#define v_BANDGAP_PWR_UP (0 << 2)
-#define v_BANDGAP_PWR_DOWN (1 << 2)
-#define m_PLL_PWR (1 << 1)
-#define v_PLL_PWR_UP (0 << 1)
-#define v_PLL_PWR_DOWN (1 << 1)
-#define m_TMDS_CHG_PWR (1 << 0)
-#define v_TMDS_CHG_PWR_UP (0 << 0)
-#define v_TMDS_CHG_PWR_DOWN (1 << 0)
-
-#define HDMI_PHY_CHG_PWR 0xe1
-#define v_CLK_CHG_PWR(n) ((n & 1) << 3)
-#define v_DATA_CHG_PWR(n) ((n & 7) << 0)
-
-#define HDMI_PHY_DRIVER 0xe2
-#define v_CLK_MAIN_DRIVER(n) (n << 4)
-#define v_DATA_MAIN_DRIVER(n) (n << 0)
-
-#define HDMI_PHY_PRE_EMPHASIS 0xe3
-#define v_PRE_EMPHASIS(n) ((n & 7) << 4)
-#define v_CLK_PRE_DRIVER(n) ((n & 3) << 2)
-#define v_DATA_PRE_DRIVER(n) ((n & 3) << 0)
-
-#define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW 0xe7
-#define v_FEEDBACK_DIV_LOW(n) (n & 0xff)
-#define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH 0xe8
-#define v_FEEDBACK_DIV_HIGH(n) (n & 1)
-
-#define HDMI_PHY_PRE_DIV_RATIO 0xed
-#define v_PRE_DIV_RATIO(n) (n & 0x1f)
-
-#define HDMI_CEC_CTRL 0xd0
-#define m_ADJUST_FOR_HISENSE (1 << 6)
-#define m_REJECT_RX_BROADCAST (1 << 5)
-#define m_BUSFREETIME_ENABLE (1 << 2)
-#define m_REJECT_RX (1 << 1)
-#define m_START_TX (1 << 0)
-
-#define HDMI_CEC_DATA 0xd1
-#define HDMI_CEC_TX_OFFSET 0xd2
-#define HDMI_CEC_RX_OFFSET 0xd3
-#define HDMI_CEC_CLK_H 0xd4
-#define HDMI_CEC_CLK_L 0xd5
-#define HDMI_CEC_TX_LENGTH 0xd6
-#define HDMI_CEC_RX_LENGTH 0xd7
-#define HDMI_CEC_TX_INT_MASK 0xd8
-#define m_TX_DONE (1 << 3)
-#define m_TX_NOACK (1 << 2)
-#define m_TX_BROADCAST_REJ (1 << 1)
-#define m_TX_BUSNOTFREE (1 << 0)
-
-#define HDMI_CEC_RX_INT_MASK 0xd9
-#define m_RX_LA_ERR (1 << 4)
-#define m_RX_GLITCH (1 << 3)
-#define m_RX_DONE (1 << 0)
-
-#define HDMI_CEC_TX_INT 0xda
-#define HDMI_CEC_RX_INT 0xdb
-#define HDMI_CEC_BUSFREETIME_L 0xdc
-#define HDMI_CEC_BUSFREETIME_H 0xdd
-#define HDMI_CEC_LOGICADDR 0xde
-
-#endif /* __INNO_HDMI_H__ */
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index e7875b52f298..ae4a5ac2299a 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -450,7 +450,7 @@ struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
};
static enum drm_connector_status
-rk3066_hdmi_bridge_detect(struct drm_bridge *bridge)
+rk3066_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index dcc1f07632c3..2f469d370021 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -30,21 +30,18 @@ static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers =
static struct drm_framebuffer *
rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_afbc_framebuffer *afbc_fb;
- const struct drm_format_info *info;
int ret;
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return ERR_PTR(-ENOMEM);
-
afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
if (!afbc_fb)
return ERR_PTR(-ENOMEM);
- ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd,
+ ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base,
+ file, info, mode_cmd,
&rockchip_drm_fb_funcs);
if (ret) {
kfree(afbc_fb);
@@ -52,16 +49,9 @@ rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
}
if (drm_is_afbc(mode_cmd->modifier[0])) {
- int ret, i;
-
- ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
+ ret = drm_gem_fb_afbc_init(dev, info, mode_cmd, afbc_fb);
if (ret) {
- struct drm_gem_object **obj = afbc_fb->base.obj;
-
- for (i = 0; i < info->num_planes; ++i)
- drm_gem_object_put(obj[i]);
-
- kfree(afbc_fb);
+ drm_framebuffer_put(&afbc_fb->base);
return ERR_PTR(ret);
}
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index d0f5fea15e21..186f6452a7d3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -146,25 +146,6 @@ static void vop2_unlock(struct vop2 *vop2)
mutex_unlock(&vop2->vop2_lock);
}
-/*
- * Note:
- * The write mask function is documented but missing on rk3566/8, writes
- * to these bits have no effect. For newer soc(rk3588 and following) the
- * write mask is needed for register writes.
- *
- * GLB_CFG_DONE_EN has no write mask bit.
- *
- */
-static void vop2_cfg_done(struct vop2_video_port *vp)
-{
- struct vop2 *vop2 = vp->vop2;
- u32 val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN;
-
- val |= BIT(vp->id) | (BIT(vp->id) << 16);
-
- regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val);
-}
-
static void vop2_win_disable(struct vop2_win *win)
{
vop2_win_write(win, VOP2_WIN_ENABLE, 0);
@@ -854,6 +835,11 @@ static void vop2_enable(struct vop2 *vop2)
if (vop2->version == VOP_VERSION_RK3588)
rk3588_vop2_power_domain_enable_all(vop2);
+ if (vop2->version <= VOP_VERSION_RK3588) {
+ vop2->old_layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+ vop2->old_port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
+ }
+
vop2_writel(vop2, RK3568_REG_CFG_DONE, RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN);
/*
@@ -2422,6 +2408,10 @@ static int vop2_create_crtcs(struct vop2 *vop2)
break;
}
}
+
+ if (!vp->primary_plane)
+ return dev_err_probe(drm->dev, -ENOENT,
+ "no primary plane for vp %d\n", i);
}
/* Register all unused window as overlay plane */
@@ -2724,6 +2714,7 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
return dev_err_probe(drm->dev, vop2->irq, "cannot find irq for vop2\n");
mutex_init(&vop2->vop2_lock);
+ mutex_init(&vop2->ovl_lock);
ret = devm_request_irq(dev, vop2->irq, vop2_isr, IRQF_SHARED, dev_name(dev), vop2);
if (ret)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index fc3ecb9fcd95..fa5c56f16047 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -334,6 +334,19 @@ struct vop2 {
/* optional internal rgb encoder */
struct rockchip_rgb *rgb;
+ /*
+ * Used to record layer selection configuration on rk356x/rk3588
+ * as the RK3568_OVL_LAYER_SEL and RK3568_OVL_PORT_SEL registers are
+ * shared by all the Video Ports.
+ */
+ u32 old_layer_sel;
+ u32 old_port_sel;
+ /*
+ * Ensure that the updates to these two registers (RK3568_OVL_LAYER_SEL/RK3568_OVL_PORT_SEL)
+ * take effect in sequence.
+ */
+ struct mutex ovl_lock;
+
/* must be put at the end of the struct */
struct vop2_win win[];
};
@@ -727,6 +740,7 @@ enum dst_factor_mode {
#define RK3588_OVL_PORT_SEL__CLUSTER2 GENMASK(21, 20)
#define RK3568_OVL_PORT_SEL__CLUSTER1 GENMASK(19, 18)
#define RK3568_OVL_PORT_SEL__CLUSTER0 GENMASK(17, 16)
+#define RK3588_OVL_PORT_SET__PORT3_MUX GENMASK(15, 12)
#define RK3568_OVL_PORT_SET__PORT2_MUX GENMASK(11, 8)
#define RK3568_OVL_PORT_SET__PORT1_MUX GENMASK(7, 4)
#define RK3568_OVL_PORT_SET__PORT0_MUX GENMASK(3, 0)
@@ -831,4 +845,23 @@ static inline struct vop2_win *to_vop2_win(struct drm_plane *p)
return container_of(p, struct vop2_win, base);
}
+/*
+ * Note:
+ * The write mask function is documented but missing on rk3566/8; writes
+ * to these bits have no effect. For newer SoCs (rk3588 and following) the
+ * write mask is needed for register writes.
+ *
+ * GLB_CFG_DONE_EN has no write mask bit.
+ *
+ */
+static inline void vop2_cfg_done(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ u32 val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN;
+
+ val |= BIT(vp->id) | (BIT(vp->id) << 16);
+
+ regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val);
+}
+
#endif /* _ROCKCHIP_DRM_VOP2_H */
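[Editor's note] The new old_layer_sel/old_port_sel fields are shadow copies of shared registers: the hardware latches RK3568_OVL_LAYER_SEL and RK3568_OVL_PORT_SEL on one video port's vsync, so each port computes its update against the cached value and writes back under ovl_lock. A condensed sketch of the pattern (mux is a placeholder; error handling and the wait helpers are omitted):

	mutex_lock(&vop2->ovl_lock);

	/* Start from the cached value, not a fresh register read, so an
	 * update another VP queued but the hardware has not latched yet
	 * is not lost. */
	port_sel = vop2->old_port_sel;
	port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, mux);

	vop2->old_port_sel = port_sel;
	vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);

	mutex_unlock(&vop2->ovl_lock);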
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index a673779de3d2..2411260db51d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -56,14 +56,13 @@ struct rockchip_lvds {
struct drm_device *drm_dev;
struct drm_panel *panel;
struct drm_bridge *bridge;
- struct drm_connector connector;
struct rockchip_encoder encoder;
struct dev_pin_info *pins;
};
-static inline struct rockchip_lvds *connector_to_lvds(struct drm_connector *connector)
+static inline struct rockchip_lvds *bridge_to_lvds(struct drm_bridge *bridge)
{
- return container_of(connector, struct rockchip_lvds, connector);
+ return (struct rockchip_lvds *)bridge->driver_private;
}
static inline struct rockchip_lvds *encoder_to_lvds(struct drm_encoder *encoder)
@@ -106,25 +105,21 @@ static inline int rockchip_lvds_name_to_output(const char *s)
return -EINVAL;
}
-static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
+static int
+rockchip_lvds_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector)
{
- struct rockchip_lvds *lvds = connector_to_lvds(connector);
+ struct rockchip_lvds *lvds = bridge_to_lvds(bridge);
struct drm_panel *panel = lvds->panel;
return drm_panel_get_modes(panel, connector);
}
static const
-struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
- .get_modes = rockchip_lvds_connector_get_modes,
+struct drm_bridge_funcs rockchip_lvds_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .get_modes = rockchip_lvds_bridge_get_modes,
};
static int
@@ -606,26 +601,23 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
}
drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs);
- connector = &lvds->connector;
if (lvds->panel) {
- connector->dpms = DRM_MODE_DPMS_OFF;
- ret = drm_connector_init(drm_dev, connector,
- &rockchip_lvds_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- if (ret < 0) {
- drm_err(drm_dev,
- "failed to initialize connector: %d\n", ret);
+ lvds->bridge = drm_panel_bridge_add_typed(lvds->panel, DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(lvds->bridge)) {
+ ret = PTR_ERR(lvds->bridge);
goto err_free_encoder;
}
+ }
- drm_connector_helper_add(connector,
- &rockchip_lvds_connector_helper_funcs);
- } else {
- ret = drm_bridge_attach(encoder, lvds->bridge, NULL,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (lvds->bridge) {
+ lvds->bridge->driver_private = lvds;
+ lvds->bridge->ops = DRM_BRIDGE_OP_MODES;
+ lvds->bridge->funcs = &rockchip_lvds_bridge_funcs;
+
+ ret = drm_bridge_attach(encoder, lvds->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
- goto err_free_encoder;
+ goto err_free_bridge;
connector = drm_bridge_connector_init(lvds->drm_dev, encoder);
if (IS_ERR(connector)) {
@@ -633,14 +625,14 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
"failed to initialize bridge connector: %pe\n",
connector);
ret = PTR_ERR(connector);
- goto err_free_encoder;
+ goto err_free_bridge;
}
- }
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret < 0) {
- drm_err(drm_dev, "failed to attach encoder: %d\n", ret);
- goto err_free_connector;
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ drm_err(drm_dev, "failed to attach encoder: %d\n", ret);
+ goto err_free_bridge;
+ }
}
pm_runtime_enable(dev);
@@ -649,8 +641,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
return 0;
-err_free_connector:
- drm_connector_cleanup(connector);
+err_free_bridge:
+ drm_panel_bridge_remove(lvds->bridge);
err_free_encoder:
drm_encoder_cleanup(encoder);
err_put_remote:
@@ -670,8 +662,6 @@ static void rockchip_lvds_unbind(struct device *dev, struct device *master,
encoder_funcs = lvds->soc_data->helper_funcs;
encoder_funcs->disable(&lvds->encoder.encoder);
pm_runtime_disable(dev);
- drm_connector_cleanup(&lvds->connector);
- drm_encoder_cleanup(&lvds->encoder.encoder);
}
static const struct component_ops rockchip_lvds_component_ops = {
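[Editor's note] The LVDS conversion above follows the standard panel-as-bridge shape: wrap the panel in a bridge, attach with DRM_BRIDGE_ATTACH_NO_CONNECTOR, and let drm_bridge_connector_init() build the connector. Stripped to its skeleton (a sketch; the real code also installs the custom get_modes funcs on the bridge):

	bridge = drm_panel_bridge_add_typed(panel, DRM_MODE_CONNECTOR_LVDS);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		goto err_remove_bridge;

	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector)) {
		ret = PTR_ERR(connector);
		goto err_remove_bridge;
	}

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret)
		goto err_remove_bridge;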
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 32c4ed685739..45c5e3987813 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -2052,12 +2052,55 @@ static void vop2_setup_alpha(struct vop2_video_port *vp)
}
}
+static u32 rk3568_vop2_read_port_mux(struct vop2 *vop2)
+{
+ return vop2_readl(vop2, RK3568_OVL_PORT_SEL);
+}
+
+static void rk3568_vop2_wait_for_port_mux_done(struct vop2 *vop2)
+{
+ u32 port_mux_sel;
+ int ret;
+
+ /*
+ * Spin until the previous port_mux configuration is done.
+ */
+ ret = readx_poll_timeout_atomic(rk3568_vop2_read_port_mux, vop2, port_mux_sel,
+ port_mux_sel == vop2->old_port_sel, 0, 50 * 1000);
+ if (ret)
+ DRM_DEV_ERROR(vop2->dev, "wait port_mux done timeout: 0x%x--0x%x\n",
+ port_mux_sel, vop2->old_port_sel);
+}
+
+static u32 rk3568_vop2_read_layer_cfg(struct vop2 *vop2)
+{
+ return vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+}
+
+static void rk3568_vop2_wait_for_layer_cfg_done(struct vop2 *vop2, u32 cfg)
+{
+ u32 atv_layer_cfg;
+ int ret;
+
+ /*
+ * Spin until the previous layer configuration is done.
+ */
+ ret = readx_poll_timeout_atomic(rk3568_vop2_read_layer_cfg, vop2, atv_layer_cfg,
+ atv_layer_cfg == cfg, 0, 50 * 1000);
+ if (ret)
+ DRM_DEV_ERROR(vop2->dev, "wait layer cfg done timeout: 0x%x--0x%x\n",
+ atv_layer_cfg, cfg);
+}
+
static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
{
struct vop2 *vop2 = vp->vop2;
struct drm_plane *plane;
u32 layer_sel = 0;
u32 port_sel;
+ u32 old_layer_sel = 0;
+ u32 atv_layer_sel = 0;
+ u32 old_port_sel = 0;
u8 layer_id;
u8 old_layer_id;
u8 layer_sel_id;
@@ -2069,19 +2112,18 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
struct vop2_video_port *vp2 = &vop2->vps[2];
struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
+ mutex_lock(&vop2->ovl_lock);
ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL);
ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL;
- ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id);
if (vcstate->yuv_overlay)
ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id);
else
ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id);
- vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl);
-
- port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
+ old_port_sel = vop2->old_port_sel;
+ port_sel = old_port_sel;
port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT;
if (vp0->nlayers)
@@ -2102,7 +2144,13 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
else
port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8);
- layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+ /* Fixed value for rk3588 */
+ if (vop2->version == VOP_VERSION_RK3588)
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SET__PORT3_MUX, 7);
+
+ atv_layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+ old_layer_sel = vop2->old_layer_sel;
+ layer_sel = old_layer_sel;
ofs = 0;
for (i = 0; i < vp->id; i++)
@@ -2186,8 +2234,37 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
old_win->data->layer_sel_id[vp->id]);
}
+ vop2->old_layer_sel = layer_sel;
+ vop2->old_port_sel = port_sel;
+ /*
+ * RK3568_OVL_LAYER_SEL and RK3568_OVL_PORT_SEL are shared by all Video
+ * Ports, and their configuration takes effect on one Video Port's vsync.
+ * When migrating a layer or changing the zpos of layers, two rules must
+ * be observed:
+ * 1. When a layer is migrated from one VP to another, the layer's
+ * configuration can only take effect after the port mux configuration
+ * is enabled.
+ *
+ * 2. When changing the zpos of layers, the change for the previous VP must
+ * take effect before the next VP is changed. Otherwise, the new
+ * configuration might overwrite the previous one, or the previous VP's
+ * configuration could take effect along with the new VP's vsync.
+ */
+ if (layer_sel != old_layer_sel || port_sel != old_port_sel)
+ ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id);
+ vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl);
+
+ if (port_sel != old_port_sel) {
+ vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
+ vop2_cfg_done(vp);
+ rk3568_vop2_wait_for_port_mux_done(vop2);
+ }
+
+ if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel)
+ rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
+
vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
- vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
+ mutex_unlock(&vop2->ovl_lock);
}
static void rk3568_vop2_setup_dly_for_windows(struct vop2_video_port *vp)
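[Editor's note] Both wait helpers added above are built on readx_poll_timeout_atomic() from <linux/iopoll.h>: it repeatedly stores op(addr) into val until cond holds or timeout_us expires, busy-waiting (delay_us of 0 means no delay between reads) and returning 0 or -ETIMEDOUT. A minimal sketch with a hypothetical status register:

#include <linux/iopoll.h>

static u32 read_status(struct vop2 *vop2)
{
	return vop2_readl(vop2, EXAMPLE_STATUS_REG);	/* hypothetical */
}

static int wait_done(struct vop2 *vop2)
{
	u32 val;

	/* Poll until bit 0 is set, for at most 50 ms, without sleeping. */
	return readx_poll_timeout_atomic(read_status, vop2, val,
					 val & BIT(0), 0, 50 * 1000);
}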
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index f56e77e7f6d0..261713dd7d5a 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -32,78 +32,123 @@
#define TRACE_SYSTEM gpu_scheduler
#define TRACE_INCLUDE_FILE gpu_scheduler_trace
+/**
+ * DOC: uAPI trace events
+ *
+ * ``drm_sched_job_queue``, ``drm_sched_job_run``, ``drm_sched_job_add_dep``,
+ * ``drm_sched_job_done`` and ``drm_sched_job_unschedulable`` are considered
+ * stable uAPI.
+ *
+ * Common trace events attributes:
+ *
+ * * ``dev`` - the dev_name() of the device running the job.
+ *
+ * * ``ring`` - the hardware ring running the job. Together with ``dev`` it
+ * uniquely identifies where the job is going to be executed.
+ *
+ * * ``fence`` - the &struct dma_fence.context and the &struct dma_fence.seqno of
+ * &struct drm_sched_fence.finished
+ *
+ * All the events depend on drm_sched_job_arm() having been called already for
+ * the job because they use &struct drm_sched_job.sched or
+ * &struct drm_sched_job.s_fence.
+ */
+
DECLARE_EVENT_CLASS(drm_sched_job,
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity),
TP_STRUCT__entry(
- __field(struct drm_sched_entity *, entity)
- __field(struct dma_fence *, fence)
__string(name, sched_job->sched->name)
- __field(uint64_t, id)
__field(u32, job_count)
__field(int, hw_job_count)
+ __string(dev, dev_name(sched_job->sched->dev))
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, client_id)
),
TP_fast_assign(
- __entry->entity = entity;
- __entry->id = sched_job->id;
- __entry->fence = &sched_job->s_fence->finished;
__assign_str(name);
__entry->job_count = spsc_queue_count(&entity->job_queue);
__entry->hw_job_count = atomic_read(
&sched_job->sched->credit_count);
+ __assign_str(dev);
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
+ __entry->client_id = sched_job->s_fence->drm_client_id;
),
- TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
- __entry->entity, __entry->id,
- __entry->fence, __get_str(name),
- __entry->job_count, __entry->hw_job_count)
+ TP_printk("dev=%s, fence=%llu:%llu, ring=%s, job count:%u, hw job count:%d, client_id:%llu",
+ __get_str(dev),
+ __entry->fence_context, __entry->fence_seqno, __get_str(name),
+ __entry->job_count, __entry->hw_job_count, __entry->client_id)
);
-DEFINE_EVENT(drm_sched_job, drm_sched_job,
+DEFINE_EVENT(drm_sched_job, drm_sched_job_queue,
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity)
);
-DEFINE_EVENT(drm_sched_job, drm_run_job,
+DEFINE_EVENT(drm_sched_job, drm_sched_job_run,
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity)
);
-TRACE_EVENT(drm_sched_process_job,
+TRACE_EVENT(drm_sched_job_done,
TP_PROTO(struct drm_sched_fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(struct dma_fence *, fence)
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
),
TP_fast_assign(
- __entry->fence = &fence->finished;
+ __entry->fence_context = fence->finished.context;
+ __entry->fence_seqno = fence->finished.seqno;
),
- TP_printk("fence=%p signaled", __entry->fence)
+ TP_printk("fence=%llu:%llu signaled",
+ __entry->fence_context, __entry->fence_seqno)
);
-TRACE_EVENT(drm_sched_job_wait_dep,
+TRACE_EVENT(drm_sched_job_add_dep,
+ TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
+ TP_ARGS(sched_job, fence),
+ TP_STRUCT__entry(
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
+ __entry->ctx = fence->context;
+ __entry->seqno = fence->seqno;
+ ),
+ TP_printk("fence=%llu:%llu depends on fence=%llu:%llu",
+ __entry->fence_context, __entry->fence_seqno,
+ __entry->ctx, __entry->seqno)
+);
+
+TRACE_EVENT(drm_sched_job_unschedulable,
TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
- __string(name, sched_job->sched->name)
- __field(uint64_t, id)
- __field(struct dma_fence *, fence)
- __field(uint64_t, ctx)
- __field(unsigned, seqno)
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
),
TP_fast_assign(
- __assign_str(name);
- __entry->id = sched_job->id;
- __entry->fence = fence;
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
- TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
- __get_str(name), __entry->id,
- __entry->fence, __entry->ctx,
- __entry->seqno)
+ TP_printk("fence=%llu:%llu depends on unsignalled fence=%llu:%llu",
+ __entry->fence_context, __entry->fence_seqno,
+ __entry->ctx, __entry->seqno)
);
#endif /* _GPU_SCHED_TRACE_H_ */
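[Editor's note] Since the renamed events are declared stable uAPI, a tool can key on the fence=context:seqno pair that every event prints. A userspace sketch, assuming tracefs is mounted at /sys/kernel/tracing and the gpu_scheduler events have been enabled:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	char line[512];
	unsigned long long ctx, seqno;

	if (!f)
		return 1;

	while (fgets(line, sizeof(line), f)) {
		/* Every stable event carries "fence=<context>:<seqno>". */
		char *p = strstr(line, "fence=");

		if (p && sscanf(p, "fence=%llu:%llu", &ctx, &seqno) == 2)
			printf("job %llu:%llu: %s", ctx, seqno, line);
	}

	fclose(f);
	return 0;
}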
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index ac678de7fe5e..8867b95ab089 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -21,7 +21,7 @@
*
*/
-#include <linux/kthread.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/completion.h>
@@ -461,10 +461,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
while ((entity->dependency =
drm_sched_job_dependency(sched_job, entity))) {
- trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
-
- if (drm_sched_entity_add_dependency_cb(entity))
+ if (drm_sched_entity_add_dependency_cb(entity)) {
+ trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
return NULL;
+ }
}
/* skip jobs from entity that marked guilty */
@@ -529,10 +529,10 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
return;
/*
- * Only when the queue is empty are we guaranteed that the scheduler
- * thread cannot change ->last_scheduled. To enforce ordering we need
- * a read barrier here. See drm_sched_entity_pop_job() for the other
- * side.
+ * Only when the queue is empty are we guaranteed that
+ * drm_sched_run_job_work() cannot change entity->last_scheduled. To
+ * enforce ordering we need a read barrier here. See
+ * drm_sched_entity_pop_job() for the other side.
*/
smp_rmb();
@@ -570,7 +570,15 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
bool first;
ktime_t submit_ts;
- trace_drm_sched_job(sched_job, entity);
+ trace_drm_sched_job_queue(sched_job, entity);
+
+ if (trace_drm_sched_job_add_dep_enabled()) {
+ struct dma_fence *entry;
+ unsigned long index;
+
+ xa_for_each(&sched_job->dependencies, index, entry)
+ trace_drm_sched_job_add_dep(sched_job, entry);
+ }
atomic_inc(entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
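[Editor's note] The trace_drm_sched_job_add_dep_enabled() check above is the standard tracepoint guard: every event FOO gets a static-key-backed trace_FOO_enabled() helper, so work that only feeds the tracepoint (here, walking the dependencies xarray) is skipped entirely when tracing is off. The same pattern in general form, with a hypothetical event name:

	if (trace_my_event_enabled()) {
		/* Only pay for argument preparation while the event is on. */
		u64 arg = compute_expensive_argument();

		trace_my_event(arg);
	}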
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index e971528504a5..9391d6f0dc01 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -21,7 +21,7 @@
*
*/
-#include <linux/kthread.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -206,7 +206,8 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
EXPORT_SYMBOL(to_drm_sched_fence);
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
- void *owner)
+ void *owner,
+ u64 drm_client_id)
{
struct drm_sched_fence *fence = NULL;
@@ -215,6 +216,7 @@ struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
return NULL;
fence->owner = owner;
+ fence->drm_client_id = drm_client_id;
spin_lock_init(&fence->lock);
return fence;
diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
index 599cf6e1bb74..7ea5a6736f98 100644
--- a/drivers/gpu/drm/scheduler/sched_internal.h
+++ b/drivers/gpu/drm/scheduler/sched_internal.h
@@ -24,7 +24,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *s_entity,
- void *owner);
+ void *owner, u64 drm_client_id);
void drm_sched_fence_init(struct drm_sched_fence *fence,
struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 829579c41c6b..e2cda28a1af4 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -66,6 +66,7 @@
* This implies waiting for previously executed jobs.
*/
+#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
@@ -83,12 +84,6 @@
#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
-#ifdef CONFIG_LOCKDEP
-static struct lockdep_map drm_sched_lockdep_map = {
- .name = "drm_sched_lockdep_map"
-};
-#endif
-
int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
/**
@@ -268,38 +263,14 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
entity = rq->current_entity;
if (entity) {
list_for_each_entry_continue(entity, &rq->entities, list) {
- if (drm_sched_entity_is_ready(entity)) {
- /* If we can't queue yet, preserve the current
- * entity in terms of fairness.
- */
- if (!drm_sched_can_queue(sched, entity)) {
- spin_unlock(&rq->lock);
- return ERR_PTR(-ENOSPC);
- }
-
- rq->current_entity = entity;
- reinit_completion(&entity->entity_idle);
- spin_unlock(&rq->lock);
- return entity;
- }
+ if (drm_sched_entity_is_ready(entity))
+ goto found;
}
}
list_for_each_entry(entity, &rq->entities, list) {
- if (drm_sched_entity_is_ready(entity)) {
- /* If we can't queue yet, preserve the current entity in
- * terms of fairness.
- */
- if (!drm_sched_can_queue(sched, entity)) {
- spin_unlock(&rq->lock);
- return ERR_PTR(-ENOSPC);
- }
-
- rq->current_entity = entity;
- reinit_completion(&entity->entity_idle);
- spin_unlock(&rq->lock);
- return entity;
- }
+ if (drm_sched_entity_is_ready(entity))
+ goto found;
if (entity == rq->current_entity)
break;
@@ -308,6 +279,22 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
spin_unlock(&rq->lock);
return NULL;
+
+found:
+ if (!drm_sched_can_queue(sched, entity)) {
+ /*
+ * If scheduler cannot take more jobs signal the caller to not
+ * consider lower priority queues.
+ */
+ entity = ERR_PTR(-ENOSPC);
+ } else {
+ rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
+ }
+
+ spin_unlock(&rq->lock);
+
+ return entity;
}
/**
@@ -379,11 +366,16 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job;
- spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
if (job && dma_fence_is_signaled(&job->s_fence->finished))
__drm_sched_run_free_queue(sched);
+}
+
+static void drm_sched_run_free_queue_unlocked(struct drm_gpu_scheduler *sched)
+{
+ spin_lock(&sched->job_list_lock);
+ drm_sched_run_free_queue(sched);
spin_unlock(&sched->job_list_lock);
}
@@ -391,7 +383,7 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
- * Finish the job's fence and wake up the worker thread.
+ * Finish the job's fence and resubmit the work items.
*/
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
@@ -401,7 +393,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
atomic_sub(s_job->credits, &sched->credit_count);
atomic_dec(sched->score);
- trace_drm_sched_process_job(s_fence);
+ trace_drm_sched_job_done(s_fence);
dma_fence_get(&s_fence->finished);
drm_sched_fence_finished(s_fence, result);
@@ -536,11 +528,37 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
spin_unlock(&sched->job_list_lock);
}
+/**
+ * drm_sched_job_reinsert_on_false_timeout - reinsert the job on a false timeout
+ * @sched: scheduler instance
+ * @job: job to be reinserted on the pending list
+ *
+ * In the case of a "false timeout" - when a timeout occurs but the GPU isn't
+ * hung and is making progress, the scheduler must reinsert the job back into
+ * @sched->pending_list. Otherwise, the job and its resources won't be freed
+ * through the &struct drm_sched_backend_ops.free_job callback.
+ *
+ * This function must be used in "false timeout" cases only.
+ */
+static void drm_sched_job_reinsert_on_false_timeout(struct drm_gpu_scheduler *sched,
+ struct drm_sched_job *job)
+{
+ spin_lock(&sched->job_list_lock);
+ list_add(&job->list, &sched->pending_list);
+
+ /* After reinserting the job, the scheduler enqueues the free-job work
+ * again if ready. Otherwise, a signaled job could be added to the
+ * pending list, but never freed.
+ */
+ drm_sched_run_free_queue(sched);
+ spin_unlock(&sched->job_list_lock);
+}
+
static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
- enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
+ enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
@@ -551,9 +569,10 @@ static void drm_sched_job_timedout(struct work_struct *work)
if (job) {
/*
- * Remove the bad job so it cannot be freed by concurrent
- * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
- * is parked at which point it's safe.
+ * Remove the bad job so it cannot be freed by a concurrent
+ * &struct drm_sched_backend_ops.free_job. It will be
+ * reinserted after the scheduler's work items have been
+ * cancelled, at which point it's safe.
*/
list_del_init(&job->list);
spin_unlock(&sched->job_list_lock);
@@ -568,6 +587,9 @@ static void drm_sched_job_timedout(struct work_struct *work)
job->sched->ops->free_job(job);
sched->free_guilty = false;
}
+
+ if (status == DRM_GPU_SCHED_STAT_NO_HANG)
+ drm_sched_job_reinsert_on_false_timeout(sched, job);
} else {
spin_unlock(&sched->job_list_lock);
}
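
A hedged sketch of how a driver's timeout handler might look under the
new status codes (all my_*() names are illustrative assumptions, not a
real driver API):

static enum drm_gpu_sched_stat
my_timedout_job(struct drm_sched_job *sched_job)
{
	struct my_job *job = to_my_job(sched_job);

	/*
	 * False timeout: the hardware is still making progress, so skip
	 * the reset. The scheduler then reinserts the job into the
	 * pending list via drm_sched_job_reinsert_on_false_timeout().
	 */
	if (my_hw_is_making_progress(job->ring))
		return DRM_GPU_SCHED_STAT_NO_HANG;

	/*
	 * Real hang: only this path may use the reset helpers, per the
	 * updated drm_sched_stop()/drm_sched_start() documentation.
	 */
	drm_sched_stop(sched_job->sched, sched_job);
	my_hw_reset(job->ring);
	drm_sched_start(sched_job->sched, -ECANCELED);

	return DRM_GPU_SCHED_STAT_RESET;
}
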
@@ -590,6 +612,10 @@ static void drm_sched_job_timedout(struct work_struct *work)
* This function is typically used for reset recovery (see the docu of
* drm_sched_backend_ops.timedout_job() for details). Do not call it for
* scheduler teardown, i.e., before calling drm_sched_fini().
+ *
+ * As it's only used for reset recovery, drivers must not call this function
+ * in their &struct drm_sched_backend_ops.timedout_job callback when they
+ * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
*/
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
@@ -599,10 +625,10 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
/*
* Reinsert back the bad job here - now it's safe as
- * drm_sched_get_finished_job cannot race against us and release the
+ * drm_sched_get_finished_job() cannot race against us and release the
* bad job at this point - we parked (waited for) any in progress
- * (earlier) cleanups and drm_sched_get_finished_job will not be called
- * now until the scheduler thread is unparked.
+ * (earlier) cleanups and drm_sched_get_finished_job() will not be
+ * called now until the scheduler's work items are submitted again.
*/
if (bad && bad->sched == sched)
/*
@@ -615,7 +641,8 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* Iterate the job list from later to earlier one and either deactivate
* their HW callbacks or remove them from pending list if they already
* signaled.
- * This iteration is thread safe as sched thread is stopped.
+ * This iteration is thread safe as the scheduler's work items have been
+ * cancelled.
*/
list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
list) {
@@ -674,15 +701,19 @@ EXPORT_SYMBOL(drm_sched_stop);
* drm_sched_backend_ops.timedout_job() for details). Do not call it for
* scheduler startup. The scheduler itself is fully operational after
* drm_sched_init() succeeded.
+ *
+ * As it's only used for reset recovery, drivers must not call this function
+ * in their &struct drm_sched_backend_ops.timedout_job callback when they
+ * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
*/
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
{
struct drm_sched_job *s_job, *tmp;
/*
- * Locking the list is not required here as the sched thread is parked
- * so no new jobs are being inserted or removed. Also concurrent
- * GPU recovers can't run in parallel.
+ * Locking the list is not required here as the scheduler's work items
+ * are currently not running, so no new jobs are being inserted or
+ * removed. Also concurrent GPU recovers can't run in parallel.
*/
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct dma_fence *fence = s_job->s_fence->parent;
@@ -764,6 +795,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
* @credits: the number of credits this job contributes to the schedulers
* credit limit
* @owner: job owner for debugging
+ * @drm_client_id: &struct drm_file.client_id of the owner (used by trace
+ * events)
*
* Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
@@ -784,7 +817,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- u32 credits, void *owner)
+ u32 credits, void *owner,
+ uint64_t drm_client_id)
{
if (!entity->rq) {
/* This will most likely be followed by missing frames
@@ -810,7 +844,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
job->entity = entity;
job->credits = credits;
- job->s_fence = drm_sched_fence_alloc(entity, owner);
+ job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id);
if (!job->s_fence)
return -ENOMEM;
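
A hedged call-site sketch for the widened drm_sched_job_init(); the
submit-path names around it are assumptions:

static int my_submit(struct drm_file *file_priv, struct my_job *job,
		     struct drm_sched_entity *entity)
{
	/*
	 * The new fifth argument forwards the DRM client id so that the
	 * renamed drm_sched_job_* trace events can attribute the job.
	 */
	return drm_sched_job_init(&job->base, entity, 1 /* credits */,
				  file_priv /* owner */,
				  file_priv->client_id);
}
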
@@ -850,7 +884,6 @@ void drm_sched_job_arm(struct drm_sched_job *job)
job->sched = sched;
job->s_priority = entity->priority;
- job->id = atomic64_inc_return(&sched->job_id_count);
drm_sched_fence_init(job->s_fence, job->entity);
}
@@ -1193,7 +1226,7 @@ static void drm_sched_free_job_work(struct work_struct *w)
if (job)
sched->ops->free_job(job);
- drm_sched_run_free_queue(sched);
+ drm_sched_run_free_queue_unlocked(sched);
drm_sched_run_job_queue(sched);
}
@@ -1229,7 +1262,7 @@ static void drm_sched_run_job_work(struct work_struct *w)
atomic_add(sched_job->credits, &sched->credit_count);
drm_sched_job_begin(sched_job);
- trace_drm_run_job(sched_job, entity);
+ trace_drm_sched_job_run(sched_job, entity);
/*
* The run_job() callback must by definition return a fence whose
* refcount has been incremented for the scheduler already.
@@ -1256,6 +1289,25 @@ static void drm_sched_run_job_work(struct work_struct *w)
drm_sched_run_job_queue(sched);
}
+static struct workqueue_struct *drm_sched_alloc_wq(const char *name)
+{
+#if (IS_ENABLED(CONFIG_LOCKDEP))
+ static struct lockdep_map map = {
+ .name = "drm_sched_lockdep_map"
+ };
+
+ /*
+ * Avoid leaking a lockdep map on each drm sched creation and
+ * destruction by using a single lockdep map for all drm sched
+ * allocated submit_wq.
+ */
+
+ return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map);
+#else
+ return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+#endif
+}
+
/**
* drm_sched_init - Init a gpu scheduler instance
*
@@ -1296,13 +1348,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
sched->submit_wq = args->submit_wq;
sched->own_submit_wq = false;
} else {
-#ifdef CONFIG_LOCKDEP
- sched->submit_wq = alloc_ordered_workqueue_lockdep_map(args->name,
- WQ_MEM_RECLAIM,
- &drm_sched_lockdep_map);
-#else
- sched->submit_wq = alloc_ordered_workqueue(args->name, WQ_MEM_RECLAIM);
-#endif
+ sched->submit_wq = drm_sched_alloc_wq(args->name);
if (!sched->submit_wq)
return -ENOMEM;
@@ -1348,6 +1394,18 @@ Out_check_own:
}
EXPORT_SYMBOL(drm_sched_init);
+static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *job, *tmp;
+
+ /* All other accessors are stopped. No locking necessary. */
+ list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) {
+ sched->ops->cancel_job(job);
+ list_del(&job->list);
+ sched->ops->free_job(job);
+ }
+}
+
/**
* drm_sched_fini - Destroy a gpu scheduler
*
@@ -1355,19 +1413,11 @@ EXPORT_SYMBOL(drm_sched_init);
*
* Tears down and cleans up the scheduler.
*
- * This stops submission of new jobs to the hardware through
- * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job()
- * will not be called for all jobs still in drm_gpu_scheduler.pending_list.
- * There is no solution for this currently. Thus, it is up to the driver to make
- * sure that:
- *
- * a) drm_sched_fini() is only called after for all submitted jobs
- * drm_sched_backend_ops.free_job() has been called or that
- * b) the jobs for which drm_sched_backend_ops.free_job() has not been called
- * after drm_sched_fini() ran are freed manually.
- *
- * FIXME: Take care of the above problem and prevent this function from leaking
- * the jobs in drm_gpu_scheduler.pending_list under any circumstances.
+ * This stops submission of new jobs to the hardware through &struct
+ * drm_sched_backend_ops.run_job. If &struct drm_sched_backend_ops.cancel_job
+ * is implemented, all jobs will be canceled through it and afterwards cleaned
+ * up through &struct drm_sched_backend_ops.free_job. If cancel_job is not
+ * implemented, memory could leak.
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
@@ -1397,11 +1447,18 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&sched->work_tdr);
+ /* Avoid memory leaks if supported by the driver. */
+ if (sched->ops->cancel_job)
+ drm_sched_cancel_remaining_jobs(sched);
+
if (sched->own_submit_wq)
destroy_workqueue(sched->submit_wq);
sched->ready = false;
kfree(sched->sched_rq);
sched->sched_rq = NULL;
+
+ if (!list_empty(&sched->pending_list))
+ dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n");
}
EXPORT_SYMBOL(drm_sched_fini);
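
A hedged sketch of the new teardown contract from a driver's point of
view, modeled on the mock scheduler below (my_*() names are assumptions):

static void my_cancel_job(struct drm_sched_job *sched_job)
{
	struct my_job *job = to_my_job(sched_job);

	/* Make sure nothing can still signal the fence concurrently. */
	my_hw_stop_ring(job->ring);

	if (!dma_fence_is_signaled(job->hw_fence)) {
		dma_fence_set_error(job->hw_fence, -ECANCELED);
		dma_fence_signal(job->hw_fence);
	}
	/* drm_sched_fini() still calls free_job() for this job afterwards. */
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.run_job	= my_run_job,
	.timedout_job	= my_timedout_job,
	.free_job	= my_free_job,
	.cancel_job	= my_cancel_job,
};
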
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
index f999c8859cf7..65acffc3fea8 100644
--- a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -63,8 +63,8 @@ static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
lockdep_assert_held(&sched->lock);
job->flags |= DRM_MOCK_SCHED_JOB_DONE;
- list_move_tail(&job->link, &sched->done_list);
- dma_fence_signal(&job->hw_fence);
+ list_del(&job->link);
+ dma_fence_signal_locked(&job->hw_fence);
complete(&job->done);
}
@@ -117,13 +117,13 @@ drm_mock_sched_job_new(struct kunit *test,
ret = drm_sched_job_init(&job->base,
&entity->base,
1,
- NULL);
+ NULL,
+ 1);
KUNIT_ASSERT_EQ(test, ret, 0);
job->test = test;
init_completion(&job->done);
- spin_lock_init(&job->lock);
INIT_LIST_HEAD(&job->link);
hrtimer_setup(&job->timer, drm_mock_sched_job_signal_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@@ -169,7 +169,7 @@ static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job)
dma_fence_init(&job->hw_fence,
&drm_mock_sched_hw_fence_ops,
- &job->lock,
+ &sched->lock,
sched->hw_timeline.context,
atomic_inc_return(&sched->hw_timeline.next_seqno));
@@ -200,38 +200,82 @@ static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job)
return &job->hw_fence;
}
+/*
+ * Normally, drivers would take appropriate measures in this callback, such as
+ * killing the entity the faulty job is associated with, resetting the hardware
+ * and/or resubmitting non-faulty jobs.
+ *
+ * For the mock scheduler, there are no hardware rings to be reset nor jobs
+ * to be resubmitted. Thus, this function merely ensures that
+ * a) timed out fences get signaled properly and removed from the pending list
+ * b) the mock scheduler framework gets informed about the timeout via a flag
+ * c) the drm_sched_job, no longer needed, gets freed
+ */
static enum drm_gpu_sched_stat
mock_sched_timedout_job(struct drm_sched_job *sched_job)
{
+ struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+ unsigned long flags;
+
+ if (job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET) {
+ job->flags &= ~DRM_MOCK_SCHED_JOB_DONT_RESET;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
+ }
- job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT;
+ spin_lock_irqsave(&sched->lock, flags);
+ if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
+ list_del(&job->link);
+ job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT;
+ dma_fence_set_error(&job->hw_fence, -ETIMEDOUT);
+ dma_fence_signal_locked(&job->hw_fence);
+ }
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ dma_fence_put(&job->hw_fence);
+ drm_sched_job_cleanup(sched_job);
+ /* Mock job itself is freed by the kunit framework. */
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void mock_sched_free_job(struct drm_sched_job *sched_job)
{
- struct drm_mock_scheduler *sched =
- drm_sched_to_mock_sched(sched_job->sched);
struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
- unsigned long flags;
- /* Remove from the scheduler done list. */
- spin_lock_irqsave(&sched->lock, flags);
- list_del(&job->link);
- spin_unlock_irqrestore(&sched->lock, flags);
dma_fence_put(&job->hw_fence);
-
drm_sched_job_cleanup(sched_job);
/* Mock job itself is freed by the kunit framework. */
}
+static void mock_sched_cancel_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+ unsigned long flags;
+
+ hrtimer_cancel(&job->timer);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
+ list_del(&job->link);
+ dma_fence_set_error(&job->hw_fence, -ECANCELED);
+ dma_fence_signal_locked(&job->hw_fence);
+ }
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ /*
+ * The GPU scheduler will still call drm_sched_backend_ops.free_job().
+ * Mock job itself is freed by the kunit framework.
+ */
+}
+
static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
.run_job = mock_sched_run_job,
.timedout_job = mock_sched_timedout_job,
- .free_job = mock_sched_free_job
+ .free_job = mock_sched_free_job,
+ .cancel_job = mock_sched_cancel_job,
};
/**
@@ -265,7 +309,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
sched->hw_timeline.context = dma_fence_context_alloc(1);
atomic_set(&sched->hw_timeline.next_seqno, 0);
INIT_LIST_HEAD(&sched->job_list);
- INIT_LIST_HEAD(&sched->done_list);
spin_lock_init(&sched->lock);
return sched;
@@ -280,38 +323,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
*/
void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
{
- struct drm_mock_sched_job *job, *next;
- unsigned long flags;
- LIST_HEAD(list);
-
- drm_sched_wqueue_stop(&sched->base);
-
- /* Force complete all unfinished jobs. */
- spin_lock_irqsave(&sched->lock, flags);
- list_for_each_entry_safe(job, next, &sched->job_list, link)
- list_move_tail(&job->link, &list);
- spin_unlock_irqrestore(&sched->lock, flags);
-
- list_for_each_entry(job, &list, link)
- hrtimer_cancel(&job->timer);
-
- spin_lock_irqsave(&sched->lock, flags);
- list_for_each_entry_safe(job, next, &list, link)
- drm_mock_sched_job_complete(job);
- spin_unlock_irqrestore(&sched->lock, flags);
-
- /*
- * Free completed jobs and jobs not yet processed by the DRM scheduler
- * free worker.
- */
- spin_lock_irqsave(&sched->lock, flags);
- list_for_each_entry_safe(job, next, &sched->done_list, link)
- list_move_tail(&job->link, &list);
- spin_unlock_irqrestore(&sched->lock, flags);
-
- list_for_each_entry_safe(job, next, &list, link)
- mock_sched_free_job(&job->base);
-
drm_sched_fini(&sched->base);
}
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
index 27caf8285fb7..63d4f2ac7074 100644
--- a/drivers/gpu/drm/scheduler/tests/sched_tests.h
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -49,7 +49,6 @@ struct drm_mock_scheduler {
spinlock_t lock;
struct list_head job_list;
- struct list_head done_list;
struct {
u64 context;
@@ -98,6 +97,7 @@ struct drm_mock_sched_job {
#define DRM_MOCK_SCHED_JOB_DONE 0x1
#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
+#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4
unsigned long flags;
struct list_head link;
@@ -106,7 +106,6 @@ struct drm_mock_sched_job {
unsigned int duration_us;
ktime_t finish_at;
- spinlock_t lock;
struct dma_fence hw_fence;
struct kunit *test;
diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c
index 7230057e0594..55eb142bd7c5 100644
--- a/drivers/gpu/drm/scheduler/tests/tests_basic.c
+++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c
@@ -5,6 +5,8 @@
#include "sched_tests.h"
+#define MOCK_TIMEOUT (HZ / 5)
+
/*
* DRM scheduler basic tests should check the basic functional correctness of
* the scheduler, including some very light smoke testing. More targeted tests,
@@ -28,7 +30,7 @@ static void drm_sched_basic_exit(struct kunit *test)
static int drm_sched_timeout_init(struct kunit *test)
{
- test->priv = drm_mock_sched_new(test, HZ);
+ test->priv = drm_mock_sched_new(test, MOCK_TIMEOUT);
return 0;
}
@@ -204,6 +206,47 @@ static struct kunit_suite drm_sched_basic = {
.test_cases = drm_sched_basic_tests,
};
+static void drm_sched_basic_cancel(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_scheduler *sched;
+ struct drm_mock_sched_job *job;
+ bool done;
+
+ /*
+ * Check that drm_sched_fini() uses the cancel_job() callback to cancel
+ * jobs that are still pending.
+ */
+
+ sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+ entity = drm_mock_sched_entity_new(test, DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+ drm_mock_sched_fini(sched);
+
+ KUNIT_ASSERT_EQ(test, job->hw_fence.error, -ECANCELED);
+}
+
+static struct kunit_case drm_sched_cancel_tests[] = {
+ KUNIT_CASE(drm_sched_basic_cancel),
+ {}
+};
+
+static struct kunit_suite drm_sched_cancel = {
+ .name = "drm_sched_basic_cancel_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_cancel_tests,
+};
+
static void drm_sched_basic_timeout(struct kunit *test)
{
struct drm_mock_scheduler *sched = test->priv;
@@ -227,14 +270,14 @@ static void drm_sched_basic_timeout(struct kunit *test)
done = drm_mock_sched_job_wait_scheduled(job, HZ);
KUNIT_ASSERT_TRUE(test, done);
- done = drm_mock_sched_job_wait_finished(job, HZ / 2);
+ done = drm_mock_sched_job_wait_finished(job, MOCK_TIMEOUT / 2);
KUNIT_ASSERT_FALSE(test, done);
KUNIT_ASSERT_EQ(test,
job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT,
0);
- done = drm_mock_sched_job_wait_finished(job, HZ);
+ done = drm_mock_sched_job_wait_finished(job, MOCK_TIMEOUT);
KUNIT_ASSERT_FALSE(test, done);
KUNIT_ASSERT_EQ(test,
@@ -244,8 +287,51 @@ static void drm_sched_basic_timeout(struct kunit *test)
drm_mock_sched_entity_free(entity);
}
+static void drm_sched_skip_reset(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ unsigned int i;
+ bool done;
+
+ /*
+ * Submit a single job against a scheduler with the timeout configured
+ * and verify that if the job is still running, the timeout handler
+ * will skip the reset and allow the job to complete.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ job->flags = DRM_MOCK_SCHED_JOB_DONT_RESET;
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, 2 * MOCK_TIMEOUT);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET,
+ 0);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+}
+
static struct kunit_case drm_sched_timeout_tests[] = {
KUNIT_CASE(drm_sched_basic_timeout),
+ KUNIT_CASE(drm_sched_skip_reset),
{}
};
@@ -471,6 +557,7 @@ static struct kunit_suite drm_sched_credits = {
kunit_test_suites(&drm_sched_basic,
&drm_sched_timeout,
+ &drm_sched_cancel,
&drm_sched_priority,
&drm_sched_modify_sched,
&drm_sched_credits);
diff --git a/drivers/gpu/drm/sitronix/Kconfig b/drivers/gpu/drm/sitronix/Kconfig
index 741d1bb4b83f..6de7d92d9b74 100644
--- a/drivers/gpu/drm/sitronix/Kconfig
+++ b/drivers/gpu/drm/sitronix/Kconfig
@@ -11,10 +11,6 @@ config DRM_ST7571_I2C
if M is selected the module will be called st7571-i2c.
-config TINYDRM_ST7586
- tristate
- default n
-
config DRM_ST7586
tristate "DRM support for Sitronix ST7586 display panels"
depends on DRM && SPI
@@ -22,17 +18,12 @@ config DRM_ST7586
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
- default TINYDRM_ST7586
help
DRM driver for the following Sitronix ST7586 panels:
* LEGO MINDSTORMS EV3
If M is selected the module will be called st7586.
-config TINYDRM_ST7735R
- tristate
- default n
-
config DRM_ST7735R
tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
@@ -41,7 +32,6 @@ config DRM_ST7735R
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
- default TINYDRM_ST7735R
help
DRM driver for Sitronix ST7715R/ST7735R with one of the following
LCDs:
diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c
index eec846892962..453eb7e045e5 100644
--- a/drivers/gpu/drm/sitronix/st7571-i2c.c
+++ b/drivers/gpu/drm/sitronix/st7571-i2c.c
@@ -68,6 +68,9 @@
#define ST7571_SET_COLOR_MODE(c) (0x10 | FIELD_PREP(GENMASK(0, 0), (c)))
#define ST7571_COMMAND_SET_NORMAL (0x00)
+/* ST7567 commands */
+#define ST7567_SET_LCD_BIAS(m) (0xa2 | FIELD_PREP(GENMASK(0, 0), (m)))
+
#define ST7571_PAGE_HEIGHT 8
#define DRIVER_NAME "st7571"
@@ -92,6 +95,7 @@ struct st7571_panel_constraints {
struct st7571_panel_data {
int (*init)(struct st7571_device *st7571);
+ int (*parse_dt)(struct st7571_device *st7571);
struct st7571_panel_constraints constraints;
};
@@ -550,8 +554,8 @@ static const struct drm_crtc_funcs st7571_crtc_funcs = {
* Encoder
*/
-static void ssd130x_encoder_atomic_enable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void st7571_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_device *drm = encoder->dev;
struct st7571_device *st7571 = drm_to_st7571(drm);
@@ -565,8 +569,8 @@ static void ssd130x_encoder_atomic_enable(struct drm_encoder *encoder,
st7571_send_command_list(st7571, &command, 1);
}
-static void ssd130x_encoder_atomic_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void st7571_encoder_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_device *drm = encoder->dev;
struct st7571_device *st7571 = drm_to_st7571(drm);
@@ -581,8 +585,8 @@ static const struct drm_encoder_funcs st7571_encoder_funcs = {
};
static const struct drm_encoder_helper_funcs st7571_encoder_helper_funcs = {
- .atomic_enable = ssd130x_encoder_atomic_enable,
- .atomic_disable = ssd130x_encoder_atomic_disable,
+ .atomic_enable = st7571_encoder_atomic_enable,
+ .atomic_disable = st7571_encoder_atomic_disable,
};
/*
@@ -773,6 +777,32 @@ static int st7571_validate_parameters(struct st7571_device *st7571)
return 0;
}
+static int st7567_parse_dt(struct st7571_device *st7567)
+{
+ struct device *dev = &st7567->client->dev;
+ struct device_node *np = dev->of_node;
+ struct display_timing dt;
+ int ret;
+
+ ret = of_get_display_timing(np, "panel-timing", &dt);
+ if (ret) {
+ dev_err(dev, "Failed to get display timing from DT\n");
+ return ret;
+ }
+
+ of_property_read_u32(np, "width-mm", &st7567->width_mm);
+ of_property_read_u32(np, "height-mm", &st7567->height_mm);
+
+ st7567->pformat = &st7571_monochrome;
+ st7567->bpp = 1;
+
+ st7567->startline = dt.vfront_porch.typ;
+ st7567->nlines = dt.vactive.typ;
+ st7567->ncols = dt.hactive.typ;
+
+ return 0;
+}
+
static int st7571_parse_dt(struct st7571_device *st7571)
{
struct device *dev = &st7571->client->dev;
@@ -804,7 +834,9 @@ static int st7571_parse_dt(struct st7571_device *st7571)
st7571->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(st7571->reset))
- return PTR_ERR(st7571->reset);
+ return dev_err_probe(dev, PTR_ERR(st7571->reset),
+ "Failed to get reset gpio\n");
+
return 0;
}
@@ -816,6 +848,38 @@ static void st7571_reset(struct st7571_device *st7571)
gpiod_set_value_cansleep(st7571->reset, 0);
}
+static int st7567_lcd_init(struct st7571_device *st7567)
+{
+ /*
+ * Most of the initialization sequence is taken directly from the
+ * reference initialization code in the ST7567 datasheet.
+ */
+ u8 commands[] = {
+ ST7571_DISPLAY_OFF,
+
+ ST7567_SET_LCD_BIAS(1),
+
+ ST7571_SET_SEG_SCAN_DIR(0),
+ ST7571_SET_COM_SCAN_DIR(1),
+
+ ST7571_SET_REGULATOR_REG(4),
+ ST7571_SET_CONTRAST_MSB,
+ ST7571_SET_CONTRAST_LSB(0x20),
+
+ ST7571_SET_START_LINE_MSB,
+ ST7571_SET_START_LINE_LSB(st7567->startline),
+
+ ST7571_SET_POWER(0x4), /* Power Control, VC: ON, VR: OFF, VF: OFF */
+ ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */
+ ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */
+
+ ST7571_SET_REVERSE(0),
+ ST7571_SET_ENTIRE_DISPLAY_ON(0),
+ };
+
+ return st7571_send_command_list(st7567, commands, ARRAY_SIZE(commands));
+}
+
static int st7571_lcd_init(struct st7571_device *st7571)
{
/*
@@ -879,7 +943,7 @@ static int st7571_probe(struct i2c_client *client)
i2c_set_clientdata(client, st7571);
st7571->pdata = device_get_match_data(&client->dev);
- ret = st7571_parse_dt(st7571);
+ ret = st7571->pdata->parse_dt(st7571);
if (ret)
return ret;
@@ -960,8 +1024,21 @@ static void st7571_remove(struct i2c_client *client)
drm_dev_unplug(&st7571->dev);
}
+struct st7571_panel_data st7567_config = {
+ .init = st7567_lcd_init,
+ .parse_dt = st7567_parse_dt,
+ .constraints = {
+ .min_nlines = 1,
+ .max_nlines = 64,
+ .min_ncols = 128,
+ .max_ncols = 128,
+ .support_grayscale = false,
+ },
+};
+
struct st7571_panel_data st7571_config = {
.init = st7571_lcd_init,
+ .parse_dt = st7571_parse_dt,
.constraints = {
.min_nlines = 1,
.max_nlines = 128,
@@ -972,12 +1049,14 @@ struct st7571_panel_data st7571_config = {
};
static const struct of_device_id st7571_of_match[] = {
+ { .compatible = "sitronix,st7567", .data = &st7567_config },
{ .compatible = "sitronix,st7571", .data = &st7571_config },
{},
};
MODULE_DEVICE_TABLE(of, st7571_of_match);
static const struct i2c_device_id st7571_id[] = {
+ { "st7567", 0 },
{ "st7571", 0 },
{ }
};
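
With parse_dt and init both routed through struct st7571_panel_data,
wiring up a further controller variant becomes a data-only change. A
hedged sketch; the "st75xx" values are illustrative, not taken from any
datasheet:

static const struct st7571_panel_data st75xx_config = {
	.init		= st7567_lcd_init,	/* reuse a close init sequence */
	.parse_dt	= st7567_parse_dt,
	.constraints	= {
		.min_nlines		= 1,
		.max_nlines		= 64,
		.min_ncols		= 128,
		.max_ncols		= 128,
		.support_grayscale	= false,
	},
};

/* ...plus matching entries in st7571_of_match[] and st7571_id[]. */
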
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 74a1eef4674e..7484d3c3f4ed 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -97,7 +97,7 @@ struct sti_dvo {
struct dvo_config *config;
bool enabled;
struct drm_encoder *encoder;
- struct drm_bridge *bridge;
+ struct drm_bridge bridge;
};
struct sti_dvo_connector {
@@ -439,7 +439,6 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder;
struct sti_dvo_connector *connector;
struct drm_connector *drm_connector;
- struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
@@ -455,20 +454,14 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
connector->dvo = dvo;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge)
- return -ENOMEM;
-
- bridge->driver_private = dvo;
- bridge->funcs = &sti_dvo_bridge_funcs;
- bridge->of_node = dvo->dev.of_node;
- drm_bridge_add(bridge);
+ dvo->bridge.driver_private = dvo;
+ dvo->bridge.of_node = dvo->dev.of_node;
+ drm_bridge_add(&dvo->bridge);
- err = drm_bridge_attach(encoder, bridge, NULL, 0);
+ err = drm_bridge_attach(encoder, &dvo->bridge, NULL, 0);
if (err)
return err;
- dvo->bridge = bridge;
connector->encoder = encoder;
dvo->encoder = encoder;
@@ -490,7 +483,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
return 0;
err_sysfs:
- drm_bridge_remove(bridge);
+ drm_bridge_remove(&dvo->bridge);
return -EINVAL;
}
@@ -499,7 +492,7 @@ static void sti_dvo_unbind(struct device *dev,
{
struct sti_dvo *dvo = dev_get_drvdata(dev);
- drm_bridge_remove(dvo->bridge);
+ drm_bridge_remove(&dvo->bridge);
}
static const struct component_ops sti_dvo_ops = {
@@ -515,10 +508,10 @@ static int sti_dvo_probe(struct platform_device *pdev)
DRM_INFO("%s\n", __func__);
- dvo = devm_kzalloc(dev, sizeof(*dvo), GFP_KERNEL);
- if (!dvo) {
- DRM_ERROR("Failed to allocate memory for DVO\n");
- return -ENOMEM;
+ dvo = devm_drm_bridge_alloc(dev, struct sti_dvo, bridge, &sti_dvo_bridge_funcs);
+ if (IS_ERR(dvo)) {
+ DRM_ERROR("Failed to allocate DVO\n");
+ return PTR_ERR(dvo);
}
dvo->dev = pdev->dev;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index d202b6c1eb8f..2c015f563de9 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -246,6 +246,7 @@ struct sti_hda {
struct device dev;
struct drm_device *drm_dev;
struct drm_display_mode mode;
+ struct drm_bridge bridge;
void __iomem *regs;
void __iomem *video_dacs_ctrl;
struct clk *clk_pix;
@@ -262,6 +263,11 @@ struct sti_hda_connector {
#define to_sti_hda_connector(x) \
container_of(x, struct sti_hda_connector, drm_connector)
+static struct sti_hda *drm_bridge_to_sti_hda(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sti_hda, bridge);
+}
+
static u32 hda_read(struct sti_hda *hda, int offset)
{
return readl(hda->regs + offset);
@@ -401,7 +407,7 @@ static void sti_hda_configure_awg(struct sti_hda *hda, u32 *awg_instr, int nb)
static void sti_hda_disable(struct drm_bridge *bridge)
{
- struct sti_hda *hda = bridge->driver_private;
+ struct sti_hda *hda = drm_bridge_to_sti_hda(bridge);
u32 val;
if (!hda->enabled)
@@ -426,7 +432,7 @@ static void sti_hda_disable(struct drm_bridge *bridge)
static void sti_hda_pre_enable(struct drm_bridge *bridge)
{
- struct sti_hda *hda = bridge->driver_private;
+ struct sti_hda *hda = drm_bridge_to_sti_hda(bridge);
u32 val, i, mode_idx;
u32 src_filter_y, src_filter_c;
u32 *coef_y, *coef_c;
@@ -517,7 +523,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- struct sti_hda *hda = bridge->driver_private;
+ struct sti_hda *hda = drm_bridge_to_sti_hda(bridge);
u32 mode_idx;
int hddac_rate;
int ret;
@@ -677,7 +683,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder;
struct sti_hda_connector *connector;
struct drm_connector *drm_connector;
- struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
@@ -693,13 +698,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
connector->hda = hda;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge)
- return -ENOMEM;
-
- bridge->driver_private = hda;
- bridge->funcs = &sti_hda_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL, 0);
+ drm_bridge_attach(encoder, &hda->bridge, NULL, 0);
connector->encoder = encoder;
@@ -745,9 +744,9 @@ static int sti_hda_probe(struct platform_device *pdev)
DRM_INFO("%s\n", __func__);
- hda = devm_kzalloc(dev, sizeof(*hda), GFP_KERNEL);
- if (!hda)
- return -ENOMEM;
+ hda = devm_drm_bridge_alloc(dev, struct sti_hda, bridge, &sti_hda_bridge_funcs);
+ if (IS_ERR(hda))
+ return PTR_ERR(hda);
hda->dev = pdev->dev;
hda->regs = devm_platform_ioremap_resource_byname(pdev, "hda-reg");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 37b8d619066e..4e7c3d78b2b9 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -168,6 +168,11 @@ struct sti_hdmi_connector {
#define to_sti_hdmi_connector(x) \
container_of(x, struct sti_hdmi_connector, drm_connector)
+static struct sti_hdmi *drm_bridge_to_sti_hdmi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sti_hdmi, bridge);
+}
+
static const struct drm_prop_enum_list colorspace_mode_names[] = {
{ HDMI_COLORSPACE_RGB, "rgb" },
{ HDMI_COLORSPACE_YUV422, "yuv422" },
@@ -749,7 +754,7 @@ static void hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
static void sti_hdmi_disable(struct drm_bridge *bridge)
{
- struct sti_hdmi *hdmi = bridge->driver_private;
+ struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge);
u32 val = hdmi_read(hdmi, HDMI_CFG);
@@ -881,7 +886,7 @@ static int hdmi_audio_configure(struct sti_hdmi *hdmi)
static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
{
- struct sti_hdmi *hdmi = bridge->driver_private;
+ struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge);
DRM_DEBUG_DRIVER("\n");
@@ -936,7 +941,7 @@ static void sti_hdmi_set_mode(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- struct sti_hdmi *hdmi = bridge->driver_private;
+ struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge);
int ret;
DRM_DEBUG_DRIVER("\n");
@@ -1273,7 +1278,6 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct sti_hdmi_connector *connector;
struct cec_connector_info conn_info;
struct drm_connector *drm_connector;
- struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
@@ -1289,13 +1293,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
connector->hdmi = hdmi;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge)
- return -EINVAL;
-
- bridge->driver_private = hdmi;
- bridge->funcs = &sti_hdmi_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL, 0);
+ drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0);
connector->encoder = encoder;
@@ -1385,9 +1383,9 @@ static int sti_hdmi_probe(struct platform_device *pdev)
DRM_INFO("%s\n", __func__);
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(dev, struct sti_hdmi, bridge, &sti_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
if (ddc) {
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 6d4c3f57bc46..91d43dd46f13 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -12,6 +12,7 @@
#include <media/cec-notifier.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>
#include <drm/drm_property.h>
@@ -86,6 +87,7 @@ struct sti_hdmi {
struct hdmi_audio_params audio;
struct drm_connector *drm_connector;
struct cec_notifier *notifier;
+ struct drm_bridge bridge;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
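
The sti conversions above all follow the same pattern: embed struct
drm_bridge in the driver object, allocate it with
devm_drm_bridge_alloc(), and recover the driver object with
container_of(). A hedged, generic sketch with assumed my_* names:

struct my_output {
	struct device *dev;
	struct drm_bridge bridge;
};

static struct my_output *bridge_to_my_output(struct drm_bridge *bridge)
{
	return container_of(bridge, struct my_output, bridge);
}

static int my_probe(struct platform_device *pdev)
{
	struct my_output *out;

	out = devm_drm_bridge_alloc(&pdev->dev, struct my_output, bridge,
				    &my_bridge_funcs);
	if (IS_ERR(out))
		return PTR_ERR(out);

	out->bridge.of_node = pdev->dev.of_node;
	drm_bridge_add(&out->bridge);

	return 0;
}
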
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index a3ae9a93ce66..07788e8d3d83 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -1049,9 +1049,9 @@ static int lvds_probe(struct platform_device *pdev)
dev_dbg(dev, "Probing LVDS driver...\n");
- lvds = devm_kzalloc(dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(dev, struct stm_lvds, lvds_bridge, &lvds_bridge_funcs);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = dev;
@@ -1164,7 +1164,6 @@ static int lvds_probe(struct platform_device *pdev)
goto err_lvds_probe;
}
- lvds->lvds_bridge.funcs = &lvds_bridge_funcs;
lvds->lvds_bridge.of_node = dev->of_node;
lvds->hw_version = lvds_read(lvds, LVDS_VERR);
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index 58480d8e4f70..c100d29b1a89 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -212,7 +212,7 @@ void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
{
u32 base;
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
sun8i_de3_ccsc_set_coefficients(mixer->engine.regs, layer,
mode, encoding, range);
return;
@@ -228,7 +228,7 @@ void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable)
{
u32 base;
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
sun8i_de3_ccsc_enable(mixer->engine.regs, layer, enable);
return;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 8b41d33baa30..31a8409b98f4 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -274,6 +274,7 @@ static void sun8i_mixer_commit(struct sunxi_engine *engine,
{
struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
u32 bld_base = sun8i_blender_base(mixer);
+ struct regmap *bld_regs = sun8i_blender_regmap(mixer);
struct drm_plane_state *plane_state;
struct drm_plane *plane;
u32 route = 0, pipe_en = 0;
@@ -313,12 +314,13 @@ static void sun8i_mixer_commit(struct sunxi_engine *engine,
pipe_en |= SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
}
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE(bld_base), route);
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
+ regmap_write(bld_regs, SUN8I_MIXER_BLEND_ROUTE(bld_base), route);
+ regmap_write(bld_regs, SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
pipe_en | SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
- regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_DBUFF,
- SUN8I_MIXER_GLOBAL_DBUFF_ENABLE);
+ if (mixer->cfg->de_type != SUN8I_MIXER_DE33)
+ regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_DBUFF,
+ SUN8I_MIXER_GLOBAL_DBUFF_ENABLE);
}
static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
@@ -367,25 +369,31 @@ static void sun8i_mixer_mode_set(struct sunxi_engine *engine,
const struct drm_display_mode *mode)
{
struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
+ struct regmap *bld_regs;
u32 bld_base, size, val;
bool interlaced;
bld_base = sun8i_blender_base(mixer);
+ bld_regs = sun8i_blender_regmap(mixer);
interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
size = SUN8I_MIXER_SIZE(mode->hdisplay, mode->vdisplay);
DRM_DEBUG_DRIVER("Updating global size W: %u H: %u\n",
mode->hdisplay, mode->vdisplay);
- regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_SIZE, size);
- regmap_write(engine->regs, SUN8I_MIXER_BLEND_OUTSIZE(bld_base), size);
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ regmap_write(mixer->top_regs, SUN50I_MIXER_GLOBAL_SIZE, size);
+ else
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_SIZE, size);
+
+ regmap_write(bld_regs, SUN8I_MIXER_BLEND_OUTSIZE(bld_base), size);
if (interlaced)
val = SUN8I_MIXER_BLEND_OUTCTL_INTERLACED;
else
val = 0;
- regmap_update_bits(engine->regs, SUN8I_MIXER_BLEND_OUTCTL(bld_base),
+ regmap_update_bits(bld_regs, SUN8I_MIXER_BLEND_OUTCTL(bld_base),
SUN8I_MIXER_BLEND_OUTCTL_INTERLACED, val);
DRM_DEBUG_DRIVER("Switching display mixer interlaced mode %s\n",
@@ -399,12 +407,29 @@ static const struct sunxi_engine_ops sun8i_engine_ops = {
};
static const struct regmap_config sun8i_mixer_regmap_config = {
+ .name = "layers",
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0xffffc, /* guessed */
};
+static const struct regmap_config sun8i_top_regmap_config = {
+ .name = "top",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x3c,
+};
+
+static const struct regmap_config sun8i_disp_regmap_config = {
+ .name = "display",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x20000,
+};
+
static int sun8i_mixer_of_get_id(struct device_node *node)
{
struct device_node *ep, *remote;
@@ -425,6 +450,50 @@ static int sun8i_mixer_of_get_id(struct device_node *node)
return of_ep.id;
}
+static void sun8i_mixer_init(struct sun8i_mixer *mixer)
+{
+ struct regmap *top_regs, *disp_regs;
+ unsigned int base = sun8i_blender_base(mixer);
+ int plane_cnt, i;
+
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
+ top_regs = mixer->top_regs;
+ disp_regs = mixer->disp_regs;
+ } else {
+ top_regs = mixer->engine.regs;
+ disp_regs = mixer->engine.regs;
+ }
+
+ /* Enable the mixer */
+ regmap_write(top_regs, SUN8I_MIXER_GLOBAL_CTL,
+ SUN8I_MIXER_GLOBAL_CTL_RT_EN);
+
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ regmap_write(top_regs, SUN50I_MIXER_GLOBAL_CLK, 1);
+
+ /* Set background color to black */
+ regmap_write(disp_regs, SUN8I_MIXER_BLEND_BKCOLOR(base),
+ SUN8I_MIXER_BLEND_COLOR_BLACK);
+
+ /*
+ * Set fill color of bottom plane to black. Generally not needed
+ * except when VI plane is at bottom (zpos = 0) and enabled.
+ */
+ regmap_write(disp_regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
+ SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
+ regmap_write(disp_regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, 0),
+ SUN8I_MIXER_BLEND_COLOR_BLACK);
+
+ plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
+ for (i = 0; i < plane_cnt; i++)
+ regmap_write(disp_regs,
+ SUN8I_MIXER_BLEND_MODE(base, i),
+ SUN8I_MIXER_BLEND_MODE_DEF);
+
+ regmap_update_bits(disp_regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
+}
+
static int sun8i_mixer_bind(struct device *dev, struct device *master,
void *data)
{
@@ -433,8 +502,6 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
struct sun4i_drv *drv = drm->dev_private;
struct sun8i_mixer *mixer;
void __iomem *regs;
- unsigned int base;
- int plane_cnt;
int i, ret;
/*
@@ -495,6 +562,30 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
return PTR_ERR(mixer->engine.regs);
}
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
+ regs = devm_platform_ioremap_resource_byname(pdev, "top");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ mixer->top_regs = devm_regmap_init_mmio(dev, regs,
+ &sun8i_top_regmap_config);
+ if (IS_ERR(mixer->top_regs)) {
+ dev_err(dev, "Couldn't create the top regmap\n");
+ return PTR_ERR(mixer->top_regs);
+ }
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "display");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ mixer->disp_regs = devm_regmap_init_mmio(dev, regs,
+ &sun8i_disp_regmap_config);
+ if (IS_ERR(mixer->disp_regs)) {
+ dev_err(dev, "Couldn't create the disp regmap\n");
+ return PTR_ERR(mixer->disp_regs);
+ }
+ }
+
mixer->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(mixer->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
@@ -534,10 +625,8 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
list_add_tail(&mixer->engine.list, &drv->engine_list);
- base = sun8i_blender_base(mixer);
-
/* Reset registers and disable unused sub-engines */
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
for (i = 0; i < DE3_MIXER_UNIT_SIZE; i += 4)
regmap_write(mixer->engine.regs, i, 0);
@@ -551,7 +640,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
regmap_write(mixer->engine.regs, SUN50I_MIXER_FMT_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC0_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC1_EN, 0);
- } else {
+ } else if (mixer->cfg->de_type == SUN8I_MIXER_DE2) {
for (i = 0; i < DE2_MIXER_UNIT_SIZE; i += 4)
regmap_write(mixer->engine.regs, i, 0);
@@ -564,31 +653,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
regmap_write(mixer->engine.regs, SUN8I_MIXER_DCSC_EN, 0);
}
- /* Enable the mixer */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_CTL,
- SUN8I_MIXER_GLOBAL_CTL_RT_EN);
-
- /* Set background color to black */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_BKCOLOR(base),
- SUN8I_MIXER_BLEND_COLOR_BLACK);
-
- /*
- * Set fill color of bottom plane to black. Generally not needed
- * except when VI plane is at bottom (zpos = 0) and enabled.
- */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
- SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, 0),
- SUN8I_MIXER_BLEND_COLOR_BLACK);
-
- plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
- for (i = 0; i < plane_cnt; i++)
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_MODE(base, i),
- SUN8I_MIXER_BLEND_MODE_DEF);
-
- regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
- SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
+ sun8i_mixer_init(mixer);
return 0;
@@ -628,6 +693,7 @@ static void sun8i_mixer_remove(struct platform_device *pdev)
static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
.ui_num = 3,
@@ -636,6 +702,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
.ui_num = 1,
@@ -644,6 +711,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 432000000,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
@@ -653,6 +721,7 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
@@ -662,6 +731,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
@@ -670,6 +740,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
};
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
+ .de_type = SUN8I_MIXER_DE2,
.vi_num = 2,
.ui_num = 1,
.scaler_mask = 0x3,
@@ -680,6 +751,7 @@ static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
static const struct sun8i_mixer_cfg sun20i_d1_mixer0_cfg = {
.ccsc = CCSC_D1_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
@@ -689,6 +761,7 @@ static const struct sun8i_mixer_cfg sun20i_d1_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun20i_d1_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x1,
.scanline_yuv = 1024,
@@ -698,6 +771,7 @@ static const struct sun8i_mixer_cfg sun20i_d1_mixer1_cfg = {
static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0xf,
.scanline_yuv = 4096,
@@ -707,6 +781,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
@@ -716,7 +791,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
- .is_de3 = true,
+ .de_type = SUN8I_MIXER_DE3,
.mod_rate = 600000000,
.scaler_mask = 0xf,
.scanline_yuv = 4096,
@@ -724,6 +799,17 @@ static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
.vi_num = 1,
};
+static const struct sun8i_mixer_cfg sun50i_h616_mixer0_cfg = {
+ .ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE33,
+ .mod_rate = 600000000,
+ .scaler_mask = 0xf,
+ .scanline_yuv = 4096,
+ .ui_num = 3,
+ .vi_num = 1,
+ .map = {0, 6, 7, 8},
+};
+
static const struct of_device_id sun8i_mixer_of_table[] = {
{
.compatible = "allwinner,sun8i-a83t-de2-mixer-0",
@@ -769,6 +855,10 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.compatible = "allwinner,sun50i-h6-de3-mixer-0",
.data = &sun50i_h6_mixer0_cfg,
},
+ {
+ .compatible = "allwinner,sun50i-h616-de33-mixer-0",
+ .data = &sun50i_h616_mixer0_cfg,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index d7898c9c9cc0..a1c1cbccc654 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -21,6 +21,9 @@
#define SUN8I_MIXER_GLOBAL_DBUFF 0x8
#define SUN8I_MIXER_GLOBAL_SIZE 0xc
+#define SUN50I_MIXER_GLOBAL_SIZE 0x8
+#define SUN50I_MIXER_GLOBAL_CLK 0xc
+
#define SUN8I_MIXER_GLOBAL_CTL_RT_EN BIT(0)
#define SUN8I_MIXER_GLOBAL_DBUFF_ENABLE BIT(0)
@@ -151,6 +154,12 @@ enum {
CCSC_D1_MIXER0_LAYOUT,
};
+enum sun8i_mixer_type {
+ SUN8I_MIXER_DE2,
+ SUN8I_MIXER_DE3,
+ SUN8I_MIXER_DE33,
+};
+
/**
* struct sun8i_mixer_cfg - mixer HW configuration
* @vi_num: number of VI channels
@@ -162,8 +171,9 @@ enum {
* @ccsc: select set of CCSC base addresses from the enumeration above.
* @mod_rate: module clock rate that needs to be set in order to have
* a functional block.
- * @is_de3: true, if this is next gen display engine 3.0, false otherwise.
+ * @de_type: sun8i_mixer_type enum representing the display engine generation.
* @scanline_yuv: size of a scanline for VI scaler for YUV formats.
+ * @map: channel map for DE variants processing YUV separately (DE33)
*/
struct sun8i_mixer_cfg {
int vi_num;
@@ -171,8 +181,9 @@ struct sun8i_mixer_cfg {
int scaler_mask;
int ccsc;
unsigned long mod_rate;
- unsigned int is_de3 : 1;
+ unsigned int de_type;
unsigned int scanline_yuv;
+ unsigned int map[6];
};
struct sun8i_mixer {
@@ -184,6 +195,9 @@ struct sun8i_mixer {
struct clk *bus_clk;
struct clk *mod_clk;
+
+ struct regmap *top_regs;
+ struct regmap *disp_regs;
};
enum {
@@ -214,13 +228,22 @@ engine_to_sun8i_mixer(struct sunxi_engine *engine)
static inline u32
sun8i_blender_base(struct sun8i_mixer *mixer)
{
- return mixer->cfg->is_de3 ? DE3_BLD_BASE : DE2_BLD_BASE;
+ return mixer->cfg->de_type == SUN8I_MIXER_DE3 ? DE3_BLD_BASE : DE2_BLD_BASE;
+}
+
+static inline struct regmap *
+sun8i_blender_regmap(struct sun8i_mixer *mixer)
+{
+ return mixer->cfg->de_type == SUN8I_MIXER_DE33 ?
+ mixer->disp_regs : mixer->engine.regs;
}
static inline u32
sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
{
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ return mixer->cfg->map[channel] * 0x20000 + DE2_CH_SIZE;
+ else if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_CH_BASE + channel * DE3_CH_SIZE;
else
return DE2_CH_BASE + channel * DE2_CH_SIZE;
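
The enum is deliberately ordered DE2 < DE3 < DE33, which is why the
layer code can select every DE3-or-newer feature path with a single
comparison. A hedged helper spelling the idiom out:

static inline bool sun8i_mixer_is_de3_or_newer(struct sun8i_mixer *mixer)
{
	return mixer->cfg->de_type >= SUN8I_MIXER_DE3;
}
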
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index b90e5edef4e8..f97be0040aab 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -23,6 +23,7 @@
#include "sun8i_mixer.h"
#include "sun8i_ui_layer.h"
#include "sun8i_ui_scaler.h"
+#include "sun8i_vi_scaler.h"
static void sun8i_ui_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
@@ -51,6 +52,7 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
{
struct drm_plane_state *state = plane->state;
u32 src_w, src_h, dst_w, dst_h;
+ struct regmap *bld_regs;
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
@@ -59,6 +61,7 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
channel, overlay);
bld_base = sun8i_blender_base(mixer);
+ bld_regs = sun8i_blender_regmap(mixer);
ch_base = sun8i_channel_base(mixer, channel);
src_w = drm_rect_width(&state->src) >> 16;
@@ -91,22 +94,34 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
hscale = state->src_w / state->crtc_w;
vscale = state->src_h / state->crtc_h;
- sun8i_ui_scaler_setup(mixer, channel, src_w, src_h, dst_w,
- dst_h, hscale, vscale, hphase, vphase);
- sun8i_ui_scaler_enable(mixer, channel, true);
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
+ sun8i_vi_scaler_setup(mixer, channel, src_w, src_h,
+ dst_w, dst_h, hscale, vscale,
+ hphase, vphase,
+ state->fb->format);
+ sun8i_vi_scaler_enable(mixer, channel, true);
+ } else {
+ sun8i_ui_scaler_setup(mixer, channel, src_w, src_h,
+ dst_w, dst_h, hscale, vscale,
+ hphase, vphase);
+ sun8i_ui_scaler_enable(mixer, channel, true);
+ }
} else {
DRM_DEBUG_DRIVER("HW scaling is not needed\n");
- sun8i_ui_scaler_enable(mixer, channel, false);
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ sun8i_vi_scaler_enable(mixer, channel, false);
+ else
+ sun8i_ui_scaler_enable(mixer, channel, false);
}
/* Set base coordinates */
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
index ae0806bccac7..8b7a58e27517 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
@@ -93,7 +93,7 @@ static u32 sun8i_ui_scaler_base(struct sun8i_mixer *mixer, int channel)
{
int vi_num = mixer->cfg->vi_num;
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_VI_SCALER_UNIT_BASE +
DE3_VI_SCALER_UNIT_SIZE * vi_num +
DE3_UI_SCALER_UNIT_SIZE * (channel - vi_num);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 9c09d9c08496..a09ee4097537 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -25,7 +25,7 @@ static void sun8i_vi_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
ch_base = sun8i_channel_base(mixer, channel);
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
mask = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK |
SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_MASK;
val = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA
@@ -55,6 +55,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
struct drm_plane_state *state = plane->state;
const struct drm_format_info *format = state->fb->format;
u32 src_w, src_h, dst_w, dst_h;
+ struct regmap *bld_regs;
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
@@ -66,6 +67,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
channel, overlay);
bld_base = sun8i_blender_base(mixer);
+ bld_regs = sun8i_blender_regmap(mixer);
ch_base = sun8i_channel_base(mixer, channel);
src_w = drm_rect_width(&state->src) >> 16;
@@ -183,10 +185,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
@@ -483,7 +485,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
if (!layer)
return ERR_PTR(-ENOMEM);
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
formats = sun8i_vi_layer_de3_formats;
format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
} else {
@@ -507,7 +509,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
- if (mixer->cfg->vi_num == 1 || mixer->cfg->is_de3) {
+ if (mixer->cfg->vi_num == 1 || mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
ret = drm_plane_create_alpha_property(&layer->plane);
if (ret) {
dev_err(drm->dev, "Couldn't add alpha property\n");
@@ -524,7 +526,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709);
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3)
supported_encodings |= BIT(DRM_COLOR_YCBCR_BT2020);
supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
index 7ba75011adf9..82df6244af88 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
@@ -835,7 +835,9 @@ static const u32 bicubic4coefftab32[480] = {
static u32 sun8i_vi_scaler_base(struct sun8i_mixer *mixer, int channel)
{
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ return sun8i_channel_base(mixer, channel) + 0x3000;
+ else if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_VI_SCALER_UNIT_BASE +
DE3_VI_SCALER_UNIT_SIZE * channel;
else
@@ -956,7 +958,7 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
cvphase = vphase;
}
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
u32 val;
if (format->hsub == 1 && format->vsub == 1)
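
The de_type checks above rely on the mixer generations being declared in ascending order, so an ordered comparison groups DE3 and DE33 while an equality test isolates DE33. A minimal sketch of that assumed layout (the actual enum lives in sun8i_mixer.h):

enum sun8i_mixer_de_type {
	SUN8I_MIXER_DE2,
	SUN8I_MIXER_DE3,
	SUN8I_MIXER_DE33,
};

/*
 * "de_type >= SUN8I_MIXER_DE3" matches both DE3 and DE33 (shared
 * register layout); "de_type == SUN8I_MIXER_DE33" singles out
 * DE33-only details such as the 0x3000 per-channel scaler offset.
 */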
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
index cb08a88242cc..1424b63dde99 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -93,6 +93,10 @@ static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *de
* Plane
*/
+size_t drm_sysfb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out);
+
int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state);
void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
index ffaa2522ab96..1bcdb5ee8f09 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
@@ -47,6 +47,144 @@ EXPORT_SYMBOL(drm_sysfb_mode);
* Plane
*/
+static u32 to_nonalpha_fourcc(u32 fourcc)
+{
+ /* only handle formats with depth != 0 and alpha channel */
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ return DRM_FORMAT_XRGB1555;
+ case DRM_FORMAT_ABGR1555:
+ return DRM_FORMAT_XBGR1555;
+ case DRM_FORMAT_RGBA5551:
+ return DRM_FORMAT_RGBX5551;
+ case DRM_FORMAT_BGRA5551:
+ return DRM_FORMAT_BGRX5551;
+ case DRM_FORMAT_ARGB8888:
+ return DRM_FORMAT_XRGB8888;
+ case DRM_FORMAT_ABGR8888:
+ return DRM_FORMAT_XBGR8888;
+ case DRM_FORMAT_RGBA8888:
+ return DRM_FORMAT_RGBX8888;
+ case DRM_FORMAT_BGRA8888:
+ return DRM_FORMAT_BGRX8888;
+ case DRM_FORMAT_ARGB2101010:
+ return DRM_FORMAT_XRGB2101010;
+ case DRM_FORMAT_ABGR2101010:
+ return DRM_FORMAT_XBGR2101010;
+ case DRM_FORMAT_RGBA1010102:
+ return DRM_FORMAT_RGBX1010102;
+ case DRM_FORMAT_BGRA1010102:
+ return DRM_FORMAT_BGRX1010102;
+ }
+
+ return fourcc;
+}
+
+static bool is_listed_fourcc(const u32 *fourccs, size_t nfourccs, u32 fourcc)
+{
+ const u32 *fourccs_end = fourccs + nfourccs;
+
+ while (fourccs < fourccs_end) {
+ if (*fourccs == fourcc)
+ return true;
+ ++fourccs;
+ }
+ return false;
+}
+
+/**
+ * drm_sysfb_build_fourcc_list - Filters a list of supported color formats against
+ * the device's native formats
+ * @dev: DRM device
+ * @native_fourccs: 4CC codes of natively supported color formats
+ * @native_nfourccs: The number of entries in @native_fourccs
+ * @fourccs_out: Returns 4CC codes of supported color formats
+ * @nfourccs_out: The number of available entries in @fourccs_out
+ *
+ * This function creates a list of supported color formats from the
+ * natively supported formats and additional emulated formats.
+ * Most userspace programs expect at least support for XRGB8888 on the
+ * primary plane. Sysfb devices that have to emulate the format should
+ * use drm_sysfb_build_fourcc_list() to create a list of supported
+ * color formats. The returned list can be handed over to
+ * drm_universal_plane_init() et al. Native formats go before emulated
+ * formats. Native formats with an alpha channel are replaced by
+ * equivalent formats without alpha channel, as primary planes usually
+ * don't support alpha. Other heuristics may be applied to optimize the
+ * sorting order. Formats near the beginning of the list are usually
+ * preferred over formats near the end.
+ *
+ * Returns:
+ * The number of 4CC codes returned in @fourccs_out.
+ */
+size_t drm_sysfb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out)
+{
+ /*
+ * XRGB8888 is the default fallback format for most of userspace
+ * and it's currently the only format that should be emulated for
+ * the primary plane. If another default fallback format is ever
+ * needed, it should be added here.
+ */
+ static const u32 extra_fourccs[] = {
+ DRM_FORMAT_XRGB8888,
+ };
+ static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
+
+ u32 *fourccs = fourccs_out;
+ const u32 *fourccs_end = fourccs_out + nfourccs_out;
+ size_t i;
+
+ /*
+ * The device's native formats go first.
+ */
+
+ for (i = 0; i < native_nfourccs; ++i) {
+ /*
+ * Several DTs, boot loaders and firmware report native
+ * alpha formats that are in fact non-alpha formats. So
+ * replace alpha formats with non-alpha formats.
+ */
+ u32 fourcc = to_nonalpha_fourcc(native_fourccs[i]);
+
+ if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
+ continue; /* skip duplicate entries */
+ } else if (fourccs == fourccs_end) {
+ drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc);
+ continue; /* end of available output buffer */
+ }
+
+ drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
+
+ *fourccs = fourcc;
+ ++fourccs;
+ }
+
+ /*
+ * The extra formats, emulated by the driver, go second.
+ */
+
+ for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
+ u32 fourcc = extra_fourccs[i];
+
+ if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
+ continue; /* skip duplicate and native entries */
+ } else if (fourccs == fourccs_end) {
+ drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
+ continue; /* end of available output buffer */
+ }
+
+ drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
+
+ *fourccs = fourcc;
+ ++fourccs;
+ }
+
+ return fourccs - fourccs_out;
+}
+EXPORT_SYMBOL(drm_sysfb_build_fourcc_list);
+
int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state)
{
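
As a worked example of the helper above (hypothetical caller; the buffer size is arbitrary), a device whose firmware reports only ARGB8888 ends up advertising a single format:

u32 fourccs[8];
u32 native = DRM_FORMAT_ARGB8888;	/* as reported by firmware */
size_t n;

n = drm_sysfb_build_fourcc_list(dev, &native, 1, fourccs, ARRAY_SIZE(fourccs));
/*
 * to_nonalpha_fourcc() rewrites ARGB8888 to XRGB8888 first; the
 * emulated XRGB8888 entry is then skipped as a duplicate, so
 * n == 1 and fourccs[0] == DRM_FORMAT_XRGB8888.
 */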
diff --git a/drivers/gpu/drm/sysfb/efidrm.c b/drivers/gpu/drm/sysfb/efidrm.c
index 46912924636a..1883c4a8604c 100644
--- a/drivers/gpu/drm/sysfb/efidrm.c
+++ b/drivers/gpu/drm/sysfb/efidrm.c
@@ -202,7 +202,7 @@ static struct efidrm_device *efidrm_device_create(struct drm_driver *drv,
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
&format->format, width, height, stride);
-#ifdef CONFIG_X86
+#if defined(CONFIG_FIRMWARE_EDID)
if (drm_edid_header_is_valid(edid_info.dummy) == 8)
sysfb->edid = edid_info.dummy;
#endif
@@ -271,8 +271,8 @@ static struct efidrm_device *efidrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- efi->formats, ARRAY_SIZE(efi->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ efi->formats, ARRAY_SIZE(efi->formats));
primary_plane = &efi->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &efidrm_primary_plane_funcs,
diff --git a/drivers/gpu/drm/sysfb/ofdrm.c b/drivers/gpu/drm/sysfb/ofdrm.c
index fddfe8bea9f7..8d8ab39c5f36 100644
--- a/drivers/gpu/drm/sysfb/ofdrm.c
+++ b/drivers/gpu/drm/sysfb/ofdrm.c
@@ -8,13 +8,13 @@
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fbdev_shmem.h>
-#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -644,36 +644,36 @@ static void ofdrm_qemu_cmap_write(struct ofdrm_device *odev, unsigned char index
writeb(b, data);
}
-static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
- const struct drm_format_info *format)
+static void ofdrm_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ struct drm_device *dev = crtc->dev;
+ struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ odev->funcs->cmap_write(odev, i8, r8, g8, b8);
+}
+
+static void ofdrm_device_fill_gamma(struct ofdrm_device *odev,
+ const struct drm_format_info *format)
{
struct drm_device *dev = &odev->sysfb.dev;
- int i;
+ struct drm_crtc *crtc = &odev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
- /* Use better interpolation, to take 32 values from 0 to 255 */
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
- unsigned char r = i * 8 + i / 4;
- unsigned char g = i * 4 + i / 16;
- unsigned char b = i * 8 + i / 4;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
- unsigned char r = 0;
- unsigned char g = i * 4 + i / 16;
- unsigned char b = 0;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
+ drm_crtc_fill_gamma_565(crtc, ofdrm_set_gamma_lut);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++)
- odev->funcs->cmap_write(odev, i, i, i, i);
+ drm_crtc_fill_gamma_888(crtc, ofdrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -682,42 +682,21 @@ static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
}
}
-static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+static void ofdrm_device_load_gamma(struct ofdrm_device *odev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
struct drm_device *dev = &odev->sysfb.dev;
- int i;
+ struct drm_crtc *crtc = &odev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
- /* Use better interpolation, to take 32 values from lut[0] to lut[255] */
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
- unsigned char r = lut[i * 8 + i / 4].red >> 8;
- unsigned char g = lut[i * 4 + i / 16].green >> 8;
- unsigned char b = lut[i * 8 + i / 4].blue >> 8;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
- unsigned char r = 0;
- unsigned char g = lut[i * 4 + i / 16].green >> 8;
- unsigned char b = 0;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
+ drm_crtc_load_gamma_565_from_888(crtc, lut, ofdrm_set_gamma_lut);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++) {
- unsigned char r = lut[i].red >> 8;
- unsigned char g = lut[i].green >> 8;
- unsigned char b = lut[i].blue >> 8;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
+ drm_crtc_load_gamma_888(crtc, lut, ofdrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -753,9 +732,9 @@ static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_ato
const struct drm_format_info *format = sysfb_crtc_state->format;
if (crtc_state->gamma_lut)
- ofdrm_device_set_gamma(odev, format, crtc_state->gamma_lut->data);
+ ofdrm_device_load_gamma(odev, format, crtc_state->gamma_lut->data);
else
- ofdrm_device_set_gamma_linear(odev, format);
+ ofdrm_device_fill_gamma(odev, format);
}
}
@@ -1035,8 +1014,8 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- odev->formats, ARRAY_SIZE(odev->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ odev->formats, ARRAY_SIZE(odev->formats));
primary_plane = &odev->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &ofdrm_primary_plane_funcs,
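
The conversion above assumes the drm_color_mgmt fill/load helpers walk the palette and hand each entry, widened to 16 bits per channel, to the driver callback. A rough sketch of that assumed contract for the 888 case (not the actual helper implementation):

static void fill_gamma_888_sketch(struct drm_crtc *crtc,
				  void (*set)(struct drm_crtc *crtc,
					      unsigned int index,
					      u16 red, u16 green, u16 blue))
{
	unsigned int i;

	for (i = 0; i < 256; i++) {
		u16 v = (i << 8) | i;	/* replicate 8 bits into 16 */

		set(crtc, i, v, v, v);
	}
}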
diff --git a/drivers/gpu/drm/sysfb/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c
index a1c3119330de..8530a3ef8a7a 100644
--- a/drivers/gpu/drm/sysfb/simpledrm.c
+++ b/drivers/gpu/drm/sysfb/simpledrm.c
@@ -18,7 +18,6 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
-#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -765,8 +764,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- sdev->formats, ARRAY_SIZE(sdev->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ sdev->formats, ARRAY_SIZE(sdev->formats));
primary_plane = &sdev->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &simpledrm_primary_plane_funcs,
diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c
index f7532db3831f..90615e9ac86b 100644
--- a/drivers/gpu/drm/sysfb/vesadrm.c
+++ b/drivers/gpu/drm/sysfb/vesadrm.c
@@ -9,6 +9,7 @@
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
@@ -87,15 +88,10 @@ static struct vesadrm_device *to_vesadrm_device(struct drm_device *dev)
static void vesadrm_vga_cmap_write(struct vesadrm_device *vesa, unsigned int index,
u16 red, u16 green, u16 blue)
{
- u8 i8, r8, g8, b8;
-
- if (index > 255)
- return;
-
- i8 = index;
- r8 = red >> 8;
- g8 = green >> 8;
- b8 = blue >> 8;
+ u8 i8 = index;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
outb_p(i8, VGA_PEL_IW);
outb_p(r8, VGA_PEL_D);
@@ -120,9 +116,6 @@ static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int ind
0x00,
};
- if (index > 255)
- return;
-
__asm__ __volatile__ (
"call *(%%esi)"
: /* no return value */
@@ -135,43 +128,36 @@ static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int ind
}
#endif
-static void vesadrm_set_gamma_linear(struct vesadrm_device *vesa,
- const struct drm_format_info *format)
+static void vesadrm_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vesadrm_device *vesa = to_vesadrm_device(dev);
+ u8 i8 = index & 0xff;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ vesa->cmap_write(vesa, i8, red, green, blue);
+}
+
+static void vesadrm_fill_gamma_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format)
{
struct drm_device *dev = &vesa->sysfb.dev;
- size_t i;
- u16 r16, g16, b16;
+ struct drm_crtc *crtc = &vesa->crtc;
switch (format->format) {
case DRM_FORMAT_XRGB1555:
- for (i = 0; i < 32; ++i) {
- r16 = i * 8 + i / 4;
- r16 |= (r16 << 8) | r16;
- vesa->cmap_write(vesa, i, r16, r16, r16);
- }
+ drm_crtc_fill_gamma_555(crtc, vesadrm_set_gamma_lut);
break;
case DRM_FORMAT_RGB565:
- for (i = 0; i < 32; ++i) {
- r16 = i * 8 + i / 4;
- r16 |= (r16 << 8) | r16;
- g16 = i * 4 + i / 16;
- g16 |= (g16 << 8) | g16;
- b16 = r16;
- vesa->cmap_write(vesa, i, r16, g16, b16);
- }
- for (i = 32; i < 64; ++i) {
- g16 = i * 4 + i / 16;
- g16 |= (g16 << 8) | g16;
- vesa->cmap_write(vesa, i, 0, g16, 0);
- }
+ drm_crtc_fill_gamma_565(crtc, vesadrm_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < 256; ++i) {
- r16 = (i << 8) | i;
- vesa->cmap_write(vesa, i, r16, r16, r16);
- }
+ drm_crtc_fill_gamma_888(crtc, vesadrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -180,38 +166,24 @@ static void vesadrm_set_gamma_linear(struct vesadrm_device *vesa,
}
}
-static void vesadrm_set_gamma_lut(struct vesadrm_device *vesa,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+static void vesadrm_load_gamma_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
struct drm_device *dev = &vesa->sysfb.dev;
- size_t i;
- u16 r16, g16, b16;
+ struct drm_crtc *crtc = &vesa->crtc;
switch (format->format) {
case DRM_FORMAT_XRGB1555:
- for (i = 0; i < 32; ++i) {
- r16 = lut[i * 8 + i / 4].red;
- g16 = lut[i * 8 + i / 4].green;
- b16 = lut[i * 8 + i / 4].blue;
- vesa->cmap_write(vesa, i, r16, g16, b16);
- }
+ drm_crtc_load_gamma_555_from_888(crtc, lut, vesadrm_set_gamma_lut);
break;
case DRM_FORMAT_RGB565:
- for (i = 0; i < 32; ++i) {
- r16 = lut[i * 8 + i / 4].red;
- g16 = lut[i * 4 + i / 16].green;
- b16 = lut[i * 8 + i / 4].blue;
- vesa->cmap_write(vesa, i, r16, g16, b16);
- }
- for (i = 32; i < 64; ++i)
- vesa->cmap_write(vesa, i, 0, lut[i * 4 + i / 16].green, 0);
+ drm_crtc_load_gamma_565_from_888(crtc, lut, vesadrm_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < 256; ++i)
- vesa->cmap_write(vesa, i, lut[i].red, lut[i].green, lut[i].blue);
+ drm_crtc_load_gamma_888(crtc, lut, vesadrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -253,13 +225,13 @@ static void vesadrm_crtc_helper_atomic_flush(struct drm_crtc *crtc,
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
if (sysfb_crtc_state->format == sysfb->fb_format) {
if (crtc_state->gamma_lut)
- vesadrm_set_gamma_lut(vesa,
- sysfb_crtc_state->format,
- crtc_state->gamma_lut->data);
+ vesadrm_load_gamma_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
else
- vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
} else {
- vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
}
}
}
@@ -377,7 +349,7 @@ static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv,
drm_warn(dev, "hardware palette is unchangeable, colors may be incorrect\n");
}
-#ifdef CONFIG_X86
+#if defined(CONFIG_FIRMWARE_EDID)
if (drm_edid_header_is_valid(edid_info.dummy) == 8)
sysfb->edid = edid_info.dummy;
#endif
@@ -435,8 +407,8 @@ static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- vesa->formats, ARRAY_SIZE(vesa->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ vesa->formats, ARRAY_SIZE(vesa->formats));
primary_plane = &vesa->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &vesadrm_primary_plane_funcs,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 0b65e69f3a8a..1dd3670f37db 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -185,11 +185,13 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
struct tegra_bo_tiling *tiling);
struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct tegra_bo **planes,
unsigned int num_planes);
struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd);
#ifdef CONFIG_DRM_FBDEV_EMULATION
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 46170753699d..dd041089f797 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -102,6 +102,7 @@ static const struct drm_framebuffer_funcs tegra_fb_funcs = {
};
struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct tegra_bo **planes,
unsigned int num_planes)
@@ -114,7 +115,7 @@ struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
if (!fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(drm, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(drm, fb, info, mode_cmd);
for (i = 0; i < fb->format->num_planes; i++)
fb->obj[i] = &planes[i]->gem;
@@ -132,9 +133,9 @@ struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd)
{
- const struct drm_format_info *info = drm_get_format_info(drm, cmd);
struct tegra_bo *planes[4];
struct drm_gem_object *gem;
struct drm_framebuffer *fb;
@@ -166,7 +167,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
planes[i] = to_tegra_bo(gem);
}
- fb = tegra_fb_alloc(drm, cmd, planes, i);
+ fb = tegra_fb_alloc(drm, info, cmd, planes, i);
if (IS_ERR(fb)) {
err = PTR_ERR(fb);
goto unreference;
diff --git a/drivers/gpu/drm/tegra/fbdev.c b/drivers/gpu/drm/tegra/fbdev.c
index cd9d798f8870..1b70f5e164af 100644
--- a/drivers/gpu/drm/tegra/fbdev.c
+++ b/drivers/gpu/drm/tegra/fbdev.c
@@ -106,7 +106,9 @@ int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
return PTR_ERR(info);
}
- fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
+ fb = tegra_fb_alloc(drm,
+ drm_get_format_info(drm, cmd.pixel_format, cmd.modifier[0]),
+ &cmd, &bo, 1);
if (IS_ERR(fb)) {
err = PTR_ERR(fb);
dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index dbc1394f96b8..41a285ec889f 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -523,10 +523,10 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
if (tegra->domain) {
tegra_bo_iommu_unmap(tegra, bo);
- if (gem->import_attach) {
+ if (drm_gem_is_imported(gem)) {
dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
DMA_TO_DEVICE);
- dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
+ dma_buf_detach(gem->dma_buf, gem->import_attach);
}
}
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 3afd6587df08..c0e952293ad0 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_modes_test.o \
drm_plane_helper_test.o \
drm_probe_helper_test.o \
- drm_rect_test.o
+ drm_rect_test.o \
+ drm_sysfb_modeset_test.o
CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/gpu/drm/tests/drm_bridge_test.c b/drivers/gpu/drm/tests/drm_bridge_test.c
index ff88ec2e911c..887020141c7f 100644
--- a/drivers/gpu/drm/tests/drm_bridge_test.c
+++ b/drivers/gpu/drm/tests/drm_bridge_test.c
@@ -8,36 +8,64 @@
#include <drm/drm_bridge_helper.h>
#include <drm/drm_kunit_helpers.h>
+#include <kunit/device.h>
#include <kunit/test.h>
+/*
+ * Mimic the typical "private" struct defined by a bridge driver,
+ * which embeds a bridge plus other fields.
+ *
+ * Having at least one member before @bridge ensures we test a
+ * non-zero @bridge offset.
+ */
+struct drm_bridge_priv {
+ unsigned int enable_count;
+ unsigned int disable_count;
+ struct drm_bridge bridge;
+ void *data;
+};
+
struct drm_bridge_init_priv {
struct drm_device drm;
+ /** @dev: device, only for tests not needing a whole drm_device */
+ struct device *dev;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_encoder encoder;
- struct drm_bridge bridge;
+ struct drm_bridge_priv *test_bridge;
struct drm_connector *connector;
- unsigned int enable_count;
- unsigned int disable_count;
+ bool destroyed;
};
+static struct drm_bridge_priv *bridge_to_priv(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct drm_bridge_priv, bridge);
+}
+
+static void drm_test_bridge_priv_destroy(struct drm_bridge *bridge)
+{
+ struct drm_bridge_priv *bridge_priv = bridge_to_priv(bridge);
+ struct drm_bridge_init_priv *priv = (struct drm_bridge_init_priv *)bridge_priv->data;
+
+ priv->destroyed = true;
+}
+
static void drm_test_bridge_enable(struct drm_bridge *bridge)
{
- struct drm_bridge_init_priv *priv =
- container_of(bridge, struct drm_bridge_init_priv, bridge);
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
priv->enable_count++;
}
static void drm_test_bridge_disable(struct drm_bridge *bridge)
{
- struct drm_bridge_init_priv *priv =
- container_of(bridge, struct drm_bridge_init_priv, bridge);
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
priv->disable_count++;
}
static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = {
+ .destroy = drm_test_bridge_priv_destroy,
.enable = drm_test_bridge_enable,
.disable = drm_test_bridge_disable,
};
@@ -45,8 +73,7 @@ static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = {
static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
- struct drm_bridge_init_priv *priv =
- container_of(bridge, struct drm_bridge_init_priv, bridge);
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
priv->enable_count++;
}
@@ -54,13 +81,13 @@ static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge,
static void drm_test_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
- struct drm_bridge_init_priv *priv =
- container_of(bridge, struct drm_bridge_init_priv, bridge);
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
priv->disable_count++;
}
static const struct drm_bridge_funcs drm_test_bridge_atomic_funcs = {
+ .destroy = drm_test_bridge_priv_destroy,
.atomic_enable = drm_test_bridge_atomic_enable,
.atomic_disable = drm_test_bridge_atomic_disable,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -102,6 +129,12 @@ drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs)
if (IS_ERR(priv))
return ERR_CAST(priv);
+ priv->test_bridge = devm_drm_bridge_alloc(dev, struct drm_bridge_priv, bridge, funcs);
+ if (IS_ERR(priv->test_bridge))
+ return ERR_CAST(priv->test_bridge);
+
+ priv->test_bridge->data = priv;
+
drm = &priv->drm;
priv->plane = drm_kunit_helper_create_primary_plane(test, drm,
NULL,
@@ -125,9 +158,8 @@ drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs)
enc->possible_crtcs = drm_crtc_mask(priv->crtc);
- bridge = &priv->bridge;
+ bridge = &priv->test_bridge->bridge;
bridge->type = DRM_MODE_CONNECTOR_VIRTUAL;
- bridge->funcs = funcs;
ret = drm_kunit_bridge_add(test, bridge);
if (ret)
@@ -173,7 +205,7 @@ static void drm_test_drm_bridge_get_current_state_atomic(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
retry_commit:
- bridge = &priv->bridge;
+ bridge = &priv->test_bridge->bridge;
bridge_state = drm_atomic_get_bridge_state(state, bridge);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bridge_state);
@@ -228,7 +260,7 @@ static void drm_test_drm_bridge_get_current_state_legacy(struct kunit *test)
* locking. The function would return NULL in all cases anyway,
* so we don't really have any concurrency to worry about.
*/
- bridge = &priv->bridge;
+ bridge = &priv->test_bridge->bridge;
KUNIT_EXPECT_NULL(test, drm_bridge_get_current_state(bridge));
}
@@ -253,7 +285,7 @@ static void drm_test_drm_bridge_helper_reset_crtc_atomic(struct kunit *test)
struct drm_modeset_acquire_ctx ctx;
struct drm_bridge_init_priv *priv;
struct drm_display_mode *mode;
- struct drm_bridge *bridge;
+ struct drm_bridge_priv *bridge_priv;
int ret;
priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
@@ -279,14 +311,14 @@ retry_commit:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- bridge = &priv->bridge;
- KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
- KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+ bridge_priv = priv->test_bridge;
+ KUNIT_ASSERT_EQ(test, bridge_priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, bridge_priv->disable_count, 0);
drm_modeset_acquire_init(&ctx, 0);
retry_reset:
- ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ ret = drm_bridge_helper_reset_crtc(&bridge_priv->bridge, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry_reset;
@@ -296,8 +328,8 @@ retry_reset:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
- KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+ KUNIT_EXPECT_EQ(test, bridge_priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, bridge_priv->disable_count, 1);
}
/*
@@ -309,7 +341,7 @@ static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *
struct drm_modeset_acquire_ctx ctx;
struct drm_bridge_init_priv *priv;
struct drm_display_mode *mode;
- struct drm_bridge *bridge;
+ struct drm_bridge_priv *bridge_priv;
int ret;
priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
@@ -318,14 +350,14 @@ static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *
mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
- bridge = &priv->bridge;
- KUNIT_ASSERT_EQ(test, priv->enable_count, 0);
- KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+ bridge_priv = priv->test_bridge;
+ KUNIT_ASSERT_EQ(test, bridge_priv->enable_count, 0);
+ KUNIT_ASSERT_EQ(test, bridge_priv->disable_count, 0);
drm_modeset_acquire_init(&ctx, 0);
retry_reset:
- ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ ret = drm_bridge_helper_reset_crtc(&bridge_priv->bridge, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry_reset;
@@ -335,8 +367,8 @@ retry_reset:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- KUNIT_EXPECT_EQ(test, priv->enable_count, 0);
- KUNIT_EXPECT_EQ(test, priv->disable_count, 0);
+ KUNIT_EXPECT_EQ(test, bridge_priv->enable_count, 0);
+ KUNIT_EXPECT_EQ(test, bridge_priv->disable_count, 0);
}
/*
@@ -348,7 +380,7 @@ static void drm_test_drm_bridge_helper_reset_crtc_legacy(struct kunit *test)
struct drm_modeset_acquire_ctx ctx;
struct drm_bridge_init_priv *priv;
struct drm_display_mode *mode;
- struct drm_bridge *bridge;
+ struct drm_bridge_priv *bridge_priv;
int ret;
priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
@@ -374,14 +406,14 @@ retry_commit:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- bridge = &priv->bridge;
- KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
- KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+ bridge_priv = priv->test_bridge;
+ KUNIT_ASSERT_EQ(test, bridge_priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, bridge_priv->disable_count, 0);
drm_modeset_acquire_init(&ctx, 0);
retry_reset:
- ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ ret = drm_bridge_helper_reset_crtc(&bridge_priv->bridge, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry_reset;
@@ -391,8 +423,8 @@ retry_reset:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
- KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+ KUNIT_EXPECT_EQ(test, bridge_priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, bridge_priv->disable_count, 1);
}
static struct kunit_case drm_bridge_helper_reset_crtc_tests[] = {
@@ -407,11 +439,83 @@ static struct kunit_suite drm_bridge_helper_reset_crtc_test_suite = {
.test_cases = drm_bridge_helper_reset_crtc_tests,
};
+static int drm_test_bridge_alloc_init(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ priv->dev = kunit_device_register(test, "drm-bridge-dev");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ test->priv = priv;
+
+ priv->test_bridge = devm_drm_bridge_alloc(priv->dev, struct drm_bridge_priv, bridge,
+ &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->test_bridge);
+
+ priv->test_bridge->data = priv;
+
+ KUNIT_ASSERT_FALSE(test, priv->destroyed);
+
+ return 0;
+}
+
+/*
+ * Test that a bridge is freed when the device is destroyed, in the
+ * absence of other drm_bridge_get/put() operations.
+ */
+static void drm_test_drm_bridge_alloc_basic(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv = test->priv;
+
+ KUNIT_ASSERT_FALSE(test, priv->destroyed);
+
+ kunit_device_unregister(test, priv->dev);
+ KUNIT_EXPECT_TRUE(test, priv->destroyed);
+}
+
+/*
+ * Test that a bridge is not freed when the device is destroyed while
+ * there is still a reference to it, and is freed once that reference
+ * is put.
+ */
+static void drm_test_drm_bridge_alloc_get_put(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv = test->priv;
+
+ KUNIT_ASSERT_FALSE(test, priv->destroyed);
+
+ drm_bridge_get(&priv->test_bridge->bridge);
+ KUNIT_EXPECT_FALSE(test, priv->destroyed);
+
+ kunit_device_unregister(test, priv->dev);
+ KUNIT_EXPECT_FALSE(test, priv->destroyed);
+
+ drm_bridge_put(&priv->test_bridge->bridge);
+ KUNIT_EXPECT_TRUE(test, priv->destroyed);
+}
+
+static struct kunit_case drm_bridge_alloc_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_alloc_basic),
+ KUNIT_CASE(drm_test_drm_bridge_alloc_get_put),
+ { }
+};
+
+static struct kunit_suite drm_bridge_alloc_test_suite = {
+ .name = "drm_bridge_alloc",
+ .init = drm_test_bridge_alloc_init,
+ .test_cases = drm_bridge_alloc_tests,
+};
+
kunit_test_suites(
&drm_bridge_get_current_state_test_suite,
&drm_bridge_helper_reset_crtc_test_suite,
+ &drm_bridge_alloc_test_suite,
);
MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
+
MODULE_DESCRIPTION("Kunit test for drm_bridge functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 35cd3405d045..7299fa8971ce 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -279,9 +279,9 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = {
0xFF, 0x00,
- 0x4C, 0x99,
- 0x19, 0x66,
- 0xE5, 0xB2,
+ 0x4C, 0x95,
+ 0x1C, 0x69,
+ 0xE2, 0xB2,
},
},
.rgb332_result = {
@@ -430,9 +430,9 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.gray8_result = {
.dst_pitch = 5,
.expected = {
- 0x3C, 0x33, 0xC4, 0x00, 0x00,
- 0xBB, 0x3C, 0x33, 0x00, 0x00,
- 0x34, 0xBB, 0x3C, 0x00, 0x00,
+ 0x3D, 0x32, 0xC1, 0x00, 0x00,
+ 0xBA, 0x3D, 0x32, 0x00, 0x00,
+ 0x34, 0xBA, 0x3D, 0x00, 0x00,
},
},
.rgb332_result = {
@@ -735,27 +735,22 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
NULL : &result->dst_pitch;
drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip,
- &fmtcnv_state, false);
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr; /* restore original value of buf */
- drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip,
- &fmtcnv_state, true);
+ drm_fb_xrgb8888_to_rgb565be(&dst, &result->dst_pitch, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected_swab, dst_size);
buf = dst.vaddr;
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, &params->clip,
+ drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip,
&fmtcnv_state);
-
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -795,14 +790,8 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -842,14 +831,8 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -889,14 +872,8 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -939,12 +916,7 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
- KUNIT_EXPECT_FALSE(test, blit_result);
+ drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -985,12 +957,8 @@ static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, &result->dst_pitch, DRM_FORMAT_BGR888, &src, &fb, &params->clip,
+ drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, &params->clip,
&fmtcnv_state);
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1030,14 +998,8 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1077,12 +1039,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB2101010, &src, &fb,
- &params->clip, &fmtcnv_state);
-
- KUNIT_EXPECT_FALSE(test, blit_result);
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1122,14 +1079,8 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB2101010, &src, &fb,
- &params->clip, &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1202,23 +1153,15 @@ static void drm_test_fb_swab(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888 | DRM_FORMAT_BIG_ENDIAN,
- &src, &fb, &params->clip, &fmtcnv_state);
+ drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr;
memset(buf, 0, dst_size);
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, &params->clip,
- &fmtcnv_state);
+ drm_fb_xrgb8888_to_bgrx8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr;
@@ -1229,11 +1172,8 @@ static void drm_test_fb_swab(struct kunit *test)
mock_format.format |= DRM_FORMAT_BIG_ENDIAN;
fb.format = &mock_format;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, &params->clip,
- &fmtcnv_state);
+ drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1266,14 +1206,8 @@ static void drm_test_fb_xrgb8888_to_abgr8888(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_abgr8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1306,14 +1240,8 @@ static void drm_test_fb_xrgb8888_to_xbgr8888(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_xbgr8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1407,147 +1335,6 @@ static void drm_test_fb_clip_offset(struct kunit *test)
KUNIT_EXPECT_EQ(test, offset, params->expected_offset);
}
-struct fb_build_fourcc_list_case {
- const char *name;
- u32 native_fourccs[TEST_BUF_SIZE];
- size_t native_fourccs_size;
- u32 expected[TEST_BUF_SIZE];
- size_t expected_fourccs_size;
-};
-
-static struct fb_build_fourcc_list_case fb_build_fourcc_list_cases[] = {
- {
- .name = "no native formats",
- .native_fourccs = { },
- .native_fourccs_size = 0,
- .expected = { DRM_FORMAT_XRGB8888 },
- .expected_fourccs_size = 1,
- },
- {
- .name = "XRGB8888 as native format",
- .native_fourccs = { DRM_FORMAT_XRGB8888 },
- .native_fourccs_size = 1,
- .expected = { DRM_FORMAT_XRGB8888 },
- .expected_fourccs_size = 1,
- },
- {
- .name = "remove duplicates",
- .native_fourccs = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- },
- .native_fourccs_size = 11,
- .expected = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
- },
- .expected_fourccs_size = 3,
- },
- {
- .name = "convert alpha formats",
- .native_fourccs = {
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_RGBA5551,
- DRM_FORMAT_BGRA5551,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_RGBA8888,
- DRM_FORMAT_BGRA8888,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_RGBA1010102,
- DRM_FORMAT_BGRA1010102,
- },
- .native_fourccs_size = 12,
- .expected = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_XBGR1555,
- DRM_FORMAT_RGBX5551,
- DRM_FORMAT_BGRX5551,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_RGBX8888,
- DRM_FORMAT_BGRX8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_RGBX1010102,
- DRM_FORMAT_BGRX1010102,
- },
- .expected_fourccs_size = 12,
- },
- {
- .name = "random formats",
- .native_fourccs = {
- DRM_FORMAT_Y212,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ABGR16161616F,
- DRM_FORMAT_C8,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGBA5551,
- DRM_FORMAT_BGR565_A8,
- DRM_FORMAT_R10,
- DRM_FORMAT_XYUV8888,
- },
- .native_fourccs_size = 10,
- .expected = {
- DRM_FORMAT_Y212,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ABGR16161616F,
- DRM_FORMAT_C8,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_RGBX5551,
- DRM_FORMAT_BGR565_A8,
- DRM_FORMAT_R10,
- DRM_FORMAT_XYUV8888,
- DRM_FORMAT_XRGB8888,
- },
- .expected_fourccs_size = 10,
- },
-};
-
-static void fb_build_fourcc_list_case_desc(struct fb_build_fourcc_list_case *t, char *desc)
-{
- strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
-}
-
-KUNIT_ARRAY_PARAM(fb_build_fourcc_list, fb_build_fourcc_list_cases, fb_build_fourcc_list_case_desc);
-
-static void drm_test_fb_build_fourcc_list(struct kunit *test)
-{
- const struct fb_build_fourcc_list_case *params = test->param_value;
- u32 fourccs_out[TEST_BUF_SIZE] = {0};
- size_t nfourccs_out;
- struct drm_device *drm;
- struct device *dev;
-
- dev = drm_kunit_helper_alloc_device(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
-
- drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
-
- nfourccs_out = drm_fb_build_fourcc_list(drm, params->native_fourccs,
- params->native_fourccs_size,
- fourccs_out, TEST_BUF_SIZE);
-
- KUNIT_EXPECT_EQ(test, nfourccs_out, params->expected_fourccs_size);
- KUNIT_EXPECT_MEMEQ(test, fourccs_out, params->expected, TEST_BUF_SIZE);
-}
-
struct fb_memcpy_case {
const char *name;
u32 format;
@@ -1910,12 +1697,8 @@ static void drm_test_fb_memcpy(struct kunit *test)
memset(buf[i], 0, dst_size[i]);
}
- int blit_result;
-
- blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, &params->clip,
- &fmtcnv_state);
+ drm_fb_memcpy(dst, dst_pitches, src, &fb, &params->clip);
- KUNIT_EXPECT_FALSE(test, blit_result);
for (size_t i = 0; i < fb.format->num_planes; i++) {
expected[i] = cpubuf_to_le32(test, params->expected[i], TEST_BUF_SIZE);
KUNIT_EXPECT_MEMEQ_MSG(test, buf[i], expected[i], dst_size[i],
@@ -1940,7 +1723,6 @@ static struct kunit_case drm_format_helper_test_cases[] = {
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xbgr8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_abgr8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_clip_offset, clip_offset_gen_params),
- KUNIT_CASE_PARAM(drm_test_fb_build_fourcc_list, fb_build_fourcc_list_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_memcpy, fb_memcpy_gen_params),
{}
};
diff --git a/drivers/gpu/drm/tests/drm_framebuffer_test.c b/drivers/gpu/drm/tests/drm_framebuffer_test.c
index 6ea04cc8f324..9b8e01e8cd91 100644
--- a/drivers/gpu/drm/tests/drm_framebuffer_test.c
+++ b/drivers/gpu/drm/tests/drm_framebuffer_test.c
@@ -363,6 +363,7 @@ struct drm_framebuffer_test_priv {
static struct drm_framebuffer *fb_create_mock(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_framebuffer_test_priv *priv = container_of(dev, typeof(*priv), dev);
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index 7ffd666753b1..8bd412735000 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -33,7 +33,7 @@ struct drm_atomic_helper_connector_hdmi_priv {
struct drm_encoder encoder;
struct drm_connector connector;
- const char *current_edid;
+ const void *current_edid;
size_t current_edid_len;
};
@@ -56,7 +56,7 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
}
static int set_connector_edid(struct kunit *test, struct drm_connector *connector,
- const char *edid, size_t edid_len)
+ const void *edid, size_t edid_len)
{
struct drm_atomic_helper_connector_hdmi_priv *priv =
connector_to_priv(connector);
@@ -89,15 +89,15 @@ static const struct drm_connector_hdmi_funcs reject_connector_hdmi_funcs = {
};
static enum drm_mode_status
-reject_100MHz_connector_tmds_char_rate_valid(const struct drm_connector *connector,
+reject_100mhz_connector_tmds_char_rate_valid(const struct drm_connector *connector,
const struct drm_display_mode *mode,
unsigned long long tmds_rate)
{
return (tmds_rate > 100ULL * 1000 * 1000) ? MODE_BAD : MODE_OK;
}
-static const struct drm_connector_hdmi_funcs reject_100_MHz_connector_hdmi_funcs = {
- .tmds_char_rate_valid = reject_100MHz_connector_tmds_char_rate_valid,
+static const struct drm_connector_hdmi_funcs reject_100mhz_connector_hdmi_funcs = {
+ .tmds_char_rate_valid = reject_100mhz_connector_tmds_char_rate_valid,
};
static int dummy_connector_get_modes(struct drm_connector *connector)
@@ -140,10 +140,11 @@ static const struct drm_connector_funcs dummy_connector_funcs = {
static
struct drm_atomic_helper_connector_hdmi_priv *
-drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
- unsigned int formats,
- unsigned int max_bpc,
- const struct drm_connector_hdmi_funcs *hdmi_funcs)
+__connector_hdmi_init(struct kunit *test,
+ unsigned int formats,
+ unsigned int max_bpc,
+ const struct drm_connector_hdmi_funcs *hdmi_funcs,
+ const void *edid_data, size_t edid_len)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
@@ -182,6 +183,8 @@ drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
enc->possible_crtcs = drm_crtc_mask(priv->crtc);
conn = &priv->connector;
+ conn->ycbcr_420_allowed = !!(formats & BIT(HDMI_COLORSPACE_YUV420));
+
ret = drmm_connector_hdmi_init(drm, conn,
"Vendor", "Product",
&dummy_connector_funcs,
@@ -197,29 +200,28 @@ drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
drm_mode_config_reset(drm);
+ if (edid_data && edid_len) {
+ ret = set_connector_edid(test, &priv->connector, edid_data, edid_len);
+ KUNIT_ASSERT_GT(test, ret, 0);
+ }
+
return priv;
}
+#define drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test, formats, max_bpc, funcs, edid) \
+ __connector_hdmi_init(test, formats, max_bpc, funcs, edid, ARRAY_SIZE(edid))
+
static
struct drm_atomic_helper_connector_hdmi_priv *
drm_kunit_helper_connector_hdmi_init(struct kunit *test,
unsigned int formats,
unsigned int max_bpc)
{
- struct drm_atomic_helper_connector_hdmi_priv *priv;
- int ret;
-
- priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
- formats, max_bpc,
- &dummy_connector_hdmi_funcs);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
-
- ret = set_connector_edid(test, &priv->connector,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
- return priv;
+ return drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ formats,
+ max_bpc,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
}
/*
@@ -414,7 +416,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
@@ -474,7 +476,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range);
@@ -533,7 +535,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -595,7 +597,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -658,7 +660,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -720,7 +722,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -734,6 +736,107 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
}
/*
+ * Test that for an HDMI connector, with an HDMI monitor, we will
+ * get a limited RGB Quantization Range with a YUV420 mode, no
+ * matter what the value of the Broadcast RGB property is set to.
+ */
+static void drm_test_check_broadcast_rgb_cea_mode_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ enum drm_hdmi_broadcast_rgb broadcast_rgb;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_atomic_state *state;
+ struct drm_display_mode *mode;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ broadcast_rgb = *(enum drm_hdmi_broadcast_rgb *)test->param_value;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV420),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 95);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ mode, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+retry_conn_state:
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ if (PTR_ERR(conn_state) == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_state;
+ }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ conn_state->hdmi.broadcast_rgb = broadcast_rgb;
+
+ ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_state;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ KUNIT_ASSERT_EQ(test, conn_state->hdmi.broadcast_rgb, broadcast_rgb);
+ KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_YUV420);
+
+ KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static const enum drm_hdmi_broadcast_rgb check_broadcast_rgb_cea_mode_yuv420_tests[] = {
+ DRM_HDMI_BROADCAST_RGB_AUTO,
+ DRM_HDMI_BROADCAST_RGB_FULL,
+ DRM_HDMI_BROADCAST_RGB_LIMITED,
+};
+
+static void
+check_broadcast_rgb_cea_mode_yuv420_desc(const enum drm_hdmi_broadcast_rgb *broadcast_rgb,
+ char *desc)
+{
+ sprintf(desc, "%s", drm_hdmi_connector_get_broadcast_rgb_name(*broadcast_rgb));
+}
+
+KUNIT_ARRAY_PARAM(check_broadcast_rgb_cea_mode_yuv420,
+ check_broadcast_rgb_cea_mode_yuv420_tests,
+ check_broadcast_rgb_cea_mode_yuv420_desc);
+
+/*
* Test that if we change the maximum bpc property to a different value,
* we trigger a mode change on the connector's CRTC, which will in turn
* disable/enable the connector.
@@ -752,19 +855,16 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -831,19 +931,16 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -905,21 +1002,18 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_dvi_1080p);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_dvi_1080p,
- ARRAY_SIZE(test_edid_dvi_1080p));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_FALSE(test, info->is_hdmi);
@@ -959,19 +1053,16 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1011,19 +1102,16 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1063,19 +1151,16 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1168,7 +1253,7 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
* Then we will pick the latter, and the computed TMDS character rate
* will be equal to 1.25 times the mode pixel clock.
*/
-static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
+static void drm_test_check_max_tmds_rate_bpc_fallback_rgb(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_modeset_acquire_ctx ctx;
@@ -1181,19 +1266,16 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1229,6 +1311,80 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
/*
* Test that if:
+ * - We have an HDMI connector and a display supporting both RGB and YUV420
+ * - The chosen mode can be supported in YUV420 output format only
+ * - The chosen mode has a TMDS character rate higher than the display
+ * supports in YUV420/12bpc
+ * - The chosen mode has a TMDS character rate lower than the display
+ * supports in YUV420/10bpc.
+ *
+ * Then we will pick the latter, and the computed TMDS character rate
+ * will be equal to 1.25 * 0.5 times the mode pixel clock.
+ */
+static void drm_test_check_max_tmds_rate_bpc_fallback_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_display_info *info;
+ struct drm_display_mode *yuv420_only_mode;
+ unsigned long long rate;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV420),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ info = &conn->display_info;
+ KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ KUNIT_ASSERT_TRUE(test, conn->ycbcr_420_allowed);
+
+ yuv420_only_mode = drm_kunit_display_mode_from_cea_vic(test, drm, 95);
+ KUNIT_ASSERT_NOT_NULL(test, yuv420_only_mode);
+ KUNIT_ASSERT_TRUE(test, drm_mode_is_420_only(info, yuv420_only_mode));
+
+ rate = drm_hdmi_compute_mode_clock(yuv420_only_mode, 12, HDMI_COLORSPACE_YUV420);
+ KUNIT_ASSERT_GT(test, rate, info->max_tmds_clock * 1000);
+
+ rate = drm_hdmi_compute_mode_clock(yuv420_only_mode, 10, HDMI_COLORSPACE_YUV420);
+ KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ yuv420_only_mode, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+ KUNIT_ASSERT_NOT_NULL(test, conn_state);
+
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 10);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_YUV420);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, yuv420_only_mode->clock * 625);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
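
The "1.25 * 0.5" factor in the comment and the "clock * 625" expectation above
are the same arithmetic written two ways: YUV420 halves the TMDS character
rate, 10 bpc scales it by 10/8, and mode->clock is in kHz while the character
rate is in Hz. A hypothetical helper making the math explicit (a sketch only;
drm_hdmi_compute_mode_clock() is the real implementation):

	static unsigned long long sketch_yuv420_rate(int clock_khz, int bpc)
	{
		/* kHz -> Hz, halved for YUV420, scaled by bpc/8 for deep color */
		return (unsigned long long)clock_khz * 1000 / 2 * bpc / 8;
	}

For VIC 95 (3840x2160@30, 297000 kHz): 12 bpc gives 222,750,000 Hz, above the
EDID's 200 MHz limit, while 10 bpc gives 185,625,000 Hz = clock * 625, which
is exactly what the KUNIT_EXPECT_EQ() on tmds_char_rate checks.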
+
+/*
+ * Test that if:
* - We have an HDMI connector supporting both RGB and YUV422 and up to
* 12 bpc
* - The chosen mode has a TMDS character rate higher than the display
@@ -1240,7 +1396,7 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
* Then we will prefer to keep the RGB format with a lower bpc over
* picking YUV422.
*/
-static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
+static void drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv422(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_modeset_acquire_ctx ctx;
@@ -1253,21 +1409,18 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1304,6 +1457,170 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
}
/*
+ * Test that if:
+ * - We have an HDMI connector supporting both RGB and YUV420 and up to
+ * 12 bpc
+ * - The chosen mode has a TMDS character rate higher than the display
+ * supports in RGB/10bpc but lower than the display supports in
+ * RGB/8bpc
+ * - The chosen mode has a TMDS character rate lower than the display
+ * supports in YUV420/12bpc.
+ *
+ * Then we will prefer to keep the RGB format with a lower bpc over
+ * picking YUV420.
+ */
+static void drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_display_info *info;
+ struct drm_display_mode *preferred;
+ unsigned long long rate;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV420),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_4k_rgb_yuv420_dc_max_340mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ info = &conn->display_info;
+ KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ KUNIT_ASSERT_TRUE(test, conn->ycbcr_420_allowed);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
+ KUNIT_ASSERT_TRUE(test, drm_mode_is_420_also(info, preferred));
+
+ rate = drm_hdmi_compute_mode_clock(preferred, 8, HDMI_COLORSPACE_RGB);
+ KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+
+ rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB);
+ KUNIT_ASSERT_GT(test, rate, info->max_tmds_clock * 1000);
+
+ rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV420);
+ KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ preferred, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+ KUNIT_ASSERT_NOT_NULL(test, conn_state);
+
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that if a driver supports only RGB, but the chosen mode can be
+ * supported by the screen only in the YUV420 output format, all format
+ * fallback attempts fail and the atomic check rejects the mode.
+ */
+static void drm_test_check_driver_unsupported_fallback_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ struct drm_display_info *info;
+ struct drm_display_mode *preferred, *yuv420_only_mode;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ info = &conn->display_info;
+ KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ KUNIT_ASSERT_FALSE(test, conn->ycbcr_420_allowed);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_ASSERT_FALSE(test, drm_mode_is_420_also(info, preferred));
+
+ yuv420_only_mode = drm_kunit_display_mode_from_cea_vic(test, drm, 95);
+ KUNIT_ASSERT_NOT_NULL(test, yuv420_only_mode);
+ KUNIT_ASSERT_TRUE(test, drm_mode_is_420_only(info, yuv420_only_mode));
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ preferred, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+ KUNIT_ASSERT_NOT_NULL(test, conn_state);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+retry_crtc_state:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (PTR_ERR(crtc_state) == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_crtc_state;
+ }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, yuv420_only_mode);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_crtc_state;
+ }
+ KUNIT_ASSERT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
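
The final drm_atomic_check_only() is expected to fail here: the EDID flags
VIC 95 as 420-only, but the connector was registered without
BIT(HDMI_COLORSPACE_YUV420), so ycbcr_420_allowed stays false and no output
format remains to fall back to. In effect the helper's format selection
reduces to something like this (a sketch, not the literal helper code):

	if (drm_mode_is_420_only(&connector->display_info, mode) &&
	    !connector->ycbcr_420_allowed)
		return -EINVAL;	/* no non-YUV420 format can carry this mode */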
+
+/*
* Test that if a driver and screen support RGB and YUV formats, and we
* try to set the VIC 1 mode, we end up with 8bpc RGB even if we could
* have had a higher bpc.
@@ -1321,20 +1638,17 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1388,19 +1702,16 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1458,21 +1769,18 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1531,19 +1839,16 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1594,21 +1899,18 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1704,17 +2006,17 @@ static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
KUNIT_CASE(drm_test_check_broadcast_rgb_full_cea_mode_vic_1),
KUNIT_CASE(drm_test_check_broadcast_rgb_limited_cea_mode),
KUNIT_CASE(drm_test_check_broadcast_rgb_limited_cea_mode_vic_1),
- /*
- * TODO: When we'll have YUV output support, we need to check
- * that the limited range is always set to limited no matter
- * what the value of Broadcast RGB is.
- */
+ KUNIT_CASE_PARAM(drm_test_check_broadcast_rgb_cea_mode_yuv420,
+ check_broadcast_rgb_cea_mode_yuv420_gen_params),
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_changed),
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_not_changed),
KUNIT_CASE(drm_test_check_disable_connector),
KUNIT_CASE(drm_test_check_hdmi_funcs_reject_rate),
- KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback),
- KUNIT_CASE(drm_test_check_max_tmds_rate_format_fallback),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_rgb),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_yuv420),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv422),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv420),
+ KUNIT_CASE(drm_test_check_driver_unsupported_fallback_yuv420),
KUNIT_CASE(drm_test_check_output_bpc_crtc_mode_changed),
KUNIT_CASE(drm_test_check_output_bpc_crtc_mode_not_changed),
KUNIT_CASE(drm_test_check_output_bpc_dvi),
@@ -1927,28 +2229,20 @@ static void drm_test_check_mode_valid(struct kunit *test)
static void drm_test_check_mode_valid_reject_rate(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_connector *conn;
struct drm_display_mode *preferred;
- int ret;
- priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8,
- &reject_100_MHz_connector_hdmi_funcs);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_100mhz_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
- conn = &priv->connector;
-
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
/*
* Unlike in drm_test_check_mode_valid(), here 1080p is rejected, but
* 480p is allowed.
*/
- preferred = find_preferred_mode(conn);
+ preferred = find_preferred_mode(&priv->connector);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_EXPECT_EQ(test, preferred->hdisplay, 640);
KUNIT_EXPECT_EQ(test, preferred->vdisplay, 480);
@@ -1966,12 +2260,14 @@ static void drm_test_check_mode_valid_reject(struct kunit *test)
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
struct drm_display_mode *preferred;
+ unsigned char no_edid[] = {};
int ret;
- priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8,
- &reject_connector_hdmi_funcs);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_connector_hdmi_funcs,
+ no_edid);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1996,20 +2292,15 @@ static void drm_test_check_mode_valid_reject_max_clock(struct kunit *test)
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
struct drm_display_mode *preferred;
- int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_100mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
-
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_100mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_100mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
KUNIT_ASSERT_EQ(test, conn->display_info.max_tmds_clock, 100 * 1000);
preferred = find_preferred_mode(conn);
diff --git a/drivers/gpu/drm/tests/drm_kunit_edid.h b/drivers/gpu/drm/tests/drm_kunit_edid.h
index 6358397a5d7a..c59c8528a3f7 100644
--- a/drivers/gpu/drm/tests/drm_kunit_edid.h
+++ b/drivers/gpu/drm/tests/drm_kunit_edid.h
@@ -46,6 +46,13 @@
* Monitor ranges (GTF): 50-70 Hz V, 30-70 kHz H, max dotclock 150 MHz
* Dummy Descriptor:
* Checksum: 0xab
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_dvi_1080p[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -62,6 +69,10 @@ static const unsigned char test_edid_dvi_1080p[] = {
};
/*
+ *
+ * This EDID is intentionally broken: its 100 MHz maximum TMDS clock is
+ * lower than what the advertised video timings require. It's meant to
+ * be used only by tests exercising unusual situations.
+ *
* edid-decode (hex):
*
* 00 ff ff ff ff ff ff 00 31 d8 2a 00 00 00 00 00
@@ -73,14 +84,14 @@ static const unsigned char test_edid_dvi_1080p[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
*
- * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
- * 00 12 34 00 14 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c
+ * 00 12 34 00 14 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e4
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 10
*
* ----------------
*
@@ -135,8 +146,19 @@ static const unsigned char test_edid_dvi_1080p[] = {
* Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
* Source physical address: 1.2.3.4
* Maximum TMDS clock: 100 MHz
- * Extended HDMI video details:
- * Checksum: 0xe4 Unused space in Extension Block: 100 bytes
+ * Checksum: 0x10 Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * Failures:
+ *
+ * EDID:
+ * CTA-861: The maximum HDMI TMDS clock is 100000 kHz, but one or more video timings go up to 148500 kHz.
+ *
+ * EDID conformity: FAIL
*/
static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -147,11 +169,11 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
- 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x00, 0x14, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -160,7 +182,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xe4
+ 0x00, 0x00, 0x00, 0x10
};
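
Every checksum byte touched by this diff (0xe4 -> 0x10, 0xd0 -> 0xfc, and so
on) changes for the same reason: each 128-byte EDID block must sum to zero
modulo 256, so shrinking the HDMI VSDB and dropping the "Extended HDMI video
details" byte forces the final byte to be recomputed. A small sketch of the
rule (hypothetical helper, not kernel code):

	static unsigned char edid_block_checksum(const unsigned char block[128])
	{
		unsigned int sum = 0;
		int i;

		for (i = 0; i < 127; i++)	/* every byte except the checksum */
			sum += block[i];

		return (unsigned char)(0x100 - (sum & 0xff));	/* block sums to 0 */
	}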
/*
@@ -175,14 +197,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
*
- * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
- * 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c
+ * 00 12 34 00 28 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d0
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fc
*
* ----------------
*
@@ -237,8 +259,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
* Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
* Source physical address: 1.2.3.4
* Maximum TMDS clock: 200 MHz
- * Extended HDMI video details:
- * Checksum: 0xd0 Unused space in Extension Block: 100 bytes
+ * Checksum: 0xfc Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -249,11 +277,11 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
- 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x00, 0x28, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -262,7 +290,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xd0
+ 0x00, 0x00, 0x00, 0xfc
};
/*
@@ -277,14 +305,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
*
- * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
- * 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c
+ * 00 12 34 00 44 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d0
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e0
*
* ----------------
*
@@ -339,8 +367,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
* Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
* Source physical address: 1.2.3.4
* Maximum TMDS clock: 340 MHz
- * Extended HDMI video details:
- * Checksum: 0xd0 Unused space in Extension Block: 100 bytes
+ * Checksum: 0xe0 Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -351,11 +385,11 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
- 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x00, 0x44, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -364,7 +398,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xd0
+ 0x00, 0x00, 0x00, 0xe0
};
/*
@@ -379,14 +413,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 7a
*
- * 02 03 1b b1 e3 05 00 20 41 10 e2 00 ca 6d 03 0c
- * 00 12 34 78 28 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 b1 e3 05 00 20 41 10 e2 00 ca 67 03 0c
+ * 00 12 34 78 28 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 a8
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d4
*
* ----------------
*
@@ -447,8 +481,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
* DC_30bit
* DC_Y444
* Maximum TMDS clock: 200 MHz
- * Extended HDMI video details:
- * Checksum: 0xa8 Unused space in Extension Block: 100 bytes
+ * Checksum: 0xd4 Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -461,9 +501,9 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7a, 0x02, 0x03, 0x1b, 0xb1,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x78, 0x28, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7a, 0x02, 0x03, 0x15, 0xb1,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x78, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -472,7 +512,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xa8
+ 0x00, 0x00, 0x00, 0xd4
};
/*
@@ -487,14 +527,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 8a
*
- * 02 03 1b b1 e3 05 00 20 41 10 e2 00 ca 6d 03 0c
- * 00 12 34 78 44 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 b1 e3 05 00 20 41 10 e2 00 ca 67 03 0c
+ * 00 12 34 78 44 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 8c
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 b8
*
* ----------------
*
@@ -555,8 +595,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
* DC_30bit
* DC_Y444
* Maximum TMDS clock: 340 MHz
- * Extended HDMI video details:
- * Checksum: 0x8c Unused space in Extension Block: 100 bytes
+ * Checksum: 0xb8 Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -569,18 +615,250 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz[] = {
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8a, 0x02, 0x03, 0x1b, 0xb1,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x78, 0x44, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8a, 0x02, 0x03, 0x15, 0xb1,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x78, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xb8
+};
+
+/*
+ * Max resolution:
+ * - 1920x1080@60Hz with RGB, YUV444, YUV422
+ * - 3840x2160@30Hz with YUV420 only
+ * Max BPC: 16 for all modes
+ * Max TMDS clock: 200 MHz
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 31 d8 34 00 00 00 00 00
+ * ff 23 01 03 80 60 36 78 0f ee 91 a3 54 4c 99 26
+ * 0f 50 54 20 00 00 01 01 01 01 01 01 01 01 01 01
+ * 01 01 01 01 01 01 02 3a 80 18 71 38 2d 40 58 2c
+ * 45 00 c0 1c 32 00 00 1e 00 00 00 fc 00 54 65 73
+ * 74 20 45 44 49 44 0a 20 20 20 00 00 00 fd 00 18
+ * 55 18 5e 11 00 0a 20 20 20 20 20 20 00 00 00 10
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 bb
+ *
+ * 02 03 29 31 42 90 5f 6c 03 0c 00 10 00 78 28 20
+ * 00 00 01 03 6d d8 5d c4 01 28 80 07 00 00 00 00
+ * 00 00 e3 0f 00 00 e2 0e 5f 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ca
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: LNX
+ * Model: 52
+ * Model year: 2025
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 96 cm x 54 cm
+ * Gamma: 2.20
+ * RGB color display
+ * Default (sRGB) color space is primary color space
+ * First detailed timing is the preferred timing
+ * Supports GTF timings within operating range
+ * Color Characteristics:
+ * Red : 0.6396, 0.3300
+ * Green: 0.2998, 0.5996
+ * Blue : 0.1503, 0.0595
+ * White: 0.3125, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * Standard Timings: none
+ * Detailed Timing Descriptors:
+ * DTD 1: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (960 mm x 540 mm)
+ * Hfront 88 Hsync 44 Hback 148 Hpol P
+ * Vfront 4 Vsync 5 Vback 36 Vpol P
+ * Display Product Name: 'Test EDID'
+ * Display Range Limits:
+ * Monitor ranges (GTF): 24-85 Hz V, 24-94 kHz H, max dotclock 170 MHz
+ * Dummy Descriptor:
+ * Extension blocks: 1
+ * Checksum: 0xbb
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 1
+ * Video Data Block:
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (native)
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * DC_48bit
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 200 MHz
+ * Extended HDMI video details:
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 200 MHz
+ * SCDC Present
+ * Supports 16-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * Empty Capability Map
+ * YCbCr 4:2:0 Video Data Block:
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * Checksum: 0xca
+ */
+static const unsigned char test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x34, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x23, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0x20,
+ 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38,
+ 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
+ 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
+ 0x55, 0x18, 0x5e, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xbb, 0x02, 0x03, 0x29, 0x31,
+ 0x42, 0x90, 0x5f, 0x6c, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x78, 0x28, 0x20,
+ 0x00, 0x00, 0x01, 0x03, 0x6d, 0xd8, 0x5d, 0xc4, 0x01, 0x28, 0x80, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x0f, 0x00, 0x00, 0xe2, 0x0e,
+ 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xca
+};
+
+/*
+ * Max resolution: 3840x2160@30Hz with RGB, YUV444, YUV422, YUV420
+ * Max BPC: 16 for all modes
+ * Max TMDS clock: 340 MHz
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 31 d8 34 00 00 00 00 00
+ * ff 23 01 03 80 60 36 78 0f ee 91 a3 54 4c 99 26
+ * 0f 50 54 20 00 00 01 01 01 01 01 01 01 01 01 01
+ * 01 01 01 01 01 01 04 74 00 30 f2 70 5a 80 b0 58
+ * 8a 00 40 84 63 00 00 1e 00 00 00 fc 00 54 65 73
+ * 74 20 45 44 49 44 0a 20 20 20 00 00 00 fd 00 18
+ * 55 18 5e 22 00 0a 20 20 20 20 20 20 00 00 00 10
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 ce
+ *
+ * 02 03 27 31 41 5f 6c 03 0c 00 10 00 78 44 20 00
+ * 00 01 03 6d d8 5d c4 01 44 80 07 00 00 00 00 00
+ * 00 e3 0f 01 00 e1 0e 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 84
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: LNX
+ * Model: 52
+ * Model year: 2025
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 96 cm x 54 cm
+ * Gamma: 2.20
+ * RGB color display
+ * Default (sRGB) color space is primary color space
+ * First detailed timing is the preferred timing
+ * Supports GTF timings within operating range
+ * Color Characteristics:
+ * Red : 0.6396, 0.3300
+ * Green: 0.2998, 0.5996
+ * Blue : 0.1503, 0.0595
+ * White: 0.3125, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * Standard Timings: none
+ * Detailed Timing Descriptors:
+ * DTD 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz (1600 mm x 900 mm)
+ * Hfront 176 Hsync 88 Hback 296 Hpol P
+ * Vfront 8 Vsync 10 Vback 72 Vpol P
+ * Display Product Name: 'Test EDID'
+ * Display Range Limits:
+ * Monitor ranges (GTF): 24-85 Hz V, 24-94 kHz H, max dotclock 340 MHz
+ * Dummy Descriptor:
+ * Extension blocks: 1
+ * Checksum: 0xce
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 1
+ * Video Data Block:
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * DC_48bit
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 340 MHz
+ * Extended HDMI video details:
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 340 MHz
+ * SCDC Present
+ * Supports 16-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * YCbCr 4:2:0 Video Data Block:
+ * Checksum: 0x84
+ */
+static const unsigned char test_edid_hdmi_4k_rgb_yuv420_dc_max_340mhz[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x34, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x23, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0x20,
+ 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x74, 0x00, 0x30, 0xf2, 0x70,
+ 0x5a, 0x80, 0xb0, 0x58, 0x8a, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
+ 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
+ 0x55, 0x18, 0x5e, 0x22, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xce, 0x02, 0x03, 0x27, 0x31,
+ 0x41, 0x5f, 0x6c, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x78, 0x44, 0x20, 0x00,
+ 0x00, 0x01, 0x03, 0x6d, 0xd8, 0x5d, 0xc4, 0x01, 0x44, 0x80, 0x07, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x0f, 0x01, 0x00, 0xe1, 0x0e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x8c
+ 0x00, 0x00, 0x00, 0x84
};
#endif // DRM_KUNIT_EDID_H_
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 5f7257840d8e..04edb6079c0d 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -13,6 +13,7 @@
#include <kunit/resource.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#define KUNIT_DEVICE_NAME "drm-kunit-mock-device"
diff --git a/drivers/gpu/drm/tests/drm_sysfb_modeset_test.c b/drivers/gpu/drm/tests/drm_sysfb_modeset_test.c
new file mode 100644
index 000000000000..e875d876118f
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_sysfb_modeset_test.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include "../sysfb/drm_sysfb_helper.h"
+
+#define TEST_BUF_SIZE 50
+
+struct sysfb_build_fourcc_list_case {
+ const char *name;
+ u32 native_fourccs[TEST_BUF_SIZE];
+ size_t native_fourccs_size;
+ u32 expected[TEST_BUF_SIZE];
+ size_t expected_fourccs_size;
+};
+
+static struct sysfb_build_fourcc_list_case sysfb_build_fourcc_list_cases[] = {
+ {
+ .name = "no native formats",
+ .native_fourccs = { },
+ .native_fourccs_size = 0,
+ .expected = { DRM_FORMAT_XRGB8888 },
+ .expected_fourccs_size = 1,
+ },
+ {
+ .name = "XRGB8888 as native format",
+ .native_fourccs = { DRM_FORMAT_XRGB8888 },
+ .native_fourccs_size = 1,
+ .expected = { DRM_FORMAT_XRGB8888 },
+ .expected_fourccs_size = 1,
+ },
+ {
+ .name = "remove duplicates",
+ .native_fourccs = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ },
+ .native_fourccs_size = 11,
+ .expected = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ },
+ .expected_fourccs_size = 3,
+ },
+ {
+ .name = "convert alpha formats",
+ .native_fourccs = {
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_RGBA1010102,
+ DRM_FORMAT_BGRA1010102,
+ },
+ .native_fourccs_size = 12,
+ .expected = {
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_RGBX1010102,
+ DRM_FORMAT_BGRX1010102,
+ },
+ .expected_fourccs_size = 12,
+ },
+ {
+ .name = "random formats",
+ .native_fourccs = {
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_C8,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGR565_A8,
+ DRM_FORMAT_R10,
+ DRM_FORMAT_XYUV8888,
+ },
+ .native_fourccs_size = 10,
+ .expected = {
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_C8,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGR565_A8,
+ DRM_FORMAT_R10,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XRGB8888,
+ },
+ .expected_fourccs_size = 10,
+ },
+};
+
+static void sysfb_build_fourcc_list_case_desc(struct sysfb_build_fourcc_list_case *t, char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(sysfb_build_fourcc_list, sysfb_build_fourcc_list_cases,
+ sysfb_build_fourcc_list_case_desc);
+
+static void drm_test_sysfb_build_fourcc_list(struct kunit *test)
+{
+ const struct sysfb_build_fourcc_list_case *params = test->param_value;
+ u32 fourccs_out[TEST_BUF_SIZE] = {0};
+ size_t nfourccs_out;
+ struct drm_device *drm;
+ struct device *dev;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
+
+ nfourccs_out = drm_sysfb_build_fourcc_list(drm, params->native_fourccs,
+ params->native_fourccs_size,
+ fourccs_out, TEST_BUF_SIZE);
+
+ KUNIT_EXPECT_EQ(test, nfourccs_out, params->expected_fourccs_size);
+ KUNIT_EXPECT_MEMEQ(test, fourccs_out, params->expected, TEST_BUF_SIZE);
+}
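
The test table above pins down three behaviours of
drm_sysfb_build_fourcc_list(): duplicate entries are dropped, formats with an
alpha channel are replaced by their opaque "X" siblings (a firmware
framebuffer has nothing behind it to blend against), and XRGB8888 is appended
as a universal fallback when the native list doesn't already provide it. The
alpha-stripping step could be pictured as a lookup like this (a hypothetical
sketch; the in-kernel helper derives the mapping differently):

	/* assumes <drm/drm_fourcc.h> for the DRM_FORMAT_* constants */
	static const struct { u32 alpha; u32 opaque; } alpha_map[] = {
		{ DRM_FORMAT_ARGB8888,    DRM_FORMAT_XRGB8888 },
		{ DRM_FORMAT_ARGB1555,    DRM_FORMAT_XRGB1555 },
		{ DRM_FORMAT_ARGB2101010, DRM_FORMAT_XRGB2101010 },
		/* ... one entry per A-variant ... */
	};

	static u32 strip_alpha(u32 fourcc)
	{
		size_t i;

		for (i = 0; i < ARRAY_SIZE(alpha_map); i++)
			if (alpha_map[i].alpha == fourcc)
				return alpha_map[i].opaque;

		return fourcc;	/* already opaque, or not an alpha format */
	}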
+
+static struct kunit_case drm_sysfb_modeset_test_cases[] = {
+ KUNIT_CASE_PARAM(drm_test_sysfb_build_fourcc_list, sysfb_build_fourcc_list_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_sysfb_modeset_test_suite = {
+ .name = "drm_sysfb_modeset_test",
+ .test_cases = drm_sysfb_modeset_test_cases,
+};
+
+kunit_test_suite(drm_sysfb_modeset_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for the drm_sysfb_modeset APIs");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("José Expósito <jose.exposito89@gmail.com>");
diff --git a/drivers/gpu/drm/tidss/Makefile b/drivers/gpu/drm/tidss/Makefile
index 312645271014..b6d6becf1683 100644
--- a/drivers/gpu/drm/tidss/Makefile
+++ b/drivers/gpu/drm/tidss/Makefile
@@ -7,6 +7,7 @@ tidss-y := tidss_crtc.o \
tidss_irq.o \
tidss_plane.o \
tidss_scale_coefs.o \
- tidss_dispc.o
+ tidss_dispc.o \
+ tidss_oldi.o
obj-$(CONFIG_DRM_TIDSS) += tidss.o
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 94f8e3178df5..a2f40a5c7703 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -130,7 +130,7 @@ static void tidss_crtc_position_planes(struct tidss_device *tidss,
!to_tidss_crtc_state(cstate)->plane_pos_changed)
return;
- for (layer = 0; layer < tidss->feat->num_planes; layer++) {
+ for (layer = 0; layer < tidss->feat->num_vids; layer++) {
struct drm_plane_state *pstate;
struct drm_plane *plane;
bool layer_active = false;
@@ -271,7 +271,7 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
* another videoport, the DSS will report sync lost issues. Disable all
* the layers here as a work-around.
*/
- for (u32 layer = 0; layer < tidss->feat->num_planes; layer++)
+ for (u32 layer = 0; layer < tidss->feat->num_vids; layer++)
dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
false);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index cacb5f3d8085..c0277fa36425 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -103,9 +103,16 @@ const struct dispc_features dispc_k2g_feats = {
},
},
- .num_planes = 1,
- .vid_name = { "vid1" },
- .vid_lite = { false },
+ .num_vids = 1,
+
+ .vid_info = {
+ {
+ .name = "vid1",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ },
+
.vid_order = { 0 },
};
@@ -139,7 +146,7 @@ static const u16 tidss_am65x_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
const struct dispc_features dispc_am65x_feats = {
.max_pclk_khz = {
[DISPC_VP_DPI] = 165000,
- [DISPC_VP_OLDI] = 165000,
+ [DISPC_VP_OLDI_AM65X] = 165000,
},
.scaling = {
@@ -169,7 +176,7 @@ const struct dispc_features dispc_am65x_feats = {
.vp_name = { "vp1", "vp2" },
.ovr_name = { "ovr1", "ovr2" },
.vpclk_name = { "vp1", "vp2" },
- .vp_bus_type = { DISPC_VP_OLDI, DISPC_VP_DPI },
+ .vp_bus_type = { DISPC_VP_OLDI_AM65X, DISPC_VP_DPI },
.vp_feat = { .color = {
.has_ctm = true,
@@ -178,11 +185,22 @@ const struct dispc_features dispc_am65x_feats = {
},
},
- .num_planes = 2,
+ .num_vids = 2,
/* note: vid is plane_id 0 and vidl1 is plane_id 1 */
- .vid_name = { "vid", "vidl1" },
- .vid_lite = { false, true, },
- .vid_order = { 1, 0 },
+ .vid_info = {
+ {
+ .name = "vid",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ },
+ },
+
+ .vid_order = { 1, 0 },
};
static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
@@ -267,9 +285,32 @@ const struct dispc_features dispc_j721e_feats = {
.gamma_type = TIDSS_GAMMA_10BIT,
},
},
- .num_planes = 4,
- .vid_name = { "vid1", "vidl1", "vid2", "vidl2" },
- .vid_lite = { 0, 1, 0, 1, },
+
+ .num_vids = 4,
+
+ .vid_info = {
+ {
+ .name = "vid1",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ },
+ {
+ .name = "vid2",
+ .is_lite = false,
+ .hw_id = 2,
+ },
+ {
+ .name = "vidl2",
+ .is_lite = true,
+ .hw_id = 3,
+ },
+ },
+
.vid_order = { 1, 3, 0, 2 },
};
@@ -315,11 +356,23 @@ const struct dispc_features dispc_am625_feats = {
},
},
- .num_planes = 2,
+ .num_vids = 2,
+
/* note: vid is plane_id 0 and vidl1 is plane_id 1 */
- .vid_name = { "vid", "vidl1" },
- .vid_lite = { false, true, },
- .vid_order = { 1, 0 },
+ .vid_info = {
+ {
+ .name = "vid",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ }
+ },
+
+ .vid_order = { 1, 0 },
};
const struct dispc_features dispc_am62a7_feats = {
@@ -369,11 +422,58 @@ const struct dispc_features dispc_am62a7_feats = {
},
},
- .num_planes = 2,
- /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
- .vid_name = { "vid", "vidl1" },
- .vid_lite = { false, true, },
- .vid_order = { 1, 0 },
+ .num_vids = 2,
+
+ .vid_info = {
+ {
+ .name = "vid",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ }
+ },
+
+ .vid_order = { 1, 0 },
+};
+
+const struct dispc_features dispc_am62l_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 165000,
+ },
+
+ .subrev = DISPC_AM62L,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 1,
+ .vp_name = { "vp1" },
+ .ovr_name = { "ovr1" },
+ .vpclk_name = { "vp1" },
+ .vp_bus_type = { DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_vids = 1,
+
+ .vid_info = {
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ }
+ },
+
+ .vid_order = { 0 },
};
static const u16 *dispc_common_regmap;
@@ -391,7 +491,7 @@ struct dispc_device {
void __iomem *base_ovr[TIDSS_MAX_PORTS];
void __iomem *base_vp[TIDSS_MAX_PORTS];
- struct regmap *oldi_io_ctrl;
+ struct regmap *am65x_oldi_io_ctrl;
struct clk *vp_clk[TIDSS_MAX_PORTS];
@@ -466,6 +566,29 @@ static u32 dispc_vp_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
return ioread32(base + reg);
}
+int tidss_configure_oldi(struct tidss_device *tidss, u32 hw_videoport,
+ u32 oldi_cfg)
+{
+ u32 count = 0;
+ u32 oldi_reset_bit = BIT(5 + hw_videoport);
+
+ dispc_vp_write(tidss->dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, oldi_cfg);
+
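+ /*
+ * Poll SYSSTATUS until the reset-done bit for this videoport sets,
+ * giving up after 10000 reads.
+ */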
+ while (!(oldi_reset_bit & dispc_read(tidss->dispc, DSS_SYSSTATUS)) &&
+ count < 10000)
+ count++;
+
+ if (!(oldi_reset_bit & dispc_read(tidss->dispc, DSS_SYSSTATUS)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void tidss_disable_oldi(struct tidss_device *tidss, u32 hw_videoport)
+{
+ dispc_vp_write(tidss->dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0);
+}
+
/*
* TRM gives bitfields as start:end, where start is the higher bit
* number. For example 7:0
@@ -734,7 +857,8 @@ static void dispc_k3_vp_write_irqstatus(struct dispc_device *dispc,
static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
u32 hw_plane)
{
- u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_plane));
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_id));
return dispc_vid_irq_from_raw(stat, hw_plane);
}
@@ -742,9 +866,10 @@ static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
static void dispc_k3_vid_write_irqstatus(struct dispc_device *dispc,
u32 hw_plane, dispc_irq_t vidstat)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
- dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_plane), stat);
+ dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_id), stat);
}
static dispc_irq_t dispc_k3_vp_read_irqenable(struct dispc_device *dispc,
@@ -766,7 +891,8 @@ static void dispc_k3_vp_set_irqenable(struct dispc_device *dispc,
static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
u32 hw_plane)
{
- u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_plane));
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_id));
return dispc_vid_irq_from_raw(stat, hw_plane);
}
@@ -774,9 +900,10 @@ static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
static void dispc_k3_vid_set_irqenable(struct dispc_device *dispc,
u32 hw_plane, dispc_irq_t vidstat)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
- dispc_write(dispc, DISPC_VID_IRQENABLE(hw_plane), stat);
+ dispc_write(dispc, DISPC_VID_IRQENABLE(hw_id), stat);
}
static
@@ -788,7 +915,8 @@ void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
if (clearmask & DSS_IRQ_VP_MASK(i))
dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
}
- for (i = 0; i < dispc->feat->num_planes; ++i) {
+
+ for (i = 0; i < dispc->feat->num_vids; ++i) {
if (clearmask & DSS_IRQ_PLANE_MASK(i))
dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
}
@@ -809,7 +937,7 @@ dispc_irq_t dispc_k3_read_and_clear_irqstatus(struct dispc_device *dispc)
for (i = 0; i < dispc->feat->num_vps; ++i)
status |= dispc_k3_vp_read_irqstatus(dispc, i);
- for (i = 0; i < dispc->feat->num_planes; ++i)
+ for (i = 0; i < dispc->feat->num_vids; ++i)
status |= dispc_k3_vid_read_irqstatus(dispc, i);
dispc_k3_clear_irqstatus(dispc, status);
@@ -825,7 +953,7 @@ static dispc_irq_t dispc_k3_read_irqenable(struct dispc_device *dispc)
for (i = 0; i < dispc->feat->num_vps; ++i)
enable |= dispc_k3_vp_read_irqenable(dispc, i);
- for (i = 0; i < dispc->feat->num_planes; ++i)
+ for (i = 0; i < dispc->feat->num_vids; ++i)
enable |= dispc_k3_vid_read_irqenable(dispc, i);
return enable;
@@ -851,12 +979,15 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
main_disable |= BIT(i); /* VP IRQ */
}
- for (i = 0; i < dispc->feat->num_planes; ++i) {
+ for (i = 0; i < dispc->feat->num_vids; ++i) {
+ u32 hw_id = dispc->feat->vid_info[i].hw_id;
+
dispc_k3_vid_set_irqenable(dispc, i, mask);
+
if (mask & DSS_IRQ_PLANE_MASK(i))
- main_enable |= BIT(i + 4); /* VID IRQ */
+ main_enable |= BIT(hw_id + 4); /* VID IRQ */
else
- main_disable |= BIT(i + 4); /* VID IRQ */
+ main_disable |= BIT(hw_id + 4); /* VID IRQ */
}
if (main_enable)
@@ -879,6 +1010,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
return dispc_k2g_read_and_clear_irqstatus(dispc);
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
case DISPC_J721E:
return dispc_k3_read_and_clear_irqstatus(dispc);
@@ -896,6 +1028,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_set_irqenable(dispc, mask);
@@ -906,13 +1039,11 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
}
}
-enum dispc_oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
-
struct dispc_bus_format {
u32 bus_fmt;
u32 data_width;
bool is_oldi_fmt;
- enum dispc_oldi_mode_reg_val oldi_mode_reg_val;
+ enum oldi_mode_reg_val am65x_oldi_mode_reg_val;
};
static const struct dispc_bus_format dispc_bus_formats[] = {
@@ -956,7 +1087,7 @@ int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
return -EINVAL;
}
- if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI &&
+ if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI_AM65X &&
fmt->is_oldi_fmt) {
dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n",
__func__, dispc->feat->vp_name[hw_videoport]);
@@ -966,23 +1097,23 @@ int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
return 0;
}
-static void dispc_oldi_tx_power(struct dispc_device *dispc, bool power)
+static void dispc_am65x_oldi_tx_power(struct dispc_device *dispc, bool power)
{
- u32 val = power ? 0 : OLDI_PWRDN_TX;
+ u32 val = power ? 0 : AM65X_OLDI_PWRDN_TX;
- if (WARN_ON(!dispc->oldi_io_ctrl))
+ if (WARN_ON(!dispc->am65x_oldi_io_ctrl))
return;
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT0_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT1_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT2_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT3_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_CLK_IO_CTRL,
- OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT0_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT1_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT2_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT3_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_CLK_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
}
static void dispc_set_num_datalines(struct dispc_device *dispc,
@@ -1011,8 +1142,8 @@ static void dispc_set_num_datalines(struct dispc_device *dispc,
VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8);
}
-static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
- const struct dispc_bus_format *fmt)
+static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_bus_format *fmt)
{
u32 oldi_cfg = 0;
u32 oldi_reset_bit = BIT(5 + hw_videoport);
@@ -1031,7 +1162,7 @@ static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
oldi_cfg |= BIT(7); /* DEPOL */
- oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode_reg_val, 3, 1);
+ oldi_cfg = FLD_MOD(oldi_cfg, fmt->am65x_oldi_mode_reg_val, 3, 1);
oldi_cfg |= BIT(12); /* SOFTRST */
@@ -1060,10 +1191,10 @@ void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
if (WARN_ON(!fmt))
return;
- if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
- dispc_oldi_tx_power(dispc, true);
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI_AM65X) {
+ dispc_am65x_oldi_tx_power(dispc, true);
- dispc_enable_oldi(dispc, hw_videoport, fmt);
+ dispc_enable_am65x_oldi(dispc, hw_videoport, fmt);
}
}
@@ -1119,7 +1250,7 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
align = true;
/* always use DE_HIGH for OLDI */
- if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI)
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI_AM65X)
ieo = false;
dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ,
@@ -1145,10 +1276,10 @@ void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport)
void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
{
- if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI_AM65X) {
dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0);
- dispc_oldi_tx_power(dispc, false);
+ dispc_am65x_oldi_tx_power(dispc, false);
}
}
@@ -1310,7 +1441,6 @@ void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport)
* Calculate the percentage difference between the requested pixel clock rate
* and the effective rate resulting from calculating the clock divider value.
*/
-static
unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate)
{
int r = rate / 100, rr = real_rate / 100;
@@ -1358,8 +1488,10 @@ static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc,
u32 hw_plane, u32 hw_videoport,
u32 x, u32 y, u32 layer)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_plane, 4, 1);
+ hw_id, 4, 1);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
x, 17, 6);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
@@ -1370,8 +1502,10 @@ static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
u32 hw_plane, u32 hw_videoport,
u32 x, u32 y, u32 layer)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_plane, 4, 1);
+ hw_id, 4, 1);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
x, 13, 0);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
@@ -1388,6 +1522,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
x, y, layer);
@@ -2025,7 +2160,7 @@ int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport)
{
- bool lite = dispc->feat->vid_lite[hw_plane];
+ bool lite = dispc->feat->vid_info[hw_plane].is_lite;
u32 fourcc = state->fb->format->format;
bool need_scaling = state->src_w >> 16 != state->crtc_w ||
state->src_h >> 16 != state->crtc_h;
@@ -2096,7 +2231,7 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport)
{
- bool lite = dispc->feat->vid_lite[hw_plane];
+ bool lite = dispc->feat->vid_info[hw_plane].is_lite;
u32 fourcc = state->fb->format->format;
u16 cpp = state->fb->format->cpp[0];
u32 fb_width = state->fb->pitches[0] / cpp;
@@ -2210,7 +2345,7 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
/* MFLAG_START = MFLAGNORMALSTARTMODE */
REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
- for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
u32 thr_low, thr_high;
u32 mflag_low, mflag_high;
@@ -2226,7 +2361,7 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev,
"%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
- dispc->feat->vid_name[hw_plane],
+ dispc->feat->vid_info[hw_plane].name,
size,
thr_high, thr_low,
mflag_high, mflag_low,
@@ -2265,7 +2400,7 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
/* MFLAG_START = MFLAGNORMALSTARTMODE */
REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
- for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
u32 thr_low, thr_high;
u32 mflag_low, mflag_high;
@@ -2281,7 +2416,7 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev,
"%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
- dispc->feat->vid_name[hw_plane],
+ dispc->feat->vid_info[hw_plane].name,
size,
thr_high, thr_low,
mflag_high, mflag_low,
@@ -2308,6 +2443,7 @@ static void dispc_plane_init(struct dispc_device *dispc)
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_plane_init(dispc);
@@ -2416,6 +2552,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
break;
@@ -2735,15 +2872,15 @@ static int dispc_iomap_resource(struct platform_device *pdev, const char *name,
static int dispc_init_am65x_oldi_io_ctrl(struct device *dev,
struct dispc_device *dispc)
{
- dispc->oldi_io_ctrl =
+ dispc->am65x_oldi_io_ctrl =
syscon_regmap_lookup_by_phandle(dev->of_node,
"ti,am65x-oldi-io-ctrl");
- if (PTR_ERR(dispc->oldi_io_ctrl) == -ENODEV) {
- dispc->oldi_io_ctrl = NULL;
- } else if (IS_ERR(dispc->oldi_io_ctrl)) {
+ if (PTR_ERR(dispc->am65x_oldi_io_ctrl) == -ENODEV) {
+ dispc->am65x_oldi_io_ctrl = NULL;
+ } else if (IS_ERR(dispc->am65x_oldi_io_ctrl)) {
dev_err(dev, "%s: syscon_regmap_lookup_by_phandle failed %ld\n",
- __func__, PTR_ERR(dispc->oldi_io_ctrl));
- return PTR_ERR(dispc->oldi_io_ctrl);
+ __func__, PTR_ERR(dispc->am65x_oldi_io_ctrl));
+ return PTR_ERR(dispc->am65x_oldi_io_ctrl);
}
return 0;
}
@@ -2898,8 +3035,8 @@ int dispc_init(struct tidss_device *tidss)
if (r)
return r;
- for (i = 0; i < dispc->feat->num_planes; i++) {
- r = dispc_iomap_resource(pdev, dispc->feat->vid_name[i],
+ for (i = 0; i < dispc->feat->num_vids; i++) {
+ r = dispc_iomap_resource(pdev, dispc->feat->vid_info[i].name,
&dispc->base_vid[i]);
if (r)
return r;
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index 086327d51a90..b8614f62186c 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -46,13 +46,19 @@ struct dispc_features_scaling {
u32 xinc_max;
};
+struct dispc_vid_info {
+ const char *name; /* Should match dt reg names */
+ u32 hw_id;
+ bool is_lite;
+};
+
struct dispc_errata {
bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
};
enum dispc_vp_bus_type {
DISPC_VP_DPI, /* DPI output */
- DISPC_VP_OLDI, /* OLDI (LVDS) output */
+ DISPC_VP_OLDI_AM65X, /* OLDI (LVDS) output for AM65x DSS */
DISPC_VP_INTERNAL, /* SoC internal routing */
DISPC_VP_TIED_OFF, /* Tied off / Unavailable */
DISPC_VP_MAX_BUS_TYPE,
@@ -61,6 +67,7 @@ enum dispc_vp_bus_type {
enum dispc_dss_subrevision {
DISPC_K2G,
DISPC_AM625,
+ DISPC_AM62L,
DISPC_AM62A7,
DISPC_AM65X,
DISPC_J721E,
@@ -82,18 +89,23 @@ struct dispc_features {
const char *vpclk_name[TIDSS_MAX_PORTS]; /* Should match dt clk names */
const enum dispc_vp_bus_type vp_bus_type[TIDSS_MAX_PORTS];
struct tidss_vp_feat vp_feat;
- u32 num_planes;
- const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
- bool vid_lite[TIDSS_MAX_PLANES];
+ u32 num_vids;
+ struct dispc_vid_info vid_info[TIDSS_MAX_PLANES];
u32 vid_order[TIDSS_MAX_PLANES];
};
extern const struct dispc_features dispc_k2g_feats;
extern const struct dispc_features dispc_am625_feats;
extern const struct dispc_features dispc_am62a7_feats;
+extern const struct dispc_features dispc_am62l_feats;
extern const struct dispc_features dispc_am65x_feats;
extern const struct dispc_features dispc_j721e_feats;
+int tidss_configure_oldi(struct tidss_device *tidss, u32 hw_videoport,
+ u32 oldi_cfg);
+void tidss_disable_oldi(struct tidss_device *tidss, u32 hw_videoport);
+unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate);
+
void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask);
dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc);
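
The vid_info[] change above replaces the parallel vid_name[]/vid_lite[]
arrays with one struct per VID and, crucially, adds hw_id, so the logical
plane index the driver iterates over no longer has to equal the hardware
VID instance used for register addressing. A minimal standalone sketch of
the pattern, assuming hypothetical write_vid_reg() and VID_IRQENABLE stubs
(they are not driver API):

    struct vid_info {
            const char *name;       /* should match DT reg names */
            u32 hw_id;              /* hardware VID instance */
            bool is_lite;           /* lite pipeline, no scaling */
    };

    static const struct vid_info vids[] = {
            { .name = "vid",   .hw_id = 0, .is_lite = false },
            { .name = "vidl1", .hw_id = 1, .is_lite = true  },
    };

    static void enable_vid_irqs(u32 mask)
    {
            u32 i;

            for (i = 0; i < ARRAY_SIZE(vids); i++) {
                    /*
                     * i is the logical plane index; the register instance
                     * is derived from hw_id, not from i.
                     */
                    write_vid_reg(vids[i].hw_id, VID_IRQENABLE, mask);
            }
    }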
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
index e88148e44937..50a3f28250ef 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc_regs.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -226,18 +226,35 @@ enum dispc_common_regs {
#define DISPC_VP_DSS_DMA_THREADSIZE 0x170 /* J721E */
#define DISPC_VP_DSS_DMA_THREADSIZE_STATUS 0x174 /* J721E */
+/* OLDI Config Bits (DISPC_VP_DSS_OLDI_CFG) */
+#define OLDI_ENABLE BIT(0)
+#define OLDI_MAP (BIT(1) | BIT(2) | BIT(3))
+#define OLDI_SRC BIT(4)
+#define OLDI_CLONE_MODE BIT(5)
+#define OLDI_MASTERSLAVE BIT(6)
+#define OLDI_DEPOL BIT(7)
+#define OLDI_MSB BIT(8)
+#define OLDI_LBEN BIT(9)
+#define OLDI_LBDATA BIT(10)
+#define OLDI_DUALMODESYNC BIT(11)
+#define OLDI_SOFTRST BIT(12)
+#define OLDI_TPATCFG BIT(13)
+
+/* LVDS Format values for OLDI_MAP field in DISPC_VP_OLDI_CFG register */
+enum oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
+
/*
* OLDI IO_CTRL register offsets. On AM654 the registers are found
* from CTRL_MMR0, there the syscon regmap should map 0x14 bytes from
* CTRLMMR0P1_OLDI_DAT0_IO_CTRL to CTRLMMR0P1_OLDI_CLK_IO_CTRL
* register range.
*/
-#define OLDI_DAT0_IO_CTRL 0x00
-#define OLDI_DAT1_IO_CTRL 0x04
-#define OLDI_DAT2_IO_CTRL 0x08
-#define OLDI_DAT3_IO_CTRL 0x0C
-#define OLDI_CLK_IO_CTRL 0x10
+#define AM65X_OLDI_DAT0_IO_CTRL 0x00
+#define AM65X_OLDI_DAT1_IO_CTRL 0x04
+#define AM65X_OLDI_DAT2_IO_CTRL 0x08
+#define AM65X_OLDI_DAT3_IO_CTRL 0x0C
+#define AM65X_OLDI_CLK_IO_CTRL 0x10
-#define OLDI_PWRDN_TX BIT(8)
+#define AM65X_OLDI_PWRDN_TX BIT(8)
#endif /* __TIDSS_DISPC_REGS_H */
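
With the DISPC_VP_DSS_OLDI_CFG bits now named, assembling the register
value reads directly. A sketch of the same sequence tidss_oldi_config()
performs for a 24-bit JEIDA panel in single-link mode (illustrative only):

    static u32 oldi_cfg_jeida24_single_link(void)
    {
            u32 cfg = 0;

            cfg |= OLDI_MSB;        /* 24-bit bus width */
            cfg |= OLDI_DEPOL;      /* DE polarity */
            /* the LVDS data mapping lives in the OLDI_MAP field, bits 3:1 */
            cfg = (cfg & ~OLDI_MAP) | (JEIDA_24 << 1);
            cfg |= OLDI_SOFTRST | OLDI_ENABLE;

            return cfg;
    }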
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index d4652e8cc28c..a1b12e52aca4 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -24,6 +24,7 @@
#include "tidss_drv.h"
#include "tidss_kms.h"
#include "tidss_irq.h"
+#include "tidss_oldi.h"
/* Power management */
@@ -147,6 +148,10 @@ static int tidss_probe(struct platform_device *pdev)
return ret;
}
+ ret = tidss_oldi_init(tidss);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init OLDI\n");
+
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 1000);
@@ -203,6 +208,8 @@ err_runtime_suspend:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
+ tidss_oldi_deinit(tidss);
+
return ret;
}
@@ -227,6 +234,8 @@ static void tidss_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
+ tidss_oldi_deinit(tidss);
+
/* devm allocated dispc goes away with the dev so mark it NULL */
dispc_remove(tidss);
@@ -242,6 +251,7 @@ static const struct of_device_id tidss_of_table[] = {
{ .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
{ .compatible = "ti,am625-dss", .data = &dispc_am625_feats, },
{ .compatible = "ti,am62a7-dss", .data = &dispc_am62a7_feats, },
+ { .compatible = "ti,am62l-dss", .data = &dispc_am62l_feats, },
{ .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
{ .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
{ }
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index 7f4f4282bc04..d14d5d28f0a3 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -11,8 +11,10 @@
#define TIDSS_MAX_PORTS 4
#define TIDSS_MAX_PLANES 4
+#define TIDSS_MAX_OLDI_TXES 2
typedef u32 dispc_irq_t;
+struct tidss_oldi;
struct tidss_device {
struct drm_device ddev; /* DRM device for DSS */
@@ -27,6 +29,9 @@ struct tidss_device {
unsigned int num_planes;
struct drm_plane *planes[TIDSS_MAX_PLANES];
+ unsigned int num_oldis;
+ struct tidss_oldi *oldis[TIDSS_MAX_OLDI_TXES];
+
unsigned int irq;
/* protects the irq masks field and irqenable/irqstatus registers */
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
index 95b4aeff2775..81a04f767770 100644
--- a/drivers/gpu/drm/tidss/tidss_encoder.c
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -90,14 +90,18 @@ int tidss_encoder_create(struct tidss_device *tidss,
struct drm_connector *connector;
int ret;
- t_enc = drmm_simple_encoder_alloc(&tidss->ddev, struct tidss_encoder,
- encoder, encoder_type);
+ t_enc = devm_drm_bridge_alloc(tidss->dev, struct tidss_encoder,
+ bridge, &tidss_bridge_funcs);
if (IS_ERR(t_enc))
return PTR_ERR(t_enc);
+ ret = drm_simple_encoder_init(&tidss->ddev, &t_enc->encoder,
+ encoder_type);
+ if (ret)
+ return ret;
+
t_enc->tidss = tidss;
t_enc->next_bridge = next_bridge;
- t_enc->bridge.funcs = &tidss_bridge_funcs;
enc = &t_enc->encoder;
enc->possible_crtcs = possible_crtcs;
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index f371518f8697..c34eb90cddbe 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -115,7 +115,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
const struct dispc_features *feat = tidss->feat;
u32 max_vps = feat->num_vps;
- u32 max_planes = feat->num_planes;
+ u32 max_planes = feat->num_vids;
struct pipe pipes[TIDSS_MAX_PORTS];
u32 num_pipes = 0;
@@ -144,7 +144,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
dev_dbg(dev, "Setting up panel for port %d\n", i);
switch (feat->vp_bus_type[i]) {
- case DISPC_VP_OLDI:
+ case DISPC_VP_OLDI_AM65X:
enc_type = DRM_MODE_ENCODER_LVDS;
conn_type = DRM_MODE_CONNECTOR_LVDS;
break;
diff --git a/drivers/gpu/drm/tidss/tidss_oldi.c b/drivers/gpu/drm/tidss/tidss_oldi.c
new file mode 100644
index 000000000000..8f25159d0666
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_oldi.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2025 - Texas Instruments Incorporated
+ *
+ * Aradhya Bhatia <a-bhatia1@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/mfd/syscon.h>
+#include <linux/media-bus-format.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_of.h>
+
+#include "tidss_dispc.h"
+#include "tidss_dispc_regs.h"
+#include "tidss_oldi.h"
+
+struct tidss_oldi {
+ struct tidss_device *tidss;
+ struct device *dev;
+
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+
+ enum tidss_oldi_link_type link_type;
+ const struct oldi_bus_format *bus_format;
+ u32 oldi_instance;
+ int companion_instance; /* -1 when OLDI TX operates in Single-Link */
+ u32 parent_vp;
+
+ struct clk *serial;
+ struct regmap *io_ctrl;
+};
+
+struct oldi_bus_format {
+ u32 bus_fmt;
+ u32 data_width;
+ enum oldi_mode_reg_val oldi_mode_reg_val;
+ u32 input_bus_fmt;
+};
+
+static const struct oldi_bus_format oldi_bus_formats[] = {
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, SPWG_18, MEDIA_BUS_FMT_RGB666_1X18 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, SPWG_24, MEDIA_BUS_FMT_RGB888_1X24 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, JEIDA_24, MEDIA_BUS_FMT_RGB888_1X24 },
+};
+
+#define OLDI_IDLE_CLK_HZ 25000000 /* 25 MHz */
+
+static inline struct tidss_oldi *
+drm_bridge_to_tidss_oldi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tidss_oldi, bridge);
+}
+
+static int tidss_oldi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+
+ if (!oldi->next_bridge) {
+ dev_err(oldi->dev,
+ "%s: OLDI%u Failure attach next bridge\n",
+ __func__, oldi->oldi_instance);
+ return -ENODEV;
+ }
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ dev_err(oldi->dev,
+ "%s: OLDI%u DRM_BRIDGE_ATTACH_NO_CONNECTOR is mandatory.\n",
+ __func__, oldi->oldi_instance);
+ return -EINVAL;
+ }
+
+ return drm_bridge_attach(encoder, oldi->next_bridge, bridge, flags);
+}
+
+static int
+tidss_oldi_set_serial_clk(struct tidss_oldi *oldi, unsigned long rate)
+{
+ unsigned long new_rate;
+ int ret;
+
+ ret = clk_set_rate(oldi->serial, rate);
+ if (ret) {
+ dev_err(oldi->dev,
+ "OLDI%u: failed to set serial clk rate to %lu Hz\n",
+ oldi->oldi_instance, rate);
+ return ret;
+ }
+
+ new_rate = clk_get_rate(oldi->serial);
+
+ if (dispc_pclk_diff(rate, new_rate) > 5)
+ dev_warn(oldi->dev,
+ "OLDI%u Clock rate %lu differs over 5%% from requested %lu\n",
+ oldi->oldi_instance, new_rate, rate);
+
+ dev_dbg(oldi->dev, "OLDI%u: new rate %lu Hz (requested %lu Hz)\n",
+ oldi->oldi_instance, clk_get_rate(oldi->serial), rate);
+
+ return 0;
+}
+
+static void tidss_oldi_tx_power(struct tidss_oldi *oldi, bool enable)
+{
+ u32 mask;
+
+ /*
+ * The power control bits are active low and default to the powered-off
+ * state, i.e. the bits are set to 1. To power on an OLDI TX, its bit
+ * must be cleared to 0. Since not all OLDI TXes are always in use, the
+ * logic below powers them on selectively: 'mask' is built so that any
+ * OLDI TX that must stay off keeps its bit set.
+ */
+
+ if (enable) {
+ switch (oldi->link_type) {
+ case OLDI_MODE_SINGLE_LINK:
+ /* Power on only the required OLDI TX's IO */
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance) | OLDI_PWRDN_BG;
+ break;
+ case OLDI_MODE_CLONE_SINGLE_LINK:
+ case OLDI_MODE_DUAL_LINK:
+ /* Power-on both the OLDI TXes' IOs */
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance) |
+ OLDI_PWRDOWN_TX(oldi->companion_instance) |
+ OLDI_PWRDN_BG;
+ break;
+ default:
+ /*
+ * Execution should never reach here, as an OLDI with an
+ * unsupported OLDI mode would never get registered in the
+ * first place. However, power off the OLDI in question,
+ * just in case.
+ */
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance);
+ enable = false;
+ break;
+ }
+ } else {
+ switch (oldi->link_type) {
+ case OLDI_MODE_CLONE_SINGLE_LINK:
+ case OLDI_MODE_DUAL_LINK:
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance) |
+ OLDI_PWRDOWN_TX(oldi->companion_instance) |
+ OLDI_PWRDN_BG;
+ break;
+ case OLDI_MODE_SINGLE_LINK:
+ default:
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance);
+ break;
+ }
+ }
+
+ regmap_update_bits(oldi->io_ctrl, OLDI_PD_CTRL, mask, enable ? 0 : mask);
+}
+
+static int tidss_oldi_config(struct tidss_oldi *oldi)
+{
+ const struct oldi_bus_format *bus_fmt = NULL;
+ u32 oldi_cfg = 0;
+ int ret;
+
+ bus_fmt = oldi->bus_format;
+
+ /*
+ * MASTERSLAVE and SRC bits of OLDI Config are always set to 0.
+ */
+
+ if (bus_fmt->data_width == 24)
+ oldi_cfg |= OLDI_MSB;
+ else if (bus_fmt->data_width != 18)
+ dev_warn(oldi->dev,
+ "OLDI%u: DSS port width %d not supported\n",
+ oldi->oldi_instance, bus_fmt->data_width);
+
+ oldi_cfg |= OLDI_DEPOL;
+
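+ /* the LVDS data mapping lives in the OLDI_MAP field, bits 3:1 */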
+ oldi_cfg = (oldi_cfg & (~OLDI_MAP)) | (bus_fmt->oldi_mode_reg_val << 1);
+
+ oldi_cfg |= OLDI_SOFTRST;
+
+ oldi_cfg |= OLDI_ENABLE;
+
+ switch (oldi->link_type) {
+ case OLDI_MODE_SINGLE_LINK:
+ /* All configuration is done for this mode. */
+ break;
+
+ case OLDI_MODE_CLONE_SINGLE_LINK:
+ oldi_cfg |= OLDI_CLONE_MODE;
+ break;
+
+ case OLDI_MODE_DUAL_LINK:
+ /* data-mapping field also indicates dual-link mode */
+ oldi_cfg |= BIT(3);
+ oldi_cfg |= OLDI_DUALMODESYNC;
+ break;
+
+ default:
+ dev_err(oldi->dev, "OLDI%u: Unsupported mode.\n",
+ oldi->oldi_instance);
+ return -EINVAL;
+ }
+
+ ret = tidss_configure_oldi(oldi->tidss, oldi->parent_vp, oldi_cfg);
+ if (ret == -ETIMEDOUT)
+ dev_warn(oldi->dev, "OLDI%u: timeout waiting for OLDI reset done.\n",
+ oldi->oldi_instance);
+
+ return ret;
+}
+
+static void tidss_oldi_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_display_mode *mode;
+
+ if (oldi->link_type == OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK)
+ return;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ mode = &crtc_state->adjusted_mode;
+
+ /* Configure the OLDI params */
+ tidss_oldi_config(oldi);
+
+ /* Set the OLDI serial clock (7 times the pixel clock) */
+ tidss_oldi_set_serial_clk(oldi, mode->clock * 7 * 1000);
+
+ /* Enable OLDI IO power */
+ tidss_oldi_tx_power(oldi, true);
+}
+
+static void tidss_oldi_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+
+ if (oldi->link_type == OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK)
+ return;
+
+ /* Disable OLDI IO power */
+ tidss_oldi_tx_power(oldi, false);
+
+ /* Set the OLDI serial clock to IDLE Frequency */
+ tidss_oldi_set_serial_clk(oldi, OLDI_IDLE_CLK_HZ);
+
+ /* Clear OLDI Config */
+ tidss_disable_oldi(oldi->tidss, oldi->parent_vp);
+}
+
+#define MAX_INPUT_SEL_FORMATS 1
+
+static u32 *tidss_oldi_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+ u32 *input_fmts;
+ int i;
+
+ *num_input_fmts = 0;
+
+ for (i = 0; i < ARRAY_SIZE(oldi_bus_formats); i++)
+ if (oldi_bus_formats[i].bus_fmt == output_fmt)
+ break;
+
+ if (i == ARRAY_SIZE(oldi_bus_formats))
+ return NULL;
+
+ input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ *num_input_fmts = 1;
+ input_fmts[0] = oldi_bus_formats[i].input_bus_fmt;
+ oldi->bus_format = &oldi_bus_formats[i];
+
+ return input_fmts;
+}
+
+static const struct drm_bridge_funcs tidss_oldi_bridge_funcs = {
+ .attach = tidss_oldi_bridge_attach,
+ .atomic_pre_enable = tidss_oldi_atomic_pre_enable,
+ .atomic_post_disable = tidss_oldi_atomic_post_disable,
+ .atomic_get_input_bus_fmts = tidss_oldi_atomic_get_input_bus_fmts,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+static int get_oldi_mode(struct device_node *oldi_tx, int *companion_instance)
+{
+ struct device_node *companion;
+ struct device_node *port0, *port1;
+ u32 companion_reg;
+ bool secondary_oldi = false;
+ int pixel_order;
+
+ /*
+ * Find if the OLDI is paired with another OLDI for combined OLDI
+ * operation (dual-link or clone).
+ */
+ companion = of_parse_phandle(oldi_tx, "ti,companion-oldi", 0);
+ if (!companion)
+ /*
+ * The OLDI TX does not have a companion, nor is it a
+ * secondary OLDI. It will operate independently.
+ */
+ return OLDI_MODE_SINGLE_LINK;
+
+ if (of_property_read_u32(companion, "reg", &companion_reg))
+ return OLDI_MODE_UNSUPPORTED;
+
+ if (companion_reg > (TIDSS_MAX_OLDI_TXES - 1))
+ /* Invalid companion OLDI reg value. */
+ return OLDI_MODE_UNSUPPORTED;
+
+ *companion_instance = (int)companion_reg;
+
+ if (of_property_read_bool(oldi_tx, "ti,secondary-oldi"))
+ secondary_oldi = true;
+
+ /*
+ * We need to work out whether the sink expects us to operate in
+ * dual-link mode. We do this by looking at the DT port nodes that
+ * the OLDI TX ports are connected to. If they are marked as
+ * expecting even pixels and odd pixels, we need to enable dual-link.
+ */
+ port0 = of_graph_get_port_by_id(oldi_tx, 1);
+ port1 = of_graph_get_port_by_id(companion, 1);
+ pixel_order = drm_of_lvds_get_dual_link_pixel_order(port0, port1);
+ of_node_put(port0);
+ of_node_put(port1);
+ of_node_put(companion);
+
+ switch (pixel_order) {
+ case -EINVAL:
+ /*
+ * The dual-link properties were not found in at least
+ * one of the sink nodes. Since 2 OLDI ports are present
+ * in the DT, it can be safely assumed that the required
+ * configuration is Clone Mode.
+ */
+ return (secondary_oldi ? OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK :
+ OLDI_MODE_CLONE_SINGLE_LINK);
+
+ case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS:
+ /*
+ * Primary OLDI can only support "ODD" pixels. So, from its
+ * perspective, the pixel order has to be ODD-EVEN.
+ */
+ return (secondary_oldi ? OLDI_MODE_UNSUPPORTED :
+ OLDI_MODE_DUAL_LINK);
+
+ case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS:
+ /*
+ * Secondary OLDI can only support "EVEN" pixels. So, from its
+ * perspective, the pixel order has to be EVEN-ODD.
+ */
+ return (secondary_oldi ? OLDI_MODE_SECONDARY_DUAL_LINK :
+ OLDI_MODE_UNSUPPORTED);
+
+ default:
+ return OLDI_MODE_UNSUPPORTED;
+ }
+}
+
+static int get_parent_dss_vp(struct device_node *oldi_tx, u32 *parent_vp)
+{
+ struct device_node *ep, *dss_port;
+ int ret;
+
+ ep = of_graph_get_endpoint_by_regs(oldi_tx, OLDI_INPUT_PORT, -1);
+ if (ep) {
+ dss_port = of_graph_get_remote_port(ep);
+ if (!dss_port) {
+ ret = -ENODEV;
+ goto err_return_ep_port;
+ }
+
+ ret = of_property_read_u32(dss_port, "reg", parent_vp);
+
+ of_node_put(dss_port);
+err_return_ep_port:
+ of_node_put(ep);
+ return ret;
+ }
+
+ return -ENODEV;
+}
+
+static const struct drm_bridge_timings default_tidss_oldi_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+};
+
+void tidss_oldi_deinit(struct tidss_device *tidss)
+{
+ for (int i = 0; i < tidss->num_oldis; i++) {
+ if (tidss->oldis[i]) {
+ drm_bridge_remove(&tidss->oldis[i]->bridge);
+ tidss->oldis[i] = NULL;
+ }
+ }
+}
+
+int tidss_oldi_init(struct tidss_device *tidss)
+{
+ struct tidss_oldi *oldi;
+ struct device_node *child;
+ struct drm_bridge *bridge;
+ u32 parent_vp, oldi_instance;
+ int companion_instance = -1;
+ enum tidss_oldi_link_type link_type = OLDI_MODE_UNSUPPORTED;
+ struct device_node *oldi_parent;
+ int ret = 0;
+
+ tidss->num_oldis = 0;
+
+ oldi_parent = of_get_child_by_name(tidss->dev->of_node, "oldi-transmitters");
+ if (!oldi_parent)
+ /* Return gracefully */
+ return 0;
+
+ for_each_available_child_of_node(oldi_parent, child) {
+ ret = get_parent_dss_vp(child, &parent_vp);
+ if (ret) {
+ if (ret == -ENODEV) {
+ /*
+ * ENODEV means that this particular OLDI node
+ * is not connected to the DSS, which is
+ * harmless. Another OLDI may still be
+ * connected, so continue searching for it.
+ */
+ ret = 0;
+ continue;
+ }
+ goto err_put_node;
+ }
+
+ ret = of_property_read_u32(child, "reg", &oldi_instance);
+ if (ret)
+ goto err_put_node;
+
+ /*
+ * Now that this OLDI is confirmed to be connected to the
+ * DSS, go on to fetch the OLDI sink and the remaining OLDI
+ * properties.
+ */
+ bridge = devm_drm_of_get_bridge(tidss->dev, child,
+ OLDI_OUTPUT_PORT, 0);
+ if (IS_ERR(bridge)) {
+ /*
+ * Either there was no OLDI sink in the devicetree, or
+ * the OLDI sink has not been added yet. In any case,
+ * return.
+ * We don't want to have an OLDI node connected to DSS
+ * but not to any sink.
+ */
+ ret = dev_err_probe(tidss->dev, PTR_ERR(bridge),
+ "no panel/bridge for OLDI%u.\n",
+ oldi_instance);
+ goto err_put_node;
+ }
+
+ link_type = get_oldi_mode(child, &companion_instance);
+ if (link_type == OLDI_MODE_UNSUPPORTED) {
+ ret = dev_err_probe(tidss->dev, -EINVAL,
+ "OLDI%u: Unsupported OLDI connection.\n",
+ oldi_instance);
+ goto err_put_node;
+ } else if ((link_type == OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK) ||
+ (link_type == OLDI_MODE_CLONE_SINGLE_LINK)) {
+ /*
+ * The OLDI driver cannot properly support clone
+ * mode at present. Clone mode requires 2 working
+ * encoder-bridge pipelines fed from the same CRTC,
+ * which the DRM framework does not support yet. If
+ * there were, say, 2 OLDI sink bridges, each
+ * connected to an OLDI TX, they could not both be
+ * supported simultaneously.
+ * This driver still carries some clone-mode
+ * configuration code for the DSS hardware, for use
+ * once the DRM framework gains the infrastructure
+ * to support 2 encoder-bridge pipelines
+ * simultaneously.
+ * Until then, this driver errors out when it
+ * detects a clone mode configuration.
+ */
+ ret = dev_err_probe(tidss->dev, -EOPNOTSUPP,
+ "The OLDI driver does not support Clone Mode at present.\n");
+ goto err_put_node;
+ } else if (link_type == OLDI_MODE_SECONDARY_DUAL_LINK) {
+ /*
+ * This is the secondary OLDI node, which serves as a
+ * companion to the primary OLDI when it is configured
+ * for dual-link mode. Since the primary OLDI will be a
+ * part of the bridge chain, there is no need to add
+ * this one too. Continue on to the next OLDI node.
+ */
+ continue;
+ }
+
+ oldi = devm_drm_bridge_alloc(tidss->dev, struct tidss_oldi, bridge,
+ &tidss_oldi_bridge_funcs);
+ if (IS_ERR(oldi)) {
+ ret = PTR_ERR(oldi);
+ goto err_put_node;
+ }
+
+ oldi->parent_vp = parent_vp;
+ oldi->oldi_instance = oldi_instance;
+ oldi->companion_instance = companion_instance;
+ oldi->link_type = link_type;
+ oldi->dev = tidss->dev;
+ oldi->next_bridge = bridge;
+
+ /*
+ * Only the primary OLDI needs to reference the io-ctrl
+ * system registers and the serial clock.
+ * No check is needed for a secondary OLDI in dual-link mode,
+ * because the driver does not create a drm_bridge instance
+ * for it. The driver will, however, need to create a
+ * drm_bridge instance for a secondary OLDI in clone mode
+ * (once that is supported).
+ */
+ if (link_type != OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK) {
+ oldi->io_ctrl = syscon_regmap_lookup_by_phandle(child,
+ "ti,oldi-io-ctrl");
+ if (IS_ERR(oldi->io_ctrl)) {
+ ret = dev_err_probe(oldi->dev, PTR_ERR(oldi->io_ctrl),
+ "OLDI%u: syscon_regmap_lookup_by_phandle failed.\n",
+ oldi_instance);
+ goto err_put_node;
+ }
+
+ oldi->serial = of_clk_get_by_name(child, "serial");
+ if (IS_ERR(oldi->serial)) {
+ ret = dev_err_probe(oldi->dev, PTR_ERR(oldi->serial),
+ "OLDI%u: Failed to get serial clock.\n",
+ oldi_instance);
+ goto err_put_node;
+ }
+ }
+
+ /* Register the bridge. */
+ oldi->bridge.of_node = child;
+ oldi->bridge.driver_private = oldi;
+ oldi->bridge.timings = &default_tidss_oldi_timings;
+
+ tidss->oldis[tidss->num_oldis++] = oldi;
+ oldi->tidss = tidss;
+
+ drm_bridge_add(&oldi->bridge);
+ }
+
+ of_node_put(child);
+ of_node_put(oldi_parent);
+
+ return 0;
+
+err_put_node:
+ of_node_put(child);
+ of_node_put(oldi_parent);
+ return ret;
+}
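
Two numeric relationships in the code above are easy to miss: the OLDI
serial clock runs at 7x the pixel clock (mode->clock is in kHz), and the
achieved rate is accepted if it deviates by no more than 5% from the
request. A standalone sketch of that bookkeeping, written as plain C with
labs() from stdlib standing in for the kernel's dispc_pclk_diff():

    #include <stdlib.h>

    /* OLDI serializes 7 bit-slots per pixel clock cycle. */
    static unsigned long oldi_serial_hz(unsigned long pclk_khz)
    {
            return pclk_khz * 7 * 1000;
    }

    /*
     * Percentage difference between requested and achieved rate, using
     * the same /100 integer arithmetic as dispc_pclk_diff().
     */
    static unsigned int pclk_diff(unsigned long rate, unsigned long real_rate)
    {
            long r = rate / 100, rr = real_rate / 100;

            return (unsigned int)labs(((rr - r) * 100) / r);
    }

For example, a 74250 kHz pixel clock requests a 519.75 MHz serial clock,
and a warning is printed when pclk_diff(requested, achieved) exceeds 5.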
diff --git a/drivers/gpu/drm/tidss/tidss_oldi.h b/drivers/gpu/drm/tidss/tidss_oldi.h
new file mode 100644
index 000000000000..8cd535c5ee65
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_oldi.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2025 - Texas Instruments Incorporated
+ *
+ * Aradhya Bhatia <a-bhatia1@ti.com>
+ */
+
+#ifndef __TIDSS_OLDI_H__
+#define __TIDSS_OLDI_H__
+
+#include "tidss_drv.h"
+
+struct tidss_oldi;
+
+/* OLDI PORTS */
+#define OLDI_INPUT_PORT 0
+#define OLDI_OUTPUT_PORT 1
+
+/* Control MMR Registers */
+
+/* Register offsets */
+#define OLDI_PD_CTRL 0x100
+#define OLDI_LB_CTRL 0x104
+
+/* Power control bits */
+#define OLDI_PWRDOWN_TX(n) BIT(n)
+
+/* LVDS Bandgap reference Enable/Disable */
+#define OLDI_PWRDN_BG BIT(8)
+
+enum tidss_oldi_link_type {
+ OLDI_MODE_UNSUPPORTED,
+ OLDI_MODE_SINGLE_LINK,
+ OLDI_MODE_CLONE_SINGLE_LINK,
+ OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK,
+ OLDI_MODE_DUAL_LINK,
+ OLDI_MODE_SECONDARY_DUAL_LINK,
+};
+
+int tidss_oldi_init(struct tidss_device *tidss);
+void tidss_oldi_deinit(struct tidss_device *tidss);
+
+#endif /* __TIDSS_OLDI_H__ */
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 719412e6c346..142ae81951a0 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -200,7 +200,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
struct tidss_plane *tplane;
enum drm_plane_type type;
u32 possible_crtcs;
- u32 num_planes = tidss->feat->num_planes;
+ u32 num_planes = tidss->feat->num_vids;
u32 color_encodings = (BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709));
u32 color_ranges = (BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 8706763af8fb..8d3b7c4fa6a4 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -19,6 +19,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
+#include <drm/drm_panic.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -469,10 +470,28 @@ static void bochs_primary_plane_helper_atomic_update(struct drm_plane *plane,
bochs_hw_setformat(bochs, fb->format);
}
+static int bochs_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct bochs_device *bochs = to_bochs_device(plane->dev);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(bochs->fb_map);
+
+ if (plane->state && plane->state->fb) {
+ sb->format = plane->state->fb->format;
+ sb->width = plane->state->fb->width;
+ sb->height = plane->state->fb->height;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ sb->map[0] = map;
+ return 0;
+ }
+ return -ENODEV;
+}
+
static const struct drm_plane_helper_funcs bochs_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = bochs_primary_plane_helper_atomic_check,
.atomic_update = bochs_primary_plane_helper_atomic_update,
+ .get_scanout_buffer = bochs_primary_plane_helper_get_scanout_buffer,
};
static const struct drm_plane_funcs bochs_primary_plane_funcs = {
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
index 3148f5d3dbd6..1bcc67977f48 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
@@ -542,14 +542,15 @@ static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
bo->ttm = old_tt;
}
- err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
- KUNIT_EXPECT_EQ(test, err, 0);
- KUNIT_ASSERT_EQ(test, man->usage, size);
-
placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, placement);
ttm_bo_reserve(bo, false, false, NULL);
+
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, man->usage, size);
+
err = ttm_bo_validate(bo, placement, &ctx);
ttm_bo_unreserve(bo);
@@ -757,56 +758,6 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}
-static void ttm_bo_validate_swapout(struct kunit *test)
-{
- unsigned long size_big, size = ALIGN(BO_SIZE, PAGE_SIZE);
- enum ttm_bo_type bo_type = ttm_bo_type_device;
- struct ttm_buffer_object *bo_small, *bo_big;
- struct ttm_test_devices *priv = test->priv;
- struct ttm_operation_ctx ctx = { };
- struct ttm_placement *placement;
- u32 mem_type = TTM_PL_TT;
- struct ttm_place *place;
- struct sysinfo si;
- int err;
-
- si_meminfo(&si);
- size_big = ALIGN(((u64)si.totalram * si.mem_unit / 2), PAGE_SIZE);
-
- ttm_mock_manager_init(priv->ttm_dev, mem_type, size_big + size);
-
- place = ttm_place_kunit_init(test, mem_type, 0);
- placement = ttm_placement_kunit_init(test, place, 1);
-
- bo_small = kunit_kzalloc(test, sizeof(*bo_small), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, bo_small);
-
- drm_gem_private_object_init(priv->drm, &bo_small->base, size);
-
- err = ttm_bo_init_reserved(priv->ttm_dev, bo_small, bo_type, placement,
- PAGE_SIZE, &ctx, NULL, NULL,
- &dummy_ttm_bo_destroy);
- KUNIT_EXPECT_EQ(test, err, 0);
- dma_resv_unlock(bo_small->base.resv);
-
- bo_big = ttm_bo_kunit_init(test, priv, size_big, NULL);
-
- dma_resv_lock(bo_big->base.resv, NULL);
- err = ttm_bo_validate(bo_big, placement, &ctx);
- dma_resv_unlock(bo_big->base.resv);
-
- KUNIT_EXPECT_EQ(test, err, 0);
- KUNIT_EXPECT_NOT_NULL(test, bo_big->resource);
- KUNIT_EXPECT_EQ(test, bo_big->resource->mem_type, mem_type);
- KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, TTM_PL_SYSTEM);
- KUNIT_EXPECT_TRUE(test, bo_small->ttm->page_flags & TTM_TT_FLAG_SWAPPED);
-
- ttm_bo_put(bo_big);
- ttm_bo_put(bo_small);
-
- ttm_mock_manager_fini(priv->ttm_dev, mem_type);
-}
-
static void ttm_bo_validate_happy_evict(struct kunit *test)
{
u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
@@ -1201,7 +1152,6 @@ static struct kunit_case ttm_bo_validate_test_cases[] = {
KUNIT_CASE(ttm_bo_validate_move_fence_signaled),
KUNIT_CASE_PARAM(ttm_bo_validate_move_fence_not_signaled,
ttm_bo_validate_wait_gen_params),
- KUNIT_CASE(ttm_bo_validate_swapout),
KUNIT_CASE(ttm_bo_validate_happy_evict),
KUNIT_CASE(ttm_bo_validate_all_pinned_evict),
KUNIT_CASE(ttm_bo_validate_allowed_only_evict),
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
index b91c13f46225..7aaf0d1395ff 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -2,6 +2,9 @@
/*
* Copyright © 2023 Intel Corporation
*/
+
+#include <linux/export.h>
+
#include <drm/ttm/ttm_tt.h>
#include "ttm_kunit_helpers.h"
diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
index f6d1c8a2845d..d7eb6471f2ed 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
@@ -2,6 +2,9 @@
/*
* Copyright © 2023 Intel Corporation
*/
+
+#include <linux/export.h>
+
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index d27691f2e451..fca0a1a3c6fd 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -36,6 +36,7 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_resource.h>
#include <linux/agp_backend.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
index ffaab68bd5dd..32530c75f038 100644
--- a/drivers/gpu/drm/ttm/ttm_backup.c
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -4,6 +4,8 @@
*/
#include <drm/ttm/ttm_backup.h>
+
+#include <linux/export.h>
#include <linux/page-flags.h>
#include <linux/swap.h>
@@ -112,15 +114,8 @@ ttm_backup_backup_page(struct file *backup, struct page *page,
if (writeback && !folio_mapped(to_folio) &&
folio_clear_dirty_for_io(to_folio)) {
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- .nr_to_write = SWAP_CLUSTER_MAX,
- .range_start = 0,
- .range_end = LLONG_MAX,
- .for_reclaim = 1,
- };
folio_set_reclaim(to_folio);
- ret = shmem_writeout(to_folio, &wbc);
+ ret = shmem_writeout(to_folio, NULL, NULL);
if (!folio_test_writeback(to_folio))
folio_clear_reclaim(to_folio);
/*
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 08a23ab037cb..f4d9e68b21e7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -35,6 +35,7 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
+#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
@@ -46,6 +47,7 @@
#include <linux/dma-resv.h>
#include "ttm_module.h"
+#include "ttm_bo_internal.h"
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
@@ -524,11 +526,11 @@ static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *
return 0;
if (bo->deleted) {
- lret = ttm_bo_wait_ctx(bo, walk->ctx);
+ lret = ttm_bo_wait_ctx(bo, walk->arg.ctx);
if (!lret)
ttm_bo_cleanup_memtype_use(bo);
} else {
- lret = ttm_bo_evict(bo, walk->ctx);
+ lret = ttm_bo_evict(bo, walk->arg.ctx);
}
if (lret)
@@ -564,8 +566,10 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
struct ttm_bo_evict_walk evict_walk = {
.walk = {
.ops = &ttm_evict_walk_ops,
- .ctx = ctx,
- .ticket = ticket,
+ .arg = {
+ .ctx = ctx,
+ .ticket = ticket,
+ }
},
.place = place,
.evictor = evictor,
@@ -574,7 +578,7 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
};
s64 lret;
- evict_walk.walk.trylock_only = true;
+ evict_walk.walk.arg.trylock_only = true;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
/* One more attempt if we hit low limit? */
@@ -588,12 +592,12 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
/* Reset low limit */
evict_walk.try_low = evict_walk.hit_low = false;
/* If ticket-locking, repeat while making progress. */
- evict_walk.walk.trylock_only = false;
+ evict_walk.walk.arg.trylock_only = false;
retry:
do {
/* The walk may clear the evict_walk.walk.ticket field */
- evict_walk.walk.ticket = ticket;
+ evict_walk.walk.arg.ticket = ticket;
evict_walk.evicted = 0;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
} while (!lret && evict_walk.evicted);
@@ -1104,7 +1108,7 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
struct ttm_place place = {.mem_type = bo->resource->mem_type};
struct ttm_bo_swapout_walk *swapout_walk =
container_of(walk, typeof(*swapout_walk), walk);
- struct ttm_operation_ctx *ctx = walk->ctx;
+ struct ttm_operation_ctx *ctx = walk->arg.ctx;
s64 ret;
/*
@@ -1215,8 +1219,10 @@ s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
struct ttm_bo_swapout_walk swapout_walk = {
.walk = {
.ops = &ttm_swap_ops,
- .ctx = ctx,
- .trylock_only = true,
+ .arg = {
+ .ctx = ctx,
+ .trylock_only = true,
+ },
},
.gfp_flags = gfp_flags,
};
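
The mechanical part of the TTM change moves ctx, ticket and trylock_only
from struct ttm_lru_walk into an embedded struct ttm_lru_walk_arg, so the
same argument bundle can be handed to the LRU cursor as well. Reduced to
its essentials (field names as used above; the full definitions live in
the TTM headers):

    struct ttm_lru_walk_arg {
            struct ttm_operation_ctx *ctx;
            struct ww_acquire_ctx *ticket;
            bool trylock_only;
    };

    struct ttm_lru_walk {
            const struct ttm_lru_walk_ops *ops;
            struct ttm_lru_walk_arg arg;    /* was: ctx, ticket, trylock_only */
    };

Callers initialize the bundle once and pass &walk->arg to the cursor, as
ttm_lru_walk_for_evict() does with ttm_bo_lru_for_each_reserved_guarded().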
diff --git a/drivers/gpu/drm/ttm/ttm_bo_internal.h b/drivers/gpu/drm/ttm/ttm_bo_internal.h
new file mode 100644
index 000000000000..9d8b747a34db
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_internal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+
+#ifndef _TTM_BO_INTERNAL_H_
+#define _TTM_BO_INTERNAL_H_
+
+#include <drm/ttm/ttm_bo.h>
+
+/**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+ kref_get(&bo->kref);
+}
+
+/**
+ * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
+ * its refcount has already reached zero.
+ * @bo: The buffer object.
+ *
+ * Used to reference a TTM buffer object in lookups where the object is removed
+ * from the lookup structure during the destructor and for RCU lookups.
+ *
+ * Returns: @bo if the referencing was successful, NULL otherwise.
+ */
+static inline __must_check struct ttm_buffer_object *
+ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
+{
+ if (!kref_get_unless_zero(&bo->kref))
+ return NULL;
+ return bo;
+}
+
+#endif
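
ttm_bo_get_unless_zero() exists for lookup structures that can race with
the destructor: the object may still be findable while its refcount has
already dropped to zero. A sketch of the intended calling pattern, with a
hypothetical RCU-protected lookup table standing in for a real structure:

    /* hypothetical lookup table; only the refcounting pattern matters */
    static struct ttm_buffer_object __rcu *lookup_table[16];

    static struct ttm_buffer_object *bo_lookup(unsigned int idx)
    {
            struct ttm_buffer_object *bo;

            rcu_read_lock();
            bo = rcu_dereference(lookup_table[idx]);
            if (bo)
                    bo = ttm_bo_get_unless_zero(bo);  /* NULL if dying */
            rcu_read_unlock();

            return bo;  /* caller balances with ttm_bo_put() */
    }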
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bd90404ea609..acbbca9d5c92 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -28,6 +28,8 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+
+#include <linux/export.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
@@ -37,6 +39,8 @@
#include <drm/drm_cache.h>
+#include "ttm_bo_internal.h"
+
struct ttm_transfer_obj {
struct ttm_buffer_object base;
struct ttm_buffer_object *bo;
@@ -379,6 +383,32 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
}
/**
+ * ttm_bo_kmap_try_from_panic
+ *
+ * @bo: The buffer object
+ * @page: The page to map
+ *
+ * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
+ * This should only be called from the panic handler, and only after making
+ * sure the bo is the one being displayed, so that it is properly allocated
+ * and protected.
+ *
+ * Returns the vaddr, that you can use to write to the bo, and that you should
+ * pass to kunmap_local() when you're done with this page, or NULL if the bo
+ * is in iomem.
+ */
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
+{
+ if (page + 1 > PFN_UP(bo->resource->size))
+ return NULL;
+
+ if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
+ return kmap_local_page_try_from_panic(bo->ttm->pages[page]);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
+
+/**
* ttm_bo_kmap
*
* @bo: The buffer object.
@@ -770,14 +800,15 @@ error_destroy_tt:
return ret;
}
-static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
- struct ttm_buffer_object *bo,
- bool *needs_unlock)
+static bool ttm_lru_walk_trylock(struct ttm_bo_lru_cursor *curs,
+ struct ttm_buffer_object *bo)
{
- *needs_unlock = false;
+ struct ttm_operation_ctx *ctx = curs->arg->ctx;
+
+ curs->needs_unlock = false;
if (dma_resv_trylock(bo->base.resv)) {
- *needs_unlock = true;
+ curs->needs_unlock = true;
return true;
}
@@ -789,27 +820,27 @@ static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
return false;
}
-static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
- struct ttm_buffer_object *bo,
- bool *needs_unlock)
+static int ttm_lru_walk_ticketlock(struct ttm_bo_lru_cursor *curs,
+ struct ttm_buffer_object *bo)
{
+ struct ttm_lru_walk_arg *arg = curs->arg;
struct dma_resv *resv = bo->base.resv;
int ret;
- if (walk->ctx->interruptible)
- ret = dma_resv_lock_interruptible(resv, walk->ticket);
+ if (arg->ctx->interruptible)
+ ret = dma_resv_lock_interruptible(resv, arg->ticket);
else
- ret = dma_resv_lock(resv, walk->ticket);
+ ret = dma_resv_lock(resv, arg->ticket);
if (!ret) {
- *needs_unlock = true;
+ curs->needs_unlock = true;
/*
* Only a single ticketlock per loop. Ticketlocks are prone
* to return -EDEADLK causing the eviction to fail, so
* after waiting for the ticketlock, revert back to
* trylocking for this walk.
*/
- walk->ticket = NULL;
+ arg->ticket = NULL;
} else if (ret == -EDEADLK) {
/* Caller needs to exit the ww transaction. */
ret = -ENOSPC;
@@ -818,12 +849,6 @@ static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
return ret;
}
-static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
-{
- if (locked)
- dma_resv_unlock(bo->base.resv);
-}
-
/**
* ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
* valid items.
@@ -858,64 +883,21 @@ static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
struct ttm_resource_manager *man, s64 target)
{
- struct ttm_resource_cursor cursor;
- struct ttm_resource *res;
+ struct ttm_bo_lru_cursor cursor;
+ struct ttm_buffer_object *bo;
s64 progress = 0;
s64 lret;
- spin_lock(&bdev->lru_lock);
- ttm_resource_cursor_init(&cursor, man);
- ttm_resource_manager_for_each_res(&cursor, res) {
- struct ttm_buffer_object *bo = res->bo;
- bool bo_needs_unlock = false;
- bool bo_locked = false;
- int mem_type;
-
- /*
- * Attempt a trylock before taking a reference on the bo,
- * since if we do it the other way around, and the trylock fails,
- * we need to drop the lru lock to put the bo.
- */
- if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock))
- bo_locked = true;
- else if (!walk->ticket || walk->ctx->no_wait_gpu ||
- walk->trylock_only)
- continue;
-
- if (!ttm_bo_get_unless_zero(bo)) {
- ttm_lru_walk_unlock(bo, bo_needs_unlock);
- continue;
- }
-
- mem_type = res->mem_type;
- spin_unlock(&bdev->lru_lock);
-
- lret = 0;
- if (!bo_locked)
- lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);
-
- /*
- * Note that in between the release of the lru lock and the
- * ticketlock, the bo may have switched resource,
- * and also memory type, since the resource may have been
- * freed and allocated again with a different memory type.
- * In that case, just skip it.
- */
- if (!lret && bo->resource && bo->resource->mem_type == mem_type)
- lret = walk->ops->process_bo(walk, bo);
-
- ttm_lru_walk_unlock(bo, bo_needs_unlock);
- ttm_bo_put(bo);
+ ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &walk->arg, bo) {
+ lret = walk->ops->process_bo(walk, bo);
if (lret == -EBUSY || lret == -EALREADY)
lret = 0;
progress = (lret < 0) ? lret : progress + lret;
-
- spin_lock(&bdev->lru_lock);
if (progress < 0 || progress >= target)
break;
}
- ttm_resource_cursor_fini(&cursor);
- spin_unlock(&bdev->lru_lock);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
return progress;
}
@@ -953,44 +935,87 @@ EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);
* ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
* @curs: The ttm_bo_lru_cursor to initialize.
* @man: The ttm resource_manager whose LRU lists to iterate over.
- * @ctx: The ttm_operation_ctx to govern the locking.
+ * @arg: The ttm_lru_walk_arg to govern the walk.
*
- * Initialize a struct ttm_bo_lru_cursor. Currently only trylocking
- * or prelocked buffer objects are available as detailed by
- * @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not
- * supported.
+ * Initialize a struct ttm_bo_lru_cursor.
*
* Return: Pointer to @curs. The function does not fail.
*/
struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
struct ttm_resource_manager *man,
- struct ttm_operation_ctx *ctx)
+ struct ttm_lru_walk_arg *arg)
{
memset(curs, 0, sizeof(*curs));
ttm_resource_cursor_init(&curs->res_curs, man);
- curs->ctx = ctx;
+ curs->arg = arg;
return curs;
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_init);
static struct ttm_buffer_object *
-ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs)
+__ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
{
- struct ttm_buffer_object *bo = res->bo;
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+ struct ttm_resource *res = NULL;
+ struct ttm_buffer_object *bo;
+ struct ttm_lru_walk_arg *arg = curs->arg;
+ bool first = !curs->bo;
- if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock))
- return NULL;
+ ttm_bo_lru_cursor_cleanup_bo(curs);
- if (!ttm_bo_get_unless_zero(bo)) {
- if (curs->needs_unlock)
- dma_resv_unlock(bo->base.resv);
- return NULL;
+ spin_lock(lru_lock);
+ for (;;) {
+ int mem_type, ret = 0;
+ bool bo_locked = false;
+
+ if (first) {
+ res = ttm_resource_manager_first(&curs->res_curs);
+ first = false;
+ } else {
+ res = ttm_resource_manager_next(&curs->res_curs);
+ }
+ if (!res)
+ break;
+
+ bo = res->bo;
+ if (ttm_lru_walk_trylock(curs, bo))
+ bo_locked = true;
+ else if (!arg->ticket || arg->ctx->no_wait_gpu || arg->trylock_only)
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ continue;
+ }
+
+ mem_type = res->mem_type;
+ spin_unlock(lru_lock);
+ if (!bo_locked)
+ ret = ttm_lru_walk_ticketlock(curs, bo);
+
+ /*
+ * Note that in between the release of the lru lock and the
+ * ticketlock, the bo may have switched resource,
+ * and also memory type, since the resource may have been
+ * freed and allocated again with a different memory type.
+ * In that case, just skip it.
+ */
+ curs->bo = bo;
+ if (!ret && bo->resource && bo->resource->mem_type == mem_type)
+ return bo;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ if (ret && ret != -EALREADY)
+ return ERR_PTR(ret);
+
+ spin_lock(lru_lock);
}
- curs->bo = bo;
- return bo;
+ spin_unlock(lru_lock);
+ return res ? bo : NULL;
}
/**
@@ -1004,25 +1029,7 @@ ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *cur
*/
struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
{
- spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
- struct ttm_resource *res = NULL;
- struct ttm_buffer_object *bo;
-
- ttm_bo_lru_cursor_cleanup_bo(curs);
-
- spin_lock(lru_lock);
- for (;;) {
- res = ttm_resource_manager_next(&curs->res_curs);
- if (!res)
- break;
-
- bo = ttm_bo_from_res_reserved(res, curs);
- if (bo)
- break;
- }
-
- spin_unlock(lru_lock);
- return res ? bo : NULL;
+ return __ttm_bo_lru_cursor_next(curs);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
@@ -1036,21 +1043,8 @@ EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
*/
struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
{
- spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
- struct ttm_buffer_object *bo;
- struct ttm_resource *res;
-
- spin_lock(lru_lock);
- res = ttm_resource_manager_first(&curs->res_curs);
- if (!res) {
- spin_unlock(lru_lock);
- return NULL;
- }
-
- bo = ttm_bo_from_res_reserved(res, curs);
- spin_unlock(lru_lock);
-
- return bo ? bo : ttm_bo_lru_cursor_next(curs);
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ return __ttm_bo_lru_cursor_next(curs);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index bdfa6ecfef05..b47020fca199 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -31,6 +31,8 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <linux/export.h>
+
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 02e797fd1891..c3e2fcbdd2cc 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -28,6 +28,7 @@
#define pr_fmt(fmt) "[TTM DEVICE] " fmt
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_bo.h>
@@ -36,6 +37,7 @@
#include <drm/ttm/ttm_placement.h>
#include "ttm_module.h"
+#include "ttm_bo_internal.h"
/*
* ttm_global_mutex - protecting the global state
@@ -123,6 +125,28 @@ out:
return ret;
}
+/**
+ * ttm_device_prepare_hibernation - move GTT BOs to shmem for hibernation.
+ *
+ * @bdev: A pointer to a struct ttm_device to prepare hibernation for.
+ *
+ * Return: 0 on success, negative number on failure.
+ */
+int ttm_device_prepare_hibernation(struct ttm_device *bdev)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false,
+ };
+ int ret;
+
+ do {
+ ret = ttm_device_swapout(bdev, &ctx, GFP_KERNEL);
+ } while (ret > 0);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_device_prepare_hibernation);
+
/*
* A buffer object shrink method that tries to swap out the first
* buffer object on the global::swap_lru list.
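A minimal sketch of where a driver might call ttm_device_prepare_hibernation(), assuming a hypothetical freeze hook (the driver structure and callback name are illustrative only):

static int my_driver_freeze(struct my_device *mdev)
{
	/* Swap GTT BOs to shmem before the hibernation image is written. */
	int ret = ttm_device_prepare_hibernation(&mdev->bdev);

	if (ret)
		drm_err(&mdev->drm, "failed to prepare hibernation: %d\n", ret);
	return ret;
}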
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index f1c60fa80c2d..bc7a83a9fe44 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -26,6 +26,8 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo.h>
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index c2ea865be657..baf27c70a419 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -31,6 +31,7 @@
* cause they are rather slow compared to alloc_pages+map.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
@@ -1132,7 +1133,9 @@ void ttm_pool_fini(struct ttm_pool *pool)
}
EXPORT_SYMBOL(ttm_pool_fini);
-/* As long as pages are available make sure to release at least one */
+/* Free average pool number of pages. */
+#define TTM_SHRINKER_BATCH ((1 << (MAX_PAGE_ORDER / 2)) * NR_PAGE_ORDERS)
+
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
@@ -1140,9 +1143,12 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
do
num_freed += ttm_pool_shrink();
- while (!num_freed && atomic_long_read(&allocated_pages));
+ while (num_freed < sc->nr_to_scan &&
+ atomic_long_read(&allocated_pages));
+
+ sc->nr_scanned = num_freed;
- return num_freed;
+ return num_freed ?: SHRINK_STOP;
}
/* Return the number of pages available or SHRINK_EMPTY if we have none */
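As a worked example of the batch size above (the exact value depends on the kernel configuration): with the common MAX_PAGE_ORDER of 10, NR_PAGE_ORDERS is 11, so TTM_SHRINKER_BATCH evaluates to (1 << 5) * 11 = 352 pages per shrinker invocation.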
@@ -1233,7 +1239,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
unsigned int i;
- if (!pool->use_dma_alloc) {
+ if (!pool->use_dma_alloc && pool->nid == NUMA_NO_NODE) {
seq_puts(m, "unused\n");
return 0;
}
@@ -1242,7 +1248,12 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
spin_lock(&shrinker_lock);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
- seq_puts(m, "DMA ");
+ if (!ttm_pool_select_type(pool, i, 0))
+ continue;
+ if (pool->use_dma_alloc)
+ seq_puts(m, "DMA ");
+ else
+ seq_printf(m, "N%d ", pool->nid);
switch (i) {
case ttm_cached:
seq_puts(m, "\t:");
@@ -1266,10 +1277,15 @@ EXPORT_SYMBOL(ttm_pool_debugfs);
/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
- struct shrink_control sc = { .gfp_mask = GFP_NOFS };
+ struct shrink_control sc = {
+ .gfp_mask = GFP_NOFS,
+ .nr_to_scan = TTM_SHRINKER_BATCH,
+ };
+ unsigned long count;
fs_reclaim_acquire(GFP_KERNEL);
- seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(mm_shrinker, &sc),
+ count = ttm_pool_shrinker_count(mm_shrinker, &sc);
+ seq_printf(m, "%lu/%lu\n", count,
ttm_pool_shrinker_scan(mm_shrinker, &sc));
fs_reclaim_release(GFP_KERNEL);
@@ -1324,6 +1340,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
mm_shrinker->count_objects = ttm_pool_shrinker_count;
mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
+ mm_shrinker->batch = TTM_SHRINKER_BATCH;
mm_shrinker->seeks = 1;
shrinker_register(mm_shrinker);
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index ae11d07eb63a..db854b581d83 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -34,6 +34,8 @@
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/drm_mm.h>
+
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 769b0ca9be47..e2c82ad07eb4 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -23,6 +23,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
@@ -557,6 +558,9 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
cond_resched();
} while (!ret);
+ if (ret && ret != -ENOENT)
+ return ret;
+
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 698cd4bf5e46..506e257dfba8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -33,6 +33,7 @@
#include <linux/cc_platform.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/sched.h>
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index bb7815599435..c41476ddde68 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -16,7 +16,6 @@
*/
#include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
#include <linux/vmalloc.h>
#include "v3d_drv.h"
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 42df9d3567e7..cb9df8822472 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -745,17 +745,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
mutex_unlock(&v3d->reset_lock);
- return DRM_GPU_SCHED_STAT_NOMINAL;
-}
-
-static void
-v3d_sched_skip_reset(struct drm_sched_job *sched_job)
-{
- struct drm_gpu_scheduler *sched = sched_job->sched;
-
- spin_lock(&sched->job_list_lock);
- list_add(&sched_job->list, &sched->pending_list);
- spin_unlock(&sched->job_list_lock);
+ return DRM_GPU_SCHED_STAT_RESET;
}
static enum drm_gpu_sched_stat
@@ -776,8 +766,7 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
*timedout_ctca = ctca;
*timedout_ctra = ctra;
- v3d_sched_skip_reset(sched_job);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
return v3d_gpu_reset_for_timeout(v3d, sched_job);
@@ -822,8 +811,7 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
if (job->timedout_batches != batches) {
job->timedout_batches = batches;
- v3d_sched_skip_reset(sched_job);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
return v3d_gpu_reset_for_timeout(v3d, sched_job);
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index 4ff5de46fb22..5171ffe9012d 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -169,7 +169,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
job->file = file_priv;
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
- 1, v3d_priv);
+ 1, v3d_priv, file_priv->client_id);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 163d092bd973..07c91b450f93 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -560,6 +560,12 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
if (ret)
return ret;
+ ret = drm_connector_hdmi_audio_init(connector, dev->dev,
+ &vc4_hdmi_audio_funcs,
+ 8, 0, false, -1);
+ if (ret)
+ return ret;
+
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/*
@@ -2286,7 +2292,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
ret = drm_connector_hdmi_audio_init(&vc4_hdmi->connector, dev,
- &vc4_hdmi_audio_funcs, 8, false,
+ &vc4_hdmi_audio_funcs, 8, 0, false,
-1);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index f5b167417428..8f983edb81ff 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -530,6 +530,7 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -568,7 +569,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
mode_cmd = &mode_cmd_local;
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 2752ab4f1c97..260c64733972 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -32,7 +32,7 @@
#include <linux/dma-buf.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
@@ -52,7 +52,7 @@
static struct vgem_device {
struct drm_device drm;
- struct platform_device *platform;
+ struct faux_device *faux_dev;
} *vgem_device;
static int vgem_open(struct drm_device *dev, struct drm_file *file)
@@ -127,27 +127,27 @@ static const struct drm_driver vgem_driver = {
static int __init vgem_init(void)
{
int ret;
- struct platform_device *pdev;
+ struct faux_device *fdev;
- pdev = platform_device_register_simple("vgem", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ fdev = faux_device_create("vgem", NULL, NULL);
+ if (!fdev)
+ return -ENODEV;
- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ if (!devres_open_group(&fdev->dev, NULL, GFP_KERNEL)) {
ret = -ENOMEM;
goto out_unregister;
}
- dma_coerce_mask_and_coherent(&pdev->dev,
+ dma_coerce_mask_and_coherent(&fdev->dev,
DMA_BIT_MASK(64));
- vgem_device = devm_drm_dev_alloc(&pdev->dev, &vgem_driver,
+ vgem_device = devm_drm_dev_alloc(&fdev->dev, &vgem_driver,
struct vgem_device, drm);
if (IS_ERR(vgem_device)) {
ret = PTR_ERR(vgem_device);
goto out_devres;
}
- vgem_device->platform = pdev;
+ vgem_device->faux_dev = fdev;
/* Final step: expose the device/driver to userspace */
ret = drm_dev_register(&vgem_device->drm, 0);
@@ -157,19 +157,19 @@ static int __init vgem_init(void)
return 0;
out_devres:
- devres_release_group(&pdev->dev, NULL);
+ devres_release_group(&fdev->dev, NULL);
out_unregister:
- platform_device_unregister(pdev);
+ faux_device_destroy(fdev);
return ret;
}
static void __exit vgem_exit(void)
{
- struct platform_device *pdev = vgem_device->platform;
+ struct faux_device *fdev = vgem_device->faux_dev;
drm_dev_unregister(&vgem_device->drm);
- devres_release_group(&pdev->dev, NULL);
- platform_device_unregister(pdev);
+ devres_release_group(&fdev->dev, NULL);
+ faux_device_destroy(fdev);
}
module_init(vgem_init);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 59a45e74a641..e5805ca646c7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -66,6 +66,7 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
static int
virtio_gpu_framebuffer_init(struct drm_device *dev,
struct virtio_gpu_framebuffer *vgfb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
@@ -73,7 +74,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
vgfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vgfb->base, info, mode_cmd);
ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
if (ret) {
@@ -293,6 +294,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
static struct drm_framebuffer *
virtio_gpu_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj = NULL;
@@ -314,7 +316,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
+ ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, info, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index e32e680c7197..71c6ccad4b99 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -130,10 +130,10 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
static void virtio_gpu_shutdown(struct virtio_device *vdev)
{
- /*
- * drm does its own synchronization on shutdown.
- * Do nothing here, opt out of device reset.
- */
+ struct drm_device *dev = vdev->priv;
+
+ /* stop talking to the device */
+ drm_dev_unplug(dev);
}
static void virtio_gpu_config_changed(struct virtio_device *vdev)
diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile
index 9ded37b67a46..5750f0bd9d40 100644
--- a/drivers/gpu/drm/vkms/tests/Makefile
+++ b/drivers/gpu/drm/vkms/tests/Makefile
@@ -1,3 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms_config_test.o
+vkms-kunit-tests-y := \
+ vkms_config_test.o \
+ vkms_format_test.o
+
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms-kunit-tests.o
diff --git a/drivers/gpu/drm/vkms/tests/vkms_format_test.c b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
new file mode 100644
index 000000000000..2e1daef94831
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include <drm/drm_fixed.h>
+#include <drm/drm_fourcc.h>
+
+#include "../../drm_crtc_internal.h"
+
+#include "../vkms_formats.h"
+
+#define TEST_BUFF_SIZE 50
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+/**
+ * struct pixel_yuv_u8 - Internal representation of a pixel color.
+ * @y: Luma value, stored in 8 bits, without padding, using
+ * machine endianness
+ * @u: Blue difference chroma value, stored in 8 bits, without padding, using
+ * machine endianness
+ * @v: Red difference chroma value, stored in 8 bits, without padding, using
+ * machine endianness
+ */
+struct pixel_yuv_u8 {
+ u8 y, u, v;
+};
+
+/*
+ * struct yuv_u8_to_argb_u16_case - Reference values to test the color
+ * conversions in VKMS from YUV to ARGB
+ *
+ * @encoding: Encoding used to convert RGB to YUV
+ * @range: Range used to convert RGB to YUV
+ * @n_colors: Count of test colors in this case
+ * @format_pair.name: Name used for this color conversion, used to
+ * clarify the test results
+ * @format_pair.argb: ARGB color tested
+ * @format_pair.yuv: Same color as @format_pair.argb, but converted to
+ * YUV using @encoding and @range.
+ */
+struct yuv_u8_to_argb_u16_case {
+ enum drm_color_encoding encoding;
+ enum drm_color_range range;
+ size_t n_colors;
+ struct format_pair {
+ char *name;
+ struct pixel_yuv_u8 yuv;
+ struct pixel_argb_u16 argb;
+ } colors[TEST_BUFF_SIZE];
+};
+
+/*
+ * The YUV color representations were acquired via the colour python framework.
+ * Below are the function calls used for generating each case.
+ *
+ * For more information go to the docs:
+ * https://colour.readthedocs.io/en/master/generated/colour.RGB_to_YCbCr.html
+ */
+static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = False,
+ * out_int = True)
+ *
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT601 full range using the ITU-R BT.601 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT601,
+ .range = DRM_COLOR_YCBCR_FULL_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4c, 0x55, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x96, 0x2c, 0x15 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1d, 0xff, 0x6b }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = True,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT601 limited range using the ITU-R BT.601 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT601,
+ .range = DRM_COLOR_YCBCR_LIMITED_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x51, 0x5a, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x91, 0x36, 0x22 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x29, 0xf0, 0x6e }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = False,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT709 full range using the ITU-R BT.709 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT709,
+ .range = DRM_COLOR_YCBCR_FULL_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x36, 0x63, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xb6, 0x1e, 0x0c }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x12, 0xff, 0x74 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * in_bits = 16,
+ * int_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = True,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT709 limited range using the ITU-R BT.709 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT709,
+ .range = DRM_COLOR_YCBCR_LIMITED_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x3f, 0x66, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xad, 0x2a, 0x1a }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x20, 0xf0, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = False,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT2020 full range using the ITU-R BT.2020 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT2020,
+ .range = DRM_COLOR_YCBCR_FULL_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x43, 0x5c, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xad, 0x24, 0x0b }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x0f, 0xff, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = True,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT2020 limited range using the ITU-R BT.2020 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT2020,
+ .range = DRM_COLOR_YCBCR_LIMITED_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4a, 0x61, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xa4, 0x2f, 0x19 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1d, 0xf0, 0x77 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+};
+
+/*
+ * vkms_format_test_yuv_u8_to_argb_u16 - Testing the conversion from YUV
+ * colors to ARGB colors in VKMS
+ *
+ * This test will use the functions get_conversion_matrix_to_argb_u16 and
+ * argb_u16_from_yuv888 to convert YUV colors (stored in
+ * yuv_u8_to_argb_u16_cases) into ARGB colors.
+ *
+ * The conversion between YUV and RGB is not fully reversible, so there may be
+ * some difference between the expected value and the result.
+ * In addition, there may be some rounding error as the input color is 8 bits
+ * and the output color is 16 bits.
+ */
+static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
+{
+ const struct yuv_u8_to_argb_u16_case *param = test->param_value;
+ struct pixel_argb_u16 argb;
+
+ for (size_t i = 0; i < param->n_colors; i++) {
+ const struct format_pair *color = &param->colors[i];
+ struct conversion_matrix matrix;
+
+ get_conversion_matrix_to_argb_u16
+ (DRM_FORMAT_NV12, param->encoding, param->range, &matrix);
+
+ argb = argb_u16_from_yuv888(color->yuv.y, color->yuv.u, color->yuv.v, &matrix);
+
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.a, color->argb.a), 0x1ff,
+ "On the A channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.a, argb.a);
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.r, color->argb.r), 0x1ff,
+ "On the R channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.r, argb.r);
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.g, color->argb.g), 0x1ff,
+ "On the G channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.g, argb.g);
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.b, color->argb.b), 0x1ff,
+ "On the B channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.b, argb.b);
+ }
+}
+
+static void vkms_format_test_yuv_u8_to_argb_u16_case_desc(struct yuv_u8_to_argb_u16_case *t,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s - %s",
+ drm_get_color_encoding_name(t->encoding), drm_get_color_range_name(t->range));
+}
+
+KUNIT_ARRAY_PARAM(yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_cases,
+ vkms_format_test_yuv_u8_to_argb_u16_case_desc
+);
+
+static struct kunit_case vkms_format_test_cases[] = {
+ KUNIT_CASE_PARAM(vkms_format_test_yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_gen_params),
+ {}
+};
+
+static struct kunit_suite vkms_format_test_suite = {
+ .name = "vkms-format",
+ .test_cases = vkms_format_test_cases,
+};
+
+kunit_test_suite(vkms_format_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kunit test for vkms format conversion");
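For context on the 0x1ff tolerance used in the expectations above: the 8-bit inputs are scaled to 16 bits by multiplying by 257 (0xff * 257 = 0xffff, as done in argb_u16_from_yuv888() in vkms_formats.c below), so one 8-bit quantization step corresponds to 257 in the 16-bit result, and 0x1ff (511) allows just under two such steps of combined conversion and rounding error.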
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 8c9898b9055d..e60573e0f3e9 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -302,8 +302,6 @@ struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *pri
vkms_out->composer_workq = drmm_alloc_ordered_workqueue(dev, "vkms_composer", 0);
if (IS_ERR(vkms_out->composer_workq))
return ERR_CAST(vkms_out->composer_workq);
- if (!vkms_out->composer_workq)
- return ERR_PTR(-ENOMEM);
return vkms_out;
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index a24d1655f7b8..e8472d9b6e3b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -10,7 +10,7 @@
*/
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/dma-mapping.h>
#include <drm/clients/drm_client_setup.h>
@@ -149,27 +149,27 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
static int vkms_create(struct vkms_config *config)
{
int ret;
- struct platform_device *pdev;
+ struct faux_device *fdev;
struct vkms_device *vkms_device;
const char *dev_name;
dev_name = vkms_config_get_device_name(config);
- pdev = platform_device_register_simple(dev_name, -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ fdev = faux_device_create(dev_name, NULL, NULL);
+ if (!fdev)
+ return -ENODEV;
- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ if (!devres_open_group(&fdev->dev, NULL, GFP_KERNEL)) {
ret = -ENOMEM;
goto out_unregister;
}
- vkms_device = devm_drm_dev_alloc(&pdev->dev, &vkms_driver,
+ vkms_device = devm_drm_dev_alloc(&fdev->dev, &vkms_driver,
struct vkms_device, drm);
if (IS_ERR(vkms_device)) {
ret = PTR_ERR(vkms_device);
goto out_devres;
}
- vkms_device->platform = pdev;
+ vkms_device->faux_dev = fdev;
vkms_device->config = config;
config->dev = vkms_device;
@@ -203,9 +203,9 @@ static int vkms_create(struct vkms_config *config)
return 0;
out_devres:
- devres_release_group(&pdev->dev, NULL);
+ devres_release_group(&fdev->dev, NULL);
out_unregister:
- platform_device_unregister(pdev);
+ faux_device_destroy(fdev);
return ret;
}
@@ -231,19 +231,19 @@ static int __init vkms_init(void)
static void vkms_destroy(struct vkms_config *config)
{
- struct platform_device *pdev;
+ struct faux_device *fdev;
if (!config->dev) {
DRM_INFO("vkms_device is NULL.\n");
return;
}
- pdev = config->dev->platform;
+ fdev = config->dev->faux_dev;
drm_dev_unregister(&config->dev->drm);
drm_atomic_helper_shutdown(&config->dev->drm);
- devres_release_group(&pdev->dev, NULL);
- platform_device_unregister(pdev);
+ devres_release_group(&fdev->dev, NULL);
+ faux_device_destroy(fdev);
config->dev = NULL;
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index a74a7fc3a056..8013c31efe3b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -45,6 +45,23 @@ struct vkms_frame_info {
unsigned int rotation;
};
+/**
+ * struct pixel_argb_u16 - Internal representation of a pixel color.
+ * @a: Alpha component value, stored in 16 bits, without padding, using
+ * machine endianness
+ * @r: Red component value, stored in 16 bits, without padding, using
+ * machine endianness
+ * @g: Green component value, stored in 16 bits, without padding, using
+ * machine endianness
+ * @b: Blue component value, stored in 16 bits, without padding, using
+ * machine endianness
+ *
+ * The goal of this structure is to keep enough precision to ensure
+ * correct composition results in VKMS and to simplify color
+ * manipulation by splitting each component into its own field.
+ * Caution: the byte ordering of this structure is machine-dependent;
+ * you can't cast it directly to AR48 or xR48.
+ */
struct pixel_argb_u16 {
u16 a, r, g, b;
};
@@ -103,16 +120,34 @@ typedef void (*pixel_read_line_t)(const struct vkms_plane_state *plane, int x_st
struct pixel_argb_u16 out_pixel[]);
/**
+ * struct conversion_matrix - Matrix to use for a specific encoding and range
+ *
+ * @matrix: Conversion matrix from yuv to rgb. The matrix is stored in a row-major manner and is
+ * used to compute rgb values from yuv values:
+ * [[r],[g],[b]] = @matrix * [[y],[u],[v]]
+ * OR for yvu formats:
+ * [[r],[g],[b]] = @matrix * [[y],[v],[u]]
+ * The values of the matrix are signed fixed-point values with a 32-bit fractional part.
+ * @y_offset: Offset to apply on the y value.
+ */
+struct conversion_matrix {
+ s64 matrix[3][3];
+ int y_offset;
+};
+
+/**
* struct vkms_plane_state - Driver specific plane state
* @base: base plane state
* @frame_info: data required for composing computation
* @pixel_read_line: function to read a pixel line in this plane. The creator of a
* struct vkms_plane_state must ensure that this pointer is valid
+ * @conversion_matrix: matrix used for yuv formats to convert to rgb
*/
struct vkms_plane_state {
struct drm_shadow_plane_state base;
struct vkms_frame_info *frame_info;
pixel_read_line_t pixel_read_line;
+ struct conversion_matrix conversion_matrix;
};
struct vkms_plane {
@@ -197,13 +232,13 @@ struct vkms_config;
* struct vkms_device - Description of a VKMS device
*
* @drm - Base device in DRM
- * @platform - Associated platform device
+ * @faux_dev - Associated faux device
* @output - Configuration and sub-components of the VKMS device
* @config: Configuration used in this VKMS device
*/
struct vkms_device {
struct drm_device drm;
- struct platform_device *platform;
+ struct faux_device *faux_dev;
const struct vkms_config *config;
};
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 30a64ecca87c..6d0227c6635a 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -7,6 +7,8 @@
#include <drm/drm_rect.h>
#include <drm/drm_fixed.h>
+#include <kunit/visibility.h>
+
#include "vkms_formats.h"
/**
@@ -140,6 +142,51 @@ static void packed_pixels_addr_1x1(const struct vkms_frame_info *frame_info,
*addr = (u8 *)frame_info->map[0].vaddr + offset;
}
+/**
+ * get_subsampling() - Get the subsampling divisor value in a specific direction
+ *
+ * @format: format to extract the subsampling from
+ * @direction: direction of the subsampling requested
+ */
+static int get_subsampling(const struct drm_format_info *format,
+ enum pixel_read_direction direction)
+{
+ switch (direction) {
+ case READ_BOTTOM_TO_TOP:
+ case READ_TOP_TO_BOTTOM:
+ return format->vsub;
+ case READ_RIGHT_TO_LEFT:
+ case READ_LEFT_TO_RIGHT:
+ return format->hsub;
+ }
+ WARN_ONCE(true, "Invalid direction for pixel reading: %d\n", direction);
+ return 1;
+}
+
+/**
+ * get_subsampling_offset() - Get the offset to keep the chroma siting consistent regardless of
+ * the x_start and y_start values
+ *
+ * @direction: direction of the reading to properly compute this offset
+ * @x_start: x coordinate of the starting point of the line to read
+ * @y_start: y coordinate of the starting point of the line to read
+ */
+static int get_subsampling_offset(enum pixel_read_direction direction, int x_start, int y_start)
+{
+ switch (direction) {
+ case READ_BOTTOM_TO_TOP:
+ return -y_start - 1;
+ case READ_TOP_TO_BOTTOM:
+ return y_start;
+ case READ_RIGHT_TO_LEFT:
+ return -x_start - 1;
+ case READ_LEFT_TO_RIGHT:
+ return x_start;
+ }
+ WARN_ONCE(true, "Invalid direction for pixel reading: %d\n", direction);
+ return 0;
+}
+
/*
* The following functions take pixel data (a, r, g, b, pixel, ...) and convert them to
* &struct pixel_argb_u16
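A worked example of the two subsampling helpers added above, taking NV12 (hsub = vsub = 2) read left to right from x_start = 3: get_subsampling() returns 2 and get_subsampling_offset() returns 3, so a read loop that advances its chroma pointer when (i + 3 + 1) % 2 == 0 steps the U/V sample after i = 0, 2, 4, ...; the pixel at source x = 3 still shares the chroma block of x = 2, exactly as if the line had started at x = 0. The semi-planar and planar read_line functions later in this patch rely on this.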
@@ -202,11 +249,54 @@ static struct pixel_argb_u16 argb_u16_from_RGB565(const __le16 *pixel)
return out_pixel;
}
+static struct pixel_argb_u16 argb_u16_from_gray8(u8 gray)
+{
+ return argb_u16_from_u8888(255, gray, gray, gray);
+}
+
+static struct pixel_argb_u16 argb_u16_from_grayu16(u16 gray)
+{
+ return argb_u16_from_u16161616(0xFFFF, gray, gray, gray);
+}
+
+VISIBLE_IF_KUNIT struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
+ const struct conversion_matrix *matrix)
+{
+ u16 r, g, b;
+ s64 fp_y, fp_channel_1, fp_channel_2;
+ s64 fp_r, fp_g, fp_b;
+
+ fp_y = drm_int2fixp(((int)y - matrix->y_offset) * 257);
+ fp_channel_1 = drm_int2fixp(((int)channel_1 - 128) * 257);
+ fp_channel_2 = drm_int2fixp(((int)channel_2 - 128) * 257);
+
+ fp_r = drm_fixp_mul(matrix->matrix[0][0], fp_y) +
+ drm_fixp_mul(matrix->matrix[0][1], fp_channel_1) +
+ drm_fixp_mul(matrix->matrix[0][2], fp_channel_2);
+ fp_g = drm_fixp_mul(matrix->matrix[1][0], fp_y) +
+ drm_fixp_mul(matrix->matrix[1][1], fp_channel_1) +
+ drm_fixp_mul(matrix->matrix[1][2], fp_channel_2);
+ fp_b = drm_fixp_mul(matrix->matrix[2][0], fp_y) +
+ drm_fixp_mul(matrix->matrix[2][1], fp_channel_1) +
+ drm_fixp_mul(matrix->matrix[2][2], fp_channel_2);
+
+ fp_r = drm_fixp2int_round(fp_r);
+ fp_g = drm_fixp2int_round(fp_g);
+ fp_b = drm_fixp2int_round(fp_b);
+
+ r = clamp(fp_r, 0, 0xffff);
+ g = clamp(fp_g, 0, 0xffff);
+ b = clamp(fp_b, 0, 0xffff);
+
+ return argb_u16_from_u16161616(0xffff, r, g, b);
+}
+EXPORT_SYMBOL_IF_KUNIT(argb_u16_from_yuv888);
+
/*
* The following functions are read_line function for each pixel format supported by VKMS.
*
* They read a line starting at the point @x_start,@y_start following the @direction. The result
- * is stored in @out_pixel and in the format ARGB16161616.
+ * is stored in @out_pixel and in a 64 bits format, see struct pixel_argb_u16.
*
* These functions are very repetitive, but the innermost pixel loops must be kept inside these
* functions for performance reasons. Some benchmarking was done in [1] where having the innermost
@@ -215,6 +305,96 @@ static struct pixel_argb_u16 argb_u16_from_RGB565(const __le16 *pixel)
* [1]: https://lore.kernel.org/dri-devel/d258c8dc-78e9-4509-9037-a98f7f33b3a3@riseup.net/
*/
+static void Rx_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ int bits_per_pixel = drm_format_info_bpp(plane->frame_info->fb->format, 0);
+ u8 *src_pixels;
+ int rem_x, rem_y;
+
+ WARN_ONCE(drm_format_info_block_height(plane->frame_info->fb->format, 0) != 1,
+ "%s() only support formats with block_h == 1", __func__);
+
+ packed_pixels_addr(plane->frame_info, x_start, y_start, 0, &src_pixels, &rem_x, &rem_y);
+ int bit_offset = (8 - bits_per_pixel) - rem_x * bits_per_pixel;
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+ int mask = (0x1 << bits_per_pixel) - 1;
+ int lum_per_level = 0xFFFF / mask;
+
+ if (direction == READ_LEFT_TO_RIGHT || direction == READ_RIGHT_TO_LEFT) {
+ int restart_bit_offset;
+ int step_bit_offset;
+
+ if (direction == READ_LEFT_TO_RIGHT) {
+ restart_bit_offset = 8 - bits_per_pixel;
+ step_bit_offset = -bits_per_pixel;
+ } else {
+ restart_bit_offset = 0;
+ step_bit_offset = bits_per_pixel;
+ }
+
+ while (out_pixel < end) {
+ u8 val = ((*src_pixels) >> bit_offset) & mask;
+
+ *out_pixel = argb_u16_from_grayu16((int)val * lum_per_level);
+
+ bit_offset += step_bit_offset;
+ if (bit_offset < 0 || 8 <= bit_offset) {
+ bit_offset = restart_bit_offset;
+ src_pixels += step;
+ }
+ out_pixel += 1;
+ }
+ } else if (direction == READ_TOP_TO_BOTTOM || direction == READ_BOTTOM_TO_TOP) {
+ while (out_pixel < end) {
+ u8 val = (*src_pixels >> bit_offset) & mask;
+ *out_pixel = argb_u16_from_grayu16((int)val * lum_per_level);
+ src_pixels += step;
+ out_pixel += 1;
+ }
+ }
+}
+
+static void R1_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
+}
+
+static void R2_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
+}
+
+static void R4_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
+}
+
+static void R8_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ while (out_pixel < end) {
+ *out_pixel = argb_u16_from_gray8(*src_pixels);
+ src_pixels += step;
+ out_pixel += 1;
+ }
+}
+
static void ARGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
enum pixel_read_direction direction, int count,
struct pixel_argb_u16 out_pixel[])
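As a worked example of the luminance expansion in Rx_read_line() above, take DRM_FORMAT_R2: bits_per_pixel = 2, so mask = 0x3 and lum_per_level = 0xFFFF / 3 = 21845; a stored value of 0b10 then expands to 2 * 21845 = 43690 (0xAAAA), i.e. two thirds of full scale in the 16-bit output.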
@@ -332,6 +512,92 @@ static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
}
/*
+ * This callback can be used for YUV formats where U and V values are
+ * stored in the same plane (often called semi-planar formats). It will
+ * correctly handle subsampling as described in the drm_format_info of the plane.
+ *
+ * The conversion matrix stored in the @plane is used to:
+ * - Apply the correct color range and encoding
+ * - Convert YUV and YVU with the same function (a column swap is needed when setting up
+ * plane->conversion_matrix)
+ */
+static void semi_planar_yuv_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ u8 *y_plane;
+ u8 *uv_plane;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0,
+ &y_plane);
+ packed_pixels_addr_1x1(plane->frame_info,
+ x_start / plane->frame_info->fb->format->hsub,
+ y_start / plane->frame_info->fb->format->vsub, 1,
+ &uv_plane);
+ int step_y = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+ int step_uv = get_block_step_bytes(plane->frame_info->fb, direction, 1);
+ int subsampling = get_subsampling(plane->frame_info->fb->format, direction);
+ int subsampling_offset = get_subsampling_offset(direction, x_start, y_start);
+ const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
+
+ for (int i = 0; i < count; i++) {
+ *out_pixel = argb_u16_from_yuv888(y_plane[0], uv_plane[0], uv_plane[1],
+ conversion_matrix);
+ out_pixel += 1;
+ y_plane += step_y;
+ if ((i + subsampling_offset + 1) % subsampling == 0)
+ uv_plane += step_uv;
+ }
+}
+
+/*
+ * This callback can be used for YUV formats where each color component is
+ * stored in a different plane (often called planar formats). It will
+ * correctly handle subsampling as described in the drm_format_info of the plane.
+ *
+ * The conversion matrix stored in the @plane is used to:
+ * - Apply the correct color range and encoding
+ * - Convert YUV and YVU with the same function (a column swap is needed when setting up
+ * plane->conversion_matrix)
+ */
+static void planar_yuv_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ u8 *y_plane;
+ u8 *channel_1_plane;
+ u8 *channel_2_plane;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0,
+ &y_plane);
+ packed_pixels_addr_1x1(plane->frame_info,
+ x_start / plane->frame_info->fb->format->hsub,
+ y_start / plane->frame_info->fb->format->vsub, 1,
+ &channel_1_plane);
+ packed_pixels_addr_1x1(plane->frame_info,
+ x_start / plane->frame_info->fb->format->hsub,
+ y_start / plane->frame_info->fb->format->vsub, 2,
+ &channel_2_plane);
+ int step_y = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+ int step_channel_1 = get_block_step_bytes(plane->frame_info->fb, direction, 1);
+ int step_channel_2 = get_block_step_bytes(plane->frame_info->fb, direction, 2);
+ int subsampling = get_subsampling(plane->frame_info->fb->format, direction);
+ int subsampling_offset = get_subsampling_offset(direction, x_start, y_start);
+ const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
+
+ for (int i = 0; i < count; i++) {
+ *out_pixel = argb_u16_from_yuv888(*y_plane, *channel_1_plane, *channel_2_plane,
+ conversion_matrix);
+ out_pixel += 1;
+ y_plane += step_y;
+ if ((i + subsampling_offset + 1) % subsampling == 0) {
+ channel_1_plane += step_channel_1;
+ channel_2_plane += step_channel_2;
+ }
+ }
+}
+
+/*
* The following functions take one &struct pixel_argb_u16 and convert it to a specific format.
* The result is stored in @out_pixel.
*
@@ -456,6 +722,28 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
return &XRGB16161616_read_line;
case DRM_FORMAT_RGB565:
return &RGB565_read_line;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV42:
+ return &semi_planar_yuv_read_line;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YVU444:
+ return &planar_yuv_read_line;
+ case DRM_FORMAT_R1:
+ return &R1_read_line;
+ case DRM_FORMAT_R2:
+ return &R2_read_line;
+ case DRM_FORMAT_R4:
+ return &R4_read_line;
+ case DRM_FORMAT_R8:
+ return &R8_read_line;
default:
/*
* This is a bug in vkms_plane_atomic_check(). All the supported
@@ -469,6 +757,183 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
}
}
+/*
+ * These matrices were generated using the colour python framework.
+ *
+ * Below are the function calls used to generate each matrix; go to
+ * https://colour.readthedocs.io/en/develop/generated/colour.matrix_YCbCr.html
+ * for more info:
+ *
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * is_legal = False,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix no_operation = {
+ .matrix = {
+ { 4294967296, 0, 0, },
+ { 0, 4294967296, 0, },
+ { 0, 0, 4294967296, },
+ },
+ .y_offset = 0,
+};
+
+static const struct conversion_matrix yuv_bt601_full = {
+ .matrix = {
+ { 4294967296, 0, 6021544149 },
+ { 4294967296, -1478054095, -3067191994 },
+ { 4294967296, 7610682049, 0 },
+ },
+ .y_offset = 0,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * is_legal = True,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt601_limited = {
+ .matrix = {
+ { 5020601039, 0, 6881764740 },
+ { 5020601039, -1689204679, -3505362278 },
+ { 5020601039, 8697922339, 0 },
+ },
+ .y_offset = 16,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * is_legal = False,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt709_full = {
+ .matrix = {
+ { 4294967296, 0, 6763714498 },
+ { 4294967296, -804551626, -2010578443 },
+ { 4294967296, 7969741314, 0 },
+ },
+ .y_offset = 0,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * is_legal = True,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt709_limited = {
+ .matrix = {
+ { 5020601039, 0, 7729959424 },
+ { 5020601039, -919487572, -2297803934 },
+ { 5020601039, 9108275786, 0 },
+ },
+ .y_offset = 16,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * is_legal = False,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt2020_full = {
+ .matrix = {
+ { 4294967296, 0, 6333358775 },
+ { 4294967296, -706750298, -2453942994 },
+ { 4294967296, 8080551471, 0 },
+ },
+ .y_offset = 0,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * is_legal = True,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt2020_limited = {
+ .matrix = {
+ { 5020601039, 0, 7238124312 },
+ { 5020601039, -807714626, -2804506279 },
+ { 5020601039, 9234915964, 0 },
+ },
+ .y_offset = 16,
+};
+
+/**
+ * swap_uv_columns() - Swap u and v column of a given matrix
+ *
+ * @matrix: Matrix in which column are swapped
+ */
+static void swap_uv_columns(struct conversion_matrix *matrix)
+{
+ swap(matrix->matrix[0][2], matrix->matrix[0][1]);
+ swap(matrix->matrix[1][2], matrix->matrix[1][1]);
+ swap(matrix->matrix[2][2], matrix->matrix[2][1]);
+}
+
+/**
+ * get_conversion_matrix_to_argb_u16() - Retrieve the correct yuv to rgb conversion matrix for a
+ * given encoding and range.
+ *
+ * @format: DRM_FORMAT_* value for which to obtain a conversion matrix (see [drm_fourcc.h])
+ * @encoding: DRM_COLOR_* value for which to obtain a conversion matrix
+ * @range: DRM_COLOR_*_RANGE value for which to obtain a conversion matrix
+ * @matrix: Pointer to store the value into
+ */
+void get_conversion_matrix_to_argb_u16(u32 format,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range,
+ struct conversion_matrix *matrix)
+{
+ const struct conversion_matrix *matrix_to_copy;
+ bool limited_range;
+
+ switch (range) {
+ case DRM_COLOR_YCBCR_LIMITED_RANGE:
+ limited_range = true;
+ break;
+ case DRM_COLOR_YCBCR_FULL_RANGE:
+ limited_range = false;
+ break;
+ case DRM_COLOR_RANGE_MAX:
+ limited_range = false;
+ WARN_ONCE(true, "The requested range is not supported.");
+ break;
+ }
+
+ switch (encoding) {
+ case DRM_COLOR_YCBCR_BT601:
+ matrix_to_copy = limited_range ? &yuv_bt601_limited :
+ &yuv_bt601_full;
+ break;
+ case DRM_COLOR_YCBCR_BT709:
+ matrix_to_copy = limited_range ? &yuv_bt709_limited :
+ &yuv_bt709_full;
+ break;
+ case DRM_COLOR_YCBCR_BT2020:
+ matrix_to_copy = limited_range ? &yuv_bt2020_limited :
+ &yuv_bt2020_full;
+ break;
+ case DRM_COLOR_ENCODING_MAX:
+ matrix_to_copy = &no_operation;
+ WARN_ONCE(true, "The requested encoding is not supported.");
+ break;
+ }
+
+ memcpy(matrix, matrix_to_copy, sizeof(*matrix_to_copy));
+
+ switch (format) {
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YVU444:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV42:
+ swap_uv_columns(matrix);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(get_conversion_matrix_to_argb_u16);
+
/**
* get_pixel_write_function() - Retrieve the correct write_pixel function for a specific format.
* The returned pointer is NULL for unsupported pixel formats. The caller must ensure that the
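A worked check of the fixed-point encoding used by the conversion matrices above: the BT.601 full-range V-to-R weight is 1.402, and round(1.402 * 2^32) = 6021544149, which is exactly yuv_bt601_full.matrix[0][2]; argb_u16_from_yuv888() multiplies these s64 values with drm_fixp_mul(), which keeps the 32-bit fractional part throughout.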
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
index 8d2bef95ff79..b4fe62ab9c65 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.h
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -9,4 +9,13 @@ pixel_read_line_t get_pixel_read_line_function(u32 format);
pixel_write_t get_pixel_write_function(u32 format);
+void get_conversion_matrix_to_argb_u16(u32 format, enum drm_color_encoding encoding,
+ enum drm_color_range range,
+ struct conversion_matrix *matrix);
+
+#if IS_ENABLED(CONFIG_KUNIT)
+struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
+ const struct conversion_matrix *matrix);
+#endif
+
#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index e34f8c7f83c3..e3fdd161d0f0 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -18,7 +18,23 @@ static const u32 vkms_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB16161616,
DRM_FORMAT_ARGB16161616,
- DRM_FORMAT_RGB565
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV24,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_NV42,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YVU444,
+ DRM_FORMAT_R1,
+ DRM_FORMAT_R2,
+ DRM_FORMAT_R4,
+ DRM_FORMAT_R8,
};
static struct drm_plane_state *
@@ -119,6 +135,8 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
frame_info->rotation = new_state->rotation;
vkms_plane_state->pixel_read_line = get_pixel_read_line_function(fmt);
+ get_conversion_matrix_to_argb_u16(fmt, new_state->color_encoding, new_state->color_range,
+ &vkms_plane_state->conversion_matrix);
}
static int vkms_plane_atomic_check(struct drm_plane *plane,
@@ -205,5 +223,14 @@ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK);
+ drm_plane_create_color_properties(&plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709) |
+ BIT(DRM_COLOR_YCBCR_BT2020),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_FULL_RANGE);
+
return plane;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index dd4ca6a9c690..8fe02131a6c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -544,7 +544,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
vmw_cmd_commit_flush(dev_priv, bytes);
- vmw_update_seqno(dev_priv);
+ vmw_fences_update(dev_priv->fman);
out_err:
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 5205552b1970..8ff958d119be 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -440,8 +440,10 @@ static int vmw_device_init(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
}
- dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+ u32 seqno = vmw_fence_read(dev_priv);
+
+ atomic_set(&dev_priv->last_read_seqno, seqno);
+ atomic_set(&dev_priv->marker_seq, seqno);
return 0;
}
@@ -454,7 +456,7 @@ static void vmw_device_fini(struct vmw_private *vmw)
while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
;
- vmw->last_read_seqno = vmw_fence_read(vmw);
+ atomic_set(&vmw->last_read_seqno, vmw_fence_read(vmw));
vmw_write(vmw, SVGA_REG_CONFIG_DONE,
vmw->config_done_state);
@@ -713,7 +715,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
pci_set_master(pdev);
- ret = pci_request_regions(pdev, "vmwgfx probe");
+ ret = pcim_request_all_regions(pdev, "vmwgfx probe");
if (ret)
return ret;
@@ -733,7 +735,6 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
if (!dev->rmmio) {
drm_err(&dev->drm,
"Failed mapping registers mmio memory.\n");
- pci_release_regions(pdev);
return -ENOMEM;
}
} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
@@ -754,11 +755,9 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
if (IS_ERR(dev->fifo_mem)) {
drm_err(&dev->drm,
"Failed mapping FIFO memory.\n");
- pci_release_regions(pdev);
return PTR_ERR(dev->fifo_mem);
}
} else {
- pci_release_regions(pdev);
return -EINVAL;
}
@@ -836,7 +835,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
int ret;
enum vmw_res_type i;
bool refuse_dma = false;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
vmw_sw_context_init(dev_priv);
@@ -852,7 +850,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
return ret;
ret = vmw_detect_version(dev_priv);
if (ret)
- goto out_no_pci_or_version;
+ return ret;
for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -1152,15 +1150,13 @@ out_err0:
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
-out_no_pci_or_version:
- pci_release_regions(pdev);
+
return ret;
}
static void vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
@@ -1196,8 +1192,6 @@ static void vmw_driver_unload(struct drm_device *dev)
idr_destroy(&dev_priv->res_idr[i]);
vmw_mksstat_remove_all(dev_priv);
-
- pci_release_regions(pdev);
}
static void vmw_postclose(struct drm_device *dev,
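
The vmwgfx probe path above switches to pcim_request_all_regions(), the device-managed variant, which is why every pci_release_regions() call in the error and unload paths can simply be deleted. A toy model of the devres idea in standalone C, assuming nothing about the real implementation beyond its LIFO cleanup behavior:

#include <stdio.h>

/* Toy model of devres: cleanup actions registered against a device
 * run automatically when the device goes away, so probe error paths
 * no longer need explicit release calls (the effect of switching
 * vmwgfx from pci_request_regions() to pcim_request_all_regions()).
 */
#define MAX_ACTIONS 8

struct toy_device {
	void (*cleanup[MAX_ACTIONS])(void);
	int n;
};

static void toy_devres_add(struct toy_device *dev, void (*fn)(void))
{
	dev->cleanup[dev->n++] = fn;
}

static void toy_device_put(struct toy_device *dev)
{
	while (dev->n)
		dev->cleanup[--dev->n](); /* LIFO, like real devres */
}

static void release_regions(void) { puts("regions released"); }

static int probe(struct toy_device *dev, int fail)
{
	toy_devres_add(dev, release_regions); /* managed acquire */
	if (fail)
		return -1; /* no manual release needed here */
	return 0;
}

int main(void)
{
	struct toy_device dev = { .n = 0 };

	if (probe(&dev, 1))
		puts("probe failed");
	toy_device_put(&dev); /* cleanup still runs exactly once */
	return 0;
}

The payoff is exactly what the diff shows: error paths return early without unwinding, and the release happens once at device teardown.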
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 594af8eb04c6..eda5b6f8f4c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -522,7 +522,7 @@ struct vmw_private {
int cmdbuf_waiters; /* Protected by waiter_lock */
int error_waiters; /* Protected by waiter_lock */
int fifo_queue_waiters; /* Protected by waiter_lock */
- uint32_t last_read_seqno;
+ atomic_t last_read_seqno;
struct vmw_fence_manager *fman;
uint32_t irq_mask; /* Updates protected by waiter_lock */
@@ -1006,15 +1006,14 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
uint32_t seqno,
bool interruptible,
unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
- int *waiter_count);
-extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
- u32 flag, int *waiter_count);
+bool vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_add(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+ int *waiter_count);
+bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+ u32 flag, int *waiter_count);
/**
* Kernel modesetting - vmwgfx_kms.c
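
The drv.h hunk turns last_read_seqno into an atomic_t; elsewhere in the series, atomic_set_release() in the fence-update path is paired with atomic_read_acquire() in the readers, removing the need to take the fence-manager spinlock just to read that field. A minimal C11 model of that release/acquire pairing (toy names, not kernel code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the pairing the series introduces for last_read_seqno:
 * atomic_set_release() in __vmw_fences_update() vs
 * atomic_read_acquire() in readers, modeled with C11 atomics.
 */
static _Atomic uint32_t last_read_seqno;

static void publish_seqno(uint32_t seqno)
{
	/* Release: fence-list updates made before this store are
	 * visible to any reader that observes the new seqno. */
	atomic_store_explicit(&last_read_seqno, seqno,
			      memory_order_release);
}

static uint32_t read_seqno(void)
{
	/* Acquire: pairs with the release store above. */
	return atomic_load_explicit(&last_read_seqno,
				    memory_order_acquire);
}

int main(void)
{
	publish_seqno(42);
	printf("seqno = %u\n", read_seqno());
	return 0;
}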
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e831e324e737..819704ac675d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3878,8 +3878,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.handle = fence_handle;
fence_rep.seqno = fence->base.seqno;
- vmw_update_seqno(dev_priv);
- fence_rep.passed_seqno = dev_priv->last_read_seqno;
+ fence_rep.passed_seqno = vmw_fences_update(dev_priv->fman);
}
/*
@@ -4068,23 +4067,6 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
return 0;
}
-/*
- * DMA fence callback to remove a seqno_waiter
- */
-struct seqno_waiter_rm_context {
- struct dma_fence_cb base;
- struct vmw_private *dev_priv;
-};
-
-static void seqno_waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb)
-{
- struct seqno_waiter_rm_context *ctx =
- container_of(cb, struct seqno_waiter_rm_context, base);
-
- vmw_seqno_waiter_remove(ctx->dev_priv);
- kfree(ctx);
-}
-
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands, void *kernel_commands,
@@ -4265,15 +4247,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else {
/* Link the fence with the FD created earlier */
fd_install(out_fence_fd, sync_file->file);
- struct seqno_waiter_rm_context *ctx =
- kmalloc(sizeof(*ctx), GFP_KERNEL);
- ctx->dev_priv = dev_priv;
- vmw_seqno_waiter_add(dev_priv);
- if (dma_fence_add_callback(&fence->base, &ctx->base,
- seqno_waiter_rm_cb) < 0) {
- vmw_seqno_waiter_remove(dev_priv);
- kfree(ctx);
- }
}
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 588d50ababf6..c2294abbe753 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,32 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
-#include <linux/sched/signal.h>
-
#include "vmwgfx_drv.h"
#define VMW_FENCE_WRAP (1 << 31)
@@ -35,14 +14,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
- struct work_struct work;
bool fifo_down;
- struct list_head cleanup_list;
- uint32_t pending_actions[VMW_ACTION_MAX];
- struct mutex goal_irq_mutex;
- bool goal_irq_on; /* Protected by @goal_irq_mutex */
- bool seqno_valid; /* Protected by @lock, and may not be set to true
- without the @goal_irq_mutex held. */
u64 ctx;
};
@@ -52,12 +24,10 @@ struct vmw_user_fence {
};
/**
- * struct vmw_event_fence_action - fence action that delivers a drm event.
+ * struct vmw_event_fence_action - fence callback that delivers a DRM event.
*
- * @action: A struct vmw_fence_action to hook up to a fence.
+ * @base: Callback node for use with dma_fence_add_callback().
* @event: A pointer to the pending event.
- * @fence: A referenced pointer to the fence to keep it alive while @action
- * hangs on it.
* @dev: Pointer to a struct drm_device so we can access the event stuff.
* @tv_sec: If non-null, the variable pointed to will be assigned
* current time tv_sec val when the fence signals.
@@ -65,10 +35,9 @@ struct vmw_user_fence {
* be assigned the current time tv_usec val when the fence signals.
*/
struct vmw_event_fence_action {
- struct vmw_fence_action action;
+ struct dma_fence_cb base;
struct drm_pending_event *event;
- struct vmw_fence_obj *fence;
struct drm_device *dev;
uint32_t *tv_sec;
@@ -81,44 +50,6 @@ fman_from_fence(struct vmw_fence_obj *fence)
return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
-static u32 vmw_fence_goal_read(struct vmw_private *vmw)
-{
- if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
- return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
- else
- return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
-}
-
-static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
-{
- if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
- vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
- else
- vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
-}
-
-/*
- * Note on fencing subsystem usage of irqs:
- * Typically the vmw_fences_update function is called
- *
- * a) When a new fence seqno has been submitted by the fifo code.
- * b) On-demand when we have waiters. Sleeping waiters will switch on the
- * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
- * irq is received. When the last fence waiter is gone, that IRQ is masked
- * away.
- *
- * In situations where there are no waiters and we don't submit any new fences,
- * fence objects may not be signaled. This is perfectly OK, since there are
- * no consumers of the signaled data, but that is NOT ok when there are fence
- * actions attached to a fence. The fencing subsystem then makes use of the
- * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
- * which has an action attached, and each time vmw_fences_update is called,
- * the subsystem makes sure the fence goal seqno is updated.
- *
- * The fence goal seqno irq is on as long as there are unsignaled fence
- * objects with actions attached to them.
- */
-
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
@@ -126,8 +57,21 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
if (!list_empty(&fence->head)) {
+ /* The fence manager still has an implicit reference to this
+ * fence via the fence list if head is set. Because the lock
+ * must be held when the fence manager updates the fence list,
+ * either the fence will have been removed by the time we take
+ * the lock below, or we can safely remove it and the fence
+ * manager will never see it. This implies the fence is being
+ * deleted without being signaled, which is dubious but valid
+ * if there are no callbacks. The dma_fence code that calls
+ * this hook already warns about fences deleted unsignaled with
+ * callbacks attached, so there is no need to warn again here.
+ */
spin_lock(&fman->lock);
list_del_init(&fence->head);
+ if (fence->waiter_added)
+ vmw_seqno_waiter_remove(fman->dev_priv);
spin_unlock(&fman->lock);
}
fence->destroy(fence);
@@ -143,165 +87,46 @@ static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
return "svga";
}
+/* When we toggle signaling for the SVGA device there is a race window from
+ * the time we first read the fence seqno to the time we enable interrupts.
+ * If we miss the interrupt for a fence during this window, the driver is
+ * likely to stall. As a result we need to re-read the seqno after interrupts
+ * are enabled. If interrupts were already enabled, we just increment the
+ * number of seqno waiters.
+ */
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
+ u32 seqno;
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
-
- u32 seqno = vmw_fence_read(dev_priv);
- if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
+check_for_race:
+ seqno = vmw_fence_read(dev_priv);
+ if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
+ if (fence->waiter_added) {
+ vmw_seqno_waiter_remove(dev_priv);
+ fence->waiter_added = false;
+ }
return false;
-
+ } else if (!fence->waiter_added) {
+ fence->waiter_added = true;
+ if (vmw_seqno_waiter_add(dev_priv))
+ goto check_for_race;
+ }
return true;
}
-struct vmwgfx_wait_cb {
- struct dma_fence_cb base;
- struct task_struct *task;
-};
-
-static void
-vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
- struct vmwgfx_wait_cb *wait =
- container_of(cb, struct vmwgfx_wait_cb, base);
-
- wake_up_process(wait->task);
-}
-
-static void __vmw_fences_update(struct vmw_fence_manager *fman);
-
-static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
-{
- struct vmw_fence_obj *fence =
- container_of(f, struct vmw_fence_obj, base);
-
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- struct vmw_private *dev_priv = fman->dev_priv;
- struct vmwgfx_wait_cb cb;
- long ret = timeout;
-
- if (likely(vmw_fence_obj_signaled(fence)))
- return timeout;
-
- vmw_seqno_waiter_add(dev_priv);
-
- spin_lock(f->lock);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
- goto out;
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- goto out;
- }
-
- cb.base.func = vmwgfx_wait_cb;
- cb.task = current;
- list_add(&cb.base.node, &f->cb_list);
-
- for (;;) {
- __vmw_fences_update(fman);
-
- /*
- * We can use the barrier free __set_current_state() since
- * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
- * fence spinlock.
- */
- if (intr)
- __set_current_state(TASK_INTERRUPTIBLE);
- else
- __set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
- if (ret == 0 && timeout > 0)
- ret = 1;
- break;
- }
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- if (ret == 0)
- break;
-
- spin_unlock(f->lock);
-
- ret = schedule_timeout(ret);
-
- spin_lock(f->lock);
- }
- __set_current_state(TASK_RUNNING);
- if (!list_empty(&cb.base.node))
- list_del(&cb.base.node);
-
-out:
- spin_unlock(f->lock);
-
- vmw_seqno_waiter_remove(dev_priv);
-
- return ret;
-}
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman);
static const struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
- .wait = vmw_fence_wait,
.release = vmw_fence_obj_destroy,
};
-/*
- * Execute signal actions on fences recently signaled.
- * This is done from a workqueue so we don't have to execute
- * signal actions from atomic context.
- */
-
-static void vmw_fence_work_func(struct work_struct *work)
-{
- struct vmw_fence_manager *fman =
- container_of(work, struct vmw_fence_manager, work);
- struct list_head list;
- struct vmw_fence_action *action, *next_action;
- bool seqno_valid;
-
- do {
- INIT_LIST_HEAD(&list);
- mutex_lock(&fman->goal_irq_mutex);
-
- spin_lock(&fman->lock);
- list_splice_init(&fman->cleanup_list, &list);
- seqno_valid = fman->seqno_valid;
- spin_unlock(&fman->lock);
-
- if (!seqno_valid && fman->goal_irq_on) {
- fman->goal_irq_on = false;
- vmw_goal_waiter_remove(fman->dev_priv);
- }
- mutex_unlock(&fman->goal_irq_mutex);
-
- if (list_empty(&list))
- return;
-
- /*
- * At this point, only we should be able to manipulate the
- * list heads of the actions we have on the private list.
- * hence fman::lock not held.
- */
-
- list_for_each_entry_safe(action, next_action, &list, head) {
- list_del_init(&action->head);
- if (action->cleanup)
- action->cleanup(action);
- }
- } while (1);
-}
-
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
@@ -312,10 +137,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
fman->dev_priv = dev_priv;
spin_lock_init(&fman->lock);
INIT_LIST_HEAD(&fman->fence_list);
- INIT_LIST_HEAD(&fman->cleanup_list);
- INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true;
- mutex_init(&fman->goal_irq_mutex);
fman->ctx = dma_fence_context_alloc(1);
return fman;
@@ -325,11 +147,8 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
bool lists_empty;
- (void) cancel_work_sync(&fman->work);
-
spin_lock(&fman->lock);
- lists_empty = list_empty(&fman->fence_list) &&
- list_empty(&fman->cleanup_list);
+ lists_empty = list_empty(&fman->fence_list);
spin_unlock(&fman->lock);
BUG_ON(!lists_empty);
@@ -344,7 +163,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
fman->ctx, seqno);
- INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
spin_lock(&fman->lock);
@@ -352,6 +170,11 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
ret = -EBUSY;
goto out_unlock;
}
+ /* This creates an implicit reference to the fence from the fence
+ * manager. It will be dropped when the fence is signaled, which is
+ * expected to happen before deletion. The dtor has code to catch
+ * the rare deletion-before-signaling case.
+ */
list_add_tail(&fence->head, &fman->fence_list);
out_unlock:
@@ -360,148 +183,35 @@ out_unlock:
}
-static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
- struct list_head *list)
-{
- struct vmw_fence_action *action, *next_action;
-
- list_for_each_entry_safe(action, next_action, list, head) {
- list_del_init(&action->head);
- fman->pending_actions[action->type]--;
- if (action->seq_passed != NULL)
- action->seq_passed(action);
-
- /*
- * Add the cleanup action to the cleanup list so that
- * it will be performed by a worker task.
- */
-
- list_add_tail(&action->head, &fman->cleanup_list);
- }
-}
-
-/**
- * vmw_fence_goal_new_locked - Figure out a new device fence goal
- * seqno if needed.
- *
- * @fman: Pointer to a fence manager.
- * @passed_seqno: The seqno the device currently signals as passed.
- *
- * This function should be called with the fence manager lock held.
- * It is typically called when we have a new passed_seqno, and
- * we might need to update the fence goal. It checks to see whether
- * the current fence goal has already passed, and, in that case,
- * scans through all unsignaled fences to get the next fence object with an
- * action attached, and sets the seqno of that fence as a new fence goal.
- *
- * returns true if the device goal seqno was updated. False otherwise.
- */
-static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
- u32 passed_seqno)
-{
- u32 goal_seqno;
- struct vmw_fence_obj *fence, *next_fence;
-
- if (likely(!fman->seqno_valid))
- return false;
-
- goal_seqno = vmw_fence_goal_read(fman->dev_priv);
- if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
- return false;
-
- fman->seqno_valid = false;
- list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
- if (!list_empty(&fence->seq_passed_actions)) {
- fman->seqno_valid = true;
- vmw_fence_goal_write(fman->dev_priv,
- fence->base.seqno);
- break;
- }
- }
-
- return true;
-}
-
-
-/**
- * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
- * needed.
- *
- * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
- * considered as a device fence goal.
- *
- * This function should be called with the fence manager lock held.
- * It is typically called when an action has been attached to a fence to
- * check whether the seqno of that fence should be used for a fence
- * goal interrupt. This is typically needed if the current fence goal is
- * invalid, or has a higher seqno than that of the current fence object.
- *
- * returns true if the device goal seqno was updated. False otherwise.
- */
-static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
-{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- u32 goal_seqno;
-
- if (dma_fence_is_signaled_locked(&fence->base))
- return false;
-
- goal_seqno = vmw_fence_goal_read(fman->dev_priv);
- if (likely(fman->seqno_valid &&
- goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
- return false;
-
- vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
- fman->seqno_valid = true;
-
- return true;
-}
-
-static void __vmw_fences_update(struct vmw_fence_manager *fman)
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman)
{
struct vmw_fence_obj *fence, *next_fence;
- struct list_head action_list;
- bool needs_rerun;
- uint32_t seqno, new_seqno;
+ const bool cookie = dma_fence_begin_signalling();
+ const u32 seqno = vmw_fence_read(fman->dev_priv);
- seqno = vmw_fence_read(fman->dev_priv);
-rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
+ if (fence->waiter_added) {
+ vmw_seqno_waiter_remove(fman->dev_priv);
+ fence->waiter_added = false;
+ }
dma_fence_signal_locked(&fence->base);
- INIT_LIST_HEAD(&action_list);
- list_splice_init(&fence->seq_passed_actions,
- &action_list);
- vmw_fences_perform_actions(fman, &action_list);
} else
break;
}
-
- /*
- * Rerun if the fence goal seqno was updated, and the
- * hardware might have raced with that update, so that
- * we missed a fence_goal irq.
- */
-
- needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
- if (unlikely(needs_rerun)) {
- new_seqno = vmw_fence_read(fman->dev_priv);
- if (new_seqno != seqno) {
- seqno = new_seqno;
- goto rerun;
- }
- }
-
- if (!list_empty(&fman->cleanup_list))
- (void) schedule_work(&fman->work);
+ dma_fence_end_signalling(cookie);
+ atomic_set_release(&fman->dev_priv->last_read_seqno, seqno);
+ return seqno;
}
-void vmw_fences_update(struct vmw_fence_manager *fman)
+u32 vmw_fences_update(struct vmw_fence_manager *fman)
{
+ u32 seqno;
spin_lock(&fman->lock);
- __vmw_fences_update(fman);
+ seqno = __vmw_fences_update(fman);
spin_unlock(&fman->lock);
+ return seqno;
}
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -539,14 +249,13 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(!fence))
return -ENOMEM;
- ret = vmw_fence_obj_init(fman, fence, seqno,
- vmw_fence_destroy);
+ ret = vmw_fence_obj_init(fman, fence, seqno, vmw_fence_destroy);
if (unlikely(ret != 0))
goto out_err_init;
@@ -638,7 +347,6 @@ out_no_object:
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
- struct list_head action_list;
int ret;
/*
@@ -661,10 +369,6 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
dma_fence_signal(&fence->base);
- INIT_LIST_HEAD(&action_list);
- list_splice_init(&fence->seq_passed_actions,
- &action_list);
- vmw_fences_perform_actions(fman, &action_list);
}
BUG_ON(!list_empty(&fence->head));
@@ -778,7 +482,6 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_signaled_arg *) data;
struct ttm_base_object *base;
struct vmw_fence_obj *fence;
- struct vmw_fence_manager *fman;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -787,14 +490,11 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
- fman = fman_from_fence(fence);
arg->signaled = vmw_fence_obj_signaled(fence);
arg->signaled_flags = arg->flags;
- spin_lock(&fman->lock);
- arg->passed_seqno = dev_priv->last_read_seqno;
- spin_unlock(&fman->lock);
+ arg->passed_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
ttm_base_object_unref(&base);
@@ -822,10 +522,11 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
* attached has passed. It queues the event on the submitter's event list.
* This function is always called from atomic context.
*/
-static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct vmw_event_fence_action *eaction =
- container_of(action, struct vmw_event_fence_action, action);
+ container_of(cb, struct vmw_event_fence_action, base);
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
@@ -837,7 +538,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
if (likely(eaction->tv_sec != NULL)) {
struct timespec64 ts;
- ktime_get_ts64(&ts);
+ ts = ktime_to_timespec64(f->timestamp);
/* monotonic time, so no y2038 overflow */
*eaction->tv_sec = ts.tv_sec;
*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
@@ -846,75 +547,10 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
drm_send_event_locked(dev, eaction->event);
eaction->event = NULL;
spin_unlock_irq(&dev->event_lock);
-}
-
-/**
- * vmw_event_fence_action_cleanup
- *
- * @action: The struct vmw_fence_action embedded in a struct
- * vmw_event_fence_action.
- *
- * This function is the struct vmw_fence_action destructor. It's typically
- * called from a workqueue.
- */
-static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
-{
- struct vmw_event_fence_action *eaction =
- container_of(action, struct vmw_event_fence_action, action);
-
- vmw_fence_obj_unreference(&eaction->fence);
+ dma_fence_put(f);
kfree(eaction);
}
-
-/**
- * vmw_fence_obj_add_action - Add an action to a fence object.
- *
- * @fence: The fence object.
- * @action: The action to add.
- *
- * Note that the action callbacks may be executed before this function
- * returns.
- */
-static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
- struct vmw_fence_action *action)
-{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- bool run_update = false;
-
- mutex_lock(&fman->goal_irq_mutex);
- spin_lock(&fman->lock);
-
- fman->pending_actions[action->type]++;
- if (dma_fence_is_signaled_locked(&fence->base)) {
- struct list_head action_list;
-
- INIT_LIST_HEAD(&action_list);
- list_add_tail(&action->head, &action_list);
- vmw_fences_perform_actions(fman, &action_list);
- } else {
- list_add_tail(&action->head, &fence->seq_passed_actions);
-
- /*
- * This function may set fman::seqno_valid, so it must
- * be run with the goal_irq_mutex held.
- */
- run_update = vmw_fence_goal_check_locked(fence);
- }
-
- spin_unlock(&fman->lock);
-
- if (run_update) {
- if (!fman->goal_irq_on) {
- fman->goal_irq_on = true;
- vmw_goal_waiter_add(fman->dev_priv);
- }
- vmw_fences_update(fman);
- }
- mutex_unlock(&fman->goal_irq_mutex);
-
-}
-
/**
* vmw_event_fence_action_queue - Post an event for sending when a fence
* object seqno has passed.
@@ -949,18 +585,14 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
return -ENOMEM;
eaction->event = event;
-
- eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
- eaction->action.cleanup = vmw_event_fence_action_cleanup;
- eaction->action.type = VMW_ACTION_EVENT;
-
- eaction->fence = vmw_fence_obj_reference(fence);
eaction->dev = &fman->dev_priv->drm;
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
- vmw_fence_obj_add_action(fence, &eaction->action);
-
+ vmw_fence_obj_reference(fence); /* dropped in the callback */
+ if (dma_fence_add_callback(&fence->base, &eaction->base,
+ vmw_event_fence_action_seq_passed) < 0)
+ vmw_event_fence_action_seq_passed(&fence->base, &eaction->base);
return 0;
}
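
The rewritten event path relies on dma_fence_add_callback() returning a negative value when the fence has already signaled, in which case the callback is invoked by hand so the event is still delivered, as the tail of vmw_event_fence_action_queue() shows. A standalone toy model of that register-or-run-inline pattern (toy types, not the dma_fence API):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the add-callback-or-run-inline pattern: if the fence
 * already signaled, registration fails and the caller invokes the
 * callback itself so the work is never lost.
 */
struct toy_fence {
	bool signaled;
	void (*cb)(struct toy_fence *);
};

static int toy_add_callback(struct toy_fence *f,
			    void (*cb)(struct toy_fence *))
{
	if (f->signaled)
		return -1; /* like dma_fence_add_callback() < 0 */
	f->cb = cb;
	return 0;
}

static void deliver_event(struct toy_fence *f)
{
	(void)f;
	puts("event delivered");
}

static void queue_event(struct toy_fence *f)
{
	if (toy_add_callback(f, deliver_event) < 0)
		deliver_event(f); /* already signaled: run inline */
}

int main(void)
{
	struct toy_fence f = { .signaled = true };

	queue_event(&f); /* prints immediately */
	return 0;
}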
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index a7eee579c76a..e897cccae1ae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -39,27 +39,10 @@ struct drm_pending_event;
struct vmw_private;
struct vmw_fence_manager;
-/**
- *
- *
- */
-enum vmw_action_type {
- VMW_ACTION_EVENT = 0,
- VMW_ACTION_MAX
-};
-
-struct vmw_fence_action {
- struct list_head head;
- enum vmw_action_type type;
- void (*seq_passed) (struct vmw_fence_action *action);
- void (*cleanup) (struct vmw_fence_action *action);
-};
-
struct vmw_fence_obj {
struct dma_fence base;
-
+ bool waiter_added;
struct list_head head;
- struct list_head seq_passed_actions;
void (*destroy)(struct vmw_fence_obj *fence);
};
@@ -86,7 +69,7 @@ vmw_fence_obj_reference(struct vmw_fence_obj *fence)
return fence;
}
-extern void vmw_fences_update(struct vmw_fence_manager *fman);
+u32 vmw_fences_update(struct vmw_fence_manager *fman);
extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index e417921af584..eedf1fe60be7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -284,11 +284,10 @@ static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m)
seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
id, bo->tbo.base.size, placement, type);
- seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
+ seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d",
bo->tbo.priority,
bo->tbo.pin_count,
- kref_read(&bo->tbo.base.refcount),
- kref_read(&bo->tbo.kref));
+ kref_read(&bo->tbo.base.refcount));
seq_puts(m, "\n");
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 086e69a130d4..05773eb394d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -123,26 +123,17 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
-void vmw_update_seqno(struct vmw_private *dev_priv)
-{
- uint32_t seqno = vmw_fence_read(dev_priv);
-
- if (dev_priv->last_read_seqno != seqno) {
- dev_priv->last_read_seqno = seqno;
- vmw_fences_update(dev_priv->fman);
- }
-}
-
bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno)
{
bool ret;
+ u32 last_read_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
- vmw_update_seqno(dev_priv);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ last_read_seqno = vmw_fences_update(dev_priv->fman);
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
@@ -239,51 +230,59 @@ out_err:
return ret;
}
-void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+bool vmw_generic_waiter_add(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
- spin_lock_bh(&dev_priv->waiter_lock);
+ bool hw_programmed = false;
+
+ spin_lock(&dev_priv->waiter_lock);
if ((*waiter_count)++ == 0) {
vmw_irq_status_write(dev_priv, flag);
dev_priv->irq_mask |= flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ hw_programmed = true;
}
- spin_unlock_bh(&dev_priv->waiter_lock);
+ spin_unlock(&dev_priv->waiter_lock);
+ return hw_programmed;
}
-void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
- spin_lock_bh(&dev_priv->waiter_lock);
+ bool hw_programmed = false;
+
+ spin_lock(&dev_priv->waiter_lock);
if (--(*waiter_count) == 0) {
dev_priv->irq_mask &= ~flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ hw_programmed = true;
}
- spin_unlock_bh(&dev_priv->waiter_lock);
+ spin_unlock(&dev_priv->waiter_lock);
+ return hw_programmed;
}
-void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+bool vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
- &dev_priv->fence_queue_waiters);
+ return vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+ &dev_priv->fence_queue_waiters);
}
-void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
- &dev_priv->fence_queue_waiters);
+ return vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+ &dev_priv->fence_queue_waiters);
}
-void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+bool vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
- &dev_priv->goal_queue_waiters);
+ return vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
+ &dev_priv->goal_queue_waiters);
}
-void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+bool vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
- &dev_priv->goal_queue_waiters);
+ return vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
+ &dev_priv->goal_queue_waiters);
}
static void vmw_irq_preinstall(struct drm_device *dev)
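
All of the seqno checks in this series use the same unsigned-subtraction idiom against VMW_FENCE_WRAP (1 << 31). A standalone sketch of why it stays correct across 32-bit wraparound, assuming the compared values are less than 2^31 apart:

#include <stdint.h>
#include <stdio.h>

#define VMW_FENCE_WRAP (1u << 31)

/* The "a - b < WRAP" idiom used throughout the fence code: with
 * unsigned 32-bit arithmetic it answers "has seqno a reached b?"
 * correctly even across the 2^32 wraparound, as long as the two
 * values are less than 2^31 apart.
 */
static int seqno_passed(uint32_t now, uint32_t target)
{
	return now - target < VMW_FENCE_WRAP;
}

int main(void)
{
	printf("%d\n", seqno_passed(10, 5));         /* 1: passed  */
	printf("%d\n", seqno_passed(5, 10));         /* 0: pending */
	printf("%d\n", seqno_passed(3, 0xfffffffe)); /* 1: wrapped */
	return 0;
}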
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 05b1c54a070c..54ea1b513950 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -500,6 +500,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_user_object *uo,
struct vmw_framebuffer **out,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2
*mode_cmd)
@@ -548,7 +549,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, info, mode_cmd);
memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
vmw_user_object_ref(&vfbs->uo);
@@ -602,6 +603,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
struct vmw_bo *bo,
struct vmw_framebuffer **out,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2
*mode_cmd)
@@ -634,7 +636,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
}
vfbd->base.base.obj[0] = &bo->tbo.base;
- drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, info, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
*out = &vfbd->base;
@@ -679,11 +681,13 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
* @dev_priv: Pointer to device private struct.
* @uo: Pointer to user object to wrap the kms framebuffer around.
* Either the buffer or surface inside the user object must be NULL.
+ * @info: pixel format information.
* @mode_cmd: Frame-buffer metadata.
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_user_object *uo,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
@@ -692,10 +696,10 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
/* Create the new framebuffer depending one what we have */
if (vmw_user_object_surface(uo)) {
ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
- mode_cmd);
+ info, mode_cmd);
} else if (uo->buffer) {
ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
- mode_cmd);
+ info, mode_cmd);
} else {
BUG();
}
@@ -712,6 +716,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -741,7 +746,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
- vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
+ vfb = vmw_kms_new_framebuffer(dev_priv, &uo, info, mode_cmd);
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 511e29cdb987..445471fe9be6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -399,6 +399,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_user_object *uo,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 7fb1c88bcc47..69dfe69ce0f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true,
+ .pin = false,
.keep_resv = true,
};
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 99a91355842e..714d5702dfd7 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -5,6 +5,7 @@ config DRM_XE
depends on KUNIT || !KUNIT
depends on INTEL_VSEC || !INTEL_VSEC
depends on X86_PLATFORM_DEVICES || !(X86 && ACPI)
+ depends on PAGE_SIZE_4KB || COMPILE_TEST || BROKEN
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -32,7 +33,6 @@ config DRM_XE
select ACPI_VIDEO if X86 && ACPI
select ACPI_WMI if X86 && ACPI
select SYNC_FILE
- select IOSF_MBI
select CRC32
select SND_HDA_I915 if SND_HDA_CORE
select CEC_CORE if CEC_NOTIFIER
@@ -46,6 +46,7 @@ config DRM_XE
select WANT_DEV_COREDUMP
select AUXILIARY_BUS
select HMM_MIRROR
+ select REGMAP if I2C
help
Driver for Intel Xe2 series GPUs and later. Experimental support
for Xe series is also available.
@@ -87,16 +88,18 @@ config DRM_XE_GPUSVM
Enable this option if you want support for CPU to GPU address
mirroring.
- If in doubut say "Y".
+ If in doubt say "Y".
-config DRM_XE_DEVMEM_MIRROR
- bool "Enable device memory mirror"
+config DRM_XE_PAGEMAP
+ bool "Enable device memory pool for SVM"
depends on DRM_XE_GPUSVM
select GET_FREE_REGION
default y
help
- Disable this option only if you want to compile out without device
- memory mirror. Will reduce KMD memory footprint when disabled.
+ Disable this option only if you don't want to expose local device
+ memory for SVM. Will reduce KMD memory footprint when disabled.
+
+ If in doubt say "Y".
config DRM_XE_FORCE_PROBE
string "Force probe xe for selected Intel hardware IDs"
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 0d749ed44878..01735c6ece8b 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -86,12 +86,17 @@ config DRM_XE_KUNIT_TEST
If in doubt, say "N".
-config DRM_XE_LARGE_GUC_BUFFER
- bool "Enable larger guc log buffer"
+config DRM_XE_DEBUG_GUC
+ bool "Enable extra GuC related debug options"
+ depends on DRM_XE_DEBUG
default n
+ select STACKDEPOT
help
Choose this option when debugging guc issues.
- Buffer should be large enough for complex issues.
+ The GuC log buffer is increased to the maximum allowed, which should
+ be large enough for complex issues. The tracking of FAST_REQ messages
+ is extended to include a record of the calling stack, which is then
+ dumped on a FAST_REQ error notification.
Recommended for driver developers only.
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index e4bf484d4121..07c71a29963d 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -21,6 +21,13 @@ $(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
$(src)/xe_wa_oob.rules
$(call cmd,wa_oob)
+generated_device_oob := $(obj)/generated/xe_device_wa_oob.c $(obj)/generated/xe_device_wa_oob.h
+quiet_cmd_device_wa_oob = GEN $(notdir $(generated_device_oob))
+ cmd_device_wa_oob = mkdir -p $(@D); $^ $(generated_device_oob)
+$(obj)/generated/%_device_wa_oob.c $(obj)/generated/%_device_wa_oob.h: $(obj)/xe_gen_wa_oob \
+ $(src)/xe_device_wa_oob.rules
+ $(call cmd,device_wa_oob)
+
# Please keep these build lists sorted!
# core driver code
@@ -80,6 +87,7 @@ xe-y += xe_bb.o \
xe_mmio.o \
xe_mocs.o \
xe_module.o \
+ xe_nvm.o \
xe_oa.o \
xe_observation.o \
xe_pat.o \
@@ -124,6 +132,7 @@ xe-y += xe_bb.o \
xe_wait_user_fence.o \
xe_wopcm.o
+xe-$(CONFIG_I2C) += xe_i2c.o
xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o
@@ -139,7 +148,8 @@ xe-y += \
xe_guc_relay.o \
xe_memirq.o \
xe_sriov.o \
- xe_sriov_vf.o
+ xe_sriov_vf.o \
+ xe_tile_sriov_vf.o
xe-$(CONFIG_PCI_IOV) += \
xe_gt_sriov_pf.o \
@@ -153,7 +163,8 @@ xe-$(CONFIG_PCI_IOV) += \
xe_lmtt_2l.o \
xe_lmtt_ml.o \
xe_pci_sriov.o \
- xe_sriov_pf.o
+ xe_sriov_pf.o \
+ xe_sriov_pf_service.o
# include helpers for tests even when XE is built-in
ifdef CONFIG_DRM_XE_KUNIT_TEST
@@ -204,7 +215,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/icl_dsi.o \
i915-display/intel_alpm.o \
i915-display/intel_atomic.o \
- i915-display/intel_atomic_plane.o \
i915-display/intel_audio.o \
i915-display/intel_backlight.o \
i915-display/intel_bios.o \
@@ -254,6 +264,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_fbc.o \
i915-display/intel_fdi.o \
i915-display/intel_fifo_underrun.o \
+ i915-display/intel_flipq.o \
i915-display/intel_frontbuffer.o \
i915-display/intel_global_state.o \
i915-display/intel_gmbus.o \
@@ -270,6 +281,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_modeset_verify.o \
i915-display/intel_panel.o \
i915-display/intel_pfit.o \
+ i915-display/intel_plane.o \
i915-display/intel_pmdemand.o \
i915-display/intel_pch.o \
i915-display/intel_pps.o \
@@ -337,4 +349,4 @@ $(obj)/%.hdrtest: $(src)/%.h FORCE
$(call if_changed_dep,hdrtest)
uses_generated_oob := $(addprefix $(obj)/, $(xe-y))
-$(uses_generated_oob): $(obj)/generated/xe_wa_oob.h
+$(uses_generated_oob): $(obj)/generated/xe_wa_oob.h $(obj)/generated/xe_device_wa_oob.h
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 448afb86e05c..81eb046aeebf 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -142,6 +142,7 @@ enum xe_guc_action {
XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER = 0x550C,
XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER = 0x550D,
+ XE_GUC_ACTION_OPT_IN_FEATURE_KLV = 0x550E,
XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000,
XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002,
XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003,
@@ -161,6 +162,37 @@ enum xe_guc_preempt_options {
XE_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
};
+enum xe_guc_register_context_param_offsets {
+ XE_GUC_REGISTER_CONTEXT_DATA_0_MBZ = 0,
+ XE_GUC_REGISTER_CONTEXT_DATA_1_FLAGS,
+ XE_GUC_REGISTER_CONTEXT_DATA_2_CONTEXT_INDEX,
+ XE_GUC_REGISTER_CONTEXT_DATA_3_ENGINE_CLASS,
+ XE_GUC_REGISTER_CONTEXT_DATA_4_ENGINE_SUBMIT_MASK,
+ XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER,
+ XE_GUC_REGISTER_CONTEXT_DATA_6_WQ_DESC_ADDR_UPPER,
+ XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER,
+ XE_GUC_REGISTER_CONTEXT_DATA_8_WQ_BUF_BASE_UPPER,
+ XE_GUC_REGISTER_CONTEXT_DATA_9_WQ_BUF_SIZE,
+ XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR,
+ XE_GUC_REGISTER_CONTEXT_MSG_LEN,
+};
+
+enum xe_guc_register_context_multi_lrc_param_offsets {
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_0_MBZ = 0,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_1_FLAGS,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_2_PARENT_CONTEXT,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_3_ENGINE_CLASS,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_4_ENGINE_SUBMIT_MASK,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_6_WQ_DESC_ADDR_UPPER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_8_WQ_BUF_BASE_UPPER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_9_WQ_BUF_SIZE,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN = 11,
+};
+
enum xe_guc_report_status {
XE_GUC_REPORT_STATUS_UNKNOWN = 0x0,
XE_GUC_REPORT_STATUS_ACKED = 0x1,
@@ -240,4 +272,7 @@ enum xe_guc_g2g_type {
#define XE_G2G_DEREGISTER_TILE REG_GENMASK(15, 12)
#define XE_G2G_DEREGISTER_TYPE REG_GENMASK(11, 8)
+/* invalid type for XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR */
+#define XE_GUC_CAT_ERR_TYPE_INVALID 0xdeadbeef
+
#endif
diff --git a/drivers/gpu/drm/xe/abi/guc_errors_abi.h b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
index 2c627a21648f..ecf748fd87df 100644
--- a/drivers/gpu/drm/xe/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
@@ -6,8 +6,7 @@
#ifndef _ABI_GUC_ERRORS_ABI_H
#define _ABI_GUC_ERRORS_ABI_H
-enum xe_guc_response_status {
- XE_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
+enum xe_guc_response {
XE_GUC_RESPONSE_ERROR_PROTOCOL = 0x04,
XE_GUC_RESPONSE_INVALID_STATE = 0x0A,
XE_GUC_RESPONSE_UNSUPPORTED_VERSION = 0x0B,
@@ -21,12 +20,20 @@ enum xe_guc_response_status {
XE_GUC_RESPONSE_CANNOT_COMPLETE_ACTION = 0x41,
XE_GUC_RESPONSE_INVALID_KLV_DATA = 0x50,
XE_GUC_RESPONSE_INVALID_PARAMS = 0x60,
+ XE_GUC_RESPONSE_INVALID_CONTEXT_INDEX = 0x61,
+ XE_GUC_RESPONSE_INVALID_CONTEXT_REGISTRATION = 0x62,
+ XE_GUC_RESPONSE_INVALID_DOORBELL_ID = 0x63,
+ XE_GUC_RESPONSE_INVALID_ENGINE_ID = 0x64,
XE_GUC_RESPONSE_INVALID_BUFFER_RANGE = 0x70,
XE_GUC_RESPONSE_INVALID_BUFFER = 0x71,
+ XE_GUC_RESPONSE_BUFFER_ALREADY_REGISTERED = 0x72,
XE_GUC_RESPONSE_INVALID_GGTT_ADDRESS = 0x80,
XE_GUC_RESPONSE_PENDING_ACTION = 0x90,
+ XE_GUC_RESPONSE_CONTEXT_NOT_REGISTERED = 0x100,
+ XE_GUC_RESPONSE_CONTEXT_ALREADY_REGISTERED = 0x101,
XE_GUC_RESPONSE_INVALID_SIZE = 0x102,
XE_GUC_RESPONSE_MALFORMED_KLV = 0x103,
+ XE_GUC_RESPONSE_INVALID_CONTEXT = 0x104,
XE_GUC_RESPONSE_INVALID_KLV_KEY = 0x105,
XE_GUC_RESPONSE_DATA_TOO_LARGE = 0x106,
XE_GUC_RESPONSE_VF_MIGRATED = 0x107,
@@ -40,10 +47,11 @@ enum xe_guc_response_status {
XE_GUC_RESPONSE_CTB_NOT_REGISTERED = 0x304,
XE_GUC_RESPONSE_CTB_IN_USE = 0x305,
XE_GUC_RESPONSE_CTB_INVALID_DESC = 0x306,
+ XE_GUC_RESPONSE_HW_TIMEOUT = 0x30C,
XE_GUC_RESPONSE_CTB_SOURCE_INVALID_DESCRIPTOR = 0x30D,
XE_GUC_RESPONSE_CTB_DESTINATION_INVALID_DESCRIPTOR = 0x30E,
XE_GUC_RESPONSE_INVALID_CONFIG_STATE = 0x30F,
- XE_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
+ XE_GUC_RESPONSE_GENERIC_FAIL = 0xF000,
};
enum xe_guc_load_status {
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 7de8f827281f..0366a9da5977 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -16,6 +16,7 @@
* +===+=======+==============================================================+
* | 0 | 31:16 | **KEY** - KLV key identifier |
* | | | - `GuC Self Config KLVs`_ |
+ * | | | - `GuC Opt In Feature KLVs`_ |
* | | | - `GuC VGT Policy KLVs`_ |
* | | | - `GuC VF Configuration KLVs`_ |
* | | | |
@@ -125,6 +126,33 @@ enum {
};
/**
+ * DOC: GuC Opt In Feature KLVs
+ *
+ * `GuC KLV`_ keys available for use with OPT_IN_FEATURE_KLV
+ *
+ * _`GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE` : 0x4001
+ * Adds an extra dword to the XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR G2H
+ * containing the type of the CAT error. On HW that does not support
+ * reporting the CAT error type, the extra dword is set to 0xdeadbeef.
+ *
+ * _`GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH` : 0x4003
+ * This KLV enables the Dynamic Inhibit Context Switch optimization, which
+ * consists in the GuC setting the CTX_CTRL_INHIBIT_SYN_CTX_SWITCH bit to
+ * zero in the CTX_CONTEXT_CONTROL register of LRCs that are submitted
+ * to an oversubscribed engine. This will cause those contexts to be
+ * switched out immediately if they hit an unsatisfied semaphore wait
+ * (instead of waiting the full timeslice duration). The bit is instead set
+ * to one if a single context is queued on the engine, to avoid it being
+ * switched out if there isn't another context that can run in its place.
+ */
+
+#define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_KEY 0x4001
+#define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_LEN 0u
+
+#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_KEY 0x4003
+#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN 0u
+
+/**
* DOC: GuC VGT Policy KLVs
*
* `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VGT_POLICY.
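
A KLV is a key-length-value tuple whose header dword carries the key in bits 31:16, with the value length (in dwords) in the low word, as the layout table earlier in this header and the *_LEN macros suggest. A small sketch of packing such a header, using the opt-in feature key defined above:

#include <stdint.h>
#include <stdio.h>

/* Sketch of packing a GuC KLV header dword: KEY in bits 31:16,
 * LEN (number of value dwords) in bits 15:0. The opt-in feature
 * KLVs above are key-only, so LEN is 0.
 */
#define KLV_KEY_SHIFT 16
#define KLV_LEN_MASK  0xffffu

static uint32_t klv_header(uint16_t key, uint16_t len)
{
	return ((uint32_t)key << KLV_KEY_SHIFT) | (len & KLV_LEN_MASK);
}

int main(void)
{
	/* GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE: key 0x4001, len 0 */
	printf("0x%08x\n", klv_header(0x4001, 0)); /* 0x40010000 */
	return 0;
}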
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
index a473aa6697d0..4fcd3bf6b76f 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
@@ -6,37 +6,6 @@
#ifndef __INTEL_PCODE_H__
#define __INTEL_PCODE_H__
-#include "intel_uncore.h"
#include "xe_pcode.h"
-static inline int
-snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
- int fast_timeout_us, int slow_timeout_ms)
-{
- return xe_pcode_write_timeout(__compat_uncore_to_tile(uncore), mbox, val,
- slow_timeout_ms ?: 1);
-}
-
-static inline int
-snb_pcode_write(struct intel_uncore *uncore, u32 mbox, u32 val)
-{
-
- return xe_pcode_write(__compat_uncore_to_tile(uncore), mbox, val);
-}
-
-static inline int
-snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
-{
- return xe_pcode_read(__compat_uncore_to_tile(uncore), mbox, val, val1);
-}
-
-static inline int
-skl_pcode_request(struct intel_uncore *uncore, u32 mbox,
- u32 request, u32 reply_mask, u32 reply,
- int timeout_base_ms)
-{
- return xe_pcode_request(__compat_uncore_to_tile(uncore), mbox, request, reply_mask, reply,
- timeout_base_ms);
-}
-
#endif /* __INTEL_PCODE_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index 0c1e88e36a1e..d012f02bc84f 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -24,13 +24,6 @@ static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncor
return xe_root_tile_mmio(xe);
}
-static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
-{
- struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
-
- return xe_device_get_root_tile(xe);
-}
-
static inline u32 intel_uncore_read(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
@@ -110,12 +103,13 @@ static inline int intel_wait_for_register(struct intel_uncore *uncore,
static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t i915_reg, u32 mask,
- u32 value, unsigned int timeout)
+ u32 value, unsigned int timeout,
+ u32 *out_value)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
- timeout * USEC_PER_MSEC, NULL, false);
+ timeout * USEC_PER_MSEC, out_value, false);
}
static inline int
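
The intel_wait_for_register_fw() compat change threads an out_value pointer down to xe_mmio_wait32() so callers can inspect the last value read even when the wait times out. A toy standalone model of a poll loop with that reporting behavior (fake register and illustrative names, not the xe implementation):

#include <stdint.h>
#include <stdio.h>

/* Toy model of the wait-for-register change: the wrapper reports the
 * last value observed through out_value, on success and on timeout.
 */
static uint32_t fake_reg;

static int wait_for_reg(uint32_t mask, uint32_t value, int tries,
			uint32_t *out_value)
{
	uint32_t v = 0;

	while (tries--) {
		v = fake_reg;   /* stand-in for an MMIO read */
		if ((v & mask) == value)
			break;
		fake_reg++;     /* pretend the hardware progresses */
	}
	if (out_value)
		*out_value = v; /* report last read, pass or fail */
	return (v & mask) == value ? 0 : -1;
}

int main(void)
{
	uint32_t last;
	int ret = wait_for_reg(0xf, 0x3, 16, &last);

	printf("ret=%d last=0x%x\n", ret, last);
	return 0;
}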
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h b/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h
new file mode 100644
index 000000000000..69e1935e9cdf
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _VLV_IOSF_SB_H_
+#define _VLV_IOSF_SB_H_
+
+#include <linux/types.h>
+
+#include "vlv_iosf_sb_reg.h"
+
+struct drm_device;
+
+enum vlv_iosf_sb_unit {
+ VLV_IOSF_SB_BUNIT,
+ VLV_IOSF_SB_CCK,
+ VLV_IOSF_SB_CCU,
+ VLV_IOSF_SB_DPIO,
+ VLV_IOSF_SB_DPIO_2,
+ VLV_IOSF_SB_FLISDSI,
+ VLV_IOSF_SB_GPIO,
+ VLV_IOSF_SB_NC,
+ VLV_IOSF_SB_PUNIT,
+};
+
+static inline void vlv_iosf_sb_get(struct drm_device *drm, unsigned long ports)
+{
+}
+static inline u32 vlv_iosf_sb_read(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr)
+{
+ return 0;
+}
+static inline int vlv_iosf_sb_write(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr, u32 val)
+{
+ return 0;
+}
+static inline void vlv_iosf_sb_put(struct drm_device *drm, unsigned long ports)
+{
+}
+
+#endif /* _VLV_IOSF_SB_H_ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h b/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h
index 949f134ce3cf..cb7fa8e794a6 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h
@@ -3,4 +3,4 @@
* Copyright © 2023 Intel Corporation
*/
-#include "../../i915/vlv_sideband_reg.h"
+#include "../../i915/vlv_iosf_sb_reg.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h b/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h
deleted file mode 100644
index ec6f12de5727..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2013-2021 Intel Corporation
- */
-
-#ifndef _VLV_SIDEBAND_H_
-#define _VLV_SIDEBAND_H_
-
-#include <linux/types.h>
-
-#include "vlv_sideband_reg.h"
-
-enum pipe;
-struct drm_i915_private;
-
-enum {
- VLV_IOSF_SB_BUNIT,
- VLV_IOSF_SB_CCK,
- VLV_IOSF_SB_CCU,
- VLV_IOSF_SB_DPIO,
- VLV_IOSF_SB_FLISDSI,
- VLV_IOSF_SB_GPIO,
- VLV_IOSF_SB_NC,
- VLV_IOSF_SB_PUNIT,
-};
-
-static inline void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
-{
-}
-static inline u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
-{
- return 0;
-}
-static inline void vlv_iosf_sb_write(struct drm_i915_private *i915,
- u8 port, u32 reg, u32 val)
-{
-}
-static inline void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
-{
-}
-static inline void vlv_bunit_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
-{
- return 0;
-}
-static inline void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
-}
-static inline void vlv_bunit_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_cck_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
-{
- return 0;
-}
-static inline void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
-}
-static inline void vlv_cck_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_ccu_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
-{
- return 0;
-}
-static inline void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
-}
-static inline void vlv_ccu_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_dpio_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_dpio_read(struct drm_i915_private *i915, int pipe, int reg)
-{
- return 0;
-}
-static inline void vlv_dpio_write(struct drm_i915_private *i915,
- int pipe, int reg, u32 val)
-{
-}
-static inline void vlv_dpio_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_flisdsi_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
-{
- return 0;
-}
-static inline void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
-}
-static inline void vlv_flisdsi_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_nc_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
-{
- return 0;
-}
-static inline void vlv_nc_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_punit_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
-{
- return 0;
-}
-static inline int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
-{
- return 0;
-}
-static inline void vlv_punit_put(struct drm_i915_private *i915)
-{
-}
-
-#endif /* _VLV_SIDEBAND_H_ */
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
index 27437c22bd70..910632f57c3d 100644
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -1,7 +1,12 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
+#include <drm/drm_cache.h>
#include <drm/drm_gem.h>
+#include <drm/drm_panic.h>
+
+#include "intel_fb.h"
+#include "intel_display_types.h"
#include "xe_bo.h"
#include "intel_bo.h"
@@ -59,3 +64,89 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
/* FIXME */
}
+
+struct xe_panic_data {
+ struct page **pages;
+ int page;
+ void *vaddr;
+};
+
+struct xe_framebuffer {
+ struct intel_framebuffer base;
+ struct xe_panic_data panic;
+};
+
+static inline struct xe_panic_data *to_xe_panic_data(struct intel_framebuffer *fb)
+{
+ return &container_of_const(fb, struct xe_framebuffer, base)->panic;
+}
+
+static void xe_panic_kunmap(struct xe_panic_data *panic)
+{
+ if (panic->vaddr) {
+ drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
+ kunmap_local(panic->vaddr);
+ panic->vaddr = NULL;
+ }
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel, map the page
+ * with ttm_bo_kmap_try_from_panic() and write the pixel through it. Keep
+ * the mapping from the previous pixel to avoid excessive map/unmap cycles.
+ */
+static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct xe_panic_data *panic = to_xe_panic_data(fb);
+ struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
+ unsigned int new_page;
+ unsigned int offset;
+
+ if (fb->panic_tiling)
+ offset = fb->panic_tiling(sb->width, x, y);
+ else
+ offset = y * sb->pitch[0] + x * sb->format->cpp[0];
+
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != panic->page) {
+ xe_panic_kunmap(panic);
+ panic->page = new_page;
+ panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
+ panic->page);
+ }
+ if (panic->vaddr) {
+ u32 *pix = panic->vaddr + offset;
+ *pix = color;
+ }
+}
+
+struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
+{
+ struct xe_framebuffer *xe_fb;
+
+ xe_fb = kzalloc(sizeof(*xe_fb), GFP_KERNEL);
+ if (xe_fb)
+ return &xe_fb->base;
+ return NULL;
+}
+
+int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct xe_panic_data *panic = to_xe_panic_data(fb);
+
+ panic->page = -1;
+ sb->set_pixel = xe_panic_page_set_pixel;
+ return 0;
+}
+
+void intel_bo_panic_finish(struct intel_framebuffer *fb)
+{
+ struct xe_panic_data *panic = to_xe_panic_data(fb);
+
+ xe_panic_kunmap(panic);
+ panic->page = -1;
+}
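
The page-split arithmetic in xe_panic_page_set_pixel() above is the core of the panic drawing path: derive a linear byte offset from (x, y), divide it into a page index and an in-page offset, and remap only when the page index changes. A minimal standalone C sketch of that split, assuming a linear layout and 4 KiB pages (names are illustrative, not the driver's):

/*
 * Illustrative split of a framebuffer byte offset into (page, in-page
 * offset), mirroring xe_panic_page_set_pixel(). Standalone userspace C.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1u << SKETCH_PAGE_SHIFT)

static void pixel_to_page(uint32_t pitch, uint32_t cpp,
			  uint32_t x, uint32_t y,
			  uint32_t *page, uint32_t *in_page)
{
	uint32_t offset = y * pitch + x * cpp;	/* linear framebuffer layout */

	*page = offset >> SKETCH_PAGE_SHIFT;	/* which backing page */
	*in_page = offset % SKETCH_PAGE_SIZE;	/* offset within that page */
}

int main(void)
{
	uint32_t page, in_page;

	/* 1024-pixel-wide XRGB8888 framebuffer: pitch = 4096, cpp = 4 */
	pixel_to_page(4096, 4, 3, 2, &page, &in_page);
	printf("pixel (3,2) -> page %u, offset %u\n", page, in_page);
	return 0;
}

The driver caches the mapping in panic->page/panic->vaddr and calls ttm_bo_kmap_try_from_panic() only when the page index changes, since mapping pages from panic context is both costly and allowed to fail.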
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index e8191562d122..fba9617a75a5 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -5,6 +5,7 @@
#include <drm/drm_fb_helper.h>
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fbdev_fb.h"
@@ -65,7 +66,11 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
goto err;
}
- fb = intel_framebuffer_create(&obj->ttm.base, &mode_cmd);
+ fb = intel_framebuffer_create(&obj->ttm.base,
+ drm_get_format_info(dev,
+ mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd);
if (IS_ERR(fb)) {
xe_bo_unpin_map_no_vm(obj);
goto err;
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 9f4ade25787a..e2e0771cf274 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -20,6 +20,7 @@
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
@@ -38,7 +39,9 @@
static bool has_display(struct xe_device *xe)
{
- return HAS_DISPLAY(&xe->display);
+ struct intel_display *display = xe->display;
+
+ return HAS_DISPLAY(display);
}
/**
@@ -46,6 +49,8 @@ static bool has_display(struct xe_device *xe)
* early on
* @pdev: PCI device
*
+ * Note: This is called before xe or display device creation.
+ *
* Returns: true if probe needs to be deferred, false otherwise
*/
bool xe_display_driver_probe_defer(struct pci_dev *pdev)
@@ -63,6 +68,8 @@ bool xe_display_driver_probe_defer(struct pci_dev *pdev)
* Set features and function hooks in @driver that are needed for driving the
* display IP. This sets the driver's capability of driving display, regardless
* if the device has it enabled
+ *
+ * Note: This is called before xe or display device creation.
*/
void xe_display_driver_set_hooks(struct drm_driver *driver)
{
@@ -81,39 +88,10 @@ static void unset_display_features(struct xe_device *xe)
xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}
-static void display_destroy(struct drm_device *dev, void *dummy)
-{
- struct xe_device *xe = to_xe_device(dev);
-
- destroy_workqueue(xe->display.hotplug.dp_wq);
-}
-
-/**
- * xe_display_create - create display struct
- * @xe: XE device instance
- *
- * Initialize all fields used by the display part.
- *
- * TODO: once everything can be inside a single struct, make the struct opaque
- * to the rest of xe and return it to be xe->display.
- *
- * Returns: 0 on success
- */
-int xe_display_create(struct xe_device *xe)
-{
- spin_lock_init(&xe->display.fb_tracking.lock);
-
- xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
- if (!xe->display.hotplug.dp_wq)
- return -ENOMEM;
-
- return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
-}
-
static void xe_display_fini_early(void *arg)
{
struct xe_device *xe = arg;
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -126,7 +104,7 @@ static void xe_display_fini_early(void *arg)
int xe_display_init_early(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
int err;
if (!xe->info.probe_display)
@@ -144,7 +122,9 @@ int xe_display_init_early(struct xe_device *xe)
* Fill the dram structure to get the system dram info. This will be
* used for memory latency calculation.
*/
- intel_dram_detect(xe);
+ err = intel_dram_detect(xe);
+ if (err)
+ goto err_opregion;
intel_bw_init_hw(display);
@@ -170,7 +150,7 @@ err_opregion:
static void xe_display_fini(void *arg)
{
struct xe_device *xe = arg;
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
intel_hpd_poll_fini(display);
intel_hdcp_component_fini(display);
@@ -180,7 +160,7 @@ static void xe_display_fini(void *arg)
int xe_display_init(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
int err;
if (!xe->info.probe_display)
@@ -195,7 +175,7 @@ int xe_display_init(struct xe_device *xe)
void xe_display_register(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -206,7 +186,7 @@ void xe_display_register(struct xe_device *xe)
void xe_display_unregister(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -219,7 +199,7 @@ void xe_display_unregister(struct xe_device *xe)
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -230,7 +210,7 @@ void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -241,7 +221,7 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
void xe_display_irq_reset(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -251,7 +231,7 @@ void xe_display_irq_reset(struct xe_device *xe)
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -292,7 +272,7 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
static void xe_display_enable_d3cold(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -315,7 +295,7 @@ static void xe_display_enable_d3cold(struct xe_device *xe)
static void xe_display_disable_d3cold(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -339,7 +319,7 @@ static void xe_display_disable_d3cold(struct xe_device *xe)
void xe_display_pm_suspend(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
bool s2idle = suspend_to_idle();
if (!xe->info.probe_display)
@@ -364,7 +344,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
if (has_display(xe)) {
intel_display_driver_suspend_access(display);
- intel_encoder_suspend_all(&xe->display);
+ intel_encoder_suspend_all(display);
}
intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
@@ -374,7 +354,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
void xe_display_pm_shutdown(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -405,7 +385,7 @@ void xe_display_pm_shutdown(struct xe_device *xe)
void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -420,7 +400,7 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe)
void xe_display_pm_suspend_late(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
bool s2idle = suspend_to_idle();
if (!xe->info.probe_display)
@@ -431,7 +411,7 @@ void xe_display_pm_suspend_late(struct xe_device *xe)
void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -449,7 +429,7 @@ void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
void xe_display_pm_shutdown_late(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -464,7 +444,7 @@ void xe_display_pm_shutdown_late(struct xe_device *xe)
void xe_display_pm_resume_early(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -474,7 +454,7 @@ void xe_display_pm_resume_early(struct xe_device *xe)
void xe_display_pm_resume(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -509,7 +489,7 @@ void xe_display_pm_resume(struct xe_device *xe)
void xe_display_pm_runtime_resume(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -532,6 +512,17 @@ static void display_device_remove(struct drm_device *dev, void *arg)
intel_display_device_remove(display);
}
+/**
+ * xe_display_probe - probe display and create display struct
+ * @xe: XE device instance
+ *
+ * Initialize all fields used by the display part.
+ *
+ * TODO: once everything can be inside a single struct, make the struct opaque
+ * to the rest of xe and return it as xe->display.
+ *
+ * Returns: 0 on success
+ */
int xe_display_probe(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -542,11 +533,15 @@ int xe_display_probe(struct xe_device *xe)
goto no_display;
display = intel_display_device_probe(pdev);
+ if (IS_ERR(display))
+ return PTR_ERR(display);
err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
if (err)
return err;
+ xe->display = display;
+
if (has_display(xe))
return 0;
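
intel_display_device_probe() now reports failure through ERR_PTR(), letting xe_display_probe() propagate a precise errno from a single pointer return. A simplified standalone reimplementation of that linux/err.h idiom, with a hypothetical display struct and probe function:

/*
 * Userspace sketch of the kernel's ERR_PTR pattern (linux/err.h): a single
 * pointer return carries either a valid object or a negative errno.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long error) { return (void *)error; }
static inline long ptr_err(const void *ptr) { return (long)ptr; }
static inline int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct display { int id; };	/* hypothetical, stands in for intel_display */

static struct display *display_probe(int fail)
{
	struct display *d;

	if (fail)
		return err_ptr(-ENODEV);	/* probe rejected the device */

	d = malloc(sizeof(*d));
	if (!d)
		return err_ptr(-ENOMEM);
	d->id = 42;
	return d;
}

int main(void)
{
	struct display *d = display_probe(0);

	if (is_err(d))
		return (int)-ptr_err(d);	/* mirrors PTR_ERR() usage */
	printf("probed display %d\n", d->id);
	free(d);
	return 0;
}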
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 46e14f8dee28..e533aa4750bc 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -15,8 +15,6 @@ struct drm_driver;
bool xe_display_driver_probe_defer(struct pci_dev *pdev);
void xe_display_driver_set_hooks(struct drm_driver *driver);
-int xe_display_create(struct xe_device *xe);
-
int xe_display_probe(struct xe_device *xe);
int xe_display_init_early(struct xe_device *xe);
@@ -46,8 +44,6 @@ static inline int xe_display_driver_probe_defer(struct pci_dev *pdev) { return 0
static inline void xe_display_driver_set_hooks(struct drm_driver *driver) { }
static inline void xe_display_driver_remove(struct xe_device *xe) {}
-static inline int xe_display_create(struct xe_device *xe) { return 0; }
-
static inline int xe_display_probe(struct xe_device *xe) { return 0; }
static inline int xe_display_init_early(struct xe_device *xe) { return 0; }
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c
index 1955153aadba..3825376e98cc 100644
--- a/drivers/gpu/drm/xe/display/xe_display_rpm.c
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */
+#include "intel_display_core.h"
#include "intel_display_rpm.h"
+#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_pm.h"
static struct xe_device *display_to_xe(struct intel_display *display)
{
- return container_of(display, struct xe_device, display);
+ return to_xe_device(display->drm);
}
struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 2933ca97d673..68d1387d81a0 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -3,8 +3,8 @@
* Copyright © 2024 Intel Corporation
*/
+#include "intel_display_core.h"
#include "intel_display_wa.h"
-
#include "xe_device.h"
#include "xe_wa.h"
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 55259969480b..c38fba18effe 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -6,6 +6,7 @@
#include <drm/ttm/ttm_bo.h>
#include "i915_vma.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -23,6 +24,7 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_
struct xe_device *xe = xe_bo_device(bo);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
u32 column, row;
+ u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
/* TODO: Maybe rewrite so we can traverse the bo addresses sequentially,
* by writing dpt/ggtt in a different order?
@@ -32,10 +34,9 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_
u32 src_idx = src_stride * (height - 1) + column + bo_ofs;
for (row = 0; row < height; row++) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]);
+ u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);
- iosys_map_wr(map, *dpt_ofs, u64, pte);
+ iosys_map_wr(map, *dpt_ofs, u64, pte | addr);
*dpt_ofs += 8;
src_idx -= src_stride;
}
@@ -55,17 +56,15 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
{
struct xe_device *xe = xe_bo_device(bo);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
- u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index)
- = ggtt->pt_ops->pte_encode_bo;
u32 column, row;
+ u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
for (row = 0; row < height; row++) {
u32 src_idx = src_stride * row + bo_ofs;
for (column = 0; column < width; column++) {
- iosys_map_wr(map, *dpt_ofs, u64,
- pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]));
+ u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);
+ iosys_map_wr(map, *dpt_ofs, u64, pte | addr);
*dpt_ofs += 8;
src_idx++;
@@ -129,13 +128,13 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
return PTR_ERR(dpt);
if (view->type == I915_GTT_VIEW_NORMAL) {
+ u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
u32 x;
for (x = 0; x < size / XE_PAGE_SIZE; x++) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]);
+ u64 addr = xe_bo_addr(bo, x * XE_PAGE_SIZE, XE_PAGE_SIZE);
- iosys_map_wr(&dpt->vmap, x * 8, u64, pte);
+ iosys_map_wr(&dpt->vmap, x * 8, u64, pte | addr);
}
} else if (view->type == I915_GTT_VIEW_REMAPPED) {
const struct intel_remapped_info *remap_info = &view->remapped;
@@ -176,15 +175,15 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
{
struct xe_device *xe = xe_bo_device(bo);
u32 column, row;
+ u64 pte = ggtt->pt_ops->pte_encode_flags(bo, xe->pat.idx[XE_CACHE_NONE]);
for (column = 0; column < width; column++) {
u32 src_idx = src_stride * (height - 1) + column + bo_ofs;
for (row = 0; row < height; row++) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]);
+ u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);
- ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte);
+ ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte | addr);
*ggtt_ofs += XE_PAGE_SIZE;
src_idx -= src_stride;
}
@@ -202,14 +201,15 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
struct drm_gem_object *obj = intel_fb_bo(&fb->base);
struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = to_xe_device(fb->base.dev);
- struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
+ struct xe_tile *tile0 = xe_device_get_root_tile(xe);
+ struct xe_ggtt *ggtt = tile0->mem.ggtt;
u32 align;
int ret;
/* TODO: Consider sharing framebuffer mapping?
* embed i915_vma inside intel_framebuffer
*/
- xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_get_noresume(xe);
ret = mutex_lock_interruptible(&ggtt->lock);
if (ret)
goto out;
@@ -218,29 +218,22 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
align = max_t(u32, align, SZ_64K);
- if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) {
- vma->node = bo->ggtt_node[ggtt->tile->id];
+ if (bo->ggtt_node[tile0->id] && view->type == I915_GTT_VIEW_NORMAL) {
+ vma->node = bo->ggtt_node[tile0->id];
} else if (view->type == I915_GTT_VIEW_NORMAL) {
- u32 x, size = bo->ttm.base.size;
-
vma->node = xe_ggtt_node_init(ggtt);
if (IS_ERR(vma->node)) {
ret = PTR_ERR(vma->node);
goto out_unlock;
}
- ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
+ ret = xe_ggtt_node_insert_locked(vma->node, xe_bo_size(bo), align, 0);
if (ret) {
xe_ggtt_node_fini(vma->node);
goto out_unlock;
}
- for (x = 0; x < size; x += XE_PAGE_SIZE) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
- xe->pat.idx[XE_CACHE_NONE]);
-
- ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node->base.start + x, pte);
- }
+ xe_ggtt_map_bo(ggtt, vma->node, bo, xe->pat.idx[XE_CACHE_NONE]);
} else {
u32 i, ggtt_ofs;
const struct intel_rotation_info *rot_info = &view->rotated;
@@ -274,7 +267,7 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
out_unlock:
mutex_unlock(&ggtt->lock);
out:
- xe_pm_runtime_put(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_put(xe);
return ret;
}
@@ -349,7 +342,7 @@ err:
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
- u8 tile_id = vma->node->ggtt->tile->id;
+ u8 tile_id = xe_device_get_root_tile(xe_bo_device(vma->bo))->id;
if (!refcount_dec_and_test(&vma->ref))
return;
@@ -390,6 +383,7 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state,
{
struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
struct xe_device *xe = to_xe_device(fb->base.dev);
+ struct intel_display *display = xe->display;
struct i915_vma *vma;
if (old_plane_state->hw.fb == new_plane_state->hw.fb &&
@@ -400,8 +394,8 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state,
goto found;
}
- if (fb == intel_fbdev_framebuffer(xe->display.fbdev.fbdev)) {
- vma = intel_fbdev_vma_pointer(xe->display.fbdev.fbdev);
+ if (fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
+ vma = intel_fbdev_vma_pointer(display->fbdev.fbdev);
if (vma)
goto found;
}
@@ -464,3 +458,8 @@ u64 intel_dpt_offset(struct i915_vma *dpt_vma)
{
return 0;
}
+
+void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map)
+{
+ *map = vma->bo->vmap;
+}
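
The recurring change in this file is a loop-invariant hoist: the PTE bits that stay constant across a mapping are computed once via xe_ggtt_encode_pte_flags() (or pte_encode_flags()), and each loop iteration only ORs in the per-page address from xe_bo_addr(). A standalone sketch of that shape, with illustrative flag bits rather than the real xe GGTT PTE format:

/*
 * Sketch of the PTE-encoding split: invariant flag bits are hoisted out of
 * the loop; only the per-page address varies per iteration.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PTE_PRESENT  (1ull << 0)
#define SKETCH_PTE_CACHED   (1ull << 1)
#define SKETCH_PAGE_SIZE    4096ull

static uint64_t encode_flags(int cached)
{
	return SKETCH_PTE_PRESENT | (cached ? SKETCH_PTE_CACHED : 0);
}

int main(void)
{
	uint64_t base = 0x100000, flags = encode_flags(1);	/* hoisted */

	for (unsigned int i = 0; i < 4; i++) {
		uint64_t addr = base + i * SKETCH_PAGE_SIZE;

		printf("pte[%u] = 0x%llx\n", i,
		       (unsigned long long)(flags | addr));
	}
	return 0;
}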
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index b35a6f201d4a..30f1073141fc 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -85,7 +85,7 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
cmd_in = xe_bo_ggtt_addr(bo);
cmd_out = cmd_in + PAGE_SIZE;
- xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);
+ xe_map_memset(xe, &bo->vmap, 0, 0, xe_bo_size(bo));
gsc_context->hdcp_bo = bo;
gsc_context->hdcp_cmd_in = cmd_in;
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 6502b8274173..dcbc4b2d3fd9 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -10,14 +10,15 @@
#include "xe_ggtt.h"
#include "xe_mmio.h"
-#include "i915_reg.h"
-#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_display.h"
+#include "intel_display_core.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_plane_initial.h"
#include "xe_bo.h"
#include "xe_wa.h"
@@ -87,12 +88,8 @@ initial_plane_bo(struct xe_device *xe,
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
- u64 __iomem *gte = tile0->mem.ggtt->gsm;
- u64 pte;
+ u64 pte = xe_ggtt_read_pte(tile0->mem.ggtt, base);
- gte += base / XE_PAGE_SIZE;
-
- pte = ioread64(gte);
if (!(pte & XE_GGTT_PTE_DM)) {
drm_err(&xe->drm,
"Initial plane programming missing DM bit\n");
@@ -187,7 +184,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return false;
if (intel_framebuffer_init(to_intel_framebuffer(fb),
- &bo->ttm.base, &mode_cmd)) {
+ &bo->ttm.base, fb->format, &mode_cmd)) {
drm_dbg_kms(&xe->drm, "intel fb init failed\n");
goto err_bo;
}
diff --git a/drivers/gpu/drm/xe/display/xe_tdf.c b/drivers/gpu/drm/xe/display/xe_tdf.c
index 2a7fccbeb1d5..78bda4c47874 100644
--- a/drivers/gpu/drm/xe/display/xe_tdf.c
+++ b/drivers/gpu/drm/xe/display/xe_tdf.c
@@ -3,9 +3,9 @@
* Copyright © 2024 Intel Corporation
*/
-#include "xe_device.h"
-#include "intel_display_types.h"
+#include "intel_display_core.h"
#include "intel_tdf.h"
+#include "xe_device.h"
void intel_td_flush(struct intel_display *display)
{
diff --git a/drivers/gpu/drm/xe/regs/xe_bars.h b/drivers/gpu/drm/xe/regs/xe_bars.h
index ce05b6ae832f..880140d6ccdc 100644
--- a/drivers/gpu/drm/xe/regs/xe_bars.h
+++ b/drivers/gpu/drm/xe/regs/xe_bars.h
@@ -7,5 +7,6 @@
#define GTTMMADR_BAR 0 /* MMIO + GTT */
#define LMEM_BAR 2 /* VRAM */
+#define VF_LMEM_BAR 9 /* VF VRAM */
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
index 7702364b65f1..9b66cc972a63 100644
--- a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
@@ -16,6 +16,10 @@
#define MTL_GSC_HECI1_BASE 0x00116000
#define MTL_GSC_HECI2_BASE 0x00117000
+#define DG1_GSC_HECI2_BASE 0x00259000
+#define PVC_GSC_HECI2_BASE 0x00285000
+#define DG2_GSC_HECI2_BASE 0x00374000
+
#define HECI_H_CSR(base) XE_REG((base) + 0x4)
#define HECI_H_CSR_IE REG_BIT(0)
#define HECI_H_CSR_IS REG_BIT(1)
diff --git a/drivers/gpu/drm/xe/regs/xe_i2c_regs.h b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h
new file mode 100644
index 000000000000..af781c8e4a80
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _XE_I2C_REGS_H_
+#define _XE_I2C_REGS_H_
+
+#include <linux/pci_regs.h>
+
+#include "xe_reg_defs.h"
+#include "xe_regs.h"
+
+#define I2C_BRIDGE_OFFSET (SOC_BASE + 0xd9000)
+#define I2C_CONFIG_SPACE_OFFSET (SOC_BASE + 0xf6000)
+#define I2C_MEM_SPACE_OFFSET (SOC_BASE + 0xf7400)
+
+#define REG_SG_REMAP_ADDR_PREFIX XE_REG(SOC_BASE + 0x0164)
+#define REG_SG_REMAP_ADDR_POSTFIX XE_REG(SOC_BASE + 0x0168)
+
+#define I2C_CONFIG_CMD XE_REG(I2C_CONFIG_SPACE_OFFSET + PCI_COMMAND)
+#define I2C_CONFIG_PMCSR XE_REG(I2C_CONFIG_SPACE_OFFSET + 0x84)
+
+#endif /* _XE_I2C_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
index f0ecfcac4003..13635e4331d4 100644
--- a/drivers/gpu/drm/xe/regs/xe_irq_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
@@ -19,6 +19,7 @@
#define MASTER_IRQ REG_BIT(31)
#define GU_MISC_IRQ REG_BIT(29)
#define DISPLAY_IRQ REG_BIT(16)
+#define I2C_IRQ REG_BIT(12)
#define GT_DW_IRQ(x) REG_BIT(x)
/*
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 994af591a2e8..1b101edb838b 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -12,9 +12,13 @@
#define CTX_RING_START (0x08 + 1)
#define CTX_RING_CTL (0x0a + 1)
#define CTX_BB_PER_CTX_PTR (0x12 + 1)
+#define CTX_CS_INDIRECT_CTX (0x14 + 1)
+#define CTX_CS_INDIRECT_CTX_OFFSET (0x16 + 1)
#define CTX_TIMESTAMP (0x22 + 1)
#define CTX_TIMESTAMP_UDW (0x24 + 1)
#define CTX_INDIRECT_RING_STATE (0x26 + 1)
+#define CTX_ACC_CTR_THOLD (0x2a + 1)
+#define CTX_ASID (0x2e + 1)
#define CTX_PDP0_UDW (0x30 + 1)
#define CTX_PDP0_LDW (0x32 + 1)
@@ -36,4 +40,7 @@
#define INDIRECT_CTX_RING_START_UDW (0x08 + 1)
#define INDIRECT_CTX_RING_CTL (0x0a + 1)
+#define CTX_INDIRECT_CTX_OFFSET_MASK REG_GENMASK(15, 6)
+#define CTX_INDIRECT_CTX_OFFSET_DEFAULT REG_FIELD_PREP(CTX_INDIRECT_CTX_OFFSET_MASK, 0xd)
+
#endif
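
The CTX_* values above index dwords within the logical ring context image, which stores (register offset, value) pairs; on the conventional reading of this layout, the "+ 1" selects the value dword of each pair. A tiny standalone sketch of indexing such an image (the image contents are fabricated):

#include <stdint.h>
#include <stdio.h>

#define SKETCH_CTX_TIMESTAMP (0x22 + 1)	/* value dword of the pair at 0x22 */

int main(void)
{
	uint32_t image[0x40] = { 0 };

	image[0x22] = 0x2358;			/* fabricated register offset */
	image[SKETCH_CTX_TIMESTAMP] = 0xdeadbeef;	/* fabricated value */
	printf("timestamp = 0x%x\n", image[SKETCH_CTX_TIMESTAMP]);
	return 0;
}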
diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index a79ad2da070c..e693a50706f8 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -97,4 +97,7 @@
#define OAM_STATUS(base) XE_REG((base) + OAM_STATUS_OFFSET)
#define OAM_MMIO_TRG(base) XE_REG((base) + OAM_MMIO_TRG_OFFSET)
+#define OAM_COMPRESSION_T3_CONTROL XE_REG(0x1c2e00)
+#define OAM_LAT_MEASURE_ENABLE REG_BIT(4)
+
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
index c556a04670ee..fb097607b86c 100644
--- a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
@@ -18,12 +18,10 @@
#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c)
#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080)
-#define BMG_PACKAGE_ENERGY_STATUS XE_REG(0x138120)
#define BMG_FAN_1_SPEED XE_REG(0x138140)
#define BMG_FAN_2_SPEED XE_REG(0x138170)
#define BMG_FAN_3_SPEED XE_REG(0x1381a0)
#define BMG_VRAM_TEMPERATURE XE_REG(0x1382c0)
#define BMG_PACKAGE_TEMPERATURE XE_REG(0x138434)
-#define BMG_PLATFORM_ENERGY_STATUS XE_REG(0x138458)
#endif /* _XE_PCODE_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h
index f45abcd96ba8..2995d72c3f78 100644
--- a/drivers/gpu/drm/xe/regs/xe_pmt.h
+++ b/drivers/gpu/drm/xe/regs/xe_pmt.h
@@ -5,11 +5,16 @@
#ifndef _XE_PMT_H_
#define _XE_PMT_H_
-#define SOC_BASE 0x280000
+#include "xe_regs.h"
#define BMG_PMT_BASE_OFFSET 0xDB000
#define BMG_DISCOVERY_OFFSET (SOC_BASE + BMG_PMT_BASE_OFFSET)
+#define PUNIT_TELEMETRY_GUID XE_REG(BMG_DISCOVERY_OFFSET + 0x4)
+#define BMG_ENERGY_STATUS_PMT_OFFSET (0x30)
+#define ENERGY_PKG REG_GENMASK64(31, 0)
+#define ENERGY_CARD REG_GENMASK64(63, 32)
+
#define BMG_TELEMETRY_BASE_OFFSET 0xE0000
#define BMG_TELEMETRY_OFFSET (SOC_BASE + BMG_TELEMETRY_BASE_OFFSET)
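
ENERGY_PKG and ENERGY_CARD above carve one 64-bit telemetry word into two 32-bit counters. In the kernel these masks feed REG_FIELD_GET64(); a standalone equivalent with plain shifts and masks (the sample value is fabricated):

#include <stdint.h>
#include <stdio.h>

/* ENERGY_PKG = bits 31:0, ENERGY_CARD = bits 63:32 */
static uint32_t energy_pkg(uint64_t v)  { return (uint32_t)(v & 0xffffffffull); }
static uint32_t energy_card(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t sample = 0x0000001200000034ull;	/* fabricated sample */

	printf("pkg=%u card=%u\n", energy_pkg(sample), energy_card(sample));
	return 0;
}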
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 3abb17d2ca33..1926b4044314 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -7,6 +7,8 @@
#include "regs/xe_reg_defs.h"
+#define SOC_BASE 0x280000
+
#define GU_CNTL_PROTECTED XE_REG(0x10100C)
#define DRIVERINT_FLR_DIS REG_BIT(31)
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 378dcd0fb414..bb469096d072 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -106,7 +106,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
}
/* Check last CCS value, or at least last value in page. */
- offset = xe_device_ccs_bytes(tile_to_xe(tile), bo->size);
+ offset = xe_device_ccs_bytes(tile_to_xe(tile), xe_bo_size(bo));
offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1;
if (cpu_map[offset] != get_val) {
KUNIT_FAIL(test,
@@ -514,9 +514,9 @@ static int shrink_test_run_device(struct xe_device *xe)
* other way around, they may not be subject to swapping...
*/
if (alloced < purgeable) {
- xe_ttm_tt_account_subtract(&xe_tt->ttm);
+ xe_ttm_tt_account_subtract(xe, &xe_tt->ttm);
xe_tt->purgeable = true;
- xe_ttm_tt_account_add(&xe_tt->ttm);
+ xe_ttm_tt_account_add(xe, &xe_tt->ttm);
bo->ttm.priority = 0;
spin_lock(&bo->ttm.bdev->lru_lock);
ttm_bo_move_to_lru_tail(&bo->ttm);
diff --git a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c
deleted file mode 100644
index b683585db852..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c
+++ /dev/null
@@ -1,232 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 AND MIT
-/*
- * Copyright © 2024 Intel Corporation
- */
-
-#include <kunit/test.h>
-
-#include "xe_device.h"
-#include "xe_kunit_helpers.h"
-#include "xe_pci_test.h"
-
-static int pf_service_test_init(struct kunit *test)
-{
- struct xe_pci_fake_data fake = {
- .sriov_mode = XE_SRIOV_MODE_PF,
- .platform = XE_TIGERLAKE, /* some random platform */
- .subplatform = XE_SUBPLATFORM_NONE,
- };
- struct xe_device *xe;
- struct xe_gt *gt;
-
- test->priv = &fake;
- xe_kunit_helper_xe_device_test_init(test);
-
- xe = test->priv;
- KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
-
- gt = xe_device_get_gt(xe, 0);
- pf_init_versions(gt);
-
- /*
- * sanity check:
- * - all supported platforms VF/PF ABI versions must be defined
- * - base version can't be newer than latest
- */
- KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.base.major);
- KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.major,
- gt->sriov.pf.service.version.latest.major);
- if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
- KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.minor,
- gt->sriov.pf.service.version.latest.minor);
-
- test->priv = gt;
- return 0;
-}
-
-static void pf_negotiate_any(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt, VF2PF_HANDSHAKE_MAJOR_ANY,
- VF2PF_HANDSHAKE_MINOR_ANY,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
-}
-
-static void pf_negotiate_base_match(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major,
- gt->sriov.pf.service.version.base.minor,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.base.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.base.minor);
-}
-
-static void pf_negotiate_base_newer(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major,
- gt->sriov.pf.service.version.base.minor + 1,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.base.major);
- KUNIT_ASSERT_GE(test, minor, gt->sriov.pf.service.version.base.minor);
- if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
- KUNIT_ASSERT_LE(test, minor, gt->sriov.pf.service.version.latest.minor);
- else
- KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
-}
-
-static void pf_negotiate_base_next(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major + 1, 0,
- &major, &minor));
- KUNIT_ASSERT_GE(test, major, gt->sriov.pf.service.version.base.major);
- KUNIT_ASSERT_LE(test, major, gt->sriov.pf.service.version.latest.major);
- if (major == gt->sriov.pf.service.version.latest.major)
- KUNIT_ASSERT_LE(test, minor, gt->sriov.pf.service.version.latest.minor);
- else
- KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
-}
-
-static void pf_negotiate_base_older(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- if (!gt->sriov.pf.service.version.base.minor)
- kunit_skip(test, "no older minor\n");
-
- KUNIT_ASSERT_NE(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major,
- gt->sriov.pf.service.version.base.minor - 1,
- &major, &minor));
-}
-
-static void pf_negotiate_base_prev(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_NE(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major - 1, 1,
- &major, &minor));
-}
-
-static void pf_negotiate_latest_match(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major,
- gt->sriov.pf.service.version.latest.minor,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
-}
-
-static void pf_negotiate_latest_newer(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major,
- gt->sriov.pf.service.version.latest.minor + 1,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
-}
-
-static void pf_negotiate_latest_next(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major + 1, 0,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
-}
-
-static void pf_negotiate_latest_older(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- if (!gt->sriov.pf.service.version.latest.minor)
- kunit_skip(test, "no older minor\n");
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major,
- gt->sriov.pf.service.version.latest.minor - 1,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor - 1);
-}
-
-static void pf_negotiate_latest_prev(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
- kunit_skip(test, "no prev major");
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major - 1,
- gt->sriov.pf.service.version.base.minor + 1,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major - 1);
- KUNIT_ASSERT_GE(test, major, gt->sriov.pf.service.version.base.major);
-}
-
-static struct kunit_case pf_service_test_cases[] = {
- KUNIT_CASE(pf_negotiate_any),
- KUNIT_CASE(pf_negotiate_base_match),
- KUNIT_CASE(pf_negotiate_base_newer),
- KUNIT_CASE(pf_negotiate_base_next),
- KUNIT_CASE(pf_negotiate_base_older),
- KUNIT_CASE(pf_negotiate_base_prev),
- KUNIT_CASE(pf_negotiate_latest_match),
- KUNIT_CASE(pf_negotiate_latest_newer),
- KUNIT_CASE(pf_negotiate_latest_next),
- KUNIT_CASE(pf_negotiate_latest_older),
- KUNIT_CASE(pf_negotiate_latest_prev),
- {}
-};
-
-static struct kunit_suite pf_service_suite = {
- .name = "pf_service",
- .test_cases = pf_service_test_cases,
- .init = pf_service_test_init,
-};
-
-kunit_test_suite(pf_service_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
index 6faffcd74869..d266882adc0e 100644
--- a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
+++ b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
@@ -32,7 +32,7 @@ static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *
bo->tile = tile;
bo->ttm.bdev = &xe->ttm;
- bo->size = size;
+ bo->ttm.base.size = size;
iosys_map_set_vaddr(&bo->vmap, buf);
if (flags & XE_BO_FLAG_GGTT) {
@@ -42,10 +42,8 @@ static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo->ggtt_node[tile->id]);
KUNIT_ASSERT_EQ(test, 0,
- drm_mm_insert_node_in_range(&ggtt->mm,
- &bo->ggtt_node[tile->id]->base,
- bo->size, SZ_4K,
- 0, 0, U64_MAX, 0));
+ xe_ggtt_node_insert(bo->ggtt_node[tile->id],
+ xe_bo_size(bo), SZ_4K));
}
return bo;
@@ -67,8 +65,9 @@ static int guc_buf_test_init(struct kunit *test)
ggtt = xe_device_get_root_tile(test->priv)->mem.ggtt;
guc = &xe_device_get_gt(test->priv, 0)->uc.guc;
- drm_mm_init(&ggtt->mm, DUT_GGTT_START, DUT_GGTT_SIZE);
- mutex_init(&ggtt->lock);
+ KUNIT_ASSERT_EQ(test, 0,
+ xe_ggtt_init_kunit(ggtt, DUT_GGTT_START,
+ DUT_GGTT_START + DUT_GGTT_SIZE));
kunit_activate_static_stub(test, xe_managed_bo_create_pin_map,
replacement_xe_managed_bo_create_pin_map);
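
kunit_activate_static_stub() used above relies on the real function opting in with KUNIT_STATIC_STUB_REDIRECT(); while a stub is active for the running test, calls bounce to the replacement. A schematic kernel-context sketch of the mechanism, assuming <kunit/static_stub.h> (the functions here are hypothetical):

#include <kunit/static_stub.h>
#include <kunit/test.h>
#include <linux/errno.h>

static int read_hw_id(int unit)
{
	/* Redirects to an active stub registered by the running test. */
	KUNIT_STATIC_STUB_REDIRECT(read_hw_id, unit);
	return -ENODEV;			/* real hardware access elided */
}

static int fake_read_hw_id(int unit)
{
	return 0x1000 + unit;		/* deterministic value for the test */
}

static void hw_id_test(struct kunit *test)
{
	kunit_activate_static_stub(test, read_hw_id, fake_read_hw_id);
	KUNIT_EXPECT_EQ(test, read_hw_id(3), 0x1003);
}

static struct kunit_case hw_id_cases[] = {
	KUNIT_CASE(hw_id_test),
	{}
};

static struct kunit_suite hw_id_suite = {
	.name = "hw_id",
	.test_cases = hw_id_cases,
};
kunit_test_suite(hw_id_suite);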
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 4a65e3103f77..edd1e701aa1c 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -74,13 +74,13 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
{
struct xe_device *xe = tile_to_xe(m->tile);
u64 retval, expected = 0;
- bool big = bo->size >= SZ_2M;
+ bool big = xe_bo_size(bo) >= SZ_2M;
struct dma_fence *fence;
const char *str = big ? "Copying big bo" : "Copying small bo";
int err;
struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL,
- bo->size,
+ xe_bo_size(bo),
ttm_bo_type_kernel,
region |
XE_BO_FLAG_NEEDS_CPU_ACCESS |
@@ -105,7 +105,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
goto out_unlock;
}
- xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
+ xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote));
fence = xe_migrate_clear(m, remote, remote->ttm.resource,
XE_MIGRATE_CLEAR_FLAG_FULL);
if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" :
@@ -113,15 +113,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
retval = xe_map_rd(xe, &remote->vmap, 0, u64);
check(retval, expected, "remote first offset should be cleared",
test);
- retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64);
+ retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(remote) - 8, u64);
check(retval, expected, "remote last offset should be cleared",
test);
}
dma_fence_put(fence);
/* Try to copy 0xc0 from remote to vram with 2MB or 64KiB/4KiB pages */
- xe_map_memset(xe, &remote->vmap, 0, 0xc0, remote->size);
- xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);
+ xe_map_memset(xe, &remote->vmap, 0, 0xc0, xe_bo_size(remote));
+ xe_map_memset(xe, &bo->vmap, 0, 0xd0, xe_bo_size(bo));
expected = 0xc0c0c0c0c0c0c0c0;
fence = xe_migrate_copy(m, remote, bo, remote->ttm.resource,
@@ -131,15 +131,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
retval = xe_map_rd(xe, &bo->vmap, 0, u64);
check(retval, expected,
"remote -> vram bo first offset should be copied", test);
- retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64);
+ retval = xe_map_rd(xe, &bo->vmap, xe_bo_size(bo) - 8, u64);
check(retval, expected,
"remote -> vram bo offset should be copied", test);
}
dma_fence_put(fence);
/* And other way around.. slightly hacky.. */
- xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
- xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size);
+ xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote));
+ xe_map_memset(xe, &bo->vmap, 0, 0xc0, xe_bo_size(bo));
fence = xe_migrate_copy(m, bo, remote, bo->ttm.resource,
remote->ttm.resource, false);
@@ -148,7 +148,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
retval = xe_map_rd(xe, &remote->vmap, 0, u64);
check(retval, expected,
"vram -> remote bo first offset should be copied", test);
- retval = xe_map_rd(xe, &remote->vmap, bo->size - 8, u64);
+ retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(bo) - 8, u64);
check(retval, expected,
"vram -> remote bo last offset should be copied", test);
}
@@ -245,9 +245,9 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
if (m->q->vm->flags & XE_VM_FLAG_64K)
expected |= XE_PTE_PS64;
if (xe_bo_is_vram(pt))
- xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
+ xe_res_first(pt->ttm.resource, 0, xe_bo_size(pt), &src_it);
else
- xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);
+ xe_res_first_sg(xe_bo_sg(pt), 0, xe_bo_size(pt), &src_it);
emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
&src_it, XE_PAGE_SIZE, pt->ttm.resource);
@@ -276,7 +276,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
/* Clear a small bo */
kunit_info(test, "Clearing small buffer object\n");
- xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
+ xe_map_memset(xe, &tiny->vmap, 0, 0x22, xe_bo_size(tiny));
expected = 0;
fence = xe_migrate_clear(m, tiny, tiny->ttm.resource,
XE_MIGRATE_CLEAR_FLAG_FULL);
@@ -286,7 +286,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
dma_fence_put(fence);
retval = xe_map_rd(xe, &tiny->vmap, 0, u32);
check(retval, expected, "Command clear small first value", test);
- retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32);
+ retval = xe_map_rd(xe, &tiny->vmap, xe_bo_size(tiny) - 4, u32);
check(retval, expected, "Command clear small last value", test);
kunit_info(test, "Copying small buffer object to system\n");
@@ -298,7 +298,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
/* Clear a big bo */
kunit_info(test, "Clearing big buffer object\n");
- xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
+ xe_map_memset(xe, &big->vmap, 0, 0x11, xe_bo_size(big));
expected = 0;
fence = xe_migrate_clear(m, big, big->ttm.resource,
XE_MIGRATE_CLEAR_FLAG_FULL);
@@ -308,7 +308,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
dma_fence_put(fence);
retval = xe_map_rd(xe, &big->vmap, 0, u32);
check(retval, expected, "Command clear big first value", test);
- retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32);
+ retval = xe_map_rd(xe, &big->vmap, xe_bo_size(big) - 4, u32);
check(retval, expected, "Command clear big last value", test);
kunit_info(test, "Copying big buffer object to system\n");
@@ -370,7 +370,7 @@ static struct dma_fence *blt_copy(struct xe_tile *tile,
struct xe_migrate *m = tile->migrate;
struct xe_device *xe = gt_to_xe(gt);
struct dma_fence *fence = NULL;
- u64 size = src_bo->size;
+ u64 size = xe_bo_size(src_bo);
struct xe_res_cursor src_it, dst_it;
struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource;
u64 src_L0_ofs, dst_L0_ofs;
@@ -498,7 +498,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
long ret;
expected = 0xd0d0d0d0d0d0d0d0;
- xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, xe_bo_size(sys_bo));
fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
@@ -523,7 +523,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
check(retval, expected, "Clear evicted vram data first value", test);
- retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &vram_bo->vmap, xe_bo_size(vram_bo) - 8, u64);
check(retval, expected, "Clear evicted vram data last value", test);
fence = blt_copy(tile, vram_bo, ccs_bo,
@@ -532,7 +532,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
check(retval, 0, "Clear ccs data first value", test);
- retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &ccs_bo->vmap, xe_bo_size(ccs_bo) - 8, u64);
check(retval, 0, "Clear ccs data last value", test);
}
dma_fence_put(fence);
@@ -562,7 +562,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
check(retval, expected, "Restored value must be equal to initial value", test);
- retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &vram_bo->vmap, xe_bo_size(vram_bo) - 8, u64);
check(retval, expected, "Restored value must be equal to initial value", test);
fence = blt_copy(tile, vram_bo, ccs_bo,
@@ -570,7 +570,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
check(retval, 0, "Clear ccs data first value", test);
- retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &ccs_bo->vmap, xe_bo_size(ccs_bo) - 8, u64);
check(retval, 0, "Clear ccs data last value", test);
}
dma_fence_put(fence);
@@ -583,7 +583,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
u64 expected, retval;
expected = 0xd0d0d0d0d0d0d0d0;
- xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, xe_bo_size(sys_bo));
fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
@@ -597,7 +597,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) {
retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
check(retval, expected, "Decompressed value must be equal to initial value", test);
- retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64);
check(retval, expected, "Decompressed value must be equal to initial value", test);
}
dma_fence_put(fence);
@@ -615,7 +615,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) {
retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
check(retval, expected, "Clear main buffer first value", test);
- retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64);
check(retval, expected, "Clear main buffer last value", test);
}
dma_fence_put(fence);
@@ -625,7 +625,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
check(retval, expected, "Clear ccs data first value", test);
- retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64);
check(retval, expected, "Clear ccs data last value", test);
}
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 1d3e2e50c355..9c715e59f030 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -12,49 +12,79 @@
#include <kunit/test-bug.h>
#include <kunit/visibility.h>
+static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u.%02u %s",
+ param->verx100 / 100, param->verx100 % 100, param->name);
+}
+
+KUNIT_ARRAY_PARAM(graphics_ip, graphics_ips, xe_ip_kunit_desc);
+KUNIT_ARRAY_PARAM(media_ip, media_ips, xe_ip_kunit_desc);
+
+static void xe_pci_id_kunit_desc(const struct pci_device_id *param, char *desc)
+{
+ const struct xe_device_desc *dev_desc =
+ (const struct xe_device_desc *)param->driver_data;
+
+ if (dev_desc)
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "0x%X (%s)",
+ param->device, dev_desc->platform_name);
+}
+
+KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc);
+
/**
- * xe_call_for_each_graphics_ip - Iterate over all recognized graphics IPs
- * @xe_fn: Function to call for each device.
+ * xe_pci_graphics_ip_gen_param - Generate graphics struct xe_ip parameters
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
- * This function iterates over the descriptors for all graphics IPs recognized
- * by the driver and calls @xe_fn: for each one of them.
+ * This function prepares a struct xe_ip parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next parameter or NULL if no more parameters
*/
-void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn)
+const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc)
{
- const struct xe_graphics_desc *desc, *last = NULL;
-
- for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
- desc = graphics_ips[i].desc;
- if (desc == last)
- continue;
-
- xe_fn(desc);
- last = desc;
- }
+ return graphics_ip_gen_params(prev, desc);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
/**
- * xe_call_for_each_media_ip - Iterate over all recognized media IPs
- * @xe_fn: Function to call for each device.
+ * xe_pci_media_ip_gen_param - Generate media struct xe_ip parameters
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares a struct xe_ip parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
*
- * This function iterates over the descriptors for all media IPs recognized
- * by the driver and calls @xe_fn: for each one of them.
+ * Return: pointer to the next parameter or NULL if no more parameters
*/
-void xe_call_for_each_media_ip(xe_media_fn xe_fn)
+const void *xe_pci_media_ip_gen_param(const void *prev, char *desc)
{
- const struct xe_media_desc *desc, *last = NULL;
+ return media_ip_gen_params(prev, desc);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);
- for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
- desc = media_ips[i].desc;
- if (desc == last)
- continue;
+/**
+ * xe_pci_id_gen_param - Generate struct pci_device_id parameters
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares a struct pci_device_id parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next parameter or NULL if no more parameters
+ */
+const void *xe_pci_id_gen_param(const void *prev, char *desc)
+{
+ const struct pci_device_id *pci = pci_id_gen_params(prev, desc);
- xe_fn(desc);
- last = desc;
- }
+ return pci->driver_data ? pci : NULL;
}
-EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_media_ip);
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_id_gen_param);
static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
u32 *ver, u32 *revid)
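
The conversion above follows the standard KUnit parameterization pattern: KUNIT_ARRAY_PARAM() wraps an array into a <name>_gen_params() generator, and KUNIT_CASE_PARAM() runs the case once per element with test->param_value pointing at the current entry. A schematic kernel-context sketch with a hypothetical array and case:

#include <kunit/test.h>
#include <linux/kernel.h>

static const int widths[] = { 8, 16, 32 };

static void width_desc(const int *w, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "width %d", *w);
}

/* Defines width_gen_params(prev, desc). */
KUNIT_ARRAY_PARAM(width, widths, width_desc);

static void width_is_power_of_two(struct kunit *test)
{
	const int *w = test->param_value;

	KUNIT_EXPECT_EQ(test, *w & (*w - 1), 0);
}

static struct kunit_case width_cases[] = {
	KUNIT_CASE_PARAM(width_is_power_of_two, width_gen_params),
	{}
};

static struct kunit_suite width_suite = {
	.name = "width",
	.test_cases = width_cases,
};
kunit_test_suite(width_suite);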
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index 744a37583d2d..37b344df2dc3 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -14,9 +14,10 @@
#include "xe_pci_test.h"
#include "xe_pci_types.h"
-static void check_graphics_ip(const struct xe_graphics_desc *graphics)
+static void check_graphics_ip(struct kunit *test)
{
- struct kunit *test = kunit_get_current_test();
+ const struct xe_ip *param = test->param_value;
+ const struct xe_graphics_desc *graphics = param->desc;
u64 mask = graphics->hw_engine_mask;
/* RCS, CCS, and BCS engines are allowed on the graphics IP */
@@ -28,9 +29,10 @@ static void check_graphics_ip(const struct xe_graphics_desc *graphics)
KUNIT_ASSERT_EQ(test, mask, 0);
}
-static void check_media_ip(const struct xe_media_desc *media)
+static void check_media_ip(struct kunit *test)
{
- struct kunit *test = kunit_get_current_test();
+ const struct xe_ip *param = test->param_value;
+ const struct xe_media_desc *media = param->desc;
u64 mask = media->hw_engine_mask;
/* VCS, VECS and GSCCS engines are allowed on the media IP */
@@ -42,19 +44,21 @@ static void check_media_ip(const struct xe_media_desc *media)
KUNIT_ASSERT_EQ(test, mask, 0);
}
-static void xe_gmdid_graphics_ip(struct kunit *test)
+static void check_platform_gt_count(struct kunit *test)
{
- xe_call_for_each_graphics_ip(check_graphics_ip);
-}
+ const struct pci_device_id *pci = test->param_value;
+ const struct xe_device_desc *desc =
+ (const struct xe_device_desc *)pci->driver_data;
+ int max_gt = desc->max_gt_per_tile;
-static void xe_gmdid_media_ip(struct kunit *test)
-{
- xe_call_for_each_media_ip(check_media_ip);
+ KUNIT_ASSERT_GT(test, max_gt, 0);
+ KUNIT_ASSERT_LE(test, max_gt, XE_MAX_GT_PER_TILE);
}
static struct kunit_case xe_pci_tests[] = {
- KUNIT_CASE(xe_gmdid_graphics_ip),
- KUNIT_CASE(xe_gmdid_media_ip),
+ KUNIT_CASE_PARAM(check_graphics_ip, xe_pci_graphics_ip_gen_param),
+ KUNIT_CASE_PARAM(check_media_ip, xe_pci_media_ip_gen_param),
+ KUNIT_CASE_PARAM(check_platform_gt_count, xe_pci_id_gen_param),
{}
};
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index ede46800aff1..ce4d2b86b778 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -12,15 +12,6 @@
#include "xe_sriov_types.h"
struct xe_device;
-struct xe_graphics_desc;
-struct xe_media_desc;
-
-typedef int (*xe_device_fn)(struct xe_device *);
-typedef void (*xe_graphics_fn)(const struct xe_graphics_desc *);
-typedef void (*xe_media_fn)(const struct xe_media_desc *);
-
-void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn);
-void xe_call_for_each_media_ip(xe_media_fn xe_fn);
struct xe_pci_fake_data {
enum xe_sriov_mode sriov_mode;
@@ -34,6 +25,9 @@ struct xe_pci_fake_data {
int xe_pci_fake_device_init(struct xe_device *xe);
+const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc);
+const void *xe_pci_media_ip_gen_param(const void *prev, char *desc);
+const void *xe_pci_id_gen_param(const void *prev, char *desc);
const void *xe_pci_live_device_gen_param(const void *prev, char *desc);
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c b/drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c
new file mode 100644
index 000000000000..ba95e29b597d
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2024-2025 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_device.h"
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+static int pf_service_test_init(struct kunit *test)
+{
+ struct xe_pci_fake_data fake = {
+ .sriov_mode = XE_SRIOV_MODE_PF,
+ .platform = XE_TIGERLAKE, /* some random platform */
+ .subplatform = XE_SUBPLATFORM_NONE,
+ };
+ struct xe_device *xe;
+
+ test->priv = &fake;
+ xe_kunit_helper_xe_device_test_init(test);
+
+ xe = test->priv;
+ KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
+
+ xe_sriov_pf_service_init(xe);
+ /*
+ * sanity check:
+ * - VF/PF ABI versions must be defined for all supported platforms
+ * - base version can't be newer than latest
+ */
+ KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.major,
+ xe->sriov.pf.service.version.latest.major);
+ if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.minor,
+ xe->sriov.pf.service.version.latest.minor);
+ return 0;
+}
+
+static void pf_negotiate_any(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe, VF2PF_HANDSHAKE_MAJOR_ANY,
+ VF2PF_HANDSHAKE_MINOR_ANY,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_base_match(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major,
+ xe->sriov.pf.service.version.base.minor,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.base.minor);
+}
+
+static void pf_negotiate_base_newer(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major,
+ xe->sriov.pf.service.version.base.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_GE(test, minor, xe->sriov.pf.service.version.base.minor);
+ if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, minor, xe->sriov.pf.service.version.latest.minor);
+ else
+ KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
+}
+
+static void pf_negotiate_base_next(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major + 1, 0,
+ &major, &minor));
+ KUNIT_ASSERT_GE(test, major, xe->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_LE(test, major, xe->sriov.pf.service.version.latest.major);
+ if (major == xe->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, minor, xe->sriov.pf.service.version.latest.minor);
+ else
+ KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
+}
+
+static void pf_negotiate_base_older(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ if (!xe->sriov.pf.service.version.base.minor)
+ kunit_skip(test, "no older minor\n");
+
+ KUNIT_ASSERT_NE(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major,
+ xe->sriov.pf.service.version.base.minor - 1,
+ &major, &minor));
+}
+
+static void pf_negotiate_base_prev(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_NE(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major - 1, 1,
+ &major, &minor));
+}
+
+static void pf_negotiate_latest_match(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major,
+ xe->sriov.pf.service.version.latest.minor,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_newer(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major,
+ xe->sriov.pf.service.version.latest.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_next(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major + 1, 0,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_older(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ if (!xe->sriov.pf.service.version.latest.minor)
+ kunit_skip(test, "no older minor\n");
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major,
+ xe->sriov.pf.service.version.latest.minor - 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor - 1);
+}
+
+static void pf_negotiate_latest_prev(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major)
+ kunit_skip(test, "no prev major");
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major - 1,
+ xe->sriov.pf.service.version.base.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major - 1);
+ KUNIT_ASSERT_GE(test, major, xe->sriov.pf.service.version.base.major);
+}
+
+static struct kunit_case pf_service_test_cases[] = {
+ KUNIT_CASE(pf_negotiate_any),
+ KUNIT_CASE(pf_negotiate_base_match),
+ KUNIT_CASE(pf_negotiate_base_newer),
+ KUNIT_CASE(pf_negotiate_base_next),
+ KUNIT_CASE(pf_negotiate_base_older),
+ KUNIT_CASE(pf_negotiate_base_prev),
+ KUNIT_CASE(pf_negotiate_latest_match),
+ KUNIT_CASE(pf_negotiate_latest_newer),
+ KUNIT_CASE(pf_negotiate_latest_next),
+ KUNIT_CASE(pf_negotiate_latest_older),
+ KUNIT_CASE(pf_negotiate_latest_prev),
+ {}
+};
+
+static struct kunit_suite pf_service_suite = {
+ .name = "pf_service",
+ .test_cases = pf_service_test_cases,
+ .init = pf_service_test_init,
+};
+
+kunit_test_suite(pf_service_suite);
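
Read together, the cases above pin down the negotiation policy: a full wildcard resolves to the latest version, requests newer than latest are clamped down to latest, requests older than base are refused, and anything in the [base, latest] range is granted as requested. A hedged reconstruction of that policy follows; this is not the driver's actual pf_negotiate_version(), and the tests only require a non-zero error on refusal:

    struct ver { u32 major, minor; };

    static int negotiate_sketch(struct ver base, struct ver latest,
                                u32 wmajor, u32 wminor,
                                u32 *major, u32 *minor)
    {
            /* explicit wildcard always resolves to the newest version */
            if (wmajor == VF2PF_HANDSHAKE_MAJOR_ANY &&
                wminor == VF2PF_HANDSHAKE_MINOR_ANY)
                    goto latest;

            /* anything newer than latest is clamped down to latest */
            if (wmajor > latest.major ||
                (wmajor == latest.major && wminor > latest.minor))
                    goto latest;

            /* anything older than base is refused */
            if (wmajor < base.major ||
                (wmajor == base.major && wminor < base.minor))
                    return -EINVAL;

            /* in-range requests are granted as-is */
            *major = wmajor;
            *minor = wminor;
            return 0;

    latest:
            *major = latest.major;
            *minor = latest.minor;
            return 0;
    }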
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index 9570672fce33..5ce0e26822f2 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -19,7 +19,7 @@ static int bb_prefetch(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- if (GRAPHICS_VERx100(xe) >= 1250 && !xe_gt_is_media_type(gt))
+ if (GRAPHICS_VERx100(xe) >= 1250 && xe_gt_is_main_type(gt))
/*
* RCS and CCS require 1K, although other engines would be
* okay with 512.
diff --git a/drivers/gpu/drm/xe/xe_bb.h b/drivers/gpu/drm/xe/xe_bb.h
index fafacd73dcc3..b5cc65506696 100644
--- a/drivers/gpu/drm/xe/xe_bb.h
+++ b/drivers/gpu/drm/xe/xe_bb.h
@@ -14,7 +14,7 @@ struct xe_gt;
struct xe_exec_queue;
struct xe_sched_job;
-struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 size, bool usm);
+struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm);
struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
struct xe_bb *bb);
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 7aa2c17825da..18f27da47a36 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -19,6 +19,8 @@
#include <kunit/static_stub.h>
+#include <trace/events/gpu_mem.h>
+
#include "xe_device.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
@@ -336,15 +338,13 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
/* struct xe_ttm_tt - Subclassed ttm_tt for xe */
struct xe_ttm_tt {
struct ttm_tt ttm;
- /** @xe - The xe device */
- struct xe_device *xe;
struct sg_table sgt;
struct sg_table *sg;
/** @purgeable: Whether the content of the pages of @ttm is purgeable. */
bool purgeable;
};
-static int xe_tt_map_sg(struct ttm_tt *tt)
+static int xe_tt_map_sg(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
unsigned long num_pages = tt->num_pages;
@@ -359,13 +359,13 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
num_pages, 0,
(u64)num_pages << PAGE_SHIFT,
- xe_sg_segment_size(xe_tt->xe->drm.dev),
+ xe_sg_segment_size(xe->drm.dev),
GFP_KERNEL);
if (ret)
return ret;
xe_tt->sg = &xe_tt->sgt;
- ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
+ ret = dma_map_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC);
if (ret) {
sg_free_table(xe_tt->sg);
@@ -376,12 +376,12 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
return 0;
}
-static void xe_tt_unmap_sg(struct ttm_tt *tt)
+static void xe_tt_unmap_sg(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->sg) {
- dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
+ dma_unmap_sgtable(xe->drm.dev, xe_tt->sg,
DMA_BIDIRECTIONAL, 0);
sg_free_table(xe_tt->sg);
xe_tt->sg = NULL;
@@ -400,24 +400,37 @@ struct sg_table *xe_bo_sg(struct xe_bo *bo)
* Account ttm pages against the device shrinker's shrinkable and
* purgeable counts.
*/
-static void xe_ttm_tt_account_add(struct ttm_tt *tt)
+static void xe_ttm_tt_account_add(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->purgeable)
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages);
+ xe_shrinker_mod_pages(xe->mem.shrinker, 0, tt->num_pages);
else
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0);
+ xe_shrinker_mod_pages(xe->mem.shrinker, tt->num_pages, 0);
}
-static void xe_ttm_tt_account_subtract(struct ttm_tt *tt)
+static void xe_ttm_tt_account_subtract(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->purgeable)
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages);
+ xe_shrinker_mod_pages(xe->mem.shrinker, 0, -(long)tt->num_pages);
else
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0);
+ xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0);
+}
+
+static void update_global_total_pages(struct ttm_device *ttm_dev,
+ long num_pages)
+{
+#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
+ struct xe_device *xe = ttm_to_xe_device(ttm_dev);
+ u64 global_total_pages =
+ atomic64_add_return(num_pages, &xe->global_total_pages);
+
+ trace_gpu_mem_total(xe->drm.primary->index, 0,
+ global_total_pages << PAGE_SHIFT);
+#endif
}
static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
@@ -436,11 +449,10 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
return NULL;
tt = &xe_tt->ttm;
- xe_tt->xe = xe;
extra_pages = 0;
if (xe_bo_needs_ccs_pages(bo))
- extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
+ extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, xe_bo_size(bo)),
PAGE_SIZE);
/*
@@ -527,21 +539,25 @@ static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
return err;
xe_tt->purgeable = false;
- xe_ttm_tt_account_add(tt);
+ xe_ttm_tt_account_add(ttm_to_xe_device(ttm_dev), tt);
+ update_global_total_pages(ttm_dev, tt->num_pages);
return 0;
}
static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
+ struct xe_device *xe = ttm_to_xe_device(ttm_dev);
+
if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
return;
- xe_tt_unmap_sg(tt);
+ xe_tt_unmap_sg(xe, tt);
ttm_pool_free(&ttm_dev->pool, tt);
- xe_ttm_tt_account_subtract(tt);
+ xe_ttm_tt_account_subtract(xe, tt);
+ update_global_total_pages(ttm_dev, -(long)tt->num_pages);
}
static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
@@ -789,7 +805,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
/* Bo creation path, moving to system or TT. */
if ((!old_mem && ttm) && !handle_system_ccs) {
if (new_mem->mem_type == XE_PL_TT)
- ret = xe_tt_map_sg(ttm);
+ ret = xe_tt_map_sg(xe, ttm);
if (!ret)
ttm_bo_move_null(ttm_bo, new_mem);
goto out;
@@ -812,7 +828,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
(!ttm && ttm_bo->type == ttm_bo_type_device);
if (new_mem->mem_type == XE_PL_TT) {
- ret = xe_tt_map_sg(ttm);
+ ret = xe_tt_map_sg(xe, ttm);
if (ret)
goto out;
}
@@ -958,7 +974,7 @@ out:
if (timeout < 0)
ret = timeout;
- xe_tt_unmap_sg(ttm_bo->ttm);
+ xe_tt_unmap_sg(xe, ttm_bo->ttm);
}
return ret;
@@ -968,6 +984,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
struct ttm_buffer_object *bo,
unsigned long *scanned)
{
+ struct xe_device *xe = ttm_to_xe_device(bo->bdev);
long lret;
/* Fake move to system, without copying data. */
@@ -982,7 +999,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
if (lret)
return lret;
- xe_tt_unmap_sg(bo->ttm);
+ xe_tt_unmap_sg(xe, bo->ttm);
ttm_bo_move_null(bo, new_resource);
}
@@ -993,7 +1010,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
.allow_move = false});
if (lret > 0)
- xe_ttm_tt_account_subtract(bo->ttm);
+ xe_ttm_tt_account_subtract(xe, bo->ttm);
return lret;
}
@@ -1043,7 +1060,7 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
struct ttm_place place = {.mem_type = bo->resource->mem_type};
struct xe_bo *xe_bo = ttm_to_xe_bo(bo);
- struct xe_device *xe = xe_tt->xe;
+ struct xe_device *xe = ttm_to_xe_device(bo->bdev);
bool needs_rpm;
long lret = 0L;
@@ -1080,7 +1097,7 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
xe_pm_runtime_put(xe);
if (lret > 0)
- xe_ttm_tt_account_subtract(tt);
+ xe_ttm_tt_account_subtract(xe, tt);
out_unref:
xe_bo_put(xe_bo);
@@ -1122,7 +1139,7 @@ int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
goto out_unlock_bo;
- backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
XE_BO_FLAG_PINNED);
@@ -1200,7 +1217,8 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
goto out_unlock_bo;
if (!backup) {
- backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv,
+ NULL, xe_bo_size(bo),
DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
XE_BO_FLAG_PINNED);
@@ -1254,7 +1272,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
}
xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
- bo->size);
+ xe_bo_size(bo));
}
if (!bo->backup_obj)
@@ -1347,7 +1365,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
}
xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
- bo->size);
+ xe_bo_size(bo));
}
bo->backup_obj = NULL;
@@ -1381,7 +1399,8 @@ int xe_bo_dma_unmap_pinned(struct xe_bo *bo)
ttm_bo->sg = NULL;
xe_tt->sg = NULL;
} else if (xe_tt->sg) {
- dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
+ dma_unmap_sgtable(ttm_to_xe_device(ttm_bo->bdev)->drm.dev,
+ xe_tt->sg,
DMA_BIDIRECTIONAL, 0);
sg_free_table(xe_tt->sg);
xe_tt->sg = NULL;
@@ -1557,7 +1576,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
vram = res_to_mem_region(ttm_bo->resource);
xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
- bo->size - (offset & PAGE_MASK), &cursor);
+ xe_bo_size(bo) - (offset & PAGE_MASK), &cursor);
do {
unsigned long page_offset = (offset & ~PAGE_MASK);
@@ -1857,7 +1876,6 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
bo->ccs_cleared = false;
bo->tile = tile;
- bo->size = size;
bo->flags = flags;
bo->cpu_caching = cpu_caching;
bo->ttm.base.funcs = &xe_gem_object_funcs;
@@ -2035,7 +2053,7 @@ __xe_bo_create_locked(struct xe_device *xe,
if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
- start + bo->size, U64_MAX);
+ start + xe_bo_size(bo), U64_MAX);
} else {
err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
}
@@ -2156,21 +2174,6 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
}
-struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
- const void *data, size_t size,
- enum ttm_bo_type type, u32 flags)
-{
- struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- type, flags);
- if (IS_ERR(bo))
- return bo;
-
- xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
-
- return bo;
-}
-
static void __xe_bo_unpin_map_no_vm(void *arg)
{
xe_bo_unpin_map_no_vm(arg);
@@ -2233,7 +2236,7 @@ int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, str
xe_assert(xe, !(*src)->vmap.is_iomem);
bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
- (*src)->size, dst_flags);
+ xe_bo_size(*src), dst_flags);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -2293,7 +2296,7 @@ int xe_bo_pin_external(struct xe_bo *bo)
ttm_bo_pin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_subtract(bo->ttm.ttm);
+ xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2341,7 +2344,7 @@ int xe_bo_pin(struct xe_bo *bo)
ttm_bo_pin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_subtract(bo->ttm.ttm);
+ xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2377,7 +2380,7 @@ void xe_bo_unpin_external(struct xe_bo *bo)
ttm_bo_unpin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_add(bo->ttm.ttm);
+ xe_ttm_tt_account_add(xe, bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2409,7 +2412,7 @@ void xe_bo_unpin(struct xe_bo *bo)
}
ttm_bo_unpin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_add(bo->ttm.ttm);
+ xe_ttm_tt_account_add(xe, bo->ttm.ttm);
}
/**
@@ -2523,7 +2526,7 @@ int xe_bo_vmap(struct xe_bo *bo)
* TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
* to use struct iosys_map.
*/
- ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap);
+ ret = ttm_bo_kmap(&bo->ttm, 0, xe_bo_size(bo) >> PAGE_SHIFT, &bo->kmap);
if (ret)
return ret;
@@ -2992,6 +2995,14 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
return false;
+ /*
+ * Compression implies coh_none, so we know for sure that WB memory
+ * can't currently use compression, and WB caching is likely one of
+ * the common cases.
+ */
+ if (bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)
+ return false;
+
return true;
}
@@ -3067,7 +3078,7 @@ void xe_bo_put(struct xe_bo *bo)
#endif
for_each_tile(tile, xe_bo_device(bo), id)
if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt)
- might_lock(&bo->ggtt_node[id]->ggtt->lock);
+ xe_ggtt_might_lock(bo->ggtt_node[id]->ggtt);
drm_gem_object_put(&bo->ttm.base);
}
}
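
The through-line of this refactor: struct xe_ttm_tt no longer caches an xe_device back-pointer, and every helper instead derives the device from the ttm_device it is already handed. That lookup is the usual container_of() pattern; in the driver it is xe_device.h's ttm_to_xe_device(), presumably shaped like:

    static inline struct xe_device *ttm_to_xe_device(struct ttm_device *ttm)
    {
            return container_of(ttm, struct xe_device, ttm);
    }

Dropping the per-tt pointer shaves a word off every ttm_tt and removes a second copy of state that could in principle go stale.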
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 02ada1fb8a23..02e8cde4c6b2 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -118,9 +118,6 @@ struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
size_t size, u64 offset,
enum ttm_bo_type type, u32 flags,
u64 alignment);
-struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
- const void *data, size_t size,
- enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
size_t size, u32 flags);
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
@@ -238,6 +235,19 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
return xe_bo_addr(bo, 0, page_size);
}
+/**
+ * xe_bo_size() - Xe BO size
+ * @bo: The bo object.
+ *
+ * Simple helper to return Xe BO's size.
+ *
+ * Return: Xe BO's size
+ */
+static inline size_t xe_bo_size(struct xe_bo *bo)
+{
+ return bo->ttm.base.size;
+}
+
static inline u32
__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
{
@@ -246,7 +256,7 @@ __xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
if (XE_WARN_ON(!ggtt_node))
return 0;
- XE_WARN_ON(ggtt_node->base.size > bo->size);
+ XE_WARN_ON(ggtt_node->base.size > xe_bo_size(bo));
XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32));
return ggtt_node->base.start;
}
@@ -300,7 +310,7 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo);
static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
{
- return PAGE_ALIGN(bo->ttm.base.size);
+ return PAGE_ALIGN(xe_bo_size(bo));
}
static inline bool xe_bo_has_pages(struct xe_bo *bo)
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index ed3746d32b27..7484ce55a303 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -197,9 +197,7 @@ static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
continue;
- mutex_lock(&tile->mem.ggtt->lock);
- xe_ggtt_map_bo(tile->mem.ggtt, bo);
- mutex_unlock(&tile->mem.ggtt->lock);
+ xe_ggtt_map_bo_unlocked(tile->mem.ggtt, bo);
}
}
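
xe_ggtt_map_bo_unlocked() is new here; judging from the call site it replaces, it most likely just wraps the locked variant so that ggtt->lock stays private to xe_ggtt.c (a sketch, not necessarily the exact implementation):

    void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo)
    {
            mutex_lock(&ggtt->lock);
            xe_ggtt_map_bo(ggtt, bo);
            mutex_unlock(&ggtt->lock);
    }

Keeping the mutex encapsulated pairs with the xe_ggtt_might_lock() helper added further down, which likewise stops callers such as xe_bo_put() from reaching into ggtt->lock directly.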
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index eb5e83c5f233..ff560d82496f 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -32,8 +32,6 @@ struct xe_bo {
struct xe_bo *backup_obj;
/** @parent_obj: Ref to parent bo if this a backup_obj */
struct xe_bo *parent_obj;
- /** @size: Size of this buffer object */
- size_t size;
/** @flags: flags for this buffer object */
u32 flags;
/** @vm: VM this BO is attached to, for extobj this will be NULL */
@@ -86,7 +84,7 @@ struct xe_bo {
u16 cpu_caching;
/** @devmem_allocation: SVM device memory allocation */
- struct drm_gpusvm_devmem devmem_allocation;
+ struct drm_pagemap_devmem devmem_allocation;
/** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
struct list_head vram_userfault_link;
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index cb9f175c89a1..e9b46a2d0019 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -3,14 +3,19 @@
* Copyright © 2025 Intel Corporation
*/
+#include <linux/bitops.h>
#include <linux/configfs.h>
+#include <linux/find.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/string.h>
#include "xe_configfs.h"
#include "xe_module.h"
+#include "xe_hw_engine_types.h"
+
/**
* DOC: Xe Configfs
*
@@ -48,6 +53,30 @@
* # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
* # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported)
*
+ * Allowed engines:
+ * ----------------
+ *
+ * Allow only a set of engine(s) to be available, disabling the other engines
+ * even if they are available in hardware. This is applied after HW fuses are
+ * considered on each tile. Examples:
+ *
+ * Allow only one render and one copy engine, nothing else::
+ *
+ * # echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
+ *
+ * Allow only compute engines and the first copy engine::
+ *
+ * # echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
+ *
+ * Note that the engine names are the per-GT hardware names. On multi-tile
+ * platforms, writing ``rcs0,bcs0`` to this file would allow the first render
+ * and copy engines on each tile.
+ *
+ * The requested configuration may not be supported by the platform, in which
+ * case the driver may fail to probe: for example, if at least one copy engine
+ * is expected to be available for migrations but all copy engines are
+ * disabled. This is intended for debugging purposes only.
+ *
* Remove devices
* ==============
*
@@ -60,11 +89,30 @@ struct xe_config_device {
struct config_group group;
bool survivability_mode;
+ u64 engines_allowed;
/* protects attributes */
struct mutex lock;
};
+struct engine_info {
+ const char *cls;
+ u64 mask;
+};
+
+/* Helpful macros for sizing the parse buffer allocation below */
+#define MAX_ENGINE_CLASS_CHARS 5
+#define MAX_ENGINE_INSTANCE_CHARS 2
+
+static const struct engine_info engine_info[] = {
+ { .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK },
+ { .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK },
+ { .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK },
+ { .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK },
+ { .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK },
+ { .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK },
+};
+
static struct xe_config_device *to_xe_config_device(struct config_item *item)
{
return container_of(to_config_group(item), struct xe_config_device, group);
@@ -94,10 +142,96 @@ static ssize_t survivability_mode_store(struct config_item *item, const char *pa
return len;
}
+static ssize_t engines_allowed_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ char *p = page;
+
+ for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
+ u64 mask = engine_info[i].mask;
+
+ if ((dev->engines_allowed & mask) == mask) {
+ p += sprintf(p, "%s*\n", engine_info[i].cls);
+ } else if (mask & dev->engines_allowed) {
+ u16 bit0 = __ffs64(mask), bit;
+
+ mask &= dev->engines_allowed;
+
+ for_each_set_bit(bit, (const unsigned long *)&mask, 64)
+ p += sprintf(p, "%s%u\n", engine_info[i].cls,
+ bit - bit0);
+ }
+ }
+
+ return p - page;
+}
+
+static bool lookup_engine_mask(const char *pattern, u64 *mask)
+{
+ for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
+ u8 instance;
+ u16 bit;
+
+ if (!str_has_prefix(pattern, engine_info[i].cls))
+ continue;
+
+ pattern += strlen(engine_info[i].cls);
+
+ if (!strcmp(pattern, "*")) {
+ *mask = engine_info[i].mask;
+ return true;
+ }
+
+ if (kstrtou8(pattern, 10, &instance))
+ return false;
+
+ bit = __ffs64(engine_info[i].mask) + instance;
+ if (bit >= fls64(engine_info[i].mask))
+ return false;
+
+ *mask = BIT_ULL(bit);
+ return true;
+ }
+
+ return false;
+}
+
+static ssize_t engines_allowed_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ size_t patternlen, p;
+ u64 mask, val = 0;
+
+ for (p = 0; p < len; p += patternlen + 1) {
+ char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
+
+ patternlen = strcspn(page + p, ",\n");
+ if (patternlen >= sizeof(buf))
+ return -EINVAL;
+
+ memcpy(buf, page + p, patternlen);
+ buf[patternlen] = '\0';
+
+ if (!lookup_engine_mask(buf, &mask))
+ return -EINVAL;
+
+ val |= mask;
+ }
+
+ mutex_lock(&dev->lock);
+ dev->engines_allowed = val;
+ mutex_unlock(&dev->lock);
+
+ return len;
+}
+
CONFIGFS_ATTR(, survivability_mode);
+CONFIGFS_ATTR(, engines_allowed);
static struct configfs_attribute *xe_config_device_attrs[] = {
&attr_survivability_mode,
+ &attr_engines_allowed,
NULL,
};
@@ -133,12 +267,16 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro
pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
if (!pdev)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENODEV);
+ pci_dev_put(pdev);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
+ /* Default values */
+ dev->engines_allowed = U64_MAX;
+
config_group_init_type_name(&dev->group, name, &xe_config_device_type);
mutex_init(&dev->lock);
@@ -226,6 +364,29 @@ void xe_configfs_clear_survivability_mode(struct pci_dev *pdev)
config_item_put(&dev->group.cg_item);
}
+/**
+ * xe_configfs_get_engines_allowed - get allowed engine mask from configfs
+ * @pdev: pci device
+ *
+ * Find the configfs group that belongs to the pci device and return
+ * the mask of engines allowed to be used.
+ *
+ * Return: engine mask with allowed engines
+ */
+u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+ u64 engines_allowed;
+
+ if (!dev)
+ return U64_MAX;
+
+ engines_allowed = dev->engines_allowed;
+ config_item_put(&dev->group.cg_item);
+
+ return engines_allowed;
+}
+
int __init xe_configfs_init(void)
{
struct config_group *root = &xe_configfs.su_group;
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
index d7d041ec2611..fb8764008089 100644
--- a/drivers/gpu/drm/xe/xe_configfs.h
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -5,6 +5,7 @@
#ifndef _XE_CONFIGFS_H_
#define _XE_CONFIGFS_H_
+#include <linux/limits.h>
#include <linux/types.h>
struct pci_dev;
@@ -14,11 +15,13 @@ int xe_configfs_init(void);
void xe_configfs_exit(void);
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
void xe_configfs_clear_survivability_mode(struct pci_dev *pdev);
+u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev);
#else
-static inline int xe_configfs_init(void) { return 0; };
-static inline void xe_configfs_exit(void) {};
-static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; };
-static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) {};
+static inline int xe_configfs_init(void) { return 0; }
+static inline void xe_configfs_exit(void) { }
+static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }
+static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) { }
+static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; }
#endif
#endif
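
Note the default on both paths: U64_MAX ("all engines allowed") is returned both when configfs support is compiled out and when no configfs group exists for the device, so only an explicit write to engines_allowed can mask anything. A hypothetical call site (illustrative only; the driver's real consumer may look different):

    u64 allowed = xe_configfs_get_engines_allowed(to_pci_dev(xe->drm.dev));

    if (!(allowed & XE_HW_ENGINE_BCS_MASK))
            drm_warn(&xe->drm,
                     "all copy engines masked via configfs; migration may be unavailable\n");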
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index d0503959a8ed..26e9d146ccbf 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -20,7 +20,9 @@
#include "xe_pm.h"
#include "xe_pxp_debugfs.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
#include "xe_step.h"
+#include "xe_wa.h"
#ifdef CONFIG_DRM_XE_DEBUG
#include "xe_bo_evict.h"
@@ -82,9 +84,28 @@ static int sriov_info(struct seq_file *m, void *data)
return 0;
}
+static int workarounds(struct xe_device *xe, struct drm_printer *p)
+{
+ xe_pm_runtime_get(xe);
+ xe_wa_device_dump(xe, p);
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
+static int workaround_info(struct seq_file *m, void *data)
+{
+ struct xe_device *xe = node_to_xe(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ workarounds(xe, &p);
+ return 0;
+}
+
static const struct drm_info_list debugfs_list[] = {
{"info", info, 0},
{ .name = "sriov_info", .show = sriov_info, },
+ { .name = "workarounds", .show = workaround_info, },
};
static int forcewake_open(struct inode *inode, struct file *file)
@@ -191,6 +212,41 @@ static const struct file_operations wedged_mode_fops = {
.write = wedged_mode_set,
};
+static ssize_t atomic_svm_timeslice_ms_show(struct file *f, char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ char buf[32];
+ int len = 0;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", xe->atomic_svm_timeslice_ms);
+
+ return simple_read_from_buffer(ubuf, size, pos, buf, len);
+}
+
+static ssize_t atomic_svm_timeslice_ms_set(struct file *f,
+ const char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ u32 atomic_svm_timeslice_ms;
+ ssize_t ret;
+
+ ret = kstrtouint_from_user(ubuf, size, 0, &atomic_svm_timeslice_ms);
+ if (ret)
+ return ret;
+
+ xe->atomic_svm_timeslice_ms = atomic_svm_timeslice_ms;
+
+ return size;
+}
+
+static const struct file_operations atomic_svm_timeslice_ms_fops = {
+ .owner = THIS_MODULE,
+ .read = atomic_svm_timeslice_ms_show,
+ .write = atomic_svm_timeslice_ms_set,
+};
+
void xe_debugfs_register(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
@@ -211,6 +267,9 @@ void xe_debugfs_register(struct xe_device *xe)
debugfs_create_file("wedged_mode", 0600, root, xe,
&wedged_mode_fops);
+ debugfs_create_file("atomic_svm_timeslice_ms", 0600, root, xe,
+ &atomic_svm_timeslice_ms_fops);
+
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
man = ttm_manager_type(bdev, mem_type);
@@ -235,4 +294,7 @@ void xe_debugfs_register(struct xe_device *xe)
xe_pxp_debugfs_register(xe->pxp);
fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure);
+
+ if (IS_SRIOV_PF(xe))
+ xe_sriov_pf_debugfs_register(xe, root);
}
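
The new knob sits next to wedged_mode under the device's DRM debugfs directory, so tuning it from userspace looks like the following (the path is assumed here and varies with the DRM minor number):

    # echo 10 > /sys/kernel/debug/dri/0/atomic_svm_timeslice_ms
    # cat /sys/kernel/debug/dri/0/atomic_svm_timeslice_ms
    10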
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 11e60d687572..203e3038cc81 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -331,13 +331,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
{
struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
struct xe_guc *guc = exec_queue_to_guc(q);
- u32 adj_logical_mask = q->logical_mask;
- u32 width_mask = (0x1 << q->width) - 1;
const char *process_name = "no process";
-
unsigned int fw_ref;
bool cookie;
- int i;
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
@@ -353,14 +349,6 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);
cookie = dma_fence_begin_signalling();
- for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
- if (adj_logical_mask & BIT(i)) {
- adj_logical_mask |= width_mask << i;
- i += q->width;
- } else {
- ++i;
- }
- }
/* keep going if fw fails as we still want to save the memory and SW data */
fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index e9f3c1a53db2..6ece4defa9df 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -43,10 +43,11 @@
#include "xe_guc_pc.h"
#include "xe_hw_engine_group.h"
#include "xe_hwmon.h"
+#include "xe_i2c.h"
#include "xe_irq.h"
-#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
+#include "xe_nvm.h"
#include "xe_oa.h"
#include "xe_observation.h"
#include "xe_pat.h"
@@ -67,6 +68,7 @@
#include "xe_wait_user_fence.h"
#include "xe_wa.h"
+#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>
static int xe_file_open(struct drm_device *dev, struct drm_file *file)
@@ -403,9 +405,6 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
if (xe->unordered_wq)
destroy_workqueue(xe->unordered_wq);
- if (!IS_ERR_OR_NULL(xe->mem.shrinker))
- xe_shrinker_destroy(xe->mem.shrinker);
-
if (xe->destroy_wq)
destroy_workqueue(xe->destroy_wq);
@@ -439,13 +438,14 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (err)
goto err;
- xe->mem.shrinker = xe_shrinker_create(xe);
- if (IS_ERR(xe->mem.shrinker))
- return ERR_CAST(xe->mem.shrinker);
+ err = xe_shrinker_create(xe);
+ if (err)
+ goto err;
xe->info.devid = pdev->device;
xe->info.revid = pdev->revision;
xe->info.force_execlist = xe_modparam.force_execlist;
+ xe->atomic_svm_timeslice_ms = 5;
err = xe_irq_init(xe);
if (err)
@@ -494,10 +494,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (err)
goto err;
- err = xe_display_create(xe);
- if (WARN_ON(err))
- goto err;
-
return xe;
err:
@@ -685,6 +681,7 @@ static void sriov_update_device_info(struct xe_device *xe)
/* disable features that are not available/applicable to VFs */
if (IS_SRIOV_VF(xe)) {
xe->info.probe_display = 0;
+ xe->info.has_heci_cscfi = 0;
xe->info.has_heci_gscfi = 0;
xe->info.skip_guc_pc = 1;
xe->info.skip_pcode = 1;
@@ -705,6 +702,9 @@ int xe_device_probe_early(struct xe_device *xe)
{
int err;
+ xe_wa_device_init(xe);
+ xe_wa_process_device_oob(xe);
+
err = xe_mmio_probe_early(xe);
if (err)
return err;
@@ -790,51 +790,18 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
- err = xe_ttm_sys_mgr_init(xe);
- if (err)
- return err;
-
for_each_gt(gt, xe, id) {
err = xe_gt_init_early(gt);
if (err)
return err;
-
- /*
- * Only after this point can GT-specific MMIO operations
- * (including things like communication with the GuC)
- * be performed.
- */
- xe_gt_mmio_init(gt);
}
for_each_tile(tile, xe, id) {
- if (IS_SRIOV_VF(xe)) {
- xe_guc_comm_init_early(&tile->primary_gt->uc.guc);
- err = xe_gt_sriov_vf_bootstrap(tile->primary_gt);
- if (err)
- return err;
- err = xe_gt_sriov_vf_query_config(tile->primary_gt);
- if (err)
- return err;
- }
err = xe_ggtt_init_early(tile->mem.ggtt);
if (err)
return err;
- err = xe_memirq_init(&tile->memirq);
- if (err)
- return err;
- }
-
- for_each_gt(gt, xe, id) {
- err = xe_gt_init_hwconfig(gt);
- if (err)
- return err;
}
- err = xe_devcoredump_init(xe);
- if (err)
- return err;
-
/*
* From here on, if a step fails, make sure a Driver-FLR is triggered
*/
@@ -856,6 +823,14 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
+ /*
+ * Allow allocations only now to ensure xe_display_init_early()
+ * is the first to allocate, always.
+ */
+ err = xe_ttm_sys_mgr_init(xe);
+ if (err)
+ return err;
+
/* Allocate and map stolen after potential VRAM resize */
err = xe_ttm_stolen_mgr_init(xe);
if (err)
@@ -887,6 +862,16 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
+ if (xe->tiles->media_gt &&
+ XE_WA(xe->tiles->media_gt, 15015404425_disable))
+ XE_DEVICE_WA_DISABLE(xe, 15015404425);
+
+ err = xe_devcoredump_init(xe);
+ if (err)
+ return err;
+
+ xe_nvm_init(xe);
+
err = xe_heci_gsc_init(xe);
if (err)
return err;
@@ -927,6 +912,10 @@ int xe_device_probe(struct xe_device *xe)
if (err)
goto err_unregister_display;
+ err = xe_i2c_probe(xe);
+ if (err)
+ goto err_unregister_display;
+
for_each_gt(gt, xe, id)
xe_gt_sanitize_freq(gt);
@@ -944,6 +933,8 @@ void xe_device_remove(struct xe_device *xe)
{
xe_display_unregister(xe);
+ xe_nvm_fini(xe);
+
drm_dev_unplug(&xe->drm);
xe_bo_pci_dev_remove_all(xe);
@@ -1184,7 +1175,8 @@ void xe_device_declare_wedged(struct xe_device *xe)
/* Notify userspace of wedged device */
drm_dev_wedged_event(&xe->drm,
- DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
+ NULL);
}
for_each_gt(gt, xe, id)
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 0bc3bc8e6803..bc802e066a7d 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -60,35 +60,32 @@ static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
return &xe->tiles[0];
}
+/*
+ * Highest GT/tile count for any platform. Used only for memory allocation
+ * sizing. Any logic looping over GTs or mapping userspace GT IDs into GT
+ * structures should use the per-platform xe->info.max_gt_per_tile instead.
+ */
#define XE_MAX_GT_PER_TILE 2
-static inline struct xe_gt *xe_tile_get_gt(struct xe_tile *tile, u8 gt_id)
-{
- if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id >= XE_MAX_GT_PER_TILE))
- gt_id = 0;
-
- return gt_id ? tile->media_gt : tile->primary_gt;
-}
-
static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
{
- struct xe_tile *root_tile = xe_device_get_root_tile(xe);
+ struct xe_tile *tile;
struct xe_gt *gt;
- /*
- * FIXME: This only works for now because multi-tile and standalone
- * media are mutually exclusive on the platforms we have today.
- *
- * id => GT mapping may change once we settle on how we want to handle
- * our UAPI.
- */
- if (MEDIA_VER(xe) >= 13) {
- gt = xe_tile_get_gt(root_tile, gt_id);
- } else {
- if (drm_WARN_ON(&xe->drm, gt_id >= XE_MAX_TILES_PER_DEVICE))
- gt_id = 0;
-
- gt = xe->tiles[gt_id].primary_gt;
+ if (gt_id >= xe->info.tile_count * xe->info.max_gt_per_tile)
+ return NULL;
+
+ tile = &xe->tiles[gt_id / xe->info.max_gt_per_tile];
+ switch (gt_id % xe->info.max_gt_per_tile) {
+ default:
+ xe_assert(xe, false);
+ fallthrough;
+ case 0:
+ gt = tile->primary_gt;
+ break;
+ case 1:
+ gt = tile->media_gt;
+ break;
}
if (!gt)
@@ -130,14 +127,14 @@ static inline bool xe_device_uc_enabled(struct xe_device *xe)
for ((id__) = 1; (id__) < (xe__)->info.tile_count; (id__)++) \
for_each_if((tile__) = &(xe__)->tiles[(id__)])
-/*
- * FIXME: This only works for now since multi-tile and standalone media
- * happen to be mutually exclusive. Future platforms may change this...
- */
#define for_each_gt(gt__, xe__, id__) \
- for ((id__) = 0; (id__) < (xe__)->info.gt_count; (id__)++) \
+ for ((id__) = 0; (id__) < (xe__)->info.tile_count * (xe__)->info.max_gt_per_tile; (id__)++) \
for_each_if((gt__) = xe_device_get_gt((xe__), (id__)))
+#define for_each_gt_on_tile(gt__, tile__, id__) \
+ for_each_gt((gt__), (tile__)->xe, (id__)) \
+ for_each_if((gt__)->tile == (tile__))
+
static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
{
return &gt->pm.fw;
@@ -195,6 +192,8 @@ void xe_device_declare_wedged(struct xe_device *xe);
struct xe_file *xe_file_get(struct xe_file *xef);
void xe_file_put(struct xe_file *xef);
+int xe_is_injection_active(void);
+
/*
* Occasionally it is seen that the G2H worker starts running after a delay of more than
* a second even after being queued and activated by the Linux workqueue subsystem. This
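
The ID scheme this hunk settles on is worth spelling out: gt_id = tile_id * max_gt_per_tile + slot, where slot 0 is the primary GT and slot 1 the media GT. On a hypothetical two-tile part with max_gt_per_tile == 2:

    gt_id 0 -> tile 0, primary GT
    gt_id 1 -> tile 0, media GT
    gt_id 2 -> tile 1, primary GT
    gt_id 3 -> tile 1, media GT

IDs whose GT pointer is NULL (for example, a platform without a standalone media GT) are simply skipped by for_each_gt()'s for_each_if(), which is why the loop bound is tile_count * max_gt_per_tile rather than gt_count.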
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index b9440f8c781e..bd9015761aa0 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -24,6 +24,12 @@
*
* vram_d3cold_threshold - Report/change vram used threshold(in MB) below
* which vram save/restore is permissible during runtime D3cold entry/exit.
+ *
+ * lb_fan_control_version - Fan control version provisioned by late binding.
+ * Exposed only if supported by the device.
+ *
+ * lb_voltage_regulator_version - Voltage regulator version provisioned by late
+ * binding. Exposed only if supported by the device.
*/
static ssize_t
@@ -65,6 +71,140 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vram_d3cold_threshold);
+static ssize_t
+lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ struct xe_tile *root = xe_device_get_root_tile(xe);
+ u32 cap, ver_low = FAN_TABLE, ver_high = FAN_TABLE;
+ u16 major = 0, minor = 0, hotfix = 0, build = 0;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
+ &cap, NULL);
+ if (ret)
+ goto out;
+
+ if (REG_FIELD_GET(V1_FAN_PROVISIONED, cap)) {
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
+ &ver_low, NULL);
+ if (ret)
+ goto out;
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
+ &ver_high, NULL);
+ if (ret)
+ goto out;
+
+ major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
+ minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
+ hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
+ build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
+ }
+out:
+ xe_pm_runtime_put(xe);
+
+ return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
+}
+static DEVICE_ATTR_ADMIN_RO(lb_fan_control_version);
+
+static ssize_t
+lb_voltage_regulator_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ struct xe_tile *root = xe_device_get_root_tile(xe);
+ u32 cap, ver_low = VR_CONFIG, ver_high = VR_CONFIG;
+ u16 major = 0, minor = 0, hotfix = 0, build = 0;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
+ &cap, NULL);
+ if (ret)
+ goto out;
+
+ if (REG_FIELD_GET(VR_PARAMS_PROVISIONED, cap)) {
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
+ &ver_low, NULL);
+ if (ret)
+ goto out;
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
+ &ver_high, NULL);
+ if (ret)
+ goto out;
+
+ major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
+ minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
+ hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
+ build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
+ }
+out:
+ xe_pm_runtime_put(xe);
+
+ return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
+}
+static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version);
+
+static int late_bind_create_files(struct device *dev)
+{
+ struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ struct xe_tile *root = xe_device_get_root_tile(xe);
+ u32 cap;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
+ &cap, NULL);
+ if (ret) {
+ if (ret == -ENXIO) {
+ drm_dbg(&xe->drm, "Late binding not supported by firmware\n");
+ ret = 0;
+ }
+ goto out;
+ }
+
+ if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) {
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
+ if (ret)
+ goto out;
+ }
+
+ if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
+out:
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static void late_bind_remove_files(struct device *dev)
+{
+ struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ struct xe_tile *root = xe_device_get_root_tile(xe);
+ u32 cap;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
+ &cap, NULL);
+ if (ret)
+ goto out;
+
+ if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap))
+ sysfs_remove_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
+
+ if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
+ sysfs_remove_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
+out:
+ xe_pm_runtime_put(xe);
+}
+
/**
* DOC: PCIe Gen5 Limitations
*
@@ -151,8 +291,10 @@ static void xe_device_sysfs_fini(void *arg)
if (xe->d3cold.capable)
sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
- if (xe->info.platform == XE_BATTLEMAGE)
+ if (xe->info.platform == XE_BATTLEMAGE) {
sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs);
+ late_bind_remove_files(xe->drm.dev);
+ }
}
int xe_device_sysfs_init(struct xe_device *xe)
@@ -170,6 +312,10 @@ int xe_device_sysfs_init(struct xe_device *xe)
ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
if (ret)
return ret;
+
+ ret = late_bind_create_files(dev);
+ if (ret)
+ return ret;
}
return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
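
With both attributes registered, userspace reads them straight off the PCI device's sysfs directory, reusing the BDF from the configfs examples earlier in the series:

    # cat /sys/bus/pci/devices/0000:03:00.0/lb_fan_control_version
    # cat /sys/bus/pci/devices/0000:03:00.0/lb_voltage_regulator_version

Each prints major.minor.hotfix.build; all four fields read back as 0 while the corresponding table has not been provisioned, since the _PROVISIONED check above leaves the zero defaults untouched.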
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 6383a1c0d478..d4d2c6854790 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -21,7 +21,9 @@
#include "xe_platform_types.h"
#include "xe_pmu_types.h"
#include "xe_pt_types.h"
+#include "xe_sriov_pf_types.h"
#include "xe_sriov_types.h"
+#include "xe_sriov_vf_types.h"
#include "xe_step_types.h"
#include "xe_survivability_mode_types.h"
#include "xe_ttm_vram_mgr_types.h"
@@ -30,12 +32,11 @@
#define TEST_VM_OPS_ERROR
#endif
-#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
-#include "intel_display_core.h"
-#include "intel_display_device.h"
-#endif
-
+struct dram_info;
+struct intel_display;
+struct intel_dg_nvm_dev;
struct xe_ggtt;
+struct xe_i2c;
struct xe_pat_ops;
struct xe_pxp;
@@ -108,7 +109,7 @@ struct xe_vram_region {
void __iomem *mapping;
/** @ttm: VRAM TTM manager */
struct xe_ttm_vram_mgr ttm;
-#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
/** @pagemap: Used to remap device memory as ZONE_DEVICE */
struct dev_pagemap pagemap;
/**
@@ -296,6 +297,8 @@ struct xe_device {
u8 vram_flags;
/** @info.tile_count: Number of tiles */
u8 tile_count;
+ /** @info.max_gt_per_tile: Number of GT IDs allocated to each tile */
+ u8 max_gt_per_tile;
/** @info.gt_count: Total number of GTs for entire device */
u8 gt_count;
/** @info.vm_max_level: Max VM level */
@@ -319,6 +322,8 @@ struct xe_device {
u8 has_fan_control:1;
/** @info.has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
+ /** @info.has_gsc_nvm: Device has gsc non-volatile memory */
+ u8 has_gsc_nvm:1;
/** @info.has_heci_cscfi: device has heci cscfi */
u8 has_heci_cscfi:1;
/** @info.has_heci_gscfi: device has heci gscfi */
@@ -360,6 +365,19 @@ struct xe_device {
u8 skip_pcode:1;
} info;
+ /** @wa_active: keep track of active workarounds */
+ struct {
+ /** @wa_active.oob: bitmap with active OOB workarounds */
+ unsigned long *oob;
+
+ /**
+ * @wa_active.oob_initialized: Mark oob as initialized to help detecting misuse
+ * of XE_DEVICE_WA() - it can only be called on initialization after
+ * Device OOB WAs have been processed.
+ */
+ bool oob_initialized;
+ } wa_active;
+
/** @survivability: survivability information for device */
struct xe_survivability survivability;
@@ -406,10 +424,12 @@ struct xe_device {
/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
enum xe_sriov_mode __mode;
- /** @sriov.pf: PF specific data */
- struct xe_device_pf pf;
- /** @sriov.vf: VF specific data */
- struct xe_device_vf vf;
+ union {
+ /** @sriov.pf: PF specific data */
+ struct xe_device_pf pf;
+ /** @sriov.vf: VF specific data */
+ struct xe_device_vf vf;
+ };
/** @sriov.wq: workqueue used by the virtualization workers */
struct workqueue_struct *wq;
@@ -502,6 +522,10 @@ struct xe_device {
const struct xe_pat_table_entry *table;
/** @pat.n_entries: Number of PAT entries */
int n_entries;
+ /** @pat.pat_ats: PAT entry for PCIe ATS responses */
+ const struct xe_pat_table_entry *pat_ats;
+ /** @pat.pat_pta: PAT entry for page table accesses */
+ const struct xe_pat_table_entry *pat_pta;
u32 idx[__XE_CACHE_LEVEL_COUNT];
} pat;
@@ -548,6 +572,9 @@ struct xe_device {
/** @heci_gsc: graphics security controller */
struct xe_heci_gsc heci_gsc;
+ /** @nvm: discrete graphics non-volatile memory */
+ struct intel_dg_nvm_dev *nvm;
+
/** @oa: oa observation subsystem */
struct xe_oa oa;
@@ -576,6 +603,12 @@ struct xe_device {
/** @pmu: performance monitoring unit */
struct xe_pmu pmu;
+ /** @i2c: I2C host controller */
+ struct xe_i2c *i2c;
+
+ /** @atomic_svm_timeslice_ms: Atomic SVM fault timeslice in ms */
+ u32 atomic_svm_timeslice_ms;
+
#ifdef TEST_VM_OPS_ERROR
/**
* @vm_inject_error_position: inject errors at different places in VM
@@ -584,6 +617,14 @@ struct xe_device {
u8 vm_inject_error_position;
#endif
+#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
+ /**
+ * @global_total_pages: global GPU page usage tracked for gpu_mem
+ * tracepoints
+ */
+ atomic64_t global_total_pages;
+#endif
+
/* private: */
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
@@ -593,27 +634,9 @@ struct xe_device {
* drm_i915_private during build. After cleanup these should go away,
* migrating to the right sub-structs
*/
- struct intel_display display;
-
- struct dram_info {
- bool wm_lv_0_adjust_needed;
- u8 num_channels;
- bool symmetric_memory;
- enum intel_dram_type {
- INTEL_DRAM_UNKNOWN,
- INTEL_DRAM_DDR3,
- INTEL_DRAM_DDR4,
- INTEL_DRAM_LPDDR3,
- INTEL_DRAM_LPDDR4,
- INTEL_DRAM_DDR5,
- INTEL_DRAM_LPDDR5,
- INTEL_DRAM_GDDR,
- INTEL_DRAM_GDDR_ECC,
- __INTEL_DRAM_TYPE_MAX,
- } type;
- u8 num_qgv_points;
- u8 num_psf_gv_points;
- } dram_info;
+ struct intel_display *display;
+
+ const struct dram_info *dram_info;
/*
* edram size in MB.
diff --git a/drivers/gpu/drm/xe/xe_device_wa_oob.rules b/drivers/gpu/drm/xe/xe_device_wa_oob.rules
new file mode 100644
index 000000000000..3a0c4ccc4224
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_device_wa_oob.rules
@@ -0,0 +1,2 @@
+15015404425 PLATFORM(LUNARLAKE)
+ PLATFORM(PANTHERLAKE)
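
Two lines, one workaround: a continuation line (leading whitespace, no new number) ORs its rule into the preceding entry, so 15015404425 is active on both Lunar Lake and Panther Lake. Consumers then test it through the XE_DEVICE_WA() helper mentioned in the wa_active kernel-doc above, roughly:

    if (XE_DEVICE_WA(xe, 15015404425))
            /* apply the workaround */

with the XE_DEVICE_WA_DISABLE() call in xe_device_probe() shown earlier able to veto it again when the media-GT condition holds.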
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 31f688e953d7..f931ff9b1ec0 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -167,7 +167,7 @@ void xe_drm_client_remove_bo(struct xe_bo *bo)
static void bo_meminfo(struct xe_bo *bo,
struct drm_memory_stats stats[TTM_NUM_MEM_TYPES])
{
- u64 sz = bo->size;
+ u64 sz = xe_bo_size(bo);
u32 mem_type = bo->ttm.resource->mem_type;
xe_bo_assert_held(bo);
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index 96732613b4b7..af7916315ac6 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -258,11 +258,13 @@ static int set_prop_eu_stall_wait_num_reports(struct xe_device *xe, u64 value,
static int set_prop_eu_stall_gt_id(struct xe_device *xe, u64 value,
struct eu_stall_open_properties *props)
{
- if (value >= xe->info.gt_count) {
+ struct xe_gt *gt = xe_device_get_gt(xe, value);
+
+ if (!gt) {
drm_dbg(&xe->drm, "Invalid GT ID %llu for EU stall sampling\n", value);
return -EINVAL;
}
- props->gt = xe_device_get_gt(xe, value);
+ props->gt = gt;
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index fee22358cc09..8991b4aed440 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -610,7 +610,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
- if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
+ if (XE_IOCTL_DBG(xe, !xe_device_get_gt(xe, eci[0].gt_id)))
return -EINVAL;
if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
index 8a5cba22b586..c59a9b330697 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.c
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -64,7 +64,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
{
int i, j;
- if (!xe_gt_is_media_type(gt))
+ if (xe_gt_is_main_type(gt))
init_domain(fw, XE_FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER,
FORCEWAKE_ACK_RENDER);
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index ed9183599e31..6581cb0f0e59 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -18,8 +18,8 @@
" *\n" \
" * This file was generated from rules: %s\n" \
" */\n" \
- "#ifndef _GENERATED_XE_WA_OOB_\n" \
- "#define _GENERATED_XE_WA_OOB_\n" \
+ "#ifndef _GENERATED_%s_\n" \
+ "#define _GENERATED_%s_\n" \
"\n" \
"enum {\n"
@@ -52,7 +52,7 @@ static char *strip(char *line, size_t linelen)
}
#define MAX_LINE_LEN 4096
-static int parse(FILE *input, FILE *csource, FILE *cheader)
+static int parse(FILE *input, FILE *csource, FILE *cheader, char *prefix)
{
char line[MAX_LINE_LEN + 1];
char *name, *prev_name = NULL, *rules;
@@ -96,7 +96,7 @@ static int parse(FILE *input, FILE *csource, FILE *cheader)
}
if (name) {
- fprintf(cheader, "\tXE_WA_OOB_%s = %u,\n", name, idx);
+ fprintf(cheader, "\t%s_%s = %u,\n", prefix, name, idx);
/* Close previous entry before starting a new one */
if (idx)
@@ -118,7 +118,33 @@ static int parse(FILE *input, FILE *csource, FILE *cheader)
if (idx)
fprintf(csource, ") },\n");
- fprintf(cheader, "\t_XE_WA_OOB_COUNT = %u\n", idx);
+ fprintf(cheader, "\t_%s_COUNT = %u\n", prefix, idx);
+
+ return 0;
+}
+
+static int fn_to_prefix(const char *fn, char *prefix, size_t size)
+{
+ size_t len;
+
+ fn = basename(fn);
+ len = strlen(fn);
+
+ if (len > size - 1)
+ return -ENAMETOOLONG;
+
+ memcpy(prefix, fn, len + 1);
+
+ for (char *p = prefix; *p; p++) {
+ switch (*p) {
+ case '.':
+ *p = '\0';
+ return 0;
+ default:
+ *p = toupper(*p);
+ break;
+ }
+ }
return 0;
}
@@ -141,6 +167,7 @@ int main(int argc, const char *argv[])
[ARGS_CHEADER] = { .fn = argv[3], .mode = "w" },
};
int ret = 1;
+ char prefix[128];
if (argc < 3) {
fprintf(stderr, "ERROR: wrong arguments\n");
@@ -148,6 +175,9 @@ int main(int argc, const char *argv[])
return 1;
}
+ if (fn_to_prefix(args[ARGS_CHEADER].fn, prefix, sizeof(prefix)) < 0)
+ return 1;
+
for (int i = 0; i < _ARGS_COUNT; i++) {
args[i].f = fopen(args[i].fn, args[i].mode);
if (!args[i].f) {
@@ -157,9 +187,10 @@ int main(int argc, const char *argv[])
}
}
- fprintf(args[ARGS_CHEADER].f, HEADER, args[ARGS_INPUT].fn);
+ fprintf(args[ARGS_CHEADER].f, HEADER, args[ARGS_INPUT].fn, prefix, prefix);
+
ret = parse(args[ARGS_INPUT].f, args[ARGS_CSOURCE].f,
- args[ARGS_CHEADER].f);
+ args[ARGS_CHEADER].f, prefix);
if (!ret)
fprintf(args[ARGS_CHEADER].f, FOOTER);
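
A worked example of the prefix derivation: fn_to_prefix("xe_device_wa_oob.h", ...) upcases characters up to the first dot and yields "XE_DEVICE_WA_OOB", so the generated header is guarded by _GENERATED_XE_DEVICE_WA_OOB_ and enumerates XE_DEVICE_WA_OOB_<name> values ending in _XE_DEVICE_WA_OOB_COUNT, while the original xe_wa_oob.h input keeps producing the XE_WA_OOB_* names it always had.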
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 2c799958c1e4..29d4d3f51da1 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -5,6 +5,7 @@
#include "xe_ggtt.h"
+#include <kunit/visibility.h>
#include <linux/fault-inject.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>
@@ -22,12 +23,13 @@
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
-#include "xe_gt_sriov_vf.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
+#include "xe_res_cursor.h"
#include "xe_sriov.h"
+#include "xe_tile_sriov_vf.h"
#include "xe_wa.h"
#include "xe_wopcm.h"
@@ -64,13 +66,9 @@
* give us the correct placement for free.
*/
-static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
- u16 pat_index)
+static u64 xelp_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
- u64 pte;
-
- pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
- pte |= XE_PAGE_PRESENT;
+ u64 pte = XE_PAGE_PRESENT;
if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
pte |= XE_GGTT_PTE_DM;
@@ -78,13 +76,12 @@ static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
return pte;
}
-static u64 xelpg_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
- u16 pat_index)
+static u64 xelpg_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
struct xe_device *xe = xe_bo_device(bo);
u64 pte;
- pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, pat_index);
+ pte = xelp_ggtt_pte_flags(bo, pat_index);
xe_assert(xe, pat_index <= 3);
@@ -149,8 +146,9 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
xe_tile_assert(ggtt->tile, start < end);
if (ggtt->scratch)
- scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0,
- pat_index);
+ scratch_pte = xe_bo_addr(ggtt->scratch, 0, XE_PAGE_SIZE) |
+ ggtt->pt_ops->pte_encode_flags(ggtt->scratch,
+ pat_index);
else
scratch_pte = 0;
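Splitting pte_encode_bo() into pte_encode_flags() moves address resolution to the caller, as the scratch-PTE rework above shows. A minimal sketch of the resulting composition, assuming a bound BO (the helper name is illustrative, not from the patch):

static u64 compose_ggtt_pte(struct xe_ggtt *ggtt, struct xe_bo *bo,
			    u64 bo_offset, u16 pat_index)
{
	/* platform-specific flag bits only, no address */
	u64 pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index);

	/* the caller resolves and ORs in the address itself */
	return pte | xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
}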
@@ -160,6 +158,22 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
}
}
+/**
+ * xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile
+ * @tile: &xe_tile
+ *
+ * Allocates a &xe_ggtt for a given tile.
+ *
+ * Return: &xe_ggtt on success, or NULL when out of memory.
+ */
+struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile)
+{
+ struct xe_ggtt *ggtt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*ggtt), GFP_KERNEL);
+ if (ggtt)
+ ggtt->tile = tile;
+ return ggtt;
+}
+
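A plausible call site for the new allocator during tile setup (sketch; the real caller sits outside this diff):

	tile->mem.ggtt = xe_ggtt_alloc(tile);
	if (!tile->mem.ggtt)
		return -ENOMEM;

Since the allocation is drmm-managed, no explicit free is needed on later error paths.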
static void ggtt_fini_early(struct drm_device *drm, void *arg)
{
struct xe_ggtt *ggtt = arg;
@@ -176,6 +190,13 @@ static void ggtt_fini(void *arg)
ggtt->scratch = NULL;
}
+#ifdef CONFIG_LOCKDEP
+void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
+{
+ might_lock(&ggtt->lock);
+}
+#endif
+
static void primelockdep(struct xe_ggtt *ggtt)
{
if (!IS_ENABLED(CONFIG_LOCKDEP))
@@ -187,20 +208,36 @@ static void primelockdep(struct xe_ggtt *ggtt)
}
static const struct xe_ggtt_pt_ops xelp_pt_ops = {
- .pte_encode_bo = xelp_ggtt_pte_encode_bo,
+ .pte_encode_flags = xelp_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte,
};
static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
- .pte_encode_bo = xelpg_ggtt_pte_encode_bo,
+ .pte_encode_flags = xelpg_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte,
};
static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
- .pte_encode_bo = xelpg_ggtt_pte_encode_bo,
+ .pte_encode_flags = xelpg_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};
+static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
+{
+ drm_mm_init(&ggtt->mm, reserved,
+ ggtt->size - reserved);
+ mutex_init(&ggtt->lock);
+ primelockdep(ggtt);
+}
+
+int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size)
+{
+ ggtt->size = size;
+ __xe_ggtt_init_early(ggtt, reserved);
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_ggtt_init_kunit);
+
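xe_ggtt_init_kunit() lets a test seed the drm_mm range without any hardware. A hedged sketch of a KUnit test using it (test body and sizes are illustrative, not taken from the patch):

static void ggtt_init_test(struct kunit *test)
{
	struct xe_ggtt *ggtt = kunit_kzalloc(test, sizeof(*ggtt), GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, ggtt);
	/* 8 MiB GGTT with the first 1 MiB reserved - arbitrary test values */
	KUNIT_ASSERT_EQ(test, xe_ggtt_init_kunit(ggtt, SZ_1M, SZ_8M), 0);
}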
static void dev_fini_ggtt(void *arg)
{
struct xe_ggtt *ggtt = arg;
@@ -226,7 +263,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
unsigned int gsm_size;
int err;
- if (IS_SRIOV_VF(xe))
+ if (IS_SRIOV_VF(xe) || GRAPHICS_VERx100(xe) >= 1250)
gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */
else
gsm_size = probe_gsm_size(pdev);
@@ -254,11 +291,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->pt_ops = &xelp_pt_ops;
ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
-
- drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
- ggtt->size - xe_wopcm_size(xe));
- mutex_init(&ggtt->lock);
- primelockdep(ggtt);
+ __xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
if (err)
@@ -269,7 +302,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
return err;
if (IS_SRIOV_VF(xe)) {
- err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0));
+ err = xe_tile_sriov_vf_prepare_ggtt(ggtt->tile);
if (err)
return err;
}
@@ -388,7 +421,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
goto err;
}
- xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, ggtt->scratch->size);
+ xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, xe_bo_size(ggtt->scratch));
xe_ggtt_initial_clear(ggtt);
@@ -440,16 +473,17 @@ static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
}
/**
- * xe_ggtt_node_insert_balloon - prevent allocation of specified GGTT addresses
+ * xe_ggtt_node_insert_balloon_locked - prevent allocation of specified GGTT addresses
* @node: the &xe_ggtt_node to hold reserved GGTT node
* @start: the starting GGTT address of the reserved region
* @end: the end GGTT address of the reserved region
*
- * Use xe_ggtt_node_remove_balloon() to release a reserved GGTT node.
+ * To be used in cases where ggtt->lock is already taken.
+ * Use xe_ggtt_node_remove_balloon_locked() to release a reserved GGTT node.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
+int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64 end)
{
struct xe_ggtt *ggtt = node->ggtt;
int err;
@@ -458,14 +492,13 @@ int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));
+ lockdep_assert_held(&ggtt->lock);
node->base.color = 0;
node->base.start = start;
node->base.size = end - start;
- mutex_lock(&ggtt->lock);
err = drm_mm_reserve_node(&ggtt->mm, &node->base);
- mutex_unlock(&ggtt->lock);
if (xe_gt_WARN(ggtt->tile->primary_gt, err,
"Failed to balloon GGTT %#llx-%#llx (%pe)\n",
@@ -477,27 +510,72 @@ int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
}
/**
- * xe_ggtt_node_remove_balloon - release a reserved GGTT region
+ * xe_ggtt_node_remove_balloon_locked - release a reserved GGTT region
* @node: the &xe_ggtt_node with reserved GGTT region
*
- * See xe_ggtt_node_insert_balloon() for details.
+ * To be used in cases where ggtt->lock is already taken.
+ * See xe_ggtt_node_insert_balloon_locked() for details.
*/
-void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node)
+void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node)
{
- if (!node || !node->ggtt)
+ if (!xe_ggtt_node_allocated(node))
return;
- if (!drm_mm_node_allocated(&node->base))
- goto free_node;
+ lockdep_assert_held(&node->ggtt->lock);
xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");
- mutex_lock(&node->ggtt->lock);
drm_mm_remove_node(&node->base);
- mutex_unlock(&node->ggtt->lock);
+}
-free_node:
- xe_ggtt_node_fini(node);
+static void xe_ggtt_assert_fit(struct xe_ggtt *ggtt, u64 start, u64 size)
+{
+ struct xe_tile *tile = ggtt->tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ u64 __maybe_unused wopcm = xe_wopcm_size(xe);
+
+ xe_tile_assert(tile, start >= wopcm);
+ xe_tile_assert(tile, start + size < ggtt->size - wopcm);
+}
+
+/**
+ * xe_ggtt_shift_nodes_locked - Shift GGTT nodes to adjust for a change in usable address range.
+ * @ggtt: the &xe_ggtt struct instance
+ * @shift: change to the location of area provisioned for current VF
+ *
+ * This function moves all nodes from the GGTT VM to a temporary list. These nodes are
+ * expected to represent allocations in the range formerly assigned to the current VF,
+ * before the range changed. Once the GGTT VM is completely clear of any nodes, they are
+ * re-added with shifted offsets.
+ *
+ * This function cannot fail: it only shifts existing nodes, with no additional
+ * processing. If the nodes fit at the old addresses, they will equally fit at the
+ * new ones. A failure inside this function would indicate that the list of nodes was
+ * either already damaged, or that the shift moves the address range outside of valid
+ * bounds. Both cases justify an assert rather than an error code.
+ */
+void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift)
+{
+ struct xe_tile *tile __maybe_unused = ggtt->tile;
+ struct drm_mm_node *node, *tmpn;
+ LIST_HEAD(temp_list_head);
+
+ lockdep_assert_held(&ggtt->lock);
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
+ drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm)
+ xe_ggtt_assert_fit(ggtt, node->start + shift, node->size);
+
+ drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm) {
+ drm_mm_remove_node(node);
+ list_add(&node->node_list, &temp_list_head);
+ }
+
+ list_for_each_entry_safe(node, tmpn, &temp_list_head, node_list) {
+ list_del(&node->node_list);
+ node->start += shift;
+ drm_mm_reserve_node(&ggtt->mm, node);
+ xe_tile_assert(tile, drm_mm_node_allocated(node));
+ }
}
/**
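A plausible ordering for a VF whose provisioned GGTT range moved (hedged sketch; the balloon node names are hypothetical and the real caller lives in the VF recovery path, not in this hunk):

	mutex_lock(&ggtt->lock);
	/* drop the old balloons so only real allocations remain */
	xe_ggtt_node_remove_balloon_locked(pre_balloon);
	xe_ggtt_node_remove_balloon_locked(post_balloon);
	/* slide every remaining node by the provisioning delta */
	xe_ggtt_shift_nodes_locked(ggtt, new_start - old_start);
	/* re-balloon around the new range before dropping the lock */
	mutex_unlock(&ggtt->lock);

The _locked suffixes exist precisely so this whole sequence can run under a single acquisition of ggtt->lock.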
@@ -548,12 +626,12 @@ int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
* xe_ggtt_node_init - Initialize %xe_ggtt_node struct
* @ggtt: the &xe_ggtt where the new node will later be inserted/reserved.
*
- * This function will allocated the struct %xe_ggtt_node and return it's pointer.
+ * This function will allocate the struct %xe_ggtt_node and return its pointer.
* This struct will then be freed after the node removal upon xe_ggtt_node_remove()
- * or xe_ggtt_node_remove_balloon().
+ * or xe_ggtt_node_remove_balloon_locked().
* Having %xe_ggtt_node struct allocated doesn't mean that the node is already allocated
* in GGTT. Only the xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
- * xe_ggtt_node_insert_balloon() will ensure the node is inserted or reserved in GGTT.
+ * xe_ggtt_node_insert_balloon_locked() will ensure the node is inserted or reserved in GGTT.
*
* Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
**/
@@ -575,7 +653,7 @@ struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt)
* @node: the &xe_ggtt_node to be freed
*
* If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
- * or xe_ggtt_node_insert_balloon(); and this @node is not going to be reused, then,
+ * or xe_ggtt_node_insert_balloon_locked(); and this @node is not going to be reused, then,
* this function needs to be called to free the %xe_ggtt_node struct
**/
void xe_ggtt_node_fini(struct xe_ggtt_node *node)
@@ -600,26 +678,59 @@ bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
/**
* xe_ggtt_map_bo - Map the BO into GGTT
* @ggtt: the &xe_ggtt where node will be mapped
+ * @node: the &xe_ggtt_node where this BO is mapped
* @bo: the &xe_bo to be mapped
+ * @pat_index: Which pat_index to use.
*/
-void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
+ struct xe_bo *bo, u16 pat_index)
{
- u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
- u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
- u64 start;
- u64 offset, pte;
- if (XE_WARN_ON(!bo->ggtt_node[ggtt->tile->id]))
+ u64 start, pte, end;
+ struct xe_res_cursor cur;
+
+ if (XE_WARN_ON(!node))
return;
- start = bo->ggtt_node[ggtt->tile->id]->base.start;
+ start = node->base.start;
+ end = start + xe_bo_size(bo);
+
+ pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index);
+ if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
+ xe_assert(xe_bo_device(bo), bo->ttm.ttm);
+
+ for (xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &cur);
+ cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
+ ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
+ pte | xe_res_dma(&cur));
+ } else {
+ /* Prepend GPU offset */
+ pte |= vram_region_gpu_offset(bo->ttm.resource);
- for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
- pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
- ggtt->pt_ops->ggtt_set_pte(ggtt, start + offset, pte);
+ for (xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
+ cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
+ ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
+ pte + cur.start);
}
}
+/**
+ * xe_ggtt_map_bo_unlocked - Restore a mapping of a BO into GGTT
+ * @ggtt: the &xe_ggtt where node will be mapped
+ * @bo: the &xe_bo to be mapped
+ *
+ * This is used to restore a GGTT mapping after suspend.
+ */
+void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo)
+{
+ u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
+
+ mutex_lock(&ggtt->lock);
+ xe_ggtt_map_bo(ggtt, bo->ggtt_node[ggtt->tile->id], bo, pat_index);
+ mutex_unlock(&ggtt->lock);
+}
+
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end)
{
@@ -632,7 +743,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
if (XE_WARN_ON(bo->ggtt_node[tile_id])) {
/* Someone's already inserted this BO in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo));
return 0;
}
@@ -651,12 +762,15 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
mutex_lock(&ggtt->lock);
err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base,
- bo->size, alignment, 0, start, end, 0);
+ xe_bo_size(bo), alignment, 0, start, end, 0);
if (err) {
xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
bo->ggtt_node[tile_id] = NULL;
} else {
- xe_ggtt_map_bo(ggtt, bo);
+ u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
+
+ xe_ggtt_map_bo(ggtt, bo->ggtt_node[tile_id], bo, pat_index);
}
mutex_unlock(&ggtt->lock);
@@ -709,7 +823,7 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
return;
/* This BO is not currently in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo));
xe_ggtt_node_remove(bo->ggtt_node[tile_id],
bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
@@ -852,3 +966,30 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer
return total;
}
+
+/**
+ * xe_ggtt_encode_pte_flags - Get PTE encoding flags for BO
+ * @ggtt: &xe_ggtt
+ * @bo: &xe_bo
+ * @pat_index: The pat_index for the PTE.
+ *
+ * This function returns the PTE flags for a given BO, without the address.
+ * It's used by the DPT code to fill a GGTT-mapped BO with a linear lookup table.
+ */
+u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt,
+ struct xe_bo *bo, u16 pat_index)
+{
+ return ggtt->pt_ops->pte_encode_flags(bo, pat_index);
+}
+
+/**
+ * xe_ggtt_read_pte - Read a PTE from the GGTT
+ * @ggtt: &xe_ggtt
+ * @offset: the offset for which the mapping should be read.
+ *
+ * Used by testcases, and by the display code when reading out an inherited BIOS FB.
+ */
+u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset)
+{
+ return ioread64(ggtt->gsm + (offset / XE_PAGE_SIZE));
+}
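Together these two helpers make GGTT contents observable without reaching into pt_ops. A hedged self-check fragment (illustrative; the real users are the KUnit tests and the display readout code):

	u64 pte = xe_ggtt_read_pte(ggtt, node->base.start);
	u64 flags = xe_ggtt_encode_pte_flags(ggtt, bo, pat_index);

	/* every flag bit the encoder would set must be present in the PTE */
	WARN_ON((pte & flags) != flags);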
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index 27e7d67de004..fbe1e397d05d 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -9,22 +9,28 @@
#include "xe_ggtt_types.h"
struct drm_printer;
+struct xe_tile;
+struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile);
int xe_ggtt_init_early(struct xe_ggtt *ggtt);
+int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size);
int xe_ggtt_init(struct xe_ggtt *ggtt);
struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt);
void xe_ggtt_node_fini(struct xe_ggtt_node *node);
-int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node,
- u64 start, u64 size);
-void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node);
+int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node,
+ u64 start, u64 size);
+void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node);
+void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift);
int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align);
int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
u32 size, u32 align, u32 mm_flags);
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate);
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node);
-void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
+ struct xe_bo *bo, u16 pat_index);
+void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end);
@@ -38,4 +44,14 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid);
#endif
+#ifndef CONFIG_LOCKDEP
+static inline void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
+{ }
+#else
+void xe_ggtt_might_lock(struct xe_ggtt *ggtt);
+#endif
+
+u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt, struct xe_bo *bo, u16 pat_index);
+u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
index cb02b7994a9a..c5e999d58ff2 100644
--- a/drivers/gpu/drm/xe/xe_ggtt_types.h
+++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
@@ -74,8 +74,8 @@ struct xe_ggtt_node {
* Which can vary from platform to platform.
*/
struct xe_ggtt_pt_ops {
- /** @pte_encode_bo: Encode PTE address for a given BO */
- u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index);
+ /** @pte_encode_flags: Encode PTE flags for a given BO */
+ u64 (*pte_encode_flags)(struct xe_bo *bo, u16 pat_index);
/** @ggtt_set_pte: Directly write into GGTT's PTE */
void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte);
};
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 0bcf97063ff6..1d84bf2f2cef 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -59,7 +59,8 @@ static int memcpy_fw(struct xe_gsc *gsc)
xe_map_memcpy_from(xe, storage, &gsc->fw.bo->vmap, 0, fw_size);
xe_map_memcpy_to(xe, &gsc->private->vmap, 0, storage, fw_size);
- xe_map_memset(xe, &gsc->private->vmap, fw_size, 0, gsc->private->size - fw_size);
+ xe_map_memset(xe, &gsc->private->vmap, fw_size, 0,
+ xe_bo_size(gsc->private) - fw_size);
kfree(storage);
@@ -82,7 +83,8 @@ static int emit_gsc_upload(struct xe_gsc *gsc)
bb->cs[bb->len++] = GSC_FW_LOAD;
bb->cs[bb->len++] = lower_32_bits(offset);
bb->cs[bb->len++] = upper_32_bits(offset);
- bb->cs[bb->len++] = (gsc->private->size / SZ_4K) | GSC_FW_LOAD_LIMIT_VALID;
+ bb->cs[bb->len++] = (xe_bo_size(gsc->private) / SZ_4K) |
+ GSC_FW_LOAD_LIMIT_VALID;
job = xe_bb_create_job(gsc->q, bb);
if (IS_ERR(job)) {
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index d0519cd6704a..464282a89eef 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -23,6 +23,7 @@
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
+#include "xe_tile.h"
/*
* GSC proxy:
@@ -483,7 +484,7 @@ int xe_gsc_proxy_init(struct xe_gsc *gsc)
}
/* no multi-tile devices with this feature yet */
- if (tile->id > 0) {
+ if (!xe_tile_is_root(tile)) {
xe_gt_err(gt, "unexpected GSC proxy init on tile %u\n", tile->id);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index e3517ce2e18c..c8eda36546d3 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -112,7 +112,7 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
if (!fw_ref)
return;
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
reg |= CG_DIS_CNTLBUS;
xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
@@ -146,30 +146,23 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
static void gt_reset_worker(struct work_struct *w);
-static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
+static int emit_job_sync(struct xe_exec_queue *q, struct xe_bb *bb,
+ long timeout_jiffies)
{
struct xe_sched_job *job;
- struct xe_bb *bb;
struct dma_fence *fence;
long timeout;
- bb = xe_bb_new(gt, 4, false);
- if (IS_ERR(bb))
- return PTR_ERR(bb);
-
job = xe_bb_create_job(q, bb);
- if (IS_ERR(job)) {
- xe_bb_free(bb, NULL);
+ if (IS_ERR(job))
return PTR_ERR(job);
- }
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
- timeout = dma_fence_wait_timeout(fence, false, HZ);
+ timeout = dma_fence_wait_timeout(fence, false, timeout_jiffies);
dma_fence_put(fence);
- xe_bb_free(bb, NULL);
if (timeout < 0)
return timeout;
else if (!timeout)
@@ -178,27 +171,30 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
return 0;
}
+static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
+{
+ struct xe_bb *bb;
+ int ret;
+
+ bb = xe_bb_new(gt, 4, false);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ ret = emit_job_sync(q, bb, HZ);
+ xe_bb_free(bb, NULL);
+
+ return ret;
+}
+
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
struct xe_reg_sr *sr = &q->hwe->reg_lrc;
struct xe_reg_sr_entry *entry;
+ int count_rmw = 0, count = 0, ret;
unsigned long idx;
- struct xe_sched_job *job;
struct xe_bb *bb;
- struct dma_fence *fence;
- long timeout;
- int count_rmw = 0;
- int count = 0;
-
- if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
- /* Big enough to emit all of the context's 3DSTATE */
- bb = xe_bb_new(gt, xe_gt_lrc_size(gt, q->hwe->class), false);
- else
- /* Just pick a large BB size */
- bb = xe_bb_new(gt, SZ_4K, false);
-
- if (IS_ERR(bb))
- return PTR_ERR(bb);
+ size_t bb_len = 0;
+ u32 *cs;
/* count RMW registers as those will be handled separately */
xa_for_each(&sr->xa, idx, entry) {
@@ -208,13 +204,34 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
++count_rmw;
}
- if (count || count_rmw)
- xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);
+ if (count)
+ bb_len += count * 2 + 1;
+
+ if (count_rmw)
+ bb_len += count_rmw * 20 + 7;
+
+ if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
+ /*
+ * Big enough to emit all of the context's 3DSTATE via
+ * xe_lrc_emit_hwe_state_instructions()
+ */
+ bb_len += xe_gt_lrc_size(gt, q->hwe->class) / sizeof(u32);
+
+ xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", q->hwe->name, bb_len);
+
+ bb = xe_bb_new(gt, bb_len, false);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ cs = bb->cs;
if (count) {
- /* emit single LRI with all non RMW regs */
+ /*
+ * Emit single LRI with all non RMW regs: 1 leading dw + 2dw per
+ * reg + 1
+ */
- bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
+ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
xa_for_each(&sr->xa, idx, entry) {
struct xe_reg reg = entry->reg;
@@ -229,79 +246,68 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
val |= entry->set_bits;
- bb->cs[bb->len++] = reg.addr;
- bb->cs[bb->len++] = val;
+ *cs++ = reg.addr;
+ *cs++ = val;
xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
}
}
if (count_rmw) {
- /* emit MI_MATH for each RMW reg */
+ /* Emit MI_MATH for each RMW reg: 20dw per reg + 7 trailing dw */
xa_for_each(&sr->xa, idx, entry) {
if (entry->reg.masked || entry->clr_bits == ~0)
continue;
- bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
- bb->cs[bb->len++] = entry->reg.addr;
- bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
-
- bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
- MI_LRI_LRM_CS_MMIO;
- bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
- bb->cs[bb->len++] = entry->clr_bits;
- bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
- bb->cs[bb->len++] = entry->set_bits;
-
- bb->cs[bb->len++] = MI_MATH(8);
- bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
- bb->cs[bb->len++] = CS_ALU_INSTR_LOADINV(SRCB, REG1);
- bb->cs[bb->len++] = CS_ALU_INSTR_AND;
- bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
- bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
- bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCB, REG2);
- bb->cs[bb->len++] = CS_ALU_INSTR_OR;
- bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
-
- bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
- bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
- bb->cs[bb->len++] = entry->reg.addr;
+ *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
+ *cs++ = entry->reg.addr;
+ *cs++ = CS_GPR_REG(0, 0).addr;
+
+ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = CS_GPR_REG(0, 1).addr;
+ *cs++ = entry->clr_bits;
+ *cs++ = CS_GPR_REG(0, 2).addr;
+ *cs++ = entry->set_bits;
+
+ *cs++ = MI_MATH(8);
+ *cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ *cs++ = CS_ALU_INSTR_LOADINV(SRCB, REG1);
+ *cs++ = CS_ALU_INSTR_AND;
+ *cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);
+ *cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ *cs++ = CS_ALU_INSTR_LOAD(SRCB, REG2);
+ *cs++ = CS_ALU_INSTR_OR;
+ *cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);
+
+ *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
+ *cs++ = CS_GPR_REG(0, 0).addr;
+ *cs++ = entry->reg.addr;
xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n",
entry->reg.addr, entry->clr_bits, entry->set_bits);
}
/* reset used GPR */
- bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) | MI_LRI_LRM_CS_MMIO;
- bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
- bb->cs[bb->len++] = 0;
- bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
- bb->cs[bb->len++] = 0;
- bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
- bb->cs[bb->len++] = 0;
+ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = CS_GPR_REG(0, 0).addr;
+ *cs++ = 0;
+ *cs++ = CS_GPR_REG(0, 1).addr;
+ *cs++ = 0;
+ *cs++ = CS_GPR_REG(0, 2).addr;
+ *cs++ = 0;
}
- xe_lrc_emit_hwe_state_instructions(q, bb);
+ cs = xe_lrc_emit_hwe_state_instructions(q, cs);
- job = xe_bb_create_job(q, bb);
- if (IS_ERR(job)) {
- xe_bb_free(bb, NULL);
- return PTR_ERR(job);
- }
+ bb->len = cs - bb->cs;
- xe_sched_job_arm(job);
- fence = dma_fence_get(&job->drm.s_fence->finished);
- xe_sched_job_push(job);
+ ret = emit_job_sync(q, bb, HZ);
- timeout = dma_fence_wait_timeout(fence, false, HZ);
- dma_fence_put(fence);
xe_bb_free(bb, NULL);
- if (timeout < 0)
- return timeout;
- else if (!timeout)
- return -ETIME;
- return 0;
+ return ret;
}
int xe_gt_record_default_lrcs(struct xe_gt *gt)
@@ -363,14 +369,6 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
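The new sizing makes the batch buffer length exact instead of guessed. Worked example with illustrative counts: for 10 plain registers and 2 RMW registers, bb_len = (10 * 2 + 1) + (2 * 20 + 7) = 21 + 47 = 68 dwords, plus xe_gt_lrc_size() / sizeof(u32) on render engines to cover the 3DSTATE emission.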
goto put_nop_q;
}
- /* Reload golden LRC to record the effect of any indirect W/A */
- err = emit_nop_job(gt, q);
- if (err) {
- xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
- hwe->name, ERR_PTR(err), q->guc->id);
- goto put_nop_q;
- }
-
xe_map_memcpy_from(xe, default_lrc,
&q->lrc[0]->bo->vmap,
xe_lrc_pphwsp_offset(q->lrc[0]),
@@ -390,6 +388,7 @@ put_exec_queue:
int xe_gt_init_early(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err;
if (IS_SRIOV_PF(gt_to_xe(gt))) {
@@ -419,6 +418,25 @@ int xe_gt_init_early(struct xe_gt *gt)
xe_mocs_init_early(gt);
+ /*
+ * Only after this point can GT-specific MMIO operations
+ * (including things like communication with the GuC)
+ * be performed.
+ */
+ xe_gt_mmio_init(gt);
+
+ err = xe_uc_init_noalloc(&gt->uc);
+ if (err)
+ return err;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
+
+ xe_gt_mcr_init_early(gt);
+ xe_pat_init(gt);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
return 0;
}
@@ -433,7 +451,7 @@ static void dump_pat_on_error(struct xe_gt *gt)
xe_pat_dump(gt, &p);
}
-static int gt_fw_domain_init(struct xe_gt *gt)
+static int gt_init_with_gt_forcewake(struct xe_gt *gt)
{
unsigned int fw_ref;
int err;
@@ -442,7 +460,15 @@ static int gt_fw_domain_init(struct xe_gt *gt)
if (!fw_ref)
return -ETIMEDOUT;
- if (!xe_gt_is_media_type(gt)) {
+ err = xe_uc_init(&gt->uc);
+ if (err)
+ goto err_force_wake;
+
+ xe_gt_topology_init(gt);
+ xe_gt_mcr_init(gt);
+ xe_gt_enable_host_l2_vram(gt);
+
+ if (xe_gt_is_main_type(gt)) {
err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
if (err)
goto err_force_wake;
@@ -457,8 +483,10 @@ static int gt_fw_domain_init(struct xe_gt *gt)
xe_gt_mcr_init(gt);
err = xe_hw_engines_init_early(gt);
- if (err)
+ if (err) {
+ dump_pat_on_error(gt);
goto err_force_wake;
+ }
err = xe_hw_engine_class_sysfs_init(gt);
if (err)
@@ -479,13 +507,12 @@ static int gt_fw_domain_init(struct xe_gt *gt)
return 0;
err_force_wake:
- dump_pat_on_error(gt);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return err;
}
-static int all_fw_domain_init(struct xe_gt *gt)
+static int gt_init_with_all_forcewake(struct xe_gt *gt)
{
unsigned int fw_ref;
int err;
@@ -518,7 +545,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (err)
goto err_force_wake;
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
/*
* USM has its own SA pool so as not to block behind user operations
*/
@@ -534,7 +561,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
}
}
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
struct xe_tile *tile = gt_to_tile(gt);
tile->migrate = xe_migrate_init(tile);
@@ -544,7 +571,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
}
}
- err = xe_uc_init_hw(&gt->uc);
+ err = xe_uc_load_hw(&gt->uc);
if (err)
goto err_force_wake;
@@ -554,7 +581,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
xe_gt_apply_ccs_mode(gt);
}
- if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
+ if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
if (IS_SRIOV_PF(gt_to_xe(gt))) {
@@ -572,39 +599,6 @@ err_force_wake:
return err;
}
-/*
- * Initialize enough GT to be able to load GuC in order to obtain hwconfig and
- * enable CTB communication.
- */
-int xe_gt_init_hwconfig(struct xe_gt *gt)
-{
- unsigned int fw_ref;
- int err;
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref)
- return -ETIMEDOUT;
-
- xe_gt_mcr_init_early(gt);
- xe_pat_init(gt);
-
- err = xe_uc_init(&gt->uc);
- if (err)
- goto out_fw;
-
- err = xe_uc_init_hwconfig(&gt->uc);
- if (err)
- goto out_fw;
-
- xe_gt_topology_init(gt);
- xe_gt_mcr_init(gt);
- xe_gt_enable_host_l2_vram(gt);
-
-out_fw:
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- return err;
-}
-
static void xe_gt_fini(void *arg)
{
struct xe_gt *gt = arg;
@@ -636,7 +630,7 @@ int xe_gt_init(struct xe_gt *gt)
if (err)
return err;
- err = gt_fw_domain_init(gt);
+ err = gt_init_with_gt_forcewake(gt);
if (err)
return err;
@@ -654,7 +648,7 @@ int xe_gt_init(struct xe_gt *gt)
xe_force_wake_init_engines(gt, gt_to_fw(gt));
- err = all_fw_domain_init(gt);
+ err = gt_init_with_all_forcewake(gt);
if (err)
return err;
@@ -742,7 +736,7 @@ static int vf_gt_restart(struct xe_gt *gt)
if (err)
return err;
- err = xe_uc_init_hw(&gt->uc);
+ err = xe_uc_load_hw(&gt->uc);
if (err)
return err;
@@ -780,11 +774,11 @@ static int do_gt_restart(struct xe_gt *gt)
if (err)
return err;
- err = xe_uc_init_hw(&gt->uc);
+ err = xe_uc_load_hw(&gt->uc);
if (err)
return err;
- if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
+ if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
if (IS_SRIOV_PF(gt_to_xe(gt)))
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 6357325f3939..41880979f4de 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -28,7 +28,6 @@ static inline bool xe_fault_inject_gt_reset(void)
}
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
-int xe_gt_init_hwconfig(struct xe_gt *gt);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
void xe_gt_mmio_init(struct xe_gt *gt);
@@ -107,6 +106,11 @@ static inline bool xe_gt_has_indirect_ring_state(struct xe_gt *gt)
xe_device_uc_enabled(gt_to_xe(gt));
}
+static inline bool xe_gt_is_main_type(struct xe_gt *gt)
+{
+ return gt->info.type == XE_GT_TYPE_MAIN;
+}
+
static inline bool xe_gt_is_media_type(struct xe_gt *gt)
{
return gt->info.type == XE_GT_TYPE_MEDIA;
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 119a55bb7580..848618acdca8 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -122,24 +122,6 @@ static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
return ret;
}
-static int force_reset(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset_async(gt);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset(gt);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
static int sa_info(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_tile *tile = gt_to_tile(gt);
@@ -306,8 +288,6 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
* - without access to the PF specific data
*/
static const struct drm_info_list vf_safe_debugfs_list[] = {
- {"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset},
- {"force_reset_sync", .show = xe_gt_debugfs_simple_show, .data = force_reset_sync},
{"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
{"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
{"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
@@ -332,6 +312,78 @@ static const struct drm_info_list pf_only_debugfs_list[] = {
{"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
};
+static ssize_t write_to_gt_call(const char __user *userbuf, size_t count, loff_t *ppos,
+ void (*call)(struct xe_gt *), struct xe_gt *gt)
+{
+ bool yes;
+ int ret;
+
+ if (*ppos)
+ return -EINVAL;
+ ret = kstrtobool_from_user(userbuf, count, &yes);
+ if (ret < 0)
+ return ret;
+ if (yes)
+ call(gt);
+ return count;
+}
+
+static void force_reset(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_pm_runtime_get(xe);
+ xe_gt_reset_async(gt);
+ xe_pm_runtime_put(xe);
+}
+
+static ssize_t force_reset_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct xe_gt *gt = s->private;
+
+ return write_to_gt_call(userbuf, count, ppos, force_reset, gt);
+}
+
+static int force_reset_show(struct seq_file *s, void *unused)
+{
+ struct xe_gt *gt = s->private;
+
+ force_reset(gt); /* to be deprecated! */
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(force_reset);
+
+static void force_reset_sync(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_pm_runtime_get(xe);
+ xe_gt_reset(gt);
+ xe_pm_runtime_put(xe);
+}
+
+static ssize_t force_reset_sync_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct xe_gt *gt = s->private;
+
+ return write_to_gt_call(userbuf, count, ppos, force_reset_sync, gt);
+}
+
+static int force_reset_sync_show(struct seq_file *s, void *unused)
+{
+ struct xe_gt *gt = s->private;
+
+ force_reset_sync(gt); /* to be deprecated! */
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(force_reset_sync);
+
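With DEFINE_SHOW_STORE_ATTRIBUTE() the two debugfs nodes accept writes: writing any kstrtobool truthy value (e.g. echoing "1" into force_reset under the GT's debugfs directory) triggers the reset, a falsy value is a no-op, and reading the file still forces a reset for backwards compatibility, which is the behaviour the "to be deprecated!" comments flag.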
void xe_gt_debugfs_register(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -355,6 +407,10 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
*/
root->d_inode->i_private = gt;
+ /* VF safe */
+ debugfs_create_file("force_reset", 0600, root, gt, &force_reset_fops);
+ debugfs_create_file("force_reset_sync", 0600, root, gt, &force_reset_sync_fops);
+
drm_debugfs_create_files(vf_safe_debugfs_list,
ARRAY_SIZE(vf_safe_debugfs_list),
root, minor);
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index c11206410a4d..ffb210216aa9 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -121,7 +121,7 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
if (vcs_mask || vecs_mask)
gtidle->powergate_enable = MEDIA_POWERGATE_ENABLE;
- if (!xe_gt_is_media_type(gt))
+ if (xe_gt_is_main_type(gt))
gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE;
if (xe->info.platform != XE_DG1) {
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index d4d9730f0d2c..64a2f0d6aaf9 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -420,12 +420,6 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt)
gt->steering[SQIDI_PSMI].instance_target = select & 0x1;
}
-static void init_steering_inst0(struct xe_gt *gt)
-{
- gt->steering[INSTANCE0].group_target = 0; /* unused */
- gt->steering[INSTANCE0].instance_target = 0; /* unused */
-}
-
static const struct {
const char *name;
void (*init)(struct xe_gt *gt);
@@ -436,7 +430,7 @@ static const struct {
[DSS] = { "DSS", init_steering_dss },
[OADDRM] = { "OADDRM / GPMXMT", init_steering_oaddrm },
[SQIDI_PSMI] = { "SQIDI_PSMI", init_steering_sqidi_psmi },
- [INSTANCE0] = { "INSTANCE 0", init_steering_inst0 },
+ [INSTANCE0] = { "INSTANCE 0", NULL },
[IMPLICIT_STEERING] = { "IMPLICIT", NULL },
};
@@ -446,25 +440,17 @@ static const struct {
*
* Perform early software only initialization of the MCR lock to allow
* the synchronization on accessing the STEER_SEMAPHORE register and
- * use the xe_gt_mcr_multicast_write() function.
+ * use the xe_gt_mcr_multicast_write() function, plus the minimum
+ * safe MCR registers required for VRAM/CCS probing.
*/
void xe_gt_mcr_init_early(struct xe_gt *gt)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
BUILD_BUG_ON(IMPLICIT_STEERING + 1 != NUM_STEERING_TYPES);
BUILD_BUG_ON(ARRAY_SIZE(xe_steering_types) != NUM_STEERING_TYPES);
spin_lock_init(&gt->mcr_lock);
-}
-
-/**
- * xe_gt_mcr_init - Normal initialization of the MCR support
- * @gt: GT structure
- *
- * Perform normal initialization of the MCR for all usages.
- */
-void xe_gt_mcr_init(struct xe_gt *gt)
-{
- struct xe_device *xe = gt_to_xe(gt);
if (IS_SRIOV_VF(xe))
return;
@@ -505,10 +491,27 @@ void xe_gt_mcr_init(struct xe_gt *gt)
}
}
+ /* Mark instance 0 as initialized, we need this early for VRAM and CCS probe. */
+ gt->steering[INSTANCE0].initialized = true;
+}
+
+/**
+ * xe_gt_mcr_init - Normal initialization of the MCR support
+ * @gt: GT structure
+ *
+ * Perform normal initialization of the MCR for all usages.
+ */
+void xe_gt_mcr_init(struct xe_gt *gt)
+{
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
/* Select non-terminated steering target for each type */
- for (int i = 0; i < NUM_STEERING_TYPES; i++)
+ for (int i = 0; i < NUM_STEERING_TYPES; i++) {
+ gt->steering[i].initialized = true;
if (gt->steering[i].ranges && xe_steering_types[i].init)
xe_steering_types[i].init(gt);
+ }
}
/**
@@ -570,6 +573,10 @@ bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) {
if (xe_mmio_in_range(&gt->mmio, &gt->steering[type].ranges[i], reg)) {
+ drm_WARN(&gt_to_xe(gt)->drm, !gt->steering[type].initialized,
+ "Uninitialized usage of MCR register %s/%#x\n",
+ xe_steering_types[type].name, reg.addr);
+
*group = gt->steering[type].group_target;
*instance = gt->steering[type].instance_target;
return true;
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 6717a636b1d9..5a75d56d8558 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -14,6 +14,7 @@
#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
@@ -68,31 +69,8 @@ static bool access_is_atomic(enum access_type access_type)
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
- return BIT(tile->id) & vma->tile_present &&
- !(BIT(tile->id) & vma->tile_invalidated);
-}
-
-static bool vma_matches(struct xe_vma *vma, u64 page_addr)
-{
- if (page_addr > xe_vma_end(vma) - 1 ||
- page_addr + SZ_4K - 1 < xe_vma_start(vma))
- return false;
-
- return true;
-}
-
-static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
-{
- struct xe_vma *vma = NULL;
-
- if (vm->usm.last_fault_vma) { /* Fast lookup */
- if (vma_matches(vm->usm.last_fault_vma, page_addr))
- vma = vm->usm.last_fault_vma;
- }
- if (!vma)
- vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
-
- return vma;
+ return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
+ vma->tile_invalidated);
}
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
@@ -143,7 +121,7 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
trace_xe_vma_pagefault(vma);
- /* Check if VMA is valid */
+ /* Check if VMA is valid, opportunistic check only */
if (vma_is_valid(tile, vma) && !atomic)
return 0;
@@ -180,7 +158,6 @@ retry_userptr:
dma_fence_wait(fence, false);
dma_fence_put(fence);
- vma->tile_invalidated &= ~BIT(tile->id);
unlock_dma_resv:
drm_exec_fini(&exec);
@@ -231,7 +208,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
goto unlock_vm;
}
- vma = lookup_vma(vm, pf->page_addr);
+ vma = xe_vm_find_vma_by_addr(vm, pf->page_addr);
if (!vma) {
err = -EINVAL;
goto unlock_vm;
@@ -266,22 +243,22 @@ static int send_pagefault_reply(struct xe_guc *guc,
return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}
-static void print_pagefault(struct xe_device *xe, struct pagefault *pf)
+static void print_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
- drm_dbg(&xe->drm, "\n\tASID: %d\n"
- "\tVFID: %d\n"
- "\tPDATA: 0x%04x\n"
- "\tFaulted Address: 0x%08x%08x\n"
- "\tFaultType: %d\n"
- "\tAccessType: %d\n"
- "\tFaultLevel: %d\n"
- "\tEngineClass: %d %s\n"
- "\tEngineInstance: %d\n",
- pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
- lower_32_bits(pf->page_addr),
- pf->fault_type, pf->access_type, pf->fault_level,
- pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
- pf->engine_instance);
+ xe_gt_dbg(gt, "\n\tASID: %d\n"
+ "\tVFID: %d\n"
+ "\tPDATA: 0x%04x\n"
+ "\tFaulted Address: 0x%08x%08x\n"
+ "\tFaultType: %d\n"
+ "\tAccessType: %d\n"
+ "\tFaultLevel: %d\n"
+ "\tEngineClass: %d %s\n"
+ "\tEngineInstance: %d\n",
+ pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
+ lower_32_bits(pf->page_addr),
+ pf->fault_type, pf->access_type, pf->fault_level,
+ pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
+ pf->engine_instance);
}
#define PF_MSG_LEN_DW 4
@@ -333,7 +310,6 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = gt_to_xe(gt);
struct pf_queue *pf_queue;
unsigned long flags;
u32 asid;
@@ -358,7 +334,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
pf_queue->num_dw;
queue_work(gt->usm.pf_wq, &pf_queue->worker);
} else {
- drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
+ xe_gt_warn(gt, "PageFault Queue full, shouldn't be possible\n");
}
spin_unlock_irqrestore(&pf_queue->lock, flags);
@@ -371,7 +347,6 @@ static void pf_queue_work_func(struct work_struct *w)
{
struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
struct xe_gt *gt = pf_queue->gt;
- struct xe_device *xe = gt_to_xe(gt);
struct xe_guc_pagefault_reply reply = {};
struct pagefault pf = {};
unsigned long threshold;
@@ -382,9 +357,9 @@ static void pf_queue_work_func(struct work_struct *w)
while (get_pagefault(pf_queue, &pf)) {
ret = handle_pagefault(gt, &pf);
if (unlikely(ret)) {
- print_pagefault(xe, &pf);
+ print_pagefault(gt, &pf);
pf.fault_unsuccessful = 1;
- drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret);
+ xe_gt_dbg(gt, "Fault response: Unsuccessful %pe\n", ERR_PTR(ret));
}
reply.dw0 = FIELD_PREP(PFR_VALID, 1) |
@@ -538,21 +513,21 @@ static int sub_granularity_in_byte(int val)
return (granularity_in_byte(val) / 32);
}
-static void print_acc(struct xe_device *xe, struct acc *acc)
+static void print_acc(struct xe_gt *gt, struct acc *acc)
{
- drm_warn(&xe->drm, "Access counter request:\n"
- "\tType: %s\n"
- "\tASID: %d\n"
- "\tVFID: %d\n"
- "\tEngine: %d:%d\n"
- "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
- "\tSub_Granularity Vector: 0x%08x\n"
- "\tVA Range base: 0x%016llx\n",
- acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
- acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
- granularity_in_byte(acc->granularity) / SZ_1K,
- sub_granularity_in_byte(acc->granularity) / SZ_1K,
- acc->sub_granularity, acc->va_range_base);
+ xe_gt_warn(gt, "Access counter request:\n"
+ "\tType: %s\n"
+ "\tASID: %d\n"
+ "\tVFID: %d\n"
+ "\tEngine: %d:%d\n"
+ "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
+ "\tSub_Granularity Vector: 0x%08x\n"
+ "\tVA Range base: 0x%016llx\n",
+ acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
+ acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
+ granularity_in_byte(acc->granularity) / SZ_1K,
+ sub_granularity_in_byte(acc->granularity) / SZ_1K,
+ acc->sub_granularity, acc->va_range_base);
}
static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
@@ -650,7 +625,6 @@ static void acc_queue_work_func(struct work_struct *w)
{
struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
struct xe_gt *gt = acc_queue->gt;
- struct xe_device *xe = gt_to_xe(gt);
struct acc acc = {};
unsigned long threshold;
int ret;
@@ -660,8 +634,8 @@ static void acc_queue_work_func(struct work_struct *w)
while (get_acc(acc_queue, &acc)) {
ret = handle_acc(gt, &acc);
if (unlikely(ret)) {
- print_acc(xe, &acc);
- drm_warn(&xe->drm, "ACC: Unsuccessful %d\n", ret);
+ print_acc(gt, &acc);
+ xe_gt_warn(gt, "ACC: Unsuccessful %pe\n", ERR_PTR(ret));
}
if (time_after(jiffies, threshold) &&
@@ -706,7 +680,7 @@ int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
queue_work(gt->usm.acc_wq, &acc_queue->worker);
} else {
- drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");
+ xe_gt_warn(gt, "ACC Queue full, dropping ACC\n");
}
spin_unlock(&acc_queue->lock);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index 35489fa81825..bdbd15f3afe3 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -16,6 +16,7 @@
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
+#include "xe_guc_submit.h"
#include "xe_mmio.h"
#include "xe_pm.h"
@@ -47,9 +48,16 @@ static int pf_alloc_metadata(struct xe_gt *gt)
static void pf_init_workers(struct xe_gt *gt)
{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
}
+static void pf_fini_workers(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ disable_work_sync(&gt->sriov.pf.workers.restart);
+}
+
/**
* xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -79,6 +87,21 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+static void pf_fini_action(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ pf_fini_workers(gt);
+}
+
+static int pf_init_late(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+ return devm_add_action_or_reset(xe->drm.dev, pf_fini_action, gt);
+}
+
/**
* xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -95,7 +118,15 @@ int xe_gt_sriov_pf_init(struct xe_gt *gt)
if (err)
return err;
- return xe_gt_sriov_pf_migration_init(gt);
+ err = xe_gt_sriov_pf_migration_init(gt);
+ if (err)
+ return err;
+
+ err = pf_init_late(gt);
+ if (err)
+ return err;
+
+ return 0;
}
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
@@ -230,3 +261,27 @@ void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
pf_queue_restart(gt);
}
+
+static void pf_flush_restart(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ flush_work(&gt->sriov.pf.workers.restart);
+}
+
+/**
+ * xe_gt_sriov_pf_wait_ready() - Wait until per-GT PF SR-IOV support is ready.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt)
+{
+ /* don't wait if there is another ongoing reset */
+ if (xe_guc_read_stopped(&gt->uc.guc))
+ return -EBUSY;
+
+ pf_flush_restart(gt);
+ return 0;
+}
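A plausible PF-side caller, gating VF enabling on a finished restart (sketch; the actual call site is in the SR-IOV enable path, outside this diff):

	err = xe_gt_sriov_pf_wait_ready(gt);
	if (err == -EBUSY)
		return err;	/* a GT reset is in flight; try again later */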
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index e2b2ff8132dc..e7fde3f9937a 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -11,6 +11,7 @@ struct xe_gt;
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
int xe_gt_sriov_pf_init(struct xe_gt *gt);
+int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 53a44702c04a..494909f74eb2 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -104,13 +104,13 @@ static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs
}
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- struct drm_printer p = xe_gt_info_printer(gt);
+ struct drm_printer p = xe_gt_dbg_printer(gt);
void *klvs = xe_guc_buf_cpu_ptr(buf);
char name[8];
- xe_gt_sriov_info(gt, "pushed %s config with %u KLV%s:\n",
- xe_sriov_function_name(vfid, name, sizeof(name)),
- num_klvs, str_plural(num_klvs));
+ xe_gt_sriov_dbg(gt, "pushed %s config with %u KLV%s:\n",
+ xe_sriov_function_name(vfid, name, sizeof(name)),
+ num_klvs, str_plural(num_klvs));
xe_guc_klv_print(klvs, num_dwords, &p);
}
@@ -238,26 +238,35 @@ static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned i
}
/* Return: number of configuration dwords written */
-static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
+static u32 encode_ggtt(u32 *cfg, u64 start, u64 size, bool details)
{
u32 n = 0;
- if (xe_ggtt_node_allocated(config->ggtt_region)) {
- if (details) {
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
- cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
- cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
- }
-
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
- cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
- cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
+ if (details) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
+ cfg[n++] = lower_32_bits(start);
+ cfg[n++] = upper_32_bits(start);
}
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
+ cfg[n++] = lower_32_bits(size);
+ cfg[n++] = upper_32_bits(size);
+
return n;
}
/* Return: number of configuration dwords written */
+static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
+{
+ struct xe_ggtt_node *node = config->ggtt_region;
+
+ if (!xe_ggtt_node_allocated(node))
+ return 0;
+
+ return encode_ggtt(cfg, node->base.start, node->base.size, details);
+}
+
+/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
u32 n = 0;
@@ -282,8 +291,8 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool
if (config->lmem_obj) {
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
- cfg[n++] = lower_32_bits(config->lmem_obj->size);
- cfg[n++] = upper_32_bits(config->lmem_obj->size);
+ cfg[n++] = lower_32_bits(xe_bo_size(config->lmem_obj));
+ cfg[n++] = upper_32_bits(xe_bo_size(config->lmem_obj));
}
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
@@ -332,6 +341,17 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
}
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
+ if (vfid == PFID) {
+ u64 ggtt_start = xe_wopcm_size(gt_to_xe(gt));
+ u64 ggtt_size = gt_to_tile(gt)->mem.ggtt->size - ggtt_start;
+
+ /* plain PF config data will never include a real GGTT region */
+ xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));
+
+ /* fake PF GGTT config covers full GGTT range except reserved WOPCM */
+ num_dwords += encode_ggtt(cfg + num_dwords, ggtt_start, ggtt_size, true);
+ }
+
num_klvs = xe_guc_klv_count(cfg, num_dwords);
err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
@@ -376,7 +396,7 @@ static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
u64 spare;
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -388,7 +408,7 @@ static u64 pf_get_spare_ggtt(struct xe_gt *gt)
static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -443,7 +463,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
int err;
xe_gt_assert(gt, vfid);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
size = round_up(size, alignment);
@@ -492,7 +512,7 @@ static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_ggtt_node *node = config->ggtt_region;
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}
@@ -560,7 +580,7 @@ int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size
{
int err;
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
if (vfid)
@@ -622,7 +642,7 @@ int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
int err = 0;
xe_gt_assert(gt, vfid);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
if (!num_vfs)
return 0;
@@ -693,7 +713,7 @@ int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, num_vfs);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
fair = pf_estimate_fair_ggtt(gt, num_vfs);
@@ -1299,7 +1319,7 @@ static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
struct xe_bo *bo;
bo = config->lmem_obj;
- return bo ? bo->size : 0;
+ return bo ? xe_bo_size(bo) : 0;
}
static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
@@ -1327,7 +1347,17 @@ static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 si
static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
- /* TODO */
+ struct xe_lmtt *lmtt;
+ struct xe_tile *tile;
+ unsigned int tid;
+
+ xe_assert(xe, xe_device_has_lmtt(xe));
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ for_each_tile(tile, xe, tid) {
+ lmtt = &tile->sriov.pf.lmtt;
+ xe_lmtt_invalidate_hw(lmtt);
+ }
}
static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
@@ -1388,7 +1418,7 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
if (err)
goto fail;
- offset += bo->size;
+ offset += xe_bo_size(bo);
}
}
@@ -1406,7 +1436,7 @@ fail:
static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
if (config->lmem_obj) {
@@ -1425,7 +1455,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, IS_DGFX(xe));
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
size = round_up(size, pf_get_lmem_alignment(gt));
@@ -1469,12 +1499,12 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
goto release;
}
- err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
+ err = pf_push_vf_cfg_lmem(gt, vfid, xe_bo_size(bo));
if (unlikely(err))
goto reset_lmtt;
xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
- vfid, bo->size, bo->size / SZ_1M);
+ vfid, xe_bo_size(bo), xe_bo_size(bo) / SZ_1M);
return 0;
reset_lmtt:
@@ -1520,6 +1550,8 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size
{
int err;
+ xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt)));
+
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
if (vfid)
err = pf_provision_vf_lmem(gt, vfid, size);
@@ -1550,7 +1582,7 @@ int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
int err = 0;
xe_gt_assert(gt, vfid);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
if (!num_vfs)
return 0;
@@ -1627,9 +1659,9 @@ int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, num_vfs);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
- if (!IS_DGFX(gt_to_xe(gt)))
+ if (!xe_device_has_lmtt(gt_to_xe(gt)))
return 0;
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
@@ -1661,7 +1693,7 @@ int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, num_vfs);
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
result = result ?: err;
err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
@@ -1989,7 +2021,7 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_device *xe = gt_to_xe(gt);
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
pf_release_vf_config_ggtt(gt, config);
if (IS_DGFX(xe)) {
pf_release_vf_config_lmem(gt, config);
@@ -2080,7 +2112,7 @@ static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
* Only GGTT and LMEM require clearing by the PF.
* GuC doorbell IDs and context IDs do not need any clearing.
*/
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
pf_sanitize_ggtt(config->ggtt_region, vfid);
if (IS_DGFX(xe))
err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
@@ -2147,7 +2179,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
- bool is_primary = !xe_gt_is_media_type(gt);
+ bool is_primary = xe_gt_is_main_type(gt);
bool valid_ggtt, valid_ctxs, valid_dbs;
bool valid_any, valid_all;
@@ -2163,7 +2195,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
valid_all = valid_all && valid_ggtt;
valid_any = valid_any || (valid_ggtt && is_primary);
- if (IS_DGFX(xe)) {
+ if (xe_device_has_lmtt(xe)) {
bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
valid_any = valid_any || (valid_lmem && is_primary);
@@ -2347,7 +2379,7 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
return -EINVAL;
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- struct drm_printer p = xe_gt_info_printer(gt);
+ struct drm_printer p = xe_gt_dbg_printer(gt);
drm_printf(&p, "restoring VF%u config:\n", vfid);
xe_guc_klv_print(buf, size / sizeof(u32), &p);
@@ -2364,6 +2396,20 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
return err;
}
+static void pf_prepare_self_config(struct xe_gt *gt)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, PFID);
+
+ /*
+ * We want the PF to be allowed to use all context IDs, doorbell IDs,
+ * and the whole usable GGTT area. While we can store the ctxs/dbs numbers
+ * directly in the config structure, we can't do the same with the GGTT
+ * configuration, so let it be prepared on demand while pushing KLVs.
+ */
+ config->num_ctxs = GUC_ID_MAX;
+ config->num_dbs = GUC_NUM_DOORBELLS;
+}
+
static int pf_push_self_config(struct xe_gt *gt)
{
int err;
@@ -2407,6 +2453,7 @@ int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
xe_gt_assert(gt, IS_SRIOV_PF(xe));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ pf_prepare_self_config(gt);
err = pf_push_self_config(gt);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
@@ -2577,10 +2624,10 @@ int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
if (!config->lmem_obj)
continue;
- string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
+ string_get_size(xe_bo_size(config->lmem_obj), 1, STRING_UNITS_2,
buf, sizeof(buf));
drm_printf(p, "VF%u:\t%zu\t(%s)\n",
- n, config->lmem_obj->size, buf);
+ n, xe_bo_size(config->lmem_obj), buf);
}
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index 1f50aec3a059..4f7fff892bc0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -15,10 +15,11 @@
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_monitor.h"
-#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf_service.h"
+#include "xe_tile.h"
static const char *control_cmd_to_string(u32 cmd)
{
@@ -1064,7 +1065,9 @@ static bool pf_exit_vf_flr_reset_data(struct xe_gt *gt, unsigned int vfid)
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA))
return false;
- xe_gt_sriov_pf_service_reset(gt, vfid);
+ if (xe_tile_is_root(gt->tile) && xe_gt_is_main_type(gt))
+ xe_sriov_pf_service_reset_vf(gt_to_xe(gt), vfid);
+
xe_gt_sriov_pf_monitor_flr(gt, vfid);
pf_enter_vf_flr_reset_mmio(gt, vfid);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index 0fe47f41b63c..3ed245e04d0c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -22,6 +22,7 @@
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_pm.h"
+#include "xe_sriov_pf.h"
/*
* /sys/kernel/debug/dri/0/
@@ -78,11 +79,6 @@ static const struct drm_info_list pf_info[] = {
.data = xe_gt_sriov_pf_service_print_runtime,
},
{
- "negotiated_versions",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_service_print_version,
- },
- {
"adverse_events",
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_pf_monitor_print_events,
@@ -210,7 +206,8 @@ static int CONFIG##_set(void *data, u64 val) \
return -EOVERFLOW; \
\
xe_pm_runtime_get(xe); \
- err = xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
+ err = xe_sriov_pf_wait_ready(xe) ?: \
+ xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
xe_pm_runtime_put(xe); \
\
return err; \
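[The ?: above is the GNU "elvis" operator: the right-hand expression is evaluated only when the left-hand one is zero, so the config setter runs only once the PF reports ready. An illustrative stand-alone example with hypothetical helper names:

static int wait_ready(struct xe_device *xe);	/* hypothetical */
static int apply_config(struct xe_device *xe);	/* hypothetical */

static int wait_then_apply(struct xe_device *xe)
{
	/* first non-zero error wins; apply_config() runs only on success */
	return wait_ready(xe) ?: apply_config(xe);
}
]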
@@ -305,10 +302,10 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne
xe_gt_assert(gt, gt == extract_gt(parent));
xe_gt_assert(gt, vfid == extract_vfid(parent));
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare",
0644, parent, parent, &ggtt_fops);
- if (IS_DGFX(gt_to_xe(gt)))
+ if (xe_device_has_lmtt(gt_to_xe(gt)))
debugfs_create_file_unsafe(vfid ? "lmem_quota" : "lmem_spare",
0644, parent, parent, &lmem_fops);
}
@@ -554,11 +551,11 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
pfdentry->d_inode->i_private = gt;
drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor);
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
drm_debugfs_create_files(pf_ggtt_info,
ARRAY_SIZE(pf_ggtt_info),
pfdentry, minor);
- if (IS_DGFX(gt_to_xe(gt)))
+ if (xe_device_has_lmtt(gt_to_xe(gt)))
drm_debugfs_create_files(pf_lmem_info,
ARRAY_SIZE(pf_lmem_info),
pfdentry, minor);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
index 821cfcc34e6b..76dd9233ef9f 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -19,91 +19,7 @@
#include "xe_gt_sriov_pf_service_types.h"
#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
-
-static void pf_init_versions(struct xe_gt *gt)
-{
- BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR);
- BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR);
-
- /* base versions may differ between platforms */
- gt->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;
- gt->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;
-
- /* latest version is same for all platforms */
- gt->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR;
- gt->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR;
-}
-
-/* Return: 0 on success or a negative error code on failure. */
-static int pf_negotiate_version(struct xe_gt *gt,
- u32 wanted_major, u32 wanted_minor,
- u32 *major, u32 *minor)
-{
- struct xe_gt_sriov_pf_service_version base = gt->sriov.pf.service.version.base;
- struct xe_gt_sriov_pf_service_version latest = gt->sriov.pf.service.version.latest;
-
- xe_gt_assert(gt, base.major);
- xe_gt_assert(gt, base.major <= latest.major);
- xe_gt_assert(gt, (base.major < latest.major) || (base.minor <= latest.minor));
-
- /* VF doesn't care - return our latest */
- if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY &&
- wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) {
- *major = latest.major;
- *minor = latest.minor;
- return 0;
- }
-
- /* VF wants newer than our - return our latest */
- if (wanted_major > latest.major) {
- *major = latest.major;
- *minor = latest.minor;
- return 0;
- }
-
- /* VF wants older than min required - reject */
- if (wanted_major < base.major ||
- (wanted_major == base.major && wanted_minor < base.minor)) {
- return -EPERM;
- }
-
- /* previous major - return wanted, as we should still support it */
- if (wanted_major < latest.major) {
- /* XXX: we are not prepared for multi-versions yet */
- xe_gt_assert(gt, base.major == latest.major);
- return -ENOPKG;
- }
-
- /* same major - return common minor */
- *major = wanted_major;
- *minor = min_t(u32, latest.minor, wanted_minor);
- return 0;
-}
-
-static void pf_connect(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
-{
- xe_gt_sriov_pf_assert_vfid(gt, vfid);
- xe_gt_assert(gt, major || minor);
-
- gt->sriov.pf.vfs[vfid].version.major = major;
- gt->sriov.pf.vfs[vfid].version.minor = minor;
-}
-
-static void pf_disconnect(struct xe_gt *gt, u32 vfid)
-{
- xe_gt_sriov_pf_assert_vfid(gt, vfid);
-
- gt->sriov.pf.vfs[vfid].version.major = 0;
- gt->sriov.pf.vfs[vfid].version.minor = 0;
-}
-
-static bool pf_is_negotiated(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
-{
- xe_gt_sriov_pf_assert_vfid(gt, vfid);
-
- return major == gt->sriov.pf.vfs[vfid].version.major &&
- minor <= gt->sriov.pf.vfs[vfid].version.minor;
-}
+#include "xe_sriov_pf_service.h"
static const struct xe_reg tgl_runtime_regs[] = {
RPM_CONFIG0, /* _MMIO(0x0d00) */
@@ -266,7 +182,7 @@ static void pf_prepare_runtime_info(struct xe_gt *gt)
read_many(gt, size, regs, values);
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- struct drm_printer p = xe_gt_info_printer(gt);
+ struct drm_printer p = xe_gt_dbg_printer(gt);
xe_gt_sriov_pf_service_print_runtime(gt, &p);
}
@@ -285,8 +201,6 @@ int xe_gt_sriov_pf_service_init(struct xe_gt *gt)
{
int err;
- pf_init_versions(gt);
-
err = pf_alloc_runtime_info(gt);
if (unlikely(err))
goto failed;
@@ -311,47 +225,6 @@ void xe_gt_sriov_pf_service_update(struct xe_gt *gt)
pf_prepare_runtime_info(gt);
}
-/**
- * xe_gt_sriov_pf_service_reset - Reset a connection with the VF.
- * @gt: the &xe_gt
- * @vfid: the VF identifier
- *
- * Reset a VF driver negotiated VF/PF ABI version.
- * After that point, the VF driver will have to perform new version handshake
- * to continue use of the PF services again.
- *
- * This function can only be called on PF.
- */
-void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid)
-{
- pf_disconnect(gt, vfid);
-}
-
-/* Return: 0 on success or a negative error code on failure. */
-static int pf_process_handshake(struct xe_gt *gt, u32 vfid,
- u32 wanted_major, u32 wanted_minor,
- u32 *major, u32 *minor)
-{
- int err;
-
- xe_gt_sriov_dbg_verbose(gt, "VF%u wants ABI version %u.%u\n",
- vfid, wanted_major, wanted_minor);
-
- err = pf_negotiate_version(gt, wanted_major, wanted_minor, major, minor);
-
- if (err < 0) {
- xe_gt_sriov_notice(gt, "VF%u failed to negotiate ABI %u.%u (%pe)\n",
- vfid, wanted_major, wanted_minor, ERR_PTR(err));
- pf_disconnect(gt, vfid);
- } else {
- xe_gt_sriov_dbg(gt, "VF%u negotiated ABI version %u.%u\n",
- vfid, *major, *minor);
- pf_connect(gt, vfid, *major, *minor);
- }
-
- return 0;
-}
-
/* Return: length of the response message or a negative error code on failure. */
static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
const u32 *request, u32 len, u32 *response, u32 size)
@@ -371,7 +244,8 @@ static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
wanted_major = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, request[1]);
wanted_minor = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, request[1]);
- err = pf_process_handshake(gt, origin, wanted_major, wanted_minor, &major, &minor);
+ err = xe_sriov_pf_service_handshake_vf(gt_to_xe(gt), origin, wanted_major, wanted_minor,
+ &major, &minor);
if (err < 0)
return err;
@@ -430,8 +304,10 @@ static int pf_process_runtime_query_msg(struct xe_gt *gt, u32 origin,
u32 remaining = 0;
int ret;
- if (!pf_is_negotiated(gt, origin, 1, 0))
+ /* this action is available from ABI 1.0 */
+ if (!xe_sriov_pf_service_is_negotiated(gt_to_xe(gt), origin, 1, 0))
return -EACCES;
+
if (unlikely(msg_len > VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
return -EMSGSIZE;
if (unlikely(msg_len < VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
@@ -528,33 +404,3 @@ int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p
return 0;
}
-
-/**
- * xe_gt_sriov_pf_service_print_version - Print ABI versions negotiated with VFs.
- * @gt: the &xe_gt
- * @p: the &drm_printer
- *
- * This function is for PF use only.
- */
-int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p)
-{
- struct xe_device *xe = gt_to_xe(gt);
- unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
- struct xe_gt_sriov_pf_service_version *version;
-
- xe_gt_assert(gt, IS_SRIOV_PF(xe));
-
- for (n = 1; n <= total_vfs; n++) {
- version = &gt->sriov.pf.vfs[n].version;
- if (!version->major && !version->minor)
- continue;
-
- drm_printf(p, "VF%u:\t%u.%u\n", n, version->major, version->minor);
- }
-
- return 0;
-}
-
-#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
-#include "tests/xe_gt_sriov_pf_service_test.c"
-#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h
index 56aaadf0360d..10b02c9b651c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h
@@ -14,9 +14,7 @@ struct xe_gt;
int xe_gt_sriov_pf_service_init(struct xe_gt *gt);
void xe_gt_sriov_pf_service_update(struct xe_gt *gt);
-void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid);
-int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p);
int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p);
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index a439261bf4d7..b282838d59e6 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -82,17 +82,17 @@ int xe_gt_sriov_vf_reset(struct xe_gt *gt)
}
static int guc_action_match_version(struct xe_guc *guc,
- u32 wanted_branch, u32 wanted_major, u32 wanted_minor,
- u32 *branch, u32 *major, u32 *minor, u32 *patch)
+ struct xe_uc_fw_version *wanted,
+ struct xe_uc_fw_version *found)
{
u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = {
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
GUC_ACTION_VF2GUC_MATCH_VERSION),
- FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted_branch) |
- FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted_major) |
- FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted_minor),
+ FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted->branch) |
+ FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted->major) |
+ FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted->minor),
};
u32 response[GUC_MAX_MMIO_MSG_LEN];
int ret;
@@ -106,120 +106,138 @@ static int guc_action_match_version(struct xe_guc *guc,
if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0])))
return -EPROTO;
- *branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
- *major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
- *minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
- *patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);
+ memset(found, 0, sizeof(struct xe_uc_fw_version));
+ found->branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
+ found->major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
+ found->minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
+ found->patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);
return 0;
}
-static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
+static int guc_action_match_version_any(struct xe_guc *guc,
+ struct xe_uc_fw_version *found)
+{
+ struct xe_uc_fw_version wanted = {
+ .branch = GUC_VERSION_BRANCH_ANY,
+ .major = GUC_VERSION_MAJOR_ANY,
+ .minor = GUC_VERSION_MINOR_ANY,
+ .patch = 0
+ };
+
+ return guc_action_match_version(guc, &wanted, found);
+}
+
+static void vf_minimum_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver)
{
struct xe_device *xe = gt_to_xe(gt);
+ memset(ver, 0, sizeof(struct xe_uc_fw_version));
+
switch (xe->info.platform) {
case XE_TIGERLAKE ... XE_PVC:
/* 1.1 this is current baseline for Xe driver */
- *branch = 0;
- *major = 1;
- *minor = 1;
+ ver->branch = 0;
+ ver->major = 1;
+ ver->minor = 1;
break;
default:
/* 1.2 has support for the GMD_ID KLV */
- *branch = 0;
- *major = 1;
- *minor = 2;
+ ver->branch = 0;
+ ver->major = 1;
+ ver->minor = 2;
break;
}
}
-static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
+static void vf_wanted_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver)
{
/* for now it's the same as minimum */
- return vf_minimum_guc_version(gt, branch, major, minor);
+ return vf_minimum_guc_version(gt, ver);
}
static int vf_handshake_with_guc(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version wanted = {0};
struct xe_guc *guc = &gt->uc.guc;
- u32 wanted_branch, wanted_major, wanted_minor;
- u32 branch, major, minor, patch;
+ bool old = false;
int err;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
/* select wanted version - prefer previous (if any) */
if (guc_version->major || guc_version->minor) {
- wanted_branch = guc_version->branch;
- wanted_major = guc_version->major;
- wanted_minor = guc_version->minor;
+ wanted = *guc_version;
+ old = true;
} else {
- vf_wanted_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
- xe_gt_assert(gt, wanted_major != GUC_VERSION_MAJOR_ANY);
+ vf_wanted_guc_version(gt, &wanted);
+ xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY);
+
+ /* First time we handshake, so record the minimum wanted */
+ gt->sriov.vf.wanted_guc_version = wanted;
}
- err = guc_action_match_version(guc, wanted_branch, wanted_major, wanted_minor,
- &branch, &major, &minor, &patch);
+ err = guc_action_match_version(guc, &wanted, guc_version);
if (unlikely(err))
goto fail;
- /* we don't support interface version change */
- if ((guc_version->major || guc_version->minor) &&
- (guc_version->branch != branch || guc_version->major != major ||
- guc_version->minor != minor)) {
- xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
- branch, major, minor, patch);
- xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
- guc_version->branch, guc_version->major,
- guc_version->minor, guc_version->patch);
- err = -EREMCHG;
- goto fail;
+ if (old) {
+ /* we don't support interface version change */
+ if (MAKE_GUC_VER_STRUCT(*guc_version) != MAKE_GUC_VER_STRUCT(wanted)) {
+ xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
+ guc_version->branch, guc_version->major,
+ guc_version->minor, guc_version->patch);
+ xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
+ wanted.branch, wanted.major,
+ wanted.minor, wanted.patch);
+ err = -EREMCHG;
+ goto fail;
+ } else {
+ /* version is unchanged, no need to re-verify it */
+ return 0;
+ }
}
/* illegal */
- if (major > wanted_major) {
+ if (guc_version->major > wanted.major) {
err = -EPROTO;
goto unsupported;
}
/* there's no fallback on major version. */
- if (major != wanted_major) {
+ if (guc_version->major != wanted.major) {
err = -ENOPKG;
goto unsupported;
}
/* check against minimum version supported by us */
- vf_minimum_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
- xe_gt_assert(gt, major != GUC_VERSION_MAJOR_ANY);
- if (major < wanted_major || (major == wanted_major && minor < wanted_minor)) {
+ vf_minimum_guc_version(gt, &wanted);
+ xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY);
+ if (MAKE_GUC_VER_STRUCT(*guc_version) < MAKE_GUC_VER_STRUCT(wanted)) {
err = -ENOKEY;
goto unsupported;
}
xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n",
- branch, major, minor, patch);
+ guc_version->branch, guc_version->major,
+ guc_version->minor, guc_version->patch);
- guc_version->branch = branch;
- guc_version->major = major;
- guc_version->minor = minor;
- guc_version->patch = patch;
return 0;
unsupported:
xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n",
- branch, major, minor, patch, ERR_PTR(err));
+ guc_version->branch, guc_version->major,
+ guc_version->minor, guc_version->patch,
+ ERR_PTR(err));
fail:
xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n",
- wanted_major, wanted_minor, ERR_PTR(err));
+ wanted.major, wanted.minor, ERR_PTR(err));
/* try again with *any* just to query which version is supported */
- if (!guc_action_match_version(guc, GUC_VERSION_BRANCH_ANY,
- GUC_VERSION_MAJOR_ANY, GUC_VERSION_MINOR_ANY,
- &branch, &major, &minor, &patch))
+ if (!guc_action_match_version_any(guc, &wanted))
xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n",
- branch, major, minor, patch);
+ wanted.branch, wanted.major, wanted.minor, wanted.patch);
return err;
}
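[MAKE_GUC_VER_STRUCT() collapses a struct xe_uc_fw_version into one scalar so whole versions can be compared with ordinary relational operators. A sketch of the usual packing, assuming major/minor/patch each fit in a byte (the real macros are in xe_guc.h); note that under this assumption the branch field is not folded into the packed value:

#define MAKE_GUC_VER(maj, min, pat)	(((maj) << 16) | ((min) << 8) | (pat))
#define MAKE_GUC_VER_STRUCT(ver)	MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)
]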
@@ -250,6 +268,29 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
return 0;
}
+/**
+ * xe_gt_sriov_vf_guc_versions - Minimum required and found GuC ABI versions
+ * @gt: the &xe_gt
+ * @wanted: pointer to the xe_uc_fw_version to be filled with the wanted version
+ * @found: pointer to the xe_uc_fw_version to be filled with the found version
+ *
+ * This function is for VF use only and can only be used after a successful
+ * version handshake with the GuC.
+ */
+void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
+ struct xe_uc_fw_version *wanted,
+ struct xe_uc_fw_version *found)
+{
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
+
+ if (wanted)
+ *wanted = gt->sriov.vf.wanted_guc_version;
+
+ if (found)
+ *found = gt->sriov.vf.guc_version;
+}
+
static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
{
u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
@@ -415,6 +456,7 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
start, start + size - 1, size / SZ_1K);
+ config->ggtt_shift = start - (s64)config->ggtt_base;
config->ggtt_base = start;
config->ggtt_size = size;
@@ -510,7 +552,7 @@ int xe_gt_sriov_vf_query_config(struct xe_gt *gt)
if (unlikely(err))
return err;
- if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
+ if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
err = vf_get_lmem_info(gt);
if (unlikely(err))
return err;
@@ -560,106 +602,56 @@ u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
return gt->sriov.vf.self_config.lmem_size;
}
-static struct xe_ggtt_node *
-vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end)
-{
- struct xe_ggtt_node *node;
- int err;
-
- node = xe_ggtt_node_init(ggtt);
- if (IS_ERR(node))
- return node;
-
- err = xe_ggtt_node_insert_balloon(node, start, end);
- if (err) {
- xe_ggtt_node_fini(node);
- return ERR_PTR(err);
- }
-
- return node;
-}
-
-static int vf_balloon_ggtt(struct xe_gt *gt)
+/**
+ * xe_gt_sriov_vf_ggtt - Size of the GGTT assigned to the VF.
+ * @gt: the &xe_gt
+ *
+ * This function is for VF use only.
+ *
+ * Return: size of the GGTT assigned to the VF.
+ */
+u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_ggtt *ggtt = tile->mem.ggtt;
- struct xe_device *xe = gt_to_xe(gt);
- u64 start, end;
-
- xe_gt_assert(gt, IS_SRIOV_VF(xe));
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
-
- if (!config->ggtt_size)
- return -ENODATA;
-
- /*
- * VF can only use part of the GGTT as allocated by the PF:
- *
- * WOPCM GUC_GGTT_TOP
- * |<------------ Total GGTT size ------------------>|
- *
- * VF GGTT base -->|<- size ->|
- *
- * +--------------------+----------+-----------------+
- * |////////////////////| block |\\\\\\\\\\\\\\\\\|
- * +--------------------+----------+-----------------+
- *
- * |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
- */
-
- start = xe_wopcm_size(xe);
- end = config->ggtt_base;
- if (end != start) {
- tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end);
- if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
- return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
- }
-
- start = config->ggtt_base + config->ggtt_size;
- end = GUC_GGTT_TOP;
- if (end != start) {
- tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end);
- if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
- xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
- return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
- }
- }
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
+ xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
- return 0;
+ return gt->sriov.vf.self_config.ggtt_size;
}
-static void deballoon_ggtt(struct drm_device *drm, void *arg)
+/**
+ * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset.
+ * @gt: the &xe_gt
+ *
+ * This function is for VF use only.
+ *
+ * Return: base offset of the GGTT assigned to the VF.
+ */
+u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt)
{
- struct xe_tile *tile = arg;
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
+ xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
- xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
- xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]);
- xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
+ return gt->sriov.vf.self_config.ggtt_base;
}
/**
- * xe_gt_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
- * @gt: the &xe_gt
+ * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration
+ * @gt: the &xe_gt struct instance
*
* This function is for VF use only.
*
- * Return: 0 on success or a negative error code on failure.
+ * Return: the shift value; may be negative.
*/
-int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt)
+s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
{
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_device *xe = tile_to_xe(tile);
- int err;
-
- if (xe_gt_is_media_type(gt))
- return 0;
+ struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
- err = vf_balloon_ggtt(gt);
- if (err)
- return err;
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
- return drmm_add_action_or_reset(&xe->drm, deballoon_ggtt, tile);
+ return config->ggtt_shift;
}
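[A worked example of the shift semantics, with illustrative numbers only: vf_get_ggtt_info() computes the shift against the previous base before overwriting it, so

	s64 shift = 0x01800000ll - 0x01000000ll;	/* new base - old base = +0x00800000 */

A post-migration placement below the old base gives a negative value, which is why ggtt_shift is an s64 rather than a u64.]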
static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
@@ -694,21 +686,22 @@ static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
return 0;
}
-static void vf_connect_pf(struct xe_gt *gt, u16 major, u16 minor)
+static void vf_connect_pf(struct xe_device *xe, u16 major, u16 minor)
{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_assert(xe, IS_SRIOV_VF(xe));
- gt->sriov.vf.pf_version.major = major;
- gt->sriov.vf.pf_version.minor = minor;
+ xe->sriov.vf.pf_version.major = major;
+ xe->sriov.vf.pf_version.minor = minor;
}
-static void vf_disconnect_pf(struct xe_gt *gt)
+static void vf_disconnect_pf(struct xe_device *xe)
{
- vf_connect_pf(gt, 0, 0);
+ vf_connect_pf(xe, 0, 0);
}
static int vf_handshake_with_pf(struct xe_gt *gt)
{
+ struct xe_device *xe = gt_to_xe(gt);
u32 major_wanted = GUC_RELAY_VERSION_LATEST_MAJOR;
u32 minor_wanted = GUC_RELAY_VERSION_LATEST_MINOR;
u32 major = major_wanted, minor = minor_wanted;
@@ -724,13 +717,13 @@ static int vf_handshake_with_pf(struct xe_gt *gt)
}
xe_gt_sriov_dbg(gt, "using VF/PF ABI %u.%u\n", major, minor);
- vf_connect_pf(gt, major, minor);
+ vf_connect_pf(xe, major, minor);
return 0;
failed:
xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n",
major, minor, ERR_PTR(err));
- vf_disconnect_pf(gt);
+ vf_disconnect_pf(xe);
return err;
}
@@ -783,10 +776,12 @@ void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ struct xe_device *xe = gt_to_xe(gt);
- return major == gt->sriov.vf.pf_version.major &&
- minor <= gt->sriov.vf.pf_version.minor;
+ xe_gt_assert(gt, IS_SRIOV_VF(xe));
+
+ return major == xe->sriov.vf.pf_version.major &&
+ minor <= xe->sriov.vf.pf_version.minor;
}
static int vf_prepare_runtime_info(struct xe_gt *gt, unsigned int num_regs)
@@ -974,7 +969,6 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
struct vf_runtime_reg *rr;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, gt->sriov.vf.pf_version.major);
xe_gt_assert(gt, !reg.vf);
if (reg.addr == GMD_ID.addr) {
@@ -1043,7 +1037,9 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);
- if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
+ drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);
+
+ if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
}
@@ -1079,19 +1075,21 @@ void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p)
*/
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
- struct xe_gt_sriov_vf_relay_version *pf_version = &gt->sriov.vf.pf_version;
- u32 branch, major, minor;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_uc_fw_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version *wanted = &gt->sriov.vf.wanted_guc_version;
+ struct xe_sriov_vf_relay_version *pf_version = &xe->sriov.vf.pf_version;
+ struct xe_uc_fw_version ver;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
drm_printf(p, "GuC ABI:\n");
- vf_minimum_guc_version(gt, &branch, &major, &minor);
- drm_printf(p, "\tbase:\t%u.%u.%u.*\n", branch, major, minor);
+ vf_minimum_guc_version(gt, &ver);
+ drm_printf(p, "\tbase:\t%u.%u.%u.*\n", ver.branch, ver.major, ver.minor);
- vf_wanted_guc_version(gt, &branch, &major, &minor);
- drm_printf(p, "\twanted:\t%u.%u.%u.*\n", branch, major, minor);
+ drm_printf(p, "\twanted:\t%u.%u.%u.*\n",
+ wanted->branch, wanted->major, wanted->minor);
drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n",
guc_version->branch, guc_version->major,
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index ba6c5d74e326..e0357f341a2d 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -11,19 +11,26 @@
struct drm_printer;
struct xe_gt;
struct xe_reg;
+struct xe_uc_fw_version;
int xe_gt_sriov_vf_reset(struct xe_gt *gt);
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt);
+void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
+ struct xe_uc_fw_version *wanted,
+ struct xe_uc_fw_version *found);
int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
-int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt);
int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
+u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt);
+u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt);
+s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt);
+
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg);
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index a57f13b5afcd..298dedf4b009 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -7,30 +7,7 @@
#define _XE_GT_SRIOV_VF_TYPES_H_
#include <linux/types.h>
-
-/**
- * struct xe_gt_sriov_vf_guc_version - GuC ABI version details.
- */
-struct xe_gt_sriov_vf_guc_version {
- /** @branch: branch version. */
- u8 branch;
- /** @major: major version. */
- u8 major;
- /** @minor: minor version. */
- u8 minor;
- /** @patch: patch version. */
- u8 patch;
-};
-
-/**
- * struct xe_gt_sriov_vf_relay_version - PF ABI version details.
- */
-struct xe_gt_sriov_vf_relay_version {
- /** @major: major version. */
- u16 major;
- /** @minor: minor version. */
- u16 minor;
-};
+#include "xe_uc_fw_types.h"
/**
* struct xe_gt_sriov_vf_selfconfig - VF configuration data.
@@ -40,6 +17,8 @@ struct xe_gt_sriov_vf_selfconfig {
u64 ggtt_base;
/** @ggtt_size: assigned size of the GGTT region. */
u64 ggtt_size;
+ /** @ggtt_shift: difference in ggtt_base on last migration */
+ s64 ggtt_shift;
/** @lmem_size: assigned size of the LMEM. */
u64 lmem_size;
/** @num_ctxs: assigned number of GuC submission context IDs. */
@@ -71,12 +50,12 @@ struct xe_gt_sriov_vf_runtime {
* struct xe_gt_sriov_vf - GT level VF virtualization data.
*/
struct xe_gt_sriov_vf {
+ /** @wanted_guc_version: minimum wanted GuC ABI version. */
+ struct xe_uc_fw_version wanted_guc_version;
/** @guc_version: negotiated GuC ABI version. */
- struct xe_gt_sriov_vf_guc_version guc_version;
+ struct xe_uc_fw_version guc_version;
/** @self_config: resource configurations. */
struct xe_gt_sriov_vf_selfconfig self_config;
- /** @pf_version: negotiated VF/PF ABI version. */
- struct xe_gt_sriov_vf_relay_version pf_version;
/** @runtime: runtime data retrieved from the PF. */
struct xe_gt_sriov_vf_runtime runtime;
};
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index e1362e608146..086c12ee3d9d 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -330,6 +330,40 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
return 0;
}
+static int send_tlb_invalidation_all(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION_ALL,
+ 0, /* seqno, replaced in send_tlb_invalidation */
+ MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL),
+ };
+
+ return send_tlb_invalidation(&gt->uc.guc, fence, action, ARRAY_SIZE(action));
+}
+
+/**
+ * xe_gt_tlb_invalidation_all - Invalidate all TLBs across PF and all VFs.
+ * @gt: the &xe_gt structure
+ * @fence: the &xe_gt_tlb_invalidation_fence to be signaled on completion
+ *
+ * Send a request to invalidate all TLBs across PF and all VFs.
+ *
+ * Return: 0 on success, negative error code on error
+ */
+int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence)
+{
+ int err;
+
+ xe_gt_assert(gt, gt == fence->gt);
+
+ err = send_tlb_invalidation_all(gt, fence);
+ if (err)
+ xe_gt_err(gt, "TLB invalidation request failed (%pe)", ERR_PTR(err));
+
+ return err;
+}
+
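[A hedged usage sketch, assuming the fence helpers keep their current names in xe_gt_tlb_invalidation.h:

struct xe_gt_tlb_invalidation_fence fence;

xe_gt_tlb_invalidation_fence_init(gt, &fence, true);	/* stack-allocated fence */
if (!xe_gt_tlb_invalidation_all(gt, &fence))
	xe_gt_tlb_invalidation_fence_wait(&fence);	/* block until the GuC acks */
]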
/*
* Ensure that roundup_pow_of_two(length) doesn't overflow.
* Note that roundup_pow_of_two() operates on unsigned long,
@@ -449,30 +483,6 @@ void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
}
/**
- * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
- * @gt: GT structure
- * @fence: invalidation fence which will be signal on TLB invalidation
- * completion, can be NULL
- * @vma: VMA to invalidate
- *
- * Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can use
- * the invalidation fence to wait for completion.
- *
- * Return: Negative error code on error, 0 on success
- */
-int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- struct xe_vma *vma)
-{
- xe_gt_assert(gt, vma);
-
- return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
- xe_vma_end(vma),
- xe_vma_vm(vma)->usm.asid);
-}
-
-/**
* xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
* @guc: guc
* @msg: message indicating TLB invalidation done
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index abe9b03d543e..f7f0f2eaf4b5 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -19,10 +19,8 @@ int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- struct xe_vma *vma);
void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
+int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence);
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
u64 start, u64 end, u32 asid);
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 516c81e3b8dd..8c63e3263643 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -12,23 +12,20 @@
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_mmio.h"
#include "xe_wa.h"
-static void
-load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
+static void load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs,
+ const struct xe_reg regs[])
{
- va_list argp;
u32 fuse_val[XE_MAX_DSS_FUSE_REGS] = {};
int i;
- if (drm_WARN_ON(&gt_to_xe(gt)->drm, numregs > XE_MAX_DSS_FUSE_REGS))
- numregs = XE_MAX_DSS_FUSE_REGS;
+ xe_gt_assert(gt, numregs <= ARRAY_SIZE(fuse_val));
- va_start(argp, numregs);
for (i = 0; i < numregs; i++)
- fuse_val[i] = xe_mmio_read32(&gt->mmio, va_arg(argp, struct xe_reg));
- va_end(argp);
+ fuse_val[i] = xe_mmio_read32(&gt->mmio, regs[i]);
bitmap_from_arr32(mask, fuse_val, numregs * 32);
}
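[A worked example of the packing, with illustrative values: with numregs == 2 and fuse reads {0x0000000f, 0x00000001}, bitmap_from_arr32() sets mask bits 0-3 from the first register and bit 32 from the second, i.e. DSS 0-3 and DSS 32 are reported present.]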
@@ -218,9 +215,19 @@ get_num_dss_regs(struct xe_device *xe, int *geometry_regs, int *compute_regs)
void
xe_gt_topology_init(struct xe_gt *gt)
{
+ static const struct xe_reg geometry_regs[] = {
+ XELP_GT_GEOMETRY_DSS_ENABLE,
+ XE2_GT_GEOMETRY_DSS_1,
+ XE2_GT_GEOMETRY_DSS_2,
+ };
+ static const struct xe_reg compute_regs[] = {
+ XEHP_GT_COMPUTE_DSS_ENABLE,
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
+ XE2_GT_COMPUTE_DSS_2,
+ };
+ int num_geometry_regs, num_compute_regs;
struct xe_device *xe = gt_to_xe(gt);
struct drm_printer p;
- int num_geometry_regs, num_compute_regs;
get_num_dss_regs(xe, &num_geometry_regs, &num_compute_regs);
@@ -228,23 +235,18 @@ xe_gt_topology_init(struct xe_gt *gt)
* Register counts returned shouldn't exceed the number of registers
* passed as parameters below.
*/
- drm_WARN_ON(&xe->drm, num_geometry_regs > 3);
- drm_WARN_ON(&xe->drm, num_compute_regs > 3);
+ xe_gt_assert(gt, num_geometry_regs <= ARRAY_SIZE(geometry_regs));
+ xe_gt_assert(gt, num_compute_regs <= ARRAY_SIZE(compute_regs));
load_dss_mask(gt, gt->fuse_topo.g_dss_mask,
- num_geometry_regs,
- XELP_GT_GEOMETRY_DSS_ENABLE,
- XE2_GT_GEOMETRY_DSS_1,
- XE2_GT_GEOMETRY_DSS_2);
- load_dss_mask(gt, gt->fuse_topo.c_dss_mask, num_compute_regs,
- XEHP_GT_COMPUTE_DSS_ENABLE,
- XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
- XE2_GT_COMPUTE_DSS_2);
+ num_geometry_regs, geometry_regs);
+ load_dss_mask(gt, gt->fuse_topo.c_dss_mask,
+ num_compute_regs, compute_regs);
+
load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss, &gt->fuse_topo.eu_type);
load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask);
- p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");
-
+ p = xe_gt_dbg_printer(gt);
xe_gt_topology_dump(gt, &p);
}
@@ -288,11 +290,6 @@ xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum)
return find_next_bit(mask, XE_MAX_DSS_FUSE_BITS, groupnum * groupsize);
}
-bool xe_dss_mask_empty(const xe_dss_mask_t mask)
-{
- return bitmap_empty(mask, XE_MAX_DSS_FUSE_BITS);
-}
-
/**
* xe_gt_topology_has_dss_in_quadrant - check fusing of DSS in GT quadrant
* @gt: GT to check
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index a72d26ba0653..c8140704ad4c 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -41,8 +41,6 @@ xe_gt_topology_mask_last_dss(const xe_dss_mask_t mask)
unsigned int
xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum);
-bool xe_dss_mask_empty(const xe_dss_mask_t mask);
-
bool
xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 7def0959da35..96344c604726 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -377,6 +377,8 @@ struct xe_gt {
u16 group_target;
/** @steering.instance_target: instance to steer accesses to */
u16 instance_target;
+ /** @steering.initialized: Whether this steering range is initialized */
+ bool initialized;
} steering[NUM_STEERING_TYPES];
/**
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index bac5471a1a78..b1d1d6da3758 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -29,6 +29,7 @@
#include "xe_guc_db_mgr.h"
#include "xe_guc_engine_activity.h"
#include "xe_guc_hwconfig.h"
+#include "xe_guc_klv_helpers.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
#include "xe_guc_relay.h"
@@ -59,7 +60,7 @@ static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
/* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
xe_assert(xe, addr < GUC_GGTT_TOP);
- xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);
+ xe_assert(xe, xe_bo_size(bo) <= GUC_GGTT_TOP - addr);
return addr;
}
@@ -420,7 +421,7 @@ static int guc_g2g_register(struct xe_guc *near_guc, struct xe_gt *far_gt, u32 t
buf = base + G2G_DESC_AREA_SIZE + slot * G2G_BUFFER_SIZE;
xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
- xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= g2g_bo->size);
+ xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(g2g_bo));
return guc_action_register_g2g_buffer(near_guc, type, far_tile, far_dev,
desc, buf, G2G_BUFFER_SIZE);
@@ -570,6 +571,86 @@ err_deregister:
return err;
}
+static int __guc_opt_in_features_enable(struct xe_guc *guc, u64 addr, u32 num_dwords)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_OPT_IN_FEATURE_KLV,
+ lower_32_bits(addr),
+ upper_32_bits(addr),
+ num_dwords
+ };
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static bool supports_dynamic_ics(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ /* Dynamic ICS is available on PVC and on Xe2 and newer platforms. */
+ if (xe->info.platform != XE_PVC && GRAPHICS_VER(xe) < 20)
+ return false;
+
+ /*
+ * The feature is currently not compatible with multi-lrc, so the GuC
+ * does not support it at all on the media engines (which are the main
+ * users of mlrc). On the primary GT side, to avoid it being used in
+ * conjunction with mlrc, we only enable it if we are in single CCS
+ * mode.
+ */
+ if (xe_gt_is_media_type(gt) || gt->ccs_mode > 1)
+ return false;
+
+ /*
+ * Dynamic ICS requires GuC v70.40.1, which maps to compatibility
+ * version v1.18.4.
+ */
+ return GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 18, 4);
+}
+
+#define OPT_IN_MAX_DWORDS 16
+int xe_guc_opt_in_features_enable(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ CLASS(xe_guc_buf, buf)(&guc->buf, OPT_IN_MAX_DWORDS);
+ u32 count = 0;
+ u32 *klvs;
+ int ret;
+
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+
+ klvs = xe_guc_buf_cpu_ptr(buf);
+
+ /*
+ * The extra CAT error type opt-in was added in GuC v70.17.0, which maps
+ * to compatibility version v1.7.0.
+ * Note that the GuC allows enabling this KLV even on platforms that do
+ * not support the extra type; in such a case the returned type variable
+ * will be set to a known invalid value which we can check against.
+ */
+ if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 7, 0))
+ klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_EXT_CAT_ERR_TYPE);
+
+ if (supports_dynamic_ics(guc))
+ klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH);
+
+ if (count) {
+ xe_assert(xe, count <= OPT_IN_MAX_DWORDS);
+
+ ret = __guc_opt_in_features_enable(guc, xe_guc_buf_flush(buf), count);
+ if (ret < 0) {
+ xe_gt_err(guc_to_gt(guc),
+ "failed to enable GuC opt-in features: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
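[Each opt-in KLV written above is header-only (a zero-length value), so every enabled feature costs exactly one dword of the buffer. A sketch of the KLV header packing this relies on, assuming the usual key/length split (the real PREP_GUC_KLV_TAG() lives in xe_guc_klv_helpers.h):

/* 32-bit KLV header: key in the upper bits, value length (in dwords) below */
#define PREP_GUC_KLV(key, len) \
	(FIELD_PREP(GUC_KLV_0_KEY, (key)) | FIELD_PREP(GUC_KLV_0_LEN, (len)))
#define PREP_GUC_KLV_TAG(tag) \
	PREP_GUC_KLV(GUC_KLV_##tag##_KEY, GUC_KLV_##tag##_LEN)
]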
+
static void guc_fini_hw(void *arg)
{
struct xe_guc *guc = arg;
@@ -577,7 +658,7 @@ static void guc_fini_hw(void *arg)
unsigned int fw_ref;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_uc_fini_hw(&guc_to_gt(guc)->uc);
+ xe_uc_sanitize_reset(&guc_to_gt(guc)->uc);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
guc_g2g_fini(guc);
@@ -627,23 +708,51 @@ static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
return 0;
}
-static int vf_guc_init(struct xe_guc *guc)
+static int vf_guc_init_noalloc(struct xe_guc *guc)
{
+ struct xe_gt *gt = guc_to_gt(guc);
int err;
- xe_guc_comm_init_early(guc);
-
- err = xe_guc_ct_init(&guc->ct);
+ err = xe_gt_sriov_vf_bootstrap(gt);
if (err)
return err;
- err = xe_guc_relay_init(&guc->relay);
+ err = xe_gt_sriov_vf_query_config(gt);
if (err)
return err;
return 0;
}
+int xe_guc_init_noalloc(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ xe_guc_comm_init_early(guc);
+
+ ret = xe_guc_ct_init_noalloc(&guc->ct);
+ if (ret)
+ goto out;
+
+ ret = xe_guc_relay_init(&guc->relay);
+ if (ret)
+ goto out;
+
+ if (IS_SRIOV_VF(xe)) {
+ ret = vf_guc_init_noalloc(guc);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
+ return ret;
+}
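[The split yields a two-phase bring-up; roughly, paraphrasing the call flow visible in this patch rather than quoting the probe code:

/*
 * xe_guc_init_noalloc() - CT software state, relay, and (on a VF) GuC
 *                         bootstrap plus config query; no buffer objects yet
 * xe_guc_init()         - firmware init and CT buffer allocation
 */
]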
+
int xe_guc_init(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
@@ -653,13 +762,13 @@ int xe_guc_init(struct xe_guc *guc)
guc->fw.type = XE_UC_FW_TYPE_GUC;
ret = xe_uc_fw_init(&guc->fw);
if (ret)
- goto out;
+ return ret;
if (!xe_uc_fw_is_enabled(&guc->fw))
return 0;
if (IS_SRIOV_VF(xe)) {
- ret = vf_guc_init(guc);
+ ret = xe_guc_ct_init(&guc->ct);
if (ret)
goto out;
return 0;
@@ -681,10 +790,6 @@ int xe_guc_init(struct xe_guc *guc)
if (ret)
goto out;
- ret = xe_guc_relay_init(&guc->relay);
- if (ret)
- goto out;
-
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc);
@@ -693,8 +798,6 @@ int xe_guc_init(struct xe_guc *guc)
guc_init_params(guc);
- xe_guc_comm_init_early(guc);
-
return 0;
out:
@@ -710,6 +813,10 @@ static int vf_guc_init_post_hwconfig(struct xe_guc *guc)
if (err)
return err;
+ err = xe_guc_buf_cache_init(&guc->buf);
+ if (err)
+ return err;
+
/* XXX xe_guc_db_mgr_init not needed for now */
return 0;
@@ -763,6 +870,10 @@ int xe_guc_post_load_init(struct xe_guc *guc)
xe_guc_ads_populate_post_load(&guc->ads);
+ ret = xe_guc_opt_in_features_enable(guc);
+ if (ret)
+ return ret;
+
if (xe_guc_g2g_wanted(guc_to_xe(guc))) {
ret = guc_g2g_start(guc);
if (ret)
@@ -1098,14 +1209,6 @@ static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
int ret;
- ret = xe_gt_sriov_vf_bootstrap(gt);
- if (ret)
- return ret;
-
- ret = xe_gt_sriov_vf_query_config(gt);
- if (ret)
- return ret;
-
ret = xe_guc_hwconfig_init(guc);
if (ret)
return ret;
@@ -1116,13 +1219,17 @@ static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
ret = xe_gt_sriov_vf_connect(gt);
if (ret)
- return ret;
+ goto err_out;
ret = xe_gt_sriov_vf_query_runtime(gt);
if (ret)
- return ret;
+ goto err_out;
return 0;
+
+err_out:
+ xe_guc_sanitize(guc);
+ return ret;
}
/**
@@ -1285,6 +1392,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
+ bool lost = false;
int ret;
int i;
@@ -1318,6 +1426,12 @@ retry:
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
50000, &reply, false);
if (ret) {
+ /* scratch registers might be cleared during FLR, try once more */
+ if (!reply && !lost) {
+ xe_gt_dbg(gt, "GuC mmio request %#x: lost, trying again\n", request[0]);
+ lost = true;
+ goto retry;
+ }
timeout:
xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n",
request[0], reply);
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index 58338be44558..22cf019a11bf 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -26,6 +26,7 @@
struct drm_printer;
void xe_guc_comm_init_early(struct xe_guc *guc);
+int xe_guc_init_noalloc(struct xe_guc *guc);
int xe_guc_init(struct xe_guc *guc);
int xe_guc_init_post_hwconfig(struct xe_guc *guc);
int xe_guc_post_load_init(struct xe_guc *guc);
@@ -33,6 +34,7 @@ int xe_guc_reset(struct xe_guc *guc);
int xe_guc_upload(struct xe_guc *guc);
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc);
int xe_guc_enable_communication(struct xe_guc *guc);
+int xe_guc_opt_in_features_enable(struct xe_guc *guc);
int xe_guc_suspend(struct xe_guc *guc);
void xe_guc_notify(struct xe_guc *guc);
int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 44c1fa2fe7c8..131cfc56be00 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -20,6 +20,7 @@
#include "xe_gt_ccs_mode.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
+#include "xe_guc_buf.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
@@ -889,7 +890,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
xe_gt_assert(gt, ads->bo);
- xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
+ xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo));
guc_policies_init(ads);
guc_golden_lrc_init(ads);
guc_mapping_table_init_invalid(gt, &info_map);
@@ -913,7 +914,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
xe_gt_assert(gt, ads->bo);
- xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
+ xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo));
guc_policies_init(ads);
fill_engine_enable_masks(gt, &info_map);
guc_mmio_reg_state_init(ads);
@@ -1004,16 +1005,16 @@ static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_off
*/
int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
{
- struct xe_device *xe = ads_to_xe(ads);
- struct xe_gt *gt = ads_to_gt(ads);
- struct xe_tile *tile = gt_to_tile(gt);
struct guc_policies *policies;
- struct xe_bo *bo;
- int ret = 0;
+ struct xe_guc *guc = ads_to_guc(ads);
+ struct xe_device *xe = ads_to_xe(ads);
+ CLASS(xe_guc_buf, buf)(&guc->buf, sizeof(*policies));
+
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
- policies = kmalloc(sizeof(*policies), GFP_KERNEL);
- if (!policies)
- return -ENOMEM;
+ policies = xe_guc_buf_cpu_ptr(buf);
+ memset(policies, 0, sizeof(*policies));
policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items);
@@ -1023,16 +1024,5 @@ int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
else
policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;
- bo = xe_managed_bo_create_from_data(xe, tile, policies, sizeof(struct guc_policies),
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT);
- if (IS_ERR(bo)) {
- ret = PTR_ERR(bo);
- goto out;
- }
-
- ret = guc_ads_action_update_policies(ads, xe_bo_ggtt_addr(bo));
-out:
- kfree(policies);
- return ret;
+ return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf));
}
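[CLASS(xe_guc_buf, ...) is the scope-based cleanup idiom from <linux/cleanup.h>: the buffer is released automatically when the variable goes out of scope, which is what lets the function above return from anywhere without an unwind label. A generic, self-contained sketch of the same idiom using a hypothetical demo class (not the xe_guc_buf definition):

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>

DEFINE_CLASS(demo_buf, void *, kfree(_T), kmalloc(size, GFP_KERNEL), size_t size);

static int demo(void)
{
	CLASS(demo_buf, p)(64);		/* freed automatically on every return path */

	if (!p)
		return -ENOMEM;
	memset(p, 0, 64);
	return 0;
}
]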
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
index 0193c94dd6a0..14a07dca48e7 100644
--- a/drivers/gpu/drm/xe/xe_guc_buf.c
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -37,10 +37,6 @@ int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
struct xe_gt *gt = cache_to_gt(cache);
struct xe_sa_manager *sam;
- /* XXX: currently it's useful only for the PF actions */
- if (!IS_SRIOV_PF(gt_to_xe(gt)))
- return 0;
-
sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32));
if (IS_ERR(sam))
return PTR_ERR(sam);
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index 859a3ba91be5..243dad3e2418 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -1817,6 +1817,12 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm
str_yes_no(snapshot->kernel_reserved));
for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) {
+ /*
+ * FIXME: During devcoredump print we should avoid accessing the
+ * driver pointers for gt or engine. Printing should be done only
+ * using the snapshot captured. Here we are accessing the gt
+ * pointer. It should be fixed.
+ */
list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type,
capture_class, false);
snapshot_print_by_list_order(snapshot, p, type, list);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index bbcbb348256f..3f4e6a46ff16 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -25,6 +25,7 @@
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
+#include "xe_gt_sriov_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_log.h"
@@ -84,22 +85,28 @@ struct g2h_fence {
u16 error;
u16 hint;
u16 reason;
+ bool cancel;
bool retry;
bool fail;
bool done;
};
+#define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
+
static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
+ memset(g2h_fence, 0, sizeof(*g2h_fence));
g2h_fence->response_buffer = response_buffer;
- g2h_fence->response_data = 0;
- g2h_fence->response_len = 0;
- g2h_fence->fail = false;
- g2h_fence->retry = false;
- g2h_fence->done = false;
g2h_fence->seqno = ~0x0;
}
+static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
+{
+ g2h_fence->cancel = true;
+ g2h_fence->fail = true;
+ g2h_fence->done = true;
+}
+
static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
{
return g2h_fence->seqno == ~0x0;
@@ -206,12 +213,10 @@ static void primelockdep(struct xe_guc_ct *ct)
fs_reclaim_release(GFP_KERNEL);
}
-int xe_guc_ct_init(struct xe_guc_ct *ct)
+int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_bo *bo;
int err;
xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
@@ -237,6 +242,23 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
primelockdep(ct);
+ err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
+ if (err)
+ return err;
+
+ xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
+ ct->state = XE_GUC_CT_STATE_DISABLED;
+ return 0;
+}
+ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */
+
+int xe_guc_ct_init(struct xe_guc_ct *ct)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
@@ -246,13 +268,6 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
return PTR_ERR(bo);
ct->bo = bo;
-
- err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
- if (err)
- return err;
-
- xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
- ct->state = XE_GUC_CT_STATE_DISABLED;
return 0;
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
@@ -373,9 +388,13 @@ static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
return ret > 0 ? -EPROTO : ret;
}
-static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
+static void guc_ct_change_state(struct xe_guc_ct *ct,
enum xe_guc_ct_state state)
{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct g2h_fence *g2h_fence;
+ unsigned long idx;
+
mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
@@ -387,8 +406,20 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
ct->g2h_outstanding = 0;
ct->state = state;
+ xe_gt_dbg(gt, "GuC CT communication channel %s\n",
+ state == XE_GUC_CT_STATE_STOPPED ? "stopped" :
+ str_enabled_disabled(state == XE_GUC_CT_STATE_ENABLED));
+
spin_unlock_irq(&ct->fast_lock);
+ /* cancel all in-flight send-recv requests */
+ xa_for_each(&ct->fence_lookup, idx, g2h_fence)
+ g2h_fence_cancel(g2h_fence);
+
+ /* make sure guc_ct_send_recv() will see g2h_fence changes */
+ smp_mb();
+ wake_up_all(&ct->g2h_fence_wq);
+
/*
* Lockdep doesn't like this under the fast lock and the destroy only
* needs to be serialized with the send path which ct lock provides.
@@ -442,7 +473,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
- xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size);
+ xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
@@ -458,11 +489,10 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
if (err)
goto err_out;
- xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);
+ guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);
smp_mb();
wake_up_all(&ct->wq);
- xe_gt_dbg(gt, "GuC CT communication channel enabled\n");
if (ct_needs_safe_mode(ct))
ct_enter_safe_mode(ct);
@@ -503,7 +533,7 @@ static void stop_g2h_handler(struct xe_guc_ct *ct)
*/
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
- xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
+ guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
ct_exit_safe_mode(ct);
stop_g2h_handler(ct);
}
@@ -519,7 +549,7 @@ void xe_guc_ct_stop(struct xe_guc_ct *ct)
if (!xe_guc_ct_initialized(ct))
return;
- xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
+ guc_ct_change_state(ct, XE_GUC_CT_STATE_STOPPED);
stop_g2h_handler(ct);
}
@@ -630,6 +660,47 @@ static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
spin_unlock_irq(&ct->fast_lock);
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
+{
+ unsigned int slot = fence % ARRAY_SIZE(ct->fast_req);
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ unsigned long entries[SZ_32];
+ unsigned int n;
+
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+
+ /* May be called under spinlock, so avoid sleeping */
+ ct->fast_req[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT);
+#endif
+ ct->fast_req[slot].fence = fence;
+ ct->fast_req[slot].action = action;
+}
+#else
+static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
+{
+}
+#endif
+
+/*
+ * The CT protocol accepts a 16-bit fence. This field is fully owned by the
+ * driver, the GuC will just copy it to the reply message. Since we need to
+ * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
+ * we use one bit of the seqno as an indicator for that and a rolling counter
+ * for the remaining 15 bits.
+ */
+#define CT_SEQNO_MASK GENMASK(14, 0)
+#define CT_SEQNO_UNTRACKED BIT(15)
+static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
+{
+ u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
+
+ if (!is_g2h_fence)
+ seqno |= CT_SEQNO_UNTRACKED;
+
+ return seqno;
+}
+
#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
@@ -706,6 +777,9 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
} else {
+ fast_req_track(ct, ct_fence_value,
+ FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
+
cmd[1] =
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
@@ -738,25 +812,6 @@ corrupted:
return -EPIPE;
}
-/*
- * The CT protocol accepts a 16 bits fence. This field is fully owned by the
- * driver, the GuC will just copy it to the reply message. Since we need to
- * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
- * we use one bit of the seqno as an indicator for that and a rolling counter
- * for the remaining 15 bits.
- */
-#define CT_SEQNO_MASK GENMASK(14, 0)
-#define CT_SEQNO_UNTRACKED BIT(15)
-static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
-{
- u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
-
- if (!is_g2h_fence)
- seqno |= CT_SEQNO_UNTRACKED;
-
- return seqno;
-}
-
static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
@@ -1057,6 +1112,11 @@ retry_same_fence:
goto retry;
}
if (g2h_fence.fail) {
+ if (g2h_fence.cancel) {
+ xe_gt_dbg(gt, "H2G request %#x canceled!\n", action[0]);
+ ret = -ECANCELED;
+ goto unlock;
+ }
xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
action[0], g2h_fence.error, g2h_fence.hint);
ret = -EIO;
@@ -1065,6 +1125,7 @@ retry_same_fence:
if (ret > 0)
ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
+unlock:
mutex_unlock(&ct->lock);
return ret;
@@ -1148,6 +1209,55 @@ static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
+{
+ u16 fence_min = U16_MAX, fence_max = 0;
+ struct xe_gt *gt = ct_to_gt(ct);
+ bool found = false;
+ unsigned int n;
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ char *buf;
+#endif
+
+ lockdep_assert_held(&ct->lock);
+
+ for (n = 0; n < ARRAY_SIZE(ct->fast_req); n++) {
+ if (ct->fast_req[n].fence < fence_min)
+ fence_min = ct->fast_req[n].fence;
+ if (ct->fast_req[n].fence > fence_max)
+ fence_max = ct->fast_req[n].fence;
+
+ if (ct->fast_req[n].fence != fence)
+ continue;
+ found = true;
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ buf = kmalloc(SZ_4K, GFP_NOWAIT);
+ if (buf && stack_depot_snprint(ct->fast_req[n].stack, buf, SZ_4K, 0))
+ xe_gt_err(gt, "Fence 0x%x was used by action %#04x sent at:\n%s",
+ fence, ct->fast_req[n].action, buf);
+ else
+ xe_gt_err(gt, "Fence 0x%x was used by action %#04x [failed to retrieve stack]\n",
+ fence, ct->fast_req[n].action);
+ kfree(buf);
+#else
+ xe_gt_err(gt, "Fence 0x%x was used by action %#04x\n",
+ fence, ct->fast_req[n].action);
+#endif
+ break;
+ }
+
+ if (!found)
+ xe_gt_warn(gt, "Fence 0x%x not found - tracking buffer wrapped? [range = 0x%x -> 0x%x, next = 0x%X]\n",
+ fence, fence_min, fence_max, ct->fence_seqno);
+}
+#else
+static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
+{
+}
+#endif
+
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_gt *gt = ct_to_gt(ct);
@@ -1176,6 +1286,9 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
else
xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
type, fence);
+
+ fast_req_report(ct, fence);
+
CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
return -EPROTO;
@@ -1629,6 +1742,186 @@ static void g2h_worker_func(struct work_struct *w)
receive_g2h(ct);
}
+static void xe_fixup_u64_in_cmds(struct xe_device *xe, struct iosys_map *cmds,
+ u32 size, u32 idx, s64 shift)
+{
+ u32 hi, lo;
+ u64 offset;
+
+ lo = xe_map_rd_ring_u32(xe, cmds, idx, size);
+ hi = xe_map_rd_ring_u32(xe, cmds, idx + 1, size);
+ offset = make_u64(hi, lo);
+ offset += shift;
+ lo = lower_32_bits(offset);
+ hi = upper_32_bits(offset);
+ xe_map_wr_ring_u32(xe, cmds, idx, size, lo);
+ xe_map_wr_ring_u32(xe, cmds, idx + 1, size, hi);
+}
+
+/*
+ * Shift any GGTT addresses within a single message that was left in the
+ * CTB from before post-migration recovery.
+ * @ct: pointer to CT struct of the target GuC
+ * @cmds: iomap buffer containing CT messages
+ * @head: start of the target message within the buffer
+ * @len: length of the target message
+ * @size: size of the commands buffer
+ * @shift: the address shift to be added to each GGTT reference
+ * Return: true if the message was fixed or needed no fixups, false on failure
+ */
+static bool ct_fixup_ggtt_in_message(struct xe_guc_ct *ct,
+ struct iosys_map *cmds, u32 head,
+ u32 len, u32 size, s64 shift)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 msg[GUC_HXG_MSG_MIN_LEN];
+ u32 action, i, n;
+
+ xe_gt_assert(gt, len >= GUC_HXG_MSG_MIN_LEN);
+
+ msg[0] = xe_map_rd_ring_u32(xe, cmds, head, size);
+ action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
+
+ xe_gt_sriov_dbg_verbose(gt, "fixing H2G %#x\n", action);
+
+ switch (action) {
+ case XE_GUC_ACTION_REGISTER_CONTEXT:
+ if (len != XE_GUC_REGISTER_CONTEXT_MSG_LEN)
+ goto err_len;
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER,
+ shift);
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER,
+ shift);
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, shift);
+ break;
+ case XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC:
+ if (len < XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN)
+ goto err_len;
+ n = xe_map_rd_ring_u32(xe, cmds, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS, size);
+ if (len != XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN + 2 * n)
+ goto err_len;
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
+ shift);
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
+ shift);
+ for (i = 0; i < n; i++)
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR
+ + 2 * i, shift);
+ break;
+ default:
+ break;
+ }
+ return true;
+
+err_len:
+ xe_gt_err(gt, "Skipped G2G %#x message fixups, unexpected length (%u)\n", action, len);
+ return false;
+}
+
+/*
+ * Apply fixups to the next outgoing CT message within the given CTB
+ * @ct: the &xe_guc_ct struct instance representing the target GuC
+ * @h2g: the &guc_ctb struct instance of the target buffer
+ * @shift: shift to be added to all GGTT addresses within the CTB
+ * @mhead: pointer to an integer storing the message start position; the
+ * position is advanced to the next message before this function returns
+ * @avail: size of the area available for parsing, that is, the length
+ * of all remaining messages stored within the CTB
+ * Return: size of the area available for parsing after one message
+ * has been parsed, that is, the length remaining from the updated mhead
+ */
+static int ct_fixup_ggtt_in_buffer(struct xe_guc_ct *ct, struct guc_ctb *h2g,
+ s64 shift, u32 *mhead, s32 avail)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 msg[GUC_HXG_MSG_MIN_LEN];
+ u32 size = h2g->info.size;
+ u32 head = *mhead;
+ u32 len;
+
+ xe_gt_assert(gt, avail >= (s32)GUC_CTB_MSG_MIN_LEN);
+
+ /* Read header */
+ msg[0] = xe_map_rd_ring_u32(xe, &h2g->cmds, head, size);
+ len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
+
+ if (unlikely(len > (u32)avail)) {
+ xe_gt_err(gt, "H2G channel broken on read, avail=%d, len=%d, fixups skipped\n",
+ avail, len);
+ return 0;
+ }
+
+ head = (head + GUC_CTB_MSG_MIN_LEN) % size;
+ if (!ct_fixup_ggtt_in_message(ct, &h2g->cmds, head, msg_len_to_hxg_len(len), size, shift))
+ return 0;
+ *mhead = (head + msg_len_to_hxg_len(len)) % size;
+
+ return avail - len;
+}
+
+/**
+ * xe_guc_ct_fixup_messages_with_ggtt - Fixup any pending H2G CTB messages
+ * @ct: pointer to CT struct of the target GuC
+ * @ggtt_shift: shift to be added to all GGTT addresses within the CTB
+ *
+ * Messages in the GuC to Host CTB are owned by the GuC and any fixups in
+ * them are made by the GuC. But the content of the Host to GuC CTB is
+ * owned by the KMD, so fixups to GGTT references in any pending messages
+ * need to be applied here.
+ * This function updates GGTT offsets in payloads of pending H2G CTB
+ * messages (messages which were not consumed by GuC before the VF got
+ * paused).
+ */
+void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift)
+{
+ struct guc_ctb *h2g = &ct->ctbs.h2g;
+ struct xe_guc *guc = ct_to_guc(ct);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 head, tail, size;
+ s32 avail;
+
+ if (unlikely(h2g->info.broken))
+ return;
+
+ h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
+ head = h2g->info.head;
+ tail = READ_ONCE(h2g->info.tail);
+ size = h2g->info.size;
+
+ if (unlikely(head > size))
+ goto corrupted;
+
+ if (unlikely(tail >= size))
+ goto corrupted;
+
+ avail = tail - head;
+
+ /* beware of buffer wrap case */
+ if (unlikely(avail < 0))
+ avail += size;
+ xe_gt_dbg(gt, "available %d (%u:%u:%u)\n", avail, head, tail, size);
+ xe_gt_assert(gt, avail >= 0);
+
+ while (avail > 0)
+ avail = ct_fixup_ggtt_in_buffer(ct, h2g, ggtt_shift, &head, avail);
+
+ return;
+
+corrupted:
+ xe_gt_err(gt, "Corrupted H2G descriptor head=%u tail=%u size=%u, fixups not applied\n",
+ head, tail, size);
+ h2g->info.broken = true;
+}
+
static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
bool want_ctb)
{
@@ -1639,7 +1932,7 @@ static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bo
return NULL;
if (ct->bo && want_ctb) {
- snapshot->ctb_size = ct->bo->size;
+ snapshot->ctb_size = xe_bo_size(ct->bo);
snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
}
@@ -1775,6 +2068,24 @@ void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
}
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+/*
+ * This is a helper function which assists the driver in identifying if a fault
+ * injection test is currently active, allowing it to reduce unnecessary debug
+ * output. Typically, the function returns zero, but the fault injection
+ * framework can alter this to return an error. Since faults are injected
+ * through this function, it's important to ensure the compiler doesn't optimize
+ * it into an inline function. To avoid such optimization, the 'noinline'
+ * attribute is applied, since the compiler would otherwise inline a small
+ * static function just as it inlines static functions defined in header files.
+ */
+noinline int xe_is_injection_active(void) { return 0; }
+ALLOW_ERROR_INJECTION(xe_is_injection_active, ERRNO);
+#else
+int xe_is_injection_active(void) { return 0; }
+#endif
+
static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
{
struct xe_guc_log_snapshot *snapshot_log;
@@ -1785,6 +2096,12 @@ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reaso
if (ctb)
ctb->info.broken = true;
+ /*
+ * A huge dump is generated when an error is injected into the GuC
+ * CT/MMIO functions, so suppress the dump while a fault is injected.
+ */
+ if (xe_is_injection_active())
+ return;
/* Ignore further errors after the first dump until a reset */
if (ct->dead.reported)
@@ -1835,7 +2152,6 @@ static void ct_dead_print(struct xe_dead_ct *dead)
return;
}
-
/* Can't generate a genuine core dump at this point, so just do the good bits */
drm_puts(&lp, "**** Xe Device Coredump ****\n");
drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 582aac106469..18d4225e6502 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -11,6 +11,7 @@
struct drm_printer;
struct xe_device;
+int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct);
int xe_guc_ct_init(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
@@ -22,6 +23,8 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_pr
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb);
+void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift);
+
static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct)
{
return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED;
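
The header change above splits CT bring-up into a state-only phase and a
buffer-allocating phase; a comment-only sketch of the intended calling order
(the caller context is hypothetical):

	/*
	 * Sketch of the two-phase bring-up:
	 *
	 *	err = xe_guc_ct_init_noalloc(ct);	// registers cleanup, no BO yet
	 *	...					// early init that must not allocate
	 *	err = xe_guc_ct_init(ct);		// allocates and pins the CTB BO
	 */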
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index 8e1b9d981d61..8b03b50313d9 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/iosys-map.h>
#include <linux/spinlock_types.h>
+#include <linux/stackdepot.h>
#include <linux/wait.h>
#include <linux/xarray.h>
@@ -104,6 +105,18 @@ struct xe_dead_ct {
/** snapshot_log: copy of GuC log at point of error */
struct xe_guc_log_snapshot *snapshot_log;
};
+
+/** struct xe_fast_req_fence - Used to track FAST_REQ messages by fence to match error responses */
+struct xe_fast_req_fence {
+ /** @fence: sequence number sent in the H2G and returned in the G2H error */
+ u16 fence;
+ /** @action: H2G action code */
+ u16 action;
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ /** @stack: call stack from when the H2G was sent */
+ depot_stack_handle_t stack;
+#endif
+};
#endif
/**
@@ -152,6 +165,8 @@ struct xe_guc_ct {
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
/** @dead: information for debugging dead CTs */
struct xe_dead_ct dead;
+ /** @fast_req: history of FAST_REQ messages for matching with G2H error responses */
+ struct xe_fast_req_fence fast_req[SZ_32];
#endif
};
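
The fast_req[] history added above is a 32-slot ring indexed by the low bits
of the fence; a minimal sketch of the wrap/eviction behaviour (the struct is
trimmed to the two unconditional fields, other names are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define FAST_REQ_SLOTS 32	/* SZ_32 in the patch */

struct fast_req_fence {
	uint16_t fence;
	uint16_t action;
};

static struct fast_req_fence ring[FAST_REQ_SLOTS];

static void track(uint16_t fence, uint16_t action)
{
	unsigned int slot = fence % FAST_REQ_SLOTS;

	/* An older entry whose fence shares the low 5 bits is overwritten. */
	ring[slot].fence = fence;
	ring[slot].action = action;
}

int main(void)
{
	track(0x0005, 0x1001);
	track(0x0025, 0x1002);	/* 0x25 % 32 == 5: evicts fence 0x0005 */
	printf("slot 5 holds fence %#x (action %#x)\n",
	       ring[5].fence, ring[5].action);
	return 0;
}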
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
index 0fb48f8f05d8..92e1f9f41b8c 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -124,7 +124,7 @@ static void free_engine_activity_buffers(struct engine_activity_buffer *buffer)
static bool is_engine_activity_supported(struct xe_guc *guc)
{
struct xe_uc_fw_version *version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
- struct xe_uc_fw_version required = { 1, 14, 1 };
+ struct xe_uc_fw_version required = { .major = 1, .minor = 14, .patch = 1 };
struct xe_gt *gt = guc_to_gt(guc);
if (IS_SRIOV_VF(gt_to_xe(gt))) {
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index 4c39f01e4f52..a3f421e2adc0 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -20,6 +20,8 @@ struct xe_exec_queue;
struct xe_guc_exec_queue {
/** @q: Backpointer to parent xe_exec_queue */
struct xe_exec_queue *q;
+ /** @rcu: For safe freeing of exported dma fences */
+ struct rcu_head rcu;
/** @sched: GPU scheduler for this xe_exec_queue */
struct xe_gpu_scheduler sched;
/** @entity: Scheduler entity for this xe_exec_queue */
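
The rcu head added here enables the kfree_rcu() pattern used in
xe_guc_submit.c below; a generic kernel-style sketch of that pattern (struct
and function names are hypothetical, not the driver's code):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	int payload;
	struct rcu_head rcu;	/* must be embedded in the object being freed */
};

/*
 * Readers dereferencing the object under rcu_read_lock() keep seeing valid
 * memory until a grace period elapses; kfree_rcu() defers the kfree() that
 * long, so the free path needs no explicit synchronize_rcu().
 */
static void example_free(struct example_obj *obj)
{
	kfree_rcu(obj, rcu);
}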
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index 38039c411387..c01ccb35dc75 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -79,7 +79,7 @@ static struct xe_guc_log_snapshot *xe_guc_log_snapshot_alloc(struct xe_guc_log *
* Also, can't use vmalloc as might be called from atomic context. So need
* to break the buffer up into smaller chunks that can be allocated.
*/
- snapshot->size = log->bo->size;
+ snapshot->size = xe_bo_size(log->bo);
snapshot->num_chunks = DIV_ROUND_UP(snapshot->size, GUC_LOG_CHUNK_SIZE);
snapshot->copy = kcalloc(snapshot->num_chunks, sizeof(*snapshot->copy),
diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h
index 5b896f5fafaf..f1e2b0be90a9 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.h
+++ b/drivers/gpu/drm/xe/xe_guc_log.h
@@ -12,7 +12,7 @@
struct drm_printer;
struct xe_device;
-#if IS_ENABLED(CONFIG_DRM_XE_LARGE_GUC_BUFFER)
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
#define CRASH_BUFFER_SIZE SZ_1M
#define DEBUG_BUFFER_SIZE SZ_8M
#define CAPTURE_BUFFER_SIZE SZ_2M
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index c0ca61695d76..68b192fe3b32 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -189,7 +189,7 @@ static int pc_action_reset(struct xe_guc_pc *pc)
int ret;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
ERR_PTR(ret));
@@ -213,7 +213,7 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc)
/* Blocking here to ensure the results are ready before reading them */
ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
ERR_PTR(ret));
@@ -236,7 +236,7 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
id, value, ERR_PTR(ret));
@@ -258,7 +258,7 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe",
ERR_PTR(ret));
@@ -275,7 +275,7 @@ static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
int ret;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
mode, ERR_PTR(ret));
return ret;
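
The wedged/-ECANCELED condition is now repeated across five call sites in
this file; a hypothetical helper that would consolidate it (a sketch only,
not part of the patch):

/* Sketch only: true when a failed send is worth logging. */
static bool pc_send_failed(struct xe_guc_pc *pc, int ret)
{
	/* Cancelled sends are expected once the device is wedged. */
	return ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED);
}

Each call site could then read "if (pc_send_failed(pc, ret))" before its
xe_gt_err().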
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 2ac87ff4a057..cafb47711e9b 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -498,6 +498,15 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
}
+ /* Explicitly check some fields that we might fix up later */
+ xe_gt_assert(guc_to_gt(guc), info->wq_desc_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), info->wq_base_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), q->width ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS]);
+ xe_gt_assert(guc_to_gt(guc), info->hwlrca_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR]);
xe_gt_assert(guc_to_gt(guc), len <= MAX_MLRC_REG_SIZE);
#undef MAX_MLRC_REG_SIZE
@@ -522,6 +531,14 @@ static void __register_exec_queue(struct xe_guc *guc,
info->hwlrca_hi,
};
+ /* Explicitly check some fields that we might fix up later */
+ xe_gt_assert(guc_to_gt(guc), info->wq_desc_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), info->wq_base_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), info->hwlrca_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR]);
+
xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}
@@ -971,10 +988,7 @@ static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
*/
xe_gt_assert(gt, timeout_ms < 100 * MSEC_PER_SEC);
- if (ctx_timestamp < ctx_job_timestamp)
- diff = ctx_timestamp + U32_MAX - ctx_job_timestamp;
- else
- diff = ctx_timestamp - ctx_job_timestamp;
+ diff = ctx_timestamp - ctx_job_timestamp;
/*
* Ensure timeout is within 5% to account for a GuC scheduling latency
@@ -1079,12 +1093,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
* list so job can be freed and kick scheduler ensuring free job is not
* lost.
*/
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
- xe_sched_add_pending_job(sched, job);
- xe_sched_submission_start(sched);
-
- return DRM_GPU_SCHED_STAT_NOMINAL;
- }
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags))
+ return DRM_GPU_SCHED_STAT_NO_HANG;
/* Kill the run_job entry point */
xe_sched_submission_stop(sched);
@@ -1253,7 +1263,7 @@ trigger_reset:
/* Start fence signaling */
xe_hw_fence_irq_start(q->fence_irq);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
sched_enable:
enable_scheduling(q);
@@ -1263,10 +1273,8 @@ rearm:
* but there is not currently an easy way to do in DRM scheduler. With
* some thought, do this in a follow up.
*/
- xe_sched_add_pending_job(sched, job);
xe_sched_submission_start(sched);
-
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
static void __guc_exec_queue_fini_async(struct work_struct *w)
@@ -1287,7 +1295,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
- kfree(ge);
+ /*
+ * Free via RCU because the scheduler is exported through DRM
+ * scheduler fences (timeline name).
+ */
+ kfree_rcu(ge, rcu);
xe_exec_queue_fini(q);
xe_pm_runtime_put(guc_to_xe(guc));
}
@@ -1470,6 +1482,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
q->guc = ge;
ge->q = q;
+ init_rcu_head(&ge->rcu);
init_waitqueue_head(&ge->suspend_wait);
for (i = 0; i < MAX_STATIC_MSG_TYPE; ++i)
@@ -2073,12 +2086,16 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
u32 guc_id;
+ u32 type = XE_GUC_CAT_ERR_TYPE_INVALID;
- if (unlikely(len < 1))
+ if (unlikely(!len || len > 2))
return -EPROTO;
guc_id = msg[0];
+ if (len == 2)
+ type = msg[1];
+
if (guc_id == GUC_ID_UNKNOWN) {
/*
* GuC uses GUC_ID_UNKNOWN if it cannot map the CAT fault to any PF/VF
@@ -2092,8 +2109,19 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
if (unlikely(!q))
return -EPROTO;
- xe_gt_dbg(gt, "Engine memory cat error: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
- xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
+ /*
+ * The type is HW-defined and changes based on platform, so we don't
+ * decode it in the kernel and only check if it is valid.
+ * See bspec 54047 and 72187 for details.
+ */
+ if (type != XE_GUC_CAT_ERR_TYPE_INVALID)
+ xe_gt_dbg(gt,
+ "Engine memory CAT error [%u]: class=%s, logical_mask: 0x%x, guc_id=%d",
+ type, xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
+ else
+ xe_gt_dbg(gt,
+ "Engine memory CAT error: class=%s, logical_mask: 0x%x, guc_id=%d",
+ xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
trace_xe_exec_queue_memory_cat_error(q);
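
The check_timeout change above drops the manual wraparound branch: unsigned
32-bit subtraction already yields the elapsed ticks modulo 2^32 (and the
removed U32_MAX arm was off by one). A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t job_ts = 0xfffffff0u;	/* sampled before the counter wrapped */
	uint32_t now_ts = 0x00000010u;	/* sampled after the wrap */

	/* Modular arithmetic gives the true elapsed ticks, no branch needed. */
	uint32_t diff = now_ts - job_ts;

	printf("elapsed = %u ticks\n", diff);	/* prints 32 */
	return 0;
}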
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 27d11e06a82b..6d7b62724126 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -11,15 +11,12 @@
#include "xe_device_types.h"
#include "xe_drv.h"
#include "xe_heci_gsc.h"
+#include "regs/xe_gsc_regs.h"
#include "xe_platform_types.h"
#include "xe_survivability_mode.h"
#define GSC_BAR_LENGTH 0x00000FFC
-#define DG1_GSC_HECI2_BASE 0x259000
-#define PVC_GSC_HECI2_BASE 0x285000
-#define DG2_GSC_HECI2_BASE 0x374000
-
static void heci_gsc_irq_mask(struct irq_data *d)
{
/* generic irq handling */
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index 6a846e4cb221..7e43b2dd6a32 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -171,7 +171,7 @@ static int huc_auth_via_gsccs(struct xe_huc *huc)
sizeof(struct pxp43_new_huc_auth_in));
wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,
xe_bo_ggtt_addr(huc->fw.bo),
- huc->fw.bo->size);
+ xe_bo_size(huc->fw.bo));
do {
err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset,
ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 93241fd0a4ba..796ba8c34a16 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -17,6 +17,7 @@
#include "regs/xe_irq_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
@@ -693,7 +694,7 @@ static void read_media_fuses(struct xe_gt *gt)
if (!(BIT(j) & vdbox_mask)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "vcs%u fused off\n", j);
+ xe_gt_info(gt, "vcs%u fused off\n", j);
}
}
@@ -703,7 +704,7 @@ static void read_media_fuses(struct xe_gt *gt)
if (!(BIT(j) & vebox_mask)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "vecs%u fused off\n", j);
+ xe_gt_info(gt, "vecs%u fused off\n", j);
}
}
}
@@ -728,15 +729,13 @@ static void read_copy_fuses(struct xe_gt *gt)
if (!(BIT(j / 2) & bcs_mask)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "bcs%u fused off\n", j);
+ xe_gt_info(gt, "bcs%u fused off\n", j);
}
}
}
static void read_compute_fuses_from_dss(struct xe_gt *gt)
{
- struct xe_device *xe = gt_to_xe(gt);
-
/*
* CCS fusing based on DSS masks only applies to platforms that can
* have more than one CCS.
@@ -755,14 +754,13 @@ static void read_compute_fuses_from_dss(struct xe_gt *gt)
if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "ccs%u fused off\n", j);
+ xe_gt_info(gt, "ccs%u fused off\n", j);
}
}
}
static void read_compute_fuses_from_reg(struct xe_gt *gt)
{
- struct xe_device *xe = gt_to_xe(gt);
u32 ccs_mask;
ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
@@ -774,7 +772,7 @@ static void read_compute_fuses_from_reg(struct xe_gt *gt)
if ((ccs_mask & BIT(j)) == 0) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "ccs%u fused off\n", j);
+ xe_gt_info(gt, "ccs%u fused off\n", j);
}
}
}
@@ -789,8 +787,6 @@ static void read_compute_fuses(struct xe_gt *gt)
static void check_gsc_availability(struct xe_gt *gt)
{
- struct xe_device *xe = gt_to_xe(gt);
-
if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
return;
@@ -806,7 +802,25 @@ static void check_gsc_availability(struct xe_gt *gt)
xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);
- drm_dbg(&xe->drm, "GSC FW not used, disabling gsccs\n");
+ xe_gt_dbg(gt, "GSC FW not used, disabling gsccs\n");
+ }
+}
+
+static void check_sw_disable(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u64 sw_allowed = xe_configfs_get_engines_allowed(to_pci_dev(xe->drm.dev));
+ enum xe_hw_engine_id id;
+
+ for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
+ if (!(gt->info.engine_mask & BIT(id)))
+ continue;
+
+ if (!(sw_allowed & BIT(id))) {
+ gt->info.engine_mask &= ~BIT(id);
+ xe_gt_info(gt, "%s disabled via configfs\n",
+ engine_infos[id].name);
+ }
}
}
@@ -818,6 +832,7 @@ int xe_hw_engines_init_early(struct xe_gt *gt)
read_copy_fuses(gt);
read_compute_fuses(gt);
check_gsc_availability(gt);
+ check_sw_disable(gt);
BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);
@@ -1044,12 +1059,13 @@ struct xe_hw_engine *
xe_hw_engine_lookup(struct xe_device *xe,
struct drm_xe_engine_class_instance eci)
{
+ struct xe_gt *gt = xe_device_get_gt(xe, eci.gt_id);
unsigned int idx;
if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
return NULL;
- if (eci.gt_id >= xe->info.gt_count)
+ if (!gt)
return NULL;
idx = array_index_nospec(eci.engine_class,
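
check_sw_disable() above intersects the fused engine mask with the
configfs-provided allowlist; the effect, reduced to a standalone sketch
(the mask values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t engine_mask = 0x0f;	/* engines 0-3 survived fusing */
	uint64_t sw_allowed  = 0x05;	/* configfs allows engines 0 and 2 */

	for (unsigned int id = 0; id < 4; id++)
		if ((engine_mask & (1ull << id)) && !(sw_allowed & (1ull << id)))
			engine_mask &= ~(1ull << id);	/* disabled via configfs */

	printf("effective mask: %#llx\n", (unsigned long long)engine_mask);	/* 0x5 */
	return 0;
}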
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 2d68c5b5262a..c926f840c87b 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -13,15 +13,6 @@
#include "xe_vm.h"
static void
-hw_engine_group_free(struct drm_device *drm, void *arg)
-{
- struct xe_hw_engine_group *group = arg;
-
- destroy_workqueue(group->resume_wq);
- kfree(group);
-}
-
-static void
hw_engine_group_resume_lr_jobs_func(struct work_struct *w)
{
struct xe_exec_queue *q;
@@ -53,7 +44,7 @@ hw_engine_group_alloc(struct xe_device *xe)
struct xe_hw_engine_group *group;
int err;
- group = kzalloc(sizeof(*group), GFP_KERNEL);
+ group = drmm_kzalloc(&xe->drm, sizeof(*group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
@@ -61,14 +52,14 @@ hw_engine_group_alloc(struct xe_device *xe)
if (!group->resume_wq)
return ERR_PTR(-ENOMEM);
+ err = drmm_add_action_or_reset(&xe->drm, __drmm_workqueue_release, group->resume_wq);
+ if (err)
+ return ERR_PTR(err);
+
init_rwsem(&group->mode_sem);
INIT_WORK(&group->resume_work, hw_engine_group_resume_lr_jobs_func);
INIT_LIST_HEAD(&group->exec_queue_list);
- err = drmm_add_action_or_reset(&xe->drm, hw_engine_group_free, group);
- if (err)
- return ERR_PTR(err);
-
return group;
}
@@ -84,25 +75,18 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt)
enum xe_hw_engine_id id;
struct xe_hw_engine_group *group_rcs_ccs, *group_bcs, *group_vcs_vecs;
struct xe_device *xe = gt_to_xe(gt);
- int err;
group_rcs_ccs = hw_engine_group_alloc(xe);
- if (IS_ERR(group_rcs_ccs)) {
- err = PTR_ERR(group_rcs_ccs);
- goto err_group_rcs_ccs;
- }
+ if (IS_ERR(group_rcs_ccs))
+ return PTR_ERR(group_rcs_ccs);
group_bcs = hw_engine_group_alloc(xe);
- if (IS_ERR(group_bcs)) {
- err = PTR_ERR(group_bcs);
- goto err_group_bcs;
- }
+ if (IS_ERR(group_bcs))
+ return PTR_ERR(group_bcs);
group_vcs_vecs = hw_engine_group_alloc(xe);
- if (IS_ERR(group_vcs_vecs)) {
- err = PTR_ERR(group_vcs_vecs);
- goto err_group_vcs_vecs;
- }
+ if (IS_ERR(group_vcs_vecs))
+ return PTR_ERR(group_vcs_vecs);
for_each_hw_engine(hwe, gt, id) {
switch (hwe->class) {
@@ -125,15 +109,6 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt)
}
return 0;
-
-err_group_vcs_vecs:
- kfree(group_vcs_vecs);
-err_group_bcs:
- kfree(group_bcs);
-err_group_rcs_ccs:
- kfree(group_rcs_ccs);
-
- return err;
}
/**
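
The error-unwinding labels could be deleted above because both resources are
now DRM-managed; the key semantics, sketched as a comment (API as used in the
hunk, no new code implied):

	/*
	 * drmm_kzalloc(): memory is freed automatically at drm device release.
	 * drmm_add_action_or_reset(): registers a release action; if the
	 * registration itself fails, the action runs immediately ("or_reset"),
	 * so the workqueue is destroyed even on that error path and callers
	 * can simply return instead of unwinding by hand.
	 */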
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
index 0b4f12be3692..b2a0c46dfcd4 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence.c
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -100,6 +100,9 @@ void xe_hw_fence_irq_finish(struct xe_hw_fence_irq *irq)
spin_unlock_irqrestore(&irq->lock, flags);
dma_fence_end_signalling(tmp);
}
+
+ /* Safe release of the irq->lock used in dma_fence_init. */
+ synchronize_rcu();
}
void xe_hw_fence_irq_run(struct xe_hw_fence_irq *irq)
@@ -165,7 +168,7 @@ static bool xe_hw_fence_signaled(struct dma_fence *dma_fence)
u32 seqno = xe_map_rd(xe, &fence->seqno_map, 0, u32);
return dma_fence->error ||
- !__dma_fence_is_later(dma_fence->seqno, seqno, dma_fence->ops);
+ !__dma_fence_is_later(dma_fence, dma_fence->seqno, seqno);
}
static bool xe_hw_fence_enable_signaling(struct dma_fence *dma_fence)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index f008e8049700..c17ed1ae8649 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -20,6 +20,8 @@
#include "xe_pcode_api.h"
#include "xe_sriov.h"
#include "xe_pm.h"
+#include "xe_vsec.h"
+#include "regs/xe_pmt.h"
enum xe_hwmon_reg {
REG_TEMP,
@@ -51,6 +53,14 @@ enum xe_fan_channel {
FAN_MAX,
};
+/* Attribute index for powerX_xxx_interval sysfs entries */
+enum sensor_attr_power {
+ SENSOR_INDEX_PSYS_PL1,
+ SENSOR_INDEX_PKG_PL1,
+ SENSOR_INDEX_PSYS_PL2,
+ SENSOR_INDEX_PKG_PL2,
+};
+
/*
* For platforms that support mailbox commands for power limits, REG_PKG_POWER_SKU_UNIT is
* not supported and below are SKU units to be used.
@@ -72,8 +82,9 @@ enum xe_fan_channel {
* PL*_HWMON_ATTR - mapping of hardware power limits to corresponding hwmon power attribute.
*/
#define PL1_HWMON_ATTR hwmon_power_max
+#define PL2_HWMON_ATTR hwmon_power_cap
-#define PWR_ATTR_TO_STR(attr) (((attr) == hwmon_power_max) ? "PL1" : "Invalid")
+#define PWR_ATTR_TO_STR(attr) (((attr) == hwmon_power_max) ? "PL1" : "PL2")
/*
* Timeout for power limit write mailbox command.
@@ -124,6 +135,9 @@ struct xe_hwmon {
bool boot_power_limit_read;
/** @pl1_on_boot: power limit PL1 on boot */
u32 pl1_on_boot[CHANNEL_MAX];
+ /** @pl2_on_boot: power limit PL2 on boot */
+ u32 pl2_on_boot[CHANNEL_MAX];
+
};
static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 attr, int channel,
@@ -151,8 +165,10 @@ static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 att
/* return the value only if limit is enabled */
if (attr == PL1_HWMON_ATTR)
*uval = (val0 & PWR_LIM_EN) ? val0 : 0;
+ else if (attr == PL2_HWMON_ATTR)
+ *uval = (val1 & PWR_LIM_EN) ? val1 : 0;
else if (attr == hwmon_power_label)
- *uval = (val0 & PWR_LIM_EN) ? 1 : 0;
+ *uval = (val0 & PWR_LIM_EN) ? 1 : (val1 & PWR_LIM_EN) ? 1 : 0;
else
*uval = 0;
@@ -180,6 +196,8 @@ static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr
if (attr == PL1_HWMON_ATTR)
val0 = (val0 & ~clr) | set;
+ else if (attr == PL2_HWMON_ATTR)
+ val1 = (val1 & ~clr) | set;
else
return -EIO;
@@ -236,12 +254,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
return GT_PERF_STATUS;
break;
case REG_PKG_ENERGY_STATUS:
- if (xe->info.platform == XE_BATTLEMAGE) {
- if (channel == CHANNEL_PKG)
- return BMG_PACKAGE_ENERGY_STATUS;
- else
- return BMG_PLATFORM_ENERGY_STATUS;
- } else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
return PVC_GT0_PLATFORM_ENERGY_STATUS;
} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
return PCU_CR_PACKAGE_ENERGY_STATUS;
@@ -273,7 +286,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
*/
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
{
- u64 reg_val, min, max;
+ u64 reg_val = 0, min, max;
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
@@ -285,16 +298,6 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
} else {
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
-
- /*
- * Valid check of REG_PKG_RAPL_LIMIT is already done in xe_hwmon_power_is_visible.
- * So not checking it again here.
- */
- if (!xe_reg_is_valid(pkg_power_sku)) {
- drm_warn(&xe->drm, "pkg_power_sku invalid\n");
- *value = 0;
- goto unlock;
- }
reg_val = xe_mmio_read32(mmio, rapl_limit);
}
@@ -327,8 +330,9 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
int ret = 0;
- u32 reg_val;
+ u32 reg_val, max;
struct xe_reg rapl_limit;
+ u64 max_supp_power_limit = 0;
mutex_lock(&hwmon->hwmon_lock);
@@ -353,22 +357,41 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
goto unlock;
}
+ /*
+ * If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to
+ * the supported maximum (U12.3 format).
+ * This is to avoid truncation during the reg_val calculation below and
+ * to ensure a valid power limit is sent to pcode, which will clamp it
+ * to the card-supported value.
+ */
+ max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER;
+ if (value > max_supp_power_limit) {
+ value = max_supp_power_limit;
+ drm_info(&hwmon->xe->drm,
+ "Power limit clamped as selected %s exceeds channel %d limit\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ }
+
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
- reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val);
/*
- * Clamp power limit to card-firmware default as maximum, as an additional protection to
+ * Clamp power limit to GPU firmware default as maximum, as an additional protection to
* pcode clamp.
*/
if (hwmon->xe->info.has_mbx_power_limits) {
- if (reg_val > REG_FIELD_GET(PWR_LIM_VAL, hwmon->pl1_on_boot[channel])) {
- reg_val = REG_FIELD_GET(PWR_LIM_VAL, hwmon->pl1_on_boot[channel]);
- drm_dbg(&hwmon->xe->drm, "Clamping power limit to firmware default 0x%x\n",
+ max = (attr == PL1_HWMON_ATTR) ?
+ hwmon->pl1_on_boot[channel] : hwmon->pl2_on_boot[channel];
+ max = REG_FIELD_PREP(PWR_LIM_VAL, max);
+ if (reg_val > max) {
+ reg_val = max;
+ drm_dbg(&hwmon->xe->drm,
+ "Clamping power limit to GPU firmware default 0x%x\n",
reg_val);
}
}
+ reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val);
+
if (hwmon->xe->info.has_mbx_power_limits)
ret = xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM, reg_val);
else
@@ -427,16 +450,37 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
- u64 reg_val;
+ u32 reg_val;
+ int ret = 0;
- reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
- channel));
+ /* Energy is supported only for card and pkg */
+ if (channel > CHANNEL_PKG) {
+ *energy = 0;
+ return;
+ }
- if (reg_val >= ei->reg_val_prev)
- ei->accum_energy += reg_val - ei->reg_val_prev;
- else
- ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+ if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
+ u64 pmt_val;
+ ret = xe_pmt_telem_read(to_pci_dev(hwmon->xe->drm.dev),
+ xe_mmio_read32(mmio, PUNIT_TELEMETRY_GUID),
+ &pmt_val, BMG_ENERGY_STATUS_PMT_OFFSET, sizeof(pmt_val));
+ if (ret != sizeof(pmt_val)) {
+ drm_warn(&hwmon->xe->drm, "energy read from pmt failed, ret %d\n", ret);
+ *energy = 0;
+ return;
+ }
+
+ if (channel == CHANNEL_PKG)
+ reg_val = REG_FIELD_GET64(ENERGY_PKG, pmt_val);
+ else
+ reg_val = REG_FIELD_GET64(ENERGY_CARD, pmt_val);
+ } else {
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel));
+ }
+
+ ei->accum_energy += reg_val - ei->reg_val_prev;
ei->reg_val_prev = reg_val;
*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
@@ -451,8 +495,9 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
- int channel = to_sensor_dev_attr(attr)->index;
- u32 power_attr = PL1_HWMON_ATTR;
+ int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
+
int ret = 0;
xe_pm_runtime_get(hwmon->xe);
@@ -505,9 +550,9 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
u32 x, y, rxy, x_w = 2; /* 2 bits */
u64 tau4, r, max_win;
unsigned long val;
+ int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
int ret;
- int channel = to_sensor_dev_attr(attr)->index;
- u32 power_attr = PL1_HWMON_ATTR;
ret = kstrtoul(buf, 0, &val);
if (ret)
@@ -534,10 +579,8 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
- if (val > max_win) {
- drm_warn(&hwmon->xe->drm, "power_interval invalid val 0x%lx\n", val);
+ if (val > max_win)
return -EINVAL;
- }
/* val in hw units */
val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME) + 1;
@@ -578,15 +621,25 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
/* PSYS PL1 */
static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
xe_hwmon_power_max_interval_show,
- xe_hwmon_power_max_interval_store, CHANNEL_CARD);
-
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL1);
+/* PKG PL1 */
static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
xe_hwmon_power_max_interval_show,
- xe_hwmon_power_max_interval_store, CHANNEL_PKG);
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL1);
+/* PSYS PL2 */
+static SENSOR_DEVICE_ATTR(power1_cap_interval, 0664,
+ xe_hwmon_power_max_interval_show,
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL2);
+/* PKG PL2 */
+static SENSOR_DEVICE_ATTR(power2_cap_interval, 0664,
+ xe_hwmon_power_max_interval_show,
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL2);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_max_interval.dev_attr.attr,
&sensor_dev_attr_power2_max_interval.dev_attr.attr,
+ &sensor_dev_attr_power1_cap_interval.dev_attr.attr,
+ &sensor_dev_attr_power2_cap_interval.dev_attr.attr,
NULL
};
@@ -596,19 +649,22 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
- int channel = index ? CHANNEL_PKG : CHANNEL_CARD;
- u32 power_attr = PL1_HWMON_ATTR;
- u32 uval;
+ int channel = (index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = (index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
+ u32 uval = 0;
+ struct xe_reg rapl_limit;
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
xe_pm_runtime_get(hwmon->xe);
if (hwmon->xe->info.has_mbx_power_limits) {
xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &uval);
- ret = (uval & PWR_LIM_EN) ? attr->mode : 0;
- } else {
- ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
- channel)) ? attr->mode : 0;
+ } else if (power_attr != PL2_HWMON_ATTR) {
+ rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ if (xe_reg_is_valid(rapl_limit))
+ uval = xe_mmio_read32(mmio, rapl_limit);
}
+ ret = (uval & PWR_LIM_EN) ? attr->mode : 0;
xe_pm_runtime_put(hwmon->xe);
@@ -628,8 +684,9 @@ static const struct attribute_group *hwmon_groups[] = {
static const struct hwmon_channel_info * const hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
- HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT,
- HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT |
+ HWMON_P_CAP,
+ HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CAP),
HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
@@ -697,9 +754,23 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
{
int ret;
u32 uval;
+ u64 max_crit_power_curr = 0;
mutex_lock(&hwmon->hwmon_lock);
+ /*
+ * If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1
+ * max supported value, clamp it to the command's max (U10.6 format).
+ * This is to avoid truncation during the uval calculation below and
+ * to ensure a valid power limit is sent to pcode, which will clamp it
+ * to the card-supported value.
+ */
+ max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor;
+ if (value > max_crit_power_curr) {
+ value = max_crit_power_curr;
+ drm_info(&hwmon->xe->drm,
+ "Power limit clamped as selected exceeds channel %d limit\n",
+ channel);
+ }
uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
ret = xe_hwmon_pcode_write_i1(hwmon, uval);
@@ -750,31 +821,62 @@ xe_hwmon_temp_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
- u32 uval;
+ u32 uval = 0;
+ struct xe_reg reg;
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
switch (attr) {
case hwmon_power_max:
+ case hwmon_power_cap:
if (hwmon->xe->info.has_mbx_power_limits) {
xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
- return (uval) ? 0664 : 0;
- } else {
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
- channel)) ? 0664 : 0;
+ } else if (attr != PL2_HWMON_ATTR) {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+ }
+ if (uval & PWR_LIM_EN) {
+ drm_info(&hwmon->xe->drm, "%s is supported on channel %d\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ return 0664;
}
+ drm_dbg(&hwmon->xe->drm, "%s is unsupported on channel %d\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ return 0;
case hwmon_power_rated_max:
- if (hwmon->xe->info.has_mbx_power_limits)
+ if (hwmon->xe->info.has_mbx_power_limits) {
return 0;
- else
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
- channel)) ? 0444 : 0;
+ } else {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+ return uval ? 0444 : 0;
+ }
case hwmon_power_crit:
- case hwmon_power_label:
if (channel == CHANNEL_CARD) {
xe_hwmon_pcode_read_i1(hwmon, &uval);
- return (uval & POWER_SETUP_I1_WATTS) ? (attr == hwmon_power_label) ?
- 0444 : 0644 : 0;
+ return (uval & POWER_SETUP_I1_WATTS) ? 0644 : 0;
}
break;
+ case hwmon_power_label:
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
+ } else {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+
+ if (!uval) {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+ }
+ }
+ if ((!(uval & PWR_LIM_EN)) && channel == CHANNEL_CARD) {
+ xe_hwmon_pcode_read_i1(hwmon, &uval);
+ return (uval & POWER_SETUP_I1_WATTS) ? 0444 : 0;
+ }
+ return (uval) ? 0444 : 0;
default:
return 0;
}
@@ -786,6 +888,7 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_power_max:
+ case hwmon_power_cap:
xe_hwmon_power_max_read(hwmon, attr, channel, val);
return 0;
case hwmon_power_rated_max:
@@ -802,6 +905,7 @@ static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
switch (attr) {
+ case hwmon_power_cap:
case hwmon_power_max:
return xe_hwmon_power_max_write(hwmon, attr, channel, val);
case hwmon_power_crit:
@@ -884,11 +988,18 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
static umode_t
xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
+ long energy = 0;
+
switch (attr) {
case hwmon_energy_input:
case hwmon_energy_label:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
- channel)) ? 0444 : 0;
+ if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
+ xe_hwmon_energy_get(hwmon, channel, &energy);
+ return energy ? 0444 : 0;
+ } else {
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel)) ? 0444 : 0;
+ }
default:
return 0;
}
@@ -1124,13 +1235,17 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
struct xe_reg pkg_power_sku_unit;
if (hwmon->xe->info.has_mbx_power_limits) {
- /* Check if card firmware support mailbox power limits commands. */
+ /* Check if GPU firmware supports mailbox power limit commands. */
if (xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_CARD,
&hwmon->pl1_on_boot[CHANNEL_CARD]) |
xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_PKG,
- &hwmon->pl1_on_boot[CHANNEL_PKG])) {
+ &hwmon->pl1_on_boot[CHANNEL_PKG]) |
+ xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_CARD,
+ &hwmon->pl2_on_boot[CHANNEL_CARD]) |
+ xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_PKG,
+ &hwmon->pl2_on_boot[CHANNEL_PKG])) {
drm_warn(&hwmon->xe->drm,
- "Failed to read power limits, check card firmware !\n");
+ "Failed to read power limits, check GPU firmware !\n");
} else {
drm_info(&hwmon->xe->drm, "Using mailbox commands for power limits\n");
/* Write default limits to read from pcode from now on. */
@@ -1140,6 +1255,12 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
hwmon->pl1_on_boot[CHANNEL_PKG]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR,
+ CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl2_on_boot[CHANNEL_CARD]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR,
+ CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl2_on_boot[CHANNEL_PKG]);
hwmon->scl_shift_power = PWR_UNIT;
hwmon->scl_shift_energy = ENERGY_UNIT;
hwmon->scl_shift_time = TIME_UNIT;
@@ -1223,4 +1344,4 @@ int xe_hwmon_register(struct xe_device *xe)
return 0;
}
-
+MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY");
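
The four *_interval attributes above pack channel and power limit into a
single sensor index; a standalone decode table mirroring the (index % 2) and
(index > 1) logic used in the show/store paths:

#include <stdio.h>

enum { PSYS_PL1, PKG_PL1, PSYS_PL2, PKG_PL2 };	/* sensor_attr_power order */

int main(void)
{
	static const char *const channel[] = { "CARD", "PKG" };
	static const char *const limit[] = { "PL1", "PL2" };

	for (int index = PSYS_PL1; index <= PKG_PL2; index++)
		printf("index %d -> channel %s, limit %s\n",
		       index, channel[index % 2], limit[index > 1]);
	return 0;
}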
diff --git a/drivers/gpu/drm/xe/xe_i2c.c b/drivers/gpu/drm/xe/xe_i2c.c
new file mode 100644
index 000000000000..bc7dc2099470
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_i2c.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Intel Xe I2C attached Microcontroller Units (MCU)
+ *
+ * Copyright (C) 2025 Intel Corporation.
+ */
+
+#include <linux/array_size.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/notifier.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "regs/xe_i2c_regs.h"
+#include "regs/xe_irq_regs.h"
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_i2c.h"
+#include "xe_mmio.h"
+#include "xe_platform_types.h"
+
+/**
+ * DOC: Xe I2C devices
+ *
+ * Register a platform device for the I2C host controller (Synopsys DesignWare
+ * I2C) if the registers of that controller are mapped to the MMIO, and also the
+ * I2C client device for the Add-In Management Controller (the MCU) attached to
+ * the host controller.
+ *
+ * See drivers/i2c/busses/i2c-designware-* for more information on the I2C host
+ * controller.
+ */
+
+static const char adapter_name[] = "i2c_designware";
+
+static const struct property_entry xe_i2c_adapter_properties[] = {
+ PROPERTY_ENTRY_STRING("compatible", "intel,xe-i2c"),
+ PROPERTY_ENTRY_U32("clock-frequency", I2C_MAX_FAST_MODE_PLUS_FREQ),
+ { }
+};
+
+static inline void xe_i2c_read_endpoint(struct xe_mmio *mmio, void *ep)
+{
+ u32 *val = ep;
+
+ val[0] = xe_mmio_read32(mmio, REG_SG_REMAP_ADDR_PREFIX);
+ val[1] = xe_mmio_read32(mmio, REG_SG_REMAP_ADDR_POSTFIX);
+}
+
+static void xe_i2c_client_work(struct work_struct *work)
+{
+ struct xe_i2c *i2c = container_of(work, struct xe_i2c, work);
+ struct i2c_board_info info = {
+ .type = "amc",
+ .flags = I2C_CLIENT_HOST_NOTIFY,
+ .addr = i2c->ep.addr[1],
+ };
+
+ i2c->client[0] = i2c_new_client_device(i2c->adapter, &info);
+}
+
+static int xe_i2c_notifier(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct xe_i2c *i2c = container_of(nb, struct xe_i2c, bus_notifier);
+ struct i2c_adapter *adapter = i2c_verify_adapter(data);
+ struct device *dev = data;
+
+ if (action == BUS_NOTIFY_ADD_DEVICE &&
+ adapter && dev->parent == &i2c->pdev->dev) {
+ i2c->adapter = adapter;
+ schedule_work(&i2c->work);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int xe_i2c_register_adapter(struct xe_i2c *i2c)
+{
+ struct pci_dev *pci = to_pci_dev(i2c->drm_dev);
+ struct platform_device *pdev;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ fwnode = fwnode_create_software_node(xe_i2c_adapter_properties, NULL);
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
+
+ /*
+ * Not using platform_device_register_full() here because we don't have
+ * a handle to the platform_device before it returns. xe_i2c_notifier()
+ * uses that handle, but it may be called before
+ * platform_device_register_full() is done.
+ */
+ pdev = platform_device_alloc(adapter_name, pci_dev_id(pci));
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto err_fwnode_remove;
+ }
+
+ if (i2c->adapter_irq) {
+ struct resource res;
+
+ res = DEFINE_RES_IRQ_NAMED(i2c->adapter_irq, "xe_i2c");
+
+ ret = platform_device_add_resources(pdev, &res, 1);
+ if (ret)
+ goto err_pdev_put;
+ }
+
+ pdev->dev.parent = i2c->drm_dev;
+ pdev->dev.fwnode = fwnode;
+ i2c->adapter_node = fwnode;
+ i2c->pdev = pdev;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto err_pdev_put;
+
+ return 0;
+
+err_pdev_put:
+ platform_device_put(pdev);
+err_fwnode_remove:
+ fwnode_remove_software_node(fwnode);
+
+ return ret;
+}
+
+static void xe_i2c_unregister_adapter(struct xe_i2c *i2c)
+{
+ platform_device_unregister(i2c->pdev);
+ fwnode_remove_software_node(i2c->adapter_node);
+}
+
+/**
+ * xe_i2c_irq_handler - Handler for I2C interrupts
+ * @xe: xe device instance
+ * @master_ctl: interrupt register
+ *
+ * Forward interrupts generated by the I2C host adapter to the I2C host adapter
+ * driver.
+ */
+void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl)
+{
+ if (!xe->i2c || !xe->i2c->adapter_irq)
+ return;
+
+ if (master_ctl & I2C_IRQ)
+ generic_handle_irq_safe(xe->i2c->adapter_irq);
+}
+
+static int xe_i2c_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw_irq_num)
+{
+ irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
+ return 0;
+}
+
+static const struct irq_domain_ops xe_i2c_irq_ops = {
+ .map = xe_i2c_irq_map,
+};
+
+static int xe_i2c_create_irq(struct xe_i2c *i2c)
+{
+ struct irq_domain *domain;
+
+ if (!(i2c->ep.capabilities & XE_I2C_EP_CAP_IRQ))
+ return 0;
+
+ domain = irq_domain_create_linear(dev_fwnode(i2c->drm_dev), 1, &xe_i2c_irq_ops, NULL);
+ if (!domain)
+ return -ENOMEM;
+
+ i2c->adapter_irq = irq_create_mapping(domain, 0);
+ i2c->irqdomain = domain;
+
+ return 0;
+}
+
+static void xe_i2c_remove_irq(struct xe_i2c *i2c)
+{
+ if (!i2c->irqdomain)
+ return;
+
+ irq_dispose_mapping(i2c->adapter_irq);
+ irq_domain_remove(i2c->irqdomain);
+}
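
The create/dispose pair above follows the usual demux pattern: allocate a
one-entry linear IRQ domain, map hwirq 0 to a virq, and have the parent's
interrupt path forward into that virq. A minimal sketch of the same pattern,
with hypothetical demo_* names that are not part of this patch:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int demo_irq_setup(struct fwnode_handle *fwnode,
                          const struct irq_domain_ops *ops,
                          struct irq_domain **domain_out)
{
        struct irq_domain *domain;

        domain = irq_domain_create_linear(fwnode, 1, ops, NULL);
        if (!domain)
                return -ENOMEM;

        *domain_out = domain;
        /* hwirq 0 is the only line; the returned virq is handed to the child. */
        return irq_create_mapping(domain, 0);
}

/* Parent hard-IRQ path: demultiplex the shared status into the child virq. */
static void demo_irq_forward(unsigned int child_virq, u32 status, u32 bit)
{
        if (status & bit)
                generic_handle_irq_safe(child_virq);
}
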
+
+static int xe_i2c_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct xe_i2c *i2c = context;
+
+ *val = xe_mmio_read32(i2c->mmio, XE_REG(reg + I2C_MEM_SPACE_OFFSET));
+
+ return 0;
+}
+
+static int xe_i2c_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct xe_i2c *i2c = context;
+
+ xe_mmio_write32(i2c->mmio, XE_REG(reg + I2C_MEM_SPACE_OFFSET), val);
+
+ return 0;
+}
+
+static const struct regmap_config i2c_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_read = xe_i2c_read,
+ .reg_write = xe_i2c_write,
+ .fast_io = true,
+};
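
With .reg_read/.reg_write callbacks and no bus, regmap_read()/regmap_write()
dispatch directly to the functions above, passing back the context pointer
supplied at init time. A minimal consumer sketch (assumed usage, hypothetical
demo_* name):

#include <linux/regmap.h>

static int demo_regmap_roundtrip(struct regmap *map)
{
        unsigned int val;
        int err;

        /* Ends up in xe_i2c_write(ctx, 0x0, 0x12345678). */
        err = regmap_write(map, 0x0, 0x12345678);
        if (err)
                return err;

        /* Ends up in xe_i2c_read(ctx, 0x0, &val). */
        return regmap_read(map, 0x0, &val);
}
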
+
+void xe_i2c_pm_suspend(struct xe_device *xe)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+
+ if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ return;
+
+ xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D3hot);
+ drm_dbg(&xe->drm, "pmcsr: 0x%08x\n", xe_mmio_read32(mmio, I2C_CONFIG_PMCSR));
+}
+
+void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+
+ if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ return;
+
+ if (d3cold)
+ xe_mmio_rmw32(mmio, I2C_CONFIG_CMD, 0, PCI_COMMAND_MEMORY);
+
+ xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D0);
+ drm_dbg(&xe->drm, "pmcsr: 0x%08x\n", xe_mmio_read32(mmio, I2C_CONFIG_PMCSR));
+}
+
+static void xe_i2c_remove(void *data)
+{
+ struct xe_i2c *i2c = data;
+ unsigned int i;
+
+ for (i = 0; i < XE_I2C_MAX_CLIENTS; i++)
+ i2c_unregister_device(i2c->client[i]);
+
+ bus_unregister_notifier(&i2c_bus_type, &i2c->bus_notifier);
+ xe_i2c_unregister_adapter(i2c);
+ xe_i2c_remove_irq(i2c);
+}
+
+/**
+ * xe_i2c_probe() - Probe the I2C host adapter and the I2C clients attached to it
+ * @xe: xe device instance
+ *
+ * Register all the I2C devices described in the I2C Endpoint data structure.
+ *
+ * Return: 0 on success, error code on failure
+ */
+int xe_i2c_probe(struct xe_device *xe)
+{
+ struct device *drm_dev = xe->drm.dev;
+ struct xe_i2c_endpoint ep;
+ struct regmap *regmap;
+ struct xe_i2c *i2c;
+ int ret;
+
+ if (xe->info.platform != XE_BATTLEMAGE)
+ return 0;
+
+ if (IS_SRIOV_VF(xe))
+ return 0;
+
+ xe_i2c_read_endpoint(xe_root_tile_mmio(xe), &ep);
+ if (ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ return 0;
+
+ i2c = devm_kzalloc(drm_dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ INIT_WORK(&i2c->work, xe_i2c_client_work);
+ i2c->mmio = xe_root_tile_mmio(xe);
+ i2c->drm_dev = drm_dev;
+ i2c->ep = ep;
+ xe->i2c = i2c;
+
+ /* PCI PM isn't aware of this device; bring it up and match it with the SGUnit state. */
+ xe_i2c_pm_resume(xe, true);
+
+ regmap = devm_regmap_init(drm_dev, NULL, i2c, &i2c_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ i2c->bus_notifier.notifier_call = xe_i2c_notifier;
+ ret = bus_register_notifier(&i2c_bus_type, &i2c->bus_notifier);
+ if (ret)
+ return ret;
+
+ ret = xe_i2c_create_irq(i2c);
+ if (ret)
+ goto err_unregister_notifier;
+
+ ret = xe_i2c_register_adapter(i2c);
+ if (ret)
+ goto err_remove_irq;
+
+ return devm_add_action_or_reset(drm_dev, xe_i2c_remove, i2c);
+
+err_remove_irq:
+ xe_i2c_remove_irq(i2c);
+
+err_unregister_notifier:
+ bus_unregister_notifier(&i2c_bus_type, &i2c->bus_notifier);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_i2c.h b/drivers/gpu/drm/xe/xe_i2c.h
new file mode 100644
index 000000000000..b767ed8ce52b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_i2c.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _XE_I2C_H_
+#define _XE_I2C_H_
+
+#include <linux/bits.h>
+#include <linux/notifier.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct device;
+struct fwnode_handle;
+struct i2c_adapter;
+struct i2c_client;
+struct irq_domain;
+struct platform_device;
+struct xe_device;
+struct xe_mmio;
+
+#define XE_I2C_MAX_CLIENTS 3
+
+#define XE_I2C_EP_COOKIE_DEVICE 0xde
+
+/* Endpoint Capabilities */
+#define XE_I2C_EP_CAP_IRQ BIT(0)
+
+struct xe_i2c_endpoint {
+ u8 cookie;
+ u8 capabilities;
+ u16 addr[XE_I2C_MAX_CLIENTS];
+};
+
+struct xe_i2c {
+ struct fwnode_handle *adapter_node;
+ struct platform_device *pdev;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client[XE_I2C_MAX_CLIENTS];
+
+ struct notifier_block bus_notifier;
+ struct work_struct work;
+
+ struct irq_domain *irqdomain;
+ int adapter_irq;
+
+ struct xe_i2c_endpoint ep;
+ struct device *drm_dev;
+
+ struct xe_mmio *mmio;
+};
+
+#if IS_ENABLED(CONFIG_I2C)
+int xe_i2c_probe(struct xe_device *xe);
+void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl);
+void xe_i2c_pm_suspend(struct xe_device *xe);
+void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold);
+#else
+static inline int xe_i2c_probe(struct xe_device *xe) { return 0; }
+static inline void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl) { }
+static inline void xe_i2c_pm_suspend(struct xe_device *xe) { }
+static inline void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold) { }
+#endif
+
+#endif
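
The #else branch above is the standard stub idiom: when CONFIG_I2C is off,
callers compile against no-op inlines and need no #ifdef at each call site.
The same idiom in generic form (hypothetical CONFIG_FOO/foo_* names, shown
only as a sketch):

#if IS_ENABLED(CONFIG_FOO)
int foo_init(struct xe_device *xe);
void foo_fini(struct xe_device *xe);
#else
static inline int foo_init(struct xe_device *xe) { return 0; }
static inline void foo_fini(struct xe_device *xe) { }
#endif
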
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 5362d3174b06..5df5b8c2a3e4 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -18,10 +18,12 @@
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
+#include "xe_i2c.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_pxp.h"
#include "xe_sriov.h"
+#include "xe_tile.h"
/*
* Interrupt registers for a unit are always consecutive and ordered
@@ -160,7 +162,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
dmask = irqs << 16 | irqs;
smask = irqs << 16;
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
/* Enable interrupts for each engine class */
xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
if (ccs_mask)
@@ -260,7 +262,7 @@ gt_engine_identity(struct xe_device *xe,
static void
gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
{
- if (instance == OTHER_GUC_INSTANCE && !xe_gt_is_media_type(gt))
+ if (instance == OTHER_GUC_INSTANCE && xe_gt_is_main_type(gt))
return xe_guc_irq_handler(&gt->uc.guc, iir);
if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
return xe_guc_irq_handler(&gt->uc.guc, iir);
@@ -476,6 +478,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
if (xe->info.has_heci_cscfi)
xe_heci_csc_irq_handler(xe, master_ctl);
xe_display_irq_handler(xe, master_ctl);
+ xe_i2c_irq_handler(xe, master_ctl);
gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
}
}
@@ -550,7 +553,7 @@ static void xelp_irq_reset(struct xe_tile *tile)
static void dg1_irq_reset(struct xe_tile *tile)
{
- if (tile->id == 0)
+ if (xe_tile_is_root(tile))
dg1_intr_disable(tile_to_xe(tile));
gt_irq_reset(tile);
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 023ed6a6b49d..a2000307d5bf 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -11,6 +11,7 @@
#include "xe_assert.h"
#include "xe_bo.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_mmio.h"
@@ -80,7 +81,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
lmtt_assert(lmtt, xe_bo_is_vram(bo));
lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE));
- xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, bo->size);
+ xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, xe_bo_size(bo));
pt->level = level;
pt->bo = bo;
@@ -222,6 +223,58 @@ void xe_lmtt_init_hw(struct xe_lmtt *lmtt)
lmtt_setup_dir_ptr(lmtt);
}
+static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
+{
+ struct xe_gt_tlb_invalidation_fence fences[XE_MAX_GT_PER_TILE];
+ struct xe_gt_tlb_invalidation_fence *fence = fences;
+ struct xe_tile *tile = lmtt_to_tile(lmtt);
+ struct xe_gt *gt;
+ int result = 0;
+ int err;
+ u8 id;
+
+ for_each_gt_on_tile(gt, tile, id) {
+ xe_gt_tlb_invalidation_fence_init(gt, fence, true);
+ err = xe_gt_tlb_invalidation_all(gt, fence);
+ result = result ?: err;
+ fence++;
+ }
+
+ lmtt_debug(lmtt, "num_fences=%d err=%d\n", (int)(fence - fences), result);
+
+ /*
+ * It is fine to wait for all fences, even for those covering an
+ * invalidation request that failed, as such a fence should already
+ * be marked as signaled.
+ */
+ fence = fences;
+ for_each_gt_on_tile(gt, tile, id)
+ xe_gt_tlb_invalidation_fence_wait(fence++);
+
+ return result;
+}
+
+/**
+ * xe_lmtt_invalidate_hw - Invalidate LMTT hardware.
+ * @lmtt: the &xe_lmtt to invalidate
+ *
+ * Send requests to all GuCs on this tile to invalidate all TLBs.
+ *
+ * This function should be called only when running as a PF driver.
+ */
+void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt)
+{
+ struct xe_device *xe = lmtt_to_xe(lmtt);
+ int err;
+
+ lmtt_assert(lmtt, IS_SRIOV_PF(xe));
+
+ err = lmtt_invalidate_hw(lmtt);
+ if (err)
+ xe_sriov_warn(xe, "LMTT%u invalidation failed (%pe)\n",
+ lmtt_to_tile(lmtt)->id, ERR_PTR(err));
+}
+
static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
u64 pte, unsigned int idx)
{
@@ -276,6 +329,7 @@ static void lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid)
return;
lmtt_write_pte(lmtt, pd, LMTT_PTE_INVALID, vfid);
+ lmtt_invalidate_hw(lmtt);
lmtt_assert(lmtt, pd->level > 0);
lmtt_assert(lmtt, pt->level == pd->level - 1);
@@ -397,11 +451,11 @@ static void lmtt_insert_bo(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo
u64 addr, vram_offset;
lmtt_assert(lmtt, IS_ALIGNED(start, page_size));
- lmtt_assert(lmtt, IS_ALIGNED(bo->size, page_size));
+ lmtt_assert(lmtt, IS_ALIGNED(xe_bo_size(bo), page_size));
lmtt_assert(lmtt, xe_bo_is_vram(bo));
vram_offset = vram_region_gpu_offset(bo->ttm.resource);
- xe_res_first(bo->ttm.resource, 0, bo->size, &cur);
+ xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
while (cur.remaining) {
addr = xe_res_dma(&cur);
addr += vram_offset; /* XXX */
diff --git a/drivers/gpu/drm/xe/xe_lmtt.h b/drivers/gpu/drm/xe/xe_lmtt.h
index cb10ef994db6..75a234fbf367 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.h
+++ b/drivers/gpu/drm/xe/xe_lmtt.h
@@ -15,6 +15,7 @@ struct xe_lmtt_ops;
#ifdef CONFIG_PCI_IOV
int xe_lmtt_init(struct xe_lmtt *lmtt);
void xe_lmtt_init_hw(struct xe_lmtt *lmtt);
+void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt);
int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range);
int xe_lmtt_populate_pages(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 offset);
void xe_lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid);
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 6e7b70532d11..6d38411bdeba 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -39,15 +39,46 @@
#define LRC_ENGINE_INSTANCE GENMASK_ULL(53, 48)
#define LRC_PPHWSP_SIZE SZ_4K
+#define LRC_INDIRECT_CTX_BO_SIZE SZ_4K
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
#define LRC_WA_BB_SIZE SZ_4K
+/*
+ * Layout of the LRC and associated data allocated as
+ * lrc->bo:
+ *
+ * Region Size
+ * +============================+=================================+ <- __xe_lrc_ring_offset()
+ * | Ring | ring_size, see |
+ * | | xe_lrc_init() |
+ * +============================+=================================+ <- __xe_lrc_pphwsp_offset()
+ * | PPHWSP (includes SW state) | 4K |
+ * +----------------------------+---------------------------------+ <- __xe_lrc_regs_offset()
+ * | Engine Context Image | n * 4K, see |
+ * | | xe_gt_lrc_size() |
+ * +----------------------------+---------------------------------+ <- __xe_lrc_indirect_ring_offset()
+ * | Indirect Ring State Page | 0 or 4K, see |
+ * | | XE_LRC_FLAG_INDIRECT_RING_STATE |
+ * +============================+=================================+ <- __xe_lrc_indirect_ctx_offset()
+ * | Indirect Context Page | 0 or 4K, see |
+ * | | XE_LRC_FLAG_INDIRECT_CTX |
+ * +============================+=================================+ <- __xe_lrc_wa_bb_offset()
+ * | WA BB Per Ctx | 4K |
+ * +============================+=================================+ <- xe_bo_size(lrc->bo)
+ */
+
static struct xe_device *
lrc_to_xe(struct xe_lrc *lrc)
{
return gt_to_xe(lrc->fence_ctx.gt);
}
+static bool
+gt_engine_needs_indirect_ctx(struct xe_gt *gt, enum xe_engine_class class)
+{
+ return false;
+}
+
size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -582,8 +613,6 @@ static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
if (xe_gt_has_indirect_ring_state(hwe->gt))
regs[CTX_CONTEXT_CONTROL] |=
_MASKED_BIT_ENABLE(CTX_CTRL_INDIRECT_RING_STATE_ENABLE);
-
- /* TODO: Timestamp */
}
static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
@@ -655,8 +684,8 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
#define LRC_SEQNO_PPHWSP_OFFSET 512
#define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8)
+#define LRC_ENGINE_ID_PPHWSP_OFFSET 1024
#define LRC_PARALLEL_PPHWSP_OFFSET 2048
-#define LRC_ENGINE_ID_PPHWSP_OFFSET 2096
u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
{
@@ -717,8 +746,23 @@ static u32 __xe_lrc_ctx_timestamp_udw_offset(struct xe_lrc *lrc)
static inline u32 __xe_lrc_indirect_ring_offset(struct xe_lrc *lrc)
{
- /* Indirect ring state page is at the very end of LRC */
- return lrc->size - LRC_INDIRECT_RING_STATE_SIZE;
+ u32 offset = xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE -
+ LRC_INDIRECT_RING_STATE_SIZE;
+
+ if (lrc->flags & XE_LRC_FLAG_INDIRECT_CTX)
+ offset -= LRC_INDIRECT_CTX_BO_SIZE;
+
+ return offset;
+}
+
+static inline u32 __xe_lrc_indirect_ctx_offset(struct xe_lrc *lrc)
+{
+ return xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE - LRC_INDIRECT_CTX_BO_SIZE;
+}
+
+static inline u32 __xe_lrc_wa_bb_offset(struct xe_lrc *lrc)
+{
+ return xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE;
}
#define DECL_MAP_ADDR_HELPERS(elem) \
@@ -913,15 +957,9 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
xe_bo_unpin_map_no_vm(lrc->bo);
}
-static size_t wa_bb_offset(struct xe_lrc *lrc)
-{
- return lrc->bo->size - LRC_WA_BB_SIZE;
-}
-
/*
- * xe_lrc_setup_utilization() - Setup wa bb to assist in calculating active
- * context run ticks.
- * @lrc: Pointer to the lrc.
+ * setup_utilization_wa() - Write commands to the wa bb to assist
+ * in calculating active context run ticks.
*
* Context Timestamp (CTX_TIMESTAMP) in the LRC accumulates the run ticks of the
* context, but only gets updated when the context switches out. In order to
@@ -946,19 +984,15 @@ static size_t wa_bb_offset(struct xe_lrc *lrc)
* store it in the PPHSWP.
*/
#define CONTEXT_ACTIVE 1ULL
-static int xe_lrc_setup_utilization(struct xe_lrc *lrc)
+static ssize_t setup_utilization_wa(struct xe_lrc *lrc,
+ struct xe_hw_engine *hwe,
+ u32 *batch,
+ size_t max_len)
{
- const size_t max_size = LRC_WA_BB_SIZE;
- u32 *cmd, *buf = NULL;
+ u32 *cmd = batch;
- if (lrc->bo->vmap.is_iomem) {
- buf = kmalloc(max_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- cmd = buf;
- } else {
- cmd = lrc->bo->vmap.vaddr + wa_bb_offset(lrc);
- }
+ if (xe_gt_WARN_ON(lrc->gt, max_len < 12))
+ return -ENOSPC;
*cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
*cmd++ = ENGINE_ID(0).addr;
@@ -977,42 +1011,190 @@ static int xe_lrc_setup_utilization(struct xe_lrc *lrc)
*cmd++ = upper_32_bits(CONTEXT_ACTIVE);
}
- *cmd++ = MI_BATCH_BUFFER_END;
+ return cmd - batch;
+}
+
+struct bo_setup {
+ ssize_t (*setup)(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_size);
+};
+
+struct bo_setup_state {
+ /* Input: */
+ struct xe_lrc *lrc;
+ struct xe_hw_engine *hwe;
+ size_t max_size;
+ size_t reserve_dw;
+ unsigned int offset;
+ const struct bo_setup *funcs;
+ unsigned int num_funcs;
+
+ /* State: */
+ u32 *buffer;
+ u32 *ptr;
+ unsigned int written;
+};
+
+static int setup_bo(struct bo_setup_state *state)
+{
+ ssize_t remain;
+
+ if (state->lrc->bo->vmap.is_iomem) {
+ state->buffer = kmalloc(state->max_size, GFP_KERNEL);
+ if (!state->buffer)
+ return -ENOMEM;
+ state->ptr = state->buffer;
+ } else {
+ state->ptr = state->lrc->bo->vmap.vaddr + state->offset;
+ state->buffer = NULL;
+ }
+
+ remain = state->max_size / sizeof(u32);
- if (buf) {
- xe_map_memcpy_to(gt_to_xe(lrc->gt), &lrc->bo->vmap,
- wa_bb_offset(lrc), buf,
- (cmd - buf) * sizeof(*cmd));
- kfree(buf);
+ for (size_t i = 0; i < state->num_funcs; i++) {
+ ssize_t len = state->funcs[i].setup(state->lrc, state->hwe,
+ state->ptr, remain);
+
+ remain -= len;
+
+ /*
+ * Caller has asked for at least reserve_dw to remain unused.
+ */
+ if (len < 0 ||
+ xe_gt_WARN_ON(state->lrc->gt, remain < state->reserve_dw))
+ goto fail;
+
+ state->ptr += len;
+ state->written += len;
}
- xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR, xe_bo_ggtt_addr(lrc->bo) +
- wa_bb_offset(lrc) + 1);
+ return 0;
+
+fail:
+ kfree(state->buffer);
+ return -ENOSPC;
+}
+
+static void finish_bo(struct bo_setup_state *state)
+{
+ if (!state->buffer)
+ return;
+
+ xe_map_memcpy_to(gt_to_xe(state->lrc->gt), &state->lrc->bo->vmap,
+ state->offset, state->buffer,
+ state->written * sizeof(u32));
+ kfree(state->buffer);
+}
+
+static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+{
+ static const struct bo_setup funcs[] = {
+ { .setup = setup_utilization_wa },
+ };
+ struct bo_setup_state state = {
+ .lrc = lrc,
+ .hwe = hwe,
+ .max_size = LRC_WA_BB_SIZE,
+ .reserve_dw = 1,
+ .offset = __xe_lrc_wa_bb_offset(lrc),
+ .funcs = funcs,
+ .num_funcs = ARRAY_SIZE(funcs),
+ };
+ int ret;
+
+ ret = setup_bo(&state);
+ if (ret)
+ return ret;
+
+ *state.ptr++ = MI_BATCH_BUFFER_END;
+ state.written++;
+
+ finish_bo(&state);
+
+ xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR,
+ xe_bo_ggtt_addr(lrc->bo) + state.offset + 1);
return 0;
}
-#define PVC_CTX_ASID (0x2e + 1)
-#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
+static int
+setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+{
+ static struct bo_setup rcs_funcs[] = {
+ };
+ struct bo_setup_state state = {
+ .lrc = lrc,
+ .hwe = hwe,
+ .max_size = (63 * 64) /* max 63 cachelines */,
+ .offset = __xe_lrc_indirect_ctx_offset(lrc),
+ };
+ int ret;
+
+ if (!(lrc->flags & XE_LRC_FLAG_INDIRECT_CTX))
+ return 0;
+
+ if (hwe->class == XE_ENGINE_CLASS_RENDER ||
+ hwe->class == XE_ENGINE_CLASS_COMPUTE) {
+ state.funcs = rcs_funcs;
+ state.num_funcs = ARRAY_SIZE(rcs_funcs);
+ }
+
+ if (xe_gt_WARN_ON(lrc->gt, !state.funcs))
+ return 0;
+
+ ret = setup_bo(&state);
+ if (ret)
+ return ret;
+
+ /*
+ * Pad to a 64B cacheline so there's no garbage at the end for the CS to
+ * execute: the indirect ctx size must be a multiple of 64 bytes.
+ */
+ while (state.written & 0xf) {
+ *state.ptr++ = MI_NOOP;
+ state.written++;
+ }
+
+ finish_bo(&state);
+
+ xe_lrc_write_ctx_reg(lrc,
+ CTX_CS_INDIRECT_CTX,
+ (xe_bo_ggtt_addr(lrc->bo) + state.offset) |
+ /* Size in CLs. */
+ (state.written * sizeof(u32) / 64));
+ xe_lrc_write_ctx_reg(lrc,
+ CTX_CS_INDIRECT_CTX_OFFSET,
+ CTX_INDIRECT_CTX_OFFSET_DEFAULT);
+
+ return 0;
+}
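
Worked example of the size encoding above: the MI_NOOP loop pads
state.written to a multiple of 16 dwords (16 * 4 = 64 bytes, one cacheline),
so e.g. 20 written dwords become 32, i.e. 128 bytes, and the size field
programmed into CTX_CS_INDIRECT_CTX is 128 / 64 = 2 cachelines.
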
static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_vm *vm, u32 ring_size, u16 msix_vec,
u32 init_flags)
{
struct xe_gt *gt = hwe->gt;
+ const u32 lrc_size = xe_gt_lrc_size(gt, hwe->class);
+ u32 bo_size = ring_size + lrc_size + LRC_WA_BB_SIZE;
struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = gt_to_xe(gt);
struct iosys_map map;
- void *init_data = NULL;
u32 arb_enable;
- u32 lrc_size;
u32 bo_flags;
int err;
kref_init(&lrc->refcount);
lrc->gt = gt;
+ lrc->size = lrc_size;
lrc->flags = 0;
- lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class);
+ lrc->ring.size = ring_size;
+ lrc->ring.tail = 0;
+
+ if (gt_engine_needs_indirect_ctx(gt, hwe->class)) {
+ lrc->flags |= XE_LRC_FLAG_INDIRECT_CTX;
+ bo_size += LRC_INDIRECT_CTX_BO_SIZE;
+ }
+
if (xe_gt_has_indirect_ring_state(gt))
lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE;
@@ -1021,45 +1203,36 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
if (vm && vm->xef) /* userspace */
bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
- /*
- * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
- * via VM bind calls.
- */
- lrc->bo = xe_bo_create_pin_map(xe, tile, NULL,
- lrc_size + LRC_WA_BB_SIZE,
+ lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size,
ttm_bo_type_kernel,
bo_flags);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
- lrc->size = lrc_size;
- lrc->ring.size = ring_size;
- lrc->ring.tail = 0;
-
xe_hw_fence_ctx_init(&lrc->fence_ctx, hwe->gt,
hwe->fence_irq, hwe->name);
- if (!gt->default_lrc[hwe->class]) {
- init_data = empty_lrc_data(hwe);
- if (!init_data) {
- err = -ENOMEM;
- goto err_lrc_finish;
- }
- }
-
/*
* Init Per-Process of HW status Page, LRC / context state to known
- * values
+ * values. If a primed default_lrc already exists, just copy it; otherwise
+ * this is the early submission used to record the LRC, so build a new
+ * empty one from scratch.
*/
map = __xe_lrc_pphwsp_map(lrc);
- if (!init_data) {
+ if (gt->default_lrc[hwe->class]) {
xe_map_memset(xe, &map, 0, 0, LRC_PPHWSP_SIZE); /* PPHWSP */
xe_map_memcpy_to(xe, &map, LRC_PPHWSP_SIZE,
gt->default_lrc[hwe->class] + LRC_PPHWSP_SIZE,
- xe_gt_lrc_size(gt, hwe->class) - LRC_PPHWSP_SIZE);
+ lrc_size - LRC_PPHWSP_SIZE);
} else {
- xe_map_memcpy_to(xe, &map, 0, init_data,
- xe_gt_lrc_size(gt, hwe->class));
+ void *init_data = empty_lrc_data(hwe);
+
+ if (!init_data) {
+ err = -ENOMEM;
+ goto err_lrc_finish;
+ }
+
+ xe_map_memcpy_to(xe, &map, 0, init_data, lrc_size);
kfree(init_data);
}
@@ -1113,7 +1286,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP_UDW, 0);
if (xe->info.has_asid && vm)
- xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
+ xe_lrc_write_ctx_reg(lrc, CTX_ASID, vm->usm.asid);
lrc->desc = LRC_VALID;
lrc->desc |= FIELD_PREP(LRC_ADDRESSING_MODE, LRC_LEGACY_64B_CONTEXT);
@@ -1139,7 +1312,11 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
map = __xe_lrc_start_seqno_map(lrc);
xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
- err = xe_lrc_setup_utilization(lrc);
+ err = setup_wa_bb(lrc, hwe);
+ if (err)
+ goto err_lrc_finish;
+
+ err = setup_indirect_ctx(lrc, hwe);
if (err)
goto err_lrc_finish;
@@ -1735,7 +1912,7 @@ static const struct instr_state xe_hpg_svg_state[] = {
{ .instr = CMD_3DSTATE_DRAWING_RECTANGLE, .num_dw = 4 },
};
-void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb)
+u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs)
{
struct xe_gt *gt = q->hwe->gt;
struct xe_device *xe = gt_to_xe(gt);
@@ -1770,7 +1947,7 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b
if (!state_table) {
xe_gt_dbg(gt, "No non-register state to emit on graphics ver %d.%02d\n",
GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
- return;
+ return cs;
}
for (int i = 0; i < state_table_size; i++) {
@@ -1793,12 +1970,14 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b
instr == CMD_3DSTATE_DRAWING_RECTANGLE)
instr = CMD_3DSTATE_DRAWING_RECTANGLE_FAST;
- bb->cs[bb->len] = instr;
+ *cs = instr;
if (!is_single_dw)
- bb->cs[bb->len] |= (num_dw - 2);
+ *cs |= (num_dw - 2);
- bb->len += num_dw;
+ cs += num_dw;
}
+
+ return cs;
}
struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
@@ -1819,8 +1998,7 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
snapshot->seqno = xe_lrc_seqno(lrc);
snapshot->lrc_bo = xe_bo_get(lrc->bo);
snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
- snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset -
- LRC_WA_BB_SIZE;
+ snapshot->lrc_size = lrc->size;
snapshot->lrc_snapshot = NULL;
snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc));
snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc);
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index eb6e8de8c939..b6c8053c581b 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -112,7 +112,7 @@ void xe_lrc_dump_default(struct drm_printer *p,
struct xe_gt *gt,
enum xe_engine_class);
-void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb);
+u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs);
struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc);
void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot);
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index 883e550a9423..e9883706e004 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -22,14 +22,15 @@ struct xe_lrc {
*/
struct xe_bo *bo;
- /** @size: size of lrc including any indirect ring state page */
+ /** @size: size of the lrc and optional indirect ring state */
u32 size;
/** @gt: gt which this LRC belongs to */
struct xe_gt *gt;
/** @flags: LRC flags */
-#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x1
+#define XE_LRC_FLAG_INDIRECT_CTX 0x1
+#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x2
u32 flags;
/** @refcount: ref count of this lrc */
diff --git a/drivers/gpu/drm/xe/xe_map.h b/drivers/gpu/drm/xe/xe_map.h
index f62e0c8b67ab..8d67f6ba2d95 100644
--- a/drivers/gpu/drm/xe/xe_map.h
+++ b/drivers/gpu/drm/xe/xe_map.h
@@ -78,6 +78,24 @@ static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
iosys_map_wr(map__, offset__, type__, val__); \
})
+#define xe_map_rd_array(xe__, map__, index__, type__) \
+ xe_map_rd(xe__, map__, (index__) * sizeof(type__), type__)
+
+#define xe_map_wr_array(xe__, map__, index__, type__, val__) \
+ xe_map_wr(xe__, map__, (index__) * sizeof(type__), type__, val__)
+
+#define xe_map_rd_array_u32(xe__, map__, index__) \
+ xe_map_rd_array(xe__, map__, index__, u32)
+
+#define xe_map_wr_array_u32(xe__, map__, index__, val__) \
+ xe_map_wr_array(xe__, map__, index__, u32, val__)
+
+#define xe_map_rd_ring_u32(xe__, map__, index__, size__) \
+ xe_map_rd_array_u32(xe__, map__, (index__) % (size__))
+
+#define xe_map_wr_ring_u32(xe__, map__, index__, size__, val__) \
+ xe_map_wr_array_u32(xe__, map__, (index__) % (size__), val__)
+
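Usage sketch for the ring variants above: the index is reduced modulo the
ring size, so successive writes wrap in place (illustrative only, assuming a
valid xe/map pair):

static void demo_ring_fill(struct xe_device *xe, struct iosys_map *map)
{
        /* With a 4-entry ring, indices 0..5 land in slots 0, 1, 2, 3, 0, 1. */
        for (u32 i = 0; i < 6; i++)
                xe_map_wr_ring_u32(xe, map, i, 4, 0xc0de0000 + i);
}
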
#define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({ \
struct xe_device *__xe = xe__; \
xe_device_assert_mem_access(__xe); \
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 07a5161c7d5b..7d20ac4bb633 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -203,7 +203,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
/* Need to be sure everything fits in the first PT, or create more */
- xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M);
+ xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M);
bo = xe_bo_create_pin_map(vm->xe, tile, vm,
num_entries * XE_PAGE_SIZE,
@@ -214,7 +214,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
return PTR_ERR(bo);
/* PT30 & PT31 reserved for 2M identity map */
- pt29_ofs = bo->size - 3 * XE_PAGE_SIZE;
+ pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
@@ -236,7 +236,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (!IS_DGFX(xe)) {
/* Write out batch too */
m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
- for (i = 0; i < batch->size;
+ for (i = 0; i < xe_bo_size(batch);
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
entry = vm->pt_ops->pte_encode_bo(batch, i,
@@ -247,13 +247,13 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
level++;
}
if (xe->info.has_usm) {
- xe_tile_assert(tile, batch->size == SZ_1M);
+ xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M);
batch = tile->primary_gt->usm.bb_pool->bo;
m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
- xe_tile_assert(tile, batch->size == SZ_512K);
+ xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K);
- for (i = 0; i < batch->size;
+ for (i = 0; i < xe_bo_size(batch);
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
entry = vm->pt_ops->pte_encode_bo(batch, i,
@@ -306,7 +306,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Identity map the entire vram at 256GiB offset */
if (IS_DGFX(xe)) {
- u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE;
+ u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
pat_index, pt30_ofs);
@@ -321,7 +321,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
u64 vram_offset = IDENTITY_OFFSET +
DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
- u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
+ u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
@@ -768,7 +768,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
struct dma_fence *fence = NULL;
- u64 size = src_bo->size;
+ u64 size = xe_bo_size(src_bo);
struct xe_res_cursor src_it, dst_it, ccs_it;
u64 src_L0_ofs, dst_L0_ofs;
u32 src_L0_pt, dst_L0_pt;
@@ -791,7 +791,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
return ERR_PTR(-EINVAL);
- if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size))
+ if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo)))
return ERR_PTR(-EINVAL);
if (!src_is_vram)
@@ -1064,7 +1064,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_device *xe = gt_to_xe(gt);
bool clear_only_system_ccs = false;
struct dma_fence *fence = NULL;
- u64 size = bo->size;
+ u64 size = xe_bo_size(bo);
struct xe_res_cursor src_it;
struct ttm_resource *src = dst;
int err;
@@ -1076,9 +1076,9 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
clear_only_system_ccs = true;
if (!clear_vram)
- xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
+ xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it);
else
- xe_res_first(src, 0, bo->size, &src_it);
+ xe_res_first(src, 0, xe_bo_size(bo), &src_it);
while (size) {
u64 clear_L0_ofs;
@@ -1407,7 +1407,7 @@ __xe_migrate_update_pgtables(struct xe_migrate *m,
if (idx == chunk)
goto next_cmd;
- xe_tile_assert(tile, pt_bo->size == SZ_4K);
+ xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K);
/* Map a PT at most once */
if (pt_bo->update_index < 0)
@@ -1820,15 +1820,19 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
!IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
int buf_offset = 0;
+ void *bounce;
+ int err;
+
+ BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
+ bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
+ if (!bounce)
+ return -ENOMEM;
/*
* Less than ideal for large unaligned access but this should be
* fairly rare, can fixup if this becomes common.
*/
do {
- u8 bounce[XE_CACHELINE_BYTES];
- void *ptr = (void *)bounce;
- int err;
int copy_bytes = min_t(int, bytes_left,
XE_CACHELINE_BYTES -
(offset & XE_CACHELINE_MASK));
@@ -1837,22 +1841,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
err = xe_migrate_access_memory(m, bo,
offset &
~XE_CACHELINE_MASK,
- (void *)ptr,
- sizeof(bounce), 0);
+ bounce,
+ XE_CACHELINE_BYTES, 0);
if (err)
- return err;
+ break;
if (write) {
- memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
+ memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
err = xe_migrate_access_memory(m, bo,
offset & ~XE_CACHELINE_MASK,
- (void *)ptr,
- sizeof(bounce), write);
+ bounce,
+ XE_CACHELINE_BYTES, write);
if (err)
- return err;
+ break;
} else {
- memcpy(buf + buf_offset, ptr + ptr_offset,
+ memcpy(buf + buf_offset, bounce + ptr_offset,
copy_bytes);
}
@@ -1861,14 +1865,15 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
offset += copy_bytes;
} while (bytes_left);
- return 0;
+ kfree(bounce);
+ return err;
}
dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
if (IS_ERR(dma_addr))
return PTR_ERR(dma_addr);
- xe_res_first(bo->ttm.resource, offset, bo->size - offset, &cursor);
+ xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
do {
struct dma_fence *__fence;
@@ -1882,8 +1887,11 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
else
current_bytes = min_t(int, bytes_left, cursor.size);
- if (fence)
- dma_fence_put(fence);
+ if (current_bytes & ~PAGE_MASK) {
+ int pitch = 4;
+
+ current_bytes = min_t(int, current_bytes, S16_MAX * pitch);
+ }
__fence = xe_migrate_vram(m, current_bytes,
(unsigned long)buf & ~PAGE_MASK,
@@ -1892,11 +1900,15 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
XE_MIGRATE_COPY_TO_VRAM :
XE_MIGRATE_COPY_TO_SRAM);
if (IS_ERR(__fence)) {
- if (fence)
+ if (fence) {
dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
fence = __fence;
goto out_err;
}
+
+ dma_fence_put(fence);
fence = __fence;
buf += current_bytes;
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 7357458bc0d2..e4db8d58ea2d 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -22,6 +22,9 @@
#include "xe_macros.h"
#include "xe_sriov.h"
#include "xe_trace.h"
+#include "xe_wa.h"
+
+#include "generated/xe_device_wa_oob.h"
static void tiles_fini(void *arg)
{
@@ -55,6 +58,7 @@ static void tiles_fini(void *arg)
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
struct xe_tile *tile;
+ struct xe_gt *gt;
u8 id;
/*
@@ -67,7 +71,7 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
/* Possibly override number of tile based on configuration register */
if (!xe->info.skip_mtcfg) {
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- u8 tile_count;
+ u8 tile_count, gt_count;
u32 mtcfg;
/*
@@ -84,12 +88,15 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
xe->info.tile_count = tile_count;
/*
- * FIXME: Needs some work for standalone media, but
- * should be impossible with multi-tile for now:
- * multi-tile platform with standalone media doesn't
- * exist
+ * We've already set up gt_count according to the full
+ * tile count. Re-calculate it to only include the GTs
+ * that belong to the remaining tile(s).
*/
- xe->info.gt_count = xe->info.tile_count;
+ gt_count = 0;
+ for_each_gt(gt, xe, id)
+ if (gt->info.id < tile_count * xe->info.max_gt_per_tile)
+ gt_count++;
+ xe->info.gt_count = gt_count;
}
}
@@ -163,7 +170,7 @@ static void mmio_flush_pending_writes(struct xe_mmio *mmio)
#define DUMMY_REG_OFFSET 0x130030
int i;
- if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
+ if (!XE_DEVICE_WA(mmio->tile->xe, 15015404425))
return;
/* 4 dummy writes */
@@ -176,7 +183,6 @@ u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u8 val;
- /* Wa_15015404425 */
mmio_flush_pending_writes(mmio);
val = readb(mmio->regs + addr);
@@ -190,7 +196,6 @@ u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u16 val;
- /* Wa_15015404425 */
mmio_flush_pending_writes(mmio);
val = readw(mmio->regs + addr);
@@ -217,7 +222,6 @@ u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u32 val;
- /* Wa_15015404425 */
mmio_flush_pending_writes(mmio);
if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index da6793c2f991..d9391bd08194 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -18,29 +18,50 @@
#include "xe_observation.h"
#include "xe_sched_job.h"
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define DEFAULT_GUC_LOG_LEVEL 3
+#else
+#define DEFAULT_GUC_LOG_LEVEL 1
+#endif
+
+#define DEFAULT_PROBE_DISPLAY true
+#define DEFAULT_VRAM_BAR_SIZE 0
+#define DEFAULT_FORCE_PROBE CONFIG_DRM_XE_FORCE_PROBE
+#define DEFAULT_MAX_VFS ~0
+#define DEFAULT_MAX_VFS_STR "unlimited"
+#define DEFAULT_WEDGED_MODE 1
+#define DEFAULT_SVM_NOTIFIER_SIZE 512
+
struct xe_modparam xe_modparam = {
- .probe_display = true,
- .guc_log_level = IS_ENABLED(CONFIG_DRM_XE_DEBUG) ? 3 : 1,
- .force_probe = CONFIG_DRM_XE_FORCE_PROBE,
- .wedged_mode = 1,
- .svm_notifier_size = 512,
+ .probe_display = DEFAULT_PROBE_DISPLAY,
+ .guc_log_level = DEFAULT_GUC_LOG_LEVEL,
+ .force_probe = DEFAULT_FORCE_PROBE,
+#ifdef CONFIG_PCI_IOV
+ .max_vfs = DEFAULT_MAX_VFS,
+#endif
+ .wedged_mode = DEFAULT_WEDGED_MODE,
+ .svm_notifier_size = DEFAULT_SVM_NOTIFIER_SIZE,
/* the rest are 0 by default */
};
module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600);
-MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size(in MiB), must be power of 2");
+MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size in MiB, must be a power of 2 "
+ "[default=" __stringify(DEFAULT_SVM_NOTIFIER_SIZE) "]");
module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
module_param_named(probe_display, xe_modparam.probe_display, bool, 0444);
-MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)");
+MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched "
+ "[default=" __stringify(DEFAULT_PROBE_DISPLAY) "])");
module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, int, 0600);
-MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size (in MiB) - <0=disable-resize, 0=max-needed-size[default], >0=force-size");
+MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size in MiB (<0=disable-resize, 0=max-needed-size, >0=force-size "
+ "[default=" __stringify(DEFAULT_VRAM_BAR_SIZE) "])");
module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600);
-MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1..5=enable with verbosity min..max)");
+MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1=normal, 2..5=verbose-levels "
+ "[default=" __stringify(DEFAULT_GUC_LOG_LEVEL) "])");
module_param_named_unsafe(guc_firmware_path, xe_modparam.guc_firmware_path, charp, 0400);
MODULE_PARM_DESC(guc_firmware_path,
@@ -56,18 +77,21 @@ MODULE_PARM_DESC(gsc_firmware_path,
module_param_named_unsafe(force_probe, xe_modparam.force_probe, charp, 0400);
MODULE_PARM_DESC(force_probe,
- "Force probe options for specified devices. See CONFIG_DRM_XE_FORCE_PROBE for details.");
+ "Force probe options for specified devices. See CONFIG_DRM_XE_FORCE_PROBE for details "
+ "[default=" DEFAULT_FORCE_PROBE "])");
#ifdef CONFIG_PCI_IOV
module_param_named(max_vfs, xe_modparam.max_vfs, uint, 0400);
MODULE_PARM_DESC(max_vfs,
"Limit number of Virtual Functions (VFs) that could be managed. "
- "(0 = no VFs [default]; N = allow up to N VFs)");
+ "(0=no VFs; N=allow up to N VFs "
+ "[default=" DEFAULT_MAX_VFS_STR "])");
#endif
module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600);
MODULE_PARM_DESC(wedged_mode,
- "Module's default policy for the wedged mode - 0=never, 1=upon-critical-errors[default], 2=upon-any-hang");
+ "Module's default policy for the wedged mode (0=never, 1=upon-critical-errors, 2=upon-any-hang "
+ "[default=" __stringify(DEFAULT_WEDGED_MODE) "])");
static int xe_check_nomodeset(void)
{
diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c
new file mode 100644
index 000000000000..61b0a1531a53
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_nvm.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
+ */
+
+#include <linux/intel_dg_nvm_aux.h>
+#include <linux/pci.h>
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_mmio.h"
+#include "xe_nvm.h"
+#include "regs/xe_gsc_regs.h"
+#include "xe_sriov.h"
+
+#define GEN12_GUNIT_NVM_BASE 0x00102040
+#define GEN12_DEBUG_NVM_BASE 0x00101018
+
+#define GEN12_CNTL_PROTECTED_NVM_REG 0x0010100C
+
+#define GEN12_GUNIT_NVM_SIZE 0x80
+#define GEN12_DEBUG_NVM_SIZE 0x4
+
+#define NVM_NON_POSTED_ERASE_CHICKEN_BIT BIT(13)
+
+#define HECI_FW_STATUS_2_NVM_ACCESS_MODE BIT(3)
+
+static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
+ [0] = { .name = "DESCRIPTOR", },
+ [2] = { .name = "GSC", },
+ [9] = { .name = "PADDING", },
+ [11] = { .name = "OptionROM", },
+ [12] = { .name = "DAM", },
+};
+
+static void xe_nvm_release_dev(struct device *dev)
+{
+}
+
+static bool xe_nvm_non_posted_erase(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+
+ if (xe->info.platform != XE_BATTLEMAGE)
+ return false;
+ return !(xe_mmio_read32(&gt->mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) &
+ NVM_NON_POSTED_ERASE_CHICKEN_BIT);
+}
+
+static bool xe_nvm_writable_override(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ bool writable_override;
+ resource_size_t base;
+
+ switch (xe->info.platform) {
+ case XE_BATTLEMAGE:
+ base = DG2_GSC_HECI2_BASE;
+ break;
+ case XE_PVC:
+ base = PVC_GSC_HECI2_BASE;
+ break;
+ case XE_DG2:
+ base = DG2_GSC_HECI2_BASE;
+ break;
+ case XE_DG1:
+ base = DG1_GSC_HECI2_BASE;
+ break;
+ default:
+ drm_err(&xe->drm, "Unknown platform\n");
+ return true;
+ }
+
+ writable_override =
+ !(xe_mmio_read32(&gt->mmio, HECI_FWSTS2(base)) &
+ HECI_FW_STATUS_2_NVM_ACCESS_MODE);
+ if (writable_override)
+ drm_info(&xe->drm, "NVM access overridden by jumper\n");
+ return writable_override;
+}
+
+int xe_nvm_init(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct auxiliary_device *aux_dev;
+ struct intel_dg_nvm_dev *nvm;
+ int ret;
+
+ if (!xe->info.has_gsc_nvm)
+ return 0;
+
+ /* No access to internal NVM from VFs */
+ if (IS_SRIOV_VF(xe))
+ return 0;
+
+ /* The NVM pointer should be NULL here */
+ if (WARN_ON(xe->nvm))
+ return -EFAULT;
+
+ xe->nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+ if (!xe->nvm)
+ return -ENOMEM;
+
+ nvm = xe->nvm;
+
+ nvm->writable_override = xe_nvm_writable_override(xe);
+ nvm->non_posted_erase = xe_nvm_non_posted_erase(xe);
+ nvm->bar.parent = &pdev->resource[0];
+ nvm->bar.start = GEN12_GUNIT_NVM_BASE + pdev->resource[0].start;
+ nvm->bar.end = nvm->bar.start + GEN12_GUNIT_NVM_SIZE - 1;
+ nvm->bar.flags = IORESOURCE_MEM;
+ nvm->bar.desc = IORES_DESC_NONE;
+ nvm->regions = regions;
+
+ nvm->bar2.parent = &pdev->resource[0];
+ nvm->bar2.start = GEN12_DEBUG_NVM_BASE + pdev->resource[0].start;
+ nvm->bar2.end = nvm->bar2.start + GEN12_DEBUG_NVM_SIZE - 1;
+ nvm->bar2.flags = IORESOURCE_MEM;
+ nvm->bar2.desc = IORES_DESC_NONE;
+
+ aux_dev = &nvm->aux_dev;
+
+ aux_dev->name = "nvm";
+ aux_dev->id = (pci_domain_nr(pdev->bus) << 16) | pci_dev_id(pdev);
+ aux_dev->dev.parent = &pdev->dev;
+ aux_dev->dev.release = xe_nvm_release_dev;
+
+ ret = auxiliary_device_init(aux_dev);
+ if (ret) {
+ drm_err(&xe->drm, "xe-nvm aux init failed %d\n", ret);
+ goto err;
+ }
+
+ ret = auxiliary_device_add(aux_dev);
+ if (ret) {
+ drm_err(&xe->drm, "xe-nvm aux add failed %d\n", ret);
+ auxiliary_device_uninit(aux_dev);
+ goto err;
+ }
+ return 0;
+
+err:
+ kfree(nvm);
+ xe->nvm = NULL;
+ return ret;
+}
+
+void xe_nvm_fini(struct xe_device *xe)
+{
+ struct intel_dg_nvm_dev *nvm = xe->nvm;
+
+ if (!xe->info.has_gsc_nvm)
+ return;
+
+ /* No access to internal NVM from VFs */
+ if (IS_SRIOV_VF(xe))
+ return;
+
+ /* The NVM pointer should not be NULL here */
+ if (WARN_ON(!nvm))
+ return;
+
+ auxiliary_device_delete(&nvm->aux_dev);
+ auxiliary_device_uninit(&nvm->aux_dev);
+ kfree(nvm);
+ xe->nvm = NULL;
+}
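
xe_nvm_init()/xe_nvm_fini() above follow the standard auxiliary-device
lifecycle: init, then add; on a failed add, the init must still be undone
with uninit. A condensed sketch of that pairing (hypothetical demo_* names):

#include <linux/auxiliary_bus.h>

static void demo_adev_release(struct device *dev)
{
        /* Required release hook; resources are owned by the parent here. */
}

static int demo_adev_register(struct auxiliary_device *adev,
                              struct device *parent)
{
        int ret;

        adev->name = "demo";
        adev->dev.parent = parent;
        adev->dev.release = demo_adev_release;

        ret = auxiliary_device_init(adev);
        if (ret)
                return ret;

        ret = auxiliary_device_add(adev);
        if (ret)
                auxiliary_device_uninit(adev); /* undo init on a failed add */

        return ret;
}
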
diff --git a/drivers/gpu/drm/xe/xe_nvm.h b/drivers/gpu/drm/xe/xe_nvm.h
new file mode 100644
index 000000000000..7f3d5f57bed0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_nvm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2019-2025 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __XE_NVM_H__
+#define __XE_NVM_H__
+
+struct xe_device;
+
+int xe_nvm_init(struct xe_device *xe);
+
+void xe_nvm_fini(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index fb842fa0552e..5729e7d3e335 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -43,6 +43,12 @@
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
#define XE_OA_UNIT_INVALID U32_MAX
+enum xe_oam_unit_type {
+ XE_OAM_UNIT_SAG,
+ XE_OAM_UNIT_SCMI_0,
+ XE_OAM_UNIT_SCMI_1,
+};
+
enum xe_oa_submit_deps {
XE_OA_SUBMIT_NO_DEPS,
XE_OA_SUBMIT_ADD_DEPS,
@@ -77,7 +83,7 @@ struct xe_oa_config {
struct xe_oa_open_param {
struct xe_file *xef;
- u32 oa_unit_id;
+ struct xe_oa_unit *oa_unit;
bool sample;
u32 metric_set;
enum xe_oa_format_name oa_format;
@@ -194,7 +200,7 @@ static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *l
static const struct xe_oa_regs *__oa_regs(struct xe_oa_stream *stream)
{
- return &stream->hwe->oa_unit->regs;
+ return &stream->oa_unit->regs;
}
static u32 xe_oa_hw_tail_read(struct xe_oa_stream *stream)
@@ -397,7 +403,7 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf,
static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
{
u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
- int size_exponent = __ffs(stream->oa_buffer.bo->size);
+ int size_exponent = __ffs(xe_bo_size(stream->oa_buffer.bo));
u32 oa_buf = gtt_offset | OAG_OABUFFER_MEMORY_SELECT;
struct xe_mmio *mmio = &stream->gt->mmio;
unsigned long flags;
@@ -429,7 +435,7 @@ static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/* Zero out the OA buffer since we rely on zero report id and timestamp fields */
- memset(stream->oa_buffer.vaddr, 0, stream->oa_buffer.bo->size);
+ memset(stream->oa_buffer.vaddr, 0, xe_bo_size(stream->oa_buffer.bo));
}
static u32 __format_to_oactrl(const struct xe_oa_format *format, int counter_sel_mask)
@@ -454,7 +460,7 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
{
- return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
+ return stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
}
@@ -475,7 +481,7 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
__oa_ccs_select(stream) | OAG_OACONTROL_OA_COUNTER_ENABLE;
if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
- stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
val |= OAG_OACONTROL_OA_PES_DISAG_EN;
xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
@@ -838,11 +844,16 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
/* Reset PMON Enable to save power. */
xe_mmio_rmw32(mmio, XELPMP_SQCNT1, sqcnt1, 0);
+
+ if ((stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM ||
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM_SAG) &&
+ GRAPHICS_VER(stream->oa->xe) >= 30)
+ xe_mmio_rmw32(mmio, OAM_COMPRESSION_T3_CONTROL, OAM_LAT_MEASURE_ENABLE, 0);
}
static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
{
- struct xe_oa_unit *u = stream->hwe->oa_unit;
+ struct xe_oa_unit *u = stream->oa_unit;
struct xe_gt *gt = stream->hwe->gt;
if (WARN_ON(stream != u->exclusive_stream))
@@ -1054,7 +1065,7 @@ static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream)
static u32 oag_buf_size_select(const struct xe_oa_stream *stream)
{
return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT,
- stream->oa_buffer.bo->size > SZ_16M ?
+ xe_bo_size(stream->oa_buffer.bo) > SZ_16M ?
OAG_OA_DEBUG_BUF_SIZE_SELECT : 0);
}
@@ -1105,9 +1116,13 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
*/
sqcnt1 = SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);
-
xe_mmio_rmw32(mmio, XELPMP_SQCNT1, 0, sqcnt1);
+ if ((stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM ||
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM_SAG) &&
+ GRAPHICS_VER(stream->oa->xe) >= 30)
+ xe_mmio_rmw32(mmio, OAM_COMPRESSION_T3_CONTROL, 0, OAM_LAT_MEASURE_ENABLE);
+
/* Configure OAR/OAC */
if (stream->exec_q) {
ret = xe_oa_configure_oa_context(stream, true);
@@ -1139,14 +1154,31 @@ static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *n
return -EINVAL;
}
+static struct xe_oa_unit *xe_oa_lookup_oa_unit(struct xe_oa *oa, u32 oa_unit_id)
+{
+ struct xe_gt *gt;
+ int gt_id, i;
+
+ for_each_gt(gt, oa->xe, gt_id) {
+ for (i = 0; i < gt->oa.num_oa_units; i++) {
+ struct xe_oa_unit *u = &gt->oa.oa_unit[i];
+
+ if (u->oa_unit_id == oa_unit_id)
+ return u;
+ }
+ }
+
+ return NULL;
+}
+
static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
struct xe_oa_open_param *param)
{
- if (value >= oa->oa_unit_ids) {
+ param->oa_unit = xe_oa_lookup_oa_unit(oa, value);
+ if (!param->oa_unit) {
drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
return -EINVAL;
}
- param->oa_unit_id = value;
return 0;
}
@@ -1550,7 +1582,7 @@ static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
static long xe_oa_info_locked(struct xe_oa_stream *stream, unsigned long arg)
{
- struct drm_xe_oa_stream_info info = { .oa_buf_size = stream->oa_buffer.bo->size, };
+ struct drm_xe_oa_stream_info info = { .oa_buf_size = xe_bo_size(stream->oa_buffer.bo), };
void __user *uaddr = (void __user *)arg;
if (copy_to_user(uaddr, &info, sizeof(info)))
@@ -1636,7 +1668,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
}
/* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
- if (vma->vm_end - vma->vm_start != stream->oa_buffer.bo->size) {
+ if (vma->vm_end - vma->vm_start != xe_bo_size(stream->oa_buffer.bo)) {
drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
return -EINVAL;
}
@@ -1677,13 +1709,13 @@ static const struct file_operations xe_oa_fops = {
static int xe_oa_stream_init(struct xe_oa_stream *stream,
struct xe_oa_open_param *param)
{
- struct xe_oa_unit *u = param->hwe->oa_unit;
struct xe_gt *gt = param->hwe->gt;
unsigned int fw_ref;
int ret;
stream->exec_q = param->exec_q;
stream->poll_period_ns = DEFAULT_POLL_PERIOD_NS;
+ stream->oa_unit = param->oa_unit;
stream->hwe = param->hwe;
stream->gt = stream->hwe->gt;
stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
@@ -1704,7 +1736,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
* buffer whose size, circ_size, is a multiple of the report size
*/
if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
- stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
stream->oa_buffer.circ_size =
param->oa_buffer_size -
param->oa_buffer_size % stream->oa_buffer.format->size;
@@ -1762,7 +1794,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
drm_dbg(&stream->oa->xe->drm, "opening stream oa config uuid=%s\n",
stream->oa_config->uuid);
- WRITE_ONCE(u->exclusive_stream, stream);
+ WRITE_ONCE(stream->oa_unit->exclusive_stream, stream);
hrtimer_setup(&stream->poll_check_timer, xe_oa_poll_check_timer_cb, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
@@ -1798,7 +1830,7 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa,
int ret;
/* We currently only allow exclusive access */
- if (param->hwe->oa_unit->exclusive_stream) {
+ if (param->oa_unit->exclusive_stream) {
drm_dbg(&oa->xe->drm, "OA unit already in use\n");
ret = -EBUSY;
goto exit;
@@ -1874,13 +1906,14 @@ static u64 oa_exponent_to_ns(struct xe_gt *gt, int exponent)
return div_u64(nom + den - 1, den);
}
-static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type)
+static bool oa_unit_supports_oa_format(struct xe_oa_open_param *param, int type)
{
- switch (hwe->oa_unit->type) {
+ switch (param->oa_unit->type) {
case DRM_XE_OA_UNIT_TYPE_OAG:
return type == DRM_XE_OA_FMT_TYPE_OAG || type == DRM_XE_OA_FMT_TYPE_OAR ||
type == DRM_XE_OA_FMT_TYPE_OAC || type == DRM_XE_OA_FMT_TYPE_PEC;
case DRM_XE_OA_UNIT_TYPE_OAM:
+ case DRM_XE_OA_UNIT_TYPE_OAM_SAG:
return type == DRM_XE_OA_FMT_TYPE_OAM || type == DRM_XE_OA_FMT_TYPE_OAM_MPEC;
default:
return false;
@@ -1899,37 +1932,48 @@ u16 xe_oa_unit_id(struct xe_hw_engine *hwe)
hwe->oa_unit->oa_unit_id : U16_MAX;
}
+/* A hwe must be assigned to stream/oa_unit for batch submissions */
static int xe_oa_assign_hwe(struct xe_oa *oa, struct xe_oa_open_param *param)
{
- struct xe_gt *gt;
- int i, ret = 0;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ int ret = 0;
+
+ /* If not provided, OA unit defaults to OA unit 0 as per uapi */
+ if (!param->oa_unit)
+ param->oa_unit = &xe_root_mmio_gt(oa->xe)->oa.oa_unit[0];
+ /* When we have an exec_q, get hwe from the exec_q */
if (param->exec_q) {
- /* When we have an exec_q, get hwe from the exec_q */
param->hwe = xe_gt_hw_engine(param->exec_q->gt, param->exec_q->class,
param->engine_instance, true);
- } else {
- struct xe_hw_engine *hwe;
- enum xe_hw_engine_id id;
-
- /* Else just get the first hwe attached to the oa unit */
- for_each_gt(gt, oa->xe, i) {
- for_each_hw_engine(hwe, gt, id) {
- if (xe_oa_unit_id(hwe) == param->oa_unit_id) {
- param->hwe = hwe;
- goto out;
- }
- }
- }
+ if (!param->hwe || param->hwe->oa_unit != param->oa_unit)
+ goto err;
+ goto out;
}
-out:
- if (!param->hwe || xe_oa_unit_id(param->hwe) != param->oa_unit_id) {
- drm_dbg(&oa->xe->drm, "Unable to find hwe (%d, %d) for OA unit ID %d\n",
- param->exec_q ? param->exec_q->class : -1,
- param->engine_instance, param->oa_unit_id);
- ret = -EINVAL;
+
+ /* Else just get the first hwe attached to the oa unit */
+ for_each_hw_engine(hwe, param->oa_unit->gt, id) {
+ if (hwe->oa_unit == param->oa_unit) {
+ param->hwe = hwe;
+ goto out;
+ }
}
+ /* If we still didn't find a hwe, just get one with a valid oa_unit from the same gt */
+ for_each_hw_engine(hwe, param->oa_unit->gt, id) {
+ if (!hwe->oa_unit)
+ continue;
+
+ param->hwe = hwe;
+ goto out;
+ }
+err:
+ drm_dbg(&oa->xe->drm, "Unable to find hwe (%d, %d) for OA unit ID %d\n",
+ param->exec_q ? param->exec_q->class : -1,
+ param->engine_instance, param->oa_unit->oa_unit_id);
+ ret = -EINVAL;
+out:
return ret;
}
@@ -2007,7 +2051,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
f = &oa->oa_formats[param.oa_format];
if (!param.oa_format || !f->size ||
- !engine_supports_oa_format(param.hwe, f->type)) {
+ !oa_unit_supports_oa_format(&param, f->type)) {
drm_dbg(&oa->xe->drm, "Invalid OA format %d type %d size %d for class %d\n",
param.oa_format, f->type, f->size, param.hwe->class);
ret = -EINVAL;
@@ -2155,6 +2199,7 @@ static const struct xe_mmio_range gen12_oa_mux_regs[] = {
static const struct xe_mmio_range xe2_oa_mux_regs[] = {
{ .start = 0x5194, .end = 0x5194 }, /* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
{ .start = 0x8704, .end = 0x8704 }, /* LMEM_LAT_MEASURE_MCFG_GRP */
+ { .start = 0xB01C, .end = 0xB01C }, /* LNCF_MISC_CONFIG_REGISTER0 */
{ .start = 0xB1BC, .end = 0xB1BC }, /* L3_BANK_LAT_MEASURE_LBCF_GFX */
{ .start = 0xD0E0, .end = 0xD0F4 }, /* VISACTL */
{ .start = 0xE18C, .end = 0xE18C }, /* SAMPLER_MODE */
@@ -2448,20 +2493,38 @@ int xe_oa_register(struct xe_device *xe)
static u32 num_oa_units_per_gt(struct xe_gt *gt)
{
- return 1;
+ if (xe_gt_is_main_type(gt) || GRAPHICS_VER(gt_to_xe(gt)) < 20)
+ return 1;
+ else if (!IS_DGFX(gt_to_xe(gt)))
+ return XE_OAM_UNIT_SCMI_0 + 1; /* SAG + SCMI_0 */
+ else
+ return XE_OAM_UNIT_SCMI_1 + 1; /* SAG + SCMI_0 + SCMI_1 */
}
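
Worked counts implied by the branches above: a main GT, or any media GT
before Xe2 (GRAPHICS_VER < 20), exposes 1 OA unit; an integrated Xe2+ media
GT exposes 2 (SAG + SCMI_0); a discrete Xe2+ media GT exposes 3
(SAG + SCMI_0 + SCMI_1).
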
static u32 __hwe_oam_unit(struct xe_hw_engine *hwe)
{
- if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) >= 1270) {
- /*
- * There's 1 SAMEDIA gt and 1 OAM per SAMEDIA gt. All media slices
- * within the gt use the same OAM. All MTL/LNL SKUs list 1 SA MEDIA
- */
- xe_gt_WARN_ON(hwe->gt, hwe->gt->info.type != XE_GT_TYPE_MEDIA);
+ if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) < 1270)
+ return XE_OA_UNIT_INVALID;
+
+ xe_gt_WARN_ON(hwe->gt, xe_gt_is_main_type(hwe->gt));
+ if (GRAPHICS_VER(gt_to_xe(hwe->gt)) < 20)
return 0;
- }
+ /*
+ * XE_OAM_UNIT_SAG has only GSCCS attached to it, and only on some platforms.
+ * Also, GSCCS cannot be used to submit batches to program the OAM unit, so we
+ * don't assign an OA unit to GSCCS. This means XE_OAM_UNIT_SAG is exposed as an
+ * OA unit without attached engines. Fused-off engines can also result in
+ * oa_units with num_engines == 0. OA streams can be opened on all OA units.
+ */
+ else if (hwe->engine_id == XE_HW_ENGINE_GSCCS0)
+ return XE_OA_UNIT_INVALID;
+ else if (!IS_DGFX(gt_to_xe(hwe->gt)))
+ return XE_OAM_UNIT_SCMI_0;
+ else if (hwe->class == XE_ENGINE_CLASS_VIDEO_DECODE)
+ return (hwe->instance / 2 & 0x1) + 1;
+ else if (hwe->class == XE_ENGINE_CLASS_VIDEO_ENHANCE)
+ return (hwe->instance & 0x1) + 1;
return XE_OA_UNIT_INVALID;
}
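For reference, the SCMI assignment above pairs VCS engines two-per-unit and alternates VECS engines between the two units. A minimal standalone sketch of that arithmetic (illustrative only; the real code operates on struct xe_hw_engine and the XE_OAM_UNIT_* enum):

#include <stdio.h>

/* Mirrors the arithmetic in __hwe_oam_unit(); unit 0 is SAG, units 1/2 are SCMI_0/SCMI_1 */
static int vcs_unit(int instance)  { return (instance / 2 & 0x1) + 1; }
static int vecs_unit(int instance) { return (instance & 0x1) + 1; }

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("vcs%d  -> SCMI_%d\n", i, vcs_unit(i) - 1);
	for (int i = 0; i < 2; i++)
		printf("vecs%d -> SCMI_%d\n", i, vecs_unit(i) - 1);
	return 0; /* vcs0/1 -> SCMI_0, vcs2/3 -> SCMI_1, vecs engines alternate */
}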
@@ -2475,6 +2538,7 @@ static u32 __hwe_oa_unit(struct xe_hw_engine *hwe)
case XE_ENGINE_CLASS_VIDEO_DECODE:
case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ case XE_ENGINE_CLASS_OTHER:
return __hwe_oam_unit(hwe);
default:
@@ -2514,20 +2578,29 @@ static struct xe_oa_regs __oag_regs(void)
static void __xe_oa_init_oa_units(struct xe_gt *gt)
{
- const u32 mtl_oa_base[] = { 0x13000 };
+ /* Actual address is MEDIA_GT_GSI_OFFSET + oam_base_addr[i] */
+ const u32 oam_base_addr[] = {
+ [XE_OAM_UNIT_SAG] = 0x13000,
+ [XE_OAM_UNIT_SCMI_0] = 0x14000,
+ [XE_OAM_UNIT_SCMI_1] = 0x14800,
+ };
int i, num_units = gt->oa.num_oa_units;
for (i = 0; i < num_units; i++) {
struct xe_oa_unit *u = &gt->oa.oa_unit[i];
- if (gt->info.type != XE_GT_TYPE_MEDIA) {
+ if (xe_gt_is_main_type(gt)) {
u->regs = __oag_regs();
u->type = DRM_XE_OA_UNIT_TYPE_OAG;
- } else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
- u->regs = __oam_regs(mtl_oa_base[i]);
- u->type = DRM_XE_OA_UNIT_TYPE_OAM;
+ } else {
+ xe_gt_assert(gt, GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270);
+ u->regs = __oam_regs(oam_base_addr[i]);
+ u->type = i == XE_OAM_UNIT_SAG && GRAPHICS_VER(gt_to_xe(gt)) >= 20 ?
+ DRM_XE_OA_UNIT_TYPE_OAM_SAG : DRM_XE_OA_UNIT_TYPE_OAM;
}
+ u->gt = gt;
+
xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);
/* Ensure MMIO trigger remains disabled till there is a stream */
@@ -2560,10 +2633,6 @@ static int xe_oa_init_gt(struct xe_gt *gt)
}
}
- /*
- * Fused off engines can result in oa_unit's with num_engines == 0. These units
- * will appear in OA unit query, but no OA streams can be opened on them.
- */
gt->oa.num_oa_units = num_oa_units;
gt->oa.oa_unit = u;
@@ -2574,17 +2643,54 @@ static int xe_oa_init_gt(struct xe_gt *gt)
return 0;
}
+static void xe_oa_print_gt_oa_units(struct xe_gt *gt)
+{
+ enum xe_hw_engine_id hwe_id;
+ struct xe_hw_engine *hwe;
+ struct xe_oa_unit *u;
+ char buf[256];
+ int i, n;
+
+ for (i = 0; i < gt->oa.num_oa_units; i++) {
+ u = &gt->oa.oa_unit[i];
+ buf[0] = '\0';
+ n = 0;
+
+ for_each_hw_engine(hwe, gt, hwe_id)
+ if (xe_oa_unit_id(hwe) == u->oa_unit_id)
+ n += scnprintf(buf + n, sizeof(buf) - n, "%s ", hwe->name);
+
+ xe_gt_dbg(gt, "oa_unit %d, type %d, Engines: %s\n", u->oa_unit_id, u->type, buf);
+ }
+}
+
+static void xe_oa_print_oa_units(struct xe_oa *oa)
+{
+ struct xe_gt *gt;
+ int gt_id;
+
+ for_each_gt(gt, oa->xe, gt_id)
+ xe_oa_print_gt_oa_units(gt);
+}
+
static int xe_oa_init_oa_units(struct xe_oa *oa)
{
struct xe_gt *gt;
int i, ret;
+ /* The OAM base addresses and unit counts below rely on this exact ordering */
+ BUILD_BUG_ON(XE_OAM_UNIT_SAG != 0);
+ BUILD_BUG_ON(XE_OAM_UNIT_SCMI_0 != 1);
+ BUILD_BUG_ON(XE_OAM_UNIT_SCMI_1 != 2);
+
for_each_gt(gt, oa->xe, i) {
ret = xe_oa_init_gt(gt);
if (ret)
return ret;
}
+ xe_oa_print_oa_units(oa);
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index 52e33c37d5ee..2628f78c4e8d 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -95,6 +95,9 @@ struct xe_oa_unit {
/** @oa_unit_id: identifier for the OA unit */
u16 oa_unit_id;
+ /** @gt: gt associated with the OA unit */
+ struct xe_gt *gt;
+
/** @type: Type of OA unit - OAM, OAG etc. */
enum drm_xe_oa_unit_type type;
@@ -182,6 +185,9 @@ struct xe_oa_stream {
/** @gt: gt associated with the oa stream */
struct xe_gt *gt;
+ /** @oa_unit: oa unit for this stream */
+ struct xe_oa_unit *oa_unit;
+
/** @hwe: hardware engine associated with this oa stream */
struct xe_hw_engine *hwe;
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 30fdbdb9341e..2e7cb99ae87a 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -103,7 +103,8 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = {
*
* Note: There is an implicit assumption in the driver that compression and
* coh_1way+ are mutually exclusive. If this is ever not true then userptr
- * and imported dma-buf from external device will have uncleared ccs state.
+ * and imported dma-buf from external device will have uncleared ccs state. See
+ * also xe_bo_needs_ccs_pages().
*/
#define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \
{ \
@@ -162,21 +163,35 @@ u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
int n_entries)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
for (int i = 0; i < n_entries; i++) {
struct xe_reg reg = XE_REG(_PAT_INDEX(i));
xe_mmio_write32(&gt->mmio, reg, table[i].value);
}
+
+ if (xe->pat.pat_ats)
+ xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe->pat.pat_ats->value);
+ if (xe->pat.pat_pta)
+ xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe->pat.pat_pta->value);
}
static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry table[],
int n_entries)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
for (int i = 0; i < n_entries; i++) {
struct xe_reg_mcr reg_mcr = XE_REG_MCR(_PAT_INDEX(i));
xe_gt_mcr_multicast_write(gt, reg_mcr, table[i].value);
}
+
+ if (xe->pat.pat_ats)
+ xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe->pat.pat_ats->value);
+ if (xe->pat.pat_pta)
+ xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe->pat.pat_pta->value);
}
static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
@@ -303,26 +318,6 @@ static const struct xe_pat_ops xelpg_pat_ops = {
.dump = xelpg_dump,
};
-static void xe2lpg_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
- int n_entries)
-{
- program_pat_mcr(gt, table, n_entries);
- xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe2_pat_ats.value);
-
- if (IS_DGFX(gt_to_xe(gt)))
- xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe2_pat_pta.value);
-}
-
-static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
- int n_entries)
-{
- program_pat(gt, table, n_entries);
- xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe2_pat_ats.value);
-
- if (IS_DGFX(gt_to_xe(gt)))
- xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe2_pat_pta.value);
-}
-
static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -375,8 +370,8 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
}
static const struct xe_pat_ops xe2_pat_ops = {
- .program_graphics = xe2lpg_program_pat,
- .program_media = xe2lpm_program_pat,
+ .program_graphics = program_pat_mcr,
+ .program_media = program_pat,
.dump = xe2_dump,
};
@@ -385,6 +380,9 @@ void xe_pat_init_early(struct xe_device *xe)
if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
xe->pat.ops = &xe2_pat_ops;
xe->pat.table = xe2_pat_table;
+ xe->pat.pat_ats = &xe2_pat_ats;
+ if (IS_DGFX(xe))
+ xe->pat.pat_pta = &xe2_pat_pta;
/* Wa_16023588340. XXX: Should use XE_WA */
if (GRAPHICS_VERx100(xe) == 2001)
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 278af53c74dc..3c40ef426f0c 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -38,43 +38,6 @@ enum toggle_d3cold {
D3COLD_ENABLE,
};
-struct xe_subplatform_desc {
- enum xe_subplatform subplatform;
- const char *name;
- const u16 *pciidlist;
-};
-
-struct xe_device_desc {
- /* Should only ever be set for platforms without GMD_ID */
- const struct xe_ip *pre_gmdid_graphics_ip;
- /* Should only ever be set for platforms without GMD_ID */
- const struct xe_ip *pre_gmdid_media_ip;
-
- const char *platform_name;
- const struct xe_subplatform_desc *subplatforms;
-
- enum xe_platform platform;
-
- u8 dma_mask_size;
- u8 max_remote_tiles:2;
-
- u8 require_force_probe:1;
- u8 is_dgfx:1;
-
- u8 has_display:1;
- u8 has_fan_control:1;
- u8 has_heci_gscfi:1;
- u8 has_heci_cscfi:1;
- u8 has_llc:1;
- u8 has_mbx_power_limits:1;
- u8 has_pxp:1;
- u8 has_sriov:1;
- u8 needs_scratch:1;
- u8 skip_guc_pc:1;
- u8 skip_mtcfg:1;
- u8 skip_pcode:1;
-};
-
__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");
@@ -179,9 +142,11 @@ static const struct xe_ip graphics_ips[] = {
{ 1271, "Xe_LPG", &graphics_xelpg },
{ 1274, "Xe_LPG+", &graphics_xelpg },
{ 2001, "Xe2_HPG", &graphics_xe2 },
+ { 2002, "Xe2_HPG", &graphics_xe2 },
{ 2004, "Xe2_LPG", &graphics_xe2 },
{ 3000, "Xe3_LPG", &graphics_xe2 },
{ 3001, "Xe3_LPG", &graphics_xe2 },
+ { 3003, "Xe3_LPG", &graphics_xe2 },
};
/* Pre-GMDID Media IPs */
@@ -194,6 +159,7 @@ static const struct xe_ip media_ips[] = {
{ 1301, "Xe2_HPM", &media_xelpmp },
{ 2000, "Xe2_LPM", &media_xelpmp },
{ 3000, "Xe3_LPM", &media_xelpmp },
+ { 3002, "Xe3_LPM", &media_xelpmp },
};
static const struct xe_device_desc tgl_desc = {
@@ -203,6 +169,7 @@ static const struct xe_device_desc tgl_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -213,6 +180,7 @@ static const struct xe_device_desc rkl_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -225,6 +193,7 @@ static const struct xe_device_desc adl_s_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
@@ -241,6 +210,7 @@ static const struct xe_device_desc adl_p_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
@@ -255,6 +225,7 @@ static const struct xe_device_desc adl_n_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -268,7 +239,9 @@ static const struct xe_device_desc dg1_desc = {
PLATFORM(DG1),
.dma_mask_size = 39,
.has_display = true,
+ .has_gsc_nvm = 1,
.has_heci_gscfi = 1,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -279,6 +252,7 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
#define DG2_FEATURES \
DGFX_FEATURES, \
PLATFORM(DG2), \
+ .has_gsc_nvm = 1, \
.has_heci_gscfi = 1, \
.subplatforms = (const struct xe_subplatform_desc[]) { \
{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
@@ -291,6 +265,7 @@ static const struct xe_device_desc ats_m_desc = {
.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
.pre_gmdid_media_ip = &media_ip_xehpm,
.dma_mask_size = 46,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
DG2_FEATURES,
@@ -301,6 +276,7 @@ static const struct xe_device_desc dg2_desc = {
.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
.pre_gmdid_media_ip = &media_ip_xehpm,
.dma_mask_size = 46,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
DG2_FEATURES,
@@ -315,7 +291,9 @@ static const __maybe_unused struct xe_device_desc pvc_desc = {
PLATFORM(PVC),
.dma_mask_size = 52,
.has_display = false,
+ .has_gsc_nvm = 1,
.has_heci_gscfi = 1,
+ .max_gt_per_tile = 1,
.max_remote_tiles = 1,
.require_force_probe = true,
.has_mbx_power_limits = false,
@@ -328,6 +306,7 @@ static const struct xe_device_desc mtl_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_pxp = true,
+ .max_gt_per_tile = 2,
};
static const struct xe_device_desc lnl_desc = {
@@ -335,6 +314,7 @@ static const struct xe_device_desc lnl_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_pxp = true,
+ .max_gt_per_tile = 2,
.needs_scratch = true,
};
@@ -345,7 +325,10 @@ static const struct xe_device_desc bmg_desc = {
.has_display = true,
.has_fan_control = true,
.has_mbx_power_limits = true,
+ .has_gsc_nvm = 1,
.has_heci_cscfi = 1,
+ .has_sriov = true,
+ .max_gt_per_tile = 2,
.needs_scratch = true,
};
@@ -354,7 +337,7 @@ static const struct xe_device_desc ptl_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_sriov = true,
- .require_force_probe = true,
+ .max_gt_per_tile = 2,
.needs_scratch = true,
};
@@ -588,6 +571,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.is_dgfx = desc->is_dgfx;
xe->info.has_fan_control = desc->has_fan_control;
xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
+ xe->info.has_gsc_nvm = desc->has_gsc_nvm;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
xe->info.has_heci_cscfi = desc->has_heci_cscfi;
xe->info.has_llc = desc->has_llc;
@@ -601,6 +585,10 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
xe_modparam.probe_display &&
desc->has_display;
+
+ xe_assert(xe, desc->max_gt_per_tile > 0);
+ xe_assert(xe, desc->max_gt_per_tile <= XE_MAX_GT_PER_TILE);
+ xe->info.max_gt_per_tile = desc->max_gt_per_tile;
xe->info.tile_count = 1 + desc->max_remote_tiles;
err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
@@ -700,10 +688,11 @@ static int xe_info_init(struct xe_device *xe,
*/
for_each_tile(tile, xe, id) {
gt = tile->primary_gt;
- gt->info.id = xe->info.gt_count++;
gt->info.type = XE_GT_TYPE_MAIN;
+ gt->info.id = tile->id * xe->info.max_gt_per_tile;
gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
gt->info.engine_mask = graphics_desc->hw_engine_mask;
+ xe->info.gt_count++;
if (MEDIA_VER(xe) < 13 && media_desc)
gt->info.engine_mask |= media_desc->hw_engine_mask;
@@ -721,17 +710,10 @@ static int xe_info_init(struct xe_device *xe,
gt = tile->media_gt;
gt->info.type = XE_GT_TYPE_MEDIA;
+ gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
gt->info.engine_mask = media_desc->hw_engine_mask;
-
- /*
- * FIXME: At the moment multi-tile and standalone media are
- * mutually exclusive on current platforms. We'll need to
- * come up with a better way to number GTs if we ever wind
- * up with platforms that support both together.
- */
- drm_WARN_ON(&xe->drm, id != 0);
- gt->info.id = xe->info.gt_count++;
+ xe->info.gt_count++;
}
return 0;
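With this change, a GT id is a fixed function of its tile id rather than a running counter, so ids stay stable even when some GTs are absent; only gt_count stays dense. A standalone sketch of the resulting numbering (two tiles assumed for illustration):

#include <stdio.h>

#define MAX_GT_PER_TILE 2 /* xe->info.max_gt_per_tile on current platforms */

int main(void)
{
	for (int tile = 0; tile < 2; tile++)
		printf("tile %d: primary GT id %d, media GT id %d\n",
		       tile, tile * MAX_GT_PER_TILE, tile * MAX_GT_PER_TILE + 1);
	return 0; /* ids 0..3; a missing media GT simply leaves a hole */
}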
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index 8813efdcafbb..af05db07162e 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -3,6 +3,10 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <linux/bitops.h>
+#include <linux/pci.h>
+
+#include "regs/xe_bars.h"
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
@@ -12,6 +16,7 @@
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_printk.h"
@@ -127,6 +132,18 @@ static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs,
}
}
+static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ u32 sizes;
+
+ sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs);
+ if (!sizes)
+ return 0;
+
+ return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes));
+}
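pci_iov_vf_bar_get_sizes() returns a bitmask of supported BAR sizes in the resizable-BAR encoding, where bit n set means 2^n MB is supported, so __fls() picks the largest one. A standalone illustration of that selection (the mask value is made up):

#include <stdio.h>

static unsigned int fls_bit(unsigned int x) /* like __fls() for x != 0 */
{
	unsigned int n = 0;

	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	unsigned int sizes = 0x3c0; /* bits 6..9: 64, 128, 256, 512 MB supported */

	printf("largest supported VF BAR: %u MB\n", 1u << fls_bit(sizes));
	return 0; /* prints 512 MB; the index is what ..._set_size() takes */
}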
+
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -138,6 +155,10 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
xe_assert(xe, num_vfs <= total_vfs);
xe_sriov_dbg(xe, "enabling %u VF%s\n", num_vfs, str_plural(num_vfs));
+ err = xe_sriov_pf_wait_ready(xe);
+ if (err)
+ goto out;
+
/*
* We must hold additional reference to the runtime PM to keep PF in D0
* during VFs lifetime, as our VFs do not implement the PM capability.
@@ -153,6 +174,12 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
if (err < 0)
goto failed;
+ if (IS_DGFX(xe)) {
+ err = resize_vf_vram_bar(xe, num_vfs);
+ if (err)
+ xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err);
+ }
+
err = pci_enable_sriov(pdev, num_vfs);
if (err < 0)
goto failed;
@@ -169,7 +196,7 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
failed:
pf_unprovision_vfs(xe, num_vfs);
xe_pm_runtime_put(xe);
-
+out:
xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
num_vfs, str_plural(num_vfs), ERR_PTR(err));
return err;
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index ca6b10d35573..4de6f69ed975 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -8,6 +8,47 @@
#include <linux/types.h>
+#include "xe_platform_types.h"
+
+struct xe_subplatform_desc {
+ enum xe_subplatform subplatform;
+ const char *name;
+ const u16 *pciidlist;
+};
+
+struct xe_device_desc {
+ /* Should only ever be set for platforms without GMD_ID */
+ const struct xe_ip *pre_gmdid_graphics_ip;
+ /* Should only ever be set for platforms without GMD_ID */
+ const struct xe_ip *pre_gmdid_media_ip;
+
+ const char *platform_name;
+ const struct xe_subplatform_desc *subplatforms;
+
+ enum xe_platform platform;
+
+ u8 dma_mask_size;
+ u8 max_remote_tiles:2;
+ u8 max_gt_per_tile:2;
+
+ u8 require_force_probe:1;
+ u8 is_dgfx:1;
+
+ u8 has_display:1;
+ u8 has_fan_control:1;
+ u8 has_gsc_nvm:1;
+ u8 has_heci_gscfi:1;
+ u8 has_heci_cscfi:1;
+ u8 has_llc:1;
+ u8 has_mbx_power_limits:1;
+ u8 has_pxp:1;
+ u8 has_sriov:1;
+ u8 needs_scratch:1;
+ u8 skip_guc_pc:1;
+ u8 skip_mtcfg:1;
+ u8 skip_pcode:1;
+};
+
struct xe_graphics_desc {
u8 va_bits;
u8 vm_max_level;
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 9189117fe825..6a7ddb9005f9 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -336,3 +336,33 @@ int xe_pcode_probe_early(struct xe_device *xe)
return xe_pcode_ready(xe, false);
}
ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */
+
+/* Helpers that take a drm_device. These should only be called by the display side */
+#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
+
+int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_read(tile, mbox, val, val1);
+}
+
+int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_write_timeout(tile, mbox, val, timeout_ms);
+}
+
+int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_request(tile, mbox, request, reply_mask, reply, timeout_base_ms);
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index de38f44f3201..a5584c1c75f9 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -7,8 +7,10 @@
#define _XE_PCODE_H_
#include <linux/types.h>
-struct xe_tile;
+
+struct drm_device;
struct xe_device;
+struct xe_tile;
void xe_pcode_init(struct xe_tile *tile);
int xe_pcode_probe_early(struct xe_device *xe);
@@ -32,4 +34,12 @@ int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
| FIELD_PREP(PCODE_MB_PARAM1, param1)\
| FIELD_PREP(PCODE_MB_PARAM2, param2))
+/* Helpers with drm device */
+int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
+int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
+#define intel_pcode_write(drm, mbox, val) \
+ intel_pcode_write_timeout((drm), (mbox), (val), 1)
+int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h
index 0befdea77db1..92bfcba51e19 100644
--- a/drivers/gpu/drm/xe/xe_pcode_api.h
+++ b/drivers/gpu/drm/xe/xe_pcode_api.h
@@ -50,6 +50,21 @@
#define READ_PL_FROM_FW 0x1
#define READ_PL_FROM_PCODE 0x0
+#define PCODE_LATE_BINDING 0x5C
+#define GET_CAPABILITY_STATUS 0x0
+#define V1_FAN_SUPPORTED REG_BIT(0)
+#define VR_PARAMS_SUPPORTED REG_BIT(3)
+#define V1_FAN_PROVISIONED REG_BIT(16)
+#define VR_PARAMS_PROVISIONED REG_BIT(19)
+#define GET_VERSION_LOW 0x1
+#define GET_VERSION_HIGH 0x2
+#define MAJOR_VERSION_MASK REG_GENMASK(31, 16)
+#define MINOR_VERSION_MASK REG_GENMASK(15, 0)
+#define HOTFIX_VERSION_MASK REG_GENMASK(31, 16)
+#define BUILD_VERSION_MASK REG_GENMASK(15, 0)
+#define FAN_TABLE 1
+#define VR_CONFIG 2
+
#define PCODE_FREQUENCY_CONFIG 0x6e
/* Frequency Config Sub Commands (param1) */
#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index ad263de44111..e279b47ba03b 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -19,6 +19,7 @@
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
+#include "xe_i2c.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
@@ -146,6 +147,8 @@ int xe_pm_suspend(struct xe_device *xe)
xe_display_pm_suspend_late(xe);
+ xe_i2c_pm_suspend(xe);
+
drm_dbg(&xe->drm, "Device suspended\n");
return 0;
@@ -190,6 +193,8 @@ int xe_pm_resume(struct xe_device *xe)
if (err)
goto err;
+ xe_i2c_pm_resume(xe, xe->d3cold.allowed);
+
xe_irq_resume(xe);
for_each_gt(gt, xe, id)
@@ -487,6 +492,8 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_display_pm_runtime_suspend_late(xe);
+ xe_i2c_pm_suspend(xe);
+
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return 0;
@@ -534,6 +541,8 @@ int xe_pm_runtime_resume(struct xe_device *xe)
goto out;
}
+ xe_i2c_pm_resume(xe, xe->d3cold.allowed);
+
xe_irq_resume(xe);
for_each_gt(gt, xe, id)
diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c
index 69df0e3520a5..cab51d826345 100644
--- a/drivers/gpu/drm/xe/xe_pmu.c
+++ b/drivers/gpu/drm/xe/xe_pmu.c
@@ -157,10 +157,13 @@ static bool event_gt_forcewake(struct perf_event *event)
return true;
}
-static bool event_supported(struct xe_pmu *pmu, unsigned int gt,
+static bool event_supported(struct xe_pmu *pmu, unsigned int gt_id,
unsigned int id)
{
- if (gt >= XE_MAX_GT_PER_TILE)
+ struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
+ struct xe_gt *gt = xe_device_get_gt(xe, gt_id);
+
+ if (!gt)
return false;
return id < sizeof(pmu->supported_events) * BITS_PER_BYTE &&
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index b04756a97cdc..c8e63bd23300 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -907,6 +907,11 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
u8 pt_mask = (vma->tile_present & ~vma->tile_invalidated);
+ if (xe_vma_bo(vma))
+ xe_bo_assert_held(xe_vma_bo(vma));
+ else if (xe_vma_is_userptr(vma))
+ lockdep_assert_held(&xe_vma_vm(vma)->userptr.notifier_lock);
+
if (!(pt_mask & BIT(tile->id)))
return false;
@@ -1458,6 +1463,7 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
struct xe_vm *vm = pt_update->vops->vm;
struct xe_vma_ops *vops = pt_update->vops;
struct xe_vma_op *op;
+ unsigned long i;
int err;
err = xe_pt_pre_commit(pt_update);
@@ -1467,20 +1473,35 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
xe_svm_notifier_lock(vm);
list_for_each_entry(op, &vops->list, link) {
- struct xe_svm_range *range = op->map_range.range;
+ struct xe_svm_range *range = NULL;
if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
continue;
- xe_svm_range_debug(range, "PRE-COMMIT");
+ if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
+ xe_assert(vm->xe,
+ xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
+ xa_for_each(&op->prefetch_range.range, i, range) {
+ xe_svm_range_debug(range, "PRE-COMMIT");
- xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
- xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+ xe_svm_notifier_unlock(vm);
+ return -ENODATA;
+ }
+ }
+ } else {
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+ xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+ range = op->map_range.range;
- if (!xe_svm_range_pages_valid(range)) {
- xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
- xe_svm_notifier_unlock(vm);
- return -EAGAIN;
+ xe_svm_range_debug(range, "PRE-COMMIT");
+
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+ xe_svm_notifier_unlock(vm);
+ return -EAGAIN;
+ }
}
}
@@ -1974,6 +1995,32 @@ static int unbind_op_prepare(struct xe_tile *tile,
return 0;
}
+static bool
+xe_pt_op_check_range_skip_invalidation(struct xe_vm_pgtable_update_op *pt_op,
+ struct xe_svm_range *range)
+{
+ struct xe_vm_pgtable_update *update = pt_op->entries;
+
+ XE_WARN_ON(!pt_op->num_entries);
+
+ /*
+ * We can't skip the invalidation if we are removing PTEs that span more
+ * than the range, so do some checks to ensure the PTEs being removed are
+ * fully contained within the range.
+ */
+
+ if (pt_op->num_entries > 1)
+ return false;
+
+ if (update->pt->level == 0)
+ return true;
+
+ if (update->pt->level == 1)
+ return xe_svm_range_size(range) >= SZ_2M;
+
+ return false;
+}
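The level checks follow from how much address space a single page-table entry spans: with 4K pages and 512 entries per level, a level-0 PTE covers 4K and a level-1 entry covers 2M, so a one-entry unbind at level 1 can only skip invalidation when the range itself is at least 2M. A quick standalone check of those spans:

#include <stdio.h>

int main(void)
{
	unsigned long span = 4096; /* level 0: one 4K PTE */

	for (int level = 0; level <= 2; level++) {
		printf("level %d entry spans %lu KB\n", level, span >> 10);
		span *= 512; /* each level fans out 512x */
	}
	return 0; /* 4K, 2M, 1G - hence the SZ_2M test at level 1 */
}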
+
static int unbind_range_prepare(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -2002,7 +2049,10 @@ static int unbind_range_prepare(struct xe_vm *vm,
range->base.itree.last + 1);
++pt_update_ops->current_op;
pt_update_ops->needs_svm_lock = true;
- pt_update_ops->needs_invalidation = true;
+ pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) ||
+ xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
+ range->tile_invalidated) ||
+ !xe_pt_op_check_range_skip_invalidation(pt_op, range);
xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries,
pt_op->num_entries);
@@ -2065,11 +2115,20 @@ static int op_prepare(struct xe_vm *vm,
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- if (xe_vma_is_cpu_addr_mirror(vma))
- break;
+ if (xe_vma_is_cpu_addr_mirror(vma)) {
+ struct xe_svm_range *range;
+ unsigned long i;
- err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
- pt_update_ops->wait_vm_kernel = true;
+ xa_for_each(&op->prefetch_range.range, i, range) {
+ err = bind_range_prepare(vm, tile, pt_update_ops,
+ vma, range);
+ if (err)
+ return err;
+ }
+ } else {
+ err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
+ pt_update_ops->wait_vm_kernel = true;
+ }
break;
}
case DRM_GPUVA_OP_DRIVER:
@@ -2166,10 +2225,15 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
}
- vma->tile_present |= BIT(tile->id);
- vma->tile_staged &= ~BIT(tile->id);
+ /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
+ WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
if (invalidate_on_bind)
- vma->tile_invalidated |= BIT(tile->id);
+ WRITE_ONCE(vma->tile_invalidated,
+ vma->tile_invalidated | BIT(tile->id));
+ else
+ WRITE_ONCE(vma->tile_invalidated,
+ vma->tile_invalidated & ~BIT(tile->id));
+ vma->tile_staged &= ~BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
lockdep_assert_held_read(&vm->userptr.notifier_lock);
to_userptr_vma(vma)->userptr.initial_bind = true;
@@ -2216,6 +2280,18 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
}
}
+static void range_present_and_invalidated_tile(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_id)
+{
+ /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
+
+ lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
+
+ WRITE_ONCE(range->tile_present, range->tile_present | BIT(tile_id));
+ WRITE_ONCE(range->tile_invalidated, range->tile_invalidated & ~BIT(tile_id));
+}
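These stores pair with READ_ONCE-based checks in xe_vm_has_valid_gpu_mapping(); a standalone sketch of what that reader computes (the exact helper lives in xe_vm.h and may differ in form):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A mapping is valid for a tile when present and not invalidated; the real
 * code snapshots both masks with READ_ONCE() before testing them. */
static bool has_valid_gpu_mapping(uint8_t tile_id, uint8_t tile_present,
				  uint8_t tile_invalidated)
{
	return tile_present & ~tile_invalidated & (1u << tile_id);
}

int main(void)
{
	printf("%d\n", has_valid_gpu_mapping(0, 0x1, 0x0)); /* 1: valid */
	printf("%d\n", has_valid_gpu_mapping(0, 0x1, 0x1)); /* 0: invalidated */
	return 0;
}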
+
static void op_commit(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -2263,27 +2339,28 @@ static void op_commit(struct xe_vm *vm,
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- if (!xe_vma_is_cpu_addr_mirror(vma))
+ if (xe_vma_is_cpu_addr_mirror(vma)) {
+ struct xe_svm_range *range = NULL;
+ unsigned long i;
+
+ xa_for_each(&op->prefetch_range.range, i, range)
+ range_present_and_invalidated_tile(vm, range, tile->id);
+ } else {
bind_op_commit(vm, tile, pt_update_ops, vma, fence,
fence2, false);
+ }
break;
}
case DRM_GPUVA_OP_DRIVER:
{
- /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
-
- if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
- WRITE_ONCE(op->map_range.range->tile_present,
- op->map_range.range->tile_present |
- BIT(tile->id));
- WRITE_ONCE(op->map_range.range->tile_invalidated,
- op->map_range.range->tile_invalidated &
- ~BIT(tile->id));
- } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
+ /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE)
+ range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
+ else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
WRITE_ONCE(op->unmap_range.range->tile_present,
op->unmap_range.range->tile_present &
~BIT(tile->id));
- }
+
break;
}
default:
@@ -2476,7 +2553,7 @@ free_ifence:
kfree(mfence);
kfree(ifence);
kill_vm_tile1:
- if (err != -EAGAIN && tile->id)
+ if (err != -EAGAIN && err != -ENODATA && tile->id)
xe_vm_kill(vops->vm, false);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index b5bc15f436fa..3d62008c99f1 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -504,69 +504,62 @@ int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 t
return 0;
}
-static void __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+static int __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
- spin_lock_irq(&pxp->queues.lock);
- list_add_tail(&q->pxp.link, &pxp->queues.list);
- spin_unlock_irq(&pxp->queues.lock);
+ int ret = 0;
+
+ /*
+ * A queue can be added to the list only if PXP is in the active state,
+ * otherwise the termination might not handle it correctly.
+ */
+ mutex_lock(&pxp->mutex);
+
+ if (pxp->status == XE_PXP_ACTIVE) {
+ spin_lock_irq(&pxp->queues.lock);
+ list_add_tail(&q->pxp.link, &pxp->queues.list);
+ spin_unlock_irq(&pxp->queues.lock);
+ } else if (pxp->status == XE_PXP_ERROR || pxp->status == XE_PXP_SUSPENDED) {
+ ret = -EIO;
+ } else {
+ ret = -EBUSY; /* try again later */
+ }
+
+ mutex_unlock(&pxp->mutex);
+
+ return ret;
}
-/**
- * xe_pxp_exec_queue_add - add a queue to the PXP list
- * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
- * @q: the queue to add to the list
- *
- * If PXP is enabled and the prerequisites are done, start the PXP ARB
- * session (if not already running) and add the queue to the PXP list. Note
- * that the queue must have previously been marked as using PXP with
- * xe_pxp_exec_queue_set_type.
- *
- * Returns 0 if the PXP ARB session is running and the queue is in the list,
- * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
- * other errno value if something goes wrong during the session start.
- */
-int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+static int pxp_start(struct xe_pxp *pxp, u8 type)
{
int ret = 0;
+ bool restart = false;
if (!xe_pxp_is_enabled(pxp))
return -ENODEV;
/* we only support HWDRM sessions right now */
- xe_assert(pxp->xe, q->pxp.type == DRM_XE_PXP_TYPE_HWDRM);
-
- /*
- * Runtime suspend kills PXP, so we take a reference to prevent it from
- * happening while we have active queues that use PXP
- */
- xe_pm_runtime_get(pxp->xe);
+ xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);
/* get_readiness_status() returns 0 for in-progress and 1 for done */
ret = xe_pxp_get_readiness_status(pxp);
- if (ret <= 0) {
- if (!ret)
- ret = -EBUSY;
- goto out;
- }
+ if (ret <= 0)
+ return ret ?: -EBUSY;
+
ret = 0;
wait_for_idle:
/*
* if there is an action in progress, wait for it. We need to wait
* outside the lock because the completion is done from within the lock.
- * Note that the two action should never be pending at the same time.
+ * Note that the two actions should never be pending at the same time.
*/
if (!wait_for_completion_timeout(&pxp->termination,
- msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS))) {
- ret = -ETIMEDOUT;
- goto out;
- }
+ msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
+ return -ETIMEDOUT;
if (!wait_for_completion_timeout(&pxp->activation,
- msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS))) {
- ret = -ETIMEDOUT;
- goto out;
- }
+ msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
+ return -ETIMEDOUT;
mutex_lock(&pxp->mutex);
@@ -574,11 +567,9 @@ wait_for_idle:
switch (pxp->status) {
case XE_PXP_ERROR:
ret = -EIO;
- break;
+ goto out_unlock;
case XE_PXP_ACTIVE:
- __exec_queue_add(pxp, q);
- mutex_unlock(&pxp->mutex);
- goto out;
+ goto out_unlock;
case XE_PXP_READY_TO_START:
pxp->status = XE_PXP_START_IN_PROGRESS;
reinit_completion(&pxp->activation);
@@ -586,8 +577,8 @@ wait_for_idle:
case XE_PXP_START_IN_PROGRESS:
/* If a start is in progress then the completion must not be done */
XE_WARN_ON(completion_done(&pxp->activation));
- mutex_unlock(&pxp->mutex);
- goto wait_for_idle;
+ restart = true;
+ goto out_unlock;
case XE_PXP_NEEDS_TERMINATION:
mark_termination_in_progress(pxp);
break;
@@ -595,29 +586,25 @@ wait_for_idle:
case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
/* If a termination is in progress then the completion must not be done */
XE_WARN_ON(completion_done(&pxp->termination));
- mutex_unlock(&pxp->mutex);
- goto wait_for_idle;
+ restart = true;
+ goto out_unlock;
case XE_PXP_SUSPENDED:
default:
drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status);
ret = -EIO;
- break;
+ goto out_unlock;
}
mutex_unlock(&pxp->mutex);
- if (ret)
- goto out;
-
if (!completion_done(&pxp->termination)) {
ret = pxp_terminate_hw(pxp);
if (ret) {
drm_err(&pxp->xe->drm, "PXP termination failed before start\n");
mutex_lock(&pxp->mutex);
pxp->status = XE_PXP_ERROR;
- mutex_unlock(&pxp->mutex);
- goto out;
+ goto out_unlock;
}
goto wait_for_idle;
@@ -639,21 +626,59 @@ wait_for_idle:
if (pxp->status != XE_PXP_START_IN_PROGRESS) {
drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status);
pxp->status = XE_PXP_NEEDS_TERMINATION;
- mutex_unlock(&pxp->mutex);
- goto wait_for_idle;
+ restart = true;
+ goto out_unlock;
}
/* If everything went ok, update the status and add the queue to the list */
- if (!ret) {
+ if (!ret)
pxp->status = XE_PXP_ACTIVE;
- __exec_queue_add(pxp, q);
- } else {
+ else
pxp->status = XE_PXP_ERROR;
- }
+out_unlock:
mutex_unlock(&pxp->mutex);
-out:
+ if (restart)
+ goto wait_for_idle;
+
+ return ret;
+}
+
+/**
+ * xe_pxp_exec_queue_add - add a queue to the PXP list
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @q: the queue to add to the list
+ *
+ * If PXP is enabled and the prerequisites are done, start the PXP default
+ * session (if not already running) and add the queue to the PXP list.
+ *
+ * Returns 0 if the PXP session is running and the queue is in the list,
+ * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
+ * other errno value if something goes wrong during the session start.
+ */
+int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+{
+ int ret;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ /*
+ * Runtime suspend kills PXP, so we take a reference to prevent it from
+ * happening while we have active queues that use PXP
+ */
+ xe_pm_runtime_get(pxp->xe);
+
+start:
+ ret = pxp_start(pxp, q->pxp.type);
+
+ if (!ret) {
+ ret = __exec_queue_add(pxp, q);
+ if (ret == -EBUSY)
+ goto start;
+ }
+
/*
* in the successful case the PM ref is released from
* xe_pxp_exec_queue_remove
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 2dbf4066d86f..d517ec9ddcbf 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -141,7 +141,7 @@ query_engine_cycles(struct xe_device *xe,
return -EINVAL;
eci = &resp.eci;
- if (eci->gt_id >= XE_MAX_GT_PER_TILE)
+ if (eci->gt_id >= xe->info.max_gt_per_tile)
return -EINVAL;
gt = xe_device_get_gt(xe, eci->gt_id);
@@ -368,6 +368,7 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
struct drm_xe_query_gt_list __user *query_ptr =
u64_to_user_ptr(query->data);
struct drm_xe_query_gt_list *gt_list;
+ int iter = 0;
u8 id;
if (query->size == 0) {
@@ -385,12 +386,12 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
for_each_gt(gt, xe, id) {
if (xe_gt_is_media_type(gt))
- gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
+ gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
else
- gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MAIN;
- gt_list->gt_list[id].tile_id = gt_to_tile(gt)->id;
- gt_list->gt_list[id].gt_id = gt->info.id;
- gt_list->gt_list[id].reference_clock = gt->info.reference_clock;
+ gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
+ gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
+ gt_list->gt_list[iter].gt_id = gt->info.id;
+ gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
/*
* The mem_regions indexes in the mask below need to
* directly identify the struct
@@ -406,19 +407,21 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
* assumption.
*/
if (!IS_DGFX(xe))
- gt_list->gt_list[id].near_mem_regions = 0x1;
+ gt_list->gt_list[iter].near_mem_regions = 0x1;
else
- gt_list->gt_list[id].near_mem_regions =
+ gt_list->gt_list[iter].near_mem_regions =
BIT(gt_to_tile(gt)->id) << 1;
- gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^
- gt_list->gt_list[id].near_mem_regions;
+ gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
+ gt_list->gt_list[iter].near_mem_regions;
- gt_list->gt_list[id].ip_ver_major =
+ gt_list->gt_list[iter].ip_ver_major =
REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
- gt_list->gt_list[id].ip_ver_minor =
+ gt_list->gt_list[iter].ip_ver_minor =
REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
- gt_list->gt_list[id].ip_ver_rev =
+ gt_list->gt_list[iter].ip_ver_rev =
REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);
+
+ iter++;
}
if (copy_to_user(query_ptr, gt_list, size)) {
@@ -683,8 +686,8 @@ static int query_oa_units(struct xe_device *xe,
du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
- DRM_XE_OA_CAPS_WAIT_NUM_REPORTS;
-
+ DRM_XE_OA_CAPS_WAIT_NUM_REPORTS |
+ DRM_XE_OA_CAPS_OAM;
j = 0;
for_each_hw_engine(hwe, gt, hwe_id) {
if (!xe_hw_engine_is_reserved(hwe) &&
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 29e694bb1219..95571b87aa73 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -56,37 +56,61 @@ static bool rule_matches(const struct xe_device *xe,
xe->info.subplatform == r->subplatform;
break;
case XE_RTP_MATCH_GRAPHICS_VERSION:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.graphics_verx100 == r->ver_start &&
(!has_samedia(xe) || !xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_GRAPHICS_VERSION_RANGE:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.graphics_verx100 >= r->ver_start &&
xe->info.graphics_verx100 <= r->ver_end &&
(!has_samedia(xe) || !xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_GRAPHICS_VERSION_ANY_GT:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.graphics_verx100 == r->ver_start;
break;
case XE_RTP_MATCH_GRAPHICS_STEP:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.step.graphics >= r->step_start &&
xe->info.step.graphics < r->step_end &&
(!has_samedia(xe) || !xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_VERSION:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.media_verx100 == r->ver_start &&
(!has_samedia(xe) || xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_VERSION_RANGE:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.media_verx100 >= r->ver_start &&
xe->info.media_verx100 <= r->ver_end &&
(!has_samedia(xe) || xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_STEP:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.step.media >= r->step_start &&
xe->info.step.media < r->step_end &&
(!has_samedia(xe) || xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_VERSION_ANY_GT:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.media_verx100 == r->ver_start;
break;
case XE_RTP_MATCH_INTEGRATED:
@@ -108,6 +132,9 @@ static bool rule_matches(const struct xe_device *xe,
match = hwe->class != r->engine_class;
break;
case XE_RTP_MATCH_FUNC:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = r->match_func(gt, hwe);
break;
default:
@@ -186,6 +213,11 @@ static void rtp_get_context(struct xe_rtp_process_ctx *ctx,
struct xe_device **xe)
{
switch (ctx->type) {
+ case XE_RTP_PROCESS_TYPE_DEVICE:
+ *hwe = NULL;
+ *gt = NULL;
+ *xe = ctx->xe;
+ break;
case XE_RTP_PROCESS_TYPE_GT:
*hwe = NULL;
*gt = ctx->gt;
@@ -326,21 +358,6 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
hwe->engine_id == __ffs(render_compute_mask);
}
-bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
- const struct xe_hw_engine *hwe)
-{
- unsigned int dss_per_gslice = 4;
- unsigned int dss;
-
- if (drm_WARN(&gt_to_xe(gt)->drm, xe_dss_mask_empty(gt->fuse_topo.g_dss_mask),
- "Checking gslice for platform without geometry pipeline\n"))
- return false;
-
- dss = xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0);
-
- return dss >= dss_per_gslice;
-}
-
bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 4fe736a11c42..5ed6c14b9ae3 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -422,7 +422,8 @@ struct xe_reg_sr;
#define XE_RTP_PROCESS_CTX_INITIALIZER(arg__) _Generic((arg__), \
struct xe_hw_engine * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_ENGINE }, \
- struct xe_gt * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT })
+ struct xe_gt * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT }, \
+ struct xe_device * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_DEVICE })
void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
unsigned long *active_entries,
@@ -466,17 +467,6 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
/*
- * xe_rtp_match_first_gslice_fused_off - Match when first gslice is fused off
- *
- * @gt: GT structure
- * @hwe: Engine instance
- *
- * Returns: true if first gslice is fused off, false otherwise.
- */
-bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
- const struct xe_hw_engine *hwe);
-
-/*
* xe_rtp_match_not_sriov_vf - Match when not on SR-IOV VF device
*
* @gt: GT structure
diff --git a/drivers/gpu/drm/xe/xe_rtp_types.h b/drivers/gpu/drm/xe/xe_rtp_types.h
index 1b76b947c706..f4cf30e298cf 100644
--- a/drivers/gpu/drm/xe/xe_rtp_types.h
+++ b/drivers/gpu/drm/xe/xe_rtp_types.h
@@ -110,12 +110,14 @@ struct xe_rtp_entry {
};
enum xe_rtp_process_type {
+ XE_RTP_PROCESS_TYPE_DEVICE,
XE_RTP_PROCESS_TYPE_GT,
XE_RTP_PROCESS_TYPE_ENGINE,
};
struct xe_rtp_process_ctx {
union {
+ struct xe_device *xe;
struct xe_gt *gt;
struct xe_hw_engine *hwe;
};
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 1905ca590965..d21bf8f26964 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -113,7 +113,8 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
kref_init(&job->refcount);
xe_exec_queue_get(job->q);
- err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
+ err = drm_sched_job_init(&job->drm, q->entity, 1, NULL,
+ q->xef ? q->xef->drm->client_id : 0);
if (err)
goto err_free;
@@ -216,15 +217,17 @@ void xe_sched_job_set_error(struct xe_sched_job *job, int error)
bool xe_sched_job_started(struct xe_sched_job *job)
{
+ struct dma_fence *fence = dma_fence_chain_contained(job->fence);
struct xe_lrc *lrc = job->q->lrc[0];
- return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
- xe_lrc_start_seqno(lrc),
- dma_fence_chain_contained(job->fence)->ops);
+ return !__dma_fence_is_later(fence,
+ xe_sched_job_lrc_seqno(job),
+ xe_lrc_start_seqno(lrc));
}
bool xe_sched_job_completed(struct xe_sched_job *job)
{
+ struct dma_fence *fence = dma_fence_chain_contained(job->fence);
struct xe_lrc *lrc = job->q->lrc[0];
/*
@@ -232,9 +235,9 @@ bool xe_sched_job_completed(struct xe_sched_job *job)
* parallel handshake is done.
*/
- return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
- xe_lrc_seqno(lrc),
- dma_fence_chain_contained(job->fence)->ops);
+ return !__dma_fence_is_later(fence,
+ xe_sched_job_lrc_seqno(job),
+ xe_lrc_seqno(lrc));
}
void xe_sched_job_arm(struct xe_sched_job *job)
diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c
index 86d47aaf0358..90244fe59b59 100644
--- a/drivers/gpu/drm/xe/xe_shrinker.c
+++ b/drivers/gpu/drm/xe/xe_shrinker.c
@@ -5,6 +5,7 @@
#include <linux/shrinker.h>
+#include <drm/drm_managed.h>
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
@@ -53,10 +54,10 @@ xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgea
write_unlock(&shrinker->lock);
}
-static s64 xe_shrinker_walk(struct xe_device *xe,
- struct ttm_operation_ctx *ctx,
- const struct xe_bo_shrink_flags flags,
- unsigned long to_scan, unsigned long *scanned)
+static s64 __xe_shrinker_walk(struct xe_device *xe,
+ struct ttm_operation_ctx *ctx,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long to_scan, unsigned long *scanned)
{
unsigned int mem_type;
s64 freed = 0, lret;
@@ -65,11 +66,15 @@ static s64 xe_shrinker_walk(struct xe_device *xe,
struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type);
struct ttm_bo_lru_cursor curs;
struct ttm_buffer_object *ttm_bo;
+ struct ttm_lru_walk_arg arg = {
+ .ctx = ctx,
+ .trylock_only = true,
+ };
if (!man || !man->use_tt)
continue;
- ttm_bo_lru_for_each_reserved_guarded(&curs, man, ctx, ttm_bo) {
+ ttm_bo_lru_for_each_reserved_guarded(&curs, man, &arg, ttm_bo) {
if (!ttm_bo_shrink_suitable(ttm_bo, ctx))
continue;
@@ -81,6 +86,50 @@ static s64 xe_shrinker_walk(struct xe_device *xe,
if (*scanned >= to_scan)
break;
}
+ /* Trylocks should never error, just fail. */
+ xe_assert(xe, !IS_ERR(ttm_bo));
+ }
+
+ return freed;
+}
+
+/*
+ * Try shrinking idle objects without writeback first, then if not sufficient,
+ * try also non-idle objects and finally if that's not sufficient either,
+ * add writeback. This avoids stalls and explicit writebacks with light or
+ * moderate memory pressure.
+ */
+static s64 xe_shrinker_walk(struct xe_device *xe,
+ struct ttm_operation_ctx *ctx,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long to_scan, unsigned long *scanned)
+{
+ bool no_wait_gpu = true;
+ struct xe_bo_shrink_flags save_flags = flags;
+ s64 lret, freed;
+
+ swap(no_wait_gpu, ctx->no_wait_gpu);
+ save_flags.writeback = false;
+ lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
+ swap(no_wait_gpu, ctx->no_wait_gpu);
+ if (lret < 0 || *scanned >= to_scan)
+ return lret;
+
+ freed = lret;
+ if (!ctx->no_wait_gpu) {
+ lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
+ if (lret < 0)
+ return lret;
+ freed += lret;
+ if (*scanned >= to_scan)
+ return freed;
+ }
+
+ if (flags.writeback) {
+ lret = __xe_shrinker_walk(xe, ctx, flags, to_scan, scanned);
+ if (lret < 0)
+ return lret;
+ freed += lret;
}
return freed;
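The escalation described in the comment above amounts to three passes over the same walk, each relaxing one restriction. A standalone sketch of that control flow (the stub stands in for __xe_shrinker_walk() and just pretends to free pages):

#include <stdbool.h>
#include <stdio.h>

static long walk(bool wait_gpu, bool writeback, unsigned long *scanned)
{
	printf("pass: wait_gpu=%d writeback=%d\n", wait_gpu, writeback);
	*scanned += 10; /* pretend each pass frees 10 pages */
	return 10;
}

int main(void)
{
	unsigned long to_scan = 25, scanned = 0;
	long freed = 0;

	freed += walk(false, false, &scanned);        /* idle only, no writeback */
	if (scanned < to_scan)
		freed += walk(true, false, &scanned); /* also non-idle objects */
	if (scanned < to_scan)
		freed += walk(true, true, &scanned);  /* finally allow writeback */
	printf("freed %ld pages\n", freed);
	return 0;
}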
@@ -192,6 +241,7 @@ static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_con
runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup);
shrink_flags.purge = false;
+
lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
nr_to_scan, &nr_scanned);
if (lret >= 0)
@@ -213,24 +263,34 @@ static void xe_shrinker_pm(struct work_struct *work)
xe_pm_runtime_put(shrinker->xe);
}
+static void xe_shrinker_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_shrinker *shrinker = arg;
+
+ xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
+ xe_assert(shrinker->xe, !shrinker->purgeable_pages);
+ shrinker_free(shrinker->shrink);
+ flush_work(&shrinker->pm_worker);
+ kfree(shrinker);
+}
+
/**
* xe_shrinker_create() - Create an xe per-device shrinker
* @xe: Pointer to the xe device.
*
- * Returns: A pointer to the created shrinker on success,
- * Negative error code on failure.
+ * Return: %0 on success. Negative error code on failure.
*/
-struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
+int xe_shrinker_create(struct xe_device *xe)
{
struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);
if (!shrinker)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique);
if (!shrinker->shrink) {
kfree(shrinker);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm);
@@ -240,19 +300,7 @@ struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
shrinker->shrink->scan_objects = xe_shrinker_scan;
shrinker->shrink->private_data = shrinker;
shrinker_register(shrinker->shrink);
+ xe->mem.shrinker = shrinker;
- return shrinker;
-}
-
-/**
- * xe_shrinker_destroy() - Destroy an xe per-device shrinker
- * @shrinker: Pointer to the shrinker to destroy.
- */
-void xe_shrinker_destroy(struct xe_shrinker *shrinker)
-{
- xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
- xe_assert(shrinker->xe, !shrinker->purgeable_pages);
- shrinker_free(shrinker->shrink);
- flush_work(&shrinker->pm_worker);
- kfree(shrinker);
+ return drmm_add_action_or_reset(&xe->drm, xe_shrinker_fini, shrinker);
}
diff --git a/drivers/gpu/drm/xe/xe_shrinker.h b/drivers/gpu/drm/xe/xe_shrinker.h
index 28a038f4fcbf..5132ae5192e1 100644
--- a/drivers/gpu/drm/xe/xe_shrinker.h
+++ b/drivers/gpu/drm/xe/xe_shrinker.h
@@ -11,8 +11,6 @@ struct xe_device;
void xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable);
-struct xe_shrinker *xe_shrinker_create(struct xe_device *xe);
-
-void xe_shrinker_destroy(struct xe_shrinker *shrinker);
+int xe_shrinker_create(struct xe_device *xe);
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c
index 0f721ae17b26..27ddf3cc80e9 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.c
@@ -3,13 +3,18 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>
#include "xe_assert.h"
#include "xe_device.h"
+#include "xe_gt_sriov_pf.h"
#include "xe_module.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_service.h"
#include "xe_sriov_printk.h"
static unsigned int wanted_max_vfs(struct xe_device *xe)
@@ -80,9 +85,48 @@ bool xe_sriov_pf_readiness(struct xe_device *xe)
*/
int xe_sriov_pf_init_early(struct xe_device *xe)
{
+ int err;
+
xe_assert(xe, IS_SRIOV_PF(xe));
- return drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock);
+ xe->sriov.pf.vfs = drmm_kcalloc(&xe->drm, 1 + xe_sriov_pf_get_totalvfs(xe),
+ sizeof(*xe->sriov.pf.vfs), GFP_KERNEL);
+ if (!xe->sriov.pf.vfs)
+ return -ENOMEM;
+
+ err = drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock);
+ if (err)
+ return err;
+
+ xe_sriov_pf_service_init(xe);
+
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_wait_ready() - Wait until PF is ready to operate.
+ * @xe: the &xe_device to test
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_wait_ready(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int err;
+
+ if (xe_device_wedged(xe))
+ return -ECANCELED;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_wait_ready(gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
/**
@@ -102,3 +146,45 @@ void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p)
drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs);
drm_printf(p, "enabled: %u\n", pci_num_vf(pdev));
}
+
+static int simple_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct dentry *parent = node->dent->d_parent;
+ struct xe_device *xe = parent->d_inode->i_private;
+ void (*print)(struct xe_device *, struct drm_printer *) = node->info_ent->data;
+
+ print(xe, &p);
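The two GET_VERSION_* mailbox replies pack four fields, two per register, per the masks above. A standalone decode of a made-up pair of replies:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0x00010002; /* example GET_VERSION_LOW reply  */
	uint32_t hi = 0x00030004; /* example GET_VERSION_HIGH reply */

	printf("late-binding fw version %u.%u.%u.%u\n",
	       (unsigned)(lo >> 16), (unsigned)(lo & 0xffff),  /* MAJOR.MINOR  */
	       (unsigned)(hi >> 16), (unsigned)(hi & 0xffff)); /* HOTFIX.BUILD */
	return 0; /* prints 1.2.3.4 */
}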
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ { .name = "vfs", .show = simple_show, .data = xe_sriov_pf_print_vfs_summary },
+ { .name = "versions", .show = simple_show, .data = xe_sriov_pf_service_print_versions },
+};
+
+/**
+ * xe_sriov_pf_debugfs_register - Register PF debugfs attributes.
+ * @xe: the &xe_device
+ * @root: the root &dentry
+ *
+ * Prepare debugfs attributes exposed by the PF.
+ */
+void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root)
+{
+ struct drm_minor *minor = xe->drm.primary;
+ struct dentry *parent;
+
+ /*
+ * /sys/kernel/debug/dri/0/
+ * ├── pf
+ * │   ├── ...
+ */
+ parent = debugfs_create_dir("pf", root);
+ if (IS_ERR(parent))
+ return;
+ parent->d_inode->i_private = xe;
+
+ drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list), parent, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.h b/drivers/gpu/drm/xe/xe_sriov_pf.h
index d1220e70e1c0..e3b34f8f5e04 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.h
@@ -8,12 +8,15 @@
#include <linux/types.h>
+struct dentry;
struct drm_printer;
struct xe_device;
#ifdef CONFIG_PCI_IOV
bool xe_sriov_pf_readiness(struct xe_device *xe);
int xe_sriov_pf_init_early(struct xe_device *xe);
+int xe_sriov_pf_wait_ready(struct xe_device *xe);
+void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root);
void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p);
#else
static inline bool xe_sriov_pf_readiness(struct xe_device *xe)
@@ -25,6 +28,10 @@ static inline int xe_sriov_pf_init_early(struct xe_device *xe)
{
return 0;
}
+
+static inline void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root)
+{
+}
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_sriov_pf_service.c
new file mode 100644
index 000000000000..eee3b2a1ba41
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_service.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#include "abi/guc_relay_actions_abi.h"
+
+#include "xe_device_types.h"
+#include "xe_sriov.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_printk.h"
+
+#include "xe_sriov_pf_service.h"
+#include "xe_sriov_pf_service_types.h"
+
+/**
+ * xe_sriov_pf_service_init - Early initialization of the SR-IOV PF service.
+ * @xe: the &xe_device to initialize
+ *
+ * Performs early initialization of the SR-IOV PF service.
+ *
+ * This function can only be called on PF.
+ */
+void xe_sriov_pf_service_init(struct xe_device *xe)
+{
+ BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR);
+ BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR);
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ /* base versions may differ between platforms */
+ xe->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;
+ xe->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;
+
+ /* latest version is same for all platforms */
+ xe->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR;
+ xe->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR;
+}
+
+/* Return: 0 on success or a negative error code on failure. */
+static int pf_negotiate_version(struct xe_device *xe,
+ u32 wanted_major, u32 wanted_minor,
+ u32 *major, u32 *minor)
+{
+ struct xe_sriov_pf_service_version base = xe->sriov.pf.service.version.base;
+ struct xe_sriov_pf_service_version latest = xe->sriov.pf.service.version.latest;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, base.major);
+ xe_assert(xe, base.major <= latest.major);
+ xe_assert(xe, (base.major < latest.major) || (base.minor <= latest.minor));
+
+ /* VF doesn't care - return our latest */
+ if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY &&
+ wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) {
+ *major = latest.major;
+ *minor = latest.minor;
+ return 0;
+ }
+
+ /* VF wants newer than ours - return our latest */
+ if (wanted_major > latest.major) {
+ *major = latest.major;
+ *minor = latest.minor;
+ return 0;
+ }
+
+ /* VF wants older than min required - reject */
+ if (wanted_major < base.major ||
+ (wanted_major == base.major && wanted_minor < base.minor)) {
+ return -EPERM;
+ }
+
+ /* previous major - return wanted, as we should still support it */
+ if (wanted_major < latest.major) {
+ /* XXX: we are not prepared for multi-versions yet */
+ xe_assert(xe, base.major == latest.major);
+ return -ENOPKG;
+ }
+
+ /* same major - return common minor */
+ *major = wanted_major;
+ *minor = min_t(u32, latest.minor, wanted_minor);
+ return 0;
+}
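+
+/*
+ * Illustrative outcomes of the negotiation above (values are examples
+ * only, assuming base = 1.0 and latest = 1.5):
+ *
+ *   wanted ANY.ANY -> 1.5    (VF takes whatever the PF offers)
+ *   wanted 2.3     -> 1.5    (newer major than ours, capped to latest)
+ *   wanted 1.9     -> 1.5    (same major, common minor)
+ *   wanted 1.2     -> 1.2    (same major, VF minor already supported)
+ *   wanted 0.9     -> -EPERM (below the supported base)
+ */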
+
+static void pf_connect(struct xe_device *xe, u32 vfid, u32 major, u32 minor)
+{
+ xe_sriov_pf_assert_vfid(xe, vfid);
+ xe_assert(xe, major || minor);
+
+ xe->sriov.pf.vfs[vfid].version.major = major;
+ xe->sriov.pf.vfs[vfid].version.minor = minor;
+}
+
+static void pf_disconnect(struct xe_device *xe, u32 vfid)
+{
+ xe_sriov_pf_assert_vfid(xe, vfid);
+
+ xe->sriov.pf.vfs[vfid].version.major = 0;
+ xe->sriov.pf.vfs[vfid].version.minor = 0;
+}
+
+/**
+ * xe_sriov_pf_service_is_negotiated - Check if VF has negotiated given ABI version.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @major: the major version to check
+ * @minor: the minor version to check
+ *
+ * Checks whether the VF has negotiated the given major version and at
+ * least the given minor version.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: true if the VF can use the given ABI version functionality.
+ */
+bool xe_sriov_pf_service_is_negotiated(struct xe_device *xe, u32 vfid, u32 major, u32 minor)
+{
+ xe_sriov_pf_assert_vfid(xe, vfid);
+
+ return major == xe->sriov.pf.vfs[vfid].version.major &&
+ minor <= xe->sriov.pf.vfs[vfid].version.minor;
+}
+
+/**
+ * xe_sriov_pf_service_handshake_vf - Confirm a connection with the VF.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @wanted_major: the major service version expected by the VF
+ * @wanted_minor: the minor service version expected by the VF
+ * @major: the major service version to be used by the VF
+ * @minor: the minor service version to be used by the VF
+ *
+ * Negotiate a VF/PF ABI version to allow the VF to use the PF services.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_service_handshake_vf(struct xe_device *xe, u32 vfid,
+ u32 wanted_major, u32 wanted_minor,
+ u32 *major, u32 *minor)
+{
+ int err;
+
+ xe_sriov_dbg_verbose(xe, "VF%u wants ABI version %u.%u\n",
+ vfid, wanted_major, wanted_minor);
+
+ err = pf_negotiate_version(xe, wanted_major, wanted_minor, major, minor);
+
+ if (err < 0) {
+ xe_sriov_notice(xe, "VF%u failed to negotiate ABI %u.%u (%pe)\n",
+ vfid, wanted_major, wanted_minor, ERR_PTR(err));
+ pf_disconnect(xe, vfid);
+ } else {
+ xe_sriov_dbg(xe, "VF%u negotiated ABI version %u.%u\n",
+ vfid, *major, *minor);
+ pf_connect(xe, vfid, *major, *minor);
+ }
+
+ return err;
+}
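+
+/*
+ * Typical flow (sketch): the VF driver sends a handshake relay request
+ * with its wanted ABI version; the PF handles it via this function and
+ * replies with the negotiated major/minor. On a negotiation error, any
+ * previously negotiated version is dropped and the VF must retry with
+ * an acceptable version.
+ */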
+
+/**
+ * xe_sriov_pf_service_reset_vf - Reset a connection with the VF.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * Reset a VF driver negotiated VF/PF ABI version.
+ *
+ * After that point, the VF driver must perform a new version handshake
+ * to continue using the PF services.
+ *
+ * This function can only be called on PF.
+ */
+void xe_sriov_pf_service_reset_vf(struct xe_device *xe, unsigned int vfid)
+{
+ pf_disconnect(xe, vfid);
+}
+
+static void print_pf_version(struct drm_printer *p, const char *name,
+ const struct xe_sriov_pf_service_version *version)
+{
+ drm_printf(p, "%s:\t%u.%u\n", name, version->major, version->minor);
+}
+
+/**
+ * xe_sriov_pf_service_print_versions - Print ABI versions negotiated with VFs.
+ * @xe: the &xe_device
+ * @p: the &drm_printer
+ *
+ * This function is for PF use only.
+ */
+void xe_sriov_pf_service_print_versions(struct xe_device *xe, struct drm_printer *p)
+{
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
+ struct xe_sriov_pf_service_version *version;
+ char name[8];
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ print_pf_version(p, "base", &xe->sriov.pf.service.version.base);
+ print_pf_version(p, "latest", &xe->sriov.pf.service.version.latest);
+
+ for (n = 1; n <= total_vfs; n++) {
+ version = &xe->sriov.pf.vfs[n].version;
+ if (!version->major && !version->minor)
+ continue;
+
+ print_pf_version(p, xe_sriov_function_name(n, name, sizeof(name)), version);
+ }
+}
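+
+/*
+ * Hypothetical output (values are examples only) for a PF with two
+ * connected VFs; VFs without a negotiated version are skipped:
+ *
+ *   base:   1.0
+ *   latest: 1.5
+ *   VF1:    1.5
+ *   VF2:    1.2
+ */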
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_sriov_pf_service_kunit.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_service.h b/drivers/gpu/drm/xe/xe_sriov_pf_service.h
new file mode 100644
index 000000000000..d38c18f5ed10
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_service.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_SERVICE_H_
+#define _XE_SRIOV_PF_SERVICE_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_device;
+
+void xe_sriov_pf_service_init(struct xe_device *xe);
+void xe_sriov_pf_service_print_versions(struct xe_device *xe, struct drm_printer *p);
+
+int xe_sriov_pf_service_handshake_vf(struct xe_device *xe, u32 vfid,
+ u32 wanted_major, u32 wanted_minor,
+ u32 *major, u32 *minor);
+bool xe_sriov_pf_service_is_negotiated(struct xe_device *xe, u32 vfid, u32 major, u32 minor);
+void xe_sriov_pf_service_reset_vf(struct xe_device *xe, unsigned int vfid);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_service_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_service_types.h
new file mode 100644
index 000000000000..0835dde358c1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_service_types.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_SERVICE_TYPES_H_
+#define _XE_SRIOV_PF_SERVICE_TYPES_H_
+
+#include <linux/types.h>
+
+/**
+ * struct xe_sriov_pf_service_version - VF/PF ABI Version.
+ * @major: the major version of the VF/PF ABI
+ * @minor: the minor version of the VF/PF ABI
+ *
+ * See `GuC Relay Communication`_.
+ */
+struct xe_sriov_pf_service_version {
+ u16 major;
+ u16 minor;
+};
+
+/**
+ * struct xe_sriov_pf_service - Data used by the PF service.
+ * @version: information about VF/PF ABI versions for current platform.
+ * @version.base: lowest VF/PF ABI version that could be negotiated with VF.
+ * @version.latest: latest VF/PF ABI version supported by the PF driver.
+ */
+struct xe_sriov_pf_service {
+ struct {
+ struct xe_sriov_pf_service_version base;
+ struct xe_sriov_pf_service_version latest;
+ } version;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_types.h
new file mode 100644
index 000000000000..956a88f9f213
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_types.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_TYPES_H_
+#define _XE_SRIOV_PF_TYPES_H_
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "xe_sriov_pf_service_types.h"
+
+/**
+ * struct xe_sriov_metadata - per-VF device level metadata
+ */
+struct xe_sriov_metadata {
+ /** @version: negotiated VF/PF ABI version */
+ struct xe_sriov_pf_service_version version;
+};
+
+/**
+ * struct xe_device_pf - Xe PF related data
+ *
+ * The data in this structure is valid only if the driver is running in the
+ * @XE_SRIOV_MODE_PF mode.
+ */
+struct xe_device_pf {
+ /** @device_total_vfs: Maximum number of VFs supported by the device. */
+ u16 device_total_vfs;
+
+ /** @driver_max_vfs: Maximum number of VFs supported by the driver. */
+ u16 driver_max_vfs;
+
+ /** @master_lock: protects all VFs configurations across GTs */
+ struct mutex master_lock;
+
+ /** @service: device level service data. */
+ struct xe_sriov_pf_service service;
+
+ /** @vfs: metadata for all VFs. */
+ struct xe_sriov_metadata *vfs;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h
index ca94382a721e..1a138108d139 100644
--- a/drivers/gpu/drm/xe/xe_sriov_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_types.h
@@ -7,9 +7,6 @@
#define _XE_SRIOV_TYPES_H_
#include <linux/build_bug.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <linux/workqueue_types.h>
/**
* VFID - Virtual Function Identifier
@@ -40,37 +37,4 @@ enum xe_sriov_mode {
};
static_assert(XE_SRIOV_MODE_NONE);
-/**
- * struct xe_device_pf - Xe PF related data
- *
- * The data in this structure is valid only if driver is running in the
- * @XE_SRIOV_MODE_PF mode.
- */
-struct xe_device_pf {
- /** @device_total_vfs: Maximum number of VFs supported by the device. */
- u16 device_total_vfs;
-
- /** @driver_max_vfs: Maximum number of VFs supported by the driver. */
- u16 driver_max_vfs;
-
- /** @master_lock: protects all VFs configurations across GTs */
- struct mutex master_lock;
-};
-
-/**
- * struct xe_device_vf - Xe Virtual Function related data
- *
- * The data in this structure is valid only if driver is running in the
- * @XE_SRIOV_MODE_VF mode.
- */
-struct xe_device_vf {
- /** @migration: VF Migration state data */
- struct {
- /** @migration.worker: VF migration recovery worker */
- struct work_struct worker;
- /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
- unsigned long gt_flags;
- } migration;
-};
-
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
index c1275e64aa9c..26e243c28994 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -7,12 +7,15 @@
#include "xe_assert.h"
#include "xe_device.h"
+#include "xe_gt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
+#include "xe_guc_ct.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_sriov_vf.h"
+#include "xe_tile_sriov_vf.h"
/**
* DOC: VF restore procedure in PF KMD and VF KMD
@@ -121,6 +124,15 @@
* | | |
*/
+static bool vf_migration_supported(struct xe_device *xe)
+{
+ /*
+ * TODO: Add conditions to allow specific platforms, when they're
+ * supported at production quality.
+ */
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+}
+
static void migration_worker_func(struct work_struct *w);
/**
@@ -130,86 +142,118 @@ static void migration_worker_func(struct work_struct *w);
void xe_sriov_vf_init_early(struct xe_device *xe)
{
INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+
+ if (!vf_migration_supported(xe))
+ xe_sriov_info(xe, "migration not supported by this module version\n");
}
-/**
- * vf_post_migration_requery_guc - Re-query GuC for current VF provisioning.
+static bool gt_vf_post_migration_needed(struct xe_gt *gt)
+{
+ return test_bit(gt->info.id, &gt_to_xe(gt)->sriov.vf.migration.gt_flags);
+}
+
+/*
+ * Notify the GuCs marked in @gt_flags that resource fixups have been applied.
* @xe: the &xe_device struct instance
- *
- * After migration, we need to re-query all VF configuration to make sure
- * they match previous provisioning. Note that most of VF provisioning
- * shall be the same, except GGTT range, since GGTT is not virtualized per-VF.
- *
- * Returns: 0 if the operation completed successfully, or a negative error
- * code otherwise.
+ * @gt_flags: flags marking to which GTs the notification shall be sent
*/
-static int vf_post_migration_requery_guc(struct xe_device *xe)
+static int vf_post_migration_notify_resfix_done(struct xe_device *xe, unsigned long gt_flags)
{
struct xe_gt *gt;
unsigned int id;
- int err, ret = 0;
+ int err = 0;
for_each_gt(gt, xe, id) {
- err = xe_gt_sriov_vf_query_config(gt);
- ret = ret ?: err;
+ if (!test_bit(id, &gt_flags))
+ continue;
+ /* skip asking GuC for RESFIX exit if new recovery request arrived */
+ if (gt_vf_post_migration_needed(gt))
+ continue;
+ err = xe_gt_sriov_vf_notify_resfix_done(gt);
+ if (err)
+ break;
+ clear_bit(id, &gt_flags);
}
- return ret;
-}
-
-/*
- * vf_post_migration_imminent - Check if post-restore recovery is coming.
- * @xe: the &xe_device struct instance
- *
- * Return: True if migration recovery worker will soon be running. Any worker currently
- * executing does not affect the result.
- */
-static bool vf_post_migration_imminent(struct xe_device *xe)
-{
- return xe->sriov.vf.migration.gt_flags != 0 ||
- work_pending(&xe->sriov.vf.migration.worker);
+ if (gt_flags && !err)
+ drm_dbg(&xe->drm, "another recovery imminent, skipped some notifications\n");
+ return err;
}
-/*
- * Notify all GuCs about resource fixups apply finished.
- */
-static void vf_post_migration_notify_resfix_done(struct xe_device *xe)
+static int vf_get_next_migrated_gt_id(struct xe_device *xe)
{
struct xe_gt *gt;
unsigned int id;
for_each_gt(gt, xe, id) {
- if (vf_post_migration_imminent(xe))
- goto skip;
- xe_gt_sriov_vf_notify_resfix_done(gt);
+ if (test_and_clear_bit(id, &xe->sriov.vf.migration.gt_flags))
+ return id;
}
- return;
+ return -1;
+}
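+
+/*
+ * Example (illustrative): on a device with GTs 0 and 1, a recovery
+ * request for both GTs leaves migration.gt_flags == 0b11; each call
+ * above then atomically clears and returns the lowest set bit (0,
+ * then 1), and returns -1 once all marked GTs have been handled.
+ */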
-skip:
- drm_dbg(&xe->drm, "another recovery imminent, skipping notifications\n");
+/**
+ * gt_vf_post_migration_fixups - Perform post-migration fixups on a single GT.
+ * @gt: the &xe_gt struct instance
+ *
+ * After migration, the GuC must be re-queried for the VF configuration to
+ * check whether it matches the previous provisioning. Most of the VF
+ * provisioning is expected to be the same, except the GGTT range, since GGTT
+ * is not virtualized per-VF. If the GGTT range has changed, fixups must be
+ * applied: all GGTT references used anywhere within the driver have to be
+ * shifted. Once the fixups in this function succeed, the GuC bound to this
+ * GT may be asked to continue normal operation.
+ *
+ * Returns: 0 if the operation completed successfully, or a negative error
+ * code otherwise.
+ */
+static int gt_vf_post_migration_fixups(struct xe_gt *gt)
+{
+ s64 shift;
+ int err;
+
+ err = xe_gt_sriov_vf_query_config(gt);
+ if (err)
+ return err;
+
+ shift = xe_gt_sriov_vf_ggtt_shift(gt);
+ if (shift) {
+ xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
+ /* FIXME: add the recovery steps */
+ xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift);
+ }
+ return 0;
}
static void vf_post_migration_recovery(struct xe_device *xe)
{
- int err;
+ unsigned long fixed_gts = 0;
+ int id, err;
drm_dbg(&xe->drm, "migration recovery in progress\n");
xe_pm_runtime_get(xe);
- err = vf_post_migration_requery_guc(xe);
- if (vf_post_migration_imminent(xe))
- goto defer;
- if (unlikely(err))
+
+ if (!vf_migration_supported(xe)) {
+ xe_sriov_err(xe, "migration not supported by this module version\n");
+ err = -ENOTRECOVERABLE;
+ goto fail;
+ }
+
+ while (id = vf_get_next_migrated_gt_id(xe), id >= 0) {
+ struct xe_gt *gt = xe_device_get_gt(xe, id);
+
+ err = gt_vf_post_migration_fixups(gt);
+ if (err)
+ goto fail;
+
+ set_bit(id, &fixed_gts);
+ }
+
+ err = vf_post_migration_notify_resfix_done(xe, fixed_gts);
+ if (err)
goto fail;
- /* FIXME: add the recovery steps */
- vf_post_migration_notify_resfix_done(xe);
xe_pm_runtime_put(xe);
drm_notice(&xe->drm, "migration recovery ended\n");
return;
-defer:
- xe_pm_runtime_put(xe);
- drm_dbg(&xe->drm, "migration recovery deferred\n");
- return;
fail:
xe_pm_runtime_put(xe);
drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));
@@ -224,18 +268,23 @@ static void migration_worker_func(struct work_struct *w)
vf_post_migration_recovery(xe);
}
-static bool vf_ready_to_recovery_on_all_gts(struct xe_device *xe)
+/*
+ * Check if post-restore recovery is pending on any of the GTs.
+ * @xe: the &xe_device struct instance
+ *
+ * Return: True if the migration recovery worker will soon be running. Any
+ * worker currently executing does not affect the result.
+ */
+static bool vf_ready_to_recovery_on_any_gts(struct xe_device *xe)
{
struct xe_gt *gt;
unsigned int id;
for_each_gt(gt, xe, id) {
- if (!test_bit(id, &xe->sriov.vf.migration.gt_flags)) {
- xe_gt_sriov_dbg_verbose(gt, "still not ready to recover\n");
- return false;
- }
+ if (test_bit(id, &xe->sriov.vf.migration.gt_flags))
+ return true;
}
- return true;
+ return false;
}
/**
@@ -250,13 +299,9 @@ void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
xe_assert(xe, IS_SRIOV_VF(xe));
- if (!vf_ready_to_recovery_on_all_gts(xe))
+ if (!vf_ready_to_recovery_on_any_gts(xe))
return;
- WRITE_ONCE(xe->sriov.vf.migration.gt_flags, 0);
- /* Ensure other threads see that no flags are set now. */
- smp_mb();
-
started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
drm_info(&xe->drm, "VF migration recovery %s\n", started ?
"scheduled" : "already in progress");
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
new file mode 100644
index 000000000000..8300416a6226
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_TYPES_H_
+#define _XE_SRIOV_VF_TYPES_H_
+
+#include <linux/types.h>
+#include <linux/workqueue_types.h>
+
+/**
+ * struct xe_sriov_vf_relay_version - PF ABI version details.
+ */
+struct xe_sriov_vf_relay_version {
+ /** @major: major version. */
+ u16 major;
+ /** @minor: minor version. */
+ u16 minor;
+};
+
+/**
+ * struct xe_device_vf - Xe Virtual Function related data
+ *
+ * The data in this structure is valid only if the driver is running in the
+ * @XE_SRIOV_MODE_VF mode.
+ */
+struct xe_device_vf {
+ /** @pf_version: negotiated VF/PF ABI version. */
+ struct xe_sriov_vf_relay_version pf_version;
+
+ /** @migration: VF Migration state data */
+ struct {
+ /** @migration.worker: VF migration recovery worker */
+ struct work_struct worker;
+ /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
+ unsigned long gt_flags;
+ } migration;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_step.c b/drivers/gpu/drm/xe/xe_step.c
index c77b5c317fa0..10e88f2c9615 100644
--- a/drivers/gpu/drm/xe/xe_step.c
+++ b/drivers/gpu/drm/xe/xe_step.c
@@ -5,6 +5,7 @@
#include "xe_step.h"
+#include <kunit/visibility.h>
#include <linux/bitfield.h>
#include "xe_device.h"
@@ -255,3 +256,4 @@ const char *xe_step_name(enum xe_step step)
return "**";
}
}
+EXPORT_SYMBOL_IF_KUNIT(xe_step_name);
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
index 1f710b3fc599..41705f5d52e3 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.c
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -14,6 +14,7 @@
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_heci_gsc.h"
+#include "xe_i2c.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"
#include "xe_vsec.h"
@@ -173,20 +174,22 @@ static int enable_survivability_mode(struct pci_dev *pdev)
survivability->mode = true;
ret = xe_heci_gsc_init(xe);
- if (ret) {
- /*
- * But if it fails, device can't enter survivability
- * so move it back for correct error handling
- */
- survivability->mode = false;
- return ret;
- }
+ if (ret)
+ goto err;
xe_vsec_init(xe);
+ ret = xe_i2c_probe(xe);
+ if (ret)
+ goto err;
+
dev_err(dev, "In Survivability Mode\n");
return 0;
+
+err:
+ survivability->mode = false;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index f0b167b3fb6a..a7ff5975873f 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -3,13 +3,17 @@
* Copyright © 2024 Intel Corporation
*/
+#include <drm/drm_drv.h>
+
#include "xe_bo.h"
#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
+#include "xe_pm.h"
#include "xe_pt.h"
#include "xe_svm.h"
+#include "xe_tile.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
#include "xe_vm_types.h"
@@ -45,21 +49,6 @@ static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
return gpusvm_to_vm(r->gpusvm);
}
-static unsigned long xe_svm_range_start(struct xe_svm_range *range)
-{
- return drm_gpusvm_range_start(&range->base);
-}
-
-static unsigned long xe_svm_range_end(struct xe_svm_range *range)
-{
- return drm_gpusvm_range_end(&range->base);
-}
-
-static unsigned long xe_svm_range_size(struct xe_svm_range *range)
-{
- return drm_gpusvm_range_size(&range->base);
-}
-
#define range_debug(r__, operaton__) \
vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
"%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
@@ -103,11 +92,6 @@ static void xe_svm_range_free(struct drm_gpusvm_range *range)
kfree(range);
}
-static struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
-{
- return container_of(r, struct xe_svm_range, base);
-}
-
static void
xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
const struct mmu_notifier_range *mmu_range)
@@ -161,7 +145,12 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
for_each_tile(tile, xe, id)
if (xe_pt_zap_ptes_range(tile, vm, range)) {
tile_mask |= BIT(id);
- range->tile_invalidated |= BIT(id);
+ /*
+ * WRITE_ONCE pairs with READ_ONCE in
+ * xe_vm_has_valid_gpu_mapping()
+ */
+ WRITE_ONCE(range->tile_invalidated,
+ range->tile_invalidated | BIT(id));
}
return tile_mask;
@@ -187,14 +176,9 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
{
struct xe_vm *vm = gpusvm_to_vm(gpusvm);
struct xe_device *xe = vm->xe;
- struct xe_tile *tile;
struct drm_gpusvm_range *r, *first;
- struct xe_gt_tlb_invalidation_fence
- fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
u8 tile_mask = 0;
- u8 id;
- u32 fence_id = 0;
long err;
xe_svm_assert_in_notifier(vm);
@@ -240,42 +224,8 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
xe_device_wmb(xe);
- for_each_tile(tile, xe, id) {
- if (tile_mask & BIT(id)) {
- int err;
-
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->primary_gt,
- &fence[fence_id],
- adj_start,
- adj_end,
- vm->usm.asid);
- if (WARN_ON_ONCE(err < 0))
- goto wait;
- ++fence_id;
-
- if (!tile->media_gt)
- continue;
-
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->media_gt,
- &fence[fence_id],
- adj_start,
- adj_end,
- vm->usm.asid);
- if (WARN_ON_ONCE(err < 0))
- goto wait;
- ++fence_id;
- }
- }
-
-wait:
- for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
+ WARN_ON_ONCE(err);
range_notifier_event_end:
r = first;
@@ -349,7 +299,7 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w)
up_write(&vm->lock);
}
-#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
static struct xe_vram_region *page_to_vr(struct page *page)
{
@@ -537,16 +487,18 @@ static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
}
-static struct xe_bo *to_xe_bo(struct drm_gpusvm_devmem *devmem_allocation)
+static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
{
return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
}
-static void xe_svm_devmem_release(struct drm_gpusvm_devmem *devmem_allocation)
+static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
{
struct xe_bo *bo = to_xe_bo(devmem_allocation);
+ struct xe_device *xe = xe_bo_device(bo);
xe_bo_put_async(bo);
+ xe_pm_runtime_put(xe);
}
static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
@@ -559,7 +511,7 @@ static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
return &tile->mem.vram.ttm.mm;
}
-static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocation,
+static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
unsigned long npages, unsigned long *pfn)
{
struct xe_bo *bo = to_xe_bo(devmem_allocation);
@@ -582,7 +534,7 @@ static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocatio
return 0;
}
-static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = {
+static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
.devmem_release = xe_svm_devmem_release,
.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
.copy_to_devmem = xe_svm_copy_to_devmem,
@@ -662,84 +614,140 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range,
struct xe_tile *tile,
bool devmem_only)
{
- /*
- * Advisory only check whether the range currently has a valid mapping,
- * READ_ONCE pairs with WRITE_ONCE in xe_pt.c
- */
- return ((READ_ONCE(range->tile_present) &
- ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
- (!devmem_only || xe_svm_range_in_vram(range));
+ return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
+ range->tile_invalidated) &&
+ (!devmem_only || xe_svm_range_in_vram(range)));
+}
+
+/**
+ * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
+ * @vm: xe_vm pointer
+ * @range: Pointer to the SVM range structure
+ *
+ * Checks whether the range has pages in VRAM and, if so, migrates them
+ * to SMEM.
+ */
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
+{
+ if (xe_svm_range_in_vram(range))
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+}
+
+/**
+ * xe_svm_range_validate() - Check if the SVM range is valid
+ * @vm: xe_vm pointer
+ * @range: Pointer to the SVM range structure
+ * @tile_mask: Mask representing the tiles to be checked
+ * @devmem_preferred: if true, the range needs to be in devmem
+ *
+ * The xe_svm_range_validate() function checks if a range is
+ * valid and located in the desired memory region.
+ *
+ * Return: true if the range is valid, false otherwise
+ */
+bool xe_svm_range_validate(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_mask, bool devmem_preferred)
+{
+ bool ret;
+
+ xe_svm_notifier_lock(vm);
+
+ ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
+ (devmem_preferred == range->base.flags.has_devmem_pages);
+
+ xe_svm_notifier_unlock(vm);
+
+ return ret;
+}
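+
+/*
+ * Illustrative check: with tile_mask = BIT(0), the range validates only
+ * if tile 0 has a present, non-invalidated mapping and the range's
+ * devmem residency matches @devmem_preferred.
+ */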
+
+/**
+ * xe_svm_find_vma_start - Find start of CPU VMA
+ * @vm: xe_vm pointer
+ * @start: start address
+ * @end: end address
+ * @vma: Pointer to struct xe_vma
+ *
+ * This function searches for a CPU VMA within the specified range
+ * [start, end] in the given VM, clamping the range to the xe_vma start
+ * and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
+ *
+ * Return: The starting address of the VMA within the range,
+ * or ULONG_MAX if no VMA is found
+ */
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
+{
+ return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
+ max(start, xe_vma_start(vma)),
+ min(end, xe_vma_end(vma)));
}
-#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
{
return &tile->mem.vram;
}
-static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
- struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
+static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms)
{
- struct mm_struct *mm = vm->svm.gpusvm.mm;
+ struct xe_tile *tile = container_of(dpagemap, typeof(*tile), mem.vram.dpagemap);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct device *dev = xe->drm.dev;
struct xe_vram_region *vr = tile_to_vr(tile);
struct drm_buddy_block *block;
struct list_head *blocks;
struct xe_bo *bo;
- ktime_t end = 0;
- int err;
+ ktime_t time_end = 0;
+ int err, idx;
- range_debug(range, "ALLOCATE VRAM");
+ if (!drm_dev_enter(&xe->drm, &idx))
+ return -ENODEV;
- if (!mmget_not_zero(mm))
- return -EFAULT;
- mmap_read_lock(mm);
+ xe_pm_runtime_get(xe);
-retry:
- bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL,
- xe_svm_range_size(range),
+ retry:
+ bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, end - start,
ttm_bo_type_device,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_CPU_ADDR_MIRROR);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
- if (xe_vm_validate_should_retry(NULL, err, &end))
+ if (xe_vm_validate_should_retry(NULL, err, &time_end))
goto retry;
- goto unlock;
+ goto out_pm_put;
}
- drm_gpusvm_devmem_init(&bo->devmem_allocation,
- vm->xe->drm.dev, mm,
- &gpusvm_devmem_ops,
- &tile->mem.vram.dpagemap,
- xe_svm_range_size(range));
+ drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
+ &dpagemap_devmem_ops,
+ &tile->mem.vram.dpagemap,
+ end - start);
blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
list_for_each_entry(block, blocks, link)
block->private = vr;
xe_bo_get(bo);
- err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
- &bo->devmem_allocation, ctx);
+
+ /* Ensure the device has a pm ref while there are device pages active. */
+ xe_pm_runtime_get_noresume(xe);
+ err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
+ start, end, timeslice_ms,
+ xe_svm_devm_owner(xe));
if (err)
xe_svm_devmem_release(&bo->devmem_allocation);
xe_bo_unlock(bo);
xe_bo_put(bo);
-unlock:
- mmap_read_unlock(mm);
- mmput(mm);
+out_pm_put:
+ xe_pm_runtime_put(xe);
+ drm_dev_exit(idx);
return err;
}
-#else
-static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
- struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
-{
- return -EOPNOTSUPP;
-}
#endif
static bool supports_4K_migration(struct xe_device *xe)
@@ -750,21 +758,31 @@ static bool supports_4K_migration(struct xe_device *xe)
return true;
}
-static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
- struct xe_vma *vma)
+/**
+ * xe_svm_range_needs_migrate_to_vram() - Check if an SVM range needs migration to VRAM
+ * @range: SVM range for which migration needs to be decided
+ * @vma: vma which has range
+ * @preferred_region_is_vram: preferred region for range is vram
+ *
+ * Return: True if the range needs migration and migration is supported, false otherwise
+ */
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ bool preferred_region_is_vram)
{
struct xe_vm *vm = range_to_vm(&range->base);
u64 range_size = xe_svm_range_size(range);
- if (!range->base.flags.migrate_devmem)
+ if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
return false;
- if (xe_svm_range_in_vram(range)) {
- drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
+ xe_assert(vm->xe, IS_DGFX(vm->xe));
+
+ if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
+ drm_info(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
- if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
+ if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
return false;
}
@@ -792,20 +810,19 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
struct drm_gpusvm_ctx ctx = {
.read_only = xe_vma_read_only(vma),
.devmem_possible = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
.check_pages_threshold = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
.devmem_only = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
.timeslice_ms = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? 5 : 0,
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
+ vm->xe->atomic_svm_timeslice_ms : 0,
};
struct xe_svm_range *range;
- struct drm_gpusvm_range *r;
- struct drm_exec exec;
struct dma_fence *fence;
- int migrate_try_count = ctx.devmem_only ? 3 : 1;
struct xe_tile *tile = gt_to_tile(gt);
+ int migrate_try_count = ctx.devmem_only ? 3 : 1;
ktime_t end = 0;
int err;
@@ -820,24 +837,22 @@ retry:
if (err)
return err;
- r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
- xe_vma_start(vma), xe_vma_end(vma),
- &ctx);
- if (IS_ERR(r))
- return PTR_ERR(r);
+ range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
- if (ctx.devmem_only && !r->flags.migrate_devmem)
+ if (IS_ERR(range))
+ return PTR_ERR(range);
+
+ if (ctx.devmem_only && !range->base.flags.migrate_devmem)
return -EACCES;
- range = to_xe_range(r);
if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
return 0;
range_debug(range, "PAGE FAULT");
if (--migrate_try_count >= 0 &&
- xe_svm_range_needs_migrate_to_vram(range, vma)) {
- err = xe_svm_alloc_vram(vm, tile, range, &ctx);
+ xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
+ err = xe_svm_alloc_vram(tile, range, &ctx);
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
if (migrate_try_count || !ctx.devmem_only) {
@@ -855,16 +870,11 @@ retry:
}
range_debug(range, "GET PAGES");
- err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
+ err = xe_svm_range_get_pages(vm, range, &ctx);
/* Corner where CPU mappings have changed */
if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (migrate_try_count > 0 || !ctx.devmem_only) {
- if (err == -EOPNOTSUPP) {
- range_debug(range, "PAGE FAULT - EVICT PAGES");
- drm_gpusvm_range_evict(&vm->svm.gpusvm,
- &range->base);
- }
drm_dbg(&vm->xe->drm,
"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
@@ -884,30 +894,21 @@ retry:
range_debug(range, "PAGE FAULT - BIND");
retry_bind:
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- err = drm_exec_lock_obj(&exec, vm->gpuvm.r_obj);
- drm_exec_retry_on_contention(&exec);
- if (err) {
- drm_exec_fini(&exec);
- goto err_out;
- }
-
- fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
- if (IS_ERR(fence)) {
- drm_exec_fini(&exec);
- err = PTR_ERR(fence);
- if (err == -EAGAIN) {
- ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
- range_debug(range, "PAGE FAULT - RETRY BIND");
- goto retry;
- }
- if (xe_vm_validate_should_retry(&exec, err, &end))
- goto retry_bind;
- goto err_out;
+ xe_vm_lock(vm, false);
+ fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
+ if (IS_ERR(fence)) {
+ xe_vm_unlock(vm);
+ err = PTR_ERR(fence);
+ if (err == -EAGAIN) {
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ range_debug(range, "PAGE FAULT - RETRY BIND");
+ goto retry;
}
+ if (xe_vm_validate_should_retry(NULL, err, &end))
+ goto retry_bind;
+ goto err_out;
}
- drm_exec_fini(&exec);
+ xe_vm_unlock(vm);
dma_fence_wait(fence, false);
dma_fence_put(fence);
@@ -943,10 +944,84 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
*/
int xe_svm_bo_evict(struct xe_bo *bo)
{
- return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
+ return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
+}
+
+/**
+ * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
+ * @vm: xe_vm pointer
+ * @addr: address for which range needs to be found/inserted
+ * @vma: Pointer to the struct xe_vma which mirrors the CPU mappings
+ * @ctx: GPU SVM context
+ *
+ * This function finds an existing SVM range or inserts a newly allocated
+ * one, based on the address.
+ *
+ * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
+ */
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+ struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
+{
+ struct drm_gpusvm_range *r;
+
+ r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
+ xe_vma_start(vma), xe_vma_end(vma), ctx);
+ if (IS_ERR(r))
+ return ERR_CAST(r);
+
+ return to_xe_range(r);
+}
+
+/**
+ * xe_svm_range_get_pages() - Get pages for a SVM range
+ * @vm: Pointer to the struct xe_vm
+ * @range: Pointer to the xe SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function gets pages for a SVM range and ensures they are mapped for
+ * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+ struct drm_gpusvm_ctx *ctx)
+{
+ int err = 0;
+
+ err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
+ if (err == -EOPNOTSUPP) {
+ range_debug(range, "PAGE FAULT - EVICT PAGES");
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+ }
+
+ return err;
}
-#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+
+/**
+ * xe_svm_alloc_vram() - Allocate device memory pages for range,
+ * migrating existing data.
+ * @tile: tile to allocate vram from
+ * @range: SVM range
+ * @ctx: DRM GPU SVM context
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ struct drm_pagemap *dpagemap;
+
+ xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);
+ range_debug(range, "ALLOCATE VRAM");
+
+ dpagemap = xe_tile_local_pagemap(tile);
+ return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
+ xe_svm_range_end(range),
+ range->base.gpusvm->mm,
+ ctx->timeslice_ms);
+}
static struct drm_pagemap_device_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
@@ -972,6 +1047,7 @@ xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
.device_map = xe_drm_pagemap_device_map,
+ .populate_mm = xe_drm_pagemap_populate_mm,
};
/**
@@ -1003,7 +1079,7 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
vr->pagemap.range.start = res->start;
vr->pagemap.range.end = res->end;
vr->pagemap.nr_range = 1;
- vr->pagemap.ops = drm_gpusvm_pagemap_ops_get();
+ vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
vr->pagemap.owner = xe_svm_devm_owner(xe);
addr = devm_memremap_pages(dev, &vr->pagemap);
@@ -1024,6 +1100,13 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
return 0;
}
#else
+int xe_svm_alloc_vram(struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
return 0;
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 30fc78b85b30..da9a69ea0bb1 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -70,6 +70,26 @@ int xe_svm_bo_evict(struct xe_bo *bo);
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
+int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx);
+
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+ struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);
+
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+ struct drm_gpusvm_ctx *ctx);
+
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ bool preferred_region_is_vram);
+
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);
+
+bool xe_svm_range_validate(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_mask, bool devmem_preferred);
+
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
+
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -82,6 +102,53 @@ static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
return range->base.flags.has_dma_mapping;
}
+/**
+ * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
+ * @r: Pointer to the drm_gpusvm_range structure
+ *
+ * This function takes a pointer to a drm_gpusvm_range structure and
+ * converts it to a pointer to the containing xe_svm_range structure.
+ *
+ * Return: Pointer to the xe_svm_range structure
+ */
+static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+ return container_of(r, struct xe_svm_range, base);
+}
+
+/**
+ * xe_svm_range_start() - SVM range start address
+ * @range: SVM range
+ *
+ * Return: start address of range.
+ */
+static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_start(&range->base);
+}
+
+/**
+ * xe_svm_range_end() - SVM range end address
+ * @range: SVM range
+ *
+ * Return: end address of range.
+ */
+static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_end(&range->base);
+}
+
+/**
+ * xe_svm_range_size() - SVM range size
+ * @range: SVM range
+ *
+ * Return: Size of range.
+ */
+static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_size(&range->base);
+}
+
#define xe_svm_assert_in_notifier(vm__) \
lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
@@ -97,6 +164,8 @@ void xe_svm_flush(struct xe_vm *vm);
#include <linux/interval_tree.h>
struct drm_pagemap_device_addr;
+struct drm_gpusvm_ctx;
+struct drm_gpusvm_range;
struct xe_bo;
struct xe_gt;
struct xe_vm;
@@ -167,6 +236,73 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}
+static inline int
+xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+ struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+ struct drm_gpusvm_ctx *ctx)
+{
+ return -EINVAL;
+}
+
+static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+ return NULL;
+}
+
+static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+ return 0;
+}
+
+static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+ return 0;
+}
+
+static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+ return 0;
+}
+
+static inline
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ bool preferred_region_is_vram)
+{
+ return false;
+}
+
+static inline
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
+{
+}
+
+static inline
+bool xe_svm_range_validate(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_mask, bool devmem_preferred)
+{
+ return false;
+}
+
+static inline
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
+{
+ return ULONG_MAX;
+}
+
#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 0771acbbf367..86e9811e60ba 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -10,6 +10,7 @@
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
+#include "xe_memirq.h"
#include "xe_migrate.h"
#include "xe_pcode.h"
#include "xe_sa.h"
@@ -87,13 +88,9 @@
*/
static int xe_tile_alloc(struct xe_tile *tile)
{
- struct drm_device *drm = &tile_to_xe(tile)->drm;
-
- tile->mem.ggtt = drmm_kzalloc(drm, sizeof(*tile->mem.ggtt),
- GFP_KERNEL);
+ tile->mem.ggtt = xe_ggtt_alloc(tile);
if (!tile->mem.ggtt)
return -ENOMEM;
- tile->mem.ggtt->tile = tile;
return 0;
}
@@ -178,6 +175,12 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
int xe_tile_init(struct xe_tile *tile)
{
+ int err;
+
+ err = xe_memirq_init(&tile->memirq);
+ if (err)
+ return err;
+
tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
if (IS_ERR(tile->mem.kernel_bb_pool))
return PTR_ERR(tile->mem.kernel_bb_pool);
diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
index eb939316d55b..cc33e8733983 100644
--- a/drivers/gpu/drm/xe/xe_tile.h
+++ b/drivers/gpu/drm/xe/xe_tile.h
@@ -16,4 +16,21 @@ int xe_tile_init(struct xe_tile *tile);
void xe_tile_migrate_wait(struct xe_tile *tile);
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
+{
+ return &tile->mem.vram.dpagemap;
+}
+#else
+static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
+{
+ return NULL;
+}
+#endif
+
+static inline bool xe_tile_is_root(struct xe_tile *tile)
+{
+ return tile->id == 0;
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
new file mode 100644
index 000000000000..f221dbed16f0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gtt_defs.h"
+
+#include "xe_assert.h"
+#include "xe_ggtt.h"
+#include "xe_gt_sriov_vf.h"
+#include "xe_sriov.h"
+#include "xe_sriov_printk.h"
+#include "xe_tile_sriov_vf.h"
+#include "xe_wopcm.h"
+
+static int vf_init_ggtt_balloons(struct xe_tile *tile)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ tile->sriov.vf.ggtt_balloon[0] = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
+ return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
+
+ tile->sriov.vf.ggtt_balloon[1] = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
+ xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]);
+ return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
+ }
+
+ return 0;
+}
+
+/**
+ * xe_tile_sriov_vf_balloon_ggtt_locked - Insert balloon nodes to limit used GGTT address range.
+ * @tile: the &xe_tile struct instance
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
+{
+ u64 ggtt_base = xe_gt_sriov_vf_ggtt_base(tile->primary_gt);
+ u64 ggtt_size = xe_gt_sriov_vf_ggtt(tile->primary_gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ u64 wopcm = xe_wopcm_size(xe);
+ u64 start, end;
+ int err;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(xe));
+ xe_tile_assert(tile, ggtt_size);
+ lockdep_assert_held(&tile->mem.ggtt->lock);
+
+ /*
+ * VF can only use part of the GGTT as allocated by the PF:
+ *
+ * WOPCM GUC_GGTT_TOP
+ * |<------------ Total GGTT size ------------------>|
+ *
+ * VF GGTT base -->|<- size ->|
+ *
+ * +--------------------+----------+-----------------+
+ * |////////////////////| block |\\\\\\\\\\\\\\\\\|
+ * +--------------------+----------+-----------------+
+ *
+ * |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
+ */
+
+ if (ggtt_base < wopcm || ggtt_base > GUC_GGTT_TOP ||
+ ggtt_size > GUC_GGTT_TOP - ggtt_base) {
+ xe_sriov_err(xe, "tile%u: Invalid GGTT configuration: %#llx-%#llx\n",
+ tile->id, ggtt_base, ggtt_base + ggtt_size - 1);
+ return -ERANGE;
+ }
+
+ start = wopcm;
+ end = ggtt_base;
+ if (end != start) {
+ err = xe_ggtt_node_insert_balloon_locked(tile->sriov.vf.ggtt_balloon[0],
+ start, end);
+ if (err)
+ return err;
+ }
+
+ start = ggtt_base + ggtt_size;
+ end = GUC_GGTT_TOP;
+ if (end != start) {
+ err = xe_ggtt_node_insert_balloon_locked(tile->sriov.vf.ggtt_balloon[1],
+ start, end);
+ if (err) {
+ xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]);
+ return err;
+ }
+ }
+
+ return 0;
+}
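+
+/*
+ * Worked example (numbers are illustrative only): with wopcm = 2M,
+ * ggtt_base = 1G and ggtt_size = 256M, balloon[0] spans [2M, 1G) and
+ * balloon[1] spans [1G + 256M, GUC_GGTT_TOP), leaving only the VF-owned
+ * [1G, 1G + 256M) range usable by this VF.
+ */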
+
+static int vf_balloon_ggtt(struct xe_tile *tile)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+ int err;
+
+ mutex_lock(&ggtt->lock);
+ err = xe_tile_sriov_vf_balloon_ggtt_locked(tile);
+ mutex_unlock(&ggtt->lock);
+
+ return err;
+}
+
+/**
+ * xe_tile_sriov_vf_deballoon_ggtt_locked - Remove balloon nodes.
+ * @tile: the &xe_tile struct instance
+ */
+void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile)
+{
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[1]);
+ xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]);
+}
+
+static void vf_deballoon_ggtt(struct xe_tile *tile)
+{
+ mutex_lock(&tile->mem.ggtt->lock);
+ xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
+ mutex_unlock(&tile->mem.ggtt->lock);
+}
+
+static void vf_fini_ggtt_balloons(struct xe_tile *tile)
+{
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[1]);
+ xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]);
+}
+
+static void cleanup_ggtt(struct drm_device *drm, void *arg)
+{
+ struct xe_tile *tile = arg;
+
+ vf_deballoon_ggtt(tile);
+ vf_fini_ggtt_balloons(tile);
+}
+
+/**
+ * xe_tile_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ int err;
+
+ err = vf_init_ggtt_balloons(tile);
+ if (err)
+ return err;
+
+ err = vf_balloon_ggtt(tile);
+ if (err) {
+ vf_fini_ggtt_balloons(tile);
+ return err;
+ }
+
+ return drmm_add_action_or_reset(&xe->drm, cleanup_ggtt, tile);
+}
+
+/**
+ * DOC: GGTT nodes shifting during VF post-migration recovery
+ *
+ * The first fixup applied to the VF KMD structures as part of post-migration
+ * recovery is shifting nodes within &xe_ggtt instance. The nodes are moved
+ * from range previously assigned to this VF, into newly provisioned area.
+ * The changes include balloons, which are resized accordingly.
+ *
+ * The balloon nodes are there to eliminate unavailable ranges from use: one
+ * reserves the GGTT area below the range for current VF, and another one
+ * reserves area above.
+ *
+ * Below is the GGTT layout of an example VF, with a certain address range
+ * assigned to it, and inaccessible areas above and below:
+ *
+ * 0 4GiB
+ * |<--------------------------- Total GGTT size ----------------------------->|
+ * WOPCM GUC_TOP
+ * |<-------------- Area mappable by xe_ggtt instance ---------------->|
+ *
+ * +---+---------------------------------+----------+----------------------+---+
+ * |\\\|/////////////////////////////////| VF mem |//////////////////////|\\\|
+ * +---+---------------------------------+----------+----------------------+---+
+ *
+ * Hardware enforced access rules before migration:
+ *
+ * |<------- inaccessible for VF ------->|<VF owned>|<-- inaccessible for VF ->|
+ *
+ * GGTT nodes used for tracking allocations:
+ *
+ * |<---------- balloon ------------>|<- nodes->|<----- balloon ------>|
+ *
+ * After the migration, the GGTT area assigned to the VF might have shifted,
+ * either to a lower or to a higher address. But we expect the total size and
+ * extra areas to be identical, as migration can only happen between matching
+ * platforms.
+ * Below is an example of GGTT layout of the VF after migration. Content of the
+ * GGTT for VF has been moved to a new area, and we receive its address from GuC:
+ *
+ * +---+----------------------+----------+---------------------------------+---+
+ * |\\\|//////////////////////| VF mem |/////////////////////////////////|\\\|
+ * +---+----------------------+----------+---------------------------------+---+
+ *
+ * Hardware enforced access rules after migration:
+ *
+ * |<- inaccessible for VF -->|<VF owned>|<------- inaccessible for VF ------->|
+ *
+ * So the VF has a new slice of GGTT assigned, and during the migration
+ * process the memory content was copied to that new area. But the &xe_ggtt
+ * nodes are still tracking allocations using the old addresses. The nodes
+ * within the VF-owned area have to be shifted, and the balloon nodes need to
+ * be resized to properly mask out areas not owned by the VF.
+ *
+ * Fixed &xe_ggtt nodes used for tracking allocations:
+ *
+ * |<------ balloon ------>|<- nodes->|<----------- balloon ----------->|
+ *
+ * Due to the use of GPU profiles, we do not expect the old and new GGTT
+ * areas to overlap; but our node shifting will fix addresses properly
+ * regardless.
+ */
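+
+/*
+ * For example (illustrative values): if the VF area moved from base
+ * 0x40000000 to 0x20000000, the shift is -0x20000000; every node inside
+ * the VF-owned range is moved down by that amount, and the balloons are
+ * re-inserted around the new range.
+ */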
+
+/**
+ * xe_tile_sriov_vf_fixup_ggtt_nodes - Shift GGTT allocations to match assigned range.
+ * @tile: the &xe_tile struct instance
+ * @shift: the shift value
+ *
+ * Since Global GTT is not virtualized, each VF has an assigned range
+ * within the global space. This range might have changed during migration,
+ * which requires all memory addresses pointing to GGTT to be shifted.
+ */
+void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ mutex_lock(&ggtt->lock);
+
+ xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
+ xe_ggtt_shift_nodes_locked(ggtt, shift);
+ xe_tile_sriov_vf_balloon_ggtt_locked(tile);
+
+ mutex_unlock(&ggtt->lock);
+}
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
new file mode 100644
index 000000000000..93eb043171e8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SRIOV_VF_H_
+#define _XE_TILE_SRIOV_VF_H_
+
+#include <linux/types.h>
+
+struct xe_tile;
+
+int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile);
+int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile);
+void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile);
+void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index ccebd5f0878e..86323cf3be2c 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -33,7 +33,7 @@ DECLARE_EVENT_CLASS(xe_bo,
TP_fast_assign(
__assign_str(dev);
- __entry->size = bo->size;
+ __entry->size = xe_bo_size(bo);
__entry->flags = bo->flags;
__entry->vm = bo->vm;
),
@@ -73,7 +73,7 @@ TRACE_EVENT(xe_bo_move,
TP_fast_assign(
__entry->bo = bo;
- __entry->size = bo->size;
+ __entry->size = xe_bo_size(bo);
__assign_str(new_placement_name);
__assign_str(old_placement_name);
__assign_str(device_id);
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 49ddbda7cdef..828b45b24c23 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -98,6 +98,11 @@ static const struct xe_rtp_entry_sr engine_tunings[] = {
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
+ { XE_RTP_NAME("Tuning: Disable NULL query for Anyhit Shader"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
+ },
};
static const struct xe_rtp_entry_sr lrc_tunings[] = {
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 3a8751a8b92d..465bda355443 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -33,6 +33,22 @@ uc_to_xe(struct xe_uc *uc)
}
/* Should be called once at driver load only */
+int xe_uc_init_noalloc(struct xe_uc *uc)
+{
+ int ret;
+
+ ret = xe_guc_init_noalloc(&uc->guc);
+ if (ret)
+ goto err;
+
+ /* HuC and GSC have no early dependencies and will be initialized during xe_uc_init(). */
+ return 0;
+
+err:
+ xe_gt_err(uc_to_gt(uc), "Failed to early initialize uC (%pe)\n", ERR_PTR(ret));
+ return ret;
+}
+
int xe_uc_init(struct xe_uc *uc)
{
int ret;
@@ -56,15 +72,17 @@ int xe_uc_init(struct xe_uc *uc)
if (!xe_device_uc_enabled(uc_to_xe(uc)))
return 0;
- if (IS_SRIOV_VF(uc_to_xe(uc)))
- return 0;
+ if (!IS_SRIOV_VF(uc_to_xe(uc))) {
+ ret = xe_wopcm_init(&uc->wopcm);
+ if (ret)
+ goto err;
+ }
- ret = xe_wopcm_init(&uc->wopcm);
+ ret = xe_guc_min_load_for_hwconfig(&uc->guc);
if (ret)
goto err;
return 0;
-
err:
xe_gt_err(uc_to_gt(uc), "Failed to initialize uC (%pe)\n", ERR_PTR(ret));
return ret;
@@ -126,28 +144,7 @@ int xe_uc_sanitize_reset(struct xe_uc *uc)
return uc_reset(uc);
}
-/**
- * xe_uc_init_hwconfig - minimally init Uc, read and parse hwconfig
- * @uc: The UC object
- *
- * Return: 0 on success, negative error code on error.
- */
-int xe_uc_init_hwconfig(struct xe_uc *uc)
-{
- int ret;
-
- /* GuC submission not enabled, nothing to do */
- if (!xe_device_uc_enabled(uc_to_xe(uc)))
- return 0;
-
- ret = xe_guc_min_load_for_hwconfig(&uc->guc);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int vf_uc_init_hw(struct xe_uc *uc)
+static int vf_uc_load_hw(struct xe_uc *uc)
{
int err;
@@ -161,22 +158,30 @@ static int vf_uc_init_hw(struct xe_uc *uc)
err = xe_gt_sriov_vf_connect(uc_to_gt(uc));
if (err)
- return err;
+ goto err_out;
uc->guc.submission_state.enabled = true;
+ err = xe_guc_opt_in_features_enable(&uc->guc);
+ if (err)
+ goto err_out;
+
err = xe_gt_record_default_lrcs(uc_to_gt(uc));
if (err)
- return err;
+ goto err_out;
return 0;
+
+err_out:
+ xe_guc_sanitize(&uc->guc);
+ return err;
}
/*
* Should be called during driver load, after every GT reset, and after every
* suspend to reload / auth the firmwares.
*/
-int xe_uc_init_hw(struct xe_uc *uc)
+int xe_uc_load_hw(struct xe_uc *uc)
{
int ret;
@@ -185,7 +190,7 @@ int xe_uc_init_hw(struct xe_uc *uc)
return 0;
if (IS_SRIOV_VF(uc_to_xe(uc)))
- return vf_uc_init_hw(uc);
+ return vf_uc_load_hw(uc);
ret = xe_huc_upload(&uc->huc);
if (ret)
@@ -201,15 +206,15 @@ int xe_uc_init_hw(struct xe_uc *uc)
ret = xe_gt_record_default_lrcs(uc_to_gt(uc));
if (ret)
- return ret;
+ goto err_out;
ret = xe_guc_post_load_init(&uc->guc);
if (ret)
- return ret;
+ goto err_out;
ret = xe_guc_pc_start(&uc->guc.pc);
if (ret)
- return ret;
+ goto err_out;
xe_guc_engine_activity_enable_stats(&uc->guc);
@@ -221,11 +226,10 @@ int xe_uc_init_hw(struct xe_uc *uc)
xe_gsc_load_start(&uc->gsc);
return 0;
-}
-int xe_uc_fini_hw(struct xe_uc *uc)
-{
- return xe_uc_sanitize_reset(uc);
+err_out:
+ xe_guc_sanitize(&uc->guc);
+ return ret;
}
int xe_uc_reset_prepare(struct xe_uc *uc)
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index c23e6f5e2514..21c9306098cf 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -8,11 +8,10 @@
struct xe_uc;
+int xe_uc_init_noalloc(struct xe_uc *uc);
int xe_uc_init(struct xe_uc *uc);
-int xe_uc_init_hwconfig(struct xe_uc *uc);
int xe_uc_init_post_hwconfig(struct xe_uc *uc);
-int xe_uc_init_hw(struct xe_uc *uc);
-int xe_uc_fini_hw(struct xe_uc *uc);
+int xe_uc_load_hw(struct xe_uc *uc);
void xe_uc_gucrc_disable(struct xe_uc *uc);
int xe_uc_reset_prepare(struct xe_uc *uc);
void xe_uc_stop_prepare(struct xe_uc *uc);
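Taken together, the xe_uc.c and xe_uc.h hunks above rename xe_uc_init_hw() to xe_uc_load_hw(), drop xe_uc_init_hwconfig() (its GuC min-load now happens inside xe_uc_init()), and add an allocation-free xe_uc_init_noalloc() phase. A minimal sketch of the resulting bring-up order, assuming call sites in the driver probe path (which is outside these hunks):

static int example_uc_bringup(struct xe_uc *uc)
{
	int err;

	/* Early, allocation-free GuC setup (new in this series). */
	err = xe_uc_init_noalloc(uc);
	if (err)
		return err;

	/* Allocating init; now also performs the hwconfig min-load. */
	err = xe_uc_init(uc);
	if (err)
		return err;

	/* Unchanged by this series. */
	err = xe_uc_init_post_hwconfig(uc);
	if (err)
		return err;

	/* Was xe_uc_init_hw(): uploads and authenticates the firmwares. */
	return xe_uc_load_hw(uc);
}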
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index a6612105201a..9bbdde604923 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -16,6 +16,7 @@
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_map.h"
#include "xe_mmio.h"
@@ -114,6 +115,7 @@ struct fw_blobs_by_type {
#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED
#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
+ fw_def(PANTHERLAKE, GT_TYPE_ANY, major_ver(xe, guc, ptl, 70, 47, 0)) \
fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 45, 2)) \
fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 45, 2)) \
fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \
@@ -126,6 +128,7 @@ struct fw_blobs_by_type {
fw_def(TIGERLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1))
#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
+ fw_def(PANTHERLAKE, GT_TYPE_ANY, no_ver(xe, huc, ptl)) \
fw_def(BATTLEMAGE, GT_TYPE_ANY, no_ver(xe, huc, bmg)) \
fw_def(LUNARLAKE, GT_TYPE_ANY, no_ver(xe, huc, lnl)) \
fw_def(METEORLAKE, GT_TYPE_ANY, no_ver(i915, huc_gsc, mtl)) \
@@ -662,11 +665,39 @@ do { \
ver_->major, ver_->minor, ver_->patch); \
} while (0)
+static void uc_fw_vf_override(struct xe_uc_fw *uc_fw)
+{
+ struct xe_uc_fw_version *compat = &uc_fw->versions.found[XE_UC_FW_VER_COMPATIBILITY];
+ struct xe_uc_fw_version *wanted = &uc_fw->versions.wanted;
+
+ /* Only GuC/HuC are supported */
+ if (uc_fw->type != XE_UC_FW_TYPE_GUC && uc_fw->type != XE_UC_FW_TYPE_HUC)
+ uc_fw->path = NULL;
+
+ /* The VF only supports firmware that the driver can autoselect */
+ xe_uc_fw_change_status(uc_fw, uc_fw->path ?
+ XE_UC_FIRMWARE_PRELOADED :
+ XE_UC_FIRMWARE_NOT_SUPPORTED);
+
+ if (!xe_uc_fw_is_supported(uc_fw))
+ return;
+
+ /* PF is doing the loading, so we don't need a path on the VF */
+ uc_fw->path = "Loaded by PF";
+
+ /* The GuC versions are set up during the VF bootstrap */
+ if (uc_fw->type == XE_UC_FW_TYPE_GUC) {
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
+ xe_gt_sriov_vf_guc_versions(uc_fw_to_gt(uc_fw), wanted, compat);
+ }
+}
+
static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmware_p)
{
struct xe_device *xe = uc_fw_to_xe(uc_fw);
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
+ struct drm_printer p = xe_gt_info_printer(gt);
struct device *dev = xe->drm.dev;
- struct drm_printer p = drm_info_printer(dev);
const struct firmware *fw = NULL;
int err;
@@ -675,20 +706,13 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
* before we've looked at the HW caps to see if we have uc support
*/
BUILD_BUG_ON(XE_UC_FIRMWARE_UNINITIALIZED);
- xe_assert(xe, !uc_fw->status);
- xe_assert(xe, !uc_fw->path);
+ xe_gt_assert(gt, !uc_fw->status);
+ xe_gt_assert(gt, !uc_fw->path);
uc_fw_auto_select(xe, uc_fw);
if (IS_SRIOV_VF(xe)) {
- /* Only GuC/HuC are supported */
- if (uc_fw->type != XE_UC_FW_TYPE_GUC &&
- uc_fw->type != XE_UC_FW_TYPE_HUC)
- uc_fw->path = NULL;
- /* VF will support only firmwares that driver can autoselect */
- xe_uc_fw_change_status(uc_fw, uc_fw->path ?
- XE_UC_FIRMWARE_PRELOADED :
- XE_UC_FIRMWARE_NOT_SUPPORTED);
+ uc_fw_vf_override(uc_fw);
return 0;
}
@@ -700,7 +724,7 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
if (!xe_uc_fw_is_supported(uc_fw)) {
if (uc_fw->type == XE_UC_FW_TYPE_GUC) {
- drm_err(&xe->drm, "No GuC firmware defined for platform\n");
+ xe_gt_err(gt, "No GuC firmware defined for platform\n");
return -ENOENT;
}
return 0;
@@ -709,7 +733,7 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
/* an empty path means the firmware is disabled */
if (!xe_device_uc_enabled(xe) || !(*uc_fw->path)) {
xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_DISABLED);
- drm_dbg(&xe->drm, "%s disabled", xe_uc_fw_type_repr(uc_fw->type));
+ xe_gt_dbg(gt, "%s disabled\n", xe_uc_fw_type_repr(uc_fw->type));
return 0;
}
@@ -742,10 +766,10 @@ fail:
XE_UC_FIRMWARE_MISSING :
XE_UC_FIRMWARE_ERROR);
- drm_notice(&xe->drm, "%s firmware %s: fetch failed with error %d\n",
- xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
- drm_info(&xe->drm, "%s firmware(s) can be downloaded from %s\n",
- xe_uc_fw_type_repr(uc_fw->type), XE_UC_FIRMWARE_URL);
+ xe_gt_notice(gt, "%s firmware %s: fetch failed with error %pe\n",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, ERR_PTR(err));
+ xe_gt_info(gt, "%s firmware(s) can be downloaded from %s\n",
+ xe_uc_fw_type_repr(uc_fw->type), XE_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index ad3b35a0e6eb..914026015019 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -65,6 +65,8 @@ enum xe_uc_fw_type {
* struct xe_uc_fw_version - Version for XE micro controller firmware
*/
struct xe_uc_fw_version {
+ /** @branch: branch version of the FW (not always available) */
+ u16 branch;
/** @major: major version of the FW */
u16 major;
/** @minor: minor version of the FW */
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 861577746929..2035604121e6 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -732,7 +732,9 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
DMA_RESV_USAGE_BOOKKEEP,
false, MAX_SCHEDULE_TIMEOUT);
+ down_read(&vm->userptr.notifier_lock);
err = xe_vm_invalidate_vma(&uvma->vma);
+ up_read(&vm->userptr.notifier_lock);
xe_vm_unlock(vm);
if (err)
break;
@@ -798,21 +800,47 @@ static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
}
ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
+static void xe_vma_svm_prefetch_op_fini(struct xe_vma_op *op)
+{
+ struct xe_vma *vma;
+
+ vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (op->base.op == DRM_GPUVA_OP_PREFETCH && xe_vma_is_cpu_addr_mirror(vma))
+ xa_destroy(&op->prefetch_range.range);
+}
+
+static void xe_vma_svm_prefetch_ops_fini(struct xe_vma_ops *vops)
+{
+ struct xe_vma_op *op;
+
+ if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
+ return;
+
+ list_for_each_entry(op, &vops->list, link)
+ xe_vma_svm_prefetch_op_fini(op);
+}
+
static void xe_vma_ops_fini(struct xe_vma_ops *vops)
{
int i;
+ xe_vma_svm_prefetch_ops_fini(vops);
+
for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
kfree(vops->pt_update_ops[i].ops);
}
-static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
+static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask, int inc_val)
{
int i;
+ if (!inc_val)
+ return;
+
for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
if (BIT(i) & tile_mask)
- ++vops->pt_update_ops[i].num_ops;
+ vops->pt_update_ops[i].num_ops += inc_val;
}
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
@@ -842,7 +870,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
xe_vm_populate_rebind(op, vma, tile_mask);
list_add_tail(&op->link, &vops->list);
- xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
return 0;
}
@@ -977,7 +1005,7 @@ xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
xe_vm_populate_range_rebind(op, vma, range, tile_mask);
list_add_tail(&op->link, &vops->list);
- xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
return 0;
}
@@ -1062,7 +1090,7 @@ xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
xe_vm_populate_range_unbind(op, range);
list_add_tail(&op->link, &vops->list);
- xe_vma_ops_incr_pt_update_ops(vops, range->tile_present);
+ xe_vma_ops_incr_pt_update_ops(vops, range->tile_present, 1);
return 0;
}
@@ -2141,6 +2169,35 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
return err;
}
+static bool vma_matches(struct xe_vma *vma, u64 page_addr)
+{
+ if (page_addr > xe_vma_end(vma) - 1 ||
+ page_addr + SZ_4K - 1 < xe_vma_start(vma))
+ return false;
+
+ return true;
+}
+
+/**
+ * xe_vm_find_vma_by_addr() - Find a VMA by its address
+ *
+ * @vm: the xe_vm the vma belongs to
+ * @page_addr: address to look up
+ *
+ * Return: the VMA covering @page_addr, or NULL if none is found
+ */
+struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr)
+{
+ struct xe_vma *vma = NULL;
+
+ if (vm->usm.last_fault_vma) { /* Fast lookup */
+ if (vma_matches(vm->usm.last_fault_vma, page_addr))
+ vma = vm->usm.last_fault_vma;
+ }
+ if (!vma)
+ vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
+
+ return vma;
+}
+
static const u32 region_to_mem_type[] = {
XE_PL_TT,
XE_PL_VRAM0,
@@ -2221,13 +2278,25 @@ static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags)
return true;
}
+static void xe_svm_prefetch_gpuva_ops_fini(struct drm_gpuva_ops *ops)
+{
+ struct drm_gpuva_op *__op;
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+
+ xe_vma_svm_prefetch_op_fini(op);
+ }
+}
+
/*
* Create operations list from IOCTL arguments, setup operations fields so parse
* and commit steps are decoupled from IOCTL arguments. This step can fail.
*/
static struct drm_gpuva_ops *
-vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
- u64 bo_offset_or_userptr, u64 addr, u64 range,
+vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
+ struct xe_bo *bo, u64 bo_offset_or_userptr,
+ u64 addr, u64 range,
u32 operation, u32 flags,
u32 prefetch_region, u16 pat_index)
{
@@ -2235,6 +2304,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct drm_gpuva_ops *ops;
struct drm_gpuva_op *__op;
struct drm_gpuvm_bo *vm_bo;
+ u64 range_end = addr + range;
int err;
lockdep_assert_held_write(&vm->lock);
@@ -2296,14 +2366,80 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
op->map.invalidate_on_bind =
__xe_vm_needs_clear_scratch_pages(vm, flags);
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
- op->prefetch.region = prefetch_region;
- }
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ struct xe_svm_range *svm_range;
+ struct drm_gpusvm_ctx ctx = {};
+ struct xe_tile *tile;
+ u8 id, tile_mask = 0;
+ u32 i;
+
+ if (!xe_vma_is_cpu_addr_mirror(vma)) {
+ op->prefetch.region = prefetch_region;
+ break;
+ }
+
+ ctx.read_only = xe_vma_read_only(vma);
+ ctx.devmem_possible = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
+
+ for_each_tile(tile, vm->xe, id)
+ tile_mask |= 0x1 << id;
+
+ xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC);
+ op->prefetch_range.region = prefetch_region;
+ op->prefetch_range.ranges_count = 0;
+alloc_next_range:
+ svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
+
+ if (PTR_ERR(svm_range) == -ENOENT) {
+ u64 ret = xe_svm_find_vma_start(vm, addr, range_end, vma);
+
+ addr = ret == ULONG_MAX ? 0 : ret;
+ if (addr)
+ goto alloc_next_range;
+ else
+ goto print_op_label;
+ }
+
+ if (IS_ERR(svm_range)) {
+ err = PTR_ERR(svm_range);
+ goto unwind_prefetch_ops;
+ }
+
+ if (xe_svm_range_validate(vm, svm_range, tile_mask, !!prefetch_region)) {
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE IS VALID");
+ goto check_next_range;
+ }
+
+ err = xa_alloc(&op->prefetch_range.range,
+ &i, svm_range, xa_limit_32b,
+ GFP_KERNEL);
+ if (err)
+ goto unwind_prefetch_ops;
+
+ op->prefetch_range.ranges_count++;
+ vops->flags |= XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH;
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE CREATED");
+check_next_range:
+ if (range_end > xe_svm_range_end(svm_range) &&
+ xe_svm_range_end(svm_range) < xe_vma_end(vma)) {
+ addr = xe_svm_range_end(svm_range);
+ goto alloc_next_range;
+ }
+ }
+print_op_label:
print_op(vm->xe, __op);
}
return ops;
+
+unwind_prefetch_ops:
+ xe_svm_prefetch_gpuva_ops_fini(ops);
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return ERR_PTR(err);
}
+
ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
@@ -2498,7 +2634,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
!op->map.is_cpu_addr_mirror) ||
op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
- op->tile_mask);
+ op->tile_mask, 1);
break;
}
case DRM_GPUVA_OP_REMAP:
@@ -2507,6 +2643,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
gpuva_to_vma(op->base.remap.unmap->va);
bool skip = xe_vma_is_cpu_addr_mirror(old);
u64 start = xe_vma_start(old), end = xe_vma_end(old);
+ int num_remap_ops = 0;
if (op->base.remap.prev)
start = op->base.remap.prev->va.addr +
@@ -2559,7 +2696,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
(ULL)op->remap.start,
(ULL)op->remap.range);
} else {
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ num_remap_ops++;
}
}
@@ -2588,11 +2725,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
(ULL)op->remap.start,
(ULL)op->remap.range);
} else {
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ num_remap_ops++;
}
}
if (!skip)
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ num_remap_ops++;
+
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, num_remap_ops);
break;
}
case DRM_GPUVA_OP_UNMAP:
@@ -2604,7 +2743,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return -EBUSY;
if (!xe_vma_is_cpu_addr_mirror(vma))
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
break;
case DRM_GPUVA_OP_PREFETCH:
vma = gpuva_to_vma(op->base.prefetch.va);
@@ -2615,8 +2754,12 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return err;
}
- if (!xe_vma_is_cpu_addr_mirror(vma))
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask,
+ op->prefetch_range.ranges_count);
+ else
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
+
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
@@ -2742,6 +2885,57 @@ static int check_ufence(struct xe_vma *vma)
return 0;
}
+static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
+{
+ bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ int err = 0;
+
+ struct xe_svm_range *svm_range;
+ struct drm_gpusvm_ctx ctx = {};
+ struct xe_tile *tile;
+ unsigned long i;
+ u32 region;
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ return 0;
+
+ region = op->prefetch_range.region;
+
+ ctx.read_only = xe_vma_read_only(vma);
+ ctx.devmem_possible = devmem_possible;
+ ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
+
+ /* TODO: Threading the migration */
+ xa_for_each(&op->prefetch_range.range, i, svm_range) {
+ if (!region)
+ xe_svm_range_migrate_to_smem(vm, svm_range);
+
+ if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
+ tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
+ err = xe_svm_alloc_vram(tile, svm_range, &ctx);
+ if (err) {
+ drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ return -ENODATA;
+ }
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE MIGRATED TO VRAM");
+ }
+
+ err = xe_svm_range_get_pages(vm, svm_range, &ctx);
+ if (err) {
+ drm_dbg(&vm->xe->drm, "Get pages failed, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM)
+ err = -ENODATA;
+ return err;
+ }
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE GET PAGES DONE");
+ }
+
+ return err;
+}
+
static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
struct xe_vma_op *op)
{
@@ -2779,7 +2973,12 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_PREFETCH:
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- u32 region = op->prefetch.region;
+ u32 region;
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ region = op->prefetch_range.region;
+ else
+ region = op->prefetch.region;
xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
@@ -2798,6 +2997,25 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
return err;
}
+static int vm_bind_ioctl_ops_prefetch_ranges(struct xe_vm *vm, struct xe_vma_ops *vops)
+{
+ struct xe_vma_op *op;
+ int err;
+
+ if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
+ return 0;
+
+ list_for_each_entry(op, &vops->list, link) {
+ if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
+ err = prefetch_ranges(vm, op);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
struct xe_vm *vm,
struct xe_vma_ops *vops)
@@ -3239,6 +3457,7 @@ static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
vops->q = q;
vops->syncs = syncs;
vops->num_syncs = num_syncs;
+ vops->flags = 0;
}
static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
@@ -3247,9 +3466,9 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
{
u16 coh_mode;
- if (XE_IOCTL_DBG(xe, range > bo->size) ||
+ if (XE_IOCTL_DBG(xe, range > xe_bo_size(bo)) ||
XE_IOCTL_DBG(xe, obj_offset >
- bo->size - range)) {
+ xe_bo_size(bo) - range)) {
return -EINVAL;
}
@@ -3446,7 +3665,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
u16 pat_index = bind_ops[i].pat_index;
- ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
+ ops[i] = vm_bind_ioctl_ops_create(vm, &vops, bos[i], obj_offset,
addr, range, op, flags,
prefetch_region, pat_index);
if (IS_ERR(ops[i])) {
@@ -3479,6 +3698,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (err)
goto unwind_ops;
+ err = vm_bind_ioctl_ops_prefetch_ranges(vm, &vops);
+ if (err)
+ goto unwind_ops;
+
fence = vm_bind_ioctl_ops_execute(vm, &vops);
if (IS_ERR(fence))
err = PTR_ERR(fence);
@@ -3548,7 +3771,7 @@ struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
xe_vma_ops_init(&vops, vm, q, NULL, 0);
- ops = vm_bind_ioctl_ops_create(vm, bo, 0, addr, bo->size,
+ ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, xe_bo_size(bo),
DRM_XE_VM_BIND_OP_MAP, 0, 0,
vm->xe->pat.idx[cache_lvl]);
if (IS_ERR(ops)) {
@@ -3620,6 +3843,68 @@ void xe_vm_unlock(struct xe_vm *vm)
}
/**
+ * xe_vm_range_tilemask_tlb_invalidation - Issue a TLB invalidation on this tilemask for an
+ * address range
+ * @vm: The VM
+ * @start: start address
+ * @end: end address
+ * @tile_mask: mask of tiles whose GTs the invalidation is issued on
+ *
+ * Issue a range-based TLB invalidation for the GTs of the tiles set in
+ * @tile_mask.
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask)
+{
+ struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ struct xe_tile *tile;
+ u32 fence_id = 0;
+ u8 id;
+ int err;
+
+ if (!tile_mask)
+ return 0;
+
+ for_each_tile(tile, vm->xe, id) {
+ if (tile_mask & BIT(id)) {
+ xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->primary_gt,
+ &fence[fence_id],
+ start,
+ end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
+
+ if (!tile->media_gt)
+ continue;
+
+ xe_gt_tlb_invalidation_fence_init(tile->media_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->media_gt,
+ &fence[fence_id],
+ start,
+ end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
+ }
+ }
+
+wait:
+ for (id = 0; id < fence_id; ++id)
+ xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
+ return err;
+}
+
+/**
* xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
* @vma: VMA to invalidate
*
@@ -3632,28 +3917,34 @@ void xe_vm_unlock(struct xe_vm *vm)
int xe_vm_invalidate_vma(struct xe_vma *vma)
{
struct xe_device *xe = xe_vma_vm(vma)->xe;
+ struct xe_vm *vm = xe_vma_vm(vma);
struct xe_tile *tile;
- struct xe_gt_tlb_invalidation_fence
- fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
- u8 id;
- u32 fence_id = 0;
+ u8 tile_mask = 0;
int ret = 0;
+ u8 id;
xe_assert(xe, !xe_vma_is_null(vma));
xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
trace_xe_vma_invalidate(vma);
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ vm_dbg(&vm->xe->drm,
"INVALIDATE: addr=0x%016llx, range=0x%016llx",
xe_vma_start(vma), xe_vma_size(vma));
- /* Check that we don't race with page-table updates */
+ /*
+ * Check that we don't race with page-table updates; the tile_invalidated
+ * update is safe
+ */
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (xe_vma_is_userptr(vma)) {
+ lockdep_assert(lockdep_is_held_type(&vm->userptr.notifier_lock, 0) ||
+ (lockdep_is_held_type(&vm->userptr.notifier_lock, 1) &&
+ lockdep_is_held(&xe_vm_resv(vm)->lock.base)));
+
WARN_ON_ONCE(!mmu_interval_check_retry
(&to_userptr_vma(vma)->userptr.notifier,
to_userptr_vma(vma)->userptr.notifier_seq));
- WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
+ WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP));
} else {
@@ -3661,39 +3952,17 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
}
}
- for_each_tile(tile, xe, id) {
- if (xe_pt_zap_ptes(tile, vma)) {
- xe_device_wmb(xe);
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id],
- true);
-
- ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
- &fence[fence_id], vma);
- if (ret)
- goto wait;
- ++fence_id;
-
- if (!tile->media_gt)
- continue;
-
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id],
- true);
+ for_each_tile(tile, xe, id)
+ if (xe_pt_zap_ptes(tile, vma))
+ tile_mask |= BIT(id);
- ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
- &fence[fence_id], vma);
- if (ret)
- goto wait;
- ++fence_id;
- }
- }
+ xe_device_wmb(xe);
-wait:
- for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
+ xe_vma_end(vma), tile_mask);
- vma->tile_invalidated = vma->tile_mask;
+ /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
+ WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
return ret;
}
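For orientation, the prefetch-related additions to xe_vm.c above hang together as follows (a sketch of the call order only; error paths elided):

/*
 * SVM prefetch flow added above, in call order:
 *
 *   xe_vm_bind_ioctl()
 *     vm_bind_ioctl_ops_create()           - splits a prefetch on a CPU
 *                                            address mirror VMA into SVM
 *                                            ranges, stored in the op's
 *                                            prefetch_range.range xarray
 *     vm_bind_ioctl_ops_parse()            - bumps pt_update_ops by
 *                                            prefetch_range.ranges_count
 *     vm_bind_ioctl_ops_prefetch_ranges()  - migrates each range and gets
 *                                            its pages (prefetch_ranges())
 *     vm_bind_ioctl_ops_execute()          - binds as before
 */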
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 494af6bdc646..3475a118f666 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -169,6 +169,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
!xe_vma_is_cpu_addr_mirror(vma);
}
+struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
+
/**
* to_userptr_vma() - Return a pointer to an embedding userptr vma
* @vma: Pointer to the embedded struct xe_vma
@@ -226,6 +228,9 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
struct xe_svm_range *range);
+int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask);
+
int xe_vm_invalidate_vma(struct xe_vma *vma);
int xe_vm_validate_protected(struct xe_vm *vm);
@@ -370,6 +375,25 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
return false;
}
+/**
+ * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
+ * a valid GPU mapping
+ * @tile: The tile which the GPU mapping belongs to
+ * @tile_present: Tile present mask
+ * @tile_invalidated: Tile invalidated mask
+ *
+ * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
+ * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
+ * without the notifier lock in userptr or SVM cases, and not reliable without
+ * the BO dma-resv lock in the BO case. As such, they should only be used in
+ * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
+ * invalidation) where it is harmless.
+ *
+ * Return: True if there are valid GPU pages, False otherwise
+ */
+#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \
+ ((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
+
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
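A usage sketch for the new helper (the caller below is hypothetical; the real call sites are the fault and invalidation paths named in the kernel-doc):

static bool example_mapping_still_valid(struct xe_tile *tile,
					struct xe_vma *vma)
{
	/*
	 * Opportunistic check only: per the kernel-doc, without the
	 * notifier/dma-resv locks a stale answer is possible, so this
	 * may only be used where acting on a stale result is harmless.
	 */
	return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
					   vma->tile_invalidated);
}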
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 1979e9bdbdf3..8a07feef503b 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -100,14 +100,21 @@ struct xe_vma {
struct work_struct destroy_work;
};
- /** @tile_invalidated: VMA has been invalidated */
+ /**
+ * @tile_invalidated: Tile mask of bindings that are invalidated for this
+ * VMA. For writing, this is protected by the BO's resv and, for userptrs,
+ * by vm->userptr.notifier_lock in write mode, or by
+ * vm->userptr.notifier_lock in read mode together with the vm->resv. For
+ * stable reading, the BO's resv or the userptr
+ * vm->userptr.notifier_lock in read mode is required. Can be
+ * opportunistically read with READ_ONCE outside of locks.
+ */
u8 tile_invalidated;
/** @tile_mask: Tile mask of where to create binding for this VMA */
u8 tile_mask;
/**
- * @tile_present: GT mask of binding are present for this VMA.
+ * @tile_present: Tile mask of bindings that are present for this VMA.
* protected by vm->lock, vm->resv and for userptrs,
* vm->userptr.notifier_lock for writing. Needs either for reading,
* but if reading is done under the vm->lock only, it needs to be held
@@ -259,7 +266,7 @@ struct xe_vm {
* up for revalidation. Protected from access with the
* @invalidated_lock. Removing items from the list
* additionally requires @lock in write mode, and adding
- * items to the list requires either the @userptr.notifer_lock in
+ * items to the list requires either the @userptr.notifier_lock in
* write mode, OR @lock in write mode.
*/
struct list_head invalidated;
@@ -382,6 +389,16 @@ struct xe_vma_op_unmap_range {
struct xe_svm_range *range;
};
+/** struct xe_vma_op_prefetch_range - VMA prefetch range operation */
+struct xe_vma_op_prefetch_range {
+ /** @range: xarray for SVM ranges data */
+ struct xarray range;
+ /** @ranges_count: number of svm ranges to map */
+ u32 ranges_count;
+ /** @region: memory region to prefetch to */
+ u32 region;
+};
+
/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
/** @XE_VMA_OP_COMMITTED: VMA operation committed */
@@ -424,6 +441,8 @@ struct xe_vma_op {
struct xe_vma_op_map_range map_range;
/** @unmap_range: VMA unmap range operation specific data */
struct xe_vma_op_unmap_range unmap_range;
+ /** @prefetch_range: VMA prefetch range operation specific data */
+ struct xe_vma_op_prefetch_range prefetch_range;
};
};
@@ -441,6 +460,9 @@ struct xe_vma_ops {
u32 num_syncs;
/** @pt_update_ops: page table update operations */
struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
+ /** @flags: flags signifying properties of this xe_vma_ops */
+#define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
+ u32 flags;
#ifdef TEST_VM_OPS_ERROR
/** @inject_error: inject error to test error handling */
bool inject_error;
diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c
index 56930ad42962..8f23a27871b6 100644
--- a/drivers/gpu/drm/xe/xe_vsec.c
+++ b/drivers/gpu/drm/xe/xe_vsec.c
@@ -141,8 +141,8 @@ static int xe_guid_decode(u32 guid, int *index, u32 *offset)
return 0;
}
-static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
- u32 count)
+int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
+ u32 count)
{
struct xe_device *xe = pdev_to_xe_device(pdev);
void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
diff --git a/drivers/gpu/drm/xe/xe_vsec.h b/drivers/gpu/drm/xe/xe_vsec.h
index 5777c53faec2..dabfb4e02d70 100644
--- a/drivers/gpu/drm/xe/xe_vsec.h
+++ b/drivers/gpu/drm/xe/xe_vsec.h
@@ -4,8 +4,12 @@
#ifndef _XE_VSEC_H_
#define _XE_VSEC_H_
+#include <linux/types.h>
+
+struct pci_dev;
struct xe_device;
void xe_vsec_init(struct xe_device *xe);
+int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset, u32 count);
#endif
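With the prototype now exported, other parts of the driver can read BMG telemetry. A caller sketch (the GUID, offset, and buffer size here are illustrative):

static int example_read_telemetry(struct pci_dev *pdev, u32 guid)
{
	u64 data[4];
	int ret;

	/* Read four telemetry qwords starting at offset 0 for this GUID. */
	ret = xe_pmt_telem_read(pdev, guid, data, 0, ARRAY_SIZE(data));
	if (ret < 0)	/* negative errno on failure */
		return ret;

	return 0;
}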
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 67196baa4249..22a98600fd8f 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -10,6 +10,7 @@
#include <linux/compiler_types.h>
#include <linux/fault-inject.h>
+#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>
#include "regs/xe_engine_regs.h"
@@ -285,6 +286,18 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
+ { XE_RTP_NAME("16021865536"),
+ XE_RTP_RULES(MEDIA_VERSION(3002),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("16021867713"),
+ XE_RTP_RULES(MEDIA_VERSION(3002),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
{ XE_RTP_NAME("14021486841"),
XE_RTP_RULES(MEDIA_VERSION(3000), MEDIA_STEP(A0, B0),
ENGINE_CLASS(VIDEO_DECODE)),
@@ -503,10 +516,6 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL))
},
- { XE_RTP_NAME("16018737384"),
- XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS))
- },
/*
* These two workarounds are the same, just applying to different
* engines. Although Wa_18032095049 (for the RCS) isn't required on
@@ -533,31 +542,38 @@ static const struct xe_rtp_entry_sr engine_was[] = {
/* Xe2_HPG */
{ XE_RTP_NAME("16018712365"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, XE2_ALLOC_DPA_STARVE_FIX_DIS))
},
{ XE_RTP_NAME("16018737384"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS))
},
{ XE_RTP_NAME("14019988906"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD))
},
{ XE_RTP_NAME("14019877138"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
},
{ XE_RTP_NAME("14020338487"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN3, XE2_EUPEND_CHK_FLUSH_DIS))
},
{ XE_RTP_NAME("18032247524"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, SEQUENTIAL_ACCESS_UPGRADE_DISABLE))
},
{ XE_RTP_NAME("14018471104"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL))
},
/*
@@ -566,7 +582,7 @@ static const struct xe_rtp_entry_sr engine_was[] = {
* apply this to all engines for simplicity.
*/
{ XE_RTP_NAME("16021639441"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002)),
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0),
GHWSP_CSB_REPORT_DIS |
PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
@@ -578,11 +594,12 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, WR_REQ_CHAINING_DIS))
},
{ XE_RTP_NAME("14021402888"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
- { XE_RTP_NAME("14021821874"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ { XE_RTP_NAME("14021821874, 14022954250"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, STK_ID_RESTRICT))
},
@@ -640,6 +657,10 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(SET(RING_PSMI_CTL(0), RC_SEMA_IDLE_MSG_DISABLE,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
+ { XE_RTP_NAME("14021402888"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3003), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
+ },
};
static const struct xe_rtp_entry_sr lrc_was[] = {
@@ -774,7 +795,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_ACTIONS(SET(INSTPM(RENDER_RING_BASE), ENABLE_SEMAPHORE_POLL_BIT))
},
{ XE_RTP_NAME("18033852989"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2004), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST))
},
{ XE_RTP_NAME("14021567978"),
@@ -807,7 +828,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN))
},
{ XE_RTP_NAME("14019386621"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(VF_SCRATCHPAD, XE2_VFG_TED_CREDIT_INTERFACE_DISABLE))
},
{ XE_RTP_NAME("14020756599"),
@@ -824,13 +845,17 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_AUTOSTRIP))
},
{ XE_RTP_NAME("15016589081"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
},
{ XE_RTP_NAME("22021007897"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
},
+ { XE_RTP_NAME("18033852989"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST))
+ },
/* Xe3_LPG */
{ XE_RTP_NAME("14021490052"),
@@ -852,9 +877,34 @@ static __maybe_unused const struct xe_rtp_entry oob_was[] = {
static_assert(ARRAY_SIZE(oob_was) - 1 == _XE_WA_OOB_COUNT);
+static __maybe_unused const struct xe_rtp_entry device_oob_was[] = {
+#include <generated/xe_device_wa_oob.c>
+ {}
+};
+
+static_assert(ARRAY_SIZE(device_oob_was) - 1 == _XE_DEVICE_WA_OOB_COUNT);
+
__diag_pop();
/**
+ * xe_wa_process_device_oob - process OOB workaround table
+ * @xe: device instance to process workarounds for
+ *
+ * Process the OOB workaround table for this device, marking in @xe the
+ * workarounds that are active.
+ */
+void xe_wa_process_device_oob(struct xe_device *xe)
+{
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(xe);
+
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, xe->wa_active.oob, ARRAY_SIZE(device_oob_was));
+
+ xe->wa_active.oob_initialized = true;
+ xe_rtp_process(&ctx, device_oob_was);
+}
+
+/**
* xe_wa_process_oob - process OOB workaround table
* @gt: GT instance to process workarounds for
*
@@ -923,6 +973,28 @@ void xe_wa_process_lrc(struct xe_hw_engine *hwe)
}
/**
+ * xe_wa_device_init - initialize device with workaround oob bookkeeping
+ * @xe: Xe device instance to initialize
+ *
+ * Returns 0 for success, negative error code otherwise
+ */
+int xe_wa_device_init(struct xe_device *xe)
+{
+ unsigned long *p;
+
+ p = drmm_kzalloc(&xe->drm,
+ sizeof(*p) * BITS_TO_LONGS(ARRAY_SIZE(device_oob_was)),
+ GFP_KERNEL);
+
+ if (!p)
+ return -ENOMEM;
+
+ xe->wa_active.oob = p;
+
+ return 0;
+}
+
+/**
* xe_wa_init - initialize gt with workaround bookkeeping
* @gt: GT instance to initialize
*
@@ -956,6 +1028,16 @@ int xe_wa_init(struct xe_gt *gt)
}
ALLOW_ERROR_INJECTION(xe_wa_init, ERRNO); /* See xe_pci_probe() */
+void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p)
+{
+ size_t idx;
+
+ drm_printf(p, "Device OOB Workarounds\n");
+ for_each_set_bit(idx, xe->wa_active.oob, ARRAY_SIZE(device_oob_was))
+ if (device_oob_was[idx].name)
+ drm_printf_indent(p, 1, "%s\n", device_oob_was[idx].name);
+}
+
void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p)
{
size_t idx;
diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h
index 52337405b5bc..f3880c65cb8d 100644
--- a/drivers/gpu/drm/xe/xe_wa.h
+++ b/drivers/gpu/drm/xe/xe_wa.h
@@ -13,17 +13,19 @@ struct xe_gt;
struct xe_hw_engine;
struct xe_tile;
+int xe_wa_device_init(struct xe_device *xe);
int xe_wa_init(struct xe_gt *gt);
+void xe_wa_process_device_oob(struct xe_device *xe);
void xe_wa_process_oob(struct xe_gt *gt);
void xe_wa_process_gt(struct xe_gt *gt);
void xe_wa_process_engine(struct xe_hw_engine *hwe);
void xe_wa_process_lrc(struct xe_hw_engine *hwe);
void xe_wa_apply_tile_workarounds(struct xe_tile *tile);
+void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p);
void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
/**
- * XE_WA - Out-of-band workarounds, that don't fit the lifecycle any
- * other more specific type
+ * XE_WA - Out-of-band workarounds, to be queried and called as needed.
* @gt__: gt instance
* @id__: XE_OOB_<id__>, as generated by build system in generated/xe_wa_oob.h
*/
@@ -32,4 +34,20 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \
})
+/**
+ * XE_DEVICE_WA - Out-of-band Device workarounds, to be queried and called
+ * as needed.
+ * @xe__: xe_device
+ * @id__: XE_DEVICE_WA_OOB_<id__>, as generated by build system in generated/xe_device_wa_oob.h
+ */
+#define XE_DEVICE_WA(xe__, id__) ({ \
+ xe_assert(xe__, (xe__)->wa_active.oob_initialized); \
+ test_bit(XE_DEVICE_WA_OOB_ ## id__, (xe__)->wa_active.oob); \
+})
+
+#define XE_DEVICE_WA_DISABLE(xe__, id__) ({ \
+ xe_assert(xe__, (xe__)->wa_active.oob_initialized); \
+ clear_bit(XE_DEVICE_WA_OOB_ ## id__, (xe__)->wa_active.oob); \
+})
+
#endif
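Usage of the new device-scoped macros mirrors XE_WA(). A sketch using the one workaround id added to the rules file below (15015404425_disable; the caller is hypothetical):

static void example_handle_device_wa(struct xe_device *xe)
{
	/* Query a device-level OOB workaround... */
	if (XE_DEVICE_WA(xe, 15015404425_disable))
		; /* honor the workaround here */

	/* ...or clear it if a later probe step proves it unneeded. */
	XE_DEVICE_WA_DISABLE(xe, 15015404425_disable);
}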
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 6d70109fcc43..e990f20eccfe 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -31,12 +31,14 @@
GRAPHICS_VERSION(2004)
13011645652 GRAPHICS_VERSION(2004)
GRAPHICS_VERSION(3001)
-14022293748 GRAPHICS_VERSION(2001)
+14022293748 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
-22019794406 GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION(3003)
+22019794406 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
+ GRAPHICS_VERSION(3003)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
@@ -58,9 +60,15 @@ no_media_l3 MEDIA_VERSION(3000)
GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0)
16023105232 GRAPHICS_VERSION_RANGE(2001, 3001)
MEDIA_VERSION_RANGE(1301, 3000)
+ MEDIA_VERSION(3002)
+ GRAPHICS_VERSION(3003)
16026508708 GRAPHICS_VERSION_RANGE(1200, 3001)
MEDIA_VERSION_RANGE(1300, 3000)
+ MEDIA_VERSION(3002)
+ GRAPHICS_VERSION(3003)
# SoC workaround - currently applies to all platforms with the following
# primary GT GMDID
14022085890 GRAPHICS_VERSION(2001)
+
+15015404425_disable PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER)
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index dfa78a49a6d9..806ec66ee7f7 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -54,6 +54,7 @@ static const struct drm_framebuffer_funcs fb_funcs = {
static struct drm_framebuffer *
fb_create(struct drm_device *dev, struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct xen_drm_front_drm_info *drm_info = dev->dev_private;
@@ -61,7 +62,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
struct drm_gem_object *gem_obj;
int ret;
- fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
+ fb = drm_gem_fb_create_with_funcs(dev, filp, info, mode_cmd, &fb_funcs);
if (IS_ERR(fb))
return fb;
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 197defe4f928..34ddbf98e81d 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1720,7 +1720,8 @@ disconnected:
return connector_status_disconnected;
}
-static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+zynqmp_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
@@ -2409,9 +2410,9 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
struct zynqmp_dp *dp;
int ret;
- dp = kzalloc(sizeof(*dp), GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
+ dp = devm_drm_bridge_alloc(&pdev->dev, struct zynqmp_dp, bridge, &zynqmp_dp_bridge_funcs);
+ if (IS_ERR(dp))
+ return PTR_ERR(dp);
dp->dev = &pdev->dev;
dp->dpsub = dpsub;
@@ -2424,31 +2425,25 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
/* Acquire all resources (IOMEM, IRQ and PHYs). */
dp->iomem = devm_platform_ioremap_resource_byname(pdev, "dp");
- if (IS_ERR(dp->iomem)) {
- ret = PTR_ERR(dp->iomem);
- goto err_free;
- }
+ if (IS_ERR(dp->iomem))
+ return PTR_ERR(dp->iomem);
dp->irq = platform_get_irq(pdev, 0);
- if (dp->irq < 0) {
- ret = dp->irq;
- goto err_free;
- }
+ if (dp->irq < 0)
+ return dp->irq;
dp->reset = devm_reset_control_get(dp->dev, NULL);
- if (IS_ERR(dp->reset)) {
- ret = dev_err_probe(dp->dev, PTR_ERR(dp->reset),
+ if (IS_ERR(dp->reset))
+ return dev_err_probe(dp->dev, PTR_ERR(dp->reset),
"failed to get reset\n");
- goto err_free;
- }
ret = zynqmp_dp_reset(dp, true);
if (ret < 0)
- goto err_free;
+ return ret;
ret = zynqmp_dp_reset(dp, false);
if (ret < 0)
- goto err_free;
+ return ret;
ret = zynqmp_dp_phy_probe(dp);
if (ret)
@@ -2456,7 +2451,6 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
/* Initialize the bridge. */
bridge = &dp->bridge;
- bridge->funcs = &zynqmp_dp_bridge_funcs;
bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -2509,8 +2503,6 @@ err_phy_exit:
zynqmp_dp_phy_exit(dp);
err_reset:
zynqmp_dp_reset(dp, true);
-err_free:
- kfree(dp);
return ret;
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 3a9544b97bc5..2764c4b17c5e 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -180,7 +180,6 @@ static int zynqmp_dpsub_parse_dt(struct zynqmp_dpsub *dpsub)
void zynqmp_dpsub_release(struct zynqmp_dpsub *dpsub)
{
kfree(dpsub->disp);
- kfree(dpsub->dp);
kfree(dpsub);
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index b47463473472..2bee0a2275ed 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -373,6 +373,7 @@ static int zynqmp_dpsub_dumb_create(struct drm_file *file_priv,
static struct drm_framebuffer *
zynqmp_dpsub_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm);
@@ -383,7 +384,7 @@ zynqmp_dpsub_fb_create(struct drm_device *drm, struct drm_file *file_priv,
for (i = 0; i < ARRAY_SIZE(cmd.pitches); ++i)
cmd.pitches[i] = ALIGN(cmd.pitches[i], dpsub->dma_align);
- return drm_gem_fb_create(drm, file_priv, &cmd);
+ return drm_gem_fb_create(drm, file_priv, info, &cmd);
}
static const struct drm_mode_config_funcs zynqmp_dpsub_mode_config_funcs = {
diff --git a/drivers/gpu/nova-core/dma.rs b/drivers/gpu/nova-core/dma.rs
new file mode 100644
index 000000000000..94f44bcfd748
--- /dev/null
+++ b/drivers/gpu/nova-core/dma.rs
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Simple DMA object wrapper.
+
+use core::ops::{Deref, DerefMut};
+
+use kernel::device;
+use kernel::dma::CoherentAllocation;
+use kernel::page::PAGE_SIZE;
+use kernel::prelude::*;
+
+pub(crate) struct DmaObject {
+ dma: CoherentAllocation<u8>,
+}
+
+impl DmaObject {
+ pub(crate) fn new(dev: &device::Device<device::Bound>, len: usize) -> Result<Self> {
+ let len = core::alloc::Layout::from_size_align(len, PAGE_SIZE)
+ .map_err(|_| EINVAL)?
+ .pad_to_align()
+ .size();
+ let dma = CoherentAllocation::alloc_coherent(dev, len, GFP_KERNEL | __GFP_ZERO)?;
+
+ Ok(Self { dma })
+ }
+
+ pub(crate) fn from_data(dev: &device::Device<device::Bound>, data: &[u8]) -> Result<Self> {
+ Self::new(dev, data.len()).map(|mut dma_obj| {
+ // TODO[COHA]: replace with `CoherentAllocation::write()` once available.
+ // SAFETY:
+ // - `dma_obj`'s size is at least `data.len()`.
+ // - We have just created this object and there is no other user at this stage.
+ unsafe {
+ core::ptr::copy_nonoverlapping(
+ data.as_ptr(),
+ dma_obj.dma.start_ptr_mut(),
+ data.len(),
+ );
+ }
+
+ dma_obj
+ })
+ }
+}
+
+impl Deref for DmaObject {
+ type Target = CoherentAllocation<u8>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.dma
+ }
+}
+
+impl DerefMut for DmaObject {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.dma
+ }
+}
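A usage sketch for the wrapper (the caller and payload are hypothetical; dma_handle() is reachable through the Deref to CoherentAllocation, as the falcon code later in this series relies on):

// Hypothetical caller: stage a small payload in a page-aligned coherent
// buffer and hand the object back for later DMA.
fn example_stage(dev: &kernel::device::Device<kernel::device::Bound>) -> Result<DmaObject> {
    let payload = [0u8; 64];
    let obj = DmaObject::from_data(dev, &payload)?;
    // Methods of `CoherentAllocation<u8>` are available through `Deref`,
    // e.g. `obj.dma_handle()`.
    Ok(obj)
}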
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index 110f2b355db4..274989ea1fb4 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{auxiliary, bindings, c_str, device::Core, pci, prelude::*, sync::Arc};
+use kernel::{auxiliary, bindings, c_str, device::Core, pci, prelude::*, sizes::SZ_16M, sync::Arc};
use crate::gpu::Gpu;
@@ -11,7 +11,7 @@ pub(crate) struct NovaCore {
_reg: auxiliary::Registration,
}
-const BAR0_SIZE: usize = 8;
+const BAR0_SIZE: usize = SZ_16M;
pub(crate) type Bar0 = pci::Bar<BAR0_SIZE>;
kernel::pci_device_table!(
@@ -19,7 +19,7 @@ kernel::pci_device_table!(
MODULE_PCI_TABLE,
<NovaCore as pci::Driver>::IdInfo,
[(
- pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_NVIDIA, bindings::PCI_ANY_ID as _),
+ pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_NVIDIA, bindings::PCI_ANY_ID as u32),
()
)]
);
@@ -45,7 +45,7 @@ impl pci::Driver for NovaCore {
_reg: auxiliary::Registration::new(
pdev.as_ref(),
c_str!("nova-drm"),
- 0, // TODO: Once it lands, use XArray; for now we don't use the ID.
+ 0, // TODO[XARR]: Once it lands, use XArray; for now we don't use the ID.
crate::MODULE_NAME
)?,
}),
diff --git a/drivers/gpu/nova-core/falcon.rs b/drivers/gpu/nova-core/falcon.rs
new file mode 100644
index 000000000000..50437c67c14a
--- /dev/null
+++ b/drivers/gpu/nova-core/falcon.rs
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Falcon microprocessor base support
+
+use core::ops::Deref;
+use hal::FalconHal;
+use kernel::bindings;
+use kernel::device;
+use kernel::prelude::*;
+use kernel::time::Delta;
+use kernel::types::ARef;
+
+use crate::dma::DmaObject;
+use crate::driver::Bar0;
+use crate::gpu::Chipset;
+use crate::regs;
+use crate::util;
+
+pub(crate) mod gsp;
+mod hal;
+pub(crate) mod sec2;
+
+// TODO[FPRI]: Replace with `ToPrimitive`.
+macro_rules! impl_from_enum_to_u32 {
+ ($enum_type:ty) => {
+ impl From<$enum_type> for u32 {
+ fn from(value: $enum_type) -> Self {
+ value as u32
+ }
+ }
+ };
+}
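+// Example (illustrative only, not part of the driver): with the macro
+// above, an enum value converts into the `u32` needed for a register
+// field:
+//
+//     let raw: u32 = FalconCoreRev::Rev3.into();
+//     assert_eq!(raw, 3);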
+
+/// Revision number of a falcon core, used in the [`crate::regs::NV_PFALCON_FALCON_HWCFG1`]
+/// register.
+#[repr(u8)]
+#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub(crate) enum FalconCoreRev {
+ #[default]
+ Rev1 = 1,
+ Rev2 = 2,
+ Rev3 = 3,
+ Rev4 = 4,
+ Rev5 = 5,
+ Rev6 = 6,
+ Rev7 = 7,
+}
+impl_from_enum_to_u32!(FalconCoreRev);
+
+// TODO[FPRI]: replace with `FromPrimitive`.
+impl TryFrom<u8> for FalconCoreRev {
+ type Error = Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ use FalconCoreRev::*;
+
+ let rev = match value {
+ 1 => Rev1,
+ 2 => Rev2,
+ 3 => Rev3,
+ 4 => Rev4,
+ 5 => Rev5,
+ 6 => Rev6,
+ 7 => Rev7,
+ _ => return Err(EINVAL),
+ };
+
+ Ok(rev)
+ }
+}
+
+/// Revision subversion number of a falcon core, used in the
+/// [`crate::regs::NV_PFALCON_FALCON_HWCFG1`] register.
+#[repr(u8)]
+#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub(crate) enum FalconCoreRevSubversion {
+ #[default]
+ Subversion0 = 0,
+ Subversion1 = 1,
+ Subversion2 = 2,
+ Subversion3 = 3,
+}
+impl_from_enum_to_u32!(FalconCoreRevSubversion);
+
+// TODO[FPRI]: replace with `FromPrimitive`.
+impl TryFrom<u8> for FalconCoreRevSubversion {
+ type Error = Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ use FalconCoreRevSubversion::*;
+
+ let sub_version = match value & 0b11 {
+ 0 => Subversion0,
+ 1 => Subversion1,
+ 2 => Subversion2,
+ 3 => Subversion3,
+ _ => return Err(EINVAL),
+ };
+
+ Ok(sub_version)
+ }
+}
+
+/// Security model of a falcon core, used in the [`crate::regs::NV_PFALCON_FALCON_HWCFG1`]
+/// register.
+///
+/// See `falcon.rst` for more details.
+#[repr(u8)]
+#[derive(Debug, Default, Copy, Clone)]
+pub(crate) enum FalconSecurityModel {
+ /// Non-Secure: runs unsigned code without privileges.
+ #[default]
+ None = 0,
+ /// Light-Secured (LS): Runs signed code with some privileges.
+ /// Entry into this mode is only possible from 'Heavy-secure' mode, which verifies the code's
+ /// signature.
+ ///
+ /// Also known as Low-Secure, Privilege Level 2 or PL2.
+ Light = 2,
+ /// Heavy-Secured (HS): Runs signed code with full privileges.
+ /// The code's signature is verified by the Falcon Boot ROM (BROM).
+ ///
+ /// Also known as High-Secure, Privilege Level 3 or PL3.
+ Heavy = 3,
+}
+impl_from_enum_to_u32!(FalconSecurityModel);
+
+// TODO[FPRI]: replace with `FromPrimitive`.
+impl TryFrom<u8> for FalconSecurityModel {
+ type Error = Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ use FalconSecurityModel::*;
+
+ let sec_model = match value {
+ 0 => None,
+ 2 => Light,
+ 3 => Heavy,
+ _ => return Err(EINVAL),
+ };
+
+ Ok(sec_model)
+ }
+}
+
+/// Signing algorithm for a given firmware, used in the [`crate::regs::NV_PFALCON2_FALCON_MOD_SEL`]
+/// register. It is passed to the Falcon Boot ROM (BROM) as a parameter.
+#[repr(u8)]
+#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
+pub(crate) enum FalconModSelAlgo {
+ /// AES.
+ #[expect(dead_code)]
+ Aes = 0,
+ /// RSA3K.
+ #[default]
+ Rsa3k = 1,
+}
+impl_from_enum_to_u32!(FalconModSelAlgo);
+
+// TODO[FPRI]: replace with `FromPrimitive`.
+impl TryFrom<u8> for FalconModSelAlgo {
+ type Error = Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ match value {
+ 1 => Ok(FalconModSelAlgo::Rsa3k),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
+/// Valid values for the `size` field of the [`crate::regs::NV_PFALCON_FALCON_DMATRFCMD`] register.
+#[repr(u8)]
+#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
+pub(crate) enum DmaTrfCmdSize {
+ /// 256 bytes transfer.
+ #[default]
+ Size256B = 0x6,
+}
+impl_from_enum_to_u32!(DmaTrfCmdSize);
+
+// TODO[FPRI]: replace with `FromPrimitive`.
+impl TryFrom<u8> for DmaTrfCmdSize {
+ type Error = Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ match value {
+ 0x6 => Ok(Self::Size256B),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
+/// Currently active core on a dual falcon/riscv (Peregrine) controller.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
+pub(crate) enum PeregrineCoreSelect {
+ /// Falcon core is active.
+ #[default]
+ Falcon = 0,
+ /// RISC-V core is active.
+ Riscv = 1,
+}
+impl_from_enum_to_u32!(PeregrineCoreSelect);
+
+impl From<bool> for PeregrineCoreSelect {
+ fn from(value: bool) -> Self {
+ match value {
+ false => PeregrineCoreSelect::Falcon,
+ true => PeregrineCoreSelect::Riscv,
+ }
+ }
+}
+
+/// Different types of memory present in a falcon core.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub(crate) enum FalconMem {
+ /// Instruction Memory.
+ Imem,
+ /// Data Memory.
+ Dmem,
+}
+
+/// Defines the Framebuffer Interface (FBIF) aperture type.
+/// This determines the memory type for external memory access during a DMA transfer, which is
+/// performed by the Falcon's Framebuffer DMA (FBDMA) engine. See falcon.rst for more details.
+#[derive(Debug, Clone, Default)]
+pub(crate) enum FalconFbifTarget {
+ /// Local Framebuffer (the GPU's VRAM).
+ #[default]
+ LocalFb = 0,
+ /// Coherent system memory (System DRAM).
+ CoherentSysmem = 1,
+ /// Non-coherent system memory (System DRAM).
+ NoncoherentSysmem = 2,
+}
+impl_from_enum_to_u32!(FalconFbifTarget);
+
+// TODO[FPRI]: replace with `FromPrimitive`.
+impl TryFrom<u8> for FalconFbifTarget {
+ type Error = Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ let res = match value {
+ 0 => Self::LocalFb,
+ 1 => Self::CoherentSysmem,
+ 2 => Self::NoncoherentSysmem,
+ _ => return Err(EINVAL),
+ };
+
+ Ok(res)
+ }
+}
+
+/// Type of memory addresses to use.
+#[derive(Debug, Clone, Default)]
+pub(crate) enum FalconFbifMemType {
+ /// Virtual memory addresses.
+ #[default]
+ Virtual = 0,
+ /// Physical memory addresses.
+ Physical = 1,
+}
+impl_from_enum_to_u32!(FalconFbifMemType);
+
+/// Conversion from a single-bit register field.
+impl From<bool> for FalconFbifMemType {
+ fn from(value: bool) -> Self {
+ match value {
+ false => Self::Virtual,
+ true => Self::Physical,
+ }
+ }
+}
+
+/// Trait defining the parameters of a given Falcon instance.
+pub(crate) trait FalconEngine: Sync {
+ /// Base I/O address for the falcon, relative from which its registers are accessed.
+ const BASE: usize;
+}
+
+/// Represents a portion of the firmware to be loaded into a particular memory (e.g. IMEM or DMEM).
+#[derive(Debug)]
+pub(crate) struct FalconLoadTarget {
+ /// Offset from the start of the source object to copy from.
+ pub(crate) src_start: u32,
+ /// Offset from the start of the destination memory to copy into.
+ pub(crate) dst_start: u32,
+ /// Number of bytes to copy.
+ pub(crate) len: u32,
+}
+
+/// Parameters for the falcon boot ROM.
+#[derive(Debug)]
+pub(crate) struct FalconBromParams {
+ /// Offset in `DMEM` of the firmware's signature.
+ pub(crate) pkc_data_offset: u32,
+ /// Mask of engines valid for this firmware.
+ pub(crate) engine_id_mask: u16,
+ /// ID of the ucode used to infer a fuse register to validate the signature.
+ pub(crate) ucode_id: u8,
+}
+
+/// Trait for providing load parameters of falcon firmwares.
+pub(crate) trait FalconLoadParams {
+ /// Returns the load parameters for `IMEM`.
+ fn imem_load_params(&self) -> FalconLoadTarget;
+
+ /// Returns the load parameters for `DMEM`.
+ fn dmem_load_params(&self) -> FalconLoadTarget;
+
+ /// Returns the parameters to write into the BROM registers.
+ fn brom_params(&self) -> FalconBromParams;
+
+ /// Returns the start address of the firmware.
+ fn boot_addr(&self) -> u32;
+}
+
+/// Trait for a falcon firmware.
+///
+/// A falcon firmware can be loaded on a given engine, and is presented in the form of a DMA
+/// object.
+pub(crate) trait FalconFirmware: FalconLoadParams + Deref<Target = DmaObject> {
+ /// Engine on which this firmware is to be loaded.
+ type Target: FalconEngine;
+}
+
+/// Contains the base parameters common to all Falcon instances.
+pub(crate) struct Falcon<E: FalconEngine> {
+ hal: KBox<dyn FalconHal<E>>,
+ dev: ARef<device::Device>,
+}
+
+impl<E: FalconEngine + 'static> Falcon<E> {
+ /// Create a new falcon instance.
+ ///
+ /// `need_riscv` is set to `true` if the caller expects the falcon to be a dual falcon/riscv
+ /// controller.
+ pub(crate) fn new(
+ dev: &device::Device,
+ chipset: Chipset,
+ bar: &Bar0,
+ need_riscv: bool,
+ ) -> Result<Self> {
+ let hwcfg1 = regs::NV_PFALCON_FALCON_HWCFG1::read(bar, E::BASE);
+ // Check that the revision and security model contain valid values.
+ let _ = hwcfg1.core_rev()?;
+ let _ = hwcfg1.security_model()?;
+
+ if need_riscv {
+ let hwcfg2 = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE);
+ if !hwcfg2.riscv() {
+ dev_err!(
+ dev,
+ "riscv support requested on a controller that does not support it\n"
+ );
+ return Err(EINVAL);
+ }
+ }
+
+ Ok(Self {
+ hal: hal::falcon_hal(chipset)?,
+ dev: dev.into(),
+ })
+ }
+
+ /// Wait for memory scrubbing to complete.
+ fn reset_wait_mem_scrubbing(&self, bar: &Bar0) -> Result {
+ // TIMEOUT: memory scrubbing should complete in less than 20ms.
+ util::wait_on(Delta::from_millis(20), || {
+ if regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE).mem_scrubbing_done() {
+ Some(())
+ } else {
+ None
+ }
+ })
+ }
+
+ /// Reset the falcon engine.
+ fn reset_eng(&self, bar: &Bar0) -> Result {
+ let _ = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE);
+
+ // According to OpenRM's `kflcnPreResetWait_GA102` documentation, HW sometimes does not set
+ // RESET_READY, so a non-failing timeout is used.
+ let _ = util::wait_on(Delta::from_micros(150), || {
+ let r = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE);
+ if r.reset_ready() {
+ Some(())
+ } else {
+ None
+ }
+ });
+
+ regs::NV_PFALCON_FALCON_ENGINE::alter(bar, E::BASE, |v| v.set_reset(true));
+
+ // TODO[DLAY]: replace with udelay() or equivalent once available.
+ // TIMEOUT: falcon engine should not take more than 10us to reset.
+ let _: Result = util::wait_on(Delta::from_micros(10), || None);
+
+ regs::NV_PFALCON_FALCON_ENGINE::alter(bar, E::BASE, |v| v.set_reset(false));
+
+ self.reset_wait_mem_scrubbing(bar)?;
+
+ Ok(())
+ }
+
+ /// Reset the controller, select the falcon core, and wait for memory scrubbing to complete.
+ pub(crate) fn reset(&self, bar: &Bar0) -> Result {
+ self.reset_eng(bar)?;
+ self.hal.select_core(self, bar)?;
+ self.reset_wait_mem_scrubbing(bar)?;
+
+ regs::NV_PFALCON_FALCON_RM::default()
+ .set_value(regs::NV_PMC_BOOT_0::read(bar).into())
+ .write(bar, E::BASE);
+
+ Ok(())
+ }
+
+ /// Perform a DMA write according to `load_offsets` from `dma_handle` into the falcon's
+ /// `target_mem`.
+ ///
+ /// `sec` is set if the loaded firmware is expected to run in secure mode.
+ fn dma_wr<F: FalconFirmware<Target = E>>(
+ &self,
+ bar: &Bar0,
+ fw: &F,
+ target_mem: FalconMem,
+ load_offsets: FalconLoadTarget,
+ sec: bool,
+ ) -> Result {
+ const DMA_LEN: u32 = 256;
+
+ // For IMEM, we want to use the start offset as a virtual address tag for each page, since
+ // code addresses in the firmware (and the boot vector) are virtual.
+ //
+ // For DMEM we can fold the start offset into the DMA handle.
+ let (src_start, dma_start) = match target_mem {
+ FalconMem::Imem => (load_offsets.src_start, fw.dma_handle()),
+ FalconMem::Dmem => (
+ 0,
+ fw.dma_handle_with_offset(load_offsets.src_start as usize)?,
+ ),
+ };
+ if dma_start % bindings::dma_addr_t::from(DMA_LEN) > 0 {
+ dev_err!(
+ self.dev,
+ "DMA transfer start addresses must be a multiple of {}",
+ DMA_LEN
+ );
+ return Err(EINVAL);
+ }
+ if load_offsets.len % DMA_LEN > 0 {
+ dev_err!(
+ self.dev,
+ "DMA transfer length must be a multiple of {}",
+ DMA_LEN
+ );
+ return Err(EINVAL);
+ }
+
+ // Set up the base source DMA address.
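+ //
+ // The address (256-byte aligned as checked above) is split across two registers:
+ // bits 39..8 go into `DMATRFBASE` and bits 48..40 into `DMATRFBASE1`.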
+ regs::NV_PFALCON_FALCON_DMATRFBASE::default()
+ .set_base((dma_start >> 8) as u32)
+ .write(bar, E::BASE);
+ regs::NV_PFALCON_FALCON_DMATRFBASE1::default()
+ .set_base((dma_start >> 40) as u16)
+ .write(bar, E::BASE);
+
+ let cmd = regs::NV_PFALCON_FALCON_DMATRFCMD::default()
+ .set_size(DmaTrfCmdSize::Size256B)
+ .set_imem(target_mem == FalconMem::Imem)
+ .set_sec(if sec { 1 } else { 0 });
+
+ for pos in (0..load_offsets.len).step_by(DMA_LEN as usize) {
+ // Perform a transfer of size `DMA_LEN`.
+ regs::NV_PFALCON_FALCON_DMATRFMOFFS::default()
+ .set_offs(load_offsets.dst_start + pos)
+ .write(bar, E::BASE);
+ regs::NV_PFALCON_FALCON_DMATRFFBOFFS::default()
+ .set_offs(src_start + pos)
+ .write(bar, E::BASE);
+ cmd.write(bar, E::BASE);
+
+ // Wait for the transfer to complete.
+ // TIMEOUT: arbitrarily large value, no DMA transfer to the falcon's small memories
+ // should ever take that long.
+ util::wait_on(Delta::from_secs(2), || {
+ let r = regs::NV_PFALCON_FALCON_DMATRFCMD::read(bar, E::BASE);
+ if r.idle() {
+ Some(())
+ } else {
+ None
+ }
+ })?;
+ }
+
+ Ok(())
+ }
+
+ /// Perform a DMA load into `IMEM` and `DMEM` of `fw`, and prepare the falcon to run it.
+ pub(crate) fn dma_load<F: FalconFirmware<Target = E>>(&self, bar: &Bar0, fw: &F) -> Result {
+ regs::NV_PFALCON_FBIF_CTL::alter(bar, E::BASE, |v| v.set_allow_phys_no_ctx(true));
+ regs::NV_PFALCON_FALCON_DMACTL::default().write(bar, E::BASE);
+ regs::NV_PFALCON_FBIF_TRANSCFG::alter(bar, E::BASE, |v| {
+ v.set_target(FalconFbifTarget::CoherentSysmem)
+ .set_mem_type(FalconFbifMemType::Physical)
+ });
+
+ self.dma_wr(bar, fw, FalconMem::Imem, fw.imem_load_params(), true)?;
+ self.dma_wr(bar, fw, FalconMem::Dmem, fw.dmem_load_params(), true)?;
+
+ self.hal.program_brom(self, bar, &fw.brom_params())?;
+
+ // Set `BootVec` to start of non-secure code.
+ regs::NV_PFALCON_FALCON_BOOTVEC::default()
+ .set_value(fw.boot_addr())
+ .write(bar, E::BASE);
+
+ Ok(())
+ }
+
+ /// Runs the loaded firmware and waits for its completion.
+ ///
+ /// `mbox0` and `mbox1` are optional parameters to write into the `MBOX0` and `MBOX1` registers
+ /// prior to running.
+ ///
+ /// Wait up to two seconds for the firmware to complete, and return its exit status read from
+ /// the `MBOX0` and `MBOX1` registers.
+ pub(crate) fn boot(
+ &self,
+ bar: &Bar0,
+ mbox0: Option<u32>,
+ mbox1: Option<u32>,
+ ) -> Result<(u32, u32)> {
+ if let Some(mbox0) = mbox0 {
+ regs::NV_PFALCON_FALCON_MAILBOX0::default()
+ .set_value(mbox0)
+ .write(bar, E::BASE);
+ }
+
+ if let Some(mbox1) = mbox1 {
+ regs::NV_PFALCON_FALCON_MAILBOX1::default()
+ .set_value(mbox1)
+ .write(bar, E::BASE);
+ }
+
+ match regs::NV_PFALCON_FALCON_CPUCTL::read(bar, E::BASE).alias_en() {
+ true => regs::NV_PFALCON_FALCON_CPUCTL_ALIAS::default()
+ .set_startcpu(true)
+ .write(bar, E::BASE),
+ false => regs::NV_PFALCON_FALCON_CPUCTL::default()
+ .set_startcpu(true)
+ .write(bar, E::BASE),
+ }
+
+ // TIMEOUT: arbitrarily large value, firmwares should complete in less than 2 seconds.
+ util::wait_on(Delta::from_secs(2), || {
+ let r = regs::NV_PFALCON_FALCON_CPUCTL::read(bar, E::BASE);
+ if r.halted() {
+ Some(())
+ } else {
+ None
+ }
+ })?;
+
+ let (mbox0, mbox1) = (
+ regs::NV_PFALCON_FALCON_MAILBOX0::read(bar, E::BASE).value(),
+ regs::NV_PFALCON_FALCON_MAILBOX1::read(bar, E::BASE).value(),
+ );
+
+ Ok((mbox0, mbox1))
+ }
+
+ /// Returns the fused version of the signature to use in order to run a HS firmware on this
+ /// falcon instance. `engine_id_mask` and `ucode_id` are obtained from the firmware header.
+ pub(crate) fn signature_reg_fuse_version(
+ &self,
+ bar: &Bar0,
+ engine_id_mask: u16,
+ ucode_id: u8,
+ ) -> Result<u32> {
+ self.hal
+ .signature_reg_fuse_version(self, bar, engine_id_mask, ucode_id)
+ }
+}
diff --git a/drivers/gpu/nova-core/falcon/gsp.rs b/drivers/gpu/nova-core/falcon/gsp.rs
new file mode 100644
index 000000000000..d622e9a64470
--- /dev/null
+++ b/drivers/gpu/nova-core/falcon/gsp.rs
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::{
+ driver::Bar0,
+ falcon::{Falcon, FalconEngine},
+ regs,
+};
+
+/// Type specifying the `Gsp` falcon engine. Cannot be instantiated.
+pub(crate) struct Gsp(());
+
+impl FalconEngine for Gsp {
+ const BASE: usize = 0x00110000;
+}
+
+impl Falcon<Gsp> {
+ /// Clears the SWGEN0 bit in the Falcon's IRQ status clear register to
+ /// allow the GSP to signal the CPU that new messages are ready in the message queue.
+ pub(crate) fn clear_swgen0_intr(&self, bar: &Bar0) {
+ regs::NV_PFALCON_FALCON_IRQSCLR::default()
+ .set_swgen0(true)
+ .write(bar, Gsp::BASE);
+ }
+}
diff --git a/drivers/gpu/nova-core/falcon/hal.rs b/drivers/gpu/nova-core/falcon/hal.rs
new file mode 100644
index 000000000000..b233bc365882
--- /dev/null
+++ b/drivers/gpu/nova-core/falcon/hal.rs
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::prelude::*;
+
+use crate::driver::Bar0;
+use crate::falcon::{Falcon, FalconBromParams, FalconEngine};
+use crate::gpu::Chipset;
+
+mod ga102;
+
+/// Hardware Abstraction Layer for Falcon cores.
+///
+/// Implements chipset-specific low-level operations. The trait is generic over [`FalconEngine`]
+/// so that its `BASE` parameter can be used to avoid runtime bound checks when accessing
+/// registers.
+pub(crate) trait FalconHal<E: FalconEngine>: Sync {
+ /// Activates the Falcon core if the engine is a riscv/falcon dual engine.
+ fn select_core(&self, _falcon: &Falcon<E>, _bar: &Bar0) -> Result {
+ Ok(())
+ }
+
+ /// Returns the fused version of the signature to use in order to run a HS firmware on this
+ /// falcon instance. `engine_id_mask` and `ucode_id` are obtained from the firmware header.
+ fn signature_reg_fuse_version(
+ &self,
+ falcon: &Falcon<E>,
+ bar: &Bar0,
+ engine_id_mask: u16,
+ ucode_id: u8,
+ ) -> Result<u32>;
+
+ /// Program the boot ROM registers prior to starting a secure firmware.
+ fn program_brom(&self, falcon: &Falcon<E>, bar: &Bar0, params: &FalconBromParams) -> Result;
+}
+
+/// Returns a boxed falcon HAL adequate for `chipset`.
+///
+/// We use a heap-allocated trait object instead of a statically defined one because the
+/// generic `FalconEngine` argument makes it difficult to define all the combinations
+/// statically.
+pub(super) fn falcon_hal<E: FalconEngine + 'static>(
+ chipset: Chipset,
+) -> Result<KBox<dyn FalconHal<E>>> {
+ use Chipset::*;
+
+ let hal = match chipset {
+ GA102 | GA103 | GA104 | GA106 | GA107 => {
+ KBox::new(ga102::Ga102::<E>::new(), GFP_KERNEL)? as KBox<dyn FalconHal<E>>
+ }
+ _ => return Err(ENOTSUPP),
+ };
+
+ Ok(hal)
+}
diff --git a/drivers/gpu/nova-core/falcon/hal/ga102.rs b/drivers/gpu/nova-core/falcon/hal/ga102.rs
new file mode 100644
index 000000000000..52c33d3f22a8
--- /dev/null
+++ b/drivers/gpu/nova-core/falcon/hal/ga102.rs
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::marker::PhantomData;
+
+use kernel::device;
+use kernel::prelude::*;
+use kernel::time::Delta;
+
+use crate::driver::Bar0;
+use crate::falcon::{
+ Falcon, FalconBromParams, FalconEngine, FalconModSelAlgo, PeregrineCoreSelect,
+};
+use crate::regs;
+use crate::util;
+
+use super::FalconHal;
+
+fn select_core_ga102<E: FalconEngine>(bar: &Bar0) -> Result {
+ let bcr_ctrl = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, E::BASE);
+ if bcr_ctrl.core_select() != PeregrineCoreSelect::Falcon {
+ regs::NV_PRISCV_RISCV_BCR_CTRL::default()
+ .set_core_select(PeregrineCoreSelect::Falcon)
+ .write(bar, E::BASE);
+
+ // TIMEOUT: falcon core should take less than 10ms to report being enabled.
+ util::wait_on(Delta::from_millis(10), || {
+ let r = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, E::BASE);
+ if r.valid() {
+ Some(())
+ } else {
+ None
+ }
+ })?;
+ }
+
+ Ok(())
+}
+
+fn signature_reg_fuse_version_ga102(
+ dev: &device::Device,
+ bar: &Bar0,
+ engine_id_mask: u16,
+ ucode_id: u8,
+) -> Result<u32> {
+ // TODO[REGA]: The ucode fuse versions are contained in the
+ // FUSE_OPT_FPF_<ENGINE>_UCODE<X>_VERSION registers, which are an array. Our register
+ // definition macros do not allow us to manage them properly, so we need to hardcode their
+ // addresses for now. Clean this up once we support register arrays.
+
+ // Each engine has 16 ucode version registers numbered from 1 to 16.
+ if ucode_id == 0 || ucode_id > 16 {
+ dev_err!(dev, "invalid ucode id {:#x}", ucode_id);
+ return Err(EINVAL);
+ }
+
+ // Base address of the FUSE registers array corresponding to the engine.
+ let reg_fuse_base = if engine_id_mask & 0x0001 != 0 {
+ regs::NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION::OFFSET
+ } else if engine_id_mask & 0x0004 != 0 {
+ regs::NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION::OFFSET
+ } else if engine_id_mask & 0x0400 != 0 {
+ regs::NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION::OFFSET
+ } else {
+ dev_err!(dev, "unexpected engine_id_mask {:#x}", engine_id_mask);
+ return Err(EINVAL);
+ };
+
+ // Read `reg_fuse_base[ucode_id - 1]`.
+ let reg_fuse_version =
+ bar.read32(reg_fuse_base + ((ucode_id - 1) as usize * core::mem::size_of::<u32>()));
+
+ // TODO[NUMM]: replace with `last_set_bit` once it lands.
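+ // The returned version is the index of the highest bit set in the fuse register,
+ // plus one; e.g. a fuse value of 0b0100 yields version 3, while 0 yields version 0.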
+ Ok(u32::BITS - reg_fuse_version.leading_zeros())
+}
+
+fn program_brom_ga102<E: FalconEngine>(bar: &Bar0, params: &FalconBromParams) -> Result {
+ regs::NV_PFALCON2_FALCON_BROM_PARAADDR::default()
+ .set_value(params.pkc_data_offset)
+ .write(bar, E::BASE);
+ regs::NV_PFALCON2_FALCON_BROM_ENGIDMASK::default()
+ .set_value(u32::from(params.engine_id_mask))
+ .write(bar, E::BASE);
+ regs::NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID::default()
+ .set_ucode_id(params.ucode_id)
+ .write(bar, E::BASE);
+ regs::NV_PFALCON2_FALCON_MOD_SEL::default()
+ .set_algo(FalconModSelAlgo::Rsa3k)
+ .write(bar, E::BASE);
+
+ Ok(())
+}
+
+pub(super) struct Ga102<E: FalconEngine>(PhantomData<E>);
+
+impl<E: FalconEngine> Ga102<E> {
+ pub(super) fn new() -> Self {
+ Self(PhantomData)
+ }
+}
+
+impl<E: FalconEngine> FalconHal<E> for Ga102<E> {
+ fn select_core(&self, _falcon: &Falcon<E>, bar: &Bar0) -> Result {
+ select_core_ga102::<E>(bar)
+ }
+
+ fn signature_reg_fuse_version(
+ &self,
+ falcon: &Falcon<E>,
+ bar: &Bar0,
+ engine_id_mask: u16,
+ ucode_id: u8,
+ ) -> Result<u32> {
+ signature_reg_fuse_version_ga102(&falcon.dev, bar, engine_id_mask, ucode_id)
+ }
+
+ fn program_brom(&self, _falcon: &Falcon<E>, bar: &Bar0, params: &FalconBromParams) -> Result {
+ program_brom_ga102::<E>(bar, params)
+ }
+}
diff --git a/drivers/gpu/nova-core/falcon/sec2.rs b/drivers/gpu/nova-core/falcon/sec2.rs
new file mode 100644
index 000000000000..5147d9e2a7fe
--- /dev/null
+++ b/drivers/gpu/nova-core/falcon/sec2.rs
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::falcon::FalconEngine;
+
+/// Type specifying the `Sec2` falcon engine. Cannot be instantiated.
+pub(crate) struct Sec2(());
+
+impl FalconEngine for Sec2 {
+ const BASE: usize = 0x00840000;
+}
diff --git a/drivers/gpu/nova-core/fb.rs b/drivers/gpu/nova-core/fb.rs
new file mode 100644
index 000000000000..4a702525fff4
--- /dev/null
+++ b/drivers/gpu/nova-core/fb.rs
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::ops::Range;
+
+use kernel::prelude::*;
+use kernel::sizes::*;
+use kernel::types::ARef;
+use kernel::{dev_warn, device};
+
+use crate::dma::DmaObject;
+use crate::driver::Bar0;
+use crate::gpu::Chipset;
+use crate::regs;
+
+mod hal;
+
+/// Type holding the sysmem flush memory page, a page of memory to be written into the
+/// `NV_PFB_NISO_FLUSH_SYSMEM_ADDR*` registers and used to maintain memory coherency.
+///
+/// A system memory page is required for `sysmembar`, which is a GPU-initiated hardware
+/// memory-barrier operation that flushes all pending GPU-side memory writes that were done through
+/// PCIE to system memory. It is required before falcons can be reset, as the reset operation involves a
+/// reset handshake. When the falcon acknowledges a reset, it writes into system memory. To ensure
+/// this write is visible to the host and prevent driver timeouts, the falcon must perform a
+/// sysmembar operation to flush its writes.
+///
+/// Because of this, the sysmem flush memory page must be registered as early as possible during
+/// driver initialization, and before any falcon is reset.
+///
+/// Users are responsible for manually calling [`Self::unregister`] before dropping this object,
+/// otherwise the GPU might still use it even after it has been freed.
+pub(crate) struct SysmemFlush {
+ /// Chipset we are operating on.
+ chipset: Chipset,
+ device: ARef<device::Device>,
+ /// Keep the page alive as long as we need it.
+ page: DmaObject,
+}
+
+impl SysmemFlush {
+ /// Allocate a memory page and register it as the sysmem flush page.
+ pub(crate) fn register(
+ dev: &device::Device<device::Bound>,
+ bar: &Bar0,
+ chipset: Chipset,
+ ) -> Result<Self> {
+ let page = DmaObject::new(dev, kernel::page::PAGE_SIZE)?;
+
+ hal::fb_hal(chipset).write_sysmem_flush_page(bar, page.dma_handle())?;
+
+ Ok(Self {
+ chipset,
+ device: dev.into(),
+ page,
+ })
+ }
+
+ /// Unregister the managed sysmem flush page.
+ ///
+ /// In order to gracefully tear down the GPU, users must make sure to call this method before
+ /// dropping the object.
+ pub(crate) fn unregister(&self, bar: &Bar0) {
+ let hal = hal::fb_hal(self.chipset);
+
+ if hal.read_sysmem_flush_page(bar) == self.page.dma_handle() {
+ let _ = hal.write_sysmem_flush_page(bar, 0).inspect_err(|e| {
+ dev_warn!(
+ &self.device,
+ "failed to unregister sysmem flush page: {:?}",
+ e
+ )
+ });
+ } else {
+ // Another page has been registered after us for some reason - warn as this is a bug.
+ dev_warn!(
+ &self.device,
+ "attempt to unregister a sysmem flush page that is not active\n"
+ );
+ }
+ }
+}
+
+/// Layout of the GPU framebuffer memory.
+///
+/// Contains ranges of GPU memory reserved for a given purpose during the GSP boot process.
+#[derive(Debug)]
+#[expect(dead_code)]
+pub(crate) struct FbLayout {
+ pub(crate) fb: Range<u64>,
+ pub(crate) vga_workspace: Range<u64>,
+ pub(crate) frts: Range<u64>,
+}
+
+impl FbLayout {
+ /// Computes the FB layout.
+ pub(crate) fn new(chipset: Chipset, bar: &Bar0) -> Result<Self> {
+ let hal = hal::fb_hal(chipset);
+
+ let fb = {
+ let fb_size = hal.vidmem_size(bar);
+
+ 0..fb_size
+ };
+
+ let vga_workspace = {
+ let vga_base = {
+ const NV_PRAMIN_SIZE: u64 = SZ_1M as u64;
+ let base = fb.end - NV_PRAMIN_SIZE;
+
+ if hal.supports_display(bar) {
+ match regs::NV_PDISP_VGA_WORKSPACE_BASE::read(bar).vga_workspace_addr() {
+ Some(addr) => {
+ if addr < base {
+ const VBIOS_WORKSPACE_SIZE: u64 = SZ_128K as u64;
+
+ // Point workspace address to end of framebuffer.
+ fb.end - VBIOS_WORKSPACE_SIZE
+ } else {
+ addr
+ }
+ }
+ None => base,
+ }
+ } else {
+ base
+ }
+ };
+
+ vga_base..fb.end
+ };
+
+ let frts = {
+ const FRTS_DOWN_ALIGN: u64 = SZ_128K as u64;
+ const FRTS_SIZE: u64 = SZ_1M as u64;
+ // TODO[NUMM]: replace with `align_down` once it lands.
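+ // Clearing the low bits aligns the VGA workspace start down to a 128K boundary;
+ // e.g. a (hypothetical) start of 0x3ff3_4567 aligns down to 0x3ff2_0000, giving
+ // an FRTS base of 0x3fe2_0000.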
+ let frts_base = (vga_workspace.start & !(FRTS_DOWN_ALIGN - 1)) - FRTS_SIZE;
+
+ frts_base..frts_base + FRTS_SIZE
+ };
+
+ Ok(Self {
+ fb,
+ vga_workspace,
+ frts,
+ })
+ }
+}
diff --git a/drivers/gpu/nova-core/fb/hal.rs b/drivers/gpu/nova-core/fb/hal.rs
new file mode 100644
index 000000000000..2f914948bb9a
--- /dev/null
+++ b/drivers/gpu/nova-core/fb/hal.rs
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::prelude::*;
+
+use crate::driver::Bar0;
+use crate::gpu::Chipset;
+
+mod ga100;
+mod ga102;
+mod tu102;
+
+pub(crate) trait FbHal {
+ /// Returns the address of the currently-registered sysmem flush page.
+ fn read_sysmem_flush_page(&self, bar: &Bar0) -> u64;
+
+ /// Register `addr` as the address of the sysmem flush page.
+ ///
+ /// This might fail if the address is too large for the receiving register.
+ fn write_sysmem_flush_page(&self, bar: &Bar0, addr: u64) -> Result;
+
+ /// Returns `true` if display is supported.
+ fn supports_display(&self, bar: &Bar0) -> bool;
+
+ /// Returns the VRAM size, in bytes.
+ fn vidmem_size(&self, bar: &Bar0) -> u64;
+}
+
+/// Returns the HAL corresponding to `chipset`.
+pub(super) fn fb_hal(chipset: Chipset) -> &'static dyn FbHal {
+ use Chipset::*;
+
+ match chipset {
+ TU102 | TU104 | TU106 | TU117 | TU116 => tu102::TU102_HAL,
+ GA100 => ga100::GA100_HAL,
+ GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 => {
+ ga102::GA102_HAL
+ }
+ }
+}
diff --git a/drivers/gpu/nova-core/fb/hal/ga100.rs b/drivers/gpu/nova-core/fb/hal/ga100.rs
new file mode 100644
index 000000000000..871c42bf033a
--- /dev/null
+++ b/drivers/gpu/nova-core/fb/hal/ga100.rs
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::prelude::*;
+
+use crate::driver::Bar0;
+use crate::fb::hal::FbHal;
+use crate::regs;
+
+use super::tu102::FLUSH_SYSMEM_ADDR_SHIFT;
+
+struct Ga100;
+
+pub(super) fn read_sysmem_flush_page_ga100(bar: &Bar0) -> u64 {
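+ // Reassemble the 64-bit address from its two windows: bits 39..8 come from the low
+ // register, bits 63..40 from the high one.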
+ u64::from(regs::NV_PFB_NISO_FLUSH_SYSMEM_ADDR::read(bar).adr_39_08()) << FLUSH_SYSMEM_ADDR_SHIFT
+ | u64::from(regs::NV_PFB_NISO_FLUSH_SYSMEM_ADDR_HI::read(bar).adr_63_40())
+ << FLUSH_SYSMEM_ADDR_SHIFT_HI
+}
+
+pub(super) fn write_sysmem_flush_page_ga100(bar: &Bar0, addr: u64) {
+ regs::NV_PFB_NISO_FLUSH_SYSMEM_ADDR_HI::default()
+ .set_adr_63_40((addr >> FLUSH_SYSMEM_ADDR_SHIFT_HI) as u32)
+ .write(bar);
+ regs::NV_PFB_NISO_FLUSH_SYSMEM_ADDR::default()
+ .set_adr_39_08((addr >> FLUSH_SYSMEM_ADDR_SHIFT) as u32)
+ .write(bar);
+}
+
+pub(super) fn display_enabled_ga100(bar: &Bar0) -> bool {
+ !regs::ga100::NV_FUSE_STATUS_OPT_DISPLAY::read(bar).display_disabled()
+}
+
+/// Shift applied to the sysmem address before it is written into
+/// `NV_PFB_NISO_FLUSH_SYSMEM_ADDR_HI`.
+const FLUSH_SYSMEM_ADDR_SHIFT_HI: u32 = 40;
+
+impl FbHal for Ga100 {
+ fn read_sysmem_flush_page(&self, bar: &Bar0) -> u64 {
+ read_sysmem_flush_page_ga100(bar)
+ }
+
+ fn write_sysmem_flush_page(&self, bar: &Bar0, addr: u64) -> Result {
+ write_sysmem_flush_page_ga100(bar, addr);
+
+ Ok(())
+ }
+
+ fn supports_display(&self, bar: &Bar0) -> bool {
+ display_enabled_ga100(bar)
+ }
+
+ fn vidmem_size(&self, bar: &Bar0) -> u64 {
+ super::tu102::vidmem_size_gp102(bar)
+ }
+}
+
+const GA100: Ga100 = Ga100;
+pub(super) const GA100_HAL: &dyn FbHal = &GA100;
diff --git a/drivers/gpu/nova-core/fb/hal/ga102.rs b/drivers/gpu/nova-core/fb/hal/ga102.rs
new file mode 100644
index 000000000000..a73b77e39715
--- /dev/null
+++ b/drivers/gpu/nova-core/fb/hal/ga102.rs
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::prelude::*;
+
+use crate::driver::Bar0;
+use crate::fb::hal::FbHal;
+use crate::regs;
+
+fn vidmem_size_ga102(bar: &Bar0) -> u64 {
+ regs::NV_USABLE_FB_SIZE_IN_MB::read(bar).usable_fb_size()
+}
+
+struct Ga102;
+
+impl FbHal for Ga102 {
+ fn read_sysmem_flush_page(&self, bar: &Bar0) -> u64 {
+ super::ga100::read_sysmem_flush_page_ga100(bar)
+ }
+
+ fn write_sysmem_flush_page(&self, bar: &Bar0, addr: u64) -> Result {
+ super::ga100::write_sysmem_flush_page_ga100(bar, addr);
+
+ Ok(())
+ }
+
+ fn supports_display(&self, bar: &Bar0) -> bool {
+ super::ga100::display_enabled_ga100(bar)
+ }
+
+ fn vidmem_size(&self, bar: &Bar0) -> u64 {
+ vidmem_size_ga102(bar)
+ }
+}
+
+const GA102: Ga102 = Ga102;
+pub(super) const GA102_HAL: &dyn FbHal = &GA102;
diff --git a/drivers/gpu/nova-core/fb/hal/tu102.rs b/drivers/gpu/nova-core/fb/hal/tu102.rs
new file mode 100644
index 000000000000..b022c781caf4
--- /dev/null
+++ b/drivers/gpu/nova-core/fb/hal/tu102.rs
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::driver::Bar0;
+use crate::fb::hal::FbHal;
+use crate::regs;
+use kernel::prelude::*;
+
+/// Shift applied to the sysmem address before it is written into `NV_PFB_NISO_FLUSH_SYSMEM_ADDR`,
+/// to be used by HALs.
+pub(super) const FLUSH_SYSMEM_ADDR_SHIFT: u32 = 8;
+
+pub(super) fn read_sysmem_flush_page_gm107(bar: &Bar0) -> u64 {
+ u64::from(regs::NV_PFB_NISO_FLUSH_SYSMEM_ADDR::read(bar).adr_39_08()) << FLUSH_SYSMEM_ADDR_SHIFT
+}
+
+pub(super) fn write_sysmem_flush_page_gm107(bar: &Bar0, addr: u64) -> Result {
+ // Check that the address doesn't overflow the receiving 32-bit register.
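+ // The register holds address bits 39..8, so `addr` must fit within 40 bits.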
+ if addr >> (u32::BITS + FLUSH_SYSMEM_ADDR_SHIFT) == 0 {
+ regs::NV_PFB_NISO_FLUSH_SYSMEM_ADDR::default()
+ .set_adr_39_08((addr >> FLUSH_SYSMEM_ADDR_SHIFT) as u32)
+ .write(bar);
+
+ Ok(())
+ } else {
+ Err(EINVAL)
+ }
+}
+
+pub(super) fn display_enabled_gm107(bar: &Bar0) -> bool {
+ !regs::gm107::NV_FUSE_STATUS_OPT_DISPLAY::read(bar).display_disabled()
+}
+
+pub(super) fn vidmem_size_gp102(bar: &Bar0) -> u64 {
+ regs::NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE::read(bar).usable_fb_size()
+}
+
+struct Tu102;
+
+impl FbHal for Tu102 {
+ fn read_sysmem_flush_page(&self, bar: &Bar0) -> u64 {
+ read_sysmem_flush_page_gm107(bar)
+ }
+
+ fn write_sysmem_flush_page(&self, bar: &Bar0, addr: u64) -> Result {
+ write_sysmem_flush_page_gm107(bar, addr)
+ }
+
+ fn supports_display(&self, bar: &Bar0) -> bool {
+ display_enabled_gm107(bar)
+ }
+
+ fn vidmem_size(&self, bar: &Bar0) -> u64 {
+ vidmem_size_gp102(bar)
+ }
+}
+
+const TU102: Tu102 = Tu102;
+pub(super) const TU102_HAL: &dyn FbHal = &TU102;
diff --git a/drivers/gpu/nova-core/firmware.rs b/drivers/gpu/nova-core/firmware.rs
index 4b8a38358a4f..2931912ddba0 100644
--- a/drivers/gpu/nova-core/firmware.rs
+++ b/drivers/gpu/nova-core/firmware.rs
@@ -3,14 +3,20 @@
//! Contains structures and functions dedicated to the parsing, building and patching of firmwares
//! to be loaded into a given execution unit.
+use core::marker::PhantomData;
+
use kernel::device;
use kernel::firmware;
use kernel::prelude::*;
use kernel::str::CString;
+use crate::dma::DmaObject;
+use crate::falcon::FalconFirmware;
use crate::gpu;
use crate::gpu::Chipset;
+pub(crate) mod fwsec;
+
pub(crate) const FIRMWARE_VERSION: &str = "535.113.01";
/// Structure encapsulating the firmware blobs required for the GPU to operate.
@@ -24,11 +30,12 @@ pub(crate) struct Firmware {
impl Firmware {
pub(crate) fn new(dev: &device::Device, chipset: Chipset, ver: &str) -> Result<Firmware> {
- let mut chip_name = CString::try_from_fmt(fmt!("{}", chipset))?;
+ let mut chip_name = CString::try_from_fmt(fmt!("{chipset}"))?;
chip_name.make_ascii_lowercase();
+ let chip_name = &*chip_name;
let request = |name_| {
- CString::try_from_fmt(fmt!("nvidia/{}/gsp/{}-{}.bin", &*chip_name, name_, ver))
+ CString::try_from_fmt(fmt!("nvidia/{chip_name}/gsp/{name_}-{ver}.bin"))
.and_then(|path| firmware::Firmware::request(&path, dev))
};
@@ -41,6 +48,108 @@ impl Firmware {
}
}
+/// Structure used to describe some firmwares, notably FWSEC-FRTS.
+#[repr(C)]
+#[derive(Debug, Clone)]
+pub(crate) struct FalconUCodeDescV3 {
+ /// Header defined by `NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC*` in OpenRM.
+ hdr: u32,
+ /// Stored size of the ucode after the header.
+ stored_size: u32,
+ /// Offset in `DMEM` at which the signature is expected to be found.
+ pub(crate) pkc_data_offset: u32,
+ /// Offset after the code segment at which the app headers are located.
+ pub(crate) interface_offset: u32,
+ /// Base address at which to load the code segment into `IMEM`.
+ pub(crate) imem_phys_base: u32,
+ /// Size in bytes of the code to copy into `IMEM`.
+ pub(crate) imem_load_size: u32,
+ /// Virtual `IMEM` address (i.e. `tag`) at which the code should start.
+ pub(crate) imem_virt_base: u32,
+ /// Base address at which to load the data segment into `DMEM`.
+ pub(crate) dmem_phys_base: u32,
+ /// Size in bytes of the data to copy into `DMEM`.
+ pub(crate) dmem_load_size: u32,
+ /// Mask of the falcon engines on which this firmware can run.
+ pub(crate) engine_id_mask: u16,
+ /// ID of the ucode used to infer a fuse register to validate the signature.
+ pub(crate) ucode_id: u8,
+ /// Number of signatures in this firmware.
+ pub(crate) signature_count: u8,
+ /// Versions of the signatures, used to infer a valid signature to use.
+ pub(crate) signature_versions: u16,
+ _reserved: u16,
+}
+
+impl FalconUCodeDescV3 {
+ /// Returns the size in bytes of the header.
+ pub(crate) fn size(&self) -> usize {
+ const HDR_SIZE_SHIFT: u32 = 16;
+ const HDR_SIZE_MASK: u32 = 0xffff0000;
+
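+ // The header size is stored in the upper 16 bits of `hdr`; e.g. a (hypothetical)
+ // `hdr` value of 0x00600001 denotes a 0x60-byte header.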
+ ((self.hdr & HDR_SIZE_MASK) >> HDR_SIZE_SHIFT) as usize
+ }
+}
+
+/// Trait implemented by types defining the signed state of a firmware.
+trait SignedState {}
+
+/// Type indicating that the firmware must be signed before it can be used.
+struct Unsigned;
+impl SignedState for Unsigned {}
+
+/// Type indicating that the firmware is signed and ready to be loaded.
+struct Signed;
+impl SignedState for Signed {}
+
+/// A [`DmaObject`] containing a specific microcode ready to be loaded into a falcon.
+///
+/// This is module-local and meant for sub-modules to use internally.
+///
+/// After construction, a firmware is [`Unsigned`], and must generally be patched with a signature
+/// before it can be loaded (with an exception for development hardware). The
+/// [`Self::patch_signature`] and [`Self::no_patch_signature`] methods are used to transition the
+/// firmware to its [`Signed`] state.
+struct FirmwareDmaObject<F: FalconFirmware, S: SignedState>(DmaObject, PhantomData<(F, S)>);
+
+/// Trait for signatures to be patched directly into a given firmware.
+///
+/// This is module-local and meant for sub-modules to use internally.
+trait FirmwareSignature<F: FalconFirmware>: AsRef<[u8]> {}
+
+impl<F: FalconFirmware> FirmwareDmaObject<F, Unsigned> {
+ /// Patches the firmware at offset `sig_base_img` with `signature`.
+ fn patch_signature<S: FirmwareSignature<F>>(
+ mut self,
+ signature: &S,
+ sig_base_img: usize,
+ ) -> Result<FirmwareDmaObject<F, Signed>> {
+ let signature_bytes = signature.as_ref();
+ if sig_base_img + signature_bytes.len() > self.0.size() {
+ return Err(EINVAL);
+ }
+
+ // SAFETY: We are the only user of this object, so there cannot be any race.
+ let dst = unsafe { self.0.start_ptr_mut().add(sig_base_img) };
+
+ // SAFETY: `signature` and `dst` are valid, properly aligned, and do not overlap.
+ unsafe {
+ core::ptr::copy_nonoverlapping(signature_bytes.as_ptr(), dst, signature_bytes.len())
+ };
+
+ Ok(FirmwareDmaObject(self.0, PhantomData))
+ }
+
+ /// Mark the firmware as signed without patching it.
+ ///
+ /// This method is used to explicitly confirm that we do not need to sign the firmware, while
+ /// allowing us to continue as if it was. This is typically only needed for development
+ /// hardware.
+ fn no_patch_signature(self) -> FirmwareDmaObject<F, Signed> {
+ FirmwareDmaObject(self.0, PhantomData)
+ }
+}
+
pub(crate) struct ModInfoBuilder<const N: usize>(firmware::ModInfoBuilder<N>);
impl<const N: usize> ModInfoBuilder<N> {
diff --git a/drivers/gpu/nova-core/firmware/fwsec.rs b/drivers/gpu/nova-core/firmware/fwsec.rs
new file mode 100644
index 000000000000..0dff3cfa90af
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/fwsec.rs
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! FWSEC is a High Secure firmware that is extracted from the BIOS and performs the first step of
+//! the GSP startup by creating the WPR2 memory region and copying critical areas of the VBIOS into
+//! it after authenticating them, ensuring they haven't been tampered with. It runs on the GSP
+//! falcon.
+//!
+//! Before being run, it needs to be patched in two areas:
+//!
+//! - The command to be run, as this firmware can perform several tasks;
+//! - The ucode signature, so the GSP falcon can run FWSEC in HS mode.
+
+use core::marker::PhantomData;
+use core::mem::{align_of, size_of};
+use core::ops::Deref;
+
+use kernel::device::{self, Device};
+use kernel::prelude::*;
+use kernel::transmute::FromBytes;
+
+use crate::dma::DmaObject;
+use crate::driver::Bar0;
+use crate::falcon::gsp::Gsp;
+use crate::falcon::{Falcon, FalconBromParams, FalconFirmware, FalconLoadParams, FalconLoadTarget};
+use crate::firmware::{FalconUCodeDescV3, FirmwareDmaObject, FirmwareSignature, Signed, Unsigned};
+use crate::vbios::Vbios;
+
+const NVFW_FALCON_APPIF_ID_DMEMMAPPER: u32 = 0x4;
+
+#[repr(C)]
+#[derive(Debug)]
+struct FalconAppifHdrV1 {
+ version: u8,
+ header_size: u8,
+ entry_size: u8,
+ entry_count: u8,
+}
+// SAFETY: any byte sequence is valid for this struct.
+unsafe impl FromBytes for FalconAppifHdrV1 {}
+
+#[repr(C, packed)]
+#[derive(Debug)]
+struct FalconAppifV1 {
+ id: u32,
+ dmem_base: u32,
+}
+// SAFETY: any byte sequence is valid for this struct.
+unsafe impl FromBytes for FalconAppifV1 {}
+
+#[derive(Debug)]
+#[repr(C, packed)]
+struct FalconAppifDmemmapperV3 {
+ signature: u32,
+ version: u16,
+ size: u16,
+ cmd_in_buffer_offset: u32,
+ cmd_in_buffer_size: u32,
+ cmd_out_buffer_offset: u32,
+ cmd_out_buffer_size: u32,
+ nvf_img_data_buffer_offset: u32,
+ nvf_img_data_buffer_size: u32,
+ printf_buffer_hdr: u32,
+ ucode_build_time_stamp: u32,
+ ucode_signature: u32,
+ init_cmd: u32,
+ ucode_feature: u32,
+ ucode_cmd_mask0: u32,
+ ucode_cmd_mask1: u32,
+ multi_tgt_tbl: u32,
+}
+// SAFETY: any byte sequence is valid for this struct.
+unsafe impl FromBytes for FalconAppifDmemmapperV3 {}
+
+#[derive(Debug)]
+#[repr(C, packed)]
+struct ReadVbios {
+ ver: u32,
+ hdr: u32,
+ addr: u64,
+ size: u32,
+ flags: u32,
+}
+// SAFETY: any byte sequence is valid for this struct.
+unsafe impl FromBytes for ReadVbios {}
+
+#[derive(Debug)]
+#[repr(C, packed)]
+struct FrtsRegion {
+ ver: u32,
+ hdr: u32,
+ addr: u32,
+ size: u32,
+ ftype: u32,
+}
+// SAFETY: any byte sequence is valid for this struct.
+unsafe impl FromBytes for FrtsRegion {}
+
+const NVFW_FRTS_CMD_REGION_TYPE_FB: u32 = 2;
+
+#[repr(C, packed)]
+struct FrtsCmd {
+ read_vbios: ReadVbios,
+ frts_region: FrtsRegion,
+}
+// SAFETY: any byte sequence is valid for this struct.
+unsafe impl FromBytes for FrtsCmd {}
+
+const NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS: u32 = 0x15;
+const NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB: u32 = 0x19;
+
+/// Command for the [`FwsecFirmware`] to execute.
+pub(crate) enum FwsecCommand {
+ /// Asks [`FwsecFirmware`] to carve out the WPR2 area and place a verified copy of the VBIOS
+ /// image into it.
+ Frts { frts_addr: u64, frts_size: u64 },
+ /// Asks [`FwsecFirmware`] to load pre-OS apps on the PMU.
+ #[expect(dead_code)]
+ Sb,
+}
+
+/// Size of the signatures used in FWSEC.
+const BCRT30_RSA3K_SIG_SIZE: usize = 384;
+
+/// A single signature that can be patched into a FWSEC image.
+#[repr(transparent)]
+pub(crate) struct Bcrt30Rsa3kSignature([u8; BCRT30_RSA3K_SIG_SIZE]);
+
+/// SAFETY: A signature is just an array of bytes.
+unsafe impl FromBytes for Bcrt30Rsa3kSignature {}
+
+impl From<[u8; BCRT30_RSA3K_SIG_SIZE]> for Bcrt30Rsa3kSignature {
+ fn from(sig: [u8; BCRT30_RSA3K_SIG_SIZE]) -> Self {
+ Self(sig)
+ }
+}
+
+impl AsRef<[u8]> for Bcrt30Rsa3kSignature {
+ fn as_ref(&self) -> &[u8] {
+ &self.0
+ }
+}
+
+impl FirmwareSignature<FwsecFirmware> for Bcrt30Rsa3kSignature {}
+
+/// Reinterpret the area starting from `offset` in `fw` as an instance of `T` (which must implement
+/// [`FromBytes`]) and return a reference to it.
+///
+/// # Safety
+///
+/// Callers must ensure that the region of memory returned is not written for as long as the
+/// returned reference is alive.
+///
+/// TODO[TRSM][COHA]: Remove this and `transmute_mut` once `CoherentAllocation::as_slice` is
+/// available and we have a way to transmute objects implementing FromBytes, e.g.:
+/// https://lore.kernel.org/lkml/20250330234039.29814-1-christiansantoslima21@gmail.com/
+unsafe fn transmute<'a, 'b, T: Sized + FromBytes>(
+ fw: &'a DmaObject,
+ offset: usize,
+) -> Result<&'b T> {
+ if offset + size_of::<T>() > fw.size() {
+ return Err(EINVAL);
+ }
+ if (fw.start_ptr() as usize + offset) % align_of::<T>() != 0 {
+ return Err(EINVAL);
+ }
+
+ // SAFETY: we have checked that the pointer is properly aligned and that the memory it
+ // points to is large enough to contain an instance of `T`, which implements `FromBytes`.
+ Ok(unsafe { &*(fw.start_ptr().add(offset).cast::<T>()) })
+}
+
+/// Reinterpret the area starting from `offset` in `fw` as a mutable instance of `T` (which must
+/// implement [`FromBytes`]) and return a reference to it.
+///
+/// # Safety
+///
+/// Callers must ensure that the region of memory returned is not read or written for as long as
+/// the returned reference is alive.
+unsafe fn transmute_mut<'a, 'b, T: Sized + FromBytes>(
+ fw: &'a mut DmaObject,
+ offset: usize,
+) -> Result<&'b mut T> {
+ if offset + size_of::<T>() > fw.size() {
+ return Err(EINVAL);
+ }
+ if (fw.start_ptr_mut() as usize + offset) % align_of::<T>() != 0 {
+ return Err(EINVAL);
+ }
+
+ // SAFETY: we have checked that the pointer is properly aligned and that the memory it
+ // points to is large enough to contain an instance of `T`, which implements `FromBytes`.
+ Ok(unsafe { &mut *(fw.start_ptr_mut().add(offset).cast::<T>()) })
+}
+
+/// The FWSEC microcode, extracted from the BIOS and to be run on the GSP falcon.
+///
+/// It is responsible for e.g. carving out the WPR2 region as the first step of the GSP bootflow.
+pub(crate) struct FwsecFirmware {
+ /// Descriptor of the firmware.
+ desc: FalconUCodeDescV3,
+ /// GPU-accessible DMA object containing the firmware.
+ ucode: FirmwareDmaObject<Self, Signed>,
+}
+
+// We need to load full DMEM pages.
+const DMEM_LOAD_SIZE_ALIGN: u32 = 256;
+
+impl FalconLoadParams for FwsecFirmware {
+ fn imem_load_params(&self) -> FalconLoadTarget {
+ FalconLoadTarget {
+ src_start: 0,
+ dst_start: self.desc.imem_phys_base,
+ len: self.desc.imem_load_size,
+ }
+ }
+
+ fn dmem_load_params(&self) -> FalconLoadTarget {
+ FalconLoadTarget {
+ src_start: self.desc.imem_load_size,
+ dst_start: self.desc.dmem_phys_base,
+ // TODO[NUMM]: replace with `align_up` once it lands.
+ len: self
+ .desc
+ .dmem_load_size
+ .next_multiple_of(DMEM_LOAD_SIZE_ALIGN),
+ }
+ }
+
+ fn brom_params(&self) -> FalconBromParams {
+ FalconBromParams {
+ pkc_data_offset: self.desc.pkc_data_offset,
+ engine_id_mask: self.desc.engine_id_mask,
+ ucode_id: self.desc.ucode_id,
+ }
+ }
+
+ fn boot_addr(&self) -> u32 {
+ 0
+ }
+}
+
+impl Deref for FwsecFirmware {
+ type Target = DmaObject;
+
+ fn deref(&self) -> &Self::Target {
+ &self.ucode.0
+ }
+}
+
+impl FalconFirmware for FwsecFirmware {
+ type Target = Gsp;
+}
+
+impl FirmwareDmaObject<FwsecFirmware, Unsigned> {
+ fn new_fwsec(dev: &Device<device::Bound>, bios: &Vbios, cmd: FwsecCommand) -> Result<Self> {
+ let desc = bios.fwsec_image().header(dev)?;
+ let ucode = bios.fwsec_image().ucode(dev, desc)?;
+ let mut dma_object = DmaObject::from_data(dev, ucode)?;
+
+ let hdr_offset = (desc.imem_load_size + desc.interface_offset) as usize;
+ // SAFETY: we have exclusive access to `dma_object`.
+ let hdr: &FalconAppifHdrV1 = unsafe { transmute(&dma_object, hdr_offset) }?;
+
+ if hdr.version != 1 {
+ return Err(EINVAL);
+ }
+
+ // Find the DMEM mapper section in the firmware.
+ for i in 0..hdr.entry_count as usize {
+ let app: &FalconAppifV1 =
+ // SAFETY: we have exclusive access to `dma_object`.
+ unsafe {
+ transmute(
+ &dma_object,
+ hdr_offset + hdr.header_size as usize + i * hdr.entry_size as usize
+ )
+ }?;
+
+ if app.id != NVFW_FALCON_APPIF_ID_DMEMMAPPER {
+ continue;
+ }
+
+ // SAFETY: we have exclusive access to `dma_object`.
+ let dmem_mapper: &mut FalconAppifDmemmapperV3 = unsafe {
+ transmute_mut(
+ &mut dma_object,
+ (desc.imem_load_size + app.dmem_base) as usize,
+ )
+ }?;
+
+ // SAFETY: we have exclusive access to `dma_object`.
+ let frts_cmd: &mut FrtsCmd = unsafe {
+ transmute_mut(
+ &mut dma_object,
+ (desc.imem_load_size + dmem_mapper.cmd_in_buffer_offset) as usize,
+ )
+ }?;
+
+ frts_cmd.read_vbios = ReadVbios {
+ ver: 1,
+ hdr: size_of::<ReadVbios>() as u32,
+ addr: 0,
+ size: 0,
+ flags: 2,
+ };
+
+ dmem_mapper.init_cmd = match cmd {
+ FwsecCommand::Frts {
+ frts_addr,
+ frts_size,
+ } => {
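+ // The FRTS address and size are passed to FWSEC in units of 4K pages.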
+ frts_cmd.frts_region = FrtsRegion {
+ ver: 1,
+ hdr: size_of::<FrtsRegion>() as u32,
+ addr: (frts_addr >> 12) as u32,
+ size: (frts_size >> 12) as u32,
+ ftype: NVFW_FRTS_CMD_REGION_TYPE_FB,
+ };
+
+ NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS
+ }
+ FwsecCommand::Sb => NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB,
+ };
+
+ // Return early as we found and patched the DMEMMAPPER region.
+ return Ok(Self(dma_object, PhantomData));
+ }
+
+ Err(ENOTSUPP)
+ }
+}
+
+impl FwsecFirmware {
+ /// Extract the FWSEC firmware from `bios` and patch it to run on `falcon` with the `cmd`
+ /// command.
+ pub(crate) fn new(
+ dev: &Device<device::Bound>,
+ falcon: &Falcon<Gsp>,
+ bar: &Bar0,
+ bios: &Vbios,
+ cmd: FwsecCommand,
+ ) -> Result<Self> {
+ let ucode_dma = FirmwareDmaObject::<Self, _>::new_fwsec(dev, bios, cmd)?;
+
+ // Patch signature if needed.
+ let desc = bios.fwsec_image().header(dev)?;
+ let ucode_signed = if desc.signature_count != 0 {
+ let sig_base_img = (desc.imem_load_size + desc.pkc_data_offset) as usize;
+ let desc_sig_versions = u32::from(desc.signature_versions);
+ let reg_fuse_version =
+ falcon.signature_reg_fuse_version(bar, desc.engine_id_mask, desc.ucode_id)?;
+ dev_dbg!(
+ dev,
+ "desc_sig_versions: {:#x}, reg_fuse_version: {}\n",
+ desc_sig_versions,
+ reg_fuse_version
+ );
+ let signature_idx = {
+ let reg_fuse_version_bit = 1 << reg_fuse_version;
+
+ // Check if the fuse version is supported by the firmware.
+ if desc_sig_versions & reg_fuse_version_bit == 0 {
+ dev_err!(
+ dev,
+ "no matching signature: {:#x} {:#x}\n",
+ reg_fuse_version_bit,
+ desc_sig_versions,
+ );
+ return Err(EINVAL);
+ }
+
+ // `desc_sig_versions` has one bit set per included signature. Thus, the index of
+ // the signature to patch is the number of bits in `desc_sig_versions` set to `1`
+ // before `reg_fuse_version_bit`.
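+ //
+ // For example, with a (hypothetical) `desc_sig_versions` of 0b1011 and
+ // `reg_fuse_version` of 3, `reg_fuse_version_bit` is 0b1000; two lower bits are
+ // set, so the signature at index 2 is patched in.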
+
+ // Mask of the bits of `desc_sig_versions` to preserve.
+ let reg_fuse_version_mask = reg_fuse_version_bit.wrapping_sub(1);
+
+ (desc_sig_versions & reg_fuse_version_mask).count_ones() as usize
+ };
+
+ dev_dbg!(dev, "patching signature with index {}\n", signature_idx);
+ let signature = bios
+ .fwsec_image()
+ .sigs(dev, desc)
+ .and_then(|sigs| sigs.get(signature_idx).ok_or(EINVAL))?;
+
+ ucode_dma.patch_signature(signature, sig_base_img)?
+ } else {
+ ucode_dma.no_patch_signature()
+ };
+
+ Ok(FwsecFirmware {
+ desc: desc.clone(),
+ ucode: ucode_signed,
+ })
+ }
+
+ /// Loads the FWSEC firmware into `falcon` and executes it.
+ pub(crate) fn run(
+ &self,
+ dev: &Device<device::Bound>,
+ falcon: &Falcon<Gsp>,
+ bar: &Bar0,
+ ) -> Result<()> {
+ // Reset falcon, load the firmware, and run it.
+ falcon
+ .reset(bar)
+ .inspect_err(|e| dev_err!(dev, "Failed to reset GSP falcon: {:?}\n", e))?;
+ falcon
+ .dma_load(bar, self)
+ .inspect_err(|e| dev_err!(dev, "Failed to load FWSEC firmware: {:?}\n", e))?;
+ let (mbox0, _) = falcon
+ .boot(bar, Some(0), None)
+ .inspect_err(|e| dev_err!(dev, "Failed to boot FWSEC firmware: {:?}\n", e))?;
+ if mbox0 != 0 {
+ dev_err!(dev, "FWSEC firmware returned error {}\n", mbox0);
+ Err(EIO)
+ } else {
+ Ok(())
+ }
+ }
+}
diff --git a/drivers/gpu/nova-core/gfw.rs b/drivers/gpu/nova-core/gfw.rs
new file mode 100644
index 000000000000..8ac1ed187199
--- /dev/null
+++ b/drivers/gpu/nova-core/gfw.rs
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! GPU Firmware (`GFW`) support, a.k.a `devinit`.
+//!
+//! Upon reset, the GPU runs some firmware code from the BIOS to set up its core parameters. Most of
+//! the GPU is considered unusable until this step is completed, so we must wait on it before
+//! performing driver initialization.
+//!
+//! A clarification about devinit terminology: devinit is a sequence of register read/writes after
+//! reset that performs tasks such as:
+//! 1. Programming VRAM memory controller timings.
+//! 2. Power sequencing.
+//! 3. Clock and PLL configuration.
+//! 4. Thermal management.
+//!
+//! devinit itself is a 'script' which is interpreted by an interpreter program typically running
+//! on the PMU microcontroller.
+//!
+//! Note that the devinit sequence also needs to run during suspend/resume.
+
+use kernel::bindings;
+use kernel::prelude::*;
+use kernel::time::Delta;
+
+use crate::driver::Bar0;
+use crate::regs;
+use crate::util;
+
+/// Wait for the `GFW` (GPU firmware) boot completion signal (`GFW_BOOT`), with a 4-second timeout.
+///
+/// Upon GPU reset, several microcontrollers (such as the PMU, SEC2, and GSP) run firmware code
+/// to set up the GPU's core parameters. Most of the GPU is considered unusable until this step
+/// is completed, so it must be waited on very early during driver initialization.
+///
+/// The `GFW` code includes several components that need to execute before the driver loads. These
+/// components are located in the VBIOS ROM and executed in a sequence on these different
+/// microcontrollers. The devinit sequence typically runs on the PMU, and the FWSEC runs on the
+/// GSP.
+///
+/// This function waits for a signal indicating that core initialization is complete. Before this
+/// signal is received, little can be done with the GPU. This signal is set by the FWSEC running on
+/// the GSP in Heavy-secured mode.
+pub(crate) fn wait_gfw_boot_completion(bar: &Bar0) -> Result {
+ // Before accessing the completion status in `NV_PGC6_AON_SECURE_SCRATCH_GROUP_05`, we must
+ // first check `NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK`. This is because
+ // `NV_PGC6_AON_SECURE_SCRATCH_GROUP_05` becomes accessible only after the secure firmware
+ // (FWSEC) lowers the privilege level to allow CPU (LS/Light-secured) access. We can only
+ // safely read the status register from CPU (LS/Light-secured) once the mask indicates
+ // that the privilege level has been lowered.
+ //
+ // TIMEOUT: arbitrarily large value. GFW starts running immediately after the GPU is put out of
+ // reset, and should complete in less time than that.
+ util::wait_on(Delta::from_secs(4), || {
+ // Check that FWSEC has lowered its protection level before reading the GFW_BOOT status.
+ let gfw_booted = regs::NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK::read(bar)
+ .read_protection_level0()
+ && regs::NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT::read(bar).completed();
+
+ if gfw_booted {
+ Some(())
+ } else {
+ // TODO[DLAY]: replace with [1] once it merges.
+ // [1] https://lore.kernel.org/rust-for-linux/20250423192857.199712-6-fujita.tomonori@gmail.com/
+ //
+ // SAFETY: `msleep()` is safe to call with any parameter.
+ unsafe { bindings::msleep(1) };
+
+ None
+ }
+ })
+}
diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs
index 47653c14838b..b5c9786619a9 100644
--- a/drivers/gpu/nova-core/gpu.rs
+++ b/drivers/gpu/nova-core/gpu.rs
@@ -3,9 +3,15 @@
use kernel::{device, devres::Devres, error::code::*, pci, prelude::*, sync::Arc};
use crate::driver::Bar0;
+use crate::falcon::{gsp::Gsp, sec2::Sec2, Falcon};
+use crate::fb::FbLayout;
+use crate::fb::SysmemFlush;
+use crate::firmware::fwsec::{FwsecCommand, FwsecFirmware};
use crate::firmware::{Firmware, FIRMWARE_VERSION};
+use crate::gfw;
use crate::regs;
use crate::util;
+use crate::vbios::Vbios;
use core::fmt;
macro_rules! define_chipset {
@@ -31,7 +37,7 @@ macro_rules! define_chipset {
];
}
- // TODO replace with something like derive(FromPrimitive)
+ // TODO[FPRI]: replace with something like derive(FromPrimitive)
impl TryFrom<u32> for Chipset {
type Error = kernel::error::Error;
@@ -157,15 +163,106 @@ impl Spec {
}
/// Structure holding the resources required to operate the GPU.
-#[pin_data]
+#[pin_data(PinnedDrop)]
pub(crate) struct Gpu {
spec: Spec,
/// MMIO mapping of PCI BAR 0
bar: Arc<Devres<Bar0>>,
fw: Firmware,
+ /// System memory page required for flushing all pending GPU-side memory writes done through
+ /// PCIE into system memory, via sysmembar (a GPU-initiated HW memory-barrier operation).
+ sysmem_flush: SysmemFlush,
+}
+
+#[pinned_drop]
+impl PinnedDrop for Gpu {
+ fn drop(self: Pin<&mut Self>) {
+ // Unregister the sysmem flush page before we release it.
+ self.bar
+ .try_access_with(|b| self.sysmem_flush.unregister(b));
+ }
}
impl Gpu {
+ /// Helper function to load and run the FWSEC-FRTS firmware and confirm that it has properly
+ /// created the WPR2 region.
+ ///
+ /// TODO: this needs to be moved into a larger type responsible for booting the whole GSP
+ /// (`GspBooter`?).
+ fn run_fwsec_frts(
+ dev: &device::Device<device::Bound>,
+ falcon: &Falcon<Gsp>,
+ bar: &Bar0,
+ bios: &Vbios,
+ fb_layout: &FbLayout,
+ ) -> Result<()> {
+ // Check that the WPR2 region does not already exist - if it does, we cannot run
+ // FWSEC-FRTS until the GPU is reset.
+ if regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound() != 0 {
+ dev_err!(
+ dev,
+ "WPR2 region already exists - GPU needs to be reset to proceed\n"
+ );
+ return Err(EBUSY);
+ }
+
+ let fwsec_frts = FwsecFirmware::new(
+ dev,
+ falcon,
+ bar,
+ bios,
+ FwsecCommand::Frts {
+ frts_addr: fb_layout.frts.start,
+ frts_size: fb_layout.frts.end - fb_layout.frts.start,
+ },
+ )?;
+
+ // Run FWSEC-FRTS to create the WPR2 region.
+ fwsec_frts.run(dev, falcon, bar)?;
+
+ // SCRATCH_E contains the error code for FWSEC-FRTS.
+ let frts_status = regs::NV_PBUS_SW_SCRATCH_0E::read(bar).frts_err_code();
+ if frts_status != 0 {
+ dev_err!(
+ dev,
+ "FWSEC-FRTS returned with error code {:#x}",
+ frts_status
+ );
+
+ return Err(EIO);
+ }
+
+ // Check that the WPR2 region has been created as we requested.
+ let (wpr2_lo, wpr2_hi) = (
+ regs::NV_PFB_PRI_MMU_WPR2_ADDR_LO::read(bar).lower_bound(),
+ regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound(),
+ );
+
+ match (wpr2_lo, wpr2_hi) {
+ (_, 0) => {
+ dev_err!(dev, "WPR2 region not created after running FWSEC-FRTS\n");
+
+ Err(EIO)
+ }
+ (wpr2_lo, _) if wpr2_lo != fb_layout.frts.start => {
+ dev_err!(
+ dev,
+ "WPR2 region created at unexpected address {:#x}; expected {:#x}\n",
+ wpr2_lo,
+ fb_layout.frts.start,
+ );
+
+ Err(EIO)
+ }
+ (wpr2_lo, wpr2_hi) => {
+ dev_dbg!(dev, "WPR2: {:#x}-{:#x}\n", wpr2_lo, wpr2_hi);
+ dev_dbg!(dev, "GPU instance built\n");
+
+ Ok(())
+ }
+ }
+ }
+
pub(crate) fn new(
pdev: &pci::Device<device::Bound>,
devres_bar: Arc<Devres<Bar0>>,
@@ -182,10 +279,34 @@ impl Gpu {
spec.revision
);
+ // We must wait for GFW_BOOT completion before doing any significant setup on the GPU.
+ gfw::wait_gfw_boot_completion(bar)
+ .inspect_err(|_| dev_err!(pdev.as_ref(), "GFW boot did not complete"))?;
+
+ let sysmem_flush = SysmemFlush::register(pdev.as_ref(), bar, spec.chipset)?;
+
+ let gsp_falcon = Falcon::<Gsp>::new(
+ pdev.as_ref(),
+ spec.chipset,
+ bar,
+ spec.chipset > Chipset::GA100,
+ )?;
+ gsp_falcon.clear_swgen0_intr(bar);
+
+ let _sec2_falcon = Falcon::<Sec2>::new(pdev.as_ref(), spec.chipset, bar, true)?;
+
+ let fb_layout = FbLayout::new(spec.chipset, bar)?;
+ dev_dbg!(pdev.as_ref(), "{:#x?}\n", fb_layout);
+
+ let bios = Vbios::new(pdev, bar)?;
+
+ Self::run_fwsec_frts(pdev.as_ref(), &gsp_falcon, bar, &bios, &fb_layout)?;
+
Ok(pin_init!(Self {
spec,
bar: devres_bar,
- fw
+ fw,
+ sysmem_flush,
}))
}
}
diff --git a/drivers/gpu/nova-core/nova_core.rs b/drivers/gpu/nova-core/nova_core.rs
index 618632f0abcc..cb2bbb30cba1 100644
--- a/drivers/gpu/nova-core/nova_core.rs
+++ b/drivers/gpu/nova-core/nova_core.rs
@@ -2,18 +2,23 @@
//! Nova Core GPU Driver
+mod dma;
mod driver;
+mod falcon;
+mod fb;
mod firmware;
+mod gfw;
mod gpu;
mod regs;
mod util;
+mod vbios;
pub(crate) const MODULE_NAME: &kernel::str::CStr = <LocalModule as kernel::ModuleMetadata>::NAME;
kernel::module_pci_driver! {
type: driver::NovaCore,
name: "NovaCore",
- author: "Danilo Krummrich",
+ authors: ["Danilo Krummrich"],
description: "Nova Core GPU driver",
license: "GPL v2",
firmware: [],
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
index 5a1273230306..d49fddf6a3c6 100644
--- a/drivers/gpu/nova-core/regs.rs
+++ b/drivers/gpu/nova-core/regs.rs
@@ -7,10 +7,14 @@
#[macro_use]
mod macros;
+use crate::falcon::{
+ DmaTrfCmdSize, FalconCoreRev, FalconCoreRevSubversion, FalconFbifMemType, FalconFbifTarget,
+ FalconModSelAlgo, FalconSecurityModel, PeregrineCoreSelect,
+};
use crate::gpu::{Architecture, Chipset};
use kernel::prelude::*;
-/* PMC */
+// PMC
register!(NV_PMC_BOOT_0 @ 0x00000000, "Basic revision information about the GPU" {
3:0 minor_revision as u8, "Minor revision of the chip";
@@ -32,8 +36,305 @@ impl NV_PMC_BOOT_0 {
pub(crate) fn chipset(self) -> Result<Chipset> {
self.architecture()
.map(|arch| {
- ((arch as u32) << Self::IMPLEMENTATION.len()) | self.implementation() as u32
+ ((arch as u32) << Self::IMPLEMENTATION.len()) | u32::from(self.implementation())
})
.and_then(Chipset::try_from)
}
}
+
+// PBUS
+
+// TODO[REGA]: this is an array of registers.
+register!(NV_PBUS_SW_SCRATCH_0E@0x00001438 {
+ 31:16 frts_err_code as u16;
+});
+
+// PFB
+
+// The following two registers together hold the physical system memory address that is used by the
+// GPU to perform sysmembar operations (see `fb::SysmemFlush`).
+
+register!(NV_PFB_NISO_FLUSH_SYSMEM_ADDR @ 0x00100c10 {
+ 31:0 adr_39_08 as u32;
+});
+
+register!(NV_PFB_NISO_FLUSH_SYSMEM_ADDR_HI @ 0x00100c40 {
+ 23:0 adr_63_40 as u32;
+});
+
+register!(NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE @ 0x00100ce0 {
+ 3:0 lower_scale as u8;
+ 9:4 lower_mag as u8;
+ 30:30 ecc_mode_enabled as bool;
+});
+
+impl NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE {
+ /// Returns the usable framebuffer size, in bytes.
+ pub(crate) fn usable_fb_size(self) -> u64 {
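+ // The raw size is `lower_mag` megabytes scaled by 2^`lower_scale`; e.g.
+ // (hypothetical) values lower_mag = 24 and lower_scale = 10 give 24 GiB.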
+ let size = (u64::from(self.lower_mag()) << u64::from(self.lower_scale()))
+ * kernel::sizes::SZ_1M as u64;
+
+ if self.ecc_mode_enabled() {
+ // Remove the memory reserved for ECC (one unit out of every 16).
+ size / 16 * 15
+ } else {
+ size
+ }
+ }
+}
+
+register!(NV_PFB_PRI_MMU_WPR2_ADDR_LO@0x001fa824 {
+ 31:4 lo_val as u32, "Bits 12..40 of the lower (inclusive) bound of the WPR2 region";
+});
+
+impl NV_PFB_PRI_MMU_WPR2_ADDR_LO {
+ /// Returns the lower (inclusive) bound of the WPR2 region.
+ pub(crate) fn lower_bound(self) -> u64 {
+ u64::from(self.lo_val()) << 12
+ }
+}
+
+register!(NV_PFB_PRI_MMU_WPR2_ADDR_HI@0x001fa828 {
+ 31:4 hi_val as u32, "Bits 12..40 of the higher (exclusive) bound of the WPR2 region";
+});
+
+impl NV_PFB_PRI_MMU_WPR2_ADDR_HI {
+ /// Returns the higher (exclusive) bound of the WPR2 region.
+ ///
+ /// A value of zero means the WPR2 region is not set.
+ pub(crate) fn higher_bound(self) -> u64 {
+ u64::from(self.hi_val()) << 12
+ }
+}
+
+// PGC6 register space.
+//
+// `GC6` is a GPU low-power state where VRAM is in self-refresh and the GPU is powered down (except
+// for power rails needed to keep self-refresh working and important registers and hardware
+// blocks).
+//
+// These scratch registers remain powered on even in a low-power state and have a designated group
+// number.
+
+// Privilege level mask register. It dictates whether the host CPU has privilege to access the
+// `PGC6_AON_SECURE_SCRATCH_GROUP_05` register (which it needs to read GFW_BOOT).
+register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK @ 0x00118128,
+ "Privilege level mask register" {
+ 0:0 read_protection_level0 as bool, "Set after FWSEC lowers its protection level";
+});
+
+// TODO[REGA]: This is an array of registers.
+register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05 @ 0x00118234 {
+ 31:0 value as u32;
+});
+
+register!(
+ NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT => NV_PGC6_AON_SECURE_SCRATCH_GROUP_05,
+ "Scratch group 05 register 0 used as GFW boot progress indicator" {
+ 7:0 progress as u8, "Progress of GFW boot (0xff means completed)";
+ }
+);
+
+impl NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT {
+ /// Returns `true` if GFW boot is completed.
+ pub(crate) fn completed(self) -> bool {
+ self.progress() == 0xff
+ }
+}
+
+register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_42 @ 0x001183a4 {
+ 31:0 value as u32;
+});
+
+register!(
+ NV_USABLE_FB_SIZE_IN_MB => NV_PGC6_AON_SECURE_SCRATCH_GROUP_42,
+ "Scratch group 42 register used as framebuffer size" {
+ 31:0 value as u32, "Usable framebuffer size, in megabytes";
+ }
+);
+
+impl NV_USABLE_FB_SIZE_IN_MB {
+ /// Returns the usable framebuffer size, in bytes.
+ pub(crate) fn usable_fb_size(self) -> u64 {
+ u64::from(self.value()) * kernel::sizes::SZ_1M as u64
+ }
+}
+
+// PDISP
+
+register!(NV_PDISP_VGA_WORKSPACE_BASE @ 0x00625f04 {
+ 3:3 status_valid as bool, "Set if the `addr` field is valid";
+ 31:8 addr as u32, "VGA workspace base address divided by 0x10000";
+});
+
+impl NV_PDISP_VGA_WORKSPACE_BASE {
+ /// Returns the base address of the VGA workspace, or `None` if none exists.
+ pub(crate) fn vga_workspace_addr(self) -> Option<u64> {
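+ // The register stores the base address right-shifted by 16 bits; e.g. an `addr`
+ // field of 0x3ff2 corresponds to base address 0x3ff2_0000.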
+ if self.status_valid() {
+ Some(u64::from(self.addr()) << 16)
+ } else {
+ None
+ }
+ }
+}
+
+// FUSE
+
+register!(NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION @ 0x00824100 {
+ 15:0 data as u16;
+});
+
+register!(NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION @ 0x00824140 {
+ 15:0 data as u16;
+});
+
+register!(NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION @ 0x008241c0 {
+ 15:0 data as u16;
+});
+
+// PFALCON
+
+register!(NV_PFALCON_FALCON_IRQSCLR @ +0x00000004 {
+ 4:4 halt as bool;
+ 6:6 swgen0 as bool;
+});
+
+register!(NV_PFALCON_FALCON_MAILBOX0 @ +0x00000040 {
+ 31:0 value as u32;
+});
+
+register!(NV_PFALCON_FALCON_MAILBOX1 @ +0x00000044 {
+ 31:0 value as u32;
+});
+
+register!(NV_PFALCON_FALCON_RM @ +0x00000084 {
+ 31:0 value as u32;
+});
+
+register!(NV_PFALCON_FALCON_HWCFG2 @ +0x000000f4 {
+ 10:10 riscv as bool;
+ 12:12 mem_scrubbing as bool, "Set to 0 after memory scrubbing is completed";
+ 31:31 reset_ready as bool, "Signal indicating that reset is completed (GA102+)";
+});
+
+impl NV_PFALCON_FALCON_HWCFG2 {
+ /// Returns `true` if memory scrubbing is completed.
+ pub(crate) fn mem_scrubbing_done(self) -> bool {
+ !self.mem_scrubbing()
+ }
+}
+
+register!(NV_PFALCON_FALCON_CPUCTL @ +0x00000100 {
+ 1:1 startcpu as bool;
+ 4:4 halted as bool;
+ 6:6 alias_en as bool;
+});
+
+register!(NV_PFALCON_FALCON_BOOTVEC @ +0x00000104 {
+ 31:0 value as u32;
+});
+
+register!(NV_PFALCON_FALCON_DMACTL @ +0x0000010c {
+ 0:0 require_ctx as bool;
+ 1:1 dmem_scrubbing as bool;
+ 2:2 imem_scrubbing as bool;
+ 6:3 dmaq_num as u8;
+ 7:7 secure_stat as bool;
+});
+
+register!(NV_PFALCON_FALCON_DMATRFBASE @ +0x00000110 {
+ 31:0 base as u32;
+});
+
+register!(NV_PFALCON_FALCON_DMATRFMOFFS @ +0x00000114 {
+ 23:0 offs as u32;
+});
+
+register!(NV_PFALCON_FALCON_DMATRFCMD @ +0x00000118 {
+ 0:0 full as bool;
+ 1:1 idle as bool;
+ 3:2 sec as u8;
+ 4:4 imem as bool;
+ 5:5 is_write as bool;
+ 10:8 size as u8 ?=> DmaTrfCmdSize;
+ 14:12 ctxdma as u8;
+ 16:16 set_dmtag as u8;
+});
+
+register!(NV_PFALCON_FALCON_DMATRFFBOFFS @ +0x0000011c {
+ 31:0 offs as u32;
+});
+
+register!(NV_PFALCON_FALCON_DMATRFBASE1 @ +0x00000128 {
+ 8:0 base as u16;
+});
+
+register!(NV_PFALCON_FALCON_HWCFG1 @ +0x0000012c {
+ 3:0 core_rev as u8 ?=> FalconCoreRev, "Core revision";
+ 5:4 security_model as u8 ?=> FalconSecurityModel, "Security model";
+ 7:6 core_rev_subversion as u8 ?=> FalconCoreRevSubversion, "Core revision subversion";
+});
+
+register!(NV_PFALCON_FALCON_CPUCTL_ALIAS @ +0x00000130 {
+ 1:1 startcpu as bool;
+});
+
+// Actually known as `NV_PSEC_FALCON_ENGINE` and `NV_PGSP_FALCON_ENGINE` depending on the falcon
+// instance.
+register!(NV_PFALCON_FALCON_ENGINE @ +0x000003c0 {
+ 0:0 reset as bool;
+});
+
+// TODO[REGA]: this is an array of registers.
+register!(NV_PFALCON_FBIF_TRANSCFG @ +0x00000600 {
+ 1:0 target as u8 ?=> FalconFbifTarget;
+ 2:2 mem_type as bool => FalconFbifMemType;
+});
+
+register!(NV_PFALCON_FBIF_CTL @ +0x00000624 {
+ 7:7 allow_phys_no_ctx as bool;
+});
+
+register!(NV_PFALCON2_FALCON_MOD_SEL @ +0x00001180 {
+ 7:0 algo as u8 ?=> FalconModSelAlgo;
+});
+
+register!(NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID @ +0x00001198 {
+ 7:0 ucode_id as u8;
+});
+
+register!(NV_PFALCON2_FALCON_BROM_ENGIDMASK @ +0x0000119c {
+ 31:0 value as u32;
+});
+
+// TODO[REGA]: this is an array of registers.
+register!(NV_PFALCON2_FALCON_BROM_PARAADDR @ +0x00001210 {
+ 31:0 value as u32;
+});
+
+// PRISCV
+
+register!(NV_PRISCV_RISCV_BCR_CTRL @ +0x00001668 {
+ 0:0 valid as bool;
+ 4:4 core_select as bool => PeregrineCoreSelect;
+ 8:8 br_fetch as bool;
+});
+
+// The modules below provide registers that are not identical on all supported chips. They should
+// only be used in HAL modules.
+
+pub(crate) mod gm107 {
+ // FUSE
+
+ register!(NV_FUSE_STATUS_OPT_DISPLAY @ 0x00021c04 {
+ 0:0 display_disabled as bool;
+ });
+}
+
+pub(crate) mod ga100 {
+ // FUSE
+
+ register!(NV_FUSE_STATUS_OPT_DISPLAY @ 0x00820c04 {
+ 0:0 display_disabled as bool;
+ });
+}
diff --git a/drivers/gpu/nova-core/regs/macros.rs b/drivers/gpu/nova-core/regs/macros.rs
index 7ecc70efb3cd..a3e6de1779d4 100644
--- a/drivers/gpu/nova-core/regs/macros.rs
+++ b/drivers/gpu/nova-core/regs/macros.rs
@@ -71,6 +71,20 @@
/// pr_info!("CPU CTL: {:#x}", cpuctl);
/// cpuctl.set_start(true).write(&bar, CPU_BASE);
/// ```
+///
+/// It is also possible to create an alias register by using the `=> ALIAS` syntax. This is useful
+/// for cases where a register's interpretation depends on the context:
+///
+/// ```no_run
+/// register!(SCRATCH_0 @ 0x0000100, "Scratch register 0" {
+///     31:0 value as u32, "Raw value";
+/// });
+///
+/// register!(SCRATCH_0_BOOT_STATUS => SCRATCH_0, "Boot status of the firmware" {
+///     0:0 completed as bool, "Whether the firmware has completed booting";
+/// });
+/// ```
+///
+/// In this example, `SCRATCH_0_BOOT_STATUS` uses the same I/O address as `SCRATCH_0`, while also
+/// providing its own `completed` method.
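+///
+/// A short usage sketch (assuming the `read` accessor generated by this macro and a mapped
+/// I/O region `bar`):
+///
+/// ```no_run
+/// // Both types decode the same I/O address, through different field accessors.
+/// let raw = SCRATCH_0::read(&bar).value();
+/// let booted = SCRATCH_0_BOOT_STATUS::read(&bar).completed();
+/// ```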
macro_rules! register {
// Creates a register at a fixed offset of the MMIO space.
(
@@ -78,25 +92,49 @@ macro_rules! register {
$($fields:tt)*
}
) => {
- register!(@common $name $(, $comment)?);
+ register!(@common $name @ $offset $(, $comment)?);
register!(@field_accessors $name { $($fields)* });
register!(@io $name @ $offset);
};
+ // Creates an alias register of a fixed-offset register `alias` with its own fields.
+ (
+ $name:ident => $alias:ident $(, $comment:literal)? {
+ $($fields:tt)*
+ }
+ ) => {
+ register!(@common $name @ $alias::OFFSET $(, $comment)?);
+ register!(@field_accessors $name { $($fields)* });
+ register!(@io $name @ $alias::OFFSET);
+ };
+
// Creates a register at a relative offset from a base address.
(
$name:ident @ + $offset:literal $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name $(, $comment)?);
+ register!(@common $name @ $offset $(, $comment)?);
register!(@field_accessors $name { $($fields)* });
register!(@io $name @ + $offset);
};
+ // Creates an alias register of a relative-offset register `alias` with its own fields.
+ (
+ $name:ident => + $alias:ident $(, $comment:literal)? {
+ $($fields:tt)*
+ }
+ ) => {
+ register!(@common $name @ $alias::OFFSET $(, $comment)?);
+ register!(@field_accessors $name { $($fields)* });
+ register!(@io $name @ + $alias::OFFSET);
+ };
+
+ // All rules below are helpers.
+
// Defines the wrapper `$name` type, as well as its relevant implementations (`Debug`, `BitOr`,
// and conversion to regular `u32`).
- (@common $name:ident $(, $comment:literal)?) => {
+ (@common $name:ident @ $offset:expr $(, $comment:literal)?) => {
$(
#[doc=$comment]
)?
@@ -104,7 +142,12 @@ macro_rules! register {
#[derive(Clone, Copy, Default)]
pub(crate) struct $name(u32);
- // TODO: display the raw hex value, then the value of all the fields. This requires
+ #[allow(dead_code)]
+ impl $name {
+ pub(crate) const OFFSET: usize = $offset;
+ }
+
+ // TODO[REGA]: display the raw hex value, then the value of all the fields. This requires
// matching the fields, which will complexify the syntax considerably...
impl ::core::fmt::Debug for $name {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
@@ -114,7 +157,7 @@ macro_rules! register {
}
}
- impl core::ops::BitOr for $name {
+ impl ::core::ops::BitOr for $name {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output {
@@ -161,7 +204,7 @@ macro_rules! register {
(@check_field_bounds $hi:tt:$lo:tt $field:ident as bool) => {
#[allow(clippy::eq_op)]
const _: () = {
- kernel::build_assert!(
+ ::kernel::build_assert!(
$hi == $lo,
concat!("boolean field `", stringify!($field), "` covers more than one bit")
);
@@ -172,7 +215,7 @@ macro_rules! register {
(@check_field_bounds $hi:tt:$lo:tt $field:ident as $type:tt) => {
#[allow(clippy::eq_op)]
const _: () = {
- kernel::build_assert!(
+ ::kernel::build_assert!(
$hi >= $lo,
concat!("field `", stringify!($field), "`'s MSB is smaller than its LSB")
);
@@ -234,7 +277,7 @@ macro_rules! register {
@leaf_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:ty
{ $process:expr } $to_type:ty => $res_type:ty $(, $comment:literal)?;
) => {
- kernel::macros::paste!(
+ ::kernel::macros::paste!(
const [<$field:upper>]: ::core::ops::RangeInclusive<u8> = $lo..=$hi;
const [<$field:upper _MASK>]: u32 = ((((1 << $hi) - 1) << 1) + 1) - ((1 << $lo) - 1);
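+ // e.g. (illustrative) for a `4:2` field: `((((1 << 4) - 1) << 1) + 1)` = 0b11111, and
+ // subtracting `((1 << 2) - 1)` = 0b11 leaves 0b11100, i.e. bits 2..=4 set.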
const [<$field:upper _SHIFT>]: u32 = Self::[<$field:upper _MASK>].trailing_zeros();
@@ -246,7 +289,7 @@ macro_rules! register {
)?
#[inline]
pub(crate) fn $field(self) -> $res_type {
- kernel::macros::paste!(
+ ::kernel::macros::paste!(
const MASK: u32 = $name::[<$field:upper _MASK>];
const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
);
@@ -255,7 +298,7 @@ macro_rules! register {
$process(field)
}
- kernel::macros::paste!(
+ ::kernel::macros::paste!(
$(
#[doc="Sets the value of this field:"]
#[doc=$comment]
@@ -264,7 +307,7 @@ macro_rules! register {
pub(crate) fn [<set_ $field>](mut self, value: $to_type) -> Self {
const MASK: u32 = $name::[<$field:upper _MASK>];
const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
- let value = ((value as u32) << SHIFT) & MASK;
+ let value = (u32::from(value) << SHIFT) & MASK;
self.0 = (self.0 & !MASK) | value;
self
@@ -273,7 +316,7 @@ macro_rules! register {
};
// Creates the IO accessors for a fixed offset register.
- (@io $name:ident @ $offset:literal) => {
+ (@io $name:ident @ $offset:expr) => {
#[allow(dead_code)]
impl $name {
#[inline]
diff --git a/drivers/gpu/nova-core/util.rs b/drivers/gpu/nova-core/util.rs
index 332a64cfc6a9..76cedf3710d7 100644
--- a/drivers/gpu/nova-core/util.rs
+++ b/drivers/gpu/nova-core/util.rs
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
+use kernel::prelude::*;
+use kernel::time::{Delta, Instant, Monotonic};
+
pub(crate) const fn to_lowercase_bytes<const N: usize>(s: &str) -> [u8; N] {
let src = s.as_bytes();
let mut dst = [0; N];
@@ -19,3 +22,26 @@ pub(crate) const fn const_bytes_to_str(bytes: &[u8]) -> &str {
Err(_) => kernel::build_error!("Bytes are not valid UTF-8."),
}
}
+
+/// Wait until `cond` returns `Some` or `timeout` has elapsed.
+///
+/// When `cond` evaluates to `Some`, its return value is returned.
+///
+/// `Err(ETIMEDOUT)` is returned if `timeout` has been reached without `cond` evaluating to
+/// `Some`.
+///
+/// TODO[DLAY]: replace with `read_poll_timeout` once it is available.
+/// (https://lore.kernel.org/lkml/20250220070611.214262-8-fujita.tomonori@gmail.com/)
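+///
+/// A minimal usage sketch (the `ready` check and the one-second timeout are illustrative):
+///
+/// ```ignore
+/// let value = wait_on(Delta::from_secs(1), || if ready() { Some(42) } else { None })?;
+/// ```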
+pub(crate) fn wait_on<R, F: Fn() -> Option<R>>(timeout: Delta, cond: F) -> Result<R> {
+ let start_time = Instant::<Monotonic>::now();
+
+ loop {
+ if let Some(ret) = cond() {
+ return Ok(ret);
+ }
+
+ if start_time.elapsed().as_nanos() > timeout.as_nanos() {
+ return Err(ETIMEDOUT);
+ }
+ }
+}
diff --git a/drivers/gpu/nova-core/vbios.rs b/drivers/gpu/nova-core/vbios.rs
new file mode 100644
index 000000000000..5b5d9f38cbb3
--- /dev/null
+++ b/drivers/gpu/nova-core/vbios.rs
@@ -0,0 +1,1166 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! VBIOS extraction and parsing.
+
+use crate::driver::Bar0;
+use crate::firmware::fwsec::Bcrt30Rsa3kSignature;
+use crate::firmware::FalconUCodeDescV3;
+use core::convert::TryFrom;
+use kernel::device;
+use kernel::error::Result;
+use kernel::pci;
+use kernel::prelude::*;
+
+/// The offset of the VBIOS ROM in the BAR0 space.
+const ROM_OFFSET: usize = 0x300000;
+/// The maximum length of the VBIOS ROM to scan into.
+const BIOS_MAX_SCAN_LEN: usize = 0x100000;
+/// The size to read ahead when parsing initial BIOS image headers.
+const BIOS_READ_AHEAD_SIZE: usize = 1024;
+/// The bit in the last-image indicator byte of the PCI Data Structure that marks the last
+/// image. Bits 0-6 are reserved; bit 7 is the last-image bit.
+const LAST_IMAGE_BIT_MASK: u8 = 0x80;
+
+// PMU lookup table entry types. Used to locate PMU table entries
+// in the Fwsec image, corresponding to falcon ucodes.
+#[expect(dead_code)]
+const FALCON_UCODE_ENTRY_APPID_FIRMWARE_SEC_LIC: u8 = 0x05;
+#[expect(dead_code)]
+const FALCON_UCODE_ENTRY_APPID_FWSEC_DBG: u8 = 0x45;
+const FALCON_UCODE_ENTRY_APPID_FWSEC_PROD: u8 = 0x85;
+
+/// VBIOS reader for constructing the VBIOS data.
+struct VbiosIterator<'a> {
+ pdev: &'a pci::Device,
+ bar0: &'a Bar0,
+ /// VBIOS data vector: as BIOS images are scanned, they are added to this vector for reference
+ /// or copying into other data structures. It holds the entire scanned contents of the VBIOS
+ /// and grows progressively; the cumulative length read so far is tracked, so contents already
+ /// read are never re-read and any gaps are filled in as the length is extended.
+ data: KVec<u8>,
+ /// Current offset of the [`Iterator`].
+ current_offset: usize,
+ /// Indicate whether the last image has been found.
+ last_found: bool,
+}
+
+impl<'a> VbiosIterator<'a> {
+ fn new(pdev: &'a pci::Device, bar0: &'a Bar0) -> Result<Self> {
+ Ok(Self {
+ pdev,
+ bar0,
+ data: KVec::new(),
+ current_offset: 0,
+ last_found: false,
+ })
+ }
+
+ /// Read bytes from the ROM at the current end of the data vector.
+ fn read_more(&mut self, len: usize) -> Result {
+ let current_len = self.data.len();
+ let start = ROM_OFFSET + current_len;
+
+ // Ensure length is a multiple of 4 for 32-bit reads
+ if len % core::mem::size_of::<u32>() != 0 {
+ dev_err!(
+ self.pdev.as_ref(),
+ "VBIOS read length {} is not a multiple of 4\n",
+ len
+ );
+ return Err(EINVAL);
+ }
+
+ self.data.reserve(len, GFP_KERNEL)?;
+ // Read ROM data bytes and push directly to `data`.
+ for addr in (start..start + len).step_by(core::mem::size_of::<u32>()) {
+ // Read 32-bit word from the VBIOS ROM
+ let word = self.bar0.try_read32(addr)?;
+
+ // Convert the `u32` to a 4 byte array and push each byte.
+ word.to_ne_bytes()
+ .iter()
+ .try_for_each(|&b| self.data.push(b, GFP_KERNEL))?;
+ }
+
+ Ok(())
+ }
+
+ /// Read bytes at a specific offset, filling any gap.
+ fn read_more_at_offset(&mut self, offset: usize, len: usize) -> Result {
+ if offset > BIOS_MAX_SCAN_LEN {
+ dev_err!(self.pdev.as_ref(), "Error: exceeded BIOS scan limit.\n");
+ return Err(EINVAL);
+ }
+
+ // If `offset` is beyond current data size, fill the gap first.
+ let current_len = self.data.len();
+ let gap_bytes = offset.saturating_sub(current_len);
+
+ // Now read the requested bytes at the offset.
+ self.read_more(gap_bytes + len)
+ }
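+
+ // Worked example (values illustrative): with `self.data.len() == 0x200`, calling
+ // `read_more_at_offset(0x400, 0x100)` computes `gap_bytes = 0x200` and reads 0x300
+ // bytes, growing the vector to 0x500 and fully covering the requested range.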
+
+ /// Read a BIOS image at a specific offset and create a [`BiosImage`] from it.
+ ///
+ /// `self.data` is extended as needed and a new [`BiosImage`] is returned.
+ /// `context` is a string describing the operation for error reporting.
+ fn read_bios_image_at_offset(
+ &mut self,
+ offset: usize,
+ len: usize,
+ context: &str,
+ ) -> Result<BiosImage> {
+ let data_len = self.data.len();
+ if offset + len > data_len {
+ self.read_more_at_offset(offset, len).inspect_err(|e| {
+ dev_err!(
+ self.pdev.as_ref(),
+ "Failed to read more at offset {:#x}: {:?}\n",
+ offset,
+ e
+ )
+ })?;
+ }
+
+ BiosImage::new(self.pdev, &self.data[offset..offset + len]).inspect_err(|err| {
+ dev_err!(
+ self.pdev.as_ref(),
+ "Failed to {} at offset {:#x}: {:?}\n",
+ context,
+ offset,
+ err
+ )
+ })
+ }
+}
+
+impl<'a> Iterator for VbiosIterator<'a> {
+ type Item = Result<BiosImage>;
+
+ /// Iterate over all VBIOS images until the last image is detected or the offset exceeds the
+ /// scan limit.
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.last_found {
+ return None;
+ }
+
+ if self.current_offset > BIOS_MAX_SCAN_LEN {
+ dev_err!(
+ self.pdev.as_ref(),
+ "Error: exceeded BIOS scan limit, stopping scan\n"
+ );
+ return None;
+ }
+
+ // Parse image headers first to get image size.
+ let image_size = match self.read_bios_image_at_offset(
+ self.current_offset,
+ BIOS_READ_AHEAD_SIZE,
+ "parse initial BIOS image headers",
+ ) {
+ Ok(image) => image.image_size_bytes(),
+ Err(e) => return Some(Err(e)),
+ };
+
+ // Now create a new `BiosImage` with the full image data.
+ let full_image = match self.read_bios_image_at_offset(
+ self.current_offset,
+ image_size,
+ "parse full BIOS image",
+ ) {
+ Ok(image) => image,
+ Err(e) => return Some(Err(e)),
+ };
+
+ self.last_found = full_image.is_last();
+
+ // Advance to next image (aligned to 512 bytes).
+ self.current_offset += image_size;
+ // TODO[NUMM]: replace with `align_up` once it lands.
+ self.current_offset = self.current_offset.next_multiple_of(512);
+
+ Some(Ok(full_image))
+ }
+}
+
+pub(crate) struct Vbios {
+ fwsec_image: FwSecBiosImage,
+}
+
+impl Vbios {
+ /// Probe for VBIOS extraction.
+ ///
+ /// Once the VBIOS object is built, `bar0` is not read for [`Vbios`] purposes anymore.
+ pub(crate) fn new(pdev: &pci::Device, bar0: &Bar0) -> Result<Vbios> {
+ // Images to extract from iteration
+ let mut pci_at_image: Option<PciAtBiosImage> = None;
+ let mut first_fwsec_image: Option<FwSecBiosBuilder> = None;
+ let mut second_fwsec_image: Option<FwSecBiosBuilder> = None;
+
+ // Parse all VBIOS images in the ROM
+ for image_result in VbiosIterator::new(pdev, bar0)? {
+ let full_image = image_result?;
+
+ dev_dbg!(
+ pdev.as_ref(),
+ "Found BIOS image: size: {:#x}, type: {}, last: {}\n",
+ full_image.image_size_bytes(),
+ full_image.image_type_str(),
+ full_image.is_last()
+ );
+
+ // Get references to images we will need after the loop, in order to
+ // set up the falcon data offset.
+ match full_image {
+ BiosImage::PciAt(image) => {
+ pci_at_image = Some(image);
+ }
+ BiosImage::FwSec(image) => {
+ if first_fwsec_image.is_none() {
+ first_fwsec_image = Some(image);
+ } else {
+ second_fwsec_image = Some(image);
+ }
+ }
+ // For now we don't need to handle these
+ BiosImage::Efi(_image) => {}
+ BiosImage::Nbsi(_image) => {}
+ }
+ }
+
+ // Using all the images, set up the falcon data pointer in Fwsec.
+ if let (Some(mut second), Some(first), Some(pci_at)) =
+ (second_fwsec_image, first_fwsec_image, pci_at_image)
+ {
+ second
+ .setup_falcon_data(pdev, &pci_at, &first)
+ .inspect_err(|e| dev_err!(pdev.as_ref(), "Falcon data setup failed: {:?}\n", e))?;
+ Ok(Vbios {
+ fwsec_image: second.build(pdev)?,
+ })
+ } else {
+ dev_err!(
+ pdev.as_ref(),
+ "Missing required images for falcon data setup, skipping\n"
+ );
+ Err(EINVAL)
+ }
+ }
+
+ pub(crate) fn fwsec_image(&self) -> &FwSecBiosImage {
+ &self.fwsec_image
+ }
+}
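+
+// A short usage sketch (assuming a probed `pdev` and a mapped `bar0`; error handling elided):
+//
+//     let vbios = Vbios::new(pdev, bar0)?;
+//     let desc = vbios.fwsec_image().header(pdev.as_ref())?;
+//     let ucode = vbios.fwsec_image().ucode(pdev.as_ref(), desc)?;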
+
+/// PCI Data Structure as defined in PCI Firmware Specification
+#[derive(Debug, Clone)]
+#[repr(C)]
+struct PcirStruct {
+ /// PCI Data Structure signature ("PCIR" or "NPDS")
+ signature: [u8; 4],
+ /// PCI Vendor ID (e.g., 0x10DE for NVIDIA)
+ vendor_id: u16,
+ /// PCI Device ID
+ device_id: u16,
+ /// Device List Pointer
+ device_list_ptr: u16,
+ /// PCI Data Structure Length
+ pci_data_struct_len: u16,
+ /// PCI Data Structure Revision
+ pci_data_struct_rev: u8,
+ /// Class code (3 bytes, 0x03 for display controller)
+ class_code: [u8; 3],
+ /// Size of this image in 512-byte blocks
+ image_len: u16,
+ /// Revision Level of the Vendor's ROM
+ vendor_rom_rev: u16,
+ /// ROM image type (0x00 = PC-AT compatible, 0x03 = EFI, 0x70 = NBSI)
+ code_type: u8,
+ /// Last image indicator (0x00 = Not last image, 0x80 = Last image)
+ last_image: u8,
+ /// Maximum Run-time Image Length (units of 512 bytes)
+ max_runtime_image_len: u16,
+}
+
+impl PcirStruct {
+ fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ if data.len() < core::mem::size_of::<PcirStruct>() {
+ dev_err!(pdev.as_ref(), "Not enough data for PcirStruct\n");
+ return Err(EINVAL);
+ }
+
+ let mut signature = [0u8; 4];
+ signature.copy_from_slice(&data[0..4]);
+
+ // Signature should be "PCIR" (0x52494350) or "NPDS" (0x5344504e).
+ if &signature != b"PCIR" && &signature != b"NPDS" {
+ dev_err!(
+ pdev.as_ref(),
+ "Invalid signature for PcirStruct: {:?}\n",
+ signature
+ );
+ return Err(EINVAL);
+ }
+
+ let mut class_code = [0u8; 3];
+ class_code.copy_from_slice(&data[13..16]);
+
+ let image_len = u16::from_le_bytes([data[16], data[17]]);
+ if image_len == 0 {
+ dev_err!(pdev.as_ref(), "Invalid image length: 0\n");
+ return Err(EINVAL);
+ }
+
+ Ok(PcirStruct {
+ signature,
+ vendor_id: u16::from_le_bytes([data[4], data[5]]),
+ device_id: u16::from_le_bytes([data[6], data[7]]),
+ device_list_ptr: u16::from_le_bytes([data[8], data[9]]),
+ pci_data_struct_len: u16::from_le_bytes([data[10], data[11]]),
+ pci_data_struct_rev: data[12],
+ class_code,
+ image_len,
+ vendor_rom_rev: u16::from_le_bytes([data[18], data[19]]),
+ code_type: data[20],
+ last_image: data[21],
+ max_runtime_image_len: u16::from_le_bytes([data[22], data[23]]),
+ })
+ }
+
+ /// Check if this is the last image in the ROM.
+ fn is_last(&self) -> bool {
+ self.last_image & LAST_IMAGE_BIT_MASK != 0
+ }
+
+ /// Calculate image size in bytes from 512-byte blocks.
+ fn image_size_bytes(&self) -> usize {
+ self.image_len as usize * 512
+ }
+}
+
+/// BIOS Information Table (BIT) Header.
+///
+/// This is the head of the BIT table, which is used to locate the Falcon data. The BIT table
+/// (with its header) is in the [`PciAtBiosImage`], and the falcon data it points to is in the
+/// [`FwSecBiosImage`].
+#[derive(Debug, Clone, Copy)]
+#[expect(dead_code)]
+struct BitHeader {
+ /// 0h: BIT Header Identifier (BMP=0x7FFF/BIT=0xB8FF)
+ id: u16,
+ /// 2h: BIT Header Signature ("BIT\0")
+ signature: [u8; 4],
+ /// 6h: Binary Coded Decimal Version, ex: 0x0100 is 1.00.
+ bcd_version: u16,
+ /// 8h: Size of BIT Header (in bytes)
+ header_size: u8,
+ /// 9h: Size of BIT Tokens (in bytes)
+ token_size: u8,
+ /// 0Ah: Number of token entries that follow
+ token_entries: u8,
+ /// 0Bh: BIT Header Checksum
+ checksum: u8,
+}
+
+impl BitHeader {
+ fn new(data: &[u8]) -> Result<Self> {
+ if data.len() < 12 {
+ return Err(EINVAL);
+ }
+
+ let mut signature = [0u8; 4];
+ signature.copy_from_slice(&data[2..6]);
+
+ // Check header ID and signature
+ let id = u16::from_le_bytes([data[0], data[1]]);
+ if id != 0xB8FF || &signature != b"BIT\0" {
+ return Err(EINVAL);
+ }
+
+ Ok(BitHeader {
+ id,
+ signature,
+ bcd_version: u16::from_le_bytes([data[6], data[7]]),
+ header_size: data[8],
+ token_size: data[9],
+ token_entries: data[10],
+ checksum: data[11],
+ })
+ }
+}
+
+/// BIT Token Entry: Records in the BIT table that follow the BIT header.
+#[derive(Debug, Clone, Copy)]
+#[expect(dead_code)]
+struct BitToken {
+ /// 00h: Token identifier
+ id: u8,
+ /// 01h: Version of the token data
+ data_version: u8,
+ /// 02h: Size of token data in bytes
+ data_size: u16,
+ /// 04h: Offset to the token data
+ data_offset: u16,
+}
+
+// Define the token ID for the Falcon data
+const BIT_TOKEN_ID_FALCON_DATA: u8 = 0x70;
+
+impl BitToken {
+ /// Find a BIT token entry by BIT ID in a PciAtBiosImage
+ fn from_id(image: &PciAtBiosImage, token_id: u8) -> Result<Self> {
+ let header = &image.bit_header;
+
+ // Offset to the first token entry
+ let tokens_start = image.bit_offset + header.header_size as usize;
+
+ for i in 0..header.token_entries as usize {
+ let entry_offset = tokens_start + (i * header.token_size as usize);
+
+ // Make sure we don't go out of bounds
+ if entry_offset + header.token_size as usize > image.base.data.len() {
+ return Err(EINVAL);
+ }
+
+ // Check if this token has the requested ID
+ if image.base.data[entry_offset] == token_id {
+ return Ok(BitToken {
+ id: image.base.data[entry_offset],
+ data_version: image.base.data[entry_offset + 1],
+ data_size: u16::from_le_bytes([
+ image.base.data[entry_offset + 2],
+ image.base.data[entry_offset + 3],
+ ]),
+ data_offset: u16::from_le_bytes([
+ image.base.data[entry_offset + 4],
+ image.base.data[entry_offset + 5],
+ ]),
+ });
+ }
+ }
+
+ // Token not found
+ Err(ENOENT)
+ }
+}
+
+/// PCI ROM Expansion Header as defined in PCI Firmware Specification.
+///
+/// This header is at the beginning of every image in the set of images in the ROM. It contains
+/// a pointer to the PCI Data Structure, which describes the image. For "NBSI" images (NoteBook
+/// System Information), the ROM header deviates from the standard and contains an offset to the
+/// NBSI image; we do not yet parse that in this module and keep it for future reference.
+#[derive(Debug, Clone, Copy)]
+#[expect(dead_code)]
+struct PciRomHeader {
+ /// 00h: Signature (0xAA55)
+ signature: u16,
+ /// 02h: Reserved bytes for processor architecture unique data (20 bytes)
+ reserved: [u8; 20],
+ /// 16h: NBSI Data Offset (NBSI-specific, offset from header to NBSI image)
+ nbsi_data_offset: Option<u16>,
+ /// 18h: Pointer to PCI Data Structure (offset from start of ROM image)
+ pci_data_struct_offset: u16,
+ /// 1Ah: Size of block (this is NBSI-specific)
+ size_of_block: Option<u32>,
+}
+
+impl PciRomHeader {
+ fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ if data.len() < 26 {
+ // Need at least 26 bytes to read the pointer to the PCI Data Structure; the
+ // optional NBSI fields are bounds-checked separately below.
+ return Err(EINVAL);
+ }
+
+ let signature = u16::from_le_bytes([data[0], data[1]]);
+
+ // Check for valid ROM signatures.
+ match signature {
+ 0xAA55 | 0xBB77 | 0x4E56 => {}
+ _ => {
+ dev_err!(pdev.as_ref(), "ROM signature unknown {:#x}\n", signature);
+ return Err(EINVAL);
+ }
+ }
+
+ // Read the pointer to the PCI Data Structure at offset 0x18.
+ let pci_data_struct_ptr = u16::from_le_bytes([data[24], data[25]]);
+
+ // Try to read optional fields if enough data.
+ let mut size_of_block = None;
+ let mut nbsi_data_offset = None;
+
+ if data.len() >= 30 {
+ // Read size_of_block at offset 0x1A.
+ size_of_block = Some(
+ u32::from(data[29]) << 24
+ | u32::from(data[28]) << 16
+ | u32::from(data[27]) << 8
+ | u32::from(data[26]),
+ );
+ }
+
+ // For NBSI images, try to read the nbsiDataOffset at offset 0x16.
+ if data.len() >= 24 {
+ nbsi_data_offset = Some(u16::from_le_bytes([data[22], data[23]]));
+ }
+
+ Ok(PciRomHeader {
+ signature,
+ reserved: [0u8; 20],
+ pci_data_struct_offset: pci_data_struct_ptr,
+ size_of_block,
+ nbsi_data_offset,
+ })
+ }
+}
+
+/// NVIDIA PCI Data Extension Structure.
+///
+/// This is similar to the PCI Data Structure, but is Nvidia-specific and is placed right after the
+/// PCI Data Structure. It contains some fields that are redundant with the PCI Data Structure, but
+/// are needed for traversing the BIOS images. It is expected to be present in all BIOS images
+/// except for NBSI images.
+#[derive(Debug, Clone)]
+#[repr(C)]
+struct NpdeStruct {
+ /// 00h: Signature ("NPDE")
+ signature: [u8; 4],
+ /// 04h: NVIDIA PCI Data Extension Revision
+ npci_data_ext_rev: u16,
+ /// 06h: NVIDIA PCI Data Extension Length
+ npci_data_ext_len: u16,
+ /// 08h: Sub-image Length (in 512-byte units)
+ subimage_len: u16,
+ /// 0Ah: Last image indicator flag
+ last_image: u8,
+}
+
+impl NpdeStruct {
+ fn new(pdev: &pci::Device, data: &[u8]) -> Option<Self> {
+ if data.len() < core::mem::size_of::<Self>() {
+ dev_dbg!(pdev.as_ref(), "Not enough data for NpdeStruct\n");
+ return None;
+ }
+
+ let mut signature = [0u8; 4];
+ signature.copy_from_slice(&data[0..4]);
+
+ // Signature should be "NPDE" (0x4544504E).
+ if &signature != b"NPDE" {
+ dev_dbg!(
+ pdev.as_ref(),
+ "Invalid signature for NpdeStruct: {:?}\n",
+ signature
+ );
+ return None;
+ }
+
+ let subimage_len = u16::from_le_bytes([data[8], data[9]]);
+ if subimage_len == 0 {
+ dev_dbg!(pdev.as_ref(), "Invalid subimage length: 0\n");
+ return None;
+ }
+
+ Some(NpdeStruct {
+ signature,
+ npci_data_ext_rev: u16::from_le_bytes([data[4], data[5]]),
+ npci_data_ext_len: u16::from_le_bytes([data[6], data[7]]),
+ subimage_len,
+ last_image: data[10],
+ })
+ }
+
+ /// Check if this is the last image in the ROM.
+ fn is_last(&self) -> bool {
+ self.last_image & LAST_IMAGE_BIT_MASK != 0
+ }
+
+ /// Calculate image size in bytes from 512-byte blocks.
+ fn image_size_bytes(&self) -> usize {
+ self.subimage_len as usize * 512
+ }
+
+ /// Try to find NPDE in the data; the NPDE is right after the PCIR.
+ fn find_in_data(
+ pdev: &pci::Device,
+ data: &[u8],
+ rom_header: &PciRomHeader,
+ pcir: &PcirStruct,
+ ) -> Option<Self> {
+ // Calculate the offset where NPDE might be located
+ // NPDE should be right after the PCIR structure, aligned to 16 bytes
+ let pcir_offset = rom_header.pci_data_struct_offset as usize;
+ let npde_start = (pcir_offset + pcir.pci_data_struct_len as usize + 0x0F) & !0x0F;
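+ // Worked example (values illustrative): with `pcir_offset = 0x1A4` and
+ // `pci_data_struct_len = 0x18`, the sum is 0x1BC; adding 0x0F gives 0x1CB, and
+ // masking with `!0x0F` rounds down to 0x1C0, the next 16-byte boundary.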
+
+ // Check if we have enough data
+ if npde_start + core::mem::size_of::<Self>() > data.len() {
+ dev_dbg!(pdev.as_ref(), "Not enough data for NPDE\n");
+ return None;
+ }
+
+ // Try to create NPDE from the data
+ NpdeStruct::new(pdev, &data[npde_start..])
+ }
+}
+
+// Use a macro to implement BiosImage enum and methods. This avoids having to
+// repeat each enum type when implementing functions like base() in BiosImage.
+macro_rules! bios_image {
+ (
+ $($variant:ident: $class:ident),* $(,)?
+ ) => {
+ // BiosImage enum with variants for each image type
+ enum BiosImage {
+ $($variant($class)),*
+ }
+
+ impl BiosImage {
+ /// Get a reference to the common BIOS image data regardless of type
+ fn base(&self) -> &BiosImageBase {
+ match self {
+ $(Self::$variant(img) => &img.base),*
+ }
+ }
+
+ /// Returns a string representing the type of BIOS image
+ fn image_type_str(&self) -> &'static str {
+ match self {
+ $(Self::$variant(_) => stringify!($variant)),*
+ }
+ }
+ }
+ }
+}
+
+impl BiosImage {
+ /// Check if this is the last image.
+ fn is_last(&self) -> bool {
+ let base = self.base();
+
+ // For NBSI images (type == 0x70), return true as they're
+ // considered the last image
+ if matches!(self, Self::Nbsi(_)) {
+ return true;
+ }
+
+ // For other image types, check the NPDE first if available
+ if let Some(ref npde) = base.npde {
+ return npde.is_last();
+ }
+
+ // Otherwise, fall back to checking the PCIR last_image flag
+ base.pcir.is_last()
+ }
+
+ /// Get the image size in bytes.
+ fn image_size_bytes(&self) -> usize {
+ let base = self.base();
+
+ // Prefer NPDE image size if available
+ if let Some(ref npde) = base.npde {
+ return npde.image_size_bytes();
+ }
+
+ // Otherwise, fall back to the PCIR image size
+ base.pcir.image_size_bytes()
+ }
+
+ /// Create a [`BiosImageBase`] from a byte slice and convert it to a [`BiosImage`] which
+ /// triggers the constructor of the specific BiosImage enum variant.
+ fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ let base = BiosImageBase::new(pdev, data)?;
+ let image = base.into_image().inspect_err(|e| {
+ dev_err!(pdev.as_ref(), "Failed to create BiosImage: {:?}\n", e);
+ })?;
+
+ Ok(image)
+ }
+}
+
+bios_image! {
+ PciAt: PciAtBiosImage, // PCI-AT compatible BIOS image
+ Efi: EfiBiosImage, // EFI (Extensible Firmware Interface)
+ Nbsi: NbsiBiosImage, // NBSI (Nvidia Bios System Interface)
+ FwSec: FwSecBiosBuilder, // FWSEC (Firmware Security)
+}
+
+/// The PciAt BIOS image is typically the first BIOS image type found in the BIOS image chain.
+///
+/// It contains the BIT header and the BIT tokens.
+struct PciAtBiosImage {
+ base: BiosImageBase,
+ bit_header: BitHeader,
+ bit_offset: usize,
+}
+
+struct EfiBiosImage {
+ base: BiosImageBase,
+ // EFI-specific fields can be added here in the future.
+}
+
+struct NbsiBiosImage {
+ base: BiosImageBase,
+ // NBSI-specific fields can be added here in the future.
+}
+
+struct FwSecBiosBuilder {
+ base: BiosImageBase,
+ /// These are temporary fields that are used during the construction of the
+ /// [`FwSecBiosBuilder`].
+ ///
+ /// Once FwSecBiosBuilder is constructed, the `falcon_ucode_offset` will be copied into a new
+ /// [`FwSecBiosImage`].
+ ///
+ /// The offset of the Falcon data from the start of Fwsec image.
+ falcon_data_offset: Option<usize>,
+ /// The [`PmuLookupTable`] starts at the offset of the falcon data pointer.
+ pmu_lookup_table: Option<PmuLookupTable>,
+ /// The offset of the Falcon ucode.
+ falcon_ucode_offset: Option<usize>,
+}
+
+/// The [`FwSecBiosImage`] structure contains the PMU table and the Falcon Ucode.
+///
+/// The PMU table contains voltage/frequency tables as well as a pointer to the Falcon Ucode.
+pub(crate) struct FwSecBiosImage {
+ base: BiosImageBase,
+ /// The offset of the Falcon ucode.
+ falcon_ucode_offset: usize,
+}
+
+// Convert from BiosImageBase to BiosImage
+impl TryFrom<BiosImageBase> for BiosImage {
+ type Error = Error;
+
+ fn try_from(base: BiosImageBase) -> Result<Self> {
+ match base.pcir.code_type {
+ 0x00 => Ok(BiosImage::PciAt(base.try_into()?)),
+ 0x03 => Ok(BiosImage::Efi(EfiBiosImage { base })),
+ 0x70 => Ok(BiosImage::Nbsi(NbsiBiosImage { base })),
+ 0xE0 => Ok(BiosImage::FwSec(FwSecBiosBuilder {
+ base,
+ falcon_data_offset: None,
+ pmu_lookup_table: None,
+ falcon_ucode_offset: None,
+ })),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
+/// BIOS Image structure containing various headers and reference fields to all BIOS images.
+///
+/// Each BiosImage type has a BiosImageBase type along with other image-specific fields. Note that
+/// Rust favors composition of types over inheritance.
+#[derive(Debug)]
+#[expect(dead_code)]
+struct BiosImageBase {
+ /// PCI ROM Expansion Header
+ rom_header: PciRomHeader,
+ /// PCI Data Structure
+ pcir: PcirStruct,
+ /// NVIDIA PCI Data Extension (optional)
+ npde: Option<NpdeStruct>,
+ /// Image data (includes ROM header and PCIR)
+ data: KVec<u8>,
+}
+
+impl BiosImageBase {
+ fn into_image(self) -> Result<BiosImage> {
+ BiosImage::try_from(self)
+ }
+
+ /// Creates a new BiosImageBase from raw byte data.
+ fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ // Ensure we have enough data for the ROM header.
+ if data.len() < 26 {
+ dev_err!(pdev.as_ref(), "Not enough data for ROM header\n");
+ return Err(EINVAL);
+ }
+
+ // Parse the ROM header.
+ let rom_header = PciRomHeader::new(pdev, &data[0..26])
+ .inspect_err(|e| dev_err!(pdev.as_ref(), "Failed to create PciRomHeader: {:?}\n", e))?;
+
+ // Get the PCI Data Structure using the pointer from the ROM header.
+ let pcir_offset = rom_header.pci_data_struct_offset as usize;
+ let pcir_data = data
+ .get(pcir_offset..pcir_offset + core::mem::size_of::<PcirStruct>())
+ .ok_or(EINVAL)
+ .inspect_err(|_| {
+ dev_err!(
+ pdev.as_ref(),
+ "PCIR offset {:#x} out of bounds (data length: {})\n",
+ pcir_offset,
+ data.len()
+ );
+ dev_err!(
+ pdev.as_ref(),
+ "Consider reading more data for construction of BiosImage\n"
+ );
+ })?;
+
+ let pcir = PcirStruct::new(pdev, pcir_data)
+ .inspect_err(|e| dev_err!(pdev.as_ref(), "Failed to create PcirStruct: {:?}\n", e))?;
+
+ // Look for NPDE structure if this is not an NBSI image (type != 0x70).
+ let npde = NpdeStruct::find_in_data(pdev, data, &rom_header, &pcir);
+
+ // Create a copy of the data.
+ let mut data_copy = KVec::new();
+ data_copy.extend_from_slice(data, GFP_KERNEL)?;
+
+ Ok(BiosImageBase {
+ rom_header,
+ pcir,
+ npde,
+ data: data_copy,
+ })
+ }
+}
+
+impl PciAtBiosImage {
+ /// Find a byte pattern in a slice.
+ fn find_byte_pattern(haystack: &[u8], needle: &[u8]) -> Result<usize> {
+ haystack
+ .windows(needle.len())
+ .position(|window| window == needle)
+ .ok_or(EINVAL)
+ }
+
+ /// Find the BIT header in the [`PciAtBiosImage`].
+ fn find_bit_header(data: &[u8]) -> Result<(BitHeader, usize)> {
+ let bit_pattern = [0xff, 0xb8, b'B', b'I', b'T', 0x00];
+ let bit_offset = Self::find_byte_pattern(data, &bit_pattern)?;
+ let bit_header = BitHeader::new(&data[bit_offset..])?;
+
+ Ok((bit_header, bit_offset))
+ }
+
+ /// Get a BIT token entry from the BIT table in the [`PciAtBiosImage`]
+ fn get_bit_token(&self, token_id: u8) -> Result<BitToken> {
+ BitToken::from_id(self, token_id)
+ }
+
+ /// Find the Falcon data pointer structure in the [`PciAtBiosImage`].
+ ///
+ /// This is just a 4-byte structure that contains a pointer to the Falcon data in the FWSEC
+ /// image.
+ fn falcon_data_ptr(&self, pdev: &pci::Device) -> Result<u32> {
+ let token = self.get_bit_token(BIT_TOKEN_ID_FALCON_DATA)?;
+
+ // Make sure we don't go out of bounds
+ if token.data_offset as usize + 4 > self.base.data.len() {
+ return Err(EINVAL);
+ }
+
+ // Read the 4 bytes at the offset specified in the token.
+ let offset = token.data_offset as usize;
+ let bytes: [u8; 4] = self.base.data[offset..offset + 4].try_into().map_err(|_| {
+ dev_err!(pdev.as_ref(), "Failed to convert data slice to array");
+ EINVAL
+ })?;
+
+ let data_ptr = u32::from_le_bytes(bytes);
+
+ if (data_ptr as usize) < self.base.data.len() {
+ dev_err!(pdev.as_ref(), "Falcon data pointer out of bounds\n");
+ return Err(EINVAL);
+ }
+
+ Ok(data_ptr)
+ }
+}
+
+impl TryFrom<BiosImageBase> for PciAtBiosImage {
+ type Error = Error;
+
+ fn try_from(base: BiosImageBase) -> Result<Self> {
+ let data_slice = &base.data;
+ let (bit_header, bit_offset) = PciAtBiosImage::find_bit_header(data_slice)?;
+
+ Ok(PciAtBiosImage {
+ base,
+ bit_header,
+ bit_offset,
+ })
+ }
+}
+
+/// The [`PmuLookupTableEntry`] structure is a single entry in the [`PmuLookupTable`].
+///
+/// See the [`PmuLookupTable`] description for more information.
+#[expect(dead_code)]
+struct PmuLookupTableEntry {
+ application_id: u8,
+ target_id: u8,
+ data: u32,
+}
+
+impl PmuLookupTableEntry {
+ fn new(data: &[u8]) -> Result<Self> {
+ if data.len() < 6 {
+ return Err(EINVAL);
+ }
+
+ Ok(PmuLookupTableEntry {
+ application_id: data[0],
+ target_id: data[1],
+ data: u32::from_le_bytes(data[2..6].try_into().map_err(|_| EINVAL)?),
+ })
+ }
+}
+
+/// The [`PmuLookupTable`] structure is used to find the [`PmuLookupTableEntry`] for a given
+/// application ID.
+///
+/// The table of entries is pointed to by the falcon data pointer in the BIT table, and is used to
+/// locate the Falcon Ucode.
+#[expect(dead_code)]
+struct PmuLookupTable {
+ version: u8,
+ header_len: u8,
+ entry_len: u8,
+ entry_count: u8,
+ table_data: KVec<u8>,
+}
+
+impl PmuLookupTable {
+ fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ if data.len() < 4 {
+ return Err(EINVAL);
+ }
+
+ let header_len = data[1] as usize;
+ let entry_len = data[2] as usize;
+ let entry_count = data[3] as usize;
+
+ let required_bytes = header_len + (entry_count * entry_len);
+
+ if data.len() < required_bytes {
+ dev_err!(
+ pdev.as_ref(),
+ "PmuLookupTable data length less than required\n"
+ );
+ return Err(EINVAL);
+ }
+
+ // Create a copy of only the table data
+ let table_data = {
+ let mut ret = KVec::new();
+ ret.extend_from_slice(&data[header_len..required_bytes], GFP_KERNEL)?;
+ ret
+ };
+
+ // Debug logging of entries (dumps the table data to dmesg)
+ for i in (header_len..required_bytes).step_by(entry_len) {
+ dev_dbg!(
+ pdev.as_ref(),
+ "PMU entry: {:02x?}\n",
+ &data[i..][..entry_len]
+ );
+ }
+
+ Ok(PmuLookupTable {
+ version: data[0],
+ header_len: header_len as u8,
+ entry_len: entry_len as u8,
+ entry_count: entry_count as u8,
+ table_data,
+ })
+ }
+
+ fn lookup_index(&self, idx: u8) -> Result<PmuLookupTableEntry> {
+ if idx >= self.entry_count {
+ return Err(EINVAL);
+ }
+
+ let index = (idx as usize) * self.entry_len as usize;
+ PmuLookupTableEntry::new(&self.table_data[index..])
+ }
+
+ // Find an entry by its application ID (type value).
+ fn find_entry_by_type(&self, entry_type: u8) -> Result<PmuLookupTableEntry> {
+ for i in 0..self.entry_count {
+ let entry = self.lookup_index(i)?;
+ if entry.application_id == entry_type {
+ return Ok(entry);
+ }
+ }
+
+ Err(EINVAL)
+ }
+}
+
+impl FwSecBiosBuilder {
+ fn setup_falcon_data(
+ &mut self,
+ pdev: &pci::Device,
+ pci_at_image: &PciAtBiosImage,
+ first_fwsec: &FwSecBiosBuilder,
+ ) -> Result {
+ let mut offset = pci_at_image.falcon_data_ptr(pdev)? as usize;
+ let mut pmu_in_first_fwsec = false;
+
+ // The falcon data pointer assumes that the PciAt and FWSEC images
+ // are contiguous in memory. However, testing shows the EFI image sits in
+ // between them, so compensate by calculating the offset from the end of
+ // the PciAt image rather than from its start.
+ offset -= pci_at_image.base.data.len();
+
+ // The offset is now from the start of the first Fwsec image, however
+ // the offset points to a location in the second Fwsec image. Since
+ // the fwsec images are contiguous, subtract the length of the first Fwsec
+ // image from the offset to get the offset to the start of the second
+ // Fwsec image.
+ if offset < first_fwsec.base.data.len() {
+ pmu_in_first_fwsec = true;
+ } else {
+ offset -= first_fwsec.base.data.len();
+ }
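+
+ // Worked example (sizes illustrative): with a 0x8000-byte PciAt image and a
+ // 0x4000-byte first Fwsec image, a falcon data pointer of 0xE000 becomes
+ // 0xE000 - 0x8000 = 0x6000; since 0x6000 >= 0x4000, the PMU table is in the
+ // second Fwsec image, at offset 0x6000 - 0x4000 = 0x2000 from its start.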
+
+ self.falcon_data_offset = Some(offset);
+
+ if pmu_in_first_fwsec {
+ self.pmu_lookup_table =
+ Some(PmuLookupTable::new(pdev, &first_fwsec.base.data[offset..])?);
+ } else {
+ self.pmu_lookup_table = Some(PmuLookupTable::new(pdev, &self.base.data[offset..])?);
+ }
+
+ match self
+ .pmu_lookup_table
+ .as_ref()
+ .ok_or(EINVAL)?
+ .find_entry_by_type(FALCON_UCODE_ENTRY_APPID_FWSEC_PROD)
+ {
+ Ok(entry) => {
+ let mut ucode_offset = entry.data as usize;
+ ucode_offset -= pci_at_image.base.data.len();
+ if ucode_offset < first_fwsec.base.data.len() {
+ dev_err!(pdev.as_ref(), "Falcon Ucode offset not in second Fwsec.\n");
+ return Err(EINVAL);
+ }
+ ucode_offset -= first_fwsec.base.data.len();
+ self.falcon_ucode_offset = Some(ucode_offset);
+ }
+ Err(e) => {
+ dev_err!(
+ pdev.as_ref(),
+ "PmuLookupTableEntry not found, error: {:?}\n",
+ e
+ );
+ return Err(EINVAL);
+ }
+ }
+ Ok(())
+ }
+
+ /// Build the final FwSecBiosImage from this builder
+ fn build(self, pdev: &pci::Device) -> Result<FwSecBiosImage> {
+ let ret = FwSecBiosImage {
+ base: self.base,
+ falcon_ucode_offset: self.falcon_ucode_offset.ok_or(EINVAL)?,
+ };
+
+ if cfg!(debug_assertions) {
+ // Print the desc header for debugging
+ let desc = ret.header(pdev.as_ref())?;
+ dev_dbg!(pdev.as_ref(), "PmuLookupTableEntry desc: {:#?}\n", desc);
+ }
+
+ Ok(ret)
+ }
+}
+
+impl FwSecBiosImage {
+ /// Get the FwSec header ([`FalconUCodeDescV3`]).
+ pub(crate) fn header(&self, dev: &device::Device) -> Result<&FalconUCodeDescV3> {
+ // Get the falcon ucode offset that was found in setup_falcon_data.
+ let falcon_ucode_offset = self.falcon_ucode_offset;
+
+ // Make sure the offset is within the data bounds.
+ if falcon_ucode_offset + core::mem::size_of::<FalconUCodeDescV3>() > self.base.data.len() {
+ dev_err!(dev, "fwsec-frts header not contained within BIOS bounds\n");
+ return Err(ERANGE);
+ }
+
+ // Read the first 4 bytes to get the version.
+ let hdr_bytes: [u8; 4] = self.base.data[falcon_ucode_offset..falcon_ucode_offset + 4]
+ .try_into()
+ .map_err(|_| EINVAL)?;
+ let hdr = u32::from_le_bytes(hdr_bytes);
+ let ver = (hdr & 0xff00) >> 8;
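+ // e.g. (illustrative): a header word of 0x0342 yields version (0x0342 & 0xff00) >> 8 == 3.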
+
+ if ver != 3 {
+ dev_err!(dev, "invalid fwsec firmware version: {:?}\n", ver);
+ return Err(EINVAL);
+ }
+
+ // Return a reference to the FalconUCodeDescV3 structure.
+ //
+ // SAFETY: We have checked that `falcon_ucode_offset + size_of::<FalconUCodeDescV3>` is
+ // within the bounds of `data`. Also, this data vector is from ROM, and the `data` field
+ // in `BiosImageBase` is immutable after construction.
+ Ok(unsafe {
+ &*(self
+ .base
+ .data
+ .as_ptr()
+ .add(falcon_ucode_offset)
+ .cast::<FalconUCodeDescV3>())
+ })
+ }
+
+ /// Get the ucode data as a byte slice
+ pub(crate) fn ucode(&self, dev: &device::Device, desc: &FalconUCodeDescV3) -> Result<&[u8]> {
+ let falcon_ucode_offset = self.falcon_ucode_offset;
+
+ // The ucode data follows the descriptor.
+ let ucode_data_offset = falcon_ucode_offset + desc.size();
+ let size = (desc.imem_load_size + desc.dmem_load_size) as usize;
+
+ // Get the data slice, checking bounds in a single operation.
+ self.base
+ .data
+ .get(ucode_data_offset..ucode_data_offset + size)
+ .ok_or(ERANGE)
+ .inspect_err(|_| dev_err!(dev, "fwsec ucode data not contained within BIOS bounds\n"))
+ }
+
+ /// Get the signatures as a byte slice
+ pub(crate) fn sigs(
+ &self,
+ dev: &device::Device,
+ desc: &FalconUCodeDescV3,
+ ) -> Result<&[Bcrt30Rsa3kSignature]> {
+ // The signatures data follows the descriptor.
+ let sigs_data_offset = self.falcon_ucode_offset + core::mem::size_of::<FalconUCodeDescV3>();
+ let sigs_size =
+ desc.signature_count as usize * core::mem::size_of::<Bcrt30Rsa3kSignature>();
+
+ // Make sure the data is within bounds.
+ if sigs_data_offset + sigs_size > self.base.data.len() {
+ dev_err!(
+ dev,
+ "fwsec signatures data not contained within BIOS bounds\n"
+ );
+ return Err(ERANGE);
+ }
+
+ // SAFETY: we checked that `sigs_data_offset + (signature_count *
+ // size_of::<Bcrt30Rsa3kSignature>())` is within the bounds of `data`.
+ Ok(unsafe {
+ core::slice::from_raw_parts(
+ self.base
+ .data
+ .as_ptr()
+ .add(sigs_data_offset)
+ .cast::<Bcrt30Rsa3kSignature>(),
+ desc.signature_count as usize,
+ )
+ })
+ }
+}
diff --git a/drivers/gpu/trace/Kconfig b/drivers/gpu/trace/Kconfig
index c24e9edd022e..cd3d19c4a201 100644
--- a/drivers/gpu/trace/Kconfig
+++ b/drivers/gpu/trace/Kconfig
@@ -1,4 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
config TRACE_GPU_MEM
- bool
+ bool "Enable GPU memory usage tracepoints"
+ default n
+ help
+ Choose this option to enable tracepoints for tracking
+ global and per-process GPU memory usage. Intended for
+ performance profiling and required for Android.
+
+ Tracepoint availability varies by GPU driver.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 18f2c92beff8..68e45a26e85f 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -437,7 +437,7 @@ find_active_client(struct list_head *head)
*/
bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
{
- if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+ if (pci_is_display(pdev)) {
/*
* apple-gmux is needed on pre-retina MacBook Pro
* to probe the panel if pdev is the inactive GPU.
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 43859fc75747..a57901203aeb 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -771,6 +771,7 @@ config HID_MULTITOUCH
Say Y here if you have one of the following devices:
- 3M PCT touch screens
- ActionStar dual touch panels
+ - Apple Touch Bar on x86 MacBook Pros
- Atmel panels
- Cando dual touch panels
- Chunghwa panels
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 3438d392920f..0f2cbae39b2b 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -146,6 +146,8 @@ static const char *get_sensor_name(int idx)
return "gyroscope";
case mag_idx:
return "magnetometer";
+ case op_idx:
+ return "operating-mode";
case als_idx:
case ACS_IDX: /* ambient color sensor */
return "ALS";
@@ -243,6 +245,20 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
rc = -ENOMEM;
goto cleanup;
}
+
+ if (cl_data->sensor_idx[i] == op_idx) {
+ info.period = AMD_SFH_IDLE_LOOP;
+ info.sensor_idx = cl_data->sensor_idx[i];
+ info.dma_address = cl_data->sensor_dma_addr[i];
+ mp2_ops->start(privdata, info);
+ cl_data->sensor_sts[i] = amd_sfh_wait_for_response(privdata,
+ cl_data->sensor_idx[i],
+ SENSOR_ENABLED);
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
+ cl_data->is_any_sensor_enabled = true;
+ continue;
+ }
+
cl_data->sensor_sts[i] = SENSOR_DISABLED;
cl_data->sensor_requested_cnt[i] = 0;
cl_data->cur_hid_dev = i;
@@ -303,6 +319,13 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->cur_hid_dev = i;
+ if (cl_data->sensor_idx[i] == op_idx) {
+ dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
+ continue;
+ }
+
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
rc = amdtp_hid_probe(i, cl_data);
if (rc)
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
index 1c91be8daedd..7452b0302953 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -11,7 +11,7 @@
#ifndef AMDSFH_HID_H
#define AMDSFH_HID_H
-#define MAX_HID_DEVICES 6
+#define MAX_HID_DEVICES 7
#define AMD_SFH_HID_VENDOR 0x1022
#define AMD_SFH_HID_PRODUCT 0x0001
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 1c1fd63330c9..2983af969579 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -29,6 +29,7 @@
#define ACEL_EN BIT(0)
#define GYRO_EN BIT(1)
#define MAGNO_EN BIT(2)
+#define OP_EN BIT(15)
#define HPD_EN BIT(16)
#define ALS_EN BIT(19)
#define ACS_EN BIT(22)
@@ -232,6 +233,9 @@ int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
if (MAGNO_EN & activestatus)
sensor_id[num_of_sensors++] = mag_idx;
+ if (OP_EN & activestatus)
+ sensor_id[num_of_sensors++] = op_idx;
+
if (ALS_EN & activestatus)
sensor_id[num_of_sensors++] = als_idx;
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index 05e400a4a83e..2eb61f4e8434 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -79,6 +79,7 @@ enum sensor_idx {
accel_idx = 0,
gyro_idx = 1,
mag_idx = 2,
+ op_idx = 15,
als_idx = 19
};
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 0639b1f43d88..61404d7a43ee 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -30,7 +30,7 @@
#include "hid-ids.h"
#define APPLE_RDESC_JIS BIT(0)
-#define APPLE_IGNORE_MOUSE BIT(1)
+/* BIT(1) reserved, was: APPLE_IGNORE_MOUSE */
#define APPLE_HAS_FN BIT(2)
/* BIT(3) reserved, was: APPLE_HIDDEV */
#define APPLE_ISO_TILDE_QUIRK BIT(4)
@@ -42,11 +42,13 @@
#define APPLE_BACKLIGHT_CTL BIT(10)
#define APPLE_IS_NON_APPLE BIT(11)
#define APPLE_MAGIC_BACKLIGHT BIT(12)
+#define APPLE_DISABLE_FKEYS BIT(13)
-#define APPLE_FLAG_FKEY 0x01
+#define APPLE_FLAG_FKEY BIT(0)
+#define APPLE_FLAG_TB_FKEY BIT(1)
#define HID_COUNTRY_INTERNATIONAL_ISO 13
-#define APPLE_BATTERY_TIMEOUT_MS 60000
+#define APPLE_BATTERY_TIMEOUT_SEC 60
#define HID_USAGE_MAGIC_BL 0xff00000f
#define APPLE_MAGIC_REPORT_ID_POWER 3
@@ -55,7 +57,7 @@
static unsigned int fnmode = 3;
module_param(fnmode, uint, 0644);
MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, "
- "1 = fkeyslast, 2 = fkeysfirst, [3] = auto)");
+ "1 = fkeyslast, 2 = fkeysfirst, [3] = auto, 4 = fkeysdisabled)");
static int iso_layout = -1;
module_param(iso_layout, int, 0644);
@@ -89,6 +91,19 @@ struct apple_sc_backlight {
struct hid_device *hdev;
};
+struct apple_backlight_config_report {
+ u8 report_id;
+ u8 version;
+ u16 backlight_off, backlight_on_min, backlight_on_max;
+};
+
+struct apple_backlight_set_report {
+ u8 report_id;
+ u8 version;
+ u16 backlight;
+ u16 rate;
+};
+
struct apple_magic_backlight {
struct led_classdev cdev;
struct hid_report *brightness;
@@ -108,7 +123,7 @@ struct apple_sc {
struct apple_key_translation {
u16 from;
u16 to;
- u8 flags;
+ unsigned long flags;
};
static const struct apple_key_translation magic_keyboard_alu_fn_keys[] = {
@@ -152,21 +167,7 @@ static const struct apple_key_translation magic_keyboard_2015_fn_keys[] = {
{ }
};
-struct apple_backlight_config_report {
- u8 report_id;
- u8 version;
- u16 backlight_off, backlight_on_min, backlight_on_max;
-};
-
-struct apple_backlight_set_report {
- u8 report_id;
- u8 version;
- u16 backlight;
- u16 rate;
-};
-
-
-static const struct apple_key_translation apple2021_fn_keys[] = {
+static const struct apple_key_translation magic_keyboard_2021_and_2024_fn_keys[] = {
{ KEY_BACKSPACE, KEY_DELETE },
{ KEY_ENTER, KEY_INSERT },
{ KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
@@ -212,19 +213,19 @@ static const struct apple_key_translation macbookair_fn_keys[] = {
static const struct apple_key_translation macbookpro_no_esc_fn_keys[] = {
{ KEY_BACKSPACE, KEY_DELETE },
{ KEY_ENTER, KEY_INSERT },
- { KEY_GRAVE, KEY_ESC },
- { KEY_1, KEY_F1 },
- { KEY_2, KEY_F2 },
- { KEY_3, KEY_F3 },
- { KEY_4, KEY_F4 },
- { KEY_5, KEY_F5 },
- { KEY_6, KEY_F6 },
- { KEY_7, KEY_F7 },
- { KEY_8, KEY_F8 },
- { KEY_9, KEY_F9 },
- { KEY_0, KEY_F10 },
- { KEY_MINUS, KEY_F11 },
- { KEY_EQUAL, KEY_F12 },
+ { KEY_GRAVE, KEY_ESC, APPLE_FLAG_TB_FKEY },
+ { KEY_1, KEY_F1, APPLE_FLAG_TB_FKEY },
+ { KEY_2, KEY_F2, APPLE_FLAG_TB_FKEY },
+ { KEY_3, KEY_F3, APPLE_FLAG_TB_FKEY },
+ { KEY_4, KEY_F4, APPLE_FLAG_TB_FKEY },
+ { KEY_5, KEY_F5, APPLE_FLAG_TB_FKEY },
+ { KEY_6, KEY_F6, APPLE_FLAG_TB_FKEY },
+ { KEY_7, KEY_F7, APPLE_FLAG_TB_FKEY },
+ { KEY_8, KEY_F8, APPLE_FLAG_TB_FKEY },
+ { KEY_9, KEY_F9, APPLE_FLAG_TB_FKEY },
+ { KEY_0, KEY_F10, APPLE_FLAG_TB_FKEY },
+ { KEY_MINUS, KEY_F11, APPLE_FLAG_TB_FKEY },
+ { KEY_EQUAL, KEY_F12, APPLE_FLAG_TB_FKEY },
{ KEY_UP, KEY_PAGEUP },
{ KEY_DOWN, KEY_PAGEDOWN },
{ KEY_LEFT, KEY_HOME },
@@ -235,18 +236,18 @@ static const struct apple_key_translation macbookpro_no_esc_fn_keys[] = {
static const struct apple_key_translation macbookpro_dedicated_esc_fn_keys[] = {
{ KEY_BACKSPACE, KEY_DELETE },
{ KEY_ENTER, KEY_INSERT },
- { KEY_1, KEY_F1 },
- { KEY_2, KEY_F2 },
- { KEY_3, KEY_F3 },
- { KEY_4, KEY_F4 },
- { KEY_5, KEY_F5 },
- { KEY_6, KEY_F6 },
- { KEY_7, KEY_F7 },
- { KEY_8, KEY_F8 },
- { KEY_9, KEY_F9 },
- { KEY_0, KEY_F10 },
- { KEY_MINUS, KEY_F11 },
- { KEY_EQUAL, KEY_F12 },
+ { KEY_1, KEY_F1, APPLE_FLAG_TB_FKEY },
+ { KEY_2, KEY_F2, APPLE_FLAG_TB_FKEY },
+ { KEY_3, KEY_F3, APPLE_FLAG_TB_FKEY },
+ { KEY_4, KEY_F4, APPLE_FLAG_TB_FKEY },
+ { KEY_5, KEY_F5, APPLE_FLAG_TB_FKEY },
+ { KEY_6, KEY_F6, APPLE_FLAG_TB_FKEY },
+ { KEY_7, KEY_F7, APPLE_FLAG_TB_FKEY },
+ { KEY_8, KEY_F8, APPLE_FLAG_TB_FKEY },
+ { KEY_9, KEY_F9, APPLE_FLAG_TB_FKEY },
+ { KEY_0, KEY_F10, APPLE_FLAG_TB_FKEY },
+ { KEY_MINUS, KEY_F11, APPLE_FLAG_TB_FKEY },
+ { KEY_EQUAL, KEY_F12, APPLE_FLAG_TB_FKEY },
{ KEY_UP, KEY_PAGEUP },
{ KEY_DOWN, KEY_PAGEDOWN },
{ KEY_LEFT, KEY_HOME },
@@ -425,7 +426,12 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
unsigned int real_fnmode;
if (fnmode == 3) {
- real_fnmode = (asc->quirks & APPLE_IS_NON_APPLE) ? 2 : 1;
+ if (asc->quirks & APPLE_DISABLE_FKEYS)
+ real_fnmode = 4;
+ else if (asc->quirks & APPLE_IS_NON_APPLE)
+ real_fnmode = 2;
+ else
+ real_fnmode = 1;
} else {
real_fnmode = fnmode;
}
@@ -466,42 +472,54 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
asc->fn_on = !!value;
if (real_fnmode) {
- if (hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI ||
- hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO ||
- hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS ||
- hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI ||
- hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO ||
- hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS ||
- hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI ||
- hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO ||
- hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS)
+ switch (hid->product) {
+ case USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI:
+ case USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO:
+ case USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS:
+ case USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI:
+ case USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO:
+ case USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS:
+ case USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI:
+ case USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO:
+ case USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS:
table = magic_keyboard_alu_fn_keys;
- else if (hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2015 ||
- hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2015)
+ break;
+ case USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2015:
+ case USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2015:
table = magic_keyboard_2015_fn_keys;
- else if (hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 ||
- hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 ||
- hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 ||
- hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021)
- table = apple2021_fn_keys;
- else if (hid->product == USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132 ||
- hid->product == USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680 ||
- hid->product == USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213)
- table = macbookpro_no_esc_fn_keys;
- else if (hid->product == USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K ||
- hid->product == USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223 ||
- hid->product == USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F)
- table = macbookpro_dedicated_esc_fn_keys;
- else if (hid->product == USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K ||
- hid->product == USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K)
- table = apple_fn_keys;
- else if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
- hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
- table = macbookair_fn_keys;
- else if (hid->product < 0x21d || hid->product >= 0x300)
- table = powerbook_fn_keys;
- else
+ break;
+ case USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021:
+ case USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021:
+ case USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021:
+ case USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024:
+ case USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2024:
+ case USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2024:
+ table = magic_keyboard_2021_and_2024_fn_keys;
+ break;
+ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132:
+ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213:
+ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680:
+ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680_ALT:
+ table = macbookpro_no_esc_fn_keys;
+ break;
+ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F:
+ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K:
+ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223:
+ table = macbookpro_dedicated_esc_fn_keys;
+ break;
+ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K:
+ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K:
table = apple_fn_keys;
+ break;
+ default:
+ if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
+ hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
+ table = macbookair_fn_keys;
+ else if (hid->product < 0x21d || hid->product >= 0x300)
+ table = powerbook_fn_keys;
+ else
+ table = apple_fn_keys;
+ }
trans = apple_find_translation(table, code);
@@ -524,8 +542,16 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
do_translate = asc->fn_on;
break;
default:
- /* should never happen */
+ /* case 4 */
+ do_translate = false;
+ }
+ } else if (trans->flags & APPLE_FLAG_TB_FKEY) {
+ switch (real_fnmode) {
+ case 4:
do_translate = false;
+ break;
+ default:
+ do_translate = asc->fn_on;
}
} else {
do_translate = asc->fn_on;
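The tables selected by the switch above feed apple_find_translation() (visible at the end of the previous hunk), which resolves the incoming keycode by a plain linear scan. A minimal sketch of that table-driven lookup, simplified from the driver's zero-terminated translation arrays (hypothetical helper name):

    struct apple_key_translation {
            u16 from;
            u16 to;
            u8 flags;
    };

    /* Linear scan of a zero-terminated {from, to, flags} table. */
    static const struct apple_key_translation *
    find_translation(const struct apple_key_translation *table, u16 from)
    {
            const struct apple_key_translation *t;

            for (t = table; t->from; t++)
                    if (t->from == from)
                            return t;
            return NULL;
    }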
@@ -619,7 +645,7 @@ static void apple_battery_timer_tick(struct timer_list *t)
if (apple_fetch_battery(hdev) == 0) {
mod_timer(&asc->battery_timer,
- jiffies + msecs_to_jiffies(APPLE_BATTERY_TIMEOUT_MS));
+ jiffies + secs_to_jiffies(APPLE_BATTERY_TIMEOUT_SEC));
}
}
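The msecs_to_jiffies() → secs_to_jiffies() conversions in this series are behavior-preserving: both expand to the same tick count, the seconds-based helper merely skips the intermediate milliseconds value and reads more naturally for long timeouts. A sketch of the equivalence, assuming the constants keep their usual 60-second value:

    /* assuming: */
    #define APPLE_BATTERY_TIMEOUT_MS  60000
    #define APPLE_BATTERY_TIMEOUT_SEC 60

    /* then these two calls arm the same deadline: */
    mod_timer(&asc->battery_timer,
              jiffies + msecs_to_jiffies(APPLE_BATTERY_TIMEOUT_MS));
    mod_timer(&asc->battery_timer,
              jiffies + secs_to_jiffies(APPLE_BATTERY_TIMEOUT_SEC));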
@@ -682,7 +708,7 @@ static void apple_setup_input(struct input_dev *input)
apple_setup_key_translation(input, apple_iso_keyboard);
apple_setup_key_translation(input, magic_keyboard_alu_fn_keys);
apple_setup_key_translation(input, magic_keyboard_2015_fn_keys);
- apple_setup_key_translation(input, apple2021_fn_keys);
+ apple_setup_key_translation(input, magic_keyboard_2021_and_2024_fn_keys);
apple_setup_key_translation(input, macbookpro_no_esc_fn_keys);
apple_setup_key_translation(input, macbookpro_dedicated_esc_fn_keys);
}
@@ -890,7 +916,8 @@ static int apple_magic_backlight_init(struct hid_device *hdev)
backlight->brightness = report_enum->report_id_hash[APPLE_MAGIC_REPORT_ID_BRIGHTNESS];
backlight->power = report_enum->report_id_hash[APPLE_MAGIC_REPORT_ID_POWER];
- if (!backlight->brightness || !backlight->power)
+ if (!backlight->brightness || backlight->brightness->maxfield < 2 ||
+ !backlight->power || backlight->power->maxfield < 2)
return -ENODEV;
backlight->cdev.name = ":white:" LED_FUNCTION_KBD_BACKLIGHT;
@@ -933,10 +960,12 @@ static int apple_probe(struct hid_device *hdev,
return ret;
}
- timer_setup(&asc->battery_timer, apple_battery_timer_tick, 0);
- mod_timer(&asc->battery_timer,
- jiffies + msecs_to_jiffies(APPLE_BATTERY_TIMEOUT_MS));
- apple_fetch_battery(hdev);
+ if (quirks & APPLE_RDESC_BATTERY) {
+ timer_setup(&asc->battery_timer, apple_battery_timer_tick, 0);
+ mod_timer(&asc->battery_timer,
+ jiffies + secs_to_jiffies(APPLE_BATTERY_TIMEOUT_SEC));
+ apple_fetch_battery(hdev);
+ }
if (quirks & APPLE_BACKLIGHT_CTL)
apple_backlight_init(hdev);
@@ -950,7 +979,9 @@ static int apple_probe(struct hid_device *hdev,
return 0;
out_err:
- timer_delete_sync(&asc->battery_timer);
+ if (quirks & APPLE_RDESC_BATTERY)
+ timer_delete_sync(&asc->battery_timer);
+
hid_hw_stop(hdev);
return ret;
}
@@ -959,7 +990,8 @@ static void apple_remove(struct hid_device *hdev)
{
struct apple_sc *asc = hid_get_drvdata(hdev);
- timer_delete_sync(&asc->battery_timer);
+ if (asc->quirks & APPLE_RDESC_BATTERY)
+ timer_delete_sync(&asc->battery_timer);
hid_hw_stop(hdev);
}
@@ -1129,19 +1161,25 @@ static const struct hid_device_id apple_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K),
.driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132),
- .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
+ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK |
+ APPLE_DISABLE_FKEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680),
- .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
+ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK |
+ APPLE_DISABLE_FKEYS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680_ALT),
+ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK |
+ APPLE_DISABLE_FKEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213),
- .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
+ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK |
+ APPLE_DISABLE_FKEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K),
- .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_DISABLE_FKEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223),
- .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_DISABLE_FKEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F),
- .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_DISABLE_FKEYS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
@@ -1157,10 +1195,6 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024),
- .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
- { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024),
- .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
@@ -1169,6 +1203,18 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2024),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2024),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2024),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2024),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT),
.driver_data = APPLE_MAGIC_BACKLIGHT },
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index a63be219525a..5419a6c10907 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -66,8 +66,12 @@ static s32 snto32(__u32 value, unsigned int n)
static u32 s32ton(__s32 value, unsigned int n)
{
- s32 a = value >> (n - 1);
+ s32 a;
+ if (!value || !n)
+ return 0;
+
+ a = value >> (n - 1);
if (a && a != -1)
return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
return value & ((1 << n) - 1);
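To see what the clamp does, here is a worked example with n == 8; the values follow directly from the code above:

    /*
     * s32ton(300, 8):  a = 300 >> 7  =  2  -> out of range, positive:
     *                  returns (1 << 7) - 1 = 127
     * s32ton(-300, 8): a = -300 >> 7 = -3  -> out of range, negative:
     *                  returns 1 << 7 = 0x80 (-128 in 8-bit two's complement)
     * s32ton(-5, 8):   a = -5 >> 7   = -1  -> in range:
     *                  returns -5 & 0xff = 0xfb
     *
     * Without the new guard, n == 0 would shift by (n - 1) == -1,
     * which is undefined behaviour; the guard returns 0 for that case
     * and short-circuits value == 0 as well.
     */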
@@ -659,9 +663,9 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
default:
if (item->tag >= HID_MAIN_ITEM_TAG_RESERVED_MIN &&
item->tag <= HID_MAIN_ITEM_TAG_RESERVED_MAX)
- hid_warn(parser->device, "reserved main item tag 0x%x\n", item->tag);
+ hid_warn_ratelimited(parser->device, "reserved main item tag 0x%x\n", item->tag);
else
- hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
+ hid_warn_ratelimited(parser->device, "unknown main item tag 0x%x\n", item->tag);
ret = 0;
}
@@ -2809,7 +2813,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
+ return sysfs_emit(buf, "hid:b%04Xg%04Xv%08Xp%08X\n",
hdev->bus, hdev->group, hdev->vendor, hdev->product);
}
static DEVICE_ATTR_RO(modalias);
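The scnprintf() → sysfs_emit() conversions here, and in hid-lg4ff below, follow the standard sysfs rule: show() callbacks receive a page-sized, page-aligned buffer, and sysfs_emit() both sanity-checks that and hides the PAGE_SIZE bookkeeping. A minimal sketch with a hypothetical attribute:

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            /* sysfs_emit() validates buf and caps output at PAGE_SIZE. */
            return sysfs_emit(buf, "%d\n", 42);
    }
    static DEVICE_ATTR_RO(example);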
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 234fa82eab07..482f62a78c41 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1288,7 +1288,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.label = "cp2112_gpio";
dev->gc.direction_input = cp2112_gpio_direction_input;
dev->gc.direction_output = cp2112_gpio_direction_output;
- dev->gc.set_rv = cp2112_gpio_set;
+ dev->gc.set = cp2112_gpio_set;
dev->gc.get = cp2112_gpio_get;
dev->gc.base = -1;
dev->gc.ngpio = CP2112_GPIO_MAX_GPIO;
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 4424c0512bae..7107071c7c51 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -3291,6 +3291,8 @@ static const char *keys[KEY_MAX + 1] = {
[BTN_TR2] = "BtnTR2", [BTN_SELECT] = "BtnSelect",
[BTN_START] = "BtnStart", [BTN_MODE] = "BtnMode",
[BTN_THUMBL] = "BtnThumbL", [BTN_THUMBR] = "BtnThumbR",
+ [BTN_GRIPL] = "BtnGripL", [BTN_GRIPR] = "BtnGripR",
+ [BTN_GRIPL2] = "BtnGripL2", [BTN_GRIPR2] = "BtnGripR2",
[BTN_TOOL_PEN] = "ToolPen", [BTN_TOOL_RUBBER] = "ToolRubber",
[BTN_TOOL_BRUSH] = "ToolBrush", [BTN_TOOL_PENCIL] = "ToolPencil",
[BTN_TOOL_AIRBRUSH] = "ToolAirbrush", [BTN_TOOL_FINGER] = "ToolFinger",
@@ -3726,7 +3728,7 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
*/
if (!list->hdev || !list->hdev->debug) {
ret = -EIO;
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
goto out;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 33cc5820f2be..5a1096283855 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -167,20 +167,27 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2015 0x0267
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2015 0x026c
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 0x0320
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2024 0x0321
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2024 0x0322
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
-#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K 0x027a
-#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132 0x027b
-#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680 0x027c
-#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213 0x027d
-#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K 0x027e
-#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223 0x027f
-#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K 0x0280
-#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F 0x0340
+#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K 0x027a
+#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132 0x027b
+#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680 0x027c
+#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680_ALT 0x0278
+#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213 0x027d
+#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K 0x027e
+#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223 0x027f
+#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K 0x0280
+#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F 0x0340
#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
@@ -188,10 +195,6 @@
#define USB_DEVICE_ID_APPLE_IRCONTROL3 0x8241
#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
#define USB_DEVICE_ID_APPLE_IRCONTROL5 0x8243
-#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c
-#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 0x0320
-#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a
-#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f
#define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
#define USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY 0x8302
@@ -846,7 +849,7 @@
#define USB_DEVICE_ID_PXN_V12 0x1212
#define USB_DEVICE_ID_PXN_V12_LITE 0x1112
#define USB_DEVICE_ID_PXN_V12_LITE_2 0x1211
-#define USB_DEVICE_LITE_STAR_GT987_FF 0x2141
+#define USB_DEVICE_ID_LITE_STAR_GT987 0x2141
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_Z_10_SPK 0x0a07
@@ -1406,6 +1409,7 @@
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S 0x0909
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW 0x0933
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06 0x0078
+#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_22R_PRO 0x091b
#define USB_DEVICE_ID_UGEE_TABLET_G5 0x0074
#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071
#define USB_DEVICE_ID_UGEE_TABLET_RAINBOW_CV720 0x0055
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 445623dd1bd6..32b711723f2a 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -956,7 +956,7 @@ static ssize_t lg4ff_combine_show(struct device *dev, struct device_attribute *a
return 0;
}
- count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->wdata.combine);
+ count = sysfs_emit(buf, "%u\n", entry->wdata.combine);
return count;
}
@@ -1009,7 +1009,7 @@ static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *att
return 0;
}
- count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->wdata.range);
+ count = sysfs_emit(buf, "%u\n", entry->wdata.range);
return count;
}
@@ -1073,7 +1073,7 @@ static ssize_t lg4ff_real_id_show(struct device *dev, struct device_attribute *a
return 0;
}
- count = scnprintf(buf, PAGE_SIZE, "%s: %s\n", entry->wdata.real_tag, entry->wdata.real_name);
+ count = sysfs_emit(buf, "%s: %s\n", entry->wdata.real_tag, entry->wdata.real_name);
return count;
}
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 36f034ac605d..7d4a25c6de0e 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -60,7 +60,7 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define MOUSE_REPORT_ID 0x29
#define MOUSE2_REPORT_ID 0x12
#define DOUBLE_REPORT_ID 0xf7
-#define USB_BATTERY_TIMEOUT_MS 60000
+#define USB_BATTERY_TIMEOUT_SEC 60
/* These definitions are not precise, but they're close enough. (Bits
* 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem
@@ -791,17 +791,31 @@ static void magicmouse_enable_mt_work(struct work_struct *work)
hid_err(msc->hdev, "unable to request touch data (%d)\n", ret);
}
+static bool is_usb_magicmouse2(__u32 vendor, __u32 product)
+{
+ if (vendor != USB_VENDOR_ID_APPLE)
+ return false;
+ return product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC;
+}
+
+static bool is_usb_magictrackpad2(__u32 vendor, __u32 product)
+{
+ if (vendor != USB_VENDOR_ID_APPLE)
+ return false;
+ return product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC;
+}
+
static int magicmouse_fetch_battery(struct hid_device *hdev)
{
#ifdef CONFIG_HID_BATTERY_STRENGTH
struct hid_report_enum *report_enum;
struct hid_report *report;
- if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE ||
- (hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
- hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC &&
- hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
- hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC))
+ if (!hdev->battery ||
+ (!is_usb_magicmouse2(hdev->vendor, hdev->product) &&
+ !is_usb_magictrackpad2(hdev->vendor, hdev->product)))
return -1;
report_enum = &hdev->report_enum[hdev->battery_report_type];
@@ -827,7 +841,7 @@ static void magicmouse_battery_timer_tick(struct timer_list *t)
if (magicmouse_fetch_battery(hdev) == 0) {
mod_timer(&msc->battery_timer,
- jiffies + msecs_to_jiffies(USB_BATTERY_TIMEOUT_MS));
+ jiffies + secs_to_jiffies(USB_BATTERY_TIMEOUT_SEC));
}
}
@@ -863,17 +877,17 @@ static int magicmouse_probe(struct hid_device *hdev,
return ret;
}
- timer_setup(&msc->battery_timer, magicmouse_battery_timer_tick, 0);
- mod_timer(&msc->battery_timer,
- jiffies + msecs_to_jiffies(USB_BATTERY_TIMEOUT_MS));
- magicmouse_fetch_battery(hdev);
-
- if (id->vendor == USB_VENDOR_ID_APPLE &&
- (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
- id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC ||
- ((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
- id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
- hdev->type != HID_TYPE_USBMOUSE)))
+ if (is_usb_magicmouse2(id->vendor, id->product) ||
+ is_usb_magictrackpad2(id->vendor, id->product)) {
+ timer_setup(&msc->battery_timer, magicmouse_battery_timer_tick, 0);
+ mod_timer(&msc->battery_timer,
+ jiffies + secs_to_jiffies(USB_BATTERY_TIMEOUT_SEC));
+ magicmouse_fetch_battery(hdev);
+ }
+
+ if (is_usb_magicmouse2(id->vendor, id->product) ||
+ (is_usb_magictrackpad2(id->vendor, id->product) &&
+ hdev->type != HID_TYPE_USBMOUSE))
return 0;
if (!msc->input) {
@@ -936,7 +950,10 @@ static int magicmouse_probe(struct hid_device *hdev,
return 0;
err_stop_hw:
- timer_delete_sync(&msc->battery_timer);
+ if (is_usb_magicmouse2(id->vendor, id->product) ||
+ is_usb_magictrackpad2(id->vendor, id->product))
+ timer_delete_sync(&msc->battery_timer);
+
hid_hw_stop(hdev);
return ret;
}
@@ -947,7 +964,9 @@ static void magicmouse_remove(struct hid_device *hdev)
if (msc) {
cancel_delayed_work_sync(&msc->work);
- timer_delete_sync(&msc->battery_timer);
+ if (is_usb_magicmouse2(hdev->vendor, hdev->product) ||
+ is_usb_magictrackpad2(hdev->vendor, hdev->product))
+ timer_delete_sync(&msc->battery_timer);
}
hid_hw_stop(hdev);
@@ -964,11 +983,8 @@ static const __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
* 0x05, 0x01, // Usage Page (Generic Desktop) 0
* 0x09, 0x02, // Usage (Mouse) 2
*/
- if (hdev->vendor == USB_VENDOR_ID_APPLE &&
- (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
- hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC ||
- hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
- hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
+ if ((is_usb_magicmouse2(hdev->vendor, hdev->product) ||
+ is_usb_magictrackpad2(hdev->vendor, hdev->product)) &&
*rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) {
hid_info(hdev,
"fixing up magicmouse battery report descriptor\n");
diff --git a/drivers/hid/hid-mcp2200.c b/drivers/hid/hid-mcp2200.c
index e6ea0a2140eb..dafdd5b4a079 100644
--- a/drivers/hid/hid-mcp2200.c
+++ b/drivers/hid/hid-mcp2200.c
@@ -279,8 +279,8 @@ static const struct gpio_chip template_chip = {
.get_direction = mcp_get_direction,
.direction_input = mcp_direction_input,
.direction_output = mcp_direction_output,
- .set_rv = mcp_set,
- .set_multiple_rv = mcp_set_multiple,
+ .set = mcp_set,
+ .set_multiple = mcp_set_multiple,
.get = mcp_get,
.get_multiple = mcp_get_multiple,
.base = -1,
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index 6c0ac14f11a6..475ac352df30 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -18,6 +18,7 @@
#include <linux/i2c.h>
#include <linux/gpio/driver.h>
#include <linux/iio/iio.h>
+#include <linux/minmax.h>
#include "hid-ids.h"
/* Commands codes in a raw output report */
@@ -55,6 +56,27 @@ enum {
MCP2221_ALT_F_NOT_GPIOD = 0xEF,
};
+/* MCP SRAM read offsets cmd: MCP2221_GET_SRAM_SETTINGS */
+enum {
+ MCP2221_SRAM_RD_GP0 = 22,
+ MCP2221_SRAM_RD_GP1 = 23,
+ MCP2221_SRAM_RD_GP2 = 24,
+ MCP2221_SRAM_RD_GP3 = 25,
+};
+
+/* MCP SRAM write offsets cmd: MCP2221_SET_SRAM_SETTINGS */
+enum {
+ MCP2221_SRAM_WR_GP_ENA_ALTER = 7,
+ MCP2221_SRAM_WR_GP0 = 8,
+ MCP2221_SRAM_WR_GP1 = 9,
+ MCP2221_SRAM_WR_GP2 = 10,
+ MCP2221_SRAM_WR_GP3 = 11,
+};
+
+#define MCP2221_SRAM_GP_DESIGN_MASK 0x07
+#define MCP2221_SRAM_GP_DIRECTION_MASK 0x08
+#define MCP2221_SRAM_GP_VALUE_MASK 0x10
+
/* MCP GPIO direction encoding */
enum {
MCP2221_DIR_OUT = 0x00,
@@ -241,10 +263,7 @@ static int mcp_i2c_write(struct mcp2221 *mcp,
idx = 0;
sent = 0;
- if (msg->len < 60)
- len = msg->len;
- else
- len = 60;
+ len = min(msg->len, 60);
do {
mcp->txbuf[0] = type;
@@ -271,10 +290,7 @@ static int mcp_i2c_write(struct mcp2221 *mcp,
break;
idx = idx + len;
- if ((msg->len - sent) < 60)
- len = msg->len - sent;
- else
- len = 60;
+ len = min(msg->len - sent, 60);
/*
* Testing shows delay is needed between successive writes
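For context, the two min() calls above implement the usual chunking loop: the MCP2221 carries at most 60 payload bytes per 64-byte HID report, so a longer I2C message goes out in slices. A condensed sketch of the pattern (hypothetical helper; the report header offset is illustrative only):

    static void send_chunked(const u8 *buf, u16 total)
    {
            u8 report[64];
            u16 sent = 0;
            u16 len = min(total, 60);

            while (sent < total) {
                    /* payload sits after the command header bytes */
                    memcpy(&report[4], buf + sent, len);
                    /* ... submit the 64-byte report, wait for completion ... */
                    sent += len;
                    len = min(total - sent, 60);
            }
    }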
@@ -607,6 +623,80 @@ static const struct i2c_algorithm mcp_i2c_algo = {
};
#if IS_REACHABLE(CONFIG_GPIOLIB)
+static int mcp_gpio_read_sram(struct mcp2221 *mcp)
+{
+ int ret;
+
+ memset(mcp->txbuf, 0, 64);
+ mcp->txbuf[0] = MCP2221_GET_SRAM_SETTINGS;
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 64);
+ mutex_unlock(&mcp->lock);
+
+ return ret;
+}
+
+/*
+ * If CONFIG_IIO is not enabled, check whether the GPIO pins are in
+ * GPIO mode, and switch any that are not into GPIO mode.

+ */
+static int mcp2221_check_gpio_pinfunc(struct mcp2221 *mcp)
+{
+ int i;
+ int needgpiofix = 0;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_IIO))
+ return 0;
+
+ ret = mcp_gpio_read_sram(mcp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < MCP_NGPIO; i++) {
+ if ((mcp->mode[i] & MCP2221_SRAM_GP_DESIGN_MASK) != 0x0) {
+ dev_warn(&mcp->hdev->dev,
+ "GPIO %d not in gpio mode\n", i);
+ needgpiofix = 1;
+ }
+ }
+
+ if (!needgpiofix)
+ return 0;
+
+ /*
+ * Set all bytes to 0, so Bit 7 is not set. The chip
+ * only changes content of a register when bit 7 is set.
+ */
+ memset(mcp->txbuf, 0, 64);
+ mcp->txbuf[0] = MCP2221_SET_SRAM_SETTINGS;
+
+ /*
+ * Set bit 7 in MCP2221_SRAM_WR_GP_ENA_ALTER to enable
+ * loading of a new set of gpio settings to GP SRAM
+ */
+ mcp->txbuf[MCP2221_SRAM_WR_GP_ENA_ALTER] = 0x80;
+ for (i = 0; i < MCP_NGPIO; i++) {
+ if ((mcp->mode[i] & MCP2221_SRAM_GP_DESIGN_MASK) == 0x0) {
+ /* write current GPIO mode */
+ mcp->txbuf[MCP2221_SRAM_WR_GP0 + i] = mcp->mode[i];
+ } else {
+ /* pin is not in gpio mode, set it to input mode */
+ mcp->txbuf[MCP2221_SRAM_WR_GP0 + i] = 0x08;
+ dev_warn(&mcp->hdev->dev,
+ "Set GPIO mode for gpio pin %d!\n", i);
+ }
+ }
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 64);
+ mutex_unlock(&mcp->lock);
+
+ return ret;
+}
+
static int mcp_gpio_get(struct gpio_chip *gc,
unsigned int offset)
{
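The three masks introduced earlier carve up the per-pin SRAM settings byte that this function reads back and rewrites; as this code uses them:

    /*
     * mode[i] layout:
     *   bits 2:0  MCP2221_SRAM_GP_DESIGN_MASK     0 = plain GPIO,
     *                                             non-zero = alternate function
     *   bit 3     MCP2221_SRAM_GP_DIRECTION_MASK  1 = input, 0 = output
     *   bit 4     MCP2221_SRAM_GP_VALUE_MASK      output value latch
     *
     * Hence the 0x08 written back for misconfigured pins means
     * "GPIO designation, direction input".
     */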
@@ -1208,7 +1298,7 @@ static int mcp2221_probe(struct hid_device *hdev,
mcp->gc->direction_input = mcp_gpio_direction_input;
mcp->gc->direction_output = mcp_gpio_direction_output;
mcp->gc->get_direction = mcp_gpio_get_direction;
- mcp->gc->set_rv = mcp_gpio_set;
+ mcp->gc->set = mcp_gpio_set;
mcp->gc->get = mcp_gpio_get;
mcp->gc->ngpio = MCP_NGPIO;
mcp->gc->base = -1;
@@ -1218,6 +1308,8 @@ static int mcp2221_probe(struct hid_device *hdev,
ret = devm_gpiochip_add_data(&hdev->dev, mcp->gc, mcp);
if (ret)
return ret;
+
+ mcp2221_check_gpio_pinfunc(mcp);
#endif
#if IS_REACHABLE(CONFIG_IIO)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index a1c54ffe02b4..294516a8f541 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -73,6 +73,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_FORCE_MULTI_INPUT BIT(20)
#define MT_QUIRK_DISABLE_WAKEUP BIT(21)
#define MT_QUIRK_ORIENTATION_INVERT BIT(22)
+#define MT_QUIRK_APPLE_TOUCHBAR BIT(23)
#define MT_INPUTMODE_TOUCHSCREEN 0x02
#define MT_INPUTMODE_TOUCHPAD 0x03
@@ -220,6 +221,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
#define MT_CLS_GOOGLE 0x0111
#define MT_CLS_RAZER_BLADE_STEALTH 0x0112
#define MT_CLS_SMART_TECH 0x0113
+#define MT_CLS_APPLE_TOUCHBAR 0x0114
#define MT_CLS_SIS 0x0457
#define MT_DEFAULT_MAXCONTACT 10
@@ -405,6 +407,12 @@ static const struct mt_class mt_classes[] = {
MT_QUIRK_CONTACT_CNT_ACCURATE |
MT_QUIRK_SEPARATE_APP_REPORT,
},
+ { .name = MT_CLS_APPLE_TOUCHBAR,
+ .quirks = MT_QUIRK_HOVERING |
+ MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE |
+ MT_QUIRK_APPLE_TOUCHBAR,
+ .maxcontacts = 11,
+ },
{ .name = MT_CLS_SIS,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
MT_QUIRK_ALWAYS_VALID |
@@ -625,6 +633,7 @@ static struct mt_application *mt_find_application(struct mt_device *td,
static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
struct hid_report *report)
{
+ struct mt_class *cls = &td->mtclass;
struct mt_report_data *rdata;
struct hid_field *field;
int r, n;
@@ -649,7 +658,11 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
for (n = 0; n < field->report_count; n++) {
- if (field->usage[n].hid == HID_DG_CONTACTID) {
+ unsigned int hid = field->usage[n].hid;
+
+ if (hid == HID_DG_CONTACTID ||
+ (cls->quirks & MT_QUIRK_APPLE_TOUCHBAR &&
+ hid == HID_DG_TRANSDUCER_INDEX)) {
rdata->is_mt_collection = true;
break;
}
@@ -821,12 +834,31 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
MT_STORE_FIELD(confidence_state);
return 1;
+ case HID_DG_TOUCH:
+ /*
+ * Legacy devices use TIPSWITCH and not TOUCH.
+ * One special case here is the Apple Touch Bar:
+ * In these devices, the tip state is contained in
+ * fields with the HID_DG_TOUCH usage.
+ * Let's just ignore this field for other devices.
+ */
+ if (!(cls->quirks & MT_QUIRK_APPLE_TOUCHBAR))
+ return -1;
+ fallthrough;
case HID_DG_TIPSWITCH:
if (field->application != HID_GD_SYSTEM_MULTIAXIS)
input_set_capability(hi->input,
EV_KEY, BTN_TOUCH);
MT_STORE_FIELD(tip_state);
return 1;
+ case HID_DG_TRANSDUCER_INDEX:
+ /*
+ * Contact ID in case of Apple Touch Bars is contained
+ * in fields with HID_DG_TRANSDUCER_INDEX usage.
+ */
+ if (!(cls->quirks & MT_QUIRK_APPLE_TOUCHBAR))
+ return 0;
+ fallthrough;
case HID_DG_CONTACTID:
MT_STORE_FIELD(contactid);
app->touches_by_report++;
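The two fallthrough cases above reduce to a small decision table:

    /*
     * usage                     MT_QUIRK_APPLE_TOUCHBAR   quirk clear
     * HID_DG_TOUCH              treated as tip switch     ignored (-1)
     * HID_DG_TRANSDUCER_INDEX   treated as contact ID     left to hid-input (0)
     */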
@@ -883,10 +915,6 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_DG_CONTACTMAX:
/* contact max are global to the report */
return -1;
- case HID_DG_TOUCH:
- /* Legacy devices use TIPSWITCH and not TOUCH.
- * Let's just ignore this field. */
- return -1;
}
/* let hid-input decide for the others */
return 0;
@@ -1314,6 +1342,13 @@ static int mt_touch_input_configured(struct hid_device *hdev,
struct input_dev *input = hi->input;
int ret;
+ /*
+ * HID_DG_CONTACTMAX field is not present on Apple Touch Bars,
+ * but the maximum contact count is greater than the default.
+ */
+ if (cls->quirks & MT_QUIRK_APPLE_TOUCHBAR && cls->maxcontacts)
+ td->maxcontacts = cls->maxcontacts;
+
if (!td->maxcontacts)
td->maxcontacts = MT_DEFAULT_MAXCONTACT;
@@ -1321,6 +1356,13 @@ static int mt_touch_input_configured(struct hid_device *hdev,
if (td->serial_maybe)
mt_post_parse_default_settings(td, app);
+ /*
+ * The application for Apple Touch Bars is HID_DG_TOUCHPAD,
+ * but these devices are direct.
+ */
+ if (cls->quirks & MT_QUIRK_APPLE_TOUCHBAR)
+ app->mt_flags |= INPUT_MT_DIRECT;
+
if (cls->is_indirect)
app->mt_flags |= INPUT_MT_POINTER;
@@ -1823,6 +1865,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret != 0)
return ret;
+ if (mtclass->name == MT_CLS_APPLE_TOUCHBAR &&
+ !hid_find_field(hdev, HID_INPUT_REPORT,
+ HID_DG_TOUCHPAD, HID_DG_TRANSDUCER_INDEX))
+ return -ENODEV;
+
if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID)
mt_fix_const_fields(hdev, HID_DG_CONTACTID);
@@ -2320,6 +2367,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR2) },
+ /* Apple Touch Bar */
+ { .driver_data = MT_CLS_APPLE_TOUCHBAR,
+ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
+
/* Google MT devices */
{ .driver_data = MT_CLS_GOOGLE,
HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 9bf9ce8dc803..ff11f1ad344d 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -314,6 +314,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680_ALT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223) },
@@ -973,14 +974,6 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 949d307c66a8..197126d6e081 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -755,15 +755,12 @@ static int steam_input_register(struct steam_device *steam)
input_set_capability(input, EV_KEY, BTN_THUMBL);
input_set_capability(input, EV_KEY, BTN_THUMB);
input_set_capability(input, EV_KEY, BTN_THUMB2);
+ input_set_capability(input, EV_KEY, BTN_GRIPL);
+ input_set_capability(input, EV_KEY, BTN_GRIPR);
if (steam->quirks & STEAM_QUIRK_DECK) {
input_set_capability(input, EV_KEY, BTN_BASE);
- input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY1);
- input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY2);
- input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY3);
- input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY4);
- } else {
- input_set_capability(input, EV_KEY, BTN_GEAR_DOWN);
- input_set_capability(input, EV_KEY, BTN_GEAR_UP);
+ input_set_capability(input, EV_KEY, BTN_GRIPL2);
+ input_set_capability(input, EV_KEY, BTN_GRIPR2);
}
input_set_abs_params(input, ABS_X, -32767, 32767, 0, 0);
@@ -1419,8 +1416,8 @@ static inline s16 steam_le16(u8 *data)
* 9.4 | BTN_SELECT | menu left
* 9.5 | BTN_MODE | steam logo
* 9.6 | BTN_START | menu right
- * 9.7 | BTN_GEAR_DOWN | left back lever
- * 10.0 | BTN_GEAR_UP | right back lever
+ * 9.7 | BTN_GRIPL | left back lever
+ * 10.0 | BTN_GRIPR | right back lever
* 10.1 | -- | left-pad clicked
* 10.2 | BTN_THUMBR | right-pad clicked
* 10.3 | BTN_THUMB | left-pad touched (but see explanation below)
@@ -1485,8 +1482,8 @@ static void steam_do_input_event(struct steam_device *steam,
input_event(input, EV_KEY, BTN_SELECT, !!(b9 & BIT(4)));
input_event(input, EV_KEY, BTN_MODE, !!(b9 & BIT(5)));
input_event(input, EV_KEY, BTN_START, !!(b9 & BIT(6)));
- input_event(input, EV_KEY, BTN_GEAR_DOWN, !!(b9 & BIT(7)));
- input_event(input, EV_KEY, BTN_GEAR_UP, !!(b10 & BIT(0)));
+ input_event(input, EV_KEY, BTN_GRIPL, !!(b9 & BIT(7)));
+ input_event(input, EV_KEY, BTN_GRIPR, !!(b10 & BIT(0)));
input_event(input, EV_KEY, BTN_THUMBR, !!(b10 & BIT(2)));
input_event(input, EV_KEY, BTN_THUMBL, !!(b10 & BIT(6)));
input_event(input, EV_KEY, BTN_THUMB, lpad_touched || lpad_and_joy);
@@ -1547,8 +1544,8 @@ static void steam_do_input_event(struct steam_device *steam,
* 9.4 | BTN_SELECT | menu left
* 9.5 | BTN_MODE | steam logo
* 9.6 | BTN_START | menu right
- * 9.7 | BTN_TRIGGER_HAPPY3 | left bottom grip button
- * 10.0 | BTN_TRIGGER_HAPPY4 | right bottom grip button
+ * 9.7 | BTN_GRIPL2 | left bottom grip button
+ * 10.0 | BTN_GRIPR2 | right bottom grip button
* 10.1 | BTN_THUMB | left pad pressed
* 10.2 | BTN_THUMB2 | right pad pressed
* 10.3 | -- | left pad touched
@@ -1573,8 +1570,8 @@ static void steam_do_input_event(struct steam_device *steam,
* 12.6 | -- | unknown
* 12.7 | -- | unknown
* 13.0 | -- | unknown
- * 13.1 | BTN_TRIGGER_HAPPY1 | left top grip button
- * 13.2 | BTN_TRIGGER_HAPPY2 | right top grip button
+ * 13.1 | BTN_GRIPL | left top grip button
+ * 13.2 | BTN_GRIPR | right top grip button
* 13.3 | -- | unknown
* 13.4 | -- | unknown
* 13.5 | -- | unknown
@@ -1659,8 +1656,8 @@ static void steam_do_deck_input_event(struct steam_device *steam,
input_event(input, EV_KEY, BTN_SELECT, !!(b9 & BIT(4)));
input_event(input, EV_KEY, BTN_MODE, !!(b9 & BIT(5)));
input_event(input, EV_KEY, BTN_START, !!(b9 & BIT(6)));
- input_event(input, EV_KEY, BTN_TRIGGER_HAPPY3, !!(b9 & BIT(7)));
- input_event(input, EV_KEY, BTN_TRIGGER_HAPPY4, !!(b10 & BIT(0)));
+ input_event(input, EV_KEY, BTN_GRIPL2, !!(b9 & BIT(7)));
+ input_event(input, EV_KEY, BTN_GRIPR2, !!(b10 & BIT(0)));
input_event(input, EV_KEY, BTN_THUMBL, !!(b10 & BIT(6)));
input_event(input, EV_KEY, BTN_THUMBR, !!(b11 & BIT(2)));
input_event(input, EV_KEY, BTN_DPAD_UP, !!(b9 & BIT(0)));
@@ -1669,8 +1666,8 @@ static void steam_do_deck_input_event(struct steam_device *steam,
input_event(input, EV_KEY, BTN_DPAD_DOWN, !!(b9 & BIT(3)));
input_event(input, EV_KEY, BTN_THUMB, !!(b10 & BIT(1)));
input_event(input, EV_KEY, BTN_THUMB2, !!(b10 & BIT(2)));
- input_event(input, EV_KEY, BTN_TRIGGER_HAPPY1, !!(b13 & BIT(1)));
- input_event(input, EV_KEY, BTN_TRIGGER_HAPPY2, !!(b13 & BIT(2)));
+ input_event(input, EV_KEY, BTN_GRIPL, !!(b13 & BIT(1)));
+ input_event(input, EV_KEY, BTN_GRIPR, !!(b13 & BIT(2)));
input_event(input, EV_KEY, BTN_BASE, !!(b14 & BIT(2)));
input_sync(input);
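Summarizing the keycode migration across both event handlers and the capability setup above:

    /*
     * old keycode          new keycode  control
     * BTN_GEAR_DOWN        BTN_GRIPL    Steam Controller left back lever
     * BTN_GEAR_UP          BTN_GRIPR    Steam Controller right back lever
     * BTN_TRIGGER_HAPPY1   BTN_GRIPL    Steam Deck left top grip
     * BTN_TRIGGER_HAPPY2   BTN_GRIPR    Steam Deck right top grip
     * BTN_TRIGGER_HAPPY3   BTN_GRIPL2   Steam Deck left bottom grip
     * BTN_TRIGGER_HAPPY4   BTN_GRIPR2   Steam Deck right bottom grip
     */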
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index af98398d9247..34fb03ae8ee2 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -62,6 +62,30 @@ static const __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
+/* Buttons considered valid tablet pad inputs. */
+static const unsigned int uclogic_extra_input_mapping[] = {
+ BTN_0,
+ BTN_1,
+ BTN_2,
+ BTN_3,
+ BTN_4,
+ BTN_5,
+ BTN_6,
+ BTN_7,
+ BTN_8,
+ BTN_RIGHT,
+ BTN_MIDDLE,
+ BTN_SIDE,
+ BTN_EXTRA,
+ BTN_FORWARD,
+ BTN_BACK,
+ BTN_B,
+ BTN_A,
+ BTN_BASE,
+ BTN_BASE2,
+ BTN_X
+};
+
static int uclogic_input_mapping(struct hid_device *hdev,
struct hid_input *hi,
struct hid_field *field,
@@ -72,9 +96,27 @@ static int uclogic_input_mapping(struct hid_device *hdev,
struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev);
struct uclogic_params *params = &drvdata->params;
- /* Discard invalid pen usages */
- if (params->pen.usage_invalid && (field->application == HID_DG_PEN))
- return -1;
+ if (field->application == HID_GD_KEYPAD) {
+ /*
+ * Remap pad buttons to a set of sensible, valid key codes.
+ * This only changes previous behavior for devices with more
+ * than ten or so buttons.
+ */
+ const int key = (usage->hid & HID_USAGE) - 1;
+
+ if (key < ARRAY_SIZE(uclogic_extra_input_mapping)) {
+ hid_map_usage(hi,
+ usage,
+ bit,
+ max,
+ EV_KEY,
+ uclogic_extra_input_mapping[key]);
+ return 1;
+ }
+ } else if (field->application == HID_DG_PEN) {
+ /* Discard invalid pen usages */
+ if (params->pen.usage_invalid)
+ return -1;
+ }
/* Let hid-core decide what to do */
return 0;
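To make the remapping concrete: pad buttons arrive as Button-page usages counting up from 0x01, and usage N lands on uclogic_extra_input_mapping[N - 1]:

    /*
     * usage 0x01 -> uclogic_extra_input_mapping[0]  == BTN_0
     * usage 0x0a -> uclogic_extra_input_mapping[9]  == BTN_RIGHT
     * usage 0x14 -> uclogic_extra_input_mapping[19] == BTN_X
     * usage 0x15 and beyond fall through to hid-core's default mapping.
     */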
@@ -407,8 +449,22 @@ static int uclogic_raw_event_frame(
/* If need to, and can, transform the bitmap dial reports */
if (frame->bitmap_dial_byte > 0 && frame->bitmap_dial_byte < size) {
- if (data[frame->bitmap_dial_byte] == 2)
+ switch (data[frame->bitmap_dial_byte]) {
+ case 2:
data[frame->bitmap_dial_byte] = -1;
+ break;
+
+ /* Everything below here is for tablets that pack multiple dials into one byte */
+ case 16:
+ data[frame->bitmap_dial_byte] = 0;
+ data[frame->bitmap_second_dial_destination_byte] = 1;
+ break;
+
+ case 32:
+ data[frame->bitmap_dial_byte] = 0;
+ data[frame->bitmap_second_dial_destination_byte] = -1;
+ break;
+ }
}
return 0;
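For tablets that pack two relative dials into the single status byte, the switch decodes as follows (dial 2 is written into bitmap_second_dial_destination_byte, added to the frame parameters later in this series):

    /*
     * raw byte   dial 1 (bitmap_dial_byte)   dial 2 (second destination byte)
     *   0x02              -1                          untouched
     *   0x10               0                             +1
     *   0x20               0                             -1
     */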
@@ -546,6 +602,8 @@ static const struct hid_device_id uclogic_devices[] = {
.driver_data = UCLOGIC_MOUSE_FRAME_QUIRK | UCLOGIC_BATTERY_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_XPPEN_TABLET_22R_PRO) },
{ }
};
MODULE_DEVICE_TABLE(hid, uclogic_devices);
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index a6044996abf2..4a17f7332c3f 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -103,6 +103,8 @@ static void uclogic_params_frame_hid_dbg(
frame->touch_flip_at);
hid_dbg(hdev, "\t\t.bitmap_dial_byte = %u\n",
frame->bitmap_dial_byte);
+ hid_dbg(hdev, "\t\t.bitmap_second_dial_destination_byte = %u\n",
+ frame->bitmap_second_dial_destination_byte);
}
/**
@@ -1341,7 +1343,7 @@ static int uclogic_params_ugee_v2_init_event_hooks(struct hid_device *hdev,
struct uclogic_params *p)
{
struct uclogic_raw_event_hook *event_hook;
- __u8 reconnect_event[] = {
+ static const __u8 reconnect_event[] = {
/* Event received on wireless tablet reconnection */
0x02, 0xF8, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
@@ -1529,6 +1531,126 @@ cleanup:
return rc;
}
+/**
+ * uclogic_params_init_ugee_xppen_pro_22r() - initialize an XP-Pen Artist 22R Pro tablet interface.
+ *
+ * @params: Parameters to fill in (to be cleaned with
+ * uclogic_params_cleanup()). Not modified in case of error.
+ * Cannot be NULL.
+ * @hdev: The HID device of the tablet interface to initialize and get
+ * parameters from. Cannot be NULL.
+ * @rdesc_frame_arr: Fixed frame report descriptor to use. Cannot be NULL.
+ * @rdesc_frame_size: Size of the fixed frame report descriptor.
+ *
+ * Returns:
+ * Zero, if successful. A negative errno code on error.
+ */
+static int uclogic_params_init_ugee_xppen_pro_22r(struct uclogic_params *params,
+ struct hid_device *hdev,
+ const u8 rdesc_frame_arr[],
+ const size_t rdesc_frame_size)
+{
+ int rc = 0;
+ struct usb_interface *iface;
+ __u8 bInterfaceNumber;
+ const int str_desc_len = 12;
+ u8 *str_desc = NULL;
+ __u8 *rdesc_pen = NULL;
+ s32 desc_params[UCLOGIC_RDESC_PH_ID_NUM];
+ enum uclogic_params_frame_type frame_type;
+ /* The resulting parameters (noop) */
+ struct uclogic_params p = {0, };
+
+ if (!hdev || !params) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ iface = to_usb_interface(hdev->dev.parent);
+ bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber;
+
+ /* Ignore non-pen interfaces */
+ if (bInterfaceNumber != 2) {
+ rc = -EINVAL;
+ uclogic_params_init_invalid(&p);
+ goto cleanup;
+ }
+
+ /*
+ * Initialize the interface by sending magic data.
+ * This magic data is the same as for other UGEE v2 tablets.
+ */
+ rc = uclogic_probe_interface(hdev,
+ uclogic_ugee_v2_probe_arr,
+ uclogic_ugee_v2_probe_size,
+ uclogic_ugee_v2_probe_endpoint);
+ if (rc) {
+ uclogic_params_init_invalid(&p);
+ goto cleanup;
+ }
+
+ /*
+ * Read the string descriptor containing pen and frame parameters.
+ * These are slightly different from those of typical UGEE v2 devices.
+ */
+ rc = uclogic_params_get_str_desc(&str_desc, hdev, 100, str_desc_len);
+ if (rc != str_desc_len) {
+ rc = (rc < 0) ? rc : -EINVAL;
+ hid_err(hdev, "failed retrieving pen and frame parameters: %d\n", rc);
+ uclogic_params_init_invalid(&p);
+ goto cleanup;
+ }
+
+ rc = uclogic_params_parse_ugee_v2_desc(str_desc, str_desc_len,
+ desc_params,
+ ARRAY_SIZE(desc_params),
+ &frame_type);
+ if (rc)
+ goto cleanup;
+
+ /* str_desc doesn't report the correct number of buttons, so fix it up manually */
+ desc_params[UCLOGIC_RDESC_FRAME_PH_ID_UM] = 20;
+
+ kfree(str_desc);
+ str_desc = NULL;
+
+ /* Initialize the pen interface */
+ rdesc_pen = uclogic_rdesc_template_apply(
+ uclogic_rdesc_ugee_v2_pen_template_arr,
+ uclogic_rdesc_ugee_v2_pen_template_size,
+ desc_params, ARRAY_SIZE(desc_params));
+ if (!rdesc_pen) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ p.pen.desc_ptr = rdesc_pen;
+ p.pen.desc_size = uclogic_rdesc_ugee_v2_pen_template_size;
+ p.pen.id = 0x02;
+ p.pen.subreport_list[0].value = 0xf0;
+ p.pen.subreport_list[0].id = UCLOGIC_RDESC_V1_FRAME_ID;
+
+ /* Initialize the frame interface */
+ rc = uclogic_params_frame_init_with_desc(
+ &p.frame_list[0],
+ rdesc_frame_arr,
+ rdesc_frame_size,
+ UCLOGIC_RDESC_V1_FRAME_ID);
+ if (rc < 0) {
+ hid_err(hdev, "initializing frame params failed: %d\n", rc);
+ goto cleanup;
+ }
+
+ p.frame_list[0].bitmap_dial_byte = 7;
+ p.frame_list[0].bitmap_second_dial_destination_byte = 8;
+
+ /* Output parameters */
+ memcpy(params, &p, sizeof(*params));
+ memset(&p, 0, sizeof(p));
+cleanup:
+ kfree(str_desc);
+ uclogic_params_cleanup(&p);
+ return rc;
+}
+
/**
* uclogic_params_init() - initialize a tablet interface and discover its
* parameters.
@@ -1846,6 +1968,16 @@ int uclogic_params_init(struct uclogic_params *params,
}
break;
+ case VID_PID(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_XPPEN_TABLET_22R_PRO):
+ rc = uclogic_params_init_ugee_xppen_pro_22r(&p,
+ hdev,
+ uclogic_rdesc_xppen_artist_22r_pro_frame_arr,
+ uclogic_rdesc_xppen_artist_22r_pro_frame_size);
+ if (rc != 0)
+ goto cleanup;
+
+ break;
}
#undef VID_PID
diff --git a/drivers/hid/hid-uclogic-params.h b/drivers/hid/hid-uclogic-params.h
index 35ff062d09b5..6ec8643d2ee5 100644
--- a/drivers/hid/hid-uclogic-params.h
+++ b/drivers/hid/hid-uclogic-params.h
@@ -175,6 +175,11 @@ struct uclogic_params_frame {
* counterclockwise, as opposed to the normal 1 and -1.
*/
unsigned int bitmap_dial_byte;
+ /*
+ * Destination offset for the second bitmap dial byte, if the tablet
+ * supports a second dial at all.
+ */
+ unsigned int bitmap_second_dial_destination_byte;
};
/*
diff --git a/drivers/hid/hid-uclogic-rdesc.c b/drivers/hid/hid-uclogic-rdesc.c
index 9b9cbc2aae36..08a89c6aae3b 100644
--- a/drivers/hid/hid-uclogic-rdesc.c
+++ b/drivers/hid/hid-uclogic-rdesc.c
@@ -1193,6 +1193,50 @@ const __u8 uclogic_rdesc_xppen_deco01_frame_arr[] = {
const size_t uclogic_rdesc_xppen_deco01_frame_size =
sizeof(uclogic_rdesc_xppen_deco01_frame_arr);
+/* Fixed report descriptor for XP-Pen Artist 22R Pro frame */
+const __u8 uclogic_rdesc_xppen_artist_22r_pro_frame_arr[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x07, /* Usage (Keypad), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, UCLOGIC_RDESC_V1_FRAME_ID,
+ /* Report ID (Virtual report), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x39, /* Usage (Tablet Function Keys), */
+ 0xA0, /* Collection (Physical), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x08, /* Report Count (8), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x14, /* Usage Maximum (14h), */
+ 0x95, 0x14, /* Report Count (20), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x14, /* Report Count (20), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x38, /* Usage (Wheel), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x15, 0xFF, /* Logical Minimum (-1), */
+ 0x25, 0x08, /* Logical Maximum (8), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0x05, 0x0C, /* Usage Page (Consumer Devices), */
+ 0x0A, 0x38, 0x02, /* Usage (AC PAN), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection */
+ 0xC0, /* End Collection */
+};
+
+const size_t uclogic_rdesc_xppen_artist_22r_pro_frame_size =
+ sizeof(uclogic_rdesc_xppen_artist_22r_pro_frame_arr);
+
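Working through the items above, the virtual frame report comes out byte-by-byte as follows (byte 0 is the report ID; this is a reading of the descriptor, not quoted from the commit):

    /*
     * byte 0      report ID (UCLOGIC_RDESC_V1_FRAME_ID)
     * byte 1      8 constant padding bits
     * bytes 2-6   20 button bits followed by 20 constant filler bits
     * byte 7      dial 1, relative (Wheel)
     * byte 8      dial 2, relative (Consumer AC Pan)
     * byte 9      8-bit trailing field
     *
     * This matches bitmap_dial_byte = 7 and
     * bitmap_second_dial_destination_byte = 8 set in
     * uclogic_params_init_ugee_xppen_pro_22r().
     */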
/**
* uclogic_rdesc_template_apply() - apply report descriptor parameters to a
* report descriptor template, creating a report descriptor. Copies the
diff --git a/drivers/hid/hid-uclogic-rdesc.h b/drivers/hid/hid-uclogic-rdesc.h
index 3878a0e8c464..644a35ff12f2 100644
--- a/drivers/hid/hid-uclogic-rdesc.h
+++ b/drivers/hid/hid-uclogic-rdesc.h
@@ -210,4 +210,8 @@ extern const size_t uclogic_rdesc_ugee_g5_frame_size;
/* Least-significant bit of Ugee G5 frame rotary encoder state */
#define UCLOGIC_RDESC_UGEE_G5_FRAME_RE_LSB 38
+/* Fixed report descriptor for XP-Pen Artist 22R Pro frame */
+extern const __u8 uclogic_rdesc_xppen_artist_22r_pro_frame_arr[];
+extern const size_t uclogic_rdesc_xppen_artist_22r_pro_frame_size;
+
#endif /* _HID_UCLOGIC_RDESC_H */
diff --git a/drivers/hid/hid-universal-pidff.c b/drivers/hid/hid-universal-pidff.c
index 001a0f5efb9d..554a6559aeb7 100644
--- a/drivers/hid/hid-universal-pidff.c
+++ b/drivers/hid/hid-universal-pidff.c
@@ -57,6 +57,7 @@ static int universal_pidff_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
int i, error;
+
error = hid_parse(hdev);
if (error) {
hid_err(hdev, "HID parse failed\n");
@@ -91,8 +92,8 @@ static int universal_pidff_probe(struct hid_device *hdev,
/* Check if HID_PID support is enabled */
int (*init_function)(struct hid_device *, u32);
- init_function = hid_pidff_init_with_quirks;
+ init_function = hid_pidff_init_with_quirks;
if (!init_function) {
hid_warn(hdev, "HID_PID support not enabled!\n");
return 0;
@@ -177,7 +178,7 @@ static const struct hid_device_id universal_pidff_devices[] = {
.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE_2),
.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
- { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_LITE_STAR_GT987_FF),
+ { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_LITE_STAR_GT987),
.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_INVICTA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_FORTE) },
diff --git a/drivers/hid/intel-thc-hid/Makefile b/drivers/hid/intel-thc-hid/Makefile
index 6f762d87af07..f1182253b5b7 100644
--- a/drivers/hid/intel-thc-hid/Makefile
+++ b/drivers/hid/intel-thc-hid/Makefile
@@ -8,6 +8,7 @@
obj-$(CONFIG_INTEL_THC_HID) += intel-thc.o
intel-thc-objs += intel-thc/intel-thc-dev.o
intel-thc-objs += intel-thc/intel-thc-dma.o
+intel-thc-objs += intel-thc/intel-thc-wot.o
obj-$(CONFIG_INTEL_QUICKSPI) += intel-quickspi.o
intel-quickspi-objs += intel-quickspi/pci-quickspi.o
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
index 8a8c4a46f927..e944a6ccb776 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -11,13 +11,20 @@
#include <linux/sizes.h>
#include <linux/pm_runtime.h>
+#include <linux/gpio/consumer.h>
+
#include "intel-thc-dev.h"
#include "intel-thc-hw.h"
+#include "intel-thc-wot.h"
#include "quicki2c-dev.h"
#include "quicki2c-hid.h"
#include "quicki2c-protocol.h"
+static struct quicki2c_ddata ptl_ddata = {
+ .max_detect_size = MAX_RX_DETECT_SIZE_PTL,
+};
+
/* THC QuickI2C ACPI method to get device properties */
/* HIDI2C device method */
static guid_t i2c_hid_guid =
@@ -27,19 +34,26 @@ static guid_t i2c_hid_guid =
static guid_t thc_platform_guid =
GUID_INIT(0x84005682, 0x5b71, 0x41a4, 0x8d, 0x66, 0x81, 0x30, 0xf7, 0x87, 0xa1, 0x38);
+/* QuickI2C Wake-on-Touch GPIO resource */
+static const struct acpi_gpio_params wake_gpio = { 0, 0, true };
+
+static const struct acpi_gpio_mapping quicki2c_gpios[] = {
+ { "wake-on-touch", &wake_gpio, 1 },
+ { }
+};
+
/**
* quicki2c_acpi_get_dsm_property - Query device ACPI DSM parameter
- *
- * @adev: point to ACPI device
+ * @adev: Pointer to ACPI device
* @guid: ACPI method's guid
* @rev: ACPI method's revision
* @func: ACPI method's function number
* @type: ACPI parameter's data type
- * @prop_buf: point to return buffer
+ * @prop_buf: Pointer to return buffer
*
* This is a helper function for device to query its ACPI DSM parameters.
*
- * Return: 0 if success or ENODEV on failed.
+ * Return: 0 if success or ENODEV on failure.
*/
static int quicki2c_acpi_get_dsm_property(struct acpi_device *adev, const guid_t *guid,
u64 rev, u64 func, acpi_object_type type, void *prop_buf)
@@ -67,11 +81,10 @@ static int quicki2c_acpi_get_dsm_property(struct acpi_device *adev, const guid_t
/**
* quicki2c_acpi_get_dsd_property - Query device ACPI DSD parameter
- *
- * @adev: point to ACPI device
+ * @adev: Pointer to ACPI device
* @dsd_method_name: ACPI method's property name
* @type: ACPI parameter's data type
- * @prop_buf: point to return buffer
+ * @prop_buf: Point to return buffer
*
* This is a helper function for device to query its ACPI DSD parameters.
*
@@ -100,13 +113,12 @@ static int quicki2c_acpi_get_dsd_property(struct acpi_device *adev, acpi_string
}
/**
- * quicki2c_get_acpi_resources - Query all quicki2c devices' ACPI parameters
+ * quicki2c_get_acpi_resources - Query all QuickI2C devices' ACPI parameters
+ * @qcdev: Pointer to quicki2c_device structure
*
- * @qcdev: point to quicki2c device
+ * This function gets all QuickI2C devices' ACPI resources.
*
- * This function gets all quicki2c devices' ACPI resource.
- *
- * Return: 0 if success or error code on failed.
+ * Return: 0 if success or error code on failure.
*/
static int quicki2c_get_acpi_resources(struct quicki2c_device *qcdev)
{
@@ -192,10 +204,9 @@ static int quicki2c_get_acpi_resources(struct quicki2c_device *qcdev)
}
/**
- * quicki2c_irq_quick_handler - The ISR of the quicki2c driver
- *
+ * quicki2c_irq_quick_handler - The ISR of the QuickI2C driver
* @irq: The irq number
- * @dev_id: pointer to the device structure
+ * @dev_id: Pointer to the quicki2c_device structure
*
* Return: IRQ_WAKE_THREAD if further process needed.
*/
@@ -214,13 +225,13 @@ static irqreturn_t quicki2c_irq_quick_handler(int irq, void *dev_id)
/**
* try_recover - Try to recover THC and device
- * @qcdev: pointer to quicki2c device
+ * @qcdev: Pointer to quicki2c_device structure
*
- * This function is a error handler, called when fatal error happens.
- * It try to reset Touch Device and re-configure THC to recovery
- * transferring between Device and THC.
+ * This function is an error handler, called when a fatal error happens.
+ * It tries to reset the touch device and re-configure THC to recover
+ * communication between the touch device and THC.
*
- * Return: 0 if successful or error code on failed
+ * Return: 0 if successful or error code on failure
*/
static int try_recover(struct quicki2c_device *qcdev)
{
@@ -264,7 +275,7 @@ static int handle_input_report(struct quicki2c_device *qcdev)
continue;
}
- /* discard samples before driver probe complete */
+ /* Discard samples before driver probe complete */
if (qcdev->state != QUICKI2C_ENABLED)
continue;
@@ -276,10 +287,9 @@ static int handle_input_report(struct quicki2c_device *qcdev)
}
/**
- * quicki2c_irq_thread_handler - IRQ thread handler of quicki2c driver
- *
+ * quicki2c_irq_thread_handler - IRQ thread handler of QuickI2C driver
* @irq: The IRQ number
- * @dev_id: pointer to the quicki2c device structure
+ * @dev_id: Pointer to the quicki2c_device structure
*
* Return: IRQ_HANDLED to finish this handler.
*/
@@ -325,20 +335,21 @@ exit:
}
/**
- * quicki2c_dev_init - Initialize quicki2c device
- *
- * @pdev: pointer to the thc pci device
- * @mem_addr: The pointer of MMIO memory address
+ * quicki2c_dev_init - Initialize QuickI2C device
+ * @pdev: Pointer to the THC PCI device
+ * @mem_addr: Pointer to the MMIO memory address
+ * @ddata: Pointer to quicki2c_ddata structure
*
- * Alloc quicki2c device structure and initialized THC device,
+ * Allocate the quicki2c_device structure, initialize the THC device,
* then configure THC to HIDI2C mode.
*
* If success, enable THC hardware interrupt.
*
- * Return: pointer to the quicki2c device structure if success
- * or NULL on failed.
+ * Return: Pointer to the quicki2c_device structure if success
+ * or NULL on failure.
*/
-static struct quicki2c_device *quicki2c_dev_init(struct pci_dev *pdev, void __iomem *mem_addr)
+static struct quicki2c_device *quicki2c_dev_init(struct pci_dev *pdev, void __iomem *mem_addr,
+ const struct quicki2c_ddata *ddata)
{
struct device *dev = &pdev->dev;
struct quicki2c_device *qcdev;
@@ -352,10 +363,11 @@ static struct quicki2c_device *quicki2c_dev_init(struct pci_dev *pdev, void __io
qcdev->dev = dev;
qcdev->mem_addr = mem_addr;
qcdev->state = QUICKI2C_DISABLED;
+ qcdev->ddata = ddata;
init_waitqueue_head(&qcdev->reset_ack_wq);
- /* thc hw init */
+ /* THC hardware init */
qcdev->thc_hw = thc_dev_init(qcdev->dev, qcdev->mem_addr);
if (IS_ERR(qcdev->thc_hw)) {
ret = PTR_ERR(qcdev->thc_hw);
@@ -392,15 +404,16 @@ static struct quicki2c_device *quicki2c_dev_init(struct pci_dev *pdev, void __io
thc_interrupt_enable(qcdev->thc_hw, true);
+ thc_wot_config(qcdev->thc_hw, &quicki2c_gpios[0]);
+
qcdev->state = QUICKI2C_INITED;
return qcdev;
}
/**
- * quicki2c_dev_deinit - De-initialize quicki2c device
- *
- * @qcdev: pointer to the quicki2c device structure
+ * quicki2c_dev_deinit - De-initialize QuickI2C device
+ * @qcdev: Pointer to the quicki2c_device structure
*
* Disable THC interrupt and deinitialize THC.
*/
@@ -408,18 +421,63 @@ static void quicki2c_dev_deinit(struct quicki2c_device *qcdev)
{
thc_interrupt_enable(qcdev->thc_hw, false);
thc_ltr_unconfig(qcdev->thc_hw);
+ thc_wot_unconfig(qcdev->thc_hw);
qcdev->state = QUICKI2C_DISABLED;
}
/**
- * quicki2c_dma_init - Configure THC DMA for quicki2c device
- * @qcdev: pointer to the quicki2c device structure
+ * quicki2c_dma_adv_enable - Configure and enable DMA advanced features
+ * @qcdev: Pointer to the quicki2c_device structure
+ *
+ * If platform supports THC DMA advanced features, such as max input size
+ * control or interrupt delay, configures and enables them.
+ */
+static void quicki2c_dma_adv_enable(struct quicki2c_device *qcdev)
+{
+ /*
+ * If the platform supports the max input size control feature and the
+ * touch device's max input length <= THC detect capability, enable the
+ * feature with the device's max input length.
+ */
+ if (qcdev->ddata->max_detect_size >=
+ le16_to_cpu(qcdev->dev_desc.max_input_len)) {
+ thc_i2c_set_rx_max_size(qcdev->thc_hw,
+ le16_to_cpu(qcdev->dev_desc.max_input_len));
+ thc_i2c_rx_max_size_enable(qcdev->thc_hw, true);
+ }
+
+ /* If the platform supports the interrupt delay feature, enable it with the given delay */
+ if (qcdev->ddata->interrupt_delay) {
+ thc_i2c_set_rx_int_delay(qcdev->thc_hw,
+ qcdev->ddata->interrupt_delay);
+ thc_i2c_rx_int_delay_enable(qcdev->thc_hw, true);
+ }
+}
+
+/**
+ * quicki2c_dma_adv_disable - Disable DMA advanced features
+ * @qcdev: Pointer to the quicki2c device structure
+ *
+ * Disable all DMA advanced features the platform supports.
+ */
+static void quicki2c_dma_adv_disable(struct quicki2c_device *qcdev)
+{
+ if (qcdev->ddata->max_detect_size)
+ thc_i2c_rx_max_size_enable(qcdev->thc_hw, false);
+
+ if (qcdev->ddata->interrupt_delay)
+ thc_i2c_rx_int_delay_enable(qcdev->thc_hw, false);
+}
+
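As a concrete instance of this gating, the Panther Lake driver data declared near the top of this file arms only the size-control path:

    /* ptl_ddata leaves interrupt_delay zeroed, so only the RX max-size
     * feature is enabled, and only when the device descriptor's
     * max_input_len fits within THC's detect capability: */
    static struct quicki2c_ddata ptl_ddata = {
            .max_detect_size = MAX_RX_DETECT_SIZE_PTL,
    };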
+/**
+ * quicki2c_dma_init - Configure THC DMA for QuickI2C device
+ * @qcdev: Pointer to the quicki2c_device structure
*
* This function uses TIC's parameters(such as max input length, max output
* length) to allocate THC DMA buffers and configure THC DMA engines.
*
- * Return: 0 if success or error code on failed.
+ * Return: 0 if success or error code on failure.
*/
static int quicki2c_dma_init(struct quicki2c_device *qcdev)
{
@@ -451,12 +509,15 @@ static int quicki2c_dma_init(struct quicki2c_device *qcdev)
return ret;
}
- return ret;
+ if (qcdev->ddata)
+ quicki2c_dma_adv_enable(qcdev);
+
+ return 0;
}
/**
- * quicki2c_dma_deinit - Release THC DMA for quicki2c device
- * @qcdev: pointer to the quicki2c device structure
+ * quicki2c_dma_deinit - Release THC DMA for QuickI2C device
+ * @qcdev: Pointer to the quicki2c_device structure
*
* Stop THC DMA engines and release all DMA buffers.
*
@@ -465,11 +526,14 @@ static void quicki2c_dma_deinit(struct quicki2c_device *qcdev)
{
thc_dma_unconfigure(qcdev->thc_hw);
thc_dma_release(qcdev->thc_hw);
+
+ if (qcdev->ddata)
+ quicki2c_dma_adv_disable(qcdev);
}
/**
* quicki2c_alloc_report_buf - Alloc report buffers
- * @qcdev: pointer to the quicki2c device structure
+ * @qcdev: Pointer to the quicki2c_device structure
*
* Allocate report descriptor buffer, it will be used for restore TIC HID
* report descriptor.
@@ -480,7 +544,7 @@ static void quicki2c_dma_deinit(struct quicki2c_device *qcdev)
* Allocate output report buffer, it will be used to store HID output reports,
* such as set feature.
*
- * Return: 0 if success or error code on failed.
+ * Return: 0 on success or error code on failure.
*/
static int quicki2c_alloc_report_buf(struct quicki2c_device *qcdev)
{
@@ -518,28 +582,27 @@ static int quicki2c_alloc_report_buf(struct quicki2c_device *qcdev)
}
/*
- * quicki2c_probe: Quicki2c driver probe function
- *
- * @pdev: point to pci device
- * @id: point to pci_device_id structure
+ * quicki2c_probe: QuickI2C driver probe function
+ * @pdev: Pointer to the PCI device
+ * @id: Pointer to the pci_device_id structure
*
* This function initializes THC and HIDI2C device, the flow is:
- * - do THC pci device initialization
- * - query HIDI2C ACPI parameters
- * - configure THC to HIDI2C mode
- * - go through HIDI2C enumeration flow
- * |- read device descriptor
- * |- reset HIDI2C device
- * - enable THC interrupt and DMA
- * - read report descriptor
- * - register HID device
- * - enable runtime power management
- *
- * Return 0 if success or error code on failed.
+ * - Do THC PCI device initialization
+ * - Query HIDI2C ACPI parameters
+ * - Configure THC to HIDI2C mode
+ * - Go through HIDI2C enumeration flow
+ * |- Read device descriptor
+ * |- Reset HIDI2C device
+ * - Enable THC interrupt and DMA
+ * - Read report descriptor
+ * - Register HID device
+ * - Enable runtime power management
+ *
+ * Return 0 on success or error code on failure.
*/
-static int quicki2c_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int quicki2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ const struct quicki2c_ddata *ddata = (const struct quicki2c_ddata *)id->driver_data;
struct quicki2c_device *qcdev;
void __iomem *mem_addr;
int ret;
@@ -577,7 +640,7 @@ static int quicki2c_probe(struct pci_dev *pdev,
pdev->irq = pci_irq_vector(pdev, 0);
- qcdev = quicki2c_dev_init(pdev, mem_addr);
+ qcdev = quicki2c_dev_init(pdev, mem_addr, ddata);
if (IS_ERR(qcdev)) {
dev_err_once(&pdev->dev, "QuickI2C device init failed\n");
ret = PTR_ERR(qcdev);
@@ -668,11 +731,10 @@ disable_pci_device:
/**
* quicki2c_remove - Device Removal Routine
+ * @pdev: Pointer to the PCI device structure
*
- * @pdev: PCI device structure
- *
- * This is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
+ * This is called by the PCI subsystem to alert the driver that it should
+ * release a PCI device.
*/
static void quicki2c_remove(struct pci_dev *pdev)
{
@@ -694,12 +756,10 @@ static void quicki2c_remove(struct pci_dev *pdev)
/**
* quicki2c_shutdown - Device Shutdown Routine
+ * @pdev: Pointer to the PCI device structure
*
- * @pdev: PCI device structure
- *
- * This is called from the reboot notifier
- * it's a simplified version of remove so we go down
- * faster.
+ * This is called from the reboot notifier; it's a simplified version of remove
+ * so we go down faster.
*/
static void quicki2c_shutdown(struct pci_dev *pdev)
{
@@ -930,13 +990,13 @@ static const struct dev_pm_ops quicki2c_pm_ops = {
};
static const struct pci_device_id quicki2c_pci_tbl[] = {
- {PCI_VDEVICE(INTEL, THC_LNL_DEVICE_ID_I2C_PORT1), },
- {PCI_VDEVICE(INTEL, THC_LNL_DEVICE_ID_I2C_PORT2), },
- {PCI_VDEVICE(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT1), },
- {PCI_VDEVICE(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT2), },
- {PCI_VDEVICE(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT1), },
- {PCI_VDEVICE(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT2), },
- {}
+ { PCI_DEVICE_DATA(INTEL, THC_LNL_DEVICE_ID_I2C_PORT1, NULL) },
+ { PCI_DEVICE_DATA(INTEL, THC_LNL_DEVICE_ID_I2C_PORT2, NULL) },
+ { PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT1, &ptl_ddata) },
+ { PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
+ { PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT1, &ptl_ddata) },
+ { PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
+ { }
};
MODULE_DEVICE_TABLE(pci, quicki2c_pci_tbl);
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
index 6ddb584bd611..93d6fa982d60 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
@@ -7,12 +7,12 @@
#include <linux/hid-over-i2c.h>
#include <linux/workqueue.h>
-#define THC_LNL_DEVICE_ID_I2C_PORT1 0xA848
-#define THC_LNL_DEVICE_ID_I2C_PORT2 0xA84A
-#define THC_PTL_H_DEVICE_ID_I2C_PORT1 0xE348
-#define THC_PTL_H_DEVICE_ID_I2C_PORT2 0xE34A
-#define THC_PTL_U_DEVICE_ID_I2C_PORT1 0xE448
-#define THC_PTL_U_DEVICE_ID_I2C_PORT2 0xE44A
+#define PCI_DEVICE_ID_INTEL_THC_LNL_DEVICE_ID_I2C_PORT1 0xA848
+#define PCI_DEVICE_ID_INTEL_THC_LNL_DEVICE_ID_I2C_PORT2 0xA84A
+#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_I2C_PORT1 0xE348
+#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_I2C_PORT2 0xE34A
+#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT1 0xE448
+#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT2 0xE44A
/* Packet size value, the unit is 16 bytes */
#define MAX_PACKET_SIZE_VALUE_LNL 256
@@ -36,6 +36,12 @@
#define QUICKI2C_DEFAULT_LP_LTR_VALUE 500
#define QUICKI2C_RPM_TIMEOUT_MS 500
+/* PTL max packet size detection capability is 255 bytes */
+#define MAX_RX_DETECT_SIZE_PTL 255
+
+/* Default interrupt delay is 1 ms, suitable for most devices */
+#define DEFAULT_INTERRUPT_DELAY_US (1 * USEC_PER_MSEC)
+
/*
* THC uses runtime auto suspend to dynamically switch between THC active LTR
* and low power LTR to save CPU power.
@@ -122,6 +128,16 @@ struct quicki2c_subip_acpi_config {
u64 HMSL;
};
+/**
+ * struct quicki2c_ddata - Driver specific data for quicki2c device
+ * @max_detect_size: Max packet size the hardware can detect for Rx
+ * @interrupt_delay: Interrupt detect delay for Rx
+ */
+struct quicki2c_ddata {
+ u32 max_detect_size;
+ u32 interrupt_delay;
+};
+
struct device;
struct pci_dev;
struct thc_device;
@@ -130,15 +146,15 @@ struct acpi_device;
/**
* struct quicki2c_device - THC QuickI2C device struct
- * @dev: point to kernel device
- * @pdev: point to PCI device
- * @thc_hw: point to THC device
- * @hid_dev: point to hid device
- * @acpi_dev: point to ACPI device
- * @driver_data: point to quicki2c specific driver data
+ * @dev: Pointer to the kernel device
+ * @pdev: Pointer to the PCI device
+ * @thc_hw: Pointer to the THC device
+ * @hid_dev: Pointer to the HID device
+ * @acpi_dev: Pointer to the ACPI device
+ * @ddata: Pointer to QuickI2C platform specific driver data
* @state: THC I2C device state
* @mem_addr: MMIO memory address
- * @dev_desc: device descriptor for HIDI2C protocol
+ * @dev_desc: Device descriptor for HIDI2C protocol
* @i2c_slave_addr: HIDI2C device slave address
* @hid_desc_addr: Register address to retrieve the HID device descriptor
* @active_ltr_val: THC active LTR value
@@ -146,12 +162,12 @@ struct acpi_device;
* @i2c_speed_mode: 0 - standard mode, 1 - fast mode, 2 - fast mode plus
* @i2c_clock_hcnt: I2C CLK high period time (unit in cycle count)
* @i2c_clock_lcnt: I2C CLK low period time (unit in cycle count)
- * @report_descriptor: store a copy of device report descriptor
- * @input_buf: store a copy of latest input report data
- * @report_buf: store a copy of latest input/output report packet from set/get feature
- * @report_len: the length of input/output report packet
- * @reset_ack_wq: workqueue for waiting reset response from device
- * @reset_ack: indicate reset response received or not
+ * @report_descriptor: Store a copy of device report descriptor
+ * @input_buf: Store a copy of latest input report data
+ * @report_buf: Store a copy of latest input/output report packet from set/get feature
+ * @report_len: The length of input/output report packet
+ * @reset_ack_wq: Workqueue for waiting reset response from device
+ * @reset_ack: Indicate reset response received or not
*/
struct quicki2c_device {
struct device *dev;
@@ -159,6 +175,7 @@ struct quicki2c_device {
struct thc_device *thc_hw;
struct hid_device *hid_dev;
struct acpi_device *acpi_dev;
+ const struct quicki2c_ddata *ddata;
enum quicki2c_dev_state state;
void __iomem *mem_addr;
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
index d4f89f44c3b4..5e5f179dd113 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
@@ -11,8 +11,11 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/gpio/consumer.h>
+
#include "intel-thc-dev.h"
#include "intel-thc-hw.h"
+#include "intel-thc-wot.h"
#include "quickspi-dev.h"
#include "quickspi-hid.h"
@@ -46,6 +49,15 @@ static guid_t thc_platform_guid =
GUID_INIT(0x84005682, 0x5b71, 0x41a4, 0x8d, 0x66, 0x81, 0x30,
0xf7, 0x87, 0xa1, 0x38);
+
+/* QuickSPI Wake-on-Touch GPIO resource */
+static const struct acpi_gpio_params wake_gpio = { 0, 0, true };
+
+static const struct acpi_gpio_mapping quickspi_gpios[] = {
+ { "wake-on-touch", &wake_gpio, 1 },
+ { }
+};
+
/**
* thc_acpi_get_property - Query device ACPI parameter
*
@@ -426,6 +438,8 @@ static struct quickspi_device *quickspi_dev_init(struct pci_dev *pdev, void __io
thc_interrupt_enable(qsdev->thc_hw, true);
+ thc_wot_config(qsdev->thc_hw, &quickspi_gpios[0]);
+
qsdev->state = QUICKSPI_INITIATED;
return qsdev;
@@ -442,6 +456,7 @@ static void quickspi_dev_deinit(struct quickspi_device *qsdev)
{
thc_interrupt_enable(qsdev->thc_hw, false);
thc_ltr_unconfig(qsdev->thc_hw);
+ thc_wot_unconfig(qsdev->thc_hw);
qsdev->state = QUICKSPI_DISABLED;
}
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
index c105df7f6c87..6f2263869b20 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2024 Intel Corporation */
#include <linux/bitfield.h>
+#include <linux/math.h>
#include <linux/regmap.h>
#include "intel-thc-dev.h"
@@ -1571,6 +1572,145 @@ int thc_i2c_subip_regs_restore(struct thc_device *dev)
}
EXPORT_SYMBOL_NS_GPL(thc_i2c_subip_regs_restore, "INTEL_THC");
+/**
+ * thc_i2c_set_rx_max_size - Set I2C Rx transfer max input size
+ * @dev: The pointer of THC private device context
+ * @max_rx_size: Max packet size for input reports
+ *
+ * Set @max_rx_size for I2C RxDMA max input size control feature.
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_set_rx_max_size(struct thc_device *dev, u32 max_rx_size)
+{
+ u32 val;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ if (!max_rx_size)
+ return -EOPNOTSUPP;
+
+ ret = regmap_read(dev->thc_regmap, THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET, &val);
+ if (ret)
+ return ret;
+
+ val |= FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_I2C_MAX_SIZE, max_rx_size);
+
+ ret = regmap_write(dev->thc_regmap, THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET, val);
+ if (ret)
+ return ret;
+
+ dev->i2c_max_rx_size = max_rx_size;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_set_rx_max_size, "INTEL_THC");
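As a cross-check of the field layout, a standalone sketch (not part of the patch) of the packing this function and thc_i2c_set_rx_int_delay() perform, using the bit positions from the GENMASK definitions added to intel-thc-hw.h below; the two values are hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int max_size = 256;	/* hypothetical device max input length */
	unsigned int interval = 100;	/* 1 ms expressed in 10 us ticks */
	unsigned int val = 0;

	val |= (max_size & 0xffff);		/* I2C_MAX_SIZE, bits 15:0 */
	val |= (interval & 0xff) << 16;		/* I2C_INTERVAL, bits 23:16 */
	printf("opcode register = 0x%08x\n", val);	/* 0x00640100 */
	return 0;
}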
+
+/**
+ * thc_i2c_rx_max_size_enable - Enable I2C Rx max input size control
+ * @dev: The pointer of THC private device context
+ * @enable: Enable max input size control or not
+ *
+ * Enable or disable I2C RxDMA max input size control feature.
+ * Max input size control only can be enabled after max input size
+ * was set by thc_i2c_set_rx_max_size().
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_rx_max_size_enable(struct thc_device *dev, bool enable)
+{
+ u32 mask = THC_M_PRT_SPI_ICRRD_OPCODE_I2C_MAX_SIZE_EN;
+ u32 val = enable ? mask : 0;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ if (!dev->i2c_max_rx_size)
+ return -EOPNOTSUPP;
+
+ ret = regmap_write_bits(dev->thc_regmap, THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET, mask, val);
+ if (ret)
+ return ret;
+
+ dev->i2c_max_rx_size_en = enable;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_rx_max_size_enable, "INTEL_THC");
+
+/**
+ * thc_i2c_set_rx_int_delay - Set I2C Rx input interrupt delay value
+ * @dev: The pointer of THC private device context
+ * @delay_us: Interrupt delay value, unit is us
+ *
+ * Set @delay_us for I2C RxDMA input interrupt delay feature.
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_set_rx_int_delay(struct thc_device *dev, u32 delay_us)
+{
+ u32 val;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ if (!delay_us)
+ return -EOPNOTSUPP;
+
+ ret = regmap_read(dev->thc_regmap, THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET, &val);
+ if (ret)
+ return ret;
+
+ /* THC hardware counts in 10 us units */
+ val |= FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_I2C_INTERVAL, DIV_ROUND_UP(delay_us, 10));
+
+ ret = regmap_write(dev->thc_regmap, THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET, val);
+ if (ret)
+ return ret;
+
+ dev->i2c_int_delay_us = delay_us;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_set_rx_int_delay, "INTEL_THC");
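A standalone sketch of the 10 us quantization used above; since DIV_ROUND_UP rounds up, any nonzero delay programs at least one hardware tick (the macro expansion mirrors the kernel's):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int delays_us[] = { 9, 10, 1000, 1005 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("%u us -> %u ticks\n", delays_us[i],
		       DIV_ROUND_UP(delays_us[i], 10));
	/* 9 -> 1, 10 -> 1, 1000 -> 100, 1005 -> 101 */
	return 0;
}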
+
+/**
+ * thc_i2c_rx_int_delay_enable - Enable I2C Rx interrupt delay
+ * @dev: The pointer of THC private device context
+ * @enable: Enable interrupt delay or not
+ *
+ * Enable or disable the I2C RxDMA input interrupt delay feature.
+ * Input interrupt delay can only be enabled after the interrupt delay value
+ * was set by thc_i2c_set_rx_int_delay().
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_rx_int_delay_enable(struct thc_device *dev, bool enable)
+{
+ u32 mask = THC_M_PRT_SPI_ICRRD_OPCODE_I2C_INTERVAL_EN;
+ u32 val = enable ? mask : 0;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ if (!dev->i2c_int_delay_us)
+ return -EOPNOTSUPP;
+
+ ret = regmap_write_bits(dev->thc_regmap, THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET, mask, val);
+ if (ret)
+ return ret;
+
+ dev->i2c_int_delay_en = enable;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_rx_int_delay_enable, "INTEL_THC");
+
MODULE_AUTHOR("Xinpeng Sun <xinpeng.sun@intel.com>");
MODULE_AUTHOR("Even Xu <even.xu@intel.com>");
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h
index 0517fee2c668..0db435335e24 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h
@@ -9,6 +9,7 @@
#include <linux/workqueue.h>
#include "intel-thc-dma.h"
+#include "intel-thc-wot.h"
#define THC_REGMAP_COMMON_OFFSET 0x10
#define THC_REGMAP_MMIO_OFFSET 0x1000
@@ -52,16 +53,21 @@ enum thc_int_type {
* struct thc_device - THC private device struct
* @thc_regmap: MMIO regmap structure for accessing THC registers
* @mmio_addr: MMIO registers address
- * @thc_bus_lock: mutex locker for THC config
- * @port_type: port type of THC port instance
+ * @thc_bus_lock: Mutex locker for THC config
+ * @port_type: Port type of THC port instance
* @pio_int_supported: PIO interrupt supported flag
* @dma_ctx: DMA specific data
- * @write_complete_wait: signal event for DMA write complete
- * @swdma_complete_wait: signal event for SWDMA sequence complete
- * @write_done: bool value that indicates if DMA write is done
- * @swdma_done: bool value that indicates if SWDMA swquence is done
- * @perf_limit: the delay between read operation and write operation
- * @i2c_subip_regs: the copy of THC I2C sub-system registers for resuming restore
+ * @wot: THC Wake-on-Touch data
+ * @write_complete_wait: Signal event for DMA write complete
+ * @swdma_complete_wait: Signal event for SWDMA sequence complete
+ * @write_done: Bool value that indicates if DMA write is done
+ * @swdma_done: Bool value that indicates if SWDMA sequence is done
+ * @perf_limit: The delay between read operation and write operation
+ * @i2c_subip_regs: The copy of THC I2C sub-system registers to restore on resume
+ * @i2c_max_rx_size: I2C Rx transfer max input size
+ * @i2c_int_delay_us: I2C input interrupt delay, unit is us
+ * @i2c_max_rx_size_en: Bool value that indicates whether I2C max input size control is enabled
+ * @i2c_int_delay_en: Bool value that indicates whether I2C input interrupt delay is enabled
*/
struct thc_device {
struct device *dev;
@@ -73,6 +79,8 @@ struct thc_device {
struct thc_dma_context *dma_ctx;
+ struct thc_wot wot;
+
wait_queue_head_t write_complete_wait;
wait_queue_head_t swdma_complete_wait;
bool write_done;
@@ -81,6 +89,11 @@ struct thc_device {
u32 perf_limit;
u32 *i2c_subip_regs;
+
+ u32 i2c_max_rx_size;
+ u32 i2c_int_delay_us;
+ bool i2c_max_rx_size_en;
+ bool i2c_int_delay_en;
};
struct thc_device *thc_dev_init(struct device *device, void __iomem *mem_addr);
@@ -112,5 +125,9 @@ int thc_i2c_subip_init(struct thc_device *dev, const u32 target_address,
const u32 speed, const u32 hcnt, const u32 lcnt);
int thc_i2c_subip_regs_save(struct thc_device *dev);
int thc_i2c_subip_regs_restore(struct thc_device *dev);
+int thc_i2c_set_rx_max_size(struct thc_device *dev, u32 max_rx_size);
+int thc_i2c_rx_max_size_enable(struct thc_device *dev, bool enable);
+int thc_i2c_set_rx_int_delay(struct thc_device *dev, u32 delay_us);
+int thc_i2c_rx_int_delay_enable(struct thc_device *dev, bool enable);
#endif /* _INTEL_THC_DEV_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
index 8f97e71df7f4..82b8854843e0 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
@@ -712,6 +712,28 @@ static int thc_swdma_read_start(struct thc_device *dev, void *write_buff,
thc_reset_dma_settings(dev);
+ /*
+ * The max input size control feature is only available for RxDMA. It must stay
+ * disabled during a SWDMA operation and be restored to its previous state once
+ * SWDMA is done. The max input size variables in the THC device context track
+ * hardware state and change whenever the feature state changes, so they cannot
+ * record the pre-SWDMA feature state. A temporary variable in the DMA context
+ * is used to record the feature state before the SWDMA operation instead.
+ */
+ if (dev->i2c_max_rx_size_en) {
+ thc_i2c_rx_max_size_enable(dev, false);
+ dev->dma_ctx->rx_max_size_en = true;
+ }
+
+ /*
+ * The interrupt delay feature is in the same situation as the max input size
+ * control feature: its state needs to be recorded before SWDMA.
+ */
+ if (dev->i2c_int_delay_en) {
+ thc_i2c_rx_int_delay_enable(dev, false);
+ dev->dma_ctx->rx_int_delay_en = true;
+ }
+
mask = THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC |
THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN;
val = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC, write_len) |
@@ -754,6 +776,24 @@ static int thc_swdma_read_completion(struct thc_device *dev)
if (ret)
return ret;
+ /*
+ * Restore the max input size control feature to its previous state after SWDMA
+ * if it was enabled before SWDMA, and reset the temporary rx_max_size_en flag
+ * for next time.
+ */
+ if (dev->dma_ctx->rx_max_size_en) {
+ thc_i2c_rx_max_size_enable(dev, true);
+ dev->dma_ctx->rx_max_size_en = false;
+ }
+
+ /*
+ * Restore the input interrupt delay feature to its previous state after SWDMA
+ * if it was enabled before SWDMA, and reset the temporary rx_int_delay_en flag
+ * for next time.
+ */
+ if (dev->dma_ctx->rx_int_delay_en) {
+ thc_i2c_rx_int_delay_enable(dev, true);
+ dev->dma_ctx->rx_int_delay_en = false;
+ }
+
thc_reset_dma_settings(dev);
dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_RXDMA2]);
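The save/disable/restore idiom used in the two hunks above, reduced to a standalone sketch (names are illustrative, not the driver's API): the live flag mirrors hardware state, so a separate snapshot flag is needed to remember what to restore.

#include <stdbool.h>
#include <stdio.h>

struct feature {
	bool enabled;		/* mirrors the live hardware state */
	bool saved_enabled;	/* snapshot taken before SWDMA starts */
};

static void swdma_begin(struct feature *f)
{
	if (f->enabled) {
		f->enabled = false;	/* feature must be off during SWDMA */
		f->saved_enabled = true;
	}
}

static void swdma_end(struct feature *f)
{
	if (f->saved_enabled) {
		f->enabled = true;		/* restore the pre-SWDMA state */
		f->saved_enabled = false;	/* reset for the next SWDMA */
	}
}

int main(void)
{
	struct feature max_size = { .enabled = true };

	swdma_begin(&max_size);
	printf("during SWDMA: %d\n", max_size.enabled);	/* 0 */
	swdma_end(&max_size);
	printf("after SWDMA: %d\n", max_size.enabled);	/* 1 */
	return 0;
}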
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h
index ca923ff2bef9..78917400492c 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h
@@ -27,7 +27,7 @@
/**
* THC DMA channels:
- * @THC_RXDMA1: legacy channel, reserved for raw data reading
+ * @THC_RXDMA1: Legacy channel, reserved for raw data reading
* @THC_RXDMA2: DMA to read HID data from touch device
* @THC_TXDMA: DMA to write to touch device
* @THC_SWDMA: SW triggered DMA to write and read from touch device
@@ -42,11 +42,11 @@ enum thc_dma_channel {
/**
* THC DMA Physical Memory Descriptor (PRD)
- * @dest_addr: bit[53:0], destination address in system memory
- * @int_on_completion: bit[63], if set, thc will trigger interrupt to driver
- * @len: bit[87:64], length of this entry
- * @end_of_prd: bit[88], if set, this entry is last one of current PRD table
- * @hw_status: bit[90:89], hw status bits
+ * @dest_addr: Bit[53:0], destination address in system memory
+ * @int_on_completion: Bit[63], if set, THC will trigger an interrupt to the driver
+ * @len: Bit[87:64], length of this entry
+ * @end_of_prd: Bit[88], if set, this entry is the last one of the current PRD table
+ * @hw_status: Bit[90:89], hardware status bits
*/
struct thc_prd_entry {
u64 dest_addr : 54;
@@ -88,14 +88,14 @@ struct thc_prd_table {
* struct thc_dma_configuration - THC DMA configure
* @dma_channel: DMA channel for current DMA configuration
* @prd_tbls_dma_handle: DMA buffer handle
- * @dir: direction of DMA for this config
+ * @dir: Direction of DMA for this config
* @prd_tbls: PRD tables for current DMA
- * @sgls: array of pointers to scatter-gather lists
- * @sgls_nent: actual number of entries per sg list
- * @prd_tbl_num: actual number of PRD tables
- * @max_packet_size: size of the buffer needed for 1 DMA message (1 PRD table)
+ * @sgls: Array of pointers to scatter-gather lists
+ * @sgls_nent: Actual number of entries per scatter-gather list
+ * @prd_tbl_num: Actual number of PRD tables
+ * @max_packet_size: Size of the buffer needed for 1 DMA message (1 PRD table)
* @prd_base_addr_high: High 32bits memory address where stores PRD table
- * @prd_base_addr_low: low 32bits memory address where stores PRD table
+ * @prd_base_addr_low: Low 32bits memory address where stores PRD table
* @prd_cntrl: PRD control register value
* @dma_cntrl: DMA control register value
*/
@@ -117,13 +117,21 @@ struct thc_dma_configuration {
u32 dma_cntrl;
};
-/*
- * THC DMA context
- * Store all THC Channel configures
+/**
+ * struct thc_dma_context - THC DMA context
+ * @dma_config: Array of all THC channel configurations
+ * @use_write_interrupts: Indicate whether TxDMA uses interrupts or polling
+ * @rx_max_size_en: Temp flag to indicate whether the THC I2C Rx max input size
+ * control feature is enabled, only used during SWDMA operations.
+ * @rx_int_delay_en: Temp flag to indicate whether the THC I2C Rx interrupt delay
+ * feature is enabled, only used during SWDMA operations.
*/
struct thc_dma_context {
struct thc_dma_configuration dma_config[MAX_THC_DMA_CHANNEL];
u8 use_write_interrupts;
+
+ bool rx_max_size_en;
+ bool rx_int_delay_en;
};
struct thc_device;
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h
index 6729c4c25dab..413730f8e3f7 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h
@@ -399,6 +399,11 @@
#define THC_M_PRT_SPI_ICRRD_OPCODE_SPI_DIO GENMASK(23, 16)
#define THC_M_PRT_SPI_ICRRD_OPCODE_SPI_QIO GENMASK(15, 8)
+#define THC_M_PRT_SPI_ICRRD_OPCODE_I2C_MAX_SIZE GENMASK(15, 0)
+#define THC_M_PRT_SPI_ICRRD_OPCODE_I2C_INTERVAL GENMASK(23, 16)
+#define THC_M_PRT_SPI_ICRRD_OPCODE_I2C_INTERVAL_EN BIT(30)
+#define THC_M_PRT_SPI_ICRRD_OPCODE_I2C_MAX_SIZE_EN BIT(31)
+
#define THC_M_PRT_INT_EN_SIPE BIT(0)
#define THC_M_PRT_INT_EN_SBO BIT(1)
#define THC_M_PRT_INT_EN_SIDR BIT(2)
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.c
new file mode 100644
index 000000000000..1291b4ea2cd8
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Intel Corporation */
+
+#include <linux/acpi.h>
+#include <linux/pm_wakeirq.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-wot.h"
+
+/**
+ * thc_wot_config - Query and configure wake-on-touch feature
+ * @thc_dev: Pointer to the thc_device structure
+ * @gpio_map: Pointer to the ACPI GPIO resource mapping structure
+ *
+ * The THC ACPI device only provides _CRS with GpioInt() resources and doesn't
+ * contain a _DSD to map this GPIO resource, so this function first registers
+ * the wake GPIO mapping manually, then queries the wake-on-touch GPIO resource
+ * from ACPI. If the resource exists and is wake-capable, configure the driver
+ * to enable it; otherwise, return immediately.
+ * This function will not return an error, as the feature doesn't impact major
+ * functionality.
+ */
+void thc_wot_config(struct thc_device *thc_dev, const struct acpi_gpio_mapping *gpio_map)
+{
+ struct acpi_device *adev;
+ struct thc_wot *wot;
+ int ret;
+
+ if (!thc_dev)
+ return;
+
+ adev = ACPI_COMPANION(thc_dev->dev);
+ if (!adev)
+ return;
+
+ wot = &thc_dev->wot;
+
+ ret = acpi_dev_add_driver_gpios(adev, gpio_map);
+ if (ret) {
+ dev_warn(thc_dev->dev, "Can't add wake GPIO resource, ret = %d\n", ret);
+ return;
+ }
+
+ wot->gpio_irq = acpi_dev_gpio_irq_wake_get_by(adev, "wake-on-touch", 0,
+ &wot->gpio_irq_wakeable);
+ if (wot->gpio_irq <= 0) {
+ dev_warn(thc_dev->dev, "Can't find wake GPIO resource\n");
+ return;
+ }
+
+ if (!wot->gpio_irq_wakeable) {
+ dev_warn(thc_dev->dev, "GPIO resource isn't wakeable\n");
+ return;
+ }
+
+ ret = device_init_wakeup(thc_dev->dev, true);
+ if (ret) {
+ dev_warn(thc_dev->dev, "Failed to init wake up.\n");
+ return;
+ }
+
+ ret = dev_pm_set_dedicated_wake_irq(thc_dev->dev, wot->gpio_irq);
+ if (ret) {
+ dev_warn(thc_dev->dev, "Failed to set wake up IRQ.\n");
+ device_init_wakeup(thc_dev->dev, false);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(thc_wot_config, "INTEL_THC");
+
+/**
+ * thc_wot_unconfig - Unconfig wake-on-touch feature
+ * @thc_dev: Pointer to the thc_device structure
+ *
+ * Configure the driver to disable wake-on-touch and release the ACPI resource.
+ */
+void thc_wot_unconfig(struct thc_device *thc_dev)
+{
+ struct acpi_device *adev;
+
+ if (!thc_dev)
+ return;
+
+ adev = ACPI_COMPANION(thc_dev->dev);
+ if (!adev)
+ return;
+
+ if (thc_dev->wot.gpio_irq_wakeable)
+ device_init_wakeup(thc_dev->dev, false);
+
+ if (thc_dev->wot.gpio_irq > 0) {
+ dev_pm_clear_wake_irq(thc_dev->dev);
+ acpi_dev_remove_driver_gpios(adev);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(thc_wot_unconfig, "INTEL_THC");
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.h
new file mode 100644
index 000000000000..6c700621b242
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Intel Corporation */
+
+#ifndef _INTEL_THC_WOT_H_
+#define _INTEL_THC_WOT_H_
+
+#include <linux/types.h>
+
+#include <linux/gpio/consumer.h>
+
+/**
+ * struct thc_wot - THC Wake-on-Touch data structure
+ * @gpio_irq: GPIO IRQ number for wake-on-touch
+ * @gpio_irq_wakeable: Indicate whether the GPIO IRQ is wakeable or not
+ */
+struct thc_wot {
+ int gpio_irq;
+ bool gpio_irq_wakeable;
+};
+
+struct thc_device;
+
+void thc_wot_config(struct thc_device *thc_dev, const struct acpi_gpio_mapping *gpio_map);
+void thc_wot_unconfig(struct thc_device *thc_dev);
+
+#endif /* _INTEL_THC_WOT_H_ */
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index 8dfd2c554a27..614a20b62023 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -210,9 +210,7 @@ struct pidff_device {
*/
static s32 pidff_clamp(s32 i, struct hid_field *field)
{
- s32 clamped = clamp(i, field->logical_minimum, field->logical_maximum);
- pr_debug("clamped from %d to %d", i, clamped);
- return clamped;
+ return (s32)clamp(i, field->logical_minimum, field->logical_maximum);
}
/*
@@ -229,8 +227,10 @@ static int pidff_rescale(int i, int max, struct hid_field *field)
*/
static int pidff_rescale_signed(int i, struct hid_field *field)
{
- if (i > 0) return i * field->logical_maximum / S16_MAX;
- if (i < 0) return i * field->logical_minimum / S16_MIN;
+ if (i > 0)
+ return i * field->logical_maximum / S16_MAX;
+ if (i < 0)
+ return i * field->logical_minimum / S16_MIN;
return 0;
}
@@ -241,11 +241,11 @@ static u32 pidff_rescale_time(u16 time, struct hid_field *field)
{
u32 scaled_time = time;
int exponent = field->unit_exponent;
- pr_debug("time field exponent: %d\n", exponent);
- for (;exponent < FF_TIME_EXPONENT; exponent++)
+ pr_debug("time field exponent: %d\n", exponent);
+ for (; exponent < FF_TIME_EXPONENT; exponent++)
scaled_time *= 10;
- for (;exponent > FF_TIME_EXPONENT; exponent--)
+ for (; exponent > FF_TIME_EXPONENT; exponent--)
scaled_time /= 10;
pr_debug("time calculated from %d to %d\n", time, scaled_time);
@@ -275,8 +275,8 @@ static void pidff_set_signed(struct pidff_usage *usage, s16 value)
static void pidff_set_time(struct pidff_usage *usage, u16 time)
{
- u32 modified_time = pidff_rescale_time(time, usage->field);
- usage->value[0] = pidff_clamp(modified_time, usage->field);
+ usage->value[0] = pidff_clamp(
+ pidff_rescale_time(time, usage->field), usage->field);
}
static void pidff_set_duration(struct pidff_usage *usage, u16 duration)
@@ -332,6 +332,7 @@ static int pidff_needs_set_envelope(struct ff_envelope *envelope,
struct ff_envelope *old)
{
bool needs_new_envelope;
+
needs_new_envelope = envelope->attack_level != 0 ||
envelope->fade_level != 0 ||
envelope->attack_length != 0 ||
@@ -568,7 +569,7 @@ static void pidff_set_device_control(struct pidff_device *pidff, int field)
hid_dbg(pidff->hid, "DEVICE_CONTROL is a bitmask\n");
/* Clear current bitmask */
- for(i = 0; i < sizeof(pidff_device_control); i++) {
+ for (i = 0; i < sizeof(pidff_device_control); i++) {
index = pidff->control_id[i];
if (index < 1)
continue;
@@ -619,7 +620,7 @@ static void pidff_fetch_pool(struct pidff_device *pidff)
struct hid_device *hid = pidff->hid;
/* Repeat if PID_SIMULTANEOUS_MAX < 2 to make sure it's correct */
- for(i = 0; i < 20; i++) {
+ for (i = 0; i < 20; i++) {
hid_hw_request(hid, pidff->reports[PID_POOL], HID_REQ_GET_REPORT);
hid_hw_wait(hid);
@@ -715,6 +716,7 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
static int pidff_playback(struct input_dev *dev, int effect_id, int value)
{
struct pidff_device *pidff = dev->ff->private;
+
pidff_playback_pid(pidff, pidff->pid_id[effect_id], value);
return 0;
}
@@ -849,7 +851,7 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
case FF_INERTIA:
case FF_FRICTION:
if (!old) {
- switch(effect->type) {
+ switch (effect->type) {
case FF_SPRING:
type_id = PID_SPRING;
break;
@@ -940,7 +942,7 @@ static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
struct hid_report *report, int count, int strict)
{
if (!report) {
- pr_debug("pidff_find_fields, null report\n");
+ pr_debug("%s, null report\n", __func__);
return -1;
}
@@ -974,13 +976,11 @@ static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
pr_debug("Delay field not found, but that's OK\n");
pr_debug("Setting MISSING_DELAY quirk\n");
return_value |= HID_PIDFF_QUIRK_MISSING_DELAY;
- }
- else if (!found && table[k] == pidff_set_condition[PID_PARAM_BLOCK_OFFSET]) {
+ } else if (!found && table[k] == pidff_set_condition[PID_PARAM_BLOCK_OFFSET]) {
pr_debug("PBO field not found, but that's OK\n");
pr_debug("Setting MISSING_PBO quirk\n");
return_value |= HID_PIDFF_QUIRK_MISSING_PBO;
- }
- else if (!found && strict) {
+ } else if (!found && strict) {
pr_debug("failed to locate %d\n", k);
return -1;
}
@@ -1069,7 +1069,7 @@ static struct hid_field *pidff_find_special_field(struct hid_report *report,
int usage, int enforce_min)
{
if (!report) {
- pr_debug("pidff_find_special_field, null report\n");
+ pr_debug("%s, null report\n", __func__);
return NULL;
}
@@ -1081,10 +1081,9 @@ static struct hid_field *pidff_find_special_field(struct hid_report *report,
if (!enforce_min ||
report->field[i]->logical_minimum == 1)
return report->field[i];
- else {
- pr_err("logical_minimum is not 1 as it should be\n");
- return NULL;
- }
+
+ pr_err("logical_minimum is not 1 as it should be\n");
+ return NULL;
}
}
return NULL;
@@ -1207,6 +1206,7 @@ static int pidff_find_effects(struct pidff_device *pidff,
for (i = 0; i < sizeof(pidff_effect_types); i++) {
int pidff_type = pidff->type_id[i];
+
if (pidff->set_effect_type->usage[pidff_type].hid !=
pidff->create_new_effect_type->usage[pidff_type].hid) {
hid_err(pidff->hid,
diff --git a/drivers/hid/usbhid/hid-pidff.h b/drivers/hid/usbhid/hid-pidff.h
index dda571e0a5bd..a53a8b436baa 100644
--- a/drivers/hid/usbhid/hid-pidff.h
+++ b/drivers/hid/usbhid/hid-pidff.h
@@ -9,8 +9,7 @@
/* Delay field (0xA7) missing. Skip it during set effect report upload */
#define HID_PIDFF_QUIRK_MISSING_DELAY BIT(0)
-/* Missing Paramter block offset (0x23). Skip it during SET_CONDITION
- report upload */
+/* Missing Parameter block offset (0x23). Skip it during SET_CONDITION upload */
#define HID_PIDFF_QUIRK_MISSING_PBO BIT(1)
/* Initialise device control field even if logical_minimum != 1 */
diff --git a/drivers/hv/mshv_eventfd.c b/drivers/hv/mshv_eventfd.c
index 48c514da34eb..806674722868 100644
--- a/drivers/hv/mshv_eventfd.c
+++ b/drivers/hv/mshv_eventfd.c
@@ -368,6 +368,14 @@ static void mshv_irqfd_queue_proc(struct file *file, wait_queue_head_t *wqh,
container_of(polltbl, struct mshv_irqfd, irqfd_polltbl);
irqfd->irqfd_wqh = wqh;
+
+ /*
+ * TODO: Ensure there isn't already an exclusive, priority waiter, e.g.
+ * that the irqfd isn't already bound to another partition. Only the
+ * first exclusive waiter encountered will be notified, and
+ * add_wait_queue_priority() doesn't enforce exclusivity.
+ */
+ irqfd->irqfd_wait.flags |= WQ_FLAG_EXCLUSIVE;
add_wait_queue_priority(wqh, &irqfd->irqfd_wait);
}
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 5f2541c11fe9..8cefa14e1633 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -1704,12 +1704,15 @@ static int adt7475_pwm_properties_parse_reference_args(struct fwnode_handle *fwn
if (ret)
return ret;
- if (rargs.nargs != 4) {
+ if (rargs.nargs != 3 && rargs.nargs != 4) {
fwnode_handle_put(rargs.fwnode);
return -EINVAL;
}
- for (i = 0; i < 4; i++)
+ /* Let duty_cycle default to period */
+ args[3] = rargs.args[1];
+
+ for (i = 0; i < rargs.nargs; i++)
args[i] = rargs.args[i];
ret = _adt7475_pwm_properties_parse_args(args, cfg);
@@ -1724,11 +1727,22 @@ static int adt7475_pwm_properties_parse_args(struct fwnode_handle *fwnode,
{
int ret;
u32 args[4] = {};
+ size_t n_vals = fwnode_property_count_u32(fwnode, "pwms");
+
+ if (n_vals != 3 && n_vals != 4)
+ return -EOVERFLOW;
- ret = fwnode_property_read_u32_array(fwnode, "pwms", args, ARRAY_SIZE(args));
+ ret = fwnode_property_read_u32_array(fwnode, "pwms", args, n_vals);
if (ret)
return ret;
+ /*
+ * If there is no item to define the duty_cycle, default it to the
+ * period.
+ */
+ if (n_vals == 3)
+ args[3] = args[1];
+
return _adt7475_pwm_properties_parse_args(args, cfg);
}
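A standalone sketch (not the driver's code) of the cell handling in the two hunks above: the second cell carries the period and the optional fourth the duty cycle, so a 3-cell "pwms" specifier falls back to duty_cycle == period. The cell values here are hypothetical:

#include <stdio.h>

static void parse_pwms(const unsigned int *cells, int n, unsigned int out[4])
{
	int i;

	out[3] = cells[1];	/* duty_cycle defaults to the period */
	for (i = 0; i < n; i++)
		out[i] = cells[i];
}

int main(void)
{
	unsigned int three[3] = { 0, 25000, 0 };	/* no duty cycle cell */
	unsigned int four[4] = { 0, 25000, 0, 12500 };	/* explicit duty cycle */
	unsigned int out[4];

	parse_pwms(three, 3, out);
	printf("duty=%u\n", out[3]);	/* 25000 (defaulted) */
	parse_pwms(four, 4, out);
	printf("duty=%u\n", out[3]);	/* 12500 */
	return 0;
}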
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 13a789cc85d2..d5f864b360b0 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -26,6 +26,7 @@
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/thermal.h>
#include <dt-bindings/pwm/pwm.h>
@@ -126,6 +127,10 @@ module_param(init, int, 0444);
struct amc6821_data {
struct regmap *regmap;
struct mutex update_lock;
+ unsigned long fan_state;
+ unsigned long fan_max_state;
+ unsigned int *fan_cooling_levels;
+ enum pwm_polarity pwm_polarity;
};
/*
@@ -804,6 +809,65 @@ static const struct hwmon_chip_info amc6821_chip_info = {
.info = amc6821_info,
};
+static int amc6821_set_sw_dcy(struct amc6821_data *data, u8 duty_cycle)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, AMC6821_REG_DCY, duty_cycle);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(data->regmap, AMC6821_REG_CONF1,
+ AMC6821_CONF1_FDRC0 | AMC6821_CONF1_FDRC1, 0);
+}
+
+static int amc6821_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ struct amc6821_data *data = cdev->devdata;
+
+ if (!data)
+ return -EINVAL;
+
+ *state = data->fan_max_state;
+
+ return 0;
+}
+
+static int amc6821_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ struct amc6821_data *data = cdev->devdata;
+
+ if (!data)
+ return -EINVAL;
+
+ *state = data->fan_state;
+
+ return 0;
+}
+
+static int amc6821_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
+{
+ struct amc6821_data *data = cdev->devdata;
+ int ret;
+
+ if (!data || state > data->fan_max_state)
+ return -EINVAL;
+
+ ret = amc6821_set_sw_dcy(data, data->fan_cooling_levels[state]);
+ if (ret)
+ return ret;
+
+ data->fan_state = state;
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops amc6821_cooling_ops = {
+ .get_max_state = amc6821_get_max_state,
+ .get_cur_state = amc6821_get_cur_state,
+ .set_cur_state = amc6821_set_cur_state,
+};
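A standalone sketch of the state-to-duty lookup these ops perform: the thermal core passes a state index, and the driver answers with the PWM duty cycle from the cooling-levels table (the values below are hypothetical; in the driver they come from the DT property parsed further down):

#include <stdio.h>

int main(void)
{
	unsigned int cooling_levels[] = { 0, 64, 128, 192, 255 };	/* hypothetical */
	unsigned long max_state = 4;	/* number of levels - 1 */
	unsigned long state;

	for (state = 0; state <= max_state; state++)
		printf("state %lu -> duty 0x%02x\n", state, cooling_levels[state]);
	return 0;
}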
+
/* Return 0 if detection is successful, -ENODEV otherwise */
static int amc6821_detect(struct i2c_client *client, struct i2c_board_info *info)
{
@@ -848,11 +912,11 @@ static int amc6821_detect(struct i2c_client *client, struct i2c_board_info *info
return 0;
}
-static enum pwm_polarity amc6821_pwm_polarity(struct i2c_client *client)
+static enum pwm_polarity amc6821_pwm_polarity(struct i2c_client *client,
+ struct device_node *fan_np)
{
enum pwm_polarity polarity = PWM_POLARITY_NORMAL;
struct of_phandle_args args;
- struct device_node *fan_np;
/*
* For backward compatibility, the pwminv module parameter takes
@@ -863,10 +927,6 @@ static enum pwm_polarity amc6821_pwm_polarity(struct i2c_client *client)
if (pwminv > 0)
return PWM_POLARITY_INVERSED;
- fan_np = of_get_child_by_name(client->dev.of_node, "fan");
- if (!fan_np)
- return PWM_POLARITY_NORMAL;
-
if (of_parse_phandle_with_args(fan_np, "pwms", "#pwm-cells", 0, &args))
goto out;
of_node_put(args.np);
@@ -877,10 +937,34 @@ static enum pwm_polarity amc6821_pwm_polarity(struct i2c_client *client)
if (args.args[1] & PWM_POLARITY_INVERTED)
polarity = PWM_POLARITY_INVERSED;
out:
- of_node_put(fan_np);
return polarity;
}
+static int amc6821_of_fan_read_data(struct i2c_client *client,
+ struct amc6821_data *data,
+ struct device_node *fan_np)
+{
+ int num;
+
+ data->pwm_polarity = amc6821_pwm_polarity(client, fan_np);
+
+ num = of_property_count_u32_elems(fan_np, "cooling-levels");
+ if (num <= 0)
+ return 0;
+
+ data->fan_max_state = num - 1;
+
+ data->fan_cooling_levels = devm_kcalloc(&client->dev, num,
+ sizeof(u32),
+ GFP_KERNEL);
+
+ if (!data->fan_cooling_levels)
+ return -ENOMEM;
+
+ return of_property_read_u32_array(fan_np, "cooling-levels",
+ data->fan_cooling_levels, num);
+}
+
static int amc6821_init_client(struct i2c_client *client, struct amc6821_data *data)
{
struct regmap *regmap = data->regmap;
@@ -902,7 +986,7 @@ static int amc6821_init_client(struct i2c_client *client, struct amc6821_data *d
return err;
regval = AMC6821_CONF1_START;
- if (amc6821_pwm_polarity(client) == PWM_POLARITY_INVERSED)
+ if (data->pwm_polarity == PWM_POLARITY_INVERSED)
regval |= AMC6821_CONF1_PWMINV;
err = regmap_update_bits(regmap, AMC6821_REG_CONF1,
@@ -911,6 +995,14 @@ static int amc6821_init_client(struct i2c_client *client, struct amc6821_data *d
regval);
if (err)
return err;
+
+ /* Software DCY-control mode with fan enabled when cooling-levels present */
+ if (data->fan_cooling_levels) {
+ err = amc6821_set_sw_dcy(data,
+ data->fan_cooling_levels[data->fan_max_state]);
+ if (err)
+ return err;
+ }
}
return 0;
}
@@ -944,6 +1036,7 @@ static int amc6821_probe(struct i2c_client *client)
struct amc6821_data *data;
struct device *hwmon_dev;
struct regmap *regmap;
+ struct device_node *fan_np __free(device_node) = NULL;
int err;
data = devm_kzalloc(dev, sizeof(struct amc6821_data), GFP_KERNEL);
@@ -956,6 +1049,14 @@ static int amc6821_probe(struct i2c_client *client)
"Failed to initialize regmap\n");
data->regmap = regmap;
+ fan_np = of_get_child_by_name(dev->of_node, "fan");
+ if (fan_np) {
+ err = amc6821_of_fan_read_data(client, data, fan_np);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Failed to read fan device tree data\n");
+ }
+
err = amc6821_init_client(client, data);
if (err)
return err;
@@ -970,7 +1071,15 @@ static int amc6821_probe(struct i2c_client *client)
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data, &amc6821_chip_info,
amc6821_groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ if (IS_ERR(hwmon_dev))
+ return dev_err_probe(dev, PTR_ERR(hwmon_dev),
+ "Failed to initialize hwmon\n");
+
+ if (IS_ENABLED(CONFIG_THERMAL) && fan_np && data->fan_cooling_levels)
+ return PTR_ERR_OR_ZERO(devm_thermal_of_cooling_device_register(dev,
+ fan_np, client->name, data, &amc6821_cooling_ops));
+
+ return 0;
}
static const struct i2c_device_id amc6821_id[] = {
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index e0a95197c71b..4ac554731e98 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -165,7 +165,9 @@ enum board_family {
family_amd_400_series,
family_amd_500_series,
family_amd_600_series,
+ family_amd_800_series,
family_intel_300_series,
+ family_intel_400_series,
family_intel_600_series
};
@@ -259,6 +261,20 @@ static const struct ec_sensor_info sensors_family_amd_600[] = {
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
};
+static const struct ec_sensor_info sensors_family_amd_800[] = {
+ [ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x30),
+ [ec_sensor_temp_cpu_package] =
+ EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
+ [ec_sensor_temp_mb] =
+ EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x32),
+ [ec_sensor_temp_vrm] =
+ EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x33),
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x36),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
+};
+
static const struct ec_sensor_info sensors_family_intel_300[] = {
[ec_sensor_temp_chipset] =
EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
@@ -279,6 +295,20 @@ static const struct ec_sensor_info sensors_family_intel_300[] = {
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
};
+static const struct ec_sensor_info sensors_family_intel_400[] = {
+ [ec_sensor_temp_chipset] =
+ EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
+ [ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x3b),
+ [ec_sensor_temp_mb] =
+ EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x3c),
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
+ [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
+ [ec_sensor_fan_vrm_hs] = EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2),
+};
+
static const struct ec_sensor_info sensors_family_intel_600[] = {
[ec_sensor_temp_t_sensor] =
EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
@@ -362,6 +392,14 @@ static const struct ec_board_info board_info_pro_art_x670E_creator_wifi = {
.family = family_amd_600_series,
};
+static const struct ec_board_info board_info_pro_art_x870E_creator_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_800_series,
+};
+
static const struct ec_board_info board_info_pro_art_b550_creator = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR |
@@ -498,6 +536,18 @@ static const struct ec_board_info board_info_strix_z390_f_gaming = {
.family = family_intel_300_series,
};
+static const struct ec_board_info board_info_strix_z490_f_gaming = {
+ .sensors = SENSOR_TEMP_CHIPSET |
+ SENSOR_TEMP_CPU |
+ SENSOR_TEMP_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT |
+ SENSOR_FAN_VRM_HS,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_intel_400_series,
+};
+
static const struct ec_board_info board_info_strix_z690_a_gaming_wifi_d4 = {
.sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
.mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
@@ -548,6 +598,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_pro_art_x570_creator_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X670E-CREATOR WIFI",
&board_info_pro_art_x670E_creator_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X870E-CREATOR WIFI",
+ &board_info_pro_art_x870E_creator_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt B550-CREATOR",
&board_info_pro_art_b550_creator),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE",
@@ -586,6 +638,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_strix_x570_i_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z390-F GAMING",
&board_info_strix_z390_f_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z490-F GAMING",
+ &board_info_strix_z490_f_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z690-A GAMING WIFI D4",
&board_info_strix_z690_a_gaming_wifi_d4),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME",
@@ -1058,9 +1112,15 @@ static int asus_ec_probe(struct platform_device *pdev)
case family_amd_600_series:
ec_data->sensors_info = sensors_family_amd_600;
break;
+ case family_amd_800_series:
+ ec_data->sensors_info = sensors_family_amd_800;
+ break;
case family_intel_300_series:
ec_data->sensors_info = sensors_family_intel_300;
break;
+ case family_intel_400_series:
+ ec_data->sensors_info = sensors_family_intel_400;
+ break;
case family_intel_600_series:
ec_data->sensors_info = sensors_family_intel_600;
break;
diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
index 35c862eb158b..b7bb325c3ad9 100644
--- a/drivers/hwmon/axi-fan-control.c
+++ b/drivers/hwmon/axi-fan-control.c
@@ -4,9 +4,9 @@
*
* Copyright 2019 Analog Devices Inc.
*/
+#include <linux/adi-axi-common.h>
#include <linux/bits.h>
#include <linux/clk.h>
-#include <linux/fpga/adi-axi-common.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/interrupt.h>
diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
index f8f22b8a67cd..6b5c8f200780 100644
--- a/drivers/hwmon/corsair-psu.c
+++ b/drivers/hwmon/corsair-psu.c
@@ -885,6 +885,7 @@ static const struct hid_device_id corsairpsu_idtable[] = {
{ HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i Series 2023 */
{ HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Legacy and Series 2023 */
{ HID_USB_DEVICE(0x1b1c, 0x1c23) }, /* Corsair HX1200i Series 2023 */
+ { HID_USB_DEVICE(0x1b1c, 0x1c27) }, /* Corsair HX1200i Series 2025 */
{ },
};
MODULE_DEVICE_TABLE(hid, corsairpsu_idtable);
diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
index 234c54956a4b..60809289f816 100644
--- a/drivers/hwmon/emc2305.c
+++ b/drivers/hwmon/emc2305.c
@@ -11,6 +11,9 @@
#include <linux/module.h>
#include <linux/platform_data/emc2305.h>
#include <linux/thermal.h>
+#include <linux/pwm.h>
+#include <linux/of_device.h>
+#include <linux/util_macros.h>
#define EMC2305_REG_DRIVE_FAIL_STATUS 0x27
#define EMC2305_REG_VENDOR 0xfe
@@ -23,6 +26,12 @@
#define EMC2305_TACH_REGS_UNUSE_BITS 3
#define EMC2305_TACH_CNT_MULTIPLIER 0x02
#define EMC2305_TACH_RANGE_MIN 480
+#define EMC2305_DEFAULT_OUTPUT 0x0
+#define EMC2305_DEFAULT_POLARITY 0x0
+#define EMC2305_REG_POLARITY 0x2a
+#define EMC2305_REG_DRIVE_PWM_OUT 0x2b
+#define EMC2305_OPEN_DRAIN 0x0
+#define EMC2305_PUSH_PULL 0x1
#define EMC2305_PWM_DUTY2STATE(duty, max_state, pwm_max) \
DIV_ROUND_CLOSEST((duty) * (max_state), (pwm_max))
@@ -39,6 +48,9 @@
#define EMC2305_REG_FAN_MIN_DRIVE(n) (0x38 + 0x10 * (n))
#define EMC2305_REG_FAN_TACH(n) (0x3e + 0x10 * (n))
+/* Supported base PWM frequencies */
+static const unsigned int base_freq_table[] = { 2441, 4882, 19530, 26000 };
+
enum emc230x_product_id {
EMC2305 = 0x34,
EMC2303 = 0x35,
@@ -89,8 +101,11 @@ struct emc2305_cdev_data {
* @hwmon_dev: hwmon device
* @max_state: maximum cooling state of the cooling device
* @pwm_num: number of PWM channels
+ * @pwm_output_mask: PWM output mask
+ * @pwm_polarity_mask: PWM polarity mask
* @pwm_separate: separate PWM settings for every channel
* @pwm_min: array of minimum PWM per channel
+ * @pwm_freq: array of PWM frequency per channel
* @cdev_data: array of cooling devices data
*/
struct emc2305_data {
@@ -98,8 +113,11 @@ struct emc2305_data {
struct device *hwmon_dev;
u8 max_state;
u8 pwm_num;
+ u8 pwm_output_mask;
+ u8 pwm_polarity_mask;
bool pwm_separate;
u8 pwm_min[EMC2305_PWM_MAX];
+ u16 pwm_freq[EMC2305_PWM_MAX];
struct emc2305_cdev_data cdev_data[EMC2305_PWM_MAX];
};
@@ -281,7 +299,7 @@ static int emc2305_set_pwm(struct device *dev, long val, int channel)
return 0;
}
-static int emc2305_set_single_tz(struct device *dev, int idx)
+static int emc2305_set_single_tz(struct device *dev, struct device_node *fan_node, int idx)
{
struct emc2305_data *data = dev_get_drvdata(dev);
long pwm;
@@ -291,7 +309,7 @@ static int emc2305_set_single_tz(struct device *dev, int idx)
pwm = data->pwm_min[cdev_idx];
data->cdev_data[cdev_idx].cdev =
- devm_thermal_of_cooling_device_register(dev, dev->of_node,
+ devm_thermal_of_cooling_device_register(dev, fan_node,
emc2305_fan_name[idx], data,
&emc2305_cooling_ops);
@@ -299,6 +317,12 @@ static int emc2305_set_single_tz(struct device *dev, int idx)
dev_err(dev, "Failed to register cooling device %s\n", emc2305_fan_name[idx]);
return PTR_ERR(data->cdev_data[cdev_idx].cdev);
}
+
+ if (data->cdev_data[cdev_idx].cur_state > 0)
+ /* Update pwm when temperature is above trips */
+ pwm = EMC2305_PWM_STATE2DUTY(data->cdev_data[cdev_idx].cur_state,
+ data->max_state, EMC2305_FAN_MAX);
+
/* Set minimal PWM speed. */
if (data->pwm_separate) {
ret = emc2305_set_pwm(dev, pwm, cdev_idx);
@@ -312,10 +336,10 @@ static int emc2305_set_single_tz(struct device *dev, int idx)
}
}
data->cdev_data[cdev_idx].cur_state =
- EMC2305_PWM_DUTY2STATE(data->pwm_min[cdev_idx], data->max_state,
+ EMC2305_PWM_DUTY2STATE(pwm, data->max_state,
EMC2305_FAN_MAX);
data->cdev_data[cdev_idx].last_hwmon_state =
- EMC2305_PWM_DUTY2STATE(data->pwm_min[cdev_idx], data->max_state,
+ EMC2305_PWM_DUTY2STATE(pwm, data->max_state,
EMC2305_FAN_MAX);
return 0;
}
@@ -326,10 +350,10 @@ static int emc2305_set_tz(struct device *dev)
int i, ret;
if (!data->pwm_separate)
- return emc2305_set_single_tz(dev, 0);
+ return emc2305_set_single_tz(dev, dev->of_node, 0);
for (i = 0; i < data->pwm_num; i++) {
- ret = emc2305_set_single_tz(dev, i + 1);
+ ret = emc2305_set_single_tz(dev, dev->of_node, i + 1);
if (ret)
return ret;
}
@@ -511,15 +535,85 @@ static int emc2305_identify(struct device *dev)
return 0;
}
+static int emc2305_of_parse_pwm_child(struct device *dev,
+ struct device_node *child,
+ struct emc2305_data *data)
+{
+ u32 ch;
+ int ret;
+ struct of_phandle_args args;
+
+ ret = of_property_read_u32(child, "reg", &ch);
+ if (ret) {
+ dev_err(dev, "missing reg property of %pOFn\n", child);
+ return ret;
+ }
+
+ ret = of_parse_phandle_with_args(child, "pwms", "#pwm-cells", 0, &args);
+
+ if (ret)
+ return ret;
+
+ if (args.args_count > 0) {
+ data->pwm_freq[ch] = find_closest(args.args[0], base_freq_table,
+ ARRAY_SIZE(base_freq_table));
+ } else {
+ data->pwm_freq[ch] = base_freq_table[3];
+ }
+
+ if (args.args_count > 1) {
+ if (args.args[1] == PWM_POLARITY_NORMAL || args.args[1] == PWM_POLARITY_INVERSED)
+ data->pwm_polarity_mask |= args.args[1] << ch;
+ else
+ dev_err(dev, "Wrong PWM polarity config provided: %d\n", args.args[0]);
+ } else {
+ data->pwm_polarity_mask |= PWM_POLARITY_NORMAL << ch;
+ }
+
+ if (args.args_count > 2) {
+ if (args.args[2] == EMC2305_PUSH_PULL || args.args[2] == EMC2305_OPEN_DRAIN)
+ data->pwm_output_mask |= args.args[2] << ch;
+ else
+ dev_err(dev, "Wrong PWM output config provided: %d\n", args.args[1]);
+ } else {
+ data->pwm_output_mask |= EMC2305_OPEN_DRAIN << ch;
+ }
+
+ return 0;
+}
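A standalone sketch of the frequency snapping above: the requested PWM frequency from the DT cell is snapped to the closest supported base frequency (a simple stand-in for the kernel's find_closest() helper, using the base_freq_table values from this patch):

#include <stdio.h>
#include <stdlib.h>

static const unsigned int base_freq_table[] = { 2441, 4882, 19530, 26000 };

static unsigned int closest(unsigned int want)
{
	unsigned int best = base_freq_table[0];
	int i;

	for (i = 1; i < 4; i++)
		if (abs((int)base_freq_table[i] - (int)want) <
		    abs((int)best - (int)want))
			best = base_freq_table[i];
	return best;
}

int main(void)
{
	printf("%u\n", closest(5000));	/* 4882 */
	printf("%u\n", closest(20000));	/* 19530 */
	return 0;
}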
+
+static int emc2305_probe_childs_from_dt(struct device *dev)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ struct device_node *child;
+ int ret, count = 0;
+
+ data->pwm_output_mask = 0x0;
+ data->pwm_polarity_mask = 0x0;
+
+ for_each_child_of_node(dev->of_node, child) {
+ if (of_property_present(child, "reg")) {
+ ret = emc2305_of_parse_pwm_child(dev, child, data);
+ if (ret)
+ continue;
+ count++;
+ }
+ }
+ return count;
+}
+
static int emc2305_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
+ struct device_node *child;
struct emc2305_data *data;
struct emc2305_platform_data *pdata;
int vendor;
int ret;
int i;
+ int pwm_childs;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
@@ -539,22 +633,40 @@ static int emc2305_probe(struct i2c_client *client)
if (ret)
return ret;
+ pwm_childs = emc2305_probe_childs_from_dt(dev);
+
pdata = dev_get_platdata(&client->dev);
- if (pdata) {
- if (!pdata->max_state || pdata->max_state > EMC2305_FAN_MAX_STATE)
- return -EINVAL;
- data->max_state = pdata->max_state;
- /*
- * Validate a number of active PWM channels. Note that
- * configured number can be less than the actual maximum
- * supported by the device.
- */
- if (!pdata->pwm_num || pdata->pwm_num > EMC2305_PWM_MAX)
- return -EINVAL;
- data->pwm_num = pdata->pwm_num;
- data->pwm_separate = pdata->pwm_separate;
- for (i = 0; i < EMC2305_PWM_MAX; i++)
- data->pwm_min[i] = pdata->pwm_min[i];
+
+ if (!pwm_childs) {
+ if (pdata) {
+ if (!pdata->max_state || pdata->max_state > EMC2305_FAN_MAX_STATE)
+ return -EINVAL;
+ data->max_state = pdata->max_state;
+ /*
+ * Validate a number of active PWM channels. Note that
+ * configured number can be less than the actual maximum
+ * supported by the device.
+ */
+ if (!pdata->pwm_num || pdata->pwm_num > EMC2305_PWM_MAX)
+ return -EINVAL;
+ data->pwm_num = pdata->pwm_num;
+ data->pwm_output_mask = pdata->pwm_output_mask;
+ data->pwm_polarity_mask = pdata->pwm_polarity_mask;
+ data->pwm_separate = pdata->pwm_separate;
+ for (i = 0; i < EMC2305_PWM_MAX; i++) {
+ data->pwm_min[i] = pdata->pwm_min[i];
+ data->pwm_freq[i] = pdata->pwm_freq[i];
+ }
+ } else {
+ data->max_state = EMC2305_FAN_MAX_STATE;
+ data->pwm_separate = false;
+ data->pwm_output_mask = EMC2305_DEFAULT_OUTPUT;
+ data->pwm_polarity_mask = EMC2305_DEFAULT_POLARITY;
+ for (i = 0; i < EMC2305_PWM_MAX; i++) {
+ data->pwm_min[i] = EMC2305_FAN_MIN;
+ data->pwm_freq[i] = base_freq_table[3];
+ }
+ }
} else {
data->max_state = EMC2305_FAN_MAX_STATE;
data->pwm_separate = false;
@@ -568,11 +680,32 @@ static int emc2305_probe(struct i2c_client *client)
return PTR_ERR(data->hwmon_dev);
if (IS_REACHABLE(CONFIG_THERMAL)) {
- ret = emc2305_set_tz(dev);
- if (ret != 0)
- return ret;
+ /* Parse and check for the available PWM child nodes */
+ if (pwm_childs > 0) {
+ i = 0;
+ for_each_child_of_node(dev->of_node, child) {
+ ret = emc2305_set_single_tz(dev, child, i);
+ if (ret != 0)
+ return ret;
+ i++;
+ }
+ } else {
+ ret = emc2305_set_tz(dev);
+ if (ret != 0)
+ return ret;
+ }
}
+ ret = i2c_smbus_write_byte_data(client, EMC2305_REG_DRIVE_PWM_OUT,
+ data->pwm_output_mask);
+ if (ret < 0)
+ dev_err(dev, "Failed to configure pwm output, using default\n");
+
+ ret = i2c_smbus_write_byte_data(client, EMC2305_REG_POLARITY,
+ data->pwm_polarity_mask);
+ if (ret < 0)
+ dev_err(dev, "Failed to configure pwm polarity, using default\n");
+
for (i = 0; i < data->pwm_num; i++) {
ret = i2c_smbus_write_byte_data(client, EMC2305_REG_FAN_MIN_DRIVE(i),
data->pwm_min[i]);
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
index 0f9af82cebec..105b9f9dbec3 100644
--- a/drivers/hwmon/gsc-hwmon.c
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -64,7 +64,7 @@ static ssize_t pwm_auto_point_temp_show(struct device *dev,
return ret;
ret = regs[0] | regs[1] << 8;
- return sprintf(buf, "%d\n", ret * 10);
+ return sprintf(buf, "%d\n", ret * 100);
}
static ssize_t pwm_auto_point_temp_store(struct device *dev,
@@ -99,7 +99,7 @@ static ssize_t pwm_auto_point_pwm_show(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)));
+ return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)) / 100);
}
static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point1_pwm, pwm_auto_point_pwm, 0);
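A standalone check of the corrected scaling above: each auto point maps (50 + index * 10) percent onto the 0..255 PWM range, which the old code reported without the final division by 100.

#include <stdio.h>

int main(void)
{
	int index;

	for (index = 0; index < 6; index++)
		printf("point%d: %d\n", index + 1,
		       255 * (50 + index * 10) / 100);
	/* 127, 153, 178, 204, 229, 255 */
	return 0;
}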
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 157e232aace0..daed437d34a4 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -349,7 +349,7 @@ static void aem_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
void *buf, size_t size)
{
- int rs_size, res;
+ int rs_size;
struct aem_read_sensor_req rs_req;
/* Use preallocated rx buffer */
struct aem_read_sensor_resp *rs_resp = data->rs_resp;
@@ -383,17 +383,12 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
aem_send_message(ipmi);
- res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT);
- if (!res) {
- res = -ETIMEDOUT;
- goto out;
- }
+ if (!wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT))
+ return -ETIMEDOUT;
if (ipmi->rx_result || ipmi->rx_msg_len != rs_size ||
- memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) {
- res = -ENOENT;
- goto out;
- }
+ memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id)))
+ return -ENOENT;
switch (size) {
case 1: {
@@ -417,10 +412,8 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
break;
}
}
- res = 0;
-out:
- return res;
+ return 0;
}
/* Update AEM energy registers */
@@ -491,7 +484,6 @@ static void aem_delete(struct aem_data *data)
/* Retrieve version and module handle for an AEM1 instance */
static int aem_find_aem1_count(struct aem_ipmi_data *data)
{
- int res;
struct aem_find_firmware_req ff_req;
struct aem_find_firmware_resp ff_resp;
@@ -508,8 +500,7 @@ static int aem_find_aem1_count(struct aem_ipmi_data *data)
aem_send_message(data);
- res = wait_for_completion_timeout(&data->read_complete, IPMI_TIMEOUT);
- if (!res)
+ if (!wait_for_completion_timeout(&data->read_complete, IPMI_TIMEOUT))
return -ETIMEDOUT;
if (data->rx_result || data->rx_msg_len != sizeof(ff_resp) ||
@@ -632,7 +623,6 @@ static int aem_find_aem2(struct aem_ipmi_data *data,
struct aem_find_instance_resp *fi_resp,
int instance_num)
{
- int res;
struct aem_find_instance_req fi_req;
fi_req.id = system_x_id;
@@ -648,8 +638,7 @@ static int aem_find_aem2(struct aem_ipmi_data *data,
aem_send_message(data);
- res = wait_for_completion_timeout(&data->read_complete, IPMI_TIMEOUT);
- if (!res)
+ if (!wait_for_completion_timeout(&data->read_complete, IPMI_TIMEOUT))
return -ETIMEDOUT;
if (data->rx_result || data->rx_msg_len != sizeof(*fi_resp) ||
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index 9a5fd16a4ec2..5a394eeff676 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -6,6 +6,7 @@
* Copyright (C) 2021 Nathan Rossi <nathan.rossi@digi.com>
*/
+#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/i2c.h>
@@ -41,7 +42,7 @@
#define INA238_CONFIG_ADCRANGE BIT(4)
#define SQ52206_CONFIG_ADCRANGE_HIGH BIT(4)
-#define SQ52206_CONFIG_ADCRANGE_LOW BIT(3)
+#define SQ52206_CONFIG_ADCRANGE_LOW BIT(3)
#define INA238_DIAG_ALERT_TMPOL BIT(7)
#define INA238_DIAG_ALERT_SHNTOL BIT(6)
@@ -104,9 +105,10 @@
#define INA238_SHUNT_VOLTAGE_LSB 5 /* 5 uV/lsb */
#define INA238_BUS_VOLTAGE_LSB 3125 /* 3.125 mV/lsb */
-#define INA238_DIE_TEMP_LSB 1250000 /* 125.0000 mC/lsb */
+#define INA238_DIE_TEMP_LSB 1250000 /* 125.0000 mC/lsb */
#define SQ52206_BUS_VOLTAGE_LSB 3750 /* 3.75 mV/lsb */
#define SQ52206_DIE_TEMP_LSB 78125 /* 7.8125 mC/lsb */
+#define INA228_DIE_TEMP_LSB 78125 /* 7.8125 mC/lsb */
static const struct regmap_config ina238_regmap_config = {
.max_register = INA238_REGISTERS,
@@ -114,16 +116,17 @@ static const struct regmap_config ina238_regmap_config = {
.val_bits = 16,
};
-enum ina238_ids { ina238, ina237, sq52206 };
+enum ina238_ids { ina238, ina237, sq52206, ina228 };
struct ina238_config {
+ bool has_20bit_voltage_current; /* vshunt, vbus and current are 20-bit fields */
bool has_power_highest; /* chip detection power peak */
- bool has_energy; /* chip detection energy */
- u8 temp_shift; /* fixed parameters for temp calculate */
+ bool has_energy; /* chip detection energy */
+ u8 temp_shift; /* fixed parameters for temp calculate */
u32 power_calculate_factor; /* fixed parameters for power calculate */
- u16 config_default; /* Power-on default state */
+ u16 config_default; /* Power-on default state */
int bus_voltage_lsb; /* use for temperature calculate, uV/lsb */
- int temp_lsb; /* use for temperature calculate */
+ int temp_lsb; /* use for temperature calculate */
};
struct ina238_data {
@@ -137,6 +140,7 @@ struct ina238_data {
static const struct ina238_config ina238_config[] = {
[ina238] = {
+ .has_20bit_voltage_current = false,
.has_energy = false,
.has_power_highest = false,
.temp_shift = 4,
@@ -146,6 +150,7 @@ static const struct ina238_config ina238_config[] = {
.temp_lsb = INA238_DIE_TEMP_LSB,
},
[ina237] = {
+ .has_20bit_voltage_current = false,
.has_energy = false,
.has_power_highest = false,
.temp_shift = 4,
@@ -155,6 +160,7 @@ static const struct ina238_config ina238_config[] = {
.temp_lsb = INA238_DIE_TEMP_LSB,
},
[sq52206] = {
+ .has_20bit_voltage_current = false,
.has_energy = true,
.has_power_highest = true,
.temp_shift = 0,
@@ -163,6 +169,16 @@ static const struct ina238_config ina238_config[] = {
.bus_voltage_lsb = SQ52206_BUS_VOLTAGE_LSB,
.temp_lsb = SQ52206_DIE_TEMP_LSB,
},
+ [ina228] = {
+ .has_20bit_voltage_current = true,
+ .has_energy = true,
+ .has_power_highest = false,
+ .temp_shift = 0,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_lsb = INA228_DIE_TEMP_LSB,
+ },
};
static int ina238_read_reg24(const struct i2c_client *client, u8 reg, u32 *val)
@@ -199,6 +215,65 @@ static int ina238_read_reg40(const struct i2c_client *client, u8 reg, u64 *val)
return 0;
}
+static int ina238_read_field_s20(const struct i2c_client *client, u8 reg, s32 *val)
+{
+ u32 regval;
+ int err;
+
+ err = ina238_read_reg24(client, reg, &regval);
+ if (err)
+ return err;
+
+ /* bits 3-0 are reserved and always zero */
+ regval >>= 4;
+
+ *val = sign_extend32(regval, 19);
+
+ return 0;
+}
+
+static int ina228_read_shunt_voltage(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int regval;
+ int err;
+
+ err = ina238_read_field_s20(data->client, INA238_SHUNT_VOLTAGE, &regval);
+ if (err)
+ return err;
+
+ /*
+ * gain of 1 -> LSB / 4
+ * This field is 16 bits wide on the ina238; the ina228 adds another
+ * 4 bits of precision, so the ina238 conversion factors can still be
+ * applied after dividing by 16.
+ */
+ *val = (regval * INA238_SHUNT_VOLTAGE_LSB) * data->gain / (1000 * 4) / 16;
+ return 0;
+}
+
+static int ina228_read_bus_voltage(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int regval;
+ int err;
+
+ err = ina238_read_field_s20(data->client, INA238_BUS_VOLTAGE, &regval);
+ if (err)
+ return err;
+
+ /*
+ * gain of 1 -> LSB / 4
+ * This field is 16 bits wide on the ina238; the ina228 adds another
+ * 4 bits of precision, so the ina238 conversion factors can still be
+ * applied after dividing by 16.
+ */
+ *val = (regval * data->config->bus_voltage_lsb) / 1000 / 16;
+ return 0;
+}
+
static int ina238_read_in(struct device *dev, u32 attr, int channel,
long *val)
{
@@ -211,6 +286,8 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
case 0:
switch (attr) {
case hwmon_in_input:
+ if (data->config->has_20bit_voltage_current)
+ return ina228_read_shunt_voltage(dev, attr, channel, val);
reg = INA238_SHUNT_VOLTAGE;
break;
case hwmon_in_max:
@@ -234,6 +311,8 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
case 1:
switch (attr) {
case hwmon_in_input:
+ if (data->config->has_20bit_voltage_current)
+ return ina228_read_bus_voltage(dev, attr, channel, val);
reg = INA238_BUS_VOLTAGE;
break;
case hwmon_in_max:
@@ -271,7 +350,7 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
if (channel == 0)
/* gain of 1 -> LSB / 4 */
*val = (regval * INA238_SHUNT_VOLTAGE_LSB) *
- data->gain / (1000 * 4);
+ data->gain / (1000 * 4);
else
*val = (regval * data->config->bus_voltage_lsb) / 1000;
break;
@@ -341,13 +420,25 @@ static int ina238_read_current(struct device *dev, u32 attr, long *val)
switch (attr) {
case hwmon_curr_input:
- err = regmap_read(data->regmap, INA238_CURRENT, &regval);
- if (err < 0)
- return err;
+ if (data->config->has_20bit_voltage_current) {
+ err = ina238_read_field_s20(data->client, INA238_CURRENT, &regval);
+ if (err)
+ return err;
+ } else {
+ err = regmap_read(data->regmap, INA238_CURRENT, &regval);
+ if (err < 0)
+ return err;
+ /* sign-extend */
+ regval = (s16)regval;
+ }
/* Signed register, fixed 1mA current lsb. result in mA */
- *val = div_s64((s16)regval * INA238_FIXED_SHUNT * data->gain,
+ *val = div_s64((s64)regval * INA238_FIXED_SHUNT * data->gain,
data->rshunt * 4);
+
+ /* Account for the 4-bit offset of the 20-bit field */
+ if (data->config->has_20bit_voltage_current)
+ *val /= 16;
break;
default:
return -EOPNOTSUPP;
@@ -370,7 +461,7 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
return err;
/* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
- power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
data->config->power_calculate_factor, 4 * 100 * data->rshunt);
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
@@ -381,7 +472,7 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
return err;
/* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
- power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
data->config->power_calculate_factor, 4 * 100 * data->rshunt);
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
@@ -395,7 +486,7 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
* Truncated 24-bit compare register, lower 8-bits are
* truncated. Same conversion to/from uW as POWER register.
*/
- power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT * data->gain *
data->config->power_calculate_factor, 4 * 100 * data->rshunt);
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
@@ -448,7 +539,7 @@ static int ina238_read_temp(struct device *dev, u32 attr, long *val)
return err;
/* Signed, result in mC */
*val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
- (s64)data->config->temp_lsb, 10000);
+ (s64)data->config->temp_lsb, 10000);
break;
case hwmon_temp_max:
err = regmap_read(data->regmap, INA238_TEMP_LIMIT, &regval);
@@ -456,7 +547,7 @@ static int ina238_read_temp(struct device *dev, u32 attr, long *val)
return err;
/* Signed, result in mC */
*val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
- (s64)data->config->temp_lsb, 10000);
+ (s64)data->config->temp_lsb, 10000);
break;
case hwmon_temp_max_alarm:
err = regmap_read(data->regmap, INA238_DIAG_ALERT, &regval);
@@ -501,8 +592,8 @@ static ssize_t energy1_input_show(struct device *dev,
return ret;
/* result in uJ */
- energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 * 10 *
- data->config->power_calculate_factor, 4 * data->rshunt);
+ energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 * 10 *
+ data->config->power_calculate_factor, 4 * data->rshunt);
return sysfs_emit(buf, "%llu\n", energy);
}
@@ -750,6 +841,7 @@ static int ina238_probe(struct i2c_client *client)
}
static const struct i2c_device_id ina238_id[] = {
+ { "ina228", ina228 },
{ "ina237", ina237 },
{ "ina238", ina238 },
{ "sq52206", sq52206 },
@@ -759,6 +851,10 @@ MODULE_DEVICE_TABLE(i2c, ina238_id);
static const struct of_device_id __maybe_unused ina238_of_match[] = {
{
+ .compatible = "ti,ina228",
+ .data = (void *)ina228
+ },
+ {
.compatible = "ti,ina237",
.data = (void *)ina237
},
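
The ina228 support added above reads 24-bit registers whose low four bits are reserved, leaving a signed 20-bit field: ina238_read_field_s20() shifts the reserved bits away and then sign-extends from bit 19. A user-space equivalent of that extraction:

#include <stdint.h>
#include <stdio.h>

/* Same behaviour as the kernel's sign_extend32(value, 19). */
static int32_t sign_extend20(uint32_t value)
{
	return (int32_t)(value << 12) >> 12; /* move bit 19 to bit 31 and back */
}

int main(void)
{
	uint32_t reg = 0xFFFFE0;   /* raw 24-bit register, bits 3-0 reserved */
	uint32_t field = reg >> 4; /* 0xFFFFE: 20-bit two's complement */

	printf("%d\n", sign_extend20(field)); /* prints: -2 */
	return 0;
}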
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index a07e2eb93c71..1fcd320d6161 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -339,8 +339,8 @@ static int ltc2992_config_gpio(struct ltc2992_state *st)
st->gc.ngpio = ARRAY_SIZE(st->gpio_names);
st->gc.get = ltc2992_gpio_get;
st->gc.get_multiple = ltc2992_gpio_get_multiple;
- st->gc.set_rv = ltc2992_gpio_set;
- st->gc.set_multiple_rv = ltc2992_gpio_set_multiple;
+ st->gc.set = ltc2992_gpio_set;
+ st->gc.set_multiple = ltc2992_gpio_set_multiple;
ret = devm_gpiochip_add_data(&st->client->dev, &st->gc, st);
if (ret)
diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c
index f607fe8f7937..dbb30abcd343 100644
--- a/drivers/hwmon/ltc4282.c
+++ b/drivers/hwmon/ltc4282.c
@@ -177,13 +177,15 @@ static const unsigned int ltc4282_out_rates[] = {
LTC4282_CLKOUT_CNV, LTC4282_CLKOUT_SYSTEM
};
-static long ltc4282_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ltc4282_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- int idx = find_closest(rate, ltc4282_out_rates,
+ int idx = find_closest(req->rate, ltc4282_out_rates,
ARRAY_SIZE(ltc4282_out_rates));
- return ltc4282_out_rates[idx];
+ req->rate = ltc4282_out_rates[idx];
+
+ return 0;
}
static unsigned long ltc4282_recalc_rate(struct clk_hw *hw,
@@ -1124,7 +1126,7 @@ static ssize_t ltc4282_energy_show(struct device *dev,
static const struct clk_ops ltc4282_ops = {
.recalc_rate = ltc4282_recalc_rate,
- .round_rate = ltc4282_round_rate,
+ .determine_rate = ltc4282_determine_rate,
.set_rate = ltc4282_set_rate,
.disable = ltc4282_disable,
};
@@ -1596,7 +1598,7 @@ static const struct hwmon_ops ltc4282_hwmon_ops = {
.read_string = ltc4282_read_labels,
};
-static const struct hwmon_chip_info ltc2947_chip_info = {
+static const struct hwmon_chip_info ltc4282_chip_info = {
.ops = &ltc4282_hwmon_ops,
.info = ltc4282_info,
};
@@ -1717,7 +1719,7 @@ static int ltc4282_probe(struct i2c_client *i2c)
mutex_init(&st->lock);
hwmon = devm_hwmon_device_register_with_info(dev, "ltc4282", st,
- &ltc2947_chip_info,
+ &ltc4282_chip_info,
ltc4282_groups);
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
diff --git a/drivers/hwmon/max31827.c b/drivers/hwmon/max31827.c
index 48e8f8ba4d05..a31c7b655da1 100644
--- a/drivers/hwmon/max31827.c
+++ b/drivers/hwmon/max31827.c
@@ -445,7 +445,7 @@ static ssize_t temp1_resolution_show(struct device *dev,
val = FIELD_GET(MAX31827_CONFIGURATION_RESOLUTION_MASK, val);
- return scnprintf(buf, PAGE_SIZE, "%u\n", max31827_resolutions[val]);
+ return sysfs_emit(buf, "%u\n", max31827_resolutions[val]);
}
static ssize_t temp1_resolution_store(struct device *dev,
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 441f984a859d..55e492452ce8 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -67,6 +67,15 @@ config SENSORS_ADP1050
This driver can also be built as a module. If so, the module will
be called adp1050.
+config SENSORS_ADP1050_REGULATOR
+ bool "Regulator support for ADP1050 and compatibles"
+ depends on SENSORS_ADP1050 && REGULATOR
+ help
+ If you say yes here you get regulator support for Analog Devices
+ LTP8800-1A, LTP8800-4A, and LTP8800-2. LTP8800 is a family of DC/DC
+ µModule regulators that can provide microprocessor power from a 54V
+ power distribution architecture.
+
config SENSORS_BEL_PFE
tristate "Bel PFE Compatible Power Supplies"
help
diff --git a/drivers/hwmon/pmbus/adp1050.c b/drivers/hwmon/pmbus/adp1050.c
index ef46c880b168..a73774f8da2d 100644
--- a/drivers/hwmon/pmbus/adp1050.c
+++ b/drivers/hwmon/pmbus/adp1050.c
@@ -11,6 +11,12 @@
#include "pmbus.h"
+#if IS_ENABLED(CONFIG_SENSORS_ADP1050_REGULATOR)
+static const struct regulator_desc adp1050_reg_desc[] = {
+ PMBUS_REGULATOR_ONE("vout"),
+};
+#endif /* CONFIG_SENSORS_ADP1050_REGULATOR */
+
static struct pmbus_driver_info adp1050_info = {
.pages = 1,
.format[PSC_VOLTAGE_IN] = linear,
@@ -23,19 +29,79 @@ static struct pmbus_driver_info adp1050_info = {
| PMBUS_HAVE_STATUS_TEMP,
};
+static struct pmbus_driver_info adp1051_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT
+ | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_STATUS_TEMP,
+};
+
+static struct pmbus_driver_info adp1055_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT
+ | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3
+ | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_STATUS_TEMP,
+};
+
+static struct pmbus_driver_info ltp8800_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT
+ | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_STATUS_TEMP,
+#if IS_ENABLED(CONFIG_SENSORS_ADP1050_REGULATOR)
+ .num_regulators = 1,
+ .reg_desc = adp1050_reg_desc,
+#endif
+};
+
static int adp1050_probe(struct i2c_client *client)
{
- return pmbus_do_probe(client, &adp1050_info);
+ struct pmbus_driver_info *info;
+
+ info = (struct pmbus_driver_info *)i2c_get_match_data(client);
+ if (!info)
+ return -ENODEV;
+
+ return pmbus_do_probe(client, info);
}
static const struct i2c_device_id adp1050_id[] = {
- {"adp1050"},
+ { .name = "adp1050", .driver_data = (kernel_ulong_t)&adp1050_info },
+ { .name = "adp1051", .driver_data = (kernel_ulong_t)&adp1051_info },
+ { .name = "adp1055", .driver_data = (kernel_ulong_t)&adp1055_info },
+ { .name = "ltp8800", .driver_data = (kernel_ulong_t)&ltp8800_info },
{}
};
MODULE_DEVICE_TABLE(i2c, adp1050_id);
static const struct of_device_id adp1050_of_match[] = {
- { .compatible = "adi,adp1050"},
+ { .compatible = "adi,adp1050", .data = &adp1050_info },
+ { .compatible = "adi,adp1051", .data = &adp1051_info },
+ { .compatible = "adi,adp1055", .data = &adp1055_info },
+ { .compatible = "adi,ltp8800", .data = &ltp8800_info },
{}
};
MODULE_DEVICE_TABLE(of, adp1050_of_match);
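
The probe rework leans on i2c_get_match_data(), which transparently returns the OF match table's .data pointer when the device came from the devicetree, or the i2c_device_id .driver_data value otherwise, so one probe body serves every variant. A minimal sketch of that pattern (foo_probe is a hypothetical driver, not this one):

/* Sketch only: per-variant driver info resolved from either match table. */
static int foo_probe(struct i2c_client *client)
{
	struct pmbus_driver_info *info;

	info = (struct pmbus_driver_info *)i2c_get_match_data(client);
	if (!info)
		return -ENODEV; /* neither table matched with data attached */

	return pmbus_do_probe(client, info);
}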
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index 2af921039309..c52c55d2e7f4 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -63,6 +63,7 @@ enum chips {
raa228228,
raa229001,
raa229004,
+ raa229621,
};
enum variants {
@@ -465,6 +466,7 @@ static const struct i2c_device_id raa_dmpvr_id[] = {
{"raa228228", raa_dmpvr2_2rail_nontc},
{"raa229001", raa_dmpvr2_2rail},
{"raa229004", raa_dmpvr2_2rail},
+ {"raa229621", raa_dmpvr2_2rail},
{}
};
@@ -512,6 +514,7 @@ static const struct of_device_id isl68137_of_match[] = {
{ .compatible = "renesas,raa228228", .data = (void *)raa_dmpvr2_2rail_nontc },
{ .compatible = "renesas,raa229001", .data = (void *)raa_dmpvr2_2rail },
{ .compatible = "renesas,raa229004", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,raa229621", .data = (void *)raa_dmpvr2_2rail },
{ },
};
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 63524dff5e75..ca2bfa25eb04 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -16,7 +16,7 @@
#include "pmbus.h"
enum chips {
- tps53647, tps53667, tps53676, tps53679, tps53681, tps53688
+ tps53647, tps53667, tps53676, tps53679, tps53681, tps53685, tps53688
};
#define TPS53647_PAGE_NUM 1
@@ -31,7 +31,8 @@ enum chips {
#define TPS53679_PROT_VR13_5MV 0x07 /* VR13.0 mode, 5-mV DAC */
#define TPS53679_PAGE_NUM 2
-#define TPS53681_DEVICE_ID 0x81
+#define TPS53681_DEVICE_ID "\x81"
+#define TPS53685_DEVICE_ID "TIShP"
#define TPS53681_PMBUS_REVISION 0x33
@@ -86,10 +87,12 @@ static int tps53679_identify_phases(struct i2c_client *client,
}
static int tps53679_identify_chip(struct i2c_client *client,
- u8 revision, u16 id)
+ u8 revision, char *id)
{
u8 buf[I2C_SMBUS_BLOCK_MAX];
int ret;
+ int buf_len;
+ int id_len;
ret = pmbus_read_byte_data(client, 0, PMBUS_REVISION);
if (ret < 0)
@@ -102,8 +105,14 @@ static int tps53679_identify_chip(struct i2c_client *client,
ret = i2c_smbus_read_block_data(client, PMBUS_IC_DEVICE_ID, buf);
if (ret < 0)
return ret;
- if (ret != 1 || buf[0] != id) {
- dev_err(&client->dev, "Unexpected device ID 0x%x\n", buf[0]);
+
+ /* Adjust the length if a null terminator is present */
+ buf_len = (buf[ret - 1] != '\x00' ? ret : ret - 1);
+
+ id_len = strlen(id);
+
+ if (buf_len != id_len || strncmp(id, buf, id_len)) {
+ dev_err(&client->dev, "Unexpected device ID: %*ph\n", ret, buf);
return -ENODEV;
}
return 0;
@@ -117,7 +126,7 @@ static int tps53679_identify_chip(struct i2c_client *client,
*/
static int tps53679_identify_multiphase(struct i2c_client *client,
struct pmbus_driver_info *info,
- int pmbus_rev, int device_id)
+ int pmbus_rev, char *device_id)
{
int ret;
@@ -138,6 +147,16 @@ static int tps53679_identify(struct i2c_client *client,
return tps53679_identify_mode(client, info);
}
+static int tps53685_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ info->func[1] |= PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN |
+ PMBUS_HAVE_STATUS_INPUT;
+ info->format[PSC_VOLTAGE_OUT] = linear;
+ return tps53679_identify_chip(client, TPS53681_PMBUS_REVISION,
+ TPS53685_DEVICE_ID);
+}
+
static int tps53681_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
@@ -263,6 +282,10 @@ static int tps53679_probe(struct i2c_client *client)
info->identify = tps53681_identify;
info->read_word_data = tps53681_read_word_data;
break;
+ case tps53685:
+ info->pages = TPS53679_PAGE_NUM;
+ info->identify = tps53685_identify;
+ break;
default:
return -ENODEV;
}
@@ -277,6 +300,7 @@ static const struct i2c_device_id tps53679_id[] = {
{"tps53676", tps53676},
{"tps53679", tps53679},
{"tps53681", tps53681},
+ {"tps53685", tps53685},
{"tps53688", tps53688},
{}
};
@@ -289,6 +313,7 @@ static const struct of_device_id __maybe_unused tps53679_of_match[] = {
{.compatible = "ti,tps53676", .data = (void *)tps53676},
{.compatible = "ti,tps53679", .data = (void *)tps53679},
{.compatible = "ti,tps53681", .data = (void *)tps53681},
+ {.compatible = "ti,tps53685", .data = (void *)tps53685},
{.compatible = "ti,tps53688", .data = (void *)tps53688},
{}
};
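
Turning the expected ID into a string lets a single helper validate both the one-byte "\x81" ID of the tps53681 and the five-byte "TIShP" ID of the tps53685, while tolerating an optional trailing NUL in the block read. The comparison in isolation, as a runnable demonstration:

#include <stdio.h>
#include <string.h>

/* Compare a block-read ID against an expected string, ignoring one
 * trailing NUL byte if the device happens to send it. */
static int id_matches(const unsigned char *buf, int ret, const char *id)
{
	int buf_len = (buf[ret - 1] != '\0') ? ret : ret - 1;
	int id_len = strlen(id);

	return buf_len == id_len && !strncmp(id, (const char *)buf, id_len);
}

int main(void)
{
	unsigned char with_nul[] = { 'T', 'I', 'S', 'h', 'P', '\0' };
	unsigned char without[]  = { 'T', 'I', 'S', 'h', 'P' };

	printf("%d %d\n", id_matches(with_nul, 6, "TIShP"),
	       id_matches(without, 5, "TIShP")); /* prints: 1 1 */
	return 0;
}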
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 52d4000902d5..55e7af3a5f98 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -364,7 +364,7 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
data->gpio.direction_input = ucd9000_gpio_direction_input;
data->gpio.direction_output = ucd9000_gpio_direction_output;
data->gpio.get = ucd9000_gpio_get;
- data->gpio.set_rv = ucd9000_gpio_set;
+ data->gpio.set = ucd9000_gpio_set;
data->gpio.can_sleep = true;
data->gpio.base = -1;
data->gpio.parent = &client->dev;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 7d7d70afde65..a23edd35c19f 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1448,7 +1448,8 @@ w83627ehf_do_read_temp(struct w83627ehf_data *data, u32 attr,
return 0;
case hwmon_temp_alarm:
if (channel < 3) {
- int bit[] = { 4, 5, 13 };
+ static const int bit[] = { 4, 5, 13 };
+
*val = (data->alarms >> bit[channel]) & 1;
return 0;
}
@@ -1479,7 +1480,8 @@ w83627ehf_do_read_in(struct w83627ehf_data *data, u32 attr,
return 0;
case hwmon_in_alarm:
if (channel < 10) {
- int bit[] = { 0, 1, 2, 3, 8, 21, 20, 16, 17, 19 };
+ static const int bit[] = { 0, 1, 2, 3, 8, 21, 20, 16, 17, 19 };
+
*val = (data->alarms >> bit[channel]) & 1;
return 0;
}
@@ -1507,7 +1509,8 @@ w83627ehf_do_read_fan(struct w83627ehf_data *data, u32 attr,
return 0;
case hwmon_fan_alarm:
if (channel < 5) {
- int bit[] = { 6, 7, 11, 10, 23 };
+ static const int bit[] = { 6, 7, 11, 10, 23 };
+
*val = (data->alarms >> bit[channel]) & 1;
return 0;
}
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 7163950eb371..f3a13b300835 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -19,7 +19,6 @@
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
-#include <linux/pfn_t.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -1618,7 +1617,7 @@ static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
get_page(page);
- return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page));
+ return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn(page));
}
static const struct vm_operations_struct msc_mmap_ops = {
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c8d115b58e44..070d014fdc5d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -992,7 +992,6 @@ config I2C_APPLE
tristate "Apple SMBus platform driver"
depends on !I2C_PASEMI
depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
help
Say Y here if you want to use the I2C controller present on Apple
Silicon chips such as the M1.
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 879719e91df2..a35e4c64a1d4 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -101,7 +101,7 @@ static int bt1_i2c_request_regs(struct dw_i2c_dev *dev)
}
#endif
-static int txgbe_i2c_request_regs(struct dw_i2c_dev *dev)
+static int dw_i2c_get_parent_regmap(struct dw_i2c_dev *dev)
{
dev->map = dev_get_regmap(dev->dev->parent, NULL);
if (!dev->map)
@@ -123,12 +123,15 @@ static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev)
struct platform_device *pdev = to_platform_device(dev->dev);
int ret;
+ if (device_is_compatible(dev->dev, "intel,xe-i2c"))
+ return dw_i2c_get_parent_regmap(dev);
+
switch (dev->flags & MODEL_MASK) {
case MODEL_BAIKAL_BT1:
ret = bt1_i2c_request_regs(dev);
break;
case MODEL_WANGXUN_SP:
- ret = txgbe_i2c_request_regs(dev);
+ ret = dw_i2c_get_parent_regmap(dev);
break;
default:
dev->base = devm_platform_ioremap_resource(pdev, 0);
@@ -205,25 +208,28 @@ static void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev)
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
+ u32 flags = (uintptr_t)device_get_match_data(&pdev->dev);
struct device *device = &pdev->dev;
struct i2c_adapter *adap;
struct dw_i2c_dev *dev;
int irq, ret;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq == -ENXIO)
+ flags |= ACCESS_POLLING;
+ else if (irq < 0)
return irq;
dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- dev->flags = (uintptr_t)device_get_match_data(device);
if (device_property_present(device, "wx,i2c-snps-model"))
- dev->flags = MODEL_WANGXUN_SP | ACCESS_POLLING;
+ flags = MODEL_WANGXUN_SP | ACCESS_POLLING;
dev->dev = device;
dev->irq = irq;
+ dev->flags = flags;
platform_set_drvdata(pdev, dev);
ret = dw_i2c_plat_request_regs(dev);
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 13889f52b6f7..ff2289b52c84 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -155,9 +155,9 @@ static const struct geni_i2c_clk_fld geni_i2c_clk_map_19p2mhz[] = {
/* source_clock = 32 MHz */
static const struct geni_i2c_clk_fld geni_i2c_clk_map_32mhz[] = {
- { I2C_MAX_STANDARD_MODE_FREQ, 8, 14, 18, 40 },
- { I2C_MAX_FAST_MODE_FREQ, 4, 3, 11, 20 },
- { I2C_MAX_FAST_MODE_PLUS_FREQ, 2, 3, 6, 15 },
+ { I2C_MAX_STANDARD_MODE_FREQ, 8, 14, 18, 38 },
+ { I2C_MAX_FAST_MODE_FREQ, 4, 3, 9, 19 },
+ { I2C_MAX_FAST_MODE_PLUS_FREQ, 2, 3, 5, 15 },
{}
};
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index c8b4b404f6c1..e6815f6cae78 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -742,11 +742,14 @@ static void stm32f7_i2c_dma_callback(void *arg)
{
struct stm32f7_i2c_dev *i2c_dev = arg;
struct stm32_i2c_dma *dma = i2c_dev->dma;
+ struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
stm32f7_i2c_disable_dma_req(i2c_dev);
dmaengine_terminate_async(dma->chan_using);
dma_unmap_single(i2c_dev->dev, dma->dma_buf, dma->dma_len,
dma->dma_data_dir);
+ if (!f7_msg->smbus)
+ i2c_put_dma_safe_msg_buf(f7_msg->buf, i2c_dev->msg, true);
complete(&dma->dma_complete);
}
@@ -882,6 +885,7 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
{
struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
void __iomem *base = i2c_dev->base;
+ u8 *dma_buf;
u32 cr1, cr2;
int ret;
@@ -931,17 +935,23 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
/* Configure DMA or enable RX/TX interrupt */
i2c_dev->use_dma = false;
- if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN
- && !i2c_dev->atomic) {
- ret = stm32_i2c_prep_dma_xfer(i2c_dev->dev, i2c_dev->dma,
- msg->flags & I2C_M_RD,
- f7_msg->count, f7_msg->buf,
- stm32f7_i2c_dma_callback,
- i2c_dev);
- if (!ret)
- i2c_dev->use_dma = true;
- else
- dev_warn(i2c_dev->dev, "can't use DMA\n");
+ if (i2c_dev->dma && !i2c_dev->atomic) {
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, STM32F7_I2C_DMA_LEN_MIN);
+ if (dma_buf) {
+ f7_msg->buf = dma_buf;
+ ret = stm32_i2c_prep_dma_xfer(i2c_dev->dev, i2c_dev->dma,
+ msg->flags & I2C_M_RD,
+ f7_msg->count, f7_msg->buf,
+ stm32f7_i2c_dma_callback,
+ i2c_dev);
+ if (ret) {
+ dev_warn(i2c_dev->dev, "can't use DMA\n");
+ i2c_put_dma_safe_msg_buf(f7_msg->buf, msg, false);
+ f7_msg->buf = msg->buf;
+ } else {
+ i2c_dev->use_dma = true;
+ }
+ }
}
if (!i2c_dev->use_dma) {
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 4f05afab161f..4eb31b913c1a 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -134,6 +134,8 @@
#define I2C_MST_FIFO_STATUS_TX GENMASK(23, 16)
#define I2C_MST_FIFO_STATUS_RX GENMASK(7, 0)
+#define I2C_MASTER_RESET_CNTRL 0x0a8
+
/* configuration load timeout in microseconds */
#define I2C_CONFIG_LOAD_TIMEOUT 1000000
@@ -184,6 +186,9 @@ enum msg_end_type {
* @has_mst_fifo: The I2C controller contains the new MST FIFO interface that
* provides additional features and allows for longer messages to
* be transferred in one go.
+ * @has_mst_reset: The I2C controller contains the MASTER_RESET_CNTRL register,
+ * which provides an alternative to a full controller reset when the
+ * controller is configured as I2C master.
* @quirks: I2C adapter quirks for limiting write/read transfer size and not
* allowing 0 length transfers.
* @supports_bus_clear: Bus Clear support to recover from bus hang during
@@ -213,6 +218,7 @@ struct tegra_i2c_hw_feature {
bool has_multi_master_mode;
bool has_slcg_override_reg;
bool has_mst_fifo;
+ bool has_mst_reset;
const struct i2c_adapter_quirks *quirks;
bool supports_bus_clear;
bool has_apb_dma;
@@ -605,6 +611,26 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
return 0;
}
+static int tegra_i2c_master_reset(struct tegra_i2c_dev *i2c_dev)
+{
+ if (!i2c_dev->hw->has_mst_reset)
+ return -EOPNOTSUPP;
+
+ /*
+ * Writing 1 to I2C_MASTER_RESET_CNTRL resets all internal state of the
+ * master logic, including the FIFOs; clearing it back to 0 resumes
+ * normal operation. Software must wait 2 us after both assertion and
+ * de-assertion of this soft reset.
+ */
+ i2c_writel(i2c_dev, 0x1, I2C_MASTER_RESET_CNTRL);
+ fsleep(2);
+
+ i2c_writel(i2c_dev, 0x0, I2C_MASTER_RESET_CNTRL);
+ fsleep(2);
+
+ return 0;
+}
+
static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
{
u32 val, clk_divisor, clk_multiplier, tsu_thd, tlow, thigh, non_hs_mode;
@@ -612,6 +638,16 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
int err;
/*
+ * Reset the controller before initializing it.
+ * If device_reset() returns -ENOENT, i.e. when no reset line is
+ * available, fall back to the internal software reset if the
+ * controller supports it.
+ */
+ err = device_reset(i2c_dev->dev);
+ if (err == -ENOENT)
+ err = tegra_i2c_master_reset(i2c_dev);
+
+ /*
* The reset shouldn't ever fail in practice. The failure will be a
* sign of a severe problem that needs to be resolved. Still we don't
* want to fail the initialization completely because this may break
@@ -619,7 +655,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
* emit a noisy warning on error, which won't stay unnoticed and
* won't hose machine entirely.
*/
- err = device_reset(i2c_dev->dev);
WARN_ON_ONCE(err);
if (IS_DVC(i2c_dev))
@@ -1266,17 +1301,9 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (i2c_dev->dma_mode) {
if (i2c_dev->msg_read) {
- dma_sync_single_for_device(i2c_dev->dma_dev,
- i2c_dev->dma_phys,
- xfer_size, DMA_FROM_DEVICE);
-
err = tegra_i2c_dma_submit(i2c_dev, xfer_size);
if (err)
return err;
- } else {
- dma_sync_single_for_cpu(i2c_dev->dma_dev,
- i2c_dev->dma_phys,
- xfer_size, DMA_TO_DEVICE);
}
}
@@ -1286,11 +1313,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (i2c_dev->dma_mode) {
memcpy(i2c_dev->dma_buf + I2C_PACKET_HEADER_SIZE,
msg->buf, i2c_dev->msg_len);
-
- dma_sync_single_for_device(i2c_dev->dma_dev,
- i2c_dev->dma_phys,
- xfer_size, DMA_TO_DEVICE);
-
err = tegra_i2c_dma_submit(i2c_dev, xfer_size);
if (err)
return err;
@@ -1331,13 +1353,8 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
return -ETIMEDOUT;
}
- if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE) {
- dma_sync_single_for_cpu(i2c_dev->dma_dev,
- i2c_dev->dma_phys,
- xfer_size, DMA_FROM_DEVICE);
-
+ if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE)
memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf, i2c_dev->msg_len);
- }
}
time_left = tegra_i2c_wait_completion(i2c_dev, &i2c_dev->msg_complete,
@@ -1468,6 +1485,7 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .has_mst_reset = false,
.quirks = &tegra_i2c_quirks,
.supports_bus_clear = false,
.has_apb_dma = true,
@@ -1492,6 +1510,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .has_mst_reset = false,
.quirks = &tegra_i2c_quirks,
.supports_bus_clear = false,
.has_apb_dma = true,
@@ -1516,6 +1535,7 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .has_mst_reset = false,
.quirks = &tegra_i2c_quirks,
.supports_bus_clear = true,
.has_apb_dma = true,
@@ -1540,6 +1560,7 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
+ .has_mst_reset = false,
.quirks = &tegra_i2c_quirks,
.supports_bus_clear = true,
.has_apb_dma = true,
@@ -1564,6 +1585,7 @@ static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
+ .has_mst_reset = false,
.quirks = &tegra_i2c_quirks,
.supports_bus_clear = true,
.has_apb_dma = true,
@@ -1588,6 +1610,7 @@ static const struct tegra_i2c_hw_feature tegra186_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
+ .has_mst_reset = false,
.quirks = &tegra_i2c_quirks,
.supports_bus_clear = true,
.has_apb_dma = false,
@@ -1612,6 +1635,7 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
.has_multi_master_mode = true,
.has_slcg_override_reg = true,
.has_mst_fifo = true,
+ .has_mst_reset = true,
.quirks = &tegra194_i2c_quirks,
.supports_bus_clear = true,
.has_apb_dma = false,
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 3445cc3b476b..ed90858a27b7 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -370,6 +370,7 @@ static const struct acpi_device_id i2c_acpi_force_100khz_device_ids[] = {
* the device works without issues on Windows at what is expected to be
* a 400KHz frequency. The root cause of the issue is not known.
*/
+ { "DLL0945", 0 },
{ "ELAN06FA", 0 },
{}
};
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index c688af270a11..50fbc0d06e62 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -164,7 +164,7 @@ static int ltc4306_gpio_init(struct ltc4306 *data)
data->gpiochip.direction_input = ltc4306_gpio_direction_input;
data->gpiochip.direction_output = ltc4306_gpio_direction_output;
data->gpiochip.get = ltc4306_gpio_get;
- data->gpiochip.set_rv = ltc4306_gpio_set;
+ data->gpiochip.set = ltc4306_gpio_set;
data->gpiochip.set_config = ltc4306_gpio_set_config;
data->gpiochip.owner = THIS_MODULE;
diff --git a/drivers/i2c/muxes/i2c-mux-mule.c b/drivers/i2c/muxes/i2c-mux-mule.c
index 284ff4afeeac..d3b32b794172 100644
--- a/drivers/i2c/muxes/i2c-mux-mule.c
+++ b/drivers/i2c/muxes/i2c-mux-mule.c
@@ -47,7 +47,6 @@ static int mule_i2c_mux_probe(struct platform_device *pdev)
struct mule_i2c_reg_mux *priv;
struct i2c_client *client;
struct i2c_mux_core *muxc;
- struct device_node *dev;
unsigned int readback;
int ndev, ret;
bool old_fw;
@@ -95,7 +94,7 @@ static int mule_i2c_mux_probe(struct platform_device *pdev)
"Failed to register mux remove\n");
/* Create device adapters */
- for_each_child_of_node(mux_dev->of_node, dev) {
+ for_each_child_of_node_scoped(mux_dev->of_node, dev) {
u32 reg;
ret = of_property_read_u32(dev, "reg", &reg);
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
index e80e48756914..2396545763ff 100644
--- a/drivers/i3c/device.c
+++ b/drivers/i3c/device.c
@@ -26,11 +26,12 @@
*
* This function can sleep and thus cannot be called in atomic context.
*
- * Return: 0 in case of success, a negative error core otherwise.
- * -EAGAIN: controller lost address arbitration. Target
- * (IBI, HJ or controller role request) win the bus. Client
- * driver needs to resend the 'xfers' some time later.
- * See I3C spec ver 1.1.1 09-Jun-2021. Section: 5.1.2.2.3.
+ * Return:
+ * * 0 in case of success, a negative error code otherwise.
+ * * -EAGAIN: the controller lost address arbitration and a target (IBI, HJ or
+ * controller role request) won the bus. The client driver needs to resend the
+ * 'xfers' some time later. See I3C spec ver 1.1.1, 09-Jun-2021, Section
+ * 5.1.2.2.3.
*/
int i3c_device_do_priv_xfers(struct i3c_device *dev,
struct i3c_priv_xfer *xfers,
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
index 433f6088b7ce..0d857cc68cc5 100644
--- a/drivers/i3c/internals.h
+++ b/drivers/i3c/internals.h
@@ -9,6 +9,7 @@
#define I3C_INTERNALS_H
#include <linux/i3c/master.h>
+#include <linux/io.h>
void i3c_bus_normaluse_lock(struct i3c_bus *bus);
void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
@@ -22,4 +23,41 @@ int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev);
int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req);
void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev);
+
+/**
+ * i3c_writel_fifo - Write data buffer to a 32-bit FIFO
+ * @addr: FIFO address to write to
+ * @buf: Pointer to the data bytes to write
+ * @nbytes: Number of bytes to write
+ */
+static inline void i3c_writel_fifo(void __iomem *addr, const void *buf,
+ int nbytes)
+{
+ writesl(addr, buf, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp = 0;
+
+ memcpy(&tmp, buf + (nbytes & ~3), nbytes & 3);
+ writel(tmp, addr);
+ }
+}
+
+/**
+ * i3c_readl_fifo - Read data buffer from a 32-bit FIFO
+ * @addr: FIFO address to read from
+ * @buf: Pointer to the buffer to store read bytes
+ * @nbytes: Number of bytes to read
+ */
+static inline void i3c_readl_fifo(const void __iomem *addr, void *buf,
+ int nbytes)
+{
+ readsl(addr, buf, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp;
+
+ tmp = readl(addr);
+ memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
+ }
+}
+
#endif /* I3C_INTERNAL_H */
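
Both helpers move the bulk of the buffer as whole 32-bit words and handle the 1-3 leftover bytes through a temporary word, so neither side over-reads or over-writes the caller's buffer. The write-side tail packing in isolation, as a runnable demonstration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t buf[7] = { 1, 2, 3, 4, 5, 6, 7 };
	int nbytes = sizeof(buf);
	uint32_t tmp = 0;

	/* 7 / 4 = 1 full word (bytes 0-3) goes out via writesl() */
	/* tail: nbytes & 3 = 3 bytes, starting at offset nbytes & ~3 = 4 */
	memcpy(&tmp, buf + (nbytes & ~3), nbytes & 3);
	printf("0x%08x\n", tmp); /* 0x00070605 on a little-endian host */
	return 0;
}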
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index fd81871609d9..2ef898a8fd80 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -141,7 +141,7 @@ static ssize_t bcr_show(struct device *dev,
i3c_bus_normaluse_lock(bus);
desc = dev_to_i3cdesc(dev);
- ret = sprintf(buf, "%x\n", desc->info.bcr);
+ ret = sprintf(buf, "0x%02x\n", desc->info.bcr);
i3c_bus_normaluse_unlock(bus);
return ret;
@@ -158,7 +158,7 @@ static ssize_t dcr_show(struct device *dev,
i3c_bus_normaluse_lock(bus);
desc = dev_to_i3cdesc(dev);
- ret = sprintf(buf, "%x\n", desc->info.dcr);
+ ret = sprintf(buf, "0x%02x\n", desc->info.dcr);
i3c_bus_normaluse_unlock(bus);
return ret;
@@ -727,12 +727,12 @@ static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
switch (i3cbus->mode) {
case I3C_BUS_MODE_PURE:
if (!i3cbus->scl_rate.i3c)
- i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ i3cbus->scl_rate.i3c = I3C_BUS_I3C_SCL_TYP_RATE;
break;
case I3C_BUS_MODE_MIXED_FAST:
case I3C_BUS_MODE_MIXED_LIMITED:
if (!i3cbus->scl_rate.i3c)
- i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ i3cbus->scl_rate.i3c = I3C_BUS_I3C_SCL_TYP_RATE;
if (!i3cbus->scl_rate.i2c)
i3cbus->scl_rate.i2c = max_i2c_scl_rate;
break;
@@ -754,8 +754,8 @@ static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
* I3C/I2C frequency may have been overridden, check that user-provided
* values are not exceeding max possible frequency.
*/
- if (i3cbus->scl_rate.i3c > I3C_BUS_MAX_I3C_SCL_RATE ||
- i3cbus->scl_rate.i2c > I3C_BUS_I2C_FM_PLUS_SCL_RATE)
+ if (i3cbus->scl_rate.i3c > I3C_BUS_I3C_SCL_MAX_RATE ||
+ i3cbus->scl_rate.i2c > I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE)
return -EINVAL;
return 0;
@@ -837,14 +837,14 @@ static int i3c_master_send_ccc_cmd_locked(struct i3c_master_controller *master,
return -EINVAL;
if (!master->ops->send_ccc_cmd)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if ((cmd->id & I3C_CCC_DIRECT) && (!cmd->dests || !cmd->ndests))
return -EINVAL;
if (master->ops->supports_ccc_cmd &&
!master->ops->supports_ccc_cmd(master, cmd))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
ret = master->ops->send_ccc_cmd(master, cmd);
if (ret) {
@@ -1439,7 +1439,7 @@ static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
if (dev->info.bcr & I3C_BCR_HDR_CAP) {
ret = i3c_master_gethdrcap_locked(master, &dev->info);
- if (ret)
+ if (ret && ret != -EOPNOTSUPP)
return ret;
}
@@ -2210,7 +2210,7 @@ of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
*/
if (boardinfo->base.flags & I2C_CLIENT_TEN) {
dev_err(dev, "I2C device with 10 bit address not supported.");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
/* LVR is encoded in reg[2]. */
@@ -2340,13 +2340,13 @@ static int i3c_master_i2c_adapter_xfer(struct i2c_adapter *adap,
return -EINVAL;
if (!master->ops->i2c_xfers)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* Doing transfers to different devices is not supported. */
addr = xfers[0].addr;
for (i = 1; i < nxfers; i++) {
if (addr != xfers[i].addr)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
i3c_bus_normaluse_lock(&master->bus);
@@ -2467,6 +2467,8 @@ static int i3c_i2c_notifier_call(struct notifier_block *nb, unsigned long action
case BUS_NOTIFY_DEL_DEVICE:
ret = i3c_master_i2c_detach(adap, client);
break;
+ default:
+ ret = -EINVAL;
}
i3c_bus_maintenance_unlock(&master->bus);
@@ -2766,7 +2768,7 @@ static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops)
* controller)
* @ops: the master controller operations
* @secondary: true if you are registering a secondary master. Will return
- * -ENOTSUPP if set to true since secondary masters are not yet
+ * -EOPNOTSUPP if set to true since secondary masters are not yet
* supported
*
* This function takes care of everything for you:
@@ -2785,7 +2787,7 @@ int i3c_master_register(struct i3c_master_controller *master,
const struct i3c_master_controller_ops *ops,
bool secondary)
{
- unsigned long i2c_scl_rate = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
+ unsigned long i2c_scl_rate = I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE;
struct i3c_bus *i3cbus = i3c_master_get_bus(master);
enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
struct i2c_dev_boardinfo *i2cbi;
@@ -2793,7 +2795,7 @@ int i3c_master_register(struct i3c_master_controller *master,
/* We do not support secondary masters yet. */
if (secondary)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
ret = i3c_master_check_ops(ops);
if (ret)
@@ -2844,7 +2846,7 @@ int i3c_master_register(struct i3c_master_controller *master,
}
if (i2cbi->lvr & I3C_LVR_I2C_FM_MODE)
- i2c_scl_rate = I3C_BUS_I2C_FM_SCL_RATE;
+ i2c_scl_rate = I3C_BUS_I2C_FM_SCL_MAX_RATE;
}
ret = i3c_bus_set_mode(i3cbus, mode, i2c_scl_rate);
@@ -2954,7 +2956,7 @@ int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
return -EINVAL;
if (!master->ops->priv_xfers)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return master->ops->priv_xfers(dev, xfers, nxfers);
}
@@ -3004,7 +3006,7 @@ int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
int ret;
if (!master->ops->request_ibi)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (dev->ibi)
return -EBUSY;
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 7b30db3253af..13df2944f2ec 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -64,3 +64,13 @@ config MIPI_I3C_HCI_PCI
This driver can also be built as a module. If so, the module will be
called mipi-i3c-hci-pci.
+
+config RENESAS_I3C
+ tristate "Renesas I3C controller driver"
+ depends on HAS_IOMEM
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Support the Renesas I3C controller as found in some RZ variants.
+
+ This driver can also be built as a module. If so, the module will be
+ called renesas-i3c.
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
index 3e97960160bc..aac74f3e3851 100644
--- a/drivers/i3c/master/Makefile
+++ b/drivers/i3c/master/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
obj-$(CONFIG_AST2600_I3C_MASTER) += ast2600-i3c-master.o
obj-$(CONFIG_SVC_I3C_MASTER) += svc-i3c-master.o
obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci/
+obj-$(CONFIG_RENESAS_I3C) += renesas-i3c.o
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 611c22b72c15..974122b2d20e 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -23,6 +23,7 @@
#include <linux/reset.h>
#include <linux/slab.h>
+#include "../internals.h"
#include "dw-i3c-master.h"
#define DEVICE_CTRL 0x0
@@ -336,37 +337,19 @@ static int dw_i3c_master_get_free_pos(struct dw_i3c_master *master)
static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
const u8 *bytes, int nbytes)
{
- writesl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
- if (nbytes & 3) {
- u32 tmp = 0;
-
- memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
- writesl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
- }
-}
-
-static void dw_i3c_master_read_fifo(struct dw_i3c_master *master,
- int reg, u8 *bytes, int nbytes)
-{
- readsl(master->regs + reg, bytes, nbytes / 4);
- if (nbytes & 3) {
- u32 tmp;
-
- readsl(master->regs + reg, &tmp, 1);
- memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
- }
+ i3c_writel_fifo(master->regs + RX_TX_DATA_PORT, bytes, nbytes);
}
static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
u8 *bytes, int nbytes)
{
- return dw_i3c_master_read_fifo(master, RX_TX_DATA_PORT, bytes, nbytes);
+ i3c_readl_fifo(master->regs + RX_TX_DATA_PORT, bytes, nbytes);
}
static void dw_i3c_master_read_ibi_fifo(struct dw_i3c_master *master,
u8 *bytes, int nbytes)
{
- return dw_i3c_master_read_fifo(master, IBI_QUEUE_STATUS, bytes, nbytes);
+ i3c_readl_fifo(master->regs + IBI_QUEUE_STATUS, bytes, nbytes);
}
static struct dw_i3c_xfer *
@@ -622,14 +605,14 @@ static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
core_period = DIV_ROUND_UP(1000000000, core_rate);
lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
- hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_RATE) - lcnt;
+ hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE) - lcnt;
scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
SCL_I2C_FMP_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
master->i2c_fmp_timing = scl_timing;
lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
- hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
+ hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_MAX_RATE) - lcnt;
scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
SCL_I2C_FM_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
@@ -699,7 +682,6 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
dw_i3c_master_enable(master);
rpm_out:
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -829,7 +811,6 @@ static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
else
ret = dw_i3c_ccc_set(master, ccc);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -912,7 +893,6 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
dw_i3c_master_free_xfer(xfer);
rpm_out:
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -932,7 +912,7 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
return 0;
if (i3c_nxfers > master->caps.cmdfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
for (i = 0; i < i3c_nxfers; i++) {
if (i3c_xfers[i].rnw)
@@ -943,7 +923,7 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
if (ntxwords > master->caps.datafifodepth ||
nrxwords > master->caps.datafifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
if (!xfer)
@@ -998,7 +978,6 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
ret = xfer->ret;
dw_i3c_master_free_xfer(xfer);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -1093,7 +1072,7 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
return 0;
if (i2c_nxfers > master->caps.cmdfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
for (i = 0; i < i2c_nxfers; i++) {
if (i2c_xfers[i].flags & I2C_M_RD)
@@ -1104,7 +1083,7 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
if (ntxwords > master->caps.datafifodepth ||
nrxwords > master->caps.datafifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers);
if (!xfer)
@@ -1142,13 +1121,12 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
}
dw_i3c_master_enqueue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
dw_i3c_master_dequeue_xfer(master, xfer);
ret = xfer->ret;
dw_i3c_master_free_xfer(xfer);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -1316,7 +1294,6 @@ static int dw_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
master->regs + DEVICE_CTRL);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return 0;
}
@@ -1342,7 +1319,6 @@ static int dw_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
if (rc) {
dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
}
@@ -1362,7 +1338,6 @@ static int dw_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return 0;
}
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index fd3752cea654..97b151564d3d 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -23,6 +23,8 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include "../internals.h"
+
#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034
@@ -412,7 +414,6 @@ struct cdns_i3c_master {
} xferqueue;
void __iomem *regs;
struct clk *sysclk;
- struct clk *pclk;
struct cdns_i3c_master_caps caps;
unsigned long i3c_scl_lim;
const struct cdns_i3c_data *devdata;
@@ -427,25 +428,13 @@ to_cdns_i3c_master(struct i3c_master_controller *master)
static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
const u8 *bytes, int nbytes)
{
- writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
- if (nbytes & 3) {
- u32 tmp = 0;
-
- memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
- writesl(master->regs + TX_FIFO, &tmp, 1);
- }
+ i3c_writel_fifo(master->regs + TX_FIFO, bytes, nbytes);
}
static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
u8 *bytes, int nbytes)
{
- readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
- if (nbytes & 3) {
- u32 tmp;
-
- readsl(master->regs + RX_FIFO, &tmp, 1);
- memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
- }
+ i3c_readl_fifo(master->regs + RX_FIFO, bytes, nbytes);
}
static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
@@ -742,7 +731,7 @@ static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
for (i = 0; i < nxfers; i++) {
if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (!nxfers)
@@ -750,7 +739,7 @@ static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
if (nxfers > master->caps.cmdfifodepth ||
nxfers > master->caps.cmdrfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/*
* First make sure that all transactions (block of transfers separated
@@ -765,7 +754,7 @@ static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
if (rxslots > master->caps.rxfifodepth ||
txslots > master->caps.txfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
if (!cdns_xfer)
@@ -822,11 +811,11 @@ static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
int i, ret = 0;
if (nxfers > master->caps.cmdfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
for (i = 0; i < nxfers; i++) {
if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (xfers[i].flags & I2C_M_RD)
nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
@@ -836,7 +825,7 @@ static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
if (ntxwords > master->caps.txfifodepth ||
nrxwords > master->caps.rxfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
if (!xfer)
@@ -863,7 +852,7 @@ static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
}
cdns_i3c_master_queue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
cdns_i3c_master_unqueue_xfer(master, xfer);
ret = xfer->ret;
@@ -1330,12 +1319,7 @@ static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
buf = slot->data;
nbytes = IBIR_XFER_BYTES(ibir);
- readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
- if (nbytes % 3) {
- u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);
-
- memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
- }
+ i3c_readl_fifo(master->regs + IBI_DATA_FIFO, buf, nbytes);
slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
dev->ibi->max_payload_len);
@@ -1566,6 +1550,7 @@ MODULE_DEVICE_TABLE(of, cdns_i3c_master_of_ids);
static int cdns_i3c_master_probe(struct platform_device *pdev)
{
struct cdns_i3c_master *master;
+ struct clk *pclk;
int ret, irq;
u32 val;
@@ -1581,11 +1566,11 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
- master->pclk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(master->pclk))
- return PTR_ERR(master->pclk);
+ pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(pclk))
+ return PTR_ERR(pclk);
- master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
+ master->sysclk = devm_clk_get_enabled(&pdev->dev, "sysclk");
if (IS_ERR(master->sysclk))
return PTR_ERR(master->sysclk);
@@ -1593,18 +1578,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = clk_prepare_enable(master->pclk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(master->sysclk);
- if (ret)
- goto err_disable_pclk;
-
- if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
- ret = -EINVAL;
- goto err_disable_sysclk;
- }
+ if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER)
+ return -EINVAL;
spin_lock_init(&master->xferqueue.lock);
INIT_LIST_HEAD(&master->xferqueue.list);
@@ -1615,7 +1590,7 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
dev_name(&pdev->dev), master);
if (ret)
- goto err_disable_sysclk;
+ return ret;
platform_set_drvdata(pdev, master);
@@ -1637,29 +1612,15 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
sizeof(*master->ibi.slots),
GFP_KERNEL);
- if (!master->ibi.slots) {
- ret = -ENOMEM;
- goto err_disable_sysclk;
- }
+ if (!master->ibi.slots)
+ return -ENOMEM;
writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
writel(MST_INT_IBIR_THR, master->regs + MST_IER);
writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);
- ret = i3c_master_register(&master->base, &pdev->dev,
- &cdns_i3c_master_ops, false);
- if (ret)
- goto err_disable_sysclk;
-
- return 0;
-
-err_disable_sysclk:
- clk_disable_unprepare(master->sysclk);
-
-err_disable_pclk:
- clk_disable_unprepare(master->pclk);
-
- return ret;
+ return i3c_master_register(&master->base, &pdev->dev,
+ &cdns_i3c_master_ops, false);
}
static void cdns_i3c_master_remove(struct platform_device *pdev)
@@ -1668,9 +1629,6 @@ static void cdns_i3c_master_remove(struct platform_device *pdev)
cancel_work_sync(&master->hj_work);
i3c_master_unregister(&master->base);
-
- clk_disable_unprepare(master->sysclk);
- clk_disable_unprepare(master->pclk);
}
static struct platform_driver cdns_i3c_master = {
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index bc4538694540..60f1175f1f37 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -395,7 +395,7 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
ret = hci->io->queue_xfer(hci, xfer, nxfers);
if (ret)
goto out;
- if (!wait_for_completion_timeout(&done, HZ) &&
+ if (!wait_for_completion_timeout(&done, m->i2c.timeout) &&
hci->io->dequeue_xfer(hci, xfer, nxfers)) {
ret = -ETIME;
goto out;
diff --git a/drivers/i3c/master/renesas-i3c.c b/drivers/i3c/master/renesas-i3c.c
new file mode 100644
index 000000000000..174d3dc5d276
--- /dev/null
+++ b/drivers/i3c/master/renesas-i3c.c
@@ -0,0 +1,1404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas I3C Controller driver
+ * Copyright (C) 2023-25 Renesas Electronics Corp.
+ *
+ * TODO: IBI support, HotJoin support, Target support
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include "../internals.h"
+
+#define PRTS 0x00
+#define PRTS_PRTMD BIT(0)
+
+#define BCTL 0x14
+#define BCTL_INCBA BIT(0)
+#define BCTL_HJACKCTL BIT(8)
+#define BCTL_ABT BIT(29)
+#define BCTL_BUSE BIT(31)
+
+#define MSDVAD 0x18
+#define MSDVAD_MDYAD(x) FIELD_PREP(GENMASK(21, 16), x)
+#define MSDVAD_MDYADV BIT(31)
+
+#define RSTCTL 0x20
+#define RSTCTL_RI3CRST BIT(0)
+#define RSTCTL_INTLRST BIT(16)
+
+#define INST 0x30
+
+#define IBINCTL 0x58
+#define IBINCTL_NRHJCTL BIT(0)
+#define IBINCTL_NRMRCTL BIT(1)
+#define IBINCTL_NRSIRCTL BIT(3)
+
+#define SVCTL 0x64
+
+#define REFCKCTL 0x70
+#define REFCKCTL_IREFCKS(x) FIELD_PREP(GENMASK(2, 0), x)
+
+#define STDBR 0x74
+#define STDBR_SBRLO(cond, x) FIELD_PREP(GENMASK(7, 0), (x) >> (cond))
+#define STDBR_SBRHO(cond, x) FIELD_PREP(GENMASK(15, 8), (x) >> (cond))
+#define STDBR_SBRLP(x) FIELD_PREP(GENMASK(21, 16), x)
+#define STDBR_SBRHP(x) FIELD_PREP(GENMASK(29, 24), x)
+#define STDBR_DSBRPO BIT(31)
+
+#define EXTBR 0x78
+#define EXTBR_EBRLO(x) FIELD_PREP(GENMASK(7, 0), x)
+#define EXTBR_EBRHO(x) FIELD_PREP(GENMASK(15, 8), x)
+#define EXTBR_EBRLP(x) FIELD_PREP(GENMASK(21, 16), x)
+#define EXTBR_EBRHP(x) FIELD_PREP(GENMASK(29, 24), x)
+
+#define BFRECDT 0x7c
+#define BFRECDT_FRECYC(x) FIELD_PREP(GENMASK(8, 0), x)
+
+#define BAVLCDT 0x80
+#define BAVLCDT_AVLCYC(x) FIELD_PREP(GENMASK(8, 0), x)
+
+#define BIDLCDT 0x84
+#define BIDLCDT_IDLCYC(x) FIELD_PREP(GENMASK(17, 0), x)
+
+#define ACKCTL 0xa0
+#define ACKCTL_ACKT BIT(1)
+#define ACKCTL_ACKTWP BIT(2)
+
+#define SCSTRCTL 0xa4
+#define SCSTRCTL_ACKTWE BIT(0)
+#define SCSTRCTL_RWE BIT(1)
+
+#define SCSTLCTL 0xb0
+
+#define CNDCTL 0x140
+#define CNDCTL_STCND BIT(0)
+#define CNDCTL_SRCND BIT(1)
+#define CNDCTL_SPCND BIT(2)
+
+#define NCMDQP 0x150 /* Normal Command Queue */
+#define NCMDQP_CMD_ATTR(x) FIELD_PREP(GENMASK(2, 0), x)
+#define NCMDQP_IMMED_XFER 0x01
+#define NCMDQP_ADDR_ASSGN 0x02
+#define NCMDQP_TID(x) FIELD_PREP(GENMASK(6, 3), x)
+#define NCMDQP_CMD(x) FIELD_PREP(GENMASK(14, 7), x)
+#define NCMDQP_CP BIT(15)
+#define NCMDQP_DEV_INDEX(x) FIELD_PREP(GENMASK(20, 16), x)
+#define NCMDQP_BYTE_CNT(x) FIELD_PREP(GENMASK(25, 23), x)
+#define NCMDQP_DEV_COUNT(x) FIELD_PREP(GENMASK(29, 26), x)
+#define NCMDQP_MODE(x) FIELD_PREP(GENMASK(28, 26), x)
+#define NCMDQP_RNW(x) FIELD_PREP(GENMASK(29, 29), x)
+#define NCMDQP_ROC BIT(30)
+#define NCMDQP_TOC BIT(31)
+#define NCMDQP_DATA_LENGTH(x) FIELD_PREP(GENMASK(31, 16), x)
+
+#define NRSPQP 0x154 /* Normal Response Queue */
+#define NRSPQP_NO_ERROR 0
+#define NRSPQP_ERROR_CRC 1
+#define NRSPQP_ERROR_PARITY 2
+#define NRSPQP_ERROR_FRAME 3
+#define NRSPQP_ERROR_IBA_NACK 4
+#define NRSPQP_ERROR_ADDRESS_NACK 5
+#define NRSPQP_ERROR_OVER_UNDER_FLOW 6
+#define NRSPQP_ERROR_TRANSF_ABORT 8
+#define NRSPQP_ERROR_I2C_W_NACK_ERR 9
+#define NRSPQP_ERROR_UNSUPPORTED 10
+#define NRSPQP_DATA_LEN(x) FIELD_GET(GENMASK(15, 0), x)
+#define NRSPQP_ERR_STATUS(x) FIELD_GET(GENMASK(31, 28), x)
+
+#define NTDTBP0 0x158 /* Normal Transfer Data Buffer */
+#define NTDTBP0_DEPTH 16
+
+#define NQTHCTL 0x190
+#define NQTHCTL_CMDQTH(x) FIELD_PREP(GENMASK(1, 0), x)
+#define NQTHCTL_IBIDSSZ(x) FIELD_PREP(GENMASK(23, 16), x)
+
+#define NTBTHCTL0 0x194
+
+#define NRQTHCTL 0x1c0
+
+#define BST 0x1d0
+#define BST_STCNDDF BIT(0)
+#define BST_SPCNDDF BIT(1)
+#define BST_NACKDF BIT(4)
+#define BST_TENDF BIT(8)
+
+#define BSTE 0x1d4
+#define BSTE_STCNDDE BIT(0)
+#define BSTE_SPCNDDE BIT(1)
+#define BSTE_NACKDE BIT(4)
+#define BSTE_TENDE BIT(8)
+#define BSTE_ALE BIT(16)
+#define BSTE_TODE BIT(20)
+#define BSTE_WUCNDDE BIT(24)
+#define BSTE_ALL_FLAG (BSTE_STCNDDE | BSTE_SPCNDDE |\
+ BSTE_NACKDE | BSTE_TENDE |\
+ BSTE_ALE | BSTE_TODE | BSTE_WUCNDDE)
+
+#define BIE 0x1d8
+#define BIE_STCNDDIE BIT(0)
+#define BIE_SPCNDDIE BIT(1)
+#define BIE_NACKDIE BIT(4)
+#define BIE_TENDIE BIT(8)
+
+#define NTST 0x1e0
+#define NTST_TDBEF0 BIT(0)
+#define NTST_RDBFF0 BIT(1)
+#define NTST_CMDQEF BIT(3)
+#define NTST_RSPQFF BIT(4)
+#define NTST_TABTF BIT(5)
+#define NTST_TEF BIT(9)
+
+#define NTSTE 0x1e4
+#define NTSTE_TDBEE0 BIT(0)
+#define NTSTE_RDBFE0 BIT(1)
+#define NTSTE_IBIQEFE BIT(2)
+#define NTSTE_CMDQEE BIT(3)
+#define NTSTE_RSPQFE BIT(4)
+#define NTSTE_TABTE BIT(5)
+#define NTSTE_TEE BIT(9)
+#define NTSTE_RSQFE BIT(20)
+#define NTSTE_ALL_FLAG (NTSTE_TDBEE0 | NTSTE_RDBFE0 |\
+ NTSTE_IBIQEFE | NTSTE_CMDQEE |\
+ NTSTE_RSPQFE | NTSTE_TABTE |\
+ NTSTE_TEE | NTSTE_RSQFE)
+
+#define NTIE 0x1e8
+#define NTIE_TDBEIE0 BIT(0)
+#define NTIE_RDBFIE0 BIT(1)
+#define NTIE_IBIQEFIE BIT(2)
+#define NTIE_RSPQFIE BIT(4)
+#define NTIE_RSQFIE BIT(20)
+
+#define BCST 0x210
+#define BCST_BFREF BIT(0)
+
+#define DATBAS(x) (0x224 + 0x8 * (x))
+#define DATBAS_DVSTAD(x) FIELD_PREP(GENMASK(6, 0), x)
+#define DATBAS_DVDYAD(x) FIELD_PREP(GENMASK(23, 16), x)
+
+#define NDBSTLV0 0x398
+#define NDBSTLV0_RDBLV(x) FIELD_GET(GENMASK(15, 8), x)
+
+#define RENESAS_I3C_MAX_DEVS 8
+#define I2C_INIT_MSG -1
+
+enum i3c_internal_state {
+ I3C_INTERNAL_STATE_DISABLED,
+ I3C_INTERNAL_STATE_CONTROLLER_IDLE,
+ I3C_INTERNAL_STATE_CONTROLLER_ENTDAA,
+ I3C_INTERNAL_STATE_CONTROLLER_SETDASA,
+ I3C_INTERNAL_STATE_CONTROLLER_WRITE,
+ I3C_INTERNAL_STATE_CONTROLLER_READ,
+ I3C_INTERNAL_STATE_CONTROLLER_COMMAND_WRITE,
+ I3C_INTERNAL_STATE_CONTROLLER_COMMAND_READ,
+};
+
+enum renesas_i3c_event {
+ I3C_COMMAND_ADDRESS_ASSIGNMENT,
+ I3C_WRITE,
+ I3C_READ,
+ I3C_COMMAND_WRITE,
+ I3C_COMMAND_READ,
+};
+
+struct renesas_i3c_cmd {
+ u32 cmd0;
+ u32 len;
+ const void *tx_buf;
+ u32 tx_count;
+ void *rx_buf;
+ u32 rx_count;
+ u32 err;
+ u8 rnw;
+ /* i2c xfer */
+ int i2c_bytes_left;
+ int i2c_is_last;
+ u8 *i2c_buf;
+ const struct i2c_msg *msg;
+};
+
+struct renesas_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ bool is_i2c_xfer;
+ unsigned int ncmds;
+ struct renesas_i3c_cmd cmds[] __counted_by(ncmds);
+};
+
+struct renesas_i3c_xferqueue {
+ struct list_head list;
+ struct renesas_i3c_xfer *cur;
+ /* Lock for accessing the xfer queue */
+ spinlock_t lock;
+};
+
+struct renesas_i3c {
+ struct i3c_master_controller base;
+ enum i3c_internal_state internal_state;
+ u16 maxdevs;
+ u32 free_pos;
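+ /* Cached STDBR bit-rate settings for I2C and I3C bus modes */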
+ u32 i2c_STDBR;
+ u32 i3c_STDBR;
+ u8 addrs[RENESAS_I3C_MAX_DEVS];
+ struct renesas_i3c_xferqueue xferqueue;
+ void __iomem *regs;
+ struct clk *tclk;
+};
+
+struct renesas_i3c_i2c_dev_data {
+ u8 index;
+};
+
+struct renesas_i3c_irq_desc {
+ const char *name;
+ irq_handler_t isr;
+ const char *desc;
+};
+
+struct renesas_i3c_config {
+ unsigned int has_pclkrw:1;
+};
+
+static inline void renesas_i3c_reg_update(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 data = readl(reg);
+
+ data &= ~mask;
+ data |= (val & mask);
+ writel(data, reg);
+}
+
+static inline u32 renesas_readl(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void renesas_writel(void __iomem *base, u32 reg, u32 val)
+{
+ writel(val, base + reg);
+}
+
+static void renesas_set_bit(void __iomem *base, u32 reg, u32 val)
+{
+ renesas_i3c_reg_update(base + reg, val, val);
+}
+
+static void renesas_clear_bit(void __iomem *base, u32 reg, u32 val)
+{
+ renesas_i3c_reg_update(base + reg, val, 0);
+}
+
+static inline struct renesas_i3c *to_renesas_i3c(struct i3c_master_controller *m)
+{
+ return container_of(m, struct renesas_i3c, base);
+}
+
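+/*
+ * Bit 7 of the DATBAS dynamic-address field is a parity bit: set it
+ * whenever the 7-bit address alone has even parity, so the full byte
+ * always carries odd parity.
+ */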
+static inline u32 datbas_dvdyad_with_parity(u8 addr)
+{
+ return DATBAS_DVDYAD(addr | (parity8(addr) ? 0 : BIT(7)));
+}
+
+static int renesas_i3c_get_free_pos(struct renesas_i3c *i3c)
+{
+ if (!(i3c->free_pos & GENMASK(i3c->maxdevs - 1, 0)))
+ return -ENOSPC;
+
+ return ffs(i3c->free_pos) - 1;
+}
+
+static int renesas_i3c_get_addr_pos(struct renesas_i3c *i3c, u8 addr)
+{
+ int pos;
+
+ for (pos = 0; pos < i3c->maxdevs; pos++) {
+ if (addr == i3c->addrs[pos])
+ return pos;
+ }
+
+ return -EINVAL;
+}
+
+static struct renesas_i3c_xfer *renesas_i3c_alloc_xfer(struct renesas_i3c *i3c,
+ unsigned int ncmds)
+{
+ struct renesas_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void renesas_i3c_start_xfer_locked(struct renesas_i3c *i3c)
+{
+ struct renesas_i3c_xfer *xfer = i3c->xferqueue.cur;
+ struct renesas_i3c_cmd *cmd;
+ u32 cmd1;
+
+ if (!xfer)
+ return;
+
+ cmd = xfer->cmds;
+
+ switch (i3c->internal_state) {
+ case I3C_INTERNAL_STATE_CONTROLLER_ENTDAA:
+ case I3C_INTERNAL_STATE_CONTROLLER_SETDASA:
+ renesas_set_bit(i3c->regs, NTIE, NTIE_RSPQFIE);
+ renesas_writel(i3c->regs, NCMDQP, cmd->cmd0);
+ renesas_writel(i3c->regs, NCMDQP, 0);
+ break;
+ case I3C_INTERNAL_STATE_CONTROLLER_WRITE:
+ case I3C_INTERNAL_STATE_CONTROLLER_COMMAND_WRITE:
+ renesas_set_bit(i3c->regs, NTIE, NTIE_RSPQFIE);
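+ /*
+ * A write of up to four bytes is sent as an immediate transfer
+ * with the payload packed into the second command word; longer
+ * writes only advertise their length here and feed the data
+ * through the transmit FIFO.
+ */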
+ if (cmd->len <= 4) {
+ cmd->cmd0 |= NCMDQP_CMD_ATTR(NCMDQP_IMMED_XFER);
+ cmd->cmd0 |= NCMDQP_BYTE_CNT(cmd->len);
+ cmd->tx_count = cmd->len;
+ cmd1 = cmd->len == 0 ? 0 : *(u32 *)cmd->tx_buf;
+ } else {
+ cmd1 = NCMDQP_DATA_LENGTH(cmd->len);
+ }
+ renesas_writel(i3c->regs, NCMDQP, cmd->cmd0);
+ renesas_writel(i3c->regs, NCMDQP, cmd1);
+ break;
+ case I3C_INTERNAL_STATE_CONTROLLER_READ:
+ case I3C_INTERNAL_STATE_CONTROLLER_COMMAND_READ:
+ renesas_set_bit(i3c->regs, NTIE, NTIE_RDBFIE0);
+ cmd1 = NCMDQP_DATA_LENGTH(cmd->len);
+ renesas_writel(i3c->regs, NCMDQP, cmd->cmd0);
+ renesas_writel(i3c->regs, NCMDQP, cmd1);
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the command queue empty flag */
+ renesas_clear_bit(i3c->regs, NTST, NTST_CMDQEF);
+}
+
+static void renesas_i3c_dequeue_xfer_locked(struct renesas_i3c *i3c,
+ struct renesas_i3c_xfer *xfer)
+{
+ if (i3c->xferqueue.cur == xfer)
+ i3c->xferqueue.cur = NULL;
+ else
+ list_del_init(&xfer->node);
+}
+
+static void renesas_i3c_dequeue_xfer(struct renesas_i3c *i3c, struct renesas_i3c_xfer *xfer)
+{
+ scoped_guard(spinlock_irqsave, &i3c->xferqueue.lock)
+ renesas_i3c_dequeue_xfer_locked(i3c, xfer);
+}
+
+static void renesas_i3c_enqueue_xfer(struct renesas_i3c *i3c, struct renesas_i3c_xfer *xfer)
+{
+ reinit_completion(&xfer->comp);
+ scoped_guard(spinlock_irqsave, &i3c->xferqueue.lock) {
+ if (i3c->xferqueue.cur) {
+ list_add_tail(&xfer->node, &i3c->xferqueue.list);
+ } else {
+ i3c->xferqueue.cur = xfer;
+ if (!xfer->is_i2c_xfer)
+ renesas_i3c_start_xfer_locked(i3c);
+ }
+ }
+}
+
+static void renesas_i3c_wait_xfer(struct renesas_i3c *i3c, struct renesas_i3c_xfer *xfer)
+{
+ unsigned long time_left;
+
+ renesas_i3c_enqueue_xfer(i3c, xfer);
+
+ time_left = wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000));
+ if (!time_left)
+ renesas_i3c_dequeue_xfer(i3c, xfer);
+}
+
+static void renesas_i3c_set_prts(struct renesas_i3c *i3c, u32 val)
+{
+ /* Required sequence according to tnrza0140ae */
+ renesas_set_bit(i3c->regs, RSTCTL, RSTCTL_INTLRST);
+ renesas_writel(i3c->regs, PRTS, val);
+ renesas_clear_bit(i3c->regs, RSTCTL, RSTCTL_INTLRST);
+}
+
+static void renesas_i3c_bus_enable(struct i3c_master_controller *m, bool i3c_mode)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+
+ /* Set up either the I3C or the I2C protocol */
+ if (i3c_mode) {
+ renesas_i3c_set_prts(i3c, 0);
+ /* Revisit: INCBA handling, especially after I2C transfers */
+ renesas_set_bit(i3c->regs, BCTL, BCTL_HJACKCTL | BCTL_INCBA);
+ renesas_set_bit(i3c->regs, MSDVAD, MSDVAD_MDYADV);
+ renesas_writel(i3c->regs, STDBR, i3c->i3c_STDBR);
+ } else {
+ renesas_i3c_set_prts(i3c, PRTS_PRTMD);
+ renesas_writel(i3c->regs, STDBR, i3c->i2c_STDBR);
+ }
+
+ /* Enable I3C bus */
+ renesas_set_bit(i3c->regs, BCTL, BCTL_BUSE);
+}
+
+static int renesas_i3c_reset(struct renesas_i3c *i3c)
+{
+ u32 val;
+
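+ /* Disable the bus, then issue a soft reset and wait for it to self-clear */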
+ renesas_writel(i3c->regs, BCTL, 0);
+ renesas_set_bit(i3c->regs, RSTCTL, RSTCTL_RI3CRST);
+
+ return read_poll_timeout(renesas_readl, val, !(val & RSTCTL_RI3CRST),
+ 0, 1000, false, i3c->regs, RSTCTL);
+}
+
+static int renesas_i3c_bus_init(struct i3c_master_controller *m)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ struct i3c_device_info info = {};
+ struct i2c_timings t;
+ unsigned long rate;
+ u32 double_SBR, val;
+ int cks, pp_high_ticks, pp_low_ticks, i3c_total_ticks;
+ int od_high_ticks, od_low_ticks, i2c_total_ticks;
+ int ret;
+
+ rate = clk_get_rate(i3c->tclk);
+ if (!rate)
+ return -EINVAL;
+
+ ret = renesas_i3c_reset(i3c);
+ if (ret)
+ return ret;
+
+ i2c_total_ticks = DIV_ROUND_UP(rate, bus->scl_rate.i2c);
+ i3c_total_ticks = DIV_ROUND_UP(rate, bus->scl_rate.i3c);
+
+ i2c_parse_fw_timings(&m->dev, &t, true);
+
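+ /*
+ * Search for the smallest reference-clock divider (cks) whose
+ * resulting tick counts still fit into the STDBR bit fields,
+ * halving the rate on every iteration.
+ */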
+ for (cks = 0; cks < 7; cks++) {
+ /* SCL low-period calculation in Open-drain mode */
+ od_low_ticks = ((i2c_total_ticks * 6) / 10);
+
+ /* SCL clock calculation in Push-Pull mode */
+ if (bus->mode == I3C_BUS_MODE_PURE)
+ pp_high_ticks = ((i3c_total_ticks * 5) / 10);
+ else
+ pp_high_ticks = DIV_ROUND_UP(I3C_BUS_THIGH_MIXED_MAX_NS,
+ NSEC_PER_SEC / rate);
+ pp_low_ticks = i3c_total_ticks - pp_high_ticks;
+
+ if ((od_low_ticks / 2) <= 0xFF && pp_low_ticks < 0x3F)
+ break;
+
+ i2c_total_ticks /= 2;
+ i3c_total_ticks /= 2;
+ rate /= 2;
+ }
+
+ /* SCL clock period calculation in Open-drain mode */
+ if ((od_low_ticks / 2) > 0xFF || pp_low_ticks > 0x3F) {
+ dev_err(&m->dev, "invalid speed (i2c-scl = %lu Hz, i3c-scl = %lu Hz). Too slow.\n",
+ (unsigned long)bus->scl_rate.i2c, (unsigned long)bus->scl_rate.i3c);
+ return -EINVAL;
+ }
+
+ /* SCL high-period calculation in Open-drain mode */
+ od_high_ticks = i2c_total_ticks - od_low_ticks;
+
+ /* Standard Bit Rate setting */
+ double_SBR = od_low_ticks > 0xFF ? 1 : 0;
+ i3c->i3c_STDBR = (double_SBR ? STDBR_DSBRPO : 0) |
+ STDBR_SBRLO(double_SBR, od_low_ticks) |
+ STDBR_SBRHO(double_SBR, od_high_ticks) |
+ STDBR_SBRLP(pp_low_ticks) |
+ STDBR_SBRHP(pp_high_ticks);
+
+ od_low_ticks -= t.scl_fall_ns / (NSEC_PER_SEC / rate) + 1;
+ od_high_ticks -= t.scl_rise_ns / (NSEC_PER_SEC / rate) + 1;
+ i3c->i2c_STDBR = (double_SBR ? STDBR_DSBRPO : 0) |
+ STDBR_SBRLO(double_SBR, od_low_ticks) |
+ STDBR_SBRHO(double_SBR, od_high_ticks) |
+ STDBR_SBRLP(pp_low_ticks) |
+ STDBR_SBRHP(pp_high_ticks);
+ renesas_writel(i3c->regs, STDBR, i3c->i3c_STDBR);
+
+ /* Extended Bit Rate setting */
+ renesas_writel(i3c->regs, EXTBR, EXTBR_EBRLO(od_low_ticks) |
+ EXTBR_EBRHO(od_high_ticks) |
+ EXTBR_EBRLP(pp_low_ticks) |
+ EXTBR_EBRHP(pp_high_ticks));
+
+ renesas_writel(i3c->regs, REFCKCTL, REFCKCTL_IREFCKS(cks));
+
+ /* Disable Slave Mode */
+ renesas_writel(i3c->regs, SVCTL, 0);
+
+ /* Initialize Queue/Buffer threshold */
+ renesas_writel(i3c->regs, NQTHCTL, NQTHCTL_IBIDSSZ(6) |
+ NQTHCTL_CMDQTH(1));
+
+ /* The only supported configuration is two entries */
+ renesas_writel(i3c->regs, NTBTHCTL0, 0);
+ /* Interrupt when there is one entry in the queue */
+ renesas_writel(i3c->regs, NRQTHCTL, 0);
+
+ /* Enable all Bus/Transfer Status Flags */
+ renesas_writel(i3c->regs, BSTE, BSTE_ALL_FLAG);
+ renesas_writel(i3c->regs, NTSTE, NTSTE_ALL_FLAG);
+
+ /* Interrupt enable settings */
+ renesas_writel(i3c->regs, BIE, BIE_NACKDIE | BIE_TENDIE);
+ renesas_writel(i3c->regs, NTIE, 0);
+
+ /* Clear Status register */
+ renesas_writel(i3c->regs, NTST, 0);
+ renesas_writel(i3c->regs, INST, 0);
+ renesas_writel(i3c->regs, BST, 0);
+
+ /* Hot-Join Acknowledge setting. */
+ renesas_set_bit(i3c->regs, BCTL, BCTL_HJACKCTL);
+
+ renesas_writel(i3c->regs, IBINCTL, IBINCTL_NRHJCTL | IBINCTL_NRMRCTL |
+ IBINCTL_NRSIRCTL);
+
+ renesas_writel(i3c->regs, SCSTLCTL, 0);
+ renesas_set_bit(i3c->regs, SCSTRCTL, SCSTRCTL_ACKTWE);
+
+ /* Bus condition timing */
+ val = DIV_ROUND_UP(I3C_BUS_TBUF_MIXED_FM_MIN_NS, NSEC_PER_SEC / rate);
+ renesas_writel(i3c->regs, BFRECDT, BFRECDT_FRECYC(val));
+
+ val = DIV_ROUND_UP(I3C_BUS_TAVAL_MIN_NS, NSEC_PER_SEC / rate);
+ renesas_writel(i3c->regs, BAVLCDT, BAVLCDT_AVLCYC(val));
+
+ val = DIV_ROUND_UP(I3C_BUS_TIDLE_MIN_NS, NSEC_PER_SEC / rate);
+ renesas_writel(i3c->regs, BIDLCDT, BIDLCDT_IDLCYC(val));
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ renesas_writel(i3c->regs, MSDVAD, MSDVAD_MDYAD(ret) | MSDVAD_MDYADV);
+
+ memset(&info, 0, sizeof(info));
+ info.dyn_addr = ret;
+ return i3c_master_set_info(&i3c->base, &info);
+}
+
+static void renesas_i3c_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+
+ renesas_i3c_reset(i3c);
+}
+
+static int renesas_i3c_daa(struct i3c_master_controller *m)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_cmd *cmd;
+ u32 olddevs, newdevs;
+ u8 last_addr = 0, pos;
+ int ret;
+
+ struct renesas_i3c_xfer *xfer __free(kfree) = renesas_i3c_alloc_xfer(i3c, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ /* Enable I3C bus. */
+ renesas_i3c_bus_enable(m, true);
+
+ olddevs = ~(i3c->free_pos);
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_ENTDAA;
+
+ /* Setting DATBASn registers for target devices. */
+ for (pos = 0; pos < i3c->maxdevs; pos++) {
+ if (olddevs & BIT(pos))
+ continue;
+
+ ret = i3c_master_get_free_addr(m, last_addr + 1);
+ if (ret < 0)
+ return -ENOSPC;
+
+ i3c->addrs[pos] = ret;
+ last_addr = ret;
+
+ renesas_writel(i3c->regs, DATBAS(pos), datbas_dvdyad_with_parity(ret));
+ }
+
+ init_completion(&xfer->comp);
+ cmd = xfer->cmds;
+ cmd->rx_count = 0;
+
+ ret = renesas_i3c_get_free_pos(i3c);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Set up the command descriptor to start the ENTDAA command,
+ * beginning at the selected device index.
+ */
+ cmd->cmd0 = NCMDQP_CMD_ATTR(NCMDQP_ADDR_ASSGN) | NCMDQP_ROC |
+ NCMDQP_TID(I3C_COMMAND_ADDRESS_ASSIGNMENT) |
+ NCMDQP_CMD(I3C_CCC_ENTDAA) | NCMDQP_DEV_INDEX(ret) |
+ NCMDQP_DEV_COUNT(i3c->maxdevs - ret) | NCMDQP_TOC;
+
+ renesas_i3c_wait_xfer(i3c, xfer);
+
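+ /*
+ * The ENTDAA response reports in rx_count how many device slots
+ * were left unassigned; every position below that mark received
+ * a dynamic address.
+ */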
+ newdevs = GENMASK(i3c->maxdevs - cmd->rx_count - 1, 0);
+ newdevs &= ~olddevs;
+
+ for (pos = 0; pos < i3c->maxdevs; pos++) {
+ if (newdevs & BIT(pos))
+ i3c_master_add_i3c_dev_locked(m, i3c->addrs[pos]);
+ }
+
+ return ret < 0 ? ret : 0;
+}
+
+static bool renesas_i3c_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_ENTAS(0, true):
+ case I3C_CCC_ENTAS(1, true):
+ case I3C_CCC_ENTAS(2, true):
+ case I3C_CCC_ENTAS(3, true):
+ case I3C_CCC_ENTAS(0, false):
+ case I3C_CCC_ENTAS(1, false):
+ case I3C_CCC_ENTAS(2, false):
+ case I3C_CCC_ENTAS(3, false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_DEFSLVS:
+ case I3C_CCC_SETMWL(true):
+ case I3C_CCC_SETMWL(false):
+ case I3C_CCC_SETMRL(true):
+ case I3C_CCC_SETMRL(false):
+ case I3C_CCC_ENTTM:
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETACCMST:
+ case I3C_CCC_GETMXDS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int renesas_i3c_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *ccc)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ int ret, pos = 0;
+
+ if (ccc->id & I3C_CCC_DIRECT) {
+ pos = renesas_i3c_get_addr_pos(i3c, ccc->dests[0].addr);
+ if (pos < 0)
+ return pos;
+ }
+
+ xfer = renesas_i3c_alloc_xfer(i3c, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ renesas_i3c_bus_enable(m, true);
+
+ init_completion(&xfer->comp);
+ cmd = xfer->cmds;
+ cmd->rnw = ccc->rnw;
+ cmd->cmd0 = 0;
+
+ /* Calculate the command descriptor. */
+ switch (ccc->id) {
+ case I3C_CCC_SETDASA:
+ renesas_writel(i3c->regs, DATBAS(pos),
+ DATBAS_DVSTAD(ccc->dests[0].addr) |
+ DATBAS_DVDYAD(*(u8 *)ccc->dests[0].payload.data >> 1));
+ cmd->cmd0 = NCMDQP_CMD_ATTR(NCMDQP_ADDR_ASSGN) | NCMDQP_ROC |
+ NCMDQP_TID(I3C_COMMAND_ADDRESS_ASSIGNMENT) |
+ NCMDQP_CMD(I3C_CCC_SETDASA) | NCMDQP_DEV_INDEX(pos) |
+ NCMDQP_DEV_COUNT(0) | NCMDQP_TOC;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_SETDASA;
+ break;
+ default:
+ cmd->cmd0 = NCMDQP_TID(I3C_COMMAND_WRITE) | NCMDQP_MODE(0) |
+ NCMDQP_RNW(ccc->rnw) | NCMDQP_CMD(ccc->id) |
+ NCMDQP_ROC | NCMDQP_TOC | NCMDQP_CP |
+ NCMDQP_DEV_INDEX(pos);
+
+ if (ccc->rnw) {
+ cmd->rx_buf = ccc->dests[0].payload.data;
+ cmd->len = ccc->dests[0].payload.len;
+ cmd->rx_count = 0;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_COMMAND_READ;
+ } else {
+ cmd->tx_buf = ccc->dests[0].payload.data;
+ cmd->len = ccc->dests[0].payload.len;
+ cmd->tx_count = 0;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_COMMAND_WRITE;
+ }
+ }
+
+ renesas_i3c_wait_xfer(i3c, xfer);
+
+ ret = xfer->ret;
+ if (ret)
+ ccc->err = I3C_ERROR_M2;
+
+ kfree(xfer);
+
+ return ret;
+}
+
+static int renesas_i3c_priv_xfers(struct i3c_dev_desc *dev, struct i3c_priv_xfer *i3c_xfers,
+ int i3c_nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct renesas_i3c_xfer *xfer;
+ int i;
+
+ /* Enable I3C bus. */
+ renesas_i3c_bus_enable(m, true);
+
+ xfer = renesas_i3c_alloc_xfer(i3c, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ init_completion(&xfer->comp);
+
+ for (i = 0; i < i3c_nxfers; i++) {
+ struct renesas_i3c_cmd *cmd = xfer->cmds;
+
+ /* Calculate the Transfer Command Descriptor */
+ cmd->rnw = i3c_xfers[i].rnw;
+ cmd->cmd0 = NCMDQP_DEV_INDEX(data->index) | NCMDQP_MODE(0) |
+ NCMDQP_RNW(cmd->rnw) | NCMDQP_ROC | NCMDQP_TOC;
+
+ if (i3c_xfers[i].rnw) {
+ cmd->rx_count = 0;
+ cmd->cmd0 |= NCMDQP_TID(I3C_READ);
+ cmd->rx_buf = i3c_xfers[i].data.in;
+ cmd->len = i3c_xfers[i].len;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_READ;
+ } else {
+ cmd->tx_count = 0;
+ cmd->cmd0 |= NCMDQP_TID(I3C_WRITE);
+ cmd->tx_buf = i3c_xfers[i].data.out;
+ cmd->len = i3c_xfers[i].len;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_WRITE;
+ }
+
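+ /*
+ * Pre-fill the transmit FIFO before the command is queued; if the
+ * payload is larger than the FIFO, the buffer-empty interrupt
+ * refills it.
+ */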
+ if (!i3c_xfers[i].rnw && i3c_xfers[i].len > 4) {
+ i3c_writel_fifo(i3c->regs + NTDTBP0, cmd->tx_buf, cmd->len);
+ if (cmd->len > NTDTBP0_DEPTH * sizeof(u32))
+ renesas_set_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ }
+
+ renesas_i3c_wait_xfer(i3c, xfer);
+ }
+
+ return 0;
+}
+
+static int renesas_i3c_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_i2c_dev_data *data;
+ int pos;
+
+ pos = renesas_i3c_get_free_pos(i3c);
+ if (pos < 0)
+ return pos;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->index = pos;
+ i3c->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr;
+ i3c->free_pos &= ~BIT(pos);
+
+ renesas_writel(i3c->regs, DATBAS(pos), DATBAS_DVSTAD(dev->info.static_addr) |
+ datbas_dvdyad_with_parity(i3c->addrs[pos]));
+ i3c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static int renesas_i3c_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
+ dev->info.static_addr;
+
+ return 0;
+}
+
+static void renesas_i3c_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct renesas_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+
+ i3c_dev_set_master_data(dev, NULL);
+ i3c->addrs[data->index] = 0;
+ i3c->free_pos |= BIT(data->index);
+ kfree(data);
+}
+
+static int renesas_i3c_i2c_xfers(struct i2c_dev_desc *dev,
+ struct i2c_msg *i2c_xfers,
+ int i2c_nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_cmd *cmd;
+ u8 start_bit = CNDCTL_STCND;
+ int i;
+
+ struct renesas_i3c_xfer *xfer __free(kfree) = renesas_i3c_alloc_xfer(i3c, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ if (!i2c_nxfers)
+ return 0;
+
+ renesas_i3c_bus_enable(m, false);
+
+ init_completion(&xfer->comp);
+ xfer->is_i2c_xfer = true;
+ cmd = xfer->cmds;
+
+ if (!(renesas_readl(i3c->regs, BCST) & BCST_BFREF)) {
+ cmd->err = -EBUSY;
+ return cmd->err;
+ }
+
+ renesas_writel(i3c->regs, BST, 0);
+
+ renesas_i3c_enqueue_xfer(i3c, xfer);
+
+ for (i = 0; i < i2c_nxfers; i++) {
+ cmd->i2c_bytes_left = I2C_INIT_MSG;
+ cmd->i2c_buf = i2c_xfers[i].buf;
+ cmd->msg = &i2c_xfers[i];
+ cmd->i2c_is_last = (i == i2c_nxfers - 1);
+
+ renesas_set_bit(i3c->regs, BIE, BIE_NACKDIE);
+ renesas_set_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ renesas_set_bit(i3c->regs, BIE, BIE_STCNDDIE);
+
+ /* Issue Start condition */
+ renesas_set_bit(i3c->regs, CNDCTL, start_bit);
+
+ renesas_set_bit(i3c->regs, NTSTE, NTSTE_TDBEE0);
+
+ wait_for_completion_timeout(&xfer->comp, m->i2c.timeout);
+
+ if (cmd->err)
+ break;
+
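+ /* Chain the next message with a repeated START instead of STOP/START */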
+ start_bit = CNDCTL_SRCND;
+ }
+
+ renesas_i3c_dequeue_xfer(i3c, xfer);
+ return cmd->err;
+}
+
+static int renesas_i3c_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_i2c_dev_data *data;
+ int pos;
+
+ pos = renesas_i3c_get_free_pos(i3c);
+ if (pos < 0)
+ return pos;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->index = pos;
+ i3c->addrs[pos] = dev->addr;
+ i3c->free_pos &= ~BIT(pos);
+ i2c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static void renesas_i3c_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct renesas_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+
+ i2c_dev_set_master_data(dev, NULL);
+ i3c->addrs[data->index] = 0;
+ i3c->free_pos |= BIT(data->index);
+ kfree(data);
+}
+
+static irqreturn_t renesas_i3c_tx_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ u8 val;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ if (xfer->is_i2c_xfer) {
+ if (!cmd->i2c_bytes_left)
+ return IRQ_NONE;
+
+ if (cmd->i2c_bytes_left != I2C_INIT_MSG) {
+ val = *cmd->i2c_buf;
+ cmd->i2c_buf++;
+ cmd->i2c_bytes_left--;
+ renesas_writel(i3c->regs, NTDTBP0, val);
+ }
+
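+ /* Last byte queued: stop the FIFO interrupt and wait for transmit-end */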
+ if (cmd->i2c_bytes_left == 0) {
+ renesas_clear_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ renesas_set_bit(i3c->regs, BIE, BIE_TENDIE);
+ }
+
+ /* Clear the Transmit Buffer Empty status flag. */
+ renesas_clear_bit(i3c->regs, NTST, NTST_TDBEF0);
+ } else {
+ i3c_writel_fifo(i3c->regs + NTDTBP0, cmd->tx_buf, cmd->len);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_resp_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ u32 resp_descriptor = renesas_readl(i3c->regs, NRSPQP);
+ u32 bytes_remaining = 0;
+ u32 ntst, data_len;
+ int ret = 0;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ /* Clear the Response Queue Full status flag */
+ renesas_clear_bit(i3c->regs, NTST, NTST_RSPQFF);
+
+ data_len = NRSPQP_DATA_LEN(resp_descriptor);
+
+ switch (i3c->internal_state) {
+ case I3C_INTERNAL_STATE_CONTROLLER_ENTDAA:
+ cmd->rx_count = data_len;
+ break;
+ case I3C_INTERNAL_STATE_CONTROLLER_WRITE:
+ case I3C_INTERNAL_STATE_CONTROLLER_COMMAND_WRITE:
+ /* Disable the transmit IRQ if it hasn't been disabled already. */
+ renesas_clear_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ break;
+ case I3C_INTERNAL_STATE_CONTROLLER_READ:
+ case I3C_INTERNAL_STATE_CONTROLLER_COMMAND_READ:
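+ /* Pick up whatever the RX interrupt handler has not read yet */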
+ if (NDBSTLV0_RDBLV(renesas_readl(i3c->regs, NDBSTLV0)) && !cmd->err)
+ bytes_remaining = data_len - cmd->rx_count;
+
+ i3c_readl_fifo(i3c->regs + NTDTBP0, cmd->rx_buf, bytes_remaining);
+ renesas_clear_bit(i3c->regs, NTIE, NTIE_RDBFIE0);
+ break;
+ default:
+ break;
+ }
+
+ switch (NRSPQP_ERR_STATUS(resp_descriptor)) {
+ case NRSPQP_NO_ERROR:
+ break;
+ case NRSPQP_ERROR_PARITY:
+ case NRSPQP_ERROR_IBA_NACK:
+ case NRSPQP_ERROR_TRANSF_ABORT:
+ case NRSPQP_ERROR_CRC:
+ case NRSPQP_ERROR_FRAME:
+ ret = -EIO;
+ break;
+ case NRSPQP_ERROR_OVER_UNDER_FLOW:
+ ret = -ENOSPC;
+ break;
+ case NRSPQP_ERROR_UNSUPPORTED:
+ ret = -EOPNOTSUPP;
+ break;
+ case NRSPQP_ERROR_I2C_W_NACK_ERR:
+ case NRSPQP_ERROR_ADDRESS_NACK:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ /*
+ * If the transfer was aborted, then the abort flag must be cleared
+ * before notifying the application that a transfer has completed.
+ */
+ ntst = renesas_readl(i3c->regs, NTST);
+ if (ntst & NTST_TABTF)
+ renesas_clear_bit(i3c->regs, BCTL, BCTL_ABT);
+
+ /* Clear error status flags. */
+ renesas_clear_bit(i3c->regs, NTST, NTST_TEF | NTST_TABTF);
+
+ xfer->ret = ret;
+ complete(&xfer->comp);
+
+ xfer = list_first_entry_or_null(&i3c->xferqueue.list,
+ struct renesas_i3c_xfer, node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ i3c->xferqueue.cur = xfer;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_tend_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ if (xfer->is_i2c_xfer) {
+ if (renesas_readl(i3c->regs, BST) & BST_NACKDF) {
+ /* We got a NACKIE */
+ renesas_readl(i3c->regs, NTDTBP0); /* dummy read */
+ renesas_clear_bit(i3c->regs, BST, BST_NACKDF);
+ cmd->err = -ENXIO;
+ } else if (cmd->i2c_bytes_left) {
+ renesas_set_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ return IRQ_NONE;
+ }
+
+ if (cmd->i2c_is_last || cmd->err) {
+ renesas_clear_bit(i3c->regs, BIE, BIE_TENDIE);
+ renesas_set_bit(i3c->regs, BIE, BIE_SPCNDDIE);
+ renesas_set_bit(i3c->regs, CNDCTL, CNDCTL_SPCND);
+ } else {
+ /* Transfer is complete, but do not send STOP */
+ renesas_clear_bit(i3c->regs, NTSTE, NTSTE_TDBEE0);
+ renesas_clear_bit(i3c->regs, BIE, BIE_TENDIE);
+ xfer->ret = 0;
+ complete(&xfer->comp);
+ }
+ }
+
+ /* Clear the Transmit Buffer Empty status flag. */
+ renesas_clear_bit(i3c->regs, BST, BST_TENDF);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_rx_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ int read_bytes;
+
+ /* If resp_isr already read the data and updated 'xfer', we can just leave */
+ if (!(renesas_readl(i3c->regs, NTIE) & NTIE_RDBFIE0))
+ return IRQ_NONE;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ if (xfer->is_i2c_xfer) {
+ if (!cmd->i2c_bytes_left)
+ return IRQ_NONE;
+
+ if (cmd->i2c_bytes_left == I2C_INIT_MSG) {
+ cmd->i2c_bytes_left = cmd->msg->len;
+ renesas_set_bit(i3c->regs, SCSTRCTL, SCSTRCTL_RWE);
+ renesas_readl(i3c->regs, NTDTBP0); /* dummy read */
+ if (cmd->i2c_bytes_left == 1)
+ renesas_writel(i3c->regs, ACKCTL, ACKCTL_ACKT | ACKCTL_ACKTWP);
+ return IRQ_HANDLED;
+ }
+
+ if (cmd->i2c_bytes_left == 1) {
+ /* STOP must come before we set ACKCTL! */
+ if (cmd->i2c_is_last) {
+ renesas_set_bit(i3c->regs, BIE, BIE_SPCNDDIE);
+ renesas_clear_bit(i3c->regs, BST, BST_SPCNDDF);
+ renesas_set_bit(i3c->regs, CNDCTL, CNDCTL_SPCND);
+ }
+ renesas_writel(i3c->regs, ACKCTL, ACKCTL_ACKT | ACKCTL_ACKTWP);
+ } else {
+ renesas_writel(i3c->regs, ACKCTL, ACKCTL_ACKTWP);
+ }
+
+ /* Reading the data buffer acks the receive interrupt */
+ *cmd->i2c_buf = renesas_readl(i3c->regs, NTDTBP0);
+ cmd->i2c_buf++;
+ cmd->i2c_bytes_left--;
+ } else {
+ read_bytes = NDBSTLV0_RDBLV(renesas_readl(i3c->regs, NDBSTLV0)) * sizeof(u32);
+ i3c_readl_fifo(i3c->regs + NTDTBP0, cmd->rx_buf, read_bytes);
+ cmd->rx_count = read_bytes;
+ }
+
+ /* Clear the Read Buffer Full status flag. */
+ renesas_clear_bit(i3c->regs, NTST, NTST_RDBFF0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_stop_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+
+ /* read back registers to confirm writes have fully propagated */
+ renesas_writel(i3c->regs, BST, 0);
+ renesas_readl(i3c->regs, BST);
+ renesas_writel(i3c->regs, BIE, 0);
+ renesas_clear_bit(i3c->regs, NTST, NTST_TDBEF0 | NTST_RDBFF0);
+ renesas_clear_bit(i3c->regs, SCSTRCTL, SCSTRCTL_RWE);
+
+ xfer->ret = 0;
+ complete(&xfer->comp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_start_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ u8 val;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ if (xfer->is_i2c_xfer) {
+ if (!cmd->i2c_bytes_left)
+ return IRQ_NONE;
+
+ if (cmd->i2c_bytes_left == I2C_INIT_MSG) {
+ if (cmd->msg->flags & I2C_M_RD) {
+ /* On read, switch over to receive interrupt */
+ renesas_clear_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ renesas_set_bit(i3c->regs, NTIE, NTIE_RDBFIE0);
+ } else {
+ /* On write, initialize length */
+ cmd->i2c_bytes_left = cmd->msg->len;
+ }
+
+ val = i2c_8bit_addr_from_msg(cmd->msg);
+ renesas_writel(i3c->regs, NTDTBP0, val);
+ }
+ }
+
+ renesas_clear_bit(i3c->regs, BIE, BIE_STCNDDIE);
+ renesas_clear_bit(i3c->regs, BST, BST_STCNDDF);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct i3c_master_controller_ops renesas_i3c_ops = {
+ .bus_init = renesas_i3c_bus_init,
+ .bus_cleanup = renesas_i3c_bus_cleanup,
+ .attach_i3c_dev = renesas_i3c_attach_i3c_dev,
+ .reattach_i3c_dev = renesas_i3c_reattach_i3c_dev,
+ .detach_i3c_dev = renesas_i3c_detach_i3c_dev,
+ .do_daa = renesas_i3c_daa,
+ .supports_ccc_cmd = renesas_i3c_supports_ccc_cmd,
+ .send_ccc_cmd = renesas_i3c_send_ccc_cmd,
+ .priv_xfers = renesas_i3c_priv_xfers,
+ .attach_i2c_dev = renesas_i3c_attach_i2c_dev,
+ .detach_i2c_dev = renesas_i3c_detach_i2c_dev,
+ .i2c_xfers = renesas_i3c_i2c_xfers,
+};
+
+static const struct renesas_i3c_irq_desc renesas_i3c_irqs[] = {
+ { .name = "resp", .isr = renesas_i3c_resp_isr, .desc = "i3c-resp" },
+ { .name = "rx", .isr = renesas_i3c_rx_isr, .desc = "i3c-rx" },
+ { .name = "tx", .isr = renesas_i3c_tx_isr, .desc = "i3c-tx" },
+ { .name = "st", .isr = renesas_i3c_start_isr, .desc = "i3c-start" },
+ { .name = "sp", .isr = renesas_i3c_stop_isr, .desc = "i3c-stop" },
+ { .name = "tend", .isr = renesas_i3c_tend_isr, .desc = "i3c-tend" },
+ { .name = "nack", .isr = renesas_i3c_tend_isr, .desc = "i3c-nack" },
+};
+
+static int renesas_i3c_probe(struct platform_device *pdev)
+{
+ struct renesas_i3c *i3c;
+ struct reset_control *reset;
+ struct clk *clk;
+ const struct renesas_i3c_config *config = of_device_get_match_data(&pdev->dev);
+ int ret, i;
+
+ if (!config)
+ return -ENODATA;
+
+ i3c = devm_kzalloc(&pdev->dev, sizeof(*i3c), GFP_KERNEL);
+ if (!i3c)
+ return -ENOMEM;
+
+ i3c->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(i3c->regs))
+ return PTR_ERR(i3c->regs);
+
+ clk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (config->has_pclkrw) {
+ clk = devm_clk_get_enabled(&pdev->dev, "pclkrw");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+
+ i3c->tclk = devm_clk_get_enabled(&pdev->dev, "tclk");
+ if (IS_ERR(i3c->tclk))
+ return PTR_ERR(i3c->tclk);
+
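+ /*
+ * The optional resets only need to be released; the devm variants
+ * keep them deasserted until the driver is unbound.
+ */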
+ reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, "tresetn");
+ if (IS_ERR(reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(reset),
+ "Error: missing tresetn ctrl\n");
+
+ reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, "presetn");
+ if (IS_ERR(reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(reset),
+ "Error: missing presetn ctrl\n");
+
+ spin_lock_init(&i3c->xferqueue.lock);
+ INIT_LIST_HEAD(&i3c->xferqueue.list);
+
+ ret = renesas_i3c_reset(i3c);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(renesas_i3c_irqs); i++) {
+ ret = platform_get_irq_byname(pdev, renesas_i3c_irqs[i].name);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev, ret, renesas_i3c_irqs[i].isr,
+ 0, renesas_i3c_irqs[i].desc, i3c);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, i3c);
+
+ i3c->maxdevs = RENESAS_I3C_MAX_DEVS;
+ i3c->free_pos = GENMASK(i3c->maxdevs - 1, 0);
+
+ return i3c_master_register(&i3c->base, &pdev->dev, &renesas_i3c_ops, false);
+}
+
+static void renesas_i3c_remove(struct platform_device *pdev)
+{
+ struct renesas_i3c *i3c = platform_get_drvdata(pdev);
+
+ i3c_master_unregister(&i3c->base);
+}
+
+static const struct renesas_i3c_config empty_i3c_config = {
+};
+
+static const struct renesas_i3c_config r9a09g047_i3c_config = {
+ .has_pclkrw = 1,
+};
+
+static const struct of_device_id renesas_i3c_of_ids[] = {
+ { .compatible = "renesas,r9a08g045-i3c", .data = &empty_i3c_config },
+ { .compatible = "renesas,r9a09g047-i3c", .data = &r9a09g047_i3c_config },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, renesas_i3c_of_ids);
+
+static struct platform_driver renesas_i3c = {
+ .probe = renesas_i3c_probe,
+ .remove = renesas_i3c_remove,
+ .driver = {
+ .name = "renesas-i3c",
+ .of_match_table = renesas_i3c_of_ids,
+ },
+};
+module_platform_driver(renesas_i3c);
+
+MODULE_AUTHOR("Wolfram Sang <wsa+renesas@sang-engineering.com>");
+MODULE_AUTHOR("Renesas BSP teams");
+MODULE_DESCRIPTION("Renesas I3C controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 7e1a7cb94b43..701ae165b25b 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -104,6 +104,7 @@
#define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
#define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
#define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
+#define SVC_I3C_MDATACTRL_TXCOUNT(x) FIELD_GET(GENMASK(20, 16), (x))
#define SVC_I3C_MDATACTRL_TXFULL BIT(30)
#define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
@@ -664,7 +665,6 @@ static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
}
rpm_out:
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
@@ -779,7 +779,6 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
goto rpm_out;
rpm_out:
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
@@ -801,7 +800,6 @@ static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
/* Disable master */
writel(0, master->regs + SVC_I3C_MCONFIG);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
}
@@ -1207,7 +1205,6 @@ static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
dev_err(master->dev, "Cannot handle such a list of devices");
rpm_out:
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
@@ -1304,14 +1301,19 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
* FIFO start filling as soon as possible after EmitStartAddr.
*/
if (svc_has_quirk(master, SVC_I3C_QUIRK_FIFO_EMPTY) && !rnw && xfer_len) {
- u32 end = xfer_len > SVC_I3C_FIFO_SIZE ? 0 : SVC_I3C_MWDATAB_END;
- u32 len = min_t(u32, xfer_len, SVC_I3C_FIFO_SIZE);
-
- writesb(master->regs + SVC_I3C_MWDATAB1, out, len - 1);
- /* Mark END bit if this is the last byte */
- writel(out[len - 1] | end, master->regs + SVC_I3C_MWDATAB);
- xfer_len -= len;
- out += len;
+ u32 space, end, len;
+
+ reg = readl(master->regs + SVC_I3C_MDATACTRL);
+ space = SVC_I3C_FIFO_SIZE - SVC_I3C_MDATACTRL_TXCOUNT(reg);
+ if (space) {
+ end = xfer_len > space ? 0 : SVC_I3C_MWDATAB_END;
+ len = min_t(u32, xfer_len, space);
+ writesb(master->regs + SVC_I3C_MWDATAB1, out, len - 1);
+ /* Mark END bit if this is the last byte */
+ writel(out[len - 1] | end, master->regs + SVC_I3C_MWDATAB);
+ xfer_len -= len;
+ out += len;
+ }
}
ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
@@ -1511,7 +1513,6 @@ static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
}
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
}
@@ -1708,7 +1709,7 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
mutex_lock(&master->lock);
svc_i3c_master_enqueue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
svc_i3c_master_dequeue_xfer(master, xfer);
mutex_unlock(&master->lock);
@@ -1801,7 +1802,6 @@ static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
@@ -1834,7 +1834,6 @@ static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
if (!master->enabled_events)
svc_i3c_master_disable_interrupts(master);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return 0;
@@ -1954,7 +1953,6 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
if (ret)
goto rpm_disable;
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 73747d20df85..91a7b7e7c0c8 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1679,7 +1679,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
};
static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
- X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
+ X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL),
{}
};
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index 6cf790ff3eb5..dcdb5778f7d6 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -2064,7 +2064,7 @@ static int ad4130_probe(struct spi_device *spi)
st->gc.can_sleep = true;
st->gc.init_valid_mask = ad4130_gpio_init_valid_mask;
st->gc.get_direction = ad4130_gpio_get_direction;
- st->gc.set_rv = ad4130_gpio_set;
+ st->gc.set = ad4130_gpio_set;
ret = devm_gpiochip_add_data(dev, &st->gc, st);
if (ret)
diff --git a/drivers/iio/adc/ad4170-4.c b/drivers/iio/adc/ad4170-4.c
index 6cd84d6fb08b..efaed92191f1 100644
--- a/drivers/iio/adc/ad4170-4.c
+++ b/drivers/iio/adc/ad4170-4.c
@@ -1807,7 +1807,7 @@ static int ad4170_gpio_init(struct iio_dev *indio_dev)
st->gpiochip.direction_input = ad4170_gpio_direction_input;
st->gpiochip.direction_output = ad4170_gpio_direction_output;
st->gpiochip.get = ad4170_gpio_get;
- st->gpiochip.set_rv = ad4170_gpio_set;
+ st->gpiochip.set = ad4170_gpio_set;
st->gpiochip.owner = THIS_MODULE;
return devm_gpiochip_add_data(&st->spi->dev, &st->gpiochip, indio_dev);
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index a2e061f0cb08..ca8fa91796ca 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -673,7 +673,7 @@ static int ad7768_gpio_init(struct iio_dev *indio_dev)
.direction_input = ad7768_gpio_direction_input,
.direction_output = ad7768_gpio_direction_output,
.get = ad7768_gpio_get,
- .set_rv = ad7768_gpio_set,
+ .set = ad7768_gpio_set,
.owner = THIS_MODULE,
};
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index 4b7c472a088b..eb42e29960e4 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -6,6 +6,7 @@
* Copyright 2012-2020 Analog Devices Inc.
*/
+#include <linux/adi-axi-common.h>
#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
@@ -20,8 +21,6 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/fpga/adi-axi-common.h>
-
#include <linux/iio/backend.h>
#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/buffer.h>
diff --git a/drivers/iio/adc/rohm-bd79124.c b/drivers/iio/adc/rohm-bd79124.c
index bb7c93ae4055..06c55c8da93f 100644
--- a/drivers/iio/adc/rohm-bd79124.c
+++ b/drivers/iio/adc/rohm-bd79124.c
@@ -246,8 +246,8 @@ static int bd79124_init_valid_mask(struct gpio_chip *gc,
static const struct gpio_chip bd79124gpo_chip = {
.label = "bd79124-gpo",
.get_direction = bd79124gpo_direction_get,
- .set_rv = bd79124gpo_set,
- .set_multiple_rv = bd79124gpo_set_multiple,
+ .set = bd79124gpo_set,
+ .set_multiple = bd79124gpo_set_multiple,
.init_valid_mask = bd79124_init_valid_mask,
.can_sleep = true,
.ngpio = 8,
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 0356ccf23fea..bbe1ce577789 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -648,7 +648,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->chip.direction_input = ti_ads7950_direction_input;
st->chip.direction_output = ti_ads7950_direction_output;
st->chip.get = ti_ads7950_get;
- st->chip.set_rv = ti_ads7950_set;
+ st->chip.set = ti_ads7950_set;
ret = gpiochip_add_data(&st->chip, st);
if (ret) {
diff --git a/drivers/iio/addac/ad74115.c b/drivers/iio/addac/ad74115.c
index 4d8b64048e4f..f8b04d86b01f 100644
--- a/drivers/iio/addac/ad74115.c
+++ b/drivers/iio/addac/ad74115.c
@@ -1577,7 +1577,7 @@ static int ad74115_setup_gpio_chip(struct ad74115_state *st)
.direction_input = ad74115_gpio_direction_input,
.direction_output = ad74115_gpio_direction_output,
.get = ad74115_gpio_get,
- .set_rv = ad74115_gpio_set,
+ .set = ad74115_gpio_set,
};
return devm_gpiochip_add_data(dev, &st->gc, st);
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index a0bb1dbcb7ad..a20b4d48c5f7 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -1425,8 +1425,8 @@ static int ad74413r_probe(struct spi_device *spi)
st->gpo_gpiochip.ngpio = st->num_gpo_gpios;
st->gpo_gpiochip.parent = st->dev;
st->gpo_gpiochip.can_sleep = true;
- st->gpo_gpiochip.set_rv = ad74413r_gpio_set;
- st->gpo_gpiochip.set_multiple_rv = ad74413r_gpio_set_multiple;
+ st->gpo_gpiochip.set = ad74413r_gpio_set;
+ st->gpo_gpiochip.set_multiple = ad74413r_gpio_set_multiple;
st->gpo_gpiochip.set_config = ad74413r_gpio_set_gpo_config;
st->gpo_gpiochip.get_direction =
ad74413r_gpio_get_gpo_direction;
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 5f2cd51723f6..4720733d66b2 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -129,7 +129,7 @@ static int ad5592r_gpio_init(struct ad5592r_state *st)
st->gpiochip.direction_input = ad5592r_gpio_direction_input;
st->gpiochip.direction_output = ad5592r_gpio_direction_output;
st->gpiochip.get = ad5592r_gpio_get;
- st->gpiochip.set_rv = ad5592r_gpio_set;
+ st->gpiochip.set = ad5592r_gpio_set;
st->gpiochip.request = ad5592r_gpio_request;
st->gpiochip.owner = THIS_MODULE;
st->gpiochip.names = ad5592r_gpio_names;
diff --git a/drivers/iio/dac/adi-axi-dac.c b/drivers/iio/dac/adi-axi-dac.c
index a0e546dba368..0d525272a8a8 100644
--- a/drivers/iio/dac/adi-axi-dac.c
+++ b/drivers/iio/dac/adi-axi-dac.c
@@ -5,6 +5,7 @@
*
* Copyright 2016-2024 Analog Devices Inc.
*/
+#include <linux/adi-axi-common.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
@@ -23,7 +24,6 @@
#include <linux/regmap.h>
#include <linux/units.h>
-#include <linux/fpga/adi-axi-common.h>
#include <linux/iio/backend.h>
#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/buffer.h>
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index a5827d11e934..3a394cd772f6 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -92,7 +92,6 @@ source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
source "drivers/infiniband/hw/qedr/Kconfig"
-source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/usnic/Kconfig"
source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
source "drivers/infiniband/sw/rdmavt/Kconfig"
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index d49ded7e95f0..f483e0c12444 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -33,6 +33,7 @@ ib_umad-y := user_mad.o
ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
rdma_core.o uverbs_std_types.o uverbs_ioctl.o \
uverbs_std_types_cq.o \
+ uverbs_std_types_dmah.o \
uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
uverbs_std_types_mr.o uverbs_std_types_counters.o \
uverbs_uapi.o uverbs_std_types_device.o \
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 8670e58675c6..92678e438ff4 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -161,6 +161,7 @@ struct cm_counter_attribute {
struct cm_port {
struct cm_device *cm_dev;
struct ib_mad_agent *mad_agent;
+ struct ib_mad_agent *rep_agent;
u32 port_num;
atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT];
};
@@ -274,7 +275,8 @@ static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
complete(&cm_id_priv->comp);
}
-static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
+static struct ib_mad_send_buf *
+cm_alloc_msg_agent(struct cm_id_private *cm_id_priv, bool rep_agent)
{
struct ib_mad_agent *mad_agent;
struct ib_mad_send_buf *m;
@@ -286,7 +288,8 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
return ERR_PTR(-EINVAL);
read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
- mad_agent = cm_id_priv->av.port->mad_agent;
+ mad_agent = rep_agent ? cm_id_priv->av.port->rep_agent :
+ cm_id_priv->av.port->mad_agent;
if (!mad_agent) {
m = ERR_PTR(-EINVAL);
goto out;
@@ -315,6 +318,11 @@ out:
return m;
}
+static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
+{
+ return cm_alloc_msg_agent(cm_id_priv, false);
+}
+
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
if (msg->ah)
@@ -323,13 +331,14 @@ static void cm_free_msg(struct ib_mad_send_buf *msg)
}
static struct ib_mad_send_buf *
-cm_alloc_priv_msg(struct cm_id_private *cm_id_priv, enum ib_cm_state state)
+cm_alloc_priv_msg_rep(struct cm_id_private *cm_id_priv, enum ib_cm_state state,
+ bool rep_agent)
{
struct ib_mad_send_buf *msg;
lockdep_assert_held(&cm_id_priv->lock);
- msg = cm_alloc_msg(cm_id_priv);
+ msg = cm_alloc_msg_agent(cm_id_priv, rep_agent);
if (IS_ERR(msg))
return msg;
@@ -344,6 +353,12 @@ cm_alloc_priv_msg(struct cm_id_private *cm_id_priv, enum ib_cm_state state)
return msg;
}
+static struct ib_mad_send_buf *
+cm_alloc_priv_msg(struct cm_id_private *cm_id_priv, enum ib_cm_state state)
+{
+ return cm_alloc_priv_msg_rep(cm_id_priv, state, false);
+}
+
static void cm_free_priv_msg(struct ib_mad_send_buf *msg)
{
struct cm_id_private *cm_id_priv = msg->context[0];
@@ -2295,7 +2310,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
goto out;
}
- msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_REP_SENT);
+ msg = cm_alloc_priv_msg_rep(cm_id_priv, IB_CM_REP_SENT, true);
if (IS_ERR(msg)) {
ret = PTR_ERR(msg);
goto out;
@@ -4380,9 +4395,22 @@ static int cm_add_one(struct ib_device *ib_device)
goto error2;
}
+ port->rep_agent = ib_register_mad_agent(ib_device, i,
+ IB_QPT_GSI,
+ NULL,
+ 0,
+ cm_send_handler,
+ NULL,
+ port,
+ 0);
+ if (IS_ERR(port->rep_agent)) {
+ ret = PTR_ERR(port->rep_agent);
+ goto error3;
+ }
+
ret = ib_modify_port(ib_device, i, 0, &port_modify);
if (ret)
- goto error3;
+ goto error4;
count++;
}
@@ -4397,6 +4425,8 @@ static int cm_add_one(struct ib_device *ib_device)
write_unlock_irqrestore(&cm.device_lock, flags);
return 0;
+error4:
+ ib_unregister_mad_agent(port->rep_agent);
error3:
ib_unregister_mad_agent(port->mad_agent);
error2:
@@ -4410,6 +4440,7 @@ error1:
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
+ ib_unregister_mad_agent(port->rep_agent);
ib_unregister_mad_agent(port->mad_agent);
ib_port_unregister_client_groups(ib_device, i,
cm_counter_groups);
@@ -4439,12 +4470,14 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
rdma_for_each_port (ib_device, i) {
struct ib_mad_agent *mad_agent;
+ struct ib_mad_agent *rep_agent;
if (!rdma_cap_ib_cm(ib_device, i))
continue;
port = cm_dev->port[i-1];
mad_agent = port->mad_agent;
+ rep_agent = port->rep_agent;
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
/*
* We flush the queue here after the going_down set, this
@@ -4458,8 +4491,10 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
*/
write_lock(&cm_dev->mad_agent_lock);
port->mad_agent = NULL;
+ port->rep_agent = NULL;
write_unlock(&cm_dev->mad_agent_lock);
ib_unregister_mad_agent(mad_agent);
+ ib_unregister_mad_agent(rep_agent);
ib_port_unregister_client_groups(ib_device, i,
cm_counter_groups);
}
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index e6ec7b7a40af..c3aa6d7fc66b 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -461,7 +461,7 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
return NULL;
qp = container_of(res, struct ib_qp, res);
- if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
+ if (qp->qp_type == IB_QPT_RAW_PACKET && !rdma_dev_has_raw_cap(dev))
goto err;
return qp;
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index a70876a0a231..584537c71545 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -317,13 +317,18 @@ EXPORT_SYMBOL(__ib_alloc_cq_any);
*/
void ib_free_cq(struct ib_cq *cq)
{
- int ret;
+ int ret = 0;
if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
return;
if (WARN_ON_ONCE(cq->cqe_used))
return;
+ if (cq->device->ops.pre_destroy_cq) {
+ ret = cq->device->ops.pre_destroy_cq(cq);
+ WARN_ONCE(ret, "Disable of kernel CQ shouldn't fail");
+ }
+
switch (cq->poll_ctx) {
case IB_POLL_DIRECT:
break;
@@ -340,7 +345,10 @@ void ib_free_cq(struct ib_cq *cq)
rdma_dim_destroy(cq);
trace_cq_free(cq);
- ret = cq->device->ops.destroy_cq(cq, NULL);
+ if (cq->device->ops.post_destroy_cq)
+ cq->device->ops.post_destroy_cq(cq);
+ else
+ ret = cq->device->ops.destroy_cq(cq, NULL);
WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
rdma_restrack_del(&cq->res);
kfree(cq->wc);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d4263385850a..3145cb34a1d2 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -145,6 +145,33 @@ bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
}
EXPORT_SYMBOL(rdma_dev_access_netns);
+/**
+ * rdma_dev_has_raw_cap() - Returns whether a specified rdma device has
+ * CAP_NET_RAW capability or not.
+ *
+ * @dev: Pointer to rdma device whose capability to be checked
+ *
+ * Returns true if a rdma device's owning user namespace has CAP_NET_RAW
+ * capability, otherwise false. When rdma subsystem is in legacy shared network,
+ * namespace mode, the default net namespace is considered.
+ */
+bool rdma_dev_has_raw_cap(const struct ib_device *dev)
+{
+ const struct net *net;
+
+ /* The network namespace is the resource whose user namespace is
+ * to be considered. When in shared mode, there is no reliable
+ * network namespace resource, so consider the default net namespace.
+ */
+ if (ib_devices_shared_netns)
+ net = &init_net;
+ else
+ net = read_pnet(&dev->coredev.rdma_net);
+
+ return ns_capable(net->user_ns, CAP_NET_RAW);
+}
+EXPORT_SYMBOL(rdma_dev_has_raw_cap);
+
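
In effect the helper relocates the old global capable(CAP_NET_RAW) test onto whichever user namespace owns the device. A paraphrase of the two regimes (my reading, not kernel code):

	shared mode (ib_devices_shared_netns set):
	    ns_capable(init_net.user_ns, CAP_NET_RAW)
	    /* behaves like the old global capable() check */

	exclusive mode:
	    ns_capable(dev_netns->user_ns, CAP_NET_RAW)
	    /* root in a container that owns the device's netns passes
	     * without holding a global CAP_NET_RAW */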
/*
* xarray has this behavior where it won't iterate over NULL values stored in
* allocated arrays. So we need our own iterator to see all values stored in
@@ -557,6 +584,8 @@ static void rdma_init_coredev(struct ib_core_device *coredev,
/**
* _ib_alloc_device - allocate an IB device struct
* @size:size of structure to allocate
+ * @net: network namespace the device should be located in; the namespace
+ * must stay valid until ib_register_device() has completed.
*
* Low-level drivers should use ib_alloc_device() to allocate &struct
* ib_device. @size is the size of the structure to be allocated,
@@ -564,7 +593,7 @@ static void rdma_init_coredev(struct ib_core_device *coredev,
* ib_dealloc_device() must be used to free structures allocated with
* ib_alloc_device().
*/
-struct ib_device *_ib_alloc_device(size_t size)
+struct ib_device *_ib_alloc_device(size_t size, struct net *net)
{
struct ib_device *device;
unsigned int i;
@@ -581,7 +610,15 @@ struct ib_device *_ib_alloc_device(size_t size)
return NULL;
}
- rdma_init_coredev(&device->coredev, device, &init_net);
+ /* ib_devices_shared_netns can't change while we have active namespaces
+ * in the system, which means either init_net is passed or the user has
+ * no idea what they are doing.
+ *
+ * To avoid breaking backward compatibility, when in shared mode,
+ * force to init the device in the init_net.
+ */
+ net = ib_devices_shared_netns ? &init_net : net;
+ rdma_init_coredev(&device->coredev, device, net);
INIT_LIST_HEAD(&device->event_handler_list);
spin_lock_init(&device->qp_open_list_lock);
@@ -2671,6 +2708,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, add_sub_dev);
SET_DEVICE_OP(dev_ops, advise_mr);
SET_DEVICE_OP(dev_ops, alloc_dm);
+ SET_DEVICE_OP(dev_ops, alloc_dmah);
SET_DEVICE_OP(dev_ops, alloc_hw_device_stats);
SET_DEVICE_OP(dev_ops, alloc_hw_port_stats);
SET_DEVICE_OP(dev_ops, alloc_mr);
@@ -2691,6 +2729,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, create_ah);
SET_DEVICE_OP(dev_ops, create_counters);
SET_DEVICE_OP(dev_ops, create_cq);
+ SET_DEVICE_OP(dev_ops, create_cq_umem);
SET_DEVICE_OP(dev_ops, create_flow);
SET_DEVICE_OP(dev_ops, create_qp);
SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
@@ -2698,6 +2737,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, create_user_ah);
SET_DEVICE_OP(dev_ops, create_wq);
SET_DEVICE_OP(dev_ops, dealloc_dm);
+ SET_DEVICE_OP(dev_ops, dealloc_dmah);
SET_DEVICE_OP(dev_ops, dealloc_driver);
SET_DEVICE_OP(dev_ops, dealloc_mw);
SET_DEVICE_OP(dev_ops, dealloc_pd);
@@ -2763,8 +2803,10 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, modify_srq);
SET_DEVICE_OP(dev_ops, modify_wq);
SET_DEVICE_OP(dev_ops, peek_cq);
+ SET_DEVICE_OP(dev_ops, pre_destroy_cq);
SET_DEVICE_OP(dev_ops, poll_cq);
SET_DEVICE_OP(dev_ops, port_groups);
+ SET_DEVICE_OP(dev_ops, post_destroy_cq);
SET_DEVICE_OP(dev_ops, post_recv);
SET_DEVICE_OP(dev_ops, post_send);
SET_DEVICE_OP(dev_ops, post_srq_recv);
@@ -2793,6 +2835,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_counters);
SET_OBJ_SIZE(dev_ops, ib_cq);
+ SET_OBJ_SIZE(dev_ops, ib_dmah);
SET_OBJ_SIZE(dev_ops, ib_mw);
SET_OBJ_SIZE(dev_ops, ib_pd);
SET_OBJ_SIZE(dev_ops, ib_qp);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 73f3a0b9a54b..8f26bfb69586 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -210,6 +210,29 @@ int ib_response_mad(const struct ib_mad_hdr *hdr)
}
EXPORT_SYMBOL(ib_response_mad);
+#define SOL_FC_MAX_DEFAULT_FRAC 4
+#define SOL_FC_MAX_SA_FRAC 32
+
+static int get_sol_fc_max_outstanding(struct ib_mad_reg_req *mad_reg_req)
+{
+ if (!mad_reg_req)
+ /* Send-only agent */
+ return mad_recvq_size / SOL_FC_MAX_DEFAULT_FRAC;
+
+ switch (mad_reg_req->mgmt_class) {
+ case IB_MGMT_CLASS_CM:
+ return mad_recvq_size / SOL_FC_MAX_DEFAULT_FRAC;
+ case IB_MGMT_CLASS_SUBN_ADM:
+ return mad_recvq_size / SOL_FC_MAX_SA_FRAC;
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ return min(mad_recvq_size, IB_MAD_QP_RECV_SIZE) /
+ SOL_FC_MAX_DEFAULT_FRAC;
+ default:
+ return 0;
+ }
+}
+
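
For scale, assuming the module defaults are unchanged (mad_recvq_size is initialized from IB_MAD_QP_RECV_SIZE, which is 512 in mad_priv.h as far as I can tell): a CM agent may keep 512/4 = 128 solicited MADs outstanding, an SA agent 512/32 = 16, and the SMP classes min(512, 512)/4 = 128. The min() clamp only bites when the receive-queue module parameter is raised above the compile-time constant, keeping the SMP budget pinned. Classes not listed, and agents registered without a receive handler, get a budget of 0, which disables the backlog machinery entirely (see mad_is_for_backlog() further down).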
/*
* ib_register_mad_agent - Register to send/receive MADs
*
@@ -391,13 +414,17 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
spin_lock_init(&mad_agent_priv->lock);
INIT_LIST_HEAD(&mad_agent_priv->send_list);
INIT_LIST_HEAD(&mad_agent_priv->wait_list);
- INIT_LIST_HEAD(&mad_agent_priv->done_list);
INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
+ INIT_LIST_HEAD(&mad_agent_priv->backlog_list);
INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
INIT_LIST_HEAD(&mad_agent_priv->local_list);
INIT_WORK(&mad_agent_priv->local_work, local_completions);
refcount_set(&mad_agent_priv->refcount, 1);
init_completion(&mad_agent_priv->comp);
+ mad_agent_priv->sol_fc_send_count = 0;
+ mad_agent_priv->sol_fc_wait_count = 0;
+ mad_agent_priv->sol_fc_max =
+ recv_handler ? get_sol_fc_max_outstanding(mad_reg_req) : 0;
ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
if (ret2) {
@@ -1055,6 +1082,180 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
return ret;
}
+static void handle_queued_state(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP) {
+ mad_agent_priv->sol_fc_wait_count--;
+ list_move_tail(&mad_send_wr->agent_list,
+ &mad_agent_priv->backlog_list);
+ } else {
+ expect_mad_state(mad_send_wr, IB_MAD_STATE_INIT);
+ list_add_tail(&mad_send_wr->agent_list,
+ &mad_agent_priv->backlog_list);
+ }
+}
+
+static void handle_send_state(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ if (mad_send_wr->state == IB_MAD_STATE_INIT) {
+ list_add_tail(&mad_send_wr->agent_list,
+ &mad_agent_priv->send_list);
+ } else {
+ expect_mad_state2(mad_send_wr, IB_MAD_STATE_WAIT_RESP,
+ IB_MAD_STATE_QUEUED);
+ list_move_tail(&mad_send_wr->agent_list,
+ &mad_agent_priv->send_list);
+ }
+
+ if (mad_send_wr->is_solicited_fc) {
+ if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP)
+ mad_agent_priv->sol_fc_wait_count--;
+ mad_agent_priv->sol_fc_send_count++;
+ }
+}
+
+static void handle_wait_state(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ struct ib_mad_send_wr_private *temp_mad_send_wr;
+ struct list_head *list_item;
+ unsigned long delay;
+
+ expect_mad_state3(mad_send_wr, IB_MAD_STATE_SEND_START,
+ IB_MAD_STATE_WAIT_RESP, IB_MAD_STATE_CANCELED);
+ if (mad_send_wr->state == IB_MAD_STATE_SEND_START &&
+ mad_send_wr->is_solicited_fc) {
+ mad_agent_priv->sol_fc_send_count--;
+ mad_agent_priv->sol_fc_wait_count++;
+ }
+
+ list_del_init(&mad_send_wr->agent_list);
+ delay = mad_send_wr->timeout;
+ mad_send_wr->timeout += jiffies;
+
+ if (delay) {
+ list_for_each_prev(list_item,
+ &mad_agent_priv->wait_list) {
+ temp_mad_send_wr = list_entry(
+ list_item,
+ struct ib_mad_send_wr_private,
+ agent_list);
+ if (time_after(mad_send_wr->timeout,
+ temp_mad_send_wr->timeout))
+ break;
+ }
+ } else {
+ list_item = &mad_agent_priv->wait_list;
+ }
+
+ list_add(&mad_send_wr->agent_list, list_item);
+}
+
+static void handle_early_resp_state(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ expect_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
+ mad_agent_priv->sol_fc_send_count -= mad_send_wr->is_solicited_fc;
+}
+
+static void handle_canceled_state(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ not_expect_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
+ if (mad_send_wr->is_solicited_fc) {
+ if (mad_send_wr->state == IB_MAD_STATE_SEND_START)
+ mad_agent_priv->sol_fc_send_count--;
+ else if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP)
+ mad_agent_priv->sol_fc_wait_count--;
+ }
+}
+
+static void handle_done_state(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ if (mad_send_wr->is_solicited_fc) {
+ if (mad_send_wr->state == IB_MAD_STATE_SEND_START)
+ mad_agent_priv->sol_fc_send_count--;
+ else if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP)
+ mad_agent_priv->sol_fc_wait_count--;
+ }
+
+ list_del_init(&mad_send_wr->agent_list);
+}
+
+void change_mad_state(struct ib_mad_send_wr_private *mad_send_wr,
+ enum ib_mad_state new_state)
+{
+ struct ib_mad_agent_private *mad_agent_priv =
+ mad_send_wr->mad_agent_priv;
+
+ switch (new_state) {
+ case IB_MAD_STATE_INIT:
+ break;
+ case IB_MAD_STATE_QUEUED:
+ handle_queued_state(mad_send_wr, mad_agent_priv);
+ break;
+ case IB_MAD_STATE_SEND_START:
+ handle_send_state(mad_send_wr, mad_agent_priv);
+ break;
+ case IB_MAD_STATE_WAIT_RESP:
+ handle_wait_state(mad_send_wr, mad_agent_priv);
+ if (mad_send_wr->state == IB_MAD_STATE_CANCELED)
+ return;
+ break;
+ case IB_MAD_STATE_EARLY_RESP:
+ handle_early_resp_state(mad_send_wr, mad_agent_priv);
+ break;
+ case IB_MAD_STATE_CANCELED:
+ handle_canceled_state(mad_send_wr, mad_agent_priv);
+ break;
+ case IB_MAD_STATE_DONE:
+ handle_done_state(mad_send_wr, mad_agent_priv);
+ break;
+ }
+
+ mad_send_wr->state = new_state;
+}
+
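Taken together, the handlers above implement a small per-request state machine. As I read the expect_mad_state*() and not_expect_mad_state() assertions, the legal transitions are:

	-> QUEUED      from INIT or WAIT_RESP (backlogged before or after a send)
	-> SEND_START  from INIT, QUEUED or WAIT_RESP (first send or retry)
	-> WAIT_RESP   from SEND_START, or re-entered from WAIT_RESP when the
	               timeout is reset; a CANCELED request is re-sorted into
	               the wait list but keeps CANCELED
	-> EARLY_RESP  from SEND_START only
	-> CANCELED    from any state except DONE
	-> DONE        from any state; terminal
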
+static bool is_solicited_fc_mad(struct ib_mad_send_wr_private *mad_send_wr)
+{
+ struct ib_rmpp_mad *rmpp_mad;
+ u8 mgmt_class;
+
+ if (!mad_send_wr->timeout)
+ return 0;
+
+ rmpp_mad = mad_send_wr->send_buf.mad;
+ if (mad_send_wr->mad_agent_priv->agent.rmpp_version &&
+ (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE))
+ return 0;
+
+ mgmt_class =
+ ((struct ib_mad_hdr *)mad_send_wr->send_buf.mad)->mgmt_class;
+ return mgmt_class == IB_MGMT_CLASS_CM ||
+ mgmt_class == IB_MGMT_CLASS_SUBN_ADM ||
+ mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
+}
+
+static bool mad_is_for_backlog(struct ib_mad_send_wr_private *mad_send_wr)
+{
+ struct ib_mad_agent_private *mad_agent_priv =
+ mad_send_wr->mad_agent_priv;
+
+ if (!mad_send_wr->is_solicited_fc || !mad_agent_priv->sol_fc_max)
+ return false;
+
+ if (!list_empty(&mad_agent_priv->backlog_list))
+ return true;
+
+ return mad_agent_priv->sol_fc_send_count +
+ mad_agent_priv->sol_fc_wait_count >=
+ mad_agent_priv->sol_fc_max;
+}
+
/*
* ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
* with the registered client
@@ -1080,9 +1281,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
if (ret)
goto error;
- if (!send_buf->mad_agent->send_handler ||
- (send_buf->timeout_ms &&
- !send_buf->mad_agent->recv_handler)) {
+ if (!send_buf->mad_agent->send_handler) {
ret = -EINVAL;
goto error;
}
@@ -1118,15 +1317,19 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
mad_send_wr->max_retries = send_buf->retries;
mad_send_wr->retries_left = send_buf->retries;
send_buf->retries = 0;
- /* Reference for work request to QP + response */
- mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
- mad_send_wr->status = IB_WC_SUCCESS;
+ change_mad_state(mad_send_wr, IB_MAD_STATE_INIT);
/* Reference MAD agent until send completes */
refcount_inc(&mad_agent_priv->refcount);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
- list_add_tail(&mad_send_wr->agent_list,
- &mad_agent_priv->send_list);
+ mad_send_wr->is_solicited_fc = is_solicited_fc_mad(mad_send_wr);
+ if (mad_is_for_backlog(mad_send_wr)) {
+ change_mad_state(mad_send_wr, IB_MAD_STATE_QUEUED);
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ return 0;
+ }
+
+ change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
@@ -1138,7 +1341,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
if (ret < 0) {
/* Fail send request */
spin_lock_irqsave(&mad_agent_priv->lock, flags);
- list_del(&mad_send_wr->agent_list);
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
deref_mad_agent(mad_agent_priv);
goto error;
@@ -1746,7 +1949,19 @@ ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
*/
(is_direct(mad_hdr->mgmt_class) ||
rcv_has_same_gid(mad_agent_priv, wr, wc)))
- return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
+ return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL;
+ }
+
+ list_for_each_entry(wr, &mad_agent_priv->backlog_list, agent_list) {
+ if ((wr->tid == mad_hdr->tid) &&
+ rcv_has_same_class(wr, wc) &&
+ /*
+ * Don't check GID for direct routed MADs.
+ * These might have permissive LIDs.
+ */
+ (is_direct(mad_hdr->mgmt_class) ||
+ rcv_has_same_gid(mad_agent_priv, wr, wc)))
+ return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL;
}
/*
@@ -1765,17 +1980,55 @@ ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
(is_direct(mad_hdr->mgmt_class) ||
rcv_has_same_gid(mad_agent_priv, wr, wc)))
/* Verify request has not been canceled */
- return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
+ return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL;
}
return NULL;
}
+static void
+process_backlog_mads(struct ib_mad_agent_private *mad_agent_priv)
+{
+ struct ib_mad_send_wr_private *mad_send_wr;
+ struct ib_mad_send_wc mad_send_wc = {};
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ while (!list_empty(&mad_agent_priv->backlog_list) &&
+ (mad_agent_priv->sol_fc_send_count +
+ mad_agent_priv->sol_fc_wait_count <
+ mad_agent_priv->sol_fc_max)) {
+ mad_send_wr = list_entry(mad_agent_priv->backlog_list.next,
+ struct ib_mad_send_wr_private,
+ agent_list);
+ change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ ret = ib_send_mad(mad_send_wr);
+ if (ret) {
+ spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ deref_mad_agent(mad_agent_priv);
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ mad_send_wc.send_buf = &mad_send_wr->send_buf;
+ mad_send_wc.status = IB_WC_LOC_QP_OP_ERR;
+ mad_agent_priv->agent.send_handler(
+ &mad_agent_priv->agent, &mad_send_wc);
+ }
+
+ spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+}
+
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
mad_send_wr->timeout = 0;
- if (mad_send_wr->refcount == 1)
- list_move_tail(&mad_send_wr->agent_list,
- &mad_send_wr->mad_agent_priv->done_list);
+ if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP ||
+ mad_send_wr->state == IB_MAD_STATE_QUEUED)
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
+ else
+ change_mad_state(mad_send_wr, IB_MAD_STATE_EARLY_RESP);
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
@@ -1784,6 +2037,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags;
+ bool is_mad_done;
int ret;
INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
@@ -1832,6 +2086,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
}
} else {
ib_mark_mad_done(mad_send_wr);
+ is_mad_done = (mad_send_wr->state == IB_MAD_STATE_DONE);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
/* Defined behavior is to complete response before request */
@@ -1841,10 +2096,13 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
mad_recv_wc);
deref_mad_agent(mad_agent_priv);
- mad_send_wc.status = IB_WC_SUCCESS;
- mad_send_wc.vendor_err = 0;
- mad_send_wc.send_buf = &mad_send_wr->send_buf;
- ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
+ if (is_mad_done) {
+ mad_send_wc.status = IB_WC_SUCCESS;
+ mad_send_wc.vendor_err = 0;
+ mad_send_wc.send_buf = &mad_send_wr->send_buf;
+ ib_mad_complete_send_wr(mad_send_wr,
+ &mad_send_wc);
+ }
}
} else {
mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
@@ -2172,30 +2430,11 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_send_wr_private *temp_mad_send_wr;
- struct list_head *list_item;
unsigned long delay;
mad_agent_priv = mad_send_wr->mad_agent_priv;
- list_del(&mad_send_wr->agent_list);
-
delay = mad_send_wr->timeout;
- mad_send_wr->timeout += jiffies;
-
- if (delay) {
- list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
- temp_mad_send_wr = list_entry(list_item,
- struct ib_mad_send_wr_private,
- agent_list);
- if (time_after(mad_send_wr->timeout,
- temp_mad_send_wr->timeout))
- break;
- }
- } else {
- list_item = &mad_agent_priv->wait_list;
- }
-
- list_add(&mad_send_wr->agent_list, list_item);
+ change_mad_state(mad_send_wr, IB_MAD_STATE_WAIT_RESP);
/* Reschedule a work item if we have a shorter timeout */
if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
@@ -2229,32 +2468,28 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
} else
ret = IB_RMPP_RESULT_UNHANDLED;
- if (mad_send_wc->status != IB_WC_SUCCESS &&
- mad_send_wr->status == IB_WC_SUCCESS) {
- mad_send_wr->status = mad_send_wc->status;
- mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
- }
-
- if (--mad_send_wr->refcount > 0) {
- if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
- mad_send_wr->status == IB_WC_SUCCESS) {
- wait_for_response(mad_send_wr);
- }
+ if (mad_send_wr->state == IB_MAD_STATE_CANCELED)
+ mad_send_wc->status = IB_WC_WR_FLUSH_ERR;
+ else if (mad_send_wr->state == IB_MAD_STATE_SEND_START &&
+ mad_send_wr->timeout) {
+ wait_for_response(mad_send_wr);
goto done;
}
/* Remove send from MAD agent and notify client of completion */
- list_del(&mad_send_wr->agent_list);
+ if (mad_send_wr->state != IB_MAD_STATE_DONE)
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
adjust_timeout(mad_agent_priv);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- if (mad_send_wr->status != IB_WC_SUCCESS)
- mad_send_wc->status = mad_send_wr->status;
- if (ret == IB_RMPP_RESULT_INTERNAL)
+ if (ret == IB_RMPP_RESULT_INTERNAL) {
ib_rmpp_send_handler(mad_send_wc);
- else
+ } else {
+ if (mad_send_wr->is_solicited_fc)
+ process_backlog_mads(mad_agent_priv);
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
mad_send_wc);
+ }
/* Release reference on agent taken when sending */
deref_mad_agent(mad_agent_priv);
@@ -2396,40 +2631,53 @@ static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
return true;
}
+static void clear_mad_error_list(struct list_head *list,
+ enum ib_wc_status wc_status,
+ struct ib_mad_agent_private *mad_agent_priv)
+{
+ struct ib_mad_send_wr_private *mad_send_wr, *n;
+ struct ib_mad_send_wc mad_send_wc;
+
+ mad_send_wc.status = wc_status;
+ mad_send_wc.vendor_err = 0;
+
+ list_for_each_entry_safe(mad_send_wr, n, list, agent_list) {
+ mad_send_wc.send_buf = &mad_send_wr->send_buf;
+ mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
+ &mad_send_wc);
+ deref_mad_agent(mad_agent_priv);
+ }
+}
+
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
unsigned long flags;
struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
- struct ib_mad_send_wc mad_send_wc;
struct list_head cancel_list;
INIT_LIST_HEAD(&cancel_list);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
- &mad_agent_priv->send_list, agent_list) {
- if (mad_send_wr->status == IB_WC_SUCCESS) {
- mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
- mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
- }
- }
+ &mad_agent_priv->send_list, agent_list)
+ change_mad_state(mad_send_wr, IB_MAD_STATE_CANCELED);
- /* Empty wait list to prevent receives from finding a request */
- list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-
- /* Report all cancelled requests */
- mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
- mad_send_wc.vendor_err = 0;
+ /* Empty wait & backlog list to prevent receives from finding request */
+ list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
+ &mad_agent_priv->wait_list, agent_list) {
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
+ list_add_tail(&mad_send_wr->agent_list, &cancel_list);
+ }
list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
- &cancel_list, agent_list) {
- mad_send_wc.send_buf = &mad_send_wr->send_buf;
- list_del(&mad_send_wr->agent_list);
- mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
- &mad_send_wc);
- deref_mad_agent(mad_agent_priv);
+ &mad_agent_priv->backlog_list, agent_list) {
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
+ list_add_tail(&mad_send_wr->agent_list, &cancel_list);
}
+
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ /* Report all cancelled requests */
+ clear_mad_error_list(&cancel_list, IB_WC_WR_FLUSH_ERR, mad_agent_priv);
}
static struct ib_mad_send_wr_private*
@@ -2451,6 +2699,13 @@ find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
&mad_send_wr->send_buf == send_buf)
return mad_send_wr;
}
+
+ list_for_each_entry(mad_send_wr, &mad_agent_priv->backlog_list,
+ agent_list) {
+ if (&mad_send_wr->send_buf == send_buf)
+ return mad_send_wr;
+ }
+
return NULL;
}
@@ -2468,16 +2723,16 @@ int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms)
struct ib_mad_agent_private, agent);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
- if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
+ if (!mad_send_wr || mad_send_wr->state == IB_MAD_STATE_CANCELED) {
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
return -EINVAL;
}
- active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
- if (!timeout_ms) {
- mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
- mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
- }
+ active = ((mad_send_wr->state == IB_MAD_STATE_SEND_START) ||
+ (mad_send_wr->state == IB_MAD_STATE_EARLY_RESP) ||
+ (mad_send_wr->state == IB_MAD_STATE_QUEUED && timeout_ms));
+ if (!timeout_ms)
+ change_mad_state(mad_send_wr, IB_MAD_STATE_CANCELED);
mad_send_wr->send_buf.timeout_ms = timeout_ms;
if (active)
@@ -2589,6 +2844,11 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
mad_send_wr->send_buf.retries++;
mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
+ if (mad_send_wr->is_solicited_fc &&
+ !list_empty(&mad_send_wr->mad_agent_priv->backlog_list)) {
+ change_mad_state(mad_send_wr, IB_MAD_STATE_QUEUED);
+ return 0;
+ }
if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
ret = ib_retry_rmpp(mad_send_wr);
@@ -2606,26 +2866,25 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
} else
ret = ib_send_mad(mad_send_wr);
- if (!ret) {
- mad_send_wr->refcount++;
- list_add_tail(&mad_send_wr->agent_list,
- &mad_send_wr->mad_agent_priv->send_list);
- }
+ if (!ret)
+ change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
+
return ret;
}
static void timeout_sends(struct work_struct *work)
{
- struct ib_mad_send_wr_private *mad_send_wr, *n;
+ struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_send_wc mad_send_wc;
- struct list_head local_list;
+ struct list_head timeout_list;
+ struct list_head cancel_list;
+ struct list_head *list_item;
unsigned long flags, delay;
mad_agent_priv = container_of(work, struct ib_mad_agent_private,
timed_work.work);
- mad_send_wc.vendor_err = 0;
- INIT_LIST_HEAD(&local_list);
+ INIT_LIST_HEAD(&timeout_list);
+ INIT_LIST_HEAD(&cancel_list);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
while (!list_empty(&mad_agent_priv->wait_list)) {
@@ -2643,25 +2902,22 @@ static void timeout_sends(struct work_struct *work)
break;
}
- list_del_init(&mad_send_wr->agent_list);
- if (mad_send_wr->status == IB_WC_SUCCESS &&
- !retry_send(mad_send_wr))
+ if (mad_send_wr->state == IB_MAD_STATE_CANCELED)
+ list_item = &cancel_list;
+ else if (retry_send(mad_send_wr))
+ list_item = &timeout_list;
+ else
continue;
- list_add_tail(&mad_send_wr->agent_list, &local_list);
+ change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
+ list_add_tail(&mad_send_wr->agent_list, list_item);
}
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- list_for_each_entry_safe(mad_send_wr, n, &local_list, agent_list) {
- if (mad_send_wr->status == IB_WC_SUCCESS)
- mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
- else
- mad_send_wc.status = mad_send_wr->status;
- mad_send_wc.send_buf = &mad_send_wr->send_buf;
- mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
- &mad_send_wc);
- deref_mad_agent(mad_agent_priv);
- }
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ process_backlog_mads(mad_agent_priv);
+ clear_mad_error_list(&timeout_list, IB_WC_RESP_TIMEOUT_ERR,
+ mad_agent_priv);
+ clear_mad_error_list(&cancel_list, IB_WC_WR_FLUSH_ERR, mad_agent_priv);
}
/*
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 1b7445a6f671..f444357d33f4 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -95,13 +95,16 @@ struct ib_mad_agent_private {
spinlock_t lock;
struct list_head send_list;
+ unsigned int sol_fc_send_count;
struct list_head wait_list;
- struct list_head done_list;
+ unsigned int sol_fc_wait_count;
struct delayed_work timed_work;
unsigned long timeout;
struct list_head local_list;
struct work_struct local_work;
struct list_head rmpp_list;
+ unsigned int sol_fc_max;
+ struct list_head backlog_list;
refcount_t refcount;
union {
@@ -118,6 +121,32 @@ struct ib_mad_snoop_private {
struct completion comp;
};
+enum ib_mad_state {
+ /* MAD is in the making and is not yet in any list */
+ IB_MAD_STATE_INIT,
+ /* MAD is in backlog list */
+ IB_MAD_STATE_QUEUED,
+ /*
+ * MAD was sent to the QP and is waiting for completion
+ * notification in send list.
+ */
+ IB_MAD_STATE_SEND_START,
+ /*
+ * MAD send completed successfully, waiting for a response
+ * in wait list.
+ */
+ IB_MAD_STATE_WAIT_RESP,
+ /*
+ * Response came early, before send completion notification,
+ * in send list.
+ */
+ IB_MAD_STATE_EARLY_RESP,
+ /* MAD was canceled while in wait or send list */
+ IB_MAD_STATE_CANCELED,
+ /* MAD processing completed; the MAD is not on any list */
+ IB_MAD_STATE_DONE
+};
+
struct ib_mad_send_wr_private {
struct ib_mad_list_head mad_list;
struct list_head agent_list;
@@ -132,8 +161,6 @@ struct ib_mad_send_wr_private {
int max_retries;
int retries_left;
int retry;
- int refcount;
- enum ib_wc_status status;
/* RMPP control */
struct list_head rmpp_list;
@@ -143,8 +170,48 @@ struct ib_mad_send_wr_private {
int seg_num;
int newwin;
int pad;
+
+ enum ib_mad_state state;
+
+ /* Solicited MAD flow control */
+ bool is_solicited_fc;
};
+static inline void expect_mad_state(struct ib_mad_send_wr_private *mad_send_wr,
+ enum ib_mad_state expected_state)
+{
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ WARN_ON(mad_send_wr->state != expected_state);
+}
+
+static inline void expect_mad_state2(struct ib_mad_send_wr_private *mad_send_wr,
+ enum ib_mad_state expected_state1,
+ enum ib_mad_state expected_state2)
+{
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ WARN_ON(mad_send_wr->state != expected_state1 &&
+ mad_send_wr->state != expected_state2);
+}
+
+static inline void expect_mad_state3(struct ib_mad_send_wr_private *mad_send_wr,
+ enum ib_mad_state expected_state1,
+ enum ib_mad_state expected_state2,
+ enum ib_mad_state expected_state3)
+{
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ WARN_ON(mad_send_wr->state != expected_state1 &&
+ mad_send_wr->state != expected_state2 &&
+ mad_send_wr->state != expected_state3);
+}
+
+static inline void
+not_expect_mad_state(struct ib_mad_send_wr_private *mad_send_wr,
+ enum ib_mad_state wrong_state)
+{
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ WARN_ON(mad_send_wr->state == wrong_state);
+}
+
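A note on the helpers above: the assertions compile away unless CONFIG_LOCKDEP is enabled, reusing the lock-debugging option as a cheap stand-in for a "debug kernel" rather than introducing a new Kconfig symbol. On a lockdep build, an illegal transition (for example, moving a request to IB_MAD_STATE_EARLY_RESP while it is still in IB_MAD_STATE_WAIT_RESP) fires a WARN_ON backtrace at the offending call site.
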
struct ib_mad_local_private {
struct list_head completion_list;
struct ib_mad_private *mad_priv;
@@ -222,4 +289,7 @@ void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr);
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
unsigned long timeout_ms);
+void change_mad_state(struct ib_mad_send_wr_private *mad_send_wr,
+ enum ib_mad_state new_state);
+
#endif /* __IB_MAD_PRIV_H__ */
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index b4b10e8a6495..1c5e0eaf1c94 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -608,16 +608,20 @@ static void abort_send(struct ib_mad_agent_private *agent,
goto out; /* Unmatched send */
if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
- (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
+ (!mad_send_wr->timeout) ||
+ (mad_send_wr->state == IB_MAD_STATE_CANCELED))
goto out; /* Send is already done */
ib_mark_mad_done(mad_send_wr);
+ if (mad_send_wr->state == IB_MAD_STATE_DONE) {
+ spin_unlock_irqrestore(&agent->lock, flags);
+ wc.status = IB_WC_REM_ABORT_ERR;
+ wc.vendor_err = rmpp_status;
+ wc.send_buf = &mad_send_wr->send_buf;
+ ib_mad_complete_send_wr(mad_send_wr, &wc);
+ return;
+ }
spin_unlock_irqrestore(&agent->lock, flags);
-
- wc.status = IB_WC_REM_ABORT_ERR;
- wc.vendor_err = rmpp_status;
- wc.send_buf = &mad_send_wr->send_buf;
- ib_mad_complete_send_wr(mad_send_wr, &wc);
return;
out:
spin_unlock_irqrestore(&agent->lock, flags);
@@ -684,7 +688,8 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
}
if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
- (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
+ (!mad_send_wr->timeout) ||
+ (mad_send_wr->state == IB_MAD_STATE_CANCELED))
goto out; /* Send is already done */
if (seg_num > mad_send_wr->send_buf.seg_count ||
@@ -709,21 +714,24 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
struct ib_mad_send_wc wc;
ib_mark_mad_done(mad_send_wr);
+ if (mad_send_wr->state == IB_MAD_STATE_DONE) {
+ spin_unlock_irqrestore(&agent->lock, flags);
+ wc.status = IB_WC_SUCCESS;
+ wc.vendor_err = 0;
+ wc.send_buf = &mad_send_wr->send_buf;
+ ib_mad_complete_send_wr(mad_send_wr, &wc);
+ return;
+ }
spin_unlock_irqrestore(&agent->lock, flags);
-
- wc.status = IB_WC_SUCCESS;
- wc.vendor_err = 0;
- wc.send_buf = &mad_send_wr->send_buf;
- ib_mad_complete_send_wr(mad_send_wr, &wc);
return;
}
- if (mad_send_wr->refcount == 1)
+ if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP)
ib_reset_mad_timeout(mad_send_wr,
mad_send_wr->send_buf.timeout_ms);
spin_unlock_irqrestore(&agent->lock, flags);
ack_ds_ack(agent, mad_recv_wc);
return;
- } else if (mad_send_wr->refcount == 1 &&
+ } else if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP &&
mad_send_wr->seg_num < mad_send_wr->newwin &&
mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
/* Send failure will just result in a timeout/retry */
@@ -731,7 +739,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
if (ret)
goto out;
- mad_send_wr->refcount++;
+ change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
list_move_tail(&mad_send_wr->agent_list,
&mad_send_wr->mad_agent_priv->send_list);
}
@@ -890,7 +898,6 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
mad_send_wr->newwin = init_newwin(mad_send_wr);
/* We need to wait for the final ACK even if there isn't a response */
- mad_send_wr->refcount += (mad_send_wr->timeout == 0);
ret = send_next_seg(mad_send_wr);
if (!ret)
return IB_RMPP_RESULT_CONSUMED;
@@ -912,7 +919,7 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
if (mad_send_wc->status != IB_WC_SUCCESS ||
- mad_send_wr->status != IB_WC_SUCCESS)
+ mad_send_wr->state == IB_MAD_STATE_CANCELED)
return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */
if (!mad_send_wr->timeout)
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index a872643e8039..2220a2dfab24 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
bool rdma_nl_get_privileged_qkey(void)
{
- return privileged_qkey || capable(CAP_NET_RAW);
+ return privileged_qkey;
}
EXPORT_SYMBOL(rdma_nl_get_privileged_qkey);
@@ -1469,10 +1469,11 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
};
-static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack,
- enum rdma_restrack_type res_type,
- res_fill_func_t fill_func)
+static noinline_for_stack int
+res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack,
+ enum rdma_restrack_type res_type,
+ res_fill_func_t fill_func)
{
const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
@@ -2263,10 +2264,10 @@ err:
return ret;
}
-static int stat_get_doit_default_counter(struct sk_buff *skb,
- struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack,
- struct nlattr *tb[])
+static noinline_for_stack int
+stat_get_doit_default_counter(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack,
+ struct nlattr *tb[])
{
struct rdma_hw_stats *stats;
struct nlattr *table_attr;
@@ -2356,8 +2357,9 @@ err:
return ret;
}
-static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack, struct nlattr *tb[])
+static noinline_for_stack int
+stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack, struct nlattr *tb[])
{
static enum rdma_nl_counter_mode mode;
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 90c177edf9b0..18918f463361 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -1019,3 +1019,32 @@ void uverbs_finalize_object(struct ib_uobject *uobj,
WARN_ON(true);
}
}
+
+/**
+ * rdma_uattrs_has_raw_cap() - Returns whether an rdma device linked to the
+ * uverbs attributes file has CAP_NET_RAW
+ * capability or not.
+ *
+ * @attrs: Pointer to uverbs attributes
+ *
+ * Returns true if an rdma device's owning user namespace has CAP_NET_RAW
+ * capability, otherwise false.
+ */
+bool rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_file *ufile = attrs->ufile;
+ struct ib_ucontext *ucontext;
+ bool has_cap = false;
+ int srcu_key;
+
+ srcu_key = srcu_read_lock(&ufile->device->disassociate_srcu);
+ ucontext = ib_uverbs_get_ucontext_file(ufile);
+ if (IS_ERR(ucontext))
+ goto out;
+ has_cap = rdma_dev_has_raw_cap(ucontext->device);
+
+out:
+ srcu_read_unlock(&ufile->device->disassociate_srcu, srcu_key);
+ return has_cap;
+}
+EXPORT_SYMBOL(rdma_uattrs_has_raw_cap);
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index 33706dad6c0f..a59b087611cb 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -156,6 +156,7 @@ extern const struct uapi_definition uverbs_def_obj_counters[];
extern const struct uapi_definition uverbs_def_obj_cq[];
extern const struct uapi_definition uverbs_def_obj_device[];
extern const struct uapi_definition uverbs_def_obj_dm[];
+extern const struct uapi_definition uverbs_def_obj_dmah[];
extern const struct uapi_definition uverbs_def_obj_flow_action[];
extern const struct uapi_definition uverbs_def_obj_intf[];
extern const struct uapi_definition uverbs_def_obj_mr[];
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 3313410014cd..a7de6f403fca 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -100,6 +100,8 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
return container_of(res, struct rdma_counter, res)->device;
case RDMA_RESTRACK_SRQ:
return container_of(res, struct ib_srq, res)->device;
+ case RDMA_RESTRACK_DMAH:
+ return container_of(res, struct ib_dmah, res)->device;
default:
WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type);
return NULL;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index bc9fe3ceca4d..ce16404cdfb8 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -741,7 +741,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
}
mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
- cmd.access_flags,
+ cmd.access_flags, NULL,
&attrs->driver_udata);
if (IS_ERR(mr)) {
ret = PTR_ERR(mr);
@@ -1312,9 +1312,9 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
switch (cmd->qp_type) {
case IB_QPT_RAW_PACKET:
- if (!capable(CAP_NET_RAW))
+ if (!rdma_uattrs_has_raw_cap(attrs))
return -EPERM;
- break;
+ fallthrough;
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
@@ -1451,7 +1451,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
}
if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
- if (!capable(CAP_NET_RAW)) {
+ if (!rdma_uattrs_has_raw_cap(attrs)) {
ret = -EPERM;
goto err_put;
}
@@ -1877,7 +1877,8 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
attr->path_mig_state = cmd->base.path_mig_state;
if (cmd->base.attr_mask & IB_QP_QKEY) {
if (cmd->base.qkey & IB_QP_SET_QKEY &&
- !rdma_nl_get_privileged_qkey()) {
+ !(rdma_nl_get_privileged_qkey() ||
+ rdma_uattrs_has_raw_cap(attrs))) {
ret = -EPERM;
goto release_qp;
}
@@ -3225,7 +3226,7 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
if (cmd.comp_mask)
return -EINVAL;
- if (!capable(CAP_NET_RAW))
+ if (!rdma_uattrs_has_raw_cap(attrs))
return -EPERM;
if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index 432054f0a8a4..37cd37556510 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -64,15 +64,21 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
struct ib_ucq_object *obj = container_of(
uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE),
typeof(*obj), uevent.uobject);
+ struct ib_uverbs_completion_event_file *ev_file = NULL;
struct ib_device *ib_dev = attrs->context->device;
- int ret;
- u64 user_handle;
+ struct ib_umem_dmabuf *umem_dmabuf;
struct ib_cq_init_attr attr = {};
- struct ib_cq *cq;
- struct ib_uverbs_completion_event_file *ev_file = NULL;
struct ib_uobject *ev_file_uobj;
+ struct ib_umem *umem = NULL;
+ u64 buffer_length;
+ u64 buffer_offset;
+ struct ib_cq *cq;
+ u64 user_handle;
+ u64 buffer_va;
+ int buffer_fd;
+ int ret;
- if (!ib_dev->ops.create_cq || !ib_dev->ops.destroy_cq)
+ if ((!ib_dev->ops.create_cq && !ib_dev->ops.create_cq_umem) || !ib_dev->ops.destroy_cq)
return -EOPNOTSUPP;
ret = uverbs_copy_from(&attr.comp_vector, attrs,
@@ -112,9 +118,66 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
INIT_LIST_HEAD(&obj->comp_list);
INIT_LIST_HEAD(&obj->uevent.event_list);
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_VA)) {
+
+ ret = uverbs_copy_from(&buffer_va, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_VA);
+ if (ret)
+ goto err_event_file;
+
+ ret = uverbs_copy_from(&buffer_length, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH);
+ if (ret)
+ goto err_event_file;
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_FD) ||
+ uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET) ||
+ !ib_dev->ops.create_cq_umem) {
+ ret = -EINVAL;
+ goto err_event_file;
+ }
+
+ umem = ib_umem_get(ib_dev, buffer_va, buffer_length, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem)) {
+ ret = PTR_ERR(umem);
+ goto err_event_file;
+ }
+ } else if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_FD)) {
+
+ ret = uverbs_get_raw_fd(&buffer_fd, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_FD);
+ if (ret)
+ goto err_event_file;
+
+ ret = uverbs_copy_from(&buffer_offset, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET);
+ if (ret)
+ goto err_event_file;
+
+ ret = uverbs_copy_from(&buffer_length, attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH);
+ if (ret)
+ goto err_event_file;
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_VA) ||
+ !ib_dev->ops.create_cq_umem) {
+ ret = -EINVAL;
+ goto err_event_file;
+ }
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(ib_dev, buffer_offset, buffer_length,
+ buffer_fd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem_dmabuf)) {
+ ret = PTR_ERR(umem_dmabuf);
+ goto err_event_file;
+ }
+ umem = &umem_dmabuf->umem;
+ } else if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET) ||
+ uverbs_attr_is_valid(attrs, UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH) ||
+ !ib_dev->ops.create_cq) {
+ ret = -EINVAL;
+ goto err_event_file;
+ }
+
cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
if (!cq) {
ret = -ENOMEM;
+ ib_umem_release(umem);
goto err_event_file;
}
@@ -128,7 +191,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
rdma_restrack_set_name(&cq->res, NULL);
- ret = ib_dev->ops.create_cq(cq, &attr, attrs);
+ ret = umem ? ib_dev->ops.create_cq_umem(cq, &attr, umem, attrs) :
+ ib_dev->ops.create_cq(cq, &attr, attrs);
if (ret)
goto err_free;
@@ -180,6 +244,17 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_OBJECT_ASYNC_EVENT,
UVERBS_ACCESS_READ,
UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_BUFFER_VA,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL),
+ UVERBS_ATTR_RAW_FD(UVERBS_ATTR_CREATE_CQ_BUFFER_FD,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL),
UVERBS_ATTR_UHW());
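
For reference, the attribute combinations the CQ-create handler above accepts, as derived from its branch structure (anything else fails with -EINVAL):

	BUFFER_VA + BUFFER_LENGTH             user memory via ib_umem_get();
	                                      requires ops.create_cq_umem
	BUFFER_FD + BUFFER_OFFSET + LENGTH    pinned dmabuf via
	                                      ib_umem_dmabuf_get_pinned();
	                                      requires ops.create_cq_umem
	no buffer attributes                  legacy path; requires ops.create_cq
	VA mixed with FD/OFFSET, or a stray   rejected
	OFFSET/LENGTH on its own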
static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(
diff --git a/drivers/infiniband/core/uverbs_std_types_dmah.c b/drivers/infiniband/core/uverbs_std_types_dmah.c
new file mode 100644
index 000000000000..453ce656c6f2
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_dmah.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include "rdma_core.h"
+#include "uverbs.h"
+#include <rdma/uverbs_std_types.h>
+#include "restrack.h"
+
+static int uverbs_free_dmah(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_dmah *dmah = uobject->object;
+ int ret;
+
+ if (atomic_read(&dmah->usecnt))
+ return -EBUSY;
+
+ ret = dmah->device->ops.dealloc_dmah(dmah, attrs);
+ if (ret)
+ return ret;
+
+ rdma_restrack_del(&dmah->res);
+ kfree(dmah);
+ return 0;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_DMAH_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get(attrs, UVERBS_ATTR_ALLOC_DMAH_HANDLE)
+ ->obj_attr.uobject;
+ struct ib_device *ib_dev = attrs->context->device;
+ struct ib_dmah *dmah;
+ int ret;
+
+ dmah = rdma_zalloc_drv_obj(ib_dev, ib_dmah);
+ if (!dmah)
+ return -ENOMEM;
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_ALLOC_DMAH_CPU_ID)) {
+ ret = uverbs_copy_from(&dmah->cpu_id, attrs,
+ UVERBS_ATTR_ALLOC_DMAH_CPU_ID);
+ if (ret)
+ goto err;
+
+ if (!cpumask_test_cpu(dmah->cpu_id, current->cpus_ptr)) {
+ ret = -EPERM;
+ goto err;
+ }
+
+ dmah->valid_fields |= BIT(IB_DMAH_CPU_ID_EXISTS);
+ }
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_ALLOC_DMAH_TPH_MEM_TYPE)) {
+ dmah->mem_type = uverbs_attr_get_enum_id(attrs,
+ UVERBS_ATTR_ALLOC_DMAH_TPH_MEM_TYPE);
+ dmah->valid_fields |= BIT(IB_DMAH_MEM_TYPE_EXISTS);
+ }
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_ALLOC_DMAH_PH)) {
+ ret = uverbs_copy_from(&dmah->ph, attrs,
+ UVERBS_ATTR_ALLOC_DMAH_PH);
+ if (ret)
+ goto err;
+
+ /* Per PCIe spec 6.2-1.0, only the lowest two bits are applicable */
+ if (dmah->ph & 0xFC) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dmah->valid_fields |= BIT(IB_DMAH_PH_EXISTS);
+ }
+
+ dmah->device = ib_dev;
+ dmah->uobject = uobj;
+ atomic_set(&dmah->usecnt, 0);
+
+ rdma_restrack_new(&dmah->res, RDMA_RESTRACK_DMAH);
+ rdma_restrack_set_name(&dmah->res, NULL);
+
+ ret = ib_dev->ops.alloc_dmah(dmah, attrs);
+ if (ret) {
+ rdma_restrack_put(&dmah->res);
+ goto err;
+ }
+
+ uobj->object = dmah;
+ rdma_restrack_add(&dmah->res);
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_ALLOC_DMAH_HANDLE);
+ return 0;
+err:
+ kfree(dmah);
+ return ret;
+}
+
+static const struct uverbs_attr_spec uverbs_dmah_mem_type[] = {
+ [TPH_MEM_TYPE_VM] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+ [TPH_MEM_TYPE_PM] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+};
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_DMAH_ALLOC,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_ALLOC_DMAH_HANDLE,
+ UVERBS_OBJECT_DMAH,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DMAH_CPU_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_ALLOC_DMAH_TPH_MEM_TYPE,
+ uverbs_dmah_mem_type,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DMAH_PH,
+ UVERBS_ATTR_TYPE(u8),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_DMAH_FREE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_FREE_DMA_HANDLE,
+ UVERBS_OBJECT_DMAH,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_DMAH,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_dmah),
+ &UVERBS_METHOD(UVERBS_METHOD_DMAH_ALLOC),
+ &UVERBS_METHOD(UVERBS_METHOD_DMAH_FREE));
+
+const struct uapi_definition uverbs_def_obj_dmah[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DMAH,
+ UAPI_DEF_OBJ_NEEDS_FN(dealloc_dmah),
+ UAPI_DEF_OBJ_NEEDS_FN(alloc_dmah)),
+ {}
+};
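
Of the validation rules in UVERBS_METHOD_DMAH_ALLOC above, the PH check is the most compact: the PCIe TPH Processing Hint is a 2-bit field, so any value with bits above 0x3 set is rejected. A standalone model of that check (a hypothetical helper, not the kernel function):

	#include <errno.h>
	#include <stdio.h>

	/* The TPH Processing Hint occupies only the lowest two bits. */
	static int check_dmah_ph(unsigned int ph)
	{
		return (ph & 0xFC) ? -EINVAL : 0;
	}

	int main(void)
	{
		for (unsigned int ph = 0; ph < 6; ph++)
			printf("ph=%u -> %d\n", ph, check_dmah_ph(ph));
		return 0;    /* 0..3 accepted, 4 and 5 rejected */
	}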
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index 7ebc7bd3caae..570b9656801d 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -238,7 +238,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
return ret;
mr = pd->device->ops.reg_user_mr_dmabuf(pd, offset, length, iova, fd,
- access_flags,
+ access_flags, NULL,
attrs);
if (IS_ERR(mr))
return PTR_ERR(mr);
@@ -266,6 +266,135 @@ static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
return ret;
}
+static int UVERBS_HANDLER(UVERBS_METHOD_REG_MR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_REG_MR_HANDLE);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_MR_PD_HANDLE);
+ u32 valid_access_flags = IB_ACCESS_SUPPORTED;
+ u64 length, iova, fd_offset = 0, addr = 0;
+ struct ib_device *ib_dev = pd->device;
+ struct ib_dmah *dmah = NULL;
+ bool has_fd_offset = false;
+ bool has_addr = false;
+ bool has_fd = false;
+ u32 access_flags;
+ struct ib_mr *mr;
+ int fd;
+ int ret;
+
+ ret = uverbs_copy_from(&iova, attrs, UVERBS_ATTR_REG_MR_IOVA);
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_from(&length, attrs, UVERBS_ATTR_REG_MR_LENGTH);
+ if (ret)
+ return ret;
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_REG_MR_ADDR)) {
+ ret = uverbs_copy_from(&addr, attrs,
+ UVERBS_ATTR_REG_MR_ADDR);
+ if (ret)
+ return ret;
+ has_addr = true;
+ }
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_REG_MR_FD_OFFSET)) {
+ ret = uverbs_copy_from(&fd_offset, attrs,
+ UVERBS_ATTR_REG_MR_FD_OFFSET);
+ if (ret)
+ return ret;
+ has_fd_offset = true;
+ }
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_REG_MR_FD)) {
+ ret = uverbs_get_raw_fd(&fd, attrs,
+ UVERBS_ATTR_REG_MR_FD);
+ if (ret)
+ return ret;
+ has_fd = true;
+ }
+
+ if (has_fd) {
+ if (!ib_dev->ops.reg_user_mr_dmabuf)
+ return -EOPNOTSUPP;
+
+ /* FD requires offset and can't come with addr */
+ if (!has_fd_offset || has_addr)
+ return -EINVAL;
+
+ if ((fd_offset & ~PAGE_MASK) != (iova & ~PAGE_MASK))
+ return -EINVAL;
+
+ valid_access_flags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC |
+ IB_ACCESS_RELAXED_ORDERING;
+ } else {
+ if (!has_addr || has_fd_offset)
+ return -EINVAL;
+
+ if ((addr & ~PAGE_MASK) != (iova & ~PAGE_MASK))
+ return -EINVAL;
+ }
+
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_REG_MR_DMA_HANDLE)) {
+ dmah = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_REG_MR_DMA_HANDLE);
+ if (IS_ERR(dmah))
+ return PTR_ERR(dmah);
+ }
+
+ ret = uverbs_get_flags32(&access_flags, attrs,
+ UVERBS_ATTR_REG_MR_ACCESS_FLAGS,
+ valid_access_flags);
+ if (ret)
+ return ret;
+
+ ret = ib_check_mr_access(ib_dev, access_flags);
+ if (ret)
+ return ret;
+
+ if (has_fd)
+ mr = pd->device->ops.reg_user_mr_dmabuf(pd, fd_offset, length,
+ iova, fd, access_flags,
+ dmah, attrs);
+ else
+ mr = pd->device->ops.reg_user_mr(pd, addr, length, iova,
+ access_flags, dmah, NULL);
+
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
+
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->type = IB_MR_TYPE_USER;
+ mr->uobject = uobj;
+ atomic_inc(&pd->usecnt);
+ if (dmah) {
+ mr->dmah = dmah;
+ atomic_inc(&dmah->usecnt);
+ }
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_set_name(&mr->res, NULL);
+ rdma_restrack_add(&mr->res);
+ uobj->object = mr;
+
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_MR_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_MR_RESP_LKEY,
+ &mr->lkey, sizeof(mr->lkey));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_MR_RESP_RKEY,
+ &mr->rkey, sizeof(mr->rkey));
+ return ret;
+}
+
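The page-offset invariant above, (addr & ~PAGE_MASK) != (iova & ~PAGE_MASK), requires the mapped address and the requested IOVA to agree on their offset within a page, since the HCA translates whole pages. With 4 KiB pages, addr = 0x7f2a10005234 and iova = 0x10234 both carry in-page offset 0x234 and pass; iova = 0x10200 (offset 0x200) would be rejected with -EINVAL. The same rule is applied to fd_offset on the dmabuf path.
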
DECLARE_UVERBS_NAMED_METHOD(
UVERBS_METHOD_ADVISE_MR,
UVERBS_ATTR_IDR(UVERBS_ATTR_ADVISE_MR_PD_HANDLE,
@@ -362,6 +491,44 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_TYPE(u32),
UA_MANDATORY));
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_REG_MR,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_MR_HANDLE,
+ UVERBS_OBJECT_MR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_MR_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_MR_DMA_HANDLE,
+ UVERBS_OBJECT_DMAH,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_MR_IOVA,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_MR_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_REG_MR_ACCESS_FLAGS,
+ enum ib_access_flags,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_MR_ADDR,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_MR_FD_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL),
+ UVERBS_ATTR_RAW_FD(UVERBS_ATTR_REG_MR_FD,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_MR_RESP_LKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_MR_RESP_RKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_MR_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MR_HANDLE,
@@ -376,7 +543,8 @@ DECLARE_UVERBS_NAMED_OBJECT(
&UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG),
&UVERBS_METHOD(UVERBS_METHOD_MR_DESTROY),
&UVERBS_METHOD(UVERBS_METHOD_QUERY_MR),
- &UVERBS_METHOD(UVERBS_METHOD_REG_DMABUF_MR));
+ &UVERBS_METHOD(UVERBS_METHOD_REG_DMABUF_MR),
+ &UVERBS_METHOD(UVERBS_METHOD_REG_MR));
const struct uapi_definition uverbs_def_obj_mr[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MR,
diff --git a/drivers/infiniband/core/uverbs_std_types_qp.c b/drivers/infiniband/core/uverbs_std_types_qp.c
index 7b4773fa4bc0..be0730e8509e 100644
--- a/drivers/infiniband/core/uverbs_std_types_qp.c
+++ b/drivers/infiniband/core/uverbs_std_types_qp.c
@@ -133,7 +133,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE)(
device = xrcd->device;
break;
case IB_UVERBS_QPT_RAW_PACKET:
- if (!capable(CAP_NET_RAW))
+ if (!rdma_uattrs_has_raw_cap(attrs))
return -EPERM;
fallthrough;
case IB_UVERBS_QPT_RC:
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index a02916a3a79c..e00ea63175bd 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -631,6 +631,7 @@ static const struct uapi_definition uverbs_core_api[] = {
UAPI_DEF_CHAIN(uverbs_def_obj_cq),
UAPI_DEF_CHAIN(uverbs_def_obj_device),
UAPI_DEF_CHAIN(uverbs_def_obj_dm),
+ UAPI_DEF_CHAIN(uverbs_def_obj_dmah),
UAPI_DEF_CHAIN(uverbs_def_obj_flow_action),
UAPI_DEF_CHAIN(uverbs_def_obj_intf),
UAPI_DEF_CHAIN(uverbs_def_obj_mr),
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 75fde0fe9989..3a5f81402d2f 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2223,7 +2223,7 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
- access_flags, NULL);
+ access_flags, NULL, NULL);
if (IS_ERR(mr))
return mr;
@@ -2262,6 +2262,7 @@ int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
{
struct ib_pd *pd = mr->pd;
struct ib_dm *dm = mr->dm;
+ struct ib_dmah *dmah = mr->dmah;
struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
int ret;
@@ -2272,6 +2273,8 @@ int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
atomic_dec(&pd->usecnt);
if (dm)
atomic_dec(&dm->usecnt);
+ if (dmah)
+ atomic_dec(&dmah->usecnt);
kfree(sig_attrs);
}
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index aba96ca9bce5..df61b2299ec0 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
-obj-$(CONFIG_INFINIBAND_QIB) += qib/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
obj-$(CONFIG_INFINIBAND_EFA) += efa/
obj-$(CONFIG_INFINIBAND_IRDMA) += irdma/
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 063801384b2b..37c2bc3bdba5 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -4235,6 +4235,7 @@ free_mr:
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
@@ -4242,6 +4243,9 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct ib_umem *umem;
struct ib_mr *ib_mr;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
if (IS_ERR(umem))
return ERR_CAST(umem);
@@ -4255,6 +4259,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
u64 length, u64 virt_addr, int fd,
int mr_access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
@@ -4263,6 +4268,9 @@ struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
struct ib_umem *umem;
struct ib_mr *ib_mr;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
fd, mr_access_flags);
if (IS_ERR(umem_dmabuf))
@@ -4738,7 +4746,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
return err;
err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
- &offset, sizeof(length));
+ &offset, sizeof(offset));
if (err)
return err;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 22c9eb8e9cfc..fe00ab691a51 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -258,10 +258,12 @@ struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
u64 length, u64 virt_addr,
int fd, int mr_access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index be34c605d516..dfe3177123e5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1750,9 +1750,9 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
}
}
-static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_swqe *wqe,
- u16 *idx)
+static unsigned int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ u32 *idx)
{
struct bnxt_qplib_hwq *hwq;
int len, t_len, offt;
@@ -1769,7 +1769,7 @@ static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
il_src = (void *)wqe->sg_list[indx].addr;
t_len += len;
if (t_len > qp->max_inline_data)
- return -ENOMEM;
+ return BNXT_RE_INVAL_MSG_SIZE;
while (len) {
if (pull_dst) {
pull_dst = false;
@@ -1795,9 +1795,9 @@ static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
return t_len;
}
-static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
- struct bnxt_qplib_sge *ssge,
- u16 nsge, u16 *idx)
+static unsigned int bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
+ struct bnxt_qplib_sge *ssge,
+ u32 nsge, u32 *idx)
{
struct sq_sge *dsge;
int indx, len = 0;
@@ -1878,14 +1878,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_hwq *hwq;
struct bnxt_qplib_swq *swq;
bool sch_handler = false;
+ u32 wqe_idx, slots, idx;
u16 wqe_sz, qdf = 0;
bool msn_update;
void *base_hdr;
void *ext_hdr;
__le32 temp32;
- u32 wqe_idx;
- u32 slots;
- u16 idx;
hwq = &sq->hwq;
if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
@@ -1937,8 +1935,10 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
else
data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
&idx);
- if (data_len < 0)
- goto queue_err;
+ if (data_len > BNXT_RE_MAX_MSG_SIZE) {
+ rc = -EINVAL;
+ goto done;
+ }
/* Make sure we update MSN table only for wired wqes */
msn_update = true;
/* Specifics */
@@ -2139,8 +2139,8 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_hwq *hwq;
struct bnxt_qplib_swq *swq;
bool sch_handler = false;
- u16 wqe_sz, idx;
- u32 wqe_idx;
+ u32 wqe_idx, idx;
+ u16 wqe_sz;
int rc = 0;
hwq = &rq->hwq;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 0d9487c889ff..ab125f1d949e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -346,6 +346,9 @@ struct bnxt_qplib_qp {
u8 tos_dscp;
};
+#define BNXT_RE_MAX_MSG_SIZE 0x80000000
+#define BNXT_RE_INVAL_MSG_SIZE 0xFFFFFFFF
+
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
#define CQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
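Since bnxt_qplib_put_inline() and bnxt_qplib_put_sges() now return unsigned int, a negative errno can no longer travel through the return value; errors are signalled in-band with BNXT_RE_INVAL_MSG_SIZE (0xFFFFFFFF), and the caller rejects anything above BNXT_RE_MAX_MSG_SIZE (2 GiB), which covers the sentinel as well. A standalone sketch of that check (plain userspace C, constants from the defines above):

#include <stdint.h>
#include <stdio.h>

#define BNXT_RE_MAX_MSG_SIZE	0x80000000U
#define BNXT_RE_INVAL_MSG_SIZE	0xFFFFFFFFU

/* One unsigned comparison rejects both oversized and invalid lengths. */
static int msg_len_valid(uint32_t data_len)
{
	return data_len <= BNXT_RE_MAX_MSG_SIZE;
}

int main(void)
{
	printf("4k: %d, sentinel: %d\n",
	       msg_len_valid(4096), msg_len_valid(BNXT_RE_INVAL_MSG_SIZE));
	return 0;
}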
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 9efd32a3dc55..68981399598d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -674,7 +674,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
- req.access = (mr->access_flags & 0xFFFF);
+ req.access = (mr->access_flags & BNXT_QPLIB_MR_ACCESS_MASK);
req.va = cpu_to_le64(mr->va);
req.key = cpu_to_le32(mr->lkey);
if (_is_alloc_mr_unified(res->dattr->dev_cap_flags))
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index e626b05038a1..09faf4a1e849 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -111,6 +111,7 @@ struct bnxt_qplib_mrw {
struct bnxt_qplib_pd *pd;
int type;
u32 access_flags;
+#define BNXT_QPLIB_MR_ACCESS_MASK 0xFF
#define BNXT_QPLIB_FR_PMR 0x80000000
u32 lkey;
u32 rkey;
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 7eceb3e9f4ce..024845f945ff 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -39,8 +39,8 @@
#ifndef __BNXT_RE_HSI_H__
#define __BNXT_RE_HSI_H__
-/* include bnxt_hsi.h from bnxt_en driver */
-#include "bnxt_hsi.h"
+/* include linux/bnxt/hsi.h */
+#include <linux/bnxt/hsi.h>
/* tx_doorbell (size:32b/4B) */
struct tx_doorbell {
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 034b85c42255..b67747ae6a68 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -905,8 +905,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
return 0;
err_free_status_page_and_wr_log:
- if (c4iw_wr_log && rdev->wr_log)
- kfree(rdev->wr_log);
+ kfree(rdev->wr_log);
free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
c4iw_ocqp_pool_destroy(rdev);
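The removed conditional was redundant because kfree(NULL) is a defined no-op, so unwind paths can free unconditionally. A minimal sketch under that assumption (the helper name is hypothetical, fields as in this hunk):

/* Sketch: unconditional frees keep error unwinding short. */
static void c4iw_unwind_wr_log(struct c4iw_rdev *rdev)
{
	kfree(rdev->wr_log);	/* may be NULL; kfree(NULL) does nothing */
	free_page((unsigned long)rdev->status_page);
}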
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 5b3007acaa1f..e17c1252536b 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -1006,6 +1006,7 @@ int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
void c4iw_dealloc(struct uld_ctx *ctx);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
u64 length, u64 virt, int acc,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index a2c71a1d93d5..dcdfe250bdbe 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -489,7 +489,8 @@ err_free_mhp:
}
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
+ u64 virt, int acc, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
__be64 *pages;
int shift, n, i;
@@ -501,6 +502,9 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
pr_debug("ib_pd %p\n", pd);
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (length == ~0ULL)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 838182d0409c..96f9c3bc98b2 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -107,6 +107,7 @@ struct efa_cq {
u16 cq_idx;
/* NULL when no interrupts requested */
struct efa_eq *eq;
+ struct ib_umem *umem;
};
struct efa_qp {
@@ -162,12 +163,16 @@ int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs);
+int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_umem *umem, struct uverbs_attr_bundle *attrs);
struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index fe0b6aec7839..57178dad5eb7 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_ADMIN_CMDS_H_
@@ -68,6 +68,7 @@ enum efa_admin_get_stats_type {
EFA_ADMIN_GET_STATS_TYPE_MESSAGES = 1,
EFA_ADMIN_GET_STATS_TYPE_RDMA_READ = 2,
EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE = 3,
+ EFA_ADMIN_GET_STATS_TYPE_NETWORK = 4,
};
enum efa_admin_get_stats_scope {
@@ -651,6 +652,18 @@ struct efa_admin_rdma_write_stats {
u64 write_recv_bytes;
};
+struct efa_admin_network_stats {
+ u64 retrans_bytes;
+
+ u64 retrans_pkts;
+
+ u64 retrans_timeout_events;
+
+ u64 unresponsive_remote_events;
+
+ u64 impaired_remote_conn_events;
+};
+
struct efa_admin_acq_get_stats_resp {
struct efa_admin_acq_common_desc acq_common_desc;
@@ -662,6 +675,8 @@ struct efa_admin_acq_get_stats_resp {
struct efa_admin_rdma_read_stats rdma_read_stats;
struct efa_admin_rdma_write_stats rdma_write_stats;
+
+ struct efa_admin_network_stats network_stats;
} u;
};
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index c6b89c45fdc9..9ead02800ac7 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -769,6 +769,11 @@ int efa_com_get_stats(struct efa_com_dev *edev,
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_aq_get_stats_cmd cmd = {};
struct efa_admin_acq_get_stats_resp resp;
+ struct efa_admin_rdma_write_stats *rws;
+ struct efa_admin_rdma_read_stats *rrs;
+ struct efa_admin_messages_stats *ms;
+ struct efa_admin_network_stats *ns;
+ struct efa_admin_basic_stats *bs;
int err;
cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_STATS;
@@ -791,29 +796,41 @@ int efa_com_get_stats(struct efa_com_dev *edev,
switch (cmd.type) {
case EFA_ADMIN_GET_STATS_TYPE_BASIC:
- result->basic_stats.tx_bytes = resp.u.basic_stats.tx_bytes;
- result->basic_stats.tx_pkts = resp.u.basic_stats.tx_pkts;
- result->basic_stats.rx_bytes = resp.u.basic_stats.rx_bytes;
- result->basic_stats.rx_pkts = resp.u.basic_stats.rx_pkts;
- result->basic_stats.rx_drops = resp.u.basic_stats.rx_drops;
+ bs = &resp.u.basic_stats;
+ result->basic_stats.tx_bytes = bs->tx_bytes;
+ result->basic_stats.tx_pkts = bs->tx_pkts;
+ result->basic_stats.rx_bytes = bs->rx_bytes;
+ result->basic_stats.rx_pkts = bs->rx_pkts;
+ result->basic_stats.rx_drops = bs->rx_drops;
break;
case EFA_ADMIN_GET_STATS_TYPE_MESSAGES:
- result->messages_stats.send_bytes = resp.u.messages_stats.send_bytes;
- result->messages_stats.send_wrs = resp.u.messages_stats.send_wrs;
- result->messages_stats.recv_bytes = resp.u.messages_stats.recv_bytes;
- result->messages_stats.recv_wrs = resp.u.messages_stats.recv_wrs;
+ ms = &resp.u.messages_stats;
+ result->messages_stats.send_bytes = ms->send_bytes;
+ result->messages_stats.send_wrs = ms->send_wrs;
+ result->messages_stats.recv_bytes = ms->recv_bytes;
+ result->messages_stats.recv_wrs = ms->recv_wrs;
break;
case EFA_ADMIN_GET_STATS_TYPE_RDMA_READ:
- result->rdma_read_stats.read_wrs = resp.u.rdma_read_stats.read_wrs;
- result->rdma_read_stats.read_bytes = resp.u.rdma_read_stats.read_bytes;
- result->rdma_read_stats.read_wr_err = resp.u.rdma_read_stats.read_wr_err;
- result->rdma_read_stats.read_resp_bytes = resp.u.rdma_read_stats.read_resp_bytes;
+ rrs = &resp.u.rdma_read_stats;
+ result->rdma_read_stats.read_wrs = rrs->read_wrs;
+ result->rdma_read_stats.read_bytes = rrs->read_bytes;
+ result->rdma_read_stats.read_wr_err = rrs->read_wr_err;
+ result->rdma_read_stats.read_resp_bytes = rrs->read_resp_bytes;
break;
case EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE:
- result->rdma_write_stats.write_wrs = resp.u.rdma_write_stats.write_wrs;
- result->rdma_write_stats.write_bytes = resp.u.rdma_write_stats.write_bytes;
- result->rdma_write_stats.write_wr_err = resp.u.rdma_write_stats.write_wr_err;
- result->rdma_write_stats.write_recv_bytes = resp.u.rdma_write_stats.write_recv_bytes;
+ rws = &resp.u.rdma_write_stats;
+ result->rdma_write_stats.write_wrs = rws->write_wrs;
+ result->rdma_write_stats.write_bytes = rws->write_bytes;
+ result->rdma_write_stats.write_wr_err = rws->write_wr_err;
+ result->rdma_write_stats.write_recv_bytes = rws->write_recv_bytes;
+ break;
+ case EFA_ADMIN_GET_STATS_TYPE_NETWORK:
+ ns = &resp.u.network_stats;
+ result->network_stats.retrans_bytes = ns->retrans_bytes;
+ result->network_stats.retrans_pkts = ns->retrans_pkts;
+ result->network_stats.retrans_timeout_events = ns->retrans_timeout_events;
+ result->network_stats.unresponsive_remote_events = ns->unresponsive_remote_events;
+ result->network_stats.impaired_remote_conn_events = ns->impaired_remote_conn_events;
break;
}
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 5511355b700d..3ac2686abba1 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_CMD_H_
@@ -283,11 +283,20 @@ struct efa_com_rdma_write_stats {
u64 write_recv_bytes;
};
+struct efa_com_network_stats {
+ u64 retrans_bytes;
+ u64 retrans_pkts;
+ u64 retrans_timeout_events;
+ u64 unresponsive_remote_events;
+ u64 impaired_remote_conn_events;
+};
+
union efa_com_get_stats_result {
struct efa_com_basic_stats basic_stats;
struct efa_com_messages_stats messages_stats;
struct efa_com_rdma_read_stats rdma_read_stats;
struct efa_com_rdma_write_stats rdma_write_stats;
+ struct efa_com_network_stats network_stats;
};
int efa_com_create_qp(struct efa_com_dev *edev,
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 4f03c0ec819f..6c415b9adb5f 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -372,6 +372,7 @@ static const struct ib_device_ops efa_dev_ops = {
.alloc_pd = efa_alloc_pd,
.alloc_ucontext = efa_alloc_ucontext,
.create_cq = efa_create_cq,
+ .create_cq_umem = efa_create_cq_umem,
.create_qp = efa_create_qp,
.create_user_ah = efa_create_ah,
.dealloc_pd = efa_dealloc_pd,
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index a8645a40730f..886923d5fe50 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -64,6 +64,11 @@ struct efa_user_mmap_entry {
op(EFA_RDMA_WRITE_BYTES, "rdma_write_bytes") \
op(EFA_RDMA_WRITE_WR_ERR, "rdma_write_wr_err") \
op(EFA_RDMA_WRITE_RECV_BYTES, "rdma_write_recv_bytes") \
+ op(EFA_RETRANS_BYTES, "retrans_bytes") \
+ op(EFA_RETRANS_PKTS, "retrans_pkts") \
+ op(EFA_RETRANS_TIMEOUT_EVENTS, "retrans_timeout_events") \
+ op(EFA_UNRESPONSIVE_REMOTE_EVENTS, "unresponsive_remote_events") \
+ op(EFA_IMPAIRED_REMOTE_CONN_EVENTS, "impaired_remote_conn_events") \
#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, nam) \
@@ -249,6 +254,7 @@ int efa_query_device(struct ib_device *ibdev,
resp.max_rdma_size = dev_attr->max_rdma_size;
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_EXT_MEM;
if (EFA_DEV_CAP(dev, RDMA_READ))
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
@@ -1082,8 +1088,11 @@ int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
xa_erase(&dev->cqs_xa, cq->cq_idx);
synchronize_irq(cq->eq->irq.irqn);
}
- efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
- DMA_FROM_DEVICE);
+
+ if (cq->umem)
+ ib_umem_release(cq->umem);
+ else
+ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
return 0;
}
@@ -1122,8 +1131,8 @@ static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
return 0;
}
-int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
- struct uverbs_attr_bundle *attrs)
+int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_umem *umem, struct uverbs_attr_bundle *attrs)
{
struct ib_udata *udata = &attrs->driver_udata;
struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
@@ -1202,11 +1211,30 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->ucontext = ucontext;
cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
- cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
- DMA_FROM_DEVICE);
- if (!cq->cpu_addr) {
- err = -ENOMEM;
- goto err_out;
+
+ if (umem) {
+ if (umem->length < cq->size) {
+ ibdev_dbg(&dev->ibdev, "External memory too small\n");
+ err = -EINVAL;
+ goto err_free_mem;
+ }
+
+ if (!ib_umem_is_contiguous(umem)) {
+ ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
+ err = -EINVAL;
+ goto err_free_mem;
+ }
+
+ cq->cpu_addr = NULL;
+ cq->dma_addr = ib_umem_start_dma_addr(umem);
+ cq->umem = umem;
+ } else {
+ cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
+ DMA_FROM_DEVICE);
+ if (!cq->cpu_addr) {
+ err = -ENOMEM;
+ goto err_out;
+ }
}
params.uarn = cq->ucontext->uarn;
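Before adopting caller-supplied CQ memory, the umem has to be large enough for the ring and physically contiguous, since the device is handed a single base DMA address rather than a page list. A sketch of that validation, using only the helpers visible in this hunk (the wrapper name is hypothetical):

/* Sketch: external CQ memory must cover the ring and be contiguous. */
static int efa_cq_adopt_umem(struct efa_cq *cq, struct ib_umem *umem)
{
	if (umem->length < cq->size || !ib_umem_is_contiguous(umem))
		return -EINVAL;

	cq->cpu_addr = NULL;	/* no kernel mapping for external memory */
	cq->dma_addr = ib_umem_start_dma_addr(umem);
	cq->umem = umem;
	return 0;
}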
@@ -1223,7 +1251,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
err = efa_com_create_cq(&dev->edev, &params, &result);
if (err)
- goto err_free_mapped;
+ goto err_free_mem;
resp.db_off = result.db_off;
resp.cq_idx = result.cq_idx;
@@ -1231,7 +1259,9 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->ibcq.cqe = result.actual_depth;
WARN_ON_ONCE(entries != result.actual_depth);
- err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
+ if (!umem)
+ err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
+
if (err) {
ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
cq->cq_idx);
@@ -1269,15 +1299,23 @@ err_remove_mmap:
efa_cq_user_mmap_entries_remove(cq);
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
-err_free_mapped:
- efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
- DMA_FROM_DEVICE);
+err_free_mem:
+ if (umem)
+ ib_umem_release(umem);
+ else
+ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
err_out:
atomic64_inc(&dev->stats.create_cq_err);
return err;
}
+int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ return efa_create_cq_umem(ibcq, attr, NULL, attrs);
+}
+
static int umem_to_page_list(struct efa_dev *dev,
struct ib_umem *umem,
u64 *page_list,
@@ -1727,6 +1765,7 @@ static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs)
{
struct efa_dev *dev = to_edev(ibpd->device);
@@ -1734,6 +1773,11 @@ struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
struct efa_mr *mr;
int err;
+ if (dmah) {
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
mr = efa_alloc_mr(ibpd, access_flags, &attrs->driver_udata);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
@@ -1766,12 +1810,18 @@ err_out:
struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct efa_dev *dev = to_edev(ibpd->device);
struct efa_mr *mr;
int err;
+ if (dmah) {
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
mr = efa_alloc_mr(ibpd, access_flags, udata);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
@@ -2186,6 +2236,7 @@ static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
struct efa_com_rdma_write_stats *rws;
struct efa_com_rdma_read_stats *rrs;
struct efa_com_messages_stats *ms;
+ struct efa_com_network_stats *ns;
struct efa_com_basic_stats *bs;
int err;
@@ -2238,6 +2289,18 @@ static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
stats->value[EFA_RDMA_WRITE_RECV_BYTES] = rws->write_recv_bytes;
}
+ params.type = EFA_ADMIN_GET_STATS_TYPE_NETWORK;
+ err = efa_com_get_stats(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ ns = &result.network_stats;
+ stats->value[EFA_RETRANS_BYTES] = ns->retrans_bytes;
+ stats->value[EFA_RETRANS_PKTS] = ns->retrans_pkts;
+ stats->value[EFA_RETRANS_TIMEOUT_EVENTS] = ns->retrans_timeout_events;
+ stats->value[EFA_UNRESPONSIVE_REMOTE_EVENTS] = ns->unresponsive_remote_events;
+ stats->value[EFA_IMPAIRED_REMOTE_CONN_EVENTS] = ns->impaired_remote_conn_events;
+
return ARRAY_SIZE(efa_port_stats_descs);
}
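Every statistics family is fetched with the same admin command, varying only params.type, so the network counters are one more iteration of an existing pattern. A hedged sketch of the per-family loop (the table is illustrative; the driver itself open-codes the sequence):

/* Sketch: one admin round-trip per stats family. */
static const enum efa_admin_get_stats_type efa_stat_types[] = {
	EFA_ADMIN_GET_STATS_TYPE_BASIC,
	EFA_ADMIN_GET_STATS_TYPE_MESSAGES,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_READ,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE,
	EFA_ADMIN_GET_STATS_TYPE_NETWORK,
};

for (i = 0; i < ARRAY_SIZE(efa_stat_types); i++) {
	params.type = efa_stat_types[i];
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;
	/* copy the matching result.*_stats fields into stats->value[] */
}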
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index af36a8d2df22..94c211df09d8 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -629,7 +629,8 @@ err_free_mtt:
static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev,
struct erdma_mtt *mtt)
{
- dma_unmap_sg(&dev->pdev->dev, mtt->sglist, mtt->nsg, DMA_TO_DEVICE);
+ dma_unmap_sg(&dev->pdev->dev, mtt->sglist,
+ DIV_ROUND_UP(mtt->size, PAGE_SIZE), DMA_TO_DEVICE);
vfree(mtt->sglist);
}
@@ -1199,13 +1200,17 @@ int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
}
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
- u64 virt, int access, struct ib_udata *udata)
+ u64 virt, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
struct erdma_mr *mr = NULL;
struct erdma_dev *dev = to_edev(ibpd->device);
u32 stag;
int ret;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (!len || len > dev->attrs.max_mr_size)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index f9408ccc8bad..ef411b81fbd7 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -452,7 +452,8 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext);
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
- u64 virt, int access, struct ib_udata *udata);
+ u64 virt, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata);
struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 7ead8746b79b..ee7fedc67b86 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -92,9 +92,7 @@ static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
- int possible, curr_cpu, i, ht;
-
- cpumask_clear(&node_affinity.real_cpu_mask);
+ int possible, curr_cpu, ht;
/* Start with cpu online mask as the real cpu mask */
cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);
@@ -110,17 +108,10 @@ void init_real_cpu_mask(void)
* "real" cores. Assumes that HT cores are not enumerated in
* succession (except in the single core case).
*/
- curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
- for (i = 0; i < possible / ht; i++)
- curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
- /*
- * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
- * skip any gaps.
- */
- for (; i < possible; i++) {
- cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
- curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
- }
+ curr_cpu = cpumask_nth(possible / ht, &node_affinity.real_cpu_mask) + 1;
+
+ /* Step 2. Remove the remaining HT siblings. */
+ cpumask_clear_cpus(&node_affinity.real_cpu_mask, curr_cpu, nr_cpu_ids - curr_cpu);
}
int node_affinity_init(void)
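The rewrite relies on the stated layout assumption (HT siblings enumerated after all real cores): the first possible/ht set bits are kept and everything from that point on is cleared in one call, replacing two hand-rolled cpumask walks. A standalone worked example of the arithmetic, using plain integers rather than the cpumask API:

#include <stdio.h>

int main(void)
{
	int possible = 8, ht = 2;	/* 8 HW threads, 2 per core */
	int keep = possible / ht;	/* CPUs 0..3 are the "real" cores */
	int cpu;

	for (cpu = 0; cpu < possible; cpu++)
		printf("cpu%d: %s\n", cpu, cpu < keep ? "kept" : "cleared");
	return 0;
}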
@@ -346,9 +337,10 @@ static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
&entry->def_intr.used);
/* If there are non-interrupt CPUs available, use them first */
- if (!cpumask_empty(non_intr_cpus))
- cpu = cpumask_first(non_intr_cpus);
- else /* Otherwise, use interrupt CPUs */
+ cpu = cpumask_first(non_intr_cpus);
+
+ /* Otherwise, use interrupt CPUs */
+ if (cpu >= nr_cpu_ids)
cpu = cpumask_first(available_cpus);
if (cpu >= nr_cpu_ids) { /* empty */
@@ -963,32 +955,23 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
struct hfi1_affinity_node_list *affinity)
{
- int possible, curr_cpu, i;
- uint num_cores_per_socket = node_affinity.num_online_cpus /
- affinity->num_core_siblings /
- node_affinity.num_online_nodes;
+ int curr_cpu;
+ uint num_cores;
cpumask_copy(hw_thread_mask, &affinity->proc.mask);
- if (affinity->num_core_siblings > 0) {
- /* Removing other siblings not needed for now */
- possible = cpumask_weight(hw_thread_mask);
- curr_cpu = cpumask_first(hw_thread_mask);
- for (i = 0;
- i < num_cores_per_socket * node_affinity.num_online_nodes;
- i++)
- curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
-
- for (; i < possible; i++) {
- cpumask_clear_cpu(curr_cpu, hw_thread_mask);
- curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
- }
- /* Identifying correct HW threads within physical cores */
- cpumask_shift_left(hw_thread_mask, hw_thread_mask,
- num_cores_per_socket *
- node_affinity.num_online_nodes *
- hw_thread_no);
- }
+ if (affinity->num_core_siblings == 0)
+ return;
+
+ num_cores = rounddown(node_affinity.num_online_cpus / affinity->num_core_siblings,
+ node_affinity.num_online_nodes);
+
+ /* Removing other siblings not needed for now */
+ curr_cpu = cpumask_nth(num_cores * node_affinity.num_online_nodes, hw_thread_mask) + 1;
+ cpumask_clear_cpus(hw_thread_mask, curr_cpu, nr_cpu_ids - curr_cpu);
+
+ /* Identifying correct HW threads within physical cores */
+ cpumask_shift_left(hw_thread_mask, hw_thread_mask, num_cores * hw_thread_no);
}
int hfi1_get_proc_affinity(int node)
@@ -1087,22 +1070,19 @@ int hfi1_get_proc_affinity(int node)
* If HT cores are enabled, identify which HW threads within the
* physical cores should be used.
*/
- if (affinity->num_core_siblings > 0) {
- for (i = 0; i < affinity->num_core_siblings; i++) {
- find_hw_thread_mask(i, hw_thread_mask, affinity);
+ for (i = 0; i < affinity->num_core_siblings; i++) {
+ find_hw_thread_mask(i, hw_thread_mask, affinity);
- /*
- * If there's at least one available core for this HW
- * thread number, stop looking for a core.
- *
- * diff will always be not empty at least once in this
- * loop as the used mask gets reset when
- * (set->mask == set->used) before this loop.
- */
- cpumask_andnot(diff, hw_thread_mask, &set->used);
- if (!cpumask_empty(diff))
- break;
- }
+ /*
+ * If there's at least one available core for this HW
+ * thread number, stop looking for a core.
+ *
+ * diff will always be not empty at least once in this
+ * loop as the used mask gets reset when
+ * (set->mask == set->used) before this loop.
+ */
+ if (cpumask_andnot(diff, hw_thread_mask, &set->used))
+ break;
}
hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
cpumask_pr_args(hw_thread_mask));
@@ -1133,8 +1113,7 @@ int hfi1_get_proc_affinity(int node)
* used for process assignments using the same method as
* the preferred NUMA node.
*/
- cpumask_andnot(diff, available_mask, intrs_mask);
- if (!cpumask_empty(diff))
+ if (cpumask_andnot(diff, available_mask, intrs_mask))
cpumask_copy(available_mask, diff);
/* If we don't have CPUs on the preferred node, use other NUMA nodes */
@@ -1150,8 +1129,7 @@ int hfi1_get_proc_affinity(int node)
* At first, we don't want to place processes on the same
* CPUs as interrupt handlers.
*/
- cpumask_andnot(diff, available_mask, intrs_mask);
- if (!cpumask_empty(diff))
+ if (cpumask_andnot(diff, available_mask, intrs_mask))
cpumask_copy(available_mask, diff);
}
hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 1dcc9cbb4678..78ee04a48a74 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -316,16 +316,6 @@ struct hns_roce_mtr {
struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */
};
-struct hns_roce_mw {
- struct ib_mw ibmw;
- u32 pdn;
- u32 rkey;
- int enabled; /* MW's active status */
- u32 pbl_hop_num;
- u32 pbl_ba_pg_sz;
- u32 pbl_buf_pg_sz;
-};
-
struct hns_roce_mr {
struct ib_mr ibmr;
u64 iova; /* MR's virtual original addr */
@@ -856,6 +846,7 @@ struct hns_roce_caps {
u16 default_ceq_arm_st;
u8 cong_cap;
enum hns_roce_cong_type default_cong_type;
+ u32 max_ack_req_msg_len;
};
enum hns_roce_device_state {
@@ -933,7 +924,6 @@ struct hns_roce_hw {
struct hns_roce_mr *mr, int flags,
void *mb_buf);
int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
- int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle);
@@ -1078,11 +1068,6 @@ static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
return container_of(ibmr, struct hns_roce_mr, ibmr);
}
-static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
-{
- return container_of(ibmw, struct hns_roce_mw, ibmw);
-}
-
static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct hns_roce_qp, ibqp);
@@ -1234,6 +1219,7 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
u64 length, u64 virt_addr,
@@ -1246,9 +1232,6 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
unsigned long key_to_hw_index(u32 key);
-int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
-int hns_roce_dealloc_mw(struct ib_mw *ibmw);
-
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
u32 page_shift, u32 flags);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index ca0798224e56..3d479c63b117 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -249,15 +249,12 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
}
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
- unsigned long hem_alloc_size,
- gfp_t gfp_mask)
+ unsigned long hem_alloc_size)
{
struct hns_roce_hem *hem;
int order;
void *buf;
- WARN_ON(gfp_mask & __GFP_HIGHMEM);
-
order = get_order(hem_alloc_size);
if (PAGE_SIZE << order != hem_alloc_size) {
dev_err(hr_dev->dev, "invalid hem_alloc_size: %lu!\n",
@@ -265,13 +262,12 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
return NULL;
}
- hem = kmalloc(sizeof(*hem),
- gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ hem = kmalloc(sizeof(*hem), GFP_KERNEL);
if (!hem)
return NULL;
buf = dma_alloc_coherent(hr_dev->dev, hem_alloc_size,
- &hem->dma, gfp_mask);
+ &hem->dma, GFP_KERNEL);
if (!buf)
goto fail;
@@ -378,7 +374,6 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
{
u32 bt_size = mhop->bt_chunk_size;
struct device *dev = hr_dev->dev;
- gfp_t flag;
u64 bt_ba;
u32 size;
int ret;
@@ -417,8 +412,7 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
* alloc bt space chunk for MTT/CQE.
*/
size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
- flag = GFP_KERNEL | __GFP_NOWARN;
- table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size, flag);
+ table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size);
if (!table->hem[index->buf]) {
ret = -ENOMEM;
goto err_alloc_hem;
@@ -546,9 +540,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
goto out;
}
- table->hem[i] = hns_roce_alloc_hem(hr_dev,
- table->table_chunk_size,
- GFP_KERNEL | __GFP_NOWARN);
+ table->hem[i] = hns_roce_alloc_hem(hr_dev, table->table_chunk_size);
if (!table->hem[i]) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index fa8747656f25..64bca08f3f1a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -144,7 +144,7 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
u64 pbl_ba;
/* use ib_access_flags */
- hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
+ hr_reg_write_bool(fseg, FRMR_BIND_EN, 0);
hr_reg_write_bool(fseg, FRMR_ATOMIC,
wr->access & IB_ACCESS_REMOTE_ATOMIC);
hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
@@ -2196,31 +2196,36 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
+ struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM] = {};
struct hns_roce_caps *caps = &hr_dev->caps;
struct hns_roce_query_pf_caps_a *resp_a;
struct hns_roce_query_pf_caps_b *resp_b;
struct hns_roce_query_pf_caps_c *resp_c;
struct hns_roce_query_pf_caps_d *resp_d;
struct hns_roce_query_pf_caps_e *resp_e;
+ struct hns_roce_query_pf_caps_f *resp_f;
enum hns_roce_opcode_type cmd;
int ctx_hop_num;
int pbl_hop_num;
+ int cmd_num;
int ret;
int i;
cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM :
HNS_ROCE_OPC_QUERY_PF_CAPS_NUM;
+ cmd_num = hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
+ HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 :
+ HNS_ROCE_QUERY_PF_CAPS_CMD_NUM;
- for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
+ for (i = 0; i < cmd_num - 1; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i], cmd, true);
- if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
- desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
}
- ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
+ hns_roce_cmq_setup_basic_desc(&desc[cmd_num - 1], cmd, true);
+ desc[cmd_num - 1].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+ ret = hns_roce_cmq_send(hr_dev, desc, cmd_num);
if (ret)
return ret;
@@ -2229,6 +2234,7 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
+ resp_f = (struct hns_roce_query_pf_caps_f *)desc[5].data;
caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
@@ -2293,6 +2299,8 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
+ caps->max_ack_req_msg_len = le32_to_cpu(resp_f->max_ack_req_msg_len);
+
caps->qpc_hop_num = ctx_hop_num;
caps->sccc_hop_num = ctx_hop_num;
caps->srqc_hop_num = ctx_hop_num;
@@ -2627,7 +2635,7 @@ static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
struct ib_pd *pd;
hr_pd = kzalloc(sizeof(*hr_pd), GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(hr_pd))
+ if (!hr_pd)
return NULL;
pd = &hr_pd->ibpd;
pd->device = ibdev;
@@ -2658,7 +2666,7 @@ static struct ib_cq *free_mr_init_cq(struct hns_roce_dev *hr_dev)
cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(hr_cq))
+ if (!hr_cq)
return NULL;
cq = &hr_cq->ib_cq;
@@ -2691,7 +2699,7 @@ static int free_mr_init_qp(struct hns_roce_dev *hr_dev, struct ib_cq *cq,
int ret;
hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(hr_qp))
+ if (!hr_qp)
return -ENOMEM;
qp = &hr_qp->ibqp;
@@ -2986,14 +2994,22 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
int ret;
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ ret = free_mr_init(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to init free mr!\n");
+ return ret;
+ }
+ }
+
/* The hns ROCEE requires the extdb info to be cleared before using */
ret = hns_roce_clear_extdb_list_info(hr_dev);
if (ret)
- return ret;
+ goto err_clear_extdb_failed;
ret = get_hem_table(hr_dev);
if (ret)
- return ret;
+ goto err_get_hem_table_failed;
if (hr_dev->is_vf)
return 0;
@@ -3008,6 +3024,11 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
err_llm_init_failed:
put_hem_table(hr_dev);
+err_get_hem_table_failed:
+ hns_roce_function_clear(hr_dev);
+err_clear_extdb_failed:
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ free_mr_exit(hr_dev);
return ret;
}
@@ -3313,8 +3334,6 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
hr_reg_write(mpt_entry, MPT_PD, mr->pd);
- hr_reg_write_bool(mpt_entry, MPT_BIND_EN,
- mr->access & IB_ACCESS_MW_BIND);
hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN,
mr->access & IB_ACCESS_REMOTE_ATOMIC);
hr_reg_write_bool(mpt_entry, MPT_RR_EN,
@@ -3358,8 +3377,6 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
hr_reg_write(mpt_entry, MPT_PD, mr->pd);
if (flags & IB_MR_REREG_ACCESS) {
- hr_reg_write(mpt_entry, MPT_BIND_EN,
- (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
hr_reg_write(mpt_entry, MPT_RR_EN,
@@ -3397,7 +3414,6 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
hr_reg_enable(mpt_entry, MPT_R_INV_EN);
hr_reg_enable(mpt_entry, MPT_FRE);
- hr_reg_clear(mpt_entry, MPT_MR_MW);
hr_reg_enable(mpt_entry, MPT_BPD);
hr_reg_clear(mpt_entry, MPT_PA);
@@ -3417,38 +3433,6 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
return 0;
}
-static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
-{
- struct hns_roce_v2_mpt_entry *mpt_entry;
-
- mpt_entry = mb_buf;
- memset(mpt_entry, 0, sizeof(*mpt_entry));
-
- hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
- hr_reg_write(mpt_entry, MPT_PD, mw->pdn);
-
- hr_reg_enable(mpt_entry, MPT_R_INV_EN);
- hr_reg_enable(mpt_entry, MPT_LW_EN);
-
- hr_reg_enable(mpt_entry, MPT_MR_MW);
- hr_reg_enable(mpt_entry, MPT_BPD);
- hr_reg_clear(mpt_entry, MPT_PA);
- hr_reg_write(mpt_entry, MPT_BQP,
- mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
-
- mpt_entry->lkey = cpu_to_le32(mw->rkey);
-
- hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM,
- mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
- mw->pbl_hop_num);
- hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
- mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
- hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
- mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
-
- return 0;
-}
-
static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
@@ -3849,7 +3833,6 @@ static const u32 wc_send_op_map[] = {
HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD),
HR_WC_OP_MAP(FAST_REG_PMR, REG_MR),
- HR_WC_OP_MAP(BIND_MW, REG_MR),
};
static int to_ib_wc_send_op(u32 hr_opcode)
@@ -4560,7 +4543,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
enum ib_mtu ib_mtu;
+ u8 ack_req_freq;
const u8 *smac;
+ int lp_msg_len;
u8 lp_pktn_ini;
u64 *mtts;
u8 *dmac;
@@ -4643,7 +4628,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
return -EINVAL;
#define MIN_LP_MSG_LEN 1024
/* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
- lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu);
+ lp_msg_len = max(mtu, MIN_LP_MSG_LEN);
+ lp_pktn_ini = ilog2(lp_msg_len / mtu);
if (attr_mask & IB_QP_PATH_MTU) {
hr_reg_write(context, QPC_MTU, ib_mtu);
@@ -4653,8 +4639,22 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
- /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
- hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini);
+ /*
+ * There are several constraints for ACK_REQ_FREQ:
+ * 1. mtu * (2 ^ ACK_REQ_FREQ) should not be too large, otherwise
+ * it may cause some unexpected retries when sending large
+ * payloads.
+ * 2. ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI.
+ * 3. ACK_REQ_FREQ must be equal to LP_PKTN_INI when using LDCP
+ * or HC3 congestion control algorithm.
+ */
+ if (hr_qp->cong_type == CONG_TYPE_LDCP ||
+ hr_qp->cong_type == CONG_TYPE_HC3 ||
+ hr_dev->caps.max_ack_req_msg_len < lp_msg_len)
+ ack_req_freq = lp_pktn_ini;
+ else
+ ack_req_freq = ilog2(hr_dev->caps.max_ack_req_msg_len / mtu);
+ hr_reg_write(context, QPC_ACK_REQ_FREQ, ack_req_freq);
hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);
hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR);
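Concretely, with mtu = 1024 and max_ack_req_msg_len = 64 KiB: lp_msg_len = max(1024, 1024) = 1024, so lp_pktn_ini = ilog2(1) = 0, while the relaxed branch gives ack_req_freq = ilog2(65536 / 1024) = 6, i.e. one ACK request per 2^6 packets instead of per packet. A standalone sketch of the selection logic (illustrative values, not from the source):

#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int mtu = 1024, min_lp_msg_len = 1024;
	unsigned int max_ack_req_msg_len = 65536;
	unsigned int lp_msg_len = mtu > min_lp_msg_len ? mtu : min_lp_msg_len;
	unsigned int lp_pktn_ini = ilog2_u(lp_msg_len / mtu);
	unsigned int ack_req_freq;
	int strict_cong = 0;	/* 1 when using LDCP or HC3 */

	if (strict_cong || max_ack_req_msg_len < lp_msg_len)
		ack_req_freq = lp_pktn_ini;
	else
		ack_req_freq = ilog2_u(max_ack_req_msg_len / mtu);

	printf("lp_pktn_ini=%u ack_req_freq=%u\n", lp_pktn_ini, ack_req_freq);
	return 0;
}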
@@ -5349,11 +5349,10 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_v2_qp_context ctx[2];
- struct hns_roce_v2_qp_context *context = ctx;
- struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
+ struct hns_roce_v2_qp_context *context;
+ struct hns_roce_v2_qp_context *qpc_mask;
struct ib_device *ibdev = &hr_dev->ib_dev;
- int ret;
+ int ret = -ENOMEM;
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
@@ -5364,7 +5363,11 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
- memset(context, 0, hr_dev->caps.qpc_sz);
+ context = kvzalloc(sizeof(*context), GFP_KERNEL);
+ qpc_mask = kvzalloc(sizeof(*qpc_mask), GFP_KERNEL);
+ if (!context || !qpc_mask)
+ goto out;
+
memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
@@ -5406,6 +5409,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
clear_qp(hr_qp);
out:
+ kvfree(qpc_mask);
+ kvfree(context);
return ret;
}
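Two qp contexts previously lived on the kernel stack; kvzalloc() moves them to the heap, avoiding a large stack frame, and because kvfree(NULL) is a no-op both allocations can share a single unwind path with ret pre-set to -ENOMEM. A sketch of the pattern (the helper shape is hypothetical):

/* Sketch: paired heap buffers with one error exit. */
static int qp_ctx_pair(size_t qpc_sz)
{
	void *context = kvzalloc(qpc_sz, GFP_KERNEL);
	void *qpc_mask = kvzalloc(qpc_sz, GFP_KERNEL);
	int ret = -ENOMEM;

	if (!context || !qpc_mask)
		goto out;
	memset(qpc_mask, 0xff, qpc_sz);	/* all-ones mask: leave fields untouched */
	ret = 0;
	/* ... fill context/qpc_mask and post the mailbox command ... */
out:
	kvfree(qpc_mask);	/* safe even when NULL */
	kvfree(context);
	return ret;
}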
@@ -6948,7 +6953,6 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.write_mtpt = hns_roce_v2_write_mtpt,
.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
- .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
.write_cqc = hns_roce_v2_write_cqc,
.set_hem = hns_roce_v2_set_hem,
.clear_hem = hns_roce_v2_clear_hem,
@@ -7044,21 +7048,11 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto error_failed_roce_init;
}
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
- ret = free_mr_init(hr_dev);
- if (ret) {
- dev_err(hr_dev->dev, "failed to init free mr!\n");
- goto error_failed_free_mr_init;
- }
- }
handle->priv = hr_dev;
return 0;
-error_failed_free_mr_init:
- hns_roce_exit(hr_dev);
-
error_failed_roce_init:
kfree(hr_dev->priv);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index bc7466830eaf..e64a04d6f85b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -814,24 +814,16 @@ struct hns_roce_v2_mpt_entry {
#define V2_MPT_BYTE_8_LW_EN_S 7
-#define V2_MPT_BYTE_8_MW_CNT_S 8
-#define V2_MPT_BYTE_8_MW_CNT_M GENMASK(31, 8)
-
#define V2_MPT_BYTE_12_FRE_S 0
#define V2_MPT_BYTE_12_PA_S 1
-#define V2_MPT_BYTE_12_MR_MW_S 4
-
#define V2_MPT_BYTE_12_BPD_S 5
#define V2_MPT_BYTE_12_BQP_S 6
#define V2_MPT_BYTE_12_INNER_PA_VLD_S 7
-#define V2_MPT_BYTE_12_MW_BIND_QPN_S 8
-#define V2_MPT_BYTE_12_MW_BIND_QPN_M GENMASK(31, 8)
-
#define V2_MPT_BYTE_48_PBL_BA_H_S 0
#define V2_MPT_BYTE_48_PBL_BA_H_M GENMASK(28, 0)
@@ -1168,7 +1160,8 @@ struct hns_roce_cfg_gmv_tb_b {
#define GMV_TB_B_SMAC_H GMV_TB_B_FIELD_LOC(47, 32)
#define GMV_TB_B_SGID_IDX GMV_TB_B_FIELD_LOC(71, 64)
-#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5
+#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 5
+#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 6
struct hns_roce_query_pf_caps_a {
u8 number_ports;
u8 local_ca_ack_delay;
@@ -1280,6 +1273,11 @@ struct hns_roce_query_pf_caps_e {
__le16 aeq_period;
};
+struct hns_roce_query_pf_caps_f {
+ __le32 max_ack_req_msg_len;
+ __le32 rsv[5];
+};
+
#define PF_CAPS_E_FIELD_LOC(h, l) \
FIELD_LOC(struct hns_roce_query_pf_caps_e, h, l)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index e7a497cc125c..d50f36f8a110 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -672,13 +672,6 @@ static const struct ib_device_ops hns_roce_dev_mr_ops = {
.rereg_user_mr = hns_roce_rereg_user_mr,
};
-static const struct ib_device_ops hns_roce_dev_mw_ops = {
- .alloc_mw = hns_roce_alloc_mw,
- .dealloc_mw = hns_roce_dealloc_mw,
-
- INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
-};
-
static const struct ib_device_ops hns_roce_dev_frmr_ops = {
.alloc_mr = hns_roce_alloc_mr,
.map_mr_sg = hns_roce_map_mr_sg,
@@ -732,9 +725,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
- ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
-
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);
@@ -947,10 +937,7 @@ err_unmap_dmpt:
static void hns_roce_teardown_hca(struct hns_roce_dev *hr_dev)
{
hns_roce_cleanup_bitmap(hr_dev);
-
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
- hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
- mutex_destroy(&hr_dev->pgdir_mutex);
+ mutex_destroy(&hr_dev->pgdir_mutex);
}
/**
@@ -965,11 +952,11 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
spin_lock_init(&hr_dev->sm_lock);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
- hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
- INIT_LIST_HEAD(&hr_dev->pgdir_list);
- mutex_init(&hr_dev->pgdir_mutex);
- }
+ INIT_LIST_HEAD(&hr_dev->qp_list);
+ spin_lock_init(&hr_dev->qp_list_lock);
+
+ INIT_LIST_HEAD(&hr_dev->pgdir_list);
+ mutex_init(&hr_dev->pgdir_mutex);
hns_roce_init_uar_table(hr_dev);
@@ -1001,9 +988,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
err_uar_table_free:
ida_destroy(&hr_dev->uar_ida.ida);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
- hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
- mutex_destroy(&hr_dev->pgdir_mutex);
+ mutex_destroy(&hr_dev->pgdir_mutex);
return ret;
}
@@ -1132,9 +1117,6 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
}
}
- INIT_LIST_HEAD(&hr_dev->qp_list);
- spin_lock_init(&hr_dev->qp_list_lock);
-
ret = hns_roce_register_device(hr_dev);
if (ret)
goto error_failed_register_device;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 93a48b41955b..0f037e545520 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -231,12 +231,18 @@ err_free:
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_mr *mr;
int ret;
+ if (dmah) {
+ ret = -EOPNOTSUPP;
+ goto err_out;
+ }
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) {
ret = -ENOMEM;
@@ -483,120 +489,6 @@ err_page_list:
return sg_num;
}
-static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
- struct hns_roce_mw *mw)
-{
- struct device *dev = hr_dev->dev;
- int ret;
-
- if (mw->enabled) {
- ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
- key_to_hw_index(mw->rkey) &
- (hr_dev->caps.num_mtpts - 1));
- if (ret)
- dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);
-
- hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
- key_to_hw_index(mw->rkey));
- }
-
- ida_free(&hr_dev->mr_table.mtpt_ida.ida,
- (int)key_to_hw_index(mw->rkey));
-}
-
-static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
- struct hns_roce_mw *mw)
-{
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- struct hns_roce_cmd_mailbox *mailbox;
- struct device *dev = hr_dev->dev;
- unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
- int ret;
-
- /* prepare HEM entry memory */
- ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
- if (ret)
- return ret;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox)) {
- ret = PTR_ERR(mailbox);
- goto err_table;
- }
-
- ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
- if (ret) {
- dev_err(dev, "MW write mtpt fail!\n");
- goto err_page;
- }
-
- ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
- mtpt_idx & (hr_dev->caps.num_mtpts - 1));
- if (ret) {
- dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
- goto err_page;
- }
-
- mw->enabled = 1;
-
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
- return 0;
-
-err_page:
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
-err_table:
- hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
-
- return ret;
-}
-
-int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
- struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_mw *mw = to_hr_mw(ibmw);
- int ret;
- int id;
-
- /* Allocate a key for mw from mr_table */
- id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
- GFP_KERNEL);
- if (id < 0) {
- ibdev_err(ibdev, "failed to alloc id for MW key, id(%d)\n", id);
- return -ENOMEM;
- }
-
- mw->rkey = hw_index_to_key(id);
-
- ibmw->rkey = mw->rkey;
- mw->pdn = to_hr_pd(ibmw->pd)->pdn;
- mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
- mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
- mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
-
- ret = hns_roce_mw_enable(hr_dev, mw);
- if (ret)
- goto err_mw;
-
- return 0;
-
-err_mw:
- hns_roce_mw_free(hr_dev, mw);
- return ret;
-}
-
-int hns_roce_dealloc_mw(struct ib_mw *ibmw)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
- struct hns_roce_mw *mw = to_hr_mw(ibmw);
-
- hns_roce_mw_free(hr_dev, mw);
- return 0;
-}
-
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct hns_roce_buf_region *region, dma_addr_t *pages,
int max_count)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9f376a2232b0..6ff1b8ce580c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1003,14 +1003,14 @@ static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
int ret;
sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(sq_wrid)) {
+ if (!sq_wrid) {
ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
return -ENOMEM;
}
if (hr_qp->rq.wqe_cnt) {
rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(rq_wrid)) {
+ if (!rq_wrid) {
ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
ret = -ENOMEM;
goto err_sq;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 1e8c92826de2..da5a41b275d8 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -3013,10 +3013,12 @@ static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
* @len: length of mr
* @virt: virtual address
* @access: access of mr
+ * @dmah: dma handle
* @udata: user data
*/
static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
u64 virt, int access,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
@@ -3026,6 +3028,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
struct irdma_mr *iwmr = NULL;
int err;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return ERR_PTR(-EINVAL);
@@ -3085,6 +3090,7 @@ error:
static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
u64 len, u64 virt,
int fd, int access,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs)
{
struct irdma_device *iwdev = to_iwdev(pd->device);
@@ -3092,6 +3098,9 @@ static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
struct irdma_mr *iwmr;
int err;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/mana/counters.c b/drivers/infiniband/hw/mana/counters.c
index e533ce21013d..e964e74be48d 100644
--- a/drivers/infiniband/hw/mana/counters.c
+++ b/drivers/infiniband/hw/mana/counters.c
@@ -32,8 +32,32 @@ static const struct rdma_stat_desc mana_ib_port_stats_desc[] = {
[MANA_IB_RATE_INC_EVENTS].name = "rate_inc_events",
[MANA_IB_NUM_QPS_RECOVERED].name = "num_qps_recovered",
[MANA_IB_CURRENT_RATE].name = "current_rate",
+ [MANA_IB_DUP_RX_REQ].name = "dup_rx_requests",
+ [MANA_IB_TX_BYTES].name = "tx_bytes",
+ [MANA_IB_RX_BYTES].name = "rx_bytes",
+ [MANA_IB_RX_SEND_REQ].name = "rx_send_requests",
+ [MANA_IB_RX_WRITE_REQ].name = "rx_write_requests",
+ [MANA_IB_RX_READ_REQ].name = "rx_read_requests",
+ [MANA_IB_TX_PKT].name = "tx_packets",
+ [MANA_IB_RX_PKT].name = "rx_packets",
};
+static const struct rdma_stat_desc mana_ib_device_stats_desc[] = {
+ [MANA_IB_SENT_CNPS].name = "sent_cnps",
+ [MANA_IB_RECEIVED_ECNS].name = "received_ecns",
+ [MANA_IB_RECEIVED_CNP_COUNT].name = "received_cnp_count",
+ [MANA_IB_QP_CONGESTED_EVENTS].name = "qp_congested_events",
+ [MANA_IB_QP_RECOVERED_EVENTS].name = "qp_recovered_events",
+ [MANA_IB_DEV_RATE_INC_EVENTS].name = "rate_inc_events",
+};
+
+struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev)
+{
+ return rdma_alloc_hw_stats_struct(mana_ib_device_stats_desc,
+ ARRAY_SIZE(mana_ib_device_stats_desc),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
@@ -42,8 +66,39 @@ struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
-int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u32 port_num, int index)
+static int mana_ib_get_hw_device_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats)
+{
+ struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
+ ib_dev);
+ struct mana_rnic_query_device_cntrs_resp resp = {};
+ struct mana_rnic_query_device_cntrs_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_DEVICE_COUNTERS,
+ sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+
+ err = mana_gd_send_request(mdev_to_gc(mdev), sizeof(req), &req,
+ sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to query device counters err %d",
+ err);
+ return err;
+ }
+
+ stats->value[MANA_IB_SENT_CNPS] = resp.sent_cnps;
+ stats->value[MANA_IB_RECEIVED_ECNS] = resp.received_ecns;
+ stats->value[MANA_IB_RECEIVED_CNP_COUNT] = resp.received_cnp_count;
+ stats->value[MANA_IB_QP_CONGESTED_EVENTS] = resp.qp_congested_events;
+ stats->value[MANA_IB_QP_RECOVERED_EVENTS] = resp.qp_recovered_events;
+ stats->value[MANA_IB_DEV_RATE_INC_EVENTS] = resp.rate_inc_events;
+
+ return ARRAY_SIZE(mana_ib_device_stats_desc);
+}
+
+static int mana_ib_get_hw_port_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
ib_dev);
@@ -53,6 +108,7 @@ int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_VF_COUNTERS,
sizeof(req), sizeof(resp));
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
@@ -101,5 +157,23 @@ int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
stats->value[MANA_IB_NUM_QPS_RECOVERED] = resp.num_qps_recovered;
stats->value[MANA_IB_CURRENT_RATE] = resp.current_rate;
+ stats->value[MANA_IB_DUP_RX_REQ] = resp.dup_rx_req;
+ stats->value[MANA_IB_TX_BYTES] = resp.tx_bytes;
+ stats->value[MANA_IB_RX_BYTES] = resp.rx_bytes;
+ stats->value[MANA_IB_RX_SEND_REQ] = resp.rx_send_req;
+ stats->value[MANA_IB_RX_WRITE_REQ] = resp.rx_write_req;
+ stats->value[MANA_IB_RX_READ_REQ] = resp.rx_read_req;
+ stats->value[MANA_IB_TX_PKT] = resp.tx_pkt;
+ stats->value[MANA_IB_RX_PKT] = resp.rx_pkt;
+
return ARRAY_SIZE(mana_ib_port_stats_desc);
}
+
+int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ if (!port_num)
+ return mana_ib_get_hw_device_stats(ibdev, stats);
+ else
+ return mana_ib_get_hw_port_stats(ibdev, stats, port_num);
+}
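Once alloc_hw_device_stats is provided, the RDMA core calls get_hw_stats with port_num == 0 for the device-wide group and a 1-based port number otherwise, which is the convention the dispatcher above keys on. A sketch of the corresponding ops wiring (using the allocators declared in this driver):

/* Sketch: exposing both counter groups through ib_device_ops. */
static const struct ib_device_ops stats_ops = {
	.alloc_hw_device_stats = mana_ib_alloc_hw_device_stats,
	.alloc_hw_port_stats = mana_ib_alloc_hw_port_stats,
	.get_hw_stats = mana_ib_get_hw_stats,	/* port 0 => device group */
};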
diff --git a/drivers/infiniband/hw/mana/counters.h b/drivers/infiniband/hw/mana/counters.h
index 7ff92d27f6c3..f68e776bb41d 100644
--- a/drivers/infiniband/hw/mana/counters.h
+++ b/drivers/infiniband/hw/mana/counters.h
@@ -35,10 +35,28 @@ enum mana_ib_port_counters {
MANA_IB_RATE_INC_EVENTS,
MANA_IB_NUM_QPS_RECOVERED,
MANA_IB_CURRENT_RATE,
+ MANA_IB_DUP_RX_REQ,
+ MANA_IB_TX_BYTES,
+ MANA_IB_RX_BYTES,
+ MANA_IB_RX_SEND_REQ,
+ MANA_IB_RX_WRITE_REQ,
+ MANA_IB_RX_READ_REQ,
+ MANA_IB_TX_PKT,
+ MANA_IB_RX_PKT,
+};
+
+enum mana_ib_device_counters {
+ MANA_IB_SENT_CNPS,
+ MANA_IB_RECEIVED_ECNS,
+ MANA_IB_RECEIVED_CNP_COUNT,
+ MANA_IB_QP_CONGESTED_EVENTS,
+ MANA_IB_QP_RECOVERED_EVENTS,
+ MANA_IB_DEV_RATE_INC_EVENTS,
};
struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num);
+struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev);
int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num, int index);
#endif /* _COUNTERS_H_ */
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index 165c0a1e67d1..fa60872f169f 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -65,6 +65,10 @@ static const struct ib_device_ops mana_ib_stats_ops = {
.get_hw_stats = mana_ib_get_hw_stats,
};
+static const struct ib_device_ops mana_ib_device_stats_ops = {
+ .alloc_hw_device_stats = mana_ib_alloc_hw_device_stats,
+};
+
static int mana_ib_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -73,28 +77,31 @@ static int mana_ib_netdev_event(struct notifier_block *this,
struct gdma_context *gc = dev->gdma_dev->gdma_context;
struct mana_context *mc = gc->mana.driver_data;
struct net_device *ndev;
+ int i;
/* Only process events from our parent device */
- if (event_dev != mc->ports[0])
- return NOTIFY_DONE;
-
- switch (event) {
- case NETDEV_CHANGEUPPER:
- ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
- /*
- * RDMA core will setup GID based on updated netdev.
- * It's not possible to race with the core as rtnl lock is being
- * held.
- */
- ib_device_set_netdev(&dev->ib_dev, ndev, 1);
-
- /* mana_get_primary_netdev() returns ndev with refcount held */
- netdev_put(ndev, &dev->dev_tracker);
-
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
+ for (i = 0; i < dev->ib_dev.phys_port_cnt; i++)
+ if (event_dev == mc->ports[i]) {
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker);
+ /*
+ * The RDMA core will set up the GID based on the updated netdev.
+ * This cannot race with the core because the rtnl lock is
+ * held.
+ */
+ ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);
+
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ if (ndev)
+ netdev_put(ndev, &dev->dev_tracker);
+
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+ }
+ return NOTIFY_DONE;
}
static int mana_ib_probe(struct auxiliary_device *adev,
@@ -107,7 +114,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
struct net_device *ndev;
struct mana_ib_dev *dev;
u8 mac_addr[ETH_ALEN];
- int ret;
+ int ret, i;
dev = ib_alloc_device(mana_ib_dev, ib_dev);
if (!dev)
@@ -122,51 +129,56 @@ static int mana_ib_probe(struct auxiliary_device *adev,
if (mana_ib_is_rnic(dev)) {
dev->ib_dev.phys_port_cnt = 1;
- ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
- if (!ndev) {
- ret = -ENODEV;
- ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
- goto free_ib_device;
- }
- ether_addr_copy(mac_addr, ndev->dev_addr);
- addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
- ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
- /* mana_get_primary_netdev() returns ndev with refcount held */
- netdev_put(ndev, &dev->dev_tracker);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
- goto free_ib_device;
- }
-
- dev->nb.notifier_call = mana_ib_netdev_event;
- ret = register_netdevice_notifier(&dev->nb);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
- ret);
- goto free_ib_device;
- }
-
+ addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, mc->ports[0]->dev_addr);
ret = mana_ib_gd_query_adapter_caps(dev);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret);
- goto deregister_net_notifier;
+ goto free_ib_device;
}
ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
+ if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT)
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_device_stats_ops);
ret = mana_ib_create_eqs(dev);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
- goto deregister_net_notifier;
+ goto free_ib_device;
}
ret = mana_ib_gd_create_rnic_adapter(dev);
if (ret)
goto destroy_eqs;
- ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_MULTI_PORTS_SUPPORT)
+ dev->ib_dev.phys_port_cnt = mc->num_ports;
+
+ for (i = 0; i < dev->ib_dev.phys_port_cnt; i++) {
+ ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker);
+ if (!ndev) {
+ ret = -ENODEV;
+ ibdev_err(&dev->ib_dev,
+ "Failed to get netdev for IB port %d", i + 1);
+ goto destroy_rnic;
+ }
+ ether_addr_copy(mac_addr, ndev->dev_addr);
+ ret = ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ netdev_put(ndev, &dev->dev_tracker);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
+ goto destroy_rnic;
+ }
+ ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
+ goto destroy_rnic;
+ }
+ }
+ dev->nb.notifier_call = mana_ib_netdev_event;
+ ret = register_netdevice_notifier(&dev->nb);
if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
+ ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d", ret);
goto destroy_rnic;
}
} else {
@@ -182,7 +194,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
MANA_AV_BUFFER_SIZE, 0);
if (!dev->av_pool) {
ret = -ENOMEM;
- goto destroy_rnic;
+ goto deregister_net_notifier;
}
ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
@@ -199,15 +211,15 @@ static int mana_ib_probe(struct auxiliary_device *adev,
deallocate_pool:
dma_pool_destroy(dev->av_pool);
+deregister_net_notifier:
+ if (mana_ib_is_rnic(dev))
+ unregister_netdevice_notifier(&dev->nb);
destroy_rnic:
if (mana_ib_is_rnic(dev))
mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
if (mana_ib_is_rnic(dev))
mana_ib_destroy_eqs(dev);
-deregister_net_notifier:
- if (mana_ib_is_rnic(dev))
- unregister_netdevice_notifier(&dev->nb);
free_ib_device:
xa_destroy(&dev->qp_table_wq);
ib_dealloc_device(&dev->ib_dev);
@@ -221,9 +233,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
if (mana_ib_is_rnic(dev)) {
+ unregister_netdevice_notifier(&dev->nb);
mana_ib_gd_destroy_rnic_adapter(dev);
mana_ib_destroy_eqs(dev);
- unregister_netdevice_notifier(&dev->nb);
}
xa_destroy(&dev->qp_table_wq);
ib_dealloc_device(&dev->ib_dev);
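The probe rework above also reorders the error labels, since the notifier is now registered last. A small self-contained sketch of the underlying rule, that unwind labels mirror acquisition order in reverse (step/undo are illustrative stubs, not driver code):

#include <stdio.h>

static int step(const char *what, int fail)
{
	printf("acquire %s\n", what);
	return fail ? -1 : 0;
}

static void undo(const char *what)
{
	printf("release %s\n", what);
}

/* Cleanup labels mirror acquisition order in reverse; because the
 * notifier is now registered after the RNIC and EQs, its unwind
 * label had to move above destroy_rnic, as in the hunk above. */
static int probe(int fail_at)
{
	int ret;

	ret = step("eqs", fail_at == 1);
	if (ret)
		goto out;
	ret = step("rnic", fail_at == 2);
	if (ret)
		goto destroy_eqs;
	ret = step("notifier", fail_at == 3);
	if (ret)
		goto destroy_rnic;
	ret = step("av_pool", fail_at == 4);
	if (ret)
		goto deregister_notifier;
	return 0;

deregister_notifier:
	undo("notifier");
destroy_rnic:
	undo("rnic");
destroy_eqs:
	undo("eqs");
out:
	return ret;
}

int main(void)
{
	return probe(4) ? 1 : 0;
}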
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 41a24a186f9d..6a2471f2e804 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -563,8 +563,14 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
immutable->gid_tbl_len = attr.gid_tbl_len;
if (mana_ib_is_rnic(dev)) {
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ if (port_num == 1) {
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ } else {
+ immutable->core_cap_flags = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP
+ | RDMA_CORE_CAP_ETH_AH;
+ immutable->max_mad_size = 0;
+ }
} else {
immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
}
@@ -633,8 +639,9 @@ int mana_ib_query_port(struct ib_device *ibdev, u32 port,
props->pkey_tbl_len = 1;
if (mana_ib_is_rnic(dev)) {
props->gid_tbl_len = 16;
- props->port_cap_flags = IB_PORT_CM_SUP;
props->ip_gids = true;
+ if (port == 1)
+ props->port_cap_flags = IB_PORT_CM_SUP;
}
return 0;
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 42bebd6cd4f7..5d31034ac7fb 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -210,6 +210,7 @@ enum mana_ib_command_code {
MANA_IB_DESTROY_RC_QP = 0x3000b,
MANA_IB_SET_QP_STATE = 0x3000d,
MANA_IB_QUERY_VF_COUNTERS = 0x30022,
+ MANA_IB_QUERY_DEVICE_COUNTERS = 0x30023,
};
struct mana_ib_query_adapter_caps_req {
@@ -218,6 +219,8 @@ struct mana_ib_query_adapter_caps_req {
enum mana_ib_adapter_features {
MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4),
+ MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT = BIT(5),
+ MANA_IB_FEATURE_MULTI_PORTS_SUPPORT = BIT(6),
};
struct mana_ib_query_adapter_caps_resp {
@@ -514,6 +517,31 @@ struct mana_rnic_query_vf_cntrs_resp {
u64 rate_inc_events;
u64 num_qps_recovered;
u64 current_rate;
+ u64 dup_rx_req;
+ u64 tx_bytes;
+ u64 rx_bytes;
+ u64 rx_send_req;
+ u64 rx_write_req;
+ u64 rx_read_req;
+ u64 tx_pkt;
+ u64 rx_pkt;
+}; /* HW Data */
+
+struct mana_rnic_query_device_cntrs_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_query_device_cntrs_resp {
+ struct gdma_resp_hdr hdr;
+ u32 sent_cnps;
+ u32 received_ecns;
+ u32 reserved1;
+ u32 received_cnp_count;
+ u32 qp_congested_events;
+ u32 qp_recovered_events;
+ u32 rate_inc_events;
+ u32 reserved2;
}; /* HW Data */
static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
@@ -605,6 +633,7 @@ struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags);
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 iova, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
@@ -694,5 +723,6 @@ int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int fd, int mr_access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
#endif
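The response structures above are tagged /* HW Data */, and the reserved1/reserved2 words appear to pad the u32 counters so the wire layout stays a multiple of 8 bytes; that alignment intent is an inference, not stated in the patch. A compile-time check of a reduced copy of the layout (the gdma response header is omitted here):

#include <stdint.h>

struct query_device_cntrs_resp {
	uint32_t sent_cnps;
	uint32_t received_ecns;
	uint32_t reserved1;
	uint32_t received_cnp_count;
	uint32_t qp_congested_events;
	uint32_t qp_recovered_events;
	uint32_t rate_inc_events;
	uint32_t reserved2;
};

/* Compile-time checks of the wire layout: no compiler-inserted padding
 * and an 8-byte-multiple total size. */
_Static_assert(sizeof(struct query_device_cntrs_resp) == 8 * sizeof(uint32_t),
	       "unexpected padding in resp");
_Static_assert(sizeof(struct query_device_cntrs_resp) % 8 == 0,
	       "resp size not a multiple of 8");

int main(void) { return 0; }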
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 6d974d0a8400..55701046ffba 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -106,6 +106,7 @@ static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
@@ -116,6 +117,9 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 dma_region_handle;
int err;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
ibdev_dbg(ibdev,
@@ -188,6 +192,7 @@ err_free:
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int fd, int access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs)
{
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
@@ -199,6 +204,9 @@ struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 leng
u64 dma_region_handle;
int err;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
access_flags &= ~IB_ACCESS_OPTIONAL;
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 14fd7d6c54a2..a6bf4d539e67 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -772,7 +772,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
req.ah_attr.dest_port = ROCE_V2_UDP_DPORT;
req.ah_attr.src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
ibqp->qp_num, attr->dest_qp_num);
- req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class;
+ req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
}
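The qp.c hunk stops passing the raw GRH traffic class and shifts it right by two. In the IP header the 8-bit traffic class carries the 6-bit DSCP in its upper bits and the 2 ECN bits in its lower bits, so the shift hands the adapter a DSCP value; that the firmware field expects DSCP rather than the full traffic class is inferred from the change. A tiny demonstration:

#include <stdio.h>
#include <stdint.h>

/* IP traffic class layout: DSCP in bits 7..2, ECN in bits 1..0. */
static uint8_t tclass_to_dscp(uint8_t tclass) { return tclass >> 2; }
static uint8_t tclass_to_ecn(uint8_t tclass)  { return tclass & 0x3; }

int main(void)
{
	uint8_t tclass = 0x62;	/* DSCP 24 (CS3), ECN 2 (ECT(0)) */

	printf("dscp=%u ecn=%u\n", tclass_to_dscp(tclass),
	       tclass_to_ecn(tclass));
	return 0;
}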
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f53b1846594c..5df5b955114e 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -759,6 +759,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem);
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
int mlx4_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index e77645a673fb..94464f1694d9 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -139,6 +139,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(pd->device);
@@ -147,6 +148,9 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int err;
int n;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 11878ddf7cc7..dd7bb377f491 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -8,6 +8,7 @@ mlx5_ib-y := ah.o \
cq.o \
data_direct.o \
dm.o \
+ dmah.o \
doorbell.o \
fs.o \
gsi.o \
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index a506fafd2b15..e042e0719ead 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -16,6 +16,18 @@ struct mlx5_ib_counter {
u32 type;
};
+struct mlx5_rdma_counter {
+ struct rdma_counter rdma_counter;
+
+ struct mlx5_fc *fc[MLX5_IB_OPCOUNTER_MAX];
+ struct xarray qpn_opfc_xa;
+};
+
+static struct mlx5_rdma_counter *to_mcounter(struct rdma_counter *counter)
+{
+ return container_of(counter, struct mlx5_rdma_counter, rdma_counter);
+}
+
#define INIT_Q_COUNTER(_name) \
{ .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
@@ -602,7 +614,7 @@ static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
return 0;
WARN_ON(!xa_empty(&mcounter->qpn_opfc_xa));
- mlx5r_fs_destroy_fcs(dev, counter);
+ mlx5r_fs_destroy_fcs(dev, mcounter->fc);
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id);
@@ -612,6 +624,7 @@ static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
struct ib_qp *qp, u32 port)
{
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
struct mlx5_ib_dev *dev = to_mdev(qp->device);
bool new = false;
int err;
@@ -635,7 +648,11 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
if (err)
goto fail_set_counter;
- err = mlx5r_fs_bind_op_fc(qp, counter, port);
+ if (!counter->mode.bind_opcnt)
+ return 0;
+
+ err = mlx5r_fs_bind_op_fc(qp, mcounter->fc, &mcounter->qpn_opfc_xa,
+ port);
if (err)
goto fail_bind_op_fc;
@@ -655,9 +672,12 @@ fail_set_counter:
static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp, u32 port)
{
struct rdma_counter *counter = qp->counter;
+ struct mlx5_rdma_counter *mcounter;
int err;
- mlx5r_fs_unbind_op_fc(qp, counter);
+ mcounter = to_mcounter(counter);
+
+ mlx5r_fs_unbind_op_fc(qp, &mcounter->qpn_opfc_xa);
err = mlx5_ib_qp_set_counter(qp, NULL);
if (err)
@@ -666,7 +686,9 @@ static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp, u32 port)
return 0;
fail_set_counter:
- mlx5r_fs_bind_op_fc(qp, counter, port);
+ if (counter->mode.bind_opcnt)
+ mlx5r_fs_bind_op_fc(qp, mcounter->fc,
+ &mcounter->qpn_opfc_xa, port);
return err;
}
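to_mcounter() above recovers the driver-private wrapper from an embedded struct rdma_counter. A standalone illustration of the container_of idiom it relies on (mlx5_like_counter is a made-up type for the sketch):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rdma_counter { int id; };

struct mlx5_like_counter {
	struct rdma_counter rdma_counter;	/* embedded base object */
	int private_state;
};

static struct mlx5_like_counter *to_mcounter(struct rdma_counter *c)
{
	return container_of(c, struct mlx5_like_counter, rdma_counter);
}

int main(void)
{
	struct mlx5_like_counter mc = { .rdma_counter.id = 5,
					.private_state = 99 };
	struct rdma_counter *base = &mc.rdma_counter;

	printf("private_state=%d\n", to_mcounter(base)->private_state);
	return 0;
}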
diff --git a/drivers/infiniband/hw/mlx5/counters.h b/drivers/infiniband/hw/mlx5/counters.h
index bd03cee42014..a04e7dd59455 100644
--- a/drivers/infiniband/hw/mlx5/counters.h
+++ b/drivers/infiniband/hw/mlx5/counters.h
@@ -8,19 +8,6 @@
#include "mlx5_ib.h"
-struct mlx5_rdma_counter {
- struct rdma_counter rdma_counter;
-
- struct mlx5_fc *fc[MLX5_IB_OPCOUNTER_MAX];
- struct xarray qpn_opfc_xa;
-};
-
-static inline struct mlx5_rdma_counter *
-to_mcounter(struct rdma_counter *counter)
-{
- return container_of(counter, struct mlx5_rdma_counter, rdma_counter);
-}
-
int mlx5_ib_counters_init(struct mlx5_ib_dev *dev);
void mlx5_ib_counters_cleanup(struct mlx5_ib_dev *dev);
void mlx5_ib_counters_clear_description(struct ib_counters *counters);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 1aa5311b03e9..9c8003a78334 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -1055,20 +1055,31 @@ err_cqb:
return err;
}
-int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+int mlx5_ib_pre_destroy_cq(struct ib_cq *cq)
{
struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq);
+
+ return mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
+}
+
+void mlx5_ib_post_destroy_cq(struct ib_cq *cq)
+{
+ destroy_cq_kernel(to_mdev(cq->device), to_mcq(cq));
+}
+
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+{
int ret;
- ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
+ ret = mlx5_ib_pre_destroy_cq(cq);
if (ret)
return ret;
if (udata)
- destroy_cq_user(mcq, udata);
+ destroy_cq_user(to_mcq(cq), udata);
else
- destroy_cq_kernel(dev, mcq);
+ mlx5_ib_post_destroy_cq(cq);
return 0;
}
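The cq.c change splits destruction into a failable pre step (the firmware destroy command) and an infallible post step (software teardown), so a hardware failure leaves the object fully intact for the caller. A schematic version with stub types:

#include <stdio.h>

struct cq { int hw_handle; int *sw_ring; };

/* Failable half: ask the device to drop the object. If this fails the
 * software state must remain untouched so the caller can back out. */
static int pre_destroy(struct cq *cq)
{
	printf("hw destroy %d\n", cq->hw_handle);
	return 0;	/* pretend the firmware command succeeded */
}

/* Infallible half: tear down software resources, only after pre
 * succeeded. */
static void post_destroy(struct cq *cq)
{
	cq->sw_ring = NULL;	/* free()/teardown elided */
}

static int destroy(struct cq *cq)
{
	int ret = pre_destroy(cq);

	if (ret)
		return ret;	/* object still fully alive */
	post_destroy(cq);
	return 0;
}

int main(void)
{
	struct cq cq = { .hw_handle = 3 };

	return destroy(&cq);
}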
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 843dcd312242..028d9f031dde 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -159,7 +159,7 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user, u64 req_ucaps)
uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
if (is_user &&
(MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX) &&
- capable(CAP_NET_RAW))
+ rdma_dev_has_raw_cap(&dev->ib_dev))
cap |= MLX5_UCTX_CAP_RAW_TX;
if (is_user &&
(MLX5_CAP_GEN(dev->mdev, uctx_cap) &
@@ -1393,6 +1393,10 @@ static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
}
MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
+ /* TPH is not allowed to bypass the regular kernel's verbs flow */
+ MLX5_SET(mkc, mkc, pcie_tph_en, 0);
+ MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index,
+ MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX);
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
index b4c97fb62abf..9ded2b7c1e31 100644
--- a/drivers/infiniband/hw/mlx5/dm.c
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -282,7 +282,7 @@ static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
int err;
u64 address;
- if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
+ if (!dm_db || !MLX5_CAP_DEV_MEM(dm_db->dev, memic))
return ERR_PTR(-EOPNOTSUPP);
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mlx5/dmah.c b/drivers/infiniband/hw/mlx5/dmah.c
new file mode 100644
index 000000000000..362a88992ffa
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dmah.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include <linux/pci-tph.h>
+#include "dmah.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static int mlx5_ib_alloc_dmah(struct ib_dmah *ibdmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_core_dev *mdev = to_mdev(ibdmah->device)->mdev;
+ struct mlx5_ib_dmah *dmah = to_mdmah(ibdmah);
+ u16 st_bits = BIT(IB_DMAH_CPU_ID_EXISTS) |
+ BIT(IB_DMAH_MEM_TYPE_EXISTS);
+ int err;
+
+ /* PH is mandatory for TPH, per PCIe spec 6.2-1.0 */
+ if (!(ibdmah->valid_fields & BIT(IB_DMAH_PH_EXISTS)))
+ return -EINVAL;
+
+ /* ST is optional; however, partial data for it is not allowed */
+ if (ibdmah->valid_fields & st_bits) {
+ if ((ibdmah->valid_fields & st_bits) != st_bits)
+ return -EINVAL;
+ err = mlx5_st_alloc_index(mdev, ibdmah->mem_type,
+ ibdmah->cpu_id, &dmah->st_index);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlx5_ib_dealloc_dmah(struct ib_dmah *ibdmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dmah *dmah = to_mdmah(ibdmah);
+ struct mlx5_core_dev *mdev = to_mdev(ibdmah->device)->mdev;
+
+ if (ibdmah->valid_fields & BIT(IB_DMAH_CPU_ID_EXISTS))
+ return mlx5_st_dealloc_index(mdev, dmah->st_index);
+
+ return 0;
+}
+
+const struct ib_device_ops mlx5_ib_dev_dmah_ops = {
+ .alloc_dmah = mlx5_ib_alloc_dmah,
+ .dealloc_dmah = mlx5_ib_dealloc_dmah,
+};
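mlx5_ib_alloc_dmah enforces an all-or-nothing rule on the steering-tag fields: setting only one of CPU_ID/MEM_TYPE is rejected. The validation idiom, reduced to a runnable sketch:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

enum { PH_EXISTS, CPU_ID_EXISTS, MEM_TYPE_EXISTS };

/* Steering-tag data is optional, but partial data is invalid:
 * either both CPU_ID and MEM_TYPE are present, or neither is. */
static int validate(uint32_t valid_fields)
{
	uint32_t st_bits = BIT(CPU_ID_EXISTS) | BIT(MEM_TYPE_EXISTS);

	if (!(valid_fields & BIT(PH_EXISTS)))
		return -1;			/* PH is mandatory */
	if ((valid_fields & st_bits) &&
	    (valid_fields & st_bits) != st_bits)
		return -1;			/* partial ST data */
	return 0;
}

int main(void)
{
	printf("%d\n", validate(BIT(PH_EXISTS)));			/* 0: ok */
	printf("%d\n", validate(BIT(PH_EXISTS) | BIT(CPU_ID_EXISTS)));	/* -1: partial */
	return 0;
}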
diff --git a/drivers/infiniband/hw/mlx5/dmah.h b/drivers/infiniband/hw/mlx5/dmah.h
new file mode 100644
index 000000000000..68de72b4744a
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dmah.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#ifndef _MLX5_IB_DMAH_H
+#define _MLX5_IB_DMAH_H
+
+#include "mlx5_ib.h"
+
+extern const struct ib_device_ops mlx5_ib_dev_dmah_ops;
+
+struct mlx5_ib_dmah {
+ struct ib_dmah ibdmah;
+ u16 st_index;
+};
+
+static inline struct mlx5_ib_dmah *to_mdmah(struct ib_dmah *ibdmah)
+{
+ return container_of(ibdmah, struct mlx5_ib_dmah, ibdmah);
+}
+
+#endif /* _MLX5_IB_DMAH_H */
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 680627f1de33..b0f7663c24c1 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1012,14 +1012,14 @@ static int get_per_qp_prio(struct mlx5_ib_dev *dev,
return 0;
}
-static struct mlx5_per_qp_opfc *
-get_per_qp_opfc(struct mlx5_rdma_counter *mcounter, u32 qp_num, bool *new)
+static struct mlx5_per_qp_opfc *get_per_qp_opfc(struct xarray *qpn_opfc_xa,
+ u32 qp_num, bool *new)
{
struct mlx5_per_qp_opfc *per_qp_opfc;
*new = false;
- per_qp_opfc = xa_load(&mcounter->qpn_opfc_xa, qp_num);
+ per_qp_opfc = xa_load(qpn_opfc_xa, qp_num);
if (per_qp_opfc)
return per_qp_opfc;
per_qp_opfc = kzalloc(sizeof(*per_qp_opfc), GFP_KERNEL);
@@ -1032,7 +1032,8 @@ get_per_qp_opfc(struct mlx5_rdma_counter *mcounter, u32 qp_num, bool *new)
}
static int add_op_fc_rules(struct mlx5_ib_dev *dev,
- struct mlx5_rdma_counter *mcounter,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX],
+ struct xarray *qpn_opfc_xa,
struct mlx5_per_qp_opfc *per_qp_opfc,
struct mlx5_ib_flow_prio *prio,
enum mlx5_ib_optional_counter_type type,
@@ -1055,7 +1056,7 @@ static int add_op_fc_rules(struct mlx5_ib_dev *dev,
return 0;
}
- opfc->fc = mcounter->fc[type];
+ opfc->fc = fc_arr[type];
spec = kcalloc(MAX_OPFC_RULES, sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -1148,8 +1149,7 @@ static int add_op_fc_rules(struct mlx5_ib_dev *dev,
}
prio->refcount += spec_num;
- err = xa_err(xa_store(&mcounter->qpn_opfc_xa, qp_num, per_qp_opfc,
- GFP_KERNEL));
+ err = xa_err(xa_store(qpn_opfc_xa, qp_num, per_qp_opfc, GFP_KERNEL));
if (err)
goto del_rules;
@@ -1168,8 +1168,9 @@ null_fc:
return err;
}
-static bool is_fc_shared_and_in_use(struct mlx5_rdma_counter *mcounter,
- u32 type, struct mlx5_fc **fc)
+static bool
+is_fc_shared_and_in_use(struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX], u32 type,
+ struct mlx5_fc **fc)
{
u32 shared_fc_type;
@@ -1190,7 +1191,7 @@ static bool is_fc_shared_and_in_use(struct mlx5_rdma_counter *mcounter,
return false;
}
- *fc = mcounter->fc[shared_fc_type];
+ *fc = fc_arr[shared_fc_type];
if (!(*fc))
return false;
@@ -1198,24 +1199,23 @@ static bool is_fc_shared_and_in_use(struct mlx5_rdma_counter *mcounter,
}
void mlx5r_fs_destroy_fcs(struct mlx5_ib_dev *dev,
- struct rdma_counter *counter)
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX])
{
- struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
struct mlx5_fc *in_use_fc;
int i;
for (i = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP; i++) {
- if (!mcounter->fc[i])
+ if (!fc_arr[i])
continue;
- if (is_fc_shared_and_in_use(mcounter, i, &in_use_fc)) {
- mcounter->fc[i] = NULL;
+ if (is_fc_shared_and_in_use(fc_arr, i, &in_use_fc)) {
+ fc_arr[i] = NULL;
continue;
}
- mlx5_fc_destroy(dev->mdev, mcounter->fc[i]);
- mcounter->fc[i] = NULL;
+ mlx5_fc_destroy(dev->mdev, fc_arr[i]);
+ fc_arr[i] = NULL;
}
}
@@ -1359,16 +1359,15 @@ void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
put_per_qp_prio(dev, type);
}
-void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct rdma_counter *counter)
+void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct xarray *qpn_opfc_xa)
{
- struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
- struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_per_qp_opfc *per_qp_opfc;
struct mlx5_ib_op_fc *in_use_opfc;
struct mlx5_ib_flow_prio *prio;
int i, j;
- per_qp_opfc = xa_load(&mcounter->qpn_opfc_xa, qp->qp_num);
+ per_qp_opfc = xa_load(qpn_opfc_xa, qp->qp_num);
if (!per_qp_opfc)
return;
@@ -1394,13 +1393,13 @@ void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct rdma_counter *counter)
}
kfree(per_qp_opfc);
- xa_erase(&mcounter->qpn_opfc_xa, qp->qp_num);
+ xa_erase(qpn_opfc_xa, qp->qp_num);
}
-int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
- u32 port)
+int mlx5r_fs_bind_op_fc(struct ib_qp *qp,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX],
+ struct xarray *qpn_opfc_xa, u32 port)
{
- struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_per_qp_opfc *per_qp_opfc;
struct mlx5_ib_flow_prio *prio;
@@ -1410,9 +1409,6 @@ int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
int i, err, per_qp_type;
bool new;
- if (!counter->mode.bind_opcnt)
- return 0;
-
cnts = &dev->port[port - 1].cnts;
for (i = 0; i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES; i++) {
@@ -1424,23 +1420,22 @@ int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
prio = get_opfc_prio(dev, per_qp_type);
WARN_ON(!prio->flow_table);
- if (is_fc_shared_and_in_use(mcounter, per_qp_type, &in_use_fc))
- mcounter->fc[per_qp_type] = in_use_fc;
+ if (is_fc_shared_and_in_use(fc_arr, per_qp_type, &in_use_fc))
+ fc_arr[per_qp_type] = in_use_fc;
- if (!mcounter->fc[per_qp_type]) {
- mcounter->fc[per_qp_type] = mlx5_fc_create(dev->mdev,
- false);
- if (IS_ERR(mcounter->fc[per_qp_type]))
- return PTR_ERR(mcounter->fc[per_qp_type]);
+ if (!fc_arr[per_qp_type]) {
+ fc_arr[per_qp_type] = mlx5_fc_create(dev->mdev, false);
+ if (IS_ERR(fc_arr[per_qp_type]))
+ return PTR_ERR(fc_arr[per_qp_type]);
}
- per_qp_opfc = get_per_qp_opfc(mcounter, qp->qp_num, &new);
+ per_qp_opfc = get_per_qp_opfc(qpn_opfc_xa, qp->qp_num, &new);
if (!per_qp_opfc) {
err = -ENOMEM;
goto free_fc;
}
- err = add_op_fc_rules(dev, mcounter, per_qp_opfc, prio,
- per_qp_type, qp->qp_num, port);
+ err = add_op_fc_rules(dev, fc_arr, qpn_opfc_xa, per_qp_opfc,
+ prio, per_qp_type, qp->qp_num, port);
if (err)
goto del_rules;
}
@@ -1448,12 +1443,12 @@ int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
return 0;
del_rules:
- mlx5r_fs_unbind_op_fc(qp, counter);
+ mlx5r_fs_unbind_op_fc(qp, qpn_opfc_xa);
if (new)
kfree(per_qp_opfc);
free_fc:
- if (xa_empty(&mcounter->qpn_opfc_xa))
- mlx5r_fs_destroy_fcs(dev, counter);
+ if (xa_empty(qpn_opfc_xa))
+ mlx5r_fs_destroy_fcs(dev, fc_arr);
return err;
}
@@ -1966,7 +1961,8 @@ _get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
break;
case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
- if (ib_port == 0 || user_priority > MLX5_RDMA_TRANSPORT_BYPASS_PRIO)
+ if (ib_port == 0 ||
+ user_priority >= MLX5_RDMA_TRANSPORT_BYPASS_PRIO)
return ERR_PTR(-EINVAL);
ret = mlx5_ib_fill_transport_ns_info(dev, ns_type, &flags,
&vport_idx, &vport,
@@ -2016,10 +2012,10 @@ _get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
prio = &dev->flow_db->rdma_tx[priority];
break;
case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
- prio = &dev->flow_db->rdma_transport_rx[ib_port - 1];
+ prio = &dev->flow_db->rdma_transport_rx[priority][ib_port - 1];
break;
case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
- prio = &dev->flow_db->rdma_transport_tx[ib_port - 1];
+ prio = &dev->flow_db->rdma_transport_tx[priority][ib_port - 1];
break;
default:
return ERR_PTR(-EINVAL);
}
@@ -2458,7 +2454,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct mlx5_ib_dev *dev;
u32 flags;
- if (!capable(CAP_NET_RAW))
+ if (!rdma_uattrs_has_raw_cap(attrs))
return -EPERM;
fs_matcher = uverbs_attr_get_obj(attrs,
@@ -2989,7 +2985,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
u32 ft_id;
int err;
- if (!capable(CAP_NET_RAW))
+ if (!rdma_dev_has_raw_cap(&dev->ib_dev))
return -EPERM;
err = uverbs_get_const(&ib_uapi_ft_type, attrs,
@@ -3466,31 +3462,40 @@ static const struct ib_device_ops flow_ops = {
int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
{
+ int i, j;
+
dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
if (!dev->flow_db)
return -ENOMEM;
- dev->flow_db->rdma_transport_rx = kcalloc(dev->num_ports,
- sizeof(struct mlx5_ib_flow_prio),
- GFP_KERNEL);
- if (!dev->flow_db->rdma_transport_rx)
- goto free_flow_db;
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) {
+ dev->flow_db->rdma_transport_rx[i] =
+ kcalloc(dev->num_ports,
+ sizeof(struct mlx5_ib_flow_prio), GFP_KERNEL);
+ if (!dev->flow_db->rdma_transport_rx[i])
+ goto free_rdma_transport_rx;
+ }
- dev->flow_db->rdma_transport_tx = kcalloc(dev->num_ports,
- sizeof(struct mlx5_ib_flow_prio),
- GFP_KERNEL);
- if (!dev->flow_db->rdma_transport_tx)
- goto free_rdma_transport_rx;
+ for (j = 0; j < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; j++) {
+ dev->flow_db->rdma_transport_tx[j] =
+ kcalloc(dev->num_ports,
+ sizeof(struct mlx5_ib_flow_prio), GFP_KERNEL);
+ if (!dev->flow_db->rdma_transport_tx[j])
+ goto free_rdma_transport_tx;
+ }
mutex_init(&dev->flow_db->lock);
ib_set_device_ops(&dev->ib_dev, &flow_ops);
return 0;
+free_rdma_transport_tx:
+ while (j--)
+ kfree(dev->flow_db->rdma_transport_tx[j]);
free_rdma_transport_rx:
- kfree(dev->flow_db->rdma_transport_rx);
-free_flow_db:
+ while (i--)
+ kfree(dev->flow_db->rdma_transport_rx[i]);
kfree(dev->flow_db);
return -ENOMEM;
}
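mlx5_ib_fs_init now allocates one per-port array per RDMA_TRANSPORT priority and unwinds partial progress with the while (i--) idiom. A userspace reduction of that allocate-or-unwind pattern:

#include <stdlib.h>

#define NPRIO 4

static int alloc_tables(int *tables[NPRIO], int nports)
{
	int i;

	for (i = 0; i < NPRIO; i++) {
		tables[i] = calloc(nports, sizeof(int));
		if (!tables[i])
			goto unwind;
	}
	return 0;

unwind:
	/* i is the index that failed; free the i earlier successes. */
	while (i--)
		free(tables[i]);
	return -1;
}

int main(void)
{
	int *tables[NPRIO] = { 0 };
	int i;

	if (alloc_tables(tables, 8))
		return 1;
	for (i = 0; i < NPRIO; i++)
		free(tables[i]);
	return 0;
}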
diff --git a/drivers/infiniband/hw/mlx5/fs.h b/drivers/infiniband/hw/mlx5/fs.h
index 2ebe86e5be10..7abba0e2837c 100644
--- a/drivers/infiniband/hw/mlx5/fs.h
+++ b/drivers/infiniband/hw/mlx5/fs.h
@@ -13,6 +13,8 @@ void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev);
static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev)
{
+ int i;
+
/* When a steering anchor is created, a special flow table is also
* created for the user to reference. Since the user can reference it,
* the kernel cannot trust that when the user destroys the steering
@@ -25,8 +27,10 @@ static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev)
* is a safe assumption that all references are gone.
*/
mlx5_ib_fs_cleanup_anchor(dev);
- kfree(dev->flow_db->rdma_transport_tx);
- kfree(dev->flow_db->rdma_transport_rx);
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++)
+ kfree(dev->flow_db->rdma_transport_tx[i]);
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++)
+ kfree(dev->flow_db->rdma_transport_rx[i]);
kfree(dev->flow_db);
}
#endif /* _MLX5_IB_FS_H */
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 49af1cfbe6d1..cc8859d3c2f5 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -88,7 +88,8 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
else
return mlx5_ib_set_vport_rep(lag_master, rep, vport_index);
- ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
+ ibdev = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
+ mlx5_core_net(lag_master));
if (!ibdev)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index df6557ddbdfc..d456e4fde3e1 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -50,6 +50,7 @@
#include <rdma/ib_ucaps.h>
#include "macsec.h"
#include "data_direct.h"
+#include "dmah.h"
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -4190,7 +4191,9 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.modify_port = mlx5_ib_modify_port,
.modify_qp = mlx5_ib_modify_qp,
.modify_srq = mlx5_ib_modify_srq,
+ .pre_destroy_cq = mlx5_ib_pre_destroy_cq,
.poll_cq = mlx5_ib_poll_cq,
+ .post_destroy_cq = mlx5_ib_post_destroy_cq,
.post_recv = mlx5_ib_post_recv_nodrain,
.post_send = mlx5_ib_post_send_nodrain,
.post_srq_recv = mlx5_ib_post_srq_recv,
@@ -4212,6 +4215,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_dmah, mlx5_ib_dmah, ibdmah),
INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
@@ -4339,6 +4343,9 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
+ if (mdev->st)
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dmah_ops);
+
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
@@ -4824,7 +4831,8 @@ static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent,
!MLX5_CAP_GEN_2(mparent->mdev, multiplane_qp_ud))
return ERR_PTR(-EOPNOTSUPP);
- mplane = ib_alloc_device(mlx5_ib_dev, ib_dev);
+ mplane = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
+ mlx5_core_net(mparent->mdev));
if (!mplane)
return ERR_PTR(-ENOMEM);
@@ -4938,7 +4946,8 @@ static int mlx5r_probe(struct auxiliary_device *adev,
num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
MLX5_CAP_GEN(mdev, num_vhca_ports));
- dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
+ dev = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
+ mlx5_core_net(mdev));
if (!dev)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index fde859d207ae..7ffc7ee92cf0 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -104,19 +104,6 @@ unsigned long __mlx5_umem_find_best_quantized_pgoff(
__mlx5_bit_sz(typ, page_offset_fld), 0, scale, \
page_offset_quantized)
-static inline unsigned long
-mlx5_umem_dmabuf_find_best_pgsz(struct ib_umem_dmabuf *umem_dmabuf)
-{
- /*
- * mkeys used for dmabuf are fixed at PAGE_SIZE because we must be able
- * to hold any sgl after a move operation. Ideally the mkc page size
- * could be changed at runtime to be optimal, but right now the driver
- * cannot do that.
- */
- return ib_umem_find_best_pgsz(&umem_dmabuf->umem, PAGE_SIZE,
- umem_dmabuf->umem.iova);
-}
-
enum {
MLX5_IB_MMAP_OFFSET_START = 9,
MLX5_IB_MMAP_OFFSET_END = 255,
@@ -320,8 +307,8 @@ struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX];
struct mlx5_flow_table *lag_demux_ft;
- struct mlx5_ib_flow_prio *rdma_transport_rx;
- struct mlx5_ib_flow_prio *rdma_transport_tx;
+ struct mlx5_ib_flow_prio *rdma_transport_rx[MLX5_RDMA_TRANSPORT_BYPASS_PRIO];
+ struct mlx5_ib_flow_prio *rdma_transport_tx[MLX5_RDMA_TRANSPORT_BYPASS_PRIO];
/* Protect flow steering bypass flow tables
* when add/del flow rules.
* only single add/removal of flow steering rule could be done
@@ -352,6 +339,7 @@ struct mlx5_ib_flow_db {
#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
#define MLX5_IB_UPD_XLT_DOWNGRADE BIT(7)
+#define MLX5_IB_UPD_XLT_KEEP_PGSZ BIT(8)
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
*
@@ -650,8 +638,13 @@ enum mlx5_mkey_type {
MLX5_MKEY_IMPLICIT_CHILD,
};
+/* Used for non-existent ph value */
+#define MLX5_IB_NO_PH 0xff
+
struct mlx5r_cache_rb_key {
u8 ats:1;
+ u8 ph;
+ u16 st_index;
unsigned int access_mode;
unsigned int access_flags;
unsigned int ndescs;
@@ -739,6 +732,8 @@ struct mlx5_ib_mr {
struct mlx5_ib_mr *dd_crossed_mr;
struct list_head dd_node;
u8 revoked :1;
+ /* Indicates previous dmabuf page fault occurred */
+ u8 dmabuf_faulted:1;
struct mlx5_ib_mkey null_mmkey;
};
};
@@ -899,13 +894,14 @@ void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
struct mlx5_ib_op_fc *opfc,
enum mlx5_ib_optional_counter_type type);
-int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
- u32 port);
+int mlx5r_fs_bind_op_fc(struct ib_qp *qp,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX],
+ struct xarray *qpn_opfc_xa, u32 port);
-void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct rdma_counter *counter);
+void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct xarray *qpn_opfc_xa);
void mlx5r_fs_destroy_fcs(struct mlx5_ib_dev *dev,
- struct rdma_counter *counter);
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX]);
struct mlx5_ib_multiport_info;
@@ -1372,16 +1368,20 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs);
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int mlx5_ib_pre_destroy_cq(struct ib_cq *cq);
+void mlx5_ib_post_destroy_cq(struct ib_cq *cq);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
int mlx5_ib_advise_mr(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
@@ -1748,20 +1748,71 @@ static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port)
return (port - 1) / dev->num_ports + 1;
}
+static inline unsigned int get_max_log_entity_size_cap(struct mlx5_ib_dev *dev,
+ int access_mode)
+{
+ int max_log_size = 0;
+
+ if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ max_log_size =
+ MLX5_CAP_GEN_2(dev->mdev, max_mkey_log_entity_size_mtt);
+ else if (access_mode == MLX5_MKC_ACCESS_MODE_KSM)
+ max_log_size = MLX5_CAP_GEN_2(
+ dev->mdev, max_mkey_log_entity_size_fixed_buffer);
+
+ if (!max_log_size ||
+ (max_log_size > 31 &&
+ !MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5)))
+ max_log_size = 31;
+
+ return max_log_size;
+}
+
+static inline unsigned int get_min_log_entity_size_cap(struct mlx5_ib_dev *dev,
+ int access_mode)
+{
+ int min_log_size = 0;
+
+ if (access_mode == MLX5_MKC_ACCESS_MODE_KSM &&
+ MLX5_CAP_GEN_2(dev->mdev,
+ min_mkey_log_entity_size_fixed_buffer_valid))
+ min_log_size = MLX5_CAP_GEN_2(
+ dev->mdev, min_mkey_log_entity_size_fixed_buffer);
+ else
+ min_log_size =
+ MLX5_CAP_GEN_2(dev->mdev, log_min_mkey_entity_size);
+
+ min_log_size = max(min_log_size, MLX5_ADAPTER_PAGE_SHIFT);
+ return min_log_size;
+}
+
/*
* For mkc users, instead of a page_offset the command has a start_iova which
* specifies both the page_offset and the on-the-wire IOVA
*/
static __always_inline unsigned long
mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- u64 iova)
+ u64 iova, int access_mode)
{
- int page_size_bits =
- MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5) ? 6 : 5;
- unsigned long bitmap =
- __mlx5_log_page_size_to_bitmap(page_size_bits, 0);
+ unsigned int max_log_entity_size_cap, min_log_entity_size_cap;
+ unsigned long bitmap;
+
+ max_log_entity_size_cap = get_max_log_entity_size_cap(dev, access_mode);
+ min_log_entity_size_cap = get_min_log_entity_size_cap(dev, access_mode);
+
+ bitmap = GENMASK_ULL(max_log_entity_size_cap, min_log_entity_size_cap);
return ib_umem_find_best_pgsz(umem, bitmap, iova);
}
+static inline unsigned long
+mlx5_umem_dmabuf_find_best_pgsz(struct ib_umem_dmabuf *umem_dmabuf,
+ int access_mode)
+{
+ return mlx5_umem_mkc_find_best_pgsz(to_mdev(umem_dmabuf->umem.ibdev),
+ &umem_dmabuf->umem,
+ umem_dmabuf->umem.iova,
+ access_mode);
+}
+
#endif /* MLX5_IB_H */
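mlx5_umem_mkc_find_best_pgsz above builds a bitmap of permitted page-size exponents from the min/max capability fields and lets ib_umem_find_best_pgsz pick the largest size that fits. A simplified model of that selection; the real helper also walks the umem scatterlist, which this sketch ignores:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Pick the largest page size whose exponent bit is set in `bitmap`
 * and which keeps both the IOVA and the length aligned. */
static uint64_t best_pgsz(unsigned long long bitmap, uint64_t iova,
			  uint64_t len)
{
	int shift;

	for (shift = 63; shift >= 0; shift--) {
		uint64_t pgsz = 1ULL << shift;

		if (!(bitmap & pgsz))
			continue;
		if ((iova | len) & (pgsz - 1))
			continue;
		return pgsz;
	}
	return 0;
}

int main(void)
{
	/* e.g. caps permitting 4K..1G page sizes: bits 12..30 */
	unsigned long long bitmap = GENMASK_ULL(30, 12);

	/* 2M-aligned buffer: the best fit is a 2M page size. */
	printf("0x%llx\n",
	       (unsigned long long)best_pgsz(bitmap, 0x200000, 0x400000));
	return 0;
}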
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bd35e75d9ce5..1317f2cb38a4 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -44,6 +44,7 @@
#include "mlx5_ib.h"
#include "umr.h"
#include "data_direct.h"
+#include "dmah.h"
enum {
MAX_PENDING_REG_MR = 8,
@@ -57,7 +58,7 @@ create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u64 iova, int access_flags,
unsigned long page_size, bool populate,
- int access_mode);
+ int access_mode, u16 st_index, u8 ph);
static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
@@ -256,6 +257,14 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
get_mkc_octo_size(ent->rb_key.access_mode,
ent->rb_key.ndescs));
MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
+
+ if (ent->rb_key.ph != MLX5_IB_NO_PH) {
+ MLX5_SET(mkc, mkc, pcie_tph_en, 1);
+ MLX5_SET(mkc, mkc, pcie_tph_ph, ent->rb_key.ph);
+ if (ent->rb_key.st_index != MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX)
+ MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index,
+ ent->rb_key.st_index);
+ }
}
/* Asynchronously schedule new MRs to be populated in the cache. */
@@ -641,6 +650,14 @@ static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1,
if (res)
return res;
+ res = key1.st_index - key2.st_index;
+ if (res)
+ return res;
+
+ res = key1.ph - key2.ph;
+ if (res)
+ return res;
+
/*
* keep ndescs the last in the compare table since the find function
* searches for an exact match on all properties and only closest
@@ -712,6 +729,8 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
smallest->rb_key.access_mode == rb_key.access_mode &&
smallest->rb_key.access_flags == rb_key.access_flags &&
smallest->rb_key.ats == rb_key.ats &&
+ smallest->rb_key.st_index == rb_key.st_index &&
+ smallest->rb_key.ph == rb_key.ph &&
smallest->rb_key.ndescs <= ndescs_limit) ?
smallest :
NULL;
@@ -786,7 +805,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
struct mlx5r_cache_rb_key rb_key = {
.ndescs = ndescs,
.access_mode = access_mode,
- .access_flags = get_unchangeable_access_flags(dev, access_flags)
+ .access_flags = get_unchangeable_access_flags(dev, access_flags),
+ .ph = MLX5_IB_NO_PH,
};
struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key);
@@ -943,6 +963,7 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
struct rb_root *root = &dev->cache.rb_root;
struct mlx5r_cache_rb_key rb_key = {
.access_mode = MLX5_MKC_ACCESS_MODE_MTT,
+ .ph = MLX5_IB_NO_PH,
};
struct mlx5_cache_ent *ent;
struct rb_node *node;
@@ -1119,7 +1140,8 @@ static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
struct ib_umem *umem, u64 iova,
- int access_flags, int access_mode)
+ int access_flags, int access_mode,
+ u16 st_index, u8 ph)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5r_cache_rb_key rb_key = {};
@@ -1130,7 +1152,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
if (umem->is_dmabuf)
page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
else
- page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
+ page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova,
+ access_mode);
if (WARN_ON(!page_size))
return ERR_PTR(-EINVAL);
@@ -1138,6 +1161,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size);
rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags);
rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags);
+ rb_key.st_index = st_index;
+ rb_key.ph = ph;
ent = mkey_cache_ent_from_rb_key(dev, rb_key);
/*
* If the MR can't come from the cache then synchronously create an uncached
@@ -1145,7 +1170,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
*/
if (!ent) {
mutex_lock(&dev->slow_path_mutex);
- mr = reg_create(pd, umem, iova, access_flags, page_size, false, access_mode);
+ mr = reg_create(pd, umem, iova, access_flags, page_size, false, access_mode,
+ st_index, ph);
mutex_unlock(&dev->slow_path_mutex);
if (IS_ERR(mr))
return mr;
@@ -1230,7 +1256,7 @@ err_1:
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u64 iova, int access_flags,
unsigned long page_size, bool populate,
- int access_mode)
+ int access_mode, u16 st_index, u8 ph)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr;
@@ -1240,7 +1266,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u32 *in;
int err;
bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)) &&
- (access_mode == MLX5_MKC_ACCESS_MODE_MTT);
+ (access_mode == MLX5_MKC_ACCESS_MODE_MTT) &&
+ (ph == MLX5_IB_NO_PH);
bool ksm_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM);
if (!page_size)
@@ -1304,6 +1331,13 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
get_octo_len(iova, umem->length, mr->page_shift));
}
+ if (ph != MLX5_IB_NO_PH) {
+ MLX5_SET(mkc, mkc, pcie_tph_en, 1);
+ MLX5_SET(mkc, mkc, pcie_tph_ph, ph);
+ if (st_index != MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX)
+ MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index, st_index);
+ }
+
err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err) {
mlx5_ib_warn(dev, "create mkey failed\n");
@@ -1423,24 +1457,37 @@ struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
}
static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
- u64 iova, int access_flags)
+ u64 iova, int access_flags,
+ struct ib_dmah *dmah)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr = NULL;
bool xlt_with_umr;
+ u16 st_index = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX;
+ u8 ph = MLX5_IB_NO_PH;
int err;
+ if (dmah) {
+ struct mlx5_ib_dmah *mdmah = to_mdmah(dmah);
+
+ ph = dmah->ph;
+ if (dmah->valid_fields & BIT(IB_DMAH_CPU_ID_EXISTS))
+ st_index = mdmah->st_index;
+ }
+
xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length);
if (xlt_with_umr) {
mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
- MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_MKC_ACCESS_MODE_MTT,
+ st_index, ph);
} else {
- unsigned long page_size =
- mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
+ unsigned long page_size = mlx5_umem_mkc_find_best_pgsz(
+ dev, umem, iova, MLX5_MKC_ACCESS_MODE_MTT);
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(pd, umem, iova, access_flags, page_size,
- true, MLX5_MKC_ACCESS_MODE_MTT);
+ true, MLX5_MKC_ACCESS_MODE_MTT,
+ st_index, ph);
mutex_unlock(&dev->slow_path_mutex);
}
if (IS_ERR(mr)) {
@@ -1504,7 +1551,9 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_CAST(odp);
mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags,
- MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_MKC_ACCESS_MODE_MTT,
+ MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX,
+ MLX5_IB_NO_PH);
if (IS_ERR(mr)) {
ib_umem_release(&odp->umem);
return ERR_CAST(mr);
@@ -1528,13 +1577,15 @@ err_dereg_mr:
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 iova, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_umem *umem;
int err;
- if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
+ ((access_flags & IB_ACCESS_ON_DEMAND) && dmah))
return ERR_PTR(-EOPNOTSUPP);
mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
@@ -1550,7 +1601,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
umem = ib_umem_get(&dev->ib_dev, start, length, access_flags);
if (IS_ERR(umem))
return ERR_CAST(umem);
- return create_real_mr(pd, umem, iova, access_flags);
+ return create_real_mr(pd, umem, iova, access_flags, dmah);
}
static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
@@ -1575,12 +1626,15 @@ static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
static struct ib_mr *
reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
u64 offset, u64 length, u64 virt_addr,
- int fd, int access_flags, int access_mode)
+ int fd, int access_flags, int access_mode,
+ struct ib_dmah *dmah)
{
bool pinned_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM);
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr = NULL;
struct ib_umem_dmabuf *umem_dmabuf;
+ u16 st_index = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX;
+ u8 ph = MLX5_IB_NO_PH;
int err;
err = mlx5r_umr_resource_init(dev);
@@ -1603,8 +1657,17 @@ reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
return ERR_CAST(umem_dmabuf);
}
+ if (dmah) {
+ struct mlx5_ib_dmah *mdmah = to_mdmah(dmah);
+
+ ph = dmah->ph;
+ if (dmah->valid_fields & BIT(IB_DMAH_CPU_ID_EXISTS))
+ st_index = mdmah->st_index;
+ }
+
mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
- access_flags, access_mode);
+ access_flags, access_mode,
+ st_index, ph);
if (IS_ERR(mr)) {
ib_umem_release(&umem_dmabuf->umem);
return ERR_CAST(mr);
@@ -1661,7 +1724,8 @@ reg_user_mr_dmabuf_by_data_direct(struct ib_pd *pd, u64 offset,
access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
crossed_mr = reg_user_mr_dmabuf(pd, &data_direct_dev->pdev->dev,
offset, length, virt_addr, fd,
- access_flags, MLX5_MKC_ACCESS_MODE_KSM);
+ access_flags, MLX5_MKC_ACCESS_MODE_KSM,
+ NULL);
if (IS_ERR(crossed_mr)) {
ret = PTR_ERR(crossed_mr);
goto end;
@@ -1688,6 +1752,7 @@ end:
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
u64 length, u64 virt_addr,
int fd, int access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -1720,7 +1785,8 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
return reg_user_mr_dmabuf(pd, pd->device->dma_device,
offset, length, virt_addr,
- fd, access_flags, MLX5_MKC_ACCESS_MODE_MTT);
+ fd, access_flags, MLX5_MKC_ACCESS_MODE_MTT,
+ dmah);
}
/*
@@ -1754,7 +1820,8 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
return false;
- *page_size = mlx5_umem_mkc_find_best_pgsz(dev, new_umem, iova);
+ *page_size = mlx5_umem_mkc_find_best_pgsz(
+ dev, new_umem, iova, mr->mmkey.cache_ent->rb_key.access_mode);
if (WARN_ON(!*page_size))
return false;
return (mr->mmkey.cache_ent->rb_key.ndescs) >=
@@ -1817,7 +1884,8 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
struct mlx5_ib_mr *mr = to_mmr(ib_mr);
int err;
- if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || mr->data_direct)
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || mr->data_direct ||
+ mr->mmkey.rb_key.ph != MLX5_IB_NO_PH)
return ERR_PTR(-EOPNOTSUPP);
mlx5_ib_dbg(
@@ -1861,7 +1929,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
return create_real_mr(new_pd, umem, mr->ibmr.iova,
- new_access_flags);
+ new_access_flags, NULL);
}
/*
@@ -1892,7 +1960,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
}
return NULL;
}
- return create_real_mr(new_pd, new_umem, iova, new_access_flags);
+ return create_real_mr(new_pd, new_umem, iova, new_access_flags, NULL);
}
/*
@@ -1901,7 +1969,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
*/
recreate:
return mlx5_ib_reg_user_mr(new_pd, start, length, iova,
- new_access_flags, udata);
+ new_access_flags, NULL, udata);
}
static int
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index f6abd64f07f7..0e8ae85af5a6 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -836,9 +836,13 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
u32 *bytes_mapped, u32 flags)
{
struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
+ int access_mode = mr->data_direct ? MLX5_MKC_ACCESS_MODE_KSM :
+ MLX5_MKC_ACCESS_MODE_MTT;
+ unsigned int old_page_shift = mr->page_shift;
+ unsigned int page_shift;
+ unsigned long page_size;
u32 xlt_flags = 0;
int err;
- unsigned long page_size;
if (flags & MLX5_PF_FLAGS_ENABLE)
xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
@@ -850,20 +854,33 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
return err;
}
- page_size = mlx5_umem_dmabuf_find_best_pgsz(umem_dmabuf);
+ page_size = mlx5_umem_dmabuf_find_best_pgsz(umem_dmabuf, access_mode);
if (!page_size) {
ib_umem_dmabuf_unmap_pages(umem_dmabuf);
err = -EINVAL;
} else {
- if (mr->data_direct)
- err = mlx5r_umr_update_data_direct_ksm_pas(mr, xlt_flags);
- else
- err = mlx5r_umr_update_mr_pas(mr, xlt_flags);
+ page_shift = order_base_2(page_size);
+ if (page_shift != mr->page_shift && mr->dmabuf_faulted) {
+ err = mlx5r_umr_dmabuf_update_pgsz(mr, xlt_flags,
+ page_shift);
+ } else {
+ mr->page_shift = page_shift;
+ if (mr->data_direct)
+ err = mlx5r_umr_update_data_direct_ksm_pas(
+ mr, xlt_flags);
+ else
+ err = mlx5r_umr_update_mr_pas(mr,
+ xlt_flags);
+ }
}
dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
- if (err)
+ if (err) {
+ mr->page_shift = old_page_shift;
return err;
+ }
+
+ mr->dmabuf_faulted = 1;
if (bytes_mapped)
*bytes_mapped += bcnt;
@@ -1866,6 +1883,7 @@ int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
struct mlx5r_cache_rb_key rb_key = {
.access_mode = MLX5_MKC_ACCESS_MODE_KSM,
.ndescs = mlx5_imr_ksm_entries,
+ .ph = MLX5_IB_NO_PH,
};
struct mlx5_cache_ent *ent;
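The umr.c hunks below thread a (start_block, nblocks) window through the translation-table update, with nblocks == 0 meaning "to the end". A simplified model of that windowed walk:

#include <stdio.h>
#include <stddef.h>

/* Walk blocks [start_block, start_block + nblocks) of a translation
 * table; nblocks == 0 means "all remaining blocks", matching the
 * convention in _mlx5r_umr_update_mr_pas below. */
static int update_blocks(size_t total, size_t start_block, size_t nblocks)
{
	size_t idx, done = 0;

	if (start_block > total)
		return -1;

	for (idx = 0; idx < total; idx++) {
		if (idx < start_block)
			continue;		/* skip to the window */
		if (nblocks && done >= nblocks)
			break;			/* window exhausted */
		printf("update block %zu\n", idx);
		done++;
	}
	return 0;
}

int main(void)
{
	return update_blocks(8, 2, 3);	/* updates blocks 2, 3, 4 */
}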
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 5be4426a2884..7ef35cddce81 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -32,13 +32,15 @@ static __be64 get_umr_disable_mr_mask(void)
return cpu_to_be64(result);
}
-static __be64 get_umr_update_translation_mask(void)
+static __be64 get_umr_update_translation_mask(struct mlx5_ib_dev *dev)
{
u64 result;
result = MLX5_MKEY_MASK_LEN |
MLX5_MKEY_MASK_PAGE_SIZE |
MLX5_MKEY_MASK_START_ADDR;
+ if (MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5))
+ result |= MLX5_MKEY_MASK_PAGE_SIZE_5;
return cpu_to_be64(result);
}
@@ -654,9 +656,12 @@ static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
flags & MLX5_IB_UPD_XLT_ENABLE || flags & MLX5_IB_UPD_XLT_ADDR;
if (update_translation) {
- wqe->ctrl_seg.mkey_mask |= get_umr_update_translation_mask();
+ wqe->ctrl_seg.mkey_mask |= get_umr_update_translation_mask(dev);
if (!mr->ibmr.length)
MLX5_SET(mkc, &wqe->mkey_seg, length64, 1);
+ if (flags & MLX5_IB_UPD_XLT_KEEP_PGSZ)
+ wqe->ctrl_seg.mkey_mask &=
+ cpu_to_be64(~MLX5_MKEY_MASK_PAGE_SIZE);
}
wqe->ctrl_seg.xlt_octowords =
@@ -664,46 +669,78 @@ static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
wqe->data_seg.byte_count = cpu_to_be32(sg->length);
}
+static void
+_mlx5r_umr_init_wqe(struct mlx5_ib_mr *mr, struct mlx5r_umr_wqe *wqe,
+ struct ib_sge *sg, unsigned int flags,
+ unsigned int page_shift, bool dd)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+
+ mlx5r_umr_set_update_xlt_ctrl_seg(&wqe->ctrl_seg, flags, sg);
+ mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe->mkey_seg, mr, page_shift);
+ if (dd) /* Use the data direct internal kernel PD */
+ MLX5_SET(mkc, &wqe->mkey_seg, pd, dev->ddr.pdn);
+ mlx5r_umr_set_update_xlt_data_seg(&wqe->data_seg, sg);
+}
+
static int
-_mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd)
+_mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd,
+ size_t start_block, size_t nblocks)
{
size_t ent_size = dd ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt);
struct mlx5_ib_dev *dev = mr_to_mdev(mr);
struct device *ddev = &dev->mdev->pdev->dev;
struct mlx5r_umr_wqe wqe = {};
+ size_t processed_blocks = 0;
struct ib_block_iter biter;
+ size_t cur_block_idx = 0;
struct mlx5_ksm *cur_ksm;
struct mlx5_mtt *cur_mtt;
size_t orig_sg_length;
+ size_t total_blocks;
size_t final_size;
void *curr_entry;
struct ib_sge sg;
void *entry;
- u64 offset = 0;
+ u64 offset;
int err = 0;
- entry = mlx5r_umr_create_xlt(dev, &sg,
- ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift),
- ent_size, flags);
+ total_blocks = ib_umem_num_dma_blocks(mr->umem, 1UL << mr->page_shift);
+ if (start_block > total_blocks)
+ return -EINVAL;
+
+ /* nblocks 0 means update all blocks starting from start_block */
+ if (nblocks)
+ total_blocks = nblocks;
+
+ entry = mlx5r_umr_create_xlt(dev, &sg, total_blocks, ent_size, flags);
if (!entry)
return -ENOMEM;
orig_sg_length = sg.length;
- mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
- mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr,
- mr->page_shift);
- if (dd) {
- /* Use the data direct internal kernel PD */
- MLX5_SET(mkc, &wqe.mkey_seg, pd, dev->ddr.pdn);
+
+ _mlx5r_umr_init_wqe(mr, &wqe, &sg, flags, mr->page_shift, dd);
+
+ /* Set initial translation offset to start_block */
+ offset = (u64)start_block * ent_size;
+ mlx5r_umr_update_offset(&wqe.ctrl_seg, offset);
+
+ if (dd)
cur_ksm = entry;
- } else {
+ else
cur_mtt = entry;
- }
-
- mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);
curr_entry = entry;
+
rdma_umem_for_each_dma_block(mr->umem, &biter, BIT(mr->page_shift)) {
+ if (cur_block_idx < start_block) {
+ cur_block_idx++;
+ continue;
+ }
+
+ if (nblocks && processed_blocks >= nblocks)
+ break;
+
if (curr_entry == entry + sg.length) {
dma_sync_single_for_device(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
@@ -725,6 +762,11 @@ _mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd)
if (dd) {
cur_ksm->va = cpu_to_be64(rdma_block_iter_dma_address(&biter));
cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
+ if (mr->umem->is_dmabuf &&
+ (flags & MLX5_IB_UPD_XLT_ZAP)) {
+ cur_ksm->va = 0;
+ cur_ksm->key = 0;
+ }
cur_ksm++;
curr_entry = cur_ksm;
} else {
@@ -736,6 +778,8 @@ _mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd)
cur_mtt++;
curr_entry = cur_mtt;
}
+
+ processed_blocks++;
}
final_size = curr_entry - entry;
@@ -752,13 +796,32 @@ err:
return err;
}
-int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags)
+int mlx5r_umr_update_data_direct_ksm_pas_range(struct mlx5_ib_mr *mr,
+ unsigned int flags,
+ size_t start_block,
+ size_t nblocks)
{
/* No invalidation flow is expected */
- if (WARN_ON(!mr->umem->is_dmabuf) || (flags & MLX5_IB_UPD_XLT_ZAP))
+ if (WARN_ON(!mr->umem->is_dmabuf) || ((flags & MLX5_IB_UPD_XLT_ZAP) &&
+ !(flags & MLX5_IB_UPD_XLT_KEEP_PGSZ)))
return -EINVAL;
- return _mlx5r_umr_update_mr_pas(mr, flags, true);
+ return _mlx5r_umr_update_mr_pas(mr, flags, true, start_block, nblocks);
+}
+
+int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr,
+ unsigned int flags)
+{
+ return mlx5r_umr_update_data_direct_ksm_pas_range(mr, flags, 0, 0);
+}
+
+int mlx5r_umr_update_mr_pas_range(struct mlx5_ib_mr *mr, unsigned int flags,
+ size_t start_block, size_t nblocks)
+{
+ if (WARN_ON(mr->umem->is_odp))
+ return -EINVAL;
+
+ return _mlx5r_umr_update_mr_pas(mr, flags, false, start_block, nblocks);
}
/*
@@ -768,10 +831,7 @@ int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int fla
*/
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
{
- if (WARN_ON(mr->umem->is_odp))
- return -EINVAL;
-
- return _mlx5r_umr_update_mr_pas(mr, flags, false);
+ return mlx5r_umr_update_mr_pas_range(mr, flags, 0, 0);
}
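The split into *_range() workers and thin full-MR wrappers keeps the old entry points source-compatible: nblocks == 0 selects every block from start_block onward, so the wrappers simply pass (0, 0). A caller sketch, assuming a registered non-ODP mlx5_ib_mr; the block numbers are illustrative:

	/* Repost only blocks [16, 16 + 64) of the translation table. */
	err = mlx5r_umr_update_mr_pas_range(mr, xlt_flags, 16, 64);
	if (err)
		return err;

	/* Full-MR update; equivalent to the pre-patch behaviour. */
	err = mlx5r_umr_update_mr_pas(mr, xlt_flags);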
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
@@ -864,3 +924,202 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
mlx5r_umr_unmap_free_xlt(dev, xlt, &sg);
return err;
}
+
+/*
+ * Update only the page-size (log_page_size) field of an existing memory key
+ * using UMR. This is useful when the MR's physical layout stays the same
+ * but the optimal page shift has changed (e.g. dmabuf after pages are
+ * pinned and the HW can switch from 4K to huge-page alignment).
+ */
+int mlx5r_umr_update_mr_page_shift(struct mlx5_ib_mr *mr,
+ unsigned int page_shift,
+ bool dd)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct mlx5r_umr_wqe wqe = {};
+ int err;
+
+ /* Build UMR wqe: we touch only PAGE_SIZE, so use the dedicated mask */
+ wqe.ctrl_seg.mkey_mask = get_umr_update_translation_mask(dev);
+
+ /* MR must be free while page size is modified */
+ wqe.ctrl_seg.flags = MLX5_UMR_CHECK_FREE | MLX5_UMR_INLINE;
+
+ /* Fill mkey segment with the new page size, keep the rest unchanged */
+ MLX5_SET(mkc, &wqe.mkey_seg, log_page_size, page_shift);
+
+ if (dd)
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, dev->ddr.pdn);
+ else
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(mr->ibmr.pd)->pdn);
+
+ MLX5_SET64(mkc, &wqe.mkey_seg, start_addr, mr->ibmr.iova);
+ MLX5_SET64(mkc, &wqe.mkey_seg, len, mr->ibmr.length);
+ MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
+ MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
+ mlx5_mkey_variant(mr->mmkey.key));
+
+ err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
+ if (!err)
+ mr->page_shift = page_shift;
+
+ return err;
+}
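Because only the mkey context is rewritten (no XLT payload is posted), this is a cheap single-WQE operation; MLX5_UMR_CHECK_FREE makes the device refuse the change unless the mkey is free, which is why callers zap the MR first. A hypothetical promotion of an already-zapped dmabuf MR from 4 KiB to 2 MiB pages:

	/* MR was made non-present earlier; now flip its page size. */
	err = mlx5r_umr_update_mr_page_shift(mr, 21 /* 2 MiB */,
					     mr->data_direct);
	if (err)
		return err;	/* mr->page_shift is left untouched on error */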
+
+static inline int
+_mlx5r_dmabuf_umr_update_pas(struct mlx5_ib_mr *mr, unsigned int flags,
+ size_t start_block, size_t nblocks, bool dd)
+{
+ if (dd)
+ return mlx5r_umr_update_data_direct_ksm_pas_range(mr, flags,
+ start_block,
+ nblocks);
+ else
+ return mlx5r_umr_update_mr_pas_range(mr, flags, start_block,
+ nblocks);
+}
+
+/**
+ * _mlx5r_umr_zap_mkey - Make an mkey non-present by zapping its translations
+ *
+ * Zeroes out the first N translation entries of the mkey, where N is
+ * determined by the largest page size supported by the device and the MR
+ * length. It then updates the mkey's page size to the largest possible
+ * value, ensuring the MR is completely non-present and safe for further
+ * updates. This is useful when updating the page size of a dmabuf MR on a
+ * page fault.
+ *
+ * Return: 0 on success, with *nblocks set to the number of entries that were
+ * zapped (0 meaning the whole mkey was zapped); a negative error code on
+ * failure.
+ */
+static int _mlx5r_umr_zap_mkey(struct mlx5_ib_mr *mr,
+ unsigned int flags,
+ unsigned int page_shift,
+ size_t *nblocks,
+ bool dd)
+{
+ unsigned int old_page_shift = mr->page_shift;
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ unsigned int max_page_shift;
+ size_t page_shift_nblocks;
+ unsigned int max_log_size;
+ int access_mode;
+ int err;
+
+ access_mode = dd ? MLX5_MKC_ACCESS_MODE_KSM : MLX5_MKC_ACCESS_MODE_MTT;
+ flags |= MLX5_IB_UPD_XLT_KEEP_PGSZ | MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ATOMIC;
+ max_log_size = get_max_log_entity_size_cap(dev, access_mode);
+ max_page_shift = order_base_2(mr->ibmr.length);
+ max_page_shift = min(max(max_page_shift, page_shift), max_log_size);
+ /* Count blocks in units of max_page_shift; we will zap exactly this
+ * many to make the whole MR non-present.
+ * The block count must be aligned to the UMR entry-count alignment
+ * (derived from MLX5_UMR_FLEX_ALIGNMENT) since it may later be used
+ * as an offset into the XLT.
+ */
+ *nblocks = ib_umem_num_dma_blocks(mr->umem, 1UL << max_page_shift);
+ if (dd)
+ *nblocks = ALIGN(*nblocks, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
+ else
+ *nblocks = ALIGN(*nblocks, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT);
+ page_shift_nblocks = ib_umem_num_dma_blocks(mr->umem,
+ 1UL << page_shift);
+ /* If the number of blocks at the max possible page shift is not
+ * smaller than the number of blocks at the new page size, just walk
+ * all of the mkey's entries instead.
+ */
+ if (*nblocks >= page_shift_nblocks)
+ *nblocks = 0;
+
+ /* Make the first nblocks entries non-present without changing
+ * page size yet.
+ */
+ if (*nblocks)
+ mr->page_shift = max_page_shift;
+ err = _mlx5r_dmabuf_umr_update_pas(mr, flags, 0, *nblocks, dd);
+ if (err) {
+ mr->page_shift = old_page_shift;
+ return err;
+ }
+
+ /* Change page size to the max page size now that the MR is completely
+ * non-present.
+ */
+ if (*nblocks) {
+ err = mlx5r_umr_update_mr_page_shift(mr, max_page_shift, dd);
+ if (err) {
+ mr->page_shift = old_page_shift;
+ return err;
+ }
+ }
+
+ return 0;
+}
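A worked example of the sizing logic may help; a sketch assuming a 64 MiB MR moving to page_shift = 16, a device cap of 31, and KSM entry counts aligning to 4 (all numbers, including the alignment, are illustrative):

	/*
	 * max_page_shift     = min(max(order_base_2(64 MiB) = 26, 16), 31)
	 *                    = 26
	 * blocks at 2^26     = DIV_ROUND_UP(64 MiB, 64 MiB) = 1
	 * *nblocks (aligned) = ALIGN(1, 4) = 4
	 * blocks at 2^16     = 1024
	 *
	 * 4 < 1024, so only the first 4 entries are zapped at shift 26 and
	 * the mkey page size is raised to 2^26; the caller later reloads
	 * those entries at the final shift. Had the aligned count reached
	 * the per-page_shift count, *nblocks would be forced to 0 and the
	 * whole table walked instead.
	 */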
+
+/**
+ * mlx5r_umr_dmabuf_update_pgsz - Safely update DMABUF MR page size and its
+ * entries accordingly
+ * @mr: The memory region to update
+ * @xlt_flags: Translation table update flags
+ * @page_shift: The new (optimized) page shift to use
+ *
+ * This function updates the page size and mkey translation entries for a
+ * DMABUF MR in a safe, multi-step process so that partially updated mappings
+ * are never exposed. The update is performed in 5 steps:
+ * 1. Make the first X entries non-present, where X is calculated to be
+ * minimal according to a large page shift that can be used to cover the
+ * MR length.
+ * 2. Update the page size to the large supported page size
+ * 3. Load the remaining N-X entries according to the (optimized) page_shift
+ * 4. Update the page size according to the (optimized) page_shift
+ * 5. Load the first X entries with the correct translations
+ *
+ * This ensures that at no point is the MR accessible with a partially updated
+ * translation table, maintaining correctness and preventing access to stale or
+ * inconsistent mappings.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int mlx5r_umr_dmabuf_update_pgsz(struct mlx5_ib_mr *mr, u32 xlt_flags,
+ unsigned int page_shift)
+{
+ unsigned int old_page_shift = mr->page_shift;
+ size_t zapped_blocks;
+ size_t total_blocks;
+ int err;
+
+ err = _mlx5r_umr_zap_mkey(mr, xlt_flags, page_shift, &zapped_blocks,
+ mr->data_direct);
+ if (err)
+ return err;
+
+ /* _mlx5r_umr_zap_mkey already enables the mkey */
+ xlt_flags &= ~MLX5_IB_UPD_XLT_ENABLE;
+ mr->page_shift = page_shift;
+ total_blocks = ib_umem_num_dma_blocks(mr->umem, 1UL << mr->page_shift);
+ if (zapped_blocks && zapped_blocks < total_blocks) {
+ /* Update PAS according to the new page size but don't update
+ * the page size in the mkey yet.
+ */
+ err = _mlx5r_dmabuf_umr_update_pas(
+ mr,
+ xlt_flags | MLX5_IB_UPD_XLT_KEEP_PGSZ,
+ zapped_blocks,
+ total_blocks - zapped_blocks,
+ mr->data_direct);
+ if (err)
+ goto err;
+ }
+
+ err = mlx5r_umr_update_mr_page_shift(mr, mr->page_shift,
+ mr->data_direct);
+ if (err)
+ goto err;
+ err = _mlx5r_dmabuf_umr_update_pas(mr, xlt_flags, 0, zapped_blocks,
+ mr->data_direct);
+ if (err)
+ goto err;
+
+ return 0;
+err:
+ mr->page_shift = old_page_shift;
+ return err;
+}
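Putting it together, a hypothetical dmabuf fault-path caller (the best-page-size helper name is illustrative, not part of this patch):

	unsigned int new_shift = mlx5_umem_find_best_pgsz_shift(mr->umem);

	if (new_shift != mr->page_shift)
		/* Five-step safe switch: zap head, raise page size, load
		 * tail, set final page size, reload head.
		 */
		err = mlx5r_umr_dmabuf_update_pgsz(mr, xlt_flags, new_shift);
	else
		/* Layout unchanged; a plain PAS update suffices. */
		err = mlx5r_umr_update_mr_pas(mr, xlt_flags);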
diff --git a/drivers/infiniband/hw/mlx5/umr.h b/drivers/infiniband/hw/mlx5/umr.h
index 4a02c9b5aad8..e9361f0140e7 100644
--- a/drivers/infiniband/hw/mlx5/umr.h
+++ b/drivers/infiniband/hw/mlx5/umr.h
@@ -94,9 +94,20 @@ struct mlx5r_umr_wqe {
int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
int access_flags);
-int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
+int mlx5r_umr_update_data_direct_ksm_pas_range(struct mlx5_ib_mr *mr,
+ unsigned int flags,
+ size_t start_block,
+ size_t nblocks);
int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags);
+int mlx5r_umr_update_mr_pas_range(struct mlx5_ib_mr *mr, unsigned int flags,
+ size_t start_block, size_t nblocks);
+int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
+int mlx5r_umr_update_mr_page_shift(struct mlx5_ib_mr *mr,
+ unsigned int page_shift,
+ bool dd);
+int mlx5r_umr_dmabuf_update_pgsz(struct mlx5_ib_mr *mr, u32 xlt_flags,
+ unsigned int page_shift);
#endif /* _MLX5_IB_UMR_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 6a1e2e79ddc3..dd572d76866c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -825,7 +825,8 @@ static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
}
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
+ u64 virt, int acc, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(pd->device);
struct ib_block_iter biter;
@@ -838,6 +839,9 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int err = 0;
int write_mtt_size;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (udata->inlen < sizeof ucmd) {
if (!context->reg_mr_warned) {
mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 979de8f8df14..46d911fd38de 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -847,13 +847,17 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr)
}
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
- u64 usr_addr, int acc, struct ib_udata *udata)
+ u64 usr_addr, int acc, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
int status = -ENOMEM;
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_mr *mr;
struct ocrdma_pd *pd;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
pd = get_ocrdma_pd(ibpd);
if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 0644346d8d98..6c5c3755b8a9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -98,7 +98,8 @@ int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *,
int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *);
+ u64 virt, int acc, struct ib_dmah *dmah,
+ struct ib_udata *);
struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 568a5b18803f..ab9bf0922979 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -2953,13 +2953,17 @@ done:
}
struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
- u64 usr_addr, int acc, struct ib_udata *udata)
+ u64 usr_addr, int acc, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
struct qedr_mr *mr;
struct qedr_pd *pd;
int rc = -ENOMEM;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
pd = get_qedr_pd(ibpd);
DP_DEBUG(dev, QEDR_MSG_MR,
"qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 5731458abb06..62420a15101b 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -79,7 +79,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *);
+ u64 virt, int acc, struct ib_dmah *dmah,
+ struct ib_udata *);
int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
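mthca, ocrdma, and qedr all gain the same guard: drivers that do not implement DMA handles reject a non-NULL dmah up front so the core sees a uniform error. The shared shape, sketched with an elided driver name:

	struct ib_mr *drv_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_dmah *dmah,
				      struct ib_udata *udata)
	{
		if (dmah)	/* no DMA-handle support in this driver */
			return ERR_PTR(-EOPNOTSUPP);

		/* ... existing registration path, unchanged ... */
	}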
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
deleted file mode 100644
index 6c4895777042..000000000000
--- a/drivers/infiniband/hw/qib/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config INFINIBAND_QIB
- tristate "Intel PCIe HCA support"
- depends on 64BIT && INFINIBAND_RDMAVT
- depends on PCI
- help
- This is a low-level driver for Intel PCIe QLE InfiniBand host
- channel adapters. This driver does not support the Intel
- HyperTransport card (model QHT7140).
-
-config INFINIBAND_QIB_DCA
- bool "QIB DCA support"
- depends on INFINIBAND_QIB && DCA && SMP && !(INFINIBAND_QIB=y && DCA=m)
- default y
- help
- Setting this enables DCA support on some Intel chip sets
- with the iba7322 HCA.
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
deleted file mode 100644
index 80ffab88fbca..000000000000
--- a/drivers/infiniband/hw/qib/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
-
-ib_qib-y := qib_diag.o qib_driver.o qib_eeprom.o \
- qib_file_ops.o qib_fs.o qib_init.o qib_intr.o \
- qib_mad.o qib_pcie.o qib_pio_copy.o \
- qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o \
- qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
- qib_user_pages.o qib_user_sdma.o qib_iba7220.o \
- qib_sd7220.o qib_iba7322.o qib_verbs.o
-
-# 6120 has no fallback if no MSI interrupts, others can do INTx
-ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o
-
-ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o
-ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o
-ib_qib-$(CONFIG_DEBUG_FS) += qib_debugfs.o
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
deleted file mode 100644
index 8ee4edd7883c..000000000000
--- a/drivers/infiniband/hw/qib/qib.h
+++ /dev/null
@@ -1,1492 +0,0 @@
-#ifndef _QIB_KERNEL_H
-#define _QIB_KERNEL_H
-/*
- * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This header file is the base header file for qlogic_ib kernel code
- * qib_user.h serves a similar purpose for user code.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/fs.h>
-#include <linux/completion.h>
-#include <linux/kref.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#include <linux/xarray.h>
-#include <rdma/ib_hdrs.h>
-#include <rdma/rdma_vt.h>
-
-#include "qib_common.h"
-#include "qib_verbs.h"
-
-/* only s/w major version of QLogic_IB we can handle */
-#define QIB_CHIP_VERS_MAJ 2U
-
-/* don't care about this except printing */
-#define QIB_CHIP_VERS_MIN 0U
-
-/* The Organization Unique Identifier (Mfg code), and its position in GUID */
-#define QIB_OUI 0x001175
-#define QIB_OUI_LSB 40
-
-/*
- * per driver stats, either not device nor port-specific, or
- * summed over all of the devices and ports.
- * They are described by name via ipathfs filesystem, so layout
- * and number of elements can change without breaking compatibility.
- * If members are added or deleted qib_statnames[] in qib_fs.c must
- * change to match.
- */
-struct qlogic_ib_stats {
- __u64 sps_ints; /* number of interrupts handled */
- __u64 sps_errints; /* number of error interrupts */
- __u64 sps_txerrs; /* tx-related packet errors */
- __u64 sps_rcverrs; /* non-crc rcv packet errors */
- __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
- __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
- __u64 sps_ctxts; /* number of contexts currently open */
- __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
- __u64 sps_buffull;
- __u64 sps_hdrfull;
-};
-
-extern struct qlogic_ib_stats qib_stats;
-extern const struct pci_error_handlers qib_pci_err_handler;
-
-#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
-/*
- * First-cut critierion for "device is active" is
- * two thousand dwords combined Tx, Rx traffic per
- * 5-second interval. SMA packets are 64 dwords,
- * and occur "a few per second", presumably each way.
- */
-#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
-
-/*
- * Below contains all data related to a single context (formerly called port).
- */
-
-#ifdef CONFIG_DEBUG_FS
-struct qib_opcode_stats_perctx;
-#endif
-
-struct qib_ctxtdata {
- void **rcvegrbuf;
- dma_addr_t *rcvegrbuf_phys;
- /* rcvhdrq base, needs mmap before useful */
- void *rcvhdrq;
- /* kernel virtual address where hdrqtail is updated */
- void *rcvhdrtail_kvaddr;
- /*
- * temp buffer for expected send setup, allocated at open, instead
- * of each setup call
- */
- void *tid_pg_list;
- /*
- * Shared page for kernel to signal user processes that send buffers
- * need disarming. The process should call QIB_CMD_DISARM_BUFS
- * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
- */
- unsigned long *user_event_mask;
- /* when waiting for rcv or pioavail */
- wait_queue_head_t wait;
- /*
- * rcvegr bufs base, physical, must fit
- * in 44 bits so 32 bit programs mmap64 44 bit works)
- */
- dma_addr_t rcvegr_phys;
- /* mmap of hdrq, must fit in 44 bits */
- dma_addr_t rcvhdrq_phys;
- dma_addr_t rcvhdrqtailaddr_phys;
-
- /*
- * number of opens (including slave sub-contexts) on this instance
- * (ignoring forks, dup, etc. for now)
- */
- int cnt;
- /*
- * how much space to leave at start of eager TID entries for
- * protocol use, on each TID
- */
- /* instead of calculating it */
- unsigned ctxt;
- /* local node of context */
- int node_id;
- /* non-zero if ctxt is being shared. */
- u16 subctxt_cnt;
- /* non-zero if ctxt is being shared. */
- u16 subctxt_id;
- /* number of eager TID entries. */
- u16 rcvegrcnt;
- /* index of first eager TID entry. */
- u16 rcvegr_tid_base;
- /* number of pio bufs for this ctxt (all procs, if shared) */
- u32 piocnt;
- /* first pio buffer for this ctxt */
- u32 pio_base;
- /* chip offset of PIO buffers for this ctxt */
- u32 piobufs;
- /* how many alloc_pages() chunks in rcvegrbuf_pages */
- u32 rcvegrbuf_chunks;
- /* how many egrbufs per chunk */
- u16 rcvegrbufs_perchunk;
- /* ilog2 of above */
- u16 rcvegrbufs_perchunk_shift;
- /* order for rcvegrbuf_pages */
- size_t rcvegrbuf_size;
- /* rcvhdrq size (for freeing) */
- size_t rcvhdrq_size;
- /* per-context flags for fileops/intr communication */
- unsigned long flag;
- /* next expected TID to check when looking for free */
- u32 tidcursor;
- /* WAIT_RCV that timed out, no interrupt */
- u32 rcvwait_to;
- /* WAIT_PIO that timed out, no interrupt */
- u32 piowait_to;
- /* WAIT_RCV already happened, no wait */
- u32 rcvnowait;
- /* WAIT_PIO already happened, no wait */
- u32 pionowait;
- /* total number of polled urgent packets */
- u32 urgent;
- /* saved total number of polled urgent packets for poll edge trigger */
- u32 urgent_poll;
- /* pid of process using this ctxt */
- pid_t pid;
- pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
- /* same size as task_struct .comm[], command that opened context */
- char comm[TASK_COMM_LEN];
- /* pkeys set by this use of this ctxt */
- u16 pkeys[4];
- /* so file ops can get at unit */
- struct qib_devdata *dd;
- /* so funcs that need physical port can get it easily */
- struct qib_pportdata *ppd;
- /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
- void *subctxt_uregbase;
- /* An array of pages for the eager receive buffers * N */
- void *subctxt_rcvegrbuf;
- /* An array of pages for the eager header queue entries * N */
- void *subctxt_rcvhdr_base;
- /* The version of the library which opened this ctxt */
- u32 userversion;
- /* Bitmask of active slaves */
- u32 active_slaves;
- /* Type of packets or conditions we want to poll for */
- u16 poll_type;
- /* receive packet sequence counter */
- u8 seq_cnt;
- u8 redirect_seq_cnt;
- /* ctxt rcvhdrq head offset */
- u32 head;
- /* QPs waiting for context processing */
- struct list_head qp_wait_list;
-#ifdef CONFIG_DEBUG_FS
- /* verbs stats per CTX */
- struct qib_opcode_stats_perctx *opstats;
-#endif
-};
-
-struct rvt_sge_state;
-
-struct qib_sdma_txreq {
- int flags;
- int sg_count;
- dma_addr_t addr;
- void (*callback)(struct qib_sdma_txreq *, int);
- u16 start_idx; /* sdma private */
- u16 next_descq_idx; /* sdma private */
- struct list_head list; /* sdma private */
-};
-
-struct qib_sdma_desc {
- __le64 qw[2];
-};
-
-struct qib_verbs_txreq {
- struct qib_sdma_txreq txreq;
- struct rvt_qp *qp;
- struct rvt_swqe *wqe;
- u32 dwords;
- u16 hdr_dwords;
- u16 hdr_inx;
- struct qib_pio_header *align_buf;
- struct rvt_mregion *mr;
- struct rvt_sge_state *ss;
-};
-
-#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
-#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
-#define QIB_SDMA_TXREQ_F_INTREQ 0x4
-#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
-#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
-
-#define QIB_SDMA_TXREQ_S_OK 0
-#define QIB_SDMA_TXREQ_S_SENDERROR 1
-#define QIB_SDMA_TXREQ_S_ABORTED 2
-#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
-
-/*
- * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
- * Mostly for MADs that set or query link parameters, also ipath
- * config interfaces
- */
-#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
-#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */
-#define QIB_IB_CFG_LWID 3 /* currently active Link-width */
-#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
-#define QIB_IB_CFG_SPD 5 /* current Link spd */
-#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
-#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
-#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
-#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
-#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */
-#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
-#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
-#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
-#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
-#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
-#define QIB_IB_CFG_PKEYS 16 /* update partition keys */
-#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */
-#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */
-#define QIB_IB_CFG_VL_HIGH_LIMIT 19
-#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
-#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */
-
-/*
- * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16
- * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for
- * QIB_IB_CFG_LINKDEFAULT cmd
- */
-#define IB_LINKCMD_DOWN (0 << 16)
-#define IB_LINKCMD_ARMED (1 << 16)
-#define IB_LINKCMD_ACTIVE (2 << 16)
-#define IB_LINKINITCMD_NOP 0
-#define IB_LINKINITCMD_POLL 1
-#define IB_LINKINITCMD_SLEEP 2
-#define IB_LINKINITCMD_DISABLE 3
-
-/*
- * valid states passed to qib_set_linkstate() user call
- */
-#define QIB_IB_LINKDOWN 0
-#define QIB_IB_LINKARM 1
-#define QIB_IB_LINKACTIVE 2
-#define QIB_IB_LINKDOWN_ONLY 3
-#define QIB_IB_LINKDOWN_SLEEP 4
-#define QIB_IB_LINKDOWN_DISABLE 5
-
-/*
- * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
- * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
- * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
- * are also the possible values for qib_link_speed_enabled and active
- * The values were chosen to match values used within the IB spec.
- */
-#define QIB_IB_SDR 1
-#define QIB_IB_DDR 2
-#define QIB_IB_QDR 4
-
-#define QIB_DEFAULT_MTU 4096
-
-/* max number of IB ports supported per HCA */
-#define QIB_MAX_IB_PORTS 2
-
-/*
- * Possible IB config parameters for f_get/set_ib_table()
- */
-#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */
-#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */
-
-/*
- * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
- * these are bits so they can be combined, e.g.
- * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB
- */
-#define QIB_RCVCTRL_TAILUPD_ENB 0x01
-#define QIB_RCVCTRL_TAILUPD_DIS 0x02
-#define QIB_RCVCTRL_CTXT_ENB 0x04
-#define QIB_RCVCTRL_CTXT_DIS 0x08
-#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
-#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
-#define QIB_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
-#define QIB_RCVCTRL_PKEY_DIS 0x80
-#define QIB_RCVCTRL_BP_ENB 0x0100
-#define QIB_RCVCTRL_BP_DIS 0x0200
-#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
-#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
-
-/*
- * Possible "operations" for f_sendctrl(ppd, op, var)
- * these are bits so they can be combined, e.g.
- * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB
- * Some operations (e.g. DISARM, ABORT) are known to
- * be "one-shot", so do not modify shadow.
- */
-#define QIB_SENDCTRL_DISARM (0x1000)
-#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
- /* available (0x2000) */
-#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
-#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
-#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
-#define QIB_SENDCTRL_SEND_DIS (0x20000)
-#define QIB_SENDCTRL_SEND_ENB (0x40000)
-#define QIB_SENDCTRL_FLUSH (0x80000)
-#define QIB_SENDCTRL_CLEAR (0x100000)
-#define QIB_SENDCTRL_DISARM_ALL (0x200000)
-
-/*
- * These are the generic indices for requesting per-port
- * counter values via the f_portcntr function. They
- * are always returned as 64 bit values, although most
- * are 32 bit counters.
- */
-/* send-related counters */
-#define QIBPORTCNTR_PKTSEND 0U
-#define QIBPORTCNTR_WORDSEND 1U
-#define QIBPORTCNTR_PSXMITDATA 2U
-#define QIBPORTCNTR_PSXMITPKTS 3U
-#define QIBPORTCNTR_PSXMITWAIT 4U
-#define QIBPORTCNTR_SENDSTALL 5U
-/* receive-related counters */
-#define QIBPORTCNTR_PKTRCV 6U
-#define QIBPORTCNTR_PSRCVDATA 7U
-#define QIBPORTCNTR_PSRCVPKTS 8U
-#define QIBPORTCNTR_RCVEBP 9U
-#define QIBPORTCNTR_RCVOVFL 10U
-#define QIBPORTCNTR_WORDRCV 11U
-/* IB link related error counters */
-#define QIBPORTCNTR_RXLOCALPHYERR 12U
-#define QIBPORTCNTR_RXVLERR 13U
-#define QIBPORTCNTR_ERRICRC 14U
-#define QIBPORTCNTR_ERRVCRC 15U
-#define QIBPORTCNTR_ERRLPCRC 16U
-#define QIBPORTCNTR_BADFORMAT 17U
-#define QIBPORTCNTR_ERR_RLEN 18U
-#define QIBPORTCNTR_IBSYMBOLERR 19U
-#define QIBPORTCNTR_INVALIDRLEN 20U
-#define QIBPORTCNTR_UNSUPVL 21U
-#define QIBPORTCNTR_EXCESSBUFOVFL 22U
-#define QIBPORTCNTR_ERRLINK 23U
-#define QIBPORTCNTR_IBLINKDOWN 24U
-#define QIBPORTCNTR_IBLINKERRRECOV 25U
-#define QIBPORTCNTR_LLI 26U
-/* other error counters */
-#define QIBPORTCNTR_RXDROPPKT 27U
-#define QIBPORTCNTR_VL15PKTDROP 28U
-#define QIBPORTCNTR_ERRPKEY 29U
-#define QIBPORTCNTR_KHDROVFL 30U
-/* sampling counters (these are actually control registers) */
-#define QIBPORTCNTR_PSINTERVAL 31U
-#define QIBPORTCNTR_PSSTART 32U
-#define QIBPORTCNTR_PSSTAT 33U
-
-/* how often we check for packet activity for "power on hours (in seconds) */
-#define ACTIVITY_TIMER 5
-
-#define MAX_NAME_SIZE 64
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-struct qib_irq_notify;
-#endif
-
-struct qib_msix_entry {
- void *arg;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- int dca;
- int rcv;
- struct qib_irq_notify *notifier;
-#endif
- cpumask_var_t mask;
-};
-
-/* Below is an opaque struct. Each chip (device) can maintain
- * private data needed for its operation, but not germane to the
- * rest of the driver. For convenience, we define another that
- * is chip-specific, per-port
- */
-struct qib_chip_specific;
-struct qib_chipport_specific;
-
-enum qib_sdma_states {
- qib_sdma_state_s00_hw_down,
- qib_sdma_state_s10_hw_start_up_wait,
- qib_sdma_state_s20_idle,
- qib_sdma_state_s30_sw_clean_up_wait,
- qib_sdma_state_s40_hw_clean_up_wait,
- qib_sdma_state_s50_hw_halt_wait,
- qib_sdma_state_s99_running,
-};
-
-enum qib_sdma_events {
- qib_sdma_event_e00_go_hw_down,
- qib_sdma_event_e10_go_hw_start,
- qib_sdma_event_e20_hw_started,
- qib_sdma_event_e30_go_running,
- qib_sdma_event_e40_sw_cleaned,
- qib_sdma_event_e50_hw_cleaned,
- qib_sdma_event_e60_hw_halted,
- qib_sdma_event_e70_go_idle,
- qib_sdma_event_e7220_err_halted,
- qib_sdma_event_e7322_err_halted,
- qib_sdma_event_e90_timer_tick,
-};
-
-struct sdma_set_state_action {
- unsigned op_enable:1;
- unsigned op_intenable:1;
- unsigned op_halt:1;
- unsigned op_drain:1;
- unsigned go_s99_running_tofalse:1;
- unsigned go_s99_running_totrue:1;
-};
-
-struct qib_sdma_state {
- struct kref kref;
- struct completion comp;
- enum qib_sdma_states current_state;
- struct sdma_set_state_action *set_state_action;
- unsigned current_op;
- unsigned go_s99_running;
- unsigned first_sendbuf;
- unsigned last_sendbuf; /* really last +1 */
- /* debugging/devel */
- enum qib_sdma_states previous_state;
- unsigned previous_op;
- enum qib_sdma_events last_event;
-};
-
-struct xmit_wait {
- struct timer_list timer;
- u64 counter;
- u8 flags;
- struct cache {
- u64 psxmitdata;
- u64 psrcvdata;
- u64 psxmitpkts;
- u64 psrcvpkts;
- u64 psxmitwait;
- } counter_cache;
-};
-
-/*
- * The structure below encapsulates data relevant to a physical IB Port.
- * Current chips support only one such port, but the separation
- * clarifies things a bit. Note that to conform to IB conventions,
- * port-numbers are one-based. The first or only port is port1.
- */
-struct qib_pportdata {
- struct qib_ibport ibport_data;
-
- struct qib_devdata *dd;
- struct qib_chippport_specific *cpspec; /* chip-specific per-port */
-
- /* GUID for this interface, in network order */
- __be64 guid;
-
- /* QIB_POLL, etc. link-state specific flags, per port */
- u32 lflags;
- /* qib_lflags driver is waiting for */
- u32 state_wanted;
- spinlock_t lflags_lock;
-
- /* ref count for each pkey */
- atomic_t pkeyrefs[4];
-
- /*
- * this address is mapped readonly into user processes so they can
- * get status cheaply, whenever they want. One qword of status per port
- */
- u64 *statusp;
-
- /* SendDMA related entries */
-
- /* read mostly */
- struct qib_sdma_desc *sdma_descq;
- struct workqueue_struct *qib_wq;
- struct qib_sdma_state sdma_state;
- dma_addr_t sdma_descq_phys;
- volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
- dma_addr_t sdma_head_phys;
- u16 sdma_descq_cnt;
-
- /* read/write using lock */
- spinlock_t sdma_lock ____cacheline_aligned_in_smp;
- struct list_head sdma_activelist;
- struct list_head sdma_userpending;
- u64 sdma_descq_added;
- u64 sdma_descq_removed;
- u16 sdma_descq_tail;
- u16 sdma_descq_head;
- u8 sdma_generation;
- u8 sdma_intrequest;
-
- struct tasklet_struct sdma_sw_clean_up_task
- ____cacheline_aligned_in_smp;
-
- wait_queue_head_t state_wait; /* for state_wanted */
-
- /* HoL blocking for SMP replies */
- unsigned hol_state;
- struct timer_list hol_timer;
-
- /*
- * Shadow copies of registers; size indicates read access size.
- * Most of them are readonly, but some are write-only register,
- * where we manipulate the bits in the shadow copy, and then write
- * the shadow copy to qlogic_ib.
- *
- * We deliberately make most of these 32 bits, since they have
- * restricted range. For any that we read, we won't to generate 32
- * bit accesses, since Opteron will generate 2 separate 32 bit HT
- * transactions for a 64 bit read, and we want to avoid unnecessary
- * bus transactions.
- */
-
- /* This is the 64 bit group */
- /* last ibcstatus. opaque outside chip-specific code */
- u64 lastibcstat;
-
- /* these are the "32 bit" regs */
-
- /*
- * the following two are 32-bit bitmasks, but {test,clear,set}_bit
- * all expect bit fields to be "unsigned long"
- */
- unsigned long p_rcvctrl; /* shadow per-port rcvctrl */
- unsigned long p_sendctrl; /* shadow per-port sendctrl */
-
- u32 ibmtu; /* The MTU programmed for this unit */
- /*
- * Current max size IB packet (in bytes) including IB headers, that
- * we can send. Changes when ibmtu changes.
- */
- u32 ibmaxlen;
- /*
- * ibmaxlen at init time, limited by chip and by receive buffer
- * size. Not changed after init.
- */
- u32 init_ibmaxlen;
- /* LID programmed for this instance */
- u16 lid;
- /* list of pkeys programmed; 0 if not set */
- u16 pkeys[4];
- /* LID mask control */
- u8 lmc;
- u8 link_width_supported;
- u16 link_speed_supported;
- u8 link_width_enabled;
- u16 link_speed_enabled;
- u8 link_width_active;
- u16 link_speed_active;
- u8 vls_supported;
- u8 vls_operational;
- /* Rx Polarity inversion (compensate for ~tx on partner) */
- u8 rx_pol_inv;
-
- u8 hw_pidx; /* physical port index */
- u32 port; /* IB port number and index into dd->pports - 1 */
-
- u8 delay_mult;
-
- /* used to override LED behavior */
- u8 led_override; /* Substituted for normal value, if non-zero */
- u16 led_override_timeoff; /* delta to next timer event */
- u8 led_override_vals[2]; /* Alternates per blink-frame */
- u8 led_override_phase; /* Just counts, LSB picks from vals[] */
- atomic_t led_override_timer_active;
- /* Used to flash LEDs in override mode */
- struct timer_list led_override_timer;
- struct xmit_wait cong_stats;
- struct timer_list symerr_clear_timer;
-
- /* Synchronize access between driver writes and sysfs reads */
- spinlock_t cc_shadow_lock
- ____cacheline_aligned_in_smp;
-
- /* Shadow copy of the congestion control table */
- struct cc_table_shadow *ccti_entries_shadow;
-
- /* Shadow copy of the congestion control entries */
- struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow;
-
- /* List of congestion control table entries */
- struct ib_cc_table_entry_shadow *ccti_entries;
-
- /* 16 congestion entries with each entry corresponding to a SL */
- struct ib_cc_congestion_entry_shadow *congestion_entries;
-
- /* Maximum number of congestion control entries that the agent expects
- * the manager to send.
- */
- u16 cc_supported_table_entries;
-
- /* Total number of congestion control table entries */
- u16 total_cct_entry;
-
- /* Bit map identifying service level */
- u16 cc_sl_control_map;
-
- /* maximum congestion control table index */
- u16 ccti_limit;
-
- /* CA's max number of 64 entry units in the congestion control table */
- u8 cc_max_table_entries;
-};
-
-/* Observers. Not to be taken lightly, possibly not to ship. */
-/*
- * If a diag read or write is to (bottom <= offset <= top),
- * the "hook" is called, allowing, e.g. shadows to be
- * updated in sync with the driver. struct diag_observer
- * is the "visible" part.
- */
-struct diag_observer;
-
-typedef int (*diag_hook) (struct qib_devdata *dd,
- const struct diag_observer *op,
- u32 offs, u64 *data, u64 mask, int only_32);
-
-struct diag_observer {
- diag_hook hook;
- u32 bottom;
- u32 top;
-};
-
-extern int qib_register_observer(struct qib_devdata *dd,
- const struct diag_observer *op);
-
-/* Only declared here, not defined. Private to diags */
-struct diag_observer_list_elt;
-
-/* device data struct now contains only "general per-device" info.
- * fields related to a physical IB port are in a qib_pportdata struct,
- * described above) while fields only used by a particular chip-type are in
- * a qib_chipdata struct, whose contents are opaque to this file.
- */
-struct qib_devdata {
- struct qib_ibdev verbs_dev; /* must be first */
- struct list_head list;
- /* pointers to related structs for this device */
- /* pci access data structure */
- struct pci_dev *pcidev;
- struct cdev *user_cdev;
- struct cdev *diag_cdev;
- struct device *user_device;
- struct device *diag_device;
-
- /* mem-mapped pointer to base of chip regs */
- u64 __iomem *kregbase;
- /* end of mem-mapped chip space excluding sendbuf and user regs */
- u64 __iomem *kregend;
- /* physical address of chip for io_remap, etc. */
- resource_size_t physaddr;
- /* qib_cfgctxts pointers */
- struct qib_ctxtdata **rcd; /* Receive Context Data */
-
- /* qib_pportdata, points to array of (physical) port-specific
- * data structs, indexed by pidx (0..n-1)
- */
- struct qib_pportdata *pport;
- struct qib_chip_specific *cspec; /* chip-specific */
-
- /* kvirt address of 1st 2k pio buffer */
- void __iomem *pio2kbase;
- /* kvirt address of 1st 4k pio buffer */
- void __iomem *pio4kbase;
- /* mem-mapped pointer to base of PIO buffers (if using WC PAT) */
- void __iomem *piobase;
- /* mem-mapped pointer to base of user chip regs (if using WC PAT) */
- u64 __iomem *userbase;
- void __iomem *piovl15base; /* base of VL15 buffers, if not WC */
- /*
- * points to area where PIOavail registers will be DMA'ed.
- * Has to be on a page of it's own, because the page will be
- * mapped into user program space. This copy is *ONLY* ever
- * written by DMA, not by the driver! Need a copy per device
- * when we get to multiple devices
- */
- volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */
- /* physical address where updates occur */
- dma_addr_t pioavailregs_phys;
-
- /* device-specific implementations of functions needed by
- * common code. Contrary to previous consensus, we can't
- * really just point to a device-specific table, because we
- * may need to "bend", e.g. *_f_put_tid
- */
- /* fallback to alternate interrupt type if possible */
- int (*f_intr_fallback)(struct qib_devdata *);
- /* hard reset chip */
- int (*f_reset)(struct qib_devdata *);
- void (*f_quiet_serdes)(struct qib_pportdata *);
- int (*f_bringup_serdes)(struct qib_pportdata *);
- int (*f_early_init)(struct qib_devdata *);
- void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
- void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
- u32, unsigned long);
- void (*f_cleanup)(struct qib_devdata *);
- void (*f_setextled)(struct qib_pportdata *, u32);
- /* fill out chip-specific fields */
- int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
- /* free irq */
- void (*f_free_irq)(struct qib_devdata *);
- struct qib_message_header *(*f_get_msgheader)
- (struct qib_devdata *, __le32 *);
- void (*f_config_ctxts)(struct qib_devdata *);
- int (*f_get_ib_cfg)(struct qib_pportdata *, int);
- int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
- int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
- int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
- int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
- u32 (*f_iblink_state)(u64);
- u8 (*f_ibphys_portstate)(u64);
- void (*f_xgxs_reset)(struct qib_pportdata *);
- /* per chip actions needed for IB Link up/down changes */
- int (*f_ib_updown)(struct qib_pportdata *, int, u64);
- u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
- /* Read/modify/write of GPIO pins (potentially chip-specific */
- int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
- u32 mask);
- /* Enable writes to config EEPROM (if supported) */
- int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
- /*
- * modify rcvctrl shadow[s] and write to appropriate chip-regs.
- * see above QIB_RCVCTRL_xxx_ENB/DIS for operations.
- * (ctxt == -1) means "all contexts", only meaningful for
- * clearing. Could remove if chip_spec shutdown properly done.
- */
- void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
- int ctxt);
- /* Read/modify/write sendctrl appropriately for op and port. */
- void (*f_sendctrl)(struct qib_pportdata *, u32 op);
- void (*f_set_intr_state)(struct qib_devdata *, u32);
- void (*f_set_armlaunch)(struct qib_devdata *, u32);
- void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
- int (*f_late_initreg)(struct qib_devdata *);
- int (*f_init_sdma_regs)(struct qib_pportdata *);
- u16 (*f_sdma_gethead)(struct qib_pportdata *);
- int (*f_sdma_busy)(struct qib_pportdata *);
- void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
- void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
- void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
- void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
- void (*f_sdma_hw_start_up)(struct qib_pportdata *);
- void (*f_sdma_init_early)(struct qib_pportdata *);
- void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
- void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
- u32 (*f_hdrqempty)(struct qib_ctxtdata *);
- u64 (*f_portcntr)(struct qib_pportdata *, u32);
- u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
- u64 **);
- u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
- char **, u64 **);
- u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
- void (*f_initvl15_bufs)(struct qib_devdata *);
- void (*f_init_ctxt)(struct qib_ctxtdata *);
- void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
- struct qib_ctxtdata *);
- void (*f_writescratch)(struct qib_devdata *, u32);
- int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- int (*f_notify_dca)(struct qib_devdata *, unsigned long event);
-#endif
-
- char *boardname; /* human readable board info */
-
- /* template for writing TIDs */
- u64 tidtemplate;
- /* value to write to free TIDs */
- u64 tidinvalid;
-
- /* number of registers used for pioavail */
- u32 pioavregs;
- /* device (not port) flags, basically device capabilities */
- u32 flags;
- /* last buffer for user use */
- u32 lastctxt_piobuf;
-
- /* reset value */
- u64 z_int_counter;
- /* percpu intcounter */
- u64 __percpu *int_counter;
-
- /* pio bufs allocated per ctxt */
- u32 pbufsctxt;
- /* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
- u32 ctxts_extrabuf;
- /*
- * number of ctxts configured as max; zero is set to number chip
- * supports, less gives more pio bufs/ctxt, etc.
- */
- u32 cfgctxts;
- /*
- * number of ctxts available for PSM open
- */
- u32 freectxts;
-
- /*
- * hint that we should update pioavailshadow before
- * looking for a PIO buffer
- */
- u32 upd_pio_shadow;
-
- /* internal debugging stats */
- u32 maxpkts_call;
- u32 avgpkts_call;
- u64 nopiobufs;
-
- /* PCI Vendor ID (here for NodeInfo) */
- u16 vendorid;
- /* PCI Device ID (here for NodeInfo) */
- u16 deviceid;
- /* for write combining settings */
- int wc_cookie;
- unsigned long wc_base;
- unsigned long wc_len;
-
- /* shadow copy of struct page *'s for exp tid pages */
- struct page **pageshadow;
- /* shadow copy of dma handles for exp tid pages */
- dma_addr_t *physshadow;
- u64 __iomem *egrtidbase;
- spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */
- /* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
- spinlock_t uctxt_lock; /* rcd and user context changes */
- /*
- * per unit status, see also portdata statusp
- * mapped readonly into user processes so they can get unit and
- * IB link status cheaply
- */
- u64 *devstatusp;
- char *freezemsg; /* freeze msg if hw error put chip in freeze */
- u32 freezelen; /* max length of freezemsg */
- /* timer used to prevent stats overflow, error throttling, etc. */
- struct timer_list stats_timer;
-
- /* timer to verify interrupts work, and fallback if possible */
- struct timer_list intrchk_timer;
- unsigned long ureg_align; /* user register alignment */
-
- /*
- * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
- * pio_writing.
- */
- spinlock_t pioavail_lock;
- /*
- * index of last buffer to optimize search for next
- */
- u32 last_pio;
- /*
- * min kernel pio buffer to optimize search
- */
- u32 min_kernel_pio;
- /*
- * Shadow copies of registers; size indicates read access size.
- * Most of them are readonly, but some are write-only register,
- * where we manipulate the bits in the shadow copy, and then write
- * the shadow copy to qlogic_ib.
- *
- * We deliberately make most of these 32 bits, since they have
- * restricted range. For any that we read, we won't to generate 32
- * bit accesses, since Opteron will generate 2 separate 32 bit HT
- * transactions for a 64 bit read, and we want to avoid unnecessary
- * bus transactions.
- */
-
- /* This is the 64 bit group */
-
- unsigned long pioavailshadow[6];
- /* bitmap of send buffers available for the kernel to use with PIO. */
- unsigned long pioavailkernel[6];
- /* bitmap of send buffers which need to be disarmed. */
- unsigned long pio_need_disarm[3];
- /* bitmap of send buffers which are being written to. */
- unsigned long pio_writing[3];
- /* kr_revision shadow */
- u64 revision;
- /* Base GUID for device (from eeprom, network order) */
- __be64 base_guid;
-
- /*
- * kr_sendpiobufbase value (chip offset of pio buffers), and the
- * base of the 2KB buffer s(user processes only use 2K)
- */
- u64 piobufbase;
- u32 pio2k_bufbase;
-
- /* these are the "32 bit" regs */
-
- /* number of GUIDs in the flash for this interface */
- u32 nguid;
- /*
- * the following two are 32-bit bitmasks, but {test,clear,set}_bit
- * all expect bit fields to be "unsigned long"
- */
- unsigned long rcvctrl; /* shadow per device rcvctrl */
- unsigned long sendctrl; /* shadow per device sendctrl */
-
- /* value we put in kr_rcvhdrcnt */
- u32 rcvhdrcnt;
- /* value we put in kr_rcvhdrsize */
- u32 rcvhdrsize;
- /* value we put in kr_rcvhdrentsize */
- u32 rcvhdrentsize;
- /* kr_ctxtcnt value */
- u32 ctxtcnt;
- /* kr_pagealign value */
- u32 palign;
- /* number of "2KB" PIO buffers */
- u32 piobcnt2k;
- /* size in bytes of "2KB" PIO buffers */
- u32 piosize2k;
- /* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
- u32 piosize2kmax_dwords;
- /* number of "4KB" PIO buffers */
- u32 piobcnt4k;
- /* size in bytes of "4KB" PIO buffers */
- u32 piosize4k;
- /* kr_rcvegrbase value */
- u32 rcvegrbase;
- /* kr_rcvtidbase value */
- u32 rcvtidbase;
- /* kr_rcvtidcnt value */
- u32 rcvtidcnt;
- /* kr_userregbase */
- u32 uregbase;
- /* shadow the control register contents */
- u32 control;
-
- /* chip address space used by 4k pio buffers */
- u32 align4k;
- /* size of each rcvegrbuffer */
- u16 rcvegrbufsize;
- /* log2 of above */
- u16 rcvegrbufsize_shift;
- /* localbus width (1, 2,4,8,16,32) from config space */
- u32 lbus_width;
- /* localbus speed in MHz */
- u32 lbus_speed;
- int unit; /* unit # of this chip */
-
- /* start of CHIP_SPEC move to chipspec, but need code changes */
- /* low and high portions of MSI capability/vector */
- u32 msi_lo;
- /* saved after PCIe init for restore after reset */
- u32 msi_hi;
- /* MSI data (vector) saved for restore */
- u16 msi_data;
- /* so we can rewrite it after a chip reset */
- u32 pcibar0;
- /* so we can rewrite it after a chip reset */
- u32 pcibar1;
- u64 rhdrhead_intr_off;
-
- /*
- * ASCII serial number, from flash, large enough for original
- * all digit strings, and longer QLogic serial number format
- */
- u8 serial[16];
- /* human readable board version */
- u8 boardversion[96];
- u8 lbus_info[32]; /* human readable localbus info */
- /* chip major rev, from qib_revision */
- u8 majrev;
- /* chip minor rev, from qib_revision */
- u8 minrev;
-
- /* Misc small ints */
- /* Number of physical ports available */
- u8 num_pports;
- /* Lowest context number which can be used by user processes */
- u8 first_user_ctxt;
- u8 n_krcv_queues;
- u8 qpn_mask;
- u8 skip_kctxt_mask;
-
- u16 rhf_offset; /* offset of RHF within receive header entry */
-
- /*
- * GPIO pins for twsi-connected devices, and device code for eeprom
- */
- u8 gpio_sda_num;
- u8 gpio_scl_num;
- u8 twsi_eeprom_dev;
- u8 board_atten;
-
- /* Support (including locks) for EEPROM logging of errors and time */
- /* control access to actual counters, timer */
- spinlock_t eep_st_lock;
- /* control high-level access to EEPROM */
- struct mutex eep_lock;
- uint64_t traffic_wds;
- struct qib_diag_client *diag_client;
- spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
- struct diag_observer_list_elt *diag_observer_list;
-
- u8 psxmitwait_supported;
- /* cycle length of PS* counters in HW (in picoseconds) */
- u16 psxmitwait_check_rate;
- /* high volume overflow errors defered to tasklet */
- struct tasklet_struct error_tasklet;
-
- int assigned_node_id; /* NUMA node closest to HCA */
-};
-
-/* hol_state values */
-#define QIB_HOL_UP 0
-#define QIB_HOL_INIT 1
-
-#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
-#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
-#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
-#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
-#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
-
-/* operation types for f_txchk_change() */
-#define TXCHK_CHG_TYPE_DIS1 3
-#define TXCHK_CHG_TYPE_ENAB1 2
-#define TXCHK_CHG_TYPE_KERN 1
-#define TXCHK_CHG_TYPE_USER 0
-
-#define QIB_CHASE_TIME msecs_to_jiffies(145)
-#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
-
-/* Private data for file operations */
-struct qib_filedata {
- struct qib_ctxtdata *rcd;
- unsigned subctxt;
- unsigned tidcursor;
- struct qib_user_sdma_queue *pq;
- int rec_cpu_num; /* for cpu affinity; -1 if none */
-};
-
-extern struct xarray qib_dev_table;
-extern struct qib_devdata *qib_lookup(int unit);
-extern u32 qib_cpulist_count;
-extern unsigned long *qib_cpulist;
-extern unsigned qib_cc_table_size;
-
-int qib_init(struct qib_devdata *, int);
-int init_chip_wc_pat(struct qib_devdata *dd, u32);
-int qib_enable_wc(struct qib_devdata *dd);
-void qib_disable_wc(struct qib_devdata *dd);
-int qib_count_units(int *npresentp, int *nupp);
-int qib_count_active_units(void);
-
-int qib_cdev_init(int minor, const char *name,
- const struct file_operations *fops,
- struct cdev **cdevp, struct device **devp);
-void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
-int qib_dev_init(void);
-void qib_dev_cleanup(void);
-
-int qib_diag_add(struct qib_devdata *);
-void qib_diag_remove(struct qib_devdata *);
-void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
-void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */
-
-int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
-void qib_bad_intrstatus(struct qib_devdata *);
-void qib_handle_urcv(struct qib_devdata *, u64);
-
-/* clean up any per-chip chip-specific stuff */
-void qib_chip_cleanup(struct qib_devdata *);
-/* clean up any chip type-specific stuff */
-void qib_chip_done(void);
-
-/* check to see if we have to force ordering for write combining */
-int qib_unordered_wc(void);
-void qib_pio_copy(void __iomem *to, const void *from, size_t count);
-
-void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
-int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
-void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
-void qib_cancel_sends(struct qib_pportdata *);
-
-int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
-int qib_setup_eagerbufs(struct qib_ctxtdata *);
-void qib_set_ctxtcnt(struct qib_devdata *);
-int qib_create_ctxts(struct qib_devdata *dd);
-struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
-int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
-void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
-
-u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
-int qib_reset_device(int);
-int qib_wait_linkstate(struct qib_pportdata *, u32, int);
-int qib_set_linkstate(struct qib_pportdata *, u8);
-int qib_set_mtu(struct qib_pportdata *, u16);
-int qib_set_lid(struct qib_pportdata *, u32, u8);
-void qib_hol_down(struct qib_pportdata *);
-void qib_hol_init(struct qib_pportdata *);
-void qib_hol_up(struct qib_pportdata *);
-void qib_hol_event(struct timer_list *);
-void qib_disable_after_error(struct qib_devdata *);
-int qib_set_uevent_bits(struct qib_pportdata *, const int);
-
-/* for use in system calls, where we want to know device type, etc. */
-#define ctxt_fp(fp) \
- (((struct qib_filedata *)(fp)->private_data)->rcd)
-#define subctxt_fp(fp) \
- (((struct qib_filedata *)(fp)->private_data)->subctxt)
-#define tidcursor_fp(fp) \
- (((struct qib_filedata *)(fp)->private_data)->tidcursor)
-#define user_sdma_queue_fp(fp) \
- (((struct qib_filedata *)(fp)->private_data)->pq)
-
-static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
-{
- return ppd->dd;
-}
-
-static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
-{
- return container_of(dev, struct qib_devdata, verbs_dev);
-}
-
-static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
-{
- return dd_from_dev(to_idev(ibdev));
-}
-
-static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
-{
- return container_of(ibp, struct qib_pportdata, ibport_data);
-}
-
-static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u32 port)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 pidx = port - 1; /* IB number port from 1, hdw from 0 */
-
- WARN_ON(pidx >= dd->num_pports);
- return &dd->pport[pidx].ibport_data;
-}
-
-/*
- * values for dd->flags (_device_ related flags) and
- */
-#define QIB_HAS_LINK_LATENCY 0x1 /* supports link latency (IB 1.2) */
-#define QIB_INITTED 0x2 /* chip and driver up and initted */
-#define QIB_DOING_RESET 0x4 /* in the middle of doing chip reset */
-#define QIB_PRESENT 0x8 /* chip accesses can be done */
-#define QIB_PIO_FLUSH_WC 0x10 /* Needs Write combining flush for PIO */
-#define QIB_HAS_THRESH_UPDATE 0x40
-#define QIB_HAS_SDMA_TIMEOUT 0x80
-#define QIB_USE_SPCL_TRIG 0x100 /* SpecialTrigger launch enabled */
-#define QIB_NODMA_RTAIL 0x200 /* rcvhdrtail register DMA disabled */
-#define QIB_HAS_INTX 0x800 /* Supports INTx interrupts */
-#define QIB_HAS_SEND_DMA 0x1000 /* Supports Send DMA */
-#define QIB_HAS_VLSUPP 0x2000 /* Supports multiple VLs; PBC different */
-#define QIB_HAS_HDRSUPP 0x4000 /* Supports header suppression */
-#define QIB_BADINTR 0x8000 /* severe interrupt problems */
-#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
-#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
-#define QIB_SHUTDOWN 0x40000 /* device is shutting down */
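These flags form a bitmask tested with bitwise AND. A minimal sketch, assuming the usual convention that register access requires both QIB_PRESENT and QIB_INITTED (the function name is invented):

    static bool example_chip_usable(const struct qib_devdata *dd)
    {
            /* safe to touch chip registers only when present and initted */
            return (dd->flags & (QIB_PRESENT | QIB_INITTED)) ==
                   (QIB_PRESENT | QIB_INITTED);
    }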
-
-/*
- * values for ppd->lflags (_ib_port_ related flags)
- */
-#define QIBL_LINKV 0x1 /* IB link state valid */
-#define QIBL_LINKDOWN 0x8 /* IB link is down */
-#define QIBL_LINKINIT 0x10 /* IB link level is up */
-#define QIBL_LINKARMED 0x20 /* IB link is ARMED */
-#define QIBL_LINKACTIVE 0x40 /* IB link is ACTIVE */
-/* leave a gap for more IB-link state */
-#define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */
-#define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */
-#define QIBL_IB_LINK_DISABLED 0x4000 /* Linkdown-disable forced,
- * Do not try to bring it up */
-#define QIBL_IB_FORCE_NOTIFY 0x8000 /* force notify on next ib change */
-
-/* IB dword length mask in PBC (lower 11 bits); same for all chips */
-#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
-
-
-/* ctxt_flag bit offsets */
- /* waiting for a packet to arrive */
-#define QIB_CTXT_WAITING_RCV 2
- /* master has not finished initializing */
-#define QIB_CTXT_MASTER_UNINIT 4
- /* waiting for an urgent packet to arrive */
-#define QIB_CTXT_WAITING_URG 5
-
-/* free up any allocated data at context close */
-void qib_free_data(struct qib_ctxtdata *dd);
-void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
- u32, struct qib_ctxtdata *);
-struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
- const struct pci_device_id *);
-struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
- const struct pci_device_id *);
-struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
- const struct pci_device_id *);
-void qib_free_devdata(struct qib_devdata *);
-struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
-
-#define QIB_TWSI_NO_DEV 0xFF
-/* The qib_twsi_ functions below must be called with eep_lock held */
-int qib_twsi_reset(struct qib_devdata *dd);
-int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
- int len);
-int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
- const void *buffer, int len);
-void qib_get_eeprom_info(struct qib_devdata *);
-void qib_dump_lookup_output_queue(struct qib_devdata *);
-void qib_force_pio_avail_update(struct qib_devdata *);
-void qib_clear_symerror_on_linkup(struct timer_list *t);
-
-/*
- * Set LED override: only the two LSBs have "public" meaning, but
- * any non-zero value substitutes them for the Link and LinkTrain
- * LED states.
- */
-#define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
-#define QIB_LED_LOG 2 /* Logical (link) YELLOW LED */
-void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
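Per the comment above, a hypothetical caller could force both LEDs on by OR-ing the two defines, and (by assumption, not verified here) restore normal behaviour by writing 0:

    static void example_leds_on(struct qib_pportdata *ppd)
    {
            /* drive both LEDs via override; 0 would undo the override */
            qib_set_led_override(ppd, QIB_LED_PHYS | QIB_LED_LOG);
    }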
-
-/* send dma routines */
-int qib_setup_sdma(struct qib_pportdata *);
-void qib_teardown_sdma(struct qib_pportdata *);
-void __qib_sdma_intr(struct qib_pportdata *);
-void qib_sdma_intr(struct qib_pportdata *);
-void qib_user_sdma_send_desc(struct qib_pportdata *dd,
- struct list_head *pktlist);
-int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
- u32, struct qib_verbs_txreq *);
-/* ppd->sdma_lock should be locked before calling this. */
-int qib_sdma_make_progress(struct qib_pportdata *dd);
-
-/* must be called under qib_sdma_lock */
-static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
-{
- return ppd->sdma_descq_cnt -
- (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
-}
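Worked example with illustrative numbers: if sdma_descq_cnt = 256, sdma_descq_added = 300 and sdma_descq_removed = 100, then 200 descriptors are outstanding and freecnt = 256 - 200 - 1 = 55. The extra slot is kept unused so a completely full ring remains distinguishable from an empty one.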
-
-static inline int __qib_sdma_running(struct qib_pportdata *ppd)
-{
- return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
-}
-int qib_sdma_running(struct qib_pportdata *);
-void dump_sdma_state(struct qib_pportdata *ppd);
-void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
-void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
-
-/*
- * Number of words used for the protocol header if not set by qib_userinit().
- */
-#define QIB_DFLT_RCVHDRSIZE 9
-
-/*
- * We need to be able to handle an IB header of at least 24 dwords.
- * We need the rcvhdrq large enough to handle largest IB header, but
- * still have room for a 2KB MTU standard IB packet.
- * Additionally, some processor/memory controller combinations
- * benefit quite strongly from having the DMA'ed data be cacheline
- * aligned and a cacheline multiple, so we set the size to 32 dwords
- * (2 64-byte primary cachelines for pretty much all processors of
- * interest). The alignment hurts nothing, other than using somewhat
- * more memory.
- */
-#define QIB_RCVHDR_ENTSIZE 32
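Spelling out the arithmetic in the comment above: 32 dwords x 4 bytes/dword = 128 bytes, i.e. exactly two 64-byte cachelines, which is why the entry size is padded up from the 24-dword minimum to 32.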
-
-int qib_get_user_pages(unsigned long, size_t, struct page **);
-void qib_release_user_pages(struct page **, size_t);
-int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
-int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
-u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
-void qib_sendbuf_done(struct qib_devdata *, unsigned);
-
-static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
-{
- *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
-}
-
-static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
-{
- /*
-	 * volatile because it's a DMA target from the chip, the routine is
-	 * inlined, and we don't want register caching or reordering.
- */
- return (u32) le64_to_cpu(
- *((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
-}
-
-/*
- * sysfs interface.
- */
-
-extern const struct attribute_group qib_attr_group;
-extern const struct attribute_group *qib_attr_port_groups[];
-
-int qib_device_create(struct qib_devdata *);
-void qib_device_remove(struct qib_devdata *);
-
-/* Hook for sysfs read of QSFP */
-extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
-
-int __init qib_init_qibfs(void);
-int __exit qib_exit_qibfs(void);
-
-int qibfs_add(struct qib_devdata *);
-int qibfs_remove(struct qib_devdata *);
-
-int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
-int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
- const struct pci_device_id *);
-void qib_pcie_ddcleanup(struct qib_devdata *);
-int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent);
-void qib_free_irq(struct qib_devdata *dd);
-int qib_reinit_intr(struct qib_devdata *dd);
-void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
-void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
-/* interrupts for device */
-u64 qib_int_counter(struct qib_devdata *);
-/* interrupt for all devices */
-u64 qib_sps_ints(void);
-
-/*
- * dma_addr wrappers - an all-zero address is invalid for the hw
- */
-int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
-struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
-
-/*
- * Flush write combining store buffers (if present) and perform a write
- * barrier.
- */
-static inline void qib_flush_wc(void)
-{
-#if defined(CONFIG_X86_64)
- asm volatile("sfence" : : : "memory");
-#else
- wmb(); /* no reorder around wc flush */
-#endif
-}
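A hedged sketch of the pairing this enables (the function name and the exact ordering protocol are assumptions; qib_pio_copy and qib_flush_wc are declared earlier in this header):

    static void example_pio_send(void __iomem *piobuf, const void *data,
                                 size_t count)
    {
            qib_pio_copy(piobuf, data, count); /* fill the WC-mapped buffer */
            qib_flush_wc();                    /* drain WC stores before the
                                                * chip is told to launch */
    }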
-
-/* global module parameter variables */
-extern unsigned qib_ibmtu;
-extern ushort qib_cfgctxts;
-extern ushort qib_num_cfg_vls;
-extern ushort qib_mini_init; /* If set, do few (ideally 0) writes to chip */
-extern unsigned qib_n_krcv_queues;
-extern unsigned qib_sdma_fetch_arb;
-extern unsigned qib_compat_ddr_negotiate;
-extern int qib_special_trigger;
-extern unsigned qib_numa_aware;
-
-extern struct mutex qib_mutex;
-
-/* Number of seconds before the periodic card status check */
-#define STATUS_TIMEOUT 60
-
-#define QIB_DRV_NAME "ib_qib"
-#define QIB_USER_MINOR_BASE 0
-#define QIB_TRACE_MINOR 127
-#define QIB_DIAGPKT_MINOR 128
-#define QIB_DIAG_MINOR_BASE 129
-#define QIB_NMINORS 255
-
-#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_VENDOR_ID_QLOGIC 0x1077
-#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
-#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
-#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
-
-/*
- * qib_early_err is used (only!) to print early errors before devdata is
- * allocated, or when dd->pcidev may not be valid, and at the tail end of
- * cleanup when devdata may have been freed, etc. qib_dev_porterr is
- * the same as qib_dev_err, but is used when the message really needs
- * the IB port# to be definitive as to what's happening.
- * All of these go to the trace log, and the trace log entry is done
- * first to avoid possible serial port delays from printk.
- */
-#define qib_early_err(dev, fmt, ...) \
- dev_err(dev, fmt, ##__VA_ARGS__)
-
-#define qib_dev_err(dd, fmt, ...) \
- dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
-
-#define qib_dev_warn(dd, fmt, ...) \
- dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
- rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
-
-#define qib_dev_porterr(dd, port, fmt, ...) \
- dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
- rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (dd)->unit, (port), \
- ##__VA_ARGS__)
-
-#define qib_devinfo(pcidev, fmt, ...) \
- dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)
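Hypothetical usage of the macros above (the format strings are invented for illustration):

    static void example_log(struct qib_devdata *dd, u32 port)
    {
            qib_dev_err(dd, "chip reset failed\n");
            qib_dev_porterr(dd, port, "link went down unexpectedly\n");
    }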
-
-/*
- * This structure is used for formatting hardware error messages.
- */
-struct qib_hwerror_msgs {
- u64 mask;
- const char *msg;
- size_t sz;
-};
-
-#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
-
-/* in qib_intr.c... */
-void qib_format_hwerrors(u64 hwerrs,
- const struct qib_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t lmsg);
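A sketch of how a message table is typically built and consumed; the mask values and names are invented, and ARRAY_SIZE is assumed to be available from linux/kernel.h:

    static const struct qib_hwerror_msgs example_msgs[] = {
            QLOGIC_IB_HWE_MSG(0x1ULL, "example error A"),
            QLOGIC_IB_HWE_MSG(0x2ULL, "example error B"),
    };

    static void example_decode(u64 hwerrs, char *msg, size_t lmsg)
    {
            qib_format_hwerrors(hwerrs, example_msgs,
                                ARRAY_SIZE(example_msgs), msg, lmsg);
    }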
-
-void qib_stop_send_queue(struct rvt_qp *qp);
-void qib_quiesce_qp(struct rvt_qp *qp);
-void qib_flush_qp_waiters(struct rvt_qp *qp);
-int qib_mtu_to_path_mtu(u32 mtu);
-u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
-void qib_notify_error_qp(struct rvt_qp *qp);
-int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- struct ib_qp_attr *attr);
-
-#endif /* _QIB_KERNEL_H */
diff --git a/drivers/infiniband/hw/qib/qib_6120_regs.h b/drivers/infiniband/hw/qib/qib_6120_regs.h
deleted file mode 100644
index e16cb6f7de2c..000000000000
--- a/drivers/infiniband/hw/qib/qib_6120_regs.h
+++ /dev/null
@@ -1,977 +0,0 @@
-/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
-
-#define QIB_6120_Revision_OFFS 0x0
-#define QIB_6120_Revision_R_Simulator_LSB 0x3F
-#define QIB_6120_Revision_R_Simulator_RMASK 0x1
-#define QIB_6120_Revision_Reserved_LSB 0x28
-#define QIB_6120_Revision_Reserved_RMASK 0x7FFFFF
-#define QIB_6120_Revision_BoardID_LSB 0x20
-#define QIB_6120_Revision_BoardID_RMASK 0xFF
-#define QIB_6120_Revision_R_SW_LSB 0x18
-#define QIB_6120_Revision_R_SW_RMASK 0xFF
-#define QIB_6120_Revision_R_Arch_LSB 0x10
-#define QIB_6120_Revision_R_Arch_RMASK 0xFF
-#define QIB_6120_Revision_R_ChipRevMajor_LSB 0x8
-#define QIB_6120_Revision_R_ChipRevMajor_RMASK 0xFF
-#define QIB_6120_Revision_R_ChipRevMinor_LSB 0x0
-#define QIB_6120_Revision_R_ChipRevMinor_RMASK 0xFF
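The generated names follow one pattern throughout this file: <Reg>_OFFS is the register offset, <Reg>_<Field>_LSB the field's low bit position, and <Reg>_<Field>_RMASK the mask to apply after shifting right. A minimal extraction sketch (the function name is invented):

    static inline u64 example_get_boardid(u64 revision_reg)
    {
            return (revision_reg >> QIB_6120_Revision_BoardID_LSB) &
                   QIB_6120_Revision_BoardID_RMASK;
    }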
-
-#define QIB_6120_Control_OFFS 0x8
-#define QIB_6120_Control_TxLatency_LSB 0x4
-#define QIB_6120_Control_TxLatency_RMASK 0x1
-#define QIB_6120_Control_PCIERetryBufDiagEn_LSB 0x3
-#define QIB_6120_Control_PCIERetryBufDiagEn_RMASK 0x1
-#define QIB_6120_Control_LinkEn_LSB 0x2
-#define QIB_6120_Control_LinkEn_RMASK 0x1
-#define QIB_6120_Control_FreezeMode_LSB 0x1
-#define QIB_6120_Control_FreezeMode_RMASK 0x1
-#define QIB_6120_Control_SyncReset_LSB 0x0
-#define QIB_6120_Control_SyncReset_RMASK 0x1
-
-#define QIB_6120_PageAlign_OFFS 0x10
-
-#define QIB_6120_PortCnt_OFFS 0x18
-
-#define QIB_6120_SendRegBase_OFFS 0x30
-
-#define QIB_6120_UserRegBase_OFFS 0x38
-
-#define QIB_6120_CntrRegBase_OFFS 0x40
-
-#define QIB_6120_Scratch_OFFS 0x48
-#define QIB_6120_Scratch_TopHalf_LSB 0x20
-#define QIB_6120_Scratch_TopHalf_RMASK 0xFFFFFFFF
-#define QIB_6120_Scratch_BottomHalf_LSB 0x0
-#define QIB_6120_Scratch_BottomHalf_RMASK 0xFFFFFFFF
-
-#define QIB_6120_IntBlocked_OFFS 0x60
-#define QIB_6120_IntBlocked_ErrorIntBlocked_LSB 0x1F
-#define QIB_6120_IntBlocked_ErrorIntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_PioSetIntBlocked_LSB 0x1E
-#define QIB_6120_IntBlocked_PioSetIntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_LSB 0x1D
-#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_assertGPIOIntBlocked_LSB 0x1C
-#define QIB_6120_IntBlocked_assertGPIOIntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_Reserved_LSB 0xF
-#define QIB_6120_IntBlocked_Reserved_RMASK 0x1FFF
-#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_LSB 0x10
-#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_LSB 0xF
-#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_LSB 0xE
-#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_LSB 0xD
-#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_LSB 0xC
-#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_Reserved1_LSB 0x5
-#define QIB_6120_IntBlocked_Reserved1_RMASK 0x7F
-#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_LSB 0x4
-#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_LSB 0x3
-#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_LSB 0x2
-#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_LSB 0x1
-#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_LSB 0x0
-#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_RMASK 0x1
-
-#define QIB_6120_IntMask_OFFS 0x68
-#define QIB_6120_IntMask_ErrorIntMask_LSB 0x1F
-#define QIB_6120_IntMask_ErrorIntMask_RMASK 0x1
-#define QIB_6120_IntMask_PioSetIntMask_LSB 0x1E
-#define QIB_6120_IntMask_PioSetIntMask_RMASK 0x1
-#define QIB_6120_IntMask_PioBufAvailIntMask_LSB 0x1D
-#define QIB_6120_IntMask_PioBufAvailIntMask_RMASK 0x1
-#define QIB_6120_IntMask_assertGPIOIntMask_LSB 0x1C
-#define QIB_6120_IntMask_assertGPIOIntMask_RMASK 0x1
-#define QIB_6120_IntMask_Reserved_LSB 0x11
-#define QIB_6120_IntMask_Reserved_RMASK 0x7FF
-#define QIB_6120_IntMask_RcvAvail4IntMask_LSB 0x10
-#define QIB_6120_IntMask_RcvAvail4IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvAvail3IntMask_LSB 0xF
-#define QIB_6120_IntMask_RcvAvail3IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvAvail2IntMask_LSB 0xE
-#define QIB_6120_IntMask_RcvAvail2IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvAvail1IntMask_LSB 0xD
-#define QIB_6120_IntMask_RcvAvail1IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvAvail0IntMask_LSB 0xC
-#define QIB_6120_IntMask_RcvAvail0IntMask_RMASK 0x1
-#define QIB_6120_IntMask_Reserved1_LSB 0x5
-#define QIB_6120_IntMask_Reserved1_RMASK 0x7F
-#define QIB_6120_IntMask_RcvUrg4IntMask_LSB 0x4
-#define QIB_6120_IntMask_RcvUrg4IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvUrg3IntMask_LSB 0x3
-#define QIB_6120_IntMask_RcvUrg3IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvUrg2IntMask_LSB 0x2
-#define QIB_6120_IntMask_RcvUrg2IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvUrg1IntMask_LSB 0x1
-#define QIB_6120_IntMask_RcvUrg1IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvUrg0IntMask_LSB 0x0
-#define QIB_6120_IntMask_RcvUrg0IntMask_RMASK 0x1
-
-#define QIB_6120_IntStatus_OFFS 0x70
-#define QIB_6120_IntStatus_Error_LSB 0x1F
-#define QIB_6120_IntStatus_Error_RMASK 0x1
-#define QIB_6120_IntStatus_PioSent_LSB 0x1E
-#define QIB_6120_IntStatus_PioSent_RMASK 0x1
-#define QIB_6120_IntStatus_PioBufAvail_LSB 0x1D
-#define QIB_6120_IntStatus_PioBufAvail_RMASK 0x1
-#define QIB_6120_IntStatus_assertGPIO_LSB 0x1C
-#define QIB_6120_IntStatus_assertGPIO_RMASK 0x1
-#define QIB_6120_IntStatus_Reserved_LSB 0xF
-#define QIB_6120_IntStatus_Reserved_RMASK 0x1FFF
-#define QIB_6120_IntStatus_RcvAvail4_LSB 0x10
-#define QIB_6120_IntStatus_RcvAvail4_RMASK 0x1
-#define QIB_6120_IntStatus_RcvAvail3_LSB 0xF
-#define QIB_6120_IntStatus_RcvAvail3_RMASK 0x1
-#define QIB_6120_IntStatus_RcvAvail2_LSB 0xE
-#define QIB_6120_IntStatus_RcvAvail2_RMASK 0x1
-#define QIB_6120_IntStatus_RcvAvail1_LSB 0xD
-#define QIB_6120_IntStatus_RcvAvail1_RMASK 0x1
-#define QIB_6120_IntStatus_RcvAvail0_LSB 0xC
-#define QIB_6120_IntStatus_RcvAvail0_RMASK 0x1
-#define QIB_6120_IntStatus_Reserved1_LSB 0x5
-#define QIB_6120_IntStatus_Reserved1_RMASK 0x7F
-#define QIB_6120_IntStatus_RcvUrg4_LSB 0x4
-#define QIB_6120_IntStatus_RcvUrg4_RMASK 0x1
-#define QIB_6120_IntStatus_RcvUrg3_LSB 0x3
-#define QIB_6120_IntStatus_RcvUrg3_RMASK 0x1
-#define QIB_6120_IntStatus_RcvUrg2_LSB 0x2
-#define QIB_6120_IntStatus_RcvUrg2_RMASK 0x1
-#define QIB_6120_IntStatus_RcvUrg1_LSB 0x1
-#define QIB_6120_IntStatus_RcvUrg1_RMASK 0x1
-#define QIB_6120_IntStatus_RcvUrg0_LSB 0x0
-#define QIB_6120_IntStatus_RcvUrg0_RMASK 0x1
-
-#define QIB_6120_IntClear_OFFS 0x78
-#define QIB_6120_IntClear_ErrorIntClear_LSB 0x1F
-#define QIB_6120_IntClear_ErrorIntClear_RMASK 0x1
-#define QIB_6120_IntClear_PioSetIntClear_LSB 0x1E
-#define QIB_6120_IntClear_PioSetIntClear_RMASK 0x1
-#define QIB_6120_IntClear_PioBufAvailIntClear_LSB 0x1D
-#define QIB_6120_IntClear_PioBufAvailIntClear_RMASK 0x1
-#define QIB_6120_IntClear_assertGPIOIntClear_LSB 0x1C
-#define QIB_6120_IntClear_assertGPIOIntClear_RMASK 0x1
-#define QIB_6120_IntClear_Reserved_LSB 0xF
-#define QIB_6120_IntClear_Reserved_RMASK 0x1FFF
-#define QIB_6120_IntClear_RcvAvail4IntClear_LSB 0x10
-#define QIB_6120_IntClear_RcvAvail4IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvAvail3IntClear_LSB 0xF
-#define QIB_6120_IntClear_RcvAvail3IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvAvail2IntClear_LSB 0xE
-#define QIB_6120_IntClear_RcvAvail2IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvAvail1IntClear_LSB 0xD
-#define QIB_6120_IntClear_RcvAvail1IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvAvail0IntClear_LSB 0xC
-#define QIB_6120_IntClear_RcvAvail0IntClear_RMASK 0x1
-#define QIB_6120_IntClear_Reserved1_LSB 0x5
-#define QIB_6120_IntClear_Reserved1_RMASK 0x7F
-#define QIB_6120_IntClear_RcvUrg4IntClear_LSB 0x4
-#define QIB_6120_IntClear_RcvUrg4IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvUrg3IntClear_LSB 0x3
-#define QIB_6120_IntClear_RcvUrg3IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvUrg2IntClear_LSB 0x2
-#define QIB_6120_IntClear_RcvUrg2IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvUrg1IntClear_LSB 0x1
-#define QIB_6120_IntClear_RcvUrg1IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvUrg0IntClear_LSB 0x0
-#define QIB_6120_IntClear_RcvUrg0IntClear_RMASK 0x1
-
-#define QIB_6120_ErrMask_OFFS 0x80
-#define QIB_6120_ErrMask_Reserved_LSB 0x34
-#define QIB_6120_ErrMask_Reserved_RMASK 0xFFF
-#define QIB_6120_ErrMask_HardwareErrMask_LSB 0x33
-#define QIB_6120_ErrMask_HardwareErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_ResetNegatedMask_LSB 0x32
-#define QIB_6120_ErrMask_ResetNegatedMask_RMASK 0x1
-#define QIB_6120_ErrMask_InvalidAddrErrMask_LSB 0x31
-#define QIB_6120_ErrMask_InvalidAddrErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_IBStatusChangedMask_LSB 0x30
-#define QIB_6120_ErrMask_IBStatusChangedMask_RMASK 0x1
-#define QIB_6120_ErrMask_Reserved1_LSB 0x26
-#define QIB_6120_ErrMask_Reserved1_RMASK 0x3FF
-#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
-#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
-#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
-#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
-#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
-#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendPktLenErrMask_LSB 0x20
-#define QIB_6120_ErrMask_SendPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendUnderRunErrMask_LSB 0x1F
-#define QIB_6120_ErrMask_SendUnderRunErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
-#define QIB_6120_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendMinPktLenErrMask_LSB 0x1D
-#define QIB_6120_ErrMask_SendMinPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_Reserved2_LSB 0x12
-#define QIB_6120_ErrMask_Reserved2_RMASK 0x7FF
-#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
-#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvHdrErrMask_LSB 0x10
-#define QIB_6120_ErrMask_RcvHdrErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvHdrLenErrMask_LSB 0xF
-#define QIB_6120_ErrMask_RcvHdrLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvBadTidErrMask_LSB 0xE
-#define QIB_6120_ErrMask_RcvBadTidErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvHdrFullErrMask_LSB 0xD
-#define QIB_6120_ErrMask_RcvHdrFullErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvEgrFullErrMask_LSB 0xC
-#define QIB_6120_ErrMask_RcvEgrFullErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvBadVersionErrMask_LSB 0xB
-#define QIB_6120_ErrMask_RcvBadVersionErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvIBFlowErrMask_LSB 0xA
-#define QIB_6120_ErrMask_RcvIBFlowErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvEBPErrMask_LSB 0x9
-#define QIB_6120_ErrMask_RcvEBPErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
-#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
-#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvShortPktLenErrMask_LSB 0x6
-#define QIB_6120_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvLongPktLenErrMask_LSB 0x5
-#define QIB_6120_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
-#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvMinPktLenErrMask_LSB 0x3
-#define QIB_6120_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvICRCErrMask_LSB 0x2
-#define QIB_6120_ErrMask_RcvICRCErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvVCRCErrMask_LSB 0x1
-#define QIB_6120_ErrMask_RcvVCRCErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvFormatErrMask_LSB 0x0
-#define QIB_6120_ErrMask_RcvFormatErrMask_RMASK 0x1
-
-#define QIB_6120_ErrStatus_OFFS 0x88
-#define QIB_6120_ErrStatus_Reserved_LSB 0x34
-#define QIB_6120_ErrStatus_Reserved_RMASK 0xFFF
-#define QIB_6120_ErrStatus_HardwareErr_LSB 0x33
-#define QIB_6120_ErrStatus_HardwareErr_RMASK 0x1
-#define QIB_6120_ErrStatus_ResetNegated_LSB 0x32
-#define QIB_6120_ErrStatus_ResetNegated_RMASK 0x1
-#define QIB_6120_ErrStatus_InvalidAddrErr_LSB 0x31
-#define QIB_6120_ErrStatus_InvalidAddrErr_RMASK 0x1
-#define QIB_6120_ErrStatus_IBStatusChanged_LSB 0x30
-#define QIB_6120_ErrStatus_IBStatusChanged_RMASK 0x1
-#define QIB_6120_ErrStatus_Reserved1_LSB 0x26
-#define QIB_6120_ErrStatus_Reserved1_RMASK 0x3FF
-#define QIB_6120_ErrStatus_SendUnsupportedVLErr_LSB 0x25
-#define QIB_6120_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
-#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendPioArmLaunchErr_LSB 0x23
-#define QIB_6120_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendDroppedDataPktErr_LSB 0x22
-#define QIB_6120_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
-#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendPktLenErr_LSB 0x20
-#define QIB_6120_ErrStatus_SendPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendUnderRunErr_LSB 0x1F
-#define QIB_6120_ErrStatus_SendUnderRunErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendMaxPktLenErr_LSB 0x1E
-#define QIB_6120_ErrStatus_SendMaxPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendMinPktLenErr_LSB 0x1D
-#define QIB_6120_ErrStatus_SendMinPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_Reserved2_LSB 0x12
-#define QIB_6120_ErrStatus_Reserved2_RMASK 0x7FF
-#define QIB_6120_ErrStatus_RcvIBLostLinkErr_LSB 0x11
-#define QIB_6120_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvHdrErr_LSB 0x10
-#define QIB_6120_ErrStatus_RcvHdrErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvHdrLenErr_LSB 0xF
-#define QIB_6120_ErrStatus_RcvHdrLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvBadTidErr_LSB 0xE
-#define QIB_6120_ErrStatus_RcvBadTidErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvHdrFullErr_LSB 0xD
-#define QIB_6120_ErrStatus_RcvHdrFullErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvEgrFullErr_LSB 0xC
-#define QIB_6120_ErrStatus_RcvEgrFullErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvBadVersionErr_LSB 0xB
-#define QIB_6120_ErrStatus_RcvBadVersionErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvIBFlowErr_LSB 0xA
-#define QIB_6120_ErrStatus_RcvIBFlowErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvEBPErr_LSB 0x9
-#define QIB_6120_ErrStatus_RcvEBPErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
-#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
-#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvShortPktLenErr_LSB 0x6
-#define QIB_6120_ErrStatus_RcvShortPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvLongPktLenErr_LSB 0x5
-#define QIB_6120_ErrStatus_RcvLongPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvMaxPktLenErr_LSB 0x4
-#define QIB_6120_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvMinPktLenErr_LSB 0x3
-#define QIB_6120_ErrStatus_RcvMinPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvICRCErr_LSB 0x2
-#define QIB_6120_ErrStatus_RcvICRCErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvVCRCErr_LSB 0x1
-#define QIB_6120_ErrStatus_RcvVCRCErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvFormatErr_LSB 0x0
-#define QIB_6120_ErrStatus_RcvFormatErr_RMASK 0x1
-
-#define QIB_6120_ErrClear_OFFS 0x90
-#define QIB_6120_ErrClear_Reserved_LSB 0x34
-#define QIB_6120_ErrClear_Reserved_RMASK 0xFFF
-#define QIB_6120_ErrClear_HardwareErrClear_LSB 0x33
-#define QIB_6120_ErrClear_HardwareErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_ResetNegatedClear_LSB 0x32
-#define QIB_6120_ErrClear_ResetNegatedClear_RMASK 0x1
-#define QIB_6120_ErrClear_InvalidAddrErrClear_LSB 0x31
-#define QIB_6120_ErrClear_InvalidAddrErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_IBStatusChangedClear_LSB 0x30
-#define QIB_6120_ErrClear_IBStatusChangedClear_RMASK 0x1
-#define QIB_6120_ErrClear_Reserved1_LSB 0x26
-#define QIB_6120_ErrClear_Reserved1_RMASK 0x3FF
-#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
-#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
-#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
-#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
-#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
-#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendPktLenErrClear_LSB 0x20
-#define QIB_6120_ErrClear_SendPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendUnderRunErrClear_LSB 0x1F
-#define QIB_6120_ErrClear_SendUnderRunErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
-#define QIB_6120_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendMinPktLenErrClear_LSB 0x1D
-#define QIB_6120_ErrClear_SendMinPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_Reserved2_LSB 0x12
-#define QIB_6120_ErrClear_Reserved2_RMASK 0x7FF
-#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
-#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvHdrErrClear_LSB 0x10
-#define QIB_6120_ErrClear_RcvHdrErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvHdrLenErrClear_LSB 0xF
-#define QIB_6120_ErrClear_RcvHdrLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvBadTidErrClear_LSB 0xE
-#define QIB_6120_ErrClear_RcvBadTidErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvHdrFullErrClear_LSB 0xD
-#define QIB_6120_ErrClear_RcvHdrFullErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvEgrFullErrClear_LSB 0xC
-#define QIB_6120_ErrClear_RcvEgrFullErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvBadVersionErrClear_LSB 0xB
-#define QIB_6120_ErrClear_RcvBadVersionErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvIBFlowErrClear_LSB 0xA
-#define QIB_6120_ErrClear_RcvIBFlowErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvEBPErrClear_LSB 0x9
-#define QIB_6120_ErrClear_RcvEBPErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
-#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
-#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvShortPktLenErrClear_LSB 0x6
-#define QIB_6120_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvLongPktLenErrClear_LSB 0x5
-#define QIB_6120_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
-#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvMinPktLenErrClear_LSB 0x3
-#define QIB_6120_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvICRCErrClear_LSB 0x2
-#define QIB_6120_ErrClear_RcvICRCErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvVCRCErrClear_LSB 0x1
-#define QIB_6120_ErrClear_RcvVCRCErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvFormatErrClear_LSB 0x0
-#define QIB_6120_ErrClear_RcvFormatErrClear_RMASK 0x1
-
-#define QIB_6120_HwErrMask_OFFS 0x98
-#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
-#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
-#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
-#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
-#define QIB_6120_HwErrMask_Reserved_LSB 0x3D
-#define QIB_6120_HwErrMask_Reserved_RMASK 0x1
-#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
-#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
-#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x3B
-#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
-#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x3A
-#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
-#define QIB_6120_HwErrMask_Reserved1_LSB 0x39
-#define QIB_6120_HwErrMask_Reserved1_RMASK 0x1
-#define QIB_6120_HwErrMask_IBPLLrfSlipMask_LSB 0x38
-#define QIB_6120_HwErrMask_IBPLLrfSlipMask_RMASK 0x1
-#define QIB_6120_HwErrMask_IBPLLfbSlipMask_LSB 0x37
-#define QIB_6120_HwErrMask_IBPLLfbSlipMask_RMASK 0x1
-#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
-#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
-#define QIB_6120_HwErrMask_Reserved2_LSB 0x33
-#define QIB_6120_HwErrMask_Reserved2_RMASK 0x7
-#define QIB_6120_HwErrMask_RXEMemParityErrMask_LSB 0x2C
-#define QIB_6120_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
-#define QIB_6120_HwErrMask_TXEMemParityErrMask_LSB 0x28
-#define QIB_6120_HwErrMask_TXEMemParityErrMask_RMASK 0xF
-#define QIB_6120_HwErrMask_Reserved3_LSB 0x22
-#define QIB_6120_HwErrMask_Reserved3_RMASK 0x3F
-#define QIB_6120_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
-#define QIB_6120_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
-#define QIB_6120_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
-#define QIB_6120_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
-#define QIB_6120_HwErrMask_PoisonedTLPMask_LSB 0x1D
-#define QIB_6120_HwErrMask_PoisonedTLPMask_RMASK 0x1
-#define QIB_6120_HwErrMask_Reserved4_LSB 0x6
-#define QIB_6120_HwErrMask_Reserved4_RMASK 0x7FFFFF
-#define QIB_6120_HwErrMask_PCIeMemParityErrMask_LSB 0x0
-#define QIB_6120_HwErrMask_PCIeMemParityErrMask_RMASK 0x3F
-
-#define QIB_6120_HwErrStatus_OFFS 0xA0
-#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
-#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
-#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
-#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
-#define QIB_6120_HwErrStatus_Reserved_LSB 0x3D
-#define QIB_6120_HwErrStatus_Reserved_RMASK 0x1
-#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
-#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
-#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x3B
-#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
-#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x3A
-#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
-#define QIB_6120_HwErrStatus_Reserved1_LSB 0x39
-#define QIB_6120_HwErrStatus_Reserved1_RMASK 0x1
-#define QIB_6120_HwErrStatus_IBPLLrfSlip_LSB 0x38
-#define QIB_6120_HwErrStatus_IBPLLrfSlip_RMASK 0x1
-#define QIB_6120_HwErrStatus_IBPLLfbSlip_LSB 0x37
-#define QIB_6120_HwErrStatus_IBPLLfbSlip_RMASK 0x1
-#define QIB_6120_HwErrStatus_PowerOnBISTFailed_LSB 0x36
-#define QIB_6120_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
-#define QIB_6120_HwErrStatus_Reserved2_LSB 0x33
-#define QIB_6120_HwErrStatus_Reserved2_RMASK 0x7
-#define QIB_6120_HwErrStatus_RXEMemParity_LSB 0x2C
-#define QIB_6120_HwErrStatus_RXEMemParity_RMASK 0x7F
-#define QIB_6120_HwErrStatus_TXEMemParity_LSB 0x28
-#define QIB_6120_HwErrStatus_TXEMemParity_RMASK 0xF
-#define QIB_6120_HwErrStatus_Reserved3_LSB 0x22
-#define QIB_6120_HwErrStatus_Reserved3_RMASK 0x3F
-#define QIB_6120_HwErrStatus_PCIeBusParity_LSB 0x1F
-#define QIB_6120_HwErrStatus_PCIeBusParity_RMASK 0x7
-#define QIB_6120_HwErrStatus_PcieCplTimeout_LSB 0x1E
-#define QIB_6120_HwErrStatus_PcieCplTimeout_RMASK 0x1
-#define QIB_6120_HwErrStatus_PoisenedTLP_LSB 0x1D
-#define QIB_6120_HwErrStatus_PoisenedTLP_RMASK 0x1
-#define QIB_6120_HwErrStatus_Reserved4_LSB 0x6
-#define QIB_6120_HwErrStatus_Reserved4_RMASK 0x7FFFFF
-#define QIB_6120_HwErrStatus_PCIeMemParity_LSB 0x0
-#define QIB_6120_HwErrStatus_PCIeMemParity_RMASK 0x3F
-
-#define QIB_6120_HwErrClear_OFFS 0xA8
-#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
-#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
-#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
-#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
-#define QIB_6120_HwErrClear_Reserved_LSB 0x3D
-#define QIB_6120_HwErrClear_Reserved_RMASK 0x1
-#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
-#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
-#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x3B
-#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
-#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x3A
-#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
-#define QIB_6120_HwErrClear_Reserved1_LSB 0x39
-#define QIB_6120_HwErrClear_Reserved1_RMASK 0x1
-#define QIB_6120_HwErrClear_IBPLLrfSlipClear_LSB 0x38
-#define QIB_6120_HwErrClear_IBPLLrfSlipClear_RMASK 0x1
-#define QIB_6120_HwErrClear_IBPLLfbSlipClear_LSB 0x37
-#define QIB_6120_HwErrClear_IBPLLfbSlipClear_RMASK 0x1
-#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
-#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
-#define QIB_6120_HwErrClear_Reserved2_LSB 0x33
-#define QIB_6120_HwErrClear_Reserved2_RMASK 0x7
-#define QIB_6120_HwErrClear_RXEMemParityClear_LSB 0x2C
-#define QIB_6120_HwErrClear_RXEMemParityClear_RMASK 0x7F
-#define QIB_6120_HwErrClear_TXEMemParityClear_LSB 0x28
-#define QIB_6120_HwErrClear_TXEMemParityClear_RMASK 0xF
-#define QIB_6120_HwErrClear_Reserved3_LSB 0x22
-#define QIB_6120_HwErrClear_Reserved3_RMASK 0x3F
-#define QIB_6120_HwErrClear_PCIeBusParityClr_LSB 0x1F
-#define QIB_6120_HwErrClear_PCIeBusParityClr_RMASK 0x7
-#define QIB_6120_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
-#define QIB_6120_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
-#define QIB_6120_HwErrClear_PoisonedTLPClear_LSB 0x1D
-#define QIB_6120_HwErrClear_PoisonedTLPClear_RMASK 0x1
-#define QIB_6120_HwErrClear_Reserved4_LSB 0x6
-#define QIB_6120_HwErrClear_Reserved4_RMASK 0x7FFFFF
-#define QIB_6120_HwErrClear_PCIeMemParityClr_LSB 0x0
-#define QIB_6120_HwErrClear_PCIeMemParityClr_RMASK 0x3F
-
-#define QIB_6120_HwDiagCtrl_OFFS 0xB0
-#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
-#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
-#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
-#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
-#define QIB_6120_HwDiagCtrl_CounterWrEnable_LSB 0x3D
-#define QIB_6120_HwDiagCtrl_CounterWrEnable_RMASK 0x1
-#define QIB_6120_HwDiagCtrl_CounterDisable_LSB 0x3C
-#define QIB_6120_HwDiagCtrl_CounterDisable_RMASK 0x1
-#define QIB_6120_HwDiagCtrl_Reserved_LSB 0x33
-#define QIB_6120_HwDiagCtrl_Reserved_RMASK 0x1FF
-#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
-#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
-#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
-#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
-#define QIB_6120_HwDiagCtrl_Reserved1_LSB 0x23
-#define QIB_6120_HwDiagCtrl_Reserved1_RMASK 0x1F
-#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
-#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
-#define QIB_6120_HwDiagCtrl_Reserved2_LSB 0x6
-#define QIB_6120_HwDiagCtrl_Reserved2_RMASK 0x1FFFFFF
-#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
-#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_RMASK 0x3F
-
-#define QIB_6120_IBCStatus_OFFS 0xC0
-#define QIB_6120_IBCStatus_TxCreditOk_LSB 0x1F
-#define QIB_6120_IBCStatus_TxCreditOk_RMASK 0x1
-#define QIB_6120_IBCStatus_TxReady_LSB 0x1E
-#define QIB_6120_IBCStatus_TxReady_RMASK 0x1
-#define QIB_6120_IBCStatus_Reserved_LSB 0x7
-#define QIB_6120_IBCStatus_Reserved_RMASK 0x7FFFFF
-#define QIB_6120_IBCStatus_LinkState_LSB 0x4
-#define QIB_6120_IBCStatus_LinkState_RMASK 0x7
-#define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0
-#define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF
-
-#define QIB_6120_IBCCtrl_OFFS 0xC8
-#define QIB_6120_IBCCtrl_Loopback_LSB 0x3F
-#define QIB_6120_IBCCtrl_Loopback_RMASK 0x1
-#define QIB_6120_IBCCtrl_LinkDownDefaultState_LSB 0x3E
-#define QIB_6120_IBCCtrl_LinkDownDefaultState_RMASK 0x1
-#define QIB_6120_IBCCtrl_Reserved_LSB 0x2B
-#define QIB_6120_IBCCtrl_Reserved_RMASK 0x7FFFF
-#define QIB_6120_IBCCtrl_CreditScale_LSB 0x28
-#define QIB_6120_IBCCtrl_CreditScale_RMASK 0x7
-#define QIB_6120_IBCCtrl_OverrunThreshold_LSB 0x24
-#define QIB_6120_IBCCtrl_OverrunThreshold_RMASK 0xF
-#define QIB_6120_IBCCtrl_PhyerrThreshold_LSB 0x20
-#define QIB_6120_IBCCtrl_PhyerrThreshold_RMASK 0xF
-#define QIB_6120_IBCCtrl_Reserved1_LSB 0x1F
-#define QIB_6120_IBCCtrl_Reserved1_RMASK 0x1
-#define QIB_6120_IBCCtrl_MaxPktLen_LSB 0x14
-#define QIB_6120_IBCCtrl_MaxPktLen_RMASK 0x7FF
-#define QIB_6120_IBCCtrl_LinkCmd_LSB 0x12
-#define QIB_6120_IBCCtrl_LinkCmd_RMASK 0x3
-#define QIB_6120_IBCCtrl_LinkInitCmd_LSB 0x10
-#define QIB_6120_IBCCtrl_LinkInitCmd_RMASK 0x3
-#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
-#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
-#define QIB_6120_IBCCtrl_FlowCtrlPeriod_LSB 0x0
-#define QIB_6120_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
-
-#define QIB_6120_EXTStatus_OFFS 0xD0
-#define QIB_6120_EXTStatus_GPIOIn_LSB 0x30
-#define QIB_6120_EXTStatus_GPIOIn_RMASK 0xFFFF
-#define QIB_6120_EXTStatus_Reserved_LSB 0x20
-#define QIB_6120_EXTStatus_Reserved_RMASK 0xFFFF
-#define QIB_6120_EXTStatus_Reserved1_LSB 0x10
-#define QIB_6120_EXTStatus_Reserved1_RMASK 0xFFFF
-#define QIB_6120_EXTStatus_MemBISTFoundErr_LSB 0xF
-#define QIB_6120_EXTStatus_MemBISTFoundErr_RMASK 0x1
-#define QIB_6120_EXTStatus_MemBISTEndTest_LSB 0xE
-#define QIB_6120_EXTStatus_MemBISTEndTest_RMASK 0x1
-#define QIB_6120_EXTStatus_Reserved2_LSB 0x0
-#define QIB_6120_EXTStatus_Reserved2_RMASK 0x3FFF
-
-#define QIB_6120_EXTCtrl_OFFS 0xD8
-#define QIB_6120_EXTCtrl_GPIOOe_LSB 0x30
-#define QIB_6120_EXTCtrl_GPIOOe_RMASK 0xFFFF
-#define QIB_6120_EXTCtrl_GPIOInvert_LSB 0x20
-#define QIB_6120_EXTCtrl_GPIOInvert_RMASK 0xFFFF
-#define QIB_6120_EXTCtrl_Reserved_LSB 0x4
-#define QIB_6120_EXTCtrl_Reserved_RMASK 0xFFFFFFF
-#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
-#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
-#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
-#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
-#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
-#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
-#define QIB_6120_EXTCtrl_LEDGblErrRedOff_LSB 0x0
-#define QIB_6120_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
-
-#define QIB_6120_GPIOOut_OFFS 0xE0
-
-#define QIB_6120_GPIOMask_OFFS 0xE8
-
-#define QIB_6120_GPIOStatus_OFFS 0xF0
-
-#define QIB_6120_GPIOClear_OFFS 0xF8
-
-#define QIB_6120_RcvCtrl_OFFS 0x100
-#define QIB_6120_RcvCtrl_TailUpd_LSB 0x1F
-#define QIB_6120_RcvCtrl_TailUpd_RMASK 0x1
-#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_LSB 0x1E
-#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
-#define QIB_6120_RcvCtrl_Reserved_LSB 0x15
-#define QIB_6120_RcvCtrl_Reserved_RMASK 0x1FF
-#define QIB_6120_RcvCtrl_IntrAvail_LSB 0x10
-#define QIB_6120_RcvCtrl_IntrAvail_RMASK 0x1F
-#define QIB_6120_RcvCtrl_Reserved1_LSB 0x9
-#define QIB_6120_RcvCtrl_Reserved1_RMASK 0x7F
-#define QIB_6120_RcvCtrl_Reserved2_LSB 0x5
-#define QIB_6120_RcvCtrl_Reserved2_RMASK 0xF
-#define QIB_6120_RcvCtrl_PortEnable_LSB 0x0
-#define QIB_6120_RcvCtrl_PortEnable_RMASK 0x1F
-
-#define QIB_6120_RcvBTHQP_OFFS 0x108
-#define QIB_6120_RcvBTHQP_BTHQP_Mask_LSB 0x1E
-#define QIB_6120_RcvBTHQP_BTHQP_Mask_RMASK 0x3
-#define QIB_6120_RcvBTHQP_Reserved_LSB 0x18
-#define QIB_6120_RcvBTHQP_Reserved_RMASK 0x3F
-#define QIB_6120_RcvBTHQP_RcvBTHQP_LSB 0x0
-#define QIB_6120_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
-
-#define QIB_6120_RcvHdrSize_OFFS 0x110
-
-#define QIB_6120_RcvHdrCnt_OFFS 0x118
-
-#define QIB_6120_RcvHdrEntSize_OFFS 0x120
-
-#define QIB_6120_RcvTIDBase_OFFS 0x128
-
-#define QIB_6120_RcvTIDCnt_OFFS 0x130
-
-#define QIB_6120_RcvEgrBase_OFFS 0x138
-
-#define QIB_6120_RcvEgrCnt_OFFS 0x140
-
-#define QIB_6120_RcvBufBase_OFFS 0x148
-
-#define QIB_6120_RcvBufSize_OFFS 0x150
-
-#define QIB_6120_RxIntMemBase_OFFS 0x158
-
-#define QIB_6120_RxIntMemSize_OFFS 0x160
-
-#define QIB_6120_RcvPartitionKey_OFFS 0x168
-
-#define QIB_6120_RcvPktLEDCnt_OFFS 0x178
-#define QIB_6120_RcvPktLEDCnt_ONperiod_LSB 0x20
-#define QIB_6120_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
-#define QIB_6120_RcvPktLEDCnt_OFFperiod_LSB 0x0
-#define QIB_6120_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
-
-#define QIB_6120_SendCtrl_OFFS 0x1C0
-#define QIB_6120_SendCtrl_Disarm_LSB 0x1F
-#define QIB_6120_SendCtrl_Disarm_RMASK 0x1
-#define QIB_6120_SendCtrl_Reserved_LSB 0x17
-#define QIB_6120_SendCtrl_Reserved_RMASK 0xFF
-#define QIB_6120_SendCtrl_DisarmPIOBuf_LSB 0x10
-#define QIB_6120_SendCtrl_DisarmPIOBuf_RMASK 0x7F
-#define QIB_6120_SendCtrl_Reserved1_LSB 0x4
-#define QIB_6120_SendCtrl_Reserved1_RMASK 0xFFF
-#define QIB_6120_SendCtrl_PIOEnable_LSB 0x3
-#define QIB_6120_SendCtrl_PIOEnable_RMASK 0x1
-#define QIB_6120_SendCtrl_PIOBufAvailUpd_LSB 0x2
-#define QIB_6120_SendCtrl_PIOBufAvailUpd_RMASK 0x1
-#define QIB_6120_SendCtrl_PIOIntBufAvail_LSB 0x1
-#define QIB_6120_SendCtrl_PIOIntBufAvail_RMASK 0x1
-#define QIB_6120_SendCtrl_Abort_LSB 0x0
-#define QIB_6120_SendCtrl_Abort_RMASK 0x1
-
-#define QIB_6120_SendPIOBufBase_OFFS 0x1C8
-#define QIB_6120_SendPIOBufBase_Reserved_LSB 0x35
-#define QIB_6120_SendPIOBufBase_Reserved_RMASK 0x7FF
-#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_LSB 0x20
-#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
-#define QIB_6120_SendPIOBufBase_Reserved1_LSB 0x15
-#define QIB_6120_SendPIOBufBase_Reserved1_RMASK 0x7FF
-#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_LSB 0x0
-#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
-
-#define QIB_6120_SendPIOSize_OFFS 0x1D0
-#define QIB_6120_SendPIOSize_Reserved_LSB 0x2D
-#define QIB_6120_SendPIOSize_Reserved_RMASK 0xFFFFF
-#define QIB_6120_SendPIOSize_Size_LargePIO_LSB 0x20
-#define QIB_6120_SendPIOSize_Size_LargePIO_RMASK 0x1FFF
-#define QIB_6120_SendPIOSize_Reserved1_LSB 0xC
-#define QIB_6120_SendPIOSize_Reserved1_RMASK 0xFFFFF
-#define QIB_6120_SendPIOSize_Size_SmallPIO_LSB 0x0
-#define QIB_6120_SendPIOSize_Size_SmallPIO_RMASK 0xFFF
-
-#define QIB_6120_SendPIOBufCnt_OFFS 0x1D8
-#define QIB_6120_SendPIOBufCnt_Reserved_LSB 0x24
-#define QIB_6120_SendPIOBufCnt_Reserved_RMASK 0xFFFFFFF
-#define QIB_6120_SendPIOBufCnt_Num_LargePIO_LSB 0x20
-#define QIB_6120_SendPIOBufCnt_Num_LargePIO_RMASK 0xF
-#define QIB_6120_SendPIOBufCnt_Reserved1_LSB 0x9
-#define QIB_6120_SendPIOBufCnt_Reserved1_RMASK 0x7FFFFF
-#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_LSB 0x0
-#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_RMASK 0x1FF
-
-#define QIB_6120_SendPIOAvailAddr_OFFS 0x1E0
-#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_LSB 0x6
-#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_RMASK 0x3FFFFFFFF
-#define QIB_6120_SendPIOAvailAddr_Reserved_LSB 0x0
-#define QIB_6120_SendPIOAvailAddr_Reserved_RMASK 0x3F
-
-#define QIB_6120_SendBufErr0_OFFS 0x240
-#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_LSB 0x0
-#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_RMASK 0x0
-
-#define QIB_6120_RcvHdrAddr0_OFFS 0x280
-#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
-#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
-#define QIB_6120_RcvHdrAddr0_Reserved_LSB 0x0
-#define QIB_6120_RcvHdrAddr0_Reserved_RMASK 0x3
-
-#define QIB_6120_RcvHdrTailAddr0_OFFS 0x300
-#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
-#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
-#define QIB_6120_RcvHdrTailAddr0_Reserved_LSB 0x0
-#define QIB_6120_RcvHdrTailAddr0_Reserved_RMASK 0x3
-
-#define QIB_6120_SerdesCfg0_OFFS 0x3C0
-#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_LSB 0x3F
-#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_RMASK 0x1
-#define QIB_6120_SerdesCfg0_Reserved_LSB 0x38
-#define QIB_6120_SerdesCfg0_Reserved_RMASK 0x7F
-#define QIB_6120_SerdesCfg0_RxEqCtl_LSB 0x36
-#define QIB_6120_SerdesCfg0_RxEqCtl_RMASK 0x3
-#define QIB_6120_SerdesCfg0_TxTermAdj_LSB 0x34
-#define QIB_6120_SerdesCfg0_TxTermAdj_RMASK 0x3
-#define QIB_6120_SerdesCfg0_RxTermAdj_LSB 0x32
-#define QIB_6120_SerdesCfg0_RxTermAdj_RMASK 0x3
-#define QIB_6120_SerdesCfg0_TermAdj1_LSB 0x31
-#define QIB_6120_SerdesCfg0_TermAdj1_RMASK 0x1
-#define QIB_6120_SerdesCfg0_TermAdj0_LSB 0x30
-#define QIB_6120_SerdesCfg0_TermAdj0_RMASK 0x1
-#define QIB_6120_SerdesCfg0_LPBKA_LSB 0x2F
-#define QIB_6120_SerdesCfg0_LPBKA_RMASK 0x1
-#define QIB_6120_SerdesCfg0_LPBKB_LSB 0x2E
-#define QIB_6120_SerdesCfg0_LPBKB_RMASK 0x1
-#define QIB_6120_SerdesCfg0_LPBKC_LSB 0x2D
-#define QIB_6120_SerdesCfg0_LPBKC_RMASK 0x1
-#define QIB_6120_SerdesCfg0_LPBKD_LSB 0x2C
-#define QIB_6120_SerdesCfg0_LPBKD_RMASK 0x1
-#define QIB_6120_SerdesCfg0_PW_LSB 0x2B
-#define QIB_6120_SerdesCfg0_PW_RMASK 0x1
-#define QIB_6120_SerdesCfg0_RefSel_LSB 0x29
-#define QIB_6120_SerdesCfg0_RefSel_RMASK 0x3
-#define QIB_6120_SerdesCfg0_ParReset_LSB 0x28
-#define QIB_6120_SerdesCfg0_ParReset_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ParLPBK_LSB 0x27
-#define QIB_6120_SerdesCfg0_ParLPBK_RMASK 0x1
-#define QIB_6120_SerdesCfg0_OffsetEn_LSB 0x26
-#define QIB_6120_SerdesCfg0_OffsetEn_RMASK 0x1
-#define QIB_6120_SerdesCfg0_Offset_LSB 0x1E
-#define QIB_6120_SerdesCfg0_Offset_RMASK 0xFF
-#define QIB_6120_SerdesCfg0_L2PwrDn_LSB 0x1D
-#define QIB_6120_SerdesCfg0_L2PwrDn_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetPLL_LSB 0x1C
-#define QIB_6120_SerdesCfg0_ResetPLL_RMASK 0x1
-#define QIB_6120_SerdesCfg0_RxTermEnX_LSB 0x18
-#define QIB_6120_SerdesCfg0_RxTermEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_BeaconTxEnX_LSB 0x14
-#define QIB_6120_SerdesCfg0_BeaconTxEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_RxDetEnX_LSB 0x10
-#define QIB_6120_SerdesCfg0_RxDetEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_TxIdeEnX_LSB 0xC
-#define QIB_6120_SerdesCfg0_TxIdeEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_RxIdleEnX_LSB 0x8
-#define QIB_6120_SerdesCfg0_RxIdleEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_L1PwrDnA_LSB 0x7
-#define QIB_6120_SerdesCfg0_L1PwrDnA_RMASK 0x1
-#define QIB_6120_SerdesCfg0_L1PwrDnB_LSB 0x6
-#define QIB_6120_SerdesCfg0_L1PwrDnB_RMASK 0x1
-#define QIB_6120_SerdesCfg0_L1PwrDnC_LSB 0x5
-#define QIB_6120_SerdesCfg0_L1PwrDnC_RMASK 0x1
-#define QIB_6120_SerdesCfg0_L1PwrDnD_LSB 0x4
-#define QIB_6120_SerdesCfg0_L1PwrDnD_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetA_LSB 0x3
-#define QIB_6120_SerdesCfg0_ResetA_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetB_LSB 0x2
-#define QIB_6120_SerdesCfg0_ResetB_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetC_LSB 0x1
-#define QIB_6120_SerdesCfg0_ResetC_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetD_LSB 0x0
-#define QIB_6120_SerdesCfg0_ResetD_RMASK 0x1
-
-#define QIB_6120_SerdesStat_OFFS 0x3D0
-#define QIB_6120_SerdesStat_Reserved_LSB 0xC
-#define QIB_6120_SerdesStat_Reserved_RMASK 0xFFFFFFFFFFFFF
-#define QIB_6120_SerdesStat_BeaconDetA_LSB 0xB
-#define QIB_6120_SerdesStat_BeaconDetA_RMASK 0x1
-#define QIB_6120_SerdesStat_BeaconDetB_LSB 0xA
-#define QIB_6120_SerdesStat_BeaconDetB_RMASK 0x1
-#define QIB_6120_SerdesStat_BeaconDetC_LSB 0x9
-#define QIB_6120_SerdesStat_BeaconDetC_RMASK 0x1
-#define QIB_6120_SerdesStat_BeaconDetD_LSB 0x8
-#define QIB_6120_SerdesStat_BeaconDetD_RMASK 0x1
-#define QIB_6120_SerdesStat_RxDetA_LSB 0x7
-#define QIB_6120_SerdesStat_RxDetA_RMASK 0x1
-#define QIB_6120_SerdesStat_RxDetB_LSB 0x6
-#define QIB_6120_SerdesStat_RxDetB_RMASK 0x1
-#define QIB_6120_SerdesStat_RxDetC_LSB 0x5
-#define QIB_6120_SerdesStat_RxDetC_RMASK 0x1
-#define QIB_6120_SerdesStat_RxDetD_LSB 0x4
-#define QIB_6120_SerdesStat_RxDetD_RMASK 0x1
-#define QIB_6120_SerdesStat_TxIdleDetA_LSB 0x3
-#define QIB_6120_SerdesStat_TxIdleDetA_RMASK 0x1
-#define QIB_6120_SerdesStat_TxIdleDetB_LSB 0x2
-#define QIB_6120_SerdesStat_TxIdleDetB_RMASK 0x1
-#define QIB_6120_SerdesStat_TxIdleDetC_LSB 0x1
-#define QIB_6120_SerdesStat_TxIdleDetC_RMASK 0x1
-#define QIB_6120_SerdesStat_TxIdleDetD_LSB 0x0
-#define QIB_6120_SerdesStat_TxIdleDetD_RMASK 0x1
-
-#define QIB_6120_XGXSCfg_OFFS 0x3D8
-#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_LSB 0x3F
-#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_RMASK 0x1
-#define QIB_6120_XGXSCfg_Reserved_LSB 0x17
-#define QIB_6120_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFF
-#define QIB_6120_XGXSCfg_polarity_inv_LSB 0x13
-#define QIB_6120_XGXSCfg_polarity_inv_RMASK 0xF
-#define QIB_6120_XGXSCfg_link_sync_mask_LSB 0x9
-#define QIB_6120_XGXSCfg_link_sync_mask_RMASK 0x3FF
-#define QIB_6120_XGXSCfg_port_addr_LSB 0x4
-#define QIB_6120_XGXSCfg_port_addr_RMASK 0x1F
-#define QIB_6120_XGXSCfg_mdd_30_LSB 0x3
-#define QIB_6120_XGXSCfg_mdd_30_RMASK 0x1
-#define QIB_6120_XGXSCfg_xcv_resetn_LSB 0x2
-#define QIB_6120_XGXSCfg_xcv_resetn_RMASK 0x1
-#define QIB_6120_XGXSCfg_Reserved1_LSB 0x1
-#define QIB_6120_XGXSCfg_Reserved1_RMASK 0x1
-#define QIB_6120_XGXSCfg_tx_rx_resetn_LSB 0x0
-#define QIB_6120_XGXSCfg_tx_rx_resetn_RMASK 0x1
-
-#define QIB_6120_LBIntCnt_OFFS 0x12000
-
-#define QIB_6120_LBFlowStallCnt_OFFS 0x12008
-
-#define QIB_6120_TxUnsupVLErrCnt_OFFS 0x12018
-
-#define QIB_6120_TxDataPktCnt_OFFS 0x12020
-
-#define QIB_6120_TxFlowPktCnt_OFFS 0x12028
-
-#define QIB_6120_TxDwordCnt_OFFS 0x12030
-
-#define QIB_6120_TxLenErrCnt_OFFS 0x12038
-
-#define QIB_6120_TxMaxMinLenErrCnt_OFFS 0x12040
-
-#define QIB_6120_TxUnderrunCnt_OFFS 0x12048
-
-#define QIB_6120_TxFlowStallCnt_OFFS 0x12050
-
-#define QIB_6120_TxDroppedPktCnt_OFFS 0x12058
-
-#define QIB_6120_RxDroppedPktCnt_OFFS 0x12060
-
-#define QIB_6120_RxDataPktCnt_OFFS 0x12068
-
-#define QIB_6120_RxFlowPktCnt_OFFS 0x12070
-
-#define QIB_6120_RxDwordCnt_OFFS 0x12078
-
-#define QIB_6120_RxLenErrCnt_OFFS 0x12080
-
-#define QIB_6120_RxMaxMinLenErrCnt_OFFS 0x12088
-
-#define QIB_6120_RxICRCErrCnt_OFFS 0x12090
-
-#define QIB_6120_RxVCRCErrCnt_OFFS 0x12098
-
-#define QIB_6120_RxFlowCtrlErrCnt_OFFS 0x120A0
-
-#define QIB_6120_RxBadFormatCnt_OFFS 0x120A8
-
-#define QIB_6120_RxLinkProblemCnt_OFFS 0x120B0
-
-#define QIB_6120_RxEBPCnt_OFFS 0x120B8
-
-#define QIB_6120_RxLPCRCErrCnt_OFFS 0x120C0
-
-#define QIB_6120_RxBufOvflCnt_OFFS 0x120C8
-
-#define QIB_6120_RxTIDFullErrCnt_OFFS 0x120D0
-
-#define QIB_6120_RxTIDValidErrCnt_OFFS 0x120D8
-
-#define QIB_6120_RxPKeyMismatchCnt_OFFS 0x120E0
-
-#define QIB_6120_RxP0HdrEgrOvflCnt_OFFS 0x120E8
-
-#define QIB_6120_IBStatusChangeCnt_OFFS 0x12140
-
-#define QIB_6120_IBLinkErrRecoveryCnt_OFFS 0x12148
-
-#define QIB_6120_IBLinkDownedCnt_OFFS 0x12150
-
-#define QIB_6120_IBSymbolErrCnt_OFFS 0x12158
-
-#define QIB_6120_PcieRetryBufDiagQwordCnt_OFFS 0x12170
-
-#define QIB_6120_RcvEgrArray0_OFFS 0x14000
-
-#define QIB_6120_RcvTIDArray0_OFFS 0x54000
-
-#define QIB_6120_PIOLaunchFIFO_OFFS 0x64000
-
-#define QIB_6120_SendPIOpbcCache_OFFS 0x64800
-
-#define QIB_6120_RcvBuf1_OFFS 0x72000
-
-#define QIB_6120_RcvBuf2_OFFS 0x75000
-
-#define QIB_6120_RcvFlags_OFFS 0x77000
-
-#define QIB_6120_RcvLookupBuf1_OFFS 0x79000
-
-#define QIB_6120_RcvDMABuf_OFFS 0x7B000
-
-#define QIB_6120_MiscRXEIntMem_OFFS 0x7C000
-
-#define QIB_6120_PCIERcvBuf_OFFS 0x80000
-
-#define QIB_6120_PCIERetryBuf_OFFS 0x82000
-
-#define QIB_6120_PCIERcvBufRdToWrAddr_OFFS 0x84000
-
-#define QIB_6120_PIOBuf0_MA_OFFS 0x100000
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
deleted file mode 100644
index 9ecaab6232e3..000000000000
--- a/drivers/infiniband/hw/qib/qib_7220.h
+++ /dev/null
@@ -1,149 +0,0 @@
-#ifndef _QIB_7220_H
-#define _QIB_7220_H
-/*
- * Copyright (c) 2007, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/* grab register-defs auto-generated by HW */
-#include "qib_7220_regs.h"
-
-/* The number of eager receive TIDs for context zero. */
-#define IBA7220_KRCVEGRCNT 2048U
-
-#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
-#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
-#define IB_7220_LT_STATE_TXREVLANES 0x0d
-#define IB_7220_LT_STATE_CFGENH 0x10
-
-struct qib_chip_specific {
- u64 __iomem *cregbase;
- u64 *cntrs;
- u64 *portcntrs;
- spinlock_t sdepb_lock; /* serdes EPB bus */
- spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
- spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
- u64 hwerrmask;
- u64 errormask;
- u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
- u64 gpio_mask; /* shadow the gpio mask register */
- u64 extctrl; /* shadow the gpio output enable, etc... */
- u32 ncntrs;
- u32 nportcntrs;
- u32 cntrnamelen;
- u32 portcntrnamelen;
- u32 numctxts;
- u32 rcvegrcnt;
- u32 autoneg_tries;
- u32 serdes_first_init_done;
- u32 sdmabufcnt;
- u32 lastbuf_for_pio;
- u32 updthresh; /* current AvailUpdThld */
- u32 updthresh_dflt; /* default AvailUpdThld */
- u8 presets_needed;
- u8 relock_timer_active;
- char emsgbuf[128];
- char sdmamsgbuf[192];
- char bitsmsgbuf[64];
- struct timer_list relock_timer;
- unsigned int relock_interval; /* in jiffies */
- struct qib_devdata *dd;
-};
-
-struct qib_chippport_specific {
- struct qib_pportdata pportdata;
- wait_queue_head_t autoneg_wait;
- struct delayed_work autoneg_work;
- struct timer_list chase_timer;
- /*
- * these 5 fields are used to establish deltas for IB symbol
- * errors and linkrecovery errors. They can be reported on
- * some chips during link negotiation prior to INIT, and with
- * DDR when faking DDR negotiations with non-IBTA switches.
- * The chip counters are adjusted at driver unload if there is
- * a non-zero delta.
- */
- u64 ibdeltainprog;
- u64 ibsymdelta;
- u64 ibsymsnap;
- u64 iblnkerrdelta;
- u64 iblnkerrsnap;
- u64 ibcctrl; /* kr_ibcctrl shadow */
- u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
- unsigned long chase_end;
- u32 last_delay_mult;
-};
-
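/*
 * Illustrative sketch (not from the deleted files): the snapshot/delta
 * fields above (ibdeltainprog, ibsymsnap, ibsymdelta, ...) implement
 * the scheme the struct comment describes -- snapshot the hardware
 * counter when a window of expected spurious errors opens, accumulate
 * the difference when it closes, and back the accumulated delta out
 * when the counter is reported (or written back at unload).
 * read_ibsymbolerr() is a hypothetical stand-in for the real read.
 */
static u64 read_ibsymbolerr(void);	/* assumed counter accessor */

static void ibsym_window_open(struct qib_chippport_specific *cp)
{
	cp->ibdeltainprog = 1;
	cp->ibsymsnap = read_ibsymbolerr();
}

static void ibsym_window_close(struct qib_chippport_specific *cp)
{
	cp->ibdeltainprog = 0;
	cp->ibsymdelta += read_ibsymbolerr() - cp->ibsymsnap;
}

static u64 ibsym_reported(struct qib_chippport_specific *cp)
{
	return read_ibsymbolerr() - cp->ibsymdelta;
}
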
-/*
- * This header file provides the declarations and common definitions
- * for (mostly) manipulation of the SerDes blocks within the IBA7220.
- * the functions declared should only be called from within other
- * 7220-related files such as qib_iba7220.c or qib_sd7220.c.
- */
-int qib_sd7220_presets(struct qib_devdata *dd);
-int qib_sd7220_init(struct qib_devdata *dd);
-void qib_sd7220_clr_ibpar(struct qib_devdata *);
-/*
- * Below used for sdnum parameter, selecting one of the two sections
- * used for PCIe, or the single SerDes used for IB, which is the
- * only one currently used
- */
-#define IB_7220_SERDES 2
-
-static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
- const u16 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
- return readl((u32 __iomem *)&dd->kregbase[regno]);
-}
-
-static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
- const u16 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
-
- return readq(&dd->kregbase[regno]);
-}
-
-static inline void qib_write_kreg(const struct qib_devdata *dd,
- const u16 regno, u64 value)
-{
- if (dd->kregbase)
- writeq(value, &dd->kregbase[regno]);
-}
-
-void set_7220_relock_poll(struct qib_devdata *, int);
-void shutdown_7220_relock_poll(struct qib_devdata *);
-void toggle_7220_rclkrls(struct qib_devdata *);
-
-
-#endif /* _QIB_7220_H */
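/*
 * Illustrative sketch (not from the deleted files): how the generated
 * *_OFFS/_LSB/_RMASK macros in qib_7220_regs.h combine with the
 * accessors above.  kregbase is a u64 pointer, so a byte offset
 * becomes a register index by dividing by sizeof(u64); a field is
 * extracted by shifting down to bit 0 and masking with RMASK.  The
 * helper macro names follow the driver's usual pattern but are
 * editorial here.
 */
#define KREG_IDX(regname)	(QIB_7220_##regname##_OFFS / sizeof(u64))
#define SYM_LSB(regname, fld)	QIB_7220_##regname##_##fld##_LSB
#define SYM_RMASK(regname, fld)	((u64)QIB_7220_##regname##_##fld##_RMASK)
#define SYM_FIELD(val, regname, fld) \
	(((val) >> SYM_LSB(regname, fld)) & SYM_RMASK(regname, fld))

/* e.g. current link training state from the IBCStatus register: */
static inline u32 qib_7220_lt_state(const struct qib_devdata *dd)
{
	u64 ibcs = qib_read_kreg64(dd, KREG_IDX(IBCStatus));

	return (u32)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
}
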
diff --git a/drivers/infiniband/hw/qib/qib_7220_regs.h b/drivers/infiniband/hw/qib/qib_7220_regs.h
deleted file mode 100644
index 0da5bb750e52..000000000000
--- a/drivers/infiniband/hw/qib/qib_7220_regs.h
+++ /dev/null
@@ -1,1496 +0,0 @@
-/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
-
-#define QIB_7220_Revision_OFFS 0x0
-#define QIB_7220_Revision_R_Simulator_LSB 0x3F
-#define QIB_7220_Revision_R_Simulator_RMASK 0x1
-#define QIB_7220_Revision_R_Emulation_LSB 0x3E
-#define QIB_7220_Revision_R_Emulation_RMASK 0x1
-#define QIB_7220_Revision_R_Emulation_Revcode_LSB 0x28
-#define QIB_7220_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
-#define QIB_7220_Revision_BoardID_LSB 0x20
-#define QIB_7220_Revision_BoardID_RMASK 0xFF
-#define QIB_7220_Revision_R_SW_LSB 0x18
-#define QIB_7220_Revision_R_SW_RMASK 0xFF
-#define QIB_7220_Revision_R_Arch_LSB 0x10
-#define QIB_7220_Revision_R_Arch_RMASK 0xFF
-#define QIB_7220_Revision_R_ChipRevMajor_LSB 0x8
-#define QIB_7220_Revision_R_ChipRevMajor_RMASK 0xFF
-#define QIB_7220_Revision_R_ChipRevMinor_LSB 0x0
-#define QIB_7220_Revision_R_ChipRevMinor_RMASK 0xFF
-
-#define QIB_7220_Control_OFFS 0x8
-#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_LSB 0x7
-#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_RMASK 0x1
-#define QIB_7220_Control_PCIECplQDiagEn_LSB 0x6
-#define QIB_7220_Control_PCIECplQDiagEn_RMASK 0x1
-#define QIB_7220_Control_Reserved_LSB 0x5
-#define QIB_7220_Control_Reserved_RMASK 0x1
-#define QIB_7220_Control_TxLatency_LSB 0x4
-#define QIB_7220_Control_TxLatency_RMASK 0x1
-#define QIB_7220_Control_PCIERetryBufDiagEn_LSB 0x3
-#define QIB_7220_Control_PCIERetryBufDiagEn_RMASK 0x1
-#define QIB_7220_Control_LinkEn_LSB 0x2
-#define QIB_7220_Control_LinkEn_RMASK 0x1
-#define QIB_7220_Control_FreezeMode_LSB 0x1
-#define QIB_7220_Control_FreezeMode_RMASK 0x1
-#define QIB_7220_Control_SyncReset_LSB 0x0
-#define QIB_7220_Control_SyncReset_RMASK 0x1
-
-#define QIB_7220_PageAlign_OFFS 0x10
-
-#define QIB_7220_PortCnt_OFFS 0x18
-
-#define QIB_7220_SendRegBase_OFFS 0x30
-
-#define QIB_7220_UserRegBase_OFFS 0x38
-
-#define QIB_7220_CntrRegBase_OFFS 0x40
-
-#define QIB_7220_Scratch_OFFS 0x48
-
-#define QIB_7220_IntMask_OFFS 0x68
-#define QIB_7220_IntMask_SDmaIntMask_LSB 0x3F
-#define QIB_7220_IntMask_SDmaIntMask_RMASK 0x1
-#define QIB_7220_IntMask_SDmaDisabledMasked_LSB 0x3E
-#define QIB_7220_IntMask_SDmaDisabledMasked_RMASK 0x1
-#define QIB_7220_IntMask_Reserved_LSB 0x31
-#define QIB_7220_IntMask_Reserved_RMASK 0x1FFF
-#define QIB_7220_IntMask_RcvUrg16IntMask_LSB 0x30
-#define QIB_7220_IntMask_RcvUrg16IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg15IntMask_LSB 0x2F
-#define QIB_7220_IntMask_RcvUrg15IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg14IntMask_LSB 0x2E
-#define QIB_7220_IntMask_RcvUrg14IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg13IntMask_LSB 0x2D
-#define QIB_7220_IntMask_RcvUrg13IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg12IntMask_LSB 0x2C
-#define QIB_7220_IntMask_RcvUrg12IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg11IntMask_LSB 0x2B
-#define QIB_7220_IntMask_RcvUrg11IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg10IntMask_LSB 0x2A
-#define QIB_7220_IntMask_RcvUrg10IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg9IntMask_LSB 0x29
-#define QIB_7220_IntMask_RcvUrg9IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg8IntMask_LSB 0x28
-#define QIB_7220_IntMask_RcvUrg8IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg7IntMask_LSB 0x27
-#define QIB_7220_IntMask_RcvUrg7IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg6IntMask_LSB 0x26
-#define QIB_7220_IntMask_RcvUrg6IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg5IntMask_LSB 0x25
-#define QIB_7220_IntMask_RcvUrg5IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg4IntMask_LSB 0x24
-#define QIB_7220_IntMask_RcvUrg4IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg3IntMask_LSB 0x23
-#define QIB_7220_IntMask_RcvUrg3IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg2IntMask_LSB 0x22
-#define QIB_7220_IntMask_RcvUrg2IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg1IntMask_LSB 0x21
-#define QIB_7220_IntMask_RcvUrg1IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg0IntMask_LSB 0x20
-#define QIB_7220_IntMask_RcvUrg0IntMask_RMASK 0x1
-#define QIB_7220_IntMask_ErrorIntMask_LSB 0x1F
-#define QIB_7220_IntMask_ErrorIntMask_RMASK 0x1
-#define QIB_7220_IntMask_PioSetIntMask_LSB 0x1E
-#define QIB_7220_IntMask_PioSetIntMask_RMASK 0x1
-#define QIB_7220_IntMask_PioBufAvailIntMask_LSB 0x1D
-#define QIB_7220_IntMask_PioBufAvailIntMask_RMASK 0x1
-#define QIB_7220_IntMask_assertGPIOIntMask_LSB 0x1C
-#define QIB_7220_IntMask_assertGPIOIntMask_RMASK 0x1
-#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_LSB 0x1B
-#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_RMASK 0x1
-#define QIB_7220_IntMask_JIntMask_LSB 0x1A
-#define QIB_7220_IntMask_JIntMask_RMASK 0x1
-#define QIB_7220_IntMask_Reserved1_LSB 0x11
-#define QIB_7220_IntMask_Reserved1_RMASK 0x1FF
-#define QIB_7220_IntMask_RcvAvail16IntMask_LSB 0x10
-#define QIB_7220_IntMask_RcvAvail16IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail15IntMask_LSB 0xF
-#define QIB_7220_IntMask_RcvAvail15IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail14IntMask_LSB 0xE
-#define QIB_7220_IntMask_RcvAvail14IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail13IntMask_LSB 0xD
-#define QIB_7220_IntMask_RcvAvail13IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail12IntMask_LSB 0xC
-#define QIB_7220_IntMask_RcvAvail12IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail11IntMask_LSB 0xB
-#define QIB_7220_IntMask_RcvAvail11IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail10IntMask_LSB 0xA
-#define QIB_7220_IntMask_RcvAvail10IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail9IntMask_LSB 0x9
-#define QIB_7220_IntMask_RcvAvail9IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail8IntMask_LSB 0x8
-#define QIB_7220_IntMask_RcvAvail8IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail7IntMask_LSB 0x7
-#define QIB_7220_IntMask_RcvAvail7IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail6IntMask_LSB 0x6
-#define QIB_7220_IntMask_RcvAvail6IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail5IntMask_LSB 0x5
-#define QIB_7220_IntMask_RcvAvail5IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail4IntMask_LSB 0x4
-#define QIB_7220_IntMask_RcvAvail4IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail3IntMask_LSB 0x3
-#define QIB_7220_IntMask_RcvAvail3IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail2IntMask_LSB 0x2
-#define QIB_7220_IntMask_RcvAvail2IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail1IntMask_LSB 0x1
-#define QIB_7220_IntMask_RcvAvail1IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail0IntMask_LSB 0x0
-#define QIB_7220_IntMask_RcvAvail0IntMask_RMASK 0x1
-
-#define QIB_7220_IntStatus_OFFS 0x70
-#define QIB_7220_IntStatus_SDmaInt_LSB 0x3F
-#define QIB_7220_IntStatus_SDmaInt_RMASK 0x1
-#define QIB_7220_IntStatus_SDmaDisabled_LSB 0x3E
-#define QIB_7220_IntStatus_SDmaDisabled_RMASK 0x1
-#define QIB_7220_IntStatus_Reserved_LSB 0x31
-#define QIB_7220_IntStatus_Reserved_RMASK 0x1FFF
-#define QIB_7220_IntStatus_RcvUrg16_LSB 0x30
-#define QIB_7220_IntStatus_RcvUrg16_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg15_LSB 0x2F
-#define QIB_7220_IntStatus_RcvUrg15_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg14_LSB 0x2E
-#define QIB_7220_IntStatus_RcvUrg14_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg13_LSB 0x2D
-#define QIB_7220_IntStatus_RcvUrg13_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg12_LSB 0x2C
-#define QIB_7220_IntStatus_RcvUrg12_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg11_LSB 0x2B
-#define QIB_7220_IntStatus_RcvUrg11_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg10_LSB 0x2A
-#define QIB_7220_IntStatus_RcvUrg10_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg9_LSB 0x29
-#define QIB_7220_IntStatus_RcvUrg9_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg8_LSB 0x28
-#define QIB_7220_IntStatus_RcvUrg8_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg7_LSB 0x27
-#define QIB_7220_IntStatus_RcvUrg7_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg6_LSB 0x26
-#define QIB_7220_IntStatus_RcvUrg6_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg5_LSB 0x25
-#define QIB_7220_IntStatus_RcvUrg5_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg4_LSB 0x24
-#define QIB_7220_IntStatus_RcvUrg4_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg3_LSB 0x23
-#define QIB_7220_IntStatus_RcvUrg3_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg2_LSB 0x22
-#define QIB_7220_IntStatus_RcvUrg2_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg1_LSB 0x21
-#define QIB_7220_IntStatus_RcvUrg1_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg0_LSB 0x20
-#define QIB_7220_IntStatus_RcvUrg0_RMASK 0x1
-#define QIB_7220_IntStatus_Error_LSB 0x1F
-#define QIB_7220_IntStatus_Error_RMASK 0x1
-#define QIB_7220_IntStatus_PioSent_LSB 0x1E
-#define QIB_7220_IntStatus_PioSent_RMASK 0x1
-#define QIB_7220_IntStatus_PioBufAvail_LSB 0x1D
-#define QIB_7220_IntStatus_PioBufAvail_RMASK 0x1
-#define QIB_7220_IntStatus_assertGPIO_LSB 0x1C
-#define QIB_7220_IntStatus_assertGPIO_RMASK 0x1
-#define QIB_7220_IntStatus_IBSerdesTrimDone_LSB 0x1B
-#define QIB_7220_IntStatus_IBSerdesTrimDone_RMASK 0x1
-#define QIB_7220_IntStatus_JInt_LSB 0x1A
-#define QIB_7220_IntStatus_JInt_RMASK 0x1
-#define QIB_7220_IntStatus_Reserved1_LSB 0x11
-#define QIB_7220_IntStatus_Reserved1_RMASK 0x1FF
-#define QIB_7220_IntStatus_RcvAvail16_LSB 0x10
-#define QIB_7220_IntStatus_RcvAvail16_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail15_LSB 0xF
-#define QIB_7220_IntStatus_RcvAvail15_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail14_LSB 0xE
-#define QIB_7220_IntStatus_RcvAvail14_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail13_LSB 0xD
-#define QIB_7220_IntStatus_RcvAvail13_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail12_LSB 0xC
-#define QIB_7220_IntStatus_RcvAvail12_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail11_LSB 0xB
-#define QIB_7220_IntStatus_RcvAvail11_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail10_LSB 0xA
-#define QIB_7220_IntStatus_RcvAvail10_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail9_LSB 0x9
-#define QIB_7220_IntStatus_RcvAvail9_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail8_LSB 0x8
-#define QIB_7220_IntStatus_RcvAvail8_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail7_LSB 0x7
-#define QIB_7220_IntStatus_RcvAvail7_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail6_LSB 0x6
-#define QIB_7220_IntStatus_RcvAvail6_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail5_LSB 0x5
-#define QIB_7220_IntStatus_RcvAvail5_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail4_LSB 0x4
-#define QIB_7220_IntStatus_RcvAvail4_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail3_LSB 0x3
-#define QIB_7220_IntStatus_RcvAvail3_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail2_LSB 0x2
-#define QIB_7220_IntStatus_RcvAvail2_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail1_LSB 0x1
-#define QIB_7220_IntStatus_RcvAvail1_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail0_LSB 0x0
-#define QIB_7220_IntStatus_RcvAvail0_RMASK 0x1
-
-#define QIB_7220_IntClear_OFFS 0x78
-#define QIB_7220_IntClear_SDmaIntClear_LSB 0x3F
-#define QIB_7220_IntClear_SDmaIntClear_RMASK 0x1
-#define QIB_7220_IntClear_SDmaDisabledClear_LSB 0x3E
-#define QIB_7220_IntClear_SDmaDisabledClear_RMASK 0x1
-#define QIB_7220_IntClear_Reserved_LSB 0x31
-#define QIB_7220_IntClear_Reserved_RMASK 0x1FFF
-#define QIB_7220_IntClear_RcvUrg16IntClear_LSB 0x30
-#define QIB_7220_IntClear_RcvUrg16IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg15IntClear_LSB 0x2F
-#define QIB_7220_IntClear_RcvUrg15IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg14IntClear_LSB 0x2E
-#define QIB_7220_IntClear_RcvUrg14IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg13IntClear_LSB 0x2D
-#define QIB_7220_IntClear_RcvUrg13IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg12IntClear_LSB 0x2C
-#define QIB_7220_IntClear_RcvUrg12IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg11IntClear_LSB 0x2B
-#define QIB_7220_IntClear_RcvUrg11IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg10IntClear_LSB 0x2A
-#define QIB_7220_IntClear_RcvUrg10IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg9IntClear_LSB 0x29
-#define QIB_7220_IntClear_RcvUrg9IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg8IntClear_LSB 0x28
-#define QIB_7220_IntClear_RcvUrg8IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg7IntClear_LSB 0x27
-#define QIB_7220_IntClear_RcvUrg7IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg6IntClear_LSB 0x26
-#define QIB_7220_IntClear_RcvUrg6IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg5IntClear_LSB 0x25
-#define QIB_7220_IntClear_RcvUrg5IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg4IntClear_LSB 0x24
-#define QIB_7220_IntClear_RcvUrg4IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg3IntClear_LSB 0x23
-#define QIB_7220_IntClear_RcvUrg3IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg2IntClear_LSB 0x22
-#define QIB_7220_IntClear_RcvUrg2IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg1IntClear_LSB 0x21
-#define QIB_7220_IntClear_RcvUrg1IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg0IntClear_LSB 0x20
-#define QIB_7220_IntClear_RcvUrg0IntClear_RMASK 0x1
-#define QIB_7220_IntClear_ErrorIntClear_LSB 0x1F
-#define QIB_7220_IntClear_ErrorIntClear_RMASK 0x1
-#define QIB_7220_IntClear_PioSetIntClear_LSB 0x1E
-#define QIB_7220_IntClear_PioSetIntClear_RMASK 0x1
-#define QIB_7220_IntClear_PioBufAvailIntClear_LSB 0x1D
-#define QIB_7220_IntClear_PioBufAvailIntClear_RMASK 0x1
-#define QIB_7220_IntClear_assertGPIOIntClear_LSB 0x1C
-#define QIB_7220_IntClear_assertGPIOIntClear_RMASK 0x1
-#define QIB_7220_IntClear_IBSerdesTrimDoneClear_LSB 0x1B
-#define QIB_7220_IntClear_IBSerdesTrimDoneClear_RMASK 0x1
-#define QIB_7220_IntClear_JIntClear_LSB 0x1A
-#define QIB_7220_IntClear_JIntClear_RMASK 0x1
-#define QIB_7220_IntClear_Reserved1_LSB 0x11
-#define QIB_7220_IntClear_Reserved1_RMASK 0x1FF
-#define QIB_7220_IntClear_RcvAvail16IntClear_LSB 0x10
-#define QIB_7220_IntClear_RcvAvail16IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail15IntClear_LSB 0xF
-#define QIB_7220_IntClear_RcvAvail15IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail14IntClear_LSB 0xE
-#define QIB_7220_IntClear_RcvAvail14IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail13IntClear_LSB 0xD
-#define QIB_7220_IntClear_RcvAvail13IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail12IntClear_LSB 0xC
-#define QIB_7220_IntClear_RcvAvail12IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail11IntClear_LSB 0xB
-#define QIB_7220_IntClear_RcvAvail11IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail10IntClear_LSB 0xA
-#define QIB_7220_IntClear_RcvAvail10IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail9IntClear_LSB 0x9
-#define QIB_7220_IntClear_RcvAvail9IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail8IntClear_LSB 0x8
-#define QIB_7220_IntClear_RcvAvail8IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail7IntClear_LSB 0x7
-#define QIB_7220_IntClear_RcvAvail7IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail6IntClear_LSB 0x6
-#define QIB_7220_IntClear_RcvAvail6IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail5IntClear_LSB 0x5
-#define QIB_7220_IntClear_RcvAvail5IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail4IntClear_LSB 0x4
-#define QIB_7220_IntClear_RcvAvail4IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail3IntClear_LSB 0x3
-#define QIB_7220_IntClear_RcvAvail3IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail2IntClear_LSB 0x2
-#define QIB_7220_IntClear_RcvAvail2IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail1IntClear_LSB 0x1
-#define QIB_7220_IntClear_RcvAvail1IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail0IntClear_LSB 0x0
-#define QIB_7220_IntClear_RcvAvail0IntClear_RMASK 0x1
-
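/*
 * Illustrative sketch (not from the deleted files): IntMask, IntStatus
 * and IntClear above are the usual enable/status/write-1-to-clear trio
 * -- the bit layout is identical in all three, so a handler can read
 * the status and write the same bits back to the clear register to
 * acknowledge them.
 */
static void qib_7220_ack_ints(struct qib_devdata *dd)
{
	u64 istat = qib_read_kreg64(dd, QIB_7220_IntStatus_OFFS / sizeof(u64));

	qib_write_kreg(dd, QIB_7220_IntClear_OFFS / sizeof(u64), istat);
}
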
-#define QIB_7220_ErrMask_OFFS 0x80
-#define QIB_7220_ErrMask_Reserved_LSB 0x36
-#define QIB_7220_ErrMask_Reserved_RMASK 0x3FF
-#define QIB_7220_ErrMask_InvalidEEPCmdMask_LSB 0x35
-#define QIB_7220_ErrMask_InvalidEEPCmdMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_LSB 0x34
-#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_HardwareErrMask_LSB 0x33
-#define QIB_7220_ErrMask_HardwareErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_ResetNegatedMask_LSB 0x32
-#define QIB_7220_ErrMask_ResetNegatedMask_RMASK 0x1
-#define QIB_7220_ErrMask_InvalidAddrErrMask_LSB 0x31
-#define QIB_7220_ErrMask_InvalidAddrErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_IBStatusChangedMask_LSB 0x30
-#define QIB_7220_ErrMask_IBStatusChangedMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_LSB 0x2F
-#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaMissingDwErrMask_LSB 0x2E
-#define QIB_7220_ErrMask_SDmaMissingDwErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaDwEnErrMask_LSB 0x2D
-#define QIB_7220_ErrMask_SDmaDwEnErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaRpyTagErrMask_LSB 0x2C
-#define QIB_7220_ErrMask_SDmaRpyTagErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDma1stDescErrMask_LSB 0x2B
-#define QIB_7220_ErrMask_SDma1stDescErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaBaseErrMask_LSB 0x2A
-#define QIB_7220_ErrMask_SDmaBaseErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_LSB 0x29
-#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_LSB 0x28
-#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_LSB 0x27
-#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendBufMisuseErrMask_LSB 0x26
-#define QIB_7220_ErrMask_SendBufMisuseErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
-#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
-#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
-#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
-#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
-#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendPktLenErrMask_LSB 0x20
-#define QIB_7220_ErrMask_SendPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendUnderRunErrMask_LSB 0x1F
-#define QIB_7220_ErrMask_SendUnderRunErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
-#define QIB_7220_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendMinPktLenErrMask_LSB 0x1D
-#define QIB_7220_ErrMask_SendMinPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaDisabledErrMask_LSB 0x1C
-#define QIB_7220_ErrMask_SDmaDisabledErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
-#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_Reserved1_LSB 0x12
-#define QIB_7220_ErrMask_Reserved1_RMASK 0x1FF
-#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
-#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvHdrErrMask_LSB 0x10
-#define QIB_7220_ErrMask_RcvHdrErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvHdrLenErrMask_LSB 0xF
-#define QIB_7220_ErrMask_RcvHdrLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvBadTidErrMask_LSB 0xE
-#define QIB_7220_ErrMask_RcvBadTidErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvHdrFullErrMask_LSB 0xD
-#define QIB_7220_ErrMask_RcvHdrFullErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvEgrFullErrMask_LSB 0xC
-#define QIB_7220_ErrMask_RcvEgrFullErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvBadVersionErrMask_LSB 0xB
-#define QIB_7220_ErrMask_RcvBadVersionErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvIBFlowErrMask_LSB 0xA
-#define QIB_7220_ErrMask_RcvIBFlowErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvEBPErrMask_LSB 0x9
-#define QIB_7220_ErrMask_RcvEBPErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
-#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
-#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvShortPktLenErrMask_LSB 0x6
-#define QIB_7220_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvLongPktLenErrMask_LSB 0x5
-#define QIB_7220_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
-#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvMinPktLenErrMask_LSB 0x3
-#define QIB_7220_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvICRCErrMask_LSB 0x2
-#define QIB_7220_ErrMask_RcvICRCErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvVCRCErrMask_LSB 0x1
-#define QIB_7220_ErrMask_RcvVCRCErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvFormatErrMask_LSB 0x0
-#define QIB_7220_ErrMask_RcvFormatErrMask_RMASK 0x1
-
-#define QIB_7220_ErrStatus_OFFS 0x88
-#define QIB_7220_ErrStatus_Reserved_LSB 0x36
-#define QIB_7220_ErrStatus_Reserved_RMASK 0x3FF
-#define QIB_7220_ErrStatus_InvalidEEPCmdErr_LSB 0x35
-#define QIB_7220_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_LSB 0x34
-#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_RMASK 0x1
-#define QIB_7220_ErrStatus_HardwareErr_LSB 0x33
-#define QIB_7220_ErrStatus_HardwareErr_RMASK 0x1
-#define QIB_7220_ErrStatus_ResetNegated_LSB 0x32
-#define QIB_7220_ErrStatus_ResetNegated_RMASK 0x1
-#define QIB_7220_ErrStatus_InvalidAddrErr_LSB 0x31
-#define QIB_7220_ErrStatus_InvalidAddrErr_RMASK 0x1
-#define QIB_7220_ErrStatus_IBStatusChanged_LSB 0x30
-#define QIB_7220_ErrStatus_IBStatusChanged_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaUnexpDataErr_LSB 0x2F
-#define QIB_7220_ErrStatus_SDmaUnexpDataErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaMissingDwErr_LSB 0x2E
-#define QIB_7220_ErrStatus_SDmaMissingDwErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaDwEnErr_LSB 0x2D
-#define QIB_7220_ErrStatus_SDmaDwEnErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaRpyTagErr_LSB 0x2C
-#define QIB_7220_ErrStatus_SDmaRpyTagErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDma1stDescErr_LSB 0x2B
-#define QIB_7220_ErrStatus_SDma1stDescErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaBaseErr_LSB 0x2A
-#define QIB_7220_ErrStatus_SDmaBaseErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_LSB 0x29
-#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_LSB 0x28
-#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaGenMismatchErr_LSB 0x27
-#define QIB_7220_ErrStatus_SDmaGenMismatchErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendBufMisuseErr_LSB 0x26
-#define QIB_7220_ErrStatus_SendBufMisuseErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendUnsupportedVLErr_LSB 0x25
-#define QIB_7220_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
-#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendPioArmLaunchErr_LSB 0x23
-#define QIB_7220_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendDroppedDataPktErr_LSB 0x22
-#define QIB_7220_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
-#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendPktLenErr_LSB 0x20
-#define QIB_7220_ErrStatus_SendPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendUnderRunErr_LSB 0x1F
-#define QIB_7220_ErrStatus_SendUnderRunErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendMaxPktLenErr_LSB 0x1E
-#define QIB_7220_ErrStatus_SendMaxPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendMinPktLenErr_LSB 0x1D
-#define QIB_7220_ErrStatus_SendMinPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaDisabledErr_LSB 0x1C
-#define QIB_7220_ErrStatus_SDmaDisabledErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
-#define QIB_7220_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
-#define QIB_7220_ErrStatus_Reserved1_LSB 0x12
-#define QIB_7220_ErrStatus_Reserved1_RMASK 0x1FF
-#define QIB_7220_ErrStatus_RcvIBLostLinkErr_LSB 0x11
-#define QIB_7220_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvHdrErr_LSB 0x10
-#define QIB_7220_ErrStatus_RcvHdrErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvHdrLenErr_LSB 0xF
-#define QIB_7220_ErrStatus_RcvHdrLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvBadTidErr_LSB 0xE
-#define QIB_7220_ErrStatus_RcvBadTidErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvHdrFullErr_LSB 0xD
-#define QIB_7220_ErrStatus_RcvHdrFullErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvEgrFullErr_LSB 0xC
-#define QIB_7220_ErrStatus_RcvEgrFullErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvBadVersionErr_LSB 0xB
-#define QIB_7220_ErrStatus_RcvBadVersionErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvIBFlowErr_LSB 0xA
-#define QIB_7220_ErrStatus_RcvIBFlowErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvEBPErr_LSB 0x9
-#define QIB_7220_ErrStatus_RcvEBPErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
-#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
-#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvShortPktLenErr_LSB 0x6
-#define QIB_7220_ErrStatus_RcvShortPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvLongPktLenErr_LSB 0x5
-#define QIB_7220_ErrStatus_RcvLongPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvMaxPktLenErr_LSB 0x4
-#define QIB_7220_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvMinPktLenErr_LSB 0x3
-#define QIB_7220_ErrStatus_RcvMinPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvICRCErr_LSB 0x2
-#define QIB_7220_ErrStatus_RcvICRCErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvVCRCErr_LSB 0x1
-#define QIB_7220_ErrStatus_RcvVCRCErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvFormatErr_LSB 0x0
-#define QIB_7220_ErrStatus_RcvFormatErr_RMASK 0x1
-
-#define QIB_7220_ErrClear_OFFS 0x90
-#define QIB_7220_ErrClear_Reserved_LSB 0x36
-#define QIB_7220_ErrClear_Reserved_RMASK 0x3FF
-#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
-#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_LSB 0x34
-#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_HardwareErrClear_LSB 0x33
-#define QIB_7220_ErrClear_HardwareErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_ResetNegatedClear_LSB 0x32
-#define QIB_7220_ErrClear_ResetNegatedClear_RMASK 0x1
-#define QIB_7220_ErrClear_InvalidAddrErrClear_LSB 0x31
-#define QIB_7220_ErrClear_InvalidAddrErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_IBStatusChangedClear_LSB 0x30
-#define QIB_7220_ErrClear_IBStatusChangedClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_LSB 0x2F
-#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaMissingDwErrClear_LSB 0x2E
-#define QIB_7220_ErrClear_SDmaMissingDwErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaDwEnErrClear_LSB 0x2D
-#define QIB_7220_ErrClear_SDmaDwEnErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaRpyTagErrClear_LSB 0x2C
-#define QIB_7220_ErrClear_SDmaRpyTagErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDma1stDescErrClear_LSB 0x2B
-#define QIB_7220_ErrClear_SDma1stDescErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaBaseErrClear_LSB 0x2A
-#define QIB_7220_ErrClear_SDmaBaseErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_LSB 0x29
-#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_LSB 0x28
-#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_LSB 0x27
-#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendBufMisuseErrClear_LSB 0x26
-#define QIB_7220_ErrClear_SendBufMisuseErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
-#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
-#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
-#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
-#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
-#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendPktLenErrClear_LSB 0x20
-#define QIB_7220_ErrClear_SendPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendUnderRunErrClear_LSB 0x1F
-#define QIB_7220_ErrClear_SendUnderRunErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
-#define QIB_7220_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendMinPktLenErrClear_LSB 0x1D
-#define QIB_7220_ErrClear_SendMinPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaDisabledErrClear_LSB 0x1C
-#define QIB_7220_ErrClear_SDmaDisabledErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
-#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_Reserved1_LSB 0x12
-#define QIB_7220_ErrClear_Reserved1_RMASK 0x1FF
-#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
-#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvHdrErrClear_LSB 0x10
-#define QIB_7220_ErrClear_RcvHdrErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvHdrLenErrClear_LSB 0xF
-#define QIB_7220_ErrClear_RcvHdrLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvBadTidErrClear_LSB 0xE
-#define QIB_7220_ErrClear_RcvBadTidErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvHdrFullErrClear_LSB 0xD
-#define QIB_7220_ErrClear_RcvHdrFullErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvEgrFullErrClear_LSB 0xC
-#define QIB_7220_ErrClear_RcvEgrFullErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvBadVersionErrClear_LSB 0xB
-#define QIB_7220_ErrClear_RcvBadVersionErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvIBFlowErrClear_LSB 0xA
-#define QIB_7220_ErrClear_RcvIBFlowErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvEBPErrClear_LSB 0x9
-#define QIB_7220_ErrClear_RcvEBPErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
-#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
-#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvShortPktLenErrClear_LSB 0x6
-#define QIB_7220_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvLongPktLenErrClear_LSB 0x5
-#define QIB_7220_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
-#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvMinPktLenErrClear_LSB 0x3
-#define QIB_7220_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvICRCErrClear_LSB 0x2
-#define QIB_7220_ErrClear_RcvICRCErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvVCRCErrClear_LSB 0x1
-#define QIB_7220_ErrClear_RcvVCRCErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvFormatErrClear_LSB 0x0
-#define QIB_7220_ErrClear_RcvFormatErrClear_RMASK 0x1
-
-#define QIB_7220_HwErrMask_OFFS 0x98
-#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
-#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
-#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_LSB 0x3D
-#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_RMASK 0x1
-#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
-#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_LSB 0x3B
-#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_LSB 0x3A
-#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x39
-#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x38
-#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Reserved_LSB 0x37
-#define QIB_7220_HwErrMask_Reserved_RMASK 0x1
-#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
-#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Reserved1_LSB 0x33
-#define QIB_7220_HwErrMask_Reserved1_RMASK 0x7
-#define QIB_7220_HwErrMask_RXEMemParityErrMask_LSB 0x2C
-#define QIB_7220_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
-#define QIB_7220_HwErrMask_TXEMemParityErrMask_LSB 0x28
-#define QIB_7220_HwErrMask_TXEMemParityErrMask_RMASK 0xF
-#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_LSB 0x27
-#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_LSB 0x26
-#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_LSB 0x25
-#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_LSB 0x24
-#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Reserved2_LSB 0x22
-#define QIB_7220_HwErrMask_Reserved2_RMASK 0x3
-#define QIB_7220_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
-#define QIB_7220_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
-#define QIB_7220_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
-#define QIB_7220_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PoisonedTLPMask_LSB 0x1D
-#define QIB_7220_HwErrMask_PoisonedTLPMask_RMASK 0x1
-#define QIB_7220_HwErrMask_SDmaMemReadErrMask_LSB 0x1C
-#define QIB_7220_HwErrMask_SDmaMemReadErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Reserved3_LSB 0x8
-#define QIB_7220_HwErrMask_Reserved3_RMASK 0xFFFFF
-#define QIB_7220_HwErrMask_PCIeMemParityErrMask_LSB 0x0
-#define QIB_7220_HwErrMask_PCIeMemParityErrMask_RMASK 0xFF
-
-#define QIB_7220_HwErrStatus_OFFS 0xA0
-#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
-#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
-#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_LSB 0x3D
-#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_RMASK 0x1
-#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
-#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_LSB 0x3B
-#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_LSB 0x3A
-#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x39
-#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x38
-#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_Reserved_LSB 0x37
-#define QIB_7220_HwErrStatus_Reserved_RMASK 0x1
-#define QIB_7220_HwErrStatus_PowerOnBISTFailed_LSB 0x36
-#define QIB_7220_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
-#define QIB_7220_HwErrStatus_Reserved1_LSB 0x33
-#define QIB_7220_HwErrStatus_Reserved1_RMASK 0x7
-#define QIB_7220_HwErrStatus_RXEMemParity_LSB 0x2C
-#define QIB_7220_HwErrStatus_RXEMemParity_RMASK 0x7F
-#define QIB_7220_HwErrStatus_TXEMemParity_LSB 0x28
-#define QIB_7220_HwErrStatus_TXEMemParity_RMASK 0xF
-#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_LSB 0x27
-#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_LSB 0x26
-#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_LSB 0x25
-#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_LSB 0x24
-#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_Reserved2_LSB 0x22
-#define QIB_7220_HwErrStatus_Reserved2_RMASK 0x3
-#define QIB_7220_HwErrStatus_PCIeBusParity_LSB 0x1F
-#define QIB_7220_HwErrStatus_PCIeBusParity_RMASK 0x7
-#define QIB_7220_HwErrStatus_PcieCplTimeout_LSB 0x1E
-#define QIB_7220_HwErrStatus_PcieCplTimeout_RMASK 0x1
-#define QIB_7220_HwErrStatus_PoisenedTLP_LSB 0x1D
-#define QIB_7220_HwErrStatus_PoisenedTLP_RMASK 0x1
-#define QIB_7220_HwErrStatus_SDmaMemReadErr_LSB 0x1C
-#define QIB_7220_HwErrStatus_SDmaMemReadErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_Reserved3_LSB 0x8
-#define QIB_7220_HwErrStatus_Reserved3_RMASK 0xFFFFF
-#define QIB_7220_HwErrStatus_PCIeMemParity_LSB 0x0
-#define QIB_7220_HwErrStatus_PCIeMemParity_RMASK 0xFF
-
-#define QIB_7220_HwErrClear_OFFS 0xA8
-#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
-#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
-#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_LSB 0x3D
-#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_RMASK 0x1
-#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
-#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_LSB 0x3B
-#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_LSB 0x3A
-#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x39
-#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x38
-#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Reserved_LSB 0x37
-#define QIB_7220_HwErrClear_Reserved_RMASK 0x1
-#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
-#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Reserved1_LSB 0x33
-#define QIB_7220_HwErrClear_Reserved1_RMASK 0x7
-#define QIB_7220_HwErrClear_RXEMemParityClear_LSB 0x2C
-#define QIB_7220_HwErrClear_RXEMemParityClear_RMASK 0x7F
-#define QIB_7220_HwErrClear_TXEMemParityClear_LSB 0x28
-#define QIB_7220_HwErrClear_TXEMemParityClear_RMASK 0xF
-#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_LSB 0x27
-#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_LSB 0x26
-#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_LSB 0x25
-#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_LSB 0x24
-#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Reserved2_LSB 0x22
-#define QIB_7220_HwErrClear_Reserved2_RMASK 0x3
-#define QIB_7220_HwErrClear_PCIeBusParityClr_LSB 0x1F
-#define QIB_7220_HwErrClear_PCIeBusParityClr_RMASK 0x7
-#define QIB_7220_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
-#define QIB_7220_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PoisonedTLPClear_LSB 0x1D
-#define QIB_7220_HwErrClear_PoisonedTLPClear_RMASK 0x1
-#define QIB_7220_HwErrClear_SDmaMemReadErrClear_LSB 0x1C
-#define QIB_7220_HwErrClear_SDmaMemReadErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Reserved3_LSB 0x8
-#define QIB_7220_HwErrClear_Reserved3_RMASK 0xFFFFF
-#define QIB_7220_HwErrClear_PCIeMemParityClr_LSB 0x0
-#define QIB_7220_HwErrClear_PCIeMemParityClr_RMASK 0xFF
-
-#define QIB_7220_HwDiagCtrl_OFFS 0xB0
-#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
-#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
-#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_CounterWrEnable_LSB 0x3D
-#define QIB_7220_HwDiagCtrl_CounterWrEnable_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_CounterDisable_LSB 0x3C
-#define QIB_7220_HwDiagCtrl_CounterDisable_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_Reserved_LSB 0x33
-#define QIB_7220_HwDiagCtrl_Reserved_RMASK 0x1FF
-#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
-#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
-#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
-#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
-#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_LSB 0x27
-#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_LSB 0x26
-#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_LSB 0x25
-#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_LSB 0x24
-#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_Reserved1_LSB 0x23
-#define QIB_7220_HwDiagCtrl_Reserved1_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
-#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
-#define QIB_7220_HwDiagCtrl_Reserved2_LSB 0x8
-#define QIB_7220_HwDiagCtrl_Reserved2_RMASK 0x7FFFFF
-#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
-#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_RMASK 0xFF
-
-#define QIB_7220_REG_0000B8_OFFS 0xB8
-
-#define QIB_7220_IBCStatus_OFFS 0xC0
-#define QIB_7220_IBCStatus_TxCreditOk_LSB 0x1F
-#define QIB_7220_IBCStatus_TxCreditOk_RMASK 0x1
-#define QIB_7220_IBCStatus_TxReady_LSB 0x1E
-#define QIB_7220_IBCStatus_TxReady_RMASK 0x1
-#define QIB_7220_IBCStatus_Reserved_LSB 0xE
-#define QIB_7220_IBCStatus_Reserved_RMASK 0xFFFF
-#define QIB_7220_IBCStatus_IBTxLaneReversed_LSB 0xD
-#define QIB_7220_IBCStatus_IBTxLaneReversed_RMASK 0x1
-#define QIB_7220_IBCStatus_IBRxLaneReversed_LSB 0xC
-#define QIB_7220_IBCStatus_IBRxLaneReversed_RMASK 0x1
-#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_LSB 0xB
-#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_RMASK 0x1
-#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_LSB 0xA
-#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_RMASK 0x1
-#define QIB_7220_IBCStatus_LinkWidthActive_LSB 0x9
-#define QIB_7220_IBCStatus_LinkWidthActive_RMASK 0x1
-#define QIB_7220_IBCStatus_LinkSpeedActive_LSB 0x8
-#define QIB_7220_IBCStatus_LinkSpeedActive_RMASK 0x1
-#define QIB_7220_IBCStatus_LinkState_LSB 0x5
-#define QIB_7220_IBCStatus_LinkState_RMASK 0x7
-#define QIB_7220_IBCStatus_LinkTrainingState_LSB 0x0
-#define QIB_7220_IBCStatus_LinkTrainingState_RMASK 0x1F
-
-#define QIB_7220_IBCCtrl_OFFS 0xC8
-#define QIB_7220_IBCCtrl_Loopback_LSB 0x3F
-#define QIB_7220_IBCCtrl_Loopback_RMASK 0x1
-#define QIB_7220_IBCCtrl_LinkDownDefaultState_LSB 0x3E
-#define QIB_7220_IBCCtrl_LinkDownDefaultState_RMASK 0x1
-#define QIB_7220_IBCCtrl_Reserved_LSB 0x2B
-#define QIB_7220_IBCCtrl_Reserved_RMASK 0x7FFFF
-#define QIB_7220_IBCCtrl_CreditScale_LSB 0x28
-#define QIB_7220_IBCCtrl_CreditScale_RMASK 0x7
-#define QIB_7220_IBCCtrl_OverrunThreshold_LSB 0x24
-#define QIB_7220_IBCCtrl_OverrunThreshold_RMASK 0xF
-#define QIB_7220_IBCCtrl_PhyerrThreshold_LSB 0x20
-#define QIB_7220_IBCCtrl_PhyerrThreshold_RMASK 0xF
-#define QIB_7220_IBCCtrl_MaxPktLen_LSB 0x15
-#define QIB_7220_IBCCtrl_MaxPktLen_RMASK 0x7FF
-#define QIB_7220_IBCCtrl_LinkCmd_LSB 0x13
-#define QIB_7220_IBCCtrl_LinkCmd_RMASK 0x3
-#define QIB_7220_IBCCtrl_LinkInitCmd_LSB 0x10
-#define QIB_7220_IBCCtrl_LinkInitCmd_RMASK 0x7
-#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
-#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
-#define QIB_7220_IBCCtrl_FlowCtrlPeriod_LSB 0x0
-#define QIB_7220_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
-
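/*
 * Illustrative sketch (not from the deleted files): the write-side
 * counterpart of field extraction -- clear the field with
 * (RMASK << LSB), then OR in the new value masked to the field width.
 * Shown for the IBCCtrl LinkCmd field defined above; the helper itself
 * is editorial.
 */
static inline u64 ibcctrl_set_linkcmd(u64 ibcctrl, u64 cmd)
{
	ibcctrl &= ~((u64)QIB_7220_IBCCtrl_LinkCmd_RMASK <<
		     QIB_7220_IBCCtrl_LinkCmd_LSB);
	ibcctrl |= (cmd & QIB_7220_IBCCtrl_LinkCmd_RMASK) <<
		   QIB_7220_IBCCtrl_LinkCmd_LSB;
	return ibcctrl;
}
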
-#define QIB_7220_EXTStatus_OFFS 0xD0
-#define QIB_7220_EXTStatus_GPIOIn_LSB 0x30
-#define QIB_7220_EXTStatus_GPIOIn_RMASK 0xFFFF
-#define QIB_7220_EXTStatus_Reserved_LSB 0x20
-#define QIB_7220_EXTStatus_Reserved_RMASK 0xFFFF
-#define QIB_7220_EXTStatus_Reserved1_LSB 0x10
-#define QIB_7220_EXTStatus_Reserved1_RMASK 0xFFFF
-#define QIB_7220_EXTStatus_MemBISTDisabled_LSB 0xF
-#define QIB_7220_EXTStatus_MemBISTDisabled_RMASK 0x1
-#define QIB_7220_EXTStatus_MemBISTEndTest_LSB 0xE
-#define QIB_7220_EXTStatus_MemBISTEndTest_RMASK 0x1
-#define QIB_7220_EXTStatus_Reserved2_LSB 0x0
-#define QIB_7220_EXTStatus_Reserved2_RMASK 0x3FFF
-
-#define QIB_7220_EXTCtrl_OFFS 0xD8
-#define QIB_7220_EXTCtrl_GPIOOe_LSB 0x30
-#define QIB_7220_EXTCtrl_GPIOOe_RMASK 0xFFFF
-#define QIB_7220_EXTCtrl_GPIOInvert_LSB 0x20
-#define QIB_7220_EXTCtrl_GPIOInvert_RMASK 0xFFFF
-#define QIB_7220_EXTCtrl_Reserved_LSB 0x4
-#define QIB_7220_EXTCtrl_Reserved_RMASK 0xFFFFFFF
-#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
-#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
-#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
-#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
-#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
-#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
-#define QIB_7220_EXTCtrl_LEDGblErrRedOff_LSB 0x0
-#define QIB_7220_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
-
-#define QIB_7220_GPIOOut_OFFS 0xE0
-
-#define QIB_7220_GPIOMask_OFFS 0xE8
-
-#define QIB_7220_GPIOStatus_OFFS 0xF0
-
-#define QIB_7220_GPIOClear_OFFS 0xF8
-
-#define QIB_7220_RcvCtrl_OFFS 0x100
-#define QIB_7220_RcvCtrl_Reserved_LSB 0x27
-#define QIB_7220_RcvCtrl_Reserved_RMASK 0x1FFFFFF
-#define QIB_7220_RcvCtrl_RcvQPMapEnable_LSB 0x26
-#define QIB_7220_RcvCtrl_RcvQPMapEnable_RMASK 0x1
-#define QIB_7220_RcvCtrl_PortCfg_LSB 0x24
-#define QIB_7220_RcvCtrl_PortCfg_RMASK 0x3
-#define QIB_7220_RcvCtrl_TailUpd_LSB 0x23
-#define QIB_7220_RcvCtrl_TailUpd_RMASK 0x1
-#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_LSB 0x22
-#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
-#define QIB_7220_RcvCtrl_IntrAvail_LSB 0x11
-#define QIB_7220_RcvCtrl_IntrAvail_RMASK 0x1FFFF
-#define QIB_7220_RcvCtrl_PortEnable_LSB 0x0
-#define QIB_7220_RcvCtrl_PortEnable_RMASK 0x1FFFF
-
-#define QIB_7220_RcvBTHQP_OFFS 0x108
-#define QIB_7220_RcvBTHQP_Reserved_LSB 0x18
-#define QIB_7220_RcvBTHQP_Reserved_RMASK 0xFF
-#define QIB_7220_RcvBTHQP_RcvBTHQP_LSB 0x0
-#define QIB_7220_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
-
-#define QIB_7220_RcvHdrSize_OFFS 0x110
-
-#define QIB_7220_RcvHdrCnt_OFFS 0x118
-
-#define QIB_7220_RcvHdrEntSize_OFFS 0x120
-
-#define QIB_7220_RcvTIDBase_OFFS 0x128
-
-#define QIB_7220_RcvTIDCnt_OFFS 0x130
-
-#define QIB_7220_RcvEgrBase_OFFS 0x138
-
-#define QIB_7220_RcvEgrCnt_OFFS 0x140
-
-#define QIB_7220_RcvBufBase_OFFS 0x148
-
-#define QIB_7220_RcvBufSize_OFFS 0x150
-
-#define QIB_7220_RxIntMemBase_OFFS 0x158
-
-#define QIB_7220_RxIntMemSize_OFFS 0x160
-
-#define QIB_7220_RcvPartitionKey_OFFS 0x168
-
-#define QIB_7220_RcvQPMulticastPort_OFFS 0x170
-#define QIB_7220_RcvQPMulticastPort_Reserved_LSB 0x5
-#define QIB_7220_RcvQPMulticastPort_Reserved_RMASK 0x7FFFFFFFFFFFFFF
-#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_LSB 0x0
-#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_RMASK 0x1F
-
-#define QIB_7220_RcvPktLEDCnt_OFFS 0x178
-#define QIB_7220_RcvPktLEDCnt_ONperiod_LSB 0x20
-#define QIB_7220_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
-#define QIB_7220_RcvPktLEDCnt_OFFperiod_LSB 0x0
-#define QIB_7220_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
-
-#define QIB_7220_IBCDDRCtrl_OFFS 0x180
-#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_LSB 0x30
-#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_RMASK 0xFFFF
-#define QIB_7220_IBCDDRCtrl_IB_DLID_LSB 0x20
-#define QIB_7220_IBCDDRCtrl_IB_DLID_RMASK 0xFFFF
-#define QIB_7220_IBCDDRCtrl_Reserved_LSB 0x1B
-#define QIB_7220_IBCDDRCtrl_Reserved_RMASK 0x1F
-#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_LSB 0x1A
-#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_LSB 0x12
-#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_RMASK 0xFF
-#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_LSB 0x11
-#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_LSB 0x10
-#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_DDS_LSB 0xC
-#define QIB_7220_IBCDDRCtrl_SD_DDS_RMASK 0xF
-#define QIB_7220_IBCDDRCtrl_SD_DDSV_LSB 0xB
-#define QIB_7220_IBCDDRCtrl_SD_DDSV_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_LSB 0xA
-#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_LSB 0x9
-#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_LSB 0x8
-#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_LSB 0x7
-#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_LSB 0x5
-#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_RMASK 0x3
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_LSB 0x4
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_LSB 0x3
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_LSB 0x2
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_LSB 0x1
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_LSB 0x0
-#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_RMASK 0x1
-
-#define QIB_7220_HRTBT_GUID_OFFS 0x188
-
-#define QIB_7220_IBCDDRCtrl2_OFFS 0x1A0
-#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_LSB 0x5
-#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_RMASK 0x1F
-#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_LSB 0x0
-#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_RMASK 0x1F
-
-#define QIB_7220_IBCDDRStatus_OFFS 0x1A8
-#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_LSB 0x24
-#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_RMASK 0x1
-#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_LSB 0x20
-#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_RMASK 0xF
-#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_LSB 0x1E
-#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_RMASK 0x3
-#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_LSB 0x1A
-#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_RMASK 0xF
-#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_LSB 0x0
-#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_RMASK 0x3FFFFFF
-
-#define QIB_7220_JIntReload_OFFS 0x1B0
-#define QIB_7220_JIntReload_J_limit_reload_LSB 0x10
-#define QIB_7220_JIntReload_J_limit_reload_RMASK 0xFFFF
-#define QIB_7220_JIntReload_J_reload_LSB 0x0
-#define QIB_7220_JIntReload_J_reload_RMASK 0xFFFF
-
-#define QIB_7220_IBNCModeCtrl_OFFS 0x1B8
-#define QIB_7220_IBNCModeCtrl_Reserved_LSB 0x1A
-#define QIB_7220_IBNCModeCtrl_Reserved_RMASK 0x3FFFFFFFFF
-#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_LSB 0x11
-#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_RMASK 0x1FF
-#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_LSB 0x8
-#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_RMASK 0x1FF
-#define QIB_7220_IBNCModeCtrl_Reserved1_LSB 0x3
-#define QIB_7220_IBNCModeCtrl_Reserved1_RMASK 0x1F
-#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_LSB 0x2
-#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
-#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_LSB 0x1
-#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_RMASK 0x1
-#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_LSB 0x0
-#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_RMASK 0x1
-
-#define QIB_7220_SendCtrl_OFFS 0x1C0
-#define QIB_7220_SendCtrl_Disarm_LSB 0x1F
-#define QIB_7220_SendCtrl_Disarm_RMASK 0x1
-#define QIB_7220_SendCtrl_Reserved_LSB 0x1D
-#define QIB_7220_SendCtrl_Reserved_RMASK 0x3
-#define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18
-#define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F
-#define QIB_7220_SendCtrl_DisarmPIOBuf_LSB 0x10
-#define QIB_7220_SendCtrl_DisarmPIOBuf_RMASK 0xFF
-#define QIB_7220_SendCtrl_Reserved1_LSB 0xD
-#define QIB_7220_SendCtrl_Reserved1_RMASK 0x7
-#define QIB_7220_SendCtrl_SDmaHalt_LSB 0xC
-#define QIB_7220_SendCtrl_SDmaHalt_RMASK 0x1
-#define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB
-#define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1
-#define QIB_7220_SendCtrl_SDmaSingleDescriptor_LSB 0xA
-#define QIB_7220_SendCtrl_SDmaSingleDescriptor_RMASK 0x1
-#define QIB_7220_SendCtrl_SDmaIntEnable_LSB 0x9
-#define QIB_7220_SendCtrl_SDmaIntEnable_RMASK 0x1
-#define QIB_7220_SendCtrl_Reserved2_LSB 0x5
-#define QIB_7220_SendCtrl_Reserved2_RMASK 0xF
-#define QIB_7220_SendCtrl_SSpecialTriggerEn_LSB 0x4
-#define QIB_7220_SendCtrl_SSpecialTriggerEn_RMASK 0x1
-#define QIB_7220_SendCtrl_SPioEnable_LSB 0x3
-#define QIB_7220_SendCtrl_SPioEnable_RMASK 0x1
-#define QIB_7220_SendCtrl_SendBufAvailUpd_LSB 0x2
-#define QIB_7220_SendCtrl_SendBufAvailUpd_RMASK 0x1
-#define QIB_7220_SendCtrl_SendIntBufAvail_LSB 0x1
-#define QIB_7220_SendCtrl_SendIntBufAvail_RMASK 0x1
-#define QIB_7220_SendCtrl_Abort_LSB 0x0
-#define QIB_7220_SendCtrl_Abort_RMASK 0x1
-
-#define QIB_7220_SendBufBase_OFFS 0x1C8
-#define QIB_7220_SendBufBase_Reserved_LSB 0x35
-#define QIB_7220_SendBufBase_Reserved_RMASK 0x7FF
-#define QIB_7220_SendBufBase_BaseAddr_LargePIO_LSB 0x20
-#define QIB_7220_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
-#define QIB_7220_SendBufBase_Reserved1_LSB 0x15
-#define QIB_7220_SendBufBase_Reserved1_RMASK 0x7FF
-#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
-#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
-
-#define QIB_7220_SendBufSize_OFFS 0x1D0
-#define QIB_7220_SendBufSize_Reserved_LSB 0x2D
-#define QIB_7220_SendBufSize_Reserved_RMASK 0xFFFFF
-#define QIB_7220_SendBufSize_Size_LargePIO_LSB 0x20
-#define QIB_7220_SendBufSize_Size_LargePIO_RMASK 0x1FFF
-#define QIB_7220_SendBufSize_Reserved1_LSB 0xC
-#define QIB_7220_SendBufSize_Reserved1_RMASK 0xFFFFF
-#define QIB_7220_SendBufSize_Size_SmallPIO_LSB 0x0
-#define QIB_7220_SendBufSize_Size_SmallPIO_RMASK 0xFFF
-
-#define QIB_7220_SendBufCnt_OFFS 0x1D8
-#define QIB_7220_SendBufCnt_Reserved_LSB 0x24
-#define QIB_7220_SendBufCnt_Reserved_RMASK 0xFFFFFFF
-#define QIB_7220_SendBufCnt_Num_LargeBuffers_LSB 0x20
-#define QIB_7220_SendBufCnt_Num_LargeBuffers_RMASK 0xF
-#define QIB_7220_SendBufCnt_Reserved1_LSB 0x9
-#define QIB_7220_SendBufCnt_Reserved1_RMASK 0x7FFFFF
-#define QIB_7220_SendBufCnt_Num_SmallBuffers_LSB 0x0
-#define QIB_7220_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
-
-#define QIB_7220_SendBufAvailAddr_OFFS 0x1E0
-#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
-#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
-#define QIB_7220_SendBufAvailAddr_Reserved_LSB 0x0
-#define QIB_7220_SendBufAvailAddr_Reserved_RMASK 0x3F
-
-#define QIB_7220_TxIntMemBase_OFFS 0x1E8
-
-#define QIB_7220_TxIntMemSize_OFFS 0x1F0
-
-#define QIB_7220_SendDmaBase_OFFS 0x1F8
-#define QIB_7220_SendDmaBase_Reserved_LSB 0x30
-#define QIB_7220_SendDmaBase_Reserved_RMASK 0xFFFF
-#define QIB_7220_SendDmaBase_SendDmaBase_LSB 0x0
-#define QIB_7220_SendDmaBase_SendDmaBase_RMASK 0xFFFFFFFFFFFF
-
-#define QIB_7220_SendDmaLenGen_OFFS 0x200
-#define QIB_7220_SendDmaLenGen_Reserved_LSB 0x13
-#define QIB_7220_SendDmaLenGen_Reserved_RMASK 0x1FFFFFFFFFFF
-#define QIB_7220_SendDmaLenGen_Generation_LSB 0x10
-#define QIB_7220_SendDmaLenGen_Generation_MSB 0x12
-#define QIB_7220_SendDmaLenGen_Generation_RMASK 0x7
-#define QIB_7220_SendDmaLenGen_Length_LSB 0x0
-#define QIB_7220_SendDmaLenGen_Length_RMASK 0xFFFF
-
-#define QIB_7220_SendDmaTail_OFFS 0x208
-#define QIB_7220_SendDmaTail_Reserved_LSB 0x10
-#define QIB_7220_SendDmaTail_Reserved_RMASK 0xFFFFFFFFFFFF
-#define QIB_7220_SendDmaTail_SendDmaTail_LSB 0x0
-#define QIB_7220_SendDmaTail_SendDmaTail_RMASK 0xFFFF
-
-#define QIB_7220_SendDmaHead_OFFS 0x210
-#define QIB_7220_SendDmaHead_Reserved_LSB 0x30
-#define QIB_7220_SendDmaHead_Reserved_RMASK 0xFFFF
-#define QIB_7220_SendDmaHead_InternalSendDmaHead_LSB 0x20
-#define QIB_7220_SendDmaHead_InternalSendDmaHead_RMASK 0xFFFF
-#define QIB_7220_SendDmaHead_Reserved1_LSB 0x10
-#define QIB_7220_SendDmaHead_Reserved1_RMASK 0xFFFF
-#define QIB_7220_SendDmaHead_SendDmaHead_LSB 0x0
-#define QIB_7220_SendDmaHead_SendDmaHead_RMASK 0xFFFF
-
-#define QIB_7220_SendDmaHeadAddr_OFFS 0x218
-#define QIB_7220_SendDmaHeadAddr_Reserved_LSB 0x30
-#define QIB_7220_SendDmaHeadAddr_Reserved_RMASK 0xFFFF
-#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_LSB 0x0
-#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
-
-#define QIB_7220_SendDmaBufMask0_OFFS 0x220
-#define QIB_7220_SendDmaBufMask0_BufMask_63_0_LSB 0x0
-#define QIB_7220_SendDmaBufMask0_BufMask_63_0_RMASK 0x0
-
-#define QIB_7220_SendDmaStatus_OFFS 0x238
-#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_LSB 0x3F
-#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_RMASK 0x1
-#define QIB_7220_SendDmaStatus_AbortInProg_LSB 0x3E
-#define QIB_7220_SendDmaStatus_AbortInProg_RMASK 0x1
-#define QIB_7220_SendDmaStatus_InternalSDmaEnable_LSB 0x3D
-#define QIB_7220_SendDmaStatus_InternalSDmaEnable_RMASK 0x1
-#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_LSB 0x2F
-#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_RMASK 0x3FFF
-#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_LSB 0x28
-#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_RMASK 0x7F
-#define QIB_7220_SendDmaStatus_RpyTag_7_0_LSB 0x20
-#define QIB_7220_SendDmaStatus_RpyTag_7_0_RMASK 0xFF
-#define QIB_7220_SendDmaStatus_ScbFull_LSB 0x1F
-#define QIB_7220_SendDmaStatus_ScbFull_RMASK 0x1
-#define QIB_7220_SendDmaStatus_ScbEmpty_LSB 0x1E
-#define QIB_7220_SendDmaStatus_ScbEmpty_RMASK 0x1
-#define QIB_7220_SendDmaStatus_ScbEntryValid_LSB 0x1D
-#define QIB_7220_SendDmaStatus_ScbEntryValid_RMASK 0x1
-#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_LSB 0x1C
-#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_LSB 0x1B
-#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoDisarmed_LSB 0x1A
-#define QIB_7220_SendDmaStatus_SplFifoDisarmed_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoEmpty_LSB 0x19
-#define QIB_7220_SendDmaStatus_SplFifoEmpty_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoFull_LSB 0x18
-#define QIB_7220_SendDmaStatus_SplFifoFull_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoBufNum_LSB 0x10
-#define QIB_7220_SendDmaStatus_SplFifoBufNum_RMASK 0xFF
-#define QIB_7220_SendDmaStatus_SplFifoDescIndex_LSB 0x0
-#define QIB_7220_SendDmaStatus_SplFifoDescIndex_RMASK 0xFFFF
-
-#define QIB_7220_SendBufErr0_OFFS 0x240
-#define QIB_7220_SendBufErr0_SendBufErr_63_0_LSB 0x0
-#define QIB_7220_SendBufErr0_SendBufErr_63_0_RMASK 0x0
-
-#define QIB_7220_RcvHdrAddr0_OFFS 0x270
-#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
-#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
-#define QIB_7220_RcvHdrAddr0_Reserved_LSB 0x0
-#define QIB_7220_RcvHdrAddr0_Reserved_RMASK 0x3
-
-#define QIB_7220_RcvHdrTailAddr0_OFFS 0x300
-#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
-#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
-#define QIB_7220_RcvHdrTailAddr0_Reserved_LSB 0x0
-#define QIB_7220_RcvHdrTailAddr0_Reserved_RMASK 0x3
-
-#define QIB_7220_ibsd_epb_access_ctrl_OFFS 0x3C0
-#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_LSB 0x8
-#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_RMASK 0x1
-#define QIB_7220_ibsd_epb_access_ctrl_Reserved_LSB 0x1
-#define QIB_7220_ibsd_epb_access_ctrl_Reserved_RMASK 0x7F
-#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_LSB 0x0
-#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_RMASK 0x1
-
-#define QIB_7220_ibsd_epb_transaction_reg_OFFS 0x3C8
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_LSB 0x1F
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_LSB 0x1E
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved_LSB 0x1D
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_LSB 0x1C
-#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_LSB 0x1B
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_LSB 0x19
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_RMASK 0x3
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_LSB 0x18
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_LSB 0x17
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_LSB 0x8
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_RMASK 0x7FFF
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_LSB 0x0
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_RMASK 0xFF
-
-#define QIB_7220_XGXSCfg_OFFS 0x3D8
-#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_LSB 0x3F
-#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_RMASK 0x1
-#define QIB_7220_XGXSCfg_Reserved_LSB 0x13
-#define QIB_7220_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFFF
-#define QIB_7220_XGXSCfg_link_sync_mask_LSB 0x9
-#define QIB_7220_XGXSCfg_link_sync_mask_RMASK 0x3FF
-#define QIB_7220_XGXSCfg_Reserved1_LSB 0x3
-#define QIB_7220_XGXSCfg_Reserved1_RMASK 0x3F
-#define QIB_7220_XGXSCfg_xcv_reset_LSB 0x2
-#define QIB_7220_XGXSCfg_xcv_reset_RMASK 0x1
-#define QIB_7220_XGXSCfg_Reserved2_LSB 0x1
-#define QIB_7220_XGXSCfg_Reserved2_RMASK 0x1
-#define QIB_7220_XGXSCfg_tx_rx_reset_LSB 0x0
-#define QIB_7220_XGXSCfg_tx_rx_reset_RMASK 0x1
-
-#define QIB_7220_IBSerDesCtrl_OFFS 0x3E0
-#define QIB_7220_IBSerDesCtrl_Reserved_LSB 0x2D
-#define QIB_7220_IBSerDesCtrl_Reserved_RMASK 0x7FFFF
-#define QIB_7220_IBSerDesCtrl_INT_uC_LSB 0x2C
-#define QIB_7220_IBSerDesCtrl_INT_uC_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_CKSEL_uC_LSB 0x2A
-#define QIB_7220_IBSerDesCtrl_CKSEL_uC_RMASK 0x3
-#define QIB_7220_IBSerDesCtrl_PLLN_LSB 0x28
-#define QIB_7220_IBSerDesCtrl_PLLN_RMASK 0x3
-#define QIB_7220_IBSerDesCtrl_PLLM_LSB 0x25
-#define QIB_7220_IBSerDesCtrl_PLLM_RMASK 0x7
-#define QIB_7220_IBSerDesCtrl_TXOBPD_LSB 0x24
-#define QIB_7220_IBSerDesCtrl_TXOBPD_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_TWC_LSB 0x23
-#define QIB_7220_IBSerDesCtrl_TWC_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_RXIDLE_LSB 0x22
-#define QIB_7220_IBSerDesCtrl_RXIDLE_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_RXINV_LSB 0x21
-#define QIB_7220_IBSerDesCtrl_RXINV_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_TXINV_LSB 0x20
-#define QIB_7220_IBSerDesCtrl_TXINV_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_Reserved1_LSB 0x12
-#define QIB_7220_IBSerDesCtrl_Reserved1_RMASK 0x3FFF
-#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_LSB 0xD
-#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_RMASK 0x1F
-#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_LSB 0x8
-#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_RMASK 0x1F
-#define QIB_7220_IBSerDesCtrl_Reserved2_LSB 0x1
-#define QIB_7220_IBSerDesCtrl_Reserved2_RMASK 0x7F
-#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_LSB 0x0
-#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_RMASK 0x1
-
-#define QIB_7220_pciesd_epb_access_ctrl_OFFS 0x400
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_LSB 0x8
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_RMASK 0x1
-#define QIB_7220_pciesd_epb_access_ctrl_Reserved_LSB 0x3
-#define QIB_7220_pciesd_epb_access_ctrl_Reserved_RMASK 0x1F
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_LSB 0x1
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_RMASK 0x3
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_LSB 0x0
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_RMASK 0x1
-
-#define QIB_7220_pciesd_epb_transaction_reg_OFFS 0x408
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_LSB 0x1F
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_LSB 0x1E
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_Reserved_LSB 0x1D
-#define QIB_7220_pciesd_epb_transaction_reg_Reserved_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_LSB 0x1C
-#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_LSB 0x19
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_RMASK 0x7
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_LSB 0x18
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_LSB 0x17
-#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_LSB 0x8
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_RMASK 0x7FFF
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_LSB 0x0
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_RMASK 0xFF
-
-#define QIB_7220_SerDes_DDSRXEQ0_OFFS 0x500
-#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_LSB 0x4
-#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_RMASK 0x3F
-#define QIB_7220_SerDes_DDSRXEQ0_element_num_LSB 0x0
-#define QIB_7220_SerDes_DDSRXEQ0_element_num_RMASK 0xF
-
-#define QIB_7220_LBIntCnt_OFFS 0x13000
-
-#define QIB_7220_LBFlowStallCnt_OFFS 0x13008
-
-#define QIB_7220_TxSDmaDescCnt_OFFS 0x13010
-
-#define QIB_7220_TxUnsupVLErrCnt_OFFS 0x13018
-
-#define QIB_7220_TxDataPktCnt_OFFS 0x13020
-
-#define QIB_7220_TxFlowPktCnt_OFFS 0x13028
-
-#define QIB_7220_TxDwordCnt_OFFS 0x13030
-
-#define QIB_7220_TxLenErrCnt_OFFS 0x13038
-
-#define QIB_7220_TxMaxMinLenErrCnt_OFFS 0x13040
-
-#define QIB_7220_TxUnderrunCnt_OFFS 0x13048
-
-#define QIB_7220_TxFlowStallCnt_OFFS 0x13050
-
-#define QIB_7220_TxDroppedPktCnt_OFFS 0x13058
-
-#define QIB_7220_RxDroppedPktCnt_OFFS 0x13060
-
-#define QIB_7220_RxDataPktCnt_OFFS 0x13068
-
-#define QIB_7220_RxFlowPktCnt_OFFS 0x13070
-
-#define QIB_7220_RxDwordCnt_OFFS 0x13078
-
-#define QIB_7220_RxLenErrCnt_OFFS 0x13080
-
-#define QIB_7220_RxMaxMinLenErrCnt_OFFS 0x13088
-
-#define QIB_7220_RxICRCErrCnt_OFFS 0x13090
-
-#define QIB_7220_RxVCRCErrCnt_OFFS 0x13098
-
-#define QIB_7220_RxFlowCtrlViolCnt_OFFS 0x130A0
-
-#define QIB_7220_RxVersionErrCnt_OFFS 0x130A8
-
-#define QIB_7220_RxLinkMalformCnt_OFFS 0x130B0
-
-#define QIB_7220_RxEBPCnt_OFFS 0x130B8
-
-#define QIB_7220_RxLPCRCErrCnt_OFFS 0x130C0
-
-#define QIB_7220_RxBufOvflCnt_OFFS 0x130C8
-
-#define QIB_7220_RxTIDFullErrCnt_OFFS 0x130D0
-
-#define QIB_7220_RxTIDValidErrCnt_OFFS 0x130D8
-
-#define QIB_7220_RxPKeyMismatchCnt_OFFS 0x130E0
-
-#define QIB_7220_RxP0HdrEgrOvflCnt_OFFS 0x130E8
-
-#define QIB_7220_IBStatusChangeCnt_OFFS 0x13170
-
-#define QIB_7220_IBLinkErrRecoveryCnt_OFFS 0x13178
-
-#define QIB_7220_IBLinkDownedCnt_OFFS 0x13180
-
-#define QIB_7220_IBSymbolErrCnt_OFFS 0x13188
-
-#define QIB_7220_RxVL15DroppedPktCnt_OFFS 0x13190
-
-#define QIB_7220_RxOtherLocalPhyErrCnt_OFFS 0x13198
-
-#define QIB_7220_PcieRetryBufDiagQwordCnt_OFFS 0x131A0
-
-#define QIB_7220_ExcessBufferOvflCnt_OFFS 0x131A8
-
-#define QIB_7220_LocalLinkIntegrityErrCnt_OFFS 0x131B0
-
-#define QIB_7220_RxVlErrCnt_OFFS 0x131B8
-
-#define QIB_7220_RxDlidFltrCnt_OFFS 0x131C0
-
-#define QIB_7220_CNT_0131C8_OFFS 0x131C8
-
-#define QIB_7220_PSStat_OFFS 0x13200
-
-#define QIB_7220_PSStart_OFFS 0x13208
-
-#define QIB_7220_PSInterval_OFFS 0x13210
-
-#define QIB_7220_PSRcvDataCount_OFFS 0x13218
-
-#define QIB_7220_PSRcvPktsCount_OFFS 0x13220
-
-#define QIB_7220_PSXmitDataCount_OFFS 0x13228
-
-#define QIB_7220_PSXmitPktsCount_OFFS 0x13230
-
-#define QIB_7220_PSXmitWaitCount_OFFS 0x13238
-
-#define QIB_7220_CNT_013240_OFFS 0x13240
-
-#define QIB_7220_RcvEgrArray_OFFS 0x14000
-
-#define QIB_7220_MEM_038000_OFFS 0x38000
-
-#define QIB_7220_RcvTIDArray0_OFFS 0x53000
-
-#define QIB_7220_PIOLaunchFIFO_OFFS 0x64000
-
-#define QIB_7220_MEM_064480_OFFS 0x64480
-
-#define QIB_7220_SendPIOpbcCache_OFFS 0x64800
-
-#define QIB_7220_MEM_064C80_OFFS 0x64C80
-
-#define QIB_7220_PreLaunchFIFO_OFFS 0x65000
-
-#define QIB_7220_MEM_065080_OFFS 0x65080
-
-#define QIB_7220_ScoreBoard_OFFS 0x65400
-
-#define QIB_7220_MEM_065440_OFFS 0x65440
-
-#define QIB_7220_DescriptorFIFO_OFFS 0x65800
-
-#define QIB_7220_MEM_065880_OFFS 0x65880
-
-#define QIB_7220_RcvBuf1_OFFS 0x72000
-
-#define QIB_7220_MEM_074800_OFFS 0x74800
-
-#define QIB_7220_RcvBuf2_OFFS 0x75000
-
-#define QIB_7220_MEM_076400_OFFS 0x76400
-
-#define QIB_7220_RcvFlags_OFFS 0x77000
-
-#define QIB_7220_MEM_078400_OFFS 0x78400
-
-#define QIB_7220_RcvLookupBuf1_OFFS 0x79000
-
-#define QIB_7220_MEM_07A400_OFFS 0x7A400
-
-#define QIB_7220_RcvDMADatBuf_OFFS 0x7B000
-
-#define QIB_7220_RcvDMAHdrBuf_OFFS 0x7B800
-
-#define QIB_7220_MiscRXEIntMem_OFFS 0x7C000
-
-#define QIB_7220_MEM_07D400_OFFS 0x7D400
-
-#define QIB_7220_PCIERcvBuf_OFFS 0x80000
-
-#define QIB_7220_PCIERetryBuf_OFFS 0x84000
-
-#define QIB_7220_PCIERcvBufRdToWrAddr_OFFS 0x88000
-
-#define QIB_7220_PCIECplBuf_OFFS 0x90000
-
-#define QIB_7220_IBSerDesMappTable_OFFS 0x94000
-
-#define QIB_7220_MEM_095000_OFFS 0x95000
-
-#define QIB_7220_SendBuf0_MA_OFFS 0x100000
-
-#define QIB_7220_MEM_1A0000_OFFS 0x1A0000
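
Aside, between the two removed headers: both generated files follow the same field-access convention. Each register gets an _OFFS byte offset into the chip's register space, and each field within it an _LSB bit position and an _RMASK right-justified mask (where an _MSB line is present, RMASK is simply (1 << (MSB - LSB + 1)) - 1). Below is a minimal standalone sketch of the shift-and-mask arithmetic this implies, assuming a plain 64-bit register value already read from the device; the get_field/set_field helpers and the sample value are illustrative, not taken from the driver. The two macros are copied from the SendCtrl definitions above.

/*
 * Illustrative only -- not part of the patch or the qib driver.
 * Demonstrates how the *_LSB / *_RMASK macros are conventionally
 * combined to read and update a field in a 64-bit register value.
 */
#include <stdint.h>
#include <stdio.h>

#define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18
#define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F

/* Extract a field: shift down to its LSB, then mask to its width. */
static uint64_t get_field(uint64_t reg, unsigned lsb, uint64_t rmask)
{
	return (reg >> lsb) & rmask;
}

/* Replace a field: clear its bits, then OR in the new value. */
static uint64_t set_field(uint64_t reg, unsigned lsb, uint64_t rmask,
			  uint64_t val)
{
	return (reg & ~(rmask << lsb)) | ((val & rmask) << lsb);
}

int main(void)
{
	uint64_t sendctrl = 0;	/* sample value, not a hardware read */

	sendctrl = set_field(sendctrl, QIB_7220_SendCtrl_AvailUpdThld_LSB,
			     QIB_7220_SendCtrl_AvailUpdThld_RMASK, 0x10);
	printf("AvailUpdThld = 0x%llx\n",
	       (unsigned long long)get_field(sendctrl,
				QIB_7220_SendCtrl_AvailUpdThld_LSB,
				QIB_7220_SendCtrl_AvailUpdThld_RMASK));
	return 0;
}

The driver reads and writes the hardware through its own accessors; this program only shows the arithmetic the macro names encode. It is placed between the two per-file diffs so the surrounding hunks remain contiguous.
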
diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h
deleted file mode 100644
index 32dc81ff8d4a..000000000000
--- a/drivers/infiniband/hw/qib/qib_7322_regs.h
+++ /dev/null
@@ -1,3163 +0,0 @@
-/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
-
-#define QIB_7322_Revision_OFFS 0x0
-#define QIB_7322_Revision_DEF 0x0000000002010601
-#define QIB_7322_Revision_R_Simulator_LSB 0x3F
-#define QIB_7322_Revision_R_Simulator_MSB 0x3F
-#define QIB_7322_Revision_R_Simulator_RMASK 0x1
-#define QIB_7322_Revision_R_Emulation_LSB 0x3E
-#define QIB_7322_Revision_R_Emulation_MSB 0x3E
-#define QIB_7322_Revision_R_Emulation_RMASK 0x1
-#define QIB_7322_Revision_R_Emulation_Revcode_LSB 0x28
-#define QIB_7322_Revision_R_Emulation_Revcode_MSB 0x3D
-#define QIB_7322_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
-#define QIB_7322_Revision_BoardID_LSB 0x20
-#define QIB_7322_Revision_BoardID_MSB 0x27
-#define QIB_7322_Revision_BoardID_RMASK 0xFF
-#define QIB_7322_Revision_R_SW_LSB 0x18
-#define QIB_7322_Revision_R_SW_MSB 0x1F
-#define QIB_7322_Revision_R_SW_RMASK 0xFF
-#define QIB_7322_Revision_R_Arch_LSB 0x10
-#define QIB_7322_Revision_R_Arch_MSB 0x17
-#define QIB_7322_Revision_R_Arch_RMASK 0xFF
-#define QIB_7322_Revision_R_ChipRevMajor_LSB 0x8
-#define QIB_7322_Revision_R_ChipRevMajor_MSB 0xF
-#define QIB_7322_Revision_R_ChipRevMajor_RMASK 0xFF
-#define QIB_7322_Revision_R_ChipRevMinor_LSB 0x0
-#define QIB_7322_Revision_R_ChipRevMinor_MSB 0x7
-#define QIB_7322_Revision_R_ChipRevMinor_RMASK 0xFF
-
-#define QIB_7322_Control_OFFS 0x8
-#define QIB_7322_Control_DEF 0x0000000000000000
-#define QIB_7322_Control_PCIECplQDiagEn_LSB 0x6
-#define QIB_7322_Control_PCIECplQDiagEn_MSB 0x6
-#define QIB_7322_Control_PCIECplQDiagEn_RMASK 0x1
-#define QIB_7322_Control_PCIEPostQDiagEn_LSB 0x5
-#define QIB_7322_Control_PCIEPostQDiagEn_MSB 0x5
-#define QIB_7322_Control_PCIEPostQDiagEn_RMASK 0x1
-#define QIB_7322_Control_SDmaDescFetchPriorityEn_LSB 0x4
-#define QIB_7322_Control_SDmaDescFetchPriorityEn_MSB 0x4
-#define QIB_7322_Control_SDmaDescFetchPriorityEn_RMASK 0x1
-#define QIB_7322_Control_PCIERetryBufDiagEn_LSB 0x3
-#define QIB_7322_Control_PCIERetryBufDiagEn_MSB 0x3
-#define QIB_7322_Control_PCIERetryBufDiagEn_RMASK 0x1
-#define QIB_7322_Control_FreezeMode_LSB 0x1
-#define QIB_7322_Control_FreezeMode_MSB 0x1
-#define QIB_7322_Control_FreezeMode_RMASK 0x1
-#define QIB_7322_Control_SyncReset_LSB 0x0
-#define QIB_7322_Control_SyncReset_MSB 0x0
-#define QIB_7322_Control_SyncReset_RMASK 0x1
-
-#define QIB_7322_PageAlign_OFFS 0x10
-#define QIB_7322_PageAlign_DEF 0x0000000000001000
-
-#define QIB_7322_ContextCnt_OFFS 0x18
-#define QIB_7322_ContextCnt_DEF 0x0000000000000012
-
-#define QIB_7322_Scratch_OFFS 0x20
-#define QIB_7322_Scratch_DEF 0x0000000000000000
-
-#define QIB_7322_CntrRegBase_OFFS 0x28
-#define QIB_7322_CntrRegBase_DEF 0x0000000000011000
-
-#define QIB_7322_SendRegBase_OFFS 0x30
-#define QIB_7322_SendRegBase_DEF 0x0000000000003000
-
-#define QIB_7322_UserRegBase_OFFS 0x38
-#define QIB_7322_UserRegBase_DEF 0x0000000000200000
-
-#define QIB_7322_IntMask_OFFS 0x68
-#define QIB_7322_IntMask_DEF 0x0000000000000000
-#define QIB_7322_IntMask_SDmaIntMask_1_LSB 0x3F
-#define QIB_7322_IntMask_SDmaIntMask_1_MSB 0x3F
-#define QIB_7322_IntMask_SDmaIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SDmaIntMask_0_LSB 0x3E
-#define QIB_7322_IntMask_SDmaIntMask_0_MSB 0x3E
-#define QIB_7322_IntMask_SDmaIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_SDmaProgressIntMask_1_LSB 0x3D
-#define QIB_7322_IntMask_SDmaProgressIntMask_1_MSB 0x3D
-#define QIB_7322_IntMask_SDmaProgressIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SDmaProgressIntMask_0_LSB 0x3C
-#define QIB_7322_IntMask_SDmaProgressIntMask_0_MSB 0x3C
-#define QIB_7322_IntMask_SDmaProgressIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_SDmaIdleIntMask_1_LSB 0x3B
-#define QIB_7322_IntMask_SDmaIdleIntMask_1_MSB 0x3B
-#define QIB_7322_IntMask_SDmaIdleIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SDmaIdleIntMask_0_LSB 0x3A
-#define QIB_7322_IntMask_SDmaIdleIntMask_0_MSB 0x3A
-#define QIB_7322_IntMask_SDmaIdleIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_LSB 0x39
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_MSB 0x39
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_LSB 0x38
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_MSB 0x38
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg17IntMask_LSB 0x31
-#define QIB_7322_IntMask_RcvUrg17IntMask_MSB 0x31
-#define QIB_7322_IntMask_RcvUrg17IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg16IntMask_LSB 0x30
-#define QIB_7322_IntMask_RcvUrg16IntMask_MSB 0x30
-#define QIB_7322_IntMask_RcvUrg16IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg15IntMask_LSB 0x2F
-#define QIB_7322_IntMask_RcvUrg15IntMask_MSB 0x2F
-#define QIB_7322_IntMask_RcvUrg15IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg14IntMask_LSB 0x2E
-#define QIB_7322_IntMask_RcvUrg14IntMask_MSB 0x2E
-#define QIB_7322_IntMask_RcvUrg14IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg13IntMask_LSB 0x2D
-#define QIB_7322_IntMask_RcvUrg13IntMask_MSB 0x2D
-#define QIB_7322_IntMask_RcvUrg13IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg12IntMask_LSB 0x2C
-#define QIB_7322_IntMask_RcvUrg12IntMask_MSB 0x2C
-#define QIB_7322_IntMask_RcvUrg12IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg11IntMask_LSB 0x2B
-#define QIB_7322_IntMask_RcvUrg11IntMask_MSB 0x2B
-#define QIB_7322_IntMask_RcvUrg11IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg10IntMask_LSB 0x2A
-#define QIB_7322_IntMask_RcvUrg10IntMask_MSB 0x2A
-#define QIB_7322_IntMask_RcvUrg10IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg9IntMask_LSB 0x29
-#define QIB_7322_IntMask_RcvUrg9IntMask_MSB 0x29
-#define QIB_7322_IntMask_RcvUrg9IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg8IntMask_LSB 0x28
-#define QIB_7322_IntMask_RcvUrg8IntMask_MSB 0x28
-#define QIB_7322_IntMask_RcvUrg8IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg7IntMask_LSB 0x27
-#define QIB_7322_IntMask_RcvUrg7IntMask_MSB 0x27
-#define QIB_7322_IntMask_RcvUrg7IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg6IntMask_LSB 0x26
-#define QIB_7322_IntMask_RcvUrg6IntMask_MSB 0x26
-#define QIB_7322_IntMask_RcvUrg6IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg5IntMask_LSB 0x25
-#define QIB_7322_IntMask_RcvUrg5IntMask_MSB 0x25
-#define QIB_7322_IntMask_RcvUrg5IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg4IntMask_LSB 0x24
-#define QIB_7322_IntMask_RcvUrg4IntMask_MSB 0x24
-#define QIB_7322_IntMask_RcvUrg4IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg3IntMask_LSB 0x23
-#define QIB_7322_IntMask_RcvUrg3IntMask_MSB 0x23
-#define QIB_7322_IntMask_RcvUrg3IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg2IntMask_LSB 0x22
-#define QIB_7322_IntMask_RcvUrg2IntMask_MSB 0x22
-#define QIB_7322_IntMask_RcvUrg2IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg1IntMask_LSB 0x21
-#define QIB_7322_IntMask_RcvUrg1IntMask_MSB 0x21
-#define QIB_7322_IntMask_RcvUrg1IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg0IntMask_LSB 0x20
-#define QIB_7322_IntMask_RcvUrg0IntMask_MSB 0x20
-#define QIB_7322_IntMask_RcvUrg0IntMask_RMASK 0x1
-#define QIB_7322_IntMask_ErrIntMask_1_LSB 0x1F
-#define QIB_7322_IntMask_ErrIntMask_1_MSB 0x1F
-#define QIB_7322_IntMask_ErrIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_ErrIntMask_0_LSB 0x1E
-#define QIB_7322_IntMask_ErrIntMask_0_MSB 0x1E
-#define QIB_7322_IntMask_ErrIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_ErrIntMask_LSB 0x1D
-#define QIB_7322_IntMask_ErrIntMask_MSB 0x1D
-#define QIB_7322_IntMask_ErrIntMask_RMASK 0x1
-#define QIB_7322_IntMask_AssertGPIOIntMask_LSB 0x1C
-#define QIB_7322_IntMask_AssertGPIOIntMask_MSB 0x1C
-#define QIB_7322_IntMask_AssertGPIOIntMask_RMASK 0x1
-#define QIB_7322_IntMask_SendDoneIntMask_1_LSB 0x19
-#define QIB_7322_IntMask_SendDoneIntMask_1_MSB 0x19
-#define QIB_7322_IntMask_SendDoneIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SendDoneIntMask_0_LSB 0x18
-#define QIB_7322_IntMask_SendDoneIntMask_0_MSB 0x18
-#define QIB_7322_IntMask_SendDoneIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_SendBufAvailIntMask_LSB 0x17
-#define QIB_7322_IntMask_SendBufAvailIntMask_MSB 0x17
-#define QIB_7322_IntMask_SendBufAvailIntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail17IntMask_LSB 0x11
-#define QIB_7322_IntMask_RcvAvail17IntMask_MSB 0x11
-#define QIB_7322_IntMask_RcvAvail17IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail16IntMask_LSB 0x10
-#define QIB_7322_IntMask_RcvAvail16IntMask_MSB 0x10
-#define QIB_7322_IntMask_RcvAvail16IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail15IntMask_LSB 0xF
-#define QIB_7322_IntMask_RcvAvail15IntMask_MSB 0xF
-#define QIB_7322_IntMask_RcvAvail15IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail14IntMask_LSB 0xE
-#define QIB_7322_IntMask_RcvAvail14IntMask_MSB 0xE
-#define QIB_7322_IntMask_RcvAvail14IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail13IntMask_LSB 0xD
-#define QIB_7322_IntMask_RcvAvail13IntMask_MSB 0xD
-#define QIB_7322_IntMask_RcvAvail13IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail12IntMask_LSB 0xC
-#define QIB_7322_IntMask_RcvAvail12IntMask_MSB 0xC
-#define QIB_7322_IntMask_RcvAvail12IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail11IntMask_LSB 0xB
-#define QIB_7322_IntMask_RcvAvail11IntMask_MSB 0xB
-#define QIB_7322_IntMask_RcvAvail11IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail10IntMask_LSB 0xA
-#define QIB_7322_IntMask_RcvAvail10IntMask_MSB 0xA
-#define QIB_7322_IntMask_RcvAvail10IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail9IntMask_LSB 0x9
-#define QIB_7322_IntMask_RcvAvail9IntMask_MSB 0x9
-#define QIB_7322_IntMask_RcvAvail9IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail8IntMask_LSB 0x8
-#define QIB_7322_IntMask_RcvAvail8IntMask_MSB 0x8
-#define QIB_7322_IntMask_RcvAvail8IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail7IntMask_LSB 0x7
-#define QIB_7322_IntMask_RcvAvail7IntMask_MSB 0x7
-#define QIB_7322_IntMask_RcvAvail7IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail6IntMask_LSB 0x6
-#define QIB_7322_IntMask_RcvAvail6IntMask_MSB 0x6
-#define QIB_7322_IntMask_RcvAvail6IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail5IntMask_LSB 0x5
-#define QIB_7322_IntMask_RcvAvail5IntMask_MSB 0x5
-#define QIB_7322_IntMask_RcvAvail5IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail4IntMask_LSB 0x4
-#define QIB_7322_IntMask_RcvAvail4IntMask_MSB 0x4
-#define QIB_7322_IntMask_RcvAvail4IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail3IntMask_LSB 0x3
-#define QIB_7322_IntMask_RcvAvail3IntMask_MSB 0x3
-#define QIB_7322_IntMask_RcvAvail3IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail2IntMask_LSB 0x2
-#define QIB_7322_IntMask_RcvAvail2IntMask_MSB 0x2
-#define QIB_7322_IntMask_RcvAvail2IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail1IntMask_LSB 0x1
-#define QIB_7322_IntMask_RcvAvail1IntMask_MSB 0x1
-#define QIB_7322_IntMask_RcvAvail1IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail0IntMask_LSB 0x0
-#define QIB_7322_IntMask_RcvAvail0IntMask_MSB 0x0
-#define QIB_7322_IntMask_RcvAvail0IntMask_RMASK 0x1
-
-#define QIB_7322_IntStatus_OFFS 0x70
-#define QIB_7322_IntStatus_DEF 0x0000000000000000
-#define QIB_7322_IntStatus_SDmaInt_1_LSB 0x3F
-#define QIB_7322_IntStatus_SDmaInt_1_MSB 0x3F
-#define QIB_7322_IntStatus_SDmaInt_1_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaInt_0_LSB 0x3E
-#define QIB_7322_IntStatus_SDmaInt_0_MSB 0x3E
-#define QIB_7322_IntStatus_SDmaInt_0_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaProgressInt_1_LSB 0x3D
-#define QIB_7322_IntStatus_SDmaProgressInt_1_MSB 0x3D
-#define QIB_7322_IntStatus_SDmaProgressInt_1_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaProgressInt_0_LSB 0x3C
-#define QIB_7322_IntStatus_SDmaProgressInt_0_MSB 0x3C
-#define QIB_7322_IntStatus_SDmaProgressInt_0_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaIdleInt_1_LSB 0x3B
-#define QIB_7322_IntStatus_SDmaIdleInt_1_MSB 0x3B
-#define QIB_7322_IntStatus_SDmaIdleInt_1_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaIdleInt_0_LSB 0x3A
-#define QIB_7322_IntStatus_SDmaIdleInt_0_MSB 0x3A
-#define QIB_7322_IntStatus_SDmaIdleInt_0_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaCleanupDone_1_LSB 0x39
-#define QIB_7322_IntStatus_SDmaCleanupDone_1_MSB 0x39
-#define QIB_7322_IntStatus_SDmaCleanupDone_1_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaCleanupDone_0_LSB 0x38
-#define QIB_7322_IntStatus_SDmaCleanupDone_0_MSB 0x38
-#define QIB_7322_IntStatus_SDmaCleanupDone_0_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg17_LSB 0x31
-#define QIB_7322_IntStatus_RcvUrg17_MSB 0x31
-#define QIB_7322_IntStatus_RcvUrg17_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg16_LSB 0x30
-#define QIB_7322_IntStatus_RcvUrg16_MSB 0x30
-#define QIB_7322_IntStatus_RcvUrg16_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg15_LSB 0x2F
-#define QIB_7322_IntStatus_RcvUrg15_MSB 0x2F
-#define QIB_7322_IntStatus_RcvUrg15_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg14_LSB 0x2E
-#define QIB_7322_IntStatus_RcvUrg14_MSB 0x2E
-#define QIB_7322_IntStatus_RcvUrg14_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg13_LSB 0x2D
-#define QIB_7322_IntStatus_RcvUrg13_MSB 0x2D
-#define QIB_7322_IntStatus_RcvUrg13_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg12_LSB 0x2C
-#define QIB_7322_IntStatus_RcvUrg12_MSB 0x2C
-#define QIB_7322_IntStatus_RcvUrg12_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg11_LSB 0x2B
-#define QIB_7322_IntStatus_RcvUrg11_MSB 0x2B
-#define QIB_7322_IntStatus_RcvUrg11_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg10_LSB 0x2A
-#define QIB_7322_IntStatus_RcvUrg10_MSB 0x2A
-#define QIB_7322_IntStatus_RcvUrg10_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg9_LSB 0x29
-#define QIB_7322_IntStatus_RcvUrg9_MSB 0x29
-#define QIB_7322_IntStatus_RcvUrg9_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg8_LSB 0x28
-#define QIB_7322_IntStatus_RcvUrg8_MSB 0x28
-#define QIB_7322_IntStatus_RcvUrg8_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg7_LSB 0x27
-#define QIB_7322_IntStatus_RcvUrg7_MSB 0x27
-#define QIB_7322_IntStatus_RcvUrg7_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg6_LSB 0x26
-#define QIB_7322_IntStatus_RcvUrg6_MSB 0x26
-#define QIB_7322_IntStatus_RcvUrg6_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg5_LSB 0x25
-#define QIB_7322_IntStatus_RcvUrg5_MSB 0x25
-#define QIB_7322_IntStatus_RcvUrg5_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg4_LSB 0x24
-#define QIB_7322_IntStatus_RcvUrg4_MSB 0x24
-#define QIB_7322_IntStatus_RcvUrg4_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg3_LSB 0x23
-#define QIB_7322_IntStatus_RcvUrg3_MSB 0x23
-#define QIB_7322_IntStatus_RcvUrg3_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg2_LSB 0x22
-#define QIB_7322_IntStatus_RcvUrg2_MSB 0x22
-#define QIB_7322_IntStatus_RcvUrg2_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg1_LSB 0x21
-#define QIB_7322_IntStatus_RcvUrg1_MSB 0x21
-#define QIB_7322_IntStatus_RcvUrg1_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg0_LSB 0x20
-#define QIB_7322_IntStatus_RcvUrg0_MSB 0x20
-#define QIB_7322_IntStatus_RcvUrg0_RMASK 0x1
-#define QIB_7322_IntStatus_Err_1_LSB 0x1F
-#define QIB_7322_IntStatus_Err_1_MSB 0x1F
-#define QIB_7322_IntStatus_Err_1_RMASK 0x1
-#define QIB_7322_IntStatus_Err_0_LSB 0x1E
-#define QIB_7322_IntStatus_Err_0_MSB 0x1E
-#define QIB_7322_IntStatus_Err_0_RMASK 0x1
-#define QIB_7322_IntStatus_Err_LSB 0x1D
-#define QIB_7322_IntStatus_Err_MSB 0x1D
-#define QIB_7322_IntStatus_Err_RMASK 0x1
-#define QIB_7322_IntStatus_AssertGPIO_LSB 0x1C
-#define QIB_7322_IntStatus_AssertGPIO_MSB 0x1C
-#define QIB_7322_IntStatus_AssertGPIO_RMASK 0x1
-#define QIB_7322_IntStatus_SendDone_1_LSB 0x19
-#define QIB_7322_IntStatus_SendDone_1_MSB 0x19
-#define QIB_7322_IntStatus_SendDone_1_RMASK 0x1
-#define QIB_7322_IntStatus_SendDone_0_LSB 0x18
-#define QIB_7322_IntStatus_SendDone_0_MSB 0x18
-#define QIB_7322_IntStatus_SendDone_0_RMASK 0x1
-#define QIB_7322_IntStatus_SendBufAvail_LSB 0x17
-#define QIB_7322_IntStatus_SendBufAvail_MSB 0x17
-#define QIB_7322_IntStatus_SendBufAvail_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail17_LSB 0x11
-#define QIB_7322_IntStatus_RcvAvail17_MSB 0x11
-#define QIB_7322_IntStatus_RcvAvail17_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail16_LSB 0x10
-#define QIB_7322_IntStatus_RcvAvail16_MSB 0x10
-#define QIB_7322_IntStatus_RcvAvail16_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail15_LSB 0xF
-#define QIB_7322_IntStatus_RcvAvail15_MSB 0xF
-#define QIB_7322_IntStatus_RcvAvail15_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail14_LSB 0xE
-#define QIB_7322_IntStatus_RcvAvail14_MSB 0xE
-#define QIB_7322_IntStatus_RcvAvail14_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail13_LSB 0xD
-#define QIB_7322_IntStatus_RcvAvail13_MSB 0xD
-#define QIB_7322_IntStatus_RcvAvail13_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail12_LSB 0xC
-#define QIB_7322_IntStatus_RcvAvail12_MSB 0xC
-#define QIB_7322_IntStatus_RcvAvail12_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail11_LSB 0xB
-#define QIB_7322_IntStatus_RcvAvail11_MSB 0xB
-#define QIB_7322_IntStatus_RcvAvail11_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail10_LSB 0xA
-#define QIB_7322_IntStatus_RcvAvail10_MSB 0xA
-#define QIB_7322_IntStatus_RcvAvail10_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail9_LSB 0x9
-#define QIB_7322_IntStatus_RcvAvail9_MSB 0x9
-#define QIB_7322_IntStatus_RcvAvail9_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail8_LSB 0x8
-#define QIB_7322_IntStatus_RcvAvail8_MSB 0x8
-#define QIB_7322_IntStatus_RcvAvail8_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail7_LSB 0x7
-#define QIB_7322_IntStatus_RcvAvail7_MSB 0x7
-#define QIB_7322_IntStatus_RcvAvail7_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail6_LSB 0x6
-#define QIB_7322_IntStatus_RcvAvail6_MSB 0x6
-#define QIB_7322_IntStatus_RcvAvail6_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail5_LSB 0x5
-#define QIB_7322_IntStatus_RcvAvail5_MSB 0x5
-#define QIB_7322_IntStatus_RcvAvail5_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail4_LSB 0x4
-#define QIB_7322_IntStatus_RcvAvail4_MSB 0x4
-#define QIB_7322_IntStatus_RcvAvail4_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail3_LSB 0x3
-#define QIB_7322_IntStatus_RcvAvail3_MSB 0x3
-#define QIB_7322_IntStatus_RcvAvail3_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail2_LSB 0x2
-#define QIB_7322_IntStatus_RcvAvail2_MSB 0x2
-#define QIB_7322_IntStatus_RcvAvail2_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail1_LSB 0x1
-#define QIB_7322_IntStatus_RcvAvail1_MSB 0x1
-#define QIB_7322_IntStatus_RcvAvail1_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail0_LSB 0x0
-#define QIB_7322_IntStatus_RcvAvail0_MSB 0x0
-#define QIB_7322_IntStatus_RcvAvail0_RMASK 0x1
-
-#define QIB_7322_IntClear_OFFS 0x78
-#define QIB_7322_IntClear_DEF 0x0000000000000000
-#define QIB_7322_IntClear_SDmaIntClear_1_LSB 0x3F
-#define QIB_7322_IntClear_SDmaIntClear_1_MSB 0x3F
-#define QIB_7322_IntClear_SDmaIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SDmaIntClear_0_LSB 0x3E
-#define QIB_7322_IntClear_SDmaIntClear_0_MSB 0x3E
-#define QIB_7322_IntClear_SDmaIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_SDmaProgressIntClear_1_LSB 0x3D
-#define QIB_7322_IntClear_SDmaProgressIntClear_1_MSB 0x3D
-#define QIB_7322_IntClear_SDmaProgressIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SDmaProgressIntClear_0_LSB 0x3C
-#define QIB_7322_IntClear_SDmaProgressIntClear_0_MSB 0x3C
-#define QIB_7322_IntClear_SDmaProgressIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_SDmaIdleIntClear_1_LSB 0x3B
-#define QIB_7322_IntClear_SDmaIdleIntClear_1_MSB 0x3B
-#define QIB_7322_IntClear_SDmaIdleIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SDmaIdleIntClear_0_LSB 0x3A
-#define QIB_7322_IntClear_SDmaIdleIntClear_0_MSB 0x3A
-#define QIB_7322_IntClear_SDmaIdleIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_LSB 0x39
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_MSB 0x39
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_LSB 0x38
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_MSB 0x38
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg17IntClear_LSB 0x31
-#define QIB_7322_IntClear_RcvUrg17IntClear_MSB 0x31
-#define QIB_7322_IntClear_RcvUrg17IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg16IntClear_LSB 0x30
-#define QIB_7322_IntClear_RcvUrg16IntClear_MSB 0x30
-#define QIB_7322_IntClear_RcvUrg16IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg15IntClear_LSB 0x2F
-#define QIB_7322_IntClear_RcvUrg15IntClear_MSB 0x2F
-#define QIB_7322_IntClear_RcvUrg15IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg14IntClear_LSB 0x2E
-#define QIB_7322_IntClear_RcvUrg14IntClear_MSB 0x2E
-#define QIB_7322_IntClear_RcvUrg14IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg13IntClear_LSB 0x2D
-#define QIB_7322_IntClear_RcvUrg13IntClear_MSB 0x2D
-#define QIB_7322_IntClear_RcvUrg13IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg12IntClear_LSB 0x2C
-#define QIB_7322_IntClear_RcvUrg12IntClear_MSB 0x2C
-#define QIB_7322_IntClear_RcvUrg12IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg11IntClear_LSB 0x2B
-#define QIB_7322_IntClear_RcvUrg11IntClear_MSB 0x2B
-#define QIB_7322_IntClear_RcvUrg11IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg10IntClear_LSB 0x2A
-#define QIB_7322_IntClear_RcvUrg10IntClear_MSB 0x2A
-#define QIB_7322_IntClear_RcvUrg10IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg9IntClear_LSB 0x29
-#define QIB_7322_IntClear_RcvUrg9IntClear_MSB 0x29
-#define QIB_7322_IntClear_RcvUrg9IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg8IntClear_LSB 0x28
-#define QIB_7322_IntClear_RcvUrg8IntClear_MSB 0x28
-#define QIB_7322_IntClear_RcvUrg8IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg7IntClear_LSB 0x27
-#define QIB_7322_IntClear_RcvUrg7IntClear_MSB 0x27
-#define QIB_7322_IntClear_RcvUrg7IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg6IntClear_LSB 0x26
-#define QIB_7322_IntClear_RcvUrg6IntClear_MSB 0x26
-#define QIB_7322_IntClear_RcvUrg6IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg5IntClear_LSB 0x25
-#define QIB_7322_IntClear_RcvUrg5IntClear_MSB 0x25
-#define QIB_7322_IntClear_RcvUrg5IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg4IntClear_LSB 0x24
-#define QIB_7322_IntClear_RcvUrg4IntClear_MSB 0x24
-#define QIB_7322_IntClear_RcvUrg4IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg3IntClear_LSB 0x23
-#define QIB_7322_IntClear_RcvUrg3IntClear_MSB 0x23
-#define QIB_7322_IntClear_RcvUrg3IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg2IntClear_LSB 0x22
-#define QIB_7322_IntClear_RcvUrg2IntClear_MSB 0x22
-#define QIB_7322_IntClear_RcvUrg2IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg1IntClear_LSB 0x21
-#define QIB_7322_IntClear_RcvUrg1IntClear_MSB 0x21
-#define QIB_7322_IntClear_RcvUrg1IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg0IntClear_LSB 0x20
-#define QIB_7322_IntClear_RcvUrg0IntClear_MSB 0x20
-#define QIB_7322_IntClear_RcvUrg0IntClear_RMASK 0x1
-#define QIB_7322_IntClear_ErrIntClear_1_LSB 0x1F
-#define QIB_7322_IntClear_ErrIntClear_1_MSB 0x1F
-#define QIB_7322_IntClear_ErrIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_ErrIntClear_0_LSB 0x1E
-#define QIB_7322_IntClear_ErrIntClear_0_MSB 0x1E
-#define QIB_7322_IntClear_ErrIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_ErrIntClear_LSB 0x1D
-#define QIB_7322_IntClear_ErrIntClear_MSB 0x1D
-#define QIB_7322_IntClear_ErrIntClear_RMASK 0x1
-#define QIB_7322_IntClear_AssertGPIOIntClear_LSB 0x1C
-#define QIB_7322_IntClear_AssertGPIOIntClear_MSB 0x1C
-#define QIB_7322_IntClear_AssertGPIOIntClear_RMASK 0x1
-#define QIB_7322_IntClear_SendDoneIntClear_1_LSB 0x19
-#define QIB_7322_IntClear_SendDoneIntClear_1_MSB 0x19
-#define QIB_7322_IntClear_SendDoneIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SendDoneIntClear_0_LSB 0x18
-#define QIB_7322_IntClear_SendDoneIntClear_0_MSB 0x18
-#define QIB_7322_IntClear_SendDoneIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_SendBufAvailIntClear_LSB 0x17
-#define QIB_7322_IntClear_SendBufAvailIntClear_MSB 0x17
-#define QIB_7322_IntClear_SendBufAvailIntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail17IntClear_LSB 0x11
-#define QIB_7322_IntClear_RcvAvail17IntClear_MSB 0x11
-#define QIB_7322_IntClear_RcvAvail17IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail16IntClear_LSB 0x10
-#define QIB_7322_IntClear_RcvAvail16IntClear_MSB 0x10
-#define QIB_7322_IntClear_RcvAvail16IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail15IntClear_LSB 0xF
-#define QIB_7322_IntClear_RcvAvail15IntClear_MSB 0xF
-#define QIB_7322_IntClear_RcvAvail15IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail14IntClear_LSB 0xE
-#define QIB_7322_IntClear_RcvAvail14IntClear_MSB 0xE
-#define QIB_7322_IntClear_RcvAvail14IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail13IntClear_LSB 0xD
-#define QIB_7322_IntClear_RcvAvail13IntClear_MSB 0xD
-#define QIB_7322_IntClear_RcvAvail13IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail12IntClear_LSB 0xC
-#define QIB_7322_IntClear_RcvAvail12IntClear_MSB 0xC
-#define QIB_7322_IntClear_RcvAvail12IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail11IntClear_LSB 0xB
-#define QIB_7322_IntClear_RcvAvail11IntClear_MSB 0xB
-#define QIB_7322_IntClear_RcvAvail11IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail10IntClear_LSB 0xA
-#define QIB_7322_IntClear_RcvAvail10IntClear_MSB 0xA
-#define QIB_7322_IntClear_RcvAvail10IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail9IntClear_LSB 0x9
-#define QIB_7322_IntClear_RcvAvail9IntClear_MSB 0x9
-#define QIB_7322_IntClear_RcvAvail9IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail8IntClear_LSB 0x8
-#define QIB_7322_IntClear_RcvAvail8IntClear_MSB 0x8
-#define QIB_7322_IntClear_RcvAvail8IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail7IntClear_LSB 0x7
-#define QIB_7322_IntClear_RcvAvail7IntClear_MSB 0x7
-#define QIB_7322_IntClear_RcvAvail7IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail6IntClear_LSB 0x6
-#define QIB_7322_IntClear_RcvAvail6IntClear_MSB 0x6
-#define QIB_7322_IntClear_RcvAvail6IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail5IntClear_LSB 0x5
-#define QIB_7322_IntClear_RcvAvail5IntClear_MSB 0x5
-#define QIB_7322_IntClear_RcvAvail5IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail4IntClear_LSB 0x4
-#define QIB_7322_IntClear_RcvAvail4IntClear_MSB 0x4
-#define QIB_7322_IntClear_RcvAvail4IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail3IntClear_LSB 0x3
-#define QIB_7322_IntClear_RcvAvail3IntClear_MSB 0x3
-#define QIB_7322_IntClear_RcvAvail3IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail2IntClear_LSB 0x2
-#define QIB_7322_IntClear_RcvAvail2IntClear_MSB 0x2
-#define QIB_7322_IntClear_RcvAvail2IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail1IntClear_LSB 0x1
-#define QIB_7322_IntClear_RcvAvail1IntClear_MSB 0x1
-#define QIB_7322_IntClear_RcvAvail1IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail0IntClear_LSB 0x0
-#define QIB_7322_IntClear_RcvAvail0IntClear_MSB 0x0
-#define QIB_7322_IntClear_RcvAvail0IntClear_RMASK 0x1
-
-#define QIB_7322_ErrMask_OFFS 0x80
-#define QIB_7322_ErrMask_DEF 0x0000000000000000
-#define QIB_7322_ErrMask_ResetNegatedMask_LSB 0x3F
-#define QIB_7322_ErrMask_ResetNegatedMask_MSB 0x3F
-#define QIB_7322_ErrMask_ResetNegatedMask_RMASK 0x1
-#define QIB_7322_ErrMask_HardwareErrMask_LSB 0x3E
-#define QIB_7322_ErrMask_HardwareErrMask_MSB 0x3E
-#define QIB_7322_ErrMask_HardwareErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_InvalidAddrErrMask_LSB 0x3D
-#define QIB_7322_ErrMask_InvalidAddrErrMask_MSB 0x3D
-#define QIB_7322_ErrMask_InvalidAddrErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SDmaVL15ErrMask_LSB 0x38
-#define QIB_7322_ErrMask_SDmaVL15ErrMask_MSB 0x38
-#define QIB_7322_ErrMask_SDmaVL15ErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_LSB 0x37
-#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_MSB 0x37
-#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_InvalidEEPCmdMask_LSB 0x35
-#define QIB_7322_ErrMask_InvalidEEPCmdMask_MSB 0x35
-#define QIB_7322_ErrMask_InvalidEEPCmdMask_RMASK 0x1
-#define QIB_7322_ErrMask_RcvContextShareErrMask_LSB 0x34
-#define QIB_7322_ErrMask_RcvContextShareErrMask_MSB 0x34
-#define QIB_7322_ErrMask_RcvContextShareErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SendVLMismatchErrMask_LSB 0x24
-#define QIB_7322_ErrMask_SendVLMismatchErrMask_MSB 0x24
-#define QIB_7322_ErrMask_SendVLMismatchErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SendArmLaunchErrMask_LSB 0x23
-#define QIB_7322_ErrMask_SendArmLaunchErrMask_MSB 0x23
-#define QIB_7322_ErrMask_SendArmLaunchErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
-#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_MSB 0x1B
-#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SDmaWrongPortErrMask_LSB 0x1A
-#define QIB_7322_ErrMask_SDmaWrongPortErrMask_MSB 0x1A
-#define QIB_7322_ErrMask_SDmaWrongPortErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_LSB 0x19
-#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_MSB 0x19
-#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_RcvHdrFullErrMask_LSB 0xD
-#define QIB_7322_ErrMask_RcvHdrFullErrMask_MSB 0xD
-#define QIB_7322_ErrMask_RcvHdrFullErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_RcvEgrFullErrMask_LSB 0xC
-#define QIB_7322_ErrMask_RcvEgrFullErrMask_MSB 0xC
-#define QIB_7322_ErrMask_RcvEgrFullErrMask_RMASK 0x1
-
-#define QIB_7322_ErrStatus_OFFS 0x88
-#define QIB_7322_ErrStatus_DEF 0x0000000000000000
-#define QIB_7322_ErrStatus_ResetNegated_LSB 0x3F
-#define QIB_7322_ErrStatus_ResetNegated_MSB 0x3F
-#define QIB_7322_ErrStatus_ResetNegated_RMASK 0x1
-#define QIB_7322_ErrStatus_HardwareErr_LSB 0x3E
-#define QIB_7322_ErrStatus_HardwareErr_MSB 0x3E
-#define QIB_7322_ErrStatus_HardwareErr_RMASK 0x1
-#define QIB_7322_ErrStatus_InvalidAddrErr_LSB 0x3D
-#define QIB_7322_ErrStatus_InvalidAddrErr_MSB 0x3D
-#define QIB_7322_ErrStatus_InvalidAddrErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SDmaVL15Err_LSB 0x38
-#define QIB_7322_ErrStatus_SDmaVL15Err_MSB 0x38
-#define QIB_7322_ErrStatus_SDmaVL15Err_RMASK 0x1
-#define QIB_7322_ErrStatus_SBufVL15MisUseErr_LSB 0x37
-#define QIB_7322_ErrStatus_SBufVL15MisUseErr_MSB 0x37
-#define QIB_7322_ErrStatus_SBufVL15MisUseErr_RMASK 0x1
-#define QIB_7322_ErrStatus_InvalidEEPCmdErr_LSB 0x35
-#define QIB_7322_ErrStatus_InvalidEEPCmdErr_MSB 0x35
-#define QIB_7322_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
-#define QIB_7322_ErrStatus_RcvContextShareErr_LSB 0x34
-#define QIB_7322_ErrStatus_RcvContextShareErr_MSB 0x34
-#define QIB_7322_ErrStatus_RcvContextShareErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SendVLMismatchErr_LSB 0x24
-#define QIB_7322_ErrStatus_SendVLMismatchErr_MSB 0x24
-#define QIB_7322_ErrStatus_SendVLMismatchErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SendArmLaunchErr_LSB 0x23
-#define QIB_7322_ErrStatus_SendArmLaunchErr_MSB 0x23
-#define QIB_7322_ErrStatus_SendArmLaunchErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
-#define QIB_7322_ErrStatus_SendSpecialTriggerErr_MSB 0x1B
-#define QIB_7322_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SDmaWrongPortErr_LSB 0x1A
-#define QIB_7322_ErrStatus_SDmaWrongPortErr_MSB 0x1A
-#define QIB_7322_ErrStatus_SDmaWrongPortErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_LSB 0x19
-#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_MSB 0x19
-#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_RMASK 0x1
-#define QIB_7322_ErrStatus_RcvHdrFullErr_LSB 0xD
-#define QIB_7322_ErrStatus_RcvHdrFullErr_MSB 0xD
-#define QIB_7322_ErrStatus_RcvHdrFullErr_RMASK 0x1
-#define QIB_7322_ErrStatus_RcvEgrFullErr_LSB 0xC
-#define QIB_7322_ErrStatus_RcvEgrFullErr_MSB 0xC
-#define QIB_7322_ErrStatus_RcvEgrFullErr_RMASK 0x1
-
-#define QIB_7322_ErrClear_OFFS 0x90
-#define QIB_7322_ErrClear_DEF 0x0000000000000000
-#define QIB_7322_ErrClear_ResetNegatedClear_LSB 0x3F
-#define QIB_7322_ErrClear_ResetNegatedClear_MSB 0x3F
-#define QIB_7322_ErrClear_ResetNegatedClear_RMASK 0x1
-#define QIB_7322_ErrClear_HardwareErrClear_LSB 0x3E
-#define QIB_7322_ErrClear_HardwareErrClear_MSB 0x3E
-#define QIB_7322_ErrClear_HardwareErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_InvalidAddrErrClear_LSB 0x3D
-#define QIB_7322_ErrClear_InvalidAddrErrClear_MSB 0x3D
-#define QIB_7322_ErrClear_InvalidAddrErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SDmaVL15ErrClear_LSB 0x38
-#define QIB_7322_ErrClear_SDmaVL15ErrClear_MSB 0x38
-#define QIB_7322_ErrClear_SDmaVL15ErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_LSB 0x37
-#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_MSB 0x37
-#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
-#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_MSB 0x35
-#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_RcvContextShareErrClear_LSB 0x34
-#define QIB_7322_ErrClear_RcvContextShareErrClear_MSB 0x34
-#define QIB_7322_ErrClear_RcvContextShareErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SendVLMismatchErrMask_LSB 0x24
-#define QIB_7322_ErrClear_SendVLMismatchErrMask_MSB 0x24
-#define QIB_7322_ErrClear_SendVLMismatchErrMask_RMASK 0x1
-#define QIB_7322_ErrClear_SendArmLaunchErrClear_LSB 0x23
-#define QIB_7322_ErrClear_SendArmLaunchErrClear_MSB 0x23
-#define QIB_7322_ErrClear_SendArmLaunchErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
-#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_MSB 0x1B
-#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SDmaWrongPortErrClear_LSB 0x1A
-#define QIB_7322_ErrClear_SDmaWrongPortErrClear_MSB 0x1A
-#define QIB_7322_ErrClear_SDmaWrongPortErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_LSB 0x19
-#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_MSB 0x19
-#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_RcvHdrFullErrClear_LSB 0xD
-#define QIB_7322_ErrClear_RcvHdrFullErrClear_MSB 0xD
-#define QIB_7322_ErrClear_RcvHdrFullErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_RcvEgrFullErrClear_LSB 0xC
-#define QIB_7322_ErrClear_RcvEgrFullErrClear_MSB 0xC
-#define QIB_7322_ErrClear_RcvEgrFullErrClear_RMASK 0x1
-
-#define QIB_7322_HwErrMask_OFFS 0x98
-#define QIB_7322_HwErrMask_DEF 0x0000000000000000
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_LSB 0x3F
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_MSB 0x3F
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_LSB 0x3E
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_MSB 0x3E
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_RMASK 0x1
-#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_LSB 0x37
-#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_MSB 0x37
-#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_RMASK 0x1
-#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
-#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_MSB 0x36
-#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
-#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_LSB 0x35
-#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_MSB 0x35
-#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_RMASK 0x1
-#define QIB_7322_HwErrMask_MemoryErrMask_LSB 0x30
-#define QIB_7322_HwErrMask_MemoryErrMask_MSB 0x30
-#define QIB_7322_HwErrMask_MemoryErrMask_RMASK 0x1
-#define QIB_7322_HwErrMask_pcie_phy_txParityErr_LSB 0x22
-#define QIB_7322_HwErrMask_pcie_phy_txParityErr_MSB 0x22
-#define QIB_7322_HwErrMask_pcie_phy_txParityErr_RMASK 0x1
-#define QIB_7322_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
-#define QIB_7322_HwErrMask_PCIeBusParityErrMask_MSB 0x21
-#define QIB_7322_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
-#define QIB_7322_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
-#define QIB_7322_HwErrMask_PcieCplTimeoutMask_MSB 0x1E
-#define QIB_7322_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
-#define QIB_7322_HwErrMask_PciePoisonedTLPMask_LSB 0x1D
-#define QIB_7322_HwErrMask_PciePoisonedTLPMask_MSB 0x1D
-#define QIB_7322_HwErrMask_PciePoisonedTLPMask_RMASK 0x1
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_LSB 0x1C
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_MSB 0x1C
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_LSB 0x1B
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_MSB 0x1B
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_RMASK 0x1
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_LSB 0xE
-#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_MSB 0xE
-#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
-#define QIB_7322_HwErrMask_statusValidNoEopMask_LSB 0xC
-#define QIB_7322_HwErrMask_statusValidNoEopMask_MSB 0xC
-#define QIB_7322_HwErrMask_statusValidNoEopMask_RMASK 0x1
-#define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
-#define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
-#define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1
-
-#define QIB_7322_HwErrStatus_OFFS 0xA0
-#define QIB_7322_HwErrStatus_DEF 0x0000000000000000
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_LSB 0x3F
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_MSB 0x3F
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_LSB 0x3E
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_MSB 0x3E
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_RMASK 0x1
-#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_LSB 0x37
-#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_MSB 0x37
-#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_RMASK 0x1
-#define QIB_7322_HwErrStatus_PowerOnBISTFailed_LSB 0x36
-#define QIB_7322_HwErrStatus_PowerOnBISTFailed_MSB 0x36
-#define QIB_7322_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
-#define QIB_7322_HwErrStatus_TempsenseTholdReached_LSB 0x35
-#define QIB_7322_HwErrStatus_TempsenseTholdReached_MSB 0x35
-#define QIB_7322_HwErrStatus_TempsenseTholdReached_RMASK 0x1
-#define QIB_7322_HwErrStatus_MemoryErr_LSB 0x30
-#define QIB_7322_HwErrStatus_MemoryErr_MSB 0x30
-#define QIB_7322_HwErrStatus_MemoryErr_RMASK 0x1
-#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_LSB 0x22
-#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_MSB 0x22
-#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_RMASK 0x1
-#define QIB_7322_HwErrStatus_PCIeBusParity_LSB 0x1F
-#define QIB_7322_HwErrStatus_PCIeBusParity_MSB 0x21
-#define QIB_7322_HwErrStatus_PCIeBusParity_RMASK 0x7
-#define QIB_7322_HwErrStatus_PcieCplTimeout_LSB 0x1E
-#define QIB_7322_HwErrStatus_PcieCplTimeout_MSB 0x1E
-#define QIB_7322_HwErrStatus_PcieCplTimeout_RMASK 0x1
-#define QIB_7322_HwErrStatus_PciePoisonedTLP_LSB 0x1D
-#define QIB_7322_HwErrStatus_PciePoisonedTLP_MSB 0x1D
-#define QIB_7322_HwErrStatus_PciePoisonedTLP_RMASK 0x1
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_LSB 0x1C
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_MSB 0x1C
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_LSB 0x1B
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_MSB 0x1B
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_RMASK 0x1
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_LSB 0xE
-#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_MSB 0xE
-#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
-#define QIB_7322_HwErrStatus_statusValidNoEop_LSB 0xC
-#define QIB_7322_HwErrStatus_statusValidNoEop_MSB 0xC
-#define QIB_7322_HwErrStatus_statusValidNoEop_RMASK 0x1
-#define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
-#define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
-#define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1
-
-#define QIB_7322_HwErrClear_OFFS 0xA8
-#define QIB_7322_HwErrClear_DEF 0x0000000000000000
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_LSB 0x3F
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_MSB 0x3F
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_LSB 0x3E
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_MSB 0x3E
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_RMASK 0x1
-#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_LSB 0x37
-#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_MSB 0x37
-#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_RMASK 0x1
-#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
-#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_MSB 0x36
-#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
-#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_LSB 0x35
-#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_MSB 0x35
-#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_RMASK 0x1
-#define QIB_7322_HwErrClear_MemoryErrClear_LSB 0x30
-#define QIB_7322_HwErrClear_MemoryErrClear_MSB 0x30
-#define QIB_7322_HwErrClear_MemoryErrClear_RMASK 0x1
-#define QIB_7322_HwErrClear_pcie_phy_txParityErr_LSB 0x22
-#define QIB_7322_HwErrClear_pcie_phy_txParityErr_MSB 0x22
-#define QIB_7322_HwErrClear_pcie_phy_txParityErr_RMASK 0x1
-#define QIB_7322_HwErrClear_PCIeBusParityClear_LSB 0x1F
-#define QIB_7322_HwErrClear_PCIeBusParityClear_MSB 0x21
-#define QIB_7322_HwErrClear_PCIeBusParityClear_RMASK 0x7
-#define QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
-#define QIB_7322_HwErrClear_PcieCplTimeoutClear_MSB 0x1E
-#define QIB_7322_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
-#define QIB_7322_HwErrClear_PciePoisonedTLPClear_LSB 0x1D
-#define QIB_7322_HwErrClear_PciePoisonedTLPClear_MSB 0x1D
-#define QIB_7322_HwErrClear_PciePoisonedTLPClear_RMASK 0x1
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_LSB 0x1C
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_MSB 0x1C
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_LSB 0x1B
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_MSB 0x1B
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_LSB 0xE
-#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_MSB 0xE
-#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
-#define QIB_7322_HwErrClear_statusValidNoEopClear_LSB 0xC
-#define QIB_7322_HwErrClear_statusValidNoEopClear_MSB 0xC
-#define QIB_7322_HwErrClear_statusValidNoEopClear_RMASK 0x1
-#define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
-#define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
-#define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1
-
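The HwErrMask / HwErrStatus / HwErrClear trio above follows the mask/status/write-one-to-clear layout the field names imply: HwErrStatus latches a hardware error, writing a 1 to the matching bit of HwErrClear acknowledges it, and HwErrMask gates whether the condition is reported. A minimal sketch of acknowledging one latched error, assuming only a memory-mapped register space and the kernel's generic writeq() accessor (the driver's own MMIO wrapper may differ):

#include <linux/io.h>
#include <linux/types.h>

/* Acknowledge a latched PowerOnBISTFailed error (write-1-to-clear).
 * base is the ioremap()ed start of the chip's register space.
 */
static inline void qib7322_ack_poweron_bist(void __iomem *base)
{
	u64 bit = (u64)QIB_7322_HwErrClear_PowerOnBISTFailedClear_RMASK
			<< QIB_7322_HwErrClear_PowerOnBISTFailedClear_LSB;

	writeq(bit, base + QIB_7322_HwErrClear_OFFS);	/* only this bit clears */
}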
-#define QIB_7322_HwDiagCtrl_OFFS 0xB0
-#define QIB_7322_HwDiagCtrl_DEF 0x0000000000000000
-#define QIB_7322_HwDiagCtrl_Diagnostic_LSB 0x3F
-#define QIB_7322_HwDiagCtrl_Diagnostic_MSB 0x3F
-#define QIB_7322_HwDiagCtrl_Diagnostic_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_CounterWrEnable_LSB 0x3D
-#define QIB_7322_HwDiagCtrl_CounterWrEnable_MSB 0x3D
-#define QIB_7322_HwDiagCtrl_CounterWrEnable_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_CounterDisable_LSB 0x3C
-#define QIB_7322_HwDiagCtrl_CounterDisable_MSB 0x3C
-#define QIB_7322_HwDiagCtrl_CounterDisable_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
-#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_MSB 0x22
-#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_LSB 0xE
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_MSB 0xE
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_LSB 0xC
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_MSB 0xC
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_RMASK 0x1
-
-#define QIB_7322_EXTStatus_OFFS 0xC0
-#define QIB_7322_EXTStatus_DEF 0x000000000000X000
-#define QIB_7322_EXTStatus_GPIOIn_LSB 0x30
-#define QIB_7322_EXTStatus_GPIOIn_MSB 0x3F
-#define QIB_7322_EXTStatus_GPIOIn_RMASK 0xFFFF
-#define QIB_7322_EXTStatus_MemBISTDisabled_LSB 0xF
-#define QIB_7322_EXTStatus_MemBISTDisabled_MSB 0xF
-#define QIB_7322_EXTStatus_MemBISTDisabled_RMASK 0x1
-#define QIB_7322_EXTStatus_MemBISTEndTest_LSB 0xE
-#define QIB_7322_EXTStatus_MemBISTEndTest_MSB 0xE
-#define QIB_7322_EXTStatus_MemBISTEndTest_RMASK 0x1
-
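One quirk worth noting: a few of the machine-generated _DEF (reset default) values contain a literal 'X' nibble, as in QIB_7322_EXTStatus_DEF above and the feature_mask and TXEStatus_0 defaults further down. The 'X' evidently marks nibbles whose power-up value is undefined; such macros are not valid C constants, so they can only serve as documentation and must never be referenced in code.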
-#define QIB_7322_EXTCtrl_OFFS 0xC8
-#define QIB_7322_EXTCtrl_DEF 0x0000000000000000
-#define QIB_7322_EXTCtrl_GPIOOe_LSB 0x30
-#define QIB_7322_EXTCtrl_GPIOOe_MSB 0x3F
-#define QIB_7322_EXTCtrl_GPIOOe_RMASK 0xFFFF
-#define QIB_7322_EXTCtrl_GPIOInvert_LSB 0x20
-#define QIB_7322_EXTCtrl_GPIOInvert_MSB 0x2F
-#define QIB_7322_EXTCtrl_GPIOInvert_RMASK 0xFFFF
-#define QIB_7322_EXTCtrl_LEDPort1GreenOn_LSB 0x3
-#define QIB_7322_EXTCtrl_LEDPort1GreenOn_MSB 0x3
-#define QIB_7322_EXTCtrl_LEDPort1GreenOn_RMASK 0x1
-#define QIB_7322_EXTCtrl_LEDPort1YellowOn_LSB 0x2
-#define QIB_7322_EXTCtrl_LEDPort1YellowOn_MSB 0x2
-#define QIB_7322_EXTCtrl_LEDPort1YellowOn_RMASK 0x1
-#define QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB 0x1
-#define QIB_7322_EXTCtrl_LEDPort0GreenOn_MSB 0x1
-#define QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK 0x1
-#define QIB_7322_EXTCtrl_LEDPort0YellowOn_LSB 0x0
-#define QIB_7322_EXTCtrl_LEDPort0YellowOn_MSB 0x0
-#define QIB_7322_EXTCtrl_LEDPort0YellowOn_RMASK 0x1
-
-#define QIB_7322_GPIOOut_OFFS 0xE0
-#define QIB_7322_GPIOOut_DEF 0x0000000000000000
-
-#define QIB_7322_GPIOMask_OFFS 0xE8
-#define QIB_7322_GPIOMask_DEF 0x0000000000000000
-
-#define QIB_7322_GPIOStatus_OFFS 0xF0
-#define QIB_7322_GPIOStatus_DEF 0x0000000000000000
-
-#define QIB_7322_GPIOClear_OFFS 0xF8
-#define QIB_7322_GPIOClear_DEF 0x0000000000000000
-
-#define QIB_7322_RcvCtrl_OFFS 0x100
-#define QIB_7322_RcvCtrl_DEF 0x0000000000000000
-#define QIB_7322_RcvCtrl_TidReDirect_LSB 0x30
-#define QIB_7322_RcvCtrl_TidReDirect_MSB 0x3F
-#define QIB_7322_RcvCtrl_TidReDirect_RMASK 0xFFFF
-#define QIB_7322_RcvCtrl_TailUpd_LSB 0x2F
-#define QIB_7322_RcvCtrl_TailUpd_MSB 0x2F
-#define QIB_7322_RcvCtrl_TailUpd_RMASK 0x1
-#define QIB_7322_RcvCtrl_XrcTypeCode_LSB 0x2C
-#define QIB_7322_RcvCtrl_XrcTypeCode_MSB 0x2E
-#define QIB_7322_RcvCtrl_XrcTypeCode_RMASK 0x7
-#define QIB_7322_RcvCtrl_TidFlowEnable_LSB 0x2B
-#define QIB_7322_RcvCtrl_TidFlowEnable_MSB 0x2B
-#define QIB_7322_RcvCtrl_TidFlowEnable_RMASK 0x1
-#define QIB_7322_RcvCtrl_ContextCfg_LSB 0x29
-#define QIB_7322_RcvCtrl_ContextCfg_MSB 0x2A
-#define QIB_7322_RcvCtrl_ContextCfg_RMASK 0x3
-#define QIB_7322_RcvCtrl_IntrAvail_LSB 0x14
-#define QIB_7322_RcvCtrl_IntrAvail_MSB 0x25
-#define QIB_7322_RcvCtrl_IntrAvail_RMASK 0x3FFFF
-#define QIB_7322_RcvCtrl_dontDropRHQFull_LSB 0x0
-#define QIB_7322_RcvCtrl_dontDropRHQFull_MSB 0x11
-#define QIB_7322_RcvCtrl_dontDropRHQFull_RMASK 0x3FFFF
-
-#define QIB_7322_RcvHdrSize_OFFS 0x110
-#define QIB_7322_RcvHdrSize_DEF 0x0000000000000000
-
-#define QIB_7322_RcvHdrCnt_OFFS 0x118
-#define QIB_7322_RcvHdrCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RcvHdrEntSize_OFFS 0x120
-#define QIB_7322_RcvHdrEntSize_DEF 0x0000000000000000
-
-#define QIB_7322_RcvTIDBase_OFFS 0x128
-#define QIB_7322_RcvTIDBase_DEF 0x0000000000050000
-
-#define QIB_7322_RcvTIDCnt_OFFS 0x130
-#define QIB_7322_RcvTIDCnt_DEF 0x0000000000000200
-
-#define QIB_7322_RcvEgrBase_OFFS 0x138
-#define QIB_7322_RcvEgrBase_DEF 0x0000000000014000
-
-#define QIB_7322_RcvEgrCnt_OFFS 0x140
-#define QIB_7322_RcvEgrCnt_DEF 0x0000000000001000
-
-#define QIB_7322_RcvBufBase_OFFS 0x148
-#define QIB_7322_RcvBufBase_DEF 0x0000000000080000
-
-#define QIB_7322_RcvBufSize_OFFS 0x150
-#define QIB_7322_RcvBufSize_DEF 0x0000000000005000
-
-#define QIB_7322_RxIntMemBase_OFFS 0x158
-#define QIB_7322_RxIntMemBase_DEF 0x0000000000077000
-
-#define QIB_7322_RxIntMemSize_OFFS 0x160
-#define QIB_7322_RxIntMemSize_DEF 0x0000000000007000
-
-#define QIB_7322_feature_mask_OFFS 0x190
-#define QIB_7322_feature_mask_DEF 0x00000000000000XX
-
-#define QIB_7322_active_feature_mask_OFFS 0x198
-#define QIB_7322_active_feature_mask_DEF 0x00000000000000XX
-#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_LSB 0x5
-#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_MSB 0x5
-#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_LSB 0x4
-#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_MSB 0x4
-#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_LSB 0x3
-#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_MSB 0x3
-#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_LSB 0x2
-#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_MSB 0x2
-#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_LSB 0x1
-#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_MSB 0x1
-#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_LSB 0x0
-#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_MSB 0x0
-#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_RMASK 0x1
-
-#define QIB_7322_SendCtrl_OFFS 0x1C0
-#define QIB_7322_SendCtrl_DEF 0x0000000000000000
-#define QIB_7322_SendCtrl_Disarm_LSB 0x1F
-#define QIB_7322_SendCtrl_Disarm_MSB 0x1F
-#define QIB_7322_SendCtrl_Disarm_RMASK 0x1
-#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_LSB 0x1D
-#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_MSB 0x1D
-#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_RMASK 0x1
-#define QIB_7322_SendCtrl_AvailUpdThld_LSB 0x18
-#define QIB_7322_SendCtrl_AvailUpdThld_MSB 0x1C
-#define QIB_7322_SendCtrl_AvailUpdThld_RMASK 0x1F
-#define QIB_7322_SendCtrl_DisarmSendBuf_LSB 0x10
-#define QIB_7322_SendCtrl_DisarmSendBuf_MSB 0x17
-#define QIB_7322_SendCtrl_DisarmSendBuf_RMASK 0xFF
-#define QIB_7322_SendCtrl_SpecialTriggerEn_LSB 0x4
-#define QIB_7322_SendCtrl_SpecialTriggerEn_MSB 0x4
-#define QIB_7322_SendCtrl_SpecialTriggerEn_RMASK 0x1
-#define QIB_7322_SendCtrl_SendBufAvailUpd_LSB 0x2
-#define QIB_7322_SendCtrl_SendBufAvailUpd_MSB 0x2
-#define QIB_7322_SendCtrl_SendBufAvailUpd_RMASK 0x1
-#define QIB_7322_SendCtrl_SendIntBufAvail_LSB 0x1
-#define QIB_7322_SendCtrl_SendIntBufAvail_MSB 0x1
-#define QIB_7322_SendCtrl_SendIntBufAvail_RMASK 0x1
-
-#define QIB_7322_SendBufBase_OFFS 0x1C8
-#define QIB_7322_SendBufBase_DEF 0x0018000000100000
-#define QIB_7322_SendBufBase_BaseAddr_LargePIO_LSB 0x20
-#define QIB_7322_SendBufBase_BaseAddr_LargePIO_MSB 0x34
-#define QIB_7322_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
-#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
-#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_MSB 0x14
-#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
-
-#define QIB_7322_SendBufSize_OFFS 0x1D0
-#define QIB_7322_SendBufSize_DEF 0x0000108000000880
-#define QIB_7322_SendBufSize_Size_LargePIO_LSB 0x20
-#define QIB_7322_SendBufSize_Size_LargePIO_MSB 0x2C
-#define QIB_7322_SendBufSize_Size_LargePIO_RMASK 0x1FFF
-#define QIB_7322_SendBufSize_Size_SmallPIO_LSB 0x0
-#define QIB_7322_SendBufSize_Size_SmallPIO_MSB 0xB
-#define QIB_7322_SendBufSize_Size_SmallPIO_RMASK 0xFFF
-
-#define QIB_7322_SendBufCnt_OFFS 0x1D8
-#define QIB_7322_SendBufCnt_DEF 0x0000002000000080
-#define QIB_7322_SendBufCnt_Num_LargeBuffers_LSB 0x20
-#define QIB_7322_SendBufCnt_Num_LargeBuffers_MSB 0x25
-#define QIB_7322_SendBufCnt_Num_LargeBuffers_RMASK 0x3F
-#define QIB_7322_SendBufCnt_Num_SmallBuffers_LSB 0x0
-#define QIB_7322_SendBufCnt_Num_SmallBuffers_MSB 0x8
-#define QIB_7322_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
-
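The reset defaults above encode the chip's PIO send-buffer geometry directly. Decoding them with the field positions given: SendBufSize_DEF 0x0000108000000880 splits into Size_LargePIO = 0x1080 (4224 bytes, bits 32-44) and Size_SmallPIO = 0x880 (2176 bytes, bits 0-11); SendBufCnt_DEF yields 0x20 (32) large and 0x80 (128) small buffers; and SendBufBase_DEF places them at chip addresses 0x180000 and 0x100000 respectively.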
-#define QIB_7322_SendBufAvailAddr_OFFS 0x1E0
-#define QIB_7322_SendBufAvailAddr_DEF 0x0000000000000000
-#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
-#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_MSB 0x27
-#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
-
-#define QIB_7322_SendBufErr0_OFFS 0x240
-#define QIB_7322_SendBufErr0_DEF 0x0000000000000000
-#define QIB_7322_SendBufErr0_SendBufErr_63_0_LSB 0x0
-#define QIB_7322_SendBufErr0_SendBufErr_63_0_MSB 0x3F
-#define QIB_7322_SendBufErr0_SendBufErr_63_0_RMASK 0x0
-
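Every field in this header is described by the same triple: _LSB (lowest bit position), _MSB (highest), and _RMASK (the field mask right-justified at bit 0). One edge case is visible directly above: SendBufErr0's full 64-bit field carries an _RMASK of 0x0, evidently because the generator's (1 << width) - 1 wraps to zero at width 64, so generic accessors must treat a zero mask as "all 64 bits". A sketch of such helpers (names hypothetical, not taken from the driver):

#include <linux/types.h>

/* A zero RMASK denotes a full 64-bit field (width-64 wraparound). */
static inline u64 qib_rmask(u64 rmask)
{
	return rmask ? rmask : ~0ULL;
}

static inline u64 qib_get_field(u64 reg, u64 rmask, unsigned int lsb)
{
	return (reg >> lsb) & qib_rmask(rmask);
}

static inline u64 qib_set_field(u64 reg, u64 rmask, unsigned int lsb, u64 val)
{
	u64 m = qib_rmask(rmask) << lsb;

	return (reg & ~m) | ((val << lsb) & m);
}

For instance, the 5-bit counter in the AvailUpdCount register defined just below reads as qib_get_field(reg, QIB_7322_AvailUpdCount_AvailUpdCount_RMASK, QIB_7322_AvailUpdCount_AvailUpdCount_LSB).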
-#define QIB_7322_AvailUpdCount_OFFS 0x268
-#define QIB_7322_AvailUpdCount_DEF 0x0000000000000000
-#define QIB_7322_AvailUpdCount_AvailUpdCount_LSB 0x0
-#define QIB_7322_AvailUpdCount_AvailUpdCount_MSB 0x4
-#define QIB_7322_AvailUpdCount_AvailUpdCount_RMASK 0x1F
-
-#define QIB_7322_RcvHdrAddr0_OFFS 0x280
-#define QIB_7322_RcvHdrAddr0_DEF 0x0000000000000000
-#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_LSB 0x2
-#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_MSB 0x27
-#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_RMASK 0x3FFFFFFFFF
-
-#define QIB_7322_RcvHdrTailAddr0_OFFS 0x340
-#define QIB_7322_RcvHdrTailAddr0_DEF 0x0000000000000000
-#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_LSB 0x2
-#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_MSB 0x27
-#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_RMASK 0x3FFFFFFFFF
-
-#define QIB_7322_ahb_access_ctrl_OFFS 0x460
-#define QIB_7322_ahb_access_ctrl_DEF 0x0000000000000000
-#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_LSB 0x1
-#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_MSB 0x2
-#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_RMASK 0x3
-#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_LSB 0x0
-#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_MSB 0x0
-#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_RMASK 0x1
-
-#define QIB_7322_ahb_transaction_reg_OFFS 0x468
-#define QIB_7322_ahb_transaction_reg_DEF 0x0000000080000000
-#define QIB_7322_ahb_transaction_reg_ahb_data_LSB 0x20
-#define QIB_7322_ahb_transaction_reg_ahb_data_MSB 0x3F
-#define QIB_7322_ahb_transaction_reg_ahb_data_RMASK 0xFFFFFFFF
-#define QIB_7322_ahb_transaction_reg_ahb_rdy_LSB 0x1F
-#define QIB_7322_ahb_transaction_reg_ahb_rdy_MSB 0x1F
-#define QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK 0x1
-#define QIB_7322_ahb_transaction_reg_ahb_req_err_LSB 0x1E
-#define QIB_7322_ahb_transaction_reg_ahb_req_err_MSB 0x1E
-#define QIB_7322_ahb_transaction_reg_ahb_req_err_RMASK 0x1
-#define QIB_7322_ahb_transaction_reg_write_not_read_LSB 0x1B
-#define QIB_7322_ahb_transaction_reg_write_not_read_MSB 0x1B
-#define QIB_7322_ahb_transaction_reg_write_not_read_RMASK 0x1
-#define QIB_7322_ahb_transaction_reg_ahb_address_LSB 0x10
-#define QIB_7322_ahb_transaction_reg_ahb_address_MSB 0x1A
-#define QIB_7322_ahb_transaction_reg_ahb_address_RMASK 0x7FF
-
-#define QIB_7322_SPC_JTAG_ACCESS_REG_OFFS 0x470
-#define QIB_7322_SPC_JTAG_ACCESS_REG_DEF 0x0000000000000001
-#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_LSB 0xA
-#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_MSB 0xA
-#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_RMASK 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_LSB 0x5
-#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_MSB 0x9
-#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_RMASK 0x1F
-#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_LSB 0x3
-#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_MSB 0x4
-#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_RMASK 0x3
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_LSB 0x2
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_MSB 0x2
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_RMASK 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_LSB 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_MSB 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_RMASK 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_LSB 0x0
-#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_MSB 0x0
-#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_RMASK 0x1
-
-#define QIB_7322_SendCheckMask0_OFFS 0x4C0
-#define QIB_7322_SendCheckMask0_DEF 0x0000000000000000
-#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_LSB 0x0
-#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_MSB 0x3F
-#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_RMASK 0x0
-
-#define QIB_7322_SendGRHCheckMask0_OFFS 0x4E0
-#define QIB_7322_SendGRHCheckMask0_DEF 0x0000000000000000
-#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_LSB 0x0
-#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_MSB 0x3F
-#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_RMASK 0x0
-
-#define QIB_7322_SendIBPacketMask0_OFFS 0x500
-#define QIB_7322_SendIBPacketMask0_DEF 0x0000000000000000
-#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_LSB 0x0
-#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_MSB 0x3F
-#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_RMASK 0x0
-
-#define QIB_7322_IntRedirect0_OFFS 0x540
-#define QIB_7322_IntRedirect0_DEF 0x0000000000000000
-#define QIB_7322_IntRedirect0_vec11_LSB 0x37
-#define QIB_7322_IntRedirect0_vec11_MSB 0x3B
-#define QIB_7322_IntRedirect0_vec11_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec10_LSB 0x32
-#define QIB_7322_IntRedirect0_vec10_MSB 0x36
-#define QIB_7322_IntRedirect0_vec10_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec9_LSB 0x2D
-#define QIB_7322_IntRedirect0_vec9_MSB 0x31
-#define QIB_7322_IntRedirect0_vec9_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec8_LSB 0x28
-#define QIB_7322_IntRedirect0_vec8_MSB 0x2C
-#define QIB_7322_IntRedirect0_vec8_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec7_LSB 0x23
-#define QIB_7322_IntRedirect0_vec7_MSB 0x27
-#define QIB_7322_IntRedirect0_vec7_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec6_LSB 0x1E
-#define QIB_7322_IntRedirect0_vec6_MSB 0x22
-#define QIB_7322_IntRedirect0_vec6_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec5_LSB 0x19
-#define QIB_7322_IntRedirect0_vec5_MSB 0x1D
-#define QIB_7322_IntRedirect0_vec5_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec4_LSB 0x14
-#define QIB_7322_IntRedirect0_vec4_MSB 0x18
-#define QIB_7322_IntRedirect0_vec4_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec3_LSB 0xF
-#define QIB_7322_IntRedirect0_vec3_MSB 0x13
-#define QIB_7322_IntRedirect0_vec3_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec2_LSB 0xA
-#define QIB_7322_IntRedirect0_vec2_MSB 0xE
-#define QIB_7322_IntRedirect0_vec2_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec1_LSB 0x5
-#define QIB_7322_IntRedirect0_vec1_MSB 0x9
-#define QIB_7322_IntRedirect0_vec1_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec0_LSB 0x0
-#define QIB_7322_IntRedirect0_vec0_MSB 0x4
-#define QIB_7322_IntRedirect0_vec0_RMASK 0x1F
-
-#define QIB_7322_Int_Granted_OFFS 0x570
-#define QIB_7322_Int_Granted_DEF 0x0000000000000000
-
-#define QIB_7322_vec_clr_without_int_OFFS 0x578
-#define QIB_7322_vec_clr_without_int_DEF 0x0000000000000000
-
-#define QIB_7322_DCACtrlA_OFFS 0x580
-#define QIB_7322_DCACtrlA_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_LSB 0x4
-#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_MSB 0x4
-#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_RMASK 0x1
-#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_LSB 0x3
-#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_MSB 0x3
-#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_RMASK 0x1
-#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_LSB 0x2
-#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_MSB 0x2
-#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_RMASK 0x1
-#define QIB_7322_DCACtrlA_EagerDCAEnable_LSB 0x1
-#define QIB_7322_DCACtrlA_EagerDCAEnable_MSB 0x1
-#define QIB_7322_DCACtrlA_EagerDCAEnable_RMASK 0x1
-#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_LSB 0x0
-#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_MSB 0x0
-#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_RMASK 0x1
-
-#define QIB_7322_DCACtrlB_OFFS 0x588
-#define QIB_7322_DCACtrlB_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_LSB 0x36
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_MSB 0x3B
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_LSB 0x2E
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_MSB 0x35
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_LSB 0x28
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_MSB 0x2D
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_RMASK 0xFF
-
-#define QIB_7322_DCACtrlC_OFFS 0x590
-#define QIB_7322_DCACtrlC_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_LSB 0x36
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_MSB 0x3B
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_LSB 0x2E
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_MSB 0x35
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_LSB 0x28
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_MSB 0x2D
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_RMASK 0xFF
-
-#define QIB_7322_DCACtrlD_OFFS 0x598
-#define QIB_7322_DCACtrlD_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_LSB 0x36
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_MSB 0x3B
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_LSB 0x2E
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_MSB 0x35
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_LSB 0x28
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_MSB 0x2D
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_RMASK 0xFF
-
-#define QIB_7322_DCACtrlE_OFFS 0x5A0
-#define QIB_7322_DCACtrlE_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_LSB 0x36
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_MSB 0x3B
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_LSB 0x2E
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_MSB 0x35
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_LSB 0x28
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_MSB 0x2D
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_RMASK 0xFF
-
-#define QIB_7322_DCACtrlF_OFFS 0x5A8
-#define QIB_7322_DCACtrlF_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlF_SendDma1DCAOPH_LSB 0x28
-#define QIB_7322_DCACtrlF_SendDma1DCAOPH_MSB 0x2F
-#define QIB_7322_DCACtrlF_SendDma1DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlF_SendDma0DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlF_SendDma0DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlF_SendDma0DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_RMASK 0xFF
-
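DCACtrlB through DCACtrlE each pack DCA hints for four receive header queues (an 8-bit OPH plus a 6-bit XfrCnt per queue, two pairs in each 32-bit half), and DCACtrlF carries queues 16-17 alongside the SendDma OPH fields. The register offset and bit position for a given queue follow directly from the LSB values listed above; a sketch of that arithmetic (helper name hypothetical):

#include <linux/types.h>

/* Locate the register offset and the OPH field's LSB for RcvHdrq 'hdrq'
 * (0-17); the 6-bit XfrCnt field starts 8 bits above the OPH field.
 */
static inline void qib_dca_slot(unsigned int hdrq, u32 *offs, unsigned int *oph_lsb)
{
	unsigned int slot;

	if (hdrq < 16) {		/* DCACtrlB..DCACtrlE, four queues each */
		*offs = QIB_7322_DCACtrlB_OFFS + (hdrq / 4) * 8;
		slot = hdrq % 4;
	} else {			/* DCACtrlF holds queues 16 and 17 */
		*offs = QIB_7322_DCACtrlF_OFFS;
		slot = hdrq - 16;
	}
	/* pairs sit at bit 0 and bit 14 of each 32-bit half */
	*oph_lsb = (slot % 2) * 14 + (slot / 2) * 32;
}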
-#define QIB_7322_RcvAvailTimeOut0_OFFS 0xC00
-#define QIB_7322_RcvAvailTimeOut0_DEF 0x0000000000000000
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_LSB 0x10
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_MSB 0x1F
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_RMASK 0xFFFF
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_LSB 0x0
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_MSB 0xF
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_RMASK 0xFFFF
-
-#define QIB_7322_CntrRegBase_0_OFFS 0x1028
-#define QIB_7322_CntrRegBase_0_DEF 0x0000000000012000
-
-#define QIB_7322_ErrMask_0_OFFS 0x1080
-#define QIB_7322_ErrMask_0_DEF 0x0000000000000000
-#define QIB_7322_ErrMask_0_IBStatusChangedMask_LSB 0x3A
-#define QIB_7322_ErrMask_0_IBStatusChangedMask_MSB 0x3A
-#define QIB_7322_ErrMask_0_IBStatusChangedMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SHeadersErrMask_LSB 0x39
-#define QIB_7322_ErrMask_0_SHeadersErrMask_MSB 0x39
-#define QIB_7322_ErrMask_0_SHeadersErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_LSB 0x36
-#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_MSB 0x36
-#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaHaltErrMask_LSB 0x31
-#define QIB_7322_ErrMask_0_SDmaHaltErrMask_MSB 0x31
-#define QIB_7322_ErrMask_0_SDmaHaltErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_LSB 0x30
-#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_MSB 0x30
-#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_LSB 0x2F
-#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_MSB 0x2F
-#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_LSB 0x2E
-#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_MSB 0x2E
-#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_LSB 0x2D
-#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_MSB 0x2D
-#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_LSB 0x2C
-#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_MSB 0x2C
-#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDma1stDescErrMask_LSB 0x2B
-#define QIB_7322_ErrMask_0_SDma1stDescErrMask_MSB 0x2B
-#define QIB_7322_ErrMask_0_SDma1stDescErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaBaseErrMask_LSB 0x2A
-#define QIB_7322_ErrMask_0_SDmaBaseErrMask_MSB 0x2A
-#define QIB_7322_ErrMask_0_SDmaBaseErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_LSB 0x29
-#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_MSB 0x29
-#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_LSB 0x28
-#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_MSB 0x28
-#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_LSB 0x27
-#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_MSB 0x27
-#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_LSB 0x26
-#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_MSB 0x26
-#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_LSB 0x25
-#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_MSB 0x25
-#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_LSB 0x24
-#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_MSB 0x24
-#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_LSB 0x22
-#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_MSB 0x22
-#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_LSB 0x21
-#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_MSB 0x21
-#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendPktLenErrMask_LSB 0x20
-#define QIB_7322_ErrMask_0_SendPktLenErrMask_MSB 0x20
-#define QIB_7322_ErrMask_0_SendPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendUnderRunErrMask_LSB 0x1F
-#define QIB_7322_ErrMask_0_SendUnderRunErrMask_MSB 0x1F
-#define QIB_7322_ErrMask_0_SendUnderRunErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_LSB 0x1E
-#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_MSB 0x1E
-#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_LSB 0x1D
-#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_MSB 0x1D
-#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_LSB 0x11
-#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_MSB 0x11
-#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvHdrErrMask_LSB 0x10
-#define QIB_7322_ErrMask_0_RcvHdrErrMask_MSB 0x10
-#define QIB_7322_ErrMask_0_RcvHdrErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_LSB 0xF
-#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_MSB 0xF
-#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvBadTidErrMask_LSB 0xE
-#define QIB_7322_ErrMask_0_RcvBadTidErrMask_MSB 0xE
-#define QIB_7322_ErrMask_0_RcvBadTidErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_LSB 0xB
-#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_MSB 0xB
-#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_LSB 0xA
-#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_MSB 0xA
-#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvEBPErrMask_LSB 0x9
-#define QIB_7322_ErrMask_0_RcvEBPErrMask_MSB 0x9
-#define QIB_7322_ErrMask_0_RcvEBPErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_LSB 0x8
-#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_MSB 0x8
-#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_LSB 0x7
-#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_MSB 0x7
-#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_LSB 0x6
-#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_MSB 0x6
-#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_LSB 0x5
-#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_MSB 0x5
-#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_LSB 0x4
-#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_MSB 0x4
-#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_LSB 0x3
-#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_MSB 0x3
-#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvICRCErrMask_LSB 0x2
-#define QIB_7322_ErrMask_0_RcvICRCErrMask_MSB 0x2
-#define QIB_7322_ErrMask_0_RcvICRCErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvVCRCErrMask_LSB 0x1
-#define QIB_7322_ErrMask_0_RcvVCRCErrMask_MSB 0x1
-#define QIB_7322_ErrMask_0_RcvVCRCErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvFormatErrMask_LSB 0x0
-#define QIB_7322_ErrMask_0_RcvFormatErrMask_MSB 0x0
-#define QIB_7322_ErrMask_0_RcvFormatErrMask_RMASK 0x1
-
-#define QIB_7322_ErrStatus_0_OFFS 0x1088
-#define QIB_7322_ErrStatus_0_DEF 0x0000000000000000
-#define QIB_7322_ErrStatus_0_IBStatusChanged_LSB 0x3A
-#define QIB_7322_ErrStatus_0_IBStatusChanged_MSB 0x3A
-#define QIB_7322_ErrStatus_0_IBStatusChanged_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SHeadersErr_LSB 0x39
-#define QIB_7322_ErrStatus_0_SHeadersErr_MSB 0x39
-#define QIB_7322_ErrStatus_0_SHeadersErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_LSB 0x36
-#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_MSB 0x36
-#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaHaltErr_LSB 0x31
-#define QIB_7322_ErrStatus_0_SDmaHaltErr_MSB 0x31
-#define QIB_7322_ErrStatus_0_SDmaHaltErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_LSB 0x30
-#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_MSB 0x30
-#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_LSB 0x2F
-#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_MSB 0x2F
-#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_LSB 0x2E
-#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_MSB 0x2E
-#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaDwEnErr_LSB 0x2D
-#define QIB_7322_ErrStatus_0_SDmaDwEnErr_MSB 0x2D
-#define QIB_7322_ErrStatus_0_SDmaDwEnErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_LSB 0x2C
-#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_MSB 0x2C
-#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDma1stDescErr_LSB 0x2B
-#define QIB_7322_ErrStatus_0_SDma1stDescErr_MSB 0x2B
-#define QIB_7322_ErrStatus_0_SDma1stDescErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaBaseErr_LSB 0x2A
-#define QIB_7322_ErrStatus_0_SDmaBaseErr_MSB 0x2A
-#define QIB_7322_ErrStatus_0_SDmaBaseErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_LSB 0x29
-#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_MSB 0x29
-#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_LSB 0x28
-#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_MSB 0x28
-#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_LSB 0x27
-#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_MSB 0x27
-#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendBufMisuseErr_LSB 0x26
-#define QIB_7322_ErrStatus_0_SendBufMisuseErr_MSB 0x26
-#define QIB_7322_ErrStatus_0_SendBufMisuseErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_LSB 0x25
-#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_MSB 0x25
-#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_LSB 0x24
-#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_MSB 0x24
-#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_LSB 0x22
-#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_MSB 0x22
-#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_LSB 0x21
-#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_MSB 0x21
-#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendPktLenErr_LSB 0x20
-#define QIB_7322_ErrStatus_0_SendPktLenErr_MSB 0x20
-#define QIB_7322_ErrStatus_0_SendPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendUnderRunErr_LSB 0x1F
-#define QIB_7322_ErrStatus_0_SendUnderRunErr_MSB 0x1F
-#define QIB_7322_ErrStatus_0_SendUnderRunErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_LSB 0x1E
-#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_MSB 0x1E
-#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendMinPktLenErr_LSB 0x1D
-#define QIB_7322_ErrStatus_0_SendMinPktLenErr_MSB 0x1D
-#define QIB_7322_ErrStatus_0_SendMinPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_LSB 0x11
-#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_MSB 0x11
-#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvHdrErr_LSB 0x10
-#define QIB_7322_ErrStatus_0_RcvHdrErr_MSB 0x10
-#define QIB_7322_ErrStatus_0_RcvHdrErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvHdrLenErr_LSB 0xF
-#define QIB_7322_ErrStatus_0_RcvHdrLenErr_MSB 0xF
-#define QIB_7322_ErrStatus_0_RcvHdrLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvBadTidErr_LSB 0xE
-#define QIB_7322_ErrStatus_0_RcvBadTidErr_MSB 0xE
-#define QIB_7322_ErrStatus_0_RcvBadTidErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvBadVersionErr_LSB 0xB
-#define QIB_7322_ErrStatus_0_RcvBadVersionErr_MSB 0xB
-#define QIB_7322_ErrStatus_0_RcvBadVersionErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvIBFlowErr_LSB 0xA
-#define QIB_7322_ErrStatus_0_RcvIBFlowErr_MSB 0xA
-#define QIB_7322_ErrStatus_0_RcvIBFlowErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvEBPErr_LSB 0x9
-#define QIB_7322_ErrStatus_0_RcvEBPErr_MSB 0x9
-#define QIB_7322_ErrStatus_0_RcvEBPErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_LSB 0x8
-#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_MSB 0x8
-#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_LSB 0x7
-#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_MSB 0x7
-#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_LSB 0x6
-#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_MSB 0x6
-#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_LSB 0x5
-#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_MSB 0x5
-#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_LSB 0x4
-#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_MSB 0x4
-#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_LSB 0x3
-#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_MSB 0x3
-#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvICRCErr_LSB 0x2
-#define QIB_7322_ErrStatus_0_RcvICRCErr_MSB 0x2
-#define QIB_7322_ErrStatus_0_RcvICRCErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvVCRCErr_LSB 0x1
-#define QIB_7322_ErrStatus_0_RcvVCRCErr_MSB 0x1
-#define QIB_7322_ErrStatus_0_RcvVCRCErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvFormatErr_LSB 0x0
-#define QIB_7322_ErrStatus_0_RcvFormatErr_MSB 0x0
-#define QIB_7322_ErrStatus_0_RcvFormatErr_RMASK 0x1
-
-#define QIB_7322_ErrClear_0_OFFS 0x1090
-#define QIB_7322_ErrClear_0_DEF 0x0000000000000000
-#define QIB_7322_ErrClear_0_IBStatusChangedClear_LSB 0x3A
-#define QIB_7322_ErrClear_0_IBStatusChangedClear_MSB 0x3A
-#define QIB_7322_ErrClear_0_IBStatusChangedClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SHeadersErrClear_LSB 0x39
-#define QIB_7322_ErrClear_0_SHeadersErrClear_MSB 0x39
-#define QIB_7322_ErrClear_0_SHeadersErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_LSB 0x36
-#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_MSB 0x36
-#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaHaltErrClear_LSB 0x31
-#define QIB_7322_ErrClear_0_SDmaHaltErrClear_MSB 0x31
-#define QIB_7322_ErrClear_0_SDmaHaltErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_LSB 0x30
-#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_MSB 0x30
-#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_LSB 0x2F
-#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_MSB 0x2F
-#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_LSB 0x2E
-#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_MSB 0x2E
-#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_LSB 0x2D
-#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_MSB 0x2D
-#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_LSB 0x2C
-#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_MSB 0x2C
-#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDma1stDescErrClear_LSB 0x2B
-#define QIB_7322_ErrClear_0_SDma1stDescErrClear_MSB 0x2B
-#define QIB_7322_ErrClear_0_SDma1stDescErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaBaseErrClear_LSB 0x2A
-#define QIB_7322_ErrClear_0_SDmaBaseErrClear_MSB 0x2A
-#define QIB_7322_ErrClear_0_SDmaBaseErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_LSB 0x29
-#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_MSB 0x29
-#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_LSB 0x28
-#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_MSB 0x28
-#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_LSB 0x27
-#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_MSB 0x27
-#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_LSB 0x26
-#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_MSB 0x26
-#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_LSB 0x25
-#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_MSB 0x25
-#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_LSB 0x24
-#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_MSB 0x24
-#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_LSB 0x22
-#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_MSB 0x22
-#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_LSB 0x21
-#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_MSB 0x21
-#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendPktLenErrClear_LSB 0x20
-#define QIB_7322_ErrClear_0_SendPktLenErrClear_MSB 0x20
-#define QIB_7322_ErrClear_0_SendPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendUnderRunErrClear_LSB 0x1F
-#define QIB_7322_ErrClear_0_SendUnderRunErrClear_MSB 0x1F
-#define QIB_7322_ErrClear_0_SendUnderRunErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_LSB 0x1E
-#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_MSB 0x1E
-#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_LSB 0x1D
-#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_MSB 0x1D
-#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_LSB 0x11
-#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_MSB 0x11
-#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvHdrErrClear_LSB 0x10
-#define QIB_7322_ErrClear_0_RcvHdrErrClear_MSB 0x10
-#define QIB_7322_ErrClear_0_RcvHdrErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_LSB 0xF
-#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_MSB 0xF
-#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvBadTidErrClear_LSB 0xE
-#define QIB_7322_ErrClear_0_RcvBadTidErrClear_MSB 0xE
-#define QIB_7322_ErrClear_0_RcvBadTidErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_LSB 0xB
-#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_MSB 0xB
-#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_LSB 0xA
-#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_MSB 0xA
-#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvEBPErrClear_LSB 0x9
-#define QIB_7322_ErrClear_0_RcvEBPErrClear_MSB 0x9
-#define QIB_7322_ErrClear_0_RcvEBPErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_LSB 0x8
-#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_MSB 0x8
-#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_LSB 0x7
-#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_MSB 0x7
-#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_LSB 0x6
-#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_MSB 0x6
-#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_LSB 0x5
-#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_MSB 0x5
-#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_LSB 0x4
-#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_MSB 0x4
-#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_LSB 0x3
-#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_MSB 0x3
-#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvICRCErrClear_LSB 0x2
-#define QIB_7322_ErrClear_0_RcvICRCErrClear_MSB 0x2
-#define QIB_7322_ErrClear_0_RcvICRCErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvVCRCErrClear_LSB 0x1
-#define QIB_7322_ErrClear_0_RcvVCRCErrClear_MSB 0x1
-#define QIB_7322_ErrClear_0_RcvVCRCErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvFormatErrClear_LSB 0x0
-#define QIB_7322_ErrClear_0_RcvFormatErrClear_MSB 0x0
-#define QIB_7322_ErrClear_0_RcvFormatErrClear_RMASK 0x1
-
-#define QIB_7322_TXEStatus_0_OFFS 0x10B8
-#define QIB_7322_TXEStatus_0_DEF 0x0000000XC00080FF
-#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_LSB 0x1F
-#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_MSB 0x1F
-#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_RMASK 0x1
-#define QIB_7322_TXEStatus_0_RmFifoEmpty_LSB 0x1E
-#define QIB_7322_TXEStatus_0_RmFifoEmpty_MSB 0x1E
-#define QIB_7322_TXEStatus_0_RmFifoEmpty_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_LSB 0xF
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_MSB 0xF
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_LSB 0x7
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_MSB 0x7
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_LSB 0x6
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_MSB 0x6
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_LSB 0x5
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_MSB 0x5
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_LSB 0x4
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_MSB 0x4
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_LSB 0x3
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_MSB 0x3
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_LSB 0x2
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_MSB 0x2
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_LSB 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_MSB 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_LSB 0x0
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_MSB 0x0
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_RMASK 0x1
-
-#define QIB_7322_RcvCtrl_0_OFFS 0x1100
-#define QIB_7322_RcvCtrl_0_DEF 0x0000000000000000
-#define QIB_7322_RcvCtrl_0_RcvResetCredit_LSB 0x2A
-#define QIB_7322_RcvCtrl_0_RcvResetCredit_MSB 0x2A
-#define QIB_7322_RcvCtrl_0_RcvResetCredit_RMASK 0x1
-#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_LSB 0x29
-#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_MSB 0x29
-#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_RMASK 0x1
-#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_LSB 0x28
-#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_MSB 0x28
-#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_RMASK 0x1
-#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_LSB 0x27
-#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_MSB 0x27
-#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_RMASK 0x1
-#define QIB_7322_RcvCtrl_0_ContextEnableUser_LSB 0x2
-#define QIB_7322_RcvCtrl_0_ContextEnableUser_MSB 0x11
-#define QIB_7322_RcvCtrl_0_ContextEnableUser_RMASK 0xFFFF
-#define QIB_7322_RcvCtrl_0_ContextEnableKernel_LSB 0x0
-#define QIB_7322_RcvCtrl_0_ContextEnableKernel_MSB 0x0
-#define QIB_7322_RcvCtrl_0_ContextEnableKernel_RMASK 0x1
-
-#define QIB_7322_RcvBTHQP_0_OFFS 0x1108
-#define QIB_7322_RcvBTHQP_0_DEF 0x0000000000000000
-#define QIB_7322_RcvBTHQP_0_RcvBTHQP_LSB 0x0
-#define QIB_7322_RcvBTHQP_0_RcvBTHQP_MSB 0x17
-#define QIB_7322_RcvBTHQP_0_RcvBTHQP_RMASK 0xFFFFFF
-
-#define QIB_7322_RcvQPMapTableA_0_OFFS 0x1110
-#define QIB_7322_RcvQPMapTableA_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_LSB 0x19
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_MSB 0x1D
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_LSB 0x14
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_MSB 0x18
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_LSB 0xF
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_MSB 0x13
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_LSB 0xA
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_MSB 0xE
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_LSB 0x5
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_MSB 0x9
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_LSB 0x0
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_MSB 0x4
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableB_0_OFFS 0x1118
-#define QIB_7322_RcvQPMapTableB_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_LSB 0x19
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_MSB 0x1D
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_LSB 0x14
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_MSB 0x18
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_LSB 0xF
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_MSB 0x13
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_LSB 0xA
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_MSB 0xE
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_LSB 0x5
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_MSB 0x9
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_LSB 0x0
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_MSB 0x4
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableC_0_OFFS 0x1120
-#define QIB_7322_RcvQPMapTableC_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_LSB 0x19
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_MSB 0x1D
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_LSB 0x14
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_MSB 0x18
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_LSB 0xF
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_MSB 0x13
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_LSB 0xA
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_MSB 0xE
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_LSB 0x5
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_MSB 0x9
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_LSB 0x0
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_MSB 0x4
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableD_0_OFFS 0x1128
-#define QIB_7322_RcvQPMapTableD_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_LSB 0x19
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_MSB 0x1D
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_LSB 0x14
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_MSB 0x18
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_LSB 0xF
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_MSB 0x13
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_LSB 0xA
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_MSB 0xE
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_LSB 0x5
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_MSB 0x9
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_LSB 0x0
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_MSB 0x4
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableE_0_OFFS 0x1130
-#define QIB_7322_RcvQPMapTableE_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_LSB 0x19
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_MSB 0x1D
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_LSB 0x14
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_MSB 0x18
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_LSB 0xF
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_MSB 0x13
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_LSB 0xA
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_MSB 0xE
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_LSB 0x5
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_MSB 0x9
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_LSB 0x0
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_MSB 0x4
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableF_0_OFFS 0x1138
-#define QIB_7322_RcvQPMapTableF_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_LSB 0x5
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_MSB 0x9
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_LSB 0x0
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_MSB 0x4
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_RMASK 0x1F
-
-#define QIB_7322_PSStat_0_OFFS 0x1140
-#define QIB_7322_PSStat_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSStart_0_OFFS 0x1148
-#define QIB_7322_PSStart_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSInterval_0_OFFS 0x1150
-#define QIB_7322_PSInterval_0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvStatus_0_OFFS 0x1160
-#define QIB_7322_RcvStatus_0_DEF 0x0000000000000000
-#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_LSB 0x1
-#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_MSB 0x5
-#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_RMASK 0x1F
-#define QIB_7322_RcvStatus_0_RxPktInProgress_LSB 0x0
-#define QIB_7322_RcvStatus_0_RxPktInProgress_MSB 0x0
-#define QIB_7322_RcvStatus_0_RxPktInProgress_RMASK 0x1
-
-#define QIB_7322_RcvPartitionKey_0_OFFS 0x1168
-#define QIB_7322_RcvPartitionKey_0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvQPMulticastContext_0_OFFS 0x1170
-#define QIB_7322_RcvQPMulticastContext_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_LSB 0x0
-#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_MSB 0x4
-#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_RMASK 0x1F
-
-#define QIB_7322_RcvPktLEDCnt_0_OFFS 0x1178
-#define QIB_7322_RcvPktLEDCnt_0_DEF 0x0000000000000000
-#define QIB_7322_RcvPktLEDCnt_0_ONperiod_LSB 0x20
-#define QIB_7322_RcvPktLEDCnt_0_ONperiod_MSB 0x3F
-#define QIB_7322_RcvPktLEDCnt_0_ONperiod_RMASK 0xFFFFFFFF
-#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_LSB 0x0
-#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_MSB 0x1F
-#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_RMASK 0xFFFFFFFF
-
-#define QIB_7322_SendDmaIdleCnt_0_OFFS 0x1180
-#define QIB_7322_SendDmaIdleCnt_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_LSB 0x0
-#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_MSB 0xF
-#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaReloadCnt_0_OFFS 0x1188
-#define QIB_7322_SendDmaReloadCnt_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_LSB 0x0
-#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_MSB 0xF
-#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaDescCnt_0_OFFS 0x1190
-#define QIB_7322_SendDmaDescCnt_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_LSB 0x0
-#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_MSB 0xF
-#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_RMASK 0xFFFF
-
-#define QIB_7322_SendCtrl_0_OFFS 0x11C0
-#define QIB_7322_SendCtrl_0_DEF 0x0000000000000000
-#define QIB_7322_SendCtrl_0_IBVLArbiterEn_LSB 0xF
-#define QIB_7322_SendCtrl_0_IBVLArbiterEn_MSB 0xF
-#define QIB_7322_SendCtrl_0_IBVLArbiterEn_RMASK 0x1
-#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_LSB 0xE
-#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_MSB 0xE
-#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_RMASK 0x1
-#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_LSB 0xD
-#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_MSB 0xD
-#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaHalt_LSB 0xC
-#define QIB_7322_SendCtrl_0_SDmaHalt_MSB 0xC
-#define QIB_7322_SendCtrl_0_SDmaHalt_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaEnable_LSB 0xB
-#define QIB_7322_SendCtrl_0_SDmaEnable_MSB 0xB
-#define QIB_7322_SendCtrl_0_SDmaEnable_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_LSB 0xA
-#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_MSB 0xA
-#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaIntEnable_LSB 0x9
-#define QIB_7322_SendCtrl_0_SDmaIntEnable_MSB 0x9
-#define QIB_7322_SendCtrl_0_SDmaIntEnable_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaCleanup_LSB 0x8
-#define QIB_7322_SendCtrl_0_SDmaCleanup_MSB 0x8
-#define QIB_7322_SendCtrl_0_SDmaCleanup_RMASK 0x1
-#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_LSB 0x7
-#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_MSB 0x7
-#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SendEnable_LSB 0x3
-#define QIB_7322_SendCtrl_0_SendEnable_MSB 0x3
-#define QIB_7322_SendCtrl_0_SendEnable_RMASK 0x1
-#define QIB_7322_SendCtrl_0_TxeBypassIbc_LSB 0x1
-#define QIB_7322_SendCtrl_0_TxeBypassIbc_MSB 0x1
-#define QIB_7322_SendCtrl_0_TxeBypassIbc_RMASK 0x1
-#define QIB_7322_SendCtrl_0_TxeAbortIbc_LSB 0x0
-#define QIB_7322_SendCtrl_0_TxeAbortIbc_MSB 0x0
-#define QIB_7322_SendCtrl_0_TxeAbortIbc_RMASK 0x1
-
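For single-bit control fields such as SendCtrl_0.SDmaEnable, the same triples support a read-modify-write: shifting the right-justified mask left by the field's LSB yields the in-place mask. A sketch under that assumption; read_reg() and write_reg() are hypothetical MMIO accessors standing in for whatever the driver actually uses:

	/* Enable or disable send DMA via SendCtrl_0 (read-modify-write).
	 * read_reg()/write_reg() are hypothetical MMIO helpers.
	 */
	static inline void qib_set_sdma_enable(int on)
	{
		u64 mask = (u64)QIB_7322_SendCtrl_0_SDmaEnable_RMASK <<
			   QIB_7322_SendCtrl_0_SDmaEnable_LSB;
		u64 v = read_reg(QIB_7322_SendCtrl_0_OFFS);

		if (on)
			v |= mask;
		else
			v &= ~mask;
		write_reg(QIB_7322_SendCtrl_0_OFFS, v);
	}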
-#define QIB_7322_SendDmaBase_0_OFFS 0x11F8
-#define QIB_7322_SendDmaBase_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaBase_0_SendDmaBase_LSB 0x0
-#define QIB_7322_SendDmaBase_0_SendDmaBase_MSB 0x2F
-#define QIB_7322_SendDmaBase_0_SendDmaBase_RMASK 0xFFFFFFFFFFFF
-
-#define QIB_7322_SendDmaLenGen_0_OFFS 0x1200
-#define QIB_7322_SendDmaLenGen_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaLenGen_0_Generation_LSB 0x10
-#define QIB_7322_SendDmaLenGen_0_Generation_MSB 0x12
-#define QIB_7322_SendDmaLenGen_0_Generation_RMASK 0x7
-#define QIB_7322_SendDmaLenGen_0_Length_LSB 0x0
-#define QIB_7322_SendDmaLenGen_0_Length_MSB 0xF
-#define QIB_7322_SendDmaLenGen_0_Length_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaTail_0_OFFS 0x1208
-#define QIB_7322_SendDmaTail_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaTail_0_SendDmaTail_LSB 0x0
-#define QIB_7322_SendDmaTail_0_SendDmaTail_MSB 0xF
-#define QIB_7322_SendDmaTail_0_SendDmaTail_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaHead_0_OFFS 0x1210
-#define QIB_7322_SendDmaHead_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_LSB 0x20
-#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_MSB 0x2F
-#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_RMASK 0xFFFF
-#define QIB_7322_SendDmaHead_0_SendDmaHead_LSB 0x0
-#define QIB_7322_SendDmaHead_0_SendDmaHead_MSB 0xF
-#define QIB_7322_SendDmaHead_0_SendDmaHead_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaHeadAddr_0_OFFS 0x1218
-#define QIB_7322_SendDmaHeadAddr_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_LSB 0x0
-#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_MSB 0x2F
-#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
-
-#define QIB_7322_SendDmaBufMask0_0_OFFS 0x1220
-#define QIB_7322_SendDmaBufMask0_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_LSB 0x0
-#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_MSB 0x3F
-#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_RMASK 0x0
-
-#define QIB_7322_SendDmaStatus_0_OFFS 0x1238
-#define QIB_7322_SendDmaStatus_0_DEF 0x0000000042000000
-#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_LSB 0x3F
-#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_MSB 0x3F
-#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_HaltInProg_LSB 0x3E
-#define QIB_7322_SendDmaStatus_0_HaltInProg_MSB 0x3E
-#define QIB_7322_SendDmaStatus_0_HaltInProg_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_LSB 0x3D
-#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_MSB 0x3D
-#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_LSB 0x2F
-#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_MSB 0x3C
-#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_RMASK 0x3FFF
-#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_LSB 0x28
-#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_MSB 0x2E
-#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_RMASK 0x7F
-#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_LSB 0x20
-#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_MSB 0x27
-#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_RMASK 0xFF
-#define QIB_7322_SendDmaStatus_0_ScbFull_LSB 0x1F
-#define QIB_7322_SendDmaStatus_0_ScbFull_MSB 0x1F
-#define QIB_7322_SendDmaStatus_0_ScbFull_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_ScbEmpty_LSB 0x1E
-#define QIB_7322_SendDmaStatus_0_ScbEmpty_MSB 0x1E
-#define QIB_7322_SendDmaStatus_0_ScbEmpty_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_ScbEntryValid_LSB 0x1D
-#define QIB_7322_SendDmaStatus_0_ScbEntryValid_MSB 0x1D
-#define QIB_7322_SendDmaStatus_0_ScbEntryValid_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_LSB 0x1C
-#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_MSB 0x1C
-#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_LSB 0x1B
-#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_MSB 0x1B
-#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_LSB 0x1A
-#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_MSB 0x1A
-#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_LSB 0x19
-#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_MSB 0x19
-#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoFull_LSB 0x18
-#define QIB_7322_SendDmaStatus_0_SplFifoFull_MSB 0x18
-#define QIB_7322_SendDmaStatus_0_SplFifoFull_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_LSB 0x10
-#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_MSB 0x17
-#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_RMASK 0xFF
-#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_LSB 0x0
-#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_MSB 0xF
-#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaPriorityThld_0_OFFS 0x1258
-#define QIB_7322_SendDmaPriorityThld_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_LSB 0x0
-#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_MSB 0x3
-#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_RMASK 0xF
-
-#define QIB_7322_SendHdrErrSymptom_0_OFFS 0x1260
-#define QIB_7322_SendHdrErrSymptom_0_DEF 0x0000000000000000
-#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_LSB 0x6
-#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_MSB 0x6
-#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_GRHFail_LSB 0x5
-#define QIB_7322_SendHdrErrSymptom_0_GRHFail_MSB 0x5
-#define QIB_7322_SendHdrErrSymptom_0_GRHFail_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_LSB 0x4
-#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_MSB 0x4
-#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_QPFail_LSB 0x3
-#define QIB_7322_SendHdrErrSymptom_0_QPFail_MSB 0x3
-#define QIB_7322_SendHdrErrSymptom_0_QPFail_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_LSB 0x2
-#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_MSB 0x2
-#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_LSB 0x1
-#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_MSB 0x1
-#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_LSB 0x0
-#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_MSB 0x0
-#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_RMASK 0x1
-
-#define QIB_7322_RxCreditVL0_0_OFFS 0x1280
-#define QIB_7322_RxCreditVL0_0_DEF 0x0000000000000000
-#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_LSB 0x10
-#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_MSB 0x1B
-#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_RMASK 0xFFF
-#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_LSB 0x0
-#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_MSB 0xB
-#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_RMASK 0xFFF
-
-#define QIB_7322_SendDmaBufUsed0_0_OFFS 0x1480
-#define QIB_7322_SendDmaBufUsed0_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_LSB 0x0
-#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_MSB 0x3F
-#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_RMASK 0x0
-
-#define QIB_7322_SendCheckControl_0_OFFS 0x14A8
-#define QIB_7322_SendCheckControl_0_DEF 0x0000000000000000
-#define QIB_7322_SendCheckControl_0_PKey_En_LSB 0x4
-#define QIB_7322_SendCheckControl_0_PKey_En_MSB 0x4
-#define QIB_7322_SendCheckControl_0_PKey_En_RMASK 0x1
-#define QIB_7322_SendCheckControl_0_BTHQP_En_LSB 0x3
-#define QIB_7322_SendCheckControl_0_BTHQP_En_MSB 0x3
-#define QIB_7322_SendCheckControl_0_BTHQP_En_RMASK 0x1
-#define QIB_7322_SendCheckControl_0_SLID_En_LSB 0x2
-#define QIB_7322_SendCheckControl_0_SLID_En_MSB 0x2
-#define QIB_7322_SendCheckControl_0_SLID_En_RMASK 0x1
-#define QIB_7322_SendCheckControl_0_RawIPV6_En_LSB 0x1
-#define QIB_7322_SendCheckControl_0_RawIPV6_En_MSB 0x1
-#define QIB_7322_SendCheckControl_0_RawIPV6_En_RMASK 0x1
-#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_LSB 0x0
-#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_MSB 0x0
-#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_RMASK 0x1
-
-#define QIB_7322_SendIBSLIDMask_0_OFFS 0x14B0
-#define QIB_7322_SendIBSLIDMask_0_DEF 0x0000000000000000
-#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_LSB 0x0
-#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_MSB 0xF
-#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK 0xFFFF
-
-#define QIB_7322_SendIBSLIDAssign_0_OFFS 0x14B8
-#define QIB_7322_SendIBSLIDAssign_0_DEF 0x0000000000000000
-#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_LSB 0x0
-#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_MSB 0xF
-#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK 0xFFFF
-
-#define QIB_7322_IBCStatusA_0_OFFS 0x1540
-#define QIB_7322_IBCStatusA_0_DEF 0x0000000000000X02
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_LSB 0x27
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_MSB 0x27
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_LSB 0x26
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_MSB 0x26
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_LSB 0x25
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_MSB 0x25
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_LSB 0x24
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_MSB 0x24
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_LSB 0x23
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_MSB 0x23
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_LSB 0x22
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_MSB 0x22
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_LSB 0x21
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_MSB 0x21
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_LSB 0x20
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_MSB 0x20
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxReady_LSB 0x1E
-#define QIB_7322_IBCStatusA_0_TxReady_MSB 0x1E
-#define QIB_7322_IBCStatusA_0_TxReady_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_LSB 0x1D
-#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_MSB 0x1D
-#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_LSB 0xF
-#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_MSB 0xF
-#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_ScrambleEn_LSB 0xE
-#define QIB_7322_IBCStatusA_0_ScrambleEn_MSB 0xE
-#define QIB_7322_IBCStatusA_0_ScrambleEn_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_LSB 0xD
-#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_MSB 0xD
-#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_LSB 0xC
-#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_MSB 0xC
-#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_LSB 0xA
-#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_MSB 0xA
-#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_LinkWidthActive_LSB 0x9
-#define QIB_7322_IBCStatusA_0_LinkWidthActive_MSB 0x9
-#define QIB_7322_IBCStatusA_0_LinkWidthActive_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_LinkSpeedActive_LSB 0x8
-#define QIB_7322_IBCStatusA_0_LinkSpeedActive_MSB 0x8
-#define QIB_7322_IBCStatusA_0_LinkSpeedActive_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_LinkState_LSB 0x5
-#define QIB_7322_IBCStatusA_0_LinkState_MSB 0x7
-#define QIB_7322_IBCStatusA_0_LinkState_RMASK 0x7
-#define QIB_7322_IBCStatusA_0_LinkTrainingState_LSB 0x0
-#define QIB_7322_IBCStatusA_0_LinkTrainingState_MSB 0x4
-#define QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK 0x1F
-
-#define QIB_7322_IBCStatusB_0_OFFS 0x1548
-#define QIB_7322_IBCStatusB_0_DEF 0x00000000XXXXXXXX
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_LSB 0x27
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_MSB 0x27
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_RMASK 0x1
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_LSB 0x26
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_MSB 0x26
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_RMASK 0x1
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_LSB 0x25
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_MSB 0x25
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_RMASK 0x1
-#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_LSB 0x24
-#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_MSB 0x24
-#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_RMASK 0x1
-#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_LSB 0x20
-#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_MSB 0x23
-#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_RMASK 0xF
-#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_LSB 0x1E
-#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_MSB 0x1F
-#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_RMASK 0x3
-#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_LSB 0x1A
-#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_MSB 0x1D
-#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_RMASK 0xF
-#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_LSB 0x0
-#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_MSB 0x19
-#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_RMASK 0x3FFFFFF
-
-#define QIB_7322_IBCCtrlA_0_OFFS 0x1560
-#define QIB_7322_IBCCtrlA_0_DEF 0x0000000000000000
-#define QIB_7322_IBCCtrlA_0_Loopback_LSB 0x3F
-#define QIB_7322_IBCCtrlA_0_Loopback_MSB 0x3F
-#define QIB_7322_IBCCtrlA_0_Loopback_RMASK 0x1
-#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_LSB 0x3E
-#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_MSB 0x3E
-#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_RMASK 0x1
-#define QIB_7322_IBCCtrlA_0_IBLinkEn_LSB 0x3D
-#define QIB_7322_IBCCtrlA_0_IBLinkEn_MSB 0x3D
-#define QIB_7322_IBCCtrlA_0_IBLinkEn_RMASK 0x1
-#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_LSB 0x3C
-#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_MSB 0x3C
-#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_RMASK 0x1
-#define QIB_7322_IBCCtrlA_0_NumVLane_LSB 0x30
-#define QIB_7322_IBCCtrlA_0_NumVLane_MSB 0x32
-#define QIB_7322_IBCCtrlA_0_NumVLane_RMASK 0x7
-#define QIB_7322_IBCCtrlA_0_OverrunThreshold_LSB 0x24
-#define QIB_7322_IBCCtrlA_0_OverrunThreshold_MSB 0x27
-#define QIB_7322_IBCCtrlA_0_OverrunThreshold_RMASK 0xF
-#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_LSB 0x20
-#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_MSB 0x23
-#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_RMASK 0xF
-#define QIB_7322_IBCCtrlA_0_MaxPktLen_LSB 0x15
-#define QIB_7322_IBCCtrlA_0_MaxPktLen_MSB 0x1F
-#define QIB_7322_IBCCtrlA_0_MaxPktLen_RMASK 0x7FF
-#define QIB_7322_IBCCtrlA_0_LinkCmd_LSB 0x13
-#define QIB_7322_IBCCtrlA_0_LinkCmd_MSB 0x14
-#define QIB_7322_IBCCtrlA_0_LinkCmd_RMASK 0x3
-#define QIB_7322_IBCCtrlA_0_LinkInitCmd_LSB 0x10
-#define QIB_7322_IBCCtrlA_0_LinkInitCmd_MSB 0x12
-#define QIB_7322_IBCCtrlA_0_LinkInitCmd_RMASK 0x7
-#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_LSB 0x8
-#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_MSB 0xF
-#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_RMASK 0xFF
-#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_LSB 0x0
-#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_MSB 0x7
-#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_RMASK 0xFF
-
-#define QIB_7322_IBCCtrlB_0_OFFS 0x1568
-#define QIB_7322_IBCCtrlB_0_DEF 0x00000000000305FF
-#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_LSB 0x30
-#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_MSB 0x3F
-#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK 0xFFFF
-#define QIB_7322_IBCCtrlB_0_IB_DLID_LSB 0x20
-#define QIB_7322_IBCCtrlB_0_IB_DLID_MSB 0x2F
-#define QIB_7322_IBCCtrlB_0_IB_DLID_RMASK 0xFFFF
-#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_LSB 0x1B
-#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_MSB 0x1B
-#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_LSB 0x1A
-#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_MSB 0x1A
-#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_LSB 0x12
-#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_MSB 0x19
-#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_RMASK 0xFF
-#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_LSB 0x11
-#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_MSB 0x11
-#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_LSB 0x10
-#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_MSB 0x10
-#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_DDS_LSB 0xC
-#define QIB_7322_IBCCtrlB_0_SD_DDS_MSB 0xF
-#define QIB_7322_IBCCtrlB_0_SD_DDS_RMASK 0xF
-#define QIB_7322_IBCCtrlB_0_SD_DDSV_LSB 0xB
-#define QIB_7322_IBCCtrlB_0_SD_DDSV_MSB 0xB
-#define QIB_7322_IBCCtrlB_0_SD_DDSV_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_LSB 0xA
-#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_MSB 0xA
-#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_LSB 0x9
-#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_MSB 0x9
-#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_LSB 0x8
-#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_MSB 0x8
-#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_LSB 0x7
-#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_MSB 0x7
-#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_LSB 0x5
-#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_MSB 0x6
-#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_RMASK 0x3
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_LSB 0x4
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_MSB 0x4
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_LSB 0x3
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_MSB 0x3
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_LSB 0x2
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_MSB 0x2
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_LSB 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_MSB 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_LSB 0x0
-#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_MSB 0x0
-#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_RMASK 0x1
-
-#define QIB_7322_IBCCtrlC_0_OFFS 0x1570
-#define QIB_7322_IBCCtrlC_0_DEF 0x0000000000000301
-#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_LSB 0x5
-#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_MSB 0x9
-#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_RMASK 0x1F
-#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_LSB 0x0
-#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_MSB 0x4
-#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_RMASK 0x1F
-
-#define QIB_7322_HRTBT_GUID_0_OFFS 0x1588
-#define QIB_7322_HRTBT_GUID_0_DEF 0x0000000000000000
-
-#define QIB_7322_IB_SDTEST_IF_TX_0_OFFS 0x1590
-#define QIB_7322_IB_SDTEST_IF_TX_0_DEF 0x0000000000000000
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_LSB 0x30
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_MSB 0x3F
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_RMASK 0xFFFF
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_LSB 0x20
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_MSB 0x2F
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_RMASK 0xFFFF
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_LSB 0xD
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_MSB 0xF
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_RMASK 0x7
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_LSB 0xB
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_MSB 0xC
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_RMASK 0x3
-#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_LSB 0x4
-#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_MSB 0x4
-#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_RMASK 0x1
-#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_LSB 0x2
-#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_MSB 0x3
-#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_RMASK 0x3
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_LSB 0x1
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_MSB 0x1
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_RMASK 0x1
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_LSB 0x0
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_MSB 0x0
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_RMASK 0x1
-
-#define QIB_7322_IB_SDTEST_IF_RX_0_OFFS 0x1598
-#define QIB_7322_IB_SDTEST_IF_RX_0_DEF 0x0000000000000000
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_LSB 0x30
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_MSB 0x3F
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_RMASK 0xFFFF
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_LSB 0x20
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_MSB 0x2F
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_RMASK 0xFFFF
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_LSB 0x18
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_MSB 0x1F
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_RMASK 0xFF
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_LSB 0x10
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_MSB 0x17
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_RMASK 0xFF
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_LSB 0x1
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_MSB 0x1
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_RMASK 0x1
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_LSB 0x0
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_MSB 0x0
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_RMASK 0x1
-
-#define QIB_7322_IBNCModeCtrl_0_OFFS 0x15B8
-#define QIB_7322_IBNCModeCtrl_0_DEF 0x0000000000000000
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_LSB 0x22
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_MSB 0x22
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_LSB 0x21
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_MSB 0x21
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_LSB 0x20
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_MSB 0x20
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_LSB 0x11
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_MSB 0x19
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_RMASK 0x1FF
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_LSB 0x8
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_MSB 0x10
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_RMASK 0x1FF
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_LSB 0x2
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_MSB 0x2
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_LSB 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_MSB 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_LSB 0x0
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_MSB 0x0
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_RMASK 0x1
-
-#define QIB_7322_IBSerdesStatus_0_OFFS 0x15D0
-#define QIB_7322_IBSerdesStatus_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBPCSConfig_0_OFFS 0x15D8
-#define QIB_7322_IBPCSConfig_0_DEF 0x0000000000000007
-#define QIB_7322_IBPCSConfig_0_link_sync_mask_LSB 0x9
-#define QIB_7322_IBPCSConfig_0_link_sync_mask_MSB 0x12
-#define QIB_7322_IBPCSConfig_0_link_sync_mask_RMASK 0x3FF
-#define QIB_7322_IBPCSConfig_0_xcv_rreset_LSB 0x2
-#define QIB_7322_IBPCSConfig_0_xcv_rreset_MSB 0x2
-#define QIB_7322_IBPCSConfig_0_xcv_rreset_RMASK 0x1
-#define QIB_7322_IBPCSConfig_0_xcv_treset_LSB 0x1
-#define QIB_7322_IBPCSConfig_0_xcv_treset_MSB 0x1
-#define QIB_7322_IBPCSConfig_0_xcv_treset_RMASK 0x1
-#define QIB_7322_IBPCSConfig_0_tx_rx_reset_LSB 0x0
-#define QIB_7322_IBPCSConfig_0_tx_rx_reset_MSB 0x0
-#define QIB_7322_IBPCSConfig_0_tx_rx_reset_RMASK 0x1
-
-#define QIB_7322_IBSerdesCtrl_0_OFFS 0x15E0
-#define QIB_7322_IBSerdesCtrl_0_DEF 0x0000000000FFA00F
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_LSB 0x1A
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_MSB 0x1A
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_LSB 0x19
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_MSB 0x19
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_LSB 0x18
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_MSB 0x18
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_LSB 0x14
-#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_MSB 0x17
-#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_RMASK 0xF
-#define QIB_7322_IBSerdesCtrl_0_CGMODE_LSB 0x10
-#define QIB_7322_IBSerdesCtrl_0_CGMODE_MSB 0x13
-#define QIB_7322_IBSerdesCtrl_0_CGMODE_RMASK 0xF
-#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_LSB 0xF
-#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_MSB 0xF
-#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_LSB 0xD
-#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_MSB 0xD
-#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_LPEN_LSB 0xC
-#define QIB_7322_IBSerdesCtrl_0_LPEN_MSB 0xC
-#define QIB_7322_IBSerdesCtrl_0_LPEN_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_PLLPD_LSB 0xB
-#define QIB_7322_IBSerdesCtrl_0_PLLPD_MSB 0xB
-#define QIB_7322_IBSerdesCtrl_0_PLLPD_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_TXPD_LSB 0xA
-#define QIB_7322_IBSerdesCtrl_0_TXPD_MSB 0xA
-#define QIB_7322_IBSerdesCtrl_0_TXPD_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_RXPD_LSB 0x9
-#define QIB_7322_IBSerdesCtrl_0_RXPD_MSB 0x9
-#define QIB_7322_IBSerdesCtrl_0_RXPD_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_TXIDLE_LSB 0x8
-#define QIB_7322_IBSerdesCtrl_0_TXIDLE_MSB 0x8
-#define QIB_7322_IBSerdesCtrl_0_TXIDLE_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_CMODE_LSB 0x0
-#define QIB_7322_IBSerdesCtrl_0_CMODE_MSB 0x6
-#define QIB_7322_IBSerdesCtrl_0_CMODE_RMASK 0x7F
-
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_OFFS 0x1600
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_DEF 0x0000000000000000
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_LSB 0x1F
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_MSB 0x1F
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_RMASK 0x1
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_LSB 0x1E
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_MSB 0x1E
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_RMASK 0x1
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_LSB 0xE
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_MSB 0x11
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_RMASK 0xF
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_LSB 0x9
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_MSB 0xD
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_RMASK 0x1F
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_LSB 0x5
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_MSB 0x8
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_RMASK 0xF
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_LSB 0x3
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_MSB 0x4
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_RMASK 0x3
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_LSB 0x0
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_MSB 0x2
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_RMASK 0x7
-
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_OFFS 0x1640
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_OFFS 0x1648
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_OFFS 0x1650
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_OFFS 0x1658
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_OFFS 0x1660
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_OFFS 0x1668
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_OFFS 0x1670
-#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_DEF 0x0000000000000000
-
-#define QIB_7322_HighPriorityLimit_0_OFFS 0x1BC0
-#define QIB_7322_HighPriorityLimit_0_DEF 0x0000000000000000
-#define QIB_7322_HighPriorityLimit_0_Limit_LSB 0x0
-#define QIB_7322_HighPriorityLimit_0_Limit_MSB 0x7
-#define QIB_7322_HighPriorityLimit_0_Limit_RMASK 0xFF
-
-#define QIB_7322_LowPriority0_0_OFFS 0x1C00
-#define QIB_7322_LowPriority0_0_DEF 0x0000000000000000
-#define QIB_7322_LowPriority0_0_VirtualLane_LSB 0x10
-#define QIB_7322_LowPriority0_0_VirtualLane_MSB 0x12
-#define QIB_7322_LowPriority0_0_VirtualLane_RMASK 0x7
-#define QIB_7322_LowPriority0_0_Weight_LSB 0x0
-#define QIB_7322_LowPriority0_0_Weight_MSB 0x7
-#define QIB_7322_LowPriority0_0_Weight_RMASK 0xFF
-
-#define QIB_7322_HighPriority0_0_OFFS 0x1E00
-#define QIB_7322_HighPriority0_0_DEF 0x0000000000000000
-#define QIB_7322_HighPriority0_0_VirtualLane_LSB 0x10
-#define QIB_7322_HighPriority0_0_VirtualLane_MSB 0x12
-#define QIB_7322_HighPriority0_0_VirtualLane_RMASK 0x7
-#define QIB_7322_HighPriority0_0_Weight_LSB 0x0
-#define QIB_7322_HighPriority0_0_Weight_MSB 0x7
-#define QIB_7322_HighPriority0_0_Weight_RMASK 0xFF
-
-#define QIB_7322_CntrRegBase_1_OFFS 0x2028
-#define QIB_7322_CntrRegBase_1_DEF 0x0000000000013000
-
-#define QIB_7322_RcvQPMulticastContext_1_OFFS 0x2170
-
-#define QIB_7322_SendCtrl_1_OFFS 0x21C0
-
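The _1 offsets listed here sit exactly 0x1000 above their _0 counterparts (RcvQPMulticastContext: 0x1170 vs 0x2170; SendCtrl: 0x11C0 vs 0x21C0), which suggests the per-port register banks repeat at a 0x1000-byte stride. A hedged sketch of per-port addressing under that inference; both macros are illustrative, not from the original header:

	/* Per-port register offset, assuming a 0x1000 stride between the
	 * port 0 and port 1 banks (inferred from the _0/_1 offsets above).
	 */
	#define QIB_7322_PORT_STRIDE	0x1000
	#define QIB_7322_PORT_REG(off0, port) \
		((off0) + (port) * QIB_7322_PORT_STRIDE)

	/* e.g. QIB_7322_PORT_REG(QIB_7322_SendCtrl_0_OFFS, 1) == 0x21C0 */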
-#define QIB_7322_SendBufAvail0_OFFS 0x3000
-#define QIB_7322_SendBufAvail0_DEF 0x0000000000000000
-#define QIB_7322_SendBufAvail0_SendBuf_31_0_LSB 0x0
-#define QIB_7322_SendBufAvail0_SendBuf_31_0_MSB 0x3F
-#define QIB_7322_SendBufAvail0_SendBuf_31_0_RMASK 0x0
-
-#define QIB_7322_MsixTable_OFFS 0x8000
-#define QIB_7322_MsixTable_DEF 0x0000000000000000
-
-#define QIB_7322_MsixPba_OFFS 0x9000
-#define QIB_7322_MsixPba_DEF 0x0000000000000000
-
-#define QIB_7322_LAMemory_OFFS 0xA000
-#define QIB_7322_LAMemory_DEF 0x0000000000000000
-
-#define QIB_7322_LBIntCnt_OFFS 0x11000
-#define QIB_7322_LBIntCnt_DEF 0x0000000000000000
-
-#define QIB_7322_LBFlowStallCnt_OFFS 0x11008
-#define QIB_7322_LBFlowStallCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RxTIDFullErrCnt_OFFS 0x110D0
-#define QIB_7322_RxTIDFullErrCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RxTIDValidErrCnt_OFFS 0x110D8
-#define QIB_7322_RxTIDValidErrCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RxP0HdrEgrOvflCnt_OFFS 0x110E8
-#define QIB_7322_RxP0HdrEgrOvflCnt_DEF 0x0000000000000000
-
-#define QIB_7322_PcieRetryBufDiagQwordCnt_OFFS 0x111A0
-#define QIB_7322_PcieRetryBufDiagQwordCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RxTidFlowDropCnt_OFFS 0x111E0
-#define QIB_7322_RxTidFlowDropCnt_DEF 0x0000000000000000
-
-#define QIB_7322_LBIntCnt_0_OFFS 0x12000
-#define QIB_7322_LBIntCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxCreditUpToDateTimeOut_0_OFFS 0x12008
-#define QIB_7322_TxCreditUpToDateTimeOut_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxSDmaDescCnt_0_OFFS 0x12010
-#define QIB_7322_TxSDmaDescCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxUnsupVLErrCnt_0_OFFS 0x12018
-#define QIB_7322_TxUnsupVLErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxDataPktCnt_0_OFFS 0x12020
-#define QIB_7322_TxDataPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxFlowPktCnt_0_OFFS 0x12028
-#define QIB_7322_TxFlowPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxDwordCnt_0_OFFS 0x12030
-#define QIB_7322_TxDwordCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxLenErrCnt_0_OFFS 0x12038
-#define QIB_7322_TxLenErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxMaxMinLenErrCnt_0_OFFS 0x12040
-#define QIB_7322_TxMaxMinLenErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxUnderrunCnt_0_OFFS 0x12048
-#define QIB_7322_TxUnderrunCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxFlowStallCnt_0_OFFS 0x12050
-#define QIB_7322_TxFlowStallCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxDroppedPktCnt_0_OFFS 0x12058
-#define QIB_7322_TxDroppedPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxDroppedPktCnt_0_OFFS 0x12060
-#define QIB_7322_RxDroppedPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxDataPktCnt_0_OFFS 0x12068
-#define QIB_7322_RxDataPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxFlowPktCnt_0_OFFS 0x12070
-#define QIB_7322_RxFlowPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxDwordCnt_0_OFFS 0x12078
-#define QIB_7322_RxDwordCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxLenErrCnt_0_OFFS 0x12080
-#define QIB_7322_RxLenErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxMaxMinLenErrCnt_0_OFFS 0x12088
-#define QIB_7322_RxMaxMinLenErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxICRCErrCnt_0_OFFS 0x12090
-#define QIB_7322_RxICRCErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxVCRCErrCnt_0_OFFS 0x12098
-#define QIB_7322_RxVCRCErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxFlowCtrlViolCnt_0_OFFS 0x120A0
-#define QIB_7322_RxFlowCtrlViolCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxVersionErrCnt_0_OFFS 0x120A8
-#define QIB_7322_RxVersionErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxLinkMalformCnt_0_OFFS 0x120B0
-#define QIB_7322_RxLinkMalformCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxEBPCnt_0_OFFS 0x120B8
-#define QIB_7322_RxEBPCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxLPCRCErrCnt_0_OFFS 0x120C0
-#define QIB_7322_RxLPCRCErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxBufOvflCnt_0_OFFS 0x120C8
-#define QIB_7322_RxBufOvflCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxLenTruncateCnt_0_OFFS 0x120D0
-#define QIB_7322_RxLenTruncateCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxPKeyMismatchCnt_0_OFFS 0x120E0
-#define QIB_7322_RxPKeyMismatchCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBLinkDownedCnt_0_OFFS 0x12180
-#define QIB_7322_IBLinkDownedCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBSymbolErrCnt_0_OFFS 0x12188
-#define QIB_7322_IBSymbolErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBStatusChangeCnt_0_OFFS 0x12190
-#define QIB_7322_IBStatusChangeCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBLinkErrRecoveryCnt_0_OFFS 0x12198
-#define QIB_7322_IBLinkErrRecoveryCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_ExcessBufferOvflCnt_0_OFFS 0x121A8
-#define QIB_7322_ExcessBufferOvflCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_LocalLinkIntegrityErrCnt_0_OFFS 0x121B0
-#define QIB_7322_LocalLinkIntegrityErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxVlErrCnt_0_OFFS 0x121B8
-#define QIB_7322_RxVlErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxDlidFltrCnt_0_OFFS 0x121C0
-#define QIB_7322_RxDlidFltrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxVL15DroppedPktCnt_0_OFFS 0x121C8
-#define QIB_7322_RxVL15DroppedPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxOtherLocalPhyErrCnt_0_OFFS 0x121D0
-#define QIB_7322_RxOtherLocalPhyErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxQPInvalidContextCnt_0_OFFS 0x121D8
-#define QIB_7322_RxQPInvalidContextCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxHeadersErrCnt_0_OFFS 0x121F8
-#define QIB_7322_TxHeadersErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSRcvDataCount_0_OFFS 0x12218
-#define QIB_7322_PSRcvDataCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSRcvPktsCount_0_OFFS 0x12220
-#define QIB_7322_PSRcvPktsCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitDataCount_0_OFFS 0x12228
-#define QIB_7322_PSXmitDataCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitPktsCount_0_OFFS 0x12230
-#define QIB_7322_PSXmitPktsCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitWaitCount_0_OFFS 0x12238
-#define QIB_7322_PSXmitWaitCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_LBIntCnt_1_OFFS 0x13000
-#define QIB_7322_LBIntCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxCreditUpToDateTimeOut_1_OFFS 0x13008
-#define QIB_7322_TxCreditUpToDateTimeOut_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxSDmaDescCnt_1_OFFS 0x13010
-#define QIB_7322_TxSDmaDescCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxUnsupVLErrCnt_1_OFFS 0x13018
-#define QIB_7322_TxUnsupVLErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxDataPktCnt_1_OFFS 0x13020
-#define QIB_7322_TxDataPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxFlowPktCnt_1_OFFS 0x13028
-#define QIB_7322_TxFlowPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxDwordCnt_1_OFFS 0x13030
-#define QIB_7322_TxDwordCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxLenErrCnt_1_OFFS 0x13038
-#define QIB_7322_TxLenErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxMaxMinLenErrCnt_1_OFFS 0x13040
-#define QIB_7322_TxMaxMinLenErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxUnderrunCnt_1_OFFS 0x13048
-#define QIB_7322_TxUnderrunCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxFlowStallCnt_1_OFFS 0x13050
-#define QIB_7322_TxFlowStallCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxDroppedPktCnt_1_OFFS 0x13058
-#define QIB_7322_TxDroppedPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxDroppedPktCnt_1_OFFS 0x13060
-#define QIB_7322_RxDroppedPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxDataPktCnt_1_OFFS 0x13068
-#define QIB_7322_RxDataPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxFlowPktCnt_1_OFFS 0x13070
-#define QIB_7322_RxFlowPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxDwordCnt_1_OFFS 0x13078
-#define QIB_7322_RxDwordCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxLenErrCnt_1_OFFS 0x13080
-#define QIB_7322_RxLenErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxMaxMinLenErrCnt_1_OFFS 0x13088
-#define QIB_7322_RxMaxMinLenErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxICRCErrCnt_1_OFFS 0x13090
-#define QIB_7322_RxICRCErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxVCRCErrCnt_1_OFFS 0x13098
-#define QIB_7322_RxVCRCErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxFlowCtrlViolCnt_1_OFFS 0x130A0
-#define QIB_7322_RxFlowCtrlViolCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxVersionErrCnt_1_OFFS 0x130A8
-#define QIB_7322_RxVersionErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxLinkMalformCnt_1_OFFS 0x130B0
-#define QIB_7322_RxLinkMalformCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxEBPCnt_1_OFFS 0x130B8
-#define QIB_7322_RxEBPCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxLPCRCErrCnt_1_OFFS 0x130C0
-#define QIB_7322_RxLPCRCErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxBufOvflCnt_1_OFFS 0x130C8
-#define QIB_7322_RxBufOvflCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxLenTruncateCnt_1_OFFS 0x130D0
-#define QIB_7322_RxLenTruncateCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxPKeyMismatchCnt_1_OFFS 0x130E0
-#define QIB_7322_RxPKeyMismatchCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_IBLinkDownedCnt_1_OFFS 0x13180
-#define QIB_7322_IBLinkDownedCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_IBSymbolErrCnt_1_OFFS 0x13188
-#define QIB_7322_IBSymbolErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_IBStatusChangeCnt_1_OFFS 0x13190
-#define QIB_7322_IBStatusChangeCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_IBLinkErrRecoveryCnt_1_OFFS 0x13198
-#define QIB_7322_IBLinkErrRecoveryCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_ExcessBufferOvflCnt_1_OFFS 0x131A8
-#define QIB_7322_ExcessBufferOvflCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_LocalLinkIntegrityErrCnt_1_OFFS 0x131B0
-#define QIB_7322_LocalLinkIntegrityErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxVlErrCnt_1_OFFS 0x131B8
-#define QIB_7322_RxVlErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxDlidFltrCnt_1_OFFS 0x131C0
-#define QIB_7322_RxDlidFltrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxVL15DroppedPktCnt_1_OFFS 0x131C8
-#define QIB_7322_RxVL15DroppedPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxOtherLocalPhyErrCnt_1_OFFS 0x131D0
-#define QIB_7322_RxOtherLocalPhyErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxQPInvalidContextCnt_1_OFFS 0x131D8
-#define QIB_7322_RxQPInvalidContextCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxHeadersErrCnt_1_OFFS 0x131F8
-#define QIB_7322_TxHeadersErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSRcvDataCount_1_OFFS 0x13218
-#define QIB_7322_PSRcvDataCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSRcvPktsCount_1_OFFS 0x13220
-#define QIB_7322_PSRcvPktsCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitDataCount_1_OFFS 0x13228
-#define QIB_7322_PSXmitDataCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitPktsCount_1_OFFS 0x13230
-#define QIB_7322_PSXmitPktsCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitWaitCount_1_OFFS 0x13238
-#define QIB_7322_PSXmitWaitCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_RcvEgrArray_OFFS 0x14000
-#define QIB_7322_RcvEgrArray_DEF 0x0000000000000000
-#define QIB_7322_RcvEgrArray_RT_BufSize_LSB 0x25
-#define QIB_7322_RcvEgrArray_RT_BufSize_MSB 0x27
-#define QIB_7322_RcvEgrArray_RT_BufSize_RMASK 0x7
-#define QIB_7322_RcvEgrArray_RT_Addr_LSB 0x0
-#define QIB_7322_RcvEgrArray_RT_Addr_MSB 0x24
-#define QIB_7322_RcvEgrArray_RT_Addr_RMASK 0x1FFFFFFFFF
-
-#define QIB_7322_RcvTIDArray0_OFFS 0x50000
-#define QIB_7322_RcvTIDArray0_DEF 0x0000000000000000
-#define QIB_7322_RcvTIDArray0_RT_BufSize_LSB 0x25
-#define QIB_7322_RcvTIDArray0_RT_BufSize_MSB 0x27
-#define QIB_7322_RcvTIDArray0_RT_BufSize_RMASK 0x7
-#define QIB_7322_RcvTIDArray0_RT_Addr_LSB 0x0
-#define QIB_7322_RcvTIDArray0_RT_Addr_MSB 0x24
-#define QIB_7322_RcvTIDArray0_RT_Addr_RMASK 0x1FFFFFFFFF
-
-#define QIB_7322_IBSD_DDS_MAP_TABLE_0_OFFS 0xD0000
-#define QIB_7322_IBSD_DDS_MAP_TABLE_0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvHdrTail0_OFFS 0x200000
-#define QIB_7322_RcvHdrTail0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvHdrHead0_OFFS 0x200008
-#define QIB_7322_RcvHdrHead0_DEF 0x0000000000000000
-#define QIB_7322_RcvHdrHead0_counter_LSB 0x20
-#define QIB_7322_RcvHdrHead0_counter_MSB 0x2F
-#define QIB_7322_RcvHdrHead0_counter_RMASK 0xFFFF
-#define QIB_7322_RcvHdrHead0_RcvHeadPointer_LSB 0x0
-#define QIB_7322_RcvHdrHead0_RcvHeadPointer_MSB 0x1F
-#define QIB_7322_RcvHdrHead0_RcvHeadPointer_RMASK 0xFFFFFFFF
-
-#define QIB_7322_RcvEgrIndexTail0_OFFS 0x200010
-#define QIB_7322_RcvEgrIndexTail0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvEgrIndexHead0_OFFS 0x200018
-#define QIB_7322_RcvEgrIndexHead0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvTIDFlowTable0_OFFS 0x201000
-#define QIB_7322_RcvTIDFlowTable0_DEF 0x0000000000000000
-#define QIB_7322_RcvTIDFlowTable0_GenMismatch_LSB 0x1C
-#define QIB_7322_RcvTIDFlowTable0_GenMismatch_MSB 0x1C
-#define QIB_7322_RcvTIDFlowTable0_GenMismatch_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_LSB 0x1B
-#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_MSB 0x1B
-#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_LSB 0x16
-#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_MSB 0x16
-#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_LSB 0x15
-#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_MSB 0x15
-#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_LSB 0x14
-#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_MSB 0x14
-#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_FlowValid_LSB 0x13
-#define QIB_7322_RcvTIDFlowTable0_FlowValid_MSB 0x13
-#define QIB_7322_RcvTIDFlowTable0_FlowValid_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_GenVal_LSB 0xB
-#define QIB_7322_RcvTIDFlowTable0_GenVal_MSB 0x12
-#define QIB_7322_RcvTIDFlowTable0_GenVal_RMASK 0xFF
-#define QIB_7322_RcvTIDFlowTable0_SeqNum_LSB 0x0
-#define QIB_7322_RcvTIDFlowTable0_SeqNum_MSB 0xA
-#define QIB_7322_RcvTIDFlowTable0_SeqNum_RMASK 0x7FF
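Each register above is described by an _OFFS (byte offset from BAR0), a _DEF (power-on default), and per-field _LSB/_MSB/_RMASK triplets: a field is read by shifting the 64-bit register value right by _LSB and masking with _RMASK. A minimal sketch of generic field extraction under that reading (QIB_FIELD_GET and the helper below are illustrative, not part of the driver):

#define QIB_FIELD_GET(regval, field) \
	(((regval) >> field##_LSB) & field##_RMASK)

/* e.g. pull the 16-bit counter (bits 0x20..0x2F) out of a RcvHdrHead0 value */
static inline u64 rcvhdrhead0_counter(u64 regval)
{
	return QIB_FIELD_GET(regval, QIB_7322_RcvHdrHead0_counter);
}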
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
deleted file mode 100644
index cf652831d8e7..000000000000
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ /dev/null
@@ -1,798 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _QIB_COMMON_H
-#define _QIB_COMMON_H
-
-/*
- * This file contains defines, structures, etc. that are used
- * to communicate between kernel and user code.
- */
-
-/* This is the IEEE-assigned OUI for QLogic Inc. QLogic_IB */
-#define QIB_SRC_OUI_1 0x00
-#define QIB_SRC_OUI_2 0x11
-#define QIB_SRC_OUI_3 0x75
-
-/* version of protocol header (known to chip also). In the long run,
- * we should be able to generate and accept a range of version numbers;
- * for now we only accept one, and it's compiled in.
- */
-#define IPS_PROTO_VERSION 2
-
-/*
- * These are compile time constants that you may want to enable or disable
- * if you are trying to debug problems with code or performance.
- * QIB_VERBOSE_TRACING define as 1 if you want additional tracing in
- * fastpath code
- * QIB_TRACE_REGWRITES define as 1 if you want register writes to be
- * traced in fastpath code
- * _QIB_TRACING define as 0 if you want to remove all tracing in a
- * compilation unit
- */
-
-/*
- * The value in the BTH QP field that QLogic_IB uses to differentiate
- * a qlogic_ib protocol IB packet vs. standard IB transport.
- * It needs to be even (0x656b78), because the LSB is sometimes
- * used for the MSB of context. The change may cause a problem
- * interoperating with older software.
- */
-#define QIB_KD_QP 0x656b78
-
-/*
- * These are the status bits readable (in ascii form, 64bit value)
- * from the "status" sysfs file. For binary compatibility, values
- * must remain as is; removed states can be reused for different
- * purposes.
- */
-#define QIB_STATUS_INITTED 0x1 /* basic initialization done */
-/* Chip has been found and initted */
-#define QIB_STATUS_CHIP_PRESENT 0x20
-/* IB link is at ACTIVE, usable for data traffic */
-#define QIB_STATUS_IB_READY 0x40
-/* link is configured, LID, MTU, etc. have been set */
-#define QIB_STATUS_IB_CONF 0x80
-/* A Fatal hardware error has occurred. */
-#define QIB_STATUS_HWERROR 0x200
-
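Taken together, these bits let user code decide whether the device is usable. A sketch, assuming the status qword is read from the mapped spi_status location (the helper name is hypothetical):

static int qib_link_usable(const volatile __u64 *statusp)
{
	__u64 s = *statusp;	/* driver-updated status word */

	return (s & QIB_STATUS_INITTED) && (s & QIB_STATUS_CHIP_PRESENT) &&
	       (s & QIB_STATUS_IB_CONF) && (s & QIB_STATUS_IB_READY) &&
	       !(s & QIB_STATUS_HWERROR);
}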
-/*
- * The list of usermode accessible registers. Also see Reg_* later in file.
- */
-enum qib_ureg {
- /* (RO) DMA RcvHdr to be used next. */
- ur_rcvhdrtail = 0,
- /* (RW) RcvHdr entry to be processed next by host. */
- ur_rcvhdrhead = 1,
- /* (RO) Index of next Eager index to use. */
- ur_rcvegrindextail = 2,
- /* (RW) Eager TID to be processed next */
- ur_rcvegrindexhead = 3,
- /* For internal use only; max register number. */
- _QIB_UregMax
-};
-
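A sketch of how user code might address these registers, under the assumption that they are laid out as an array of 64-bit words starting at the per-context user register base (spi_uregbase in qib_base_info, below):

static volatile __u64 *qib_ureg_addr(__u64 uregbase, enum qib_ureg regno)
{
	return (volatile __u64 *)(unsigned long)uregbase + regno;
}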
-/* bit values for spi_runtime_flags */
-#define QIB_RUNTIME_PCIE 0x0002
-#define QIB_RUNTIME_FORCE_WC_ORDER 0x0004
-#define QIB_RUNTIME_RCVHDR_COPY 0x0008
-#define QIB_RUNTIME_MASTER 0x0010
-#define QIB_RUNTIME_RCHK 0x0020
-#define QIB_RUNTIME_NODMA_RTAIL 0x0080
-#define QIB_RUNTIME_SPECIAL_TRIGGER 0x0100
-#define QIB_RUNTIME_SDMA 0x0200
-#define QIB_RUNTIME_FORCE_PIOAVAIL 0x0400
-#define QIB_RUNTIME_PIO_REGSWAPPED 0x0800
-#define QIB_RUNTIME_CTXT_MSB_IN_QP 0x1000
-#define QIB_RUNTIME_CTXT_REDIRECT 0x2000
-#define QIB_RUNTIME_HDRSUPP 0x4000
-
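User code tests these flags against spi_runtime_flags from qib_base_info (defined below); for example, a hypothetical check for header-suppression support:

static int qib_has_hdrsupp(__u32 spi_runtime_flags)
{
	return (spi_runtime_flags & QIB_RUNTIME_HDRSUPP) != 0;
}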
-/*
- * This structure is returned by qib_userinit() immediately after
- * open to get implementation-specific info, and info specific to this
- * instance.
- *
- * This struct must have explicit pad fields where type sizes
- * may result in different alignments between 32 and 64 bit
- * programs, since the 64 bit kernel requires the user code
- * to have matching offsets.
- */
-struct qib_base_info {
- /* version of hardware, for feature checking. */
- __u32 spi_hw_version;
- /* version of software, for feature checking. */
- __u32 spi_sw_version;
- /* QLogic_IB context assigned, goes into sent packets */
- __u16 spi_ctxt;
- __u16 spi_subctxt;
- /*
- * IB MTU; a packet's IB data must be less than this.
- * The MTU is in bytes, and will be a multiple of 4 bytes.
- */
- __u32 spi_mtu;
- /*
- * Size of a PIO buffer. Any given packet's total size must be less
- * than this (in words). Included is the starting control word, so
- * if 513 is returned, then total pkt size is 512 words or less.
- */
- __u32 spi_piosize;
- /* size of the TID cache in qlogic_ib, in entries */
- __u32 spi_tidcnt;
- /* size of the TID Eager list in qlogic_ib, in entries */
- __u32 spi_tidegrcnt;
- /* size of a single receive header queue entry in words. */
- __u32 spi_rcvhdrent_size;
- /*
- * Count of receive header queue entries allocated.
- * This may be less than the spu_rcvhdrcnt passed in!
- */
- __u32 spi_rcvhdr_cnt;
-
- /* per-chip and other runtime features bitmap (QIB_RUNTIME_*) */
- __u32 spi_runtime_flags;
-
- /* address where hardware receive header queue is mapped */
- __u64 spi_rcvhdr_base;
-
- /* user program. */
-
- /* base address of eager TID receive buffers used by hardware. */
- __u64 spi_rcv_egrbufs;
-
- /* Allocated by initialization code, not by protocol. */
-
- /*
- * Size of each TID buffer in host memory, starting at
- * spi_rcv_egrbufs. The buffers are virtually contiguous.
- */
- __u32 spi_rcv_egrbufsize;
- /*
- * The special QP (queue pair) value that identifies a qlogic_ib
- * protocol packet from standard IB packets. More, probably much
- * more, to be added.
- */
- __u32 spi_qpair;
-
- /*
- * User register base for init code, not to be used directly by
- * protocol or applications. Always points to chip registers,
- * for normal or shared context.
- */
- __u64 spi_uregbase;
- /*
- * Maximum buffer size in bytes that can be used in a single TID
- * entry (assuming the buffer is aligned to this boundary). This is
- * the minimum of what the hardware and software support. Guaranteed
- * to be a power of 2.
- */
- __u32 spi_tid_maxsize;
- /*
- * alignment of each pio send buffer (byte count
- * to add to spi_piobufbase to get to second buffer)
- */
- __u32 spi_pioalign;
- /*
- * The index of the first pio buffer available to this process;
- * needed to do lookup in spi_pioavailaddr; not added to
- * spi_piobufbase.
- */
- __u32 spi_pioindex;
- /* number of buffers mapped for this process */
- __u32 spi_piocnt;
-
- /*
- * Base address of writeonly pio buffers for this process.
- * Each buffer has spi_piosize words, and is aligned on spi_pioalign
- * boundaries. spi_piocnt buffers are mapped from this address
- */
- __u64 spi_piobufbase;
-
- /*
- * Base address of readonly memory copy of the pioavail registers.
- * There are 2 bits for each buffer.
- */
- __u64 spi_pioavailaddr;
-
- /*
- * Address where driver updates a copy of the interface and driver
- * status (QIB_STATUS_*) as a 64 bit value. It's followed by a
- * link status qword (formerly combined with driver status), then a
- * string indicating hardware error, if there was one.
- */
- __u64 spi_status;
-
- /* number of chip ctxts available to user processes */
- __u32 spi_nctxts;
- __u16 spi_unit; /* unit number of chip we are using */
- __u16 spi_port; /* IB port number we are using */
- /* num bufs in each contiguous set */
- __u32 spi_rcv_egrperchunk;
- /* size in bytes of each contiguous set */
- __u32 spi_rcv_egrchunksize;
- /* total size of mmap to cover full rcvegrbuffers */
- __u32 spi_rcv_egrbuftotlen;
- __u32 spi_rhf_offset; /* dword offset in hdrqent for rcvhdr flags */
- /* address of readonly memory copy of the rcvhdrq tail register. */
- __u64 spi_rcvhdr_tailaddr;
-
- /*
- * shared memory pages for subctxts if ctxt is shared; these cover
- * all the processes in the group sharing a single context.
- * All have enough space for the num_subcontexts value on this job.
- */
- __u64 spi_subctxt_uregbase;
- __u64 spi_subctxt_rcvegrbuf;
- __u64 spi_subctxt_rcvhdr_base;
-
- /* shared memory page for send buffer disarm status */
- __u64 spi_sendbuf_status;
-} __aligned(8);
-
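As the spi_pioalign/spi_piobufbase comments above describe, buffer i for this process sits i * spi_pioalign bytes past spi_piobufbase. A sketch (hypothetical helper; the only checking done is against spi_piocnt):

static void *qib_pio_buf(const struct qib_base_info *bi, __u32 i)
{
	if (i >= bi->spi_piocnt)
		return NULL;
	return (void *)(unsigned long)
		(bi->spi_piobufbase + (__u64)i * bi->spi_pioalign);
}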
-/*
- * This version number is given to the driver by the user code during
- * initialization in the spu_userversion field of qib_user_info, so
- * the driver can check for compatibility with user code.
- *
- * The major version changes when data structures
- * change in an incompatible way. The driver must be the same or higher
- * for initialization to succeed. In some cases, a higher version
- * driver will not interoperate with older software, and initialization
- * will return an error.
- */
-#define QIB_USER_SWMAJOR 1
-
-/*
- * Minor version differences are always compatible
- * within a major version; however, if the user software is newer
- * than the driver software, some new features and/or structure fields
- * may not be implemented; the user code must deal with this if it
- * cares, or it must abort after initialization reports the difference.
- */
-#define QIB_USER_SWMINOR 13
-
-#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
-
-#ifndef QIB_KERN_TYPE
-#define QIB_KERN_TYPE 0
-#endif
-
-/*
- * Similarly, this is the kernel version going back to the user. It's
- * slightly different, in that we want to tell if the driver was built as
- * part of a QLogic release, or from the driver from openfabrics.org,
- * kernel.org, or a standard distribution, for support reasons.
- * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
- *
- * It's returned by the driver to the user code during initialization in the
- * spi_sw_version field of qib_base_info, so the user code can in turn
- * check for compatibility with the kernel.
-*/
-#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
-
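One plausible compatibility check built from these definitions, assuming the user code ignores the QLogic-build bit, requires an equal major version, and accepts a driver minor at least as new as its own (the helper is illustrative only; the text above notes some higher-version drivers still refuse older software):

static int qib_sw_compatible(__u32 spi_sw_version)
{
	__u32 kver = spi_sw_version & ~(1U << 31);	/* drop build-type bit */

	return (kver >> 16) == QIB_USER_SWMAJOR &&
	       (kver & 0xffff) >= QIB_USER_SWMINOR;
}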
-/*
- * Define the driver version number. This is something that refers only
- * to the driver itself, not the software interfaces it supports.
- */
-#define QIB_DRIVER_VERSION_BASE "1.11"
-
-/* create the final driver version string */
-#ifdef QIB_IDSTR
-#define QIB_DRIVER_VERSION QIB_DRIVER_VERSION_BASE " " QIB_IDSTR
-#else
-#define QIB_DRIVER_VERSION QIB_DRIVER_VERSION_BASE
-#endif
-
-/*
- * If the unit is specified via open, HCA choice is fixed. If port is
- * specified, it's also fixed. Otherwise we try to spread contexts
- * across ports and HCAs, using different algorithms. WITHIN is
- * the old default, prior to this mechanism.
- */
-#define QIB_PORT_ALG_ACROSS 0 /* round robin contexts across HCAs, then
- * ports; this is the default */
-#define QIB_PORT_ALG_WITHIN 1 /* use all contexts on an HCA (round robin
- * active ports within), then next HCA */
-#define QIB_PORT_ALG_COUNT 2 /* number of algorithm choices */
-
-/*
- * This structure is passed to qib_userinit() to tell the driver where
- * user code buffers are, sizes, etc. The offsets and sizes of the
- * fields must remain unchanged, for binary compatibility. It can
- * be extended if userversion is changed, so user code can tell, if needed.
- */
-struct qib_user_info {
- /*
- * version of user software, to detect compatibility issues.
- * Should be set to QIB_USER_SWVERSION.
- */
- __u32 spu_userversion;
-
- __u32 _spu_unused2;
-
- /* size of struct base_info to write to */
- __u32 spu_base_info_size;
-
- __u32 spu_port_alg; /* which QIB_PORT_ALG_*; unused user minor < 11 */
-
- /*
- * If two or more processes wish to share a context, each process
- * must set the spu_subctxt_cnt and spu_subctxt_id to the same
- * values. The only restriction on the spu_subctxt_id is that
- * it be unique for a given node.
- */
- __u16 spu_subctxt_cnt;
- __u16 spu_subctxt_id;
-
- __u32 spu_port; /* IB port requested by user if > 0 */
-
- /*
- * address of struct base_info to write to
- */
- __u64 spu_base_info;
-
-} __aligned(8);
-
-/* User commands. */
-
-/* 16 available, was: old set up userspace (for old user code) */
-#define QIB_CMD_CTXT_INFO 17 /* find out what resources we got */
-#define QIB_CMD_RECV_CTRL 18 /* control receipt of packets */
-#define QIB_CMD_TID_UPDATE 19 /* update expected TID entries */
-#define QIB_CMD_TID_FREE 20 /* free expected TID entries */
-#define QIB_CMD_SET_PART_KEY 21 /* add partition key */
-/* 22 available, was: return info on slave processes (for old user code) */
-#define QIB_CMD_ASSIGN_CTXT 23 /* allocate HCA and ctxt */
-#define QIB_CMD_USER_INIT 24 /* set up userspace */
-#define QIB_CMD_UNUSED_1 25
-#define QIB_CMD_UNUSED_2 26
-#define QIB_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
-#define QIB_CMD_POLL_TYPE 28 /* set the kind of polling we want */
-#define QIB_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
-/* 30 is unused */
-#define QIB_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
-#define QIB_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
-/* 33 available, was a testing feature */
-#define QIB_CMD_DISARM_BUFS 34 /* disarm send buffers w/ errors */
-#define QIB_CMD_ACK_EVENT 35 /* ack & clear bits */
-#define QIB_CMD_CPUS_LIST 36 /* list of cpus allocated, for pinned
- * processes: qib_cpus_list */
-
-/*
- * QIB_CMD_ACK_EVENT obsoletes QIB_CMD_DISARM_BUFS, but we keep it for
- * compatibility with libraries from previous releases. The ACK_EVENT
- * will take appropriate driver action (if any, just DISARM for now),
- * then clear the bits passed in as part of the mask. These bits are
- * in the first 64bit word at spi_sendbuf_status, and are passed to
- * the driver in the event_mask union as well.
- */
-#define _QIB_EVENT_DISARM_BUFS_BIT 0
-#define _QIB_EVENT_LINKDOWN_BIT 1
-#define _QIB_EVENT_LID_CHANGE_BIT 2
-#define _QIB_EVENT_LMC_CHANGE_BIT 3
-#define _QIB_EVENT_SL2VL_CHANGE_BIT 4
-#define _QIB_MAX_EVENT_BIT _QIB_EVENT_SL2VL_CHANGE_BIT
-
-#define QIB_EVENT_DISARM_BUFS_BIT (1UL << _QIB_EVENT_DISARM_BUFS_BIT)
-#define QIB_EVENT_LINKDOWN_BIT (1UL << _QIB_EVENT_LINKDOWN_BIT)
-#define QIB_EVENT_LID_CHANGE_BIT (1UL << _QIB_EVENT_LID_CHANGE_BIT)
-#define QIB_EVENT_LMC_CHANGE_BIT (1UL << _QIB_EVENT_LMC_CHANGE_BIT)
-#define QIB_EVENT_SL2VL_CHANGE_BIT (1UL << _QIB_EVENT_SL2VL_CHANGE_BIT)
-
-
-/*
- * Poll types
- */
-#define QIB_POLL_TYPE_ANYRCV 0x0
-#define QIB_POLL_TYPE_URGENT 0x1
-
-struct qib_ctxt_info {
- __u16 num_active; /* number of active units */
- __u16 unit; /* unit (chip) assigned to caller */
- __u16 port; /* IB port assigned to caller (1-based) */
- __u16 ctxt; /* ctxt on unit assigned to caller */
- __u16 subctxt; /* subctxt on unit assigned to caller */
- __u16 num_ctxts; /* number of ctxts available on unit */
- __u16 num_subctxts; /* number of subctxts opened on ctxt */
- __u16 rec_cpu; /* cpu # for affinity (ffff if none) */
-};
-
-struct qib_tid_info {
- __u32 tidcnt;
- /* make structure same size in 32 and 64 bit */
- __u32 tid__unused;
- /* virtual address of first page in transfer */
- __u64 tidvaddr;
- /* pointer (same size 32/64 bit) to __u16 tid array */
- __u64 tidlist;
-
- /*
- * pointer (same size 32/64 bit) to bitmap of TIDs used
- * for this call; checked for being large enough at open
- */
- __u64 tidmap;
-};
-
-struct qib_cmd {
- __u32 type; /* command type */
- union {
- struct qib_tid_info tid_info;
- struct qib_user_info user_info;
-
- /*
- * address in userspace where we should put the sdma
- * inflight counter
- */
- __u64 sdma_inflight;
- /*
- * address in userspace where we should put the sdma
- * completion counter
- */
- __u64 sdma_complete;
- /* address in userspace of struct qib_ctxt_info to
- * write result to */
- __u64 ctxt_info;
- /* enable/disable receipt of packets */
- __u32 recv_ctrl;
- /* enable/disable armlaunch errors (non-zero to enable) */
- __u32 armlaunch_ctrl;
- /* partition key to set */
- __u16 part_key;
- /* user address of __u32 bitmask of active slaves */
- __u64 slave_mask_addr;
- /* type of polling we want */
- __u16 poll_type;
- /* back pressure enable bit for one particular context */
- __u8 ctxt_bp;
- /* qib_user_event_ack(), IPATH_EVENT_* bits */
- __u64 event_mask;
- } cmd;
-};
-
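Commands are delivered by filling a struct qib_cmd and writing it to the open device fd; a sketch of acknowledging a disarm event, assuming the driver's write() path parses the struct this way (error handling is minimal):

#include <string.h>
#include <unistd.h>

static int qib_ack_disarm(int fd)
{
	struct qib_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = QIB_CMD_ACK_EVENT;
	cmd.cmd.event_mask = QIB_EVENT_DISARM_BUFS_BIT;
	return write(fd, &cmd, sizeof(cmd)) == sizeof(cmd) ? 0 : -1;
}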
-struct qib_iovec {
- /* Pointer to data, but same size 32 and 64 bit */
- __u64 iov_base;
-
- /*
- * Length of data; don't need 64 bits, but want
- * qib_sendpkt to remain same size as before 32 bit changes, so...
- */
- __u64 iov_len;
-};
-
-/*
- * Describes a single packet for send. Each packet can have one or more
- * buffers, but the total length (exclusive of IB headers) must be less
- * than the MTU, and if using the PIO method, entire packet length,
- * including IB headers, must be less than the qib_piosize value (words).
- * Use of this necessitates including sys/uio.h
- */
-struct __qib_sendpkt {
- __u32 sps_flags; /* flags for packet (TBD) */
- __u32 sps_cnt; /* number of entries to use in sps_iov */
- /* array of iov's describing packet. TEMPORARY */
- struct qib_iovec sps_iov[4];
-};
-
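An illustrative filler for a two-buffer packet (header plus payload; the pointer arguments are hypothetical and must together respect the MTU/spi_piosize limits described above):

static void qib_fill_sendpkt(struct __qib_sendpkt *pkt,
			     void *hdr, __u64 hdrlen,
			     void *payload, __u64 paylen)
{
	pkt->sps_flags = 0;
	pkt->sps_cnt = 2;	/* entries used in sps_iov */
	pkt->sps_iov[0].iov_base = (__u64)(unsigned long)hdr;
	pkt->sps_iov[0].iov_len = hdrlen;
	pkt->sps_iov[1].iov_base = (__u64)(unsigned long)payload;
	pkt->sps_iov[1].iov_len = paylen;
}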
-/*
- * Diagnostics can send a packet by "writing" the following
- * structs to the diag data special file. This allows a custom
- * pbc (+ static rate) qword, so that special modes and deliberate
- * changes to CRCs can be used. The elements were also re-ordered
- * for better alignment and to avoid padding issues.
- */
-#define _DIAG_XPKT_VERS 3
-struct qib_diag_xpkt {
- __u16 version;
- __u16 unit;
- __u16 port;
- __u16 len;
- __u64 data;
- __u64 pbc_wd;
-};
-
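A sketch of sending one diagnostic packet, on the assumption that the descriptor is written whole to the diag data special file (fd, payload, and pbc value here are caller-supplied and hypothetical):

#include <unistd.h>

static int qib_diag_send(int fd, __u16 unit, __u16 port,
			 void *payload, __u16 len, __u64 pbc)
{
	struct qib_diag_xpkt x = {
		.version = _DIAG_XPKT_VERS,
		.unit    = unit,
		.port    = port,
		.len     = len,
		.data    = (__u64)(unsigned long)payload,
		.pbc_wd  = pbc,
	};

	return write(fd, &x, sizeof(x)) == sizeof(x) ? 0 : -1;
}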
-/*
- * Data layout in I2C flash (for GUID, etc.)
- * All fields are little-endian binary unless otherwise stated
- */
-#define QIB_FLASH_VERSION 2
-struct qib_flash {
- /* flash layout version (QIB_FLASH_VERSION) */
- __u8 if_fversion;
- /* checksum protecting if_length bytes */
- __u8 if_csum;
- /*
- * valid length (in use, protected by if_csum, including
- * if_fversion and if_csum themselves)
- */
- __u8 if_length;
- /* the GUID, in network order */
- __u8 if_guid[8];
- /* number of GUIDs to use, starting from if_guid */
- __u8 if_numguid;
- /* the (last 10 characters of) board serial number, in ASCII */
- char if_serial[12];
- /* board mfg date (YYYYMMDD ASCII) */
- char if_mfgdate[8];
- /* last board rework/test date (YYYYMMDD ASCII) */
- char if_testdate[8];
- /* logging of error counts, TBD */
- __u8 if_errcntp[4];
- /* powered on hours, updated at driver unload */
- __u8 if_powerhour[2];
- /* ASCII free-form comment field */
- char if_comment[32];
- /* Backwards compatible prefix for longer QLogic Serial Numbers */
- char if_sprefix[4];
- /* 82 bytes used, min flash size is 128 bytes */
- __u8 if_future[46];
-};
-
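The if_csum field is described only as protecting if_length bytes. One common scheme for a layout like this, assumed here rather than taken from the driver's eeprom code, is the complement of the byte sum over if_length bytes, excluding the stored checksum itself:

/* Assumed checksum scheme; verify by recomputing and comparing. */
static int qib_flash_csum_ok(const struct qib_flash *ifp)
{
	const __u8 *p = (const __u8 *)ifp;
	__u8 sum = 0;
	int i;

	for (i = 0; i < ifp->if_length; i++)
		sum += p[i];
	sum -= ifp->if_csum;	/* back out the stored checksum byte */
	return (__u8)~sum == ifp->if_csum;
}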
-/*
- * These are the counters implemented in the chip, and are listed in order.
- * The InterCaps naming is taken straight from the chip spec.
- */
-struct qlogic_ib_counters {
- __u64 LBIntCnt;
- __u64 LBFlowStallCnt;
- __u64 TxSDmaDescCnt; /* was Reserved1 */
- __u64 TxUnsupVLErrCnt;
- __u64 TxDataPktCnt;
- __u64 TxFlowPktCnt;
- __u64 TxDwordCnt;
- __u64 TxLenErrCnt;
- __u64 TxMaxMinLenErrCnt;
- __u64 TxUnderrunCnt;
- __u64 TxFlowStallCnt;
- __u64 TxDroppedPktCnt;
- __u64 RxDroppedPktCnt;
- __u64 RxDataPktCnt;
- __u64 RxFlowPktCnt;
- __u64 RxDwordCnt;
- __u64 RxLenErrCnt;
- __u64 RxMaxMinLenErrCnt;
- __u64 RxICRCErrCnt;
- __u64 RxVCRCErrCnt;
- __u64 RxFlowCtrlErrCnt;
- __u64 RxBadFormatCnt;
- __u64 RxLinkProblemCnt;
- __u64 RxEBPCnt;
- __u64 RxLPCRCErrCnt;
- __u64 RxBufOvflCnt;
- __u64 RxTIDFullErrCnt;
- __u64 RxTIDValidErrCnt;
- __u64 RxPKeyMismatchCnt;
- __u64 RxP0HdrEgrOvflCnt;
- __u64 RxP1HdrEgrOvflCnt;
- __u64 RxP2HdrEgrOvflCnt;
- __u64 RxP3HdrEgrOvflCnt;
- __u64 RxP4HdrEgrOvflCnt;
- __u64 RxP5HdrEgrOvflCnt;
- __u64 RxP6HdrEgrOvflCnt;
- __u64 RxP7HdrEgrOvflCnt;
- __u64 RxP8HdrEgrOvflCnt;
- __u64 RxP9HdrEgrOvflCnt;
- __u64 RxP10HdrEgrOvflCnt;
- __u64 RxP11HdrEgrOvflCnt;
- __u64 RxP12HdrEgrOvflCnt;
- __u64 RxP13HdrEgrOvflCnt;
- __u64 RxP14HdrEgrOvflCnt;
- __u64 RxP15HdrEgrOvflCnt;
- __u64 RxP16HdrEgrOvflCnt;
- __u64 IBStatusChangeCnt;
- __u64 IBLinkErrRecoveryCnt;
- __u64 IBLinkDownedCnt;
- __u64 IBSymbolErrCnt;
- __u64 RxVL15DroppedPktCnt;
- __u64 RxOtherLocalPhyErrCnt;
- __u64 PcieRetryBufDiagQwordCnt;
- __u64 ExcessBufferOvflCnt;
- __u64 LocalLinkIntegrityErrCnt;
- __u64 RxVlErrCnt;
- __u64 RxDlidFltrCnt;
-};
-
-/*
- * The next set of defines are for packet headers, and chip register
- * and memory bits that are visible to and/or used by user-mode software.
- */
-
-/* RcvHdrFlags bits */
-#define QLOGIC_IB_RHF_LENGTH_MASK 0x7FF
-#define QLOGIC_IB_RHF_LENGTH_SHIFT 0
-#define QLOGIC_IB_RHF_RCVTYPE_MASK 0x7
-#define QLOGIC_IB_RHF_RCVTYPE_SHIFT 11
-#define QLOGIC_IB_RHF_EGRINDEX_MASK 0xFFF
-#define QLOGIC_IB_RHF_EGRINDEX_SHIFT 16
-#define QLOGIC_IB_RHF_SEQ_MASK 0xF
-#define QLOGIC_IB_RHF_SEQ_SHIFT 0
-#define QLOGIC_IB_RHF_HDRQ_OFFSET_MASK 0x7FF
-#define QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT 4
-#define QLOGIC_IB_RHF_H_ICRCERR 0x80000000
-#define QLOGIC_IB_RHF_H_VCRCERR 0x40000000
-#define QLOGIC_IB_RHF_H_PARITYERR 0x20000000
-#define QLOGIC_IB_RHF_H_LENERR 0x10000000
-#define QLOGIC_IB_RHF_H_MTUERR 0x08000000
-#define QLOGIC_IB_RHF_H_IHDRERR 0x04000000
-#define QLOGIC_IB_RHF_H_TIDERR 0x02000000
-#define QLOGIC_IB_RHF_H_MKERR 0x01000000
-#define QLOGIC_IB_RHF_H_IBERR 0x00800000
-#define QLOGIC_IB_RHF_H_ERR_MASK 0xFF800000
-#define QLOGIC_IB_RHF_L_USE_EGR 0x80000000
-#define QLOGIC_IB_RHF_L_SWA 0x00008000
-#define QLOGIC_IB_RHF_L_SWB 0x00004000
-
-/* qlogic_ib header fields */
-#define QLOGIC_IB_I_VERS_MASK 0xF
-#define QLOGIC_IB_I_VERS_SHIFT 28
-#define QLOGIC_IB_I_CTXT_MASK 0xF
-#define QLOGIC_IB_I_CTXT_SHIFT 24
-#define QLOGIC_IB_I_TID_MASK 0x7FF
-#define QLOGIC_IB_I_TID_SHIFT 13
-#define QLOGIC_IB_I_OFFSET_MASK 0x1FFF
-#define QLOGIC_IB_I_OFFSET_SHIFT 0
-
-/* K_PktFlags bits */
-#define QLOGIC_IB_KPF_INTR 0x1
-#define QLOGIC_IB_KPF_SUBCTXT_MASK 0x3
-#define QLOGIC_IB_KPF_SUBCTXT_SHIFT 1
-
-#define QLOGIC_IB_MAX_SUBCTXT 4
-
-/* SendPIO per-buffer control */
-#define QLOGIC_IB_SP_TEST 0x40
-#define QLOGIC_IB_SP_TESTEBP 0x20
-#define QLOGIC_IB_SP_TRIGGER_SHIFT 15
-
-/* SendPIOAvail bits */
-#define QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT 1
-#define QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT 0
-
-/* qlogic_ib header format */
-struct qib_header {
- /*
- * Version - 4 bits, Context - 4 bits, TID - 10 bits and Offset -
- * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
- * Context 4, TID 11, offset 13.
- */
- __le32 ver_ctxt_tid_offset;
- __le16 chksum;
- __le16 pkt_flags;
-};
-
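A sketch composing ver_ctxt_tid_offset from the QLOGIC_IB_I_* fields above, using the post-ECO layout (Vers 4, Context 4, TID 11, offset 13); the helper name is illustrative, and the result still needs conversion to little-endian before being stored in the header:

static __u32 qib_make_vcto(__u32 vers, __u32 ctxt, __u32 tid, __u32 off)
{
	return ((vers & QLOGIC_IB_I_VERS_MASK) << QLOGIC_IB_I_VERS_SHIFT) |
	       ((ctxt & QLOGIC_IB_I_CTXT_MASK) << QLOGIC_IB_I_CTXT_SHIFT) |
	       ((tid & QLOGIC_IB_I_TID_MASK) << QLOGIC_IB_I_TID_SHIFT) |
	       ((off & QLOGIC_IB_I_OFFSET_MASK) << QLOGIC_IB_I_OFFSET_SHIFT);
}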
-/*
- * qlogic_ib user message header format.
- * This structure contains the first 4 fields common to all protocols
- * that employ qlogic_ib.
- */
-struct qib_message_header {
- __be16 lrh[4];
- __be32 bth[3];
- /* fields below this point are in host byte order */
- struct qib_header iph;
- /* fields below are simplified, but should match PSM */
- /* some are accessed by driver when packet splitting is needed */
- __u8 sub_opcode;
- __u8 flags;
- __u16 commidx;
- __u32 ack_seq_num;
- __u8 flowid;
- __u8 hdr_dlen;
- __u16 mqhdr;
- __u32 uwords[4];
-};
-
-/* sequence number bits for message */
-union qib_seqnum {
- struct {
- __u32 seq:11;
- __u32 gen:8;
- __u32 flow:5;
- };
- struct {
- __u32 pkt:16;
- __u32 msg:8;
- };
- __u32 val;
-};
-
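The union gives two views of one 32-bit value; for example, a hypothetical increment of the 11-bit sequence field, relying on C bitfield truncation for wraparound:

static __u32 qib_seq_advance(__u32 val)
{
	union qib_seqnum s;

	s.val = val;
	s.seq++;		/* 11-bit field wraps naturally */
	return s.val;
}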
-/* qib receiving-dma tid-session-member */
-struct qib_tid_session_member {
- __u16 tid;
- __u16 offset;
- __u16 length;
-};
-
-/* IB - LRH header consts */
-#define QIB_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
-#define QIB_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
-
-/* misc. */
-#define SIZE_OF_CRC 1
-
-#define QIB_DEFAULT_P_KEY 0xFFFF
-#define QIB_PSN_MASK 0xFFFFFF
-#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
-#define QIB_MULTICAST_QPN 0xFFFFFF
-
-/* Receive Header Queue: receive type (from qlogic_ib) */
-#define RCVHQ_RCV_TYPE_EXPECTED 0
-#define RCVHQ_RCV_TYPE_EAGER 1
-#define RCVHQ_RCV_TYPE_NON_KD 2
-#define RCVHQ_RCV_TYPE_ERROR 3
-
-#define QIB_HEADER_QUEUE_WORDS 9
-
-/* Functions for extracting fields from rcvhdrq entries for the driver. */
-static inline __u32 qib_hdrget_err_flags(const __le32 *rbuf)
-{
- return __le32_to_cpu(rbuf[1]) & QLOGIC_IB_RHF_H_ERR_MASK;
-}
-
-static inline __u32 qib_hdrget_rcv_type(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_RCVTYPE_SHIFT) &
- QLOGIC_IB_RHF_RCVTYPE_MASK;
-}
-
-static inline __u32 qib_hdrget_length_in_bytes(const __le32 *rbuf)
-{
- return ((__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_LENGTH_SHIFT) &
- QLOGIC_IB_RHF_LENGTH_MASK) << 2;
-}
-
-static inline __u32 qib_hdrget_index(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_EGRINDEX_SHIFT) &
- QLOGIC_IB_RHF_EGRINDEX_MASK;
-}
-
-static inline __u32 qib_hdrget_seq(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_SEQ_SHIFT) &
- QLOGIC_IB_RHF_SEQ_MASK;
-}
-
-static inline __u32 qib_hdrget_offset(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT) &
- QLOGIC_IB_RHF_HDRQ_OFFSET_MASK;
-}
-
-static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
-{
- return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR;
-}
-#endif /* _QIB_COMMON_H */
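Taken together, the inline helpers above let a receive loop classify one rcvhdrq entry; a sketch (rbuf points at the two RHF dwords of the entry; actual payload handling is elided):

static void qib_classify_rhf(const __le32 *rbuf)
{
	if (qib_hdrget_err_flags(rbuf))
		return;		/* one of the QLOGIC_IB_RHF_H_* errors */

	if (qib_hdrget_rcv_type(rbuf) == RCVHQ_RCV_TYPE_EAGER &&
	    qib_hdrget_use_egr_buf(rbuf)) {
		__u32 egridx = qib_hdrget_index(rbuf);
		__u32 tlen = qib_hdrget_length_in_bytes(rbuf);

		/* a payload of tlen bytes lives in eager buffer egridx */
		(void)egridx;
		(void)tlen;
	}
}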
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
deleted file mode 100644
index caeb77d07a58..000000000000
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Copyright (c) 2013 - 2017 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-
-#include "qib.h"
-#include "qib_verbs.h"
-#include "qib_debugfs.h"
-
-static struct dentry *qib_dbg_root;
-
-#define DEBUGFS_FILE(name) \
-static const struct seq_operations _##name##_seq_ops = { \
- .start = _##name##_seq_start, \
- .next = _##name##_seq_next, \
- .stop = _##name##_seq_stop, \
- .show = _##name##_seq_show \
-}; \
-static int _##name##_open(struct inode *inode, struct file *s) \
-{ \
- struct seq_file *seq; \
- int ret; \
- ret = seq_open(s, &_##name##_seq_ops); \
- if (ret) \
- return ret; \
- seq = s->private_data; \
- seq->private = inode->i_private; \
- return 0; \
-} \
-static const struct file_operations _##name##_file_ops = { \
- .owner = THIS_MODULE, \
- .open = _##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = seq_release \
-};
-
-static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
-{
- struct qib_opcode_stats_perctx *opstats;
-
- if (*pos >= ARRAY_SIZE(opstats->stats))
- return NULL;
- return pos;
-}
-
-static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct qib_opcode_stats_perctx *opstats;
-
- ++*pos;
- if (*pos >= ARRAY_SIZE(opstats->stats))
- return NULL;
- return pos;
-}
-
-
-static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
-{
- /* nothing allocated */
-}
-
-static int _opcode_stats_seq_show(struct seq_file *s, void *v)
-{
- loff_t *spos = v;
- loff_t i = *spos, j;
- u64 n_packets = 0, n_bytes = 0;
- struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
- struct qib_devdata *dd = dd_from_dev(ibd);
-
- for (j = 0; j < dd->first_user_ctxt; j++) {
- if (!dd->rcd[j])
- continue;
- n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
- n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes;
- }
- if (!n_packets && !n_bytes)
- return SEQ_SKIP;
- seq_printf(s, "%02llx %llu/%llu\n", i,
- (unsigned long long) n_packets,
- (unsigned long long) n_bytes);
-
- return 0;
-}
-
-DEBUGFS_FILE(opcode_stats)
-
-static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos)
-{
- struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
- struct qib_devdata *dd = dd_from_dev(ibd);
-
- if (!*pos)
- return SEQ_START_TOKEN;
- if (*pos >= dd->first_user_ctxt)
- return NULL;
- return pos;
-}
-
-static void *_ctx_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
- struct qib_devdata *dd = dd_from_dev(ibd);
-
- if (v == SEQ_START_TOKEN)
- return pos;
-
- ++*pos;
- if (*pos >= dd->first_user_ctxt)
- return NULL;
- return pos;
-}
-
-static void _ctx_stats_seq_stop(struct seq_file *s, void *v)
-{
- /* nothing allocated */
-}
-
-static int _ctx_stats_seq_show(struct seq_file *s, void *v)
-{
- loff_t *spos;
- loff_t i, j;
- u64 n_packets = 0;
- struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
- struct qib_devdata *dd = dd_from_dev(ibd);
-
- if (v == SEQ_START_TOKEN) {
- seq_puts(s, "Ctx:npkts\n");
- return 0;
- }
-
- spos = v;
- i = *spos;
-
- if (!dd->rcd[i])
- return SEQ_SKIP;
-
- for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++)
- n_packets += dd->rcd[i]->opstats->stats[j].n_packets;
-
- if (!n_packets)
- return SEQ_SKIP;
-
- seq_printf(s, " %llu:%llu\n", i, n_packets);
- return 0;
-}
-
-DEBUGFS_FILE(ctx_stats)
-
-static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
- __acquires(RCU)
-{
- struct rvt_qp_iter *iter;
- loff_t n = *pos;
-
- iter = rvt_qp_iter_init(s->private, 0, NULL);
-
- /* stop calls rcu_read_unlock */
- rcu_read_lock();
-
- if (!iter)
- return NULL;
-
- do {
- if (rvt_qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
- } while (n--);
-
- return iter;
-}
-
-static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
- loff_t *pos)
- __must_hold(RCU)
-{
- struct rvt_qp_iter *iter = iter_ptr;
-
- (*pos)++;
-
- if (rvt_qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
-
- return iter;
-}
-
-static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
- __releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
-{
- struct rvt_qp_iter *iter = iter_ptr;
-
- if (!iter)
- return 0;
-
- qib_qp_iter_print(s, iter);
-
- return 0;
-}
-
-DEBUGFS_FILE(qp_stats)
-
-void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
-{
- struct dentry *root;
- char name[10];
-
- snprintf(name, sizeof(name), "qib%d", dd_from_dev(ibd)->unit);
- root = debugfs_create_dir(name, qib_dbg_root);
- ibd->qib_ibdev_dbg = root;
-
- debugfs_create_file("opcode_stats", 0400, root, ibd,
- &_opcode_stats_file_ops);
- debugfs_create_file("ctx_stats", 0400, root, ibd, &_ctx_stats_file_ops);
- debugfs_create_file("qp_stats", 0400, root, ibd, &_qp_stats_file_ops);
-}
-
-void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
-{
- if (!qib_dbg_root)
- goto out;
- debugfs_remove_recursive(ibd->qib_ibdev_dbg);
-out:
- ibd->qib_ibdev_dbg = NULL;
-}
-
-void qib_dbg_init(void)
-{
- qib_dbg_root = debugfs_create_dir(QIB_DRV_NAME, NULL);
-}
-
-void qib_dbg_exit(void)
-{
- debugfs_remove_recursive(qib_dbg_root);
- qib_dbg_root = NULL;
-}
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.h b/drivers/infiniband/hw/qib/qib_debugfs.h
deleted file mode 100644
index 7ae983a91b8b..000000000000
--- a/drivers/infiniband/hw/qib/qib_debugfs.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef _QIB_DEBUGFS_H
-#define _QIB_DEBUGFS_H
-
-#ifdef CONFIG_DEBUG_FS
-/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-struct qib_ibdev;
-void qib_dbg_ibdev_init(struct qib_ibdev *ibd);
-void qib_dbg_ibdev_exit(struct qib_ibdev *ibd);
-void qib_dbg_init(void);
-void qib_dbg_exit(void);
-
-#endif
-
-#endif /* _QIB_DEBUGFS_H */
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
deleted file mode 100644
index 11da796dd1b7..000000000000
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ /dev/null
@@ -1,906 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains support for diagnostic functions. It is accessed by
- * opening the qib_diag device, normally minor number 129. Diagnostic use
- * of the QLogic_IB chip may render the chip or board unusable until the
- * driver is unloaded, or in some cases, until the system is rebooted.
- *
- * Accesses to the chip through this interface are not similar to going
- * through the /sys/bus/pci resource mmap interface.
- */
-
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include "qib.h"
-#include "qib_common.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
-
-/*
- * Each client that opens the diag device must read then write
- * offset 0, to prevent lossage from random cat or od. diag_state
- * sequences this "handshake".
- */
-enum diag_state { UNUSED = 0, OPENED, INIT, READY };
-
-/* State for an individual client. PID so children cannot abuse handshake */
-static struct qib_diag_client {
- struct qib_diag_client *next;
- struct qib_devdata *dd;
- pid_t pid;
- enum diag_state state;
-} *client_pool;
-
-/*
- * Get a client struct. Recycled if possible, else kmalloc.
- * Must be called with qib_mutex held
- */
-static struct qib_diag_client *get_client(struct qib_devdata *dd)
-{
- struct qib_diag_client *dc;
-
- dc = client_pool;
- if (dc)
- /* got from pool; remove it and use */
- client_pool = dc->next;
- else
- /* None in pool, alloc and init */
- dc = kmalloc(sizeof(*dc), GFP_KERNEL);
-
- if (dc) {
- dc->next = NULL;
- dc->dd = dd;
- dc->pid = current->pid;
- dc->state = OPENED;
- }
- return dc;
-}
-
-/*
- * Return to pool. Must be called with qib_mutex held
- */
-static void return_client(struct qib_diag_client *dc)
-{
- struct qib_devdata *dd = dc->dd;
- struct qib_diag_client *tdc, *rdc;
-
- rdc = NULL;
- if (dc == dd->diag_client) {
- dd->diag_client = dc->next;
- rdc = dc;
- } else {
- tdc = dc->dd->diag_client;
- while (tdc) {
- if (dc == tdc->next) {
- tdc->next = dc->next;
- rdc = dc;
- break;
- }
- tdc = tdc->next;
- }
- }
- if (rdc) {
- rdc->state = UNUSED;
- rdc->dd = NULL;
- rdc->pid = 0;
- rdc->next = client_pool;
- client_pool = rdc;
- }
-}
-
-static int qib_diag_open(struct inode *in, struct file *fp);
-static int qib_diag_release(struct inode *in, struct file *fp);
-static ssize_t qib_diag_read(struct file *fp, char __user *data,
- size_t count, loff_t *off);
-static ssize_t qib_diag_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-
-static const struct file_operations diag_file_ops = {
- .owner = THIS_MODULE,
- .write = qib_diag_write,
- .read = qib_diag_read,
- .open = qib_diag_open,
- .release = qib_diag_release,
- .llseek = default_llseek,
-};
-
-static atomic_t diagpkt_count = ATOMIC_INIT(0);
-static struct cdev *diagpkt_cdev;
-static struct device *diagpkt_device;
-
-static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-
-static const struct file_operations diagpkt_file_ops = {
- .owner = THIS_MODULE,
- .write = qib_diagpkt_write,
- .llseek = noop_llseek,
-};
-
-int qib_diag_add(struct qib_devdata *dd)
-{
- char name[16];
- int ret = 0;
-
- if (atomic_inc_return(&diagpkt_count) == 1) {
- ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
- &diagpkt_file_ops, &diagpkt_cdev,
- &diagpkt_device);
- if (ret)
- goto done;
- }
-
- snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
- ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
- &diag_file_ops, &dd->diag_cdev,
- &dd->diag_device);
-done:
- return ret;
-}
-
-static void qib_unregister_observers(struct qib_devdata *dd);
-
-void qib_diag_remove(struct qib_devdata *dd)
-{
- struct qib_diag_client *dc;
-
- if (atomic_dec_and_test(&diagpkt_count))
- qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
-
- qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
-
- /*
- * Return all diag_clients of this device. There should be none,
- * as we are "guaranteed" that no clients are still open
- */
- while (dd->diag_client)
- return_client(dd->diag_client);
-
- /* Now clean up all unused client structs */
- while (client_pool) {
- dc = client_pool;
- client_pool = dc->next;
- kfree(dc);
- }
- /* Clean up observer list */
- qib_unregister_observers(dd);
-}
-
-/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
- *
- * @dd: the qlogic_ib device
- * @offs: the offset in chip-space
- * @cntp: Pointer to max (byte) count for transfer starting at offset
- * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
- * mapping. It is needed because with the use of PAT for control of
- * write-combining, the logically contiguous address-space of the chip
- * may be split into virtually non-contiguous spaces, with different
- * attributes, which are then mapped to contiguous physical space
- * based from the first BAR.
- *
- * The code below makes the same assumptions as were made in
- * init_chip_wc_pat() (qib_init.c), copied here:
- * Assumes chip address space looks like:
- * - kregs + sregs + cregs + uregs (in any order)
- * - piobufs (2K and 4K bufs in either order)
- * or:
- * - kregs + sregs + cregs (in any order)
- * - piobufs (2K and 4K bufs in either order)
- * - uregs
- *
- * If cntp is non-NULL, returns how many bytes from offset can be accessed
- * Returns 0 if the offset is not mapped.
- */
-static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
- u32 *cntp)
-{
- u32 kreglen;
- u32 snd_bottom, snd_lim = 0;
- u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
- u32 __iomem *map = NULL;
- u32 cnt = 0;
- u32 tot4k, offs4k;
-
- /* First, simplest case, offset is within the first map. */
- kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
- if (offset < kreglen) {
- map = krb32 + (offset / sizeof(u32));
- cnt = kreglen - offset;
- goto mapped;
- }
-
- /*
- * Next check for user regs, the next most common case,
- * and a cheap check because if they are not in the first map
- * they are last in chip.
- */
- if (dd->userbase) {
- /* If user regs mapped, they are after send, so set limit. */
- u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
-
- if (!dd->piovl15base)
- snd_lim = dd->uregbase;
- krb32 = (u32 __iomem *)dd->userbase;
- if (offset >= dd->uregbase && offset < ulim) {
- map = krb32 + (offset - dd->uregbase) / sizeof(u32);
- cnt = ulim - offset;
- goto mapped;
- }
- }
-
- /*
- * Lastly, check for offset within Send Buffers.
- * This is gnarly because struct devdata is deliberately vague
- * about things like 7322 VL15 buffers, and we are not in
- * chip-specific code here, so should not make many assumptions.
- * The one we _do_ make is that the only chip that has more sndbufs
- * than we admit is the 7322, and it has userregs above that, so
- * we know the snd_lim.
- */
- /* Assume 2K buffers are first. */
- snd_bottom = dd->pio2k_bufbase;
- if (snd_lim == 0) {
- u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
-
- snd_lim = snd_bottom + tot2k;
- }
- /* If 4k buffers exist, account for them by bumping
- * appropriate limit.
- */
- tot4k = dd->piobcnt4k * dd->align4k;
- offs4k = dd->piobufbase >> 32;
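- /*
- * Illustrative note, an assumption drawn from the use of offs4k above
- * rather than from chip documentation: piobufbase appears to pack the
- * 2K-buffer chip offset in its low 32 bits and the 4K-buffer offset
- * in its high 32 bits, so the shift by 32 recovers the 4K base.
- */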
- if (dd->piobcnt4k) {
- if (snd_bottom > offs4k)
- snd_bottom = offs4k;
- else {
- /* 4k above 2k. Bump snd_lim, if needed*/
- if (!dd->userbase || dd->piovl15base)
- snd_lim = offs4k + tot4k;
- }
- }
- /*
- * Judgement call: can we ignore the space between SendBuffs and
- * UserRegs, where we would like to see vl15 buffs, but not more?
- */
- if (offset >= snd_bottom && offset < snd_lim) {
- offset -= snd_bottom;
- map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
- cnt = snd_lim - offset;
- }
-
- if (!map && offs4k && dd->piovl15base) {
- snd_lim = offs4k + tot4k + 2 * dd->align4k;
- if (offset >= (offs4k + tot4k) && offset < snd_lim) {
- map = (u32 __iomem *)dd->piovl15base +
- ((offset - (offs4k + tot4k)) / sizeof(u32));
- cnt = snd_lim - offset;
- }
- }
-
-mapped:
- if (cntp)
- *cntp = cnt;
- return map;
-}
-
-/*
- * qib_read_umem64 - read a 64-bit quantity from the chip into user space
- * @dd: the qlogic_ib device
- * @uaddr: the location to store the data in user memory
- * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
- * @count: number of bytes to copy (multiple of 32 bits)
- *
- * This function also localizes all chip memory accesses.
- * The copy should be written such that we read full cacheline packets
- * from the chip. This is usually used for a single qword.
- *
- * NOTE: This assumes the chip address is 64-bit aligned.
- */
-static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
- u32 regoffs, size_t count)
-{
- const u64 __iomem *reg_addr;
- const u64 __iomem *reg_end;
- u32 limit;
- int ret;
-
- reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
- if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
- ret = -EINVAL;
- goto bail;
- }
- if (count >= limit)
- count = limit;
- reg_end = reg_addr + (count / sizeof(u64));
-
- /* not very efficient, but it works for now */
- while (reg_addr < reg_end) {
- u64 data = readq(reg_addr);
-
- if (copy_to_user(uaddr, &data, sizeof(u64))) {
- ret = -EFAULT;
- goto bail;
- }
- reg_addr++;
- uaddr += sizeof(u64);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/*
- * qib_write_umem64 - write a 64-bit quantity to the chip from user space
- * @dd: the qlogic_ib device
- * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
- * @uaddr: the source of the data in user memory
- * @count: the number of bytes to copy (multiple of 32 bits)
- *
- * This is usually used for a single qword
- * NOTE: This assumes the chip address is 64-bit aligned.
- */
-
-static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
- const void __user *uaddr, size_t count)
-{
- u64 __iomem *reg_addr;
- const u64 __iomem *reg_end;
- u32 limit;
- int ret;
-
- reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
- if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
- ret = -EINVAL;
- goto bail;
- }
- if (count >= limit)
- count = limit;
- reg_end = reg_addr + (count / sizeof(u64));
-
- /* not very efficient, but it works for now */
- while (reg_addr < reg_end) {
- u64 data;
-
- if (copy_from_user(&data, uaddr, sizeof(data))) {
- ret = -EFAULT;
- goto bail;
- }
- writeq(data, reg_addr);
-
- reg_addr++;
- uaddr += sizeof(u64);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/*
- * qib_read_umem32 - read a 32-bit quantity from the chip into user space
- * @dd: the qlogic_ib device
- * @uaddr: the location to store the data in user memory
- * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
- * @count: number of bytes to copy
- *
- * read 32 bit values, not 64 bit; for memories that only
- * support 32 bit reads; usually a single dword.
- */
-static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
- u32 regoffs, size_t count)
-{
- const u32 __iomem *reg_addr;
- const u32 __iomem *reg_end;
- u32 limit;
- int ret;
-
- reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
- if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
- ret = -EINVAL;
- goto bail;
- }
- if (count >= limit)
- count = limit;
- reg_end = reg_addr + (count / sizeof(u32));
-
- /* not very efficient, but it works for now */
- while (reg_addr < reg_end) {
- u32 data = readl(reg_addr);
-
- if (copy_to_user(uaddr, &data, sizeof(data))) {
- ret = -EFAULT;
- goto bail;
- }
-
- reg_addr++;
- uaddr += sizeof(u32);
-
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/*
- * qib_write_umem32 - write a 32-bit quantity to the chip from user space
- * @dd: the qlogic_ib device
- * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
- * @uaddr: the source of the data in user memory
- * @count: number of bytes to copy
- *
- * write 32 bit values, not 64 bit; for memories that only
- * support 32 bit write; usually a single dword.
- */
-
-static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
- const void __user *uaddr, size_t count)
-{
- u32 __iomem *reg_addr;
- const u32 __iomem *reg_end;
- u32 limit;
- int ret;
-
- reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
- if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
- ret = -EINVAL;
- goto bail;
- }
- if (count >= limit)
- count = limit;
- reg_end = reg_addr + (count / sizeof(u32));
-
- while (reg_addr < reg_end) {
- u32 data;
-
- if (copy_from_user(&data, uaddr, sizeof(data))) {
- ret = -EFAULT;
- goto bail;
- }
- writel(data, reg_addr);
-
- reg_addr++;
- uaddr += sizeof(u32);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-static int qib_diag_open(struct inode *in, struct file *fp)
-{
- int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
- struct qib_devdata *dd;
- struct qib_diag_client *dc;
- int ret;
-
- mutex_lock(&qib_mutex);
-
- dd = qib_lookup(unit);
-
- if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
- !dd->kregbase) {
- ret = -ENODEV;
- goto bail;
- }
-
- dc = get_client(dd);
- if (!dc) {
- ret = -ENOMEM;
- goto bail;
- }
- dc->next = dd->diag_client;
- dd->diag_client = dc;
- fp->private_data = dc;
- ret = 0;
-bail:
- mutex_unlock(&qib_mutex);
-
- return ret;
-}
-
-/**
- * qib_diagpkt_write - write an IB packet
- * @fp: the diag data device file pointer
- * @data: qib_diag_pkt structure saying where to get the packet
- * @count: size of data to write
- * @off: unused by this code
- */
-static ssize_t qib_diagpkt_write(struct file *fp,
- const char __user *data,
- size_t count, loff_t *off)
-{
- u32 __iomem *piobuf;
- u32 plen, pbufn, maxlen_reserve;
- struct qib_diag_xpkt dp;
- u32 *tmpbuf = NULL;
- struct qib_devdata *dd;
- struct qib_pportdata *ppd;
- ssize_t ret = 0;
-
- if (count != sizeof(dp)) {
- ret = -EINVAL;
- goto bail;
- }
- if (copy_from_user(&dp, data, sizeof(dp))) {
- ret = -EFAULT;
- goto bail;
- }
-
- dd = qib_lookup(dp.unit);
- if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
- ret = -ENODEV;
- goto bail;
- }
- if (!(dd->flags & QIB_INITTED)) {
- /* no hardware, freeze, etc. */
- ret = -ENODEV;
- goto bail;
- }
-
- if (dp.version != _DIAG_XPKT_VERS) {
- qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
- dp.version);
- ret = -EINVAL;
- goto bail;
- }
- /* send count must be an exact number of dwords */
- if (dp.len & 3) {
- ret = -EINVAL;
- goto bail;
- }
- if (!dp.port || dp.port > dd->num_pports) {
- ret = -EINVAL;
- goto bail;
- }
- ppd = &dd->pport[dp.port - 1];
-
- /*
- * We need the total length before the first word is written, plus 2
- * dwords. One dword is for padding so we get the full user data when
- * not aligned on a word boundary. The other dword is to make sure we
- * have room for the ICRC which gets tacked on later.
- */
- maxlen_reserve = 2 * sizeof(u32);
- if (dp.len > ppd->ibmaxlen - maxlen_reserve) {
- ret = -EINVAL;
- goto bail;
- }
-
- plen = sizeof(u32) + dp.len;
-
- tmpbuf = vmalloc(plen);
- if (!tmpbuf) {
- ret = -ENOMEM;
- goto bail;
- }
-
- if (copy_from_user(tmpbuf,
- u64_to_user_ptr(dp.data),
- dp.len)) {
- ret = -EFAULT;
- goto bail;
- }
-
- plen >>= 2; /* in dwords */
-
- if (dp.pbc_wd == 0)
- dp.pbc_wd = plen;
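- /*
- * Recap of what was built above (for illustration): plen is now the
- * packet length in dwords, including the one extra dword allowed for
- * before the vmalloc; when the caller passes pbc_wd == 0, that dword
- * count becomes the default PBC word written to the head of the PIO
- * buffer below.
- */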
-
- piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
- if (!piobuf) {
- ret = -EBUSY;
- goto bail;
- }
- /* disarm it just to be extra sure */
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));
-
- /* disable header check on pbufn for this packet */
- dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);
-
- writeq(dp.pbc_wd, piobuf);
- /*
- * Copy all but the trigger word, then flush, so it's written
- * to chip before trigger word, then write trigger word, then
- * flush again, so packet is sent.
- */
- if (dd->flags & QIB_PIO_FLUSH_WC) {
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, tmpbuf, plen - 1);
- qib_flush_wc();
- __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
- } else
- qib_pio_copy(piobuf + 2, tmpbuf, plen);
-
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf + spcl_off);
- }
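- /*
- * Note, inferred from spcl_off above: on chips using the special
- * trigger, the send is kicked by writing a magic dword to the last
- * dword of the buffer - offset 1023 for a 2K buffer, 2047 for 4K.
- */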
-
- /*
- * Ensure buffer is written to the chip, then re-enable
- * header checks (if supported by chip). The txchk
- * code will ensure seen by chip before returning.
- */
- qib_flush_wc();
- qib_sendbuf_done(dd, pbufn);
- dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
-
- ret = sizeof(dp);
-
-bail:
- vfree(tmpbuf);
- return ret;
-}
-
-static int qib_diag_release(struct inode *in, struct file *fp)
-{
- mutex_lock(&qib_mutex);
- return_client(fp->private_data);
- fp->private_data = NULL;
- mutex_unlock(&qib_mutex);
- return 0;
-}
-
-/*
- * Chip-specific code calls qib_register_observer() to register its
- * interest in a specific address range.
- */
-struct diag_observer_list_elt {
- struct diag_observer_list_elt *next;
- const struct diag_observer *op;
-};
-
-int qib_register_observer(struct qib_devdata *dd,
- const struct diag_observer *op)
-{
- struct diag_observer_list_elt *olp;
- unsigned long flags;
-
- if (!dd || !op)
- return -EINVAL;
- olp = vmalloc(sizeof(*olp));
- if (!olp)
- return -ENOMEM;
-
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- olp->op = op;
- olp->next = dd->diag_observer_list;
- dd->diag_observer_list = olp;
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
-
- return 0;
-}
-
-/* Remove all registered observers when device is closed */
-static void qib_unregister_observers(struct qib_devdata *dd)
-{
- struct diag_observer_list_elt *olp;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- olp = dd->diag_observer_list;
- while (olp) {
- /* Pop one observer, let go of lock */
- dd->diag_observer_list = olp->next;
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
- vfree(olp);
- /* try again. */
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- olp = dd->diag_observer_list;
- }
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
-}
-
-/*
- * Find the observer, if any, for the specified address. The initial
- * implementation is a simple stack of observers. This must be called
- * with the diag transaction lock held.
- */
-static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
- u32 addr)
-{
- struct diag_observer_list_elt *olp;
- const struct diag_observer *op = NULL;
-
- olp = dd->diag_observer_list;
- while (olp) {
- op = olp->op;
- if (addr >= op->bottom && addr <= op->top)
- break;
- olp = olp->next;
- }
- if (!olp)
- op = NULL;
-
- return op;
-}
-
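-/*
- * Usage sketch of the observer mechanism, as inferred from the code in
- * this file rather than separate documentation: chip-specific code
- * registers an observer covering [bottom, top]; qib_diag_read() and
- * qib_diag_write() then route any single 32- or 64-bit access in that
- * range to op->hook() instead of touching the chip mapping directly.
- */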
-static ssize_t qib_diag_read(struct file *fp, char __user *data,
- size_t count, loff_t *off)
-{
- struct qib_diag_client *dc = fp->private_data;
- struct qib_devdata *dd = dc->dd;
- ssize_t ret;
-
- if (dc->pid != current->pid) {
- ret = -EPERM;
- goto bail;
- }
-
- if (count == 0)
- ret = 0;
- else if ((count % 4) || (*off % 4))
- /* address or length is not 32-bit aligned, hence invalid */
- ret = -EINVAL;
- else if (dc->state < READY && (*off || count != 8))
- ret = -EINVAL; /* prevent cat /dev/qib_diag* */
- else {
- unsigned long flags;
- u64 data64 = 0;
- int use_32;
- const struct diag_observer *op;
-
- use_32 = (count % 8) || (*off % 8);
- ret = -1;
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- /*
- * Check for observer on this address range.
- * We only support a single 32 or 64-bit read
- * via observer, currently.
- */
- op = diag_get_observer(dd, *off);
- if (op) {
- u32 offset = *off;
-
- ret = op->hook(dd, op, offset, &data64, 0, use_32);
- }
- /*
- * We need to release lock before any copy_to_user(),
- * whether implicit in qib_read_umem* or explicit below.
- */
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
- if (!op) {
- if (use_32)
- /*
- * Address or length is not 64-bit aligned;
- * do 32-bit rd
- */
- ret = qib_read_umem32(dd, data, (u32) *off,
- count);
- else
- ret = qib_read_umem64(dd, data, (u32) *off,
- count);
- } else if (ret == count) {
- /* Below finishes case where observer existed */
- ret = copy_to_user(data, &data64, use_32 ?
- sizeof(u32) : sizeof(u64));
- if (ret)
- ret = -EFAULT;
- }
- }
-
- if (ret >= 0) {
- *off += count;
- ret = count;
- if (dc->state == OPENED)
- dc->state = INIT;
- }
-bail:
- return ret;
-}
-
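-/*
- * Note on the client state machine, reconstructed from the transitions
- * in this file: a client starts OPENED; the first valid 8-byte read at
- * offset 0 moves it to INIT (see qib_diag_read() above); a matching
- * 8-byte write then moves it to READY (below), after which arbitrary
- * aligned reads and writes are allowed.
- */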
-static ssize_t qib_diag_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- struct qib_diag_client *dc = fp->private_data;
- struct qib_devdata *dd = dc->dd;
- ssize_t ret;
-
- if (dc->pid != current->pid) {
- ret = -EPERM;
- goto bail;
- }
-
- if (count == 0)
- ret = 0;
- else if ((count % 4) || (*off % 4))
- /* address or length is not 32-bit aligned, hence invalid */
- ret = -EINVAL;
- else if (dc->state < READY &&
- ((*off || count != 8) || dc->state != INIT))
- /* No writes except second-step of init seq */
- ret = -EINVAL; /* before any other write allowed */
- else {
- unsigned long flags;
- const struct diag_observer *op = NULL;
- int use_32 = (count % 8) || (*off % 8);
-
- /*
- * Check for observer on this address range.
- * We only support a single 32 or 64-bit write
- * via observer, currently. This helps, because
- * we would otherwise have to jump through hoops
- * to make "diag transaction" meaningful when we
- * cannot do a copy_from_user while holding the lock.
- */
- if (count == 4 || count == 8) {
- u64 data64;
- u32 offset = *off;
-
- ret = copy_from_user(&data64, data, count);
- if (ret) {
- ret = -EFAULT;
- goto bail;
- }
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- op = diag_get_observer(dd, *off);
- if (op)
- ret = op->hook(dd, op, offset, &data64, ~0ULL,
- use_32);
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
- }
-
- if (!op) {
- if (use_32)
- /*
- * Address or length is not 64-bit aligned;
- * do 32-bit write
- */
- ret = qib_write_umem32(dd, (u32) *off, data,
- count);
- else
- ret = qib_write_umem64(dd, (u32) *off, data,
- count);
- }
- }
-
- if (ret >= 0) {
- *off += count;
- ret = count;
- if (dc->state == INIT)
- dc->state = READY; /* all read/write OK now */
- }
-bail:
- return ret;
-}
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
deleted file mode 100644
index 91fa5e160c0d..000000000000
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ /dev/null
@@ -1,798 +0,0 @@
-/*
- * Copyright (c) 2021 Cornelis Networks. All rights reserved.
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/prefetch.h>
-
-#include "qib.h"
-
-DEFINE_MUTEX(qib_mutex); /* general driver use */
-
-unsigned qib_ibmtu;
-module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
-MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");
-
-unsigned qib_compat_ddr_negotiate = 1;
-module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(compat_ddr_negotiate,
- "Attempt pre-IBTA 1.2 DDR speed negotiation");
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Cornelis <support@cornelisnetworks.com>");
-MODULE_DESCRIPTION("Cornelis IB driver");
-
-/*
- * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
- * PIO send buffers. This is well beyond anything currently
- * defined in the InfiniBand spec.
- */
-#define QIB_PIO_MAXIBHDR 128
-
-/*
- * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
- */
-#define QIB_MAX_PKT_RECV 64
-
-struct qlogic_ib_stats qib_stats;
-
-struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(ibdev,
- struct qib_devdata, verbs_dev);
- return dd->pcidev;
-}
-
-/*
- * Return count of units with at least one port ACTIVE.
- */
-int qib_count_active_units(void)
-{
- struct qib_devdata *dd;
- struct qib_pportdata *ppd;
- unsigned long index, flags;
- int pidx, nunits_active = 0;
-
- xa_lock_irqsave(&qib_dev_table, flags);
- xa_for_each(&qib_dev_table, index, dd) {
- if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
- continue;
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
- QIBL_LINKARMED | QIBL_LINKACTIVE))) {
- nunits_active++;
- break;
- }
- }
- }
- xa_unlock_irqrestore(&qib_dev_table, flags);
- return nunits_active;
-}
-
-/*
- * Return count of all units, optionally return in arguments
- * the number of usable (present) units, and the number of
- * ports that are up.
- */
-int qib_count_units(int *npresentp, int *nupp)
-{
- int nunits = 0, npresent = 0, nup = 0;
- struct qib_devdata *dd;
- unsigned long index, flags;
- int pidx;
- struct qib_pportdata *ppd;
-
- xa_lock_irqsave(&qib_dev_table, flags);
- xa_for_each(&qib_dev_table, index, dd) {
- nunits++;
- if ((dd->flags & QIB_PRESENT) && dd->kregbase)
- npresent++;
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
- QIBL_LINKARMED | QIBL_LINKACTIVE)))
- nup++;
- }
- }
- xa_unlock_irqrestore(&qib_dev_table, flags);
-
- if (npresentp)
- *npresentp = npresent;
- if (nupp)
- *nupp = nup;
-
- return nunits;
-}
-
-/**
- * qib_wait_linkstate - wait for an IB link state change to occur
- * @ppd: the qlogic_ib device
- * @state: the state to wait for
- * @msecs: the number of milliseconds to wait
- *
- * Wait up to msecs milliseconds for an IB link state change to occur.
- * For now, take the easy polling route. Currently used only by
- * qib_set_linkstate. Returns 0 if state reached, otherwise
- * -ETIMEDOUT. @state can have multiple bits set, to wait for any of
- * several transitions.
- */
-int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- if (ppd->state_wanted) {
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- ret = -EBUSY;
- goto bail;
- }
- ppd->state_wanted = state;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- wait_event_interruptible_timeout(ppd->state_wait,
- (ppd->lflags & state),
- msecs_to_jiffies(msecs));
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->state_wanted = 0;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- if (!(ppd->lflags & state))
- ret = -ETIMEDOUT;
- else
- ret = 0;
-bail:
- return ret;
-}
-
-int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
-{
- u32 lstate;
- int ret;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- switch (newstate) {
- case QIB_IB_LINKDOWN_ONLY:
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case QIB_IB_LINKDOWN:
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case QIB_IB_LINKDOWN_SLEEP:
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case QIB_IB_LINKDOWN_DISABLE:
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case QIB_IB_LINKARM:
- if (ppd->lflags & QIBL_LINKARMED) {
- ret = 0;
- goto bail;
- }
- if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
- ret = -EINVAL;
- goto bail;
- }
- /*
- * Since the port can be ACTIVE when we ask for ARMED,
- * clear QIBL_LINKV so we can wait for a transition.
- * If the link isn't ARMED, then something else happened
- * and there is no point waiting for ARMED.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_LINKV;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
- lstate = QIBL_LINKV;
- break;
-
- case QIB_IB_LINKACTIVE:
- if (ppd->lflags & QIBL_LINKACTIVE) {
- ret = 0;
- goto bail;
- }
- if (!(ppd->lflags & QIBL_LINKARMED)) {
- ret = -EINVAL;
- goto bail;
- }
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
- lstate = QIBL_LINKACTIVE;
- break;
-
- default:
- ret = -EINVAL;
- goto bail;
- }
- ret = qib_wait_linkstate(ppd, lstate, 10);
-
-bail:
- return ret;
-}
-
-/*
- * Get address of eager buffer from its index (allocated in chunks, not
- * contiguous).
- */
-static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
-{
- const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
- const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1);
-
- return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
-}
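-
-/*
- * Worked example of the math above, with illustrative values only:
- * with rcvegrbufs_perchunk = 8 (shift 3) and a 4KB buffer size
- * (shift 12), etail 10 yields chunk 1, idx 2, i.e. byte offset
- * 2 << 12 into rcvegrbuf[1].
- */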
-
-/*
- * Returns 1 if error was a CRC, else 0.
- * Needed for some chip's synthesized error counters.
- */
-static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
- u32 ctxt, u32 eflags, u32 l, u32 etail,
- __le32 *rhf_addr, struct qib_message_header *rhdr)
-{
- u32 ret = 0;
-
- if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
- ret = 1;
- else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
- /* For TIDERR and RC QPs preemptively schedule a NAK */
- struct ib_header *hdr = (struct ib_header *)rhdr;
- struct ib_other_headers *ohdr = NULL;
- struct qib_ibport *ibp = &ppd->ibport_data;
- struct qib_devdata *dd = ppd->dd;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- struct rvt_qp *qp = NULL;
- u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
- u16 lid = be16_to_cpu(hdr->lrh[1]);
- int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- u32 qp_num;
- u32 opcode;
- u32 psn;
- int diff;
-
- /* Sanity check packet */
- if (tlen < 24)
- goto drop;
-
- if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
- lid &= ~((1 << ppd->lmc) - 1);
- if (unlikely(lid != ppd->lid))
- goto drop;
- }
-
- /* Check for GRH */
- if (lnh == QIB_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == QIB_LRH_GRH) {
- u32 vtf;
-
- ohdr = &hdr->u.l.oth;
- if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
- goto drop;
- vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
- if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
- goto drop;
- } else
- goto drop;
-
- /* Get opcode and PSN from packet */
- opcode = be32_to_cpu(ohdr->bth[0]);
- opcode >>= 24;
- psn = be32_to_cpu(ohdr->bth[2]);
-
- /* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- if (qp_num != QIB_MULTICAST_QPN) {
- int ruc_res;
-
- rcu_read_lock();
- qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
- if (!qp) {
- rcu_read_unlock();
- goto drop;
- }
-
- /*
- * Handle only RC QPs - for other QP types drop error
- * packet.
- */
- spin_lock(&qp->r_lock);
-
- /* Check for valid receive state. */
- if (!(ib_rvt_state_ops[qp->state] &
- RVT_PROCESS_RECV_OK)) {
- ibp->rvp.n_pkt_drops++;
- goto unlock;
- }
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
- ruc_res =
- qib_ruc_check_hdr(
- ibp, hdr,
- lnh == QIB_LRH_GRH,
- qp,
- be32_to_cpu(ohdr->bth[0]));
- if (ruc_res)
- goto unlock;
-
- /* Only deal with RDMA Writes for now */
- if (opcode <
- IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
- diff = qib_cmp24(psn, qp->r_psn);
- if (!qp->r_nak_state && diff >= 0) {
- ibp->rvp.n_rc_seqnak++;
- qp->r_nak_state =
- IB_NAK_PSN_ERROR;
- /* Use the expected PSN. */
- qp->r_ack_psn = qp->r_psn;
- /*
- * Wait to send the sequence
- * NAK until all packets
- * in the receive queue have
- * been processed.
- * Otherwise, we end up
- * propagating congestion.
- */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |=
- RVT_R_RSP_NAK;
- rvt_get_qp(qp);
- list_add_tail(
- &qp->rspwait,
- &rcd->qp_wait_list);
- }
- } /* Out of sequence NAK */
- } /* QP Request NAKs */
- break;
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- case IB_QPT_UD:
- case IB_QPT_UC:
- default:
- /* For now don't handle any other QP types */
- break;
- }
-
-unlock:
- spin_unlock(&qp->r_lock);
- rcu_read_unlock();
- } /* Unicast QP */
- } /* Valid packet with TIDErr */
-
-drop:
- return ret;
-}
-
-/*
- * qib_kreceive - receive a packet
- * @rcd: the qlogic_ib context
- * @llic: gets count of good packets needed to clear lli,
- * (used with chips that need to track crcs for lli)
- *
- * called from interrupt handler for errors or receive interrupt
- * Returns number of CRC error packets, needed by some chips for
- * local link integrity tracking. crcs are adjusted down by following
- * good packets, if any, and count of good packets is also tracked.
- */
-u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
-{
- struct qib_devdata *dd = rcd->dd;
- struct qib_pportdata *ppd = rcd->ppd;
- __le32 *rhf_addr;
- void *ebuf;
- const u32 rsize = dd->rcvhdrentsize; /* words */
- const u32 maxcnt = dd->rcvhdrcnt * rsize; /* words */
- u32 etail = -1, l, hdrqtail;
- struct qib_message_header *hdr;
- u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
- int last;
- u64 lval;
- struct rvt_qp *qp, *nqp;
-
- l = rcd->head;
- rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
- if (dd->flags & QIB_NODMA_RTAIL) {
- u32 seq = qib_hdrget_seq(rhf_addr);
-
- if (seq != rcd->seq_cnt)
- goto bail;
- hdrqtail = 0;
- } else {
- hdrqtail = qib_get_rcvhdrtail(rcd);
- if (l == hdrqtail)
- goto bail;
- smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
- }
-
- for (last = 0, i = 1; !last; i += !last) {
- hdr = dd->f_get_msgheader(dd, rhf_addr);
- eflags = qib_hdrget_err_flags(rhf_addr);
- etype = qib_hdrget_rcv_type(rhf_addr);
- /* total length */
- tlen = qib_hdrget_length_in_bytes(rhf_addr);
- ebuf = NULL;
- if ((dd->flags & QIB_NODMA_RTAIL) ?
- qib_hdrget_use_egr_buf(rhf_addr) :
- (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
- etail = qib_hdrget_index(rhf_addr);
- updegr = 1;
- if (tlen > sizeof(*hdr) ||
- etype >= RCVHQ_RCV_TYPE_NON_KD) {
- ebuf = qib_get_egrbuf(rcd, etail);
- prefetch_range(ebuf, tlen - sizeof(*hdr));
- }
- }
- if (!eflags) {
- u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;
-
- if (lrh_len != tlen) {
- qib_stats.sps_lenerrs++;
- goto move_along;
- }
- }
- if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
- ebuf == NULL &&
- tlen > (dd->rcvhdrentsize - 2 + 1 -
- qib_hdrget_offset(rhf_addr)) << 2) {
- goto move_along;
- }
-
- /*
- * Both tiderr and qibhdrerr are set for all plain IB
- * packets; only qibhdrerr should be set.
- */
- if (unlikely(eflags))
- crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
- etail, rhf_addr, hdr);
- else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
- qib_ib_rcv(rcd, hdr, ebuf, tlen);
- if (crcs)
- crcs--;
- else if (llic && *llic)
- --*llic;
- }
-move_along:
- l += rsize;
- if (l >= maxcnt)
- l = 0;
- if (i == QIB_MAX_PKT_RECV)
- last = 1;
-
- rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
- if (dd->flags & QIB_NODMA_RTAIL) {
- u32 seq = qib_hdrget_seq(rhf_addr);
-
- if (++rcd->seq_cnt > 13)
- rcd->seq_cnt = 1;
- if (seq != rcd->seq_cnt)
- last = 1;
- } else if (l == hdrqtail)
- last = 1;
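- /*
- * (Aside, inferred from the wrap above rather than documented
- * behavior: in QIB_NODMA_RTAIL mode the chip tags each rcvhdr
- * entry with a rolling 1..13 sequence number, so a mismatch
- * means no new packet has arrived yet.)
- */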
- /*
- * Update head regs etc., every 16 packets, if not last pkt,
- * to help prevent rcvhdrq overflows, when many packets
- * are processed and queue is nearly full.
- * Don't request an interrupt for intermediate updates.
- */
- lval = l;
- if (!last && !(i & 0xf)) {
- dd->f_update_usrhead(rcd, lval, updegr, etail, i);
- updegr = 0;
- }
- }
-
- rcd->head = l;
-
- /*
- * Iterate over all QPs waiting to respond.
- * The list won't change since the IRQ is only run on one CPU.
- */
- list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
- list_del_init(&qp->rspwait);
- if (qp->r_flags & RVT_R_RSP_NAK) {
- qp->r_flags &= ~RVT_R_RSP_NAK;
- qib_send_rc_ack(qp);
- }
- if (qp->r_flags & RVT_R_RSP_SEND) {
- unsigned long flags;
-
- qp->r_flags &= ~RVT_R_RSP_SEND;
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_rvt_state_ops[qp->state] &
- RVT_PROCESS_OR_FLUSH_SEND)
- qib_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
- rvt_put_qp(qp);
- }
-
-bail:
- /* Report number of packets consumed */
- if (npkts)
- *npkts = i;
-
- /*
- * Always write head at end, and setup rcv interrupt, even
- * if no packets were processed.
- */
- lval = (u64)rcd->head | dd->rhdrhead_intr_off;
- dd->f_update_usrhead(rcd, lval, updegr, etail, i);
- return crcs;
-}
-
-/**
- * qib_set_mtu - set the MTU
- * @ppd: the perport data
- * @arg: the new MTU
- *
- * We can handle "any" incoming size; the issue here is whether we
- * need to restrict our outgoing size. For now, we don't do any
- * sanity checking on this, and we don't deal with what happens to
- * programs that are already running when the size changes.
- * NOTE: changing the MTU will usually cause the IBC to go back to
- * link INIT state...
- */
-int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
-{
- u32 piosize;
- int ret, chk;
-
- if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
- arg != 4096) {
- ret = -EINVAL;
- goto bail;
- }
- chk = ib_mtu_enum_to_int(qib_ibmtu);
- if (chk > 0 && arg > chk) {
- ret = -EINVAL;
- goto bail;
- }
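- /*
- * Example, using the module-param legend near the top of this file:
- * loading with ibmtu=2 caps the MTU at 512 bytes, so
- * ib_mtu_enum_to_int() returns 512 here and any larger request is
- * rejected with -EINVAL.
- */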
-
- piosize = ppd->ibmaxlen;
- ppd->ibmtu = arg;
-
- if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
- /* Only if it's not the initial value (or reset to it) */
- if (piosize != ppd->init_ibmaxlen) {
- if (arg > piosize && arg <= ppd->init_ibmaxlen)
- piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
- ppd->ibmaxlen = piosize;
- }
- } else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
- piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
- ppd->ibmaxlen = piosize;
- }
-
- ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
-{
- struct qib_devdata *dd = ppd->dd;
-
- ppd->lid = lid;
- ppd->lmc = lmc;
-
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
- lid | (~((1U << lmc) - 1)) << 16);
-
- qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
- dd->unit, ppd->port, lid);
-
- return 0;
-}
-
-/*
- * The following routines deal with the "obviously simple" task of overriding
- * the state of the LEDs, which normally indicate link physical and logical
- * status. The complications arise in dealing with different hardware mappings
- * and the board-dependent routine being called from interrupts.
- * And then there's the requirement to _flash_ them.
- */
-#define LED_OVER_FREQ_SHIFT 8
-#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
-/* Below is "non-zero" to force override, but both actual LEDs are off */
-#define LED_OVER_BOTH_OFF (8)
-
-static void qib_run_led_override(struct timer_list *t)
-{
- struct qib_pportdata *ppd = timer_container_of(ppd, t,
- led_override_timer);
- struct qib_devdata *dd = ppd->dd;
- int timeoff;
- int ph_idx;
-
- if (!(dd->flags & QIB_INITTED))
- return;
-
- ph_idx = ppd->led_override_phase++ & 1;
- ppd->led_override = ppd->led_override_vals[ph_idx];
- timeoff = ppd->led_override_timeoff;
-
- dd->f_setextled(ppd, 1);
- /*
- * don't re-fire the timer if user asked for it to be off; we let
- * it fire one more time after they turn it off to simplify
- */
- if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
- mod_timer(&ppd->led_override_timer, jiffies + timeoff);
-}
-
-void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
-{
- struct qib_devdata *dd = ppd->dd;
- int timeoff, freq;
-
- if (!(dd->flags & QIB_INITTED))
- return;
-
- /* First check if we are blinking. If not, use 1 Hz polling */
- timeoff = HZ;
- freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
-
- if (freq) {
- /* For blink, set each phase from one nybble of val */
- ppd->led_override_vals[0] = val & 0xF;
- ppd->led_override_vals[1] = (val >> 4) & 0xF;
- timeoff = (HZ << 4)/freq;
- } else {
- /* Non-blink set both phases the same. */
- ppd->led_override_vals[0] = val & 0xF;
- ppd->led_override_vals[1] = val & 0xF;
- }
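- /*
- * Summary of the encoding handled above, an inference from this
- * function rather than a documented ABI: val's low nybble drives
- * phase 0, the next nybble phase 1, and bits 8-15 give a blink
- * frequency, with larger freq values yielding a shorter per-phase
- * period of (HZ << 4) / freq jiffies.
- */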
- ppd->led_override_timeoff = timeoff;
-
- /*
- * If the timer has not already been started, do so. Use a "quick"
- * timeout so the function will be called soon, to look at our request.
- */
- if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
- /* Need to start timer */
- timer_setup(&ppd->led_override_timer, qib_run_led_override, 0);
- ppd->led_override_timer.expires = jiffies + 1;
- add_timer(&ppd->led_override_timer);
- } else {
- if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
- mod_timer(&ppd->led_override_timer, jiffies + 1);
- atomic_dec(&ppd->led_override_timer_active);
- }
-}
-
-/**
- * qib_reset_device - reset the chip if possible
- * @unit: the device to reset
- *
- * Whether or not reset is successful, we attempt to re-initialize the chip
- * (that is, much like a driver unload/reload). We clear the INITTED flag
- * so that the various entry points will fail until we reinitialize. For
- * now, we only allow this if no user contexts are open that use chip resources.
- */
-int qib_reset_device(int unit)
-{
- int ret, i;
- struct qib_devdata *dd = qib_lookup(unit);
- struct qib_pportdata *ppd;
- unsigned long flags;
- int pidx;
-
- if (!dd) {
- ret = -ENODEV;
- goto bail;
- }
-
- qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);
-
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
- qib_devinfo(dd->pcidev,
- "Invalid unit number %u or not initialized or not present\n",
- unit);
- ret = -ENXIO;
- goto bail;
- }
-
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- if (dd->rcd)
- for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
- if (!dd->rcd[i] || !dd->rcd[i]->cnt)
- continue;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- ret = -EBUSY;
- goto bail;
- }
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (atomic_read(&ppd->led_override_timer_active)) {
- /* Need to stop LED timer, _then_ shut off LEDs */
- timer_delete_sync(&ppd->led_override_timer);
- atomic_set(&ppd->led_override_timer_active, 0);
- }
-
- /* Shut off LEDs after we are sure timer is not running */
- ppd->led_override = LED_OVER_BOTH_OFF;
- dd->f_setextled(ppd, 0);
- if (dd->flags & QIB_HAS_SEND_DMA)
- qib_teardown_sdma(ppd);
- }
-
- ret = dd->f_reset(dd);
- if (ret == 1)
- ret = qib_init(dd, 1);
- else
- ret = -EAGAIN;
- if (ret)
- qib_dev_err(dd,
- "Reinitialize unit %u after reset failed with %d\n",
- unit, ret);
- else
- qib_devinfo(dd->pcidev,
- "Reinitialized unit %u after resetting\n",
- unit);
-
-bail:
- return ret;
-}
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
deleted file mode 100644
index bf660c001b6d..000000000000
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "qib.h"
-
-/*
- * Functions specific to the serial EEPROM on cards handled by ib_qib.
- * The actual serial interface code is in qib_twsi.c. This file is a client.
- */
-
-/**
- * qib_eeprom_read - receives bytes from the eeprom via I2C
- * @dd: the qlogic_ib device
- * @eeprom_offset: address to read from
- * @buff: where to store result
- * @len: number of bytes to receive
- */
-int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
- void *buff, int len)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (!ret) {
- ret = qib_twsi_reset(dd);
- if (ret)
- qib_dev_err(dd, "EEPROM Reset for read failed\n");
- else
- ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
- eeprom_offset, buff, len);
- mutex_unlock(&dd->eep_lock);
- }
-
- return ret;
-}
-
-/*
- * Actually update the eeprom, first doing write enable if
- * needed, then restoring write enable state.
- * Must be called with eep_lock held
- */
-static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
- const void *buf, int len)
-{
- int ret, pwen;
-
- pwen = dd->f_eeprom_wen(dd, 1);
- ret = qib_twsi_reset(dd);
- if (ret)
- qib_dev_err(dd, "EEPROM Reset for write failed\n");
- else
- ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev,
- offset, buf, len);
- dd->f_eeprom_wen(dd, pwen);
- return ret;
-}
-
-/**
- * qib_eeprom_write - writes data to the eeprom via I2C
- * @dd: the qlogic_ib device
- * @eeprom_offset: where to place data
- * @buff: data to write
- * @len: number of bytes to write
- */
-int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
- const void *buff, int len)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (!ret) {
- ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len);
- mutex_unlock(&dd->eep_lock);
- }
-
- return ret;
-}
-
-static u8 flash_csum(struct qib_flash *ifp, int adjust)
-{
- u8 *ip = (u8 *) ifp;
- u8 csum = 0, len;
-
- /*
- * Limit length checksummed to max length of actual data.
- * Checksum of erased eeprom will still be bad, but we avoid
- * reading past the end of the buffer we were passed.
- */
- len = ifp->if_length;
- if (len > sizeof(struct qib_flash))
- len = sizeof(struct qib_flash);
- while (len--)
- csum += *ip++;
- csum -= ifp->if_csum;
- csum = ~csum;
- if (adjust)
- ifp->if_csum = csum;
-
- return csum;
-}
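-
-/*
- * Invariant worth noting, derivable from the arithmetic above: csum
- * sums every byte including if_csum and then subtracts if_csum back
- * out, so after flash_csum(ifp, 1) a re-check with flash_csum(ifp, 0)
- * returns exactly the stored if_csum.
- */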
-
-/**
- * qib_get_eeprom_info - get the GUID et al. from the TWSI EEPROM device
- * @dd: the qlogic_ib device
- *
- * We have the capability to use the nguid field, and get
- * the guid from the first chip's flash, to use for all of them.
- */
-void qib_get_eeprom_info(struct qib_devdata *dd)
-{
- void *buf;
- struct qib_flash *ifp;
- __be64 guid;
- int len, eep_stat;
- u8 csum, *bguid;
- int t = dd->unit;
- struct qib_devdata *dd0 = qib_lookup(0);
-
- if (t && dd0->nguid > 1 && t <= dd0->nguid) {
- u8 oguid;
-
- dd->base_guid = dd0->base_guid;
- bguid = (u8 *) &dd->base_guid;
-
- oguid = bguid[7];
- bguid[7] += t;
- if (oguid > bguid[7]) {
- if (bguid[6] == 0xff) {
- if (bguid[5] == 0xff) {
- qib_dev_err(dd,
- "Can't set GUID from base, wraps to OUI!\n");
- dd->base_guid = 0;
- goto bail;
- }
- bguid[5]++;
- }
- bguid[6]++;
- }
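- /*
- * In other words, as an illustration of the carry handling above:
- * unit t takes unit 0's GUID and adds t to the lowest octet,
- * rippling any carry into octets 6 and then 5; a carry past octet
- * 5 would run into the OUI portion of the GUID (per the error
- * message), hence the bail-out.
- */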
- dd->nguid = 1;
- goto bail;
- }
-
- /*
- * Read full flash, not just currently used part, since it may have
- * been written with a newer definition.
- */
- len = sizeof(struct qib_flash);
- buf = vmalloc(len);
- if (!buf)
- goto bail;
-
- /*
- * Use "public" eeprom read function, which does locking and
- * figures out device. This will migrate to chip-specific.
- */
- eep_stat = qib_eeprom_read(dd, 0, buf, len);
-
- if (eep_stat) {
- qib_dev_err(dd, "Failed reading GUID from eeprom\n");
- goto done;
- }
- ifp = (struct qib_flash *)buf;
-
- csum = flash_csum(ifp, 0);
- if (csum != ifp->if_csum) {
- qib_devinfo(dd->pcidev,
- "Bad I2C flash checksum: 0x%x, not 0x%x\n",
- csum, ifp->if_csum);
- goto done;
- }
- if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
- *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
- qib_dev_err(dd,
- "Invalid GUID %llx from flash; ignoring\n",
- *(unsigned long long *) ifp->if_guid);
- /* don't allow GUID if all 0 or all 1's */
- goto done;
- }
-
- /* complain, but allow it */
- if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
- qib_devinfo(dd->pcidev,
- "Warning, GUID %llx is default, probably not correct!\n",
- *(unsigned long long *) ifp->if_guid);
-
- bguid = ifp->if_guid;
- if (!bguid[0] && !bguid[1] && !bguid[2]) {
- /*
- * Original incorrect GUID format in flash; fix in
- * core copy, by shifting up 2 octets; don't need to
- * change top octet, since both it and shifted are 0.
- */
- bguid[1] = bguid[3];
- bguid[2] = bguid[4];
- bguid[3] = 0;
- bguid[4] = 0;
- guid = *(__be64 *) ifp->if_guid;
- } else
- guid = *(__be64 *) ifp->if_guid;
- dd->base_guid = guid;
- dd->nguid = ifp->if_numguid;
- /*
- * Things are slightly complicated by the desire to transparently
- * support both the Pathscale 10-digit serial number and the QLogic
- * 13-character version.
- */
- if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] &&
- ((u8 *) ifp->if_sprefix)[0] != 0xFF) {
- char *snp = dd->serial;
-
- /*
- * This board has a Serial-prefix, which is stored
- * elsewhere for backward-compatibility.
- */
- memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix));
- snp[sizeof(ifp->if_sprefix)] = '\0';
- len = strlen(snp);
- snp += len;
- len = sizeof(dd->serial) - len;
- if (len > sizeof(ifp->if_serial))
- len = sizeof(ifp->if_serial);
- memcpy(snp, ifp->if_serial, len);
- } else {
- memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
- }
- if (!strstr(ifp->if_comment, "Tested successfully"))
- qib_dev_err(dd,
- "Board SN %s did not pass functional test: %s\n",
- dd->serial, ifp->if_comment);
-
-done:
- vfree(buf);
-
-bail:;
-}
-
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
deleted file mode 100644
index 29e4c59aa23b..000000000000
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ /dev/null
@@ -1,2401 +0,0 @@
-/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/cdev.h>
-#include <linux/swap.h>
-#include <linux/vmalloc.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/uio.h>
-#include <linux/pgtable.h>
-
-#include <rdma/ib.h>
-
-#include "qib.h"
-#include "qib_common.h"
-#include "qib_user_sdma.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
-
-static int qib_open(struct inode *, struct file *);
-static int qib_close(struct inode *, struct file *);
-static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
-static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
-static __poll_t qib_poll(struct file *, struct poll_table_struct *);
-static int qib_mmapf(struct file *, struct vm_area_struct *);
-
-/*
- * This is really, really weird shit - write() and writev() here
- * have completely unrelated semantics. Sucky userland ABI,
- * film at 11.
- */
-static const struct file_operations qib_file_ops = {
- .owner = THIS_MODULE,
- .write = qib_write,
- .write_iter = qib_write_iter,
- .open = qib_open,
- .release = qib_close,
- .poll = qib_poll,
- .mmap = qib_mmapf,
- .llseek = noop_llseek,
-};
-
-/*
- * Convert kernel virtual addresses to physical addresses so they don't
- * potentially conflict with the chip addresses used as mmap offsets.
- * It doesn't really matter what mmap offset we use as long as we can
- * interpret it correctly.
- */
-static u64 cvt_kvaddr(void *p)
-{
- struct page *page;
- u64 paddr = 0;
-
- page = vmalloc_to_page(p);
- if (page)
- paddr = page_to_pfn(page) << PAGE_SHIFT;
-
- return paddr;
-}
-
-static int qib_get_base_info(struct file *fp, void __user *ubase,
- size_t ubase_size)
-{
- struct qib_ctxtdata *rcd = ctxt_fp(fp);
- int ret = 0;
- struct qib_base_info *kinfo = NULL;
- struct qib_devdata *dd = rcd->dd;
- struct qib_pportdata *ppd = rcd->ppd;
- unsigned subctxt_cnt;
- int shared, master;
- size_t sz;
-
- subctxt_cnt = rcd->subctxt_cnt;
- if (!subctxt_cnt) {
- shared = 0;
- master = 0;
- subctxt_cnt = 1;
- } else {
- shared = 1;
- master = !subctxt_fp(fp);
- }
-
- sz = sizeof(*kinfo);
- /* If context sharing is not requested, allow the old size structure */
- if (!shared)
- sz -= 7 * sizeof(u64);
- if (ubase_size < sz) {
- ret = -EINVAL;
- goto bail;
- }
-
- kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
- if (kinfo == NULL) {
- ret = -ENOMEM;
- goto bail;
- }
-
- ret = dd->f_get_base_info(rcd, kinfo);
- if (ret < 0)
- goto bail;
-
- kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
- kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
- kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
- kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
- /*
- * have to mmap whole thing
- */
- kinfo->spi_rcv_egrbuftotlen =
- rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
- kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
- kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
- rcd->rcvegrbuf_chunks;
- kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
- if (master)
- kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
- /*
- * for this use, may be cfgctxts summed over all chips that
- * are configured and present
- */
- kinfo->spi_nctxts = dd->cfgctxts;
- /* unit (chip/board) our context is on */
- kinfo->spi_unit = dd->unit;
- kinfo->spi_port = ppd->port;
- /* for now, only a single page */
- kinfo->spi_tid_maxsize = PAGE_SIZE;
-
- /*
- * Doing this per context, and based on the skip value, etc. This has
- * to be the actual buffer size, since the protocol code treats it
- * as an array.
- *
- * These have to be set to user addresses in the user code via mmap.
- * These values are used on return to user code for the mmap target
- * addresses only. For 32 bit, same 44 bit address problem, so use
- * the physical address, not virtual. Before 2.6.11, using the
- * page_address() macro worked, but in 2.6.11, even that returns the
- * full 64 bit address (upper bits all 1's). So far, using the
- * physical addresses (or chip offsets, for chip mapping) works, but
- * no doubt some future kernel release will change that, and we'll be
- * on to yet another method of dealing with this.
- * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
- * since the chips with non-zero rhf_offset don't normally
- * enable tail register updates to host memory, but for testing,
- * both can be enabled and used.
- */
- kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
- kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
- kinfo->spi_rhf_offset = dd->rhf_offset;
- kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
- kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
- /* setup per-unit (not port) status area for user programs */
- kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
- (char *) ppd->statusp -
- (char *) dd->pioavailregs_dma;
- kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
- if (!shared) {
- kinfo->spi_piocnt = rcd->piocnt;
- kinfo->spi_piobufbase = (u64) rcd->piobufs;
- kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
- } else if (master) {
- kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
- (rcd->piocnt % subctxt_cnt);
- /* Master's PIO buffers are after all the slave's */
- kinfo->spi_piobufbase = (u64) rcd->piobufs +
- dd->palign *
- (rcd->piocnt - kinfo->spi_piocnt);
- } else {
- unsigned slave = subctxt_fp(fp) - 1;
-
- kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
- kinfo->spi_piobufbase = (u64) rcd->piobufs +
- dd->palign * kinfo->spi_piocnt * slave;
- }
-
- if (shared) {
- kinfo->spi_sendbuf_status =
- cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
- /* only spi_subctxt_* fields should be set in this block! */
- kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
-
- kinfo->spi_subctxt_rcvegrbuf =
- cvt_kvaddr(rcd->subctxt_rcvegrbuf);
- kinfo->spi_subctxt_rcvhdr_base =
- cvt_kvaddr(rcd->subctxt_rcvhdr_base);
- }
-
- /*
- * All user buffers are 2KB buffers. If we ever support
- * giving 4KB buffers to user processes, this will need some
- * work. Can't use piobufbase directly, because it has
- * both 2K and 4K buffer base values.
- */
- kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
- dd->palign;
- kinfo->spi_pioalign = dd->palign;
- kinfo->spi_qpair = QIB_KD_QP;
- /*
- * user mode PIO buffers are always 2KB, even when 4KB can
- * be received, and sent via the kernel; this is ibmaxlen
- * for 2K MTU.
- */
- kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
- kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
- kinfo->spi_ctxt = rcd->ctxt;
- kinfo->spi_subctxt = subctxt_fp(fp);
- kinfo->spi_sw_version = QIB_KERN_SWVERSION;
- kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
- kinfo->spi_hw_version = dd->revision;
-
- if (master)
- kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
-
- sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
- if (copy_to_user(ubase, kinfo, sz))
- ret = -EFAULT;
-bail:
- kfree(kinfo);
- return ret;
-}
-
-/**
- * qib_tid_update - update a context TID
- * @rcd: the context
- * @fp: the qib device file
- * @ti: the TID information
- *
- * The new implementation as of Oct 2004 is that the driver assigns
- * the tid and returns it to the caller. To reduce search time, we
- * keep a cursor for each context, walking the shadow tid array to find
- * one that's not in use.
- *
- * For now, if we can't allocate the full list, we fail, although
- * in the long run, we'll allocate as many as we can, and the
- * caller will deal with that by trying the remaining pages later.
- * That means that when we fail, we have to mark the tids as not in
- * use again, in our shadow copy.
- *
- * It's up to the caller to free the tids when they are done.
- * We'll unlock the pages as they free them.
- *
- * Also, right now we are locking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance.
- */
-static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
- const struct qib_tid_info *ti)
-{
- int ret = 0, ntids;
- u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
- u16 *tidlist;
- struct qib_devdata *dd = rcd->dd;
- u64 physaddr;
- unsigned long vaddr;
- u64 __iomem *tidbase;
- unsigned long tidmap[8];
- struct page **pagep = NULL;
- unsigned subctxt = subctxt_fp(fp);
-
- if (!dd->pageshadow) {
- ret = -ENOMEM;
- goto done;
- }
-
- cnt = ti->tidcnt;
- if (!cnt) {
- ret = -EFAULT;
- goto done;
- }
- ctxttid = rcd->ctxt * dd->rcvtidcnt;
- if (!rcd->subctxt_cnt) {
- tidcnt = dd->rcvtidcnt;
- tid = rcd->tidcursor;
- tidoff = 0;
- } else if (!subctxt) {
- tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
- (dd->rcvtidcnt % rcd->subctxt_cnt);
- tidoff = dd->rcvtidcnt - tidcnt;
- ctxttid += tidoff;
- tid = tidcursor_fp(fp);
- } else {
- tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
- tidoff = tidcnt * (subctxt - 1);
- ctxttid += tidoff;
- tid = tidcursor_fp(fp);
- }
- if (cnt > tidcnt) {
- /* make sure it all fits in tid_pg_list */
- qib_devinfo(dd->pcidev,
- "Process tried to allocate %u TIDs, only trying max (%u)\n",
- cnt, tidcnt);
- cnt = tidcnt;
- }
- pagep = (struct page **) rcd->tid_pg_list;
- tidlist = (u16 *) &pagep[dd->rcvtidcnt];
- pagep += tidoff;
- tidlist += tidoff;
-
- memset(tidmap, 0, sizeof(tidmap));
- /* before decrement; chip actual # */
- ntids = tidcnt;
- tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
- dd->rcvtidbase +
- ctxttid * sizeof(*tidbase));
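- /*
- * Layout note, as implied by the pointer math above: the chip's TID
- * array lives at kregbase + rcvtidbase, one u64 entry per TID, and
- * this context's slice starts ctxttid entries in.
- */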
-
- /* virtual address of first page in transfer */
- vaddr = ti->tidvaddr;
- if (!access_ok((void __user *) vaddr,
- cnt * PAGE_SIZE)) {
- ret = -EFAULT;
- goto done;
- }
- ret = qib_get_user_pages(vaddr, cnt, pagep);
- if (ret) {
- /*
- * if (ret == -EBUSY)
- * We can't continue because the pagep array won't be
- * initialized. This should never happen,
- * unless perhaps the user has mpin'ed the pages
- * themselves.
- */
- qib_devinfo(
- dd->pcidev,
- "Failed to lock addr %p, %u pages: errno %d\n",
- (void *) vaddr, cnt, -ret);
- goto done;
- }
- for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
- dma_addr_t daddr;
-
- for (; ntids--; tid++) {
- if (tid == tidcnt)
- tid = 0;
- if (!dd->pageshadow[ctxttid + tid])
- break;
- }
- if (ntids < 0) {
- /*
- * Oops, wrapped all the way through their TIDs,
- * and didn't have enough free; see comments at
- * start of routine
- */
- i--; /* last tidlist[i] not filled in */
- ret = -ENOMEM;
- break;
- }
- ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
- if (ret)
- break;
-
- tidlist[i] = tid + tidoff;
- /* we "know" system pages and TID pages are same size */
- dd->pageshadow[ctxttid + tid] = pagep[i];
- dd->physshadow[ctxttid + tid] = daddr;
- /*
-		 * don't need an atomic op or its overhead
- */
- __set_bit(tid, tidmap);
- physaddr = dd->physshadow[ctxttid + tid];
- /* PERFORMANCE: below should almost certainly be cached */
- dd->f_put_tid(dd, &tidbase[tid],
- RCVHQ_RCV_TYPE_EXPECTED, physaddr);
- /*
- * don't check this tid in qib_ctxtshadow, since we
- * just filled it in; start with the next one.
- */
- tid++;
- }
-
- if (ret) {
- u32 limit;
-cleanup:
- /* jump here if copy out of updated info failed... */
- /* same code that's in qib_free_tid() */
- limit = sizeof(tidmap) * BITS_PER_BYTE;
- if (limit > tidcnt)
- /* just in case size changes in future */
- limit = tidcnt;
- tid = find_first_bit((const unsigned long *)tidmap, limit);
- for (; tid < limit; tid++) {
- if (!test_bit(tid, tidmap))
- continue;
- if (dd->pageshadow[ctxttid + tid]) {
- dma_addr_t phys;
-
- phys = dd->physshadow[ctxttid + tid];
- dd->physshadow[ctxttid + tid] = dd->tidinvalid;
- /* PERFORMANCE: below should almost certainly
- * be cached
- */
- dd->f_put_tid(dd, &tidbase[tid],
- RCVHQ_RCV_TYPE_EXPECTED,
- dd->tidinvalid);
- dma_unmap_page(&dd->pcidev->dev, phys,
- PAGE_SIZE, DMA_FROM_DEVICE);
- dd->pageshadow[ctxttid + tid] = NULL;
- }
- }
- qib_release_user_pages(pagep, cnt);
- } else {
- /*
- * Copy the updated array, with qib_tid's filled in, back
- * to user. Since we did the copy in already, this "should
- * never fail" If it does, we have to clean up...
- */
- if (copy_to_user((void __user *)
- (unsigned long) ti->tidlist,
- tidlist, cnt * sizeof(*tidlist))) {
- ret = -EFAULT;
- goto cleanup;
- }
- if (copy_to_user(u64_to_user_ptr(ti->tidmap),
- tidmap, sizeof(tidmap))) {
- ret = -EFAULT;
- goto cleanup;
- }
- if (tid == tidcnt)
- tid = 0;
- if (!rcd->subctxt_cnt)
- rcd->tidcursor = tid;
- else
- tidcursor_fp(fp) = tid;
- }
-
-done:
- return ret;
-}
-
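The cursor walk described in qib_tid_update()'s comment is easier to see in isolation. A standalone C sketch of the same idea (illustrative names, not driver code): a persistent cursor scans a shadow array for a free slot, wrapping once, with a lap counter bounding the search so a full table is detected.

    #include <stdio.h>

    #define NSLOTS 8

    static void *shadow[NSLOTS];  /* NULL means the slot is free */
    static unsigned cursor;       /* persists across calls, like rcd->tidcursor */

    /* Claim a free slot starting at the cursor; -1 if all are in use. */
    static int alloc_slot(void)
    {
            int remaining = NSLOTS;            /* like ntids: at most one lap */
            unsigned tid = cursor;

            while (remaining-- > 0) {
                    if (tid == NSLOTS)
                            tid = 0;           /* wrap to the array start */
                    if (!shadow[tid]) {
                            shadow[tid] = (void *)1;  /* mark in use */
                            cursor = tid + 1;         /* resume here next time */
                            return (int)tid;
                    }
                    tid++;
            }
            return -1;                         /* wrapped fully; nothing free */
    }

    int main(void)
    {
            for (int i = 0; i <= NSLOTS; i++)
                    printf("alloc_slot() -> %d\n", alloc_slot());
            return 0;
    }
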
-/**
- * qib_tid_free - free a context TID
- * @rcd: the context
- * @subctxt: the subcontext
- * @ti: the TID info
- *
- * right now we are unlocking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance. We check that the TID is in range for this context
- * but otherwise don't check validity; if the user has an error and
- * frees the wrong tid, it's only their own data that can thereby
- * be corrupted. We do check that the TID was in use, for sanity.
- * We always use our idea of the saved address, not the address that
- * they pass in to us.
- */
-static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
- const struct qib_tid_info *ti)
-{
- int ret = 0;
- u32 tid, ctxttid, limit, tidcnt;
- struct qib_devdata *dd = rcd->dd;
- u64 __iomem *tidbase;
- unsigned long tidmap[8];
-
- if (!dd->pageshadow) {
- ret = -ENOMEM;
- goto done;
- }
-
- if (copy_from_user(tidmap, u64_to_user_ptr(ti->tidmap),
- sizeof(tidmap))) {
- ret = -EFAULT;
- goto done;
- }
-
- ctxttid = rcd->ctxt * dd->rcvtidcnt;
- if (!rcd->subctxt_cnt)
- tidcnt = dd->rcvtidcnt;
- else if (!subctxt) {
- tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
- (dd->rcvtidcnt % rcd->subctxt_cnt);
- ctxttid += dd->rcvtidcnt - tidcnt;
- } else {
- tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
- ctxttid += tidcnt * (subctxt - 1);
- }
- tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
- dd->rcvtidbase +
- ctxttid * sizeof(*tidbase));
-
- limit = sizeof(tidmap) * BITS_PER_BYTE;
- if (limit > tidcnt)
- /* just in case size changes in future */
- limit = tidcnt;
- tid = find_first_bit(tidmap, limit);
- for (; tid < limit; tid++) {
- /*
- * small optimization; if we detect a run of 3 or so without
- * any set, use find_first_bit again. That's mainly to
- * accelerate the case where we wrapped, so we have some at
- * the beginning, and some at the end, and a big gap
- * in the middle.
- */
- if (!test_bit(tid, tidmap))
- continue;
-
- if (dd->pageshadow[ctxttid + tid]) {
- struct page *p;
- dma_addr_t phys;
-
- p = dd->pageshadow[ctxttid + tid];
- dd->pageshadow[ctxttid + tid] = NULL;
- phys = dd->physshadow[ctxttid + tid];
- dd->physshadow[ctxttid + tid] = dd->tidinvalid;
- /* PERFORMANCE: below should almost certainly be
- * cached
- */
- dd->f_put_tid(dd, &tidbase[tid],
- RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
- dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
- DMA_FROM_DEVICE);
- qib_release_user_pages(&p, 1);
- }
- }
-done:
- return ret;
-}
-
-/**
- * qib_set_part_key - set a partition key
- * @rcd: the context
- * @key: the key
- *
- * We can have up to 4 active at a time (other than the default, which is
- * always allowed). This is somewhat tricky, since multiple contexts may set
- * the same key, so we reference count them, and clean up at exit. All 4
- * partition keys are packed into a single qlogic_ib register. It's an
- * error for a process to set the same pkey multiple times. We provide no
- * mechanism to de-allocate a pkey at this time; we may eventually need to
- * do that. I've used atomic operations, no locking, and only a single
- * pass through what's available. This should be more than adequate for
- * some time. I'll think about spinlocks or the like if and when it
- * becomes necessary.
- */
-static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
-{
- struct qib_pportdata *ppd = rcd->ppd;
- int i, pidx = -1;
- bool any = false;
- u16 lkey = key & 0x7FFF;
-
- if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF))
- /* nothing to do; this key always valid */
- return 0;
-
- if (!lkey)
- return -EINVAL;
-
- /*
- * Set the full membership bit, because it has to be
- * set in the register or the packet, and it seems
- * cleaner to set in the register than to force all
- * callers to set it.
- */
- key |= 0x8000;
-
- for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
- if (!rcd->pkeys[i] && pidx == -1)
- pidx = i;
- if (rcd->pkeys[i] == key)
- return -EEXIST;
- }
- if (pidx == -1)
- return -EBUSY;
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (!ppd->pkeys[i]) {
- any = true;
- continue;
- }
- if (ppd->pkeys[i] == key) {
- atomic_t *pkrefs = &ppd->pkeyrefs[i];
-
- if (atomic_inc_return(pkrefs) > 1) {
- rcd->pkeys[pidx] = key;
- return 0;
- }
- /*
- * lost race, decrement count, catch below
- */
- atomic_dec(pkrefs);
- any = true;
- }
- if ((ppd->pkeys[i] & 0x7FFF) == lkey)
- /*
- * It makes no sense to have both the limited and
- * full membership PKEY set at the same time since
- * the unlimited one will disable the limited one.
- */
- return -EEXIST;
- }
- if (!any)
- return -EBUSY;
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (!ppd->pkeys[i] &&
- atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
- rcd->pkeys[pidx] = key;
- ppd->pkeys[i] = key;
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
- return 0;
- }
- }
- return -EBUSY;
-}
-
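The lost-race check in qib_set_part_key()'s middle loop is a compact lock-free idiom: the increment both takes a reference and reveals whether the key was already stably published. A hedged userspace analogue using C11 atomics (slot layout and names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct pkey_slot {
            unsigned short key;   /* 0 means unpublished */
            atomic_int refs;
    };

    /*
     * Take a reference on an already-published key. If the increment
     * observes a prior holder (new count > 1), the slot is stable and
     * the reference is ours; if we were first (new count == 1), we
     * raced with a concurrent teardown, so back off and let the
     * caller retry via the publish path.
     */
    static bool ref_existing(struct pkey_slot *slot, unsigned short key)
    {
            if (slot->key != key)
                    return false;                     /* not this slot */
            if (atomic_fetch_add(&slot->refs, 1) + 1 > 1)
                    return true;                      /* reference taken */
            atomic_fetch_sub(&slot->refs, 1);         /* lost the race */
            return false;
    }
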
-/**
- * qib_manage_rcvq - manage a context's receive queue
- * @rcd: the context
- * @subctxt: the subcontext
- * @start_stop: action to carry out
- *
- * start_stop == 0 disables receive on the context, for use in queue
- * overflow conditions. start_stop==1 re-enables, to be used to
- * re-init the software copy of the head register
- */
-static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
- int start_stop)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned int rcvctrl_op;
-
- if (subctxt)
- goto bail;
- /* atomically clear receive enable ctxt. */
- if (start_stop) {
- /*
- * On enable, force in-memory copy of the tail register to
- * 0, so that protocol code doesn't have to worry about
- * whether or not the chip has yet updated the in-memory
- * copy or not on return from the system call. The chip
-		 * always resets its tail register back to 0 on a
- * transition from disabled to enabled.
- */
- if (rcd->rcvhdrtail_kvaddr)
- qib_clear_rcvhdrtail(rcd);
- rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
- } else
- rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
- dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
- /* always; new head should be equal to new tail; see above */
-bail:
- return 0;
-}
-
-static void qib_clean_part_key(struct qib_ctxtdata *rcd,
- struct qib_devdata *dd)
-{
- int i, j, pchanged = 0;
- struct qib_pportdata *ppd = rcd->ppd;
-
- for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
- if (!rcd->pkeys[i])
- continue;
- for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
- /* check for match independent of the global bit */
- if ((ppd->pkeys[j] & 0x7fff) !=
- (rcd->pkeys[i] & 0x7fff))
- continue;
- if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
- ppd->pkeys[j] = 0;
- pchanged++;
- }
- break;
- }
- rcd->pkeys[i] = 0;
- }
- if (pchanged)
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
-}
-
-/* common code for the mappings on dma_alloc_coherent mem */
-static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
- unsigned len, void *kvaddr, u32 write_ok, char *what)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned long pfn;
- int ret;
-
- if ((vma->vm_end - vma->vm_start) > len) {
- qib_devinfo(dd->pcidev,
- "FAIL on %s: len %lx > %x\n", what,
- vma->vm_end - vma->vm_start, len);
- ret = -EFAULT;
- goto bail;
- }
-
- /*
-	 * shared context user code requires rcvhdrq mapped r/w; others are
-	 * allowed only a read-only mapping.
- */
- if (!write_ok) {
- if (vma->vm_flags & VM_WRITE) {
- qib_devinfo(dd->pcidev,
- "%s must be mapped readonly\n", what);
- ret = -EPERM;
- goto bail;
- }
-
- /* don't allow them to later change with mprotect */
- vm_flags_clear(vma, VM_MAYWRITE);
- }
-
- pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
- ret = remap_pfn_range(vma, vma->vm_start, pfn,
- len, vma->vm_page_prot);
- if (ret)
- qib_devinfo(dd->pcidev,
- "%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
- what, rcd->ctxt, pfn, len, ret);
-bail:
- return ret;
-}
-
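qib_mmap_mem() above makes its read-only promise stick by clearing VM_MAYWRITE, not just rejecting VM_WRITE: without that, the process could re-enable writing later with mprotect(). The idiom condensed into a kernel-style sketch (not the driver's exact code):

    static int demo_mmap_readonly(struct vm_area_struct *vma, void *kvaddr,
                                  unsigned long len)
    {
            if (vma->vm_flags & VM_WRITE)
                    return -EPERM;                /* writable request refused */

            /* also forbid a later mprotect(PROT_WRITE) upgrade */
            vm_flags_clear(vma, VM_MAYWRITE);

            return remap_pfn_range(vma, vma->vm_start,
                                   virt_to_phys(kvaddr) >> PAGE_SHIFT,
                                   len, vma->vm_page_prot);
    }
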
-static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
- u64 ureg)
-{
- unsigned long phys;
- unsigned long sz;
- int ret;
-
- /*
- * This is real hardware, so use io_remap. This is the mechanism
- * for the user process to update the head registers for their ctxt
- * in the chip.
- */
- sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
- if ((vma->vm_end - vma->vm_start) > sz) {
- qib_devinfo(dd->pcidev,
- "FAIL mmap userreg: reqlen %lx > PAGE\n",
- vma->vm_end - vma->vm_start);
- ret = -EFAULT;
- } else {
- phys = dd->physaddr + ureg;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
- ret = io_remap_pfn_range(vma, vma->vm_start,
- phys >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- }
- return ret;
-}
-
-static int mmap_piobufs(struct vm_area_struct *vma,
- struct qib_devdata *dd,
- struct qib_ctxtdata *rcd,
- unsigned piobufs, unsigned piocnt)
-{
- unsigned long phys;
- int ret;
-
- /*
- * When we map the PIO buffers in the chip, we want to map them as
- * writeonly, no read possible; unfortunately, x86 doesn't allow
- * for this in hardware, but we still prevent users from asking
- * for it.
- */
- if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
- qib_devinfo(dd->pcidev,
- "FAIL mmap piobufs: reqlen %lx > PAGE\n",
- vma->vm_end - vma->vm_start);
- ret = -EINVAL;
- goto bail;
- }
-
- phys = dd->physaddr + piobufs;
-
-#if defined(__powerpc__)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
-
- /*
-	 * don't allow them to later change the mapping to readable with
-	 * mprotect (the buffers are normally not mapped readable initially)
- */
- vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND, VM_MAYREAD);
-
- /* We used PAT if wc_cookie == 0 */
- if (!dd->wc_cookie)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-bail:
- return ret;
-}
-
-static int mmap_rcvegrbufs(struct vm_area_struct *vma,
- struct qib_ctxtdata *rcd)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned long start, size;
- size_t total_size, i;
- unsigned long pfn;
- int ret;
-
- size = rcd->rcvegrbuf_size;
- total_size = rcd->rcvegrbuf_chunks * size;
- if ((vma->vm_end - vma->vm_start) > total_size) {
- qib_devinfo(dd->pcidev,
- "FAIL on egr bufs: reqlen %lx > actual %lx\n",
- vma->vm_end - vma->vm_start,
- (unsigned long) total_size);
- ret = -EINVAL;
- goto bail;
- }
-
- if (vma->vm_flags & VM_WRITE) {
- qib_devinfo(dd->pcidev,
- "Can't map eager buffers as writable (flags=%lx)\n",
- vma->vm_flags);
- ret = -EPERM;
- goto bail;
- }
- /* don't allow them to later change to writable with mprotect */
- vm_flags_clear(vma, VM_MAYWRITE);
-
- start = vma->vm_start;
-
- for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
- pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
- ret = remap_pfn_range(vma, start, pfn, size,
- vma->vm_page_prot);
- if (ret < 0)
- goto bail;
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-/*
- * qib_file_vma_fault - handle a VMA page fault.
- */
-static vm_fault_t qib_file_vma_fault(struct vm_fault *vmf)
-{
- struct page *page;
-
- page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
- if (!page)
- return VM_FAULT_SIGBUS;
-
- get_page(page);
- vmf->page = page;
-
- return 0;
-}
-
-static const struct vm_operations_struct qib_file_vm_ops = {
- .fault = qib_file_vma_fault,
-};
-
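qib_file_vma_fault() above and mmap_kvaddr() below implement demand paging for vmalloc memory: no page tables are populated at mmap() time; each fault is resolved with vmalloc_to_page(). (qib stores the kernel virtual address itself in vm_pgoff; the sketch below shows the more conventional variant, where the handler indexes into a known buffer, assumed allocated with vmalloc_user().)

    static void *demo_buf;                /* assumed vmalloc_user() buffer */

    static vm_fault_t demo_fault(struct vm_fault *vmf)
    {
            struct page *page;

            /* translate the faulting page offset into a vmalloc page */
            page = vmalloc_to_page(demo_buf + (vmf->pgoff << PAGE_SHIFT));
            if (!page)
                    return VM_FAULT_SIGBUS;

            get_page(page);               /* reference for the new PTE */
            vmf->page = page;
            return 0;
    }

    static const struct vm_operations_struct demo_vm_ops = {
            .fault = demo_fault,
    };

    static int demo_mmap(struct file *fp, struct vm_area_struct *vma)
    {
            vma->vm_ops = &demo_vm_ops;
            vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
            return 0;
    }
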
-static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
- struct qib_ctxtdata *rcd, unsigned subctxt)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned subctxt_cnt;
- unsigned long len;
- void *addr;
- size_t size;
- int ret = 0;
-
- subctxt_cnt = rcd->subctxt_cnt;
- size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
-
- /*
- * Each process has all the subctxt uregbase, rcvhdrq, and
- * rcvegrbufs mmapped - as an array for all the processes,
- * and also separately for this process.
- */
- if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
- addr = rcd->subctxt_uregbase;
- size = PAGE_SIZE * subctxt_cnt;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
- addr = rcd->subctxt_rcvhdr_base;
- size = rcd->rcvhdrq_size * subctxt_cnt;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
- addr = rcd->subctxt_rcvegrbuf;
- size *= subctxt_cnt;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
- PAGE_SIZE * subctxt)) {
- addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
- size = PAGE_SIZE;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
- rcd->rcvhdrq_size * subctxt)) {
- addr = rcd->subctxt_rcvhdr_base +
- rcd->rcvhdrq_size * subctxt;
- size = rcd->rcvhdrq_size;
- } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
- addr = rcd->user_event_mask;
- size = PAGE_SIZE;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
- size * subctxt)) {
- addr = rcd->subctxt_rcvegrbuf + size * subctxt;
- /* rcvegrbufs are read-only on the slave */
- if (vma->vm_flags & VM_WRITE) {
- qib_devinfo(dd->pcidev,
- "Can't map eager buffers as writable (flags=%lx)\n",
- vma->vm_flags);
- ret = -EPERM;
- goto bail;
- }
- /*
- * Don't allow permission to later change to writable
- * with mprotect.
- */
- vm_flags_clear(vma, VM_MAYWRITE);
- } else
- goto bail;
- len = vma->vm_end - vma->vm_start;
- if (len > size) {
- ret = -EINVAL;
- goto bail;
- }
-
- vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
- vma->vm_ops = &qib_file_vm_ops;
- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
- ret = 1;
-
-bail:
- return ret;
-}
-
-/**
- * qib_mmapf - mmap various structures into user space
- * @fp: the file pointer
- * @vma: the VM area
- *
- * We use this to have a shared buffer between the kernel and the user code
- * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
- * buffers in the chip. We have the open and close entries so we can bump
- * the ref count and keep the driver from being unloaded while still mapped.
- */
-static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
-{
- struct qib_ctxtdata *rcd;
- struct qib_devdata *dd;
- u64 pgaddr, ureg;
- unsigned piobufs, piocnt;
- int ret, match = 1;
-
- rcd = ctxt_fp(fp);
- if (!rcd || !(vma->vm_flags & VM_SHARED)) {
- ret = -EINVAL;
- goto bail;
- }
- dd = rcd->dd;
-
- /*
- * This is the qib_do_user_init() code, mapping the shared buffers
- * and per-context user registers into the user process. The address
- * referred to by vm_pgoff is the file offset passed via mmap().
- * For shared contexts, this is the kernel vmalloc() address of the
- * pages to share with the master.
- * For non-shared or master ctxts, this is a physical address.
- * We only do one mmap for each space mapped.
- */
- pgaddr = vma->vm_pgoff << PAGE_SHIFT;
-
- /*
- * Check for 0 in case one of the allocations failed, but user
- * called mmap anyway.
- */
- if (!pgaddr) {
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * Physical addresses must fit in 40 bits for our hardware.
- * Check for kernel virtual addresses first, anything else must
- * match a HW or memory address.
- */
- ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
- if (ret) {
- if (ret > 0)
- ret = 0;
- goto bail;
- }
-
- ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
- if (!rcd->subctxt_cnt) {
- /* ctxt is not shared */
- piocnt = rcd->piocnt;
- piobufs = rcd->piobufs;
- } else if (!subctxt_fp(fp)) {
- /* caller is the master */
- piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
- (rcd->piocnt % rcd->subctxt_cnt);
- piobufs = rcd->piobufs +
- dd->palign * (rcd->piocnt - piocnt);
- } else {
- unsigned slave = subctxt_fp(fp) - 1;
-
- /* caller is a slave */
- piocnt = rcd->piocnt / rcd->subctxt_cnt;
- piobufs = rcd->piobufs + dd->palign * piocnt * slave;
- }
-
- if (pgaddr == ureg)
- ret = mmap_ureg(vma, dd, ureg);
- else if (pgaddr == piobufs)
- ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
- else if (pgaddr == dd->pioavailregs_phys)
- /* in-memory copy of pioavail registers */
- ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
- (void *) dd->pioavailregs_dma, 0,
- "pioavail registers");
- else if (pgaddr == rcd->rcvegr_phys)
- ret = mmap_rcvegrbufs(vma, rcd);
- else if (pgaddr == (u64) rcd->rcvhdrq_phys)
- /*
- * The rcvhdrq itself; multiple pages, contiguous
- * from an i/o perspective. Shared contexts need
- * to map r/w, so we allow writing.
- */
- ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
- rcd->rcvhdrq, 1, "rcvhdrq");
- else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
- /* in-memory copy of rcvhdrq tail register */
- ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
- rcd->rcvhdrtail_kvaddr, 0,
- "rcvhdrq tail");
- else
- match = 0;
- if (!match)
- ret = -EINVAL;
-
- vma->vm_private_data = NULL;
-
- if (ret < 0)
- qib_devinfo(dd->pcidev,
- "mmap Failure %d: off %llx len %lx\n",
- -ret, (unsigned long long)pgaddr,
- vma->vm_end - vma->vm_start);
-bail:
- return ret;
-}
-
-static __poll_t qib_poll_urgent(struct qib_ctxtdata *rcd,
- struct file *fp,
- struct poll_table_struct *pt)
-{
- struct qib_devdata *dd = rcd->dd;
- __poll_t pollflag;
-
- poll_wait(fp, &rcd->wait, pt);
-
- spin_lock_irq(&dd->uctxt_lock);
- if (rcd->urgent != rcd->urgent_poll) {
- pollflag = EPOLLIN | EPOLLRDNORM;
- rcd->urgent_poll = rcd->urgent;
- } else {
- pollflag = 0;
- set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
- }
- spin_unlock_irq(&dd->uctxt_lock);
-
- return pollflag;
-}
-
-static __poll_t qib_poll_next(struct qib_ctxtdata *rcd,
- struct file *fp,
- struct poll_table_struct *pt)
-{
- struct qib_devdata *dd = rcd->dd;
- __poll_t pollflag;
-
- poll_wait(fp, &rcd->wait, pt);
-
- spin_lock_irq(&dd->uctxt_lock);
- if (dd->f_hdrqempty(rcd)) {
- set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
- dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
- pollflag = 0;
- } else
- pollflag = EPOLLIN | EPOLLRDNORM;
- spin_unlock_irq(&dd->uctxt_lock);
-
- return pollflag;
-}
-
-static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
-{
- struct qib_ctxtdata *rcd;
- __poll_t pollflag;
-
- rcd = ctxt_fp(fp);
- if (!rcd)
- pollflag = EPOLLERR;
- else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
- pollflag = qib_poll_urgent(rcd, fp, pt);
- else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
- pollflag = qib_poll_next(rcd, fp, pt);
- else /* invalid */
- pollflag = EPOLLERR;
-
- return pollflag;
-}
-
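Both poll helpers above follow the canonical shape: register with poll_wait() before testing the condition, and test under the same lock the interrupt path takes, so a wakeup between the test and the sleep cannot be lost. Distilled into a sketch (demo_dev and DEMO_WAITING are illustrative stand-ins, assumed initialized elsewhere):

    struct demo_dev {
            wait_queue_head_t wait;
            spinlock_t lock;
            bool data_ready;
            unsigned long flags;
    };
    static struct demo_dev *demo_dev;     /* assumed initialized elsewhere */
    #define DEMO_WAITING 0

    static __poll_t demo_poll(struct file *fp, struct poll_table_struct *pt)
    {
            __poll_t mask = 0;

            poll_wait(fp, &demo_dev->wait, pt);  /* register first, always */

            spin_lock_irq(&demo_dev->lock);
            if (demo_dev->data_ready)
                    mask = EPOLLIN | EPOLLRDNORM;            /* readable now */
            else
                    set_bit(DEMO_WAITING, &demo_dev->flags); /* arm wakeup */
            spin_unlock_irq(&demo_dev->lock);

            return mask;
    }
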
-static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
-{
- struct qib_filedata *fd = fp->private_data;
- const unsigned int weight = current->nr_cpus_allowed;
- const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
- int local_cpu;
-
- /*
-	 * If the process has NOT already set its affinity, select and
- * reserve a processor for it on the local NUMA node.
- */
- if ((weight >= qib_cpulist_count) &&
- (cpumask_weight(local_mask) <= qib_cpulist_count)) {
- for_each_cpu(local_cpu, local_mask)
- if (!test_and_set_bit(local_cpu, qib_cpulist)) {
- fd->rec_cpu_num = local_cpu;
- return;
- }
- }
-
- /*
-	 * If the process has NOT already set its affinity, select and
-	 * reserve a processor for it, as a rendezvous for all
-	 * users of the driver. If they don't actually later
-	 * set affinity to this cpu, or set it to some other cpu,
-	 * it just means that sooner or later we don't recommend
-	 * a cpu, and let the scheduler do its best.
- */
- if (weight >= qib_cpulist_count) {
- int cpu;
-
- cpu = find_first_zero_bit(qib_cpulist,
- qib_cpulist_count);
- if (cpu == qib_cpulist_count)
- qib_dev_err(dd,
- "no cpus avail for affinity PID %u\n",
- current->pid);
- else {
- __set_bit(cpu, qib_cpulist);
- fd->rec_cpu_num = cpu;
- }
- }
-}
-
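The reservation above needs no lock because test_and_set_bit() is atomic: two processes scanning qib_cpulist concurrently can never claim the same CPU. A standalone userspace analogue (illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define NCPUS 16
    static atomic_bool cpu_used[NCPUS];   /* one flag per CPU slot */

    /* Claim the first free CPU slot; -1 if all are taken. */
    static int claim_cpu(void)
    {
            for (int cpu = 0; cpu < NCPUS; cpu++)
                    if (!atomic_exchange(&cpu_used[cpu], true))
                            return cpu;   /* ours until set back to false */
            return -1;
    }
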
-/*
- * Check that userland and driver are compatible for subcontexts.
- */
-static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
-{
- /* this code is written long-hand for clarity */
- if (QIB_USER_SWMAJOR != user_swmajor) {
- /* no promise of compatibility if major mismatch */
- return 0;
- }
- if (QIB_USER_SWMAJOR == 1) {
- switch (QIB_USER_SWMINOR) {
- case 0:
- case 1:
- case 2:
- /* no subctxt implementation so cannot be compatible */
- return 0;
- case 3:
- /* 3 is only compatible with itself */
- return user_swminor == 3;
- default:
- /* >= 4 are compatible (or are expected to be) */
- return user_swminor <= QIB_USER_SWMINOR;
- }
- }
- /* make no promises yet for future major versions */
- return 0;
-}
-
-static int init_subctxts(struct qib_devdata *dd,
- struct qib_ctxtdata *rcd,
- const struct qib_user_info *uinfo)
-{
- int ret = 0;
- unsigned num_subctxts;
- size_t size;
-
- /*
- * If the user is requesting zero subctxts,
- * skip the subctxt allocation.
- */
- if (uinfo->spu_subctxt_cnt <= 0)
- goto bail;
- num_subctxts = uinfo->spu_subctxt_cnt;
-
- /* Check for subctxt compatibility */
- if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
- uinfo->spu_userversion & 0xffff)) {
- qib_devinfo(dd->pcidev,
- "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
- (int) (uinfo->spu_userversion >> 16),
- (int) (uinfo->spu_userversion & 0xffff),
- QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
- goto bail;
- }
- if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
- ret = -EINVAL;
- goto bail;
- }
-
- rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
- if (!rcd->subctxt_uregbase) {
- ret = -ENOMEM;
- goto bail;
- }
- /* Note: rcd->rcvhdrq_size isn't initialized yet. */
- size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
- sizeof(u32), PAGE_SIZE) * num_subctxts;
- rcd->subctxt_rcvhdr_base = vmalloc_user(size);
- if (!rcd->subctxt_rcvhdr_base) {
- ret = -ENOMEM;
- goto bail_ureg;
- }
-
- rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
- rcd->rcvegrbuf_size *
- num_subctxts);
- if (!rcd->subctxt_rcvegrbuf) {
- ret = -ENOMEM;
- goto bail_rhdr;
- }
-
- rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
- rcd->subctxt_id = uinfo->spu_subctxt_id;
- rcd->active_slaves = 1;
- rcd->redirect_seq_cnt = 1;
- set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
- goto bail;
-
-bail_rhdr:
- vfree(rcd->subctxt_rcvhdr_base);
-bail_ureg:
- vfree(rcd->subctxt_uregbase);
- rcd->subctxt_uregbase = NULL;
-bail:
- return ret;
-}
-
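init_subctxts() above is a textbook instance of the kernel's goto-unwind shape: each successful allocation adds one label to the error ladder, so every failure path frees exactly what exists so far. The skeleton, reduced (struct demo is hypothetical):

    struct demo { void *a, *b, *c; };

    static int demo_alloc_three(struct demo *d)
    {
            d->a = vmalloc_user(PAGE_SIZE);
            if (!d->a)
                    return -ENOMEM;
            d->b = vmalloc_user(PAGE_SIZE);
            if (!d->b)
                    goto bail_a;          /* unwind only what exists */
            d->c = vmalloc_user(PAGE_SIZE);
            if (!d->c)
                    goto bail_b;
            return 0;

    bail_b:
            vfree(d->b);
    bail_a:
            vfree(d->a);
            return -ENOMEM;
    }
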
-static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
- struct file *fp, const struct qib_user_info *uinfo)
-{
- struct qib_filedata *fd = fp->private_data;
- struct qib_devdata *dd = ppd->dd;
- struct qib_ctxtdata *rcd;
- void *ptmp = NULL;
- int ret;
- int numa_id;
-
- assign_ctxt_affinity(fp, dd);
-
- numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
- cpu_to_node(fd->rec_cpu_num) :
- numa_node_id()) : dd->assigned_node_id;
-
- rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);
-
- /*
- * Allocate memory for use in qib_tid_update() at open to
- * reduce cost of expected send setup per message segment
- */
- if (rcd)
- ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
- dd->rcvtidcnt * sizeof(struct page **),
- GFP_KERNEL);
-
- if (!rcd || !ptmp) {
- qib_dev_err(dd,
- "Unable to allocate ctxtdata memory, failing open\n");
- ret = -ENOMEM;
- goto bailerr;
- }
- rcd->userversion = uinfo->spu_userversion;
- ret = init_subctxts(dd, rcd, uinfo);
- if (ret)
- goto bailerr;
- rcd->tid_pg_list = ptmp;
- rcd->pid = current->pid;
- init_waitqueue_head(&dd->rcd[ctxt]->wait);
- get_task_comm(rcd->comm, current);
- ctxt_fp(fp) = rcd;
- qib_stats.sps_ctxts++;
- dd->freectxts--;
- ret = 0;
- goto bail;
-
-bailerr:
- if (fd->rec_cpu_num != -1)
- __clear_bit(fd->rec_cpu_num, qib_cpulist);
-
- dd->rcd[ctxt] = NULL;
- kfree(rcd);
- kfree(ptmp);
-bail:
- return ret;
-}
-
-static inline int usable(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
-
- return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
- (ppd->lflags & QIBL_LINKACTIVE);
-}
-
-/*
- * Select a context on the given device, either using a requested port
- * or the port based on the context number.
- */
-static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
- const struct qib_user_info *uinfo)
-{
- struct qib_pportdata *ppd = NULL;
- int ret, ctxt;
-
- if (port) {
- if (!usable(dd->pport + port - 1)) {
- ret = -ENETDOWN;
- goto done;
- } else
- ppd = dd->pport + port - 1;
- }
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
- ctxt++)
- ;
- if (ctxt == dd->cfgctxts) {
- ret = -EBUSY;
- goto done;
- }
- if (!ppd) {
- u32 pidx = ctxt % dd->num_pports;
-
- if (usable(dd->pport + pidx))
- ppd = dd->pport + pidx;
- else {
- for (pidx = 0; pidx < dd->num_pports && !ppd;
- pidx++)
- if (usable(dd->pport + pidx))
- ppd = dd->pport + pidx;
- }
- }
- ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
-done:
- return ret;
-}
-
-static int find_free_ctxt(int unit, struct file *fp,
- const struct qib_user_info *uinfo)
-{
- struct qib_devdata *dd = qib_lookup(unit);
- int ret;
-
- if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
- ret = -ENODEV;
- else
- ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
-
- return ret;
-}
-
-static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
- unsigned alg)
-{
- struct qib_devdata *udd = NULL;
- int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
- u32 port = uinfo->spu_port, ctxt;
-
- devmax = qib_count_units(&npresent, &nup);
- if (!npresent) {
- ret = -ENXIO;
- goto done;
- }
- if (nup == 0) {
- ret = -ENETDOWN;
- goto done;
- }
-
- if (alg == QIB_PORT_ALG_ACROSS) {
- unsigned inuse = ~0U;
-
- /* find device (with ACTIVE ports) with fewest ctxts in use */
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
- unsigned cused = 0, cfree = 0, pusable = 0;
-
- if (!dd)
- continue;
- if (port && port <= dd->num_pports &&
- usable(dd->pport + port - 1))
- pusable = 1;
- else
- for (i = 0; i < dd->num_pports; i++)
- if (usable(dd->pport + i))
- pusable++;
- if (!pusable)
- continue;
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
- ctxt++)
- if (dd->rcd[ctxt])
- cused++;
- else
- cfree++;
- if (cfree && cused < inuse) {
- udd = dd;
- inuse = cused;
- }
- }
- if (udd) {
- ret = choose_port_ctxt(fp, udd, port, uinfo);
- goto done;
- }
- } else {
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
-
- if (dd) {
- ret = choose_port_ctxt(fp, dd, port, uinfo);
- if (!ret)
- goto done;
- if (ret == -EBUSY)
- dusable++;
- }
- }
- }
- ret = dusable ? -EBUSY : -ENETDOWN;
-
-done:
- return ret;
-}
-
-static int find_shared_ctxt(struct file *fp,
- const struct qib_user_info *uinfo)
-{
- int devmax, ndev, i;
- int ret = 0;
-
- devmax = qib_count_units(NULL, NULL);
-
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
-
- /* device portion of usable() */
- if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
- continue;
- for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
- struct qib_ctxtdata *rcd = dd->rcd[i];
-
- /* Skip ctxts which are not yet open */
- if (!rcd || !rcd->cnt)
- continue;
- /* Skip ctxt if it doesn't match the requested one */
- if (rcd->subctxt_id != uinfo->spu_subctxt_id)
- continue;
- /* Verify the sharing process matches the master */
- if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
- rcd->userversion != uinfo->spu_userversion ||
- rcd->cnt >= rcd->subctxt_cnt) {
- ret = -EINVAL;
- goto done;
- }
- ctxt_fp(fp) = rcd;
- subctxt_fp(fp) = rcd->cnt++;
- rcd->subpid[subctxt_fp(fp)] = current->pid;
- tidcursor_fp(fp) = 0;
- rcd->active_slaves |= 1 << subctxt_fp(fp);
- ret = 1;
- goto done;
- }
- }
-
-done:
- return ret;
-}
-
-static int qib_open(struct inode *in, struct file *fp)
-{
- /* The real work is performed later in qib_assign_ctxt() */
- fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
- if (fp->private_data) /* no cpu affinity by default */
- ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
- return fp->private_data ? 0 : -ENOMEM;
-}
-
-static int find_hca(unsigned int cpu, int *unit)
-{
- int ret = 0, devmax, npresent, nup, ndev;
-
- *unit = -1;
-
- devmax = qib_count_units(&npresent, &nup);
- if (!npresent) {
- ret = -ENXIO;
- goto done;
- }
- if (!nup) {
- ret = -ENETDOWN;
- goto done;
- }
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
-
- if (dd) {
- if (pcibus_to_node(dd->pcidev->bus) < 0) {
- ret = -EINVAL;
- goto done;
- }
- if (cpu_to_node(cpu) ==
- pcibus_to_node(dd->pcidev->bus)) {
- *unit = ndev;
- goto done;
- }
- }
- }
-done:
- return ret;
-}
-
-static int do_qib_user_sdma_queue_create(struct file *fp)
-{
- struct qib_filedata *fd = fp->private_data;
- struct qib_ctxtdata *rcd = fd->rcd;
- struct qib_devdata *dd = rcd->dd;
-
- if (dd->flags & QIB_HAS_SEND_DMA) {
-
- fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
- dd->unit,
- rcd->ctxt,
- fd->subctxt);
- if (!fd->pq)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/*
- * Get ctxt early, so we can set affinity prior to memory allocation.
- */
-static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
-{
- int ret;
- int i_minor;
- unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
-
- /* Check to be sure we haven't already initialized this file */
- if (ctxt_fp(fp)) {
- ret = -EINVAL;
- goto done;
- }
-
- /* for now, if major version is different, bail */
- swmajor = uinfo->spu_userversion >> 16;
- if (swmajor != QIB_USER_SWMAJOR) {
- ret = -ENODEV;
- goto done;
- }
-
- swminor = uinfo->spu_userversion & 0xffff;
-
- if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
- alg = uinfo->spu_port_alg;
-
- mutex_lock(&qib_mutex);
-
- if (qib_compatible_subctxts(swmajor, swminor) &&
- uinfo->spu_subctxt_cnt) {
- ret = find_shared_ctxt(fp, uinfo);
- if (ret > 0) {
- ret = do_qib_user_sdma_queue_create(fp);
- if (!ret)
- assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
- goto done_ok;
- }
- }
-
- i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
- if (i_minor)
- ret = find_free_ctxt(i_minor - 1, fp, uinfo);
- else {
- int unit;
- const unsigned int cpu = cpumask_first(current->cpus_ptr);
- const unsigned int weight = current->nr_cpus_allowed;
-
- if (weight == 1 && !test_bit(cpu, qib_cpulist))
- if (!find_hca(cpu, &unit) && unit >= 0)
- if (!find_free_ctxt(unit, fp, uinfo)) {
- ret = 0;
- goto done_chk_sdma;
- }
- ret = get_a_ctxt(fp, uinfo, alg);
- }
-
-done_chk_sdma:
- if (!ret)
- ret = do_qib_user_sdma_queue_create(fp);
-done_ok:
- mutex_unlock(&qib_mutex);
-
-done:
- return ret;
-}
-
-
-static int qib_do_user_init(struct file *fp,
- const struct qib_user_info *uinfo)
-{
- int ret;
- struct qib_ctxtdata *rcd = ctxt_fp(fp);
- struct qib_devdata *dd;
- unsigned uctxt;
-
- /* Subctxts don't need to initialize anything since master did it. */
- if (subctxt_fp(fp)) {
- ret = wait_event_interruptible(rcd->wait,
- !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
- goto bail;
- }
-
- dd = rcd->dd;
-
- /* some ctxts may get extra buffers, calculate that here */
- uctxt = rcd->ctxt - dd->first_user_ctxt;
- if (uctxt < dd->ctxts_extrabuf) {
- rcd->piocnt = dd->pbufsctxt + 1;
- rcd->pio_base = rcd->piocnt * uctxt;
- } else {
- rcd->piocnt = dd->pbufsctxt;
- rcd->pio_base = rcd->piocnt * uctxt +
- dd->ctxts_extrabuf;
- }
-
- /*
- * All user buffers are 2KB buffers. If we ever support
- * giving 4KB buffers to user processes, this will need some
- * work. Can't use piobufbase directly, because it has
- * both 2K and 4K buffer base values. So check and handle.
- */
- if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
- if (rcd->pio_base >= dd->piobcnt2k) {
- qib_dev_err(dd,
- "%u:ctxt%u: no 2KB buffers available\n",
- dd->unit, rcd->ctxt);
- ret = -ENOBUFS;
- goto bail;
- }
- rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
- qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
- rcd->ctxt, rcd->piocnt);
- }
-
- rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
- qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
- TXCHK_CHG_TYPE_USER, rcd);
- /*
- * try to ensure that processes start up with consistent avail update
-	 * for their own range, at least. If the system is very quiet, it might
- * have the in-memory copy out of date at startup for this range of
- * buffers, when a context gets re-used. Do after the chg_pioavail
- * and before the rest of setup, so it's "almost certain" the dma
- * will have occurred (can't 100% guarantee, but should be many
- * decimals of 9s, with this ordering), given how much else happens
- * after this.
- */
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
-
- /*
- * Now allocate the rcvhdr Q and eager TIDs; skip the TID
-	 * array for the time being. If rcd->ctxt exceeds what the chip
-	 * supports, we would need extra logic here to handle it, someday,
-	 * by overflowing through ctxt 0.
- */
- ret = qib_create_rcvhdrq(dd, rcd);
- if (!ret)
- ret = qib_setup_eagerbufs(rcd);
- if (ret)
- goto bail_pio;
-
- rcd->tidcursor = 0; /* start at beginning after open */
-
- /* initialize poll variables... */
- rcd->urgent = 0;
- rcd->urgent_poll = 0;
-
- /*
- * Now enable the ctxt for receive.
- * For chips that are set to DMA the tail register to memory
- * when they change (and when the update bit transitions from
-	 * 0 to 1). So for those chips, we turn it off and then back on.
- * This will (very briefly) affect any other open ctxts, but the
- * duration is very short, and therefore isn't an issue. We
- * explicitly set the in-memory tail copy to 0 beforehand, so we
- * don't have to wait to be sure the DMA update has happened
- * (chip resets head/tail to 0 on transition to enable).
- */
- if (rcd->rcvhdrtail_kvaddr)
- qib_clear_rcvhdrtail(rcd);
-
- dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
- rcd->ctxt);
-
- /* Notify any waiting slaves */
- if (rcd->subctxt_cnt) {
- clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
- wake_up(&rcd->wait);
- }
- return 0;
-
-bail_pio:
- qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
- TXCHK_CHG_TYPE_KERN, rcd);
-bail:
- return ret;
-}
-
-/**
- * unlock_expected_tids - unlock any expected TID entries context still had
- * in use
- * @rcd: ctxt
- *
- * We don't actually update the chip here, because we do a bulk update
- * below, using f_clear_tids.
- */
-static void unlock_expected_tids(struct qib_ctxtdata *rcd)
-{
- struct qib_devdata *dd = rcd->dd;
- int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
- int i, maxtid = ctxt_tidbase + dd->rcvtidcnt;
-
- for (i = ctxt_tidbase; i < maxtid; i++) {
- struct page *p = dd->pageshadow[i];
- dma_addr_t phys;
-
- if (!p)
- continue;
-
- phys = dd->physshadow[i];
- dd->physshadow[i] = dd->tidinvalid;
- dd->pageshadow[i] = NULL;
- dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
- DMA_FROM_DEVICE);
- qib_release_user_pages(&p, 1);
- }
-}
-
-static int qib_close(struct inode *in, struct file *fp)
-{
- struct qib_filedata *fd;
- struct qib_ctxtdata *rcd;
- struct qib_devdata *dd;
- unsigned long flags;
- unsigned ctxt;
-
- mutex_lock(&qib_mutex);
-
- fd = fp->private_data;
- fp->private_data = NULL;
- rcd = fd->rcd;
- if (!rcd) {
- mutex_unlock(&qib_mutex);
- goto bail;
- }
-
- dd = rcd->dd;
-
- /* ensure all pio buffer writes in progress are flushed */
- qib_flush_wc();
-
- /* drain user sdma queue */
- if (fd->pq) {
- qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
- qib_user_sdma_queue_destroy(fd->pq);
- }
-
- if (fd->rec_cpu_num != -1)
- __clear_bit(fd->rec_cpu_num, qib_cpulist);
-
- if (--rcd->cnt) {
- /*
- * XXX If the master closes the context before the slave(s),
- * revoke the mmap for the eager receive queue so
- * the slave(s) don't wait for receive data forever.
- */
- rcd->active_slaves &= ~(1 << fd->subctxt);
- rcd->subpid[fd->subctxt] = 0;
- mutex_unlock(&qib_mutex);
- goto bail;
- }
-
- /* early; no interrupt users after this */
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- ctxt = rcd->ctxt;
- dd->rcd[ctxt] = NULL;
- rcd->pid = 0;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-
- if (rcd->rcvwait_to || rcd->piowait_to ||
- rcd->rcvnowait || rcd->pionowait) {
- rcd->rcvwait_to = 0;
- rcd->piowait_to = 0;
- rcd->rcvnowait = 0;
- rcd->pionowait = 0;
- }
- if (rcd->flag)
- rcd->flag = 0;
-
- if (dd->kregbase) {
- /* atomically clear receive enable ctxt and intr avail. */
- dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
- QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
-
- /* clean up the pkeys for this ctxt user */
- qib_clean_part_key(rcd, dd);
- qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
- qib_chg_pioavailkernel(dd, rcd->pio_base,
- rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
-
- dd->f_clear_tids(dd, rcd);
-
- if (dd->pageshadow)
- unlock_expected_tids(rcd);
- qib_stats.sps_ctxts--;
- dd->freectxts++;
- }
-
- mutex_unlock(&qib_mutex);
- qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
-
-bail:
- kfree(fd);
- return 0;
-}
-
-static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
-{
- struct qib_ctxt_info info;
- int ret;
- size_t sz;
- struct qib_ctxtdata *rcd = ctxt_fp(fp);
- struct qib_filedata *fd;
-
- fd = fp->private_data;
-
- info.num_active = qib_count_active_units();
- info.unit = rcd->dd->unit;
- info.port = rcd->ppd->port;
- info.ctxt = rcd->ctxt;
- info.subctxt = subctxt_fp(fp);
- /* Number of user ctxts available for this device. */
- info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
- info.num_subctxts = rcd->subctxt_cnt;
- info.rec_cpu = fd->rec_cpu_num;
- sz = sizeof(info);
-
- if (copy_to_user(uinfo, &info, sz)) {
- ret = -EFAULT;
- goto bail;
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
- u32 __user *inflightp)
-{
- const u32 val = qib_user_sdma_inflight_counter(pq);
-
- if (put_user(val, inflightp))
- return -EFAULT;
-
- return 0;
-}
-
-static int qib_sdma_get_complete(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq,
- u32 __user *completep)
-{
- u32 val;
- int err;
-
- if (!pq)
- return -EINVAL;
-
- err = qib_user_sdma_make_progress(ppd, pq);
- if (err < 0)
- return err;
-
- val = qib_user_sdma_complete_counter(pq);
- if (put_user(val, completep))
- return -EFAULT;
-
- return 0;
-}
-
-static int disarm_req_delay(struct qib_ctxtdata *rcd)
-{
- int ret = 0;
-
- if (!usable(rcd->ppd)) {
- int i;
- /*
- * if link is down, or otherwise not usable, delay
- * the caller up to 30 seconds, so we don't thrash
- * in trying to get the chip back to ACTIVE, and
- * set flag so they make the call again.
- */
- if (rcd->user_event_mask) {
- /*
- * subctxt_cnt is 0 if not shared, so do base
-			 * separately first, then the remaining subctxts, if any
- */
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[i]);
- }
- for (i = 0; !usable(rcd->ppd) && i < 300; i++)
- msleep(100);
- ret = -ENETDOWN;
- }
- return ret;
-}
-
-/*
- * Find all user contexts in use, and set the specified bit in their
- * event mask.
- * See also find_ctxt() for a similar use, that is specific to send buffers.
- */
-int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
-{
- struct qib_ctxtdata *rcd;
- unsigned ctxt;
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
- for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
- ctxt++) {
- rcd = ppd->dd->rcd[ctxt];
- if (!rcd)
- continue;
- if (rcd->user_event_mask) {
- int i;
- /*
- * subctxt_cnt is 0 if not shared, so do base
-			 * separately first, then the remaining subctxts, if any
- */
- set_bit(evtbit, &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- set_bit(evtbit, &rcd->user_event_mask[i]);
- }
- ret = 1;
- break;
- }
- spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
-
- return ret;
-}
-
-/*
- * clear the event notifier events for this context.
- * For the DISARM_BUFS case, we also take action (this obsoletes
- * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
- * compatibility).
- * Other bits don't currently require actions, just atomically clear.
- * User process then performs actions appropriate to bit having been
- * set, if desired, and checks again in future.
- */
-static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
- unsigned long events)
-{
- int ret = 0, i;
-
- for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
- if (!test_bit(i, &events))
- continue;
- if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
- (void)qib_disarm_piobufs_ifneeded(rcd);
- ret = disarm_req_delay(rcd);
- } else
- clear_bit(i, &rcd->user_event_mask[subctxt]);
- }
- return ret;
-}
-
-static ssize_t qib_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- const struct qib_cmd __user *ucmd;
- struct qib_ctxtdata *rcd;
- const void __user *src;
- size_t consumed, copy = 0;
- struct qib_cmd cmd;
- ssize_t ret = 0;
- void *dest;
-
- if (!ib_safe_file_access(fp)) {
- pr_err_once("qib_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
- task_tgid_vnr(current), current->comm);
- return -EACCES;
- }
-
- if (count < sizeof(cmd.type)) {
- ret = -EINVAL;
- goto bail;
- }
-
- ucmd = (const struct qib_cmd __user *) data;
-
- if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
- ret = -EFAULT;
- goto bail;
- }
-
- consumed = sizeof(cmd.type);
-
- switch (cmd.type) {
- case QIB_CMD_ASSIGN_CTXT:
- case QIB_CMD_USER_INIT:
- copy = sizeof(cmd.cmd.user_info);
- dest = &cmd.cmd.user_info;
- src = &ucmd->cmd.user_info;
- break;
-
- case QIB_CMD_RECV_CTRL:
- copy = sizeof(cmd.cmd.recv_ctrl);
- dest = &cmd.cmd.recv_ctrl;
- src = &ucmd->cmd.recv_ctrl;
- break;
-
- case QIB_CMD_CTXT_INFO:
- copy = sizeof(cmd.cmd.ctxt_info);
- dest = &cmd.cmd.ctxt_info;
- src = &ucmd->cmd.ctxt_info;
- break;
-
- case QIB_CMD_TID_UPDATE:
- case QIB_CMD_TID_FREE:
- copy = sizeof(cmd.cmd.tid_info);
- dest = &cmd.cmd.tid_info;
- src = &ucmd->cmd.tid_info;
- break;
-
- case QIB_CMD_SET_PART_KEY:
- copy = sizeof(cmd.cmd.part_key);
- dest = &cmd.cmd.part_key;
- src = &ucmd->cmd.part_key;
- break;
-
- case QIB_CMD_DISARM_BUFS:
- case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
- copy = 0;
- src = NULL;
- dest = NULL;
- break;
-
- case QIB_CMD_POLL_TYPE:
- copy = sizeof(cmd.cmd.poll_type);
- dest = &cmd.cmd.poll_type;
- src = &ucmd->cmd.poll_type;
- break;
-
- case QIB_CMD_ARMLAUNCH_CTRL:
- copy = sizeof(cmd.cmd.armlaunch_ctrl);
- dest = &cmd.cmd.armlaunch_ctrl;
- src = &ucmd->cmd.armlaunch_ctrl;
- break;
-
- case QIB_CMD_SDMA_INFLIGHT:
- copy = sizeof(cmd.cmd.sdma_inflight);
- dest = &cmd.cmd.sdma_inflight;
- src = &ucmd->cmd.sdma_inflight;
- break;
-
- case QIB_CMD_SDMA_COMPLETE:
- copy = sizeof(cmd.cmd.sdma_complete);
- dest = &cmd.cmd.sdma_complete;
- src = &ucmd->cmd.sdma_complete;
- break;
-
- case QIB_CMD_ACK_EVENT:
- copy = sizeof(cmd.cmd.event_mask);
- dest = &cmd.cmd.event_mask;
- src = &ucmd->cmd.event_mask;
- break;
-
- default:
- ret = -EINVAL;
- goto bail;
- }
-
- if (copy) {
- if ((count - consumed) < copy) {
- ret = -EINVAL;
- goto bail;
- }
- if (copy_from_user(dest, src, copy)) {
- ret = -EFAULT;
- goto bail;
- }
- consumed += copy;
- }
-
- rcd = ctxt_fp(fp);
- if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
- ret = -EINVAL;
- goto bail;
- }
-
- switch (cmd.type) {
- case QIB_CMD_ASSIGN_CTXT:
- if (rcd) {
- ret = -EINVAL;
- goto bail;
- }
-
- ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
- if (ret)
- goto bail;
- break;
-
- case QIB_CMD_USER_INIT:
- ret = qib_do_user_init(fp, &cmd.cmd.user_info);
- if (ret)
- goto bail;
- ret = qib_get_base_info(fp, u64_to_user_ptr(
- cmd.cmd.user_info.spu_base_info),
- cmd.cmd.user_info.spu_base_info_size);
- break;
-
- case QIB_CMD_RECV_CTRL:
- ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
- break;
-
- case QIB_CMD_CTXT_INFO:
- ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
- (unsigned long) cmd.cmd.ctxt_info);
- break;
-
- case QIB_CMD_TID_UPDATE:
- ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
- break;
-
- case QIB_CMD_TID_FREE:
- ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
- break;
-
- case QIB_CMD_SET_PART_KEY:
- ret = qib_set_part_key(rcd, cmd.cmd.part_key);
- break;
-
- case QIB_CMD_DISARM_BUFS:
- (void)qib_disarm_piobufs_ifneeded(rcd);
- ret = disarm_req_delay(rcd);
- break;
-
- case QIB_CMD_PIOAVAILUPD:
- qib_force_pio_avail_update(rcd->dd);
- break;
-
- case QIB_CMD_POLL_TYPE:
- rcd->poll_type = cmd.cmd.poll_type;
- break;
-
- case QIB_CMD_ARMLAUNCH_CTRL:
- rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
- break;
-
- case QIB_CMD_SDMA_INFLIGHT:
- ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
- (u32 __user *) (unsigned long)
- cmd.cmd.sdma_inflight);
- break;
-
- case QIB_CMD_SDMA_COMPLETE:
- ret = qib_sdma_get_complete(rcd->ppd,
- user_sdma_queue_fp(fp),
- (u32 __user *) (unsigned long)
- cmd.cmd.sdma_complete);
- break;
-
- case QIB_CMD_ACK_EVENT:
- ret = qib_user_event_ack(rcd, subctxt_fp(fp),
- cmd.cmd.event_mask);
- break;
- }
-
- if (ret >= 0)
- ret = consumed;
-
-bail:
- return ret;
-}
-
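qib_write() above shows the standard two-phase copy for variable-size commands: copy just the fixed type field, size the payload from it, verify that count covers the payload, then copy the payload alone. A compact kernel-style sketch (the command set and wire layout are hypothetical):

    struct demo_cmd {
            u32 type;
            union {
                    u64 wide_arg;
                    u16 narrow_arg;
            } u;
    };

    static ssize_t demo_write(struct file *fp, const char __user *data,
                              size_t count, loff_t *off)
    {
            struct demo_cmd cmd;
            size_t copy;

            if (count < sizeof(cmd.type))
                    return -EINVAL;
            if (copy_from_user(&cmd.type, data, sizeof(cmd.type)))
                    return -EFAULT;

            switch (cmd.type) {               /* payload size depends on type */
            case 1: copy = sizeof(cmd.u.wide_arg);   break;
            case 2: copy = sizeof(cmd.u.narrow_arg); break;
            default: return -EINVAL;
            }

            if (count - sizeof(cmd.type) < copy)
                    return -EINVAL;           /* short write: reject, don't read */
            if (copy_from_user(&cmd.u, data + sizeof(cmd.type), copy))
                    return -EFAULT;

            /* ... dispatch on cmd.type ... */
            return sizeof(cmd.type) + copy;   /* bytes consumed */
    }
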
-static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct qib_filedata *fp = iocb->ki_filp->private_data;
- struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
- struct qib_user_sdma_queue *pq = fp->pq;
-
- if (!user_backed_iter(from) || !from->nr_segs || !pq)
- return -EINVAL;
-
- return qib_user_sdma_writev(rcd, pq, iter_iov(from), from->nr_segs);
-}
-
-static const struct class qib_class = {
- .name = "ipath",
-};
-static dev_t qib_dev;
-
-int qib_cdev_init(int minor, const char *name,
- const struct file_operations *fops,
- struct cdev **cdevp, struct device **devp)
-{
- const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
- struct cdev *cdev;
- struct device *device = NULL;
- int ret;
-
- cdev = cdev_alloc();
- if (!cdev) {
- pr_err("Could not allocate cdev for minor %d, %s\n",
- minor, name);
- ret = -ENOMEM;
- goto done;
- }
-
- cdev->owner = THIS_MODULE;
- cdev->ops = fops;
- kobject_set_name(&cdev->kobj, name);
-
- ret = cdev_add(cdev, dev, 1);
- if (ret < 0) {
- pr_err("Could not add cdev for minor %d, %s (err %d)\n",
- minor, name, -ret);
- goto err_cdev;
- }
-
- device = device_create(&qib_class, NULL, dev, NULL, "%s", name);
- if (!IS_ERR(device))
- goto done;
- ret = PTR_ERR(device);
- device = NULL;
- pr_err("Could not create device for minor %d, %s (err %d)\n",
- minor, name, -ret);
-err_cdev:
- cdev_del(cdev);
- cdev = NULL;
-done:
- *cdevp = cdev;
- *devp = device;
- return ret;
-}
-
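Note the ordering qib_cdev_init() enforces: cdev_add() before device_create(). The moment the /dev node exists, an open() can arrive, so the cdev must already be live. The sequence at its smallest (demo_class is assumed class_register()'ed elsewhere; error handling mirrors the function above):

    static const struct class demo_class = { .name = "demo" };

    static int demo_register(dev_t dev, const struct file_operations *fops)
    {
            struct cdev *cdev = cdev_alloc();

            if (!cdev)
                    return -ENOMEM;
            cdev->owner = THIS_MODULE;
            cdev->ops = fops;

            if (cdev_add(cdev, dev, 1))          /* 1: make the cdev live */
                    goto err;
            if (IS_ERR(device_create(&demo_class, NULL, dev, NULL, "demo")))
                    goto err;                    /* 2: then expose /dev node */
            return 0;
    err:
            cdev_del(cdev);
            return -ENODEV;
    }
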
-void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
-{
- struct device *device = *devp;
-
- if (device) {
- device_unregister(device);
- *devp = NULL;
- }
-
- if (*cdevp) {
- cdev_del(*cdevp);
- *cdevp = NULL;
- }
-}
-
-static struct cdev *wildcard_cdev;
-static struct device *wildcard_device;
-
-int __init qib_dev_init(void)
-{
- int ret;
-
- ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
- if (ret < 0) {
- pr_err("Could not allocate chrdev region (err %d)\n", -ret);
- goto done;
- }
-
- ret = class_register(&qib_class);
- if (ret) {
- pr_err("Could not create device class (err %d)\n", -ret);
- unregister_chrdev_region(qib_dev, QIB_NMINORS);
- }
-
-done:
- return ret;
-}
-
-void qib_dev_cleanup(void)
-{
- if (class_is_registered(&qib_class))
- class_unregister(&qib_class);
-
- unregister_chrdev_region(qib_dev, QIB_NMINORS);
-}
-
-static atomic_t user_count = ATOMIC_INIT(0);
-
-static void qib_user_remove(struct qib_devdata *dd)
-{
- if (atomic_dec_return(&user_count) == 0)
- qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
-
- qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
-}
-
-static int qib_user_add(struct qib_devdata *dd)
-{
- char name[10];
- int ret;
-
- if (atomic_inc_return(&user_count) == 1) {
- ret = qib_cdev_init(0, "ipath", &qib_file_ops,
- &wildcard_cdev, &wildcard_device);
- if (ret)
- goto done;
- }
-
- snprintf(name, sizeof(name), "ipath%d", dd->unit);
- ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
- &dd->user_cdev, &dd->user_device);
- if (ret)
- qib_user_remove(dd);
-done:
- return ret;
-}
-
-/*
- * Create per-unit files in /dev
- */
-int qib_device_create(struct qib_devdata *dd)
-{
- int r, ret;
-
- r = qib_user_add(dd);
- ret = qib_diag_add(dd);
- if (r && !ret)
- ret = r;
- return ret;
-}
-
-/*
- * Remove per-unit files in /dev
- * void, core kernel returns no errors for this stuff
- */
-void qib_device_remove(struct qib_devdata *dd)
-{
- qib_user_remove(dd);
- qib_diag_remove(dd);
-}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
deleted file mode 100644
index 2098de762bf5..000000000000
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/fs.h>
-#include <linux/fs_context.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-
-#include "qib.h"
-
-#define QIBFS_MAGIC 0x726a77
-
-static struct super_block *qib_super;
-
-#define private2dd(file) (file_inode(file)->i_private)
-
-static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
- umode_t mode, const struct file_operations *fops,
- void *data)
-{
- int error;
- struct inode *inode = new_inode(dir->i_sb);
-
- if (!inode) {
- dput(dentry);
- error = -EPERM;
- goto bail;
- }
-
- inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_uid = GLOBAL_ROOT_UID;
- inode->i_gid = GLOBAL_ROOT_GID;
- inode->i_blocks = 0;
- simple_inode_init_ts(inode);
-
- inode->i_private = data;
- if (S_ISDIR(mode)) {
- inode->i_op = &simple_dir_inode_operations;
- inc_nlink(inode);
- inc_nlink(dir);
- }
-
- inode->i_fop = fops;
-
- d_instantiate(dentry, inode);
- error = 0;
-
-bail:
- return error;
-}
-
-static int create_file(const char *name, umode_t mode,
- struct dentry *parent, struct dentry **dentry,
- const struct file_operations *fops, void *data)
-{
- int error;
-
- inode_lock(d_inode(parent));
- *dentry = lookup_noperm(&QSTR(name), parent);
- if (!IS_ERR(*dentry))
- error = qibfs_mknod(d_inode(parent), *dentry,
- mode, fops, data);
- else
- error = PTR_ERR(*dentry);
- inode_unlock(d_inode(parent));
-
- return error;
-}
-
-static ssize_t driver_stats_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- qib_stats.sps_ints = qib_sps_ints();
- return simple_read_from_buffer(buf, count, ppos, &qib_stats,
- sizeof(qib_stats));
-}
-
-/*
- * driver stats field names, one line per stat, single string. Used by
- * programs like ipathstats to print the stats in a way which works for
- * different versions of drivers, without changing program source.
- * if qlogic_ib_stats changes, this needs to change. Names need to be
- * 12 chars or less (w/o newline), for proper display by ipathstats utility.
- */
-static const char qib_statnames[] =
- "KernIntr\n"
- "ErrorIntr\n"
- "Tx_Errs\n"
- "Rcv_Errs\n"
- "H/W_Errs\n"
- "NoPIOBufs\n"
- "CtxtsOpen\n"
- "RcvLen_Errs\n"
- "EgrBufFull\n"
- "EgrHdrFull\n"
- ;
-
-static ssize_t driver_names_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- return simple_read_from_buffer(buf, count, ppos, qib_statnames,
- sizeof(qib_statnames) - 1); /* no null */
-}
-
-static const struct file_operations driver_ops[] = {
- { .read = driver_stats_read, .llseek = generic_file_llseek, },
- { .read = driver_names_read, .llseek = generic_file_llseek, },
-};
-
-/* read the per-device counters */
-static ssize_t dev_counters_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- u64 *counters;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);
- return simple_read_from_buffer(buf, count, ppos, counters, avail);
-}
-
-/* read the per-device counters */
-static ssize_t dev_names_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- char *names;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_cntrs(dd, *ppos, &names, NULL);
- return simple_read_from_buffer(buf, count, ppos, names, avail);
-}
-
-static const struct file_operations cntr_ops[] = {
- { .read = dev_counters_read, .llseek = generic_file_llseek, },
- { .read = dev_names_read, .llseek = generic_file_llseek, },
-};
-
-/*
- * Could use file_inode(file)->i_ino to figure out which file,
- * instead of a separate routine for each, but for now, this works...
- */
-
-/* read the per-port names (same for each port) */
-static ssize_t portnames_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- char *names;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL);
- return simple_read_from_buffer(buf, count, ppos, names, avail);
-}
-
-/* read the per-port counters for port 1 (pidx 0) */
-static ssize_t portcntrs_1_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- u64 *counters;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters);
- return simple_read_from_buffer(buf, count, ppos, counters, avail);
-}
-
-/* read the per-port counters for port 2 (pidx 1) */
-static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- u64 *counters;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters);
- return simple_read_from_buffer(buf, count, ppos, counters, avail);
-}
-
-static const struct file_operations portcntr_ops[] = {
- { .read = portnames_read, .llseek = generic_file_llseek, },
- { .read = portcntrs_1_read, .llseek = generic_file_llseek, },
- { .read = portcntrs_2_read, .llseek = generic_file_llseek, },
-};
-
-/*
- * read the per-port QSFP data for port 1 (pidx 0)
- */
-static ssize_t qsfp_1_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qib_devdata *dd = private2dd(file);
- char *tmp;
- int ret;
-
- tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE);
- if (ret > 0)
- ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
- kfree(tmp);
- return ret;
-}
-
-/*
- * read the per-port QSFP data for port 2 (pidx 1)
- */
-static ssize_t qsfp_2_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qib_devdata *dd = private2dd(file);
- char *tmp;
- int ret;
-
- if (dd->num_pports < 2)
- return -ENODEV;
-
- tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE);
- if (ret > 0)
- ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
- kfree(tmp);
- return ret;
-}
-
-static const struct file_operations qsfp_ops[] = {
- { .read = qsfp_1_read, .llseek = generic_file_llseek, },
- { .read = qsfp_2_read, .llseek = generic_file_llseek, },
-};
-
-static ssize_t flash_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qib_devdata *dd;
- ssize_t ret;
- loff_t pos;
- char *tmp;
-
- pos = *ppos;
-
- if (pos < 0) {
- ret = -EINVAL;
- goto bail;
- }
-
- if (pos >= sizeof(struct qib_flash)) {
- ret = 0;
- goto bail;
- }
-
- if (count > sizeof(struct qib_flash) - pos)
- count = sizeof(struct qib_flash) - pos;
-
- tmp = kmalloc(count, GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto bail;
- }
-
- dd = private2dd(file);
- if (qib_eeprom_read(dd, pos, tmp, count)) {
- qib_dev_err(dd, "failed to read from flash\n");
- ret = -ENXIO;
- goto bail_tmp;
- }
-
- if (copy_to_user(buf, tmp, count)) {
- ret = -EFAULT;
- goto bail_tmp;
- }
-
- *ppos = pos + count;
- ret = count;
-
-bail_tmp:
- kfree(tmp);
-
-bail:
- return ret;
-}
-
-static ssize_t flash_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qib_devdata *dd;
- ssize_t ret;
- loff_t pos;
- char *tmp;
-
- pos = *ppos;
-
- if (pos != 0 || count != sizeof(struct qib_flash))
- return -EINVAL;
-
- tmp = memdup_user(buf, count);
- if (IS_ERR(tmp))
- return PTR_ERR(tmp);
-
- dd = private2dd(file);
- if (qib_eeprom_write(dd, pos, tmp, count)) {
- ret = -ENXIO;
- qib_dev_err(dd, "failed to write to flash\n");
- goto bail_tmp;
- }
-
- *ppos = pos + count;
- ret = count;
-
-bail_tmp:
- kfree(tmp);
- return ret;
-}
-
-static const struct file_operations flash_ops = {
- .read = flash_read,
- .write = flash_write,
- .llseek = default_llseek,
-};
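-
-/*
- * Note (illustrative, not part of the driver): flash_write above only
- * accepts a full-image write at offset 0, so a userspace updater must
- * write sizeof(struct qib_flash) bytes in a single call, e.g.
- *
- *	pwrite(fd, &flash_image, sizeof(flash_image), 0);
- *
- * where fd and flash_image are assumed names.  Partial or offset writes
- * fail with -EINVAL.
- */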
-
-static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
-{
- struct dentry *dir, *tmp;
- char unit[10];
- int ret, i;
-
- /* create the per-unit directory */
- snprintf(unit, sizeof(unit), "%u", dd->unit);
- ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
- &simple_dir_operations, dd);
- if (ret) {
- pr_err("create_file(%s) failed: %d\n", unit, ret);
- goto bail;
- }
-
- /* create the files in the new directory */
- ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp,
- &cntr_ops[0], dd);
- if (ret) {
- pr_err("create_file(%s/counters) failed: %d\n",
- unit, ret);
- goto bail;
- }
- ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp,
- &cntr_ops[1], dd);
- if (ret) {
- pr_err("create_file(%s/counter_names) failed: %d\n",
- unit, ret);
- goto bail;
- }
- ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp,
- &portcntr_ops[0], dd);
- if (ret) {
- pr_err("create_file(%s/%s) failed: %d\n",
- unit, "portcounter_names", ret);
- goto bail;
- }
- for (i = 1; i <= dd->num_pports; i++) {
- char fname[24];
-
- sprintf(fname, "port%dcounters", i);
- /* create the files in the new directory */
- ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
- &portcntr_ops[i], dd);
- if (ret) {
- pr_err("create_file(%s/%s) failed: %d\n",
- unit, fname, ret);
- goto bail;
- }
- if (!(dd->flags & QIB_HAS_QSFP))
- continue;
- sprintf(fname, "qsfp%d", i);
- ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
- &qsfp_ops[i - 1], dd);
- if (ret) {
- pr_err("create_file(%s/%s) failed: %d\n",
- unit, fname, ret);
- goto bail;
- }
- }
-
- ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
- &flash_ops, dd);
- if (ret)
- pr_err("create_file(%s/flash) failed: %d\n",
- unit, ret);
-bail:
- return ret;
-}
-
-static int remove_device_files(struct super_block *sb,
- struct qib_devdata *dd)
-{
- struct dentry *dir;
- char unit[10];
-
- snprintf(unit, sizeof(unit), "%u", dd->unit);
- dir = lookup_noperm_unlocked(&QSTR(unit), sb->s_root);
-
- if (IS_ERR(dir)) {
- pr_err("Lookup of %s failed\n", unit);
- return PTR_ERR(dir);
- }
- simple_recursive_removal(dir, NULL);
- dput(dir);
- return 0;
-}
-
-/*
- * This fills everything in when the fs is mounted, to handle umount/mount
- * after device init. The direct add_cntr_files() call handles adding
- * them from the init code, when the fs is already mounted.
- */
-static int qibfs_fill_super(struct super_block *sb, struct fs_context *fc)
-{
- struct qib_devdata *dd;
- unsigned long index;
- int ret;
-
- static const struct tree_descr files[] = {
- [2] = {"driver_stats", &driver_ops[0], S_IRUGO},
- [3] = {"driver_stats_names", &driver_ops[1], S_IRUGO},
- {""},
- };
-
- ret = simple_fill_super(sb, QIBFS_MAGIC, files);
- if (ret) {
- pr_err("simple_fill_super failed: %d\n", ret);
- goto bail;
- }
-
- xa_for_each(&qib_dev_table, index, dd) {
- ret = add_cntr_files(sb, dd);
- if (ret)
- goto bail;
- }
-
-bail:
- return ret;
-}
-
-static int qibfs_get_tree(struct fs_context *fc)
-{
- int ret = get_tree_single(fc, qibfs_fill_super);
- if (ret == 0)
- qib_super = fc->root->d_sb;
- return ret;
-}
-
-static const struct fs_context_operations qibfs_context_ops = {
- .get_tree = qibfs_get_tree,
-};
-
-static int qibfs_init_fs_context(struct fs_context *fc)
-{
- fc->ops = &qibfs_context_ops;
- return 0;
-}
-
-static void qibfs_kill_super(struct super_block *s)
-{
- kill_litter_super(s);
- qib_super = NULL;
-}
-
-int qibfs_add(struct qib_devdata *dd)
-{
- int ret;
-
- /*
- * On first unit initialized, qib_super will not yet exist
- * because nobody has yet tried to mount the filesystem, so
- * we can't consider that to be an error; if an error occurs
- * during the mount, that will get a complaint, so this is OK.
- * add_cntr_files() for all units is done at mount from
- * qibfs_fill_super(), so one way or another, everything works.
- */
- if (qib_super == NULL)
- ret = 0;
- else
- ret = add_cntr_files(qib_super, dd);
- return ret;
-}
-
-int qibfs_remove(struct qib_devdata *dd)
-{
- int ret = 0;
-
- if (qib_super)
- ret = remove_device_files(qib_super, dd);
-
- return ret;
-}
-
-static struct file_system_type qibfs_fs_type = {
- .owner = THIS_MODULE,
- .name = "ipathfs",
- .init_fs_context = qibfs_init_fs_context,
- .kill_sb = qibfs_kill_super,
-};
-MODULE_ALIAS_FS("ipathfs");
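-
-/*
- * Illustrative usage (the mount point is an assumption): the filesystem
- * must be mounted by an administrator or tool before the counter files
- * can be read, e.g.
- *
- *	mount -t ipathfs none /ipathfs
- */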
-
-int __init qib_init_qibfs(void)
-{
- return register_filesystem(&qibfs_fs_type);
-}
-
-int __exit qib_exit_qibfs(void)
-{
- return unregister_filesystem(&qibfs_fs_type);
-}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
deleted file mode 100644
index 2640d283eee6..000000000000
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ /dev/null
@@ -1,3533 +0,0 @@
-/*
- * Copyright (c) 2013 - 2017 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * QLogic_IB 6120 PCIe chip.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <rdma/ib_verbs.h>
-
-#include "qib.h"
-#include "qib_6120_regs.h"
-
-static void qib_6120_setup_setextled(struct qib_pportdata *, u32);
-static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);
-static u8 qib_6120_phys_portstate(u64);
-static u32 qib_6120_iblink_state(u64);
-
-/*
- * This file contains all the chip-specific register information and
- * access functions for the Intel Intel_IB PCI-Express chip.
- *
- */
-
-/* KREG_IDX uses machine-generated #defines */
-#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64))
-
-/* Use defines to tie machine-generated names to lower-case names */
-#define kr_extctrl KREG_IDX(EXTCtrl)
-#define kr_extstatus KREG_IDX(EXTStatus)
-#define kr_gpio_clear KREG_IDX(GPIOClear)
-#define kr_gpio_mask KREG_IDX(GPIOMask)
-#define kr_gpio_out KREG_IDX(GPIOOut)
-#define kr_gpio_status KREG_IDX(GPIOStatus)
-#define kr_rcvctrl KREG_IDX(RcvCtrl)
-#define kr_sendctrl KREG_IDX(SendCtrl)
-#define kr_partitionkey KREG_IDX(RcvPartitionKey)
-#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
-#define kr_ibcstatus KREG_IDX(IBCStatus)
-#define kr_ibcctrl KREG_IDX(IBCCtrl)
-#define kr_sendbuffererror KREG_IDX(SendBufErr0)
-#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
-#define kr_counterregbase KREG_IDX(CntrRegBase)
-#define kr_palign KREG_IDX(PageAlign)
-#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
-#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
-#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
-#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
-#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
-#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
-#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
-#define kr_scratch KREG_IDX(Scratch)
-#define kr_sendctrl KREG_IDX(SendCtrl)
-#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr)
-#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase)
-#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt)
-#define kr_sendpiosize KREG_IDX(SendPIOSize)
-#define kr_sendregbase KREG_IDX(SendRegBase)
-#define kr_userregbase KREG_IDX(UserRegBase)
-#define kr_control KREG_IDX(Control)
-#define kr_intclear KREG_IDX(IntClear)
-#define kr_intmask KREG_IDX(IntMask)
-#define kr_intstatus KREG_IDX(IntStatus)
-#define kr_errclear KREG_IDX(ErrClear)
-#define kr_errmask KREG_IDX(ErrMask)
-#define kr_errstatus KREG_IDX(ErrStatus)
-#define kr_hwerrclear KREG_IDX(HwErrClear)
-#define kr_hwerrmask KREG_IDX(HwErrMask)
-#define kr_hwerrstatus KREG_IDX(HwErrStatus)
-#define kr_revision KREG_IDX(Revision)
-#define kr_portcnt KREG_IDX(PortCnt)
-#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0)
-#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1)
-#define kr_serdes_stat KREG_IDX(SerdesStat)
-#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
-
-/* These must only be written via qib_write_kreg_ctxt() */
-#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
-#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
-
-#define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \
- QIB_6120_LBIntCnt_OFFS) / sizeof(u64))
-
-#define cr_badformat CREG_IDX(RxBadFormatCnt)
-#define cr_erricrc CREG_IDX(RxICRCErrCnt)
-#define cr_errlink CREG_IDX(RxLinkProblemCnt)
-#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
-#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
-#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt)
-#define cr_err_rlen CREG_IDX(RxLenErrCnt)
-#define cr_errslen CREG_IDX(TxLenErrCnt)
-#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
-#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
-#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
-#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
-#define cr_lbint CREG_IDX(LBIntCnt)
-#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
-#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
-#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
-#define cr_pktrcv CREG_IDX(RxDataPktCnt)
-#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
-#define cr_pktsend CREG_IDX(TxDataPktCnt)
-#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
-#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
-#define cr_rcvebp CREG_IDX(RxEBPCnt)
-#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
-#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
-#define cr_sendstall CREG_IDX(TxFlowStallCnt)
-#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
-#define cr_wordrcv CREG_IDX(RxDwordCnt)
-#define cr_wordsend CREG_IDX(TxDwordCnt)
-#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
-#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
-#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
-#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
-#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
-
-#define SYM_RMASK(regname, fldname) ((u64) \
- QIB_6120_##regname##_##fldname##_RMASK)
-#define SYM_MASK(regname, fldname) ((u64) \
- QIB_6120_##regname##_##fldname##_RMASK << \
- QIB_6120_##regname##_##fldname##_LSB)
-#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)
-
-#define SYM_FIELD(value, regname, fldname) ((u64) \
- (((value) >> SYM_LSB(regname, fldname)) & \
- SYM_RMASK(regname, fldname)))
-#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
-#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
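-
-/*
- * Worked example with illustrative values: for a field whose generated
- * constants are ..._LSB == 16 and ..._RMASK == 0x3, the macros expand to
- *
- *	SYM_MASK(reg, fld)           == 0x3ULL << 16 == 0x30000
- *	SYM_FIELD(0x20000, reg, fld) == (0x20000 >> 16) & 0x3 == 2
- */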
-
-/* link training states, from IBC */
-#define IB_6120_LT_STATE_DISABLED 0x00
-#define IB_6120_LT_STATE_LINKUP 0x01
-#define IB_6120_LT_STATE_POLLACTIVE 0x02
-#define IB_6120_LT_STATE_POLLQUIET 0x03
-#define IB_6120_LT_STATE_SLEEPDELAY 0x04
-#define IB_6120_LT_STATE_SLEEPQUIET 0x05
-#define IB_6120_LT_STATE_CFGDEBOUNCE 0x08
-#define IB_6120_LT_STATE_CFGRCVFCFG 0x09
-#define IB_6120_LT_STATE_CFGWAITRMT 0x0a
-#define IB_6120_LT_STATE_CFGIDLE 0x0b
-#define IB_6120_LT_STATE_RECOVERRETRAIN 0x0c
-#define IB_6120_LT_STATE_RECOVERWAITRMT 0x0e
-#define IB_6120_LT_STATE_RECOVERIDLE 0x0f
-
-/* link state machine states from IBC */
-#define IB_6120_L_STATE_DOWN 0x0
-#define IB_6120_L_STATE_INIT 0x1
-#define IB_6120_L_STATE_ARM 0x2
-#define IB_6120_L_STATE_ACTIVE 0x3
-#define IB_6120_L_STATE_ACT_DEFER 0x4
-
-static const u8 qib_6120_physportstate[0x20] = {
- [IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
- [IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
- [IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
- [IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
- [IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
- [IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
- [IB_6120_LT_STATE_CFGDEBOUNCE] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_6120_LT_STATE_CFGRCVFCFG] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_6120_LT_STATE_CFGWAITRMT] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_6120_LT_STATE_RECOVERRETRAIN] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_6120_LT_STATE_RECOVERWAITRMT] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_6120_LT_STATE_RECOVERIDLE] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
-
-
-struct qib_chip_specific {
- u64 __iomem *cregbase;
- u64 *cntrs;
- u64 *portcntrs;
- void *dummy_hdrq; /* used after ctxt close */
- dma_addr_t dummy_hdrq_phys;
- spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */
- spinlock_t user_tid_lock; /* no back to back user TID writes */
- spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
- spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
- u64 hwerrmask;
- u64 errormask;
- u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
- u64 gpio_mask; /* shadow the gpio mask register */
- u64 extctrl; /* shadow the gpio output enable, etc... */
- /*
- * these 5 fields are used to establish deltas for IB symbol
- * errors and linkrecovery errors. They can be reported on
- * some chips during link negotiation prior to INIT, and with
- * DDR when faking DDR negotiations with non-IBTA switches.
- * The chip counters are adjusted at driver unload if there is
- * a non-zero delta.
- */
- u64 ibdeltainprog;
- u64 ibsymdelta;
- u64 ibsymsnap;
- u64 iblnkerrdelta;
- u64 iblnkerrsnap;
- u64 ibcctrl; /* shadow for kr_ibcctrl */
- u32 lastlinkrecov; /* link recovery issue */
- u32 cntrnamelen;
- u32 portcntrnamelen;
- u32 ncntrs;
- u32 nportcntrs;
- /* used with gpio interrupts to implement IB counters */
- u32 rxfc_unsupvl_errs;
- u32 overrun_thresh_errs;
- /*
- * these count only cases where _successive_ LocalLinkIntegrity
- * errors were seen in the receive headers of IB standard packets
- */
- u32 lli_errs;
- u32 lli_counter;
- u64 lli_thresh;
- u64 sword; /* total dwords sent (sample result) */
- u64 rword; /* total dwords received (sample result) */
- u64 spkts; /* total packets sent (sample result) */
- u64 rpkts; /* total packets received (sample result) */
- u64 xmit_wait; /* # of ticks no data sent (sample result) */
- struct timer_list pma_timer;
- struct qib_pportdata *ppd;
- char emsgbuf[128];
- char bitsmsgbuf[64];
- u8 pma_sample_status;
-};
-
-/* ibcctrl bits */
-#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
-#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
-
-#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
-#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
-#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
-#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18
-
-/*
- * We could have a single register get/put routine that takes a group type,
- * but this is somewhat clearer and cleaner.  It also gives us some error
- * checking.  64 bit register reads should always work, but are inefficient
- * on Opteron (the northbridge always generates 2 separate HT 32 bit reads),
- * so we use kreg32 wherever possible. User register and counter register
- * reads are always 32 bit reads, so only one form of those routines.
- */
-
-/**
- * qib_read_ureg32 - read 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @ctxt: context number
- *
- * Return the contents of a register that is virtualized to be per context.
- * Returns 0 on errors (not distinguishable from valid contents at
- * runtime; we may add a separate error variable at some point).
- */
-static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
- enum qib_ureg regno, int ctxt)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return 0;
-
- if (dd->userbase)
- return readl(regno + (u64 __iomem *)
- ((char __iomem *)dd->userbase +
- dd->ureg_align * ctxt));
- else
- return readl(regno + (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *)dd->kregbase +
- dd->ureg_align * ctxt));
-}
-
-/**
- * qib_write_ureg - write 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @value: value
- * @ctxt: context
- *
- * Write the contents of a register that is virtualized to be per context.
- */
-static inline void qib_write_ureg(const struct qib_devdata *dd,
- enum qib_ureg regno, u64 value, int ctxt)
-{
- u64 __iomem *ubase;
-
- if (dd->userbase)
- ubase = (u64 __iomem *)
- ((char __iomem *) dd->userbase +
- dd->ureg_align * ctxt);
- else
- ubase = (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *) dd->kregbase +
- dd->ureg_align * ctxt);
-
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &ubase[regno]);
-}
-
-static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
- const u16 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
- return readl((u32 __iomem *)&dd->kregbase[regno]);
-}
-
-static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
- const u16 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
-
- return readq(&dd->kregbase[regno]);
-}
-
-static inline void qib_write_kreg(const struct qib_devdata *dd,
- const u16 regno, u64 value)
-{
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &dd->kregbase[regno]);
-}
-
-/**
- * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
- * @dd: the qlogic_ib device
- * @regno: the register number to write
- * @ctxt: the context containing the register
- * @value: the value to write
- */
-static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
- const u16 regno, unsigned ctxt,
- u64 value)
-{
- qib_write_kreg(dd, regno + ctxt, value);
-}
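-
-/*
- * Example (illustrative): per-ctxt registers are laid out consecutively,
- * so pointing context 2's receive header queue at a DMA address is
- *
- *	qib_write_kreg_ctxt(dd, kr_rcvhdraddr, 2, hdrq_phys);
- *
- * which lands on kernel register index kr_rcvhdraddr + 2; hdrq_phys is
- * an assumed name.
- */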
-
-static inline void write_6120_creg(const struct qib_devdata *dd,
- u16 regno, u64 value)
-{
- if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &dd->cspec->cregbase[regno]);
-}
-
-static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readq(&dd->cspec->cregbase[regno]);
-}
-
-static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readl(&dd->cspec->cregbase[regno]);
-}
-
-/* kr_control bits */
-#define QLOGIC_IB_C_RESET 1U
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)
-#define QLOGIC_IB_I_RCVURG_SHIFT 0
-#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)
-#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12
-
-#define QLOGIC_IB_C_FREEZEMODE 0x00000002
-#define QLOGIC_IB_C_LINKENABLE 0x00000004
-#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
-#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
-#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
-#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
-#define QLOGIC_IB_I_BITSEXTANT \
- ((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
- (QLOGIC_IB_I_RCVAVAIL_MASK << \
- QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
- QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
- QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO)
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
-#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-
-
-/* kr_extstatus bits */
-#define QLOGIC_IB_EXTS_FREQSEL 0x2
-#define QLOGIC_IB_EXTS_SERDESSEL 0x4
-#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define QLOGIC_IB_EXTS_MEMBIST_FOUND 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define QLOGIC_IB_XGXS_RESET 0x5ULL
-
-#define _QIB_GPIO_SDA_NUM 1
-#define _QIB_GPIO_SCL_NUM 0
-
-/* Bits in GPIO for the added IB link interrupts */
-#define GPIO_RXUVL_BIT 3
-#define GPIO_OVRUN_BIT 4
-#define GPIO_LLI_BIT 5
-#define GPIO_ERRINTR_MASK 0x38
-
-
-#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL
-#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \
- ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
-#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid))
-#define QLOGIC_IB_RT_IS_VALID(tid) \
- (((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \
- ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK)))
-#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
-#define QLOGIC_IB_RT_ADDR_SHIFT 10
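-
-/*
- * Worked example with an illustrative tid value: if the buffer-size field
- * (bits 31:29) holds 3, then
- *
- *	QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) == 3 + 11 - 1 == 13
- *	QLOGIC_IB_RT_BUFSIZE(tid)          == 1 << 13    == 8192 bytes
- *
- * and QLOGIC_IB_RT_IS_VALID(tid) is true, since the field is neither
- * all-zero nor all-ones.
- */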
-
-#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16
-#define QLOGIC_IB_R_TAILUPD_SHIFT 31
-#define IBA6120_R_PKEY_DIS_SHIFT 30
-
-#define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */
-
-#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
-#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
-
-#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
- ((1ULL << (SYM_LSB(regname, fldname) + (bit)))))
-
-#define TXEMEMPARITYERR_PIOBUF \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
-#define TXEMEMPARITYERR_PIOPBC \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
-#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
-
-#define RXEMEMPARITYERR_RCVBUF \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
-#define RXEMEMPARITYERR_LOOKUPQ \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
-#define RXEMEMPARITYERR_EXPTID \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
-#define RXEMEMPARITYERR_EAGERTID \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
-#define RXEMEMPARITYERR_FLAGBUF \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
-#define RXEMEMPARITYERR_DATAINFO \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
-#define RXEMEMPARITYERR_HDRINFO \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
-
-/* 6120 specific hardware errors... */
-static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
- /* generic hardware errors */
- QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
- QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
-
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
- "TXE PIOBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
- "TXE PIOPBC Memory Parity"),
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
- "TXE PIOLAUNCHFIFO Memory Parity"),
-
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
- "RXE RCVBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
- "RXE LOOKUPQ Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
- "RXE EAGERTID Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
- "RXE EXPTID Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
- "RXE FLAGBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
- "RXE DATAINFO Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
- "RXE HDRINFO Memory Parity"),
-
- /* chip-specific hardware errors */
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
- "PCIe Poisoned TLP"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
- "PCIe completion timeout"),
- /*
-	 * In practice, it's unlikely that we'll see PCIe PLL, or bus
- * parity or memory parity error failures, because most likely we
- * won't be able to talk to the core of the chip. Nonetheless, we
- * might see them, if they are in parts of the PCIe core that aren't
- * essential.
- */
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
- "PCIePLL1"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
- "PCIePLL0"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
- "PCIe XTLH core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
- "PCIe ADM TX core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
- "PCIe ADM RX core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
- "SerDes PLL"),
-};
-
-#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
-#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
- QLOGIC_IB_HWE_COREPLL_RFSLIP)
-
- /* variables for sanity checking interrupts and errors */
-#define IB_HWE_BITSEXTANT \
- (HWE_MASK(RXEMemParityErr) | \
- HWE_MASK(TXEMemParityErr) | \
- (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
- QLOGIC_IB_HWE_PCIE1PLLFAILED | \
- QLOGIC_IB_HWE_PCIE0PLLFAILED | \
- QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
- QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
- QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
- QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
- QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
- HWE_MASK(PowerOnBISTFailed) | \
- QLOGIC_IB_HWE_COREPLL_FBSLIP | \
- QLOGIC_IB_HWE_COREPLL_RFSLIP | \
- QLOGIC_IB_HWE_SERDESPLLFAILED | \
- HWE_MASK(IBCBusToSPCParityErr) | \
- HWE_MASK(IBCBusFromSPCParityErr))
-
-#define IB_E_BITSEXTANT \
- (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
- ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
- ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
- ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
- ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
- ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
- ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
- ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) | \
- ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendPioArmLaunchErr) | \
- ERR_MASK(SendUnexpectedPktNumErr) | \
- ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) | \
- ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) | \
- ERR_MASK(HardwareErr))
-
-#define QLOGIC_IB_E_PKTERRS ( \
- ERR_MASK(SendPktLenErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(RcvVCRCErr) | \
- ERR_MASK(RcvICRCErr) | \
- ERR_MASK(RcvShortPktLenErr) | \
- ERR_MASK(RcvEBPErr))
-
-/* These are all rcv-related errors which we want to count for stats */
-#define E_SUM_PKTERRS \
- (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
- ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
- ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
- ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
- ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
-
-/* These are all send-related errors which we want to count for stats */
-#define E_SUM_ERRS \
- (ERR_MASK(SendPioArmLaunchErr) | \
- ERR_MASK(SendUnexpectedPktNumErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(InvalidAddrErr))
-
-/*
- * This is similar to E_SUM_ERRS, but we can't ignore armlaunch and don't
- * ignore errors not related to freeze and cancelling buffers.  Armlaunch
- * can't be ignored because more could arrive while we are still cleaning
- * up, and those need to be cancelled as they happen.
- */
-#define E_SPKT_ERRS_IGNORE \
- (ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
- ERR_MASK(SendPktLenErr))
-
-/*
- * These are errors that can occur when the link changes state while
- * a packet is being sent or received.  This doesn't cover things
- * like EBP or VCRC that can result from the link changing state
- * while a packet is being sent, so we receive a "known bad" packet.
- */
-#define E_SUM_LINK_PKTERRS \
- (ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvUnexpectedCharErr))
-
-static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,
- u32, unsigned long);
-
-/*
- * On platforms using this chip, and not having ordered WC stores, we
- * can get TXE parity errors due to speculative reads to the PIO buffers,
- * and this, due to a chip issue, can result in (many) false parity error
- * reports. So it's a debug print on those, and an info print on systems
- * where the speculative reads don't occur.
- */
-static void qib_6120_txe_recover(struct qib_devdata *dd)
-{
- if (!qib_unordered_wc())
- qib_devinfo(dd->pcidev,
- "Recovering from TXE PIO parity error\n");
-}
-
-/* enable/disable chip from delivering interrupts */
-static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- if (dd->flags & QIB_BADINTR)
- return;
- qib_write_kreg(dd, kr_intmask, ~0ULL);
- /* force re-interrupt of any pending interrupts. */
- qib_write_kreg(dd, kr_intclear, 0ULL);
- } else
- qib_write_kreg(dd, kr_intmask, 0ULL);
-}
-
-/*
- * Try to clean up as much as possible for anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- * This is in chip-specific code because of all of the register accesses,
- * even though the details are similar on most chips.
- */
-static void qib_6120_clear_freeze(struct qib_devdata *dd)
-{
- /* disable error interrupts, to avoid confusion */
- qib_write_kreg(dd, kr_errmask, 0ULL);
-
- /* also disable interrupts; errormask is sometimes overwritten */
- qib_6120_set_intr_state(dd, 0);
-
- qib_cancel_sends(dd->pport);
-
- /* clear the freeze, and be sure chip saw it */
- qib_write_kreg(dd, kr_control, dd->control);
- qib_read_kreg32(dd, kr_scratch);
-
- /* force in-memory update now we are out of freeze */
- qib_force_pio_avail_update(dd);
-
- /*
- * force new interrupt if any hwerr, error or interrupt bits are
- * still set, and clear "safe" send packet errors related to freeze
- * and cancelling sends. Re-enable error interrupts before possible
- * force of re-interrupt on pending interrupts.
- */
- qib_write_kreg(dd, kr_hwerrclear, 0ULL);
- qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
- qib_6120_set_intr_state(dd, 1);
-}
-
-/**
- * qib_handle_6120_hwerrors - display hardware errors.
- * @dd: the qlogic_ib device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now
- * we'll print them and continue.  Reuse the same message buffer as
- * handle_6120_errors() to avoid excessive stack usage.
- */
-static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
- size_t msgl)
-{
- u64 hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char *bitsmsg;
-
- hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
- if (!hwerrs)
- return;
- if (hwerrs == ~0ULL) {
- qib_dev_err(dd,
- "Read of hardware error status failed (all bits set); ignoring\n");
- return;
- }
- qib_stats.sps_hwerrs++;
-
- /* Always clear the error status register, except MEMBISTFAIL,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support */
- qib_write_kreg(dd, kr_hwerrclear,
- hwerrs & ~HWE_MASK(PowerOnBISTFailed));
-
- hwerrs &= dd->cspec->hwerrmask;
-
- /*
- * Make sure we get this much out, unless told to be quiet,
- * or it's occurred within the last 5 seconds.
- */
- if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
- qib_devinfo(dd->pcidev,
- "Hardware error: hwerr=0x%llx (cleared)\n",
- (unsigned long long) hwerrs);
-
- if (hwerrs & ~IB_HWE_BITSEXTANT)
- qib_dev_err(dd,
- "hwerror interrupt with unknown errors %llx set\n",
- (unsigned long long)(hwerrs & ~IB_HWE_BITSEXTANT));
-
- ctrl = qib_read_kreg32(dd, kr_control);
- if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
- /*
- * Parity errors in send memory are recoverable,
-		 * just cancel the send (if indicated in sendbuffererror),
- * count the occurrence, unfreeze (if no other handled
- * hardware error bits are set), and continue. They can
- * occur if a processor speculative read is done to the PIO
- * buffer while we are sending a packet, for example.
- */
- if (hwerrs & TXE_PIO_PARITY) {
- qib_6120_txe_recover(dd);
- hwerrs &= ~TXE_PIO_PARITY;
- }
-
- if (!hwerrs)
- qib_6120_clear_freeze(dd);
- else
- isfatal = 1;
- }
-
- *msg = '\0';
-
- if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
- isfatal = 1;
- strlcat(msg,
- "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,
- ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);
-
- bitsmsg = dd->cspec->bitsmsgbuf;
- if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
- "[PCIe Mem Parity Errs %x] ", bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
- if (hwerrs & _QIB_PLL_FAIL) {
- isfatal = 1;
- snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) hwerrs & _QIB_PLL_FAIL);
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
- /*
- * If it occurs, it is left masked since the external
- * interface is unused
- */
- dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- if (hwerrs)
- /*
- * if any set that we aren't ignoring; only
- * make the complaint once, in case it's stuck
- * or recurring, and we get here multiple
- * times.
- */
- qib_dev_err(dd, "%s hardware error\n", msg);
- else
- *msg = 0; /* recovered from all of them */
-
- if (isfatal && !dd->diag_client) {
- qib_dev_err(dd,
- "Fatal Hardware Error, no longer usable, SN %.16s\n",
- dd->serial);
- /*
- * for /sys status file and user programs to print; if no
- * trailing brace is copied, we'll know it was truncated.
- */
- if (dd->freezemsg)
- snprintf(dd->freezemsg, dd->freezelen,
- "{%s}", msg);
- qib_disable_after_error(dd);
- }
-}
-
-/*
- * Decode the error status into strings, deciding whether to always
- * print it or not depending on "normal packet errors" vs everything
- * else. Return 1 if "real" errors, otherwise 0 if only packet
- * errors, so caller can decide what to print with the string.
- */
-static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
- u64 err)
-{
- int iserr = 1;
-
- *buf = '\0';
- if (err & QLOGIC_IB_E_PKTERRS) {
- if (!(err & ~QLOGIC_IB_E_PKTERRS))
- iserr = 0;
- if ((err & ERR_MASK(RcvICRCErr)) &&
- !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr))))
- strlcat(buf, "CRC ", blen);
- if (!iserr)
- goto done;
- }
- if (err & ERR_MASK(RcvHdrLenErr))
- strlcat(buf, "rhdrlen ", blen);
- if (err & ERR_MASK(RcvBadTidErr))
- strlcat(buf, "rbadtid ", blen);
- if (err & ERR_MASK(RcvBadVersionErr))
- strlcat(buf, "rbadversion ", blen);
- if (err & ERR_MASK(RcvHdrErr))
- strlcat(buf, "rhdr ", blen);
- if (err & ERR_MASK(RcvLongPktLenErr))
- strlcat(buf, "rlongpktlen ", blen);
- if (err & ERR_MASK(RcvMaxPktLenErr))
- strlcat(buf, "rmaxpktlen ", blen);
- if (err & ERR_MASK(RcvMinPktLenErr))
- strlcat(buf, "rminpktlen ", blen);
- if (err & ERR_MASK(SendMinPktLenErr))
- strlcat(buf, "sminpktlen ", blen);
- if (err & ERR_MASK(RcvFormatErr))
- strlcat(buf, "rformaterr ", blen);
- if (err & ERR_MASK(RcvUnsupportedVLErr))
- strlcat(buf, "runsupvl ", blen);
- if (err & ERR_MASK(RcvUnexpectedCharErr))
- strlcat(buf, "runexpchar ", blen);
- if (err & ERR_MASK(RcvIBFlowErr))
- strlcat(buf, "ribflow ", blen);
- if (err & ERR_MASK(SendUnderRunErr))
- strlcat(buf, "sunderrun ", blen);
- if (err & ERR_MASK(SendPioArmLaunchErr))
- strlcat(buf, "spioarmlaunch ", blen);
- if (err & ERR_MASK(SendUnexpectedPktNumErr))
- strlcat(buf, "sunexperrpktnum ", blen);
- if (err & ERR_MASK(SendDroppedSmpPktErr))
- strlcat(buf, "sdroppedsmppkt ", blen);
- if (err & ERR_MASK(SendMaxPktLenErr))
- strlcat(buf, "smaxpktlen ", blen);
- if (err & ERR_MASK(SendUnsupportedVLErr))
- strlcat(buf, "sunsupVL ", blen);
- if (err & ERR_MASK(InvalidAddrErr))
- strlcat(buf, "invalidaddr ", blen);
- if (err & ERR_MASK(RcvEgrFullErr))
- strlcat(buf, "rcvegrfull ", blen);
- if (err & ERR_MASK(RcvHdrFullErr))
- strlcat(buf, "rcvhdrfull ", blen);
- if (err & ERR_MASK(IBStatusChanged))
- strlcat(buf, "ibcstatuschg ", blen);
- if (err & ERR_MASK(RcvIBLostLinkErr))
- strlcat(buf, "riblostlink ", blen);
- if (err & ERR_MASK(HardwareErr))
- strlcat(buf, "hardware ", blen);
- if (err & ERR_MASK(ResetNegated))
- strlcat(buf, "reset ", blen);
-done:
- return iserr;
-}
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer, so it can be re-used.
- */
-static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd)
-{
- unsigned long sbuf[2];
- struct qib_devdata *dd = ppd->dd;
-
- /*
-	 * It's possible that sendbuffererror could have bits set; we might
-	 * have already done this as a result of hardware error handling.
- */
- sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
- sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
-
- if (sbuf[0] || sbuf[1])
- qib_disarm_piobufs_set(dd, sbuf,
- dd->piobcnt2k + dd->piobcnt4k);
-}
-
-static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
-{
- int ret = 1;
- u32 ibstate = qib_6120_iblink_state(ibcs);
- u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);
-
- if (linkrecov != dd->cspec->lastlinkrecov) {
- /* and no more until active again */
- dd->cspec->lastlinkrecov = 0;
- qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
- ret = 0;
- }
- if (ibstate == IB_PORT_ACTIVE)
- dd->cspec->lastlinkrecov =
- read_6120_creg32(dd, cr_iblinkerrrecov);
- return ret;
-}
-
-static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
-{
- char *msg;
- u64 ignore_this_time = 0;
- u64 iserr = 0;
- struct qib_pportdata *ppd = dd->pport;
- u64 mask;
-
- /* don't report errors that are masked */
- errs &= dd->cspec->errormask;
- msg = dd->cspec->emsgbuf;
-
- /* do these first, they are most important */
- if (errs & ERR_MASK(HardwareErr))
- qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
-
- if (errs & ~IB_E_BITSEXTANT)
- qib_dev_err(dd,
- "error interrupt with unknown errors %llx set\n",
- (unsigned long long) (errs & ~IB_E_BITSEXTANT));
-
- if (errs & E_SUM_ERRS) {
- qib_disarm_6120_senderrbufs(ppd);
- if ((errs & E_SUM_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when trying to bring the link
- * up, but the IB link changes state at the "wrong"
- * time. The IB logic then complains that the packet
- * isn't valid. We don't want to confuse people, so
- * we just don't print them, except at debug
- */
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
- } else if ((errs & E_SUM_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when SMA is trying to bring the link
- * up, but the IB link changes state at the "wrong" time.
- * The IB logic then complains that the packet isn't
- * valid. We don't want to confuse people, so we just
- * don't print them, except at debug
- */
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
-
- qib_write_kreg(dd, kr_errclear, errs);
-
- errs &= ~ignore_this_time;
- if (!errs)
- goto done;
-
- /*
- * The ones we mask off are handled specially below
- * or above.
- */
- mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
- ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
- qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
-
- if (errs & E_SUM_PKTERRS)
- qib_stats.sps_rcverrs++;
- if (errs & E_SUM_ERRS)
- qib_stats.sps_txerrs++;
-
- iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS);
-
- if (errs & ERR_MASK(IBStatusChanged)) {
- u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
- u32 ibstate = qib_6120_iblink_state(ibcs);
- int handle = 1;
-
- if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
- handle = chk_6120_linkrecovery(dd, ibcs);
- /*
- * Since going into a recovery state causes the link state
- * to go down and since recovery is transitory, it is better
- * if we "miss" ever seeing the link training state go into
- * recovery (i.e., ignore this transition for link state
- * special handling purposes) without updating lastibcstat.
- */
- if (handle && qib_6120_phys_portstate(ibcs) ==
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
- handle = 0;
- if (handle)
- qib_handle_e_ibstatuschanged(ppd, ibcs);
- }
-
- if (errs & ERR_MASK(ResetNegated)) {
- qib_dev_err(dd,
- "Got reset, requires re-init (unload and reload driver)\n");
- dd->flags &= ~QIB_INITTED; /* needs re-init */
- /* mark as having had error */
- *dd->devstatusp |= QIB_STATUS_HWERROR;
- *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
- }
-
- if (*msg && iserr)
- qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
-
- if (ppd->state_wanted & ppd->lflags)
- wake_up_interruptible(&ppd->state_wait);
-
- /*
- * If there were hdrq or egrfull errors, wake up any processes
- * waiting in poll. We used to try to check which contexts had
- * the overflow, but given the cost of that and the chip reads
- * to support it, it's better to just wake everybody up if we
- * get an overflow; waiters can poll again if it's not them.
- */
- if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
- qib_handle_urcv(dd, ~0U);
- if (errs & ERR_MASK(RcvEgrFullErr))
- qib_stats.sps_buffull++;
- else
- qib_stats.sps_hdrfull++;
- }
-done:
- return;
-}
-
-/**
- * qib_6120_init_hwerrors - enable hardware errors
- * @dd: the qlogic_ib device
- *
- * Now that we have finished initializing everything that might reasonably
- * cause a hardware error, and have cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask.
- */
-static void qib_6120_init_hwerrors(struct qib_devdata *dd)
-{
- u64 val;
- u64 extsval;
-
- extsval = qib_read_kreg64(dd, kr_extstatus);
-
- if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
- qib_dev_err(dd, "MemBIST did not complete!\n");
-
-	/* init so all hwerrors interrupt, and enter freeze; adjust below */
- val = ~0ULL;
- if (dd->minrev < 2) {
- /*
- * Avoid problem with internal interface bus parity
- * checking. Fixed in Rev2.
- */
- val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
- }
-	/* avoid some Intel CPUs' speculative-read freeze mode issue */
- val &= ~TXEMEMPARITYERR_PIOBUF;
-
- dd->cspec->hwerrmask = val;
-
- qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
-
- /* clear all */
- qib_write_kreg(dd, kr_errclear, ~0ULL);
- /* enable errors that are masked, at least this first time. */
- qib_write_kreg(dd, kr_errmask, ~0ULL);
- dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
- /* clear any interrupts up to this point (ints still not enabled) */
- qib_write_kreg(dd, kr_intclear, ~0ULL);
-
- qib_write_kreg(dd, kr_rcvbthqp,
- dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
- QIB_KD_QP);
-}
-
-/*
- * Disable and enable the armlaunch error. Used for PIO bandwidth testing
- * on chips that are count-based, rather than trigger-based. There is no
- * reference counting, but that's also fine, given the intended use.
- * Only chip-specific because it's all register accesses.
- */
-static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- qib_write_kreg(dd, kr_errclear,
- ERR_MASK(SendPioArmLaunchErr));
- dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
- } else
- dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
-}
-
-/*
- * Formerly took parameter <which> in pre-shifted,
- * pre-merged form with LinkCmd and LinkInitCmd
- * together, and assumed that zero was a NOP.
- */
-static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
- u16 linitcmd)
-{
- u64 mod_wd;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
- /*
- * If we are told to disable, note that so link-recovery
- * code does not attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
- /*
- * Any other linkinitcmd will lead to LINKDOWN and then
- * to INIT (if all is well), so clear flag to let
- * link-recovery code attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-
- mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
- (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
- /* write to chip to prevent back-to-back writes of control reg */
- qib_write_kreg(dd, kr_scratch, 0);
-}
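-
-/*
- * Example (illustrative): to command the link down while leaving the IBC
- * polling for a peer, a caller would issue
- *
- *	qib_set_ib_6120_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
- *			       QLOGIC_IB_IBCC_LINKINITCMD_POLL);
- *
- * which merges to (1 << 18) | (2 << 16) before being OR'd into the
- * ibcctrl shadow value.
- */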
-
-/**
- * qib_6120_bringup_serdes - bring up the serdes
- * @ppd: the qlogic_ib device
- */
-static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 val, config1, prev_val, hwstat, ibc;
-
- /* Put IBC in reset, sends disabled */
- dd->control &= ~QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control, 0ULL);
-
- dd->cspec->ibdeltainprog = 1;
- dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
- dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);
-
- /* flowcontrolwatermark is in units of KBytes */
- ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
- /*
-	 * How often flowctrl is sent.  More or less in usecs; balance against
- * watermark value, so that in theory senders always get a flow
- * control update in time to not let the IB link go idle.
- */
- ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
- /* max error tolerance */
- dd->cspec->lli_thresh = 0xf;
- ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
- /* use "real" buffer space for */
- ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
- /* IB credit flow control. */
- ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
- /*
- * set initial max size pkt IBC will send, including ICRC; it's the
- * PIO buffer size in dwords, less 1; also see qib_set_mtu()
- */
- ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
- dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
-
- /* initially come up waiting for TS1, without sending anything. */
- val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
- qib_write_kreg(dd, kr_ibcctrl, val);
-
- val = qib_read_kreg64(dd, kr_serdes_cfg0);
- config1 = qib_read_kreg64(dd, kr_serdes_cfg1);
-
- /*
-	 * Force reset on, and also set rxdetect enable.  This must be done
-	 * before reading serdesstatus, at least for simulation, or some of
-	 * the bits in serdes status will come back as undefined and cause
-	 * simulation failures.
- */
- val |= SYM_MASK(SerdesCfg0, ResetPLL) |
- SYM_MASK(SerdesCfg0, RxDetEnX) |
- (SYM_MASK(SerdesCfg0, L1PwrDnA) |
- SYM_MASK(SerdesCfg0, L1PwrDnB) |
- SYM_MASK(SerdesCfg0, L1PwrDnC) |
- SYM_MASK(SerdesCfg0, L1PwrDnD));
- qib_write_kreg(dd, kr_serdes_cfg0, val);
- /* be sure chip saw it */
- qib_read_kreg64(dd, kr_scratch);
- udelay(5); /* need pll reset set at least for a bit */
- /*
-	 * After the PLL is reset, set the per-lane Resets and TxIdle and
-	 * clear the PLL reset and rxdetect (to get a falling edge).
-	 * Leave the L1PWR bits set (permanently).
- */
- val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
- SYM_MASK(SerdesCfg0, ResetPLL) |
- (SYM_MASK(SerdesCfg0, L1PwrDnA) |
- SYM_MASK(SerdesCfg0, L1PwrDnB) |
- SYM_MASK(SerdesCfg0, L1PwrDnC) |
- SYM_MASK(SerdesCfg0, L1PwrDnD)));
- val |= (SYM_MASK(SerdesCfg0, ResetA) |
- SYM_MASK(SerdesCfg0, ResetB) |
- SYM_MASK(SerdesCfg0, ResetC) |
- SYM_MASK(SerdesCfg0, ResetD)) |
- SYM_MASK(SerdesCfg0, TxIdeEnX);
- qib_write_kreg(dd, kr_serdes_cfg0, val);
- /* be sure chip saw it */
- (void) qib_read_kreg64(dd, kr_scratch);
-	/* the PLL reset must be clear for at least 11 usec before the lane
-	 * resets are cleared; give it a few more usecs to be sure */
- udelay(15);
- val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
- SYM_MASK(SerdesCfg0, ResetB) |
- SYM_MASK(SerdesCfg0, ResetC) |
- SYM_MASK(SerdesCfg0, ResetD)) |
- SYM_MASK(SerdesCfg0, TxIdeEnX));
-
- qib_write_kreg(dd, kr_serdes_cfg0, val);
- /* be sure chip saw it */
- (void) qib_read_kreg64(dd, kr_scratch);
-
- val = qib_read_kreg64(dd, kr_xgxs_cfg);
- prev_val = val;
- if (val & QLOGIC_IB_XGXS_RESET)
- val &= ~QLOGIC_IB_XGXS_RESET;
- if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
- /* need to compensate for Tx inversion in partner */
- val &= ~SYM_MASK(XGXSCfg, polarity_inv);
- val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
- }
- if (val != prev_val)
- qib_write_kreg(dd, kr_xgxs_cfg, val);
-
- val = qib_read_kreg64(dd, kr_serdes_cfg0);
-
- /* clear current and de-emphasis bits */
- config1 &= ~0x0ffffffff00ULL;
- /* set current to 20ma */
- config1 |= 0x00000000000ULL;
- /* set de-emphasis to -5.68dB */
- config1 |= 0x0cccc000000ULL;
- qib_write_kreg(dd, kr_serdes_cfg1, config1);
-
- /* base and port guid same for single port */
- ppd->guid = dd->base_guid;
-
- /*
- * the process of setting and un-resetting the serdes normally
- * causes a serdes PLL error, so check for that and clear it
-	 * here.  Also clear the hwerr bit in errstatus, but not others.
- */
- hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
- if (hwstat) {
-		/* should just be the PLL error, but clear all set bits in any case */
- qib_write_kreg(dd, kr_hwerrclear, hwstat);
- qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
- }
-
- dd->control |= QLOGIC_IB_C_LINKENABLE;
- dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
- qib_write_kreg(dd, kr_control, dd->control);
-
- return 0;
-}
-
-/**
- * qib_6120_quiet_serdes - set serdes to txidle
- * @ppd: physical port of the qlogic_ib device
- * Called when driver is being unloaded
- */
-static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 val;
-
- qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-
- /* disable IBC */
- dd->control &= ~QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control,
- dd->control | QLOGIC_IB_C_FREEZEMODE);
-
- if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
- dd->cspec->ibdeltainprog) {
- u64 diagc;
-
- /* enable counter writes */
- diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
- qib_write_kreg(dd, kr_hwdiagctrl,
- diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
-
- if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
- val = read_6120_creg32(dd, cr_ibsymbolerr);
- if (dd->cspec->ibdeltainprog)
- val -= val - dd->cspec->ibsymsnap;
- val -= dd->cspec->ibsymdelta;
- write_6120_creg(dd, cr_ibsymbolerr, val);
- }
- if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
- val = read_6120_creg32(dd, cr_iblinkerrrecov);
- if (dd->cspec->ibdeltainprog)
- val -= val - dd->cspec->iblnkerrsnap;
- val -= dd->cspec->iblnkerrdelta;
- write_6120_creg(dd, cr_iblinkerrrecov, val);
- }
-
- /* and disable counter writes */
- qib_write_kreg(dd, kr_hwdiagctrl, diagc);
- }
-
- val = qib_read_kreg64(dd, kr_serdes_cfg0);
- val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
- qib_write_kreg(dd, kr_serdes_cfg0, val);
-}
-
-/**
- * qib_6120_setup_setextled - set the state of the two external LEDs
- * @ppd: the qlogic_ib device
- * @on: whether the link is up or not
- *
- * The exact combo of LEDs if on is true is determined by looking
- * at the ibcstatus.
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state).
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
-{
- u64 extctl, val, lst, ltst;
- unsigned long flags;
- struct qib_devdata *dd = ppd->dd;
-
- /*
- * The diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running.
- */
- if (dd->diag_client)
- return;
-
- /* Allow override of LED display for, e.g. Locating system in rack */
- if (ppd->led_override) {
- ltst = (ppd->led_override & QIB_LED_PHYS) ?
- IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED,
- lst = (ppd->led_override & QIB_LED_LOG) ?
- IB_PORT_ACTIVE : IB_PORT_DOWN;
- } else if (on) {
- val = qib_read_kreg64(dd, kr_ibcstatus);
- ltst = qib_6120_phys_portstate(val);
- lst = qib_6120_iblink_state(val);
- } else {
- ltst = 0;
- lst = 0;
- }
-
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
- SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
-
- if (ltst == IB_PHYSPORTSTATE_LINKUP)
- extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
- if (lst == IB_PORT_ACTIVE)
- extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
- dd->cspec->extctrl = extctl;
- qib_write_kreg(dd, kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-}
-
-/**
- * qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the qlogic_ib device
- *
- * This is called during driver unload.
- */
-static void qib_6120_setup_cleanup(struct qib_devdata *dd)
-{
- qib_free_irq(dd);
- kfree(dd->cspec->cntrs);
- kfree(dd->cspec->portcntrs);
- if (dd->cspec->dummy_hdrq) {
- dma_free_coherent(&dd->pcidev->dev,
- ALIGN(dd->rcvhdrcnt *
- dd->rcvhdrentsize *
- sizeof(u32), PAGE_SIZE),
- dd->cspec->dummy_hdrq,
- dd->cspec->dummy_hdrq_phys);
- dd->cspec->dummy_hdrq = NULL;
- }
-}
-
-static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (needint)
- dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
- else
- dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-}
-
-/*
- * Handle errors and unusual events first; this is a separate function
- * to improve cache hits for fast-path interrupt handling.
- */
-static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
-{
- if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
- qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
- istat & ~QLOGIC_IB_I_BITSEXTANT);
-
- if (istat & QLOGIC_IB_I_ERROR) {
- u64 estat = 0;
-
- qib_stats.sps_errints++;
- estat = qib_read_kreg64(dd, kr_errstatus);
- if (!estat)
- qib_devinfo(dd->pcidev,
- "error interrupt (%Lx), but no error bits set!\n",
- istat);
- handle_6120_errors(dd, estat);
- }
-
- if (istat & QLOGIC_IB_I_GPIO) {
- u32 gpiostatus;
- u32 to_clear = 0;
-
- /*
- * GPIO_3..5 on IBA6120 Rev2 chips indicate
- * errors that we need to count.
- */
- gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
- /* First the error-counter case. */
- if (gpiostatus & GPIO_ERRINTR_MASK) {
- /* want to clear the bits we see asserted. */
- to_clear |= (gpiostatus & GPIO_ERRINTR_MASK);
-
- /*
- * Count appropriately, clear bits out of our copy,
- * as they have been "handled".
- */
- if (gpiostatus & (1 << GPIO_RXUVL_BIT))
- dd->cspec->rxfc_unsupvl_errs++;
- if (gpiostatus & (1 << GPIO_OVRUN_BIT))
- dd->cspec->overrun_thresh_errs++;
- if (gpiostatus & (1 << GPIO_LLI_BIT))
- dd->cspec->lli_errs++;
- gpiostatus &= ~GPIO_ERRINTR_MASK;
- }
- if (gpiostatus) {
- /*
- * Some unexpected bits remain. If they could have
- * caused the interrupt, complain and clear.
- * To avoid repetition of this condition, also clear
- * the mask. It is almost certainly due to an error.
- */
- const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
-
- /*
- * Also check that the chip reflects our shadow,
- * and report issues. If they caused the interrupt,
- * we suppress them by refreshing from the shadow.
- */
- if (mask & gpiostatus) {
- to_clear |= (gpiostatus & mask);
- dd->cspec->gpio_mask &= ~(gpiostatus & mask);
- qib_write_kreg(dd, kr_gpio_mask,
- dd->cspec->gpio_mask);
- }
- }
- if (to_clear)
- qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
- }
-}
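
The noinline split above is a common fast-path/slow-path idiom: rare work
is pushed into an out-of-line helper so the hot handler keeps a small
icache footprint. A compilable sketch of the pattern (constants and names
are made up for illustration):

#include <stdio.h>

#define ERR_BITS	0x1u	/* illustrative bit assignments */
#define DATA_BITS	0x6u

/* rare events live out of line, like unlikely_6120_intr() above */
__attribute__((noinline)) static void slow_path(unsigned status)
{
	printf("rare event, status 0x%x\n", status);
}

static void fast_handler(unsigned status)
{
	if (__builtin_expect(status & ERR_BITS, 0))
		slow_path(status);
	if (status & DATA_BITS)
		printf("normal data, status 0x%x\n", status);
}

int main(void)
{
	fast_handler(0x2);	/* common case: data only */
	fast_handler(0x3);	/* error plus data */
	return 0;
}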
-
-static irqreturn_t qib_6120intr(int irq, void *data)
-{
- struct qib_devdata *dd = data;
- irqreturn_t ret;
- u32 istat, ctxtrbits, rmask, crcs = 0;
- unsigned i;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- ret = IRQ_HANDLED;
- goto bail;
- }
-
- istat = qib_read_kreg32(dd, kr_intstatus);
-
- if (unlikely(!istat)) {
- ret = IRQ_NONE; /* not our interrupt, or already handled */
- goto bail;
- }
- if (unlikely(istat == -1)) {
- qib_bad_intrstatus(dd);
- /* don't know if it was our interrupt or not */
- ret = IRQ_NONE;
- goto bail;
- }
-
- this_cpu_inc(*dd->int_counter);
-
- if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
- QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
- unlikely_6120_intr(dd, istat);
-
- /*
- * Clear the interrupt bits we found set, relatively early, so we
- * "know" know the chip will have seen this by the time we process
- * the queue, and will re-interrupt if necessary. The processor
- * itself won't take the interrupt again until we return.
- */
- qib_write_kreg(dd, kr_intclear, istat);
-
- /*
- * Handle kernel receive queues before checking for pio buffers
- * available since receives can overflow; piobuf waiters can afford
- * a few extra cycles, since they were waiting anyway.
- */
- ctxtrbits = istat &
- ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
- if (ctxtrbits) {
- rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (1U << QLOGIC_IB_I_RCVURG_SHIFT);
- for (i = 0; i < dd->first_user_ctxt; i++) {
- if (ctxtrbits & rmask) {
- ctxtrbits &= ~rmask;
- crcs += qib_kreceive(dd->rcd[i],
- &dd->cspec->lli_counter,
- NULL);
- }
- rmask <<= 1;
- }
- if (crcs) {
- u32 cntr = dd->cspec->lli_counter;
-
- cntr += crcs;
- if (cntr) {
- if (cntr > dd->cspec->lli_thresh) {
- dd->cspec->lli_counter = 0;
- dd->cspec->lli_errs++;
- } else
- dd->cspec->lli_counter += cntr;
- }
- }
-
- if (ctxtrbits) {
- ctxtrbits =
- (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
- qib_handle_urcv(dd, ctxtrbits);
- }
- }
-
- if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
- qib_ib_piobufavail(dd);
-
- ret = IRQ_HANDLED;
-bail:
- return ret;
-}
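
The receive scan above packs two status bits per context (an "available"
bit and an "urgent" bit) into one word and walks them with a single mask
that shifts left once per context. A stand-alone sketch of that scan,
with made-up shift values:

#include <stdio.h>
#include <stdint.h>

#define RCVAVAIL_SHIFT	0	/* illustrative, not the chip's layout */
#define RCVURG_SHIFT	16

int main(void)
{
	/* pretend ctxts 1 and 3 have data, ctxt 2 has an urgent packet */
	uint32_t ctxtrbits = (1u << (RCVAVAIL_SHIFT + 1)) |
			     (1u << (RCVAVAIL_SHIFT + 3)) |
			     (1u << (RCVURG_SHIFT + 2));
	uint32_t rmask = (1u << RCVAVAIL_SHIFT) | (1u << RCVURG_SHIFT);
	unsigned i;

	for (i = 0; i < 5; i++) {
		if (ctxtrbits & rmask) {
			ctxtrbits &= ~rmask;	/* bits handled */
			printf("service context %u\n", i);
		}
		rmask <<= 1;	/* next context's pair of bits */
	}
	return 0;
}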
-
-/*
- * Set up our chip-specific interrupt handler.
- * The interrupt type has already been set up, so
- * we just need to do the registration and error checking.
- */
-static void qib_setup_6120_interrupt(struct qib_devdata *dd)
-{
- int ret;
-
- /*
- * If the chip supports added error indication via GPIO pins,
- * enable interrupts on those bits so the interrupt routine
- * can count the events. Also set flag so interrupt routine
- * can know they are expected.
- */
- if (SYM_FIELD(dd->revision, Revision_R,
- ChipRevMinor) > 1) {
- /* Rev2+ reports extra errors via internal GPIO pins */
- dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- }
-
- ret = pci_request_irq(dd->pcidev, 0, qib_6120intr, NULL, dd,
- QIB_DRV_NAME);
- if (ret)
- qib_dev_err(dd,
- "Couldn't setup interrupt (irq=%d): %d\n",
- pci_irq_vector(dd->pcidev, 0), ret);
-}
-
-/**
- * pe_boardname - fill in the board name
- * @dd: the qlogic_ib device
- *
- * info is based on the board revision register
- */
-static void pe_boardname(struct qib_devdata *dd)
-{
- u32 boardid;
-
- boardid = SYM_FIELD(dd->revision, Revision,
- BoardID);
-
- switch (boardid) {
- case 2:
- dd->boardname = "InfiniPath_QLE7140";
- break;
- default:
- qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
- dd->boardname = "Unknown_InfiniPath_6120";
- break;
- }
-
- if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
- qib_dev_err(dd,
- "Unsupported InfiniPath hardware revision %u.%u!\n",
- dd->majrev, dd->minrev);
-
- snprintf(dd->boardversion, sizeof(dd->boardversion),
- "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
- QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
- (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
- dd->majrev, dd->minrev,
- (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
-}
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context. If we need interrupt context, we can split
- * it into two routines.
- */
-static int qib_6120_setup_reset(struct qib_devdata *dd)
-{
- u64 val;
- int i;
- int ret;
- u16 cmdval;
- u8 int_line, clinesz;
-
- qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
-
- /* Use ERROR so it shows up in logs, etc. */
- qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
-
- /* no interrupts till re-initted */
- qib_6120_set_intr_state(dd, 0);
-
- dd->cspec->ibdeltainprog = 0;
- dd->cspec->ibsymdelta = 0;
- dd->cspec->iblnkerrdelta = 0;
-
- /*
- * Keep chip from being accessed until we are ready. Use
- * writeq() directly, to allow the write even though QIB_PRESENT
- * isn't set.
- */
- dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
- /* so we check interrupts work again */
- dd->z_int_counter = qib_int_counter(dd);
- val = dd->control | QLOGIC_IB_C_RESET;
- writeq(val, &dd->kregbase[kr_control]);
- mb(); /* prevent compiler re-ordering around actual reset */
-
- for (i = 1; i <= 5; i++) {
- /*
- * Allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 2000);
-
- qib_pcie_reenable(dd, cmdval, int_line, clinesz);
-
- /*
- * Use readq directly, so we don't need to mark it as PRESENT
- * until we get a successful indication that all is well.
- */
- val = readq(&dd->kregbase[kr_revision]);
- if (val == dd->revision) {
- dd->flags |= QIB_PRESENT; /* it's back */
- ret = qib_reinit_intr(dd);
- goto bail;
- }
- }
- ret = 0; /* failed */
-
-bail:
- if (ret) {
- if (qib_pcie_params(dd, dd->lbus_width, NULL))
- qib_dev_err(dd,
- "Reset failed to setup PCIe or interrupts; continuing anyway\n");
- /* clear the reset error, init error/hwerror mask */
- qib_6120_init_hwerrors(dd);
- /* for Rev2 error interrupts; nop for rev 1 */
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- }
- return ret;
-}
-
-/**
- * qib_6120_put_tid - write a TID in chip
- * @dd: the qlogic_ib device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
- * for expected
- * @pa: physical address of in-memory buffer; tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking etc.
- * It's used both for the full cleanup on exit and for the normal
- * setup and teardown.
- */
-static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
- unsigned long flags;
- int tidx;
- spinlock_t *tidlockp; /* select appropriate spinlock */
-
- if (!dd->kregbase)
- return;
-
- if (pa != dd->tidinvalid) {
- if (pa & ((1U << 11) - 1)) {
- qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
- pa);
- return;
- }
- pa >>= 11;
- if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
- qib_dev_err(dd,
- "Physical page address 0x%lx larger than supported\n",
- pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->tidtemplate;
- else /* for now, always full 4KB page */
- pa |= 2 << 29;
- }
-
- /*
- * Avoid chip issue by writing the scratch register
- * before and after the TID, and with an io write barrier.
- * We use a spinlock around the writes, so they can't intermix
- * with other TID (eager or expected) writes (the chip problem
- * is triggered by back to back TID writes). Unfortunately, this
- * call can be done from interrupt level for the ctxt 0 eager TIDs,
- * so we have to use irqsave locks.
- */
- /*
- * Assumes tidptr always > egrtidbase
- * if type == RCVHQ_RCV_TYPE_EAGER.
- */
- tidx = tidptr - dd->egrtidbase;
-
- tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
- ? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
- spin_lock_irqsave(tidlockp, flags);
- qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
- writel(pa, tidp32);
- qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
- spin_unlock_irqrestore(tidlockp, flags);
-}
-
-/**
- * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
- * @dd: the qlogic_ib device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
- * for expected
- * @pa: physical address of in-memory buffer; tidinvalid if freeing
- *
- * This exists as a separate routine to allow for selection of the
- * appropriate "flavor". The static calls in cleanup just use the
- * revision-agnostic form, as they are not performance critical.
- */
-static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
-
- if (!dd->kregbase)
- return;
-
- if (pa != dd->tidinvalid) {
- if (pa & ((1U << 11) - 1)) {
- qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
- pa);
- return;
- }
- pa >>= 11;
- if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
- qib_dev_err(dd,
- "Physical page address 0x%lx larger than supported\n",
- pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->tidtemplate;
- else /* for now, always full 4KB page */
- pa |= 2 << 29;
- }
- writel(pa, tidp32);
-}
-
-
-/**
- * qib_6120_clear_tids - clear all TID entries for a context, expected and eager
- * @dd: the qlogic_ib device
- * @rcd: the context
- *
- * clear all TID entries for a context, expected and eager.
- * Used from qib_close(). On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void qib_6120_clear_tids(struct qib_devdata *dd,
- struct qib_ctxtdata *rcd)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- u32 ctxt;
- int i;
-
- if (!dd->kregbase || !rcd)
- return;
-
- ctxt = rcd->ctxt;
-
- tidinv = dd->tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->kregbase) +
- dd->rcvtidbase +
- ctxt * dd->rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->rcvtidcnt; i++)
- /* use func pointer because could be one of two funcs */
- dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->kregbase) +
- dd->rcvegrbase +
- rcd->rcvegr_tid_base * sizeof(*tidbase));
-
- for (i = 0; i < rcd->rcvegrcnt; i++)
- /* use func pointer because could be one of two funcs */
- dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * qib_6120_tidtemplate - setup constants for TID updates
- * @dd: the qlogic_ib device
- *
- * We set up values that we use a lot, to avoid recalculating each time
- */
-static void qib_6120_tidtemplate(struct qib_devdata *dd)
-{
- u32 egrsize = dd->rcvegrbufsize;
-
- /*
- * For now, we always allocate 4KB buffers (at init) so we can
- * receive max size packets. We may want a module parameter to
- * specify 2KB or 4KB and/or make it per-ctxt instead of per-device
- * for those who want to reduce memory footprint. Note that the
- * rcvhdrentsize size must be large enough to hold the largest
- * IB header (currently 96 bytes) that we expect to handle (plus of
- * course the 2 dwords of RHF).
- */
- if (egrsize == 2048)
- dd->tidtemplate = 1U << 29;
- else if (egrsize == 4096)
- dd->tidtemplate = 2U << 29;
- dd->tidinvalid = 0;
-}
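
The template values set above imply a simple TID word layout: bits 31..29
encode the eager-buffer size (1 = 2KB, 2 = 4KB) and the low bits hold the
2KB-aligned physical address shifted right by 11, as qib_6120_put_tid()
builds it. A sketch of encoding and decoding such a word (the address is
illustrative):

#include <stdio.h>
#include <stdint.h>

static uint32_t make_tid(uint64_t pa, uint32_t tidtemplate)
{
	/* pa must be 2KB aligned; the chip stores pa >> 11 */
	return (uint32_t)(pa >> 11) | tidtemplate;
}

int main(void)
{
	uint32_t tidtemplate = 2u << 29;	/* 4KB eager buffers */
	uint32_t tid = make_tid(0x12345800ULL, tidtemplate);

	printf("tid=0x%08x size-code=%u pa=0x%llx\n",
	       tid, tid >> 29,
	       (unsigned long long)(tid & ((1u << 29) - 1)) << 11);
	return 0;
}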
-
-int __attribute__((weak)) qib_unordered_wc(void)
-{
- return 0;
-}
-
-/**
- * qib_6120_get_base_info - set chip-specific flags for user code
- * @rcd: the qlogic_ib ctxt
- * @kinfo: qib_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int qib_6120_get_base_info(struct qib_ctxtdata *rcd,
- struct qib_base_info *kinfo)
-{
- if (qib_unordered_wc())
- kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER;
-
- kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
- QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED;
- return 0;
-}
-
-
-static struct qib_message_header *
-qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
-{
- return (struct qib_message_header *)
- &rhf_addr[sizeof(u64) / sizeof(u32)];
-}
-
-static void qib_6120_config_ctxts(struct qib_devdata *dd)
-{
- dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
- if (qib_n_krcv_queues > 1) {
- dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
- if (dd->first_user_ctxt > dd->ctxtcnt)
- dd->first_user_ctxt = dd->ctxtcnt;
- dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
- } else
- dd->first_user_ctxt = dd->num_pports;
- dd->n_krcv_queues = dd->first_user_ctxt;
-}
-
-static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd, u32 npkts)
-{
- if (updegr)
- qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
-}
-
-static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
-{
- u32 head, tail;
-
- head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
- if (rcd->rcvhdrtail_kvaddr)
- tail = qib_get_rcvhdrtail(rcd);
- else
- tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
- return head == tail;
-}
-
-/*
- * Used when we close any ctxt, for DMA already in flight
- * at close. Can't be done until we know hdrq size, so not
- * early in chip init.
- */
-static void alloc_dummy_hdrq(struct qib_devdata *dd)
-{
- dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
- dd->rcd[0]->rcvhdrq_size,
- &dd->cspec->dummy_hdrq_phys,
- GFP_ATOMIC);
- if (!dd->cspec->dummy_hdrq) {
- qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
- /* fallback to just 0'ing */
- dd->cspec->dummy_hdrq_phys = 0UL;
- }
-}
-
-/*
- * Modify the RCVCTRL register in chip-specific way. This
- * is a function because bit positions and (future) register
- * location is chip-specific, but the needed operations are
- * generic. <op> is a bit-mask because we often want to
- * do multiple modifications.
- */
-static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
- int ctxt)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 mask, val;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
-
- if (op & QIB_RCVCTRL_TAILUPD_ENB)
- dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
- if (op & QIB_RCVCTRL_TAILUPD_DIS)
- dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
- if (op & QIB_RCVCTRL_PKEY_ENB)
- dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
- if (op & QIB_RCVCTRL_PKEY_DIS)
- dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
- if (ctxt < 0)
- mask = (1ULL << dd->ctxtcnt) - 1;
- else
- mask = (1ULL << ctxt);
- if (op & QIB_RCVCTRL_CTXT_ENB) {
- /* always done for specific ctxt */
- dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
- if (!(dd->flags & QIB_NODMA_RTAIL))
- dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
- /* Write these registers before the context is enabled. */
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
- dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
- dd->rcd[ctxt]->rcvhdrq_phys);
-
- if (ctxt == 0 && !dd->cspec->dummy_hdrq)
- alloc_dummy_hdrq(dd);
- }
- if (op & QIB_RCVCTRL_CTXT_DIS)
- dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
- if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
- dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
- if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
- dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
- /* arm rcv interrupt */
- val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
- dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_ENB) {
- /*
- * Init the context registers also; if we were
- * disabled, tail and head should both be zero
- * already from the enable, but since we don't
- * know, we have to do it explicitly.
- */
- val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
- qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
-
- val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
- dd->rcd[ctxt]->head = val;
- /* If kctxt, interrupt on next receive. */
- if (ctxt < dd->first_user_ctxt)
- val |= dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_DIS) {
- /*
- * Be paranoid, and never write 0's to these, just use an
- * unused page. Of course,
- * rcvhdraddr points to a large chunk of memory, so this
- * could still trash things, but at least it won't trash
- * page 0, and by disabling the ctxt, it should stop "soon",
- * even if a packet or two is already in flight after we
- * disabled the ctxt. Only 6120 has this issue.
- */
- if (ctxt >= 0) {
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
- dd->cspec->dummy_hdrq_phys);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
- dd->cspec->dummy_hdrq_phys);
- } else {
- unsigned i;
-
- for (i = 0; i < dd->cfgctxts; i++) {
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
- i, dd->cspec->dummy_hdrq_phys);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
- i, dd->cspec->dummy_hdrq_phys);
- }
- }
- }
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-}
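
Because <op> is a bit-mask, a caller can fold several of the modifications
above into one locked register update. A toy model of that calling
convention (flag values here are invented; the real ones live elsewhere in
the driver):

#include <stdio.h>

#define RCVCTRL_CTXT_ENB	0x1	/* illustrative values */
#define RCVCTRL_INTRAVAIL_ENB	0x2

static void rcvctrl_mod(unsigned int op, int ctxt)
{
	/* one lock, one final register write, many shadow updates */
	if (op & RCVCTRL_CTXT_ENB)
		printf("ctxt %d: enable receive\n", ctxt);
	if (op & RCVCTRL_INTRAVAIL_ENB)
		printf("ctxt %d: arm receive interrupt\n", ctxt);
}

int main(void)
{
	rcvctrl_mod(RCVCTRL_CTXT_ENB | RCVCTRL_INTRAVAIL_ENB, 0);
	return 0;
}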
-
-/*
- * Modify the SENDCTRL register in a chip-specific way. This
- * is a function because there may be multiple such registers with
- * slightly different layouts. Only the operations actually used
- * are implemented so far.
- * The chip requires no back-to-back sendctrl writes, so write the
- * scratch register after writing sendctrl.
- */
-static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 tmp_dd_sendctrl;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
-
- /* First the ones that are "sticky", saved in shadow */
- if (op & QIB_SENDCTRL_CLEAR)
- dd->sendctrl = 0;
- if (op & QIB_SENDCTRL_SEND_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
- else if (op & QIB_SENDCTRL_SEND_ENB)
- dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
- if (op & QIB_SENDCTRL_AVAIL_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
- else if (op & QIB_SENDCTRL_AVAIL_ENB)
- dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
-
- if (op & QIB_SENDCTRL_DISARM_ALL) {
- u32 i, last;
-
- tmp_dd_sendctrl = dd->sendctrl;
- /*
- * disarm any that are not yet launched, disabling sends
- * and updates until done.
- */
- last = dd->piobcnt2k + dd->piobcnt4k;
- tmp_dd_sendctrl &=
- ~(SYM_MASK(SendCtrl, PIOEnable) |
- SYM_MASK(SendCtrl, PIOBufAvailUpd));
- for (i = 0; i < last; i++) {
- qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
- SYM_MASK(SendCtrl, Disarm) | i);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- }
-
- tmp_dd_sendctrl = dd->sendctrl;
-
- if (op & QIB_SENDCTRL_FLUSH)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
- if (op & QIB_SENDCTRL_DISARM)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
- ((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
- SYM_LSB(SendCtrl, DisarmPIOBuf));
- if (op & QIB_SENDCTRL_AVAIL_BLIP)
- tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
-
- qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- if (op & QIB_SENDCTRL_AVAIL_BLIP) {
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-
- if (op & QIB_SENDCTRL_FLUSH) {
- u32 v;
- /*
- * ensure writes have hit the chip, then do a few
- * more reads, to allow DMA of the pioavail registers
- * to occur, so the in-memory copy is in sync with
- * the chip. Not always safe to sleep.
- */
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- qib_read_kreg32(dd, kr_scratch);
- }
-}
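
The scratch write after every sendctrl write above is a posting barrier:
the chip cannot absorb back-to-back sendctrl writes, so a dummy write to
an unrelated register is used to space them out. A sketch of the idiom
with plain variables standing in for MMIO registers:

#include <stdio.h>
#include <stdint.h>

static volatile uint64_t sendctrl_reg, scratch_reg;	/* MMIO stand-ins */

static void write_sendctrl(uint64_t val)
{
	sendctrl_reg = val;
	scratch_reg = 0;	/* forces the previous write to post */
}

int main(void)
{
	write_sendctrl(0x3);	/* e.g. enable + avail-update bits */
	write_sendctrl(0x1);	/* safe: scratch write separated them */
	printf("last sendctrl: 0x%llx\n", (unsigned long long)sendctrl_reg);
	return 0;
}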
-
-/**
- * qib_portcntr_6120 - read a per-port counter
- * @ppd: the qlogic_ib device
- * @reg: the counter to snapshot
- */
-static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
-{
- u64 ret = 0ULL;
- struct qib_devdata *dd = ppd->dd;
- u16 creg;
- /* 0xffff for unimplemented or synthesized counters */
- static const u16 xlator[] = {
- [QIBPORTCNTR_PKTSEND] = cr_pktsend,
- [QIBPORTCNTR_WORDSEND] = cr_wordsend,
- [QIBPORTCNTR_PSXMITDATA] = 0xffff,
- [QIBPORTCNTR_PSXMITPKTS] = 0xffff,
- [QIBPORTCNTR_PSXMITWAIT] = 0xffff,
- [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
- [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
- [QIBPORTCNTR_PSRCVDATA] = 0xffff,
- [QIBPORTCNTR_PSRCVPKTS] = 0xffff,
- [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
- [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
- [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
- [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
- [QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
- [QIBPORTCNTR_RXVLERR] = 0xffff,
- [QIBPORTCNTR_ERRICRC] = cr_erricrc,
- [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
- [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
- [QIBPORTCNTR_BADFORMAT] = cr_badformat,
- [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
- [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
- [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
- [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
- [QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
- [QIBPORTCNTR_ERRLINK] = cr_errlink,
- [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
- [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
- [QIBPORTCNTR_LLI] = 0xffff,
- [QIBPORTCNTR_PSINTERVAL] = 0xffff,
- [QIBPORTCNTR_PSSTART] = 0xffff,
- [QIBPORTCNTR_PSSTAT] = 0xffff,
- [QIBPORTCNTR_VL15PKTDROP] = 0xffff,
- [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
- [QIBPORTCNTR_KHDROVFL] = 0xffff,
- };
-
- if (reg >= ARRAY_SIZE(xlator)) {
- qib_devinfo(ppd->dd->pcidev,
- "Unimplemented portcounter %u\n", reg);
- goto done;
- }
- creg = xlator[reg];
-
- /* handle counter requests not implemented as chip counters */
- if (reg == QIBPORTCNTR_LLI)
- ret = dd->cspec->lli_errs;
- else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
- ret = dd->cspec->overrun_thresh_errs;
- else if (reg == QIBPORTCNTR_KHDROVFL) {
- int i;
-
- /* sum over all kernel contexts */
- for (i = 0; i < dd->first_user_ctxt; i++)
- ret += read_6120_creg32(dd, cr_portovfl + i);
- } else if (reg == QIBPORTCNTR_PSSTAT)
- ret = dd->cspec->pma_sample_status;
- if (creg == 0xffff)
- goto done;
-
- /*
- * only fast-incrementing counters are 64-bit; use 32-bit reads to
- * avoid two independent reads when on Opteron
- */
- if (creg == cr_wordsend || creg == cr_wordrcv ||
- creg == cr_pktsend || creg == cr_pktrcv)
- ret = read_6120_creg(dd, creg);
- else
- ret = read_6120_creg32(dd, creg);
- if (creg == cr_ibsymbolerr) {
- if (dd->cspec->ibdeltainprog)
- ret -= ret - dd->cspec->ibsymsnap;
- ret -= dd->cspec->ibsymdelta;
- } else if (creg == cr_iblinkerrrecov) {
- if (dd->cspec->ibdeltainprog)
- ret -= ret - dd->cspec->iblnkerrsnap;
- ret -= dd->cspec->iblnkerrdelta;
- }
- if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */
- ret += dd->cspec->rxfc_unsupvl_errs;
-
-done:
- return ret;
-}
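
The xlator[] table above is a designated-initializer lookup with 0xffff as
a "not a chip register" sentinel, letting one routine serve both hardware
counters and software-synthesized ones. A compilable miniature of the same
dispatch (enum values and register numbers invented for the example):

#include <stdio.h>
#include <stdint.h>

#define CR_SENTINEL 0xffff	/* unimplemented or synthesized */

enum { CNTR_PKTSEND, CNTR_LLI, CNTR_MAX };

static const uint16_t xlator[CNTR_MAX] = {
	[CNTR_PKTSEND]	= 7,		/* pretend chip register index */
	[CNTR_LLI]	= CR_SENTINEL,	/* kept in software instead */
};

static void read_counter(unsigned reg)
{
	if (reg >= CNTR_MAX) {
		fprintf(stderr, "unimplemented counter %u\n", reg);
		return;
	}
	if (xlator[reg] == CR_SENTINEL)
		printf("counter %u synthesized in software\n", reg);
	else
		printf("counter %u reads chip register %u\n",
		       reg, xlator[reg]);
}

int main(void)
{
	read_counter(CNTR_PKTSEND);
	read_counter(CNTR_LLI);
	return 0;
}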
-
-/*
- * Device counter names (not port-specific), one line per stat,
- * single string. Used by utilities like ipathstats to print the stats
- * in a way that works for different versions of drivers, without changing
- * the utility. Names need to be 12 chars or less (w/o newline), for proper
- * display by the utility.
- * Non-error counters are first.
- * The start of the "error" counters is indicated by a leading "E " on the
- * first "error" counter, and doesn't count in label length.
- * The EgrOvfl list needs to be last so we can truncate it at the configured
- * context count for the device.
- * cntr6120indices contains the corresponding register indices.
- */
-static const char cntr6120names[] =
- "Interrupts\n"
- "HostBusStall\n"
- "E RxTIDFull\n"
- "RxTIDInvalid\n"
- "Ctxt0EgrOvfl\n"
- "Ctxt1EgrOvfl\n"
- "Ctxt2EgrOvfl\n"
- "Ctxt3EgrOvfl\n"
- "Ctxt4EgrOvfl\n";
-
-static const size_t cntr6120indices[] = {
- cr_lbint,
- cr_lbflowstall,
- cr_errtidfull,
- cr_errtidvalid,
- cr_portovfl + 0,
- cr_portovfl + 1,
- cr_portovfl + 2,
- cr_portovfl + 3,
- cr_portovfl + 4,
-};
-
-/*
- * same as cntr6120names and cntr6120indices, but for port-specific counters.
- * portcntr6120indices is somewhat complicated by some registers needing
- * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
- */
-static const char portcntr6120names[] =
- "TxPkt\n"
- "TxFlowPkt\n"
- "TxWords\n"
- "RxPkt\n"
- "RxFlowPkt\n"
- "RxWords\n"
- "TxFlowStall\n"
- "E IBStatusChng\n"
- "IBLinkDown\n"
- "IBLnkRecov\n"
- "IBRxLinkErr\n"
- "IBSymbolErr\n"
- "RxLLIErr\n"
- "RxBadFormat\n"
- "RxBadLen\n"
- "RxBufOvrfl\n"
- "RxEBP\n"
- "RxFlowCtlErr\n"
- "RxICRCerr\n"
- "RxLPCRCerr\n"
- "RxVCRCerr\n"
- "RxInvalLen\n"
- "RxInvalPKey\n"
- "RxPktDropped\n"
- "TxBadLength\n"
- "TxDropped\n"
- "TxInvalLen\n"
- "TxUnderrun\n"
- "TxUnsupVL\n"
- ;
-
-#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
-static const size_t portcntr6120indices[] = {
- QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
- cr_pktsendflow,
- QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
- QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
- cr_pktrcvflowctrl,
- QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
- cr_ibstatuschange,
- QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
- QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
- cr_rcvflowctrl_err,
- QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
- cr_invalidslen,
- cr_senddropped,
- cr_errslen,
- cr_sendunderrun,
- cr_txunsupvl,
-};
-
-/* do all the setup to make the counter reads efficient later */
-static void init_6120_cntrnames(struct qib_devdata *dd)
-{
- int i, j = 0;
- char *s;
-
- for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
- i++) {
- /* we always have at least one counter before the egrovfl */
- if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
- j = 1;
- s = strchr(s + 1, '\n');
- if (s && j)
- j++;
- }
- dd->cspec->ncntrs = i;
- if (!s)
- /* full list; size is without terminating null */
- dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
- else
- dd->cspec->cntrnamelen = 1 + s - cntr6120names;
- dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
- GFP_KERNEL);
-
- for (i = 0, s = (char *)portcntr6120names; s; i++)
- s = strchr(s + 1, '\n');
- dd->cspec->nportcntrs = i - 1;
- dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
- dd->cspec->portcntrs = kmalloc_array(dd->cspec->nportcntrs,
- sizeof(u64),
- GFP_KERNEL);
-}
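
The setup above derives the counter counts by scanning the single
newline-separated string, so the names and the index array only have to be
kept in step, never recounted by hand. The core scan, reduced to a
stand-alone program with a shortened name list:

#include <stdio.h>
#include <string.h>

static const char names[] =
	"Interrupts\n"
	"HostBusStall\n"
	"E RxTIDFull\n";

int main(void)
{
	const char *s;
	int i;

	/* same loop shape as init_6120_cntrnames() above */
	for (i = 0, s = names; s; i++)
		s = strchr(s + 1, '\n');
	printf("%d names, %zu name bytes (w/o NUL)\n",
	       i - 1, sizeof(names) - 1);	/* 3 names */
	return 0;
}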
-
-static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
- u64 **cntrp)
-{
- u32 ret;
-
- if (namep) {
- ret = dd->cspec->cntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- else
- *namep = (char *)cntr6120names;
- } else {
- u64 *cntr = dd->cspec->cntrs;
- int i;
-
- ret = dd->cspec->ncntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->ncntrs; i++)
- *cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
- }
-done:
- return ret;
-}
-
-static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
- char **namep, u64 **cntrp)
-{
- u32 ret;
-
- if (namep) {
- ret = dd->cspec->portcntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- else
- *namep = (char *)portcntr6120names;
- } else {
- u64 *cntr = dd->cspec->portcntrs;
- struct qib_pportdata *ppd = &dd->pport[port];
- int i;
-
- ret = dd->cspec->nportcntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->nportcntrs; i++) {
- if (portcntr6120indices[i] & _PORT_VIRT_FLAG)
- *cntr++ = qib_portcntr_6120(ppd,
- portcntr6120indices[i] &
- ~_PORT_VIRT_FLAG);
- else
- *cntr++ = read_6120_creg32(dd,
- portcntr6120indices[i]);
- }
- }
-done:
- return ret;
-}
-
-static void qib_chk_6120_errormask(struct qib_devdata *dd)
-{
- static u32 fixed;
- u32 ctrl;
- unsigned long errormask;
- unsigned long hwerrs;
-
- if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
- return;
-
- errormask = qib_read_kreg64(dd, kr_errmask);
-
- if (errormask == dd->cspec->errormask)
- return;
- fixed++;
-
- hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
- ctrl = qib_read_kreg32(dd, kr_control);
-
- qib_write_kreg(dd, kr_errmask,
- dd->cspec->errormask);
-
- if ((hwerrs & dd->cspec->hwerrmask) ||
- (ctrl & QLOGIC_IB_C_FREEZEMODE)) {
- qib_write_kreg(dd, kr_hwerrclear, 0ULL);
- qib_write_kreg(dd, kr_errclear, 0ULL);
- /* force re-interrupt of pending events, just in case */
- qib_write_kreg(dd, kr_intclear, 0ULL);
- qib_devinfo(dd->pcidev,
- "errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n",
- fixed, errormask, (unsigned long)dd->cspec->errormask,
- ctrl, hwerrs);
- }
-}
-
-/**
- * qib_get_6120_faststats - get word counters from chip before they overflow
- * @t: contains a pointer to the qlogic_ib device qib_devdata
- *
- * This needs more work; in particular, a decision on whether we really
- * need traffic_wds done the way it is.
- * Called from add_timer.
- */
-static void qib_get_6120_faststats(struct timer_list *t)
-{
- struct qib_devdata *dd = timer_container_of(dd, t, stats_timer);
- struct qib_pportdata *ppd = dd->pport;
- unsigned long flags;
- u64 traffic_wds;
-
- /*
- * don't access the chip while running diags, or memory diags can
- * fail
- */
- if (!(dd->flags & QIB_INITTED) || dd->diag_client)
- /* but re-arm the timer, for the diags case; won't hurt otherwise */
- goto done;
-
- /*
- * We now try to maintain an activity timer, based on traffic
- * exceeding a threshold, so we need to check the word-counts
- * even if they are 64-bit.
- */
- traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) +
- qib_portcntr_6120(ppd, cr_wordrcv);
- spin_lock_irqsave(&dd->eep_st_lock, flags);
- traffic_wds -= dd->traffic_wds;
- dd->traffic_wds += traffic_wds;
- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-
- qib_chk_6120_errormask(dd);
-done:
- mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
-}
-
-/* no interrupt fallback for these chips */
-static int qib_6120_nointr_fallback(struct qib_devdata *dd)
-{
- return 0;
-}
-
-/*
- * reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void qib_6120_xgxs_reset(struct qib_pportdata *ppd)
-{
- u64 val, prev_val;
- struct qib_devdata *dd = ppd->dd;
-
- prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
- val = prev_val | QLOGIC_IB_XGXS_RESET;
- prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
- qib_write_kreg(dd, kr_control,
- dd->control & ~QLOGIC_IB_C_LINKENABLE);
- qib_write_kreg(dd, kr_xgxs_cfg, val);
- qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
- qib_write_kreg(dd, kr_control, dd->control);
-}
-
-static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which)
-{
- int ret;
-
- switch (which) {
- case QIB_IB_CFG_LWID:
- ret = ppd->link_width_active;
- break;
-
- case QIB_IB_CFG_SPD:
- ret = ppd->link_speed_active;
- break;
-
- case QIB_IB_CFG_LWID_ENB:
- ret = ppd->link_width_enabled;
- break;
-
- case QIB_IB_CFG_SPD_ENB:
- ret = ppd->link_speed_enabled;
- break;
-
- case QIB_IB_CFG_OP_VLS:
- ret = ppd->vls_operational;
- break;
-
- case QIB_IB_CFG_VL_HIGH_CAP:
- ret = 0;
- break;
-
- case QIB_IB_CFG_VL_LOW_CAP:
- ret = 0;
- break;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
- OverrunThreshold);
- break;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
- PhyerrThreshold);
- break;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- ret = (ppd->dd->cspec->ibcctrl &
- SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
- IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
- break;
-
- case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
- ret = 0; /* no heartbeat on this chip */
- break;
-
- case QIB_IB_CFG_PMA_TICKS:
- ret = 250; /* 1 usec. */
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-/*
- * We assume range checking is already done, if needed.
- */
-static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
-{
- struct qib_devdata *dd = ppd->dd;
- int ret = 0;
- u64 val64;
- u16 lcmd, licmd;
-
- switch (which) {
- case QIB_IB_CFG_LWID_ENB:
- ppd->link_width_enabled = val;
- break;
-
- case QIB_IB_CFG_SPD_ENB:
- ppd->link_speed_enabled = val;
- break;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
- OverrunThreshold);
- if (val64 != val) {
- dd->cspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, OverrunThreshold);
- dd->cspec->ibcctrl |= (u64) val <<
- SYM_LSB(IBCCtrl, OverrunThreshold);
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- break;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
- PhyerrThreshold);
- if (val64 != val) {
- dd->cspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, PhyerrThreshold);
- dd->cspec->ibcctrl |= (u64) val <<
- SYM_LSB(IBCCtrl, PhyerrThreshold);
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- break;
-
- case QIB_IB_CFG_PKEYS: /* update pkeys */
- val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
- ((u64) ppd->pkeys[2] << 32) |
- ((u64) ppd->pkeys[3] << 48);
- qib_write_kreg(dd, kr_partitionkey, val64);
- break;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- if (val == IB_LINKINITCMD_POLL)
- dd->cspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
- else /* SLEEP */
- dd->cspec->ibcctrl |=
- SYM_MASK(IBCCtrl, LinkDownDefaultState);
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- break;
-
- case QIB_IB_CFG_MTU: /* update the MTU in IBC */
- /*
- * Update our housekeeping variables, and set IBC max
- * size, same as init code; max IBC is max we allow in
- * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
- * Set even if it's unchanged, print debug message only
- * on changes.
- */
- val = (ppd->ibmaxlen >> 2) + 1;
- dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
- dd->cspec->ibcctrl |= (u64)val <<
- SYM_LSB(IBCCtrl, MaxPktLen);
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- break;
-
- case QIB_IB_CFG_LSTATE: /* set the IB link state */
- switch (val & 0xffff0000) {
- case IB_LINKCMD_DOWN:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
- if (!dd->cspec->ibdeltainprog) {
- dd->cspec->ibdeltainprog = 1;
- dd->cspec->ibsymsnap =
- read_6120_creg32(dd, cr_ibsymbolerr);
- dd->cspec->iblnkerrsnap =
- read_6120_creg32(dd, cr_iblinkerrrecov);
- }
- break;
-
- case IB_LINKCMD_ARMED:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
- break;
-
- case IB_LINKCMD_ACTIVE:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
- goto bail;
- }
- switch (val & 0xffff) {
- case IB_LINKINITCMD_NOP:
- licmd = 0;
- break;
-
- case IB_LINKINITCMD_POLL:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
- break;
-
- case IB_LINKINITCMD_SLEEP:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
- break;
-
- case IB_LINKINITCMD_DISABLE:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
- val & 0xffff);
- goto bail;
- }
- qib_set_ib_6120_lstate(ppd, lcmd, licmd);
- goto bail;
-
- case QIB_IB_CFG_HRTBT:
- ret = -EINVAL;
- break;
-
- default:
- ret = -EINVAL;
- }
-bail:
- return ret;
-}
-
-static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
-{
- int ret = 0;
-
- if (!strncmp(what, "ibc", 3)) {
- ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
- qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
- ppd->dd->unit, ppd->port);
- } else if (!strncmp(what, "off", 3)) {
- ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
- qib_devinfo(ppd->dd->pcidev,
- "Disabling IB%u:%u IBC loopback (normal)\n",
- ppd->dd->unit, ppd->port);
- } else
- ret = -EINVAL;
- if (!ret) {
- qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
- }
- return ret;
-}
-
-static void pma_6120_timer(struct timer_list *t)
-{
- struct qib_chip_specific *cs = timer_container_of(cs, t, pma_timer);
- struct qib_pportdata *ppd = cs->ppd;
- struct qib_ibport *ibp = &ppd->ibport_data;
- unsigned long flags;
-
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
- qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
- &cs->spkts, &cs->rpkts, &cs->xmit_wait);
- mod_timer(&cs->pma_timer,
- jiffies + usecs_to_jiffies(ibp->rvp.pma_sample_interval));
- } else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
- u64 ta, tb, tc, td, te;
-
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);
-
- cs->sword = ta - cs->sword;
- cs->rword = tb - cs->rword;
- cs->spkts = tc - cs->spkts;
- cs->rpkts = td - cs->rpkts;
- cs->xmit_wait = te - cs->xmit_wait;
- }
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-}
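
The PMA timer above is a three-state sampler: STARTED takes a snapshot,
RUNNING converts the next reading into an interval delta, DONE holds the
result. The same machine in miniature, tracking just one counter with
invented values:

#include <stdio.h>
#include <stdint.h>

enum { SAMPLE_STARTED, SAMPLE_RUNNING, SAMPLE_DONE };

struct sample {
	int status;
	uint64_t sword;		/* send-word counter, then its delta */
};

static void sample_tick(struct sample *s, uint64_t hw_words)
{
	if (s->status == SAMPLE_STARTED) {
		s->sword = hw_words;		/* snapshot at start */
		s->status = SAMPLE_RUNNING;
	} else if (s->status == SAMPLE_RUNNING) {
		s->sword = hw_words - s->sword;	/* delta over interval */
		s->status = SAMPLE_DONE;
	}
}

int main(void)
{
	struct sample s = { .status = SAMPLE_STARTED };

	sample_tick(&s, 1000);	/* interval begins */
	sample_tick(&s, 1750);	/* interval ends */
	printf("words in interval: %llu\n",
	       (unsigned long long)s.sword);	/* 750 */
	return 0;
}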
-
-/*
- * Note that the caller has the ibp->rvp.lock held.
- */
-static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
- u32 start)
-{
- struct qib_chip_specific *cs = ppd->dd->cspec;
-
- if (start && intv) {
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
- mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
- } else if (intv) {
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
- qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
- &cs->spkts, &cs->rpkts, &cs->xmit_wait);
- mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
- } else {
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- cs->sword = 0;
- cs->rword = 0;
- cs->spkts = 0;
- cs->rpkts = 0;
- cs->xmit_wait = 0;
- }
-}
-
-static u32 qib_6120_iblink_state(u64 ibcs)
-{
- u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
-
- switch (state) {
- case IB_6120_L_STATE_INIT:
- state = IB_PORT_INIT;
- break;
- case IB_6120_L_STATE_ARM:
- state = IB_PORT_ARMED;
- break;
- case IB_6120_L_STATE_ACTIVE:
- case IB_6120_L_STATE_ACT_DEFER:
- state = IB_PORT_ACTIVE;
- break;
- default:
- fallthrough;
- case IB_6120_L_STATE_DOWN:
- state = IB_PORT_DOWN;
- break;
- }
- return state;
-}
-
-/* returns the IBTA port state, rather than the IBC link training state */
-static u8 qib_6120_phys_portstate(u64 ibcs)
-{
- u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
- return qib_6120_physportstate[state];
-}
-
-static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- if (ibup) {
- if (ppd->dd->cspec->ibdeltainprog) {
- ppd->dd->cspec->ibdeltainprog = 0;
- ppd->dd->cspec->ibsymdelta +=
- read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
- ppd->dd->cspec->ibsymsnap;
- ppd->dd->cspec->iblnkerrdelta +=
- read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
- ppd->dd->cspec->iblnkerrsnap;
- }
- qib_hol_init(ppd);
- } else {
- ppd->dd->cspec->lli_counter = 0;
- if (!ppd->dd->cspec->ibdeltainprog) {
- ppd->dd->cspec->ibdeltainprog = 1;
- ppd->dd->cspec->ibsymsnap =
- read_6120_creg32(ppd->dd, cr_ibsymbolerr);
- ppd->dd->cspec->iblnkerrsnap =
- read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
- }
- qib_hol_down(ppd);
- }
-
- qib_6120_setup_setextled(ppd, ibup);
-
- return 0;
-}
-
-/* Does read/modify/write to appropriate registers to
- * set output and direction bits selected by mask.
- * These are in their canonical positions (e.g. the lsb of
- * dir will end up in D48 of extctrl on existing chips).
- * Returns the contents of GP Inputs.
- */
-static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
-{
- u64 read_val, new_out;
- unsigned long flags;
-
- if (mask) {
- /* some bits being written, lock access to GPIO */
- dir &= mask;
- out &= mask;
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
- dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
- new_out = (dd->cspec->gpio_out & ~mask) | out;
-
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- qib_write_kreg(dd, kr_gpio_out, new_out);
- dd->cspec->gpio_out = new_out;
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
- }
- /*
- * It is unlikely that a read at this time would get valid
- * data on a pin whose direction line was set in the same
- * call to this function. We include the read here because
- * that allows us to potentially combine a change on one pin with
- * a read on another, and because the old code did something like
- * this.
- */
- read_val = qib_read_kreg64(dd, kr_extstatus);
- return SYM_FIELD(read_val, EXTStatus, GPIOIn);
-}
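
gpio_6120_mod() above is a classic masked read-modify-write against a
software shadow: only bits selected by the mask change, everything else is
preserved. A stand-alone sketch of the shadow update (pin numbers are
arbitrary):

#include <stdio.h>
#include <stdint.h>

static uint32_t shadow_out;	/* stands in for dd->cspec->gpio_out */

static void gpio_mod(uint32_t out, uint32_t dir, uint32_t mask)
{
	dir &= mask;
	out &= mask;
	(void)dir;	/* direction bits get the same treatment */
	shadow_out = (shadow_out & ~mask) | out;
}

int main(void)
{
	shadow_out = 0x05;			/* pins 0 and 2 high */
	gpio_mod(1u << 3, 1u << 3, 1u << 3);	/* drive pin 3 high */
	gpio_mod(0, 1u << 0, 1u << 0);		/* drive pin 0 low */
	printf("gpio_out = 0x%02x\n", shadow_out);	/* 0x0c */
	return 0;
}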
-
-/*
- * Read fundamental info we need to use the chip. These are
- * the registers that describe chip capabilities, and are
- * saved in shadow registers.
- */
-static void get_6120_chip_params(struct qib_devdata *dd)
-{
- u64 val;
- u32 piobufs;
- int mtu;
-
- dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
-
- dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
- dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
- dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
- dd->palign = qib_read_kreg32(dd, kr_palign);
- dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
- dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
-
- dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
-
- val = qib_read_kreg64(dd, kr_sendpiosize);
- dd->piosize2k = val & ~0U;
- dd->piosize4k = val >> 32;
-
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1)
- mtu = QIB_DEFAULT_MTU;
- dd->pport->ibmtu = (u32)mtu;
-
- val = qib_read_kreg64(dd, kr_sendpiobufcnt);
- dd->piobcnt2k = val & ~0U;
- dd->piobcnt4k = val >> 32;
- dd->last_pio = dd->piobcnt4k + dd->piobcnt2k - 1;
- /* these may be adjusted in init_chip_wc_pat() */
- dd->pio2kbase = (u32 __iomem *)
- (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
- if (dd->piobcnt4k) {
- dd->pio4kbase = (u32 __iomem *)
- (((char __iomem *) dd->kregbase) +
- (dd->piobufbase >> 32));
- /*
- * 4K buffers take 2 pages; we use roundup just to be
- * paranoid; we calculate it once here, rather than on
- * every buf allocate
- */
- dd->align4k = ALIGN(dd->piosize4k, dd->palign);
- }
-
- piobufs = dd->piobcnt4k + dd->piobcnt2k;
-
- dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
- (sizeof(u64) * BITS_PER_BYTE / 2);
-}
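
The pioavregs computation above works out to "32 buffers per 64-bit avail
register, rounded up", since each buffer consumes two status bits. A
worked example with an invented buffer count:

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned piobufs = 1808;	/* illustrative 2k + 4k total */
	unsigned per_reg = sizeof(uint64_t) * BITS_PER_BYTE / 2; /* 32 */
	unsigned pioavregs = ALIGN_UP(piobufs, per_reg) / per_reg;

	printf("%u buffers -> %u avail registers\n", piobufs, pioavregs);
	return 0;	/* 1808 -> 57 */
}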
-
-/*
- * The chip base addresses in cspec and cpspec have to be set
- * after possible init_chip_wc_pat(), rather than in
- * get_6120_chip_params(), so they are split out as a separate function.
- */
-static void set_6120_baseaddrs(struct qib_devdata *dd)
-{
- u32 cregbase;
-
- cregbase = qib_read_kreg32(dd, kr_counterregbase);
- dd->cspec->cregbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + cregbase);
-
- dd->egrtidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + dd->rcvegrbase);
-}
-
-/*
- * Write the final few registers that depend on some of the
- * init setup. Done late in init, just before bringing up
- * the serdes.
- */
-static int qib_late_6120_initreg(struct qib_devdata *dd)
-{
- int ret = 0;
- u64 val;
-
- qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
- qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
- qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
- qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
- val = qib_read_kreg64(dd, kr_sendpioavailaddr);
- if (val != dd->pioavailregs_phys) {
- qib_dev_err(dd,
- "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
- (unsigned long) dd->pioavailregs_phys,
- (unsigned long long) val);
- ret = -EINVAL;
- }
- return ret;
-}
-
-static int init_6120_variables(struct qib_devdata *dd)
-{
- int ret = 0;
- struct qib_pportdata *ppd;
- u32 sbufs;
-
- ppd = (struct qib_pportdata *)(dd + 1);
- dd->pport = ppd;
- dd->num_pports = 1;
-
- dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
- dd->cspec->ppd = ppd;
- ppd->cpspec = NULL; /* not used in this chip */
-
- spin_lock_init(&dd->cspec->kernel_tid_lock);
- spin_lock_init(&dd->cspec->user_tid_lock);
- spin_lock_init(&dd->cspec->rcvmod_lock);
- spin_lock_init(&dd->cspec->gpio_lock);
-
- /* we haven't yet set QIB_PRESENT, so use read directly */
- dd->revision = readq(&dd->kregbase[kr_revision]);
-
- if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
- qib_dev_err(dd,
- "Revision register read failure, giving up initialization\n");
- ret = -ENODEV;
- goto bail;
- }
- dd->flags |= QIB_PRESENT; /* now register routines work */
-
- dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
- ChipRevMajor);
- dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
- ChipRevMinor);
-
- get_6120_chip_params(dd);
- pe_boardname(dd); /* fill in boardname */
-
- /*
- * GPIO bits for TWSI data and clock,
- * used for serial EEPROM.
- */
- dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
- dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
- dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;
-
- if (qib_unordered_wc())
- dd->flags |= QIB_PIO_FLUSH_WC;
-
- ret = qib_init_pportdata(ppd, dd, 0, 1);
- if (ret)
- goto bail;
- ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- ppd->link_speed_supported = QIB_IB_SDR;
- ppd->link_width_enabled = IB_WIDTH_4X;
- ppd->link_speed_enabled = ppd->link_speed_supported;
- /* these can't change for this chip, so set once */
- ppd->link_width_active = ppd->link_width_enabled;
- ppd->link_speed_active = ppd->link_speed_enabled;
- ppd->vls_supported = IB_VL_VL0;
- ppd->vls_operational = ppd->vls_supported;
-
- dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
- dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
- dd->rhf_offset = 0;
-
- /* we always allocate at least 2048 bytes for eager buffers */
- ret = ib_mtu_enum_to_int(qib_ibmtu);
- dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
- dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
-
- qib_6120_tidtemplate(dd);
-
- /*
- * We can request a receive interrupt for 1 or
- * more packets from current offset. For now, we set this
- * up for a single packet.
- */
- dd->rhdrhead_intr_off = 1ULL << 32;
-
- /* setup the stats timer; the add_timer is done at end of init */
- timer_setup(&dd->stats_timer, qib_get_6120_faststats, 0);
- timer_setup(&dd->cspec->pma_timer, pma_6120_timer, 0);
-
- dd->ureg_align = qib_read_kreg32(dd, kr_palign);
-
- dd->piosize2kmax_dwords = dd->piosize2k >> 2;
- qib_6120_config_ctxts(dd);
- qib_set_ctxtcnt(dd);
-
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
- set_6120_baseaddrs(dd); /* set chip access pointers now */
-
- ret = 0;
- if (qib_mini_init)
- goto bail;
-
- qib_num_cfg_vls = 1; /* if any 6120's, only one VL */
-
- ret = qib_create_ctxts(dd);
- init_6120_cntrnames(dd);
-
- /* use all of 4KB buffers for the kernel, otherwise 16 */
- sbufs = dd->piobcnt4k ? dd->piobcnt4k : 16;
-
- dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
- dd->pbufsctxt = dd->lastctxt_piobuf /
- (dd->cfgctxts - dd->first_user_ctxt);
-
- if (ret)
- goto bail;
-bail:
- return ret;
-}
-
-/*
- * For this chip, we want to use the same buffer every time
- * we are trying to bring the link up (they are always VL15
- * packets). At that link state the packet should always go out immediately
- * (or at least be discarded at the tx interface if the link is down).
- * If it doesn't, and the buffer isn't available, that means some other
- * sender has gotten ahead of us, and is preventing our packet from going
- * out. In that case, we flush all packets, and try again. If that still
- * fails, we fail the request, and hope things work the next time around.
- *
- * We don't need very complicated heuristics on whether the packet had
- * time to go out or not, since even at SDR 1X, it goes out in very short
- * time periods, covered by the chip reads done here and as part of the
- * flush.
- */
-static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum)
-{
- u32 __iomem *buf;
- u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;
-
- /*
- * always blip to get avail list updated, since it's almost
- * always needed, and is fairly cheap.
- */
- sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
- buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
- if (buf)
- goto done;
-
- sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
- QIB_SENDCTRL_AVAIL_BLIP);
- ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
- qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
- buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
-done:
- return buf;
-}
-
-static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
- u32 *pbufnum)
-{
- u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
- struct qib_devdata *dd = ppd->dd;
- u32 __iomem *buf;
-
- if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) &&
- !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
- buf = get_6120_link_buf(ppd, pbufnum);
- else {
-
- if ((plen + 1) > dd->piosize2kmax_dwords)
- first = dd->piobcnt2k;
- else
- first = 0;
- /* try 4k if all 2k busy, so same last for both sizes */
- last = dd->piobcnt2k + dd->piobcnt4k - 1;
- buf = qib_getsendbuf_range(dd, pbufnum, first, last);
- }
- return buf;
-}
-
-static int init_sdma_6120_regs(struct qib_pportdata *ppd)
-{
- return -ENODEV;
-}
-
-static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd)
-{
- return 0;
-}
-
-static int qib_sdma_6120_busy(struct qib_pportdata *ppd)
-{
- return 0;
-}
-
-static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail)
-{
-}
-
-static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
-{
-}
-
-static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
-{
-}
-
-/*
- * the pbc doesn't need a VL15 indicator, but we need it for link_buf.
- * The chip ignores the bit if set.
- */
-static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen,
- u8 srate, u8 vl)
-{
- return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0;
-}
-
-static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
-{
-}
-
-static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd)
-{
- rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
- rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
-}
-
-static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
- u32 len, u32 avail, struct qib_ctxtdata *rcd)
-{
-}
-
-static void writescratch(struct qib_devdata *dd, u32 val)
-{
- (void) qib_write_kreg(dd, kr_scratch, val);
-}
-
-static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
-{
- return -ENXIO;
-}
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-static int qib_6120_notify_dca(struct qib_devdata *dd, unsigned long event)
-{
- return 0;
-}
-#endif
-
-/* Dummy function, as 6120 boards never disable EEPROM Write */
-static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
-{
- return 1;
-}
-
-/**
- * qib_init_iba6120_funcs - set up the chip-specific function pointers
- * @pdev: pci_dev of the qlogic_ib device
- * @ent: pci_device_id matching this chip
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- *
- * It also allocates/partially-inits the qib_devdata struct for
- * this device.
- */
-struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct qib_devdata *dd;
- int ret;
-
- dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
- sizeof(struct qib_chip_specific));
- if (IS_ERR(dd))
- goto bail;
-
- dd->f_bringup_serdes = qib_6120_bringup_serdes;
- dd->f_cleanup = qib_6120_setup_cleanup;
- dd->f_clear_tids = qib_6120_clear_tids;
- dd->f_free_irq = qib_free_irq;
- dd->f_get_base_info = qib_6120_get_base_info;
- dd->f_get_msgheader = qib_6120_get_msgheader;
- dd->f_getsendbuf = qib_6120_getsendbuf;
- dd->f_gpio_mod = gpio_6120_mod;
- dd->f_eeprom_wen = qib_6120_eeprom_wen;
- dd->f_hdrqempty = qib_6120_hdrqempty;
- dd->f_ib_updown = qib_6120_ib_updown;
- dd->f_init_ctxt = qib_6120_init_ctxt;
- dd->f_initvl15_bufs = qib_6120_initvl15_bufs;
- dd->f_intr_fallback = qib_6120_nointr_fallback;
- dd->f_late_initreg = qib_late_6120_initreg;
- dd->f_setpbc_control = qib_6120_setpbc_control;
- dd->f_portcntr = qib_portcntr_6120;
- dd->f_put_tid = (dd->minrev >= 2) ?
- qib_6120_put_tid_2 :
- qib_6120_put_tid;
- dd->f_quiet_serdes = qib_6120_quiet_serdes;
- dd->f_rcvctrl = rcvctrl_6120_mod;
- dd->f_read_cntrs = qib_read_6120cntrs;
- dd->f_read_portcntrs = qib_read_6120portcntrs;
- dd->f_reset = qib_6120_setup_reset;
- dd->f_init_sdma_regs = init_sdma_6120_regs;
- dd->f_sdma_busy = qib_sdma_6120_busy;
- dd->f_sdma_gethead = qib_sdma_6120_gethead;
- dd->f_sdma_sendctrl = qib_6120_sdma_sendctrl;
- dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
- dd->f_sdma_update_tail = qib_sdma_update_6120_tail;
- dd->f_sendctrl = sendctrl_6120_mod;
- dd->f_set_armlaunch = qib_set_6120_armlaunch;
- dd->f_set_cntr_sample = qib_set_cntr_6120_sample;
- dd->f_iblink_state = qib_6120_iblink_state;
- dd->f_ibphys_portstate = qib_6120_phys_portstate;
- dd->f_get_ib_cfg = qib_6120_get_ib_cfg;
- dd->f_set_ib_cfg = qib_6120_set_ib_cfg;
- dd->f_set_ib_loopback = qib_6120_set_loopback;
- dd->f_set_intr_state = qib_6120_set_intr_state;
- dd->f_setextled = qib_6120_setup_setextled;
- dd->f_txchk_change = qib_6120_txchk_change;
- dd->f_update_usrhead = qib_update_6120_usrhead;
- dd->f_wantpiobuf_intr = qib_wantpiobuf_6120_intr;
- dd->f_xgxs_reset = qib_6120_xgxs_reset;
- dd->f_writescratch = writescratch;
- dd->f_tempsense_rd = qib_6120_tempsense_rd;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dd->f_notify_dca = qib_6120_notify_dca;
-#endif
- /*
- * Do remaining pcie setup and save pcie values in dd.
- * Any error printing is already done by the init code.
- * On return, we have the chip mapped and accessible,
- * but chip registers are not set up until start of
- * init_6120_variables.
- */
- ret = qib_pcie_ddinit(dd, pdev, ent);
- if (ret < 0)
- goto bail_free;
-
- /* initialize chip-specific variables */
- ret = init_6120_variables(dd);
- if (ret)
- goto bail_cleanup;
-
- if (qib_mini_init)
- goto bail;
-
- if (qib_pcie_params(dd, 8, NULL))
- qib_dev_err(dd,
- "Failed to setup PCIe or interrupts; continuing anyway\n");
- /* clear diagctrl register, in case diags were running and crashed */
- qib_write_kreg(dd, kr_hwdiagctrl, 0);
-
- if (qib_read_kreg64(dd, kr_hwerrstatus) &
- QLOGIC_IB_HWE_SERDESPLLFAILED)
- qib_write_kreg(dd, kr_hwerrclear,
- QLOGIC_IB_HWE_SERDESPLLFAILED);
-
- /* setup interrupt handler (interrupt type handled above) */
- qib_setup_6120_interrupt(dd);
- /* Note that qpn_mask is set by qib_6120_config_ctxts() first */
- qib_6120_init_hwerrors(dd);
-
- goto bail;
-
-bail_cleanup:
- qib_pcie_ddcleanup(dd);
-bail_free:
- qib_free_devdata(dd);
- dd = ERR_PTR(ret);
-bail:
- return dd;
-}
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
deleted file mode 100644
index 0b347d1129fa..000000000000
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ /dev/null
@@ -1,4596 +0,0 @@
-/*
- * Copyright (c) 2011 - 2017 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * QLogic_IB 7220 chip (except that specific to the SerDes)
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <rdma/ib_verbs.h>
-
-#include "qib.h"
-#include "qib_7220.h"
-
-static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
-static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
-static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
-static u32 qib_7220_iblink_state(u64);
-static u8 qib_7220_phys_portstate(u64);
-static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
-static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
-
-/*
- * This file contains almost all the chip-specific register information and
- * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the
- * exception of SerDes support, which is in qib_sd7220.c.
- */
-
-/* Below uses machine-generated qib_chipnum_regs.h file */
-#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
-
-/* Use defines to tie machine-generated names to lower-case names */
-#define kr_control KREG_IDX(Control)
-#define kr_counterregbase KREG_IDX(CntrRegBase)
-#define kr_errclear KREG_IDX(ErrClear)
-#define kr_errmask KREG_IDX(ErrMask)
-#define kr_errstatus KREG_IDX(ErrStatus)
-#define kr_extctrl KREG_IDX(EXTCtrl)
-#define kr_extstatus KREG_IDX(EXTStatus)
-#define kr_gpio_clear KREG_IDX(GPIOClear)
-#define kr_gpio_mask KREG_IDX(GPIOMask)
-#define kr_gpio_out KREG_IDX(GPIOOut)
-#define kr_gpio_status KREG_IDX(GPIOStatus)
-#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
-#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
-#define kr_hwerrclear KREG_IDX(HwErrClear)
-#define kr_hwerrmask KREG_IDX(HwErrMask)
-#define kr_hwerrstatus KREG_IDX(HwErrStatus)
-#define kr_ibcctrl KREG_IDX(IBCCtrl)
-#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
-#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
-#define kr_ibcstatus KREG_IDX(IBCStatus)
-#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
-#define kr_intclear KREG_IDX(IntClear)
-#define kr_intmask KREG_IDX(IntMask)
-#define kr_intstatus KREG_IDX(IntStatus)
-#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
-#define kr_palign KREG_IDX(PageAlign)
-#define kr_partitionkey KREG_IDX(RcvPartitionKey)
-#define kr_portcnt KREG_IDX(PortCnt)
-#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
-#define kr_rcvctrl KREG_IDX(RcvCtrl)
-#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
-#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
-#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
-#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
-#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
-#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
-#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
-#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
-#define kr_revision KREG_IDX(Revision)
-#define kr_scratch KREG_IDX(Scratch)
-#define kr_sendbuffererror KREG_IDX(SendBufErr0)
-#define kr_sendctrl KREG_IDX(SendCtrl)
-#define kr_senddmabase KREG_IDX(SendDmaBase)
-#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
-#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
-#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
-#define kr_senddmahead KREG_IDX(SendDmaHead)
-#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
-#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
-#define kr_senddmastatus KREG_IDX(SendDmaStatus)
-#define kr_senddmatail KREG_IDX(SendDmaTail)
-#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
-#define kr_sendpiobufbase KREG_IDX(SendBufBase)
-#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
-#define kr_sendpiosize KREG_IDX(SendBufSize)
-#define kr_sendregbase KREG_IDX(SendRegBase)
-#define kr_userregbase KREG_IDX(UserRegBase)
-#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
-
-/* These must only be written via qib_write_kreg_ctxt() */
-#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
-#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
-
-
-#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
- QIB_7220_LBIntCnt_OFFS) / sizeof(u64))
-
-#define cr_badformat CREG_IDX(RxVersionErrCnt)
-#define cr_erricrc CREG_IDX(RxICRCErrCnt)
-#define cr_errlink CREG_IDX(RxLinkMalformCnt)
-#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
-#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
-#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
-#define cr_err_rlen CREG_IDX(RxLenErrCnt)
-#define cr_errslen CREG_IDX(TxLenErrCnt)
-#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
-#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
-#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
-#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
-#define cr_lbint CREG_IDX(LBIntCnt)
-#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
-#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
-#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
-#define cr_pktrcv CREG_IDX(RxDataPktCnt)
-#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
-#define cr_pktsend CREG_IDX(TxDataPktCnt)
-#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
-#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
-#define cr_rcvebp CREG_IDX(RxEBPCnt)
-#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
-#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
-#define cr_sendstall CREG_IDX(TxFlowStallCnt)
-#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
-#define cr_wordrcv CREG_IDX(RxDwordCnt)
-#define cr_wordsend CREG_IDX(TxDwordCnt)
-#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
-#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
-#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
-#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
-#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
-#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
-#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
-#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
-#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
-#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
-#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
-#define cr_psstat CREG_IDX(PSStat)
-#define cr_psstart CREG_IDX(PSStart)
-#define cr_psinterval CREG_IDX(PSInterval)
-#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
-#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
-#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
-#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
-#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
-#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
-#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)
-
-#define SYM_RMASK(regname, fldname) ((u64) \
- QIB_7220_##regname##_##fldname##_RMASK)
-#define SYM_MASK(regname, fldname) ((u64) \
- QIB_7220_##regname##_##fldname##_RMASK << \
- QIB_7220_##regname##_##fldname##_LSB)
-#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
-#define SYM_FIELD(value, regname, fldname) ((u64) \
- (((value) >> SYM_LSB(regname, fldname)) & \
- SYM_RMASK(regname, fldname)))
-#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
-#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
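-
-/*
- * Editorial sketch, not from the original file: a minimal example of
- * how the SYM_* accessors above fit together. IBCStatus and
- * LinkSpeedActive are real symbol names used later in this file; the
- * function name is made up for illustration.
- */
-static inline u64 example_linkspeed_field(u64 ibcstatus)
-{
- /* pull LinkSpeedActive out of a raw IBCStatus register value */
- u64 speed = SYM_FIELD(ibcstatus, IBCStatus, LinkSpeedActive);
-
- /* put it back at its proper bit position within the register */
- return (speed << SYM_LSB(IBCStatus, LinkSpeedActive)) &
-   SYM_MASK(IBCStatus, LinkSpeedActive);
-}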
-
-/* ibcctrl bits */
-#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
-#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
-
-#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
-#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
-#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
-
-#define BLOB_7220_IBCHG 0x81
-
-/*
- * We could have a single register get/put routine, that takes a group type,
- * but this is somewhat clearer and cleaner. It also gives us some error
- * checking. 64 bit register reads should always work, but are inefficient
- * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
- * so we use kreg32 wherever possible. User register and counter register
- * reads are always 32 bit reads, so only one form of those routines.
- */
-
-/**
- * qib_read_ureg32 - read 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @ctxt: context number
- *
- * Return the contents of a register that is virtualized to be per context.
- * Returns 0 if the chip is not present or mapped (not distinguishable
- * from valid contents at runtime; we may add a separate error variable
- * at some point).
- */
-static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
- enum qib_ureg regno, int ctxt)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return 0;
-
- if (dd->userbase)
- return readl(regno + (u64 __iomem *)
- ((char __iomem *)dd->userbase +
- dd->ureg_align * ctxt));
- else
- return readl(regno + (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *)dd->kregbase +
- dd->ureg_align * ctxt));
-}
-
-/**
- * qib_write_ureg - write 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @value: value
- * @ctxt: context
- *
- * Write the contents of a register that is virtualized to be per context.
- */
-static inline void qib_write_ureg(const struct qib_devdata *dd,
- enum qib_ureg regno, u64 value, int ctxt)
-{
- u64 __iomem *ubase;
-
- if (dd->userbase)
- ubase = (u64 __iomem *)
- ((char __iomem *) dd->userbase +
- dd->ureg_align * ctxt);
- else
- ubase = (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *) dd->kregbase +
- dd->ureg_align * ctxt);
-
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &ubase[regno]);
-}
-
-/**
- * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
- * @dd: the qlogic_ib device
- * @regno: the register number to write
- * @ctxt: the context containing the register
- * @value: the value to write
- */
-static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
- const u16 regno, unsigned ctxt,
- u64 value)
-{
- qib_write_kreg(dd, regno + ctxt, value);
-}
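-
-/*
- * Editorial sketch, not from the original file: per-context registers
- * such as kr_rcvhdraddr are laid out as one consecutive u64 per
- * context, which is why qib_write_kreg_ctxt() simply adds the context
- * number to the register index. The wrapper name here is made up.
- */
-static inline void example_set_rcvhdraddr(const struct qib_devdata *dd,
-   unsigned ctxt, u64 addr)
-{
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, addr);
-}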
-
-static inline void write_7220_creg(const struct qib_devdata *dd,
- u16 regno, u64 value)
-{
- if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &dd->cspec->cregbase[regno]);
-}
-
-static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readq(&dd->cspec->cregbase[regno]);
-}
-
-static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readl(&dd->cspec->cregbase[regno]);
-}
-
-/* kr_revision bits */
-#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
-#define QLOGIC_IB_R_EMULATORREV_SHIFT 40
-
-/* kr_control bits */
-#define QLOGIC_IB_C_RESET (1U << 7)
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
-#define QLOGIC_IB_I_RCVURG_SHIFT 32
-#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
-#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
-#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)
-
-#define QLOGIC_IB_C_FREEZEMODE 0x00000002
-#define QLOGIC_IB_C_LINKENABLE 0x00000004
-
-#define QLOGIC_IB_I_SDMAINT 0x8000000000000000ULL
-#define QLOGIC_IB_I_SDMADISABLED 0x4000000000000000ULL
-#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
-#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
-#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
-#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
-
-/* masks for sanity checking interrupts and errors */
-#define QLOGIC_IB_I_BITSEXTANT \
- (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
- (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
- (QLOGIC_IB_I_RCVAVAIL_MASK << \
- QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
- QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
- QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
- QLOGIC_IB_I_SERDESTRIMDONE)
-
-#define IB_HWE_BITSEXTANT \
- (HWE_MASK(RXEMemParityErr) | \
- HWE_MASK(TXEMemParityErr) | \
- (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
- QLOGIC_IB_HWE_PCIE1PLLFAILED | \
- QLOGIC_IB_HWE_PCIE0PLLFAILED | \
- QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
- QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
- QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
- QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
- QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
- HWE_MASK(PowerOnBISTFailed) | \
- QLOGIC_IB_HWE_COREPLL_FBSLIP | \
- QLOGIC_IB_HWE_COREPLL_RFSLIP | \
- QLOGIC_IB_HWE_SERDESPLLFAILED | \
- HWE_MASK(IBCBusToSPCParityErr) | \
- HWE_MASK(IBCBusFromSPCParityErr) | \
- QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
- QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
- QLOGIC_IB_HWE_SDMAMEMREADERR | \
- QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
- QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
- QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
- QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
- QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
- QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
- QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
- QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
- QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)
-
-#define IB_E_BITSEXTANT \
- (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
- ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
- ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
- ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
- ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
- ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
- ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
- ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
- ERR_MASK(SendSpecialTriggerErr) | \
- ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \
- ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendPioArmLaunchErr) | \
- ERR_MASK(SendUnexpectedPktNumErr) | \
- ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \
- ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \
- ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
- ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
- ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
- ERR_MASK(SDmaUnexpDataErr) | \
- ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \
- ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \
- ERR_MASK(SDmaDescAddrMisalignErr) | \
- ERR_MASK(InvalidEEPCmd))
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
-#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-/* specific to this chip */
-#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
-#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
-#define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL
-#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
-#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
-#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
-#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
-#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
-#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
-#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
-#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
-#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
-
-#define IBA7220_IBCC_LINKCMD_SHIFT 19
-
-/* kr_ibcddrctrl bits */
-#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
-#define IBA7220_IBC_DLIDLMC_SHIFT 32
-
-#define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
- SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
-#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)
-
-#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
-#define IBA7220_IBC_LREV_MASK 1
-#define IBA7220_IBC_LREV_SHIFT 8
-#define IBA7220_IBC_RXPOL_MASK 1
-#define IBA7220_IBC_RXPOL_SHIFT 7
-#define IBA7220_IBC_WIDTH_SHIFT 5
-#define IBA7220_IBC_WIDTH_MASK 0x3
-#define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_SPEED_AUTONEG (1 << 1)
-#define IBA7220_IBC_SPEED_SDR (1 << 2)
-#define IBA7220_IBC_SPEED_DDR (1 << 3)
-#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1)
-#define IBA7220_IBC_IBTA_1_2_MASK (1)
-
-/* kr_ibcddrstatus */
-/* link latency shift is 0, don't bother defining */
-#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
-
-/* kr_extstatus bits */
-#define QLOGIC_IB_EXTS_FREQSEL 0x2
-#define QLOGIC_IB_EXTS_SERDESSEL 0x4
-#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define QLOGIC_IB_XGXS_RESET 0x5ULL
-#define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63)
-
-/* kr_rcvpktledcnt */
-#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
-#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
-
-#define _QIB_GPIO_SDA_NUM 1
-#define _QIB_GPIO_SCL_NUM 0
-#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
-#define QIB_TWSI_TEMP_DEV 0x98
-
-/* HW counter clock is at 4nsec */
-#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000
-
-#define IBA7220_R_INTRAVAIL_SHIFT 17
-#define IBA7220_R_PKEY_DIS_SHIFT 34
-#define IBA7220_R_TAILUPD_SHIFT 35
-#define IBA7220_R_CTXTCFG_SHIFT 36
-
-#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
-
-/*
- * The size bits give us 2^N, in KB units; 0 marks the entry as invalid,
- * and 7 is reserved. We currently use only 2KB and 4KB.
- */
-#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
-#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
-#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
-#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
-#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
-#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
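-
-/*
- * Editorial sketch, not from the original file: a TID entry combines
- * the size selector above with the page's physical address, stored
- * without its low IBA7220_TID_PA_SHIFT bits. The real logic lives in
- * the chip's put_tid routine; this helper name is made up.
- */
-static inline u64 example_make_tid(u64 pa, int use_4k)
-{
- u64 tid = pa >> IBA7220_TID_PA_SHIFT; /* drop low address bits */
-
- tid |= use_4k ? IBA7220_TID_SZ_4K : IBA7220_TID_SZ_2K;
- return tid;
-}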
-
-#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
-
-/* packet rate matching delay multiplier */
-static u8 rate_to_delay[2][2] = {
- /* 1x, 4x */
- { 8, 2 }, /* SDR */
- { 4, 1 } /* DDR */
-};
-
-static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
- [IB_RATE_2_5_GBPS] = 8,
- [IB_RATE_5_GBPS] = 4,
- [IB_RATE_10_GBPS] = 2,
- [IB_RATE_20_GBPS] = 1
-};
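-
-/*
- * Editorial sketch, not from the original file: the table above is
- * indexed by speed (0 = SDR, 1 = DDR) and width (0 = 1X, 1 = 4X), so
- * a DDR 4X link gets the smallest delay multiplier (1) and an SDR 1X
- * link the largest (8). The helper name is made up.
- */
-static inline u8 example_rate_delay(int is_ddr, int is_4x)
-{
- return rate_to_delay[!!is_ddr][!!is_4x];
-}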
-
-#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
-#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)
-
-/* link training states, from IBC */
-#define IB_7220_LT_STATE_DISABLED 0x00
-#define IB_7220_LT_STATE_LINKUP 0x01
-#define IB_7220_LT_STATE_POLLACTIVE 0x02
-#define IB_7220_LT_STATE_POLLQUIET 0x03
-#define IB_7220_LT_STATE_SLEEPDELAY 0x04
-#define IB_7220_LT_STATE_SLEEPQUIET 0x05
-#define IB_7220_LT_STATE_CFGDEBOUNCE 0x08
-#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
-#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
-#define IB_7220_LT_STATE_CFGIDLE 0x0b
-#define IB_7220_LT_STATE_RECOVERRETRAIN 0x0c
-#define IB_7220_LT_STATE_RECOVERWAITRMT 0x0e
-#define IB_7220_LT_STATE_RECOVERIDLE 0x0f
-
-/* link state machine states from IBC */
-#define IB_7220_L_STATE_DOWN 0x0
-#define IB_7220_L_STATE_INIT 0x1
-#define IB_7220_L_STATE_ARM 0x2
-#define IB_7220_L_STATE_ACTIVE 0x3
-#define IB_7220_L_STATE_ACT_DEFER 0x4
-
-static const u8 qib_7220_physportstate[0x20] = {
- [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
- [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
- [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
- [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
- [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
- [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
- [IB_7220_LT_STATE_CFGDEBOUNCE] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7220_LT_STATE_CFGRCVFCFG] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7220_LT_STATE_CFGWAITRMT] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7220_LT_STATE_RECOVERRETRAIN] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7220_LT_STATE_RECOVERWAITRMT] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7220_LT_STATE_RECOVERIDLE] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
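-
-/*
- * Editorial sketch, not from the original file: the table above maps
- * the raw IBC link training state to an IB physical port state, in
- * the way the chip-specific phys_portstate hook later in this file
- * uses it. The 0x20-entry table implies a 5-bit training-state field;
- * the helper name is made up.
- */
-static inline u8 example_phys_portstate(u64 ibcstatus)
-{
- u8 lt = (u8)SYM_FIELD(ibcstatus, IBCStatus, LinkTrainingState);
-
- return qib_7220_physportstate[lt];
-}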
-
-int qib_special_trigger;
-module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
-MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");
-
-#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
-#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
-
-#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
- (1ULL << (SYM_LSB(regname, fldname) + (bit))))
-
-#define TXEMEMPARITYERR_PIOBUF \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
-#define TXEMEMPARITYERR_PIOPBC \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
-#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
-
-#define RXEMEMPARITYERR_RCVBUF \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
-#define RXEMEMPARITYERR_LOOKUPQ \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
-#define RXEMEMPARITYERR_EXPTID \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
-#define RXEMEMPARITYERR_EAGERTID \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
-#define RXEMEMPARITYERR_FLAGBUF \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
-#define RXEMEMPARITYERR_DATAINFO \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
-#define RXEMEMPARITYERR_HDRINFO \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
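-
-/*
- * Editorial sketch, not from the original file: each of the names
- * above selects a single bit of a multi-bit memory-parity field, so
- * individual memories can be tested against a raw hwerrstatus value.
- * The helper name is made up.
- */
-static inline int example_pio_parity(u64 hwerrs)
-{
- return !!(hwerrs & (TXEMEMPARITYERR_PIOBUF |
-     TXEMEMPARITYERR_PIOPBC));
-}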
-
-/* 7220 specific hardware errors... */
-static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
- /* generic hardware errors */
- QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
- QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
-
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
- "TXE PIOBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
- "TXE PIOPBC Memory Parity"),
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
- "TXE PIOLAUNCHFIFO Memory Parity"),
-
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
- "RXE RCVBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
- "RXE LOOKUPQ Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
- "RXE EAGERTID Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
- "RXE EXPTID Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
- "RXE FLAGBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
- "RXE DATAINFO Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
- "RXE HDRINFO Memory Parity"),
-
- /* chip-specific hardware errors */
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
- "PCIe Poisoned TLP"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
- "PCIe completion timeout"),
- /*
- * In practice, it's unlikely that we'll see PCIe PLL, bus
- * parity, or memory parity failures, because most likely we
- * won't be able to talk to the core of the chip. Nonetheless, we
- * might see them, if they are in parts of the PCIe core that aren't
- * essential.
- */
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
- "PCIePLL1"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
- "PCIePLL0"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
- "PCIe XTLH core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
- "PCIe ADM TX core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
- "PCIe ADM RX core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
- "SerDes PLL"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
- "PCIe cpl data queue"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
- "PCIe cpl header queue"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
- "Send DMA memory read"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
- "uC PLL clock not locked"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
- "PCIe serdes Q0 no clock"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
- "PCIe serdes Q1 no clock"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
- "PCIe serdes Q2 no clock"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
- "PCIe serdes Q3 no clock"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
- "DDS RXEQ memory parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
- "IB uC memory parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
- "PCIe uC oct0 memory parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
- "PCIe uC oct1 memory parity"),
-};
-
-#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)
-
-#define QLOGIC_IB_E_PKTERRS (\
- ERR_MASK(SendPktLenErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(RcvVCRCErr) | \
- ERR_MASK(RcvICRCErr) | \
- ERR_MASK(RcvShortPktLenErr) | \
- ERR_MASK(RcvEBPErr))
-
-/* Convenience for decoding Send DMA errors */
-#define QLOGIC_IB_E_SDMAERRS ( \
- ERR_MASK(SDmaGenMismatchErr) | \
- ERR_MASK(SDmaOutOfBoundErr) | \
- ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
- ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
- ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
- ERR_MASK(SDmaUnexpDataErr) | \
- ERR_MASK(SDmaDescAddrMisalignErr) | \
- ERR_MASK(SDmaDisabledErr) | \
- ERR_MASK(SendBufMisuseErr))
-
-/* These are all rcv-related errors which we want to count for stats */
-#define E_SUM_PKTERRS \
- (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
- ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
- ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
- ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
- ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
-
-/* These are all send-related errors which we want to count for stats */
-#define E_SUM_ERRS \
- (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
- ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(InvalidAddrErr))
-
-/*
- * This is similar to E_SUM_ERRS, but lists only the send errors that are
- * safe to ignore while cancelling buffers after a freeze. Armlaunch is
- * deliberately not ignored, because more armlaunch errors can arrive
- * while cleanup is still in progress, and they must be cancelled as
- * they happen.
- */
-#define E_SPKT_ERRS_IGNORE \
- (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
- ERR_MASK(SendPktLenErr))
-
-/*
- * these are errors that can occur when the link changes state while
- * a packet is being sent or received. This doesn't cover things
- * like EBP or VCRC that can be the result of a send racing with a
- * link state change, so that we receive a "known bad" packet.
- */
-#define E_SUM_LINK_PKTERRS \
- (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvUnexpectedCharErr))
-
-static void autoneg_7220_work(struct work_struct *);
-static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer so it can be re-used;
- * we don't need to force the update of pioavail here.
- */
-static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
-{
- unsigned long sbuf[3];
- struct qib_devdata *dd = ppd->dd;
-
- /*
- * It's possible that sendbuffererror could have bits set; might
- * have already done this as a result of hardware error handling.
- */
- /* read these before writing errorclear */
- sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
- sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
- sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
-
- if (sbuf[0] || sbuf[1] || sbuf[2])
- qib_disarm_piobufs_set(dd, sbuf,
- dd->piobcnt2k + dd->piobcnt4k);
-}
-
-static void qib_7220_txe_recover(struct qib_devdata *dd)
-{
- qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
- qib_disarm_7220_senderrbufs(dd->pport);
-}
-
-/*
- * This is called with interrupts disabled and sdma_lock held.
- */
-static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 set_sendctrl = 0;
- u64 clr_sendctrl = 0;
-
- if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
- set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
-
- if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
- set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
-
- if (op & QIB_SDMA_SENDCTRL_OP_HALT)
- set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
-
- spin_lock(&dd->sendctrl_lock);
-
- dd->sendctrl |= set_sendctrl;
- dd->sendctrl &= ~clr_sendctrl;
-
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- spin_unlock(&dd->sendctrl_lock);
-}
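-
-/*
- * Editorial sketch, not from the original file: the op argument is a
- * bitmask, so several SDMA control changes can be applied under
- * sendctrl_lock in a single call. The helper name is made up.
- */
-static inline void example_sdma_enable(struct qib_pportdata *ppd)
-{
- qib_7220_sdma_sendctrl(ppd, QIB_SDMA_SENDCTRL_OP_ENABLE |
-   QIB_SDMA_SENDCTRL_OP_INTENABLE);
-}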
-
-static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
- u64 err, char *buf, size_t blen)
-{
- static const struct {
- u64 err;
- const char *msg;
- } errs[] = {
- { ERR_MASK(SDmaGenMismatchErr),
- "SDmaGenMismatch" },
- { ERR_MASK(SDmaOutOfBoundErr),
- "SDmaOutOfBound" },
- { ERR_MASK(SDmaTailOutOfBoundErr),
- "SDmaTailOutOfBound" },
- { ERR_MASK(SDmaBaseErr),
- "SDmaBase" },
- { ERR_MASK(SDma1stDescErr),
- "SDma1stDesc" },
- { ERR_MASK(SDmaRpyTagErr),
- "SDmaRpyTag" },
- { ERR_MASK(SDmaDwEnErr),
- "SDmaDwEn" },
- { ERR_MASK(SDmaMissingDwErr),
- "SDmaMissingDw" },
- { ERR_MASK(SDmaUnexpDataErr),
- "SDmaUnexpData" },
- { ERR_MASK(SDmaDescAddrMisalignErr),
- "SDmaDescAddrMisalign" },
- { ERR_MASK(SendBufMisuseErr),
- "SendBufMisuse" },
- { ERR_MASK(SDmaDisabledErr),
- "SDmaDisabled" },
- };
- int i;
- size_t bidx = 0;
-
- for (i = 0; i < ARRAY_SIZE(errs); i++) {
- if (err & errs[i].err)
- bidx += scnprintf(buf + bidx, blen - bidx,
- "%s ", errs[i].msg);
- }
-}
-
-/*
- * This is called as part of link down clean up so disarm and flush
- * all send buffers so that SMP packets can be sent.
- */
-static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
-{
- /* This will trigger the Abort interrupt */
- sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
- QIB_SENDCTRL_AVAIL_BLIP);
- ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
-}
-
-static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
-{
- /*
- * Write SendDmaLenGen twice: first with the MSB of the generation
- * count clear, then with it set, to enable generation checking
- * and load the internal generation counter.
- */
- qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
- qib_write_kreg(ppd->dd, kr_senddmalengen,
- ppd->sdma_descq_cnt |
- (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
-}
-
-static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
-{
- qib_sdma_7220_setlengen(ppd);
- qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
- ppd->sdma_head_dma[0] = 0;
-}
-
-#define DISABLES_SDMA ( \
- ERR_MASK(SDmaDisabledErr) | \
- ERR_MASK(SDmaBaseErr) | \
- ERR_MASK(SDmaTailOutOfBoundErr) | \
- ERR_MASK(SDmaOutOfBoundErr) | \
- ERR_MASK(SDma1stDescErr) | \
- ERR_MASK(SDmaRpyTagErr) | \
- ERR_MASK(SDmaGenMismatchErr) | \
- ERR_MASK(SDmaDescAddrMisalignErr) | \
- ERR_MASK(SDmaMissingDwErr) | \
- ERR_MASK(SDmaDwEnErr))
-
-static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
-{
- unsigned long flags;
- struct qib_devdata *dd = ppd->dd;
- char *msg;
-
- errs &= QLOGIC_IB_E_SDMAERRS;
-
- msg = dd->cspec->sdmamsgbuf;
- qib_decode_7220_sdma_errs(ppd, errs, msg,
- sizeof(dd->cspec->sdmamsgbuf));
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- if (errs & ERR_MASK(SendBufMisuseErr)) {
- unsigned long sbuf[3];
-
- sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
- sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
- sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
-
- qib_dev_err(ppd->dd,
- "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
- ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
- sbuf[0]);
- }
-
- if (errs & ERR_MASK(SDmaUnexpDataErr))
- qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
- ppd->port);
-
- switch (ppd->sdma_state.current_state) {
- case qib_sdma_state_s00_hw_down:
- /* not expecting any interrupts */
- break;
-
- case qib_sdma_state_s10_hw_start_up_wait:
- /* handled in intr path */
- break;
-
- case qib_sdma_state_s20_idle:
- /* not expecting any interrupts */
- break;
-
- case qib_sdma_state_s30_sw_clean_up_wait:
- /* not expecting any interrupts */
- break;
-
- case qib_sdma_state_s40_hw_clean_up_wait:
- if (errs & ERR_MASK(SDmaDisabledErr))
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e50_hw_cleaned);
- break;
-
- case qib_sdma_state_s50_hw_halt_wait:
- /* handled in intr path */
- break;
-
- case qib_sdma_state_s99_running:
- if (errs & DISABLES_SDMA)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e7220_err_halted);
- break;
- }
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-/*
- * Decode the error status into strings, deciding whether to always
- * print it or not depending on "normal packet errors" vs everything
- * else. Return 1 if "real" errors, otherwise 0 if only packet
- * errors, so caller can decide what to print with the string.
- */
-static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
- u64 err)
-{
- int iserr = 1;
-
- *buf = '\0';
- if (err & QLOGIC_IB_E_PKTERRS) {
- if (!(err & ~QLOGIC_IB_E_PKTERRS))
- iserr = 0;
- if ((err & ERR_MASK(RcvICRCErr)) &&
- !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
- strlcat(buf, "CRC ", blen);
- if (!iserr)
- goto done;
- }
- if (err & ERR_MASK(RcvHdrLenErr))
- strlcat(buf, "rhdrlen ", blen);
- if (err & ERR_MASK(RcvBadTidErr))
- strlcat(buf, "rbadtid ", blen);
- if (err & ERR_MASK(RcvBadVersionErr))
- strlcat(buf, "rbadversion ", blen);
- if (err & ERR_MASK(RcvHdrErr))
- strlcat(buf, "rhdr ", blen);
- if (err & ERR_MASK(SendSpecialTriggerErr))
- strlcat(buf, "sendspecialtrigger ", blen);
- if (err & ERR_MASK(RcvLongPktLenErr))
- strlcat(buf, "rlongpktlen ", blen);
- if (err & ERR_MASK(RcvMaxPktLenErr))
- strlcat(buf, "rmaxpktlen ", blen);
- if (err & ERR_MASK(RcvMinPktLenErr))
- strlcat(buf, "rminpktlen ", blen);
- if (err & ERR_MASK(SendMinPktLenErr))
- strlcat(buf, "sminpktlen ", blen);
- if (err & ERR_MASK(RcvFormatErr))
- strlcat(buf, "rformaterr ", blen);
- if (err & ERR_MASK(RcvUnsupportedVLErr))
- strlcat(buf, "runsupvl ", blen);
- if (err & ERR_MASK(RcvUnexpectedCharErr))
- strlcat(buf, "runexpchar ", blen);
- if (err & ERR_MASK(RcvIBFlowErr))
- strlcat(buf, "ribflow ", blen);
- if (err & ERR_MASK(SendUnderRunErr))
- strlcat(buf, "sunderrun ", blen);
- if (err & ERR_MASK(SendPioArmLaunchErr))
- strlcat(buf, "spioarmlaunch ", blen);
- if (err & ERR_MASK(SendUnexpectedPktNumErr))
- strlcat(buf, "sunexperrpktnum ", blen);
- if (err & ERR_MASK(SendDroppedSmpPktErr))
- strlcat(buf, "sdroppedsmppkt ", blen);
- if (err & ERR_MASK(SendMaxPktLenErr))
- strlcat(buf, "smaxpktlen ", blen);
- if (err & ERR_MASK(SendUnsupportedVLErr))
- strlcat(buf, "sunsupVL ", blen);
- if (err & ERR_MASK(InvalidAddrErr))
- strlcat(buf, "invalidaddr ", blen);
- if (err & ERR_MASK(RcvEgrFullErr))
- strlcat(buf, "rcvegrfull ", blen);
- if (err & ERR_MASK(RcvHdrFullErr))
- strlcat(buf, "rcvhdrfull ", blen);
- if (err & ERR_MASK(IBStatusChanged))
- strlcat(buf, "ibcstatuschg ", blen);
- if (err & ERR_MASK(RcvIBLostLinkErr))
- strlcat(buf, "riblostlink ", blen);
- if (err & ERR_MASK(HardwareErr))
- strlcat(buf, "hardware ", blen);
- if (err & ERR_MASK(ResetNegated))
- strlcat(buf, "reset ", blen);
- if (err & QLOGIC_IB_E_SDMAERRS)
- qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
- if (err & ERR_MASK(InvalidEEPCmd))
- strlcat(buf, "invalideepromcmd ", blen);
-done:
- return iserr;
-}
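-
-/*
- * Editorial sketch, not from the original file: callers use the
- * return value of qib_decode_7220_err() to decide how loudly to
- * report; the real caller is handle_7220_errors() below. The buffer
- * size and helper name here are made up.
- */
-static void example_report_errs(struct qib_devdata *dd, u64 errs)
-{
- char ebuf[96];
-
- if (qib_decode_7220_err(dd, ebuf, sizeof(ebuf), errs))
-  qib_dev_err(dd, "%s error\n", ebuf); /* "real" errors */
-}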
-
-static void reenable_7220_chase(struct timer_list *t)
-{
- struct qib_chippport_specific *cpspec = timer_container_of(cpspec, t,
- chase_timer);
- struct qib_pportdata *ppd = &cpspec->pportdata;
-
- ppd->cpspec->chase_timer.expires = 0;
- qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
- QLOGIC_IB_IBCC_LINKINITCMD_POLL);
-}
-
-static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
-{
- u8 ibclt;
- unsigned long tnow;
-
- ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
-
- /*
- * Detect and handle the state chase issue, where we can
- * get stuck if we are unlucky on timing on both sides of
- * the link. If we are, we disable, set a timer, and
- * then re-enable.
- */
- switch (ibclt) {
- case IB_7220_LT_STATE_CFGRCVFCFG:
- case IB_7220_LT_STATE_CFGWAITRMT:
- case IB_7220_LT_STATE_TXREVLANES:
- case IB_7220_LT_STATE_CFGENH:
- tnow = jiffies;
- if (ppd->cpspec->chase_end &&
- time_after(tnow, ppd->cpspec->chase_end)) {
- ppd->cpspec->chase_end = 0;
- qib_set_ib_7220_lstate(ppd,
- QLOGIC_IB_IBCC_LINKCMD_DOWN,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- ppd->cpspec->chase_timer.expires = jiffies +
- QIB_CHASE_DIS_TIME;
- add_timer(&ppd->cpspec->chase_timer);
- } else if (!ppd->cpspec->chase_end)
- ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
- break;
-
- default:
- ppd->cpspec->chase_end = 0;
- break;
- }
-}
-
-static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
-{
- char *msg;
- u64 ignore_this_time = 0;
- u64 iserr = 0;
- struct qib_pportdata *ppd = dd->pport;
- u64 mask;
-
- /* don't report errors that are masked */
- errs &= dd->cspec->errormask;
- msg = dd->cspec->emsgbuf;
-
- /* do these first, they are most important */
- if (errs & ERR_MASK(HardwareErr))
- qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
-
- if (errs & QLOGIC_IB_E_SDMAERRS)
- sdma_7220_errors(ppd, errs);
-
- if (errs & ~IB_E_BITSEXTANT)
- qib_dev_err(dd,
- "error interrupt with unknown errors %llx set\n",
- (unsigned long long) (errs & ~IB_E_BITSEXTANT));
-
- if (errs & E_SUM_ERRS) {
- qib_disarm_7220_senderrbufs(ppd);
- if ((errs & E_SUM_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when trying to bring the link
- * up, but the IB link changes state at the "wrong"
- * time. The IB logic then complains that the packet
- * isn't valid. We don't want to confuse people, so
- * we just don't print them, except at debug
- */
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
- } else if ((errs & E_SUM_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when SMA is trying to bring the link
- * up, but the IB link changes state at the "wrong" time.
- * The IB logic then complains that the packet isn't
- * valid. We don't want to confuse people, so we just
- * don't print them, except at debug
- */
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
-
- qib_write_kreg(dd, kr_errclear, errs);
-
- errs &= ~ignore_this_time;
- if (!errs)
- goto done;
-
- /*
- * The ones we mask off are handled specially below
- * or above. Also mask SDMADISABLED by default as it
- * is too chatty.
- */
- mask = ERR_MASK(IBStatusChanged) |
- ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
- ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
-
- qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
-
- if (errs & E_SUM_PKTERRS)
- qib_stats.sps_rcverrs++;
- if (errs & E_SUM_ERRS)
- qib_stats.sps_txerrs++;
- iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
- ERR_MASK(SDmaDisabledErr));
-
- if (errs & ERR_MASK(IBStatusChanged)) {
- u64 ibcs;
-
- ibcs = qib_read_kreg64(dd, kr_ibcstatus);
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- handle_7220_chase(ppd, ibcs);
-
- /* Update our picture of width and speed from chip */
- ppd->link_width_active =
- ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
- IB_WIDTH_4X : IB_WIDTH_1X;
- ppd->link_speed_active =
- ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
- QIB_IB_DDR : QIB_IB_SDR;
-
- /*
- * Since going into a recovery state causes the link state
- * to go down and since recovery is transitory, it is better
- * if we "miss" ever seeing the link training state go into
- * recovery (i.e., ignore this transition for link state
- * special handling purposes) without updating lastibcstat.
- */
- if (qib_7220_phys_portstate(ibcs) !=
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
- qib_handle_e_ibstatuschanged(ppd, ibcs);
- }
-
- if (errs & ERR_MASK(ResetNegated)) {
- qib_dev_err(dd,
- "Got reset, requires re-init (unload and reload driver)\n");
- dd->flags &= ~QIB_INITTED; /* needs re-init */
- /* mark as having had error */
- *dd->devstatusp |= QIB_STATUS_HWERROR;
- *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
- }
-
- if (*msg && iserr)
- qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
-
- if (ppd->state_wanted & ppd->lflags)
- wake_up_interruptible(&ppd->state_wait);
-
- /*
- * If there were hdrq or egrfull errors, wake up any processes
- * waiting in poll. We used to try to check which contexts had
- * the overflow, but given the cost of that and the chip reads
- * to support it, it's better to just wake everybody up if we
- * get an overflow; waiters can poll again if it's not them.
- */
- if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
- qib_handle_urcv(dd, ~0U);
- if (errs & ERR_MASK(RcvEgrFullErr))
- qib_stats.sps_buffull++;
- else
- qib_stats.sps_hdrfull++;
- }
-done:
- return;
-}
-
-/* enable/disable chip from delivering interrupts */
-static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- if (dd->flags & QIB_BADINTR)
- return;
- qib_write_kreg(dd, kr_intmask, ~0ULL);
- /* force re-interrupt of any pending interrupts. */
- qib_write_kreg(dd, kr_intclear, 0ULL);
- } else
- qib_write_kreg(dd, kr_intmask, 0ULL);
-}
-
-/*
- * Try to cleanup as much as possible for anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- * This is in chip-specific code because of all of the register accesses,
- * even though the details are similar on most chips.
- */
-static void qib_7220_clear_freeze(struct qib_devdata *dd)
-{
- /* disable error interrupts, to avoid confusion */
- qib_write_kreg(dd, kr_errmask, 0ULL);
-
- /* also disable interrupts; errormask is sometimes overwritten */
- qib_7220_set_intr_state(dd, 0);
-
- qib_cancel_sends(dd->pport);
-
- /* clear the freeze, and be sure chip saw it */
- qib_write_kreg(dd, kr_control, dd->control);
- qib_read_kreg32(dd, kr_scratch);
-
- /* force in-memory update now we are out of freeze */
- qib_force_pio_avail_update(dd);
-
- /*
- * force new interrupt if any hwerr, error or interrupt bits are
- * still set, and clear "safe" send packet errors related to freeze
- * and cancelling sends. Re-enable error interrupts before possible
- * force of re-interrupt on pending interrupts.
- */
- qib_write_kreg(dd, kr_hwerrclear, 0ULL);
- qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
- qib_7220_set_intr_state(dd, 1);
-}
-
-/**
- * qib_7220_handle_hwerrors - display hardware errors.
- * @dd: the qlogic_ib device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now we'll
- * print them and continue. We reuse the same message buffer as
- * handle_7220_errors() to avoid excessive stack usage.
- */
-static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
- size_t msgl)
-{
- u64 hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char *bitsmsg;
-
- hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
- if (!hwerrs)
- goto bail;
- if (hwerrs == ~0ULL) {
- qib_dev_err(dd,
- "Read of hardware error status failed (all bits set); ignoring\n");
- goto bail;
- }
- qib_stats.sps_hwerrs++;
-
- /*
- * Always clear the error status register, except MEMBISTFAIL,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support.
- */
- qib_write_kreg(dd, kr_hwerrclear,
- hwerrs & ~HWE_MASK(PowerOnBISTFailed));
-
- hwerrs &= dd->cspec->hwerrmask;
-
- if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
- RXE_PARITY))
- qib_devinfo(dd->pcidev,
- "Hardware error: hwerr=0x%llx (cleared)\n",
- (unsigned long long) hwerrs);
-
- if (hwerrs & ~IB_HWE_BITSEXTANT)
- qib_dev_err(dd,
- "hwerror interrupt with unknown errors %llx set\n",
- (unsigned long long) (hwerrs & ~IB_HWE_BITSEXTANT));
-
- if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
- qib_sd7220_clr_ibpar(dd);
-
- ctrl = qib_read_kreg32(dd, kr_control);
- if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
- /*
- * Parity errors in send memory are recoverable by h/w;
- * just do housekeeping, exit freeze mode and continue.
- */
- if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
- TXEMEMPARITYERR_PIOPBC)) {
- qib_7220_txe_recover(dd);
- hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
- TXEMEMPARITYERR_PIOPBC);
- }
- if (hwerrs)
- isfatal = 1;
- else
- qib_7220_clear_freeze(dd);
- }
-
- *msg = '\0';
-
- if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
- isfatal = 1;
- strlcat(msg,
- "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
- ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);
-
- bitsmsg = dd->cspec->bitsmsgbuf;
- if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
- "[PCIe Mem Parity Errs %x] ", bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
-#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
- QLOGIC_IB_HWE_COREPLL_RFSLIP)
-
- if (hwerrs & _QIB_PLL_FAIL) {
- isfatal = 1;
- snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) hwerrs & _QIB_PLL_FAIL);
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
- /*
- * If it occurs, it is left masked since the external
- * interface is unused.
- */
- dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- qib_dev_err(dd, "%s hardware error\n", msg);
-
- if (isfatal && !dd->diag_client) {
- qib_dev_err(dd,
- "Fatal Hardware Error, no longer usable, SN %.16s\n",
- dd->serial);
- /*
- * For /sys status file and user programs to print; if no
- * trailing brace is copied, we'll know it was truncated.
- */
- if (dd->freezemsg)
- snprintf(dd->freezemsg, dd->freezelen,
- "{%s}", msg);
- qib_disable_after_error(dd);
- }
-bail:;
-}
-
-/**
- * qib_7220_init_hwerrors - enable hardware errors
- * @dd: the qlogic_ib device
- *
- * Now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void qib_7220_init_hwerrors(struct qib_devdata *dd)
-{
- u64 val;
- u64 extsval;
-
- extsval = qib_read_kreg64(dd, kr_extstatus);
-
- if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
- QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
- qib_dev_err(dd, "MemBIST did not complete!\n");
- if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
- qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");
-
- val = ~0ULL; /* default to all hwerrors become interrupts, */
-
- val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
- dd->cspec->hwerrmask = val;
-
- qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
-
- /* clear all */
- qib_write_kreg(dd, kr_errclear, ~0ULL);
- /* enable errors that are masked, at least this first time. */
- qib_write_kreg(dd, kr_errmask, ~0ULL);
- dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
- /* clear any interrupts up to this point (ints still not enabled) */
- qib_write_kreg(dd, kr_intclear, ~0ULL);
-}
-
-/*
- * Disable and enable the armlaunch error. Used for PIO bandwidth testing
- * on chips that are count-based, rather than trigger-based. There is no
- * reference counting, but that's also fine, given the intended use.
- * Only chip-specific because it's all register accesses
- */
-static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
- dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
- } else
- dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
-}
-
-/*
- * Formerly took parameter <which> in pre-shifted,
- * pre-merged form with LinkCmd and LinkInitCmd
- * together, and assumed that zero was a NOP.
- */
-static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
- u16 linitcmd)
-{
- u64 mod_wd;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
- /*
- * If we are told to disable, note that so link-recovery
- * code does not attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
- /*
- * Any other linkinitcmd will lead to LINKDOWN and then
- * to INIT (if all is well), so clear flag to let
- * link-recovery code attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-
- mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
- (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
-}
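-
-/*
- * Usage illustration (hypothetical call site; the constants are the
- * ones handled above): a caller taking the link down and re-arming
- * polling would pass the two command halves separately, e.g.
- *
- *	qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
- *			       QLOGIC_IB_IBCC_LINKINITCMD_POLL);
- *
- * and the function above shifts and merges them into the single
- * mod_wd written to kr_ibcctrl.
- */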
-
-/*
- * All detailed interaction with the SerDes has been moved to qib_sd7220.c
- *
- * The portion of IBA7220-specific bringup_serdes() that actually deals with
- * registers and memory within the SerDes itself is qib_sd7220_init().
- */
-
-/**
- * qib_7220_bringup_serdes - bring up the serdes
- * @ppd: physical port on the qlogic_ib device
- */
-static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 val, prev_val, guid, ibc;
- int ret = 0;
-
- /* Put IBC in reset, sends disabled */
- dd->control &= ~QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control, 0ULL);
-
- if (qib_compat_ddr_negotiate) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap =
- read_7220_creg32(dd, cr_iblinkerrrecov);
- }
-
- /* flowcontrolwatermark is in units of KBytes */
- ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
- /*
- * How often flowctrl is sent. More or less in usecs; balance against
- * watermark value, so that in theory senders always get a flow
- * control update in time to not let the IB link go idle.
- */
- ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
- /* max error tolerance */
- ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
- /* use "real" buffer space for */
- ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
- /* IB credit flow control. */
- ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
- /*
- * set initial max size pkt IBC will send, including ICRC; it's the
- * PIO buffer size in dwords, less 1; also see qib_set_mtu()
- */
- ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
- ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
-
- /* initially come up waiting for TS1, without sending anything. */
- val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
- qib_write_kreg(dd, kr_ibcctrl, val);
-
- if (!ppd->cpspec->ibcddrctrl) {
- /* not on re-init after reset */
- ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
-
- if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
- ppd->cpspec->ibcddrctrl |=
- IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- else
- ppd->cpspec->ibcddrctrl |=
- ppd->link_speed_enabled == QIB_IB_DDR ?
- IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
- if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
- (IB_WIDTH_1X | IB_WIDTH_4X))
- ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
- else
- ppd->cpspec->ibcddrctrl |=
- ppd->link_width_enabled == IB_WIDTH_4X ?
- IBA7220_IBC_WIDTH_4X_ONLY :
- IBA7220_IBC_WIDTH_1X_ONLY;
-
- /* always enable these on driver reload, not sticky */
- ppd->cpspec->ibcddrctrl |=
- IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
- ppd->cpspec->ibcddrctrl |=
- IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
-
- /* enable automatic lane reversal detection for receive */
- ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
- } else
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
-
- qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- qib_write_kreg(dd, kr_ncmodectrl, 0ULL);
- qib_write_kreg(dd, kr_scratch, 0);
-
- ret = qib_sd7220_init(dd);
-
- val = qib_read_kreg64(dd, kr_xgxs_cfg);
- prev_val = val;
- val |= QLOGIC_IB_XGXS_FC_SAFE;
- if (val != prev_val) {
- qib_write_kreg(dd, kr_xgxs_cfg, val);
- qib_read_kreg32(dd, kr_scratch);
- }
- if (val & QLOGIC_IB_XGXS_RESET)
- val &= ~QLOGIC_IB_XGXS_RESET;
- if (val != prev_val)
- qib_write_kreg(dd, kr_xgxs_cfg, val);
-
- /* first time through, set port guid */
- if (!ppd->guid)
- ppd->guid = dd->base_guid;
- guid = be64_to_cpu(ppd->guid);
-
- qib_write_kreg(dd, kr_hrtbt_guid, guid);
- if (!ret) {
- dd->control |= QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control, dd->control);
- } else
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
- return ret;
-}
-
-/**
- * qib_7220_quiet_serdes - set serdes to txidle
- * @ppd: physical port of the qlogic_ib device
- * Called when driver is being unloaded
- */
-static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
-{
- u64 val;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- /* disable IBC */
- dd->control &= ~QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control,
- dd->control | QLOGIC_IB_C_FREEZEMODE);
-
- ppd->cpspec->chase_end = 0;
- if (ppd->cpspec->chase_timer.function) /* if initted */
- timer_delete_sync(&ppd->cpspec->chase_timer);
-
- if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
- ppd->cpspec->ibdeltainprog) {
- u64 diagc;
-
- /* enable counter writes */
- diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
- qib_write_kreg(dd, kr_hwdiagctrl,
- diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
-
- if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
- val = read_7220_creg32(dd, cr_ibsymbolerr);
- if (ppd->cpspec->ibdeltainprog)
- val -= val - ppd->cpspec->ibsymsnap;
- val -= ppd->cpspec->ibsymdelta;
- write_7220_creg(dd, cr_ibsymbolerr, val);
- }
- if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
- val = read_7220_creg32(dd, cr_iblinkerrrecov);
- if (ppd->cpspec->ibdeltainprog)
- val -= val - ppd->cpspec->iblnkerrsnap;
- val -= ppd->cpspec->iblnkerrdelta;
- write_7220_creg(dd, cr_iblinkerrrecov, val);
- }
-
- /* and disable counter writes */
- qib_write_kreg(dd, kr_hwdiagctrl, diagc);
- }
- qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- wake_up(&ppd->cpspec->autoneg_wait);
- cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
-
- shutdown_7220_relock_poll(ppd->dd);
- val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
- val |= QLOGIC_IB_XGXS_RESET;
- qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
-}
-
-/**
- * qib_setup_7220_setextled - set the state of the two external LEDs
- * @ppd: the qlogic_ib device
- * @on: whether the link is up or not
- *
- * The exact combo of LEDs when @on is true is determined by looking
- * at the ibcstatus.
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state).
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 extctl, ledblink = 0, val, lst, ltst;
- unsigned long flags;
-
- /*
- * The diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running.
- */
- if (dd->diag_client)
- return;
-
- if (ppd->led_override) {
- ltst = (ppd->led_override & QIB_LED_PHYS) ?
- IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
- lst = (ppd->led_override & QIB_LED_LOG) ?
- IB_PORT_ACTIVE : IB_PORT_DOWN;
- } else if (on) {
- val = qib_read_kreg64(dd, kr_ibcstatus);
- ltst = qib_7220_phys_portstate(val);
- lst = qib_7220_iblink_state(val);
- } else {
- ltst = 0;
- lst = 0;
- }
-
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
- SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
- if (ltst == IB_PHYSPORTSTATE_LINKUP) {
- extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
- /*
- * counts are in chip clock (4ns) periods.
- * This is 1/16 sec (66.6ms) on,
- * 3/16 sec (187.5 ms) off, with packets rcvd
- */
- ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
- | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
- }
- if (lst == IB_PORT_ACTIVE)
- extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
- dd->cspec->extctrl = extctl;
- qib_write_kreg(dd, kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-
- if (ledblink) /* blink the LED on packet receive */
- qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
-}
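-
-/*
- * Worked numbers for the blink setup above (illustration only, no new
- * behavior): at 4 ns per chip clock, 66600 * 1000 / 4 = 16,650,000
- * clocks == 66.6 ms on, and 187500 * 1000 / 4 = 46,875,000 clocks
- * == 187.5 ms off, i.e. roughly a 1:3 on/off duty cycle while packets
- * are being received.
- */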
-
-/*
- * qib_setup_7220_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the qlogic_ib device
- *
- * This is called during driver unload.
- */
-static void qib_setup_7220_cleanup(struct qib_devdata *dd)
-{
- qib_free_irq(dd);
- kfree(dd->cspec->cntrs);
- kfree(dd->cspec->portcntrs);
-}
-
-/*
- * This is only called for SDmaInt.
- * SDmaDisabled is handled on the error path.
- */
-static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- switch (ppd->sdma_state.current_state) {
- case qib_sdma_state_s00_hw_down:
- break;
-
- case qib_sdma_state_s10_hw_start_up_wait:
- __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
- break;
-
- case qib_sdma_state_s20_idle:
- break;
-
- case qib_sdma_state_s30_sw_clean_up_wait:
- break;
-
- case qib_sdma_state_s40_hw_clean_up_wait:
- break;
-
- case qib_sdma_state_s50_hw_halt_wait:
- __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
- break;
-
- case qib_sdma_state_s99_running:
- /* too chatty to print here */
- __qib_sdma_intr(ppd);
- break;
- }
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (needint) {
- if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
- goto done;
- /*
- * blip the availupd off, next write will be on, so
- * we ensure an avail update, regardless of threshold or
- * buffers becoming free, whenever we want an interrupt
- */
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
- ~SYM_MASK(SendCtrl, SendBufAvailUpd));
- qib_write_kreg(dd, kr_scratch, 0ULL);
- dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
- } else
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0ULL);
-done:
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-}
-
-/*
- * Handle errors and unusual events first, separate function
- * to improve cache hits for fast path interrupt handling.
- */
-static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
-{
- if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
- qib_dev_err(dd,
- "interrupt with unknown interrupts %Lx set\n",
- istat & ~QLOGIC_IB_I_BITSEXTANT);
-
- if (istat & QLOGIC_IB_I_GPIO) {
- u32 gpiostatus;
-
- /*
- * Boards for this chip currently don't use GPIO interrupts,
- * so clear by writing GPIOstatus to GPIOclear, and complain
- * to alert the developer. To avoid endless repeats, clear
- * the bits in the mask, since there is some kind of
- * programming error or chip problem.
- */
- gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
- /*
- * In theory, writing GPIOstatus to GPIOclear could
- * have a bad side-effect on some diagnostic that wanted
- * to poll for a status-change, but the various shadows
- * make that problematic at best. Diags will just suppress
- * all GPIO interrupts during such tests.
- */
- qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
-
- if (gpiostatus) {
- const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
- u32 gpio_irq = mask & gpiostatus;
-
- /*
- * A bit set in status and (chip) Mask register
- * would cause an interrupt. Since we are not
- * expecting any, report it. Also check that the
- * chip reflects our shadow, report issues,
- * and refresh from the shadow.
- *
- * Clear any troublemakers, and update chip
- * from shadow.
- */
- dd->cspec->gpio_mask &= ~gpio_irq;
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- }
- }
-
- if (istat & QLOGIC_IB_I_ERROR) {
- u64 estat;
-
- qib_stats.sps_errints++;
- estat = qib_read_kreg64(dd, kr_errstatus);
- if (!estat)
- qib_devinfo(dd->pcidev,
- "error interrupt (%Lx), but no error bits set!\n",
- istat);
- else
- handle_7220_errors(dd, estat);
- }
-}
-
-static irqreturn_t qib_7220intr(int irq, void *data)
-{
- struct qib_devdata *dd = data;
- irqreturn_t ret;
- u64 istat;
- u64 ctxtrbits;
- u64 rmask;
- unsigned i;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- ret = IRQ_HANDLED;
- goto bail;
- }
-
- istat = qib_read_kreg64(dd, kr_intstatus);
-
- if (unlikely(!istat)) {
- ret = IRQ_NONE; /* not our interrupt, or already handled */
- goto bail;
- }
- if (unlikely(istat == -1)) {
- qib_bad_intrstatus(dd);
- /* don't know if it was our interrupt or not */
- ret = IRQ_NONE;
- goto bail;
- }
-
- this_cpu_inc(*dd->int_counter);
- if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
- QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
- unlikely_7220_intr(dd, istat);
-
- /*
- * Clear the interrupt bits we found set, relatively early, so we
- * "know" the chip will have seen this by the time we process
- * the queue, and will re-interrupt if necessary. The processor
- * itself won't take the interrupt again until we return.
- */
- qib_write_kreg(dd, kr_intclear, istat);
-
- /*
- * Handle kernel receive queues before checking for pio buffers
- * available since receives can overflow; piobuf waiters can afford
- * a few extra cycles, since they were waiting anyway.
- */
- ctxtrbits = istat &
- ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
- if (ctxtrbits) {
- rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
- for (i = 0; i < dd->first_user_ctxt; i++) {
- if (ctxtrbits & rmask) {
- ctxtrbits &= ~rmask;
- qib_kreceive(dd->rcd[i], NULL, NULL);
- }
- rmask <<= 1;
- }
- if (ctxtrbits) {
- ctxtrbits =
- (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
- qib_handle_urcv(dd, ctxtrbits);
- }
- }
-
- /* only call for SDmaInt */
- if (istat & QLOGIC_IB_I_SDMAINT)
- sdma_7220_intr(dd->pport, istat);
-
- if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
- qib_ib_piobufavail(dd);
-
- ret = IRQ_HANDLED;
-bail:
- return ret;
-}
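-
-/*
- * Sketch of the receive-bit walk in qib_7220intr() above (values
- * illustrative): context i owns interrupt bits
- * (QLOGIC_IB_I_RCVAVAIL_SHIFT + i) and (QLOGIC_IB_I_RCVURG_SHIFT + i).
- * rmask initially covers both bits for context 0 and is shifted left
- * once per kernel context, so a single pass calls qib_kreceive() for
- * every kernel context with either bit set; whatever remains belongs
- * to user contexts and is folded down and passed to qib_handle_urcv().
- */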
-
-/*
- * Set up our chip-specific interrupt handler.
- * The interrupt type has already been setup, so
- * we just need to do the registration and error checking.
- * If we are using MSI interrupts, we may fall back to
- * INTx later, if the interrupt handler doesn't get called
- * within 1/2 second (see verify_interrupt()).
- */
-static void qib_setup_7220_interrupt(struct qib_devdata *dd)
-{
- int ret;
-
- ret = pci_request_irq(dd->pcidev, 0, qib_7220intr, NULL, dd,
- QIB_DRV_NAME);
- if (ret)
- qib_dev_err(dd, "Couldn't setup %s interrupt (irq=%d): %d\n",
- dd->pcidev->msi_enabled ? "MSI" : "INTx",
- pci_irq_vector(dd->pcidev, 0), ret);
-}
-
-/**
- * qib_7220_boardname - fill in the board name
- * @dd: the qlogic_ib device
- *
- * info is based on the board revision register
- */
-static void qib_7220_boardname(struct qib_devdata *dd)
-{
- u32 boardid;
-
- boardid = SYM_FIELD(dd->revision, Revision,
- BoardID);
-
- switch (boardid) {
- case 1:
- dd->boardname = "InfiniPath_QLE7240";
- break;
- case 2:
- dd->boardname = "InfiniPath_QLE7280";
- break;
- default:
- qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
- dd->boardname = "Unknown_InfiniPath_7220";
- break;
- }
-
- if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
- qib_dev_err(dd,
- "Unsupported InfiniPath hardware revision %u.%u!\n",
- dd->majrev, dd->minrev);
-
- snprintf(dd->boardversion, sizeof(dd->boardversion),
- "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
- QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
- (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
- dd->majrev, dd->minrev,
- (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
-}
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context.
- */
-static int qib_setup_7220_reset(struct qib_devdata *dd)
-{
- u64 val;
- int i;
- int ret;
- u16 cmdval;
- u8 int_line, clinesz;
- unsigned long flags;
-
- qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
-
- /* Use dev_err so it shows up in logs, etc. */
- qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
-
- /* no interrupts till re-initted */
- qib_7220_set_intr_state(dd, 0);
-
- dd->pport->cpspec->ibdeltainprog = 0;
- dd->pport->cpspec->ibsymdelta = 0;
- dd->pport->cpspec->iblnkerrdelta = 0;
-
- /*
- * Keep chip from being accessed until we are ready. Use
- * writeq() directly, to allow the write even though QIB_PRESENT
- * isn't set.
- */
- dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
- /* so we check interrupts work again */
- dd->z_int_counter = qib_int_counter(dd);
- val = dd->control | QLOGIC_IB_C_RESET;
- writeq(val, &dd->kregbase[kr_control]);
- mb(); /* prevent compiler reordering around actual reset */
-
- for (i = 1; i <= 5; i++) {
- /*
- * Allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 2000);
-
- qib_pcie_reenable(dd, cmdval, int_line, clinesz);
-
- /*
- * Use readq directly, so we don't need to mark it as PRESENT
- * until we get a successful indication that all is well.
- */
- val = readq(&dd->kregbase[kr_revision]);
- if (val == dd->revision) {
- dd->flags |= QIB_PRESENT; /* it's back */
- ret = qib_reinit_intr(dd);
- goto bail;
- }
- }
- ret = 0; /* failed */
-
-bail:
- if (ret) {
- if (qib_pcie_params(dd, dd->lbus_width, NULL))
- qib_dev_err(dd,
- "Reset failed to setup PCIe or interrupts; continuing anyway\n");
-
- /* hold IBC in reset, no sends, etc till later */
- qib_write_kreg(dd, kr_control, 0ULL);
-
- /* clear the reset error, init error/hwerror mask */
- qib_7220_init_hwerrors(dd);
-
- /* do setup similar to speed or link-width changes */
- if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
- dd->cspec->presets_needed = 1;
- spin_lock_irqsave(&dd->pport->lflags_lock, flags);
- dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
- dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
- }
-
- return ret;
-}
-
-/**
- * qib_7220_put_tid - write a TID to the chip
- * @dd: the qlogic_ib device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: 0 for eager, 1 for expected
- * @pa: physical address of in memory buffer; tidinvalid if freeing
- */
-static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- if (pa != dd->tidinvalid) {
- u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
-
- /* paranoia checks */
- if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
- qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
- pa);
- return;
- }
- if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
- qib_dev_err(dd,
- "Physical page address 0x%lx larger than supported\n",
- pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- chippa |= dd->tidtemplate;
- else /* for now, always full 4KB page */
- chippa |= IBA7220_TID_SZ_4K;
- pa = chippa;
- }
- writeq(pa, tidptr);
-}
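-
-/*
- * Worked example for the paranoia checks above, assuming
- * IBA7220_TID_PA_SHIFT is the 2KB (11-bit) alignment shift implied by
- * the error message: for a hypothetical 2KB-aligned pa of 0x12345800,
- * chippa = pa >> 11 drops the alignment bits, shifting back reproduces
- * pa exactly, so the check passes and the eager/expected size bits are
- * OR'd into chippa before the writeq().
- */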
-
-/**
- * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
- * @dd: the qlogic_ib device
- * @rcd: the ctxt
- *
- * clear all TID entries for a ctxt, expected and eager.
- * Used from qib_close(). On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void qib_7220_clear_tids(struct qib_devdata *dd,
- struct qib_ctxtdata *rcd)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- u32 ctxt;
- int i;
-
- if (!dd->kregbase || !rcd)
- return;
-
- ctxt = rcd->ctxt;
-
- tidinv = dd->tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->kregbase) +
- dd->rcvtidbase +
- ctxt * dd->rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->rcvtidcnt; i++)
- qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->kregbase) +
- dd->rcvegrbase +
- rcd->rcvegr_tid_base * sizeof(*tidbase));
-
- for (i = 0; i < rcd->rcvegrcnt; i++)
- qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * qib_7220_tidtemplate - setup constants for TID updates
- * @dd: the qlogic_ib device
- *
- * We set up things that we use a lot, to avoid calculating them each time
- */
-static void qib_7220_tidtemplate(struct qib_devdata *dd)
-{
- if (dd->rcvegrbufsize == 2048)
- dd->tidtemplate = IBA7220_TID_SZ_2K;
- else if (dd->rcvegrbufsize == 4096)
- dd->tidtemplate = IBA7220_TID_SZ_4K;
- dd->tidinvalid = 0;
-}
-
-/**
- * qib_7220_get_base_info - set chip-specific flags for user code
- * @rcd: the qlogic_ib ctxt
- * @kinfo: qib_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
- struct qib_base_info *kinfo)
-{
- kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
- QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
-
- if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
- kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
-
- return 0;
-}
-
-static struct qib_message_header *
-qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
-{
- u32 offset = qib_hdrget_offset(rhf_addr);
-
- return (struct qib_message_header *)
- (rhf_addr - dd->rhf_offset + offset);
-}
-
-static void qib_7220_config_ctxts(struct qib_devdata *dd)
-{
- unsigned long flags;
- u32 nchipctxts;
-
- nchipctxts = qib_read_kreg32(dd, kr_portcnt);
- dd->cspec->numctxts = nchipctxts;
- if (qib_n_krcv_queues > 1) {
- dd->qpn_mask = 0x3e;
- dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
- if (dd->first_user_ctxt > nchipctxts)
- dd->first_user_ctxt = nchipctxts;
- } else
- dd->first_user_ctxt = dd->num_pports;
- dd->n_krcv_queues = dd->first_user_ctxt;
-
- if (!qib_cfgctxts) {
- int nctxts = dd->first_user_ctxt + num_online_cpus();
-
- if (nctxts <= 5)
- dd->ctxtcnt = 5;
- else if (nctxts <= 9)
- dd->ctxtcnt = 9;
- else if (nctxts <= nchipctxts)
- dd->ctxtcnt = nchipctxts;
- } else if (qib_cfgctxts <= nchipctxts)
- dd->ctxtcnt = qib_cfgctxts;
- if (!dd->ctxtcnt) /* none of the above, set to max */
- dd->ctxtcnt = nchipctxts;
-
- /*
- * Chip can be configured for 5, 9, or 17 ctxts, and choice
- * affects number of eager TIDs per ctxt (1K, 2K, 4K).
- * Lock to be paranoid about later motion, etc.
- */
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- if (dd->ctxtcnt > 9)
- dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
- else if (dd->ctxtcnt > 5)
- dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
- /* else configure for default 5 receive ctxts */
- if (dd->qpn_mask)
- dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-
- /* kr_rcvegrcnt changes based on the number of contexts enabled */
- dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
- dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
-}
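-
-/*
- * Illustrative sizing walk-through for qib_7220_config_ctxts() above
- * (hypothetical module-parameter values): with qib_n_krcv_queues = 2,
- * one port, and qib_cfgctxts unset on an 8-CPU host, first_user_ctxt
- * is 2 and nctxts = 2 + 8 = 10; that exceeds both the 5- and 9-context
- * buckets, so all 17 chip contexts are enabled and rcvctrl gets the
- * 17-context encoding (2ULL << IBA7220_R_CTXTCFG_SHIFT).
- */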
-
-static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
-{
- int lsb, ret = 0;
- u64 maskr; /* right-justified mask */
-
- switch (which) {
- case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
- ret = ppd->link_width_enabled;
- goto done;
-
- case QIB_IB_CFG_LWID: /* Get currently active Link-width */
- ret = ppd->link_width_active;
- goto done;
-
- case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
- ret = ppd->link_speed_enabled;
- goto done;
-
- case QIB_IB_CFG_SPD: /* Get current Link spd */
- ret = ppd->link_speed_active;
- goto done;
-
- case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
- lsb = IBA7220_IBC_RXPOL_SHIFT;
- maskr = IBA7220_IBC_RXPOL_MASK;
- break;
-
- case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
- lsb = IBA7220_IBC_LREV_SHIFT;
- maskr = IBA7220_IBC_LREV_MASK;
- break;
-
- case QIB_IB_CFG_LINKLATENCY:
- ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
- & IBA7220_DDRSTAT_LINKLAT_MASK;
- goto done;
-
- case QIB_IB_CFG_OP_VLS:
- ret = ppd->vls_operational;
- goto done;
-
- case QIB_IB_CFG_VL_HIGH_CAP:
- ret = 0;
- goto done;
-
- case QIB_IB_CFG_VL_LOW_CAP:
- ret = 0;
- goto done;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
- OverrunThreshold);
- goto done;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
- PhyerrThreshold);
- goto done;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- ret = (ppd->cpspec->ibcctrl &
- SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
- IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
- goto done;
-
- case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
- lsb = IBA7220_IBC_HRTBT_SHIFT;
- maskr = IBA7220_IBC_HRTBT_MASK;
- break;
-
- case QIB_IB_CFG_PMA_TICKS:
- /*
- * 0x00 = 10x link transfer rate, or 4 nsec for 2.5 Gbs.
- * Since the clock is always 250MHz, the value is 1 or 0.
- */
- ret = (ppd->link_speed_active == QIB_IB_DDR);
- goto done;
-
- default:
- ret = -EINVAL;
- goto done;
- }
- ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
-done:
- return ret;
-}
-
-static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 maskr; /* right-justified mask */
- int lsb, ret = 0, setforce = 0;
- u16 lcmd, licmd;
- unsigned long flags;
- u32 tmp = 0;
-
- switch (which) {
- case QIB_IB_CFG_LIDLMC:
- /*
- * Set LID and LMC. Combined to avoid a possible hazard;
- * the caller puts LMC in the 16 MSbits, DLID in the 16 LSbits of val.
- */
- lsb = IBA7220_IBC_DLIDLMC_SHIFT;
- maskr = IBA7220_IBC_DLIDLMC_MASK;
- break;
-
- case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
- /*
- * As with speed, only write the actual register if
- * the link is currently down, otherwise takes effect
- * on next link change.
- */
- ppd->link_width_enabled = val;
- if (!(ppd->lflags & QIBL_LINKDOWN))
- goto bail;
- /*
- * We set the QIBL_IB_FORCE_NOTIFY bit so updown
- * will get called, because we want to update
- * link_width_active, and the change may not take
- * effect for some time (if we are in POLL), so this
- * flag will force the updown routine to be called
- * on the next ibstatuschange down interrupt, even
- * if it's not a down->up transition.
- */
- val--; /* convert from IB to chip */
- maskr = IBA7220_IBC_WIDTH_MASK;
- lsb = IBA7220_IBC_WIDTH_SHIFT;
- setforce = 1;
- break;
-
- case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
- /*
- * If we turn off IB1.2, need to preset SerDes defaults,
- * but not right now. Set a flag for the next time
- * we command the link down. As with width, only write the
- * actual register if the link is currently down, otherwise
- * takes effect on next link change. Since setting is being
- * explicitly requested (via MAD or sysfs), clear autoneg
- * failure status if speed autoneg is enabled.
- */
- ppd->link_speed_enabled = val;
- if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
- !(val & (val - 1)))
- dd->cspec->presets_needed = 1;
- if (!(ppd->lflags & QIBL_LINKDOWN))
- goto bail;
- /*
- * We set the QIBL_IB_FORCE_NOTIFY bit so updown
- * will get called, because we want to update
- * link_speed_active, and the change may not take
- * effect for some time (if we are in POLL), so this
- * flag will force the updown routine to be called
- * on the next ibstatuschange down interrupt, even
- * if it's not a down->up transition.
- */
- if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
- val = IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else
- val = val == QIB_IB_DDR ?
- IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
- maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- /* IBTA 1.2 mode + speed bits are contiguous */
- lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
- setforce = 1;
- break;
-
- case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
- lsb = IBA7220_IBC_RXPOL_SHIFT;
- maskr = IBA7220_IBC_RXPOL_MASK;
- break;
-
- case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
- lsb = IBA7220_IBC_LREV_SHIFT;
- maskr = IBA7220_IBC_LREV_MASK;
- break;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
- OverrunThreshold);
- if (maskr != val) {
- ppd->cpspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, OverrunThreshold);
- ppd->cpspec->ibcctrl |= (u64) val <<
- SYM_LSB(IBCCtrl, OverrunThreshold);
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- goto bail;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
- PhyerrThreshold);
- if (maskr != val) {
- ppd->cpspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, PhyerrThreshold);
- ppd->cpspec->ibcctrl |= (u64) val <<
- SYM_LSB(IBCCtrl, PhyerrThreshold);
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- goto bail;
-
- case QIB_IB_CFG_PKEYS: /* update pkeys */
- maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
- ((u64) ppd->pkeys[2] << 32) |
- ((u64) ppd->pkeys[3] << 48);
- qib_write_kreg(dd, kr_partitionkey, maskr);
- goto bail;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- if (val == IB_LINKINITCMD_POLL)
- ppd->cpspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
- else /* SLEEP */
- ppd->cpspec->ibcctrl |=
- SYM_MASK(IBCCtrl, LinkDownDefaultState);
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- goto bail;
-
- case QIB_IB_CFG_MTU: /* update the MTU in IBC */
- /*
- * Update our housekeeping variables, and set IBC max
- * size, same as init code; max IBC is max we allow in
- * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
- * Set even if it's unchanged; print debug message only
- * on changes.
- */
- val = (ppd->ibmaxlen >> 2) + 1;
- ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
- ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- goto bail;
-
- case QIB_IB_CFG_LSTATE: /* set the IB link state */
- switch (val & 0xffff0000) {
- case IB_LINKCMD_DOWN:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
- if (!ppd->cpspec->ibdeltainprog &&
- qib_compat_ddr_negotiate) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap =
- read_7220_creg32(dd, cr_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap =
- read_7220_creg32(dd, cr_iblinkerrrecov);
- }
- break;
-
- case IB_LINKCMD_ARMED:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
- break;
-
- case IB_LINKCMD_ACTIVE:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
- goto bail;
- }
- switch (val & 0xffff) {
- case IB_LINKINITCMD_NOP:
- licmd = 0;
- break;
-
- case IB_LINKINITCMD_POLL:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
- break;
-
- case IB_LINKINITCMD_SLEEP:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
- break;
-
- case IB_LINKINITCMD_DISABLE:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
- ppd->cpspec->chase_end = 0;
- /*
- * stop state chase counter and timer, if running;
- * wait for pending timer, but don't clear .data (ppd)!
- */
- if (ppd->cpspec->chase_timer.expires) {
- timer_delete_sync(&ppd->cpspec->chase_timer);
- ppd->cpspec->chase_timer.expires = 0;
- }
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
- val & 0xffff);
- goto bail;
- }
- qib_set_ib_7220_lstate(ppd, lcmd, licmd);
-
- maskr = IBA7220_IBC_WIDTH_MASK;
- lsb = IBA7220_IBC_WIDTH_SHIFT;
- tmp = (ppd->cpspec->ibcddrctrl >> lsb) & maskr;
- /* If the width active on the chip does not match the
- * width in the shadow register, write the new active
- * width to the chip.
- * We don't have to worry about speed as the speed is taken
- * care of by set_7220_ibspeed_fast called by ib_updown.
- */
- if (ppd->link_width_enabled-1 != tmp) {
- ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
- ppd->cpspec->ibcddrctrl |=
- (((u64)(ppd->link_width_enabled-1) & maskr) <<
- lsb);
- qib_write_kreg(dd, kr_ibcddrctrl,
- ppd->cpspec->ibcddrctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
- goto bail;
-
- case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
- if (val > IBA7220_IBC_HRTBT_MASK) {
- ret = -EINVAL;
- goto bail;
- }
- lsb = IBA7220_IBC_HRTBT_SHIFT;
- maskr = IBA7220_IBC_HRTBT_MASK;
- break;
-
- default:
- ret = -EINVAL;
- goto bail;
- }
- ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
- ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
- qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- if (setforce) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-bail:
- return ret;
-}
-
-static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
-{
- int ret = 0;
- u64 val, ddr;
-
- if (!strncmp(what, "ibc", 3)) {
- ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
- val = 0; /* disable heart beat, so link will come up */
- qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
- ppd->dd->unit, ppd->port);
- } else if (!strncmp(what, "off", 3)) {
- ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
- /* enable heart beat again */
- val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
- qib_devinfo(ppd->dd->pcidev,
- "Disabling IB%u:%u IBC loopback (normal)\n",
- ppd->dd->unit, ppd->port);
- } else
- ret = -EINVAL;
- if (!ret) {
- qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
- << IBA7220_IBC_HRTBT_SHIFT);
- ppd->cpspec->ibcddrctrl = ddr | val;
- qib_write_kreg(ppd->dd, kr_ibcddrctrl,
- ppd->cpspec->ibcddrctrl);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
- }
- return ret;
-}
-
-static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd, u32 npkts)
-{
- if (updegr)
- qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
-}
-
-static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
-{
- u32 head, tail;
-
- head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
- if (rcd->rcvhdrtail_kvaddr)
- tail = qib_get_rcvhdrtail(rcd);
- else
- tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
- return head == tail;
-}
-
-/*
- * Modify the RCVCTRL register in a chip-specific way. This
- * is a function because bit positions and (future) register
- * location are chip-specific, but the needed operations are
- * generic. <op> is a bit-mask because we often want to
- * do multiple modifications.
- */
-static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
- int ctxt)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 mask, val;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- if (op & QIB_RCVCTRL_TAILUPD_ENB)
- dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
- if (op & QIB_RCVCTRL_TAILUPD_DIS)
- dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
- if (op & QIB_RCVCTRL_PKEY_ENB)
- dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
- if (op & QIB_RCVCTRL_PKEY_DIS)
- dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
- if (ctxt < 0)
- mask = (1ULL << dd->ctxtcnt) - 1;
- else
- mask = (1ULL << ctxt);
- if (op & QIB_RCVCTRL_CTXT_ENB) {
- /* always done for specific ctxt */
- dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
- if (!(dd->flags & QIB_NODMA_RTAIL))
- dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
- /* Write these registers before the context is enabled. */
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
- dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
- dd->rcd[ctxt]->rcvhdrq_phys);
- dd->rcd[ctxt]->seq_cnt = 1;
- }
- if (op & QIB_RCVCTRL_CTXT_DIS)
- dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
- if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
- dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
- if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
- dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
- /* arm rcv interrupt */
- val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
- dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_ENB) {
- /*
- * Init the context registers also; if we were
- * disabled, tail and head should both be zero
- * already from the enable, but since we don't
- * know, we have to do it explicitly.
- */
- val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
- qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
-
- val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
- dd->rcd[ctxt]->head = val;
- /* If kctxt, interrupt on next receive. */
- if (ctxt < dd->first_user_ctxt)
- val |= dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_DIS) {
- if (ctxt >= 0) {
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
- } else {
- unsigned i;
-
- for (i = 0; i < dd->cfgctxts; i++) {
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
- i, 0);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
- }
- }
- }
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-}
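-
-/*
- * Mask illustration for rcvctrl_7220_mod() above (hypothetical counts):
- * with ctxt < 0 and dd->ctxtcnt == 9, mask = (1ULL << 9) - 1 == 0x1ff,
- * so a single op touches all nine contexts; with ctxt == 3 it is just
- * 1ULL << 3 == 0x8, and only that context's bits move.
- */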
-
-/*
- * Modify the SENDCTRL register in chip-specific way. This
- * is a function there may be multiple such registers with
- * slightly different layouts. To start, we assume the
- * "canonical" register layout of the first chips.
- * Chip requires no back-back sendctrl writes, so write
- * scratch register after writing sendctrl
- */
-static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 tmp_dd_sendctrl;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
-
- /* First the ones that are "sticky", saved in shadow */
- if (op & QIB_SENDCTRL_CLEAR)
- dd->sendctrl = 0;
- if (op & QIB_SENDCTRL_SEND_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
- else if (op & QIB_SENDCTRL_SEND_ENB) {
- dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
- if (dd->flags & QIB_USE_SPCL_TRIG)
- dd->sendctrl |= SYM_MASK(SendCtrl,
- SSpecialTriggerEn);
- }
- if (op & QIB_SENDCTRL_AVAIL_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
- else if (op & QIB_SENDCTRL_AVAIL_ENB)
- dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
-
- if (op & QIB_SENDCTRL_DISARM_ALL) {
- u32 i, last;
-
- tmp_dd_sendctrl = dd->sendctrl;
- /*
- * disarm any that are not yet launched, disabling sends
- * and updates until done.
- */
- last = dd->piobcnt2k + dd->piobcnt4k;
- tmp_dd_sendctrl &=
- ~(SYM_MASK(SendCtrl, SPioEnable) |
- SYM_MASK(SendCtrl, SendBufAvailUpd));
- for (i = 0; i < last; i++) {
- qib_write_kreg(dd, kr_sendctrl,
- tmp_dd_sendctrl |
- SYM_MASK(SendCtrl, Disarm) | i);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- }
-
- tmp_dd_sendctrl = dd->sendctrl;
-
- if (op & QIB_SENDCTRL_FLUSH)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
- if (op & QIB_SENDCTRL_DISARM)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
- ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<
- SYM_LSB(SendCtrl, DisarmPIOBuf));
- if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
- (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
- tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
-
- qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- if (op & QIB_SENDCTRL_AVAIL_BLIP) {
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-
- if (op & QIB_SENDCTRL_FLUSH) {
- u32 v;
- /*
- * ensure writes have hit chip, then do a few
- * more reads, to allow DMA of pioavail registers
- * to occur, so in-memory copy is in sync with
- * the chip. Not always safe to sleep.
- */
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- qib_read_kreg32(dd, kr_scratch);
- }
-}
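-
-/*
- * Disarm illustration for sendctrl_7220_mod() above: QIB_SENDCTRL_DISARM_ALL
- * walks all piobcnt2k + piobcnt4k PIO buffers, writing Disarm | i for
- * each with SPioEnable and SendBufAvailUpd held off, and with a scratch
- * write after every sendctrl write to honor the no-back-to-back rule
- * noted in the comment above.
- */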
-
-/**
- * qib_portcntr_7220 - read a per-port counter
- * @ppd: the qlogic_ib device
- * @reg: the counter to snapshot
- */
-static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
-{
- u64 ret = 0ULL;
- struct qib_devdata *dd = ppd->dd;
- u16 creg;
- /* 0xffff for unimplemented or synthesized counters */
- static const u16 xlator[] = {
- [QIBPORTCNTR_PKTSEND] = cr_pktsend,
- [QIBPORTCNTR_WORDSEND] = cr_wordsend,
- [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
- [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
- [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
- [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
- [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
- [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
- [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
- [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
- [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
- [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
- [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
- [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
- [QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
- [QIBPORTCNTR_ERRICRC] = cr_erricrc,
- [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
- [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
- [QIBPORTCNTR_BADFORMAT] = cr_badformat,
- [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
- [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
- [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
- [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
- [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
- [QIBPORTCNTR_ERRLINK] = cr_errlink,
- [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
- [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
- [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
- [QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
- [QIBPORTCNTR_PSSTART] = cr_psstart,
- [QIBPORTCNTR_PSSTAT] = cr_psstat,
- [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
- [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
- [QIBPORTCNTR_KHDROVFL] = 0xffff,
- };
-
- if (reg >= ARRAY_SIZE(xlator)) {
- qib_devinfo(ppd->dd->pcidev,
- "Unimplemented portcounter %u\n", reg);
- goto done;
- }
- creg = xlator[reg];
-
- if (reg == QIBPORTCNTR_KHDROVFL) {
- int i;
-
- /* sum over all kernel contexts */
- for (i = 0; i < dd->first_user_ctxt; i++)
- ret += read_7220_creg32(dd, cr_portovfl + i);
- }
- if (creg == 0xffff)
- goto done;
-
- /*
- * only fast-incrementing counters are 64-bit; use 32-bit reads to
- * avoid two independent reads when on Opteron
- */
- if ((creg == cr_wordsend || creg == cr_wordrcv ||
- creg == cr_pktsend || creg == cr_pktrcv))
- ret = read_7220_creg(dd, creg);
- else
- ret = read_7220_creg32(dd, creg);
- if (creg == cr_ibsymbolerr) {
- if (dd->pport->cpspec->ibdeltainprog)
- ret -= ret - ppd->cpspec->ibsymsnap;
- ret -= dd->pport->cpspec->ibsymdelta;
- } else if (creg == cr_iblinkerrrecov) {
- if (dd->pport->cpspec->ibdeltainprog)
- ret -= ret - ppd->cpspec->iblnkerrsnap;
- ret -= dd->pport->cpspec->iblnkerrdelta;
- }
-done:
- return ret;
-}
-
-/*
- * Device counter names (not port-specific), one line per stat,
- * single string. Used by utilities like ipathstats to print the stats
- * in a way which works for different versions of drivers, without changing
- * the utility. Names need to be 12 chars or less (w/o newline), for proper
- * display by utility.
- * Non-error counters are first.
- * Start of "error" counters is indicated by a leading "E " on the first
- * "error" counter, and doesn't count in label length.
- * The EgrOvfl list needs to be last so we truncate them at the configured
- * context count for the device.
- * cntr7220indices contains the corresponding register indices.
- */
-static const char cntr7220names[] =
- "Interrupts\n"
- "HostBusStall\n"
- "E RxTIDFull\n"
- "RxTIDInvalid\n"
- "Ctxt0EgrOvfl\n"
- "Ctxt1EgrOvfl\n"
- "Ctxt2EgrOvfl\n"
- "Ctxt3EgrOvfl\n"
- "Ctxt4EgrOvfl\n"
- "Ctxt5EgrOvfl\n"
- "Ctxt6EgrOvfl\n"
- "Ctxt7EgrOvfl\n"
- "Ctxt8EgrOvfl\n"
- "Ctxt9EgrOvfl\n"
- "Ctx10EgrOvfl\n"
- "Ctx11EgrOvfl\n"
- "Ctx12EgrOvfl\n"
- "Ctx13EgrOvfl\n"
- "Ctx14EgrOvfl\n"
- "Ctx15EgrOvfl\n"
- "Ctx16EgrOvfl\n";
-
-static const size_t cntr7220indices[] = {
- cr_lbint,
- cr_lbflowstall,
- cr_errtidfull,
- cr_errtidvalid,
- cr_portovfl + 0,
- cr_portovfl + 1,
- cr_portovfl + 2,
- cr_portovfl + 3,
- cr_portovfl + 4,
- cr_portovfl + 5,
- cr_portovfl + 6,
- cr_portovfl + 7,
- cr_portovfl + 8,
- cr_portovfl + 9,
- cr_portovfl + 10,
- cr_portovfl + 11,
- cr_portovfl + 12,
- cr_portovfl + 13,
- cr_portovfl + 14,
- cr_portovfl + 15,
- cr_portovfl + 16,
-};
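-
-/*
- * Note on the two arrays above: cntr7220names and cntr7220indices are
- * parallel; line i of the name string corresponds to cntr7220indices[i],
- * so qib_read_7220cntrs() below pairs read_7220_creg32(dd,
- * cntr7220indices[i]) with the i-th name. Any edit must keep them in
- * lockstep, including the trailing EgrOvfl entries that are truncated
- * at the configured context count.
- */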
-
-/*
- * same as cntr7220names and cntr7220indices, but for port-specific counters.
- * portcntr7220indices is somewhat complicated by some registers needing
- * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
- */
-static const char portcntr7220names[] =
- "TxPkt\n"
- "TxFlowPkt\n"
- "TxWords\n"
- "RxPkt\n"
- "RxFlowPkt\n"
- "RxWords\n"
- "TxFlowStall\n"
- "TxDmaDesc\n" /* 7220 and 7322-only */
- "E RxDlidFltr\n" /* 7220 and 7322-only */
- "IBStatusChng\n"
- "IBLinkDown\n"
- "IBLnkRecov\n"
- "IBRxLinkErr\n"
- "IBSymbolErr\n"
- "RxLLIErr\n"
- "RxBadFormat\n"
- "RxBadLen\n"
- "RxBufOvrfl\n"
- "RxEBP\n"
- "RxFlowCtlErr\n"
- "RxICRCerr\n"
- "RxLPCRCerr\n"
- "RxVCRCerr\n"
- "RxInvalLen\n"
- "RxInvalPKey\n"
- "RxPktDropped\n"
- "TxBadLength\n"
- "TxDropped\n"
- "TxInvalLen\n"
- "TxUnderrun\n"
- "TxUnsupVL\n"
- "RxLclPhyErr\n" /* 7220 and 7322-only */
- "RxVL15Drop\n" /* 7220 and 7322-only */
- "RxVlErr\n" /* 7220 and 7322-only */
- "XcessBufOvfl\n" /* 7220 and 7322-only */
- ;
-
-#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
-static const size_t portcntr7220indices[] = {
- QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
- cr_pktsendflow,
- QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
- QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
- cr_pktrcvflowctrl,
- QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
- cr_txsdmadesc,
- cr_rxdlidfltr,
- cr_ibstatuschange,
- QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
- QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
- cr_rcvflowctrl_err,
- QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
- cr_invalidslen,
- cr_senddropped,
- cr_errslen,
- cr_sendunderrun,
- cr_txunsupvl,
- QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
-};
-
-/* do all the setup to make the counter reads efficient later */
-static void init_7220_cntrnames(struct qib_devdata *dd)
-{
- int i, j = 0;
- char *s;
-
- for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
- i++) {
- /* we always have at least one counter before the egrovfl */
- if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
- j = 1;
- s = strchr(s + 1, '\n');
- if (s && j)
- j++;
- }
- dd->cspec->ncntrs = i;
- if (!s)
- /* full list; size is without terminating null */
- dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
- else
- dd->cspec->cntrnamelen = 1 + s - cntr7220names;
- dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
- GFP_KERNEL);
-
- for (i = 0, s = (char *)portcntr7220names; s; i++)
- s = strchr(s + 1, '\n');
- dd->cspec->nportcntrs = i - 1;
- dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
- dd->cspec->portcntrs = kmalloc_array(dd->cspec->nportcntrs,
- sizeof(u64),
- GFP_KERNEL);
-}
-
-static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
- u64 **cntrp)
-{
- u32 ret;
-
- if (!dd->cspec->cntrs) {
- ret = 0;
- goto done;
- }
-
- if (namep) {
- *namep = (char *)cntr7220names;
- ret = dd->cspec->cntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- } else {
- u64 *cntr = dd->cspec->cntrs;
- int i;
-
- ret = dd->cspec->ncntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
-
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->ncntrs; i++)
- *cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
- }
-done:
- return ret;
-}
-
-static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
- char **namep, u64 **cntrp)
-{
- u32 ret;
-
- if (!dd->cspec->portcntrs) {
- ret = 0;
- goto done;
- }
- if (namep) {
- *namep = (char *)portcntr7220names;
- ret = dd->cspec->portcntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- } else {
- u64 *cntr = dd->cspec->portcntrs;
- struct qib_pportdata *ppd = &dd->pport[port];
- int i;
-
- ret = dd->cspec->nportcntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->nportcntrs; i++) {
- if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
- *cntr++ = qib_portcntr_7220(ppd,
- portcntr7220indices[i] &
- ~_PORT_VIRT_FLAG);
- else
- *cntr++ = read_7220_creg32(dd,
- portcntr7220indices[i]);
- }
- }
-done:
- return ret;
-}
-
-/**
- * qib_get_7220_faststats - get word counters from chip before they overflow
- * @t: contains a pointer to the qlogic_ib device qib_devdata
- *
- * This needs more work; in particular, a decision on whether we really
- * need traffic_wds maintained the way it is.
- * Called from add_timer.
- */
-static void qib_get_7220_faststats(struct timer_list *t)
-{
- struct qib_devdata *dd = timer_container_of(dd, t, stats_timer);
- struct qib_pportdata *ppd = dd->pport;
- unsigned long flags;
- u64 traffic_wds;
-
- /*
- * don't access the chip while running diags, or memory diags can
- * fail
- */
- if (!(dd->flags & QIB_INITTED) || dd->diag_client)
- /* but re-arm the timer, for diags case; won't hurt otherwise */
- goto done;
-
- /*
- * We now try to maintain an activity timer, based on traffic
- * exceeding a threshold, so we need to check the word-counts
- * even if they are 64-bit.
- */
- traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
- qib_portcntr_7220(ppd, cr_wordrcv);
- spin_lock_irqsave(&dd->eep_st_lock, flags);
- traffic_wds -= dd->traffic_wds;
- dd->traffic_wds += traffic_wds;
- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-done:
- mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
-}
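-
-/*
- * Delta bookkeeping illustration for the faststats timer above
- * (hypothetical counter values): if the summed send+recv word counters
- * read 1,000,000 and dd->traffic_wds already holds 990,000, the
- * subtraction leaves the 10,000-word delta for this interval, which is
- * then folded back into dd->traffic_wds under eep_st_lock, keeping the
- * shadow equal to the running total.
- */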
-
-/*
- * If we are using MSI, try to fallback to INTx.
- */
-static int qib_7220_intr_fallback(struct qib_devdata *dd)
-{
- if (!dd->msi_lo)
- return 0;
-
- qib_devinfo(dd->pcidev,
- "MSI interrupt not detected, trying INTx interrupts\n");
-
- qib_free_irq(dd);
- dd->msi_lo = 0;
- if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_INTX) < 0)
- qib_dev_err(dd, "Failed to enable INTx\n");
- qib_setup_7220_interrupt(dd);
- return 1;
-}
-
-/*
- * Reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
-{
- u64 val, prev_val;
- struct qib_devdata *dd = ppd->dd;
-
- prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
- val = prev_val | QLOGIC_IB_XGXS_RESET;
- prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
- qib_write_kreg(dd, kr_control,
- dd->control & ~QLOGIC_IB_C_LINKENABLE);
- qib_write_kreg(dd, kr_xgxs_cfg, val);
- qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
- qib_write_kreg(dd, kr_control, dd->control);
-}
-
-/*
- * For this chip, we want to use the same buffer every time we
- * are trying to bring the link up (they are always VL15
- * packets). At that link state the packet should always go out immediately
- * (or at least be discarded at the tx interface if the link is down).
- * If it doesn't, and the buffer isn't available, that means some other
- * sender has gotten ahead of us, and is preventing our packet from going
- * out. In that case, we flush all packets, and try again. If that still
- * fails, we fail the request, and hope things work the next time around.
- *
- * We don't need very complicated heuristics on whether the packet had
- * time to go out or not, since even at SDR 1X, it goes out in very short
- * time periods, covered by the chip reads done here and as part of the
- * flush.
- */
-static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
-{
- u32 __iomem *buf;
- u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
- int do_cleanup;
- unsigned long flags;
-
- /*
- * always blip to get avail list updated, since it's almost
- * always needed, and is fairly cheap.
- */
- sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
- buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
- if (buf)
- goto done;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
- ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
- __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
- do_cleanup = 0;
- } else {
- do_cleanup = 1;
- qib_7220_sdma_hw_clean_up(ppd);
- }
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- if (do_cleanup) {
- qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
- buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
- }
-done:
- return buf;
-}
-
-/*
- * This code for non-IBTA-compliant IB speed negotiation is only known to
- * work for the SDR to DDR transition, and only between an HCA and a switch
- * with recent firmware. It is based on observed heuristics, rather than
- * actual knowledge of the non-compliant speed negotiation.
- * It has a number of hard-coded fields, since the hope is to rewrite this
- * when a spec is available on how the negotiation is intended to work.
- */
-static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
- u32 dcnt, u32 *data)
-{
- int i;
- u64 pbc;
- u32 __iomem *piobuf;
- u32 pnum;
- struct qib_devdata *dd = ppd->dd;
-
- i = 0;
- pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
- pbc |= PBC_7220_VL15_SEND;
- while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
- if (i++ > 5)
- return;
- udelay(2);
- }
- sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
- writeq(pbc, piobuf);
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, hdr, 7);
- qib_pio_copy(piobuf + 9, data, dcnt);
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf + spcl_off);
- }
- qib_flush_wc();
- qib_sendbuf_done(dd, pnum);
-}
-
-/*
- * _start packet gets sent twice at start, _done gets sent twice at end
- */
-static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
-{
- struct qib_devdata *dd = ppd->dd;
- static u32 swapped;
- u32 dw, i, hcnt, dcnt, *data;
- static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
- static u32 madpayload_start[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
- };
- static u32 madpayload_done[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x40000001, 0x1388, 0x15e, /* rest 0's */
- };
-
- dcnt = ARRAY_SIZE(madpayload_start);
- hcnt = ARRAY_SIZE(hdr);
- if (!swapped) {
- /* for maintainability, do it at runtime */
- for (i = 0; i < hcnt; i++) {
- dw = (__force u32) cpu_to_be32(hdr[i]);
- hdr[i] = dw;
- }
- for (i = 0; i < dcnt; i++) {
- dw = (__force u32) cpu_to_be32(madpayload_start[i]);
- madpayload_start[i] = dw;
- dw = (__force u32) cpu_to_be32(madpayload_done[i]);
- madpayload_done[i] = dw;
- }
- swapped = 1;
- }
-
- data = which ? madpayload_done : madpayload_start;
-
- autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
- qib_read_kreg64(dd, kr_scratch);
- udelay(2);
- autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
- qib_read_kreg64(dd, kr_scratch);
- udelay(2);
-}
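-
-/*
- * The swap-once idiom above in isolation (illustrative; the table and
- * function names are invented): convert a static host-order table to
- * big-endian in place on first use, guarded by a flag, rather than
- * hand-maintaining pre-swapped constants.
- */
-static u32 demo_tbl[4] = { 0x1810103, 0x1, 0x2c90000, 0x2c9 };
-
-static void demo_swap_once(void)
-{
- static u32 swapped;
- u32 i;
-
- if (swapped)
- return;
- for (i = 0; i < ARRAY_SIZE(demo_tbl); i++)
- demo_tbl[i] = (__force u32) cpu_to_be32(demo_tbl[i]);
- swapped = 1;
-}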
-
-/*
- * Do the absolute minimum to cause an IB speed change, and make it
- * ready, but don't actually trigger the change. The caller will
- * do that when ready (if link is in Polling training state, it will
- * happen immediately, otherwise when link next goes down)
- *
- * This routine should only be used as part of the DDR autonegotiation
- * code for devices that are not compliant with IB 1.2 (or code that
- * fixes things up for same).
- *
- * When the link has gone down and autoneg is enabled, or autoneg has
- * failed and we give up until next time, we set both speeds, and
- * then we want IBTA enabled as well as "use max enabled speed".
- */
-static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
-{
- ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK);
-
- if (speed == (QIB_IB_SDR | QIB_IB_DDR))
- ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- else
- ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
- IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
-
- qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
-}
-
-/*
- * This routine is only used when we are not talking to another
- * IB 1.2-compliant device that we think can do DDR.
- * (This includes all existing switch chips as of Oct 2007.)
- * 1.2-compliant devices go directly to DDR prior to reaching INIT
- */
-static void try_7220_autoneg(struct qib_pportdata *ppd)
-{
- unsigned long flags;
-
- /*
- * Required for older non-IB1.2 DDR switches. Newer
- * non-IB-compliant switches don't need it, but so far
- * aren't bothered by it either. The value written below is a
- * "magic constant".
- */
- qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- autoneg_7220_send(ppd, 0);
- set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
-
- toggle_7220_rclkrls(ppd->dd);
- /* 2 msec is minimum length of a poll cycle */
- queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
- msecs_to_jiffies(2));
-}
-
-/*
- * Handle the empirically determined mechanism for auto-negotiation
- * of DDR speed with switches.
- */
-static void autoneg_7220_work(struct work_struct *work)
-{
- struct qib_pportdata *ppd;
- struct qib_devdata *dd;
- u32 i;
- unsigned long flags;
-
- ppd = &container_of(work, struct qib_chippport_specific,
- autoneg_work.work)->pportdata;
- dd = ppd->dd;
-
- /*
- * Busy-wait for this first part; it should take at most a
- * few hundred usec, since we scheduled ourselves for 2 msec.
- */
- for (i = 0; i < 25; i++) {
- if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
- == IB_7220_LT_STATE_POLLQUIET) {
- qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
- break;
- }
- udelay(100);
- }
-
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- goto done; /* we got there early or were told to stop */
-
- /* we expect this to timeout */
- if (wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(90)))
- goto done;
-
- toggle_7220_rclkrls(dd);
-
- /* we expect this to timeout */
- if (wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(1700)))
- goto done;
-
- set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
- toggle_7220_rclkrls(dd);
-
- /*
- * Wait up to 250 msec for link to train and get to INIT at DDR;
- * this should terminate early.
- */
- wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(250));
-done:
- if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
- if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
- ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
- dd->cspec->autoneg_tries = 0;
- }
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
- }
-}
-
-static u32 qib_7220_iblink_state(u64 ibcs)
-{
- u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
-
- switch (state) {
- case IB_7220_L_STATE_INIT:
- state = IB_PORT_INIT;
- break;
- case IB_7220_L_STATE_ARM:
- state = IB_PORT_ARMED;
- break;
- case IB_7220_L_STATE_ACTIVE:
- case IB_7220_L_STATE_ACT_DEFER:
- state = IB_PORT_ACTIVE;
- break;
- default:
- fallthrough;
- case IB_7220_L_STATE_DOWN:
- state = IB_PORT_DOWN;
- break;
- }
- return state;
-}
-
-/* returns the IBTA port state, rather than the IBC link training state */
-static u8 qib_7220_phys_portstate(u64 ibcs)
-{
- u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
- return qib_7220_physportstate[state];
-}
-
-static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
-{
- int ret = 0, symadj = 0;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- if (!ibup) {
- /*
- * When the link goes down we don't want AEQ running, so it
- * won't interfere with IBC training, etc., and we need
- * to go back to the static SerDes preset values.
- */
- if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
- QIBL_IB_AUTONEG_INPROG)))
- set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- qib_sd7220_presets(dd);
- qib_cancel_sends(ppd); /* initial disarm, etc. */
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (__qib_sdma_running(ppd))
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e70_go_idle);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- /* this might be better placed in qib_sd7220_presets() */
- set_7220_relock_poll(dd, ibup);
- } else {
- if (qib_compat_ddr_negotiate &&
- !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
- QIBL_IB_AUTONEG_INPROG)) &&
- ppd->link_speed_active == QIB_IB_SDR &&
- (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
- (QIB_IB_DDR | QIB_IB_SDR) &&
- dd->cspec->autoneg_tries < AUTONEG_TRIES) {
- /* we are SDR, and DDR auto-negotiation enabled */
- ++dd->cspec->autoneg_tries;
- if (!ppd->cpspec->ibdeltainprog) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
- cr_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
- cr_iblinkerrrecov);
- }
- try_7220_autoneg(ppd);
- ret = 1; /* no other IB status change processing */
- } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
- ppd->link_speed_active == QIB_IB_SDR) {
- autoneg_7220_send(ppd, 1);
- set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
- udelay(2);
- toggle_7220_rclkrls(dd);
- ret = 1; /* no other IB status change processing */
- } else {
- if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
- (ppd->link_speed_active & QIB_IB_DDR)) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
- QIBL_IB_AUTONEG_FAILED);
- spin_unlock_irqrestore(&ppd->lflags_lock,
- flags);
- dd->cspec->autoneg_tries = 0;
- /* re-enable SDR, for next link down */
- set_7220_ibspeed_fast(ppd,
- ppd->link_speed_enabled);
- wake_up(&ppd->cpspec->autoneg_wait);
- symadj = 1;
- } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
- /*
- * Clear autoneg failure flag, and do setup
- * so we'll try next time link goes down and
- * back to INIT (possibly connected to a
- * different device).
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock,
- flags);
- ppd->cpspec->ibcddrctrl |=
- IBA7220_IBC_IBTA_1_2_MASK;
- qib_write_kreg(dd, kr_ncmodectrl, 0);
- symadj = 1;
- }
- }
-
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- symadj = 1;
-
- if (!ret) {
- ppd->delay_mult = rate_to_delay
- [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
- [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];
-
- set_7220_relock_poll(dd, ibup);
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- /*
- * Unlike 7322, the 7220 needs this, due to lack of
- * interrupt in some cases when we have sdma active
- * when the link goes down.
- */
- if (ppd->sdma_state.current_state !=
- qib_sdma_state_s20_idle)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e00_go_hw_down);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- }
-
- if (symadj) {
- if (ppd->cpspec->ibdeltainprog) {
- ppd->cpspec->ibdeltainprog = 0;
- ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
- cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
- ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
- cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
- }
- } else if (!ibup && qib_compat_ddr_negotiate &&
- !ppd->cpspec->ibdeltainprog &&
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
- cr_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
- cr_iblinkerrrecov);
- }
-
- if (!ret)
- qib_setup_7220_setextled(ppd, ibup);
- return ret;
-}
-
-/*
- * Does read/modify/write to appropriate registers to
- * set output and direction bits selected by mask.
- * these are in their canonical positions (e.g. lsb of
- * dir will end up in D48 of extctrl on existing chips).
- * returns contents of GP Inputs.
- */
-static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
-{
- u64 read_val, new_out;
- unsigned long flags;
-
- if (mask) {
- /* some bits being written, lock access to GPIO */
- dir &= mask;
- out &= mask;
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
- dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
- new_out = (dd->cspec->gpio_out & ~mask) | out;
-
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- qib_write_kreg(dd, kr_gpio_out, new_out);
- dd->cspec->gpio_out = new_out;
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
- }
- /*
- * It is unlikely that a read at this time would get valid
- * data on a pin whose direction line was set in the same
- * call to this function. We include the read here because
- * that allows us to potentially combine a change on one pin with
- * a read on another, and because the old code did something like
- * this.
- */
- read_val = qib_read_kreg64(dd, kr_extstatus);
- return SYM_FIELD(read_val, EXTStatus, GPIOIn);
-}
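-
-/*
- * Bit-level model (illustrative only; not driver code) of the shadowed
- * read-modify-write above: only the output-enable and output bits
- * selected by mask change; everything else is preserved from the
- * shadow copies.
- */
-static u64 demo_gpio_mod(u64 *extctrl, u64 *gpio_out, u32 out, u32 dir,
- u32 mask, int oe_lsb)
-{
- dir &= mask;
- out &= mask;
- *extctrl &= ~((u64) mask << oe_lsb); /* clear masked OE bits */
- *extctrl |= (u64) dir << oe_lsb; /* set requested directions */
- *gpio_out = (*gpio_out & ~mask) | out; /* splice in output values */
- return *gpio_out;
-}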
-
-/*
- * Read fundamental info we need to use the chip. These are
- * the registers that describe chip capabilities, and are
- * saved in shadow registers.
- */
-static void get_7220_chip_params(struct qib_devdata *dd)
-{
- u64 val;
- u32 piobufs;
- int mtu;
-
- dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
-
- dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
- dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
- dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
- dd->palign = qib_read_kreg32(dd, kr_palign);
- dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
- dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
-
- val = qib_read_kreg64(dd, kr_sendpiosize);
- dd->piosize2k = val & ~0U;
- dd->piosize4k = val >> 32;
-
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1)
- mtu = QIB_DEFAULT_MTU;
- dd->pport->ibmtu = (u32)mtu;
-
- val = qib_read_kreg64(dd, kr_sendpiobufcnt);
- dd->piobcnt2k = val & ~0U;
- dd->piobcnt4k = val >> 32;
- /* these may be adjusted in init_chip_wc_pat() */
- dd->pio2kbase = (u32 __iomem *)
- ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
- if (dd->piobcnt4k) {
- dd->pio4kbase = (u32 __iomem *)
- ((char __iomem *) dd->kregbase +
- (dd->piobufbase >> 32));
- /*
- * 4K buffers take 2 pages; we use roundup just to be
- * paranoid; we calculate it once here, rather than on
- * every buffer allocation
- */
- dd->align4k = ALIGN(dd->piosize4k, dd->palign);
- }
-
- piobufs = dd->piobcnt4k + dd->piobcnt2k;
-
- dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
- (sizeof(u64) * BITS_PER_BYTE / 2);
-}
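-
-/*
- * Worked example of the pioavregs computation above (the buffer count
- * is invented for illustration): each 64-bit avail register carries 2
- * status bits per send buffer, i.e. 32 buffers per register, which is
- * where sizeof(u64) * BITS_PER_BYTE / 2 == 32 comes from. With 176
- * buffers, ALIGN(176, 32) / 32 == 192 / 32 == 6 registers.
- */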
-
-/*
- * The chip base addresses in cspec and cpspec have to be set
- * after possible init_chip_wc_pat(), rather than in
- * get_7220_chip_params(), so it is split out as a separate function.
- */
-static void set_7220_baseaddrs(struct qib_devdata *dd)
-{
- u32 cregbase;
- /* init after possible re-map in init_chip_wc_pat() */
- cregbase = qib_read_kreg32(dd, kr_counterregbase);
- dd->cspec->cregbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + cregbase);
-
- dd->egrtidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + dd->rcvegrbase);
-}
-
-
-#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) | \
- SYM_MASK(SendCtrl, SPioEnable) | \
- SYM_MASK(SendCtrl, SSpecialTriggerEn) | \
- SYM_MASK(SendCtrl, SendBufAvailUpd) | \
- SYM_MASK(SendCtrl, AvailUpdThld) | \
- SYM_MASK(SendCtrl, SDmaEnable) | \
- SYM_MASK(SendCtrl, SDmaIntEnable) | \
- SYM_MASK(SendCtrl, SDmaHalt) | \
- SYM_MASK(SendCtrl, SDmaSingleDescriptor))
-
-static int sendctrl_hook(struct qib_devdata *dd,
- const struct diag_observer *op,
- u32 offs, u64 *data, u64 mask, int only_32)
-{
- unsigned long flags;
- unsigned idx = offs / sizeof(u64);
- u64 local_data, all_bits;
-
- if (idx != kr_sendctrl) {
- qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
- offs, only_32 ? "32" : "64");
- return 0;
- }
-
- all_bits = ~0ULL;
- if (only_32)
- all_bits >>= 32;
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if ((mask & all_bits) != all_bits) {
- /*
- * At least some mask bits are zero, so we need
- * to read. The judgement call is whether to read from the
- * reg or the shadow. First cut: read the reg, and complain
- * if any bits which should be shadowed are different
- * from their shadowed value.
- */
- if (only_32)
- local_data = (u64)qib_read_kreg32(dd, idx);
- else
- local_data = qib_read_kreg64(dd, idx);
- qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
- (u32)local_data, (u32)dd->sendctrl);
- if ((local_data & SENDCTRL_SHADOWED) !=
- (dd->sendctrl & SENDCTRL_SHADOWED))
- qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
- (u32)local_data, (u32) dd->sendctrl);
- *data = (local_data & ~mask) | (*data & mask);
- }
- if (mask) {
- /*
- * At least some mask bits are one, so we need
- * to write, but only shadow some bits.
- */
- u64 sval, tval; /* Shadowed, transient */
-
- /*
- * New shadow val is bits we don't want to touch,
- * ORed with bits we do, that are intended for shadow.
- */
- sval = (dd->sendctrl & ~mask);
- sval |= *data & SENDCTRL_SHADOWED & mask;
- dd->sendctrl = sval;
- tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
- qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
- (u32)tval, (u32)sval);
- qib_write_kreg(dd, kr_sendctrl, tval);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- }
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-
- return only_32 ? 4 : 8;
-}
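-
-/*
- * The shadow/transient split above in isolation (illustrative only):
- * of the bits selected by mask, those in shadowed_bits are folded into
- * the saved shadow copy; the rest reach the chip but are not
- * remembered.
- */
-static u64 demo_shadow_write(u64 *shadow, u64 data, u64 mask,
- u64 shadowed_bits)
-{
- u64 sval, tval;
-
- sval = *shadow & ~mask; /* bits we don't touch */
- sval |= data & shadowed_bits & mask; /* shadowed bits we do */
- *shadow = sval;
- tval = sval | (data & ~shadowed_bits & mask);
- return tval; /* value to write to the register */
-}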
-
-static const struct diag_observer sendctrl_observer = {
- sendctrl_hook, kr_sendctrl * sizeof(u64),
- kr_sendctrl * sizeof(u64)
-};
-
-/*
- * write the final few registers that depend on some of the
- * init setup. Done late in init, just before bringing up
- * the serdes.
- */
-static int qib_late_7220_initreg(struct qib_devdata *dd)
-{
- int ret = 0;
- u64 val;
-
- qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
- qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
- qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
- qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
- val = qib_read_kreg64(dd, kr_sendpioavailaddr);
- if (val != dd->pioavailregs_phys) {
- qib_dev_err(dd,
- "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
- (unsigned long) dd->pioavailregs_phys,
- (unsigned long long) val);
- ret = -EINVAL;
- }
- qib_register_observer(dd, &sendctrl_observer);
- return ret;
-}
-
-static int qib_init_7220_variables(struct qib_devdata *dd)
-{
- struct qib_chippport_specific *cpspec;
- struct qib_pportdata *ppd;
- int ret = 0;
- u32 sbufs, updthresh;
-
- cpspec = (struct qib_chippport_specific *)(dd + 1);
- ppd = &cpspec->pportdata;
- dd->pport = ppd;
- dd->num_pports = 1;
-
- dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
- dd->cspec->dd = dd;
- ppd->cpspec = cpspec;
-
- spin_lock_init(&dd->cspec->sdepb_lock);
- spin_lock_init(&dd->cspec->rcvmod_lock);
- spin_lock_init(&dd->cspec->gpio_lock);
-
- /* we haven't yet set QIB_PRESENT, so use read directly */
- dd->revision = readq(&dd->kregbase[kr_revision]);
-
- if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
- qib_dev_err(dd,
- "Revision register read failure, giving up initialization\n");
- ret = -ENODEV;
- goto bail;
- }
- dd->flags |= QIB_PRESENT; /* now register routines work */
-
- dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
- ChipRevMajor);
- dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
- ChipRevMinor);
-
- get_7220_chip_params(dd);
- qib_7220_boardname(dd);
-
- /*
- * GPIO bits for TWSI data and clock,
- * used for serial EEPROM.
- */
- dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
- dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
- dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
-
- dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
- QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
- dd->flags |= qib_special_trigger ?
- QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
-
- init_waitqueue_head(&cpspec->autoneg_wait);
- INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
-
- ret = qib_init_pportdata(ppd, dd, 0, 1);
- if (ret)
- goto bail;
- ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;
-
- ppd->link_width_enabled = ppd->link_width_supported;
- ppd->link_speed_enabled = ppd->link_speed_supported;
- /*
- * Set the initial values to reasonable defaults; they will be
- * set for real when the link is up.
- */
- ppd->link_width_active = IB_WIDTH_4X;
- ppd->link_speed_active = QIB_IB_SDR;
- ppd->delay_mult = rate_to_delay[0][1];
- ppd->vls_supported = IB_VL_VL0;
- ppd->vls_operational = ppd->vls_supported;
-
- if (!qib_mini_init)
- qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
-
- timer_setup(&ppd->cpspec->chase_timer, reenable_7220_chase, 0);
-
- qib_num_cfg_vls = 1; /* if any 7220's, only one VL */
-
- dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
- dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
- dd->rhf_offset =
- dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
-
- /* we always allocate at least 2048 bytes for eager buffers */
- ret = ib_mtu_enum_to_int(qib_ibmtu);
- dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
- dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
-
- qib_7220_tidtemplate(dd);
-
- /*
- * We can request a receive interrupt for 1 or
- * more packets from current offset. For now, we set this
- * up for a single packet.
- */
- dd->rhdrhead_intr_off = 1ULL << 32;
-
- /* setup the stats timer; the add_timer is done at end of init */
- timer_setup(&dd->stats_timer, qib_get_7220_faststats, 0);
- dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
-
- /*
- * Control[4] has been added to change the arbitration within
- * the SDMA engine between data fetches and descriptor
- * fetches. qib_sdma_fetch_arb==0 gives data fetches priority.
- */
- if (qib_sdma_fetch_arb)
- dd->control |= 1 << 4;
-
- dd->ureg_align = 0x10000; /* 64KB alignment */
-
- dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1;
- qib_7220_config_ctxts(dd);
- qib_set_ctxtcnt(dd); /* needed for PAT setup */
-
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
- set_7220_baseaddrs(dd); /* set chip access pointers now */
-
- ret = 0;
- if (qib_mini_init)
- goto bail;
-
- ret = qib_create_ctxts(dd);
- init_7220_cntrnames(dd);
-
- /* Use all the 4KB buffers for kernel SDMA, or none if SDMA is
- * disabled. Reserve the update-threshold number of buffers, or 3,
- * whichever is greater, for other kernel use such as sending SMI,
- * MAD, and ACK packets; when SDMA is disabled, all the 4k buffers
- * are reserved for the kernel instead.
- * If the reserve were smaller than the update threshold, we could
- * wait a long time for an update. Coded this way because we
- * sometimes change the update threshold for various reasons,
- * and we want this to remain robust.
- */
- updthresh = 8U; /* update threshold */
- if (dd->flags & QIB_HAS_SEND_DMA) {
- dd->cspec->sdmabufcnt = dd->piobcnt4k;
- sbufs = updthresh > 3 ? updthresh : 3;
- } else {
- dd->cspec->sdmabufcnt = 0;
- sbufs = dd->piobcnt4k;
- }
-
- dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
- dd->cspec->sdmabufcnt;
- dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
- dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
- dd->last_pio = dd->cspec->lastbuf_for_pio;
- dd->pbufsctxt = dd->lastctxt_piobuf /
- (dd->cfgctxts - dd->first_user_ctxt);
-
- /*
- * if we are at 16 user contexts, we will have only 7 sbufs
- * per context, so drop the update threshold to match. We
- * want to update before we actually run out; at low pbufs/ctxt,
- * give ourselves some margin.
- */
- if ((dd->pbufsctxt - 2) < updthresh)
- updthresh = dd->pbufsctxt - 2;
-
- dd->cspec->updthresh_dflt = updthresh;
- dd->cspec->updthresh = updthresh;
-
- /* before full enable, no interrupts, no locking needed */
- dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
- << SYM_LSB(SendCtrl, AvailUpdThld);
-
- dd->psxmitwait_supported = 1;
- dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
-bail:
- return ret;
-}
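-
-/*
- * Worked example of the buffer accounting above (numbers invented for
- * illustration): with piobcnt2k = 144, piobcnt4k = 32, SDMA taking all
- * 32 4k buffers, and updthresh = 8 (so sbufs = 8):
- * lastbuf_for_pio = 144 + 32 - 32 = 144, then 143 after the
- * decrement, since the range is inclusive;
- * lastctxt_piobuf = 144 - 8 = 136;
- * with 16 user contexts, pbufsctxt = 136 / 16 = 8, and since
- * 8 - 2 < 8, updthresh drops from 8 to 6.
- */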
-
-static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
- u32 *pbufnum)
-{
- u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
- struct qib_devdata *dd = ppd->dd;
- u32 __iomem *buf;
-
- if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
- !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
- buf = get_7220_link_buf(ppd, pbufnum);
- else {
- if ((plen + 1) > dd->piosize2kmax_dwords)
- first = dd->piobcnt2k;
- else
- first = 0;
- /* try 4k if all 2k busy, so same last for both sizes */
- last = dd->cspec->lastbuf_for_pio;
- buf = qib_getsendbuf_range(dd, pbufnum, first, last);
- }
- return buf;
-}
-
-/* these 2 "counters" are really control registers, and are always RW */
-static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
- u32 start)
-{
- write_7220_creg(ppd->dd, cr_psinterval, intv);
- write_7220_creg(ppd->dd, cr_psstart, start);
-}
-
-/*
- * NOTE: no real attempt is made to generalize the SDMA stuff.
- * At some point "soon" we will have a new, more generalized
- * sdma interface, and then we'll clean this up.
- */
-
-/* Must be called with sdma_lock held, or before init finished */
-static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
-{
- /* Commit writes to memory and advance the tail on the chip */
- wmb();
- ppd->sdma_descq_tail = tail;
- qib_write_kreg(ppd->dd, kr_senddmatail, tail);
-}
-
-static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
-{
-}
-
-static struct sdma_set_state_action sdma_7220_action_table[] = {
- [qib_sdma_state_s00_hw_down] = {
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 0,
- .go_s99_running_tofalse = 1,
- },
- [qib_sdma_state_s10_hw_start_up_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- },
- [qib_sdma_state_s20_idle] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- },
- [qib_sdma_state_s30_sw_clean_up_wait] = {
- .op_enable = 0,
- .op_intenable = 1,
- .op_halt = 0,
- },
- [qib_sdma_state_s40_hw_clean_up_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- },
- [qib_sdma_state_s50_hw_halt_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- },
- [qib_sdma_state_s99_running] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 0,
- .go_s99_running_totrue = 1,
- },
-};
-
-static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
-{
- ppd->sdma_state.set_state_action = sdma_7220_action_table;
-}
-
-static int init_sdma_7220_regs(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned i, n;
- u64 senddmabufmask[3] = { 0 };
-
- /* Set SendDmaBase */
- qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
- qib_sdma_7220_setlengen(ppd);
- qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
- /* Set SendDmaHeadAddr */
- qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);
-
- /*
- * Reserve all the former "kernel" piobufs, using high number range
- * so we get as many 4K buffers as possible
- */
- n = dd->piobcnt2k + dd->piobcnt4k;
- i = n - dd->cspec->sdmabufcnt;
-
- for (; i < n; ++i) {
- unsigned word = i / 64;
- unsigned bit = i & 63;
-
- senddmabufmask[word] |= 1ULL << bit;
- }
- qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
- qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
- qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);
-
- ppd->sdma_state.first_sendbuf = i;
- ppd->sdma_state.last_sendbuf = n;
-
- return 0;
-}
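-
-/*
- * Standalone sketch (illustrative; not driver code) of how the
- * reservation loop above spreads a contiguous range of buffer indices
- * across the three 64-bit SendDmaBufMask words: buffer i lands in
- * word i / 64, bit i % 64.
- */
-static void demo_bufmask(u64 mask[3], unsigned first, unsigned count)
-{
- unsigned i;
-
- mask[0] = mask[1] = mask[2] = 0;
- for (i = first; i < first + count && i < 3 * 64; ++i)
- mask[i / 64] |= 1ULL << (i % 64);
-}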
-
-/* sdma_lock must be held */
-static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- int sane;
- int use_dmahead;
- u16 swhead;
- u16 swtail;
- u16 cnt;
- u16 hwhead;
-
- use_dmahead = __qib_sdma_running(ppd) &&
- (dd->flags & QIB_HAS_SDMA_TIMEOUT);
-retry:
- hwhead = use_dmahead ?
- (u16)le64_to_cpu(*ppd->sdma_head_dma) :
- (u16)qib_read_kreg32(dd, kr_senddmahead);
-
- swhead = ppd->sdma_descq_head;
- swtail = ppd->sdma_descq_tail;
- cnt = ppd->sdma_descq_cnt;
-
- if (swhead < swtail) {
- /* not wrapped */
- sane = (hwhead >= swhead) & (hwhead <= swtail);
- } else if (swhead > swtail) {
- /* wrapped around */
- sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
- (hwhead <= swtail);
- } else {
- /* empty */
- sane = (hwhead == swhead);
- }
-
- if (unlikely(!sane)) {
- if (use_dmahead) {
- /* try one more time, directly from the register */
- use_dmahead = 0;
- goto retry;
- }
- /* assume no progress */
- hwhead = swhead;
- }
-
- return hwhead;
-}
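-
-/*
- * The wrap-aware sanity check above in isolation (illustrative only):
- * for a ring of cnt descriptors, the hardware head is plausible only
- * if it lies within the region software believes is outstanding, in
- * both the wrapped and unwrapped cases.
- */
-static int demo_head_sane(u16 hwhead, u16 swhead, u16 swtail, u16 cnt)
-{
- if (swhead < swtail) /* not wrapped */
- return hwhead >= swhead && hwhead <= swtail;
- if (swhead > swtail) /* wrapped around */
- return (hwhead >= swhead && hwhead < cnt) ||
- hwhead <= swtail;
- return hwhead == swhead; /* empty */
-}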
-
-static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
-{
- u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);
-
- return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
- (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
- (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
- !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
-}
-
-/*
- * Compute the amount of delay before sending the next packet if the
- * port's send rate differs from the static rate set for the QP.
- * Since the delay affects this packet but the amount of the delay is
- * based on the length of the previous packet, use the last delay computed
- * and save the delay count for this packet to be used next time
- * we get here.
- */
-static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
- u8 srate, u8 vl)
-{
- u8 snd_mult = ppd->delay_mult;
- u8 rcv_mult = ib_rate_to_delay[srate];
- u32 ret = ppd->cpspec->last_delay_mult;
-
- ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
- (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
-
- /* Indicate VL15, if necessary */
- if (vl == 15)
- ret |= PBC_7220_VL15_SEND_CTRL;
- return ret;
-}
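-
-/*
- * Worked example (values invented for illustration): assuming a QP
- * whose static rate maps to rcv_mult = 16 (2.5 Gb/s) on a port where
- * snd_mult = 2 (DDR 4X), a packet of plen = 100 dwords charges the
- * *next* packet a delay count of (100 * (16 - 2) + 1) >> 1 = 700;
- * the current packet uses whatever the previous one computed.
- */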
-
-static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
-{
-}
-
-static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
-{
- if (!rcd->ctxt) {
- rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
- rcd->rcvegr_tid_base = 0;
- } else {
- rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
- rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
- (rcd->ctxt - 1) * rcd->rcvegrcnt;
- }
-}
-
-static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
- u32 len, u32 which, struct qib_ctxtdata *rcd)
-{
- int i;
- unsigned long flags;
-
- switch (which) {
- case TXCHK_CHG_TYPE_KERN:
- /* see if we need to raise avail update threshold */
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- for (i = dd->first_user_ctxt;
- dd->cspec->updthresh != dd->cspec->updthresh_dflt
- && i < dd->cfgctxts; i++)
- if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
- ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
- < dd->cspec->updthresh_dflt)
- break;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- if (i == dd->cfgctxts) {
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- dd->cspec->updthresh = dd->cspec->updthresh_dflt;
- dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
- dd->sendctrl |= (dd->cspec->updthresh &
- SYM_RMASK(SendCtrl, AvailUpdThld)) <<
- SYM_LSB(SendCtrl, AvailUpdThld);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- }
- break;
- case TXCHK_CHG_TYPE_USER:
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
- / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
- dd->cspec->updthresh = (rcd->piocnt /
- rcd->subctxt_cnt) - 1;
- dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
- dd->sendctrl |= (dd->cspec->updthresh &
- SYM_RMASK(SendCtrl, AvailUpdThld))
- << SYM_LSB(SendCtrl, AvailUpdThld);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- } else
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- break;
- }
-}
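-
-/*
- * Example of the user-context threshold drop above (numbers invented):
- * a context with piocnt = 16 split across subctxt_cnt = 4 leaves each
- * subcontext only 4 buffers, so updthresh is lowered to
- * 16 / 4 - 1 = 3, refreshing the avail list before a subcontext
- * runs dry.
- */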
-
-static void writescratch(struct qib_devdata *dd, u32 val)
-{
- qib_write_kreg(dd, kr_scratch, val);
-}
-
-#define VALID_TS_RD_REG_MASK 0xBF
-/**
- * qib_7220_tempsense_rd - read register of temp sensor via TWSI
- * @dd: the qlogic_ib device
- * @regnum: register to read from
- *
- * returns reg contents (0..255) or < 0 for error
- */
-static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
-{
- int ret;
- u8 rdata;
-
- if (regnum > 7) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* return a bogus value for (the one) register we do not have */
- if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
- ret = 0;
- goto bail;
- }
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (ret)
- goto bail;
-
- ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
- if (!ret)
- ret = rdata;
-
- mutex_unlock(&dd->eep_lock);
-
- /*
- * There are three possibilities here:
- * ret is actual value (0..255)
- * ret is -ENXIO or -EINVAL from twsi code or this file
- * ret is -EINTR from mutex_lock_interruptible.
- */
-bail:
- return ret;
-}
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-static int qib_7220_notify_dca(struct qib_devdata *dd, unsigned long event)
-{
- return 0;
-}
-#endif
-
-/* Dummy function, as 7220 boards never disable EEPROM Write */
-static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
-{
- return 1;
-}
-
-/**
- * qib_init_iba7220_funcs - set up the chip-specific function pointers
- * @pdev: the pci_dev for qlogic_ib device
- * @ent: pci_device_id struct for this dev
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct qib_devdata *dd;
- int ret;
- u32 boardid, minwidth;
-
- dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
- sizeof(struct qib_chippport_specific));
- if (IS_ERR(dd))
- goto bail;
-
- dd->f_bringup_serdes = qib_7220_bringup_serdes;
- dd->f_cleanup = qib_setup_7220_cleanup;
- dd->f_clear_tids = qib_7220_clear_tids;
- dd->f_free_irq = qib_free_irq;
- dd->f_get_base_info = qib_7220_get_base_info;
- dd->f_get_msgheader = qib_7220_get_msgheader;
- dd->f_getsendbuf = qib_7220_getsendbuf;
- dd->f_gpio_mod = gpio_7220_mod;
- dd->f_eeprom_wen = qib_7220_eeprom_wen;
- dd->f_hdrqempty = qib_7220_hdrqempty;
- dd->f_ib_updown = qib_7220_ib_updown;
- dd->f_init_ctxt = qib_7220_init_ctxt;
- dd->f_initvl15_bufs = qib_7220_initvl15_bufs;
- dd->f_intr_fallback = qib_7220_intr_fallback;
- dd->f_late_initreg = qib_late_7220_initreg;
- dd->f_setpbc_control = qib_7220_setpbc_control;
- dd->f_portcntr = qib_portcntr_7220;
- dd->f_put_tid = qib_7220_put_tid;
- dd->f_quiet_serdes = qib_7220_quiet_serdes;
- dd->f_rcvctrl = rcvctrl_7220_mod;
- dd->f_read_cntrs = qib_read_7220cntrs;
- dd->f_read_portcntrs = qib_read_7220portcntrs;
- dd->f_reset = qib_setup_7220_reset;
- dd->f_init_sdma_regs = init_sdma_7220_regs;
- dd->f_sdma_busy = qib_sdma_7220_busy;
- dd->f_sdma_gethead = qib_sdma_7220_gethead;
- dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl;
- dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
- dd->f_sdma_update_tail = qib_sdma_update_7220_tail;
- dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up;
- dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up;
- dd->f_sdma_init_early = qib_7220_sdma_init_early;
- dd->f_sendctrl = sendctrl_7220_mod;
- dd->f_set_armlaunch = qib_set_7220_armlaunch;
- dd->f_set_cntr_sample = qib_set_cntr_7220_sample;
- dd->f_iblink_state = qib_7220_iblink_state;
- dd->f_ibphys_portstate = qib_7220_phys_portstate;
- dd->f_get_ib_cfg = qib_7220_get_ib_cfg;
- dd->f_set_ib_cfg = qib_7220_set_ib_cfg;
- dd->f_set_ib_loopback = qib_7220_set_loopback;
- dd->f_set_intr_state = qib_7220_set_intr_state;
- dd->f_setextled = qib_setup_7220_setextled;
- dd->f_txchk_change = qib_7220_txchk_change;
- dd->f_update_usrhead = qib_update_7220_usrhead;
- dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr;
- dd->f_xgxs_reset = qib_7220_xgxs_reset;
- dd->f_writescratch = writescratch;
- dd->f_tempsense_rd = qib_7220_tempsense_rd;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dd->f_notify_dca = qib_7220_notify_dca;
-#endif
- /*
- * Do remaining pcie setup and save pcie values in dd.
- * Any error printing is already done by the init code.
- * On return, we have the chip mapped, but chip registers
- * are not set up until start of qib_init_7220_variables.
- */
- ret = qib_pcie_ddinit(dd, pdev, ent);
- if (ret < 0)
- goto bail_free;
-
- /* initialize chip-specific variables */
- ret = qib_init_7220_variables(dd);
- if (ret)
- goto bail_cleanup;
-
- if (qib_mini_init)
- goto bail;
-
- boardid = SYM_FIELD(dd->revision, Revision,
- BoardID);
- switch (boardid) {
- case 0:
- case 2:
- case 10:
- case 12:
- minwidth = 16; /* x16 capable boards */
- break;
- default:
- minwidth = 8; /* x8 capable boards */
- break;
- }
- if (qib_pcie_params(dd, minwidth, NULL))
- qib_dev_err(dd,
- "Failed to setup PCIe or interrupts; continuing anyway\n");
-
- if (qib_read_kreg64(dd, kr_hwerrstatus) &
- QLOGIC_IB_HWE_SERDESPLLFAILED)
- qib_write_kreg(dd, kr_hwerrclear,
- QLOGIC_IB_HWE_SERDESPLLFAILED);
-
- /* setup interrupt handler (interrupt type handled above) */
- qib_setup_7220_interrupt(dd);
- qib_7220_init_hwerrors(dd);
-
- /* clear diagctrl register, in case diags were running and crashed */
- qib_write_kreg(dd, kr_hwdiagctrl, 0);
-
- goto bail;
-
-bail_cleanup:
- qib_pcie_ddcleanup(dd);
-bail_free:
- qib_free_devdata(dd);
- dd = ERR_PTR(ret);
-bail:
- return dd;
-}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
deleted file mode 100644
index 781b6a4fb002..000000000000
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ /dev/null
@@ -1,8475 +0,0 @@
-/*
- * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
- * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath 7322 chip
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_smi.h>
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-#include <linux/dca.h>
-#endif
-
-#include "qib.h"
-#include "qib_7322_regs.h"
-#include "qib_qsfp.h"
-
-#include "qib_mad.h"
-#include "qib_verbs.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) QIB_DRV_NAME " " fmt
-
-static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
-static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
-static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
-static irqreturn_t qib_7322intr(int irq, void *data);
-static irqreturn_t qib_7322bufavail(int irq, void *data);
-static irqreturn_t sdma_intr(int irq, void *data);
-static irqreturn_t sdma_idle_intr(int irq, void *data);
-static irqreturn_t sdma_progress_intr(int irq, void *data);
-static irqreturn_t sdma_cleanup_intr(int irq, void *data);
-static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
- struct qib_ctxtdata *rcd);
-static u8 qib_7322_phys_portstate(u64);
-static u32 qib_7322_iblink_state(u64);
-static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
- u16 linitcmd);
-static void force_h1(struct qib_pportdata *);
-static void adj_tx_serdes(struct qib_pportdata *);
-static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
-static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
-
-static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
-static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
-static void serdes_7322_los_enable(struct qib_pportdata *, int);
-static int serdes_7322_init_old(struct qib_pportdata *);
-static int serdes_7322_init_new(struct qib_pportdata *);
-static void dump_sdma_7322_state(struct qib_pportdata *);
-
-#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
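-/* e.g. BMASK(7, 4) == 0xf0: a 4-bit field occupying bits 7..4 */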
-
-/* LE2 serdes values for different cases */
-#define LE2_DEFAULT 5
-#define LE2_5m 4
-#define LE2_QME 0
-
-/* Below is special-purpose, so only really works for the IB SerDes blocks. */
-#define IBSD(hw_pidx) (hw_pidx + 2)
-
-/* these are variables for documentation and experimentation purposes */
-static const unsigned rcv_int_timeout = 375;
-static const unsigned rcv_int_count = 16;
-static const unsigned sdma_idle_cnt = 64;
-
-/* Time to stop altering Rx Equalization parameters, after link up. */
-#define RXEQ_DISABLE_MSECS 2500
-
-/*
- * Number of VLs we are configured to use (to allow for more
- * credits per vl, etc.)
- */
-ushort qib_num_cfg_vls = 2;
-module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
-MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
-
-static ushort qib_chase = 1;
-module_param_named(chase, qib_chase, ushort, S_IRUGO);
-MODULE_PARM_DESC(chase, "Enable state chase handling");
-
-static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
-module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
-MODULE_PARM_DESC(long_attenuation,
- "attenuation cutoff (dB) for long copper cable setup");
-
-static ushort qib_singleport;
-module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
-MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
-
-static ushort qib_krcvq01_no_msi;
-module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
-MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
-
-/*
- * Receive header queue sizes
- */
-static unsigned qib_rcvhdrcnt;
-module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
-MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
-
-static unsigned qib_rcvhdrsize;
-module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
-MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
-
-static unsigned qib_rcvhdrentsize;
-module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
-MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
-
-#define MAX_ATTEN_LEN 64 /* plenty for any real system */
-/* for read back, default index is ~5m copper cable */
-static char txselect_list[MAX_ATTEN_LEN] = "10";
-static struct kparam_string kp_txselect = {
- .string = txselect_list,
- .maxlen = MAX_ATTEN_LEN
-};
-static int setup_txselect(const char *, const struct kernel_param *);
-module_param_call(txselect, setup_txselect, param_get_string,
- &kp_txselect, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(txselect,
- "Tx serdes indices (for no QSFP or invalid QSFP data)");
-
-#define BOARD_QME7342 5
-#define BOARD_QMH7342 6
-#define BOARD_QMH7360 9
-#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
- BOARD_QMH7342)
-#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
- BOARD_QME7342)
-
-#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
-
-#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
-
-#define MASK_ACROSS(lsb, msb) \
- (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
-
-#define SYM_RMASK(regname, fldname) ((u64) \
- QIB_7322_##regname##_##fldname##_RMASK)
-
-#define SYM_MASK(regname, fldname) ((u64) \
- QIB_7322_##regname##_##fldname##_RMASK << \
- QIB_7322_##regname##_##fldname##_LSB)
-
-#define SYM_FIELD(value, regname, fldname) ((u64) \
- (((value) >> SYM_LSB(regname, fldname)) & \
- SYM_RMASK(regname, fldname)))
-
-/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
-#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
- (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
-
-#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
-#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
-#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
-#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
-#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
-/* Below because most, but not all, fields of IntMask have that full suffix */
-#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
-
-
-#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
-
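-/*
- * Usage sketch (the field name is invented for illustration): if the
- * generated header defines QIB_7322_Foo_Bar_LSB == 8 and
- * QIB_7322_Foo_Bar_RMASK == 0xf, then SYM_MASK(Foo, Bar) == 0xf00 and
- * SYM_FIELD(0x0a42, Foo, Bar) == 0xa (bits 11..8, shifted down).
- */
-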
-/*
- * the size bits give us 2^N, in KB units. 0 marks the entry as invalid,
- * and 7 is reserved. We currently use only 2KB and 4KB.
- */
-#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
-#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
-#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
-#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
-
-#define SendIBSLIDAssignMask \
- QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
-#define SendIBSLMCMask \
- QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
-
-#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
-#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
-#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
-#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
-#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
-#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
-
-#define _QIB_GPIO_SDA_NUM 1
-#define _QIB_GPIO_SCL_NUM 0
-#define QIB_EEPROM_WEN_NUM 14
-#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
-
-/* HW counter clock is at 4nsec */
-#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
-
-/* full speed IB port 1 only */
-#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
-#define PORT_SPD_CAP_SHIFT 3
-
-/* full speed featuremask, both ports */
-#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
-
-/*
- * This file contains almost all the chip-specific register information and
- * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
- */
-
-/* Use defines to tie machine-generated names to lower-case names */
-#define kr_contextcnt KREG_IDX(ContextCnt)
-#define kr_control KREG_IDX(Control)
-#define kr_counterregbase KREG_IDX(CntrRegBase)
-#define kr_errclear KREG_IDX(ErrClear)
-#define kr_errmask KREG_IDX(ErrMask)
-#define kr_errstatus KREG_IDX(ErrStatus)
-#define kr_extctrl KREG_IDX(EXTCtrl)
-#define kr_extstatus KREG_IDX(EXTStatus)
-#define kr_gpio_clear KREG_IDX(GPIOClear)
-#define kr_gpio_mask KREG_IDX(GPIOMask)
-#define kr_gpio_out KREG_IDX(GPIOOut)
-#define kr_gpio_status KREG_IDX(GPIOStatus)
-#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
-#define kr_debugportval KREG_IDX(DebugPortValueReg)
-#define kr_fmask KREG_IDX(feature_mask)
-#define kr_act_fmask KREG_IDX(active_feature_mask)
-#define kr_hwerrclear KREG_IDX(HwErrClear)
-#define kr_hwerrmask KREG_IDX(HwErrMask)
-#define kr_hwerrstatus KREG_IDX(HwErrStatus)
-#define kr_intclear KREG_IDX(IntClear)
-#define kr_intmask KREG_IDX(IntMask)
-#define kr_intredirect KREG_IDX(IntRedirect0)
-#define kr_intstatus KREG_IDX(IntStatus)
-#define kr_pagealign KREG_IDX(PageAlign)
-#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
-#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
-#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
-#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
-#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
-#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
-#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
-#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
-#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
-#define kr_revision KREG_IDX(Revision)
-#define kr_scratch KREG_IDX(Scratch)
-#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
-#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
-#define kr_sendctrl KREG_IDX(SendCtrl)
-#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
-#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
-#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
-#define kr_sendpiobufbase KREG_IDX(SendBufBase)
-#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
-#define kr_sendpiosize KREG_IDX(SendBufSize)
-#define kr_sendregbase KREG_IDX(SendRegBase)
-#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
-#define kr_userregbase KREG_IDX(UserRegBase)
-#define kr_intgranted KREG_IDX(Int_Granted)
-#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
-#define kr_intblocked KREG_IDX(IntBlocked)
-#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
-
-/*
- * per-port kernel registers. Access only with qib_read_kreg_port()
- * or qib_write_kreg_port()
- */
-#define krp_errclear KREG_IBPORT_IDX(ErrClear)
-#define krp_errmask KREG_IBPORT_IDX(ErrMask)
-#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
-#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
-#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
-#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
-#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
-#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
-#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
-#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
-#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
-#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
-#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
-#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
-#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
-#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
-#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
-#define krp_psstart KREG_IBPORT_IDX(PSStart)
-#define krp_psstat KREG_IBPORT_IDX(PSStat)
-#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
-#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
-#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
-#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
-#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
-#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
-#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
-#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
-#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
-#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
-#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
-#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
-#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
-#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
-#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
-#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
-#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
-#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
-#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
-#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
-#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
-#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
-#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
-#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
-#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
-#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
-#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
-#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
-#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
-#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
-#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
-
-/*
- * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
- * or qib_write_kreg_ctxt()
- */
-#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
-#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
-
-/*
- * TID Flow table, per context. Reduces
- * number of hdrq updates to one per flow (or on errors).
- * context 0 and 1 share same memory, but have distinct
- * addresses. Since for now, we never use expected sends
- * on kernel contexts, we don't worry about that (we initialize
- * those entries for ctxt 0/1 on driver load twice, for example).
- */
-#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
-#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
-
-/* these are the error bits in the tid flows, and are W1C */
-#define TIDFLOW_ERRBITS ( \
- (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
- SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
- (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
- SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
-
-/* Most (not all) counters are per-IBport.
- * This requires that LBIntCnt be at offset 0 in the group.
- */
-#define CREG_IDX(regname) \
-((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
-
-#define crp_badformat CREG_IDX(RxVersionErrCnt)
-#define crp_err_rlen CREG_IDX(RxLenErrCnt)
-#define crp_erricrc CREG_IDX(RxICRCErrCnt)
-#define crp_errlink CREG_IDX(RxLinkMalformCnt)
-#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
-#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
-#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
-#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
-#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
-#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
-#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
-#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
-#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
-#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
-#define crp_pktrcv CREG_IDX(RxDataPktCnt)
-#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
-#define crp_pktsend CREG_IDX(TxDataPktCnt)
-#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
-#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
-#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
-#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
-#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
-#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
-#define crp_rcvebp CREG_IDX(RxEBPCnt)
-#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
-#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
-#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
-#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
-#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
-#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
-#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
-#define crp_sendstall CREG_IDX(TxFlowStallCnt)
-#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
-#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
-#define crp_txlenerr CREG_IDX(TxLenErrCnt)
-#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
-#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
-#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
-#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
-#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
-#define crp_wordrcv CREG_IDX(RxDwordCnt)
-#define crp_wordsend CREG_IDX(TxDwordCnt)
-#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
-
-/* these are the (few) counters that are not port-specific */
-#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
- QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
-#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
-#define cr_lbint CREG_DEVIDX(LBIntCnt)
-#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
-#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
-#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
-#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
-#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
-
-/* no chip register for # of IB ports supported, so define */
-#define NUM_IB_PORTS 2
-
-/* 1 VL15 buffer per hardware IB port, no register for this, so define */
-#define NUM_VL15_BUFS NUM_IB_PORTS
-
-/*
- * context 0 and 1 are special, and there is no chip register that
- * defines this value, so we have to define it here.
- * These are all allocated to either context 0 or 1 for a single-port
- * hardware configuration; otherwise each gets half.
- */
-#define KCTXT0_EGRCNT 2048
-
-/* values for vl and port fields in PBC, 7322-specific */
-#define PBC_PORT_SEL_LSB 26
-#define PBC_PORT_SEL_RMASK 1
-#define PBC_VL_NUM_LSB 27
-#define PBC_VL_NUM_RMASK 7
-#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
-#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
-
-static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
- [IB_RATE_2_5_GBPS] = 16,
- [IB_RATE_5_GBPS] = 8,
- [IB_RATE_10_GBPS] = 4,
- [IB_RATE_20_GBPS] = 2,
- [IB_RATE_30_GBPS] = 2,
- [IB_RATE_40_GBPS] = 1
-};
-
-static const char * const qib_sdma_state_names[] = {
- [qib_sdma_state_s00_hw_down] = "s00_HwDown",
- [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
- [qib_sdma_state_s20_idle] = "s20_Idle",
- [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
- [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
- [qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
- [qib_sdma_state_s99_running] = "s99_Running",
-};
-
-#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
-#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
-
-/* link training states, from IBC */
-#define IB_7322_LT_STATE_DISABLED 0x00
-#define IB_7322_LT_STATE_LINKUP 0x01
-#define IB_7322_LT_STATE_POLLACTIVE 0x02
-#define IB_7322_LT_STATE_POLLQUIET 0x03
-#define IB_7322_LT_STATE_SLEEPDELAY 0x04
-#define IB_7322_LT_STATE_SLEEPQUIET 0x05
-#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
-#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
-#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
-#define IB_7322_LT_STATE_CFGIDLE 0x0b
-#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
-#define IB_7322_LT_STATE_TXREVLANES 0x0d
-#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
-#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
-#define IB_7322_LT_STATE_CFGENH 0x10
-#define IB_7322_LT_STATE_CFGTEST 0x11
-#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
-#define IB_7322_LT_STATE_CFGWAITENH 0x13
-
-/* link state machine states from IBC */
-#define IB_7322_L_STATE_DOWN 0x0
-#define IB_7322_L_STATE_INIT 0x1
-#define IB_7322_L_STATE_ARM 0x2
-#define IB_7322_L_STATE_ACTIVE 0x3
-#define IB_7322_L_STATE_ACT_DEFER 0x4
-
-static const u8 qib_7322_physportstate[0x20] = {
- [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
- [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
- [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
- [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
- [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
- [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
- [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGRCVFCFG] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGWAITRMT] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
- [IB_7322_LT_STATE_RECOVERRETRAIN] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7322_LT_STATE_RECOVERWAITRMT] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7322_LT_STATE_RECOVERIDLE] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
- [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGWAITRMTTEST] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGWAITENH] =
- IB_PHYSPORTSTATE_CFG_WAIT_ENH,
- [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-struct qib_irq_notify {
- int rcv;
- void *arg;
- struct irq_affinity_notify notify;
-};
-#endif
-
-struct qib_chip_specific {
- u64 __iomem *cregbase;
- u64 *cntrs;
- spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
- spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
- u64 main_int_mask; /* clear bits which have dedicated handlers */
- u64 int_enable_mask; /* for per port interrupts in single port mode */
- u64 errormask;
- u64 hwerrmask;
- u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
- u64 gpio_mask; /* shadow the gpio mask register */
- u64 extctrl; /* shadow the gpio output enable, etc... */
- u32 ncntrs;
- u32 nportcntrs;
- u32 cntrnamelen;
- u32 portcntrnamelen;
- u32 numctxts;
- u32 rcvegrcnt;
- u32 updthresh; /* current AvailUpdThld */
- u32 updthresh_dflt; /* default AvailUpdThld */
- u32 r1;
- u32 num_msix_entries;
- u32 sdmabufcnt;
- u32 lastbuf_for_pio;
- u32 stay_in_freeze;
- u32 recovery_ports_initted;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- u32 dca_ctrl;
- int rhdr_cpu[18];
- int sdma_cpu[2];
- u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
-#endif
- struct qib_msix_entry *msix_entries;
- unsigned long *sendchkenable;
- unsigned long *sendgrhchk;
- unsigned long *sendibchk;
- u32 rcvavail_timeout[18];
- char emsgbuf[128]; /* for device error interrupt msg buffer */
-};
-
-/* Table of Tx emphasis entries, in "human readable" form. */
-struct txdds_ent {
- u8 amp;
- u8 pre;
- u8 main;
- u8 post;
-};
-
-struct vendor_txdds_ent {
- u8 oui[QSFP_VOUI_LEN];
- u8 *partnum;
- struct txdds_ent sdr;
- struct txdds_ent ddr;
- struct txdds_ent qdr;
-};
-
-static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
-
-#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
-#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
-#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
-#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
-
-#define H1_FORCE_VAL 8
-#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
-#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */
-
-/* The static and dynamic registers are paired, and the pairs indexed by spd */
-#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
- + ((spd) * 2))
-
-#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
-#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
-#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
-#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
-#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
-
-struct qib_chippport_specific {
- u64 __iomem *kpregbase;
- u64 __iomem *cpregbase;
- u64 *portcntrs;
- struct qib_pportdata *ppd;
- wait_queue_head_t autoneg_wait;
- struct delayed_work autoneg_work;
- struct delayed_work ipg_work;
- struct timer_list chase_timer;
- /*
- * these 5 fields are used to establish deltas for IB symbol
- * errors and linkrecovery errors. They can be reported on
- * some chips during link negotiation prior to INIT, and with
- * DDR when faking DDR negotiations with non-IBTA switches.
- * The chip counters are adjusted at driver unload if there is
- * a non-zero delta.
- */
- u64 ibdeltainprog;
- u64 ibsymdelta;
- u64 ibsymsnap;
- u64 iblnkerrdelta;
- u64 iblnkerrsnap;
- u64 iblnkdownsnap;
- u64 iblnkdowndelta;
- u64 ibmalfdelta;
- u64 ibmalfsnap;
- u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
- u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
- unsigned long qdr_dfe_time;
- unsigned long chase_end;
- u32 autoneg_tries;
- u32 recovery_init;
- u32 qdr_dfe_on;
- u32 qdr_reforce;
- /*
- * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
- * Entry zero is unused, to simplify indexing.
- */
- u8 h1_val;
- u8 no_eep; /* txselect table index to use if no qsfp info */
- u8 ipg_tries;
- u8 ibmalfusesnap;
- struct qib_qsfp_data qsfp_data;
- char epmsgbuf[192]; /* for port error interrupt msg buffer */
- char sdmamsgbuf[192]; /* for per-port sdma error messages */
-};
-
-static struct {
- const char *name;
- irq_handler_t handler;
- int lsb;
- int port; /* 0 if not port-specific, else port # */
- int dca;
-} irq_table[] = {
- { "", qib_7322intr, -1, 0, 0 },
- { " (buf avail)", qib_7322bufavail,
- SYM_LSB(IntStatus, SendBufAvail), 0, 0},
- { " (sdma 0)", sdma_intr,
- SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
- { " (sdma 1)", sdma_intr,
- SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
- { " (sdmaI 0)", sdma_idle_intr,
- SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
- { " (sdmaI 1)", sdma_idle_intr,
- SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
- { " (sdmaP 0)", sdma_progress_intr,
- SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
- { " (sdmaP 1)", sdma_progress_intr,
- SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
- { " (sdmaC 0)", sdma_cleanup_intr,
- SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
- { " (sdmaC 1)", sdma_cleanup_intr,
- SYM_LSB(IntStatus, SDmaCleanupDone_1), 2, 0 },
-};
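-
-/*
- * In irq_table above: lsb is the IntStatus bit for each handler (-1
- * for the catch-all first entry), port is 0 when not port-specific,
- * and dca marks handlers whose interrupts participate in DCA setup
- * when CONFIG_INFINIBAND_QIB_DCA is enabled.
- */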
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-
-static const struct dca_reg_map {
- int shadow_inx;
- int lsb;
- u64 mask;
- u16 regno;
-} dca_rcvhdr_reg_map[] = {
- { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
- ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
- { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
- ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
- { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
- ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
- { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
- ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
- { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
- ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
- { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
- ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
- { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
- ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
- { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
- ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
- { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
- ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
- { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
- ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
- { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
- ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
- { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
- ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
- { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
- ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
- { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
- ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
- { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
- ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
- { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
- ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
- { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
- ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
- { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
- ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
-};
-#endif
-
-/* ibcctrl bits */
-#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
-#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
-
-#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
-#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
-#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
-
-#define BLOB_7322_IBCHG 0x101
-
-static inline void qib_write_kreg(const struct qib_devdata *dd,
- const u32 regno, u64 value);
-static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
-static void write_7322_initregs(struct qib_devdata *);
-static void write_7322_init_portregs(struct qib_pportdata *);
-static void setup_7322_link_recovery(struct qib_pportdata *, u32);
-static void check_7322_rxe_status(struct qib_pportdata *);
-static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-static void qib_setup_dca(struct qib_devdata *dd);
-static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
-static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
-#endif
-
-/**
- * qib_read_ureg32 - read 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @ctxt: context number
- *
- * Return the contents of a register that is virtualized to be per context.
- * Returns 0 on errors (not distinguishable from valid contents at
- * runtime; we may add a separate error variable at some point).
- */
-static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
- enum qib_ureg regno, int ctxt)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readl(regno + (u64 __iomem *)(
- (dd->ureg_align * ctxt) + (dd->userbase ?
- (char __iomem *)dd->userbase :
- (char __iomem *)dd->kregbase + dd->uregbase)));
-}
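-
-/*
- * The address math above resolves to: the user register base (userbase
- * when mapped separately, else kregbase + uregbase), plus
- * ureg_align * ctxt to select the context, plus regno 64-bit words
- * into that context's register page.
- */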
-
-/**
- * qib_write_ureg - write virtualized per-context register
- * @dd: device
- * @regno: register number
- * @value: value
- * @ctxt: context
- *
- * Write the contents of a register that is virtualized to be per context.
- */
-static inline void qib_write_ureg(const struct qib_devdata *dd,
- enum qib_ureg regno, u64 value, int ctxt)
-{
- u64 __iomem *ubase;
-
- if (dd->userbase)
- ubase = (u64 __iomem *)
- ((char __iomem *) dd->userbase +
- dd->ureg_align * ctxt);
- else
- ubase = (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *) dd->kregbase +
- dd->ureg_align * ctxt);
-
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &ubase[regno]);
-}
-
-static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
- const u32 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
- return readl((u32 __iomem *) &dd->kregbase[regno]);
-}
-
-static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
- const u32 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
- return readq(&dd->kregbase[regno]);
-}
-
-static inline void qib_write_kreg(const struct qib_devdata *dd,
- const u32 regno, u64 value)
-{
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &dd->kregbase[regno]);
-}
-
-/*
- * not many sanity checks for the port-specific kernel register routines,
- * since they are only used when it's known to be safe.
- */
-static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
- const u16 regno)
-{
- if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
- return 0ULL;
- return readq(&ppd->cpspec->kpregbase[regno]);
-}
-
-static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
- const u16 regno, u64 value)
-{
- if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
- (ppd->dd->flags & QIB_PRESENT))
- writeq(value, &ppd->cpspec->kpregbase[regno]);
-}
-
-/**
- * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
- * @dd: the qlogic_ib device
- * @regno: the register number to write
- * @ctxt: the context containing the register
- * @value: the value to write
- */
-static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
- const u16 regno, unsigned ctxt,
- u64 value)
-{
- qib_write_kreg(dd, regno + ctxt, value);
-}
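-
-/*
- * Per-context kernel registers are consecutive 64-bit registers, one
- * per context, so "regno + ctxt" above selects context ctxt's copy.
- */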
-
-static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readq(&dd->cspec->cregbase[regno]);
-}
-
-static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readl(&dd->cspec->cregbase[regno]);
-}
-
-static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
- u16 regno, u64 value)
-{
- if (ppd->cpspec && ppd->cpspec->cpregbase &&
- (ppd->dd->flags & QIB_PRESENT))
- writeq(value, &ppd->cpspec->cpregbase[regno]);
-}
-
-static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
- u16 regno)
-{
- if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
- !(ppd->dd->flags & QIB_PRESENT))
- return 0;
- return readq(&ppd->cpspec->cpregbase[regno]);
-}
-
-static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
- u16 regno)
-{
- if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
- !(ppd->dd->flags & QIB_PRESENT))
- return 0;
- return readl(&ppd->cpspec->cpregbase[regno]);
-}
-
-/* bits in Control register */
-#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
-#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
-
-/* bits in general interrupt regs */
-#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
-#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
-#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
-#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
-#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
-#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
-#define QIB_I_C_ERROR INT_MASK(Err)
-
-#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
-#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
-#define QIB_I_GPIO INT_MASK(AssertGPIO)
-#define QIB_I_P_SDMAINT(pidx) \
- (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
- INT_MASK_P(SDmaProgress, pidx) | \
- INT_MASK_PM(SDmaCleanupDone, pidx))
-
-/* Interrupt bits that are "per port" */
-#define QIB_I_P_BITSEXTANT(pidx) \
- (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
- INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
- INT_MASK_P(SDmaProgress, pidx) | \
- INT_MASK_PM(SDmaCleanupDone, pidx))
-
-/* Interrupt bits that are common to a device */
-/* currently unused: QIB_I_SPIOSENT */
-#define QIB_I_C_BITSEXTANT \
- (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
- QIB_I_SPIOSENT | \
- QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
-
-#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
- QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
-
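-/*
- * QIB_I_BITSEXTANT above collects every interrupt bit the driver knows
- * about: the device-common set plus the per-port sets for both ports.
- */
-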
-/*
- * Error bits that are "per port".
- */
-#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
-#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
-#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
-#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
-#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
-#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
-#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
-#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
-#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
-#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
-#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
-#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
-#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
-#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
-#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
-#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
-#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
-#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
-#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
-#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
-#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
-#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
-#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
-#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
-#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
-#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
-#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
-#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
-
-#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
-#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
-#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
-#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
-#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
-#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
-#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
-#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
-#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
-#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
-#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
-
-/* Error bits that are common to a device */
-#define QIB_E_RESET ERR_MASK(ResetNegated)
-#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
-#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
-
-
-/*
- * Per chip (rather than per-port) errors. Most either do
- * nothing but trigger a print (because they self-recover, or
- * always occur in tandem with other errors that handle the
- * issue), or indicate errors with no recovery; either way,
- * we want to know that they happened.
- */
-#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
-#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
-#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
-#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
-#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
-#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
-#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
-#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
-
-/* SDMA chip errors (not per port)
- * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
- * the SDMAHALT error immediately, so we just print the dup error via the
- * E_AUTO mechanism. This is true of most of the per-port fatal errors
- * as well, but since this is port-independent, by definition, it's
- * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
- * packet send errors, and so are handled in the same manner as other
- * per-packet errors.
- */
-#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
-#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
-#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
-
-/*
- * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
- * it is used to print "common" packet errors.
- */
-#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
- QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
- QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
- QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
- QIB_E_P_REBP)
-
-/* Error bits that are packet-related (Receive, per-port) */
-#define QIB_E_P_RPKTERRS (\
- QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
- QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
- QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
- QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
- QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
- QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
-
-/*
- * Error bits that are Send-related (per port)
- * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
- * All of these potentially need to have a buffer disarmed
- */
-#define QIB_E_P_SPKTERRS (\
- QIB_E_P_SUNEXP_PKTNUM |\
- QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
- QIB_E_P_SMAXPKTLEN |\
- QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
- QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
- QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
-
-#define QIB_E_SPKTERRS ( \
- QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
- ERR_MASK_N(SendUnsupportedVLErr) | \
- QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
-
-#define QIB_E_P_SDMAERRS ( \
- QIB_E_P_SDMAHALT | \
- QIB_E_P_SDMADESCADDRMISALIGN | \
- QIB_E_P_SDMAUNEXPDATA | \
- QIB_E_P_SDMAMISSINGDW | \
- QIB_E_P_SDMADWEN | \
- QIB_E_P_SDMARPYTAG | \
- QIB_E_P_SDMA1STDESC | \
- QIB_E_P_SDMABASE | \
- QIB_E_P_SDMATAILOUTOFBOUND | \
- QIB_E_P_SDMAOUTOFBOUND | \
- QIB_E_P_SDMAGENMISMATCH)
-
-/*
- * This sets some bits more than once, but makes it more obvious which
- * bits are not handled under other categories, and the repeat definition
- * is not a problem.
- */
-#define QIB_E_P_BITSEXTANT ( \
- QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
- QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
- QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
- QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
- )
-
-/*
- * These are errors that can occur when the link
- * changes state while a packet is being sent or received. This doesn't
- * cover things like EBP or VCRC that can be the result of the sender
- * having the link change state, so we receive a "known bad" packet.
- * All of these are "per port", so renamed:
- */
-#define QIB_E_P_LINK_PKTERRS (\
- QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
- QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
- QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
- QIB_E_P_RUNEXPCHAR)
-
-/*
- * This sets some bits more than once, but makes it more obvious which
- * bits are not handled under other categories (such as QIB_E_SPKTERRS),
- * and the repeat definition is not a problem.
- */
-#define QIB_E_C_BITSEXTANT (\
- QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
- QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
- QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
-
-/* Likewise Neuter E_SPKT_ERRS_IGNORE */
-#define E_SPKT_ERRS_IGNORE 0
-
-#define QIB_EXTS_MEMBIST_DISABLED \
- SYM_MASK(EXTStatus, MemBISTDisabled)
-#define QIB_EXTS_MEMBIST_ENDTEST \
- SYM_MASK(EXTStatus, MemBISTEndTest)
-
-#define QIB_E_SPIOARMLAUNCH \
- ERR_MASK(SendArmLaunchErr)
-
-#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
-#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
-
-/*
- * IBTA_1_2 is set when multiple speeds are enabled (normal),
- * and also if forced QDR (only QDR enabled). It's enabled for the
- * forced QDR case so that scrambling will be enabled by the TS3
- * exchange, when supported by both sides of the link.
- */
-#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
-#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
-#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
-#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
-#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
-#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
- SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
-#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
-
-#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
-#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
-
-#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
-#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
-#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
-
-#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
-#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
-#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
- SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
-#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
- SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
-#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
-
-#define IBA7322_REDIRECT_VEC_PER_REG 12
-
-#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
-#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
-#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
-#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
-#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
-
-#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
-
-#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
- .msg = #fldname , .sz = sizeof(#fldname) }
-#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
- fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
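-/*
- * For example, HWE_AUTO(MemoryErr) produces an entry with mask
- * SYM_MASK(HwErrMask, MemoryErrMask) and message text "MemoryErr";
- * sz includes the terminating nul, which err_decode() relies on.
- */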
-static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
- HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
- HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
- HWE_AUTO(PCIESerdesPClkNotDetect),
- HWE_AUTO(PowerOnBISTFailed),
- HWE_AUTO(TempsenseTholdReached),
- HWE_AUTO(MemoryErr),
- HWE_AUTO(PCIeBusParityErr),
- HWE_AUTO(PcieCplTimeout),
- HWE_AUTO(PciePoisonedTLP),
- HWE_AUTO_P(SDmaMemReadErr, 1),
- HWE_AUTO_P(SDmaMemReadErr, 0),
- HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
- HWE_AUTO_P(IBCBusToSPCParityErr, 1),
- HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
- HWE_AUTO(statusValidNoEop),
- HWE_AUTO(LATriggered),
- { .mask = 0, .sz = 0 }
-};
-
-#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
- .msg = #fldname, .sz = sizeof(#fldname) }
-#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
- .msg = #fldname, .sz = sizeof(#fldname) }
-static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
- E_AUTO(RcvEgrFullErr),
- E_AUTO(RcvHdrFullErr),
- E_AUTO(ResetNegated),
- E_AUTO(HardwareErr),
- E_AUTO(InvalidAddrErr),
- E_AUTO(SDmaVL15Err),
- E_AUTO(SBufVL15MisUseErr),
- E_AUTO(InvalidEEPCmd),
- E_AUTO(RcvContextShareErr),
- E_AUTO(SendVLMismatchErr),
- E_AUTO(SendArmLaunchErr),
- E_AUTO(SendSpecialTriggerErr),
- E_AUTO(SDmaWrongPortErr),
- E_AUTO(SDmaBufMaskDuplicateErr),
- { .mask = 0, .sz = 0 }
-};
-
-static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
- E_P_AUTO(IBStatusChanged),
- E_P_AUTO(SHeadersErr),
- E_P_AUTO(VL15BufMisuseErr),
- /*
- * SDmaHaltErr is not really an error, so make the message clearer.
- */
- {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
- .sz = 11},
- E_P_AUTO(SDmaDescAddrMisalignErr),
- E_P_AUTO(SDmaUnexpDataErr),
- E_P_AUTO(SDmaMissingDwErr),
- E_P_AUTO(SDmaDwEnErr),
- E_P_AUTO(SDmaRpyTagErr),
- E_P_AUTO(SDma1stDescErr),
- E_P_AUTO(SDmaBaseErr),
- E_P_AUTO(SDmaTailOutOfBoundErr),
- E_P_AUTO(SDmaOutOfBoundErr),
- E_P_AUTO(SDmaGenMismatchErr),
- E_P_AUTO(SendBufMisuseErr),
- E_P_AUTO(SendUnsupportedVLErr),
- E_P_AUTO(SendUnexpectedPktNumErr),
- E_P_AUTO(SendDroppedDataPktErr),
- E_P_AUTO(SendDroppedSmpPktErr),
- E_P_AUTO(SendPktLenErr),
- E_P_AUTO(SendUnderRunErr),
- E_P_AUTO(SendMaxPktLenErr),
- E_P_AUTO(SendMinPktLenErr),
- E_P_AUTO(RcvIBLostLinkErr),
- E_P_AUTO(RcvHdrErr),
- E_P_AUTO(RcvHdrLenErr),
- E_P_AUTO(RcvBadTidErr),
- E_P_AUTO(RcvBadVersionErr),
- E_P_AUTO(RcvIBFlowErr),
- E_P_AUTO(RcvEBPErr),
- E_P_AUTO(RcvUnsupportedVLErr),
- E_P_AUTO(RcvUnexpectedCharErr),
- E_P_AUTO(RcvShortPktLenErr),
- E_P_AUTO(RcvLongPktLenErr),
- E_P_AUTO(RcvMaxPktLenErr),
- E_P_AUTO(RcvMinPktLenErr),
- E_P_AUTO(RcvICRCErr),
- E_P_AUTO(RcvVCRCErr),
- E_P_AUTO(RcvFormatErr),
- { .mask = 0, .sz = 0 }
-};
-
-/*
- * Below generates "auto-message" for interrupts not specific to any port or
- * context
- */
-#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
- .msg = #fldname, .sz = sizeof(#fldname) }
-/* Below generates "auto-message" for interrupts specific to a port */
-#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
- SYM_LSB(IntMask, fldname##Mask##_0), \
- SYM_LSB(IntMask, fldname##Mask##_1)), \
- .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
-/* For some reason, the SerDesTrimDone bits are reversed */
-#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
- SYM_LSB(IntMask, fldname##Mask##_1), \
- SYM_LSB(IntMask, fldname##Mask##_0)), \
- .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
-/*
- * Below generates "auto-message" for interrupts specific to a context,
- * with ctxt-number appended
- */
-#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
- SYM_LSB(IntMask, fldname##0IntMask), \
- SYM_LSB(IntMask, fldname##17IntMask)), \
- .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
-
-#define TXSYMPTOM_AUTO_P(fldname) \
- { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
- .msg = #fldname, .sz = sizeof(#fldname) }
-static const struct qib_hwerror_msgs hdrchk_msgs[] = {
- TXSYMPTOM_AUTO_P(NonKeyPacket),
- TXSYMPTOM_AUTO_P(GRHFail),
- TXSYMPTOM_AUTO_P(PkeyFail),
- TXSYMPTOM_AUTO_P(QPFail),
- TXSYMPTOM_AUTO_P(SLIDFail),
- TXSYMPTOM_AUTO_P(RawIPV6),
- TXSYMPTOM_AUTO_P(PacketTooSmall),
- { .mask = 0, .sz = 0 }
-};
-
-#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer so it can be re-used;
- * we don't need to force an update of pioavail here.
- */
-static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 i;
- int any;
- u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
- u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
- unsigned long sbuf[4];
-
- /*
- * It's possible that sendbuffererror could have bits set; might
- * have already done this as a result of hardware error handling.
- */
- any = 0;
- for (i = 0; i < regcnt; ++i) {
- sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
- if (sbuf[i]) {
- any = 1;
- qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
- }
- }
-
- if (any)
- qib_disarm_piobufs_set(dd, sbuf, piobcnt);
-}
-
-/* No txe_recover yet, if ever */
-
-/* No decode_errors yet */
-static void err_decode(char *msg, size_t len, u64 errs,
- const struct qib_hwerror_msgs *msp)
-{
- u64 these, lmask;
- int took, multi, n = 0;
-
- while (errs && msp && msp->mask) {
- multi = (msp->mask & (msp->mask - 1));
- while (errs & msp->mask) {
- these = (errs & msp->mask);
- lmask = (these & (these - 1)) ^ these;
- if (len) {
- if (n++) {
- /* separate the strings */
- *msg++ = ',';
- len--;
- }
- /* msp->sz counts the nul */
- took = min_t(size_t, msp->sz - (size_t)1, len);
- memcpy(msg, msp->msg, took);
- len -= took;
- msg += took;
- if (len)
- *msg = '\0';
- }
- errs &= ~lmask;
- if (len && multi) {
- /* More than one bit this mask */
- int idx = -1;
-
- while (lmask & msp->mask) {
- ++idx;
- lmask >>= 1;
- }
- took = scnprintf(msg, len, "_%d", idx);
- len -= took;
- msg += took;
- }
- }
- ++msp;
- }
- /* If some bits are left, show in hex. */
- if (len && errs)
- snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
- (unsigned long long) errs);
-}
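-
-/*
- * The decoded string is comma-separated; when a table mask covers more
- * than one bit, each decoded bit gets a "_<n>" suffix giving its
- * offset within the mask (the port number, for the *_P entries), and
- * any bits with no table entry are appended as ",MORE:<hex>".
- */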
-
-/* only called if r1 set */
-static void flush_fifo(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 __iomem *piobuf;
- u32 bufn;
- u32 *hdr;
- u64 pbc;
- const unsigned hdrwords = 7;
- static struct ib_header ibhdr = {
- .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
- .lrh[1] = IB_LID_PERMISSIVE,
- .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
- .lrh[3] = IB_LID_PERMISSIVE,
- .u.oth.bth[0] = cpu_to_be32(
- (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
- .u.oth.bth[1] = cpu_to_be32(0),
- .u.oth.bth[2] = cpu_to_be32(0),
- .u.oth.u.ud.deth[0] = cpu_to_be32(0),
- .u.oth.u.ud.deth[1] = cpu_to_be32(0),
- };
-
- /*
- * Send a dummy VL15 packet to flush the launch FIFO.
- * This will not actually be sent since the TxeBypassIbc bit is set.
- */
- pbc = PBC_7322_VL15_SEND |
- (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
- (hdrwords + SIZE_OF_CRC);
- piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
- if (!piobuf)
- return;
- writeq(pbc, piobuf);
- hdr = (u32 *) &ibhdr;
- if (dd->flags & QIB_PIO_FLUSH_WC) {
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
- qib_flush_wc();
- __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
- qib_flush_wc();
- } else
- qib_pio_copy(piobuf + 2, hdr, hdrwords);
- qib_sendbuf_done(dd, bufn);
-}
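-
-/*
- * In the write-combining path above, all but the last header word are
- * copied and flushed first; only then is the final word written and
- * flushed, so the WC mapping cannot merge or reorder the stores that
- * complete the buffer.
- */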
-
-/*
- * This is called with interrupts disabled and sdma_lock held.
- */
-static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 set_sendctrl = 0;
- u64 clr_sendctrl = 0;
-
- if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
- set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
-
- if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
- set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
-
- if (op & QIB_SDMA_SENDCTRL_OP_HALT)
- set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
-
- if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
- set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
- SYM_MASK(SendCtrl_0, TxeAbortIbc) |
- SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
- SYM_MASK(SendCtrl_0, TxeAbortIbc) |
- SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
-
- spin_lock(&dd->sendctrl_lock);
-
- /* If we are draining everything, block sends first */
- if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
- ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- ppd->p_sendctrl |= set_sendctrl;
- ppd->p_sendctrl &= ~clr_sendctrl;
-
- if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
- qib_write_kreg_port(ppd, krp_sendctrl,
- ppd->p_sendctrl |
- SYM_MASK(SendCtrl_0, SDmaCleanup));
- else
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
- ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- spin_unlock(&dd->sendctrl_lock);
-
- if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
- flush_fifo(ppd);
-}
-
-static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
-{
- __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
-}
-
-static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
-{
- /*
- * Set SendDmaLenGen and clear and set
- * the MSB of the generation count to enable generation checking
- * and load the internal generation counter.
- */
- qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
- qib_write_kreg_port(ppd, krp_senddmalengen,
- ppd->sdma_descq_cnt |
- (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
-}
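-
-/*
- * The first write above loads the queue length with the generation MSB
- * clear; the second rewrites it with the MSB set. That clear-then-set
- * sequence is what loads the internal generation counter.
- */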
-
-/*
- * Must be called with sdma_lock held, or before init finished.
- */
-static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
-{
- /* Commit writes to memory and advance the tail on the chip */
- wmb();
- ppd->sdma_descq_tail = tail;
- qib_write_kreg_port(ppd, krp_senddmatail, tail);
-}
-
-/*
- * This is called with interrupts disabled and sdma_lock held.
- */
-static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
-{
- /*
- * Drain all FIFOs.
- * The hardware doesn't require this but we do it so that verbs
- * and user applications don't wait for link active to send stale
- * data.
- */
- sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
-
- qib_sdma_7322_setlengen(ppd);
- qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
- ppd->sdma_head_dma[0] = 0;
- qib_7322_sdma_sendctrl(ppd,
- ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
-}
-
-#define DISABLES_SDMA ( \
- QIB_E_P_SDMAHALT | \
- QIB_E_P_SDMADESCADDRMISALIGN | \
- QIB_E_P_SDMAMISSINGDW | \
- QIB_E_P_SDMADWEN | \
- QIB_E_P_SDMARPYTAG | \
- QIB_E_P_SDMA1STDESC | \
- QIB_E_P_SDMABASE | \
- QIB_E_P_SDMATAILOUTOFBOUND | \
- QIB_E_P_SDMAOUTOFBOUND | \
- QIB_E_P_SDMAGENMISMATCH)
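-
-/*
- * DISABLES_SDMA is QIB_E_P_SDMAERRS minus QIB_E_P_SDMAUNEXPDATA; the
- * unexpected-data error is reported separately in sdma_7322_p_errors()
- * below rather than treated as disabling the engine.
- */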
-
-static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
-{
- unsigned long flags;
- struct qib_devdata *dd = ppd->dd;
-
- errs &= QIB_E_P_SDMAERRS;
- err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
- errs, qib_7322p_error_msgs);
-
- if (errs & QIB_E_P_SDMAUNEXPDATA)
- qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
- ppd->port);
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- if (errs != QIB_E_P_SDMAHALT) {
- /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
- qib_dev_porterr(dd, ppd->port,
- "SDMA %s 0x%016llx %s\n",
- qib_sdma_state_names[ppd->sdma_state.current_state],
- errs, ppd->cpspec->sdmamsgbuf);
- dump_sdma_7322_state(ppd);
- }
-
- switch (ppd->sdma_state.current_state) {
- case qib_sdma_state_s00_hw_down:
- break;
-
- case qib_sdma_state_s10_hw_start_up_wait:
- if (errs & QIB_E_P_SDMAHALT)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e20_hw_started);
- break;
-
- case qib_sdma_state_s20_idle:
- break;
-
- case qib_sdma_state_s30_sw_clean_up_wait:
- break;
-
- case qib_sdma_state_s40_hw_clean_up_wait:
- if (errs & QIB_E_P_SDMAHALT)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e50_hw_cleaned);
- break;
-
- case qib_sdma_state_s50_hw_halt_wait:
- if (errs & QIB_E_P_SDMAHALT)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e60_hw_halted);
- break;
-
- case qib_sdma_state_s99_running:
- __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
- __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
- break;
- }
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-/*
- * handle per-device errors (not per-port errors)
- */
-static noinline void handle_7322_errors(struct qib_devdata *dd)
-{
- char *msg;
- u64 iserr = 0;
- u64 errs;
- u64 mask;
-
- qib_stats.sps_errints++;
- errs = qib_read_kreg64(dd, kr_errstatus);
- if (!errs) {
- qib_devinfo(dd->pcidev,
- "device error interrupt, but no error bits set!\n");
- goto done;
- }
-
- /* don't report errors that are masked */
- errs &= dd->cspec->errormask;
- msg = dd->cspec->emsgbuf;
-
- /* do these first, they are most important */
- if (errs & QIB_E_HARDWARE) {
- *msg = '\0';
- qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
- }
-
- if (errs & QIB_E_SPKTERRS) {
- qib_disarm_7322_senderrbufs(dd->pport);
- qib_stats.sps_txerrs++;
- } else if (errs & QIB_E_INVALIDADDR)
- qib_stats.sps_txerrs++;
- else if (errs & QIB_E_ARMLAUNCH) {
- qib_stats.sps_txerrs++;
- qib_disarm_7322_senderrbufs(dd->pport);
- }
- qib_write_kreg(dd, kr_errclear, errs);
-
- /*
- * The ones we mask off are handled specially below
- * or above. Also mask SDMADISABLED by default as it
- * is too chatty.
- */
- mask = QIB_E_HARDWARE;
- *msg = '\0';
-
- err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
- qib_7322error_msgs);
-
- /*
- * Getting reset is a tragedy for all ports. Mark the device
- * _and_ the ports as "offline" in a way meaningful to each.
- */
- if (errs & QIB_E_RESET) {
- int pidx;
-
- qib_dev_err(dd,
- "Got reset, requires re-init (unload and reload driver)\n");
- dd->flags &= ~QIB_INITTED; /* needs re-init */
- /* mark as having had error */
- *dd->devstatusp |= QIB_STATUS_HWERROR;
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- if (dd->pport[pidx].link_speed_supported)
- *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
- }
-
- if (*msg && iserr)
- qib_dev_err(dd, "%s error\n", msg);
-
- /*
- * If there were hdrq or egrfull errors, wake up any processes
- * waiting in poll. We used to try to check which contexts had
- * the overflow, but given the cost of that and the chip reads
- * to support it, it's better to just wake everybody up if we
- * get an overflow; waiters can poll again if it's not them.
- */
- if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
- qib_handle_urcv(dd, ~0U);
- if (errs & ERR_MASK(RcvEgrFullErr))
- qib_stats.sps_buffull++;
- else
- qib_stats.sps_hdrfull++;
- }
-
-done:
- return;
-}
-
-static void qib_error_tasklet(struct tasklet_struct *t)
-{
- struct qib_devdata *dd = from_tasklet(dd, t, error_tasklet);
-
- handle_7322_errors(dd);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
-}
-
-static void reenable_chase(struct timer_list *t)
-{
- struct qib_chippport_specific *cp = timer_container_of(cp, t,
- chase_timer);
- struct qib_pportdata *ppd = cp->ppd;
-
- ppd->cpspec->chase_timer.expires = 0;
- qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
- QLOGIC_IB_IBCC_LINKINITCMD_POLL);
-}
-
-static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
- u8 ibclt)
-{
- ppd->cpspec->chase_end = 0;
-
- if (!qib_chase)
- return;
-
- qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
- add_timer(&ppd->cpspec->chase_timer);
-}
-
-static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
-{
- u8 ibclt;
- unsigned long tnow;
-
- ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
-
- /*
- * Detect and handle the state chase issue, where we can
- * get stuck if we are unlucky on timing on both sides of
- * the link. If we are, we disable, set a timer, and
- * then re-enable.
- */
- switch (ibclt) {
- case IB_7322_LT_STATE_CFGRCVFCFG:
- case IB_7322_LT_STATE_CFGWAITRMT:
- case IB_7322_LT_STATE_TXREVLANES:
- case IB_7322_LT_STATE_CFGENH:
- tnow = jiffies;
- if (ppd->cpspec->chase_end &&
- time_after(tnow, ppd->cpspec->chase_end))
- disable_chase(ppd, tnow, ibclt);
- else if (!ppd->cpspec->chase_end)
- ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
- break;
- default:
- ppd->cpspec->chase_end = 0;
- break;
- }
-
- if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
- ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
- ibclt == IB_7322_LT_STATE_LINKUP) &&
- (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
- force_h1(ppd);
- ppd->cpspec->qdr_reforce = 1;
- if (!ppd->dd->cspec->r1)
- serdes_7322_los_enable(ppd, 0);
- } else if (ppd->cpspec->qdr_reforce &&
- (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
- (ibclt == IB_7322_LT_STATE_CFGENH ||
- ibclt == IB_7322_LT_STATE_CFGIDLE ||
- ibclt == IB_7322_LT_STATE_LINKUP))
- force_h1(ppd);
-
- if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
- ppd->link_speed_enabled == QIB_IB_QDR &&
- (ibclt == IB_7322_LT_STATE_CFGTEST ||
- ibclt == IB_7322_LT_STATE_CFGENH ||
- (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
- ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
- adj_tx_serdes(ppd);
-
- if (ibclt != IB_7322_LT_STATE_LINKUP) {
- u8 ltstate = qib_7322_phys_portstate(ibcst);
- u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
- LinkTrainingState);
- if (!ppd->dd->cspec->r1 &&
- pibclt == IB_7322_LT_STATE_LINKUP &&
- ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
- /* If the link went down (but not into recovery),
- * turn LOS back on */
- serdes_7322_los_enable(ppd, 1);
- if (!ppd->cpspec->qdr_dfe_on &&
- ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
- ppd->cpspec->qdr_dfe_on = 1;
- ppd->cpspec->qdr_dfe_time = 0;
- /* On link down, reenable QDR adaptation */
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_DOWN_R1 :
- QDR_STATIC_ADAPT_DOWN);
- pr_info(
- "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
- ppd->dd->unit, ppd->port, ibclt);
- }
- }
-}
-
-static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
-
-/*
- * This is per-pport error handling.
- * It will likely get its own MSIx interrupt (one for each port,
- * although just a single handler).
- */
-static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
-{
- char *msg;
- u64 ignore_this_time = 0, iserr = 0, errs, fmask;
- struct qib_devdata *dd = ppd->dd;
-
- /* do this as soon as possible */
- fmask = qib_read_kreg64(dd, kr_act_fmask);
- if (!fmask)
- check_7322_rxe_status(ppd);
-
- errs = qib_read_kreg_port(ppd, krp_errstatus);
- if (!errs)
- qib_devinfo(dd->pcidev,
- "Port%d error interrupt, but no error bits set!\n",
- ppd->port);
- if (!fmask)
- errs &= ~QIB_E_P_IBSTATUSCHANGED;
- if (!errs)
- goto done;
-
- msg = ppd->cpspec->epmsgbuf;
- *msg = '\0';
-
- if (errs & ~QIB_E_P_BITSEXTANT) {
- err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
- errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
- if (!*msg)
- snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
- "no others");
- qib_dev_porterr(dd, ppd->port,
- "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
- (errs & ~QIB_E_P_BITSEXTANT), msg);
- *msg = '\0';
- }
-
- if (errs & QIB_E_P_SHDR) {
- u64 symptom;
-
- /* determine cause, then write to clear */
- symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
- qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
- err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
- hdrchk_msgs);
- *msg = '\0';
- /* senderrbuf cleared in SPKTERRS below */
- }
-
- if (errs & QIB_E_P_SPKTERRS) {
- if ((errs & QIB_E_P_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when trying to bring the link
- * up, but the IB link changes state at the "wrong"
- * time. The IB logic then complains that the packet
- * isn't valid. We don't want to confuse people, so
- * we just don't print them, except at debug
- */
- err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
- (errs & QIB_E_P_LINK_PKTERRS),
- qib_7322p_error_msgs);
- *msg = '\0';
- ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
- }
- qib_disarm_7322_senderrbufs(ppd);
- } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when SMA is trying to bring the link
- * up, but the IB link changes state at the "wrong" time.
- * The IB logic then complains that the packet isn't
- * valid. We don't want to confuse people, so we just
- * don't print them, except at debug
- */
- err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
- qib_7322p_error_msgs);
- ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
- *msg = '\0';
- }
-
- qib_write_kreg_port(ppd, krp_errclear, errs);
-
- errs &= ~ignore_this_time;
- if (!errs)
- goto done;
-
- if (errs & QIB_E_P_RPKTERRS)
- qib_stats.sps_rcverrs++;
- if (errs & QIB_E_P_SPKTERRS)
- qib_stats.sps_txerrs++;
-
- iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
-
- if (errs & QIB_E_P_SDMAERRS)
- sdma_7322_p_errors(ppd, errs);
-
- if (errs & QIB_E_P_IBSTATUSCHANGED) {
- u64 ibcs;
- u8 ltstate;
-
- ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
- ltstate = qib_7322_phys_portstate(ibcs);
-
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- handle_serdes_issues(ppd, ibcs);
- if (!(ppd->cpspec->ibcctrl_a &
- SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
- /*
- * We got our interrupt, so init code should be
- * happy and not try alternatives. Now squelch
- * other "chatter" from link-negotiation (pre Init)
- */
- ppd->cpspec->ibcctrl_a |=
- SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- }
-
- /* Update our picture of width and speed from chip */
- ppd->link_width_active =
- (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
- IB_WIDTH_4X : IB_WIDTH_1X;
- ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
- LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
- SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
- QIB_IB_DDR : QIB_IB_SDR;
-
- if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
- IB_PHYSPORTSTATE_DISABLED)
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- else
- /*
- * Since going into a recovery state causes the link
- * state to go down and since recovery is transitory,
- * it is better if we "miss" ever seeing the link
- * training state go into recovery (i.e., ignore this
- * transition for link state special handling purposes)
- * without updating lastibcstat.
- */
- if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
- qib_handle_e_ibstatuschanged(ppd, ibcs);
- }
- if (*msg && iserr)
- qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
-
- if (ppd->state_wanted & ppd->lflags)
- wake_up_interruptible(&ppd->state_wait);
-done:
- return;
-}
-
-/* enable/disable chip from delivering interrupts */
-static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- if (dd->flags & QIB_BADINTR)
- return;
- qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
- /* cause any pending enabled interrupts to be re-delivered */
- qib_write_kreg(dd, kr_intclear, 0ULL);
- if (dd->cspec->num_msix_entries) {
- /* and same for MSIx */
- u64 val = qib_read_kreg64(dd, kr_intgranted);
-
- if (val)
- qib_write_kreg(dd, kr_intgranted, val);
- }
- } else
- qib_write_kreg(dd, kr_intmask, 0ULL);
-}
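-
-/*
- * Re-delivery works because writing a 0 bit to the clear register for
- * a bit still set in the status register re-generates that interrupt
- * (the same property qib_7322_clear_freeze() below relies on); writing
- * the granted bits back does the analogous re-delivery for MSIx.
- */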
-
-/*
- * Try to cleanup as much as possible for anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- * This is in chip-specific code because of all of the register accesses,
- * even though the details are similar on most chips.
- */
-static void qib_7322_clear_freeze(struct qib_devdata *dd)
-{
- int pidx;
-
- /* disable error interrupts, to avoid confusion */
- qib_write_kreg(dd, kr_errmask, 0ULL);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- if (dd->pport[pidx].link_speed_supported)
- qib_write_kreg_port(dd->pport + pidx, krp_errmask,
- 0ULL);
-
- /* also disable interrupts; errormask is sometimes overwritten */
- qib_7322_set_intr_state(dd, 0);
-
- /* clear the freeze, and be sure chip saw it */
- qib_write_kreg(dd, kr_control, dd->control);
- qib_read_kreg32(dd, kr_scratch);
-
- /*
- * Force new interrupt if any hwerr, error or interrupt bits are
- * still set, and clear "safe" send packet errors related to freeze
- * and cancelling sends. Re-enable error interrupts before possible
- * force of re-interrupt on pending interrupts.
- */
- qib_write_kreg(dd, kr_hwerrclear, 0ULL);
- qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
- /* We need to purge per-port errs and reset mask, too */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- if (!dd->pport[pidx].link_speed_supported)
- continue;
- qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
- qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
- }
- qib_7322_set_intr_state(dd, 1);
-}
-
-/* no error handling to speak of */
-/**
- * qib_7322_handle_hwerrors - display hardware errors.
- * @dd: the qlogic_ib device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now,
- * we'll print them and continue. We reuse the same message buffer as
- * qib_handle_errors() to avoid excessive stack usage.
- */
-static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
- size_t msgl)
-{
- u64 hwerrs;
- u32 ctrl;
- int isfatal = 0;
-
- hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
- if (!hwerrs)
- goto bail;
- if (hwerrs == ~0ULL) {
- qib_dev_err(dd,
- "Read of hardware error status failed (all bits set); ignoring\n");
- goto bail;
- }
- qib_stats.sps_hwerrs++;
-
- /* Always clear the error status register, except BIST fail */
- qib_write_kreg(dd, kr_hwerrclear, hwerrs &
- ~HWE_MASK(PowerOnBISTFailed));
-
- hwerrs &= dd->cspec->hwerrmask;
-
- /* no EEPROM logging, yet */
-
- if (hwerrs)
- qib_devinfo(dd->pcidev,
- "Hardware error: hwerr=0x%llx (cleared)\n",
- (unsigned long long) hwerrs);
-
- ctrl = qib_read_kreg32(dd, kr_control);
- if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
- /*
- * No recovery yet...
- */
- if ((hwerrs & ~HWE_MASK(LATriggered)) ||
- dd->cspec->stay_in_freeze) {
- /*
- * If any bits are set that we aren't ignoring, only make
- * the complaint once, in case it's stuck or recurring and
- * we get here multiple times.
- * Force the link down, so the switch knows, and the
- * LEDs are turned off.
- */
- if (dd->flags & QIB_INITTED)
- isfatal = 1;
- } else
- qib_7322_clear_freeze(dd);
- }
-
- if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
- isfatal = 1;
- strscpy(msg,
- "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
-
- /* Ignore esoteric PLL failures et al. */
-
- qib_dev_err(dd, "%s hardware error\n", msg);
-
- if (hwerrs &
- (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
- SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
- int pidx = 0;
- int err;
- unsigned long flags;
- struct qib_pportdata *ppd = dd->pport;
-
- for (; pidx < dd->num_pports; ++pidx, ppd++) {
- err = 0;
- if (pidx == 0 && (hwerrs &
- SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
- err++;
- if (pidx == 1 && (hwerrs &
- SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
- err++;
- if (err) {
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- dump_sdma_7322_state(ppd);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- }
- }
-
- if (isfatal && !dd->diag_client) {
- qib_dev_err(dd,
- "Fatal Hardware Error, no longer usable, SN %.16s\n",
- dd->serial);
- /*
- * for /sys status file and user programs to print; if no
- * trailing brace is copied, we'll know it was truncated.
- */
- if (dd->freezemsg)
- snprintf(dd->freezemsg, dd->freezelen,
- "{%s}", msg);
- qib_disable_after_error(dd);
- }
-bail:;
-}
-
-/**
- * qib_7322_init_hwerrors - enable hardware errors
- * @dd: the qlogic_ib device
- *
- * Now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occurred,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void qib_7322_init_hwerrors(struct qib_devdata *dd)
-{
- int pidx;
- u64 extsval;
-
- extsval = qib_read_kreg64(dd, kr_extstatus);
- if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
- QIB_EXTS_MEMBIST_ENDTEST)))
- qib_dev_err(dd, "MemBIST did not complete!\n");
-
- /* never clear BIST failure, so reported on each driver load */
- qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
-
- /* clear all */
- qib_write_kreg(dd, kr_errclear, ~0ULL);
- /* enable errors that are masked, at least this first time. */
- qib_write_kreg(dd, kr_errmask, ~0ULL);
- dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- if (dd->pport[pidx].link_speed_supported)
- qib_write_kreg_port(dd->pport + pidx, krp_errmask,
- ~0ULL);
-}
-
-/*
- * Disable and enable the armlaunch error. Used for PIO bandwidth testing
- * on chips that are count-based, rather than trigger-based. There is no
- * reference counting, but that's also fine, given the intended use.
- * Only chip-specific because it's all register accesses
- */
-static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
- dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
- } else
- dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
-}
-
-/*
- * Formerly took parameter <which> in pre-shifted,
- * pre-merged form with LinkCmd and LinkInitCmd
- * together, and assumed that zero was a NOP.
- */
-static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
- u16 linitcmd)
-{
- u64 mod_wd;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
- /*
- * If we are told to disable, note that so link-recovery
- * code does not attempt to bring us back up.
- * Also reset everything that we can, so we start
- * completely clean when re-enabled (before we
- * actually issue the disable to the IBC)
- */
- qib_7322_mini_pcs_reset(ppd);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
- /*
- * Any other linkinitcmd will lead to LINKDOWN and then
- * to INIT (if all is well), so clear flag to let
- * link-recovery code attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- /*
- * Clear status change interrupt reduction so the
- * new state is seen.
- */
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
- }
-
- mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
- (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
- mod_wd);
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
-
-}
-
-/*
- * The total RCV buffer memory is 64KB, used for both ports, and is
- * in units of 64 bytes (same as IB flow control credit unit).
- * The consumedVL units in the same registers are in 32-byte units!
- * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
- * and we can therefore allocate just 9 IB credits for 2 VL15 packets
- * in krp_rxcreditvl15, rather than 10.
- */
-#define RCV_BUF_UNITSZ 64
-#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
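-/*
- * A worked example (illustrative, dual-port case): NUM_RCV_BUF_UNITS is
- * 65536 / (64 * 2) = 512 units per port, and the VL15 reservation in
- * set_vls() below is (2 * 288 + 63) / 64 = 9 units, matching the 9 IB
- * credits described above.
- */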
-
-static void set_vls(struct qib_pportdata *ppd)
-{
- int i, numvls, totcred, cred_vl, vl0extra;
- struct qib_devdata *dd = ppd->dd;
- u64 val;
-
- numvls = qib_num_vls(ppd->vls_operational);
-
- /*
- * Set up per-VL credits. Below is kluge based on these assumptions:
- * 1) port is disabled at the time early_init is called.
- * 2) give VL15 9 credits, enough for two max-plausible packets.
- * 3) Give VL0-N the rest, with any rounding excess used for VL0
- */
- /* 2 VL15 packets @ 288 bytes each (including IB headers) */
- totcred = NUM_RCV_BUF_UNITS(dd);
- cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
- totcred -= cred_vl;
- qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
- cred_vl = totcred / numvls;
- vl0extra = totcred - cred_vl * numvls;
- qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
- for (i = 1; i < numvls; i++)
- qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
- for (; i < 8; i++) /* no buffer space for other VLs */
- qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
-
- /* Notify IBC that credits need to be recalculated */
- val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
- val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
- qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
- qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
-
- for (i = 0; i < numvls; i++)
- val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
- val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
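- /*
- * The reads above discard their results; they are presumably just
- * flushing the credit writes to the chip before changing NumVLane.
- */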
-
- /* Change the number of operational VLs */
- ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
- ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
- ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
-}
-
-/*
- * The code that deals with actual SerDes is in serdes_7322_init().
- * Compared to the code for iba7220, it is minimal.
- */
-static int serdes_7322_init(struct qib_pportdata *ppd);
-
-/**
- * qib_7322_bringup_serdes - bring up the serdes
- * @ppd: physical port on the qlogic_ib device
- */
-static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 val, guid, ibc;
- unsigned long flags;
-
- /*
- * The SerDes model is not in Pd, but we still need to set up
- * much of IBCCtrl and IBCDDRCtrl; move this elsewhere
- * eventually.
- */
- /* Put IBC in reset, sends disabled (should be in reset already) */
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
-
- /* ensure previous Tx parameters are not still forced */
- qib_write_kreg_port(ppd, krp_tx_deemph_override,
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- reset_tx_deemphasis_override));
-
- if (qib_compat_ddr_negotiate) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
- crp_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
- crp_iblinkerrrecov);
- }
-
- /* flowcontrolwatermark is in units of KBytes */
- ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
- /*
- * Flow control is sent this often, even if no changes in
- * buffer space occur. Units are 128ns for this chip.
- * Set to 3usec.
- */
- ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
- /* max error tolerance */
- ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
- /* IB credit flow control. */
- ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
- /*
- * set initial max size pkt IBC will send, including ICRC; it's the
- * PIO buffer size in dwords, less 1; also see qib_set_mtu()
- */
- ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
- SYM_LSB(IBCCtrlA_0, MaxPktLen);
- ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
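- /*
- * For illustration: with an 8KB ibmaxlen, the value programmed is
- * (8192 >> 2) + 1 = 2049 dwords.
- */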
-
- /*
- * Reset the PCS interface to the serdes (and also ibc, which is still
- * in reset from above). Writes new value of ibcctrl_a as last step.
- */
- qib_7322_mini_pcs_reset(ppd);
-
- if (!ppd->cpspec->ibcctrl_b) {
- unsigned lse = ppd->link_speed_enabled;
-
- /*
- * Not on re-init after reset, establish shadow
- * and force initial config.
- */
- ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
- krp_ibcctrl_b);
- ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
- IBA7322_IBC_SPEED_DDR |
- IBA7322_IBC_SPEED_SDR |
- IBA7322_IBC_WIDTH_AUTONEG |
- SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
- if (lse & (lse - 1)) /* Multiple speeds enabled */
- ppd->cpspec->ibcctrl_b |=
- (lse << IBA7322_IBC_SPEED_LSB) |
- IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK;
- else
- ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
- IBA7322_IBC_SPEED_QDR |
- IBA7322_IBC_IBTA_1_2_MASK :
- (lse == QIB_IB_DDR) ?
- IBA7322_IBC_SPEED_DDR :
- IBA7322_IBC_SPEED_SDR;
- if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
- (IB_WIDTH_1X | IB_WIDTH_4X))
- ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
- else
- ppd->cpspec->ibcctrl_b |=
- ppd->link_width_enabled == IB_WIDTH_4X ?
- IBA7322_IBC_WIDTH_4X_ONLY :
- IBA7322_IBC_WIDTH_1X_ONLY;
-
- /* always enable these on driver reload, not sticky */
- ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
- IBA7322_IBC_HRTBT_MASK);
- }
- qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
-
- /* setup so we have more time at CFGTEST to change H1 */
- val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
- val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
- val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
- qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
-
- serdes_7322_init(ppd);
-
- guid = be64_to_cpu(ppd->guid);
- if (!guid) {
- if (dd->base_guid)
- guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
- ppd->guid = cpu_to_be64(guid);
- }
-
- qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
-
- /* Enable port */
- ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
- set_vls(ppd);
-
- /* initially come up DISABLED, without sending anything. */
- val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- /* clear the linkinit cmds */
- ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
-
- /* be paranoid against later code motion, etc. */
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
- qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-
- /* Also enable IBSTATUSCHG interrupt. */
- val = qib_read_kreg_port(ppd, krp_errmask);
- qib_write_kreg_port(ppd, krp_errmask,
- val | ERR_MASK_N(IBStatusChanged));
-
- /* Always zero until we start messing with SerDes for real */
- return 0;
-}
-
-/**
- * qib_7322_mini_quiet_serdes - set serdes to txidle
- * @ppd: physical port on the qlogic_ib device
- * Called when driver is being unloaded
- */
-static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
-{
- u64 val;
- unsigned long flags;
-
- qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- wake_up(&ppd->cpspec->autoneg_wait);
- cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
- if (ppd->dd->cspec->r1)
- cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
-
- ppd->cpspec->chase_end = 0;
- if (ppd->cpspec->chase_timer.function) /* if initted */
- timer_delete_sync(&ppd->cpspec->chase_timer);
-
- /*
- * Despite the name, actually disables IBC as well. Do it when
- * we are as sure as possible that no more packets can be
- * received, following the down and the PCS reset.
- * The actual disabling happens in qib_7322_mini_pcs_reset(),
- * along with the PCS being reset.
- */
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
- qib_7322_mini_pcs_reset(ppd);
-
- /*
- * Update the adjusted counters so the adjustment persists
- * across driver reload.
- */
- if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
- ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
- struct qib_devdata *dd = ppd->dd;
- u64 diagc;
-
- /* enable counter writes */
- diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
- qib_write_kreg(dd, kr_hwdiagctrl,
- diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
-
- if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
- val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
- if (ppd->cpspec->ibdeltainprog)
- val -= val - ppd->cpspec->ibsymsnap;
- val -= ppd->cpspec->ibsymdelta;
- write_7322_creg_port(ppd, crp_ibsymbolerr, val);
- }
- if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
- val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
- if (ppd->cpspec->ibdeltainprog)
- val -= val - ppd->cpspec->iblnkerrsnap;
- val -= ppd->cpspec->iblnkerrdelta;
- write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
- }
- if (ppd->cpspec->iblnkdowndelta) {
- val = read_7322_creg32_port(ppd, crp_iblinkdown);
- val += ppd->cpspec->iblnkdowndelta;
- write_7322_creg_port(ppd, crp_iblinkdown, val);
- }
- /*
- * No need to save ibmalfdelta since IB perfcounters
- * are cleared on driver reload.
- */
-
- /* and disable counter writes */
- qib_write_kreg(dd, kr_hwdiagctrl, diagc);
- }
-}
-
-/**
- * qib_setup_7322_setextled - set the state of the two external LEDs
- * @ppd: physical port on the qlogic_ib device
- * @on: whether the link is up or not
- *
- * The exact combo of LEDs if on is true is determined by looking
- * at the ibcstatus.
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state),
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- */
-static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 extctl, ledblink = 0, val;
- unsigned long flags;
- int yel, grn;
-
- /*
- * The diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running.
- */
- if (dd->diag_client)
- return;
-
- /* Allow override of LED display for, e.g. Locating system in rack */
- if (ppd->led_override) {
- grn = (ppd->led_override & QIB_LED_PHYS);
- yel = (ppd->led_override & QIB_LED_LOG);
- } else if (on) {
- val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
- grn = qib_7322_phys_portstate(val) ==
- IB_PHYSPORTSTATE_LINKUP;
- yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
- } else {
- grn = 0;
- yel = 0;
- }
-
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- extctl = dd->cspec->extctrl & (ppd->port == 1 ?
- ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
- if (grn) {
- extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
- /*
- * Counts are in chip clock (4ns) periods.
- * This is 1/16 sec (66.6ms) on,
- * 3/16 sec (187.5 ms) off, with packets rcvd.
- */
- ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
- ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
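- /*
- * That is, 66600 * 1000 ns / 4 ns-per-clock = 16,650,000 clocks of
- * on-time (66.6 ms), and likewise 187500 * 1000 / 4 clocks of
- * off-time (187.5 ms).
- */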
- }
- if (yel)
- extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
- dd->cspec->extctrl = extctl;
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-
- if (ledblink) /* blink the LED on packet receive */
- qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
-}
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-
-static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
-{
- switch (event) {
- case DCA_PROVIDER_ADD:
- if (dd->flags & QIB_DCA_ENABLED)
- break;
- if (!dca_add_requester(&dd->pcidev->dev)) {
- qib_devinfo(dd->pcidev, "DCA enabled\n");
- dd->flags |= QIB_DCA_ENABLED;
- qib_setup_dca(dd);
- }
- break;
- case DCA_PROVIDER_REMOVE:
- if (dd->flags & QIB_DCA_ENABLED) {
- dca_remove_requester(&dd->pcidev->dev);
- dd->flags &= ~QIB_DCA_ENABLED;
- dd->cspec->dca_ctrl = 0;
- qib_write_kreg(dd, KREG_IDX(DCACtrlA),
- dd->cspec->dca_ctrl);
- }
- break;
- }
- return 0;
-}
-
-static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
-{
- struct qib_devdata *dd = rcd->dd;
- struct qib_chip_specific *cspec = dd->cspec;
-
- if (!(dd->flags & QIB_DCA_ENABLED))
- return;
- if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
- const struct dca_reg_map *rmp;
-
- cspec->rhdr_cpu[rcd->ctxt] = cpu;
- rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
- cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
- cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
- (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
- qib_devinfo(dd->pcidev,
- "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
- (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
- qib_write_kreg(dd, rmp->regno,
- cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
- cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
- qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
- }
-}
-
-static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
-{
- struct qib_devdata *dd = ppd->dd;
- struct qib_chip_specific *cspec = dd->cspec;
- unsigned pidx = ppd->port - 1;
-
- if (!(dd->flags & QIB_DCA_ENABLED))
- return;
- if (cspec->sdma_cpu[pidx] != cpu) {
- cspec->sdma_cpu[pidx] = cpu;
- cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
- SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
- SYM_MASK(DCACtrlF, SendDma0DCAOPH));
- cspec->dca_rcvhdr_ctrl[4] |=
- (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
- (ppd->hw_pidx ?
- SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
- SYM_LSB(DCACtrlF, SendDma0DCAOPH));
- qib_devinfo(dd->pcidev,
- "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
- (long long) cspec->dca_rcvhdr_ctrl[4]);
- qib_write_kreg(dd, KREG_IDX(DCACtrlF),
- cspec->dca_rcvhdr_ctrl[4]);
- cspec->dca_ctrl |= ppd->hw_pidx ?
- SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
- SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
- qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
- }
-}
-
-static void qib_setup_dca(struct qib_devdata *dd)
-{
- struct qib_chip_specific *cspec = dd->cspec;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
- cspec->rhdr_cpu[i] = -1;
- for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
- cspec->sdma_cpu[i] = -1;
- cspec->dca_rcvhdr_ctrl[0] =
- (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
- cspec->dca_rcvhdr_ctrl[1] =
- (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
- cspec->dca_rcvhdr_ctrl[2] =
- (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
- cspec->dca_rcvhdr_ctrl[3] =
- (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
- cspec->dca_rcvhdr_ctrl[4] =
- (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
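- /*
- * Each XfrCnt field above is set to 1, presumably the minimum
- * transfer count between DCA hints for that receive header queue.
- */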
- for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
- qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
- cspec->dca_rcvhdr_ctrl[i]);
- for (i = 0; i < cspec->num_msix_entries; i++)
- setup_dca_notifier(dd, i);
-}
-
-static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct qib_irq_notify *n =
- container_of(notify, struct qib_irq_notify, notify);
- int cpu = cpumask_first(mask);
-
- if (n->rcv) {
- struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
-
- qib_update_rhdrq_dca(rcd, cpu);
- } else {
- struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
-
- qib_update_sdma_dca(ppd, cpu);
- }
-}
-
-static void qib_irq_notifier_release(struct kref *ref)
-{
- struct qib_irq_notify *n =
- container_of(ref, struct qib_irq_notify, notify.kref);
- struct qib_devdata *dd;
-
- if (n->rcv) {
- struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
-
- dd = rcd->dd;
- } else {
- struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
-
- dd = ppd->dd;
- }
- qib_devinfo(dd->pcidev,
- "release on HCA notify 0x%p n 0x%p\n", ref, n);
- kfree(n);
-}
-#endif
-
-static void qib_7322_free_irq(struct qib_devdata *dd)
-{
- u64 intgranted;
- int i;
-
- dd->cspec->main_int_mask = ~0ULL;
-
- for (i = 0; i < dd->cspec->num_msix_entries; i++) {
- /* only free IRQs that were allocated */
- if (dd->cspec->msix_entries[i].arg) {
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- reset_dca_notifier(dd, i);
-#endif
- irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
- NULL);
- free_cpumask_var(dd->cspec->msix_entries[i].mask);
- pci_free_irq(dd->pcidev, i,
- dd->cspec->msix_entries[i].arg);
- }
- }
-
- /* If num_msix_entries was 0, disable the INTx IRQ */
- if (!dd->cspec->num_msix_entries)
- pci_free_irq(dd->pcidev, 0, dd);
- else
- dd->cspec->num_msix_entries = 0;
-
- pci_free_irq_vectors(dd->pcidev);
-
- /* make sure no MSIx interrupts are left pending */
- intgranted = qib_read_kreg64(dd, kr_intgranted);
- if (intgranted)
- qib_write_kreg(dd, kr_intgranted, intgranted);
-}
-
-static void qib_setup_7322_cleanup(struct qib_devdata *dd)
-{
- int i;
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- if (dd->flags & QIB_DCA_ENABLED) {
- dca_remove_requester(&dd->pcidev->dev);
- dd->flags &= ~QIB_DCA_ENABLED;
- dd->cspec->dca_ctrl = 0;
- qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
- }
-#endif
-
- qib_7322_free_irq(dd);
- kfree(dd->cspec->cntrs);
- bitmap_free(dd->cspec->sendchkenable);
- bitmap_free(dd->cspec->sendgrhchk);
- bitmap_free(dd->cspec->sendibchk);
- kfree(dd->cspec->msix_entries);
- for (i = 0; i < dd->num_pports; i++) {
- unsigned long flags;
- u32 mask = QSFP_GPIO_MOD_PRS_N |
- (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
-
- kfree(dd->pport[i].cpspec->portcntrs);
- if (dd->flags & QIB_HAS_QSFP) {
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->gpio_mask &= ~mask;
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
- }
- }
-}
-
-/* handle SDMA interrupts */
-static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
-{
- struct qib_pportdata *ppd0 = &dd->pport[0];
- struct qib_pportdata *ppd1 = &dd->pport[1];
- u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
- INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
- u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
- INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
-
- if (intr0)
- qib_sdma_intr(ppd0);
- if (intr1)
- qib_sdma_intr(ppd1);
-
- if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
- qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
- if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
- qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
-}
-
-/*
- * Set or clear the Send buffer available interrupt enable bit.
- */
-static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (needint)
- dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
- else
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-}
-
-/*
- * Somehow got an interrupt with reserved bits set in interrupt status.
- * Print a message so we know it happened, then clear them.
- * keep mainline interrupt handler cache-friendly
- */
-static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
-{
- u64 kills;
-
- kills = istat & ~QIB_I_BITSEXTANT;
- qib_dev_err(dd,
- "Clearing reserved interrupt(s) 0x%016llx\n",
- (unsigned long long) kills);
- qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
-}
-
-/* keep mainline interrupt handler cache-friendly */
-static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
-{
- u32 gpiostatus;
- int handled = 0;
- int pidx;
-
- /*
- * Boards for this chip currently don't use GPIO interrupts,
- * so clear by writing GPIOstatus to GPIOclear, and complain
- * to developer. To avoid endless repeats, clear
- * the bits in the mask, since there is some kind of
- * programming error or chip problem.
- */
- gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
- /*
- * In theory, writing GPIOstatus to GPIOclear could
- * have a bad side-effect on some diagnostic that wanted
- * to poll for a status-change, but the various shadows
- * make that problematic at best. Diags will just suppress
- * all GPIO interrupts during such tests.
- */
- qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
- /*
- * Check for QSFP MOD_PRS changes
- * only works for single port if IB1 != pidx1
- */
- for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
- ++pidx) {
- struct qib_pportdata *ppd;
- struct qib_qsfp_data *qd;
- u32 mask;
-
- if (!dd->pport[pidx].link_speed_supported)
- continue;
- mask = QSFP_GPIO_MOD_PRS_N;
- ppd = dd->pport + pidx;
- mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
- if (gpiostatus & dd->cspec->gpio_mask & mask) {
- u64 pins;
-
- qd = &ppd->cpspec->qsfp_data;
- gpiostatus &= ~mask;
- pins = qib_read_kreg64(dd, kr_extstatus);
- pins >>= SYM_LSB(EXTStatus, GPIOIn);
- if (!(pins & mask)) {
- ++handled;
- qd->t_insert = jiffies;
- queue_work(ib_wq, &qd->work);
- }
- }
- }
-
- if (gpiostatus && !handled) {
- const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
- u32 gpio_irq = mask & gpiostatus;
-
- /*
- * Clear any troublemakers, and update chip from shadow
- */
- dd->cspec->gpio_mask &= ~gpio_irq;
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- }
-}
-
-/*
- * Handle errors and unusual events first, separate function
- * to improve cache hits for fast path interrupt handling.
- */
-static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
-{
- if (istat & ~QIB_I_BITSEXTANT)
- unknown_7322_ibits(dd, istat);
- if (istat & QIB_I_GPIO)
- unknown_7322_gpio_intr(dd);
- if (istat & QIB_I_C_ERROR) {
- qib_write_kreg(dd, kr_errmask, 0ULL);
- tasklet_schedule(&dd->error_tasklet);
- }
- if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
- handle_7322_p_errors(dd->rcd[0]->ppd);
- if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
- handle_7322_p_errors(dd->rcd[1]->ppd);
-}
-
-/*
- * Dynamically adjust the rcv int timeout for a context based on incoming
- * packet rate.
- */
-static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
-{
- struct qib_devdata *dd = rcd->dd;
- u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
-
- /*
- * Dynamically adjust idle timeout on chip
- * based on number of packets processed.
- */
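- /*
- * In other words, a bounded exponential adjustment: a busy interrupt
- * (npkts >= rcv_int_count) doubles the timeout up to rcv_int_timeout;
- * a quiet one halves it while it remains above 2.
- */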
- if (npkts < rcv_int_count && timeout > 2)
- timeout >>= 1;
- else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
- timeout = min(timeout << 1, rcv_int_timeout);
- else
- return;
-
- dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
- qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
-}
-
-/*
- * This is the main interrupt handler.
- * It will normally only be used for low frequency interrupts but may
- * have to handle all interrupts if INTx is enabled or fewer than normal
- * MSIx interrupts were allocated.
- * This routine should ignore the interrupt bits for any of the
- * dedicated MSIx handlers.
- */
-static irqreturn_t qib_7322intr(int irq, void *data)
-{
- struct qib_devdata *dd = data;
- irqreturn_t ret;
- u64 istat;
- u64 ctxtrbits;
- u64 rmask;
- unsigned i;
- u32 npkts;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- ret = IRQ_HANDLED;
- goto bail;
- }
-
- istat = qib_read_kreg64(dd, kr_intstatus);
-
- if (unlikely(istat == ~0ULL)) {
- qib_bad_intrstatus(dd);
- qib_dev_err(dd, "Interrupt status all f's, skipping\n");
- /* don't know if it was our interrupt or not */
- ret = IRQ_NONE;
- goto bail;
- }
-
- istat &= dd->cspec->main_int_mask;
- if (unlikely(!istat)) {
- /* already handled, or shared and not us */
- ret = IRQ_NONE;
- goto bail;
- }
-
- this_cpu_inc(*dd->int_counter);
-
- /* handle "errors" of various kinds first, device ahead of port */
- if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
- QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
- INT_MASK_P(Err, 1))))
- unlikely_7322_intr(dd, istat);
-
- /*
- * Clear the interrupt bits we found set, relatively early, so we
- * "know" know the chip will have seen this by the time we process
- * the queue, and will re-interrupt if necessary. The processor
- * itself won't take the interrupt again until we return.
- */
- qib_write_kreg(dd, kr_intclear, istat);
-
- /*
- * Handle kernel receive queues before checking for pio buffers
- * available since receives can overflow; piobuf waiters can afford
- * a few extra cycles, since they were waiting anyway.
- */
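- /*
- * Dispatch sketch: rmask starts as the RcvAvail|RcvUrg bit pair for
- * context 0 and is shifted left once per kernel context; any bits
- * still set afterwards belong to user contexts and are folded into
- * a context bitmap for qib_handle_urcv().
- */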
- ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
- if (ctxtrbits) {
- rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
- (1ULL << QIB_I_RCVURG_LSB);
- for (i = 0; i < dd->first_user_ctxt; i++) {
- if (ctxtrbits & rmask) {
- ctxtrbits &= ~rmask;
- if (dd->rcd[i])
- qib_kreceive(dd->rcd[i], NULL, &npkts);
- }
- rmask <<= 1;
- }
- if (ctxtrbits) {
- ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
- (ctxtrbits >> QIB_I_RCVURG_LSB);
- qib_handle_urcv(dd, ctxtrbits);
- }
- }
-
- if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
- sdma_7322_intr(dd, istat);
-
- if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
- qib_ib_piobufavail(dd);
-
- ret = IRQ_HANDLED;
-bail:
- return ret;
-}
-
-/*
- * Dedicated receive packet available interrupt handler.
- */
-static irqreturn_t qib_7322pintr(int irq, void *data)
-{
- struct qib_ctxtdata *rcd = data;
- struct qib_devdata *dd = rcd->dd;
- u32 npkts;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
- (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
-
- qib_kreceive(rcd, NULL, &npkts);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send buffer available interrupt handler.
- */
-static irqreturn_t qib_7322bufavail(int irq, void *data)
-{
- struct qib_devdata *dd = data;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
-
- /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
- if (dd->flags & QIB_INITTED)
- qib_ib_piobufavail(dd);
- else
- qib_wantpiobuf_7322_intr(dd, 0);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send DMA interrupt handler.
- */
-static irqreturn_t sdma_intr(int irq, void *data)
-{
- struct qib_pportdata *ppd = data;
- struct qib_devdata *dd = ppd->dd;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
- INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
- qib_sdma_intr(ppd);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send DMA idle interrupt handler.
- */
-static irqreturn_t sdma_idle_intr(int irq, void *data)
-{
- struct qib_pportdata *ppd = data;
- struct qib_devdata *dd = ppd->dd;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
- INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
- qib_sdma_intr(ppd);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send DMA progress interrupt handler.
- */
-static irqreturn_t sdma_progress_intr(int irq, void *data)
-{
- struct qib_pportdata *ppd = data;
- struct qib_devdata *dd = ppd->dd;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
- INT_MASK_P(SDmaProgress, 1) :
- INT_MASK_P(SDmaProgress, 0));
- qib_sdma_intr(ppd);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send DMA cleanup interrupt handler.
- */
-static irqreturn_t sdma_cleanup_intr(int irq, void *data)
-{
- struct qib_pportdata *ppd = data;
- struct qib_devdata *dd = ppd->dd;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
- INT_MASK_PM(SDmaCleanupDone, 1) :
- INT_MASK_PM(SDmaCleanupDone, 0));
- qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-
-static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
-{
- if (!dd->cspec->msix_entries[msixnum].dca)
- return;
-
- qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
- dd->unit, pci_irq_vector(dd->pcidev, msixnum));
- irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
- dd->cspec->msix_entries[msixnum].notifier = NULL;
-}
-
-static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
-{
- struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
- struct qib_irq_notify *n;
-
- if (!m->dca)
- return;
- n = kzalloc(sizeof(*n), GFP_KERNEL);
- if (n) {
- int ret;
-
- m->notifier = n;
- n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
- n->notify.notify = qib_irq_notifier_notify;
- n->notify.release = qib_irq_notifier_release;
- n->arg = m->arg;
- n->rcv = m->rcv;
- qib_devinfo(dd->pcidev,
- "set notifier irq %d rcv %d notify %p\n",
- n->notify.irq, n->rcv, &n->notify);
- ret = irq_set_affinity_notifier(
- n->notify.irq,
- &n->notify);
- if (ret) {
- m->notifier = NULL;
- kfree(n);
- }
- }
-}
-
-#endif
-
-/*
- * Set up our chip-specific interrupt handler.
- * The interrupt type has already been setup, so
- * we just need to do the registration and error checking.
- * If we are using MSIx interrupts, we may fall back to
- * INTx later, if the interrupt handler doesn't get called
- * within 1/2 second (see verify_interrupt()).
- */
-static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
-{
- int ret, i, msixnum;
- u64 redirect[6];
- u64 mask;
- const struct cpumask *local_mask;
- int firstcpu, secondcpu = 0, currrcvcpu = 0;
-
- if (!dd->num_pports)
- return;
-
- if (clearpend) {
- /*
- * if not switching interrupt types, be sure interrupts are
- * disabled, and then clear anything pending at this point,
- * because we are starting clean.
- */
- qib_7322_set_intr_state(dd, 0);
-
- /* clear the reset error, init error/hwerror mask */
- qib_7322_init_hwerrors(dd);
-
- /* clear any interrupt bits that might be set */
- qib_write_kreg(dd, kr_intclear, ~0ULL);
-
- /* make sure no pending MSIx intr, and clear diag reg */
- qib_write_kreg(dd, kr_intgranted, ~0ULL);
- qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
- }
-
- if (!dd->cspec->num_msix_entries) {
- /* Try to get INTx interrupt */
-try_intx:
- ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
- QIB_DRV_NAME);
- if (ret) {
- qib_dev_err(
- dd,
- "Couldn't setup INTx interrupt (irq=%d): %d\n",
- pci_irq_vector(dd->pcidev, 0), ret);
- return;
- }
- dd->cspec->main_int_mask = ~0ULL;
- return;
- }
-
- /* Try to get MSIx interrupts */
- memset(redirect, 0, sizeof(redirect));
- mask = ~0ULL;
- msixnum = 0;
- local_mask = cpumask_of_pcibus(dd->pcidev->bus);
- firstcpu = cpumask_first(local_mask);
- if (firstcpu >= nr_cpu_ids ||
- cpumask_weight(local_mask) == num_online_cpus()) {
- local_mask = topology_core_cpumask(0);
- firstcpu = cpumask_first(local_mask);
- }
- if (firstcpu < nr_cpu_ids) {
- secondcpu = cpumask_next(firstcpu, local_mask);
- if (secondcpu >= nr_cpu_ids)
- secondcpu = firstcpu;
- currrcvcpu = secondcpu;
- }
- for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
- irq_handler_t handler;
- void *arg;
- int lsb, reg, sh;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- int dca = 0;
-#endif
- if (i < ARRAY_SIZE(irq_table)) {
- if (irq_table[i].port) {
- /* skip if for a non-configured port */
- if (irq_table[i].port > dd->num_pports)
- continue;
- arg = dd->pport + irq_table[i].port - 1;
- } else
- arg = dd;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca = irq_table[i].dca;
-#endif
- lsb = irq_table[i].lsb;
- handler = irq_table[i].handler;
- ret = pci_request_irq(dd->pcidev, msixnum, handler,
- NULL, arg, QIB_DRV_NAME "%d%s",
- dd->unit,
- irq_table[i].name);
- } else {
- unsigned ctxt;
-
- ctxt = i - ARRAY_SIZE(irq_table);
- /* per krcvq context receive interrupt */
- arg = dd->rcd[ctxt];
- if (!arg)
- continue;
- if (qib_krcvq01_no_msi && ctxt < 2)
- continue;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca = 1;
-#endif
- lsb = QIB_I_RCVAVAIL_LSB + ctxt;
- handler = qib_7322pintr;
- ret = pci_request_irq(dd->pcidev, msixnum, handler,
- NULL, arg,
- QIB_DRV_NAME "%d (kctx)",
- dd->unit);
- }
-
- if (ret) {
- /*
- * Shouldn't happen since the enable said we could
- * have as many as we are trying to setup here.
- */
- qib_dev_err(dd,
- "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
- msixnum,
- pci_irq_vector(dd->pcidev, msixnum),
- ret);
- qib_7322_free_irq(dd);
- pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_INTX);
- goto try_intx;
- }
- dd->cspec->msix_entries[msixnum].arg = arg;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dd->cspec->msix_entries[msixnum].dca = dca;
- dd->cspec->msix_entries[msixnum].rcv =
- handler == qib_7322pintr;
-#endif
- if (lsb >= 0) {
- reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
- sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
- SYM_LSB(IntRedirect0, vec1);
- mask &= ~(1ULL << lsb);
- redirect[reg] |= ((u64) msixnum) << sh;
- }
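- /*
- * Redirect math, for illustration: source bit <lsb> is steered by
- * field (lsb % IBA7322_REDIRECT_VEC_PER_REG) of register
- * redirect[lsb / IBA7322_REDIRECT_VEC_PER_REG], which is loaded
- * with the MSIx vector number chosen above.
- */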
- qib_read_kreg64(dd, 2 * msixnum + 1 +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)));
- if (firstcpu < nr_cpu_ids &&
- zalloc_cpumask_var(
- &dd->cspec->msix_entries[msixnum].mask,
- GFP_KERNEL)) {
- if (handler == qib_7322pintr) {
- cpumask_set_cpu(currrcvcpu,
- dd->cspec->msix_entries[msixnum].mask);
- currrcvcpu = cpumask_next(currrcvcpu,
- local_mask);
- if (currrcvcpu >= nr_cpu_ids)
- currrcvcpu = secondcpu;
- } else {
- cpumask_set_cpu(firstcpu,
- dd->cspec->msix_entries[msixnum].mask);
- }
- irq_set_affinity_hint(
- pci_irq_vector(dd->pcidev, msixnum),
- dd->cspec->msix_entries[msixnum].mask);
- }
- msixnum++;
- }
- /* Initialize the vector mapping */
- for (i = 0; i < ARRAY_SIZE(redirect); i++)
- qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
- dd->cspec->main_int_mask = mask;
- tasklet_setup(&dd->error_tasklet, qib_error_tasklet);
-}
-
-/**
- * qib_7322_boardname - fill in the board name and note features
- * @dd: the qlogic_ib device
- *
- * info will be based on the board revision register
- */
-static unsigned qib_7322_boardname(struct qib_devdata *dd)
-{
- /* Will need enumeration of board-types here */
- u32 boardid;
- unsigned int features = DUAL_PORT_CAP;
-
- boardid = SYM_FIELD(dd->revision, Revision, BoardID);
-
- switch (boardid) {
- case 0:
- dd->boardname = "InfiniPath_QLE7342_Emulation";
- break;
- case 1:
- dd->boardname = "InfiniPath_QLE7340";
- dd->flags |= QIB_HAS_QSFP;
- features = PORT_SPD_CAP;
- break;
- case 2:
- dd->boardname = "InfiniPath_QLE7342";
- dd->flags |= QIB_HAS_QSFP;
- break;
- case 3:
- dd->boardname = "InfiniPath_QMI7342";
- break;
- case 4:
- dd->boardname = "InfiniPath_Unsupported7342";
- qib_dev_err(dd, "Unsupported version of QMH7342\n");
- features = 0;
- break;
- case BOARD_QMH7342:
- dd->boardname = "InfiniPath_QMH7342";
- features = 0x24;
- break;
- case BOARD_QME7342:
- dd->boardname = "InfiniPath_QME7342";
- break;
- case 8:
- dd->boardname = "InfiniPath_QME7362";
- dd->flags |= QIB_HAS_QSFP;
- break;
- case BOARD_QMH7360:
- dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
- dd->flags |= QIB_HAS_QSFP;
- break;
- case 15:
- dd->boardname = "InfiniPath_QLE7342_TEST";
- dd->flags |= QIB_HAS_QSFP;
- break;
- default:
- dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
- qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
- break;
- }
- dd->board_atten = 1; /* index into txdds_Xdr */
-
- snprintf(dd->boardversion, sizeof(dd->boardversion),
- "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
- QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
- (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
- dd->majrev, dd->minrev,
- (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
-
- if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
- qib_devinfo(dd->pcidev,
- "IB%u: Forced to single port mode by module parameter\n",
- dd->unit);
- features &= PORT_SPD_CAP;
- }
-
- return features;
-}
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context.
- */
-static int qib_do_7322_reset(struct qib_devdata *dd)
-{
- u64 val;
- u64 *msix_vecsave = NULL;
- int i, msix_entries, ret = 1;
- u16 cmdval;
- u8 int_line, clinesz;
- unsigned long flags;
-
- /* Use dev_err so it shows up in logs, etc. */
- qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
-
- qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
-
- msix_entries = dd->cspec->num_msix_entries;
-
- /* no interrupts till re-initted */
- qib_7322_set_intr_state(dd, 0);
-
- qib_7322_free_irq(dd);
-
- if (msix_entries) {
- /* can be up to 512 bytes, too big for stack */
- msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
- sizeof(u64),
- GFP_KERNEL);
- }
-
- /*
- * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
- * info that is set up by the BIOS, so we have to save and restore
- * it ourselves. There is some risk something could change it
- * after we save it, but since we have disabled the MSIx, it
- * shouldn't be touched...
- */
- for (i = 0; i < msix_entries; i++) {
- u64 vecaddr, vecdata;
-
- vecaddr = qib_read_kreg64(dd, 2 * i +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)));
- vecdata = qib_read_kreg64(dd, 1 + 2 * i +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)));
- if (msix_vecsave) {
- msix_vecsave[2 * i] = vecaddr;
- /* save it without the masked bit set */
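- /* (bit 32 of this qword is presumably the MSI-X per-vector mask
- * bit, i.e. the low bit of the Vector Control dword) */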
- msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
- }
- }
-
- dd->pport->cpspec->ibdeltainprog = 0;
- dd->pport->cpspec->ibsymdelta = 0;
- dd->pport->cpspec->iblnkerrdelta = 0;
- dd->pport->cpspec->ibmalfdelta = 0;
- /* so we check interrupts work again */
- dd->z_int_counter = qib_int_counter(dd);
-
- /*
- * Keep chip from being accessed until we are ready. Use
- * writeq() directly, to allow the write even though QIB_PRESENT
- * isn't set.
- */
- dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
- dd->flags |= QIB_DOING_RESET;
- val = dd->control | QLOGIC_IB_C_RESET;
- writeq(val, &dd->kregbase[kr_control]);
-
- for (i = 1; i <= 5; i++) {
- /*
- * Allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 3000);
-
- qib_pcie_reenable(dd, cmdval, int_line, clinesz);
-
- /*
- * Use readq directly, so we don't need to mark it as PRESENT
- * until we get a successful indication that all is well.
- */
- val = readq(&dd->kregbase[kr_revision]);
- if (val == dd->revision)
- break;
- if (i == 5) {
- qib_dev_err(dd,
- "Failed to initialize after reset, unusable\n");
- ret = 0;
- goto bail;
- }
- }
-
- dd->flags |= QIB_PRESENT; /* it's back */
-
- if (msix_entries) {
- /* restore the MSIx vector address and data if saved above */
- for (i = 0; i < msix_entries; i++) {
- if (!msix_vecsave || !msix_vecsave[2 * i])
- continue;
- qib_write_kreg(dd, 2 * i +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)),
- msix_vecsave[2 * i]);
- qib_write_kreg(dd, 1 + 2 * i +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)),
- msix_vecsave[1 + 2 * i]);
- }
- }
-
- /* initialize the remaining registers. */
- for (i = 0; i < dd->num_pports; ++i)
- write_7322_init_portregs(&dd->pport[i]);
- write_7322_initregs(dd);
-
- if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
- qib_dev_err(dd,
- "Reset failed to setup PCIe or interrupts; continuing anyway\n");
-
- dd->cspec->num_msix_entries = msix_entries;
- qib_setup_7322_interrupt(dd, 1);
-
- for (i = 0; i < dd->num_pports; ++i) {
- struct qib_pportdata *ppd = &dd->pport[i];
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-
-bail:
- dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
- kfree(msix_vecsave);
- return ret;
-}
-
-/**
- * qib_7322_put_tid - write a TID to the chip
- * @dd: the qlogic_ib device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: 0 for eager, 1 for expected
- * @pa: physical address of in memory buffer; tidinvalid if freeing
- */
-static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- if (!(dd->flags & QIB_PRESENT))
- return;
- if (pa != dd->tidinvalid) {
- u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
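- /* the 2KB-alignment check below implies IBA7322_TID_PA_SHIFT is
- * presumably 11, i.e. chippa counts 2KB pages */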
-
- /* paranoia checks */
- if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
- qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
- pa);
- return;
- }
- if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
- qib_dev_err(dd,
- "Physical page address 0x%lx larger than supported\n",
- pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- chippa |= dd->tidtemplate;
- else /* for now, always full 4KB page */
- chippa |= IBA7322_TID_SZ_4K;
- pa = chippa;
- }
- writeq(pa, tidptr);
-}
-
-/**
- * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
- * @dd: the qlogic_ib device
- * @rcd: the ctxt
- *
- * clear all TID entries for a ctxt, expected and eager.
- * Used from qib_close().
- */
-static void qib_7322_clear_tids(struct qib_devdata *dd,
- struct qib_ctxtdata *rcd)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- u32 ctxt;
- int i;
-
- if (!dd->kregbase || !rcd)
- return;
-
- ctxt = rcd->ctxt;
-
- tidinv = dd->tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase +
- dd->rcvtidbase +
- ctxt * dd->rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->rcvtidcnt; i++)
- qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase +
- dd->rcvegrbase +
- rcd->rcvegr_tid_base * sizeof(*tidbase));
-
- for (i = 0; i < rcd->rcvegrcnt; i++)
- qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * qib_7322_tidtemplate - setup constants for TID updates
- * @dd: the qlogic_ib device
- *
- * We set up values that we use a lot, to avoid recalculating them each time
- */
-static void qib_7322_tidtemplate(struct qib_devdata *dd)
-{
- /*
- * For now, we always allocate 4KB buffers (at init) so we can
- * receive max size packets. We may want a module parameter to
- * specify 2KB or 4KB and/or make it per port instead of per device
- * for those who want to reduce memory footprint. Note that the
- * rcvhdrentsize must be large enough to hold the largest
- * IB header (currently 96 bytes) that we expect to handle (plus of
- * course the 2 dwords of RHF).
- */
- if (dd->rcvegrbufsize == 2048)
- dd->tidtemplate = IBA7322_TID_SZ_2K;
- else if (dd->rcvegrbufsize == 4096)
- dd->tidtemplate = IBA7322_TID_SZ_4K;
- dd->tidinvalid = 0;
-}
-
-/**
- * qib_7322_get_base_info - set chip-specific flags for user code
- * @rcd: the qlogic_ib ctxt
- * @kinfo: qib_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-
-static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
- struct qib_base_info *kinfo)
-{
- kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
- QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
- QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
- if (rcd->dd->cspec->r1)
- kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
- if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
- kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
-
- return 0;
-}
-
-static struct qib_message_header *
-qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
-{
- u32 offset = qib_hdrget_offset(rhf_addr);
-
- return (struct qib_message_header *)
- (rhf_addr - dd->rhf_offset + offset);
-}
-
-/*
- * Configure number of contexts.
- */
-static void qib_7322_config_ctxts(struct qib_devdata *dd)
-{
- unsigned long flags;
- u32 nchipctxts;
-
- nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
- dd->cspec->numctxts = nchipctxts;
- if (qib_n_krcv_queues > 1 && dd->num_pports) {
- dd->first_user_ctxt = NUM_IB_PORTS +
- (qib_n_krcv_queues - 1) * dd->num_pports;
- if (dd->first_user_ctxt > nchipctxts)
- dd->first_user_ctxt = nchipctxts;
- dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
- } else {
- dd->first_user_ctxt = NUM_IB_PORTS;
- dd->n_krcv_queues = 1;
- }
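- /*
- * e.g. (assuming NUM_IB_PORTS == 2): a dual-port board with
- * qib_n_krcv_queues == 4 yields first_user_ctxt = 2 + 3 * 2 = 8
- * kernel receive contexts, i.e. 4 per port.
- */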
-
- if (!qib_cfgctxts) {
- int nctxts = dd->first_user_ctxt + num_online_cpus();
-
- if (nctxts <= 6)
- dd->ctxtcnt = 6;
- else if (nctxts <= 10)
- dd->ctxtcnt = 10;
- else if (nctxts <= nchipctxts)
- dd->ctxtcnt = nchipctxts;
- } else if (qib_cfgctxts < dd->num_pports)
- dd->ctxtcnt = dd->num_pports;
- else if (qib_cfgctxts <= nchipctxts)
- dd->ctxtcnt = qib_cfgctxts;
- if (!dd->ctxtcnt) /* none of the above, set to max */
- dd->ctxtcnt = nchipctxts;
-
- /*
- * Chip can be configured for 6, 10, or 18 ctxts, and choice
- * affects number of eager TIDs per ctxt (1K, 2K, 4K).
- * Lock to be paranoid about later motion, etc.
- */
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- if (dd->ctxtcnt > 10)
- dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
- else if (dd->ctxtcnt > 6)
- dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
- /* else configure for default 6 receive ctxts */
-
- /* The XRC opcode is 5. */
- dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
-
- /*
- * RcvCtrl *must* be written here so that the
- * chip understands how to change rcvegrcnt below.
- */
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-
- /* kr_rcvegrcnt changes based on the number of contexts enabled */
- dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
- if (qib_rcvhdrcnt)
- dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
- else
- dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
- dd->num_pports > 1 ? 1024U : 2048U);
-}
-
-static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
-{
-
- int lsb, ret = 0;
- u64 maskr; /* right-justified mask */
-
- switch (which) {
-
- case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
- ret = ppd->link_width_enabled;
- goto done;
-
- case QIB_IB_CFG_LWID: /* Get currently active Link-width */
- ret = ppd->link_width_active;
- goto done;
-
- case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
- ret = ppd->link_speed_enabled;
- goto done;
-
- case QIB_IB_CFG_SPD: /* Get current Link spd */
- ret = ppd->link_speed_active;
- goto done;
-
- case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
- lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
- maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
- break;
-
- case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
- lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
- maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
- break;
-
- case QIB_IB_CFG_LINKLATENCY:
- ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
- SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
- goto done;
-
- case QIB_IB_CFG_OP_VLS:
- ret = ppd->vls_operational;
- goto done;
-
- case QIB_IB_CFG_VL_HIGH_CAP:
- ret = 16;
- goto done;
-
- case QIB_IB_CFG_VL_LOW_CAP:
- ret = 16;
- goto done;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
- OverrunThreshold);
- goto done;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
- PhyerrThreshold);
- goto done;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- ret = (ppd->cpspec->ibcctrl_a &
- SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
- IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
- goto done;
-
- case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
- lsb = IBA7322_IBC_HRTBT_LSB;
- maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
- break;
-
- case QIB_IB_CFG_PMA_TICKS:
- /*
- * 0x00 = 10x link transfer rate, or 4 nsec for 2.5Gbs.
- * Since the clock is always 250MHz, the value is 3, 1 or 0.
- */
- if (ppd->link_speed_active == QIB_IB_QDR)
- ret = 3;
- else if (ppd->link_speed_active == QIB_IB_DDR)
- ret = 1;
- else
- ret = 0;
- goto done;
-
- default:
- ret = -EINVAL;
- goto done;
- }
- ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
-done:
- return ret;
-}
-
-/*
- * Below again cribbed liberally from older version. Do not lean
- * heavily on it.
- */
-#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
-#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
- | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
-
-static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 maskr; /* right-justified mask */
- int lsb, ret = 0;
- u16 lcmd, licmd;
- unsigned long flags;
-
- switch (which) {
- case QIB_IB_CFG_LIDLMC:
- /*
- * Set LID and LMC. Combined to avoid possible hazard;
- * caller puts LMC in 16 MSbits, DLID in 16 LSbits of val.
- */
- lsb = IBA7322_IBC_DLIDLMC_SHIFT;
- maskr = IBA7322_IBC_DLIDLMC_MASK;
- /*
- * For header-checking, the SLID in the packet will
- * be masked with SendIBSLMCMask, and compared
- * with SendIBSLIDAssignMask. Make sure we do not
- * set any bits not covered by the mask, or we get
- * false-positives.
- */
- qib_write_kreg_port(ppd, krp_sendslid,
- val & (val >> 16) & SendIBSLIDAssignMask);
- qib_write_kreg_port(ppd, krp_sendslidmask,
- (val >> 16) & SendIBSLMCMask);
- break;
-
- case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
- ppd->link_width_enabled = val;
- /* convert IB value to chip register value */
- if (val == IB_WIDTH_1X)
- val = 0;
- else if (val == IB_WIDTH_4X)
- val = 1;
- else
- val = 3;
- maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
- lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
- break;
-
- case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
- /*
- * As with width, only write the actual register if the
- * link is currently down, otherwise takes effect on next
- * link change. Since setting is being explicitly requested
- * (via MAD or sysfs), clear autoneg failure status if speed
- * autoneg is enabled.
- */
- ppd->link_speed_enabled = val;
- val <<= IBA7322_IBC_SPEED_LSB;
- maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK;
- if (val & (val - 1)) {
- /* Multiple speeds enabled */
- val |= IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else if (val & IBA7322_IBC_SPEED_QDR)
- val |= IBA7322_IBC_IBTA_1_2_MASK;
- /* IBTA 1.2 mode + min/max + speed bits are contiguous */
- lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
- break;
-
- case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
- lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
- maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
- break;
-
- case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
- lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
- maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
- break;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
- OverrunThreshold);
- if (maskr != val) {
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
- ppd->cpspec->ibcctrl_a |= (u64) val <<
- SYM_LSB(IBCCtrlA_0, OverrunThreshold);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- }
- goto bail;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
- PhyerrThreshold);
- if (maskr != val) {
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
- ppd->cpspec->ibcctrl_a |= (u64) val <<
- SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- }
- goto bail;
-
- case QIB_IB_CFG_PKEYS: /* update pkeys */
- maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
- ((u64) ppd->pkeys[2] << 32) |
- ((u64) ppd->pkeys[3] << 48);
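- /*
- * e.g. pkeys { 0xFFFF, 0x8001, 0, 0 } pack LSB-first into
- * 0x000000008001FFFF.
- */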
- qib_write_kreg_port(ppd, krp_partitionkey, maskr);
- goto bail;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- if (val == IB_LINKINITCMD_POLL)
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
- else /* SLEEP */
- ppd->cpspec->ibcctrl_a |=
- SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- goto bail;
-
- case QIB_IB_CFG_MTU: /* update the MTU in IBC */
- /*
-		 * Update our housekeeping variables, and set IBC max
-		 * size, same as init code; max IBC is max we allow in
-		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
-		 * Set even if it's unchanged; print a debug message only
-		 * on changes.
- */
- val = (ppd->ibmaxlen >> 2) + 1;
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
- ppd->cpspec->ibcctrl_a |= (u64)val <<
- SYM_LSB(IBCCtrlA_0, MaxPktLen);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- goto bail;
-
- case QIB_IB_CFG_LSTATE: /* set the IB link state */
- switch (val & 0xffff0000) {
- case IB_LINKCMD_DOWN:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
- ppd->cpspec->ibmalfusesnap = 1;
- ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
- crp_errlink);
- if (!ppd->cpspec->ibdeltainprog &&
- qib_compat_ddr_negotiate) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap =
- read_7322_creg32_port(ppd,
- crp_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap =
- read_7322_creg32_port(ppd,
- crp_iblinkerrrecov);
- }
- break;
-
- case IB_LINKCMD_ARMED:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
- if (ppd->cpspec->ibmalfusesnap) {
- ppd->cpspec->ibmalfusesnap = 0;
- ppd->cpspec->ibmalfdelta +=
- read_7322_creg32_port(ppd,
- crp_errlink) -
- ppd->cpspec->ibmalfsnap;
- }
- break;
-
- case IB_LINKCMD_ACTIVE:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
- goto bail;
- }
- switch (val & 0xffff) {
- case IB_LINKINITCMD_NOP:
- licmd = 0;
- break;
-
- case IB_LINKINITCMD_POLL:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
- break;
-
- case IB_LINKINITCMD_SLEEP:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
- break;
-
- case IB_LINKINITCMD_DISABLE:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
- ppd->cpspec->chase_end = 0;
- /*
-			 * Stop the state-chase counter and timer, if running;
-			 * wait for the pending timer, but don't clear .data (ppd)!
- */
- if (ppd->cpspec->chase_timer.expires) {
- timer_delete_sync(&ppd->cpspec->chase_timer);
- ppd->cpspec->chase_timer.expires = 0;
- }
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
- val & 0xffff);
- goto bail;
- }
- qib_set_ib_7322_lstate(ppd, lcmd, licmd);
- goto bail;
-
- case QIB_IB_CFG_OP_VLS:
- if (ppd->vls_operational != val) {
- ppd->vls_operational = val;
- set_vls(ppd);
- }
- goto bail;
-
- case QIB_IB_CFG_VL_HIGH_LIMIT:
- qib_write_kreg_port(ppd, krp_highprio_limit, val);
- goto bail;
-
- case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
- if (val > 3) {
- ret = -EINVAL;
- goto bail;
- }
- lsb = IBA7322_IBC_HRTBT_LSB;
- maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
- break;
-
- case QIB_IB_CFG_PORT:
- /* val is the port number of the switch we are connected to. */
- if (ppd->dd->cspec->r1) {
- cancel_delayed_work(&ppd->cpspec->ipg_work);
- ppd->cpspec->ipg_tries = 0;
- }
- goto bail;
-
- default:
- ret = -EINVAL;
- goto bail;
- }
- ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
- ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
- qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
- qib_write_kreg(dd, kr_scratch, 0);
-bail:
- return ret;
-}
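-
-/*
- * Illustrative sketch, not part of the original driver: the common
- * exit path above updates a register field with the usual mask/shift
- * read-modify-write.  Factored out, the pattern is:
- */
-static inline u64 example_set_field(u64 reg, u64 val, u64 maskr, unsigned lsb)
-{
-	reg &= ~(maskr << lsb);			/* clear the field */
-	reg |= ((val & maskr) << lsb);		/* insert the new value */
-	return reg;
-}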
-
-static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
-{
- int ret = 0;
- u64 val, ctrlb;
-
- /* only IBC loopback, may add serdes and xgxs loopbacks later */
- if (!strncmp(what, "ibc", 3)) {
- ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
- Loopback);
- val = 0; /* disable heart beat, so link will come up */
- qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
- ppd->dd->unit, ppd->port);
- } else if (!strncmp(what, "off", 3)) {
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
- Loopback);
- /* enable heart beat again */
- val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
- qib_devinfo(ppd->dd->pcidev,
- "Disabling IB%u:%u IBC loopback (normal)\n",
- ppd->dd->unit, ppd->port);
- } else
- ret = -EINVAL;
- if (!ret) {
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
- << IBA7322_IBC_HRTBT_LSB);
- ppd->cpspec->ibcctrl_b = ctrlb | val;
- qib_write_kreg_port(ppd, krp_ibcctrl_b,
- ppd->cpspec->ibcctrl_b);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
- }
- return ret;
-}
-
-static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
- struct ib_vl_weight_elem *vl)
-{
- unsigned i;
-
- for (i = 0; i < 16; i++, regno++, vl++) {
- u32 val = qib_read_kreg_port(ppd, regno);
-
- vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
- SYM_RMASK(LowPriority0_0, VirtualLane);
- vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
- SYM_RMASK(LowPriority0_0, Weight);
- }
-}
-
-static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
- struct ib_vl_weight_elem *vl)
-{
- unsigned i;
-
- for (i = 0; i < 16; i++, regno++, vl++) {
- u64 val;
-
- val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
- SYM_LSB(LowPriority0_0, VirtualLane)) |
- ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
- SYM_LSB(LowPriority0_0, Weight));
- qib_write_kreg_port(ppd, regno, val);
- }
- if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- }
-}
-
-static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
-{
- switch (which) {
- case QIB_IB_TBL_VL_HIGH_ARB:
- get_vl_weights(ppd, krp_highprio_0, t);
- break;
-
- case QIB_IB_TBL_VL_LOW_ARB:
- get_vl_weights(ppd, krp_lowprio_0, t);
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
-{
- switch (which) {
- case QIB_IB_TBL_VL_HIGH_ARB:
- set_vl_weights(ppd, krp_highprio_0, t);
- break;
-
- case QIB_IB_TBL_VL_LOW_ARB:
- set_vl_weights(ppd, krp_lowprio_0, t);
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-}
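-
-/*
- * Illustrative sketch, not part of the original driver: each VL
- * arbitration register above packs a {vl, weight} pair with the same
- * shift/mask scheme the SYM_LSB/SYM_RMASK macros encode.  With
- * hypothetical example shifts and masks, the encode side looks like:
- */
-#define EX_VL_LSB	8
-#define EX_VL_RMASK	0x7ULL
-#define EX_WT_LSB	0
-#define EX_WT_RMASK	0xffULL
-
-static inline u64 example_pack_vl_weight(u64 vl, u64 weight)
-{
-	return ((vl & EX_VL_RMASK) << EX_VL_LSB) |
-	       ((weight & EX_WT_RMASK) << EX_WT_LSB);
-}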
-
-static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd, u32 npkts)
-{
- /*
- * Need to write timeout register before updating rcvhdrhead to ensure
- * that the timer is enabled on reception of a packet.
- */
- if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
- adjust_rcv_timeout(rcd, npkts);
- if (updegr)
- qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
-}
-
-static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
-{
- u32 head, tail;
-
- head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
- if (rcd->rcvhdrtail_kvaddr)
- tail = qib_get_rcvhdrtail(rcd);
- else
- tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
- return head == tail;
-}
-
-#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
- QIB_RCVCTRL_CTXT_DIS | \
- QIB_RCVCTRL_TIDFLOW_ENB | \
- QIB_RCVCTRL_TIDFLOW_DIS | \
- QIB_RCVCTRL_TAILUPD_ENB | \
- QIB_RCVCTRL_TAILUPD_DIS | \
- QIB_RCVCTRL_INTRAVAIL_ENB | \
- QIB_RCVCTRL_INTRAVAIL_DIS | \
- QIB_RCVCTRL_BP_ENB | \
- QIB_RCVCTRL_BP_DIS)
-
-#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
- QIB_RCVCTRL_CTXT_DIS | \
- QIB_RCVCTRL_PKEY_DIS | \
- QIB_RCVCTRL_PKEY_ENB)
-
-/*
- * Modify the RCVCTRL register in a chip-specific way. This
- * is a function because bit positions and (future) register
- * locations are chip-specific, but the needed operations are
- * generic. <op> is a bit-mask because we often want to
- * do multiple modifications.
- */
-static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
- int ctxt)
-{
- struct qib_devdata *dd = ppd->dd;
- struct qib_ctxtdata *rcd;
- u64 mask, val;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
-
- if (op & QIB_RCVCTRL_TIDFLOW_ENB)
- dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
- if (op & QIB_RCVCTRL_TIDFLOW_DIS)
- dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
- if (op & QIB_RCVCTRL_TAILUPD_ENB)
- dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
- if (op & QIB_RCVCTRL_TAILUPD_DIS)
- dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
- if (op & QIB_RCVCTRL_PKEY_ENB)
- ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
- if (op & QIB_RCVCTRL_PKEY_DIS)
- ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
- if (ctxt < 0) {
- mask = (1ULL << dd->ctxtcnt) - 1;
- rcd = NULL;
- } else {
- mask = (1ULL << ctxt);
- rcd = dd->rcd[ctxt];
- }
- if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
- ppd->p_rcvctrl |=
- (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
- if (!(dd->flags & QIB_NODMA_RTAIL)) {
- op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
- dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
- }
- /* Write these registers before the context is enabled. */
- qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
- rcd->rcvhdrqtailaddr_phys);
- qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
- rcd->rcvhdrq_phys);
- rcd->seq_cnt = 1;
- }
- if (op & QIB_RCVCTRL_CTXT_DIS)
- ppd->p_rcvctrl &=
- ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
- if (op & QIB_RCVCTRL_BP_ENB)
- dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
- if (op & QIB_RCVCTRL_BP_DIS)
- dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
- if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
- dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
- if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
- dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
- /*
- * Decide which registers to write depending on the ops enabled.
- * Special case is "flush" (no bits set at all)
- * which needs to write both.
- */
- if (op == 0 || (op & RCVCTRL_COMMON_MODS))
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- if (op == 0 || (op & RCVCTRL_PORT_MODS))
- qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
- if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
- /*
- * Init the context registers also; if we were
- * disabled, tail and head should both be zero
- * already from the enable, but since we don't
- * know, we have to do it explicitly.
- */
- val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
- qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
-
- /* be sure enabling write seen; hd/tl should be 0 */
- (void) qib_read_kreg32(dd, kr_scratch);
- val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
- dd->rcd[ctxt]->head = val;
- /* If kctxt, interrupt on next receive. */
- if (ctxt < dd->first_user_ctxt)
- val |= dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
- dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
- /* arm rcv interrupt */
- val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_DIS) {
- unsigned f;
-
- /* Now that the context is disabled, clear these registers. */
- if (ctxt >= 0) {
- qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
- qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
- for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
- qib_write_ureg(dd, ur_rcvflowtable + f,
- TIDFLOW_ERRBITS, ctxt);
- } else {
- unsigned i;
-
- for (i = 0; i < dd->cfgctxts; i++) {
- qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
- i, 0);
- qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
- for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
- qib_write_ureg(dd, ur_rcvflowtable + f,
- TIDFLOW_ERRBITS, i);
- }
- }
- }
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-}
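-
-/*
- * Illustrative sketch, not part of the original driver: in
- * rcvctrl_7322_mod() above, ctxt < 0 means "all contexts", so the
- * per-context bitmask is built as:
- */
-static inline u64 example_ctxt_mask(int ctxt, unsigned ctxtcnt)
-{
-	/* assumes ctxtcnt < 64, as it is for this chip */
-	return (ctxt < 0) ? (1ULL << ctxtcnt) - 1 : 1ULL << ctxt;
-}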
-
-/*
- * Modify the SENDCTRL register in a chip-specific way. This
- * is a function because there are multiple such registers with
- * slightly different layouts.
- * The chip doesn't allow back-to-back sendctrl writes, so write
- * the scratch register after writing sendctrl.
- *
- * Which register is written depends on the operation.
- * Most operate on the common register, while
- * SEND_ENB and SEND_DIS operate on the per-port ones.
- * SEND_ENB is included in common because it can change SPCL_TRIG
- */
-#define SENDCTRL_COMMON_MODS (\
- QIB_SENDCTRL_CLEAR | \
- QIB_SENDCTRL_AVAIL_DIS | \
- QIB_SENDCTRL_AVAIL_ENB | \
- QIB_SENDCTRL_AVAIL_BLIP | \
- QIB_SENDCTRL_DISARM | \
- QIB_SENDCTRL_DISARM_ALL | \
- QIB_SENDCTRL_SEND_ENB)
-
-#define SENDCTRL_PORT_MODS (\
- QIB_SENDCTRL_CLEAR | \
- QIB_SENDCTRL_SEND_ENB | \
- QIB_SENDCTRL_SEND_DIS | \
- QIB_SENDCTRL_FLUSH)
-
-static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 tmp_dd_sendctrl;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
-
- /* First the dd ones that are "sticky", saved in shadow */
- if (op & QIB_SENDCTRL_CLEAR)
- dd->sendctrl = 0;
- if (op & QIB_SENDCTRL_AVAIL_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
- else if (op & QIB_SENDCTRL_AVAIL_ENB) {
- dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
- if (dd->flags & QIB_USE_SPCL_TRIG)
- dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
- }
-
- /* Then the ppd ones that are "sticky", saved in shadow */
- if (op & QIB_SENDCTRL_SEND_DIS)
- ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
- else if (op & QIB_SENDCTRL_SEND_ENB)
- ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
-
- if (op & QIB_SENDCTRL_DISARM_ALL) {
- u32 i, last;
-
- tmp_dd_sendctrl = dd->sendctrl;
- last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
- /*
- * Disarm any buffers that are not yet launched,
- * disabling updates until done.
- */
- tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
- for (i = 0; i < last; i++) {
- qib_write_kreg(dd, kr_sendctrl,
- tmp_dd_sendctrl |
- SYM_MASK(SendCtrl, Disarm) | i);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- }
-
- if (op & QIB_SENDCTRL_FLUSH) {
- u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
-
- /*
- * Now drain all the fifos. The Abort bit should never be
- * needed, so for now, at least, we don't use it.
- */
- tmp_ppd_sendctrl |=
- SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
- SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
- SYM_MASK(SendCtrl_0, TxeBypassIbc);
- qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- tmp_dd_sendctrl = dd->sendctrl;
-
- if (op & QIB_SENDCTRL_DISARM)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
- ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
- SYM_LSB(SendCtrl, DisarmSendBuf));
- if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
- (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
- tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
-
- if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
- qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- if (op & QIB_SENDCTRL_AVAIL_BLIP) {
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-
- if (op & QIB_SENDCTRL_FLUSH) {
- u32 v;
- /*
-		 * Ensure writes have hit the chip, then do a few
- * more reads, to allow DMA of pioavail registers
- * to occur, so in-memory copy is in sync with
- * the chip. Not always safe to sleep.
- */
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- qib_read_kreg32(dd, kr_scratch);
- }
-}
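-
-/*
- * Illustrative sketch, not part of the original driver: every sendctrl
- * write above is followed by a scratch write, since the chip doesn't
- * allow back-to-back sendctrl writes.  As a hypothetical helper:
- */
-static inline void example_write_sendctrl(struct qib_devdata *dd, u64 v)
-{
-	qib_write_kreg(dd, kr_sendctrl, v);
-	qib_write_kreg(dd, kr_scratch, 0);	/* pace the next write */
-}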
-
-#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
-#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
-#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
-
-/**
- * qib_portcntr_7322 - read a per-port chip counter
- * @ppd: the qlogic_ib pport
- * @reg: the counter to read (not a chip offset)
- */
-static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 ret = 0ULL;
- u16 creg;
- /* 0xffff for unimplemented or synthesized counters */
- static const u32 xlator[] = {
- [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
- [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
- [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
- [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
- [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
- [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
- [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
- [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
- [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
- [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
- [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
- [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
- [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
- [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
- [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
- [QIBPORTCNTR_ERRICRC] = crp_erricrc,
- [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
- [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
- [QIBPORTCNTR_BADFORMAT] = crp_badformat,
- [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
- [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
- [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
- [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
- [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
- [QIBPORTCNTR_ERRLINK] = crp_errlink,
- [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
- [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
- [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
- [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
- [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
- /*
-		 * The next 3 aren't really counters, but were implemented
-		 * as counters in older chips, so this code still accesses
-		 * them as though they were counters.
- */
- [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
- [QIBPORTCNTR_PSSTART] = krp_psstart,
- [QIBPORTCNTR_PSSTAT] = krp_psstat,
- /* pseudo-counter, summed for all ports */
- [QIBPORTCNTR_KHDROVFL] = 0xffff,
- };
-
- if (reg >= ARRAY_SIZE(xlator)) {
- qib_devinfo(ppd->dd->pcidev,
- "Unimplemented portcounter %u\n", reg);
- goto done;
- }
- creg = xlator[reg] & _PORT_CNTR_IDXMASK;
-
- /* handle non-counters and special cases first */
- if (reg == QIBPORTCNTR_KHDROVFL) {
- int i;
-
- /* sum over all kernel contexts (skip if mini_init) */
- for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
- struct qib_ctxtdata *rcd = dd->rcd[i];
-
- if (!rcd || rcd->ppd != ppd)
- continue;
- ret += read_7322_creg32(dd, cr_base_egrovfl + i);
- }
- goto done;
- } else if (reg == QIBPORTCNTR_RXDROPPKT) {
- /*
- * Used as part of the synthesis of port_rcv_errors
- * in the verbs code for IBTA counters. Not needed for 7322,
- * because all the errors are already counted by other cntrs.
- */
- goto done;
- } else if (reg == QIBPORTCNTR_PSINTERVAL ||
- reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
- /* were counters in older chips, now per-port kernel regs */
- ret = qib_read_kreg_port(ppd, creg);
- goto done;
- }
-
- /*
- * Only fast increment counters are 64 bits; use 32 bit reads to
- * avoid two independent reads when on Opteron.
- */
- if (xlator[reg] & _PORT_64BIT_FLAG)
- ret = read_7322_creg_port(ppd, creg);
- else
- ret = read_7322_creg32_port(ppd, creg);
- if (creg == crp_ibsymbolerr) {
- if (ppd->cpspec->ibdeltainprog)
- ret -= ret - ppd->cpspec->ibsymsnap;
- ret -= ppd->cpspec->ibsymdelta;
- } else if (creg == crp_iblinkerrrecov) {
- if (ppd->cpspec->ibdeltainprog)
- ret -= ret - ppd->cpspec->iblnkerrsnap;
- ret -= ppd->cpspec->iblnkerrdelta;
- } else if (creg == crp_errlink)
- ret -= ppd->cpspec->ibmalfdelta;
- else if (creg == crp_iblinkdown)
- ret += ppd->cpspec->iblnkdowndelta;
-done:
- return ret;
-}
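-
-/*
- * Illustrative sketch, not part of the original driver: the xlator[]
- * table above ORs flag bits into each counter's register index so a
- * single u32 carries both; decoding is just:
- */
-static inline void example_decode_cntr(u32 entry, u16 *creg, int *is_64bit)
-{
-	*creg = entry & _PORT_CNTR_IDXMASK;
-	*is_64bit = !!(entry & _PORT_64BIT_FLAG);
-}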
-
-/*
- * Device counter names (not port-specific), one line per stat,
- * single string. Used by utilities like ipathstats to print the stats
- * in a way which works for different versions of drivers, without changing
- * the utility. Names need to be 12 chars or less (w/o newline), for proper
- * display by utility.
- * Non-error counters are first.
- * The start of the "error" counters is indicated by a leading "E " on the
- * first "error" counter; it doesn't count in label length.
- * The EgrOvfl list needs to be last so we truncate them at the configured
- * context count for the device.
- * cntr7322indices contains the corresponding register indices.
- */
-static const char cntr7322names[] =
- "Interrupts\n"
- "HostBusStall\n"
- "E RxTIDFull\n"
- "RxTIDInvalid\n"
- "RxTIDFloDrop\n" /* 7322 only */
- "Ctxt0EgrOvfl\n"
- "Ctxt1EgrOvfl\n"
- "Ctxt2EgrOvfl\n"
- "Ctxt3EgrOvfl\n"
- "Ctxt4EgrOvfl\n"
- "Ctxt5EgrOvfl\n"
- "Ctxt6EgrOvfl\n"
- "Ctxt7EgrOvfl\n"
- "Ctxt8EgrOvfl\n"
- "Ctxt9EgrOvfl\n"
- "Ctx10EgrOvfl\n"
- "Ctx11EgrOvfl\n"
- "Ctx12EgrOvfl\n"
- "Ctx13EgrOvfl\n"
- "Ctx14EgrOvfl\n"
- "Ctx15EgrOvfl\n"
- "Ctx16EgrOvfl\n"
- "Ctx17EgrOvfl\n"
- ;
-
-static const u32 cntr7322indices[] = {
- cr_lbint | _PORT_64BIT_FLAG,
- cr_lbstall | _PORT_64BIT_FLAG,
- cr_tidfull,
- cr_tidinvalid,
- cr_rxtidflowdrop,
- cr_base_egrovfl + 0,
- cr_base_egrovfl + 1,
- cr_base_egrovfl + 2,
- cr_base_egrovfl + 3,
- cr_base_egrovfl + 4,
- cr_base_egrovfl + 5,
- cr_base_egrovfl + 6,
- cr_base_egrovfl + 7,
- cr_base_egrovfl + 8,
- cr_base_egrovfl + 9,
- cr_base_egrovfl + 10,
- cr_base_egrovfl + 11,
- cr_base_egrovfl + 12,
- cr_base_egrovfl + 13,
- cr_base_egrovfl + 14,
- cr_base_egrovfl + 15,
- cr_base_egrovfl + 16,
- cr_base_egrovfl + 17,
-};
-
-/*
- * same as cntr7322names and cntr7322indices, but for port-specific counters.
- * portcntr7322indices is somewhat complicated by some registers needing
- * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
- */
-static const char portcntr7322names[] =
- "TxPkt\n"
- "TxFlowPkt\n"
- "TxWords\n"
- "RxPkt\n"
- "RxFlowPkt\n"
- "RxWords\n"
- "TxFlowStall\n"
- "TxDmaDesc\n" /* 7220 and 7322-only */
- "E RxDlidFltr\n" /* 7220 and 7322-only */
- "IBStatusChng\n"
- "IBLinkDown\n"
- "IBLnkRecov\n"
- "IBRxLinkErr\n"
- "IBSymbolErr\n"
- "RxLLIErr\n"
- "RxBadFormat\n"
- "RxBadLen\n"
- "RxBufOvrfl\n"
- "RxEBP\n"
- "RxFlowCtlErr\n"
- "RxICRCerr\n"
- "RxLPCRCerr\n"
- "RxVCRCerr\n"
- "RxInvalLen\n"
- "RxInvalPKey\n"
- "RxPktDropped\n"
- "TxBadLength\n"
- "TxDropped\n"
- "TxInvalLen\n"
- "TxUnderrun\n"
- "TxUnsupVL\n"
- "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
- "RxVL15Drop\n"
- "RxVlErr\n"
- "XcessBufOvfl\n"
- "RxQPBadCtxt\n" /* 7322-only from here down */
- "TXBadHeader\n"
- ;
-
-static const u32 portcntr7322indices[] = {
- QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
- crp_pktsendflow,
- QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
- QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
- crp_pktrcvflowctrl,
- QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
- crp_txsdmadesc | _PORT_64BIT_FLAG,
- crp_rxdlidfltr,
- crp_ibstatuschange,
- QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
- QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
- crp_rcvflowctrlviol,
- QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
- crp_txminmaxlenerr,
- crp_txdroppedpkt,
- crp_txlenerr,
- crp_txunderrun,
- crp_txunsupvl,
- QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
- crp_rxqpinvalidctxt,
- crp_txhdrerr,
-};
-
-/* do all the setup to make the counter reads efficient later */
-static void init_7322_cntrnames(struct qib_devdata *dd)
-{
- int i, j = 0;
- char *s;
-
- for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
- i++) {
- /* we always have at least one counter before the egrovfl */
- if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
- j = 1;
- s = strchr(s + 1, '\n');
- if (s && j)
- j++;
- }
- dd->cspec->ncntrs = i;
- if (!s)
- /* full list; size is without terminating null */
- dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
- else
- dd->cspec->cntrnamelen = 1 + s - cntr7322names;
- dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
- GFP_KERNEL);
-
- for (i = 0, s = (char *)portcntr7322names; s; i++)
- s = strchr(s + 1, '\n');
- dd->cspec->nportcntrs = i - 1;
- dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
- for (i = 0; i < dd->num_pports; ++i) {
- dd->pport[i].cpspec->portcntrs =
- kmalloc_array(dd->cspec->nportcntrs, sizeof(u64),
- GFP_KERNEL);
- }
-}
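-
-/*
- * Illustrative sketch, not part of the original driver: the counter
- * name lists are single newline-terminated strings, so counting the
- * entries (as the loops above do with strchr) reduces to counting
- * '\n' characters:
- */
-static int example_count_names(const char *names)
-{
-	int n = 0;
-
-	while ((names = strchr(names, '\n')) != NULL) {
-		n++;
-		names++;
-	}
-	return n;
-}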
-
-static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
- u64 **cntrp)
-{
- u32 ret;
-
- if (namep) {
- ret = dd->cspec->cntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- else
- *namep = (char *) cntr7322names;
- } else {
- u64 *cntr = dd->cspec->cntrs;
- int i;
-
- ret = dd->cspec->ncntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->ncntrs; i++)
- if (cntr7322indices[i] & _PORT_64BIT_FLAG)
- *cntr++ = read_7322_creg(dd,
- cntr7322indices[i] &
- _PORT_CNTR_IDXMASK);
- else
- *cntr++ = read_7322_creg32(dd,
- cntr7322indices[i]);
- }
-done:
- return ret;
-}
-
-static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
- char **namep, u64 **cntrp)
-{
- u32 ret;
-
- if (namep) {
- ret = dd->cspec->portcntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- else
- *namep = (char *)portcntr7322names;
- } else {
- struct qib_pportdata *ppd = &dd->pport[port];
- u64 *cntr = ppd->cpspec->portcntrs;
- int i;
-
- ret = dd->cspec->nportcntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->nportcntrs; i++) {
- if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
- *cntr++ = qib_portcntr_7322(ppd,
- portcntr7322indices[i] &
- _PORT_CNTR_IDXMASK);
- else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
- *cntr++ = read_7322_creg_port(ppd,
- portcntr7322indices[i] &
- _PORT_CNTR_IDXMASK);
- else
- *cntr++ = read_7322_creg32_port(ppd,
- portcntr7322indices[i]);
- }
- }
-done:
- return ret;
-}
-
-/**
- * qib_get_7322_faststats - get word counters from chip before they overflow
- * @t: contains a pointer to the qlogic_ib device qib_devdata
- *
- * VESTIGIAL: IBA7322 has no "small fast counters", so the only
- * real purpose of this function is to maintain the notion of
- * "active time", which in turn is only logged into the eeprom,
- * which we don't have yet for 7322-based boards.
- *
- * called from add_timer
- */
-static void qib_get_7322_faststats(struct timer_list *t)
-{
- struct qib_devdata *dd = timer_container_of(dd, t, stats_timer);
- struct qib_pportdata *ppd;
- unsigned long flags;
- u64 traffic_wds;
- int pidx;
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
-
- /*
-		 * If the port isn't enabled or isn't operational, or if
-		 * diags is running (which can cause memory diags to fail),
-		 * skip this port this time.
- */
- if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
- || dd->diag_client)
- continue;
-
- /*
- * Maintain an activity timer, based on traffic
- * exceeding a threshold, so we need to check the word-counts
- * even if they are 64-bit.
- */
- traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
- qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
- spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
- traffic_wds -= ppd->dd->traffic_wds;
- ppd->dd->traffic_wds += traffic_wds;
- spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
- if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
- QIB_IB_QDR) &&
- (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
- QIBL_LINKACTIVE)) &&
- ppd->cpspec->qdr_dfe_time &&
- time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
- ppd->cpspec->qdr_dfe_on = 0;
-
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_INIT_R1 :
- QDR_STATIC_ADAPT_INIT);
- force_h1(ppd);
- }
- }
- mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
-}
-
-/*
- * If we were using MSIx, try to fall back to INTx.
- */
-static int qib_7322_intr_fallback(struct qib_devdata *dd)
-{
- if (!dd->cspec->num_msix_entries)
- return 0; /* already using INTx */
-
- qib_devinfo(dd->pcidev,
- "MSIx interrupt not detected, trying INTx interrupts\n");
- qib_7322_free_irq(dd);
- if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_INTX) < 0)
- qib_dev_err(dd, "Failed to enable INTx\n");
- qib_setup_7322_interrupt(dd, 0);
- return 1;
-}
-
-/*
- * Reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well, then return to the previous state (which may still be in reset).
- * NOTE: some callers of this "know" this writes the current value
- * of cpspec->ibcctrl_a as part of its operation, so if that changes,
- * check all callers.
- */
-static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
-{
- u64 val;
- struct qib_devdata *dd = ppd->dd;
- const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
- SYM_MASK(IBPCSConfig_0, xcv_treset) |
- SYM_MASK(IBPCSConfig_0, tx_rx_reset);
-
- val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
- qib_write_kreg(dd, kr_hwerrmask,
- dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a &
- ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
-
- qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
- qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- qib_write_kreg(dd, kr_hwerrclear,
- SYM_MASK(HwErrClear, statusValidNoEopClear));
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
-}
-
-/*
- * This code for non-IBTA-compliant IB speed negotiation is only known to
- * work for the SDR to DDR transition, and only between an HCA and a switch
- * with recent firmware. It is based on observed heuristics, rather than
- * actual knowledge of the non-compliant speed negotiation.
- * It has a number of hard-coded fields, since the hope is to rewrite this
- * when a spec is available on how the negotiation is intended to work.
- */
-static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
- u32 dcnt, u32 *data)
-{
- int i;
- u64 pbc;
- u32 __iomem *piobuf;
- u32 pnum, control, len;
- struct qib_devdata *dd = ppd->dd;
-
- i = 0;
- len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
- control = qib_7322_setpbc_control(ppd, len, 0, 15);
- pbc = ((u64) control << 32) | len;
- while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
- if (i++ > 15)
- return;
- udelay(2);
- }
- /* disable header check on this packet, since it can't be valid */
- dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
- writeq(pbc, piobuf);
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, hdr, 7);
- qib_pio_copy(piobuf + 9, data, dcnt);
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf + spcl_off);
- }
- qib_flush_wc();
- qib_sendbuf_done(dd, pnum);
- /* and re-enable hdr check */
- dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
-}
-
-/*
- * _start packet gets sent twice at start, _done gets sent twice at end
- */
-static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
-{
- struct qib_devdata *dd = ppd->dd;
- static u32 swapped;
- u32 dw, i, hcnt, dcnt, *data;
- static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
- static u32 madpayload_start[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
- };
- static u32 madpayload_done[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x40000001, 0x1388, 0x15e, /* rest 0's */
- };
-
- dcnt = ARRAY_SIZE(madpayload_start);
- hcnt = ARRAY_SIZE(hdr);
- if (!swapped) {
- /* for maintainability, do it at runtime */
- for (i = 0; i < hcnt; i++) {
- dw = (__force u32) cpu_to_be32(hdr[i]);
- hdr[i] = dw;
- }
- for (i = 0; i < dcnt; i++) {
- dw = (__force u32) cpu_to_be32(madpayload_start[i]);
- madpayload_start[i] = dw;
- dw = (__force u32) cpu_to_be32(madpayload_done[i]);
- madpayload_done[i] = dw;
- }
- swapped = 1;
- }
-
- data = which ? madpayload_done : madpayload_start;
-
- autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
- qib_read_kreg64(dd, kr_scratch);
- udelay(2);
- autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
- qib_read_kreg64(dd, kr_scratch);
- udelay(2);
-}
-
-/*
- * Do the absolute minimum to cause an IB speed change, and make it
- * ready, but don't actually trigger the change. The caller will
- * do that when ready (if link is in Polling training state, it will
- * happen immediately, otherwise when link next goes down)
- *
- * This routine should only be used as part of the DDR autonegotiation
- * code for devices that are not compliant with IB 1.2 (or code that
- * fixes things up for same).
- *
- * When the link has gone down with autoneg enabled, or autoneg has
- * failed and we've given up until next time, we set both speeds, and
- * then we want IBTA enabled as well as "use max enabled speed".
- */
-static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
-{
- u64 newctrlb;
-
- newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
- IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK);
-
- if (speed & (speed - 1)) /* multiple speeds */
- newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
- IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK;
- else
- newctrlb |= speed == QIB_IB_QDR ?
- IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
- ((speed == QIB_IB_DDR ?
- IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
-
- if (newctrlb == ppd->cpspec->ibcctrl_b)
- return;
-
- ppd->cpspec->ibcctrl_b = newctrlb;
- qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
-}
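-
-/*
- * Illustrative sketch, not part of the original driver: both here and
- * in the QIB_IB_CFG_SPD_ENB case, "more than one speed bit set" is
- * tested with the classic x & (x - 1) trick, which is nonzero exactly
- * when x has two or more bits set:
- */
-static inline int example_multiple_bits_set(u64 x)
-{
-	return (x & (x - 1)) != 0;
-}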
-
-/*
- * This routine is only used when we are not talking to another
- * IB 1.2-compliant device that we think can do DDR.
- * (This includes all existing switch chips as of Oct 2007.)
- * 1.2-compliant devices go directly to DDR prior to reaching INIT
- */
-static void try_7322_autoneg(struct qib_pportdata *ppd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- qib_autoneg_7322_send(ppd, 0);
- set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
- qib_7322_mini_pcs_reset(ppd);
- /* 2 msec is minimum length of a poll cycle */
- queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
- msecs_to_jiffies(2));
-}
-
-/*
- * Handle the empirically determined mechanism for auto-negotiation
- * of DDR speed with switches.
- */
-static void autoneg_7322_work(struct work_struct *work)
-{
- struct qib_pportdata *ppd;
- u32 i;
- unsigned long flags;
-
- ppd = container_of(work, struct qib_chippport_specific,
- autoneg_work.work)->ppd;
-
- /*
-	 * Busy-wait for this first part; it should take at most a
-	 * few hundred usec, since we scheduled ourselves for 2 msec.
- */
- for (i = 0; i < 25; i++) {
- if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
- == IB_7322_LT_STATE_POLLQUIET) {
- qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
- break;
- }
- udelay(100);
- }
-
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- goto done; /* we got there early or told to stop */
-
- /* we expect this to timeout */
- if (wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(90)))
- goto done;
- qib_7322_mini_pcs_reset(ppd);
-
- /* we expect this to timeout */
- if (wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(1700)))
- goto done;
- qib_7322_mini_pcs_reset(ppd);
-
- set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
-
- /*
- * Wait up to 250 msec for link to train and get to INIT at DDR;
- * this should terminate early.
- */
- wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(250));
-done:
- if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
- if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
- ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
- ppd->cpspec->autoneg_tries = 0;
- }
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
- }
-}
-
-/*
- * This routine is used to request that IPG be set in the QLogic switch.
- * Only called if r1.
- */
-static void try_7322_ipg(struct qib_pportdata *ppd)
-{
- struct qib_ibport *ibp = &ppd->ibport_data;
- struct ib_mad_send_buf *send_buf;
- struct ib_mad_agent *agent;
- struct ib_smp *smp;
- unsigned delay;
- int ret;
-
- agent = ibp->rvp.send_agent;
- if (!agent)
- goto retry;
-
- send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
- IB_MGMT_MAD_DATA, GFP_ATOMIC,
- IB_MGMT_BASE_VERSION);
- if (IS_ERR(send_buf))
- goto retry;
-
- if (!ibp->smi_ah) {
- struct ib_ah *ah;
-
- ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
- if (IS_ERR(ah))
- ret = PTR_ERR(ah);
- else {
- send_buf->ah = ah;
- ibp->smi_ah = ibah_to_rvtah(ah);
- ret = 0;
- }
- } else {
- send_buf->ah = &ibp->smi_ah->ibah;
- ret = 0;
- }
-
- smp = send_buf->mad;
- smp->base_version = IB_MGMT_BASE_VERSION;
- smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
- smp->class_version = 1;
- smp->method = IB_MGMT_METHOD_SEND;
- smp->hop_cnt = 1;
- smp->attr_id = QIB_VENDOR_IPG;
- smp->attr_mod = 0;
-
- if (!ret)
- ret = ib_post_send_mad(send_buf, NULL);
- if (ret)
- ib_free_send_mad(send_buf);
-retry:
- delay = 2 << ppd->cpspec->ipg_tries;
- queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
- msecs_to_jiffies(delay));
-}
-
-/*
- * Timeout handler for setting IPG.
- * Only called if r1.
- */
-static void ipg_7322_work(struct work_struct *work)
-{
- struct qib_pportdata *ppd;
-
- ppd = container_of(work, struct qib_chippport_specific,
- ipg_work.work)->ppd;
- if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
- && ++ppd->cpspec->ipg_tries <= 10)
- try_7322_ipg(ppd);
-}
-
-static u32 qib_7322_iblink_state(u64 ibcs)
-{
- u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
-
- switch (state) {
- case IB_7322_L_STATE_INIT:
- state = IB_PORT_INIT;
- break;
- case IB_7322_L_STATE_ARM:
- state = IB_PORT_ARMED;
- break;
- case IB_7322_L_STATE_ACTIVE:
- case IB_7322_L_STATE_ACT_DEFER:
- state = IB_PORT_ACTIVE;
- break;
- default:
- fallthrough;
- case IB_7322_L_STATE_DOWN:
- state = IB_PORT_DOWN;
- break;
- }
- return state;
-}
-
-/* returns the IBTA port state, rather than the IBC link training state */
-static u8 qib_7322_phys_portstate(u64 ibcs)
-{
- u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
- return qib_7322_physportstate[state];
-}
-
-static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
-{
- int ret = 0, symadj = 0;
- unsigned long flags;
- int mult;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- /* Update our picture of width and speed from chip */
- if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
- ppd->link_speed_active = QIB_IB_QDR;
- mult = 4;
- } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
- ppd->link_speed_active = QIB_IB_DDR;
- mult = 2;
- } else {
- ppd->link_speed_active = QIB_IB_SDR;
- mult = 1;
- }
- if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
- ppd->link_width_active = IB_WIDTH_4X;
- mult *= 4;
- } else
- ppd->link_width_active = IB_WIDTH_1X;
- ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
-
- if (!ibup) {
- u64 clr;
-
- /* Link went down. */
- /* do IPG MAD again after linkdown, even if last time failed */
- ppd->cpspec->ipg_tries = 0;
- clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
- (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
- SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
- if (clr)
- qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
- if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
- QIBL_IB_AUTONEG_INPROG)))
- set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- struct qib_qsfp_data *qd =
- &ppd->cpspec->qsfp_data;
- /* unlock the Tx settings, speed may change */
- qib_write_kreg_port(ppd, krp_tx_deemph_override,
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- reset_tx_deemphasis_override));
- qib_cancel_sends(ppd);
- /* on link down, ensure sane pcs state */
- qib_7322_mini_pcs_reset(ppd);
-			/*
-			 * Schedule the qsfp refresh, which should turn the
-			 * link off.
-			 */
- if (ppd->dd->flags & QIB_HAS_QSFP) {
- qd->t_insert = jiffies;
- queue_work(ib_wq, &qd->work);
- }
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (__qib_sdma_running(ppd))
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e70_go_idle);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- clr = read_7322_creg32_port(ppd, crp_iblinkdown);
- if (clr == ppd->cpspec->iblnkdownsnap)
- ppd->cpspec->iblnkdowndelta++;
- } else {
- if (qib_compat_ddr_negotiate &&
- !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
- QIBL_IB_AUTONEG_INPROG)) &&
- ppd->link_speed_active == QIB_IB_SDR &&
- (ppd->link_speed_enabled & QIB_IB_DDR)
- && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
- /* we are SDR, and auto-negotiation enabled */
- ++ppd->cpspec->autoneg_tries;
- if (!ppd->cpspec->ibdeltainprog) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymdelta +=
- read_7322_creg32_port(ppd,
- crp_ibsymbolerr) -
- ppd->cpspec->ibsymsnap;
- ppd->cpspec->iblnkerrdelta +=
- read_7322_creg32_port(ppd,
- crp_iblinkerrrecov) -
- ppd->cpspec->iblnkerrsnap;
- }
- try_7322_autoneg(ppd);
- ret = 1; /* no other IB status change processing */
- } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
- ppd->link_speed_active == QIB_IB_SDR) {
- qib_autoneg_7322_send(ppd, 1);
- set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
- qib_7322_mini_pcs_reset(ppd);
- udelay(2);
- ret = 1; /* no other IB status change processing */
- } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
- (ppd->link_speed_active & QIB_IB_DDR)) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
- QIBL_IB_AUTONEG_FAILED);
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- ppd->cpspec->autoneg_tries = 0;
- /* re-enable SDR, for next link down */
- set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
- wake_up(&ppd->cpspec->autoneg_wait);
- symadj = 1;
- } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
- /*
- * Clear autoneg failure flag, and do setup
- * so we'll try next time link goes down and
- * back to INIT (possibly connected to a
- * different device).
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
- symadj = 1;
- }
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- symadj = 1;
- if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
- try_7322_ipg(ppd);
- if (!ppd->cpspec->recovery_init)
- setup_7322_link_recovery(ppd, 0);
- ppd->cpspec->qdr_dfe_time = jiffies +
- msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
- }
- ppd->cpspec->ibmalfusesnap = 0;
- ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
- crp_errlink);
- }
- if (symadj) {
- ppd->cpspec->iblnkdownsnap =
- read_7322_creg32_port(ppd, crp_iblinkdown);
- if (ppd->cpspec->ibdeltainprog) {
- ppd->cpspec->ibdeltainprog = 0;
- ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
- crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
- ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
- crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
- }
- } else if (!ibup && qib_compat_ddr_negotiate &&
- !ppd->cpspec->ibdeltainprog &&
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
- crp_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
- crp_iblinkerrrecov);
- }
-
- if (!ret)
- qib_setup_7322_setextled(ppd, ibup);
- return ret;
-}
-
-/*
- * Does read/modify/write to appropriate registers to
- * set output and direction bits selected by mask.
- * These are in their canonical positions (e.g. the lsb of
- * dir will end up in D48 of extctrl on existing chips).
- * Returns the contents of the GP Inputs.
- */
-static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
-{
- u64 read_val, new_out;
- unsigned long flags;
-
- if (mask) {
- /* some bits being written, lock access to GPIO */
- dir &= mask;
- out &= mask;
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
- dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
- new_out = (dd->cspec->gpio_out & ~mask) | out;
-
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- qib_write_kreg(dd, kr_gpio_out, new_out);
- dd->cspec->gpio_out = new_out;
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
- }
- /*
- * It is unlikely that a read at this time would get valid
- * data on a pin whose direction line was set in the same
- * call to this function. We include the read here because
- * that allows us to potentially combine a change on one pin with
- * a read on another, and because the old code did something like
- * this.
- */
- read_val = qib_read_kreg64(dd, kr_extstatus);
- return SYM_FIELD(read_val, EXTStatus, GPIOIn);
-}
-
-/* Enable writes to config EEPROM, if possible. Returns previous state */
-static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
-{
- int prev_wen;
- u32 mask;
-
- mask = 1 << QIB_EEPROM_WEN_NUM;
- prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
- gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
-
- return prev_wen & 1;
-}
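-
-/*
- * Illustrative sketch, not part of the original driver: calling
- * gpio_7322_mod() with mask == 0 (as above) performs a pure read, so a
- * hypothetical caller sampling one input pin could be written as:
- */
-static inline int example_gpio_in_bit(struct qib_devdata *dd, unsigned bit)
-{
-	return (gpio_7322_mod(dd, 0, 0, 0) >> bit) & 1;
-}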
-
-/*
- * Read fundamental info we need to use the chip. These are
- * the registers that describe chip capabilities, and are
- * saved in shadow registers.
- */
-static void get_7322_chip_params(struct qib_devdata *dd)
-{
- u64 val;
- u32 piobufs;
- int mtu;
-
- dd->palign = qib_read_kreg32(dd, kr_pagealign);
-
- dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
-
- dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
- dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
- dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
- dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
- dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
-
- val = qib_read_kreg64(dd, kr_sendpiobufcnt);
- dd->piobcnt2k = val & ~0U;
- dd->piobcnt4k = val >> 32;
- val = qib_read_kreg64(dd, kr_sendpiosize);
- dd->piosize2k = val & ~0U;
- dd->piosize4k = val >> 32;
-
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1)
- mtu = QIB_DEFAULT_MTU;
- dd->pport[0].ibmtu = (u32)mtu;
- dd->pport[1].ibmtu = (u32)mtu;
-
- /* these may be adjusted in init_chip_wc_pat() */
- dd->pio2kbase = (u32 __iomem *)
- ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
- dd->pio4kbase = (u32 __iomem *)
- ((char __iomem *) dd->kregbase +
- (dd->piobufbase >> 32));
- /*
- * 4K buffers take 2 pages; we use roundup just to be
- * paranoid; we calculate it once here, rather than on
-	 * every buffer allocation.
- */
- dd->align4k = ALIGN(dd->piosize4k, dd->palign);
-
- piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
-
- dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
- (sizeof(u64) * BITS_PER_BYTE / 2);
-}
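-
-/*
- * Illustrative sketch, not part of the original driver: several chip
- * registers read above pack two 32-bit values into one 64-bit read
- * (e.g. the 2K/4K buffer counts and sizes); the split is simply:
- */
-static inline void example_split_u64(u64 v, u32 *lo, u32 *hi)
-{
-	*lo = (u32)(v & 0xffffffffULL);	/* same as v & ~0U above */
-	*hi = (u32)(v >> 32);
-}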
-
-/*
- * The chip base addresses in cspec and cpspec have to be set
- * after possible init_chip_wc_pat(), rather than in
- * get_7322_chip_params(), so split out as separate function
- */
-static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
-{
- u32 cregbase;
-
- cregbase = qib_read_kreg32(dd, kr_counterregbase);
-
- dd->cspec->cregbase = (u64 __iomem *)(cregbase +
- (char __iomem *)dd->kregbase);
-
- dd->egrtidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + dd->rcvegrbase);
-
- /* port registers are defined as relative to base of chip */
- dd->pport[0].cpspec->kpregbase =
- (u64 __iomem *)((char __iomem *)dd->kregbase);
- dd->pport[1].cpspec->kpregbase =
- (u64 __iomem *)(dd->palign +
- (char __iomem *)dd->kregbase);
- dd->pport[0].cpspec->cpregbase =
- (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
- kr_counterregbase) + (char __iomem *)dd->kregbase);
- dd->pport[1].cpspec->cpregbase =
- (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
- kr_counterregbase) + (char __iomem *)dd->kregbase);
-}
-
-/*
- * This is a fairly special-purpose observer, so we only support
- * the port-specific parts of SendCtrl
- */
-
-#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
- SYM_MASK(SendCtrl_0, SDmaEnable) | \
- SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
- SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
- SYM_MASK(SendCtrl_0, SDmaHalt) | \
- SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
- SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
-
-static int sendctrl_hook(struct qib_devdata *dd,
- const struct diag_observer *op, u32 offs,
- u64 *data, u64 mask, int only_32)
-{
- unsigned long flags;
- unsigned idx;
- unsigned pidx;
- struct qib_pportdata *ppd = NULL;
- u64 local_data, all_bits;
-
- /*
- * The fixed correspondence between Physical ports and pports is
- * severed. We need to hunt for the ppd that corresponds
- * to the offset we got. And we have to do that without admitting
- * we know the stride, apparently.
- */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- u64 __iomem *psptr;
- u32 psoffs;
-
- ppd = dd->pport + pidx;
- if (!ppd->cpspec->kpregbase)
- continue;
-
- psptr = ppd->cpspec->kpregbase + krp_sendctrl;
- psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
- if (psoffs == offs)
- break;
- }
-
- /* If pport is not being managed by driver, just avoid shadows. */
- if (pidx >= dd->num_pports)
- ppd = NULL;
-
- /* In any case, "idx" is flat index in kreg space */
- idx = offs / sizeof(u64);
-
- all_bits = ~0ULL;
- if (only_32)
- all_bits >>= 32;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (!ppd || (mask & all_bits) != all_bits) {
- /*
- * At least some mask bits are zero, so we need
- * to read. The judgement call is whether from
- * reg or shadow. First-cut: read reg, and complain
- * if any bits which should be shadowed are different
- * from their shadowed value.
- */
- if (only_32)
- local_data = (u64)qib_read_kreg32(dd, idx);
- else
- local_data = qib_read_kreg64(dd, idx);
- *data = (local_data & ~mask) | (*data & mask);
- }
- if (mask) {
- /*
- * At least some mask bits are one, so we need
- * to write, but only shadow some bits.
- */
- u64 sval, tval; /* Shadowed, transient */
-
- /*
-		 * The new shadow value is the bits we don't want to touch,
-		 * ORed with the bits we do that are intended for the shadow.
- */
- if (ppd) {
- sval = ppd->p_sendctrl & ~mask;
- sval |= *data & SENDCTRL_SHADOWED & mask;
- ppd->p_sendctrl = sval;
- } else
- sval = *data & SENDCTRL_SHADOWED & mask;
- tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
- qib_write_kreg(dd, idx, tval);
-		qib_write_kreg(dd, kr_scratch, 0ULL);
- }
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- return only_32 ? 4 : 8;
-}
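-
-/*
- * Illustrative sketch, not part of the original driver: the observer
- * above repeatedly merges two 64-bit values under a mask (take the
- * caller's bits where mask is 1, the register's bits elsewhere):
- */
-static inline u64 example_merge_under_mask(u64 reg, u64 data, u64 mask)
-{
-	return (reg & ~mask) | (data & mask);
-}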
-
-static const struct diag_observer sendctrl_0_observer = {
- sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
- KREG_IDX(SendCtrl_0) * sizeof(u64)
-};
-
-static const struct diag_observer sendctrl_1_observer = {
- sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
- KREG_IDX(SendCtrl_1) * sizeof(u64)
-};
-
-static ushort sdma_fetch_prio = 8;
-module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
-MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
-
-/* Besides logging QSFP events, we set appropriate TxDDS values */
-static void init_txdds_table(struct qib_pportdata *ppd, int override);
-
-static void qsfp_7322_event(struct work_struct *work)
-{
- struct qib_qsfp_data *qd;
- struct qib_pportdata *ppd;
- unsigned long pwrup;
- unsigned long flags;
- int ret;
- u32 le2;
-
- qd = container_of(work, struct qib_qsfp_data, work);
- ppd = qd->ppd;
- pwrup = qd->t_insert +
- msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
-
- /* Delay for 20 msecs to allow ModPrs resistor to setup */
- mdelay(QSFP_MODPRS_LAG_MSEC);
-
- if (!qib_qsfp_mod_present(ppd)) {
- ppd->cpspec->qsfp_data.modpresent = 0;
- /* Set the physical link to disabled */
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_LINKV;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else {
- /*
-		 * Some QSFPs not only don't respond until the full power-up
-		 * time, but may behave badly if we try sooner. So hold off
-		 * responding to insertion.
- */
- while (1) {
- if (time_is_before_jiffies(pwrup))
- break;
- msleep(20);
- }
-
- ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
-
- /*
- * Need to change LE2 back to defaults if we couldn't
- * read the cable type (to handle cable swaps), so do this
- * even on failure to read cable information. We don't
- * get here for QME, so IS_QME check not needed here.
- */
- if (!ret && !ppd->dd->cspec->r1) {
- if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
- le2 = LE2_QME;
- else if (qd->cache.atten[1] >= qib_long_atten &&
- QSFP_IS_CU(qd->cache.tech))
- le2 = LE2_5m;
- else
- le2 = LE2_DEFAULT;
- } else
- le2 = LE2_DEFAULT;
- ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
- /*
-		 * We always change parameters, since we can choose
- * values for cables without eeproms, and the cable may have
- * changed from a cable with full or partial eeprom content
- * to one with partial or no content.
- */
- init_txdds_table(ppd, 0);
- /* The physical link is being re-enabled only when the
- * previous state was DISABLED and the VALID bit is not
- * set. This should only happen when the cable has been
- * physically pulled. */
- if (!ppd->cpspec->qsfp_data.modpresent &&
- (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
- ppd->cpspec->qsfp_data.modpresent = 1;
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_LINKV;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
- }
-}
-
-/*
- * There is little we can do but complain to the user if QSFP
- * initialization fails.
- */
-static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
-{
- unsigned long flags;
- struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
- struct qib_devdata *dd = ppd->dd;
- u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
-
- mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
- qd->ppd = ppd;
- qib_qsfp_init(qd, qsfp_7322_event);
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
- dd->cspec->gpio_mask |= mod_prs_bit;
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-}
-
-/*
- * called at device initialization time, and also if the txselect
- * module parameter is changed. This is used for cables that don't
- * have valid QSFP EEPROMs (not present, or attenuation is zero).
- * We initialize to the default, then if there is a specific
- * unit,port match, we use that (and set it immediately, for the
- * current speed, if the link is at INIT or better).
- * String format is "default# unit#,port#=# ... u,p=#", separators must
- * be a SPACE character. A newline terminates. The u,p=# tuples may
- * optionally have "u,p=#,#", where the final # is the H1 value.
- * The last specific match is used (actually, all are used, but last
- * one is the one that winds up set); if none at all, fall back on default.
- */
-static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
-{
- char *nxt, *str;
- u32 pidx, unit, port, deflt, h1;
- unsigned long val;
- int any = 0, seth1;
- int txdds_size;
-
- str = txselect_list;
-
- /* default number is validated in setup_txselect() */
- deflt = simple_strtoul(str, &nxt, 0);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- dd->pport[pidx].cpspec->no_eep = deflt;
-
- txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
- if (IS_QME(dd) || IS_QMH(dd))
- txdds_size += TXDDS_MFG_SZ;
-
- while (*nxt && nxt[1]) {
- str = ++nxt;
- unit = simple_strtoul(str, &nxt, 0);
- if (nxt == str || !*nxt || *nxt != ',') {
- while (*nxt && *nxt++ != ' ') /* skip to next, if any */
- ;
- continue;
- }
- str = ++nxt;
- port = simple_strtoul(str, &nxt, 0);
- if (nxt == str || *nxt != '=') {
- while (*nxt && *nxt++ != ' ') /* skip to next, if any */
- ;
- continue;
- }
- str = ++nxt;
- val = simple_strtoul(str, &nxt, 0);
- if (nxt == str) {
- while (*nxt && *nxt++ != ' ') /* skip to next, if any */
- ;
- continue;
- }
- if (val >= txdds_size)
- continue;
- seth1 = 0;
- h1 = 0; /* gcc thinks it might be used uninitialized */
- if (*nxt == ',' && nxt[1]) {
- str = ++nxt;
- h1 = (u32)simple_strtoul(str, &nxt, 0);
- if (nxt == str)
- while (*nxt && *nxt++ != ' ') /* skip */
- ;
- else
- seth1 = 1;
- }
- for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
- ++pidx) {
- struct qib_pportdata *ppd = &dd->pport[pidx];
-
- if (ppd->port != port || !ppd->link_speed_supported)
- continue;
- ppd->cpspec->no_eep = val;
- if (seth1)
- ppd->cpspec->h1_val = h1;
- /* now change the IBC and serdes, overriding generic */
- init_txdds_table(ppd, 1);
- /* Re-enable the physical state machine on mezz boards
- * now that the correct settings have been set.
- * QSFP boards are handled by the QSFP event handler. */
- if (IS_QMH(dd) || IS_QME(dd))
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
- any++;
- }
- if (*nxt == '\n')
- break; /* done */
- }
- if (change && !any) {
- /* no specific setting, use the default.
- * Change the IBC and serdes, but since it's
- * general, don't override specific settings.
- */
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- if (dd->pport[pidx].link_speed_supported)
- init_txdds_table(&dd->pport[pidx], 0);
- }
-}
-
-/* handle the txselect parameter changing */
-static int setup_txselect(const char *str, const struct kernel_param *kp)
-{
- struct qib_devdata *dd;
- unsigned long index, val;
- char *n;
-
- if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
- pr_info("txselect_values string too long\n");
- return -ENOSPC;
- }
- val = simple_strtoul(str, &n, 0);
- if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
- TXDDS_MFG_SZ)) {
- pr_info("txselect_values must start with a number < %d\n",
- TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
- return -EINVAL;
- }
- strscpy(txselect_list, str, sizeof(txselect_list));
-
- xa_for_each(&qib_dev_table, index, dd)
- if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
- set_no_qsfp_atten(dd, 1);
- return 0;
-}
-
-/*
- * Write the final few registers that depend on some of the
- * init setup. Done late in init, just before bringing up
- * the serdes.
- */
-static int qib_late_7322_initreg(struct qib_devdata *dd)
-{
- int ret = 0, n;
- u64 val;
-
- qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
- qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
- qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
- qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
- val = qib_read_kreg64(dd, kr_sendpioavailaddr);
- if (val != dd->pioavailregs_phys) {
- qib_dev_err(dd,
- "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
- (unsigned long) dd->pioavailregs_phys,
- (unsigned long long) val);
- ret = -EINVAL;
- }
-
- n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
- qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
- /* driver sends get pkey, lid, etc. checking also, to catch bugs */
- qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
-
- qib_register_observer(dd, &sendctrl_0_observer);
- qib_register_observer(dd, &sendctrl_1_observer);
-
- dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
- qib_write_kreg(dd, kr_control, dd->control);
- /*
- * Set SendDmaFetchPriority and init Tx params, including
- * QSFP handler on boards that have QSFP.
- * First set our default attenuation entry for cables that
- * don't have valid attenuation.
- */
- set_no_qsfp_atten(dd, 0);
- for (n = 0; n < dd->num_pports; ++n) {
- struct qib_pportdata *ppd = dd->pport + n;
-
- qib_write_kreg_port(ppd, krp_senddmaprioritythld,
- sdma_fetch_prio & 0xf);
- /* Initialize qsfp if present on board. */
- if (dd->flags & QIB_HAS_QSFP)
- qib_init_7322_qsfp(ppd);
- }
- dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
- qib_write_kreg(dd, kr_control, dd->control);
-
- return ret;
-}
-
-/* per IB port errors. */
-#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
- MASK_ACROSS(8, 15))
-#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
-#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
- MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
- MASK_ACROSS(0, 11))
-
-/*
- * Write the initialization per-port registers that need to be done at
- * driver load and after reset completes (i.e., that aren't done as part
- * of other init procedures called from qib_init.c).
- * Some of these should be redundant on reset, but play safe.
- */
-static void write_7322_init_portregs(struct qib_pportdata *ppd)
-{
- u64 val;
- int i;
-
- if (!ppd->link_speed_supported) {
- /* no buffer credits for this port */
- for (i = 1; i < 8; i++)
- qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
- qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
- return;
- }
-
- /*
- * Set the number of supported virtual lanes in IBC,
- * for flow control packet handling on unsupported VLs
- */
- val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
- val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
- val |= (u64)(ppd->vls_supported - 1) <<
- SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
- qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
-
- qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
-
- /* enable tx header checking */
- qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
- IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
- IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
-
- qib_write_kreg_port(ppd, krp_ncmodectrl,
- SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
-
- /*
- * Unconditionally clear the bufmask bits. If SDMA is
- * enabled, we'll set them appropriately later.
- */
- qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
- qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
- qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
- if (ppd->dd->cspec->r1)
- ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
-}
-
-/*
- * Write the initialization per-device registers that need to be done at
- * driver load and after reset completes (i.e., that aren't done as part
- * of other init procedures called from qib_init.c). Also write per-port
- * registers that are affected by overall device config, such as QP mapping
- * Some of these should be redundant on reset, but play safe.
- */
-static void write_7322_initregs(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- int i, pidx;
- u64 val;
-
- /* Set Multicast QPs received by port 2 to map to context one. */
- qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- unsigned n, regno;
- unsigned long flags;
-
- if (dd->n_krcv_queues < 2 ||
- !dd->pport[pidx].link_speed_supported)
- continue;
-
- ppd = &dd->pport[pidx];
-
- /* be paranoid against later code motion, etc. */
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-
- /* Initialize QP to context mapping */
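- /*
- * Each 64-bit map register packs six 5-bit context fields,
- * so the accumulated value is flushed and the register index
- * advanced after every sixth of the 32 entries.
- */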
- regno = krp_rcvqpmaptable;
- val = 0;
- if (dd->num_pports > 1)
- n = dd->first_user_ctxt / dd->num_pports;
- else
- n = dd->first_user_ctxt - 1;
- for (i = 0; i < 32; ) {
- unsigned ctxt;
-
- if (dd->num_pports > 1)
- ctxt = (i % n) * dd->num_pports + pidx;
- else if (i % n)
- ctxt = (i % n) + 1;
- else
- ctxt = ppd->hw_pidx;
- val |= ctxt << (5 * (i % 6));
- i++;
- if (i % 6 == 0) {
- qib_write_kreg_port(ppd, regno, val);
- val = 0;
- regno++;
- }
- }
- qib_write_kreg_port(ppd, regno, val);
- }
-
- /*
- * Set up interrupt mitigation for kernel contexts, but
- * not user contexts (user contexts use interrupts when
- * stalled waiting for any packet, so want those interrupts
- * right away).
- */
- for (i = 0; i < dd->first_user_ctxt; i++) {
- dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
- qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
- }
-
- /*
- * Initialize the rcvflow tables as disabled. Application code
- * will set up each flow as it uses the flow.
- * This doesn't clear any error bits that might be set.
- */
- val = TIDFLOW_ERRBITS; /* these are W1C */
- for (i = 0; i < dd->cfgctxts; i++) {
- int flow;
-
- for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
- qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
- }
-
- /*
- * dual cards init to dual port recovery, single port cards to
- * the one port. Dual port cards may later adjust to 1 port,
- * and then back to dual port if both ports are connected.
- */
- if (dd->num_pports)
- setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
-}
-
-static int qib_init_7322_variables(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- unsigned features, pidx, sbufcnt;
- int ret, mtu;
- u32 sbufs, updthresh;
- resource_size_t vl15off;
-
- /* pport structs are contiguous, allocated after devdata */
- ppd = (struct qib_pportdata *)(dd + 1);
- dd->pport = ppd;
- ppd[0].dd = dd;
- ppd[1].dd = dd;
-
- dd->cspec = (struct qib_chip_specific *)(ppd + 2);
-
- ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
- ppd[1].cpspec = &ppd[0].cpspec[1];
- ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
- ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
-
- spin_lock_init(&dd->cspec->rcvmod_lock);
- spin_lock_init(&dd->cspec->gpio_lock);
-
- /* we haven't yet set QIB_PRESENT, so use read directly */
- dd->revision = readq(&dd->kregbase[kr_revision]);
-
- if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
- qib_dev_err(dd,
- "Revision register read failure, giving up initialization\n");
- ret = -ENODEV;
- goto bail;
- }
- dd->flags |= QIB_PRESENT; /* now register routines work */
-
- dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
- dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
- dd->cspec->r1 = dd->minrev == 1;
-
- get_7322_chip_params(dd);
- features = qib_7322_boardname(dd);
-
- /* now that piobcnt2k and 4k set, we can allocate these */
- sbufcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
-
- dd->cspec->sendchkenable = bitmap_zalloc(sbufcnt, GFP_KERNEL);
- dd->cspec->sendgrhchk = bitmap_zalloc(sbufcnt, GFP_KERNEL);
- dd->cspec->sendibchk = bitmap_zalloc(sbufcnt, GFP_KERNEL);
- if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
- !dd->cspec->sendibchk) {
- ret = -ENOMEM;
- goto bail;
- }
-
- ppd = dd->pport;
-
- /*
- * GPIO bits for TWSI data and clock,
- * used for serial EEPROM.
- */
- dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
- dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
- dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
-
- dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
- QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
- QIB_HAS_THRESH_UPDATE |
- (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
- dd->flags |= qib_special_trigger ?
- QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
-
- /*
- * Setup initial values. These may change when PAT is enabled, but
- * we need these to do initial chip register accesses.
- */
- qib_7322_set_baseaddrs(dd);
-
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1)
- mtu = QIB_DEFAULT_MTU;
-
- dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
- /* all hwerrors become interrupts, unless special purposed */
- dd->cspec->hwerrmask = ~0ULL;
- /* link_recovery setup causes these errors, so ignore them,
- * other than clearing them when they occur */
- dd->cspec->hwerrmask &=
- ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
- SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
- HWE_MASK(LATriggered));
-
- for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
- struct qib_chippport_specific *cp = ppd->cpspec;
-
- ppd->link_speed_supported = features & PORT_SPD_CAP;
- features >>= PORT_SPD_CAP_SHIFT;
- if (!ppd->link_speed_supported) {
- /* single port mode (7340, or configured) */
- dd->skip_kctxt_mask |= 1 << pidx;
- if (pidx == 0) {
- /* Make sure port is disabled. */
- qib_write_kreg_port(ppd, krp_rcvctrl, 0);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
- ppd[0] = ppd[1];
- dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
- IBSerdesPClkNotDetectMask_0)
- | SYM_MASK(HwErrMask,
- SDmaMemReadErrMask_0));
- dd->cspec->int_enable_mask &= ~(
- SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
- SYM_MASK(IntMask, SDmaIdleIntMask_0) |
- SYM_MASK(IntMask, SDmaProgressIntMask_0) |
- SYM_MASK(IntMask, SDmaIntMask_0) |
- SYM_MASK(IntMask, ErrIntMask_0) |
- SYM_MASK(IntMask, SendDoneIntMask_0));
- } else {
- /* Make sure port is disabled. */
- qib_write_kreg_port(ppd, krp_rcvctrl, 0);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
- dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
- IBSerdesPClkNotDetectMask_1)
- | SYM_MASK(HwErrMask,
- SDmaMemReadErrMask_1));
- dd->cspec->int_enable_mask &= ~(
- SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
- SYM_MASK(IntMask, SDmaIdleIntMask_1) |
- SYM_MASK(IntMask, SDmaProgressIntMask_1) |
- SYM_MASK(IntMask, SDmaIntMask_1) |
- SYM_MASK(IntMask, ErrIntMask_1) |
- SYM_MASK(IntMask, SendDoneIntMask_1));
- }
- continue;
- }
-
- dd->num_pports++;
- ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
- if (ret) {
- dd->num_pports--;
- goto bail;
- }
-
- ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- ppd->link_width_enabled = IB_WIDTH_4X;
- ppd->link_speed_enabled = ppd->link_speed_supported;
- /*
- * Set the initial values to reasonable default, will be set
- * for real when link is up.
- */
- ppd->link_width_active = IB_WIDTH_4X;
- ppd->link_speed_active = QIB_IB_SDR;
- ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
- switch (qib_num_cfg_vls) {
- case 1:
- ppd->vls_supported = IB_VL_VL0;
- break;
- case 2:
- ppd->vls_supported = IB_VL_VL0_1;
- break;
- default:
- qib_devinfo(dd->pcidev,
- "Invalid num_vls %u, using 4 VLs\n",
- qib_num_cfg_vls);
- qib_num_cfg_vls = 4;
- fallthrough;
- case 4:
- ppd->vls_supported = IB_VL_VL0_3;
- break;
- case 8:
- if (mtu <= 2048)
- ppd->vls_supported = IB_VL_VL0_7;
- else {
- qib_devinfo(dd->pcidev,
- "Invalid num_vls %u for MTU %d , using 4 VLs\n",
- qib_num_cfg_vls, mtu);
- ppd->vls_supported = IB_VL_VL0_3;
- qib_num_cfg_vls = 4;
- }
- break;
- }
- ppd->vls_operational = ppd->vls_supported;
-
- init_waitqueue_head(&cp->autoneg_wait);
- INIT_DELAYED_WORK(&cp->autoneg_work,
- autoneg_7322_work);
- if (ppd->dd->cspec->r1)
- INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
-
- /*
- * For mezzanine and similar cards there is no QSFP info, so do
- * the "cable info" setup here. This can be overridden
- * in adapter-specific routines.
- */
- if (!(dd->flags & QIB_HAS_QSFP)) {
- if (!IS_QMH(dd) && !IS_QME(dd))
- qib_devinfo(dd->pcidev,
- "IB%u:%u: Unknown mezzanine card type\n",
- dd->unit, ppd->port);
- cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
- /*
- * Choose center value as default tx serdes setting
- * until changed through module parameter.
- */
- ppd->cpspec->no_eep = IS_QMH(dd) ?
- TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
- } else
- cp->h1_val = H1_FORCE_VAL;
-
- /* Avoid writes to chip for mini_init */
- if (!qib_mini_init)
- write_7322_init_portregs(ppd);
-
- timer_setup(&cp->chase_timer, reenable_chase, 0);
-
- ppd++;
- }
-
- dd->rcvhdrentsize = qib_rcvhdrentsize ?
- qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
- dd->rcvhdrsize = qib_rcvhdrsize ?
- qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
- dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
-
- /* we always allocate at least 2048 bytes for eager buffers */
- dd->rcvegrbufsize = max(mtu, 2048);
- dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
-
- qib_7322_tidtemplate(dd);
-
- /*
- * We can request a receive interrupt for 1 or
- * more packets from current offset.
- */
- dd->rhdrhead_intr_off =
- (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
-
- /* setup the stats timer; the add_timer is done at end of init */
- timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
-
- dd->ureg_align = 0x10000; /* 64KB alignment */
-
- dd->piosize2kmax_dwords = dd->piosize2k >> 2;
-
- qib_7322_config_ctxts(dd);
- qib_set_ctxtcnt(dd);
-
- /*
- * We do not set WC on the VL15 buffers to avoid
- * a rare problem with unaligned writes from
- * interrupt-flushed store buffers, so we need
- * to map those separately here. We can't solve
- * this for the rarely used mtrr case.
- */
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
-
- /* vl15 buffers start just after the 4k buffers */
- vl15off = dd->physaddr + (dd->piobufbase >> 32) +
- dd->piobcnt4k * dd->align4k;
- dd->piovl15base = ioremap(vl15off,
- NUM_VL15_BUFS * dd->align4k);
- if (!dd->piovl15base) {
- ret = -ENOMEM;
- goto bail;
- }
-
- qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
-
- ret = 0;
- if (qib_mini_init)
- goto bail;
- if (!dd->num_pports) {
- qib_dev_err(dd, "No ports enabled, giving up initialization\n");
- goto bail; /* no error, so can still figure out why err */
- }
-
- write_7322_initregs(dd);
- ret = qib_create_ctxts(dd);
- init_7322_cntrnames(dd);
-
- updthresh = 8U; /* update threshold */
-
- /* Use all of the 4KB buffers for kernel SDMA, or none if SDMA
- * is disabled. When SDMA is enabled, reserve the update-threshold
- * number of buffers (or 3, whichever is greater) for other kernel
- * use, such as sending SMI, MAD, and ACK packets; without SDMA,
- * all the 4k buffers are used by the kernel directly.
- * If the reserve were less than the update threshold, we could
- * wait a long time for an update. Coded this way because we
- * sometimes change the update threshold for various reasons,
- * and we want this to remain robust.
- */
- if (dd->flags & QIB_HAS_SEND_DMA) {
- dd->cspec->sdmabufcnt = dd->piobcnt4k;
- sbufs = updthresh > 3 ? updthresh : 3;
- } else {
- dd->cspec->sdmabufcnt = 0;
- sbufs = dd->piobcnt4k;
- }
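- /*
- * Worked example (hypothetical counts): with piobcnt2k = 128,
- * piobcnt4k = 32 and SDMA enabled, sdmabufcnt = 32, so buffers
- * 0-127 remain for PIO (lastbuf_for_pio ends at 127 after the
- * decrement below), and the last sbufs of those are reserved
- * for kernel use.
- */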
- dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
- dd->cspec->sdmabufcnt;
- dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
- dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
- dd->last_pio = dd->cspec->lastbuf_for_pio;
- dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
- dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
-
- /*
- * If we have 16 user contexts, we will have 7 sbufs
- * per context, so reduce the update threshold to match. We
- * want the update before we actually run out, so at low
- * pbufs/ctxt give ourselves some margin.
- */
- if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
- updthresh = dd->pbufsctxt - 2;
- dd->cspec->updthresh_dflt = updthresh;
- dd->cspec->updthresh = updthresh;
-
- /* before full enable, no interrupts, no locking needed */
- dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
- << SYM_LSB(SendCtrl, AvailUpdThld)) |
- SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
-
- dd->psxmitwait_supported = 1;
- dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
-bail:
- if (!dd->ctxtcnt)
- dd->ctxtcnt = 1; /* for other initialization code */
-
- return ret;
-}
-
-static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
- u32 *pbufnum)
-{
- u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
- struct qib_devdata *dd = ppd->dd;
-
- /* last is same for 2k and 4k, because we use 4k if all 2k busy */
- if (pbc & PBC_7322_VL15_SEND) {
- first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
- last = first;
- } else {
- if ((plen + 1) > dd->piosize2kmax_dwords)
- first = dd->piobcnt2k;
- else
- first = 0;
- last = dd->cspec->lastbuf_for_pio;
- }
- return qib_getsendbuf_range(dd, pbufnum, first, last);
-}
-
-static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
- u32 start)
-{
- qib_write_kreg_port(ppd, krp_psinterval, intv);
- qib_write_kreg_port(ppd, krp_psstart, start);
-}
-
-/*
- * Must be called with sdma_lock held, or before init finished.
- */
-static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
-{
- qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
-}
-
-/*
- * sdma_lock should be acquired before calling this routine
- */
-static void dump_sdma_7322_state(struct qib_pportdata *ppd)
-{
- u64 reg, reg1, reg2;
-
- reg = qib_read_kreg_port(ppd, krp_senddmastatus);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmastatus: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_sendctrl);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA sendctrl: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmabase);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmabase: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
- reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
- reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmabufmask 0:%llx 1:%llx 2:%llx\n",
- reg, reg1, reg2);
-
- /* get bufuse bits, clear them, and print them again if non-zero */
- reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
- qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
- reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
- qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
- reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
- qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
- /* 0 and 1 should always be zero, so print as short form */
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA current senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
- reg, reg1, reg2);
- reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
- reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
- reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
- /* 0 and 1 should always be zero, so print as short form */
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA cleared senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
- reg, reg1, reg2);
-
- reg = qib_read_kreg_port(ppd, krp_senddmatail);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmatail: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmahead);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmahead: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmaheadaddr: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmalengen);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmalengen: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmadesccnt: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmaidlecnt: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmapriorityhld: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmareloadcnt: 0x%016llx\n", reg);
-
- dump_sdma_state(ppd);
-}
-
-static struct sdma_set_state_action sdma_7322_action_table[] = {
- [qib_sdma_state_s00_hw_down] = {
- .go_s99_running_tofalse = 1,
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 0,
- .op_drain = 0,
- },
- [qib_sdma_state_s10_hw_start_up_wait] = {
- .op_enable = 0,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 0,
- },
- [qib_sdma_state_s20_idle] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 0,
- },
- [qib_sdma_state_s30_sw_clean_up_wait] = {
- .op_enable = 0,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 0,
- },
- [qib_sdma_state_s40_hw_clean_up_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 0,
- },
- [qib_sdma_state_s50_hw_halt_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 1,
- },
- [qib_sdma_state_s99_running] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 0,
- .op_drain = 0,
- .go_s99_running_totrue = 1,
- },
-};
-
-static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
-{
- ppd->sdma_state.set_state_action = sdma_7322_action_table;
-}
-
-static int init_sdma_7322_regs(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned lastbuf, erstbuf;
- u64 senddmabufmask[3] = { 0 };
- int n;
-
- qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
- qib_sdma_7322_setlengen(ppd);
- qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
- qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
- qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
- qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
-
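- /*
- * The SDMA buffers are carved from the top of the PIO buffer
- * space, one contiguous range per port. For example (hypothetical
- * counts), with two ports and sdmabufcnt = 32, port 1 gets the
- * 16 buffers just below the top 16, and port 2 gets the top 16.
- */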
- if (dd->num_pports)
- n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
- else
- n = dd->cspec->sdmabufcnt; /* failsafe for init */
- erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
- ((dd->num_pports == 1 || ppd->port == 2) ? n :
- dd->cspec->sdmabufcnt);
- lastbuf = erstbuf + n;
-
- ppd->sdma_state.first_sendbuf = erstbuf;
- ppd->sdma_state.last_sendbuf = lastbuf;
- for (; erstbuf < lastbuf; ++erstbuf) {
- unsigned word = erstbuf / BITS_PER_LONG;
- unsigned bit = erstbuf & (BITS_PER_LONG - 1);
-
- senddmabufmask[word] |= 1ULL << bit;
- }
- qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
- qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
- qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
- return 0;
-}
-
-/* sdma_lock must be held */
-static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- int sane;
- int use_dmahead;
- u16 swhead;
- u16 swtail;
- u16 cnt;
- u16 hwhead;
-
- use_dmahead = __qib_sdma_running(ppd) &&
- (dd->flags & QIB_HAS_SDMA_TIMEOUT);
-retry:
- hwhead = use_dmahead ?
- (u16) le64_to_cpu(*ppd->sdma_head_dma) :
- (u16) qib_read_kreg_port(ppd, krp_senddmahead);
-
- swhead = ppd->sdma_descq_head;
- swtail = ppd->sdma_descq_tail;
- cnt = ppd->sdma_descq_cnt;
-
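- /*
- * Sanity-check hwhead against the software head/tail. For
- * example (hypothetical values), with cnt = 128, swhead = 100
- * and swtail = 20 (wrapped), hwhead is sane only in [100, 127]
- * or [0, 20].
- */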
- if (swhead < swtail)
- /* not wrapped */
- sane = (hwhead >= swhead) && (hwhead <= swtail);
- else if (swhead > swtail)
- /* wrapped around */
- sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
- (hwhead <= swtail);
- else
- /* empty */
- sane = (hwhead == swhead);
-
- if (unlikely(!sane)) {
- if (use_dmahead) {
- /* try one more time, directly from the register */
- use_dmahead = 0;
- goto retry;
- }
- /* proceed as if no progress */
- hwhead = swhead;
- }
-
- return hwhead;
-}
-
-static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
-{
- u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
-
- return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
- (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
- !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
- !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
-}
-
-/*
- * Compute the amount of delay before sending the next packet if the
- * port's send rate differs from the static rate set for the QP.
- * The delay affects the next packet and the amount of the delay is
- * based on the length of this packet.
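- * For example (hypothetical multipliers): if the port's delay_mult
- * is 2 and the QP's static rate gives rcv_mult = 8, a packet of
- * plen = 256 yields a delay of ((256 + 1) >> 1) * 2 = 256.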
- */
-static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
- u8 srate, u8 vl)
-{
- u8 snd_mult = ppd->delay_mult;
- u8 rcv_mult = ib_rate_to_delay[srate];
- u32 ret;
-
- ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
-
- /* Indicate VL15, else set the VL in the control word */
- if (vl == 15)
- ret |= PBC_7322_VL15_SEND_CTRL;
- else
- ret |= vl << PBC_VL_NUM_LSB;
- ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
-
- return ret;
-}
-
-/*
- * Enable the per-port VL15 send buffers for use.
- * They follow the rest of the buffers, without a config parameter.
- * This was in initregs, but initregs runs before the shadow
- * is set up, so this has to be done afterward.
- */
-static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
-{
- unsigned vl15bufs;
-
- vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
- qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
- TXCHK_CHG_TYPE_KERN, NULL);
-}
-
-static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
-{
- if (rcd->ctxt < NUM_IB_PORTS) {
- if (rcd->dd->num_pports > 1) {
- rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
- rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
- } else {
- rcd->rcvegrcnt = KCTXT0_EGRCNT;
- rcd->rcvegr_tid_base = 0;
- }
- } else {
- rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
- rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
- (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
- }
-}
-
-#define QTXSLEEPS 5000
-static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
- u32 len, u32 which, struct qib_ctxtdata *rcd)
-{
- int i;
- const int last = start + len - 1;
- const int lastr = last / BITS_PER_LONG;
- u32 sleeps = 0;
- int wait = rcd != NULL;
- unsigned long flags;
-
- while (wait) {
- unsigned long shadow = 0;
- int cstart, previ = -1;
-
- /*
- * when flipping from kernel to user, we can't change
- * the checking type if the buffer is allocated to the
- * driver. It's OK in the other direction, because it
- * happens at close, and we have just disarmed all the
- * buffers. All kernel-to-kernel changes are also
- * OK.
- */
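- /*
- * Each buffer has two bits in the pioavail shadow; the busy
- * bit for buffer N is bit (2 * N +
- * QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT) of the little-endian
- * shadow, hence the word/bit arithmetic below.
- */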
- for (cstart = start; cstart <= last; cstart++) {
- i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
- / BITS_PER_LONG;
- if (i != previ) {
- shadow = (unsigned long)
- le64_to_cpu(dd->pioavailregs_dma[i]);
- previ = i;
- }
- if (test_bit(((2 * cstart) +
- QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
- % BITS_PER_LONG, &shadow))
- break;
- }
-
- if (cstart > last)
- break;
-
- if (sleeps == QTXSLEEPS)
- break;
- /* make sure we see an updated copy next time around */
- sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- sleeps++;
- msleep(20);
- }
-
- switch (which) {
- case TXCHK_CHG_TYPE_DIS1:
- /*
- * disable checking on a range; used by diags; just
- * one buffer, but still written generically
- */
- for (i = start; i <= last; i++)
- clear_bit(i, dd->cspec->sendchkenable);
- break;
-
- case TXCHK_CHG_TYPE_ENAB1:
- /*
- * (re)enable checking on a range; used by diags; just
- * one buffer, but still written generically; read
- * scratch to be sure buffer actually triggered, not
- * just flushed from processor.
- */
- qib_read_kreg32(dd, kr_scratch);
- for (i = start; i <= last; i++)
- set_bit(i, dd->cspec->sendchkenable);
- break;
-
- case TXCHK_CHG_TYPE_KERN:
- /* usable by kernel */
- for (i = start; i <= last; i++) {
- set_bit(i, dd->cspec->sendibchk);
- clear_bit(i, dd->cspec->sendgrhchk);
- }
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- /* see if we need to raise avail update threshold */
- for (i = dd->first_user_ctxt;
- dd->cspec->updthresh != dd->cspec->updthresh_dflt
- && i < dd->cfgctxts; i++)
- if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
- ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
- < dd->cspec->updthresh_dflt)
- break;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- if (i == dd->cfgctxts) {
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- dd->cspec->updthresh = dd->cspec->updthresh_dflt;
- dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
- dd->sendctrl |= (dd->cspec->updthresh &
- SYM_RMASK(SendCtrl, AvailUpdThld)) <<
- SYM_LSB(SendCtrl, AvailUpdThld);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- }
- break;
-
- case TXCHK_CHG_TYPE_USER:
- /* for user process */
- for (i = start; i <= last; i++) {
- clear_bit(i, dd->cspec->sendibchk);
- set_bit(i, dd->cspec->sendgrhchk);
- }
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
- / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
- dd->cspec->updthresh = (rcd->piocnt /
- rcd->subctxt_cnt) - 1;
- dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
- dd->sendctrl |= (dd->cspec->updthresh &
- SYM_RMASK(SendCtrl, AvailUpdThld))
- << SYM_LSB(SendCtrl, AvailUpdThld);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- } else
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- break;
-
- default:
- break;
- }
-
- for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
- qib_write_kreg(dd, kr_sendcheckmask + i,
- dd->cspec->sendchkenable[i]);
-
- for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
- qib_write_kreg(dd, kr_sendgrhcheckmask + i,
- dd->cspec->sendgrhchk[i]);
- qib_write_kreg(dd, kr_sendibpktmask + i,
- dd->cspec->sendibchk[i]);
- }
-
- /*
- * Be sure whatever we did was seen by the chip and acted upon,
- * before we return. Mostly important for which >= 2.
- */
- qib_read_kreg32(dd, kr_scratch);
-}
-
-
-/* useful for trigger analyzers, etc. */
-static void writescratch(struct qib_devdata *dd, u32 val)
-{
- qib_write_kreg(dd, kr_scratch, val);
-}
-
-/* Dummy for now, use chip regs soon */
-static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
-{
- return -ENXIO;
-}
-
-/**
- * qib_init_iba7322_funcs - set up the chip-specific function pointers
- * @pdev: the pci_dev for qlogic_ib device
- * @ent: pci_device_id struct for this dev
- *
- * Also allocates, inits, and returns the devdata struct for this
- * device instance
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct qib_devdata *dd;
- int ret, i;
- u32 tabsize, actual_cnt = 0;
-
- dd = qib_alloc_devdata(pdev,
- NUM_IB_PORTS * sizeof(struct qib_pportdata) +
- sizeof(struct qib_chip_specific) +
- NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
- if (IS_ERR(dd))
- goto bail;
-
- dd->f_bringup_serdes = qib_7322_bringup_serdes;
- dd->f_cleanup = qib_setup_7322_cleanup;
- dd->f_clear_tids = qib_7322_clear_tids;
- dd->f_free_irq = qib_7322_free_irq;
- dd->f_get_base_info = qib_7322_get_base_info;
- dd->f_get_msgheader = qib_7322_get_msgheader;
- dd->f_getsendbuf = qib_7322_getsendbuf;
- dd->f_gpio_mod = gpio_7322_mod;
- dd->f_eeprom_wen = qib_7322_eeprom_wen;
- dd->f_hdrqempty = qib_7322_hdrqempty;
- dd->f_ib_updown = qib_7322_ib_updown;
- dd->f_init_ctxt = qib_7322_init_ctxt;
- dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
- dd->f_intr_fallback = qib_7322_intr_fallback;
- dd->f_late_initreg = qib_late_7322_initreg;
- dd->f_setpbc_control = qib_7322_setpbc_control;
- dd->f_portcntr = qib_portcntr_7322;
- dd->f_put_tid = qib_7322_put_tid;
- dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
- dd->f_rcvctrl = rcvctrl_7322_mod;
- dd->f_read_cntrs = qib_read_7322cntrs;
- dd->f_read_portcntrs = qib_read_7322portcntrs;
- dd->f_reset = qib_do_7322_reset;
- dd->f_init_sdma_regs = init_sdma_7322_regs;
- dd->f_sdma_busy = qib_sdma_7322_busy;
- dd->f_sdma_gethead = qib_sdma_7322_gethead;
- dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
- dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
- dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
- dd->f_sendctrl = sendctrl_7322_mod;
- dd->f_set_armlaunch = qib_set_7322_armlaunch;
- dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
- dd->f_iblink_state = qib_7322_iblink_state;
- dd->f_ibphys_portstate = qib_7322_phys_portstate;
- dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
- dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
- dd->f_set_ib_loopback = qib_7322_set_loopback;
- dd->f_get_ib_table = qib_7322_get_ib_table;
- dd->f_set_ib_table = qib_7322_set_ib_table;
- dd->f_set_intr_state = qib_7322_set_intr_state;
- dd->f_setextled = qib_setup_7322_setextled;
- dd->f_txchk_change = qib_7322_txchk_change;
- dd->f_update_usrhead = qib_update_7322_usrhead;
- dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
- dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
- dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
- dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
- dd->f_sdma_init_early = qib_7322_sdma_init_early;
- dd->f_writescratch = writescratch;
- dd->f_tempsense_rd = qib_7322_tempsense_rd;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dd->f_notify_dca = qib_7322_notify_dca;
-#endif
- /*
- * Do remaining PCIe setup and save PCIe values in dd.
- * Any error printing is already done by the init code.
- * On return, we have the chip mapped, but chip registers
- * are not set up until start of qib_init_7322_variables.
- */
- ret = qib_pcie_ddinit(dd, pdev, ent);
- if (ret < 0)
- goto bail_free;
-
- /* initialize chip-specific variables */
- ret = qib_init_7322_variables(dd);
- if (ret)
- goto bail_cleanup;
-
- if (qib_mini_init || !dd->num_pports)
- goto bail;
-
- /*
- * Determine number of vectors we want; depends on port count
- * and number of configured kernel receive queues actually used.
- * Should also depend on whether sdma is enabled or not, but
- * that's such a rare testing case it's not worth worrying about.
- */
- tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
- for (i = 0; i < tabsize; i++)
- if ((i < ARRAY_SIZE(irq_table) &&
- irq_table[i].port <= dd->num_pports) ||
- (i >= ARRAY_SIZE(irq_table) &&
- dd->rcd[i - ARRAY_SIZE(irq_table)]))
- actual_cnt++;
- /* reduce by the per-port contexts < 2, which don't use MSI here */
- if (qib_krcvq01_no_msi)
- actual_cnt -= dd->num_pports;
-
- tabsize = actual_cnt;
- dd->cspec->msix_entries = kcalloc(tabsize,
- sizeof(struct qib_msix_entry),
- GFP_KERNEL);
- if (!dd->cspec->msix_entries)
- tabsize = 0;
-
- if (qib_pcie_params(dd, 8, &tabsize))
- qib_dev_err(dd,
- "Failed to setup PCIe or interrupts; continuing anyway\n");
- /* may be less than we wanted, if not enough available */
- dd->cspec->num_msix_entries = tabsize;
-
- /* setup interrupt handler */
- qib_setup_7322_interrupt(dd, 1);
-
- /* clear diagctrl register, in case diags were running and crashed */
- qib_write_kreg(dd, kr_hwdiagctrl, 0);
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- if (!dca_add_requester(&pdev->dev)) {
- qib_devinfo(dd->pcidev, "DCA enabled\n");
- dd->flags |= QIB_DCA_ENABLED;
- qib_setup_dca(dd);
- }
-#endif
- goto bail;
-
-bail_cleanup:
- qib_pcie_ddcleanup(dd);
-bail_free:
- qib_free_devdata(dd);
- dd = ERR_PTR(ret);
-bail:
- return dd;
-}
-
-/*
- * Set the table entry at the specified index from the specified table.
- * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
- * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
- * 'idx' below addresses the correct entry, while its 4 LSBs select the
- * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
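- * For example, with TXDDS_TABLE_SZ = 16, indices 0-15 address the
- * SDR table, 16-31 the DDR table, and 32-47 the QDR table.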
- */
-#define DDS_ENT_AMP_LSB 14
-#define DDS_ENT_MAIN_LSB 9
-#define DDS_ENT_POST_LSB 5
-#define DDS_ENT_PRE_XTRA_LSB 3
-#define DDS_ENT_PRE_LSB 0
-
-/*
- * Set one entry in the TxDDS table for spec'd port
- * ridx picks one of the entries, while tp points
- * to the appropriate table entry.
- */
-static void set_txdds(struct qib_pportdata *ppd, int ridx,
- const struct txdds_ent *tp)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 pack_ent;
- int regidx;
-
- /* Get correct offset in chip-space, and in source table */
- regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
- /*
- * We do not use qib_write_kreg_port() because it was intended
- * only for registers in the lower "port specific" pages.
- * So do index calculation by hand.
- */
- if (ppd->hw_pidx)
- regidx += (dd->palign / sizeof(u64));
-
- pack_ent = tp->amp << DDS_ENT_AMP_LSB;
- pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
- pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
- pack_ent |= tp->post << DDS_ENT_POST_LSB;
- qib_write_kreg(dd, regidx, pack_ent);
- /* Prevent back-to-back writes by hitting scratch */
- qib_write_kreg(ppd->dd, kr_scratch, 0);
-}
-
-static const struct vendor_txdds_ent vendor_txdds[] = {
- { /* Amphenol 1m 30awg NoEq */
- { 0x41, 0x50, 0x48 }, "584470002 ",
- { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
- },
- { /* Amphenol 3m 28awg NoEq */
- { 0x41, 0x50, 0x48 }, "584470004 ",
- { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
- },
- { /* Finisar 3m OM2 Optical */
- { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
- { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
- },
- { /* Finisar 30m OM2 Optical */
- { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
- { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
- },
- { /* Finisar Default OM2 Optical */
- { 0x00, 0x90, 0x65 }, NULL,
- { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
- },
- { /* Gore 1m 30awg NoEq */
- { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
- { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
- },
- { /* Gore 2m 30awg NoEq */
- { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
- { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
- },
- { /* Gore 1m 28awg NoEq */
- { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
- { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
- },
- { /* Gore 3m 28awg NoEq */
- { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
- { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
- },
- { /* Gore 5m 24awg Eq */
- { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
- { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
- },
- { /* Gore 7m 24awg Eq */
- { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
- { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
- },
- { /* Gore 5m 26awg Eq */
- { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
- { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
- },
- { /* Gore 7m 26awg Eq */
- { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
- { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
- },
- { /* Intersil 12m 24awg Active */
- { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
- { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
- },
- { /* Intersil 10m 28awg Active */
- { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
- { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
- },
- { /* Intersil 7m 30awg Active */
- { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
- { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
- },
- { /* Intersil 5m 32awg Active */
- { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
- { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
- },
- { /* Intersil Default Active */
- { 0x00, 0x30, 0xB4 }, NULL,
- { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
- },
- { /* Luxtera 20m Active Optical */
- { 0x00, 0x25, 0x63 }, NULL,
- { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
- },
- { /* Molex 1M Cu loopback */
- { 0x00, 0x09, 0x3A }, "74763-0025 ",
- { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
- },
- { /* Molex 2m 28awg NoEq */
- { 0x00, 0x09, 0x3A }, "74757-2201 ",
- { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
- },
-};
-
-static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
- /* amp, pre, main, post */
- { 2, 2, 15, 6 }, /* Loopback */
- { 0, 0, 0, 1 }, /* 2 dB */
- { 0, 0, 0, 2 }, /* 3 dB */
- { 0, 0, 0, 3 }, /* 4 dB */
- { 0, 0, 0, 4 }, /* 5 dB */
- { 0, 0, 0, 5 }, /* 6 dB */
- { 0, 0, 0, 6 }, /* 7 dB */
- { 0, 0, 0, 7 }, /* 8 dB */
- { 0, 0, 0, 8 }, /* 9 dB */
- { 0, 0, 0, 9 }, /* 10 dB */
- { 0, 0, 0, 10 }, /* 11 dB */
- { 0, 0, 0, 11 }, /* 12 dB */
- { 0, 0, 0, 12 }, /* 13 dB */
- { 0, 0, 0, 13 }, /* 14 dB */
- { 0, 0, 0, 14 }, /* 15 dB */
- { 0, 0, 0, 15 }, /* 16 dB */
-};
-
-static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
- /* amp, pre, main, post */
- { 2, 2, 15, 6 }, /* Loopback */
- { 0, 0, 0, 8 }, /* 2 dB */
- { 0, 0, 0, 8 }, /* 3 dB */
- { 0, 0, 0, 9 }, /* 4 dB */
- { 0, 0, 0, 9 }, /* 5 dB */
- { 0, 0, 0, 10 }, /* 6 dB */
- { 0, 0, 0, 10 }, /* 7 dB */
- { 0, 0, 0, 11 }, /* 8 dB */
- { 0, 0, 0, 11 }, /* 9 dB */
- { 0, 0, 0, 12 }, /* 10 dB */
- { 0, 0, 0, 12 }, /* 11 dB */
- { 0, 0, 0, 13 }, /* 12 dB */
- { 0, 0, 0, 13 }, /* 13 dB */
- { 0, 0, 0, 14 }, /* 14 dB */
- { 0, 0, 0, 14 }, /* 15 dB */
- { 0, 0, 0, 15 }, /* 16 dB */
-};
-
-static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
- /* amp, pre, main, post */
- { 2, 2, 15, 6 }, /* Loopback */
- { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */
- { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */
- { 0, 1, 0, 11 }, /* 4 dB */
- { 0, 1, 0, 13 }, /* 5 dB */
- { 0, 1, 0, 15 }, /* 6 dB */
- { 0, 1, 3, 15 }, /* 7 dB */
- { 0, 1, 7, 15 }, /* 8 dB */
- { 0, 1, 7, 15 }, /* 9 dB */
- { 0, 1, 8, 15 }, /* 10 dB */
- { 0, 1, 9, 15 }, /* 11 dB */
- { 0, 1, 10, 15 }, /* 12 dB */
- { 0, 2, 6, 15 }, /* 13 dB */
- { 0, 2, 7, 15 }, /* 14 dB */
- { 0, 2, 8, 15 }, /* 15 dB */
- { 0, 2, 9, 15 }, /* 16 dB */
-};
-
-/*
- * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
- * These are mostly used for mez cards going through connectors
- * and backplane traces, but can be used to add other "unusual"
- * table values as well.
- */
-static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
- /* amp, pre, main, post */
- { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 3 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 4 }, /* QMH7342 backplane settings */
- { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */
-};
-
-static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
- /* amp, pre, main, post */
- { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 9 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 10 }, /* QMH7342 backplane settings */
- { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */
-};
-
-static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
- /* amp, pre, main, post */
- { 0, 1, 0, 4 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 5 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 6 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 8 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 10 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 12 }, /* QMH7342 backplane settings */
- { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */
-};
-
-static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
- /* amp, pre, main, post */
- { 0, 0, 0, 0 }, /* QME7342 mfg settings */
- { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */
-};
-
-static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
- unsigned atten)
-{
- /*
- * The attenuation table starts at 2dB for entry 1,
- * with entry 0 being the loopback entry.
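- * For example, an attenuation of 5 dB selects entry 4; values of
- * 2 dB or less clamp to entry 1, and values beyond the table end
- * clamp to the last entry.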
- */
- if (atten <= 2)
- atten = 1;
- else if (atten > TXDDS_TABLE_SZ)
- atten = TXDDS_TABLE_SZ - 1;
- else
- atten--;
- return txdds + atten;
-}
-
-/*
- * if override is set, the module parameter txselect has a value
- * for this specific port, so use it, rather than our normal mechanism.
- */
-static void find_best_ent(struct qib_pportdata *ppd,
- const struct txdds_ent **sdr_dds,
- const struct txdds_ent **ddr_dds,
- const struct txdds_ent **qdr_dds, int override)
-{
- struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
- int idx;
-
- /* Search table of known cables */
- for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
- const struct vendor_txdds_ent *v = vendor_txdds + idx;
-
- if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
- (!v->partnum ||
- !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
- *sdr_dds = &v->sdr;
- *ddr_dds = &v->ddr;
- *qdr_dds = &v->qdr;
- return;
- }
- }
-
- /* Active cables don't report attenuation, so we only set SerDes
- * settings to account for the attenuation of the board traces. */
- if (!override && QSFP_IS_ACTIVE(qd->tech)) {
- *sdr_dds = txdds_sdr + ppd->dd->board_atten;
- *ddr_dds = txdds_ddr + ppd->dd->board_atten;
- *qdr_dds = txdds_qdr + ppd->dd->board_atten;
- return;
- }
-
- if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
- qd->atten[1])) {
- *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
- *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
- *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
- return;
- } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
- /*
- * If we have no (or incomplete) data from the cable
- * EEPROM, or no QSFP, or override is set, use the
- * module parameter value to index into the attenuation
- * table.
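- * For example, a txselect value of 5 selects the 6 dB row of
- * each of the SDR/DDR/QDR tables below.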
- */
- idx = ppd->cpspec->no_eep;
- *sdr_dds = &txdds_sdr[idx];
- *ddr_dds = &txdds_ddr[idx];
- *qdr_dds = &txdds_qdr[idx];
- } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
- /* similar to above, but index into the "extra" table. */
- idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
- *sdr_dds = &txdds_extra_sdr[idx];
- *ddr_dds = &txdds_extra_ddr[idx];
- *qdr_dds = &txdds_extra_qdr[idx];
- } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
- ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
- TXDDS_MFG_SZ)) {
- idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
- pr_info("IB%u:%u use idx %u into txdds_mfg\n",
- ppd->dd->unit, ppd->port, idx);
- *sdr_dds = &txdds_extra_mfg[idx];
- *ddr_dds = &txdds_extra_mfg[idx];
- *qdr_dds = &txdds_extra_mfg[idx];
- } else {
- /* this shouldn't happen, it's range checked */
- *sdr_dds = txdds_sdr + qib_long_atten;
- *ddr_dds = txdds_ddr + qib_long_atten;
- *qdr_dds = txdds_qdr + qib_long_atten;
- }
-}
-
-static void init_txdds_table(struct qib_pportdata *ppd, int override)
-{
- const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
- struct txdds_ent *dds;
- int idx;
- int single_ent = 0;
-
- find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
-
- /* for mez cards or override, use the selected value for all entries */
- if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
- single_ent = 1;
-
- /* Fill in the first entry with the best entry found. */
- set_txdds(ppd, 0, sdr_dds);
- set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
- set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
- if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
- QIBL_LINKACTIVE)) {
- dds = (struct txdds_ent *)(ppd->link_speed_active ==
- QIB_IB_QDR ? qdr_dds :
- (ppd->link_speed_active ==
- QIB_IB_DDR ? ddr_dds : sdr_dds));
- write_tx_serdes_param(ppd, dds);
- }
-
- /* Fill in the remaining entries with the default table values. */
- for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
- set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
- set_txdds(ppd, idx + TXDDS_TABLE_SZ,
- single_ent ? ddr_dds : txdds_ddr + idx);
- set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
- single_ent ? qdr_dds : txdds_qdr + idx);
- }
-}
-
-#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
-#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
-#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
-#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
-#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
-#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
-#define AHB_TRANS_TRIES 10
-
-/*
- * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
- * 5=subsystem, which is why most calls have "chan + (chan >> 1)"
- * for the channel argument.
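- * For example, SerDes channels 0-3 map to quad positions 0, 1, 3
- * and 4, skipping the pll at position 2.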
- */
-static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
- u32 data, u32 mask)
-{
- u32 rd_data, wr_data, sz_mask;
- u64 trans, acc, prev_acc;
- u32 ret = 0xBAD0BAD;
- int tries;
-
- prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
- /* From this point on, make sure we return access */
- acc = (quad << 1) | 1;
- qib_write_kreg(dd, KR_AHB_ACC, acc);
-
- for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
- trans = qib_read_kreg64(dd, KR_AHB_TRANS);
- if (trans & AHB_TRANS_RDY)
- break;
- }
- if (tries >= AHB_TRANS_TRIES) {
- qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
- goto bail;
- }
-
- /* If mask is not all 1s, we need to read, but different SerDes
- * entities have different sizes
- */
- sz_mask = (quad == 1) ? 0xffffffffUL : 0xffffUL;
- wr_data = data & mask & sz_mask;
- if ((~mask & sz_mask) != 0) {
- trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
- qib_write_kreg(dd, KR_AHB_TRANS, trans);
-
- for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
- trans = qib_read_kreg64(dd, KR_AHB_TRANS);
- if (trans & AHB_TRANS_RDY)
- break;
- }
- if (tries >= AHB_TRANS_TRIES) {
- qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
- AHB_TRANS_TRIES);
- goto bail;
- }
- /* Re-read in case host split reads and read data first */
- trans = qib_read_kreg64(dd, KR_AHB_TRANS);
- rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
- wr_data |= (rd_data & ~mask & sz_mask);
- }
-
- /* If mask is not zero, we need to write. */
- if (mask & sz_mask) {
- trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
- trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
- trans |= AHB_WR;
- qib_write_kreg(dd, KR_AHB_TRANS, trans);
-
- for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
- trans = qib_read_kreg64(dd, KR_AHB_TRANS);
- if (trans & AHB_TRANS_RDY)
- break;
- }
- if (tries >= AHB_TRANS_TRIES) {
- qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
- AHB_TRANS_TRIES);
- goto bail;
- }
- }
- ret = wr_data;
-bail:
- qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
- return ret;
-}
-
-static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
- unsigned mask)
-{
- struct qib_devdata *dd = ppd->dd;
- int chan;
-
- for (chan = 0; chan < SERDES_CHANS; ++chan) {
- ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
- data, mask);
- ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
- 0, 0);
- }
-}
-
-static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
-{
- u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
- u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
-
- if (enable && !state) {
- pr_info("IB%u:%u Turning LOS on\n",
- ppd->dd->unit, ppd->port);
- data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
- } else if (!enable && state) {
- pr_info("IB%u:%u Turning LOS off\n",
- ppd->dd->unit, ppd->port);
- data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
- }
- qib_write_kreg_port(ppd, krp_serdesctrl, data);
-}
-
-static int serdes_7322_init(struct qib_pportdata *ppd)
-{
- int ret = 0;
-
- if (ppd->dd->cspec->r1)
- ret = serdes_7322_init_old(ppd);
- else
- ret = serdes_7322_init_new(ppd);
- return ret;
-}
-
-static int serdes_7322_init_old(struct qib_pportdata *ppd)
-{
- u32 le_val;
-
- /*
- * Initialize the Tx DDS tables. Also done every QSFP event,
- * for adapters with QSFP
- */
- init_txdds_table(ppd, 0);
-
- /* ensure no tx overrides from earlier driver loads */
- qib_write_kreg_port(ppd, krp_tx_deemph_override,
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- reset_tx_deemphasis_override));
-
- /* Patch some SerDes defaults to "Better for IB" */
- /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
- ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
-
- /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
- ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
- /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
- ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
-
- /* May be overridden in qsfp_7322_event */
- le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
- ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
-
- /* enable LE1 adaptation for all but QME, which is disabled */
- le_val = IS_QME(ppd->dd) ? 0 : 1;
- ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
-
- /* Clear cmode-override, may be set from older driver */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
-
- /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
- ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
-
- /* setup LoS params; these are subsystem, so chan == 5 */
- /* LoS filter threshold_count on, ch 0-3, set to 8 */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
-
- /* LoS filter threshold_count off, ch 0-3, set to 4 */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
-
- /* LoS filter select enabled */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
-
- /* LoS target data: SDR=4, DDR=2, QDR=1 */
- ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
- ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
- ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
-
- serdes_7322_los_enable(ppd, 1);
-
- /* rxbistena; set to 0 to avoid effects when it switches later */
- ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
-
- /* Configure 4 DFE taps, and only they adapt */
- ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
-
- /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
- le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
- ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
-
- /*
- * Set receive adaptation mode. SDR and DDR adaptation are
- * always on, and QDR is initially enabled; later disabled.
- */
- qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
- qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
- ppd->cpspec->qdr_dfe_on = 1;
-
- /* FLoop LOS gate: PPM filter enabled */
- ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
-
- /* rx offset center enabled */
- ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
-
- if (!ppd->dd->cspec->r1) {
- ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
- ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
- }
-
- /* Set the frequency loop bandwidth to 15 */
- ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
-
- return 0;
-}
-
-static int serdes_7322_init_new(struct qib_pportdata *ppd)
-{
- unsigned long tend;
- u32 le_val, rxcaldone;
- int chan, chan_done = (1 << SERDES_CHANS) - 1;
-
- /* Clear cmode-override, may be set from older driver */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
-
- /* ensure no tx overrides from earlier driver loads */
- qib_write_kreg_port(ppd, krp_tx_deemph_override,
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- reset_tx_deemphasis_override));
-
- /* START OF LSI SUGGESTED SERDES BRINGUP */
- /* Reset - Calibration Setup */
- /* Stop DFE adaptation */
- ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
- /* Disable LE1 */
- ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
- /* Disable autoadapt for LE1 */
- ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
- /* Disable LE2 */
- ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
- /* Disable VGA */
- ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
- /* Disable AFE Offset Cancel */
- ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
- /* Disable Timing Loop */
- ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
- /* Disable Frequency Loop */
- ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
- /* Disable Baseline Wander Correction */
- ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
- /* Disable RX Calibration */
- ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
- /* Disable RX Offset Calibration */
- ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
- /* Select BB CDR */
- ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
- /* CDR Step Size */
- ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
- /* Enable phase Calibration */
- ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
- /* DFE Bandwidth [2:14-12] */
- ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
- /* DFE Config (4 taps only) */
- ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
- /* Gain Loop Bandwidth */
- if (!ppd->dd->cspec->r1) {
- ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
- ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
- } else {
- ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
- }
- /* Baseline Wander Correction Gain [13:4-0] (leave as default) */
- /* Baseline Wander Correction Gain [3:7-5] (leave as default) */
- /* Data Rate Select [5:7-6] (leave as default) */
- /* RX Parallel Word Width [3:10-8] (leave as default) */
-
- /* RX RESET */
- /* Single- or Multi-channel reset */
- /* RX Analog reset */
- /* RX Digital reset */
- ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
- msleep(20);
- /* RX Analog reset */
- ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
- msleep(20);
- /* RX Digital reset */
- ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
- msleep(20);
-
- /* setup LoS params; these are subsystem, so chan == 5 */
- /* LoS filter threshold_count on, ch 0-3, set to 8 */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
-
- /* LoS filter threshold_count off, ch 0-3, set to 4 */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
-
- /* LoS filter select enabled */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
-
- /* LoS target data: SDR=4, DDR=2, QDR=1 */
- ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
- ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
- ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
-
- /* Turn on LOS on initial SERDES init */
- serdes_7322_los_enable(ppd, 1);
- /* FLoop LOS gate: PPM filter enabled */
- ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
-
- /* RX LATCH CALIBRATION */
- /* Enable Eyefinder Phase Calibration latch */
- ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
- /* Enable RX Offset Calibration latch */
- ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
- msleep(20);
- /* Start Calibration */
- ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
- tend = jiffies + msecs_to_jiffies(500);
- while (chan_done && !time_is_before_jiffies(tend)) {
- msleep(20);
- for (chan = 0; chan < SERDES_CHANS; ++chan) {
- rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
- (chan + (chan >> 1)),
- 25, 0, 0);
- if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
- (~chan_done & (1 << chan)) == 0)
- chan_done &= ~(1 << chan);
- }
- }
- if (chan_done) {
- pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
- IBSD(ppd->hw_pidx), chan_done);
- } else {
- for (chan = 0; chan < SERDES_CHANS; ++chan) {
- rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
- (chan + (chan >> 1)),
- 25, 0, 0);
- if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
- pr_info("Serdes %d chan %d calibration failed\n",
- IBSD(ppd->hw_pidx), chan);
- }
- }
-
- /* Turn off Calibration */
- ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
- msleep(20);
-
- /* BRING RX UP */
- /* Set LE2 value (May be overridden in qsfp_7322_event) */
- le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
- ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
- /* Set LE2 Loop bandwidth */
- ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
- /* Enable LE2 */
- ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
- msleep(20);
- /* Enable H0 only */
- ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
- /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
- le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
- ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
- /* Enable VGA */
- ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
- msleep(20);
- /* Set Frequency Loop Bandwidth */
- ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
- /* Enable Frequency Loop */
- ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
- /* Set Timing Loop Bandwidth */
- ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
- /* Enable Timing Loop */
- ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
- msleep(50);
- /* Enable DFE
- * Set receive adaptation mode. SDR and DDR adaptation are
- * always on, and QDR is initially enabled; later disabled.
- */
- qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
- qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
- ppd->cpspec->qdr_dfe_on = 1;
- /* Disable LE1 */
- ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
- /* Disable auto adapt for LE1 */
- ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
- msleep(20);
- /* Enable AFE Offset Cancel */
- ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
- /* Enable Baseline Wander Correction */
- ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
- /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
- ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
- /* VGA output common mode */
- ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
-
- /*
- * Initialize the Tx DDS tables. Also done every QSFP event,
- * for adapters with QSFP
- */
- init_txdds_table(ppd, 0);
-
- return 0;
-}
-
-/* start adjust QMH serdes parameters */
-
-static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
-{
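- /* the 6-bit manual H1 code occupies bits [14:9] of serdes reg 9 */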
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 9, code << 9, 0x3f << 9);
-}
-
-static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
- int enable, u32 tapenable)
-{
- if (enable)
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 1, 3 << 10, 0x1f << 10);
- else
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 1, 0, 0x1f << 10);
-}
-
-/* Set clock to 1, 0, 1, 0 */
-static void clock_man(struct qib_pportdata *ppd, int chan)
-{
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 4, 0x4000, 0x4000);
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 4, 0, 0x4000);
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 4, 0x4000, 0x4000);
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 4, 0, 0x4000);
-}
-
-/*
- * Write the current Tx serdes pre, post, main, and amp settings into the
- * serdes.
- * The caller must pass the settings appropriate for the current speed,
- * or not care if they are correct for the current speed.
- */
-static void write_tx_serdes_param(struct qib_pportdata *ppd,
- struct txdds_ent *txdds)
-{
- u64 deemph;
-
- deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
- /* field names for amp, main, post, pre, respectively */
- deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
-
- deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- tx_override_deemphasis_select);
- deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txampcntl_d2a);
- deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txc0_ena);
- deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txcp1_ena);
- deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txcn1_ena);
- qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
-}
-
-/*
- * Set the parameters for mez cards on link bounce, so they are
- * always exactly what was requested. Similar logic to init_txdds
- * but does just the serdes.
- */
-static void adj_tx_serdes(struct qib_pportdata *ppd)
-{
- const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
- struct txdds_ent *dds;
-
- find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
- dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
- qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
- ddr_dds : sdr_dds));
- write_tx_serdes_param(ppd, dds);
-}
-
-/* set QDR forced value for H1, if needed */
-static void force_h1(struct qib_pportdata *ppd)
-{
- int chan;
-
- ppd->cpspec->qdr_reforce = 0;
- if (!ppd->dd->cspec->r1)
- return;
-
- for (chan = 0; chan < SERDES_CHANS; chan++) {
- set_man_mode_h1(ppd, chan, 1, 0);
- set_man_code(ppd, chan, ppd->cpspec->h1_val);
- clock_man(ppd, chan);
- set_man_mode_h1(ppd, chan, 0, 0);
- }
-}
-
-#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
-#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
-
-#define R_OPCODE_LSB 3
-#define R_OP_NOP 0
-#define R_OP_SHIFT 2
-#define R_OP_UPDATE 3
-#define R_TDI_LSB 2
-#define R_TDO_LSB 1
-#define R_RDY 1
-
-static int qib_r_grab(struct qib_devdata *dd)
-{
- u64 val = SJA_EN;
-
- qib_write_kreg(dd, kr_r_access, val);
- qib_read_kreg32(dd, kr_scratch);
- return 0;
-}
-
-/* qib_r_wait_for_rdy() not only waits for the ready bit, it
- * returns the current state of R_TDO
- */
-static int qib_r_wait_for_rdy(struct qib_devdata *dd)
-{
- u64 val;
- int timeout;
-
- for (timeout = 0; timeout < 100 ; ++timeout) {
- val = qib_read_kreg32(dd, kr_r_access);
- if (val & R_RDY)
- return (val >> R_TDO_LSB) & 1;
- }
- return -1;
-}
-
-static int qib_r_shift(struct qib_devdata *dd, int bisten,
- int len, u8 *inp, u8 *outp)
-{
- u64 valbase, val;
- int ret, pos;
-
- valbase = SJA_EN | (bisten << BISTEN_LSB) |
- (R_OP_SHIFT << R_OPCODE_LSB);
- ret = qib_r_wait_for_rdy(dd);
- if (ret < 0)
- goto bail;
- for (pos = 0; pos < len; ++pos) {
- val = valbase;
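- /* ret holds the TDO bit sampled by the previous wait_for_rdy */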
- if (outp) {
- outp[pos >> 3] &= ~(1 << (pos & 7));
- outp[pos >> 3] |= (ret << (pos & 7));
- }
- if (inp) {
- int tdi = inp[pos >> 3] >> (pos & 7);
-
- val |= ((tdi & 1) << R_TDI_LSB);
- }
- qib_write_kreg(dd, kr_r_access, val);
- qib_read_kreg32(dd, kr_scratch);
- ret = qib_r_wait_for_rdy(dd);
- if (ret < 0)
- break;
- }
- /* Restore to NOP between operations. */
- val = SJA_EN | (bisten << BISTEN_LSB);
- qib_write_kreg(dd, kr_r_access, val);
- qib_read_kreg32(dd, kr_scratch);
- ret = qib_r_wait_for_rdy(dd);
-
- if (ret >= 0)
- ret = pos;
-bail:
- return ret;
-}
-
-static int qib_r_update(struct qib_devdata *dd, int bisten)
-{
- u64 val;
- int ret;
-
- val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
- ret = qib_r_wait_for_rdy(dd);
- if (ret >= 0) {
- qib_write_kreg(dd, kr_r_access, val);
- qib_read_kreg32(dd, kr_scratch);
- }
- return ret;
-}
-
-#define BISTEN_PORT_SEL 15
-#define LEN_PORT_SEL 625
-#define BISTEN_AT 17
-#define LEN_AT 156
-#define BISTEN_ETM 16
-#define LEN_ETM 632
-
-#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
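-/* e.g. BIT2BYTE(LEN_AT) rounds 156 bits up to 20 bytes */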
-
-/* these are common for all IB port use cases. */
-static u8 reset_at[BIT2BYTE(LEN_AT)] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
-};
-static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
- 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
- 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
- 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
- 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
-};
-static u8 at[BIT2BYTE(LEN_AT)] = {
- 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
-};
-
-/* used for IB1 or IB2, only one in use */
-static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
- 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
- 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
-};
-
-/* used when both IB1 and IB2 are in use */
-static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
- 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
- 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
- 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
- 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
- 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
-};
-
-/* used when only IB1 is in use */
-static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
- 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
- 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
- 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
- 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
- 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
- 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
- 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-/* used when only IB2 is in use */
-static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
- 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
- 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
- 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
- 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
- 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
- 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
- 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
-};
-
-/* used when both IB1 and IB2 are in use */
-static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
- 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
- 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
- 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
- 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
- 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
- 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
- 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-/*
- * Do setup to properly handle IB link recovery; if "both" is set, we
- * are initializing to cover both ports; otherwise we are initializing
- * to cover a single port card, or the port has reached INIT and we may
- * need to switch coverage types.
- */
-static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
-{
- u8 *portsel, *etm;
- struct qib_devdata *dd = ppd->dd;
-
- if (!ppd->dd->cspec->r1)
- return;
- if (!both) {
- dd->cspec->recovery_ports_initted++;
- ppd->cpspec->recovery_init = 1;
- }
- if (!both && dd->cspec->recovery_ports_initted == 1) {
- portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
- etm = atetm_1port;
- } else {
- portsel = portsel_2port;
- etm = atetm_2port;
- }
-
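- /* shift in the reset images first, then port select, AT, and ETM */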
- if (qib_r_grab(dd) < 0 ||
- qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
- qib_r_update(dd, BISTEN_ETM) < 0 ||
- qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
- qib_r_update(dd, BISTEN_AT) < 0 ||
- qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
- portsel, NULL) < 0 ||
- qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
- qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
- qib_r_update(dd, BISTEN_AT) < 0 ||
- qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
- qib_r_update(dd, BISTEN_ETM) < 0)
- qib_dev_err(dd, "Failed IB link recovery setup\n");
-}
-
-static void check_7322_rxe_status(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 fmask;
-
- if (dd->cspec->recovery_ports_initted != 1)
- return; /* rest doesn't apply to dualport */
- qib_write_kreg(dd, kr_control, dd->control |
- SYM_MASK(Control, FreezeMode));
- (void)qib_read_kreg64(dd, kr_scratch);
- udelay(3); /* ibcreset asserted 400ns, be sure that's over */
- fmask = qib_read_kreg64(dd, kr_act_fmask);
- if (!fmask) {
- /*
- * require a powercycle before we'll work again, and make
- * sure we get no more interrupts, and don't turn off
- * freeze.
- */
- ppd->dd->cspec->stay_in_freeze = 1;
- qib_7322_set_intr_state(ppd->dd, 0);
- qib_write_kreg(dd, kr_fmask, 0ULL);
- qib_dev_err(dd, "HCA unusable until powercycled\n");
- return; /* eventually reset */
- }
-
- qib_write_kreg(ppd->dd, kr_hwerrclear,
- SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
-
- /* don't do the full clear_freeze(), not needed for this */
- qib_write_kreg(dd, kr_control, dd->control);
- qib_read_kreg32(dd, kr_scratch);
- /* take IBC out of reset */
- if (ppd->link_speed_supported) {
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- qib_read_kreg32(dd, kr_scratch);
- if (ppd->lflags & QIBL_IB_LINK_DISABLED)
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
deleted file mode 100644
index 1c45814f5646..000000000000
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ /dev/null
@@ -1,1782 +0,0 @@
-/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/printk.h>
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-#include <linux/dca.h>
-#endif
-#include <rdma/rdma_vt.h>
-
-#include "qib.h"
-#include "qib_common.h"
-#include "qib_mad.h"
-#ifdef CONFIG_DEBUG_FS
-#include "qib_debugfs.h"
-#include "qib_verbs.h"
-#endif
-
-#undef pr_fmt
-#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
-
-/*
- * min buffers we want to have per context, after the driver's own buffers
- */
-#define QIB_MIN_USER_CTXT_BUFCNT 7
-
-#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
-#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
-#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)
-
-/*
- * Number of ctxts we are configured to use (to allow for more pio
- * buffers per ctxt, etc.) Zero means use chip value.
- */
-ushort qib_cfgctxts;
-module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
-MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");
-
-unsigned qib_numa_aware;
-module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO);
-MODULE_PARM_DESC(numa_aware,
- "0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process");
-
-/*
- * If set, do not write to any regs if avoidable; a hack to allow
- * checking for deranged default register values.
- */
-ushort qib_mini_init;
-module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
-MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");
-
-unsigned qib_n_krcv_queues;
-module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
-MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
-
-unsigned qib_cc_table_size;
-module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
-MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
-
-static void verify_interrupt(struct timer_list *);
-
-DEFINE_XARRAY_FLAGS(qib_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
-u32 qib_cpulist_count;
-unsigned long *qib_cpulist;
-
-/* set number of contexts we'll actually use */
-void qib_set_ctxtcnt(struct qib_devdata *dd)
-{
- if (!qib_cfgctxts) {
- dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
- if (dd->cfgctxts > dd->ctxtcnt)
- dd->cfgctxts = dd->ctxtcnt;
- } else if (qib_cfgctxts < dd->num_pports)
- dd->cfgctxts = dd->ctxtcnt;
- else if (qib_cfgctxts <= dd->ctxtcnt)
- dd->cfgctxts = qib_cfgctxts;
- else
- dd->cfgctxts = dd->ctxtcnt;
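- /* whatever remains after the kernel contexts is available to users */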
- dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
- dd->cfgctxts - dd->first_user_ctxt;
-}
-
-/*
- * Common code for creating the receive context array.
- */
-int qib_create_ctxts(struct qib_devdata *dd)
-{
- unsigned i;
- int local_node_id = pcibus_to_node(dd->pcidev->bus);
-
- if (local_node_id < 0)
- local_node_id = numa_node_id();
- dd->assigned_node_id = local_node_id;
-
- /*
- * Allocate full ctxtcnt array, rather than just cfgctxts, because
- * cleanup iterates across all possible ctxts.
- */
- dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
- if (!dd->rcd)
- return -ENOMEM;
-
- /* create (one or more) kctxt */
- for (i = 0; i < dd->first_user_ctxt; ++i) {
- struct qib_pportdata *ppd;
- struct qib_ctxtdata *rcd;
-
- if (dd->skip_kctxt_mask & (1 << i))
- continue;
-
- ppd = dd->pport + (i % dd->num_pports);
-
- rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
- if (!rcd) {
- qib_dev_err(dd,
- "Unable to allocate ctxtdata for Kernel ctxt, failing\n");
- kfree(dd->rcd);
- dd->rcd = NULL;
- return -ENOMEM;
- }
- rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
- rcd->seq_cnt = 1;
- }
- return 0;
-}
-
-/*
- * Common code for user and kernel context setup.
- */
-struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
- int node_id)
-{
- struct qib_devdata *dd = ppd->dd;
- struct qib_ctxtdata *rcd;
-
- rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id);
- if (rcd) {
- INIT_LIST_HEAD(&rcd->qp_wait_list);
- rcd->node_id = node_id;
- rcd->ppd = ppd;
- rcd->dd = dd;
- rcd->cnt = 1;
- rcd->ctxt = ctxt;
- dd->rcd[ctxt] = rcd;
-#ifdef CONFIG_DEBUG_FS
- if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
- rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
- GFP_KERNEL, node_id);
- if (!rcd->opstats) {
- kfree(rcd);
- qib_dev_err(dd,
- "Unable to allocate per ctxt stats buffer\n");
- return NULL;
- }
- }
-#endif
- dd->f_init_ctxt(rcd);
-
- /*
- * To avoid wasting a lot of memory, we allocate 32KB chunks
- * of physically contiguous memory, advance through it until
- * used up and then allocate more. Of course, we need
- * memory to store those extra pointers, now. 32KB seems to
- * be the most that is "safe" under memory pressure
- * (creating large files and then copying them over
- * NFS while doing lots of MPI jobs). The OOM killer can
- * get invoked even though we say we can sleep, and this can
- * cause significant system problems.
- */
- rcd->rcvegrbuf_size = 0x8000;
- rcd->rcvegrbufs_perchunk =
- rcd->rcvegrbuf_size / dd->rcvegrbufsize;
- rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
- rcd->rcvegrbufs_perchunk - 1) /
- rcd->rcvegrbufs_perchunk;
- rcd->rcvegrbufs_perchunk_shift =
- ilog2(rcd->rcvegrbufs_perchunk);
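- /* e.g. with 4 KiB eager buffers: 8 per 32 KiB chunk, shift of 3 */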
- }
- return rcd;
-}
-
-/*
- * Common code for initializing the physical port structure.
- */
-int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
- u8 hw_pidx, u8 port)
-{
- int size;
-
- ppd->dd = dd;
- ppd->hw_pidx = hw_pidx;
- ppd->port = port; /* IB port number, not index */
-
- spin_lock_init(&ppd->sdma_lock);
- spin_lock_init(&ppd->lflags_lock);
- spin_lock_init(&ppd->cc_shadow_lock);
- init_waitqueue_head(&ppd->state_wait);
-
- timer_setup(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup, 0);
-
- ppd->qib_wq = NULL;
- ppd->ibport_data.pmastats =
- alloc_percpu(struct qib_pma_counters);
- if (!ppd->ibport_data.pmastats)
- return -ENOMEM;
- ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
- ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
- ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
- if (!(ppd->ibport_data.rvp.rc_acks) ||
- !(ppd->ibport_data.rvp.rc_qacks) ||
- !(ppd->ibport_data.rvp.rc_delayed_comp))
- return -ENOMEM;
-
- if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
- goto bail;
-
- ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
- IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT);
-
- ppd->cc_max_table_entries =
- ppd->cc_supported_table_entries/IB_CCT_ENTRIES;
-
- size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
- * IB_CCT_ENTRIES;
- ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
- if (!ppd->ccti_entries)
- goto bail;
-
- size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
- ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
- if (!ppd->congestion_entries)
- goto bail_1;
-
- size = sizeof(struct cc_table_shadow);
- ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
- if (!ppd->ccti_entries_shadow)
- goto bail_2;
-
- size = sizeof(struct ib_cc_congestion_setting_attr);
- ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
- if (!ppd->congestion_entries_shadow)
- goto bail_3;
-
- return 0;
-
-bail_3:
- kfree(ppd->ccti_entries_shadow);
- ppd->ccti_entries_shadow = NULL;
-bail_2:
- kfree(ppd->congestion_entries);
- ppd->congestion_entries = NULL;
-bail_1:
- kfree(ppd->ccti_entries);
- ppd->ccti_entries = NULL;
-bail:
- /* User is intentionally disabling the congestion control agent */
- if (!qib_cc_table_size)
- return 0;
-
- if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
- qib_cc_table_size = 0;
- qib_dev_err(dd,
- "Congestion Control table size %d less than minimum %d for port %d\n",
- qib_cc_table_size, IB_CCT_MIN_ENTRIES, port);
- }
-
- qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
- port);
- return 0;
-}
-
-static int init_pioavailregs(struct qib_devdata *dd)
-{
- int ret, pidx;
- u64 *status_page;
-
- dd->pioavailregs_dma = dma_alloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
- GFP_KERNEL);
- if (!dd->pioavailregs_dma) {
- qib_dev_err(dd,
- "failed to allocate PIOavail reg area in memory\n");
- ret = -ENOMEM;
- goto done;
- }
-
- /*
- * We really want L2 cache aligned, but for current CPUs of
- * interest, they are the same.
- */
- status_page = (u64 *)
- ((char *) dd->pioavailregs_dma +
- ((2 * L1_CACHE_BYTES +
- dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
- /* device status comes first, for backwards compatibility */
- dd->devstatusp = status_page;
- *status_page++ = 0;
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- dd->pport[pidx].statusp = status_page;
- *status_page++ = 0;
- }
-
- /*
- * Setup buffer to hold freeze and other messages, accessible to
- * apps, following statusp. This is per-unit, not per port.
- */
- dd->freezemsg = (char *) status_page;
- *dd->freezemsg = 0;
- /* length of msg buffer is "whatever is left" */
- ret = (char *) status_page - (char *) dd->pioavailregs_dma;
- dd->freezelen = PAGE_SIZE - ret;
-
- ret = 0;
-
-done:
- return ret;
-}
-
-/**
- * init_shadow_tids - allocate the shadow TID array
- * @dd: the qlogic_ib device
- *
- * allocate the shadow TID array, so we can qib_munlock previous
- * entries. It may make more sense to move the pageshadow to the
- * ctxt data structure, so we only allocate memory for ctxts actually
- * in use, since we are at 8k per ctxt now.
- * We don't want failures here to prevent use of the driver/chip,
- * so no return value.
- */
-static void init_shadow_tids(struct qib_devdata *dd)
-{
- struct page **pages;
- dma_addr_t *addrs;
-
- pages = vzalloc(array_size(sizeof(struct page *),
- dd->cfgctxts * dd->rcvtidcnt));
- if (!pages)
- goto bail;
-
- addrs = vzalloc(array_size(sizeof(dma_addr_t),
- dd->cfgctxts * dd->rcvtidcnt));
- if (!addrs)
- goto bail_free;
-
- dd->pageshadow = pages;
- dd->physshadow = addrs;
- return;
-
-bail_free:
- vfree(pages);
-bail:
- dd->pageshadow = NULL;
-}
-
-/*
- * Do initialization for device that is only needed on
- * first detect, not on resets.
- */
-static int loadtime_init(struct qib_devdata *dd)
-{
- int ret = 0;
-
- if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
- QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
- qib_dev_err(dd,
- "Driver only handles version %d, chip swversion is %d (%llx), failing\n",
- QIB_CHIP_SWVERSION,
- (int)(dd->revision >>
- QLOGIC_IB_R_SOFTWARE_SHIFT) &
- QLOGIC_IB_R_SOFTWARE_MASK,
- (unsigned long long) dd->revision);
- ret = -ENOSYS;
- goto done;
- }
-
- if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
- qib_devinfo(dd->pcidev, "%s", dd->boardversion);
-
- spin_lock_init(&dd->pioavail_lock);
- spin_lock_init(&dd->sendctrl_lock);
- spin_lock_init(&dd->uctxt_lock);
- spin_lock_init(&dd->qib_diag_trans_lock);
- spin_lock_init(&dd->eep_st_lock);
- mutex_init(&dd->eep_lock);
-
- if (qib_mini_init)
- goto done;
-
- ret = init_pioavailregs(dd);
- init_shadow_tids(dd);
-
- qib_get_eeprom_info(dd);
-
- /* set up timer (don't start yet) to verify we got an interrupt */
- timer_setup(&dd->intrchk_timer, verify_interrupt, 0);
-done:
- return ret;
-}
-
-/**
- * init_after_reset - re-initialize after a reset
- * @dd: the qlogic_ib device
- *
- * sanity check at least some of the values after reset, and
- * ensure no receive or transmit (explicitly, in case reset
- * failed)
- */
-static int init_after_reset(struct qib_devdata *dd)
-{
- int i;
-
- /*
- * Ensure chip does no sends or receives, tail updates, or
- * pioavail updates while we re-initialize. This is mostly
- * for the driver data structures, not chip registers.
- */
- for (i = 0; i < dd->num_pports; ++i) {
- /*
- * ctxt == -1 means "all contexts". Only really safe for
- * _dis_abling things, as here.
- */
- dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
- QIB_RCVCTRL_INTRAVAIL_DIS |
- QIB_RCVCTRL_TAILUPD_DIS, -1);
- /* Redundant across ports for some, but no big deal. */
- dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
- QIB_SENDCTRL_AVAIL_DIS);
- }
-
- return 0;
-}
-
-static void enable_chip(struct qib_devdata *dd)
-{
- u64 rcvmask;
- int i;
-
- /*
- * Enable PIO send, and update of PIOavail regs to memory.
- */
- for (i = 0; i < dd->num_pports; ++i)
- dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
- QIB_SENDCTRL_AVAIL_ENB);
- /*
- * Enable kernel ctxts' receive and receive interrupt.
- * Other ctxts done as user opens and inits them.
- */
- rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
- rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
- QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
- for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
- struct qib_ctxtdata *rcd = dd->rcd[i];
-
- if (rcd)
- dd->f_rcvctrl(rcd->ppd, rcvmask, i);
- }
-}
-
-static void verify_interrupt(struct timer_list *t)
-{
- struct qib_devdata *dd = timer_container_of(dd, t, intrchk_timer);
- u64 int_counter;
-
- if (!dd)
- return; /* being torn down */
-
- /*
- * If we don't have a lid or any interrupts, let the user know and
- * don't bother checking again.
- */
- int_counter = qib_int_counter(dd) - dd->z_int_counter;
- if (int_counter == 0) {
- if (!dd->f_intr_fallback(dd))
- dev_err(&dd->pcidev->dev,
- "No interrupts detected, not usable.\n");
- else /* re-arm the timer to see if fallback works */
- mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
- }
-}
-
-static void init_piobuf_state(struct qib_devdata *dd)
-{
- int i, pidx;
- u32 uctxts;
-
- /*
- * Ensure all buffers are free, and fifos empty. Buffers
- * are common, so only do once for port 0.
- *
- * After enable and qib_chg_pioavailkernel so we can safely
- * enable pioavail updates and PIOENABLE. After this, packets
- * are ready and able to go out.
- */
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
-
- /*
- * If not all sendbufs are used, add one extra buffer to each of the
- * lower-numbered contexts. pbufsctxt and lastctxt_piobuf are
- * calculated in chip-specific code because it may cause some
- * chip-specific adjustments to be made.
- */
- uctxts = dd->cfgctxts - dd->first_user_ctxt;
- dd->ctxts_extrabuf = dd->pbufsctxt ?
- dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
-
- /*
- * Set up the shadow copies of the piobufavail registers,
- * which we compare against the chip registers for now, and
- * the in memory DMA'ed copies of the registers.
- * By now pioavail updates to memory should have occurred, so
- * copy them into our working/shadow registers; this is in
- * case something went wrong with abort, but mostly to get the
- * initial values of the generation bit correct.
- */
- for (i = 0; i < dd->pioavregs; i++) {
- __le64 tmp;
-
- tmp = dd->pioavailregs_dma[i];
- /*
- * Don't need to worry about pioavailkernel here
- * because we will call qib_chg_pioavailkernel() later
- * in initialization, to busy out buffers as needed.
- */
- dd->pioavailshadow[i] = le64_to_cpu(tmp);
- }
- while (i < ARRAY_SIZE(dd->pioavailshadow))
- dd->pioavailshadow[i++] = 0; /* for debugging sanity */
-
- /* after pioavailshadow is setup */
- qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
- TXCHK_CHG_TYPE_KERN, NULL);
- dd->f_initvl15_bufs(dd);
-}
-
-/**
- * qib_create_workqueues - create per port workqueues
- * @dd: the qlogic_ib device
- */
-static int qib_create_workqueues(struct qib_devdata *dd)
-{
- int pidx;
- struct qib_pportdata *ppd;
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (!ppd->qib_wq) {
- ppd->qib_wq = alloc_ordered_workqueue("qib%d_%d",
- WQ_MEM_RECLAIM,
- dd->unit, pidx);
- if (!ppd->qib_wq)
- goto wq_error;
- }
- }
- return 0;
-wq_error:
- pr_err("create_singlethread_workqueue failed for port %d\n",
- pidx + 1);
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->qib_wq) {
- destroy_workqueue(ppd->qib_wq);
- ppd->qib_wq = NULL;
- }
- }
- return -ENOMEM;
-}
-
-static void qib_free_pportdata(struct qib_pportdata *ppd)
-{
- free_percpu(ppd->ibport_data.pmastats);
- free_percpu(ppd->ibport_data.rvp.rc_acks);
- free_percpu(ppd->ibport_data.rvp.rc_qacks);
- free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
- ppd->ibport_data.pmastats = NULL;
-}
-
-/**
- * qib_init - do the actual initialization sequence on the chip
- * @dd: the qlogic_ib device
- * @reinit: reinitializing, so don't allocate new memory
- *
- * Do the actual initialization sequence on the chip. This is done
- * both from the init routine called from the PCI infrastructure, and
- * when we reset the chip, or detect that it was reset internally,
- * or it's administratively re-enabled.
- *
- * Memory allocation here and in called routines is only done in
- * the first case (reinit == 0). We have to be careful, because even
- * without memory allocation, we need to re-write all the chip registers
- * TIDs, etc. after the reset or enable has completed.
- */
-int qib_init(struct qib_devdata *dd, int reinit)
-{
- int ret = 0, pidx, lastfail = 0;
- u32 portok = 0;
- unsigned i;
- struct qib_ctxtdata *rcd;
- struct qib_pportdata *ppd;
- unsigned long flags;
-
- /* Set linkstate to unknown, so we can watch for a transition. */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
- QIBL_LINKDOWN | QIBL_LINKINIT |
- QIBL_LINKV);
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-
- if (reinit)
- ret = init_after_reset(dd);
- else
- ret = loadtime_init(dd);
- if (ret)
- goto done;
-
- /* Bypass most chip-init, to get to device creation */
- if (qib_mini_init)
- return 0;
-
- ret = dd->f_late_initreg(dd);
- if (ret)
- goto done;
-
- /* dd->rcd can be NULL if early init failed */
- for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
- /*
- * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
- * re-init, the simplest way to handle this is to free
- * the existing ones and re-allocate.
- * Need to re-create rest of ctxt 0 ctxtdata as well.
- */
- rcd = dd->rcd[i];
- if (!rcd)
- continue;
-
- lastfail = qib_create_rcvhdrq(dd, rcd);
- if (!lastfail)
- lastfail = qib_setup_eagerbufs(rcd);
- if (lastfail)
- qib_dev_err(dd,
- "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
- }
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- int mtu;
-
- if (lastfail)
- ret = lastfail;
- ppd = dd->pport + pidx;
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1) {
- mtu = QIB_DEFAULT_MTU;
- qib_ibmtu = 0; /* don't leave invalid value */
- }
- /* set max we can ever have for this driver load */
- ppd->init_ibmaxlen = min(mtu > 2048 ?
- dd->piosize4k : dd->piosize2k,
- dd->rcvegrbufsize +
- (dd->rcvhdrentsize << 2));
- /*
- * Have to initialize ibmaxlen, but this will normally
- * change immediately in qib_set_mtu().
- */
- ppd->ibmaxlen = ppd->init_ibmaxlen;
- qib_set_mtu(ppd, mtu);
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- lastfail = dd->f_bringup_serdes(ppd);
- if (lastfail) {
- qib_devinfo(dd->pcidev,
- "Failed to bringup IB port %u\n", ppd->port);
- lastfail = -ENETDOWN;
- continue;
- }
-
- portok++;
- }
-
- if (!portok) {
- /* none of the ports initialized */
- if (!ret && lastfail)
- ret = lastfail;
- else if (!ret)
- ret = -ENETDOWN;
- /* but continue on, so we can debug cause */
- }
-
- enable_chip(dd);
-
- init_piobuf_state(dd);
-
-done:
- if (!ret) {
- /* chip is OK for user apps; mark it as initialized */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- /*
- * Set status even if port serdes is not initialized
- * so that diags will work.
- */
- *ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
- QIB_STATUS_INITTED;
- if (!ppd->link_speed_enabled)
- continue;
- if (dd->flags & QIB_HAS_SEND_DMA)
- ret = qib_setup_sdma(ppd);
- timer_setup(&ppd->hol_timer, qib_hol_event, 0);
- ppd->hol_state = QIB_HOL_UP;
- }
-
- /* now we can enable all interrupts from the chip */
- dd->f_set_intr_state(dd, 1);
-
- /*
- * Set up to verify we get an interrupt, and fall back
- * to an alternate if necessary and possible.
- */
- mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
- /* start stats retrieval timer */
- mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
- }
-
- /* if ret is non-zero, we probably should do some cleanup here... */
- return ret;
-}
-
-/*
- * These next two routines are placeholders in case we don't have per-arch
- * code for controlling write combining. If explicit control of write
- * combining is not available, performance will probably be awful.
- */
-
-int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
-{
- return -EOPNOTSUPP;
-}
-
-void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
-{
-}
-
-struct qib_devdata *qib_lookup(int unit)
-{
- return xa_load(&qib_dev_table, unit);
-}
-
-/*
- * Stop the timers during unit shutdown, or after an error late
- * in initialization.
- */
-static void qib_stop_timers(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- int pidx;
-
- if (dd->stats_timer.function)
- timer_delete_sync(&dd->stats_timer);
- if (dd->intrchk_timer.function)
- timer_delete_sync(&dd->intrchk_timer);
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->hol_timer.function)
- timer_delete_sync(&ppd->hol_timer);
- if (ppd->led_override_timer.function) {
- timer_delete_sync(&ppd->led_override_timer);
- atomic_set(&ppd->led_override_timer_active, 0);
- }
- if (ppd->symerr_clear_timer.function)
- timer_delete_sync(&ppd->symerr_clear_timer);
- }
-}
-
-/**
- * qib_shutdown_device - shut down a device
- * @dd: the qlogic_ib device
- *
- * This is called to make the device quiet when we are about to
- * unload the driver, and also when the device is administratively
- * disabled. It does not free any data structures.
- * Everything it does has to be set up again by qib_init(dd, 1).
- */
-static void qib_shutdown_device(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- unsigned pidx;
-
- if (dd->flags & QIB_SHUTDOWN)
- return;
- dd->flags |= QIB_SHUTDOWN;
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
-
- spin_lock_irq(&ppd->lflags_lock);
- ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
- QIBL_LINKARMED | QIBL_LINKACTIVE |
- QIBL_LINKV);
- spin_unlock_irq(&ppd->lflags_lock);
- *ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
- }
- dd->flags &= ~QIB_INITTED;
-
- /* mask interrupts, but not errors */
- dd->f_set_intr_state(dd, 0);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
- QIB_RCVCTRL_CTXT_DIS |
- QIB_RCVCTRL_INTRAVAIL_DIS |
- QIB_RCVCTRL_PKEY_ENB, -1);
- /*
- * Gracefully stop all sends allowing any in progress to
- * trickle out first.
- */
- dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
- }
-
- /*
- * Enough for anything that's going to trickle out to have actually
- * done so.
- */
- udelay(20);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- dd->f_setextled(ppd, 0); /* make sure LEDs are off */
-
- if (dd->flags & QIB_HAS_SEND_DMA)
- qib_teardown_sdma(ppd);
-
- dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
- QIB_SENDCTRL_SEND_DIS);
- /*
- * Clear SerdesEnable.
- * We can't count on interrupts since we are stopping.
- */
- dd->f_quiet_serdes(ppd);
-
- if (ppd->qib_wq) {
- destroy_workqueue(ppd->qib_wq);
- ppd->qib_wq = NULL;
- }
- qib_free_pportdata(ppd);
- }
-
-}
-
-/**
- * qib_free_ctxtdata - free a context's allocated data
- * @dd: the qlogic_ib device
- * @rcd: the ctxtdata structure
- *
- * free up any allocated data for a context
- * This should not touch anything that would affect a simultaneous
- * re-allocation of context data, because it is called after qib_mutex
- * is released (and can be called from reinit as well).
- * It should never change any chip state, or global driver state.
- */
-void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
-{
- if (!rcd)
- return;
-
- if (rcd->rcvhdrq) {
- dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
- rcd->rcvhdrq, rcd->rcvhdrq_phys);
- rcd->rcvhdrq = NULL;
- if (rcd->rcvhdrtail_kvaddr) {
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- rcd->rcvhdrtail_kvaddr,
- rcd->rcvhdrqtailaddr_phys);
- rcd->rcvhdrtail_kvaddr = NULL;
- }
- }
- if (rcd->rcvegrbuf) {
- unsigned e;
-
- for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
- void *base = rcd->rcvegrbuf[e];
- size_t size = rcd->rcvegrbuf_size;
-
- dma_free_coherent(&dd->pcidev->dev, size,
- base, rcd->rcvegrbuf_phys[e]);
- }
- kfree(rcd->rcvegrbuf);
- rcd->rcvegrbuf = NULL;
- kfree(rcd->rcvegrbuf_phys);
- rcd->rcvegrbuf_phys = NULL;
- rcd->rcvegrbuf_chunks = 0;
- }
-
- kfree(rcd->tid_pg_list);
- vfree(rcd->user_event_mask);
- vfree(rcd->subctxt_uregbase);
- vfree(rcd->subctxt_rcvegrbuf);
- vfree(rcd->subctxt_rcvhdr_base);
-#ifdef CONFIG_DEBUG_FS
- kfree(rcd->opstats);
- rcd->opstats = NULL;
-#endif
- kfree(rcd);
-}
-
-/*
- * Perform a PIO buffer bandwidth write test, to verify proper system
- * configuration. Even when all the setup calls work, occasionally
- * BIOS or other issues can prevent write combining from working, or
- * can cause other bandwidth problems to the chip.
- *
- * This test simply writes the same buffer over and over again, and
- * measures close to the peak bandwidth to the chip (not testing
- * data bandwidth to the wire). On chips that use an address-based
- * trigger to send packets to the wire, this is easy. On chips that
- * use a count to trigger, we want to make sure that the packet doesn't
- * go out on the wire, or trigger flow control checks.
- */
-static void qib_verify_pioperf(struct qib_devdata *dd)
-{
- u32 pbnum, cnt, lcnt;
- u32 __iomem *piobuf;
- u32 *addr;
- u64 msecs, emsecs;
-
- piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
- if (!piobuf) {
- qib_devinfo(dd->pcidev,
- "No PIObufs for checking perf, skipping\n");
- return;
- }
-
- /*
- * Enough to give us a reasonable test, less than piobuf size, and
- * likely multiple of store buffer length.
- */
- cnt = 1024;
-
- addr = vmalloc(cnt);
- if (!addr)
- goto done;
-
- preempt_disable(); /* we want reasonably accurate elapsed time */
- msecs = 1 + jiffies_to_msecs(jiffies);
- for (lcnt = 0; lcnt < 10000U; lcnt++) {
- /* wait until we cross msec boundary */
- if (jiffies_to_msecs(jiffies) >= msecs)
- break;
- udelay(1);
- }
-
- dd->f_set_armlaunch(dd, 0);
-
- /*
- * length 0, no dwords actually sent
- */
- writeq(0, piobuf);
- qib_flush_wc();
-
- /*
- * This is only roughly accurate, since even with preempt we
- * still take interrupts that could take a while. Running for
- * >= 5 msec seems to get us "close enough" to accurate values.
- */
- msecs = jiffies_to_msecs(jiffies);
- for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
- qib_pio_copy(piobuf + 64, addr, cnt >> 2);
- emsecs = jiffies_to_msecs(jiffies) - msecs;
- }
-
- /* 1 GiB/sec, slightly over IB SDR line rate */
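- /* each loop iteration copies cnt (1 KiB), so lcnt/emsecs ~ MiB/sec */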
- if (lcnt < (emsecs * 1024U))
- qib_dev_err(dd,
- "Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n",
- lcnt / (u32) emsecs);
-
- preempt_enable();
-
- vfree(addr);
-
-done:
- /* disarm piobuf, so it's available again */
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
- qib_sendbuf_done(dd, pbnum);
- dd->f_set_armlaunch(dd, 1);
-}
-
-void qib_free_devdata(struct qib_devdata *dd)
-{
- unsigned long flags;
-
- xa_lock_irqsave(&qib_dev_table, flags);
- __xa_erase(&qib_dev_table, dd->unit);
- xa_unlock_irqrestore(&qib_dev_table, flags);
-
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_ibdev_exit(&dd->verbs_dev);
-#endif
- free_percpu(dd->int_counter);
- rvt_dealloc_device(&dd->verbs_dev.rdi);
-}
-
-u64 qib_int_counter(struct qib_devdata *dd)
-{
- int cpu;
- u64 int_counter = 0;
-
- for_each_possible_cpu(cpu)
- int_counter += *per_cpu_ptr(dd->int_counter, cpu);
- return int_counter;
-}
-
-u64 qib_sps_ints(void)
-{
- unsigned long index, flags;
- struct qib_devdata *dd;
- u64 sps_ints = 0;
-
- xa_lock_irqsave(&qib_dev_table, flags);
- xa_for_each(&qib_dev_table, index, dd) {
- sps_ints += qib_int_counter(dd);
- }
- xa_unlock_irqrestore(&qib_dev_table, flags);
- return sps_ints;
-}
-
-/*
- * Allocate our primary per-unit data structure. Must be done via verbs
- * allocator, because the verbs cleanup process does both cleanup and
- * free of the data structure.
- * "extra" is for chip-specific data.
- */
-struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
-{
- struct qib_devdata *dd;
- int ret, nports;
-
- /* extra is * number of ports */
- nports = extra / sizeof(struct qib_pportdata);
- dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
- nports);
- if (!dd)
- return ERR_PTR(-ENOMEM);
-
- ret = xa_alloc_irq(&qib_dev_table, &dd->unit, dd, xa_limit_32b,
- GFP_KERNEL);
- if (ret < 0) {
- qib_early_err(&pdev->dev,
- "Could not allocate unit ID: error %d\n", -ret);
- goto bail;
- }
- rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);
-
- dd->int_counter = alloc_percpu(u64);
- if (!dd->int_counter) {
- ret = -ENOMEM;
- qib_early_err(&pdev->dev,
- "Could not allocate per-cpu int_counter\n");
- goto bail;
- }
-
- if (!qib_cpulist_count) {
- u32 count = num_online_cpus();
-
- qib_cpulist = bitmap_zalloc(count, GFP_KERNEL);
- if (qib_cpulist)
- qib_cpulist_count = count;
- }
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_ibdev_init(&dd->verbs_dev);
-#endif
- return dd;
-bail:
- if (!list_empty(&dd->list))
- list_del_init(&dd->list);
- rvt_dealloc_device(&dd->verbs_dev.rdi);
- return ERR_PTR(ret);
-}
-
-/*
- * Called from freeze mode handlers, and from PCI error
- * reporting code. Should be paranoid about state of
- * system and data structures.
- */
-void qib_disable_after_error(struct qib_devdata *dd)
-{
- if (dd->flags & QIB_INITTED) {
- u32 pidx;
-
- dd->flags &= ~QIB_INITTED;
- if (dd->pport)
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- struct qib_pportdata *ppd;
-
- ppd = dd->pport + pidx;
- if (dd->flags & QIB_PRESENT) {
- qib_set_linkstate(ppd,
- QIB_IB_LINKDOWN_DISABLE);
- dd->f_setextled(ppd, 0);
- }
- *ppd->statusp &= ~QIB_STATUS_IB_READY;
- }
- }
-
- /*
- * Mark as having had an error for driver, and also
- * for /sys and status word mapped to user programs.
- * This marks unit as not usable, until reset.
- */
- if (dd->devstatusp)
- *dd->devstatusp |= QIB_STATUS_HWERROR;
-}
-
-static void qib_remove_one(struct pci_dev *);
-static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
-static void qib_shutdown_one(struct pci_dev *);
-
-#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
-#define PFX QIB_DRV_NAME ": "
-
-static const struct pci_device_id qib_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
- { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
- { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
-
-static struct pci_driver qib_driver = {
- .name = QIB_DRV_NAME,
- .probe = qib_init_one,
- .remove = qib_remove_one,
- .shutdown = qib_shutdown_one,
- .id_table = qib_pci_tbl,
- .err_handler = &qib_pci_err_handler,
-};
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-
-static int qib_notify_dca(struct notifier_block *, unsigned long, void *);
-static struct notifier_block dca_notifier = {
- .notifier_call = qib_notify_dca,
- .next = NULL,
- .priority = 0
-};
-
-static int qib_notify_dca_device(struct device *device, void *data)
-{
- struct qib_devdata *dd = dev_get_drvdata(device);
- unsigned long event = *(unsigned long *)data;
-
- return dd->f_notify_dca(dd, event);
-}
-
-static int qib_notify_dca(struct notifier_block *nb, unsigned long event,
- void *p)
-{
- int rval;
-
- rval = driver_for_each_device(&qib_driver.driver, NULL,
- &event, qib_notify_dca_device);
- return rval ? NOTIFY_BAD : NOTIFY_DONE;
-}
-
-#endif
-
-/*
- * Do all the generic driver unit- and chip-independent memory
- * allocation and initialization.
- */
-static int __init qib_ib_init(void)
-{
- int ret;
-
- ret = qib_dev_init();
- if (ret)
- goto bail;
-
- /*
- * These must be called before the driver is registered with
- * the PCI subsystem.
- */
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca_register_notify(&dca_notifier);
-#endif
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_init();
-#endif
- ret = pci_register_driver(&qib_driver);
- if (ret < 0) {
- pr_err("Unable to register driver: error %d\n", -ret);
- goto bail_dev;
- }
-
- /* not fatal if it doesn't work */
- if (qib_init_qibfs())
- pr_err("Unable to register ipathfs\n");
- goto bail; /* all OK */
-
-bail_dev:
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca_unregister_notify(&dca_notifier);
-#endif
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_exit();
-#endif
- qib_dev_cleanup();
-bail:
- return ret;
-}
-
-module_init(qib_ib_init);
-
-/*
- * Do the non-unit driver cleanup, memory free, etc. at unload.
- */
-static void __exit qib_ib_cleanup(void)
-{
- int ret;
-
- ret = qib_exit_qibfs();
- if (ret)
- pr_err(
- "Unable to cleanup counter filesystem: error %d\n",
- -ret);
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca_unregister_notify(&dca_notifier);
-#endif
- pci_unregister_driver(&qib_driver);
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_exit();
-#endif
-
- qib_cpulist_count = 0;
- bitmap_free(qib_cpulist);
-
- WARN_ON(!xa_empty(&qib_dev_table));
- qib_dev_cleanup();
-}
-
-module_exit(qib_ib_cleanup);
-
-/* this can only be called after a successful initialization */
-static void cleanup_device_data(struct qib_devdata *dd)
-{
- int ctxt;
- int pidx;
- struct qib_ctxtdata **tmp;
- unsigned long flags;
-
- /* users can't do anything more with chip */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- if (dd->pport[pidx].statusp)
- *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
-
- spin_lock(&dd->pport[pidx].cc_shadow_lock);
-
- kfree(dd->pport[pidx].congestion_entries);
- dd->pport[pidx].congestion_entries = NULL;
- kfree(dd->pport[pidx].ccti_entries);
- dd->pport[pidx].ccti_entries = NULL;
- kfree(dd->pport[pidx].ccti_entries_shadow);
- dd->pport[pidx].ccti_entries_shadow = NULL;
- kfree(dd->pport[pidx].congestion_entries_shadow);
- dd->pport[pidx].congestion_entries_shadow = NULL;
-
- spin_unlock(&dd->pport[pidx].cc_shadow_lock);
- }
-
- qib_disable_wc(dd);
-
- if (dd->pioavailregs_dma) {
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- (void *) dd->pioavailregs_dma,
- dd->pioavailregs_phys);
- dd->pioavailregs_dma = NULL;
- }
-
- if (dd->pageshadow) {
- struct page **tmpp = dd->pageshadow;
- dma_addr_t *tmpd = dd->physshadow;
- int i;
-
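- /*
- * Unmap and release every expected-receive user page still
- * pinned in the per-context TID arrays.
- */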
- for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
- int ctxt_tidbase = ctxt * dd->rcvtidcnt;
- int maxtid = ctxt_tidbase + dd->rcvtidcnt;
-
- for (i = ctxt_tidbase; i < maxtid; i++) {
- if (!tmpp[i])
- continue;
- dma_unmap_page(&dd->pcidev->dev, tmpd[i],
- PAGE_SIZE, DMA_FROM_DEVICE);
- qib_release_user_pages(&tmpp[i], 1);
- tmpp[i] = NULL;
- }
- }
-
- dd->pageshadow = NULL;
- vfree(tmpp);
- dd->physshadow = NULL;
- vfree(tmpd);
- }
-
- /*
- * Free any resources still in use (usually just kernel contexts)
- * at unload; we iterate over ctxtcnt, because that's what we allocate.
- * We acquire the lock to be really paranoid that rcd isn't being
- * accessed from some interrupt-related code (that should not happen,
- * but best to be sure).
- */
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- tmp = dd->rcd;
- dd->rcd = NULL;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
- struct qib_ctxtdata *rcd = tmp[ctxt];
-
- tmp[ctxt] = NULL; /* debugging paranoia */
- qib_free_ctxtdata(dd, rcd);
- }
- kfree(tmp);
-}
-
-/*
- * Clean up on unit shutdown, or error during unit load after
- * successful initialization.
- */
-static void qib_postinit_cleanup(struct qib_devdata *dd)
-{
- /*
- * Clean up chip-specific stuff.
- * We check for NULL here, because it's outside
- * the kregbase check, and we need to call it
- * after the free_irq. Thus it's possible that
- * the function pointers were never initialized.
- */
- if (dd->f_cleanup)
- dd->f_cleanup(dd);
-
- qib_pcie_ddcleanup(dd);
-
- cleanup_device_data(dd);
-
- qib_free_devdata(dd);
-}
-
-static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int ret, j, pidx, initfail;
- struct qib_devdata *dd = NULL;
-
- ret = qib_pcie_init(pdev, ent);
- if (ret)
- goto bail;
-
- /*
- * Do device-specific initialization, function table setup, dd
- * allocation, etc.
- */
- switch (ent->device) {
- case PCI_DEVICE_ID_QLOGIC_IB_6120:
-#ifdef CONFIG_PCI_MSI
- dd = qib_init_iba6120_funcs(pdev, ent);
-#else
- qib_early_err(&pdev->dev,
- "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
- ent->device);
- dd = ERR_PTR(-ENODEV);
-#endif
- break;
-
- case PCI_DEVICE_ID_QLOGIC_IB_7220:
- dd = qib_init_iba7220_funcs(pdev, ent);
- break;
-
- case PCI_DEVICE_ID_QLOGIC_IB_7322:
- dd = qib_init_iba7322_funcs(pdev, ent);
- break;
-
- default:
- qib_early_err(&pdev->dev,
- "Failing on unknown Intel deviceid 0x%x\n",
- ent->device);
- ret = -ENODEV;
- }
-
- if (IS_ERR(dd))
- ret = PTR_ERR(dd);
- if (ret)
- goto bail; /* error already printed */
-
- ret = qib_create_workqueues(dd);
- if (ret)
- goto bail;
-
- /* do the generic initialization */
- initfail = qib_init(dd, 0);
-
- ret = qib_register_ib_device(dd);
-
- /*
- * Now ready for use. This should be cleared whenever we
- * detect a reset, or initiate one. On an earlier failure,
- * we still create the devices, so diags, etc. can be used
- * to determine the cause of the problem.
- */
- if (!qib_mini_init && !initfail && !ret)
- dd->flags |= QIB_INITTED;
-
- j = qib_device_create(dd);
- if (j)
- qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
- j = qibfs_add(dd);
- if (j)
- qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
- -j);
-
- if (qib_mini_init || initfail || ret) {
- qib_stop_timers(dd);
- flush_workqueue(ib_wq);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- dd->f_quiet_serdes(dd->pport + pidx);
- if (qib_mini_init)
- goto bail;
- if (!j) {
- (void) qibfs_remove(dd);
- qib_device_remove(dd);
- }
- if (!ret)
- qib_unregister_ib_device(dd);
- qib_postinit_cleanup(dd);
- if (initfail)
- ret = initfail;
- goto bail;
- }
-
- ret = qib_enable_wc(dd);
- if (ret) {
- qib_dev_err(dd,
- "Write combining not enabled (err %d): performance may be poor\n",
- -ret);
- ret = 0;
- }
-
- qib_verify_pioperf(dd);
-bail:
- return ret;
-}
-
-static void qib_remove_one(struct pci_dev *pdev)
-{
- struct qib_devdata *dd = pci_get_drvdata(pdev);
- int ret;
-
- /* unregister from IB core */
- qib_unregister_ib_device(dd);
-
- /*
- * Disable the IB link, disable interrupts on the device,
- * clear dma engines, etc.
- */
- if (!qib_mini_init)
- qib_shutdown_device(dd);
-
- qib_stop_timers(dd);
-
- /* wait until all of our (qsfp) queue_work() calls complete */
- flush_workqueue(ib_wq);
-
- ret = qibfs_remove(dd);
- if (ret)
- qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
- -ret);
-
- qib_device_remove(dd);
-
- qib_postinit_cleanup(dd);
-}
-
-static void qib_shutdown_one(struct pci_dev *pdev)
-{
- struct qib_devdata *dd = pci_get_drvdata(pdev);
-
- qib_shutdown_device(dd);
-}
-
-/**
- * qib_create_rcvhdrq - create a receive header queue
- * @dd: the qlogic_ib device
- * @rcd: the context data
- *
- * This must be contiguous memory (from an I/O perspective), and must be
- * DMA'able (which means for some systems, it will go through an IOMMU,
- * or be forced into a low address range).
- */
-int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
-{
- unsigned amt;
- int old_node_id;
-
- if (!rcd->rcvhdrq) {
- dma_addr_t phys_hdrqtail;
-
- amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
- sizeof(u32), PAGE_SIZE);
-
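- /*
- * Allocate the header queue from the context's NUMA node by
- * temporarily pointing the PCI device at that node, then
- * restoring the original node afterwards.
- */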
- old_node_id = dev_to_node(&dd->pcidev->dev);
- set_dev_node(&dd->pcidev->dev, rcd->node_id);
- rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
- &rcd->rcvhdrq_phys, GFP_KERNEL);
- set_dev_node(&dd->pcidev->dev, old_node_id);
-
- if (!rcd->rcvhdrq) {
- qib_dev_err(dd,
- "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
- amt, rcd->ctxt);
- goto bail;
- }
-
- if (rcd->ctxt >= dd->first_user_ctxt) {
- rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
- if (!rcd->user_event_mask)
- goto bail_free_hdrq;
- }
-
- if (!(dd->flags & QIB_NODMA_RTAIL)) {
- set_dev_node(&dd->pcidev->dev, rcd->node_id);
- rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
- GFP_KERNEL);
- set_dev_node(&dd->pcidev->dev, old_node_id);
- if (!rcd->rcvhdrtail_kvaddr)
- goto bail_free;
- rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
- }
-
- rcd->rcvhdrq_size = amt;
- }
-
- /* clear for security and sanity on each use */
- memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
- if (rcd->rcvhdrtail_kvaddr)
- memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
- return 0;
-
-bail_free:
- qib_dev_err(dd,
- "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
- rcd->ctxt);
- vfree(rcd->user_event_mask);
- rcd->user_event_mask = NULL;
-bail_free_hdrq:
- dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
- rcd->rcvhdrq_phys);
- rcd->rcvhdrq = NULL;
-bail:
- return -ENOMEM;
-}
-
-/**
- * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
- * @rcd: the context we are setting up.
- *
- * Allocate the eager TID buffers and program them into the chip.
- * They are no longer completely contiguous; we do multiple allocation
- * calls. Otherwise we get the OOM code involved, by asking for too
- * much per call, with disastrous results on some kernels.
- */
-int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
- size_t size;
- int old_node_id;
-
- egrcnt = rcd->rcvegrcnt;
- egroff = rcd->rcvegr_tid_base;
- egrsize = dd->rcvegrbufsize;
-
- chunk = rcd->rcvegrbuf_chunks;
- egrperchunk = rcd->rcvegrbufs_perchunk;
- size = rcd->rcvegrbuf_size;
- if (!rcd->rcvegrbuf) {
- rcd->rcvegrbuf =
- kcalloc_node(chunk, sizeof(rcd->rcvegrbuf[0]),
- GFP_KERNEL, rcd->node_id);
- if (!rcd->rcvegrbuf)
- goto bail;
- }
- if (!rcd->rcvegrbuf_phys) {
- rcd->rcvegrbuf_phys =
- kmalloc_array_node(chunk,
- sizeof(rcd->rcvegrbuf_phys[0]),
- GFP_KERNEL, rcd->node_id);
- if (!rcd->rcvegrbuf_phys)
- goto bail_rcvegrbuf;
- }
- for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
- if (rcd->rcvegrbuf[e])
- continue;
-
- old_node_id = dev_to_node(&dd->pcidev->dev);
- set_dev_node(&dd->pcidev->dev, rcd->node_id);
- rcd->rcvegrbuf[e] =
- dma_alloc_coherent(&dd->pcidev->dev, size,
- &rcd->rcvegrbuf_phys[e],
- GFP_KERNEL);
- set_dev_node(&dd->pcidev->dev, old_node_id);
- if (!rcd->rcvegrbuf[e])
- goto bail_rcvegrbuf_phys;
- }
-
- rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];
-
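- /*
- * Walk the chunks, programming each buffer page into the next
- * eager TID slot, offset by this context's TID base (egroff).
- */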
- for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
- dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
- unsigned i;
-
- /* clear for security and sanity on each use */
- memset(rcd->rcvegrbuf[chunk], 0, size);
-
- for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
- dd->f_put_tid(dd, e + egroff +
- (u64 __iomem *)
- ((char __iomem *)
- dd->kregbase +
- dd->rcvegrbase),
- RCVHQ_RCV_TYPE_EAGER, pa);
- pa += egrsize;
- }
- cond_resched(); /* don't hog the cpu */
- }
-
- return 0;
-
-bail_rcvegrbuf_phys:
- for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
- dma_free_coherent(&dd->pcidev->dev, size,
- rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
- kfree(rcd->rcvegrbuf_phys);
- rcd->rcvegrbuf_phys = NULL;
-bail_rcvegrbuf:
- kfree(rcd->rcvegrbuf);
- rcd->rcvegrbuf = NULL;
-bail:
- return -ENOMEM;
-}
-
-/*
- * Note: Changes to this routine should be mirrored
- * for the diagnostics routine qib_remap_ioaddr32().
- * There is also related code for VL15 buffers in qib_init_7322_variables().
- * The teardown code that unmaps is in qib_pcie_ddcleanup()
- */
-int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
-{
- u64 __iomem *qib_kregbase = NULL;
- void __iomem *qib_piobase = NULL;
- u64 __iomem *qib_userbase = NULL;
- u64 qib_kreglen;
- u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
- u64 qib_pio4koffset = dd->piobufbase >> 32;
- u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
- u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
- u64 qib_physaddr = dd->physaddr;
- u64 qib_piolen;
- u64 qib_userlen = 0;
-
- /*
- * Free the old mapping first; otherwise the kernel will reuse it
- * instead of creating a new mapping with the
- * write-combining attribute.
- */
- iounmap(dd->kregbase);
- dd->kregbase = NULL;
-
- /*
- * Assumes chip address space looks like:
- * - kregs + sregs + cregs + uregs (in any order)
- * - piobufs (2K and 4K bufs in either order)
- * or:
- * - kregs + sregs + cregs (in any order)
- * - piobufs (2K and 4K bufs in either order)
- * - uregs
- */
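- /*
- * Registers occupy the BAR from its base up to the first PIO
- * region; everything from there through the end of the last
- * PIO region is remapped write-combining below.
- */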
- if (dd->piobcnt4k == 0) {
- qib_kreglen = qib_pio2koffset;
- qib_piolen = qib_pio2klen;
- } else if (qib_pio2koffset < qib_pio4koffset) {
- qib_kreglen = qib_pio2koffset;
- qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
- } else {
- qib_kreglen = qib_pio4koffset;
- qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
- }
- qib_piolen += vl15buflen;
- /* Map just the configured ports (not all hw ports) */
- if (dd->uregbase > qib_kreglen)
- qib_userlen = dd->ureg_align * dd->cfgctxts;
-
- /* Sanity checks passed, now create the new mappings */
- qib_kregbase = ioremap(qib_physaddr, qib_kreglen);
- if (!qib_kregbase)
- goto bail;
-
- qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
- if (!qib_piobase)
- goto bail_kregbase;
-
- if (qib_userlen) {
- qib_userbase = ioremap(qib_physaddr + dd->uregbase,
- qib_userlen);
- if (!qib_userbase)
- goto bail_piobase;
- }
-
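- /*
- * The write-combining mapping begins at qib_kreglen within the
- * BAR, so the 2K/4K PIO base pointers are computed relative to
- * that origin.
- */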
- dd->kregbase = qib_kregbase;
- dd->kregend = (u64 __iomem *)
- ((char __iomem *) qib_kregbase + qib_kreglen);
- dd->piobase = qib_piobase;
- dd->pio2kbase = (void __iomem *)
- (((char __iomem *) dd->piobase) +
- qib_pio2koffset - qib_kreglen);
- if (dd->piobcnt4k)
- dd->pio4kbase = (void __iomem *)
- (((char __iomem *) dd->piobase) +
- qib_pio4koffset - qib_kreglen);
- if (qib_userlen)
- /* ureg will now be accessed relative to dd->userbase */
- dd->userbase = qib_userbase;
- return 0;
-
-bail_piobase:
- iounmap(qib_piobase);
-bail_kregbase:
- iounmap(qib_kregbase);
-bail:
- return -ENOMEM;
-}
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
deleted file mode 100644
index 93357823c6c0..000000000000
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-#include "qib.h"
-#include "qib_common.h"
-
-/**
- * qib_format_hwmsg - format a single hwerror message
- * @msg: message buffer
- * @msgl: length of message buffer
- * @hwmsg: message to add to message buffer
- */
-static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
-{
- strlcat(msg, "[", msgl);
- strlcat(msg, hwmsg, msgl);
- strlcat(msg, "]", msgl);
-}
-
-/**
- * qib_format_hwerrors - format hardware error messages for display
- * @hwerrs: hardware errors bit vector
- * @hwerrmsgs: hardware error descriptions
- * @nhwerrmsgs: number of hwerrmsgs
- * @msg: message buffer
- * @msgl: message buffer length
- */
-void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t msgl)
-{
- int i;
-
- for (i = 0; i < nhwerrmsgs; i++)
- if (hwerrs & hwerrmsgs[i].mask)
- qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
-}
-
-static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
-{
- struct ib_event event;
- struct qib_devdata *dd = ppd->dd;
-
- event.device = &dd->verbs_dev.rdi.ibdev;
- event.element.port_num = ppd->port;
- event.event = ev;
- ib_dispatch_event(&event);
-}
-
-void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
- u32 lstate;
- u8 ltstate;
- enum ib_event_type ev = 0;
-
- lstate = dd->f_iblink_state(ibcs); /* linkstate */
- ltstate = dd->f_ibphys_portstate(ibcs);
-
- /*
- * If linkstate transitions into INIT from any of the various down
- * states, or if it transitions from any of the up (INIT or better)
- * states into any of the down states (except link recovery), then
- * call the chip-specific code to take appropriate actions.
- *
- * ppd->lflags could be 0 if this is the first time the interrupt
- * handler has been called but the link is already up.
- */
- if (lstate >= IB_PORT_INIT &&
- (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
- ltstate == IB_PHYSPORTSTATE_LINKUP) {
- /* transitioned to UP */
- if (dd->f_ib_updown(ppd, 1, ibcs))
- goto skip_ibchange; /* chip-code handled */
- } else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
- QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
- if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
- ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
- dd->f_ib_updown(ppd, 0, ibcs))
- goto skip_ibchange; /* chip-code handled */
- qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
- }
-
- if (lstate != IB_PORT_DOWN) {
- /* lstate is INIT, ARMED, or ACTIVE */
- if (lstate != IB_PORT_ACTIVE) {
- *ppd->statusp &= ~QIB_STATUS_IB_READY;
- if (ppd->lflags & QIBL_LINKACTIVE)
- ev = IB_EVENT_PORT_ERR;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- if (lstate == IB_PORT_ARMED) {
- ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
- ppd->lflags &= ~(QIBL_LINKINIT |
- QIBL_LINKDOWN | QIBL_LINKACTIVE);
- } else {
- ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
- ppd->lflags &= ~(QIBL_LINKARMED |
- QIBL_LINKDOWN | QIBL_LINKACTIVE);
- }
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- /* start a 75msec timer to clear symbol errors */
- mod_timer(&ppd->symerr_clear_timer,
- msecs_to_jiffies(75));
- } else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /* active, but not active deferred */
- qib_hol_up(ppd); /* useful only for 6120 now */
- *ppd->statusp |=
- QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
- qib_clear_symerror_on_linkup(&ppd->symerr_clear_timer);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
- ppd->lflags &= ~(QIBL_LINKINIT |
- QIBL_LINKDOWN | QIBL_LINKARMED);
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- if (dd->flags & QIB_HAS_SEND_DMA)
- qib_sdma_process_event(ppd,
- qib_sdma_event_e30_go_running);
- ev = IB_EVENT_PORT_ACTIVE;
- dd->f_setextled(ppd, 1);
- }
- } else { /* down */
- if (ppd->lflags & QIBL_LINKACTIVE)
- ev = IB_EVENT_PORT_ERR;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
- ppd->lflags &= ~(QIBL_LINKINIT |
- QIBL_LINKACTIVE | QIBL_LINKARMED);
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- *ppd->statusp &= ~QIB_STATUS_IB_READY;
- }
-
-skip_ibchange:
- ppd->lastibcstat = ibcs;
- if (ev)
- signal_ib_event(ppd, ev);
-}
-
-void qib_clear_symerror_on_linkup(struct timer_list *t)
-{
- struct qib_pportdata *ppd = timer_container_of(ppd, t,
- symerr_clear_timer);
-
- if (ppd->lflags & QIBL_LINKACTIVE)
- return;
-
- ppd->ibport_data.z_symbol_error_counter =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
-}
-
-/*
- * Handle receive interrupts for user ctxts; this means a user
- * process was waiting for a packet to arrive, and didn't want
- * to poll.
- */
-void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
-{
- struct qib_ctxtdata *rcd;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
- if (!(ctxtr & (1ULL << i)))
- continue;
- rcd = dd->rcd[i];
- if (!rcd || !rcd->cnt)
- continue;
-
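- /*
- * A context waiting for any packet is woken and has its
- * rcvavail interrupt disabled; one waiting only for urgent
- * packets is simply woken.
- */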
- if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
- wake_up_interruptible(&rcd->wait);
- dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
- rcd->ctxt);
- } else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
- &rcd->flag)) {
- rcd->urgent++;
- wake_up_interruptible(&rcd->wait);
- }
- }
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-}
-
-void qib_bad_intrstatus(struct qib_devdata *dd)
-{
- static int allbits;
-
- /* separate routine, for better optimization of qib_intr() */
-
- /*
- * We print the message and disable interrupts, in hope of
- * having a better chance of debugging the problem.
- */
- qib_dev_err(dd,
- "Read of chip interrupt status failed disabling interrupts\n");
- if (allbits++) {
- /* disable interrupt delivery, something is very wrong */
- if (allbits == 2)
- dd->f_set_intr_state(dd, 0);
- if (allbits == 3) {
- qib_dev_err(dd,
- "2nd bad interrupt status, unregistering interrupts\n");
- dd->flags |= QIB_BADINTR;
- dd->flags &= ~QIB_INITTED;
- dd->f_free_irq(dd);
- }
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
deleted file mode 100644
index d99932b2ce21..000000000000
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ /dev/null
@@ -1,2450 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-
-#include "qib.h"
-#include "qib_mad.h"
-
-static int reply(struct ib_smp *smp)
-{
- /*
- * The verbs framework will handle the directed/LID route
- * packet changes.
- */
- smp->method = IB_MGMT_METHOD_GET_RESP;
- if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- smp->status |= IB_SMP_DIRECTION;
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-static int reply_failure(struct ib_smp *smp)
-{
- /*
- * The verbs framework will handle the directed/LID route
- * packet changes.
- */
- smp->method = IB_MGMT_METHOD_GET_RESP;
- if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- smp->status |= IB_SMP_DIRECTION;
- return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
-}
-
-static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
-{
- struct ib_mad_send_buf *send_buf;
- struct ib_mad_agent *agent;
- struct ib_smp *smp;
- int ret;
- unsigned long flags;
- unsigned long timeout;
-
- agent = ibp->rvp.send_agent;
- if (!agent)
- return;
-
- /* o14-3.2.1 */
- if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
- return;
-
- /* o14-2 */
- if (ibp->rvp.trap_timeout &&
- time_before(jiffies, ibp->rvp.trap_timeout))
- return;
-
- send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
- IB_MGMT_MAD_DATA, GFP_ATOMIC,
- IB_MGMT_BASE_VERSION);
- if (IS_ERR(send_buf))
- return;
-
- smp = send_buf->mad;
- smp->base_version = IB_MGMT_BASE_VERSION;
- smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- smp->class_version = 1;
- smp->method = IB_MGMT_METHOD_TRAP;
- ibp->rvp.tid++;
- smp->tid = cpu_to_be64(ibp->rvp.tid);
- smp->attr_id = IB_SMP_ATTR_NOTICE;
- /* o14-1: smp->mkey = 0; */
- memcpy(smp->data, data, len);
-
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- if (!ibp->rvp.sm_ah) {
- if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
- struct ib_ah *ah;
-
- ah = qib_create_qp0_ah(ibp, (u16)ibp->rvp.sm_lid);
- if (IS_ERR(ah))
- ret = PTR_ERR(ah);
- else {
- send_buf->ah = ah;
- ibp->rvp.sm_ah = ibah_to_rvtah(ah);
- ret = 0;
- }
- } else
- ret = -EINVAL;
- } else {
- send_buf->ah = &ibp->rvp.sm_ah->ibah;
- ret = 0;
- }
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-
- if (!ret)
- ret = ib_post_send_mad(send_buf, NULL);
- if (!ret) {
- /* 4.096 usec * 2^subnet_timeout, converted from ns to usec */
- timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
- ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
- } else {
- ib_free_send_mad(send_buf);
- ibp->rvp.trap_timeout = 0;
- }
-}
-
-/*
- * Send a bad P_Key trap (ch. 14.3.8).
- */
-void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl,
- u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
-{
- struct ib_mad_notice_attr data;
-
- ibp->rvp.n_pkt_drops++;
- ibp->rvp.pkey_violations++;
-
- /* Send violation trap */
- data.generic_type = IB_NOTICE_TYPE_SECURITY;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_BAD_PKEY;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_257_258.lid1 = lid1;
- data.details.ntc_257_258.lid2 = lid2;
- data.details.ntc_257_258.key = cpu_to_be32(key);
- data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
- data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a bad M_Key trap (ch. 14.3.9).
- */
-static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
-{
- struct ib_mad_notice_attr data;
-
- /* Send violation trap */
- data.generic_type = IB_NOTICE_TYPE_SECURITY;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_256.lid = data.issuer_lid;
- data.details.ntc_256.method = smp->method;
- data.details.ntc_256.attr_id = smp->attr_id;
- data.details.ntc_256.attr_mod = smp->attr_mod;
- data.details.ntc_256.mkey = smp->mkey;
- if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- u8 hop_cnt;
-
- data.details.ntc_256.dr_slid = smp->dr_slid;
- data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
- hop_cnt = smp->hop_cnt;
- if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
- data.details.ntc_256.dr_trunc_hop |=
- IB_NOTICE_TRAP_DR_TRUNC;
- hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
- }
- data.details.ntc_256.dr_trunc_hop |= hop_cnt;
- memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
- hop_cnt);
- }
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a Port Capability Mask Changed trap (ch. 14.3.11).
- */
-void qib_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = dd_from_dev(ibdev);
- struct qib_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
- struct ib_mad_notice_attr data;
-
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_144.lid = data.issuer_lid;
- data.details.ntc_144.new_cap_mask =
- cpu_to_be32(ibp->rvp.port_cap_flags);
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a System Image GUID Changed trap (ch. 14.3.12).
- */
-void qib_sys_guid_chg(struct qib_ibport *ibp)
-{
- struct ib_mad_notice_attr data;
-
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_145.lid = data.issuer_lid;
- data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a Node Description Changed trap (ch. 14.3.13).
- */
-void qib_node_desc_chg(struct qib_ibport *ibp)
-{
- struct ib_mad_notice_attr data;
-
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_144.lid = data.issuer_lid;
- data.details.ntc_144.local_changes = 1;
- data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-static int subn_get_nodedescription(struct ib_smp *smp,
- struct ib_device *ibdev)
-{
- if (smp->attr_mod)
- smp->status |= IB_SMP_INVALID_FIELD;
-
- memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
-
- return reply(smp);
-}
-
-static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 majrev, minrev;
- unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
-
- /* GUID 0 is illegal */
- if (smp->attr_mod || pidx >= dd->num_pports ||
- dd->pport[pidx].guid == 0)
- smp->status |= IB_SMP_INVALID_FIELD;
- else
- nip->port_guid = dd->pport[pidx].guid;
-
- nip->base_version = 1;
- nip->class_version = 1;
- nip->node_type = 1; /* channel adapter */
- nip->num_ports = ibdev->phys_port_cnt;
- /* This is already in network order */
- nip->sys_guid = ib_qib_sys_image_guid;
- nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
- nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
- nip->device_id = cpu_to_be16(dd->deviceid);
- majrev = dd->majrev;
- minrev = dd->minrev;
- nip->revision = cpu_to_be32((majrev << 16) | minrev);
- nip->local_port_num = port;
- nip->vendor_id[0] = QIB_SRC_OUI_1;
- nip->vendor_id[1] = QIB_SRC_OUI_2;
- nip->vendor_id[2] = QIB_SRC_OUI_3;
-
- return reply(smp);
-}
-
-static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
- __be64 *p = (__be64 *) smp->data;
- unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
-
- /* 32 blocks of 8 64-bit GUIDs per block */
-
- memset(smp->data, 0, sizeof(smp->data));
-
- if (startgx == 0 && pidx < dd->num_pports) {
- struct qib_pportdata *ppd = dd->pport + pidx;
- struct qib_ibport *ibp = &ppd->ibport_data;
- __be64 g = ppd->guid;
- unsigned i;
-
- /* GUID 0 is illegal */
- if (g == 0)
- smp->status |= IB_SMP_INVALID_FIELD;
- else {
- /* The first is a copy of the read-only HW GUID. */
- p[0] = g;
- for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
- p[i] = ibp->guids[i - 1];
- }
- } else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return reply(smp);
-}
-
-static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
-{
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
-}
-
-static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
-{
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
-}
-
-static int get_overrunthreshold(struct qib_pportdata *ppd)
-{
- return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
-}
-
-/**
- * set_overrunthreshold - set the overrun threshold
- * @ppd: the physical port data
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
-{
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
- (u32)n);
- return 0;
-}
-
-static int get_phyerrthreshold(struct qib_pportdata *ppd)
-{
- return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
-}
-
-/**
- * set_phyerrthreshold - set the physical error threshold
- * @ppd: the physical port data
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
-{
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
- (u32)n);
- return 0;
-}
-
-/**
- * get_linkdowndefaultstate - get the default linkdown state
- * @ppd: the physical port data
- *
- * Returns zero if the default is POLL, 1 if the default is SLEEP.
- */
-static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
-{
- return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
- IB_LINKINITCMD_SLEEP;
-}
-
-static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
-{
- int valid_mkey = 0;
- int ret = 0;
-
- /* Is the mkey in the process of expiring? */
- if (ibp->rvp.mkey_lease_timeout &&
- time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
- /* Clear timeout and mkey protection field. */
- ibp->rvp.mkey_lease_timeout = 0;
- ibp->rvp.mkeyprot = 0;
- }
-
- if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
- ibp->rvp.mkey == smp->mkey)
- valid_mkey = 1;
-
- /* Unset lease timeout on any valid Get/Set/TrapRepress */
- if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
- (smp->method == IB_MGMT_METHOD_GET ||
- smp->method == IB_MGMT_METHOD_SET ||
- smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
- ibp->rvp.mkey_lease_timeout = 0;
-
- if (!valid_mkey) {
- switch (smp->method) {
- case IB_MGMT_METHOD_GET:
- /* Bad mkey not a violation below level 2 */
- if (ibp->rvp.mkeyprot < 2)
- break;
- fallthrough;
- case IB_MGMT_METHOD_SET:
- case IB_MGMT_METHOD_TRAP_REPRESS:
- if (ibp->rvp.mkey_violations != 0xFFFF)
- ++ibp->rvp.mkey_violations;
- if (!ibp->rvp.mkey_lease_timeout &&
- ibp->rvp.mkey_lease_period)
- ibp->rvp.mkey_lease_timeout = jiffies +
- ibp->rvp.mkey_lease_period * HZ;
- /* Generate a trap notice. */
- qib_bad_mkey(ibp, smp);
- ret = 1;
- }
- }
-
- return ret;
-}
-
-static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_devdata *dd;
- struct qib_pportdata *ppd;
- struct qib_ibport *ibp;
- struct ib_port_info *pip = (struct ib_port_info *)smp->data;
- u8 mtu;
- int ret;
- u32 state;
- u32 port_num = be32_to_cpu(smp->attr_mod);
-
- if (port_num == 0)
- port_num = port;
- else {
- if (port_num > ibdev->phys_port_cnt) {
- smp->status |= IB_SMP_INVALID_FIELD;
- ret = reply(smp);
- goto bail;
- }
- if (port_num != port) {
- ibp = to_iport(ibdev, port_num);
- ret = check_mkey(ibp, smp, 0);
- if (ret) {
- ret = IB_MAD_RESULT_FAILURE;
- goto bail;
- }
- }
- }
-
- dd = dd_from_ibdev(ibdev);
- /* IB numbers ports from 1, hdw from 0 */
- ppd = dd->pport + (port_num - 1);
- ibp = &ppd->ibport_data;
-
- /* Clear all fields. Only set the non-zero fields. */
- memset(smp->data, 0, sizeof(smp->data));
-
- /* Only return the mkey if the protection field allows it. */
- if (!(smp->method == IB_MGMT_METHOD_GET &&
- ibp->rvp.mkey != smp->mkey &&
- ibp->rvp.mkeyprot == 1))
- pip->mkey = ibp->rvp.mkey;
- pip->gid_prefix = ibp->rvp.gid_prefix;
- pip->lid = cpu_to_be16(ppd->lid);
- pip->sm_lid = cpu_to_be16((u16)ibp->rvp.sm_lid);
- pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
- /* pip->diag_code; */
- pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
- pip->local_port_num = port;
- pip->link_width_enabled = ppd->link_width_enabled;
- pip->link_width_supported = ppd->link_width_supported;
- pip->link_width_active = ppd->link_width_active;
- state = dd->f_iblink_state(ppd->lastibcstat);
- pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;
-
- pip->portphysstate_linkdown =
- (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
- (get_linkdowndefaultstate(ppd) ? 1 : 2);
- pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
- pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
- ppd->link_speed_enabled;
- switch (ppd->ibmtu) {
- default: /* something is wrong; fall through */
- case 4096:
- mtu = IB_MTU_4096;
- break;
- case 2048:
- mtu = IB_MTU_2048;
- break;
- case 1024:
- mtu = IB_MTU_1024;
- break;
- case 512:
- mtu = IB_MTU_512;
- break;
- case 256:
- mtu = IB_MTU_256;
- break;
- }
- pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl;
- pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */
- pip->vl_high_limit = ibp->rvp.vl_high_limit;
- pip->vl_arb_high_cap =
- dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
- pip->vl_arb_low_cap =
- dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
- /* InitTypeReply = 0 */
- pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
- /* HCAs ignore VLStallCount and HOQLife */
- /* pip->vlstallcnt_hoqlife; */
- pip->operationalvl_pei_peo_fpi_fpo =
- dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
- pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
- /* P_KeyViolations are counted by hardware. */
- pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
- pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
- /* Only the hardware GUID is supported for now */
- pip->guid_cap = QIB_GUIDS_PER_PORT;
- pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout;
- /* 32.768 usec. response time (guessing) */
- pip->resv_resptimevalue = 3;
- pip->localphyerrors_overrunerrors =
- (get_phyerrthreshold(ppd) << 4) |
- get_overrunthreshold(ppd);
- /* pip->max_credit_hint; */
- if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
- u32 v;
-
- v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
- pip->link_roundtrip_latency[0] = v >> 16;
- pip->link_roundtrip_latency[1] = v >> 8;
- pip->link_roundtrip_latency[2] = v;
- }
-
- ret = reply(smp);
-
-bail:
- return ret;
-}
-
-/**
- * get_pkeys - return the PKEY table
- * @dd: the qlogic_ib device
- * @port: the IB port number
- * @pkeys: the pkey table is placed here
- */
-static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
-{
- struct qib_pportdata *ppd = dd->pport + port - 1;
- /*
- * Always a kernel context, so no locking is needed.
- * If we get here with ppd set up, there is no need to check
- * that rcd is valid.
- */
- struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];
-
- memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));
-
- return 0;
-}
-
-static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
- u16 *p = (u16 *) smp->data;
- __be16 *q = (__be16 *) smp->data;
-
- /* 64 blocks of 32 16-bit P_Key entries */
-
- memset(smp->data, 0, sizeof(smp->data));
- if (startpx == 0) {
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- unsigned i, n = qib_get_npkeys(dd);
-
- get_pkeys(dd, port, p);
-
- for (i = 0; i < n; i++)
- q[i] = cpu_to_be16(p[i]);
- } else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return reply(smp);
-}
-
-static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
- __be64 *p = (__be64 *) smp->data;
- unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
-
- /* 32 blocks of 8 64-bit GUIDs per block */
-
- if (startgx == 0 && pidx < dd->num_pports) {
- struct qib_pportdata *ppd = dd->pport + pidx;
- struct qib_ibport *ibp = &ppd->ibport_data;
- unsigned i;
-
- /* The first entry is read-only. */
- for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
- ibp->guids[i - 1] = p[i];
- } else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- /* The only GUID we support is the first read-only entry. */
- return subn_get_guidinfo(smp, ibdev, port);
-}
-
-/**
- * subn_set_portinfo - set port information
- * @smp: the incoming SM packet
- * @ibdev: the infiniband device
- * @port: the port on the device
- *
- * Set Portinfo (see ch. 14.2.5.6).
- */
-static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct ib_port_info *pip = (struct ib_port_info *)smp->data;
- struct ib_event event;
- struct qib_devdata *dd;
- struct qib_pportdata *ppd;
- struct qib_ibport *ibp;
- u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
- unsigned long flags;
- u16 lid, smlid;
- u8 lwe;
- u8 lse;
- u8 state;
- u8 vls;
- u8 msl;
- u16 lstate;
- int ret, ore, mtu;
- u32 port_num = be32_to_cpu(smp->attr_mod);
-
- if (port_num == 0)
- port_num = port;
- else {
- if (port_num > ibdev->phys_port_cnt)
- goto err;
- /* Port attributes can only be set on the receiving port */
- if (port_num != port)
- goto get_only;
- }
-
- dd = dd_from_ibdev(ibdev);
- /* IB numbers ports from 1, hdw from 0 */
- ppd = dd->pport + (port_num - 1);
- ibp = &ppd->ibport_data;
- event.device = ibdev;
- event.element.port_num = port;
-
- ibp->rvp.mkey = pip->mkey;
- ibp->rvp.gid_prefix = pip->gid_prefix;
- ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
-
- lid = be16_to_cpu(pip->lid);
- /* Must be a valid unicast LID address. */
- if (lid == 0 || lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
- smp->status |= IB_SMP_INVALID_FIELD;
- else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
- if (ppd->lid != lid)
- qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
- if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
- qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
- qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
- event.event = IB_EVENT_LID_CHANGE;
- ib_dispatch_event(&event);
- }
-
- smlid = be16_to_cpu(pip->sm_lid);
- msl = pip->neighbormtu_mastersmsl & 0xF;
- /* Must be a valid unicast LID address. */
- if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
- smp->status |= IB_SMP_INVALID_FIELD;
- else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- if (ibp->rvp.sm_ah) {
- if (smlid != ibp->rvp.sm_lid)
- rdma_ah_set_dlid(&ibp->rvp.sm_ah->attr,
- smlid);
- if (msl != ibp->rvp.sm_sl)
- rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
- }
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
- if (smlid != ibp->rvp.sm_lid)
- ibp->rvp.sm_lid = smlid;
- if (msl != ibp->rvp.sm_sl)
- ibp->rvp.sm_sl = msl;
- event.event = IB_EVENT_SM_CHANGE;
- ib_dispatch_event(&event);
- }
-
- /* Allow 1x or 4x to be set (see 14.2.6.6). */
- lwe = pip->link_width_enabled;
- if (lwe) {
- if (lwe == 0xFF)
- set_link_width_enabled(ppd, ppd->link_width_supported);
- else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
- smp->status |= IB_SMP_INVALID_FIELD;
- else if (lwe != ppd->link_width_enabled)
- set_link_width_enabled(ppd, lwe);
- }
-
- lse = pip->linkspeedactive_enabled & 0xF;
- if (lse) {
- /*
- * The IB 1.2 spec only allows link speed values
- * 1, 3, 5, 7, and 15; 1.2.1 extended this to allow
- * specific speeds.
- */
- if (lse == 15)
- set_link_speed_enabled(ppd,
- ppd->link_speed_supported);
- else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
- smp->status |= IB_SMP_INVALID_FIELD;
- else if (lse != ppd->link_speed_enabled)
- set_link_speed_enabled(ppd, lse);
- }
-
- /* Set link down default state. */
- switch (pip->portphysstate_linkdown & 0xF) {
- case 0: /* NOP */
- break;
- case 1: /* SLEEP */
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
- IB_LINKINITCMD_SLEEP);
- break;
- case 2: /* POLL */
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
- IB_LINKINITCMD_POLL);
- break;
- default:
- smp->status |= IB_SMP_INVALID_FIELD;
- }
-
- ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6;
- ibp->rvp.vl_high_limit = pip->vl_high_limit;
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
- ibp->rvp.vl_high_limit);
-
- mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
- if (mtu == -1)
- smp->status |= IB_SMP_INVALID_FIELD;
- else
- qib_set_mtu(ppd, mtu);
-
- /* Set operational VLs */
- vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
- if (vls) {
- if (vls > ppd->vls_supported)
- smp->status |= IB_SMP_INVALID_FIELD;
- else
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
- }
-
- if (pip->mkey_violations == 0)
- ibp->rvp.mkey_violations = 0;
-
- if (pip->pkey_violations == 0)
- ibp->rvp.pkey_violations = 0;
-
- if (pip->qkey_violations == 0)
- ibp->rvp.qkey_violations = 0;
-
- ore = pip->localphyerrors_overrunerrors;
- if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
- smp->status |= IB_SMP_INVALID_FIELD;
-
- if (set_overrunthreshold(ppd, (ore & 0xF)))
- smp->status |= IB_SMP_INVALID_FIELD;
-
- ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
-
- /*
- * Do the port state change now that the other link parameters
- * have been set.
- * Changing the port physical state only makes sense if the link
- * is down or is being set to down.
- */
- state = pip->linkspeed_portstate & 0xF;
- lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
- if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
- smp->status |= IB_SMP_INVALID_FIELD;
-
- /*
- * Only state changes of DOWN, ARM, and ACTIVE are valid
- * and must be in the correct state to take effect (see 7.2.6).
- */
- switch (state) {
- case IB_PORT_NOP:
- if (lstate == 0)
- break;
- fallthrough;
- case IB_PORT_DOWN:
- if (lstate == 0)
- lstate = QIB_IB_LINKDOWN_ONLY;
- else if (lstate == 1)
- lstate = QIB_IB_LINKDOWN_SLEEP;
- else if (lstate == 2)
- lstate = QIB_IB_LINKDOWN;
- else if (lstate == 3)
- lstate = QIB_IB_LINKDOWN_DISABLE;
- else {
- smp->status |= IB_SMP_INVALID_FIELD;
- break;
- }
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_LINKV;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- qib_set_linkstate(ppd, lstate);
- /*
- * Don't send a reply if the response would be sent
- * through the disabled port.
- */
- if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
- ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- goto done;
- }
- qib_wait_linkstate(ppd, QIBL_LINKV, 10);
- break;
- case IB_PORT_ARMED:
- qib_set_linkstate(ppd, QIB_IB_LINKARM);
- break;
- case IB_PORT_ACTIVE:
- qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
- break;
- default:
- smp->status |= IB_SMP_INVALID_FIELD;
- }
-
- if (clientrereg) {
- event.event = IB_EVENT_CLIENT_REREGISTER;
- ib_dispatch_event(&event);
- }
-
- /* restore re-reg bit per o14-12.2.1 */
- pip->clientrereg_resv_subnetto |= clientrereg;
-
- goto get_only;
-
-err:
- smp->status |= IB_SMP_INVALID_FIELD;
-get_only:
- ret = subn_get_portinfo(smp, ibdev, port);
-done:
- return ret;
-}
-
-/**
- * rm_pkey - decrement the reference count for the given PKEY
- * @ppd: the qlogic_ib device
- * @key: the PKEY index
- *
- * Return true if this was the last reference and the hardware table entry
- * needs to be changed.
- */
-static int rm_pkey(struct qib_pportdata *ppd, u16 key)
-{
- int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (ppd->pkeys[i] != key)
- continue;
- if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
- ppd->pkeys[i] = 0;
- ret = 1;
- goto bail;
- }
- break;
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * add_pkey - add the given PKEY to the hardware table
- * @ppd: the qlogic_ib device
- * @key: the PKEY
- *
- * Return an error code if unable to add the entry, zero if no change,
- * or 1 if the hardware PKEY register needs to be updated.
- */
-static int add_pkey(struct qib_pportdata *ppd, u16 key)
-{
- int i;
- u16 lkey = key & 0x7FFF;
- int any = 0;
- int ret;
-
- if (lkey == 0x7FFF) {
- ret = 0;
- goto bail;
- }
-
- /* Look for an empty slot or a matching PKEY. */
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (!ppd->pkeys[i]) {
- any++;
- continue;
- }
- /* If it matches exactly, try to increment the ref count */
- if (ppd->pkeys[i] == key) {
- if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
- ret = 0;
- goto bail;
- }
- /* Lost the race. Look for an empty slot below. */
- atomic_dec(&ppd->pkeyrefs[i]);
- any++;
- }
- /*
- * It makes no sense to have both the limited and unlimited
- * PKEY set at the same time since the unlimited one will
- * disable the limited one.
- */
- if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
- ret = -EEXIST;
- goto bail;
- }
- }
- if (!any) {
- ret = -EBUSY;
- goto bail;
- }
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (!ppd->pkeys[i] &&
- atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
- /* for qibstats, etc. */
- ppd->pkeys[i] = key;
- ret = 1;
- goto bail;
- }
- }
- ret = -EBUSY;
-
-bail:
- return ret;
-}
-
-/**
- * set_pkeys - set the PKEY table for ctxt 0
- * @dd: the qlogic_ib device
- * @port: the IB port number
- * @pkeys: the PKEY table
- */
-static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
-{
- struct qib_pportdata *ppd;
- struct qib_ctxtdata *rcd;
- int i;
- int changed = 0;
-
- /*
- * IB port one/two always maps to context zero/one;
- * always a kernel context, so no locking is needed.
- * If we get here with ppd set up, there is no need to check
- * that rcd is valid.
- */
- ppd = dd->pport + (port - 1);
- rcd = dd->rcd[ppd->hw_pidx];
-
- for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
- u16 key = pkeys[i];
- u16 okey = rcd->pkeys[i];
-
- if (key == okey)
- continue;
- /*
- * The value of this PKEY table entry is changing.
- * Remove the old entry in the hardware's array of PKEYs.
- */
- if (okey & 0x7FFF)
- changed |= rm_pkey(ppd, okey);
- if (key & 0x7FFF) {
- int ret = add_pkey(ppd, key);
-
- if (ret < 0)
- key = 0;
- else
- changed |= ret;
- }
- rcd->pkeys[i] = key;
- }
- if (changed) {
- struct ib_event event;
-
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
-
- event.event = IB_EVENT_PKEY_CHANGE;
- event.device = &dd->verbs_dev.rdi.ibdev;
- event.element.port_num = port;
- ib_dispatch_event(&event);
- }
- return 0;
-}
-
-static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
- __be16 *p = (__be16 *) smp->data;
- u16 *q = (u16 *) smp->data;
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- unsigned i, n = qib_get_npkeys(dd);
-
- for (i = 0; i < n; i++)
- q[i] = be16_to_cpu(p[i]);
-
- if (startpx != 0 || set_pkeys(dd, port, q) != 0)
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return subn_get_pkeytable(smp, ibdev, port);
-}
-
-static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_ibport *ibp = to_iport(ibdev, port);
- u8 *p = (u8 *) smp->data;
- unsigned i;
-
- memset(smp->data, 0, sizeof(smp->data));
-
- if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP))
- smp->status |= IB_SMP_UNSUP_METHOD;
- else
- for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
- *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];
-
- return reply(smp);
-}
-
-static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_ibport *ibp = to_iport(ibdev, port);
- u8 *p = (u8 *) smp->data;
- unsigned i;
-
- if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) {
- smp->status |= IB_SMP_UNSUP_METHOD;
- return reply(smp);
- }
-
- for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
- ibp->sl_to_vl[i] = *p >> 4;
- ibp->sl_to_vl[i + 1] = *p & 0xF;
- }
- qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
- _QIB_EVENT_SL2VL_CHANGE_BIT);
-
- return subn_get_sl_to_vl(smp, ibdev, port);
-}
-
-static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
- struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
-
- memset(smp->data, 0, sizeof(smp->data));
-
- if (ppd->vls_supported == IB_VL_VL0)
- smp->status |= IB_SMP_UNSUP_METHOD;
- else if (which == IB_VLARB_LOWPRI_0_31)
- (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
- smp->data);
- else if (which == IB_VLARB_HIGHPRI_0_31)
- (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
- smp->data);
- else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return reply(smp);
-}
-
-static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
- struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
-
- if (ppd->vls_supported == IB_VL_VL0)
- smp->status |= IB_SMP_UNSUP_METHOD;
- else if (which == IB_VLARB_LOWPRI_0_31)
- (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
- smp->data);
- else if (which == IB_VLARB_HIGHPRI_0_31)
- (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
- smp->data);
- else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return subn_get_vl_arb(smp, ibdev, port);
-}
-
-static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- /*
- * For now, we only send the trap once so no need to process this.
- * o13-6, o13-7,
- * o14-3.a4 The SMA shall not send any message in response to a valid
- * SubnTrapRepress() message.
- */
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-}
-
-static int pma_get_classportinfo(struct ib_pma_mad *pmp,
- struct ib_device *ibdev)
-{
- struct ib_class_port_info *p =
- (struct ib_class_port_info *)pmp->data;
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- if (pmp->mad_hdr.attr_mod != 0)
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- /* Note that AllPortSelect is not valid */
- p->base_version = 1;
- p->class_version = 1;
- p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
- /*
- * Set the most significant bit of CM2 to indicate support for
- * congestion statistics
- */
- ib_set_cpi_capmask2(p,
- dd->psxmitwait_supported <<
- (31 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE));
- /*
- * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
- */
- ib_set_cpi_resp_time(p, 18);
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplescontrol *p =
- (struct ib_pma_portsamplescontrol *)pmp->data;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned long flags;
- u8 port_select = p->port_select;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- p->port_select = port_select;
- if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- goto bail;
- }
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
- p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- p->counter_width = 4; /* 32 bit counters */
- p->counter_mask0_9 = COUNTER_MASK0_9;
- p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
- p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
- p->tag = cpu_to_be16(ibp->rvp.pma_tag);
- p->counter_select[0] = ibp->rvp.pma_counter_select[0];
- p->counter_select[1] = ibp->rvp.pma_counter_select[1];
- p->counter_select[2] = ibp->rvp.pma_counter_select[2];
- p->counter_select[3] = ibp->rvp.pma_counter_select[3];
- p->counter_select[4] = ibp->rvp.pma_counter_select[4];
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-
-bail:
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplescontrol *p =
- (struct ib_pma_portsamplescontrol *)pmp->data;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned long flags;
- u8 status, xmit_flags;
- int ret;
-
- if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- spin_lock_irqsave(&ibp->rvp.lock, flags);
-
- /* Port Sampling code owns the PS* HW counters */
- xmit_flags = ppd->cong_stats.flags;
- ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
- status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- if (status == IB_PMA_SAMPLE_STATUS_DONE ||
- (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
- xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
- ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
- ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
- ibp->rvp.pma_tag = be16_to_cpu(p->tag);
- ibp->rvp.pma_counter_select[0] = p->counter_select[0];
- ibp->rvp.pma_counter_select[1] = p->counter_select[1];
- ibp->rvp.pma_counter_select[2] = p->counter_select[2];
- ibp->rvp.pma_counter_select[3] = p->counter_select[3];
- ibp->rvp.pma_counter_select[4] = p->counter_select[4];
- dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
- ibp->rvp.pma_sample_start);
- }
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-
- ret = pma_get_portsamplescontrol(pmp, ibdev, port);
-
-bail:
- return ret;
-}
-
-static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
- __be16 sel)
-{
- u64 ret;
-
- switch (sel) {
- case IB_PMA_PORT_XMIT_DATA:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
- break;
- case IB_PMA_PORT_RCV_DATA:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
- break;
- case IB_PMA_PORT_XMIT_PKTS:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
- break;
- case IB_PMA_PORT_RCV_PKTS:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
- break;
- case IB_PMA_PORT_XMIT_WAIT:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
- break;
- default:
- ret = 0;
- }
-
- return ret;
-}
-
-/* This function assumes that the xmit_wait lock is already held */
-static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
-{
- u32 delta;
-
- delta = get_counter(&ppd->ibport_data, ppd,
- IB_PMA_PORT_XMIT_WAIT);
- return ppd->cong_stats.counter + delta;
-}
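
The running total kept in cong_stats.counter deserves a note: the hardware XmitWait sample counter restarts with each sampling interval, so the driver folds each completed sample into a software accumulator and adds the in-flight sample on reads. Below is a minimal standalone sketch of that pattern; hw_sample_read() and hw_sample_restart() are illustrative stand-ins, not driver APIs.

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_sample;                       /* current HW sample */

static uint64_t hw_sample_read(void)  { return hw_sample; }
static void hw_sample_restart(void)   { hw_sample = 0; }

struct accum { uint64_t total; };                /* plays cong_stats.counter */

static uint64_t accum_read(const struct accum *a)   /* total so far */
{
    return a->total + hw_sample_read();
}

static void accum_fold(struct accum *a)          /* a sample finished */
{
    a->total = accum_read(a);
    hw_sample_restart();
}

int main(void)
{
    struct accum a = { 0 };

    hw_sample = 10; accum_fold(&a);              /* first interval done */
    hw_sample = 3;                               /* partial interval */
    printf("%llu\n", (unsigned long long)accum_read(&a));   /* prints 13 */
    return 0;
}
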
-
-static void cache_hw_sample_counters(struct qib_pportdata *ppd)
-{
- struct qib_ibport *ibp = &ppd->ibport_data;
-
- ppd->cong_stats.counter_cache.psxmitdata =
- get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
- ppd->cong_stats.counter_cache.psrcvdata =
- get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
- ppd->cong_stats.counter_cache.psxmitpkts =
- get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
- ppd->cong_stats.counter_cache.psrcvpkts =
- get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
- ppd->cong_stats.counter_cache.psxmitwait =
- get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
-}
-
-static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
- __be16 sel)
-{
- u64 ret;
-
- switch (sel) {
- case IB_PMA_PORT_XMIT_DATA:
- ret = ppd->cong_stats.counter_cache.psxmitdata;
- break;
- case IB_PMA_PORT_RCV_DATA:
- ret = ppd->cong_stats.counter_cache.psrcvdata;
- break;
- case IB_PMA_PORT_XMIT_PKTS:
- ret = ppd->cong_stats.counter_cache.psxmitpkts;
- break;
- case IB_PMA_PORT_RCV_PKTS:
- ret = ppd->cong_stats.counter_cache.psrcvpkts;
- break;
- case IB_PMA_PORT_XMIT_WAIT:
- ret = ppd->cong_stats.counter_cache.psxmitwait;
- break;
- default:
- ret = 0;
- }
-
- return ret;
-}
-
-static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplesresult *p =
- (struct ib_pma_portsamplesresult *)pmp->data;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned long flags;
- u8 status;
- int i;
-
- memset(pmp->data, 0, sizeof(pmp->data));
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- p->tag = cpu_to_be16(ibp->rvp.pma_tag);
- if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
- p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- else {
- status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- p->sample_status = cpu_to_be16(status);
- if (status == IB_PMA_SAMPLE_STATUS_DONE) {
- cache_hw_sample_counters(ppd);
- ppd->cong_stats.counter =
- xmit_wait_get_value_delta(ppd);
- dd->f_set_cntr_sample(ppd,
- QIB_CONG_TIMER_PSINTERVAL, 0);
- ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
- }
- }
- for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
- p->counter[i] = cpu_to_be32(
- get_cache_hw_sample_counters(
- ppd, ibp->rvp.pma_counter_select[i]));
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplesresult_ext *p =
- (struct ib_pma_portsamplesresult_ext *)pmp->data;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned long flags;
- u8 status;
- int i;
-
- /* Port Sampling code owns the PS* HW counters */
- memset(pmp->data, 0, sizeof(pmp->data));
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- p->tag = cpu_to_be16(ibp->rvp.pma_tag);
- if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
- p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- else {
- status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- p->sample_status = cpu_to_be16(status);
- /* 64 bits */
- p->extended_width = cpu_to_be32(0x80000000);
- if (status == IB_PMA_SAMPLE_STATUS_DONE) {
- cache_hw_sample_counters(ppd);
- ppd->cong_stats.counter =
- xmit_wait_get_value_delta(ppd);
- dd->f_set_cntr_sample(ppd,
- QIB_CONG_TIMER_PSINTERVAL, 0);
- ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
- }
- }
- for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
- p->counter[i] = cpu_to_be64(
- get_cache_hw_sample_counters(
- ppd, ibp->rvp.pma_counter_select[i]));
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_get_portcounters(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_verbs_counters cntrs;
- u8 port_select = p->port_select;
-
- qib_get_counters(ppd, &cntrs);
-
- /* Adjust counters for any resets done. */
- cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
- cntrs.link_error_recovery_counter -=
- ibp->z_link_error_recovery_counter;
- cntrs.link_downed_counter -= ibp->z_link_downed_counter;
- cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
- cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
- cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
- cntrs.port_xmit_data -= ibp->z_port_xmit_data;
- cntrs.port_rcv_data -= ibp->z_port_rcv_data;
- cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
- cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
- cntrs.local_link_integrity_errors -=
- ibp->z_local_link_integrity_errors;
- cntrs.excessive_buffer_overrun_errors -=
- ibp->z_excessive_buffer_overrun_errors;
- cntrs.vl15_dropped -= ibp->z_vl15_dropped;
- cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- p->port_select = port_select;
- if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- if (cntrs.symbol_error_counter > 0xFFFFUL)
- p->symbol_error_counter = cpu_to_be16(0xFFFF);
- else
- p->symbol_error_counter =
- cpu_to_be16((u16)cntrs.symbol_error_counter);
- if (cntrs.link_error_recovery_counter > 0xFFUL)
- p->link_error_recovery_counter = 0xFF;
- else
- p->link_error_recovery_counter =
- (u8)cntrs.link_error_recovery_counter;
- if (cntrs.link_downed_counter > 0xFFUL)
- p->link_downed_counter = 0xFF;
- else
- p->link_downed_counter = (u8)cntrs.link_downed_counter;
- if (cntrs.port_rcv_errors > 0xFFFFUL)
- p->port_rcv_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_errors =
- cpu_to_be16((u16) cntrs.port_rcv_errors);
- if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
- p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_remphys_errors =
- cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
- if (cntrs.port_xmit_discards > 0xFFFFUL)
- p->port_xmit_discards = cpu_to_be16(0xFFFF);
- else
- p->port_xmit_discards =
- cpu_to_be16((u16)cntrs.port_xmit_discards);
- if (cntrs.local_link_integrity_errors > 0xFUL)
- cntrs.local_link_integrity_errors = 0xFUL;
- if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
- cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
- cntrs.excessive_buffer_overrun_errors;
- if (cntrs.vl15_dropped > 0xFFFFUL)
- p->vl15_dropped = cpu_to_be16(0xFFFF);
- else
- p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
- if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
- p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
- if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
- p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
- if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
- p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_xmit_packets =
- cpu_to_be32((u32)cntrs.port_xmit_packets);
- if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
- p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_rcv_packets =
- cpu_to_be32((u32) cntrs.port_rcv_packets);
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- /* Congestion PMA packets start at offset 24, not 64 */
- struct ib_pma_portcounters_cong *p =
- (struct ib_pma_portcounters_cong *)pmp->reserved;
- struct qib_verbs_counters cntrs;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = dd_from_ppd(ppd);
- u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
- u64 xmit_wait_counter;
- unsigned long flags;
-
- /*
- * This check is performed only in the GET method because the
- * SET method ends up calling this anyway.
- */
- if (!dd->psxmitwait_supported)
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- if (port_select != port)
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- qib_get_counters(ppd, &cntrs);
- spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
- xmit_wait_counter = xmit_wait_get_value_delta(ppd);
- spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
-
- /* Adjust counters for any resets done. */
- cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
- cntrs.link_error_recovery_counter -=
- ibp->z_link_error_recovery_counter;
- cntrs.link_downed_counter -= ibp->z_link_downed_counter;
- cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
- cntrs.port_rcv_remphys_errors -=
- ibp->z_port_rcv_remphys_errors;
- cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
- cntrs.local_link_integrity_errors -=
- ibp->z_local_link_integrity_errors;
- cntrs.excessive_buffer_overrun_errors -=
- ibp->z_excessive_buffer_overrun_errors;
- cntrs.vl15_dropped -= ibp->z_vl15_dropped;
- cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
- cntrs.port_xmit_data -= ibp->z_port_xmit_data;
- cntrs.port_rcv_data -= ibp->z_port_rcv_data;
- cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
- cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
-
- memset(pmp->reserved, 0, sizeof(pmp->reserved));
- memset(pmp->data, 0, sizeof(pmp->data));
-
- /*
- * Set top 3 bits to indicate interval in picoseconds in
- * remaining bits.
- */
- p->port_check_rate =
- cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
- (dd->psxmitwait_check_rate &
- ~(QIB_XMIT_RATE_PICO << 13)));
- p->port_adr_events = cpu_to_be64(0);
- p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
- p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
- p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
- p->port_xmit_packets =
- cpu_to_be64(cntrs.port_xmit_packets);
- p->port_rcv_packets =
- cpu_to_be64(cntrs.port_rcv_packets);
- if (cntrs.symbol_error_counter > 0xFFFFUL)
- p->symbol_error_counter = cpu_to_be16(0xFFFF);
- else
- p->symbol_error_counter =
- cpu_to_be16(
- (u16)cntrs.symbol_error_counter);
- if (cntrs.link_error_recovery_counter > 0xFFUL)
- p->link_error_recovery_counter = 0xFF;
- else
- p->link_error_recovery_counter =
- (u8)cntrs.link_error_recovery_counter;
- if (cntrs.link_downed_counter > 0xFFUL)
- p->link_downed_counter = 0xFF;
- else
- p->link_downed_counter =
- (u8)cntrs.link_downed_counter;
- if (cntrs.port_rcv_errors > 0xFFFFUL)
- p->port_rcv_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_errors =
- cpu_to_be16((u16) cntrs.port_rcv_errors);
- if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
- p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_remphys_errors =
- cpu_to_be16(
- (u16)cntrs.port_rcv_remphys_errors);
- if (cntrs.port_xmit_discards > 0xFFFFUL)
- p->port_xmit_discards = cpu_to_be16(0xFFFF);
- else
- p->port_xmit_discards =
- cpu_to_be16((u16)cntrs.port_xmit_discards);
- if (cntrs.local_link_integrity_errors > 0xFUL)
- cntrs.local_link_integrity_errors = 0xFUL;
- if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
- cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
- cntrs.excessive_buffer_overrun_errors;
- if (cntrs.vl15_dropped > 0xFFFFUL)
- p->vl15_dropped = cpu_to_be16(0xFFFF);
- else
- p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
-
- return reply((struct ib_smp *)pmp);
-}
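
As the comment in the function above notes, port_check_rate packs a 3-bit rate unit into the top of the 16-bit field, with the check rate itself in the low 13 bits. A small host-order sketch of that packing (the driver additionally byte-swaps with cpu_to_be16()):

#include <stdio.h>

#define QIB_XMIT_RATE_PICO 0x7

int main(void)
{
    unsigned rate  = 0x1234 & 0x1fff;                  /* 13-bit payload */
    unsigned field = (QIB_XMIT_RATE_PICO << 13) | rate;

    printf("field=0x%04x unit=%u rate=0x%04x\n",
           field, field >> 13, field & 0x1fff);        /* 0xf234, 7, 0x1234 */
    return 0;
}
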
-
-static void qib_snapshot_pmacounters(
- struct qib_ibport *ibp,
- struct qib_pma_counters *pmacounters)
-{
- struct qib_pma_counters *p;
- int cpu;
-
- memset(pmacounters, 0, sizeof(*pmacounters));
- for_each_possible_cpu(cpu) {
- p = per_cpu_ptr(ibp->pmastats, cpu);
- pmacounters->n_unicast_xmit += p->n_unicast_xmit;
- pmacounters->n_unicast_rcv += p->n_unicast_rcv;
- pmacounters->n_multicast_xmit += p->n_multicast_xmit;
- pmacounters->n_multicast_rcv += p->n_multicast_rcv;
- }
-}
-
-static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters_ext *p =
- (struct ib_pma_portcounters_ext *)pmp->data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u64 swords, rwords, spkts, rpkts, xwait;
- struct qib_pma_counters pma;
- u8 port_select = p->port_select;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- p->port_select = port_select;
- if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- goto bail;
- }
-
- qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
-
- /* Adjust counters for any resets done. */
- swords -= ibp->z_port_xmit_data;
- rwords -= ibp->z_port_rcv_data;
- spkts -= ibp->z_port_xmit_packets;
- rpkts -= ibp->z_port_rcv_packets;
-
- p->port_xmit_data = cpu_to_be64(swords);
- p->port_rcv_data = cpu_to_be64(rwords);
- p->port_xmit_packets = cpu_to_be64(spkts);
- p->port_rcv_packets = cpu_to_be64(rpkts);
-
- qib_snapshot_pmacounters(ibp, &pma);
-
- p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
- - ibp->z_unicast_xmit);
- p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
- - ibp->z_unicast_rcv);
- p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
- - ibp->z_multicast_xmit);
- p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
- - ibp->z_multicast_rcv);
-
-bail:
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_set_portcounters(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_verbs_counters cntrs;
-
- /*
- * Since the HW doesn't support clearing counters, we save the
- * current count and subtract it from future responses.
- */
- qib_get_counters(ppd, &cntrs);
-
- if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
- ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
-
- if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
- ibp->z_link_error_recovery_counter =
- cntrs.link_error_recovery_counter;
-
- if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
- ibp->z_link_downed_counter = cntrs.link_downed_counter;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
- ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
- ibp->z_port_rcv_remphys_errors =
- cntrs.port_rcv_remphys_errors;
-
- if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
- ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
-
- if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
- ibp->z_local_link_integrity_errors =
- cntrs.local_link_integrity_errors;
-
- if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
- ibp->z_excessive_buffer_overrun_errors =
- cntrs.excessive_buffer_overrun_errors;
-
- if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
- ibp->rvp.n_vl15_dropped = 0;
- ibp->z_vl15_dropped = cntrs.vl15_dropped;
- }
-
- if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
- ibp->z_port_xmit_data = cntrs.port_xmit_data;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
- ibp->z_port_rcv_data = cntrs.port_rcv_data;
-
- if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
- ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
- ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
-
- return pma_get_portcounters(pmp, ibdev, port);
-}
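
The "z_" baseline trick above is worth isolating: since the hardware cannot clear its counters, a Set simply snapshots the current value, and every later Get subtracts that snapshot. A self-contained sketch of the idea, where hw_read_counter() is a hypothetical stand-in for the dd->f_portcntr-style accessor:

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_raw = 12345;           /* stand-in for a HW register */

static uint64_t hw_read_counter(void)     /* hypothetical accessor */
{
    return hw_raw;
}

struct sw_counter {
    uint64_t z_base;                      /* snapshot at last "clear" */
};

static void sw_clear(struct sw_counter *c)
{
    c->z_base = hw_read_counter();        /* save; HW is untouched */
}

static uint64_t sw_read(const struct sw_counter *c)
{
    return hw_read_counter() - c->z_base; /* adjusted reading */
}

int main(void)
{
    struct sw_counter c = { 0 };

    sw_clear(&c);                          /* reads as 0 from here on */
    hw_raw += 7;                           /* HW keeps counting */
    printf("%llu\n", (unsigned long long)sw_read(&c));   /* prints 7 */
    return 0;
}
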
-
-static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = dd_from_ppd(ppd);
- struct qib_verbs_counters cntrs;
- u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
- int ret = 0;
- unsigned long flags;
-
- qib_get_counters(ppd, &cntrs);
- /* Get counter values before we save them */
- ret = pma_get_portcounters_cong(pmp, ibdev, port);
-
- if (counter_select & IB_PMA_SEL_CONG_XMIT) {
- spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
- ppd->cong_stats.counter = 0;
- dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
- 0x0);
- spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
- }
- if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
- ibp->z_port_xmit_data = cntrs.port_xmit_data;
- ibp->z_port_rcv_data = cntrs.port_rcv_data;
- ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
- ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
- }
- if (counter_select & IB_PMA_SEL_CONG_ALL) {
- ibp->z_symbol_error_counter =
- cntrs.symbol_error_counter;
- ibp->z_link_error_recovery_counter =
- cntrs.link_error_recovery_counter;
- ibp->z_link_downed_counter =
- cntrs.link_downed_counter;
- ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
- ibp->z_port_rcv_remphys_errors =
- cntrs.port_rcv_remphys_errors;
- ibp->z_port_xmit_discards =
- cntrs.port_xmit_discards;
- ibp->z_local_link_integrity_errors =
- cntrs.local_link_integrity_errors;
- ibp->z_excessive_buffer_overrun_errors =
- cntrs.excessive_buffer_overrun_errors;
- ibp->rvp.n_vl15_dropped = 0;
- ibp->z_vl15_dropped = cntrs.vl15_dropped;
- }
-
- return ret;
-}
-
-static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u64 swords, rwords, spkts, rpkts, xwait;
- struct qib_pma_counters pma;
-
- qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
-
- if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
- ibp->z_port_xmit_data = swords;
-
- if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
- ibp->z_port_rcv_data = rwords;
-
- if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
- ibp->z_port_xmit_packets = spkts;
-
- if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
- ibp->z_port_rcv_packets = rpkts;
-
- qib_snapshot_pmacounters(ibp, &pma);
-
- if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
- ibp->z_unicast_xmit = pma.n_unicast_xmit;
-
- if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
- ibp->z_unicast_rcv = pma.n_unicast_rcv;
-
- if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
- ibp->z_multicast_xmit = pma.n_multicast_xmit;
-
- if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
- ibp->z_multicast_rcv = pma.n_multicast_rcv;
-
- return pma_get_portcounters_ext(pmp, ibdev, port);
-}
-
-static int process_subn(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_smp *smp = (struct ib_smp *)out_mad;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- int ret;
-
- *out_mad = *in_mad;
- if (smp->class_version != 1) {
- smp->status |= IB_SMP_UNSUP_VERSION;
- ret = reply(smp);
- goto bail;
- }
-
- ret = check_mkey(ibp, smp, mad_flags);
- if (ret) {
- u32 port_num = be32_to_cpu(smp->attr_mod);
-
- /*
- * If this is a get/set portinfo, we already check the
- * M_Key if the MAD is for another port and the M_Key
- * is OK on the receiving port. This check is needed
- * to increment the error counters when the M_Key
- * fails to match on *both* ports.
- */
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
- (smp->method == IB_MGMT_METHOD_GET ||
- smp->method == IB_MGMT_METHOD_SET) &&
- port_num && port_num <= ibdev->phys_port_cnt &&
- port != port_num)
- (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
- ret = IB_MAD_RESULT_FAILURE;
- goto bail;
- }
-
- switch (smp->method) {
- case IB_MGMT_METHOD_GET:
- switch (smp->attr_id) {
- case IB_SMP_ATTR_NODE_DESC:
- ret = subn_get_nodedescription(smp, ibdev);
- goto bail;
- case IB_SMP_ATTR_NODE_INFO:
- ret = subn_get_nodeinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_GUID_INFO:
- ret = subn_get_guidinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_PORT_INFO:
- ret = subn_get_portinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_PKEY_TABLE:
- ret = subn_get_pkeytable(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_SL_TO_VL_TABLE:
- ret = subn_get_sl_to_vl(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_VL_ARB_TABLE:
- ret = subn_get_vl_arb(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_SM_INFO:
- if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
- ret = IB_MAD_RESULT_SUCCESS |
- IB_MAD_RESULT_CONSUMED;
- goto bail;
- }
- if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- }
- fallthrough;
- default:
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply(smp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_SET:
- switch (smp->attr_id) {
- case IB_SMP_ATTR_GUID_INFO:
- ret = subn_set_guidinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_PORT_INFO:
- ret = subn_set_portinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_PKEY_TABLE:
- ret = subn_set_pkeytable(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_SL_TO_VL_TABLE:
- ret = subn_set_sl_to_vl(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_VL_ARB_TABLE:
- ret = subn_set_vl_arb(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_SM_INFO:
- if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
- ret = IB_MAD_RESULT_SUCCESS |
- IB_MAD_RESULT_CONSUMED;
- goto bail;
- }
- if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- }
- fallthrough;
- default:
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply(smp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_TRAP_REPRESS:
- if (smp->attr_id == IB_SMP_ATTR_NOTICE)
- ret = subn_trap_repress(smp, ibdev, port);
- else {
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply(smp);
- }
- goto bail;
-
- case IB_MGMT_METHOD_TRAP:
- case IB_MGMT_METHOD_REPORT:
- case IB_MGMT_METHOD_REPORT_RESP:
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
-
- case IB_MGMT_METHOD_SEND:
- if (ib_get_smp_direction(smp) &&
- smp->attr_id == QIB_VENDOR_IPG) {
- ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
- smp->data[0]);
- ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- } else
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
-
- default:
- smp->status |= IB_SMP_UNSUP_METHOD;
- ret = reply(smp);
- }
-
-bail:
- return ret;
-}
-
-static int process_perf(struct ib_device *ibdev, u8 port,
- const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
- int ret;
-
- *out_mad = *in_mad;
- if (pmp->mad_hdr.class_version != 1) {
- pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- switch (pmp->mad_hdr.method) {
- case IB_MGMT_METHOD_GET:
- switch (pmp->mad_hdr.attr_id) {
- case IB_PMA_CLASS_PORT_INFO:
- ret = pma_get_classportinfo(pmp, ibdev);
- goto bail;
- case IB_PMA_PORT_SAMPLES_CONTROL:
- ret = pma_get_portsamplescontrol(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_SAMPLES_RESULT:
- ret = pma_get_portsamplesresult(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_SAMPLES_RESULT_EXT:
- ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS:
- ret = pma_get_portcounters(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS_EXT:
- ret = pma_get_portcounters_ext(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS_CONG:
- ret = pma_get_portcounters_cong(pmp, ibdev, port);
- goto bail;
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_SET:
- switch (pmp->mad_hdr.attr_id) {
- case IB_PMA_PORT_SAMPLES_CONTROL:
- ret = pma_set_portsamplescontrol(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS:
- ret = pma_set_portcounters(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS_EXT:
- ret = pma_set_portcounters_ext(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS_CONG:
- ret = pma_set_portcounters_cong(pmp, ibdev, port);
- goto bail;
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_TRAP:
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
-
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
- ret = reply((struct ib_smp *) pmp);
- }
-
-bail:
- return ret;
-}
-
-static int cc_get_classportinfo(struct ib_cc_mad *ccp,
- struct ib_device *ibdev)
-{
- struct ib_cc_classportinfo_attr *p =
- (struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
-
- p->base_version = 1;
- p->class_version = 1;
- p->cap_mask = 0;
-
- /*
- * Expected response time is 4.096 usec * 2^18 == 1.073741824 sec.
- */
- p->resp_time_value = 18;
-
- return reply((struct ib_smp *) ccp);
-}
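
The encoding behind resp_time_value = 18 is the usual ClassPortInfo rule: response time = 4.096 usec * 2^N. A one-line check of the arithmetic quoted in the comment:

#include <stdio.h>

int main(void)
{
    unsigned n = 18;
    double sec = 4.096e-6 * (double)(1u << n);

    printf("N=%u -> %.9f sec\n", n, sec);   /* N=18 -> 1.073741824 sec */
    return 0;
}
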
-
-static int cc_get_congestion_info(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_cc_info_attr *p =
- (struct ib_cc_info_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- p->congestion_info = 0;
- p->control_table_cap = ppd->cc_max_table_entries;
-
- return reply((struct ib_smp *) ccp);
-}
-
-static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- int i;
- struct ib_cc_congestion_setting_attr *p =
- (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct ib_cc_congestion_entry_shadow *entries;
-
- spin_lock(&ppd->cc_shadow_lock);
-
- entries = ppd->congestion_entries_shadow->entries;
- p->port_control = cpu_to_be16(
- ppd->congestion_entries_shadow->port_control);
- p->control_map = cpu_to_be16(
- ppd->congestion_entries_shadow->control_map);
- for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
- p->entries[i].ccti_increase = entries[i].ccti_increase;
- p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
- p->entries[i].trigger_threshold = entries[i].trigger_threshold;
- p->entries[i].ccti_min = entries[i].ccti_min;
- }
-
- spin_unlock(&ppd->cc_shadow_lock);
-
- return reply((struct ib_smp *) ccp);
-}
-
-static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_cc_table_attr *p =
- (struct ib_cc_table_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
- u32 max_cct_block;
- u32 cct_entry;
- struct ib_cc_table_entry_shadow *entries;
- int i;
-
- /* Is the table index more than what is supported? */
- if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
- goto bail;
-
- spin_lock(&ppd->cc_shadow_lock);
-
- max_cct_block =
- (ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
- max_cct_block = max_cct_block ? max_cct_block - 1 : 0;
-
- if (cct_block_index > max_cct_block) {
- spin_unlock(&ppd->cc_shadow_lock);
- goto bail;
- }
-
- ccp->attr_mod = cpu_to_be32(cct_block_index);
-
- cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);
-
- cct_entry--;
-
- p->ccti_limit = cpu_to_be16(cct_entry);
-
- entries = &ppd->ccti_entries_shadow->
- entries[IB_CCT_ENTRIES * cct_block_index];
- cct_entry %= IB_CCT_ENTRIES;
-
- for (i = 0; i <= cct_entry; i++)
- p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);
-
- spin_unlock(&ppd->cc_shadow_lock);
-
- return reply((struct ib_smp *) ccp);
-
-bail:
- return reply_failure((struct ib_smp *) ccp);
-}
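
The block arithmetic in cc_get_congestion_control_table() is compact, so here is a sketch that replays it for one block under the same 64-entries-per-MAD assumption: the attribute modifier selects the block, and ccti_limit reports the last valid absolute entry index.

#include <stdio.h>

#define IB_CCT_ENTRIES 64

int main(void)
{
    unsigned block = 2;                                  /* cct_block_index */
    unsigned limit = IB_CCT_ENTRIES * (block + 1) - 1;   /* ccti_limit */
    unsigned last_in_block = limit % IB_CCT_ENTRIES;     /* 63 for a full block */

    printf("block %u: entries [%u..%u], %u copied\n",
           block, block * IB_CCT_ENTRIES, limit, last_in_block + 1);
    /* prints: block 2: entries [128..191], 64 copied */
    return 0;
}
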
-
-static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_cc_congestion_setting_attr *p =
- (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- int i;
-
- ppd->cc_sl_control_map = be16_to_cpu(p->control_map);
-
- for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
- ppd->congestion_entries[i].ccti_increase =
- p->entries[i].ccti_increase;
-
- ppd->congestion_entries[i].ccti_timer =
- be16_to_cpu(p->entries[i].ccti_timer);
-
- ppd->congestion_entries[i].trigger_threshold =
- p->entries[i].trigger_threshold;
-
- ppd->congestion_entries[i].ccti_min =
- p->entries[i].ccti_min;
- }
-
- return reply((struct ib_smp *) ccp);
-}
-
-static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_cc_table_attr *p =
- (struct ib_cc_table_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
- u32 cct_entry;
- struct ib_cc_table_entry_shadow *entries;
- int i;
-
- /* Is the table index more than what is supported? */
- if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
- goto bail;
-
- /* If this packet is the first in the sequence then
- * zero the total table entry count.
- */
- if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
- ppd->total_cct_entry = 0;
-
- cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES;
-
- /* cct_entry is 0 to 63 */
- ppd->total_cct_entry += (cct_entry + 1);
-
- if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
- goto bail;
-
- ppd->ccti_limit = be16_to_cpu(p->ccti_limit);
-
- entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);
-
- for (i = 0; i <= cct_entry; i++)
- entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);
-
- spin_lock(&ppd->cc_shadow_lock);
-
- ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
- memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
- (ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));
-
- ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
- ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
- memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
- IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));
-
- spin_unlock(&ppd->cc_shadow_lock);
-
- return reply((struct ib_smp *) ccp);
-
-bail:
- return reply_failure((struct ib_smp *) ccp);
-}
-
-static int process_cc(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
- *out_mad = *in_mad;
-
- if (ccp->class_version != 2) {
- ccp->status |= IB_SMP_UNSUP_VERSION;
- return reply((struct ib_smp *)ccp);
- }
-
- switch (ccp->method) {
- case IB_MGMT_METHOD_GET:
- switch (ccp->attr_id) {
- case IB_CC_ATTR_CLASSPORTINFO:
- return cc_get_classportinfo(ccp, ibdev);
- case IB_CC_ATTR_CONGESTION_INFO:
- return cc_get_congestion_info(ccp, ibdev, port);
- case IB_CC_ATTR_CA_CONGESTION_SETTING:
- return cc_get_congestion_setting(ccp, ibdev, port);
- case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
- return cc_get_congestion_control_table(ccp, ibdev, port);
- default:
- ccp->status |= IB_SMP_UNSUP_METH_ATTR;
- return reply((struct ib_smp *) ccp);
- }
- case IB_MGMT_METHOD_SET:
- switch (ccp->attr_id) {
- case IB_CC_ATTR_CA_CONGESTION_SETTING:
- return cc_set_congestion_setting(ccp, ibdev, port);
- case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
- return cc_set_congestion_control_table(ccp, ibdev, port);
- default:
- ccp->status |= IB_SMP_UNSUP_METH_ATTR;
- return reply((struct ib_smp *) ccp);
- }
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- return IB_MAD_RESULT_SUCCESS;
- }
-
- /* method is unsupported */
- ccp->status |= IB_SMP_UNSUP_METHOD;
- return reply((struct ib_smp *) ccp);
-}
-
-/**
- * qib_process_mad - process an incoming MAD packet
- * @ibdev: the infiniband device this packet came in on
- * @mad_flags: MAD flags
- * @port: the port number this packet came in on
- * @in_wc: the work completion entry for this packet
- * @in_grh: the global route header for this packet
- * @in: the incoming MAD
- * @out: any outgoing MAD reply
- * @out_mad_size: size of the outgoing MAD reply
- * @out_mad_pkey_index: unused
- *
- * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
- * interested in processing.
- *
- * Note that the verbs framework has already done the MAD sanity checks,
- * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
- * MADs.
- *
- * This is called by the ib_mad module.
- */
-int qib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad *in, struct ib_mad *out,
- size_t *out_mad_size, u16 *out_mad_pkey_index)
-{
- int ret;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- switch (in->mad_hdr.mgmt_class) {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- case IB_MGMT_CLASS_SUBN_LID_ROUTED:
- ret = process_subn(ibdev, mad_flags, port, in, out);
- goto bail;
-
- case IB_MGMT_CLASS_PERF_MGMT:
- ret = process_perf(ibdev, port, in, out);
- goto bail;
-
- case IB_MGMT_CLASS_CONG_MGMT:
- if (!ppd->congestion_entries_shadow ||
- !qib_cc_table_size) {
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- }
- ret = process_cc(ibdev, mad_flags, port, in, out);
- goto bail;
-
- default:
- ret = IB_MAD_RESULT_SUCCESS;
- }
-
-bail:
- return ret;
-}
-
-static void xmit_wait_timer_func(struct timer_list *t)
-{
- struct qib_pportdata *ppd = timer_container_of(ppd, t,
- cong_stats.timer);
- struct qib_devdata *dd = dd_from_ppd(ppd);
- unsigned long flags;
- u8 status;
-
- spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
- if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
- status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- if (status == IB_PMA_SAMPLE_STATUS_DONE) {
- /* save counter cache */
- cache_hw_sample_counters(ppd);
- ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
- } else
- goto done;
- }
- ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
- dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
-done:
- spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
- mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
-}
-
-void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(ibdev,
- struct qib_devdata, verbs_dev);
-
- /* Initialize xmit_wait structure */
- dd->pport[port_idx].cong_stats.counter = 0;
- timer_setup(&dd->pport[port_idx].cong_stats.timer,
- xmit_wait_timer_func, 0);
- dd->pport[port_idx].cong_stats.timer.expires = 0;
- add_timer(&dd->pport[port_idx].cong_stats.timer);
-}
-
-void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(ibdev,
- struct qib_devdata, verbs_dev);
-
- if (dd->pport[port_idx].cong_stats.timer.function)
- timer_delete_sync(&dd->pport[port_idx].cong_stats.timer);
-
- if (dd->pport[port_idx].ibport_data.smi_ah)
- rdma_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah,
- RDMA_DESTROY_AH_SLEEPABLE);
-}
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
deleted file mode 100644
index 57e99dc0d80c..000000000000
--- a/drivers/infiniband/hw/qib/qib_mad.h
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _QIB_MAD_H
-#define _QIB_MAD_H
-
-#include <rdma/ib_pma.h>
-
-#define IB_SMP_UNSUP_VERSION \
-cpu_to_be16(IB_MGMT_MAD_STATUS_BAD_VERSION)
-
-#define IB_SMP_UNSUP_METHOD \
-cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD)
-
-#define IB_SMP_UNSUP_METH_ATTR \
-cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB)
-
-#define IB_SMP_INVALID_FIELD \
-cpu_to_be16(IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE)
-
-#define IB_VLARB_LOWPRI_0_31 1
-#define IB_VLARB_LOWPRI_32_63 2
-#define IB_VLARB_HIGHPRI_0_31 3
-#define IB_VLARB_HIGHPRI_32_63 4
-
-#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
-
-struct ib_pma_portcounters_cong {
- u8 reserved;
- u8 reserved1;
- __be16 port_check_rate;
- __be16 symbol_error_counter;
- u8 link_error_recovery_counter;
- u8 link_downed_counter;
- __be16 port_rcv_errors;
- __be16 port_rcv_remphys_errors;
- __be16 port_rcv_switch_relay_errors;
- __be16 port_xmit_discards;
- u8 port_xmit_constraint_errors;
- u8 port_rcv_constraint_errors;
- u8 reserved2;
- u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */
- __be16 reserved3;
- __be16 vl15_dropped;
- __be64 port_xmit_data;
- __be64 port_rcv_data;
- __be64 port_xmit_packets;
- __be64 port_rcv_packets;
- __be64 port_xmit_wait;
- __be64 port_adr_events;
-} __packed;
-
-#define IB_PMA_CONG_HW_CONTROL_TIMER 0x00
-#define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01
-
-#define QIB_XMIT_RATE_UNSUPPORTED 0x0
-#define QIB_XMIT_RATE_PICO 0x7
-/* number of 4nsec cycles equaling 2secs */
-#define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC
-
-#define IB_PMA_SEL_CONG_ALL 0x01
-#define IB_PMA_SEL_CONG_PORT_DATA 0x02
-#define IB_PMA_SEL_CONG_XMIT 0x04
-#define IB_PMA_SEL_CONG_ROUTING 0x08
-
-/*
- * Congestion control class attributes
- */
-#define IB_CC_ATTR_CLASSPORTINFO cpu_to_be16(0x0001)
-#define IB_CC_ATTR_NOTICE cpu_to_be16(0x0002)
-#define IB_CC_ATTR_CONGESTION_INFO cpu_to_be16(0x0011)
-#define IB_CC_ATTR_CONGESTION_KEY_INFO cpu_to_be16(0x0012)
-#define IB_CC_ATTR_CONGESTION_LOG cpu_to_be16(0x0013)
-#define IB_CC_ATTR_SWITCH_CONGESTION_SETTING cpu_to_be16(0x0014)
-#define IB_CC_ATTR_SWITCH_PORT_CONGESTION_SETTING cpu_to_be16(0x0015)
-#define IB_CC_ATTR_CA_CONGESTION_SETTING cpu_to_be16(0x0016)
-#define IB_CC_ATTR_CONGESTION_CONTROL_TABLE cpu_to_be16(0x0017)
-#define IB_CC_ATTR_TIME_STAMP cpu_to_be16(0x0018)
-
-/* generalizations for threshold values */
-#define IB_CC_THRESHOLD_NONE 0x0
-#define IB_CC_THRESHOLD_MIN 0x1
-#define IB_CC_THRESHOLD_MAX 0xf
-
-/* CCA MAD header constants */
-#define IB_CC_MAD_LOGDATA_LEN 32
-#define IB_CC_MAD_MGMTDATA_LEN 192
-
-struct ib_cc_mad {
- u8 base_version;
- u8 mgmt_class;
- u8 class_version;
- u8 method;
- __be16 status;
- __be16 class_specific;
- __be64 tid;
- __be16 attr_id;
- __be16 resv;
- __be32 attr_mod;
- __be64 cckey;
-
- /* For CongestionLog attribute only */
- u8 log_data[IB_CC_MAD_LOGDATA_LEN];
-
- u8 mgmt_data[IB_CC_MAD_MGMTDATA_LEN];
-} __packed;
-
-/*
- * Congestion Control class portinfo capability mask bits
- */
-#define IB_CC_CPI_CM_TRAP_GEN cpu_to_be16(1 << 0)
-#define IB_CC_CPI_CM_GET_SET_NOTICE cpu_to_be16(1 << 1)
-#define IB_CC_CPI_CM_CAP2 cpu_to_be16(1 << 2)
-#define IB_CC_CPI_CM_ENHANCEDPORT0_CC cpu_to_be16(1 << 8)
-
-struct ib_cc_classportinfo_attr {
- u8 base_version;
- u8 class_version;
- __be16 cap_mask;
- u8 reserved[3];
- u8 resp_time_value; /* only lower 5 bits */
- union ib_gid redirect_gid;
- __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 redirect_lid;
- __be16 redirect_pkey;
- __be32 redirect_qp; /* only lower 24 bits */
- __be32 redirect_qkey;
- union ib_gid trap_gid;
- __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 trap_lid;
- __be16 trap_pkey;
- __be32 trap_hl_qp; /* 8, 24 bits respectively */
- __be32 trap_qkey;
-} __packed;
-
-/* Congestion control traps */
-#define IB_CC_TRAP_KEY_VIOLATION 0x0000
-
-struct ib_cc_trap_key_violation_attr {
- __be16 source_lid;
- u8 method;
- u8 reserved1;
- __be16 attrib_id;
- __be32 attrib_mod;
- __be32 qp;
- __be64 cckey;
- u8 sgid[16];
- u8 padding[24];
-} __packed;
-
-/* Congestion info flags */
-#define IB_CC_CI_FLAGS_CREDIT_STARVATION 0x1
-#define IB_CC_TABLE_CAP_DEFAULT 31
-
-struct ib_cc_info_attr {
- __be16 congestion_info;
- u8 control_table_cap; /* Multiple of 64 entry unit CCTs */
-} __packed;
-
-struct ib_cc_key_info_attr {
- __be64 cckey;
- u8 protect;
- __be16 lease_period;
- __be16 violations;
-} __packed;
-
-#define IB_CC_CL_CA_LOGEVENTS_LEN 208
-
-struct ib_cc_log_attr {
- u8 log_type;
- u8 congestion_flags;
- __be16 threshold_event_counter;
- __be16 threshold_congestion_event_map;
- __be16 current_time_stamp;
- u8 log_events[IB_CC_CL_CA_LOGEVENTS_LEN];
-} __packed;
-
-#define IB_CC_CLEC_SERVICETYPE_RC 0x0
-#define IB_CC_CLEC_SERVICETYPE_UC 0x1
-#define IB_CC_CLEC_SERVICETYPE_RD 0x2
-#define IB_CC_CLEC_SERVICETYPE_UD 0x3
-
-struct ib_cc_log_event {
- u8 local_qp_cn_entry;
- u8 remote_qp_number_cn_entry[3];
- u8 sl_cn_entry:4;
- u8 service_type_cn_entry:4;
- __be32 remote_lid_cn_entry;
- __be32 timestamp_cn_entry;
-} __packed;
-
-/* Sixteen congestion entries */
-#define IB_CC_CCS_ENTRIES 16
-
-/* Port control flags */
-#define IB_CC_CCS_PC_SL_BASED 0x01
-
-struct ib_cc_congestion_entry {
- u8 ccti_increase;
- __be16 ccti_timer;
- u8 trigger_threshold;
- u8 ccti_min; /* min CCTI for cc table */
-} __packed;
-
-struct ib_cc_congestion_entry_shadow {
- u8 ccti_increase;
- u16 ccti_timer;
- u8 trigger_threshold;
- u8 ccti_min; /* min CCTI for cc table */
-} __packed;
-
-struct ib_cc_congestion_setting_attr {
- __be16 port_control;
- __be16 control_map;
- struct ib_cc_congestion_entry entries[IB_CC_CCS_ENTRIES];
-} __packed;
-
-struct ib_cc_congestion_setting_attr_shadow {
- u16 port_control;
- u16 control_map;
- struct ib_cc_congestion_entry_shadow entries[IB_CC_CCS_ENTRIES];
-} __packed;
-
-#define IB_CC_TABLE_ENTRY_INCREASE_DEFAULT 1
-#define IB_CC_TABLE_ENTRY_TIMER_DEFAULT 1
-
-/* 64 Congestion Control table entries in a single MAD */
-#define IB_CCT_ENTRIES 64
-#define IB_CCT_MIN_ENTRIES (IB_CCT_ENTRIES * 2)
-
-struct ib_cc_table_entry {
- __be16 entry; /* shift:2, multiplier:14 */
-};
-
-struct ib_cc_table_entry_shadow {
- u16 entry; /* shift:2, multiplier:14 */
-};
-
-struct ib_cc_table_attr {
- __be16 ccti_limit; /* max CCTI for cc table */
- struct ib_cc_table_entry ccti_entries[IB_CCT_ENTRIES];
-} __packed;
-
-struct ib_cc_table_attr_shadow {
- u16 ccti_limit; /* max CCTI for cc table */
- struct ib_cc_table_entry_shadow ccti_entries[IB_CCT_ENTRIES];
-} __packed;
-
-#define CC_TABLE_SHADOW_MAX \
- (IB_CC_TABLE_CAP_DEFAULT * IB_CCT_ENTRIES)
-
-struct cc_table_shadow {
- u16 ccti_last_entry;
- struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX];
-} __packed;
-
-/*
- * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
- * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
- * We support 5 counters which only count the mandatory quantities.
- */
-#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 \
- cpu_to_be32(COUNTER_MASK(1, 0) | \
- COUNTER_MASK(1, 1) | \
- COUNTER_MASK(1, 2) | \
- COUNTER_MASK(1, 3) | \
- COUNTER_MASK(1, 4))
-
-#endif /* _QIB_MAD_H */
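
For reference, expanding COUNTER_MASK0_9 by hand: counter n's 3-bit field lives at bit offset (9 - n) * 3, so enabling the five mandatory counters with q = 1 sets bits 27, 24, 21, 18, and 15. A host-order sketch (the real constant is additionally wrapped in cpu_to_be32()):

#include <stdio.h>

#define COUNTER_MASK(q, n) ((unsigned)(q) << ((9 - (n)) * 3))

int main(void)
{
    unsigned mask = 0;
    int n;

    for (n = 0; n < 5; n++)
        mask |= COUNTER_MASK(1, n);

    printf("mask0_9 = 0x%08x\n", mask);   /* prints 0x09248000 */
    return 0;
}
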
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
deleted file mode 100644
index 58c1d62d341b..000000000000
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * Copyright (c) 2010 - 2017 Intel Corporation. All rights reserved.
- * Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-
-#include "qib.h"
-
-/*
- * This file contains PCIe utility routines that are common to the
- * various QLogic InfiniPath adapters
- */
-
-/*
- * Code to adjust PCIe capabilities.
- * To minimize the change footprint, we call it
- * from qib_pcie_params, which every chip-specific
- * file calls, even though this violates some
- * expectations of harmlessness.
- */
-static void qib_tune_pcie_caps(struct qib_devdata *);
-static void qib_tune_pcie_coalesce(struct qib_devdata *);
-
-/*
- * Do all the common PCIe setup and initialization.
- * devdata is not yet allocated, and is not allocated until after this
- * routine returns success. Therefore qib_dev_err() can't be used for error
- * printing.
- */
-int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int ret;
-
- ret = pci_enable_device(pdev);
- if (ret) {
- /*
- * This can happen (in theory) iff:
- * We did a chip reset, and then failed to reprogram the
- * BAR, or the chip reset due to an internal error. We then
- * unloaded the driver and reloaded it.
- *
- * Both reset cases set the BAR back to initial state. For
- * the latter case, the AER sticky error bit at offset 0x718
- * should be set, but the Linux kernel doesn't yet know
- * about that, it appears. If the original BAR was retained
- * in the kernel data structures, this may be OK.
- */
- qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
- -ret);
- goto done;
- }
-
- ret = pci_request_regions(pdev, QIB_DRV_NAME);
- if (ret) {
- qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
- goto bail;
- }
-
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (ret) {
- /*
- * If the 64 bit setup fails, try 32 bit; some systems with
- * 2GB or less memory installed do not set up 64 bit maps.
- */
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (ret) {
- qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
- goto bail;
- }
- }
-
- pci_set_master(pdev);
- goto done;
-
-bail:
- pci_disable_device(pdev);
- pci_release_regions(pdev);
-done:
- return ret;
-}
-
-/*
- * Do remaining PCIe setup, once dd is allocated, and save away
- * fields required to re-initialize after a chip reset, or for
- * various other purposes
- */
-int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- unsigned long len;
- resource_size_t addr;
-
- dd->pcidev = pdev;
- pci_set_drvdata(pdev, dd);
-
- addr = pci_resource_start(pdev, 0);
- len = pci_resource_len(pdev, 0);
-
- dd->kregbase = ioremap(addr, len);
- if (!dd->kregbase)
- return -ENOMEM;
-
- dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
- dd->physaddr = addr; /* used for io_remap, etc. */
-
- /*
- * Save BARs to rewrite after device reset. Save all 64 bits of
- * BAR, just in case.
- */
- dd->pcibar0 = addr;
- dd->pcibar1 = addr >> 32;
- dd->deviceid = ent->device; /* save for later use */
- dd->vendorid = ent->vendor;
-
- return 0;
-}
-
-/*
- * Do PCIe cleanup, after chip-specific cleanup, etc. Just prior
- * to releasing the dd memory.
- * void because the core pcie cleanup routines all return void
- */
-void qib_pcie_ddcleanup(struct qib_devdata *dd)
-{
- u64 __iomem *base = (void __iomem *) dd->kregbase;
-
- dd->kregbase = NULL;
- iounmap(base);
- if (dd->piobase)
- iounmap(dd->piobase);
- if (dd->userbase)
- iounmap(dd->userbase);
- if (dd->piovl15base)
- iounmap(dd->piovl15base);
-
- pci_disable_device(dd->pcidev);
- pci_release_regions(dd->pcidev);
-
- pci_set_drvdata(dd->pcidev, NULL);
-}
-
-/*
- * We save the msi lo and hi values, so we can restore them after
- * chip reset (the kernel PCI infrastructure doesn't yet handle that
- * correctly).
- */
-static void qib_cache_msi_info(struct qib_devdata *dd, int pos)
-{
- struct pci_dev *pdev = dd->pcidev;
- u16 control;
-
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, &dd->msi_lo);
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, &dd->msi_hi);
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
-
- /* now save the data (vector) info */
- pci_read_config_word(pdev,
- pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
- &dd->msi_data);
-}
-
-int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
-{
- u16 linkstat, speed;
- int nvec;
- int maxvec;
- unsigned int flags = PCI_IRQ_MSIX | PCI_IRQ_MSI;
-
- if (!pci_is_pcie(dd->pcidev)) {
- qib_dev_err(dd, "Can't find PCI Express capability!\n");
- /* set up something... */
- dd->lbus_width = 1;
- dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
- nvec = -1;
- goto bail;
- }
-
- if (dd->flags & QIB_HAS_INTX)
- flags |= PCI_IRQ_INTX;
- maxvec = (nent && *nent) ? *nent : 1;
- nvec = pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
- if (nvec < 0)
- goto bail;
-
- /*
- * If nent exists, make sure to record how many vectors were allocated.
- * If msix_enabled is false, return 0 so the fallback code works
- * correctly.
- */
- if (nent)
- *nent = !dd->pcidev->msix_enabled ? 0 : nvec;
-
- if (dd->pcidev->msi_enabled)
- qib_cache_msi_info(dd, dd->pcidev->msi_cap);
-
- pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
- /*
- * speed is bits 0-3, linkwidth is bits 4-8;
- * no defines for them in headers
- */
- speed = linkstat & 0xf;
- linkstat >>= 4;
- linkstat &= 0x1f;
- dd->lbus_width = linkstat;
-
- switch (speed) {
- case 1:
- dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
- break;
- case 2:
- dd->lbus_speed = 5000; /* Gen2, 5GHz */
- break;
- default: /* not defined, assume gen1 */
- dd->lbus_speed = 2500;
- break;
- }
-
- /*
- * Check against expected pcie width and complain if "wrong"
- * on first initialization, not afterwards (i.e., reset).
- */
- if (minw && linkstat < minw)
- qib_dev_err(dd,
- "PCIe width %u (x%u HCA), performance reduced\n",
- linkstat, minw);
-
- qib_tune_pcie_caps(dd);
-
- qib_tune_pcie_coalesce(dd);
-
-bail:
- /* fill in string, even on errors */
- snprintf(dd->lbus_info, sizeof(dd->lbus_info),
- "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
- return nvec < 0 ? nvec : 0;
-}
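
The open-coded link status decode above is simple masking; here is the same extraction on a sample value, assuming the bit layout stated in the comment (speed code in bits 0-3, negotiated width starting at bit 4, of which the driver masks 5 bits):

#include <stdio.h>

int main(void)
{
    unsigned short linkstat = 0x0082;   /* example: x8 link, speed code 2 */
    unsigned speed = linkstat & 0xf;    /* 1 = 2.5 GT/s, 2 = 5 GT/s */
    unsigned width = (linkstat >> 4) & 0x1f;

    printf("speed code %u, width x%u\n", speed, width);   /* 2, x8 */
    return 0;
}
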
-
-/**
- * qib_free_irq - Cleanup INTx and MSI interrupts
- * @dd: valid pointer to qib dev data
- *
- * Since cleanup for INTx and MSI interrupts is trivial, have a common
- * routine.
- *
- */
-void qib_free_irq(struct qib_devdata *dd)
-{
- pci_free_irq(dd->pcidev, 0, dd);
- pci_free_irq_vectors(dd->pcidev);
-}
-
-/*
- * Setup pcie interrupt stuff again after a reset. I'd like to just call
- * pci_enable_msi() again for msi, but when I do that,
- * the MSI enable bit doesn't get set in the command word, and
- * we switch to a different interrupt vector, which is confusing,
- * so I instead just do it all inline. Perhaps we can somehow tie this
- * into the PCIe hotplug support at some point.
- */
-int qib_reinit_intr(struct qib_devdata *dd)
-{
- int pos;
- u16 control;
- int ret = 0;
-
- /* If we aren't using MSI, don't restore it */
- if (!dd->msi_lo)
- goto bail;
-
- pos = dd->pcidev->msi_cap;
- if (!pos) {
- qib_dev_err(dd,
- "Can't find MSI capability, can't restore MSI settings\n");
- ret = 0;
- /* nothing special for MSIx, just MSI */
- goto bail;
- }
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
- dd->msi_lo);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
- dd->msi_hi);
- pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE)) {
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
- control);
- }
- /* now rewrite the data (vector) info */
- pci_write_config_word(dd->pcidev, pos +
- ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
- dd->msi_data);
- ret = 1;
-bail:
- qib_free_irq(dd);
-
- if (!ret && (dd->flags & QIB_HAS_INTX))
- ret = 1;
-
- /* and now set the pci master bit again */
- pci_set_master(dd->pcidev);
-
- return ret;
-}
-
-/*
- * These two routines are helpers for the device reset code
- * to move all the pcie code out of the chip-specific driver code.
- */
-void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
-{
- pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
- pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
- pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
-}
-
-void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
-{
- int r;
-
- r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
- dd->pcibar0);
- if (r)
- qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
- r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
- dd->pcibar1);
- if (r)
- qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
- /* now re-enable memory access, and restore cosmetic settings */
- pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
- pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
- pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
- r = pci_enable_device(dd->pcidev);
- if (r)
- qib_dev_err(dd,
- "pci_enable_device failed after reset: %d\n", r);
-}
-
-
-static int qib_pcie_coalesce;
-module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
-MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
-
-/*
- * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
- * chipsets. This is known to be unsafe for some revisions of some
- * of these chipsets, with some BIOS settings, and enabling it on those
- * systems may result in the system crashing, and/or data corruption.
- */
-static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
-{
- struct pci_dev *parent;
- u16 devid;
- u32 mask, bits, val;
-
- if (!qib_pcie_coalesce)
- return;
-
- /* Find out supported and configured values for parent (root) */
- parent = dd->pcidev->bus->self;
- if (parent->bus->parent) {
- qib_devinfo(dd->pcidev, "Parent not root\n");
- return;
- }
- if (!pci_is_pcie(parent))
- return;
- if (parent->vendor != 0x8086)
- return;
-
- /*
- * - bit 12: Max_rdcmp_Imt_EN: need to set to 1
- * - bit 11: COALESCE_FORCE: need to set to 0
- * - bit 10: COALESCE_EN: need to set to 1
- * (but limitations on some chipsets)
- *
- * On the Intel 5000, 5100, and 7300 chipsets, there is
- * also: - bit 25:24: COALESCE_MODE, need to set to 0
- */
- devid = parent->device;
- if (devid >= 0x25e2 && devid <= 0x25fa) {
- /* 5000 P/V/X/Z */
- if (parent->revision <= 0xb2)
- bits = 1U << 10;
- else
- bits = 7U << 10;
- mask = (3U << 24) | (7U << 10);
- } else if (devid >= 0x65e2 && devid <= 0x65fa) {
- /* 5100 */
- bits = 1U << 10;
- mask = (3U << 24) | (7U << 10);
- } else if (devid >= 0x4021 && devid <= 0x402e) {
- /* 5400 */
- bits = 7U << 10;
- mask = 7U << 10;
- } else if (devid >= 0x3604 && devid <= 0x360a) {
- /* 7300 */
- bits = 7U << 10;
- mask = (3U << 24) | (7U << 10);
- } else {
- /* not one of the chipsets that we know about */
- return;
- }
- pci_read_config_dword(parent, 0x48, &val);
- val &= ~mask;
- val |= bits;
- pci_write_config_dword(parent, 0x48, val);
-}
-
-/*
- * BIOS may not set PCIe bus-utilization parameters for best performance.
- * Check and optionally adjust them to maximize our throughput.
- */
-static int qib_pcie_caps;
-module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
-MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
-
-static void qib_tune_pcie_caps(struct qib_devdata *dd)
-{
- struct pci_dev *parent;
- u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
- u16 rc_mrrs, ep_mrrs, max_mrrs;
-
- /* Find out supported and configured values for parent (root) */
- parent = dd->pcidev->bus->self;
- if (!pci_is_root_bus(parent->bus)) {
- qib_devinfo(dd->pcidev, "Parent not root\n");
- return;
- }
-
- if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
- return;
-
- rc_mpss = parent->pcie_mpss;
- rc_mps = ffs(pcie_get_mps(parent)) - 8;
- /* Find out supported and configured values for endpoint (us) */
- ep_mpss = dd->pcidev->pcie_mpss;
- ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;
-
- /* Find max payload supported by root, endpoint */
- if (rc_mpss > ep_mpss)
- rc_mpss = ep_mpss;
-
- /* If the supported value exceeds the module param limit, clamp it */
- if (rc_mpss > (qib_pcie_caps & 7))
- rc_mpss = qib_pcie_caps & 7;
- /* If current MPS is below min(allowed, supported), bump root payload */
- if (rc_mpss > rc_mps) {
- rc_mps = rc_mpss;
- pcie_set_mps(parent, 128 << rc_mps);
- }
- /* If current MPS is below min(allowed, supported), bump endpoint payload */
- if (rc_mpss > ep_mps) {
- ep_mps = rc_mpss;
- pcie_set_mps(dd->pcidev, 128 << ep_mps);
- }
-
- /*
- * Now the Read Request size.
- * No field for max supported, but PCIe spec limits it to 4096,
- * which is code '5' (log2(4096) - 7)
- */
- max_mrrs = 5;
- if (max_mrrs > ((qib_pcie_caps >> 4) & 7))
- max_mrrs = (qib_pcie_caps >> 4) & 7;
-
- max_mrrs = 128 << max_mrrs;
- rc_mrrs = pcie_get_readrq(parent);
- ep_mrrs = pcie_get_readrq(dd->pcidev);
-
- if (max_mrrs > rc_mrrs) {
- rc_mrrs = max_mrrs;
- pcie_set_readrq(parent, rc_mrrs);
- }
- if (max_mrrs > ep_mrrs) {
- ep_mrrs = max_mrrs;
- pcie_set_readrq(dd->pcidev, ep_mrrs);
- }
-}
-/* End of PCIe capability tuning */
-
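For reference, the MPS/MRRS arithmetic in qib_tune_pcie_caps() above leans
on the PCIe encoding size = 128 << code (so code = log2(size) - 7). A
minimal sketch of that conversion, with hypothetical example_* helper names
(ffs() is the kernel's 1-based find-first-set from <linux/bitops.h>):

static u16 example_size_to_code(u16 bytes)
{
	/* Sizes are powers of two from 128 to 4096; ffs(128) == 8,
	 * so code = ffs(size) - 8, exactly as the driver computes. */
	return ffs(bytes) - 8;
}

static u16 example_code_to_size(u16 code)
{
	return 128 << code;	/* code 0 -> 128 bytes, code 5 -> 4096 */
}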
-/*
- * Everything from here through the qib_pci_err_handler definition is
- * invoked via the PCI error-recovery infrastructure and registered
- * through the driver's pci_error_handlers table.
- */
-static pci_ers_result_t
-qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
-{
- struct qib_devdata *dd = pci_get_drvdata(pdev);
- pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
-
- switch (state) {
- case pci_channel_io_normal:
- qib_devinfo(pdev, "State Normal, ignoring\n");
- break;
-
- case pci_channel_io_frozen:
- qib_devinfo(pdev, "State Frozen, requesting reset\n");
- pci_disable_device(pdev);
- ret = PCI_ERS_RESULT_NEED_RESET;
- break;
-
- case pci_channel_io_perm_failure:
- qib_devinfo(pdev, "State Permanent Failure, disabling\n");
- if (dd) {
- /* no more register accesses! */
- dd->flags &= ~QIB_PRESENT;
- qib_disable_after_error(dd);
- }
- /* else early, or other problem */
- ret = PCI_ERS_RESULT_DISCONNECT;
- break;
-
- default: /* shouldn't happen */
- qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
- state);
- break;
- }
- return ret;
-}
-
-static pci_ers_result_t
-qib_pci_mmio_enabled(struct pci_dev *pdev)
-{
- u64 words = 0U;
- struct qib_devdata *dd = pci_get_drvdata(pdev);
- pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
-
- if (dd && dd->pport) {
- words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
- if (words == ~0ULL)
- ret = PCI_ERS_RESULT_NEED_RESET;
- }
- qib_devinfo(pdev,
- "QIB mmio_enabled function called, read wordscntr %Lx, returning %d\n",
- words, ret);
- return ret;
-}
-
-static pci_ers_result_t
-qib_pci_slot_reset(struct pci_dev *pdev)
-{
- qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
- return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
-static void
-qib_pci_resume(struct pci_dev *pdev)
-{
- struct qib_devdata *dd = pci_get_drvdata(pdev);
-
- qib_devinfo(pdev, "QIB resume function called\n");
- /*
- * Running jobs will fail, since this reset is asynchronous,
- * unlike a sysfs-requested reset. Still better than
- * doing nothing.
- */
- qib_init(dd, 1); /* same as re-init after reset */
-}
-
-const struct pci_error_handlers qib_pci_err_handler = {
- .error_detected = qib_pci_error_detected,
- .mmio_enabled = qib_pci_mmio_enabled,
- .slot_reset = qib_pci_slot_reset,
- .resume = qib_pci_resume,
-};
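A table like the qib_pci_err_handler above only takes effect once the
owning driver publishes it to the PCI core. A hedged sketch of that wiring
(the example_* names are illustrative, not the qib driver's actual
registration code):

#include <linux/pci.h>

static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	/* Quiesce I/O; ask the core for a slot reset if the link froze. */
	return state == pci_channel_io_frozen ?
		PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_CAN_RECOVER;
}

static const struct pci_error_handlers example_err_handler = {
	.error_detected = example_error_detected,
};

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.err_handler	= &example_err_handler,
	/* .id_table, .probe, .remove omitted from this sketch */
};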
diff --git a/drivers/infiniband/hw/qib/qib_pio_copy.c b/drivers/infiniband/hw/qib/qib_pio_copy.c
deleted file mode 100644
index 10b8c444dd31..000000000000
--- a/drivers/infiniband/hw/qib/qib_pio_copy.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2009 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "qib.h"
-
-/**
- * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits
- * @to: destination, in MMIO space (must be 64-bit aligned)
- * @from: source (must be 64-bit aligned)
- * @count: number of 32-bit quantities to copy
- *
- * Copy data from kernel space to MMIO space, in multiples of 32 bits at a
- * time. Order of access is not guaranteed, nor is a memory barrier
- * performed afterwards.
- */
-void qib_pio_copy(void __iomem *to, const void *from, size_t count)
-{
-#ifdef CONFIG_64BIT
- u64 __iomem *dst = to;
- const u64 *src = from;
- const u64 *end = src + (count >> 1);
-
- while (src < end)
- __raw_writeq(*src++, dst++);
- if (count & 1)
- __raw_writel(*(const u32 *)src, dst);
-#else
- u32 __iomem *dst = to;
- const u32 *src = from;
- const u32 *end = src + count;
-
- while (src < end)
- __raw_writel(*src++, dst++);
-#endif
-}
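A typical caller of the qib_pio_copy() deleted above stages a header into
a mapped PIO buffer and handles ordering itself, since the routine
deliberately performs no barrier. A hedged sketch (the example_* helper
and the wmb() placement are illustrative, not lifted from the driver):

/* Stage an already-built header into a mapped PIO send buffer. */
static void example_write_header(u32 __iomem *piobuf,
				 const u32 *hdr, u32 hdrwords)
{
	/* Both pointers must be 64-bit aligned per the kerneldoc above;
	 * hdrwords counts 32-bit quantities. */
	qib_pio_copy(piobuf, hdr, hdrwords);

	/* qib_pio_copy() gives no ordering guarantee, so flush the
	 * writes before telling the hardware the buffer is ready. */
	wmb();
}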
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
deleted file mode 100644
index 1974ceb9d405..000000000000
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ /dev/null
@@ -1,454 +0,0 @@
-/*
- * Copyright (c) 2012 - 2019 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/vmalloc.h>
-#include <rdma/rdma_vt.h>
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-#endif
-
-#include "qib.h"
-
-static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
- struct rvt_qpn_map *map, unsigned off)
-{
- return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
-}
-
-static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
- struct rvt_qpn_map *map, unsigned off,
- unsigned n, u16 qpt_mask)
-{
- if (qpt_mask) {
- off++;
- if (((off & qpt_mask) >> 1) >= n)
- off = (off | qpt_mask) + 2;
- } else {
- off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
- }
- return off;
-}
-
-const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
-[IB_WR_RDMA_WRITE] = {
- .length = sizeof(struct ib_rdma_wr),
- .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
-},
-
-[IB_WR_RDMA_READ] = {
- .length = sizeof(struct ib_rdma_wr),
- .qpt_support = BIT(IB_QPT_RC),
- .flags = RVT_OPERATION_ATOMIC,
-},
-
-[IB_WR_ATOMIC_CMP_AND_SWP] = {
- .length = sizeof(struct ib_atomic_wr),
- .qpt_support = BIT(IB_QPT_RC),
- .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
-},
-
-[IB_WR_ATOMIC_FETCH_AND_ADD] = {
- .length = sizeof(struct ib_atomic_wr),
- .qpt_support = BIT(IB_QPT_RC),
- .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
-},
-
-[IB_WR_RDMA_WRITE_WITH_IMM] = {
- .length = sizeof(struct ib_rdma_wr),
- .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
-},
-
-[IB_WR_SEND] = {
- .length = sizeof(struct ib_send_wr),
- .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
- BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
-},
-
-[IB_WR_SEND_WITH_IMM] = {
- .length = sizeof(struct ib_send_wr),
- .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
- BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
-},
-
-};
-
-static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
-{
- unsigned long page = get_zeroed_page(GFP_KERNEL);
-
- /*
- * Free the page if someone raced with us installing it.
- */
-
- spin_lock(&qpt->lock);
- if (map->page)
- free_page(page);
- else
- map->page = (void *)page;
- spin_unlock(&qpt->lock);
-}
-
-/*
- * Allocate the next available QPN or
- * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
- */
-int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u32 port)
-{
- u32 i, offset, max_scan, qpn;
- struct rvt_qpn_map *map;
- u32 ret;
- struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
- verbs_dev);
- u16 qpt_mask = dd->qpn_mask;
-
- if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
- u32 n;
-
- ret = type == IB_QPT_GSI;
- n = 1 << (ret + 2 * (port - 1));
- spin_lock(&qpt->lock);
- if (qpt->flags & n)
- ret = -EINVAL;
- else
- qpt->flags |= n;
- spin_unlock(&qpt->lock);
- goto bail;
- }
-
- qpn = qpt->last + 2;
- if (qpn >= RVT_QPN_MAX)
- qpn = 2;
- if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
- qpn = (qpn | qpt_mask) + 2;
- offset = qpn & RVT_BITS_PER_PAGE_MASK;
- map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
- max_scan = qpt->nmaps - !offset;
- for (i = 0;;) {
- if (unlikely(!map->page)) {
- get_map_page(qpt, map);
- if (unlikely(!map->page))
- break;
- }
- do {
- if (!test_and_set_bit(offset, map->page)) {
- qpt->last = qpn;
- ret = qpn;
- goto bail;
- }
- offset = find_next_offset(qpt, map, offset,
- dd->n_krcv_queues, qpt_mask);
- qpn = mk_qpn(qpt, map, offset);
- /*
- * This test differs from alloc_pidmap().
- * If find_next_offset() does find a zero
- * bit, we don't need to check for QPN
- * wrapping around past our starting QPN.
- * We just need to be sure we don't loop
- * forever.
- */
- } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
- /*
- * In order to keep the number of pages allocated to a
- * minimum, we scan all the existing pages before increasing
- * the size of the bitmap table.
- */
- if (++i > max_scan) {
- if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
- break;
- map = &qpt->map[qpt->nmaps++];
- offset = 0;
- } else if (map < &qpt->map[qpt->nmaps]) {
- ++map;
- offset = 0;
- } else {
- map = &qpt->map[0];
- offset = 2;
- }
- qpn = mk_qpn(qpt, map, offset);
- }
-
- ret = -ENOMEM;
-
-bail:
- return ret;
-}
-
-/*
- * qib_free_all_qps - check for QPs still in use
- */
-unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
-{
- struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
- verbs_dev);
- unsigned n, qp_inuse = 0;
-
- for (n = 0; n < dd->num_pports; n++) {
- struct qib_ibport *ibp = &dd->pport[n].ibport_data;
-
- rcu_read_lock();
- if (rcu_dereference(ibp->rvp.qp[0]))
- qp_inuse++;
- if (rcu_dereference(ibp->rvp.qp[1]))
- qp_inuse++;
- rcu_read_unlock();
- }
- return qp_inuse;
-}
-
-void qib_notify_qp_reset(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
-
- atomic_set(&priv->s_dma_busy, 0);
-}
-
-void qib_notify_error_qp(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
-
- spin_lock(&dev->rdi.pending_lock);
- if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
- qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
- list_del_init(&priv->iowait);
- }
- spin_unlock(&dev->rdi.pending_lock);
-
- if (!(qp->s_flags & RVT_S_BUSY)) {
- qp->s_hdrwords = 0;
- if (qp->s_rdma_mr) {
- rvt_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
- if (priv->s_tx) {
- qib_put_txreq(priv->s_tx);
- priv->s_tx = NULL;
- }
- }
-}
-
-static int mtu_to_enum(u32 mtu)
-{
- int enum_mtu;
-
- switch (mtu) {
- case 4096:
- enum_mtu = IB_MTU_4096;
- break;
- case 2048:
- enum_mtu = IB_MTU_2048;
- break;
- case 1024:
- enum_mtu = IB_MTU_1024;
- break;
- case 512:
- enum_mtu = IB_MTU_512;
- break;
- case 256:
- enum_mtu = IB_MTU_256;
- break;
- default:
- enum_mtu = IB_MTU_2048;
- }
- return enum_mtu;
-}
-
-int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- struct ib_qp_attr *attr)
-{
- int mtu, pmtu, pidx = qp->port_num - 1;
- struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
- verbs_dev);
- mtu = ib_mtu_enum_to_int(attr->path_mtu);
- if (mtu == -1)
- return -EINVAL;
-
- if (mtu > dd->pport[pidx].ibmtu)
- pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
- else
- pmtu = attr->path_mtu;
- return pmtu;
-}
-
-int qib_mtu_to_path_mtu(u32 mtu)
-{
- return mtu_to_enum(mtu);
-}
-
-u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
-{
- return ib_mtu_enum_to_int(pmtu);
-}
-
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return ERR_PTR(-ENOMEM);
- priv->owner = qp;
-
- priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
- if (!priv->s_hdr) {
- kfree(priv);
- return ERR_PTR(-ENOMEM);
- }
- init_waitqueue_head(&priv->wait_dma);
- INIT_WORK(&priv->s_work, _qib_do_send);
- INIT_LIST_HEAD(&priv->iowait);
-
- return priv;
-}
-
-void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
-
- kfree(priv->s_hdr);
- kfree(priv);
-}
-
-void qib_stop_send_queue(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
-
- cancel_work_sync(&priv->s_work);
-}
-
-void qib_quiesce_qp(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
-
- wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
- if (priv->s_tx) {
- qib_put_txreq(priv->s_tx);
- priv->s_tx = NULL;
- }
-}
-
-void qib_flush_qp_waiters(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
-
- spin_lock(&dev->rdi.pending_lock);
- if (!list_empty(&priv->iowait))
- list_del_init(&priv->iowait);
- spin_unlock(&dev->rdi.pending_lock);
-}
-
-/**
- * qib_check_send_wqe - validate wr/wqe
- * @qp: The qp
- * @wqe: The built wqe
- * @call_send: Determine if the send should be posted or scheduled
- *
- * Returns 0 on success, -EINVAL on failure
- */
-int qib_check_send_wqe(struct rvt_qp *qp,
- struct rvt_swqe *wqe, bool *call_send)
-{
- struct rvt_ah *ah;
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
- case IB_QPT_UC:
- if (wqe->length > 0x80000000U)
- return -EINVAL;
- if (wqe->length > qp->pmtu)
- *call_send = false;
- break;
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- case IB_QPT_UD:
- ah = rvt_get_swqe_ah(wqe);
- if (wqe->length > (1 << ah->log_pmtu))
- return -EINVAL;
- /* progress hint */
- *call_send = true;
- break;
- default:
- break;
- }
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-static const char * const qp_type_str[] = {
- "SMI", "GSI", "RC", "UC", "UD",
-};
-
-/**
- * qib_qp_iter_print - print information to seq_file
- * @s: the seq_file
- * @iter: the iterator
- */
-void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
-{
- struct rvt_swqe *wqe;
- struct rvt_qp *qp = iter->qp;
- struct qib_qp_priv *priv = qp->priv;
-
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- seq_printf(s,
- "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
- iter->n,
- qp->ibqp.qp_num,
- qp_type_str[qp->ibqp.qp_type],
- qp->state,
- wqe->wr.opcode,
- qp->s_hdrwords,
- qp->s_flags,
- atomic_read(&priv->s_dma_busy),
- !list_empty(&priv->iowait),
- qp->timeout,
- wqe->ssn,
- qp->s_lsn,
- qp->s_last_psn,
- qp->s_psn, qp->s_next_psn,
- qp->s_sending_psn, qp->s_sending_hpsn,
- qp->s_last, qp->s_acked, qp->s_cur,
- qp->s_tail, qp->s_head, qp->s_size,
- qp->remote_qpn,
- rdma_ah_get_dlid(&qp->remote_ah_attr));
-}
-
-#endif
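The QPN allocator deleted above treats the rvt_qpn_table as an array of
bitmap pages; mk_qpn() and the lookup in qib_alloc_qpn() are inverse index
arithmetic over RVT_BITS_PER_PAGE. A sketch with hypothetical example_*
names (the constants come from rdmavt):

static unsigned example_mk_qpn(unsigned map_idx, unsigned off)
{
	/* Mirrors mk_qpn(): page index times bits-per-page, plus offset. */
	return map_idx * RVT_BITS_PER_PAGE + off;
}

static void example_split_qpn(unsigned qpn, unsigned *map_idx,
			      unsigned *off)
{
	/* Mirrors qib_alloc_qpn(): the page size is a power of two, so
	 * the mask extracts the in-page bit offset. */
	*map_idx = qpn / RVT_BITS_PER_PAGE;
	*off = qpn & RVT_BITS_PER_PAGE_MASK;
}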
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
deleted file mode 100644
index 295d40a83bb6..000000000000
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "qib.h"
-#include "qib_qsfp.h"
-
-/*
- * QSFP support for the ib_qib driver, using the "Two Wire Serial Interface"
- * driver in qib_twsi.c
- */
-#define QSFP_MAX_RETRY 4
-
-static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 out, mask;
- int ret, cnt, pass = 0;
- int stuck = 0;
- u8 *buff = bp;
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (ret)
- goto no_unlock;
-
- if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
- ret = -ENXIO;
- goto bail;
- }
-
- /*
- * We presume, if we are called at all, that this board has
- * QSFP. This is on the same i2c chain as the legacy parts,
- * but only responds if the module is selected via GPIO pins.
- * Further, there are very long setup and hold requirements
- * on MODSEL.
- */
- mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- if (ppd->hw_pidx) {
- mask <<= QSFP_GPIO_PORT2_SHIFT;
- out <<= QSFP_GPIO_PORT2_SHIFT;
- }
-
- dd->f_gpio_mod(dd, out, mask, mask);
-
- /*
- * Module could take up to 2 msec to respond to MOD_SEL, and there
- * is no way to tell if it is ready, so we must wait.
- */
- msleep(20);
-
- /* Make sure TWSI bus is in sane state. */
- ret = qib_twsi_reset(dd);
- if (ret) {
- qib_dev_porterr(dd, ppd->port,
- "QSFP interface Reset for read failed\n");
- ret = -EIO;
- stuck = 1;
- goto deselect;
- }
-
- /* All QSFP modules are at A0 */
-
- cnt = 0;
- while (cnt < len) {
- unsigned in_page;
- int wlen = len - cnt;
-
- in_page = addr % QSFP_PAGESIZE;
- if ((in_page + wlen) > QSFP_PAGESIZE)
- wlen = QSFP_PAGESIZE - in_page;
- ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
- /* Some QSFPs fail on the first try; retry up to QSFP_MAX_RETRY times */
- if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
- continue;
- if (ret) {
- /* qib_twsi_blk_rd() returns 1 on error, else 0 */
- ret = -EIO;
- goto deselect;
- }
- addr += wlen;
- cnt += wlen;
- }
- ret = cnt;
-
-deselect:
- /*
- * Module could take up to 10 usec after a transfer before it is
- * ready to respond to MOD_SEL negation, and there is no way
- * to tell if it is ready, so we must wait.
- */
- udelay(10);
- /* set QSFP MODSEL, RST. LP all high */
- dd->f_gpio_mod(dd, mask, mask, mask);
-
- /*
- * Module could take up to 2 msec to respond to MOD_SEL
- * going away, and there is no way to tell if it is ready,
- * so we must wait.
- */
- if (stuck)
- qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");
-
- if (pass >= QSFP_MAX_RETRY && ret)
- qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
- else if (pass)
- qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
-
- msleep(20);
-
-bail:
- mutex_unlock(&dd->eep_lock);
-
-no_unlock:
- return ret;
-}
-
-/*
- * qib_qsfp_write
- * We do not ordinarily write the QSFP, but this is needed to select
- * the page on non-flat QSFPs, and possibly for other unusual cases later
- */
-static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
- int len)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 out, mask;
- int ret, cnt;
- u8 *buff = bp;
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (ret)
- goto no_unlock;
-
- if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
- ret = -ENXIO;
- goto bail;
- }
-
- /*
- * We presume, if we are called at all, that this board has
- * QSFP. This is on the same i2c chain as the legacy parts,
- * but only responds if the module is selected via GPIO pins.
- * Further, there are very long setup and hold requirements
- * on MODSEL.
- */
- mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- if (ppd->hw_pidx) {
- mask <<= QSFP_GPIO_PORT2_SHIFT;
- out <<= QSFP_GPIO_PORT2_SHIFT;
- }
- dd->f_gpio_mod(dd, out, mask, mask);
-
- /*
- * Module could take up to 2 msec to respond to MOD_SEL,
- * and there is no way to tell if it is ready, so we must wait.
- */
- msleep(20);
-
- /* Make sure TWSI bus is in sane state. */
- ret = qib_twsi_reset(dd);
- if (ret) {
- qib_dev_porterr(dd, ppd->port,
- "QSFP interface Reset for write failed\n");
- ret = -EIO;
- goto deselect;
- }
-
- /* All QSFP modules are at A0 */
-
- cnt = 0;
- while (cnt < len) {
- unsigned in_page;
- int wlen = len - cnt;
-
- in_page = addr % QSFP_PAGESIZE;
- if ((in_page + wlen) > QSFP_PAGESIZE)
- wlen = QSFP_PAGESIZE - in_page;
- ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);
- if (ret) {
- /* qib_twsi_blk_wr() returns 1 on error, else 0 */
- ret = -EIO;
- goto deselect;
- }
- addr += wlen;
- cnt += wlen;
- }
- ret = cnt;
-
-deselect:
- /*
- * Module could take up to 10 usec after a transfer before it is
- * ready to respond to MOD_SEL negation, and there is no way
- * to tell if it is ready, so we must wait.
- */
- udelay(10);
- /* set QSFP MODSEL, RST, LP high */
- dd->f_gpio_mod(dd, mask, mask, mask);
- /*
- * Module could take up to 2 msec to respond to MOD_SEL
- * going away, and there is no way to tell if it is ready,
- * so we must wait.
- */
- msleep(20);
-
-bail:
- mutex_unlock(&dd->eep_lock);
-
-no_unlock:
- return ret;
-}
-
-/*
- * For validation, we want to check the checksums, even of the
- * fields we do not otherwise use. This function reads the bytes from
- * <first> to <next-1> and returns the 8 LSBs of the sum, or <0 for errors
- */
-static int qsfp_cks(struct qib_pportdata *ppd, int first, int next)
-{
- int ret;
- u16 cks;
- u8 bval;
-
- cks = 0;
- while (first < next) {
- ret = qsfp_read(ppd, first, &bval, 1);
- if (ret < 0)
- goto bail;
- cks += bval;
- ++first;
- }
- ret = cks & 0xFF;
-bail:
- return ret;
-
-}
-
-int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
-{
- int ret;
- int idx;
- u16 cks;
- u8 peek[4];
-
- /* ensure sane contents on invalid reads, for cable swaps */
- memset(cp, 0, sizeof(*cp));
-
- if (!qib_qsfp_mod_present(ppd)) {
- ret = -ENODEV;
- goto bail;
- }
-
- ret = qsfp_read(ppd, 0, peek, 3);
- if (ret < 0)
- goto bail;
- if ((peek[0] & 0xFE) != 0x0C)
- qib_dev_porterr(ppd->dd, ppd->port,
- "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
-
- if ((peek[2] & 4) == 0) {
- /*
- * If cable is paged, rather than "flat memory", we need to
- * set the page to zero, even if it already appears to be zero.
- */
- u8 poke = 0;
-
- ret = qib_qsfp_write(ppd, 127, &poke, 1);
- udelay(50);
- if (ret != 1) {
- qib_dev_porterr(ppd->dd, ppd->port,
- "Failed QSFP Page set\n");
- goto bail;
- }
- }
-
- ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1);
- if (ret < 0)
- goto bail;
- if ((cp->id & 0xFE) != 0x0C)
- qib_dev_porterr(ppd->dd, ppd->port,
- "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id);
- cks = cp->id;
-
- ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1);
- if (ret < 0)
- goto bail;
- cks += cp->pwr;
-
- ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS);
- if (ret < 0)
- goto bail;
- cks += ret;
-
- ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1);
- if (ret < 0)
- goto bail;
- cks += cp->len;
-
- ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1);
- if (ret < 0)
- goto bail;
- cks += cp->tech;
-
- ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_VEND_LEN; ++idx)
- cks += cp->vendor[idx];
-
- ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1);
- if (ret < 0)
- goto bail;
- cks += cp->xt_xcv;
-
- ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_VOUI_LEN; ++idx)
- cks += cp->oui[idx];
-
- ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_PN_LEN; ++idx)
- cks += cp->partnum[idx];
-
- ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_REV_LEN; ++idx)
- cks += cp->rev[idx];
-
- ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx)
- cks += cp->atten[idx];
-
- ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS);
- if (ret < 0)
- goto bail;
- cks += ret;
-
- cks &= 0xFF;
- ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1);
- if (ret < 0)
- goto bail;
- if (cks != cp->cks1)
- qib_dev_porterr(ppd->dd, ppd->port,
- "QSFP cks1 is %02X, computed %02X\n", cp->cks1,
- cks);
-
- /* Second checksum covers bytes 192..222 (serial, date, lot) */
- ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS);
- if (ret < 0)
- goto bail;
- cks = ret;
-
- ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_SN_LEN; ++idx)
- cks += cp->serial[idx];
-
- ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_DATE_LEN; ++idx)
- cks += cp->date[idx];
-
- ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_LOT_LEN; ++idx)
- cks += cp->lot[idx];
-
- ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS);
- if (ret < 0)
- goto bail;
- cks += ret;
-
- ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1);
- if (ret < 0)
- goto bail;
- cks &= 0xFF;
- if (cks != cp->cks2)
- qib_dev_porterr(ppd->dd, ppd->port,
- "QSFP cks2 is %02X, computed %02X\n", cp->cks2,
- cks);
- return 0;
-
-bail:
- cp->id = 0;
- return ret;
-}
-
-const char * const qib_qsfp_devtech[16] = {
- "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
- "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
- "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
- "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
-};
-
-#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
-#define QSFP_DEFAULT_HDR_CNT 224
-
-static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
-
-int qib_qsfp_mod_present(struct qib_pportdata *ppd)
-{
- u32 mask;
- int ret;
-
- mask = QSFP_GPIO_MOD_PRS_N <<
- (ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT);
- ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
-
- return !((ret & mask) >>
- ((ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT) + 3));
-}
-
-/*
- * Initialize structures that control access to QSFP. Called once per port
- * on cards that support QSFP.
- */
-void qib_qsfp_init(struct qib_qsfp_data *qd,
- void (*fevent)(struct work_struct *))
-{
- u32 mask, highs;
-
- struct qib_devdata *dd = qd->ppd->dd;
-
- /* Initialize work struct for later QSFP events */
- INIT_WORK(&qd->work, fevent);
-
- /*
- * Later, we may want more validation. For now, just set up pins and
- * blip reset. If module is present, call qib_refresh_qsfp_cache(),
- * to do further init.
- */
- mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- highs = mask - QSFP_GPIO_MOD_RST_N;
- if (qd->ppd->hw_pidx) {
- mask <<= QSFP_GPIO_PORT2_SHIFT;
- highs <<= QSFP_GPIO_PORT2_SHIFT;
- }
- dd->f_gpio_mod(dd, highs, mask, mask);
- udelay(20); /* Generous RST dwell */
-
- dd->f_gpio_mod(dd, mask, mask, mask);
-}
-
-int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
-{
- struct qib_qsfp_cache cd;
- u8 bin_buff[QSFP_DUMP_CHUNK];
- char lenstr[6];
- int sofar, ret;
- int bidx = 0;
-
- sofar = 0;
- ret = qib_refresh_qsfp_cache(ppd, &cd);
- if (ret < 0)
- goto bail;
-
- lenstr[0] = ' ';
- lenstr[1] = '\0';
- if (QSFP_IS_CU(cd.tech))
- sprintf(lenstr, "%dM ", cd.len);
-
- sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
- (QSFP_PWR(cd.pwr) * 4));
-
- sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
- qib_qsfp_devtech[cd.tech >> 4]);
-
- sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
- QSFP_VEND_LEN, cd.vendor);
-
- sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
- QSFP_OUI(cd.oui));
-
- sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
- QSFP_PN_LEN, cd.partnum);
- sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
- QSFP_REV_LEN, cd.rev);
- if (QSFP_IS_CU(cd.tech))
- sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
- QSFP_ATTEN_SDR(cd.atten),
- QSFP_ATTEN_DDR(cd.atten));
- sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
- QSFP_SN_LEN, cd.serial);
- sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
- QSFP_DATE_LEN, cd.date);
- sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
- QSFP_LOT_LEN, cd.lot);
-
- while (bidx < QSFP_DEFAULT_HDR_CNT) {
- int iidx;
-
- ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
- if (ret < 0)
- goto bail;
- for (iidx = 0; iidx < ret; ++iidx) {
- sofar += scnprintf(buf + sofar, len-sofar, " %02X",
- bin_buff[iidx]);
- }
- sofar += scnprintf(buf + sofar, len - sofar, "\n");
- bidx += QSFP_DUMP_CHUNK;
- }
- ret = sofar;
-bail:
- return ret;
-}
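Both EEPROM check bytes validated above follow the same rule: the stored
byte must equal the low eight bits of the byte-sum over the covered range
(cks1 covers bytes 128..190 against byte 191, cks2 covers 192..222 against
byte 223). A standalone sketch of that rule, with a hypothetical helper
name:

static bool example_qsfp_cks_ok(const u8 *eeprom, int first, int last,
				u8 stored)
{
	u16 sum = 0;
	int i;

	/* The check byte is the 8 LSBs of the sum over [first, last]. */
	for (i = first; i <= last; i++)
		sum += eeprom[i];
	return (sum & 0xFF) == stored;
}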
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
deleted file mode 100644
index ad8dbd6ac0cf..000000000000
--- a/drivers/infiniband/hw/qib/qib_qsfp.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/* QSFP support common definitions, for ib_qib driver */
-
-#define QSFP_DEV 0xA0
-#define QSFP_PWR_LAG_MSEC 2000
-#define QSFP_MODPRS_LAG_MSEC 20
-
-/*
- * Below are masks for various QSFP signals, for Port 1.
- * Port2 equivalents are shifted by QSFP_GPIO_PORT2_SHIFT.
- * _N means asserted low
- */
-#define QSFP_GPIO_MOD_SEL_N (4)
-#define QSFP_GPIO_MOD_PRS_N (8)
-#define QSFP_GPIO_INT_N (0x10)
-#define QSFP_GPIO_MOD_RST_N (0x20)
-#define QSFP_GPIO_LP_MODE (0x40)
-#define QSFP_GPIO_PORT2_SHIFT 5
-
-#define QSFP_PAGESIZE 128
-/* Defined fields that QLogic requires of qualified cables */
-/* Byte 0 is Identifier, not checked */
-/* Byte 1 is reserved "status MSB" */
-/* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */
-/*
- * Rest of first 128 not used, although 127 is reserved for page select
- * if module is not "Flat memory".
- */
-/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
-#define QSFP_MOD_ID_OFFS 128
-/*
- * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
- * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
- */
-#define QSFP_MOD_PWR_OFFS 129
-/* Byte 130 is Connector type. Not QLogic req'd */
-/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
-/* Byte 139 is encoding. code 0x01 is 8b10b. Not QLogic req'd */
-/* byte 140 is nominal bit-rate, in units of 100Mbits/sec Not QLogic req'd */
-/* Byte 141 is Extended Rate Select. Not QLogic req'd */
-/* Bytes 142..145 are lengths for various fiber types. Not QLogic req'd */
-/* Byte 146 is length for Copper. Units of 1 meter */
-#define QSFP_MOD_LEN_OFFS 146
-/*
- * Byte 147 is Device technology. D0..3 not QLogic req'd
- * D4..7 select from 15 choices, translated by table:
- */
-#define QSFP_MOD_TECH_OFFS 147
-extern const char *const qib_qsfp_devtech[16];
-/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
-#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
-/* Active Equalization includes fiber, copper full EQ, and copper far Eq */
-#define QSFP_IS_ACTIVE_FAR(tech) ((0x32FF >> ((tech) >> 4)) & 1)
-/* Attenuation should be valid for copper other than full/near Eq */
-#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
-/* Length is only valid if technology is "copper" */
-#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
-#define QSFP_TECH_1490 9
-
-#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
- oui[2])
-#define QSFP_OUI_AMPHENOL 0x415048
-#define QSFP_OUI_FINISAR 0x009065
-#define QSFP_OUI_GORE 0x002177
-
-/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
-#define QSFP_VEND_OFFS 148
-#define QSFP_VEND_LEN 16
-/* Byte 164 is IB Extended transceiver codes. Bits D0..3 are SDR,DDR,QDR,EDR */
-#define QSFP_IBXCV_OFFS 164
-/* Bytes 165..167 are Vendor OUI number */
-#define QSFP_VOUI_OFFS 165
-#define QSFP_VOUI_LEN 3
-/* Bytes 168..183 are Vendor Part Number, string */
-#define QSFP_PN_OFFS 168
-#define QSFP_PN_LEN 16
-/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
-#define QSFP_REV_OFFS 184
-#define QSFP_REV_LEN 2
-/*
- * Bytes 186,187 are Wavelength, if optical. Not QLogic req'd
- * If copper, they are attenuation in dB:
- * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
- */
-#define QSFP_ATTEN_OFFS 186
-#define QSFP_ATTEN_LEN 2
-/* Bytes 188,189 are Wavelength tolerance, not QLogic req'd */
-/* Byte 190 is Max Case Temp. Not QLogic req'd */
-/* Byte 191 is LSB of sum of bytes 128..190. Not QLogic req'd */
-#define QSFP_CC_OFFS 191
-/* Bytes 192..195 are Options implemented in QSFP. Not QLogic req'd */
-/* Bytes 196..211 are Serial Number, String */
-#define QSFP_SN_OFFS 196
-#define QSFP_SN_LEN 16
-/* Bytes 212..219 are date-code YYMMDD (MM==1 for Jan) */
-#define QSFP_DATE_OFFS 212
-#define QSFP_DATE_LEN 6
-/* Bytes 218,219 are optional lot-code, string */
-#define QSFP_LOT_OFFS 218
-#define QSFP_LOT_LEN 2
-/* Bytes 220, 221 indicate monitoring options, Not QLogic req'd */
-/* Byte 223 is LSB of sum of bytes 192..222 */
-#define QSFP_CC_EXT_OFFS 223
-
-/*
- * struct qib_qsfp_data encapsulates state of QSFP device for one port.
- * It will be part of the port-chip-specific data if a board supports QSFP.
- *
- * Since multiple board-types use QSFP, and their pport_data structs
- * differ (in the chip-specific section), we need a pointer to its head.
- *
- * Avoiding premature optimization, we will have one work_struct per port,
- * and let the (increasingly inaccurately named) eep_lock arbitrate
- * access to common resources.
- *
- */
-
-/*
- * Hold the parts of the onboard EEPROM that we care about, so we aren't
- * constantly bit-banging the TWSI bus
- */
-struct qib_qsfp_cache {
- u8 id; /* must be 0x0C or 0x0D; 0 indicates invalid EEPROM read */
- u8 pwr; /* in D6,7 */
- u8 len; /* in meters, Cu only */
- u8 tech;
- char vendor[QSFP_VEND_LEN];
- u8 xt_xcv; /* Ext. transceiver codes, 4 LSBs are IB speeds supported */
- u8 oui[QSFP_VOUI_LEN];
- u8 partnum[QSFP_PN_LEN];
- u8 rev[QSFP_REV_LEN];
- u8 atten[QSFP_ATTEN_LEN];
- u8 cks1; /* Checksum of bytes 128..190 */
- u8 serial[QSFP_SN_LEN];
- u8 date[QSFP_DATE_LEN];
- u8 lot[QSFP_LOT_LEN];
- u8 cks2; /* Checksum of bytes 192..222 */
-};
-
-#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
-#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
-#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
-
-struct qib_qsfp_data {
- /* Helps to find our way */
- struct qib_pportdata *ppd;
- struct work_struct work;
- struct qib_qsfp_cache cache;
- unsigned long t_insert;
- u8 modpresent;
-};
-
-extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
- struct qib_qsfp_cache *cp);
-extern int qib_qsfp_mod_present(struct qib_pportdata *ppd);
-extern void qib_qsfp_init(struct qib_qsfp_data *qd,
- void (*fevent)(struct work_struct *));
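To make the field layout above concrete: a hedged sketch of decoding a
populated qib_qsfp_cache with the macros this header defines (the helper
itself is hypothetical, not part of the driver):

#include <linux/printk.h>

static void example_report(const struct qib_qsfp_cache *cp)
{
	static const char * const pwr[] = { "1.5W", "2.0W", "2.5W", "3.5W" };

	/* QSFP_PWR() pulls the power class out of D7,D6 of byte 129;
	 * QSFP_OUI() folds the three OUI bytes into one integer. */
	pr_info("power class %s, vendor OUI %06X\n",
		pwr[QSFP_PWR(cp->pwr)], QSFP_OUI(cp->oui));

	/* Length and attenuation are only meaningful for copper cables. */
	if (QSFP_IS_CU(cp->tech))
		pr_info("copper, %u m, SDR atten %u dB\n",
			cp->len, QSFP_ATTEN_SDR(cp->atten));
}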
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
deleted file mode 100644
index a1c20ffb4490..000000000000
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ /dev/null
@@ -1,2131 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/io.h>
-
-#include "qib.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_RC_##x
-
-
-static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
- u32 psn, u32 pmtu)
-{
- u32 len;
-
- len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
- return rvt_restart_sge(ss, wqe, len);
-}
-
-/**
- * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
- * @dev: the device for this QP
- * @qp: a pointer to the QP
- * @ohdr: a pointer to the IB header being constructed
- * @pmtu: the path MTU
- *
- * Return 1 if constructed; otherwise, return 0.
- * Note that we are in the responder's side of the QP context.
- * Note the QP s_lock must be held.
- */
-static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
- struct ib_other_headers *ohdr, u32 pmtu)
-{
- struct rvt_ack_entry *e;
- u32 hwords;
- u32 len;
- u32 bth0;
- u32 bth2;
-
- /* Don't send an ACK if we aren't supposed to. */
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- goto bail;
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
-
- switch (qp->s_ack_state) {
- case OP(RDMA_READ_RESPONSE_LAST):
- case OP(RDMA_READ_RESPONSE_ONLY):
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- fallthrough;
- case OP(ATOMIC_ACKNOWLEDGE):
- /*
- * We can increment the tail pointer now that the last
- * response has been sent instead of only being
- * constructed.
- */
- if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
- qp->s_tail_ack_queue = 0;
- fallthrough;
- case OP(SEND_ONLY):
- case OP(ACKNOWLEDGE):
- /* Check for no next entry in the queue. */
- if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
- if (qp->s_flags & RVT_S_ACK_PENDING)
- goto normal;
- goto bail;
- }
-
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST)) {
- /*
- * If an RDMA read response is being resent and
- * we haven't seen the duplicate request yet,
- * then stop sending the remaining responses the
- * responder has seen until the requester resends it.
- */
- len = e->rdma_sge.sge_length;
- if (len && !e->rdma_sge.mr) {
- qp->s_tail_ack_queue = qp->r_head_ack_queue;
- goto bail;
- }
- /* Copy SGE state in case we need to resend */
- qp->s_rdma_mr = e->rdma_sge.mr;
- if (qp->s_rdma_mr)
- rvt_get_mr(qp->s_rdma_mr);
- qp->s_ack_rdma_sge.sge = e->rdma_sge;
- qp->s_ack_rdma_sge.num_sge = 1;
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
- if (len > pmtu) {
- len = pmtu;
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
- } else {
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
- e->sent = 1;
- }
- ohdr->u.aeth = rvt_compute_aeth(qp);
- hwords++;
- qp->s_ack_rdma_psn = e->psn;
- bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
- } else {
- /* COMPARE_SWAP or FETCH_ADD */
- qp->s_cur_sge = NULL;
- len = 0;
- qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
- ohdr->u.at.aeth = rvt_compute_aeth(qp);
- ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
- hwords += sizeof(ohdr->u.at) / sizeof(u32);
- bth2 = e->psn & QIB_PSN_MASK;
- e->sent = 1;
- }
- bth0 = qp->s_ack_state << 24;
- break;
-
- case OP(RDMA_READ_RESPONSE_FIRST):
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- fallthrough;
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
- qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
- if (qp->s_rdma_mr)
- rvt_get_mr(qp->s_rdma_mr);
- len = qp->s_ack_rdma_sge.sge.sge_length;
- if (len > pmtu)
- len = pmtu;
- else {
- ohdr->u.aeth = rvt_compute_aeth(qp);
- hwords++;
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- e->sent = 1;
- }
- bth0 = qp->s_ack_state << 24;
- bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
- break;
-
- default:
-normal:
- /*
- * Send a regular ACK.
- * Set the s_ack_state so we wait until after sending
- * the ACK before setting s_ack_state to ACKNOWLEDGE
- * (see above).
- */
- qp->s_ack_state = OP(SEND_ONLY);
- qp->s_flags &= ~RVT_S_ACK_PENDING;
- qp->s_cur_sge = NULL;
- if (qp->s_nak_state)
- ohdr->u.aeth =
- cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
- (qp->s_nak_state <<
- IB_AETH_CREDIT_SHIFT));
- else
- ohdr->u.aeth = rvt_compute_aeth(qp);
- hwords++;
- len = 0;
- bth0 = OP(ACKNOWLEDGE) << 24;
- bth2 = qp->s_ack_psn & QIB_PSN_MASK;
- }
- qp->s_rdma_ack_cnt++;
- qp->s_hdrwords = hwords;
- qp->s_cur_size = len;
- qib_make_ruc_header(qp, ohdr, bth0, bth2);
- return 1;
-
-bail:
- qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
- return 0;
-}
-
-/**
- * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
- * @qp: a pointer to the QP
- * @flags: unused
- *
- * Assumes the s_lock is held.
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct ib_other_headers *ohdr;
- struct rvt_sge_state *ss;
- struct rvt_swqe *wqe;
- u32 hwords;
- u32 len;
- u32 bth0;
- u32 bth2;
- u32 pmtu = qp->pmtu;
- char newreq;
- int ret = 0;
- int delta;
-
- ohdr = &priv->s_hdr->u.oth;
- if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
- ohdr = &priv->s_hdr->u.l.oth;
-
- /* Sending responses has higher priority than sending requests. */
- if ((qp->s_flags & RVT_S_RESP_PENDING) &&
- qib_make_rc_ack(dev, qp, ohdr, pmtu))
- goto done;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
- if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == READ_ONCE(qp->s_head))
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&priv->s_dma_busy)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
- IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
- /* will get called again */
- goto done;
- }
-
- if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
- goto bail;
-
- if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
- if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
- qp->s_flags |= RVT_S_WAIT_PSN;
- goto bail;
- }
- qp->s_sending_psn = qp->s_psn;
- qp->s_sending_hpsn = qp->s_psn - 1;
- }
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
- bth0 = 0;
-
- /* Send a request. */
- wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
- switch (qp->s_state) {
- default:
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
- goto bail;
- /*
- * Resend an old request or start a new one.
- *
- * We keep track of the current SWQE so that
- * we don't reset the "furthest progress" state
- * if we need to back up.
- */
- newreq = 0;
- if (qp->s_cur == qp->s_tail) {
- /* Check if send work queue is empty. */
- if (qp->s_tail == READ_ONCE(qp->s_head))
- goto bail;
- /*
- * If a fence is requested, wait for previous
- * RDMA read and atomic operations to finish.
- */
- if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
- qp->s_num_rd_atomic) {
- qp->s_flags |= RVT_S_WAIT_FENCE;
- goto bail;
- }
- newreq = 1;
- qp->s_psn = wqe->psn;
- }
- /*
- * Note that we have to be careful not to modify the
- * original work request since we may need to resend
- * it.
- */
- len = wqe->length;
- ss = &qp->s_sge;
- bth2 = qp->s_psn & QIB_PSN_MASK;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- /* If no credit, return. */
- if (!rvt_rc_credit_avail(qp, wqe))
- goto bail;
- if (len > pmtu) {
- qp->s_state = OP(SEND_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_ONLY);
- else {
- qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_WRITE:
- if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
- goto no_flow_control;
- case IB_WR_RDMA_WRITE_WITH_IMM:
- /* If no credit, return. */
- if (!rvt_rc_credit_avail(qp, wqe))
- goto bail;
-no_flow_control:
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- hwords += sizeof(struct ib_reth) / sizeof(u32);
- if (len > pmtu) {
- qp->s_state = OP(RDMA_WRITE_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_ONLY);
- else {
- qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after RETH */
- ohdr->u.rc.imm_data =
- wqe->rdma_wr.wr.ex.imm_data;
- hwords += 1;
- if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_READ:
- /*
- * Don't allow more operations to be started
- * than the QP limits allow.
- */
- if (newreq) {
- if (qp->s_num_rd_atomic >=
- qp->s_max_rd_atomic) {
- qp->s_flags |= RVT_S_WAIT_RDMAR;
- goto bail;
- }
- qp->s_num_rd_atomic++;
- if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
- }
-
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- qp->s_state = OP(RDMA_READ_REQUEST);
- hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
- ss = NULL;
- len = 0;
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- /*
- * Don't allow more operations to be started
- * than the QP limits allow.
- */
- if (newreq) {
- if (qp->s_num_rd_atomic >=
- qp->s_max_rd_atomic) {
- qp->s_flags |= RVT_S_WAIT_RDMAR;
- goto bail;
- }
- qp->s_num_rd_atomic++;
- if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
- }
- if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- qp->s_state = OP(COMPARE_SWAP);
- put_ib_ateth_swap(wqe->atomic_wr.swap,
- &ohdr->u.atomic_eth);
- put_ib_ateth_compare(wqe->atomic_wr.compare_add,
- &ohdr->u.atomic_eth);
- } else {
- qp->s_state = OP(FETCH_ADD);
- put_ib_ateth_swap(wqe->atomic_wr.compare_add,
- &ohdr->u.atomic_eth);
- put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
- }
- put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
- &ohdr->u.atomic_eth);
- ohdr->u.atomic_eth.rkey = cpu_to_be32(
- wqe->atomic_wr.rkey);
- hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
- ss = NULL;
- len = 0;
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- default:
- goto bail;
- }
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_sge.total_len = wqe->length;
- qp->s_len = wqe->length;
- if (newreq) {
- qp->s_tail++;
- if (qp->s_tail >= qp->s_size)
- qp->s_tail = 0;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- qp->s_psn = wqe->lpsn + 1;
- else
- qp->s_psn++;
- break;
-
- case OP(RDMA_READ_RESPONSE_FIRST):
- /*
- * qp->s_state is normally set to the opcode of the
- * last packet constructed for new requests and therefore
- * is never set to RDMA read response.
- * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
- * thread to indicate a SEND needs to be restarted from an
- * earlier PSN without interfering with the sending thread.
- * See qib_restart_rc().
- */
- qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
- fallthrough;
- case OP(SEND_FIRST):
- qp->s_state = OP(SEND_MIDDLE);
- fallthrough;
- case OP(SEND_MIDDLE):
- bth2 = qp->s_psn++ & QIB_PSN_MASK;
- ss = &qp->s_sge;
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_LAST);
- else {
- qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth2 |= IB_BTH_REQ_ACK;
- qp->s_cur++;
- if (qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_READ_RESPONSE_LAST):
- /*
- * qp->s_state is normally set to the opcode of the
- * last packet constructed for new requests and therefore
- * is never set to RDMA read response.
- * RDMA_READ_RESPONSE_LAST is used by the ACK processing
- * thread to indicate an RDMA write needs to be restarted from
- * an earlier PSN without interfering with the sending thread.
- * See qib_restart_rc().
- */
- qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
- fallthrough;
- case OP(RDMA_WRITE_FIRST):
- qp->s_state = OP(RDMA_WRITE_MIDDLE);
- fallthrough;
- case OP(RDMA_WRITE_MIDDLE):
- bth2 = qp->s_psn++ & QIB_PSN_MASK;
- ss = &qp->s_sge;
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_LAST);
- else {
- qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- bth2 |= IB_BTH_REQ_ACK;
- qp->s_cur++;
- if (qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- /*
- * qp->s_state is normally set to the opcode of the
- * last packet constructed for new requests and therefore
- * is never set to RDMA read response.
- * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
- * thread to indicate an RDMA read needs to be restarted from
- * an earlier PSN without interfering with the sending thread.
- * See qib_restart_rc().
- */
- len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr + len);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
- qp->s_state = OP(RDMA_READ_REQUEST);
- hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
- bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
- qp->s_psn = wqe->lpsn + 1;
- ss = NULL;
- len = 0;
- qp->s_cur++;
- if (qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
- }
- qp->s_sending_hpsn = bth2;
- delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
- if (delta && delta % QIB_PSN_CREDIT == 0)
- bth2 |= IB_BTH_REQ_ACK;
- if (qp->s_flags & RVT_S_SEND_ONE) {
- qp->s_flags &= ~RVT_S_SEND_ONE;
- qp->s_flags |= RVT_S_WAIT_ACK;
- bth2 |= IB_BTH_REQ_ACK;
- }
- qp->s_len -= len;
- qp->s_hdrwords = hwords;
- qp->s_cur_sge = ss;
- qp->s_cur_size = len;
- qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
-done:
- return 1;
-bail:
- qp->s_flags &= ~RVT_S_BUSY;
- return ret;
-}
-
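The `(... << 8) >> 8` expression computing delta above sign-extends the low 24 bits of a PSN difference so that sequence numbers that have wrapped the 24-bit space still compare in circular order; the qib_cmp24() calls throughout this file rely on the same idea. A minimal sketch of the trick (the helper name psn_delta24 is hypothetical, for illustration only):

	/* Sign-extend bits [23:0] of (a - b) so PSNs compare correctly
	 * across 24-bit wraparound. */
	static inline int psn_delta24(u32 a, u32 b)
	{
		return (((int)a - (int)b) << 8) >> 8;
	}

	/* psn_delta24(0x000002, 0xfffffe) == 4: 0x000002 is 4 PSNs later. */
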
-/**
- * qib_send_rc_ack - Construct an ACK packet and send it
- * @qp: a pointer to the QP
- *
- * This is called from qib_rc_rcv() and qib_kreceive().
- * Note that RDMA reads and atomics are handled in the
- * send side QP state and tasklet.
- */
-void qib_send_rc_ack(struct rvt_qp *qp)
-{
- struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u64 pbc;
- u16 lrh0;
- u32 bth0;
- u32 hwords;
- u32 pbufn;
- u32 __iomem *piobuf;
- struct ib_header hdr;
- struct ib_other_headers *ohdr;
- u32 control;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- goto unlock;
-
- /* Don't send ACK or NAK if an RDMA read or atomic is pending. */
- if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
- goto queue_ack;
-
- /* Construct the header with s_lock held so APM doesn't change it. */
- ohdr = &hdr.u.oth;
- lrh0 = QIB_LRH_BTH;
- /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
- hwords = 6;
- if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
- IB_AH_GRH)) {
- hwords += qib_make_grh(ibp, &hdr.u.l.grh,
- rdma_ah_read_grh(&qp->remote_ah_attr),
- hwords, 0);
- ohdr = &hdr.u.l.oth;
- lrh0 = QIB_LRH_GRH;
- }
- /* read pkey_index w/o lock (it's atomic) */
- bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
- if (qp->s_mig_state == IB_MIG_MIGRATED)
- bth0 |= IB_BTH_MIG_REQ;
- if (qp->r_nak_state)
- ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
- (qp->r_nak_state <<
- IB_AETH_CREDIT_SHIFT));
- else
- ohdr->u.aeth = rvt_compute_aeth(qp);
- lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
- rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
- hdr.lrh[0] = cpu_to_be16(lrh0);
- hdr.lrh[1] = cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
- hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
- hdr.lrh[3] = cpu_to_be16(ppd->lid |
- rdma_ah_get_path_bits(&qp->remote_ah_attr));
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
- ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- /* Don't try to send ACKs if the link isn't ACTIVE */
- if (!(ppd->lflags & QIBL_LINKACTIVE))
- goto done;
-
- control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
- qp->s_srate, lrh0 >> 12);
- /* length is + 1 for the control dword */
- pbc = ((u64) control << 32) | (hwords + 1);
-
- piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
- if (!piobuf) {
- /*
- * We are out of PIO buffers at the moment.
- * Pass responsibility for sending the ACK to the
- * send tasklet so that when a PIO buffer becomes
- * available, the ACK is sent ahead of other outgoing
- * packets.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
- goto queue_ack;
- }
-
- /*
- * Write the pbc.
- * We have to flush after the PBC for correctness on some
- * CPUs, or the WC buffer can be written out of order.
- */
- writeq(pbc, piobuf);
-
- if (dd->flags & QIB_PIO_FLUSH_WC) {
- u32 *hdrp = (u32 *) &hdr;
-
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
- qib_flush_wc();
- __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
- } else
- qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);
-
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf + spcl_off);
- }
-
- qib_flush_wc();
- qib_sendbuf_done(dd, pbufn);
-
- this_cpu_inc(ibp->pmastats->n_unicast_xmit);
- goto done;
-
-queue_ack:
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- this_cpu_inc(*ibp->rvp.rc_qacks);
- qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
- qp->s_nak_state = qp->r_nak_state;
- qp->s_ack_psn = qp->r_ack_psn;
-
- /* Schedule the send tasklet. */
- qib_schedule_send(qp);
- }
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
- return;
-}
-
-/**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from do_rc_ack() and qib_restart_rc() to back up
- * the send state of the given QP to the specified PSN.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct rvt_qp *qp, u32 psn)
-{
- u32 n = qp->s_acked;
- struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
- u32 opcode;
-
- qp->s_cur = n;
-
- /*
- * If we are starting the request from the beginning,
- * let the normal send code handle initialization.
- */
- if (qib_cmp24(psn, wqe->psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- goto done;
- }
-
- /* Find the work request opcode corresponding to the given PSN. */
- opcode = wqe->wr.opcode;
- for (;;) {
- int diff;
-
- if (++n == qp->s_size)
- n = 0;
- if (n == qp->s_tail)
- break;
- wqe = rvt_get_swqe_ptr(qp, n);
- diff = qib_cmp24(psn, wqe->psn);
- if (diff < 0)
- break;
- qp->s_cur = n;
- /*
- * If we are starting the request from the beginning,
- * let the normal send code handle initialization.
- */
- if (diff == 0) {
- qp->s_state = OP(SEND_LAST);
- goto done;
- }
- opcode = wqe->wr.opcode;
- }
-
- /*
- * Set the state to restart in the middle of a request.
- * Don't change the s_sge, s_cur_sge, or s_cur_size.
- * See qib_make_rc_req().
- */
- switch (opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
- break;
-
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
- break;
-
- case IB_WR_RDMA_READ:
- qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- break;
-
- default:
- /*
- * This case shouldn't happen since there is only
- * one PSN per request.
- */
- qp->s_state = OP(SEND_LAST);
- }
-done:
- qp->s_psn = psn;
- /*
- * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer
- * asynchronously before the send tasklet can get scheduled.
- * Doing it in qib_make_rc_req() is too late.
- */
- if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
- (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
- qp->s_flags |= RVT_S_WAIT_PSN;
-}
-
-/*
- * Back up requester to resend the last un-ACKed request.
- * The QP r_lock and s_lock should be held and interrupts disabled.
- */
-void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
-{
- struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- struct qib_ibport *ibp;
-
- if (qp->s_retry == 0) {
- if (qp->s_mig_state == IB_MIG_ARMED) {
- qib_migrate_qp(qp);
- qp->s_retry = qp->s_retry_cnt;
- } else if (qp->s_last == qp->s_acked) {
- rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
- rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- return;
- } else /* XXX need to handle delayed completion */
- return;
- } else
- qp->s_retry--;
-
- ibp = to_iport(qp->ibqp.device, qp->port_num);
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- ibp->rvp.n_rc_resends++;
- else
- ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
-
- qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
- RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
- RVT_S_WAIT_ACK);
- if (wait)
- qp->s_flags |= RVT_S_SEND_ONE;
- reset_psn(qp, psn);
-}
-
-/*
- * Set qp->s_sending_psn to the next PSN after the given one.
- * This would be psn+1 except when RDMA reads are present.
- */
-static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
-{
- struct rvt_swqe *wqe;
- u32 n = qp->s_last;
-
- /* Find the work request corresponding to the given PSN. */
- for (;;) {
- wqe = rvt_get_swqe_ptr(qp, n);
- if (qib_cmp24(psn, wqe->lpsn) <= 0) {
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- qp->s_sending_psn = wqe->lpsn + 1;
- else
- qp->s_sending_psn = psn + 1;
- break;
- }
- if (++n == qp->s_size)
- n = 0;
- if (n == qp->s_tail)
- break;
- }
-}
-
-/*
- * This should be called with the QP s_lock held and interrupts disabled.
- */
-void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
-{
- struct ib_other_headers *ohdr;
- struct rvt_swqe *wqe;
- u32 opcode;
- u32 psn;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
- return;
-
- /* Find out where the BTH is */
- if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
-
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
- if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
- opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
- WARN_ON(!qp->s_rdma_ack_cnt);
- qp->s_rdma_ack_cnt--;
- return;
- }
-
- psn = be32_to_cpu(ohdr->bth[2]);
- reset_sending_psn(qp, psn);
-
- /*
- * Start timer after a packet requesting an ACK has been sent and
- * there are still requests that haven't been acked.
- */
- if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
- !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
- (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- rvt_add_retry_timer(qp);
-
- while (qp->s_last != qp->s_acked) {
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
- qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
- break;
- rvt_qp_complete_swqe(qp,
- wqe,
- ib_qib_wc_opcode[wqe->wr.opcode],
- IB_WC_SUCCESS);
- }
- /*
- * If we were waiting for sends to complete before resending,
- * and they are now complete, restart sending.
- */
- if (qp->s_flags & RVT_S_WAIT_PSN &&
- qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
- qp->s_flags &= ~RVT_S_WAIT_PSN;
- qp->s_sending_psn = qp->s_psn;
- qp->s_sending_hpsn = qp->s_psn - 1;
- qib_schedule_send(qp);
- }
-}
-
-static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
-{
- qp->s_last_psn = psn;
-}
-
-/*
- * Generate a SWQE completion.
- * This is similar to rvt_send_complete() but has to check to be sure
- * that the SGEs are not being referenced if the SWQE is being resent.
- */
-static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
- struct rvt_swqe *wqe,
- struct qib_ibport *ibp)
-{
- /*
- * Don't decrement refcount and don't generate a
- * completion if the SWQE is being resent until the send
- * is finished.
- */
- if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
- qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0)
- rvt_qp_complete_swqe(qp,
- wqe,
- ib_qib_wc_opcode[wqe->wr.opcode],
- IB_WC_SUCCESS);
- else
- this_cpu_inc(*ibp->rvp.rc_delayed_comp);
-
- qp->s_retry = qp->s_retry_cnt;
- update_last_psn(qp, wqe->lpsn);
-
- /*
- * If we are completing a request which is in the process of
- * being resent, we can stop resending it since we know the
- * responder has already seen it.
- */
- if (qp->s_acked == qp->s_cur) {
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- qp->s_acked = qp->s_cur;
- wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
- if (qp->s_acked != qp->s_tail) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = wqe->psn;
- }
- } else {
- if (++qp->s_acked >= qp->s_size)
- qp->s_acked = 0;
- if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
- qp->s_draining = 0;
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- }
- return wqe;
-}
-
-/**
- * do_rc_ack - process an incoming RC ACK
- * @qp: the QP the ACK came in on
- * @aeth: the AETH carried by the ACK
- * @psn: the packet sequence number of the ACK
- * @opcode: the opcode of the request that resulted in the ACK
- * @val: the value returned by an atomic ACK, if any
- * @rcd: the context pointer
- *
- * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- * Returns 1 if OK, 0 if current operation should be aborted (NAK).
- */
-static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
- u64 val, struct qib_ctxtdata *rcd)
-{
- struct qib_ibport *ibp;
- enum ib_wc_status status;
- struct rvt_swqe *wqe;
- int ret = 0;
- u32 ack_psn;
- int diff;
-
- /*
- * Note that NAKs implicitly ACK outstanding SEND and RDMA write
- * requests and implicitly NAK RDMA read and atomic requests issued
- * before the NAK'ed request. The MSN won't include the NAK'ed
- * request but will include any ACK'ed requests.
- */
- ack_psn = psn;
- if (aeth >> IB_AETH_NAK_SHIFT)
- ack_psn--;
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- ibp = to_iport(qp->ibqp.device, qp->port_num);
-
- /*
- * The MSN might be for a later WQE than the PSN indicates so
- * only complete WQEs that the PSN finishes.
- */
- while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
- /*
- * RDMA_READ_RESPONSE_ONLY is a special case since
- * we want to generate completion events for everything
- * before the RDMA read, copy the data, then generate
- * the completion for the read.
- */
- if (wqe->wr.opcode == IB_WR_RDMA_READ &&
- opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
- diff == 0) {
- ret = 1;
- goto bail;
- }
- /*
- * If this request is an RDMA read or atomic, and the ACK is
- * for a later operation, this ACK NAKs the RDMA read or
- * atomic. In other words, only an RDMA_READ_LAST or ONLY
- * can ACK an RDMA read, and likewise for atomic ops. Note
- * that the NAK case can only happen if relaxed ordering is
- * used and requests are sent after an RDMA read or atomic
- * is sent but before the response is received.
- */
- if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
- (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
- ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
- (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
- /* Retry this request. */
- if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
- qp->r_flags |= RVT_R_RDMAR_SEQ;
- qib_restart_rc(qp, qp->s_last_psn + 1, 0);
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_SEND;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait,
- &rcd->qp_wait_list);
- }
- }
- /*
- * No need to process the ACK/NAK since we are
- * restarting an earlier request.
- */
- goto bail;
- }
- if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
- u64 *vaddr = wqe->sg_list[0].vaddr;
- *vaddr = val;
- }
- if (qp->s_num_rd_atomic &&
- (wqe->wr.opcode == IB_WR_RDMA_READ ||
- wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
- qp->s_num_rd_atomic--;
- /* Restart sending task if fence is complete */
- if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
- !qp->s_num_rd_atomic) {
- qp->s_flags &= ~(RVT_S_WAIT_FENCE |
- RVT_S_WAIT_ACK);
- qib_schedule_send(qp);
- } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
- qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
- RVT_S_WAIT_ACK);
- qib_schedule_send(qp);
- }
- }
- wqe = do_rc_completion(qp, wqe, ibp);
- if (qp->s_acked == qp->s_tail)
- break;
- }
-
- switch (aeth >> IB_AETH_NAK_SHIFT) {
- case 0: /* ACK */
- this_cpu_inc(*ibp->rvp.rc_acks);
- if (qp->s_acked != qp->s_tail) {
- /*
- * We are expecting more ACKs so
- * reset the retransmit timer.
- */
- rvt_mod_retry_timer(qp);
- /*
- * We can stop resending the earlier packets and
- * continue with the next packet the receiver wants.
- */
- if (qib_cmp24(qp->s_psn, psn) <= 0)
- reset_psn(qp, psn + 1);
- } else {
- /* No more acks - kill all timers */
- rvt_stop_rc_timers(qp);
- if (qib_cmp24(qp->s_psn, psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = psn + 1;
- }
- }
- if (qp->s_flags & RVT_S_WAIT_ACK) {
- qp->s_flags &= ~RVT_S_WAIT_ACK;
- qib_schedule_send(qp);
- }
- rvt_get_credit(qp, aeth);
- qp->s_rnr_retry = qp->s_rnr_retry_cnt;
- qp->s_retry = qp->s_retry_cnt;
- update_last_psn(qp, psn);
- return 1;
-
- case 1: /* RNR NAK */
- ibp->rvp.n_rnr_naks++;
- if (qp->s_acked == qp->s_tail)
- goto bail;
- if (qp->s_flags & RVT_S_WAIT_RNR)
- goto bail;
- if (qp->s_rnr_retry == 0) {
- status = IB_WC_RNR_RETRY_EXC_ERR;
- goto class_b;
- }
- if (qp->s_rnr_retry_cnt < 7)
- qp->s_rnr_retry--;
-
- /* The last valid PSN is the previous PSN. */
- update_last_psn(qp, psn - 1);
-
- ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
-
- reset_psn(qp, psn);
-
- qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
- rvt_stop_rc_timers(qp);
- rvt_add_rnr_timer(qp, aeth);
- return 0;
-
- case 3: /* NAK */
- if (qp->s_acked == qp->s_tail)
- goto bail;
- /* The last valid PSN is the previous PSN. */
- update_last_psn(qp, psn - 1);
- switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
- IB_AETH_CREDIT_MASK) {
- case 0: /* PSN sequence error */
- ibp->rvp.n_seq_naks++;
- /*
- * Back up to the responder's expected PSN.
- * Note that we might get a NAK in the middle of an
- * RDMA READ response which terminates the RDMA
- * READ.
- */
- qib_restart_rc(qp, psn, 0);
- qib_schedule_send(qp);
- break;
-
- case 1: /* Invalid Request */
- status = IB_WC_REM_INV_REQ_ERR;
- ibp->rvp.n_other_naks++;
- goto class_b;
-
- case 2: /* Remote Access Error */
- status = IB_WC_REM_ACCESS_ERR;
- ibp->rvp.n_other_naks++;
- goto class_b;
-
- case 3: /* Remote Operation Error */
- status = IB_WC_REM_OP_ERR;
- ibp->rvp.n_other_naks++;
-class_b:
- if (qp->s_last == qp->s_acked) {
- rvt_send_complete(qp, wqe, status);
- rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- }
- break;
-
- default:
- /* Ignore other reserved NAK error codes */
- goto reserved;
- }
- qp->s_retry = qp->s_retry_cnt;
- qp->s_rnr_retry = qp->s_rnr_retry_cnt;
- goto bail;
-
- default: /* 2: reserved */
-reserved:
- /* Ignore reserved NAK codes. */
- goto bail;
- }
-
-bail:
- rvt_stop_rc_timers(qp);
- return ret;
-}
-
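do_rc_ack() above keys off two AETH fields: a 3-bit type in the top bits (0 = ACK, 1 = RNR NAK, 2 = reserved, 3 = NAK) and the 5-bit credit/syndrome field below it. A rough decode sketch, assuming the shift/mask values from the generic IB header definitions (IB_AETH_NAK_SHIFT = 29, IB_AETH_CREDIT_SHIFT = 24, IB_AETH_CREDIT_MASK = 0x1f, IB_MSN_MASK = 0xffffff):

	/* Split an AETH into the three fields do_rc_ack() switches on. */
	static inline void aeth_decode(u32 aeth, u32 *type, u32 *credit, u32 *msn)
	{
		*type   = aeth >> 29;		/* 0 ACK, 1 RNR NAK, 3 NAK */
		*credit = (aeth >> 24) & 0x1f;	/* credits / RNR timer / NAK code */
		*msn    = aeth & 0xffffff;	/* message sequence number */
	}
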
-/*
- * We have seen an out-of-sequence RDMA read middle or last packet.
- * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
- */
-static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
- struct qib_ctxtdata *rcd)
-{
- struct rvt_swqe *wqe;
-
- /* Remove QP from retry timer */
- rvt_stop_rc_timers(qp);
-
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
-
- while (qib_cmp24(psn, wqe->lpsn) > 0) {
- if (wqe->wr.opcode == IB_WR_RDMA_READ ||
- wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
- break;
- wqe = do_rc_completion(qp, wqe, ibp);
- }
-
- ibp->rvp.n_rdma_seq++;
- qp->r_flags |= RVT_R_RDMAR_SEQ;
- qib_restart_rc(qp, qp->s_last_psn + 1, 0);
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_SEND;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
-}
-
-/**
- * qib_rc_rcv_resp - process an incoming RC response packet
- * @ibp: the port this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @hdrsize: the header length
- * @pmtu: the path MTU
- * @rcd: the context pointer
- *
- * This is called from qib_rc_rcv() to process an incoming RC response
- * packet for the given QP.
- * Called at interrupt level.
- */
-static void qib_rc_rcv_resp(struct qib_ibport *ibp,
- struct ib_other_headers *ohdr,
- void *data, u32 tlen,
- struct rvt_qp *qp,
- u32 opcode,
- u32 psn, u32 hdrsize, u32 pmtu,
- struct qib_ctxtdata *rcd)
-{
- struct rvt_swqe *wqe;
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- enum ib_wc_status status;
- unsigned long flags;
- int diff;
- u32 pad;
- u32 aeth;
- u64 val;
-
- if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
- /*
- * If the ACK'd PSN is on the SDMA busy list, try to make
- * progress to reclaim SDMA credits.
- */
- if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
- (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
-
- /*
- * If the send tasklet is not running, attempt to make
- * progress on the SDMA queue.
- */
- if (!(qp->s_flags & RVT_S_BUSY)) {
- /* Acquire SDMA Lock */
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- /* Invoke sdma make progress */
- qib_sdma_make_progress(ppd);
- /* Release SDMA Lock */
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- }
- }
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- goto ack_done;
-
- /* Ignore invalid responses. */
- if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
- goto ack_done;
-
- /* Ignore duplicate responses. */
- diff = qib_cmp24(psn, qp->s_last_psn);
- if (unlikely(diff <= 0)) {
- /* Update credits for "ghost" ACKs */
- if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
- aeth = be32_to_cpu(ohdr->u.aeth);
- if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
- rvt_get_credit(qp, aeth);
- }
- goto ack_done;
- }
-
- /*
- * Skip everything other than the PSN we expect if we are waiting
- * for a reply to a restarted RDMA read or atomic op.
- */
- if (qp->r_flags & RVT_R_RDMAR_SEQ) {
- if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
- goto ack_done;
- qp->r_flags &= ~RVT_R_RDMAR_SEQ;
- }
-
- if (unlikely(qp->s_acked == qp->s_tail))
- goto ack_done;
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- status = IB_WC_SUCCESS;
-
- switch (opcode) {
- case OP(ACKNOWLEDGE):
- case OP(ATOMIC_ACKNOWLEDGE):
- case OP(RDMA_READ_RESPONSE_FIRST):
- aeth = be32_to_cpu(ohdr->u.aeth);
- if (opcode == OP(ATOMIC_ACKNOWLEDGE))
- val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
- else
- val = 0;
- if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
- opcode != OP(RDMA_READ_RESPONSE_FIRST))
- goto ack_done;
- hdrsize += 4;
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
- /*
- * If this is a response to a resent RDMA read, we
- * have to be careful to copy the data to the right
- * location.
- */
- qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
- wqe, psn, pmtu);
- goto read_middle;
-
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- /* no AETH, no ACK */
- if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
- goto ack_seq_err;
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
-read_middle:
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto ack_len_err;
- if (unlikely(pmtu >= qp->s_rdma_read_len))
- goto ack_len_err;
-
- /*
- * We got a response so update the timeout.
- * 4.096 usec. * (1 << qp->timeout)
- */
- rvt_mod_retry_timer(qp);
- if (qp->s_flags & RVT_S_WAIT_ACK) {
- qp->s_flags &= ~RVT_S_WAIT_ACK;
- qib_schedule_send(qp);
- }
-
- if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
- qp->s_retry = qp->s_retry_cnt;
-
- /*
- * Update the RDMA receive state but do the copy w/o
- * holding the locks and blocking interrupts.
- */
- qp->s_rdma_read_len -= pmtu;
- update_last_psn(qp, psn);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- rvt_copy_sge(qp, &qp->s_rdma_read_sge,
- data, pmtu, false, false);
- goto bail;
-
- case OP(RDMA_READ_RESPONSE_ONLY):
- aeth = be32_to_cpu(ohdr->u.aeth);
- if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
- goto ack_done;
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /*
- * Check that the data size is >= 0 && <= pmtu.
- * Remember to account for the AETH header (4) and
- * ICRC (4).
- */
- if (unlikely(tlen < (hdrsize + pad + 8)))
- goto ack_len_err;
- /*
- * If this is a response to a resent RDMA read, we
- * have to be careful to copy the data to the right
- * location.
- */
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
- wqe, psn, pmtu);
- goto read_last;
-
- case OP(RDMA_READ_RESPONSE_LAST):
- /* ACKs READ req. */
- if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
- goto ack_seq_err;
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /*
- * Check that the data size is >= 1 && <= pmtu.
- * Remember to account for the AETH header (4) and
- * ICRC (4).
- */
- if (unlikely(tlen <= (hdrsize + pad + 8)))
- goto ack_len_err;
-read_last:
- tlen -= hdrsize + pad + 8;
- if (unlikely(tlen != qp->s_rdma_read_len))
- goto ack_len_err;
- aeth = be32_to_cpu(ohdr->u.aeth);
- rvt_copy_sge(qp, &qp->s_rdma_read_sge,
- data, tlen, false, false);
- WARN_ON(qp->s_rdma_read_sge.num_sge);
- (void) do_rc_ack(qp, aeth, psn,
- OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
- goto ack_done;
- }
-
-ack_op_err:
- status = IB_WC_LOC_QP_OP_ERR;
- goto ack_err;
-
-ack_seq_err:
- rdma_seq_err(qp, ibp, psn, rcd);
- goto ack_done;
-
-ack_len_err:
- status = IB_WC_LOC_LEN_ERR;
-ack_err:
- if (qp->s_last == qp->s_acked) {
- rvt_send_complete(qp, wqe, status);
- rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- }
-ack_done:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-bail:
- return;
-}
-
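For the length checks above, the magic constants fall out of the trailing headers: a middle read response carries the headers, exactly one pmtu of data, and the 4-byte ICRC, while ONLY/LAST responses also carry a 4-byte AETH. In outline:

	tlen (MIDDLE) = hdrsize + pmtu + 4		(ICRC)
	tlen (LAST)   = hdrsize + pad + data + 4 + 4	(AETH + ICRC)
	=> data       = tlen - hdrsize - pad - 8
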
-/**
- * qib_rc_rcv_error - process an incoming duplicate or error RC packet
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @diff: the difference between the PSN and the expected PSN
- * @rcd: the context pointer
- *
- * This is called from qib_rc_rcv() to process an unexpected
- * incoming RC packet for the given QP.
- * Called at interrupt level.
- * Return 1 if no more processing is needed; otherwise return 0 to
- * schedule a response to be sent.
- */
-static int qib_rc_rcv_error(struct ib_other_headers *ohdr,
- void *data,
- struct rvt_qp *qp,
- u32 opcode,
- u32 psn,
- int diff,
- struct qib_ctxtdata *rcd)
-{
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct rvt_ack_entry *e;
- unsigned long flags;
- u8 i, prev;
- int old_req;
-
- if (diff > 0) {
- /*
- * Packet sequence error.
- * A NAK will ACK earlier sends and RDMA writes.
- * Don't queue the NAK if we already sent one.
- */
- if (!qp->r_nak_state) {
- ibp->rvp.n_rc_seqnak++;
- qp->r_nak_state = IB_NAK_PSN_ERROR;
- /* Use the expected PSN. */
- qp->r_ack_psn = qp->r_psn;
- /*
- * Wait to send the sequence NAK until all packets
- * in the receive queue have been processed.
- * Otherwise, we end up propagating congestion.
- */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_NAK;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
- }
- goto done;
- }
-
- /*
- * Handle a duplicate request. Don't re-execute SEND, RDMA
- * write or atomic op. Don't NAK errors, just silently drop
- * the duplicate request. Note that r_sge, r_len, and
- * r_rcv_len may be in use so don't modify them.
- *
- * We are supposed to ACK the earliest duplicate PSN but we
- * can coalesce an outstanding duplicate ACK. We have to
- * send the earliest so that RDMA reads can be restarted at
- * the requester's expected PSN.
- *
- * First, find where this duplicate PSN falls within the
- * ACKs previously sent.
- * old_req is true if there is an older response that is scheduled
- * to be sent before sending this one.
- */
- e = NULL;
- old_req = 1;
- ibp->rvp.n_rc_dupreq++;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- for (i = qp->r_head_ack_queue; ; i = prev) {
- if (i == qp->s_tail_ack_queue)
- old_req = 0;
- if (i)
- prev = i - 1;
- else
- prev = QIB_MAX_RDMA_ATOMIC;
- if (prev == qp->r_head_ack_queue) {
- e = NULL;
- break;
- }
- e = &qp->s_ack_queue[prev];
- if (!e->opcode) {
- e = NULL;
- break;
- }
- if (qib_cmp24(psn, e->psn) >= 0) {
- if (prev == qp->s_tail_ack_queue &&
- qib_cmp24(psn, e->lpsn) <= 0)
- old_req = 0;
- break;
- }
- }
- switch (opcode) {
- case OP(RDMA_READ_REQUEST): {
- struct ib_reth *reth;
- u32 offset;
- u32 len;
-
- /*
- * If we didn't find the RDMA read request in the ack queue,
- * we can ignore this request.
- */
- if (!e || e->opcode != OP(RDMA_READ_REQUEST))
- goto unlock_done;
- /* RETH comes after BTH */
- reth = &ohdr->u.rc.reth;
- /*
- * Address range must be a subset of the original
- * request and start on pmtu boundaries.
- * We reuse the old ack_queue slot since the requester
- * should not back up and request an earlier PSN for the
- * same request.
- */
- offset = ((psn - e->psn) & QIB_PSN_MASK) *
- qp->pmtu;
- len = be32_to_cpu(reth->length);
- if (unlikely(offset + len != e->rdma_sge.sge_length))
- goto unlock_done;
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- if (len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
- IB_ACCESS_REMOTE_READ);
- if (unlikely(!ok))
- goto unlock_done;
- } else {
- e->rdma_sge.vaddr = NULL;
- e->rdma_sge.length = 0;
- e->rdma_sge.sge_length = 0;
- }
- e->psn = psn;
- if (old_req)
- goto unlock_done;
- qp->s_tail_ack_queue = prev;
- break;
- }
-
- case OP(COMPARE_SWAP):
- case OP(FETCH_ADD): {
- /*
- * If we didn't find the atomic request in the ack queue
- * or the send tasklet is already backed up to send an
- * earlier entry, we can ignore this request.
- */
- if (!e || e->opcode != (u8) opcode || old_req)
- goto unlock_done;
- qp->s_tail_ack_queue = prev;
- break;
- }
-
- default:
- /*
- * Ignore this operation if it doesn't request an ACK
- * or an earlier RDMA read or atomic is going to be resent.
- */
- if (!(psn & IB_BTH_REQ_ACK) || old_req)
- goto unlock_done;
- /*
- * Resend the most recent ACK if this request is
- * after all the previous RDMA reads and atomics.
- */
- if (i == qp->r_head_ack_queue) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- qp->r_nak_state = 0;
- qp->r_ack_psn = qp->r_psn - 1;
- goto send_ack;
- }
- /*
- * Try to send a simple ACK to work around a Mellanox bug
- * which doesn't accept an RDMA read response or atomic
- * response as an ACK for earlier SENDs or RDMA writes.
- */
- if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- qp->r_nak_state = 0;
- qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
- goto send_ack;
- }
- /*
- * Resend the RDMA read or atomic op which
- * ACKs this duplicate request.
- */
- qp->s_tail_ack_queue = i;
- break;
- }
- qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags |= RVT_S_RESP_PENDING;
- qp->r_nak_state = 0;
- qib_schedule_send(qp);
-
-unlock_done:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
- return 1;
-
-send_ack:
- return 0;
-}
-
-static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
-{
- unsigned next;
-
- next = n + 1;
- if (next > QIB_MAX_RDMA_ATOMIC)
- next = 0;
- qp->s_tail_ack_queue = next;
- qp->s_ack_state = OP(ACKNOWLEDGE);
-}
-
-/**
- * qib_rc_rcv - process an incoming RC packet
- * @rcd: the context pointer
- * @hdr: the header of this packet
- * @has_grh: true if the header has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- *
- * This is called from qib_qp_rcv() to process an incoming RC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
-{
- struct qib_ibport *ibp = &rcd->ppd->ibport_data;
- struct ib_other_headers *ohdr;
- u32 opcode;
- u32 hdrsize;
- u32 psn;
- u32 pad;
- struct ib_wc wc;
- u32 pmtu = qp->pmtu;
- int diff;
- struct ib_reth *reth;
- unsigned long flags;
- int ret;
-
- /* Check for GRH */
- if (!has_grh) {
- ohdr = &hdr->u.oth;
- hdrsize = 8 + 12; /* LRH + BTH */
- } else {
- ohdr = &hdr->u.l.oth;
- hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
- }
-
- opcode = be32_to_cpu(ohdr->bth[0]);
- if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
- return;
-
- psn = be32_to_cpu(ohdr->bth[2]);
- opcode >>= 24;
-
- /*
- * Process responses (ACKs) before anything else. Note that the
- * packet sequence number will be for something in the send work
- * queue rather than the expected receive packet sequence number.
- * In other words, this QP is the requester.
- */
- if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
- opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
- qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
- hdrsize, pmtu, rcd);
- return;
- }
-
- /* Compute 24 bits worth of difference. */
- diff = qib_cmp24(psn, qp->r_psn);
- if (unlikely(diff)) {
- if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
- return;
- goto send_ack;
- }
-
- /* Check for opcode sequence errors. */
- switch (qp->r_state) {
- case OP(SEND_FIRST):
- case OP(SEND_MIDDLE):
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE))
- break;
- goto nack_inv;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_MIDDLE):
- if (opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- break;
- goto nack_inv;
-
- default:
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- goto nack_inv;
- /*
- * Note that it is up to the requester to not send a new
- * RDMA read or atomic operation before receiving an ACK
- * for the previous operation.
- */
- break;
- }
-
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
- rvt_comm_est(qp);
-
- /* OK, process the packet. */
- switch (opcode) {
- case OP(SEND_FIRST):
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- qp->r_rcv_len = 0;
- fallthrough;
- case OP(SEND_MIDDLE):
- case OP(RDMA_WRITE_MIDDLE):
-send_middle:
- /* Check for invalid length PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto nack_inv;
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len))
- goto nack_inv;
- rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
- break;
-
- case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
- /* consume RWQE */
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- goto send_last_imm;
-
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- qp->r_rcv_len = 0;
- if (opcode == OP(SEND_ONLY))
- goto no_immediate_data;
- fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */
- case OP(SEND_LAST_WITH_IMMEDIATE):
-send_last_imm:
- wc.ex.imm_data = ohdr->u.imm_data;
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
- goto send_last;
- case OP(SEND_LAST):
- case OP(RDMA_WRITE_LAST):
-no_immediate_data:
- wc.wc_flags = 0;
- wc.ex.imm_data = 0;
-send_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto nack_inv;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- wc.byte_len = tlen + qp->r_rcv_len;
- if (unlikely(wc.byte_len > qp->r_len))
- goto nack_inv;
- rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
- rvt_put_ss(&qp->r_sge);
- qp->r_msn++;
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- break;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
- wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
- /* zero fields that are N/A */
- wc.vendor_err = 0;
- wc.pkey_index = 0;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
- /* Signal completion event if the solicited bit is set. */
- rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
- break;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto nack_inv;
- /* consume RWQE */
- reth = &ohdr->u.rc.reth;
- hdrsize += sizeof(*reth);
- qp->r_len = be32_to_cpu(reth->length);
- qp->r_rcv_len = 0;
- qp->r_sge.sg_list = NULL;
- if (qp->r_len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey & NAK */
- ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
- rkey, IB_ACCESS_REMOTE_WRITE);
- if (unlikely(!ok))
- goto nack_acc;
- qp->r_sge.num_sge = 1;
- } else {
- qp->r_sge.num_sge = 0;
- qp->r_sge.sge.mr = NULL;
- qp->r_sge.sge.vaddr = NULL;
- qp->r_sge.sge.length = 0;
- qp->r_sge.sge.sge_length = 0;
- }
- if (opcode == OP(RDMA_WRITE_FIRST))
- goto send_middle;
- else if (opcode == OP(RDMA_WRITE_ONLY))
- goto no_immediate_data;
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto nack_op_err;
- if (!ret) {
- rvt_put_ss(&qp->r_sge);
- goto rnr_nak;
- }
- wc.ex.imm_data = ohdr->u.rc.imm_data;
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
- goto send_last;
-
- case OP(RDMA_READ_REQUEST): {
- struct rvt_ack_entry *e;
- u32 len;
- u8 next;
-
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto nack_inv;
- next = qp->r_head_ack_queue + 1;
- /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
- if (next > QIB_MAX_RDMA_ATOMIC)
- next = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
- if (unlikely(next == qp->s_tail_ack_queue)) {
- if (!qp->s_ack_queue[next].sent)
- goto nack_inv_unlck;
- qib_update_ack_queue(qp, next);
- }
- e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- reth = &ohdr->u.rc.reth;
- len = be32_to_cpu(reth->length);
- if (len) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey & NAK */
- ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
- rkey, IB_ACCESS_REMOTE_READ);
- if (unlikely(!ok))
- goto nack_acc_unlck;
- /*
- * Update the next expected PSN. We add 1 later
- * below, so only add the remainder here.
- */
- qp->r_psn += rvt_div_mtu(qp, len - 1);
- } else {
- e->rdma_sge.mr = NULL;
- e->rdma_sge.vaddr = NULL;
- e->rdma_sge.length = 0;
- e->rdma_sge.sge_length = 0;
- }
- e->opcode = opcode;
- e->sent = 0;
- e->psn = psn;
- e->lpsn = qp->r_psn;
- /*
- * We need to increment the MSN here instead of when we
- * finish sending the result since a duplicate request would
- * increment it more than once.
- */
- qp->r_msn++;
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_nak_state = 0;
- qp->r_head_ack_queue = next;
-
- /* Schedule the send tasklet. */
- qp->s_flags |= RVT_S_RESP_PENDING;
- qib_schedule_send(qp);
-
- goto sunlock;
- }
-
- case OP(COMPARE_SWAP):
- case OP(FETCH_ADD): {
- struct ib_atomic_eth *ateth;
- struct rvt_ack_entry *e;
- u64 vaddr;
- atomic64_t *maddr;
- u64 sdata;
- u32 rkey;
- u8 next;
-
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_inv;
- next = qp->r_head_ack_queue + 1;
- if (next > QIB_MAX_RDMA_ATOMIC)
- next = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
- if (unlikely(next == qp->s_tail_ack_queue)) {
- if (!qp->s_ack_queue[next].sent)
- goto nack_inv_unlck;
- qib_update_ack_queue(qp, next);
- }
- e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- ateth = &ohdr->u.atomic_eth;
- vaddr = get_ib_ateth_vaddr(ateth);
- if (unlikely(vaddr & (sizeof(u64) - 1)))
- goto nack_inv_unlck;
- rkey = be32_to_cpu(ateth->rkey);
- /* Check rkey & NAK */
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- vaddr, rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_acc_unlck;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
- sdata = get_ib_ateth_swap(ateth);
- e->atomic_data = (opcode == OP(FETCH_ADD)) ?
- (u64) atomic64_add_return(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
- get_ib_ateth_compare(ateth),
- sdata);
- rvt_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- e->opcode = opcode;
- e->sent = 0;
- e->psn = psn;
- e->lpsn = psn;
- qp->r_msn++;
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_nak_state = 0;
- qp->r_head_ack_queue = next;
-
- /* Schedule the send tasklet. */
- qp->s_flags |= RVT_S_RESP_PENDING;
- qib_schedule_send(qp);
-
- goto sunlock;
- }
-
- default:
- /* NAK unknown opcodes. */
- goto nack_inv;
- }
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_ack_psn = psn;
- qp->r_nak_state = 0;
- /* Send an ACK if requested or required. */
- if (psn & (1 << 31))
- goto send_ack;
- return;
-
-rnr_nak:
- qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
- qp->r_ack_psn = qp->r_psn;
- /* Queue RNR NAK for later */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_NAK;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
- return;
-
-nack_op_err:
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
- qp->r_ack_psn = qp->r_psn;
- /* Queue NAK for later */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_NAK;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
- return;
-
-nack_inv_unlck:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_inv:
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- qp->r_nak_state = IB_NAK_INVALID_REQUEST;
- qp->r_ack_psn = qp->r_psn;
- /* Queue NAK for later */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_NAK;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
- return;
-
-nack_acc_unlck:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_acc:
- rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
- qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
- qp->r_ack_psn = qp->r_psn;
-send_ack:
- qib_send_rc_ack(qp);
- return;
-
-sunlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-}
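One subtlety in the COMPARE_SWAP/FETCH_ADD arm above: atomic64_add_return() returns the new value, while the FETCH_ADD response must carry the old one, hence the "- sdata" correction; cmpxchg() already returns the prior contents whether or not it swaps. A minimal sketch of the two response-value computations (hypothetical helper names):

	/* FETCH_ADD must return the pre-add value; add_return yields the new. */
	static inline u64 fetch_add_old(atomic64_t *maddr, u64 sdata)
	{
		return (u64)atomic64_add_return(sdata, maddr) - sdata;
	}

	/* cmpxchg() returns the previous contents in either case. */
	static inline u64 cmp_swap_old(u64 *vaddr, u64 compare, u64 swap)
	{
		return cmpxchg(vaddr, compare, swap);
	}
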
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
deleted file mode 100644
index 1fa21938f310..000000000000
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <rdma/ib_smi.h>
-
-#include "qib.h"
-#include "qib_mad.h"
-
-/*
- * Switch to alternate path.
- * The QP s_lock should be held and interrupts disabled.
- */
-void qib_migrate_qp(struct rvt_qp *qp)
-{
- struct ib_event ev;
-
- qp->s_mig_state = IB_MIG_MIGRATED;
- qp->remote_ah_attr = qp->alt_ah_attr;
- qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
- qp->s_pkey_index = qp->s_alt_pkey_index;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_PATH_MIG;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-}
-
-static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
-{
- if (!index) {
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- return ppd->guid;
- }
- return ibp->guids[index - 1];
-}
-
-static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
-{
- return (gid->global.interface_id == id &&
- (gid->global.subnet_prefix == gid_prefix ||
- gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
-}
-
-/*
- * This should be called with the QP r_lock held.
- *
- * The s_lock will be acquired around the qib_migrate_qp() call.
- */
-int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
- int has_grh, struct rvt_qp *qp, u32 bth0)
-{
- __be64 guid;
- unsigned long flags;
-
- if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
- if (!has_grh) {
- if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
- IB_AH_GRH)
- goto err;
- } else {
- const struct ib_global_route *grh;
-
- if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
- IB_AH_GRH))
- goto err;
- grh = rdma_ah_read_grh(&qp->alt_ah_attr);
- guid = get_sguid(ibp, grh->sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid,
- ibp->rvp.gid_prefix, guid))
- goto err;
- if (!gid_ok(&hdr->u.l.grh.sgid,
- grh->dgid.global.subnet_prefix,
- grh->dgid.global.interface_id))
- goto err;
- }
- if (!qib_pkey_ok((u16)bth0,
- qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
- qib_bad_pkey(ibp,
- (u16)bth0,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- 0, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
- goto err;
- }
- /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
- if ((be16_to_cpu(hdr->lrh[3]) !=
- rdma_ah_get_dlid(&qp->alt_ah_attr)) ||
- ppd_from_ibp(ibp)->port !=
- rdma_ah_get_port_num(&qp->alt_ah_attr))
- goto err;
- spin_lock_irqsave(&qp->s_lock, flags);
- qib_migrate_qp(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- } else {
- if (!has_grh) {
- if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
- IB_AH_GRH)
- goto err;
- } else {
- const struct ib_global_route *grh;
-
- if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
- IB_AH_GRH))
- goto err;
- grh = rdma_ah_read_grh(&qp->remote_ah_attr);
- guid = get_sguid(ibp, grh->sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid,
- ibp->rvp.gid_prefix, guid))
- goto err;
- if (!gid_ok(&hdr->u.l.grh.sgid,
- grh->dgid.global.subnet_prefix,
- grh->dgid.global.interface_id))
- goto err;
- }
- if (!qib_pkey_ok((u16)bth0,
- qib_get_pkey(ibp, qp->s_pkey_index))) {
- qib_bad_pkey(ibp,
- (u16)bth0,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- 0, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
- goto err;
- }
- /* Validate the SLID. See Ch. 9.6.1.5 */
- if (be16_to_cpu(hdr->lrh[3]) !=
- rdma_ah_get_dlid(&qp->remote_ah_attr) ||
- ppd_from_ibp(ibp)->port != qp->port_num)
- goto err;
- if (qp->s_mig_state == IB_MIG_REARM &&
- !(bth0 & IB_BTH_MIG_REQ))
- qp->s_mig_state = IB_MIG_ARMED;
- }
-
- return 0;
-
-err:
- return 1;
-}
-
-/**
- * qib_make_grh - construct a GRH header
- * @ibp: a pointer to the IB port
- * @hdr: a pointer to the GRH header being constructed
- * @grh: the global route address to send to
- * @hwords: the number of 32 bit words of header being sent
- * @nwords: the number of 32 bit words of data being sent
- *
- * Return the size of the header in 32 bit words.
- */
-u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
- const struct ib_global_route *grh, u32 hwords, u32 nwords)
-{
- hdr->version_tclass_flow =
- cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
- (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
- (grh->flow_label << IB_GRH_FLOW_SHIFT));
- hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
- /* next_hdr is defined by C8-7 in ch. 8.4.1 */
- hdr->next_hdr = IB_GRH_NEXT_HDR;
- hdr->hop_limit = grh->hop_limit;
- /* The SGID is 32-bit aligned. */
- hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
- if (!grh->sgid_index)
- hdr->sgid.global.interface_id = ppd_from_ibp(ibp)->guid;
- else if (grh->sgid_index < QIB_GUIDS_PER_PORT)
- hdr->sgid.global.interface_id = ibp->guids[grh->sgid_index - 1];
- hdr->dgid = grh->dgid;
-
- /* GRH header size in 32-bit words. */
- return sizeof(struct ib_grh) / sizeof(u32);
-}
-
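The paylen computation above counts everything that follows the GRH: hwords on entry covers the LRH plus the post-GRH headers (the GRH itself is not yet included), so subtracting the 2-word LRH leaves BTH onward, and nwords of data plus the ICRC word complete it. Worked for the bare ACK built in qib_send_rc_ack() (hwords = 6, nwords = 0):

	paylen = (6 - 2 + 0 + 1) words << 2 = 20 bytes
	       = BTH (12) + AETH (4) + ICRC (4)
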
-void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- u16 lrh0;
- u32 nwords;
- u32 extra_bytes;
-
- /* Construct the header. */
- extra_bytes = -qp->s_cur_size & 3;
- nwords = (qp->s_cur_size + extra_bytes) >> 2;
- lrh0 = QIB_LRH_BTH;
- if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
- qp->s_hdrwords +=
- qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
- rdma_ah_read_grh(&qp->remote_ah_attr),
- qp->s_hdrwords, nwords);
- lrh0 = QIB_LRH_GRH;
- }
- lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
- rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
- priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
- priv->s_hdr->lrh[1] =
- cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
- priv->s_hdr->lrh[2] =
- cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- priv->s_hdr->lrh[3] =
- cpu_to_be16(ppd_from_ibp(ibp)->lid |
- rdma_ah_get_path_bits(&qp->remote_ah_attr));
- bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
- bth0 |= extra_bytes << 20;
- if (qp->s_mig_state == IB_MIG_MIGRATED)
- bth0 |= IB_BTH_MIG_REQ;
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
- ohdr->bth[2] = cpu_to_be32(bth2);
- this_cpu_inc(ibp->pmastats->n_unicast_xmit);
-}
-
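The "extra_bytes = -qp->s_cur_size & 3" line above is the usual round-up idiom: for a payload of len bytes, -len & 3 gives the pad bytes needed to reach the next 32-bit boundary. As a sketch (hypothetical helper name):

	/* Pad bytes to a 4-byte boundary: 0->0, 1->3, 2->2, 3->1. */
	static inline u32 pad_to_dword(u32 len)
	{
		return -len & 3;
	}
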
-void _qib_do_send(struct work_struct *work)
-{
- struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
- s_work);
- struct rvt_qp *qp = priv->owner;
-
- qib_do_send(qp);
-}
-
-/**
- * qib_do_send - perform a send on a QP
- * @qp: pointer to the QP
- *
- * Process entries in the send work queue until credit or queue is
- * exhausted. Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, two threads could send packets out of order.
- */
-void qib_do_send(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- int (*make_req)(struct rvt_qp *qp, unsigned long *flags);
- unsigned long flags;
-
- if ((qp->ibqp.qp_type == IB_QPT_RC ||
- qp->ibqp.qp_type == IB_QPT_UC) &&
- (rdma_ah_get_dlid(&qp->remote_ah_attr) &
- ~((1 << ppd->lmc) - 1)) == ppd->lid) {
- rvt_ruc_loopback(qp);
- return;
- }
-
- if (qp->ibqp.qp_type == IB_QPT_RC)
- make_req = qib_make_rc_req;
- else if (qp->ibqp.qp_type == IB_QPT_UC)
- make_req = qib_make_uc_req;
- else
- make_req = qib_make_ud_req;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if (!qib_send_ok(qp)) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return;
- }
-
- qp->s_flags |= RVT_S_BUSY;
-
- do {
- /* Check for a constructed packet to be sent. */
- if (qp->s_hdrwords != 0) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- /*
- * If the packet cannot be sent now, return and
- * the send tasklet will be woken up later.
- */
- if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
- qp->s_cur_sge, qp->s_cur_size))
- return;
- /* Record that s_hdr is empty. */
- qp->s_hdrwords = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
- }
- } while (make_req(qp, &flags));
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
-}
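The loopback test at the top of qib_do_send() masks off the low LMC bits of the destination LID before comparing with the port's base LID, since a port answers to 2^LMC consecutive LIDs. The same check pulled out as a sketch (hypothetical helper name):

	/* True if dlid falls in this port's 2^lmc LID range starting at lid. */
	static inline bool lid_is_local(u16 dlid, u16 lid, u8 lmc)
	{
		return (dlid & ~((1 << lmc) - 1)) == lid;
	}
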
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
deleted file mode 100644
index 40bc0a34273e..000000000000
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ /dev/null
@@ -1,1445 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the SerDes
- * on the QLogic_IB 7220 chip.
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/firmware.h>
-
-#include "qib.h"
-#include "qib_7220.h"
-
-#define SD7220_FW_NAME "qlogic/sd7220.fw"
-MODULE_FIRMWARE(SD7220_FW_NAME);
-
-/*
- * Same as in qib_iba7220.c, but just the registers needed here.
- * The whole set could move to qib_7220.h, but it is better kept
- * local.
- */
-#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
-#define kr_hwerrclear KREG_IDX(HwErrClear)
-#define kr_hwerrmask KREG_IDX(HwErrMask)
-#define kr_hwerrstatus KREG_IDX(HwErrStatus)
-#define kr_ibcstatus KREG_IDX(IBCStatus)
-#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
-#define kr_scratch KREG_IDX(Scratch)
-#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
-/* these are used only here, not in qib_iba7220.c */
-#define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
-#define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
-#define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
-#define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
-#define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
-
-/*
- * The IBSerDesMappTable is a memory that holds values to be stored in
- * various SerDes registers by IBC.
- */
-#define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
-
-/*
- * The values below are used for the sdnum parameter, selecting one of
- * the two sections used for PCIe, or the single SerDes used for IB.
- */
-#define PCIE_SERDES0 0
-#define PCIE_SERDES1 1
-
-/*
- * The EPB requires addressing in a particular form. EPB_LOC() is intended
- * to make the #defines below a little more readable.
- */
-#define EPB_ADDR_SHF 8
-#define EPB_LOC(chn, elt, reg) \
- (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
- EPB_ADDR_SHF)
-#define EPB_IB_QUAD0_CS_SHF (25)
-#define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF)
-#define EPB_IB_UC_CS_SHF (26)
-#define EPB_PCIE_UC_CS_SHF (27)
-#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
-
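A worked expansion of the encoding, using the location that IB_MPREG5 (defined further down) is built from: channel 6, element 0, register 0xE.

	EPB_LOC(6, 0, 0xE) = ((0x0 | (6 << 4) | (0xE << 9)) << 8)
	                   = ((0x60 | 0x1c00) << 8)
	                   = 0x1c6000
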
-/* Forward declarations. */
-static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
- u32 data, u32 mask);
-static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
- int mask);
-static int qib_sd_trimdone_poll(struct qib_devdata *dd);
-static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
-static int qib_sd_setvals(struct qib_devdata *dd);
-static int qib_sd_early(struct qib_devdata *dd);
-static int qib_sd_dactrim(struct qib_devdata *dd);
-static int qib_internal_presets(struct qib_devdata *dd);
-/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
-static int qib_sd_trimself(struct qib_devdata *dd, int val);
-static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
-static int qib_sd7220_ib_load(struct qib_devdata *dd,
- const struct firmware *fw);
-static int qib_sd7220_ib_vfy(struct qib_devdata *dd,
- const struct firmware *fw);
-
-/*
- * Below keeps track of whether the "once per power-on" initialization has
- * been done, because uC code Version 1.32.17 or higher allows the uC to
- * be reset at will, and Automatic Equalization may require it. So the
- * state of the reset "pin" is no longer valid. Instead, we check whether
- * the actual uC code has been loaded.
- */
-static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd,
- const struct firmware *fw)
-{
- struct qib_devdata *dd = ppd->dd;
-
- if (!dd->cspec->serdes_first_init_done &&
- qib_sd7220_ib_vfy(dd, fw) > 0)
- dd->cspec->serdes_first_init_done = 1;
- return dd->cspec->serdes_first_init_done;
-}
-
-/* repeat #define for local use. "Real" #define is in qib_iba7220.c */
-#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
-#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
-#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
-#define UC_PAR_CLR_D 8
-#define UC_PAR_CLR_M 0xC
-#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
-#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
-
-void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
-{
- int ret;
-
- /* clear, then re-enable parity errs */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
- UC_PAR_CLR_D, UC_PAR_CLR_M);
- if (ret < 0) {
- qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
- goto bail;
- }
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
- UC_PAR_CLR_M);
-
- qib_read_kreg32(dd, kr_scratch);
- udelay(4);
- qib_write_kreg(dd, kr_hwerrclear,
- QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
- qib_read_kreg32(dd, kr_scratch);
-bail:
- return;
-}
-
-/*
- * After a reset or other unusual event, the epb interface may need
- * to be re-synchronized between the host and the uC.
- * returns <0 for failure to resync within IBSD_RESYNC_TRIES (not expected)
- */
-#define IBSD_RESYNC_TRIES 3
-#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
-#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
-
-static int qib_resync_ibepb(struct qib_devdata *dd)
-{
- int ret, pat, tries, chn;
- u32 loc;
-
- ret = -1;
- chn = 0;
- for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
- loc = IB_PGUDP(chn);
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
- if (ret < 0) {
- qib_dev_err(dd, "Failed read in resync\n");
- continue;
- }
- if (ret != 0xF0 && ret != 0x55 && tries == 0)
- qib_dev_err(dd, "unexpected pattern in resync\n");
- pat = ret ^ 0xA5; /* alternate F0 and 55 */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
- if (ret < 0) {
- qib_dev_err(dd, "Failed write in resync\n");
- continue;
- }
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
- if (ret < 0) {
- qib_dev_err(dd, "Failed re-read in resync\n");
- continue;
- }
- if (ret != pat) {
- qib_dev_err(dd, "Failed compare1 in resync\n");
- continue;
- }
- loc = IB_CMUDONE(chn);
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
- if (ret < 0) {
- qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
- continue;
- }
- if ((ret & 0x70) != ((chn << 4) | 0x40)) {
- qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
- ret, chn);
- continue;
- }
- if (++chn == 4)
- break; /* Success */
- }
- return (ret > 0) ? 0 : ret;
-}
-
-/*
- * Localize the stuff that should be done to change IB uC reset;
- * returns <0 for errors.
- */
-static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
-{
- u64 rst_val;
- int ret = 0;
- unsigned long flags;
-
- rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
- if (assert_rst) {
- /*
- * Vendor recommends "interrupting" uC before reset, to
- * minimize possible glitches.
- */
- spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
- epb_access(dd, IB_7220_SERDES, 1);
- rst_val |= 1ULL;
- /* Squelch possible parity error from _asserting_ reset */
- qib_write_kreg(dd, kr_hwerrmask,
- dd->cspec->hwerrmask &
- ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
- qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
- /* flush write, delay to ensure it took effect */
- qib_read_kreg32(dd, kr_scratch);
- udelay(2);
- /* once it's reset, can remove interrupt */
- epb_access(dd, IB_7220_SERDES, -1);
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- } else {
- /*
- * Before we de-assert reset, we need to deal with
- * possible glitch on the Parity-error line.
- * Suppress it around the reset, both in chip-level
- * hwerrmask and in IB uC control reg. uC will allow
- * it again during startup.
- */
- u64 val;
-
- rst_val &= ~(1ULL);
- qib_write_kreg(dd, kr_hwerrmask,
- dd->cspec->hwerrmask &
- ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
-
- ret = qib_resync_ibepb(dd);
- if (ret < 0)
- qib_dev_err(dd, "unable to re-sync IB EPB\n");
-
- /* set uC control regs to suppress parity errs */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
- if (ret < 0)
- goto bail;
- /* IB uC code past Version 1.32.17 allows suppression of wdog */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
- 0x80);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to set WDOG disable\n");
- goto bail;
- }
- qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
- /* flush write, delay for startup */
- qib_read_kreg32(dd, kr_scratch);
- udelay(1);
- /* clear, then re-enable parity errs */
- qib_sd7220_clr_ibpar(dd);
- val = qib_read_kreg64(dd, kr_hwerrstatus);
- if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
- qib_dev_err(dd, "IBUC Parity still set after RST\n");
- dd->cspec->hwerrmask &=
- ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
- }
- qib_write_kreg(dd, kr_hwerrmask,
- dd->cspec->hwerrmask);
- }
-
-bail:
- return ret;
-}
-
-static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
- const char *where)
-{
- int ret, chn, baduns;
- u64 val;
-
- if (!where)
- where = "?";
-
- /* give time for reset to settle out in EPB */
- udelay(2);
-
- ret = qib_resync_ibepb(dd);
- if (ret < 0)
- qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
-
- /* Do "sacrificial read" to get EPB in sane state after reset */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
- if (ret < 0)
- qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
-
- /* Check/show "summary" Trim-done bit in IBCStatus */
- val = qib_read_kreg64(dd, kr_ibcstatus);
- if (!(val & (1ULL << 11)))
- qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
- /*
- * Do "dummy read/mod/wr" to get EPB in sane state after reset
- * The default value for MPREG6 is 0.
- */
- udelay(2);
-
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
- if (ret < 0)
- qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
- udelay(10);
-
- baduns = 0;
-
- for (chn = 3; chn >= 0; --chn) {
- /* Read CTRL reg for each channel to check TRIMDONE */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_CTRL2(chn), 0, 0);
- if (ret < 0)
- qib_dev_err(dd,
- "Failed checking TRIMDONE, chn %d (%s)\n",
- chn, where);
-
- if (!(ret & 0x10)) {
- int probe;
-
- baduns |= (1 << chn);
- qib_dev_err(dd,
- "TRIMDONE cleared on chn %d (%02X). (%s)\n",
- chn, ret, where);
- probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_PGUDP(0), 0, 0);
- qib_dev_err(dd, "probe is %d (%02X)\n",
- probe, probe);
- probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_CTRL2(chn), 0, 0);
- qib_dev_err(dd, "re-read: %d (%02X)\n",
- probe, probe);
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_CTRL2(chn), 0x10, 0x10);
- if (ret < 0)
- qib_dev_err(dd,
- "Err on TRIMDONE rewrite1\n");
- }
- }
- for (chn = 3; chn >= 0; --chn) {
- /* Read CTRL reg for each channel to check TRIMDONE */
- if (baduns & (1 << chn)) {
- qib_dev_err(dd,
- "Resetting TRIMDONE on chn %d (%s)\n",
- chn, where);
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_CTRL2(chn), 0x10, 0x10);
- if (ret < 0)
- qib_dev_err(dd,
- "Failed re-setting TRIMDONE, chn %d (%s)\n",
- chn, where);
- }
- }
-}
-
-/*
- * Below is the portion of IBA7220-specific bringup_serdes() that actually
- * deals with registers and memory within the SerDes itself.
- * Post IB uC code version 1.32.17, was_reset being 1 is not really
- * informative, so we double-check.
- */
-int qib_sd7220_init(struct qib_devdata *dd)
-{
- const struct firmware *fw;
- int ret = 1; /* default to failure */
- int first_reset, was_reset;
-
- /* SERDES MPU reset recorded in D0 */
- was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
- if (!was_reset) {
- /* entered with reset not asserted, we need to do it */
- qib_ibsd_reset(dd, 1);
- qib_sd_trimdone_monitor(dd, "Driver-reload");
- }
-
- ret = request_firmware(&fw, SD7220_FW_NAME, &dd->pcidev->dev);
- if (ret) {
- qib_dev_err(dd, "Failed to load IB SERDES image\n");
- goto done;
- }
-
- /* Substitute our deduced value for was_reset */
- ret = qib_ibsd_ucode_loaded(dd->pport, fw);
- if (ret < 0)
- goto bail;
-
- first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
- /*
- * Alter some regs per vendor latest doc, reset-defaults
- * are not right for IB.
- */
- ret = qib_sd_early(dd);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
- goto bail;
- }
- /*
- * Set DAC manual trim IB.
- * We only do this once after chip has been reset (usually
- * same as once per system boot).
- */
- if (first_reset) {
- ret = qib_sd_dactrim(dd);
- if (ret < 0) {
- qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
- goto bail;
- }
- }
- /*
- * Set various registers (DDS and RXEQ) that will be
- * controlled by IBC (in 1.2 mode) to reasonable preset values
- * Calling the "internal" version avoids the "check for needed"
- * and "trimdone monitor" that might be counter-productive.
- */
- ret = qib_internal_presets(dd);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to set IB SERDES presets\n");
- goto bail;
- }
- ret = qib_sd_trimself(dd, 0x80);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
- goto bail;
- }
-
- /* Load image, then try to verify */
- ret = 0; /* Assume success */
- if (first_reset) {
- int vfy;
- int trim_done;
-
- ret = qib_sd7220_ib_load(dd, fw);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to load IB SERDES image\n");
- goto bail;
- } else {
- /* Loaded image, try to verify */
- vfy = qib_sd7220_ib_vfy(dd, fw);
- if (vfy != ret) {
- qib_dev_err(dd, "SERDES PRAM VFY failed\n");
- goto bail;
- } /* end if verified */
- } /* end if loaded */
-
- /*
- * Loaded and verified. Almost good...
- * hold "success" in ret
- */
- ret = 0;
- /*
- * Prev steps all worked, continue bringup
- * De-assert RESET to uC, only in first reset, to allow
- * trimming.
- *
- * Since our default setup sets START_EQ1 to
- * PRESET, we need to clear that for this very first run.
- */
- ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
- if (ret < 0) {
- qib_dev_err(dd, "Failed clearing START_EQ1\n");
- goto bail;
- }
-
- qib_ibsd_reset(dd, 0);
- /*
- * If this is not the first reset, trimdone should be set
- * already. We may need to check this.
- */
- trim_done = qib_sd_trimdone_poll(dd);
- /*
- * Whether or not trimdone succeeded, we need to put the
- * uC back into reset to avoid a possible fight with the
- * IBC state-machine.
- */
- qib_ibsd_reset(dd, 1);
-
- if (!trim_done) {
- qib_dev_err(dd, "No TRIMDONE seen\n");
- goto bail;
- }
- /*
- * DEBUG: check each time we reset if trimdone bits have
- * gotten cleared, and re-set them.
- */
- qib_sd_trimdone_monitor(dd, "First-reset");
- /* Remember so we do not re-do the load, dactrim, etc. */
- dd->cspec->serdes_first_init_done = 1;
- }
- /*
- * setup for channel training and load values for
- * RxEq and DDS in tables used by IBC in IB1.2 mode
- */
- ret = 0;
- if (qib_sd_setvals(dd) >= 0)
- goto done;
-bail:
- ret = 1;
-done:
- /* start relock timer regardless, but start at 1 second */
- set_7220_relock_poll(dd, -1);
-
- release_firmware(fw);
- return ret;
-}
-
-#define EPB_ACC_REQ 1
-#define EPB_ACC_GNT 0x100
-#define EPB_DATA_MASK 0xFF
-#define EPB_RD (1ULL << 24)
-#define EPB_TRANS_RDY (1ULL << 31)
-#define EPB_TRANS_ERR (1ULL << 30)
-#define EPB_TRANS_TRIES 5
-
-/*
- * query, claim, release ownership of the EPB (External Parallel Bus)
- * for a specified SERDES.
- * the "claim" parameter is >0 to claim, <0 to release, 0 to query.
- * Returns <0 for errors, >0 if we had ownership, else 0.
- */
-static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
-{
- u16 acc;
- u64 accval;
- int owned = 0;
- u64 oct_sel = 0;
-
- switch (sdnum) {
- case IB_7220_SERDES:
- /*
- * The IB SERDES "ownership" is fairly simple. A single each
- * request/grant.
- */
- acc = kr_ibsd_epb_access_ctrl;
- break;
-
- case PCIE_SERDES0:
- case PCIE_SERDES1:
- /* PCIe SERDES has two "octants", need to select which */
- acc = kr_pciesd_epb_access_ctrl;
- oct_sel = (2 << (sdnum - PCIE_SERDES0));
- break;
-
- default:
- return 0;
- }
-
- /* Make sure any outstanding transaction was seen */
- qib_read_kreg32(dd, kr_scratch);
- udelay(15);
-
- accval = qib_read_kreg32(dd, acc);
-
- owned = !!(accval & EPB_ACC_GNT);
- if (claim < 0) {
- /* Need to release */
- u64 pollval;
- /*
- * The only writable bits are the request and CS.
- * Both should be clear
- */
- u64 newval = 0;
-
- qib_write_kreg(dd, acc, newval);
- /* First read after write is not trustworthy */
- pollval = qib_read_kreg32(dd, acc);
- udelay(5);
- pollval = qib_read_kreg32(dd, acc);
- if (pollval & EPB_ACC_GNT)
- owned = -1;
- } else if (claim > 0) {
- /* Need to claim */
- u64 pollval;
- u64 newval = EPB_ACC_REQ | oct_sel;
-
- qib_write_kreg(dd, acc, newval);
- /* First read after write is not trustworthy */
- pollval = qib_read_kreg32(dd, acc);
- udelay(5);
- pollval = qib_read_kreg32(dd, acc);
- if (!(pollval & EPB_ACC_GNT))
- owned = -1;
- }
- return owned;
-}
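-
-/*
- * The callers below (qib_sd7220_reg_mod() and qib_sd7220_ram_xfer())
- * wrap their EPB transactions in the same claim/transact/release
- * pattern; a minimal sketch of that sequence:
- *
- *	spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
- *	if (epb_access(dd, sdnum, 1) < 0)
- *		... bail out, could not claim the bus ...
- *	... one or more epb_trans() transactions ...
- *	if (epb_access(dd, sdnum, -1) < 0)
- *		ret = -1;	... release failure is an error ...
- *	spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- */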
-
-/*
- * Lemma to deal with the race condition of write..read to epb regs
- */
-static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
-{
- int tries;
- u64 transval;
-
- qib_write_kreg(dd, reg, i_val);
- /* Throw away first read, as RDY bit may be stale */
- transval = qib_read_kreg64(dd, reg);
-
- for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = qib_read_kreg32(dd, reg);
- if (transval & EPB_TRANS_RDY)
- break;
- udelay(5);
- }
- if (transval & EPB_TRANS_ERR)
- return -1;
- if (tries > 0 && o_vp)
- *o_vp = transval;
- return tries;
-}
-
-/**
- * qib_sd7220_reg_mod - modify SERDES register
- * @dd: the qlogic_ib device
- * @sdnum: which SERDES to access
- * @loc: location - channel, element, register, as packed by EPB_LOC() macro.
- * @wd: Write Data - value to set in register
- * @mask: ones where data should be spliced into reg.
- *
- * Basic register read/modify/write, with unneeded accesses elided. That is,
- * a mask of zero will prevent write, while a mask of 0xFF will prevent read.
- * returns current (presumed, if a write was done) contents of selected
- * register, or <0 if errors.
- */
-static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
- u32 wd, u32 mask)
-{
- u16 trans;
- u64 transval;
- int owned;
- int tries, ret;
- unsigned long flags;
-
- switch (sdnum) {
- case IB_7220_SERDES:
- trans = kr_ibsd_epb_transaction_reg;
- break;
-
- case PCIE_SERDES0:
- case PCIE_SERDES1:
- trans = kr_pciesd_epb_transaction_reg;
- break;
-
- default:
- return -1;
- }
-
- /*
- * All access is locked in software (vs other host threads) and
- * hardware (vs uC access).
- */
- spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
-
- owned = epb_access(dd, sdnum, 1);
- if (owned < 0) {
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- return -1;
- }
- for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = qib_read_kreg32(dd, trans);
- if (transval & EPB_TRANS_RDY)
- break;
- udelay(5);
- }
-
- if (tries > 0) {
- tries = 1; /* to make read-skip work */
- if (mask != 0xFF) {
- /*
- * Not a pure write, so need to read.
- * loc encodes chip-select as well as address
- */
- transval = loc | EPB_RD;
- tries = epb_trans(dd, trans, transval, &transval);
- }
- if (tries > 0 && mask != 0) {
- /*
- * Not a pure read, so need to write.
- */
- wd = (wd & mask) | (transval & ~mask);
- transval = loc | (wd & EPB_DATA_MASK);
- tries = epb_trans(dd, trans, transval, &transval);
- }
- }
- /* else, failed to see ready, what error-handling? */
-
- /*
- * Release bus. Failure is an error.
- */
- if (epb_access(dd, sdnum, -1) < 0)
- ret = -1;
- else
- ret = transval & EPB_DATA_MASK;
-
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- if (tries <= 0)
- ret = -1;
- return ret;
-}
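-
-/*
- * Mask semantics of qib_sd7220_reg_mod() above, illustrated with the
- * IB_MPREG6 location (a sketch; similar calls appear throughout this
- * file, and "val" is a placeholder):
- *
- *	qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0, 0);
- *		mask 0x00: pure read, the write is elided;
- *	qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
- *		read-modify-write, splicing in bit 7 only;
- *	qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, val, 0xFF);
- *		mask 0xFF: pure write, the read is elided.
- */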
-
-#define EPB_ROM_R (2)
-#define EPB_ROM_W (1)
-/*
- * Below, all uC-related, use appropriate UC_CS, depending
- * on which SerDes is used.
- */
-#define EPB_UC_CTL EPB_LOC(6, 0, 0)
-#define EPB_MADDRL EPB_LOC(6, 0, 2)
-#define EPB_MADDRH EPB_LOC(6, 0, 3)
-#define EPB_ROMDATA EPB_LOC(6, 0, 4)
-#define EPB_RAMDATA EPB_LOC(6, 0, 5)
-
-/* Transfer data to/from uC Program RAM of IB or PCIe SerDes */
-static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
- u8 *buf, int cnt, int rd_notwr)
-{
- u16 trans;
- u64 transval;
- u64 csbit;
- int owned;
- int tries;
- int sofar;
- int addr;
- int ret;
- unsigned long flags;
-
- /* Pick appropriate transaction reg and "Chip select" for this serdes */
- switch (sdnum) {
- case IB_7220_SERDES:
- csbit = 1ULL << EPB_IB_UC_CS_SHF;
- trans = kr_ibsd_epb_transaction_reg;
- break;
-
- case PCIE_SERDES0:
- case PCIE_SERDES1:
- /* PCIe SERDES has uC "chip select" in different bit, too */
- csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
- trans = kr_pciesd_epb_transaction_reg;
- break;
-
- default:
- return -1;
- }
-
- spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
-
- owned = epb_access(dd, sdnum, 1);
- if (owned < 0) {
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- return -1;
- }
-
- /*
- * In future code, we may need to distinguish several address ranges,
- * and select various memories based on this. For now, just trim
- * "loc" (location including address and memory select) to
- * "addr" (address within memory). we will only support PRAM
- * The memory is 8KB.
- */
- addr = loc & 0x1FFF;
- for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = qib_read_kreg32(dd, trans);
- if (transval & EPB_TRANS_RDY)
- break;
- udelay(5);
- }
-
- sofar = 0;
- if (tries > 0) {
- /*
- * Every "memory" access is doubly-indirect.
- * We set two bytes of address, then read/write
- * one or more bytes of data.
- */
-
- /* First, we set control to "Read" or "Write" */
- transval = csbit | EPB_UC_CTL |
- (rd_notwr ? EPB_ROM_R : EPB_ROM_W);
- tries = epb_trans(dd, trans, transval, &transval);
- while (tries > 0 && sofar < cnt) {
- if (!sofar) {
- /* Only set address at start of chunk */
- int addrbyte = (addr + sofar) >> 8;
-
- transval = csbit | EPB_MADDRH | addrbyte;
- tries = epb_trans(dd, trans, transval,
- &transval);
- if (tries <= 0)
- break;
- addrbyte = (addr + sofar) & 0xFF;
- transval = csbit | EPB_MADDRL | addrbyte;
- tries = epb_trans(dd, trans, transval,
- &transval);
- if (tries <= 0)
- break;
- }
-
- if (rd_notwr)
- transval = csbit | EPB_ROMDATA | EPB_RD;
- else
- transval = csbit | EPB_ROMDATA | buf[sofar];
- tries = epb_trans(dd, trans, transval, &transval);
- if (tries <= 0)
- break;
- if (rd_notwr)
- buf[sofar] = transval & EPB_DATA_MASK;
- ++sofar;
- }
- /* Finally, clear control-bit for Read or Write */
- transval = csbit | EPB_UC_CTL;
- tries = epb_trans(dd, trans, transval, &transval);
- }
-
- ret = sofar;
- /* Release bus. Failure is an error */
- if (epb_access(dd, sdnum, -1) < 0)
- ret = -1;
-
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- if (tries <= 0)
- ret = -1;
- return ret;
-}
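-
-/*
- * The doubly-indirect access sequence above, illustrated for a
- * single-byte read at "addr" (each line is one epb_trans() write):
- *
- *	csbit | EPB_UC_CTL | EPB_ROM_R		select "read" mode
- *	csbit | EPB_MADDRH | (addr >> 8)	high address byte
- *	csbit | EPB_MADDRL | (addr & 0xFF)	low address byte
- *	csbit | EPB_ROMDATA | EPB_RD		fetch the data byte
- *	csbit | EPB_UC_CTL			clear read/write mode
- */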
-
-#define PROG_CHUNK 64
-
-static int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
- const u8 *img, int len, int offset)
-{
- int cnt, sofar, req;
-
- sofar = 0;
- while (sofar < len) {
- req = len - sofar;
- if (req > PROG_CHUNK)
- req = PROG_CHUNK;
- cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
- (u8 *)img + sofar, req, 0);
- if (cnt < req) {
- sofar = -1;
- break;
- }
- sofar += req;
- }
- return sofar;
-}
-
-#define VFY_CHUNK 64
-#define SD_PRAM_ERROR_LIMIT 42
-
-static int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
- const u8 *img, int len, int offset)
-{
- int cnt, sofar, req, idx, errors;
- unsigned char readback[VFY_CHUNK];
-
- errors = 0;
- sofar = 0;
- while (sofar < len) {
- req = len - sofar;
- if (req > VFY_CHUNK)
- req = VFY_CHUNK;
- cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
- readback, req, 1);
- if (cnt < req) {
- /* failed in read itself */
- sofar = -1;
- break;
- }
- for (idx = 0; idx < cnt; ++idx) {
- if (readback[idx] != img[idx+sofar])
- ++errors;
- }
- sofar += cnt;
- }
- return errors ? -errors : sofar;
-}
-
-static int
-qib_sd7220_ib_load(struct qib_devdata *dd, const struct firmware *fw)
-{
- return qib_sd7220_prog_ld(dd, IB_7220_SERDES, fw->data, fw->size, 0);
-}
-
-static int
-qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
-{
- return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, fw->data, fw->size, 0);
-}
-
-/*
- * IRQ not set up at this point in init, so we poll.
- */
-#define IB_SERDES_TRIM_DONE (1ULL << 11)
-#define TRIM_TMO (15)
-
-static int qib_sd_trimdone_poll(struct qib_devdata *dd)
-{
- int trim_tmo, ret;
- uint64_t val;
-
- /*
- * Default to failure, so IBC will not start
- * without IB_SERDES_TRIM_DONE.
- */
- ret = 0;
- for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
- val = qib_read_kreg64(dd, kr_ibcstatus);
- if (val & IB_SERDES_TRIM_DONE) {
- ret = 1;
- break;
- }
- msleep(20);
- }
- if (trim_tmo >= TRIM_TMO) {
- qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
- ret = 0;
- }
- return ret;
-}
-
-#define TX_FAST_ELT (9)
-
-/*
- * Set the "negotiation" values for SERDES. These are used by the IB1.2
- * link negotiation. The macros below are an attempt to keep the values a
- * little more human-editable.
- * First, values related to Drive De-emphasis Settings.
- */
-
-#define NUM_DDS_REGS 6
-#define DDS_REG_MAP 0x76A910 /* LSB-first list of regs (in elt 9) to mod */
-
-#define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \
- { { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \
- (main_d << 3) | 4 | (ipre_d >> 2), \
- (main_s << 3) | 4 | (ipre_s >> 2), \
- ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \
- ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } }
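-
-/*
- * Worked expansion (a sketch, following the packing above): the first
- * row of the table below, DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0), yields
- *
- *	.reg_vals = { 0x3F, 0x3B, 0x9C, 0xB4, 0x39, 0x33 }
- *
- * i.e. amplitude in regs 0/1, main tap in regs 2/3, post/pre cursor
- * packed into regs 4/5, with the DDR values in the even slots.
- */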
-
-static struct dds_init {
- uint8_t reg_vals[NUM_DDS_REGS];
-} dds_init_vals[] = {
- /* DDR(FDR) SDR(HDR) */
- /* Vendor recommends below for 3m cable */
-#define DDS_3M 0
- DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0),
- DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
- DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
- DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
- DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
- DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
- DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
- DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
- DDS_VAL(31, 20, 11, 0, 28, 23, 8, 0),
- DDS_VAL(31, 21, 10, 0, 27, 24, 7, 0),
- DDS_VAL(31, 22, 9, 0, 26, 25, 6, 0),
- DDS_VAL(30, 23, 8, 0, 25, 26, 5, 0),
- DDS_VAL(29, 24, 7, 0, 23, 27, 4, 0),
- /* Vendor recommends below for 1m cable */
-#define DDS_1M 13
- DDS_VAL(28, 25, 6, 0, 21, 28, 3, 0),
- DDS_VAL(27, 26, 5, 0, 19, 29, 2, 0),
- DDS_VAL(25, 27, 4, 0, 17, 30, 1, 0)
-};
-
-/*
- * Now the RXEQ section of the table.
- */
-/* Hardware packs an element number and register address thus: */
-#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
-#define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \
- {RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} }
-
-#define RXEQ_VAL_ALL(elt, adr, val) \
- {RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} }
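-
-/*
- * Worked expansion (a sketch, following RXEQ_INIT_RDESC() above):
- * RXEQ_VAL_ALL(7, 0x27, 0x10) yields rdesc = (7 & 0xF) | (0x27 << 4)
- * == 0x277, i.e. element 7, register 0x27, with rdata =
- * { 0x10, 0x10, 0x10, 0x10 }: the same value for all four columns.
- */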
-
-#define RXEQ_SDR_DFELTH 0
-#define RXEQ_SDR_TLTH 0
-#define RXEQ_SDR_G1CNT_Z1CNT 0x11
-#define RXEQ_SDR_ZCNT 23
-
-static struct rxeq_init {
- u16 rdesc; /* in form used in SerDesDDSRXEQ */
- u8 rdata[4];
-} rxeq_init_vals[] = {
- /* Set Rcv Eq. to Preset mode */
- RXEQ_VAL_ALL(7, 0x27, 0x10),
- /* Set DFELTHFDR/HDR thresholds */
- RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */
- RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
- /* Set TLTHFDR/HDR threshold */
- RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */
- RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was 0, 1, 2, 3 */
- /* Set Preamp setting 2 (ZFR/ZCNT) */
- RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */
- RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */
- /* Set Preamp DC gain and Setting 1 (GFR/GHR) */
- RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */
- RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */
- /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
- RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
- RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
-};
-
-/* There are 17 values from vendor, but IBC only accesses the first 16 */
-#define DDS_ROWS (16)
-#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
-
-static int qib_sd_setvals(struct qib_devdata *dd)
-{
- int idx, midx;
- int min_idx; /* Minimum index for this portion of table */
- uint32_t dds_reg_map;
- u64 __iomem *taddr, *iaddr;
- uint64_t data;
- uint64_t sdctl;
-
- taddr = dd->kregbase + kr_serdes_maptable;
- iaddr = dd->kregbase + kr_serdes_ddsrxeq0;
-
- /*
- * Init the DDS section of the table.
- * Each "row" of the table provokes NUM_DDS_REG writes, to the
- * registers indicated in DDS_REG_MAP.
- */
- sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
- sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
- sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
- qib_write_kreg(dd, kr_ibserdesctrl, sdctl);
-
- /*
- * Iterate down table within loop for each register to store.
- */
- dds_reg_map = DDS_REG_MAP;
- for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
- data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
- writeq(data, iaddr + idx);
- qib_read_kreg32(dd, kr_scratch);
- dds_reg_map >>= 4;
- for (midx = 0; midx < DDS_ROWS; ++midx) {
- u64 __iomem *daddr = taddr + ((midx << 4) + idx);
-
- data = dds_init_vals[midx].reg_vals[idx];
- writeq(data, daddr);
- qib_read_kreg32(dd, kr_scratch);
- } /* End inner for (vals for this reg, each row) */
- } /* end outer for (regs to be stored) */
-
- /*
- * Init the RXEQ section of the table.
- * This runs in a different order, as the pattern of
- * register references is more complex, but there are only
- * four "data" values per register.
- */
- min_idx = idx; /* RXEQ indices pick up where DDS left off */
- taddr += 0x100; /* RXEQ data is in second half of table */
- /* Iterate through RXEQ register addresses */
- for (idx = 0; idx < RXEQ_ROWS; ++idx) {
- int didx; /* "destination" */
- int vidx;
-
- /* didx is offset by min_idx to address RXEQ range of regs */
- didx = idx + min_idx;
- /* Store the next RXEQ register address */
- writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
- qib_read_kreg32(dd, kr_scratch);
- /* Iterate through RXEQ values */
- for (vidx = 0; vidx < 4; vidx++) {
- data = rxeq_init_vals[idx].rdata[vidx];
- writeq(data, taddr + (vidx << 6) + idx);
- qib_read_kreg32(dd, kr_scratch);
- }
- } /* end outer for (Reg-writes for RXEQ) */
- return 0;
-}
-
-#define CMUCTRL5 EPB_LOC(7, 0, 0x15)
-#define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0)
-#define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5)
-#define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6)
-#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
-#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
-
-/*
- * Repeat a "store" across all channels of the IB SerDes.
- * Although nominally it inherits the "read value" of the last
- * channel it modified, the only really useful return is <0 for
- * failure, >= 0 for success. The parameter 'loc' is assumed to
- * be the location in some channel of the register to be modified.
- * The caller can specify use of the "gang write" option of EPB,
- * in which case we use the specified channel data for any fields
- * not explicitly written.
- */
-static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
- int mask)
-{
- int ret = -1;
- int chnl;
-
- if (loc & EPB_GLOBAL_WR) {
- /*
- * Our caller has assured us that we can set all four
- * channels at once. Trust that. If mask is not 0xFF,
- * we will read the _specified_ channel for our starting
- * value.
- */
- loc |= (1U << EPB_IB_QUAD0_CS_SHF);
- chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
- if (mask != 0xFF) {
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- loc & ~EPB_GLOBAL_WR, 0, 0);
- if (ret < 0) {
- int sloc = loc >> EPB_ADDR_SHF;
-
- qib_dev_err(dd,
- "pre-read failed: elt %d, addr 0x%X, chnl %d\n",
- (sloc & 0xF),
- (sloc >> 9) & 0x3f, chnl);
- return ret;
- }
- val = (ret & ~mask) | (val & mask);
- }
- loc &= ~(7 << (4+EPB_ADDR_SHF));
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
- if (ret < 0) {
- int sloc = loc >> EPB_ADDR_SHF;
-
- qib_dev_err(dd,
- "Global WR failed: elt %d, addr 0x%X, val %02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, val);
- }
- return ret;
- }
- /* Clear "channel" and set CS so we can simply iterate */
- loc &= ~(7 << (4+EPB_ADDR_SHF));
- loc |= (1U << EPB_IB_QUAD0_CS_SHF);
- for (chnl = 0; chnl < 4; ++chnl) {
- int cloc = loc | (chnl << (4+EPB_ADDR_SHF));
-
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
- if (ret < 0) {
- int sloc = loc >> EPB_ADDR_SHF;
-
- qib_dev_err(dd,
- "Write failed: elt %d, addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
- val & 0xFF, mask & 0xFF);
- break;
- }
- }
- return ret;
-}
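-
-/*
- * Example calls (both appear elsewhere in this file): with
- * EPB_GLOBAL_WR set, one gang write updates all four channels at once;
- * without it, each channel is modified in turn, read-modify-write when
- * the mask is partial:
- *
- *	ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
- *	ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
- */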
-
-/*
- * Set the Tx values normally modified by IBC in IB1.2 mode to default
- * values, as gotten from first row of init table.
- */
-static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
-{
- int ret;
- int idx, reg, data;
- uint32_t regmap;
-
- regmap = DDS_REG_MAP;
- for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
- reg = (regmap & 0xF);
- regmap >>= 4;
- data = ddi->reg_vals[idx];
- /* Vendor says RMW not needed for these regs, use 0xFF mask */
- ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
- if (ret < 0)
- break;
- }
- return ret;
-}
-
-/*
- * Set the Rx values normally modified by IBC in IB1.2 mode to default
- * values, as gotten from selected column of init table.
- */
-static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
-{
- int ret;
- int ridx;
- int cnt = ARRAY_SIZE(rxeq_init_vals);
-
- for (ridx = 0; ridx < cnt; ++ridx) {
- int elt, reg, val, loc;
-
- elt = rxeq_init_vals[ridx].rdesc & 0xF;
- reg = rxeq_init_vals[ridx].rdesc >> 4;
- loc = EPB_LOC(0, elt, reg);
- val = rxeq_init_vals[ridx].rdata[vsel];
- /* mask of 0xFF, because hardware does full-byte store. */
- ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
- if (ret < 0)
- break;
- }
- return ret;
-}
-
-/*
- * Set the default values (row 0) for DDR Driver De-emphasis.
- * We do this initially and whenever we turn off IB-1.2.
- *
- * The "default" values for Rx equalization are also stored to
- * SerDes registers. Formerly (and still the default), we used set 2.
- * For experimenting with cables and link-partners, we allow changing
- * that via a module parameter.
- */
-static unsigned qib_rxeq_set = 2;
-module_param_named(rxeq_default_set, qib_rxeq_set, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(rxeq_default_set,
- "Which set [0..3] of Rx Equalization values is default");
-
-static int qib_internal_presets(struct qib_devdata *dd)
-{
- int ret = 0;
-
- ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
-
- if (ret < 0)
- qib_dev_err(dd, "Failed to set default DDS values\n");
- ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
- if (ret < 0)
- qib_dev_err(dd, "Failed to set default RXEQ values\n");
- return ret;
-}
-
-int qib_sd7220_presets(struct qib_devdata *dd)
-{
- int ret = 0;
-
- if (!dd->cspec->presets_needed)
- return ret;
- dd->cspec->presets_needed = 0;
- /* Assert uC reset, so we don't clash with it. */
- qib_ibsd_reset(dd, 1);
- udelay(2);
- qib_sd_trimdone_monitor(dd, "link-down");
-
- ret = qib_internal_presets(dd);
- return ret;
-}
-
-static int qib_sd_trimself(struct qib_devdata *dd, int val)
-{
- int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF);
-
- return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
-}
-
-static int qib_sd_early(struct qib_devdata *dd)
-{
- int ret;
-
- ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
- if (ret < 0)
- goto bail;
- ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
- if (ret < 0)
- goto bail;
- ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
-bail:
- return ret;
-}
-
-#define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E)
-#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
-#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
-
-static int qib_sd_dactrim(struct qib_devdata *dd)
-{
- int ret;
-
- ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
- if (ret < 0)
- goto bail;
-
- /* more fine-tuning of what will be default */
- ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
- if (ret < 0)
- goto bail;
-
- ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
- if (ret < 0)
- goto bail;
-
- ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
- if (ret < 0)
- goto bail;
-
- ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
- if (ret < 0)
- goto bail;
-
- /*
- * Delay for max possible number of steps, with slop.
- * Each step is about 4usec.
- */
- udelay(415);
-
- ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);
-
-bail:
- return ret;
-}
-
-#define RELOCK_FIRST_MS 3
-#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
-void toggle_7220_rclkrls(struct qib_devdata *dd)
-{
- int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
- int ret;
-
- ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
- if (ret < 0)
- qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
- else {
- udelay(1);
- ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
- }
- /* And again for good measure */
- udelay(1);
- ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
- if (ret < 0)
- qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
- else {
- udelay(1);
- ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
- }
- /* Now reset xgxs and IBC to complete the recovery */
- dd->f_xgxs_reset(dd->pport);
-}
-
-/*
- * Shut down the timer that polls for relock occasions, if needed.
- * This is "hooked" from qib_7220_quiet_serdes(), which is called
- * just before qib_shutdown_device() in qib_driver.c shuts down all
- * the other timers.
- */
-void shutdown_7220_relock_poll(struct qib_devdata *dd)
-{
- if (dd->cspec->relock_timer_active)
- timer_delete_sync(&dd->cspec->relock_timer);
-}
-
-static unsigned qib_relock_by_timer = 1;
-module_param_named(relock_by_timer, qib_relock_by_timer, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
-
-static void qib_run_relock(struct timer_list *t)
-{
- struct qib_chip_specific *cs = timer_container_of(cs, t, relock_timer);
- struct qib_devdata *dd = cs->dd;
- struct qib_pportdata *ppd = dd->pport;
- int timeoff;
-
- /*
- * Check link-training state for "stuck" state, when down.
- * If found, try relock and schedule another try at an
- * exponentially growing delay, maxed at one second.
- * If not stuck, our work is done.
- */
- if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
- (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED |
- QIBL_LINKACTIVE))) {
- if (qib_relock_by_timer) {
- if (!(ppd->lflags & QIBL_IB_LINK_DISABLED))
- toggle_7220_rclkrls(dd);
- }
- /* re-set timer for next check */
- timeoff = cs->relock_interval << 1;
- if (timeoff > HZ)
- timeoff = HZ;
- cs->relock_interval = timeoff;
- } else
- timeoff = HZ;
- mod_timer(&cs->relock_timer, jiffies + timeoff);
-}
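-
-/*
- * Backoff illustration (a sketch, assuming HZ=250, so that
- * msecs_to_jiffies(RELOCK_FIRST_MS) rounds up to 1 jiffy): while the
- * link stays stuck down, the retry interval doubles on each tick,
- * 1, 2, 4, ... 128 jiffies, then clamps at HZ (one second) for all
- * later attempts.
- */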
-
-void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
-{
- struct qib_chip_specific *cs = dd->cspec;
-
- if (ibup) {
- /* We are now up, relax timer to 1 second interval */
- if (cs->relock_timer_active) {
- cs->relock_interval = HZ;
- mod_timer(&cs->relock_timer, jiffies + HZ);
- }
- } else {
- /* Transition to down, (re-)set timer to short interval. */
- unsigned int timeout;
-
- timeout = msecs_to_jiffies(RELOCK_FIRST_MS);
- if (timeout == 0)
- timeout = 1;
- /* If timer has not yet been started, do so. */
- if (!cs->relock_timer_active) {
- cs->relock_timer_active = 1;
- timer_setup(&cs->relock_timer, qib_run_relock, 0);
- cs->relock_interval = timeout;
- cs->relock_timer.expires = jiffies + timeout;
- add_timer(&cs->relock_timer);
- } else {
- cs->relock_interval = timeout;
- mod_timer(&cs->relock_timer, jiffies + timeout);
- }
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
deleted file mode 100644
index 5e86cbf7d70e..000000000000
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ /dev/null
@@ -1,999 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2007 - 2012 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/moduleparam.h>
-
-#include "qib.h"
-#include "qib_common.h"
-
-/* default pio off, sdma on */
-static ushort sdma_descq_cnt = 256;
-module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
-MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
-
-/*
- * Bits defined in the send DMA descriptor.
- */
-#define SDMA_DESC_LAST (1ULL << 11)
-#define SDMA_DESC_FIRST (1ULL << 12)
-#define SDMA_DESC_DMA_HEAD (1ULL << 13)
-#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
-#define SDMA_DESC_INTR (1ULL << 15)
-#define SDMA_DESC_COUNT_LSB 16
-#define SDMA_DESC_GEN_LSB 30
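-
-/*
- * Resulting layout of descriptor qword 0, summarizing the flag bits
- * above plus the fields packed by make_sdma_desc() below:
- *
- *	[63:32]	SDmaPhyAddr[31:0]
- *	[31:30]	SDmaGeneration[1:0]
- *	[26:16]	SDmaDwordCount[10:0]
- *	[15]	interrupt request
- *	[14]	use large buffer
- *	[13]	DMA head to host
- *	[12]	first descriptor
- *	[11]	last descriptor
- *	[10:0]	SDmaBufOffset[12:2]
- */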
-
-/* declare all statics here rather than keep sorting */
-static int alloc_sdma(struct qib_pportdata *);
-static void sdma_complete(struct kref *);
-static void sdma_finalput(struct qib_sdma_state *);
-static void sdma_get(struct qib_sdma_state *);
-static void sdma_put(struct qib_sdma_state *);
-static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
-static void sdma_start_sw_clean_up(struct qib_pportdata *);
-static void sdma_sw_clean_up_task(struct tasklet_struct *);
-static void unmap_desc(struct qib_pportdata *, unsigned);
-
-static void sdma_get(struct qib_sdma_state *ss)
-{
- kref_get(&ss->kref);
-}
-
-static void sdma_complete(struct kref *kref)
-{
- struct qib_sdma_state *ss =
- container_of(kref, struct qib_sdma_state, kref);
-
- complete(&ss->comp);
-}
-
-static void sdma_put(struct qib_sdma_state *ss)
-{
- kref_put(&ss->kref, sdma_complete);
-}
-
-static void sdma_finalput(struct qib_sdma_state *ss)
-{
- sdma_put(ss);
- wait_for_completion(&ss->comp);
-}
-
-/*
- * Complete all the sdma requests on the active list, in the correct
- * order, and with appropriate processing. Called when cleaning up
- * after sdma shutdown, and when new sdma requests are submitted for
- * a link that is down. This matches what is done for requests
- * that complete normally; it's just the full list.
- *
- * Must be called with sdma_lock held
- */
-static void clear_sdma_activelist(struct qib_pportdata *ppd)
-{
- struct qib_sdma_txreq *txp, *txp_next;
-
- list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
- list_del_init(&txp->list);
- if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
- unsigned idx;
-
- idx = txp->start_idx;
- while (idx != txp->next_descq_idx) {
- unmap_desc(ppd, idx);
- if (++idx == ppd->sdma_descq_cnt)
- idx = 0;
- }
- }
- if (txp->callback)
- (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
- }
-}
-
-static void sdma_sw_clean_up_task(struct tasklet_struct *t)
-{
- struct qib_pportdata *ppd = from_tasklet(ppd, t,
- sdma_sw_clean_up_task);
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- /*
- * At this point, the following should always be true:
- * - We are halted, so no more descriptors are getting retired.
- * - We are not running, so no one is submitting new work.
- * - Only we can send the e40_sw_cleaned, so we can't start
- * running again until we say so. So, the active list and
- * descq are ours to play with.
- */
-
- /* Process all retired requests. */
- qib_sdma_make_progress(ppd);
-
- clear_sdma_activelist(ppd);
-
- /*
- * Resync count of added and removed. It is VERY important that
- * sdma_descq_removed NEVER decrement - user_sdma depends on it.
- */
- ppd->sdma_descq_removed = ppd->sdma_descq_added;
-
- /*
- * Reset our notion of head and tail.
- * Note that the HW registers will be reset when switching states
- * due to calling __qib_sdma_process_event() below.
- */
- ppd->sdma_descq_tail = 0;
- ppd->sdma_descq_head = 0;
- ppd->sdma_head_dma[0] = 0;
- ppd->sdma_generation = 0;
-
- __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-/*
- * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
- * as a result of send buffer errors or send DMA descriptor errors.
- * We want to disarm the buffers in these cases.
- */
-static void sdma_hw_start_up(struct qib_pportdata *ppd)
-{
- struct qib_sdma_state *ss = &ppd->sdma_state;
- unsigned bufno;
-
- for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
- ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
-
- ppd->dd->f_sdma_hw_start_up(ppd);
-}
-
-static void sdma_sw_tear_down(struct qib_pportdata *ppd)
-{
- struct qib_sdma_state *ss = &ppd->sdma_state;
-
- /* Releasing this reference means the state machine has stopped. */
- sdma_put(ss);
-}
-
-static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
-{
- tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
-}
-
-static void sdma_set_state(struct qib_pportdata *ppd,
- enum qib_sdma_states next_state)
-{
- struct qib_sdma_state *ss = &ppd->sdma_state;
- struct sdma_set_state_action *action = ss->set_state_action;
- unsigned op = 0;
-
- /* debugging bookkeeping */
- ss->previous_state = ss->current_state;
- ss->previous_op = ss->current_op;
-
- ss->current_state = next_state;
-
- if (action[next_state].op_enable)
- op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
-
- if (action[next_state].op_intenable)
- op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
-
- if (action[next_state].op_halt)
- op |= QIB_SDMA_SENDCTRL_OP_HALT;
-
- if (action[next_state].op_drain)
- op |= QIB_SDMA_SENDCTRL_OP_DRAIN;
-
- if (action[next_state].go_s99_running_tofalse)
- ss->go_s99_running = 0;
-
- if (action[next_state].go_s99_running_totrue)
- ss->go_s99_running = 1;
-
- ss->current_op = op;
-
- ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
-}
-
-static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
-{
- __le64 *descqp = &ppd->sdma_descq[head].qw[0];
- u64 desc[2];
- dma_addr_t addr;
- size_t len;
-
- desc[0] = le64_to_cpu(descqp[0]);
- desc[1] = le64_to_cpu(descqp[1]);
-
- addr = (desc[1] << 32) | (desc[0] >> 32);
- len = (desc[0] >> 14) & (0x7ffULL << 2);
- dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
-}
-
-static int alloc_sdma(struct qib_pportdata *ppd)
-{
- ppd->sdma_descq_cnt = sdma_descq_cnt;
- if (!ppd->sdma_descq_cnt)
- ppd->sdma_descq_cnt = 256;
-
- /* Allocate memory for SendDMA descriptor FIFO */
- ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
- ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
- GFP_KERNEL);
-
- if (!ppd->sdma_descq) {
- qib_dev_err(ppd->dd,
- "failed to allocate SendDMA descriptor FIFO memory\n");
- goto bail;
- }
-
- /* Allocate memory for DMA of head register to memory */
- ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
- PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
- if (!ppd->sdma_head_dma) {
- qib_dev_err(ppd->dd,
- "failed to allocate SendDMA head memory\n");
- goto cleanup_descq;
- }
- ppd->sdma_head_dma[0] = 0;
- return 0;
-
-cleanup_descq:
- dma_free_coherent(&ppd->dd->pcidev->dev,
- ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
- ppd->sdma_descq_phys);
- ppd->sdma_descq = NULL;
- ppd->sdma_descq_phys = 0;
-bail:
- ppd->sdma_descq_cnt = 0;
- return -ENOMEM;
-}
-
-static void free_sdma(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
-
- if (ppd->sdma_head_dma) {
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- (void *)ppd->sdma_head_dma,
- ppd->sdma_head_phys);
- ppd->sdma_head_dma = NULL;
- ppd->sdma_head_phys = 0;
- }
-
- if (ppd->sdma_descq) {
- dma_free_coherent(&dd->pcidev->dev,
- ppd->sdma_descq_cnt * sizeof(u64[2]),
- ppd->sdma_descq, ppd->sdma_descq_phys);
- ppd->sdma_descq = NULL;
- ppd->sdma_descq_phys = 0;
- }
-}
-
-static inline void make_sdma_desc(struct qib_pportdata *ppd,
- u64 *sdmadesc, u64 addr, u64 dwlen,
- u64 dwoffset)
-{
-
- WARN_ON(addr & 3);
- /* SDmaPhyAddr[47:32] */
- sdmadesc[1] = addr >> 32;
- /* SDmaPhyAddr[31:0] */
- sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
- /* SDmaGeneration[1:0] */
- sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
- SDMA_DESC_GEN_LSB;
- /* SDmaDwordCount[10:0] */
- sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
- /* SDmaBufOffset[12:2] */
- sdmadesc[0] |= dwoffset & 0x7ffULL;
-}
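-
-/*
- * Worked example (a sketch): make_sdma_desc(ppd, desc, 0x1000, 0x40, 0)
- * with ppd->sdma_generation == 1 produces
- *
- *	desc[0] == 0x0000100040400000	(addr[31:0] | gen 1 | 64 dwords)
- *	desc[1] == 0x0000000000000000	(addr[47:32])
- *
- * qib_sdma_verbs_send() below then ORs the FIRST/LAST/etc. flag bits
- * into qword 0 as needed.
- */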
-
-/* sdma_lock must be held */
-int qib_sdma_make_progress(struct qib_pportdata *ppd)
-{
- struct list_head *lp = NULL;
- struct qib_sdma_txreq *txp = NULL;
- struct qib_devdata *dd = ppd->dd;
- int progress = 0;
- u16 hwhead;
- u16 idx = 0;
-
- hwhead = dd->f_sdma_gethead(ppd);
-
- /* The reason for some of the complexity of this code is that
- * not all descriptors have corresponding txps. So, we have to
- * be able to skip over descs until we wander into the range of
- * the next txp on the list.
- */
-
- if (!list_empty(&ppd->sdma_activelist)) {
- lp = ppd->sdma_activelist.next;
- txp = list_entry(lp, struct qib_sdma_txreq, list);
- idx = txp->start_idx;
- }
-
- while (ppd->sdma_descq_head != hwhead) {
- /* if desc is part of this txp, unmap if needed */
- if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
- (idx == ppd->sdma_descq_head)) {
- unmap_desc(ppd, ppd->sdma_descq_head);
- if (++idx == ppd->sdma_descq_cnt)
- idx = 0;
- }
-
- /* increment dequeued desc count */
- ppd->sdma_descq_removed++;
-
- /* advance head, wrap if needed */
- if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
- ppd->sdma_descq_head = 0;
-
- /* if now past this txp's descs, do the callback */
- if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
- /* remove from active list */
- list_del_init(&txp->list);
- if (txp->callback)
- (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
- /* see if there is another txp */
- if (list_empty(&ppd->sdma_activelist))
- txp = NULL;
- else {
- lp = ppd->sdma_activelist.next;
- txp = list_entry(lp, struct qib_sdma_txreq,
- list);
- idx = txp->start_idx;
- }
- }
- progress = 1;
- }
- if (progress)
- qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
- return progress;
-}
-
-/*
- * This is called from interrupt context.
- */
-void qib_sdma_intr(struct qib_pportdata *ppd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- __qib_sdma_intr(ppd);
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-void __qib_sdma_intr(struct qib_pportdata *ppd)
-{
- if (__qib_sdma_running(ppd)) {
- qib_sdma_make_progress(ppd);
- if (!list_empty(&ppd->sdma_userpending))
- qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
- }
-}
-
-int qib_setup_sdma(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
- int ret = 0;
-
- ret = alloc_sdma(ppd);
- if (ret)
- goto bail;
-
- /* set consistent sdma state */
- ppd->dd->f_sdma_init_early(ppd);
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- /* set up reference counting */
- kref_init(&ppd->sdma_state.kref);
- init_completion(&ppd->sdma_state.comp);
-
- ppd->sdma_generation = 0;
- ppd->sdma_descq_head = 0;
- ppd->sdma_descq_removed = 0;
- ppd->sdma_descq_added = 0;
-
- ppd->sdma_intrequest = 0;
- INIT_LIST_HEAD(&ppd->sdma_userpending);
-
- INIT_LIST_HEAD(&ppd->sdma_activelist);
-
- tasklet_setup(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task);
-
- ret = dd->f_init_sdma_regs(ppd);
- if (ret)
- goto bail_alloc;
-
- qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
-
- return 0;
-
-bail_alloc:
- qib_teardown_sdma(ppd);
-bail:
- return ret;
-}
-
-void qib_teardown_sdma(struct qib_pportdata *ppd)
-{
- qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
-
- /*
- * This waits for the state machine to exit so it is not
- * necessary to kill the sdma_sw_clean_up_task to make sure
- * it is not running.
- */
- sdma_finalput(&ppd->sdma_state);
-
- free_sdma(ppd);
-}
-
-int qib_sdma_running(struct qib_pportdata *ppd)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- ret = __qib_sdma_running(ppd);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- return ret;
-}
-
-/*
- * Complete a request when sdma is not running; it is likely the only
- * request, but to simplify the code, always queue it, then process the
- * full activelist. We process the entire list to ensure that this
- * particular request does get its callback, in the correct order.
- * Must be called with sdma_lock held
- */
-static void complete_sdma_err_req(struct qib_pportdata *ppd,
- struct qib_verbs_txreq *tx)
-{
- struct qib_qp_priv *priv = tx->qp->priv;
-
- atomic_inc(&priv->s_dma_busy);
- /* no sdma descriptors, so no unmap_desc */
- tx->txreq.start_idx = 0;
- tx->txreq.next_descq_idx = 0;
- list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
- clear_sdma_activelist(ppd);
-}
-
-/*
- * This function queues one IB packet onto the send DMA queue per call.
- * The caller is responsible for checking:
- * 1) The number of send DMA descriptor entries is less than the size of
- * the descriptor queue.
- * 2) The IB SGE addresses and lengths are 32-bit aligned
- * (except possibly the last SGE's length)
- * 3) The SGE addresses are suitable for passing to dma_map_single().
- */
-int qib_sdma_verbs_send(struct qib_pportdata *ppd,
- struct rvt_sge_state *ss, u32 dwords,
- struct qib_verbs_txreq *tx)
-{
- unsigned long flags;
- struct rvt_sge *sge;
- struct rvt_qp *qp;
- int ret = 0;
- u16 tail;
- __le64 *descqp;
- u64 sdmadesc[2];
- u32 dwoffset;
- dma_addr_t addr;
- struct qib_qp_priv *priv;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
-retry:
- if (unlikely(!__qib_sdma_running(ppd))) {
- complete_sdma_err_req(ppd, tx);
- goto unlock;
- }
-
- if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
- if (qib_sdma_make_progress(ppd))
- goto retry;
- if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
- ppd->dd->f_sdma_set_desc_cnt(ppd,
- ppd->sdma_descq_cnt / 2);
- goto busy;
- }
-
- dwoffset = tx->hdr_dwords;
- make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
-
- sdmadesc[0] |= SDMA_DESC_FIRST;
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
- sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
-
- /* write to the descq */
- tail = ppd->sdma_descq_tail;
- descqp = &ppd->sdma_descq[tail].qw[0];
- *descqp++ = cpu_to_le64(sdmadesc[0]);
- *descqp++ = cpu_to_le64(sdmadesc[1]);
-
- /* increment the tail */
- if (++tail == ppd->sdma_descq_cnt) {
- tail = 0;
- descqp = &ppd->sdma_descq[0].qw[0];
- ++ppd->sdma_generation;
- }
-
- tx->txreq.start_idx = tail;
-
- sge = &ss->sge;
- while (dwords) {
- u32 dw;
- u32 len = rvt_get_sge_length(sge, dwords << 2);
-
- dw = (len + 3) >> 2;
- addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
- dw << 2, DMA_TO_DEVICE);
- if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) {
- ret = -ENOMEM;
- goto unmap;
- }
- sdmadesc[0] = 0;
- make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
- /* SDmaUseLargeBuf has to be set in every descriptor */
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
- sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
- /* write to the descq */
- *descqp++ = cpu_to_le64(sdmadesc[0]);
- *descqp++ = cpu_to_le64(sdmadesc[1]);
-
- /* increment the tail */
- if (++tail == ppd->sdma_descq_cnt) {
- tail = 0;
- descqp = &ppd->sdma_descq[0].qw[0];
- ++ppd->sdma_generation;
- }
- rvt_update_sge(ss, len, false);
- dwoffset += dw;
- dwords -= dw;
- }
-
- if (!tail)
- descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
- descqp -= 2;
- descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
- descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
- descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
- priv = tx->qp->priv;
- atomic_inc(&priv->s_dma_busy);
- tx->txreq.next_descq_idx = tail;
- ppd->dd->f_sdma_update_tail(ppd, tail);
- ppd->sdma_descq_added += tx->txreq.sg_count;
- list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
- goto unlock;
-
-unmap:
- for (;;) {
- if (!tail)
- tail = ppd->sdma_descq_cnt - 1;
- else
- tail--;
- if (tail == ppd->sdma_descq_tail)
- break;
- unmap_desc(ppd, tail);
- }
- qp = tx->qp;
- priv = qp->priv;
- qib_put_txreq(tx);
- spin_lock(&qp->r_lock);
- spin_lock(&qp->s_lock);
- if (qp->ibqp.qp_type == IB_QPT_RC) {
- /* XXX what about error sending RDMA read responses? */
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
- rvt_error_qp(qp, IB_WC_GENERAL_ERR);
- } else if (qp->s_wqe)
- rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
- spin_unlock(&qp->s_lock);
- spin_unlock(&qp->r_lock);
- /* return zero to process the next send work request */
- goto unlock;
-
-busy:
- qp = tx->qp;
- priv = qp->priv;
- spin_lock(&qp->s_lock);
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- struct qib_ibdev *dev;
-
- /*
- * If we couldn't queue the DMA request, save the info
- * and try again later rather than destroying the
- * buffer and undoing the side effects of the copy.
- */
- tx->ss = ss;
- tx->dwords = dwords;
- priv->s_tx = tx;
- dev = &ppd->dd->verbs_dev;
- spin_lock(&dev->rdi.pending_lock);
- if (list_empty(&priv->iowait)) {
- struct qib_ibport *ibp;
-
- ibp = &ppd->ibport_data;
- ibp->rvp.n_dmawait++;
- qp->s_flags |= RVT_S_WAIT_DMA_DESC;
- list_add_tail(&priv->iowait, &dev->dmawait);
- }
- spin_unlock(&dev->rdi.pending_lock);
- qp->s_flags &= ~RVT_S_BUSY;
- spin_unlock(&qp->s_lock);
- ret = -EBUSY;
- } else {
- spin_unlock(&qp->s_lock);
- qib_put_txreq(tx);
- }
-unlock:
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return ret;
-}
-
-/*
- * sdma_lock should be acquired before calling this routine
- */
-void dump_sdma_state(struct qib_pportdata *ppd)
-{
- struct qib_sdma_desc *descq;
- struct qib_sdma_txreq *txp, *txpnext;
- __le64 *descqp;
- u64 desc[2];
- u64 addr;
- u16 gen, dwlen, dwoffset;
- u16 head, tail, cnt;
-
- head = ppd->sdma_descq_head;
- tail = ppd->sdma_descq_tail;
- cnt = qib_sdma_descq_freecnt(ppd);
- descq = ppd->sdma_descq;
-
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA ppd->sdma_descq_head: %u\n", head);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA ppd->sdma_descq_tail: %u\n", tail);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA sdma_descq_freecnt: %u\n", cnt);
-
- /* print info for each entry in the descriptor queue */
- while (head != tail) {
- char flags[6] = { 'x', 'x', 'x', 'x', 'x', 0 };
-
- descqp = &descq[head].qw[0];
- desc[0] = le64_to_cpu(descqp[0]);
- desc[1] = le64_to_cpu(descqp[1]);
- flags[0] = (desc[0] & 1<<15) ? 'I' : '-';
- flags[1] = (desc[0] & 1<<14) ? 'L' : 'S';
- flags[2] = (desc[0] & 1<<13) ? 'H' : '-';
- flags[3] = (desc[0] & 1<<12) ? 'F' : '-';
- flags[4] = (desc[0] & 1<<11) ? 'L' : '-';
- addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
- gen = (desc[0] >> 30) & 3ULL;
- dwlen = (desc[0] >> 14) & (0x7ffULL << 2);
- dwoffset = (desc[0] & 0x7ffULL) << 2;
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes offset:%u bytes\n",
- head, flags, addr, gen, dwlen, dwoffset);
- if (++head == ppd->sdma_descq_cnt)
- head = 0;
- }
-
- /* print dma descriptor indices from the TX requests */
- list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
- list)
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA txp->start_idx: %u txp->next_descq_idx: %u\n",
- txp->start_idx, txp->next_descq_idx);
-}
-
-void qib_sdma_process_event(struct qib_pportdata *ppd,
- enum qib_sdma_events event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- __qib_sdma_process_event(ppd, event);
-
- if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
- qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-void __qib_sdma_process_event(struct qib_pportdata *ppd,
- enum qib_sdma_events event)
-{
- struct qib_sdma_state *ss = &ppd->sdma_state;
-
- switch (ss->current_state) {
- case qib_sdma_state_s00_hw_down:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- break;
- case qib_sdma_event_e30_go_running:
- /*
- * If down, but running was requested (usually the result
- * of a link coming up), then we need to start up.
- * This can happen when hw down is requested while
- * bringing the link up with traffic active on a
- * 7220, for example.
- */
- ss->go_s99_running = 1;
- fallthrough; /* and start dma engine */
- case qib_sdma_event_e10_go_hw_start:
- /* This reference means the state machine is started */
- sdma_get(&ppd->sdma_state);
- sdma_set_state(ppd,
- qib_sdma_state_s10_hw_start_up_wait);
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e40_sw_cleaned:
- sdma_sw_tear_down(ppd);
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s10_hw_start_up_wait:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_sw_tear_down(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- sdma_set_state(ppd, ss->go_s99_running ?
- qib_sdma_state_s99_running :
- qib_sdma_state_s20_idle);
- break;
- case qib_sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s20_idle:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_sw_tear_down(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- sdma_set_state(ppd, qib_sdma_state_s99_running);
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s30_sw_clean_up_wait:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- sdma_set_state(ppd,
- qib_sdma_state_s10_hw_start_up_wait);
- sdma_hw_start_up(ppd);
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s40_hw_clean_up_wait:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- sdma_set_state(ppd,
- qib_sdma_state_s30_sw_clean_up_wait);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s50_hw_halt_wait:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- sdma_set_state(ppd,
- qib_sdma_state_s40_hw_clean_up_wait);
- ppd->dd->f_sdma_hw_clean_up(ppd);
- break;
- case qib_sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s99_running:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- sdma_set_state(ppd,
- qib_sdma_state_s30_sw_clean_up_wait);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e70_go_idle:
- sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- sdma_set_state(ppd,
- qib_sdma_state_s30_sw_clean_up_wait);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e7322_err_halted:
- sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
- }
-
- ss->last_event = event;
-}
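
The handler above is a textbook nested-switch state machine: the outer
switch selects the current state, the inner one the event, and every event
is spelled out even when it is a no-op, so adding an event without handling
it in each state produces a compiler warning. A compressed sketch of the
pattern with three states instead of seven (names are illustrative, not
driver API):

enum st { ST_DOWN, ST_STARTING, ST_RUNNING };
enum ev { EV_GO_DOWN, EV_HW_STARTED, EV_GO_RUNNING };

struct fsm {
	enum st state;
	int want_running;	/* like go_s99_running: remembered intent */
};

static void fsm_event(struct fsm *f, enum ev e)
{
	switch (f->state) {
	case ST_DOWN:
		switch (e) {
		case EV_GO_RUNNING:
			f->want_running = 1;	/* remember the request... */
			f->state = ST_STARTING;	/* ...and begin bring-up */
			break;
		case EV_GO_DOWN:
		case EV_HW_STARTED:
			break;			/* explicit no-ops */
		}
		break;
	case ST_STARTING:
		switch (e) {
		case EV_HW_STARTED:
			f->state = f->want_running ? ST_RUNNING : ST_DOWN;
			break;
		case EV_GO_DOWN:
			f->state = ST_DOWN;
			break;
		case EV_GO_RUNNING:
			f->want_running = 1;
			break;
		}
		break;
	case ST_RUNNING:
		if (e == EV_GO_DOWN)
			f->state = ST_DOWN;
		break;
	}
}
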
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
deleted file mode 100644
index a6571bc38366..000000000000
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ /dev/null
@@ -1,731 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/ctype.h>
-#include <rdma/ib_sysfs.h>
-
-#include "qib.h"
-#include "qib_mad.h"
-
-static struct qib_pportdata *qib_get_pportdata_kobj(struct kobject *kobj)
-{
- u32 port_num;
- struct ib_device *ibdev = ib_port_sysfs_get_ibdev_kobj(kobj, &port_num);
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
-
- return &dd->pport[port_num - 1];
-}
-
-/*
- * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
- */
-static ssize_t hrtbt_enable_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
-
- return sysfs_emit(buf, "%d\n", dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT));
-}
-
-static ssize_t hrtbt_enable_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr,
- const char *buf, size_t count)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 0, &val);
- if (ret) {
- qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
- return ret;
- }
-
- /*
- * Set the "intentional" heartbeat enable per either of
- * "Enable" and "Auto", as these are normally set together.
- * This bit is consulted when leaving loopback mode,
- * because entering loopback mode overrides it and automatically
- * disables heartbeat.
- */
- ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
- return ret < 0 ? ret : count;
-}
-static IB_PORT_ATTR_RW(hrtbt_enable);
-
-static ssize_t loopback_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, const char *buf,
- size_t count)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
- int ret = count, r;
-
- r = dd->f_set_ib_loopback(ppd, buf);
- if (r < 0)
- ret = r;
-
- return ret;
-}
-static IB_PORT_ATTR_WO(loopback);
-
-static ssize_t led_override_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr,
- const char *buf, size_t count)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 0, &val);
- if (ret) {
- qib_dev_err(dd, "attempt to set invalid LED override\n");
- return ret;
- }
-
- qib_set_led_override(ppd, val);
- return count;
-}
-static IB_PORT_ATTR_WO(led_override);
-
-static ssize_t status_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
-
- if (!ppd->statusp)
- return -EINVAL;
-
- return sysfs_emit(buf, "0x%llx\n", (unsigned long long)*(ppd->statusp));
-}
-static IB_PORT_ATTR_RO(status);
-
-/*
- * For userland compatibility, these offsets must remain fixed.
- * They are strings for QIB_STATUS_*
- */
-static const char * const qib_status_str[] = {
- "Initted",
- "",
- "",
- "",
- "",
- "Present",
- "IB_link_up",
- "IB_configured",
- "",
- "Fatal_Hardware_Error",
- NULL,
-};
-
-static ssize_t status_str_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
- int i, any;
- u64 s;
- ssize_t ret;
-
- if (!ppd->statusp) {
- ret = -EINVAL;
- goto bail;
- }
-
- s = *(ppd->statusp);
- *buf = '\0';
- for (any = i = 0; s && qib_status_str[i]; i++) {
- if (s & 1) {
- /* if overflow */
- if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
- break;
- if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
- PAGE_SIZE)
- break;
- any = 1;
- }
- s >>= 1;
- }
- if (any)
- strlcat(buf, "\n", PAGE_SIZE);
-
- ret = strlen(buf);
-
-bail:
- return ret;
-}
-static IB_PORT_ATTR_RO(status_str);
-
-/* end of per-port functions */
-
-static struct attribute *port_linkcontrol_attributes[] = {
- &ib_port_attr_loopback.attr,
- &ib_port_attr_led_override.attr,
- &ib_port_attr_hrtbt_enable.attr,
- &ib_port_attr_status.attr,
- &ib_port_attr_status_str.attr,
- NULL
-};
-
-static const struct attribute_group port_linkcontrol_group = {
- .name = "linkcontrol",
- .attrs = port_linkcontrol_attributes,
-};
-
-/*
- * Start of per-port congestion control structures and support code
- */
-
-/*
- * Congestion control table size followed by table entries
- */
-static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
- int ret;
-
- if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
- return -EINVAL;
-
- ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
- + sizeof(__be16);
-
- if (pos > ret)
- return -EINVAL;
-
- if (count > ret - pos)
- count = ret - pos;
-
- if (!count)
- return count;
-
- spin_lock(&ppd->cc_shadow_lock);
- memcpy(buf, ppd->ccti_entries_shadow, count);
- spin_unlock(&ppd->cc_shadow_lock);
-
- return count;
-}
-static const BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
-
-/*
- * Congestion settings: port control, control map and an array of 16
- * entries for the congestion entries - increase, timer, event log
- * trigger threshold and the minimum injection rate delay.
- */
-static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
- int ret;
-
- if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
- return -EINVAL;
-
- ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);
-
- if (pos > ret)
- return -EINVAL;
- if (count > ret - pos)
- count = ret - pos;
-
- if (!count)
- return count;
-
- spin_lock(&ppd->cc_shadow_lock);
- memcpy(buf, ppd->congestion_entries_shadow, count);
- spin_unlock(&ppd->cc_shadow_lock);
-
- return count;
-}
-static const BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
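
Both binary reads above use the standard pattern for partial reads of a
fixed-size object: reject offsets past the end, clamp the count to the
bytes remaining, and only then copy under the shadow lock. A generic
sketch of the offset/count arithmetic (plain C, outside the kernel):

#include <string.h>

/*
 * Clamp a (pos, count) window against an object of obj_size bytes and
 * copy the visible slice. Returns bytes copied, or -1 where the
 * handlers above return -EINVAL.
 */
static long bounded_read(char *dst, const char *src, unsigned long obj_size,
			 unsigned long pos, unsigned long count)
{
	if (pos > obj_size)
		return -1;		/* offset past end of object */
	if (count > obj_size - pos)
		count = obj_size - pos;	/* clamp to bytes remaining */
	if (!count)
		return 0;		/* clean EOF */
	memcpy(dst, src + pos, count);	/* driver copies under cc_shadow_lock */
	return (long)count;
}
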
-
-static const struct bin_attribute *const port_ccmgta_attributes[] = {
- &bin_attr_cc_setting_bin,
- &bin_attr_cc_table_bin,
- NULL,
-};
-
-static umode_t qib_ccmgta_is_bin_visible(struct kobject *kobj,
- const struct bin_attribute *attr, int n)
-{
- struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
-
- if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
- return 0;
- return attr->attr.mode;
-}
-
-static const struct attribute_group port_ccmgta_attribute_group = {
- .name = "CCMgtA",
- .is_bin_visible = qib_ccmgta_is_bin_visible,
- .bin_attrs = port_ccmgta_attributes,
-};
-
-/* Start sl2vl */
-
-struct qib_sl2vl_attr {
- struct ib_port_attribute attr;
- int sl;
-};
-
-static ssize_t sl2vl_attr_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_sl2vl_attr *sattr =
- container_of(attr, struct qib_sl2vl_attr, attr);
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return sysfs_emit(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
-}
-
-#define QIB_SL2VL_ATTR(N) \
- static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
- .attr = __ATTR(N, 0444, sl2vl_attr_show, NULL), \
- .sl = N, \
- }
-
-QIB_SL2VL_ATTR(0);
-QIB_SL2VL_ATTR(1);
-QIB_SL2VL_ATTR(2);
-QIB_SL2VL_ATTR(3);
-QIB_SL2VL_ATTR(4);
-QIB_SL2VL_ATTR(5);
-QIB_SL2VL_ATTR(6);
-QIB_SL2VL_ATTR(7);
-QIB_SL2VL_ATTR(8);
-QIB_SL2VL_ATTR(9);
-QIB_SL2VL_ATTR(10);
-QIB_SL2VL_ATTR(11);
-QIB_SL2VL_ATTR(12);
-QIB_SL2VL_ATTR(13);
-QIB_SL2VL_ATTR(14);
-QIB_SL2VL_ATTR(15);
-
-static struct attribute *port_sl2vl_attributes[] = {
- &qib_sl2vl_attr_0.attr.attr,
- &qib_sl2vl_attr_1.attr.attr,
- &qib_sl2vl_attr_2.attr.attr,
- &qib_sl2vl_attr_3.attr.attr,
- &qib_sl2vl_attr_4.attr.attr,
- &qib_sl2vl_attr_5.attr.attr,
- &qib_sl2vl_attr_6.attr.attr,
- &qib_sl2vl_attr_7.attr.attr,
- &qib_sl2vl_attr_8.attr.attr,
- &qib_sl2vl_attr_9.attr.attr,
- &qib_sl2vl_attr_10.attr.attr,
- &qib_sl2vl_attr_11.attr.attr,
- &qib_sl2vl_attr_12.attr.attr,
- &qib_sl2vl_attr_13.attr.attr,
- &qib_sl2vl_attr_14.attr.attr,
- &qib_sl2vl_attr_15.attr.attr,
- NULL
-};
-
-static const struct attribute_group port_sl2vl_group = {
- .name = "sl2vl",
- .attrs = port_sl2vl_attributes,
-};
-
-/* End sl2vl */
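
The sl2vl files above show how per-attribute data rides along with a sysfs
attribute: the SL index is embedded in a wrapper struct and recovered in
the shared show handler with container_of(), so sixteen files reuse one
callback. A userspace-compilable sketch of the same trick:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attr { const char *name; };	/* stand-in for the sysfs attribute */

struct sl2vl_attr {
	struct attr attr;		/* embedded generic attribute */
	int sl;				/* per-file payload */
};

static void show(struct attr *a)
{
	/* recover the wrapper, and with it the SL this file represents */
	struct sl2vl_attr *s = container_of(a, struct sl2vl_attr, attr);

	printf("%s -> sl %d\n", a->name, s->sl);
}

int main(void)
{
	struct sl2vl_attr a3 = { { "3" }, 3 };

	show(&a3.attr);		/* prints "3 -> sl 3" */
	return 0;
}
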
-
-/* Start diag_counters */
-
-struct qib_diagc_attr {
- struct ib_port_attribute attr;
- size_t counter;
-};
-
-static ssize_t diagc_attr_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_diagc_attr *dattr =
- container_of(attr, struct qib_diagc_attr, attr);
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return sysfs_emit(buf, "%llu\n", *((u64 *)qibp + dattr->counter));
-}
-
-static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, const char *buf,
- size_t count)
-{
- struct qib_diagc_attr *dattr =
- container_of(attr, struct qib_diagc_attr, attr);
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
- u64 val;
- int ret;
-
- ret = kstrtou64(buf, 0, &val);
- if (ret)
- return ret;
- *((u64 *)qibp + dattr->counter) = val;
- return count;
-}
-
-#define QIB_DIAGC_ATTR(N) \
- static_assert(__same_type(((struct qib_ibport *)0)->rvp.n_##N, u64)); \
- static struct qib_diagc_attr qib_diagc_attr_##N = { \
- .attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store), \
- .counter = \
- offsetof(struct qib_ibport, rvp.n_##N) / sizeof(u64) \
- }
-
-QIB_DIAGC_ATTR(rc_resends);
-QIB_DIAGC_ATTR(seq_naks);
-QIB_DIAGC_ATTR(rdma_seq);
-QIB_DIAGC_ATTR(rnr_naks);
-QIB_DIAGC_ATTR(other_naks);
-QIB_DIAGC_ATTR(rc_timeouts);
-QIB_DIAGC_ATTR(loop_pkts);
-QIB_DIAGC_ATTR(pkt_drops);
-QIB_DIAGC_ATTR(dmawait);
-QIB_DIAGC_ATTR(unaligned);
-QIB_DIAGC_ATTR(rc_dupreq);
-QIB_DIAGC_ATTR(rc_seqnak);
-QIB_DIAGC_ATTR(rc_crwaits);
-
-static u64 get_all_cpu_total(u64 __percpu *cntr)
-{
- int cpu;
- u64 counter = 0;
-
- for_each_possible_cpu(cpu)
- counter += *per_cpu_ptr(cntr, cpu);
- return counter;
-}
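
The rc_acks family below pairs get_all_cpu_total() with a reset-by-baseline
scheme: the per-CPU counters are only ever incremented, and "zeroing"
simply records the current total in a z_ shadow that is subtracted on
every read, so the hot increment path never writes shared state. A sketch
with a plain array standing in for per-CPU storage:

#include <stdint.h>

#define NCPUS 4

struct cntr {
	uint64_t percpu[NCPUS];	/* one slot per CPU, incremented locally */
	uint64_t zero;		/* baseline recorded at last "reset" */
};

static uint64_t total(const struct cntr *c)
{
	uint64_t sum = 0;

	for (int i = 0; i < NCPUS; i++)
		sum += c->percpu[i];
	return sum;
}

static uint64_t read_cntr(const struct cntr *c)
{
	return total(c) - c->zero;	/* value since the last reset */
}

static void reset_cntr(struct cntr *c)
{
	c->zero = total(c);	/* never touches the hot per-CPU slots */
}
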
-
-static ssize_t qib_store_per_cpu(struct qib_devdata *dd, const char *buf,
- size_t count, u64 *zero, u64 cur)
-{
- u32 val;
- int ret;
-
- ret = kstrtou32(buf, 0, &val);
- if (ret)
- return ret;
- if (val != 0) {
- qib_dev_err(dd, "Per CPU cntrs can only be zeroed");
- return count;
- }
- *zero = cur;
- return count;
-}
-
-static ssize_t rc_acks_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return sysfs_emit(buf, "%llu\n",
- get_all_cpu_total(qibp->rvp.rc_acks) -
- qibp->rvp.z_rc_acks);
-}
-
-static ssize_t rc_acks_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, const char *buf,
- size_t count)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_acks,
- get_all_cpu_total(qibp->rvp.rc_acks));
-}
-static IB_PORT_ATTR_RW(rc_acks);
-
-static ssize_t rc_qacks_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return sysfs_emit(buf, "%llu\n",
- get_all_cpu_total(qibp->rvp.rc_qacks) -
- qibp->rvp.z_rc_qacks);
-}
-
-static ssize_t rc_qacks_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, const char *buf,
- size_t count)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_qacks,
- get_all_cpu_total(qibp->rvp.rc_qacks));
-}
-static IB_PORT_ATTR_RW(rc_qacks);
-
-static ssize_t rc_delayed_comp_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return sysfs_emit(buf, "%llu\n",
- get_all_cpu_total(qibp->rvp.rc_delayed_comp) -
- qibp->rvp.z_rc_delayed_comp);
-}
-
-static ssize_t rc_delayed_comp_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr,
- const char *buf, size_t count)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_delayed_comp,
- get_all_cpu_total(qibp->rvp.rc_delayed_comp));
-}
-static IB_PORT_ATTR_RW(rc_delayed_comp);
-
-static struct attribute *port_diagc_attributes[] = {
- &qib_diagc_attr_rc_resends.attr.attr,
- &qib_diagc_attr_seq_naks.attr.attr,
- &qib_diagc_attr_rdma_seq.attr.attr,
- &qib_diagc_attr_rnr_naks.attr.attr,
- &qib_diagc_attr_other_naks.attr.attr,
- &qib_diagc_attr_rc_timeouts.attr.attr,
- &qib_diagc_attr_loop_pkts.attr.attr,
- &qib_diagc_attr_pkt_drops.attr.attr,
- &qib_diagc_attr_dmawait.attr.attr,
- &qib_diagc_attr_unaligned.attr.attr,
- &qib_diagc_attr_rc_dupreq.attr.attr,
- &qib_diagc_attr_rc_seqnak.attr.attr,
- &qib_diagc_attr_rc_crwaits.attr.attr,
- &ib_port_attr_rc_acks.attr,
- &ib_port_attr_rc_qacks.attr,
- &ib_port_attr_rc_delayed_comp.attr,
- NULL
-};
-
-static const struct attribute_group port_diagc_group = {
- .name = "diag_counters",
- .attrs = port_diagc_attributes,
-};
-
-/* End diag_counters */
-
-const struct attribute_group *qib_attr_port_groups[] = {
- &port_linkcontrol_group,
- &port_ccmgta_attribute_group,
- &port_sl2vl_group,
- &port_diagc_group,
- NULL,
-};
-
-/* end of per-port file structures and support code */
-
-/*
- * Start of per-unit (or driver, in some cases, but replicated
- * per unit) functions (these get a device *)
- */
-static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
-
- return sysfs_emit(buf, "%x\n", dd_from_dev(dev)->minrev);
-}
-static DEVICE_ATTR_RO(hw_rev);
-
-static ssize_t hca_type_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- if (!dd->boardname)
- return -EINVAL;
- return sysfs_emit(buf, "%s\n", dd->boardname);
-}
-static DEVICE_ATTR_RO(hca_type);
-static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);
-
-static DEVICE_STRING_ATTR_RO(version, 0444, QIB_DRIVER_VERSION);
-
-static ssize_t boardversion_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- /* The string printed here is already newline-terminated. */
- return sysfs_emit(buf, "%s", dd->boardversion);
-}
-static DEVICE_ATTR_RO(boardversion);
-
-static ssize_t localbus_info_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- /* The string printed here is already newline-terminated. */
- return sysfs_emit(buf, "%s", dd->lbus_info);
-}
-static DEVICE_ATTR_RO(localbus_info);
-
-static ssize_t nctxts_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- /* Return the number of user ports (contexts) available. */
- /* The calculation below deals with a special case where
- * cfgctxts is set to 1 on a single-port board. */
- return sysfs_emit(buf, "%u\n",
- (dd->first_user_ctxt > dd->cfgctxts) ?
- 0 :
- (dd->cfgctxts - dd->first_user_ctxt));
-}
-static DEVICE_ATTR_RO(nctxts);
-
-static ssize_t nfreectxts_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- /* Return the number of free user ports (contexts) available. */
- return sysfs_emit(buf, "%u\n", dd->freectxts);
-}
-static DEVICE_ATTR_RO(nfreectxts);
-
-static ssize_t serial_show(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- const u8 *end = memchr(dd->serial, 0, ARRAY_SIZE(dd->serial));
- int size = end ? end - dd->serial : ARRAY_SIZE(dd->serial);
-
- return sysfs_emit(buf, ".%*s\n", size, dd->serial);
-}
-static DEVICE_ATTR_RO(serial);
-
-static ssize_t chip_reset_store(struct device *device,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- int ret;
-
- if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
- ret = -EINVAL;
- goto bail;
- }
-
- ret = qib_reset_device(dd->unit);
-bail:
- return ret < 0 ? ret : count;
-}
-static DEVICE_ATTR_WO(chip_reset);
-
-/*
- * Dump tempsense registers in decimal, to ease shell scripting.
- */
-static ssize_t tempsense_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- int i;
- u8 regvals[8];
-
- for (i = 0; i < 8; i++) {
- int ret;
-
- if (i == 6)
- continue;
- ret = dd->f_tempsense_rd(dd, i);
- if (ret < 0)
- return ret; /* return error on bad read */
- regvals[i] = ret;
- }
- return sysfs_emit(buf, "%d %d %02X %02X %d %d\n",
- (signed char)regvals[0],
- (signed char)regvals[1],
- regvals[2],
- regvals[3],
- (signed char)regvals[5],
- (signed char)regvals[7]);
-}
-static DEVICE_ATTR_RO(tempsense);
-
-/*
- * end of per-unit (or driver, in some cases, but replicated
- * per unit) functions
- */
-
-/* start of per-unit file structures and support code */
-static struct attribute *qib_attributes[] = {
- &dev_attr_hw_rev.attr,
- &dev_attr_hca_type.attr,
- &dev_attr_board_id.attr,
- &dev_attr_version.attr.attr,
- &dev_attr_nctxts.attr,
- &dev_attr_nfreectxts.attr,
- &dev_attr_serial.attr,
- &dev_attr_boardversion.attr,
- &dev_attr_tempsense.attr,
- &dev_attr_localbus_info.attr,
- &dev_attr_chip_reset.attr,
- NULL,
-};
-
-const struct attribute_group qib_attr_group = {
- .attrs = qib_attributes,
-};
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
deleted file mode 100644
index 97b8a2bf5c69..000000000000
--- a/drivers/infiniband/hw/qib/qib_twsi.c
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "qib.h"
-
-/*
- * QLogic_IB "Two Wire Serial Interface" driver.
- * Originally written for a not-quite-i2c serial eeprom, which is
- * still used on some supported boards. Later boards have added a
- * variety of other uses, most board-specific, so the bit-banging
- * part has been split off to this file, while the other parts
- * have been moved to chip-specific files.
- *
- * We have also dropped all pretense of a fully generic interface
- * (e.g. pretending we don't know whether '1' is the higher voltage),
- * as the restrictions of the generic i2c interface (e.g. no access
- * from the driver itself) make it unsuitable for this use.
- */
-
-#define READ_CMD 1
-#define WRITE_CMD 0
-
-/**
- * i2c_wait_for_writes - wait for a write
- * @dd: the qlogic_ib device
- *
- * We use this instead of udelay directly, so we can make sure
- * that previous register writes have been flushed all the way
- * to the chip. Since we are delaying anyway, the cost doesn't
- * hurt, and it makes the bit twiddling more regular.
- */
-static void i2c_wait_for_writes(struct qib_devdata *dd)
-{
- /*
- * implicit read of EXTStatus is as good as explicit
- * read of scratch, if all we want to do is flush
- * writes.
- */
- dd->f_gpio_mod(dd, 0, 0, 0);
- rmb(); /* inlined, so prevent compiler reordering */
-}
-
-/*
- * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
- * for "almost compliant" modules
- */
-#define SCL_WAIT_USEC 1000
-
-/* BUF_WAIT is the time the bus must be free between a STOP or ACK and the next START.
- * Should be 20, but some chips need more.
- */
-#define TWSI_BUF_WAIT_USEC 60
-
-static void scl_out(struct qib_devdata *dd, u8 bit)
-{
- u32 mask;
-
- udelay(1);
-
- mask = 1UL << dd->gpio_scl_num;
-
- /* SCL is meant to be bare-drain, so never set "OUT", just DIR */
- dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
-
- /*
- * Allow for slow slaves with a simple
- * delay on the falling edge, sampling on the rise.
- */
- if (!bit)
- udelay(2);
- else {
- int rise_usec;
-
- for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
- if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
- break;
- udelay(2);
- }
- if (rise_usec <= 0)
- qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
- SCL_WAIT_USEC);
- }
- i2c_wait_for_writes(dd);
-}
-
-static void sda_out(struct qib_devdata *dd, u8 bit)
-{
- u32 mask;
-
- mask = 1UL << dd->gpio_sda_num;
-
- /* SDA is meant to be bare-drain, so never set "OUT", just DIR */
- dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
-
- i2c_wait_for_writes(dd);
- udelay(2);
-}
-
-static u8 sda_in(struct qib_devdata *dd, int wait)
-{
- int bnum;
- u32 read_val, mask;
-
- bnum = dd->gpio_sda_num;
- mask = (1UL << bnum);
- /* SDA is meant to be bare-drain, so never set "OUT", just DIR */
- dd->f_gpio_mod(dd, 0, 0, mask);
- read_val = dd->f_gpio_mod(dd, 0, 0, 0);
- if (wait)
- i2c_wait_for_writes(dd);
- return (read_val & mask) >> bnum;
-}
-
-/**
- * i2c_ackrcv - see if ack following write is true
- * @dd: the qlogic_ib device
- */
-static int i2c_ackrcv(struct qib_devdata *dd)
-{
- u8 ack_received;
-
- /* AT ENTRY SCL = LOW */
- /* change direction, ignore data */
- ack_received = sda_in(dd, 1);
- scl_out(dd, 1);
- ack_received = sda_in(dd, 1) == 0;
- scl_out(dd, 0);
- return ack_received;
-}
-
-static void stop_cmd(struct qib_devdata *dd);
-
-/**
- * rd_byte - read a byte, sending STOP on last, else ACK
- * @dd: the qlogic_ib device
- * @last: identifies the last read
- *
- * Returns byte shifted out of device
- */
-static int rd_byte(struct qib_devdata *dd, int last)
-{
- int bit_cntr, data;
-
- data = 0;
-
- for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
- data <<= 1;
- scl_out(dd, 1);
- data |= sda_in(dd, 0);
- scl_out(dd, 0);
- }
- if (last) {
- scl_out(dd, 1);
- stop_cmd(dd);
- } else {
- sda_out(dd, 0);
- scl_out(dd, 1);
- scl_out(dd, 0);
- sda_out(dd, 1);
- }
- return data;
-}
-
-/**
- * wr_byte - write a byte, one bit at a time
- * @dd: the qlogic_ib device
- * @data: the byte to write
- *
- * Returns 0 if we got the following ack, otherwise 1
- */
-static int wr_byte(struct qib_devdata *dd, u8 data)
-{
- int bit_cntr;
- u8 bit;
-
- for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
- bit = (data >> bit_cntr) & 1;
- sda_out(dd, bit);
- scl_out(dd, 1);
- scl_out(dd, 0);
- }
- return (!i2c_ackrcv(dd)) ? 1 : 0;
-}
-
-/*
- * issue TWSI start sequence:
- * (both clock/data high, clock high, data low while clock is high)
- */
-static void start_seq(struct qib_devdata *dd)
-{
- sda_out(dd, 1);
- scl_out(dd, 1);
- sda_out(dd, 0);
- udelay(1);
- scl_out(dd, 0);
-}
-
-/**
- * stop_seq - transmit the stop sequence
- * @dd: the qlogic_ib device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_seq(struct qib_devdata *dd)
-{
- scl_out(dd, 0);
- sda_out(dd, 0);
- scl_out(dd, 1);
- sda_out(dd, 1);
-}
-
-/**
- * stop_cmd - transmit the stop condition
- * @dd: the qlogic_ib device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_cmd(struct qib_devdata *dd)
-{
- stop_seq(dd);
- udelay(TWSI_BUF_WAIT_USEC);
-}
-
-/**
- * qib_twsi_reset - reset I2C communication
- * @dd: the qlogic_ib device
- */
-
-int qib_twsi_reset(struct qib_devdata *dd)
-{
- int clock_cycles_left = 9;
- int was_high = 0;
- u32 pins, mask;
-
- /* Both SCL and SDA should be high. If not, there
- * is something wrong.
- */
- mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);
-
- /*
- * Force pins to desired innocuous state.
- * This is the default power-on state with out=0 and dir=0,
- * so the pins are tri-stated and should float high (barring HW problems).
- */
- dd->f_gpio_mod(dd, 0, 0, mask);
-
- /*
- * Clock nine times to get all listeners into a sane state.
- * If SDA does not go high at any point, we are wedged.
- * One vendor recommends then issuing START followed by STOP.
- * We cannot use our "normal" functions to do that, because
- * if SCL drops between them, another vendor's part will
- * wedge, dropping SDA and keeping it low forever, at the end of
- * the next transaction (even if it was not the device addressed).
- * So our START and STOP take place with SCL held high.
- */
- while (clock_cycles_left--) {
- scl_out(dd, 0);
- scl_out(dd, 1);
- /* Note if SDA is high, but keep clocking to sync slave */
- was_high |= sda_in(dd, 0);
- }
-
- if (was_high) {
- /*
- * We saw a high, which we hope means the slave is sync'd.
- * Issue START, STOP, pause for T_BUF.
- */
-
- pins = dd->f_gpio_mod(dd, 0, 0, 0);
- if ((pins & mask) != mask)
- qib_dev_err(dd, "GPIO pins not at rest: %d\n",
- pins & mask);
- /* Drop SDA to issue START */
- udelay(1); /* Guarantee .6 uSec setup */
- sda_out(dd, 0);
- udelay(1); /* Guarantee .6 uSec hold */
- /* At this point, SCL is high, SDA low. Raise SDA for STOP */
- sda_out(dd, 1);
- udelay(TWSI_BUF_WAIT_USEC);
- }
-
- return !was_high;
-}
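
qib_twsi_reset() above is the classic I2C unwedge sequence: clock SCL up to
nine times so a slave stuck mid-byte can finish shifting out, then issue
START and STOP with SCL held high. A sketch against hypothetical GPIO
helpers (gpio_scl()/gpio_sda()/gpio_sda_read()/delay_us() are assumptions
for illustration, not driver API):

extern void gpio_scl(int level);
extern void gpio_sda(int level);
extern int  gpio_sda_read(void);
extern void delay_us(unsigned us);

/* Returns 0 if SDA was seen high (bus likely recovered), 1 if wedged. */
static int i2c_unwedge(void)
{
	int was_high = 0;

	for (int i = 0; i < 9; i++) {
		gpio_scl(0);
		gpio_scl(1);
		was_high |= gpio_sda_read(); /* keep clocking to sync slave */
	}
	if (was_high) {
		/* START, then STOP, both with SCL held high */
		gpio_sda(0);
		delay_us(1);
		gpio_sda(1);
		delay_us(60);	/* honor t_buf before the next transaction */
	}
	return !was_high;
}
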
-
-#define QIB_TWSI_START 0x100
-#define QIB_TWSI_STOP 0x200
-
-/* Write byte to TWSI, optionally prefixed with START or suffixed with
- * STOP.
- * returns 0 if OK (ACK received), else != 0
- */
-static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
-{
- int ret = 1;
-
- if (flags & QIB_TWSI_START)
- start_seq(dd);
-
- ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */
-
- if (flags & QIB_TWSI_STOP)
- stop_cmd(dd);
- return ret;
-}
-
-/* Added functionality for IBA7220-based cards */
-#define QIB_TEMP_DEV 0x98
-
-/*
- * qib_twsi_blk_rd
- * Formerly called qib_eeprom_internal_read, and only used for eeprom,
- * but now the general interface for data transfer from twsi devices.
- * One vestige of its former role is that it recognizes a device
- * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
- * which responded to all TWSI device codes, interpreting them as
- * an address within the device. On all other devices found on boards handled by
- * this driver, the device is followed by a one-byte "address" which selects
- * the "register" or "offset" within the device from which data should
- * be read.
- */
-int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
- void *buffer, int len)
-{
- int ret;
- u8 *bp = buffer;
-
- ret = 1;
-
- if (dev == QIB_TWSI_NO_DEV) {
- /* legacy not-really-I2C */
- addr = (addr << 1) | READ_CMD;
- ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
- } else {
- /* Actual I2C */
- ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
- if (ret) {
- stop_cmd(dd);
- ret = 1;
- goto bail;
- }
- /*
- * SFF spec claims we do _not_ stop after the addr
- * but simply issue a start with the "read" dev-addr.
- * Since we are implicitly waiting for ACK here,
- * we need t_buf (nominally 20uSec) before that start,
- * and cannot rely on the delay built in to the STOP
- */
- ret = qib_twsi_wr(dd, addr, 0);
- udelay(TWSI_BUF_WAIT_USEC);
-
- if (ret) {
- qib_dev_err(dd,
- "Failed to write interface read addr %02X\n",
- addr);
- ret = 1;
- goto bail;
- }
- ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
- }
- if (ret) {
- stop_cmd(dd);
- ret = 1;
- goto bail;
- }
-
- /*
- * block devices keep clocking data out as long as we ack,
- * automatically incrementing the address. Some have "pages"
- * whose boundaries will not be crossed, but the handling
- * of these is left to the caller, who is in a better
- * position to know.
- */
- while (len-- > 0) {
- /*
- * Get and store data, sending ACK if length remaining,
- * else STOP
- */
- *bp++ = rd_byte(dd, !len);
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
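
Stripped of the legacy-device special case, the read path above is a
standard I2C "random read": address the device for writing, send the
register offset, issue a repeated START in read mode, then clock bytes out,
ACKing all but the last. An outline in the same shape (wr() and rd() stand
in for qib_twsi_wr() and rd_byte(); error unwinding via stop_cmd() is
elided):

#define TWSI_START 0x100
extern int wr(int data, int flags);	/* returns nonzero if no ACK */
extern int rd(int last);		/* returns the byte read */

static int random_read(int dev, int reg, unsigned char *buf, int len)
{
	if (wr(dev | 0, TWSI_START))	/* START + device addr, write mode */
		return 1;
	if (wr(reg, 0))			/* register offset inside device */
		return 1;
	if (wr(dev | 1, TWSI_START))	/* repeated START, read mode */
		return 1;
	while (len-- > 0)
		*buf++ = rd(!len);	/* ACK each byte, STOP on the last */
	return 0;
}
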
-
-/*
- * qib_twsi_blk_wr
- * Formerly called qib_eeprom_internal_write, and only used for eeprom,
- * but now the general interface for data transfer to twsi devices.
- * One vestige of its former role is that it recognizes a device
- * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
- * which responded to all TWSI device codes, interpreting them as
- * an address within the device. On all other devices found on boards handled by
- * this driver, the device is followed by a one-byte "address" which selects
- * the "register" or "offset" within the device to which data should
- * be written.
- */
-int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
- const void *buffer, int len)
-{
- int sub_len;
- const u8 *bp = buffer;
- int max_wait_time, i;
- int ret = 1;
-
- while (len > 0) {
- if (dev == QIB_TWSI_NO_DEV) {
- if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
- QIB_TWSI_START)) {
- goto failed_write;
- }
- } else {
- /* Real I2C */
- if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
- goto failed_write;
- ret = qib_twsi_wr(dd, addr, 0);
- if (ret) {
- qib_dev_err(dd,
- "Failed to write interface write addr %02X\n",
- addr);
- goto failed_write;
- }
- }
-
- sub_len = min(len, 4);
- addr += sub_len;
- len -= sub_len;
-
- for (i = 0; i < sub_len; i++)
- if (qib_twsi_wr(dd, *bp++, 0))
- goto failed_write;
-
- stop_cmd(dd);
-
- /*
- * Wait for the write to complete by waiting for a successful
- * read (the chip replies with a zero after the write
- * cmd completes, and before it writes to the eeprom).
- * The startcmd for the read will fail the ack until
- * the writes have completed. We do this inline to avoid
- * the debug prints that are in the real read routine
- * if the startcmd fails.
- * We also use the proper device address, so it doesn't matter
- * whether we have real eeprom_dev. Legacy likes any address.
- */
- max_wait_time = 100;
- while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) {
- stop_cmd(dd);
- if (!--max_wait_time)
- goto failed_write;
- }
- /* now read (and ignore) the resulting byte */
- rd_byte(dd, 1);
- }
-
- ret = 0;
- goto bail;
-
-failed_write:
- stop_cmd(dd);
- ret = 1;
-
-bail:
- return ret;
-}
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
deleted file mode 100644
index 397928c80f7c..000000000000
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ /dev/null
@@ -1,566 +0,0 @@
-/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/moduleparam.h>
-
-#include "qib.h"
-
-static unsigned qib_hol_timeout_ms = 3000;
-module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
-MODULE_PARM_DESC(hol_timeout_ms,
- "duration of user app suspension after link failure");
-
-unsigned qib_sdma_fetch_arb = 1;
-module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
-MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
-
-/**
- * qib_disarm_piobufs - cancel a range of PIO buffers
- * @dd: the qlogic_ib device
- * @first: the first PIO buffer to cancel
- * @cnt: the number of PIO buffers to cancel
- *
- * Cancel a range of PIO buffers. Used at user process close,
- * in case it died while writing to a PIO buffer.
- */
-void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
-{
- unsigned long flags;
- unsigned i;
- unsigned last;
-
- last = first + cnt;
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- for (i = first; i < last; i++) {
- __clear_bit(i, dd->pio_need_disarm);
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
- }
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-}
-
-/*
- * This is called by a user process when it sees the DISARM_BUFS event
- * bit is set.
- */
-int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned i;
- unsigned last;
-
- last = rcd->pio_base + rcd->piocnt;
- /*
- * Don't need uctxt_lock here, since user has called in to us.
- * Clear at start in case more interrupts set bits while we
- * are disarming
- */
- if (rcd->user_event_mask) {
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt, if any
- */
- clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[i]);
- }
- spin_lock_irq(&dd->pioavail_lock);
- for (i = rcd->pio_base; i < last; i++) {
- if (__test_and_clear_bit(i, dd->pio_need_disarm))
- dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
- }
- spin_unlock_irq(&dd->pioavail_lock);
- return 0;
-}
-
-static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
-{
- struct qib_pportdata *ppd;
- unsigned pidx;
-
- for (pidx = 0; pidx < dd->num_pports; pidx++) {
- ppd = dd->pport + pidx;
- if (i >= ppd->sdma_state.first_sendbuf &&
- i < ppd->sdma_state.last_sendbuf)
- return ppd;
- }
- return NULL;
-}
-
-/*
- * Return true if send buffer is being used by a user context.
- * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect
- */
-static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
-{
- struct qib_ctxtdata *rcd;
- unsigned ctxt;
- int ret = 0;
-
- spin_lock(&dd->uctxt_lock);
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
- rcd = dd->rcd[ctxt];
- if (!rcd || bufn < rcd->pio_base ||
- bufn >= rcd->pio_base + rcd->piocnt)
- continue;
- if (rcd->user_event_mask) {
- int i;
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt, if any
- */
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[i]);
- }
- ret = 1;
- break;
- }
- spin_unlock(&dd->uctxt_lock);
-
- return ret;
-}
-
-/*
- * Disarm a set of send buffers. If the buffer might be actively being
- * written to, mark the buffer to be disarmed later when it is not being
- * written to.
- *
- * This should only be called from the IRQ error handler.
- */
-void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
- unsigned cnt)
-{
- struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
- unsigned i;
- unsigned long flags;
-
- for (i = 0; i < dd->num_pports; i++)
- pppd[i] = NULL;
-
- for (i = 0; i < cnt; i++) {
- if (!test_bit(i, mask))
- continue;
- /*
- * If the buffer is owned by the DMA hardware,
- * reset the DMA engine.
- */
- ppd = is_sdma_buf(dd, i);
- if (ppd) {
- pppd[ppd->port] = ppd;
- continue;
- }
- /*
- * If the kernel is writing the buffer or the buffer is
- * owned by a user process, we can't clear it yet.
- */
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- if (test_bit(i, dd->pio_writing) ||
- (!test_bit(i << 1, dd->pioavailkernel) &&
- find_ctxt(dd, i))) {
- __set_bit(i, dd->pio_need_disarm);
- } else {
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
- }
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
- }
-
- /* do cancel_sends once per port that had sdma piobufs in error */
- for (i = 0; i < dd->num_pports; i++)
- if (pppd[i])
- qib_cancel_sends(pppd[i]);
-}
-
-/**
- * update_send_bufs - update shadow copy of the PIO availability map
- * @dd: the qlogic_ib device
- *
- * called whenever our local copy indicates we have run out of send buffers
- */
-static void update_send_bufs(struct qib_devdata *dd)
-{
- unsigned long flags;
- unsigned i;
- const unsigned piobregs = dd->pioavregs;
-
- /*
- * If the generation (check) bits have changed, then we update the
- * busy bit for the corresponding PIO buffer. This algorithm will
- * modify positions to the value they already have in some cases
- * (i.e., no change), but it's faster than changing only the bits
- * that have changed.
- *
- * We would like to do this atomically, to avoid spinlocks in the
- * critical send path, but that's not really possible, given the
- * type of changes, and that this routine could be called on
- * multiple CPUs simultaneously, so we lock in this routine only,
- * to avoid conflicting updates; all we change is the shadow, and
- * it's a single 64 bit memory location, so by definition the update
- * is atomic in terms of what other CPUs can see in testing the
- * bits. The spin_lock overhead isn't too bad, since it only
- * happens when all buffers are in use, so only cpu overhead, not
- * latency or bandwidth is affected.
- */
- if (!dd->pioavailregs_dma)
- return;
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- for (i = 0; i < piobregs; i++) {
- u64 pchbusy, pchg, piov, pnew;
-
- piov = le64_to_cpu(dd->pioavailregs_dma[i]);
- pchg = dd->pioavailkernel[i] &
- ~(dd->pioavailshadow[i] ^ piov);
- pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
- if (pchg && (pchbusy & dd->pioavailshadow[i])) {
- pnew = dd->pioavailshadow[i] & ~pchbusy;
- pnew |= piov & pchbusy;
- dd->pioavailshadow[i] = pnew;
- }
- }
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-}
-
-/*
- * Debugging code and stats updates if no pio buffers available.
- */
-static noinline void no_send_bufs(struct qib_devdata *dd)
-{
- dd->upd_pio_shadow = 1;
-
- /* not atomic, but if we lose a stat count in a while, that's OK */
- qib_stats.sps_nopiobufs++;
-}
-
-/*
- * Common code for normal driver send buffer allocation, and reserved
- * allocation.
- *
- * Do appropriate marking as busy, etc.
- * Returns buffer pointer if one is found, otherwise NULL.
- */
-u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
- u32 first, u32 last)
-{
- unsigned i, j, updated = 0;
- unsigned nbufs;
- unsigned long flags;
- unsigned long *shadow = dd->pioavailshadow;
- u32 __iomem *buf;
-
- if (!(dd->flags & QIB_PRESENT))
- return NULL;
-
- nbufs = last - first + 1; /* number in range to check */
- if (dd->upd_pio_shadow) {
-update_shadow:
- /*
- * Minor optimization. If we had no buffers on last call,
- * start out by doing the update; continue and do scan even
- * if no buffers were updated, to be paranoid.
- */
- update_send_bufs(dd);
- updated++;
- }
- i = first;
- /*
- * While test_and_set_bit() is atomic, we do that and then the
- * change_bit(), and the pair is not. See if this is the cause
- * of the remaining armlaunch errors.
- */
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- if (dd->last_pio >= first && dd->last_pio <= last)
- i = dd->last_pio + 1;
- if (!first)
- /* adjust to min possible */
- nbufs = last - dd->min_kernel_pio + 1;
- for (j = 0; j < nbufs; j++, i++) {
- if (i > last)
- i = !first ? dd->min_kernel_pio : first;
- if (__test_and_set_bit((2 * i) + 1, shadow))
- continue;
- /* flip generation bit */
- __change_bit(2 * i, shadow);
- /* remember that the buffer can be written to now */
- __set_bit(i, dd->pio_writing);
- if (!first && first != last) /* first == last on VL15, avoid */
- dd->last_pio = i;
- break;
- }
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-
- if (j == nbufs) {
- if (!updated)
- /*
- * First time through; shadow exhausted, but may be
- * buffers available, try an update and then rescan.
- */
- goto update_shadow;
- no_send_bufs(dd);
- buf = NULL;
- } else {
- if (i < dd->piobcnt2k)
- buf = (u32 __iomem *)(dd->pio2kbase +
- i * dd->palign);
- else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
- buf = (u32 __iomem *)(dd->pio4kbase +
- (i - dd->piobcnt2k) * dd->align4k);
- else
- buf = (u32 __iomem *)(dd->piovl15base +
- (i - (dd->piobcnt2k + dd->piobcnt4k)) *
- dd->align4k);
- if (pbufnum)
- *pbufnum = i;
- dd->upd_pio_shadow = 0;
- }
-
- return buf;
-}
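
The allocator above keeps two shadow bits per send buffer: an odd "busy"
bit claimed with test-and-set, and an even "generation" bit flipped on each
claim so stale chip-visible state can be detected later. A sketch of the
claim scan over a plain bitmap (single-threaded; the driver does all of
this under pioavail_lock):

#include <limits.h>

#define NBUFS 64
#define BPL (sizeof(unsigned long) * CHAR_BIT)

static unsigned long shadow[(2 * NBUFS + BPL - 1) / BPL];

static int test_and_set(unsigned long *map, unsigned bit)
{
	unsigned long mask = 1UL << (bit % BPL);
	int old = !!(map[bit / BPL] & mask);

	map[bit / BPL] |= mask;
	return old;
}

static void flip_bit(unsigned long *map, unsigned bit)
{
	map[bit / BPL] ^= 1UL << (bit % BPL);
}

/* Scan [first, last] for a free buffer; returns its index, or -1. */
static int claim_buf(unsigned first, unsigned last)
{
	for (unsigned i = first; i <= last; i++) {
		if (test_and_set(shadow, 2 * i + 1))
			continue;	/* busy bit already set, skip */
		flip_bit(shadow, 2 * i);	/* flip generation bit */
		return (int)i;
	}
	return -1;	/* shadow exhausted; caller rescans after update */
}
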
-
-/*
- * Record that the caller is finished writing to the buffer so we don't
- * disarm it while it is being written and disarm it now if needed.
- */
-void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- __clear_bit(n, dd->pio_writing);
- if (__test_and_clear_bit(n, dd->pio_need_disarm))
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-}
-
-/**
- * qib_chg_pioavailkernel - change which send buffers are available for kernel
- * @dd: the qlogic_ib device
- * @start: the starting send buffer number
- * @len: the number of send buffers
- * @avail: true if the buffers are available for kernel use, false otherwise
- * @rcd: the context pointer
- */
-void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
- unsigned len, u32 avail, struct qib_ctxtdata *rcd)
-{
- unsigned long flags;
- unsigned end;
- unsigned ostart = start;
-
- /* There are two bits per send buffer (busy and generation) */
- start *= 2;
- end = start + len * 2;
-
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- /* Set or clear the busy bit in the shadow. */
- while (start < end) {
- if (avail) {
- unsigned long dma;
- int i;
-
- /*
- * The BUSY bit will never be set, because we disarm
- * the user buffers before we hand them back to the
- * kernel. We do have to make sure the generation
- * bit is set correctly in shadow, since it could
- * have changed many times while allocated to user.
- * We can't use the bitmap functions on the full
- * dma array because it is always little-endian, so
- * we have to flip to host-order first.
- * BITS_PER_LONG is slightly wrong, since it's
- * always 64 bits per register in the chip...
- * We only work on 64 bit kernels, so that's OK.
- */
- i = start / BITS_PER_LONG;
- __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
- dd->pioavailshadow);
- dma = (unsigned long)
- le64_to_cpu(dd->pioavailregs_dma[i]);
- if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
- start) % BITS_PER_LONG, &dma))
- __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
- start, dd->pioavailshadow);
- else
- __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
- + start, dd->pioavailshadow);
- __set_bit(start, dd->pioavailkernel);
- if ((start >> 1) < dd->min_kernel_pio)
- dd->min_kernel_pio = start >> 1;
- } else {
- __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
- dd->pioavailshadow);
- __clear_bit(start, dd->pioavailkernel);
- if ((start >> 1) > dd->min_kernel_pio)
- dd->min_kernel_pio = start >> 1;
- }
- start += 2;
- }
-
- if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
- dd->last_pio = dd->min_kernel_pio - 1;
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-
- dd->f_txchk_change(dd, ostart, len, avail, rcd);
-}
-
-/*
- * Flush all sends that might be in the ready to send state, as well as any
- * that are in the process of being sent. Used whenever we need to be
- * sure the send side is idle. Cleans up all buffer state by canceling
- * all pio buffers, and issuing an abort, which cleans up anything in the
- * launch fifo. The cancel is superfluous on some chip versions, but
- * it's safer to always do it.
- * PIOAvail bits are updated by the chip as if a normal send had happened.
- */
-void qib_cancel_sends(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- struct qib_ctxtdata *rcd;
- unsigned long flags;
- unsigned ctxt;
- unsigned i;
- unsigned last;
-
- /*
- * Tell PSM to disarm buffers again before trying to reuse them.
- * We need to be sure the rcd doesn't change out from under us
- * while we do so. We hold the two locks sequentially. We might
- * needlessly set some need_disarm bits as a result, if the
- * context is closed after we release the uctxt_lock, but that's
- * fairly benign, and safer than nesting the locks.
- */
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- rcd = dd->rcd[ctxt];
- if (rcd && rcd->ppd == ppd) {
- last = rcd->pio_base + rcd->piocnt;
- if (rcd->user_event_mask) {
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt,
- * if any
- */
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[i]);
- }
- i = rcd->pio_base;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- for (; i < last; i++)
- __set_bit(i, dd->pio_need_disarm);
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
- } else
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- }
-
- if (!(dd->flags & QIB_HAS_SEND_DMA))
- dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
- QIB_SENDCTRL_FLUSH);
-}
-
-/*
- * Force an update of in-memory copy of the pioavail registers, when
- * needed for any of a variety of reasons.
- * If already off, this routine is a nop, on the assumption that the
- * caller (or set of callers) will "do the right thing".
- * This is a per-device operation, so just the first port.
- */
-void qib_force_pio_avail_update(struct qib_devdata *dd)
-{
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
-}
-
-void qib_hol_down(struct qib_pportdata *ppd)
-{
- /*
- * Cancel sends when the link goes DOWN so that we aren't doing it
- * at INIT when we might be trying to send SMI packets.
- */
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- qib_cancel_sends(ppd);
-}
-
-/*
- * Link is at INIT.
- * We start the HoL timer so we can detect stuck packets blocking SMP replies.
- * Timer may already be running, so use mod_timer, not add_timer.
- */
-void qib_hol_init(struct qib_pportdata *ppd)
-{
- if (ppd->hol_state != QIB_HOL_INIT) {
- ppd->hol_state = QIB_HOL_INIT;
- mod_timer(&ppd->hol_timer,
- jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
- }
-}
-
-/*
- * Link is up; continue any user processes. Leave the HoL timer
- * running if it is set; it becomes a nop once it sees that the
- * link is up.
- */
-void qib_hol_up(struct qib_pportdata *ppd)
-{
- ppd->hol_state = QIB_HOL_UP;
-}
-
-/*
- * This is only called via the timer.
- */
-void qib_hol_event(struct timer_list *t)
-{
- struct qib_pportdata *ppd = timer_container_of(ppd, t, hol_timer);
-
- /* If hardware error, etc, skip. */
- if (!(ppd->dd->flags & QIB_INITTED))
- return;
-
- if (ppd->hol_state != QIB_HOL_UP) {
- /*
- * Try to flush sends in case a stuck packet is blocking
- * SMP replies.
- */
- qib_hol_down(ppd);
- mod_timer(&ppd->hol_timer,
- jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
- }
-}
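The disarm bookkeeping in qib_cancel_sends() above is plain range-marking in a bitmap under a spinlock. A minimal, self-contained userspace sketch of that pattern follows; the sizes and names are hypothetical and the locking is omitted.

/*
 * Illustrative sketch only; not part of the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define NBUFS 128
#define BITS_PER_WORD 64

static uint64_t need_disarm[NBUFS / BITS_PER_WORD];

/* mark buffers [first, last) for disarm, like the __set_bit() loop above */
static void mark_disarm_range(unsigned int first, unsigned int last)
{
	unsigned int i;

	for (i = first; i < last; i++)
		need_disarm[i / BITS_PER_WORD] |= 1ULL << (i % BITS_PER_WORD);
}

int main(void)
{
	mark_disarm_range(16, 24);	/* e.g. pio_base..pio_base+piocnt */
	printf("word0 = %#llx\n", (unsigned long long)need_disarm[0]);
	return 0;
}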
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
deleted file mode 100644
index 8e2bda77d8b9..000000000000
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "qib.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_UC_##x
-
-/**
- * qib_make_uc_req - construct a request packet (SEND, RDMA write)
- * @qp: a pointer to the QP
- * @flags: unused
- *
- * Assumes the s_lock is held.
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct ib_other_headers *ohdr;
- struct rvt_swqe *wqe;
- u32 hwords;
- u32 bth0;
- u32 len;
- u32 pmtu = qp->pmtu;
- int ret = 0;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
- if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == READ_ONCE(qp->s_head))
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&priv->s_dma_busy)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done;
- }
-
- ohdr = &priv->s_hdr->u.oth;
- if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
- ohdr = &priv->s_hdr->u.l.oth;
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
- bth0 = 0;
-
- /* Get the next send request. */
- wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
- qp->s_wqe = NULL;
- switch (qp->s_state) {
- default:
- if (!(ib_rvt_state_ops[qp->state] &
- RVT_PROCESS_NEXT_SEND_OK))
- goto bail;
- /* Check if send work queue is empty. */
- if (qp->s_cur == READ_ONCE(qp->s_head))
- goto bail;
- /*
- * Start a new request.
- */
- qp->s_psn = wqe->psn;
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_sge.total_len = wqe->length;
- len = wqe->length;
- qp->s_len = len;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- if (len > pmtu) {
- qp->s_state = OP(SEND_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_ONLY);
- else {
- qp->s_state =
- OP(SEND_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- hwords += sizeof(struct ib_reth) / 4;
- if (len > pmtu) {
- qp->s_state = OP(RDMA_WRITE_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_ONLY);
- else {
- qp->s_state =
- OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the RETH */
- ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- default:
- goto bail;
- }
- break;
-
- case OP(SEND_FIRST):
- qp->s_state = OP(SEND_MIDDLE);
- fallthrough;
- case OP(SEND_MIDDLE):
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_LAST);
- else {
- qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_WRITE_FIRST):
- qp->s_state = OP(RDMA_WRITE_MIDDLE);
- fallthrough;
- case OP(RDMA_WRITE_MIDDLE):
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_LAST);
- else {
- qp->s_state =
- OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
- }
- qp->s_len -= len;
- qp->s_hdrwords = hwords;
- qp->s_cur_sge = &qp->s_sge;
- qp->s_cur_size = len;
- qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
- qp->s_psn++ & QIB_PSN_MASK);
-done:
- return 1;
-bail:
- qp->s_flags &= ~RVT_S_BUSY;
- return ret;
-}
-
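qib_make_uc_req() above walks a message through the FIRST/MIDDLE/LAST (or ONLY) states, emitting at most one PMTU of payload per packet. A standalone sketch of just that fragmentation arithmetic, using made-up lengths:

#include <stdio.h>

int main(void)
{
	unsigned int pmtu = 2048, len = 5000;
	unsigned int left = len, seg = 0;

	while (left) {
		unsigned int chunk = left > pmtu ? pmtu : left;
		const char *kind = (len <= pmtu) ? "ONLY"
				 : (seg == 0)    ? "FIRST"
				 : (left > pmtu) ? "MIDDLE" : "LAST";

		printf("seg %u: %u bytes (%s)\n", seg, chunk, kind);
		left -= chunk;
		seg++;
	}
	return 0;
}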
-/**
- * qib_uc_rcv - handle an incoming UC packet
- * @ibp: the port the packet came in on
- * @hdr: the header of the packet
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the length of the packet
- * @qp: the QP for this packet.
- *
- * This is called from qib_qp_rcv() to process an incoming UC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
-{
- struct ib_other_headers *ohdr;
- u32 opcode;
- u32 hdrsize;
- u32 psn;
- u32 pad;
- struct ib_wc wc;
- u32 pmtu = qp->pmtu;
- struct ib_reth *reth;
- int ret;
-
- /* Check for GRH */
- if (!has_grh) {
- ohdr = &hdr->u.oth;
- hdrsize = 8 + 12; /* LRH + BTH */
- } else {
- ohdr = &hdr->u.l.oth;
- hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
- }
-
- opcode = be32_to_cpu(ohdr->bth[0]);
- if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
- return;
-
- psn = be32_to_cpu(ohdr->bth[2]);
- opcode >>= 24;
-
- /* Compare the PSN against the expected PSN. */
- if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
- /*
- * Handle a sequence error.
- * Silently drop any current message.
- */
- qp->r_psn = psn;
-inv:
- if (qp->r_state == OP(SEND_FIRST) ||
- qp->r_state == OP(SEND_MIDDLE)) {
- set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
- qp->r_sge.num_sge = 0;
- } else
- rvt_put_ss(&qp->r_sge);
- qp->r_state = OP(SEND_LAST);
- switch (opcode) {
- case OP(SEND_FIRST):
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
- goto send_first;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
- goto rdma_first;
-
- default:
- goto drop;
- }
- }
-
- /* Check for opcode sequence errors. */
- switch (qp->r_state) {
- case OP(SEND_FIRST):
- case OP(SEND_MIDDLE):
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE))
- break;
- goto inv;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_MIDDLE):
- if (opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- break;
- goto inv;
-
- default:
- if (opcode == OP(SEND_FIRST) ||
- opcode == OP(SEND_ONLY) ||
- opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_FIRST) ||
- opcode == OP(RDMA_WRITE_ONLY) ||
- opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
- break;
- goto inv;
- }
-
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
- rvt_comm_est(qp);
-
- /* OK, process the packet. */
- switch (opcode) {
- case OP(SEND_FIRST):
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
-send_first:
- if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
- qp->r_sge = qp->s_rdma_read_sge;
- else {
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto drop;
- /*
- * qp->s_rdma_read_sge will be the owner
- * of the mr references.
- */
- qp->s_rdma_read_sge = qp->r_sge;
- }
- qp->r_rcv_len = 0;
- if (opcode == OP(SEND_ONLY))
- goto no_immediate_data;
- else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
- goto send_last_imm;
- fallthrough;
- case OP(SEND_MIDDLE):
- /* Check for an invalid length: PMTU mismatch or posted rwqe overrun. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto rewind;
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len))
- goto rewind;
- rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false);
- break;
-
- case OP(SEND_LAST_WITH_IMMEDIATE):
-send_last_imm:
- wc.ex.imm_data = ohdr->u.imm_data;
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
- goto send_last;
- case OP(SEND_LAST):
-no_immediate_data:
- wc.ex.imm_data = 0;
- wc.wc_flags = 0;
-send_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto rewind;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- wc.byte_len = tlen + qp->r_rcv_len;
- if (unlikely(wc.byte_len > qp->r_len))
- goto rewind;
- wc.opcode = IB_WC_RECV;
- rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false);
- rvt_put_ss(&qp->s_rdma_read_sge);
-last_imm:
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
- wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
- /* zero fields that are N/A */
- wc.vendor_err = 0;
- wc.pkey_index = 0;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
- /* Signal completion event if the solicited bit is set. */
- rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
- break;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
-rdma_first:
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE))) {
- goto drop;
- }
- reth = &ohdr->u.rc.reth;
- hdrsize += sizeof(*reth);
- qp->r_len = be32_to_cpu(reth->length);
- qp->r_rcv_len = 0;
- qp->r_sge.sg_list = NULL;
- if (qp->r_len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey */
- ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
- vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
- if (unlikely(!ok))
- goto drop;
- qp->r_sge.num_sge = 1;
- } else {
- qp->r_sge.num_sge = 0;
- qp->r_sge.sge.mr = NULL;
- qp->r_sge.sge.vaddr = NULL;
- qp->r_sge.sge.length = 0;
- qp->r_sge.sge.sge_length = 0;
- }
- if (opcode == OP(RDMA_WRITE_ONLY))
- goto rdma_last;
- else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
- wc.ex.imm_data = ohdr->u.rc.imm_data;
- goto rdma_last_imm;
- }
- fallthrough;
- case OP(RDMA_WRITE_MIDDLE):
- /* Check for an invalid length: PMTU mismatch or posted rwqe overrun. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto drop;
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len))
- goto drop;
- rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
- break;
-
- case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
- wc.ex.imm_data = ohdr->u.imm_data;
-rdma_last_imm:
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
-
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto drop;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
- goto drop;
- if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
- rvt_put_ss(&qp->s_rdma_read_sge);
- else {
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto drop;
- }
- wc.byte_len = qp->r_len;
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
- rvt_put_ss(&qp->r_sge);
- goto last_imm;
-
- case OP(RDMA_WRITE_LAST):
-rdma_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto drop;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
- goto drop;
- rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
- rvt_put_ss(&qp->r_sge);
- break;
-
- default:
- /* Drop packet for unknown opcodes. */
- goto drop;
- }
- qp->r_psn++;
- qp->r_state = opcode;
- return;
-
-rewind:
- set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
- qp->r_sge.num_sge = 0;
-drop:
- ibp->rvp.n_pkt_drops++;
- return;
-
-op_err:
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- return;
-
-}
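The receive path above leans on a few bit-field idioms: the opcode lives in the top byte of bth[0], the pad count in bits 21:20, and PSNs are compared modulo 2^24. A hedged userspace sketch of those extractions with a qib_cmp24-style comparison (the exact kernel helper may differ slightly):

#include <stdint.h>
#include <stdio.h>

/*
 * Sign-extend the 24-bit difference so the PSN comparison survives
 * wraparound; an arithmetic right shift is assumed, as the kernel assumes.
 */
static int cmp24(uint32_t a, uint32_t b)
{
	return ((int32_t)((a - b) << 8)) >> 8;
}

int main(void)
{
	uint32_t bth0 = 0x04100002;		/* made-up BTH word */
	unsigned int opcode = bth0 >> 24;	/* top byte */
	unsigned int pad = (bth0 >> 20) & 3;	/* pad count, bits 21:20 */

	printf("opcode=%#x pad=%u\n", opcode, pad);
	/* PSN 1 is two ahead of PSN 0xFFFFFF despite the wrap */
	printf("cmp24(1, 0xFFFFFF) = %d\n", cmp24(1, 0xFFFFFF));
	return 0;
}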
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
deleted file mode 100644
index 81eda94bd279..000000000000
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * Copyright (c) 2012 - 2019 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-#include <rdma/ib_verbs.h>
-
-#include "qib.h"
-#include "qib_mad.h"
-
-/**
- * qib_ud_loopback - handle send on loopback QPs
- * @sqp: the sending QP
- * @swqe: the send work request
- *
- * This is called from qib_make_ud_req() to forward a WQE addressed
- * to the same HCA.
- * Note that the receive interrupt handler may be calling qib_ud_rcv()
- * while this is being called.
- */
-static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
-{
- struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = ppd->dd;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- struct rvt_qp *qp;
- struct rdma_ah_attr *ah_attr;
- unsigned long flags;
- struct rvt_sge_state ssge;
- struct rvt_sge *sge;
- struct ib_wc wc;
- u32 length;
- enum ib_qp_type sqptype, dqptype;
-
- rcu_read_lock();
- qp = rvt_lookup_qpn(rdi, &ibp->rvp, rvt_get_swqe_remote_qpn(swqe));
- if (!qp) {
- ibp->rvp.n_pkt_drops++;
- goto drop;
- }
-
- sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
- IB_QPT_UD : sqp->ibqp.qp_type;
- dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
- IB_QPT_UD : qp->ibqp.qp_type;
-
- if (dqptype != sqptype ||
- !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
- ibp->rvp.n_pkt_drops++;
- goto drop;
- }
-
- ah_attr = rvt_get_swqe_ah_attr(swqe);
- ppd = ppd_from_ibp(ibp);
-
- if (qp->ibqp.qp_num > 1) {
- u16 pkey1;
- u16 pkey2;
- u16 lid;
-
- pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
- pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
- if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
- lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
- ((1 << ppd->lmc) - 1));
- qib_bad_pkey(ibp, pkey1,
- rdma_ah_get_sl(ah_attr),
- sqp->ibqp.qp_num, qp->ibqp.qp_num,
- cpu_to_be16(lid),
- cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
- goto drop;
- }
- }
-
- /*
- * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
- * Qkeys with the high order bit set mean use the
- * qkey from the QP context instead of the WR (see 10.2.5).
- */
- if (qp->ibqp.qp_num) {
- u32 qkey;
-
- qkey = (int)rvt_get_swqe_remote_qkey(swqe) < 0 ?
- sqp->qkey : rvt_get_swqe_remote_qkey(swqe);
- if (unlikely(qkey != qp->qkey))
- goto drop;
- }
-
- /*
- * A GRH is expected to precede the data even if not
- * present on the wire.
- */
- length = swqe->length;
- memset(&wc, 0, sizeof(wc));
- wc.byte_len = length + sizeof(struct ib_grh);
-
- if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = swqe->wr.ex.imm_data;
- }
-
- spin_lock_irqsave(&qp->r_lock, flags);
-
- /*
- * Get the next work request entry to find where to put the data.
- */
- if (qp->r_flags & RVT_R_REUSE_SGE)
- qp->r_flags &= ~RVT_R_REUSE_SGE;
- else {
- int ret;
-
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0) {
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- goto bail_unlock;
- }
- if (!ret) {
- if (qp->ibqp.qp_num == 0)
- ibp->rvp.n_vl15_dropped++;
- goto bail_unlock;
- }
- }
- /* Silently drop packets which are too big. */
- if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= RVT_R_REUSE_SGE;
- ibp->rvp.n_pkt_drops++;
- goto bail_unlock;
- }
-
- if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
- struct ib_grh grh;
- const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);
-
- qib_make_grh(ibp, &grh, grd, 0, 0);
- rvt_copy_sge(qp, &qp->r_sge, &grh,
- sizeof(grh), true, false);
- wc.wc_flags |= IB_WC_GRH;
- } else
- rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
- ssge.sg_list = swqe->sg_list + 1;
- ssge.sge = *swqe->sg_list;
- ssge.num_sge = swqe->wr.num_sge;
- sge = &ssge.sge;
- while (length) {
- u32 len = rvt_get_sge_length(sge, length);
-
- rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ssge.num_sge)
- *sge = *ssge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- length -= len;
- }
- rvt_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- goto bail_unlock;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- wc.src_qp = sqp->ibqp.qp_num;
- wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
- rvt_get_swqe_pkey_index(swqe) : 0;
- wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
- ((1 << ppd->lmc) - 1));
- wc.sl = rdma_ah_get_sl(ah_attr);
- wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
- wc.port_num = qp->port_num;
- /* Signal completion event if the solicited bit is set. */
- rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED);
- ibp->rvp.n_loop_pkts++;
-bail_unlock:
- spin_unlock_irqrestore(&qp->r_lock, flags);
-drop:
- rcu_read_unlock();
-}
-
-/**
- * qib_make_ud_req - construct a UD request packet
- * @qp: the QP
- * @flags: flags to modify and pass back to caller
- *
- * Assumes the s_lock is held.
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct ib_other_headers *ohdr;
- struct rdma_ah_attr *ah_attr;
- struct qib_pportdata *ppd;
- struct qib_ibport *ibp;
- struct rvt_swqe *wqe;
- u32 nwords;
- u32 extra_bytes;
- u32 bth0;
- u16 lrh0;
- u16 lid;
- int ret = 0;
- int next_cur;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == READ_ONCE(qp->s_head))
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&priv->s_dma_busy)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done;
- }
-
- /* see post_one_send() */
- if (qp->s_cur == READ_ONCE(qp->s_head))
- goto bail;
-
- wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
- next_cur = qp->s_cur + 1;
- if (next_cur >= qp->s_size)
- next_cur = 0;
-
- /* Construct the header. */
- ibp = to_iport(qp->ibqp.device, qp->port_num);
- ppd = ppd_from_ibp(ibp);
- ah_attr = rvt_get_swqe_ah_attr(wqe);
- if (rdma_ah_get_dlid(ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
- if (rdma_ah_get_dlid(ah_attr) !=
- be16_to_cpu(IB_LID_PERMISSIVE))
- this_cpu_inc(ibp->pmastats->n_multicast_xmit);
- else
- this_cpu_inc(ibp->pmastats->n_unicast_xmit);
- } else {
- this_cpu_inc(ibp->pmastats->n_unicast_xmit);
- lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
- if (unlikely(lid == ppd->lid)) {
- unsigned long tflags = *flags;
- /*
- * If DMAs are in progress, we can't generate
- * a completion for the loopback packet since
- * it would be out of order.
- * XXX Instead of waiting, we could queue a
- * zero length descriptor so we get a callback.
- */
- if (atomic_read(&priv->s_dma_busy)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- qp->s_cur = next_cur;
- spin_unlock_irqrestore(&qp->s_lock, tflags);
- qib_ud_loopback(qp, wqe);
- spin_lock_irqsave(&qp->s_lock, tflags);
- *flags = tflags;
- rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
- goto done;
- }
- }
-
- qp->s_cur = next_cur;
- extra_bytes = -wqe->length & 3;
- nwords = (wqe->length + extra_bytes) >> 2;
-
- /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
- qp->s_hdrwords = 7;
- qp->s_cur_size = wqe->length;
- qp->s_cur_sge = &qp->s_sge;
- qp->s_srate = rdma_ah_get_static_rate(ah_attr);
- qp->s_wqe = wqe;
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_sge.total_len = wqe->length;
-
- if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
- /* Header size in 32-bit words. */
- qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
- rdma_ah_read_grh(ah_attr),
- qp->s_hdrwords, nwords);
- lrh0 = QIB_LRH_GRH;
- ohdr = &priv->s_hdr->u.l.oth;
- /*
- * Don't worry about sending to locally attached multicast
- * QPs; the IBA spec leaves that behavior unspecified.
- */
- } else {
- /* Header size in 32-bit words. */
- lrh0 = QIB_LRH_BTH;
- ohdr = &priv->s_hdr->u.oth;
- }
- if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
- qp->s_hdrwords++;
- ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
- bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
- } else
- bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
- lrh0 |= rdma_ah_get_sl(ah_attr) << 4;
- if (qp->ibqp.qp_type == IB_QPT_SMI)
- lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
- else
- lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(ah_attr)] << 12;
- priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
- priv->s_hdr->lrh[1] =
- cpu_to_be16(rdma_ah_get_dlid(ah_attr)); /* DEST LID */
- priv->s_hdr->lrh[2] =
- cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- lid = ppd->lid;
- if (lid) {
- lid |= rdma_ah_get_path_bits(ah_attr) &
- ((1 << ppd->lmc) - 1);
- priv->s_hdr->lrh[3] = cpu_to_be16(lid);
- } else
- priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth0 |= extra_bytes << 20;
- bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
- qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
- rvt_get_swqe_pkey_index(wqe) : qp->s_pkey_index);
- ohdr->bth[0] = cpu_to_be32(bth0);
- /*
- * Use the multicast QP if the destination LID is a multicast LID.
- */
- ohdr->bth[1] = rdma_ah_get_dlid(ah_attr) >=
- be16_to_cpu(IB_MULTICAST_LID_BASE) &&
- rdma_ah_get_dlid(ah_attr) != be16_to_cpu(IB_LID_PERMISSIVE) ?
- cpu_to_be32(QIB_MULTICAST_QPN) :
- cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
- ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
- /*
- * Qkeys with the high order bit set mean use the
- * qkey from the QP context instead of the WR (see 10.2.5).
- */
- ohdr->u.ud.deth[0] =
- cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :
- rvt_get_swqe_remote_qkey(wqe));
- ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
-
-done:
- return 1;
-bail:
- qp->s_flags &= ~RVT_S_BUSY;
- return ret;
-}
-
-static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
-{
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = ppd->dd;
- unsigned ctxt = ppd->hw_pidx;
- unsigned i;
-
- pkey &= 0x7fff; /* remove limited/full membership bit */
-
- for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
- if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
- return i;
-
- /*
- * Should not get here; this means the hardware failed to validate pkeys.
- * Punt and return index 0.
- */
- return 0;
-}
-
-/**
- * qib_ud_rcv - receive an incoming UD packet
- * @ibp: the port the packet came in on
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from qib_qp_rcv() to process an incoming UD packet
- * for the given QP.
- * Called at interrupt level.
- */
-void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
-{
- struct ib_other_headers *ohdr;
- int opcode;
- u32 hdrsize;
- u32 pad;
- struct ib_wc wc;
- u32 qkey;
- u32 src_qp;
- u16 dlid;
-
- /* Check for GRH */
- if (!has_grh) {
- ohdr = &hdr->u.oth;
- hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
- } else {
- ohdr = &hdr->u.l.oth;
- hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
- }
- qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
- src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
-
- /*
- * Get the number of bytes the message was padded by
- * and drop incomplete packets.
- */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto drop;
-
- tlen -= hdrsize + pad + 4;
-
- /*
- * Check that the permissive LID is only used on QP0
- * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
- */
- if (qp->ibqp.qp_num) {
- if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE))
- goto drop;
- if (qp->ibqp.qp_num > 1) {
- u16 pkey1, pkey2;
-
- pkey1 = be32_to_cpu(ohdr->bth[0]);
- pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
- if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
- qib_bad_pkey(ibp,
- pkey1,
- (be16_to_cpu(hdr->lrh[0]) >> 4) &
- 0xF,
- src_qp, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
- return;
- }
- }
- if (unlikely(qkey != qp->qkey))
- return;
-
- /* Drop invalid MAD packets (see 13.5.3.1). */
- if (unlikely(qp->ibqp.qp_num == 1 &&
- (tlen != 256 ||
- (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
- goto drop;
- } else {
- struct ib_smp *smp;
-
- /* Drop invalid MAD packets (see 13.5.3.1). */
- if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
- goto drop;
- smp = (struct ib_smp *) data;
- if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE) &&
- smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- goto drop;
- }
-
- /*
- * The opcode is in the low byte when it's in network order
- * (top byte when in host order).
- */
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
- if (qp->ibqp.qp_num > 1 &&
- opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
- wc.ex.imm_data = ohdr->u.ud.imm_data;
- wc.wc_flags = IB_WC_WITH_IMM;
- } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
- wc.ex.imm_data = 0;
- wc.wc_flags = 0;
- } else
- goto drop;
-
- /*
- * A GRH is expected to precede the data even if not
- * present on the wire.
- */
- wc.byte_len = tlen + sizeof(struct ib_grh);
-
- /*
- * Get the next work request entry to find where to put the data.
- */
- if (qp->r_flags & RVT_R_REUSE_SGE)
- qp->r_flags &= ~RVT_R_REUSE_SGE;
- else {
- int ret;
-
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0) {
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- return;
- }
- if (!ret) {
- if (qp->ibqp.qp_num == 0)
- ibp->rvp.n_vl15_dropped++;
- return;
- }
- }
- /* Silently drop packets which are too big. */
- if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= RVT_R_REUSE_SGE;
- goto drop;
- }
- if (has_grh) {
- rvt_copy_sge(qp, &qp->r_sge, &hdr->u.l.grh,
- sizeof(struct ib_grh), true, false);
- wc.wc_flags |= IB_WC_GRH;
- } else
- rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
- rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
- true, false);
- rvt_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- return;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = IB_WC_RECV;
- wc.vendor_err = 0;
- wc.qp = &qp->ibqp;
- wc.src_qp = src_qp;
- wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
- qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
- wc.slid = be16_to_cpu(hdr->lrh[3]);
- wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
- dlid = be16_to_cpu(hdr->lrh[1]);
- /*
- * Save the LMC lower bits if the destination LID is a unicast LID.
- */
- wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
- dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
- wc.port_num = qp->port_num;
- /* Signal completion event if the solicited bit is set. */
- rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
- return;
-
-drop:
- ibp->rvp.n_pkt_drops++;
-}
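The UD send path above rounds the payload up to a 32-bit boundary with extra_bytes = -wqe->length & 3 before computing nwords. A tiny standalone illustration of that arithmetic (unsigned negation wraps, so the result is always 0..3):

#include <stdio.h>

int main(void)
{
	unsigned int len = 53;
	unsigned int extra = -len & 3;		  /* pad up to 4-byte boundary */
	unsigned int nwords = (len + extra) >> 2; /* payload in 32-bit words */

	printf("len=%u extra=%u nwords=%u\n", len, extra, nwords);
	return 0;
}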
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
deleted file mode 100644
index 1bb7507325bc..000000000000
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mm.h>
-#include <linux/sched/signal.h>
-#include <linux/device.h>
-
-#include "qib.h"
-
-static void __qib_release_user_pages(struct page **p, size_t num_pages,
- int dirty)
-{
- unpin_user_pages_dirty_lock(p, num_pages, dirty);
-}
-
-/*
- * qib_map_page - a safety wrapper around dma_map_page()
- *
- * A dma_addr of all 0's is interpreted by the chip as "disabled".
- * Unfortunately, it can also be a valid dma_addr returned on some
- * architectures.
- *
- * The powerpc iommu assigns dma_addrs in ascending order, so we don't
- * have to bother with retries or mapping a dummy page to ensure we
- * don't just get the same mapping again.
- *
- * I'm sure we won't be so lucky with other iommu's, so FIXME.
- */
-int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
-{
- dma_addr_t phys;
-
- phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&hwdev->dev, phys))
- return -ENOMEM;
-
- if (!phys) {
- dma_unmap_page(&hwdev->dev, phys, PAGE_SIZE, DMA_FROM_DEVICE);
- phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&hwdev->dev, phys))
- return -ENOMEM;
- /*
- * FIXME: If we get 0 again, we should keep this page,
- * map another, then free the 0 page.
- */
- }
- *daddr = phys;
- return 0;
-}
-
-/**
- * qib_get_user_pages - lock user pages into memory
- * @start_page: the start page
- * @num_pages: the number of pages
- * @p: the output page structures
- *
- * This function takes a given start page (page aligned user virtual
- * address) and pins it and the following specified number of pages. For
- * now, num_pages is always 1, but that will probably change at some point
- * (because caller is doing expected sends on a single virtually contiguous
- * buffer, so we can do all pages at once).
- */
-int qib_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p)
-{
- unsigned long locked, lock_limit;
- size_t got;
- int ret;
-
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);
-
- if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
- ret = -ENOMEM;
- goto bail;
- }
-
- mmap_read_lock(current->mm);
- for (got = 0; got < num_pages; got += ret) {
- ret = pin_user_pages(start_page + got * PAGE_SIZE,
- num_pages - got,
- FOLL_LONGTERM | FOLL_WRITE,
- p + got);
- if (ret < 0) {
- mmap_read_unlock(current->mm);
- goto bail_release;
- }
- }
- mmap_read_unlock(current->mm);
-
- return 0;
-bail_release:
- __qib_release_user_pages(p, got, 0);
-bail:
- atomic64_sub(num_pages, &current->mm->pinned_vm);
- return ret;
-}
-
-void qib_release_user_pages(struct page **p, size_t num_pages)
-{
- __qib_release_user_pages(p, num_pages, 1);
-
- /* during close after signal, mm can be NULL */
- if (current->mm)
- atomic64_sub(num_pages, &current->mm->pinned_vm);
-}
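qib_get_user_pages() charges pinned pages against RLIMIT_MEMLOCK with an atomic add and rolls the charge back on failure. A simplified userspace sketch of that accounting, with a stand-in limit and no CAP_IPC_LOCK escape hatch:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long pinned_vm;
static long lock_limit = 4096;	/* stand-in for RLIMIT_MEMLOCK >> PAGE_SHIFT */

/* charge num_pages against the limit, rolling back on failure */
static int charge_pages(long num_pages)
{
	long locked = atomic_fetch_add(&pinned_vm, num_pages) + num_pages;

	if (locked > lock_limit) {
		atomic_fetch_sub(&pinned_vm, num_pages);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("first charge:  %d\n", charge_pages(4000));
	printf("second charge: %d\n", charge_pages(200));	/* over limit */
	return 0;
}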
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
deleted file mode 100644
index 336eb15a721f..000000000000
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ /dev/null
@@ -1,1470 +0,0 @@
-/*
- * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/uio.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-
-#include "qib.h"
-#include "qib_user_sdma.h"
-
-/* minimum size of header */
-#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
-/* expected size of headers (for dma_pool) */
-#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
-/* attempt to drain the queue for 5 secs */
-#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
-
-/*
- * Track how many times a process opens this driver.
- */
-static struct rb_root qib_user_sdma_rb_root = RB_ROOT;
-
-struct qib_user_sdma_rb_node {
- struct rb_node node;
- int refcount;
- pid_t pid;
-};
-
-struct qib_user_sdma_pkt {
- struct list_head list; /* list element */
-
- u8 tiddma; /* if this is NEW tid-sdma */
- u8 largepkt; /* this is large pkt from kmalloc */
- u16 frag_size; /* frag size used by PSM */
- u16 index; /* last header index or push index */
- u16 naddr; /* dimension of addr (1..3) ... */
- u16 addrlimit; /* addr array size */
- u16 tidsmidx; /* current tidsm index */
- u16 tidsmcount; /* tidsm array item count */
- u16 payload_size; /* payload size so far for header */
- u32 bytes_togo; /* bytes for processing */
- u32 counter; /* sdma pkts queued counter for this entry */
- struct qib_tid_session_member *tidsm; /* tid session member array */
- struct qib_user_sdma_queue *pq; /* which pq this pkt belongs to */
- u64 added; /* global descq number of entries */
-
- struct {
- u16 offset; /* offset for kvaddr, addr */
- u16 length; /* length in page */
- u16 first_desc; /* first desc */
- u16 last_desc; /* last desc */
- u16 put_page; /* should we put_page? */
- u16 dma_mapped; /* is page dma_mapped? */
- u16 dma_length; /* for dma_unmap_page() */
- u16 padding;
- struct page *page; /* may be NULL (coherent mem) */
- void *kvaddr; /* FIXME: only for pio hack */
- dma_addr_t addr;
- } addr[4]; /* max pages, any more and we coalesce */
-};
-
-struct qib_user_sdma_queue {
- /*
- * pkts sent to dma engine are queued on this
- * list head. the type of the elements of this
- * list are struct qib_user_sdma_pkt...
- */
- struct list_head sent;
-
- /*
- * Because above list will be accessed by both process and
- * signal handler, we need a spinlock for it.
- */
- spinlock_t sent_lock ____cacheline_aligned_in_smp;
-
- /* headers with expected length are allocated from here... */
- char header_cache_name[64];
- struct dma_pool *header_cache;
-
- /* packets are allocated from the slab cache... */
- char pkt_slab_name[64];
- struct kmem_cache *pkt_slab;
-
- /* as packets go on the queued queue, they are counted... */
- u32 counter;
- u32 sent_counter;
- /* pending packets, not sending yet */
- u32 num_pending;
- /* sending packets, not complete yet */
- u32 num_sending;
- /* global descq number of entry of last sending packet */
- u64 added;
-
- /* dma page table */
- struct rb_root dma_pages_root;
-
- struct qib_user_sdma_rb_node *sdma_rb_node;
-
- /* protect everything above... */
- struct mutex lock;
-};
-
-static struct qib_user_sdma_rb_node *
-qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
-{
- struct qib_user_sdma_rb_node *sdma_rb_node;
- struct rb_node *node = root->rb_node;
-
- while (node) {
- sdma_rb_node = rb_entry(node, struct qib_user_sdma_rb_node,
- node);
- if (pid < sdma_rb_node->pid)
- node = node->rb_left;
- else if (pid > sdma_rb_node->pid)
- node = node->rb_right;
- else
- return sdma_rb_node;
- }
- return NULL;
-}
-
-static int
-qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
-{
- struct rb_node **node = &(root->rb_node);
- struct rb_node *parent = NULL;
- struct qib_user_sdma_rb_node *got;
-
- while (*node) {
- got = rb_entry(*node, struct qib_user_sdma_rb_node, node);
- parent = *node;
- if (new->pid < got->pid)
- node = &((*node)->rb_left);
- else if (new->pid > got->pid)
- node = &((*node)->rb_right);
- else
- return 0;
- }
-
- rb_link_node(&new->node, parent, node);
- rb_insert_color(&new->node, root);
- return 1;
-}
-
-struct qib_user_sdma_queue *
-qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
-{
- struct qib_user_sdma_queue *pq =
- kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
- struct qib_user_sdma_rb_node *sdma_rb_node;
-
- if (!pq)
- goto done;
-
- pq->counter = 0;
- pq->sent_counter = 0;
- pq->num_pending = 0;
- pq->num_sending = 0;
- pq->added = 0;
- pq->sdma_rb_node = NULL;
-
- INIT_LIST_HEAD(&pq->sent);
- spin_lock_init(&pq->sent_lock);
- mutex_init(&pq->lock);
-
- snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
- "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
- pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
- sizeof(struct qib_user_sdma_pkt),
- 0, 0, NULL);
-
- if (!pq->pkt_slab)
- goto err_kfree;
-
- snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
- "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
- pq->header_cache = dma_pool_create(pq->header_cache_name,
- dev,
- QIB_USER_SDMA_EXP_HEADER_LENGTH,
- 4, 0);
- if (!pq->header_cache)
- goto err_slab;
-
- pq->dma_pages_root = RB_ROOT;
-
- sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root,
- current->pid);
- if (sdma_rb_node) {
- sdma_rb_node->refcount++;
- } else {
- sdma_rb_node = kmalloc(sizeof(
- struct qib_user_sdma_rb_node), GFP_KERNEL);
- if (!sdma_rb_node)
- goto err_rb;
-
- sdma_rb_node->refcount = 1;
- sdma_rb_node->pid = current->pid;
-
- qib_user_sdma_rb_insert(&qib_user_sdma_rb_root, sdma_rb_node);
- }
- pq->sdma_rb_node = sdma_rb_node;
-
- goto done;
-
-err_rb:
- dma_pool_destroy(pq->header_cache);
-err_slab:
- kmem_cache_destroy(pq->pkt_slab);
-err_kfree:
- kfree(pq);
- pq = NULL;
-
-done:
- return pq;
-}
-
-static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
- int i, u16 offset, u16 len,
- u16 first_desc, u16 last_desc,
- u16 put_page, u16 dma_mapped,
- struct page *page, void *kvaddr,
- dma_addr_t dma_addr, u16 dma_length)
-{
- pkt->addr[i].offset = offset;
- pkt->addr[i].length = len;
- pkt->addr[i].first_desc = first_desc;
- pkt->addr[i].last_desc = last_desc;
- pkt->addr[i].put_page = put_page;
- pkt->addr[i].dma_mapped = dma_mapped;
- pkt->addr[i].page = page;
- pkt->addr[i].kvaddr = kvaddr;
- pkt->addr[i].addr = dma_addr;
- pkt->addr[i].dma_length = dma_length;
-}
-
-static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
- size_t len, dma_addr_t *dma_addr)
-{
- void *hdr;
-
- if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
- hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
- dma_addr);
- else
- hdr = NULL;
-
- if (!hdr) {
- hdr = kmalloc(len, GFP_KERNEL);
- if (!hdr)
- return NULL;
-
- *dma_addr = 0;
- }
-
- return hdr;
-}
-
-static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- struct page *page, u16 put,
- u16 offset, u16 len, void *kvaddr)
-{
- __le16 *pbc16;
- void *pbcvaddr;
- struct qib_message_header *hdr;
- u16 newlen, pbclen, lastdesc, dma_mapped;
- u32 vcto;
- union qib_seqnum seqnum;
- dma_addr_t pbcdaddr;
- dma_addr_t dma_addr =
- dma_map_page(&dd->pcidev->dev,
- page, offset, len, DMA_TO_DEVICE);
- int ret = 0;
-
- if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
- /*
- * DMA mapping error: the pkt does not own
- * this page yet, so release it here and let
- * the caller ignore it.
- */
- if (put) {
- unpin_user_page(page);
- } else {
- /* coalesce case */
- __free_page(page);
- }
- ret = -ENOMEM;
- goto done;
- }
- offset = 0;
- dma_mapped = 1;
-
-
-next_fragment:
-
- /*
- * In tid-sdma, the transfer length is restricted by
- * the receiver side's current tid page length.
- */
- if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
- newlen = pkt->tidsm[pkt->tidsmidx].length;
- else
- newlen = len;
-
- /*
- * The transfer length is further restricted by the MTU.
- * The last descriptor flag is set when:
- * 1. the current packet reaches frag_size bytes, or
- * 2. the current tid page is finished (tid-sdma), or
- * 3. there are no more bytes to go (plain sdma).
- */
- lastdesc = 0;
- if ((pkt->payload_size + newlen) >= pkt->frag_size) {
- newlen = pkt->frag_size - pkt->payload_size;
- lastdesc = 1;
- } else if (pkt->tiddma) {
- if (newlen == pkt->tidsm[pkt->tidsmidx].length)
- lastdesc = 1;
- } else {
- if (newlen == pkt->bytes_togo)
- lastdesc = 1;
- }
-
- /* fill the next fragment in this page */
- qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
- offset, newlen, /* offset, len */
- 0, lastdesc, /* first last desc */
- put, dma_mapped, /* put page, dma mapped */
- page, kvaddr, /* struct page, virt addr */
- dma_addr, len); /* dma addr, dma length */
- pkt->bytes_togo -= newlen;
- pkt->payload_size += newlen;
- pkt->naddr++;
- if (pkt->naddr == pkt->addrlimit) {
- ret = -EFAULT;
- goto done;
- }
-
- /* If there are no more bytes to go (lastdesc == 1). */
- if (pkt->bytes_togo == 0) {
- /* The packet is done but the header is not dma mapped yet;
- * it must have come from kmalloc. */
- if (!pkt->addr[pkt->index].addr) {
- pkt->addr[pkt->index].addr =
- dma_map_single(&dd->pcidev->dev,
- pkt->addr[pkt->index].kvaddr,
- pkt->addr[pkt->index].dma_length,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev,
- pkt->addr[pkt->index].addr)) {
- ret = -ENOMEM;
- goto done;
- }
- pkt->addr[pkt->index].dma_mapped = 1;
- }
-
- goto done;
- }
-
- /* If tid-sdma, advance tid info. */
- if (pkt->tiddma) {
- pkt->tidsm[pkt->tidsmidx].length -= newlen;
- if (pkt->tidsm[pkt->tidsmidx].length) {
- pkt->tidsm[pkt->tidsmidx].offset += newlen;
- } else {
- pkt->tidsmidx++;
- if (pkt->tidsmidx == pkt->tidsmcount) {
- ret = -EFAULT;
- goto done;
- }
- }
- }
-
- /*
- * If this is NOT the last descriptor (newlen == len),
- * the current packet is not done yet, but the current
- * send side page is done.
- */
- if (lastdesc == 0)
- goto done;
-
- /*
- * If running this driver under PSM with message size
- * fitting into one transfer unit, it is not possible
- * to reach this line; if we do, it is a bug.
- */
-
- /*
- * Since the current packet is done, and there are more
- * bytes togo, we need to create a new sdma header, copying
- * from previous sdma header and modify both.
- */
- pbclen = pkt->addr[pkt->index].length;
- pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
- if (!pbcvaddr) {
- ret = -ENOMEM;
- goto done;
- }
- /* Copy the previous sdma header to new sdma header */
- pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
- memcpy(pbcvaddr, pbc16, pbclen);
-
- /* Modify the previous sdma header */
- hdr = (struct qib_message_header *)&pbc16[4];
-
- /* New pbc length */
- pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));
-
- /* New packet length */
- hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
-
- if (pkt->tiddma) {
- /* turn on the header suppression */
- hdr->iph.pkt_flags =
- cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
- /* turn off ACK_REQ: 0x04 and EXPECTED_DONE: 0x20 */
- hdr->flags &= ~(0x04|0x20);
- } else {
- /* turn off extra bytes: 20-21 bits */
- hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
- /* turn off ACK_REQ: 0x04 */
- hdr->flags &= ~(0x04);
- }
-
- /* New kdeth checksum */
- vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
- hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
- be16_to_cpu(hdr->lrh[2]) -
- ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
- le16_to_cpu(hdr->iph.pkt_flags));
-
- /* The packet is done but the header is not dma mapped yet;
- * it must have come from kmalloc. */
- if (!pkt->addr[pkt->index].addr) {
- pkt->addr[pkt->index].addr =
- dma_map_single(&dd->pcidev->dev,
- pkt->addr[pkt->index].kvaddr,
- pkt->addr[pkt->index].dma_length,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev,
- pkt->addr[pkt->index].addr)) {
- ret = -ENOMEM;
- goto done;
- }
- pkt->addr[pkt->index].dma_mapped = 1;
- }
-
- /* Modify the new sdma header */
- pbc16 = (__le16 *)pbcvaddr;
- hdr = (struct qib_message_header *)&pbc16[4];
-
- /* New pbc length */
- pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));
-
- /* New packet length */
- hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
-
- if (pkt->tiddma) {
- /* Set new tid and offset for new sdma header */
- hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
- (le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
- (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
- (pkt->tidsm[pkt->tidsmidx].offset>>2));
- } else {
- /* Middle protocol new packet offset */
- hdr->uwords[2] += pkt->payload_size;
- }
-
- /* New kdeth checksum */
- vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
- hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
- be16_to_cpu(hdr->lrh[2]) -
- ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
- le16_to_cpu(hdr->iph.pkt_flags));
-
- /* Next sequence number in new sdma header */
- seqnum.val = be32_to_cpu(hdr->bth[2]);
- if (pkt->tiddma)
- seqnum.seq++;
- else
- seqnum.pkt++;
- hdr->bth[2] = cpu_to_be32(seqnum.val);
-
- /* Init new sdma header. */
- qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
- 0, pbclen, /* offset, len */
- 1, 0, /* first last desc */
- 0, 0, /* put page, dma mapped */
- NULL, pbcvaddr, /* struct page, virt addr */
- pbcdaddr, pbclen); /* dma addr, dma length */
- pkt->index = pkt->naddr;
- pkt->payload_size = 0;
- pkt->naddr++;
- if (pkt->naddr == pkt->addrlimit) {
- ret = -EFAULT;
- goto done;
- }
-
- /* Prepare for next fragment in this page */
- if (newlen != len) {
- if (dma_mapped) {
- put = 0;
- dma_mapped = 0;
- page = NULL;
- kvaddr = NULL;
- }
- len -= newlen;
- offset += newlen;
-
- goto next_fragment;
- }
-
-done:
- return ret;
-}
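When qib_user_sdma_page_to_frags() splits a packet, it shortens the first PBC's word count by bytes_togo >> 2 and recomputes the 16-bit kdeth checksum from the header fields. A hedged sketch of that arithmetic; the value of QIB_LRH_BTH is assumed here purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define LRH_BTH 0x0002u	/* assumed stand-in for QIB_LRH_BTH */

/* 16-bit additive checksum over the header fields used above */
static uint16_t kdeth_chksum(uint16_t lrh2_words, uint32_t vcto,
			     uint16_t pkt_flags)
{
	return (uint16_t)(LRH_BTH + lrh2_words
			  - ((vcto >> 16) & 0xFFFF) - (vcto & 0xFFFF)
			  - pkt_flags);
}

int main(void)
{
	uint16_t pbc0 = 600;		/* packet length in 32-bit words */
	uint32_t bytes_togo = 1024;	/* payload deferred to a new packet */

	pbc0 -= bytes_togo >> 2;	/* shrink the first packet's count */
	printf("new pbc word count: %u\n", pbc0);
	printf("chksum: %#x\n", kdeth_chksum(pbc0, 0x00A00040, 0));
	return 0;
}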
-
-/* we have too many pages in the iovec, so coalesce into a single page */
-static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- const struct iovec *iov,
- unsigned long niov)
-{
- int ret = 0;
- struct page *page = alloc_page(GFP_KERNEL);
- void *mpage_save;
- char *mpage;
- int i;
- int len = 0;
-
- if (!page) {
- ret = -ENOMEM;
- goto done;
- }
-
- mpage = page_address(page);
- mpage_save = mpage;
- for (i = 0; i < niov; i++) {
- int cfur;
-
- cfur = copy_from_user(mpage,
- iov[i].iov_base, iov[i].iov_len);
- if (cfur) {
- ret = -EFAULT;
- goto page_free;
- }
-
- mpage += iov[i].iov_len;
- len += iov[i].iov_len;
- }
-
- ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
- page, 0, 0, len, mpage_save);
- goto done;
-
-page_free:
- __free_page(page);
-done:
- return ret;
-}
-
-/*
- * How many pages in this iovec element?
- */
-static size_t qib_user_sdma_num_pages(const struct iovec *iov)
-{
- const unsigned long addr = (unsigned long) iov->iov_base;
- const unsigned long len = iov->iov_len;
- const unsigned long spage = addr & PAGE_MASK;
- const unsigned long epage = (addr + len - 1) & PAGE_MASK;
-
- return 1 + ((epage - spage) >> PAGE_SHIFT);
-}
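qib_user_sdma_num_pages() above counts the pages an iovec element spans by aligning its first and last byte down to page boundaries. The same computation as a runnable snippet, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long num_pages(unsigned long addr, unsigned long len)
{
	unsigned long spage = addr & PAGE_MASK;
	unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}

int main(void)
{
	/* 100 bytes straddling a page boundary span two pages */
	printf("%lu\n", num_pages(0x1FC0, 100));	/* -> 2 */
	printf("%lu\n", num_pages(0x2000, 100));	/* -> 1 */
	return 0;
}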
-
-static void qib_user_sdma_free_pkt_frag(struct device *dev,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- int frag)
-{
- const int i = frag;
-
- if (pkt->addr[i].page) {
- /* only user data has page */
- if (pkt->addr[i].dma_mapped)
- dma_unmap_page(dev,
- pkt->addr[i].addr,
- pkt->addr[i].dma_length,
- DMA_TO_DEVICE);
-
- if (pkt->addr[i].put_page)
- unpin_user_page(pkt->addr[i].page);
- else
- __free_page(pkt->addr[i].page);
- } else if (pkt->addr[i].kvaddr) {
- /* for headers */
- if (pkt->addr[i].dma_mapped) {
- /* from kmalloc & dma mapped */
- dma_unmap_single(dev,
- pkt->addr[i].addr,
- pkt->addr[i].dma_length,
- DMA_TO_DEVICE);
- kfree(pkt->addr[i].kvaddr);
- } else if (pkt->addr[i].addr) {
- /* free coherent mem from cache... */
- dma_pool_free(pq->header_cache,
- pkt->addr[i].kvaddr, pkt->addr[i].addr);
- } else {
- /* from kmalloc but not dma mapped */
- kfree(pkt->addr[i].kvaddr);
- }
- }
-}
-
-/* return number of pages pinned... */
-static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- unsigned long addr, int tlen, size_t npages)
-{
- struct page *pages[8];
- int i, j;
- int ret = 0;
-
- while (npages) {
- if (npages > 8)
- j = 8;
- else
- j = npages;
-
- ret = pin_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
- if (ret != j) {
- i = 0;
- j = ret;
- ret = -ENOMEM;
- goto free_pages;
- }
-
- for (i = 0; i < j; i++) {
- /* map the pages... */
- unsigned long fofs = addr & ~PAGE_MASK;
- int flen = ((fofs + tlen) > PAGE_SIZE) ?
- (PAGE_SIZE - fofs) : tlen;
-
- ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
- pages[i], 1, fofs, flen, NULL);
- if (ret < 0) {
- /* The current page has been taken
- * care of inside the above call.
- */
- i++;
- goto free_pages;
- }
-
- addr += flen;
- tlen -= flen;
- }
-
- npages -= j;
- }
-
- goto done;
-
- /* if error, return all pages not managed by pkt */
-free_pages:
- while (i < j)
- unpin_user_page(pages[i++]);
-
-done:
- return ret;
-}
-
-static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- const struct iovec *iov,
- unsigned long niov)
-{
- int ret = 0;
- unsigned long idx;
-
- for (idx = 0; idx < niov; idx++) {
- const size_t npages = qib_user_sdma_num_pages(iov + idx);
- const unsigned long addr = (unsigned long) iov[idx].iov_base;
-
- ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
- iov[idx].iov_len, npages);
- if (ret < 0)
- goto free_pkt;
- }
-
- goto done;
-
-free_pkt:
- /* we need to ignore the first entry here */
- for (idx = 1; idx < pkt->naddr; idx++)
- qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
-
- /* dma unmap the first entry to restore it to its original
- * state so that the caller can free the memory on error;
- * the caller does not know whether it was dma mapped. */
- if (pkt->addr[0].dma_mapped) {
- dma_unmap_single(&dd->pcidev->dev,
- pkt->addr[0].addr,
- pkt->addr[0].dma_length,
- DMA_TO_DEVICE);
- pkt->addr[0].addr = 0;
- pkt->addr[0].dma_mapped = 0;
- }
-
-done:
- return ret;
-}
-
-static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- const struct iovec *iov,
- unsigned long niov, int npages)
-{
- int ret = 0;
-
- if (pkt->frag_size == pkt->bytes_togo &&
- npages >= ARRAY_SIZE(pkt->addr))
- ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
- else
- ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
-
- return ret;
-}
-
-/* free every packet on the list and reinitialize the list head */
-static void qib_user_sdma_free_pkt_list(struct device *dev,
- struct qib_user_sdma_queue *pq,
- struct list_head *list)
-{
- struct qib_user_sdma_pkt *pkt, *pkt_next;
-
- list_for_each_entry_safe(pkt, pkt_next, list, list) {
- int i;
-
- for (i = 0; i < pkt->naddr; i++)
- qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
-
- if (pkt->largepkt)
- kfree(pkt);
- else
- kmem_cache_free(pq->pkt_slab, pkt);
- }
- INIT_LIST_HEAD(list);
-}
-
-/*
- * copy headers, coalesce etc -- pq->lock must be held
- *
- * We queue all the packets onto list, returning the
- * total number of bytes. list must be empty initially,
- * since we clean it up if an error occurs...
- */
-static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
- struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq,
- const struct iovec *iov,
- unsigned long niov,
- struct list_head *list,
- int *maxpkts, int *ndesc)
-{
- unsigned long idx = 0;
- int ret = 0;
- int npkts = 0;
- __le32 *pbc;
- dma_addr_t dma_addr;
- struct qib_user_sdma_pkt *pkt = NULL;
- size_t len;
- size_t nw;
- u32 counter = pq->counter;
- u16 frag_size;
-
- while (idx < niov && npkts < *maxpkts) {
- const unsigned long addr = (unsigned long) iov[idx].iov_base;
- const unsigned long idx_save = idx;
- unsigned pktnw;
- unsigned pktnwc;
- int nfrags = 0;
- size_t npages = 0;
- size_t bytes_togo = 0;
- int tiddma = 0;
- int cfur;
-
- len = iov[idx].iov_len;
- nw = len >> 2;
-
- if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
- len > PAGE_SIZE || len & 3 || addr & 3) {
- ret = -EINVAL;
- goto free_list;
- }
-
- pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
- if (!pbc) {
- ret = -ENOMEM;
- goto free_list;
- }
-
- cfur = copy_from_user(pbc, iov[idx].iov_base, len);
- if (cfur) {
- ret = -EFAULT;
- goto free_pbc;
- }
-
- /*
- * This assignment is a bit strange. It's because
- * the PBC counts the number of 32-bit words in the full
- * packet _except_ the first word of the PBC itself...
- */
- pktnwc = nw - 1;
-
- /*
- * The pktnw computation yields the number of 32-bit words
- * that the caller has indicated in the PBC. Note that this
- * is one less than the total number of words that goes to
- * the send DMA engine, as the first 32-bit word of the PBC
- * itself is not counted. Armed with this count, we can
- * verify that the packet is consistent with the iovec
- * lengths.
- */
- pktnw = le32_to_cpu(*pbc) & 0xFFFF;
- if (pktnw < pktnwc) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- idx++;
- while (pktnwc < pktnw && idx < niov) {
- const size_t slen = iov[idx].iov_len;
- const unsigned long faddr =
- (unsigned long) iov[idx].iov_base;
-
- if (slen & 3 || faddr & 3 || !slen) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- npages += qib_user_sdma_num_pages(&iov[idx]);
-
- if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
- bytes_togo > type_max(typeof(pkt->bytes_togo))) {
- ret = -EINVAL;
- goto free_pbc;
- }
- pktnwc += slen >> 2;
- idx++;
- nfrags++;
- }
-
- if (pktnwc != pktnw) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
- if (((frag_size ? frag_size : bytes_togo) + len) >
- ppd->ibmaxlen) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- if (frag_size) {
- size_t tidsmsize, n, pktsize, sz, addrlimit;
-
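- /*
- * Upper bound on the number of address entries needed: each
- * payload page may be split into up to 2*PAGE_SIZE/frag_size
- * pieces, plus one for a fragment straddling a page boundary.
- */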
- n = npages*((2*PAGE_SIZE/frag_size)+1);
- pktsize = struct_size(pkt, addr, n);
-
- /*
- * Determine if this is tid-sdma or just sdma.
- */
- tiddma = (((le32_to_cpu(pbc[7])>>
- QLOGIC_IB_I_TID_SHIFT)&
- QLOGIC_IB_I_TID_MASK) !=
- QLOGIC_IB_I_TID_MASK);
-
- if (tiddma)
- tidsmsize = iov[idx].iov_len;
- else
- tidsmsize = 0;
-
- if (check_add_overflow(pktsize, tidsmsize, &sz)) {
- ret = -EINVAL;
- goto free_pbc;
- }
- pkt = kmalloc(sz, GFP_KERNEL);
- if (!pkt) {
- ret = -ENOMEM;
- goto free_pbc;
- }
- pkt->largepkt = 1;
- pkt->frag_size = frag_size;
- if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
- &addrlimit) ||
- addrlimit > type_max(typeof(pkt->addrlimit))) {
- ret = -EINVAL;
- goto free_pkt;
- }
- pkt->addrlimit = addrlimit;
-
- if (tiddma) {
- char *tidsm = (char *)pkt + pktsize;
-
- cfur = copy_from_user(tidsm,
- iov[idx].iov_base, tidsmsize);
- if (cfur) {
- ret = -EFAULT;
- goto free_pkt;
- }
- pkt->tidsm =
- (struct qib_tid_session_member *)tidsm;
- pkt->tidsmcount = tidsmsize/
- sizeof(struct qib_tid_session_member);
- pkt->tidsmidx = 0;
- idx++;
- }
-
- /*
- * The PBC 'fill1' field is borrowed to pass the frag size;
- * we must clear it after reading the frag size, since the
- * hardware requires this field to be zero.
- */
- *pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
- } else {
- pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
- if (!pkt) {
- ret = -ENOMEM;
- goto free_pbc;
- }
- pkt->largepkt = 0;
- pkt->frag_size = bytes_togo;
- pkt->addrlimit = ARRAY_SIZE(pkt->addr);
- }
- pkt->bytes_togo = bytes_togo;
- pkt->payload_size = 0;
- pkt->counter = counter;
- pkt->tiddma = tiddma;
-
- /* setup the first header */
- qib_user_sdma_init_frag(pkt, 0, /* index */
- 0, len, /* offset, len */
- 1, 0, /* first last desc */
- 0, 0, /* put page, dma mapped */
- NULL, pbc, /* struct page, virt addr */
- dma_addr, len); /* dma addr, dma length */
- pkt->index = 0;
- pkt->naddr = 1;
-
- if (nfrags) {
- ret = qib_user_sdma_init_payload(dd, pq, pkt,
- iov + idx_save + 1,
- nfrags, npages);
- if (ret < 0)
- goto free_pkt;
- } else {
- /* since there is no payload, mark the
- * header as the last desc. */
- pkt->addr[0].last_desc = 1;
-
- if (dma_addr == 0) {
- /*
- * the header is not dma mapped yet.
- * it should be from kmalloc.
- */
- dma_addr = dma_map_single(&dd->pcidev->dev,
- pbc, len, DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev,
- dma_addr)) {
- ret = -ENOMEM;
- goto free_pkt;
- }
- pkt->addr[0].addr = dma_addr;
- pkt->addr[0].dma_mapped = 1;
- }
- }
-
- counter++;
- npkts++;
- pkt->pq = pq;
- pkt->index = 0; /* reset index for push on hw */
- *ndesc += pkt->naddr;
-
- list_add_tail(&pkt->list, list);
- }
-
- *maxpkts = npkts;
- ret = idx;
- goto done;
-
-free_pkt:
- if (pkt->largepkt)
- kfree(pkt);
- else
- kmem_cache_free(pq->pkt_slab, pkt);
-free_pbc:
- if (dma_addr)
- dma_pool_free(pq->header_cache, pbc, dma_addr);
- else
- kfree(pbc);
-free_list:
- qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
-done:
- return ret;
-}
-
-static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
- u32 c)
-{
- pq->sent_counter = c;
-}
-
-/* try to clean out queue -- needs pq->lock */
-static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq)
-{
- struct qib_devdata *dd = ppd->dd;
- struct list_head free_list;
- struct qib_user_sdma_pkt *pkt;
- struct qib_user_sdma_pkt *pkt_prev;
- unsigned long flags;
- int ret = 0;
-
- if (!pq->num_sending)
- return 0;
-
- INIT_LIST_HEAD(&free_list);
-
- /*
- * We need this spin lock here because the interrupt handler
- * may modify this list in qib_user_sdma_send_desc(); we must
- * also keep interrupts disabled, otherwise we could deadlock.
- */
- spin_lock_irqsave(&pq->sent_lock, flags);
- list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
- s64 descd = ppd->sdma_descq_removed - pkt->added;
-
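- /* descd < 0: hardware has not yet consumed this packet */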
- if (descd < 0)
- break;
-
- list_move_tail(&pkt->list, &free_list);
-
- /* one more packet cleaned */
- ret++;
- pq->num_sending--;
- }
- spin_unlock_irqrestore(&pq->sent_lock, flags);
-
- if (!list_empty(&free_list)) {
- u32 counter;
-
- pkt = list_entry(free_list.prev,
- struct qib_user_sdma_pkt, list);
- counter = pkt->counter;
-
- qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
- qib_user_sdma_set_complete_counter(pq, counter);
- }
-
- return ret;
-}
-
-void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
-{
- if (!pq)
- return;
-
- pq->sdma_rb_node->refcount--;
- if (pq->sdma_rb_node->refcount == 0) {
- rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
- kfree(pq->sdma_rb_node);
- }
- dma_pool_destroy(pq->header_cache);
- kmem_cache_destroy(pq->pkt_slab);
- kfree(pq);
-}
-
-/* clean descriptor queue, returns > 0 if some elements cleaned */
-static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- ret = qib_sdma_make_progress(ppd);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- return ret;
-}
-
-/* we're closing; drain packets so that we can clean up successfully... */
-void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
- int i;
-
- if (!pq)
- return;
-
- for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
- mutex_lock(&pq->lock);
- if (!pq->num_pending && !pq->num_sending) {
- mutex_unlock(&pq->lock);
- break;
- }
- qib_user_sdma_hwqueue_clean(ppd);
- qib_user_sdma_queue_clean(ppd, pq);
- mutex_unlock(&pq->lock);
- msleep(20);
- }
-
- if (pq->num_pending || pq->num_sending) {
- struct qib_user_sdma_pkt *pkt;
- struct qib_user_sdma_pkt *pkt_prev;
- struct list_head free_list;
-
- mutex_lock(&pq->lock);
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- /*
- * Since we hold sdma_lock, it is safe without sent_lock.
- */
- if (pq->num_pending) {
- list_for_each_entry_safe(pkt, pkt_prev,
- &ppd->sdma_userpending, list) {
- if (pkt->pq == pq) {
- list_move_tail(&pkt->list, &pq->sent);
- pq->num_pending--;
- pq->num_sending++;
- }
- }
- }
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
- INIT_LIST_HEAD(&free_list);
- list_splice_init(&pq->sent, &free_list);
- pq->num_sending = 0;
- qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
- mutex_unlock(&pq->lock);
- }
-}
-
-static inline __le64 qib_sdma_make_desc0(u8 gen,
- u64 addr, u64 dwlen, u64 dwoffset)
-{
- return cpu_to_le64(/* SDmaPhyAddr[31:0] */
- ((addr & 0xfffffffcULL) << 32) |
- /* SDmaGeneration[1:0] */
- ((gen & 3ULL) << 30) |
- /* SDmaDwordCount[10:0] */
- ((dwlen & 0x7ffULL) << 16) |
- /* SDmaBufOffset[12:2] */
- (dwoffset & 0x7ffULL));
-}
-
-static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
-{
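- /* bit 12: SDmaFirstDesc */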
- return descq | cpu_to_le64(1ULL << 12);
-}
-
-static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
-{
- /* last */ /* dma head */
- return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
-}
-
-static inline __le64 qib_sdma_make_desc1(u64 addr)
-{
- /* SDmaPhyAddr[47:32] */
- return cpu_to_le64(addr >> 32);
-}
-
-static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
- struct qib_user_sdma_pkt *pkt, int idx,
- unsigned ofs, u16 tail, u8 gen)
-{
- const u64 addr = (u64) pkt->addr[idx].addr +
- (u64) pkt->addr[idx].offset;
- const u64 dwlen = (u64) pkt->addr[idx].length / 4;
- __le64 *descqp;
- __le64 descq0;
-
- descqp = &ppd->sdma_descq[tail].qw[0];
-
- descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
- if (pkt->addr[idx].first_desc)
- descq0 = qib_sdma_make_first_desc0(descq0);
- if (pkt->addr[idx].last_desc) {
- descq0 = qib_sdma_make_last_desc0(descq0);
- if (ppd->sdma_intrequest) {
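- /* bit 15: request a completion interrupt */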
- descq0 |= cpu_to_le64(1ULL << 15);
- ppd->sdma_intrequest = 0;
- }
- }
-
- descqp[0] = descq0;
- descqp[1] = qib_sdma_make_desc1(addr);
-}
-
-void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
- struct list_head *pktlist)
-{
- struct qib_devdata *dd = ppd->dd;
- u16 nfree, nsent;
- u16 tail, tail_c;
- u8 gen, gen_c;
-
- nfree = qib_sdma_descq_freecnt(ppd);
- if (!nfree)
- return;
-
-retry:
- nsent = 0;
- tail_c = tail = ppd->sdma_descq_tail;
- gen_c = gen = ppd->sdma_generation;
- while (!list_empty(pktlist)) {
- struct qib_user_sdma_pkt *pkt =
- list_entry(pktlist->next, struct qib_user_sdma_pkt,
- list);
- int i, j, c = 0;
- unsigned ofs = 0;
- u16 dtail = tail;
-
- for (i = pkt->index; i < pkt->naddr && nfree; i++) {
- qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
- ofs += pkt->addr[i].length >> 2;
-
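- /*
- * Request an interrupt when the ring wraps and again at
- * its midpoint so completed descriptors are reclaimed
- * often enough to keep the ring from running dry.
- */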
- if (++tail == ppd->sdma_descq_cnt) {
- tail = 0;
- ++gen;
- ppd->sdma_intrequest = 1;
- } else if (tail == (ppd->sdma_descq_cnt>>1)) {
- ppd->sdma_intrequest = 1;
- }
- nfree--;
- if (pkt->addr[i].last_desc == 0)
- continue;
-
- /*
- * If the packet is >= 2KB mtu equivalent, we
- * have to use the large buffers, and have to
- * mark each descriptor as part of a large
- * buffer packet.
- */
- if (ofs > dd->piosize2kmax_dwords) {
- for (j = pkt->index; j <= i; j++) {
- ppd->sdma_descq[dtail].qw[0] |=
- cpu_to_le64(1ULL << 14);
- if (++dtail == ppd->sdma_descq_cnt)
- dtail = 0;
- }
- }
- c += i + 1 - pkt->index;
- pkt->index = i + 1; /* index for next first */
- tail_c = dtail = tail;
- gen_c = gen;
- ofs = 0; /* reset for next packet */
- }
-
- ppd->sdma_descq_added += c;
- nsent += c;
- if (pkt->index == pkt->naddr) {
- pkt->added = ppd->sdma_descq_added;
- pkt->pq->added = pkt->added;
- pkt->pq->num_pending--;
- spin_lock(&pkt->pq->sent_lock);
- pkt->pq->num_sending++;
- list_move_tail(&pkt->list, &pkt->pq->sent);
- spin_unlock(&pkt->pq->sent_lock);
- }
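- /* stop when out of descriptors or after posting a quarter of the ring */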
- if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
- break;
- }
-
- /* advance the tail on the chip if necessary */
- if (ppd->sdma_descq_tail != tail_c) {
- ppd->sdma_generation = gen_c;
- dd->f_sdma_update_tail(ppd, tail_c);
- }
-
- if (nfree && !list_empty(pktlist))
- goto retry;
-}
-
-/* pq->lock must be held, get packets on the wire... */
-static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq,
- struct list_head *pktlist, int count)
-{
- unsigned long flags;
-
- if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
- return -ECOMM;
-
- /* non-blocking mode: the rb node is shared with other queues */
- if (pq->sdma_rb_node->refcount > 1) {
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (unlikely(!__qib_sdma_running(ppd))) {
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return -ECOMM;
- }
- pq->num_pending += count;
- list_splice_tail_init(pktlist, &ppd->sdma_userpending);
- qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return 0;
- }
-
- /* In this case, descriptors from this process are not
- * linked to the ppd pending queue and the interrupt handler
- * will not update this process, so it is safe to modify
- * the queue without taking the sdma lock.
- */
-
- pq->num_pending += count;
- /*
- * Blocking mode for a single-rail process: we must release
- * and re-acquire sdma_lock to give other processes a chance
- * to make progress. This is important for performance.
- */
- do {
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (unlikely(!__qib_sdma_running(ppd))) {
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return -ECOMM;
- }
- qib_user_sdma_send_desc(ppd, pktlist);
- if (!list_empty(pktlist))
- qib_sdma_make_progress(ppd);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- } while (!list_empty(pktlist));
-
- return 0;
-}
-
-int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
- struct qib_user_sdma_queue *pq,
- const struct iovec *iov,
- unsigned long dim)
-{
- struct qib_devdata *dd = rcd->dd;
- struct qib_pportdata *ppd = rcd->ppd;
- int ret = 0;
- struct list_head list;
- int npkts = 0;
-
- INIT_LIST_HEAD(&list);
-
- mutex_lock(&pq->lock);
-
- /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
- if (!qib_sdma_running(ppd))
- goto done_unlock;
-
- /* if there are packets not yet complete */
- if (pq->added > ppd->sdma_descq_removed)
- qib_user_sdma_hwqueue_clean(ppd);
- /* if there are completed packets to be freed */
- if (pq->num_sending)
- qib_user_sdma_queue_clean(ppd, pq);
-
- while (dim) {
- int mxp = 1;
- int ndesc = 0;
-
- ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
- iov, dim, &list, &mxp, &ndesc);
- if (ret < 0)
- goto done_unlock;
- else {
- dim -= ret;
- iov += ret;
- }
-
- /* force packets onto the sdma hw queue... */
- if (!list_empty(&list)) {
- /*
- * Lazily clean hw queue.
- */
- if (qib_sdma_descq_freecnt(ppd) < ndesc) {
- qib_user_sdma_hwqueue_clean(ppd);
- if (pq->num_sending)
- qib_user_sdma_queue_clean(ppd, pq);
- }
-
- ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
- if (ret < 0)
- goto done_unlock;
- else {
- npkts += mxp;
- pq->counter += mxp;
- }
- }
- }
-
-done_unlock:
- if (!list_empty(&list))
- qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
- mutex_unlock(&pq->lock);
-
- return (ret < 0) ? ret : npkts;
-}
-
-int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq)
-{
- int ret = 0;
-
- mutex_lock(&pq->lock);
- qib_user_sdma_hwqueue_clean(ppd);
- ret = qib_user_sdma_queue_clean(ppd, pq);
- mutex_unlock(&pq->lock);
-
- return ret;
-}
-
-u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
-{
- return pq ? pq->sent_counter : 0;
-}
-
-u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
-{
- return pq ? pq->counter : 0;
-}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.h b/drivers/infiniband/hw/qib/qib_user_sdma.h
deleted file mode 100644
index ce8cbaf6a5c2..000000000000
--- a/drivers/infiniband/hw/qib/qib_user_sdma.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/device.h>
-
-struct qib_user_sdma_queue;
-
-struct qib_user_sdma_queue *
-qib_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
-void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq);
-
-int qib_user_sdma_writev(struct qib_ctxtdata *pd,
- struct qib_user_sdma_queue *pq,
- const struct iovec *iov,
- unsigned long dim);
-
-int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq);
-
-void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq);
-
-u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq);
-u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
deleted file mode 100644
index bab657f93084..000000000000
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ /dev/null
@@ -1,1705 +0,0 @@
-/*
- * Copyright (c) 2012 - 2018 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_mad.h>
-#include <rdma/ib_user_verbs.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/utsname.h>
-#include <linux/rculist.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <rdma/rdma_vt.h>
-
-#include "qib.h"
-#include "qib_common.h"
-
-static unsigned int ib_qib_qp_table_size = 256;
-module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
-MODULE_PARM_DESC(qp_table_size, "QP table size");
-
-static unsigned int qib_lkey_table_size = 16;
-module_param_named(lkey_table_size, qib_lkey_table_size, uint,
- S_IRUGO);
-MODULE_PARM_DESC(lkey_table_size,
- "LKEY table size in bits (2^n, 1 <= n <= 23)");
-
-static unsigned int ib_qib_max_pds = 0xFFFF;
-module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
-MODULE_PARM_DESC(max_pds,
- "Maximum number of protection domains to support");
-
-static unsigned int ib_qib_max_ahs = 0xFFFF;
-module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
-
-unsigned int ib_qib_max_cqes = 0x2FFFF;
-module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
-MODULE_PARM_DESC(max_cqes,
- "Maximum number of completion queue entries to support");
-
-unsigned int ib_qib_max_cqs = 0x1FFFF;
-module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
-
-unsigned int ib_qib_max_qp_wrs = 0x3FFF;
-module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
-
-unsigned int ib_qib_max_qps = 16384;
-module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
-MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
-
-unsigned int ib_qib_max_sges = 0x60;
-module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
-MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
-
-unsigned int ib_qib_max_mcast_grps = 16384;
-module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
-MODULE_PARM_DESC(max_mcast_grps,
- "Maximum number of multicast groups to support");
-
-unsigned int ib_qib_max_mcast_qp_attached = 16;
-module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
- uint, S_IRUGO);
-MODULE_PARM_DESC(max_mcast_qp_attached,
- "Maximum number of attached QPs to support");
-
-unsigned int ib_qib_max_srqs = 1024;
-module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
-
-unsigned int ib_qib_max_srq_sges = 128;
-module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
-MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
-
-unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
-module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
-
-static unsigned int ib_qib_disable_sma;
-module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(disable_sma, "Disable the SMA");
-
-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_qib_wc_opcode[] = {
- [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
- [IB_WR_SEND] = IB_WC_SEND,
- [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
- [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
-};
-
-/*
- * System image GUID.
- */
-__be64 ib_qib_sys_image_guid;
-
-/*
- * Count the number of DMA descriptors needed to send length bytes of data.
- * Don't modify the qib_sge_state to get the count.
- * Return zero if any of the segments is not aligned.
- */
-static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
-{
- struct rvt_sge *sg_list = ss->sg_list;
- struct rvt_sge sge = ss->sge;
- u8 num_sge = ss->num_sge;
- u32 ndesc = 1; /* count the header */
-
- while (length) {
- u32 len = rvt_get_sge_length(&sge, length);
-
- if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
- (len != length && (len & (sizeof(u32) - 1)))) {
- ndesc = 0;
- break;
- }
- ndesc++;
- sge.vaddr += len;
- sge.length -= len;
- sge.sge_length -= len;
- if (sge.sge_length == 0) {
- if (--num_sge)
- sge = *sg_list++;
- } else if (sge.length == 0 && sge.mr->lkey) {
- if (++sge.n >= RVT_SEGSZ) {
- if (++sge.m >= sge.mr->mapsz)
- break;
- sge.n = 0;
- }
- sge.vaddr =
- sge.mr->map[sge.m]->segs[sge.n].vaddr;
- sge.length =
- sge.mr->map[sge.m]->segs[sge.n].length;
- }
- length -= len;
- }
- return ndesc;
-}
-
-/*
- * Copy from the SGEs to the data buffer.
- */
-static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
-{
- struct rvt_sge *sge = &ss->sge;
-
- while (length) {
- u32 len = rvt_get_sge_length(sge, length);
-
- memcpy(data, sge->vaddr, len);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- data += len;
- length -= len;
- }
-}
-
-/**
- * qib_qp_rcv - processing an incoming packet on a QP
- * @rcd: the context pointer
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from qib_ib_rcv() to process an incoming packet
- * for the given QP.
- * Called at interrupt level.
- */
-static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
-{
- struct qib_ibport *ibp = &rcd->ppd->ibport_data;
-
- spin_lock(&qp->r_lock);
-
- /* Check for valid receive state. */
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
- ibp->rvp.n_pkt_drops++;
- goto unlock;
- }
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- if (ib_qib_disable_sma)
- break;
- fallthrough;
- case IB_QPT_UD:
- qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
- break;
-
- case IB_QPT_RC:
- qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
- break;
-
- case IB_QPT_UC:
- qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
- break;
-
- default:
- break;
- }
-
-unlock:
- spin_unlock(&qp->r_lock);
-}
-
-/**
- * qib_ib_rcv - process an incoming packet
- * @rcd: the context pointer
- * @rhdr: the header of the packet
- * @data: the packet payload
- * @tlen: the packet length
- *
- * This is called from qib_kreceive() to process an incoming packet at
- * interrupt level. Tlen is the length of the header + data + CRC in bytes.
- */
-void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
-{
- struct qib_pportdata *ppd = rcd->ppd;
- struct qib_ibport *ibp = &ppd->ibport_data;
- struct ib_header *hdr = rhdr;
- struct qib_devdata *dd = ppd->dd;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- struct ib_other_headers *ohdr;
- struct rvt_qp *qp;
- u32 qp_num;
- int lnh;
- u8 opcode;
- u16 lid;
-
- /* 24 == LRH+BTH+CRC */
- if (unlikely(tlen < 24))
- goto drop;
-
- /* Check for a valid destination LID (see ch. 7.11.1). */
- lid = be16_to_cpu(hdr->lrh[1]);
- if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
- lid &= ~((1 << ppd->lmc) - 1);
- if (unlikely(lid != ppd->lid))
- goto drop;
- }
-
- /* Check for GRH */
- lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == QIB_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == QIB_LRH_GRH) {
- u32 vtf;
-
- ohdr = &hdr->u.l.oth;
- if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
- goto drop;
- vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
- if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
- goto drop;
- } else
- goto drop;
-
- opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
-#ifdef CONFIG_DEBUG_FS
- rcd->opstats->stats[opcode].n_bytes += tlen;
- rcd->opstats->stats[opcode].n_packets++;
-#endif
-
- /* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- if (qp_num == QIB_MULTICAST_QPN) {
- struct rvt_mcast *mcast;
- struct rvt_mcast_qp *p;
-
- if (lnh != QIB_LRH_GRH)
- goto drop;
- mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid, lid);
- if (mcast == NULL)
- goto drop;
- this_cpu_inc(ibp->pmastats->n_multicast_rcv);
- rcu_read_lock();
- list_for_each_entry_rcu(p, &mcast->qp_list, list)
- qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
- rcu_read_unlock();
- /*
- * Notify rvt_multicast_detach() if it is waiting for us
- * to finish.
- */
- if (atomic_dec_return(&mcast->refcount) <= 1)
- wake_up(&mcast->wait);
- } else {
- rcu_read_lock();
- qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
- if (!qp) {
- rcu_read_unlock();
- goto drop;
- }
- this_cpu_inc(ibp->pmastats->n_unicast_rcv);
- qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
- rcu_read_unlock();
- }
- return;
-
-drop:
- ibp->rvp.n_pkt_drops++;
-}
-
-/*
- * This is called from a timer to check for QPs
- * which need kernel memory in order to send a packet.
- */
-static void mem_timer(struct timer_list *t)
-{
- struct qib_ibdev *dev = timer_container_of(dev, t, mem_timer);
- struct list_head *list = &dev->memwait;
- struct rvt_qp *qp = NULL;
- struct qib_qp_priv *priv = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->rdi.pending_lock, flags);
- if (!list_empty(list)) {
- priv = list_entry(list->next, struct qib_qp_priv, iowait);
- qp = priv->owner;
- list_del_init(&priv->iowait);
- rvt_get_qp(qp);
- if (!list_empty(list))
- mod_timer(&dev->mem_timer, jiffies + 1);
- }
- spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
-
- if (qp) {
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & RVT_S_WAIT_KMEM) {
- qp->s_flags &= ~RVT_S_WAIT_KMEM;
- qib_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
- rvt_put_qp(qp);
- }
-}
-
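-/*
- * Endian-dependent helpers for assembling unaligned payload bytes
- * into 32-bit words before they are written to the PIO buffer.
- */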
-#ifdef __LITTLE_ENDIAN
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
- return data >> shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
- return data << shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
- data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
- data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
- return data;
-}
-#else
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
- return data << shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
- return data >> shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
- data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
- data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
- return data;
-}
-#endif
-
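-/*
- * Copy an SGE list into a write-combining PIO buffer one 32-bit word
- * at a time, handling unaligned source addresses; the final word is
- * held back and written last so trigger-word ordering is preserved.
- */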
-static void qib_copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
- u32 length, unsigned flush_wc)
-{
- u32 extra = 0;
- u32 data = 0;
- u32 last;
-
- while (1) {
- u32 len = rvt_get_sge_length(&ss->sge, length);
- u32 off;
-
- /* If the source address is not aligned, try to align it. */
- off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
- if (off) {
- u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
- ~(sizeof(u32) - 1));
- u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
- u32 y;
-
- y = sizeof(u32) - off;
- if (len > y)
- len = y;
- if (len + extra >= sizeof(u32)) {
- data |= set_upper_bits(v, extra *
- BITS_PER_BYTE);
- len = sizeof(u32) - extra;
- if (len == length) {
- last = data;
- break;
- }
- __raw_writel(data, piobuf);
- piobuf++;
- extra = 0;
- data = 0;
- } else {
- /* Clear unused upper bytes */
- data |= clear_upper_bytes(v, len, extra);
- if (len == length) {
- last = data;
- break;
- }
- extra += len;
- }
- } else if (extra) {
- /* Source address is aligned. */
- u32 *addr = (u32 *) ss->sge.vaddr;
- int shift = extra * BITS_PER_BYTE;
- int ushift = 32 - shift;
- u32 l = len;
-
- while (l >= sizeof(u32)) {
- u32 v = *addr;
-
- data |= set_upper_bits(v, shift);
- __raw_writel(data, piobuf);
- data = get_upper_bits(v, ushift);
- piobuf++;
- addr++;
- l -= sizeof(u32);
- }
- /*
- * We still have 'extra' number of bytes leftover.
- */
- if (l) {
- u32 v = *addr;
-
- if (l + extra >= sizeof(u32)) {
- data |= set_upper_bits(v, shift);
- len -= l + extra - sizeof(u32);
- if (len == length) {
- last = data;
- break;
- }
- __raw_writel(data, piobuf);
- piobuf++;
- extra = 0;
- data = 0;
- } else {
- /* Clear unused upper bytes */
- data |= clear_upper_bytes(v, l, extra);
- if (len == length) {
- last = data;
- break;
- }
- extra += l;
- }
- } else if (len == length) {
- last = data;
- break;
- }
- } else if (len == length) {
- u32 w;
-
- /*
- * Need to round up for the last dword in the
- * packet.
- */
- w = (len + 3) >> 2;
- qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
- piobuf += w - 1;
- last = ((u32 *) ss->sge.vaddr)[w - 1];
- break;
- } else {
- u32 w = len >> 2;
-
- qib_pio_copy(piobuf, ss->sge.vaddr, w);
- piobuf += w;
-
- extra = len & (sizeof(u32) - 1);
- if (extra) {
- u32 v = ((u32 *) ss->sge.vaddr)[w];
-
- /* Clear unused upper bytes */
- data = clear_upper_bytes(v, extra, 0);
- }
- }
- rvt_update_sge(ss, len, false);
- length -= len;
- }
- /* Update address before sending packet. */
- rvt_update_sge(ss, length, false);
- if (flush_wc) {
- /* must flush early everything before trigger word */
- qib_flush_wc();
- __raw_writel(last, piobuf);
- /* be sure trigger word is written */
- qib_flush_wc();
- } else
- __raw_writel(last, piobuf);
-}
-
-static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
- struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_verbs_txreq *tx;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- spin_lock(&dev->rdi.pending_lock);
-
- if (!list_empty(&dev->txreq_free)) {
- struct list_head *l = dev->txreq_free.next;
-
- list_del(l);
- spin_unlock(&dev->rdi.pending_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- } else {
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK &&
- list_empty(&priv->iowait)) {
- dev->n_txwait++;
- qp->s_flags |= RVT_S_WAIT_TX;
- list_add_tail(&priv->iowait, &dev->txwait);
- }
- qp->s_flags &= ~RVT_S_BUSY;
- spin_unlock(&dev->rdi.pending_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- tx = ERR_PTR(-EBUSY);
- }
- return tx;
-}
-
-static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
- struct rvt_qp *qp)
-{
- struct qib_verbs_txreq *tx;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->rdi.pending_lock, flags);
- /* assume the common case: the list is not empty */
- if (likely(!list_empty(&dev->txreq_free))) {
- struct list_head *l = dev->txreq_free.next;
-
- list_del(l);
- spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
- tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- } else {
- /* call slow path to get the extra lock */
- spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
- tx = __get_txreq(dev, qp);
- }
- return tx;
-}
-
-void qib_put_txreq(struct qib_verbs_txreq *tx)
-{
- struct qib_ibdev *dev;
- struct rvt_qp *qp;
- struct qib_qp_priv *priv;
- unsigned long flags;
-
- qp = tx->qp;
- dev = to_idev(qp->ibqp.device);
-
- if (tx->mr) {
- rvt_put_mr(tx->mr);
- tx->mr = NULL;
- }
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
- tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
- dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
- tx->txreq.addr, tx->hdr_dwords << 2,
- DMA_TO_DEVICE);
- kfree(tx->align_buf);
- }
-
- spin_lock_irqsave(&dev->rdi.pending_lock, flags);
-
- /* Put struct back on free list */
- list_add(&tx->txreq.list, &dev->txreq_free);
-
- if (!list_empty(&dev->txwait)) {
- /* Wake up first QP wanting a free struct */
- priv = list_entry(dev->txwait.next, struct qib_qp_priv,
- iowait);
- qp = priv->owner;
- list_del_init(&priv->iowait);
- rvt_get_qp(qp);
- spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & RVT_S_WAIT_TX) {
- qp->s_flags &= ~RVT_S_WAIT_TX;
- qib_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- rvt_put_qp(qp);
- } else
- spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
-}
-
-/*
- * This is called when there are send DMA descriptors that might be
- * available.
- *
- * This is called with ppd->sdma_lock held.
- */
-void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
-{
- struct rvt_qp *qp;
- struct qib_qp_priv *qpp, *nqpp;
- struct rvt_qp *qps[20];
- struct qib_ibdev *dev;
- unsigned i, n;
-
- n = 0;
- dev = &ppd->dd->verbs_dev;
- spin_lock(&dev->rdi.pending_lock);
-
- /* Search wait list for first QP wanting DMA descriptors. */
- list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
- qp = qpp->owner;
- if (qp->port_num != ppd->port)
- continue;
- if (n == ARRAY_SIZE(qps))
- break;
- if (qpp->s_tx->txreq.sg_count > avail)
- break;
- avail -= qpp->s_tx->txreq.sg_count;
- list_del_init(&qpp->iowait);
- rvt_get_qp(qp);
- qps[n++] = qp;
- }
-
- spin_unlock(&dev->rdi.pending_lock);
-
- for (i = 0; i < n; i++) {
- qp = qps[i];
- spin_lock(&qp->s_lock);
- if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
- qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
- qib_schedule_send(qp);
- }
- spin_unlock(&qp->s_lock);
- rvt_put_qp(qp);
- }
-}
-
-/*
- * This is called with ppd->sdma_lock held.
- */
-static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
-{
- struct qib_verbs_txreq *tx =
- container_of(cookie, struct qib_verbs_txreq, txreq);
- struct rvt_qp *qp = tx->qp;
- struct qib_qp_priv *priv = qp->priv;
-
- spin_lock(&qp->s_lock);
- if (tx->wqe)
- rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
- else if (qp->ibqp.qp_type == IB_QPT_RC) {
- struct ib_header *hdr;
-
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
- hdr = &tx->align_buf->hdr;
- else {
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
-
- hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
- }
- qib_rc_send_complete(qp, hdr);
- }
- if (atomic_dec_and_test(&priv->s_dma_busy)) {
- if (qp->state == IB_QPS_RESET)
- wake_up(&priv->wait_dma);
- else if (qp->s_flags & RVT_S_WAIT_DMA) {
- qp->s_flags &= ~RVT_S_WAIT_DMA;
- qib_schedule_send(qp);
- }
- }
- spin_unlock(&qp->s_lock);
-
- qib_put_txreq(tx);
-}
-
-static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- spin_lock(&dev->rdi.pending_lock);
- if (list_empty(&priv->iowait)) {
- if (list_empty(&dev->memwait))
- mod_timer(&dev->mem_timer, jiffies + 1);
- qp->s_flags |= RVT_S_WAIT_KMEM;
- list_add_tail(&priv->iowait, &dev->memwait);
- }
- spin_unlock(&dev->rdi.pending_lock);
- qp->s_flags &= ~RVT_S_BUSY;
- ret = -EBUSY;
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- return ret;
-}
-
-static int qib_verbs_send_dma(struct rvt_qp *qp, struct ib_header *hdr,
- u32 hdrwords, struct rvt_sge_state *ss, u32 len,
- u32 plen, u32 dwords)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_verbs_txreq *tx;
- struct qib_pio_header *phdr;
- u32 control;
- u32 ndesc;
- int ret;
-
- tx = priv->s_tx;
- if (tx) {
- priv->s_tx = NULL;
- /* resend previously constructed packet */
- ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
- goto bail;
- }
-
- tx = get_txreq(dev, qp);
- if (IS_ERR(tx))
- goto bail_tx;
-
- control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
- be16_to_cpu(hdr->lrh[0]) >> 12);
- tx->qp = qp;
- tx->wqe = qp->s_wqe;
- tx->mr = qp->s_rdma_mr;
- if (qp->s_rdma_mr)
- qp->s_rdma_mr = NULL;
- tx->txreq.callback = sdma_complete;
- if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
- tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
- else
- tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
- if (plen + 1 > dd->piosize2kmax_dwords)
- tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
-
- if (len) {
- /*
- * Don't try to DMA if it takes more descriptors than
- * the queue holds.
- */
- ndesc = qib_count_sge(ss, len);
- if (ndesc >= ppd->sdma_descq_cnt)
- ndesc = 0;
- } else
- ndesc = 1;
- if (ndesc) {
- phdr = &dev->pio_hdrs[tx->hdr_inx];
- phdr->pbc[0] = cpu_to_le32(plen);
- phdr->pbc[1] = cpu_to_le32(control);
- memcpy(&phdr->hdr, hdr, hdrwords << 2);
- tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
- tx->txreq.sg_count = ndesc;
- tx->txreq.addr = dev->pio_hdrs_phys +
- tx->hdr_inx * sizeof(struct qib_pio_header);
- tx->hdr_dwords = hdrwords + 2; /* add PBC length */
- ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
- goto bail;
- }
-
- /* Allocate a buffer and copy the header and payload to it. */
- tx->hdr_dwords = plen + 1;
- phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
- if (!phdr)
- goto err_tx;
- phdr->pbc[0] = cpu_to_le32(plen);
- phdr->pbc[1] = cpu_to_le32(control);
- memcpy(&phdr->hdr, hdr, hdrwords << 2);
- qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
-
- tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
- tx->hdr_dwords << 2, DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
- goto map_err;
- tx->align_buf = phdr;
- tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
- tx->txreq.sg_count = 1;
- ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
- goto unaligned;
-
-map_err:
- kfree(phdr);
-err_tx:
- qib_put_txreq(tx);
- ret = wait_kmem(dev, qp);
-unaligned:
- ibp->rvp.n_unaligned++;
-bail:
- return ret;
-bail_tx:
- ret = PTR_ERR(tx);
- goto bail;
-}
-
-/*
- * If we are now in the error state, return zero to flush the
- * send work request.
- */
-static int no_bufs_available(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct qib_devdata *dd;
- unsigned long flags;
- int ret = 0;
-
- /*
- * Note that as soon as want_buffer() is called and
- * possibly before it returns, qib_ib_piobufavail()
- * could be called. Therefore, put QP on the I/O wait list before
- * enabling the PIO avail interrupt.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- spin_lock(&dev->rdi.pending_lock);
- if (list_empty(&priv->iowait)) {
- dev->n_piowait++;
- qp->s_flags |= RVT_S_WAIT_PIO;
- list_add_tail(&priv->iowait, &dev->piowait);
- dd = dd_from_dev(dev);
- dd->f_wantpiobuf_intr(dd, 1);
- }
- spin_unlock(&dev->rdi.pending_lock);
- qp->s_flags &= ~RVT_S_BUSY;
- ret = -EBUSY;
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
-
-static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
- u32 hdrwords, struct rvt_sge_state *ss, u32 len,
- u32 plen, u32 dwords)
-{
- struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
- u32 *hdr = (u32 *) ibhdr;
- u32 __iomem *piobuf_orig;
- u32 __iomem *piobuf;
- u64 pbc;
- unsigned long flags;
- unsigned flush_wc;
- u32 control;
- u32 pbufn;
-
- control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
- be16_to_cpu(ibhdr->lrh[0]) >> 12);
- pbc = ((u64) control << 32) | plen;
- piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
- if (unlikely(piobuf == NULL))
- return no_bufs_available(qp);
-
- /*
- * Write the pbc.
- * We have to flush after the PBC for correctness on some cpus
- * or WC buffer can be written out of order.
- */
- writeq(pbc, piobuf);
- piobuf_orig = piobuf;
- piobuf += 2;
-
- flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
- if (len == 0) {
- /*
- * If there is just the header portion, must flush before
- * writing last word of header for correctness, and after
- * the last header word (trigger word).
- */
- if (flush_wc) {
- qib_flush_wc();
- qib_pio_copy(piobuf, hdr, hdrwords - 1);
- qib_flush_wc();
- __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
- qib_flush_wc();
- } else
- qib_pio_copy(piobuf, hdr, hdrwords);
- goto done;
- }
-
- if (flush_wc)
- qib_flush_wc();
- qib_pio_copy(piobuf, hdr, hdrwords);
- piobuf += hdrwords;
-
- /* The common case is aligned and contained in one segment. */
- if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
- !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
- u32 *addr = (u32 *) ss->sge.vaddr;
-
- /* Update address before sending packet. */
- rvt_update_sge(ss, len, false);
- if (flush_wc) {
- qib_pio_copy(piobuf, addr, dwords - 1);
- /* must flush early everything before trigger word */
- qib_flush_wc();
- __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
- /* be sure trigger word is written */
- qib_flush_wc();
- } else
- qib_pio_copy(piobuf, addr, dwords);
- goto done;
- }
- qib_copy_io(piobuf, ss, len, flush_wc);
-done:
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf_orig + spcl_off);
- }
- qib_sendbuf_done(dd, pbufn);
- if (qp->s_rdma_mr) {
- rvt_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
- if (qp->s_wqe) {
- spin_lock_irqsave(&qp->s_lock, flags);
- rvt_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- } else if (qp->ibqp.qp_type == IB_QPT_RC) {
- spin_lock_irqsave(&qp->s_lock, flags);
- qib_rc_send_complete(qp, ibhdr);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
- return 0;
-}
-
-/**
- * qib_verbs_send - send a packet
- * @qp: the QP to send on
- * @hdr: the packet header
- * @hdrwords: the number of 32-bit words in the header
- * @ss: the SGE to send
- * @len: the length of the packet in bytes
- *
- * Return zero if packet is sent or queued OK.
- * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
- */
-int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
- u32 hdrwords, struct rvt_sge_state *ss, u32 len)
-{
- struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- u32 plen;
- int ret;
- u32 dwords = (len + 3) >> 2;
-
- /*
- * Calculate the send buffer trigger address.
- * The +1 counts for the pbc control dword following the pbc length.
- */
- plen = hdrwords + dwords + 1;
-
- /*
- * VL15 packets (IB_QPT_SMI) will always use PIO, so we
- * can defer SDMA restart until link goes ACTIVE without
- * worrying about just how we got there.
- */
- if (qp->ibqp.qp_type == IB_QPT_SMI ||
- !(dd->flags & QIB_HAS_SEND_DMA))
- ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
- plen, dwords);
- else
- ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
- plen, dwords);
-
- return ret;
-}
-
-int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
- u64 *rwords, u64 *spkts, u64 *rpkts,
- u64 *xmit_wait)
-{
- int ret;
- struct qib_devdata *dd = ppd->dd;
-
- if (!(dd->flags & QIB_PRESENT)) {
- /* no hardware, freeze, etc. */
- ret = -EINVAL;
- goto bail;
- }
- *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
- *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
- *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
- *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
- *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_get_counters - get various chip counters
- * @ppd: the qlogic_ib device
- * @cntrs: counters are placed here
- *
- * Return the counters needed by recv_pma_get_portcounters().
- */
-int qib_get_counters(struct qib_pportdata *ppd,
- struct qib_verbs_counters *cntrs)
-{
- int ret;
-
- if (!(ppd->dd->flags & QIB_PRESENT)) {
- /* no hardware, freeze, etc. */
- ret = -EINVAL;
- goto bail;
- }
- cntrs->symbol_error_counter =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
- cntrs->link_error_recovery_counter =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
- /*
- * The link downed counter counts when the other side downs the
- * connection. We add in the number of times we downed the link
- * due to local link integrity errors to compensate.
- */
- cntrs->link_downed_counter =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
- cntrs->port_rcv_errors =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
- cntrs->port_rcv_errors +=
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
- cntrs->port_rcv_errors +=
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
- cntrs->port_rcv_remphys_errors =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
- cntrs->port_xmit_discards =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
- cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
- QIBPORTCNTR_WORDSEND);
- cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
- QIBPORTCNTR_WORDRCV);
- cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
- QIBPORTCNTR_PKTSEND);
- cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
- QIBPORTCNTR_PKTRCV);
- cntrs->local_link_integrity_errors =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
- cntrs->excessive_buffer_overrun_errors =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
- cntrs->vl15_dropped =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_ib_piobufavail - callback when a PIO buffer is available
- * @dd: the device pointer
- *
- * This is called from qib_intr() at interrupt level when a PIO buffer is
- * available after qib_verbs_send() returned an error that no buffers were
- * available. Disable the interrupt if there are no more QPs waiting.
- */
-void qib_ib_piobufavail(struct qib_devdata *dd)
-{
- struct qib_ibdev *dev = &dd->verbs_dev;
- struct list_head *list;
- struct rvt_qp *qps[5];
- struct rvt_qp *qp;
- unsigned long flags;
- unsigned i, n;
- struct qib_qp_priv *priv;
-
- list = &dev->piowait;
- n = 0;
-
- /*
- * Note: checking that the piowait list is empty and clearing
- * the buffer available interrupt needs to be atomic or we
- * could end up with QPs on the wait list with the interrupt
- * disabled.
- */
- spin_lock_irqsave(&dev->rdi.pending_lock, flags);
- while (!list_empty(list)) {
- if (n == ARRAY_SIZE(qps))
- goto full;
- priv = list_entry(list->next, struct qib_qp_priv, iowait);
- qp = priv->owner;
- list_del_init(&priv->iowait);
- rvt_get_qp(qp);
- qps[n++] = qp;
- }
- dd->f_wantpiobuf_intr(dd, 0);
-full:
- spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
-
- for (i = 0; i < n; i++) {
- qp = qps[i];
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & RVT_S_WAIT_PIO) {
- qp->s_flags &= ~RVT_S_WAIT_PIO;
- qib_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- /* Notify qib_destroy_qp() if it is waiting. */
- rvt_put_qp(qp);
- }
-}
-
-static int qib_query_port(struct rvt_dev_info *rdi, u32 port_num,
- struct ib_port_attr *props)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = dd_from_dev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
- enum ib_mtu mtu;
- u16 lid = ppd->lid;
-
- /* props is zeroed by the caller; avoid zeroing it again here */
- props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
- props->lmc = ppd->lmc;
- props->state = dd->f_iblink_state(ppd->lastibcstat);
- props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
- props->gid_tbl_len = QIB_GUIDS_PER_PORT;
- props->active_width = ppd->link_width_active;
- /* See rate_show() */
- props->active_speed = ppd->link_speed_active;
- props->max_vl_num = qib_num_vls(ppd->vls_supported);
-
- props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
- switch (ppd->ibmtu) {
- case 4096:
- mtu = IB_MTU_4096;
- break;
- case 2048:
- mtu = IB_MTU_2048;
- break;
- case 1024:
- mtu = IB_MTU_1024;
- break;
- case 512:
- mtu = IB_MTU_512;
- break;
- case 256:
- mtu = IB_MTU_256;
- break;
- default:
- mtu = IB_MTU_2048;
- }
- props->active_mtu = mtu;
-
- return 0;
-}
-
-static int qib_modify_device(struct ib_device *device,
- int device_modify_mask,
- struct ib_device_modify *device_modify)
-{
- struct qib_devdata *dd = dd_from_ibdev(device);
- unsigned i;
- int ret;
-
- if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
- IB_DEVICE_MODIFY_NODE_DESC)) {
- ret = -EOPNOTSUPP;
- goto bail;
- }
-
- if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
- memcpy(device->node_desc, device_modify->node_desc,
- IB_DEVICE_NODE_DESC_MAX);
- for (i = 0; i < dd->num_pports; i++) {
- struct qib_ibport *ibp = &dd->pport[i].ibport_data;
-
- qib_node_desc_chg(ibp);
- }
- }
-
- if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
- ib_qib_sys_image_guid =
- cpu_to_be64(device_modify->sys_image_guid);
- for (i = 0; i < dd->num_pports; i++) {
- struct qib_ibport *ibp = &dd->pport[i].ibport_data;
-
- qib_sys_guid_chg(ibp);
- }
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-static int qib_shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = dd_from_dev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
-
- qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
-
- return 0;
-}
-
-static int qib_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
- int guid_index, __be64 *guid)
-{
- struct qib_ibport *ibp = container_of(rvp, struct qib_ibport, rvp);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- if (guid_index == 0)
- *guid = ppd->guid;
- else if (guid_index < QIB_GUIDS_PER_PORT)
- *guid = ibp->guids[guid_index - 1];
- else
- return -EINVAL;
-
- return 0;
-}
-
-int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
-{
- if (rdma_ah_get_sl(ah_attr) > 15)
- return -EINVAL;
-
- if (rdma_ah_get_dlid(ah_attr) == 0)
- return -EINVAL;
- if (rdma_ah_get_dlid(ah_attr) >=
- be16_to_cpu(IB_MULTICAST_LID_BASE) &&
- rdma_ah_get_dlid(ah_attr) !=
- be16_to_cpu(IB_LID_PERMISSIVE) &&
- !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
- return -EINVAL;
-
- return 0;
-}
-
-static void qib_notify_new_ah(struct ib_device *ibdev,
- struct rdma_ah_attr *ah_attr,
- struct rvt_ah *ah)
-{
- struct qib_ibport *ibp;
- struct qib_pportdata *ppd;
-
- /*
- * Do not trust reading anything from rvt_ah at this point, as it
- * is not done being set up. We can, however, modify the fields we
- * need to set.
- */
-
- ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
- ppd = ppd_from_ibp(ibp);
- ah->vl = ibp->sl_to_vl[rdma_ah_get_sl(&ah->attr)];
- ah->log_pmtu = ilog2(ppd->ibmtu);
-}
-
-struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
-{
- struct rdma_ah_attr attr;
- struct ib_ah *ah = ERR_PTR(-EINVAL);
- struct rvt_qp *qp0;
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = dd_from_ppd(ppd);
- u32 port_num = ppd->port;
-
- memset(&attr, 0, sizeof(attr));
- attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
- rdma_ah_set_dlid(&attr, dlid);
- rdma_ah_set_port_num(&attr, port_num);
- rcu_read_lock();
- qp0 = rcu_dereference(ibp->rvp.qp[0]);
- if (qp0)
- ah = rdma_create_ah(qp0->ibqp.pd, &attr, 0);
- rcu_read_unlock();
- return ah;
-}
-
-/**
- * qib_get_npkeys - return the size of the PKEY table for context 0
- * @dd: the qlogic_ib device
- */
-unsigned qib_get_npkeys(struct qib_devdata *dd)
-{
- return ARRAY_SIZE(dd->rcd[0]->pkeys);
-}
-
-/*
- * Return the indexed PKEY from the port PKEY table.
- * No need to validate rcd[ctxt]; the port is setup if we are here.
- */
-unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
-{
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = ppd->dd;
- unsigned ctxt = ppd->hw_pidx;
- unsigned ret;
-
- /* dd->rcd null if mini_init or some init failures */
- if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
- ret = 0;
- else
- ret = dd->rcd[ctxt]->pkeys[index];
-
- return ret;
-}
-
-static void init_ibport(struct qib_pportdata *ppd)
-{
- struct qib_verbs_counters cntrs;
- struct qib_ibport *ibp = &ppd->ibport_data;
-
- spin_lock_init(&ibp->rvp.lock);
- /* Set the prefix to the default value (see ch. 4.1.1) */
- ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
- ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
- ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
- IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
- IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
- IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
- IB_PORT_OTHER_LOCAL_CHANGES_SUP;
- if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
- ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
- ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
- ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
- ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
- ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
- ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
-
- /* Snapshot current HW counters to "clear" them. */
- qib_get_counters(ppd, &cntrs);
- ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
- ibp->z_link_error_recovery_counter =
- cntrs.link_error_recovery_counter;
- ibp->z_link_downed_counter = cntrs.link_downed_counter;
- ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
- ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
- ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
- ibp->z_port_xmit_data = cntrs.port_xmit_data;
- ibp->z_port_rcv_data = cntrs.port_rcv_data;
- ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
- ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
- ibp->z_local_link_integrity_errors =
- cntrs.local_link_integrity_errors;
- ibp->z_excessive_buffer_overrun_errors =
- cntrs.excessive_buffer_overrun_errors;
- ibp->z_vl15_dropped = cntrs.vl15_dropped;
- RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
- RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
-}
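/*
 * Editor's sketch (hypothetical helper, not part of this patch): the
 * z_* baselines captured above let a later PMA query report counts
 * "since clear" without ever resetting the hardware counters:
 */
static u64 example_symbol_errors_since_clear(struct qib_ibport *ibp)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;

	qib_get_counters(ppd, &cntrs);
	return cntrs.symbol_error_counter - ibp->z_symbol_error_counter;
}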
-
-/**
- * qib_fill_device_attr - Fill in rvt dev info device attributes.
- * @dd: the device data structure
- */
-static void qib_fill_device_attr(struct qib_devdata *dd)
-{
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
-
- memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
-
- rdi->dparms.props.max_pd = ib_qib_max_pds;
- rdi->dparms.props.max_ah = ib_qib_max_ahs;
- rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
- IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
- IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
- rdi->dparms.props.page_size_cap = PAGE_SIZE;
- rdi->dparms.props.vendor_id =
- QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
- rdi->dparms.props.vendor_part_id = dd->deviceid;
- rdi->dparms.props.hw_ver = dd->minrev;
- rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
- rdi->dparms.props.max_mr_size = ~0ULL;
- rdi->dparms.props.max_qp = ib_qib_max_qps;
- rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
- rdi->dparms.props.max_send_sge = ib_qib_max_sges;
- rdi->dparms.props.max_recv_sge = ib_qib_max_sges;
- rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
- rdi->dparms.props.max_cq = ib_qib_max_cqs;
- rdi->dparms.props.max_cqe = ib_qib_max_cqes;
- rdi->dparms.props.max_ah = ib_qib_max_ahs;
- rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
- rdi->dparms.props.max_qp_init_rd_atom = 255;
- rdi->dparms.props.max_srq = ib_qib_max_srqs;
- rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
- rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
- rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
- rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
- rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
- rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
- rdi->dparms.props.max_total_mcast_qp_attach =
- rdi->dparms.props.max_mcast_qp_attach *
- rdi->dparms.props.max_mcast_grp;
- /* post send table */
- dd->verbs_dev.rdi.post_parms = qib_post_parms;
-
- /* opcode translation table */
- dd->verbs_dev.rdi.wc_opcode = ib_qib_wc_opcode;
-}
-
-static const struct ib_device_ops qib_dev_ops = {
- .owner = THIS_MODULE,
- .driver_id = RDMA_DRIVER_QIB,
-
- .port_groups = qib_attr_port_groups,
- .device_group = &qib_attr_group,
- .modify_device = qib_modify_device,
- .process_mad = qib_process_mad,
-};
-
-/**
- * qib_register_ib_device - register our device with the infiniband core
- * @dd: the device data structure
- * Return the allocated qib_ibdev pointer or NULL on error.
- */
-int qib_register_ib_device(struct qib_devdata *dd)
-{
- struct qib_ibdev *dev = &dd->verbs_dev;
- struct ib_device *ibdev = &dev->rdi.ibdev;
- struct qib_pportdata *ppd = dd->pport;
- unsigned i, ctxt;
- int ret;
-
- for (i = 0; i < dd->num_pports; i++)
- init_ibport(ppd + i);
-
- /* Only need to initialize non-zero fields. */
- timer_setup(&dev->mem_timer, mem_timer, 0);
-
- INIT_LIST_HEAD(&dev->piowait);
- INIT_LIST_HEAD(&dev->dmawait);
- INIT_LIST_HEAD(&dev->txwait);
- INIT_LIST_HEAD(&dev->memwait);
- INIT_LIST_HEAD(&dev->txreq_free);
-
- if (ppd->sdma_descq_cnt) {
- dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
- ppd->sdma_descq_cnt *
- sizeof(struct qib_pio_header),
- &dev->pio_hdrs_phys,
- GFP_KERNEL);
- if (!dev->pio_hdrs) {
- ret = -ENOMEM;
- goto err_hdrs;
- }
- }
-
- for (i = 0; i < ppd->sdma_descq_cnt; i++) {
- struct qib_verbs_txreq *tx;
-
- tx = kzalloc(sizeof(*tx), GFP_KERNEL);
- if (!tx) {
- ret = -ENOMEM;
- goto err_tx;
- }
- tx->hdr_inx = i;
- list_add(&tx->txreq.list, &dev->txreq_free);
- }
-
- /*
- * The system image GUID is supposed to be the same for all
- * IB HCAs in a single system but since there can be other
- * device types in the system, we can't be sure this is unique.
- */
- if (!ib_qib_sys_image_guid)
- ib_qib_sys_image_guid = ppd->guid;
-
- ibdev->node_guid = ppd->guid;
- ibdev->phys_port_cnt = dd->num_pports;
- ibdev->dev.parent = &dd->pcidev->dev;
-
- snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
- "Intel Infiniband HCA %.42s", init_utsname()->nodename);
-
- /*
- * Fill in rvt info object.
- */
- dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
- dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
- dd->verbs_dev.rdi.driver_f.setup_wqe = qib_check_send_wqe;
- dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
- dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
- dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
- dd->verbs_dev.rdi.driver_f.qp_priv_free = qib_qp_priv_free;
- dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
- dd->verbs_dev.rdi.driver_f.notify_qp_reset = qib_notify_qp_reset;
- dd->verbs_dev.rdi.driver_f.do_send = qib_do_send;
- dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send;
- dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp;
- dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
- dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
- dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
- dd->verbs_dev.rdi.driver_f.notify_restart_rc = qib_restart_rc;
- dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
- dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
- dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;
- dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send;
- dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port;
- dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port;
- dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg;
- dd->verbs_dev.rdi.driver_f.notify_create_mad_agent =
- qib_notify_create_mad_agent;
- dd->verbs_dev.rdi.driver_f.notify_free_mad_agent =
- qib_notify_free_mad_agent;
-
- dd->verbs_dev.rdi.dparms.max_rdma_atomic = QIB_MAX_RDMA_ATOMIC;
- dd->verbs_dev.rdi.driver_f.get_guid_be = qib_get_guid_be;
- dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
- dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
- dd->verbs_dev.rdi.dparms.qpn_start = 1;
- dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP;
- dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */
- dd->verbs_dev.rdi.dparms.qpn_inc = 1;
- dd->verbs_dev.rdi.dparms.qos_shift = 1;
- dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK;
- dd->verbs_dev.rdi.dparms.psn_shift = QIB_PSN_SHIFT;
- dd->verbs_dev.rdi.dparms.psn_modify_mask = QIB_PSN_MASK;
- dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
- dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
- dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
- dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
- dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
- dd->verbs_dev.rdi.dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;
-
- qib_fill_device_attr(dd);
-
- ppd = dd->pport;
- for (i = 0; i < dd->num_pports; i++, ppd++) {
- ctxt = ppd->hw_pidx;
- rvt_init_port(&dd->verbs_dev.rdi,
- &ppd->ibport_data.rvp,
- i,
- dd->rcd[ctxt]->pkeys);
- }
-
- ib_set_device_ops(ibdev, &qib_dev_ops);
- ret = rvt_register_device(&dd->verbs_dev.rdi);
- if (ret)
- goto err_tx;
-
- return ret;
-
-err_tx:
- while (!list_empty(&dev->txreq_free)) {
- struct list_head *l = dev->txreq_free.next;
- struct qib_verbs_txreq *tx;
-
- list_del(l);
- tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- kfree(tx);
- }
- if (ppd->sdma_descq_cnt)
- dma_free_coherent(&dd->pcidev->dev,
- ppd->sdma_descq_cnt *
- sizeof(struct qib_pio_header),
- dev->pio_hdrs, dev->pio_hdrs_phys);
-err_hdrs:
- qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
- return ret;
-}
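/*
 * Editor's note (sketch, not part of this patch): the exit path above
 * is the usual kernel reverse-order unwind -- err_tx frees whatever
 * part of txreq_free was built plus the pio_hdrs DMA buffer, while an
 * early pio_hdrs allocation failure jumps straight to err_hdrs so
 * nothing allocated later is touched; both paths log via qib_dev_err().
 */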
-
-void qib_unregister_ib_device(struct qib_devdata *dd)
-{
- struct qib_ibdev *dev = &dd->verbs_dev;
-
- rvt_unregister_device(&dd->verbs_dev.rdi);
-
- if (!list_empty(&dev->piowait))
- qib_dev_err(dd, "piowait list not empty!\n");
- if (!list_empty(&dev->dmawait))
- qib_dev_err(dd, "dmawait list not empty!\n");
- if (!list_empty(&dev->txwait))
- qib_dev_err(dd, "txwait list not empty!\n");
- if (!list_empty(&dev->memwait))
- qib_dev_err(dd, "memwait list not empty!\n");
-
- timer_delete_sync(&dev->mem_timer);
- while (!list_empty(&dev->txreq_free)) {
- struct list_head *l = dev->txreq_free.next;
- struct qib_verbs_txreq *tx;
-
- list_del(l);
- tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- kfree(tx);
- }
- if (dd->pport->sdma_descq_cnt)
- dma_free_coherent(&dd->pcidev->dev,
- dd->pport->sdma_descq_cnt *
- sizeof(struct qib_pio_header),
- dev->pio_hdrs, dev->pio_hdrs_phys);
-}
-
-/**
- * _qib_schedule_send - schedule progress
- * @qp: the qp
- *
- * This schedules progress without regard to the s_flags.
- *
- * It is only used in post send, which doesn't hold
- * the s_lock.
- */
-bool _qib_schedule_send(struct rvt_qp *qp)
-{
- struct qib_ibport *ibp =
- to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_qp_priv *priv = qp->priv;
-
- return queue_work(ppd->qib_wq, &priv->s_work);
-}
-
-/**
- * qib_schedule_send - schedule progress
- * @qp: the qp
- *
- * This schedules qp progress. The s_lock
- * should be held.
- */
-bool qib_schedule_send(struct rvt_qp *qp)
-{
- if (qib_send_ok(qp))
- return _qib_schedule_send(qp);
- return false;
-}
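/*
 * Editor's sketch (hypothetical caller, not part of this patch):
 * qib_schedule_send() reads qp->s_flags via qib_send_ok() and so
 * needs qp->s_lock; _qib_schedule_send() skips the check and is the
 * lock-free variant used from post-send.
 */
static void example_kick_qp(struct rvt_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	qib_schedule_send(qp);	/* no-op if busy or waiting for I/O */
	spin_unlock_irqrestore(&qp->s_lock, flags);
}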
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
deleted file mode 100644
index 408fe1ba74b9..000000000000
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * Copyright (c) 2012 - 2018 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef QIB_VERBS_H
-#define QIB_VERBS_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/kref.h>
-#include <linux/workqueue.h>
-#include <linux/kthread.h>
-#include <linux/completion.h>
-#include <rdma/ib_pack.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_hdrs.h>
-#include <rdma/rdmavt_qp.h>
-#include <rdma/rdmavt_cq.h>
-
-struct qib_ctxtdata;
-struct qib_pportdata;
-struct qib_devdata;
-struct qib_verbs_txreq;
-
-#define QIB_MAX_RDMA_ATOMIC 16
-#define QIB_GUIDS_PER_PORT 5
-#define QIB_PSN_SHIFT 8
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define QIB_UVERBS_ABI_VERSION 2
-
-/* IB Performance Manager status values */
-#define IB_PMA_SAMPLE_STATUS_DONE 0x00
-#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
-#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
-
-/* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
-#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
-#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
-
-#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)
-
-#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
-
-/* Values for set/get portinfo VLCap OperationalVLs */
-#define IB_VL_VL0 1
-#define IB_VL_VL0_1 2
-#define IB_VL_VL0_3 3
-#define IB_VL_VL0_7 4
-#define IB_VL_VL0_14 5
-
-static inline int qib_num_vls(int vls)
-{
- switch (vls) {
- default:
- case IB_VL_VL0:
- return 1;
- case IB_VL_VL0_1:
- return 2;
- case IB_VL_VL0_3:
- return 4;
- case IB_VL_VL0_7:
- return 8;
- case IB_VL_VL0_14:
- return 15;
- }
-}
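/*
 * Editor's sketch (hypothetical inverse mapping, not part of this
 * patch): the IB_VL_VL0_* constants above are PortInfo VLCap /
 * OperationalVLs *encodings*, not VL counts -- qib_num_vls() decodes
 * them (VL15 is excluded; it is reserved for SMPs).
 */
static inline int example_vls_to_vlcap(int nvls)
{
	switch (nvls) {
	case 2:  return IB_VL_VL0_1;
	case 4:  return IB_VL_VL0_3;
	case 8:  return IB_VL_VL0_7;
	case 15: return IB_VL_VL0_14;
	case 1:
	default: return IB_VL_VL0;
	}
}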
-
-struct qib_pio_header {
- __le32 pbc[2];
- struct ib_header hdr;
-} __packed;
-
-/*
- * qib specific data structure that will be hidden from rvt after the queue pair
- * is made common.
- */
-struct qib_qp_priv {
- struct ib_header *s_hdr; /* next packet header to send */
- struct list_head iowait; /* link for wait PIO buf */
- atomic_t s_dma_busy;
- struct qib_verbs_txreq *s_tx;
- struct work_struct s_work;
- wait_queue_head_t wait_dma;
- struct rvt_qp *owner;
-};
-
-#define QIB_PSN_CREDIT 16
-
-struct qib_opcode_stats {
- u64 n_packets; /* number of packets */
- u64 n_bytes; /* total number of bytes */
-};
-
-struct qib_opcode_stats_perctx {
- struct qib_opcode_stats stats[128];
-};
-
-struct qib_pma_counters {
- u64 n_unicast_xmit; /* total unicast packets sent */
- u64 n_unicast_rcv; /* total unicast packets received */
- u64 n_multicast_xmit; /* total multicast packets sent */
- u64 n_multicast_rcv; /* total multicast packets received */
-};
-
-struct qib_ibport {
- struct rvt_ibport rvp;
- struct rvt_ah *smi_ah;
- __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
- struct qib_pma_counters __percpu *pmastats;
- u64 z_unicast_xmit; /* starting count for PMA */
- u64 z_unicast_rcv; /* starting count for PMA */
- u64 z_multicast_xmit; /* starting count for PMA */
- u64 z_multicast_rcv; /* starting count for PMA */
- u64 z_symbol_error_counter; /* starting count for PMA */
- u64 z_link_error_recovery_counter; /* starting count for PMA */
- u64 z_link_downed_counter; /* starting count for PMA */
- u64 z_port_rcv_errors; /* starting count for PMA */
- u64 z_port_rcv_remphys_errors; /* starting count for PMA */
- u64 z_port_xmit_discards; /* starting count for PMA */
- u64 z_port_xmit_data; /* starting count for PMA */
- u64 z_port_rcv_data; /* starting count for PMA */
- u64 z_port_xmit_packets; /* starting count for PMA */
- u64 z_port_rcv_packets; /* starting count for PMA */
- u32 z_local_link_integrity_errors; /* starting count for PMA */
- u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
- u32 z_vl15_dropped; /* starting count for PMA */
- u8 sl_to_vl[16];
-};
-
-struct qib_ibdev {
- struct rvt_dev_info rdi;
-
- struct list_head piowait; /* list for wait PIO buf */
- struct list_head dmawait; /* list for wait DMA */
- struct list_head txwait; /* list for wait qib_verbs_txreq */
- struct list_head memwait; /* list for wait kernel memory */
- struct list_head txreq_free;
- struct timer_list mem_timer;
- struct qib_pio_header *pio_hdrs;
- dma_addr_t pio_hdrs_phys;
-
- u32 n_piowait;
- u32 n_txwait;
-
-#ifdef CONFIG_DEBUG_FS
- /* per HCA debugfs */
- struct dentry *qib_ibdev_dbg;
-#endif
-};
-
-struct qib_verbs_counters {
- u64 symbol_error_counter;
- u64 link_error_recovery_counter;
- u64 link_downed_counter;
- u64 port_rcv_errors;
- u64 port_rcv_remphys_errors;
- u64 port_xmit_discards;
- u64 port_xmit_data;
- u64 port_rcv_data;
- u64 port_xmit_packets;
- u64 port_rcv_packets;
- u32 local_link_integrity_errors;
- u32 excessive_buffer_overrun_errors;
- u32 vl15_dropped;
-};
-
-static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
-{
- struct rvt_dev_info *rdi;
-
- rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
- return container_of(rdi, struct qib_ibdev, rdi);
-}
-
-/*
- * Send if not busy or waiting for I/O, and either an RC response
- * is pending or we can process send work requests.
- */
-static inline int qib_send_ok(struct rvt_qp *qp)
-{
- return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
- (qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) ||
- !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
-}
-
-bool _qib_schedule_send(struct rvt_qp *qp);
-bool qib_schedule_send(struct rvt_qp *qp);
-
-static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
-{
- u16 p1 = pkey1 & 0x7FFF;
- u16 p2 = pkey2 & 0x7FFF;
-
- /*
- * Low 15 bits must be non-zero and match, and
- * one of the two must be a full member.
- */
- return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
-}
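/*
 * Editor's note (worked examples, not part of this patch): bit 15 of
 * a PKEY marks full membership, so:
 *
 *	qib_pkey_ok(0xFFFF, 0x7FFF) -> true  (same low 15 bits, one full)
 *	qib_pkey_ok(0x7FFF, 0x7FFF) -> false (both limited members)
 *	qib_pkey_ok(0x8001, 0x8002) -> false (low 15 bits differ)
 *	qib_pkey_ok(0x0000, 0x8000) -> false (low 15 bits are zero)
 */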
-
-void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl,
- u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
-void qib_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num);
-void qib_sys_guid_chg(struct qib_ibport *ibp);
-void qib_node_desc_chg(struct qib_ibport *ibp);
-int qib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad *in, struct ib_mad *out,
- size_t *out_mad_size, u16 *out_mad_pkey_index);
-void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx);
-void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx);
-
-/*
- * Compare the lower 24 bits of the two values.
- * Returns an integer less than, equal to, or greater than zero.
- */
-static inline int qib_cmp24(u32 a, u32 b)
-{
- return (((int) a) - ((int) b)) << 8;
-}
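/*
 * Editor's note (worked examples, not part of this patch): the shift
 * promotes bit 23 to the sign bit, giving modular 24-bit PSN
 * comparison across wraparound:
 *
 *	qib_cmp24(0x000001, 0xffffff) > 0   (1 is just "after" 0xffffff)
 *	qib_cmp24(0xfffffe, 0x000002) < 0   (and just "before" 2)
 */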
-
-int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
- u64 *rwords, u64 *spkts, u64 *rpkts,
- u64 *xmit_wait);
-
-int qib_get_counters(struct qib_pportdata *ppd,
- struct qib_verbs_counters *cntrs);
-
-/*
- * Functions provided by qib driver for rdmavt to use
- */
-unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
-void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
-void qib_notify_qp_reset(struct rvt_qp *qp);
-int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u32 port);
-void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
-#ifdef CONFIG_DEBUG_FS
-
-void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter);
-
-#endif
-
-unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
-
-void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
-
-void qib_put_txreq(struct qib_verbs_txreq *tx);
-
-int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
- u32 hdrwords, struct rvt_sge_state *ss, u32 len);
-
-void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
-
-void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
-
-int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
-
-int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
- bool *call_send);
-
-struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
-
-void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
-
-int qib_post_ud_send(struct rvt_qp *qp, const struct ib_send_wr *wr);
-
-void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
-
-void qib_migrate_qp(struct rvt_qp *qp);
-
-int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
- int has_grh, struct rvt_qp *qp, u32 bth0);
-
-u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
- const struct ib_global_route *grh, u32 hwords, u32 nwords);
-
-void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2);
-
-void _qib_do_send(struct work_struct *work);
-
-void qib_do_send(struct rvt_qp *qp);
-
-void qib_send_rc_ack(struct rvt_qp *qp);
-
-int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags);
-
-int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags);
-
-int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags);
-
-int qib_register_ib_device(struct qib_devdata *);
-
-void qib_unregister_ib_device(struct qib_devdata *);
-
-void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);
-
-void qib_ib_piobufavail(struct qib_devdata *);
-
-unsigned qib_get_npkeys(struct qib_devdata *);
-
-unsigned qib_get_pkey(struct qib_ibport *, unsigned);
-
-extern const enum ib_wc_opcode ib_qib_wc_opcode[];
-
-/*
- * Below are the HCA-independent IB PhysPortState values, as returned
- * by the f_ibphys_portstate() routine.
- */
-#define IB_PHYSPORTSTATE_SLEEP 1
-#define IB_PHYSPORTSTATE_POLL 2
-#define IB_PHYSPORTSTATE_DISABLED 3
-#define IB_PHYSPORTSTATE_CFG_TRAIN 4
-#define IB_PHYSPORTSTATE_LINKUP 5
-#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
-#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
-#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
-#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
-#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
-#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
-#define IB_PHYSPORTSTATE_CFG_ENH 0x10
-#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
-
-extern const int ib_rvt_state_ops[];
-
-extern __be64 ib_qib_sys_image_guid; /* in network order */
-
-extern unsigned int ib_rvt_lkey_table_size;
-
-extern unsigned int ib_qib_max_cqes;
-
-extern unsigned int ib_qib_max_cqs;
-
-extern unsigned int ib_qib_max_qp_wrs;
-
-extern unsigned int ib_qib_max_qps;
-
-extern unsigned int ib_qib_max_sges;
-
-extern unsigned int ib_qib_max_mcast_grps;
-
-extern unsigned int ib_qib_max_mcast_qp_attached;
-
-extern unsigned int ib_qib_max_srqs;
-
-extern unsigned int ib_qib_max_srq_sges;
-
-extern unsigned int ib_qib_max_srq_wrs;
-
-extern const u32 ib_qib_rnr_table[];
-
-extern const struct rvt_operation_params qib_post_parms[];
-
-#endif /* QIB_VERBS_H */
diff --git a/drivers/infiniband/hw/qib/qib_wc_ppc64.c b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
deleted file mode 100644
index 673cf4c22ebd..000000000000
--- a/drivers/infiniband/hw/qib/qib_wc_ppc64.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on PowerPC only. Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include "qib.h"
-
-/**
- * qib_enable_wc - enable write combining for MMIO writes to the device
- * @dd: qlogic_ib device
- *
- * Nothing to do on PowerPC, so just return without error.
- */
-int qib_enable_wc(struct qib_devdata *dd)
-{
- return 0;
-}
-
-/**
- * qib_unordered_wc - indicate whether write combining is unordered
- *
- * Because our performance depends on our ability to do write
- * combining mmio writes in the most efficient way, we need to
- * know if we are on a processor that may reorder stores when
- * write combining.
- */
-int qib_unordered_wc(void)
-{
- return 1;
-}
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
deleted file mode 100644
index edd0ddbd4481..000000000000
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on x86_64 only. Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include <linux/pci.h>
-#include <asm/mtrr.h>
-#include <asm/processor.h>
-
-#include "qib.h"
-
-/**
- * qib_enable_wc - enable write combining for MMIO writes to the device
- * @dd: qlogic_ib device
- *
- * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
- * write combining.
- */
-int qib_enable_wc(struct qib_devdata *dd)
-{
- int ret = 0;
- u64 pioaddr, piolen;
- unsigned bits;
- const unsigned long addr = pci_resource_start(dd->pcidev, 0);
- const size_t len = pci_resource_len(dd->pcidev, 0);
-
- /*
- * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
- * chip. Linux (possibly the hardware) requires it to be on a power
- * of 2 address matching the length (which has to be a power of 2).
- * For rev1, that means the base address, for rev2, it will be just
- * the PIO buffers themselves.
- * For chips with two sets of buffers, the calculations are
- * somewhat more complicated; we need to sum, and the piobufbase
- * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
- * The buffers are still packed, so a single range covers both.
- */
- if (dd->piobcnt2k && dd->piobcnt4k) {
- /* 2 sizes for chip */
- unsigned long pio2kbase, pio4kbase;
-
- pio2kbase = dd->piobufbase & 0xffffffffUL;
- pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
- if (pio2kbase < pio4kbase) {
- /* all current chips */
- pioaddr = addr + pio2kbase;
- piolen = pio4kbase - pio2kbase +
- dd->piobcnt4k * dd->align4k;
- } else {
- pioaddr = addr + pio4kbase;
- piolen = pio2kbase - pio4kbase +
- dd->piobcnt2k * dd->palign;
- }
- } else { /* single buffer size (2K, currently) */
- pioaddr = addr + dd->piobufbase;
- piolen = dd->piobcnt2k * dd->palign +
- dd->piobcnt4k * dd->align4k;
- }
-
- for (bits = 0; !(piolen & (1ULL << bits)); bits++)
- ; /* do nothing */
-
- if (piolen != (1ULL << bits)) {
- piolen >>= bits;
- while (piolen >>= 1)
- bits++;
- piolen = 1ULL << (bits + 1);
- }
- if (pioaddr & (piolen - 1)) {
- u64 atmp = pioaddr & ~(piolen - 1);
-
- if (atmp < addr || (atmp + piolen) > (addr + len)) {
- qib_dev_err(dd,
- "No way to align address/size (%llx/%llx), no WC mtrr\n",
- (unsigned long long) atmp,
- (unsigned long long) piolen << 1);
- ret = -ENODEV;
- } else {
- pioaddr = atmp;
- piolen <<= 1;
- }
- }
-
- if (!ret) {
- dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
- if (dd->wc_cookie < 0)
- /* use error from routine */
- ret = dd->wc_cookie;
- }
-
- return ret;
-}
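/*
 * Editor's note (worked example, not part of this patch): the bit
 * arithmetic above rounds a non-power-of-2 PIO span up to the next
 * power of two, as MTRR-style ranges require.  E.g. piolen = 0x3000:
 * 12 trailing zero bits, highest set bit 13, result 1ULL << 14 =
 * 0x4000.  If the base is then misaligned for that size, it is
 * aligned down and the length doubled so the range still covers all
 * PIO buffers.
 */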
-
-/**
- * qib_disable_wc - disable write combining for MMIO writes to the device
- * @dd: qlogic_ib device
- */
-void qib_disable_wc(struct qib_devdata *dd)
-{
- arch_phys_wc_del(dd->wc_cookie);
-}
-
-/**
- * qib_unordered_wc - indicate whether write combining is unordered
- *
- * Because our performance depends on our ability to do write combining mmio
- * writes in the most efficient way, we need to know if we are on an Intel
- * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
- * the order completed, and so no special flushing is required to get
- * correct ordering. Intel processors, however, will flush write buffers
- * out in "random" orders, and so explicit ordering is needed at times.
- */
-int qib_unordered_wc(void)
-{
- return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 217af34e82b3..ae5df96589d9 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -592,6 +592,7 @@ int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct usnic_ib_mr *mr;
@@ -600,6 +601,9 @@ struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
virt_addr, length);
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 53f53f2d53be..e3031ac32488 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -60,6 +60,7 @@ int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
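/*
 * Editor's note (pattern sketch, not part of any single hunk): this
 * series threads a new "struct ib_dmah *dmah" parameter through every
 * .reg_user_mr implementation.  Drivers without DMA-handle support all
 * reject it the same way:
 *
 *	if (dmah)
 *		return ERR_PTR(-EOPNOTSUPP);
 *
 * The same three-line guard recurs in the pvrdma, rdmavt, rxe and siw
 * hunks below.
 */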
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index e80848bfb3bd..ec7a00c8285b 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -104,12 +104,14 @@ struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
* @length: length of region
* @virt_addr: I/O virtual address
* @access_flags: access flags for memory region
+ * @dmah: dma handle
* @udata: user data
*
* @return: ib_mr pointer on success, otherwise returns an errno.
*/
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct pvrdma_dev *dev = to_vdev(pd->device);
@@ -121,6 +123,9 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
int ret, npages;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (length == 0 || length > dev->dsr->caps.max_mr_size) {
dev_warn(&dev->pdev->dev, "invalid mem region length\n");
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index fd47b0b1df5c..603e5a9311eb 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -366,6 +366,7 @@ int pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
int pvrdma_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 5ed5cfc2b280..86e482593a85 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -329,12 +329,14 @@ bail:
* @length: length of region to register
* @virt_addr: associated virtual address
* @mr_access_flags: access flags for this memory region
+ * @dmah: dma handle
* @udata: unused by the driver
*
* Return: the memory region on success, otherwise returns an errno.
*/
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct rvt_mr *mr;
@@ -343,6 +345,9 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int n, m;
struct ib_mr *ret;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (length == 0)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h
index 44afe2731741..72dab48307b7 100644
--- a/drivers/infiniband/sw/rdmavt/mr.h
+++ b/drivers/infiniband/sw/rdmavt/mr.h
@@ -26,6 +26,7 @@ void rvt_mr_exit(struct rvt_dev_info *rdi);
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 5499025e8a0a..d22d610c2696 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -49,7 +49,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
{
struct rvt_dev_info *rdi;
- rdi = container_of(_ib_alloc_device(size), struct rvt_dev_info, ibdev);
+ rdi = container_of(_ib_alloc_device(size, &init_net), struct rvt_dev_info, ibdev);
if (!rdi)
return rdi;
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 3a77d6db1720..e891199cbdef 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -34,6 +34,10 @@ void rxe_dealloc(struct ib_device *ib_dev)
mutex_destroy(&rxe->usdev_lock);
}
+static const struct ib_device_ops rxe_ib_dev_odp_ops = {
+ .advise_mr = rxe_ib_advise_mr,
+};
+
/* initialize rxe device parameters */
static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
{
@@ -103,6 +107,9 @@ static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_FLUSH;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC_WRITE;
+
+ /* set handler for ODP prefetching API - ibv_advise_mr(3) */
+ ib_set_device_ops(&rxe->ib_dev, &rxe_ib_dev_odp_ops);
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 876702058c84..7992290886e1 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -203,6 +203,9 @@ enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
unsigned int length);
enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
+int rxe_ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge,
+ struct uverbs_attr_bundle *attrs);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int
rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
@@ -231,6 +234,15 @@ static inline enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr,
{
return RESPST_ERR_UNSUPPORTED_OPCODE;
}
+static inline int rxe_ib_advise_mr(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list,
+ u32 num_sge,
+ struct uverbs_attr_bundle *attrs)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
#endif /* RXE_LOC_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
index dbc5a5600eb7..f58e3ec6252f 100644
--- a/drivers/infiniband/sw/rxe/rxe_odp.c
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -203,8 +203,6 @@ static int __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
user_va = kmap_local_page(page);
- if (!user_va)
- return -EFAULT;
src = (dir == RXE_TO_MR_OBJ) ? addr : user_va;
dest = (dir == RXE_TO_MR_OBJ) ? user_va : addr;
@@ -283,17 +281,15 @@ static enum resp_states rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova,
return RESPST_ERR_RKEY_VIOLATION;
}
- idx = rxe_odp_iova_to_index(umem_odp, iova);
page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
- page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
- if (!page)
- return RESPST_ERR_RKEY_VIOLATION;
-
if (unlikely(page_offset & 0x7)) {
rxe_dbg_mr(mr, "iova not aligned\n");
return RESPST_ERR_MISALIGNED_ATOMIC;
}
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
+
va = kmap_local_page(page);
spin_lock_bh(&atomic_ops_lock);
@@ -352,10 +348,6 @@ int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
- if (!page) {
- mutex_unlock(&umem_odp->umem_mutex);
- return -EFAULT;
- }
bytes = min_t(unsigned int, length,
mr_page_size(mr) - page_offset);
@@ -396,12 +388,6 @@ enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
return RESPST_ERR_RKEY_VIOLATION;
page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
- index = rxe_odp_iova_to_index(umem_odp, iova);
- page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
- if (!page) {
- mutex_unlock(&umem_odp->umem_mutex);
- return RESPST_ERR_RKEY_VIOLATION;
- }
/* See IBA A19.4.2 */
if (unlikely(page_offset & 0x7)) {
mutex_unlock(&umem_odp->umem_mutex);
@@ -409,6 +395,9 @@ enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
return RESPST_ERR_MISALIGNED_ATOMIC;
}
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+
va = kmap_local_page(page);
/* Do atomic write after all prior operations have completed */
smp_store_release(&va[page_offset >> 3], value);
@@ -418,3 +407,172 @@ enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
return RESPST_NONE;
}
+
+struct prefetch_mr_work {
+ struct work_struct work;
+ u32 pf_flags;
+ u32 num_sge;
+ struct {
+ u64 io_virt;
+ struct rxe_mr *mr;
+ size_t length;
+ } frags[];
+};
+
+static void rxe_ib_prefetch_mr_work(struct work_struct *w)
+{
+ struct prefetch_mr_work *work =
+ container_of(w, struct prefetch_mr_work, work);
+ int ret;
+ u32 i;
+
+ /*
+ * We rely on the IB/core to queue this work
+ * only when num_sge != 0.
+ */
+ WARN_ON(!work->num_sge);
+ for (i = 0; i < work->num_sge; ++i) {
+ struct ib_umem_odp *umem_odp;
+
+ ret = rxe_odp_do_pagefault_and_lock(work->frags[i].mr,
+ work->frags[i].io_virt,
+ work->frags[i].length,
+ work->pf_flags);
+ if (ret < 0) {
+ rxe_dbg_mr(work->frags[i].mr,
+ "failed to prefetch the mr\n");
+ goto deref;
+ }
+
+ umem_odp = to_ib_umem_odp(work->frags[i].mr->umem);
+ mutex_unlock(&umem_odp->umem_mutex);
+
+deref:
+ rxe_put(work->frags[i].mr);
+ }
+
+ kvfree(work);
+}
+
+static int rxe_ib_prefetch_sg_list(struct ib_pd *ibpd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct ib_sge *sg_list,
+ u32 num_sge)
+{
+ struct rxe_pd *pd = container_of(ibpd, struct rxe_pd, ibpd);
+ int ret = 0;
+ u32 i;
+
+ for (i = 0; i < num_sge; ++i) {
+ struct rxe_mr *mr;
+ struct ib_umem_odp *umem_odp;
+
+ mr = lookup_mr(pd, IB_ACCESS_LOCAL_WRITE,
+ sg_list[i].lkey, RXE_LOOKUP_LOCAL);
+
+ if (!mr) {
+ rxe_dbg_pd(pd, "mr with lkey %x not found\n",
+ sg_list[i].lkey);
+ return -EINVAL;
+ }
+
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ !mr->umem->writable) {
+ rxe_dbg_mr(mr, "missing write permission\n");
+ rxe_put(mr);
+ return -EPERM;
+ }
+
+ ret = rxe_odp_do_pagefault_and_lock(
+ mr, sg_list[i].addr, sg_list[i].length, pf_flags);
+ if (ret < 0) {
+ rxe_dbg_mr(mr, "failed to prefetch the mr\n");
+ rxe_put(mr);
+ return ret;
+ }
+
+ umem_odp = to_ib_umem_odp(mr->umem);
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ rxe_put(mr);
+ }
+
+ return 0;
+}
+
+static int rxe_ib_advise_mr_prefetch(struct ib_pd *ibpd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list,
+ u32 num_sge)
+{
+ struct rxe_pd *pd = container_of(ibpd, struct rxe_pd, ibpd);
+ u32 pf_flags = RXE_PAGEFAULT_DEFAULT;
+ struct prefetch_mr_work *work;
+ struct rxe_mr *mr;
+ u32 i;
+
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
+ pf_flags |= RXE_PAGEFAULT_RDONLY;
+
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
+ pf_flags |= RXE_PAGEFAULT_SNAPSHOT;
+
+ /* Synchronous call */
+ if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
+ return rxe_ib_prefetch_sg_list(ibpd, advice, pf_flags, sg_list,
+ num_sge);
+
+ /* Asynchronous call is "best-effort" and allowed to fail */
+ work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ INIT_WORK(&work->work, rxe_ib_prefetch_mr_work);
+ work->pf_flags = pf_flags;
+ work->num_sge = num_sge;
+
+ for (i = 0; i < num_sge; ++i) {
+ /* Takes a reference, which will be released in the queued work */
+ mr = lookup_mr(pd, IB_ACCESS_LOCAL_WRITE,
+ sg_list[i].lkey, RXE_LOOKUP_LOCAL);
+ if (!mr) {
+ mr = ERR_PTR(-EINVAL);
+ goto err;
+ }
+
+ work->frags[i].io_virt = sg_list[i].addr;
+ work->frags[i].length = sg_list[i].length;
+ work->frags[i].mr = mr;
+ }
+
+ queue_work(system_unbound_wq, &work->work);
+
+ return 0;
+
+ err:
+ /* rollback reference counts for the invalid request */
+ while (i > 0) {
+ i--;
+ rxe_put(work->frags[i].mr);
+ }
+
+ kvfree(work);
+
+ return PTR_ERR(mr);
+}
+
+int rxe_ib_advise_mr(struct ib_pd *ibpd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags,
+ struct ib_sge *sg_list,
+ u32 num_sge,
+ struct uverbs_attr_bundle *attrs)
+{
+ if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
+ advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
+ return -EOPNOTSUPP;
+
+ return rxe_ib_advise_mr_prefetch(ibpd, advice, flags,
+ sg_list, num_sge);
+}
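/*
 * Editor's sketch (hypothetical userspace caller, not part of this
 * patch): the handler above is what libibverbs' ibv_advise_mr(3)
 * reaches for rxe.  A synchronous prefetch of one ODP-registered
 * range might look like:
 */
#if 0	/* userspace illustration only */
#include <infiniband/verbs.h>

static int prefetch_range(struct ibv_pd *pd, struct ibv_mr *mr,
			  void *addr, size_t len)
{
	struct ibv_sge sge = {
		.addr   = (uintptr_t)addr,
		.length = (uint32_t)len,
		.lkey   = mr->lkey,
	};

	/* IBV_ADVISE_MR_FLAG_FLUSH selects the synchronous path */
	return ibv_advise_mr(pd, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE,
			     IBV_ADVISE_MR_FLAG_FLUSH, &sge, 1);
}
#endif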
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 2331e698a65b..38d8c408320f 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -65,7 +65,7 @@ static int rxe_query_port(struct ib_device *ibdev,
attr->state = ib_get_curr_port_state(ndev);
if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
- else if (dev_get_flags(ndev) & IFF_UP)
+ else if (netif_get_flags(ndev) & IFF_UP)
attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
else
attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
@@ -1271,6 +1271,7 @@ err_free:
static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
u64 length, u64 iova, int access,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct rxe_dev *rxe = to_rdev(ibpd->device);
@@ -1278,6 +1279,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
struct rxe_mr *mr;
int err, cleanup_err;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (access & ~RXE_ACCESS_SUPPORTED_MR) {
rxe_err_pd(pd, "access = %#x not supported (%#x)\n", access,
RXE_ACCESS_SUPPORTED_MR);
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 6432bce7d083..f7dd32c6e5ba 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -277,6 +277,15 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
return PKT_FRAGMENTED;
}
+static noinline_for_stack int
+siw_sendmsg(struct socket *sock, unsigned int msg_flags,
+ struct kvec *vec, size_t num, size_t len)
+{
+ struct msghdr msg = { .msg_flags = msg_flags };
+
+ return kernel_sendmsg(sock, &msg, vec, num, len);
+}
+
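/*
 * Editor's note (sketch): noinline_for_stack keeps the on-stack
 * struct msghdr confined to this helper instead of inlining it into
 * every caller; siw_tx_hdt() below gets the same annotation since it
 * already carries large iov[] and page_array[] frames.  Behaviour of
 * kernel_sendmsg() is unchanged.
 */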
/*
* Send out one complete control type FPDU, or header of FPDU carrying
* data. Used for fixed sized packets like Read.Requests or zero length
@@ -285,12 +294,11 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s,
int flags)
{
- struct msghdr msg = { .msg_flags = flags };
struct kvec iov = { .iov_base =
(char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent,
.iov_len = c_tx->ctrl_len - c_tx->ctrl_sent };
- int rv = kernel_sendmsg(s, &msg, &iov, 1, iov.iov_len);
+ int rv = siw_sendmsg(s, flags, &iov, 1, iov.iov_len);
if (rv >= 0) {
c_tx->ctrl_sent += rv;
@@ -332,18 +340,17 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
if (!sendpage_ok(page[i]))
msg.msg_flags &= ~MSG_SPLICE_PAGES;
bvec_set_page(&bvec, page[i], bytes, offset);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, bytes);
try_page_again:
lock_sock(sk);
- rv = tcp_sendmsg_locked(sk, &msg, size);
+ rv = tcp_sendmsg_locked(sk, &msg, bytes);
release_sock(sk);
if (rv > 0) {
size -= rv;
sent += rv;
if (rv != bytes) {
- offset += rv;
bytes -= rv;
goto try_page_again;
}
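/*
 * Editor's note (sketch): the hunk above caps each bvec at one page's
 * worth ("bytes") instead of the whole remaining "size", and drops the
 * manual offset bump -- after a short send, msg.msg_iter has already
 * advanced, so the tcp_sendmsg_locked() retry resumes from the right
 * position within the same bvec.
 */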
@@ -427,13 +434,13 @@ static void siw_unmap_pages(struct kvec *iov, unsigned long kmap_mask, int len)
* Write out iov referencing hdr, data and trailer of current FPDU.
* Update transmit state dependent on write return status
*/
-static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
+static noinline_for_stack int siw_tx_hdt(struct siw_iwarp_tx *c_tx,
+ struct socket *s)
{
struct siw_wqe *wqe = &c_tx->wqe_active;
struct siw_sge *sge = &wqe->sqe.sge[c_tx->sge_idx];
struct kvec iov[MAX_ARRAY];
struct page *page_array[MAX_ARRAY];
- struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
int seg = 0, do_crc = c_tx->do_crc, is_kva = 0, rv;
unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
@@ -586,14 +593,16 @@ sge_done:
rv = siw_0copy_tx(s, page_array, &wqe->sqe.sge[c_tx->sge_idx],
c_tx->sge_off, data_len);
if (rv == data_len) {
- rv = kernel_sendmsg(s, &msg, &iov[seg], 1, trl_len);
+
+ rv = siw_sendmsg(s, MSG_DONTWAIT | MSG_EOR, &iov[seg],
+ 1, trl_len);
if (rv > 0)
rv += data_len;
else
rv = data_len;
}
} else {
- rv = kernel_sendmsg(s, &msg, iov, seg + 1,
+ rv = siw_sendmsg(s, MSG_DONTWAIT | MSG_EOR, iov, seg + 1,
hdr_len + data_len + trl_len);
siw_unmap_pages(iov, kmap_mask, seg);
}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 2b2a7b8e93b0..35c3bde0d00a 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -1321,10 +1321,12 @@ int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata)
* @len: len of MR
* @rnic_va: not used by siw
* @rights: MR access rights
+ * @dmah: dma handle
* @udata: user buffer to communicate STag and Key.
*/
struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
- u64 rnic_va, int rights, struct ib_udata *udata)
+ u64 rnic_va, int rights, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
struct siw_mr *mr = NULL;
struct siw_umem *umem = NULL;
@@ -1336,6 +1338,9 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
(void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
(unsigned long long)len);
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
siw_dbg_pd(pd, "too many mr's\n");
rv = -ENOMEM;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
index 1f1a305540af..e9f4463aecdc 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.h
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -65,7 +65,8 @@ int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
int siw_poll_cq(struct ib_cq *base_cq, int num_entries, struct ib_wc *wc);
int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags);
struct ib_mr *siw_reg_user_mr(struct ib_pd *base_pd, u64 start, u64 len,
- u64 rnic_va, int rights, struct ib_udata *udata);
+ u64 rnic_va, int rights, struct ib_dmah *dmah,
+ struct ib_udata *udata);
struct ib_mr *siw_alloc_mr(struct ib_pd *base_pd, enum ib_mr_type mr_type,
u32 max_sge);
struct ib_mr *siw_get_dma_mr(struct ib_pd *base_pd, int rights);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index f2f5465f2a90..7acafc5c0e09 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2577,6 +2577,8 @@ static struct net_device *ipoib_add_port(const char *format,
ndev->rtnl_link_ops = ipoib_get_link_ops();
+ dev_net_set(ndev, rdma_dev_net(hca));
+
result = register_netdev(ndev);
if (result) {
pr_warn("%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index a5be6f1ba12b..2e3c0516ce8f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -267,19 +267,15 @@ static int iscsi_iser_task_init(struct iscsi_task *task)
static int iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
struct iscsi_task *task)
{
- int error = 0;
-
iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt);
- error = iser_send_control(conn, task);
-
/* since iser xmits control with zero copy, tasks cannot be recycled
* right after sending them.
* The recycling scheme is based on whether a response is expected
* - if yes, the task is recycled at iscsi_complete_pdu
* - if no, the task is recycled at iser_snd_completion
*/
- return error;
+ return iser_send_control(conn, task);
}
static int iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 930b64d2115e..2cd6e1c9a778 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -7,7 +7,7 @@
obj-$(CONFIG_INPUT) += input-core.o
input-core-y := input.o input-compat.o input-mt.o input-poller.o ff-core.o
-input-core-y += touchscreen.o
+input-core-y += touchscreen.o touch-overlay.o
obj-$(CONFIG_INPUT_FF_MEMLESS) += ff-memless.o
obj-$(CONFIG_INPUT_SPARSEKMAP) += sparse-keymap.o
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index b5cbb57ee5f6..90ff6be85cf4 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -1408,8 +1408,12 @@ static void evdev_disconnect(struct input_handle *handle)
}
static const struct input_device_id evdev_ids[] = {
- { .driver_info = 1 }, /* Matches all devices */
- { }, /* Terminating zero entry */
+ {
+ /* Matches all devices */
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_SYN) },
+ },
+ { } /* Terminating zero entry */
};
MODULE_DEVICE_TABLE(input, evdev_ids);
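/*
 * Editor's note (sketch): every input device sets EV_SYN, so matching
 * on that evbit is an equivalent "match all devices" without the
 * driver_info = 1 sentinel; the input.c hunk below can then terminate
 * id-table walks on a zero ->flags alone.
 */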
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 44887e51e049..1da41324362b 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -971,7 +971,7 @@ static const struct input_device_id *input_match_device(struct input_handler *ha
{
const struct input_device_id *id;
- for (id = handler->id_table; id->flags || id->driver_info; id++) {
+ for (id = handler->id_table; id->flags; id++) {
if (input_match_device_id(dev, id) &&
(!handler->match || handler->match(handler, dev))) {
return id;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 1d8c579b5433..4c94297e17e6 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -442,8 +442,8 @@ static const signed short xpad_btn[] = {
/* used when dpad is mapped to buttons */
static const signed short xpad_btn_pad[] = {
- BTN_TRIGGER_HAPPY1, BTN_TRIGGER_HAPPY2, /* d-pad left, right */
- BTN_TRIGGER_HAPPY3, BTN_TRIGGER_HAPPY4, /* d-pad up, down */
+ BTN_DPAD_LEFT, BTN_DPAD_RIGHT, /* d-pad left, right */
+ BTN_DPAD_UP, BTN_DPAD_DOWN, /* d-pad up, down */
-1 /* terminating entry */
};
@@ -479,8 +479,8 @@ static const signed short xpad_abs_triggers[] = {
/* used when the controller has extra paddle buttons */
static const signed short xpad_btn_paddles[] = {
- BTN_TRIGGER_HAPPY5, BTN_TRIGGER_HAPPY6, /* paddle upper right, lower right */
- BTN_TRIGGER_HAPPY7, BTN_TRIGGER_HAPPY8, /* paddle upper left, lower left */
+ BTN_GRIPR, BTN_GRIPR2, /* paddle upper right, lower right */
+ BTN_GRIPL, BTN_GRIPL2, /* paddle upper left, lower left */
-1 /* terminating entry */
};
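/*
 * Editor's note (sketch): BTN_DPAD_* and the BTN_GRIP* paddle codes
 * are semantic key codes from <linux/input-event-codes.h>; replacing
 * the generic BTN_TRIGGER_HAPPY* range lets userspace identify d-pad
 * and paddle buttons without per-device quirk tables.  The remaining
 * hunks in this file apply the same substitution to each packet parser.
 */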
@@ -840,10 +840,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
/* digital pad */
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
/* dpad as buttons (left, right, up, down) */
- input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & BIT(2));
- input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & BIT(3));
- input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & BIT(1));
+ input_report_key(dev, BTN_DPAD_LEFT, data[2] & BIT(2));
+ input_report_key(dev, BTN_DPAD_RIGHT, data[2] & BIT(3));
+ input_report_key(dev, BTN_DPAD_UP, data[2] & BIT(0));
+ input_report_key(dev, BTN_DPAD_DOWN, data[2] & BIT(1));
} else {
input_report_abs(dev, ABS_HAT0X,
!!(data[2] & 0x08) - !!(data[2] & 0x04));
@@ -891,10 +891,10 @@ static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
/* digital pad */
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
/* dpad as buttons (left, right, up, down) */
- input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & BIT(2));
- input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & BIT(3));
- input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & BIT(1));
+ input_report_key(dev, BTN_DPAD_LEFT, data[2] & BIT(2));
+ input_report_key(dev, BTN_DPAD_RIGHT, data[2] & BIT(3));
+ input_report_key(dev, BTN_DPAD_UP, data[2] & BIT(0));
+ input_report_key(dev, BTN_DPAD_DOWN, data[2] & BIT(1));
}
/*
@@ -1075,10 +1075,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
data[18] = 0;
/* Elite Series 2 split packet paddle bits */
- input_report_key(dev, BTN_TRIGGER_HAPPY5, data[18] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY6, data[18] & BIT(1));
- input_report_key(dev, BTN_TRIGGER_HAPPY7, data[18] & BIT(2));
- input_report_key(dev, BTN_TRIGGER_HAPPY8, data[18] & BIT(3));
+ input_report_key(dev, BTN_GRIPR, data[18] & BIT(0));
+ input_report_key(dev, BTN_GRIPR2, data[18] & BIT(1));
+ input_report_key(dev, BTN_GRIPL, data[18] & BIT(2));
+ input_report_key(dev, BTN_GRIPL2, data[18] & BIT(3));
do_sync = true;
}
@@ -1113,10 +1113,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
/* digital pad */
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
/* dpad as buttons (left, right, up, down) */
- input_report_key(dev, BTN_TRIGGER_HAPPY1, data[5] & BIT(2));
- input_report_key(dev, BTN_TRIGGER_HAPPY2, data[5] & BIT(3));
- input_report_key(dev, BTN_TRIGGER_HAPPY3, data[5] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY4, data[5] & BIT(1));
+ input_report_key(dev, BTN_DPAD_LEFT, data[5] & BIT(2));
+ input_report_key(dev, BTN_DPAD_RIGHT, data[5] & BIT(3));
+ input_report_key(dev, BTN_DPAD_UP, data[5] & BIT(0));
+ input_report_key(dev, BTN_DPAD_DOWN, data[5] & BIT(1));
} else {
input_report_abs(dev, ABS_HAT0X,
!!(data[5] & 0x08) - !!(data[5] & 0x04));
@@ -1175,10 +1175,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
data[32] = 0;
/* OG Elite Series Controller paddle bits */
- input_report_key(dev, BTN_TRIGGER_HAPPY5, data[32] & BIT(1));
- input_report_key(dev, BTN_TRIGGER_HAPPY6, data[32] & BIT(3));
- input_report_key(dev, BTN_TRIGGER_HAPPY7, data[32] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY8, data[32] & BIT(2));
+ input_report_key(dev, BTN_GRIPR, data[32] & BIT(1));
+ input_report_key(dev, BTN_GRIPR2, data[32] & BIT(3));
+ input_report_key(dev, BTN_GRIPL, data[32] & BIT(0));
+ input_report_key(dev, BTN_GRIPL2, data[32] & BIT(2));
} else if (xpad->packet_type == PKT_XBE2_FW_OLD) {
/* Mute paddles if controller has a custom mapping applied.
* Checked by comparing the current mapping
@@ -1188,10 +1188,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
data[18] = 0;
/* Elite Series 2 4.x firmware paddle bits */
- input_report_key(dev, BTN_TRIGGER_HAPPY5, data[18] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY6, data[18] & BIT(1));
- input_report_key(dev, BTN_TRIGGER_HAPPY7, data[18] & BIT(2));
- input_report_key(dev, BTN_TRIGGER_HAPPY8, data[18] & BIT(3));
+ input_report_key(dev, BTN_GRIPR, data[18] & BIT(0));
+ input_report_key(dev, BTN_GRIPR2, data[18] & BIT(1));
+ input_report_key(dev, BTN_GRIPL, data[18] & BIT(2));
+ input_report_key(dev, BTN_GRIPL2, data[18] & BIT(3));
} else if (xpad->packet_type == PKT_XBE2_FW_5_EARLY) {
/* Mute paddles if controller has a custom mapping applied.
* Checked by comparing the current mapping
@@ -1203,10 +1203,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
/* Elite Series 2 5.x firmware paddle bits
* (before the packet was split)
*/
- input_report_key(dev, BTN_TRIGGER_HAPPY5, data[22] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY6, data[22] & BIT(1));
- input_report_key(dev, BTN_TRIGGER_HAPPY7, data[22] & BIT(2));
- input_report_key(dev, BTN_TRIGGER_HAPPY8, data[22] & BIT(3));
+ input_report_key(dev, BTN_GRIPR, data[22] & BIT(0));
+ input_report_key(dev, BTN_GRIPR2, data[22] & BIT(1));
+ input_report_key(dev, BTN_GRIPL, data[22] & BIT(2));
+ input_report_key(dev, BTN_GRIPL2, data[22] & BIT(3));
}
}
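The hunks above change which event codes userspace sees: d-pad presses now
arrive as BTN_DPAD_* and the Elite paddles as BTN_GRIP*, instead of the
generic BTN_TRIGGER_HAPPY range. A minimal evdev consumer sketch, assuming
the BTN_DPAD_* codes from <linux/input-event-codes.h> (the BTN_GRIP* codes
are introduced by this series) and a hypothetical device node:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event0", O_RDONLY);	/* hypothetical node */

	if (fd < 0)
		return 1;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type != EV_KEY)
			continue;
		/* codes reported by the remapped driver above */
		if (ev.code == BTN_DPAD_LEFT || ev.code == BTN_DPAD_RIGHT ||
		    ev.code == BTN_DPAD_UP || ev.code == BTN_DPAD_DOWN)
			printf("d-pad code 0x%x value %d\n", ev.code, ev.value);
	}
	close(fd);
	return 0;
}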
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index dc734974ce06..414fbef4abf9 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -232,8 +232,8 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned int off)
return !!(val & bit);
}
-static void adp5588_gpio_set_value(struct gpio_chip *chip,
- unsigned int off, int val)
+static int adp5588_gpio_set_value(struct gpio_chip *chip, unsigned int off,
+ int val)
{
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
@@ -246,7 +246,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
else
kpad->dat_out[bank] &= ~bit;
- adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank, kpad->dat_out[bank]);
+ return adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank,
+ kpad->dat_out[bank]);
}
static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off,
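The adp5588 hunks above follow the GPIO core's move to int-returning line
setters, so an I2C write failure is no longer silently dropped. A minimal
sketch of the same pattern, assuming a regmap-backed expander; the names
below are illustrative, not the driver's:

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

#define EXAMPLE_REG_OUT	0x01	/* hypothetical output register */

struct example_chip {
	struct regmap *regmap;
};

static int example_gpio_set(struct gpio_chip *chip, unsigned int offset,
			    int value)
{
	struct example_chip *ec = gpiochip_get_data(chip);

	/* Propagate the bus write status instead of discarding it. */
	return regmap_update_bits(ec->regmap, EXAMPLE_REG_OUT,
				  BIT(offset), value ? BIT(offset) : 0);
}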
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index c9e1127578b9..6c999d89ee4b 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -84,12 +84,12 @@ static const unsigned short atkbd_set2_keycode[ATKBD_KEYMAP_SIZE] = {
#include "hpps2atkbd.h" /* include the keyboard scancodes */
#else
- 0, 67, 65, 63, 61, 59, 60, 88, 0, 68, 66, 64, 62, 15, 41,117,
- 0, 56, 42, 93, 29, 16, 2, 0, 0, 0, 44, 31, 30, 17, 3, 0,
- 0, 46, 45, 32, 18, 5, 4, 95, 0, 57, 47, 33, 20, 19, 6,183,
- 0, 49, 48, 35, 34, 21, 7,184, 0, 0, 50, 36, 22, 8, 9,185,
- 0, 51, 37, 23, 24, 11, 10, 0, 0, 52, 53, 38, 39, 25, 12, 0,
- 0, 89, 40, 0, 26, 13, 0,193, 58, 54, 28, 27, 0, 43, 0, 85,
+ 0, 67, 65, 63, 61, 59, 60, 88,183, 68, 66, 64, 62, 15, 41,117,
+ 184, 56, 42, 93, 29, 16, 2, 0,185, 0, 44, 31, 30, 17, 3, 0,
+ 186, 46, 45, 32, 18, 5, 4, 95,187, 57, 47, 33, 20, 19, 6,183,
+ 188, 49, 48, 35, 34, 21, 7,184,189, 0, 50, 36, 22, 8, 9,185,
+ 190, 51, 37, 23, 24, 11, 10, 0,191, 52, 53, 38, 39, 25, 12, 0,
+ 192, 89, 40, 0, 26, 13, 0,193, 58, 54, 28, 27, 0, 43, 0,194,
0, 86, 91, 90, 92, 0, 14, 94, 0, 79,124, 75, 71,121, 0, 0,
82, 83, 80, 76, 77, 72, 1, 69, 87, 78, 81, 74, 55, 73, 70, 99,
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 061d48350df6..50e2e792c91d 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -12,6 +12,7 @@
#include <linux/mfd/mt6331/registers.h>
#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6359/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/registers.h>
#include <linux/module.h>
@@ -117,6 +118,19 @@ static const struct mtk_pmic_regs mt6358_regs = {
.rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
+static const struct mtk_pmic_regs mt6359_regs = {
+ .keys_regs[MTK_PMIC_PWRKEY_INDEX] =
+ MTK_PMIC_KEYS_REGS(MT6359_TOPSTATUS,
+ 0x2, MT6359_PSC_TOP_INT_CON0, 0x5,
+ MTK_PMIC_PWRKEY_RST),
+ .keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
+ MTK_PMIC_KEYS_REGS(MT6359_TOPSTATUS,
+ 0x8, MT6359_PSC_TOP_INT_CON0, 0xa,
+ MTK_PMIC_HOMEKEY_RST),
+ .pmic_rst_reg = MT6359_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
+};
+
struct mtk_pmic_keys_info {
struct mtk_pmic_keys *keys;
const struct mtk_pmic_keys_regs *regs;
@@ -297,6 +311,9 @@ static const struct of_device_id of_mtk_pmic_keys_match_tbl[] = {
.compatible = "mediatek,mt6358-keys",
.data = &mt6358_regs,
}, {
+ .compatible = "mediatek,mt6359-keys",
+ .data = &mt6359_regs,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 9f1049aa3048..17127269e3f0 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -7,6 +7,7 @@
* Author: Donghwa Lee <dh09.lee@samsung.com>
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -29,11 +30,11 @@
#define SAMSUNG_KEYIFFC 0x10
/* SAMSUNG_KEYIFCON */
-#define SAMSUNG_KEYIFCON_INT_F_EN (1 << 0)
-#define SAMSUNG_KEYIFCON_INT_R_EN (1 << 1)
-#define SAMSUNG_KEYIFCON_DF_EN (1 << 2)
-#define SAMSUNG_KEYIFCON_FC_EN (1 << 3)
-#define SAMSUNG_KEYIFCON_WAKEUPEN (1 << 4)
+#define SAMSUNG_KEYIFCON_INT_F_EN BIT(0)
+#define SAMSUNG_KEYIFCON_INT_R_EN BIT(1)
+#define SAMSUNG_KEYIFCON_DF_EN BIT(2)
+#define SAMSUNG_KEYIFCON_FC_EN BIT(3)
+#define SAMSUNG_KEYIFCON_WAKEUPEN BIT(4)
/* SAMSUNG_KEYIFSTSCLR */
#define SAMSUNG_KEYIFSTSCLR_P_INT_MASK (0xff << 0)
@@ -44,8 +45,7 @@
#define S5PV210_KEYIFSTSCLR_R_INT_OFFSET 16
/* SAMSUNG_KEYIFCOL */
-#define SAMSUNG_KEYIFCOL_MASK (0xff << 0)
-#define S5PV210_KEYIFCOLEN_MASK (0xff << 8)
+#define SAMSUNG_KEYIFCOL_MASK 0xff
/* SAMSUNG_KEYIFROW */
#define SAMSUNG_KEYIFROW_MASK (0xff << 0)
@@ -54,12 +54,12 @@
/* SAMSUNG_KEYIFFC */
#define SAMSUNG_KEYIFFC_MASK (0x3ff << 0)
-enum samsung_keypad_type {
- KEYPAD_TYPE_SAMSUNG,
- KEYPAD_TYPE_S5PV210,
+struct samsung_chip_info {
+ unsigned int column_shift;
};
struct samsung_keypad {
+ const struct samsung_chip_info *chip;
struct input_dev *input_dev;
struct platform_device *pdev;
struct clk *clk;
@@ -68,7 +68,6 @@ struct samsung_keypad {
bool stopped;
bool wake_enabled;
int irq;
- enum samsung_keypad_type type;
unsigned int row_shift;
unsigned int rows;
unsigned int cols;
@@ -83,19 +82,14 @@ static void samsung_keypad_scan(struct samsung_keypad *keypad,
unsigned int val;
for (col = 0; col < keypad->cols; col++) {
- if (keypad->type == KEYPAD_TYPE_S5PV210) {
- val = S5PV210_KEYIFCOLEN_MASK;
- val &= ~(1 << col) << 8;
- } else {
- val = SAMSUNG_KEYIFCOL_MASK;
- val &= ~(1 << col);
- }
+ val = SAMSUNG_KEYIFCOL_MASK & ~BIT(col);
+ val <<= keypad->chip->column_shift;
writel(val, keypad->base + SAMSUNG_KEYIFCOL);
mdelay(1);
val = readl(keypad->base + SAMSUNG_KEYIFROW);
- row_state[col] = ~val & ((1 << keypad->rows) - 1);
+ row_state[col] = ~val & GENMASK(keypad->rows - 1, 0);
}
/* KEYIFCOL reg clear */
@@ -119,10 +113,10 @@ static bool samsung_keypad_report(struct samsung_keypad *keypad,
continue;
for (row = 0; row < keypad->rows; row++) {
- if (!(changed & (1 << row)))
+ if (!(changed & BIT(row)))
continue;
- pressed = row_state[col] & (1 << row);
+ pressed = row_state[col] & BIT(row);
dev_dbg(&keypad->input_dev->dev,
"key %s, row: %d, col: %d\n",
@@ -314,11 +308,11 @@ static int samsung_keypad_probe(struct platform_device *pdev)
{
const struct samsung_keypad_platdata *pdata;
const struct matrix_keymap_data *keymap_data;
+ const struct platform_device_id *id;
struct samsung_keypad *keypad;
struct resource *res;
struct input_dev *input_dev;
unsigned int row_shift;
- unsigned int keymap_size;
int error;
pdata = dev_get_platdata(&pdev->dev);
@@ -345,12 +339,16 @@ static int samsung_keypad_probe(struct platform_device *pdev)
pdata->cfg_gpio(pdata->rows, pdata->cols);
row_shift = get_count_order(pdata->cols);
- keymap_size = (pdata->rows << row_shift) * sizeof(keypad->keycodes[0]);
- keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad) + keymap_size,
+ keypad = devm_kzalloc(&pdev->dev,
+ struct_size(keypad, keycodes,
+ pdata->rows << row_shift),
GFP_KERNEL);
+ if (!keypad)
+ return -ENOMEM;
+
input_dev = devm_input_allocate_device(&pdev->dev);
- if (!keypad || !input_dev)
+ if (!input_dev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -361,18 +359,12 @@ static int samsung_keypad_probe(struct platform_device *pdev)
if (!keypad->base)
return -EBUSY;
- keypad->clk = devm_clk_get(&pdev->dev, "keypad");
+ keypad->clk = devm_clk_get_prepared(&pdev->dev, "keypad");
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get keypad clk\n");
return PTR_ERR(keypad->clk);
}
- error = clk_prepare(keypad->clk);
- if (error) {
- dev_err(&pdev->dev, "keypad clock prepare failed\n");
- return error;
- }
-
keypad->input_dev = input_dev;
keypad->pdev = pdev;
keypad->row_shift = row_shift;
@@ -381,15 +373,20 @@ static int samsung_keypad_probe(struct platform_device *pdev)
keypad->stopped = true;
init_waitqueue_head(&keypad->wait);
- if (pdev->dev.of_node)
- keypad->type = of_device_is_compatible(pdev->dev.of_node,
- "samsung,s5pv210-keypad");
- else
- keypad->type = platform_get_device_id(pdev)->driver_data;
+ keypad->chip = device_get_match_data(&pdev->dev);
+ if (!keypad->chip) {
+ id = platform_get_device_id(pdev);
+ if (id)
+ keypad->chip = (const void *)id->driver_data;
+ }
+
+ if (!keypad->chip) {
+ dev_err(&pdev->dev, "Unable to determine chip type");
+ return -EINVAL;
+ }
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
- input_dev->dev.parent = &pdev->dev;
input_dev->open = samsung_keypad_open;
input_dev->close = samsung_keypad_close;
@@ -399,7 +396,7 @@ static int samsung_keypad_probe(struct platform_device *pdev)
keypad->keycodes, input_dev);
if (error) {
dev_err(&pdev->dev, "failed to build keymap\n");
- goto err_unprepare_clk;
+ return error;
}
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
@@ -411,7 +408,7 @@ static int samsung_keypad_probe(struct platform_device *pdev)
keypad->irq = platform_get_irq(pdev, 0);
if (keypad->irq < 0) {
error = keypad->irq;
- goto err_unprepare_clk;
+ return error;
}
error = devm_request_threaded_irq(&pdev->dev, keypad->irq, NULL,
@@ -419,16 +416,19 @@ static int samsung_keypad_probe(struct platform_device *pdev)
dev_name(&pdev->dev), keypad);
if (error) {
dev_err(&pdev->dev, "failed to register keypad interrupt\n");
- goto err_unprepare_clk;
+ return error;
}
device_init_wakeup(&pdev->dev, pdata->wakeup);
platform_set_drvdata(pdev, keypad);
- pm_runtime_enable(&pdev->dev);
+
+ error = devm_pm_runtime_enable(&pdev->dev);
+ if (error)
+ return error;
error = input_register_device(keypad->input_dev);
if (error)
- goto err_disable_runtime_pm;
+ return error;
if (pdev->dev.of_node) {
devm_kfree(&pdev->dev, (void *)pdata->keymap_data->keymap);
@@ -436,23 +436,6 @@ static int samsung_keypad_probe(struct platform_device *pdev)
devm_kfree(&pdev->dev, (void *)pdata);
}
return 0;
-
-err_disable_runtime_pm:
- pm_runtime_disable(&pdev->dev);
-err_unprepare_clk:
- clk_unprepare(keypad->clk);
- return error;
-}
-
-static void samsung_keypad_remove(struct platform_device *pdev)
-{
- struct samsung_keypad *keypad = platform_get_drvdata(pdev);
-
- pm_runtime_disable(&pdev->dev);
-
- input_unregister_device(keypad->input_dev);
-
- clk_unprepare(keypad->clk);
}
static int samsung_keypad_runtime_suspend(struct device *dev)
@@ -528,15 +511,13 @@ static int samsung_keypad_suspend(struct device *dev)
struct samsung_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
- mutex_lock(&input_dev->mutex);
+ guard(mutex)(&input_dev->mutex);
if (input_device_enabled(input_dev))
samsung_keypad_stop(keypad);
samsung_keypad_toggle_wakeup(keypad, true);
- mutex_unlock(&input_dev->mutex);
-
return 0;
}
@@ -546,15 +527,13 @@ static int samsung_keypad_resume(struct device *dev)
struct samsung_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
- mutex_lock(&input_dev->mutex);
+ guard(mutex)(&input_dev->mutex);
samsung_keypad_toggle_wakeup(keypad, false);
if (input_device_enabled(input_dev))
samsung_keypad_start(keypad);
- mutex_unlock(&input_dev->mutex);
-
return 0;
}
@@ -564,11 +543,24 @@ static const struct dev_pm_ops samsung_keypad_pm_ops = {
samsung_keypad_runtime_resume, NULL)
};
+static const struct samsung_chip_info samsung_s3c6410_chip_info = {
+ .column_shift = 0,
+};
+
+static const struct samsung_chip_info samsung_s5pv210_chip_info = {
+ .column_shift = 8,
+};
+
#ifdef CONFIG_OF
static const struct of_device_id samsung_keypad_dt_match[] = {
- { .compatible = "samsung,s3c6410-keypad" },
- { .compatible = "samsung,s5pv210-keypad" },
- {},
+ {
+ .compatible = "samsung,s3c6410-keypad",
+ .data = &samsung_s3c6410_chip_info,
+ }, {
+ .compatible = "samsung,s5pv210-keypad",
+ .data = &samsung_s5pv210_chip_info,
+ },
+ { }
};
MODULE_DEVICE_TABLE(of, samsung_keypad_dt_match);
#endif
@@ -576,18 +568,17 @@ MODULE_DEVICE_TABLE(of, samsung_keypad_dt_match);
static const struct platform_device_id samsung_keypad_driver_ids[] = {
{
.name = "samsung-keypad",
- .driver_data = KEYPAD_TYPE_SAMSUNG,
+ .driver_data = (kernel_ulong_t)&samsung_s3c6410_chip_info,
}, {
.name = "s5pv210-keypad",
- .driver_data = KEYPAD_TYPE_S5PV210,
+ .driver_data = (kernel_ulong_t)&samsung_s5pv210_chip_info,
},
- { },
+ { }
};
MODULE_DEVICE_TABLE(platform, samsung_keypad_driver_ids);
static struct platform_driver samsung_keypad_driver = {
.probe = samsung_keypad_probe,
- .remove = samsung_keypad_remove,
.driver = {
.name = "samsung-keypad",
.of_match_table = of_match_ptr(samsung_keypad_dt_match),
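The suspend/resume hunks above use scope-based locking from
<linux/cleanup.h>: guard(mutex) releases the lock automatically when the
enclosing scope ends, which is what allows the explicit unlock calls to be
dropped. A minimal sketch, not the driver's code:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_count;

static int example_update(bool allowed)
{
	guard(mutex)(&example_lock);

	if (!allowed)
		return -EPERM;	/* lock is released on this early return */

	example_count++;
	return 0;		/* ...and here, at end of scope */
}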
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index f5496ca0c0d2..0fb21c99a5e3 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -584,13 +584,6 @@ config INPUT_PALMAS_PWRBUTTON
To compile this driver as a module, choose M here. The module will
be called palmas_pwrbutton.
-config INPUT_PCF50633_PMU
- tristate "PCF50633 PMU events"
- depends on MFD_PCF50633
- help
- Say Y to include support for delivering PMU events via input
- layer on NXP PCF50633.
-
config INPUT_PCF8574
tristate "PCF8574 Keypad input device"
depends on I2C
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 6d91804d0a6f..d468c8140b93 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -59,7 +59,6 @@ obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o
obj-$(CONFIG_INPUT_MMA8450) += mma8450.o
obj-$(CONFIG_INPUT_PALMAS_PWRBUTTON) += palmas-pwrbutton.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
-obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
obj-$(CONFIG_INPUT_PM8941_PWRKEY) += pm8941-pwrkey.o
diff --git a/drivers/input/misc/cs40l50-vibra.c b/drivers/input/misc/cs40l50-vibra.c
index 330f09123631..7aa7d577e01b 100644
--- a/drivers/input/misc/cs40l50-vibra.c
+++ b/drivers/input/misc/cs40l50-vibra.c
@@ -482,7 +482,6 @@ static int cs40l50_erase(struct input_dev *dev, int effect_id)
static void cs40l50_remove_wq(void *data)
{
- flush_workqueue(data);
destroy_workqueue(data);
}
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 1dfd7b95a4ce..5d45680d74a4 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -68,15 +68,16 @@ struct max77693_haptic {
static int max77693_haptic_set_duty_cycle(struct max77693_haptic *haptic)
{
- struct pwm_args pargs;
- int delta;
+ struct pwm_state state;
int error;
- pwm_get_args(haptic->pwm_dev, &pargs);
- delta = (pargs.period + haptic->pwm_duty) / 2;
- error = pwm_config(haptic->pwm_dev, delta, pargs.period);
+ pwm_init_state(haptic->pwm_dev, &state);
+ state.duty_cycle = (state.period + haptic->pwm_duty) / 2;
+
+ error = pwm_apply_might_sleep(haptic->pwm_dev, &state);
if (error) {
- dev_err(haptic->dev, "failed to configure pwm: %d\n", error);
+ dev_err(haptic->dev,
+ "failed to set pwm duty cycle: %d\n", error);
return error;
}
@@ -166,12 +167,17 @@ static int max77693_haptic_lowsys(struct max77693_haptic *haptic, bool enable)
static void max77693_haptic_enable(struct max77693_haptic *haptic)
{
+ struct pwm_state state;
int error;
if (haptic->enabled)
return;
- error = pwm_enable(haptic->pwm_dev);
+ pwm_init_state(haptic->pwm_dev, &state);
+ state.duty_cycle = (state.period + haptic->pwm_duty) / 2;
+ state.enabled = true;
+
+ error = pwm_apply_might_sleep(haptic->pwm_dev, &state);
if (error) {
dev_err(haptic->dev,
"failed to enable haptic pwm device: %d\n", error);
@@ -224,18 +230,13 @@ static void max77693_haptic_play_work(struct work_struct *work)
{
struct max77693_haptic *haptic =
container_of(work, struct max77693_haptic, work);
- int error;
-
- error = max77693_haptic_set_duty_cycle(haptic);
- if (error) {
- dev_err(haptic->dev, "failed to set duty cycle: %d\n", error);
- return;
- }
- if (haptic->magnitude)
- max77693_haptic_enable(haptic);
- else
+ if (!haptic->magnitude)
max77693_haptic_disable(haptic);
+ else if (haptic->enabled)
+ max77693_haptic_set_duty_cycle(haptic);
+ else
+ max77693_haptic_enable(haptic);
}
static int max77693_haptic_play_effect(struct input_dev *dev, void *data,
@@ -340,12 +341,6 @@ static int max77693_haptic_probe(struct platform_device *pdev)
return PTR_ERR(haptic->pwm_dev);
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to the
- * atomic PWM API.
- */
- pwm_apply_args(haptic->pwm_dev);
-
haptic->motor_reg = devm_regulator_get(&pdev->dev, "haptic");
if (IS_ERR(haptic->motor_reg)) {
dev_err(&pdev->dev, "failed to get regulator\n");
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index f97f341ee0bb..d5e051a25a74 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -53,40 +53,30 @@ struct max8997_haptic {
unsigned int pattern_signal_period;
};
-static int max8997_haptic_set_duty_cycle(struct max8997_haptic *chip)
+static void max8997_haptic_set_internal_duty_cycle(struct max8997_haptic *chip)
{
- int ret = 0;
+ u8 duty_index = DIV_ROUND_UP(chip->level * 64, 100);
- if (chip->mode == MAX8997_EXTERNAL_MODE) {
- unsigned int duty = chip->pwm_period * chip->level / 100;
- ret = pwm_config(chip->pwm, duty, chip->pwm_period);
- } else {
- u8 duty_index = 0;
-
- duty_index = DIV_ROUND_UP(chip->level * 64, 100);
-
- switch (chip->internal_mode_pattern) {
- case 0:
- max8997_write_reg(chip->client,
- MAX8997_HAPTIC_REG_SIGPWMDC1, duty_index);
- break;
- case 1:
- max8997_write_reg(chip->client,
- MAX8997_HAPTIC_REG_SIGPWMDC2, duty_index);
- break;
- case 2:
- max8997_write_reg(chip->client,
- MAX8997_HAPTIC_REG_SIGPWMDC3, duty_index);
- break;
- case 3:
- max8997_write_reg(chip->client,
- MAX8997_HAPTIC_REG_SIGPWMDC4, duty_index);
- break;
- default:
- break;
- }
+ switch (chip->internal_mode_pattern) {
+ case 0:
+ max8997_write_reg(chip->client,
+ MAX8997_HAPTIC_REG_SIGPWMDC1, duty_index);
+ break;
+ case 1:
+ max8997_write_reg(chip->client,
+ MAX8997_HAPTIC_REG_SIGPWMDC2, duty_index);
+ break;
+ case 2:
+ max8997_write_reg(chip->client,
+ MAX8997_HAPTIC_REG_SIGPWMDC3, duty_index);
+ break;
+ case 3:
+ max8997_write_reg(chip->client,
+ MAX8997_HAPTIC_REG_SIGPWMDC4, duty_index);
+ break;
+ default:
+ break;
}
- return ret;
}
static void max8997_haptic_configure(struct max8997_haptic *chip)
@@ -155,11 +145,8 @@ static void max8997_haptic_enable(struct max8997_haptic *chip)
guard(mutex)(&chip->mutex);
- error = max8997_haptic_set_duty_cycle(chip);
- if (error) {
- dev_err(chip->dev, "set_pwm_cycle failed, error: %d\n", error);
- return;
- }
+ if (chip->mode != MAX8997_EXTERNAL_MODE)
+ max8997_haptic_set_internal_duty_cycle(chip);
if (!chip->enabled) {
error = regulator_enable(chip->regulator);
@@ -168,16 +155,32 @@ static void max8997_haptic_enable(struct max8997_haptic *chip)
return;
}
max8997_haptic_configure(chip);
- if (chip->mode == MAX8997_EXTERNAL_MODE) {
- error = pwm_enable(chip->pwm);
- if (error) {
- dev_err(chip->dev, "Failed to enable PWM\n");
- regulator_disable(chip->regulator);
- return;
- }
+ }
+
+ /*
+ * It would be more straightforward to configure the external PWM
+ * earlier, i.e. when the internal duty cycle is set up in internal
+ * mode. But historically this is done only after the regulator was
+ * enabled and max8997_haptic_configure() set the enable bit in
+ * MAX8997_HAPTIC_REG_CONF2, so it is better to keep it this way.
+ */
+ if (chip->mode == MAX8997_EXTERNAL_MODE) {
+ struct pwm_state state;
+
+ pwm_init_state(chip->pwm, &state);
+ state.period = chip->pwm_period;
+ state.duty_cycle = chip->pwm_period * chip->level / 100;
+ state.enabled = true;
+
+ error = pwm_apply_might_sleep(chip->pwm, &state);
+ if (error) {
+ dev_err(chip->dev, "Failed to enable PWM\n");
+ regulator_disable(chip->regulator);
+ return;
}
- chip->enabled = true;
}
+
+ chip->enabled = true;
}
static void max8997_haptic_disable(struct max8997_haptic *chip)
@@ -282,11 +285,6 @@ static int max8997_haptic_probe(struct platform_device *pdev)
goto err_free_mem;
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(chip->pwm);
break;
default:
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
deleted file mode 100644
index 6d046e236ba6..000000000000
--- a/drivers/input/misc/pcf50633-input.c
+++ /dev/null
@@ -1,113 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NXP PCF50633 Input Driver
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * Author: Balaji Rao <balajirrao@openmoko.org>
- * All rights reserved.
- *
- * Broken down from monstrous PCF50633 driver mainly by
- * Harald Welte, Andy Green and Werner Almesberger
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/slab.h>
-
-#include <linux/mfd/pcf50633/core.h>
-
-#define PCF50633_OOCSTAT_ONKEY 0x01
-#define PCF50633_REG_OOCSTAT 0x12
-#define PCF50633_REG_OOCMODE 0x10
-
-struct pcf50633_input {
- struct pcf50633 *pcf;
- struct input_dev *input_dev;
-};
-
-static void
-pcf50633_input_irq(int irq, void *data)
-{
- struct pcf50633_input *input;
- int onkey_released;
-
- input = data;
-
- /* We report only one event depending on the key press status */
- onkey_released = pcf50633_reg_read(input->pcf, PCF50633_REG_OOCSTAT)
- & PCF50633_OOCSTAT_ONKEY;
-
- if (irq == PCF50633_IRQ_ONKEYF && !onkey_released)
- input_report_key(input->input_dev, KEY_POWER, 1);
- else if (irq == PCF50633_IRQ_ONKEYR && onkey_released)
- input_report_key(input->input_dev, KEY_POWER, 0);
-
- input_sync(input->input_dev);
-}
-
-static int pcf50633_input_probe(struct platform_device *pdev)
-{
- struct pcf50633_input *input;
- struct input_dev *input_dev;
- int ret;
-
-
- input = kzalloc(sizeof(*input), GFP_KERNEL);
- if (!input)
- return -ENOMEM;
-
- input_dev = input_allocate_device();
- if (!input_dev) {
- kfree(input);
- return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, input);
- input->pcf = dev_to_pcf50633(pdev->dev.parent);
- input->input_dev = input_dev;
-
- input_dev->name = "PCF50633 PMU events";
- input_dev->id.bustype = BUS_I2C;
- input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_PWR);
- set_bit(KEY_POWER, input_dev->keybit);
-
- ret = input_register_device(input_dev);
- if (ret) {
- input_free_device(input_dev);
- kfree(input);
- return ret;
- }
- pcf50633_register_irq(input->pcf, PCF50633_IRQ_ONKEYR,
- pcf50633_input_irq, input);
- pcf50633_register_irq(input->pcf, PCF50633_IRQ_ONKEYF,
- pcf50633_input_irq, input);
-
- return 0;
-}
-
-static void pcf50633_input_remove(struct platform_device *pdev)
-{
- struct pcf50633_input *input = platform_get_drvdata(pdev);
-
- pcf50633_free_irq(input->pcf, PCF50633_IRQ_ONKEYR);
- pcf50633_free_irq(input->pcf, PCF50633_IRQ_ONKEYF);
-
- input_unregister_device(input->input_dev);
- kfree(input);
-}
-
-static struct platform_driver pcf50633_input_driver = {
- .driver = {
- .name = "pcf50633-input",
- },
- .probe = pcf50633_input_probe,
- .remove = pcf50633_input_remove,
-};
-module_platform_driver(pcf50633_input_driver);
-
-MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
-MODULE_DESCRIPTION("PCF50633 input driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pcf50633-input");
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index c0163b983ce6..5db58fc9e11b 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -82,6 +82,21 @@ config RMI4_F12
touchpads. For sensors that support relative pointing, F12 also
provides mouse input.
+config RMI4_F1A
+ bool "RMI4 Function 1A (0D pointing)"
+ help
+ Say Y here if you want to add support for RMI4 function 1A.
+
+ Function 1A provides capacitive keys support for RMI4 devices.
+
+config RMI4_F21
+ bool "RMI4 Function 21 (PRESSURE)"
+ help
+ Say Y here if you want to add support for RMI4 function 21.
+
+ Function 21 provides buttons/pressure handling for RMI4 devices.
+ This includes support for buttons/pressure on PressurePad.
+
config RMI4_F30
bool "RMI4 Function 30 (GPIO LED)"
help
diff --git a/drivers/input/rmi4/Makefile b/drivers/input/rmi4/Makefile
index 02f14c846861..35ae29f72497 100644
--- a/drivers/input/rmi4/Makefile
+++ b/drivers/input/rmi4/Makefile
@@ -8,6 +8,8 @@ rmi_core-$(CONFIG_RMI4_2D_SENSOR) += rmi_2d_sensor.o
rmi_core-$(CONFIG_RMI4_F03) += rmi_f03.o
rmi_core-$(CONFIG_RMI4_F11) += rmi_f11.o
rmi_core-$(CONFIG_RMI4_F12) += rmi_f12.o
+rmi_core-$(CONFIG_RMI4_F1A) += rmi_f1a.o
+rmi_core-$(CONFIG_RMI4_F21) += rmi_f21.o
rmi_core-$(CONFIG_RMI4_F30) += rmi_f30.o
rmi_core-$(CONFIG_RMI4_F34) += rmi_f34.o rmi_f34v7.o
rmi_core-$(CONFIG_RMI4_F3A) += rmi_f3a.o
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index 3aee04837205..5f98c3bcfd46 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -360,6 +360,12 @@ static struct rmi_function_handler *fn_handlers[] = {
#ifdef CONFIG_RMI4_F12
&rmi_f12_handler,
#endif
+#ifdef CONFIG_RMI4_F1A
+ &rmi_f1a_handler,
+#endif
+#ifdef CONFIG_RMI4_F21
+ &rmi_f21_handler,
+#endif
#ifdef CONFIG_RMI4_F30
&rmi_f30_handler,
#endif
diff --git a/drivers/input/rmi4/rmi_driver.h b/drivers/input/rmi4/rmi_driver.h
index 3bfe9013043e..e84495caab15 100644
--- a/drivers/input/rmi4/rmi_driver.h
+++ b/drivers/input/rmi4/rmi_driver.h
@@ -133,6 +133,8 @@ extern struct rmi_function_handler rmi_f01_handler;
extern struct rmi_function_handler rmi_f03_handler;
extern struct rmi_function_handler rmi_f11_handler;
extern struct rmi_function_handler rmi_f12_handler;
+extern struct rmi_function_handler rmi_f1a_handler;
+extern struct rmi_function_handler rmi_f21_handler;
extern struct rmi_function_handler rmi_f30_handler;
extern struct rmi_function_handler rmi_f34_handler;
extern struct rmi_function_handler rmi_f3a_handler;
diff --git a/drivers/input/rmi4/rmi_f1a.c b/drivers/input/rmi4/rmi_f1a.c
new file mode 100644
index 000000000000..765e9eae4cdc
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f1a.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 André Apitzsch <git@apitzsch.eu>
+ */
+
+#include <linux/input.h>
+#include <linux/property.h>
+#include "rmi_driver.h"
+
+struct f1a_data {
+ struct input_dev *input;
+
+ u32 *keymap;
+ unsigned int num_keys;
+};
+
+static int rmi_f1a_parse_device_properties(struct rmi_function *fn, struct f1a_data *f1a)
+{
+ static const char buttons_property[] = "linux,keycodes";
+ struct device *dev = &fn->dev;
+ u32 *buttonmap;
+ int n_keys;
+ int error;
+
+ if (!device_property_present(dev, buttons_property))
+ return 0;
+
+ n_keys = device_property_count_u32(dev, buttons_property);
+ if (n_keys <= 0) {
+ error = n_keys < 0 ? n_keys : -EINVAL;
+ dev_err(dev, "Invalid/malformed '%s' property: %d\n",
+ buttons_property, error);
+ return error;
+ }
+
+ buttonmap = devm_kmalloc_array(dev, n_keys, sizeof(*buttonmap),
+ GFP_KERNEL);
+ if (!buttonmap)
+ return -ENOMEM;
+
+ error = device_property_read_u32_array(dev, buttons_property,
+ buttonmap, n_keys);
+ if (error) {
+ dev_err(dev, "Failed to parse '%s' property: %d\n",
+ buttons_property, error);
+ return error;
+ }
+
+ f1a->keymap = buttonmap;
+ f1a->num_keys = n_keys;
+
+ return 0;
+}
+
+static irqreturn_t rmi_f1a_attention(int irq, void *ctx)
+{
+ struct rmi_function *fn = ctx;
+ struct f1a_data *f1a = dev_get_drvdata(&fn->dev);
+ char button_bitmask;
+ int key;
+ int error;
+
+ error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr,
+ &button_bitmask, sizeof(button_bitmask));
+ if (error) {
+ dev_err(&fn->dev, "Failed to read object data. Code: %d.\n",
+ error);
+ return IRQ_RETVAL(error);
+ }
+
+ for (key = 0; key < f1a->num_keys; key++)
+ input_report_key(f1a->input, f1a->keymap[key],
+ button_bitmask & BIT(key));
+
+ return IRQ_HANDLED;
+}
+
+static int rmi_f1a_config(struct rmi_function *fn)
+{
+ struct f1a_data *f1a = dev_get_drvdata(&fn->dev);
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+
+ if (f1a->num_keys)
+ drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+ return 0;
+}
+
+static int rmi_f1a_initialize(struct rmi_function *fn, struct f1a_data *f1a)
+{
+ int error;
+ int i;
+
+ error = rmi_f1a_parse_device_properties(fn, f1a);
+ if (error)
+ return error;
+
+ for (i = 0; i < f1a->num_keys; i++)
+ input_set_capability(f1a->input, EV_KEY, f1a->keymap[i]);
+
+ f1a->input->keycode = f1a->keymap;
+ f1a->input->keycodemax = f1a->num_keys;
+ f1a->input->keycodesize = sizeof(f1a->keymap[0]);
+
+ return 0;
+}
+
+static int rmi_f1a_probe(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ struct f1a_data *f1a;
+ int error;
+
+ if (!drv_data->input) {
+ dev_info(&fn->dev, "F1A: no input device found, ignoring\n");
+ return -ENXIO;
+ }
+
+ f1a = devm_kzalloc(&fn->dev, sizeof(*f1a), GFP_KERNEL);
+ if (!f1a)
+ return -ENOMEM;
+
+ f1a->input = drv_data->input;
+
+ error = rmi_f1a_initialize(fn, f1a);
+ if (error)
+ return error;
+
+ dev_set_drvdata(&fn->dev, f1a);
+
+ return 0;
+}
+
+struct rmi_function_handler rmi_f1a_handler = {
+ .driver = {
+ .name = "rmi4_f1a",
+ },
+ .func = 0x1a,
+ .probe = rmi_f1a_probe,
+ .config = rmi_f1a_config,
+ .attention = rmi_f1a_attention,
+};
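For context, the keymap above comes from the generic "linux,keycodes"
firmware property. As an illustrative (not normative) example, a node with
linux,keycodes = <KEY_BACK KEY_HOMEPAGE KEY_MENU> would yield num_keys = 3,
one EV_KEY capability per entry, and bit N of the F1A data register driving
keymap[N] in rmi_f1a_attention().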
diff --git a/drivers/input/rmi4/rmi_f21.c b/drivers/input/rmi4/rmi_f21.c
new file mode 100644
index 000000000000..e3a9dfc3f0ec
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f21.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2025 Synaptics Incorporated
+ */
+
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include "rmi_driver.h"
+
+#define RMI_F21_SENSOR_COUNT_MASK GENMASK(3, 0)
+#define RMI_F21_FINGER_COUNT_PRESENT BIT(5)
+#define RMI_F21_NEW_REPORT_FORMAT BIT(6)
+
+#define RMI_F21_FINGER_COUNT_MASK GENMASK(3, 0)
+
+#define RMI_F21_MAX_SENSORS 16
+#define RMI_F21_MAX_FINGERS 16
+#define RMI_F21_DATA_REGS_MAX_SIZE (RMI_F21_MAX_SENSORS * 2 + \
+ RMI_F21_MAX_FINGERS * 2 + 1)
+
+#define RMI_F21_FORCE_CLICK_BIT BIT(0)
+
+#define RMI_F21_FORCEPAD_BUTTON_COUNT 1
+
+struct f21_data {
+ struct input_dev *input;
+ u16 key_code;
+
+ unsigned int attn_data_size;
+ unsigned int attn_data_button_offset;
+
+ unsigned int data_reg_size;
+ unsigned int data_reg_button_offset;
+ u8 data_regs[RMI_F21_DATA_REGS_MAX_SIZE];
+};
+
+static irqreturn_t rmi_f21_attention(int irq, void *ctx)
+{
+ struct rmi_function *fn = ctx;
+ struct f21_data *f21 = dev_get_drvdata(&fn->dev);
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
+ u8 *pdata;
+ int error;
+ bool pressed;
+
+ if (drvdata->attn_data.data) {
+ if (drvdata->attn_data.size < f21->attn_data_size) {
+ dev_warn(&fn->dev, "f21 interrupt, but data is missing\n");
+ return IRQ_HANDLED;
+ }
+
+ pdata = drvdata->attn_data.data + f21->attn_data_button_offset;
+
+ drvdata->attn_data.data += f21->attn_data_size;
+ drvdata->attn_data.size -= f21->attn_data_size;
+ } else {
+ error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr,
+ f21->data_regs, f21->data_reg_size);
+ if (error) {
+ dev_err(&fn->dev, "failed to read f21 data registers: %d\n",
+ error);
+ return IRQ_RETVAL(error);
+ }
+
+ pdata = f21->data_regs + f21->data_reg_button_offset;
+ }
+
+ pressed = *pdata & RMI_F21_FORCE_CLICK_BIT;
+ input_report_key(f21->input, f21->key_code, pressed);
+
+ return IRQ_HANDLED;
+}
+
+static int rmi_f21_config(struct rmi_function *fn)
+{
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+
+ drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+ return 0;
+}
+
+static int rmi_f21_initialize(struct rmi_function *fn, struct f21_data *f21)
+{
+ struct input_dev *input = f21->input;
+
+ f21->key_code = BTN_LEFT;
+
+ input->keycode = &f21->key_code;
+ input->keycodesize = sizeof(f21->key_code);
+ input->keycodemax = RMI_F21_FORCEPAD_BUTTON_COUNT;
+
+ input_set_capability(input, EV_KEY, f21->key_code);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+ return 0;
+}
+
+static int rmi_f21_probe(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ struct f21_data *f21;
+ unsigned int sensor_count;
+ unsigned int max_fingers;
+ unsigned int query15_offset;
+ u8 query15_data;
+ int error;
+
+ if (!drv_data->input) {
+ dev_info(&fn->dev, "f21: no input device found, ignoring\n");
+ return -ENXIO;
+ }
+
+ f21 = devm_kzalloc(&fn->dev, sizeof(*f21), GFP_KERNEL);
+ if (!f21)
+ return -ENOMEM;
+
+ f21->input = drv_data->input;
+
+ error = rmi_f21_initialize(fn, f21);
+ if (error)
+ return error;
+
+ dev_set_drvdata(&fn->dev, f21);
+
+ sensor_count = fn->fd.query_base_addr & RMI_F21_SENSOR_COUNT_MASK;
+ if (fn->fd.query_base_addr & RMI_F21_FINGER_COUNT_PRESENT) {
+ query15_offset = fn->fd.query_base_addr & RMI_F21_NEW_REPORT_FORMAT ? 2 : 1;
+ error = rmi_read_block(fn->rmi_dev,
+ fn->fd.query_base_addr + query15_offset,
+ &query15_data, sizeof(query15_data));
+ if (error)
+ return dev_err_probe(&fn->dev, error,
+ "failed to read 'query15' data");
+
+ max_fingers = query15_data & RMI_F21_FINGER_COUNT_MASK;
+ } else {
+ max_fingers = 5;
+ }
+
+ if (fn->fd.query_base_addr & RMI_F21_NEW_REPORT_FORMAT) {
+ /* Each finger uses one byte, and the button state uses one byte. */
+ f21->attn_data_size = max_fingers + 1;
+ f21->attn_data_button_offset = f21->attn_data_size - 1;
+ /*
+ * Each sensor uses two bytes, the button state uses one byte,
+ * and each finger uses two bytes.
+ */
+ f21->data_reg_size = sensor_count * 2 + 1 + max_fingers * 2;
+ f21->data_reg_button_offset = sensor_count * 2;
+ } else {
+ /*
+ * Regardless of the transport each finger uses two bytes,
+ * and the button state uses one byte.
+ */
+ f21->attn_data_size = sensor_count * 2 + 1;
+ f21->attn_data_button_offset = sensor_count * 2;
+
+ f21->data_reg_size = f21->attn_data_size;
+ f21->data_reg_button_offset = f21->attn_data_button_offset;
+ }
+
+ return 0;
+}
+
+struct rmi_function_handler rmi_f21_handler = {
+ .driver = {
+ .name = "rmi4_f21",
+ },
+ .func = 0x21,
+ .probe = rmi_f21_probe,
+ .config = rmi_f21_config,
+ .attention = rmi_f21_attention,
+};
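As a worked example of the sizing logic in rmi_f21_probe() (the counts are
illustrative): with the new report format and sensor_count = 4,
max_fingers = 5, the attention payload is max_fingers + 1 = 6 bytes with the
button byte last (offset 5), while the data registers span
4 * 2 + 1 + 5 * 2 = 19 bytes with the button byte at offset 4 * 2 = 8. With
the old format both layouts collapse to sensor_count * 2 + 1 = 9 bytes, with
the button byte at offset 8.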
diff --git a/drivers/input/touch-overlay.c b/drivers/input/touch-overlay.c
new file mode 100644
index 000000000000..8806373f7a4a
--- /dev/null
+++ b/drivers/input/touch-overlay.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Helper functions for overlay objects on touchscreens
+ *
+ * Copyright (c) 2023 Javier Carrasco <javier.carrasco@wolfvision.net>
+ */
+
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touch-overlay.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/property.h>
+
+struct touch_overlay_segment {
+ struct list_head list;
+ u32 x_origin;
+ u32 y_origin;
+ u32 x_size;
+ u32 y_size;
+ u32 key;
+ bool pressed;
+ int slot;
+};
+
+static int touch_overlay_get_segment(struct fwnode_handle *segment_node,
+ struct touch_overlay_segment *segment,
+ struct input_dev *input)
+{
+ int error;
+
+ error = fwnode_property_read_u32(segment_node, "x-origin",
+ &segment->x_origin);
+ if (error)
+ return error;
+
+ error = fwnode_property_read_u32(segment_node, "y-origin",
+ &segment->y_origin);
+ if (error)
+ return error;
+
+ error = fwnode_property_read_u32(segment_node, "x-size",
+ &segment->x_size);
+ if (error)
+ return error;
+
+ error = fwnode_property_read_u32(segment_node, "y-size",
+ &segment->y_size);
+ if (error)
+ return error;
+
+ error = fwnode_property_read_u32(segment_node, "linux,code",
+ &segment->key);
+ if (!error)
+ input_set_capability(input, EV_KEY, segment->key);
+ else if (error != -EINVAL)
+ return error;
+
+ return 0;
+}
+
+/**
+ * touch_overlay_map - map overlay objects from the device tree
+ * @list: pointer to the list that will hold the segments
+ * @input: pointer to the already allocated input_dev
+ *
+ * Returns 0 on success or a negative error number otherwise.
+ *
+ * If buttons are defined, key capabilities are set accordingly.
+ */
+int touch_overlay_map(struct list_head *list, struct input_dev *input)
+{
+ struct fwnode_handle *fw_segment;
+ struct device *dev = input->dev.parent;
+ struct touch_overlay_segment *segment;
+ int error;
+
+ struct fwnode_handle *overlay __free(fwnode_handle) =
+ device_get_named_child_node(dev, "touch-overlay");
+ if (!overlay)
+ return 0;
+
+ fwnode_for_each_available_child_node(overlay, fw_segment) {
+ segment = devm_kzalloc(dev, sizeof(*segment), GFP_KERNEL);
+ if (!segment) {
+ fwnode_handle_put(fw_segment);
+ return -ENOMEM;
+ }
+ error = touch_overlay_get_segment(fw_segment, segment, input);
+ if (error) {
+ fwnode_handle_put(fw_segment);
+ return error;
+ }
+ list_add_tail(&segment->list, list);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(touch_overlay_map);
+
+/**
+ * touch_overlay_get_touchscreen_abs - get abs size from the touchscreen area.
+ * @list: pointer to the list that holds the segments
+ * @x: horizontal abs
+ * @y: vertical abs
+ */
+void touch_overlay_get_touchscreen_abs(struct list_head *list, u16 *x, u16 *y)
+{
+ struct touch_overlay_segment *segment;
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ segment = list_entry(ptr, struct touch_overlay_segment, list);
+ if (!segment->key) {
+ *x = segment->x_size - 1;
+ *y = segment->y_size - 1;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(touch_overlay_get_touchscreen_abs);
+
+static bool touch_overlay_segment_event(struct touch_overlay_segment *seg,
+ struct input_mt_pos *pos)
+{
+ if (pos->x >= seg->x_origin && pos->x < (seg->x_origin + seg->x_size) &&
+ pos->y >= seg->y_origin && pos->y < (seg->y_origin + seg->y_size))
+ return true;
+
+ return false;
+}
+
+/**
+ * touch_overlay_mapped_touchscreen - check if a touchscreen area is mapped
+ * @list: pointer to the list that holds the segments
+ *
+ * Returns true if a touchscreen area is mapped or false otherwise.
+ */
+bool touch_overlay_mapped_touchscreen(struct list_head *list)
+{
+ struct touch_overlay_segment *segment;
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ segment = list_entry(ptr, struct touch_overlay_segment, list);
+ if (!segment->key)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(touch_overlay_mapped_touchscreen);
+
+static bool touch_overlay_event_on_ts(struct list_head *list,
+ struct input_mt_pos *pos)
+{
+ struct touch_overlay_segment *segment;
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ segment = list_entry(ptr, struct touch_overlay_segment, list);
+ if (segment->key)
+ continue;
+
+ if (touch_overlay_segment_event(segment, pos)) {
+ pos->x -= segment->x_origin;
+ pos->y -= segment->y_origin;
+ return true;
+ }
+ /* ignore touch events outside the defined area */
+ return false;
+ }
+
+ return true;
+}
+
+static bool touch_overlay_button_event(struct input_dev *input,
+ struct touch_overlay_segment *segment,
+ struct input_mt_pos *pos, int slot)
+{
+ struct input_mt *mt = input->mt;
+ struct input_mt_slot *s = &mt->slots[slot];
+ bool button_contact = touch_overlay_segment_event(segment, pos);
+
+ if (segment->slot == slot && segment->pressed) {
+ /* sliding out of the button releases it */
+ if (!button_contact) {
+ input_report_key(input, segment->key, false);
+ segment->pressed = false;
+ /* keep available for a possible touch event */
+ return false;
+ }
+ /* ignore sliding on the button while pressed */
+ s->frame = mt->frame;
+ return true;
+ } else if (button_contact) {
+ input_report_key(input, segment->key, true);
+ s->frame = mt->frame;
+ segment->slot = slot;
+ segment->pressed = true;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * touch_overlay_sync_frame - update the status of the segments and report
+ * buttons whose tracked slot is unused.
+ * @list: pointer to the list that holds the segments
+ * @input: pointer to the input device associated to the contact
+ */
+void touch_overlay_sync_frame(struct list_head *list, struct input_dev *input)
+{
+ struct touch_overlay_segment *segment;
+ struct input_mt *mt = input->mt;
+ struct input_mt_slot *s;
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ segment = list_entry(ptr, struct touch_overlay_segment, list);
+ if (!segment->key)
+ continue;
+
+ s = &mt->slots[segment->slot];
+ if (!input_mt_is_used(mt, s) && segment->pressed) {
+ input_report_key(input, segment->key, false);
+ segment->pressed = false;
+ }
+ }
+}
+EXPORT_SYMBOL(touch_overlay_sync_frame);
+
+/**
+ * touch_overlay_process_contact - process contacts according to the overlay
+ * mapping. This function acts as a filter that relieves the calling driver
+ * of contacts that are either related to overlay buttons or outside the
+ * overlay touchscreen area, if one is defined.
+ * @list: pointer to the list that holds the segments
+ * @input: pointer to the input device associated to the contact
+ * @pos: pointer to the contact position
+ * @slot: slot associated to the contact (0 if multitouch is not supported)
+ *
+ * Returns true if the contact was processed (reported for valid key events
+ * and dropped for contacts outside the overlay touchscreen area) or false
+ * if the contact must be processed by the caller. In that case this function
+ * shifts the (x,y) coordinates to the overlay touchscreen axis if required.
+ */
+bool touch_overlay_process_contact(struct list_head *list,
+ struct input_dev *input,
+ struct input_mt_pos *pos, int slot)
+{
+ struct touch_overlay_segment *segment;
+ struct list_head *ptr;
+
+ /*
+ * buttons must be prioritized over overlay touchscreens to account for
+ * overlaps, e.g. a button inside the touchscreen area.
+ */
+ list_for_each(ptr, list) {
+ segment = list_entry(ptr, struct touch_overlay_segment, list);
+ if (segment->key &&
+ touch_overlay_button_event(input, segment, pos, slot))
+ return true;
+ }
+
+ /*
+ * valid contacts on the overlay touchscreen are left for the calling
+ * driver to process/report according to its (possibly) unique features.
+ */
+ return !touch_overlay_event_on_ts(list, pos);
+}
+EXPORT_SYMBOL(touch_overlay_process_contact);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Helper functions for overlay objects on touch devices");
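A condensed usage sketch for these helpers, distilled from the st1232 hunks
further below; the example_ts structure and the surrounding report path are
hypothetical:

#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touch-overlay.h>
#include <linux/list.h>

struct example_ts {
	struct input_dev *input;
	struct list_head overlay_list;	/* filled by touch_overlay_map() */
};

static void example_report(struct example_ts *ts, struct input_mt_pos *pos,
			   int *slots, int n_contacts)
{
	int i;

	for (i = 0; i < n_contacts; i++) {
		/* Consumed if it hit a button or fell outside the mapped area. */
		if (touch_overlay_process_contact(&ts->overlay_list, ts->input,
						  &pos[i], slots[i]))
			continue;

		input_mt_slot(ts->input, slots[i]);
		input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, true);
		input_report_abs(ts->input, ABS_MT_POSITION_X, pos[i].x);
		input_report_abs(ts->input, ABS_MT_POSITION_Y, pos[i].y);
	}

	/* Release buttons whose tracked slot went unused this frame. */
	touch_overlay_sync_frame(&ts->overlay_list, ts->input);
	input_mt_sync_frame(ts->input);
	input_sync(ts->input);
}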
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index f9db5cefb25b..8b4f3e3660b8 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -444,10 +444,11 @@ static int ad7879_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return !!(val & AD7879_GPIO_DATA);
}
-static void ad7879_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
+static int ad7879_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct ad7879 *ts = gpiochip_get_data(chip);
+ int ret;
mutex_lock(&ts->mutex);
if (value)
@@ -455,8 +456,10 @@ static void ad7879_gpio_set_value(struct gpio_chip *chip,
else
ts->cmd_crtl2 &= ~AD7879_GPIO_DATA;
- ad7879_write(ts, AD7879_REG_CTRL2, ts->cmd_crtl2);
+ ret = ad7879_write(ts, AD7879_REG_CTRL2, ts->cmd_crtl2);
mutex_unlock(&ts->mutex);
+
+ return ret;
}
static int ad7879_gpio_add(struct ad7879 *ts)
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 0d7bf18e2508..bf498bd4dea9 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -120,7 +120,6 @@ struct edt_ft5x06_ts_data {
struct regmap *regmap;
#if defined(CONFIG_DEBUG_FS)
- struct dentry *debug_dir;
u8 *raw_buffer;
size_t raw_bufsize;
#endif
@@ -815,23 +814,21 @@ static const struct file_operations debugfs_raw_data_fops = {
.read = edt_ft5x06_debugfs_raw_data_read,
};
-static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
- const char *debugfs_name)
+static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
- tsdata->debug_dir = debugfs_create_dir(debugfs_name, NULL);
+ struct dentry *debug_dir = tsdata->client->debugfs;
- debugfs_create_u16("num_x", S_IRUSR, tsdata->debug_dir, &tsdata->num_x);
- debugfs_create_u16("num_y", S_IRUSR, tsdata->debug_dir, &tsdata->num_y);
+ debugfs_create_u16("num_x", S_IRUSR, debug_dir, &tsdata->num_x);
+ debugfs_create_u16("num_y", S_IRUSR, debug_dir, &tsdata->num_y);
debugfs_create_file("mode", S_IRUSR | S_IWUSR,
- tsdata->debug_dir, tsdata, &debugfs_mode_fops);
+ debug_dir, tsdata, &debugfs_mode_fops);
debugfs_create_file("raw_data", S_IRUSR,
- tsdata->debug_dir, tsdata, &debugfs_raw_data_fops);
+ debug_dir, tsdata, &debugfs_raw_data_fops);
}
static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
- debugfs_remove_recursive(tsdata->debug_dir);
kfree(tsdata->raw_buffer);
}
@@ -842,8 +839,7 @@ static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
return -ENOSYS;
}
-static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
- const char *debugfs_name)
+static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
}
@@ -1349,7 +1345,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client)
if (error)
return error;
- edt_ft5x06_ts_prepare_debugfs(tsdata, dev_driver_string(&client->dev));
+ edt_ft5x06_ts_prepare_debugfs(tsdata);
dev_dbg(&client->dev,
"EDT FT5x06 initialized: IRQ %d, WAKE pin %d, Reset pin %d.\n",
@@ -1495,6 +1491,10 @@ static const struct edt_i2c_chip_data edt_ft8201_data = {
.max_support_points = 10,
};
+static const struct edt_i2c_chip_data edt_ft8716_data = {
+ .max_support_points = 10,
+};
+
static const struct edt_i2c_chip_data edt_ft8719_data = {
.max_support_points = 10,
};
@@ -1507,6 +1507,7 @@ static const struct i2c_device_id edt_ft5x06_ts_id[] = {
/* Note no edt- prefix for compatibility with the ft6236.c driver */
{ .name = "ft6236", .driver_data = (long)&edt_ft6236_data },
{ .name = "ft8201", .driver_data = (long)&edt_ft8201_data },
+ { .name = "ft8716", .driver_data = (long)&edt_ft8716_data },
{ .name = "ft8719", .driver_data = (long)&edt_ft8719_data },
{ /* sentinel */ }
};
@@ -1523,6 +1524,7 @@ static const struct of_device_id edt_ft5x06_of_match[] = {
/* Note focaltech vendor prefix for compatibility with ft6236.c */
{ .compatible = "focaltech,ft6236", .data = &edt_ft6236_data },
{ .compatible = "focaltech,ft8201", .data = &edt_ft8201_data },
+ { .compatible = "focaltech,ft8716", .data = &edt_ft8716_data },
{ .compatible = "focaltech,ft8719", .data = &edt_ft8719_data },
{ /* sentinel */ }
};
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index a3e8a51c9144..252dcae039f8 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -44,9 +44,11 @@
#define GOODIX_HAVE_KEY BIT(4)
#define GOODIX_BUFFER_STATUS_TIMEOUT 20
-#define RESOLUTION_LOC 1
-#define MAX_CONTACTS_LOC 5
-#define TRIGGER_LOC 6
+#define RESOLUTION_LOC 1
+#define MAX_CONTACTS_LOC 5
+#define TRIGGER_LOC 6
+
+#define GOODIX_POLL_INTERVAL_MS 17 /* 17 ms ~ 60 fps */
/* Our special handling for GPIO accesses through ACPI is x86 specific */
#if defined CONFIG_X86 && defined CONFIG_ACPI
@@ -497,6 +499,14 @@ sync:
input_sync(ts->input_dev);
}
+static void goodix_ts_work_i2c_poll(struct input_dev *input)
+{
+ struct goodix_ts_data *ts = input_get_drvdata(input);
+
+ goodix_process_events(ts);
+ goodix_i2c_write_u8(ts->client, GOODIX_READ_COOR_ADDR, 0);
+}
+
/**
* goodix_ts_irq_handler - The IRQ handler
*
@@ -513,13 +523,29 @@ static irqreturn_t goodix_ts_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void goodix_enable_irq(struct goodix_ts_data *ts)
+{
+ if (ts->client->irq)
+ enable_irq(ts->client->irq);
+}
+
+static void goodix_disable_irq(struct goodix_ts_data *ts)
+{
+ if (ts->client->irq)
+ disable_irq(ts->client->irq);
+}
+
static void goodix_free_irq(struct goodix_ts_data *ts)
{
- devm_free_irq(&ts->client->dev, ts->client->irq, ts);
+ if (ts->client->irq)
+ devm_free_irq(&ts->client->dev, ts->client->irq, ts);
}
static int goodix_request_irq(struct goodix_ts_data *ts)
{
+ if (!ts->client->irq)
+ return 0;
+
return devm_request_threaded_irq(&ts->client->dev, ts->client->irq,
NULL, goodix_ts_irq_handler,
ts->irq_flags, ts->client->name, ts);
@@ -1219,6 +1245,18 @@ retry_read_config:
return error;
}
+ input_set_drvdata(ts->input_dev, ts);
+
+ if (!ts->client->irq) {
+ error = input_setup_polling(ts->input_dev, goodix_ts_work_i2c_poll);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "could not set up polling mode, %d\n", error);
+ return error;
+ }
+ input_set_poll_interval(ts->input_dev, GOODIX_POLL_INTERVAL_MS);
+ }
+
error = input_register_device(ts->input_dev);
if (error) {
dev_err(&ts->client->dev,
@@ -1422,7 +1460,7 @@ static int goodix_suspend(struct device *dev)
/* We need gpio pins to suspend/resume */
if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_NONE) {
- disable_irq(client->irq);
+ goodix_disable_irq(ts);
return 0;
}
@@ -1466,7 +1504,7 @@ static int goodix_resume(struct device *dev)
int error;
if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_NONE) {
- enable_irq(client->irq);
+ goodix_enable_irq(ts);
return 0;
}
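The goodix hunks above fall back to the input core's polled mode when no
IRQ line is wired up: input_setup_polling() registers a poll function that
the core then calls at the configured interval. A minimal sketch of that
mechanism with illustrative names:

#include <linux/device.h>
#include <linux/input.h>

static void example_poll(struct input_dev *input)
{
	/* Read the hardware state and report it; the input core
	 * reschedules the next poll automatically. */
}

static int example_setup(struct device *dev)
{
	struct input_dev *input = devm_input_allocate_device(dev);
	int error;

	if (!input)
		return -ENOMEM;

	input_set_capability(input, EV_KEY, BTN_TOUCH);

	error = input_setup_polling(input, example_poll);
	if (error)
		return error;

	input_set_poll_interval(input, 17);	/* ~60 Hz, matching above */

	return input_register_device(input);
}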
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 6475084aee1b..9b3901eec0a5 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -22,6 +22,7 @@
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/input/touch-overlay.h>
#define ST1232_TS_NAME "st1232-ts"
#define ST1633_TS_NAME "st1633-ts"
@@ -57,6 +58,7 @@ struct st1232_ts_data {
struct dev_pm_qos_request low_latency_req;
struct gpio_desc *reset_gpio;
const struct st_chip_info *chip_info;
+ struct list_head touch_overlay_list;
int read_buf_len;
u8 *read_buf;
};
@@ -156,6 +158,10 @@ static int st1232_ts_parse_and_report(struct st1232_ts_data *ts)
input_mt_assign_slots(input, slots, pos, n_contacts, 0);
for (i = 0; i < n_contacts; i++) {
+ if (touch_overlay_process_contact(&ts->touch_overlay_list,
+ input, &pos[i], slots[i]))
+ continue;
+
input_mt_slot(input, slots[i]);
input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
input_report_abs(input, ABS_MT_POSITION_X, pos[i].x);
@@ -164,6 +170,7 @@ static int st1232_ts_parse_and_report(struct st1232_ts_data *ts)
input_report_abs(input, ABS_MT_TOUCH_MAJOR, z[i]);
}
+ touch_overlay_sync_frame(&ts->touch_overlay_list, input);
input_mt_sync_frame(input);
input_sync(input);
@@ -292,18 +299,30 @@ static int st1232_ts_probe(struct i2c_client *client)
if (error)
return error;
- /* Read resolution from the chip */
- error = st1232_ts_read_resolution(ts, &max_x, &max_y);
- if (error) {
- dev_err(&client->dev,
- "Failed to read resolution: %d\n", error);
- return error;
- }
-
if (ts->chip_info->have_z)
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
ts->chip_info->max_area, 0, 0);
+ /* map overlay objects if defined in the device tree */
+ INIT_LIST_HEAD(&ts->touch_overlay_list);
+ error = touch_overlay_map(&ts->touch_overlay_list, input_dev);
+ if (error)
+ return error;
+
+ if (touch_overlay_mapped_touchscreen(&ts->touch_overlay_list)) {
+ /* Read resolution from the overlay touchscreen if defined */
+ touch_overlay_get_touchscreen_abs(&ts->touch_overlay_list,
+ &max_x, &max_y);
+ } else {
+ /* Read resolution from the chip */
+ error = st1232_ts_read_resolution(ts, &max_x, &max_y);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read resolution: %d\n", error);
+ return error;
+ }
+ }
+
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
0, max_x, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 0a33d995d15d..70d29b14d851 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -200,6 +200,7 @@ source "drivers/iommu/riscv/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
+ select IRQ_MSI_LIB
help
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode in the CPU's which support x2APIC enhancements or
@@ -305,7 +306,6 @@ config APPLE_DART
depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_DART
select IOMMU_API
select IOMMU_IO_PGTABLE_DART
- default ARCH_APPLE
help
Support for Apple DART (Device Address Resolution Table) IOMMUs
found in Apple ARM SoCs like the M1.
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index 994063e5586f..ecef69c11144 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -7,6 +7,7 @@ config AMD_IOMMU
select PCI_ATS
select PCI_PRI
select PCI_PASID
+ select IRQ_MSI_LIB
select MMU_NOTIFIER
select IOMMU_API
select IOMMU_IOVA
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 29a8864381c3..9b4b589a54b5 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -28,9 +28,9 @@ void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
gfp_t gfp, size_t size);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
-void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
+void amd_iommu_debugfs_setup(void);
#else
-static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
+static inline void amd_iommu_debugfs_setup(void) {}
#endif
/* Needed for interrupt remapping */
@@ -42,7 +42,9 @@ int amd_iommu_enable_faulting(unsigned int cpu);
extern int amd_iommu_guest_ir;
extern enum protection_domain_mode amd_iommu_pgtable;
extern int amd_iommu_gpt_level;
+extern u8 amd_iommu_hpt_level;
extern unsigned long amd_iommu_pgsize_bitmap;
+extern bool amd_iommu_hatdis;
/* Protection domain ops */
void amd_iommu_init_identity_domain(void);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index ccbab3a4811a..5219d7ddfdaa 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -94,6 +94,7 @@
#define FEATURE_GA BIT_ULL(7)
#define FEATURE_HE BIT_ULL(8)
#define FEATURE_PC BIT_ULL(9)
+#define FEATURE_HATS GENMASK_ULL(11, 10)
#define FEATURE_GATS GENMASK_ULL(13, 12)
#define FEATURE_GLX GENMASK_ULL(15, 14)
#define FEATURE_GAM_VAPIC BIT_ULL(21)
@@ -460,6 +461,9 @@
/* IOMMU Feature Reporting Field (for IVHD type 10h */
#define IOMMU_FEAT_GASUP_SHIFT 6
+/* IOMMU HATDIS for IVHD type 11h and 40h */
+#define IOMMU_IVHD_ATTR_HATDIS_SHIFT 0
+
/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_XTSUP_SHIFT 2
#define IOMMU_EFR_GASUP_SHIFT 7
@@ -558,7 +562,8 @@ struct amd_io_pgtable {
};
enum protection_domain_mode {
- PD_MODE_V1 = 1,
+ PD_MODE_NONE,
+ PD_MODE_V1,
PD_MODE_V2,
};
@@ -790,6 +795,8 @@ struct amd_iommu {
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
/* DebugFS Info */
struct dentry *debugfs;
+ int dbg_mmio_offset;
+ int dbg_cap_offset;
#endif
/* IOPF support */
@@ -891,6 +898,13 @@ struct dev_table_entry {
};
/*
+ * Structure defining one entry in the command buffer
+ */
+struct iommu_cmd {
+ u32 data[4];
+};
+
+/*
* Structure to store persistent DTE flags from IVHD
*/
struct ivhd_dte_flags {
@@ -1054,7 +1068,6 @@ struct irq_2_irte {
};
struct amd_ir_data {
- u32 cached_ga_tag;
struct amd_iommu *iommu;
struct irq_2_irte irq_2_irte;
struct msi_msg msi_entry;
diff --git a/drivers/iommu/amd/debugfs.c b/drivers/iommu/amd/debugfs.c
index 545372fcc72f..10fa217a7119 100644
--- a/drivers/iommu/amd/debugfs.c
+++ b/drivers/iommu/amd/debugfs.c
@@ -11,22 +11,382 @@
#include <linux/pci.h>
#include "amd_iommu.h"
+#include "../irq_remapping.h"
static struct dentry *amd_iommu_debugfs;
-static DEFINE_MUTEX(amd_iommu_debugfs_lock);
#define MAX_NAME_LEN 20
+#define OFS_IN_SZ 8
+#define DEVID_IN_SZ 16
-void amd_iommu_debugfs_setup(struct amd_iommu *iommu)
+static int sbdf = -1;
+
+static ssize_t iommu_mmio_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct amd_iommu *iommu = m->private;
+ int ret;
+
+ iommu->dbg_mmio_offset = -1;
+
+ if (cnt > OFS_IN_SZ)
+ return -EINVAL;
+
+ ret = kstrtou32_from_user(ubuf, cnt, 0, &iommu->dbg_mmio_offset);
+ if (ret)
+ return ret;
+
+ if (iommu->dbg_mmio_offset > iommu->mmio_phys_end - 4) {
+ iommu->dbg_mmio_offset = -1;
+ return -EINVAL;
+ }
+
+ return cnt;
+}
+
+static int iommu_mmio_show(struct seq_file *m, void *unused)
+{
+ struct amd_iommu *iommu = m->private;
+ u64 value;
+
+ if (iommu->dbg_mmio_offset < 0) {
+ seq_puts(m, "Please provide mmio register's offset\n");
+ return 0;
+ }
+
+ value = readq(iommu->mmio_base + iommu->dbg_mmio_offset);
+ seq_printf(m, "Offset:0x%x Value:0x%016llx\n", iommu->dbg_mmio_offset, value);
+
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(iommu_mmio);
+
+static ssize_t iommu_capability_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct amd_iommu *iommu = m->private;
+ int ret;
+
+ iommu->dbg_cap_offset = -1;
+
+ if (cnt > OFS_IN_SZ)
+ return -EINVAL;
+
+ ret = kstrtou32_from_user(ubuf, cnt, 0, &iommu->dbg_cap_offset);
+ if (ret)
+ return ret;
+
+ /* Capability register at offset 0x14 is the last IOMMU capability register. */
+ if (iommu->dbg_cap_offset > 0x14) {
+ iommu->dbg_cap_offset = -1;
+ return -EINVAL;
+ }
+
+ return cnt;
+}
+
+static int iommu_capability_show(struct seq_file *m, void *unused)
+{
+ struct amd_iommu *iommu = m->private;
+ u32 value;
+ int err;
+
+ if (iommu->dbg_cap_offset < 0) {
+ seq_puts(m, "Please provide capability register's offset in the range [0x00 - 0x14]\n");
+ return 0;
+ }
+
+ err = pci_read_config_dword(iommu->dev, iommu->cap_ptr + iommu->dbg_cap_offset, &value);
+ if (err) {
+ seq_printf(m, "Not able to read capability register at 0x%x\n",
+ iommu->dbg_cap_offset);
+ return 0;
+ }
+
+ seq_printf(m, "Offset:0x%x Value:0x%08x\n", iommu->dbg_cap_offset, value);
+
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(iommu_capability);
+
+static int iommu_cmdbuf_show(struct seq_file *m, void *unused)
+{
+ struct amd_iommu *iommu = m->private;
+ struct iommu_cmd *cmd;
+ unsigned long flag;
+ u32 head, tail;
+ int i;
+
+ raw_spin_lock_irqsave(&iommu->lock, flag);
+ head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+ seq_printf(m, "CMD Buffer Head Offset:%d Tail Offset:%d\n",
+ (head >> 4) & 0x7fff, (tail >> 4) & 0x7fff);
+ for (i = 0; i < CMD_BUFFER_ENTRIES; i++) {
+ cmd = (struct iommu_cmd *)(iommu->cmd_buf + i * sizeof(*cmd));
+ seq_printf(m, "%3d: %08x %08x %08x %08x\n", i, cmd->data[0],
+ cmd->data[1], cmd->data[2], cmd->data[3]);
+ }
+ raw_spin_unlock_irqrestore(&iommu->lock, flag);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(iommu_cmdbuf);
+
+static ssize_t devid_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ int seg, bus, slot, func;
+ struct amd_iommu *iommu;
+ char *srcid_ptr;
+ u16 devid;
+ int i;
+
+ sbdf = -1;
+
+ if (cnt >= DEVID_IN_SZ)
+ return -EINVAL;
+
+ srcid_ptr = memdup_user_nul(ubuf, cnt);
+ if (IS_ERR(srcid_ptr))
+ return PTR_ERR(srcid_ptr);
+
+ i = sscanf(srcid_ptr, "%x:%x:%x.%x", &seg, &bus, &slot, &func);
+ if (i != 4) {
+ i = sscanf(srcid_ptr, "%x:%x.%x", &bus, &slot, &func);
+ if (i != 3) {
+ kfree(srcid_ptr);
+ return -EINVAL;
+ }
+ seg = 0;
+ }
+
+ devid = PCI_DEVID(bus, PCI_DEVFN(slot, func));
+
+ /* Check if user device id input is a valid input */
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id != seg)
+ continue;
+ if (devid > pci_seg->last_bdf) {
+ kfree(srcid_ptr);
+ return -EINVAL;
+ }
+ iommu = pci_seg->rlookup_table[devid];
+ if (!iommu) {
+ kfree(srcid_ptr);
+ return -ENODEV;
+ }
+ break;
+ }
+
+ if (pci_seg->id != seg) {
+ kfree(srcid_ptr);
+ return -EINVAL;
+ }
+
+ sbdf = PCI_SEG_DEVID_TO_SBDF(seg, devid);
+
+ kfree(srcid_ptr);
+
+ return cnt;
+}
+
+static int devid_show(struct seq_file *m, void *unused)
{
+ u16 devid;
+
+ if (sbdf >= 0) {
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ seq_printf(m, "%04x:%02x:%02x.%x\n", PCI_SBDF_TO_SEGID(sbdf),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
+ } else
+ seq_puts(m, "No or Invalid input provided\n");
+
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(devid);
+
+static void dump_dte(struct seq_file *m, struct amd_iommu_pci_seg *pci_seg, u16 devid)
+{
+ struct dev_table_entry *dev_table;
+ struct amd_iommu *iommu;
+
+ iommu = pci_seg->rlookup_table[devid];
+ if (!iommu)
+ return;
+
+ dev_table = get_dev_table(iommu);
+ if (!dev_table) {
+ seq_puts(m, "Device table not found");
+ return;
+ }
+
+ seq_printf(m, "%-12s %16s %16s %16s %16s iommu\n", "DeviceId",
+ "QWORD[3]", "QWORD[2]", "QWORD[1]", "QWORD[0]");
+ seq_printf(m, "%04x:%02x:%02x.%x ", pci_seg->id, PCI_BUS_NUM(devid),
+ PCI_SLOT(devid), PCI_FUNC(devid));
+ for (int i = 3; i >= 0; --i)
+ seq_printf(m, "%016llx ", dev_table[devid].data[i]);
+ seq_printf(m, "iommu%d\n", iommu->index);
+}
+
+static int iommu_devtbl_show(struct seq_file *m, void *unused)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ u16 seg, devid;
+
+ if (sbdf < 0) {
+ seq_puts(m, "Enter a valid device ID to 'devid' file\n");
+ return 0;
+ }
+ seg = PCI_SBDF_TO_SEGID(sbdf);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id != seg)
+ continue;
+ dump_dte(m, pci_seg, devid);
+ break;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(iommu_devtbl);
+
+static void dump_128_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
+{
+ struct irte_ga *ptr, *irte;
+ int index;
+
+ for (index = 0; index < int_tab_len; index++) {
+ ptr = (struct irte_ga *)table->table;
+ irte = &ptr[index];
+
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+ !irte->lo.fields_vapic.valid)
+ continue;
+ else if (!irte->lo.fields_remap.valid)
+ continue;
+ seq_printf(m, "IRT[%04d] %016llx %016llx\n", index, irte->hi.val, irte->lo.val);
+ }
+}
+
+static void dump_32_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
+{
+ union irte *ptr, *irte;
+ int index;
+
+ for (index = 0; index < int_tab_len; index++) {
+ ptr = (union irte *)table->table;
+ irte = &ptr[index];
+
+ if (!irte->fields.valid)
+ continue;
+ seq_printf(m, "IRT[%04d] %08x\n", index, irte->val);
+ }
+}
+
+static void dump_irte(struct seq_file *m, u16 devid, struct amd_iommu_pci_seg *pci_seg)
+{
+ struct dev_table_entry *dev_table;
+ struct irq_remap_table *table;
+ struct amd_iommu *iommu;
+ unsigned long flags;
+ u16 int_tab_len;
+
+ table = pci_seg->irq_lookup_table[devid];
+ if (!table) {
+ seq_printf(m, "IRQ lookup table not set for %04x:%02x:%02x:%x\n",
+ pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
+ return;
+ }
+
+ iommu = pci_seg->rlookup_table[devid];
+ if (!iommu)
+ return;
+
+ dev_table = get_dev_table(iommu);
+ if (!dev_table) {
+ seq_puts(m, "Device table not found");
+ return;
+ }
+
+ int_tab_len = dev_table[devid].data[2] & DTE_INTTABLEN_MASK;
+ if (int_tab_len != DTE_INTTABLEN_512 && int_tab_len != DTE_INTTABLEN_2K) {
+ seq_puts(m, "The device's DTE contains an invalid IRT length value.");
+ return;
+ }
+
+ seq_printf(m, "DeviceId %04x:%02x:%02x.%x\n", pci_seg->id, PCI_BUS_NUM(devid),
+ PCI_SLOT(devid), PCI_FUNC(devid));
+
+ raw_spin_lock_irqsave(&table->lock, flags);
+ if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ dump_128_irte(m, table, BIT(int_tab_len >> 1));
+ else
+ dump_32_irte(m, table, BIT(int_tab_len >> 1));
+ seq_puts(m, "\n");
+ raw_spin_unlock_irqrestore(&table->lock, flags);
+}
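
For reference, a standalone sketch of the IRT-length math used by dump_irte() above. It assumes the kernel's encoding in which the DTE IntTabLen field holds log2 of the entry count shifted left by one bit, which is why the code applies BIT(int_tab_len >> 1); the two defines below are illustrative stand-ins, not copied from amd_iommu_types.h.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's DTE IntTabLen encoding, assuming
 * the field stores log2(entries) in bits [5:1] of DTE data[2]. */
#define DTE_INTTABLEN_512 (9ULL << 1)   /* 2^9  = 512 entries  */
#define DTE_INTTABLEN_2K  (11ULL << 1)  /* 2^11 = 2048 entries */

/* Same math as dump_irte(): drop the low bit, then take 2^n. */
static unsigned int irt_entries(unsigned long long int_tab_len)
{
	return 1u << (int_tab_len >> 1);
}

int main(void)
{
	printf("512-entry IRT:  %u\n", irt_entries(DTE_INTTABLEN_512));
	printf("2048-entry IRT: %u\n", irt_entries(DTE_INTTABLEN_2K));
	return 0;
}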
+
+static int iommu_irqtbl_show(struct seq_file *m, void *unused)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ u16 devid, seg;
+
+ if (!irq_remapping_enabled) {
+ seq_puts(m, "Interrupt remapping is disabled\n");
+ return 0;
+ }
+
+ if (sbdf < 0) {
+ seq_puts(m, "Enter a valid device ID to 'devid' file\n");
+ return 0;
+ }
+
+ seg = PCI_SBDF_TO_SEGID(sbdf);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id != seg)
+ continue;
+ dump_irte(m, devid, pci_seg);
+ break;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(iommu_irqtbl);
+
+void amd_iommu_debugfs_setup(void)
+{
+ struct amd_iommu *iommu;
char name[MAX_NAME_LEN + 1];
- mutex_lock(&amd_iommu_debugfs_lock);
- if (!amd_iommu_debugfs)
- amd_iommu_debugfs = debugfs_create_dir("amd",
- iommu_debugfs_dir);
- mutex_unlock(&amd_iommu_debugfs_lock);
+ amd_iommu_debugfs = debugfs_create_dir("amd", iommu_debugfs_dir);
+
+ for_each_iommu(iommu) {
+ iommu->dbg_mmio_offset = -1;
+ iommu->dbg_cap_offset = -1;
+
+ snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
+ iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
+
+ debugfs_create_file("mmio", 0644, iommu->debugfs, iommu,
+ &iommu_mmio_fops);
+ debugfs_create_file("capability", 0644, iommu->debugfs, iommu,
+ &iommu_capability_fops);
+ debugfs_create_file("cmdbuf", 0444, iommu->debugfs, iommu,
+ &iommu_cmdbuf_fops);
+ }
- snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
- iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
+ debugfs_create_file("devid", 0644, amd_iommu_debugfs, NULL,
+ &devid_fops);
+ debugfs_create_file("devtbl", 0444, amd_iommu_debugfs, NULL,
+ &iommu_devtbl_fops);
+ debugfs_create_file("irqtbl", 0444, amd_iommu_debugfs, NULL,
+ &iommu_irqtbl_fops);
}
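
As a usage sketch (an editor's illustration, not part of the patch): the new 'devid' file accepts either a "seg:bus:slot.func" or a "bus:slot.func" hex string, mirroring the two sscanf() calls in devid_write() above. A minimal user-space parser of the same two forms:

#include <stdio.h>

/* Parse the two input forms accepted by devid_write():
 * "seg:bus:slot.func" or "bus:slot.func" (segment defaults to 0). */
static int parse_devid(const char *s, int *seg, int *bus, int *slot, int *func)
{
	if (sscanf(s, "%x:%x:%x.%x", seg, bus, slot, func) == 4)
		return 0;
	*seg = 0;
	if (sscanf(s, "%x:%x.%x", bus, slot, func) == 3)
		return 0;
	return -1;
}

int main(void)
{
	int seg, bus, slot, func;

	if (!parse_devid("0000:01:00.0", &seg, &bus, &slot, &func))
		printf("%04x:%02x:%02x.%x\n", seg, bus, slot, func);
	if (!parse_devid("01:00.0", &seg, &bus, &slot, &func))
		printf("%04x:%02x:%02x.%x\n", seg, bus, slot, func);
	return 0;
}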
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 9c17dfa76703..7b5af6176de9 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -152,6 +152,8 @@ bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1;
+/* Host page table level */
+u8 amd_iommu_hpt_level;
/* Guest page table level */
int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
@@ -168,6 +170,9 @@ static int amd_iommu_target_ivhd_type;
u64 amd_iommu_efr;
u64 amd_iommu_efr2;
+/* Host (v1) page table is not supported */
+bool amd_iommu_hatdis;
+
/* SNP is enabled on the system? */
bool amd_iommu_snp_en;
EXPORT_SYMBOL(amd_iommu_snp_en);
@@ -1792,6 +1797,11 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+ if (h->efr_attr & BIT(IOMMU_IVHD_ATTR_HATDIS_SHIFT)) {
+ pr_warn_once("Host Address Translation is not supported.\n");
+ amd_iommu_hatdis = true;
+ }
+
early_iommu_features_init(iommu, h);
break;
@@ -2112,7 +2122,15 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
return ret;
}
- iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
+ ret = iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
+ if (ret || amd_iommu_pgtable == PD_MODE_NONE) {
+ /*
+ * Remove sysfs if DMA translation is not supported by the
+ * IOMMU. Do not return an error, so that IRQ remapping can still
+ * be enabled in state_next(); DTE[V, TV] must eventually be set to 0.
+ */
+ iommu_device_sysfs_remove(&iommu->iommu);
+ }
return pci_enable_device(iommu->dev);
}
@@ -2573,7 +2591,7 @@ static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
u32 devid;
struct dev_table_entry *dev_table = pci_seg->dev_table;
- if (dev_table == NULL)
+ if (!dev_table || amd_iommu_pgtable == PD_MODE_NONE)
return;
for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
@@ -3033,6 +3051,7 @@ static int __init early_amd_iommu_init(void)
struct acpi_table_header *ivrs_base;
int ret;
acpi_status status;
+ u8 efr_hats;
if (!amd_iommu_detected)
return -ENODEV;
@@ -3077,6 +3096,19 @@ static int __init early_amd_iommu_init(void)
FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
+ efr_hats = FIELD_GET(FEATURE_HATS, amd_iommu_efr);
+ if (efr_hats != 0x3) {
+ /*
+ * The EFR[HATS] bits specify the maximum host translation level
+ * supported, encoded as an offset from the initial 4-level maximum.
+ */
+ amd_iommu_hpt_level = efr_hats + PAGE_MODE_4_LEVEL;
+ } else {
+ pr_warn_once(FW_BUG "Disabling host address translation due to invalid translation level (%#x).\n",
+ efr_hats);
+ amd_iommu_hatdis = true;
+ }
+
if (amd_iommu_pgtable == PD_MODE_V2) {
if (!amd_iommu_v2_pgtbl_supported()) {
pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
@@ -3084,6 +3116,17 @@ static int __init early_amd_iommu_init(void)
}
}
+ if (amd_iommu_hatdis) {
+ /*
+ * Host (v1) page table is not available. Attempt to use
+ * Guest (v2) page table.
+ */
+ if (amd_iommu_v2_pgtbl_supported())
+ amd_iommu_pgtable = PD_MODE_V2;
+ else
+ amd_iommu_pgtable = PD_MODE_NONE;
+ }
+
/* Disable any previously enabled IOMMUs */
if (!is_kdump_kernel() || amd_iommu_disabled)
disable_iommus();
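
A small sketch of the EFR[HATS] decoding added above, under the stated assumption that the two-bit field encodes the maximum host translation level as an offset from 4-level paging (PAGE_MODE_4_LEVEL is taken to be 4 here) and that the all-ones value is invalid:

#include <stdio.h>

#define PAGE_MODE_4_LEVEL 4 /* assumed to mirror the kernel constant */

/* Returns the maximum host page-table level, or 0 when host address
 * translation must be disabled (the reserved HATS value 0x3). */
static unsigned int decode_hats(unsigned int efr_hats)
{
	if (efr_hats == 0x3)
		return 0;
	return efr_hats + PAGE_MODE_4_LEVEL;
}

int main(void)
{
	for (unsigned int hats = 0; hats <= 3; hats++)
		printf("HATS=%u -> %u-level (0 = disabled)\n",
		       hats, decode_hats(hats));
	return 0;
}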
@@ -3376,7 +3419,6 @@ int amd_iommu_enable_faulting(unsigned int cpu)
*/
static int __init amd_iommu_init(void)
{
- struct amd_iommu *iommu;
int ret;
ret = iommu_go_to_state(IOMMU_INITIALIZED);
@@ -3390,8 +3432,8 @@ static int __init amd_iommu_init(void)
}
#endif
- for_each_iommu(iommu)
- amd_iommu_debugfs_setup(iommu);
+ if (!ret)
+ amd_iommu_debugfs_setup();
return ret;
}
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 4d308c071134..a91e71f981ef 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -125,7 +125,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
goto out;
ret = false;
- if (WARN_ON_ONCE(pgtable->mode == PAGE_MODE_6_LEVEL))
+ if (WARN_ON_ONCE(pgtable->mode == amd_iommu_hpt_level))
goto out;
*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
@@ -526,7 +526,7 @@ static void v1_free_pgtable(struct io_pgtable *iop)
/* Page-table is not visible to IOMMU anymore, so free it */
BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
- pgtable->mode > PAGE_MODE_6_LEVEL);
+ pgtable->mode > amd_iommu_hpt_level);
free_sub_pt(pgtable->root, pgtable->mode, &freelist);
iommu_put_pages_list(&freelist);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 3117d99cf83d..eb348c63a8d0 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -25,6 +25,7 @@
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/msi.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
@@ -63,13 +64,6 @@ static const struct iommu_dirty_ops amd_dirty_ops;
int amd_iommu_max_glx_val = -1;
/*
- * general struct to manage commands send to an IOMMU
- */
-struct iommu_cmd {
- u32 data[4];
-};
-
-/*
* AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
* to know which ones are already in use.
*/
@@ -634,8 +628,8 @@ static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
static void pdev_enable_caps(struct pci_dev *pdev)
{
- pdev_enable_cap_ats(pdev);
pdev_enable_cap_pasid(pdev);
+ pdev_enable_cap_ats(pdev);
pdev_enable_cap_pri(pdev);
}
@@ -2424,6 +2418,13 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
pci_max_pasids(to_pci_dev(dev)));
}
+ if (amd_iommu_pgtable == PD_MODE_NONE) {
+ pr_warn_once("%s: DMA translation not supported by iommu.\n",
+ __func__);
+ iommu_dev = ERR_PTR(-ENODEV);
+ goto out_err;
+ }
+
out_err:
iommu_completion_wait(iommu);
@@ -2511,6 +2512,9 @@ static int pdom_setup_pgtable(struct protection_domain *domain,
case PD_MODE_V2:
fmt = AMD_IOMMU_V2;
break;
+ case PD_MODE_NONE:
+ WARN_ON_ONCE(1);
+ return -EPERM;
}
domain->iop.pgtbl.cfg.amd.nid = dev_to_node(dev);
@@ -2524,14 +2528,30 @@ static int pdom_setup_pgtable(struct protection_domain *domain,
static inline u64 dma_max_address(enum protection_domain_mode pgtable)
{
if (pgtable == PD_MODE_V1)
- return ~0ULL;
+ return PM_LEVEL_SIZE(amd_iommu_hpt_level);
- /* V2 with 4/5 level page table */
- return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
+ /*
+ * V2 with 4/5 level page table. Note that "2.2.6.5 AMD64 4-Kbyte Page
+ * Translation" shows that the V2 table sign extends the top of the
+ * address space creating a reserved region in the middle of the
+ * translation, just like the CPU does. Further Vasant says the docs are
+ * incomplete and this only applies to non-zero PASIDs. If the AMDv2
+ * page table is assigned to the 0 PASID then there is no sign extension
+ * check.
+ *
+ * Since the IOMMU must have a fixed geometry, and the core code does
+ * not understand sign extended addressing, we have to chop off the high
+ * bit to get consistent behavior with attachments of the domain to any
+ * PASID.
+ */
+ return ((1ULL << (PM_LEVEL_SHIFT(amd_iommu_gpt_level) - 1)) - 1);
}
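
A worked sketch of the geometry math in dma_max_address() above, assuming PM_LEVEL_SHIFT(l) expands to 12 + 9*l (an assumption consistent with 4 KiB pages and 9 address bits per level): chopping the sign-extension bit leaves a 2^47 - 1 aperture for a 4-level V2 table and 2^56 - 1 for a 5-level one.

#include <stdio.h>

/* Assumed to mirror the kernel macro: 12 page-offset bits + 9 bits/level. */
#define PM_LEVEL_SHIFT(l) (12 + 9 * (l))

/* Drop the top (sign-extended) bit so the aperture is fixed and
 * PASID-independent, as dma_max_address() does for PD_MODE_V2. */
static unsigned long long v2_max_address(int gpt_level)
{
	return (1ULL << (PM_LEVEL_SHIFT(gpt_level) - 1)) - 1;
}

int main(void)
{
	printf("4-level V2 max address: 0x%llx\n", v2_max_address(4)); /* 2^47-1 */
	printf("5-level V2 max address: 0x%llx\n", v2_max_address(5)); /* 2^56-1 */
	return 0;
}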
static bool amd_iommu_hd_support(struct amd_iommu *iommu)
{
+ if (amd_iommu_hatdis)
+ return false;
+
return iommu && (iommu->features & FEATURE_HDSUP);
}
@@ -3804,13 +3824,70 @@ static const struct irq_domain_ops amd_ir_domain_ops = {
.deactivate = irq_remapping_deactivate,
};
-int amd_iommu_activate_guest_mode(void *data)
+static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu,
+ bool ga_log_intr)
+{
+ if (cpu >= 0) {
+ entry->lo.fields_vapic.destination =
+ APICID_TO_IRTE_DEST_LO(cpu);
+ entry->hi.fields.destination =
+ APICID_TO_IRTE_DEST_HI(cpu);
+ entry->lo.fields_vapic.is_run = true;
+ entry->lo.fields_vapic.ga_log_intr = false;
+ } else {
+ entry->lo.fields_vapic.is_run = false;
+ entry->lo.fields_vapic.ga_log_intr = ga_log_intr;
+ }
+}
+
+/*
+ * Update the pCPU information for an IRTE that is configured to post IRQs to
+ * a vCPU, without issuing an IOMMU invalidation for the IRTE.
+ *
+ * If the vCPU is associated with a pCPU (@cpu >= 0), configure the Destination
+ * with the pCPU's APIC ID, set IsRun, and clear GALogIntr. If the vCPU isn't
+ * associated with a pCPU (@cpu < 0), clear IsRun and set/clear GALogIntr based
+ * on input from the caller (e.g. KVM only requests GALogIntr when the vCPU is
+ * blocking and requires a notification wake event). I.e. treat vCPUs that are
+ * associated with a pCPU as running. This API is intended to be used when a
+ * vCPU is scheduled in/out (or stops running for any reason), to do a fast
+ * update of IsRun, GALogIntr, and (conditionally) Destination.
+ *
+ * Per the IOMMU spec, the Destination, IsRun, and GATag fields are not cached
+ * and thus don't require an invalidation to ensure the IOMMU consumes fresh
+ * information.
+ */
+int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
+{
+ struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+ struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+
+ if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
+ return -EINVAL;
+
+ if (!entry || !entry->lo.fields_vapic.guest_mode)
+ return 0;
+
+ if (!ir_data->iommu)
+ return -ENODEV;
+
+ __amd_iommu_update_ga(entry, cpu, ga_log_intr);
+
+ return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+ ir_data->irq_2_irte.index, entry);
+}
+EXPORT_SYMBOL(amd_iommu_update_ga);
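
To illustrate the state transitions the comment describes, here is a standalone model of the effect of __amd_iommu_update_ga() on the IRTE fields (plain C with a mock structure; the real code writes a hardware IRTE through __modify_irte_ga()):

#include <stdbool.h>
#include <stdio.h>

/* Mock of the IRTE fields touched by __amd_iommu_update_ga(). */
struct irte_state {
	int destination;   /* pCPU APIC ID, meaningful only when is_run */
	bool is_run;
	bool ga_log_intr;
};

/* cpu >= 0: vCPU is loaded on a pCPU; cpu < 0: vCPU is scheduled out. */
static void update_ga(struct irte_state *e, int cpu, bool ga_log_intr)
{
	if (cpu >= 0) {
		e->destination = cpu;
		e->is_run = true;
		e->ga_log_intr = false;  /* running vCPUs need no wake event */
	} else {
		e->is_run = false;
		e->ga_log_intr = ga_log_intr;  /* set only while blocking */
	}
}

int main(void)
{
	struct irte_state e = { 0 };

	update_ga(&e, 5, false);  /* vCPU scheduled in on pCPU 5 */
	printf("run=%d dest=%d log=%d\n", e.is_run, e.destination, e.ga_log_intr);
	update_ga(&e, -1, true);  /* vCPU blocks and wants a wake event */
	printf("run=%d dest=%d log=%d\n", e.is_run, e.destination, e.ga_log_intr);
	return 0;
}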
+
+int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
{
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
u64 valid;
- if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry)
+ if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
+ return -EINVAL;
+
+ if (!entry)
return 0;
valid = entry->lo.fields_vapic.valid;
@@ -3820,11 +3897,12 @@ int amd_iommu_activate_guest_mode(void *data)
entry->lo.fields_vapic.valid = valid;
entry->lo.fields_vapic.guest_mode = 1;
- entry->lo.fields_vapic.ga_log_intr = 1;
entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
entry->hi.fields.vector = ir_data->ga_vector;
entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
+ __amd_iommu_update_ga(entry, cpu, ga_log_intr);
+
return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry);
}
@@ -3837,8 +3915,10 @@ int amd_iommu_deactivate_guest_mode(void *data)
struct irq_cfg *cfg = ir_data->cfg;
u64 valid;
- if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
- !entry || !entry->lo.fields_vapic.guest_mode)
+ if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
+ return -EINVAL;
+
+ if (!entry || !entry->lo.fields_vapic.guest_mode)
return 0;
valid = entry->lo.fields_remap.valid;
@@ -3860,11 +3940,10 @@ int amd_iommu_deactivate_guest_mode(void *data)
}
EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
-static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *info)
{
int ret;
- struct amd_iommu_pi_data *pi_data = vcpu_info;
- struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
+ struct amd_iommu_pi_data *pi_data = info;
struct amd_ir_data *ir_data = data->chip_data;
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct iommu_dev_data *dev_data;
@@ -3885,25 +3964,20 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
return -EINVAL;
ir_data->cfg = irqd_cfg(data);
- pi_data->ir_data = ir_data;
- pi_data->prev_ga_tag = ir_data->cached_ga_tag;
- if (pi_data->is_guest_mode) {
- ir_data->ga_root_ptr = (pi_data->base >> 12);
- ir_data->ga_vector = vcpu_pi_info->vector;
+ if (pi_data) {
+ pi_data->ir_data = ir_data;
+
+ ir_data->ga_root_ptr = (pi_data->vapic_addr >> 12);
+ ir_data->ga_vector = pi_data->vector;
ir_data->ga_tag = pi_data->ga_tag;
- ret = amd_iommu_activate_guest_mode(ir_data);
- if (!ret)
- ir_data->cached_ga_tag = pi_data->ga_tag;
+ if (pi_data->is_guest_mode)
+ ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu,
+ pi_data->ga_log_intr);
+ else
+ ret = amd_iommu_deactivate_guest_mode(ir_data);
} else {
ret = amd_iommu_deactivate_guest_mode(ir_data);
-
- /*
- * This communicates the ga_tag back to the caller
- * so that it can do all the necessary clean up.
- */
- if (!ret)
- ir_data->cached_ga_tag = 0;
}
return ret;
@@ -3970,54 +4044,30 @@ static struct irq_chip amd_ir_chip = {
static const struct msi_parent_ops amdvi_msi_parent_ops = {
.supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI,
+ .bus_select_token = DOMAIN_BUS_AMDVI,
+ .bus_select_mask = MATCH_PCI_MSI,
.prefix = "IR-",
.init_dev_msi_info = msi_parent_init_dev_msi_info,
};
int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
- struct fwnode_handle *fn;
+ struct irq_domain_info info = {
+ .fwnode = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index),
+ .ops = &amd_ir_domain_ops,
+ .domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI,
+ .host_data = iommu,
+ .parent = arch_get_ir_parent_domain(),
+ };
- fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
- if (!fn)
+ if (!info.fwnode)
return -ENOMEM;
- iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0,
- fn, &amd_ir_domain_ops, iommu);
+
+ iommu->ir_domain = msi_create_parent_irq_domain(&info, &amdvi_msi_parent_ops);
if (!iommu->ir_domain) {
- irq_domain_free_fwnode(fn);
+ irq_domain_free_fwnode(info.fwnode);
return -ENOMEM;
}
-
- irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI);
- iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
- IRQ_DOMAIN_FLAG_ISOLATED_MSI;
- iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops;
-
return 0;
}
-
-int amd_iommu_update_ga(int cpu, bool is_run, void *data)
-{
- struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
- struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
-
- if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
- !entry || !entry->lo.fields_vapic.guest_mode)
- return 0;
-
- if (!ir_data->iommu)
- return -ENODEV;
-
- if (cpu >= 0) {
- entry->lo.fields_vapic.destination =
- APICID_TO_IRTE_DEST_LO(cpu);
- entry->hi.fields.destination =
- APICID_TO_IRTE_DEST_HI(cpu);
- }
- entry->lo.fields_vapic.is_run = is_run;
-
- return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
- ir_data->irq_2_irte.index, entry);
-}
-EXPORT_SYMBOL(amd_iommu_update_ga);
#endif
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 757d24f67ad4..190f28d76615 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -991,7 +991,6 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.of_xlate = apple_dart_of_xlate,
.def_domain_type = apple_dart_def_domain_type,
.get_resv_regions = apple_dart_get_resv_regions,
- .pgsize_bitmap = -1UL, /* Restricted during dart probe */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = apple_dart_attach_dev_paging,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index e4fd8d522af8..8cd8929bbfdf 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -7,13 +7,22 @@
#include "arm-smmu-v3.h"
-void *arm_smmu_hw_info(struct device *dev, u32 *length, u32 *type)
+void *arm_smmu_hw_info(struct device *dev, u32 *length,
+ enum iommu_hw_info_type *type)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ const struct arm_smmu_impl_ops *impl_ops = master->smmu->impl_ops;
struct iommu_hw_info_arm_smmuv3 *info;
u32 __iomem *base_idr;
unsigned int i;
+ if (*type != IOMMU_HW_INFO_TYPE_DEFAULT &&
+ *type != IOMMU_HW_INFO_TYPE_ARM_SMMUV3) {
+ if (!impl_ops || !impl_ops->hw_info)
+ return ERR_PTR(-EOPNOTSUPP);
+ return impl_ops->hw_info(master->smmu, length, type);
+ }
+
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
@@ -216,7 +225,7 @@ static int arm_smmu_validate_vste(struct iommu_hwpt_arm_smmuv3 *arg,
return 0;
}
-static struct iommu_domain *
+struct iommu_domain *
arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
const struct iommu_user_data *user_data)
{
@@ -327,8 +336,8 @@ static int arm_vsmmu_convert_user_cmd(struct arm_vsmmu *vsmmu,
return 0;
}
-static int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu,
- struct iommu_user_data_array *array)
+int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu,
+ struct iommu_user_data_array *array)
{
struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
struct arm_smmu_device *smmu = vsmmu->smmu;
@@ -382,25 +391,14 @@ static const struct iommufd_viommu_ops arm_vsmmu_ops = {
.cache_invalidate = arm_vsmmu_cache_invalidate,
};
-struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
- struct iommu_domain *parent,
- struct iommufd_ctx *ictx,
- unsigned int viommu_type)
+size_t arm_smmu_get_viommu_size(struct device *dev,
+ enum iommu_viommu_type viommu_type)
{
- struct arm_smmu_device *smmu =
- iommu_get_iommu_dev(dev, struct arm_smmu_device, iommu);
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- struct arm_smmu_domain *s2_parent = to_smmu_domain(parent);
- struct arm_vsmmu *vsmmu;
-
- if (viommu_type != IOMMU_VIOMMU_TYPE_ARM_SMMUV3)
- return ERR_PTR(-EOPNOTSUPP);
+ struct arm_smmu_device *smmu = master->smmu;
if (!(smmu->features & ARM_SMMU_FEAT_NESTING))
- return ERR_PTR(-EOPNOTSUPP);
-
- if (s2_parent->smmu != master->smmu)
- return ERR_PTR(-EINVAL);
+ return 0;
/*
* FORCE_SYNC is not set with FEAT_NESTING. Some study of the exact HW
@@ -408,7 +406,7 @@ struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
* any change to remove this.
*/
if (WARN_ON(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC))
- return ERR_PTR(-EOPNOTSUPP);
+ return 0;
/*
* Must support some way to prevent the VM from bypassing the cache
@@ -420,19 +418,39 @@ struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
*/
if (!arm_smmu_master_canwbs(master) &&
!(smmu->features & ARM_SMMU_FEAT_S2FWB))
- return ERR_PTR(-EOPNOTSUPP);
+ return 0;
- vsmmu = iommufd_viommu_alloc(ictx, struct arm_vsmmu, core,
- &arm_vsmmu_ops);
- if (IS_ERR(vsmmu))
- return ERR_CAST(vsmmu);
+ if (viommu_type == IOMMU_VIOMMU_TYPE_ARM_SMMUV3)
+ return VIOMMU_STRUCT_SIZE(struct arm_vsmmu, core);
+
+ if (!smmu->impl_ops || !smmu->impl_ops->get_viommu_size)
+ return 0;
+ return smmu->impl_ops->get_viommu_size(viommu_type);
+}
+
+int arm_vsmmu_init(struct iommufd_viommu *viommu,
+ struct iommu_domain *parent_domain,
+ const struct iommu_user_data *user_data)
+{
+ struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
+ struct arm_smmu_device *smmu =
+ container_of(viommu->iommu_dev, struct arm_smmu_device, iommu);
+ struct arm_smmu_domain *s2_parent = to_smmu_domain(parent_domain);
+
+ if (s2_parent->smmu != smmu)
+ return -EINVAL;
vsmmu->smmu = smmu;
vsmmu->s2_parent = s2_parent;
/* FIXME Move VMID allocation from the S2 domain allocation to here */
vsmmu->vmid = s2_parent->s2_cfg.vmid;
- return &vsmmu->core;
+ if (viommu->type == IOMMU_VIOMMU_TYPE_ARM_SMMUV3) {
+ viommu->ops = &arm_vsmmu_ops;
+ return 0;
+ }
+
+ return smmu->impl_ops->vsmmu_init(vsmmu, user_data);
}
int arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster, u64 *evt)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 0601dece0a0d..59a480974d80 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -220,6 +220,9 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
feat_mask |= ARM_SMMU_FEAT_VAX;
}
+ if (system_supports_bbml2_noabort())
+ feat_mask |= ARM_SMMU_FEAT_BBML2;
+
if ((smmu->features & feat_mask) != feat_mask)
return false;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 10cc6dc26b7b..5968043ac802 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -38,7 +38,7 @@ module_param(disable_msipolling, bool, 0444);
MODULE_PARM_DESC(disable_msipolling,
"Disable MSI-based polling for CMD_SYNC completion.");
-static struct iommu_ops arm_smmu_ops;
+static const struct iommu_ops arm_smmu_ops;
static struct iommu_dirty_ops arm_smmu_dirty_ops;
enum arm_smmu_msi_index {
@@ -2906,8 +2906,8 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
master_domain = kzalloc(sizeof(*master_domain), GFP_KERNEL);
if (!master_domain) {
- kfree(state->vmaster);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_vmaster;
}
master_domain->domain = new_domain;
master_domain->master = master;
@@ -2941,7 +2941,6 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
!arm_smmu_master_canwbs(master)) {
spin_unlock_irqrestore(&smmu_domain->devices_lock,
flags);
- kfree(state->vmaster);
ret = -EINVAL;
goto err_iopf;
}
@@ -2967,6 +2966,8 @@ err_iopf:
arm_smmu_disable_iopf(master, master_domain);
err_free_master_domain:
kfree(master_domain);
+err_free_vmaster:
+ kfree(state->vmaster);
return ret;
}
@@ -3674,7 +3675,7 @@ static int arm_smmu_def_domain_type(struct device *dev)
return 0;
}
-static struct iommu_ops arm_smmu_ops = {
+static const struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
@@ -3688,9 +3689,9 @@ static struct iommu_ops arm_smmu_ops = {
.get_resv_regions = arm_smmu_get_resv_regions,
.page_response = arm_smmu_page_response,
.def_domain_type = arm_smmu_def_domain_type,
- .viommu_alloc = arm_vsmmu_alloc,
+ .get_viommu_size = arm_smmu_get_viommu_size,
+ .viommu_init = arm_vsmmu_init,
.user_pasid_table = 1,
- .pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = arm_smmu_attach_dev,
@@ -4457,6 +4458,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
if (FIELD_GET(IDR3_FWB, reg))
smmu->features |= ARM_SMMU_FEAT_S2FWB;
+ if (FIELD_GET(IDR3_BBM, reg) == 2)
+ smmu->features |= ARM_SMMU_FEAT_BBML2;
+
/* IDR5 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
@@ -4504,11 +4508,6 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->oas = 48;
}
- if (arm_smmu_ops.pgsize_bitmap == -1UL)
- arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
- else
- arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
-
/* Set the DMA mask for our table walker */
if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
dev_warn(smmu->dev,
@@ -4702,6 +4701,7 @@ static void arm_smmu_impl_remove(void *data)
static struct arm_smmu_device *arm_smmu_impl_probe(struct arm_smmu_device *smmu)
{
struct arm_smmu_device *new_smmu = ERR_PTR(-ENODEV);
+ const struct arm_smmu_impl_ops *ops;
int ret;
if (smmu->impl_dev && (smmu->options & ARM_SMMU_OPT_TEGRA241_CMDQV))
@@ -4712,11 +4712,24 @@ static struct arm_smmu_device *arm_smmu_impl_probe(struct arm_smmu_device *smmu)
if (IS_ERR(new_smmu))
return new_smmu;
+ ops = new_smmu->impl_ops;
+ if (ops) {
+ /* get_viommu_size and vsmmu_init ops must be paired */
+ if (WARN_ON(!ops->get_viommu_size != !ops->vsmmu_init)) {
+ ret = -EINVAL;
+ goto err_remove;
+ }
+ }
+
ret = devm_add_action_or_reset(new_smmu->dev, arm_smmu_impl_remove,
new_smmu);
if (ret)
return ERR_PTR(ret);
return new_smmu;
+
+err_remove:
+ arm_smmu_impl_remove(new_smmu);
+ return ERR_PTR(ret);
}
static int arm_smmu_device_probe(struct platform_device *pdev)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index ea41d790463e..ae23aacc3840 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -16,6 +16,7 @@
#include <linux/sizes.h>
struct arm_smmu_device;
+struct arm_vsmmu;
/* MMIO registers */
#define ARM_SMMU_IDR0 0x0
@@ -60,6 +61,7 @@ struct arm_smmu_device;
#define ARM_SMMU_IDR3 0xc
#define IDR3_FWB (1 << 8)
#define IDR3_RIL (1 << 10)
+#define IDR3_BBM GENMASK(12, 11)
#define ARM_SMMU_IDR5 0x14
#define IDR5_STALL_MAX GENMASK(31, 16)
@@ -720,6 +722,16 @@ struct arm_smmu_impl_ops {
int (*init_structures)(struct arm_smmu_device *smmu);
struct arm_smmu_cmdq *(*get_secondary_cmdq)(
struct arm_smmu_device *smmu, struct arm_smmu_cmdq_ent *ent);
+ /*
+ * An implementation should define its own type other than the default
+ * IOMMU_HW_INFO_TYPE_ARM_SMMUV3, and it must validate the input @type
+ * before returning its own structure.
+ */
+ void *(*hw_info)(struct arm_smmu_device *smmu, u32 *length,
+ enum iommu_hw_info_type *type);
+ size_t (*get_viommu_size)(enum iommu_viommu_type viommu_type);
+ int (*vsmmu_init)(struct arm_vsmmu *vsmmu,
+ const struct iommu_user_data *user_data);
};
/* An SMMUv3 instance */
@@ -755,6 +767,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_HA (1 << 21)
#define ARM_SMMU_FEAT_HD (1 << 22)
#define ARM_SMMU_FEAT_S2FWB (1 << 23)
+#define ARM_SMMU_FEAT_BBML2 (1 << 24)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
@@ -1033,19 +1046,29 @@ struct arm_vsmmu {
};
#if IS_ENABLED(CONFIG_ARM_SMMU_V3_IOMMUFD)
-void *arm_smmu_hw_info(struct device *dev, u32 *length, u32 *type);
-struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
- struct iommu_domain *parent,
- struct iommufd_ctx *ictx,
- unsigned int viommu_type);
+void *arm_smmu_hw_info(struct device *dev, u32 *length,
+ enum iommu_hw_info_type *type);
+size_t arm_smmu_get_viommu_size(struct device *dev,
+ enum iommu_viommu_type viommu_type);
+int arm_vsmmu_init(struct iommufd_viommu *viommu,
+ struct iommu_domain *parent_domain,
+ const struct iommu_user_data *user_data);
int arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
struct arm_smmu_nested_domain *nested_domain);
void arm_smmu_attach_commit_vmaster(struct arm_smmu_attach_state *state);
void arm_smmu_master_clear_vmaster(struct arm_smmu_master *master);
int arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster, u64 *evt);
+struct iommu_domain *
+arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
+ const struct iommu_user_data *user_data);
+int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu,
+ struct iommu_user_data_array *array);
#else
+#define arm_smmu_get_viommu_size NULL
#define arm_smmu_hw_info NULL
-#define arm_vsmmu_alloc NULL
+#define arm_vsmmu_init NULL
+#define arm_vsmmu_alloc_domain_nested NULL
+#define arm_vsmmu_cache_invalidate NULL
static inline int
arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index dd7d030d2e89..be1aaaf8cd17 100644
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -8,7 +8,9 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
+#include <linux/iommufd.h>
#include <linux/iopoll.h>
+#include <uapi/linux/iommufd.h>
#include <acpi/acpixf.h>
@@ -26,8 +28,10 @@
#define CMDQV_EN BIT(0)
#define TEGRA241_CMDQV_PARAM 0x0004
+#define CMDQV_NUM_SID_PER_VM_LOG2 GENMASK(15, 12)
#define CMDQV_NUM_VINTF_LOG2 GENMASK(11, 8)
#define CMDQV_NUM_VCMDQ_LOG2 GENMASK(7, 4)
+#define CMDQV_VER GENMASK(3, 0)
#define TEGRA241_CMDQV_STATUS 0x0008
#define CMDQV_ENABLED BIT(0)
@@ -53,6 +57,9 @@
#define VINTF_STATUS GENMASK(3, 1)
#define VINTF_ENABLED BIT(0)
+#define TEGRA241_VINTF_SID_MATCH(s) (0x0040 + 0x4*(s))
+#define TEGRA241_VINTF_SID_REPLACE(s) (0x0080 + 0x4*(s))
+
#define TEGRA241_VINTF_LVCMDQ_ERR_MAP_64(m) \
(0x00C0 + 0x8*(m))
#define LVCMDQ_ERR_MAP_NUM_64 2
@@ -114,16 +121,20 @@ MODULE_PARM_DESC(bypass_vcmdq,
/**
* struct tegra241_vcmdq - Virtual Command Queue
+ * @core: Embedded iommufd_hw_queue structure
* @idx: Global index in the CMDQV
* @lidx: Local index in the VINTF
* @enabled: Enable status
* @cmdqv: Parent CMDQV pointer
* @vintf: Parent VINTF pointer
+ * @prev: Previous LVCMDQ to depend on
* @cmdq: Command Queue struct
* @page0: MMIO Page0 base address
* @page1: MMIO Page1 base address
*/
struct tegra241_vcmdq {
+ struct iommufd_hw_queue core;
+
u16 idx;
u16 lidx;
@@ -131,22 +142,30 @@ struct tegra241_vcmdq {
struct tegra241_cmdqv *cmdqv;
struct tegra241_vintf *vintf;
+ struct tegra241_vcmdq *prev;
struct arm_smmu_cmdq cmdq;
void __iomem *page0;
void __iomem *page1;
};
+#define hw_queue_to_vcmdq(v) container_of(v, struct tegra241_vcmdq, core)
/**
* struct tegra241_vintf - Virtual Interface
+ * @vsmmu: Embedded arm_vsmmu structure
* @idx: Global index in the CMDQV
* @enabled: Enable status
* @hyp_own: Owned by hypervisor (in-kernel)
* @cmdqv: Parent CMDQV pointer
* @lvcmdqs: List of logical VCMDQ pointers
+ * @lvcmdq_mutex: Lock to serialize user-allocated lvcmdqs
* @base: MMIO base address
+ * @mmap_offset: Offset argument for mmap() syscall
+ * @sids: Stream ID mapping resources
*/
struct tegra241_vintf {
+ struct arm_vsmmu vsmmu;
+
u16 idx;
bool enabled;
@@ -154,19 +173,41 @@ struct tegra241_vintf {
struct tegra241_cmdqv *cmdqv;
struct tegra241_vcmdq **lvcmdqs;
+ struct mutex lvcmdq_mutex; /* user space race */
void __iomem *base;
+ unsigned long mmap_offset;
+
+ struct ida sids;
};
+#define viommu_to_vintf(v) container_of(v, struct tegra241_vintf, vsmmu.core)
+
+/**
+ * struct tegra241_vintf_sid - Virtual Interface Stream ID Mapping
+ * @core: Embedded iommufd_vdevice structure, holding virtual Stream ID
+ * @vintf: Parent VINTF pointer
+ * @sid: Physical Stream ID
+ * @idx: Mapping index in the VINTF
+ */
+struct tegra241_vintf_sid {
+ struct iommufd_vdevice core;
+ struct tegra241_vintf *vintf;
+ u32 sid;
+ u8 idx;
+};
+#define vdev_to_vsid(v) container_of(v, struct tegra241_vintf_sid, core)
/**
* struct tegra241_cmdqv - CMDQ-V for SMMUv3
* @smmu: SMMUv3 device
* @dev: CMDQV device
* @base: MMIO base address
+ * @base_phys: MMIO physical base address, for mmap
* @irq: IRQ number
* @num_vintfs: Total number of VINTFs
* @num_vcmdqs: Total number of VCMDQs
* @num_lvcmdqs_per_vintf: Number of logical VCMDQs per VINTF
+ * @num_sids_per_vintf: Total number of SID mappings per VINTF
* @vintf_ids: VINTF id allocator
* @vintfs: List of VINTFs
*/
@@ -175,12 +216,14 @@ struct tegra241_cmdqv {
struct device *dev;
void __iomem *base;
+ phys_addr_t base_phys;
int irq;
/* CMDQV Hardware Params */
u16 num_vintfs;
u16 num_vcmdqs;
u16 num_lvcmdqs_per_vintf;
+ u16 num_sids_per_vintf;
struct ida vintf_ids;
@@ -252,6 +295,20 @@ static inline int vcmdq_write_config(struct tegra241_vcmdq *vcmdq, u32 regval)
/* ISR Functions */
+static void tegra241_vintf_user_handle_error(struct tegra241_vintf *vintf)
+{
+ struct iommufd_viommu *viommu = &vintf->vsmmu.core;
+ struct iommu_vevent_tegra241_cmdqv vevent_data;
+ int i;
+
+ for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++)
+ vevent_data.lvcmdq_err_map[i] =
+ readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));
+
+ iommufd_viommu_report_event(viommu, IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV,
+ &vevent_data, sizeof(vevent_data));
+}
+
static void tegra241_vintf0_handle_error(struct tegra241_vintf *vintf)
{
int i;
@@ -297,6 +354,14 @@ static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid)
vintf_map &= ~BIT_ULL(0);
}
+ /* Handle other user VINTFs and their LVCMDQs */
+ while (vintf_map) {
+ unsigned long idx = __ffs64(vintf_map);
+
+ tegra241_vintf_user_handle_error(cmdqv->vintfs[idx]);
+ vintf_map &= ~BIT_ULL(idx);
+ }
+
return IRQ_HANDLED;
}
@@ -351,6 +416,30 @@ tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
/* HW Reset Functions */
+/*
+ * When a guest-owned VCMDQ is disabled, if the guest left a timed-out
+ * ATC_INV command at the end of its queue without a following CMD_SYNC,
+ * the TIMEOUT will not be reported until this VCMDQ gets assigned to the
+ * next VM; that false alarm could cause unwanted behavior in the new VM.
+ * Thus, a guest-owned VCMDQ must flush the TIMEOUT when it gets disabled,
+ * which can be done by simply issuing a CMD_SYNC to the SMMU CMDQ.
+ */
+static void tegra241_vcmdq_hw_flush_timeout(struct tegra241_vcmdq *vcmdq)
+{
+ struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
+ u64 cmd_sync[CMDQ_ENT_DWORDS] = {};
+
+ cmd_sync[0] = FIELD_PREP(CMDQ_0_OP, CMDQ_OP_CMD_SYNC) |
+ FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_NONE);
+
+ /*
+ * It does not hurt to insert another CMD_SYNC, taking advantage of
+ * arm_smmu_cmdq_issue_cmdlist(), which waits for CMD_SYNC completion.
+ */
+ arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, cmd_sync, 1, true);
+}
+
+/* This function is for LVCMDQ, so @vcmdq must not be unmapped yet */
static void tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq)
{
char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
@@ -363,6 +452,8 @@ static void tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq)
readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
}
+ tegra241_vcmdq_hw_flush_timeout(vcmdq);
+
writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, PROD));
writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, CONS));
writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, BASE));
@@ -379,6 +470,7 @@ static void tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq)
dev_dbg(vcmdq->cmdqv->dev, "%sdeinited\n", h);
}
+/* This function is for LVCMDQ, so @vcmdq must already be mapped */
static int tegra241_vcmdq_hw_init(struct tegra241_vcmdq *vcmdq)
{
char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
@@ -404,14 +496,45 @@ static int tegra241_vcmdq_hw_init(struct tegra241_vcmdq *vcmdq)
return 0;
}
+/* Unmap a global VCMDQ from the pre-assigned LVCMDQ */
+static void tegra241_vcmdq_unmap_lvcmdq(struct tegra241_vcmdq *vcmdq)
+{
+ u32 regval = readl(REG_CMDQV(vcmdq->cmdqv, CMDQ_ALLOC(vcmdq->idx)));
+ char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
+
+ writel(regval & ~CMDQV_CMDQ_ALLOCATED,
+ REG_CMDQV(vcmdq->cmdqv, CMDQ_ALLOC(vcmdq->idx)));
+ dev_dbg(vcmdq->cmdqv->dev, "%sunmapped\n", h);
+}
+
static void tegra241_vintf_hw_deinit(struct tegra241_vintf *vintf)
{
- u16 lidx;
+ u16 lidx = vintf->cmdqv->num_lvcmdqs_per_vintf;
+ int sidx;
- for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
- if (vintf->lvcmdqs && vintf->lvcmdqs[lidx])
+ /* HW requires LVCMDQs to be unmapped in descending order */
+ while (lidx--) {
+ if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
+ tegra241_vcmdq_unmap_lvcmdq(vintf->lvcmdqs[lidx]);
+ }
+ }
vintf_write_config(vintf, 0);
+ for (sidx = 0; sidx < vintf->cmdqv->num_sids_per_vintf; sidx++) {
+ writel(0, REG_VINTF(vintf, SID_MATCH(sidx)));
+ writel(0, REG_VINTF(vintf, SID_REPLACE(sidx)));
+ }
+}
+
+/* Map a global VCMDQ to the pre-assigned LVCMDQ */
+static void tegra241_vcmdq_map_lvcmdq(struct tegra241_vcmdq *vcmdq)
+{
+ u32 regval = readl(REG_CMDQV(vcmdq->cmdqv, CMDQ_ALLOC(vcmdq->idx)));
+ char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
+
+ writel(regval | CMDQV_CMDQ_ALLOCATED,
+ REG_CMDQV(vcmdq->cmdqv, CMDQ_ALLOC(vcmdq->idx)));
+ dev_dbg(vcmdq->cmdqv->dev, "%smapped\n", h);
}
static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
@@ -429,7 +552,8 @@ static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
* whether enabling it here or not, as !HYP_OWN cmdq HWs only support a
* restricted set of supported commands.
*/
- regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own);
+ regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own) |
+ FIELD_PREP(VINTF_VMID, vintf->vsmmu.vmid);
writel(regval, REG_VINTF(vintf, CONFIG));
ret = vintf_write_config(vintf, regval | VINTF_EN);
@@ -441,8 +565,10 @@ static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
*/
vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));
+ /* HW requires LVCMDQs to be mapped in ascending order */
for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
+ tegra241_vcmdq_map_lvcmdq(vintf->lvcmdqs[lidx]);
ret = tegra241_vcmdq_hw_init(vintf->lvcmdqs[lidx]);
if (ret) {
tegra241_vintf_hw_deinit(vintf);
@@ -476,7 +602,6 @@ static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
regval = FIELD_PREP(CMDQV_CMDQ_ALLOC_VINTF, idx);
regval |= FIELD_PREP(CMDQV_CMDQ_ALLOC_LVCMDQ, lidx);
- regval |= CMDQV_CMDQ_ALLOCATED;
writel_relaxed(regval,
REG_CMDQV(cmdqv, CMDQ_ALLOC(qidx++)));
}
@@ -555,7 +680,9 @@ static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
dev_dbg(vintf->cmdqv->dev,
"%sdeallocated\n", lvcmdq_error_header(vcmdq, header, 64));
- kfree(vcmdq);
+ /* Guest-owned VCMDQ is freed with the hw_queue by the iommufd core */
+ if (vcmdq->vintf->hyp_own)
+ kfree(vcmdq);
}
static struct tegra241_vcmdq *
@@ -628,28 +755,27 @@ static int tegra241_cmdqv_init_vintf(struct tegra241_cmdqv *cmdqv, u16 max_idx,
/* Remove Helpers */
-static void tegra241_vintf_remove_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
-{
- tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
- tegra241_vintf_free_lvcmdq(vintf, lidx);
-}
-
static void tegra241_cmdqv_remove_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
{
struct tegra241_vintf *vintf = cmdqv->vintfs[idx];
u16 lidx;
+ tegra241_vintf_hw_deinit(vintf);
+
/* Remove LVCMDQ resources */
for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
if (vintf->lvcmdqs[lidx])
- tegra241_vintf_remove_lvcmdq(vintf, lidx);
-
- /* Remove VINTF resources */
- tegra241_vintf_hw_deinit(vintf);
+ tegra241_vintf_free_lvcmdq(vintf, lidx);
dev_dbg(cmdqv->dev, "VINTF%u: deallocated\n", vintf->idx);
tegra241_cmdqv_deinit_vintf(cmdqv, idx);
- kfree(vintf);
+ if (!vintf->hyp_own) {
+ mutex_destroy(&vintf->lvcmdq_mutex);
+ ida_destroy(&vintf->sids);
+ /* Guest-owned VINTF is freed with the viommu by the iommufd core */
+ } else {
+ kfree(vintf);
+ }
}
static void tegra241_cmdqv_remove(struct arm_smmu_device *smmu)
@@ -677,10 +803,51 @@ static void tegra241_cmdqv_remove(struct arm_smmu_device *smmu)
put_device(cmdqv->dev); /* smmu->impl_dev */
}
+static int
+tegra241_cmdqv_init_vintf_user(struct arm_vsmmu *vsmmu,
+ const struct iommu_user_data *user_data);
+
+static void *tegra241_cmdqv_hw_info(struct arm_smmu_device *smmu, u32 *length,
+ enum iommu_hw_info_type *type)
+{
+ struct tegra241_cmdqv *cmdqv =
+ container_of(smmu, struct tegra241_cmdqv, smmu);
+ struct iommu_hw_info_tegra241_cmdqv *info;
+ u32 regval;
+
+ if (*type != IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ regval = readl_relaxed(REG_CMDQV(cmdqv, PARAM));
+ info->log2vcmdqs = ilog2(cmdqv->num_lvcmdqs_per_vintf);
+ info->log2vsids = ilog2(cmdqv->num_sids_per_vintf);
+ info->version = FIELD_GET(CMDQV_VER, regval);
+
+ *length = sizeof(*info);
+ *type = IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV;
+ return info;
+}
+
+static size_t tegra241_cmdqv_get_vintf_size(enum iommu_viommu_type viommu_type)
+{
+ if (viommu_type != IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV)
+ return 0;
+ return VIOMMU_STRUCT_SIZE(struct tegra241_vintf, vsmmu.core);
+}
+
static struct arm_smmu_impl_ops tegra241_cmdqv_impl_ops = {
+ /* For in-kernel use */
.get_secondary_cmdq = tegra241_cmdqv_get_cmdq,
.device_reset = tegra241_cmdqv_hw_reset,
.device_remove = tegra241_cmdqv_remove,
+ /* For user-space use */
+ .hw_info = tegra241_cmdqv_hw_info,
+ .get_viommu_size = tegra241_cmdqv_get_vintf_size,
+ .vsmmu_init = tegra241_cmdqv_init_vintf_user,
};
/* Probe Functions */
@@ -822,10 +989,12 @@ __tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res,
cmdqv->irq = irq;
cmdqv->base = base;
cmdqv->dev = smmu->impl_dev;
+ cmdqv->base_phys = res->start;
if (cmdqv->irq > 0) {
- ret = request_irq(irq, tegra241_cmdqv_isr, 0, "tegra241-cmdqv",
- cmdqv);
+ ret = request_threaded_irq(irq, NULL, tegra241_cmdqv_isr,
+ IRQF_ONESHOT, "tegra241-cmdqv",
+ cmdqv);
if (ret) {
dev_err(cmdqv->dev, "failed to request irq (%d): %d\n",
cmdqv->irq, ret);
@@ -837,6 +1006,8 @@ __tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res,
cmdqv->num_vintfs = 1 << FIELD_GET(CMDQV_NUM_VINTF_LOG2, regval);
cmdqv->num_vcmdqs = 1 << FIELD_GET(CMDQV_NUM_VCMDQ_LOG2, regval);
cmdqv->num_lvcmdqs_per_vintf = cmdqv->num_vcmdqs / cmdqv->num_vintfs;
+ cmdqv->num_sids_per_vintf =
+ 1 << FIELD_GET(CMDQV_NUM_SID_PER_VM_LOG2, regval);
cmdqv->vintfs =
kcalloc(cmdqv->num_vintfs, sizeof(*cmdqv->vintfs), GFP_KERNEL);
@@ -890,3 +1061,287 @@ out_fallback:
put_device(smmu->impl_dev);
return ERR_PTR(-ENODEV);
}
+
+/* User space VINTF and VCMDQ Functions */
+
+static size_t tegra241_vintf_get_vcmdq_size(struct iommufd_viommu *viommu,
+ enum iommu_hw_queue_type queue_type)
+{
+ if (queue_type != IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV)
+ return 0;
+ return HW_QUEUE_STRUCT_SIZE(struct tegra241_vcmdq, core);
+}
+
+static int tegra241_vcmdq_hw_init_user(struct tegra241_vcmdq *vcmdq)
+{
+ char header[64];
+
+ /* Configure the vcmdq only; User space does the enabling */
+ writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));
+
+ dev_dbg(vcmdq->cmdqv->dev, "%sinited at host PA 0x%llx size 0x%lx\n",
+ lvcmdq_error_header(vcmdq, header, 64),
+ vcmdq->cmdq.q.q_base & VCMDQ_ADDR,
+ 1UL << (vcmdq->cmdq.q.q_base & VCMDQ_LOG2SIZE));
+ return 0;
+}
+
+static void
+tegra241_vintf_destroy_lvcmdq_user(struct iommufd_hw_queue *hw_queue)
+{
+ struct tegra241_vcmdq *vcmdq = hw_queue_to_vcmdq(hw_queue);
+
+ mutex_lock(&vcmdq->vintf->lvcmdq_mutex);
+ tegra241_vcmdq_hw_deinit(vcmdq);
+ tegra241_vcmdq_unmap_lvcmdq(vcmdq);
+ tegra241_vintf_free_lvcmdq(vcmdq->vintf, vcmdq->lidx);
+ if (vcmdq->prev)
+ iommufd_hw_queue_undepend(vcmdq, vcmdq->prev, core);
+ mutex_unlock(&vcmdq->vintf->lvcmdq_mutex);
+}
+
+static int tegra241_vintf_alloc_lvcmdq_user(struct iommufd_hw_queue *hw_queue,
+ u32 lidx, phys_addr_t base_addr_pa)
+{
+ struct tegra241_vintf *vintf = viommu_to_vintf(hw_queue->viommu);
+ struct tegra241_vcmdq *vcmdq = hw_queue_to_vcmdq(hw_queue);
+ struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
+ struct arm_smmu_device *smmu = &cmdqv->smmu;
+ struct tegra241_vcmdq *prev = NULL;
+ u32 log2size, max_n_shift;
+ char header[64];
+ int ret;
+
+ if (hw_queue->type != IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV)
+ return -EOPNOTSUPP;
+ if (lidx >= cmdqv->num_lvcmdqs_per_vintf)
+ return -EINVAL;
+
+ mutex_lock(&vintf->lvcmdq_mutex);
+
+ if (vintf->lvcmdqs[lidx]) {
+ ret = -EEXIST;
+ goto unlock;
+ }
+
+ /*
+ * HW requires LVCMDQs to be mapped in ascending order, so reject if
+ * the previous LVCMDQ is not allocated yet.
+ */
+ if (lidx) {
+ prev = vintf->lvcmdqs[lidx - 1];
+ if (!prev) {
+ ret = -EIO;
+ goto unlock;
+ }
+ }
+
+ /*
+ * hw_queue->length must be a power of 2, in range of
+ * [ 32, 2 ^ (idr[1].CMDQS + CMDQ_ENT_SZ_SHIFT) ]
+ */
+ max_n_shift = FIELD_GET(IDR1_CMDQS,
+ readl_relaxed(smmu->base + ARM_SMMU_IDR1));
+ if (!is_power_of_2(hw_queue->length) || hw_queue->length < 32 ||
+ hw_queue->length > (1 << (max_n_shift + CMDQ_ENT_SZ_SHIFT))) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ log2size = ilog2(hw_queue->length) - CMDQ_ENT_SZ_SHIFT;
+
+ /* base_addr_pa must be aligned to hw_queue->length */
+ if (base_addr_pa & ~VCMDQ_ADDR ||
+ base_addr_pa & (hw_queue->length - 1)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /*
+ * HW requires LVCMDQs to be unmapped in descending order, so destroy()
+ * must follow this rule. Set a dependency on the previous LVCMDQ so the
+ * iommufd core will help enforce it.
+ */
+ if (prev) {
+ ret = iommufd_hw_queue_depend(vcmdq, prev, core);
+ if (ret)
+ goto unlock;
+ }
+ vcmdq->prev = prev;
+
+ ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq);
+ if (ret)
+ goto undepend_vcmdq;
+
+ dev_dbg(cmdqv->dev, "%sallocated\n",
+ lvcmdq_error_header(vcmdq, header, 64));
+
+ tegra241_vcmdq_map_lvcmdq(vcmdq);
+
+ vcmdq->cmdq.q.q_base = base_addr_pa & VCMDQ_ADDR;
+ vcmdq->cmdq.q.q_base |= log2size;
+
+ ret = tegra241_vcmdq_hw_init_user(vcmdq);
+ if (ret)
+ goto unmap_lvcmdq;
+
+ hw_queue->destroy = &tegra241_vintf_destroy_lvcmdq_user;
+ mutex_unlock(&vintf->lvcmdq_mutex);
+ return 0;
+
+unmap_lvcmdq:
+ tegra241_vcmdq_unmap_lvcmdq(vcmdq);
+ tegra241_vintf_deinit_lvcmdq(vintf, lidx);
+undepend_vcmdq:
+ if (vcmdq->prev)
+ iommufd_hw_queue_undepend(vcmdq, vcmdq->prev, core);
+unlock:
+ mutex_unlock(&vintf->lvcmdq_mutex);
+ return ret;
+}
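
The length and alignment rules enforced above boil down to a little arithmetic; a standalone sketch follows, assuming CMDQ_ENT_SZ_SHIFT is 4 (16-byte command entries) and treating max_n_shift as the value read from IDR1.CMDQS:

#include <stdbool.h>
#include <stdio.h>

#define CMDQ_ENT_SZ_SHIFT 4 /* assumed: 16-byte command entries */

static bool is_pow2(unsigned long x)
{
	return x && !(x & (x - 1));
}

/* Mirrors the checks in tegra241_vintf_alloc_lvcmdq_user(): length must be
 * a power of 2 in [32, 2^(CMDQS + CMDQ_ENT_SZ_SHIFT)], and the base
 * physical address must be aligned to the queue length. */
static bool lvcmdq_args_ok(unsigned long length, unsigned long long base_pa,
			   unsigned int max_n_shift)
{
	if (!is_pow2(length) || length < 32 ||
	    length > (1UL << (max_n_shift + CMDQ_ENT_SZ_SHIFT)))
		return false;
	return (base_pa & (length - 1)) == 0;
}

int main(void)
{
	printf("%d\n", lvcmdq_args_ok(4096, 0x80001000ULL, 19)); /* 1 */
	printf("%d\n", lvcmdq_args_ok(4096, 0x80000800ULL, 19)); /* 0: unaligned */
	printf("%d\n", lvcmdq_args_ok(24, 0x80000000ULL, 19));   /* 0: not pow2 */
	return 0;
}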
+
+static void tegra241_cmdqv_destroy_vintf_user(struct iommufd_viommu *viommu)
+{
+ struct tegra241_vintf *vintf = viommu_to_vintf(viommu);
+
+ if (vintf->mmap_offset)
+ iommufd_viommu_destroy_mmap(&vintf->vsmmu.core,
+ vintf->mmap_offset);
+ tegra241_cmdqv_remove_vintf(vintf->cmdqv, vintf->idx);
+}
+
+static void tegra241_vintf_destroy_vsid(struct iommufd_vdevice *vdev)
+{
+ struct tegra241_vintf_sid *vsid = vdev_to_vsid(vdev);
+ struct tegra241_vintf *vintf = vsid->vintf;
+
+ writel(0, REG_VINTF(vintf, SID_MATCH(vsid->idx)));
+ writel(0, REG_VINTF(vintf, SID_REPLACE(vsid->idx)));
+ ida_free(&vintf->sids, vsid->idx);
+ dev_dbg(vintf->cmdqv->dev,
+ "VINTF%u: deallocated SID_REPLACE%d for pSID=%x\n", vintf->idx,
+ vsid->idx, vsid->sid);
+}
+
+static int tegra241_vintf_init_vsid(struct iommufd_vdevice *vdev)
+{
+ struct device *dev = iommufd_vdevice_to_device(vdev);
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ struct tegra241_vintf *vintf = viommu_to_vintf(vdev->viommu);
+ struct tegra241_vintf_sid *vsid = vdev_to_vsid(vdev);
+ struct arm_smmu_stream *stream = &master->streams[0];
+ u64 virt_sid = vdev->virt_id;
+ int sidx;
+
+ if (virt_sid > UINT_MAX)
+ return -EINVAL;
+
+ WARN_ON_ONCE(master->num_streams != 1);
+
+ /* Find an empty pair of SID_REPLACE and SID_MATCH */
+ sidx = ida_alloc_max(&vintf->sids, vintf->cmdqv->num_sids_per_vintf - 1,
+ GFP_KERNEL);
+ if (sidx < 0)
+ return sidx;
+
+ writel(stream->id, REG_VINTF(vintf, SID_REPLACE(sidx)));
+ writel(virt_sid << 1 | 0x1, REG_VINTF(vintf, SID_MATCH(sidx)));
+ dev_dbg(vintf->cmdqv->dev,
+ "VINTF%u: allocated SID_REPLACE%d for pSID=%x, vSID=%x\n",
+ vintf->idx, sidx, stream->id, (u32)virt_sid);
+
+ vsid->idx = sidx;
+ vsid->vintf = vintf;
+ vsid->sid = stream->id;
+
+ vdev->destroy = &tegra241_vintf_destroy_vsid;
+ return 0;
+}
+
+static struct iommufd_viommu_ops tegra241_cmdqv_viommu_ops = {
+ .destroy = tegra241_cmdqv_destroy_vintf_user,
+ .alloc_domain_nested = arm_vsmmu_alloc_domain_nested,
+ /* Non-accelerated commands will still be handled by the kernel */
+ .cache_invalidate = arm_vsmmu_cache_invalidate,
+ .vdevice_size = VDEVICE_STRUCT_SIZE(struct tegra241_vintf_sid, core),
+ .vdevice_init = tegra241_vintf_init_vsid,
+ .get_hw_queue_size = tegra241_vintf_get_vcmdq_size,
+ .hw_queue_init_phys = tegra241_vintf_alloc_lvcmdq_user,
+};
+
+static int
+tegra241_cmdqv_init_vintf_user(struct arm_vsmmu *vsmmu,
+ const struct iommu_user_data *user_data)
+{
+ struct tegra241_cmdqv *cmdqv =
+ container_of(vsmmu->smmu, struct tegra241_cmdqv, smmu);
+ struct tegra241_vintf *vintf = viommu_to_vintf(&vsmmu->core);
+ struct iommu_viommu_tegra241_cmdqv data;
+ phys_addr_t page0_base;
+ int ret;
+
+ /*
+ * An unsupported type should have been rejected by
+ * tegra241_cmdqv_get_vintf_size(). Seeing one here indicates a kernel
+ * bug or data corruption.
+ */
+ if (WARN_ON(vsmmu->core.type != IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV))
+ return -EOPNOTSUPP;
+
+ if (!user_data)
+ return -EINVAL;
+
+ ret = iommu_copy_struct_from_user(&data, user_data,
+ IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV,
+ out_vintf_mmap_length);
+ if (ret)
+ return ret;
+
+ ret = tegra241_cmdqv_init_vintf(cmdqv, cmdqv->num_vintfs - 1, vintf);
+ if (ret < 0) {
+ dev_err(cmdqv->dev, "no more available vintf\n");
+ return ret;
+ }
+
+ /*
+ * Initialize the user-owned VINTF without an LVCMDQ, as it cannot
+ * pre-allocate an LVCMDQ until user space asks for one, for security
+ * reasons. This differs from the kernel-owned VINTF0, which has
+ * pre-assigned and pre-allocated global VCMDQs that are mapped to its
+ * LVCMDQs by the tegra241_vintf_hw_init() call.
+ */
+ ret = tegra241_vintf_hw_init(vintf, false);
+ if (ret)
+ goto deinit_vintf;
+
+ page0_base = cmdqv->base_phys + TEGRA241_VINTFi_PAGE0(vintf->idx);
+ ret = iommufd_viommu_alloc_mmap(&vintf->vsmmu.core, page0_base, SZ_64K,
+ &vintf->mmap_offset);
+ if (ret)
+ goto hw_deinit_vintf;
+
+ data.out_vintf_mmap_length = SZ_64K;
+ data.out_vintf_mmap_offset = vintf->mmap_offset;
+ ret = iommu_copy_struct_to_user(user_data, &data,
+ IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV,
+ out_vintf_mmap_length);
+ if (ret)
+ goto free_mmap;
+
+ ida_init(&vintf->sids);
+ mutex_init(&vintf->lvcmdq_mutex);
+
+ dev_dbg(cmdqv->dev, "VINTF%u: allocated with vmid (%d)\n", vintf->idx,
+ vintf->vsmmu.vmid);
+
+ vsmmu->core.ops = &tegra241_cmdqv_viommu_ops;
+ return 0;
+
+free_mmap:
+ iommufd_viommu_destroy_mmap(&vintf->vsmmu.core, vintf->mmap_offset);
+hw_deinit_vintf:
+ tegra241_vintf_hw_deinit(vintf);
+deinit_vintf:
+ tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx);
+ return ret;
+}
+
+MODULE_IMPORT_NS("IOMMUFD");
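As a worked example of the LVCMDQ size and alignment rules enforced in tegra241_vintf_alloc_lvcmdq_user() above, here is a standalone sketch; the concrete values of CMDQ_ENT_SZ_SHIFT and IDR1.CMDQS used below are illustrative assumptions, not taken from the patch:

/*
 * Standalone sketch of the validation above. Assumes 16-byte command
 * entries (CMDQ_ENT_SZ_SHIFT == 4) and IDR1.CMDQS == 7, giving a
 * maximum queue of 1 << (7 + 4) == 2048 bytes.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_CMDQ_ENT_SZ_SHIFT 4	/* assumption for illustration */

static bool lvcmdq_args_ok(uint64_t length, uint64_t base_addr_pa,
			   unsigned int max_n_shift)
{
	/* power of 2, at least 32 bytes, no larger than the HW maximum */
	if (length < 32 || (length & (length - 1)) != 0)
		return false;
	if (length > (UINT64_C(1) << (max_n_shift + EX_CMDQ_ENT_SZ_SHIFT)))
		return false;
	/* the base must be naturally aligned to the queue length */
	return (base_addr_pa & (length - 1)) == 0;
}

For length == 256 this passes (256 <= 2048 with max_n_shift == 7), and the driver encodes log2size = ilog2(256) - 4 == 4 into the low bits of q_base, exactly as the function above does.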
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 62874b18f645..57c097e87613 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -355,7 +355,8 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->set_prr_addr = NULL;
if (of_device_is_compatible(np, "qcom,smmu-500") &&
- of_device_is_compatible(np, "qcom,adreno-smmu")) {
+ !of_device_is_compatible(np, "qcom,sm8250-smmu-500") &&
+ of_device_is_compatible(np, "qcom,adreno-smmu")) {
priv->set_prr_bit = qcom_adreno_smmu_set_prr_bit;
priv->set_prr_addr = qcom_adreno_smmu_set_prr_addr;
}
@@ -379,6 +380,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sdm670-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
+ { .compatible = "qcom,sm6115-mdss" },
{ .compatible = "qcom,sm6350-mdss" },
{ .compatible = "qcom,sm6375-mdss" },
{ .compatible = "qcom,sm8150-mdss" },
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 8d95b14c7d5a..4ced4b5bee4d 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -109,7 +109,7 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
}
static struct platform_driver arm_smmu_driver;
-static struct iommu_ops arm_smmu_ops;
+static const struct iommu_ops arm_smmu_ops;
#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
static struct device_node *dev_get_dev_node(struct device *dev)
@@ -919,6 +919,8 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
{
struct arm_smmu_domain *smmu_domain;
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct arm_smmu_device *smmu = cfg->smmu;
/*
* Allocate the domain and initialise some of its data structures.
@@ -931,6 +933,7 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->cb_lock);
+ smmu_domain->domain.pgsize_bitmap = smmu->pgsize_bitmap;
return &smmu_domain->domain;
}
@@ -1627,7 +1630,7 @@ static int arm_smmu_def_domain_type(struct device *dev)
return 0;
}
-static struct iommu_ops arm_smmu_ops = {
+static const struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
@@ -1639,7 +1642,6 @@ static struct iommu_ops arm_smmu_ops = {
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.def_domain_type = arm_smmu_def_domain_type,
- .pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = arm_smmu_attach_dev,
@@ -1919,10 +1921,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
- if (arm_smmu_ops.pgsize_bitmap == -1UL)
- arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
- else
- arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
smmu->pgsize_bitmap);
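The arm-smmu.c hunks above, like the qcom_iommu and exynos hunks below, apply one conversion: the supported page sizes move from the globally shared (and previously writable) iommu_ops into each paging domain at allocation time, which is what allows arm_smmu_ops to become const. A minimal sketch of the pattern, with foo_* identifiers as placeholders rather than real driver symbols:

/* Sketch only: foo_* names are hypothetical. Each domain takes a
 * snapshot of the page sizes of the IOMMU instance it is allocated
 * against, instead of OR-ing every instance's sizes into one
 * driver-wide ops->pgsize_bitmap. */
static struct iommu_domain *foo_domain_alloc_paging(struct device *dev)
{
	struct foo_iommu *iommu = foo_iommu_from_dev(dev); /* hypothetical */
	struct foo_domain *fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (!fd)
		return NULL;
	fd->domain.pgsize_bitmap = iommu->pgsize_bitmap; /* per instance */
	return &fd->domain;
}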
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 3907924646a2..c5be95e56031 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -229,7 +229,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
goto out_unlock;
pgtbl_cfg = (struct io_pgtable_cfg) {
- .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap,
+ .pgsize_bitmap = domain->pgsize_bitmap,
.ias = 32,
.oas = 40,
.tlb = &qcom_flush_ops,
@@ -246,8 +246,6 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
goto out_clear_iommu;
}
- /* Update the domain's page sizes to reflect the page table format */
- domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
domain->geometry.force_aperture = true;
@@ -337,6 +335,7 @@ static struct iommu_domain *qcom_iommu_domain_alloc_paging(struct device *dev)
mutex_init(&qcom_domain->init_mutex);
spin_lock_init(&qcom_domain->pgtbl_lock);
+ qcom_domain->domain.pgsize_bitmap = SZ_4K;
return &qcom_domain->domain;
}
@@ -598,7 +597,6 @@ static const struct iommu_ops qcom_iommu_ops = {
.probe_device = qcom_iommu_probe_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
- .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = qcom_iommu_attach_dev,
.map_pages = qcom_iommu_map,
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index fcb6a0f7c082..b6edd178fe25 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -22,6 +22,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include "dma-iommu.h"
#include "iommu-pages.h"
typedef u32 sysmmu_iova_t;
@@ -925,6 +926,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
spin_lock_init(&domain->pgtablelock);
INIT_LIST_HEAD(&domain->clients);
+ domain->domain.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE;
+
domain->domain.geometry.aperture_start = 0;
domain->domain.geometry.aperture_end = ~0UL;
domain->domain.geometry.force_aperture = true;
@@ -1477,7 +1480,7 @@ static const struct iommu_ops exynos_iommu_ops = {
.device_group = generic_device_group,
.probe_device = exynos_iommu_probe_device,
.release_device = exynos_iommu_release_device,
- .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
+ .get_resv_regions = iommu_dma_get_resv_regions,
.of_xlate = exynos_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = exynos_iommu_attach_device,
diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index 47692cbfaabd..265e7290256b 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -370,7 +370,7 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
struct intel_iommu *iommu = tag->iommu;
u64 type = DMA_TLB_PSI_FLUSH;
- if (domain->use_first_level) {
+ if (intel_domain_is_fs_paging(domain)) {
qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
pages, ih, domain->qi_batch);
return;
@@ -422,22 +422,6 @@ static void cache_tag_flush_devtlb_psi(struct dmar_domain *domain, struct cache_
domain->qi_batch);
}
-static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_tag *tag)
-{
- struct intel_iommu *iommu = tag->iommu;
- struct device_domain_info *info;
- u16 sid;
-
- info = dev_iommu_priv_get(tag->dev);
- sid = PCI_DEVID(info->bus, info->devfn);
-
- qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
- MAX_AGAW_PFN_WIDTH, domain->qi_batch);
- if (info->dtlb_extra_inval)
- qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
- MAX_AGAW_PFN_WIDTH, domain->qi_batch);
-}
-
/*
* Invalidates a range of IOVA from @start (inclusive) to @end (inclusive)
* when the memory mappings in the target domain have been modified.
@@ -450,7 +434,13 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
struct cache_tag *tag;
unsigned long flags;
- addr = calculate_psi_aligned_address(start, end, &pages, &mask);
+ if (start == 0 && end == ULONG_MAX) {
+ addr = 0;
+ pages = -1;
+ mask = MAX_AGAW_PFN_WIDTH;
+ } else {
+ addr = calculate_psi_aligned_address(start, end, &pages, &mask);
+ }
spin_lock_irqsave(&domain->cache_lock, flags);
list_for_each_entry(tag, &domain->cache_tags, node) {
@@ -491,31 +481,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
*/
void cache_tag_flush_all(struct dmar_domain *domain)
{
- struct intel_iommu *iommu = NULL;
- struct cache_tag *tag;
- unsigned long flags;
-
- spin_lock_irqsave(&domain->cache_lock, flags);
- list_for_each_entry(tag, &domain->cache_tags, node) {
- if (iommu && iommu != tag->iommu)
- qi_batch_flush_descs(iommu, domain->qi_batch);
- iommu = tag->iommu;
-
- switch (tag->type) {
- case CACHE_TAG_IOTLB:
- case CACHE_TAG_NESTING_IOTLB:
- cache_tag_flush_iotlb(domain, tag, 0, -1, 0, 0);
- break;
- case CACHE_TAG_DEVTLB:
- case CACHE_TAG_NESTING_DEVTLB:
- cache_tag_flush_devtlb_all(domain, tag);
- break;
- }
-
- trace_cache_tag_flush_all(tag);
- }
- qi_batch_flush_descs(iommu, domain->qi_batch);
- spin_unlock_irqrestore(&domain->cache_lock, flags);
+ cache_tag_flush_range(domain, 0, ULONG_MAX, 0);
}
/*
@@ -545,7 +511,8 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
qi_batch_flush_descs(iommu, domain->qi_batch);
iommu = tag->iommu;
- if (!cap_caching_mode(iommu->cap) || domain->use_first_level) {
+ if (!cap_caching_mode(iommu->cap) ||
+ intel_domain_is_fs_paging(domain)) {
iommu_flush_write_buffer(iommu);
continue;
}
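The special case added to cache_tag_flush_range() exists because feeding the whole address space through calculate_psi_aligned_address() would overflow the size computation; with that handled, cache_tag_flush_all() can simply delegate to the range path, and the hand-rolled cache_tag_flush_devtlb_all() helper and its trace event become redundant. A sketch of the overflow, based on my reading of the hunk:

#include <limits.h>

/* For a full-range flush the naive size calculation wraps to zero on
 * unsigned long, so the hunk encodes "flush everything" explicitly as
 * addr = 0, pages = -1, mask = MAX_AGAW_PFN_WIDTH instead. */
static unsigned long naive_size(unsigned long start, unsigned long end)
{
	return end - start + 1;	/* naive_size(0, ULONG_MAX) == 0: wrapped */
}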
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index b61d9ea27aa9..ec975c73cfe6 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -935,14 +935,11 @@ void __init detect_intel_iommu(void)
pci_request_acs();
}
-#ifdef CONFIG_X86
if (!ret) {
x86_init.iommu.iommu_init = intel_iommu_init;
x86_platform.iommu_shutdown = intel_iommu_shutdown;
}
-#endif
-
if (dmar_tbl) {
acpi_put_table(dmar_tbl);
dmar_tbl = NULL;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 148b944143b8..9c3ab9d9f69a 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -34,7 +34,7 @@
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
-#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
+#define IS_GFX_DEVICE(pdev) pci_is_display(pdev)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
@@ -57,6 +57,8 @@
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
+#define rwbf_required(iommu) (rwbf_quirk || cap_rwbf((iommu)->cap))
+
/*
* set to 1 to panic kernel if can't successfully enable VT-d
* (used when kernel is launched w/ TXT)
@@ -1391,28 +1393,10 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
if (--info->refcnt == 0) {
ida_free(&iommu->domain_ida, info->did);
xa_erase(&domain->iommu_array, iommu->seq_id);
- domain->nid = NUMA_NO_NODE;
kfree(info);
}
}
-static void domain_exit(struct dmar_domain *domain)
-{
- if (domain->pgd) {
- struct iommu_pages_list freelist =
- IOMMU_PAGES_LIST_INIT(freelist);
-
- domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
- iommu_put_pages_list(&freelist);
- }
-
- if (WARN_ON(!list_empty(&domain->devices)))
- return;
-
- kfree(domain->qi_batch);
- kfree(domain);
-}
-
/*
* For kdump cases, old valid entries may be cached due to the
* in-flight DMA and copied pgtable, but there is no unmapping
@@ -1480,6 +1464,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
struct context_entry *context;
int ret;
+ if (WARN_ON(!intel_domain_is_ss_paging(domain)))
+ return -EINVAL;
+
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
@@ -1736,15 +1723,14 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
intel_context_flush_no_pasid(info, context, did);
}
-int __domain_setup_first_level(struct intel_iommu *iommu,
- struct device *dev, ioasid_t pasid,
- u16 did, pgd_t *pgd, int flags,
- struct iommu_domain *old)
+int __domain_setup_first_level(struct intel_iommu *iommu, struct device *dev,
+ ioasid_t pasid, u16 did, phys_addr_t fsptptr,
+ int flags, struct iommu_domain *old)
{
if (!old)
- return intel_pasid_setup_first_level(iommu, dev, pgd,
- pasid, did, flags);
- return intel_pasid_replace_first_level(iommu, dev, pgd, pasid, did,
+ return intel_pasid_setup_first_level(iommu, dev, fsptptr, pasid,
+ did, flags);
+ return intel_pasid_replace_first_level(iommu, dev, fsptptr, pasid, did,
iommu_domain_did(old, iommu),
flags);
}
@@ -1793,7 +1779,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
return __domain_setup_first_level(iommu, dev, pasid,
domain_id_iommu(domain, iommu),
- (pgd_t *)pgd, flags, old);
+ __pa(pgd), flags, old);
}
static int dmar_domain_attach_device(struct dmar_domain *domain,
@@ -1819,12 +1805,14 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (!sm_supported(iommu))
ret = domain_context_mapping(domain, dev);
- else if (domain->use_first_level)
+ else if (intel_domain_is_fs_paging(domain))
ret = domain_setup_first_level(iommu, domain, dev,
IOMMU_NO_PASID, NULL);
- else
+ else if (intel_domain_is_ss_paging(domain))
ret = domain_setup_second_level(iommu, domain, dev,
IOMMU_NO_PASID, NULL);
+ else if (WARN_ON(true))
+ ret = -EINVAL;
if (ret)
goto out_block_translation;
@@ -3286,10 +3274,14 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
spin_lock_init(&domain->lock);
spin_lock_init(&domain->cache_lock);
xa_init(&domain->iommu_array);
+ INIT_LIST_HEAD(&domain->s1_domains);
+ spin_lock_init(&domain->s1_lock);
domain->nid = dev_to_node(dev);
domain->use_first_level = first_stage;
+ domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
+
/* calculate the address width */
addr_width = agaw_to_width(iommu->agaw);
if (addr_width > cap_mgaw(iommu->cap))
@@ -3331,71 +3323,168 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
}
static struct iommu_domain *
-intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
- const struct iommu_user_data *user_data)
+intel_iommu_domain_alloc_first_stage(struct device *dev,
+ struct intel_iommu *iommu, u32 flags)
+{
+ struct dmar_domain *dmar_domain;
+
+ if (flags & ~IOMMU_HWPT_ALLOC_PASID)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* FL requires scalable mode; only SL is available in legacy mode */
+ if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ dmar_domain = paging_domain_alloc(dev, true);
+ if (IS_ERR(dmar_domain))
+ return ERR_CAST(dmar_domain);
+
+ dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
+ /*
+ * iotlb sync for map is only needed for legacy implementations that
+ * explicitly require flushing internal write buffers to ensure memory
+ * coherence.
+ */
+ if (rwbf_required(iommu))
+ dmar_domain->iotlb_sync_map = true;
+
+ return &dmar_domain->domain;
+}
+
+static struct iommu_domain *
+intel_iommu_domain_alloc_second_stage(struct device *dev,
+ struct intel_iommu *iommu, u32 flags)
{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
- bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
- struct intel_iommu *iommu = info->iommu;
struct dmar_domain *dmar_domain;
- struct iommu_domain *domain;
- bool first_stage;
if (flags &
(~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
IOMMU_HWPT_ALLOC_PASID)))
return ERR_PTR(-EOPNOTSUPP);
- if (nested_parent && !nested_supported(iommu))
+
+ if (((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) &&
+ !nested_supported(iommu)) ||
+ ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
+ !ssads_supported(iommu)))
return ERR_PTR(-EOPNOTSUPP);
- if (user_data || (dirty_tracking && !ssads_supported(iommu)))
+
+ /* Legacy mode always supports second stage */
+ if (sm_supported(iommu) && !ecap_slts(iommu->ecap))
return ERR_PTR(-EOPNOTSUPP);
+ dmar_domain = paging_domain_alloc(dev, false);
+ if (IS_ERR(dmar_domain))
+ return ERR_CAST(dmar_domain);
+
+ dmar_domain->domain.ops = &intel_ss_paging_domain_ops;
+ dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
+
+ if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
+ dmar_domain->domain.dirty_ops = &intel_dirty_ops;
+
/*
- * Always allocate the guest compatible page table unless
- * IOMMU_HWPT_ALLOC_NEST_PARENT or IOMMU_HWPT_ALLOC_DIRTY_TRACKING
- * is specified.
+ * Besides the internal write buffer flush, the caching mode used for
+ * legacy nested translation (which uses shadow page tables) also
+ * requires iotlb sync on map.
*/
- if (nested_parent || dirty_tracking) {
- if (!sm_supported(iommu) || !ecap_slts(iommu->ecap))
- return ERR_PTR(-EOPNOTSUPP);
- first_stage = false;
- } else {
- first_stage = first_level_by_default(iommu);
- }
+ if (rwbf_required(iommu) || cap_caching_mode(iommu->cap))
+ dmar_domain->iotlb_sync_map = true;
- dmar_domain = paging_domain_alloc(dev, first_stage);
- if (IS_ERR(dmar_domain))
- return ERR_CAST(dmar_domain);
- domain = &dmar_domain->domain;
- domain->type = IOMMU_DOMAIN_UNMANAGED;
- domain->owner = &intel_iommu_ops;
- domain->ops = intel_iommu_ops.default_domain_ops;
-
- if (nested_parent) {
- dmar_domain->nested_parent = true;
- INIT_LIST_HEAD(&dmar_domain->s1_domains);
- spin_lock_init(&dmar_domain->s1_lock);
- }
+ return &dmar_domain->domain;
+}
- if (dirty_tracking) {
- if (dmar_domain->use_first_level) {
- iommu_domain_free(domain);
- return ERR_PTR(-EOPNOTSUPP);
- }
- domain->dirty_ops = &intel_dirty_ops;
- }
+static struct iommu_domain *
+intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
+ const struct iommu_user_data *user_data)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct iommu_domain *domain;
- return domain;
+ if (user_data)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* Prefer first stage if possible by default. */
+ domain = intel_iommu_domain_alloc_first_stage(dev, iommu, flags);
+ if (domain != ERR_PTR(-EOPNOTSUPP))
+ return domain;
+ return intel_iommu_domain_alloc_second_stage(dev, iommu, flags);
}
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- WARN_ON(dmar_domain->nested_parent &&
- !list_empty(&dmar_domain->s1_domains));
- domain_exit(dmar_domain);
+ if (WARN_ON(dmar_domain->nested_parent &&
+ !list_empty(&dmar_domain->s1_domains)))
+ return;
+
+ if (WARN_ON(!list_empty(&dmar_domain->devices)))
+ return;
+
+ if (dmar_domain->pgd) {
+ struct iommu_pages_list freelist =
+ IOMMU_PAGES_LIST_INIT(freelist);
+
+ domain_unmap(dmar_domain, 0, DOMAIN_MAX_PFN(dmar_domain->gaw),
+ &freelist);
+ iommu_put_pages_list(&freelist);
+ }
+
+ kfree(dmar_domain->qi_batch);
+ kfree(dmar_domain);
+}
+
+static int paging_domain_compatible_first_stage(struct dmar_domain *dmar_domain,
+ struct intel_iommu *iommu)
+{
+ if (WARN_ON(dmar_domain->domain.dirty_ops ||
+ dmar_domain->nested_parent))
+ return -EINVAL;
+
+ /* FL requires scalable mode; only SL is available in legacy mode */
+ if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+ return -EINVAL;
+
+ /* Same page size support */
+ if (!cap_fl1gp_support(iommu->cap) &&
+ (dmar_domain->domain.pgsize_bitmap & SZ_1G))
+ return -EINVAL;
+
+ /* iotlb sync on map requirement */
+ if ((rwbf_required(iommu)) && !dmar_domain->iotlb_sync_map)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+paging_domain_compatible_second_stage(struct dmar_domain *dmar_domain,
+ struct intel_iommu *iommu)
+{
+ unsigned int sslps = cap_super_page_val(iommu->cap);
+
+ if (dmar_domain->domain.dirty_ops && !ssads_supported(iommu))
+ return -EINVAL;
+ if (dmar_domain->nested_parent && !nested_supported(iommu))
+ return -EINVAL;
+
+ /* Legacy mode always supports second stage */
+ if (sm_supported(iommu) && !ecap_slts(iommu->ecap))
+ return -EINVAL;
+
+ /* Same page size support */
+ if (!(sslps & BIT(0)) && (dmar_domain->domain.pgsize_bitmap & SZ_2M))
+ return -EINVAL;
+ if (!(sslps & BIT(1)) && (dmar_domain->domain.pgsize_bitmap & SZ_1G))
+ return -EINVAL;
+
+ /* iotlb sync on map requirement */
+ if ((rwbf_required(iommu) || cap_caching_mode(iommu->cap)) &&
+ !dmar_domain->iotlb_sync_map)
+ return -EINVAL;
+
+ return 0;
}
int paging_domain_compatible(struct iommu_domain *domain, struct device *dev)
@@ -3403,28 +3492,29 @@ int paging_domain_compatible(struct iommu_domain *domain, struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu = info->iommu;
+ int ret = -EINVAL;
int addr_width;
- if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING)))
- return -EPERM;
+ if (intel_domain_is_fs_paging(dmar_domain))
+ ret = paging_domain_compatible_first_stage(dmar_domain, iommu);
+ else if (intel_domain_is_ss_paging(dmar_domain))
+ ret = paging_domain_compatible_second_stage(dmar_domain, iommu);
+ else if (WARN_ON(true))
+ ret = -EINVAL;
+ if (ret)
+ return ret;
+ /*
+ * FIXME this is locked wrong, it needs to be under the
+ * dmar_domain->lock
+ */
if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
return -EINVAL;
- if (domain->dirty_ops && !ssads_supported(iommu))
- return -EINVAL;
-
if (dmar_domain->iommu_coherency !=
iommu_paging_structure_coherency(iommu))
return -EINVAL;
- if (dmar_domain->iommu_superpage !=
- iommu_superpage_capability(iommu, dmar_domain->use_first_level))
- return -EINVAL;
-
- if (dmar_domain->use_first_level &&
- (!sm_supported(iommu) || !ecap_flts(iommu->ecap)))
- return -EINVAL;
/* check if this iommu agaw is sufficient for max mapped address */
addr_width = agaw_to_width(iommu->agaw);
@@ -3610,44 +3700,41 @@ static bool domain_support_force_snooping(struct dmar_domain *domain)
return support;
}
-static void domain_set_force_snooping(struct dmar_domain *domain)
+static bool intel_iommu_enforce_cache_coherency_fs(struct iommu_domain *domain)
{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct device_domain_info *info;
- assert_spin_locked(&domain->lock);
- /*
- * Second level page table supports per-PTE snoop control. The
- * iommu_map() interface will handle this by setting SNP bit.
- */
- if (!domain->use_first_level) {
- domain->set_pte_snp = true;
- return;
- }
+ guard(spinlock_irqsave)(&dmar_domain->lock);
- list_for_each_entry(info, &domain->devices, link)
+ if (dmar_domain->force_snooping)
+ return true;
+
+ if (!domain_support_force_snooping(dmar_domain))
+ return false;
+
+ dmar_domain->force_snooping = true;
+ list_for_each_entry(info, &dmar_domain->devices, link)
intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
IOMMU_NO_PASID);
+ return true;
}
-static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
+static bool intel_iommu_enforce_cache_coherency_ss(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- unsigned long flags;
- if (dmar_domain->force_snooping)
- return true;
-
- spin_lock_irqsave(&dmar_domain->lock, flags);
+ guard(spinlock_irqsave)(&dmar_domain->lock);
if (!domain_support_force_snooping(dmar_domain) ||
- (!dmar_domain->use_first_level && dmar_domain->has_mappings)) {
- spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ dmar_domain->has_mappings)
return false;
- }
- domain_set_force_snooping(dmar_domain);
+ /*
+ * Second level page table supports per-PTE snoop control. The
+ * iommu_map() interface will handle this by setting SNP bit.
+ */
+ dmar_domain->set_pte_snp = true;
dmar_domain->force_snooping = true;
- spin_unlock_irqrestore(&dmar_domain->lock, flags);
-
return true;
}
@@ -3954,7 +4041,10 @@ static bool risky_device(struct pci_dev *pdev)
static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
- cache_tag_flush_range_np(to_dmar_domain(domain), iova, iova + size - 1);
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+ if (dmar_domain->iotlb_sync_map)
+ cache_tag_flush_range_np(dmar_domain, iova, iova + size - 1);
return 0;
}
@@ -4000,8 +4090,8 @@ static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- iopf_for_domain_remove(old, dev);
intel_pasid_tear_down_entry(info->iommu, dev, pasid, false);
+ iopf_for_domain_remove(old, dev);
domain_remove_dev_pasid(old, dev, pasid);
return 0;
@@ -4078,12 +4168,15 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
if (ret)
goto out_remove_dev_pasid;
- if (dmar_domain->use_first_level)
+ if (intel_domain_is_fs_paging(dmar_domain))
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid, old);
- else
+ else if (intel_domain_is_ss_paging(dmar_domain))
ret = domain_setup_second_level(iommu, dmar_domain,
dev, pasid, old);
+ else if (WARN_ON(true))
+ ret = -EINVAL;
+
if (ret)
goto out_unwind_iopf;
@@ -4100,12 +4193,17 @@ out_remove_dev_pasid:
return ret;
}
-static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
+static void *intel_iommu_hw_info(struct device *dev, u32 *length,
+ enum iommu_hw_info_type *type)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct iommu_hw_info_vtd *vtd;
+ if (*type != IOMMU_HW_INFO_TYPE_DEFAULT &&
+ *type != IOMMU_HW_INFO_TYPE_INTEL_VTD)
+ return ERR_PTR(-EOPNOTSUPP);
+
vtd = kzalloc(sizeof(*vtd), GFP_KERNEL);
if (!vtd)
return ERR_PTR(-ENOMEM);
@@ -4358,6 +4456,32 @@ static struct iommu_domain identity_domain = {
},
};
+const struct iommu_domain_ops intel_fs_paging_domain_ops = {
+ .attach_dev = intel_iommu_attach_device,
+ .set_dev_pasid = intel_iommu_set_dev_pasid,
+ .map_pages = intel_iommu_map_pages,
+ .unmap_pages = intel_iommu_unmap_pages,
+ .iotlb_sync_map = intel_iommu_iotlb_sync_map,
+ .flush_iotlb_all = intel_flush_iotlb_all,
+ .iotlb_sync = intel_iommu_tlb_sync,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .free = intel_iommu_domain_free,
+ .enforce_cache_coherency = intel_iommu_enforce_cache_coherency_fs,
+};
+
+const struct iommu_domain_ops intel_ss_paging_domain_ops = {
+ .attach_dev = intel_iommu_attach_device,
+ .set_dev_pasid = intel_iommu_set_dev_pasid,
+ .map_pages = intel_iommu_map_pages,
+ .unmap_pages = intel_iommu_unmap_pages,
+ .iotlb_sync_map = intel_iommu_iotlb_sync_map,
+ .flush_iotlb_all = intel_flush_iotlb_all,
+ .iotlb_sync = intel_iommu_tlb_sync,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .free = intel_iommu_domain_free,
+ .enforce_cache_coherency = intel_iommu_enforce_cache_coherency_ss,
+};
+
const struct iommu_ops intel_iommu_ops = {
.blocked_domain = &blocking_domain,
.release_domain = &blocking_domain,
@@ -4374,20 +4498,7 @@ const struct iommu_ops intel_iommu_ops = {
.device_group = intel_iommu_device_group,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
- .pgsize_bitmap = SZ_4K,
.page_response = intel_iommu_page_response,
- .default_domain_ops = &(const struct iommu_domain_ops) {
- .attach_dev = intel_iommu_attach_device,
- .set_dev_pasid = intel_iommu_set_dev_pasid,
- .map_pages = intel_iommu_map_pages,
- .unmap_pages = intel_iommu_unmap_pages,
- .iotlb_sync_map = intel_iommu_iotlb_sync_map,
- .flush_iotlb_all = intel_flush_iotlb_all,
- .iotlb_sync = intel_iommu_tlb_sync,
- .iova_to_phys = intel_iommu_iova_to_phys,
- .free = intel_iommu_domain_free,
- .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
- }
};
static void quirk_iommu_igfx(struct pci_dev *dev)
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 2d1afab5eedc..d09b92871659 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -614,6 +614,9 @@ struct dmar_domain {
u8 has_mappings:1; /* Has mappings configured through
* iommu_map() interface.
*/
+ u8 iotlb_sync_map:1; /* Need to flush IOTLB cache or write
+ * buffer when creating mappings.
+ */
spinlock_t lock; /* Protect device tracking lists */
struct list_head devices; /* all devices' list */
@@ -1252,10 +1255,9 @@ domain_add_dev_pasid(struct iommu_domain *domain,
void domain_remove_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid);
-int __domain_setup_first_level(struct intel_iommu *iommu,
- struct device *dev, ioasid_t pasid,
- u16 did, pgd_t *pgd, int flags,
- struct iommu_domain *old);
+int __domain_setup_first_level(struct intel_iommu *iommu, struct device *dev,
+ ioasid_t pasid, u16 did, phys_addr_t fsptptr,
+ int flags, struct iommu_domain *old);
int dmar_ir_support(void);
@@ -1378,6 +1380,18 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
u8 devfn, int alloc);
extern const struct iommu_ops intel_iommu_ops;
+extern const struct iommu_domain_ops intel_fs_paging_domain_ops;
+extern const struct iommu_domain_ops intel_ss_paging_domain_ops;
+
+static inline bool intel_domain_is_fs_paging(struct dmar_domain *domain)
+{
+ return domain->domain.ops == &intel_fs_paging_domain_ops;
+}
+
+static inline bool intel_domain_is_ss_paging(struct dmar_domain *domain)
+{
+ return domain->domain.ops == &intel_ss_paging_domain_ops;
+}
#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_sm;
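The two inline helpers above replace tests of the old dmar_domain::use_first_level flag: a domain's stage is now identified by which ops table it carries, and nested or SVA domains (which install their own ops) answer false to both. A hypothetical caller-side sketch of the resulting dispatch:

/* Placeholder sketch: setup_fs()/setup_ss() stand in for the real
 * domain_setup_first_level()/domain_setup_second_level() paths. */
static int dispatch_by_stage(struct dmar_domain *dmar_domain)
{
	if (intel_domain_is_fs_paging(dmar_domain))
		return setup_fs(dmar_domain);
	if (intel_domain_is_ss_paging(dmar_domain))
		return setup_ss(dmar_domain);
	return -EINVAL;	/* nested/SVA domains never reach this path */
}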
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index cf7b6882ec75..4f9b01dc91e8 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -10,6 +10,7 @@
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
@@ -518,8 +519,14 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
+ struct irq_domain_info info = {
+ .ops = &intel_ir_domain_ops,
+ .parent = arch_get_ir_parent_domain(),
+ .domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI,
+ .size = INTR_REMAP_TABLE_ENTRIES,
+ .host_data = iommu,
+ };
struct ir_table *ir_table;
- struct fwnode_handle *fn;
unsigned long *bitmap;
void *ir_table_base;
@@ -544,25 +551,16 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
goto out_free_pages;
}
- fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
- if (!fn)
+ info.fwnode = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
+ if (!info.fwnode)
goto out_free_bitmap;
- iommu->ir_domain =
- irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
- 0, INTR_REMAP_TABLE_ENTRIES,
- fn, &intel_ir_domain_ops,
- iommu);
+ iommu->ir_domain = msi_create_parent_irq_domain(&info, &dmar_msi_parent_ops);
if (!iommu->ir_domain) {
pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
goto out_free_fwnode;
}
- irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_DMAR);
- iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
- IRQ_DOMAIN_FLAG_ISOLATED_MSI;
- iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops;
-
ir_table->base = ir_table_base;
ir_table->bitmap = bitmap;
iommu->ir_table = ir_table;
@@ -608,7 +606,7 @@ out_free_ir_domain:
irq_domain_remove(iommu->ir_domain);
iommu->ir_domain = NULL;
out_free_fwnode:
- irq_domain_free_fwnode(fn);
+ irq_domain_free_fwnode(info.fwnode);
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
@@ -1244,10 +1242,10 @@ static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
{
struct intel_ir_data *ir_data = data->chip_data;
- struct vcpu_data *vcpu_pi_info = info;
+ struct intel_iommu_pi_data *pi_data = info;
/* stop posting interrupts, back to the default mode */
- if (!vcpu_pi_info) {
+ if (!pi_data) {
__intel_ir_reconfigure_irte(data, true);
} else {
struct irte irte_pi;
@@ -1265,10 +1263,10 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
/* Update the posted mode fields */
irte_pi.p_pst = 1;
irte_pi.p_urgent = 0;
- irte_pi.p_vector = vcpu_pi_info->vector;
- irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
+ irte_pi.p_vector = pi_data->vector;
+ irte_pi.pda_l = (pi_data->pi_desc_addr >>
(32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
- irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
+ irte_pi.pda_h = (pi_data->pi_desc_addr >> 32) &
~(-1UL << PDA_HIGH_BIT);
ir_data->irq_2_iommu.posted_vcpu = true;
@@ -1530,6 +1528,8 @@ static const struct irq_domain_ops intel_ir_domain_ops = {
static const struct msi_parent_ops dmar_msi_parent_ops = {
.supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI,
+ .bus_select_token = DOMAIN_BUS_DMAR,
+ .bus_select_mask = MATCH_PCI_MSI,
.prefix = "IR-",
.init_dev_msi_info = msi_parent_init_dev_msi_info,
};
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index fc312f649f9e..1b6ad9c900a5 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -216,8 +216,7 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
/* Must be nested domain */
if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
return ERR_PTR(-EOPNOTSUPP);
- if (parent->ops != intel_iommu_ops.default_domain_ops ||
- !s2_domain->nested_parent)
+ if (!intel_domain_is_ss_paging(s2_domain) || !s2_domain->nested_parent)
return ERR_PTR(-EINVAL);
ret = iommu_copy_struct_from_user(&vtd, user_data,
@@ -229,7 +228,6 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
if (!domain)
return ERR_PTR(-ENOMEM);
- domain->use_first_level = true;
domain->s2_domain = s2_domain;
domain->s1_cfg = vtd;
domain->domain.ops = &intel_nested_domain_ops;
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index ac67a056b6c8..52f678975da7 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -348,14 +348,15 @@ static void intel_pasid_flush_present(struct intel_iommu *iommu,
*/
static void pasid_pte_config_first_level(struct intel_iommu *iommu,
struct pasid_entry *pte,
- pgd_t *pgd, u16 did, int flags)
+ phys_addr_t fsptptr, u16 did,
+ int flags)
{
lockdep_assert_held(&iommu->lock);
pasid_clear_entry(pte);
/* Setup the first level page table pointer: */
- pasid_set_flptr(pte, (u64)__pa(pgd));
+ pasid_set_flptr(pte, fsptptr);
if (flags & PASID_FLAG_FL5LP)
pasid_set_flpm(pte, 1);
@@ -372,9 +373,9 @@ static void pasid_pte_config_first_level(struct intel_iommu *iommu,
pasid_set_present(pte);
}
-int intel_pasid_setup_first_level(struct intel_iommu *iommu,
- struct device *dev, pgd_t *pgd,
- u32 pasid, u16 did, int flags)
+int intel_pasid_setup_first_level(struct intel_iommu *iommu, struct device *dev,
+ phys_addr_t fsptptr, u32 pasid, u16 did,
+ int flags)
{
struct pasid_entry *pte;
@@ -402,7 +403,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
return -EBUSY;
}
- pasid_pte_config_first_level(iommu, pte, pgd, did, flags);
+ pasid_pte_config_first_level(iommu, pte, fsptptr, did, flags);
spin_unlock(&iommu->lock);
@@ -412,7 +413,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
}
int intel_pasid_replace_first_level(struct intel_iommu *iommu,
- struct device *dev, pgd_t *pgd,
+ struct device *dev, phys_addr_t fsptptr,
u32 pasid, u16 did, u16 old_did,
int flags)
{
@@ -430,7 +431,7 @@ int intel_pasid_replace_first_level(struct intel_iommu *iommu,
return -EINVAL;
}
- pasid_pte_config_first_level(iommu, &new_pte, pgd, did, flags);
+ pasid_pte_config_first_level(iommu, &new_pte, fsptptr, did, flags);
spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index fd0fd1a0df84..a771a77d4239 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -288,9 +288,9 @@ extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
struct pasid_table *intel_pasid_get_table(struct device *dev);
-int intel_pasid_setup_first_level(struct intel_iommu *iommu,
- struct device *dev, pgd_t *pgd,
- u32 pasid, u16 did, int flags);
+int intel_pasid_setup_first_level(struct intel_iommu *iommu, struct device *dev,
+ phys_addr_t fsptptr, u32 pasid, u16 did,
+ int flags);
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid);
@@ -302,9 +302,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
u32 pasid, struct dmar_domain *domain);
int intel_pasid_replace_first_level(struct intel_iommu *iommu,
- struct device *dev, pgd_t *pgd,
- u32 pasid, u16 did, u16 old_did,
- int flags);
+ struct device *dev, phys_addr_t fsptptr,
+ u32 pasid, u16 did, u16 old_did, int flags);
int intel_pasid_replace_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u16 old_did,
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index f3da596410b5..e147f71f91b7 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -171,7 +171,7 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
/* Setup the pasid table: */
sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
ret = __domain_setup_first_level(iommu, dev, pasid,
- FLPT_DEFAULT_DID, mm->pgd,
+ FLPT_DEFAULT_DID, __pa(mm->pgd),
sflags, old);
if (ret)
goto out_unwind_iopf;
@@ -214,7 +214,6 @@ struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
return ERR_PTR(-ENOMEM);
domain->domain.ops = &intel_svm_domain_ops;
- domain->use_first_level = true;
INIT_LIST_HEAD(&domain->dev_pasids);
INIT_LIST_HEAD(&domain->cache_tags);
spin_lock_init(&domain->cache_lock);
diff --git a/drivers/iommu/intel/trace.h b/drivers/iommu/intel/trace.h
index 9defdae6ebae..6311ba3f1691 100644
--- a/drivers/iommu/intel/trace.h
+++ b/drivers/iommu/intel/trace.h
@@ -130,11 +130,6 @@ DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
TP_ARGS(tag)
);
-DEFINE_EVENT(cache_tag_log, cache_tag_flush_all,
- TP_PROTO(struct cache_tag *tag),
- TP_ARGS(tag)
-);
-
DECLARE_EVENT_CLASS(cache_tag_flush,
TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
unsigned long addr, unsigned long pages, unsigned long mask),
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 96425e92f313..7e8e2216c294 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -85,11 +85,6 @@
#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
-#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
-/* Ignore the contiguous bit for block splitting */
-#define ARM_LPAE_PTE_ATTR_HI_MASK (ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM)
-#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
- ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC (((arm_lpae_iopte)1) << 55)
@@ -155,8 +150,6 @@
#define iopte_type(pte) \
(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
-#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
-
#define iopte_writeable_dirty(pte) \
(((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index a4b606c591da..060ebe330ee1 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2002,13 +2002,6 @@ static void iommu_domain_init(struct iommu_domain *domain, unsigned int type,
domain->owner = ops;
if (!domain->ops)
domain->ops = ops->default_domain_ops;
-
- /*
- * If not already set, assume all sizes by default; the driver
- * may override this later
- */
- if (!domain->pgsize_bitmap)
- domain->pgsize_bitmap = ops->pgsize_bitmap;
}
static struct iommu_domain *
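With the core fallback above removed, domain->pgsize_bitmap is no longer back-filled from ops->pgsize_bitmap, so a driver that forgets to set it in its alloc_paging path ends up with an empty bitmap and every map request failing. A defensive check a driver could add (a sketch, not part of this series):

/* Sketch: catch a forgotten per-domain page-size bitmap at allocation
 * time instead of debugging failing map calls later.
 * real_alloc_paging() is a placeholder for the driver's actual op. */
static struct iommu_domain *checked_alloc_paging(struct device *dev)
{
	struct iommu_domain *domain = real_alloc_paging(dev);

	if (!IS_ERR_OR_NULL(domain))
		WARN_ON(!domain->pgsize_bitmap);
	return domain;
}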
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 86244403b532..65fbd098f9e9 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -137,6 +137,57 @@ static struct iommufd_group *iommufd_get_group(struct iommufd_ctx *ictx,
}
}
+static void iommufd_device_remove_vdev(struct iommufd_device *idev)
+{
+ struct iommufd_vdevice *vdev;
+
+ mutex_lock(&idev->igroup->lock);
+ /* prevent new references from vdev */
+ idev->destroying = true;
+ /* vdev has been completely destroyed by userspace */
+ if (!idev->vdev)
+ goto out_unlock;
+
+ vdev = iommufd_get_vdevice(idev->ictx, idev->vdev->obj.id);
+ /*
+ * An ongoing vdev destroy ioctl has removed the vdev from the object
+ * xarray, but has not finished iommufd_vdevice_destroy() yet, as it
+ * needs the same mutex. Drop the lock, then wait on the wait_cnt
+ * reference for the vdev destruction to finish.
+ */
+ if (IS_ERR(vdev))
+ goto out_unlock;
+
+ /* Should never happen */
+ if (WARN_ON(vdev != idev->vdev)) {
+ iommufd_put_object(idev->ictx, &vdev->obj);
+ goto out_unlock;
+ }
+
+ /*
+ * vdev is still alive. Hold a users refcount to prevent racing with
+ * userspace destruction, then use iommufd_object_tombstone_user() to
+ * destroy it and leave a tombstone.
+ */
+ refcount_inc(&vdev->obj.users);
+ iommufd_put_object(idev->ictx, &vdev->obj);
+ mutex_unlock(&idev->igroup->lock);
+ iommufd_object_tombstone_user(idev->ictx, &vdev->obj);
+ return;
+
+out_unlock:
+ mutex_unlock(&idev->igroup->lock);
+}
+
+void iommufd_device_pre_destroy(struct iommufd_object *obj)
+{
+ struct iommufd_device *idev =
+ container_of(obj, struct iommufd_device, obj);
+
+ /* Release the wait_cnt reference on this */
+ iommufd_device_remove_vdev(idev);
+}
+
void iommufd_device_destroy(struct iommufd_object *obj)
{
struct iommufd_device *idev =
@@ -485,8 +536,7 @@ iommufd_device_get_attach_handle(struct iommufd_device *idev, ioasid_t pasid)
lockdep_assert_held(&idev->igroup->lock);
- handle =
- iommu_attach_handle_get(idev->igroup->group, pasid, 0);
+ handle = iommu_attach_handle_get(idev->igroup->group, pasid, 0);
if (IS_ERR(handle))
return NULL;
return to_iommufd_handle(handle);
@@ -1049,7 +1099,7 @@ static int iommufd_access_change_ioas(struct iommufd_access *access,
}
if (cur_ioas) {
- if (access->ops->unmap) {
+ if (!iommufd_access_is_internal(access) && access->ops->unmap) {
mutex_unlock(&access->ioas_lock);
access->ops->unmap(access->data, 0, ULONG_MAX);
mutex_lock(&access->ioas_lock);
@@ -1085,7 +1135,39 @@ void iommufd_access_destroy_object(struct iommufd_object *obj)
if (access->ioas)
WARN_ON(iommufd_access_change_ioas(access, NULL));
mutex_unlock(&access->ioas_lock);
- iommufd_ctx_put(access->ictx);
+ if (!iommufd_access_is_internal(access))
+ iommufd_ctx_put(access->ictx);
+}
+
+static struct iommufd_access *__iommufd_access_create(struct iommufd_ctx *ictx)
+{
+ struct iommufd_access *access;
+
+ /*
+ * There is no uAPI for the access object, but to keep things symmetric
+ * use the object infrastructure anyhow.
+ */
+ access = iommufd_object_alloc(ictx, access, IOMMUFD_OBJ_ACCESS);
+ if (IS_ERR(access))
+ return access;
+
+ /* The calling driver is a user until iommufd_access_destroy() */
+ refcount_inc(&access->obj.users);
+ mutex_init(&access->ioas_lock);
+ return access;
+}
+
+struct iommufd_access *iommufd_access_create_internal(struct iommufd_ctx *ictx)
+{
+ struct iommufd_access *access;
+
+ access = __iommufd_access_create(ictx);
+ if (IS_ERR(access))
+ return access;
+ access->iova_alignment = PAGE_SIZE;
+
+ iommufd_object_finalize(ictx, &access->obj);
+ return access;
}
/**
@@ -1107,11 +1189,7 @@ iommufd_access_create(struct iommufd_ctx *ictx,
{
struct iommufd_access *access;
- /*
- * There is no uAPI for the access object, but to keep things symmetric
- * use the object infrastructure anyhow.
- */
- access = iommufd_object_alloc(ictx, access, IOMMUFD_OBJ_ACCESS);
+ access = __iommufd_access_create(ictx);
if (IS_ERR(access))
return access;
@@ -1123,13 +1201,10 @@ iommufd_access_create(struct iommufd_ctx *ictx,
else
access->iova_alignment = 1;
- /* The calling driver is a user until iommufd_access_destroy() */
- refcount_inc(&access->obj.users);
access->ictx = ictx;
iommufd_ctx_get(ictx);
iommufd_object_finalize(ictx, &access->obj);
*id = access->obj.id;
- mutex_init(&access->ioas_lock);
return access;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_create, "IOMMUFD");
@@ -1174,6 +1249,22 @@ int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id)
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_attach, "IOMMUFD");
+int iommufd_access_attach_internal(struct iommufd_access *access,
+ struct iommufd_ioas *ioas)
+{
+ int rc;
+
+ mutex_lock(&access->ioas_lock);
+ if (WARN_ON(access->ioas)) {
+ mutex_unlock(&access->ioas_lock);
+ return -EINVAL;
+ }
+
+ rc = iommufd_access_change_ioas(access, ioas);
+ mutex_unlock(&access->ioas_lock);
+ return rc;
+}
+
int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id)
{
int rc;
@@ -1215,7 +1306,8 @@ void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
xa_lock(&ioas->iopt.access_list);
xa_for_each(&ioas->iopt.access_list, index, access) {
- if (!iommufd_lock_obj(&access->obj))
+ if (!iommufd_lock_obj(&access->obj) ||
+ iommufd_access_is_internal(access))
continue;
xa_unlock(&ioas->iopt.access_list);
@@ -1239,6 +1331,7 @@ void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
void iommufd_access_unpin_pages(struct iommufd_access *access,
unsigned long iova, unsigned long length)
{
+ bool internal = iommufd_access_is_internal(access);
struct iopt_area_contig_iter iter;
struct io_pagetable *iopt;
unsigned long last_iova;
@@ -1265,7 +1358,8 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
area, iopt_area_iova_to_index(area, iter.cur_iova),
iopt_area_iova_to_index(
area,
- min(last_iova, iopt_area_last_iova(area))));
+ min(last_iova, iopt_area_last_iova(area))),
+ internal);
WARN_ON(!iopt_area_contig_done(&iter));
up_read(&iopt->iova_rwsem);
mutex_unlock(&access->ioas_lock);
@@ -1314,6 +1408,7 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
unsigned long length, struct page **out_pages,
unsigned int flags)
{
+ bool internal = iommufd_access_is_internal(access);
struct iopt_area_contig_iter iter;
struct io_pagetable *iopt;
unsigned long last_iova;
@@ -1322,7 +1417,8 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
/* Driver's ops don't support pin_pages */
if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
- WARN_ON(access->iova_alignment != PAGE_SIZE || !access->ops->unmap))
+ WARN_ON(access->iova_alignment != PAGE_SIZE ||
+ (!internal && !access->ops->unmap)))
return -EINVAL;
if (!length)
@@ -1356,7 +1452,7 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
}
rc = iopt_area_add_access(area, index, last_index, out_pages,
- flags);
+ flags, internal);
if (rc)
goto err_remove;
out_pages += last_index - index + 1;
@@ -1379,7 +1475,8 @@ err_remove:
iopt_area_iova_to_index(area, iter.cur_iova),
iopt_area_iova_to_index(
area, min(last_iova,
- iopt_area_last_iova(area))));
+ iopt_area_last_iova(area))),
+ internal);
}
up_read(&iopt->iova_rwsem);
mutex_unlock(&access->ioas_lock);
@@ -1453,6 +1550,7 @@ EXPORT_SYMBOL_NS_GPL(iommufd_access_rw, "IOMMUFD");
int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
{
+ const u32 SUPPORTED_FLAGS = IOMMU_HW_INFO_FLAG_INPUT_TYPE;
struct iommu_hw_info *cmd = ucmd->cmd;
void __user *user_ptr = u64_to_user_ptr(cmd->data_uptr);
const struct iommu_ops *ops;
@@ -1462,9 +1560,14 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
void *data;
int rc;
- if (cmd->flags || cmd->__reserved[0] || cmd->__reserved[1] ||
- cmd->__reserved[2])
+ if (cmd->flags & ~SUPPORTED_FLAGS)
return -EOPNOTSUPP;
+ if (cmd->__reserved[0] || cmd->__reserved[1] || cmd->__reserved[2])
+ return -EOPNOTSUPP;
+
+ /* Clear the type field since drivers don't support a random input */
+ if (!(cmd->flags & IOMMU_HW_INFO_FLAG_INPUT_TYPE))
+ cmd->in_data_type = IOMMU_HW_INFO_TYPE_DEFAULT;
idev = iommufd_get_device(ucmd, cmd->dev_id);
if (IS_ERR(idev))
@@ -1484,7 +1587,7 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
*/
if (WARN_ON_ONCE(cmd->out_data_type ==
IOMMU_HW_INFO_TYPE_NONE)) {
- rc = -ENODEV;
+ rc = -EOPNOTSUPP;
goto out_free;
}
} else {
diff --git a/drivers/iommu/iommufd/driver.c b/drivers/iommu/iommufd/driver.c
index 922cd1fe7ec2..6f1010da221c 100644
--- a/drivers/iommu/iommufd/driver.c
+++ b/drivers/iommu/iommufd/driver.c
@@ -3,38 +3,91 @@
*/
#include "iommufd_private.h"
-struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
- size_t size,
- enum iommufd_object_type type)
+/* Driver should use a per-structure helper in include/linux/iommufd.h */
+int _iommufd_object_depend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended)
{
- struct iommufd_object *obj;
+ /* Reject a self-dependency, which would deadlock */
+ if (obj_dependent == obj_depended)
+ return -EINVAL;
+ /* Only support dependency between two objects of the same type */
+ if (obj_dependent->type != obj_depended->type)
+ return -EINVAL;
+
+ refcount_inc(&obj_depended->users);
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(_iommufd_object_depend, "IOMMUFD");
+
+/* Driver should use a per-structure helper in include/linux/iommufd.h */
+void _iommufd_object_undepend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended)
+{
+ if (WARN_ON_ONCE(obj_dependent == obj_depended ||
+ obj_dependent->type != obj_depended->type))
+ return;
+
+ refcount_dec(&obj_depended->users);
+}
+EXPORT_SYMBOL_NS_GPL(_iommufd_object_undepend, "IOMMUFD");
+
+/*
+ * Allocate an @offset to return to user space to use for an mmap() syscall
+ *
+ * Driver should use a per-structure helper in include/linux/iommufd.h
+ */
+int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset)
+{
+ struct iommufd_mmap *immap;
+ unsigned long startp;
int rc;
- obj = kzalloc(size, GFP_KERNEL_ACCOUNT);
- if (!obj)
- return ERR_PTR(-ENOMEM);
- obj->type = type;
- /* Starts out bias'd by 1 until it is removed from the xarray */
- refcount_set(&obj->shortterm_users, 1);
- refcount_set(&obj->users, 1);
+ if (!PAGE_ALIGNED(mmio_addr))
+ return -EINVAL;
+ if (!length || !PAGE_ALIGNED(length))
+ return -EINVAL;
- /*
- * Reserve an ID in the xarray but do not publish the pointer yet since
- * the caller hasn't initialized it yet. Once the pointer is published
- * in the xarray and visible to other threads we can't reliably destroy
- * it anymore, so the caller must complete all errorable operations
- * before calling iommufd_object_finalize().
- */
- rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY, xa_limit_31b,
- GFP_KERNEL_ACCOUNT);
- if (rc)
- goto out_free;
- return obj;
-out_free:
- kfree(obj);
- return ERR_PTR(rc);
+ immap = kzalloc(sizeof(*immap), GFP_KERNEL);
+ if (!immap)
+ return -ENOMEM;
+ immap->owner = owner;
+ immap->length = length;
+ immap->mmio_addr = mmio_addr;
+
+ /* Skip the first page to help the caller identify the returned offset */
+ rc = mtree_alloc_range(&ictx->mt_mmap, &startp, immap, immap->length,
+ PAGE_SIZE, ULONG_MAX, GFP_KERNEL);
+ if (rc < 0) {
+ kfree(immap);
+ return rc;
+ }
+
+ /* mmap() syscall will right-shift the offset in vma->vm_pgoff too */
+ immap->vm_pgoff = startp >> PAGE_SHIFT;
+ *offset = startp;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(_iommufd_alloc_mmap, "IOMMUFD");
+
+/* Driver should use a per-structure helper in include/linux/iommufd.h */
+void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner, unsigned long offset)
+{
+ struct iommufd_mmap *immap;
+
+ immap = mtree_erase(&ictx->mt_mmap, offset);
+ WARN_ON_ONCE(!immap || immap->owner != owner);
+ kfree(immap);
+}
+EXPORT_SYMBOL_NS_GPL(_iommufd_destroy_mmap, "IOMMUFD");
+
+struct device *iommufd_vdevice_to_device(struct iommufd_vdevice *vdev)
+{
+ return vdev->idev->dev;
}
-EXPORT_SYMBOL_NS_GPL(_iommufd_object_alloc, "IOMMUFD");
+EXPORT_SYMBOL_NS_GPL(iommufd_vdevice_to_device, "IOMMUFD");
/* Caller should xa_lock(&viommu->vdevs) to protect the return value */
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
@@ -45,7 +98,7 @@ struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
lockdep_assert_held(&viommu->vdevs.xa_lock);
vdev = xa_load(&viommu->vdevs, vdev_id);
- return vdev ? vdev->dev : NULL;
+ return vdev ? iommufd_vdevice_to_device(vdev) : NULL;
}
EXPORT_SYMBOL_NS_GPL(iommufd_viommu_find_dev, "IOMMUFD");
@@ -62,8 +115,8 @@ int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
xa_lock(&viommu->vdevs);
xa_for_each(&viommu->vdevs, index, vdev) {
- if (vdev->dev == dev) {
- *vdev_id = vdev->id;
+ if (iommufd_vdevice_to_device(vdev) == dev) {
+ *vdev_id = vdev->virt_id;
rc = 0;
break;
}
diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c
index e373b9eec7f5..fc4de63b0bce 100644
--- a/drivers/iommu/iommufd/eventq.c
+++ b/drivers/iommu/iommufd/eventq.c
@@ -427,8 +427,8 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
if (cmd->flags)
return -EOPNOTSUPP;
- fault = __iommufd_object_alloc(ucmd->ictx, fault, IOMMUFD_OBJ_FAULT,
- common.obj);
+ fault = __iommufd_object_alloc_ucmd(ucmd, fault, IOMMUFD_OBJ_FAULT,
+ common.obj);
if (IS_ERR(fault))
return PTR_ERR(fault);
@@ -437,10 +437,8 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
fdno = iommufd_eventq_init(&fault->common, "[iommufd-pgfault]",
ucmd->ictx, &iommufd_fault_fops);
- if (fdno < 0) {
- rc = fdno;
- goto out_abort;
- }
+ if (fdno < 0)
+ return fdno;
cmd->out_fault_id = fault->common.obj.id;
cmd->out_fault_fd = fdno;
@@ -448,7 +446,6 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
if (rc)
goto out_put_fdno;
- iommufd_object_finalize(ucmd->ictx, &fault->common.obj);
fd_install(fdno, fault->common.filep);
@@ -456,9 +453,6 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
out_put_fdno:
put_unused_fd(fdno);
fput(fault->common.filep);
-out_abort:
- iommufd_object_abort_and_destroy(ucmd->ictx, &fault->common.obj);
-
return rc;
}
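The eventq.c hunk above switches the fault object to a ucmd-scoped allocation, which is why the explicit out_abort label disappears: by my reading of the hunk, an object allocated against a ucmd is aborted automatically when the ioctl unwinds with an error, and finalized only on success (hence the explicit iommufd_object_finalize() call is gone too). The resulting error-handling shape, sketched generically:

/* Pattern sketch: with a ucmd-scoped allocation, plain early returns
 * are safe because the ioctl core aborts the un-finalized object.
 * foo, IOMMUFD_OBJ_FOO and setup_foo() are placeholders. */
foo = __iommufd_object_alloc_ucmd(ucmd, foo, IOMMUFD_OBJ_FOO, common.obj);
if (IS_ERR(foo))
	return PTR_ERR(foo);
rc = setup_foo(foo);
if (rc)
	return rc;	/* no goto out_abort, no explicit finalize */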
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 487779470261..fe789c2dc0c9 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -264,7 +264,7 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
hwpt->domain->cookie_type = IOMMU_COOKIE_IOMMUFD;
if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
- rc = -EINVAL;
+ rc = -EOPNOTSUPP;
goto out_abort;
}
return hwpt_nested;
@@ -309,10 +309,8 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
refcount_inc(&viommu->obj.users);
hwpt_nested->parent = viommu->hwpt;
- hwpt->domain =
- viommu->ops->alloc_domain_nested(viommu,
- flags & ~IOMMU_HWPT_FAULT_ID_VALID,
- user_data);
+ hwpt->domain = viommu->ops->alloc_domain_nested(
+ viommu, flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data);
if (IS_ERR(hwpt->domain)) {
rc = PTR_ERR(hwpt->domain);
hwpt->domain = NULL;
@@ -323,7 +321,7 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
hwpt->domain->cookie_type = IOMMU_COOKIE_IOMMUFD;
if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
- rc = -EINVAL;
+ rc = -EOPNOTSUPP;
goto out_abort;
}
return hwpt_nested;
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 8a790e597e12..c0360c450880 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -70,36 +70,45 @@ struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter)
return iter->area;
}
-static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
- unsigned long length,
- unsigned long iova_alignment,
- unsigned long page_offset)
+static bool __alloc_iova_check_range(unsigned long *start, unsigned long last,
+ unsigned long length,
+ unsigned long iova_alignment,
+ unsigned long page_offset)
{
- if (span->is_used || span->last_hole - span->start_hole < length - 1)
+ unsigned long aligned_start;
+
+ /* ALIGN_UP() */
+ if (check_add_overflow(*start, iova_alignment - 1, &aligned_start))
return false;
+ aligned_start &= ~(iova_alignment - 1);
+ aligned_start |= page_offset;
- span->start_hole = ALIGN(span->start_hole, iova_alignment) |
- page_offset;
- if (span->start_hole > span->last_hole ||
- span->last_hole - span->start_hole < length - 1)
+ if (aligned_start >= last || last - aligned_start < length - 1)
return false;
+ *start = aligned_start;
return true;
}
-static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
+static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
unsigned long length,
unsigned long iova_alignment,
unsigned long page_offset)
{
- if (span->is_hole || span->last_used - span->start_used < length - 1)
+ if (span->is_used)
return false;
+ return __alloc_iova_check_range(&span->start_hole, span->last_hole,
+ length, iova_alignment, page_offset);
+}
- span->start_used = ALIGN(span->start_used, iova_alignment) |
- page_offset;
- if (span->start_used > span->last_used ||
- span->last_used - span->start_used < length - 1)
+static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
+ unsigned long length,
+ unsigned long iova_alignment,
+ unsigned long page_offset)
+{
+ if (span->is_hole)
return false;
- return true;
+ return __alloc_iova_check_range(&span->start_used, span->last_used,
+ length, iova_alignment, page_offset);
}
/*
@@ -719,6 +728,12 @@ again:
goto out_unlock_iova;
}
+ /* The area is locked by an object that has not been destroyed */
+ if (area->num_locks) {
+ rc = -EBUSY;
+ goto out_unlock_iova;
+ }
+
if (area_first < start || area_last > last) {
rc = -ENOENT;
goto out_unlock_iova;
@@ -743,8 +758,10 @@ again:
iommufd_access_notify_unmap(iopt, area_first, length);
/* Something is not responding to unmap requests. */
tries++;
- if (WARN_ON(tries > 100))
- return -EDEADLOCK;
+ if (WARN_ON(tries > 100)) {
+ rc = -EDEADLOCK;
+ goto out_unmapped;
+ }
goto again;
}
@@ -766,6 +783,7 @@ again:
out_unlock_iova:
up_write(&iopt->iova_rwsem);
up_read(&iopt->domains_rwsem);
+out_unmapped:
if (unmapped)
*unmapped = unmapped_bytes;
return rc;
@@ -1410,8 +1428,7 @@ out_unlock:
}
void iopt_remove_access(struct io_pagetable *iopt,
- struct iommufd_access *access,
- u32 iopt_access_list_id)
+ struct iommufd_access *access, u32 iopt_access_list_id)
{
down_write(&iopt->domains_rwsem);
down_write(&iopt->iova_rwsem);
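
The reason the rewritten check above goes through check_add_overflow() is that ALIGN()ing a start value near ULONG_MAX can silently wrap to a small number, making an impossible hole look usable. A standalone illustration of the same arithmetic, using __builtin_add_overflow() as the userspace stand-in for check_add_overflow():

#include <stdbool.h>
#include <stdio.h>

static bool align_up_checked(unsigned long start, unsigned long align,
			     unsigned long *aligned)
{
	unsigned long tmp;

	/* ALIGN() == (start + align - 1) & ~(align - 1), but detect the wrap */
	if (__builtin_add_overflow(start, align - 1, &tmp))
		return false;
	*aligned = tmp & ~(align - 1);
	return true;
}

int main(void)
{
	unsigned long out;

	/* Wraps past ULONG_MAX: rejected instead of aliasing to a tiny address */
	printf("%d\n", align_up_checked(0xfffffffffffff001UL, 0x1000UL, &out));
	/* Normal case: 0x1234 aligns up to 0x2000 */
	if (align_up_checked(0x1234UL, 0x1000UL, &out))
		printf("0x%lx\n", out);
	return 0;
}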
diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h
index 10c928a9a463..b6064f4ce4af 100644
--- a/drivers/iommu/iommufd/io_pagetable.h
+++ b/drivers/iommu/iommufd/io_pagetable.h
@@ -48,6 +48,7 @@ struct iopt_area {
int iommu_prot;
bool prevent_access : 1;
unsigned int num_accesses;
+ unsigned int num_locks;
};
struct iopt_allowed {
@@ -238,9 +239,9 @@ void iopt_pages_unfill_xarray(struct iopt_pages *pages, unsigned long start,
int iopt_area_add_access(struct iopt_area *area, unsigned long start,
unsigned long last, struct page **out_pages,
- unsigned int flags);
+ unsigned int flags, bool lock_area);
void iopt_area_remove_access(struct iopt_area *area, unsigned long start,
- unsigned long last);
+ unsigned long last, bool unlock_area);
int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte,
void *data, unsigned long length, unsigned int flags);
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 9ccc83341f32..0da2a81eedfa 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -7,6 +7,7 @@
#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/iova_bitmap.h>
+#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/xarray.h>
@@ -44,6 +45,7 @@ struct iommufd_ctx {
struct xarray groups;
wait_queue_head_t destroy_wait;
struct rw_semaphore ioas_creation_lock;
+ struct maple_tree mt_mmap;
struct mutex sw_msi_lock;
struct list_head sw_msi_list;
@@ -55,6 +57,18 @@ struct iommufd_ctx {
struct iommufd_ioas *vfio_ioas;
};
+/* Entry for iommufd_ctx::mt_mmap */
+struct iommufd_mmap {
+ struct iommufd_object *owner;
+
+ /* Page-shifted start position in mt_mmap to validate vma->vm_pgoff */
+ unsigned long vm_pgoff;
+
+ /* Physical range for io_remap_pfn_range() */
+ phys_addr_t mmio_addr;
+ size_t length;
+};
+
/*
* The IOVA to PFN map. The map automatically copies the PFNs into multiple
* domains and permits sharing of PFNs between io_pagetable instances. This
@@ -135,6 +149,7 @@ struct iommufd_ucmd {
void __user *ubuffer;
u32 user_size;
void *cmd;
+ struct iommufd_object *new_obj;
};
int iommufd_vfio_ioctl(struct iommufd_ctx *ictx, unsigned int cmd,
@@ -154,7 +169,7 @@ static inline bool iommufd_lock_obj(struct iommufd_object *obj)
{
if (!refcount_inc_not_zero(&obj->users))
return false;
- if (!refcount_inc_not_zero(&obj->shortterm_users)) {
+ if (!refcount_inc_not_zero(&obj->wait_cnt)) {
/*
* If the caller doesn't already have a ref on obj this must be
* called under the xa_lock. Otherwise the caller is holding a
@@ -172,11 +187,11 @@ static inline void iommufd_put_object(struct iommufd_ctx *ictx,
struct iommufd_object *obj)
{
/*
- * Users first, then shortterm so that REMOVE_WAIT_SHORTTERM never sees
- * a spurious !0 users with a 0 shortterm_users.
+ * Users first, then wait_cnt so that REMOVE_WAIT never sees a spurious
+ * !0 users with a 0 wait_cnt.
*/
refcount_dec(&obj->users);
- if (refcount_dec_and_test(&obj->shortterm_users))
+ if (refcount_dec_and_test(&obj->wait_cnt))
wake_up_interruptible_all(&ictx->destroy_wait);
}
@@ -187,7 +202,8 @@ void iommufd_object_finalize(struct iommufd_ctx *ictx,
struct iommufd_object *obj);
enum {
- REMOVE_WAIT_SHORTTERM = 1,
+ REMOVE_WAIT = BIT(0),
+ REMOVE_OBJ_TOMBSTONE = BIT(1),
};
int iommufd_object_remove(struct iommufd_ctx *ictx,
struct iommufd_object *to_destroy, u32 id,
@@ -195,15 +211,35 @@ int iommufd_object_remove(struct iommufd_ctx *ictx,
/*
* The caller holds a users refcount and wants to destroy the object. At this
- * point the caller has no shortterm_users reference and at least the xarray
- * will be holding one.
+ * point the caller has no wait_cnt reference and at least the xarray will be
+ * holding one.
*/
static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx,
struct iommufd_object *obj)
{
int ret;
- ret = iommufd_object_remove(ictx, obj, obj->id, REMOVE_WAIT_SHORTTERM);
+ ret = iommufd_object_remove(ictx, obj, obj->id, REMOVE_WAIT);
+
+ /*
+ * If there is a bug and we couldn't destroy the object then we did put
+ * back the caller's users refcount and will eventually try to free it
+ * again during close.
+ */
+ WARN_ON(ret);
+}
+
+/*
+ * Similar to iommufd_object_destroy_user(), except that the object ID is left
+ * reserved/tombstoned.
+ */
+static inline void iommufd_object_tombstone_user(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj)
+{
+ int ret;
+
+ ret = iommufd_object_remove(ictx, obj, obj->id,
+ REMOVE_WAIT | REMOVE_OBJ_TOMBSTONE);
/*
* If there is a bug and we couldn't destroy the object then we did put
@@ -230,6 +266,15 @@ iommufd_object_put_and_try_destroy(struct iommufd_ctx *ictx,
iommufd_object_remove(ictx, obj, obj->id, 0);
}
+/*
+ * Callers of these normal object allocators must call iommufd_object_finalize()
+ * to finalize the object, or call iommufd_object_abort_and_destroy() to revert
+ * the allocation.
+ */
+struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
+ size_t size,
+ enum iommufd_object_type type);
+
#define __iommufd_object_alloc(ictx, ptr, type, obj) \
container_of(_iommufd_object_alloc( \
ictx, \
@@ -243,6 +288,26 @@ iommufd_object_put_and_try_destroy(struct iommufd_ctx *ictx,
__iommufd_object_alloc(ictx, ptr, type, obj)
/*
+ * Callers of these _ucmd allocators should not call iommufd_object_finalize()
+ * or iommufd_object_abort_and_destroy(), as the core automatically does that.
+ */
+struct iommufd_object *
+_iommufd_object_alloc_ucmd(struct iommufd_ucmd *ucmd, size_t size,
+ enum iommufd_object_type type);
+
+#define __iommufd_object_alloc_ucmd(ucmd, ptr, type, obj) \
+ container_of(_iommufd_object_alloc_ucmd( \
+ ucmd, \
+ sizeof(*(ptr)) + BUILD_BUG_ON_ZERO( \
+ offsetof(typeof(*(ptr)), \
+ obj) != 0), \
+ type), \
+ typeof(*(ptr)), obj)
+
+#define iommufd_object_alloc_ucmd(ucmd, ptr, type) \
+ __iommufd_object_alloc_ucmd(ucmd, ptr, type, obj)
+
+/*
* The IO Address Space (IOAS) pagetable is a virtual page table backed by the
* io_pagetable object. It is a user controlled mapping of IOVA -> PFNs. The
* mapping is copied into all of the associated domains and made available to
@@ -266,8 +331,7 @@ struct iommufd_ioas {
static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ctx *ictx,
u32 id)
{
- return container_of(iommufd_get_object(ictx, id,
- IOMMUFD_OBJ_IOAS),
+ return container_of(iommufd_get_object(ictx, id, IOMMUFD_OBJ_IOAS),
struct iommufd_ioas, obj);
}
@@ -425,6 +489,8 @@ struct iommufd_device {
/* always the physical device */
struct device *dev;
bool enforce_cache_coherency;
+ struct iommufd_vdevice *vdev;
+ bool destroying;
};
static inline struct iommufd_device *
@@ -435,6 +501,7 @@ iommufd_get_device(struct iommufd_ucmd *ucmd, u32 id)
struct iommufd_device, obj);
}
+void iommufd_device_pre_destroy(struct iommufd_object *obj);
void iommufd_device_destroy(struct iommufd_object *obj);
int iommufd_get_hw_info(struct iommufd_ucmd *ucmd);
@@ -452,10 +519,32 @@ struct iommufd_access {
int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access);
void iopt_remove_access(struct io_pagetable *iopt,
- struct iommufd_access *access,
- u32 iopt_access_list_id);
+ struct iommufd_access *access, u32 iopt_access_list_id);
void iommufd_access_destroy_object(struct iommufd_object *obj);
+/* iommufd_access for internal use */
+static inline bool iommufd_access_is_internal(struct iommufd_access *access)
+{
+ return !access->ictx;
+}
+
+struct iommufd_access *iommufd_access_create_internal(struct iommufd_ctx *ictx);
+
+static inline void
+iommufd_access_destroy_internal(struct iommufd_ctx *ictx,
+ struct iommufd_access *access)
+{
+ iommufd_object_destroy_user(ictx, &access->obj);
+}
+
+int iommufd_access_attach_internal(struct iommufd_access *access,
+ struct iommufd_ioas *ioas);
+
+static inline void iommufd_access_detach_internal(struct iommufd_access *access)
+{
+ iommufd_access_detach(access);
+}
+
struct iommufd_eventq {
struct iommufd_object obj;
struct iommufd_ctx *ictx;
@@ -528,7 +617,7 @@ struct iommufd_veventq {
struct list_head node; /* for iommufd_viommu::veventqs */
struct iommufd_vevent lost_events_header;
- unsigned int type;
+ enum iommu_veventq_type type;
unsigned int depth;
/* Use common.lock for protection */
@@ -583,7 +672,8 @@ iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
}
static inline struct iommufd_veventq *
-iommufd_viommu_find_veventq(struct iommufd_viommu *viommu, u32 type)
+iommufd_viommu_find_veventq(struct iommufd_viommu *viommu,
+ enum iommu_veventq_type type)
{
struct iommufd_veventq *veventq, *next;
@@ -600,14 +690,17 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_viommu_destroy(struct iommufd_object *obj);
int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_vdevice_destroy(struct iommufd_object *obj);
+void iommufd_vdevice_abort(struct iommufd_object *obj);
+int iommufd_hw_queue_alloc_ioctl(struct iommufd_ucmd *ucmd);
+void iommufd_hw_queue_destroy(struct iommufd_object *obj);
-struct iommufd_vdevice {
- struct iommufd_object obj;
- struct iommufd_ctx *ictx;
- struct iommufd_viommu *viommu;
- struct device *dev;
- u64 id; /* per-vIOMMU virtual ID */
-};
+static inline struct iommufd_vdevice *
+iommufd_get_vdevice(struct iommufd_ctx *ictx, u32 id)
+{
+ return container_of(iommufd_get_object(ictx, id,
+ IOMMUFD_OBJ_VDEVICE),
+ struct iommufd_vdevice, obj);
+}
#ifdef CONFIG_IOMMUFD_TEST
int iommufd_test(struct iommufd_ucmd *ucmd);
diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index 1cd7e8394129..8fc618b2bcf9 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -227,6 +227,23 @@ struct iommu_hwpt_invalidate_selftest {
#define IOMMU_VIOMMU_TYPE_SELFTEST 0xdeadbeef
+/**
+ * struct iommu_viommu_selftest - vIOMMU data for Mock driver
+ * (IOMMU_VIOMMU_TYPE_SELFTEST)
+ * @in_data: Input random data from user space
+ * @out_data: Output data (matching @in_data) to user space
+ * @out_mmap_offset: The offset argument for mmap syscall
+ * @out_mmap_length: The length argument for mmap syscall
+ *
+ * Simply set @out_data=@in_data for a loopback test
+ */
+struct iommu_viommu_selftest {
+ __u32 in_data;
+ __u32 out_data;
+ __aligned_u64 out_mmap_offset;
+ __aligned_u64 out_mmap_length;
+};
+
/* Should not be equal to any defined value in enum iommu_viommu_invalidate_data_type */
#define IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST 0xdeadbeef
#define IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST_INVALID 0xdadbeef
@@ -252,4 +269,7 @@ struct iommu_viommu_event_selftest {
__u32 virt_id;
};
+#define IOMMU_HW_QUEUE_TYPE_SELFTEST 0xdeadbeef
+#define IOMMU_TEST_HW_QUEUE_MAX 2
+
#endif
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index 39a86a4a1d3a..4514575818fc 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -407,7 +407,6 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
update_indexes:
if (unlikely(!iova_bitmap_mapped_range(mapped, iova, length))) {
-
/*
* The attempt to advance the base index to @iova
* may fail if it's out of bounds, or pinning the pages
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 3df468f64e7d..15af7ced0501 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -23,12 +23,72 @@
#include "iommufd_test.h"
struct iommufd_object_ops {
+ void (*pre_destroy)(struct iommufd_object *obj);
void (*destroy)(struct iommufd_object *obj);
void (*abort)(struct iommufd_object *obj);
};
static const struct iommufd_object_ops iommufd_object_ops[];
static struct miscdevice vfio_misc_dev;
+struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
+ size_t size,
+ enum iommufd_object_type type)
+{
+ struct iommufd_object *obj;
+ int rc;
+
+ obj = kzalloc(size, GFP_KERNEL_ACCOUNT);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+ obj->type = type;
+ /* Starts out biased by 1 until it is removed from the xarray */
+ refcount_set(&obj->wait_cnt, 1);
+ refcount_set(&obj->users, 1);
+
+ /*
+ * Reserve an ID in the xarray but do not publish the pointer, since the
+ * caller has not initialized the object yet. Once the pointer is published
+ * in the xarray and visible to other threads we can't reliably destroy
+ * it anymore, so the caller must complete all fallible operations
+ * before calling iommufd_object_finalize().
+ */
+ rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY, xa_limit_31b,
+ GFP_KERNEL_ACCOUNT);
+ if (rc)
+ goto out_free;
+ return obj;
+out_free:
+ kfree(obj);
+ return ERR_PTR(rc);
+}
+
+struct iommufd_object *_iommufd_object_alloc_ucmd(struct iommufd_ucmd *ucmd,
+ size_t size,
+ enum iommufd_object_type type)
+{
+ struct iommufd_object *new_obj;
+
+ /* Something is coded wrong if this is hit */
+ if (WARN_ON(ucmd->new_obj))
+ return ERR_PTR(-EBUSY);
+
+ /*
+ * An abort op must be invoked while holding the caller's lock. That
+ * cannot work with _iommufd_object_alloc_ucmd(), because the core invokes
+ * the abort op from iommufd_object_abort_and_destroy(), which runs
+ * outside any caller lock.
+ */
+ if (WARN_ON(iommufd_object_ops[type].abort))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ new_obj = _iommufd_object_alloc(ucmd->ictx, size, type);
+ if (IS_ERR(new_obj))
+ return new_obj;
+
+ ucmd->new_obj = new_obj;
+ return new_obj;
+}
+
/*
* Allow concurrent access to the object.
*
@@ -95,20 +155,22 @@ struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
return obj;
}
-static int iommufd_object_dec_wait_shortterm(struct iommufd_ctx *ictx,
- struct iommufd_object *to_destroy)
+static int iommufd_object_dec_wait(struct iommufd_ctx *ictx,
+ struct iommufd_object *to_destroy)
{
- if (refcount_dec_and_test(&to_destroy->shortterm_users))
+ if (refcount_dec_and_test(&to_destroy->wait_cnt))
return 0;
+ if (iommufd_object_ops[to_destroy->type].pre_destroy)
+ iommufd_object_ops[to_destroy->type].pre_destroy(to_destroy);
+
if (wait_event_timeout(ictx->destroy_wait,
- refcount_read(&to_destroy->shortterm_users) ==
- 0,
- msecs_to_jiffies(60000)))
+ refcount_read(&to_destroy->wait_cnt) == 0,
+ msecs_to_jiffies(60000)))
return 0;
pr_crit("Time out waiting for iommufd object to become free\n");
- refcount_inc(&to_destroy->shortterm_users);
+ refcount_inc(&to_destroy->wait_cnt);
return -EBUSY;
}
@@ -122,17 +184,18 @@ int iommufd_object_remove(struct iommufd_ctx *ictx,
{
struct iommufd_object *obj;
XA_STATE(xas, &ictx->objects, id);
- bool zerod_shortterm = false;
+ bool zerod_wait_cnt = false;
int ret;
/*
- * The purpose of the shortterm_users is to ensure deterministic
- * destruction of objects used by external drivers and destroyed by this
- * function. Any temporary increment of the refcount must increment
- * shortterm_users, such as during ioctl execution.
+ * The purpose of the wait_cnt is to ensure deterministic destruction
+ * of objects used by external drivers and destroyed by this function.
+ * An elevated wait_cnt should either be short-lived, such as during
+ * ioctl execution, or be revocable via pre_destroy(), as with a vdev
+ * holding the idev's refcount.
*/
- if (flags & REMOVE_WAIT_SHORTTERM) {
- ret = iommufd_object_dec_wait_shortterm(ictx, to_destroy);
+ if (flags & REMOVE_WAIT) {
+ ret = iommufd_object_dec_wait(ictx, to_destroy);
if (ret) {
/*
* We have a bug. Put back the callers reference and
@@ -141,7 +204,7 @@ int iommufd_object_remove(struct iommufd_ctx *ictx,
refcount_dec(&to_destroy->users);
return ret;
}
- zerod_shortterm = true;
+ zerod_wait_cnt = true;
}
xa_lock(&ictx->objects);
@@ -167,17 +230,17 @@ int iommufd_object_remove(struct iommufd_ctx *ictx,
goto err_xa;
}
- xas_store(&xas, NULL);
+ xas_store(&xas, (flags & REMOVE_OBJ_TOMBSTONE) ? XA_ZERO_ENTRY : NULL);
if (ictx->vfio_ioas == container_of(obj, struct iommufd_ioas, obj))
ictx->vfio_ioas = NULL;
xa_unlock(&ictx->objects);
/*
- * Since users is zero any positive users_shortterm must be racing
+ * Since users is zero any positive wait_cnt must be racing
* iommufd_put_object(), or we have a bug.
*/
- if (!zerod_shortterm) {
- ret = iommufd_object_dec_wait_shortterm(ictx, obj);
+ if (!zerod_wait_cnt) {
+ ret = iommufd_object_dec_wait(ictx, obj);
if (WARN_ON(ret))
return ret;
}
@@ -187,9 +250,9 @@ int iommufd_object_remove(struct iommufd_ctx *ictx,
return 0;
err_xa:
- if (zerod_shortterm) {
+ if (zerod_wait_cnt) {
/* Restore the xarray owned reference */
- refcount_set(&obj->shortterm_users, 1);
+ refcount_set(&obj->wait_cnt, 1);
}
xa_unlock(&ictx->objects);
@@ -226,6 +289,7 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
xa_init(&ictx->groups);
ictx->file = filp;
+ mt_init_flags(&ictx->mt_mmap, MT_FLAGS_ALLOC_RANGE);
init_waitqueue_head(&ictx->destroy_wait);
mutex_init(&ictx->sw_msi_lock);
INIT_LIST_HEAD(&ictx->sw_msi_list);
@@ -252,19 +316,41 @@ static int iommufd_fops_release(struct inode *inode, struct file *filp)
while (!xa_empty(&ictx->objects)) {
unsigned int destroyed = 0;
unsigned long index;
+ bool empty = true;
+ /*
+ * We can't use xa_empty() to end the loop, as the tombstones are
+ * stored as XA_ZERO_ENTRY in the xarray. xa_for_each() automatically
+ * converts them to NULL and skips them, which can keep xa_empty()
+ * false even when only tombstones remain. Thus the loop is done once
+ * xa_for_each() finds no further !NULL entries.
+ */
xa_for_each(&ictx->objects, index, obj) {
+ empty = false;
if (!refcount_dec_if_one(&obj->users))
continue;
+
destroyed++;
xa_erase(&ictx->objects, index);
iommufd_object_ops[obj->type].destroy(obj);
kfree(obj);
}
+
+ if (empty)
+ break;
+
/* Bug related to users refcount */
if (WARN_ON(!destroyed))
break;
}
+
+ /*
+ * There may be some tombstones left over from
+ * iommufd_object_tombstone_user()
+ */
+ xa_destroy(&ictx->objects);
+
WARN_ON(!xa_empty(&ictx->groups));
mutex_destroy(&ictx->sw_msi_lock);
@@ -305,6 +391,7 @@ union ucmd_buffer {
struct iommu_destroy destroy;
struct iommu_fault_alloc fault;
struct iommu_hw_info info;
+ struct iommu_hw_queue_alloc hw_queue;
struct iommu_hwpt_alloc hwpt;
struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap;
struct iommu_hwpt_invalidate cache;
@@ -347,6 +434,8 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
struct iommu_fault_alloc, out_fault_fd),
IOCTL_OP(IOMMU_GET_HW_INFO, iommufd_get_hw_info, struct iommu_hw_info,
__reserved),
+ IOCTL_OP(IOMMU_HW_QUEUE_ALLOC, iommufd_hw_queue_alloc_ioctl,
+ struct iommu_hw_queue_alloc, length),
IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc,
__reserved),
IOCTL_OP(IOMMU_HWPT_GET_DIRTY_BITMAP, iommufd_hwpt_get_dirty_bitmap,
@@ -417,14 +506,83 @@ static long iommufd_fops_ioctl(struct file *filp, unsigned int cmd,
if (ret)
return ret;
ret = op->execute(&ucmd);
+
+ if (ucmd.new_obj) {
+ if (ret)
+ iommufd_object_abort_and_destroy(ictx, ucmd.new_obj);
+ else
+ iommufd_object_finalize(ictx, ucmd.new_obj);
+ }
return ret;
}
+static void iommufd_fops_vma_open(struct vm_area_struct *vma)
+{
+ struct iommufd_mmap *immap = vma->vm_private_data;
+
+ refcount_inc(&immap->owner->users);
+}
+
+static void iommufd_fops_vma_close(struct vm_area_struct *vma)
+{
+ struct iommufd_mmap *immap = vma->vm_private_data;
+
+ refcount_dec(&immap->owner->users);
+}
+
+static const struct vm_operations_struct iommufd_vma_ops = {
+ .open = iommufd_fops_vma_open,
+ .close = iommufd_fops_vma_close,
+};
+
+/* The vm_pgoff must be pre-allocated from mt_mmap, and given to user space */
+static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct iommufd_ctx *ictx = filp->private_data;
+ size_t length = vma->vm_end - vma->vm_start;
+ struct iommufd_mmap *immap;
+ int rc;
+
+ if (!PAGE_ALIGNED(length))
+ return -EINVAL;
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+ if (vma->vm_flags & VM_EXEC)
+ return -EPERM;
+
+ /* vma->vm_pgoff carries a page-shifted start position to an immap */
+ immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
+ if (!immap)
+ return -ENXIO;
+ /*
+ * mtree_load() returns the immap for any contained mmio_addr, so only
+ * allow mapping the exact reserved range
+ */
+ if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
+ return -ENXIO;
+
+ vma->vm_pgoff = 0;
+ vma->vm_private_data = immap;
+ vma->vm_ops = &iommufd_vma_ops;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ rc = io_remap_pfn_range(vma, vma->vm_start,
+ immap->mmio_addr >> PAGE_SHIFT, length,
+ vma->vm_page_prot);
+ if (rc)
+ return rc;
+
+ /* vm_ops.open won't be called for mmap itself. */
+ refcount_inc(&immap->owner->users);
+ return rc;
+}
+
static const struct file_operations iommufd_fops = {
.owner = THIS_MODULE,
.open = iommufd_fops_open,
.release = iommufd_fops_release,
.unlocked_ioctl = iommufd_fops_ioctl,
+ .mmap = iommufd_fops_mmap,
};
/**
@@ -498,11 +656,15 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
.destroy = iommufd_access_destroy_object,
},
[IOMMUFD_OBJ_DEVICE] = {
+ .pre_destroy = iommufd_device_pre_destroy,
.destroy = iommufd_device_destroy,
},
[IOMMUFD_OBJ_FAULT] = {
.destroy = iommufd_fault_destroy,
},
+ [IOMMUFD_OBJ_HW_QUEUE] = {
+ .destroy = iommufd_hw_queue_destroy,
+ },
[IOMMUFD_OBJ_HWPT_PAGING] = {
.destroy = iommufd_hwpt_paging_destroy,
.abort = iommufd_hwpt_paging_abort,
@@ -516,6 +678,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
},
[IOMMUFD_OBJ_VDEVICE] = {
.destroy = iommufd_vdevice_destroy,
+ .abort = iommufd_vdevice_abort,
},
[IOMMUFD_OBJ_VEVENTQ] = {
.destroy = iommufd_veventq_destroy,
@@ -539,7 +702,6 @@ static struct miscdevice iommu_misc_dev = {
.mode = 0660,
};
-
static struct miscdevice vfio_misc_dev = {
.minor = VFIO_MINOR,
.name = "vfio",
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index 3427749bc5ce..c3433b845561 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -1287,8 +1287,7 @@ static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages,
}
static struct iopt_pages *iopt_alloc_pages(unsigned long start_byte,
- unsigned long length,
- bool writable)
+ unsigned long length, bool writable)
{
struct iopt_pages *pages;
@@ -1328,7 +1327,7 @@ struct iopt_pages *iopt_alloc_user_pages(void __user *uptr,
struct iopt_pages *pages;
unsigned long end;
void __user *uptr_down =
- (void __user *) ALIGN_DOWN((uintptr_t)uptr, PAGE_SIZE);
+ (void __user *)ALIGN_DOWN((uintptr_t)uptr, PAGE_SIZE);
if (check_add_overflow((unsigned long)uptr, length, &end))
return ERR_PTR(-EOVERFLOW);
@@ -2104,6 +2103,7 @@ iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index,
* @last_index: Inclusive last page index
* @out_pages: Output list of struct page's representing the PFNs
* @flags: IOMMUFD_ACCESS_RW_* flags
+ * @lock_area: Fail userspace munmap on this area
*
* Record that an in-kernel access will be accessing the pages, ensure they are
* pinned, and return the PFNs as a simple list of 'struct page *'.
@@ -2111,8 +2111,8 @@ iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index,
* This should be undone through a matching call to iopt_area_remove_access()
*/
int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
- unsigned long last_index, struct page **out_pages,
- unsigned int flags)
+ unsigned long last_index, struct page **out_pages,
+ unsigned int flags, bool lock_area)
{
struct iopt_pages *pages = area->pages;
struct iopt_pages_access *access;
@@ -2125,6 +2125,8 @@ int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
access = iopt_pages_get_exact_access(pages, start_index, last_index);
if (access) {
area->num_accesses++;
+ if (lock_area)
+ area->num_locks++;
access->users++;
iopt_pages_fill_from_xarray(pages, start_index, last_index,
out_pages);
@@ -2146,6 +2148,8 @@ int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
access->node.last = last_index;
access->users = 1;
area->num_accesses++;
+ if (lock_area)
+ area->num_locks++;
interval_tree_insert(&access->node, &pages->access_itree);
mutex_unlock(&pages->mutex);
return 0;
@@ -2162,12 +2166,13 @@ err_unlock:
* @area: The source of PFNs
* @start_index: First page index
* @last_index: Inclusive last page index
+ * @unlock_area: Must match the lock_area of the paired iopt_area_add_access()
*
* Undo iopt_area_add_access() and unpin the pages if necessary. The caller
* must stop using the PFNs before calling this.
*/
void iopt_area_remove_access(struct iopt_area *area, unsigned long start_index,
- unsigned long last_index)
+ unsigned long last_index, bool unlock_area)
{
struct iopt_pages *pages = area->pages;
struct iopt_pages_access *access;
@@ -2178,6 +2183,10 @@ void iopt_area_remove_access(struct iopt_area *area, unsigned long start_index,
goto out_unlock;
WARN_ON(area->num_accesses == 0 || access->users == 0);
+ if (unlock_area) {
+ WARN_ON(area->num_locks == 0);
+ area->num_locks--;
+ }
area->num_accesses--;
access->users--;
if (access->users)
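
Because num_locks is what makes iopt_unmap_iova_range() return -EBUSY in the io_pagetable.c hunk earlier, in-kernel callers must keep the lock_area/unlock_area arguments symmetric. A schematic pairing (the foo_* names are placeholders):

/* Pin a window and lock its area against userspace unmap */
static int foo_pin_queue(struct iopt_area *area, unsigned long start_index,
			 unsigned long last_index, struct page **out_pages,
			 unsigned int flags)
{
	return iopt_area_add_access(area, start_index, last_index, out_pages,
				    flags, true /* lock_area */);
}

static void foo_unpin_queue(struct iopt_area *area, unsigned long start_index,
			    unsigned long last_index)
{
	/* unlock_area must mirror lock_area, or num_locks stays elevated */
	iopt_area_remove_access(area, start_index, last_index, true);
}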
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 6bd0abf9a641..61686603c769 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -138,7 +138,6 @@ to_mock_domain(struct iommu_domain *domain)
struct mock_iommu_domain_nested {
struct iommu_domain domain;
struct mock_viommu *mock_viommu;
- struct mock_iommu_domain *parent;
u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
};
@@ -151,6 +150,11 @@ to_mock_nested(struct iommu_domain *domain)
struct mock_viommu {
struct iommufd_viommu core;
struct mock_iommu_domain *s2_parent;
+ struct mock_hw_queue *hw_queue[IOMMU_TEST_HW_QUEUE_MAX];
+ struct mutex queue_mutex;
+
+ unsigned long mmap_offset;
+ u32 *page; /* Mmap page to test u32 type of in_data */
};
static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
@@ -158,6 +162,19 @@ static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
return container_of(viommu, struct mock_viommu, core);
}
+struct mock_hw_queue {
+ struct iommufd_hw_queue core;
+ struct mock_viommu *mock_viommu;
+ struct mock_hw_queue *prev;
+ u16 index;
+};
+
+static inline struct mock_hw_queue *
+to_mock_hw_queue(struct iommufd_hw_queue *hw_queue)
+{
+ return container_of(hw_queue, struct mock_hw_queue, core);
+}
+
enum selftest_obj_type {
TYPE_IDEV,
};
@@ -288,10 +305,15 @@ static struct iommu_domain mock_blocking_domain = {
.ops = &mock_blocking_ops,
};
-static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
+static void *mock_domain_hw_info(struct device *dev, u32 *length,
+ enum iommu_hw_info_type *type)
{
struct iommu_test_hw_info *info;
+ if (*type != IOMMU_HW_INFO_TYPE_DEFAULT &&
+ *type != IOMMU_HW_INFO_TYPE_SELFTEST)
+ return ERR_PTR(-EOPNOTSUPP);
+
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
@@ -434,7 +456,6 @@ mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
mock_nested = __mock_domain_alloc_nested(user_data);
if (IS_ERR(mock_nested))
return ERR_CAST(mock_nested);
- mock_nested->parent = mock_parent;
return &mock_nested->domain;
}
@@ -671,9 +692,15 @@ static void mock_viommu_destroy(struct iommufd_viommu *viommu)
{
struct mock_iommu_device *mock_iommu = container_of(
viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
+ struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
if (refcount_dec_and_test(&mock_iommu->users))
complete(&mock_iommu->complete);
+ if (mock_viommu->mmap_offset)
+ iommufd_viommu_destroy_mmap(&mock_viommu->core,
+ mock_viommu->mmap_offset);
+ free_page((unsigned long)mock_viommu->page);
+ mutex_destroy(&mock_viommu->queue_mutex);
/* iommufd core frees mock_viommu and viommu */
}
@@ -692,7 +719,6 @@ mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
if (IS_ERR(mock_nested))
return ERR_CAST(mock_nested);
mock_nested->mock_viommu = mock_viommu;
- mock_nested->parent = mock_viommu->s2_parent;
return &mock_nested->domain;
}
@@ -766,31 +792,149 @@ out:
return rc;
}
+static size_t mock_viommu_get_hw_queue_size(struct iommufd_viommu *viommu,
+ enum iommu_hw_queue_type queue_type)
+{
+ if (queue_type != IOMMU_HW_QUEUE_TYPE_SELFTEST)
+ return 0;
+ return HW_QUEUE_STRUCT_SIZE(struct mock_hw_queue, core);
+}
+
+static void mock_hw_queue_destroy(struct iommufd_hw_queue *hw_queue)
+{
+ struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
+ struct mock_viommu *mock_viommu = mock_hw_queue->mock_viommu;
+
+ mutex_lock(&mock_viommu->queue_mutex);
+ mock_viommu->hw_queue[mock_hw_queue->index] = NULL;
+ if (mock_hw_queue->prev)
+ iommufd_hw_queue_undepend(mock_hw_queue, mock_hw_queue->prev,
+ core);
+ mutex_unlock(&mock_viommu->queue_mutex);
+}
+
+/* Test iommufd_hw_queue_depend/undepend() */
+static int mock_hw_queue_init_phys(struct iommufd_hw_queue *hw_queue, u32 index,
+ phys_addr_t base_addr_pa)
+{
+ struct mock_viommu *mock_viommu = to_mock_viommu(hw_queue->viommu);
+ struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
+ struct mock_hw_queue *prev = NULL;
+ int rc = 0;
+
+ if (index >= IOMMU_TEST_HW_QUEUE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&mock_viommu->queue_mutex);
+
+ if (mock_viommu->hw_queue[index]) {
+ rc = -EEXIST;
+ goto unlock;
+ }
+
+ if (index) {
+ prev = mock_viommu->hw_queue[index - 1];
+ if (!prev) {
+ rc = -EIO;
+ goto unlock;
+ }
+ }
+
+ /*
+ * Test to catch a kernel bug if the core converted the physical address
+ * incorrectly. Let mock_domain_iova_to_phys() WARN_ON if it fails.
+ */
+ if (base_addr_pa != iommu_iova_to_phys(&mock_viommu->s2_parent->domain,
+ hw_queue->base_addr)) {
+ rc = -EFAULT;
+ goto unlock;
+ }
+
+ if (prev) {
+ rc = iommufd_hw_queue_depend(mock_hw_queue, prev, core);
+ if (rc)
+ goto unlock;
+ }
+
+ mock_hw_queue->prev = prev;
+ mock_hw_queue->mock_viommu = mock_viommu;
+ mock_viommu->hw_queue[index] = mock_hw_queue;
+
+ hw_queue->destroy = &mock_hw_queue_destroy;
+unlock:
+ mutex_unlock(&mock_viommu->queue_mutex);
+ return rc;
+}
+
static struct iommufd_viommu_ops mock_viommu_ops = {
.destroy = mock_viommu_destroy,
.alloc_domain_nested = mock_viommu_alloc_domain_nested,
.cache_invalidate = mock_viommu_cache_invalidate,
+ .get_hw_queue_size = mock_viommu_get_hw_queue_size,
+ .hw_queue_init_phys = mock_hw_queue_init_phys,
};
-static struct iommufd_viommu *mock_viommu_alloc(struct device *dev,
- struct iommu_domain *domain,
- struct iommufd_ctx *ictx,
- unsigned int viommu_type)
+static size_t mock_get_viommu_size(struct device *dev,
+ enum iommu_viommu_type viommu_type)
{
- struct mock_iommu_device *mock_iommu =
- iommu_get_iommu_dev(dev, struct mock_iommu_device, iommu_dev);
- struct mock_viommu *mock_viommu;
-
if (viommu_type != IOMMU_VIOMMU_TYPE_SELFTEST)
- return ERR_PTR(-EOPNOTSUPP);
+ return 0;
+ return VIOMMU_STRUCT_SIZE(struct mock_viommu, core);
+}
+
+static int mock_viommu_init(struct iommufd_viommu *viommu,
+ struct iommu_domain *parent_domain,
+ const struct iommu_user_data *user_data)
+{
+ struct mock_iommu_device *mock_iommu = container_of(
+ viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
+ struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
+ struct iommu_viommu_selftest data;
+ int rc;
+
+ if (user_data) {
+ rc = iommu_copy_struct_from_user(
+ &data, user_data, IOMMU_VIOMMU_TYPE_SELFTEST, out_data);
+ if (rc)
+ return rc;
+
+ /* Allocate two pages */
+ mock_viommu->page =
+ (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!mock_viommu->page)
+ return -ENOMEM;
- mock_viommu = iommufd_viommu_alloc(ictx, struct mock_viommu, core,
- &mock_viommu_ops);
- if (IS_ERR(mock_viommu))
- return ERR_CAST(mock_viommu);
+ rc = iommufd_viommu_alloc_mmap(&mock_viommu->core,
+ __pa(mock_viommu->page),
+ PAGE_SIZE * 2,
+ &mock_viommu->mmap_offset);
+ if (rc)
+ goto err_free_page;
+
+ /* For loopback tests on both the page and out_data */
+ *mock_viommu->page = data.in_data;
+ data.out_data = data.in_data;
+ data.out_mmap_length = PAGE_SIZE * 2;
+ data.out_mmap_offset = mock_viommu->mmap_offset;
+ rc = iommu_copy_struct_to_user(
+ user_data, &data, IOMMU_VIOMMU_TYPE_SELFTEST, out_data);
+ if (rc)
+ goto err_destroy_mmap;
+ }
refcount_inc(&mock_iommu->users);
- return &mock_viommu->core;
+ mutex_init(&mock_viommu->queue_mutex);
+ mock_viommu->s2_parent = to_mock_domain(parent_domain);
+
+ viommu->ops = &mock_viommu_ops;
+ return 0;
+
+err_destroy_mmap:
+ iommufd_viommu_destroy_mmap(&mock_viommu->core,
+ mock_viommu->mmap_offset);
+err_free_page:
+ free_page((unsigned long)mock_viommu->page);
+ return rc;
}
static const struct iommu_ops mock_ops = {
@@ -801,7 +945,6 @@ static const struct iommu_ops mock_ops = {
.default_domain = &mock_blocking_domain,
.blocked_domain = &mock_blocking_domain,
.owner = THIS_MODULE,
- .pgsize_bitmap = MOCK_IO_PAGE_SIZE,
.hw_info = mock_domain_hw_info,
.domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
.domain_alloc_nested = mock_domain_alloc_nested,
@@ -810,7 +953,8 @@ static const struct iommu_ops mock_ops = {
.probe_device = mock_probe_device,
.page_response = mock_domain_page_response,
.user_pasid_table = true,
- .viommu_alloc = mock_viommu_alloc,
+ .get_viommu_size = mock_get_viommu_size,
+ .viommu_init = mock_viommu_init,
.default_domain_ops =
&(struct iommu_domain_ops){
.free = mock_domain_free,
@@ -1216,9 +1360,8 @@ static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
return 0;
}
-static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd,
- u32 mockpt_id, unsigned int iotlb_id,
- u32 iotlb)
+static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd, u32 mockpt_id,
+ unsigned int iotlb_id, u32 iotlb)
{
struct mock_iommu_domain_nested *mock_nested;
struct iommufd_hw_pagetable *hwpt;
@@ -1491,7 +1634,7 @@ static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
int rc;
/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
- if (length > 16*1024*1024)
+ if (length > 16 * 1024 * 1024)
return -ENOMEM;
if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
@@ -1508,7 +1651,7 @@ static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
if (flags & MOCK_FLAGS_ACCESS_SYZ)
iova = iommufd_test_syz_conv_iova(staccess->access,
- &cmd->access_pages.iova);
+ &cmd->access_pages.iova);
npages = (ALIGN(iova + length, PAGE_SIZE) -
ALIGN_DOWN(iova, PAGE_SIZE)) /
@@ -1584,7 +1727,7 @@ static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
int rc;
/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
- if (length > 16*1024*1024)
+ if (length > 16 * 1024 * 1024)
return -ENOMEM;
if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
@@ -1610,7 +1753,7 @@ static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
if (flags & MOCK_FLAGS_ACCESS_SYZ)
iova = iommufd_test_syz_conv_iova(staccess->access,
- &cmd->access_rw.iova);
+ &cmd->access_rw.iova);
rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
if (rc)
@@ -1665,7 +1808,7 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
goto out_put;
}
- if (copy_from_user(tmp, uptr,DIV_ROUND_UP(max, BITS_PER_BYTE))) {
+ if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) {
rc = -EFAULT;
goto out_free;
}
@@ -1701,7 +1844,7 @@ out_put:
static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
struct iommu_test_cmd *cmd)
{
- struct iopf_fault event = { };
+ struct iopf_fault event = {};
struct iommufd_device *idev;
idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id);
@@ -1832,8 +1975,7 @@ static int iommufd_test_pasid_attach(struct iommufd_ucmd *ucmd,
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
if (rc)
- iommufd_device_detach(sobj->idev.idev,
- cmd->pasid_attach.pasid);
+ iommufd_device_detach(sobj->idev.idev, cmd->pasid_attach.pasid);
out_sobj:
iommufd_put_object(ucmd->ictx, &sobj->obj);
@@ -2004,8 +2146,8 @@ int __init iommufd_test_init(void)
goto err_bus;
rc = iommu_device_register_bus(&mock_iommu.iommu_dev, &mock_ops,
- &iommufd_mock_bus_type.bus,
- &iommufd_mock_bus_type.nb);
+ &iommufd_mock_bus_type.bus,
+ &iommufd_mock_bus_type.nb);
if (rc)
goto err_sysfs;
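
Userspace drives the loopback above through IOMMU_VIOMMU_ALLOC with the driver data embedded, then maps the shared page at the reported offset. A hedged sketch of the selftest-style flow (iommufd, dev_id and hwpt_id are assumed to exist; struct layouts follow the uAPI referenced in this series):

#include <assert.h>
#include <err.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/iommufd.h>
#include "iommufd_test.h"	/* hypothetical path for the selftest defines */

static void viommu_loopback(int iommufd, uint32_t dev_id, uint32_t hwpt_id)
{
	struct iommu_viommu_selftest data = { .in_data = 0x12345678 };
	struct iommu_viommu_alloc cmd = {
		.size = sizeof(cmd),
		.type = IOMMU_VIOMMU_TYPE_SELFTEST,
		.dev_id = dev_id,
		.hwpt_id = hwpt_id,
		.data_len = sizeof(data),
		.data_uptr = (uintptr_t)&data,
	};
	uint32_t *page;

	if (ioctl(iommufd, IOMMU_VIOMMU_ALLOC, &cmd))
		err(1, "IOMMU_VIOMMU_ALLOC");
	assert(data.out_data == data.in_data);	/* loopback via out_data */

	/* Loopback via the two mmap'able pages backing mock_viommu->page */
	page = mmap(NULL, data.out_mmap_length, PROT_READ | PROT_WRITE,
		    MAP_SHARED, iommufd, data.out_mmap_offset);
	assert(page != MAP_FAILED && *page == data.in_data);
}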
diff --git a/drivers/iommu/iommufd/viommu.c b/drivers/iommu/iommufd/viommu.c
index 01df2b985f02..2ca5809b238b 100644
--- a/drivers/iommu/iommufd/viommu.c
+++ b/drivers/iommu/iommufd/viommu.c
@@ -17,10 +17,16 @@ void iommufd_viommu_destroy(struct iommufd_object *obj)
int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
struct iommu_viommu_alloc *cmd = ucmd->cmd;
+ const struct iommu_user_data user_data = {
+ .type = cmd->type,
+ .uptr = u64_to_user_ptr(cmd->data_uptr),
+ .len = cmd->data_len,
+ };
struct iommufd_hwpt_paging *hwpt_paging;
struct iommufd_viommu *viommu;
struct iommufd_device *idev;
const struct iommu_ops *ops;
+ size_t viommu_size;
int rc;
if (cmd->flags || cmd->type == IOMMU_VIOMMU_TYPE_DEFAULT)
@@ -31,7 +37,22 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
return PTR_ERR(idev);
ops = dev_iommu_ops(idev->dev);
- if (!ops->viommu_alloc) {
+ if (!ops->get_viommu_size || !ops->viommu_init) {
+ rc = -EOPNOTSUPP;
+ goto out_put_idev;
+ }
+
+ viommu_size = ops->get_viommu_size(idev->dev, cmd->type);
+ if (!viommu_size) {
+ rc = -EOPNOTSUPP;
+ goto out_put_idev;
+ }
+
+ /*
+ * It is a driver bug to provide a viommu_size smaller than the core
+ * vIOMMU structure size
+ */
+ if (WARN_ON_ONCE(viommu_size < sizeof(*viommu))) {
rc = -EOPNOTSUPP;
goto out_put_idev;
}
@@ -47,8 +68,8 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
goto out_put_hwpt;
}
- viommu = ops->viommu_alloc(idev->dev, hwpt_paging->common.domain,
- ucmd->ictx, cmd->type);
+ viommu = (struct iommufd_viommu *)_iommufd_object_alloc_ucmd(
+ ucmd, viommu_size, IOMMUFD_OBJ_VIOMMU);
if (IS_ERR(viommu)) {
rc = PTR_ERR(viommu);
goto out_put_hwpt;
@@ -68,15 +89,20 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
*/
viommu->iommu_dev = __iommu_get_iommu_dev(idev->dev);
+ rc = ops->viommu_init(viommu, hwpt_paging->common.domain,
+ user_data.len ? &user_data : NULL);
+ if (rc)
+ goto out_put_hwpt;
+
+ /* It is a driver bug if viommu->ops was left unset */
+ if (WARN_ON_ONCE(!viommu->ops)) {
+ rc = -EOPNOTSUPP;
+ goto out_put_hwpt;
+ }
+
cmd->out_viommu_id = viommu->obj.id;
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
- if (rc)
- goto out_abort;
- iommufd_object_finalize(ucmd->ictx, &viommu->obj);
- goto out_put_hwpt;
-out_abort:
- iommufd_object_abort_and_destroy(ucmd->ictx, &viommu->obj);
out_put_hwpt:
iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
out_put_idev:
@@ -84,22 +110,41 @@ out_put_idev:
return rc;
}
-void iommufd_vdevice_destroy(struct iommufd_object *obj)
+void iommufd_vdevice_abort(struct iommufd_object *obj)
{
struct iommufd_vdevice *vdev =
container_of(obj, struct iommufd_vdevice, obj);
struct iommufd_viommu *viommu = vdev->viommu;
+ struct iommufd_device *idev = vdev->idev;
+
+ lockdep_assert_held(&idev->igroup->lock);
+ if (vdev->destroy)
+ vdev->destroy(vdev);
/* xa_cmpxchg may fail here if the alloc path's earlier xa_cmpxchg failed */
- xa_cmpxchg(&viommu->vdevs, vdev->id, vdev, NULL, GFP_KERNEL);
+ xa_cmpxchg(&viommu->vdevs, vdev->virt_id, vdev, NULL, GFP_KERNEL);
refcount_dec(&viommu->obj.users);
- put_device(vdev->dev);
+ idev->vdev = NULL;
+}
+
+void iommufd_vdevice_destroy(struct iommufd_object *obj)
+{
+ struct iommufd_vdevice *vdev =
+ container_of(obj, struct iommufd_vdevice, obj);
+ struct iommufd_device *idev = vdev->idev;
+ struct iommufd_ctx *ictx = idev->ictx;
+
+ mutex_lock(&idev->igroup->lock);
+ iommufd_vdevice_abort(obj);
+ mutex_unlock(&idev->igroup->lock);
+ iommufd_put_object(ictx, &idev->obj);
}
int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
struct iommu_vdevice_alloc *cmd = ucmd->cmd;
struct iommufd_vdevice *vdev, *curr;
+ size_t vdev_size = sizeof(*vdev);
struct iommufd_viommu *viommu;
struct iommufd_device *idev;
u64 virt_id = cmd->virt_id;
@@ -124,17 +169,54 @@ int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd)
goto out_put_idev;
}
- vdev = iommufd_object_alloc(ucmd->ictx, vdev, IOMMUFD_OBJ_VDEVICE);
+ mutex_lock(&idev->igroup->lock);
+ if (idev->destroying) {
+ rc = -ENOENT;
+ goto out_unlock_igroup;
+ }
+
+ if (idev->vdev) {
+ rc = -EEXIST;
+ goto out_unlock_igroup;
+ }
+
+ if (viommu->ops && viommu->ops->vdevice_size) {
+ /*
+ * It is a driver bug if:
+ * - ops->vdevice_size is smaller than the core structure size
+ * - there is no pairing ops->vdevice_init op
+ */
+ if (WARN_ON_ONCE(viommu->ops->vdevice_size < vdev_size ||
+ !viommu->ops->vdevice_init)) {
+ rc = -EOPNOTSUPP;
+ goto out_unlock_igroup;
+ }
+ vdev_size = viommu->ops->vdevice_size;
+ }
+
+ vdev = (struct iommufd_vdevice *)_iommufd_object_alloc(
+ ucmd->ictx, vdev_size, IOMMUFD_OBJ_VDEVICE);
if (IS_ERR(vdev)) {
rc = PTR_ERR(vdev);
- goto out_put_idev;
+ goto out_unlock_igroup;
}
- vdev->id = virt_id;
- vdev->dev = idev->dev;
- get_device(idev->dev);
+ vdev->virt_id = virt_id;
vdev->viommu = viommu;
refcount_inc(&viommu->obj.users);
+ /*
+ * A wait_cnt reference is held on the idev so long as we have the
+ * pointer. iommufd_device_pre_destroy() will revoke it before the
+ * idev's real destruction.
+ */
+ vdev->idev = idev;
+
+ /*
+ * iommufd_device_destroy() delays until idev->vdev is NULL before
+ * freeing the idev, which only happens once the vdev has finished
+ * its destruction.
+ */
+ idev->vdev = vdev;
curr = xa_cmpxchg(&viommu->vdevs, virt_id, NULL, vdev, GFP_KERNEL);
if (curr) {
@@ -142,17 +224,206 @@ int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd)
goto out_abort;
}
+ if (viommu->ops && viommu->ops->vdevice_init) {
+ rc = viommu->ops->vdevice_init(vdev);
+ if (rc)
+ goto out_abort;
+ }
+
cmd->out_vdevice_id = vdev->obj.id;
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
if (rc)
goto out_abort;
iommufd_object_finalize(ucmd->ictx, &vdev->obj);
- goto out_put_idev;
+ goto out_unlock_igroup;
out_abort:
iommufd_object_abort_and_destroy(ucmd->ictx, &vdev->obj);
+out_unlock_igroup:
+ mutex_unlock(&idev->igroup->lock);
out_put_idev:
- iommufd_put_object(ucmd->ictx, &idev->obj);
+ if (rc)
+ iommufd_put_object(ucmd->ictx, &idev->obj);
+out_put_viommu:
+ iommufd_put_object(ucmd->ictx, &viommu->obj);
+ return rc;
+}
+
+static void iommufd_hw_queue_destroy_access(struct iommufd_ctx *ictx,
+ struct iommufd_access *access,
+ u64 base_iova, size_t length)
+{
+ u64 aligned_iova = PAGE_ALIGN_DOWN(base_iova);
+ u64 offset = base_iova - aligned_iova;
+
+ iommufd_access_unpin_pages(access, aligned_iova,
+ PAGE_ALIGN(length + offset));
+ iommufd_access_detach_internal(access);
+ iommufd_access_destroy_internal(ictx, access);
+}
+
+void iommufd_hw_queue_destroy(struct iommufd_object *obj)
+{
+ struct iommufd_hw_queue *hw_queue =
+ container_of(obj, struct iommufd_hw_queue, obj);
+
+ if (hw_queue->destroy)
+ hw_queue->destroy(hw_queue);
+ if (hw_queue->access)
+ iommufd_hw_queue_destroy_access(hw_queue->viommu->ictx,
+ hw_queue->access,
+ hw_queue->base_addr,
+ hw_queue->length);
+ if (hw_queue->viommu)
+ refcount_dec(&hw_queue->viommu->obj.users);
+}
+
+/*
+ * When the HW accesses the guest queue via physical addresses, the underlying
+ * physical pages of the guest queue must be contiguous. There is also a
+ * security concern: IOMMUFD_CMD_IOAS_UNMAP could remove the guest queue's
+ * mappings from the nesting parent iopt while the HW is still physically
+ * accessing the queue memory. Such a HW queue therefore requires an access
+ * that pins the underlying pages and prevents the unmap.
+ */
+static struct iommufd_access *
+iommufd_hw_queue_alloc_phys(struct iommu_hw_queue_alloc *cmd,
+ struct iommufd_viommu *viommu, phys_addr_t *base_pa)
+{
+ u64 aligned_iova = PAGE_ALIGN_DOWN(cmd->nesting_parent_iova);
+ u64 offset = cmd->nesting_parent_iova - aligned_iova;
+ struct iommufd_access *access;
+ struct page **pages;
+ size_t max_npages;
+ size_t length;
+ size_t i;
+ int rc;
+
+ /* max_npages = DIV_ROUND_UP(offset + cmd->length, PAGE_SIZE) */
+ if (check_add_overflow(offset, cmd->length, &length))
+ return ERR_PTR(-ERANGE);
+ if (check_add_overflow(length, PAGE_SIZE - 1, &length))
+ return ERR_PTR(-ERANGE);
+ max_npages = length / PAGE_SIZE;
+ /* length needs to be page aligned too */
+ length = max_npages * PAGE_SIZE;
+
+ /*
+ * Use kvcalloc() to avoid memory fragmentation for a large page array.
+ * Set __GFP_NOWARN to avoid syzkaller blowups
+ */
+ pages = kvcalloc(max_npages, sizeof(*pages), GFP_KERNEL | __GFP_NOWARN);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ access = iommufd_access_create_internal(viommu->ictx);
+ if (IS_ERR(access)) {
+ rc = PTR_ERR(access);
+ goto out_free;
+ }
+
+ rc = iommufd_access_attach_internal(access, viommu->hwpt->ioas);
+ if (rc)
+ goto out_destroy;
+
+ rc = iommufd_access_pin_pages(access, aligned_iova, length, pages, 0);
+ if (rc)
+ goto out_detach;
+
+ /* Validate that the underlying physical pages are contiguous */
+ for (i = 1; i < max_npages; i++) {
+ if (page_to_pfn(pages[i]) == page_to_pfn(pages[i - 1]) + 1)
+ continue;
+ rc = -EFAULT;
+ goto out_unpin;
+ }
+
+ *base_pa = (page_to_pfn(pages[0]) << PAGE_SHIFT) + offset;
+ kfree(pages);
+ return access;
+
+out_unpin:
+ iommufd_access_unpin_pages(access, aligned_iova, length);
+out_detach:
+ iommufd_access_detach_internal(access);
+out_destroy:
+ iommufd_access_destroy_internal(viommu->ictx, access);
+out_free:
+ kfree(pages);
+ return ERR_PTR(rc);
+}
+
+int iommufd_hw_queue_alloc_ioctl(struct iommufd_ucmd *ucmd)
+{
+ struct iommu_hw_queue_alloc *cmd = ucmd->cmd;
+ struct iommufd_hw_queue *hw_queue;
+ struct iommufd_viommu *viommu;
+ struct iommufd_access *access;
+ size_t hw_queue_size;
+ phys_addr_t base_pa;
+ u64 last;
+ int rc;
+
+ if (cmd->flags || cmd->type == IOMMU_HW_QUEUE_TYPE_DEFAULT)
+ return -EOPNOTSUPP;
+ if (!cmd->length)
+ return -EINVAL;
+ if (check_add_overflow(cmd->nesting_parent_iova, cmd->length - 1,
+ &last))
+ return -EOVERFLOW;
+
+ viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);
+ if (IS_ERR(viommu))
+ return PTR_ERR(viommu);
+
+ if (!viommu->ops || !viommu->ops->get_hw_queue_size ||
+ !viommu->ops->hw_queue_init_phys) {
+ rc = -EOPNOTSUPP;
+ goto out_put_viommu;
+ }
+
+ hw_queue_size = viommu->ops->get_hw_queue_size(viommu, cmd->type);
+ if (!hw_queue_size) {
+ rc = -EOPNOTSUPP;
+ goto out_put_viommu;
+ }
+
+ /*
+ * It is a driver bug for providing a hw_queue_size smaller than the
+ * core HW queue structure size
+ */
+ if (WARN_ON_ONCE(hw_queue_size < sizeof(*hw_queue))) {
+ rc = -EOPNOTSUPP;
+ goto out_put_viommu;
+ }
+
+ hw_queue = (struct iommufd_hw_queue *)_iommufd_object_alloc_ucmd(
+ ucmd, hw_queue_size, IOMMUFD_OBJ_HW_QUEUE);
+ if (IS_ERR(hw_queue)) {
+ rc = PTR_ERR(hw_queue);
+ goto out_put_viommu;
+ }
+
+ access = iommufd_hw_queue_alloc_phys(cmd, viommu, &base_pa);
+ if (IS_ERR(access)) {
+ rc = PTR_ERR(access);
+ goto out_put_viommu;
+ }
+
+ hw_queue->viommu = viommu;
+ refcount_inc(&viommu->obj.users);
+ hw_queue->access = access;
+ hw_queue->type = cmd->type;
+ hw_queue->length = cmd->length;
+ hw_queue->base_addr = cmd->nesting_parent_iova;
+
+ rc = viommu->ops->hw_queue_init_phys(hw_queue, cmd->index, base_pa);
+ if (rc)
+ goto out_put_viommu;
+
+ cmd->out_hw_queue_id = hw_queue->obj.id;
+ rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+
out_put_viommu:
iommufd_put_object(ucmd->ictx, &viommu->obj);
return rc;
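
From userspace, allocating such a queue means handing the kernel a guest queue that is already mapped in the nesting parent IOAS and physically contiguous; index ordering matters for the mock driver (queue 0 before queue 1). A hedged sketch against the uAPI fields referenced above:

#include <err.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static uint32_t alloc_hw_queue(int iommufd, uint32_t viommu_id,
			       uint64_t queue_iova, uint64_t queue_len)
{
	struct iommu_hw_queue_alloc cmd = {
		.size = sizeof(cmd),
		.viommu_id = viommu_id,
		.type = IOMMU_HW_QUEUE_TYPE_SELFTEST,	/* 0xdeadbeef */
		.index = 0,		/* the mock driver wants 0 before 1 */
		.nesting_parent_iova = queue_iova,
		.length = queue_len,
	};

	/* Non-contiguous backing pages fail with EFAULT after pinning */
	if (ioctl(iommufd, IOMMU_HW_QUEUE_ALLOC, &cmd))
		err(1, "IOMMU_HW_QUEUE_ALLOC");
	return cmd.out_hw_queue_id;
}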
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 90341b24a811..ffa892f65714 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -430,7 +430,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
* non-secure mode.
*/
domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
- domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
+ domain->cfg.pgsize_bitmap = domain->io_domain.pgsize_bitmap;
domain->cfg.ias = 32;
domain->cfg.oas = 40;
domain->cfg.tlb = &ipmmu_flush_ops;
@@ -571,6 +571,7 @@ static struct iommu_domain *ipmmu_domain_alloc_paging(struct device *dev)
return NULL;
mutex_init(&domain->mutex);
+ domain->io_domain.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
return &domain->io_domain;
}
@@ -882,7 +883,6 @@ static const struct iommu_ops ipmmu_ops = {
*/
.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
? generic_device_group : generic_single_device_group,
- .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
.of_xlate = ipmmu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = ipmmu_attach_device,
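
The ipmmu-vmsa hunk is the template for the remaining driver conversions in this section: pgsize_bitmap moves off the shared iommu_ops (which msm_iommu even mutated at runtime) and onto each domain at allocation time, with the io-pgtable config reading it back from the domain. The pattern, sketched for a hypothetical driver:

static struct iommu_domain *foo_domain_alloc_paging(struct device *dev)
{
	struct foo_domain *fd;	/* placeholder driver domain wrapper */

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return NULL;

	/* Per-domain page sizes replace the old iommu_ops::pgsize_bitmap */
	fd->domain.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	return &fd->domain;
}

static int foo_domain_finalise(struct foo_domain *fd)
{
	/* io-pgtable now takes its sizes from the domain, not from the ops */
	fd->cfg.pgsize_bitmap = fd->domain.pgsize_bitmap;
	return 0;
}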
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 2769e4544038..43a61ba021a5 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -312,6 +312,8 @@ static struct iommu_domain *msm_iommu_domain_alloc_paging(struct device *dev)
INIT_LIST_HEAD(&priv->list_attached);
+ priv->domain.pgsize_bitmap = MSM_IOMMU_PGSIZES;
+
priv->domain.geometry.aperture_start = 0;
priv->domain.geometry.aperture_end = (1ULL << 32) - 1;
priv->domain.geometry.force_aperture = true;
@@ -339,7 +341,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
spin_lock_init(&priv->pgtlock);
priv->cfg = (struct io_pgtable_cfg) {
- .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
+ .pgsize_bitmap = priv->domain.pgsize_bitmap,
.ias = 32,
.oas = 32,
.tlb = &msm_iommu_flush_ops,
@@ -352,8 +354,6 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
return -EINVAL;
}
- msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;
-
return 0;
}
@@ -692,7 +692,6 @@ static struct iommu_ops msm_iommu_ops = {
.domain_alloc_paging = msm_iommu_domain_alloc_paging,
.probe_device = msm_iommu_probe_device,
.device_group = generic_device_group,
- .pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = msm_iommu_attach_dev,
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index cb95fecf6016..0e0285348d2b 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -648,7 +648,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
if (share_dom) {
dom->iop = share_dom->iop;
dom->cfg = share_dom->cfg;
- dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap;
+ dom->domain.pgsize_bitmap = share_dom->domain.pgsize_bitmap;
goto update_iova_region;
}
@@ -656,7 +656,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
.quirks = IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
IO_PGTABLE_QUIRK_ARM_MTK_EXT,
- .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
+ .pgsize_bitmap = dom->domain.pgsize_bitmap,
.ias = MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN) ? 34 : 32,
.iommu_dev = data->dev,
};
@@ -675,9 +675,6 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
return -ENOMEM;
}
- /* Update our support page sizes bitmap */
- dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
-
data->share_dom = dom;
update_iova_region:
@@ -697,6 +694,7 @@ static struct iommu_domain *mtk_iommu_domain_alloc_paging(struct device *dev)
if (!dom)
return NULL;
mutex_init(&dom->mutex);
+ dom->domain.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M;
return &dom->domain;
}
@@ -1019,7 +1017,6 @@ static const struct iommu_ops mtk_iommu_ops = {
.device_group = mtk_iommu_device_group,
.of_xlate = mtk_iommu_of_xlate,
.get_resv_regions = mtk_iommu_get_resv_regions,
- .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = mtk_iommu_attach_device,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 66824982e05f..10cc0b1197e8 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -288,6 +288,8 @@ static struct iommu_domain *mtk_iommu_v1_domain_alloc_paging(struct device *dev)
if (!dom)
return NULL;
+ dom->domain.pgsize_bitmap = MT2701_IOMMU_PAGE_SIZE;
+
return &dom->domain;
}
@@ -509,14 +511,10 @@ static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
static void mtk_iommu_v1_probe_finalize(struct device *dev)
{
- struct dma_iommu_mapping *mtk_mapping;
- struct mtk_iommu_v1_data *data;
+ __maybe_unused struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
int err;
- data = dev_iommu_priv_get(dev);
- mtk_mapping = data->mapping;
-
- err = arm_iommu_attach_device(dev, mtk_mapping);
+ err = arm_iommu_attach_device(dev, data->mapping);
if (err)
dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}
@@ -582,7 +580,6 @@ static const struct iommu_ops mtk_iommu_v1_ops = {
.probe_finalize = mtk_iommu_v1_probe_finalize,
.release_device = mtk_iommu_v1_release_device,
.device_group = generic_device_group,
- .pgsize_bitmap = MT2701_IOMMU_PAGE_SIZE,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = mtk_iommu_v1_attach_device,
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 3c62337f43c6..6fb93927bdb9 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1123,29 +1123,15 @@ static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
struct omap_iommu *obj)
{
struct device_node *np = pdev->dev.of_node;
- int ret;
if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
return 0;
- if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
- dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
- return -EINVAL;
- }
-
- obj->syscfg =
- syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
- if (IS_ERR(obj->syscfg)) {
- /* can fail with -EPROBE_DEFER */
- ret = PTR_ERR(obj->syscfg);
- return ret;
- }
-
- if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
- &obj->id)) {
- dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
- return -EINVAL;
- }
+ obj->syscfg = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-mmuconfig",
+ 1, &obj->id);
+ if (IS_ERR(obj->syscfg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(obj->syscfg),
+ "ti,syscon-mmuconfig property is missing\n");
if (obj->id != 0 && obj->id != 1) {
dev_err(&pdev->dev, "invalid IOMMU instance id\n");
@@ -1584,6 +1570,8 @@ static struct iommu_domain *omap_iommu_domain_alloc_paging(struct device *dev)
spin_lock_init(&omap_domain->lock);
+ omap_domain->domain.pgsize_bitmap = OMAP_IOMMU_PGSIZES;
+
omap_domain->domain.geometry.aperture_start = 0;
omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
omap_domain->domain.geometry.force_aperture = true;
@@ -1735,7 +1723,6 @@ static const struct iommu_ops omap_iommu_ops = {
.release_device = omap_iommu_release_device,
.device_group = generic_single_device_group,
.of_xlate = omap_iommu_of_xlate,
- .pgsize_bitmap = OMAP_IOMMU_PGSIZES,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = omap_iommu_attach_dev,
.map_pages = omap_iommu_map,
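Beyond the bitmap move, the omap hunk above also collapses a three-step syscon lookup (bool property check, regmap lookup by phandle, u32 index read) into a single syscon_regmap_lookup_by_phandle_args() call, with every failure mode (including -EPROBE_DEFER) funnelled through dev_err_probe(). A sketch of the consolidated idiom, assuming a hypothetical "vendor,cfg" phandle carrying one trailing cell:

    unsigned int id;
    struct regmap *map;

    map = syscon_regmap_lookup_by_phandle_args(np, "vendor,cfg", 1, &id);
    if (IS_ERR(map))
    	return dev_err_probe(dev, PTR_ERR(map), "vendor,cfg lookup failed\n");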
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index bb57092ca901..2d0d31ba2886 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -1533,7 +1533,6 @@ static void riscv_iommu_release_device(struct device *dev)
}
static const struct iommu_ops riscv_iommu_ops = {
- .pgsize_bitmap = SZ_4K,
.of_xlate = riscv_iommu_of_xlate,
.identity_domain = &riscv_iommu_identity_domain,
.blocked_domain = &riscv_iommu_blocking_domain,
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index e6bb3c784017..0861dd469bd8 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1081,6 +1081,8 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
spin_lock_init(&rk_domain->dt_lock);
INIT_LIST_HEAD(&rk_domain->iommus);
+ rk_domain->domain.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP;
+
rk_domain->domain.geometry.aperture_start = 0;
rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
rk_domain->domain.geometry.force_aperture = true;
@@ -1170,7 +1172,6 @@ static const struct iommu_ops rk_iommu_ops = {
.probe_device = rk_iommu_probe_device,
.release_device = rk_iommu_release_device,
.device_group = generic_single_device_group,
- .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
.of_xlate = rk_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = rk_iommu_attach_device,
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 433b59f43530..9c80d61deb2c 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -557,6 +557,7 @@ static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
}
zdev->end_dma = zdev->start_dma + aperture_size - 1;
+ s390_domain->domain.pgsize_bitmap = SZ_4K;
s390_domain->domain.geometry.force_aperture = true;
s390_domain->domain.geometry.aperture_start = 0;
s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);
@@ -1158,7 +1159,6 @@ static struct iommu_domain blocking_domain = {
.domain_alloc_paging = s390_domain_alloc_paging, \
.probe_device = s390_iommu_probe_device, \
.device_group = generic_device_group, \
- .pgsize_bitmap = SZ_4K, \
.get_resv_regions = s390_iommu_get_resv_regions, \
.default_domain_ops = &(const struct iommu_domain_ops) { \
.attach_dev = s390_iommu_attach_device, \
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 941d1f361c8c..c7ca1d8a0b15 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -143,6 +143,8 @@ static struct iommu_domain *sprd_iommu_domain_alloc_paging(struct device *dev)
spin_lock_init(&dom->pgtlock);
+ dom->domain.pgsize_bitmap = SPRD_IOMMU_PAGE_SIZE;
+
dom->domain.geometry.aperture_start = 0;
dom->domain.geometry.aperture_end = SZ_256M - 1;
dom->domain.geometry.force_aperture = true;
@@ -410,7 +412,6 @@ static const struct iommu_ops sprd_iommu_ops = {
.probe_device = sprd_iommu_probe_device,
.device_group = generic_single_device_group,
.of_xlate = sprd_iommu_of_xlate,
- .pgsize_bitmap = SPRD_IOMMU_PAGE_SIZE,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sprd_iommu_attach_device,
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 76c9620af4bb..de10b569d9a9 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -697,6 +697,8 @@ sun50i_iommu_domain_alloc_paging(struct device *dev)
refcount_set(&sun50i_domain->refcnt, 1);
+ sun50i_domain->domain.pgsize_bitmap = SZ_4K;
+
sun50i_domain->domain.geometry.aperture_start = 0;
sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
sun50i_domain->domain.geometry.force_aperture = true;
@@ -842,7 +844,6 @@ static int sun50i_iommu_of_xlate(struct device *dev,
static const struct iommu_ops sun50i_iommu_ops = {
.identity_domain = &sun50i_iommu_identity_domain,
- .pgsize_bitmap = SZ_4K,
.device_group = generic_single_device_group,
.domain_alloc_paging = sun50i_iommu_domain_alloc_paging,
.of_xlate = sun50i_iommu_of_xlate,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e58fe9d8b9e7..36cdd5fbab07 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -318,6 +318,8 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
spin_lock_init(&as->lock);
+ as->domain.pgsize_bitmap = SZ_4K;
+
/* setup aperture */
as->domain.geometry.aperture_start = 0;
as->domain.geometry.aperture_end = 0xffffffff;
@@ -1002,7 +1004,6 @@ static const struct iommu_ops tegra_smmu_ops = {
.probe_device = tegra_smmu_probe_device,
.device_group = tegra_smmu_device_group,
.of_xlate = tegra_smmu_of_xlate,
- .pgsize_bitmap = SZ_4K,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = tegra_smmu_attach_dev,
.map_pages = tegra_smmu_map,
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index ecd41fb03e5a..532db1de201b 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -998,7 +998,7 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
iommu_dma_get_resv_regions(dev, head);
}
-static struct iommu_ops viommu_ops;
+static const struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;
static int viommu_match_node(struct device *dev, const void *data)
@@ -1086,7 +1086,7 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
}
}
-static struct iommu_ops viommu_ops = {
+static const struct iommu_ops viommu_ops = {
.capable = viommu_capable,
.domain_alloc_identity = viommu_domain_alloc_identity,
.domain_alloc_paging = viommu_domain_alloc_paging,
@@ -1217,8 +1217,6 @@ static int viommu_probe(struct virtio_device *vdev)
viommu->first_domain++;
}
- viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
-
virtio_device_ready(vdev);
/* Populate the event queue with buffers */
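For virtio-iommu the same move lets viommu_ops become const: the ops no longer need to be patched at probe time with the bitmap read from the device. The per-instance value presumably lands on each domain instead, along the lines of the sketch below (the actual assignment is outside this hunk):

    /* in the domain set-up path, per viommu instance */
    vdomain->domain.pgsize_bitmap = viommu->pgsize_bitmap;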
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 94a41c7d430e..6d12c6ab9ea4 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -41,10 +41,14 @@ config ARM_GIC_V3
select HAVE_ARM_SMCCC_DISCOVERY
select IRQ_MSI_IOMMU
+config ARM_GIC_ITS_PARENT
+ bool
+
config ARM_GIC_V3_ITS
bool
select GENERIC_MSI_IRQ
select IRQ_MSI_LIB
+ select ARM_GIC_ITS_PARENT
default ARM_GIC_V3
select IRQ_MSI_IOMMU
@@ -54,6 +58,14 @@ config ARM_GIC_V3_ITS_FSL_MC
depends on FSL_MC_BUS
default ARM_GIC_V3_ITS
+config ARM_GIC_V5
+ bool
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select GENERIC_MSI_IRQ
+ select IRQ_MSI_LIB
+ select ARM_GIC_ITS_PARENT
+
config ARM_NVIC
bool
select IRQ_DOMAIN_HIERARCHY
@@ -542,6 +554,7 @@ config IMX_MU_MSI
tristate "i.MX MU used as MSI controller"
depends on OF && HAS_IOMEM
depends on ARCH_MXC || COMPILE_TEST
+ depends on ARM || ARM64
default m if ARCH_MXC
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 0458d6c5d161..93e3ced023bb 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -33,9 +33,12 @@ obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
obj-$(CONFIG_IRQ_MSI_LIB) += irq-msi-lib.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o irq-gic-v3-its-msi-parent.o
+obj-$(CONFIG_ARM_GIC_ITS_PARENT) += irq-gic-its-msi-parent.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o
obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o
obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
+obj-$(CONFIG_ARM_GIC_V5) += irq-gic-v5.o irq-gic-v5-irs.o irq-gic-v5-its.o \
+ irq-gic-v5-iwb.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index 020ecdf16901..710cab61d919 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -29,8 +29,6 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void gic_enable_of_quirks(const struct device_node *np,
const struct gic_quirk *quirks, void *data);
-extern const struct msi_parent_ops gic_v3_its_msi_parent_ops;
-
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2)
diff --git a/drivers/irqchip/irq-gic-v3-its-msi-parent.c b/drivers/irqchip/irq-gic-its-msi-parent.c
index a5e110ffdd88..eb1473f1448a 100644
--- a/drivers/irqchip/irq-gic-v3-its-msi-parent.c
+++ b/drivers/irqchip/irq-gic-its-msi-parent.c
@@ -5,9 +5,10 @@
// Copyright (C) 2022 Intel
#include <linux/acpi_iort.h>
+#include <linux/of_address.h>
#include <linux/pci.h>
-#include "irq-gic-common.h"
+#include "irq-gic-its-msi-parent.h"
#include <linux/irqchip/irq-msi-lib.h>
#define ITS_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
@@ -18,6 +19,23 @@
MSI_FLAG_PCI_MSIX | \
MSI_FLAG_MULTI_PCI_MSI)
+static int its_translate_frame_address(struct device_node *msi_node, phys_addr_t *pa)
+{
+ struct resource res;
+ int ret;
+
+ ret = of_property_match_string(msi_node, "reg-names", "ns-translate");
+ if (ret < 0)
+ return ret;
+
+ ret = of_address_to_resource(msi_node, ret, &res);
+ if (ret)
+ return ret;
+
+ *pa = res.start;
+ return 0;
+}
+
#ifdef CONFIG_PCI_MSI
static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
{
@@ -82,8 +100,46 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
msi_info = msi_get_domain_info(domain->parent);
return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}
+
+static int its_v5_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct device_node *msi_node = NULL;
+ struct msi_domain_info *msi_info;
+ struct pci_dev *pdev;
+ phys_addr_t pa;
+ u32 rid;
+ int ret;
+
+ if (!dev_is_pci(dev))
+ return -EINVAL;
+
+ pdev = to_pci_dev(dev);
+
+ rid = pci_msi_map_rid_ctlr_node(pdev, &msi_node);
+ if (!msi_node)
+ return -ENODEV;
+
+ ret = its_translate_frame_address(msi_node, &pa);
+ of_node_put(msi_node);
+ if (ret)
+ return -ENODEV;
+
+ /* ITS specific DeviceID */
+ info->scratchpad[0].ul = rid;
+ /* ITS translate frame physical address */
+ info->scratchpad[1].ul = pa;
+
+ /* Always allocate power of two vectors */
+ nvec = roundup_pow_of_two(nvec);
+
+ msi_info = msi_get_domain_info(domain->parent);
+ return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
+}
#else /* CONFIG_PCI_MSI */
#define its_pci_msi_prepare NULL
+#define its_v5_pci_msi_prepare NULL
#endif /* !CONFIG_PCI_MSI */
static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
@@ -118,6 +174,53 @@ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
return ret;
}
+static int of_v5_pmsi_get_msi_info(struct irq_domain *domain, struct device *dev,
+ u32 *dev_id, phys_addr_t *pa)
+{
+ int ret, index = 0;
+ /*
+ * Retrieve the DeviceID and the ITS translate frame node pointer
+ * out of the msi-parent property.
+ */
+ do {
+ struct of_phandle_args args;
+
+ ret = of_parse_phandle_with_args(dev->of_node,
+ "msi-parent", "#msi-cells",
+ index, &args);
+ if (ret)
+ break;
+ /*
+ * The IRQ domain fwnode is the msi controller parent
+ * in GICv5 (where the msi controller nodes are the
+ * ITS translate frames).
+ */
+ if (args.np->parent == irq_domain_get_of_node(domain)) {
+ if (WARN_ON(args.args_count != 1))
+ return -EINVAL;
+ *dev_id = args.args[0];
+
+ ret = its_translate_frame_address(args.np, pa);
+ if (ret)
+ return -ENODEV;
+ break;
+ }
+ index++;
+ } while (!ret);
+
+ if (ret) {
+ struct device_node *np = NULL;
+
+ ret = of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &np, dev_id);
+ if (np) {
+ ret = its_translate_frame_address(np, pa);
+ of_node_put(np);
+ }
+ }
+
+ return ret;
+}
+
int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
return -1;
@@ -148,6 +251,33 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
dev, nvec, info);
}
+static int its_v5_pmsi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct msi_domain_info *msi_info;
+ phys_addr_t pa;
+ u32 dev_id;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ ret = of_v5_pmsi_get_msi_info(domain->parent, dev, &dev_id, &pa);
+ if (ret)
+ return ret;
+
+ /* ITS specific DeviceID */
+ info->scratchpad[0].ul = dev_id;
+ /* ITS translate frame physical address */
+ info->scratchpad[1].ul = pa;
+
+ /* Allocate always as a power of 2 */
+ nvec = roundup_pow_of_two(nvec);
+
+ msi_info = msi_get_domain_info(domain->parent);
+ return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
+}
+
static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
{
struct msi_domain_info *msi_info;
@@ -199,6 +329,32 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
return true;
}
+static bool its_v5_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ switch (info->bus_token) {
+ case DOMAIN_BUS_PCI_DEVICE_MSI:
+ case DOMAIN_BUS_PCI_DEVICE_MSIX:
+ info->ops->msi_prepare = its_v5_pci_msi_prepare;
+ info->ops->msi_teardown = its_msi_teardown;
+ break;
+ case DOMAIN_BUS_DEVICE_MSI:
+ case DOMAIN_BUS_WIRED_TO_MSI:
+ info->ops->msi_prepare = its_v5_pmsi_prepare;
+ info->ops->msi_teardown = its_msi_teardown;
+ break;
+ default:
+ /* Confused. How did the lib return true? */
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ return true;
+}
+
const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
.supported_flags = ITS_MSI_FLAGS_SUPPORTED,
.required_flags = ITS_MSI_FLAGS_REQUIRED,
@@ -208,3 +364,13 @@ const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
.prefix = "ITS-",
.init_dev_msi_info = its_init_dev_msi_info,
};
+
+const struct msi_parent_ops gic_v5_its_msi_parent_ops = {
+ .supported_flags = ITS_MSI_FLAGS_SUPPORTED,
+ .required_flags = ITS_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
+ .prefix = "ITS-v5-",
+ .init_dev_msi_info = its_v5_init_dev_msi_info,
+};
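Both parent-ops structures are consumed the same way by the generic MSI library: an ITS irqdomain advertises itself as an MSI parent and points at the matching ops, after which msi_lib_init_dev_msi_info() dispatches into the prepare/teardown callbacks selected above. A hedged sketch of the expected GICv5 wiring (the real set-up lives in irq-gic-v5-its.c, later in the series):

    its_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
    its_domain->msi_parent_ops = &gic_v5_its_msi_parent_ops;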
diff --git a/drivers/irqchip/irq-gic-its-msi-parent.h b/drivers/irqchip/irq-gic-its-msi-parent.h
new file mode 100644
index 000000000000..df016f347337
--- /dev/null
+++ b/drivers/irqchip/irq-gic-its-msi-parent.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 ARM Limited, All Rights Reserved.
+ */
+
+#ifndef _IRQ_GIC_ITS_MSI_PARENT_H
+#define _IRQ_GIC_ITS_MSI_PARENT_H
+
+extern const struct msi_parent_ops gic_v3_its_msi_parent_ops;
+extern const struct msi_parent_ops gic_v5_its_msi_parent_ops;
+
+#endif /* _IRQ_GIC_ITS_MSI_PARENT_H */
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d54fa0638dc4..467cb78435a9 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -41,6 +41,7 @@
#include <asm/exception.h>
#include "irq-gic-common.h"
+#include "irq-gic-its-msi-parent.h"
#include <linux/irqchip/irq-msi-lib.h>
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 58c28895f8c4..8455b4a5fbb0 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -342,10 +342,10 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map)
return irq_set_vcpu_affinity(irq, &info);
}
-int its_unmap_vlpi(int irq)
+void its_unmap_vlpi(int irq)
{
irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
- return irq_set_vcpu_affinity(irq, NULL);
+ WARN_ON_ONCE(irq_set_vcpu_affinity(irq, NULL));
}
int its_prop_update_vlpi(int irq, u8 config, bool inv)
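The its_unmap_vlpi() signature change encodes the fact that callers have no meaningful recovery path for an unmap failure, so the warning moves into the helper and call sites shrink accordingly (illustrative, not a quote from a caller):

    /* before: a return value nobody could usefully act on */
    WARN_ON(its_unmap_vlpi(irq));
    /* after: fire and forget; the helper itself does WARN_ON_ONCE() */
    its_unmap_vlpi(irq);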
diff --git a/drivers/irqchip/irq-gic-v5-irs.c b/drivers/irqchip/irq-gic-v5-irs.c
new file mode 100644
index 000000000000..f845415f9143
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v5-irs.c
@@ -0,0 +1,822 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved.
+ */
+
+#define pr_fmt(fmt) "GICv5 IRS: " fmt
+
+#include <linux/log2.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-v5.h>
+
+/*
+ * Hardcoded ID_BITS limit for systems supporting only a 1-level IST
+ * table. Systems supporting only a 1-level IST table aren't expected
+ * to require more than 2^12 LPIs. Tweak as required.
+ */
+#define LPI_ID_BITS_LINEAR 12
+
+#define IRS_FLAGS_NON_COHERENT BIT(0)
+
+static DEFINE_PER_CPU_READ_MOSTLY(struct gicv5_irs_chip_data *, per_cpu_irs_data);
+static LIST_HEAD(irs_nodes);
+
+static u32 irs_readl_relaxed(struct gicv5_irs_chip_data *irs_data,
+ const u32 reg_offset)
+{
+ return readl_relaxed(irs_data->irs_base + reg_offset);
+}
+
+static void irs_writel_relaxed(struct gicv5_irs_chip_data *irs_data,
+ const u32 val, const u32 reg_offset)
+{
+ writel_relaxed(val, irs_data->irs_base + reg_offset);
+}
+
+static u64 irs_readq_relaxed(struct gicv5_irs_chip_data *irs_data,
+ const u32 reg_offset)
+{
+ return readq_relaxed(irs_data->irs_base + reg_offset);
+}
+
+static void irs_writeq_relaxed(struct gicv5_irs_chip_data *irs_data,
+ const u64 val, const u32 reg_offset)
+{
+ writeq_relaxed(val, irs_data->irs_base + reg_offset);
+}
+
+/*
+ * The polling wait (in gicv5_wait_for_op_atomic()) on a GIC register
+ * provides the memory barriers (through MMIO accessors)
+ * required to synchronize CPU and GIC access to IST memory.
+ */
+static int gicv5_irs_ist_synchronise(struct gicv5_irs_chip_data *irs_data)
+{
+ return gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_IST_STATUSR,
+ GICV5_IRS_IST_STATUSR_IDLE, NULL);
+}
+
+static int __init gicv5_irs_init_ist_linear(struct gicv5_irs_chip_data *irs_data,
+ unsigned int lpi_id_bits,
+ unsigned int istsz)
+{
+ size_t l2istsz;
+ u32 n, cfgr;
+ void *ist;
+ u64 baser;
+ int ret;
+
+ /* Taken from GICv5 specifications 10.2.1.13 IRS_IST_BASER */
+ n = max(5, lpi_id_bits + 1 + istsz);
+
+ l2istsz = BIT(n + 1);
+ /*
+ * Check memory requirements. For a linear IST we cap the
+ * number of ID bits to a value that should never exceed
+ * kmalloc interface memory allocation limits, so this
+ * check is really belt and braces.
+ */
+ if (l2istsz > KMALLOC_MAX_SIZE) {
+ u8 lpi_id_cap = ilog2(KMALLOC_MAX_SIZE) - 2 + istsz;
+
+ pr_warn("Limiting LPI ID bits from %u to %u\n",
+ lpi_id_bits, lpi_id_cap);
+ lpi_id_bits = lpi_id_cap;
+ l2istsz = KMALLOC_MAX_SIZE;
+ }
+
+ ist = kzalloc(l2istsz, GFP_KERNEL);
+ if (!ist)
+ return -ENOMEM;
+
+ if (irs_data->flags & IRS_FLAGS_NON_COHERENT)
+ dcache_clean_inval_poc((unsigned long)ist,
+ (unsigned long)ist + l2istsz);
+ else
+ dsb(ishst);
+
+ cfgr = FIELD_PREP(GICV5_IRS_IST_CFGR_STRUCTURE,
+ GICV5_IRS_IST_CFGR_STRUCTURE_LINEAR) |
+ FIELD_PREP(GICV5_IRS_IST_CFGR_ISTSZ, istsz) |
+ FIELD_PREP(GICV5_IRS_IST_CFGR_L2SZ,
+ GICV5_IRS_IST_CFGR_L2SZ_4K) |
+ FIELD_PREP(GICV5_IRS_IST_CFGR_LPI_ID_BITS, lpi_id_bits);
+ irs_writel_relaxed(irs_data, cfgr, GICV5_IRS_IST_CFGR);
+
+ gicv5_global_data.ist.l2 = false;
+
+ baser = (virt_to_phys(ist) & GICV5_IRS_IST_BASER_ADDR_MASK) |
+ FIELD_PREP(GICV5_IRS_IST_BASER_VALID, 0x1);
+ irs_writeq_relaxed(irs_data, baser, GICV5_IRS_IST_BASER);
+
+ ret = gicv5_irs_ist_synchronise(irs_data);
+ if (ret) {
+ kfree(ist);
+ return ret;
+ }
+
+ return 0;
+}
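+
+/*
+ * Worked example (illustrative; reading ISTSZ as "entry size is
+ * 4 << istsz bytes" is an inference, not stated here): a linear IST
+ * holds 2^lpi_id_bits entries of 2^(2 + istsz) bytes, i.e.
+ * 2^(lpi_id_bits + 2 + istsz) bytes - exactly BIT(n + 1) for
+ * n = lpi_id_bits + 1 + istsz, with max(5, ...) enforcing a 64-byte
+ * floor. For istsz == 0 and lpi_id_bits == LPI_ID_BITS_LINEAR == 12,
+ * n = max(5, 13) = 13 and the table is BIT(14) bytes == 16 KiB.
+ */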
+
+static int __init gicv5_irs_init_ist_two_level(struct gicv5_irs_chip_data *irs_data,
+ unsigned int lpi_id_bits,
+ unsigned int istsz,
+ unsigned int l2sz)
+{
+ __le64 *l1ist;
+ u32 cfgr, n;
+ size_t l1sz;
+ u64 baser;
+ int ret;
+
+ /* Taken from GICv5 specifications 10.2.1.13 IRS_IST_BASER */
+ n = max(5, lpi_id_bits - ((10 - istsz) + (2 * l2sz)) + 2);
+
+ l1sz = BIT(n + 1);
+
+ l1ist = kzalloc(l1sz, GFP_KERNEL);
+ if (!l1ist)
+ return -ENOMEM;
+
+ if (irs_data->flags & IRS_FLAGS_NON_COHERENT)
+ dcache_clean_inval_poc((unsigned long)l1ist,
+ (unsigned long)l1ist + l1sz);
+ else
+ dsb(ishst);
+
+ cfgr = FIELD_PREP(GICV5_IRS_IST_CFGR_STRUCTURE,
+ GICV5_IRS_IST_CFGR_STRUCTURE_TWO_LEVEL) |
+ FIELD_PREP(GICV5_IRS_IST_CFGR_ISTSZ, istsz) |
+ FIELD_PREP(GICV5_IRS_IST_CFGR_L2SZ, l2sz) |
+ FIELD_PREP(GICV5_IRS_IST_CFGR_LPI_ID_BITS, lpi_id_bits);
+ irs_writel_relaxed(irs_data, cfgr, GICV5_IRS_IST_CFGR);
+
+ /*
+ * The L2SZ determines the number of bits resolved at L2 level. The
+ * number of bytes required for metadata is reported through istsz -
+ * the number of bits covered by L2 entries scales accordingly.
+ */
+ gicv5_global_data.ist.l2_size = BIT(11 + (2 * l2sz) + 1);
+ gicv5_global_data.ist.l2_bits = (10 - istsz) + (2 * l2sz);
+ gicv5_global_data.ist.l1ist_addr = l1ist;
+ gicv5_global_data.ist.l2 = true;
+
+ baser = (virt_to_phys(l1ist) & GICV5_IRS_IST_BASER_ADDR_MASK) |
+ FIELD_PREP(GICV5_IRS_IST_BASER_VALID, 0x1);
+ irs_writeq_relaxed(irs_data, baser, GICV5_IRS_IST_BASER);
+
+ ret = gicv5_irs_ist_synchronise(irs_data);
+ if (ret) {
+ kfree(l1ist);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Alloc L2 IST entries on demand.
+ *
+ * Locking/serialization is guaranteed by irqdomain core code by
+ * taking the hierarchical domain struct irq_domain.root->mutex.
+ */
+int gicv5_irs_iste_alloc(const u32 lpi)
+{
+ struct gicv5_irs_chip_data *irs_data;
+ unsigned int index;
+ u32 l2istr, l2bits;
+ __le64 *l1ist;
+ size_t l2size;
+ void *l2ist;
+ int ret;
+
+ if (!gicv5_global_data.ist.l2)
+ return 0;
+
+ irs_data = per_cpu(per_cpu_irs_data, smp_processor_id());
+ if (!irs_data)
+ return -ENOENT;
+
+ l2size = gicv5_global_data.ist.l2_size;
+ l2bits = gicv5_global_data.ist.l2_bits;
+ l1ist = gicv5_global_data.ist.l1ist_addr;
+ index = lpi >> l2bits;
+
+ if (FIELD_GET(GICV5_ISTL1E_VALID, le64_to_cpu(l1ist[index])))
+ return 0;
+
+ l2ist = kzalloc(l2size, GFP_KERNEL);
+ if (!l2ist)
+ return -ENOMEM;
+
+ l1ist[index] = cpu_to_le64(virt_to_phys(l2ist) & GICV5_ISTL1E_L2_ADDR_MASK);
+
+ if (irs_data->flags & IRS_FLAGS_NON_COHERENT) {
+ dcache_clean_inval_poc((unsigned long)l2ist,
+ (unsigned long)l2ist + l2size);
+ dcache_clean_poc((unsigned long)(l1ist + index),
+ (unsigned long)(l1ist + index) + sizeof(*l1ist));
+ } else {
+ dsb(ishst);
+ }
+
+ l2istr = FIELD_PREP(GICV5_IRS_MAP_L2_ISTR_ID, lpi);
+ irs_writel_relaxed(irs_data, l2istr, GICV5_IRS_MAP_L2_ISTR);
+
+ ret = gicv5_irs_ist_synchronise(irs_data);
+ if (ret) {
+ l1ist[index] = 0;
+ kfree(l2ist);
+ return ret;
+ }
+
+ /*
+ * Make sure we invalidate any cache line that was pulled in before
+ * the IRS had a chance to update the L1 entry and mark it valid.
+ */
+ if (irs_data->flags & IRS_FLAGS_NON_COHERENT) {
+ /*
+ * gicv5_irs_ist_synchronise() includes memory
+ * barriers (MMIO accessors) required to guarantee that the
+ * following dcache invalidation is not executed before the
+ * IST mapping operation has completed.
+ */
+ dcache_inval_poc((unsigned long)(l1ist + index),
+ (unsigned long)(l1ist + index) + sizeof(*l1ist));
+ }
+
+ return 0;
+}
+
+/*
+ * Try to match the L2 IST size to the pagesize, and if this is not possible
+ * pick the smallest supported L2 size in order to minimise the requirement
+ * for physically contiguous blocks of memory: page-sized allocations are
+ * guaranteed to be physically contiguous, and are by definition the easiest
+ * to find.
+ *
+ * Falling back to the smallest supported size (in the event that the
+ * pagesize itself is not supported) again serves to make it easier to find
+ * physically contiguous blocks of memory.
+ */
+static unsigned int gicv5_irs_l2_sz(u32 idr2)
+{
+ switch (PAGE_SIZE) {
+ case SZ_64K:
+ if (GICV5_IRS_IST_L2SZ_SUPPORT_64KB(idr2))
+ return GICV5_IRS_IST_CFGR_L2SZ_64K;
+ fallthrough;
+ case SZ_4K:
+ if (GICV5_IRS_IST_L2SZ_SUPPORT_4KB(idr2))
+ return GICV5_IRS_IST_CFGR_L2SZ_4K;
+ fallthrough;
+ case SZ_16K:
+ if (GICV5_IRS_IST_L2SZ_SUPPORT_16KB(idr2))
+ return GICV5_IRS_IST_CFGR_L2SZ_16K;
+ break;
+ }
+
+ if (GICV5_IRS_IST_L2SZ_SUPPORT_4KB(idr2))
+ return GICV5_IRS_IST_CFGR_L2SZ_4K;
+
+ return GICV5_IRS_IST_CFGR_L2SZ_64K;
+}
+
+static int __init gicv5_irs_init_ist(struct gicv5_irs_chip_data *irs_data)
+{
+ u32 lpi_id_bits, idr2_id_bits, idr2_min_lpi_id_bits, l2_iste_sz, l2sz;
+ u32 l2_iste_sz_split, idr2;
+ bool two_levels, istmd;
+ u64 baser;
+ int ret;
+
+ baser = irs_readq_relaxed(irs_data, GICV5_IRS_IST_BASER);
+ if (FIELD_GET(GICV5_IRS_IST_BASER_VALID, baser)) {
+ pr_err("IST is marked as valid already; cannot allocate\n");
+ return -EPERM;
+ }
+
+ idr2 = irs_readl_relaxed(irs_data, GICV5_IRS_IDR2);
+ two_levels = !!FIELD_GET(GICV5_IRS_IDR2_IST_LEVELS, idr2);
+
+ idr2_id_bits = FIELD_GET(GICV5_IRS_IDR2_ID_BITS, idr2);
+ idr2_min_lpi_id_bits = FIELD_GET(GICV5_IRS_IDR2_MIN_LPI_ID_BITS, idr2);
+
+ /*
+ * For two-level tables we always support the maximum allowed
+ * number of IDs.
+ *
+ * For 1-level tables, we should support a number of bits that
+ * is >= min_lpi_id_bits but cap it to LPI_ID_BITS_LINEAR lest
+ * the 1-level table gets too large and its memory allocation
+ * fails.
+ */
+ if (two_levels) {
+ lpi_id_bits = idr2_id_bits;
+ } else {
+ lpi_id_bits = max(LPI_ID_BITS_LINEAR, idr2_min_lpi_id_bits);
+ lpi_id_bits = min(lpi_id_bits, idr2_id_bits);
+ }
+
+ /*
+ * Cap the ID bits according to the CPUIF supported ID bits
+ */
+ lpi_id_bits = min(lpi_id_bits, gicv5_global_data.cpuif_id_bits);
+
+ if (two_levels)
+ l2sz = gicv5_irs_l2_sz(idr2);
+
+ istmd = !!FIELD_GET(GICV5_IRS_IDR2_ISTMD, idr2);
+
+ l2_iste_sz = GICV5_IRS_IST_CFGR_ISTSZ_4;
+
+ if (istmd) {
+ l2_iste_sz_split = FIELD_GET(GICV5_IRS_IDR2_ISTMD_SZ, idr2);
+
+ if (lpi_id_bits < l2_iste_sz_split)
+ l2_iste_sz = GICV5_IRS_IST_CFGR_ISTSZ_8;
+ else
+ l2_iste_sz = GICV5_IRS_IST_CFGR_ISTSZ_16;
+ }
+
+ /*
+ * Follow GICv5 specification recommendation to opt in for two
+ * level tables (ref: 10.2.1.14 IRS_IST_CFGR).
+ */
+ if (two_levels && (lpi_id_bits > ((10 - l2_iste_sz) + (2 * l2sz)))) {
+ ret = gicv5_irs_init_ist_two_level(irs_data, lpi_id_bits,
+ l2_iste_sz, l2sz);
+ } else {
+ ret = gicv5_irs_init_ist_linear(irs_data, lpi_id_bits,
+ l2_iste_sz);
+ }
+ if (ret)
+ return ret;
+
+ gicv5_init_lpis(BIT(lpi_id_bits));
+
+ return 0;
+}
+
+struct iaffid_entry {
+ u16 iaffid;
+ bool valid;
+};
+
+static DEFINE_PER_CPU(struct iaffid_entry, cpu_iaffid);
+
+int gicv5_irs_cpu_to_iaffid(int cpuid, u16 *iaffid)
+{
+ if (!per_cpu(cpu_iaffid, cpuid).valid) {
+ pr_err("IAFFID for CPU %d has not been initialised\n", cpuid);
+ return -ENODEV;
+ }
+
+ *iaffid = per_cpu(cpu_iaffid, cpuid).iaffid;
+
+ return 0;
+}
+
+struct gicv5_irs_chip_data *gicv5_irs_lookup_by_spi_id(u32 spi_id)
+{
+ struct gicv5_irs_chip_data *irs_data;
+ u32 min, max;
+
+ list_for_each_entry(irs_data, &irs_nodes, entry) {
+ if (!irs_data->spi_range)
+ continue;
+
+ min = irs_data->spi_min;
+ max = irs_data->spi_min + irs_data->spi_range - 1;
+ if (spi_id >= min && spi_id <= max)
+ return irs_data;
+ }
+
+ return NULL;
+}
+
+static int gicv5_irs_wait_for_spi_op(struct gicv5_irs_chip_data *irs_data)
+{
+ u32 statusr;
+ int ret;
+
+ ret = gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_SPI_STATUSR,
+ GICV5_IRS_SPI_STATUSR_IDLE, &statusr);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(GICV5_IRS_SPI_STATUSR_V, statusr) ? 0 : -EIO;
+}
+
+static int gicv5_irs_wait_for_irs_pe(struct gicv5_irs_chip_data *irs_data,
+ bool selr)
+{
+ bool valid = true;
+ u32 statusr;
+ int ret;
+
+ ret = gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_PE_STATUSR,
+ GICV5_IRS_PE_STATUSR_IDLE, &statusr);
+ if (ret)
+ return ret;
+
+ if (selr)
+ valid = !!FIELD_GET(GICV5_IRS_PE_STATUSR_V, statusr);
+
+ return valid ? 0 : -EIO;
+}
+
+static int gicv5_irs_wait_for_pe_selr(struct gicv5_irs_chip_data *irs_data)
+{
+ return gicv5_irs_wait_for_irs_pe(irs_data, true);
+}
+
+static int gicv5_irs_wait_for_pe_cr0(struct gicv5_irs_chip_data *irs_data)
+{
+ return gicv5_irs_wait_for_irs_pe(irs_data, false);
+}
+
+int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gicv5_irs_chip_data *irs_data = d->chip_data;
+ u32 selr, cfgr;
+ bool level;
+ int ret;
+
+ /*
+ * There is no distinction between HIGH/LOW for level IRQs
+ * and RISING/FALLING for edge IRQs in the architecture,
+ * hence consider them equivalent.
+ */
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ level = false;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_LEVEL_LOW:
+ level = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ guard(raw_spinlock)(&irs_data->spi_config_lock);
+
+ selr = FIELD_PREP(GICV5_IRS_SPI_SELR_ID, d->hwirq);
+ irs_writel_relaxed(irs_data, selr, GICV5_IRS_SPI_SELR);
+ ret = gicv5_irs_wait_for_spi_op(irs_data);
+ if (ret)
+ return ret;
+
+ cfgr = FIELD_PREP(GICV5_IRS_SPI_CFGR_TM, level);
+ irs_writel_relaxed(irs_data, cfgr, GICV5_IRS_SPI_CFGR);
+
+ return gicv5_irs_wait_for_spi_op(irs_data);
+}
+
+static int gicv5_irs_wait_for_idle(struct gicv5_irs_chip_data *irs_data)
+{
+ return gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_CR0,
+ GICV5_IRS_CR0_IDLE, NULL);
+}
+
+void gicv5_irs_syncr(void)
+{
+ struct gicv5_irs_chip_data *irs_data;
+ u32 syncr;
+
+ irs_data = list_first_entry_or_null(&irs_nodes, struct gicv5_irs_chip_data, entry);
+ if (WARN_ON_ONCE(!irs_data))
+ return;
+
+ syncr = FIELD_PREP(GICV5_IRS_SYNCR_SYNC, 1);
+ irs_writel_relaxed(irs_data, syncr, GICV5_IRS_SYNCR);
+
+ gicv5_wait_for_op(irs_data->irs_base, GICV5_IRS_SYNC_STATUSR,
+ GICV5_IRS_SYNC_STATUSR_IDLE);
+}
+
+int gicv5_irs_register_cpu(int cpuid)
+{
+ struct gicv5_irs_chip_data *irs_data;
+ u32 selr, cr0;
+ u16 iaffid;
+ int ret;
+
+ ret = gicv5_irs_cpu_to_iaffid(cpuid, &iaffid);
+ if (ret) {
+ pr_err("IAFFID for CPU %d has not been initialised\n", cpuid);
+ return ret;
+ }
+
+ irs_data = per_cpu(per_cpu_irs_data, cpuid);
+ if (!irs_data) {
+ pr_err("No IRS associated with CPU %u\n", cpuid);
+ return -ENXIO;
+ }
+
+ selr = FIELD_PREP(GICV5_IRS_PE_SELR_IAFFID, iaffid);
+ irs_writel_relaxed(irs_data, selr, GICV5_IRS_PE_SELR);
+
+ ret = gicv5_irs_wait_for_pe_selr(irs_data);
+ if (ret) {
+ pr_err("IAFFID 0x%x used in IRS_PE_SELR is invalid\n", iaffid);
+ return -ENXIO;
+ }
+
+ cr0 = FIELD_PREP(GICV5_IRS_PE_CR0_DPS, 0x1);
+ irs_writel_relaxed(irs_data, cr0, GICV5_IRS_PE_CR0);
+
+ ret = gicv5_irs_wait_for_pe_cr0(irs_data);
+ if (ret)
+ return ret;
+
+ pr_debug("CPU %d enabled PE IAFFID 0x%x\n", cpuid, iaffid);
+
+ return 0;
+}
+
+static void __init gicv5_irs_init_bases(struct gicv5_irs_chip_data *irs_data,
+ void __iomem *irs_base,
+ struct fwnode_handle *handle)
+{
+ struct device_node *np = to_of_node(handle);
+ u32 cr0, cr1;
+
+ irs_data->fwnode = handle;
+ irs_data->irs_base = irs_base;
+
+ if (of_property_read_bool(np, "dma-noncoherent")) {
+ /*
+ * A non-coherent IRS implies that some cache levels cannot be
+ * used coherently by the cores and GIC. Our only option is to mark
+ * memory attributes for the GIC as non-cacheable; by default,
+ * non-cacheable memory attributes imply outer-shareable
+ * shareability, the value written into IRS_CR1_SH is ignored.
+ */
+ cr1 = FIELD_PREP(GICV5_IRS_CR1_VPED_WA, GICV5_NO_WRITE_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_VPED_RA, GICV5_NO_READ_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_VMD_WA, GICV5_NO_WRITE_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_VMD_RA, GICV5_NO_READ_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_VPET_RA, GICV5_NO_READ_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_VMT_RA, GICV5_NO_READ_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_IST_WA, GICV5_NO_WRITE_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_IST_RA, GICV5_NO_READ_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_IC, GICV5_NON_CACHE) |
+ FIELD_PREP(GICV5_IRS_CR1_OC, GICV5_NON_CACHE);
+ irs_data->flags |= IRS_FLAGS_NON_COHERENT;
+ } else {
+ cr1 = FIELD_PREP(GICV5_IRS_CR1_VPED_WA, GICV5_WRITE_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_VPED_RA, GICV5_READ_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_VMD_WA, GICV5_WRITE_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_VMD_RA, GICV5_READ_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_VPET_RA, GICV5_READ_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_VMT_RA, GICV5_READ_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_IST_WA, GICV5_WRITE_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_IST_RA, GICV5_READ_ALLOC) |
+ FIELD_PREP(GICV5_IRS_CR1_IC, GICV5_WB_CACHE) |
+ FIELD_PREP(GICV5_IRS_CR1_OC, GICV5_WB_CACHE) |
+ FIELD_PREP(GICV5_IRS_CR1_SH, GICV5_INNER_SHARE);
+ }
+
+ irs_writel_relaxed(irs_data, cr1, GICV5_IRS_CR1);
+
+ cr0 = FIELD_PREP(GICV5_IRS_CR0_IRSEN, 0x1);
+ irs_writel_relaxed(irs_data, cr0, GICV5_IRS_CR0);
+ gicv5_irs_wait_for_idle(irs_data);
+}
+
+static int __init gicv5_irs_of_init_affinity(struct device_node *node,
+ struct gicv5_irs_chip_data *irs_data,
+ u8 iaffid_bits)
+{
+ /*
+ * Detect IAFFID<->CPU mappings from the device tree and
+ * record IRS<->CPU topology information.
+ */
+ u16 iaffid_mask = GENMASK(iaffid_bits - 1, 0);
+ int ret, i, ncpus, niaffids;
+
+ ncpus = of_count_phandle_with_args(node, "cpus", NULL);
+ if (ncpus < 0)
+ return -EINVAL;
+
+ niaffids = of_property_count_elems_of_size(node, "arm,iaffids",
+ sizeof(u16));
+ if (niaffids != ncpus)
+ return -EINVAL;
+
+ u16 *iaffids __free(kfree) = kcalloc(niaffids, sizeof(*iaffids), GFP_KERNEL);
+ if (!iaffids)
+ return -ENOMEM;
+
+ ret = of_property_read_u16_array(node, "arm,iaffids", iaffids, niaffids);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ncpus; i++) {
+ struct device_node *cpu_node;
+ int cpu;
+
+ cpu_node = of_parse_phandle(node, "cpus", i);
+ if (WARN_ON(!cpu_node))
+ continue;
+
+ cpu = of_cpu_node_to_id(cpu_node);
+ of_node_put(cpu_node);
+ if (WARN_ON(cpu < 0))
+ continue;
+
+ if (iaffids[i] & ~iaffid_mask) {
+ pr_warn("CPU %d iaffid 0x%x exceeds IRS iaffid bits\n",
+ cpu, iaffids[i]);
+ continue;
+ }
+
+ per_cpu(cpu_iaffid, cpu).iaffid = iaffids[i];
+ per_cpu(cpu_iaffid, cpu).valid = true;
+
+ /* We also know that the CPU is connected to this IRS */
+ per_cpu(per_cpu_irs_data, cpu) = irs_data;
+ }
+
+ return ret;
+}
+
+static void irs_setup_pri_bits(u32 idr1)
+{
+ switch (FIELD_GET(GICV5_IRS_IDR1_PRIORITY_BITS, idr1)) {
+ case GICV5_IRS_IDR1_PRIORITY_BITS_1BITS:
+ gicv5_global_data.irs_pri_bits = 1;
+ break;
+ case GICV5_IRS_IDR1_PRIORITY_BITS_2BITS:
+ gicv5_global_data.irs_pri_bits = 2;
+ break;
+ case GICV5_IRS_IDR1_PRIORITY_BITS_3BITS:
+ gicv5_global_data.irs_pri_bits = 3;
+ break;
+ case GICV5_IRS_IDR1_PRIORITY_BITS_4BITS:
+ gicv5_global_data.irs_pri_bits = 4;
+ break;
+ case GICV5_IRS_IDR1_PRIORITY_BITS_5BITS:
+ gicv5_global_data.irs_pri_bits = 5;
+ break;
+ default:
+ pr_warn("Detected wrong IDR priority bits value 0x%lx\n",
+ FIELD_GET(GICV5_IRS_IDR1_PRIORITY_BITS, idr1));
+ gicv5_global_data.irs_pri_bits = 1;
+ break;
+ }
+}
+
+static int __init gicv5_irs_init(struct device_node *node)
+{
+ struct gicv5_irs_chip_data *irs_data;
+ void __iomem *irs_base;
+ u32 idr, spi_count;
+ u8 iaffid_bits;
+ int ret;
+
+ irs_data = kzalloc(sizeof(*irs_data), GFP_KERNEL);
+ if (!irs_data)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&irs_data->spi_config_lock);
+
+ ret = of_property_match_string(node, "reg-names", "ns-config");
+ if (ret < 0) {
+ pr_err("%pOF: ns-config reg-name not present\n", node);
+ goto out_err;
+ }
+
+ irs_base = of_io_request_and_map(node, ret, of_node_full_name(node));
+ if (IS_ERR(irs_base)) {
+ pr_err("%pOF: unable to map GICv5 IRS registers\n", node);
+ ret = PTR_ERR(irs_base);
+ goto out_err;
+ }
+
+ gicv5_irs_init_bases(irs_data, irs_base, &node->fwnode);
+
+ idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR1);
+ iaffid_bits = FIELD_GET(GICV5_IRS_IDR1_IAFFID_BITS, idr) + 1;
+
+ ret = gicv5_irs_of_init_affinity(node, irs_data, iaffid_bits);
+ if (ret) {
+ pr_err("Failed to parse CPU IAFFIDs from the device tree!\n");
+ goto out_iomem;
+ }
+
+ idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR2);
+ if (WARN(!FIELD_GET(GICV5_IRS_IDR2_LPI, idr),
+ "LPI support not available - no IPIs, can't proceed\n")) {
+ ret = -ENODEV;
+ goto out_iomem;
+ }
+
+ idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR7);
+ irs_data->spi_min = FIELD_GET(GICV5_IRS_IDR7_SPI_BASE, idr);
+
+ idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR6);
+ irs_data->spi_range = FIELD_GET(GICV5_IRS_IDR6_SPI_IRS_RANGE, idr);
+
+ if (irs_data->spi_range) {
+ pr_info("%s detected SPI range [%u-%u]\n",
+ of_node_full_name(node),
+ irs_data->spi_min,
+ irs_data->spi_min +
+ irs_data->spi_range - 1);
+ }
+
+ /*
+ * Do the global setting only on the first IRS.
+ * Global properties (iaffid_bits, global spi count) are guaranteed to
+ * be consistent across IRSes by the architecture.
+ */
+ if (list_empty(&irs_nodes)) {
+ idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR1);
+ irs_setup_pri_bits(idr);
+
+ idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR5);
+
+ spi_count = FIELD_GET(GICV5_IRS_IDR5_SPI_RANGE, idr);
+ gicv5_global_data.global_spi_count = spi_count;
+
+ gicv5_init_lpi_domain();
+
+ pr_debug("Detected %u SPIs globally\n", spi_count);
+ }
+
+ list_add_tail(&irs_data->entry, &irs_nodes);
+
+ return 0;
+
+out_iomem:
+ iounmap(irs_base);
+out_err:
+ kfree(irs_data);
+ return ret;
+}
+
+void __init gicv5_irs_remove(void)
+{
+ struct gicv5_irs_chip_data *irs_data, *tmp_data;
+
+ gicv5_free_lpi_domain();
+ gicv5_deinit_lpis();
+
+ list_for_each_entry_safe(irs_data, tmp_data, &irs_nodes, entry) {
+ iounmap(irs_data->irs_base);
+ list_del(&irs_data->entry);
+ kfree(irs_data);
+ }
+}
+
+int __init gicv5_irs_enable(void)
+{
+ struct gicv5_irs_chip_data *irs_data;
+ int ret;
+
+ irs_data = list_first_entry_or_null(&irs_nodes,
+ struct gicv5_irs_chip_data, entry);
+ if (!irs_data)
+ return -ENODEV;
+
+ ret = gicv5_irs_init_ist(irs_data);
+ if (ret) {
+ pr_err("Failed to init IST\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void __init gicv5_irs_its_probe(void)
+{
+ struct gicv5_irs_chip_data *irs_data;
+
+ list_for_each_entry(irs_data, &irs_nodes, entry)
+ gicv5_its_of_probe(to_of_node(irs_data->fwnode));
+}
+
+int __init gicv5_irs_of_probe(struct device_node *parent)
+{
+ struct device_node *np;
+ int ret;
+
+ for_each_available_child_of_node(parent, np) {
+ if (!of_device_is_compatible(np, "arm,gic-v5-irs"))
+ continue;
+
+ ret = gicv5_irs_init(np);
+ if (ret)
+ pr_err("Failed to init IRS %s\n", np->full_name);
+ }
+
+ return list_empty(&irs_nodes) ? -ENODEV : 0;
+}
diff --git a/drivers/irqchip/irq-gic-v5-its.c b/drivers/irqchip/irq-gic-v5-its.c
new file mode 100644
index 000000000000..9290ac741949
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v5-its.c
@@ -0,0 +1,1227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved.
+ */
+
+#define pr_fmt(fmt) "GICv5 ITS: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/iommu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-v5.h>
+#include <linux/irqchip/irq-msi-lib.h>
+
+#include "irq-gic-its-msi-parent.h"
+
+#define ITS_FLAGS_NON_COHERENT BIT(0)
+
+struct gicv5_its_chip_data {
+ struct xarray its_devices;
+ struct mutex dev_alloc_lock;
+ struct fwnode_handle *fwnode;
+ struct gicv5_its_devtab_cfg devtab_cfgr;
+ void __iomem *its_base;
+ u32 flags;
+ unsigned int msi_domain_flags;
+};
+
+struct gicv5_its_dev {
+ struct gicv5_its_chip_data *its_node;
+ struct gicv5_its_itt_cfg itt_cfg;
+ unsigned long *event_map;
+ u32 device_id;
+ u32 num_events;
+ phys_addr_t its_trans_phys_base;
+};
+
+static u32 its_readl_relaxed(struct gicv5_its_chip_data *its_node, const u32 reg_offset)
+{
+ return readl_relaxed(its_node->its_base + reg_offset);
+}
+
+static void its_writel_relaxed(struct gicv5_its_chip_data *its_node, const u32 val,
+ const u32 reg_offset)
+{
+ writel_relaxed(val, its_node->its_base + reg_offset);
+}
+
+static void its_writeq_relaxed(struct gicv5_its_chip_data *its_node, const u64 val,
+ const u32 reg_offset)
+{
+ writeq_relaxed(val, its_node->its_base + reg_offset);
+}
+
+static void gicv5_its_dcache_clean(struct gicv5_its_chip_data *its, void *start,
+ size_t sz)
+{
+ void *end = start + sz;
+
+ if (its->flags & ITS_FLAGS_NON_COHERENT)
+ dcache_clean_inval_poc((unsigned long)start, (unsigned long)end);
+ else
+ dsb(ishst);
+}
+
+static void its_write_table_entry(struct gicv5_its_chip_data *its, __le64 *entry,
+ u64 val)
+{
+ WRITE_ONCE(*entry, cpu_to_le64(val));
+ gicv5_its_dcache_clean(its, entry, sizeof(*entry));
+}
+
+#define devtab_cfgr_field(its, f) \
+ FIELD_GET(GICV5_ITS_DT_CFGR_##f, (its)->devtab_cfgr.cfgr)
+
+static int gicv5_its_cache_sync(struct gicv5_its_chip_data *its)
+{
+ return gicv5_wait_for_op_atomic(its->its_base, GICV5_ITS_STATUSR,
+ GICV5_ITS_STATUSR_IDLE, NULL);
+}
+
+static void gicv5_its_syncr(struct gicv5_its_chip_data *its,
+ struct gicv5_its_dev *its_dev)
+{
+ u64 syncr;
+
+ syncr = FIELD_PREP(GICV5_ITS_SYNCR_SYNC, 1) |
+ FIELD_PREP(GICV5_ITS_SYNCR_DEVICEID, its_dev->device_id);
+
+ its_writeq_relaxed(its, syncr, GICV5_ITS_SYNCR);
+
+ gicv5_wait_for_op(its->its_base, GICV5_ITS_SYNC_STATUSR, GICV5_ITS_SYNC_STATUSR_IDLE);
+}
+
+/* Number of bits required for each L2 {device/interrupt translation} table size */
+#define ITS_L2SZ_64K_L2_BITS 13
+#define ITS_L2SZ_16K_L2_BITS 11
+#define ITS_L2SZ_4K_L2_BITS 9
+
+static unsigned int gicv5_its_l2sz_to_l2_bits(unsigned int sz)
+{
+ switch (sz) {
+ case GICV5_ITS_DT_ITT_CFGR_L2SZ_64k:
+ return ITS_L2SZ_64K_L2_BITS;
+ case GICV5_ITS_DT_ITT_CFGR_L2SZ_16k:
+ return ITS_L2SZ_16K_L2_BITS;
+ case GICV5_ITS_DT_ITT_CFGR_L2SZ_4k:
+ default:
+ return ITS_L2SZ_4K_L2_BITS;
+ }
+}
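+
+/*
+ * Sanity check (illustrative): ITT and device table entries are
+ * __le64, i.e. 8 bytes, so a granule of size S holds S >> 3 entries:
+ * SZ_4K >> 3 == BIT(9), SZ_16K >> 3 == BIT(11) and SZ_64K >> 3 ==
+ * BIT(13), matching the three constants above.
+ */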
+
+static int gicv5_its_itt_cache_inv(struct gicv5_its_chip_data *its, u32 device_id,
+ u16 event_id)
+{
+ u32 eventr, eidr;
+ u64 didr;
+
+ didr = FIELD_PREP(GICV5_ITS_DIDR_DEVICEID, device_id);
+ eidr = FIELD_PREP(GICV5_ITS_EIDR_EVENTID, event_id);
+ eventr = FIELD_PREP(GICV5_ITS_INV_EVENTR_I, 0x1);
+
+ its_writeq_relaxed(its, didr, GICV5_ITS_DIDR);
+ its_writel_relaxed(its, eidr, GICV5_ITS_EIDR);
+ its_writel_relaxed(its, eventr, GICV5_ITS_INV_EVENTR);
+
+ return gicv5_its_cache_sync(its);
+}
+
+static void gicv5_its_free_itt_linear(struct gicv5_its_dev *its_dev)
+{
+ kfree(its_dev->itt_cfg.linear.itt);
+}
+
+static void gicv5_its_free_itt_two_level(struct gicv5_its_dev *its_dev)
+{
+ unsigned int i, num_ents = its_dev->itt_cfg.l2.num_l1_ents;
+
+ for (i = 0; i < num_ents; i++)
+ kfree(its_dev->itt_cfg.l2.l2ptrs[i]);
+
+ kfree(its_dev->itt_cfg.l2.l2ptrs);
+ kfree(its_dev->itt_cfg.l2.l1itt);
+}
+
+static void gicv5_its_free_itt(struct gicv5_its_dev *its_dev)
+{
+ if (!its_dev->itt_cfg.l2itt)
+ gicv5_its_free_itt_linear(its_dev);
+ else
+ gicv5_its_free_itt_two_level(its_dev);
+}
+
+static int gicv5_its_create_itt_linear(struct gicv5_its_chip_data *its,
+ struct gicv5_its_dev *its_dev,
+ unsigned int event_id_bits)
+{
+ unsigned int num_ents = BIT(event_id_bits);
+ __le64 *itt;
+
+ itt = kcalloc(num_ents, sizeof(*itt), GFP_KERNEL);
+ if (!itt)
+ return -ENOMEM;
+
+ its_dev->itt_cfg.linear.itt = itt;
+ its_dev->itt_cfg.linear.num_ents = num_ents;
+ its_dev->itt_cfg.l2itt = false;
+ its_dev->itt_cfg.event_id_bits = event_id_bits;
+
+ gicv5_its_dcache_clean(its, itt, num_ents * sizeof(*itt));
+
+ return 0;
+}
+
+/*
+ * Allocate a two-level ITT. All ITT entries are allocated in one go, unlike
+ * with the device table. Span may be used to limit the second level table
+ * size, where possible.
+ */
+static int gicv5_its_create_itt_two_level(struct gicv5_its_chip_data *its,
+ struct gicv5_its_dev *its_dev,
+ unsigned int event_id_bits,
+ unsigned int itt_l2sz,
+ unsigned int num_events)
+{
+ unsigned int l1_bits, l2_bits, span, events_per_l2_table;
+ unsigned int i, complete_tables, final_span, num_ents;
+ __le64 *itt_l1, *itt_l2, **l2ptrs;
+ int ret;
+ u64 val;
+
+ ret = gicv5_its_l2sz_to_l2_bits(itt_l2sz);
+ if (ret >= event_id_bits) {
+ pr_debug("Incorrect l2sz (0x%x) for %u EventID bits. Cannot allocate ITT\n",
+ itt_l2sz, event_id_bits);
+ return -EINVAL;
+ }
+
+ l2_bits = ret;
+
+ l1_bits = event_id_bits - l2_bits;
+
+ num_ents = BIT(l1_bits);
+
+ itt_l1 = kcalloc(num_ents, sizeof(*itt_l1), GFP_KERNEL);
+ if (!itt_l1)
+ return -ENOMEM;
+
+ l2ptrs = kcalloc(num_ents, sizeof(*l2ptrs), GFP_KERNEL);
+ if (!l2ptrs) {
+ kfree(itt_l1);
+ return -ENOMEM;
+ }
+
+ its_dev->itt_cfg.l2.l2ptrs = l2ptrs;
+
+ its_dev->itt_cfg.l2.l2sz = itt_l2sz;
+ its_dev->itt_cfg.l2.l1itt = itt_l1;
+ its_dev->itt_cfg.l2.num_l1_ents = num_ents;
+ its_dev->itt_cfg.l2itt = true;
+ its_dev->itt_cfg.event_id_bits = event_id_bits;
+
+ /*
+ * Need to determine how many entries there are per L2 - this is based
+ * on the number of bits in the table.
+ */
+ events_per_l2_table = BIT(l2_bits);
+ complete_tables = num_events / events_per_l2_table;
+ final_span = order_base_2(num_events % events_per_l2_table);
+
+ for (i = 0; i < num_ents; i++) {
+ size_t l2sz;
+
+ span = i == complete_tables ? final_span : l2_bits;
+
+ itt_l2 = kcalloc(BIT(span), sizeof(*itt_l2), GFP_KERNEL);
+ if (!itt_l2) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ its_dev->itt_cfg.l2.l2ptrs[i] = itt_l2;
+
+ l2sz = BIT(span) * sizeof(*itt_l2);
+
+ gicv5_its_dcache_clean(its, itt_l2, l2sz);
+
+ val = (virt_to_phys(itt_l2) & GICV5_ITTL1E_L2_ADDR_MASK) |
+ FIELD_PREP(GICV5_ITTL1E_SPAN, span) |
+ FIELD_PREP(GICV5_ITTL1E_VALID, 0x1);
+
+ WRITE_ONCE(itt_l1[i], cpu_to_le64(val));
+ }
+
+ gicv5_its_dcache_clean(its, itt_l1, num_ents * sizeof(*itt_l1));
+
+ return 0;
+
+out_free:
+ /* i is unsigned: count back down rather than testing i >= 0 */
+ while (i--)
+ kfree(its_dev->itt_cfg.l2.l2ptrs[i]);
+
+ kfree(its_dev->itt_cfg.l2.l2ptrs);
+ kfree(itt_l1);
+ return ret;
+}
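+
+/*
+ * Worked example (illustrative numbers): event_id_bits == 11 with
+ * l2_bits == 9 leaves l1_bits == 2, i.e. BIT(2) == 4 L1 entries.
+ * For num_events == 1100: events_per_l2_table == 512,
+ * complete_tables == 2 and final_span == order_base_2(1100 % 512) ==
+ * order_base_2(76) == 7, so L1[0] and L1[1] reference full 512-entry
+ * L2 tables while L1[2] gets a 128-entry table spanning the remaining
+ * 76 events (any later L1 entry falls back to the full span, as
+ * coded above).
+ */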
+
+/*
+ * Check whether the device table or the ITT supports a two-level
+ * layout and, based on the number of id_bits requested, determine
+ * whether a two-level table is required.
+ *
+ * If a two-level table is deemed necessary, return true and write the
+ * selected 2-level size value through *sz.
+ */
+static bool gicv5_its_l2sz_two_level(bool devtab, u32 its_idr1, u8 id_bits, u8 *sz)
+{
+ unsigned int l2_bits, l2_sz;
+
+ if (devtab && !FIELD_GET(GICV5_ITS_IDR1_DT_LEVELS, its_idr1))
+ return false;
+
+ if (!devtab && !FIELD_GET(GICV5_ITS_IDR1_ITT_LEVELS, its_idr1))
+ return false;
+
+ /*
+ * Pick an L2 size that matches the pagesize; if a match
+ * is not found, go for the smallest supported l2 size granule.
+ *
+ * This ensures that we will always be able to allocate
+ * contiguous memory at L2.
+ */
+ switch (PAGE_SIZE) {
+ case SZ_64K:
+ if (GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(its_idr1)) {
+ l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_64k;
+ break;
+ }
+ fallthrough;
+ case SZ_4K:
+ if (GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(its_idr1)) {
+ l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k;
+ break;
+ }
+ fallthrough;
+ case SZ_16K:
+ if (GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(its_idr1)) {
+ l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_16k;
+ break;
+ }
+ if (GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(its_idr1)) {
+ l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k;
+ break;
+ }
+ if (GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(its_idr1)) {
+ l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_64k;
+ break;
+ }
+
+ l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k;
+ break;
+ }
+
+ l2_bits = gicv5_its_l2sz_to_l2_bits(l2_sz);
+
+ if (l2_bits > id_bits)
+ return false;
+
+ *sz = l2_sz;
+
+ return true;
+}
+
+static __le64 *gicv5_its_device_get_itte_ref(struct gicv5_its_dev *its_dev,
+ u16 event_id)
+{
+ unsigned int l1_idx, l2_idx, l2_bits;
+ __le64 *l2_itt;
+
+ if (!its_dev->itt_cfg.l2itt) {
+ __le64 *itt = its_dev->itt_cfg.linear.itt;
+
+ return &itt[event_id];
+ }
+
+ l2_bits = gicv5_its_l2sz_to_l2_bits(its_dev->itt_cfg.l2.l2sz);
+ l1_idx = event_id >> l2_bits;
+ l2_idx = event_id & GENMASK(l2_bits - 1, 0);
+ l2_itt = its_dev->itt_cfg.l2.l2ptrs[l1_idx];
+
+ return &l2_itt[l2_idx];
+}
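+
+/*
+ * Worked lookup (illustrative): with a 4K L2 granule l2_bits == 9, so
+ * event_id 0x1234 (4660) resolves to l1_idx == 4660 >> 9 == 9 and
+ * l2_idx == 4660 & GENMASK(8, 0) == 52, i.e. entry 52 of the tenth
+ * L2 table.
+ */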
+
+static int gicv5_its_device_cache_inv(struct gicv5_its_chip_data *its,
+ struct gicv5_its_dev *its_dev)
+{
+ u32 devicer;
+ u64 didr;
+
+ didr = FIELD_PREP(GICV5_ITS_DIDR_DEVICEID, its_dev->device_id);
+ devicer = FIELD_PREP(GICV5_ITS_INV_DEVICER_I, 0x1) |
+ FIELD_PREP(GICV5_ITS_INV_DEVICER_EVENTID_BITS,
+ its_dev->itt_cfg.event_id_bits) |
+ FIELD_PREP(GICV5_ITS_INV_DEVICER_L1, 0x0);
+ its_writeq_relaxed(its, didr, GICV5_ITS_DIDR);
+ its_writel_relaxed(its, devicer, GICV5_ITS_INV_DEVICER);
+
+ return gicv5_its_cache_sync(its);
+}
+
+/*
+ * Allocate a level 2 device table entry, update L1 parent to reference it.
+ * Only used for 2-level device tables, and it is called on demand.
+ */
+static int gicv5_its_alloc_l2_devtab(struct gicv5_its_chip_data *its,
+ unsigned int l1_index)
+{
+ __le64 *l2devtab, *l1devtab = its->devtab_cfgr.l2.l1devtab;
+ u8 span, l2sz, l2_bits;
+ u64 l1dte;
+
+ if (FIELD_GET(GICV5_DTL1E_VALID, le64_to_cpu(l1devtab[l1_index])))
+ return 0;
+
+ span = FIELD_GET(GICV5_DTL1E_SPAN, le64_to_cpu(l1devtab[l1_index]));
+ l2sz = devtab_cfgr_field(its, L2SZ);
+
+ l2_bits = gicv5_its_l2sz_to_l2_bits(l2sz);
+
+ /*
+ * Span allows us to create a smaller L2 device table.
+ * If it is too large, use the number of allowed L2 bits.
+ */
+ if (span > l2_bits)
+ span = l2_bits;
+
+ l2devtab = kcalloc(BIT(span), sizeof(*l2devtab), GFP_KERNEL);
+ if (!l2devtab)
+ return -ENOMEM;
+
+ its->devtab_cfgr.l2.l2ptrs[l1_index] = l2devtab;
+
+ l1dte = FIELD_PREP(GICV5_DTL1E_SPAN, span) |
+ (virt_to_phys(l2devtab) & GICV5_DTL1E_L2_ADDR_MASK) |
+ FIELD_PREP(GICV5_DTL1E_VALID, 0x1);
+ its_write_table_entry(its, &l1devtab[l1_index], l1dte);
+
+ return 0;
+}
+
+static __le64 *gicv5_its_devtab_get_dte_ref(struct gicv5_its_chip_data *its,
+ u32 device_id, bool alloc)
+{
+ u8 str = devtab_cfgr_field(its, STRUCTURE);
+ unsigned int l2sz, l2_bits, l1_idx, l2_idx;
+ __le64 *l2devtab;
+ int ret;
+
+ if (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) {
+ l2devtab = its->devtab_cfgr.linear.devtab;
+ return &l2devtab[device_id];
+ }
+
+ l2sz = devtab_cfgr_field(its, L2SZ);
+ l2_bits = gicv5_its_l2sz_to_l2_bits(l2sz);
+ l1_idx = device_id >> l2_bits;
+ l2_idx = device_id & GENMASK(l2_bits - 1, 0);
+
+ if (alloc) {
+ /*
+ * Allocate a new L2 device table here before
+ * continuing. We make the assumption that the span in
+ * the L1 table has been set correctly, and blindly use
+ * that value.
+ */
+ ret = gicv5_its_alloc_l2_devtab(its, l1_idx);
+ if (ret)
+ return NULL;
+ }
+
+ l2devtab = its->devtab_cfgr.l2.l2ptrs[l1_idx];
+ return &l2devtab[l2_idx];
+}
+
+/*
+ * Register a new device in the device table. Allocate an ITT and
+ * program the L2DTE entry according to the ITT structure that
+ * was chosen.
+ */
+static int gicv5_its_device_register(struct gicv5_its_chip_data *its,
+ struct gicv5_its_dev *its_dev)
+{
+ u8 event_id_bits, device_id_bits, itt_struct, itt_l2sz;
+ phys_addr_t itt_phys_base;
+ bool two_level_itt;
+ u32 idr1, idr2;
+ __le64 *dte;
+ u64 val;
+ int ret;
+
+ device_id_bits = devtab_cfgr_field(its, DEVICEID_BITS);
+
+ if (its_dev->device_id >= BIT(device_id_bits)) {
+ pr_err("Supplied DeviceID (%u) outside of Device Table range (%u)!",
+ its_dev->device_id, (u32)GENMASK(device_id_bits - 1, 0));
+ return -EINVAL;
+ }
+
+ dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, true);
+ if (!dte)
+ return -ENOMEM;
+
+ if (FIELD_GET(GICV5_DTL2E_VALID, le64_to_cpu(*dte)))
+ return -EBUSY;
+
+ /*
+ * Determine how many bits we need, validate those against the max.
+ * Based on these, determine if we should go for a 1- or 2-level ITT.
+ */
+ event_id_bits = order_base_2(its_dev->num_events);
+
+ idr2 = its_readl_relaxed(its, GICV5_ITS_IDR2);
+
+ if (event_id_bits > FIELD_GET(GICV5_ITS_IDR2_EVENTID_BITS, idr2)) {
+ pr_err("Required EventID bits (%u) larger than supported bits (%u)!",
+ event_id_bits,
+ (u8)FIELD_GET(GICV5_ITS_IDR2_EVENTID_BITS, idr2));
+ return -EINVAL;
+ }
+
+ idr1 = its_readl_relaxed(its, GICV5_ITS_IDR1);
+
+ /*
+ * L2 ITT size is programmed into the L2DTE regardless of
+ * whether a two-level or linear ITT is built, init it.
+ */
+ itt_l2sz = 0;
+
+ two_level_itt = gicv5_its_l2sz_two_level(false, idr1, event_id_bits,
+ &itt_l2sz);
+ if (two_level_itt)
+ ret = gicv5_its_create_itt_two_level(its, its_dev, event_id_bits,
+ itt_l2sz,
+ its_dev->num_events);
+ else
+ ret = gicv5_its_create_itt_linear(its, its_dev, event_id_bits);
+ if (ret)
+ return ret;
+
+ itt_phys_base = two_level_itt ? virt_to_phys(its_dev->itt_cfg.l2.l1itt) :
+ virt_to_phys(its_dev->itt_cfg.linear.itt);
+
+ itt_struct = two_level_itt ? GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL :
+ GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR;
+
+ val = FIELD_PREP(GICV5_DTL2E_EVENT_ID_BITS, event_id_bits) |
+ FIELD_PREP(GICV5_DTL2E_ITT_STRUCTURE, itt_struct) |
+ (itt_phys_base & GICV5_DTL2E_ITT_ADDR_MASK) |
+ FIELD_PREP(GICV5_DTL2E_ITT_L2SZ, itt_l2sz) |
+ FIELD_PREP(GICV5_DTL2E_VALID, 0x1);
+
+ its_write_table_entry(its, dte, val);
+
+ ret = gicv5_its_device_cache_inv(its, its_dev);
+ if (ret) {
+ its_write_table_entry(its, dte, 0);
+ gicv5_its_free_itt(its_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Unregister a device in the device table. Lookup the device by ID, free the
+ * corresponding ITT, mark the device as invalid in the device table.
+ */
+static int gicv5_its_device_unregister(struct gicv5_its_chip_data *its,
+ struct gicv5_its_dev *its_dev)
+{
+ __le64 *dte;
+
+ dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, false);
+
+ if (!FIELD_GET(GICV5_DTL2E_VALID, le64_to_cpu(*dte))) {
+ pr_debug("Device table entry for DeviceID 0x%x is not valid. Nothing to clean up!",
+ its_dev->device_id);
+ return -EINVAL;
+ }
+
+ /* Zero everything - make it clear that this is an invalid entry */
+ its_write_table_entry(its, dte, 0);
+
+ gicv5_its_free_itt(its_dev);
+
+ return gicv5_its_device_cache_inv(its, its_dev);
+}
+
+/*
+ * Allocate a 1-level device table. All entries are allocated, but marked
+ * invalid.
+ */
+static int gicv5_its_alloc_devtab_linear(struct gicv5_its_chip_data *its,
+ u8 device_id_bits)
+{
+ __le64 *devtab;
+ size_t sz;
+ u64 baser;
+ u32 cfgr;
+
+ /*
+ * We expect a GICv5 implementation requiring a large number of
+ * deviceID bits to support a 2-level device table. If that's not
+ * the case, cap the number of deviceIDs supported according to the
+ * kmalloc limits so that the system can chug along with a linear
+ * device table.
+ */
+ sz = BIT_ULL(device_id_bits) * sizeof(*devtab);
+ if (sz > KMALLOC_MAX_SIZE) {
+ u8 device_id_cap = ilog2(KMALLOC_MAX_SIZE/sizeof(*devtab));
+
+ pr_warn("Limiting device ID bits from %u to %u\n",
+ device_id_bits, device_id_cap);
+ device_id_bits = device_id_cap;
+ sz = KMALLOC_MAX_SIZE;
+ }
+
+ devtab = kcalloc(BIT(device_id_bits), sizeof(*devtab), GFP_KERNEL);
+ if (!devtab)
+ return -ENOMEM;
+
+ gicv5_its_dcache_clean(its, devtab, sz);
+
+ cfgr = FIELD_PREP(GICV5_ITS_DT_CFGR_STRUCTURE,
+ GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) |
+ FIELD_PREP(GICV5_ITS_DT_CFGR_L2SZ, 0) |
+ FIELD_PREP(GICV5_ITS_DT_CFGR_DEVICEID_BITS, device_id_bits);
+ its_writel_relaxed(its, cfgr, GICV5_ITS_DT_CFGR);
+
+ baser = virt_to_phys(devtab) & GICV5_ITS_DT_BASER_ADDR_MASK;
+ its_writeq_relaxed(its, baser, GICV5_ITS_DT_BASER);
+
+ its->devtab_cfgr.cfgr = cfgr;
+ its->devtab_cfgr.linear.devtab = devtab;
+
+ return 0;
+}
+
+/*
+ * Allocate a 2-level device table. L2 tables are not allocated up front;
+ * they are allocated on demand.
+ */
+static int gicv5_its_alloc_devtab_two_level(struct gicv5_its_chip_data *its,
+ u8 device_id_bits,
+ u8 devtab_l2sz)
+{
+ unsigned int l1_bits, l2_bits, i;
+ __le64 *l1devtab, **l2ptrs;
+ size_t l1_sz;
+ u64 baser;
+ u32 cfgr;
+
+ l2_bits = gicv5_its_l2sz_to_l2_bits(devtab_l2sz);
+
+ l1_bits = device_id_bits - l2_bits;
+ l1_sz = BIT(l1_bits) * sizeof(*l1devtab);
+ /*
+ * With 2-level device table support it is highly unlikely
+ * that we cannot allocate the required amount of device
+ * table memory to cover the deviceID space; cap the deviceID
+ * space if we encounter such a set-up.
+ * If this ever becomes a problem we could revisit the policy
+ * behind level 2 size selection to reduce level-1 deviceID bits.
+ */
+ if (l1_sz > KMALLOC_MAX_SIZE) {
+ l1_bits = ilog2(KMALLOC_MAX_SIZE/sizeof(*l1devtab));
+
+ pr_warn("Limiting device ID bits from %u to %u\n",
+ device_id_bits, l1_bits + l2_bits);
+ device_id_bits = l1_bits + l2_bits;
+ l1_sz = KMALLOC_MAX_SIZE;
+ }
+
+ l1devtab = kcalloc(BIT(l1_bits), sizeof(*l1devtab), GFP_KERNEL);
+ if (!l1devtab)
+ return -ENOMEM;
+
+ l2ptrs = kcalloc(BIT(l1_bits), sizeof(*l2ptrs), GFP_KERNEL);
+ if (!l2ptrs) {
+ kfree(l1devtab);
+ return -ENOMEM;
+ }
+
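+ /*
+ * Initialise L1 entries with the span of the (on-demand) L2 tables
+ * but without the VALID bit: validity is set only once an L2 table
+ * is actually allocated.
+ */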
+ for (i = 0; i < BIT(l1_bits); i++)
+ l1devtab[i] = cpu_to_le64(FIELD_PREP(GICV5_DTL1E_SPAN, l2_bits));
+
+ gicv5_its_dcache_clean(its, l1devtab, l1_sz);
+
+ cfgr = FIELD_PREP(GICV5_ITS_DT_CFGR_STRUCTURE,
+ GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL) |
+ FIELD_PREP(GICV5_ITS_DT_CFGR_L2SZ, devtab_l2sz) |
+ FIELD_PREP(GICV5_ITS_DT_CFGR_DEVICEID_BITS, device_id_bits);
+ its_writel_relaxed(its, cfgr, GICV5_ITS_DT_CFGR);
+
+ baser = virt_to_phys(l1devtab) & GICV5_ITS_DT_BASER_ADDR_MASK;
+ its_writeq_relaxed(its, baser, GICV5_ITS_DT_BASER);
+
+ its->devtab_cfgr.cfgr = cfgr;
+ its->devtab_cfgr.l2.l1devtab = l1devtab;
+ its->devtab_cfgr.l2.l2ptrs = l2ptrs;
+
+ return 0;
+}
+
+/*
+ * Initialise the device table as either 1- or 2-level depending on what is
+ * supported by the hardware.
+ */
+static int gicv5_its_init_devtab(struct gicv5_its_chip_data *its)
+{
+ u8 device_id_bits, devtab_l2sz;
+ bool two_level_devtab;
+ u32 idr1;
+
+ idr1 = its_readl_relaxed(its, GICV5_ITS_IDR1);
+
+ device_id_bits = FIELD_GET(GICV5_ITS_IDR1_DEVICEID_BITS, idr1);
+ two_level_devtab = gicv5_its_l2sz_two_level(true, idr1, device_id_bits,
+ &devtab_l2sz);
+ if (two_level_devtab)
+ return gicv5_its_alloc_devtab_two_level(its, device_id_bits,
+ devtab_l2sz);
+ else
+ return gicv5_its_alloc_devtab_linear(its, device_id_bits);
+}
+
+static void gicv5_its_deinit_devtab(struct gicv5_its_chip_data *its)
+{
+ u8 str = devtab_cfgr_field(its, STRUCTURE);
+
+ if (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) {
+ kfree(its->devtab_cfgr.linear.devtab);
+ } else {
+ kfree(its->devtab_cfgr.l2.l1devtab);
+ kfree(its->devtab_cfgr.l2.l2ptrs);
+ }
+}
+
+static void gicv5_its_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d);
+ u64 addr = its_dev->its_trans_phys_base;
+
+ msg->data = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);
+ msi_msg_set_addr(irq_data_get_msi_desc(d), msg, addr);
+}
+
+static const struct irq_chip gicv5_its_irq_chip = {
+ .name = "GICv5-ITS-MSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_get_irqchip_state = irq_chip_get_parent_state,
+ .irq_set_irqchip_state = irq_chip_set_parent_state,
+ .irq_compose_msi_msg = gicv5_its_compose_msi_msg,
+};
+
+static struct gicv5_its_dev *gicv5_its_find_device(struct gicv5_its_chip_data *its,
+ u32 device_id)
+{
+ struct gicv5_its_dev *dev = xa_load(&its->its_devices, device_id);
+
+ return dev ? dev : ERR_PTR(-ENODEV);
+}
+
+static struct gicv5_its_dev *gicv5_its_alloc_device(struct gicv5_its_chip_data *its, int nvec,
+ u32 dev_id)
+{
+ struct gicv5_its_dev *its_dev;
+ void *entry;
+ int ret;
+
+ its_dev = gicv5_its_find_device(its, dev_id);
+ if (!IS_ERR(its_dev)) {
+ pr_err("A device with this DeviceID (0x%x) has already been registered.\n",
+ dev_id);
+
+ return ERR_PTR(-EBUSY);
+ }
+
+ its_dev = kzalloc(sizeof(*its_dev), GFP_KERNEL);
+ if (!its_dev)
+ return ERR_PTR(-ENOMEM);
+
+ its_dev->device_id = dev_id;
+ its_dev->num_events = nvec;
+
+ ret = gicv5_its_device_register(its, its_dev);
+ if (ret) {
+ pr_err("Failed to register the device\n");
+ goto out_dev_free;
+ }
+
+ gicv5_its_device_cache_inv(its, its_dev);
+
+ its_dev->its_node = its;
+
+ its_dev->event_map = bitmap_zalloc(its_dev->num_events, GFP_KERNEL);
+ if (!its_dev->event_map) {
+ ret = -ENOMEM;
+ goto out_unregister;
+ }
+
+ entry = xa_store(&its->its_devices, dev_id, its_dev, GFP_KERNEL);
+ if (xa_is_err(entry)) {
+ ret = xa_err(entry);
+ goto out_bitmap_free;
+ }
+
+ return its_dev;
+
+out_bitmap_free:
+ bitmap_free(its_dev->event_map);
+out_unregister:
+ gicv5_its_device_unregister(its, its_dev);
+out_dev_free:
+ kfree(its_dev);
+ return ERR_PTR(ret);
+}
+
+static int gicv5_its_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ u32 dev_id = info->scratchpad[0].ul;
+ struct msi_domain_info *msi_info;
+ struct gicv5_its_chip_data *its;
+ struct gicv5_its_dev *its_dev;
+
+ msi_info = msi_get_domain_info(domain);
+ its = msi_info->data;
+
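+ /*
+ * scratchpad[0] carries the DeviceID on entry and is overwritten
+ * with the allocated device pointer; scratchpad[1] holds the ITS
+ * translate frame physical address programmed into MSI messages.
+ */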
+ guard(mutex)(&its->dev_alloc_lock);
+
+ its_dev = gicv5_its_alloc_device(its, nvec, dev_id);
+ if (IS_ERR(its_dev))
+ return PTR_ERR(its_dev);
+
+ its_dev->its_trans_phys_base = info->scratchpad[1].ul;
+ info->scratchpad[0].ptr = its_dev;
+
+ return 0;
+}
+
+static void gicv5_its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
+{
+ struct gicv5_its_dev *its_dev = info->scratchpad[0].ptr;
+ struct msi_domain_info *msi_info;
+ struct gicv5_its_chip_data *its;
+
+ msi_info = msi_get_domain_info(domain);
+ its = msi_info->data;
+
+ guard(mutex)(&its->dev_alloc_lock);
+
+ if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map, its_dev->num_events)))
+ return;
+
+ xa_erase(&its->its_devices, its_dev->device_id);
+ bitmap_free(its_dev->event_map);
+ gicv5_its_device_unregister(its, its_dev);
+ kfree(its_dev);
+}
+
+static struct msi_domain_ops gicv5_its_msi_domain_ops = {
+ .msi_prepare = gicv5_its_msi_prepare,
+ .msi_teardown = gicv5_its_msi_teardown,
+};
+
+static int gicv5_its_map_event(struct gicv5_its_dev *its_dev, u16 event_id, u32 lpi)
+{
+ struct gicv5_its_chip_data *its = its_dev->its_node;
+ u64 itt_entry;
+ __le64 *itte;
+
+ itte = gicv5_its_device_get_itte_ref(its_dev, event_id);
+
+ if (FIELD_GET(GICV5_ITTL2E_VALID, le64_to_cpu(*itte)))
+ return -EEXIST;
+
+ itt_entry = FIELD_PREP(GICV5_ITTL2E_LPI_ID, lpi) |
+ FIELD_PREP(GICV5_ITTL2E_VALID, 0x1);
+
+ its_write_table_entry(its, itte, itt_entry);
+
+ gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id);
+
+ return 0;
+}
+
+static void gicv5_its_unmap_event(struct gicv5_its_dev *its_dev, u16 event_id)
+{
+ struct gicv5_its_chip_data *its = its_dev->its_node;
+ u64 itte_val;
+ __le64 *itte;
+
+ itte = gicv5_its_device_get_itte_ref(its_dev, event_id);
+
+ itte_val = le64_to_cpu(*itte);
+ itte_val &= ~GICV5_ITTL2E_VALID;
+
+ its_write_table_entry(its, itte, itte_val);
+
+ gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id);
+}
+
+static int gicv5_its_alloc_eventid(struct gicv5_its_dev *its_dev, msi_alloc_info_t *info,
+ unsigned int nr_irqs, u32 *eventid)
+{
+ int event_id_base;
+
+ if (!(info->flags & MSI_ALLOC_FLAGS_FIXED_MSG_DATA)) {
+ event_id_base = bitmap_find_free_region(its_dev->event_map,
+ its_dev->num_events,
+ get_count_order(nr_irqs));
+ if (event_id_base < 0)
+ return event_id_base;
+ } else {
+ /*
+ * We want to have a fixed EventID mapped for hardcoded
+ * message data allocations.
+ */
+ if (WARN_ON_ONCE(nr_irqs != 1))
+ return -EINVAL;
+
+ event_id_base = info->hwirq;
+
+ if (event_id_base >= its_dev->num_events) {
+ pr_err("EventID ouside of ITT range; cannot allocate an ITT entry!\n");
+
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(event_id_base, its_dev->event_map)) {
+ pr_warn("Can't reserve event_id bitmap\n");
+ return -EINVAL;
+ }
+ }
+
+ *eventid = event_id_base;
+
+ return 0;
+}
+
+static void gicv5_its_free_eventid(struct gicv5_its_dev *its_dev, u32 event_id_base,
+ unsigned int nr_irqs)
+{
+ bitmap_release_region(its_dev->event_map, event_id_base,
+ get_count_order(nr_irqs));
+}
+
+static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ u32 device_id, event_id_base, lpi;
+ struct gicv5_its_dev *its_dev;
+ msi_alloc_info_t *info = arg;
+ irq_hw_number_t hwirq;
+ struct irq_data *irqd;
+ int ret, i;
+
+ its_dev = info->scratchpad[0].ptr;
+
+ ret = gicv5_its_alloc_eventid(its_dev, info, nr_irqs, &event_id_base);
+ if (ret)
+ return ret;
+
+ ret = iommu_dma_prepare_msi(info->desc, its_dev->its_trans_phys_base);
+ if (ret)
+ goto out_eventid;
+
+ device_id = its_dev->device_id;
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = gicv5_alloc_lpi();
+ if (ret < 0) {
+ pr_debug("Failed to find free LPI!\n");
+ goto out_eventid;
+ }
+ lpi = ret;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi);
+ if (ret)
+ goto out_free_lpi;
+
+ /*
+ * Store eventid and deviceid into the hwirq for later use.
+ *
+ * hwirq = event_id << 32 | device_id
+ */
+ hwirq = FIELD_PREP(GICV5_ITS_HWIRQ_DEVICE_ID, device_id) |
+ FIELD_PREP(GICV5_ITS_HWIRQ_EVENT_ID, (u64)event_id_base + i);
+ irq_domain_set_info(domain, virq + i, hwirq,
+ &gicv5_its_irq_chip, its_dev,
+ handle_fasteoi_irq, NULL, NULL);
+
+ irqd = irq_get_irq_data(virq + i);
+ irqd_set_single_target(irqd);
+ irqd_set_affinity_on_activate(irqd);
+ }
+
+ return 0;
+
+out_free_lpi:
+ gicv5_free_lpi(lpi);
+out_eventid:
+ gicv5_its_free_eventid(its_dev, event_id_base, nr_irqs);
+ return ret;
+}
+
+static void gicv5_its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct gicv5_its_chip_data *its;
+ struct gicv5_its_dev *its_dev;
+ u16 event_id_base;
+ unsigned int i;
+
+ its_dev = irq_data_get_irq_chip_data(d);
+ its = its_dev->its_node;
+
+ event_id_base = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);
+
+ bitmap_release_region(its_dev->event_map, event_id_base,
+ get_count_order(nr_irqs));
+
+ /* Hierarchically free irq data */
+ for (i = 0; i < nr_irqs; i++) {
+ d = irq_domain_get_irq_data(domain, virq + i);
+
+ gicv5_free_lpi(d->parent_data->hwirq);
+ irq_domain_reset_irq_data(d);
+ irq_domain_free_irqs_parent(domain, virq + i, 1);
+ }
+
+ gicv5_its_syncr(its, its_dev);
+ gicv5_irs_syncr();
+}
+
+static int gicv5_its_irq_domain_activate(struct irq_domain *domain, struct irq_data *d,
+ bool reserve)
+{
+ struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d);
+ u16 event_id;
+ u32 lpi;
+
+ event_id = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);
+ lpi = d->parent_data->hwirq;
+
+ return gicv5_its_map_event(its_dev, event_id, lpi);
+}
+
+static void gicv5_its_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d);
+ u16 event_id;
+
+ event_id = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);
+
+ gicv5_its_unmap_event(its_dev, event_id);
+}
+
+static const struct irq_domain_ops gicv5_its_irq_domain_ops = {
+ .alloc = gicv5_its_irq_domain_alloc,
+ .free = gicv5_its_irq_domain_free,
+ .activate = gicv5_its_irq_domain_activate,
+ .deactivate = gicv5_its_irq_domain_deactivate,
+ .select = msi_lib_irq_domain_select,
+};
+
+static int gicv5_its_write_cr0(struct gicv5_its_chip_data *its, bool enable)
+{
+ u32 cr0 = FIELD_PREP(GICV5_ITS_CR0_ITSEN, enable);
+
+ its_writel_relaxed(its, cr0, GICV5_ITS_CR0);
+ return gicv5_wait_for_op_atomic(its->its_base, GICV5_ITS_CR0,
+ GICV5_ITS_CR0_IDLE, NULL);
+}
+
+static int gicv5_its_enable(struct gicv5_its_chip_data *its)
+{
+ return gicv5_its_write_cr0(its, true);
+}
+
+static int gicv5_its_disable(struct gicv5_its_chip_data *its)
+{
+ return gicv5_its_write_cr0(its, false);
+}
+
+static void gicv5_its_print_info(struct gicv5_its_chip_data *its_node)
+{
+ bool devtab_linear;
+ u8 device_id_bits;
+ u8 str;
+
+ device_id_bits = devtab_cfgr_field(its_node, DEVICEID_BITS);
+
+ str = devtab_cfgr_field(its_node, STRUCTURE);
+ devtab_linear = (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR);
+
+ pr_info("ITS %s enabled using %s device table device_id_bits %u\n",
+ fwnode_get_name(its_node->fwnode),
+ devtab_linear ? "linear" : "2-level",
+ device_id_bits);
+}
+
+static int gicv5_its_init_domain(struct gicv5_its_chip_data *its, struct irq_domain *parent)
+{
+ struct irq_domain_info dom_info = {
+ .fwnode = its->fwnode,
+ .ops = &gicv5_its_irq_domain_ops,
+ .domain_flags = its->msi_domain_flags,
+ .parent = parent,
+ };
+ struct msi_domain_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->ops = &gicv5_its_msi_domain_ops;
+ info->data = its;
+ dom_info.host_data = info;
+
+ if (!msi_create_parent_irq_domain(&dom_info, &gic_v5_its_msi_parent_ops)) {
+ kfree(info);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int __init gicv5_its_init_bases(void __iomem *its_base, struct fwnode_handle *handle,
+ struct irq_domain *parent_domain)
+{
+ struct device_node *np = to_of_node(handle);
+ struct gicv5_its_chip_data *its_node;
+ u32 cr0, cr1;
+ bool enabled;
+ int ret;
+
+ its_node = kzalloc(sizeof(*its_node), GFP_KERNEL);
+ if (!its_node)
+ return -ENOMEM;
+
+ mutex_init(&its_node->dev_alloc_lock);
+ xa_init(&its_node->its_devices);
+ its_node->fwnode = handle;
+ its_node->its_base = its_base;
+ its_node->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI |
+ IRQ_DOMAIN_FLAG_FWNODE_PARENT;
+
+ cr0 = its_readl_relaxed(its_node, GICV5_ITS_CR0);
+ enabled = FIELD_GET(GICV5_ITS_CR0_ITSEN, cr0);
+ if (WARN(enabled, "ITS %s enabled, disabling it before proceeding\n", np->full_name)) {
+ ret = gicv5_its_disable(its_node);
+ if (ret)
+ goto out_free_node;
+ }
+
+ if (of_property_read_bool(np, "dma-noncoherent")) {
+ /*
+ * A non-coherent ITS implies that some cache levels cannot be
+ * used coherently by the cores and GIC. Our only option is to mark
+ * memory attributes for the GIC as non-cacheable; by default,
+ * non-cacheable memory attributes imply outer-shareable
+ * shareability, so the value written into ITS_CR1_SH is ignored.
+ */
+ cr1 = FIELD_PREP(GICV5_ITS_CR1_ITT_RA, GICV5_NO_READ_ALLOC) |
+ FIELD_PREP(GICV5_ITS_CR1_DT_RA, GICV5_NO_READ_ALLOC) |
+ FIELD_PREP(GICV5_ITS_CR1_IC, GICV5_NON_CACHE) |
+ FIELD_PREP(GICV5_ITS_CR1_OC, GICV5_NON_CACHE);
+ its_node->flags |= ITS_FLAGS_NON_COHERENT;
+ } else {
+ cr1 = FIELD_PREP(GICV5_ITS_CR1_ITT_RA, GICV5_READ_ALLOC) |
+ FIELD_PREP(GICV5_ITS_CR1_DT_RA, GICV5_READ_ALLOC) |
+ FIELD_PREP(GICV5_ITS_CR1_IC, GICV5_WB_CACHE) |
+ FIELD_PREP(GICV5_ITS_CR1_OC, GICV5_WB_CACHE) |
+ FIELD_PREP(GICV5_ITS_CR1_SH, GICV5_INNER_SHARE);
+ }
+
+ its_writel_relaxed(its_node, cr1, GICV5_ITS_CR1);
+
+ ret = gicv5_its_init_devtab(its_node);
+ if (ret)
+ goto out_free_node;
+
+ ret = gicv5_its_enable(its_node);
+ if (ret)
+ goto out_free_devtab;
+
+ ret = gicv5_its_init_domain(its_node, parent_domain);
+ if (ret)
+ goto out_disable_its;
+
+ gicv5_its_print_info(its_node);
+
+ return 0;
+
+out_disable_its:
+ gicv5_its_disable(its_node);
+out_free_devtab:
+ gicv5_its_deinit_devtab(its_node);
+out_free_node:
+ kfree(its_node);
+ return ret;
+}
+
+static int __init gicv5_its_init(struct device_node *node)
+{
+ void __iomem *its_base;
+ int ret, idx;
+
+ idx = of_property_match_string(node, "reg-names", "ns-config");
+ if (idx < 0) {
+ pr_err("%pOF: ns-config reg-name not present\n", node);
+ return -ENODEV;
+ }
+
+ its_base = of_io_request_and_map(node, idx, of_node_full_name(node));
+ if (IS_ERR(its_base)) {
+ pr_err("%pOF: unable to map GICv5 ITS_CONFIG_FRAME\n", node);
+ return PTR_ERR(its_base);
+ }
+
+ ret = gicv5_its_init_bases(its_base, of_fwnode_handle(node),
+ gicv5_global_data.lpi_domain);
+ if (ret)
+ goto out_unmap;
+
+ return 0;
+
+out_unmap:
+ iounmap(its_base);
+ return ret;
+}
+
+void __init gicv5_its_of_probe(struct device_node *parent)
+{
+ struct device_node *np;
+
+ for_each_available_child_of_node(parent, np) {
+ if (!of_device_is_compatible(np, "arm,gic-v5-its"))
+ continue;
+
+ if (gicv5_its_init(np))
+ pr_err("Failed to init ITS %s\n", np->full_name);
+ }
+}
diff --git a/drivers/irqchip/irq-gic-v5-iwb.c b/drivers/irqchip/irq-gic-v5-iwb.c
new file mode 100644
index 000000000000..ad9fdc14d1c6
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v5-iwb.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved.
+ */
+#define pr_fmt(fmt) "GICv5 IWB: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-v5.h>
+
+struct gicv5_iwb_chip_data {
+ void __iomem *iwb_base;
+ u16 nr_regs;
+};
+
+static u32 iwb_readl_relaxed(struct gicv5_iwb_chip_data *iwb_node, const u32 reg_offset)
+{
+ return readl_relaxed(iwb_node->iwb_base + reg_offset);
+}
+
+static void iwb_writel_relaxed(struct gicv5_iwb_chip_data *iwb_node, const u32 val,
+ const u32 reg_offset)
+{
+ writel_relaxed(val, iwb_node->iwb_base + reg_offset);
+}
+
+static int gicv5_iwb_wait_for_wenabler(struct gicv5_iwb_chip_data *iwb_node)
+{
+ return gicv5_wait_for_op_atomic(iwb_node->iwb_base, GICV5_IWB_WENABLE_STATUSR,
+ GICV5_IWB_WENABLE_STATUSR_IDLE, NULL);
+}
+
+static int __gicv5_iwb_set_wire_enable(struct gicv5_iwb_chip_data *iwb_node,
+ u32 iwb_wire, bool enable)
+{
+ u32 n = iwb_wire / 32;
+ u8 i = iwb_wire % 32;
+ u32 val;
+
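+ /* Each IWB_WENABLER<n> covers 32 wires: n selects the register, i the bit. */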
+ if (n >= iwb_node->nr_regs) {
+ pr_err("IWB_WENABLER<n> is invalid for n=%u\n", n);
+ return -EINVAL;
+ }
+
+ /*
+ * Enable IWB wire/pin at this point
+ * Note: This is not the same as enabling the interrupt
+ */
+ val = iwb_readl_relaxed(iwb_node, GICV5_IWB_WENABLER + (4 * n));
+ if (enable)
+ val |= BIT(i);
+ else
+ val &= ~BIT(i);
+ iwb_writel_relaxed(iwb_node, val, GICV5_IWB_WENABLER + (4 * n));
+
+ return gicv5_iwb_wait_for_wenabler(iwb_node);
+}
+
+static int gicv5_iwb_enable_wire(struct gicv5_iwb_chip_data *iwb_node,
+ u32 iwb_wire)
+{
+ return __gicv5_iwb_set_wire_enable(iwb_node, iwb_wire, true);
+}
+
+static int gicv5_iwb_disable_wire(struct gicv5_iwb_chip_data *iwb_node,
+ u32 iwb_wire)
+{
+ return __gicv5_iwb_set_wire_enable(iwb_node, iwb_wire, false);
+}
+
+static void gicv5_iwb_irq_disable(struct irq_data *d)
+{
+ struct gicv5_iwb_chip_data *iwb_node = irq_data_get_irq_chip_data(d);
+
+ gicv5_iwb_disable_wire(iwb_node, d->hwirq);
+ irq_chip_disable_parent(d);
+}
+
+static void gicv5_iwb_irq_enable(struct irq_data *d)
+{
+ struct gicv5_iwb_chip_data *iwb_node = irq_data_get_irq_chip_data(d);
+
+ gicv5_iwb_enable_wire(iwb_node, d->hwirq);
+ irq_chip_enable_parent(d);
+}
+
+static int gicv5_iwb_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gicv5_iwb_chip_data *iwb_node = irq_data_get_irq_chip_data(d);
+ u32 iwb_wire, n, wtmr;
+ u8 i;
+
+ iwb_wire = d->hwirq;
+ i = iwb_wire % 32;
+ n = iwb_wire / 32;
+
+ if (n >= iwb_node->nr_regs) {
+ pr_err_once("reg %u out of range\n", n);
+ return -EINVAL;
+ }
+
+ wtmr = iwb_readl_relaxed(iwb_node, GICV5_IWB_WTMR + (4 * n));
+
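+ /* In IWB_WTMR a set bit selects level-sensitive, a clear bit edge-triggered signalling. */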
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_LEVEL_LOW:
+ wtmr |= BIT(i);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ wtmr &= ~BIT(i);
+ break;
+ default:
+ pr_debug("unexpected wire trigger mode");
+ return -EINVAL;
+ }
+
+ iwb_writel_relaxed(iwb_node, wtmr, GICV5_IWB_WTMR + (4 * n));
+
+ return 0;
+}
+
+static void gicv5_iwb_domain_set_desc(msi_alloc_info_t *alloc_info, struct msi_desc *desc)
+{
+ alloc_info->desc = desc;
+ alloc_info->hwirq = (u32)desc->data.icookie.value;
+}
+
+static int gicv5_iwb_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ irq_hw_number_t *hwirq,
+ unsigned int *type)
+{
+ if (!is_of_node(fwspec->fwnode))
+ return -EINVAL;
+
+ if (fwspec->param_count < 2)
+ return -EINVAL;
+
+ /*
+ * param[0] is the wire number
+ * param[1] is the interrupt type
+ */
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static void gicv5_iwb_write_msi_msg(struct irq_data *d, struct msi_msg *msg) {}
+
+static const struct msi_domain_template iwb_msi_template = {
+ .chip = {
+ .name = "GICv5-IWB",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_enable = gicv5_iwb_irq_enable,
+ .irq_disable = gicv5_iwb_irq_disable,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = gicv5_iwb_set_type,
+ .irq_write_msi_msg = gicv5_iwb_write_msi_msg,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_get_irqchip_state = irq_chip_get_parent_state,
+ .irq_set_irqchip_state = irq_chip_set_parent_state,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+ },
+
+ .ops = {
+ .set_desc = gicv5_iwb_domain_set_desc,
+ .msi_translate = gicv5_iwb_irq_domain_translate,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_USE_DEV_FWNODE,
+ },
+
+ .alloc_info = {
+ .flags = MSI_ALLOC_FLAGS_FIXED_MSG_DATA,
+ },
+};
+
+static bool gicv5_iwb_create_device_domain(struct device *dev, unsigned int size,
+ struct gicv5_iwb_chip_data *iwb_node)
+{
+ if (WARN_ON_ONCE(!dev->msi.domain))
+ return false;
+
+ return msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
+ &iwb_msi_template, size,
+ NULL, iwb_node);
+}
+
+static struct gicv5_iwb_chip_data *
+gicv5_iwb_init_bases(void __iomem *iwb_base, struct platform_device *pdev)
+{
+ u32 nr_wires, idr0, cr0;
+ unsigned int n;
+ int ret;
+
+ struct gicv5_iwb_chip_data *iwb_node __free(kfree) = kzalloc(sizeof(*iwb_node),
+ GFP_KERNEL);
+ if (!iwb_node)
+ return ERR_PTR(-ENOMEM);
+
+ iwb_node->iwb_base = iwb_base;
+
+ idr0 = iwb_readl_relaxed(iwb_node, GICV5_IWB_IDR0);
+ nr_wires = (FIELD_GET(GICV5_IWB_IDR0_IW_RANGE, idr0) + 1) * 32;
+
+ cr0 = iwb_readl_relaxed(iwb_node, GICV5_IWB_CR0);
+ if (!FIELD_GET(GICV5_IWB_CR0_IWBEN, cr0)) {
+ dev_err(&pdev->dev, "IWB must be enabled in firmware\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ iwb_node->nr_regs = FIELD_GET(GICV5_IWB_IDR0_IW_RANGE, idr0) + 1;
+
+ for (n = 0; n < iwb_node->nr_regs; n++)
+ iwb_writel_relaxed(iwb_node, 0, GICV5_IWB_WENABLER + (sizeof(u32) * n));
+
+ ret = gicv5_iwb_wait_for_wenabler(iwb_node);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!gicv5_iwb_create_device_domain(&pdev->dev, nr_wires, iwb_node))
+ return ERR_PTR(-ENOMEM);
+
+ return_ptr(iwb_node);
+}
+
+static int gicv5_iwb_device_probe(struct platform_device *pdev)
+{
+ struct gicv5_iwb_chip_data *iwb_node;
+ void __iomem *iwb_base;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ iwb_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!iwb_base) {
+ dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
+ return -ENOMEM;
+ }
+
+ iwb_node = gicv5_iwb_init_bases(iwb_base, pdev);
+ if (IS_ERR(iwb_node))
+ return PTR_ERR(iwb_node);
+
+ return 0;
+}
+
+static const struct of_device_id gicv5_iwb_of_match[] = {
+ { .compatible = "arm,gic-v5-iwb" },
+ { /* END */ }
+};
+MODULE_DEVICE_TABLE(of, gicv5_iwb_of_match);
+
+static struct platform_driver gicv5_iwb_platform_driver = {
+ .driver = {
+ .name = "GICv5 IWB",
+ .of_match_table = gicv5_iwb_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = gicv5_iwb_device_probe,
+};
+
+module_platform_driver(gicv5_iwb_platform_driver);
diff --git a/drivers/irqchip/irq-gic-v5.c b/drivers/irqchip/irq-gic-v5.c
new file mode 100644
index 000000000000..4bd224f359a7
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v5.c
@@ -0,0 +1,1137 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved.
+ */
+
+#define pr_fmt(fmt) "GICv5: " fmt
+
+#include <linux/cpuhotplug.h>
+#include <linux/idr.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/wordpart.h>
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-v5.h>
+#include <linux/irqchip/arm-vgic-info.h>
+
+#include <asm/cpufeature.h>
+#include <asm/exception.h>
+
+static u8 pri_bits __ro_after_init = 5;
+
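+/*
+ * GICV5_IRQ_PRI_MI is the default (lowest) interrupt priority expressible
+ * with the number of implemented priority bits: the unimplemented
+ * low-order bits of the 5-bit priority field are kept at zero.
+ */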
+#define GICV5_IRQ_PRI_MASK 0x1f
+#define GICV5_IRQ_PRI_MI (GICV5_IRQ_PRI_MASK & GENMASK(4, 5 - pri_bits))
+
+#define PPI_NR 128
+
+static bool gicv5_cpuif_has_gcie(void)
+{
+ return this_cpu_has_cap(ARM64_HAS_GICV5_CPUIF);
+}
+
+struct gicv5_chip_data gicv5_global_data __read_mostly;
+
+static DEFINE_IDA(lpi_ida);
+static u32 num_lpis __ro_after_init;
+
+void __init gicv5_init_lpis(u32 lpis)
+{
+ num_lpis = lpis;
+}
+
+void __init gicv5_deinit_lpis(void)
+{
+ num_lpis = 0;
+}
+
+static int alloc_lpi(void)
+{
+ if (!num_lpis)
+ return -ENOSPC;
+
+ return ida_alloc_max(&lpi_ida, num_lpis - 1, GFP_KERNEL);
+}
+
+static void release_lpi(u32 lpi)
+{
+ ida_free(&lpi_ida, lpi);
+}
+
+int gicv5_alloc_lpi(void)
+{
+ return alloc_lpi();
+}
+
+void gicv5_free_lpi(u32 lpi)
+{
+ release_lpi(lpi);
+}
+
+static void gicv5_ppi_priority_init(void)
+{
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR0_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR1_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR2_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR3_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR4_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR5_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR6_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR7_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR8_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR9_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR10_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR11_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR12_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR13_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR14_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR15_EL1);
+
+ /*
+ * A context synchronization event is required to make sure the
+ * system register write effects above are synchronised.
+ */
+ isb();
+}
+
+static void gicv5_hwirq_init(irq_hw_number_t hwirq, u8 priority, u8 hwirq_type)
+{
+ u64 cdpri, cdaff;
+ u16 iaffid;
+ int ret;
+
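+ /*
+ * Only LPIs and SPIs take the CDPRI/CDAFF configuration; PPIs have
+ * fixed per-CPU routing and are prioritised via the ICC_PPI_PRIORITYR
+ * system registers instead.
+ */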
+ if (hwirq_type == GICV5_HWIRQ_TYPE_LPI || hwirq_type == GICV5_HWIRQ_TYPE_SPI) {
+ cdpri = FIELD_PREP(GICV5_GIC_CDPRI_PRIORITY_MASK, priority) |
+ FIELD_PREP(GICV5_GIC_CDPRI_TYPE_MASK, hwirq_type) |
+ FIELD_PREP(GICV5_GIC_CDPRI_ID_MASK, hwirq);
+ gic_insn(cdpri, CDPRI);
+
+ ret = gicv5_irs_cpu_to_iaffid(smp_processor_id(), &iaffid);
+
+ if (WARN_ON_ONCE(ret))
+ return;
+
+ cdaff = FIELD_PREP(GICV5_GIC_CDAFF_IAFFID_MASK, iaffid) |
+ FIELD_PREP(GICV5_GIC_CDAFF_TYPE_MASK, hwirq_type) |
+ FIELD_PREP(GICV5_GIC_CDAFF_ID_MASK, hwirq);
+ gic_insn(cdaff, CDAFF);
+ }
+}
+
+static void gicv5_ppi_irq_mask(struct irq_data *d)
+{
+ u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64);
+
+ if (d->hwirq < 64)
+ sysreg_clear_set_s(SYS_ICC_PPI_ENABLER0_EL1, hwirq_id_bit, 0);
+ else
+ sysreg_clear_set_s(SYS_ICC_PPI_ENABLER1_EL1, hwirq_id_bit, 0);
+
+ /*
+ * We must ensure that the disable takes effect immediately to
+ * guarantee that the lazy-disabled IRQ mechanism works.
+ * A context synchronization event is required to guarantee it.
+ * Reference: I_ZLTKB/R_YRGMH GICv5 specification - section 2.9.1.
+ */
+ isb();
+}
+
+static void gicv5_iri_irq_mask(struct irq_data *d, u8 hwirq_type)
+{
+ u64 cddis;
+
+ cddis = FIELD_PREP(GICV5_GIC_CDDIS_ID_MASK, d->hwirq) |
+ FIELD_PREP(GICV5_GIC_CDDIS_TYPE_MASK, hwirq_type);
+
+ gic_insn(cddis, CDDIS);
+ /*
+ * We must make sure that GIC CDDIS write effects are propagated
+ * immediately to make sure the disable takes effect to guarantee
+ * that the lazy-disabled IRQ mechanism works.
+ * Rule R_XCLJC states that the effects of a GIC system instruction
+ * complete in finite time.
+ * The GSB ensures completion of the GIC instruction and prevents
+ * loads, stores and GIC instructions from executing part of their
+ * functionality before the GSB SYS.
+ */
+ gsb_sys();
+}
+
+static void gicv5_spi_irq_mask(struct irq_data *d)
+{
+ gicv5_iri_irq_mask(d, GICV5_HWIRQ_TYPE_SPI);
+}
+
+static void gicv5_lpi_irq_mask(struct irq_data *d)
+{
+ gicv5_iri_irq_mask(d, GICV5_HWIRQ_TYPE_LPI);
+}
+
+static void gicv5_ppi_irq_unmask(struct irq_data *d)
+{
+ u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64);
+
+ if (d->hwirq < 64)
+ sysreg_clear_set_s(SYS_ICC_PPI_ENABLER0_EL1, 0, hwirq_id_bit);
+ else
+ sysreg_clear_set_s(SYS_ICC_PPI_ENABLER1_EL1, 0, hwirq_id_bit);
+ /*
+ * We must ensure that the enable takes effect in finite time - a
+ * context synchronization event is required to guarantee it; we
+ * cannot take for granted that it would happen otherwise (e.g. a core
+ * going straight into idle after enabling a PPI).
+ * Reference: I_ZLTKB/R_YRGMH GICv5 specification - section 2.9.1.
+ */
+ isb();
+}
+
+static void gicv5_iri_irq_unmask(struct irq_data *d, u8 hwirq_type)
+{
+ u64 cden;
+
+ cden = FIELD_PREP(GICV5_GIC_CDEN_ID_MASK, d->hwirq) |
+ FIELD_PREP(GICV5_GIC_CDEN_TYPE_MASK, hwirq_type);
+ /*
+ * Rule R_XCLJC states that the effects of a GIC system instruction
+ * complete in finite time and that's the only requirement when
+ * unmasking an SPI/LPI IRQ.
+ */
+ gic_insn(cden, CDEN);
+}
+
+static void gicv5_spi_irq_unmask(struct irq_data *d)
+{
+ gicv5_iri_irq_unmask(d, GICV5_HWIRQ_TYPE_SPI);
+}
+
+static void gicv5_lpi_irq_unmask(struct irq_data *d)
+{
+ gicv5_iri_irq_unmask(d, GICV5_HWIRQ_TYPE_LPI);
+}
+
+static void gicv5_hwirq_eoi(u32 hwirq_id, u8 hwirq_type)
+{
+ u64 cddi;
+
+ cddi = FIELD_PREP(GICV5_GIC_CDDI_ID_MASK, hwirq_id) |
+ FIELD_PREP(GICV5_GIC_CDDI_TYPE_MASK, hwirq_type);
+
+ gic_insn(cddi, CDDI);
+
+ gic_insn(0, CDEOI);
+}
+
+static void gicv5_ppi_irq_eoi(struct irq_data *d)
+{
+ /* Skip deactivate for forwarded PPI interrupts */
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ gic_insn(0, CDEOI);
+ return;
+ }
+
+ gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_PPI);
+}
+
+static void gicv5_spi_irq_eoi(struct irq_data *d)
+{
+ gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_SPI);
+}
+
+static void gicv5_lpi_irq_eoi(struct irq_data *d)
+{
+ gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_LPI);
+}
+
+static int gicv5_iri_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force, u8 hwirq_type)
+{
+ int ret, cpuid;
+ u16 iaffid;
+ u64 cdaff;
+
+ if (force)
+ cpuid = cpumask_first(mask_val);
+ else
+ cpuid = cpumask_any_and(mask_val, cpu_online_mask);
+
+ ret = gicv5_irs_cpu_to_iaffid(cpuid, &iaffid);
+ if (ret)
+ return ret;
+
+ cdaff = FIELD_PREP(GICV5_GIC_CDAFF_IAFFID_MASK, iaffid) |
+ FIELD_PREP(GICV5_GIC_CDAFF_TYPE_MASK, hwirq_type) |
+ FIELD_PREP(GICV5_GIC_CDAFF_ID_MASK, d->hwirq);
+ gic_insn(cdaff, CDAFF);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpuid));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static int gicv5_spi_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ return gicv5_iri_irq_set_affinity(d, mask_val, force,
+ GICV5_HWIRQ_TYPE_SPI);
+}
+
+static int gicv5_lpi_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ return gicv5_iri_irq_set_affinity(d, mask_val, force,
+ GICV5_HWIRQ_TYPE_LPI);
+}
+
+enum ppi_reg {
+ PPI_PENDING,
+ PPI_ACTIVE,
+ PPI_HM
+};
+
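+/*
+ * PPI state is split across two 64-bit system registers: PPIs 0-63 live
+ * in the *R0_EL1 registers and PPIs 64-127 in *R1_EL1, hence the
+ * irq < 64 selection in the helpers below.
+ */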
+static __always_inline u64 read_ppi_sysreg_s(unsigned int irq,
+ const enum ppi_reg which)
+{
+ switch (which) {
+ case PPI_PENDING:
+ return irq < 64 ? read_sysreg_s(SYS_ICC_PPI_SPENDR0_EL1) :
+ read_sysreg_s(SYS_ICC_PPI_SPENDR1_EL1);
+ case PPI_ACTIVE:
+ return irq < 64 ? read_sysreg_s(SYS_ICC_PPI_SACTIVER0_EL1) :
+ read_sysreg_s(SYS_ICC_PPI_SACTIVER1_EL1);
+ case PPI_HM:
+ return irq < 64 ? read_sysreg_s(SYS_ICC_PPI_HMR0_EL1) :
+ read_sysreg_s(SYS_ICC_PPI_HMR1_EL1);
+ default:
+ BUILD_BUG_ON(1);
+ }
+}
+
+static __always_inline void write_ppi_sysreg_s(unsigned int irq, bool set,
+ const enum ppi_reg which)
+{
+ u64 bit = BIT_ULL(irq % 64);
+
+ switch (which) {
+ case PPI_PENDING:
+ if (set) {
+ if (irq < 64)
+ write_sysreg_s(bit, SYS_ICC_PPI_SPENDR0_EL1);
+ else
+ write_sysreg_s(bit, SYS_ICC_PPI_SPENDR1_EL1);
+ } else {
+ if (irq < 64)
+ write_sysreg_s(bit, SYS_ICC_PPI_CPENDR0_EL1);
+ else
+ write_sysreg_s(bit, SYS_ICC_PPI_CPENDR1_EL1);
+ }
+ return;
+ case PPI_ACTIVE:
+ if (set) {
+ if (irq < 64)
+ write_sysreg_s(bit, SYS_ICC_PPI_SACTIVER0_EL1);
+ else
+ write_sysreg_s(bit, SYS_ICC_PPI_SACTIVER1_EL1);
+ } else {
+ if (irq < 64)
+ write_sysreg_s(bit, SYS_ICC_PPI_CACTIVER0_EL1);
+ else
+ write_sysreg_s(bit, SYS_ICC_PPI_CACTIVER1_EL1);
+ }
+ return;
+ default:
+ BUILD_BUG_ON(1);
+ }
+}
+
+static int gicv5_ppi_irq_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64);
+
+ switch (which) {
+ case IRQCHIP_STATE_PENDING:
+ *state = !!(read_ppi_sysreg_s(d->hwirq, PPI_PENDING) & hwirq_id_bit);
+ return 0;
+ case IRQCHIP_STATE_ACTIVE:
+ *state = !!(read_ppi_sysreg_s(d->hwirq, PPI_ACTIVE) & hwirq_id_bit);
+ return 0;
+ default:
+ pr_debug("Unexpected PPI irqchip state\n");
+ return -EINVAL;
+ }
+}
+
+static int gicv5_iri_irq_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *state, u8 hwirq_type)
+{
+ u64 icsr, cdrcfg;
+
+ cdrcfg = d->hwirq | FIELD_PREP(GICV5_GIC_CDRCFG_TYPE_MASK, hwirq_type);
+
+ gic_insn(cdrcfg, CDRCFG);
+ isb();
+ icsr = read_sysreg_s(SYS_ICC_ICSR_EL1);
+
+ if (FIELD_GET(ICC_ICSR_EL1_F, icsr)) {
+ pr_err("ICSR_EL1 is invalid\n");
+ return -EINVAL;
+ }
+
+ switch (which) {
+ case IRQCHIP_STATE_PENDING:
+ *state = !!(FIELD_GET(ICC_ICSR_EL1_Pending, icsr));
+ return 0;
+
+ case IRQCHIP_STATE_ACTIVE:
+ *state = !!(FIELD_GET(ICC_ICSR_EL1_Active, icsr));
+ return 0;
+
+ default:
+ pr_debug("Unexpected irqchip_irq_state\n");
+ return -EINVAL;
+ }
+}
+
+static int gicv5_spi_irq_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ return gicv5_iri_irq_get_irqchip_state(d, which, state,
+ GICV5_HWIRQ_TYPE_SPI);
+}
+
+static int gicv5_lpi_irq_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ return gicv5_iri_irq_get_irqchip_state(d, which, state,
+ GICV5_HWIRQ_TYPE_LPI);
+}
+
+static int gicv5_ppi_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ switch (which) {
+ case IRQCHIP_STATE_PENDING:
+ write_ppi_sysreg_s(d->hwirq, state, PPI_PENDING);
+ return 0;
+ case IRQCHIP_STATE_ACTIVE:
+ write_ppi_sysreg_s(d->hwirq, state, PPI_ACTIVE);
+ return 0;
+ default:
+ pr_debug("Unexpected PPI irqchip state\n");
+ return -EINVAL;
+ }
+}
+
+static void gicv5_iri_irq_write_pending_state(struct irq_data *d, bool state,
+ u8 hwirq_type)
+{
+ u64 cdpend;
+
+ cdpend = FIELD_PREP(GICV5_GIC_CDPEND_TYPE_MASK, hwirq_type) |
+ FIELD_PREP(GICV5_GIC_CDPEND_ID_MASK, d->hwirq) |
+ FIELD_PREP(GICV5_GIC_CDPEND_PENDING_MASK, state);
+
+ gic_insn(cdpend, CDPEND);
+}
+
+static void gicv5_spi_irq_write_pending_state(struct irq_data *d, bool state)
+{
+ gicv5_iri_irq_write_pending_state(d, state, GICV5_HWIRQ_TYPE_SPI);
+}
+
+static void gicv5_lpi_irq_write_pending_state(struct irq_data *d, bool state)
+{
+ gicv5_iri_irq_write_pending_state(d, state, GICV5_HWIRQ_TYPE_LPI);
+}
+
+static int gicv5_spi_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ switch (which) {
+ case IRQCHIP_STATE_PENDING:
+ gicv5_spi_irq_write_pending_state(d, state);
+ break;
+ default:
+ pr_debug("Unexpected irqchip_irq_state\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int gicv5_lpi_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ switch (which) {
+ case IRQCHIP_STATE_PENDING:
+ gicv5_lpi_irq_write_pending_state(d, state);
+ break;
+
+ default:
+ pr_debug("Unexpected irqchip_irq_state\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int gicv5_spi_irq_retrigger(struct irq_data *data)
+{
+ return !gicv5_spi_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING,
+ true);
+}
+
+static int gicv5_lpi_irq_retrigger(struct irq_data *data)
+{
+ return !gicv5_lpi_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING,
+ true);
+}
+
+static void gicv5_ipi_send_single(struct irq_data *d, unsigned int cpu)
+{
+ /* Mark the LPI pending */
+ irq_chip_retrigger_hierarchy(d);
+}
+
+static bool gicv5_ppi_irq_is_level(irq_hw_number_t hwirq)
+{
+ u64 bit = BIT_ULL(hwirq % 64);
+
+ return !!(read_ppi_sysreg_s(hwirq, PPI_HM) & bit);
+}
+
+static int gicv5_ppi_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+ if (vcpu)
+ irqd_set_forwarded_to_vcpu(d);
+ else
+ irqd_clr_forwarded_to_vcpu(d);
+
+ return 0;
+}
+
+static const struct irq_chip gicv5_ppi_irq_chip = {
+ .name = "GICv5-PPI",
+ .irq_mask = gicv5_ppi_irq_mask,
+ .irq_unmask = gicv5_ppi_irq_unmask,
+ .irq_eoi = gicv5_ppi_irq_eoi,
+ .irq_get_irqchip_state = gicv5_ppi_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gicv5_ppi_irq_set_irqchip_state,
+ .irq_set_vcpu_affinity = gicv5_ppi_irq_set_vcpu_affinity,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static const struct irq_chip gicv5_spi_irq_chip = {
+ .name = "GICv5-SPI",
+ .irq_mask = gicv5_spi_irq_mask,
+ .irq_unmask = gicv5_spi_irq_unmask,
+ .irq_eoi = gicv5_spi_irq_eoi,
+ .irq_set_type = gicv5_spi_irq_set_type,
+ .irq_set_affinity = gicv5_spi_irq_set_affinity,
+ .irq_retrigger = gicv5_spi_irq_retrigger,
+ .irq_get_irqchip_state = gicv5_spi_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gicv5_spi_irq_set_irqchip_state,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static const struct irq_chip gicv5_lpi_irq_chip = {
+ .name = "GICv5-LPI",
+ .irq_mask = gicv5_lpi_irq_mask,
+ .irq_unmask = gicv5_lpi_irq_unmask,
+ .irq_eoi = gicv5_lpi_irq_eoi,
+ .irq_set_affinity = gicv5_lpi_irq_set_affinity,
+ .irq_retrigger = gicv5_lpi_irq_retrigger,
+ .irq_get_irqchip_state = gicv5_lpi_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gicv5_lpi_irq_set_irqchip_state,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static const struct irq_chip gicv5_ipi_irq_chip = {
+ .name = "GICv5-IPI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_get_irqchip_state = irq_chip_get_parent_state,
+ .irq_set_irqchip_state = irq_chip_set_parent_state,
+ .ipi_send_single = gicv5_ipi_send_single,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static __always_inline int gicv5_irq_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ irq_hw_number_t *hwirq,
+ unsigned int *type,
+ const u8 hwirq_type)
+{
+ if (!is_of_node(fwspec->fwnode))
+ return -EINVAL;
+
+ if (fwspec->param_count < 3)
+ return -EINVAL;
+
+ if (fwspec->param[0] != hwirq_type)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[1];
+
+ switch (hwirq_type) {
+ case GICV5_HWIRQ_TYPE_PPI:
+ /*
+ * Handling mode is hardcoded for PPIs, set the type using
+ * HW reported value.
+ */
+ *type = gicv5_ppi_irq_is_level(*hwirq) ? IRQ_TYPE_LEVEL_LOW :
+ IRQ_TYPE_EDGE_RISING;
+ break;
+ case GICV5_HWIRQ_TYPE_SPI:
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ break;
+ default:
+ BUILD_BUG_ON(1);
+ }
+
+ return 0;
+}
+
+static int gicv5_irq_ppi_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ irq_hw_number_t *hwirq,
+ unsigned int *type)
+{
+ return gicv5_irq_domain_translate(d, fwspec, hwirq, type,
+ GICV5_HWIRQ_TYPE_PPI);
+}
+
+static int gicv5_irq_ppi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+ int ret;
+
+ if (WARN_ON_ONCE(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = gicv5_irq_ppi_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_status_flags(virq, IRQ_LEVEL);
+
+ irq_set_percpu_devid(virq);
+ irq_domain_set_info(domain, virq, hwirq, &gicv5_ppi_irq_chip, NULL,
+ handle_percpu_devid_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void gicv5_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d;
+
+ if (WARN_ON_ONCE(nr_irqs != 1))
+ return;
+
+ d = irq_domain_get_irq_data(domain, virq);
+
+ irq_set_handler(virq, NULL);
+ irq_domain_reset_irq_data(d);
+}
+
+static int gicv5_irq_ppi_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ if (fwspec->fwnode != d->fwnode)
+ return 0;
+
+ if (fwspec->param[0] != GICV5_HWIRQ_TYPE_PPI)
+ return 0;
+
+ return (d == gicv5_global_data.ppi_domain);
+}
+
+static const struct irq_domain_ops gicv5_irq_ppi_domain_ops = {
+ .translate = gicv5_irq_ppi_domain_translate,
+ .alloc = gicv5_irq_ppi_domain_alloc,
+ .free = gicv5_irq_domain_free,
+ .select = gicv5_irq_ppi_domain_select
+};
+
+static int gicv5_irq_spi_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ irq_hw_number_t *hwirq,
+ unsigned int *type)
+{
+ return gicv5_irq_domain_translate(d, fwspec, hwirq, type,
+ GICV5_HWIRQ_TYPE_SPI);
+}
+
+static int gicv5_irq_spi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct gicv5_irs_chip_data *chip_data;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = arg;
+ struct irq_data *irqd;
+ irq_hw_number_t hwirq;
+ int ret;
+
+ if (WARN_ON_ONCE(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = gicv5_irq_spi_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ irqd = irq_desc_get_irq_data(irq_to_desc(virq));
+ chip_data = gicv5_irs_lookup_by_spi_id(hwirq);
+
+ irq_domain_set_info(domain, virq, hwirq, &gicv5_spi_irq_chip, chip_data,
+ handle_fasteoi_irq, NULL, NULL);
+ irq_set_probe(virq);
+ irqd_set_single_target(irqd);
+
+ gicv5_hwirq_init(hwirq, GICV5_IRQ_PRI_MI, GICV5_HWIRQ_TYPE_SPI);
+
+ return 0;
+}
+
+static int gicv5_irq_spi_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ if (fwspec->fwnode != d->fwnode)
+ return 0;
+
+ if (fwspec->param[0] != GICV5_HWIRQ_TYPE_SPI)
+ return 0;
+
+ return (d == gicv5_global_data.spi_domain);
+}
+
+static const struct irq_domain_ops gicv5_irq_spi_domain_ops = {
+ .translate = gicv5_irq_spi_domain_translate,
+ .alloc = gicv5_irq_spi_domain_alloc,
+ .free = gicv5_irq_domain_free,
+ .select = gicv5_irq_spi_domain_select
+};
+
+static void gicv5_lpi_config_reset(struct irq_data *d)
+{
+ u64 cdhm;
+
+ /*
+ * Reset LPIs handling mode to edge by default and clear pending
+ * state to make sure we start the LPI with a clean state from
+ * previous incarnations.
+ */
+ cdhm = FIELD_PREP(GICV5_GIC_CDHM_HM_MASK, 0) |
+ FIELD_PREP(GICV5_GIC_CDHM_TYPE_MASK, GICV5_HWIRQ_TYPE_LPI) |
+ FIELD_PREP(GICV5_GIC_CDHM_ID_MASK, d->hwirq);
+ gic_insn(cdhm, CDHM);
+
+ gicv5_lpi_irq_write_pending_state(d, false);
+}
+
+static int gicv5_irq_lpi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ irq_hw_number_t hwirq;
+ struct irq_data *irqd;
+ u32 *lpi = arg;
+ int ret;
+
+ if (WARN_ON_ONCE(nr_irqs != 1))
+ return -EINVAL;
+
+ hwirq = *lpi;
+
+ irqd = irq_domain_get_irq_data(domain, virq);
+
+ irq_domain_set_info(domain, virq, hwirq, &gicv5_lpi_irq_chip, NULL,
+ handle_fasteoi_irq, NULL, NULL);
+ irqd_set_single_target(irqd);
+
+ ret = gicv5_irs_iste_alloc(hwirq);
+ if (ret < 0)
+ return ret;
+
+ gicv5_hwirq_init(hwirq, GICV5_IRQ_PRI_MI, GICV5_HWIRQ_TYPE_LPI);
+ gicv5_lpi_config_reset(irqd);
+
+ return 0;
+}
+
+static const struct irq_domain_ops gicv5_irq_lpi_domain_ops = {
+ .alloc = gicv5_irq_lpi_domain_alloc,
+ .free = gicv5_irq_domain_free,
+};
+
+void __init gicv5_init_lpi_domain(void)
+{
+ struct irq_domain *d;
+
+ d = irq_domain_create_tree(NULL, &gicv5_irq_lpi_domain_ops, NULL);
+ gicv5_global_data.lpi_domain = d;
+}
+
+void __init gicv5_free_lpi_domain(void)
+{
+ irq_domain_remove(gicv5_global_data.lpi_domain);
+ gicv5_global_data.lpi_domain = NULL;
+}
+
+static int gicv5_irq_ipi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_data *irqd;
+ int ret, i;
+ u32 lpi;
+
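+ /*
+ * Each IPI is backed by an LPI allocated from the parent LPI domain;
+ * the IPI hwirq is simply its index in the IPI range.
+ */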
+ for (i = 0; i < nr_irqs; i++) {
+ ret = gicv5_alloc_lpi();
+ if (ret < 0)
+ return ret;
+
+ lpi = ret;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi);
+ if (ret) {
+ gicv5_free_lpi(lpi);
+ return ret;
+ }
+
+ irqd = irq_domain_get_irq_data(domain, virq + i);
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+ &gicv5_ipi_irq_chip, NULL);
+
+ irqd_set_single_target(irqd);
+
+ irq_set_handler(virq + i, handle_percpu_irq);
+ }
+
+ return 0;
+}
+
+static void gicv5_irq_ipi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d;
+ unsigned int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ d = irq_domain_get_irq_data(domain, virq + i);
+
+ if (!d)
+ return;
+
+ gicv5_free_lpi(d->parent_data->hwirq);
+
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ irq_domain_free_irqs_parent(domain, virq + i, 1);
+ }
+}
+
+static const struct irq_domain_ops gicv5_irq_ipi_domain_ops = {
+ .alloc = gicv5_irq_ipi_domain_alloc,
+ .free = gicv5_irq_ipi_domain_free,
+};
+
+static void handle_irq_per_domain(u32 hwirq)
+{
+ u8 hwirq_type = FIELD_GET(GICV5_HWIRQ_TYPE, hwirq);
+ u32 hwirq_id = FIELD_GET(GICV5_HWIRQ_ID, hwirq);
+ struct irq_domain *domain;
+
+ switch (hwirq_type) {
+ case GICV5_HWIRQ_TYPE_PPI:
+ domain = gicv5_global_data.ppi_domain;
+ break;
+ case GICV5_HWIRQ_TYPE_SPI:
+ domain = gicv5_global_data.spi_domain;
+ break;
+ case GICV5_HWIRQ_TYPE_LPI:
+ domain = gicv5_global_data.lpi_domain;
+ break;
+ default:
+ pr_err_once("Unknown IRQ type, bail out\n");
+ return;
+ }
+
+ if (generic_handle_domain_irq(domain, hwirq_id)) {
+ pr_err_once("Could not handle, hwirq = 0x%x", hwirq_id);
+ gicv5_hwirq_eoi(hwirq_id, hwirq_type);
+ }
+}
+
+static void __exception_irq_entry gicv5_handle_irq(struct pt_regs *regs)
+{
+ bool valid;
+ u32 hwirq;
+ u64 ia;
+
+ ia = gicr_insn(CDIA);
+ valid = GICV5_GICR_CDIA_VALID(ia);
+
+ if (!valid)
+ return;
+
+ /*
+ * Ensure that the CDIA instruction effects (i.e. IRQ activation) are
+ * completed before handling the interrupt.
+ */
+ gsb_ack();
+
+ /*
+ * Ensure instruction ordering between an acknowledgment and subsequent
+ * instructions in the IRQ handler using an ISB.
+ */
+ isb();
+
+ hwirq = FIELD_GET(GICV5_HWIRQ_INTID, ia);
+
+ handle_irq_per_domain(hwirq);
+}
+
+static void gicv5_cpu_disable_interrupts(void)
+{
+ u64 cr0;
+
+ cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 0);
+ write_sysreg_s(cr0, SYS_ICC_CR0_EL1);
+}
+
+static void gicv5_cpu_enable_interrupts(void)
+{
+ u64 cr0, pcr;
+
+ write_sysreg_s(0, SYS_ICC_PPI_ENABLER0_EL1);
+ write_sysreg_s(0, SYS_ICC_PPI_ENABLER1_EL1);
+
+ gicv5_ppi_priority_init();
+
+ pcr = FIELD_PREP(ICC_PCR_EL1_PRIORITY, GICV5_IRQ_PRI_MI);
+ write_sysreg_s(pcr, SYS_ICC_PCR_EL1);
+
+ cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 1);
+ write_sysreg_s(cr0, SYS_ICC_CR0_EL1);
+}
+
+static int base_ipi_virq;
+
+static int gicv5_starting_cpu(unsigned int cpu)
+{
+ if (WARN(!gicv5_cpuif_has_gcie(),
+ "GICv5 system components present but CPU does not have FEAT_GCIE"))
+ return -ENODEV;
+
+ gicv5_cpu_enable_interrupts();
+
+ return gicv5_irs_register_cpu(cpu);
+}
+
+static void __init gicv5_smp_init(void)
+{
+ unsigned int num_ipis = GICV5_IPIS_PER_CPU * nr_cpu_ids;
+
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+ "irqchip/arm/gicv5:starting",
+ gicv5_starting_cpu, NULL);
+
+ base_ipi_virq = irq_domain_alloc_irqs(gicv5_global_data.ipi_domain,
+ num_ipis, NUMA_NO_NODE, NULL);
+ if (WARN(base_ipi_virq <= 0, "IPI IRQ allocation was not successful"))
+ return;
+
+ set_smp_ipi_range_percpu(base_ipi_virq, GICV5_IPIS_PER_CPU, nr_cpu_ids);
+}
+
+static void __init gicv5_free_domains(void)
+{
+ if (gicv5_global_data.ppi_domain)
+ irq_domain_remove(gicv5_global_data.ppi_domain);
+ if (gicv5_global_data.spi_domain)
+ irq_domain_remove(gicv5_global_data.spi_domain);
+ if (gicv5_global_data.ipi_domain)
+ irq_domain_remove(gicv5_global_data.ipi_domain);
+
+ gicv5_global_data.ppi_domain = NULL;
+ gicv5_global_data.spi_domain = NULL;
+ gicv5_global_data.ipi_domain = NULL;
+}
+
+static int __init gicv5_init_domains(struct fwnode_handle *handle)
+{
+ u32 spi_count = gicv5_global_data.global_spi_count;
+ struct irq_domain *d;
+
+ d = irq_domain_create_linear(handle, PPI_NR, &gicv5_irq_ppi_domain_ops, NULL);
+ if (!d)
+ return -ENOMEM;
+
+ irq_domain_update_bus_token(d, DOMAIN_BUS_WIRED);
+ gicv5_global_data.ppi_domain = d;
+
+ if (spi_count) {
+ d = irq_domain_create_linear(handle, spi_count,
+ &gicv5_irq_spi_domain_ops, NULL);
+
+ if (!d) {
+ gicv5_free_domains();
+ return -ENOMEM;
+ }
+
+ gicv5_global_data.spi_domain = d;
+ irq_domain_update_bus_token(d, DOMAIN_BUS_WIRED);
+ }
+
+ if (!WARN(!gicv5_global_data.lpi_domain,
+ "LPI domain uninitialized, can't set up IPIs")) {
+ d = irq_domain_create_hierarchy(gicv5_global_data.lpi_domain,
+ 0, GICV5_IPIS_PER_CPU * nr_cpu_ids,
+ NULL, &gicv5_irq_ipi_domain_ops,
+ NULL);
+
+ if (!d) {
+ gicv5_free_domains();
+ return -ENOMEM;
+ }
+ gicv5_global_data.ipi_domain = d;
+ }
+ gicv5_global_data.fwnode = handle;
+
+ return 0;
+}
+
+static void gicv5_set_cpuif_pribits(void)
+{
+ u64 icc_idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1);
+
+ switch (FIELD_GET(ICC_IDR0_EL1_PRI_BITS, icc_idr0)) {
+ case ICC_IDR0_EL1_PRI_BITS_4BITS:
+ gicv5_global_data.cpuif_pri_bits = 4;
+ break;
+ case ICC_IDR0_EL1_PRI_BITS_5BITS:
+ gicv5_global_data.cpuif_pri_bits = 5;
+ break;
+ default:
+ pr_err("Unexpected ICC_IDR0_EL1_PRI_BITS value, default to 4");
+ gicv5_global_data.cpuif_pri_bits = 4;
+ break;
+ }
+}
+
+static void gicv5_set_cpuif_idbits(void)
+{
+ u32 icc_idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1);
+
+ switch (FIELD_GET(ICC_IDR0_EL1_ID_BITS, icc_idr0)) {
+ case ICC_IDR0_EL1_ID_BITS_16BITS:
+ gicv5_global_data.cpuif_id_bits = 16;
+ break;
+ case ICC_IDR0_EL1_ID_BITS_24BITS:
+ gicv5_global_data.cpuif_id_bits = 24;
+ break;
+ default:
+ pr_err("Unexpected ICC_IDR0_EL1_ID_BITS value, default to 16");
+ gicv5_global_data.cpuif_id_bits = 16;
+ break;
+ }
+}
+
+#ifdef CONFIG_KVM
+static struct gic_kvm_info gic_v5_kvm_info __initdata;
+
+static bool __init gicv5_cpuif_has_gcie_legacy(void)
+{
+ u64 idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1);
+ return !!FIELD_GET(ICC_IDR0_EL1_GCIE_LEGACY, idr0);
+}
+
+static void __init gic_of_setup_kvm_info(struct device_node *node)
+{
+ gic_v5_kvm_info.type = GIC_V5;
+ gic_v5_kvm_info.has_gcie_v3_compat = gicv5_cpuif_has_gcie_legacy();
+
+ /* GIC Virtual CPU interface maintenance interrupt */
+ gic_v5_kvm_info.no_maint_irq_mask = false;
+ gic_v5_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
+ if (!gic_v5_kvm_info.maint_irq) {
+ pr_warn("cannot find GICv5 virtual CPU interface maintenance interrupt\n");
+ return;
+ }
+
+ vgic_set_kvm_info(&gic_v5_kvm_info);
+}
+#else
+static inline void __init gic_of_setup_kvm_info(struct device_node *node)
+{
+}
+#endif // CONFIG_KVM
+
+static int __init gicv5_of_init(struct device_node *node, struct device_node *parent)
+{
+ int ret = gicv5_irs_of_probe(node);
+ if (ret)
+ return ret;
+
+ ret = gicv5_init_domains(of_fwnode_handle(node));
+ if (ret)
+ goto out_irs;
+
+ gicv5_set_cpuif_pribits();
+ gicv5_set_cpuif_idbits();
+
+ pri_bits = min_not_zero(gicv5_global_data.cpuif_pri_bits,
+ gicv5_global_data.irs_pri_bits);
+
+ ret = gicv5_starting_cpu(smp_processor_id());
+ if (ret)
+ goto out_dom;
+
+ ret = set_handle_irq(gicv5_handle_irq);
+ if (ret)
+ goto out_int;
+
+ ret = gicv5_irs_enable();
+ if (ret)
+ goto out_int;
+
+ gicv5_smp_init();
+
+ gicv5_irs_its_probe();
+
+ gic_of_setup_kvm_info(node);
+
+ return 0;
+
+out_int:
+ gicv5_cpu_disable_interrupts();
+out_dom:
+ gicv5_free_domains();
+out_irs:
+ gicv5_irs_remove();
+
+ return ret;
+}
+IRQCHIP_DECLARE(gic_v5, "arm,gic-v5", gicv5_of_init);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 6503573557fd..1269ab8eb726 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -54,7 +54,7 @@
static void gic_check_cpu_features(void)
{
- WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_GIC_CPUIF_SYSREGS),
+ WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_GICV3_CPUIF),
TAINT_CPU_OUT_OF_SPEC,
"GICv3 system registers enabled, broken firmware!\n");
}
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
index 246c30205af4..908944009c21 100644
--- a/drivers/irqchip/irq-msi-lib.c
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -137,7 +137,10 @@ int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
if (!ops)
return 0;
- if (fwspec->fwnode != d->fwnode || fwspec->param_count != 0)
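+ /*
+ * Domains flagged with IRQ_DOMAIN_FLAG_FWNODE_PARENT were created
+ * with their parent's fwnode, so match the fwspec against the
+ * fwnode's parent in that case; __free() drops the acquired
+ * reference on return.
+ */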
+ struct fwnode_handle *fwh __free(fwnode_handle) =
+ d->flags & IRQ_DOMAIN_FLAG_FWNODE_PARENT ? fwnode_get_parent(fwspec->fwnode)
+ : fwnode_handle_get(fwspec->fwnode);
+ if (fwh != d->fwnode || fwspec->param_count != 0)
return 0;
/* Handle pure domain searches */
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index d3232d6d8dce..54833717f8a7 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -177,6 +177,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
.ops = &gicp_domain_ops,
};
struct mvebu_gicp *gicp;
+ void __iomem *base;
int ret, i;
gicp = devm_kzalloc(&pdev->dev, sizeof(*gicp), GFP_KERNEL);
@@ -236,6 +237,15 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
return -ENODEV;
}
+ base = ioremap(gicp->res->start, resource_size(gicp->res));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap() failed. Unable to clear pending interrupts.\n");
+ } else {
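+ /*
+ * Clear any SPIs left pending by firmware by writing each
+ * interrupt number to the CLRSPI register.
+ */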
+ for (i = 0; i < 64; i++)
+ writel(i, base + GICP_CLRSPI_NSR_OFFSET);
+ iounmap(base);
+ }
+
return msi_create_parent_irq_domain(&info, &gicp_msi_parent_ops) ? 0 : -ENOMEM;
}
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index 74a2a28f9403..643c8e459611 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -308,7 +308,6 @@ static const struct msi_parent_ops imsic_msi_parent_ops = {
int imsic_irqdomain_init(void)
{
struct irq_domain_info info = {
- .fwnode = imsic->fwnode,
.ops = &imsic_base_domain_ops,
.host_data = imsic,
};
@@ -325,6 +324,7 @@ int imsic_irqdomain_init(void)
}
/* Create Base IRQ domain */
+ info.fwnode = imsic->fwnode;
imsic->base_domain = msi_create_parent_irq_domain(&info, &imsic_msi_parent_ops);
if (!imsic->base_domain) {
pr_err("%pfwP: failed to create IMSIC base domain\n", imsic->fwnode);
diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
index c9027f9c4bb7..8923d2df4704 100644
--- a/drivers/leds/blink/leds-lgm-sso.c
+++ b/drivers/leds/blink/leds-lgm-sso.c
@@ -471,7 +471,7 @@ static int sso_gpio_gc_init(struct device *dev, struct sso_led_priv *priv)
gc->get_direction = sso_gpio_get_dir;
gc->direction_output = sso_gpio_dir_out;
gc->get = sso_gpio_get;
- gc->set_rv = sso_gpio_set;
+ gc->set = sso_gpio_set;
gc->label = "lgm-sso";
gc->base = -1;
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index 55ca663ca506..5e08102a6784 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -136,6 +136,7 @@ config LEDS_TPS6131X
tristate "LED support for TI TPS6131x flash LED driver"
depends on I2C && OF
depends on GPIOLIB
+ depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
select REGMAP_I2C
help
This option enables support for Texas Instruments TPS61310/TPS61311
diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
index b4c19be51c4d..89cf5120f5d5 100644
--- a/drivers/leds/flash/leds-qcom-flash.c
+++ b/drivers/leds/flash/leds-qcom-flash.c
@@ -117,7 +117,7 @@ enum {
REG_MAX_COUNT,
};
-static struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
+static const struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
REG_FIELD(0x08, 0, 7), /* status1 */
REG_FIELD(0x09, 0, 7), /* status2 */
REG_FIELD(0x0a, 0, 7), /* status3 */
@@ -132,7 +132,7 @@ static struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
REG_FIELD(0x58, 0, 2), /* therm_thrsh3 */
};
-static struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
+static const struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
REG_FIELD(0x06, 0, 7), /* status1 */
REG_FIELD(0x07, 0, 6), /* status2 */
REG_FIELD(0x09, 0, 7), /* status3 */
@@ -854,11 +854,17 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
if (val == FLASH_SUBTYPE_3CH_PM8150_VAL || val == FLASH_SUBTYPE_3CH_PMI8998_VAL) {
flash_data->hw_type = QCOM_MVFLASH_3CH;
flash_data->max_channels = 3;
- regs = mvflash_3ch_regs;
+ regs = devm_kmemdup(dev, mvflash_3ch_regs, sizeof(mvflash_3ch_regs),
+ GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
} else if (val == FLASH_SUBTYPE_4CH_VAL) {
flash_data->hw_type = QCOM_MVFLASH_4CH;
flash_data->max_channels = 4;
- regs = mvflash_4ch_regs;
+ regs = devm_kmemdup(dev, mvflash_4ch_regs, sizeof(mvflash_4ch_regs),
+ GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
rc = regmap_read(regmap, reg_base + FLASH_REVISION_REG, &val);
if (rc < 0) {
@@ -880,6 +886,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
dev_err(dev, "Failed to allocate regmap field, rc=%d\n", rc);
return rc;
}
+ devm_kfree(dev, regs); /* devm_regmap_field_bulk_alloc() makes copies */
platform_set_drvdata(pdev, flash_data);
mutex_init(&flash_data->lock);
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 58592593b8e9..15633fbf3c16 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -256,7 +256,7 @@ static const struct class leds_class = {
* Returns the LED device parsed from the phandle specified in the "leds"
* property of a device tree node or a negative error-code on failure.
*/
-struct led_classdev *of_led_get(struct device_node *np, int index)
+static struct led_classdev *of_led_get(struct device_node *np, int index)
{
struct device *led_dev;
struct device_node *led_node;
@@ -270,7 +270,6 @@ struct led_classdev *of_led_get(struct device_node *np, int index)
return led_module_get(led_dev);
}
-EXPORT_SYMBOL_GPL(of_led_get);
/**
* led_put() - release a LED device
diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
index 02cb1565a9fb..94f8ef6b482c 100644
--- a/drivers/leds/leds-lp50xx.c
+++ b/drivers/leds/leds-lp50xx.c
@@ -476,6 +476,7 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
return -ENOMEM;
fwnode_for_each_child_node(child, led_node) {
+ int multi_index;
ret = fwnode_property_read_u32(led_node, "color",
&color_id);
if (ret) {
@@ -483,8 +484,16 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
dev_err(priv->dev, "Cannot read color\n");
return ret;
}
+ ret = fwnode_property_read_u32(led_node, "reg", &multi_index);
+ if (ret != 0) {
+ dev_err(priv->dev, "reg must be set\n");
+ return -EINVAL;
+ } else if (multi_index >= LP50XX_LEDS_PER_MODULE) {
+ dev_err(priv->dev, "reg %i out of range\n", multi_index);
+ return -EINVAL;
+ }
- mc_led_info[num_colors].color_index = color_id;
+ mc_led_info[multi_index].color_index = color_id;
num_colors++;
}
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 7d4c071a6cd0..0344189bb991 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -473,7 +473,7 @@ static int pca9532_configure(struct i2c_client *client,
data->gpio.label = "gpio-pca9532";
data->gpio.direction_input = pca9532_gpio_direction_input;
data->gpio.direction_output = pca9532_gpio_direction_output;
- data->gpio.set_rv = pca9532_gpio_set_value;
+ data->gpio.set = pca9532_gpio_set_value;
data->gpio.get = pca9532_gpio_get_value;
data->gpio.request = pca9532_gpio_request_pin;
data->gpio.can_sleep = 1;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 42fe056b1c74..2007fe6217ec 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -587,7 +587,7 @@ static int pca955x_probe(struct i2c_client *client)
struct pca955x_platform_data *pdata;
bool keep_psc0 = false;
bool set_default_label = false;
- char default_label[8];
+ char default_label[4];
int bit, err, reg;
chip = i2c_get_match_data(client);
@@ -693,7 +693,7 @@ static int pca955x_probe(struct i2c_client *client)
}
if (set_default_label) {
- snprintf(default_label, sizeof(default_label), "%u", i);
+ snprintf(default_label, sizeof(default_label), "%hhu", i);
init_data.default_label = default_label;
} else {
init_data.default_label = NULL;
@@ -737,7 +737,7 @@ static int pca955x_probe(struct i2c_client *client)
pca955x->gpio.label = "gpio-pca955x";
pca955x->gpio.direction_input = pca955x_gpio_direction_input;
pca955x->gpio.direction_output = pca955x_gpio_direction_output;
- pca955x->gpio.set_rv = pca955x_gpio_set_value;
+ pca955x->gpio.set = pca955x_gpio_set_value;
pca955x->gpio.get = pca955x_gpio_get_value;
pca955x->gpio.request = pca955x_gpio_request_pin;
pca955x->gpio.free = pca955x_gpio_free_pin;
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 89c165c8ee9c..fd0e8bab9a4b 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -637,7 +637,7 @@ static int tca6507_probe_gpios(struct device *dev,
tca->gpio.base = -1;
tca->gpio.owner = THIS_MODULE;
tca->gpio.direction_output = tca6507_gpio_direction_output;
- tca->gpio.set_rv = tca6507_gpio_set_value;
+ tca->gpio.set = tca6507_gpio_set_value;
tca->gpio.parent = dev;
err = devm_gpiochip_add_data(dev, &tca->gpio, tca);
if (err) {
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 4e048e08c4fd..c15efe3e5078 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -68,7 +68,6 @@ struct led_netdev_data {
unsigned int last_activity;
unsigned long mode;
- unsigned long blink_delay;
int link_speed;
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_link_modes);
u8 duplex;
@@ -87,10 +86,6 @@ static void set_baseline_state(struct led_netdev_data *trigger_data)
/* Already validated, hw control is possible with the requested mode */
if (trigger_data->hw_control) {
led_cdev->hw_control_set(led_cdev, trigger_data->mode);
- if (led_cdev->blink_set) {
- led_cdev->blink_set(led_cdev, &trigger_data->blink_delay,
- &trigger_data->blink_delay);
- }
return;
}
@@ -459,11 +454,10 @@ static ssize_t interval_store(struct device *dev,
size_t size)
{
struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
- struct led_classdev *led_cdev = trigger_data->led_cdev;
unsigned long value;
int ret;
- if (trigger_data->hw_control && !led_cdev->blink_set)
+ if (trigger_data->hw_control)
return -EINVAL;
ret = kstrtoul(buf, 0, &value);
@@ -472,13 +466,9 @@ static ssize_t interval_store(struct device *dev,
/* impose some basic bounds on the timer interval */
if (value >= 5 && value <= 10000) {
- if (trigger_data->hw_control) {
- trigger_data->blink_delay = value;
- } else {
- cancel_delayed_work_sync(&trigger_data->work);
+ cancel_delayed_work_sync(&trigger_data->work);
- atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
- }
+ atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
set_baseline_state(trigger_data); /* resets timer */
}
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 4fef4797b110..02432d4a5ccd 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -36,6 +36,15 @@ config ARM_MHU_V3
that provides different means of transports: supported extensions
will be discovered and possibly managed at probe-time.
+config AST2700_MBOX
+ tristate "ASPEED AST2700 IPC driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ help
+ Mailbox driver implementation for ASPEED AST27XX SoCs. This driver
+ can be used to send messages between different processors in the
+ SoC. The driver provides mailbox support for sending interrupts to
+ the clients. Say Y here if you want to build this driver.
+
config CV1800_MBOX
tristate "cv1800 mailbox"
depends on ARCH_SOPHGO || COMPILE_TEST
@@ -350,4 +359,14 @@ config CIX_MBOX
is unidirectional. Say Y here if you want to use the CIX Mailbox
support.
+config BCM74110_MAILBOX
+ tristate "Brcmstb BCM74110 Mailbox"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
+ help
+ Broadcom STB mailbox driver, present on brcmstb SoCs starting with
+ the BCM74110. The mailbox is a communication channel between the
+ host processor and a coprocessor that handles various power
+ management tasks and more.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 786a46587ba1..98a68f838486 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o
+obj-$(CONFIG_AST2700_MBOX) += ast2700-mailbox.o
+
obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o
obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o
@@ -74,3 +76,5 @@ obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
obj-$(CONFIG_THEAD_TH1520_MBOX) += mailbox-th1520.o
obj-$(CONFIG_CIX_MBOX) += cix-mailbox.o
+
+obj-$(CONFIG_BCM74110_MAILBOX) += bcm74110-mailbox.o
diff --git a/drivers/mailbox/ast2700-mailbox.c b/drivers/mailbox/ast2700-mailbox.c
new file mode 100644
index 000000000000..83c6afe5411f
--- /dev/null
+++ b/drivers/mailbox/ast2700-mailbox.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright Aspeed Technology Inc. (C) 2025. All rights reserved
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Each bit in the register represents an IPC ID */
+#define IPCR_TX_TRIG 0x00
+#define IPCR_ENABLE 0x04
+#define IPCR_STATUS 0x08
+#define RX_IRQ(n) BIT(n)
+#define RX_IRQ_MASK 0xf
+#define IPCR_DATA 0x10
+
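+/* Per-compatible configuration: channel count and per-channel message size. */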
+struct ast2700_mbox_data {
+ u8 num_chans;
+ u8 msg_size;
+};
+
+struct ast2700_mbox {
+ struct mbox_controller mbox;
+ u8 msg_size;
+ void __iomem *tx_regs;
+ void __iomem *rx_regs;
+ spinlock_t lock;
+};
+
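+/* Derive the channel index from its position in the controller's channel array. */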
+static inline int ch_num(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static inline bool ast2700_mbox_tx_done(struct ast2700_mbox *mb, int idx)
+{
+ return !(readl(mb->tx_regs + IPCR_STATUS) & BIT(idx));
+}
+
+static irqreturn_t ast2700_mbox_irq(int irq, void *p)
+{
+ struct ast2700_mbox *mb = p;
+ void __iomem *data_reg;
+ int num_words = mb->msg_size / sizeof(u32);
+ u32 *word_data;
+ u32 status;
+ int n, i;
+
+ /* Only examine channels that are currently enabled. */
+ status = readl(mb->rx_regs + IPCR_ENABLE) &
+ readl(mb->rx_regs + IPCR_STATUS);
+
+ if (!(status & RX_IRQ_MASK))
+ return IRQ_NONE;
+
+ for (n = 0; n < mb->mbox.num_chans; ++n) {
+ struct mbox_chan *chan = &mb->mbox.chans[n];
+
+ if (!(status & RX_IRQ(n)))
+ continue;
+
+ data_reg = mb->rx_regs + IPCR_DATA + mb->msg_size * n;
+ word_data = chan->con_priv;
+ /* Read the message data */
+ for (i = 0; i < num_words; i++)
+ word_data[i] = readl(data_reg + i * sizeof(u32));
+
+ mbox_chan_received_data(chan, chan->con_priv);
+
+ /* The IRQ can be cleared only once the FIFO is empty. */
+ writel(RX_IRQ(n), mb->rx_regs + IPCR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ast2700_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *data_reg = mb->tx_regs + IPCR_DATA + mb->msg_size * idx;
+ u32 *word_data = data;
+ int num_words = mb->msg_size / sizeof(u32);
+ int i;
+
+ if (!(readl(mb->tx_regs + IPCR_ENABLE) & BIT(idx))) {
+ dev_warn(mb->mbox.dev, "%s: Ch-%d not enabled yet\n", __func__, idx);
+ return -ENODEV;
+ }
+
+ if (!(ast2700_mbox_tx_done(mb, idx))) {
+ dev_warn(mb->mbox.dev, "%s: Ch-%d previous data transfer has not finished\n", __func__, idx);
+ return -EBUSY;
+ }
+
+ /* Write the message data */
+ for (i = 0 ; i < num_words; i++)
+ writel(word_data[i], data_reg + i * sizeof(u32));
+
+ writel(BIT(idx), mb->tx_regs + IPCR_TX_TRIG);
+ dev_dbg(mb->mbox.dev, "%s: Ch-%d sent\n", __func__, idx);
+
+ return 0;
+}
+
+static int ast2700_mbox_startup(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *reg = mb->rx_regs + IPCR_ENABLE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb->lock, flags);
+ writel(readl(reg) | BIT(idx), reg);
+ spin_unlock_irqrestore(&mb->lock, flags);
+
+ return 0;
+}
+
+static void ast2700_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *reg = mb->rx_regs + IPCR_ENABLE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb->lock, flags);
+ writel(readl(reg) & ~BIT(idx), reg);
+ spin_unlock_irqrestore(&mb->lock, flags);
+}
+
+static bool ast2700_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+
+ return ast2700_mbox_tx_done(mb, idx);
+}
+
+static const struct mbox_chan_ops ast2700_mbox_chan_ops = {
+ .send_data = ast2700_mbox_send_data,
+ .startup = ast2700_mbox_startup,
+ .shutdown = ast2700_mbox_shutdown,
+ .last_tx_done = ast2700_mbox_last_tx_done,
+};
+
+static int ast2700_mbox_probe(struct platform_device *pdev)
+{
+ struct ast2700_mbox *mb;
+ const struct ast2700_mbox_data *dev_data;
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ dev_data = device_get_match_data(&pdev->dev);
+
+ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->mbox.chans = devm_kcalloc(&pdev->dev, dev_data->num_chans,
+ sizeof(*mb->mbox.chans), GFP_KERNEL);
+ if (!mb->mbox.chans)
+ return -ENOMEM;
+
+ /* con_priv of each channel is used to store the message received */
+ for (int i = 0; i < dev_data->num_chans; i++) {
+ mb->mbox.chans[i].con_priv = devm_kcalloc(dev, dev_data->msg_size,
+ sizeof(u8), GFP_KERNEL);
+ if (!mb->mbox.chans[i].con_priv)
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, mb);
+
+ mb->tx_regs = devm_platform_ioremap_resource_byname(pdev, "tx");
+ if (IS_ERR(mb->tx_regs))
+ return PTR_ERR(mb->tx_regs);
+
+ mb->rx_regs = devm_platform_ioremap_resource_byname(pdev, "rx");
+ if (IS_ERR(mb->rx_regs))
+ return PTR_ERR(mb->rx_regs);
+
+ mb->msg_size = dev_data->msg_size;
+ mb->mbox.dev = dev;
+ mb->mbox.num_chans = dev_data->num_chans;
+ mb->mbox.ops = &ast2700_mbox_chan_ops;
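+ /* No TX-done interrupt on this controller; poll last_tx_done every 5 ms. */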
+ mb->mbox.txdone_irq = false;
+ mb->mbox.txdone_poll = true;
+ mb->mbox.txpoll_period = 5;
+ spin_lock_init(&mb->lock);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, ast2700_mbox_irq, 0, dev_name(dev), mb);
+ if (ret)
+ return ret;
+
+ return devm_mbox_controller_register(dev, &mb->mbox);
+}
+
+static const struct ast2700_mbox_data ast2700_dev_data = {
+ .num_chans = 4,
+ .msg_size = 0x20,
+};
+
+static const struct of_device_id ast2700_mbox_of_match[] = {
+ { .compatible = "aspeed,ast2700-mailbox", .data = &ast2700_dev_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ast2700_mbox_of_match);
+
+static struct platform_driver ast2700_mbox_driver = {
+ .driver = {
+ .name = "ast2700-mailbox",
+ .of_match_table = ast2700_mbox_of_match,
+ },
+ .probe = ast2700_mbox_probe,
+};
+module_platform_driver(ast2700_mbox_driver);
+
+MODULE_AUTHOR("Jammy Huang <jammy_huang@aspeedtech.com>");
+MODULE_DESCRIPTION("ASPEED AST2700 IPC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/bcm74110-mailbox.c b/drivers/mailbox/bcm74110-mailbox.c
new file mode 100644
index 000000000000..2e7e86f3e6a4
--- /dev/null
+++ b/drivers/mailbox/bcm74110-mailbox.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom BCM74110 Mailbox Driver
+ *
+ * Copyright (c) 2025 Broadcom
+ */
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/mailbox_controller.h>
+#include <linux/bitfield.h>
+#include <linux/slab.h>
+
+#define BCM_MBOX_BASE(sel) ((sel) * 0x40)
+#define BCM_MBOX_IRQ_BASE(sel) (((sel) * 0x20) + 0x800)
+
+#define BCM_MBOX_CFGA 0x0
+#define BCM_MBOX_CFGB 0x4
+#define BCM_MBOX_CFGC 0x8
+#define BCM_MBOX_CFGD 0xc
+#define BCM_MBOX_CTRL 0x10
+#define BCM_MBOX_CTRL_EN BIT(0)
+#define BCM_MBOX_CTRL_CLR BIT(1)
+#define BCM_MBOX_STATUS0 0x14
+#define BCM_MBOX_STATUS0_NOT_EMPTY BIT(28)
+#define BCM_MBOX_STATUS0_FULL BIT(29)
+#define BCM_MBOX_STATUS1 0x18
+#define BCM_MBOX_STATUS2 0x1c
+#define BCM_MBOX_WDATA 0x20
+#define BCM_MBOX_RDATA 0x28
+
+#define BCM_MBOX_IRQ_STATUS 0x0
+#define BCM_MBOX_IRQ_SET 0x4
+#define BCM_MBOX_IRQ_CLEAR 0x8
+#define BCM_MBOX_IRQ_MASK_STATUS 0xc
+#define BCM_MBOX_IRQ_MASK_SET 0x10
+#define BCM_MBOX_IRQ_MASK_CLEAR 0x14
+#define BCM_MBOX_IRQ_TIMEOUT BIT(0)
+#define BCM_MBOX_IRQ_NOT_EMPTY BIT(1)
+#define BCM_MBOX_IRQ_FULL BIT(2)
+#define BCM_MBOX_IRQ_LOW_WM BIT(3)
+#define BCM_MBOX_IRQ_HIGH_WM BIT(4)
+
+#define BCM_LINK_CODE0 0xbe0
+#define BCM_LINK_CODE1 0xbe1
+#define BCM_LINK_CODE2 0xbe2
+
+enum {
+ BCM_MSG_FUNC_LINK_START = 0,
+ BCM_MSG_FUNC_LINK_STOP,
+ BCM_MSG_FUNC_SHMEM_TX,
+ BCM_MSG_FUNC_SHMEM_RX,
+ BCM_MSG_FUNC_SHMEM_STOP,
+ BCM_MSG_FUNC_MAX,
+};
+
+enum {
+ BCM_MSG_SVC_INIT = 0,
+ BCM_MSG_SVC_PMC,
+ BCM_MSG_SVC_SCMI,
+ BCM_MSG_SVC_DPFE,
+ BCM_MSG_SVC_MAX,
+};
+
+struct bcm74110_mbox_msg {
+ struct list_head list_entry;
+#define BCM_MSG_VERSION_MASK GENMASK(31, 29)
+#define BCM_MSG_VERSION 0x1
+#define BCM_MSG_REQ_MASK BIT(28)
+#define BCM_MSG_RPLY_MASK BIT(27)
+#define BCM_MSG_SVC_MASK GENMASK(26, 24)
+#define BCM_MSG_FUNC_MASK GENMASK(23, 16)
+#define BCM_MSG_LENGTH_MASK GENMASK(15, 4)
+#define BCM_MSG_SLOT_MASK GENMASK(3, 0)
+
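+/*
+ * 32-bit message header layout:
+ * [31:29] version | [28] req | [27] rply | [26:24] svc |
+ * [23:16] func | [15:4] length | [3:0] slot
+ */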
+#define BCM_MSG_SET_FIELD(hdr, field, val) \
+ do { \
+ hdr &= ~BCM_MSG_##field##_MASK; \
+ hdr |= FIELD_PREP(BCM_MSG_##field##_MASK, val); \
+ } while (0)
+
+#define BCM_MSG_GET_FIELD(hdr, field) \
+ FIELD_GET(BCM_MSG_##field##_MASK, hdr)
+ u32 msg;
+};
+
+struct bcm74110_mbox_chan {
+ struct bcm74110_mbox *mbox;
+ bool en;
+ int slot;
+ int type;
+};
+
+struct bcm74110_mbox {
+ struct platform_device *pdev;
+ void __iomem *base;
+
+ int tx_chan;
+ int rx_chan;
+ int rx_irq;
+ struct list_head rx_svc_init_list;
+ spinlock_t rx_svc_list_lock;
+
+ struct mbox_controller controller;
+ struct bcm74110_mbox_chan *mbox_chan;
+};
+
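+/* Generate readl/writel helpers bound to the tx, rx, and irq register banks. */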
+#define BCM74110_OFFSET_IO_WRITEL_MACRO(name, offset_base) \
+static void bcm74110_##name##_writel(struct bcm74110_mbox *mbox,\
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, mbox->base + offset_base + off); \
+}
+BCM74110_OFFSET_IO_WRITEL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan));
+BCM74110_OFFSET_IO_WRITEL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan));
+
+#define BCM74110_OFFSET_IO_READL_MACRO(name, offset_base) \
+static u32 bcm74110_##name##_readl(struct bcm74110_mbox *mbox, \
+ u32 off) \
+{ \
+ return readl_relaxed(mbox->base + offset_base + off); \
+}
+BCM74110_OFFSET_IO_READL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan));
+BCM74110_OFFSET_IO_READL_MACRO(rx, BCM_MBOX_BASE(mbox->rx_chan));
+BCM74110_OFFSET_IO_READL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan));
+
+static inline struct bcm74110_mbox *bcm74110_mbox_from_cntrl(
+ struct mbox_controller *cntrl)
+{
+ return container_of(cntrl, struct bcm74110_mbox, controller);
+}
+
+static void bcm74110_rx_push_init_msg(struct bcm74110_mbox *mbox, u32 val)
+{
+ struct bcm74110_mbox_msg *msg;
+
+ msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return;
+
+ INIT_LIST_HEAD(&msg->list_entry);
+ msg->msg = val;
+
+ spin_lock(&mbox->rx_svc_list_lock);
+ list_add_tail(&msg->list_entry, &mbox->rx_svc_init_list);
+ spin_unlock(&mbox->rx_svc_list_lock);
+}
+
+static void bcm74110_rx_process_msg(struct bcm74110_mbox *mbox)
+{
+ struct device *dev = &mbox->pdev->dev;
+ struct bcm74110_mbox_chan *chan_priv;
+ struct mbox_chan *chan;
+ u32 msg, status;
+ int type;
+
+ do {
+ msg = bcm74110_rx_readl(mbox, BCM_MBOX_RDATA);
+ status = bcm74110_rx_readl(mbox, BCM_MBOX_STATUS0);
+
+ dev_dbg(dev, "rx: [req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n",
+ BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY),
+ BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC),
+ BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT));
+
+ type = BCM_MSG_GET_FIELD(msg, SVC);
+ switch (type) {
+ case BCM_MSG_SVC_INIT:
+ bcm74110_rx_push_init_msg(mbox, msg);
+ break;
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ chan = &mbox->controller.chans[type];
+ chan_priv = chan->con_priv;
+ if (chan_priv->en)
+ mbox_chan_received_data(chan, NULL);
+ else
+ dev_warn(dev, "Channel not enabled\n");
+ break;
+ default:
+ dev_warn(dev, "Unsupported msg received\n");
+ }
+ } while (status & BCM_MBOX_STATUS0_NOT_EMPTY);
+}
+
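+/* Ack all pending IRQ bits up front, then drain the RX FIFO on NOT_EMPTY. */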
+static irqreturn_t bcm74110_mbox_isr(int irq, void *data)
+{
+ struct bcm74110_mbox *mbox = data;
+ u32 status;
+
+ status = bcm74110_irq_readl(mbox, BCM_MBOX_IRQ_STATUS);
+
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR);
+
+ if (status & BCM_MBOX_IRQ_NOT_EMPTY)
+ bcm74110_rx_process_msg(mbox);
+ else
+ dev_warn(&mbox->pdev->dev, "Spurious interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static void bcm74110_mbox_mask_and_clear(struct bcm74110_mbox *mbox)
+{
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_MASK_SET);
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR);
+}
+
+static int bcm74110_rx_pop_init_msg(struct bcm74110_mbox *mbox, u32 func_type,
+ u32 *val)
+{
+ struct bcm74110_mbox_msg *msg, *msg_tmp;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&mbox->rx_svc_list_lock, flags);
+ list_for_each_entry_safe(msg, msg_tmp, &mbox->rx_svc_init_list,
+ list_entry) {
+ if (BCM_MSG_GET_FIELD(msg->msg, FUNC) == func_type) {
+ list_del(&msg->list_entry);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags);
+
+ if (!found)
+ return -EINVAL;
+
+ *val = msg->msg;
+ kfree(msg);
+
+ return 0;
+}
+
+static void bcm74110_rx_flush_msg(struct bcm74110_mbox *mbox)
+{
+ struct bcm74110_mbox_msg *msg, *msg_tmp;
+ LIST_HEAD(list_temp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbox->rx_svc_list_lock, flags);
+ list_splice_init(&mbox->rx_svc_init_list, &list_temp);
+ spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags);
+
+ list_for_each_entry_safe(msg, msg_tmp, &list_temp, list_entry) {
+ list_del(&msg->list_entry);
+ kfree(msg);
+ }
+}
+
+#define BCM_DEQUEUE_TIMEOUT_MS 30
+static int bcm74110_rx_pop_init_msg_block(struct bcm74110_mbox *mbox, u32 func_type,
+ u32 *val)
+{
+ int ret, timeout = 0;
+
+ do {
+ ret = bcm74110_rx_pop_init_msg(mbox, func_type, val);
+
+ if (!ret)
+ return 0;
+
+ /* TODO: Figure out a suitable sleep interval here. */
+ usleep_range(1000, 2000);
+ timeout++;
+ } while (timeout < BCM_DEQUEUE_TIMEOUT_MS);
+
+ dev_warn(&mbox->pdev->dev, "Timeout waiting for service init response\n");
+ return -ETIMEDOUT;
+}
+
+static int bcm74110_mbox_create_msg(int req, int rply, int svc, int func,
+ int length, int slot)
+{
+ u32 msg = 0;
+
+ BCM_MSG_SET_FIELD(msg, REQ, req);
+ BCM_MSG_SET_FIELD(msg, RPLY, rply);
+ BCM_MSG_SET_FIELD(msg, SVC, svc);
+ BCM_MSG_SET_FIELD(msg, FUNC, func);
+ BCM_MSG_SET_FIELD(msg, LENGTH, length);
+ BCM_MSG_SET_FIELD(msg, SLOT, slot);
+
+ return msg;
+}
+
+static int bcm74110_mbox_tx_msg(struct bcm74110_mbox *mbox, u32 msg)
+{
+ int val;
+
+ /* We can potentially poll with timeout here instead */
+ val = bcm74110_tx_readl(mbox, BCM_MBOX_STATUS0);
+ if (val & BCM_MBOX_STATUS0_FULL) {
+ dev_err(&mbox->pdev->dev, "Mailbox full\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(&mbox->pdev->dev, "tx: [req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n",
+ BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY),
+ BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC),
+ BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT));
+
+ bcm74110_tx_writel(mbox, msg, BCM_MBOX_WDATA);
+
+ return 0;
+}
+
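+/*
+ * Link training walks LINK_CODE0..2: each reply must echo the request with
+ * the code advanced by one; any mismatch or timeout restarts the sequence.
+ */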
+#define BCM_MBOX_LINK_TRAINING_RETRIES 5
+static int bcm74110_mbox_link_training(struct bcm74110_mbox *mbox)
+{
+ int ret, retries = 0;
+ u32 msg = 0, orig_len = 0, len = BCM_LINK_CODE0;
+
+ do {
+ switch (len) {
+ case 0:
+ retries++;
+ dev_warn(&mbox->pdev->dev,
+ "Link train failed, trying again... %d\n",
+ retries);
+ if (retries > BCM_MBOX_LINK_TRAINING_RETRIES)
+ return -EINVAL;
+ len = BCM_LINK_CODE0;
+ fallthrough;
+ case BCM_LINK_CODE0:
+ case BCM_LINK_CODE1:
+ case BCM_LINK_CODE2:
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_LINK_START,
+ len, BCM_MSG_SVC_INIT);
+ break;
+ default:
+ break;
+ }
+
+ bcm74110_mbox_tx_msg(mbox, msg);
+
+ /* No response expected for LINK_CODE2 */
+ if (len == BCM_LINK_CODE2)
+ return 0;
+
+ orig_len = len;
+
+ ret = bcm74110_rx_pop_init_msg_block(mbox,
+ BCM_MSG_GET_FIELD(msg, FUNC),
+ &msg);
+ if (ret) {
+ len = 0;
+ continue;
+ }
+
+ if ((BCM_MSG_GET_FIELD(msg, SVC) != BCM_MSG_SVC_INIT) ||
+ (BCM_MSG_GET_FIELD(msg, FUNC) != BCM_MSG_FUNC_LINK_START) ||
+ (BCM_MSG_GET_FIELD(msg, SLOT) != 0) ||
+ (BCM_MSG_GET_FIELD(msg, RPLY) != 1) ||
+ (BCM_MSG_GET_FIELD(msg, REQ) != 0)) {
+ len = 0;
+ continue;
+ }
+
+ len = BCM_MSG_GET_FIELD(msg, LENGTH);
+
+ /* Make sure sequence is good */
+ if (len != (orig_len + 1)) {
+ len = 0;
+ continue;
+ }
+ } while (1);
+
+ return -EINVAL;
+}
+
+static int bcm74110_mbox_tx_msg_and_wait_ack(struct bcm74110_mbox *mbox, u32 msg)
+{
+ int ret;
+ u32 recv_msg;
+
+ ret = bcm74110_mbox_tx_msg(mbox, msg);
+ if (ret)
+ return ret;
+
+ ret = bcm74110_rx_pop_init_msg_block(mbox, BCM_MSG_GET_FIELD(msg, FUNC),
+ &recv_msg);
+ if (ret)
+ return ret;
+
+ /*
+ * Modify tx message to verify rx ack.
+ * Flip RPLY/REQ for synchronous messages
+ */
+ if (BCM_MSG_GET_FIELD(msg, REQ) == 1) {
+ BCM_MSG_SET_FIELD(msg, RPLY, 1);
+ BCM_MSG_SET_FIELD(msg, REQ, 0);
+ }
+
+ if (msg != recv_msg) {
+ dev_err(&mbox->pdev->dev, "Found ack, but ack is invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Each index points to 0x100 of HAB MEM. IDX size counts from 0 */
+#define BCM_MBOX_HAB_MEM_IDX_START 0x30
+#define BCM_MBOX_HAB_MEM_IDX_SIZE 0x0
+static int bcm74110_mbox_shmem_init(struct bcm74110_mbox *mbox)
+{
+ u32 msg = 0;
+ int ret;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_STOP,
+ 0, BCM_MSG_SVC_INIT);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_TX,
+ BCM_MBOX_HAB_MEM_IDX_START,
+ BCM_MBOX_HAB_MEM_IDX_SIZE);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_RX,
+ BCM_MBOX_HAB_MEM_IDX_START,
+ BCM_MBOX_HAB_MEM_IDX_SIZE);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
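+/*
+ * Bring-up: reset and re-enable the queues, unmask the RX interrupt,
+ * train the link, then negotiate the shared-memory windows.
+ */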
+static int bcm74110_mbox_init(struct bcm74110_mbox *mbox)
+{
+ int ret = 0;
+
+ /* Disable queues tx/rx */
+ bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+ /* Clear status & restart tx/rx */
+ bcm74110_tx_writel(mbox, BCM_MBOX_CTRL_EN | BCM_MBOX_CTRL_CLR,
+ BCM_MBOX_CTRL);
+
+ /* Unmask irq */
+ bcm74110_irq_writel(mbox, BCM_MBOX_IRQ_NOT_EMPTY, BCM_MBOX_IRQ_MASK_CLEAR);
+
+ ret = bcm74110_mbox_link_training(mbox);
+ if (ret) {
+ dev_err(&mbox->pdev->dev, "Training failed\n");
+ return ret;
+ }
+
+ return bcm74110_mbox_shmem_init(mbox);
+}
+
+static int bcm74110_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+ u32 msg;
+
+ switch (chan_priv->type) {
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ msg = bcm74110_mbox_create_msg(1, 0, chan_priv->type, 0,
+ 128 + 28, chan_priv->slot);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return bcm74110_mbox_tx_msg(chan_priv->mbox, msg);
+}
+
+static int bcm74110_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+ chan_priv->en = true;
+
+ return 0;
+}
+
+static void bcm74110_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+ chan_priv->en = false;
+}
+
+static const struct mbox_chan_ops bcm74110_mbox_chan_ops = {
+ .send_data = bcm74110_mbox_send_data,
+ .startup = bcm74110_mbox_chan_startup,
+ .shutdown = bcm74110_mbox_chan_shutdown,
+};
+
+static void bcm74110_mbox_shutdown(struct platform_device *pdev)
+{
+ struct bcm74110_mbox *mbox = dev_get_drvdata(&pdev->dev);
+ u32 msg;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_LINK_STOP,
+ 0, 0);
+
+ bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+
+ /* Even if we don't receive an ACK, let's shut it down */
+
+ bcm74110_mbox_mask_and_clear(mbox);
+
+ /* Disable queues tx/rx */
+ bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+ /* Flush queues */
+ bcm74110_rx_flush_msg(mbox);
+}
+
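+/* Expects two xlate args: cell 0 selects the service type, cell 1 the shmem slot. */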
+static struct mbox_chan *bcm74110_mbox_of_xlate(struct mbox_controller *cntrl,
+ const struct of_phandle_args *p)
+{
+ struct bcm74110_mbox *mbox = bcm74110_mbox_from_cntrl(cntrl);
+ struct device *dev = &mbox->pdev->dev;
+ struct bcm74110_mbox_chan *chan_priv;
+ int slot, type;
+
+ if (p->args_count != 2) {
+ dev_err(dev, "Invalid arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = p->args[0];
+ slot = p->args[1];
+
+ switch (type) {
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ if (slot > BCM_MBOX_HAB_MEM_IDX_SIZE) {
+ dev_err(dev, "Not enough shared memory\n");
+ return ERR_PTR(-EINVAL);
+ }
+ chan_priv = cntrl->chans[type].con_priv;
+ chan_priv->slot = slot;
+ chan_priv->type = type;
+ break;
+ default:
+ dev_err(dev, "Invalid channel type: %d\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &cntrl->chans[type];
+}
+
+static int bcm74110_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm74110_mbox *mbox;
+ int i, ret;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->pdev = pdev;
+ platform_set_drvdata(pdev, mbox);
+
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->base))
+ return dev_err_probe(dev, PTR_ERR(mbox->base), "Failed to iomap\n");
+
+ ret = of_property_read_u32(dev->of_node, "brcm,tx", &mbox->tx_chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find tx channel\n");
+
+ ret = of_property_read_u32(dev->of_node, "brcm,rx", &mbox->rx_chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find rx channel\n");
+
+ mbox->rx_irq = platform_get_irq(pdev, 0);
+ if (mbox->rx_irq < 0)
+ return mbox->rx_irq;
+
+ INIT_LIST_HEAD(&mbox->rx_svc_init_list);
+ spin_lock_init(&mbox->rx_svc_list_lock);
+ bcm74110_mbox_mask_and_clear(mbox);
+
+ ret = devm_request_irq(dev, mbox->rx_irq, bcm74110_mbox_isr,
+ IRQF_NO_SUSPEND, pdev->name, mbox);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+
+ mbox->controller.ops = &bcm74110_mbox_chan_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = BCM_MSG_SVC_MAX;
+ mbox->controller.of_xlate = &bcm74110_mbox_of_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+ sizeof(*mbox->controller.chans),
+ GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+
+ mbox->mbox_chan = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+ sizeof(*mbox->mbox_chan),
+ GFP_KERNEL);
+ if (!mbox->mbox_chan)
+ return -ENOMEM;
+
+ for (i = 0; i < BCM_MSG_SVC_MAX; i++) {
+ mbox->mbox_chan[i].mbox = mbox;
+ mbox->controller.chans[i].con_priv = &mbox->mbox_chan[i];
+ }
+
+ ret = devm_mbox_controller_register(dev, &mbox->controller);
+ if (ret)
+ return ret;
+
+ ret = bcm74110_mbox_init(mbox);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bcm74110_mbox_of_match[] = {
+ { .compatible = "brcm,bcm74110-mbox", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm74110_mbox_of_match);
+
+static struct platform_driver bcm74110_mbox_driver = {
+ .driver = {
+ .name = "bcm74110-mbox",
+ .of_match_table = bcm74110_mbox_of_match,
+ },
+ .probe = bcm74110_mbox_probe,
+ .shutdown = bcm74110_mbox_shutdown,
+};
+module_platform_driver(bcm74110_mbox_driver);
+
+MODULE_AUTHOR("Justin Chen <justin.chen@broadcom.com>");
+MODULE_DESCRIPTION("BCM74110 mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index ab4e8d1954a1..532929916e99 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -390,7 +390,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
task = kzalloc(sizeof(*task), GFP_ATOMIC);
if (!task) {
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return -ENOMEM;
}
@@ -440,7 +440,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
list_move_tail(&task->list_entry, &thread->task_busy_list);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
}
@@ -488,7 +488,7 @@ done:
spin_unlock_irqrestore(&thread->chan->lock, flags);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
}
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
@@ -528,7 +528,7 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
@@ -543,7 +543,7 @@ wait:
return -EFAULT;
}
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
}
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index f6714c233f5a..0a00719b2482 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -306,6 +306,22 @@ static void pcc_chan_acknowledge(struct pcc_chan_info *pchan)
pcc_chan_reg_read_modify_write(&pchan->db);
}
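+/*
+ * Copy a response out of the channel's shared memory into a buffer obtained
+ * from the client's rx_alloc callback; returns NULL if allocation fails.
+ */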
+static void *write_response(struct pcc_chan_info *pchan)
+{
+ struct pcc_header pcc_header;
+ void *buffer;
+ int data_len;
+
+ memcpy_fromio(&pcc_header, pchan->chan.shmem,
+ sizeof(pcc_header));
+ data_len = pcc_header.length - sizeof(u32) + sizeof(struct pcc_header);
+
+ buffer = pchan->chan.rx_alloc(pchan->chan.mchan->cl, data_len);
+ if (buffer != NULL)
+ memcpy_fromio(buffer, pchan->chan.shmem, data_len);
+ return buffer;
+}
+
/**
* pcc_mbox_irq - PCC mailbox interrupt handler
* @irq: interrupt number
@@ -317,6 +333,8 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
{
struct pcc_chan_info *pchan;
struct mbox_chan *chan = p;
+ struct pcc_header *pcc_header;
+ void *handle = NULL;
pchan = chan->con_priv;
@@ -340,7 +358,17 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
* required to avoid any possible race in the update of this flag.
*/
pchan->chan_in_use = false;
- mbox_chan_received_data(chan, NULL);
+
+ if (pchan->chan.rx_alloc)
+ handle = write_response(pchan);
+
+ if (chan->active_req) {
+ pcc_header = chan->active_req;
+ if (pcc_header->flags & PCC_CMD_COMPLETION_NOTIFY)
+ mbox_chan_txdone(chan, 0);
+ }
+
+ mbox_chan_received_data(chan, handle);
pcc_chan_acknowledge(pchan);
@@ -384,9 +412,24 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
pcc_mchan = &pchan->chan;
pcc_mchan->shmem = acpi_os_ioremap(pcc_mchan->shmem_base_addr,
pcc_mchan->shmem_size);
- if (pcc_mchan->shmem)
- return pcc_mchan;
+ if (!pcc_mchan->shmem)
+ goto err;
+
+ pcc_mchan->manage_writes = false;
+
+ /*
+ * This indicates that the channel is ready to accept messages. It
+ * needs to happen after the channel has registered its callback,
+ * but the mailbox API provides no hook at that point, so the
+ * mailbox client must set the allocate callback before sending
+ * any messages.
+ */
+ if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+
+ return pcc_mchan;
+err:
mbox_free_channel(chan);
return ERR_PTR(-ENXIO);
}
@@ -417,8 +460,38 @@ void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
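+/*
+ * When the channel manages writes, verify that the platform has set
+ * cmd_complete, then copy the full message (header included) into
+ * shared memory.
+ */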
+static int pcc_write_to_buffer(struct mbox_chan *chan, void *data)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ struct pcc_mbox_chan *pcc_mbox_chan = &pchan->chan;
+ struct pcc_header *pcc_header = data;
+
+ if (!pchan->chan.manage_writes)
+ return 0;
+
+ /*
+ * The PCC header length includes the command field but not the
+ * other values from the header.
+ */
+ int len = pcc_header->length - sizeof(u32) + sizeof(struct pcc_header);
+ u64 val;
+
+ pcc_chan_reg_read(&pchan->cmd_complete, &val);
+ if (!val) {
+ pr_info("%s: pchan->cmd_complete not set\n", __func__);
+ return -1;
+ }
+ memcpy_toio(pcc_mbox_chan->shmem, data, len);
+ return 0;
+}
+
/**
- * pcc_send_data - Called from Mailbox Controller code. Used
+ * pcc_send_data - Called from Mailbox Controller code. If
+ * pchan->chan.manage_writes is set, the command-complete
+ * flag is checked and the data is written to the shared
+ * buffer I/O memory.
+ *
+ * If pchan->chan.manage_writes is not set, it is used
* here only to ring the channel doorbell. The PCC client
* specific read/write is done in the client driver in
* order to maintain atomicity over PCC channel once
@@ -434,17 +507,37 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
int ret;
struct pcc_chan_info *pchan = chan->con_priv;
+ ret = pcc_write_to_buffer(chan, data);
+ if (ret)
+ return ret;
+
ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update);
if (ret)
return ret;
ret = pcc_chan_reg_read_modify_write(&pchan->db);
+
if (!ret && pchan->plat_irq > 0)
pchan->chan_in_use = true;
return ret;
}
+
+static bool pcc_last_tx_done(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ u64 val;
+
+ pcc_chan_reg_read(&pchan->cmd_complete, &val);
+ return val != 0;
+}
+
/**
* pcc_startup - Called from Mailbox Controller code. Used here
* to request the interrupt.
@@ -490,6 +583,7 @@ static const struct mbox_chan_ops pcc_chan_ops = {
.send_data = pcc_send_data,
.startup = pcc_startup,
.shutdown = pcc_shutdown,
+ .last_tx_done = pcc_last_tx_done,
};
/**
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index ea44ffb5ce1a..d957d989c0ce 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -312,8 +312,7 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- ipcc->irq_domain = irq_domain_create_tree(of_fwnode_handle(pdev->dev.of_node),
- &qcom_ipcc_irq_ops, ipcc);
+ ipcc->irq_domain = irq_domain_create_tree(dev_fwnode(&pdev->dev), &qcom_ipcc_irq_ops, ipcc);
if (!ipcc->irq_domain)
return -ENOMEM;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c711db6f8f5c..cf17fd46e255 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -215,16 +215,19 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
}
if (test_bit(DROP_WRITES, &fc->flags) &&
- (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+ ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) ||
+ fc->random_write_corrupt)) {
ti->error = "drop_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
return -EINVAL;
} else if (test_bit(ERROR_WRITES, &fc->flags) &&
- (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+ ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) ||
+ fc->random_write_corrupt)) {
ti->error = "error_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
return -EINVAL;
} else if (test_bit(ERROR_READS, &fc->flags) &&
- (fc->corrupt_bio_rw == READ || fc->random_read_corrupt)) {
+ ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == READ) ||
+ fc->random_read_corrupt)) {
ti->error = "error_reads is incompatible with random_read_corrupt or corrupt_bio_byte with the READ flag set";
return -EINVAL;
}
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index b90f34259fbb..8b50c908c6f4 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -241,10 +241,11 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
/*
* First retrieve the target metadata.
*/
- scnprintf(target_metadata_buf, DM_IMA_TARGET_METADATA_BUF_LEN,
- "target_index=%d,target_begin=%llu,target_len=%llu,",
- i, ti->begin, ti->len);
- target_metadata_buf_len = strlen(target_metadata_buf);
+ target_metadata_buf_len =
+ scnprintf(target_metadata_buf,
+ DM_IMA_TARGET_METADATA_BUF_LEN,
+ "target_index=%d,target_begin=%llu,target_len=%llu,",
+ i, ti->begin, ti->len);
/*
* Then retrieve the actual target data.
@@ -448,11 +449,9 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
if (r)
goto error;
- scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
- "%sname=%s,uuid=%s;device_resume=no_data;",
- DM_IMA_VERSION_STR, dev_name, dev_uuid);
- l = strlen(device_table_data);
-
+ l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+ "%sname=%s,uuid=%s;device_resume=no_data;",
+ DM_IMA_VERSION_STR, dev_name, dev_uuid);
}
capacity_len = strlen(capacity_str);
@@ -561,10 +560,9 @@ void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
if (dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio))
goto error;
- scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
- "%sname=%s,uuid=%s;device_remove=no_data;",
- DM_IMA_VERSION_STR, dev_name, dev_uuid);
- l = strlen(device_table_data);
+ l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+ "%sname=%s,uuid=%s;device_remove=no_data;",
+ DM_IMA_VERSION_STR, dev_name, dev_uuid);
}
memcpy(device_table_data + l, remove_all_str, remove_all_len);
@@ -647,10 +645,9 @@ void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map)
if (dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio))
goto error2;
- scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
- "%sname=%s,uuid=%s;table_clear=no_data;",
- DM_IMA_VERSION_STR, dev_name, dev_uuid);
- l = strlen(device_table_data);
+ l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+ "%sname=%s,uuid=%s;table_clear=no_data;",
+ DM_IMA_VERSION_STR, dev_name, dev_uuid);
}
capacity_len = strlen(capacity_str);
@@ -706,7 +703,7 @@ void dm_ima_measure_on_device_rename(struct mapped_device *md)
char *old_device_data = NULL, *new_device_data = NULL, *combined_device_data = NULL;
char *new_dev_name = NULL, *new_dev_uuid = NULL, *capacity_str = NULL;
bool noio = true;
- int r;
+ int r, len;
if (dm_ima_alloc_and_copy_device_data(md, &new_device_data,
md->ima.active_table.num_targets, noio))
@@ -728,12 +725,11 @@ void dm_ima_measure_on_device_rename(struct mapped_device *md)
md->ima.active_table.device_metadata = new_device_data;
md->ima.active_table.device_metadata_len = strlen(new_device_data);
- scnprintf(combined_device_data, DM_IMA_DEVICE_BUF_LEN * 2,
- "%s%snew_name=%s,new_uuid=%s;%s", DM_IMA_VERSION_STR, old_device_data,
- new_dev_name, new_dev_uuid, capacity_str);
+ len = scnprintf(combined_device_data, DM_IMA_DEVICE_BUF_LEN * 2,
+ "%s%snew_name=%s,new_uuid=%s;%s", DM_IMA_VERSION_STR, old_device_data,
+ new_dev_name, new_dev_uuid, capacity_str);
- dm_ima_measure_data("dm_device_rename", combined_device_data, strlen(combined_device_data),
- noio);
+ dm_ima_measure_data("dm_device_rename", combined_device_data, len, noio);
goto exit;
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 15538ec58f8e..73bf290af181 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -170,7 +170,7 @@ static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
- pfn_t *pfn)
+ unsigned long *pfn)
{
struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index d484e8e1d48a..679b07dee229 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -893,7 +893,7 @@ static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti,
static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
- pfn_t *pfn)
+ unsigned long *pfn)
{
struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index 3e4cb81ce512..d0b883fabfeb 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -117,16 +117,16 @@ int dm_register_path_selector(struct path_selector_type *pst)
}
EXPORT_SYMBOL_GPL(dm_register_path_selector);
-int dm_unregister_path_selector(struct path_selector_type *pst)
+void dm_unregister_path_selector(struct path_selector_type *pst)
{
struct ps_internal *psi;
down_write(&_ps_lock);
psi = __find_path_selector_type(pst->name);
- if (!psi) {
+ if (WARN_ON(!psi)) {
up_write(&_ps_lock);
- return -EINVAL;
+ return;
}
list_del(&psi->list);
@@ -134,7 +134,5 @@ int dm_unregister_path_selector(struct path_selector_type *pst)
up_write(&_ps_lock);
kfree(psi);
-
- return 0;
}
EXPORT_SYMBOL_GPL(dm_unregister_path_selector);
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
index 3861b2d8b963..7b2270532e64 100644
--- a/drivers/md/dm-path-selector.h
+++ b/drivers/md/dm-path-selector.h
@@ -96,7 +96,7 @@ struct path_selector_type {
int dm_register_path_selector(struct path_selector_type *type);
/* Unregister a path selector */
-int dm_unregister_path_selector(struct path_selector_type *type);
+void dm_unregister_path_selector(struct path_selector_type *type);
/* Returns a registered path selector type */
struct path_selector_type *dm_get_path_selector(const char *name);
diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
index b49e10d76d03..f07e773d9cc0 100644
--- a/drivers/md/dm-ps-historical-service-time.c
+++ b/drivers/md/dm-ps-historical-service-time.c
@@ -541,8 +541,10 @@ static int __init dm_hst_init(void)
{
int r = dm_register_path_selector(&hst_ps);
- if (r < 0)
+ if (r < 0) {
DMERR("register failed %d", r);
+ return r;
+ }
DMINFO("version " HST_VERSION " loaded");
@@ -551,10 +553,7 @@ static int __init dm_hst_init(void)
static void __exit dm_hst_exit(void)
{
- int r = dm_unregister_path_selector(&hst_ps);
-
- if (r < 0)
- DMERR("unregister failed %d", r);
+ dm_unregister_path_selector(&hst_ps);
}
module_init(dm_hst_init);
diff --git a/drivers/md/dm-ps-io-affinity.c b/drivers/md/dm-ps-io-affinity.c
index 716807e511ee..80415a045c68 100644
--- a/drivers/md/dm-ps-io-affinity.c
+++ b/drivers/md/dm-ps-io-affinity.c
@@ -260,10 +260,7 @@ static int __init dm_ioa_init(void)
static void __exit dm_ioa_exit(void)
{
- int ret = dm_unregister_path_selector(&ioa_ps);
-
- if (ret < 0)
- DMERR("unregister failed %d", ret);
+ dm_unregister_path_selector(&ioa_ps);
}
module_init(dm_ioa_init);
diff --git a/drivers/md/dm-ps-queue-length.c b/drivers/md/dm-ps-queue-length.c
index e305f05ad1e5..9c68701ed7a4 100644
--- a/drivers/md/dm-ps-queue-length.c
+++ b/drivers/md/dm-ps-queue-length.c
@@ -260,8 +260,10 @@ static int __init dm_ql_init(void)
{
int r = dm_register_path_selector(&ql_ps);
- if (r < 0)
+ if (r < 0) {
DMERR("register failed %d", r);
+ return r;
+ }
DMINFO("version " QL_VERSION " loaded");
@@ -270,10 +272,7 @@ static int __init dm_ql_init(void)
static void __exit dm_ql_exit(void)
{
- int r = dm_unregister_path_selector(&ql_ps);
-
- if (r < 0)
- DMERR("unregister failed %d", r);
+ dm_unregister_path_selector(&ql_ps);
}
module_init(dm_ql_init);
diff --git a/drivers/md/dm-ps-round-robin.c b/drivers/md/dm-ps-round-robin.c
index d1745b123dc1..0c12f4073461 100644
--- a/drivers/md/dm-ps-round-robin.c
+++ b/drivers/md/dm-ps-round-robin.c
@@ -220,8 +220,10 @@ static int __init dm_rr_init(void)
{
int r = dm_register_path_selector(&rr_ps);
- if (r < 0)
+ if (r < 0) {
DMERR("register failed %d", r);
+ return r;
+ }
DMINFO("version " RR_VERSION " loaded");
@@ -230,10 +232,7 @@ static int __init dm_rr_init(void)
static void __exit dm_rr_exit(void)
{
- int r = dm_unregister_path_selector(&rr_ps);
-
- if (r < 0)
- DMERR("unregister failed %d", r);
+ dm_unregister_path_selector(&rr_ps);
}
module_init(dm_rr_init);
diff --git a/drivers/md/dm-ps-service-time.c b/drivers/md/dm-ps-service-time.c
index 969d31c40272..0543fe7969c4 100644
--- a/drivers/md/dm-ps-service-time.c
+++ b/drivers/md/dm-ps-service-time.c
@@ -341,8 +341,10 @@ static int __init dm_st_init(void)
{
int r = dm_register_path_selector(&st_ps);
- if (r < 0)
+ if (r < 0) {
DMERR("register failed %d", r);
+ return r;
+ }
DMINFO("version " ST_VERSION " loaded");
@@ -351,10 +353,7 @@ static int __init dm_st_init(void)
static void __exit dm_st_exit(void)
{
- int r = dm_unregister_path_selector(&st_ps);
-
- if (r < 0)
- DMERR("unregister failed %d", r);
+ dm_unregister_path_selector(&st_ps);
}
module_init(dm_st_init);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index e8c0a8c6fb51..79ea85d18e24 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -14,7 +14,6 @@
#include "raid5.h"
#include "raid10.h"
#include "md-bitmap.h"
-#include "dm-core.h"
#include <linux/device-mapper.h>
@@ -439,7 +438,7 @@ static bool rs_is_reshapable(struct raid_set *rs)
/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
- return rs->md.recovery_cp < rs->md.dev_sectors;
+ return rs->md.resync_offset < rs->md.dev_sectors;
}
/* Return true, if raid set in @rs is reshaping */
@@ -769,7 +768,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
rs->md.layout = raid_type->algorithm;
rs->md.new_layout = rs->md.layout;
rs->md.delta_disks = 0;
- rs->md.recovery_cp = MaxSector;
+ rs->md.resync_offset = MaxSector;
for (i = 0; i < raid_devs; i++)
md_rdev_init(&rs->dev[i].rdev);
@@ -913,7 +912,7 @@ static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
rs->md.external = 0;
rs->md.persistent = 1;
rs->md.major_version = 2;
- } else if (rebuild && !rs->md.recovery_cp) {
+ } else if (rebuild && !rs->md.resync_offset) {
/*
* Without metadata, we will not be able to tell if the array
* is in-sync or not - we must assume it is not. Therefore,
@@ -1696,20 +1695,20 @@ static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
/* raid0 does not recover */
if (rs_is_raid0(rs))
- rs->md.recovery_cp = MaxSector;
+ rs->md.resync_offset = MaxSector;
/*
* A raid6 set has to be recovered either
* completely or for the grown part to
* ensure proper parity and Q-Syndrome
*/
else if (rs_is_raid6(rs))
- rs->md.recovery_cp = dev_sectors;
+ rs->md.resync_offset = dev_sectors;
/*
* Other raid set types may skip recovery
* depending on the 'nosync' flag.
*/
else
- rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
+ rs->md.resync_offset = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
? MaxSector : dev_sectors;
}
@@ -2144,7 +2143,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->events = cpu_to_le64(mddev->events);
sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
- sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
+ sb->array_resync_offset = cpu_to_le64(mddev->resync_offset);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
@@ -2335,18 +2334,18 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
}
if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
- mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
+ mddev->resync_offset = le64_to_cpu(sb->array_resync_offset);
/*
* During load, we set FirstUse if a new superblock was written.
* There are two reasons we might not have a superblock:
* 1) The raid set is brand new - in which case, all of the
* devices must have their In_sync bit set. Also,
- * recovery_cp must be 0, unless forced.
+ * resync_offset must be 0, unless forced.
* 2) This is a new device being added to an old raid set
* and the new device needs to be rebuilt - in which
* case the In_sync bit will /not/ be set and
- * recovery_cp must be MaxSector.
+ * resync_offset must be MaxSector.
* 3) This is/are a new device(s) being added to an old
* raid set during takeover to a higher raid level
* to provide capacity for redundancy or during reshape
@@ -2391,8 +2390,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
new_devs > 1 ? "s" : "");
return -EINVAL;
} else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
- DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
- (unsigned long long) mddev->recovery_cp);
+ DMERR("'rebuild' specified while raid set is not in-sync (resync_offset=%llu)",
+ (unsigned long long) mddev->resync_offset);
return -EINVAL;
} else if (rs_is_reshaping(rs)) {
DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
@@ -2532,6 +2531,10 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
struct md_rdev *rdev, *freshest;
struct mddev *mddev = &rs->md;
+ /* Respect resynchronization requested with "sync" argument. */
+ if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
+ set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+
freshest = NULL;
rdev_for_each(rdev, mddev) {
if (test_bit(Journal, &rdev->flags))
@@ -2697,11 +2700,11 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
}
out:
/*
- * Raise recovery_cp in case data_offset != 0 to
+ * Raise resync_offset in case data_offset != 0 to
* avoid false recovery positives in the constructor.
*/
- if (rs->md.recovery_cp < rs->md.dev_sectors)
- rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
+ if (rs->md.resync_offset < rs->md.dev_sectors)
+ rs->md.resync_offset += rs->dev[0].rdev.data_offset;
/* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */
rdev_for_each(rdev, &rs->md) {
@@ -2756,7 +2759,7 @@ static int rs_setup_takeover(struct raid_set *rs)
}
clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
while (d--) {
rdev = &rs->dev[d].rdev;
@@ -2764,7 +2767,7 @@ static int rs_setup_takeover(struct raid_set *rs)
if (test_bit(d, (void *) rs->rebuild_disks)) {
clear_bit(In_sync, &rdev->flags);
clear_bit(Faulty, &rdev->flags);
- mddev->recovery_cp = rdev->recovery_offset = 0;
+ mddev->resync_offset = rdev->recovery_offset = 0;
/* Bitmap has to be created when we do an "up" takeover */
set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
}
@@ -3222,7 +3225,7 @@ size_check:
if (r)
goto bad;
- rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors);
+ rs_setup_recovery(rs, rs->md.resync_offset < rs->md.dev_sectors ? rs->md.resync_offset : rs->md.dev_sectors);
} else {
/* This is no size change or it is shrinking, update size and record in superblocks */
r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
@@ -3305,7 +3308,7 @@ size_check:
/* Disable/enable discard support on raid set. */
configure_discard_support(rs);
- rs->md.dm_gendisk = ti->table->md->disk;
+ rs->md.dm_gendisk = dm_disk(dm_table_get_md(ti->table));
mddev_unlock(&rs->md);
return 0;
@@ -3446,7 +3449,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
} else {
if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
- r = mddev->recovery_cp;
+ r = mddev->resync_offset;
else
r = mddev->curr_resync_completed;
@@ -4074,9 +4077,9 @@ static int raid_preresume(struct dm_target *ti)
}
/* Check for any resize/reshape on @rs and adjust/initiate */
- if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
+ if (mddev->resync_offset && mddev->resync_offset < MaxSector) {
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
- mddev->resync_min = mddev->recovery_cp;
+ mddev->resync_min = mddev->resync_offset;
if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags))
mddev->resync_max_sectors = mddev->dev_sectors;
}
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 5bbbdf8fc1bd..58902091bf79 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -316,7 +316,7 @@ static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
- pfn_t *pfn)
+ unsigned long *pfn)
{
struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index d9d5e6aa5707..ad0a60a07b93 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -899,17 +899,17 @@ static bool dm_table_supports_dax(struct dm_table *t,
return true;
}
-static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_is_not_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct block_device *bdev = dev->bdev;
struct request_queue *q = bdev_get_queue(bdev);
/* request-based cannot stack on partitions! */
if (bdev_is_partition(bdev))
- return false;
+ return true;
- return queue_is_mq(q);
+ return !queue_is_mq(q);
}
static int dm_table_determine_type(struct dm_table *t)
@@ -1005,7 +1005,7 @@ verify_rq_based:
/* Non-request-stackable devices can't be used for request-based dm */
if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
+ ti->type->iterate_devices(ti, device_is_not_rq_stackable, NULL)) {
DMERR("table load rejected: including non-request-stackable devices");
return -EINVAL;
}
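The subtle point in the hunk above is callback polarity: iterate_devices() reports whether the callback returned nonzero for any underlying device, so a "device is stackable" predicate can only prove that at least one device qualifies. Phrasing the check as "a non-stackable device exists" makes the early exit the failure case. A standalone sketch of the any/all inversion (illustrative names only, not dm code):

#include <stdbool.h>

/* Mirrors iterate_devices(): stop at the first callback hit. */
static bool any(bool (*fn)(int), const int *vals, int n)
{
	for (int i = 0; i < n; i++)
		if (fn(vals[i]))
			return true;
	return false;
}

static bool is_bad(int v)
{
	return v < 0;
}

/* "All devices are good" must be written as "no bad device found". */
static bool all_good(const int *vals, int n)
{
	return !any(is_bad, vals, n);
}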
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 652627aea11b..2af5a9514c05 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -255,7 +255,7 @@ static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
- pfn_t *pfn)
+ unsigned long *pfn)
{
return -EIO;
}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 05cf4e3f2bbe..007bb93e5fca 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4111,8 +4111,8 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
- DM_TARGET_IMMUTABLE,
- .version = {1, 23, 0},
+ DM_TARGET_IMMUTABLE | DM_TARGET_PASSES_CRYPTO,
+ .version = {1, 24, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4497,7 +4497,8 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 23, 0},
+ .features = DM_TARGET_PASSES_CRYPTO,
+ .version = {1, 24, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
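DM_TARGET_PASSES_CRYPTO advertises that a target forwards bios, including their blk-crypto (inline encryption) contexts, to the underlying device unmodified; bumping .version lets userspace detect the new capability. A skeleton of how a target declares the flag (a hypothetical sketch, the demo_* names are not real dm code):

#include <linux/device-mapper.h>
#include <linux/module.h>

static int demo_ctr(struct dm_target *ti, unsigned int argc, char **argv);
static void demo_dtr(struct dm_target *ti);
static int demo_map(struct dm_target *ti, struct bio *bio);

static struct target_type demo_target = {
	.name     = "demo-passthrough",
	.version  = {1, 0, 0},
	/* Safe only if every bio reaches a device honoring its crypto context. */
	.features = DM_TARGET_PASSES_CRYPTO,
	.module   = THIS_MODULE,
	.ctr      = demo_ctr,
	.dtr      = demo_dtr,
	.map      = demo_map,
};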
diff --git a/drivers/md/dm-vdo/funnel-workqueue.c b/drivers/md/dm-vdo/funnel-workqueue.c
index ae11941c90a9..0613c82bbe8e 100644
--- a/drivers/md/dm-vdo/funnel-workqueue.c
+++ b/drivers/md/dm-vdo/funnel-workqueue.c
@@ -252,8 +252,7 @@ static void service_work_queue(struct simple_work_queue *queue)
* This speeds up some performance tests; that "other work" might include other VDO
* threads.
*/
- if (need_resched())
- cond_resched();
+ cond_resched();
}
run_finish_hook(queue);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 631a887b487c..d382a390d39a 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -191,7 +191,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
u8 *want_digest, u8 *data)
{
if (unlikely(verity_hash(v, io, data, 1 << v->data_dev_block_bits,
- verity_io_real_digest(v, io), true)))
+ verity_io_real_digest(v, io))))
return 0;
return memcmp(verity_io_real_digest(v, io), want_digest,
@@ -392,7 +392,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
/* Always re-validate the corrected block against the expected hash */
r = verity_hash(v, io, fio->output, 1 << v->data_dev_block_bits,
- verity_io_real_digest(v, io), true);
+ verity_io_real_digest(v, io));
if (unlikely(r < 0))
return r;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 81186bded1ce..66a00a8ccb39 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -19,7 +19,6 @@
#include "dm-audit.h"
#include <linux/module.h>
#include <linux/reboot.h>
-#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/jump_label.h>
#include <linux/security.h>
@@ -61,9 +60,6 @@ module_param_array_named(use_bh_bytes, dm_verity_use_bh_bytes, uint, NULL, 0644)
static DEFINE_STATIC_KEY_FALSE(use_bh_wq_enabled);
-/* Is at least one dm-verity instance using ahash_tfm instead of shash_tfm? */
-static DEFINE_STATIC_KEY_FALSE(ahash_enabled);
-
struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
@@ -118,100 +114,21 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
return block >> (level * v->hash_per_block_bits);
}
-static int verity_ahash_update(struct dm_verity *v, struct ahash_request *req,
- const u8 *data, size_t len,
- struct crypto_wait *wait)
-{
- struct scatterlist sg;
-
- if (likely(!is_vmalloc_addr(data))) {
- sg_init_one(&sg, data, len);
- ahash_request_set_crypt(req, &sg, NULL, len);
- return crypto_wait_req(crypto_ahash_update(req), wait);
- }
-
- do {
- int r;
- size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
-
- flush_kernel_vmap_range((void *)data, this_step);
- sg_init_table(&sg, 1);
- sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
- ahash_request_set_crypt(req, &sg, NULL, this_step);
- r = crypto_wait_req(crypto_ahash_update(req), wait);
- if (unlikely(r))
- return r;
- data += this_step;
- len -= this_step;
- } while (len);
-
- return 0;
-}
-
-/*
- * Wrapper for crypto_ahash_init, which handles verity salting.
- */
-static int verity_ahash_init(struct dm_verity *v, struct ahash_request *req,
- struct crypto_wait *wait, bool may_sleep)
-{
- int r;
-
- ahash_request_set_tfm(req, v->ahash_tfm);
- ahash_request_set_callback(req,
- may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
- crypto_req_done, (void *)wait);
- crypto_init_wait(wait);
-
- r = crypto_wait_req(crypto_ahash_init(req), wait);
-
- if (unlikely(r < 0)) {
- if (r != -ENOMEM)
- DMERR("crypto_ahash_init failed: %d", r);
- return r;
- }
-
- if (likely(v->salt_size && (v->version >= 1)))
- r = verity_ahash_update(v, req, v->salt, v->salt_size, wait);
-
- return r;
-}
-
-static int verity_ahash_final(struct dm_verity *v, struct ahash_request *req,
- u8 *digest, struct crypto_wait *wait)
-{
- int r;
-
- if (unlikely(v->salt_size && (!v->version))) {
- r = verity_ahash_update(v, req, v->salt, v->salt_size, wait);
-
- if (r < 0) {
- DMERR("%s failed updating salt: %d", __func__, r);
- goto out;
- }
- }
-
- ahash_request_set_crypt(req, NULL, digest, 0);
- r = crypto_wait_req(crypto_ahash_final(req), wait);
-out:
- return r;
-}
-
int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
- const u8 *data, size_t len, u8 *digest, bool may_sleep)
+ const u8 *data, size_t len, u8 *digest)
{
+ struct shash_desc *desc = &io->hash_desc;
int r;
- if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm) {
- struct ahash_request *req = verity_io_hash_req(v, io);
- struct crypto_wait wait;
-
- r = verity_ahash_init(v, req, &wait, may_sleep) ?:
- verity_ahash_update(v, req, data, len, &wait) ?:
- verity_ahash_final(v, req, digest, &wait);
+ desc->tfm = v->shash_tfm;
+ if (unlikely(v->initial_hashstate == NULL)) {
+ /* Version 0: salt at end */
+ r = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, data, len) ?:
+ crypto_shash_update(desc, v->salt, v->salt_size) ?:
+ crypto_shash_final(desc, digest);
} else {
- struct shash_desc *desc = verity_io_hash_req(v, io);
-
- desc->tfm = v->shash_tfm;
+ /* Version 1: salt at beginning */
r = crypto_shash_import(desc, v->initial_hashstate) ?:
crypto_shash_finup(desc, data, len, digest);
}
@@ -362,7 +279,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
}
r = verity_hash(v, io, data, 1 << v->hash_dev_block_bits,
- verity_io_real_digest(v, io), !io->in_bh);
+ verity_io_real_digest(v, io));
if (unlikely(r < 0))
goto release_ret_r;
@@ -465,7 +382,7 @@ static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
goto free_ret;
r = verity_hash(v, io, buffer, 1 << v->data_dev_block_bits,
- verity_io_real_digest(v, io), true);
+ verity_io_real_digest(v, io));
if (unlikely(r))
goto free_ret;
@@ -581,7 +498,7 @@ static int verity_verify_io(struct dm_verity_io *io)
}
r = verity_hash(v, io, data, block_size,
- verity_io_real_digest(v, io), !io->in_bh);
+ verity_io_real_digest(v, io));
if (unlikely(r < 0)) {
kunmap_local(data);
return r;
@@ -1092,12 +1009,7 @@ static void verity_dtr(struct dm_target *ti)
kfree(v->zero_digest);
verity_free_sig(v);
- if (v->ahash_tfm) {
- static_branch_dec(&ahash_enabled);
- crypto_free_ahash(v->ahash_tfm);
- } else {
- crypto_free_shash(v->shash_tfm);
- }
+ crypto_free_shash(v->shash_tfm);
kfree(v->alg_name);
@@ -1157,7 +1069,8 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
if (!v->zero_digest)
return r;
- io = kmalloc(sizeof(*io) + v->hash_reqsize, GFP_KERNEL);
+ io = kmalloc(sizeof(*io) + crypto_shash_descsize(v->shash_tfm),
+ GFP_KERNEL);
if (!io)
return r; /* verity_dtr will free zero_digest */
@@ -1168,7 +1081,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
goto out;
r = verity_hash(v, io, zero_data, 1 << v->data_dev_block_bits,
- v->zero_digest, true);
+ v->zero_digest);
out:
kfree(io);
@@ -1324,9 +1237,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name)
{
struct dm_target *ti = v->ti;
- struct crypto_ahash *ahash;
- struct crypto_shash *shash = NULL;
- const char *driver_name;
+ struct crypto_shash *shash;
v->alg_name = kstrdup(alg_name, GFP_KERNEL);
if (!v->alg_name) {
@@ -1334,50 +1245,14 @@ static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name)
return -ENOMEM;
}
- /*
- * Allocate the hash transformation object that this dm-verity instance
- * will use. The vast majority of dm-verity users use CPU-based
- * hashing, so when possible use the shash API to minimize the crypto
- * API overhead. If the ahash API resolves to a different driver
- * (likely an off-CPU hardware offload), use ahash instead. Also use
- * ahash if the obsolete dm-verity format with the appended salt is
- * being used, so that quirk only needs to be handled in one place.
- */
- ahash = crypto_alloc_ahash(alg_name, 0,
- v->use_bh_wq ? CRYPTO_ALG_ASYNC : 0);
- if (IS_ERR(ahash)) {
+ shash = crypto_alloc_shash(alg_name, 0, 0);
+ if (IS_ERR(shash)) {
ti->error = "Cannot initialize hash function";
- return PTR_ERR(ahash);
- }
- driver_name = crypto_ahash_driver_name(ahash);
- if (v->version >= 1 /* salt prepended, not appended? */) {
- shash = crypto_alloc_shash(alg_name, 0, 0);
- if (!IS_ERR(shash) &&
- strcmp(crypto_shash_driver_name(shash), driver_name) != 0) {
- /*
- * ahash gave a different driver than shash, so probably
- * this is a case of real hardware offload. Use ahash.
- */
- crypto_free_shash(shash);
- shash = NULL;
- }
- }
- if (!IS_ERR_OR_NULL(shash)) {
- crypto_free_ahash(ahash);
- ahash = NULL;
- v->shash_tfm = shash;
- v->digest_size = crypto_shash_digestsize(shash);
- v->hash_reqsize = sizeof(struct shash_desc) +
- crypto_shash_descsize(shash);
- DMINFO("%s using shash \"%s\"", alg_name, driver_name);
- } else {
- v->ahash_tfm = ahash;
- static_branch_inc(&ahash_enabled);
- v->digest_size = crypto_ahash_digestsize(ahash);
- v->hash_reqsize = sizeof(struct ahash_request) +
- crypto_ahash_reqsize(ahash);
- DMINFO("%s using ahash \"%s\"", alg_name, driver_name);
+ return PTR_ERR(shash);
}
+ v->shash_tfm = shash;
+ v->digest_size = crypto_shash_digestsize(shash);
+ DMINFO("%s using \"%s\"", alg_name, crypto_shash_driver_name(shash));
if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
ti->error = "Digest size too big";
return -EINVAL;
@@ -1402,7 +1277,7 @@ static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg)
return -EINVAL;
}
}
- if (v->shash_tfm) {
+ if (v->version) { /* Version 1: salt at beginning */
SHASH_DESC_ON_STACK(desc, v->shash_tfm);
int r;
@@ -1681,7 +1556,8 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ti->per_io_data_size = sizeof(struct dm_verity_io) + v->hash_reqsize;
+ ti->per_io_data_size = sizeof(struct dm_verity_io) +
+ crypto_shash_descsize(v->shash_tfm);
r = verity_fec_ctr(v);
if (r)
@@ -1788,10 +1664,7 @@ static int verity_preresume(struct dm_target *ti)
bdev = dm_disk(dm_table_get_md(ti->table))->part0;
root_digest.digest = v->root_digest;
root_digest.digest_len = v->digest_size;
- if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm)
- root_digest.alg = crypto_ahash_alg_name(v->ahash_tfm);
- else
- root_digest.alg = crypto_shash_alg_name(v->shash_tfm);
+ root_digest.alg = crypto_shash_alg_name(v->shash_tfm);
r = security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, &root_digest,
sizeof(root_digest));
@@ -1817,7 +1690,7 @@ static struct target_type verity_target = {
.name = "verity",
/* Note: the LSMs depend on the singleton and immutable features */
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
- .version = {1, 11, 0},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
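The ahash fallback can go because the shash path precomputes the salted prefix once: verity_setup_salt_and_hashstate() absorbs the salt into a descriptor and exports the partial state, and verity_hash() replays it with import + finup per block. A sketch of the precompute step, assuming the kernel shash API (demo_precompute_salted_state() is a hypothetical helper, not dm-verity code):

#include <crypto/hash.h>

/*
 * Absorb the salt once and snapshot the partial hash state; `state`
 * must provide crypto_shash_statesize(tfm) bytes. Per-block hashing is
 * then crypto_shash_import() + crypto_shash_finup(), as above.
 */
static int demo_precompute_salted_state(struct crypto_shash *tfm,
					const u8 *salt, size_t salt_size,
					u8 *state)
{
	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;
	return crypto_shash_init(desc) ?:
	       crypto_shash_update(desc, salt, salt_size) ?:
	       crypto_shash_export(desc, state);
}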
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 8cbb57862ae1..6d141abd965c 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -39,11 +39,10 @@ struct dm_verity {
struct dm_target *ti;
struct dm_bufio_client *bufio;
char *alg_name;
- struct crypto_ahash *ahash_tfm; /* either this or shash_tfm is set */
- struct crypto_shash *shash_tfm; /* either this or ahash_tfm is set */
+ struct crypto_shash *shash_tfm;
u8 *root_digest; /* digest of the root block */
u8 *salt; /* salt: its size is salt_size */
- u8 *initial_hashstate; /* salted initial state, if shash_tfm is set */
+ u8 *initial_hashstate; /* salted initial state, if version >= 1 */
u8 *zero_digest; /* digest for a zero block */
#ifdef CONFIG_SECURITY
u8 *root_digest_sig; /* signature of the root digest */
@@ -61,7 +60,6 @@ struct dm_verity {
bool hash_failed:1; /* set if hash of any block failed */
bool use_bh_wq:1; /* try to verify in BH wq before normal work-queue */
unsigned int digest_size; /* digest size for the current hash algorithm */
- unsigned int hash_reqsize; /* the size of temporary space for crypto */
enum verity_mode mode; /* mode for handling verification errors */
enum verity_mode error_mode;/* mode for handling I/O errors */
unsigned int corrupted_errs;/* Number of errors for corrupted blocks */
@@ -100,19 +98,13 @@ struct dm_verity_io {
u8 want_digest[HASH_MAX_DIGESTSIZE];
/*
- * This struct is followed by a variable-sized hash request of size
- * v->hash_reqsize, either a struct ahash_request or a struct shash_desc
- * (depending on whether ahash_tfm or shash_tfm is being used). To
- * access it, use verity_io_hash_req().
+ * Temporary space for hashing. This is variable-length and must be at
+ * the end of the struct. struct shash_desc is just the fixed part;
+ * it's followed by a context of size crypto_shash_descsize(shash_tfm).
*/
+ struct shash_desc hash_desc;
};
-static inline void *verity_io_hash_req(struct dm_verity *v,
- struct dm_verity_io *io)
-{
- return io + 1;
-}
-
static inline u8 *verity_io_real_digest(struct dm_verity *v,
struct dm_verity_io *io)
{
@@ -126,7 +118,7 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
}
extern int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
- const u8 *data, size_t len, u8 *digest, bool may_sleep);
+ const u8 *data, size_t len, u8 *digest);
extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
sector_t block, u8 *digest, bool *is_zero);
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index a428e1cacf07..d8de4a3076a1 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -13,7 +13,6 @@
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
-#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>
#include <linux/delay.h>
#include "dm-io-tracker.h"
@@ -256,7 +255,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
int r;
loff_t s;
long p, da;
- pfn_t pfn;
+ unsigned long pfn;
int id;
struct page **pages;
sector_t offset;
@@ -290,7 +289,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
r = da;
goto err2;
}
- if (!pfn_t_has_page(pfn)) {
+ if (!pfn_valid(pfn)) {
wc->memory_map = NULL;
r = -EOPNOTSUPP;
goto err2;
@@ -314,13 +313,13 @@ static int persistent_memory_claim(struct dm_writecache *wc)
r = daa ? daa : -EINVAL;
goto err3;
}
- if (!pfn_t_has_page(pfn)) {
+ if (!pfn_valid(pfn)) {
r = -EOPNOTSUPP;
goto err3;
}
while (daa-- && i < p) {
- pages[i++] = pfn_t_to_page(pfn);
- pfn.val++;
+ pages[i++] = pfn_to_page(pfn);
+ pfn++;
if (!(i & 15))
cond_resched();
}
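With pfn_t removed, dax_direct_access() hands back a bare pfn, and the old pfn_t_has_page()/pfn_t_to_page() helpers give way to the generic pfn_valid()/pfn_to_page(). A condensed sketch of the new calling convention (hypothetical helper; error handling reduced to the essentials):

#include <linux/dax.h>
#include <linux/errno.h>
#include <linux/mm.h>

/* Collect struct pages for a contiguous DAX range, as the loop above does. */
static long demo_collect_pages(struct dax_device *dax_dev, pgoff_t pgoff,
			       long nr_pages, struct page **pages)
{
	void *kaddr;
	unsigned long pfn;
	long i, avail;

	avail = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
				  &kaddr, &pfn);
	if (avail < 0)
		return avail;
	if (!pfn_valid(pfn))
		return -EOPNOTSUPP;
	for (i = 0; i < avail; i++)
		pages[i] = pfn_to_page(pfn + i);
	return avail;
}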
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 3d31b82e0730..78e17dd4d01b 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -467,8 +467,6 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
bdev_offset_from_zone_start(disk->part0,
clone->bi_iter.bi_sector);
}
-
- return;
}
static int dm_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 5da3db06da10..9da329078ea4 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1062,7 +1062,7 @@ static int dmz_iterate_devices(struct dm_target *ti,
struct dmz_target *dmz = ti->private;
unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
sector_t capacity;
- int i, r;
+ int i, r = 0;
for (i = 0; i < dmz->nr_ddevs; i++) {
capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index abfe0392b5a4..a44e8c2dccee 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1024,10 +1024,8 @@ static void dm_wq_requeue_work(struct work_struct *work)
*
* 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
*/
-static void dm_io_complete(struct dm_io *io)
+static inline void dm_io_complete(struct dm_io *io)
{
- bool first_requeue;
-
/*
* Only dm_io that has been split needs two stage requeue, otherwise
* we may run into long bio clone chain during suspend and OOM could
@@ -1036,12 +1034,7 @@ static void dm_io_complete(struct dm_io *io)
* Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they
* also aren't handled via the first stage requeue.
*/
- if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
- first_requeue = true;
- else
- first_requeue = false;
-
- __dm_io_complete(io, first_requeue);
+ __dm_io_complete(io, dm_io_flagged(io, DM_IO_WAS_SPLIT));
}
/*
@@ -1218,7 +1211,7 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
- pfn_t *pfn)
+ unsigned long *pfn)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 7f524a26cebc..334b71404930 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1987,12 +1987,12 @@ static void bitmap_dirty_bits(struct mddev *mddev, unsigned long s,
md_bitmap_set_memory_bits(bitmap, sec, 1);
md_bitmap_file_set_bit(bitmap, sec);
- if (sec < bitmap->mddev->recovery_cp)
+ if (sec < bitmap->mddev->resync_offset)
/* We are asserting that the array is dirty,
- * so move the recovery_cp address back so
+ * so move the resync_offset address back so
* that it is obvious that it is dirty
*/
- bitmap->mddev->recovery_cp = sec;
+ bitmap->mddev->resync_offset = sec;
}
}
@@ -2258,7 +2258,7 @@ static int bitmap_load(struct mddev *mddev)
|| bitmap->events_cleared == mddev->events)
/* no need to keep dirty bits to optimise a
* re-add of a missing device */
- start = mddev->recovery_cp;
+ start = mddev->resync_offset;
mutex_lock(&mddev->bitmap_info.mutex);
err = md_bitmap_init_from_disk(bitmap, start);
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 94221d964d4f..5497eaee96e7 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -337,11 +337,11 @@ static void recover_bitmaps(struct md_thread *thread)
md_wakeup_thread(mddev->sync_thread);
if (hi > 0) {
- if (lo < mddev->recovery_cp)
- mddev->recovery_cp = lo;
+ if (lo < mddev->resync_offset)
+ mddev->resync_offset = lo;
/* wake up thread to continue resync in case resync
* is not finished */
- if (mddev->recovery_cp != MaxSector) {
+ if (mddev->resync_offset != MaxSector) {
/*
* clear the REMOTE flag since we will launch
* resync thread in current node.
@@ -863,9 +863,9 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
lockres_free(bm_lockres);
continue;
}
- if ((hi > 0) && (lo < mddev->recovery_cp)) {
+ if ((hi > 0) && (lo < mddev->resync_offset)) {
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- mddev->recovery_cp = lo;
+ mddev->resync_offset = lo;
md_check_recovery(mddev);
}
@@ -1027,7 +1027,7 @@ static int leave(struct mddev *mddev)
* Also, we should send BITMAP_NEEDS_SYNC message in
* case reshaping is interrupted.
*/
- if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) ||
+ if ((cinfo->slot_number > 0 && mddev->resync_offset != MaxSector) ||
(mddev->reshape_position != MaxSector &&
test_bit(MD_CLOSING, &mddev->flags)))
resync_bitmap(mddev);
@@ -1605,8 +1605,8 @@ static int gather_bitmaps(struct md_rdev *rdev)
pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
goto out;
}
- if ((hi > 0) && (lo < mddev->recovery_cp))
- mddev->recovery_cp = lo;
+ if ((hi > 0) && (lo < mddev->resync_offset))
+ mddev->resync_offset = lo;
}
out:
return err;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 046fe85c76fe..ac85ec73a409 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -637,6 +637,12 @@ static void __mddev_put(struct mddev *mddev)
return;
/*
+ * If the array is freed by stopping it, MD_DELETED is set by
+ * do_md_stop(). Set MD_DELETED here as well, in case the mddev is
+ * freed directly by closing an mddev that was created by create_on_open.
+ */
+ set_bit(MD_DELETED, &mddev->flags);
+ /*
* Call queue_work inside the spinlock so that flush_workqueue() after
* mddev_find will succeed in waiting for the work to be done.
*/
@@ -1409,13 +1415,13 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, stru
mddev->layout = -1;
if (sb->state & (1<<MD_SB_CLEAN))
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
else {
if (sb->events_hi == sb->cp_events_hi &&
sb->events_lo == sb->cp_events_lo) {
- mddev->recovery_cp = sb->recovery_cp;
+ mddev->resync_offset = sb->recovery_cp;
} else
- mddev->recovery_cp = 0;
+ mddev->resync_offset = 0;
}
memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
@@ -1541,13 +1547,13 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
mddev->minor_version = sb->minor_version;
if (mddev->in_sync)
{
- sb->recovery_cp = mddev->recovery_cp;
+ sb->recovery_cp = mddev->resync_offset;
sb->cp_events_hi = (mddev->events>>32);
sb->cp_events_lo = (u32)mddev->events;
- if (mddev->recovery_cp == MaxSector)
+ if (mddev->resync_offset == MaxSector)
sb->state = (1<< MD_SB_CLEAN);
} else
sb->recovery_cp = 0;
sb->layout = mddev->layout;
sb->chunk_size = mddev->chunk_sectors << 9;
@@ -1895,7 +1901,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
mddev->bitmap_info.default_space = (4096-1024) >> 9;
mddev->reshape_backwards = 0;
- mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
+ mddev->resync_offset = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
mddev->max_disks = (4096-256)/2;
@@ -2081,7 +2087,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->utime = cpu_to_le64((__u64)mddev->utime);
sb->events = cpu_to_le64(mddev->events);
if (mddev->in_sync)
- sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
+ sb->resync_offset = cpu_to_le64(mddev->resync_offset);
else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
sb->resync_offset = cpu_to_le64(MaxSector);
else
@@ -2761,7 +2767,7 @@ repeat:
/* If this is just a dirty<->clean transition, and the array is clean
* and 'events' is odd, we can roll back to the previous clean state */
if (nospares
- && (mddev->in_sync && mddev->recovery_cp == MaxSector)
+ && (mddev->in_sync && mddev->resync_offset == MaxSector)
&& mddev->can_decrease_events
&& mddev->events != 1) {
mddev->events--;
@@ -4297,9 +4303,9 @@ __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
- if (mddev->recovery_cp == MaxSector)
+ if (mddev->resync_offset == MaxSector)
return sprintf(page, "none\n");
- return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
+ return sprintf(page, "%llu\n", (unsigned long long)mddev->resync_offset);
}
static ssize_t
@@ -4325,7 +4331,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
err = -EBUSY;
if (!err) {
- mddev->recovery_cp = n;
+ mddev->resync_offset = n;
if (mddev->pers)
set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
@@ -6417,7 +6423,7 @@ static void md_clean(struct mddev *mddev)
mddev->external_size = 0;
mddev->dev_sectors = 0;
mddev->raid_disks = 0;
- mddev->recovery_cp = 0;
+ mddev->resync_offset = 0;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
@@ -7362,9 +7368,9 @@ int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
* openned
*/
if (info->state & (1<<MD_SB_CLEAN))
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
else
- mddev->recovery_cp = 0;
+ mddev->resync_offset = 0;
mddev->persistent = ! info->not_persistent;
mddev->external = 0;
@@ -8303,7 +8309,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, "\tresync=REMOTE");
return 1;
}
- if (mddev->recovery_cp < MaxSector) {
+ if (mddev->resync_offset < MaxSector) {
seq_printf(seq, "\tresync=PENDING");
return 1;
}
@@ -8946,7 +8952,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
return mddev->resync_min;
case ACTION_RESYNC:
if (!mddev->bitmap)
- return mddev->recovery_cp;
+ return mddev->resync_offset;
return 0;
case ACTION_RESHAPE:
/*
@@ -9184,8 +9190,8 @@ void md_do_sync(struct md_thread *thread)
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
- j > mddev->recovery_cp)
- mddev->recovery_cp = j;
+ j > mddev->resync_offset)
+ mddev->resync_offset = j;
update_time = jiffies;
set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
sysfs_notify_dirent_safe(mddev->sysfs_completed);
@@ -9305,19 +9311,19 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync > MD_RESYNC_ACTIVE) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
- if (mddev->curr_resync >= mddev->recovery_cp) {
+ if (mddev->curr_resync >= mddev->resync_offset) {
pr_debug("md: checkpointing %s of %s.\n",
desc, mdname(mddev));
if (test_bit(MD_RECOVERY_ERROR,
&mddev->recovery))
- mddev->recovery_cp =
+ mddev->resync_offset =
mddev->curr_resync_completed;
else
- mddev->recovery_cp =
+ mddev->resync_offset =
mddev->curr_resync;
}
} else
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
@@ -9421,6 +9427,12 @@ static bool rdev_is_spare(struct md_rdev *rdev)
static bool rdev_addable(struct md_rdev *rdev)
{
+ struct mddev *mddev;
+
+ mddev = READ_ONCE(rdev->mddev);
+ if (!mddev)
+ return false;
+
/* rdev is already used, don't add it again. */
if (test_bit(Candidate, &rdev->flags) || rdev->raid_disk >= 0 ||
test_bit(Faulty, &rdev->flags))
@@ -9431,7 +9443,7 @@ static bool rdev_addable(struct md_rdev *rdev)
return true;
/* Allow to add if array is read-write. */
- if (md_is_rdwr(rdev->mddev))
+ if (md_is_rdwr(mddev))
return true;
/*
@@ -9533,7 +9545,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
}
/* Check if resync is in progress. */
- if (mddev->recovery_cp < MaxSector) {
+ if (mddev->resync_offset < MaxSector) {
remove_spares(mddev, NULL);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
@@ -9714,7 +9726,7 @@ void md_check_recovery(struct mddev *mddev)
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
(mddev->safemode == 2
- && !mddev->in_sync && mddev->recovery_cp == MaxSector)
+ && !mddev->in_sync && mddev->resync_offset == MaxSector)
))
return;
@@ -9771,8 +9783,8 @@ void md_check_recovery(struct mddev *mddev)
* remove disk.
*/
rdev_for_each_safe(rdev, tmp, mddev) {
- if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
- rdev->raid_disk < 0)
+ if (rdev->raid_disk < 0 &&
+ test_and_clear_bit(ClusterRemove, &rdev->flags))
md_kick_rdev_from_array(rdev);
}
}
@@ -10078,8 +10090,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
/* Check for change of roles in the active devices */
rdev_for_each_safe(rdev2, tmp, mddev) {
- if (test_bit(Faulty, &rdev2->flags))
+ if (test_bit(Faulty, &rdev2->flags)) {
+ if (test_bit(ClusterRemove, &rdev2->flags))
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
continue;
+ }
/* Check if the roles changed */
role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
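The reordered ClusterRemove check in md_check_recovery() above is not cosmetic: && short-circuits and test_and_clear_bit() has a side effect, so evaluating it first consumed the flag even for rdevs that still hold a raid_disk and therefore skip the kick. In miniature (placeholder names, illustrative only):

#include <linux/bitops.h>
#include <linux/types.h>

static unsigned long demo_flags;

static void demo_act(void);

static void demo(bool cheap_check)
{
	/* Side effect first: the bit is cleared even when cheap_check is
	 * false, so no later pass can ever see it again. */
	if (test_and_clear_bit(0, &demo_flags) && cheap_check)
		demo_act();

	/* Pure test first: short-circuiting skips the atomic clear unless
	 * the result will actually be acted on. */
	if (cheap_check && test_and_clear_bit(0, &demo_flags))
		demo_act();
}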
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 67b365621507..51af29a03079 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -523,7 +523,7 @@ struct mddev {
unsigned long normal_io_events; /* IO event timestamp */
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
- sector_t recovery_cp;
+ sector_t resync_offset;
sector_t resync_min; /* user requested sync
* starts here */
sector_t resync_max; /* resync should pause
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index cbe2a9054cb9..f1d8811a542a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -674,7 +674,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
mddev->raid_disks--;
mddev->delta_disks = -1;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
@@ -717,7 +717,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
mddev->raid_disks += mddev->delta_disks;
mddev->degraded = 0;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
@@ -760,7 +760,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
mddev->delta_disks = 1 - mddev->raid_disks;
mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index b8b3a9069701..52881e6032da 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -283,7 +283,7 @@ static inline int raid1_check_read_range(struct md_rdev *rdev,
static inline bool raid1_should_read_first(struct mddev *mddev,
sector_t this_sector, int len)
{
- if ((mddev->recovery_cp < this_sector + len))
+ if ((mddev->resync_offset < this_sector + len))
return true;
if (mddev_is_clustered(mddev) &&
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 64b8176907a9..408c26398321 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -127,10 +127,9 @@ static inline struct r1bio *get_resync_r1bio(struct bio *bio)
return get_resync_pages(bio)->raid_bio;
}
-static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
+static void *r1bio_pool_alloc(gfp_t gfp_flags, struct r1conf *conf)
{
- struct pool_info *pi = data;
- int size = offsetof(struct r1bio, bios[pi->raid_disks]);
+ int size = offsetof(struct r1bio, bios[conf->raid_disks * 2]);
/* allocate a r1bio with room for raid_disks entries in the bios array */
return kzalloc(size, gfp_flags);
@@ -145,18 +144,18 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
- struct pool_info *pi = data;
+ struct r1conf *conf = data;
struct r1bio *r1_bio;
struct bio *bio;
int need_pages;
int j;
struct resync_pages *rps;
- r1_bio = r1bio_pool_alloc(gfp_flags, pi);
+ r1_bio = r1bio_pool_alloc(gfp_flags, conf);
if (!r1_bio)
return NULL;
- rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
+ rps = kmalloc_array(conf->raid_disks * 2, sizeof(struct resync_pages),
gfp_flags);
if (!rps)
goto out_free_r1bio;
@@ -164,7 +163,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
/*
* Allocate bios : 1 for reading, n-1 for writing
*/
- for (j = pi->raid_disks ; j-- ; ) {
+ for (j = conf->raid_disks * 2; j-- ; ) {
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
@@ -177,11 +176,11 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* If this is a user-requested check/repair, allocate
* RESYNC_PAGES for each bio.
*/
- if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
- need_pages = pi->raid_disks;
+ if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery))
+ need_pages = conf->raid_disks * 2;
else
need_pages = 1;
- for (j = 0; j < pi->raid_disks; j++) {
+ for (j = 0; j < conf->raid_disks * 2; j++) {
struct resync_pages *rp = &rps[j];
bio = r1_bio->bios[j];
@@ -207,7 +206,7 @@ out_free_pages:
resync_free_pages(&rps[j]);
out_free_bio:
- while (++j < pi->raid_disks) {
+ while (++j < conf->raid_disks * 2) {
bio_uninit(r1_bio->bios[j]);
kfree(r1_bio->bios[j]);
}
@@ -220,12 +219,12 @@ out_free_r1bio:
static void r1buf_pool_free(void *__r1_bio, void *data)
{
- struct pool_info *pi = data;
+ struct r1conf *conf = data;
int i;
struct r1bio *r1bio = __r1_bio;
struct resync_pages *rp = NULL;
- for (i = pi->raid_disks; i--; ) {
+ for (i = conf->raid_disks * 2; i--; ) {
rp = get_resync_pages(r1bio->bios[i]);
resync_free_pages(rp);
bio_uninit(r1bio->bios[i]);
@@ -255,7 +254,7 @@ static void free_r1bio(struct r1bio *r1_bio)
struct r1conf *conf = r1_bio->mddev->private;
put_all_bios(conf, r1_bio);
- mempool_free(r1_bio, &conf->r1bio_pool);
+ mempool_free(r1_bio, conf->r1bio_pool);
}
static void put_buf(struct r1bio *r1_bio)
@@ -1305,9 +1304,8 @@ alloc_r1bio(struct mddev *mddev, struct bio *bio)
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
- r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
- /* Ensure no bio records IO_BLOCKED */
- memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+ memset(r1_bio, 0, offsetof(struct r1bio, bios[conf->raid_disks * 2]));
init_r1bio(r1_bio, mddev, bio);
return r1_bio;
}
@@ -2747,7 +2745,7 @@ static int init_resync(struct r1conf *conf)
BUG_ON(mempool_initialized(&conf->r1buf_pool));
return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
- r1buf_pool_free, conf->poolinfo);
+ r1buf_pool_free, conf);
}
static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
@@ -2757,7 +2755,7 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
struct bio *bio;
int i;
- for (i = conf->poolinfo->raid_disks; i--; ) {
+ for (i = conf->raid_disks * 2; i--; ) {
bio = r1bio->bios[i];
rps = bio->bi_private;
bio_reset(bio, NULL, 0);
@@ -2822,7 +2820,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
}
if (mddev->bitmap == NULL &&
- mddev->recovery_cp == MaxSector &&
+ mddev->resync_offset == MaxSector &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
conf->fullsync == 0) {
*skipped = 1;
@@ -3085,6 +3083,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
int i;
struct raid1_info *disk;
struct md_rdev *rdev;
+ size_t r1bio_size;
int err = -ENOMEM;
conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
@@ -3121,21 +3120,15 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (!conf->tmppage)
goto abort;
- conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
- if (!conf->poolinfo)
- goto abort;
- conf->poolinfo->raid_disks = mddev->raid_disks * 2;
- err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
- rbio_pool_free, conf->poolinfo);
- if (err)
+ r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]);
+ conf->r1bio_pool = mempool_create_kmalloc_pool(NR_RAID_BIOS, r1bio_size);
+ if (!conf->r1bio_pool)
goto abort;
err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
if (err)
goto abort;
- conf->poolinfo->mddev = mddev;
-
err = -EINVAL;
spin_lock_init(&conf->device_lock);
conf->raid_disks = mddev->raid_disks;
@@ -3198,10 +3191,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
abort:
if (conf) {
- mempool_exit(&conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
- kfree(conf->poolinfo);
kfree(conf->nr_pending);
kfree(conf->nr_waiting);
kfree(conf->nr_queued);
@@ -3282,9 +3274,9 @@ static int raid1_run(struct mddev *mddev)
}
if (conf->raid_disks - mddev->degraded == 1)
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
- if (mddev->recovery_cp != MaxSector)
+ if (mddev->resync_offset != MaxSector)
pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
mdname(mddev));
pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
@@ -3311,10 +3303,9 @@ static void raid1_free(struct mddev *mddev, void *priv)
{
struct r1conf *conf = priv;
- mempool_exit(&conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
- kfree(conf->poolinfo);
kfree(conf->nr_pending);
kfree(conf->nr_waiting);
kfree(conf->nr_queued);
@@ -3345,8 +3336,8 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
md_set_array_sectors(mddev, newsize);
if (sectors > mddev->dev_sectors &&
- mddev->recovery_cp > mddev->dev_sectors) {
- mddev->recovery_cp = mddev->dev_sectors;
+ mddev->resync_offset > mddev->dev_sectors) {
+ mddev->resync_offset = mddev->dev_sectors;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->dev_sectors = sectors;
@@ -3367,17 +3358,13 @@ static int raid1_reshape(struct mddev *mddev)
* At the same time, we "pack" the devices so that all the missing
* devices have the higher raid_disk numbers.
*/
- mempool_t newpool, oldpool;
- struct pool_info *newpoolinfo;
+ mempool_t *newpool, *oldpool;
+ size_t new_r1bio_size;
struct raid1_info *newmirrors;
struct r1conf *conf = mddev->private;
int cnt, raid_disks;
unsigned long flags;
int d, d2;
- int ret;
-
- memset(&newpool, 0, sizeof(newpool));
- memset(&oldpool, 0, sizeof(oldpool));
/* Cannot change chunk_size, layout, or level */
if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
@@ -3403,24 +3390,16 @@ static int raid1_reshape(struct mddev *mddev)
return -EBUSY;
}
- newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
- if (!newpoolinfo)
+ new_r1bio_size = offsetof(struct r1bio, bios[raid_disks * 2]);
+ newpool = mempool_create_kmalloc_pool(NR_RAID_BIOS, new_r1bio_size);
+ if (!newpool) {
return -ENOMEM;
- newpoolinfo->mddev = mddev;
- newpoolinfo->raid_disks = raid_disks * 2;
-
- ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
- rbio_pool_free, newpoolinfo);
- if (ret) {
- kfree(newpoolinfo);
- return ret;
}
newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
raid_disks, 2),
GFP_KERNEL);
if (!newmirrors) {
- kfree(newpoolinfo);
- mempool_exit(&newpool);
+ mempool_destroy(newpool);
return -ENOMEM;
}
@@ -3429,7 +3408,6 @@ static int raid1_reshape(struct mddev *mddev)
/* ok, everything is stopped */
oldpool = conf->r1bio_pool;
conf->r1bio_pool = newpool;
- init_waitqueue_head(&conf->r1bio_pool.wait);
for (d = d2 = 0; d < conf->raid_disks; d++) {
struct md_rdev *rdev = conf->mirrors[d].rdev;
@@ -3446,8 +3424,6 @@ static int raid1_reshape(struct mddev *mddev)
}
kfree(conf->mirrors);
conf->mirrors = newmirrors;
- kfree(conf->poolinfo);
- conf->poolinfo = newpoolinfo;
spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded += (raid_disks - conf->raid_disks);
@@ -3461,7 +3437,7 @@ static int raid1_reshape(struct mddev *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
- mempool_exit(&oldpool);
+ mempool_destroy(oldpool);
return 0;
}
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 33f318fcc268..d236ef179cfb 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -49,22 +49,6 @@ struct raid1_info {
sector_t seq_start;
};
-/*
- * memory pools need a pointer to the mddev, so they can force an unplug
- * when memory is tight, and a count of the number of drives that the
- * pool was allocated for, so they know how much to allocate and free.
- * mddev->raid_disks cannot be used, as it can change while a pool is active
- * These two datums are stored in a kmalloced struct.
- * The 'raid_disks' here is twice the raid_disks in r1conf.
- * This allows space for each 'real' device can have a replacement in the
- * second half of the array.
- */
-
-struct pool_info {
- struct mddev *mddev;
- int raid_disks;
-};
-
struct r1conf {
struct mddev *mddev;
struct raid1_info *mirrors; /* twice 'raid_disks' to
@@ -114,11 +98,7 @@ struct r1conf {
*/
int recovery_disabled;
- /* poolinfo contains information about the content of the
- * mempools - it changes when the array grows or shrinks
- */
- struct pool_info *poolinfo;
- mempool_t r1bio_pool;
+ mempool_t *r1bio_pool;
mempool_t r1buf_pool;
struct bio_set bio_split;
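Dropping struct pool_info works because an r1bio's size is a pure function of raid_disks at pool (re)creation time, so a plain kmalloc-backed mempool sized with offsetof() over the flexible bios[] tail replaces the custom alloc/free pair. One consequence, visible in alloc_r1bio() above, is that kmalloc pools return uninitialized memory, hence the explicit memset. A sketch of the sizing pattern (demo names are hypothetical):

#include <linux/mempool.h>
#include <linux/stddef.h>

struct demo_obj {
	int nr;
	void *slots[];		/* flexible tail, like r1bio->bios[] */
};

static mempool_t *demo_create_pool(int min_nr, int nr_slots)
{
	size_t sz = offsetof(struct demo_obj, slots[nr_slots]);

	/* Backed by kmalloc()/kfree(); objects arrive uninitialized. */
	return mempool_create_kmalloc_pool(min_nr, sz);
}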
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 95dc354a86a0..b60c30bfb6c7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2117,7 +2117,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int last = conf->geo.raid_disks - 1;
struct raid10_info *p;
- if (mddev->recovery_cp < MaxSector)
+ if (mddev->resync_offset < MaxSector)
/* only hot-add to in-sync arrays, as recovery is
* very different from resync
*/
@@ -3185,7 +3185,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* of a clean array, like RAID1 does.
*/
if (mddev->bitmap == NULL &&
- mddev->recovery_cp == MaxSector &&
+ mddev->resync_offset == MaxSector &&
mddev->reshape_position == MaxSector &&
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
@@ -4145,7 +4145,7 @@ static int raid10_run(struct mddev *mddev)
disk->recovery_disabled = mddev->recovery_disabled - 1;
}
- if (mddev->recovery_cp != MaxSector)
+ if (mddev->resync_offset != MaxSector)
pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
mdname(mddev));
pr_info("md/raid10:%s: active with %d out of %d devices\n",
@@ -4245,8 +4245,8 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
md_set_array_sectors(mddev, size);
if (sectors > mddev->dev_sectors &&
- mddev->recovery_cp > oldsize) {
- mddev->recovery_cp = oldsize;
+ mddev->resync_offset > oldsize) {
+ mddev->resync_offset = oldsize;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
calc_sectors(conf, sectors);
@@ -4275,7 +4275,7 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
mddev->delta_disks = mddev->raid_disks;
mddev->raid_disks *= 2;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
mddev->dev_sectors = size;
conf = setup_conf(mddev);
@@ -5087,8 +5087,8 @@ static void raid10_finish_reshape(struct mddev *mddev)
return;
if (mddev->delta_disks > 0) {
- if (mddev->recovery_cp > mddev->resync_max_sectors) {
- mddev->recovery_cp = mddev->resync_max_sectors;
+ if (mddev->resync_offset > mddev->resync_max_sectors) {
+ mddev->resync_offset = mddev->resync_max_sectors;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->resync_max_sectors = mddev->array_sectors;
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index c0fb335311aa..56b234683ee6 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1163,7 +1163,7 @@ static int ppl_load_distributed(struct ppl_log *log)
le64_to_cpu(pplhdr->generation));
/* attempt to recover from log if we are starting a dirty array */
- if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
+ if (pplhdr && !mddev->pers && mddev->resync_offset != MaxSector)
ret = ppl_recover(log, pplhdr, pplhdr_offset);
/* write empty header if we are starting the array */
@@ -1422,14 +1422,14 @@ int ppl_init_log(struct r5conf *conf)
if (ret) {
goto err;
- } else if (!mddev->pers && mddev->recovery_cp == 0 &&
+ } else if (!mddev->pers && mddev->resync_offset == 0 &&
ppl_conf->recovered_entries > 0 &&
ppl_conf->mismatch_count == 0) {
/*
* If we are starting a dirty array and the recovery succeeds
* without any issues, set the array as clean.
*/
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
} else if (mddev->pers && ppl_conf->mismatch_count > 0) {
/* no mismatch allowed when enabling PPL for a running array */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7ec61ee7b218..023649fe2476 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3740,7 +3740,7 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)
&& (rdev->recovery_offset <= sh->sector
- || rdev->mddev->recovery_cp <= sh->sector))
+ || rdev->mddev->resync_offset <= sh->sector))
rv = 1;
return rv;
}
@@ -3832,7 +3832,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
* is missing/faulty, then we need to read everything we can.
*/
if (!force_rcw &&
- sh->sector < sh->raid_conf->mddev->recovery_cp)
+ sh->sector < sh->raid_conf->mddev->resync_offset)
/* reconstruct-write isn't being forced */
return 0;
for (i = 0; i < s->failed && i < 2; i++) {
@@ -4097,7 +4097,7 @@ static int handle_stripe_dirtying(struct r5conf *conf,
int disks)
{
int rmw = 0, rcw = 0, i;
- sector_t recovery_cp = conf->mddev->recovery_cp;
+ sector_t resync_offset = conf->mddev->resync_offset;
/* Check whether resync is now happening or should start.
* If yes, then the array is dirty (after unclean shutdown or
@@ -4107,14 +4107,14 @@ static int handle_stripe_dirtying(struct r5conf *conf,
* generate correct data from the parity.
*/
if (conf->rmw_level == PARITY_DISABLE_RMW ||
- (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
+ (resync_offset < MaxSector && sh->sector >= resync_offset &&
s->failed == 0)) {
/* Calculate the real rcw later - for now make it
* look like rcw is cheaper
*/
rcw = 1; rmw = 2;
- pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
- conf->rmw_level, (unsigned long long)recovery_cp,
+ pr_debug("force RCW rmw_level=%u, resync_offset=%llu sh->sector=%llu\n",
+ conf->rmw_level, (unsigned long long)resync_offset,
(unsigned long long)sh->sector);
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
@@ -4770,14 +4770,14 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (test_bit(STRIPE_SYNCING, &sh->state)) {
/* If there is a failed device being replaced,
* we must be recovering.
- * else if we are after recovery_cp, we must be syncing
+ * else if we are after resync_offset, we must be syncing
* else if MD_RECOVERY_REQUESTED is set, we also are syncing.
* else we can only be replacing
* sync and recovery both need to read all devices, and so
* use the same flag.
*/
if (do_recovery ||
- sh->sector >= conf->mddev->recovery_cp ||
+ sh->sector >= conf->mddev->resync_offset ||
test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
s->syncing = 1;
else
@@ -7780,7 +7780,7 @@ static int raid5_run(struct mddev *mddev)
int first = 1;
int ret = -EIO;
- if (mddev->recovery_cp != MaxSector)
+ if (mddev->resync_offset != MaxSector)
pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
mdname(mddev));
@@ -7921,7 +7921,7 @@ static int raid5_run(struct mddev *mddev)
mdname(mddev));
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
- } else if (mddev->recovery_cp == MaxSector)
+ } else if (mddev->resync_offset == MaxSector)
set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
}
@@ -7988,7 +7988,7 @@ static int raid5_run(struct mddev *mddev)
mddev->resync_max_sectors = mddev->dev_sectors;
if (mddev->degraded > dirty_parity_disks &&
- mddev->recovery_cp != MaxSector) {
+ mddev->resync_offset != MaxSector) {
if (test_bit(MD_HAS_PPL, &mddev->flags))
pr_crit("md/raid:%s: starting dirty degraded array with PPL.\n",
mdname(mddev));
@@ -8328,8 +8328,8 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
md_set_array_sectors(mddev, newsize);
if (sectors > mddev->dev_sectors &&
- mddev->recovery_cp > mddev->dev_sectors) {
- mddev->recovery_cp = mddev->dev_sectors;
+ mddev->resync_offset > mddev->dev_sectors) {
+ mddev->resync_offset = mddev->dev_sectors;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->dev_sectors = sectors;
@@ -8423,7 +8423,7 @@ static int raid5_start_reshape(struct mddev *mddev)
return -EINVAL;
/* raid5 can't handle concurrent reshape and recovery */
- if (mddev->recovery_cp < MaxSector)
+ if (mddev->resync_offset < MaxSector)
return -EBUSY;
for (i = 0; i < conf->raid_disks; i++)
if (conf->disks[i].replacement)
@@ -8648,7 +8648,7 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
mddev->raid_disks += 1;
mddev->delta_disks = 1;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
return setup_conf(mddev);
}
diff --git a/drivers/media/cec/core/cec-pin-error-inj.c b/drivers/media/cec/core/cec-pin-error-inj.c
index 6e61a04b8168..d9e613c7ce3f 100644
--- a/drivers/media/cec/core/cec-pin-error-inj.c
+++ b/drivers/media/cec/core/cec-pin-error-inj.c
@@ -91,16 +91,22 @@ bool cec_pin_error_inj_parse_line(struct cec_adapter *adap, char *line)
if (!strcmp(token, "clear")) {
memset(pin->error_inj, 0, sizeof(pin->error_inj));
pin->rx_toggle = pin->tx_toggle = false;
+ pin->rx_no_low_drive = false;
pin->tx_ignore_nack_until_eom = false;
pin->tx_custom_pulse = false;
pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT;
pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT;
+ pin->tx_glitch_low_usecs = CEC_TIM_GLITCH_DEFAULT;
+ pin->tx_glitch_high_usecs = CEC_TIM_GLITCH_DEFAULT;
+ pin->tx_glitch_falling_edge = false;
+ pin->tx_glitch_rising_edge = false;
return true;
}
if (!strcmp(token, "rx-clear")) {
for (i = 0; i <= CEC_ERROR_INJ_OP_ANY; i++)
pin->error_inj[i] &= ~CEC_ERROR_INJ_RX_MASK;
pin->rx_toggle = false;
+ pin->rx_no_low_drive = false;
return true;
}
if (!strcmp(token, "tx-clear")) {
@@ -111,6 +117,14 @@ bool cec_pin_error_inj_parse_line(struct cec_adapter *adap, char *line)
pin->tx_custom_pulse = false;
pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT;
pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT;
+ pin->tx_glitch_low_usecs = CEC_TIM_GLITCH_DEFAULT;
+ pin->tx_glitch_high_usecs = CEC_TIM_GLITCH_DEFAULT;
+ pin->tx_glitch_falling_edge = false;
+ pin->tx_glitch_rising_edge = false;
+ return true;
+ }
+ if (!strcmp(token, "rx-no-low-drive")) {
+ pin->rx_no_low_drive = true;
return true;
}
if (!strcmp(token, "tx-ignore-nack-until-eom")) {
@@ -122,6 +136,14 @@ bool cec_pin_error_inj_parse_line(struct cec_adapter *adap, char *line)
cec_pin_start_timer(pin);
return true;
}
+ if (!strcmp(token, "tx-glitch-falling-edge")) {
+ pin->tx_glitch_falling_edge = true;
+ return true;
+ }
+ if (!strcmp(token, "tx-glitch-rising-edge")) {
+ pin->tx_glitch_rising_edge = true;
+ return true;
+ }
if (!p)
return false;
@@ -139,7 +161,23 @@ bool cec_pin_error_inj_parse_line(struct cec_adapter *adap, char *line)
if (kstrtou32(p, 0, &usecs) || usecs > 10000000)
return false;
pin->tx_custom_high_usecs = usecs;
return true;
}
+ if (!strcmp(token, "tx-glitch-low-usecs")) {
+ u32 usecs;
+
+ if (kstrtou32(p, 0, &usecs) || usecs > 100)
+ return false;
+ pin->tx_glitch_low_usecs = usecs;
+ return true;
+ }
+ if (!strcmp(token, "tx-glitch-high-usecs")) {
+ u32 usecs;
+
+ if (kstrtou32(p, 0, &usecs) || usecs > 100)
+ return false;
+ pin->tx_glitch_high_usecs = usecs;
+ return true;
+ }
@@ -273,6 +311,9 @@ int cec_pin_error_inj_show(struct cec_adapter *adap, struct seq_file *sf)
seq_puts(sf, "# <op> rx-clear clear all rx error injections for <op>\n");
seq_puts(sf, "# <op> tx-clear clear all tx error injections for <op>\n");
seq_puts(sf, "#\n");
+ seq_puts(sf, "# RX error injection settings:\n");
+ seq_puts(sf, "# rx-no-low-drive do not generate low-drive pulses\n");
+ seq_puts(sf, "#\n");
seq_puts(sf, "# RX error injection:\n");
seq_puts(sf, "# <op>[,<mode>] rx-nack NACK the message instead of sending an ACK\n");
seq_puts(sf, "# <op>[,<mode>] rx-low-drive <bit> force a low-drive condition at this bit position\n");
@@ -285,6 +326,10 @@ int cec_pin_error_inj_show(struct cec_adapter *adap, struct seq_file *sf)
seq_puts(sf, "# tx-custom-low-usecs <usecs> define the 'low' time for the custom pulse\n");
seq_puts(sf, "# tx-custom-high-usecs <usecs> define the 'high' time for the custom pulse\n");
seq_puts(sf, "# tx-custom-pulse transmit the custom pulse once the bus is idle\n");
+ seq_puts(sf, "# tx-glitch-low-usecs <usecs> define the 'low' time for the glitch pulse\n");
+ seq_puts(sf, "# tx-glitch-high-usecs <usecs> define the 'high' time for the glitch pulse\n");
+ seq_puts(sf, "# tx-glitch-falling-edge send the glitch pulse after every falling edge\n");
+ seq_puts(sf, "# tx-glitch-rising-edge send the glitch pulse after every rising edge\n");
seq_puts(sf, "#\n");
seq_puts(sf, "# TX error injection:\n");
seq_puts(sf, "# <op>[,<mode>] tx-no-eom don't set the EOM bit\n");
@@ -332,8 +377,14 @@ int cec_pin_error_inj_show(struct cec_adapter *adap, struct seq_file *sf)
}
}
+ if (pin->rx_no_low_drive)
+ seq_puts(sf, "rx-no-low-drive\n");
if (pin->tx_ignore_nack_until_eom)
seq_puts(sf, "tx-ignore-nack-until-eom\n");
+ if (pin->tx_glitch_falling_edge)
+ seq_puts(sf, "tx-glitch-falling-edge\n");
+ if (pin->tx_glitch_rising_edge)
+ seq_puts(sf, "tx-glitch-rising-edge\n");
if (pin->tx_custom_pulse)
seq_puts(sf, "tx-custom-pulse\n");
if (pin->tx_custom_low_usecs != CEC_TIM_CUSTOM_DEFAULT)
@@ -342,5 +393,11 @@ int cec_pin_error_inj_show(struct cec_adapter *adap, struct seq_file *sf)
if (pin->tx_custom_high_usecs != CEC_TIM_CUSTOM_DEFAULT)
seq_printf(sf, "tx-custom-high-usecs %u\n",
pin->tx_custom_high_usecs);
+ if (pin->tx_glitch_low_usecs != CEC_TIM_GLITCH_DEFAULT)
+ seq_printf(sf, "tx-glitch-low-usecs %u\n",
+ pin->tx_glitch_low_usecs);
+ if (pin->tx_glitch_high_usecs != CEC_TIM_GLITCH_DEFAULT)
+ seq_printf(sf, "tx-glitch-high-usecs %u\n",
+ pin->tx_glitch_high_usecs);
return 0;
}
diff --git a/drivers/media/cec/core/cec-pin-priv.h b/drivers/media/cec/core/cec-pin-priv.h
index 156a9f81be94..e7801be9adb9 100644
--- a/drivers/media/cec/core/cec-pin-priv.h
+++ b/drivers/media/cec/core/cec-pin-priv.h
@@ -164,6 +164,9 @@ enum cec_pin_state {
/* The default for the low/high time of the custom pulse */
#define CEC_TIM_CUSTOM_DEFAULT 1000
+/* The default for the low/high time of the glitch pulse */
+#define CEC_TIM_GLITCH_DEFAULT 1
+
#define CEC_NUM_PIN_EVENTS 128
#define CEC_PIN_EVENT_FL_IS_HIGH (1 << 0)
#define CEC_PIN_EVENT_FL_DROPPED (1 << 1)
@@ -225,12 +228,17 @@ struct cec_pin {
u32 timer_max_overrun;
u32 timer_sum_overrun;
+ bool rx_no_low_drive;
u32 tx_custom_low_usecs;
u32 tx_custom_high_usecs;
+ u32 tx_glitch_low_usecs;
+ u32 tx_glitch_high_usecs;
bool tx_ignore_nack_until_eom;
bool tx_custom_pulse;
bool tx_generated_poll;
bool tx_post_eom;
+ bool tx_glitch_falling_edge;
+ bool tx_glitch_rising_edge;
u8 tx_extra_bytes;
u32 tx_low_drive_cnt;
#ifdef CONFIG_CEC_PIN_ERROR_INJ
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index 59ac12113f3a..4d7155281daa 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -142,15 +142,42 @@ static bool cec_pin_read(struct cec_pin *pin)
return v;
}
+static void cec_pin_insert_glitch(struct cec_pin *pin, bool rising_edge)
+{
+ /*
+ * Insert a short glitch after the falling or rising edge to
+ * simulate reflections on the CEC line. This can be used to
+ * test deglitch filters, which should be present in CEC devices
+ * to deal with noise on the line.
+ */
+ if (!pin->tx_glitch_high_usecs || !pin->tx_glitch_low_usecs)
+ return;
+ if (rising_edge) {
+ udelay(pin->tx_glitch_high_usecs);
+ call_void_pin_op(pin, low);
+ udelay(pin->tx_glitch_low_usecs);
+ call_void_pin_op(pin, high);
+ } else {
+ udelay(pin->tx_glitch_low_usecs);
+ call_void_pin_op(pin, high);
+ udelay(pin->tx_glitch_high_usecs);
+ call_void_pin_op(pin, low);
+ }
+}
+
static void cec_pin_low(struct cec_pin *pin)
{
call_void_pin_op(pin, low);
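+ /* Only inject a glitch when the pin actually transitions from high to low. */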
+ if (pin->tx_glitch_falling_edge && pin->adap->cec_pin_is_high)
+ cec_pin_insert_glitch(pin, false);
cec_pin_update(pin, false, false);
}
static bool cec_pin_high(struct cec_pin *pin)
{
call_void_pin_op(pin, high);
+ if (pin->tx_glitch_rising_edge && !pin->adap->cec_pin_is_high)
+ cec_pin_insert_glitch(pin, true);
return cec_pin_read(pin);
}
@@ -770,7 +797,7 @@ static void cec_pin_rx_states(struct cec_pin *pin, ktime_t ts)
* Go to low drive state when the total bit time is
* too short.
*/
- if (delta < CEC_TIM_DATA_BIT_TOTAL_MIN) {
+ if (delta < CEC_TIM_DATA_BIT_TOTAL_MIN && !pin->rx_no_low_drive) {
if (!pin->rx_data_bit_too_short_cnt++) {
pin->rx_data_bit_too_short_ts = ktime_to_ns(pin->ts);
pin->rx_data_bit_too_short_delta = delta;
@@ -1350,6 +1377,8 @@ struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
init_waitqueue_head(&pin->kthread_waitq);
pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT;
pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT;
+ pin->tx_glitch_low_usecs = CEC_TIM_GLITCH_DEFAULT;
+ pin->tx_glitch_high_usecs = CEC_TIM_GLITCH_DEFAULT;
adap = cec_allocate_adapter(&cec_pin_adap_ops, priv, name,
caps | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN,
diff --git a/drivers/media/cec/platform/cec-gpio/cec-gpio.c b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
index 50cdc557c943..3c27789d8657 100644
--- a/drivers/media/cec/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
@@ -61,49 +61,51 @@ static void cec_gpio_low(struct cec_adapter *adap)
gpiod_set_value(cec->cec_gpio, 0);
}
-static irqreturn_t cec_hpd_gpio_irq_handler_thread(int irq, void *priv)
+static irqreturn_t cec_gpio_5v_irq_handler_thread(int irq, void *priv)
{
struct cec_gpio *cec = priv;
+ int val = gpiod_get_value_cansleep(cec->v5_gpio);
+ bool is_high = val > 0;
- cec_queue_pin_hpd_event(cec->adap, cec->hpd_is_high, cec->hpd_ts);
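+ /* Ignore read errors and interrupts that do not change the cached level. */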
+ if (val < 0 || is_high == cec->v5_is_high)
+ return IRQ_HANDLED;
+
+ cec->v5_is_high = is_high;
+ cec_queue_pin_5v_event(cec->adap, cec->v5_is_high, cec->v5_ts);
return IRQ_HANDLED;
}
-static irqreturn_t cec_5v_gpio_irq_handler(int irq, void *priv)
+static irqreturn_t cec_gpio_5v_irq_handler(int irq, void *priv)
{
struct cec_gpio *cec = priv;
- int val = gpiod_get_value(cec->v5_gpio);
- bool is_high = val > 0;
- if (val < 0 || is_high == cec->v5_is_high)
- return IRQ_HANDLED;
cec->v5_ts = ktime_get();
- cec->v5_is_high = is_high;
return IRQ_WAKE_THREAD;
}
-static irqreturn_t cec_5v_gpio_irq_handler_thread(int irq, void *priv)
+static irqreturn_t cec_gpio_hpd_irq_handler_thread(int irq, void *priv)
{
struct cec_gpio *cec = priv;
+ int val = gpiod_get_value_cansleep(cec->hpd_gpio);
+ bool is_high = val > 0;
- cec_queue_pin_5v_event(cec->adap, cec->v5_is_high, cec->v5_ts);
+ if (val < 0 || is_high == cec->hpd_is_high)
+ return IRQ_HANDLED;
+
+ cec->hpd_is_high = is_high;
+ cec_queue_pin_hpd_event(cec->adap, cec->hpd_is_high, cec->hpd_ts);
return IRQ_HANDLED;
}
-static irqreturn_t cec_hpd_gpio_irq_handler(int irq, void *priv)
+static irqreturn_t cec_gpio_hpd_irq_handler(int irq, void *priv)
{
struct cec_gpio *cec = priv;
- int val = gpiod_get_value(cec->hpd_gpio);
- bool is_high = val > 0;
- if (val < 0 || is_high == cec->hpd_is_high)
- return IRQ_HANDLED;
cec->hpd_ts = ktime_get();
- cec->hpd_is_high = is_high;
return IRQ_WAKE_THREAD;
}
-static irqreturn_t cec_gpio_irq_handler(int irq, void *priv)
+static irqreturn_t cec_gpio_cec_irq_handler(int irq, void *priv)
{
struct cec_gpio *cec = priv;
int val = gpiod_get_value(cec->cec_gpio);
@@ -113,7 +115,7 @@ static irqreturn_t cec_gpio_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
-static bool cec_gpio_enable_irq(struct cec_adapter *adap)
+static bool cec_gpio_cec_enable_irq(struct cec_adapter *adap)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
@@ -121,7 +123,7 @@ static bool cec_gpio_enable_irq(struct cec_adapter *adap)
return true;
}
-static void cec_gpio_disable_irq(struct cec_adapter *adap)
+static void cec_gpio_cec_disable_irq(struct cec_adapter *adap)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
@@ -148,7 +150,7 @@ static int cec_gpio_read_hpd(struct cec_adapter *adap)
if (!cec->hpd_gpio)
return -ENOTTY;
- return gpiod_get_value(cec->hpd_gpio);
+ return gpiod_get_value_cansleep(cec->hpd_gpio);
}
static int cec_gpio_read_5v(struct cec_adapter *adap)
@@ -157,15 +159,15 @@ static int cec_gpio_read_5v(struct cec_adapter *adap)
if (!cec->v5_gpio)
return -ENOTTY;
- return gpiod_get_value(cec->v5_gpio);
+ return gpiod_get_value_cansleep(cec->v5_gpio);
}
static const struct cec_pin_ops cec_gpio_pin_ops = {
.read = cec_gpio_read,
.low = cec_gpio_low,
.high = cec_gpio_high,
- .enable_irq = cec_gpio_enable_irq,
- .disable_irq = cec_gpio_disable_irq,
+ .enable_irq = cec_gpio_cec_enable_irq,
+ .disable_irq = cec_gpio_cec_disable_irq,
.status = cec_gpio_status,
.read_hpd = cec_gpio_read_hpd,
.read_5v = cec_gpio_read_5v,
@@ -209,7 +211,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
if (IS_ERR(cec->adap))
return PTR_ERR(cec->adap);
- ret = devm_request_irq(dev, cec->cec_irq, cec_gpio_irq_handler,
+ ret = devm_request_irq(dev, cec->cec_irq, cec_gpio_cec_irq_handler,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN,
cec->adap->name, cec);
if (ret)
@@ -218,8 +220,8 @@ static int cec_gpio_probe(struct platform_device *pdev)
if (cec->hpd_gpio) {
cec->hpd_irq = gpiod_to_irq(cec->hpd_gpio);
ret = devm_request_threaded_irq(dev, cec->hpd_irq,
- cec_hpd_gpio_irq_handler,
- cec_hpd_gpio_irq_handler_thread,
+ cec_gpio_hpd_irq_handler,
+ cec_gpio_hpd_irq_handler_thread,
IRQF_ONESHOT |
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"hpd-gpio", cec);
@@ -230,8 +232,8 @@ static int cec_gpio_probe(struct platform_device *pdev)
if (cec->v5_gpio) {
cec->v5_irq = gpiod_to_irq(cec->v5_gpio);
ret = devm_request_threaded_irq(dev, cec->v5_irq,
- cec_5v_gpio_irq_handler,
- cec_5v_gpio_irq_handler_thread,
+ cec_gpio_5v_irq_handler,
+ cec_gpio_5v_irq_handler_thread,
IRQF_ONESHOT |
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"v5-gpio", cec);
diff --git a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
index ee870ea1a886..6f8d6797c614 100644
--- a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
+++ b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
@@ -171,11 +171,12 @@ static irqreturn_t rain_interrupt(struct serio *serio, unsigned char data,
{
struct rain *rain = serio_get_drvdata(serio);
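+ /* Take buf_lock before checking buf_len so the check and the enqueue are atomic. */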
+ spin_lock(&rain->buf_lock);
if (rain->buf_len == DATA_SIZE) {
+ spin_unlock(&rain->buf_lock);
dev_warn_once(rain->dev, "buffer overflow\n");
return IRQ_HANDLED;
}
- spin_lock(&rain->buf_lock);
rain->buf_len++;
rain->buf[rain->buf_wr_idx] = data;
rain->buf_wr_idx = (rain->buf_wr_idx + 1) & 0xff;
diff --git a/drivers/media/common/b2c2/flexcop-i2c.c b/drivers/media/common/b2c2/flexcop-i2c.c
index 1f1eaa807811..21edf870d927 100644
--- a/drivers/media/common/b2c2/flexcop-i2c.c
+++ b/drivers/media/common/b2c2/flexcop-i2c.c
@@ -209,7 +209,7 @@ static u32 flexcop_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm flexcop_algo = {
+static const struct i2c_algorithm flexcop_algo = {
.master_xfer = flexcop_master_xfer,
.functionality = flexcop_i2c_func,
};
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index c3d8ced6c3ba..5aa3d45a691a 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -433,7 +433,7 @@ static int cxd2820r_gpio_direction_output(struct gpio_chip *chip, unsigned nr,
return cxd2820r_gpio(&priv->fe, gpio);
}
-static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
+static int cxd2820r_gpio_set(struct gpio_chip *chip, unsigned int nr, int val)
{
struct cxd2820r_priv *priv = gpiochip_get_data(chip);
struct i2c_client *client = priv->client[0];
@@ -446,7 +446,7 @@ static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
(void) cxd2820r_gpio(&priv->fe, gpio);
- return;
+ return 0;
}
static int cxd2820r_gpio_get(struct gpio_chip *chip, unsigned nr)
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index b40daf242046..7d3a994b7cc4 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -2193,6 +2193,8 @@ static int w7090p_tuner_write_serpar(struct i2c_adapter *i2c_adap, struct i2c_ms
struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
u8 n_overflow = 1;
u16 i = 1000;
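+ /* Reject undersized messages before msg[0].buf is dereferenced below. */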
+ if (msg[0].len < 3)
+ return -EOPNOTSUPP;
u16 serpar_num = msg[0].buf[0];
while (n_overflow == 1 && i) {
@@ -2212,6 +2214,8 @@ static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg
struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
u8 n_overflow = 1, n_empty = 1;
u16 i = 1000;
+ if (msg[0].len < 1 || msg[1].len < 2)
+ return -EOPNOTSUPP;
u16 serpar_num = msg[0].buf[0];
u16 read_word;
@@ -2256,8 +2260,12 @@ static int dib7090p_rw_on_apb(struct i2c_adapter *i2c_adap,
u16 word;
if (num == 1) { /* write */
+ if (msg[0].len < 3)
+ return -EOPNOTSUPP;
dib7000p_write_word(state, apb_address, ((msg[0].buf[1] << 8) | (msg[0].buf[2])));
} else {
+ if (msg[1].len < 2)
+ return -EOPNOTSUPP;
word = dib7000p_read_word(state, apb_address);
msg[1].buf[0] = (word >> 8) & 0xff;
msg[1].buf[1] = (word) & 0xff;
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index e68202954a8f..6237fe804a5c 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -141,6 +141,7 @@ config VIDEO_IMX214
depends on GPIOLIB
select REGMAP_I2C
select V4L2_CCI_I2C
+ select VIDEO_CCS_PLL
help
This is a Video4Linux2 sensor driver for the Sony
IMX214 camera.
@@ -765,24 +766,25 @@ config VIDEO_THP7312
endmenu
-menu "Lens drivers"
- visible if MEDIA_CAMERA_SUPPORT
+menuconfig VIDEO_CAMERA_LENS
+ bool "Lens drivers"
+ depends on MEDIA_CAMERA_SUPPORT && I2C
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ default y
+
+if VIDEO_CAMERA_LENS
config VIDEO_AD5820
tristate "AD5820 lens voice coil support"
- depends on GPIOLIB && I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select V4L2_ASYNC
+ depends on GPIOLIB
help
This is a driver for the AD5820 camera lens voice coil.
It is used for example in Nokia N900 (RX-51).
config VIDEO_AK7375
tristate "AK7375 lens voice coil support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_ASYNC
help
This is a driver for the AK7375 camera lens voice coil.
AK7375 is a 12 bit DAC with 120mA output current sink
@@ -791,10 +793,7 @@ config VIDEO_AK7375
config VIDEO_DW9714
tristate "DW9714 lens voice coil support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_ASYNC
+ depends on GPIOLIB
help
This is a driver for the DW9714 camera lens voice coil.
DW9714 is a 10 bit DAC with 120mA output current sink
@@ -803,10 +802,6 @@ config VIDEO_DW9714
config VIDEO_DW9719
tristate "DW9719 lens voice coil support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_ASYNC
select V4L2_CCI_I2C
help
This is a driver for the DW9719 camera lens voice coil.
@@ -815,10 +810,6 @@ config VIDEO_DW9719
config VIDEO_DW9768
tristate "DW9768 lens voice coil support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a driver for the DW9768 camera lens voice coil.
DW9768 is a 10 bit DAC with 100mA output current sink
@@ -827,17 +818,13 @@ config VIDEO_DW9768
config VIDEO_DW9807_VCM
tristate "DW9807 lens voice coil support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_ASYNC
help
This is a driver for the DW9807 camera lens voice coil.
DW9807 is a 10 bit DAC with 100mA output current sink
capability. This is designed for linear control of
voice coil motors, controlled via I2C serial interface.
-endmenu
+endif
menu "Flash devices"
visible if MEDIA_CAMERA_SUPPORT
@@ -1684,7 +1671,7 @@ config VIDEO_MAX96714
config VIDEO_MAX96717
tristate "Maxim MAX96717 GMSL2 Serializer support"
- depends on OF && I2C && VIDEO_DEV && COMMON_CLK
+ depends on I2C && VIDEO_DEV && COMMON_CLK
select I2C_MUX
select MEDIA_CONTROLLER
select GPIOLIB
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 6e50b14f888f..5d90b8ab9b6d 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -868,21 +868,6 @@ static int adv7180_get_skip_frames(struct v4l2_subdev *sd, u32 *frames)
return 0;
}
-static int adv7180_g_pixelaspect(struct v4l2_subdev *sd, struct v4l2_fract *aspect)
-{
- struct adv7180_state *state = to_state(sd);
-
- if (state->curr_norm & V4L2_STD_525_60) {
- aspect->numerator = 11;
- aspect->denominator = 10;
- } else {
- aspect->numerator = 54;
- aspect->denominator = 59;
- }
-
- return 0;
-}
-
static int adv7180_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *norm)
{
*norm = V4L2_STD_ALL;
@@ -929,7 +914,6 @@ static const struct v4l2_subdev_video_ops adv7180_video_ops = {
.querystd = adv7180_querystd,
.g_input_status = adv7180_g_input_status,
.s_routing = adv7180_s_routing,
- .g_pixelaspect = adv7180_g_pixelaspect,
.g_tvnorms = adv7180_g_tvnorms,
.s_stream = adv7180_s_stream,
};
diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c
index 5edb3295dc58..678199196b84 100644
--- a/drivers/media/i2c/adv748x/adv748x-afe.c
+++ b/drivers/media/i2c/adv748x/adv748x-afe.c
@@ -161,22 +161,6 @@ int adv748x_afe_s_input(struct adv748x_afe *afe, unsigned int input)
return sdp_write(state, ADV748X_SDP_INSEL, input);
}
-static int adv748x_afe_g_pixelaspect(struct v4l2_subdev *sd,
- struct v4l2_fract *aspect)
-{
- struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
-
- if (afe->curr_norm & V4L2_STD_525_60) {
- aspect->numerator = 11;
- aspect->denominator = 10;
- } else {
- aspect->numerator = 54;
- aspect->denominator = 59;
- }
-
- return 0;
-}
-
/* -----------------------------------------------------------------------------
* v4l2_subdev_video_ops
*/
@@ -307,7 +291,6 @@ static const struct v4l2_subdev_video_ops adv748x_afe_video_ops = {
.g_tvnorms = adv748x_afe_g_tvnorms,
.g_input_status = adv748x_afe_g_input_status,
.s_stream = adv748x_afe_s_stream,
- .g_pixelaspect = adv748x_afe_g_pixelaspect,
};
/* -----------------------------------------------------------------------------
diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c
index a4db9bae5f79..b154dea29ba2 100644
--- a/drivers/media/i2c/adv748x/adv748x-hdmi.c
+++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c
@@ -382,19 +382,9 @@ done:
return ret;
}
-static int adv748x_hdmi_g_pixelaspect(struct v4l2_subdev *sd,
- struct v4l2_fract *aspect)
-{
- aspect->numerator = 1;
- aspect->denominator = 1;
-
- return 0;
-}
-
static const struct v4l2_subdev_video_ops adv748x_video_ops_hdmi = {
.g_input_status = adv748x_hdmi_g_input_status,
.s_stream = adv748x_hdmi_s_stream,
- .g_pixelaspect = adv748x_hdmi_g_pixelaspect,
};
/* -----------------------------------------------------------------------------
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index e271782b7b70..afed38596362 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2448,8 +2448,8 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
}
cec_s_phys_addr(state->cec_adap, parent_pa, false);
- /* enable hotplug after 100 ms */
- schedule_delayed_work(&state->delayed_work_enable_hotplug, HZ / 10);
+ /* enable hotplug after 143 ms */
+ schedule_delayed_work(&state->delayed_work_enable_hotplug, HZ / 7);
return 0;
}
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index 6d3f8617ef13..a80da2b4a8fa 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -203,9 +203,9 @@ static int ub913_gpio_direction_out(struct gpio_chip *gc, unsigned int offset,
0));
}
-static void ub913_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int ub913_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
- ub913_gpio_direction_out(gc, offset, value);
+ return ub913_gpio_direction_out(gc, offset, value);
}
static int ub913_gpio_of_xlate(struct gpio_chip *gc,
@@ -337,14 +337,6 @@ static int _ub913_set_routing(struct v4l2_subdev *sd,
unsigned int i;
int ret;
- /*
- * Note: we can only support up to V4L2_FRAME_DESC_ENTRY_MAX, until
- * frame desc is made dynamically allocated.
- */
-
- if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
- return -EINVAL;
-
ret = v4l2_subdev_routing_validate(sd, routing,
V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
if (ret)
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
index 59bd92388845..e3fc9d66970a 100644
--- a/drivers/media/i2c/ds90ub953.c
+++ b/drivers/media/i2c/ds90ub953.c
@@ -317,14 +317,13 @@ static int ub953_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(v & UB953_REG_GPIO_PIN_STS_GPIO_STS(offset));
}
-static void ub953_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int ub953_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct ub953_data *priv = gpiochip_get_data(gc);
- regmap_update_bits(priv->regmap, UB953_REG_LOCAL_GPIO_DATA,
- UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(offset),
- value ? UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(offset) :
- 0);
+ return regmap_update_bits(priv->regmap, UB953_REG_LOCAL_GPIO_DATA,
+ UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(offset),
+ value ? UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(offset) : 0);
}
static int ub953_gpio_of_xlate(struct gpio_chip *gc,
@@ -400,14 +399,6 @@ static int _ub953_set_routing(struct v4l2_subdev *sd,
};
int ret;
- /*
- * Note: we can only support up to V4L2_FRAME_DESC_ENTRY_MAX, until
- * frame desc is made dynamically allocated.
- */
-
- if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
- return -EINVAL;
-
ret = v4l2_subdev_routing_validate(sd, routing,
V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
if (ret)
diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
index 082fc62b0f5b..3156f6d6c6de 100644
--- a/drivers/media/i2c/ds90ub960.c
+++ b/drivers/media/i2c/ds90ub960.c
@@ -3861,14 +3861,6 @@ static int _ub960_set_routing(struct v4l2_subdev *sd,
};
int ret;
- /*
- * Note: we can only support up to V4L2_FRAME_DESC_ENTRY_MAX, until
- * frame desc is made dynamically allocated.
- */
-
- if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
- return -E2BIG;
-
ret = v4l2_subdev_routing_validate(sd, routing,
V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX);
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 2ddd7daa79e2..1e7ad355a388 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -2,6 +2,7 @@
// Copyright (c) 2015--2017 Intel Corporation.
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -38,6 +39,7 @@ struct dw9714_device {
struct v4l2_subdev sd;
u16 current_val;
struct regulator *vcc;
+ struct gpio_desc *powerdown_gpio;
};
static inline struct dw9714_device *to_dw9714_vcm(struct v4l2_ctrl *ctrl)
@@ -137,6 +139,28 @@ static int dw9714_init_controls(struct dw9714_device *dev_vcm)
return hdl->error;
}
+static int dw9714_power_up(struct dw9714_device *dw9714_dev)
+{
+ int ret;
+
+ ret = regulator_enable(dw9714_dev->vcc);
+ if (ret)
+ return ret;
+
+ gpiod_set_value_cansleep(dw9714_dev->powerdown_gpio, 0);
+
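+ /* Let the supply and the lens settle before the first I2C access. */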
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static int dw9714_power_down(struct dw9714_device *dw9714_dev)
+{
+ gpiod_set_value_cansleep(dw9714_dev->powerdown_gpio, 1);
+
+ return regulator_disable(dw9714_dev->vcc);
+}
+
static int dw9714_probe(struct i2c_client *client)
{
struct dw9714_device *dw9714_dev;
@@ -144,20 +168,25 @@ static int dw9714_probe(struct i2c_client *client)
dw9714_dev = devm_kzalloc(&client->dev, sizeof(*dw9714_dev),
GFP_KERNEL);
- if (dw9714_dev == NULL)
+ if (!dw9714_dev)
return -ENOMEM;
dw9714_dev->vcc = devm_regulator_get(&client->dev, "vcc");
if (IS_ERR(dw9714_dev->vcc))
return PTR_ERR(dw9714_dev->vcc);
- rval = regulator_enable(dw9714_dev->vcc);
- if (rval < 0) {
- dev_err(&client->dev, "failed to enable vcc: %d\n", rval);
- return rval;
- }
+ dw9714_dev->powerdown_gpio = devm_gpiod_get_optional(&client->dev,
+ "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(dw9714_dev->powerdown_gpio))
+ return dev_err_probe(&client->dev,
+ PTR_ERR(dw9714_dev->powerdown_gpio),
+ "could not get powerdown gpio\n");
- usleep_range(1000, 2000);
+ rval = dw9714_power_up(dw9714_dev);
+ if (rval)
+ return dev_err_probe(&client->dev, rval,
+ "failed to power up\n");
v4l2_i2c_subdev_init(&dw9714_dev->sd, client, &dw9714_ops);
dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
@@ -185,7 +214,7 @@ static int dw9714_probe(struct i2c_client *client)
return 0;
err_cleanup:
- regulator_disable(dw9714_dev->vcc);
+ dw9714_power_down(dw9714_dev);
v4l2_ctrl_handler_free(&dw9714_dev->ctrls_vcm);
media_entity_cleanup(&dw9714_dev->sd.entity);
@@ -200,10 +229,10 @@ static void dw9714_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
if (!pm_runtime_status_suspended(&client->dev)) {
- ret = regulator_disable(dw9714_dev->vcc);
+ ret = dw9714_power_down(dw9714_dev);
if (ret) {
dev_err(&client->dev,
- "Failed to disable vcc: %d\n", ret);
+ "Failed to power down: %d\n", ret);
}
}
pm_runtime_set_suspended(&client->dev);
@@ -234,9 +263,9 @@ static int __maybe_unused dw9714_vcm_suspend(struct device *dev)
usleep_range(DW9714_CTRL_DELAY_US, DW9714_CTRL_DELAY_US + 10);
}
- ret = regulator_disable(dw9714_dev->vcc);
+ ret = dw9714_power_down(dw9714_dev);
if (ret)
- dev_err(dev, "Failed to disable vcc: %d\n", ret);
+ dev_err(dev, "Failed to power down: %d\n", ret);
return ret;
}
@@ -247,7 +276,7 @@ static int __maybe_unused dw9714_vcm_suspend(struct device *dev)
* The lens position is gradually moved in units of DW9714_CTRL_STEPS,
* to make the movement smooth.
*/
-static int __maybe_unused dw9714_vcm_resume(struct device *dev)
+static int __maybe_unused dw9714_vcm_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *sd = i2c_get_clientdata(client);
@@ -257,12 +286,11 @@ static int __maybe_unused dw9714_vcm_resume(struct device *dev)
if (pm_runtime_suspended(&client->dev))
return 0;
- ret = regulator_enable(dw9714_dev->vcc);
+ ret = dw9714_power_up(dw9714_dev);
if (ret) {
- dev_err(dev, "Failed to enable vcc: %d\n", ret);
+ dev_err(dev, "Failed to power up: %d\n", ret);
return ret;
}
- usleep_range(1000, 2000);
for (val = dw9714_dev->current_val % DW9714_CTRL_STEPS;
val < dw9714_dev->current_val + DW9714_CTRL_STEPS - 1;
@@ -271,7 +299,7 @@ static int __maybe_unused dw9714_vcm_resume(struct device *dev)
DW9714_VAL(val, DW9714_DEFAULT_S));
if (ret)
dev_err_ratelimited(dev, "%s I2C failure: %d",
- __func__, ret);
+ __func__, ret);
usleep_range(DW9714_CTRL_DELAY_US, DW9714_CTRL_DELAY_US + 10);
}
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index aed258211b8a..076c19fcf99c 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -624,6 +624,12 @@ static const struct hi556_mode supported_modes[] = {
},
};
+static const char * const hi556_supply_names[] = {
+ "dovdd", /* Digital I/O power */
+ "avdd", /* Analog power */
+ "dvdd", /* Digital core power */
+};
+
struct hi556 {
struct v4l2_subdev sd;
struct media_pad pad;
@@ -639,7 +645,7 @@ struct hi556 {
/* GPIOs, clocks, etc. */
struct gpio_desc *reset_gpio;
struct clk *clk;
- struct regulator *avdd;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(hi556_supply_names)];
/* Current mode */
const struct hi556_mode *cur_mode;
@@ -756,21 +762,23 @@ static int hi556_test_pattern(struct hi556 *hi556, u32 pattern)
int ret;
u32 val;
- if (pattern) {
- ret = hi556_read_reg(hi556, HI556_REG_ISP,
- HI556_REG_VALUE_08BIT, &val);
- if (ret)
- return ret;
+ ret = hi556_read_reg(hi556, HI556_REG_ISP,
+ HI556_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
- ret = hi556_write_reg(hi556, HI556_REG_ISP,
- HI556_REG_VALUE_08BIT,
- val | HI556_REG_ISP_TPG_EN);
- if (ret)
- return ret;
- }
+ val = pattern ? (val | HI556_REG_ISP_TPG_EN) :
+ (val & ~HI556_REG_ISP_TPG_EN);
+
+ ret = hi556_write_reg(hi556, HI556_REG_ISP,
+ HI556_REG_VALUE_08BIT, val);
+ if (ret)
+ return ret;
+
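+ /* Pattern n is selected by setting bit n - 1; zero disables the pattern. */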
+ val = pattern ? BIT(pattern - 1) : 0;
return hi556_write_reg(hi556, HI556_REG_TEST_PATTERN,
- HI556_REG_VALUE_08BIT, pattern);
+ HI556_REG_VALUE_08BIT, val);
}
static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
@@ -1289,17 +1297,10 @@ static int hi556_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct hi556 *hi556 = to_hi556(sd);
- int ret;
gpiod_set_value_cansleep(hi556->reset_gpio, 1);
-
- ret = regulator_disable(hi556->avdd);
- if (ret) {
- dev_err(dev, "failed to disable avdd: %d\n", ret);
- gpiod_set_value_cansleep(hi556->reset_gpio, 0);
- return ret;
- }
-
+ regulator_bulk_disable(ARRAY_SIZE(hi556_supply_names),
+ hi556->supplies);
clk_disable_unprepare(hi556->clk);
return 0;
}
@@ -1314,14 +1315,20 @@ static int hi556_resume(struct device *dev)
if (ret)
return ret;
- ret = regulator_enable(hi556->avdd);
+ ret = regulator_bulk_enable(ARRAY_SIZE(hi556_supply_names),
+ hi556->supplies);
if (ret) {
- dev_err(dev, "failed to enable avdd: %d\n", ret);
+ dev_err(dev, "failed to enable regulators: %d", ret);
clk_disable_unprepare(hi556->clk);
return ret;
}
- gpiod_set_value_cansleep(hi556->reset_gpio, 0);
+ if (hi556->reset_gpio) {
+ /* Assert reset for at least 2 ms on back-to-back off-on cycles */
+ usleep_range(2000, 2200);
+ gpiod_set_value_cansleep(hi556->reset_gpio, 0);
+ }
+
usleep_range(5000, 5500);
return 0;
}
@@ -1330,7 +1337,7 @@ static int hi556_probe(struct i2c_client *client)
{
struct hi556 *hi556;
bool full_power;
- int ret;
+ int i, ret;
ret = hi556_check_hwcfg(&client->dev);
if (ret)
@@ -1353,11 +1360,15 @@ static int hi556_probe(struct i2c_client *client)
return dev_err_probe(&client->dev, PTR_ERR(hi556->clk),
"failed to get clock\n");
- /* The regulator core will provide a "dummy" regulator if necessary */
- hi556->avdd = devm_regulator_get(&client->dev, "avdd");
- if (IS_ERR(hi556->avdd))
- return dev_err_probe(&client->dev, PTR_ERR(hi556->avdd),
- "failed to get avdd regulator\n");
+ for (i = 0; i < ARRAY_SIZE(hi556_supply_names); i++)
+ hi556->supplies[i].supply = hi556_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(hi556_supply_names),
+ hi556->supplies);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to get regulators\n");
full_power = acpi_dev_state_d0(&client->dev);
if (full_power) {
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index dd7bc45523d8..a0cef9e61b41 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -20,6 +20,8 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
+#include "ccs-pll.h"
+
/* Chip ID */
#define IMX214_REG_CHIP_ID CCI_REG16(0x0016)
#define IMX214_CHIP_ID 0x0214
@@ -30,11 +32,9 @@
#define IMX214_REG_FAST_STANDBY_CTRL CCI_REG8(0x0106)
-#define IMX214_DEFAULT_CLK_FREQ 24000000
#define IMX214_DEFAULT_LINK_FREQ 600000000
/* Keep wrong link frequency for backward compatibility */
#define IMX214_DEFAULT_LINK_FREQ_LEGACY 480000000
-#define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10)
#define IMX214_FPS 30
/* V-TIMING internal */
@@ -84,6 +84,7 @@
#define IMX214_CSI_DATA_FORMAT_RAW10 0x0A0A
#define IMX214_CSI_DATA_FORMAT_COMP6 0x0A06
#define IMX214_CSI_DATA_FORMAT_COMP8 0x0A08
+#define IMX214_BITS_PER_PIXEL_MASK 0xFF
#define IMX214_REG_CSI_LANE_MODE CCI_REG8(0x0114)
#define IMX214_CSI_2_LANE_MODE 1
@@ -249,6 +250,10 @@ struct imx214 {
struct clk *xclk;
struct regmap *regmap;
+ struct ccs_pll pll;
+
+ struct v4l2_fwnode_endpoint bus_cfg;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -299,16 +304,6 @@ static const struct cci_reg_sequence mode_4096x2304[] = {
{ IMX214_REG_DIG_CROP_WIDTH, 4096 },
{ IMX214_REG_DIG_CROP_HEIGHT, 2304 },
- { IMX214_REG_VTPXCK_DIV, 5 },
- { IMX214_REG_VTSYCK_DIV, 2 },
- { IMX214_REG_PREPLLCK_VT_DIV, 3 },
- { IMX214_REG_PLL_VT_MPY, 150 },
- { IMX214_REG_OPPXCK_DIV, 10 },
- { IMX214_REG_OPSYCK_DIV, 1 },
- { IMX214_REG_PLL_MULT_DRIV, IMX214_PLL_SINGLE },
-
- { IMX214_REG_REQ_LINK_BIT_RATE, IMX214_LINK_BIT_RATE_MBPS(4800) },
-
{ CCI_REG8(0x3A03), 0x09 },
{ CCI_REG8(0x3A04), 0x50 },
{ CCI_REG8(0x3A05), 0x01 },
@@ -362,16 +357,6 @@ static const struct cci_reg_sequence mode_1920x1080[] = {
{ IMX214_REG_DIG_CROP_WIDTH, 1920 },
{ IMX214_REG_DIG_CROP_HEIGHT, 1080 },
- { IMX214_REG_VTPXCK_DIV, 5 },
- { IMX214_REG_VTSYCK_DIV, 2 },
- { IMX214_REG_PREPLLCK_VT_DIV, 3 },
- { IMX214_REG_PLL_VT_MPY, 150 },
- { IMX214_REG_OPPXCK_DIV, 10 },
- { IMX214_REG_OPSYCK_DIV, 1 },
- { IMX214_REG_PLL_MULT_DRIV, IMX214_PLL_SINGLE },
-
- { IMX214_REG_REQ_LINK_BIT_RATE, IMX214_LINK_BIT_RATE_MBPS(4800) },
-
{ CCI_REG8(0x3A03), 0x04 },
{ CCI_REG8(0x3A04), 0xF8 },
{ CCI_REG8(0x3A05), 0x02 },
@@ -405,9 +390,6 @@ static const struct cci_reg_sequence mode_table_common[] = {
/* ATR setting */
{ IMX214_REG_ATR_FAST_MOVE, 2 },
- /* external clock setting */
- { IMX214_REG_EXCK_FREQ, IMX214_EXCK_FREQ(IMX214_DEFAULT_CLK_FREQ / 1000000) },
-
/* global setting */
/* basic config */
{ IMX214_REG_MASK_CORR_FRAMES, IMX214_CORR_FRAMES_MASK },
@@ -777,6 +759,30 @@ static int imx214_entity_init_state(struct v4l2_subdev *subdev,
return 0;
}
+static int imx214_configure_pll(struct imx214 *imx214)
+{
+ int ret = 0;
+
+ cci_write(imx214->regmap, IMX214_REG_VTPXCK_DIV,
+ imx214->pll.vt_bk.pix_clk_div, &ret);
+ cci_write(imx214->regmap, IMX214_REG_VTSYCK_DIV,
+ imx214->pll.vt_bk.sys_clk_div, &ret);
+ cci_write(imx214->regmap, IMX214_REG_PREPLLCK_VT_DIV,
+ imx214->pll.vt_fr.pre_pll_clk_div, &ret);
+ cci_write(imx214->regmap, IMX214_REG_PLL_VT_MPY,
+ imx214->pll.vt_fr.pll_multiplier, &ret);
+ cci_write(imx214->regmap, IMX214_REG_OPPXCK_DIV,
+ imx214->pll.op_bk.pix_clk_div, &ret);
+ cci_write(imx214->regmap, IMX214_REG_OPSYCK_DIV,
+ imx214->pll.op_bk.sys_clk_div, &ret);
+ cci_write(imx214->regmap, IMX214_REG_PLL_MULT_DRIV,
+ IMX214_PLL_SINGLE, &ret);
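+ /* EXCK_FREQ is programmed in MHz, derived from the runtime external clock rate. */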
+ cci_write(imx214->regmap, IMX214_REG_EXCK_FREQ,
+ IMX214_EXCK_FREQ(imx214->pll.ext_clk_freq_hz / 1000000), &ret);
+
+ return ret;
+}
+
static int imx214_update_digital_gain(struct imx214 *imx214, u32 val)
{
int ret = 0;
@@ -877,9 +883,6 @@ static const struct v4l2_ctrl_ops imx214_ctrl_ops = {
static int imx214_ctrls_init(struct imx214 *imx214)
{
- static const s64 link_freq[] = {
- IMX214_DEFAULT_LINK_FREQ
- };
static const struct v4l2_area unit_size = {
.width = 1120,
.height = 1120,
@@ -900,15 +903,14 @@ static int imx214_ctrls_init(struct imx214 *imx214)
if (ret)
return ret;
- imx214->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, NULL,
- V4L2_CID_PIXEL_RATE, 0,
- IMX214_DEFAULT_PIXEL_RATE, 1,
- IMX214_DEFAULT_PIXEL_RATE);
+ imx214->pixel_rate =
+ v4l2_ctrl_new_std(ctrl_hdlr, NULL, V4L2_CID_PIXEL_RATE, 1,
+ INT_MAX, 1, 1);
imx214->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, NULL,
V4L2_CID_LINK_FREQ,
- ARRAY_SIZE(link_freq) - 1,
- 0, link_freq);
+ imx214->bus_cfg.nr_of_link_frequencies - 1,
+ 0, imx214->bus_cfg.link_frequencies);
if (imx214->link_freq)
imx214->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
@@ -1011,6 +1013,7 @@ static int imx214_start_streaming(struct imx214 *imx214)
const struct v4l2_mbus_framefmt *fmt;
struct v4l2_subdev_state *state;
const struct imx214_mode *mode;
+ int bit_rate_mbps;
int ret;
ret = cci_multi_reg_write(imx214->regmap, mode_table_common,
@@ -1020,6 +1023,21 @@ static int imx214_start_streaming(struct imx214 *imx214)
return ret;
}
+ ret = imx214_configure_pll(imx214);
+ if (ret) {
+ dev_err(imx214->dev, "failed to configure PLL: %d\n", ret);
+ return ret;
+ }
+
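+ /* Requested link bit rate in Mbps: CSI pixel rate times bits per pixel. */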
+ bit_rate_mbps = (imx214->pll.pixel_rate_csi / 1000000)
+ * imx214->pll.bits_per_pixel;
+ ret = cci_write(imx214->regmap, IMX214_REG_REQ_LINK_BIT_RATE,
+ IMX214_LINK_BIT_RATE_MBPS(bit_rate_mbps), NULL);
+ if (ret) {
+ dev_err(imx214->dev, "failed to configure link bit rate\n");
+ return ret;
+ }
+
ret = cci_write(imx214->regmap, IMX214_REG_CSI_LANE_MODE,
IMX214_CSI_4_LANE_MODE, NULL);
if (ret) {
@@ -1097,6 +1115,109 @@ err_rpm_put:
return ret;
}
+static int imx214_pll_calculate(struct imx214 *imx214, struct ccs_pll *pll,
+ unsigned int link_freq)
+{
+ struct ccs_pll_limits limits = {
+ .min_ext_clk_freq_hz = 6000000,
+ .max_ext_clk_freq_hz = 27000000,
+
+ .vt_fr = {
+ .min_pre_pll_clk_div = 1,
+ .max_pre_pll_clk_div = 15,
+ /* Value is educated guess as we don't have a spec */
+ .min_pll_ip_clk_freq_hz = 6000000,
+ /* Value is educated guess as we don't have a spec */
+ .max_pll_ip_clk_freq_hz = 12000000,
+ .min_pll_multiplier = 12,
+ .max_pll_multiplier = 1200,
+ .min_pll_op_clk_freq_hz = 338000000,
+ .max_pll_op_clk_freq_hz = 1200000000,
+ },
+ .vt_bk = {
+ .min_sys_clk_div = 2,
+ .max_sys_clk_div = 4,
+ .min_pix_clk_div = 5,
+ .max_pix_clk_div = 10,
+ .min_pix_clk_freq_hz = 30000000,
+ .max_pix_clk_freq_hz = 120000000,
+ },
+ .op_bk = {
+ .min_sys_clk_div = 1,
+ .max_sys_clk_div = 2,
+ .min_pix_clk_div = 6,
+ .max_pix_clk_div = 10,
+ .min_pix_clk_freq_hz = 30000000,
+ .max_pix_clk_freq_hz = 120000000,
+ },
+
+ .min_line_length_pck_bin = IMX214_PPL_DEFAULT,
+ .min_line_length_pck = IMX214_PPL_DEFAULT,
+ };
+ unsigned int num_lanes = imx214->bus_cfg.bus.mipi_csi2.num_data_lanes;
+
+ /*
+ * There are no documented constraints on the sys clock frequency, for
+ * either branch. Recover them based on the PLL output clock frequency
+ * and sys_clk_div limits on one hand, and the pix clock frequency and
+ * the pix_clk_div limits on the other hand.
+ */
+ limits.vt_bk.min_sys_clk_freq_hz =
+ max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.vt_bk.max_sys_clk_div,
+ limits.vt_bk.min_pix_clk_freq_hz * limits.vt_bk.min_pix_clk_div);
+ limits.vt_bk.max_sys_clk_freq_hz =
+ min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.vt_bk.min_sys_clk_div,
+ limits.vt_bk.max_pix_clk_freq_hz * limits.vt_bk.max_pix_clk_div);
+
+ limits.op_bk.min_sys_clk_freq_hz =
+ max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.op_bk.max_sys_clk_div,
+ limits.op_bk.min_pix_clk_freq_hz * limits.op_bk.min_pix_clk_div);
+ limits.op_bk.max_sys_clk_freq_hz =
+ min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.op_bk.min_sys_clk_div,
+ limits.op_bk.max_pix_clk_freq_hz * limits.op_bk.max_pix_clk_div);
+
+ memset(pll, 0, sizeof(*pll));
+
+ pll->bus_type = CCS_PLL_BUS_TYPE_CSI2_DPHY;
+ pll->op_lanes = num_lanes;
+ pll->vt_lanes = num_lanes;
+ pll->csi2.lanes = num_lanes;
+
+ pll->binning_horizontal = 1;
+ pll->binning_vertical = 1;
+ pll->scale_m = 1;
+ pll->scale_n = 1;
+ pll->bits_per_pixel =
+ IMX214_CSI_DATA_FORMAT_RAW10 & IMX214_BITS_PER_PIXEL_MASK;
+ pll->flags = CCS_PLL_FLAG_LANE_SPEED_MODEL;
+ pll->link_freq = link_freq;
+ pll->ext_clk_freq_hz = clk_get_rate(imx214->xclk);
+
+ return ccs_pll_calculate(imx214->dev, &limits, pll);
+}
+
+static int imx214_pll_update(struct imx214 *imx214)
+{
+ u64 link_freq;
+ int ret;
+
+ link_freq = imx214->bus_cfg.link_frequencies[imx214->link_freq->val];
+ ret = imx214_pll_calculate(imx214, &imx214->pll, link_freq);
+ if (ret) {
+ dev_err(imx214->dev, "PLL calculations failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = v4l2_ctrl_s_ctrl_int64(imx214->pixel_rate,
+ imx214->pll.pixel_rate_pixel_array);
+ if (ret) {
+ dev_err(imx214->dev, "failed to set pixel rate\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int imx214_get_frame_interval(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval *fival)
@@ -1203,12 +1324,10 @@ static int imx214_identify_module(struct imx214 *imx214)
return 0;
}
-static int imx214_parse_fwnode(struct device *dev)
+static int imx214_parse_fwnode(struct device *dev, struct imx214 *imx214)
{
+ struct v4l2_fwnode_endpoint *bus_cfg = &imx214->bus_cfg;
struct fwnode_handle *endpoint;
- struct v4l2_fwnode_endpoint bus_cfg = {
- .bus_type = V4L2_MBUS_CSI2_DPHY,
- };
unsigned int i;
int ret;
@@ -1216,42 +1335,52 @@ static int imx214_parse_fwnode(struct device *dev)
if (!endpoint)
return dev_err_probe(dev, -EINVAL, "endpoint node not found\n");
- ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &bus_cfg);
+ bus_cfg->bus_type = V4L2_MBUS_CSI2_DPHY;
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, bus_cfg);
+ fwnode_handle_put(endpoint);
if (ret) {
dev_err_probe(dev, ret, "parsing endpoint node failed\n");
- goto done;
+ goto error;
}
/* Check the number of MIPI CSI2 data lanes */
- if (bus_cfg.bus.mipi_csi2.num_data_lanes != 4) {
+ if (bus_cfg->bus.mipi_csi2.num_data_lanes != 4) {
ret = dev_err_probe(dev, -EINVAL,
"only 4 data lanes are currently supported\n");
- goto done;
+ goto error;
}
- if (bus_cfg.nr_of_link_frequencies != 1)
+ if (bus_cfg->nr_of_link_frequencies != 1)
dev_warn(dev, "Only one link-frequency supported, please review your DT. Continuing anyway\n");
- for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
- if (bus_cfg.link_frequencies[i] == IMX214_DEFAULT_LINK_FREQ)
+ for (i = 0; i < bus_cfg->nr_of_link_frequencies; i++) {
+ u64 freq = bus_cfg->link_frequencies[i];
+ struct ccs_pll pll;
+
+ if (!imx214_pll_calculate(imx214, &pll, freq))
break;
- if (bus_cfg.link_frequencies[i] ==
- IMX214_DEFAULT_LINK_FREQ_LEGACY) {
+ if (freq == IMX214_DEFAULT_LINK_FREQ_LEGACY) {
dev_warn(dev,
"link-frequencies %d not supported, please review your DT. Continuing anyway\n",
IMX214_DEFAULT_LINK_FREQ);
+ freq = IMX214_DEFAULT_LINK_FREQ;
+ if (imx214_pll_calculate(imx214, &pll, freq))
+ continue;
+ bus_cfg->link_frequencies[i] = freq;
break;
}
}
- if (i == bus_cfg.nr_of_link_frequencies)
+ if (i == bus_cfg->nr_of_link_frequencies)
ret = dev_err_probe(dev, -EINVAL,
- "link-frequencies %d not supported, please review your DT\n",
- IMX214_DEFAULT_LINK_FREQ);
+ "link-frequencies %lld not supported, please review your DT\n",
+ bus_cfg->nr_of_link_frequencies ?
+ bus_cfg->link_frequencies[0] : 0);
-done:
- v4l2_fwnode_endpoint_free(&bus_cfg);
- fwnode_handle_put(endpoint);
+ return 0;
+
+error:
+ v4l2_fwnode_endpoint_free(&imx214->bus_cfg);
return ret;
}
@@ -1261,10 +1390,6 @@ static int imx214_probe(struct i2c_client *client)
struct imx214 *imx214;
int ret;
- ret = imx214_parse_fwnode(dev);
- if (ret)
- return ret;
-
imx214 = devm_kzalloc(dev, sizeof(*imx214), GFP_KERNEL);
if (!imx214)
return -ENOMEM;
@@ -1276,11 +1401,6 @@ static int imx214_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(imx214->xclk),
"failed to get xclk\n");
- ret = clk_set_rate(imx214->xclk, IMX214_DEFAULT_CLK_FREQ);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to set xclk frequency\n");
-
ret = imx214_get_regulators(dev, imx214);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to get regulators\n");
@@ -1295,6 +1415,10 @@ static int imx214_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(imx214->regmap),
"failed to initialize CCI\n");
+ ret = imx214_parse_fwnode(dev, imx214);
+ if (ret)
+ return ret;
+
v4l2_i2c_subdev_init(&imx214->sd, client, &imx214_subdev_ops);
imx214->sd.internal_ops = &imx214_internal_ops;
@@ -1302,7 +1426,9 @@ static int imx214_probe(struct i2c_client *client)
* Enable power initially, to avoid warnings
* from clk_disable on power_off
*/
- imx214_power_on(imx214->dev);
+ ret = imx214_power_on(imx214->dev);
+ if (ret < 0)
+ goto error_fwnode;
ret = imx214_identify_module(imx214);
if (ret)
@@ -1333,6 +1459,12 @@ static int imx214_probe(struct i2c_client *client)
pm_runtime_set_active(imx214->dev);
pm_runtime_enable(imx214->dev);
+ ret = imx214_pll_update(imx214);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "failed to update PLL\n");
+ goto error_subdev_cleanup;
+ }
+
ret = v4l2_async_register_subdev_sensor(&imx214->sd);
if (ret < 0) {
dev_err_probe(dev, ret,
@@ -1358,6 +1490,9 @@ free_ctrl:
error_power_off:
imx214_power_off(imx214->dev);
+error_fwnode:
+ v4l2_fwnode_endpoint_free(&imx214->bus_cfg);
+
return ret;
}
@@ -1370,6 +1505,8 @@ static void imx214_remove(struct i2c_client *client)
v4l2_subdev_cleanup(sd);
media_entity_cleanup(&imx214->sd.entity);
v4l2_ctrl_handler_free(&imx214->ctrls);
+ v4l2_fwnode_endpoint_free(&imx214->bus_cfg);
+
pm_runtime_disable(&client->dev);
if (!pm_runtime_status_suspended(&client->dev)) {
imx214_power_off(imx214->dev);
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index fbf7eba3d71d..4f3f386c5353 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -1294,7 +1294,6 @@ static int imx290_subdev_init(struct imx290 *imx290)
* will already be prevented even before the delay.
*/
v4l2_i2c_subdev_init(&imx290->sd, client, &imx290_subdev_ops);
- imx290->sd.dev = imx290->dev;
pm_runtime_mark_last_busy(imx290->dev);
pm_runtime_put_autosuspend(imx290->dev);
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index 9f37779bd611..278e743646ea 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -1251,7 +1251,7 @@ static int imx415_parse_hw_config(struct imx415 *sensor)
return dev_err_probe(sensor->dev, PTR_ERR(sensor->reset),
"failed to get reset GPIO\n");
- sensor->clk = devm_clk_get(sensor->dev, "inck");
+ sensor->clk = devm_clk_get(sensor->dev, NULL);
if (IS_ERR(sensor->clk))
return dev_err_probe(sensor->dev, PTR_ERR(sensor->clk),
"failed to get clock\n");
diff --git a/drivers/media/i2c/lt6911uxe.c b/drivers/media/i2c/lt6911uxe.c
index 24857d683fcf..bdefdd157e69 100644
--- a/drivers/media/i2c/lt6911uxe.c
+++ b/drivers/media/i2c/lt6911uxe.c
@@ -600,7 +600,7 @@ static int lt6911uxe_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&lt6911uxe->sd, client, &lt6911uxe_subdev_ops);
- lt6911uxe->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ lt6911uxe->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(lt6911uxe->reset_gpio))
return dev_err_probe(dev, PTR_ERR(lt6911uxe->reset_gpio),
"failed to get reset gpio\n");
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 9fc4e130a273..7c0961688d61 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -1193,12 +1193,12 @@ static int max9286_gpio_set(struct max9286_priv *priv, unsigned int offset,
MAX9286_0X0F_RESERVED | priv->gpio_state);
}
-static void max9286_gpiochip_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int max9286_gpiochip_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
{
struct max9286_priv *priv = gpiochip_get_data(chip);
- max9286_gpio_set(priv, offset, value);
+ return max9286_gpio_set(priv, offset, value);
}
static int max9286_gpiochip_get(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/media/i2c/max96714.c b/drivers/media/i2c/max96714.c
index 3cc1b1ae47d1..e3e625e6f11a 100644
--- a/drivers/media/i2c/max96714.c
+++ b/drivers/media/i2c/max96714.c
@@ -370,13 +370,6 @@ static int _max96714_set_routing(struct v4l2_subdev *sd,
};
int ret;
- /*
- * Note: we can only support up to V4L2_FRAME_DESC_ENTRY_MAX, until
- * frame desc is made dynamically allocated.
- */
- if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
- return -EINVAL;
-
ret = v4l2_subdev_routing_validate(sd, routing,
V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
if (ret)
diff --git a/drivers/media/i2c/max96717.c b/drivers/media/i2c/max96717.c
index 3746729366ac..c8ae7890d9fa 100644
--- a/drivers/media/i2c/max96717.c
+++ b/drivers/media/i2c/max96717.c
@@ -297,13 +297,13 @@ static int max96717_gpiochip_get(struct gpio_chip *gpiochip,
return !!(val & MAX96717_GPIO_OUT);
}
-static void max96717_gpiochip_set(struct gpio_chip *gpiochip,
- unsigned int offset, int value)
+static int max96717_gpiochip_set(struct gpio_chip *gpiochip,
+ unsigned int offset, int value)
{
struct max96717_priv *priv = gpiochip_get_data(gpiochip);
- cci_update_bits(priv->regmap, MAX96717_GPIO_REG_A(offset),
- MAX96717_GPIO_OUT, MAX96717_GPIO_OUT, NULL);
+ return cci_update_bits(priv->regmap, MAX96717_GPIO_REG_A(offset),
+ MAX96717_GPIO_OUT, MAX96717_GPIO_OUT, NULL);
}
static int max96717_gpio_get_direction(struct gpio_chip *gpiochip,
@@ -357,7 +357,6 @@ static int max96717_gpiochip_probe(struct max96717_priv *priv)
gc->direction_output = max96717_gpio_direction_out;
gc->set = max96717_gpiochip_set;
gc->get = max96717_gpiochip_get;
- gc->of_gpio_n_cells = 2;
/* Disable GPIO forwarding */
for (i = 0; i < gc->ngpio; i++)
diff --git a/drivers/media/i2c/mt9m114.c b/drivers/media/i2c/mt9m114.c
index 5f0b0ad8f885..3f540ca40f3c 100644
--- a/drivers/media/i2c/mt9m114.c
+++ b/drivers/media/i2c/mt9m114.c
@@ -261,6 +261,7 @@
#define MT9M114_CAM_PGA_PGA_CONTROL CCI_REG16(0xc95e)
#define MT9M114_CAM_SYSCTL_PLL_ENABLE CCI_REG8(0xc97e)
#define MT9M114_CAM_SYSCTL_PLL_ENABLE_VALUE BIT(0)
+#define MT9M114_CAM_SYSCTL_PLL_DISABLE_VALUE 0x00
#define MT9M114_CAM_SYSCTL_PLL_DIVIDER_M_N CCI_REG16(0xc980)
#define MT9M114_CAM_SYSCTL_PLL_DIVIDER_VALUE(m, n) (((n) << 8) | (m))
#define MT9M114_CAM_SYSCTL_PLL_DIVIDER_P CCI_REG16(0xc982)
@@ -377,6 +378,7 @@ struct mt9m114 {
struct gpio_desc *reset;
struct regulator_bulk_data supplies[3];
struct v4l2_fwnode_endpoint bus_cfg;
+ bool bypass_pll;
struct {
unsigned int m;
@@ -743,14 +745,21 @@ static int mt9m114_initialize(struct mt9m114 *sensor)
}
/* Configure the PLL. */
- cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_ENABLE,
- MT9M114_CAM_SYSCTL_PLL_ENABLE_VALUE, &ret);
- cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_DIVIDER_M_N,
- MT9M114_CAM_SYSCTL_PLL_DIVIDER_VALUE(sensor->pll.m,
- sensor->pll.n),
- &ret);
- cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_DIVIDER_P,
- MT9M114_CAM_SYSCTL_PLL_DIVIDER_P_VALUE(sensor->pll.p), &ret);
+ if (sensor->bypass_pll) {
+ cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_ENABLE,
+ MT9M114_CAM_SYSCTL_PLL_DISABLE_VALUE, &ret);
+ } else {
+ cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_ENABLE,
+ MT9M114_CAM_SYSCTL_PLL_ENABLE_VALUE, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_DIVIDER_M_N,
+ MT9M114_CAM_SYSCTL_PLL_DIVIDER_VALUE(sensor->pll.m,
+ sensor->pll.n),
+ &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_DIVIDER_P,
+ MT9M114_CAM_SYSCTL_PLL_DIVIDER_P_VALUE(sensor->pll.p),
+ &ret);
+ }
+
cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_PIXCLK,
sensor->pixrate, &ret);
@@ -781,41 +790,25 @@ static int mt9m114_initialize(struct mt9m114 *sensor)
return 0;
}
-static int mt9m114_configure(struct mt9m114 *sensor,
- struct v4l2_subdev_state *pa_state,
- struct v4l2_subdev_state *ifp_state)
+static int mt9m114_configure_pa(struct mt9m114 *sensor,
+ struct v4l2_subdev_state *state)
{
- const struct v4l2_mbus_framefmt *pa_format;
- const struct v4l2_rect *pa_crop;
- const struct mt9m114_format_info *ifp_info;
- const struct v4l2_mbus_framefmt *ifp_format;
- const struct v4l2_rect *ifp_crop;
- const struct v4l2_rect *ifp_compose;
+ const struct v4l2_mbus_framefmt *format;
+ const struct v4l2_rect *crop;
unsigned int hratio, vratio;
- u64 output_format;
u64 read_mode;
- int ret = 0;
-
- pa_format = v4l2_subdev_state_get_format(pa_state, 0);
- pa_crop = v4l2_subdev_state_get_crop(pa_state, 0);
+ int ret;
- ifp_format = v4l2_subdev_state_get_format(ifp_state, 1);
- ifp_info = mt9m114_format_info(sensor, 1, ifp_format->code);
- ifp_crop = v4l2_subdev_state_get_crop(ifp_state, 0);
- ifp_compose = v4l2_subdev_state_get_compose(ifp_state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 0);
ret = cci_read(sensor->regmap, MT9M114_CAM_SENSOR_CONTROL_READ_MODE,
&read_mode, NULL);
if (ret < 0)
return ret;
- ret = cci_read(sensor->regmap, MT9M114_CAM_OUTPUT_FORMAT,
- &output_format, NULL);
- if (ret < 0)
- return ret;
-
- hratio = pa_crop->width / pa_format->width;
- vratio = pa_crop->height / pa_format->height;
+ hratio = crop->width / format->width;
+ vratio = crop->height / format->height;
/*
* Pixel array crop and binning. The CAM_SENSOR_CFG_CPIPE_LAST_ROW
@@ -824,15 +817,15 @@ static int mt9m114_configure(struct mt9m114 *sensor,
* example sensor modes.
*/
cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_X_ADDR_START,
- pa_crop->left, &ret);
+ crop->left, &ret);
cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_Y_ADDR_START,
- pa_crop->top, &ret);
+ crop->top, &ret);
cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_X_ADDR_END,
- pa_crop->width + pa_crop->left - 1, &ret);
+ crop->width + crop->left - 1, &ret);
cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_Y_ADDR_END,
- pa_crop->height + pa_crop->top - 1, &ret);
+ crop->height + crop->top - 1, &ret);
cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_CPIPE_LAST_ROW,
- (pa_crop->height - 4) / vratio - 1, &ret);
+ (crop->height - 4) / vratio - 1, &ret);
read_mode &= ~(MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_MASK |
MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_MASK);
@@ -845,6 +838,29 @@ static int mt9m114_configure(struct mt9m114 *sensor,
cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CONTROL_READ_MODE,
read_mode, &ret);
+ return ret;
+}
+
+static int mt9m114_configure_ifp(struct mt9m114 *sensor,
+ struct v4l2_subdev_state *state)
+{
+ const struct mt9m114_format_info *info;
+ const struct v4l2_mbus_framefmt *format;
+ const struct v4l2_rect *crop;
+ const struct v4l2_rect *compose;
+ u64 output_format;
+ int ret = 0;
+
+ format = v4l2_subdev_state_get_format(state, 1);
+ info = mt9m114_format_info(sensor, 1, format->code);
+ crop = v4l2_subdev_state_get_crop(state, 0);
+ compose = v4l2_subdev_state_get_compose(state, 0);
+
+ ret = cci_read(sensor->regmap, MT9M114_CAM_OUTPUT_FORMAT,
+ &output_format, NULL);
+ if (ret < 0)
+ return ret;
+
/*
* Color pipeline (IFP) cropping and scaling. Subtract 4 from the left
* and top coordinates to compensate for the lines and columns removed
@@ -852,18 +868,18 @@ static int mt9m114_configure(struct mt9m114 *sensor,
* not in the hardware.
*/
cci_write(sensor->regmap, MT9M114_CAM_CROP_WINDOW_XOFFSET,
- ifp_crop->left - 4, &ret);
+ crop->left - 4, &ret);
cci_write(sensor->regmap, MT9M114_CAM_CROP_WINDOW_YOFFSET,
- ifp_crop->top - 4, &ret);
+ crop->top - 4, &ret);
cci_write(sensor->regmap, MT9M114_CAM_CROP_WINDOW_WIDTH,
- ifp_crop->width, &ret);
+ crop->width, &ret);
cci_write(sensor->regmap, MT9M114_CAM_CROP_WINDOW_HEIGHT,
- ifp_crop->height, &ret);
+ crop->height, &ret);
cci_write(sensor->regmap, MT9M114_CAM_OUTPUT_WIDTH,
- ifp_compose->width, &ret);
+ compose->width, &ret);
cci_write(sensor->regmap, MT9M114_CAM_OUTPUT_HEIGHT,
- ifp_compose->height, &ret);
+ compose->height, &ret);
/* AWB and AE windows, use the full frame. */
cci_write(sensor->regmap, MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XSTART,
@@ -871,18 +887,18 @@ static int mt9m114_configure(struct mt9m114 *sensor,
cci_write(sensor->regmap, MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YSTART,
0, &ret);
cci_write(sensor->regmap, MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XEND,
- ifp_compose->width - 1, &ret);
+ compose->width - 1, &ret);
cci_write(sensor->regmap, MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YEND,
- ifp_compose->height - 1, &ret);
+ compose->height - 1, &ret);
cci_write(sensor->regmap, MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XSTART,
0, &ret);
cci_write(sensor->regmap, MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YSTART,
0, &ret);
cci_write(sensor->regmap, MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XEND,
- ifp_compose->width / 5 - 1, &ret);
+ compose->width / 5 - 1, &ret);
cci_write(sensor->regmap, MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YEND,
- ifp_compose->height / 5 - 1, &ret);
+ compose->height / 5 - 1, &ret);
cci_write(sensor->regmap, MT9M114_CAM_CROP_CROPMODE,
MT9M114_CAM_CROP_MODE_AWB_AUTO_CROP_EN |
@@ -894,7 +910,7 @@ static int mt9m114_configure(struct mt9m114 *sensor,
MT9M114_CAM_OUTPUT_FORMAT_FORMAT_MASK |
MT9M114_CAM_OUTPUT_FORMAT_SWAP_BYTES |
MT9M114_CAM_OUTPUT_FORMAT_SWAP_RED_BLUE);
- output_format |= ifp_info->output_format;
+ output_format |= info->output_format;
cci_write(sensor->regmap, MT9M114_CAM_OUTPUT_FORMAT,
output_format, &ret);
@@ -925,7 +941,11 @@ static int mt9m114_start_streaming(struct mt9m114 *sensor,
if (ret)
return ret;
- ret = mt9m114_configure(sensor, pa_state, ifp_state);
+ ret = mt9m114_configure_ifp(sensor, ifp_state);
+ if (ret)
+ goto error;
+
+ ret = mt9m114_configure_pa(sensor, pa_state);
if (ret)
goto error;
@@ -1599,13 +1619,9 @@ static int mt9m114_ifp_get_frame_interval(struct v4l2_subdev *sd,
if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
- mutex_lock(sensor->ifp.hdl.lock);
-
ival->numerator = 1;
ival->denominator = sensor->ifp.frame_rate;
- mutex_unlock(sensor->ifp.hdl.lock);
-
return 0;
}
@@ -1624,8 +1640,6 @@ static int mt9m114_ifp_set_frame_interval(struct v4l2_subdev *sd,
if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
- mutex_lock(sensor->ifp.hdl.lock);
-
if (ival->numerator != 0 && ival->denominator != 0)
sensor->ifp.frame_rate = min_t(unsigned int,
ival->denominator / ival->numerator,
@@ -1639,8 +1653,6 @@ static int mt9m114_ifp_set_frame_interval(struct v4l2_subdev *sd,
if (sensor->streaming)
ret = mt9m114_set_frame_rate(sensor);
- mutex_unlock(sensor->ifp.hdl.lock);
-
return ret;
}
@@ -2235,9 +2247,22 @@ static const struct dev_pm_ops mt9m114_pm_ops = {
* Probe & Remove
*/
+static int mt9m114_verify_link_frequency(struct mt9m114 *sensor,
+ unsigned int pixrate)
+{
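+ /* CSI-2 D-PHY: 16 bpp DDR over one lane gives pixrate * 8; parallel gives pixrate * 2. */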
+ unsigned int link_freq = sensor->bus_cfg.bus_type == V4L2_MBUS_CSI2_DPHY
+ ? pixrate * 8 : pixrate * 2;
+
+ if (sensor->bus_cfg.nr_of_link_frequencies != 1 ||
+ sensor->bus_cfg.link_frequencies[0] != link_freq)
+ return -EINVAL;
+
+ return 0;
+}
+
static int mt9m114_clk_init(struct mt9m114 *sensor)
{
- unsigned int link_freq;
+ unsigned int pixrate;
/* Hardcode the PLL multiplier and dividers to default settings. */
sensor->pll.m = 32;
@@ -2249,19 +2274,29 @@ static int mt9m114_clk_init(struct mt9m114 *sensor)
* for 16-bit per pixel, transmitted in DDR over a single lane. For
* parallel mode, the sensor outputs one pixel in two PIXCLK cycles.
*/
- sensor->pixrate = clk_get_rate(sensor->clk) * sensor->pll.m
- / ((sensor->pll.n + 1) * (sensor->pll.p + 1));
- link_freq = sensor->bus_cfg.bus_type == V4L2_MBUS_CSI2_DPHY
- ? sensor->pixrate * 8 : sensor->pixrate * 2;
+ /*
+ * Check if EXTCLK fits the configured link frequency. Bypass the PLL
+ * in this case.
+ */
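+ /* With the PLL bypassed the pixel rate is EXTCLK / 2. */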
+ pixrate = clk_get_rate(sensor->clk) / 2;
+ if (mt9m114_verify_link_frequency(sensor, pixrate) == 0) {
+ sensor->pixrate = pixrate;
+ sensor->bypass_pll = true;
+ return 0;
+ }
- if (sensor->bus_cfg.nr_of_link_frequencies != 1 ||
- sensor->bus_cfg.link_frequencies[0] != link_freq) {
- dev_err(&sensor->client->dev, "Unsupported DT link-frequencies\n");
- return -EINVAL;
+ /* Check if the PLL configuration fits the configured link frequency. */
+ pixrate = clk_get_rate(sensor->clk) * sensor->pll.m
+ / ((sensor->pll.n + 1) * (sensor->pll.p + 1));
+ if (mt9m114_verify_link_frequency(sensor, pixrate) == 0) {
+ sensor->pixrate = pixrate;
+ sensor->bypass_pll = false;
+ return 0;
}
- return 0;
+ dev_err(&sensor->client->dev, "Unsupported DT link-frequencies\n");
+ return -EINVAL;
}
static int mt9m114_identify(struct mt9m114 *sensor)
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 06b7896c3eaf..586b31ba076b 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1469,14 +1469,15 @@ static int ov2659_probe(struct i2c_client *client)
V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(ov2659_test_pattern_menu) - 1,
0, 0, ov2659_test_pattern_menu);
- ov2659->sd.ctrl_handler = &ov2659->ctrls;
if (ov2659->ctrls.error) {
dev_err(&client->dev, "%s: control initialization error %d\n",
__func__, ov2659->ctrls.error);
+ v4l2_ctrl_handler_free(&ov2659->ctrls);
return ov2659->ctrls.error;
}
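+ /* Publish the control handler only after every control was created successfully. */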
+ ov2659->sd.ctrl_handler = &ov2659->ctrls;
sd = &ov2659->sd;
client->flags |= I2C_CLIENT_SCCB;
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 6cf461e3373c..4e959534e6e7 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -766,11 +766,9 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
{
struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
struct v4l2_ctrl_handler *ctrl_hdlr;
- const struct ov2740_mode *cur_mode;
s64 exposure_max, h_blank, pixel_rate;
u32 vblank_min, vblank_max, vblank_default;
struct v4l2_fwnode_device_properties props;
- int size;
int ret;
ctrl_hdlr = &ov2740->ctrl_handler;
@@ -778,12 +776,10 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
if (ret)
return ret;
- cur_mode = ov2740->cur_mode;
- size = ARRAY_SIZE(link_freq_menu_items);
-
ov2740->link_freq =
v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2740_ctrl_ops,
- V4L2_CID_LINK_FREQ, size - 1,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq_menu_items) - 1,
ov2740->supported_modes->link_freq_index,
link_freq_menu_items);
if (ov2740->link_freq)
@@ -794,14 +790,14 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
V4L2_CID_PIXEL_RATE, 0,
pixel_rate, 1, pixel_rate);
- vblank_min = cur_mode->vts_min - cur_mode->height;
- vblank_max = cur_mode->vts_max - cur_mode->height;
- vblank_default = cur_mode->vts_def - cur_mode->height;
+ vblank_min = ov2740->cur_mode->vts_min - ov2740->cur_mode->height;
+ vblank_max = ov2740->cur_mode->vts_max - ov2740->cur_mode->height;
+ vblank_default = ov2740->cur_mode->vts_def - ov2740->cur_mode->height;
ov2740->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops,
V4L2_CID_VBLANK, vblank_min,
vblank_max, 1, vblank_default);
- h_blank = cur_mode->hts - cur_mode->width;
+ h_blank = ov2740->cur_mode->hts - ov2740->cur_mode->width;
ov2740->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops,
V4L2_CID_HBLANK, h_blank, h_blank, 1,
h_blank);
@@ -814,7 +810,7 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
OV2740_DGTL_GAIN_MIN, OV2740_DGTL_GAIN_MAX,
OV2740_DGTL_GAIN_STEP, OV2740_DGTL_GAIN_DEFAULT);
- exposure_max = cur_mode->vts_def - OV2740_EXPOSURE_MAX_MARGIN;
+ exposure_max = ov2740->cur_mode->vts_def - OV2740_EXPOSURE_MAX_MARGIN;
ov2740->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops,
V4L2_CID_EXPOSURE,
OV2740_EXPOSURE_MIN, exposure_max,
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index c54bbc207189..b9efb2d2276a 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -2688,10 +2688,15 @@ static int ov5670_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(&client->dev, ret, "GPIO probe failed\n");
- /* Graph Endpoint */
+ /*
+ * Graph endpoint. If it is missing we defer rather than fail, as this
+ * sensor is known to co-exist on systems with the IPU3, where the
+ * endpoint may be created later by the ipu-bridge.
+ */
handle = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
if (!handle)
- return dev_err_probe(&client->dev, -ENXIO, "Endpoint for node get failed\n");
+ return dev_err_probe(&client->dev, -EPROBE_DEFER,
+ "Endpoint for node get failed\n");
ov5670->endpoint.bus_type = V4L2_MBUS_CSI2_DPHY;
ov5670->endpoint.bus.mipi_csi2.num_data_lanes = 2;
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
index 46b9ce111676..485efd15257e 100644
--- a/drivers/media/i2c/ov5693.c
+++ b/drivers/media/i2c/ov5693.c
@@ -1222,9 +1222,14 @@ static int ov5693_check_hwcfg(struct ov5693_device *ov5693)
unsigned int i;
int ret;
+ /*
+ * Sometimes the fwnode graph is initialized by the bridge driver.
+ * Bridge drivers doing this may also add GPIO mappings; wait for this.
+ */
endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!endpoint)
- return -EPROBE_DEFER; /* Could be provided by cio2-bridge */
+ return dev_err_probe(ov5693->dev, -EPROBE_DEFER,
+ "waiting for fwnode graph endpoint\n");
ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &bus_cfg);
fwnode_handle_put(endpoint);
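
Why dev_err_probe() rather than a bare -EPROBE_DEFER return (here and in the ov7251, ov8865 and ipu-bridge hunks that follow): with -EPROBE_DEFER the message is not printed to the kernel log but recorded as the deferral reason, readable from /sys/kernel/debug/devices_deferred, so a probe stuck waiting on the bridge becomes diagnosable without log spam. A minimal sketch of the pattern:

static int foo_check_hwcfg(struct device *dev)
{
	struct fwnode_handle *ep;

	ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
	if (!ep)
		/* Records the deferral reason instead of logging an error. */
		return dev_err_probe(dev, -EPROBE_DEFER,
				     "waiting for fwnode graph endpoint\n");
	fwnode_handle_put(ep);
	return 0;
}
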
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 3226888d77e9..31a42d81e970 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -1486,9 +1486,14 @@ static int ov7251_check_hwcfg(struct ov7251 *ov7251)
unsigned int i, j;
int ret;
+ /*
+ * Sometimes the fwnode graph is initialized by the bridge driver.
+ * Bridge drivers doing this may also add GPIO mappings; wait for this.
+ */
endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!endpoint)
- return -EPROBE_DEFER; /* could be provided by cio2-bridge */
+ return dev_err_probe(ov7251->dev, -EPROBE_DEFER,
+ "waiting for fwnode graph endpoint\n");
ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &bus_cfg);
fwnode_handle_put(endpoint);
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
index 95ffe7536aa6..a2138f7988aa 100644
--- a/drivers/media/i2c/ov8865.c
+++ b/drivers/media/i2c/ov8865.c
@@ -2991,7 +2991,8 @@ static int ov8865_probe(struct i2c_client *client)
handle = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
if (!handle)
- return -EPROBE_DEFER;
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "waiting for fwnode graph endpoint\n");
sensor->endpoint.bus_type = V4L2_MBUS_CSI2_DPHY;
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index a1c71187e773..b8b8f206ec3a 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -25,6 +25,7 @@
#include "saa711x_regs.h"
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -664,15 +665,6 @@ static const unsigned char saa7115_init_misc[] = {
0x00, 0x00
};
-static int saa711x_odd_parity(u8 c)
-{
- c ^= (c >> 4);
- c ^= (c >> 2);
- c ^= (c >> 1);
-
- return c & 1;
-}
-
static int saa711x_decode_vps(u8 *dst, u8 *p)
{
static const u8 biphase_tbl[] = {
@@ -1227,7 +1219,7 @@ static int saa711x_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vb
vbi->type = V4L2_SLICED_TELETEXT_B;
break;
case 4:
- if (!saa711x_odd_parity(p[0]) || !saa711x_odd_parity(p[1]))
+ if (!parity8(p[0]) || !parity8(p[1]))
return 0;
vbi->type = V4L2_SLICED_CAPTION_525;
break;
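
The replacement of the local odd-parity helper with parity8() from <linux/bitops.h> (here and in the cx18 hunk further below) is behavior-preserving: both return 1 when the byte has an odd number of set bits. A small runnable userspace check, using __builtin_parity() as a stand-in for the kernel helper:

#include <assert.h>
#include <stdint.h>

/* The XOR-fold helper the patch removes, kept for comparison. */
static int odd_parity(uint8_t c)
{
	c ^= (c >> 4);
	c ^= (c >> 2);
	c ^= (c >> 1);
	return c & 1;
}

int main(void)
{
	for (int c = 0; c < 256; c++)
		assert(odd_parity((uint8_t)c) == __builtin_parity(c));
	return 0;
}
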
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 3d6703b75bfa..1cc7636e446d 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -114,7 +114,7 @@ static inline struct tc358743_state *to_state(struct v4l2_subdev *sd)
/* --------------- I2C --------------- */
-static void i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
+static int i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
{
struct tc358743_state *state = to_state(sd);
struct i2c_client *client = state->i2c_client;
@@ -140,6 +140,7 @@ static void i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
v4l2_err(sd, "%s: reading register 0x%x from 0x%x failed: %d\n",
__func__, reg, client->addr, err);
}
+ return err != ARRAY_SIZE(msgs);
}
static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
@@ -196,15 +197,24 @@ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
}
}
-static noinline u32 i2c_rdreg(struct v4l2_subdev *sd, u16 reg, u32 n)
+static noinline u32 i2c_rdreg_err(struct v4l2_subdev *sd, u16 reg, u32 n,
+ int *err)
{
+ int error;
__le32 val = 0;
- i2c_rd(sd, reg, (u8 __force *)&val, n);
+ error = i2c_rd(sd, reg, (u8 __force *)&val, n);
+ if (err)
+ *err = error;
return le32_to_cpu(val);
}
+static inline u32 i2c_rdreg(struct v4l2_subdev *sd, u16 reg, u32 n)
+{
+ return i2c_rdreg_err(sd, reg, n, NULL);
+}
+
static noinline void i2c_wrreg(struct v4l2_subdev *sd, u16 reg, u32 val, u32 n)
{
__le32 raw = cpu_to_le32(val);
@@ -233,6 +243,13 @@ static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
return i2c_rdreg(sd, reg, 2);
}
+static int i2c_rd16_err(struct v4l2_subdev *sd, u16 reg, u16 *value)
+{
+ int err;
+ *value = i2c_rdreg_err(sd, reg, 2, &err);
+ return err;
+}
+
static void i2c_wr16(struct v4l2_subdev *sd, u16 reg, u16 val)
{
i2c_wrreg(sd, reg, val, 2);
@@ -420,9 +437,9 @@ static void tc358743_enable_edid(struct v4l2_subdev *sd)
v4l2_dbg(2, debug, sd, "%s:\n", __func__);
- /* Enable hotplug after 100 ms. DDC access to EDID is also enabled when
+ /* Enable hotplug after 143 ms. DDC access to EDID is also enabled when
* hotplug is enabled. See register DDC_CTL */
- schedule_delayed_work(&state->delayed_work_enable_hotplug, HZ / 10);
+ schedule_delayed_work(&state->delayed_work_enable_hotplug, HZ / 7);
tc358743_enable_interrupts(sd, true);
tc358743_s_ctrl_detect_tx_5v(sd);
@@ -1691,12 +1708,23 @@ static int tc358743_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
+static u32 tc358743_g_colorspace(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ return V4L2_COLORSPACE_SRGB;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ return V4L2_COLORSPACE_SMPTE170M;
+ default:
+ return 0;
+ }
+}
+
static int tc358743_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct tc358743_state *state = to_state(sd);
- u8 vi_rep = i2c_rd8(sd, VI_REP);
if (format->pad != 0)
return -EINVAL;
@@ -1706,23 +1734,7 @@ static int tc358743_get_fmt(struct v4l2_subdev *sd,
format->format.height = state->timings.bt.height;
format->format.field = V4L2_FIELD_NONE;
- switch (vi_rep & MASK_VOUT_COLOR_SEL) {
- case MASK_VOUT_COLOR_RGB_FULL:
- case MASK_VOUT_COLOR_RGB_LIMITED:
- format->format.colorspace = V4L2_COLORSPACE_SRGB;
- break;
- case MASK_VOUT_COLOR_601_YCBCR_LIMITED:
- case MASK_VOUT_COLOR_601_YCBCR_FULL:
- format->format.colorspace = V4L2_COLORSPACE_SMPTE170M;
- break;
- case MASK_VOUT_COLOR_709_YCBCR_FULL:
- case MASK_VOUT_COLOR_709_YCBCR_LIMITED:
- format->format.colorspace = V4L2_COLORSPACE_REC709;
- break;
- default:
- format->format.colorspace = 0;
- break;
- }
+ format->format.colorspace = tc358743_g_colorspace(format->format.code);
return 0;
}
@@ -1736,19 +1748,14 @@ static int tc358743_set_fmt(struct v4l2_subdev *sd,
u32 code = format->format.code; /* is overwritten by get_fmt */
int ret = tc358743_get_fmt(sd, sd_state, format);
- format->format.code = code;
+ if (code == MEDIA_BUS_FMT_RGB888_1X24 ||
+ code == MEDIA_BUS_FMT_UYVY8_1X16)
+ format->format.code = code;
+ format->format.colorspace = tc358743_g_colorspace(format->format.code);
if (ret)
return ret;
- switch (code) {
- case MEDIA_BUS_FMT_RGB888_1X24:
- case MEDIA_BUS_FMT_UYVY8_1X16:
- break;
- default:
- return -EINVAL;
- }
-
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
return 0;
@@ -1972,8 +1979,19 @@ static int tc358743_probe_of(struct tc358743_state *state)
state->pdata.refclk_hz = clk_get_rate(refclk);
state->pdata.ddc5v_delay = DDC5V_DELAY_100_MS;
state->pdata.enable_hdcp = false;
- /* A FIFO level of 16 should be enough for 2-lane 720p60 at 594 MHz. */
- state->pdata.fifo_level = 16;
+ /*
+ * Ideally the FIFO trigger level should be set based on the input and
+ * output data rates, but the calculations required are buried in
+ * Toshiba's register settings spreadsheet.
+ * A value of 16 works with a 594 Mbps data rate for 720p60 (using 2
+ * lanes) and 1080p60 (using 4 lanes), but fails when the data rate
+ * is increased, or when a lower pixel clock is used that results in
+ * the CSI reading out faster than the data arrives.
+ *
+ * A value of 374 works with both of those modes at 594 Mbps, and with
+ * most modes at 972 Mbps.
+ */
+ state->pdata.fifo_level = 374;
/*
* The PLL input clock is obtained by dividing refclk by pll_prd.
* It must be between 6 MHz and 40 MHz, lower frequency is better.
@@ -1993,6 +2011,7 @@ static int tc358743_probe_of(struct tc358743_state *state)
/*
* The CSI bps per lane must be between 62.5 Mbps and 1 Gbps.
* The default is 594 Mbps for 4-lane 1080p60 or 2-lane 720p60.
+ * 972 Mbps allows 1080p50 UYVY over two lanes.
*/
bps_pr_lane = 2 * endpoint.link_frequencies[0];
if (bps_pr_lane < 62500000U || bps_pr_lane > 1000000000U) {
@@ -2006,23 +2025,42 @@ static int tc358743_probe_of(struct tc358743_state *state)
state->pdata.refclk_hz * state->pdata.pll_prd;
/*
- * FIXME: These timings are from REF_02 for 594 Mbps per lane (297 MHz
- * link frequency). In principle it should be possible to calculate
+ * FIXME: These timings are from REF_02 for 594 or 972 Mbps per lane
+ * (297 MHz or 486 MHz link frequency). In principle it should be
+ * possible to calculate them based on link frequency and resolution.
*/
- if (bps_pr_lane != 594000000U)
+ switch (bps_pr_lane) {
+ default:
dev_warn(dev, "untested bps per lane: %u bps\n", bps_pr_lane);
- state->pdata.lineinitcnt = 0xe80;
- state->pdata.lptxtimecnt = 0x003;
- /* tclk-preparecnt: 3, tclk-zerocnt: 20 */
- state->pdata.tclk_headercnt = 0x1403;
- state->pdata.tclk_trailcnt = 0x00;
- /* ths-preparecnt: 3, ths-zerocnt: 1 */
- state->pdata.ths_headercnt = 0x0103;
- state->pdata.twakeup = 0x4882;
- state->pdata.tclk_postcnt = 0x008;
- state->pdata.ths_trailcnt = 0x2;
- state->pdata.hstxvregcnt = 0;
+ fallthrough;
+ case 594000000U:
+ state->pdata.lineinitcnt = 0xe80;
+ state->pdata.lptxtimecnt = 0x003;
+ /* tclk-preparecnt: 3, tclk-zerocnt: 20 */
+ state->pdata.tclk_headercnt = 0x1403;
+ state->pdata.tclk_trailcnt = 0x00;
+ /* ths-preparecnt: 3, ths-zerocnt: 1 */
+ state->pdata.ths_headercnt = 0x0103;
+ state->pdata.twakeup = 0x4882;
+ state->pdata.tclk_postcnt = 0x008;
+ state->pdata.ths_trailcnt = 0x2;
+ state->pdata.hstxvregcnt = 0;
+ break;
+ case 972000000U:
+ state->pdata.lineinitcnt = 0x1b58;
+ state->pdata.lptxtimecnt = 0x007;
+ /* tclk-preparecnt: 6, tclk-zerocnt: 40 */
+ state->pdata.tclk_headercnt = 0x2806;
+ state->pdata.tclk_trailcnt = 0x00;
+ /* ths-preparecnt: 6, ths-zerocnt: 8 */
+ state->pdata.ths_headercnt = 0x0806;
+ state->pdata.twakeup = 0x4268;
+ state->pdata.tclk_postcnt = 0x008;
+ state->pdata.ths_trailcnt = 0x5;
+ state->pdata.hstxvregcnt = 0;
+ break;
+ }
state->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_LOW);
@@ -2061,6 +2099,7 @@ static int tc358743_probe(struct i2c_client *client)
struct tc358743_platform_data *pdata = client->dev.platform_data;
struct v4l2_subdev *sd;
u16 irq_mask = MASK_HDMI_MSK | MASK_CSI_MSK;
+ u16 chipid;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -2092,7 +2131,8 @@ static int tc358743_probe(struct i2c_client *client)
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
/* i2c access */
- if ((i2c_rd16(sd, CHIPID) & MASK_CHIPID) != 0) {
+ if (i2c_rd16_err(sd, CHIPID, &chipid) ||
+ (chipid & MASK_CHIPID) != 0) {
v4l2_info(sd, "not a TC358743 on address 0x%x\n",
client->addr << 1);
return -ENODEV;
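
Worked numbers for the bps-per-lane switch in the tc358743 hunk above: MIPI D-PHY uses DDR signalling, so each lane carries two bits per link-clock cycle and bps_pr_lane is twice the DT link-frequency. A runnable check:

#include <stdio.h>

int main(void)
{
	/* DT link-frequencies for the two timing sets above. */
	unsigned long long link_freq[] = { 297000000ULL, 486000000ULL };

	for (int i = 0; i < 2; i++)
		/* DDR: two bits per cycle -> 594 Mbps and 972 Mbps. */
		printf("link %llu Hz -> %llu bps per lane\n",
		       link_freq[i], 2 * link_freq[i]);
	return 0;
}
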
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 959590afc80f..1087d2bddaf2 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -589,8 +589,8 @@ static void tda1997x_enable_edid(struct v4l2_subdev *sd)
v4l2_dbg(1, debug, sd, "%s\n", __func__);
- /* Enable hotplug after 100ms */
- schedule_delayed_work(&state->delayed_work_enable_hpd, HZ / 10);
+ /* Enable hotplug after 143ms */
+ schedule_delayed_work(&state->delayed_work_enable_hpd, HZ / 7);
}
/* -----------------------------------------------------------------------------
diff --git a/drivers/media/i2c/vd55g1.c b/drivers/media/i2c/vd55g1.c
index 25e2fc88a036..c0754fd03b1d 100644
--- a/drivers/media/i2c/vd55g1.c
+++ b/drivers/media/i2c/vd55g1.c
@@ -111,9 +111,9 @@
#define VD55G1_WIDTH 804
#define VD55G1_HEIGHT 704
-#define VD55G1_DEFAULT_MODE 0
+#define VD55G1_MODE_DEF 0
#define VD55G1_NB_GPIOS 4
-#define VD55G1_MEDIA_BUS_FMT_DEF MEDIA_BUS_FMT_Y8_1X8
+#define VD55G1_MBUS_CODE_DEF 0
#define VD55G1_DGAIN_DEF 256
#define VD55G1_AGAIN_DEF 19
#define VD55G1_EXPO_MAX_TERM 64
@@ -129,8 +129,8 @@
#define VD55G1_FWPATCH_REVISION_MINOR 9
#define VD55G1_XCLK_FREQ_MIN (6 * HZ_PER_MHZ)
#define VD55G1_XCLK_FREQ_MAX (27 * HZ_PER_MHZ)
-#define VD55G1_MIPI_RATE_MIN (250 * HZ_PER_MHZ)
-#define VD55G1_MIPI_RATE_MAX (1200 * HZ_PER_MHZ)
+#define VD55G1_MIPI_RATE_MIN (250 * MEGA)
+#define VD55G1_MIPI_RATE_MAX (1200 * MEGA)
static const u8 patch_array[] = {
0x44, 0x03, 0x09, 0x02, 0xe6, 0x01, 0x42, 0x00, 0xea, 0x01, 0x42, 0x00,
@@ -883,10 +883,9 @@ static int vd55g1_apply_cold_start(struct vd55g1 *sensor,
return ret;
}
-static void vd55g1_update_img_pad_format(struct vd55g1 *sensor,
- const struct vd55g1_mode *mode,
- u32 code,
- struct v4l2_mbus_framefmt *fmt)
+static void vd55g1_update_pad_fmt(struct vd55g1 *sensor,
+ const struct vd55g1_mode *mode, u32 code,
+ struct v4l2_mbus_framefmt *fmt)
{
fmt->code = code;
fmt->width = mode->width;
@@ -1038,8 +1037,6 @@ static int vd55g1_enable_streams(struct v4l2_subdev *sd,
if (ret < 0)
return ret;
- vd55g1_write(sensor, VD55G1_REG_EXT_CLOCK, sensor->xclk_freq, &ret);
-
/* Configure output */
vd55g1_write(sensor, VD55G1_REG_MIPI_DATA_RATE,
sensor->mipi_rate, &ret);
@@ -1084,7 +1081,7 @@ static int vd55g1_enable_streams(struct v4l2_subdev *sd,
err_rpm_put:
pm_runtime_put(sensor->dev);
- return 0;
+ return -EINVAL;
}
static int vd55g1_disable_streams(struct v4l2_subdev *sd,
@@ -1231,8 +1228,8 @@ static int vd55g1_set_pad_fmt(struct v4l2_subdev *sd,
width, height, sd_fmt->format.width,
sd_fmt->format.height);
- vd55g1_update_img_pad_format(sensor, new_mode, sd_fmt->format.code,
- &sd_fmt->format);
+ vd55g1_update_pad_fmt(sensor, new_mode, sd_fmt->format.code,
+ &sd_fmt->format);
/*
* Use binning to maximize the crop rectangle size, and centre it in the
@@ -1262,7 +1259,6 @@ static int vd55g1_set_pad_fmt(struct v4l2_subdev *sd,
static int vd55g1_init_state(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
- unsigned int def_mode = VD55G1_DEFAULT_MODE;
struct vd55g1 *sensor = to_vd55g1(sd);
struct v4l2_subdev_format fmt = { 0 };
struct v4l2_subdev_route routes[] = {
@@ -1279,8 +1275,9 @@ static int vd55g1_init_state(struct v4l2_subdev *sd,
if (ret)
return ret;
- vd55g1_update_img_pad_format(sensor, &vd55g1_supported_modes[def_mode],
- VD55G1_MEDIA_BUS_FMT_DEF, &fmt.format);
+ vd55g1_update_pad_fmt(sensor, &vd55g1_supported_modes[VD55G1_MODE_DEF],
+ vd55g1_mbus_codes[VD55G1_MBUS_CODE_DEF].code,
+ &fmt.format);
return vd55g1_set_pad_fmt(sd, sd_state, &fmt);
}
@@ -1613,6 +1610,9 @@ static int vd55g1_power_on(struct device *dev)
goto disable_clock;
}
+ /* Set up the clock now to advance through the system FSM states */
+ vd55g1_write(sensor, VD55G1_REG_EXT_CLOCK, sensor->xclk_freq, &ret);
+
ret = vd55g1_patch(sensor);
if (ret) {
dev_err(dev, "Sensor patch failed %d\n", ret);
diff --git a/drivers/media/pci/cx18/cx18-av-vbi.c b/drivers/media/pci/cx18/cx18-av-vbi.c
index 65281d40c681..1a113aad9cd4 100644
--- a/drivers/media/pci/cx18/cx18-av-vbi.c
+++ b/drivers/media/pci/cx18/cx18-av-vbi.c
@@ -8,6 +8,7 @@
*/
+#include <linux/bitops.h>
#include "cx18-driver.h"
/*
@@ -56,15 +57,6 @@ struct vbi_anc_data {
/* u8 fill[]; Variable number of fill bytes */
};
-static int odd_parity(u8 c)
-{
- c ^= (c >> 4);
- c ^= (c >> 2);
- c ^= (c >> 1);
-
- return c & 1;
-}
-
static int decode_vps(u8 *dst, u8 *p)
{
static const u8 biphase_tbl[] = {
@@ -278,7 +270,7 @@ int cx18_av_decode_vbi_line(struct v4l2_subdev *sd,
break;
case 6:
sdid = V4L2_SLICED_CAPTION_525;
- err = !odd_parity(p[0]) || !odd_parity(p[1]);
+ err = !parity8(p[0]) || !parity8(p[1]);
break;
case 9:
sdid = V4L2_SLICED_VPS;
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index af05bde75816..485ca9747c4c 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -271,18 +271,6 @@ struct cx18_options {
#define CX18_SLICED_TYPE_WSS_625 (5)
#define CX18_SLICED_TYPE_VPS (7)
-/**
- * list_entry_is_past_end - check if a previous loop cursor is off list end
- * @pos: the type * previously used as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- *
- * Check if the entry's list_head is the head of the list, thus it's not a
- * real entry but was the loop cursor that walked past the end
- */
-#define list_entry_is_past_end(pos, head, member) \
- (&pos->member == (head))
-
struct cx18_vb2_buffer {
/* Common video buffer sub-system struct */
struct vb2_v4l2_buffer vb;
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index 315577d71d95..cefa91b37f89 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -371,7 +371,7 @@ static size_t cx18_copy_mdl_to_user(struct cx18_stream *s,
mdl->curr_buf = list_first_entry(&mdl->buf_list,
struct cx18_buffer, list);
- if (list_entry_is_past_end(mdl->curr_buf, &mdl->buf_list, list)) {
+ if (list_entry_is_head(mdl->curr_buf, &mdl->buf_list, list)) {
/*
* For some reason we've exhausted the buffers, but the MDL
* object still said some data was unread.
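
The list_entry_is_head() substitution (here and in the cx18-ioctl hunk below) works because list_first_entry() on an empty list hands back the head itself, reinterpreted as an entry; the generic macro from <linux/list.h> performs exactly the comparison the removed local macro did. A userspace illustration of that aliasing:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry_is_head(pos, head, member) \
	(&(pos)->member == (head))

struct buf { int id; struct list_head list; };

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */
	/* "First entry" of an empty list is the head, cast to an entry. */
	struct buf *first = container_of(head.next, struct buf, list);

	printf("cursor is past end: %d\n",
	       list_entry_is_head(first, &head, list));	/* prints 1 */
	return 0;
}
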
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 1817b9ed042c..9a1512b1ccaa 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -764,7 +764,7 @@ static int cx18_process_idx_data(struct cx18_stream *s, struct cx18_mdl *mdl,
mdl->curr_buf = list_first_entry(&mdl->buf_list,
struct cx18_buffer, list);
- if (list_entry_is_past_end(mdl->curr_buf, &mdl->buf_list, list)) {
+ if (list_entry_is_head(mdl->curr_buf, &mdl->buf_list, list)) {
/*
* For some reason we've exhausted the buffers, but the MDL
* object still said some data was unread.
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index 83e682e1a4b7..4e579352ab2c 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -55,11 +55,15 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = {
/* Himax HM2172 */
IPU_SENSOR_CONFIG("HIMX2172", 1, 384000000),
/* GalaxyCore GC0310 */
- IPU_SENSOR_CONFIG("INT0310", 0),
+ IPU_SENSOR_CONFIG("INT0310", 1, 55692000),
/* Omnivision OV5693 */
IPU_SENSOR_CONFIG("INT33BE", 1, 419200000),
+ /* Onsemi MT9M114 */
+ IPU_SENSOR_CONFIG("INT33F0", 1, 384000000),
/* Omnivision OV2740 */
IPU_SENSOR_CONFIG("INT3474", 1, 180000000),
+ /* Omnivision OV5670 */
+ IPU_SENSOR_CONFIG("INT3479", 1, 422400000),
/* Omnivision OV8865 */
IPU_SENSOR_CONFIG("INT347A", 1, 360000000),
/* Omnivision OV7251 */
@@ -78,7 +82,7 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = {
/* Omnivision OV08A10 */
IPU_SENSOR_CONFIG("OVTI08A1", 1, 500000000),
/* Omnivision OV08x40 */
- IPU_SENSOR_CONFIG("OVTI08F4", 1, 400000000),
+ IPU_SENSOR_CONFIG("OVTI08F4", 3, 400000000, 749000000, 800000000),
/* Omnivision OV13B10 */
IPU_SENSOR_CONFIG("OVTI13B1", 1, 560000000),
IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000),
@@ -86,6 +90,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = {
IPU_SENSOR_CONFIG("OVTI2680", 1, 331200000),
/* Omnivision OV8856 */
IPU_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000),
+ /* Toshiba T4KA3 */
+ IPU_SENSOR_CONFIG("XMCC0003", 1, 321468000),
};
static const struct ipu_property_names prop_names = {
@@ -809,7 +815,8 @@ int ipu_bridge_init(struct device *dev,
return 0;
if (!ipu_bridge_ivsc_is_ready())
- return -EPROBE_DEFER;
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "waiting for IVSC to become ready\n");
bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
if (!bridge)
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 16fde96c9fb2..a87f105beb5e 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -358,6 +358,8 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
CIO2_FBPT_SUBENTRY_UNIT);
const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
+ struct v4l2_subdev_state *state;
+ const struct v4l2_mbus_framefmt *format;
const struct ipu3_cio2_fmt *fmt;
void __iomem *const base = cio2->base;
u8 lanes, csi2bus = q->csi2.port;
@@ -365,7 +367,13 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
struct cio2_csi2_timing timing = { 0 };
int i, r;
- fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
+ state = v4l2_subdev_lock_and_get_active_state(&q->subdev);
+ format = v4l2_subdev_state_get_format(state, CIO2_PAD_SINK);
+
+ fmt = cio2_find_format(NULL, &format->code);
+
+ v4l2_subdev_unlock_state(state);
+
if (!fmt)
return -EINVAL;
@@ -1194,9 +1202,9 @@ static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
return v4l2_event_subscribe(fh, sub, 0, NULL);
}
-static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+static int cio2_subdev_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
- struct v4l2_mbus_framefmt *format;
const struct v4l2_mbus_framefmt fmt_default = {
.width = 1936,
.height = 1096,
@@ -1207,42 +1215,23 @@ static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
.quantization = V4L2_QUANTIZATION_DEFAULT,
.xfer_func = V4L2_XFER_FUNC_DEFAULT,
};
+ struct v4l2_mbus_framefmt *format;
- /* Initialize try_fmt */
- format = v4l2_subdev_state_get_format(fh->state, CIO2_PAD_SINK);
+ /* Initialize the format on the sink and source pads. */
+ format = v4l2_subdev_state_get_format(state, CIO2_PAD_SINK);
*format = fmt_default;
/* same as sink */
- format = v4l2_subdev_state_get_format(fh->state, CIO2_PAD_SOURCE);
+ format = v4l2_subdev_state_get_format(state, CIO2_PAD_SOURCE);
*format = fmt_default;
return 0;
}
-static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
-{
- struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
-
- mutex_lock(&q->subdev_lock);
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_state_get_format(sd_state,
- fmt->pad);
- else
- fmt->format = q->subdev_fmt;
-
- mutex_unlock(&q->subdev_lock);
-
- return 0;
-}
-
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
struct v4l2_mbus_framefmt *mbus;
u32 mbus_code = fmt->format.code;
unsigned int i;
@@ -1252,12 +1241,7 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
* source always propagates from sink
*/
if (fmt->pad == CIO2_PAD_SOURCE)
- return cio2_subdev_get_fmt(sd, sd_state, fmt);
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- mbus = v4l2_subdev_state_get_format(sd_state, fmt->pad);
- else
- mbus = &q->subdev_fmt;
+ return v4l2_subdev_get_fmt(sd, sd_state, fmt);
fmt->format.code = formats[0].mbus_code;
@@ -1272,9 +1256,12 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
fmt->format.field = V4L2_FIELD_NONE;
- mutex_lock(&q->subdev_lock);
+ mbus = v4l2_subdev_state_get_format(sd_state, CIO2_PAD_SINK);
+ *mbus = fmt->format;
+
+ /* Propagate the format to the source pad. */
+ mbus = v4l2_subdev_state_get_format(sd_state, CIO2_PAD_SOURCE);
*mbus = fmt->format;
- mutex_unlock(&q->subdev_lock);
return 0;
}
@@ -1345,12 +1332,12 @@ static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
};
static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
- .open = cio2_subdev_open,
+ .init_state = cio2_subdev_init_state,
};
static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
.link_validate = v4l2_subdev_link_validate_default,
- .get_fmt = cio2_subdev_get_fmt,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = cio2_subdev_set_fmt,
.enum_mbus_code = cio2_subdev_enum_mbus_code,
};
@@ -1502,28 +1489,18 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
{
static const u32 default_width = 1936;
static const u32 default_height = 1096;
- const struct ipu3_cio2_fmt dflt_fmt = formats[0];
struct device *dev = &cio2->pci_dev->dev;
struct video_device *vdev = &q->vdev;
struct vb2_queue *vbq = &q->vbq;
struct v4l2_subdev *subdev = &q->subdev;
- struct v4l2_mbus_framefmt *fmt;
int r;
/* Initialize miscellaneous variables */
mutex_init(&q->lock);
- mutex_init(&q->subdev_lock);
-
- /* Initialize formats to default values */
- fmt = &q->subdev_fmt;
- fmt->width = default_width;
- fmt->height = default_height;
- fmt->code = dflt_fmt.mbus_code;
- fmt->field = V4L2_FIELD_NONE;
q->format.width = default_width;
q->format.height = default_height;
- q->format.pixelformat = dflt_fmt.fourcc;
+ q->format.pixelformat = formats[0].fourcc;
q->format.colorspace = V4L2_COLORSPACE_RAW;
q->format.field = V4L2_FIELD_NONE;
q->format.num_planes = 1;
@@ -1567,9 +1544,16 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
CIO2_ENTITY_NAME " %td", q - cio2->queue);
subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
v4l2_set_subdevdata(subdev, cio2);
+
+ r = v4l2_subdev_init_finalize(subdev);
+ if (r) {
+ dev_err(dev, "failed to initialize subdev (%d)\n", r);
+ goto fail_subdev;
+ }
+
r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
if (r) {
- dev_err(dev, "failed initialize subdev (%d)\n", r);
+ dev_err(dev, "failed to register subdev (%d)\n", r);
goto fail_subdev;
}
@@ -1626,7 +1610,6 @@ fail_vdev_media_entity:
fail_subdev_media_entity:
cio2_fbpt_exit(q, dev);
fail_fbpt:
- mutex_destroy(&q->subdev_lock);
mutex_destroy(&q->lock);
return r;
@@ -1639,7 +1622,6 @@ static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
v4l2_device_unregister_subdev(&q->subdev);
media_entity_cleanup(&q->subdev.entity);
cio2_fbpt_exit(q, &cio2->pci_dev->dev);
- mutex_destroy(&q->subdev_lock);
mutex_destroy(&q->lock);
}
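
The cio2 conversion above is the stock migration to the v4l2 subdev active-state API: v4l2_subdev_init_finalize() allocates a core-managed state (so the driver-local subdev_fmt and subdev_lock disappear, as the header hunk below confirms), .get_fmt becomes the generic v4l2_subdev_get_fmt helper, and runtime readers take the state lock explicitly. A condensed sketch of the reader pattern, with sd and CIO2_PAD_SINK as in the driver:

static u32 read_active_code(struct v4l2_subdev *sd)
{
	struct v4l2_subdev_state *state;
	u32 code;

	state = v4l2_subdev_lock_and_get_active_state(sd);
	code = v4l2_subdev_state_get_format(state, CIO2_PAD_SINK)->code;
	v4l2_subdev_unlock_state(state);

	return code;
}
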
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
index d7cb7dae665b..19258190936a 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ -351,9 +351,7 @@ struct cio2_queue {
/* Subdev, /dev/v4l-subdevX */
struct v4l2_subdev subdev;
- struct mutex subdev_lock; /* Serialise acces to subdev_fmt field */
struct media_pad subdev_pads[CIO2_PADS];
- struct v4l2_mbus_framefmt subdev_fmt;
atomic_t frame_sequence;
/* Video device, /dev/videoX */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
index da8581a37e22..6030bd23b4b9 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
@@ -354,9 +354,9 @@ static int ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd,
remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]);
remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
- sink_streams = v4l2_subdev_state_xlate_streams(state, CSI2_PAD_SRC,
- CSI2_PAD_SINK,
- &streams_mask);
+ sink_streams =
+ v4l2_subdev_state_xlate_streams(state, pad, CSI2_PAD_SINK,
+ &streams_mask);
ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV);
if (ret)
@@ -384,9 +384,9 @@ static int ipu6_isys_csi2_disable_streams(struct v4l2_subdev *sd,
struct media_pad *remote_pad;
u64 sink_streams;
- sink_streams = v4l2_subdev_state_xlate_streams(state, CSI2_PAD_SRC,
- CSI2_PAD_SINK,
- &streams_mask);
+ sink_streams =
+ v4l2_subdev_state_xlate_streams(state, pad, CSI2_PAD_SINK,
+ &streams_mask);
remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]);
remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.h b/drivers/media/pci/intel/ipu6/ipu6-isys.h
index f488e782c26e..0e2c8b71edfc 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.h
@@ -40,7 +40,7 @@ struct ipu6_bus_device;
#define IPU6_ISYS_NUM_RECV_QUEUE 1
#define IPU6_ISYS_MIN_WIDTH 2U
-#define IPU6_ISYS_MIN_HEIGHT 2U
+#define IPU6_ISYS_MIN_HEIGHT 1U
#define IPU6_ISYS_MAX_WIDTH 4672U
#define IPU6_ISYS_MAX_HEIGHT 3416U
diff --git a/drivers/media/pci/intel/ivsc/mei_ace.c b/drivers/media/pci/intel/ivsc/mei_ace.c
index 3622271c71c8..98310b8511b1 100644
--- a/drivers/media/pci/intel/ivsc/mei_ace.c
+++ b/drivers/media/pci/intel/ivsc/mei_ace.c
@@ -529,6 +529,8 @@ static void mei_ace_remove(struct mei_cl_device *cldev)
ace_set_camera_owner(ace, ACE_CAMERA_IVSC);
+ mei_cldev_disable(cldev);
+
mutex_destroy(&ace->lock);
}
@@ -574,7 +576,7 @@ static struct mei_cl_driver mei_ace_driver = {
module_mei_cl_driver(mei_ace_driver);
-MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Wentong Wu");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_DESCRIPTION("Device driver for IVSC ACE");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
index 92d871a378ba..c2917e156345 100644
--- a/drivers/media/pci/intel/ivsc/mei_csi.c
+++ b/drivers/media/pci/intel/ivsc/mei_csi.c
@@ -760,6 +760,8 @@ static void mei_csi_remove(struct mei_cl_device *cldev)
pm_runtime_disable(&cldev->dev);
+ mei_cldev_disable(cldev);
+
mutex_destroy(&csi->lock);
}
@@ -783,7 +785,7 @@ static struct mei_cl_driver mei_csi_driver = {
module_mei_cl_driver(mei_csi_driver);
MODULE_IMPORT_NS("INTEL_IPU_BRIDGE");
-MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Wentong Wu");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_DESCRIPTION("Device driver for IVSC CSI");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/mgb4/mgb4_vout.c b/drivers/media/pci/mgb4/mgb4_vout.c
index 14c5725bd4d8..c179c425e167 100644
--- a/drivers/media/pci/mgb4/mgb4_vout.c
+++ b/drivers/media/pci/mgb4/mgb4_vout.c
@@ -492,7 +492,14 @@ static int vidioc_s_dv_timings(struct file *file, void *fh,
static int vidioc_enum_dv_timings(struct file *file, void *fh,
struct v4l2_enum_dv_timings *timings)
{
- return v4l2_enum_dv_timings_cap(timings, &video_timings_cap, NULL, NULL);
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+
+ if (timings->index != 0)
+ return -EINVAL;
+
+ get_timings(voutdev, &timings->timings);
+
+ return 0;
}
static int vidioc_dv_timings_cap(struct file *file, void *fh,
diff --git a/drivers/media/pci/saa7164/saa7164-buffer.c b/drivers/media/pci/saa7164/saa7164-buffer.c
index 89c5b79a5b24..7820e4f47fd5 100644
--- a/drivers/media/pci/saa7164/saa7164-buffer.c
+++ b/drivers/media/pci/saa7164/saa7164-buffer.c
@@ -52,26 +52,6 @@
* | etc
*/
-void saa7164_buffer_display(struct saa7164_buffer *buf)
-{
- struct saa7164_dev *dev = buf->port->dev;
- int i;
-
- dprintk(DBGLVL_BUF, "%s() buffer @ 0x%p nr=%d\n",
- __func__, buf, buf->idx);
- dprintk(DBGLVL_BUF, " pci_cpu @ 0x%p dma @ 0x%08llx len = 0x%x\n",
- buf->cpu, (long long)buf->dma, buf->pci_size);
- dprintk(DBGLVL_BUF, " pt_cpu @ 0x%p pt_dma @ 0x%08llx len = 0x%x\n",
- buf->pt_cpu, (long long)buf->pt_dma, buf->pt_size);
-
- /* Format the Page Table Entries to point into the data buffer */
- for (i = 0 ; i < SAA7164_PT_ENTRIES; i++) {
-
- dprintk(DBGLVL_BUF, " pt[%02d] = 0x%p -> 0x%llx\n",
- i, buf->pt_cpu, (u64)*(buf->pt_cpu));
-
- }
-}
/* Allocate a new buffer structure and associated PCI space in bytes.
* len must be a multiple of sizeof(u64)
*/
diff --git a/drivers/media/pci/saa7164/saa7164-cmd.c b/drivers/media/pci/saa7164/saa7164-cmd.c
index 42bd8e76005b..a95662776ee8 100644
--- a/drivers/media/pci/saa7164/saa7164-cmd.c
+++ b/drivers/media/pci/saa7164/saa7164-cmd.c
@@ -295,34 +295,6 @@ static int saa7164_cmd_wait(struct saa7164_dev *dev, u8 seqno)
return ret;
}
-void saa7164_cmd_signal(struct saa7164_dev *dev, u8 seqno)
-{
- int i;
- dprintk(DBGLVL_CMD, "%s()\n", __func__);
-
- mutex_lock(&dev->lock);
- for (i = 0; i < SAA_CMD_MAX_MSG_UNITS; i++) {
- if (dev->cmds[i].inuse == 1) {
- dprintk(DBGLVL_CMD,
- "seqno %d inuse, sig = %d, t/out = %d\n",
- dev->cmds[i].seqno,
- dev->cmds[i].signalled,
- dev->cmds[i].timeout);
- }
- }
-
- for (i = 0; i < SAA_CMD_MAX_MSG_UNITS; i++) {
- if ((dev->cmds[i].inuse == 1) && ((i == 0) ||
- (dev->cmds[i].signalled) || (dev->cmds[i].timeout))) {
- dprintk(DBGLVL_CMD, "%s(seqno=%d) calling wake_up\n",
- __func__, i);
- dev->cmds[i].signalled = 1;
- wake_up(&dev->cmds[i].wait);
- }
- }
- mutex_unlock(&dev->lock);
-}
-
int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, enum tmComResCmd command,
u16 controlselector, u16 size, void *buf)
{
diff --git a/drivers/media/pci/saa7164/saa7164.h b/drivers/media/pci/saa7164/saa7164.h
index eede47b686a3..e1bac1fe19d3 100644
--- a/drivers/media/pci/saa7164/saa7164.h
+++ b/drivers/media/pci/saa7164/saa7164.h
@@ -508,7 +508,6 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
int saa7164_cmd_send(struct saa7164_dev *dev,
u8 id, enum tmComResCmd command, u16 controlselector,
u16 size, void *buf);
-void saa7164_cmd_signal(struct saa7164_dev *dev, u8 seqno);
int saa7164_irq_dequeue(struct saa7164_dev *dev);
/* ----------------------------------------------------------- */
@@ -570,7 +569,6 @@ extern int saa7164_dvb_unregister(struct saa7164_port *port);
extern struct saa7164_buffer *saa7164_buffer_alloc(
struct saa7164_port *port, u32 len);
extern int saa7164_buffer_dealloc(struct saa7164_buffer *buf);
-extern void saa7164_buffer_display(struct saa7164_buffer *buf);
extern int saa7164_buffer_activate(struct saa7164_buffer *buf, int i);
extern int saa7164_buffer_cfg_port(struct saa7164_port *port);
extern struct saa7164_user_buffer *saa7164_buffer_alloc_user(
diff --git a/drivers/media/pci/solo6x10/solo6x10-gpio.c b/drivers/media/pci/solo6x10/solo6x10-gpio.c
index 084c30760e45..71848741c55c 100644
--- a/drivers/media/pci/solo6x10/solo6x10-gpio.c
+++ b/drivers/media/pci/solo6x10/solo6x10-gpio.c
@@ -116,18 +116,6 @@ static int solo_gpiochip_get_direction(struct gpio_chip *chip,
return -1;
}
-static int solo_gpiochip_direction_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- return -1;
-}
-
-static int solo_gpiochip_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- return -1;
-}
-
static int solo_gpiochip_get(struct gpio_chip *chip,
unsigned int offset)
{
@@ -139,8 +127,8 @@ static int solo_gpiochip_get(struct gpio_chip *chip,
return 1 & (ret >> (offset + 8));
}
-static void solo_gpiochip_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int solo_gpiochip_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
{
struct solo_dev *solo_dev = gpiochip_get_data(chip);
@@ -148,6 +136,8 @@ static void solo_gpiochip_set(struct gpio_chip *chip,
solo_gpio_set(solo_dev, 1 << (offset + 8));
else
solo_gpio_clear(solo_dev, 1 << (offset + 8));
+
+ return 0;
}
#endif
@@ -167,8 +157,6 @@ int solo_gpio_init(struct solo_dev *solo_dev)
solo_dev->gpio_dev.can_sleep = 0;
solo_dev->gpio_dev.get_direction = solo_gpiochip_get_direction;
- solo_dev->gpio_dev.direction_input = solo_gpiochip_direction_input;
- solo_dev->gpio_dev.direction_output = solo_gpiochip_direction_output;
solo_dev->gpio_dev.get = solo_gpiochip_get;
solo_dev->gpio_dev.set = solo_gpiochip_set;
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
index 85d518823159..32eef2fd1f2a 100644
--- a/drivers/media/platform/amphion/vdec.c
+++ b/drivers/media/platform/amphion/vdec.c
@@ -26,6 +26,7 @@
#include "vpu_cmds.h"
#include "vpu_rpc.h"
+#define VDEC_SLOT_CNT_DFT 32
#define VDEC_MIN_BUFFER_CAP 8
#define VDEC_MIN_BUFFER_OUT 8
@@ -41,6 +42,14 @@ struct vdec_fs_info {
u32 tag;
};
+struct vdec_frame_store_t {
+ struct vpu_vb2_buffer *curr;
+ struct vpu_vb2_buffer *pend;
+ dma_addr_t addr;
+ unsigned int state;
+ u32 tag;
+};
+
struct vdec_t {
u32 seq_hdr_found;
struct vpu_buffer udata;
@@ -48,7 +57,8 @@ struct vdec_t {
struct vpu_dec_codec_info codec_info;
enum vpu_codec_state state;
- struct vpu_vb2_buffer *slots[VB2_MAX_FRAME];
+ struct vdec_frame_store_t *slots;
+ u32 slot_count;
u32 req_frame_count;
struct vdec_fs_info mbi;
struct vdec_fs_info dcp;
@@ -232,6 +242,35 @@ static int vdec_ctrl_init(struct vpu_inst *inst)
V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE,
0, 1, 1, 0);
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, NULL,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
+ ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
+ V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, NULL,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_6_2,
+ 0,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, NULL,
+ V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
+ ~((1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10)),
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, NULL,
+ V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
+ 0,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_4);
+
ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 2);
if (ctrl)
@@ -258,6 +297,63 @@ static int vdec_ctrl_init(struct vpu_inst *inst)
return 0;
}
+static void vdec_attach_frame_store(struct vpu_inst *inst, struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);
+ struct vdec_t *vdec = inst->priv;
+ struct vdec_frame_store_t *new_slots = NULL;
+ dma_addr_t addr;
+ int i;
+
+ addr = vpu_get_vb_phy_addr(vb, 0);
+ for (i = 0; i < vdec->slot_count; i++) {
+ if (addr == vdec->slots[i].addr) {
+ if (vdec->slots[i].curr && vdec->slots[i].curr != vpu_buf) {
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_CHANGED);
+ vdec->slots[i].pend = vpu_buf;
+ } else {
+ vpu_set_buffer_state(vbuf, vdec->slots[i].state);
+ }
+ vpu_buf->fs_id = i;
+ return;
+ }
+ }
+
+ for (i = 0; i < vdec->slot_count; i++) {
+ if (!vdec->slots[i].addr) {
+ vdec->slots[i].addr = addr;
+ vpu_buf->fs_id = i;
+ return;
+ }
+ }
+
+ new_slots = krealloc_array(vdec->slots, vdec->slot_count * 2,
+ sizeof(*vdec->slots),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!new_slots) {
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_ERROR);
+ return;
+ }
+
+ vdec->slots = new_slots;
+ vdec->slot_count *= 2;
+
+ vdec->slots[i].addr = addr;
+ vpu_buf->fs_id = i;
+}
+
+static void vdec_reset_frame_store(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
+
+ if (!vdec->slots || !vdec->slot_count)
+ return;
+
+ vpu_trace(inst->dev, "inst[%d] reset slots\n", inst->id);
+ memset(vdec->slots, 0, sizeof(*vdec->slots) * vdec->slot_count);
+}
+
static void vdec_handle_resolution_change(struct vpu_inst *inst)
{
struct vdec_t *vdec = inst->priv;
@@ -714,11 +810,11 @@ static int vdec_frame_decoded(struct vpu_inst *inst, void *arg)
struct vb2_v4l2_buffer *src_buf;
int ret = 0;
- if (!info || info->id >= ARRAY_SIZE(vdec->slots))
+ if (!info || info->id >= vdec->slot_count)
return -EINVAL;
vpu_inst_lock(inst);
- vpu_buf = vdec->slots[info->id];
+ vpu_buf = vdec->slots[info->id].curr;
if (!vpu_buf) {
dev_err(inst->dev, "[%d] decoded invalid frame[%d]\n", inst->id, info->id);
ret = -EINVAL;
@@ -739,11 +835,13 @@ static int vdec_frame_decoded(struct vpu_inst *inst, void *arg)
if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_DECODED)
dev_info(inst->dev, "[%d] buf[%d] has been decoded\n", inst->id, info->id);
vpu_set_buffer_state(vbuf, VPU_BUF_STATE_DECODED);
+ vdec->slots[info->id].state = VPU_BUF_STATE_DECODED;
vdec->decoded_frame_count++;
if (vdec->params.display_delay_enable) {
struct vpu_format *cur_fmt;
cur_fmt = vpu_get_format(inst, inst->cap_format.type);
+ vdec->slots[info->id].state = VPU_BUF_STATE_READY;
vpu_set_buffer_state(vbuf, VPU_BUF_STATE_READY);
for (int i = 0; i < vbuf->vb2_buf.num_planes; i++)
vb2_set_plane_payload(&vbuf->vb2_buf,
@@ -766,11 +864,11 @@ static struct vpu_vb2_buffer *vdec_find_buffer(struct vpu_inst *inst, u32 luma)
struct vdec_t *vdec = inst->priv;
int i;
- for (i = 0; i < ARRAY_SIZE(vdec->slots); i++) {
- if (!vdec->slots[i])
+ for (i = 0; i < vdec->slot_count; i++) {
+ if (!vdec->slots[i].curr)
continue;
- if (luma == vdec->slots[i]->luma)
- return vdec->slots[i];
+ if (luma == vdec->slots[i].addr)
+ return vdec->slots[i].curr;
}
return NULL;
@@ -804,11 +902,11 @@ static void vdec_buf_done(struct vpu_inst *inst, struct vpu_frame_info *frame)
cur_fmt = vpu_get_format(inst, inst->cap_format.type);
vbuf = &vpu_buf->m2m_buf.vb;
- if (vbuf->vb2_buf.index != frame->id)
- dev_err(inst->dev, "[%d] buffer id(%d, %d) mismatch\n",
- inst->id, vbuf->vb2_buf.index, frame->id);
+ if (vpu_buf->fs_id != frame->id)
+ dev_err(inst->dev, "[%d] buffer id(%d(%d), %d) mismatch\n",
+ inst->id, vpu_buf->fs_id, vbuf->vb2_buf.index, frame->id);
- if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_READY && vdec->params.display_delay_enable)
+ if (vdec->params.display_delay_enable)
return;
if (vpu_get_buffer_state(vbuf) != VPU_BUF_STATE_DECODED)
@@ -821,10 +919,11 @@ static void vdec_buf_done(struct vpu_inst *inst, struct vpu_frame_info *frame)
vbuf->sequence = vdec->sequence;
dev_dbg(inst->dev, "[%d][OUTPUT TS]%32lld\n", inst->id, vbuf->vb2_buf.timestamp);
- v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
vpu_inst_lock(inst);
+ vdec->slots[vpu_buf->fs_id].state = VPU_BUF_STATE_READY;
vdec->display_frame_count++;
vpu_inst_unlock(inst);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
dev_dbg(inst->dev, "[%d] decoded : %d, display : %d, sequence : %d\n",
inst->id, vdec->decoded_frame_count, vdec->display_frame_count, vdec->sequence);
}
@@ -1052,18 +1151,30 @@ static int vdec_response_frame(struct vpu_inst *inst, struct vb2_v4l2_buffer *vb
if (!vbuf)
return -EINVAL;
- if (vdec->slots[vbuf->vb2_buf.index]) {
- dev_err(inst->dev, "[%d] repeat alloc fs %d\n",
- inst->id, vbuf->vb2_buf.index);
+ vpu_buf = to_vpu_vb2_buffer(vbuf);
+ if (vpu_buf->fs_id < 0 || vpu_buf->fs_id >= vdec->slot_count) {
+ dev_err(inst->dev, "invalid fs %d for v4l2 buffer %d\n",
+ vpu_buf->fs_id, vbuf->vb2_buf.index);
return -EINVAL;
}
+ if (vdec->slots[vpu_buf->fs_id].curr) {
+ if (vdec->slots[vpu_buf->fs_id].curr != vpu_buf) {
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_CHANGED);
+ vdec->slots[vpu_buf->fs_id].pend = vpu_buf;
+ } else {
+ vpu_set_buffer_state(vbuf, vdec->slots[vpu_buf->fs_id].state);
+ }
+ dev_err(inst->dev, "[%d] repeat alloc fs %d (v4l2 index %d)\n",
+ inst->id, vpu_buf->fs_id, vbuf->vb2_buf.index);
+ return -EAGAIN;
+ }
+
dev_dbg(inst->dev, "[%d] state = %s, alloc fs %d, tag = 0x%x\n",
inst->id, vpu_codec_state_name(inst->state), vbuf->vb2_buf.index, vdec->seq_tag);
- vpu_buf = to_vpu_vb2_buffer(vbuf);
memset(&info, 0, sizeof(info));
- info.id = vbuf->vb2_buf.index;
+ info.id = vpu_buf->fs_id;
info.type = MEM_RES_FRAME;
info.tag = vdec->seq_tag;
info.luma_addr = vpu_get_vb_phy_addr(&vbuf->vb2_buf, 0);
@@ -1078,12 +1189,13 @@ static int vdec_response_frame(struct vpu_inst *inst, struct vb2_v4l2_buffer *vb
if (ret)
return ret;
- vpu_buf->tag = info.tag;
vpu_buf->luma = info.luma_addr;
vpu_buf->chroma_u = info.chroma_addr;
vpu_buf->chroma_v = 0;
vpu_set_buffer_state(vbuf, VPU_BUF_STATE_INUSE);
- vdec->slots[info.id] = vpu_buf;
+ vdec->slots[info.id].tag = info.tag;
+ vdec->slots[info.id].curr = vpu_buf;
+ vdec->slots[info.id].state = VPU_BUF_STATE_INUSE;
vdec->req_frame_count--;
return 0;
@@ -1144,25 +1256,76 @@ static void vdec_recycle_buffer(struct vpu_inst *inst, struct vb2_v4l2_buffer *v
v4l2_m2m_buf_queue(inst->fh.m2m_ctx, vbuf);
}
-static void vdec_clear_slots(struct vpu_inst *inst)
+static void vdec_release_curr_frame_store(struct vpu_inst *inst, u32 id)
{
struct vdec_t *vdec = inst->priv;
struct vpu_vb2_buffer *vpu_buf;
struct vb2_v4l2_buffer *vbuf;
+
+ if (id >= vdec->slot_count)
+ return;
+ if (!vdec->slots[id].curr)
+ return;
+
+ vpu_buf = vdec->slots[id].curr;
+ vbuf = &vpu_buf->m2m_buf.vb;
+
+ vdec_response_fs_release(inst, id, vdec->slots[id].tag);
+ if (vpu_buf->fs_id == id) {
+ if (vpu_buf->state != VPU_BUF_STATE_READY)
+ vdec_recycle_buffer(inst, vbuf);
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
+ }
+
+ vdec->slots[id].curr = NULL;
+ vdec->slots[id].state = VPU_BUF_STATE_IDLE;
+
+ if (vdec->slots[id].pend) {
+ vpu_set_buffer_state(&vdec->slots[id].pend->m2m_buf.vb, VPU_BUF_STATE_IDLE);
+ vdec->slots[id].pend = NULL;
+ }
+}
+
+static void vdec_clear_slots(struct vpu_inst *inst)
+{
+ struct vdec_t *vdec = inst->priv;
int i;
- for (i = 0; i < ARRAY_SIZE(vdec->slots); i++) {
- if (!vdec->slots[i])
+ for (i = 0; i < vdec->slot_count; i++) {
+ if (!vdec->slots[i].curr)
continue;
- vpu_buf = vdec->slots[i];
- vbuf = &vpu_buf->m2m_buf.vb;
-
vpu_trace(inst->dev, "clear slot %d\n", i);
- vdec_response_fs_release(inst, i, vpu_buf->tag);
- vdec_recycle_buffer(inst, vbuf);
- vdec->slots[i]->state = VPU_BUF_STATE_IDLE;
- vdec->slots[i] = NULL;
+ vdec_release_curr_frame_store(inst, i);
+ }
+}
+
+static void vdec_update_v4l2_ctrl(struct vpu_inst *inst, u32 id, u32 val)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&inst->ctrl_handler, id);
+
+ if (ctrl)
+ v4l2_ctrl_s_ctrl(ctrl, val);
+}
+
+static void vdec_update_v4l2_profile_level(struct vpu_inst *inst, struct vpu_dec_codec_info *hdr)
+{
+ switch (inst->out_format.pixfmt) {
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_H264_MVC:
+ vdec_update_v4l2_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ vpu_get_h264_v4l2_profile(hdr));
+ vdec_update_v4l2_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ vpu_get_h264_v4l2_level(hdr));
+ break;
+ case V4L2_PIX_FMT_HEVC:
+ vdec_update_v4l2_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ vpu_get_hevc_v4l2_profile(hdr));
+ vdec_update_v4l2_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ vpu_get_hevc_v4l2_level(hdr));
+ break;
+ default:
+ return;
}
}
@@ -1189,6 +1352,7 @@ static void vdec_event_seq_hdr(struct vpu_inst *inst, struct vpu_dec_codec_info
vdec_init_crop(inst);
vdec_init_mbi(inst);
vdec_init_dcp(inst);
+ vdec_update_v4l2_profile_level(inst, hdr);
if (!vdec->seq_hdr_found) {
vdec->seq_tag = vdec->codec_info.tag;
if (vdec->is_source_changed) {
@@ -1263,39 +1427,29 @@ static void vdec_event_req_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
static void vdec_evnet_rel_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
struct vdec_t *vdec = inst->priv;
- struct vpu_vb2_buffer *vpu_buf;
- struct vb2_v4l2_buffer *vbuf;
- if (!fs || fs->id >= ARRAY_SIZE(vdec->slots))
+ if (!fs || fs->id >= vdec->slot_count)
return;
if (fs->type != MEM_RES_FRAME)
return;
- if (fs->id >= vpu_get_num_buffers(inst, inst->cap_format.type)) {
+ if (fs->id >= vdec->slot_count) {
dev_err(inst->dev, "[%d] invalid fs(%d) to release\n", inst->id, fs->id);
return;
}
vpu_inst_lock(inst);
- vpu_buf = vdec->slots[fs->id];
- vdec->slots[fs->id] = NULL;
-
- if (!vpu_buf) {
+ if (!vdec->slots[fs->id].curr) {
dev_dbg(inst->dev, "[%d] fs[%d] has bee released\n", inst->id, fs->id);
goto exit;
}
- vbuf = &vpu_buf->m2m_buf.vb;
- if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_DECODED) {
+ if (vdec->slots[fs->id].state == VPU_BUF_STATE_DECODED) {
dev_dbg(inst->dev, "[%d] frame skip\n", inst->id);
vdec->sequence++;
}
- vdec_response_fs_release(inst, fs->id, vpu_buf->tag);
- if (vpu_get_buffer_state(vbuf) != VPU_BUF_STATE_READY)
- vdec_recycle_buffer(inst, vbuf);
-
- vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
+ vdec_release_curr_frame_store(inst, fs->id);
vpu_process_capture_buffer(inst);
exit:
@@ -1485,6 +1639,11 @@ static void vdec_cleanup(struct vpu_inst *inst)
return;
vdec = inst->priv;
+ if (vdec) {
+ kfree(vdec->slots);
+ vdec->slots = NULL;
+ vdec->slot_count = 0;
+ }
vfree(vdec);
inst->priv = NULL;
vfree(inst);
@@ -1606,11 +1765,43 @@ static int vdec_stop_session(struct vpu_inst *inst, u32 type)
return 0;
}
-static int vdec_get_debug_info(struct vpu_inst *inst, char *str, u32 size, u32 i)
+static int vdec_get_slot_debug_info(struct vpu_inst *inst, char *str, u32 size, u32 i)
{
struct vdec_t *vdec = inst->priv;
+ struct vpu_vb2_buffer *vpu_buf;
int num = -1;
+ vpu_inst_lock(inst);
+ if (i >= vdec->slot_count || !vdec->slots[i].addr)
+ goto exit;
+
+ vpu_buf = vdec->slots[i].curr;
+
+ num = scnprintf(str, size, "slot[%2d] :", i);
+ if (vpu_buf) {
+ num += scnprintf(str + num, size - num, " %2d",
+ vpu_buf->m2m_buf.vb.vb2_buf.index);
+ num += scnprintf(str + num, size - num, "; state = %d", vdec->slots[i].state);
+ } else {
+ num += scnprintf(str + num, size - num, " -1");
+ }
+
+ if (vdec->slots[i].pend)
+ num += scnprintf(str + num, size - num, "; %d",
+ vdec->slots[i].pend->m2m_buf.vb.vb2_buf.index);
+
+ num += scnprintf(str + num, size - num, "\n");
+exit:
+ vpu_inst_unlock(inst);
+
+ return num;
+}
+
+static int vdec_get_debug_info(struct vpu_inst *inst, char *str, u32 size, u32 i)
+{
+ struct vdec_t *vdec = inst->priv;
+ int num;
+
switch (i) {
case 0:
num = scnprintf(str, size,
@@ -1664,6 +1855,7 @@ static int vdec_get_debug_info(struct vpu_inst *inst, char *str, u32 size, u32 i
vdec->codec_info.vui_present);
break;
default:
+ num = vdec_get_slot_debug_info(inst, str, size, i - 10);
break;
}
@@ -1687,6 +1879,8 @@ static struct vpu_inst_ops vdec_inst_ops = {
.get_debug_info = vdec_get_debug_info,
.wait_prepare = vpu_inst_unlock,
.wait_finish = vpu_inst_lock,
+ .attach_frame_store = vdec_attach_frame_store,
+ .reset_frame_store = vdec_reset_frame_store,
};
static void vdec_init(struct file *file)
@@ -1727,6 +1921,16 @@ static int vdec_open(struct file *file)
return -ENOMEM;
}
+ vdec->slots = kmalloc_array(VDEC_SLOT_CNT_DFT,
+ sizeof(*vdec->slots),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vdec->slots) {
+ vfree(vdec);
+ vfree(inst);
+ return -ENOMEM;
+ }
+ vdec->slot_count = VDEC_SLOT_CNT_DFT;
+
inst->ops = &vdec_inst_ops;
inst->formats = vdec_formats;
inst->type = VPU_CORE_TYPE_DEC;
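
With the hunks above, the fixed VB2_MAX_FRAME slot array becomes a dynamically grown one: vdec_attach_frame_store() doubles it with krealloc_array(..., GFP_KERNEL | __GFP_ZERO) once every slot is occupied. A userspace analogue of that growth step, with the new half zeroed explicitly:

#include <stdlib.h>
#include <string.h>

struct slot { unsigned long addr; };

/* Double the array; returns the index of the first new slot, or -1. */
static int grow_slots(struct slot **slots, unsigned int *count)
{
	unsigned int old = *count;
	struct slot *n = realloc(*slots, 2 * old * sizeof(*n));

	if (!n)
		return -1;
	memset(n + old, 0, old * sizeof(*n));	/* zero only the new half */
	*slots = n;
	*count = 2 * old;
	return (int)old;
}
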
diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
index 1451549c9dd2..cac0f1a64fea 100644
--- a/drivers/media/platform/amphion/vpu.h
+++ b/drivers/media/platform/amphion/vpu.h
@@ -222,6 +222,8 @@ struct vpu_inst_ops {
int (*get_debug_info)(struct vpu_inst *inst, char *str, u32 size, u32 i);
void (*wait_prepare)(struct vpu_inst *inst);
void (*wait_finish)(struct vpu_inst *inst);
+ void (*attach_frame_store)(struct vpu_inst *inst, struct vb2_buffer *vb);
+ void (*reset_frame_store)(struct vpu_inst *inst);
};
struct vpu_inst {
@@ -295,7 +297,8 @@ enum {
VPU_BUF_STATE_DECODED,
VPU_BUF_STATE_READY,
VPU_BUF_STATE_SKIP,
- VPU_BUF_STATE_ERROR
+ VPU_BUF_STATE_ERROR,
+ VPU_BUF_STATE_CHANGED
};
struct vpu_vb2_buffer {
@@ -304,8 +307,8 @@ struct vpu_vb2_buffer {
dma_addr_t chroma_u;
dma_addr_t chroma_v;
unsigned int state;
- u32 tag;
u32 average_qp;
+ s32 fs_id;
};
void vpu_writel(struct vpu_dev *vpu, u32 reg, u32 val);
diff --git a/drivers/media/platform/amphion/vpu_color.c b/drivers/media/platform/amphion/vpu_color.c
index 4ae435cbc5cd..7c0ab8289a7b 100644
--- a/drivers/media/platform/amphion/vpu_color.c
+++ b/drivers/media/platform/amphion/vpu_color.c
@@ -108,76 +108,3 @@ u32 vpu_color_cvrt_full_range_i2v(u32 full_range)
return V4L2_QUANTIZATION_LIM_RANGE;
}
-
-int vpu_color_check_primaries(u32 primaries)
-{
- return vpu_color_cvrt_primaries_v2i(primaries) ? 0 : -EINVAL;
-}
-
-int vpu_color_check_transfers(u32 transfers)
-{
- return vpu_color_cvrt_transfers_v2i(transfers) ? 0 : -EINVAL;
-}
-
-int vpu_color_check_matrix(u32 matrix)
-{
- return vpu_color_cvrt_matrix_v2i(matrix) ? 0 : -EINVAL;
-}
-
-int vpu_color_check_full_range(u32 full_range)
-{
- int ret = -EINVAL;
-
- switch (full_range) {
- case V4L2_QUANTIZATION_FULL_RANGE:
- case V4L2_QUANTIZATION_LIM_RANGE:
- ret = 0;
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-int vpu_color_get_default(u32 primaries, u32 *ptransfers, u32 *pmatrix, u32 *pfull_range)
-{
- u32 transfers;
- u32 matrix;
- u32 full_range;
-
- switch (primaries) {
- case V4L2_COLORSPACE_REC709:
- transfers = V4L2_XFER_FUNC_709;
- matrix = V4L2_YCBCR_ENC_709;
- break;
- case V4L2_COLORSPACE_470_SYSTEM_M:
- case V4L2_COLORSPACE_470_SYSTEM_BG:
- case V4L2_COLORSPACE_SMPTE170M:
- transfers = V4L2_XFER_FUNC_709;
- matrix = V4L2_YCBCR_ENC_601;
- break;
- case V4L2_COLORSPACE_SMPTE240M:
- transfers = V4L2_XFER_FUNC_SMPTE240M;
- matrix = V4L2_YCBCR_ENC_SMPTE240M;
- break;
- case V4L2_COLORSPACE_BT2020:
- transfers = V4L2_XFER_FUNC_709;
- matrix = V4L2_YCBCR_ENC_BT2020;
- break;
- default:
- transfers = V4L2_XFER_FUNC_DEFAULT;
- matrix = V4L2_YCBCR_ENC_DEFAULT;
- break;
- }
- full_range = V4L2_QUANTIZATION_LIM_RANGE;
-
- if (ptransfers)
- *ptransfers = transfers;
- if (pmatrix)
- *pmatrix = matrix;
- if (pfull_range)
- *pfull_range = full_range;
-
- return 0;
-}
diff --git a/drivers/media/platform/amphion/vpu_dbg.c b/drivers/media/platform/amphion/vpu_dbg.c
index 940e5bda5fa3..497ae4e8a229 100644
--- a/drivers/media/platform/amphion/vpu_dbg.c
+++ b/drivers/media/platform/amphion/vpu_dbg.c
@@ -48,6 +48,7 @@ static char *vpu_stat_name[] = {
[VPU_BUF_STATE_READY] = "ready",
[VPU_BUF_STATE_SKIP] = "skip",
[VPU_BUF_STATE_ERROR] = "error",
+ [VPU_BUF_STATE_CHANGED] = "changed",
};
static inline const char *to_vpu_stat_name(int state)
@@ -164,6 +165,7 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
for (i = 0; i < vb2_get_num_buffers(vq); i++) {
struct vb2_buffer *vb;
struct vb2_v4l2_buffer *vbuf;
+ struct vpu_vb2_buffer *vpu_buf;
vb = vb2_get_buffer(vq, i);
if (!vb)
@@ -173,13 +175,24 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
continue;
vbuf = to_vb2_v4l2_buffer(vb);
+ vpu_buf = to_vpu_vb2_buffer(vbuf);
num = scnprintf(str, sizeof(str),
- "capture[%2d] state = %10s, %8s\n",
+ "capture[%2d] state = %10s, %8s",
i, vb2_stat_name[vb->state],
to_vpu_stat_name(vpu_get_buffer_state(vbuf)));
if (seq_write(s, str, num))
return 0;
+
+ if (vpu_buf->fs_id >= 0) {
+ num = scnprintf(str, sizeof(str), "; fs %d", vpu_buf->fs_id);
+ if (seq_write(s, str, num))
+ return 0;
+ }
+
+ num = scnprintf(str, sizeof(str), "\n");
+ if (seq_write(s, str, num))
+ return 0;
}
num = scnprintf(str, sizeof(str), "sequence = %d\n", inst->sequence);
diff --git a/drivers/media/platform/amphion/vpu_defs.h b/drivers/media/platform/amphion/vpu_defs.h
index 428d988cf2f7..f56245ae2205 100644
--- a/drivers/media/platform/amphion/vpu_defs.h
+++ b/drivers/media/platform/amphion/vpu_defs.h
@@ -134,6 +134,7 @@ struct vpu_dec_codec_info {
u32 decoded_height;
struct v4l2_fract frame_rate;
u32 dsp_asp_ratio;
+ u32 profile_idc;
u32 level_idc;
u32 bit_depth_luma;
u32 bit_depth_chroma;
@@ -147,6 +148,17 @@ struct vpu_dec_codec_info {
u32 mbi_size;
u32 dcp_size;
u32 stride;
+ union {
+ struct {
+ u32 constraint_set5_flag : 1;
+ u32 constraint_set4_flag : 1;
+ u32 constraint_set3_flag : 1;
+ u32 constraint_set2_flag : 1;
+ u32 constraint_set1_flag : 1;
+ u32 constraint_set0_flag : 1;
+ };
+ u32 constraint_set_flags;
+ };
};
struct vpu_dec_pic_info {
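[Reviewer note, illustrative only — not part of the patch] The anonymous union added above lets the named constraint_set*_flag bitfields alias the raw constraint_set_flags word, so the single assignment from pkt->data[28] in vpu_malone.c below fills all six flags at once. A minimal sketch of the aliasing; note that which flag maps to which bit is ABI-dependent (the first-declared field takes the least significant bits on the little-endian GCC/Clang targets this driver builds for):

/* Illustrative only: one write to the raw word populates every flag. */
struct vpu_dec_codec_info info = { 0 };

info.constraint_set_flags = 0x3f;	/* sets all six constraint flags */
/* info.constraint_set1_flag is now 1, so an H.264 stream with
 * profile_idc == 66 would be reported as Constrained Baseline. */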
diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c
index d12310af9ebc..886d5632388e 100644
--- a/drivers/media/platform/amphion/vpu_helpers.c
+++ b/drivers/media/platform/amphion/vpu_helpers.c
@@ -509,3 +509,126 @@ const char *vpu_codec_state_name(enum vpu_codec_state state)
}
return "<unknown>";
}
+
+struct codec_id_mapping {
+ u32 id;
+ u32 v4l2_id;
+};
+
+static struct codec_id_mapping h264_profiles[] = {
+ {66, V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE},
+ {77, V4L2_MPEG_VIDEO_H264_PROFILE_MAIN},
+ {88, V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED},
+ {100, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH}
+};
+
+static struct codec_id_mapping h264_levels[] = {
+ {10, V4L2_MPEG_VIDEO_H264_LEVEL_1_0},
+ {9, V4L2_MPEG_VIDEO_H264_LEVEL_1B},
+ {11, V4L2_MPEG_VIDEO_H264_LEVEL_1_1},
+ {12, V4L2_MPEG_VIDEO_H264_LEVEL_1_2},
+ {13, V4L2_MPEG_VIDEO_H264_LEVEL_1_3},
+ {20, V4L2_MPEG_VIDEO_H264_LEVEL_2_0},
+ {21, V4L2_MPEG_VIDEO_H264_LEVEL_2_1},
+ {22, V4L2_MPEG_VIDEO_H264_LEVEL_2_2},
+ {30, V4L2_MPEG_VIDEO_H264_LEVEL_3_0},
+ {31, V4L2_MPEG_VIDEO_H264_LEVEL_3_1},
+ {32, V4L2_MPEG_VIDEO_H264_LEVEL_3_2},
+ {40, V4L2_MPEG_VIDEO_H264_LEVEL_4_0},
+ {41, V4L2_MPEG_VIDEO_H264_LEVEL_4_1},
+ {42, V4L2_MPEG_VIDEO_H264_LEVEL_4_2},
+ {50, V4L2_MPEG_VIDEO_H264_LEVEL_5_0},
+ {51, V4L2_MPEG_VIDEO_H264_LEVEL_5_1},
+ {52, V4L2_MPEG_VIDEO_H264_LEVEL_5_2},
+ {60, V4L2_MPEG_VIDEO_H264_LEVEL_6_0},
+ {61, V4L2_MPEG_VIDEO_H264_LEVEL_6_1},
+ {62, V4L2_MPEG_VIDEO_H264_LEVEL_6_2}
+};
+
+static struct codec_id_mapping hevc_profiles[] = {
+ {1, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN},
+ {2, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10}
+};
+
+static struct codec_id_mapping hevc_levels[] = {
+ {30, V4L2_MPEG_VIDEO_HEVC_LEVEL_1},
+ {60, V4L2_MPEG_VIDEO_HEVC_LEVEL_2},
+ {63, V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1},
+ {90, V4L2_MPEG_VIDEO_HEVC_LEVEL_3},
+ {93, V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1},
+ {120, V4L2_MPEG_VIDEO_HEVC_LEVEL_4},
+ {123, V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1},
+ {150, V4L2_MPEG_VIDEO_HEVC_LEVEL_5},
+ {153, V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1},
+ {156, V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2},
+ {180, V4L2_MPEG_VIDEO_HEVC_LEVEL_6},
+ {183, V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1},
+ {186, V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2}
+};
+
+static u32 vpu_find_v4l2_id(u32 id, struct codec_id_mapping *array, u32 array_sz)
+{
+ u32 i;
+
+ if (!array || !array_sz)
+ return 0;
+
+ for (i = 0; i < array_sz; i++) {
+ if (id == array[i].id)
+ return array[i].v4l2_id;
+ }
+
+ return 0;
+}
+
+u32 vpu_get_h264_v4l2_profile(struct vpu_dec_codec_info *hdr)
+{
+ if (!hdr)
+ return 0;
+
+ /*
+ * Per the H.264 specification, section A.2.1.1 (Constrained Baseline profile):
+ * conformance of a bitstream to the Constrained Baseline profile is indicated
+ * by profile_idc being equal to 66 with constraint_set1_flag being equal to 1.
+ */
+ if (hdr->profile_idc == 66 && hdr->constraint_set1_flag)
+ return V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE;
+
+ return vpu_find_v4l2_id(hdr->profile_idc, h264_profiles, ARRAY_SIZE(h264_profiles));
+}
+
+u32 vpu_get_h264_v4l2_level(struct vpu_dec_codec_info *hdr)
+{
+ if (!hdr)
+ return 0;
+
+ /*
+ * Per the H.264 specification, section 7.4.2.1.1 (Sequence parameter set
+ * data semantics): if profile_idc is equal to 66, 77, or 88 and level_idc
+ * is equal to 11, constraint_set3_flag equal to 1 indicates that the coded
+ * video sequence obeys all constraints specified in Annex A for level 1b,
+ * while constraint_set3_flag equal to 0 indicates that it obeys all
+ * constraints specified in Annex A for level 1.1.
+ */
+ if (hdr->level_idc == 11 && hdr->constraint_set3_flag &&
+ (hdr->profile_idc == 66 || hdr->profile_idc == 77 || hdr->profile_idc == 88))
+ return V4L2_MPEG_VIDEO_H264_LEVEL_1B;
+
+ return vpu_find_v4l2_id(hdr->level_idc, h264_levels, ARRAY_SIZE(h264_levels));
+}
+
+u32 vpu_get_hevc_v4l2_profile(struct vpu_dec_codec_info *hdr)
+{
+ if (!hdr)
+ return 0;
+
+ return vpu_find_v4l2_id(hdr->profile_idc, hevc_profiles, ARRAY_SIZE(hevc_profiles));
+}
+
+u32 vpu_get_hevc_v4l2_level(struct vpu_dec_codec_info *hdr)
+{
+ if (!hdr)
+ return 0;
+
+ return vpu_find_v4l2_id(hdr->level_idc, hevc_levels, ARRAY_SIZE(hevc_levels));
+}
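[Reviewer note, illustrative only — not part of the patch] A quick usage sketch of the table-driven lookup added above, using the h264_levels table from this hunk:

/* Illustrative only: level_idc 31 from the sequence header resolves to
 * the V4L2 control value for level 3.1; an unknown id falls back to 0. */
u32 lvl = vpu_find_v4l2_id(31, h264_levels, ARRAY_SIZE(h264_levels));
/* lvl == V4L2_MPEG_VIDEO_H264_LEVEL_3_1; the level-1b special case is
 * handled separately in vpu_get_h264_v4l2_level() before the lookup. */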
diff --git a/drivers/media/platform/amphion/vpu_helpers.h b/drivers/media/platform/amphion/vpu_helpers.h
index 0eaddb07190d..76fba0d6090c 100644
--- a/drivers/media/platform/amphion/vpu_helpers.h
+++ b/drivers/media/platform/amphion/vpu_helpers.h
@@ -6,6 +6,8 @@
#ifndef _AMPHION_VPU_HELPERS_H
#define _AMPHION_VPU_HELPERS_H
+#include "vpu_defs.h"
+
struct vpu_pair {
u32 src;
u32 dst;
@@ -54,10 +56,6 @@ static inline u8 vpu_helper_read_byte(struct vpu_buffer *stream_buffer, u32 pos)
return pdata[pos % stream_buffer->length];
}
-int vpu_color_check_primaries(u32 primaries);
-int vpu_color_check_transfers(u32 transfers);
-int vpu_color_check_matrix(u32 matrix);
-int vpu_color_check_full_range(u32 full_range);
u32 vpu_color_cvrt_primaries_v2i(u32 primaries);
u32 vpu_color_cvrt_primaries_i2v(u32 primaries);
u32 vpu_color_cvrt_transfers_v2i(u32 transfers);
@@ -66,8 +64,12 @@ u32 vpu_color_cvrt_matrix_v2i(u32 matrix);
u32 vpu_color_cvrt_matrix_i2v(u32 matrix);
u32 vpu_color_cvrt_full_range_v2i(u32 full_range);
u32 vpu_color_cvrt_full_range_i2v(u32 full_range);
-int vpu_color_get_default(u32 primaries, u32 *ptransfers, u32 *pmatrix, u32 *pfull_range);
int vpu_find_dst_by_src(struct vpu_pair *pairs, u32 cnt, u32 src);
int vpu_find_src_by_dst(struct vpu_pair *pairs, u32 cnt, u32 dst);
+
+u32 vpu_get_h264_v4l2_profile(struct vpu_dec_codec_info *hdr);
+u32 vpu_get_h264_v4l2_level(struct vpu_dec_codec_info *hdr);
+u32 vpu_get_hevc_v4l2_profile(struct vpu_dec_codec_info *hdr);
+u32 vpu_get_hevc_v4l2_level(struct vpu_dec_codec_info *hdr);
#endif
diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
index feca7d4220ed..ba688566dffd 100644
--- a/drivers/media/platform/amphion/vpu_malone.c
+++ b/drivers/media/platform/amphion/vpu_malone.c
@@ -908,7 +908,8 @@ static void vpu_malone_unpack_seq_hdr(struct vpu_rpc_event *pkt,
info->frame_rate.numerator = 1000;
info->frame_rate.denominator = pkt->data[8];
info->dsp_asp_ratio = pkt->data[9];
- info->level_idc = pkt->data[10];
+ info->profile_idc = (pkt->data[10] >> 8) & 0xff;
+ info->level_idc = pkt->data[10] & 0xff;
info->bit_depth_luma = pkt->data[13];
info->bit_depth_chroma = pkt->data[14];
info->chroma_fmt = pkt->data[15];
@@ -925,6 +926,8 @@ static void vpu_malone_unpack_seq_hdr(struct vpu_rpc_event *pkt,
info->pixfmt = V4L2_PIX_FMT_NV12M_10BE_8L128;
else
info->pixfmt = V4L2_PIX_FMT_NV12M_8L128;
+ if (pkt->hdr.num > 28)
+ info->constraint_set_flags = pkt->data[28];
if (info->frame_rate.numerator && info->frame_rate.denominator) {
unsigned long n, d;
diff --git a/drivers/media/platform/amphion/vpu_mbox.c b/drivers/media/platform/amphion/vpu_mbox.c
index c2963b8deb48..b2ac8de6a2d9 100644
--- a/drivers/media/platform/amphion/vpu_mbox.c
+++ b/drivers/media/platform/amphion/vpu_mbox.c
@@ -109,7 +109,3 @@ void vpu_mbox_send_msg(struct vpu_core *core, u32 type, u32 data)
mbox_send_message(core->tx_data.ch, &data);
mbox_send_message(core->tx_type.ch, &type);
}
-
-void vpu_mbox_enable_rx(struct vpu_dev *dev)
-{
-}
diff --git a/drivers/media/platform/amphion/vpu_mbox.h b/drivers/media/platform/amphion/vpu_mbox.h
index 79cfd874e92b..8b7aea4f606c 100644
--- a/drivers/media/platform/amphion/vpu_mbox.h
+++ b/drivers/media/platform/amphion/vpu_mbox.h
@@ -11,6 +11,5 @@ int vpu_mbox_request(struct vpu_core *core);
void vpu_mbox_free(struct vpu_core *core);
void vpu_mbox_send_msg(struct vpu_core *core, u32 type, u32 data);
void vpu_mbox_send_type(struct vpu_core *core, u32 type);
-void vpu_mbox_enable_rx(struct vpu_dev *dev);
#endif
diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
index 45707931bc4f..74668fa362e2 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.c
+++ b/drivers/media/platform/amphion/vpu_v4l2.c
@@ -501,14 +501,25 @@ static int vpu_vb2_queue_setup(struct vb2_queue *vq,
call_void_vop(inst, release);
}
+ if (V4L2_TYPE_IS_CAPTURE(vq->type))
+ call_void_vop(inst, reset_frame_store);
+
return 0;
}
static int vpu_vb2_buf_init(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);
+ struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ vpu_buf->fs_id = -1;
vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
+
+ if (!inst->ops->attach_frame_store || V4L2_TYPE_IS_OUTPUT(vb->type))
+ return 0;
+
+ call_void_vop(inst, attach_frame_store, vb);
return 0;
}
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index cebcae196eec..7f1ce95cdc3f 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -57,6 +57,25 @@
#define CSI2RX_LANES_MAX 4
#define CSI2RX_STREAMS_MAX 4
+#define CSI2RX_ERROR_IRQS_REG 0x28
+#define CSI2RX_ERROR_IRQS_MASK_REG 0x2C
+
+#define CSI2RX_STREAM3_FIFO_OVERFLOW_IRQ BIT(19)
+#define CSI2RX_STREAM2_FIFO_OVERFLOW_IRQ BIT(18)
+#define CSI2RX_STREAM1_FIFO_OVERFLOW_IRQ BIT(17)
+#define CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ BIT(16)
+#define CSI2RX_FRONT_TRUNC_HDR_IRQ BIT(12)
+#define CSI2RX_PROT_TRUNCATED_PACKET_IRQ BIT(11)
+#define CSI2RX_FRONT_LP_NO_PAYLOAD_IRQ BIT(10)
+#define CSI2RX_SP_INVALID_RCVD_IRQ BIT(9)
+#define CSI2RX_DATA_ID_IRQ BIT(7)
+#define CSI2RX_HEADER_CORRECTED_ECC_IRQ BIT(6)
+#define CSI2RX_HEADER_ECC_IRQ BIT(5)
+#define CSI2RX_PAYLOAD_CRC_IRQ BIT(4)
+
+#define CSI2RX_ECC_ERRORS GENMASK(7, 4)
+#define CSI2RX_PACKET_ERRORS GENMASK(12, 9)
+
enum csi2rx_pads {
CSI2RX_PAD_SINK,
CSI2RX_PAD_SOURCE_STREAM0,
@@ -71,9 +90,32 @@ struct csi2rx_fmt {
u8 bpp;
};
+struct csi2rx_event {
+ u32 mask;
+ const char *name;
+};
+
+static const struct csi2rx_event csi2rx_events[] = {
+ { CSI2RX_STREAM3_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 3 FIFO detected" },
+ { CSI2RX_STREAM2_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 2 FIFO detected" },
+ { CSI2RX_STREAM1_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 1 FIFO detected" },
+ { CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 0 FIFO detected" },
+ { CSI2RX_FRONT_TRUNC_HDR_IRQ, "A truncated header [short or long] has been received" },
+ { CSI2RX_PROT_TRUNCATED_PACKET_IRQ, "A truncated long packet has been received" },
+ { CSI2RX_FRONT_LP_NO_PAYLOAD_IRQ, "A truncated long packet has been received. No payload" },
+ { CSI2RX_SP_INVALID_RCVD_IRQ, "A reserved or invalid short packet has been received" },
+ { CSI2RX_DATA_ID_IRQ, "Data ID error in the header packet" },
+ { CSI2RX_HEADER_CORRECTED_ECC_IRQ, "ECC error detected and corrected" },
+ { CSI2RX_HEADER_ECC_IRQ, "Unrecoverable ECC error" },
+ { CSI2RX_PAYLOAD_CRC_IRQ, "CRC error" },
+};
+
+#define CSI2RX_NUM_EVENTS ARRAY_SIZE(csi2rx_events)
+
struct csi2rx_priv {
struct device *dev;
unsigned int count;
+ int error_irq;
/*
* Used to prevent race conditions between multiple,
@@ -95,6 +137,7 @@ struct csi2rx_priv {
u8 max_lanes;
u8 max_streams;
bool has_internal_dphy;
+ u32 events[CSI2RX_NUM_EVENTS];
struct v4l2_subdev subdev;
struct v4l2_async_notifier notifier;
@@ -124,6 +167,54 @@ static const struct csi2rx_fmt formats[] = {
{ .code = MEDIA_BUS_FMT_BGR888_1X24, .bpp = 24, },
};
+static void csi2rx_configure_error_irq_mask(void __iomem *base,
+ struct csi2rx_priv *csi2rx)
+{
+ u32 error_irq_mask = 0;
+
+ error_irq_mask |= CSI2RX_ECC_ERRORS;
+ error_irq_mask |= CSI2RX_PACKET_ERRORS;
+
+ /*
+ * Iterate through all source pads and check whether they are linked
+ * to an active remote pad. If an active remote pad is found,
+ * calculate the corresponding bit position and set it in the
+ * mask, enabling that stream's FIFO overflow interrupt.
+ */
+ for (int i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) {
+ struct media_pad *remote_pad;
+
+ remote_pad = media_pad_remote_pad_first(&csi2rx->pads[i]);
+ if (remote_pad) {
+ int pad = i - CSI2RX_PAD_SOURCE_STREAM0;
+ u32 bit_mask = CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ << pad;
+
+ error_irq_mask |= bit_mask;
+ }
+ }
+
+ writel(error_irq_mask, base + CSI2RX_ERROR_IRQS_MASK_REG);
+}
+
+static irqreturn_t csi2rx_irq_handler(int irq, void *dev_id)
+{
+ struct csi2rx_priv *csi2rx = dev_id;
+ int i;
+ u32 error_status, error_mask;
+
+ error_status = readl(csi2rx->base + CSI2RX_ERROR_IRQS_REG);
+ error_mask = readl(csi2rx->base + CSI2RX_ERROR_IRQS_MASK_REG);
+
+ for (i = 0; i < CSI2RX_NUM_EVENTS; i++)
+ if ((error_status & csi2rx_events[i].mask) &&
+ (error_mask & csi2rx_events[i].mask))
+ csi2rx->events[i]++;
+
+ writel(error_status, csi2rx->base + CSI2RX_ERROR_IRQS_REG);
+
+ return IRQ_HANDLED;
+}
+
static const struct csi2rx_fmt *csi2rx_get_fmt_by_code(u32 code)
{
unsigned int i;
@@ -220,6 +311,9 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
reset_control_deassert(csi2rx->p_rst);
csi2rx_reset(csi2rx);
+ if (csi2rx->error_irq >= 0)
+ csi2rx_configure_error_irq_mask(csi2rx->base, csi2rx);
+
reg = csi2rx->num_lanes << 8;
for (i = 0; i < csi2rx->num_lanes; i++) {
reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, csi2rx->lanes[i]);
@@ -332,6 +426,8 @@ static void csi2rx_stop(struct csi2rx_priv *csi2rx)
reset_control_assert(csi2rx->sys_rst);
clk_disable_unprepare(csi2rx->sys_clk);
+ writel(0, csi2rx->base + CSI2RX_ERROR_IRQS_MASK_REG);
+
for (i = 0; i < csi2rx->max_streams; i++) {
writel(CSI2RX_STREAM_CTRL_STOP,
csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
@@ -363,6 +459,21 @@ static void csi2rx_stop(struct csi2rx_priv *csi2rx)
}
}
+static int csi2rx_log_status(struct v4l2_subdev *sd)
+{
+ struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(sd);
+ unsigned int i;
+
+ for (i = 0; i < CSI2RX_NUM_EVENTS; i++) {
+ if (csi2rx->events[i])
+ dev_info(csi2rx->dev, "%s events: %d\n",
+ csi2rx_events[i].name,
+ csi2rx->events[i]);
+ }
+
+ return 0;
+}
+
static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
{
struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
@@ -468,7 +579,12 @@ static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
.s_stream = csi2rx_s_stream,
};
+static const struct v4l2_subdev_core_ops csi2rx_core_ops = {
+ .log_status = csi2rx_log_status,
+};
+
static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
+ .core = &csi2rx_core_ops,
.video = &csi2rx_video_ops,
.pad = &csi2rx_pad_ops,
};
@@ -705,6 +821,21 @@ static int csi2rx_probe(struct platform_device *pdev)
if (ret)
goto err_cleanup;
+ csi2rx->error_irq = platform_get_irq_byname_optional(pdev, "error_irq");
+
+ if (csi2rx->error_irq < 0) {
+ dev_dbg(csi2rx->dev, "Optional interrupt not defined, proceeding without it\n");
+ } else {
+ ret = devm_request_irq(csi2rx->dev, csi2rx->error_irq,
+ csi2rx_irq_handler, 0,
+ dev_name(&pdev->dev), csi2rx);
+ if (ret) {
+ dev_err(csi2rx->dev,
+ "Unable to request interrupt: %d\n", ret);
+ goto err_cleanup;
+ }
+ }
+
ret = v4l2_subdev_init_finalize(&csi2rx->subdev);
if (ret)
goto err_cleanup;
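[Reviewer note, illustrative only — not part of the patch] The per-stream overflow bit computed in csi2rx_configure_error_irq_mask() above follows directly from the register layout, where stream N's FIFO overflow flag is BIT(16 + N):

/* Illustrative only: for source pad CSI2RX_PAD_SOURCE_STREAM2 the loop
 * computes pad = 2, so the enabled bit is BIT(16) << 2 == BIT(18),
 * i.e. CSI2RX_STREAM2_FIFO_OVERFLOW_IRQ. */
int pad = CSI2RX_PAD_SOURCE_STREAM2 - CSI2RX_PAD_SOURCE_STREAM0;
u32 bit = CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ << pad;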
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
index 5c17bc58181e..8681dd193033 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
@@ -598,6 +598,27 @@ static void _bswap16(u16 *a)
*a = ((*a & 0x00FF) << 8) | ((*a & 0xFF00) >> 8);
}
+static dma_addr_t mxc_jpeg_get_plane_dma_addr(struct vb2_buffer *buf, unsigned int plane_no)
+{
+ if (plane_no >= buf->num_planes)
+ return 0;
+ return vb2_dma_contig_plane_dma_addr(buf, plane_no) + buf->planes[plane_no].data_offset;
+}
+
+static void *mxc_jpeg_get_plane_vaddr(struct vb2_buffer *buf, unsigned int plane_no)
+{
+ if (plane_no >= buf->num_planes)
+ return NULL;
+ return vb2_plane_vaddr(buf, plane_no) + buf->planes[plane_no].data_offset;
+}
+
+static unsigned long mxc_jpeg_get_plane_payload(struct vb2_buffer *buf, unsigned int plane_no)
+{
+ if (plane_no >= buf->num_planes)
+ return 0;
+ return vb2_get_plane_payload(buf, plane_no) - buf->planes[plane_no].data_offset;
+}
+
static void print_mxc_buf(struct mxc_jpeg_dev *jpeg, struct vb2_buffer *buf,
unsigned long len)
{
@@ -610,11 +631,11 @@ static void print_mxc_buf(struct mxc_jpeg_dev *jpeg, struct vb2_buffer *buf,
return;
for (plane_no = 0; plane_no < buf->num_planes; plane_no++) {
- payload = vb2_get_plane_payload(buf, plane_no);
+ payload = mxc_jpeg_get_plane_payload(buf, plane_no);
if (len == 0)
len = payload;
- dma_addr = vb2_dma_contig_plane_dma_addr(buf, plane_no);
- vaddr = vb2_plane_vaddr(buf, plane_no);
+ dma_addr = mxc_jpeg_get_plane_dma_addr(buf, plane_no);
+ vaddr = mxc_jpeg_get_plane_vaddr(buf, plane_no);
v4l2_dbg(3, debug, &jpeg->v4l2_dev,
"plane %d (vaddr=%p dma_addr=%x payload=%ld):",
plane_no, vaddr, dma_addr, payload);
@@ -712,16 +733,15 @@ static void mxc_jpeg_addrs(struct mxc_jpeg_desc *desc,
struct mxc_jpeg_q_data *q_data;
q_data = mxc_jpeg_get_q_data(ctx, raw_buf->type);
- desc->buf_base0 = vb2_dma_contig_plane_dma_addr(raw_buf, 0);
+ desc->buf_base0 = mxc_jpeg_get_plane_dma_addr(raw_buf, 0);
desc->buf_base1 = 0;
if (img_fmt == STM_CTRL_IMAGE_FORMAT(MXC_JPEG_YUV420)) {
if (raw_buf->num_planes == 2)
- desc->buf_base1 = vb2_dma_contig_plane_dma_addr(raw_buf, 1);
+ desc->buf_base1 = mxc_jpeg_get_plane_dma_addr(raw_buf, 1);
else
desc->buf_base1 = desc->buf_base0 + q_data->sizeimage[0];
}
- desc->stm_bufbase = vb2_dma_contig_plane_dma_addr(jpeg_buf, 0) +
- offset;
+ desc->stm_bufbase = mxc_jpeg_get_plane_dma_addr(jpeg_buf, 0) + offset;
}
static bool mxc_jpeg_is_extended_sequential(const struct mxc_jpeg_fmt *fmt)
@@ -1029,8 +1049,8 @@ static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
vb2_set_plane_payload(&dst_buf->vb2_buf, 1, payload);
}
dev_dbg(dev, "Decoding finished, payload size: %ld + %ld\n",
- vb2_get_plane_payload(&dst_buf->vb2_buf, 0),
- vb2_get_plane_payload(&dst_buf->vb2_buf, 1));
+ mxc_jpeg_get_plane_payload(&dst_buf->vb2_buf, 0),
+ mxc_jpeg_get_plane_payload(&dst_buf->vb2_buf, 1));
}
/* short preview of the results */
@@ -1889,8 +1909,8 @@ static int mxc_jpeg_parse(struct mxc_jpeg_ctx *ctx, struct vb2_buffer *vb)
struct mxc_jpeg_sof *psof = NULL;
struct mxc_jpeg_sos *psos = NULL;
struct mxc_jpeg_src_buf *jpeg_src_buf = vb2_to_mxc_buf(vb);
- u8 *src_addr = (u8 *)vb2_plane_vaddr(vb, 0);
- u32 size = vb2_get_plane_payload(vb, 0);
+ u8 *src_addr = (u8 *)mxc_jpeg_get_plane_vaddr(vb, 0);
+ u32 size = mxc_jpeg_get_plane_payload(vb, 0);
int ret;
memset(&header, 0, sizeof(header));
@@ -2027,6 +2047,11 @@ static int mxc_jpeg_buf_prepare(struct vb2_buffer *vb)
i, vb2_plane_size(vb, i), sizeimage);
return -EINVAL;
}
+ if (!IS_ALIGNED(mxc_jpeg_get_plane_dma_addr(vb, i), MXC_JPEG_ADDR_ALIGNMENT)) {
+ dev_err(dev, "planes[%d] address is not %d aligned\n",
+ i, MXC_JPEG_ADDR_ALIGNMENT);
+ return -EINVAL;
+ }
}
if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type)) {
vb2_set_plane_payload(vb, 0, 0);
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
index fdde45f7e163..44e46face6d1 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
@@ -30,6 +30,7 @@
#define MXC_JPEG_MAX_PLANES 2
#define MXC_JPEG_PATTERN_WIDTH 128
#define MXC_JPEG_PATTERN_HEIGHT 64
+#define MXC_JPEG_ADDR_ALIGNMENT 16
enum mxc_jpeg_enc_state {
MXC_JPEG_ENCODING = 0, /* jpeg encode phase */
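[Reviewer note, illustrative only — not part of the patch] The new mxc_jpeg_get_plane_* helpers fold each plane's data_offset into the DMA address, virtual address, and payload, and buf_prepare now rejects buffers whose effective DMA address is not 16-byte aligned. A minimal sketch of that check, assuming the helper and macro from the hunks above:

/* Illustrative only: IS_ALIGNED(x, a) is ((x & (a - 1)) == 0) for a
 * power-of-two a, so address 0x1000 passes and 0x1008 fails when
 * MXC_JPEG_ADDR_ALIGNMENT == 16. */
dma_addr_t addr = mxc_jpeg_get_plane_dma_addr(vb, 0);
if (!IS_ALIGNED(addr, MXC_JPEG_ADDR_ALIGNMENT))
	return -EINVAL;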
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index d060eadebc7a..2beb5f43c2c0 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -28,6 +28,7 @@
#include <linux/reset.h>
#include <linux/spinlock.h>
+#include <media/mipi-csi2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -229,25 +230,6 @@
#define DEFAULT_SCLK_CSIS_FREQ 166000000UL
-/* MIPI CSI-2 Data Types */
-#define MIPI_CSI2_DATA_TYPE_YUV420_8 0x18
-#define MIPI_CSI2_DATA_TYPE_YUV420_10 0x19
-#define MIPI_CSI2_DATA_TYPE_LE_YUV420_8 0x1a
-#define MIPI_CSI2_DATA_TYPE_CS_YUV420_8 0x1c
-#define MIPI_CSI2_DATA_TYPE_CS_YUV420_10 0x1d
-#define MIPI_CSI2_DATA_TYPE_YUV422_8 0x1e
-#define MIPI_CSI2_DATA_TYPE_YUV422_10 0x1f
-#define MIPI_CSI2_DATA_TYPE_RGB565 0x22
-#define MIPI_CSI2_DATA_TYPE_RGB666 0x23
-#define MIPI_CSI2_DATA_TYPE_RGB888 0x24
-#define MIPI_CSI2_DATA_TYPE_RAW6 0x28
-#define MIPI_CSI2_DATA_TYPE_RAW7 0x29
-#define MIPI_CSI2_DATA_TYPE_RAW8 0x2a
-#define MIPI_CSI2_DATA_TYPE_RAW10 0x2b
-#define MIPI_CSI2_DATA_TYPE_RAW12 0x2c
-#define MIPI_CSI2_DATA_TYPE_RAW14 0x2d
-#define MIPI_CSI2_DATA_TYPE_USER(x) (0x30 + (x))
-
struct mipi_csis_event {
bool debug;
u32 mask;
@@ -357,116 +339,116 @@ static const struct csis_pix_format mipi_csis_formats[] = {
{
.code = MEDIA_BUS_FMT_UYVY8_1X16,
.output = MEDIA_BUS_FMT_UYVY8_1X16,
- .data_type = MIPI_CSI2_DATA_TYPE_YUV422_8,
+ .data_type = MIPI_CSI2_DT_YUV422_8B,
.width = 16,
},
/* RGB formats. */
{
.code = MEDIA_BUS_FMT_RGB565_1X16,
.output = MEDIA_BUS_FMT_RGB565_1X16,
- .data_type = MIPI_CSI2_DATA_TYPE_RGB565,
+ .data_type = MIPI_CSI2_DT_RGB565,
.width = 16,
}, {
.code = MEDIA_BUS_FMT_BGR888_1X24,
.output = MEDIA_BUS_FMT_RGB888_1X24,
- .data_type = MIPI_CSI2_DATA_TYPE_RGB888,
+ .data_type = MIPI_CSI2_DT_RGB888,
.width = 24,
},
/* RAW (Bayer and greyscale) formats. */
{
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
.output = MEDIA_BUS_FMT_SBGGR8_1X8,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW8,
+ .data_type = MIPI_CSI2_DT_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_SGBRG8_1X8,
.output = MEDIA_BUS_FMT_SGBRG8_1X8,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW8,
+ .data_type = MIPI_CSI2_DT_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_SGRBG8_1X8,
.output = MEDIA_BUS_FMT_SGRBG8_1X8,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW8,
+ .data_type = MIPI_CSI2_DT_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
.output = MEDIA_BUS_FMT_SRGGB8_1X8,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW8,
+ .data_type = MIPI_CSI2_DT_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_Y8_1X8,
.output = MEDIA_BUS_FMT_Y8_1X8,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW8,
+ .data_type = MIPI_CSI2_DT_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_SBGGR10_1X10,
.output = MEDIA_BUS_FMT_SBGGR10_1X10,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW10,
+ .data_type = MIPI_CSI2_DT_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_SGBRG10_1X10,
.output = MEDIA_BUS_FMT_SGBRG10_1X10,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW10,
+ .data_type = MIPI_CSI2_DT_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_SGRBG10_1X10,
.output = MEDIA_BUS_FMT_SGRBG10_1X10,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW10,
+ .data_type = MIPI_CSI2_DT_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_SRGGB10_1X10,
.output = MEDIA_BUS_FMT_SRGGB10_1X10,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW10,
+ .data_type = MIPI_CSI2_DT_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_Y10_1X10,
.output = MEDIA_BUS_FMT_Y10_1X10,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW10,
+ .data_type = MIPI_CSI2_DT_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_SBGGR12_1X12,
.output = MEDIA_BUS_FMT_SBGGR12_1X12,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW12,
+ .data_type = MIPI_CSI2_DT_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_SGBRG12_1X12,
.output = MEDIA_BUS_FMT_SGBRG12_1X12,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW12,
+ .data_type = MIPI_CSI2_DT_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_SGRBG12_1X12,
.output = MEDIA_BUS_FMT_SGRBG12_1X12,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW12,
+ .data_type = MIPI_CSI2_DT_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_SRGGB12_1X12,
.output = MEDIA_BUS_FMT_SRGGB12_1X12,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW12,
+ .data_type = MIPI_CSI2_DT_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_Y12_1X12,
.output = MEDIA_BUS_FMT_Y12_1X12,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW12,
+ .data_type = MIPI_CSI2_DT_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_SBGGR14_1X14,
.output = MEDIA_BUS_FMT_SBGGR14_1X14,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW14,
+ .data_type = MIPI_CSI2_DT_RAW14,
.width = 14,
}, {
.code = MEDIA_BUS_FMT_SGBRG14_1X14,
.output = MEDIA_BUS_FMT_SGBRG14_1X14,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW14,
+ .data_type = MIPI_CSI2_DT_RAW14,
.width = 14,
}, {
.code = MEDIA_BUS_FMT_SGRBG14_1X14,
.output = MEDIA_BUS_FMT_SGRBG14_1X14,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW14,
+ .data_type = MIPI_CSI2_DT_RAW14,
.width = 14,
}, {
.code = MEDIA_BUS_FMT_SRGGB14_1X14,
.output = MEDIA_BUS_FMT_SRGGB14_1X14,
- .data_type = MIPI_CSI2_DATA_TYPE_RAW14,
+ .data_type = MIPI_CSI2_DT_RAW14,
.width = 14,
},
/* JPEG */
@@ -494,7 +476,7 @@ static const struct csis_pix_format mipi_csis_formats[] = {
* SoC that can support quad pixel mode, this will have to be
* revisited.
*/
- .data_type = MIPI_CSI2_DATA_TYPE_RAW8,
+ .data_type = MIPI_CSI2_DT_RAW8,
.width = 8,
}
};
@@ -583,7 +565,7 @@ static void __mipi_csis_set_format(struct mipi_csis_device *csis,
*
* TODO: Verify which other formats require DUAL (or QUAD) modes.
*/
- if (csis_fmt->data_type == MIPI_CSI2_DATA_TYPE_YUV422_8)
+ if (csis_fmt->data_type == MIPI_CSI2_DT_YUV422_8B)
val |= MIPI_CSIS_ISPCFG_PIXEL_MODE_DUAL;
val |= MIPI_CSIS_ISPCFG_FMT(csis_fmt->data_type);
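[Reviewer note, illustrative only — not part of the patch] The shared <media/mipi-csi2.h> constants carry the same standard CSI-2 data-type codes as the local defines they replace (compare the removed values above), so the register programming is unchanged. For instance:

/* Illustrative only: values match the removed local defines. */
static_assert(MIPI_CSI2_DT_YUV422_8B == 0x1e);
static_assert(MIPI_CSI2_DT_RAW10 == 0x2b);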
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
index 1e79b1211b60..981648a03113 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
@@ -3,6 +3,7 @@
* Copyright 2019-2020 NXP
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
@@ -245,26 +246,41 @@ static void mxc_isi_v4l2_cleanup(struct mxc_isi_dev *isi)
/* Panic will assert when the buffers are 50% full */
-/* For i.MX8QXP C0 and i.MX8MN ISI IER version */
+/* For i.MX8MN ISI IER version */
static const struct mxc_isi_ier_reg mxc_imx8_isi_ier_v1 = {
- .oflw_y_buf_en = { .offset = 19, .mask = 0x80000 },
- .oflw_u_buf_en = { .offset = 21, .mask = 0x200000 },
- .oflw_v_buf_en = { .offset = 23, .mask = 0x800000 },
+ .oflw_y_buf_en = { .mask = BIT(19) },
+ .oflw_u_buf_en = { .mask = BIT(21) },
+ .oflw_v_buf_en = { .mask = BIT(23) },
- .panic_y_buf_en = {.offset = 20, .mask = 0x100000 },
- .panic_u_buf_en = {.offset = 22, .mask = 0x400000 },
- .panic_v_buf_en = {.offset = 24, .mask = 0x1000000 },
+ .panic_y_buf_en = { .mask = BIT(20) },
+ .panic_u_buf_en = { .mask = BIT(22) },
+ .panic_v_buf_en = { .mask = BIT(24) },
};
-/* For i.MX8MP ISI IER version */
+/* For i.MX8QXP C0 and i.MX8MP ISI IER version */
static const struct mxc_isi_ier_reg mxc_imx8_isi_ier_v2 = {
- .oflw_y_buf_en = { .offset = 18, .mask = 0x40000 },
- .oflw_u_buf_en = { .offset = 20, .mask = 0x100000 },
- .oflw_v_buf_en = { .offset = 22, .mask = 0x400000 },
+ .oflw_y_buf_en = { .mask = BIT(18) },
+ .oflw_u_buf_en = { .mask = BIT(20) },
+ .oflw_v_buf_en = { .mask = BIT(22) },
- .panic_y_buf_en = {.offset = 19, .mask = 0x80000 },
- .panic_u_buf_en = {.offset = 21, .mask = 0x200000 },
- .panic_v_buf_en = {.offset = 23, .mask = 0x800000 },
+ .panic_y_buf_en = { .mask = BIT(19) },
+ .panic_u_buf_en = { .mask = BIT(21) },
+ .panic_v_buf_en = { .mask = BIT(23) },
+};
+
+/* For i.MX8QM ISI IER version */
+static const struct mxc_isi_ier_reg mxc_imx8_isi_ier_qm = {
+ .oflw_y_buf_en = { .mask = BIT(16) },
+ .oflw_u_buf_en = { .mask = BIT(19) },
+ .oflw_v_buf_en = { .mask = BIT(22) },
+
+ .excs_oflw_y_buf_en = { .mask = BIT(17) },
+ .excs_oflw_u_buf_en = { .mask = BIT(20) },
+ .excs_oflw_v_buf_en = { .mask = BIT(23) },
+
+ .panic_y_buf_en = { .mask = BIT(18) },
+ .panic_u_buf_en = { .mask = BIT(21) },
+ .panic_v_buf_en = { .mask = BIT(24) },
};
/* Panic will assert when the buffers are 50% full */
@@ -274,11 +290,6 @@ static const struct mxc_isi_set_thd mxc_imx8_isi_thd_v1 = {
.panic_set_thd_v = { .mask = 0xf0000, .offset = 16, .threshold = 0x7 },
};
-static const struct clk_bulk_data mxc_imx8mn_clks[] = {
- { .id = "axi" },
- { .id = "apb" },
-};
-
static const struct mxc_isi_plat_data mxc_imx8mn_data = {
.model = MXC_ISI_IMX8MN,
.num_ports = 1,
@@ -286,8 +297,6 @@ static const struct mxc_isi_plat_data mxc_imx8mn_data = {
.reg_offset = 0,
.ier_reg = &mxc_imx8_isi_ier_v1,
.set_thd = &mxc_imx8_isi_thd_v1,
- .clks = mxc_imx8mn_clks,
- .num_clks = ARRAY_SIZE(mxc_imx8mn_clks),
.buf_active_reverse = false,
.gasket_ops = &mxc_imx8_gasket_ops,
.has_36bit_dma = false,
@@ -300,8 +309,6 @@ static const struct mxc_isi_plat_data mxc_imx8mp_data = {
.reg_offset = 0x2000,
.ier_reg = &mxc_imx8_isi_ier_v2,
.set_thd = &mxc_imx8_isi_thd_v1,
- .clks = mxc_imx8mn_clks,
- .num_clks = ARRAY_SIZE(mxc_imx8mn_clks),
.buf_active_reverse = true,
.gasket_ops = &mxc_imx8_gasket_ops,
.has_36bit_dma = true,
@@ -314,8 +321,6 @@ static const struct mxc_isi_plat_data mxc_imx8ulp_data = {
.reg_offset = 0x0,
.ier_reg = &mxc_imx8_isi_ier_v2,
.set_thd = &mxc_imx8_isi_thd_v1,
- .clks = mxc_imx8mn_clks,
- .num_clks = ARRAY_SIZE(mxc_imx8mn_clks),
.buf_active_reverse = true,
.has_36bit_dma = false,
};
@@ -327,13 +332,33 @@ static const struct mxc_isi_plat_data mxc_imx93_data = {
.reg_offset = 0,
.ier_reg = &mxc_imx8_isi_ier_v2,
.set_thd = &mxc_imx8_isi_thd_v1,
- .clks = mxc_imx8mn_clks,
- .num_clks = ARRAY_SIZE(mxc_imx8mn_clks),
.buf_active_reverse = true,
.gasket_ops = &mxc_imx93_gasket_ops,
.has_36bit_dma = false,
};
+static const struct mxc_isi_plat_data mxc_imx8qm_data = {
+ .model = MXC_ISI_IMX8QM,
+ .num_ports = 5,
+ .num_channels = 8,
+ .reg_offset = 0x10000,
+ .ier_reg = &mxc_imx8_isi_ier_qm,
+ .set_thd = &mxc_imx8_isi_thd_v1,
+ .buf_active_reverse = true,
+ .has_36bit_dma = false,
+};
+
+static const struct mxc_isi_plat_data mxc_imx8qxp_data = {
+ .model = MXC_ISI_IMX8QXP,
+ .num_ports = 5,
+ .num_channels = 6,
+ .reg_offset = 0x10000,
+ .ier_reg = &mxc_imx8_isi_ier_v2,
+ .set_thd = &mxc_imx8_isi_thd_v1,
+ .buf_active_reverse = true,
+ .has_36bit_dma = false,
+};
+
/* -----------------------------------------------------------------------------
* Power management
*/
@@ -385,7 +410,7 @@ static int mxc_isi_runtime_suspend(struct device *dev)
{
struct mxc_isi_dev *isi = dev_get_drvdata(dev);
- clk_bulk_disable_unprepare(isi->pdata->num_clks, isi->clks);
+ clk_bulk_disable_unprepare(isi->num_clks, isi->clks);
return 0;
}
@@ -395,7 +420,7 @@ static int mxc_isi_runtime_resume(struct device *dev)
struct mxc_isi_dev *isi = dev_get_drvdata(dev);
int ret;
- ret = clk_bulk_prepare_enable(isi->pdata->num_clks, isi->clks);
+ ret = clk_bulk_prepare_enable(isi->num_clks, isi->clks);
if (ret) {
dev_err(dev, "Failed to enable clocks (%d)\n", ret);
return ret;
@@ -413,27 +438,6 @@ static const struct dev_pm_ops mxc_isi_pm_ops = {
* Probe, remove & driver
*/
-static int mxc_isi_clk_get(struct mxc_isi_dev *isi)
-{
- unsigned int size = isi->pdata->num_clks
- * sizeof(*isi->clks);
- int ret;
-
- isi->clks = devm_kmemdup(isi->dev, isi->pdata->clks, size, GFP_KERNEL);
- if (!isi->clks)
- return -ENOMEM;
-
- ret = devm_clk_bulk_get(isi->dev, isi->pdata->num_clks,
- isi->clks);
- if (ret < 0) {
- dev_err(isi->dev, "Failed to acquire clocks: %d\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
static int mxc_isi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -456,34 +460,25 @@ static int mxc_isi_probe(struct platform_device *pdev)
if (!isi->pipes)
return -ENOMEM;
- ret = mxc_isi_clk_get(isi);
- if (ret < 0) {
- dev_err(dev, "Failed to get clocks\n");
- return ret;
- }
+ isi->num_clks = devm_clk_bulk_get_all(dev, &isi->clks);
+ if (isi->num_clks < 0)
+ return dev_err_probe(dev, isi->num_clks, "Failed to get clocks\n");
isi->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(isi->regs)) {
- dev_err(dev, "Failed to get ISI register map\n");
- return PTR_ERR(isi->regs);
- }
+ if (IS_ERR(isi->regs))
+ return dev_err_probe(dev, PTR_ERR(isi->regs),
+ "Failed to get ISI register map\n");
if (isi->pdata->gasket_ops) {
isi->gasket = syscon_regmap_lookup_by_phandle(dev->of_node,
"fsl,blk-ctrl");
- if (IS_ERR(isi->gasket)) {
- ret = PTR_ERR(isi->gasket);
- dev_err(dev, "failed to get gasket: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(isi->gasket))
+ return dev_err_probe(dev, PTR_ERR(isi->gasket),
+ "failed to get gasket\n");
}
dma_size = isi->pdata->has_36bit_dma ? 36 : 32;
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_size));
- if (ret) {
- dev_err(dev, "failed to set DMA mask\n");
- return ret;
- }
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_size));
pm_runtime_enable(dev);
@@ -541,6 +536,8 @@ static void mxc_isi_remove(struct platform_device *pdev)
static const struct of_device_id mxc_isi_of_match[] = {
{ .compatible = "fsl,imx8mn-isi", .data = &mxc_imx8mn_data },
{ .compatible = "fsl,imx8mp-isi", .data = &mxc_imx8mp_data },
+ { .compatible = "fsl,imx8qm-isi", .data = &mxc_imx8qm_data },
+ { .compatible = "fsl,imx8qxp-isi", .data = &mxc_imx8qxp_data },
{ .compatible = "fsl,imx8ulp-isi", .data = &mxc_imx8ulp_data },
{ .compatible = "fsl,imx93-isi", .data = &mxc_imx93_data },
{ /* sentinel */ },
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
index 9c7fe9e5f941..206995bedca4 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
@@ -114,7 +114,6 @@ struct mxc_isi_buffer {
};
struct mxc_isi_reg {
- u32 offset;
u32 mask;
};
@@ -158,6 +157,8 @@ struct mxc_gasket_ops {
enum model {
MXC_ISI_IMX8MN,
MXC_ISI_IMX8MP,
+ MXC_ISI_IMX8QM,
+ MXC_ISI_IMX8QXP,
MXC_ISI_IMX8ULP,
MXC_ISI_IMX93,
};
@@ -170,8 +171,6 @@ struct mxc_isi_plat_data {
const struct mxc_isi_ier_reg *ier_reg;
const struct mxc_isi_set_thd *set_thd;
const struct mxc_gasket_ops *gasket_ops;
- const struct clk_bulk_data *clks;
- unsigned int num_clks;
bool buf_active_reverse;
bool has_36bit_dma;
};
@@ -283,6 +282,7 @@ struct mxc_isi_dev {
void __iomem *regs;
struct clk_bulk_data *clks;
+ int num_clks;
struct regmap *gasket;
struct mxc_isi_crossbar crossbar;
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
index 93a55c97cd17..ede6cc74c023 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
@@ -188,11 +188,12 @@ static int mxc_isi_crossbar_init_state(struct v4l2_subdev *sd,
* Create a 1:1 mapping between pixel link inputs and outputs to
* pipelines by default.
*/
- routes = kcalloc(xbar->num_sources, sizeof(*routes), GFP_KERNEL);
+ routing.num_routes = min(xbar->num_sinks - 1, xbar->num_sources);
+ routes = kcalloc(routing.num_routes, sizeof(*routes), GFP_KERNEL);
if (!routes)
return -ENOMEM;
- for (i = 0; i < xbar->num_sources; ++i) {
+ for (i = 0; i < routing.num_routes; ++i) {
struct v4l2_subdev_route *route = &routes[i];
route->sink_pad = i;
@@ -200,7 +201,6 @@ static int mxc_isi_crossbar_init_state(struct v4l2_subdev *sd,
route->flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE;
}
- routing.num_routes = xbar->num_sources;
routing.routes = routes;
ret = __mxc_isi_crossbar_set_routing(sd, state, &routing);
@@ -352,9 +352,8 @@ static int mxc_isi_crossbar_enable_streams(struct v4l2_subdev *sd,
sink_streams);
if (ret) {
dev_err(xbar->isi->dev,
- "failed to %s streams 0x%llx on '%s':%u: %d\n",
- "enable", sink_streams, remote_sd->name,
- remote_pad, ret);
+ "failed to enable streams 0x%llx on '%s':%u: %d\n",
+ sink_streams, remote_sd->name, remote_pad, ret);
mxc_isi_crossbar_gasket_disable(xbar, sink_pad);
return ret;
}
@@ -392,9 +391,8 @@ static int mxc_isi_crossbar_disable_streams(struct v4l2_subdev *sd,
sink_streams);
if (ret)
dev_err(xbar->isi->dev,
- "failed to %s streams 0x%llx on '%s':%u: %d\n",
- "disable", sink_streams, remote_sd->name,
- remote_pad, ret);
+ "failed to disable streams 0x%llx on '%s':%u: %d\n",
+ sink_streams, remote_sd->name, remote_pad, ret);
mxc_isi_crossbar_gasket_disable(xbar, sink_pad);
}
@@ -453,7 +451,7 @@ int mxc_isi_crossbar_init(struct mxc_isi_dev *isi)
* the memory input.
*/
xbar->num_sinks = isi->pdata->num_ports + 1;
- xbar->num_sources = isi->pdata->num_ports;
+ xbar->num_sources = isi->pdata->num_channels;
num_pads = xbar->num_sinks + xbar->num_sources;
xbar->pads = kcalloc(num_pads, sizeof(*xbar->pads), GFP_KERNEL);
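[Reviewer note, illustrative only — not part of the patch] A worked example of the routing change above: num_sinks counts the external ports plus the memory input, while num_sources now comes from num_channels, so the two no longer match on parts with more pipelines than ports:

/* Illustrative only: on i.MX8QM (num_ports = 5, num_channels = 8). */
unsigned int num_sinks = 5 + 1;		/* ports + memory input */
unsigned int num_sources = 8;		/* pipeline channels */
unsigned int num_routes = min(num_sinks - 1, num_sources);	/* = 5 */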
diff --git a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
index a8bcf60e2f37..3a4645f59a44 100644
--- a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
+++ b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
@@ -5,6 +5,7 @@
* Copyright (C) 2021 Purism SPC
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
@@ -62,6 +63,8 @@
#define CSI2RX_CFG_VID_P_FIFO_SEND_LEVEL 0x188
#define CSI2RX_CFG_DISABLE_PAYLOAD_1 0x130
+struct csi_state;
+
enum {
ST_POWERED = 1,
ST_STREAMING = 2,
@@ -83,11 +86,11 @@ static const char * const imx8mq_mipi_csi_clk_id[CSI2_NUM_CLKS] = {
#define CSI2_NUM_CLKS ARRAY_SIZE(imx8mq_mipi_csi_clk_id)
-#define GPR_CSI2_1_RX_ENABLE BIT(13)
-#define GPR_CSI2_1_VID_INTFC_ENB BIT(12)
-#define GPR_CSI2_1_HSEL BIT(10)
-#define GPR_CSI2_1_CONT_CLK_MODE BIT(8)
-#define GPR_CSI2_1_S_PRG_RXHS_SETTLE(x) (((x) & 0x3f) << 2)
+struct imx8mq_plat_data {
+ int (*enable)(struct csi_state *state, u32 hs_settle);
+ void (*disable)(struct csi_state *state);
+ bool use_reg_csr;
+};
/*
* The send level configures the number of entries that must accumulate in
@@ -106,6 +109,7 @@ static const char * const imx8mq_mipi_csi_clk_id[CSI2_NUM_CLKS] = {
struct csi_state {
struct device *dev;
+ const struct imx8mq_plat_data *pdata;
void __iomem *regs;
struct clk_bulk_data clks[CSI2_NUM_CLKS];
struct reset_control *rst;
@@ -137,6 +141,123 @@ struct csi2_pix_format {
u8 width;
};
+/* -----------------------------------------------------------------------------
+ * i.MX8MQ GPR
+ */
+
+#define GPR_CSI2_1_RX_ENABLE BIT(13)
+#define GPR_CSI2_1_VID_INTFC_ENB BIT(12)
+#define GPR_CSI2_1_HSEL BIT(10)
+#define GPR_CSI2_1_CONT_CLK_MODE BIT(8)
+#define GPR_CSI2_1_S_PRG_RXHS_SETTLE(x) (((x) & 0x3f) << 2)
+
+static int imx8mq_gpr_enable(struct csi_state *state, u32 hs_settle)
+{
+ regmap_update_bits(state->phy_gpr,
+ state->phy_gpr_reg,
+ 0x3fff,
+ GPR_CSI2_1_RX_ENABLE |
+ GPR_CSI2_1_VID_INTFC_ENB |
+ GPR_CSI2_1_HSEL |
+ GPR_CSI2_1_CONT_CLK_MODE |
+ GPR_CSI2_1_S_PRG_RXHS_SETTLE(hs_settle));
+
+ return 0;
+}
+
+static const struct imx8mq_plat_data imx8mq_data = {
+ .enable = imx8mq_gpr_enable,
+};
+
+/* -----------------------------------------------------------------------------
+ * i.MX8QXP
+ */
+
+#define CSI2SS_PL_CLK_INTERVAL_US 100
+#define CSI2SS_PL_CLK_TIMEOUT_US 100000
+
+#define CSI2SS_PLM_CTRL 0x0
+#define CSI2SS_PLM_CTRL_ENABLE_PL BIT(0)
+#define CSI2SS_PLM_CTRL_VSYNC_OVERRIDE BIT(9)
+#define CSI2SS_PLM_CTRL_HSYNC_OVERRIDE BIT(10)
+#define CSI2SS_PLM_CTRL_VALID_OVERRIDE BIT(11)
+#define CSI2SS_PLM_CTRL_POLARITY_HIGH BIT(12)
+#define CSI2SS_PLM_CTRL_PL_CLK_RUN BIT(31)
+
+#define CSI2SS_PHY_CTRL 0x4
+#define CSI2SS_PHY_CTRL_RX_ENABLE BIT(0)
+#define CSI2SS_PHY_CTRL_AUTO_PD_EN BIT(1)
+#define CSI2SS_PHY_CTRL_DDRCLK_EN BIT(2)
+#define CSI2SS_PHY_CTRL_CONT_CLK_MODE BIT(3)
+#define CSI2SS_PHY_CTRL_RX_HS_SETTLE_MASK GENMASK(9, 4)
+#define CSI2SS_PHY_CTRL_RTERM_SEL BIT(21)
+#define CSI2SS_PHY_CTRL_PD BIT(22)
+
+#define CSI2SS_DATA_TYPE_DISABLE_BF 0x38
+#define CSI2SS_DATA_TYPE_DISABLE_BF_MASK GENMASK(23, 0)
+
+#define CSI2SS_CTRL_CLK_RESET 0x44
+#define CSI2SS_CTRL_CLK_RESET_EN BIT(0)
+
+static int imx8qxp_gpr_enable(struct csi_state *state, u32 hs_settle)
+{
+ int ret;
+ u32 val;
+
+ /* Clear format */
+ regmap_clear_bits(state->phy_gpr, CSI2SS_DATA_TYPE_DISABLE_BF,
+ CSI2SS_DATA_TYPE_DISABLE_BF_MASK);
+
+ regmap_write(state->phy_gpr, CSI2SS_PLM_CTRL, 0x0);
+
+ regmap_write(state->phy_gpr, CSI2SS_PHY_CTRL,
+ FIELD_PREP(CSI2SS_PHY_CTRL_RX_HS_SETTLE_MASK, hs_settle) |
+ CSI2SS_PHY_CTRL_RX_ENABLE | CSI2SS_PHY_CTRL_DDRCLK_EN |
+ CSI2SS_PHY_CTRL_CONT_CLK_MODE | CSI2SS_PHY_CTRL_PD |
+ CSI2SS_PHY_CTRL_RTERM_SEL | CSI2SS_PHY_CTRL_AUTO_PD_EN);
+
+ ret = regmap_read_poll_timeout(state->phy_gpr, CSI2SS_PLM_CTRL,
+ val, !(val & CSI2SS_PLM_CTRL_PL_CLK_RUN),
+ CSI2SS_PL_CLK_INTERVAL_US,
+ CSI2SS_PL_CLK_TIMEOUT_US);
+
+ if (ret) {
+ dev_err(state->dev, "Timeout waiting for Pixel-Link clock\n");
+ return ret;
+ }
+
+ /* Enable Pixel Link master */
+ regmap_set_bits(state->phy_gpr, CSI2SS_PLM_CTRL,
+ CSI2SS_PLM_CTRL_ENABLE_PL | CSI2SS_PLM_CTRL_VALID_OVERRIDE);
+
+ /* PHY Enable */
+ regmap_clear_bits(state->phy_gpr, CSI2SS_PHY_CTRL,
+ CSI2SS_PHY_CTRL_PD | CSI2SS_PLM_CTRL_POLARITY_HIGH);
+
+ /* Release Reset */
+ regmap_set_bits(state->phy_gpr, CSI2SS_CTRL_CLK_RESET, CSI2SS_CTRL_CLK_RESET_EN);
+
+ return ret;
+}
+
+static void imx8qxp_gpr_disable(struct csi_state *state)
+{
+ /* Disable Pixel Link */
+ regmap_write(state->phy_gpr, CSI2SS_PLM_CTRL, 0x0);
+
+ /* Disable PHY */
+ regmap_write(state->phy_gpr, CSI2SS_PHY_CTRL, 0x0);
+
+ regmap_clear_bits(state->phy_gpr, CSI2SS_CTRL_CLK_RESET,
+ CSI2SS_CTRL_CLK_RESET_EN);
+}
+
+static const struct imx8mq_plat_data imx8qxp_data = {
+ .enable = imx8qxp_gpr_enable,
+ .disable = imx8qxp_gpr_disable,
+ .use_reg_csr = true,
+};
+
static const struct csi2_pix_format imx8mq_mipi_csi_formats[] = {
/* RAW (Bayer and greyscale) formats. */
{
@@ -371,14 +492,9 @@ static int imx8mq_mipi_csi_start_stream(struct csi_state *state,
if (ret)
return ret;
- regmap_update_bits(state->phy_gpr,
- state->phy_gpr_reg,
- 0x3fff,
- GPR_CSI2_1_RX_ENABLE |
- GPR_CSI2_1_VID_INTFC_ENB |
- GPR_CSI2_1_HSEL |
- GPR_CSI2_1_CONT_CLK_MODE |
- GPR_CSI2_1_S_PRG_RXHS_SETTLE(hs_settle));
+ ret = state->pdata->enable(state, hs_settle);
+ if (ret)
+ return ret;
return 0;
}
@@ -386,6 +502,9 @@ static int imx8mq_mipi_csi_start_stream(struct csi_state *state,
static void imx8mq_mipi_csi_stop_stream(struct csi_state *state)
{
imx8mq_mipi_csi_write(state, CSI2RX_CFG_DISABLE_DATA_LANES, 0xf);
+
+ if (state->pdata->disable)
+ state->pdata->disable(state);
}
/* -----------------------------------------------------------------------------
@@ -837,6 +956,25 @@ static int imx8mq_mipi_csi_parse_dt(struct csi_state *state)
return PTR_ERR(state->rst);
}
+ if (state->pdata->use_reg_csr) {
+ const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(to_platform_device(dev), 1);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "Missing CSR register\n");
+
+ state->phy_gpr = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(state->phy_gpr))
+ return dev_err_probe(dev, PTR_ERR(state->phy_gpr),
+ "Failed to init CSI MMIO regmap\n");
+ return 0;
+ }
+
ret = of_property_read_u32_array(np, "fsl,mipi-phy-gpr", out_val,
ARRAY_SIZE(out_val));
if (ret) {
@@ -876,6 +1014,8 @@ static int imx8mq_mipi_csi_probe(struct platform_device *pdev)
state->dev = dev;
+ state->pdata = of_device_get_match_data(dev);
+
ret = imx8mq_mipi_csi_parse_dt(state);
if (ret < 0) {
dev_err(dev, "Failed to parse device tree: %d\n", ret);
@@ -953,7 +1093,8 @@ static void imx8mq_mipi_csi_remove(struct platform_device *pdev)
}
static const struct of_device_id imx8mq_mipi_csi_of_match[] = {
- { .compatible = "fsl,imx8mq-mipi-csi2", },
+ { .compatible = "fsl,imx8mq-mipi-csi2", .data = &imx8mq_data },
+ { .compatible = "fsl,imx8qxp-mipi-csi2", .data = &imx8qxp_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, imx8mq_mipi_csi_of_match);
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index f732a76de93e..88c0ba495c32 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -849,8 +849,7 @@ static int csiphy_init(struct csiphy_device *csiphy)
regs->offset = 0x1000;
break;
default:
- WARN(1, "unknown csiphy version\n");
- return -ENODEV;
+ break;
}
return 0;
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index c622efcc92ff..2de97f58f9ae 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -103,11 +103,6 @@ const struct csiphy_formats csiphy_formats_8x96 = {
.formats = formats_8x96
};
-const struct csiphy_formats csiphy_formats_sc7280 = {
- .nformats = ARRAY_SIZE(formats_sdm845),
- .formats = formats_sdm845
-};
-
const struct csiphy_formats csiphy_formats_sdm845 = {
.nformats = ARRAY_SIZE(formats_sdm845),
.formats = formats_sdm845
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h
index ab91273303b9..895f80003c44 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.h
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.h
@@ -126,7 +126,6 @@ void msm_csiphy_unregister_entity(struct csiphy_device *csiphy);
extern const struct csiphy_formats csiphy_formats_8x16;
extern const struct csiphy_formats csiphy_formats_8x96;
-extern const struct csiphy_formats csiphy_formats_sc7280;
extern const struct csiphy_formats csiphy_formats_sdm845;
extern const struct csiphy_hw_ops csiphy_ops_2ph_1_0;
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index aa021fd5e123..8d05802d1735 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -225,6 +225,21 @@ static int video_check_format(struct camss_video *video)
return 0;
}
+static int video_prepare_streaming(struct vb2_queue *q)
+{
+ struct camss_video *video = vb2_get_drv_priv(q);
+ struct video_device *vdev = &video->vdev;
+ int ret;
+
+ ret = v4l2_pipeline_pm_get(&vdev->entity);
+ if (ret < 0) {
+ dev_err(video->camss->dev, "Failed to power up pipeline: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
static int video_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct camss_video *video = vb2_get_drv_priv(q);
@@ -308,13 +323,23 @@ static void video_stop_streaming(struct vb2_queue *q)
video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR);
}
+static void video_unprepare_streaming(struct vb2_queue *q)
+{
+ struct camss_video *video = vb2_get_drv_priv(q);
+ struct video_device *vdev = &video->vdev;
+
+ v4l2_pipeline_pm_put(&vdev->entity);
+}
+
static const struct vb2_ops msm_video_vb2_q_ops = {
.queue_setup = video_queue_setup,
.buf_init = video_buf_init,
.buf_prepare = video_buf_prepare,
.buf_queue = video_buf_queue,
+ .prepare_streaming = video_prepare_streaming,
.start_streaming = video_start_streaming,
.stop_streaming = video_stop_streaming,
+ .unprepare_streaming = video_unprepare_streaming,
};
/* -----------------------------------------------------------------------------
@@ -599,20 +624,10 @@ static int video_open(struct file *file)
file->private_data = vfh;
- ret = v4l2_pipeline_pm_get(&vdev->entity);
- if (ret < 0) {
- dev_err(video->camss->dev, "Failed to power up pipeline: %d\n",
- ret);
- goto error_pm_use;
- }
-
mutex_unlock(&video->lock);
return 0;
-error_pm_use:
- v4l2_fh_release(file);
-
error_alloc:
mutex_unlock(&video->lock);
@@ -621,12 +636,8 @@ error_alloc:
static int video_release(struct file *file)
{
- struct video_device *vdev = video_devdata(file);
-
vb2_fop_release(file);
- v4l2_pipeline_pm_put(&vdev->entity);
-
file->private_data = NULL;
return 0;
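[Reviewer note, illustrative only — not part of the patch] Moving the pipeline power calls from open()/release() into the vb2 prepare_streaming/unprepare_streaming hooks changes the power sequence roughly as follows, per my reading of the vb2 queue-ops contract:

/* Illustrative only:
 *   VIDIOC_STREAMON  -> .prepare_streaming   -> v4l2_pipeline_pm_get()
 *                    -> .start_streaming     (once enough buffers are queued)
 *   VIDIOC_STREAMOFF -> .stop_streaming
 *                    -> .unprepare_streaming -> v4l2_pipeline_pm_put()
 * so the pipeline is powered only while streaming, not for as long as
 * the video node is merely held open. */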
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 06f42875702f..e08e70b93824 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -1481,7 +1481,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.csiphy = {
.id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
- .formats = &csiphy_formats_sc7280
+ .formats = &csiphy_formats_sdm845,
}
},
/* CSIPHY1 */
@@ -1496,7 +1496,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.csiphy = {
.id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
- .formats = &csiphy_formats_sc7280
+ .formats = &csiphy_formats_sdm845,
}
},
/* CSIPHY2 */
@@ -1511,7 +1511,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.csiphy = {
.id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
- .formats = &csiphy_formats_sc7280
+ .formats = &csiphy_formats_sdm845,
}
},
/* CSIPHY3 */
@@ -1526,7 +1526,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.csiphy = {
.id = 3,
.hw_ops = &csiphy_ops_3ph_1_0,
- .formats = &csiphy_formats_sc7280
+ .formats = &csiphy_formats_sdm845,
}
},
/* CSIPHY4 */
@@ -1541,7 +1541,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.csiphy = {
.id = 4,
.hw_ops = &csiphy_ops_3ph_1_0,
- .formats = &csiphy_formats_sc7280
+ .formats = &csiphy_formats_sdm845,
}
},
};
@@ -2486,8 +2486,8 @@ static const struct resources_icc icc_res_sm8550[] = {
static const struct camss_subdev_resources csiphy_res_x1e80100[] = {
/* CSIPHY0 */
{
- .regulators = { "vdd-csiphy-0p8-supply",
- "vdd-csiphy-1p2-supply" },
+ .regulators = { "vdd-csiphy-0p8",
+ "vdd-csiphy-1p2" },
.clock = { "csiphy0", "csiphy0_timer" },
.clock_rate = { { 300000000, 400000000, 480000000 },
{ 266666667, 400000000 } },
@@ -2501,8 +2501,8 @@ static const struct camss_subdev_resources csiphy_res_x1e80100[] = {
},
/* CSIPHY1 */
{
- .regulators = { "vdd-csiphy-0p8-supply",
- "vdd-csiphy-1p2-supply" },
+ .regulators = { "vdd-csiphy-0p8",
+ "vdd-csiphy-1p2" },
.clock = { "csiphy1", "csiphy1_timer" },
.clock_rate = { { 300000000, 400000000, 480000000 },
{ 266666667, 400000000 } },
@@ -2516,8 +2516,8 @@ static const struct camss_subdev_resources csiphy_res_x1e80100[] = {
},
/* CSIPHY2 */
{
- .regulators = { "vdd-csiphy-0p8-supply",
- "vdd-csiphy-1p2-supply" },
+ .regulators = { "vdd-csiphy-0p8",
+ "vdd-csiphy-1p2" },
.clock = { "csiphy2", "csiphy2_timer" },
.clock_rate = { { 300000000, 400000000, 480000000 },
{ 266666667, 400000000 } },
@@ -2531,8 +2531,8 @@ static const struct camss_subdev_resources csiphy_res_x1e80100[] = {
},
/* CSIPHY4 */
{
- .regulators = { "vdd-csiphy-0p8-supply",
- "vdd-csiphy-1p2-supply" },
+ .regulators = { "vdd-csiphy-0p8",
+ "vdd-csiphy-1p2" },
.clock = { "csiphy4", "csiphy4_timer" },
.clock_rate = { { 300000000, 400000000, 480000000 },
{ 266666667, 400000000 } },
@@ -3386,43 +3386,39 @@ static int camss_subdev_notifier_complete(struct v4l2_async_notifier *async)
struct camss *camss = container_of(async, struct camss, notifier);
struct v4l2_device *v4l2_dev = &camss->v4l2_dev;
struct v4l2_subdev *sd;
- int ret;
list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
- if (sd->host_priv) {
- struct media_entity *sensor = &sd->entity;
- struct csiphy_device *csiphy =
- (struct csiphy_device *) sd->host_priv;
- struct media_entity *input = &csiphy->subdev.entity;
- unsigned int i;
-
- for (i = 0; i < sensor->num_pads; i++) {
- if (sensor->pads[i].flags & MEDIA_PAD_FL_SOURCE)
- break;
- }
- if (i == sensor->num_pads) {
- dev_err(camss->dev,
- "No source pad in external entity\n");
- return -EINVAL;
- }
+ struct csiphy_device *csiphy = sd->host_priv;
+ struct media_entity *input, *sensor;
+ unsigned int i;
+ int ret;
- ret = media_create_pad_link(sensor, i,
- input, MSM_CSIPHY_PAD_SINK,
- MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
- if (ret < 0) {
- camss_link_err(camss, sensor->name,
- input->name,
- ret);
- return ret;
- }
+ if (!csiphy)
+ continue;
+
+ input = &csiphy->subdev.entity;
+ sensor = &sd->entity;
+
+ for (i = 0; i < sensor->num_pads; i++) {
+ if (sensor->pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ break;
+ }
+ if (i == sensor->num_pads) {
+ dev_err(camss->dev,
+ "No source pad in external entity\n");
+ return -EINVAL;
}
- }
- ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev);
- if (ret < 0)
- return ret;
+ ret = media_create_pad_link(sensor, i, input,
+ MSM_CSIPHY_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret < 0) {
+ camss_link_err(camss, sensor->name, input->name, ret);
+ return ret;
+ }
+ }
- return media_device_register(&camss->media_dev);
+ return v4l2_device_register_subdev_nodes(&camss->v4l2_dev);
}
static const struct v4l2_async_notifier_operations camss_subdev_notifier_ops = {
@@ -3625,7 +3621,7 @@ static int camss_probe(struct platform_device *pdev)
ret = v4l2_device_register(camss->dev, &camss->v4l2_dev);
if (ret < 0) {
dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
- goto err_genpd_cleanup;
+ goto err_media_device_cleanup;
}
v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev);
@@ -3646,6 +3642,12 @@ static int camss_probe(struct platform_device *pdev)
if (ret < 0)
goto err_register_subdevs;
+ ret = media_device_register(&camss->media_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register media device: %d\n", ret);
+ goto err_register_subdevs;
+ }
+
if (num_subdevs) {
camss->notifier.ops = &camss_subdev_notifier_ops;
@@ -3654,32 +3656,29 @@ static int camss_probe(struct platform_device *pdev)
dev_err(dev,
"Failed to register async subdev nodes: %d\n",
ret);
- goto err_register_subdevs;
+ goto err_media_device_unregister;
}
} else {
ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev);
if (ret < 0) {
dev_err(dev, "Failed to register subdev nodes: %d\n",
ret);
- goto err_register_subdevs;
- }
-
- ret = media_device_register(&camss->media_dev);
- if (ret < 0) {
- dev_err(dev, "Failed to register media device: %d\n",
- ret);
- goto err_register_subdevs;
+ goto err_media_device_unregister;
}
}
return 0;
+err_media_device_unregister:
+ media_device_unregister(&camss->media_dev);
err_register_subdevs:
camss_unregister_entities(camss);
err_v4l2_device_unregister:
v4l2_device_unregister(&camss->v4l2_dev);
v4l2_async_nf_cleanup(&camss->notifier);
pm_runtime_disable(dev);
+err_media_device_cleanup:
+ media_device_cleanup(&camss->media_dev);
err_genpd_cleanup:
camss_genpd_cleanup(camss);
diff --git a/drivers/media/platform/qcom/iris/iris_buffer.c b/drivers/media/platform/qcom/iris/iris_buffer.c
index e5c5a564fcb8..6425e4919e3b 100644
--- a/drivers/media/platform/qcom/iris/iris_buffer.c
+++ b/drivers/media/platform/qcom/iris/iris_buffer.c
@@ -205,6 +205,9 @@ static u32 iris_bitstream_buffer_size(struct iris_inst *inst)
if (num_mbs > NUM_MBS_4K) {
div_factor = 4;
base_res_mbs = caps->max_mbpf;
+ } else {
+ if (inst->codec == V4L2_PIX_FMT_VP9)
+ div_factor = 1;
}
/*
@@ -376,7 +379,7 @@ int iris_destroy_internal_buffer(struct iris_inst *inst, struct iris_buffer *buf
return 0;
}
-int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane)
+static int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane, bool force)
{
const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
struct iris_buffer *buf, *next;
@@ -396,6 +399,14 @@ int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane)
for (i = 0; i < len; i++) {
buffers = &inst->buffers[internal_buf_type[i]];
list_for_each_entry_safe(buf, next, &buffers->list, list) {
+ /*
+ * During stream on, skip destroying an internal (DPB) buffer
+ * if the firmware has not yet returned it.
+ * During close, destroy all buffers unconditionally.
+ */
+ if (!force && buf->attr & BUF_ATTR_QUEUED)
+ continue;
+
ret = iris_destroy_internal_buffer(inst, buf);
if (ret)
return ret;
@@ -405,6 +416,16 @@ int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane)
return 0;
}
+int iris_destroy_all_internal_buffers(struct iris_inst *inst, u32 plane)
+{
+ return iris_destroy_internal_buffers(inst, plane, true);
+}
+
+int iris_destroy_dequeued_internal_buffers(struct iris_inst *inst, u32 plane)
+{
+ return iris_destroy_internal_buffers(inst, plane, false);
+}
+
static int iris_release_internal_buffers(struct iris_inst *inst,
enum iris_buffer_type buffer_type)
{
@@ -593,10 +614,13 @@ int iris_vb2_buffer_done(struct iris_inst *inst, struct iris_buffer *buf)
vb2 = &vbuf->vb2_buf;
- if (buf->flags & V4L2_BUF_FLAG_ERROR)
+ if (buf->flags & V4L2_BUF_FLAG_ERROR) {
state = VB2_BUF_STATE_ERROR;
- else
- state = VB2_BUF_STATE_DONE;
+ vb2_set_plane_payload(vb2, 0, 0);
+ vb2->timestamp = 0;
+ v4l2_m2m_buf_done(vbuf, state);
+ return 0;
+ }
vbuf->flags |= buf->flags;
@@ -615,7 +639,10 @@ int iris_vb2_buffer_done(struct iris_inst *inst, struct iris_buffer *buf)
v4l2_event_queue_fh(&inst->fh, &ev);
v4l2_m2m_mark_stopped(m2m_ctx);
}
+ inst->last_buffer_dequeued = true;
}
+
+ state = VB2_BUF_STATE_DONE;
vb2->timestamp = buf->timestamp;
v4l2_m2m_buf_done(vbuf, state);
diff --git a/drivers/media/platform/qcom/iris/iris_buffer.h b/drivers/media/platform/qcom/iris/iris_buffer.h
index c36b6347b077..00825ad2dc3a 100644
--- a/drivers/media/platform/qcom/iris/iris_buffer.h
+++ b/drivers/media/platform/qcom/iris/iris_buffer.h
@@ -106,7 +106,8 @@ void iris_get_internal_buffers(struct iris_inst *inst, u32 plane);
int iris_create_internal_buffers(struct iris_inst *inst, u32 plane);
int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane);
int iris_destroy_internal_buffer(struct iris_inst *inst, struct iris_buffer *buffer);
-int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane);
+int iris_destroy_all_internal_buffers(struct iris_inst *inst, u32 plane);
+int iris_destroy_dequeued_internal_buffers(struct iris_inst *inst, u32 plane);
int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst);
int iris_alloc_and_queue_input_int_bufs(struct iris_inst *inst);
int iris_queue_buffer(struct iris_inst *inst, struct iris_buffer *buf);
diff --git a/drivers/media/platform/qcom/iris/iris_ctrls.c b/drivers/media/platform/qcom/iris/iris_ctrls.c
index b690578256d5..9136b723c0f2 100644
--- a/drivers/media/platform/qcom/iris/iris_ctrls.c
+++ b/drivers/media/platform/qcom/iris/iris_ctrls.c
@@ -17,12 +17,20 @@ static inline bool iris_valid_cap_id(enum platform_inst_fw_cap_type cap_id)
static enum platform_inst_fw_cap_type iris_get_cap_id(u32 id)
{
switch (id) {
- case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
- return DEBLOCK;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
- return PROFILE;
+ return PROFILE_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ return PROFILE_HEVC;
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ return PROFILE_VP9;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- return LEVEL;
+ return LEVEL_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ return LEVEL_HEVC;
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
+ return LEVEL_VP9;
+ case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
+ return TIER;
default:
return INST_FW_CAP_MAX;
}
@@ -34,12 +42,20 @@ static u32 iris_get_v4l2_id(enum platform_inst_fw_cap_type cap_id)
return 0;
switch (cap_id) {
- case DEBLOCK:
- return V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER;
- case PROFILE:
+ case PROFILE_H264:
return V4L2_CID_MPEG_VIDEO_H264_PROFILE;
- case LEVEL:
+ case PROFILE_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_PROFILE;
+ case PROFILE_VP9:
+ return V4L2_CID_MPEG_VIDEO_VP9_PROFILE;
+ case LEVEL_H264:
return V4L2_CID_MPEG_VIDEO_H264_LEVEL;
+ case LEVEL_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_LEVEL;
+ case LEVEL_VP9:
+ return V4L2_CID_MPEG_VIDEO_VP9_LEVEL;
+ case TIER:
+ return V4L2_CID_MPEG_VIDEO_HEVC_TIER;
default:
return 0;
}
@@ -84,8 +100,6 @@ int iris_ctrls_init(struct iris_inst *inst)
if (iris_get_v4l2_id(cap[idx].cap_id))
num_ctrls++;
}
- if (!num_ctrls)
- return -EINVAL;
/* Adding 1 to num_ctrls to include V4L2_CID_MIN_BUFFERS_FOR_CAPTURE */
@@ -163,6 +177,7 @@ void iris_session_init_caps(struct iris_core *core)
core->inst_fw_caps[cap_id].value = caps[i].value;
core->inst_fw_caps[cap_id].flags = caps[i].flags;
core->inst_fw_caps[cap_id].hfi_id = caps[i].hfi_id;
+ core->inst_fw_caps[cap_id].set = caps[i].set;
}
}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_common.h b/drivers/media/platform/qcom/iris/iris_hfi_common.h
index b2c541367fc6..9e6aadb83783 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_common.h
+++ b/drivers/media/platform/qcom/iris/iris_hfi_common.h
@@ -140,6 +140,7 @@ struct hfi_subscription_params {
u32 color_info;
u32 profile;
u32 level;
+ u32 tier;
};
u32 iris_hfi_get_v4l2_color_primaries(u32 hfi_primaries);
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c b/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
index 64f887d9a17d..5fc30d54af4d 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
@@ -88,16 +88,29 @@ static int iris_hfi_gen1_sys_pc_prep(struct iris_core *core)
static int iris_hfi_gen1_session_open(struct iris_inst *inst)
{
struct hfi_session_open_pkt packet;
+ u32 codec = 0;
int ret;
if (inst->state != IRIS_INST_DEINIT)
return -EALREADY;
+ switch (inst->codec) {
+ case V4L2_PIX_FMT_H264:
+ codec = HFI_VIDEO_CODEC_H264;
+ break;
+ case V4L2_PIX_FMT_HEVC:
+ codec = HFI_VIDEO_CODEC_HEVC;
+ break;
+ case V4L2_PIX_FMT_VP9:
+ codec = HFI_VIDEO_CODEC_VP9;
+ break;
+ }
+
packet.shdr.hdr.size = sizeof(struct hfi_session_open_pkt);
packet.shdr.hdr.pkt_type = HFI_CMD_SYS_SESSION_INIT;
packet.shdr.session_id = inst->session_id;
packet.session_domain = HFI_SESSION_TYPE_DEC;
- packet.session_codec = HFI_VIDEO_CODEC_H264;
+ packet.session_codec = codec;
reinit_completion(&inst->completion);
@@ -208,8 +221,10 @@ static int iris_hfi_gen1_session_stop(struct iris_inst *inst, u32 plane)
flush_pkt.flush_type = flush_type;
ret = iris_hfi_queue_cmd_write(core, &flush_pkt, flush_pkt.shdr.hdr.size);
- if (!ret)
+ if (!ret) {
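+ /* Record the in-flight flush so its response is accounted for. */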
+ inst->flush_responses_pending++;
ret = iris_wait_for_session_response(inst, true);
+ }
}
return ret;
@@ -386,6 +401,8 @@ static int iris_hfi_gen1_session_drain(struct iris_inst *inst, u32 plane)
ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
ip_pkt.shdr.session_id = inst->session_id;
ip_pkt.flags = HFI_BUFFERFLAG_EOS;
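+ /*
+ * The VP9 EOS packet has no backing v4l2 buffer; mark it with a
+ * sentinel address so the ETB-done handler can ignore it.
+ */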
+ if (inst->codec == V4L2_PIX_FMT_VP9)
+ ip_pkt.packet_buffer = 0xdeadb000;
return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
}
@@ -490,14 +507,6 @@ iris_hfi_gen1_packet_session_set_property(struct hfi_session_set_property_pkt *p
packet->shdr.hdr.size += sizeof(u32) + sizeof(*wm);
break;
}
- case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER: {
- struct hfi_enable *en = prop_data;
- u32 *in = pdata;
-
- en->enable = *in;
- packet->shdr.hdr.size += sizeof(u32) + sizeof(*en);
- break;
- }
default:
return -EINVAL;
}
@@ -546,14 +555,15 @@ static int iris_hfi_gen1_set_resolution(struct iris_inst *inst)
struct hfi_framesize fs;
int ret;
- fs.buffer_type = HFI_BUFFER_INPUT;
- fs.width = inst->fmt_src->fmt.pix_mp.width;
- fs.height = inst->fmt_src->fmt.pix_mp.height;
-
- ret = hfi_gen1_set_property(inst, ptype, &fs, sizeof(fs));
- if (ret)
- return ret;
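+ /*
+ * Skip reprogramming the input resolution while a firmware-
+ * initiated resolution change is pending.
+ */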
+ if (!iris_drc_pending(inst)) {
+ fs.buffer_type = HFI_BUFFER_INPUT;
+ fs.width = inst->fmt_src->fmt.pix_mp.width;
+ fs.height = inst->fmt_src->fmt.pix_mp.height;
+ ret = hfi_gen1_set_property(inst, ptype, &fs, sizeof(fs));
+ if (ret)
+ return ret;
+ }
fs.buffer_type = HFI_BUFFER_OUTPUT2;
fs.width = inst->fmt_dst->fmt.pix_mp.width;
fs.height = inst->fmt_dst->fmt.pix_mp.height;
@@ -768,8 +778,8 @@ static int iris_hfi_gen1_session_set_config_params(struct iris_inst *inst, u32 p
iris_hfi_gen1_set_bufsize},
};
- config_params = core->iris_platform_data->input_config_params;
- config_params_size = core->iris_platform_data->input_config_params_size;
+ config_params = core->iris_platform_data->input_config_params_default;
+ config_params_size = core->iris_platform_data->input_config_params_default_size;
if (V4L2_TYPE_IS_OUTPUT(plane)) {
for (i = 0; i < config_params_size; i++) {
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h b/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
index 9f246816a286..d4d119ca98b0 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
@@ -13,6 +13,8 @@
#define HFI_SESSION_TYPE_DEC 2
#define HFI_VIDEO_CODEC_H264 0x00000002
+#define HFI_VIDEO_CODEC_HEVC 0x00002000
+#define HFI_VIDEO_CODEC_VP9 0x00004000
#define HFI_ERR_NONE 0x0
@@ -65,7 +67,6 @@
#define HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS 0x202001
-#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER 0x1200001
#define HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS 0x120300e
#define HFI_PROPERTY_CONFIG_VDEC_ENTROPY 0x1204004
@@ -117,6 +118,8 @@
#define HFI_FRAME_NOTCODED 0x7f002000
#define HFI_FRAME_YUV 0x7f004000
#define HFI_UNUSED_PICT 0x10000000
+#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008
+#define HFI_BUFFERFLAG_DROP_FRAME 0x20000000
struct hfi_pkt_hdr {
u32 size;
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c b/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
index b72d503dd740..8d1ce8a19a45 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
@@ -200,14 +200,14 @@ static void iris_hfi_gen1_event_seq_changed(struct iris_inst *inst,
iris_hfi_gen1_read_changed_params(inst, pkt);
- if (inst->state != IRIS_INST_ERROR) {
- reinit_completion(&inst->flush_completion);
+ if (inst->state != IRIS_INST_ERROR && !(inst->sub_state & IRIS_INST_SUB_FIRST_IPSC)) {
flush_pkt.shdr.hdr.size = sizeof(struct hfi_session_flush_pkt);
flush_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
flush_pkt.shdr.session_id = inst->session_id;
flush_pkt.flush_type = HFI_FLUSH_OUTPUT;
- iris_hfi_queue_cmd_write(inst->core, &flush_pkt, flush_pkt.shdr.hdr.size);
+ if (!iris_hfi_queue_cmd_write(inst->core, &flush_pkt, flush_pkt.shdr.hdr.size))
+ inst->flush_responses_pending++;
}
iris_vdec_src_change(inst);
@@ -348,6 +348,10 @@ static void iris_hfi_gen1_session_etb_done(struct iris_inst *inst, void *packet)
struct iris_buffer *buf = NULL;
bool found = false;
+ /* EOS buffer sent via drain won't be in v4l2 buffer list */
+ if (pkt->packet_buffer == 0xdeadb000)
+ return;
+
v4l2_m2m_for_each_src_buf_safe(m2m_ctx, m2m_buffer, n) {
buf = to_iris_buffer(&m2m_buffer->vb);
if (buf->index == pkt->input_tag) {
@@ -408,7 +412,9 @@ static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
flush_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
flush_pkt.shdr.session_id = inst->session_id;
flush_pkt.flush_type = HFI_FLUSH_OUTPUT;
- iris_hfi_queue_cmd_write(core, &flush_pkt, flush_pkt.shdr.hdr.size);
+ if (!iris_hfi_queue_cmd_write(core, &flush_pkt, flush_pkt.shdr.hdr.size))
+ inst->flush_responses_pending++;
+
iris_inst_sub_state_change_drain_last(inst);
return;
@@ -455,7 +461,12 @@ static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
timestamp_us = timestamp_hi;
timestamp_us = (timestamp_us << 32) | timestamp_lo;
} else {
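+ /*
+ * Raise LAST at most once, and only while a resolution change
+ * (DRC) is still pending.
+ */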
- flags |= V4L2_BUF_FLAG_LAST;
+ if (pkt->stream_id == 1 && !inst->last_buffer_dequeued) {
+ if (iris_drc_pending(inst)) {
+ flags |= V4L2_BUF_FLAG_LAST;
+ inst->last_buffer_dequeued = true;
+ }
+ }
}
buf->timestamp = timestamp_us;
@@ -481,6 +492,12 @@ static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
buf->attr |= BUF_ATTR_DEQUEUED;
buf->attr |= BUF_ATTR_BUFFER_DONE;
+ if (hfi_flags & HFI_BUFFERFLAG_DATACORRUPT)
+ flags |= V4L2_BUF_FLAG_ERROR;
+
+ if (hfi_flags & HFI_BUFFERFLAG_DROP_FRAME)
+ flags |= V4L2_BUF_FLAG_ERROR;
+
buf->flags |= flags;
iris_vb2_buffer_done(inst, buf);
@@ -558,7 +575,6 @@ static void iris_hfi_gen1_handle_response(struct iris_core *core, void *response
const struct iris_hfi_gen1_response_pkt_info *pkt_info;
struct device *dev = core->dev;
struct hfi_session_pkt *pkt;
- struct completion *done;
struct iris_inst *inst;
bool found = false;
u32 i;
@@ -619,9 +635,12 @@ static void iris_hfi_gen1_handle_response(struct iris_core *core, void *response
if (shdr->error_type != HFI_ERR_NONE)
iris_inst_change_state(inst, IRIS_INST_ERROR);
- done = pkt_info->pkt == HFI_MSG_SESSION_FLUSH ?
- &inst->flush_completion : &inst->completion;
- complete(done);
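+ /*
+ * More than one flush command may be outstanding; wake the
+ * waiter only after every flush response has arrived.
+ */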
+ if (pkt_info->pkt == HFI_MSG_SESSION_FLUSH) {
+ if (!(--inst->flush_responses_pending))
+ complete(&inst->flush_completion);
+ } else {
+ complete(&inst->completion);
+ }
}
mutex_unlock(&inst->lock);
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c b/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
index a908b41e2868..7ca5ae13d62b 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
@@ -178,7 +178,7 @@ static int iris_hfi_gen2_set_crop_offsets(struct iris_inst *inst)
sizeof(u64));
}
-static int iris_hfi_gen2_set_bit_dpeth(struct iris_inst *inst)
+static int iris_hfi_gen2_set_bit_depth(struct iris_inst *inst)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
@@ -295,7 +295,19 @@ static int iris_hfi_gen2_set_profile(struct iris_inst *inst)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- u32 profile = inst->fw_caps[PROFILE].value;
+ u32 profile = 0;
+
+ switch (inst->codec) {
+ case V4L2_PIX_FMT_HEVC:
+ profile = inst->fw_caps[PROFILE_HEVC].value;
+ break;
+ case V4L2_PIX_FMT_VP9:
+ profile = inst->fw_caps[PROFILE_VP9].value;
+ break;
+ case V4L2_PIX_FMT_H264:
+ profile = inst->fw_caps[PROFILE_H264].value;
+ break;
+ }
inst_hfi_gen2->src_subcr_params.profile = profile;
@@ -312,7 +324,19 @@ static int iris_hfi_gen2_set_level(struct iris_inst *inst)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- u32 level = inst->fw_caps[LEVEL].value;
+ u32 level = 0;
+
+ switch (inst->codec) {
+ case V4L2_PIX_FMT_HEVC:
+ level = inst->fw_caps[LEVEL_HEVC].value;
+ break;
+ case V4L2_PIX_FMT_VP9:
+ level = inst->fw_caps[LEVEL_VP9].value;
+ break;
+ case V4L2_PIX_FMT_H264:
+ level = inst->fw_caps[LEVEL_H264].value;
+ break;
+ }
inst_hfi_gen2->src_subcr_params.level = level;
@@ -367,18 +391,35 @@ static int iris_hfi_gen2_set_linear_stride_scanline(struct iris_inst *inst)
sizeof(u64));
}
+static int iris_hfi_gen2_set_tier(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 tier = inst->fw_caps[TIER].value;
+
+ inst_hfi_gen2->src_subcr_params.tier = tier;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_TIER,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32_ENUM,
+ &tier,
+ sizeof(u32));
+}
+
static int iris_hfi_gen2_session_set_config_params(struct iris_inst *inst, u32 plane)
{
struct iris_core *core = inst->core;
- u32 config_params_size, i, j;
- const u32 *config_params;
+ u32 config_params_size = 0, i, j;
+ const u32 *config_params = NULL;
int ret;
static const struct iris_hfi_prop_type_handle prop_type_handle_arr[] = {
{HFI_PROP_BITSTREAM_RESOLUTION, iris_hfi_gen2_set_bitstream_resolution },
{HFI_PROP_CROP_OFFSETS, iris_hfi_gen2_set_crop_offsets },
{HFI_PROP_CODED_FRAMES, iris_hfi_gen2_set_coded_frames },
- {HFI_PROP_LUMA_CHROMA_BIT_DEPTH, iris_hfi_gen2_set_bit_dpeth },
+ {HFI_PROP_LUMA_CHROMA_BIT_DEPTH, iris_hfi_gen2_set_bit_depth },
{HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT, iris_hfi_gen2_set_min_output_count },
{HFI_PROP_PIC_ORDER_CNT_TYPE, iris_hfi_gen2_set_picture_order_count },
{HFI_PROP_SIGNAL_COLOR_INFO, iris_hfi_gen2_set_colorspace },
@@ -386,11 +427,27 @@ static int iris_hfi_gen2_session_set_config_params(struct iris_inst *inst, u32 p
{HFI_PROP_LEVEL, iris_hfi_gen2_set_level },
{HFI_PROP_COLOR_FORMAT, iris_hfi_gen2_set_colorformat },
{HFI_PROP_LINEAR_STRIDE_SCANLINE, iris_hfi_gen2_set_linear_stride_scanline },
+ {HFI_PROP_TIER, iris_hfi_gen2_set_tier },
};
if (V4L2_TYPE_IS_OUTPUT(plane)) {
- config_params = core->iris_platform_data->input_config_params;
- config_params_size = core->iris_platform_data->input_config_params_size;
+ switch (inst->codec) {
+ case V4L2_PIX_FMT_H264:
+ config_params = core->iris_platform_data->input_config_params_default;
+ config_params_size =
+ core->iris_platform_data->input_config_params_default_size;
+ break;
+ case V4L2_PIX_FMT_HEVC:
+ config_params = core->iris_platform_data->input_config_params_hevc;
+ config_params_size =
+ core->iris_platform_data->input_config_params_hevc_size;
+ break;
+ case V4L2_PIX_FMT_VP9:
+ config_params = core->iris_platform_data->input_config_params_vp9;
+ config_params_size =
+ core->iris_platform_data->input_config_params_vp9_size;
+ break;
+ }
} else {
config_params = core->iris_platform_data->output_config_params;
config_params_size = core->iris_platform_data->output_config_params_size;
@@ -416,7 +473,19 @@ static int iris_hfi_gen2_session_set_config_params(struct iris_inst *inst, u32 p
static int iris_hfi_gen2_session_set_codec(struct iris_inst *inst)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 codec = HFI_CODEC_DECODE_AVC;
+ u32 codec = 0;
+
+ switch (inst->codec) {
+ case V4L2_PIX_FMT_H264:
+ codec = HFI_CODEC_DECODE_AVC;
+ break;
+ case V4L2_PIX_FMT_HEVC:
+ codec = HFI_CODEC_DECODE_HEVC;
+ break;
+ case V4L2_PIX_FMT_VP9:
+ codec = HFI_CODEC_DECODE_VP9;
+ break;
+ }
iris_hfi_gen2_packet_session_property(inst,
HFI_PROP_CODEC,
@@ -548,8 +617,8 @@ static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plan
struct hfi_subscription_params subsc_params;
u32 prop_type, payload_size, payload_type;
struct iris_core *core = inst->core;
- const u32 *change_param;
- u32 change_param_size;
+ const u32 *change_param = NULL;
+ u32 change_param_size = 0;
u32 payload[32] = {0};
u32 hfi_port = 0, i;
int ret;
@@ -560,8 +629,23 @@ static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plan
return 0;
}
- change_param = core->iris_platform_data->input_config_params;
- change_param_size = core->iris_platform_data->input_config_params_size;
+ switch (inst->codec) {
+ case V4L2_PIX_FMT_H264:
+ change_param = core->iris_platform_data->input_config_params_default;
+ change_param_size =
+ core->iris_platform_data->input_config_params_default_size;
+ break;
+ case V4L2_PIX_FMT_HEVC:
+ change_param = core->iris_platform_data->input_config_params_hevc;
+ change_param_size =
+ core->iris_platform_data->input_config_params_hevc_size;
+ break;
+ case V4L2_PIX_FMT_VP9:
+ change_param = core->iris_platform_data->input_config_params_vp9;
+ change_param_size =
+ core->iris_platform_data->input_config_params_vp9_size;
+ break;
+ }
payload[0] = HFI_MODE_PORT_SETTINGS_CHANGE;
@@ -608,6 +692,11 @@ static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plan
payload_size = sizeof(u32);
payload_type = HFI_PAYLOAD_U32;
break;
+ case HFI_PROP_LUMA_CHROMA_BIT_DEPTH:
+ payload[0] = subsc_params.bit_depth;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
case HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT:
payload[0] = subsc_params.fw_min_count;
payload_size = sizeof(u32);
@@ -633,6 +722,11 @@ static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plan
payload_size = sizeof(u32);
payload_type = HFI_PAYLOAD_U32;
break;
+ case HFI_PROP_TIER:
+ payload[0] = subsc_params.tier;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
default:
prop_type = 0;
ret = -EINVAL;
@@ -659,8 +753,8 @@ static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plan
static int iris_hfi_gen2_subscribe_property(struct iris_inst *inst, u32 plane)
{
struct iris_core *core = inst->core;
- u32 subscribe_prop_size, i;
- const u32 *subcribe_prop;
+ u32 subscribe_prop_size = 0, i;
+ const u32 *subcribe_prop = NULL;
u32 payload[32] = {0};
payload[0] = HFI_MODE_PROPERTY;
@@ -669,8 +763,23 @@ static int iris_hfi_gen2_subscribe_property(struct iris_inst *inst, u32 plane)
subscribe_prop_size = core->iris_platform_data->dec_input_prop_size;
subcribe_prop = core->iris_platform_data->dec_input_prop;
} else {
- subscribe_prop_size = core->iris_platform_data->dec_output_prop_size;
- subcribe_prop = core->iris_platform_data->dec_output_prop;
+ switch (inst->codec) {
+ case V4L2_PIX_FMT_H264:
+ subcribe_prop = core->iris_platform_data->dec_output_prop_avc;
+ subscribe_prop_size =
+ core->iris_platform_data->dec_output_prop_avc_size;
+ break;
+ case V4L2_PIX_FMT_HEVC:
+ subcribe_prop = core->iris_platform_data->dec_output_prop_hevc;
+ subscribe_prop_size =
+ core->iris_platform_data->dec_output_prop_hevc_size;
+ break;
+ case V4L2_PIX_FMT_VP9:
+ subcribe_prop = core->iris_platform_data->dec_output_prop_vp9;
+ subscribe_prop_size =
+ core->iris_platform_data->dec_output_prop_vp9_size;
+ break;
+ }
}
for (i = 0; i < subscribe_prop_size; i++)
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h b/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
index 806f8bb7f505..5f13dc11bea5 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
@@ -46,6 +46,7 @@
#define HFI_PROP_CROP_OFFSETS 0x03000105
#define HFI_PROP_PROFILE 0x03000107
#define HFI_PROP_LEVEL 0x03000108
+#define HFI_PROP_TIER 0x03000109
#define HFI_PROP_STAGE 0x0300010a
#define HFI_PROP_PIPE 0x0300010b
#define HFI_PROP_LUMA_CHROMA_BIT_DEPTH 0x0300010f
@@ -104,6 +105,9 @@ enum hfi_color_format {
enum hfi_codec_type {
HFI_CODEC_DECODE_AVC = 1,
HFI_CODEC_ENCODE_AVC = 2,
+ HFI_CODEC_DECODE_HEVC = 3,
+ HFI_CODEC_ENCODE_HEVC = 4,
+ HFI_CODEC_DECODE_VP9 = 5,
};
enum hfi_picture_type {
@@ -113,6 +117,7 @@ enum hfi_picture_type {
HFI_PICTURE_I = 0x00000008,
HFI_PICTURE_CRA = 0x00000010,
HFI_PICTURE_BLA = 0x00000020,
+ HFI_PICTURE_NOSHOW = 0x00000040,
};
enum hfi_buffer_type {
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c b/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
index b75a01641d5d..a8c30fc5c0d0 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
@@ -91,7 +91,9 @@ static int iris_hfi_gen2_get_driver_buffer_flags(struct iris_inst *inst, u32 hfi
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
u32 driver_flags = 0;
- if (inst_hfi_gen2->hfi_frame_info.picture_type & keyframe)
+ if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_PICTURE_NOSHOW)
+ driver_flags |= V4L2_BUF_FLAG_ERROR;
+ else if (inst_hfi_gen2->hfi_frame_info.picture_type & keyframe)
driver_flags |= V4L2_BUF_FLAG_KEYFRAME;
else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_PICTURE_P)
driver_flags |= V4L2_BUF_FLAG_PFRAME;
@@ -265,7 +267,8 @@ static int iris_hfi_gen2_handle_system_error(struct iris_core *core,
{
struct iris_inst *instance;
- dev_err(core->dev, "received system error of type %#x\n", pkt->type);
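+ /* A system error may be signalled without an accompanying packet. */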
+ if (pkt)
+ dev_err(core->dev, "received system error of type %#x\n", pkt->type);
core->state = IRIS_CORE_ERROR;
@@ -377,6 +380,11 @@ static int iris_hfi_gen2_handle_output_buffer(struct iris_inst *inst,
buf->flags = iris_hfi_gen2_get_driver_buffer_flags(inst, hfi_buffer->flags);
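+ /*
+ * A zero-length capture buffer mid-stream that is not the LAST
+ * buffer carries no decoded frame; flag it as an error.
+ */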
+ if (!buf->data_size && inst->state == IRIS_INST_STREAMING &&
+ !(hfi_buffer->flags & HFI_BUF_FW_FLAG_LAST)) {
+ buf->flags |= V4L2_BUF_FLAG_ERROR;
+ }
+
return 0;
}
@@ -563,9 +571,23 @@ static void iris_hfi_gen2_read_input_subcr_params(struct iris_inst *inst)
inst->crop.width = pixmp_ip->width -
((subsc_params.crop_offsets[1] >> 16) & 0xFFFF) - inst->crop.left;
- inst->fw_caps[PROFILE].value = subsc_params.profile;
- inst->fw_caps[LEVEL].value = subsc_params.level;
+ switch (inst->codec) {
+ case V4L2_PIX_FMT_HEVC:
+ inst->fw_caps[PROFILE_HEVC].value = subsc_params.profile;
+ inst->fw_caps[LEVEL_HEVC].value = subsc_params.level;
+ break;
+ case V4L2_PIX_FMT_VP9:
+ inst->fw_caps[PROFILE_VP9].value = subsc_params.profile;
+ inst->fw_caps[LEVEL_VP9].value = subsc_params.level;
+ break;
+ case V4L2_PIX_FMT_H264:
+ inst->fw_caps[PROFILE_H264].value = subsc_params.profile;
+ inst->fw_caps[LEVEL_H264].value = subsc_params.level;
+ break;
+ }
+
inst->fw_caps[POC].value = subsc_params.pic_order_cnt;
+ inst->fw_caps[TIER].value = subsc_params.tier;
if (subsc_params.bit_depth != BIT_DEPTH_8 ||
!(subsc_params.coded_frames & HFI_BITMASK_FRAME_MBS_ONLY_FLAG)) {
@@ -636,9 +658,6 @@ static int iris_hfi_gen2_handle_session_property(struct iris_inst *inst,
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- if (pkt->port != HFI_PORT_BITSTREAM)
- return 0;
-
if (pkt->flags & HFI_FW_FLAGS_INFORMATION)
return 0;
@@ -650,6 +669,9 @@ static int iris_hfi_gen2_handle_session_property(struct iris_inst *inst,
inst_hfi_gen2->src_subcr_params.crop_offsets[0] = pkt->payload[0];
inst_hfi_gen2->src_subcr_params.crop_offsets[1] = pkt->payload[1];
break;
+ case HFI_PROP_LUMA_CHROMA_BIT_DEPTH:
+ inst_hfi_gen2->src_subcr_params.bit_depth = pkt->payload[0];
+ break;
case HFI_PROP_CODED_FRAMES:
inst_hfi_gen2->src_subcr_params.coded_frames = pkt->payload[0];
break;
@@ -668,6 +690,9 @@ static int iris_hfi_gen2_handle_session_property(struct iris_inst *inst,
case HFI_PROP_LEVEL:
inst_hfi_gen2->src_subcr_params.level = pkt->payload[0];
break;
+ case HFI_PROP_TIER:
+ inst_hfi_gen2->src_subcr_params.tier = pkt->payload[0];
+ break;
case HFI_PROP_PICTURE_TYPE:
inst_hfi_gen2->hfi_frame_info.picture_type = pkt->payload[0];
break;
@@ -791,8 +816,21 @@ static void iris_hfi_gen2_init_src_change_param(struct iris_inst *inst)
full_range, video_format,
video_signal_type_present_flag);
- subsc_params->profile = inst->fw_caps[PROFILE].value;
- subsc_params->level = inst->fw_caps[LEVEL].value;
+ switch (inst->codec) {
+ case V4L2_PIX_FMT_HEVC:
+ subsc_params->profile = inst->fw_caps[PROFILE_HEVC].value;
+ subsc_params->level = inst->fw_caps[LEVEL_HEVC].value;
+ break;
+ case V4L2_PIX_FMT_VP9:
+ subsc_params->profile = inst->fw_caps[PROFILE_VP9].value;
+ subsc_params->level = inst->fw_caps[LEVEL_VP9].value;
+ break;
+ case V4L2_PIX_FMT_H264:
+ subsc_params->profile = inst->fw_caps[PROFILE_H264].value;
+ subsc_params->level = inst->fw_caps[LEVEL_H264].value;
+ break;
+ }
+
subsc_params->pic_order_cnt = inst->fw_caps[POC].value;
subsc_params->bit_depth = inst->fw_caps[BIT_DEPTH].value;
if (inst->fw_caps[CODED_FRAMES].value ==
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_queue.c b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
index fac7df0c4d1a..221dcd09e1e1 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_queue.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
@@ -113,7 +113,7 @@ int iris_hfi_queue_cmd_write_locked(struct iris_core *core, void *pkt, u32 pkt_s
{
struct iris_iface_q_info *q_info = &core->command_queue;
- if (core->state == IRIS_CORE_ERROR)
+ if (core->state == IRIS_CORE_ERROR || core->state == IRIS_CORE_DEINIT)
return -EINVAL;
if (!iris_hfi_queue_write(q_info, pkt, pkt_size)) {
diff --git a/drivers/media/platform/qcom/iris/iris_instance.h b/drivers/media/platform/qcom/iris/iris_instance.h
index caa3c6507006..0e1f5799b72d 100644
--- a/drivers/media/platform/qcom/iris/iris_instance.h
+++ b/drivers/media/platform/qcom/iris/iris_instance.h
@@ -27,6 +27,7 @@
* @crop: structure of crop info
* @completion: structure of signal completions
* @flush_completion: structure of signal completions for flush cmd
+ * @flush_responses_pending: counter to track number of pending flush responses
* @fw_caps: array of supported instance firmware capabilities
* @buffers: array of different iris buffers
* @fw_min_count: minimum count of buffers needed by fw
@@ -42,6 +43,8 @@
* @sequence_out: a sequence counter for output queue
* @tss: timestamp metadata
* @metadata_idx: index for metadata buffer
+ * @codec: codec type
+ * @last_buffer_dequeued: flag to indicate that the last buffer has been sent by the driver
*/
struct iris_inst {
@@ -57,6 +60,7 @@ struct iris_inst {
struct iris_hfi_rect_desc crop;
struct completion completion;
struct completion flush_completion;
+ u32 flush_responses_pending;
struct platform_inst_fw_cap fw_caps[INST_FW_CAP_MAX];
struct iris_buffers buffers[BUF_TYPE_MAX];
u32 fw_min_count;
@@ -72,6 +76,8 @@ struct iris_inst {
u32 sequence_out;
struct iris_ts_metadata tss[VIDEO_MAX_FRAME];
u32 metadata_idx;
+ u32 codec;
+ bool last_buffer_dequeued;
};
#endif
diff --git a/drivers/media/platform/qcom/iris/iris_platform_common.h b/drivers/media/platform/qcom/iris/iris_platform_common.h
index ac76d9e1ef9c..adafdce8a856 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_common.h
+++ b/drivers/media/platform/qcom/iris/iris_platform_common.h
@@ -21,6 +21,7 @@ struct iris_inst;
#define DEFAULT_MAX_HOST_BUF_COUNT 64
#define DEFAULT_MAX_HOST_BURST_BUF_COUNT 256
#define DEFAULT_FPS 30
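+/* number of 16x16 macroblocks in a worst-case 8K (8192x4352) frame */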
+#define NUM_MBS_8K ((8192 * 4352) / 256)
enum stage_type {
STAGE_1 = 1,
@@ -80,8 +81,12 @@ struct platform_inst_caps {
};
enum platform_inst_fw_cap_type {
- PROFILE = 1,
- LEVEL,
+ PROFILE_H264 = 1,
+ PROFILE_HEVC,
+ PROFILE_VP9,
+ LEVEL_H264,
+ LEVEL_HEVC,
+ LEVEL_VP9,
INPUT_BUF_HOST_MAX_COUNT,
STAGE,
PIPE,
@@ -89,7 +94,7 @@ enum platform_inst_fw_cap_type {
CODED_FRAMES,
BIT_DEPTH,
RAP_FRAME,
- DEBLOCK,
+ TIER,
INST_FW_CAP_MAX,
};
@@ -172,15 +177,24 @@ struct iris_platform_data {
struct ubwc_config_data *ubwc_config;
u32 num_vpp_pipe;
u32 max_session_count;
+ /* maximum number of macroblocks per frame supported by the core */
u32 max_core_mbpf;
- const u32 *input_config_params;
- unsigned int input_config_params_size;
+ const u32 *input_config_params_default;
+ unsigned int input_config_params_default_size;
+ const u32 *input_config_params_hevc;
+ unsigned int input_config_params_hevc_size;
+ const u32 *input_config_params_vp9;
+ unsigned int input_config_params_vp9_size;
const u32 *output_config_params;
unsigned int output_config_params_size;
const u32 *dec_input_prop;
unsigned int dec_input_prop_size;
- const u32 *dec_output_prop;
- unsigned int dec_output_prop_size;
+ const u32 *dec_output_prop_avc;
+ unsigned int dec_output_prop_avc_size;
+ const u32 *dec_output_prop_hevc;
+ unsigned int dec_output_prop_hevc_size;
+ const u32 *dec_output_prop_vp9;
+ unsigned int dec_output_prop_vp9_size;
const u32 *dec_ip_int_buf_tbl;
unsigned int dec_ip_int_buf_tbl_size;
const u32 *dec_op_int_buf_tbl;
diff --git a/drivers/media/platform/qcom/iris/iris_platform_gen2.c b/drivers/media/platform/qcom/iris/iris_platform_gen2.c
index 1e69ba15db0f..d3026b2bcb70 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_gen2.c
+++ b/drivers/media/platform/qcom/iris/iris_platform_gen2.c
@@ -17,7 +17,7 @@
static struct platform_inst_fw_cap inst_fw_cap_sm8550[] = {
{
- .cap_id = PROFILE,
+ .cap_id = PROFILE_H264,
.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
.max = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH,
.step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
@@ -31,7 +31,29 @@ static struct platform_inst_fw_cap inst_fw_cap_sm8550[] = {
.set = iris_set_u32_enum,
},
{
- .cap_id = LEVEL,
+ .cap_id = PROFILE_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE),
+ .value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = PROFILE_VP9,
+ .min = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ .max = V4L2_MPEG_VIDEO_VP9_PROFILE_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_VP9_PROFILE_0) |
+ BIT(V4L2_MPEG_VIDEO_VP9_PROFILE_2),
+ .value = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = LEVEL_H264,
.min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
.max = V4L2_MPEG_VIDEO_H264_LEVEL_6_2,
.step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
@@ -60,6 +82,60 @@ static struct platform_inst_fw_cap inst_fw_cap_sm8550[] = {
.set = iris_set_u32_enum,
},
{
+ .cap_id = LEVEL_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .max = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2),
+ .value = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = LEVEL_VP9,
+ .min = V4L2_MPEG_VIDEO_VP9_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_VP9_LEVEL_6_0,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_6_0),
+ .value = V4L2_MPEG_VIDEO_VP9_LEVEL_6_0,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = TIER,
+ .min = V4L2_MPEG_VIDEO_HEVC_TIER_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_TIER_HIGH,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_TIER_HIGH),
+ .value = V4L2_MPEG_VIDEO_HEVC_TIER_HIGH,
+ .hfi_id = HFI_PROP_TIER,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
.cap_id = INPUT_BUF_HOST_MAX_COUNT,
.min = DEFAULT_MAX_HOST_BUF_COUNT,
.max = DEFAULT_MAX_HOST_BURST_BUF_COUNT,
@@ -181,9 +257,10 @@ static struct tz_cp_config tz_cp_config_sm8550 = {
.cp_nonpixel_size = 0x24800000,
};
-static const u32 sm8550_vdec_input_config_params[] = {
+static const u32 sm8550_vdec_input_config_params_default[] = {
HFI_PROP_BITSTREAM_RESOLUTION,
HFI_PROP_CROP_OFFSETS,
+ HFI_PROP_LUMA_CHROMA_BIT_DEPTH,
HFI_PROP_CODED_FRAMES,
HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT,
HFI_PROP_PIC_ORDER_CNT_TYPE,
@@ -192,6 +269,26 @@ static const u32 sm8550_vdec_input_config_params[] = {
HFI_PROP_SIGNAL_COLOR_INFO,
};
+static const u32 sm8550_vdec_input_config_param_hevc[] = {
+ HFI_PROP_BITSTREAM_RESOLUTION,
+ HFI_PROP_CROP_OFFSETS,
+ HFI_PROP_LUMA_CHROMA_BIT_DEPTH,
+ HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT,
+ HFI_PROP_PROFILE,
+ HFI_PROP_LEVEL,
+ HFI_PROP_TIER,
+ HFI_PROP_SIGNAL_COLOR_INFO,
+};
+
+static const u32 sm8550_vdec_input_config_param_vp9[] = {
+ HFI_PROP_BITSTREAM_RESOLUTION,
+ HFI_PROP_CROP_OFFSETS,
+ HFI_PROP_LUMA_CHROMA_BIT_DEPTH,
+ HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT,
+ HFI_PROP_PROFILE,
+ HFI_PROP_LEVEL,
+};
+
static const u32 sm8550_vdec_output_config_params[] = {
HFI_PROP_COLOR_FORMAT,
HFI_PROP_LINEAR_STRIDE_SCANLINE,
@@ -201,11 +298,19 @@ static const u32 sm8550_vdec_subscribe_input_properties[] = {
HFI_PROP_NO_OUTPUT,
};
-static const u32 sm8550_vdec_subscribe_output_properties[] = {
+static const u32 sm8550_vdec_subscribe_output_properties_avc[] = {
HFI_PROP_PICTURE_TYPE,
HFI_PROP_CABAC_SESSION,
};
+static const u32 sm8550_vdec_subscribe_output_properties_hevc[] = {
+ HFI_PROP_PICTURE_TYPE,
+};
+
+static const u32 sm8550_vdec_subscribe_output_properties_vp9[] = {
+ HFI_PROP_PICTURE_TYPE,
+};
+
static const u32 sm8550_dec_ip_int_buf_tbl[] = {
BUF_BIN,
BUF_COMV,
@@ -248,19 +353,34 @@ struct iris_platform_data sm8550_data = {
.ubwc_config = &ubwc_config_sm8550,
.num_vpp_pipe = 4,
.max_session_count = 16,
- .max_core_mbpf = ((8192 * 4352) / 256) * 2,
- .input_config_params =
- sm8550_vdec_input_config_params,
- .input_config_params_size =
- ARRAY_SIZE(sm8550_vdec_input_config_params),
+ .max_core_mbpf = NUM_MBS_8K * 2,
+ .input_config_params_default =
+ sm8550_vdec_input_config_params_default,
+ .input_config_params_default_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_params_default),
+ .input_config_params_hevc =
+ sm8550_vdec_input_config_param_hevc,
+ .input_config_params_hevc_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_param_hevc),
+ .input_config_params_vp9 =
+ sm8550_vdec_input_config_param_vp9,
+ .input_config_params_vp9_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_param_vp9),
.output_config_params =
sm8550_vdec_output_config_params,
.output_config_params_size =
ARRAY_SIZE(sm8550_vdec_output_config_params),
.dec_input_prop = sm8550_vdec_subscribe_input_properties,
.dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
- .dec_output_prop = sm8550_vdec_subscribe_output_properties,
- .dec_output_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_output_properties),
+ .dec_output_prop_avc = sm8550_vdec_subscribe_output_properties_avc,
+ .dec_output_prop_avc_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_avc),
+ .dec_output_prop_hevc = sm8550_vdec_subscribe_output_properties_hevc,
+ .dec_output_prop_hevc_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_hevc),
+ .dec_output_prop_vp9 = sm8550_vdec_subscribe_output_properties_vp9,
+ .dec_output_prop_vp9_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_vp9),
.dec_ip_int_buf_tbl = sm8550_dec_ip_int_buf_tbl,
.dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
@@ -308,19 +428,34 @@ struct iris_platform_data sm8650_data = {
.ubwc_config = &ubwc_config_sm8550,
.num_vpp_pipe = 4,
.max_session_count = 16,
- .max_core_mbpf = ((8192 * 4352) / 256) * 2,
- .input_config_params =
- sm8550_vdec_input_config_params,
- .input_config_params_size =
- ARRAY_SIZE(sm8550_vdec_input_config_params),
+ .max_core_mbpf = NUM_MBS_8K * 2,
+ .input_config_params_default =
+ sm8550_vdec_input_config_params_default,
+ .input_config_params_default_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_params_default),
+ .input_config_params_hevc =
+ sm8550_vdec_input_config_param_hevc,
+ .input_config_params_hevc_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_param_hevc),
+ .input_config_params_vp9 =
+ sm8550_vdec_input_config_param_vp9,
+ .input_config_params_vp9_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_param_vp9),
.output_config_params =
sm8550_vdec_output_config_params,
.output_config_params_size =
ARRAY_SIZE(sm8550_vdec_output_config_params),
.dec_input_prop = sm8550_vdec_subscribe_input_properties,
.dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
- .dec_output_prop = sm8550_vdec_subscribe_output_properties,
- .dec_output_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_output_properties),
+ .dec_output_prop_avc = sm8550_vdec_subscribe_output_properties_avc,
+ .dec_output_prop_avc_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_avc),
+ .dec_output_prop_hevc = sm8550_vdec_subscribe_output_properties_hevc,
+ .dec_output_prop_hevc_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_hevc),
+ .dec_output_prop_vp9 = sm8550_vdec_subscribe_output_properties_vp9,
+ .dec_output_prop_vp9_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_vp9),
.dec_ip_int_buf_tbl = sm8550_dec_ip_int_buf_tbl,
.dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
@@ -365,18 +500,33 @@ struct iris_platform_data qcs8300_data = {
.num_vpp_pipe = 2,
.max_session_count = 16,
.max_core_mbpf = ((4096 * 2176) / 256) * 4,
- .input_config_params =
- sm8550_vdec_input_config_params,
- .input_config_params_size =
- ARRAY_SIZE(sm8550_vdec_input_config_params),
+ .input_config_params_default =
+ sm8550_vdec_input_config_params_default,
+ .input_config_params_default_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_params_default),
+ .input_config_params_hevc =
+ sm8550_vdec_input_config_param_hevc,
+ .input_config_params_hevc_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_param_hevc),
+ .input_config_params_vp9 =
+ sm8550_vdec_input_config_param_vp9,
+ .input_config_params_vp9_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_param_vp9),
.output_config_params =
sm8550_vdec_output_config_params,
.output_config_params_size =
ARRAY_SIZE(sm8550_vdec_output_config_params),
.dec_input_prop = sm8550_vdec_subscribe_input_properties,
.dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
- .dec_output_prop = sm8550_vdec_subscribe_output_properties,
- .dec_output_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_output_properties),
+ .dec_output_prop_avc = sm8550_vdec_subscribe_output_properties_avc,
+ .dec_output_prop_avc_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_avc),
+ .dec_output_prop_hevc = sm8550_vdec_subscribe_output_properties_hevc,
+ .dec_output_prop_hevc_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_hevc),
+ .dec_output_prop_vp9 = sm8550_vdec_subscribe_output_properties_vp9,
+ .dec_output_prop_vp9_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_vp9),
.dec_ip_int_buf_tbl = sm8550_dec_ip_int_buf_tbl,
.dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
diff --git a/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h b/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
index f82355d72fcf..a8d66ed388a3 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
+++ b/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
@@ -5,49 +5,125 @@
static struct platform_inst_fw_cap inst_fw_cap_qcs8300[] = {
{
- .cap_id = PROFILE,
+ .cap_id = PROFILE_H264,
.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
.max = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH,
.step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
- BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH) |
- BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
- BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
- BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH),
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH),
.value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
.hfi_id = HFI_PROP_PROFILE,
.flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
.set = iris_set_u32_enum,
},
{
- .cap_id = LEVEL,
+ .cap_id = PROFILE_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE),
+ .value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = PROFILE_VP9,
+ .min = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ .max = V4L2_MPEG_VIDEO_VP9_PROFILE_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_VP9_PROFILE_0) |
+ BIT(V4L2_MPEG_VIDEO_VP9_PROFILE_2),
+ .value = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = LEVEL_H264,
.min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
.max = V4L2_MPEG_VIDEO_H264_LEVEL_6_2,
.step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_1) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_2) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_0) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_1) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_2),
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_2),
.value = V4L2_MPEG_VIDEO_H264_LEVEL_6_1,
.hfi_id = HFI_PROP_LEVEL,
.flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
.set = iris_set_u32_enum,
},
{
+ .cap_id = LEVEL_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .max = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2),
+ .value = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = LEVEL_VP9,
+ .min = V4L2_MPEG_VIDEO_VP9_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_VP9_LEVEL_6_0,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_VP9_LEVEL_6_0),
+ .value = V4L2_MPEG_VIDEO_VP9_LEVEL_6_0,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = TIER,
+ .min = V4L2_MPEG_VIDEO_HEVC_TIER_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_TIER_HIGH,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_TIER_HIGH),
+ .value = V4L2_MPEG_VIDEO_HEVC_TIER_HIGH,
+ .hfi_id = HFI_PROP_TIER,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
.cap_id = INPUT_BUF_HOST_MAX_COUNT,
.min = DEFAULT_MAX_HOST_BUF_COUNT,
.max = DEFAULT_MAX_HOST_BURST_BUF_COUNT,
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8250.c b/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
index 5c86fd7b7b6f..8d0816a67ae0 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
+++ b/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
@@ -30,15 +30,6 @@ static struct platform_inst_fw_cap inst_fw_cap_sm8250[] = {
.hfi_id = HFI_PROPERTY_PARAM_WORK_MODE,
.set = iris_set_stage,
},
- {
- .cap_id = DEBLOCK,
- .min = 0,
- .max = 1,
- .step_or_mask = 1,
- .value = 0,
- .hfi_id = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
- .set = iris_set_u32,
- },
};
static struct platform_inst_caps platform_inst_cap_sm8250 = {
@@ -136,10 +127,10 @@ struct iris_platform_data sm8250_data = {
.hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
.num_vpp_pipe = 4,
.max_session_count = 16,
- .max_core_mbpf = (8192 * 4352) / 256,
- .input_config_params =
+ .max_core_mbpf = NUM_MBS_8K,
+ .input_config_params_default =
sm8250_vdec_input_config_param_default,
- .input_config_params_size =
+ .input_config_params_default_size =
ARRAY_SIZE(sm8250_vdec_input_config_param_default),
.dec_ip_int_buf_tbl = sm8250_dec_ip_int_buf_tbl,
diff --git a/drivers/media/platform/qcom/iris/iris_probe.c b/drivers/media/platform/qcom/iris/iris_probe.c
index 9a7ce142f700..4e6e92357968 100644
--- a/drivers/media/platform/qcom/iris/iris_probe.c
+++ b/drivers/media/platform/qcom/iris/iris_probe.c
@@ -53,7 +53,7 @@ static int iris_init_power_domains(struct iris_core *core)
struct dev_pm_domain_attach_data iris_opp_pd_data = {
.pd_names = core->iris_platform_data->opp_pd_tbl,
.num_pd_names = core->iris_platform_data->opp_pd_tbl_size,
- .pd_flags = PD_FLAG_DEV_LINK_ON,
+ .pd_flags = PD_FLAG_DEV_LINK_ON | PD_FLAG_REQUIRED_OPP,
};
ret = devm_pm_domain_attach_list(core->dev, &iris_pd_data, &core->pmdomain_tbl);
diff --git a/drivers/media/platform/qcom/iris/iris_state.c b/drivers/media/platform/qcom/iris/iris_state.c
index 5976e926c83d..104e1687ad39 100644
--- a/drivers/media/platform/qcom/iris/iris_state.c
+++ b/drivers/media/platform/qcom/iris/iris_state.c
@@ -245,7 +245,7 @@ int iris_inst_sub_state_change_pause(struct iris_inst *inst, u32 plane)
return iris_inst_change_sub_state(inst, 0, set_sub_state);
}
-static inline bool iris_drc_pending(struct iris_inst *inst)
+bool iris_drc_pending(struct iris_inst *inst)
{
return inst->sub_state & IRIS_INST_SUB_DRC &&
inst->sub_state & IRIS_INST_SUB_DRC_LAST;
diff --git a/drivers/media/platform/qcom/iris/iris_state.h b/drivers/media/platform/qcom/iris/iris_state.h
index 78c61aac5e7e..e718386dbe04 100644
--- a/drivers/media/platform/qcom/iris/iris_state.h
+++ b/drivers/media/platform/qcom/iris/iris_state.h
@@ -140,5 +140,6 @@ int iris_inst_sub_state_change_drain_last(struct iris_inst *inst);
int iris_inst_sub_state_change_drc_last(struct iris_inst *inst);
int iris_inst_sub_state_change_pause(struct iris_inst *inst, u32 plane);
bool iris_allow_cmd(struct iris_inst *inst, u32 cmd);
+bool iris_drc_pending(struct iris_inst *inst);
#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vb2.c b/drivers/media/platform/qcom/iris/iris_vb2.c
index cdf11feb590b..8b17c7c39487 100644
--- a/drivers/media/platform/qcom/iris/iris_vb2.c
+++ b/drivers/media/platform/qcom/iris/iris_vb2.c
@@ -259,13 +259,14 @@ int iris_vb2_buf_prepare(struct vb2_buffer *vb)
return -EINVAL;
}
- if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
- vb2_plane_size(vb, 0) < iris_get_buffer_size(inst, BUF_OUTPUT))
- return -EINVAL;
- if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
- vb2_plane_size(vb, 0) < iris_get_buffer_size(inst, BUF_INPUT))
- return -EINVAL;
-
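+ /* Skip plane-size checks while a dynamic resolution change is in progress. */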
+ if (!(inst->sub_state & IRIS_INST_SUB_DRC)) {
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ vb2_plane_size(vb, 0) < iris_get_buffer_size(inst, BUF_OUTPUT))
+ return -EINVAL;
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ vb2_plane_size(vb, 0) < iris_get_buffer_size(inst, BUF_INPUT))
+ return -EINVAL;
+ }
return 0;
}
@@ -304,7 +305,7 @@ void iris_vb2_buf_queue(struct vb2_buffer *vb2)
goto exit;
}
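+ /* Do not re-signal EOS once the LAST buffer has been delivered. */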
- if (V4L2_TYPE_IS_CAPTURE(vb2->vb2_queue->type)) {
+ if (!inst->last_buffer_dequeued && V4L2_TYPE_IS_CAPTURE(vb2->vb2_queue->type)) {
if ((inst->sub_state & IRIS_INST_SUB_DRC &&
inst->sub_state & IRIS_INST_SUB_DRC_LAST) ||
(inst->sub_state & IRIS_INST_SUB_DRAIN &&
@@ -318,6 +319,7 @@ void iris_vb2_buf_queue(struct vb2_buffer *vb2)
v4l2_event_queue_fh(&inst->fh, &eos);
v4l2_m2m_mark_stopped(m2m_ctx);
}
+ inst->last_buffer_dequeued = true;
goto exit;
}
}
diff --git a/drivers/media/platform/qcom/iris/iris_vdec.c b/drivers/media/platform/qcom/iris/iris_vdec.c
index 4143acedfc57..d670b51c5839 100644
--- a/drivers/media/platform/qcom/iris/iris_vdec.c
+++ b/drivers/media/platform/qcom/iris/iris_vdec.c
@@ -32,6 +32,7 @@ int iris_vdec_inst_init(struct iris_inst *inst)
f->fmt.pix_mp.width = DEFAULT_WIDTH;
f->fmt.pix_mp.height = DEFAULT_HEIGHT;
f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264;
+ inst->codec = f->fmt.pix_mp.pixelformat;
f->fmt.pix_mp.num_planes = 1;
f->fmt.pix_mp.plane_fmt[0].bytesperline = 0;
f->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_INPUT);
@@ -67,14 +68,67 @@ void iris_vdec_inst_deinit(struct iris_inst *inst)
kfree(inst->fmt_src);
}
+static const struct iris_fmt iris_vdec_formats[] = {
+ [IRIS_FMT_H264] = {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ },
+ [IRIS_FMT_HEVC] = {
+ .pixfmt = V4L2_PIX_FMT_HEVC,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ },
+ [IRIS_FMT_VP9] = {
+ .pixfmt = V4L2_PIX_FMT_VP9,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ },
+};
+
+static const struct iris_fmt *
+find_format(struct iris_inst *inst, u32 pixfmt, u32 type)
+{
+ unsigned int size = ARRAY_SIZE(iris_vdec_formats);
+ const struct iris_fmt *fmt = iris_vdec_formats;
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ if (fmt[i].pixfmt == pixfmt)
+ break;
+ }
+
+ if (i == size || fmt[i].type != type)
+ return NULL;
+
+ return &fmt[i];
+}
+
+static const struct iris_fmt *
+find_format_by_index(struct iris_inst *inst, u32 index, u32 type)
+{
+ const struct iris_fmt *fmt = iris_vdec_formats;
+ unsigned int size = ARRAY_SIZE(iris_vdec_formats);
+
+ if (index >= size || fmt[index].type != type)
+ return NULL;
+
+ return &fmt[index];
+}
+
int iris_vdec_enum_fmt(struct iris_inst *inst, struct v4l2_fmtdesc *f)
{
+ const struct iris_fmt *fmt;
+
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- f->pixelformat = V4L2_PIX_FMT_H264;
+ fmt = find_format_by_index(inst, f->index, f->type);
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixfmt;
f->flags = V4L2_FMT_FLAG_COMPRESSED | V4L2_FMT_FLAG_DYN_RESOLUTION;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (f->index)
+ return -EINVAL;
f->pixelformat = V4L2_PIX_FMT_NV12;
break;
default:
@@ -88,13 +142,15 @@ int iris_vdec_try_fmt(struct iris_inst *inst, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ const struct iris_fmt *fmt;
struct v4l2_format *f_inst;
struct vb2_queue *src_q;
memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
+ fmt = find_format(inst, pixmp->pixelformat, f->type);
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_H264) {
+ if (!fmt) {
f_inst = inst->fmt_src;
f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
f->fmt.pix_mp.height = f_inst->fmt.pix_mp.height;
@@ -102,7 +158,7 @@ int iris_vdec_try_fmt(struct iris_inst *inst, struct v4l2_format *f)
}
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_NV12) {
+ if (!fmt) {
f_inst = inst->fmt_dst;
f->fmt.pix_mp.pixelformat = f_inst->fmt.pix_mp.pixelformat;
f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
@@ -145,13 +201,14 @@ int iris_vdec_s_fmt(struct iris_inst *inst, struct v4l2_format *f)
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_H264)
+ if (!(find_format(inst, f->fmt.pix_mp.pixelformat, f->type)))
return -EINVAL;
fmt = inst->fmt_src;
fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
-
- codec_align = DEFAULT_CODEC_ALIGNMENT;
+ fmt->fmt.pix_mp.pixelformat = f->fmt.pix_mp.pixelformat;
+ inst->codec = fmt->fmt.pix_mp.pixelformat;
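+ /* HEVC buffers align to 32 pixels; H.264 and VP9 align to 16. */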
+ codec_align = inst->codec == V4L2_PIX_FMT_HEVC ? 32 : 16;
fmt->fmt.pix_mp.width = ALIGN(f->fmt.pix_mp.width, codec_align);
fmt->fmt.pix_mp.height = ALIGN(f->fmt.pix_mp.height, codec_align);
fmt->fmt.pix_mp.num_planes = 1;
@@ -171,6 +228,11 @@ int iris_vdec_s_fmt(struct iris_inst *inst, struct v4l2_format *f)
output_fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
output_fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;
+ /* Update the capture format to match the new input width/height */
+ output_fmt->fmt.pix_mp.width = ALIGN(f->fmt.pix_mp.width, 128);
+ output_fmt->fmt.pix_mp.height = ALIGN(f->fmt.pix_mp.height, 32);
+ inst->buffers[BUF_OUTPUT].size = iris_get_buffer_size(inst, BUF_OUTPUT);
+
inst->crop.left = 0;
inst->crop.top = 0;
inst->crop.width = f->fmt.pix_mp.width;
@@ -239,35 +301,6 @@ void iris_vdec_src_change(struct iris_inst *inst)
v4l2_event_queue_fh(&inst->fh, &event);
}
-static int iris_vdec_get_num_queued_buffers(struct iris_inst *inst,
- enum iris_buffer_type type)
-{
- struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
- struct v4l2_m2m_buffer *buffer, *n;
- struct iris_buffer *buf;
- u32 count = 0;
-
- switch (type) {
- case BUF_INPUT:
- v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
- buf = to_iris_buffer(&buffer->vb);
- if (!(buf->attr & BUF_ATTR_QUEUED))
- continue;
- count++;
- }
- return count;
- case BUF_OUTPUT:
- v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
- buf = to_iris_buffer(&buffer->vb);
- if (!(buf->attr & BUF_ATTR_QUEUED))
- continue;
- count++;
- }
- return count;
- default:
- return count;
- }
-}
static void iris_vdec_flush_deferred_buffers(struct iris_inst *inst,
enum iris_buffer_type type)
@@ -316,7 +349,6 @@ int iris_vdec_session_streamoff(struct iris_inst *inst, u32 plane)
{
const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
enum iris_buffer_type buffer_type;
- u32 count;
int ret;
switch (plane) {
@@ -334,12 +366,6 @@ int iris_vdec_session_streamoff(struct iris_inst *inst, u32 plane)
if (ret)
goto error;
- count = iris_vdec_get_num_queued_buffers(inst, buffer_type);
- if (count) {
- ret = -EINVAL;
- goto error;
- }
-
ret = iris_inst_state_change_streamoff(inst, plane);
if (ret)
goto error;
@@ -408,7 +434,7 @@ int iris_vdec_streamon_input(struct iris_inst *inst)
iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- ret = iris_destroy_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ ret = iris_destroy_dequeued_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
if (ret)
return ret;
@@ -482,6 +508,8 @@ static int iris_vdec_process_streamon_output(struct iris_inst *inst)
if (ret)
return ret;
+ inst->last_buffer_dequeued = false;
+
return iris_inst_change_sub_state(inst, clear_sub_state, 0);
}
@@ -496,7 +524,7 @@ int iris_vdec_streamon_output(struct iris_inst *inst)
iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- ret = iris_destroy_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ ret = iris_destroy_dequeued_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (ret)
return ret;
diff --git a/drivers/media/platform/qcom/iris/iris_vdec.h b/drivers/media/platform/qcom/iris/iris_vdec.h
index b24932dc511a..cd7aab66dc7c 100644
--- a/drivers/media/platform/qcom/iris/iris_vdec.h
+++ b/drivers/media/platform/qcom/iris/iris_vdec.h
@@ -8,6 +8,17 @@
struct iris_inst;
+enum iris_fmt_type {
+ IRIS_FMT_H264,
+ IRIS_FMT_HEVC,
+ IRIS_FMT_VP9,
+};
+
+struct iris_fmt {
+ u32 pixfmt;
+ u32 type;
+};
+
int iris_vdec_inst_init(struct iris_inst *inst);
void iris_vdec_inst_deinit(struct iris_inst *inst);
int iris_vdec_enum_fmt(struct iris_inst *inst, struct v4l2_fmtdesc *f);
diff --git a/drivers/media/platform/qcom/iris/iris_vidc.c b/drivers/media/platform/qcom/iris/iris_vidc.c
index ca0f4e310f77..c417e8c31f80 100644
--- a/drivers/media/platform/qcom/iris/iris_vidc.c
+++ b/drivers/media/platform/qcom/iris/iris_vidc.c
@@ -221,6 +221,33 @@ static void iris_session_close(struct iris_inst *inst)
iris_wait_for_session_response(inst, false);
}
+static void iris_check_num_queued_internal_buffers(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
+ struct iris_buffer *buf, *next;
+ struct iris_buffers *buffers;
+ const u32 *internal_buf_type;
+ u32 internal_buffer_count, i;
+ u32 count = 0;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ }
+
+ for (i = 0; i < internal_buffer_count; i++) {
+ count = 0;
+ buffers = &inst->buffers[internal_buf_type[i]];
+ list_for_each_entry_safe(buf, next, &buffers->list, list)
+ count++;
+ if (count)
+ dev_err(inst->core->dev, "%d buffers of type %d not released",
+ count, internal_buf_type[i]);
+ }
+}
+
int iris_close(struct file *filp)
{
struct iris_inst *inst = iris_get_inst(filp, NULL);
@@ -233,8 +260,10 @@ int iris_close(struct file *filp)
iris_session_close(inst);
iris_inst_change_state(inst, IRIS_INST_DEINIT);
iris_v4l2_fh_deinit(inst);
- iris_destroy_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- iris_destroy_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ iris_destroy_all_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ iris_destroy_all_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ iris_check_num_queued_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ iris_check_num_queued_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
iris_remove_session(inst);
mutex_unlock(&inst->lock);
mutex_destroy(&inst->ctx_q_lock);
@@ -249,9 +278,6 @@ static int iris_enum_fmt(struct file *filp, void *fh, struct v4l2_fmtdesc *f)
{
struct iris_inst *inst = iris_get_inst(filp, NULL);
- if (f->index)
- return -EINVAL;
-
return iris_vdec_enum_fmt(inst, f);
}
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_buffer.c b/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
index dce25e410d80..f92fd39fe310 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
@@ -31,6 +31,42 @@ static u32 hfi_buffer_bin_h264d(u32 frame_width, u32 frame_height, u32 num_vpp_p
return size_h264d_hw_bin_buffer(n_aligned_w, n_aligned_h, num_vpp_pipes);
}
+static u32 size_h265d_hw_bin_buffer(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
+{
+ u32 product = frame_width * frame_height;
+ u32 size_yuv, size_bin_hdr, size_bin_res;
+
+ size_yuv = (product <= BIN_BUFFER_THRESHOLD) ?
+ ((BIN_BUFFER_THRESHOLD * 3) >> 1) : ((product * 3) >> 1);
+ size_bin_hdr = size_yuv * H265_CABAC_HDR_RATIO_HD_TOT;
+ size_bin_res = size_yuv * H265_CABAC_RES_RATIO_HD_TOT;
+ size_bin_hdr = ALIGN(size_bin_hdr / num_vpp_pipes, DMA_ALIGNMENT) * num_vpp_pipes;
+ size_bin_res = ALIGN(size_bin_res / num_vpp_pipes, DMA_ALIGNMENT) * num_vpp_pipes;
+
+ return size_bin_hdr + size_bin_res;
+}
+
+static u32 hfi_buffer_bin_vp9d(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
+{
+ u32 _size_yuv = ALIGN(frame_width, 16) * ALIGN(frame_height, 16) * 3 / 2;
+ u32 _size = ALIGN(((max_t(u32, _size_yuv, ((BIN_BUFFER_THRESHOLD * 3) >> 1)) *
+ VPX_DECODER_FRAME_BIN_HDR_BUDGET / VPX_DECODER_FRAME_BIN_DENOMINATOR *
+ VPX_DECODER_FRAME_CONCURENCY_LVL) / num_vpp_pipes), DMA_ALIGNMENT) +
+ ALIGN(((max_t(u32, _size_yuv, ((BIN_BUFFER_THRESHOLD * 3) >> 1)) *
+ VPX_DECODER_FRAME_BIN_RES_BUDGET / VPX_DECODER_FRAME_BIN_DENOMINATOR *
+ VPX_DECODER_FRAME_CONCURENCY_LVL) / num_vpp_pipes), DMA_ALIGNMENT);
+
+ return _size * num_vpp_pipes;
+}
+
+static u32 hfi_buffer_bin_h265d(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
+{
+ u32 n_aligned_w = ALIGN(frame_width, 16);
+ u32 n_aligned_h = ALIGN(frame_height, 16);
+
+ return size_h265d_hw_bin_buffer(n_aligned_w, n_aligned_h, num_vpp_pipes);
+}
+
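A worked example of the HEVC bin-buffer sizing above, as a plain C sketch. The frame area is assumed to exceed BIN_BUFFER_THRESHOLD (its value is not shown in this hunk); the CABAC ratios and DMA_ALIGNMENT are taken from the iris_vpu_buffer.h hunk later in this patch.

#include <stdio.h>

#define ALIGN(x, a)			(((x) + (a) - 1) & ~((a) - 1))
#define DMA_ALIGNMENT			256
#define H265_CABAC_HDR_RATIO_HD_TOT	2
#define H265_CABAC_RES_RATIO_HD_TOT	2

int main(void)
{
	unsigned int w = 1920, h = 1088, pipes = 4;
	unsigned int size_yuv = (w * h * 3) / 2; /* assumes area > BIN_BUFFER_THRESHOLD */
	unsigned int hdr = size_yuv * H265_CABAC_HDR_RATIO_HD_TOT;
	unsigned int res = size_yuv * H265_CABAC_RES_RATIO_HD_TOT;

	/* Per-pipe slices are DMA-aligned individually, then summed. */
	hdr = ALIGN(hdr / pipes, DMA_ALIGNMENT) * pipes;
	res = ALIGN(res / pipes, DMA_ALIGNMENT) * pipes;
	printf("HEVC bin buffer: %u bytes\n", hdr + res); /* 12533760 */
	return 0;
}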
static u32 hfi_buffer_comv_h264d(u32 frame_width, u32 frame_height, u32 _comv_bufcount)
{
u32 frame_height_in_mbs = DIV_ROUND_UP(frame_height, 16);
@@ -55,6 +91,17 @@ static u32 hfi_buffer_comv_h264d(u32 frame_width, u32 frame_height, u32 _comv_bu
return (size_colloc * (_comv_bufcount)) + 512;
}
+static u32 hfi_buffer_comv_h265d(u32 frame_width, u32 frame_height, u32 _comv_bufcount)
+{
+ u32 frame_height_in_mbs = (frame_height + 15) >> 4;
+ u32 frame_width_in_mbs = (frame_width + 15) >> 4;
+ u32 _size;
+
+ _size = ALIGN(((frame_width_in_mbs * frame_height_in_mbs) << 8), 512);
+
+ return (_size * (_comv_bufcount)) + 512;
+}
+
static u32 size_h264d_bse_cmd_buf(u32 frame_height)
{
u32 height = ALIGN(frame_height, 32);
@@ -63,6 +110,44 @@ static u32 size_h264d_bse_cmd_buf(u32 frame_height)
SIZE_H264D_BSE_CMD_PER_BUF;
}
+static u32 size_h265d_bse_cmd_buf(u32 frame_width, u32 frame_height)
+{
+ u32 _size = ALIGN(((ALIGN(frame_width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
+ (ALIGN(frame_height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS)) *
+ NUM_HW_PIC_BUF, DMA_ALIGNMENT);
+ _size = min_t(u32, _size, H265D_MAX_SLICE + 1);
+ _size = 2 * _size * SIZE_H265D_BSE_CMD_PER_BUF;
+
+ return _size;
+}
+
+static u32 hfi_buffer_persist_h265d(u32 rpu_enabled)
+{
+ return ALIGN((SIZE_SLIST_BUF_H265 * NUM_SLIST_BUF_H265 +
+ H265_NUM_FRM_INFO * H265_DISPLAY_BUF_SIZE +
+ H265_NUM_TILE * sizeof(u32) +
+ NUM_HW_PIC_BUF * SIZE_SEI_USERDATA +
+ rpu_enabled * NUM_HW_PIC_BUF * SIZE_DOLBY_RPU_METADATA),
+ DMA_ALIGNMENT);
+}
+
+static inline
+u32 hfi_iris3_vp9d_comv_size(void)
+{
+ return (((8192 + 63) >> 6) * ((4320 + 63) >> 6) * 8 * 8 * 2 * 8);
+}
+
+static u32 hfi_buffer_persist_vp9d(void)
+{
+ return ALIGN(VP9_NUM_PROBABILITY_TABLE_BUF * VP9_PROB_TABLE_SIZE, DMA_ALIGNMENT) +
+ ALIGN(hfi_iris3_vp9d_comv_size(), DMA_ALIGNMENT) +
+ ALIGN(MAX_SUPERFRAME_HEADER_LEN, DMA_ALIGNMENT) +
+ ALIGN(VP9_UDC_HEADER_BUF_SIZE, DMA_ALIGNMENT) +
+ ALIGN(VP9_NUM_FRAME_INFO_BUF * CCE_TILE_OFFSET_SIZE, DMA_ALIGNMENT) +
+ ALIGN(VP9_NUM_FRAME_INFO_BUF * VP9_FRAME_INFO_BUF_SIZE, DMA_ALIGNMENT) +
+ HDR10_HIST_EXTRADATA_SIZE;
+}
+
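hfi_iris3_vp9d_comv_size() above hard-codes an 8192x4320 worst case measured in 64-pel superblocks. A quick standalone check of that arithmetic:

#include <assert.h>

int main(void)
{
	unsigned int sb_w = (8192 + 63) >> 6; /* 128 superblocks wide */
	unsigned int sb_h = (4320 + 63) >> 6; /* 68 superblocks high */

	/* 8x8 entries per superblock, 2 * 8 bytes per entry, as above */
	assert(sb_w * sb_h * 8 * 8 * 2 * 8 == 8912896); /* ~8.5 MiB */
	return 0;
}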
static u32 size_h264d_vpp_cmd_buf(u32 frame_height)
{
u32 size, height = ALIGN(frame_height, 32);
@@ -83,17 +168,45 @@ static u32 hfi_buffer_persist_h264d(void)
static u32 hfi_buffer_non_comv_h264d(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
{
- u32 size_bse, size_vpp, size;
-
- size_bse = size_h264d_bse_cmd_buf(frame_height);
- size_vpp = size_h264d_vpp_cmd_buf(frame_height);
- size = ALIGN(size_bse, DMA_ALIGNMENT) +
+ u32 size_bse = size_h264d_bse_cmd_buf(frame_height);
+ u32 size_vpp = size_h264d_vpp_cmd_buf(frame_height);
+ u32 size = ALIGN(size_bse, DMA_ALIGNMENT) +
ALIGN(size_vpp, DMA_ALIGNMENT) +
ALIGN(SIZE_HW_PIC(SIZE_H264D_HW_PIC_T), DMA_ALIGNMENT);
return ALIGN(size, DMA_ALIGNMENT);
}
+static u32 size_h265d_vpp_cmd_buf(u32 frame_width, u32 frame_height)
+{
+ u32 _size = ALIGN(((ALIGN(frame_width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
+ (ALIGN(frame_height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS)) *
+ NUM_HW_PIC_BUF, DMA_ALIGNMENT);
+ _size = min_t(u32, _size, H265D_MAX_SLICE + 1);
+ _size = ALIGN(_size, 4);
+ _size = 2 * _size * SIZE_H265D_VPP_CMD_PER_BUF;
+ if (_size > VPP_CMD_MAX_SIZE)
+ _size = VPP_CMD_MAX_SIZE;
+
+ return _size;
+}
+
+static u32 hfi_buffer_non_comv_h265d(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
+{
+ u32 _size_bse = size_h265d_bse_cmd_buf(frame_width, frame_height);
+ u32 _size_vpp = size_h265d_vpp_cmd_buf(frame_width, frame_height);
+ u32 _size = ALIGN(_size_bse, DMA_ALIGNMENT) +
+ ALIGN(_size_vpp, DMA_ALIGNMENT) +
+ ALIGN(NUM_HW_PIC_BUF * 20 * 22 * 4, DMA_ALIGNMENT) +
+ ALIGN(2 * sizeof(u16) *
+ (ALIGN(frame_width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
+ (ALIGN(frame_height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS), DMA_ALIGNMENT) +
+ ALIGN(SIZE_HW_PIC(SIZE_H265D_HW_PIC_T), DMA_ALIGNMENT) +
+ HDR10_HIST_EXTRADATA_SIZE;
+
+ return ALIGN(_size, DMA_ALIGNMENT);
+}
+
static u32 size_vpss_lb(u32 frame_width, u32 frame_height)
{
u32 opb_lb_wr_llb_y_buffer_size, opb_lb_wr_llb_uv_buffer_size;
@@ -119,6 +232,203 @@ static u32 size_vpss_lb(u32 frame_width, u32 frame_height)
opb_lb_wr_llb_y_buffer_size;
}
+static inline
+u32 size_h265d_lb_fe_top_data(u32 frame_width, u32 frame_height)
+{
+ return MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE *
+ (ALIGN(frame_width, 64) + 8) * 2;
+}
+
+static inline
+u32 size_h265d_lb_fe_top_ctrl(u32 frame_width, u32 frame_height)
+{
+ return MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE *
+ (ALIGN(frame_width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS);
+}
+
+static inline
+u32 size_h265d_lb_fe_left_ctrl(u32 frame_width, u32 frame_height)
+{
+ return MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE *
+ (ALIGN(frame_height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS);
+}
+
+static inline
+u32 size_h265d_lb_se_top_ctrl(u32 frame_width, u32 frame_height)
+{
+ return (LCU_MAX_SIZE_PELS / 8 * (128 / 8)) * ((frame_width + 15) >> 4);
+}
+
+static inline
+u32 size_h265d_lb_se_left_ctrl(u32 frame_width, u32 frame_height)
+{
+ return max_t(u32, ((frame_height + 16 - 1) / 8) *
+ MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE,
+ max_t(u32, ((frame_height + 32 - 1) / 8) *
+ MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE,
+ ((frame_height + 64 - 1) / 8) *
+ MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE));
+}
+
+static inline
+u32 size_h265d_lb_pe_top_data(u32 frame_width, u32 frame_height)
+{
+ return MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE *
+ (ALIGN(frame_width, LCU_MIN_SIZE_PELS) / LCU_MIN_SIZE_PELS);
+}
+
+static inline
+u32 size_h265d_lb_vsp_top(u32 frame_width, u32 frame_height)
+{
+ return ((frame_width + 63) >> 6) * 128;
+}
+
+static inline
+u32 size_h265d_lb_vsp_left(u32 frame_width, u32 frame_height)
+{
+ return ((frame_height + 63) >> 6) * 128;
+}
+
+static inline
+u32 size_h265d_lb_recon_dma_metadata_wr(u32 frame_width, u32 frame_height)
+{
+ return size_h264d_lb_recon_dma_metadata_wr(frame_height);
+}
+
+static inline
+u32 size_h265d_qp(u32 frame_width, u32 frame_height)
+{
+ return size_h264d_qp(frame_width, frame_height);
+}
+
+static inline
+u32 hfi_buffer_line_h265d(u32 frame_width, u32 frame_height, bool is_opb, u32 num_vpp_pipes)
+{
+ u32 vpss_lb_size = 0, _size;
+
+ _size = ALIGN(size_h265d_lb_fe_top_data(frame_width, frame_height), DMA_ALIGNMENT) +
+ ALIGN(size_h265d_lb_fe_top_ctrl(frame_width, frame_height), DMA_ALIGNMENT) +
+ ALIGN(size_h265d_lb_fe_left_ctrl(frame_width, frame_height),
+ DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(size_h265d_lb_se_left_ctrl(frame_width, frame_height),
+ DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(size_h265d_lb_se_top_ctrl(frame_width, frame_height), DMA_ALIGNMENT) +
+ ALIGN(size_h265d_lb_pe_top_data(frame_width, frame_height), DMA_ALIGNMENT) +
+ ALIGN(size_h265d_lb_vsp_top(frame_width, frame_height), DMA_ALIGNMENT) +
+ ALIGN(size_h265d_lb_vsp_left(frame_width, frame_height),
+ DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(size_h265d_lb_recon_dma_metadata_wr(frame_width, frame_height),
+ DMA_ALIGNMENT) * 4 +
+ ALIGN(size_h265d_qp(frame_width, frame_height), DMA_ALIGNMENT);
+ if (is_opb)
+ vpss_lb_size = size_vpss_lb(frame_width, frame_height);
+
+ return ALIGN((_size + vpss_lb_size), DMA_ALIGNMENT);
+}
+
+static inline
+u32 size_vpxd_lb_fe_left_ctrl(u32 frame_width, u32 frame_height)
+{
+ return max_t(u32, ((frame_height + 15) >> 4) *
+ MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE,
+ max_t(u32, ((frame_height + 31) >> 5) *
+ MAX_FE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE,
+ ((frame_height + 63) >> 6) *
+ MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE));
+}
+
+static inline
+u32 size_vpxd_lb_fe_top_ctrl(u32 frame_width, u32 frame_height)
+{
+ return ((ALIGN(frame_width, 64) + 8) * 10 * 2);
+}
+
+static inline
+u32 size_vpxd_lb_se_top_ctrl(u32 frame_width, u32 frame_height)
+{
+ return ((frame_width + 15) >> 4) * MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE;
+}
+
+static inline
+u32 size_vpxd_lb_se_left_ctrl(u32 frame_width, u32 frame_height)
+{
+ return max_t(u32, ((frame_height + 15) >> 4) *
+ MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE,
+ max_t(u32, ((frame_height + 31) >> 5) *
+ MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE,
+ ((frame_height + 63) >> 6) *
+ MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE));
+}
+
+static inline
+u32 size_vpxd_lb_recon_dma_metadata_wr(u32 frame_width, u32 frame_height)
+{
+ return ALIGN((ALIGN(frame_height, 8) / (4 / 2)) * 64,
+ BUFFER_ALIGNMENT_32_BYTES);
+}
+
+static inline __maybe_unused
+u32 size_mp2d_lb_fe_top_data(u32 frame_width, u32 frame_height)
+{
+ return ((ALIGN(frame_width, 16) + 8) * 10 * 2);
+}
+
+static inline
+u32 size_vp9d_lb_fe_top_data(u32 frame_width, u32 frame_height)
+{
+ return (ALIGN(ALIGN(frame_width, 8), 64) + 8) * 10 * 2;
+}
+
+static inline
+u32 size_vp9d_lb_pe_top_data(u32 frame_width, u32 frame_height)
+{
+ return ((ALIGN(ALIGN(frame_width, 8), 64) >> 6) * 176);
+}
+
+static inline
+u32 size_vp9d_lb_vsp_top(u32 frame_width, u32 frame_height)
+{
+ return (((ALIGN(ALIGN(frame_width, 8), 64) >> 6) * 64 * 8) + 256);
+}
+
+static inline
+u32 size_vp9d_qp(u32 frame_width, u32 frame_height)
+{
+ return size_h264d_qp(frame_width, frame_height);
+}
+
+static inline
+u32 hfi_iris3_vp9d_lb_size(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
+{
+ return ALIGN(size_vpxd_lb_fe_left_ctrl(frame_width, frame_height), DMA_ALIGNMENT) *
+ num_vpp_pipes +
+ ALIGN(size_vpxd_lb_se_left_ctrl(frame_width, frame_height), DMA_ALIGNMENT) *
+ num_vpp_pipes +
+ ALIGN(size_vp9d_lb_vsp_top(frame_width, frame_height), DMA_ALIGNMENT) +
+ ALIGN(size_vpxd_lb_fe_top_ctrl(frame_width, frame_height), DMA_ALIGNMENT) +
+ 2 * ALIGN(size_vpxd_lb_recon_dma_metadata_wr(frame_width, frame_height),
+ DMA_ALIGNMENT) +
+ ALIGN(size_vpxd_lb_se_top_ctrl(frame_width, frame_height), DMA_ALIGNMENT) +
+ ALIGN(size_vp9d_lb_pe_top_data(frame_width, frame_height), DMA_ALIGNMENT) +
+ ALIGN(size_vp9d_lb_fe_top_data(frame_width, frame_height), DMA_ALIGNMENT) +
+ ALIGN(size_vp9d_qp(frame_width, frame_height), DMA_ALIGNMENT);
+}
+
+static inline
+u32 hfi_buffer_line_vp9d(u32 frame_width, u32 frame_height, u32 _yuv_bufcount_min, bool is_opb,
+ u32 num_vpp_pipes)
+{
+ u32 vpss_lb_size = 0;
+ u32 _lb_size;
+
+ _lb_size = hfi_iris3_vp9d_lb_size(frame_width, frame_height, num_vpp_pipes);
+
+ if (is_opb)
+ vpss_lb_size = size_vpss_lb(frame_width, frame_height);
+
+ return _lb_size + vpss_lb_size + 4096;
+}
+
static u32 hfi_buffer_line_h264d(u32 frame_width, u32 frame_height,
bool is_opb, u32 num_vpp_pipes)
{
@@ -148,7 +458,14 @@ static u32 iris_vpu_dec_bin_size(struct iris_inst *inst)
u32 height = f->fmt.pix_mp.height;
u32 width = f->fmt.pix_mp.width;
- return hfi_buffer_bin_h264d(width, height, num_vpp_pipes);
+ if (inst->codec == V4L2_PIX_FMT_H264)
+ return hfi_buffer_bin_h264d(width, height, num_vpp_pipes);
+ else if (inst->codec == V4L2_PIX_FMT_HEVC)
+ return hfi_buffer_bin_h265d(width, height, num_vpp_pipes);
+ else if (inst->codec == V4L2_PIX_FMT_VP9)
+ return hfi_buffer_bin_vp9d(width, height, num_vpp_pipes);
+
+ return 0;
}
static u32 iris_vpu_dec_comv_size(struct iris_inst *inst)
@@ -158,12 +475,24 @@ static u32 iris_vpu_dec_comv_size(struct iris_inst *inst)
u32 height = f->fmt.pix_mp.height;
u32 width = f->fmt.pix_mp.width;
- return hfi_buffer_comv_h264d(width, height, num_comv);
+ if (inst->codec == V4L2_PIX_FMT_H264)
+ return hfi_buffer_comv_h264d(width, height, num_comv);
+ else if (inst->codec == V4L2_PIX_FMT_HEVC)
+ return hfi_buffer_comv_h265d(width, height, num_comv);
+
+ return 0;
}
static u32 iris_vpu_dec_persist_size(struct iris_inst *inst)
{
- return hfi_buffer_persist_h264d();
+ if (inst->codec == V4L2_PIX_FMT_H264)
+ return hfi_buffer_persist_h264d();
+ else if (inst->codec == V4L2_PIX_FMT_HEVC)
+ return hfi_buffer_persist_h265d(0);
+ else if (inst->codec == V4L2_PIX_FMT_VP9)
+ return hfi_buffer_persist_vp9d();
+
+ return 0;
}
static u32 iris_vpu_dec_dpb_size(struct iris_inst *inst)
@@ -181,7 +510,12 @@ static u32 iris_vpu_dec_non_comv_size(struct iris_inst *inst)
u32 height = f->fmt.pix_mp.height;
u32 width = f->fmt.pix_mp.width;
- return hfi_buffer_non_comv_h264d(width, height, num_vpp_pipes);
+ if (inst->codec == V4L2_PIX_FMT_H264)
+ return hfi_buffer_non_comv_h264d(width, height, num_vpp_pipes);
+ else if (inst->codec == V4L2_PIX_FMT_HEVC)
+ return hfi_buffer_non_comv_h265d(width, height, num_vpp_pipes);
+
+ return 0;
}
static u32 iris_vpu_dec_line_size(struct iris_inst *inst)
@@ -191,11 +525,20 @@ static u32 iris_vpu_dec_line_size(struct iris_inst *inst)
u32 height = f->fmt.pix_mp.height;
u32 width = f->fmt.pix_mp.width;
bool is_opb = false;
+ u32 out_min_count = inst->buffers[BUF_OUTPUT].min_count;
if (iris_split_mode_enabled(inst))
is_opb = true;
- return hfi_buffer_line_h264d(width, height, is_opb, num_vpp_pipes);
+ if (inst->codec == V4L2_PIX_FMT_H264)
+ return hfi_buffer_line_h264d(width, height, is_opb, num_vpp_pipes);
+ else if (inst->codec == V4L2_PIX_FMT_HEVC)
+ return hfi_buffer_line_h265d(width, height, is_opb, num_vpp_pipes);
+ else if (inst->codec == V4L2_PIX_FMT_VP9)
+ return hfi_buffer_line_vp9d(width, height, out_min_count, is_opb,
+ num_vpp_pipes);
+
+ return 0;
}
static u32 iris_vpu_dec_scratch1_size(struct iris_inst *inst)
@@ -205,6 +548,24 @@ static u32 iris_vpu_dec_scratch1_size(struct iris_inst *inst)
iris_vpu_dec_line_size(inst);
}
+static int output_min_count(struct iris_inst *inst)
+{
+ int output_min_count = 4;
+
+ /* fw_min_count > 0 indicates a reconfig event has already arrived */
+ if (inst->fw_min_count) {
+ if (iris_split_mode_enabled(inst) && inst->codec == V4L2_PIX_FMT_VP9)
+ return min_t(u32, 4, inst->fw_min_count);
+ else
+ return inst->fw_min_count;
+ }
+
+ if (inst->codec == V4L2_PIX_FMT_VP9)
+ output_min_count = 9;
+
+ return output_min_count;
+}
+
struct iris_vpu_buf_type_handle {
enum iris_buffer_type type;
u32 (*handle)(struct iris_inst *inst);
@@ -238,6 +599,19 @@ int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
return size;
}
+static u32 internal_buffer_count(struct iris_inst *inst,
+ enum iris_buffer_type buffer_type)
+{
+ if (buffer_type == BUF_BIN || buffer_type == BUF_LINE ||
+ buffer_type == BUF_PERSIST) {
+ return 1;
+ } else if (buffer_type == BUF_COMV || buffer_type == BUF_NON_COMV) {
+ if (inst->codec == V4L2_PIX_FMT_H264 || inst->codec == V4L2_PIX_FMT_HEVC)
+ return 1;
+ }
+ return 0;
+}
+
static inline int iris_vpu_dpb_count(struct iris_inst *inst)
{
if (iris_split_mode_enabled(inst)) {
@@ -254,12 +628,13 @@ int iris_vpu_buf_count(struct iris_inst *inst, enum iris_buffer_type buffer_type
case BUF_INPUT:
return MIN_BUFFERS;
case BUF_OUTPUT:
- return inst->fw_min_count;
+ return output_min_count(inst);
case BUF_BIN:
case BUF_COMV:
case BUF_NON_COMV:
case BUF_LINE:
case BUF_PERSIST:
+ return internal_buffer_count(inst, buffer_type);
case BUF_SCRATCH_1:
return 1; /* internal buffer count needed by firmware is 1 */
case BUF_DPB:
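The count policy introduced above, restated as a runnable sketch: the OUTPUT queue defaults to 4 buffers (9 for VP9) until the firmware reports a minimum, and split-mode VP9 is capped at 4 once it has. A hypothetical enum stands in for the V4L2 pixel formats, since this sketch does not include the uapi header.

#include <stdio.h>

enum codec { H264, HEVC, VP9 };

/* Mirrors output_min_count(): pre-reconfig defaults vs. firmware minimum. */
static unsigned int output_min_count(enum codec c, unsigned int fw_min_count,
				     int split_mode)
{
	if (fw_min_count) { /* reconfig event already arrived */
		if (split_mode && c == VP9 && fw_min_count > 4)
			return 4;
		return fw_min_count;
	}
	return c == VP9 ? 9 : 4;
}

int main(void)
{
	printf("%u\n", output_min_count(H264, 0, 0));  /* 4 */
	printf("%u\n", output_min_count(VP9, 0, 0));   /* 9 */
	printf("%u\n", output_min_count(VP9, 11, 1));  /* 4 */
	return 0;
}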
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_buffer.h b/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
index 62af6ea6ba1f..ee95fd20b794 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
+++ b/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
@@ -13,6 +13,10 @@ struct iris_inst;
#define DMA_ALIGNMENT 256
#define NUM_HW_PIC_BUF 32
+#define LCU_MAX_SIZE_PELS 64
+#define LCU_MIN_SIZE_PELS 16
+#define HDR10_HIST_EXTRADATA_SIZE (4 * 1024)
+
#define SIZE_HW_PIC(size_per_buf) (NUM_HW_PIC_BUF * (size_per_buf))
#define MAX_TILE_COLUMNS 32
@@ -28,11 +32,47 @@ struct iris_inst;
#define SIZE_SLIST_BUF_H264 512
#define H264_DISPLAY_BUF_SIZE 3328
#define H264_NUM_FRM_INFO 66
-
-#define SIZE_SEI_USERDATA 4096
-
+#define H265_NUM_TILE_COL 32
+#define H265_NUM_TILE_ROW 128
+#define H265_NUM_TILE (H265_NUM_TILE_ROW * H265_NUM_TILE_COL + 1)
+#define SIZE_H265D_BSE_CMD_PER_BUF (16 * sizeof(u32))
+
+#define NUM_SLIST_BUF_H265 (80 + 20)
+#define SIZE_SLIST_BUF_H265 (BIT(10))
+#define H265_DISPLAY_BUF_SIZE (3072)
+#define H265_NUM_FRM_INFO (48)
+
+#define VP9_NUM_FRAME_INFO_BUF 32
+#define VP9_NUM_PROBABILITY_TABLE_BUF (VP9_NUM_FRAME_INFO_BUF + 4)
+#define VP9_PROB_TABLE_SIZE (3840)
+#define VP9_FRAME_INFO_BUF_SIZE (6144)
+#define BUFFER_ALIGNMENT_32_BYTES 32
+#define CCE_TILE_OFFSET_SIZE ALIGN(32 * 4 * 4, BUFFER_ALIGNMENT_32_BYTES)
+#define MAX_SUPERFRAME_HEADER_LEN (34)
+#define MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE 64
+#define MAX_FE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE 64
+#define MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE 64
+#define MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE (128 / 8)
+#define MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE (128 / 8)
+#define VP9_UDC_HEADER_BUF_SIZE (3 * 128)
+
+#define SIZE_SEI_USERDATA 4096
+#define SIZE_DOLBY_RPU_METADATA (41 * 1024)
#define H264_CABAC_HDR_RATIO_HD_TOT 1
#define H264_CABAC_RES_RATIO_HD_TOT 3
+#define H265D_MAX_SLICE 1200
+#define SIZE_H265D_HW_PIC_T SIZE_H264D_HW_PIC_T
+#define H265_CABAC_HDR_RATIO_HD_TOT 2
+#define H265_CABAC_RES_RATIO_HD_TOT 2
+#define SIZE_H265D_VPP_CMD_PER_BUF (256)
+
+#define VPX_DECODER_FRAME_CONCURENCY_LVL (2)
+#define VPX_DECODER_FRAME_BIN_HDR_BUDGET 1
+#define VPX_DECODER_FRAME_BIN_RES_BUDGET 3
+#define VPX_DECODER_FRAME_BIN_DENOMINATOR 2
+
+#define VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO (3 / 2)
+
#define SIZE_H264D_HW_PIC_T (BIT(11))
#define MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE 64
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index d305d74bb152..4c049c694d9c 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -424,13 +424,13 @@ static int venus_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
init_waitqueue_head(&core->sys_err_done);
- ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, venus_isr_thread,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "venus", core);
+ ret = hfi_create(core, &venus_core_ops);
if (ret)
goto err_core_put;
- ret = hfi_create(core, &venus_core_ops);
+ ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, venus_isr_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "venus", core);
if (ret)
goto err_core_put;
@@ -709,11 +709,11 @@ static const struct venus_resources msm8996_res = {
};
static const struct freq_tbl msm8998_freq_table[] = {
- { 1944000, 465000000 }, /* 4k UHD @ 60 (decode only) */
- { 972000, 465000000 }, /* 4k UHD @ 30 */
- { 489600, 360000000 }, /* 1080p @ 60 */
- { 244800, 186000000 }, /* 1080p @ 30 */
- { 108000, 100000000 }, /* 720p @ 30 */
+ { 1728000, 533000000 }, /* 4k UHD @ 60 (decode only) */
+ { 1036800, 444000000 }, /* 2k @ 120 */
+ { 829440, 355200000 }, /* 4k @ 44 */
+ { 489600, 269330000 }, /* 1080p @ 60 */
+ { 108000, 200000000 }, /* 720p @ 30 */
};
static const struct reg_val msm8998_reg_preset[] = {
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index b412e0c5515a..5b1ba1c69adb 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -28,6 +28,8 @@
#define VIDC_RESETS_NUM_MAX 2
#define VIDC_MAX_HIER_CODING_LAYER 6
+#define VENUS_MAX_FPS 240
+
extern int venus_fw_debug;
struct freq_tbl {
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index 0a041b4db9ef..cf0d97cbc463 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -33,8 +33,9 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
struct hfi_buffer_requirements *bufreq;
struct hfi_extradata_input_crop *crop;
struct hfi_dpb_counts *dpb_count;
+ u32 ptype, rem_bytes;
+ u32 size_read = 0;
u8 *data_ptr;
- u32 ptype;
inst->error = HFI_ERR_NONE;
@@ -44,86 +45,118 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
break;
default:
inst->error = HFI_ERR_SESSION_INVALID_PARAMETER;
- goto done;
+ inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
+ return;
}
event.event_type = pkt->event_data1;
num_properties_changed = pkt->event_data2;
- if (!num_properties_changed) {
- inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
- goto done;
- }
+ if (!num_properties_changed)
+ goto error;
data_ptr = (u8 *)&pkt->ext_event_data[0];
+ rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
+
do {
+ if (rem_bytes < sizeof(u32))
+ goto error;
ptype = *((u32 *)data_ptr);
+
+ data_ptr += sizeof(u32);
+ rem_bytes -= sizeof(u32);
+
switch (ptype) {
case HFI_PROPERTY_PARAM_FRAME_SIZE:
- data_ptr += sizeof(u32);
+ if (rem_bytes < sizeof(struct hfi_framesize))
+ goto error;
+
frame_sz = (struct hfi_framesize *)data_ptr;
event.width = frame_sz->width;
event.height = frame_sz->height;
- data_ptr += sizeof(*frame_sz);
+ size_read = sizeof(struct hfi_framesize);
break;
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
- data_ptr += sizeof(u32);
+ if (rem_bytes < sizeof(struct hfi_profile_level))
+ goto error;
+
profile_level = (struct hfi_profile_level *)data_ptr;
event.profile = profile_level->profile;
event.level = profile_level->level;
- data_ptr += sizeof(*profile_level);
+ size_read = sizeof(struct hfi_profile_level);
break;
case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH:
- data_ptr += sizeof(u32);
+ if (rem_bytes < sizeof(struct hfi_bit_depth))
+ goto error;
+
pixel_depth = (struct hfi_bit_depth *)data_ptr;
event.bit_depth = pixel_depth->bit_depth;
- data_ptr += sizeof(*pixel_depth);
+ size_read = sizeof(struct hfi_bit_depth);
break;
case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT:
- data_ptr += sizeof(u32);
+ if (rem_bytes < sizeof(struct hfi_pic_struct))
+ goto error;
+
pic_struct = (struct hfi_pic_struct *)data_ptr;
event.pic_struct = pic_struct->progressive_only;
- data_ptr += sizeof(*pic_struct);
+ size_read = sizeof(struct hfi_pic_struct);
break;
case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
- data_ptr += sizeof(u32);
+ if (rem_bytes < sizeof(struct hfi_colour_space))
+ goto error;
+
colour_info = (struct hfi_colour_space *)data_ptr;
event.colour_space = colour_info->colour_space;
- data_ptr += sizeof(*colour_info);
+ size_read = sizeof(struct hfi_colour_space);
break;
case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
- data_ptr += sizeof(u32);
+ if (rem_bytes < sizeof(u32))
+ goto error;
+
event.entropy_mode = *(u32 *)data_ptr;
- data_ptr += sizeof(u32);
+ size_read = sizeof(u32);
break;
case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
- data_ptr += sizeof(u32);
+ if (rem_bytes < sizeof(struct hfi_buffer_requirements))
+ goto error;
+
bufreq = (struct hfi_buffer_requirements *)data_ptr;
event.buf_count = hfi_bufreq_get_count_min(bufreq, ver);
- data_ptr += sizeof(*bufreq);
+ size_read = sizeof(struct hfi_buffer_requirements);
break;
case HFI_INDEX_EXTRADATA_INPUT_CROP:
- data_ptr += sizeof(u32);
+ if (rem_bytes < sizeof(struct hfi_extradata_input_crop))
+ goto error;
+
crop = (struct hfi_extradata_input_crop *)data_ptr;
event.input_crop.left = crop->left;
event.input_crop.top = crop->top;
event.input_crop.width = crop->width;
event.input_crop.height = crop->height;
- data_ptr += sizeof(*crop);
+ size_read = sizeof(struct hfi_extradata_input_crop);
break;
case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS:
- data_ptr += sizeof(u32);
+ if (rem_bytes < sizeof(struct hfi_dpb_counts))
+ goto error;
+
dpb_count = (struct hfi_dpb_counts *)data_ptr;
event.buf_count = dpb_count->fw_min_cnt;
- data_ptr += sizeof(*dpb_count);
+ size_read = sizeof(struct hfi_dpb_counts);
break;
default:
+ size_read = 0;
break;
}
+ data_ptr += size_read;
+ rem_bytes -= size_read;
num_properties_changed--;
} while (num_properties_changed > 0);
-done:
+ inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
+ return;
+
+error:
+ inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
}
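The hunk above turns event_seq_changed() into a bounds-checked parser: each property is a u32 type tag followed by a payload, and both are validated against the bytes remaining in the packet before being read. A self-contained sketch of that pattern, with illustrative names and tag values rather than the driver's:

#include <stdint.h>
#include <string.h>

struct framesize { uint32_t width, height; };

static int parse_props(const uint8_t *data, size_t rem, unsigned int nprops)
{
	while (nprops--) {
		uint32_t ptype;
		size_t size_read = 0;

		if (rem < sizeof(uint32_t))
			return -1;	/* truncated tag */
		memcpy(&ptype, data, sizeof(ptype));
		data += sizeof(uint32_t);
		rem -= sizeof(uint32_t);

		switch (ptype) {
		case 0x1001:	/* hypothetical frame-size property */
			if (rem < sizeof(struct framesize))
				return -1;	/* truncated payload */
			size_read = sizeof(struct framesize);
			break;
		default:
			size_read = 0;	/* unknown property: payload size unknown */
			break;
		}
		data += size_read;
		rem -= size_read;
	}
	return 0;
}

int main(void)
{
	uint8_t pkt[12] = { 0x01, 0x10, 0, 0,	/* tag 0x1001, little-endian */
			    0x80, 0x07, 0, 0,	/* width 1920 */
			    0x38, 0x04, 0, 0 };	/* height 1080 */
	return parse_props(pkt, sizeof(pkt), 1) ? 1 : 0;
}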
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index b5f2ea879950..cec7f5964d3d 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -239,6 +239,7 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
static int venus_read_queue(struct venus_hfi_device *hdev,
struct iface_queue *queue, void *pkt, u32 *tx_req)
{
+ struct hfi_pkt_hdr *pkt_hdr = NULL;
struct hfi_queue_header *qhdr;
u32 dwords, new_rd_idx;
u32 rd_idx, wr_idx, type, qsize;
@@ -304,6 +305,9 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
memcpy(pkt, rd_ptr, len);
memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
}
+ pkt_hdr = (struct hfi_pkt_hdr *)(pkt);
+ if ((pkt_hdr->size >> 2) != dwords)
+ return -EINVAL;
} else {
/* bad packet received, dropping */
new_rd_idx = qhdr->write_idx;
@@ -1678,6 +1682,7 @@ void venus_hfi_destroy(struct venus_core *core)
venus_interface_queues_release(hdev);
mutex_destroy(&hdev->lock);
kfree(hdev);
+ disable_irq(core->irq);
core->ops = NULL;
}
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 409aa9bd0b5d..e32f8862a9f9 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -41,16 +41,15 @@ static int core_clks_get(struct venus_core *core)
static int core_clks_enable(struct venus_core *core)
{
const struct venus_resources *res = core->res;
- const struct freq_tbl *freq_tbl = core->res->freq_tbl;
- unsigned int freq_tbl_size = core->res->freq_tbl_size;
- unsigned long freq;
+ struct device *dev = core->dev;
+ unsigned long freq = 0;
+ struct dev_pm_opp *opp;
unsigned int i;
int ret;
- if (!freq_tbl)
- return -EINVAL;
-
- freq = freq_tbl[freq_tbl_size - 1].freq;
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
for (i = 0; i < res->clks_num; i++) {
if (IS_V6(core)) {
@@ -636,7 +635,9 @@ static int decide_core(struct venus_inst *inst)
u32 min_coreid, min_load, cur_inst_load;
u32 min_lp_coreid, min_lp_load, cur_inst_lp_load;
struct hfi_videocores_usage_type cu;
- unsigned long max_freq;
+ unsigned long max_freq = ULONG_MAX;
+ struct device *dev = core->dev;
+ struct dev_pm_opp *opp;
int ret = 0;
if (legacy_binding) {
@@ -659,7 +660,9 @@ static int decide_core(struct venus_inst *inst)
cur_inst_lp_load *= inst->clk_data.low_power_freq;
/*TODO : divide this inst->load by work_route */
- max_freq = core->res->freq_tbl[0].freq;
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
min_loaded_core(inst, &min_coreid, &min_load, false);
min_loaded_core(inst, &min_lp_coreid, &min_lp_load, true);
@@ -949,7 +952,10 @@ static int core_resets_get(struct venus_core *core)
static int core_get_v4(struct venus_core *core)
{
struct device *dev = core->dev;
+ const struct freq_tbl *freq_tbl = core->res->freq_tbl;
+ unsigned int num_rows = core->res->freq_tbl_size;
const struct venus_resources *res = core->res;
+ unsigned int i;
int ret;
ret = core_clks_get(core);
@@ -986,9 +992,17 @@ static int core_get_v4(struct venus_core *core)
if (core->res->opp_pmdomain) {
ret = devm_pm_opp_of_add_table(dev);
- if (ret && ret != -ENODEV) {
- dev_err(dev, "invalid OPP table in device tree\n");
- return ret;
+ if (ret) {
+ if (ret == -ENODEV) {
+ for (i = 0; i < num_rows; i++) {
+ ret = dev_pm_opp_add(dev, freq_tbl[i].freq, 0);
+ if (ret)
+ return ret;
+ }
+ } else {
+ dev_err(dev, "invalid OPP table in device tree\n");
+ return ret;
+ }
}
}
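The hunks above replace reads of the static freq_tbl with OPP-table lookups. The lookup pattern in isolation (the OPP calls are real kernel APIs; the surrounding function is illustrative only): dev_pm_opp_find_freq_floor() with freq = ULONG_MAX returns the table maximum, and the reference it takes must be dropped with dev_pm_opp_put().

#include <linux/err.h>
#include <linux/pm_opp.h>

static unsigned long example_max_freq(struct device *dev)
{
	unsigned long freq = ULONG_MAX;
	struct dev_pm_opp *opp;

	/* Highest OPP at or below ULONG_MAX, i.e. the table maximum. */
	opp = dev_pm_opp_find_freq_floor(dev, &freq);
	if (IS_ERR(opp))
		return 0;
	dev_pm_opp_put(opp);	/* the lookup took a reference */

	return freq;
}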
@@ -1078,11 +1092,11 @@ static unsigned long calculate_inst_freq(struct venus_inst *inst,
static int load_scale_v4(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
- const struct freq_tbl *table = core->res->freq_tbl;
- unsigned int num_rows = core->res->freq_tbl_size;
struct device *dev = core->dev;
unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
+ unsigned long max_freq = ULONG_MAX;
unsigned long filled_len = 0;
+ struct dev_pm_opp *opp;
int i, ret = 0;
for (i = 0; i < inst->num_input_bufs; i++)
@@ -1108,20 +1122,20 @@ static int load_scale_v4(struct venus_inst *inst)
freq = max(freq_core1, freq_core2);
- if (freq > table[0].freq) {
- dev_dbg(dev, VDBGL "requested clock rate: %lu scaling clock rate : %lu\n",
- freq, table[0].freq);
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
- freq = table[0].freq;
+ if (freq > max_freq) {
+ dev_dbg(dev, VDBGL "requested clock rate: %lu scaling clock rate : %lu\n",
+ freq, max_freq);
+ freq = max_freq;
goto set_freq;
}
- for (i = num_rows - 1 ; i >= 0; i--) {
- if (freq <= table[i].freq) {
- freq = table[i].freq;
- break;
- }
- }
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
set_freq:
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 99ce5fd41577..29b0d6a5303d 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -481,11 +481,9 @@ static int vdec_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC;
do_div(us_per_frame, timeperframe->denominator);
- if (!us_per_frame)
- return -EINVAL;
-
- fps = (u64)USEC_PER_SEC;
- do_div(fps, us_per_frame);
+ us_per_frame = clamp(us_per_frame, 1, USEC_PER_SEC);
+ fps = USEC_PER_SEC / (u32)us_per_frame;
+ fps = min(VENUS_MAX_FPS, fps);
inst->fps = fps;
inst->timeperframe = *timeperframe;
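A worked example of the new fps derivation (the identical change appears in the venc hunk below), as a plain C sketch: a 1/25 s interval gives 40000 us per frame and 25 fps, while a zero interval is now clamped rather than rejected and the result is capped at VENUS_MAX_FPS.

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000L
#define VENUS_MAX_FPS	240

static uint32_t venus_fps(uint32_t numerator, uint32_t denominator)
{
	uint64_t us_per_frame = numerator * (uint64_t)USEC_PER_SEC / denominator;
	uint32_t fps;

	if (us_per_frame < 1)
		us_per_frame = 1;		/* lower bound */
	if (us_per_frame > USEC_PER_SEC)
		us_per_frame = USEC_PER_SEC;	/* upper bound: at least 1 fps */
	fps = USEC_PER_SEC / (uint32_t)us_per_frame;
	return fps < VENUS_MAX_FPS ? fps : VENUS_MAX_FPS;
}

int main(void)
{
	printf("%u\n", venus_fps(1, 25));	/* 25 */
	printf("%u\n", venus_fps(0, 30));	/* zero interval -> capped at 240 */
	return 0;
}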
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index c7f8e37dba9b..c0a0ccdded80 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -411,11 +411,9 @@ static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC;
do_div(us_per_frame, timeperframe->denominator);
- if (!us_per_frame)
- return -EINVAL;
-
- fps = (u64)USEC_PER_SEC;
- do_div(fps, us_per_frame);
+ us_per_frame = clamp(us_per_frame, 1, USEC_PER_SEC);
+ fps = USEC_PER_SEC / (u32)us_per_frame;
+ fps = min(VENUS_MAX_FPS, fps);
inst->timeperframe = *timeperframe;
inst->fps = fps;
diff --git a/drivers/media/platform/raspberrypi/pisp_be/Kconfig b/drivers/media/platform/raspberrypi/pisp_be/Kconfig
index 46765a2e4c4d..a9e51fd94aad 100644
--- a/drivers/media/platform/raspberrypi/pisp_be/Kconfig
+++ b/drivers/media/platform/raspberrypi/pisp_be/Kconfig
@@ -3,6 +3,7 @@ config VIDEO_RASPBERRYPI_PISP_BE
depends on V4L_PLATFORM_DRIVERS
depends on VIDEO_DEV
depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on PM
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c b/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
index 7596ae1f7de6..b30891718d8d 100644
--- a/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
+++ b/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
@@ -9,9 +9,11 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>
@@ -161,8 +163,6 @@ struct pispbe_node {
struct mutex node_lock;
/* vb2_queue lock */
struct mutex queue_lock;
- /* Protect pispbe_node->ready_queue and pispbe_buffer->ready_list */
- spinlock_t ready_lock;
struct list_head ready_queue;
struct vb2_queue queue;
struct v4l2_format format;
@@ -190,6 +190,8 @@ struct pispbe_hw_enables {
/* Records a job configuration and memory addresses. */
struct pispbe_job_descriptor {
+ struct list_head queue;
+ struct pispbe_buffer *buffers[PISPBE_NUM_NODES];
dma_addr_t hw_dma_addrs[N_HW_ADDRESSES];
struct pisp_be_tiles_config *config;
struct pispbe_hw_enables hw_enables;
@@ -215,8 +217,10 @@ struct pispbe_dev {
unsigned int sequence;
u32 streaming_map;
struct pispbe_job queued_job, running_job;
- spinlock_t hw_lock; /* protects "hw_busy" flag and streaming_map */
+ /* protects "hw_busy" flag, streaming_map and job_queue */
+ spinlock_t hw_lock;
bool hw_busy; /* non-zero if a job is queued or is being started */
+ struct list_head job_queue;
int irq;
u32 hw_version;
u8 done, started;
@@ -368,10 +372,7 @@ static void pispbe_xlate_addrs(struct pispbe_dev *pispbe,
ret = pispbe_get_planes_addr(addrs, buf[MAIN_INPUT_NODE],
&pispbe->node[MAIN_INPUT_NODE]);
if (ret <= 0) {
- /*
- * This shouldn't happen; pispbe_schedule_internal should insist
- * on an input.
- */
+ /* Shouldn't happen; we have validated that an input is available. */
dev_warn(pispbe->dev, "ISP-BE missing input\n");
hw_en->bayer_enables = 0;
hw_en->rgb_enables = 0;
@@ -443,42 +444,48 @@ static void pispbe_xlate_addrs(struct pispbe_dev *pispbe,
* For Output0, Output1, Tdn and Stitch, a buffer only needs to be
* available if the blocks are enabled in the config.
*
- * Needs to be called with hw_lock held.
+ * If all the buffers required to form a job are available, append the
+ * job descriptor to the job queue to be later queued to the HW.
*
* Returns 0 if a job has been successfully prepared, < 0 otherwise.
*/
-static int pispbe_prepare_job(struct pispbe_dev *pispbe,
- struct pispbe_job_descriptor *job)
+static int pispbe_prepare_job(struct pispbe_dev *pispbe)
{
+ struct pispbe_job_descriptor __free(kfree) *job = NULL;
struct pispbe_buffer *buf[PISPBE_NUM_NODES] = {};
+ unsigned int streaming_map;
unsigned int config_index;
struct pispbe_node *node;
- unsigned long flags;
- lockdep_assert_held(&pispbe->hw_lock);
+ lockdep_assert_irqs_enabled();
- memset(job, 0, sizeof(struct pispbe_job_descriptor));
+ scoped_guard(spinlock_irq, &pispbe->hw_lock) {
+ static const u32 mask = BIT(CONFIG_NODE) | BIT(MAIN_INPUT_NODE);
- if (((BIT(CONFIG_NODE) | BIT(MAIN_INPUT_NODE)) &
- pispbe->streaming_map) !=
- (BIT(CONFIG_NODE) | BIT(MAIN_INPUT_NODE)))
- return -ENODEV;
+ if ((pispbe->streaming_map & mask) != mask)
+ return -ENODEV;
+
+ /*
+ * Take a copy of streaming_map: nodes activated after this
+ * point are ignored when preparing this job.
+ */
+ streaming_map = pispbe->streaming_map;
+ }
+
+ job = kzalloc(sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
node = &pispbe->node[CONFIG_NODE];
- spin_lock_irqsave(&node->ready_lock, flags);
buf[CONFIG_NODE] = list_first_entry_or_null(&node->ready_queue,
struct pispbe_buffer,
ready_list);
- if (buf[CONFIG_NODE]) {
- list_del(&buf[CONFIG_NODE]->ready_list);
- pispbe->queued_job.buf[CONFIG_NODE] = buf[CONFIG_NODE];
- }
- spin_unlock_irqrestore(&node->ready_lock, flags);
-
- /* Exit early if no config buffer has been queued. */
if (!buf[CONFIG_NODE])
return -ENODEV;
+ list_del(&buf[CONFIG_NODE]->ready_list);
+ job->buffers[CONFIG_NODE] = buf[CONFIG_NODE];
+
config_index = buf[CONFIG_NODE]->vb.vb2_buf.index;
job->config = &pispbe->config[config_index];
job->tiles = pispbe->config_dma_addr +
@@ -498,7 +505,7 @@ static int pispbe_prepare_job(struct pispbe_dev *pispbe,
continue;
buf[i] = NULL;
- if (!(pispbe->streaming_map & BIT(i)))
+ if (!(streaming_map & BIT(i)))
continue;
if ((!(rgb_en & PISP_BE_RGB_ENABLE_OUTPUT0) &&
@@ -525,25 +532,28 @@ static int pispbe_prepare_job(struct pispbe_dev *pispbe,
node = &pispbe->node[i];
/* Pull a buffer from each V4L2 queue to form the queued job */
- spin_lock_irqsave(&node->ready_lock, flags);
buf[i] = list_first_entry_or_null(&node->ready_queue,
struct pispbe_buffer,
ready_list);
if (buf[i]) {
list_del(&buf[i]->ready_list);
- pispbe->queued_job.buf[i] = buf[i];
+ job->buffers[i] = buf[i];
}
- spin_unlock_irqrestore(&node->ready_lock, flags);
if (!buf[i] && !ignore_buffers)
goto err_return_buffers;
}
- pispbe->queued_job.valid = true;
-
/* Convert buffers to DMA addresses for the hardware */
pispbe_xlate_addrs(pispbe, job, buf);
+ scoped_guard(spinlock_irq, &pispbe->hw_lock) {
+ list_add_tail(&job->queue, &pispbe->job_queue);
+ }
+
+ /* Set job to NULL to avoid automatic release due to __free(). */
+ job = NULL;
+
return 0;
err_return_buffers:
@@ -554,33 +564,37 @@ err_return_buffers:
continue;
/* Return the buffer to the ready_list queue */
- spin_lock_irqsave(&n->ready_lock, flags);
list_add(&buf[i]->ready_list, &n->ready_queue);
- spin_unlock_irqrestore(&n->ready_lock, flags);
}
- memset(&pispbe->queued_job, 0, sizeof(pispbe->queued_job));
-
return -ENODEV;
}
static void pispbe_schedule(struct pispbe_dev *pispbe, bool clear_hw_busy)
{
- struct pispbe_job_descriptor job;
- unsigned long flags;
- int ret;
+ struct pispbe_job_descriptor *job;
- spin_lock_irqsave(&pispbe->hw_lock, flags);
+ scoped_guard(spinlock_irqsave, &pispbe->hw_lock) {
+ if (clear_hw_busy)
+ pispbe->hw_busy = false;
- if (clear_hw_busy)
- pispbe->hw_busy = false;
+ if (pispbe->hw_busy)
+ return;
- if (pispbe->hw_busy)
- goto unlock_and_return;
+ job = list_first_entry_or_null(&pispbe->job_queue,
+ struct pispbe_job_descriptor,
+ queue);
+ if (!job)
+ return;
- ret = pispbe_prepare_job(pispbe, &job);
- if (ret)
- goto unlock_and_return;
+ list_del(&job->queue);
+
+ for (unsigned int i = 0; i < PISPBE_NUM_NODES; i++)
+ pispbe->queued_job.buf[i] = job->buffers[i];
+ pispbe->queued_job.valid = true;
+
+ pispbe->hw_busy = true;
+ }
/*
* We can kick the job off without the hw_lock, as this can
@@ -588,34 +602,8 @@ static void pispbe_schedule(struct pispbe_dev *pispbe, bool clear_hw_busy)
* only happen when the following job has been queued and an interrupt
* is raised.
*/
- pispbe->hw_busy = true;
- spin_unlock_irqrestore(&pispbe->hw_lock, flags);
-
- if (job.config->num_tiles <= 0 ||
- job.config->num_tiles > PISP_BACK_END_NUM_TILES ||
- !((job.hw_enables.bayer_enables | job.hw_enables.rgb_enables) &
- PISP_BE_BAYER_ENABLE_INPUT)) {
- /*
- * Bad job. We can't let it proceed as it could lock up
- * the hardware, or worse!
- *
- * For now, just force num_tiles to 0, which causes the
- * H/W to do something bizarre but survivable. It
- * increments (started,done) counters by more than 1,
- * but we seem to survive...
- */
- dev_dbg(pispbe->dev, "Bad job: invalid number of tiles: %u\n",
- job.config->num_tiles);
- job.config->num_tiles = 0;
- }
-
- pispbe_queue_job(pispbe, &job);
-
- return;
-
-unlock_and_return:
- /* No job has been queued, just release the lock and return. */
- spin_unlock_irqrestore(&pispbe->hw_lock, flags);
+ pispbe_queue_job(pispbe, job);
+ kfree(job);
}
static void pispbe_isr_jobdone(struct pispbe_dev *pispbe,
@@ -706,6 +694,13 @@ static int pisp_be_validate_config(struct pispbe_dev *pispbe,
return -EIO;
}
+ if (config->num_tiles == 0 ||
+ config->num_tiles > PISP_BACK_END_NUM_TILES) {
+ dev_dbg(dev, "%s: Invalid number of tiles: %d\n", __func__,
+ config->num_tiles);
+ return -EINVAL;
+ }
+
/* Ensure output config strides and buffer sizes match the V4L2 formats. */
fmt = &pispbe->node[TDN_OUTPUT_NODE].format;
if (bayer_enables & PISP_BE_BAYER_ENABLE_TDN_OUTPUT) {
@@ -860,18 +855,16 @@ static void pispbe_node_buffer_queue(struct vb2_buffer *buf)
container_of(vbuf, struct pispbe_buffer, vb);
struct pispbe_node *node = vb2_get_drv_priv(buf->vb2_queue);
struct pispbe_dev *pispbe = node->pispbe;
- unsigned long flags;
dev_dbg(pispbe->dev, "%s: for node %s\n", __func__, NODE_NAME(node));
- spin_lock_irqsave(&node->ready_lock, flags);
list_add_tail(&buffer->ready_list, &node->ready_queue);
- spin_unlock_irqrestore(&node->ready_lock, flags);
/*
* Every time we add a buffer, check if there's now some work for the hw
* to do.
*/
- pispbe_schedule(pispbe, false);
+ if (!pispbe_prepare_job(pispbe))
+ pispbe_schedule(pispbe, false);
}
static int pispbe_node_start_streaming(struct vb2_queue *q, unsigned int count)
@@ -879,17 +872,16 @@ static int pispbe_node_start_streaming(struct vb2_queue *q, unsigned int count)
struct pispbe_node *node = vb2_get_drv_priv(q);
struct pispbe_dev *pispbe = node->pispbe;
struct pispbe_buffer *buf, *tmp;
- unsigned long flags;
int ret;
ret = pm_runtime_resume_and_get(pispbe->dev);
if (ret < 0)
goto err_return_buffers;
- spin_lock_irqsave(&pispbe->hw_lock, flags);
- node->pispbe->streaming_map |= BIT(node->id);
- node->pispbe->sequence = 0;
- spin_unlock_irqrestore(&pispbe->hw_lock, flags);
+ scoped_guard(spinlock_irq, &pispbe->hw_lock) {
+ node->pispbe->streaming_map |= BIT(node->id);
+ node->pispbe->sequence = 0;
+ }
dev_dbg(pispbe->dev, "%s: for node %s (count %u)\n",
__func__, NODE_NAME(node), count);
@@ -897,17 +889,16 @@ static int pispbe_node_start_streaming(struct vb2_queue *q, unsigned int count)
node->pispbe->streaming_map);
/* Maybe we're ready to run. */
- pispbe_schedule(pispbe, false);
+ if (!pispbe_prepare_job(pispbe))
+ pispbe_schedule(pispbe, false);
return 0;
err_return_buffers:
- spin_lock_irqsave(&pispbe->hw_lock, flags);
list_for_each_entry_safe(buf, tmp, &node->ready_queue, ready_list) {
list_del(&buf->ready_list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
}
- spin_unlock_irqrestore(&pispbe->hw_lock, flags);
return ret;
}
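pispbe_prepare_job() above leans on two linux/cleanup.h idioms: __free(kfree) arranges for the descriptor to be freed automatically on any early exit, with a NULL assignment transferring ownership (here, to the job queue), and scoped_guard() bounds the spinlock to the braced block. A self-contained sketch of the pattern, with hypothetical struct and function names:

#include <linux/cleanup.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head queue;
};

static int example_enqueue(spinlock_t *lock, struct list_head *q)
{
	struct item __free(kfree) *it = kzalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return -ENOMEM;	/* 'it' is NULL, so nothing is freed */

	scoped_guard(spinlock_irq, lock) {
		list_add_tail(&it->queue, q);
	}

	/* Ownership moved to the list: suppress the automatic kfree(). */
	it = NULL;
	return 0;
}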
@@ -916,8 +907,9 @@ static void pispbe_node_stop_streaming(struct vb2_queue *q)
{
struct pispbe_node *node = vb2_get_drv_priv(q);
struct pispbe_dev *pispbe = node->pispbe;
+ struct pispbe_job_descriptor *job, *temp;
struct pispbe_buffer *buf;
- unsigned long flags;
+ LIST_HEAD(tmp_list);
/*
* Now this is a bit awkward. In a simple M2M device we could just wait
@@ -929,11 +921,7 @@ static void pispbe_node_stop_streaming(struct vb2_queue *q)
* This may return buffers out of order.
*/
dev_dbg(pispbe->dev, "%s: for node %s\n", __func__, NODE_NAME(node));
- spin_lock_irqsave(&pispbe->hw_lock, flags);
do {
- unsigned long flags1;
-
- spin_lock_irqsave(&node->ready_lock, flags1);
buf = list_first_entry_or_null(&node->ready_queue,
struct pispbe_buffer,
ready_list);
@@ -941,15 +929,26 @@ static void pispbe_node_stop_streaming(struct vb2_queue *q)
list_del(&buf->ready_list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
- spin_unlock_irqrestore(&node->ready_lock, flags1);
} while (buf);
- spin_unlock_irqrestore(&pispbe->hw_lock, flags);
vb2_wait_for_all_buffers(&node->queue);
- spin_lock_irqsave(&pispbe->hw_lock, flags);
+ spin_lock_irq(&pispbe->hw_lock);
pispbe->streaming_map &= ~BIT(node->id);
- spin_unlock_irqrestore(&pispbe->hw_lock, flags);
+
+ if (pispbe->streaming_map == 0) {
+ /*
+ * If all nodes have stopped streaming release all jobs
+ * without holding the lock.
+ */
+ list_splice_init(&pispbe->job_queue, &tmp_list);
+ }
+ spin_unlock_irq(&pispbe->hw_lock);
+
+ list_for_each_entry_safe(job, temp, &tmp_list, queue) {
+ list_del(&job->queue);
+ kfree(job);
+ }
pm_runtime_mark_last_busy(pispbe->dev);
pm_runtime_put_autosuspend(pispbe->dev);
@@ -1114,10 +1113,12 @@ static void pispbe_try_format(struct v4l2_format *f, struct pispbe_node *node)
f->fmt.pix_mp.pixelformat = fmt->fourcc;
f->fmt.pix_mp.num_planes = fmt->num_planes;
f->fmt.pix_mp.field = V4L2_FIELD_NONE;
- f->fmt.pix_mp.width = max(min(f->fmt.pix_mp.width, 65536u),
- PISP_BACK_END_MIN_TILE_WIDTH);
- f->fmt.pix_mp.height = max(min(f->fmt.pix_mp.height, 65536u),
- PISP_BACK_END_MIN_TILE_HEIGHT);
+ f->fmt.pix_mp.width = clamp(f->fmt.pix_mp.width,
+ PISP_BACK_END_MIN_TILE_WIDTH,
+ PISP_BACK_END_MAX_TILE_WIDTH);
+ f->fmt.pix_mp.height = clamp(f->fmt.pix_mp.height,
+ PISP_BACK_END_MIN_TILE_HEIGHT,
+ PISP_BACK_END_MAX_TILE_HEIGHT);
/*
* Fill in the actual colour space when the requested one was
@@ -1407,7 +1408,6 @@ static int pispbe_init_node(struct pispbe_dev *pispbe, unsigned int id)
mutex_init(&node->node_lock);
mutex_init(&node->queue_lock);
INIT_LIST_HEAD(&node->ready_queue);
- spin_lock_init(&node->ready_lock);
node->format.type = node->buf_type;
pispbe_node_def_fmt(node);
@@ -1691,6 +1691,8 @@ static int pispbe_probe(struct platform_device *pdev)
if (!pispbe)
return -ENOMEM;
+ INIT_LIST_HEAD(&pispbe->job_queue);
+
dev_set_drvdata(&pdev->dev, pispbe);
pispbe->dev = &pdev->dev;
platform_set_drvdata(pdev, pispbe);
@@ -1726,7 +1728,7 @@ static int pispbe_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(pispbe->dev);
pm_runtime_enable(pispbe->dev);
- ret = pispbe_runtime_resume(pispbe->dev);
+ ret = pm_runtime_resume_and_get(pispbe->dev);
if (ret)
goto pm_runtime_disable_err;
@@ -1748,7 +1750,7 @@ static int pispbe_probe(struct platform_device *pdev)
disable_devs_err:
pispbe_destroy_devices(pispbe);
pm_runtime_suspend_err:
- pispbe_runtime_suspend(pispbe->dev);
+ pm_runtime_put(pispbe->dev);
pm_runtime_disable_err:
pm_runtime_dont_use_autosuspend(pispbe->dev);
pm_runtime_disable(pispbe->dev);
@@ -1762,7 +1764,6 @@ static void pispbe_remove(struct platform_device *pdev)
pispbe_destroy_devices(pispbe);
- pispbe_runtime_suspend(pispbe->dev);
pm_runtime_dont_use_autosuspend(pispbe->dev);
pm_runtime_disable(pispbe->dev);
}
diff --git a/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c b/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
index fcadb2143c88..62dca76b468d 100644
--- a/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
+++ b/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
@@ -1024,9 +1024,6 @@ static int cfe_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
node->buffer_queue.type);
- if (vq->max_num_buffers + *nbuffers < 3)
- *nbuffers = 3 - vq->max_num_buffers;
-
if (*nplanes) {
if (sizes[0] < size) {
cfe_err(cfe, "sizes[0] %i < size %u\n", sizes[0], size);
@@ -1998,6 +1995,7 @@ static int cfe_register_node(struct cfe_device *cfe, int id)
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &node->lock;
q->min_queued_buffers = 1;
+ q->min_reqbufs_allocation = 3;
q->dev = &cfe->pdev->dev;
ret = vb2_queue_init(q);
diff --git a/drivers/media/platform/renesas/rcar-csi2.c b/drivers/media/platform/renesas/rcar-csi2.c
index 9979de4f6ef1..d1b31ab8b8c4 100644
--- a/drivers/media/platform/renesas/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-csi2.c
@@ -172,20 +172,27 @@ struct rcar_csi2;
#define V4H_PPI_RW_LPDCOCAL_TWAIT_CONFIG_REG 0x21c0a
#define V4H_PPI_RW_LPDCOCAL_VT_CONFIG_REG 0x21c0c
#define V4H_PPI_RW_LPDCOCAL_COARSE_CFG_REG 0x21c10
+#define V4H_PPI_RW_DDLCAL_CFG_n_REG(n) (0x21c40 + ((n) * 2)) /* n = 0 - 7 */
#define V4H_PPI_RW_COMMON_CFG_REG 0x21c6c
#define V4H_PPI_RW_TERMCAL_CFG_0_REG 0x21c80
#define V4H_PPI_RW_OFFSETCAL_CFG_0_REG 0x21ca0
/* V4H CORE registers */
-#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(n) (0x22040 + ((n) * 2)) /* n = 0 - 15 */
-#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_REG(n) (0x22440 + ((n) * 2)) /* n = 0 - 15 */
-#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_REG(n) (0x22840 + ((n) * 2)) /* n = 0 - 15 */
-#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_REG(n) (0x22c40 + ((n) * 2)) /* n = 0 - 15 */
-#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_REG(n) (0x23040 + ((n) * 2)) /* n = 0 - 15 */
+
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(l, n) (0x22040 + ((l) * 0x400) + ((n) * 2))
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_3_REG(l, n) (0x22060 + ((l) * 0x400) + ((n) * 2))
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_4_REG(l, n) (0x22080 + ((l) * 0x400) + ((n) * 2))
+
#define V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(n) (0x23840 + ((n) * 2)) /* n = 0 - 11 */
#define V4H_CORE_DIG_RW_COMMON_REG(n) (0x23880 + ((n) * 2)) /* n = 0 - 15 */
#define V4H_CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_REG(n) (0x239e0 + ((n) * 2)) /* n = 0 - 3 */
-#define V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG 0x2a60c
+#define V4H_CORE_DIG_COMMON_RW_DESKEW_FINE_MEM_REG 0x23fe0
+
+#define V4H_CORE_DIG_DLANE_l_RW_CFG_n_REG(l, n) (0x26000 + ((l) * 0x400) + ((n) * 2))
+#define V4H_CORE_DIG_DLANE_l_RW_LP_n_REG(l, n) (0x26080 + ((l) * 0x400) + ((n) * 2))
+#define V4H_CORE_DIG_DLANE_l_RW_HS_RX_n_REG(l, n) (0x26100 + ((l) * 0x400) + ((n) * 2))
+#define V4H_CORE_DIG_DLANE_CLK_RW_LP_n_REG(n) V4H_CORE_DIG_DLANE_l_RW_LP_n_REG(4, (n))
+#define V4H_CORE_DIG_DLANE_CLK_RW_HS_RX_n_REG(n) V4H_CORE_DIG_DLANE_l_RW_HS_RX_n_REG(4, (n))
/* V4H C-PHY */
#define V4H_CORE_DIG_RW_TRIO0_REG(n) (0x22100 + ((n) * 2)) /* n = 0 - 3 */
@@ -197,6 +204,7 @@ struct rcar_csi2;
#define V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG 0x2a400
#define V4H_CORE_DIG_CLANE_1_RW_LP_0_REG 0x2a480
#define V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(n) (0x2a500 + ((n) * 2)) /* n = 0 - 6 */
+#define V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG 0x2a60c
#define V4H_CORE_DIG_CLANE_2_RW_CFG_0_REG 0x2a800
#define V4H_CORE_DIG_CLANE_2_RW_LP_0_REG 0x2a880
#define V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(n) (0x2a900 + ((n) * 2)) /* n = 0 - 6 */
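A quick standalone check that the new lane-parameterized macro above covers the five per-lane constants it replaces: lane l adds a 0x400 stride to the lane-0 base, and n indexes 16-bit registers.

#include <assert.h>

#define AFE_LANEl_CTRL_2_REG(l, n) (0x22040 + ((l) * 0x400) + ((n) * 2))

int main(void)
{
	assert(AFE_LANEl_CTRL_2_REG(0, 0) == 0x22040); /* old LANE0 macro */
	assert(AFE_LANEl_CTRL_2_REG(1, 0) == 0x22440); /* old LANE1 macro */
	assert(AFE_LANEl_CTRL_2_REG(4, 2) == 0x23044); /* old LANE4 macro, n = 2 */
	return 0;
}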
@@ -954,6 +962,7 @@ static int rcsi2_set_phypll(struct rcar_csi2 *priv, unsigned int mbps)
static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp,
unsigned int lanes)
{
+ struct media_pad *remote_pad;
struct v4l2_subdev *source;
s64 freq;
u64 mbps;
@@ -962,8 +971,9 @@ static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp,
return -ENODEV;
source = priv->remote;
+ remote_pad = &source->entity.pads[priv->remote_pad];
- freq = v4l2_get_link_freq(source->ctrl_handler, bpp, 2 * lanes);
+ freq = v4l2_get_link_freq(remote_pad, bpp, 2 * lanes);
if (freq < 0) {
int ret = (int)freq;
@@ -975,10 +985,6 @@ static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp,
mbps = div_u64(freq * 2, MEGA);
- /* Adjust for C-PHY, divide by 2.8. */
- if (priv->cphy)
- mbps = div_u64(mbps * 5, 14);
-
return mbps;
}
@@ -1203,9 +1209,14 @@ static int rcsi2_wait_phy_start_v4h(struct rcar_csi2 *priv, u32 match)
return -ETIMEDOUT;
}
-static int rcsi2_c_phy_setting_v4h(struct rcar_csi2 *priv, int msps)
+static const struct rcsi2_cphy_setting *
+rcsi2_c_phy_setting_v4h(struct rcar_csi2 *priv, int mbps)
{
const struct rcsi2_cphy_setting *conf;
+ int msps;
+
+ /* Adjust for C-PHY symbols, divide by 2.8. */
+ msps = div_u64(mbps * 5, 14);
for (conf = cphy_setting_table_r8a779g0; conf->msps != 0; conf++) {
if (conf->msps > msps)
@@ -1214,7 +1225,7 @@ static int rcsi2_c_phy_setting_v4h(struct rcar_csi2 *priv, int msps)
if (!conf->msps) {
dev_err(priv->dev, "Unsupported PHY speed for msps setting (%u Msps)", msps);
- return -ERANGE;
+ return NULL;
}
/* C-PHY specific */
@@ -1246,11 +1257,11 @@ static int rcsi2_c_phy_setting_v4h(struct rcar_csi2 *priv, int msps)
rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(2), conf->rx2);
rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(2), conf->rx2);
- rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(2), 0x0001);
- rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_REG(2), 0);
- rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_REG(2), 0x0001);
- rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_REG(2), 0x0001);
- rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_REG(2), 0);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(0, 2), 0x0001);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(1, 2), 0);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(2, 2), 0x0001);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(3, 2), 0x0001);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(4, 2), 0);
rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO0_REG(0), conf->trio0);
rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO1_REG(0), conf->trio0);
@@ -1267,30 +1278,198 @@ static int rcsi2_c_phy_setting_v4h(struct rcar_csi2 *priv, int msps)
/* Configure data line order. */
rsci2_set_line_order(priv, priv->line_orders[0],
V4H_CORE_DIG_CLANE_0_RW_CFG_0_REG,
- V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(9));
+ V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(0, 9));
rsci2_set_line_order(priv, priv->line_orders[1],
V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG,
- V4H_CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_REG(9));
+ V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(1, 9));
rsci2_set_line_order(priv, priv->line_orders[2],
V4H_CORE_DIG_CLANE_2_RW_CFG_0_REG,
- V4H_CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_REG(9));
+ V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(2, 9));
/* TODO: This register is not documented. */
rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG, 0x5000);
- /* Leave Shutdown mode */
- rcsi2_write(priv, V4H_DPHY_RSTZ_REG, BIT(0));
- rcsi2_write(priv, V4H_PHY_SHUTDOWNZ_REG, BIT(0));
+ return conf;
+}
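
The mbps-to-msps step moved into this function is integer arithmetic: multiplying by 5/14 is the same as dividing by 2.8, the bits-per-symbol figure the comment cites for C-PHY. A quick check with example numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long mbps = 5600;
        unsigned long long msps = mbps * 5 / 14;   /* == mbps / 2.8 */
        printf("%llu Msps\n", msps);               /* prints 2000 */
        return 0;
    }
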
- /* Wait for calibration */
- if (rcsi2_wait_phy_start_v4h(priv, V4H_ST_PHYST_ST_PHY_READY)) {
- dev_err(priv->dev, "PHY calibration failed\n");
- return -ETIMEDOUT;
+struct rcsi2_d_phy_setting_v4h_lut_value {
+ unsigned int mbps;
+ unsigned char cfg_1;
+ unsigned char cfg_5_94;
+ unsigned char cfg_5_30;
+ unsigned char lane_ctrl_2_8;
+ unsigned char rw_hs_rx_3_83;
+ unsigned char rw_hs_rx_3_20;
+ unsigned char rw_hs_rx_6;
+ unsigned char rw_hs_rx_1;
+};
+
+static const struct rcsi2_d_phy_setting_v4h_lut_value *
+rcsi2_d_phy_setting_v4h_lut_lookup(int mbps)
+{
+ static const struct rcsi2_d_phy_setting_v4h_lut_value values[] = {
+ { 4500, 0x3f, 0x07, 0x00, 0x01, 0x02, 0x01, 0x0d, 0x10 },
+ { 4000, 0x47, 0x08, 0x01, 0x01, 0x05, 0x01, 0x0f, 0x0d },
+ { 3600, 0x4f, 0x09, 0x01, 0x01, 0x06, 0x01, 0x10, 0x0b },
+ { 3230, 0x57, 0x0a, 0x01, 0x01, 0x06, 0x01, 0x12, 0x09 },
+ { 3000, 0x47, 0x08, 0x00, 0x00, 0x03, 0x01, 0x0f, 0x0c },
+ { 2700, 0x4f, 0x09, 0x01, 0x00, 0x06, 0x01, 0x10, 0x0b },
+ { 2455, 0x57, 0x0a, 0x01, 0x00, 0x06, 0x01, 0x12, 0x09 },
+ { 2250, 0x5f, 0x0b, 0x01, 0x00, 0x08, 0x01, 0x13, 0x08 },
+ { 2077, 0x67, 0x0c, 0x01, 0x00, 0x06, 0x02, 0x15, 0x0d },
+ { 1929, 0x6f, 0x0d, 0x02, 0x00, 0x06, 0x02, 0x17, 0x0d },
+ { 1800, 0x77, 0x0e, 0x02, 0x00, 0x06, 0x02, 0x18, 0x0d },
+ { 1688, 0x7f, 0x0f, 0x02, 0x00, 0x08, 0x02, 0x1a, 0x0d },
+ { 1588, 0x87, 0x10, 0x02, 0x00, 0x08, 0x02, 0x1b, 0x0d },
+ { 1500, 0x8f, 0x11, 0x03, 0x00, 0x08, 0x02, 0x1d, 0x0c },
+ };
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(values); i++)
+ if (mbps >= values[i].mbps)
+ return &values[i];
+
+ return NULL;
+}
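
The table is ordered by descending mbps, so the scan returns the first (highest) row whose threshold the requested rate meets; anything below 1500 Mbps yields NULL and the caller simply skips the LUT-driven writes. A minimal restatement of the same first-match scan (thresholds copied from the table above):

    /* First-match scan over a descending threshold table. */
    static const unsigned int thresholds[] = {
        4500, 4000, 3600, 3230, 3000, 2700, 2455,
        2250, 2077, 1929, 1800, 1688, 1588, 1500,
    };

    int pick_row(unsigned int mbps)
    {
        for (unsigned int i = 0; i < sizeof(thresholds) / sizeof(thresholds[0]); i++)
            if (mbps >= thresholds[i])
                return (int)i;
        return -1;   /* below 1500 Mbps: no row applies */
    }
    /* pick_row(2000) == 9 (the 1929 row); pick_row(1000) == -1 */
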
+
+static int rcsi2_d_phy_setting_v4h(struct rcar_csi2 *priv, int mbps)
+{
+ const struct rcsi2_d_phy_setting_v4h_lut_value *lut =
+ rcsi2_d_phy_setting_v4h_lut_lookup(mbps);
+ u16 val;
+
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_COMMON_REG(7), 0x0000);
+ rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(7), mbps > 1500 ? 0x0028 : 0x0068);
+ rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(8), 0x0050);
+ rcsi2_write16(priv, V4H_PPI_RW_DDLCAL_CFG_n_REG(0), 0x0063);
+ rcsi2_write16(priv, V4H_PPI_RW_DDLCAL_CFG_n_REG(7), 0x1132);
+ rcsi2_write16(priv, V4H_PPI_RW_DDLCAL_CFG_n_REG(1), 0x1340);
+ rcsi2_write16(priv, V4H_PPI_RW_DDLCAL_CFG_n_REG(2), 0x4b13);
+ rcsi2_write16(priv, V4H_PPI_RW_DDLCAL_CFG_n_REG(4), 0x000a);
+ rcsi2_write16(priv, V4H_PPI_RW_DDLCAL_CFG_n_REG(6), 0x800a);
+ rcsi2_write16(priv, V4H_PPI_RW_DDLCAL_CFG_n_REG(7), 0x1109);
+
+ if (mbps > 1500) {
+ val = DIV_ROUND_UP(5 * mbps, 64);
+ rcsi2_write16(priv, V4H_PPI_RW_DDLCAL_CFG_n_REG(3), val);
+ }
+
+ if (lut) {
+ rcsi2_modify16(priv, V4H_PPI_RW_DDLCAL_CFG_n_REG(1),
+ lut->cfg_1, 0x00ff);
+ rcsi2_modify16(priv, V4H_PPI_RW_DDLCAL_CFG_n_REG(5),
+ lut->cfg_5_94 << 4, 0x03f0);
+ rcsi2_modify16(priv, V4H_PPI_RW_DDLCAL_CFG_n_REG(5),
+ lut->cfg_5_30 << 0, 0x000f);
+
+ for (unsigned int l = 0; l < 5; l++)
+ rcsi2_modify16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(l, 8),
+ lut->lane_ctrl_2_8 << 12, 0x1000);
+ }
+
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_write16(priv, V4H_CORE_DIG_DLANE_l_RW_LP_n_REG(l, 0), 0x463c);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(0, 2), 0x0000);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(1, 2), 0x0000);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(2, 2), 0x0001);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(3, 2), 0x0000);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(4, 2), 0x0000);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_COMMON_REG(6), 0x0009);
+
+ val = mbps > 1500 ? 0x0800 : 0x0802;
+ for (unsigned int l = 0; l < 5; l++)
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(l, 12), val);
+
+ val = mbps > 1500 ? 0x0000 : 0x0002;
+ for (unsigned int l = 0; l < 5; l++)
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(l, 13), val);
+
+ if (mbps >= 80) {
+ /* 2560: 6, 1280: 5, 640: 4, 320: 3, 160: 2, 80: 1 */
+ val = ilog2(mbps / 80) + 1;
+ rcsi2_modify16(priv,
+ V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(2, 9),
+ val << 5, 0xe0);
+ }
+
+ rcsi2_write16(priv, V4H_CORE_DIG_DLANE_CLK_RW_HS_RX_n_REG(0), 0x091c);
+ rcsi2_write16(priv, V4H_CORE_DIG_DLANE_CLK_RW_HS_RX_n_REG(7), 0x3b06);
+
+ val = DIV_ROUND_UP(1200, mbps) + 12;
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_modify16(priv, V4H_CORE_DIG_DLANE_l_RW_HS_RX_n_REG(l, 0), val << 8, 0xf0);
+
+ val = mbps > 1500 ? 0x0004 : 0x0008;
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_write16(priv, V4H_CORE_DIG_DLANE_l_RW_CFG_n_REG(l, 1), val);
+
+ val = mbps > 2500 ? 0x669a : mbps > 1500 ? 0xe69a : 0xe69b;
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_write16(priv, V4H_CORE_DIG_DLANE_l_RW_HS_RX_n_REG(l, 2), val);
+
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_write16(priv, V4H_CORE_DIG_DLANE_l_RW_LP_n_REG(l, 0), 0x163c);
+ rcsi2_write16(priv, V4H_CORE_DIG_DLANE_CLK_RW_LP_n_REG(0), 0x163c);
+
+ if (lut) {
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_modify16(priv, V4H_CORE_DIG_DLANE_l_RW_HS_RX_n_REG(l, 1),
+ lut->rw_hs_rx_1, 0xff);
+ }
+
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_write16(priv, V4H_CORE_DIG_DLANE_l_RW_HS_RX_n_REG(l, 3), 0x9209);
+
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_write16(priv, V4H_CORE_DIG_DLANE_l_RW_HS_RX_n_REG(l, 4), 0x0096);
+
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_write16(priv, V4H_CORE_DIG_DLANE_l_RW_HS_RX_n_REG(l, 5), 0x0100);
+
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_write16(priv, V4H_CORE_DIG_DLANE_l_RW_HS_RX_n_REG(l, 6), 0x2d02);
+
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_write16(priv, V4H_CORE_DIG_DLANE_l_RW_HS_RX_n_REG(l, 7), 0x1b06);
+
+ if (lut) {
+ /*
+ * The documentation LUT has two values, but the document describes
+ * writing both values in a single write.
+ */
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_modify16(priv, V4H_CORE_DIG_DLANE_l_RW_HS_RX_n_REG(l, 3),
+ lut->rw_hs_rx_3_83 << 3 | lut->rw_hs_rx_3_20, 0x1ff);
+
+ for (unsigned int l = 0; l < 4; l++)
+ rcsi2_modify16(priv, V4H_CORE_DIG_DLANE_l_RW_HS_RX_n_REG(l, 6),
+ lut->rw_hs_rx_6 << 8, 0xff00);
}
- /* C-PHY setting - analog programing*/
- rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(9), conf->lane29);
- rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(7), conf->lane27);
+ static const u16 deskew_fine[] = {
+ 0x0404, 0x040c, 0x0414, 0x041c, 0x0423, 0x0429, 0x0430, 0x043a,
+ 0x0445, 0x044a, 0x0450, 0x045a, 0x0465, 0x0469, 0x0472, 0x047a,
+ 0x0485, 0x0489, 0x0490, 0x049a, 0x04a4, 0x04ac, 0x04b4, 0x04bc,
+ 0x04c4, 0x04cc, 0x04d4, 0x04dc, 0x04e4, 0x04ec, 0x04f4, 0x04fc,
+ 0x0504, 0x050c, 0x0514, 0x051c, 0x0523, 0x0529, 0x0530, 0x053a,
+ 0x0545, 0x054a, 0x0550, 0x055a, 0x0565, 0x0569, 0x0572, 0x057a,
+ 0x0585, 0x0589, 0x0590, 0x059a, 0x05a4, 0x05ac, 0x05b4, 0x05bc,
+ 0x05c4, 0x05cc, 0x05d4, 0x05dc, 0x05e4, 0x05ec, 0x05f4, 0x05fc,
+ 0x0604, 0x060c, 0x0614, 0x061c, 0x0623, 0x0629, 0x0632, 0x063a,
+ 0x0645, 0x064a, 0x0650, 0x065a, 0x0665, 0x0669, 0x0672, 0x067a,
+ 0x0685, 0x0689, 0x0690, 0x069a, 0x06a4, 0x06ac, 0x06b4, 0x06bc,
+ 0x06c4, 0x06cc, 0x06d4, 0x06dc, 0x06e4, 0x06ec, 0x06f4, 0x06fc,
+ 0x0704, 0x070c, 0x0714, 0x071c, 0x0723, 0x072a, 0x0730, 0x073a,
+ 0x0745, 0x074a, 0x0750, 0x075a, 0x0765, 0x0769, 0x0772, 0x077a,
+ 0x0785, 0x0789, 0x0790, 0x079a, 0x07a4, 0x07ac, 0x07b4, 0x07bc,
+ 0x07c4, 0x07cc, 0x07d4, 0x07dc, 0x07e4, 0x07ec, 0x07f4, 0x07fc,
+ };
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(deskew_fine); i++) {
+ rcsi2_write16(priv, V4H_CORE_DIG_COMMON_RW_DESKEW_FINE_MEM_REG,
+ deskew_fine[i]);
+ }
return 0;
}
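
One computation above is easy to verify in isolation: the rate code written into bits [7:5] of the lane-2 CTRL_2(9) field follows the comment's mapping (80 Mbps -> 1 through 2560 Mbps -> 6), with intermediate rates flooring to the lower step. A portable stand-in for the kernel's ilog2() shows it:

    #include <stdio.h>

    /* Portable stand-in for the kernel's ilog2() (non-zero input). */
    static unsigned int ilog2_u(unsigned int v)
    {
        unsigned int r = 0;
        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        const unsigned int rates[] = { 80, 160, 320, 640, 1280, 2560 };
        for (unsigned int i = 0; i < 6; i++)
            printf("%u Mbps -> %u\n", rates[i], ilog2_u(rates[i] / 80) + 1);
        return 0;   /* prints 1 through 6, matching the in-code comment */
    }
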
@@ -1298,10 +1477,11 @@ static int rcsi2_c_phy_setting_v4h(struct rcar_csi2 *priv, int msps)
static int rcsi2_start_receiver_v4h(struct rcar_csi2 *priv,
struct v4l2_subdev_state *state)
{
+ const struct rcsi2_cphy_setting *cphy = NULL;
const struct rcar_csi2_format *format;
const struct v4l2_mbus_framefmt *fmt;
unsigned int lanes;
- int msps;
+ int mbps;
int ret;
/* Use the format on the sink pad to compute the receiver config. */
@@ -1314,28 +1494,40 @@ static int rcsi2_start_receiver_v4h(struct rcar_csi2 *priv,
if (ret)
return ret;
- msps = rcsi2_calc_mbps(priv, format->bpp, lanes);
- if (msps < 0)
- return msps;
+ mbps = rcsi2_calc_mbps(priv, format->bpp, lanes);
+ if (mbps < 0)
+ return mbps;
- /* Reset LINK and PHY*/
+ /* T0: Reset LINK and PHY */
rcsi2_write(priv, V4H_CSI2_RESETN_REG, 0);
rcsi2_write(priv, V4H_DPHY_RSTZ_REG, 0);
rcsi2_write(priv, V4H_PHY_SHUTDOWNZ_REG, 0);
- /* PHY static setting */
- rcsi2_write(priv, V4H_PHY_EN_REG, V4H_PHY_EN_ENABLE_CLK);
+ /* T1: PHY static setting */
+ rcsi2_write(priv, V4H_PHY_EN_REG, V4H_PHY_EN_ENABLE_CLK |
+ V4H_PHY_EN_ENABLE_0 | V4H_PHY_EN_ENABLE_1 |
+ V4H_PHY_EN_ENABLE_2 | V4H_PHY_EN_ENABLE_3);
rcsi2_write(priv, V4H_FLDC_REG, 0);
rcsi2_write(priv, V4H_FLDD_REG, 0);
rcsi2_write(priv, V4H_IDIC_REG, 0);
- rcsi2_write(priv, V4H_PHY_MODE_REG, V4H_PHY_MODE_CPHY);
+ rcsi2_write(priv, V4H_PHY_MODE_REG,
+ priv->cphy ? V4H_PHY_MODE_CPHY : V4H_PHY_MODE_DPHY);
rcsi2_write(priv, V4H_N_LANES_REG, lanes - 1);
- /* Reset CSI2 */
+ rcsi2_write(priv, V4M_FRXM_REG,
+ V4M_FRXM_FORCERXMODE_0 | V4M_FRXM_FORCERXMODE_1 |
+ V4M_FRXM_FORCERXMODE_2 | V4M_FRXM_FORCERXMODE_3);
+ rcsi2_write(priv, V4M_OVR1_REG,
+ V4M_OVR1_FORCERXMODE_0 | V4M_OVR1_FORCERXMODE_1 |
+ V4M_OVR1_FORCERXMODE_2 | V4M_OVR1_FORCERXMODE_3);
+
+ /* T2: Reset CSI2 */
rcsi2_write(priv, V4H_CSI2_RESETN_REG, BIT(0));
/* Registers static setting through APB */
/* Common setting */
+ rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(10), 0x0030);
+ rcsi2_write16(priv, V4H_CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_REG(2), 0x1444);
rcsi2_write16(priv, V4H_CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_REG(0), 0x1bfd);
rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_STARTUP_1_1_REG, 0x0233);
rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(6), 0x0027);
@@ -1350,20 +1542,71 @@ static int rcsi2_start_receiver_v4h(struct rcar_csi2 *priv,
rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_COARSE_CFG_REG, 0x0105);
rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(6), 0x1000);
rcsi2_write16(priv, V4H_PPI_RW_COMMON_CFG_REG, 0x0003);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(0), 0x0000);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(1), 0x0400);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(3), 0x41f6);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(0), 0x0000);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(3), 0x43f6);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(6), 0x3000);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(7), 0x0000);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(6), 0x7000);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(7), 0x0000);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(5), 0x4000);
+
+ /* T3: PHY settings */
+ if (priv->cphy) {
+ cphy = rcsi2_c_phy_setting_v4h(priv, mbps);
+ if (!cphy)
+ return -ERANGE;
+ } else {
+ ret = rcsi2_d_phy_setting_v4h(priv, mbps);
+ if (ret)
+ return ret;
+ }
- /* C-PHY settings */
- ret = rcsi2_c_phy_setting_v4h(priv, msps);
- if (ret)
- return ret;
+ /* T4: Leave Shutdown mode */
+ rcsi2_write(priv, V4H_DPHY_RSTZ_REG, BIT(0));
+ rcsi2_write(priv, V4H_PHY_SHUTDOWNZ_REG, BIT(0));
+
+ /* T5: Wait for calibration */
+ if (rcsi2_wait_phy_start_v4h(priv, V4H_ST_PHYST_ST_PHY_READY)) {
+ dev_err(priv->dev, "PHY calibration failed\n");
+ return -ETIMEDOUT;
+ }
+
+ /* T6: Analog programming */
+ if (priv->cphy) {
+ for (unsigned int l = 0; l < 3; l++) {
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(l, 9),
+ cphy->lane29);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(l, 7),
+ cphy->lane27);
+ }
+ } else {
+ u16 val_2_9 = mbps > 2500 ? 0x14 : mbps > 1500 ? 0x04 : 0x00;
+ u16 val_2_15 = mbps > 1500 ? 0x03 : 0x00;
+
+ for (unsigned int l = 0; l < 5; l++) {
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(l, 9),
+ val_2_9);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANEl_CTRL_2_REG(l, 15),
+ val_2_15);
+ }
+ }
+ /* T7: Wait for stop state */
rcsi2_wait_phy_start_v4h(priv, V4H_ST_PHYST_ST_STOPSTATE_0 |
V4H_ST_PHYST_ST_STOPSTATE_1 |
- V4H_ST_PHYST_ST_STOPSTATE_2);
+ V4H_ST_PHYST_ST_STOPSTATE_2 |
+ V4H_ST_PHYST_ST_STOPSTATE_3);
+
+ /* T8: De-assert FRXM */
+ rcsi2_write(priv, V4M_FRXM_REG, 0);
return 0;
}
-static int rcsi2_d_phy_setting_v4m(struct rcar_csi2 *priv, int data_rate)
+static int rcsi2_d_phy_setting_v4m(struct rcar_csi2 *priv, int mbps)
{
unsigned int timeout;
int ret;
@@ -2213,6 +2456,7 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a779g0 = {
.start_receiver = rcsi2_start_receiver_v4h,
.use_isp = true,
.support_cphy = true,
+ .support_dphy = true,
};
static const struct rcsi2_register_layout rcsi2_registers_v4m = {
diff --git a/drivers/media/platform/renesas/rcar-fcp.c b/drivers/media/platform/renesas/rcar-fcp.c
index cee9bbce4e3a..f90c86bbce6e 100644
--- a/drivers/media/platform/renesas/rcar-fcp.c
+++ b/drivers/media/platform/renesas/rcar-fcp.c
@@ -9,6 +9,8 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -19,14 +21,25 @@
#include <media/rcar-fcp.h>
+#define RCAR_FCP_REG_RST 0x0010
+#define RCAR_FCP_REG_RST_SOFTRST BIT(0)
+#define RCAR_FCP_REG_STA 0x0018
+#define RCAR_FCP_REG_STA_ACT BIT(0)
+
struct rcar_fcp_device {
struct list_head list;
struct device *dev;
+ void __iomem *base;
};
static LIST_HEAD(fcp_devices);
static DEFINE_MUTEX(fcp_lock);
+static inline void rcar_fcp_write(struct rcar_fcp_device *fcp, u32 reg, u32 val)
+{
+ iowrite32(val, fcp->base + reg);
+}
+
/* -----------------------------------------------------------------------------
* Public API
*/
@@ -117,6 +130,25 @@ void rcar_fcp_disable(struct rcar_fcp_device *fcp)
}
EXPORT_SYMBOL_GPL(rcar_fcp_disable);
+int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp)
+{
+ u32 value;
+ int ret;
+
+ if (!fcp)
+ return 0;
+
+ rcar_fcp_write(fcp, RCAR_FCP_REG_RST, RCAR_FCP_REG_RST_SOFTRST);
+ ret = readl_poll_timeout(fcp->base + RCAR_FCP_REG_STA,
+ value, !(value & RCAR_FCP_REG_STA_ACT),
+ 1, 100);
+ if (ret)
+ dev_err(fcp->dev, "Failed to soft-reset\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_soft_reset);
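
A client driver would presumably pair the new helper with the existing rcar-fcp API along these lines (hypothetical sketch; rcar_fcp_enable()/rcar_fcp_disable() are the pre-existing <media/rcar-fcp.h> API, rcar_fcp_soft_reset() is the helper added above, and the error handling is illustrative):

    /* Hypothetical client-side use of the soft-reset helper. */
    ret = rcar_fcp_enable(fcp);
    if (ret < 0)
        return ret;

    /* Polls STA until the ACT bit clears, for at most 100 us. */
    ret = rcar_fcp_soft_reset(fcp);
    if (ret) {
        rcar_fcp_disable(fcp);
        return ret;
    }
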
+
/* -----------------------------------------------------------------------------
* Platform Driver
*/
@@ -131,6 +163,10 @@ static int rcar_fcp_probe(struct platform_device *pdev)
fcp->dev = &pdev->dev;
+ fcp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(fcp->base))
+ return PTR_ERR(fcp->base);
+
dma_set_max_seg_size(fcp->dev, UINT_MAX);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
index 846ae7989b1d..f73729f59671 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
@@ -2,14 +2,14 @@
/*
* Driver for Renesas R-Car VIN
*
+ * Copyright (C) 2025 Niklas Söderlund <niklas.soderlund@ragnatech.se>
* Copyright (C) 2016 Renesas Electronics Corp.
* Copyright (C) 2011-2013 Renesas Solutions Corp.
* Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
* Copyright (C) 2008 Magnus Damm
- *
- * Based on the soc-camera rcar_vin driver
*/
+#include <linux/idr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -55,6 +55,7 @@
* be only one group for all instances.
*/
+static DEFINE_IDA(rvin_ida);
static DEFINE_MUTEX(rvin_group_lock);
static struct rvin_group *rvin_group_data;
@@ -65,7 +66,7 @@ static void rvin_group_cleanup(struct rvin_group *group)
}
static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin,
- int (*link_setup)(struct rvin_dev *),
+ int (*link_setup)(struct rvin_group *),
const struct media_device_ops *ops)
{
struct media_device *mdev = &group->mdev;
@@ -115,27 +116,12 @@ static void rvin_group_release(struct kref *kref)
}
static int rvin_group_get(struct rvin_dev *vin,
- int (*link_setup)(struct rvin_dev *),
+ int (*link_setup)(struct rvin_group *),
const struct media_device_ops *ops)
{
struct rvin_group *group;
- u32 id;
int ret;
- /* Make sure VIN id is present and sane */
- ret = of_property_read_u32(vin->dev->of_node, "renesas,id", &id);
- if (ret) {
- vin_err(vin, "%pOF: No renesas,id property found\n",
- vin->dev->of_node);
- return -EINVAL;
- }
-
- if (id >= RCAR_VIN_NUM) {
- vin_err(vin, "%pOF: Invalid renesas,id '%u'\n",
- vin->dev->of_node, id);
- return -EINVAL;
- }
-
/* Join or create a VIN group */
mutex_lock(&rvin_group_lock);
if (rvin_group_data) {
@@ -156,6 +142,7 @@ static int rvin_group_get(struct rvin_dev *vin,
}
kref_init(&group->refcount);
+ group->info = vin->info;
rvin_group_data = group;
}
@@ -164,16 +151,15 @@ static int rvin_group_get(struct rvin_dev *vin,
/* Add VIN to group */
mutex_lock(&group->lock);
- if (group->vin[id]) {
- vin_err(vin, "Duplicate renesas,id property value %u\n", id);
+ if (group->vin[vin->id]) {
+ vin_err(vin, "Duplicate renesas,id property value %u\n", vin->id);
mutex_unlock(&group->lock);
kref_put(&group->refcount, rvin_group_release);
return -EINVAL;
}
- group->vin[id] = vin;
+ group->vin[vin->id] = vin;
- vin->id = id;
vin->group = group;
vin->v4l2_dev.mdev = &group->mdev;
@@ -213,7 +199,7 @@ static int rvin_group_entity_to_remote_id(struct rvin_group *group,
sd = media_entity_to_v4l2_subdev(entity);
- for (i = 0; i < RVIN_REMOTES_MAX; i++)
+ for (i = 0; i < ARRAY_SIZE(group->remotes); i++)
if (group->remotes[i].subdev == sd)
return i;
@@ -246,7 +232,7 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
}
}
- return vin->group->link_setup(vin);
+ return vin->group->link_setup(vin->group);
}
static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
@@ -254,20 +240,32 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_async_connection *asc)
{
struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
- unsigned int i;
+ struct rvin_group *group = vin->group;
- for (i = 0; i < RCAR_VIN_NUM; i++)
- if (vin->group->vin[i])
- rvin_v4l2_unregister(vin->group->vin[i]);
+ for (unsigned int i = 0; i < RCAR_VIN_NUM; i++) {
+ if (group->vin[i])
+ rvin_v4l2_unregister(group->vin[i]);
+ }
mutex_lock(&vin->group->lock);
- for (i = 0; i < RVIN_CSI_MAX; i++) {
- if (vin->group->remotes[i].asc != asc)
+ for (unsigned int i = 0; i < RCAR_VIN_NUM; i++) {
+ if (!group->vin[i] || group->vin[i]->parallel.asc != asc)
continue;
- vin->group->remotes[i].subdev = NULL;
+
+ group->vin[i]->parallel.subdev = NULL;
+
+ vin_dbg(group->vin[i], "Unbind parallel subdev %s\n",
+ subdev->name);
+ }
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(group->remotes); i++) {
+ if (group->remotes[i].asc != asc)
+ continue;
+
+ group->remotes[i].subdev = NULL;
+
vin_dbg(vin, "Unbind %s from slot %u\n", subdev->name, i);
- break;
}
mutex_unlock(&vin->group->lock);
@@ -280,21 +278,38 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_async_connection *asc)
{
struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
- unsigned int i;
+ struct rvin_group *group = vin->group;
- mutex_lock(&vin->group->lock);
+ guard(mutex)(&group->lock);
- for (i = 0; i < RVIN_CSI_MAX; i++) {
+ for (unsigned int i = 0; i < RCAR_VIN_NUM; i++) {
+ struct rvin_dev *pvin = group->vin[i];
+
+ if (!pvin || pvin->parallel.asc != asc)
+ continue;
+
+ pvin->parallel.source_pad = 0;
+ for (unsigned int pad = 0; pad < subdev->entity.num_pads; pad++)
+ if (subdev->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)
+ pvin->parallel.source_pad = pad;
+
+ pvin->parallel.subdev = subdev;
+ vin_dbg(pvin, "Bound subdev %s\n", subdev->name);
+
+ return 0;
+ }
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(group->remotes); i++) {
if (vin->group->remotes[i].asc != asc)
continue;
+
vin->group->remotes[i].subdev = subdev;
vin_dbg(vin, "Bound %s to slot %u\n", subdev->name, i);
- break;
- }
- mutex_unlock(&vin->group->lock);
+ return 0;
+ }
- return 0;
+ return -ENODEV;
}
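
The guard(mutex)() above comes from <linux/cleanup.h>: it takes the lock immediately and drops it automatically when the enclosing scope exits, which is why both early returns in the bound handler need no explicit unlock. Minimal sketch:

    /* Sketch: scope-based locking via <linux/cleanup.h>. */
    static int example_lookup(struct rvin_group *group)
    {
        guard(mutex)(&group->lock);   /* mutex_lock() happens here */

        if (!group->vin[0])
            return -ENODEV;           /* unlock runs automatically */

        return 0;                     /* ...and on this path too */
    }
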
static const struct v4l2_async_notifier_operations rvin_group_notify_ops = {
@@ -343,12 +358,49 @@ out:
return ret;
}
-static void rvin_group_notifier_cleanup(struct rvin_dev *vin)
+static int rvin_parallel_parse_of(struct rvin_dev *vin)
{
- if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
- v4l2_async_nf_unregister(&vin->group->notifier);
- v4l2_async_nf_cleanup(&vin->group->notifier);
+ struct fwnode_handle *fwnode __free(fwnode_handle) = NULL;
+ struct fwnode_handle *ep __free(fwnode_handle) = NULL;
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_UNKNOWN,
+ };
+ struct v4l2_async_connection *asc;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), 0, 0, 0);
+ if (!ep)
+ return 0;
+
+ if (v4l2_fwnode_endpoint_parse(ep, &vep)) {
+ vin_err(vin, "Failed to parse %pOF\n", to_of_node(ep));
+ return -EINVAL;
}
+
+ switch (vep.bus_type) {
+ case V4L2_MBUS_PARALLEL:
+ case V4L2_MBUS_BT656:
+ vin_dbg(vin, "Found %s media bus\n",
+ vep.bus_type == V4L2_MBUS_PARALLEL ?
+ "PARALLEL" : "BT656");
+ vin->parallel.mbus_type = vep.bus_type;
+ vin->parallel.bus = vep.bus.parallel;
+ break;
+ default:
+ vin_err(vin, "Unknown media bus type\n");
+ return -EINVAL;
+ }
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ asc = v4l2_async_nf_add_fwnode(&vin->group->notifier, fwnode,
+ struct v4l2_async_connection);
+ if (IS_ERR(asc))
+ return PTR_ERR(asc);
+
+ vin->parallel.asc = asc;
+
+ vin_dbg(vin, "Add parallel OF device %pOF\n", to_of_node(fwnode));
+
+ return 0;
}
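
The __free(fwnode_handle) annotations serve the same purpose for reference counts: fwnode_handle_put() runs on both handles whenever the function returns, which is what lets this rewrite drop the manual puts and goto-based unwinding of the old rvin_parallel_parse_of() removed later in this patch. Sketch of the idiom:

    /* Sketch: auto-put fwnode references via <linux/cleanup.h>. */
    {
        struct fwnode_handle *ep __free(fwnode_handle) =
            fwnode_graph_get_endpoint_by_id(fwnode, 0, 0, 0);

        if (!ep)
            return 0;    /* put on NULL is a no-op */

        /* ... use ep on any path ... */
    }   /* fwnode_handle_put(ep) runs here automatically */
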
static int rvin_group_notifier_init(struct rvin_dev *vin, unsigned int port,
@@ -385,6 +437,12 @@ static int rvin_group_notifier_init(struct rvin_dev *vin, unsigned int port,
if (!(vin_mask & BIT(i)))
continue;
+ /* Parse local subdevice. */
+ ret = rvin_parallel_parse_of(vin->group->vin[i]);
+ if (ret)
+ return ret;
+
+ /* Parse shared subdevices. */
for (id = 0; id < max_id; id++) {
if (vin->group->remotes[id].asc)
continue;
@@ -437,11 +495,11 @@ static void rvin_free_controls(struct rvin_dev *vin)
vin->vdev.ctrl_handler = NULL;
}
-static int rvin_create_controls(struct rvin_dev *vin, struct v4l2_subdev *subdev)
+static int rvin_create_controls(struct rvin_dev *vin)
{
int ret;
- ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 16);
+ ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 1);
if (ret < 0)
return ret;
@@ -455,287 +513,12 @@ static int rvin_create_controls(struct rvin_dev *vin, struct v4l2_subdev *subdev
return ret;
}
- /* For the non-MC mode add controls from the subdevice. */
- if (subdev) {
- ret = v4l2_ctrl_add_handler(&vin->ctrl_handler,
- subdev->ctrl_handler, NULL, true);
- if (ret < 0) {
- rvin_free_controls(vin);
- return ret;
- }
- }
-
vin->vdev.ctrl_handler = &vin->ctrl_handler;
return 0;
}
/* -----------------------------------------------------------------------------
- * Async notifier
- */
-
-static int rvin_find_pad(struct v4l2_subdev *sd, int direction)
-{
- unsigned int pad;
-
- if (sd->entity.num_pads <= 1)
- return 0;
-
- for (pad = 0; pad < sd->entity.num_pads; pad++)
- if (sd->entity.pads[pad].flags & direction)
- return pad;
-
- return -EINVAL;
-}
-
-/* -----------------------------------------------------------------------------
- * Parallel async notifier
- */
-
-/* The vin lock should be held when calling the subdevice attach and detach */
-static int rvin_parallel_subdevice_attach(struct rvin_dev *vin,
- struct v4l2_subdev *subdev)
-{
- struct v4l2_subdev_mbus_code_enum code = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- int ret;
-
- /* Find source and sink pad of remote subdevice */
- ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SOURCE);
- if (ret < 0)
- return ret;
- vin->parallel.source_pad = ret;
-
- ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SINK);
- vin->parallel.sink_pad = ret < 0 ? 0 : ret;
-
- if (vin->info->use_mc) {
- vin->parallel.subdev = subdev;
- return 0;
- }
-
- /* Find compatible subdevices mbus format */
- vin->mbus_code = 0;
- code.index = 0;
- code.pad = vin->parallel.source_pad;
- while (!vin->mbus_code &&
- !v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &code)) {
- code.index++;
- switch (code.code) {
- case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_UYVY8_1X16:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_UYVY10_2X10:
- case MEDIA_BUS_FMT_RGB888_1X24:
- vin->mbus_code = code.code;
- vin_dbg(vin, "Found media bus format for %s: %d\n",
- subdev->name, vin->mbus_code);
- break;
- default:
- break;
- }
- }
-
- if (!vin->mbus_code) {
- vin_err(vin, "Unsupported media bus format for %s\n",
- subdev->name);
- return -EINVAL;
- }
-
- /* Read tvnorms */
- ret = v4l2_subdev_call(subdev, video, g_tvnorms, &vin->vdev.tvnorms);
- if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
- return ret;
-
- /* Read standard */
- vin->std = V4L2_STD_UNKNOWN;
- ret = v4l2_subdev_call(subdev, video, g_std, &vin->std);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
-
- /* Add the controls */
- ret = rvin_create_controls(vin, subdev);
- if (ret < 0)
- return ret;
-
- vin->parallel.subdev = subdev;
-
- return 0;
-}
-
-static void rvin_parallel_subdevice_detach(struct rvin_dev *vin)
-{
- rvin_v4l2_unregister(vin);
- vin->parallel.subdev = NULL;
-
- if (!vin->info->use_mc)
- rvin_free_controls(vin);
-}
-
-static int rvin_parallel_notify_complete(struct v4l2_async_notifier *notifier)
-{
- struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
- struct media_entity *source;
- struct media_entity *sink;
- int ret;
-
- ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
- if (ret < 0) {
- vin_err(vin, "Failed to register subdev nodes\n");
- return ret;
- }
-
- if (!video_is_registered(&vin->vdev)) {
- ret = rvin_v4l2_register(vin);
- if (ret < 0)
- return ret;
- }
-
- if (!vin->info->use_mc)
- return 0;
-
- /* If we're running with media-controller, link the subdevs. */
- source = &vin->parallel.subdev->entity;
- sink = &vin->vdev.entity;
-
- ret = media_create_pad_link(source, vin->parallel.source_pad,
- sink, vin->parallel.sink_pad, 0);
- if (ret)
- vin_err(vin, "Error adding link from %s to %s: %d\n",
- source->name, sink->name, ret);
-
- return ret;
-}
-
-static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_connection *asc)
-{
- struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
-
- vin_dbg(vin, "unbind parallel subdev %s\n", subdev->name);
-
- mutex_lock(&vin->lock);
- rvin_parallel_subdevice_detach(vin);
- mutex_unlock(&vin->lock);
-}
-
-static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_connection *asc)
-{
- struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
- int ret;
-
- mutex_lock(&vin->lock);
- ret = rvin_parallel_subdevice_attach(vin, subdev);
- mutex_unlock(&vin->lock);
- if (ret)
- return ret;
-
- v4l2_set_subdev_hostdata(subdev, vin);
-
- vin_dbg(vin, "bound subdev %s source pad: %u sink pad: %u\n",
- subdev->name, vin->parallel.source_pad,
- vin->parallel.sink_pad);
-
- return 0;
-}
-
-static const struct v4l2_async_notifier_operations rvin_parallel_notify_ops = {
- .bound = rvin_parallel_notify_bound,
- .unbind = rvin_parallel_notify_unbind,
- .complete = rvin_parallel_notify_complete,
-};
-
-static int rvin_parallel_parse_of(struct rvin_dev *vin)
-{
- struct fwnode_handle *ep, *fwnode;
- struct v4l2_fwnode_endpoint vep = {
- .bus_type = V4L2_MBUS_UNKNOWN,
- };
- struct v4l2_async_connection *asc;
- int ret;
-
- ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), 0, 0, 0);
- if (!ep)
- return 0;
-
- fwnode = fwnode_graph_get_remote_endpoint(ep);
- ret = v4l2_fwnode_endpoint_parse(ep, &vep);
- fwnode_handle_put(ep);
- if (ret) {
- vin_err(vin, "Failed to parse %pOF\n", to_of_node(fwnode));
- ret = -EINVAL;
- goto out;
- }
-
- switch (vep.bus_type) {
- case V4L2_MBUS_PARALLEL:
- case V4L2_MBUS_BT656:
- vin_dbg(vin, "Found %s media bus\n",
- vep.bus_type == V4L2_MBUS_PARALLEL ?
- "PARALLEL" : "BT656");
- vin->parallel.mbus_type = vep.bus_type;
- vin->parallel.bus = vep.bus.parallel;
- break;
- default:
- vin_err(vin, "Unknown media bus type\n");
- ret = -EINVAL;
- goto out;
- }
-
- asc = v4l2_async_nf_add_fwnode(&vin->notifier, fwnode,
- struct v4l2_async_connection);
- if (IS_ERR(asc)) {
- ret = PTR_ERR(asc);
- goto out;
- }
-
- vin->parallel.asc = asc;
-
- vin_dbg(vin, "Add parallel OF device %pOF\n", to_of_node(fwnode));
-out:
- fwnode_handle_put(fwnode);
-
- return ret;
-}
-
-static void rvin_parallel_cleanup(struct rvin_dev *vin)
-{
- v4l2_async_nf_unregister(&vin->notifier);
- v4l2_async_nf_cleanup(&vin->notifier);
-}
-
-static int rvin_parallel_init(struct rvin_dev *vin)
-{
- int ret;
-
- v4l2_async_nf_init(&vin->notifier, &vin->v4l2_dev);
-
- ret = rvin_parallel_parse_of(vin);
- if (ret)
- return ret;
-
- if (!vin->parallel.asc)
- return -ENODEV;
-
- vin_dbg(vin, "Found parallel subdevice %pOF\n",
- to_of_node(vin->parallel.asc->match.fwnode));
-
- vin->notifier.ops = &rvin_parallel_notify_ops;
- ret = v4l2_async_nf_register(&vin->notifier);
- if (ret < 0) {
- vin_err(vin, "Notifier registration failed\n");
- v4l2_async_nf_cleanup(&vin->notifier);
- return ret;
- }
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
* CSI-2
*/
@@ -909,80 +692,91 @@ static int rvin_csi2_create_link(struct rvin_group *group, unsigned int id,
return 0;
}
-static int rvin_csi2_setup_links(struct rvin_dev *vin)
+static int rvin_parallel_setup_links(struct rvin_group *group)
+{
+ u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
+
+ guard(mutex)(&group->lock);
+
+ /* If the group also has remote subdevices, don't enable the parallel links. */
+ for (unsigned int i = 0; i < ARRAY_SIZE(group->remotes); i++) {
+ if (group->remotes[i].subdev) {
+ flags = 0;
+ break;
+ }
+ }
+
+ /* Create links. */
+ for (unsigned int i = 0; i < RCAR_VIN_NUM; i++) {
+ struct rvin_dev *vin = group->vin[i];
+ struct media_entity *source;
+ struct media_entity *sink;
+ int ret;
+
+ /* Nothing to do if there is no VIN or parallel subdev. */
+ if (!vin || !vin->parallel.subdev)
+ continue;
+
+ source = &vin->parallel.subdev->entity;
+ sink = &vin->vdev.entity;
+
+ ret = media_create_pad_link(source, vin->parallel.source_pad,
+ sink, 0, flags);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rvin_csi2_setup_links(struct rvin_group *group)
{
const struct rvin_group_route *route;
unsigned int id;
- int ret = -EINVAL;
+ int ret;
+
+ ret = rvin_parallel_setup_links(group);
+ if (ret)
+ return ret;
/* Create all media device links between VINs and CSI-2's. */
- mutex_lock(&vin->group->lock);
- for (route = vin->info->routes; route->chsel; route++) {
+ mutex_lock(&group->lock);
+ for (route = group->info->routes; route->chsel; route++) {
/* Check that VIN's master is part of the group. */
- if (!vin->group->vin[route->master])
+ if (!group->vin[route->master])
continue;
/* Check that CSI-2 is part of the group. */
- if (!vin->group->remotes[route->csi].subdev)
+ if (!group->remotes[route->csi].subdev)
continue;
for (id = route->master; id < route->master + 4; id++) {
/* Check that VIN is part of the group. */
- if (!vin->group->vin[id])
+ if (!group->vin[id])
continue;
- ret = rvin_csi2_create_link(vin->group, id, route);
+ ret = rvin_csi2_create_link(group, id, route);
if (ret)
goto out;
}
}
out:
- mutex_unlock(&vin->group->lock);
+ mutex_unlock(&group->lock);
return ret;
}
-static void rvin_csi2_cleanup(struct rvin_dev *vin)
-{
- rvin_parallel_cleanup(vin);
- rvin_group_notifier_cleanup(vin);
- rvin_group_put(vin);
- rvin_free_controls(vin);
-}
-
static int rvin_csi2_init(struct rvin_dev *vin)
{
int ret;
- vin->pad.flags = MEDIA_PAD_FL_SINK;
- ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad);
- if (ret)
- return ret;
-
- ret = rvin_create_controls(vin, NULL);
- if (ret < 0)
- return ret;
-
ret = rvin_group_get(vin, rvin_csi2_setup_links, &rvin_csi2_media_ops);
if (ret)
- goto err_controls;
-
- /* It's OK to not have a parallel subdevice. */
- ret = rvin_parallel_init(vin);
- if (ret && ret != -ENODEV)
- goto err_group;
+ return ret;
ret = rvin_group_notifier_init(vin, 1, RVIN_CSI_MAX);
if (ret)
- goto err_parallel;
-
- return 0;
-err_parallel:
- rvin_parallel_cleanup(vin);
-err_group:
- rvin_group_put(vin);
-err_controls:
- rvin_free_controls(vin);
+ rvin_group_put(vin);
return ret;
}
@@ -991,30 +785,31 @@ err_controls:
* ISP
*/
-static int rvin_isp_setup_links(struct rvin_dev *vin)
+static int rvin_isp_setup_links(struct rvin_group *group)
{
unsigned int i;
int ret = -EINVAL;
/* Create all media device links between VINs and ISP's. */
- mutex_lock(&vin->group->lock);
+ mutex_lock(&group->lock);
for (i = 0; i < RCAR_VIN_NUM; i++) {
struct media_pad *source_pad, *sink_pad;
struct media_entity *source, *sink;
unsigned int source_slot = i / 8;
unsigned int source_idx = i % 8 + 1;
+ struct rvin_dev *vin = group->vin[i];
- if (!vin->group->vin[i])
+ if (!vin)
continue;
/* Check that ISP is part of the group. */
- if (!vin->group->remotes[source_slot].subdev)
+ if (!group->remotes[source_slot].subdev)
continue;
- source = &vin->group->remotes[source_slot].subdev->entity;
+ source = &group->remotes[source_slot].subdev->entity;
source_pad = &source->pads[source_idx];
- sink = &vin->group->vin[i]->vdev.entity;
+ sink = &vin->vdev.entity;
sink_pad = &sink->pads[0];
/* Skip if link already exists. */
@@ -1030,44 +825,22 @@ static int rvin_isp_setup_links(struct rvin_dev *vin)
break;
}
}
- mutex_unlock(&vin->group->lock);
+ mutex_unlock(&group->lock);
return ret;
}
-static void rvin_isp_cleanup(struct rvin_dev *vin)
-{
- rvin_group_notifier_cleanup(vin);
- rvin_group_put(vin);
- rvin_free_controls(vin);
-}
-
static int rvin_isp_init(struct rvin_dev *vin)
{
int ret;
- vin->pad.flags = MEDIA_PAD_FL_SINK;
- ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad);
- if (ret)
- return ret;
-
- ret = rvin_create_controls(vin, NULL);
- if (ret < 0)
- return ret;
-
ret = rvin_group_get(vin, rvin_isp_setup_links, NULL);
if (ret)
- goto err_controls;
+ return ret;
ret = rvin_group_notifier_init(vin, 2, RVIN_ISP_MAX);
if (ret)
- goto err_group;
-
- return 0;
-err_group:
- rvin_group_put(vin);
-err_controls:
- rvin_free_controls(vin);
+ rvin_group_put(vin);
return ret;
}
@@ -1102,7 +875,7 @@ static int __maybe_unused rvin_resume(struct device *dev)
* as we don't know if and in which order the master VINs will
* be resumed.
*/
- if (vin->info->use_mc) {
+ if (vin->info->model == RCAR_GEN3) {
unsigned int master_id = rvin_group_id_to_master(vin->id);
struct rvin_dev *master = vin->group->vin[master_id];
int ret;
@@ -1124,7 +897,6 @@ static int __maybe_unused rvin_resume(struct device *dev)
static const struct rvin_info rcar_info_h1 = {
.model = RCAR_H1,
- .use_mc = false,
.max_width = 2048,
.max_height = 2048,
.scaler = rvin_scaler_gen2,
@@ -1132,7 +904,6 @@ static const struct rvin_info rcar_info_h1 = {
static const struct rvin_info rcar_info_m1 = {
.model = RCAR_M1,
- .use_mc = false,
.max_width = 2048,
.max_height = 2048,
.scaler = rvin_scaler_gen2,
@@ -1140,7 +911,6 @@ static const struct rvin_info rcar_info_m1 = {
static const struct rvin_info rcar_info_gen2 = {
.model = RCAR_GEN2,
- .use_mc = false,
.max_width = 2048,
.max_height = 2048,
.scaler = rvin_scaler_gen2,
@@ -1155,7 +925,6 @@ static const struct rvin_group_route rcar_info_r8a774e1_routes[] = {
static const struct rvin_info rcar_info_r8a774e1 = {
.model = RCAR_GEN3,
- .use_mc = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a774e1_routes,
@@ -1171,7 +940,6 @@ static const struct rvin_group_route rcar_info_r8a7795_routes[] = {
static const struct rvin_info rcar_info_r8a7795 = {
.model = RCAR_GEN3,
- .use_mc = true,
.nv12 = true,
.max_width = 4096,
.max_height = 4096,
@@ -1189,7 +957,6 @@ static const struct rvin_group_route rcar_info_r8a7796_routes[] = {
static const struct rvin_info rcar_info_r8a7796 = {
.model = RCAR_GEN3,
- .use_mc = true,
.nv12 = true,
.max_width = 4096,
.max_height = 4096,
@@ -1207,7 +974,6 @@ static const struct rvin_group_route rcar_info_r8a77965_routes[] = {
static const struct rvin_info rcar_info_r8a77965 = {
.model = RCAR_GEN3,
- .use_mc = true,
.nv12 = true,
.max_width = 4096,
.max_height = 4096,
@@ -1222,7 +988,6 @@ static const struct rvin_group_route rcar_info_r8a77970_routes[] = {
static const struct rvin_info rcar_info_r8a77970 = {
.model = RCAR_GEN3,
- .use_mc = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77970_routes,
@@ -1236,7 +1001,6 @@ static const struct rvin_group_route rcar_info_r8a77980_routes[] = {
static const struct rvin_info rcar_info_r8a77980 = {
.model = RCAR_GEN3,
- .use_mc = true,
.nv12 = true,
.max_width = 4096,
.max_height = 4096,
@@ -1250,7 +1014,6 @@ static const struct rvin_group_route rcar_info_r8a77990_routes[] = {
static const struct rvin_info rcar_info_r8a77990 = {
.model = RCAR_GEN3,
- .use_mc = true,
.nv12 = true,
.max_width = 4096,
.max_height = 4096,
@@ -1264,7 +1027,6 @@ static const struct rvin_group_route rcar_info_r8a77995_routes[] = {
static const struct rvin_info rcar_info_r8a77995 = {
.model = RCAR_GEN3,
- .use_mc = true,
.nv12 = true,
.max_width = 4096,
.max_height = 4096,
@@ -1274,7 +1036,6 @@ static const struct rvin_info rcar_info_r8a77995 = {
static const struct rvin_info rcar_info_gen4 = {
.model = RCAR_GEN4,
- .use_mc = true,
.use_isp = true,
.nv12 = true,
.raw10 = true,
@@ -1361,6 +1122,56 @@ static const struct of_device_id rvin_of_id_table[] = {
};
MODULE_DEVICE_TABLE(of, rvin_of_id_table);
+static int rvin_id_get(struct rvin_dev *vin)
+{
+ u32 oid;
+ int id;
+
+ switch (vin->info->model) {
+ case RCAR_GEN3:
+ case RCAR_GEN4:
+ if (of_property_read_u32(vin->dev->of_node, "renesas,id", &oid)) {
+ vin_err(vin, "%pOF: No renesas,id property found\n",
+ vin->dev->of_node);
+ return -EINVAL;
+ }
+
+ if (oid >= RCAR_VIN_NUM) {
+ vin_err(vin, "%pOF: Invalid renesas,id '%u'\n",
+ vin->dev->of_node, oid);
+ return -EINVAL;
+ }
+
+ vin->id = oid;
+ break;
+ default:
+ id = ida_alloc_range(&rvin_ida, 0, RCAR_VIN_NUM - 1,
+ GFP_KERNEL);
+ if (id < 0) {
+ vin_err(vin, "%pOF: Failed to allocate VIN group ID\n",
+ vin->dev->of_node);
+ return -EINVAL;
+ }
+
+ vin->id = id;
+ break;
+ }
+
+ return 0;
+}
+
+static void rvin_id_put(struct rvin_dev *vin)
+{
+ switch (vin->info->model) {
+ case RCAR_GEN3:
+ case RCAR_GEN4:
+ break;
+ default:
+ ida_free(&rvin_ida, vin->id);
+ break;
+ }
+}
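
For models without a renesas,id property the ID now comes from a shared IDA: ida_alloc_range() returns the lowest free integer in the given range and ida_free() releases it. Minimal illustration (example bounds, not the driver's):

    /* Sketch: lowest-free-integer allocation with an IDA. */
    static DEFINE_IDA(example_ida);

    static int claim_slot(void)
    {
        int id = ida_alloc_range(&example_ida, 0, 7, GFP_KERNEL);
        if (id < 0)
            return id;   /* -ENOSPC once all eight IDs are in use */
        return id;       /* release later with ida_free(&example_ida, id) */
    }
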
+
static int rcar_vin_probe(struct platform_device *pdev)
{
struct rvin_dev *vin;
@@ -1388,30 +1199,59 @@ static int rcar_vin_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vin);
- if (vin->info->use_isp) {
- ret = rvin_isp_init(vin);
- } else if (vin->info->use_mc) {
- ret = rvin_csi2_init(vin);
+ if (rvin_id_get(vin)) {
+ ret = -EINVAL;
+ goto err_dma;
+ }
- if (vin->info->scaler &&
- rvin_group_id_to_master(vin->id) == vin->id)
- vin->scaler = vin->info->scaler;
- } else {
- ret = rvin_parallel_init(vin);
+ vin->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad);
+ if (ret)
+ goto err_id;
+
+ ret = rvin_create_controls(vin);
+ if (ret < 0)
+ goto err_id;
+
+ switch (vin->info->model) {
+ case RCAR_GEN3:
+ case RCAR_GEN4:
+ if (vin->info->use_isp) {
+ ret = rvin_isp_init(vin);
+ } else {
+ ret = rvin_csi2_init(vin);
+
+ if (vin->info->scaler &&
+ rvin_group_id_to_master(vin->id) == vin->id)
+ vin->scaler = vin->info->scaler;
+ }
+ break;
+ default:
+ ret = rvin_group_get(vin, rvin_parallel_setup_links, NULL);
+ if (!ret)
+ ret = rvin_group_notifier_init(vin, 0, 0);
if (vin->info->scaler)
vin->scaler = vin->info->scaler;
+ break;
}
- if (ret) {
- rvin_dma_unregister(vin);
- return ret;
- }
+ if (ret)
+ goto err_ctrl;
pm_suspend_ignore_children(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
return 0;
+
+err_ctrl:
+ rvin_free_controls(vin);
+err_id:
+ rvin_id_put(vin);
+err_dma:
+ rvin_dma_unregister(vin);
+
+ return ret;
}
static void rcar_vin_remove(struct platform_device *pdev)
@@ -1422,12 +1262,16 @@ static void rcar_vin_remove(struct platform_device *pdev)
rvin_v4l2_unregister(vin);
- if (vin->info->use_isp)
- rvin_isp_cleanup(vin);
- else if (vin->info->use_mc)
- rvin_csi2_cleanup(vin);
- else
- rvin_parallel_cleanup(vin);
+ if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
+ v4l2_async_nf_unregister(&vin->group->notifier);
+ v4l2_async_nf_cleanup(&vin->group->notifier);
+ }
+
+ rvin_group_put(vin);
+
+ rvin_free_controls(vin);
+
+ rvin_id_put(vin);
rvin_dma_unregister(vin);
}
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
index 5c08ee2c9807..b619d1436a41 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
@@ -2,18 +2,18 @@
/*
* Driver for Renesas R-Car VIN
*
+ * Copyright (C) 2025 Niklas Söderlund <niklas.soderlund@ragnatech.se>
* Copyright (C) 2016 Renesas Electronics Corp.
* Copyright (C) 2011-2013 Renesas Solutions Corp.
* Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
* Copyright (C) 2008 Magnus Damm
- *
- * Based on the soc-camera rcar_vin driver
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
+#include <media/v4l2-event.h>
#include <media/videobuf2-dma-contig.h>
#include "rcar-vin.h"
@@ -115,11 +115,16 @@
#define VNFC_S_FRAME (1 << 0)
/* Video n Interrupt Enable Register bits */
-#define VNIE_FIE (1 << 4)
-#define VNIE_EFE (1 << 1)
+#define VNIE_VFE BIT(17)
+#define VNIE_VRE BIT(16)
+#define VNIE_FIE BIT(4)
+#define VNIE_EFE BIT(1)
/* Video n Interrupt Status Register bits */
-#define VNINTS_FIS (1 << 4)
+#define VNINTS_VFS BIT(17)
+#define VNINTS_VRS BIT(16)
+#define VNINTS_FIS BIT(4)
+#define VNINTS_EFS BIT(1)
/* Video n Data Mode Register bits */
#define VNDMR_A8BIT(n) (((n) & 0xff) << 24)
@@ -555,17 +560,12 @@ static void rvin_set_coeff(struct rvin_dev *vin, unsigned short xs)
void rvin_scaler_gen2(struct rvin_dev *vin)
{
- unsigned int crop_height;
u32 xs, ys;
/* Set scaling coefficient */
- crop_height = vin->crop.height;
- if (V4L2_FIELD_HAS_BOTH(vin->format.field))
- crop_height *= 2;
-
ys = 0;
- if (crop_height != vin->compose.height)
- ys = (4096 * crop_height) / vin->compose.height;
+ if (vin->crop.height != vin->compose.height)
+ ys = (4096 * vin->crop.height) / vin->compose.height;
rvin_write(vin, ys, VNYS_REG);
xs = 0;
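
With the interlaced special case dropped, the vertical coefficient is a direct crop-to-compose ratio; a value of 4096 appears to represent unity (the register is left at zero when no scaling is needed). Worked example with arbitrary sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned int crop = 1080, compose = 540;
        unsigned int ys = crop != compose ? 4096 * crop / compose : 0;
        printf("ys = %u\n", ys);   /* 8192, i.e. a 2:1 vertical scale */
        return 0;
    }
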
@@ -700,9 +700,6 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_FIELD_INTERLACED:
/* Default to TB */
vnmc = VNMC_IM_FULL;
- /* Use BT if video standard can be read and is 60 Hz format */
- if (!vin->info->use_mc && vin->std & V4L2_STD_525_60)
- vnmc = VNMC_IM_FULL | VNMC_FOC;
break;
case V4L2_FIELD_INTERLACED_TB:
vnmc = VNMC_IM_FULL;
@@ -897,6 +894,8 @@ static int rvin_setup(struct rvin_dev *vin)
/* Progressive or interlaced mode */
interrupts = progressive ? VNIE_FIE : VNIE_EFE;
+ /* Enable VSYNC Rising Edge Detection. */
+ interrupts |= VNIE_VRE;
/* Ack interrupts */
rvin_write(vin, interrupts, VNINTS_REG);
@@ -912,21 +911,6 @@ static int rvin_setup(struct rvin_dev *vin)
return 0;
}
-static void rvin_disable_interrupts(struct rvin_dev *vin)
-{
- rvin_write(vin, 0, VNIE_REG);
-}
-
-static u32 rvin_get_interrupt_status(struct rvin_dev *vin)
-{
- return rvin_read(vin, VNINTS_REG);
-}
-
-static void rvin_ack_interrupt(struct rvin_dev *vin)
-{
- rvin_write(vin, rvin_read(vin, VNINTS_REG), VNINTS_REG);
-}
-
static bool rvin_capture_active(struct rvin_dev *vin)
{
return rvin_read(vin, VNMS_REG) & VNMS_CA;
@@ -1049,22 +1033,35 @@ static void rvin_capture_stop(struct rvin_dev *vin)
static irqreturn_t rvin_irq(int irq, void *data)
{
struct rvin_dev *vin = data;
- u32 int_status, vnms;
+ u32 capture, status, vnms;
int slot;
unsigned int handled = 0;
unsigned long flags;
spin_lock_irqsave(&vin->qlock, flags);
- int_status = rvin_get_interrupt_status(vin);
- if (!int_status)
+ status = rvin_read(vin, VNINTS_REG);
+ if (!status)
goto done;
- rvin_ack_interrupt(vin);
+ rvin_write(vin, status, VNINTS_REG);
handled = 1;
+ /* Signal Start of Frame. */
+ if (status & VNINTS_VRS) {
+ struct v4l2_event event = {
+ .type = V4L2_EVENT_FRAME_SYNC,
+ .u.frame_sync.frame_sequence = vin->sequence,
+ };
+
+ v4l2_event_queue(&vin->vdev, &event);
+ }
+
/* Nothing to do if nothing was captured. */
- if (!(int_status & VNINTS_FIS))
+ capture = vin->format.field == V4L2_FIELD_NONE ||
+ vin->format.field == V4L2_FIELD_ALTERNATE ?
+ VNINTS_FIS : VNINTS_EFS;
+ if (!(status & capture))
goto done;
/* Nothing to do if not running. */
@@ -1297,14 +1294,6 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
struct media_pad *pad;
int ret;
- /* No media controller used, simply pass operation to subdevice. */
- if (!vin->info->use_mc) {
- ret = v4l2_subdev_call(vin->parallel.subdev, video, s_stream,
- on);
-
- return ret == -ENOIOCTLCMD ? 0 : ret;
- }
-
pad = media_pad_remote_pad_first(&vin->pad);
if (!pad)
return -EPIPE;
@@ -1417,7 +1406,7 @@ void rvin_stop_streaming(struct rvin_dev *vin)
rvin_set_stream(vin, 0);
/* disable interrupts */
- rvin_disable_interrupts(vin);
+ rvin_write(vin, 0, VNIE_REG);
/* Return unprocessed buffers from hardware. */
for (unsigned int i = 0; i < HW_BUFFER_NUM; i++) {
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
index db091af57c19..62eddf3a35fc 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
@@ -2,12 +2,11 @@
/*
* Driver for Renesas R-Car VIN
*
+ * Copyright (C) 2025 Niklas Söderlund <niklas.soderlund@ragnatech.se>
* Copyright (C) 2016 Renesas Electronics Corp.
* Copyright (C) 2011-2013 Renesas Solutions Corp.
* Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
* Copyright (C) 2008 Magnus Damm
- *
- * Based on the soc-camera rcar_vin driver
*/
#include <linux/pm_runtime.h>
@@ -230,101 +229,6 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
* V4L2
*/
-static int rvin_reset_format(struct rvin_dev *vin)
-{
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .pad = vin->parallel.source_pad,
- };
- int ret;
-
- ret = v4l2_subdev_call(vin_to_source(vin), pad, get_fmt, NULL, &fmt);
- if (ret)
- return ret;
-
- v4l2_fill_pix_format(&vin->format, &fmt.format);
-
- vin->crop.top = 0;
- vin->crop.left = 0;
- vin->crop.width = vin->format.width;
- vin->crop.height = vin->format.height;
-
- /* Make use of the hardware interlacer by default. */
- if (vin->format.field == V4L2_FIELD_ALTERNATE) {
- vin->format.field = V4L2_FIELD_INTERLACED;
- vin->format.height *= 2;
- }
-
- rvin_format_align(vin, &vin->format);
-
- vin->compose.top = 0;
- vin->compose.left = 0;
- vin->compose.width = vin->format.width;
- vin->compose.height = vin->format.height;
-
- return 0;
-}
-
-static int rvin_try_format(struct rvin_dev *vin, u32 which,
- struct v4l2_pix_format *pix,
- struct v4l2_rect *src_rect)
-{
- struct v4l2_subdev *sd = vin_to_source(vin);
- struct v4l2_subdev_state *sd_state;
- static struct lock_class_key key;
- struct v4l2_subdev_format format = {
- .which = which,
- .pad = vin->parallel.source_pad,
- };
- enum v4l2_field field;
- u32 width, height;
- int ret;
-
- /*
- * FIXME: Drop this call, drivers are not supposed to use
- * __v4l2_subdev_state_alloc().
- */
- sd_state = __v4l2_subdev_state_alloc(sd, "rvin:state->lock", &key);
- if (IS_ERR(sd_state))
- return PTR_ERR(sd_state);
-
- if (!rvin_format_from_pixel(vin, pix->pixelformat))
- pix->pixelformat = RVIN_DEFAULT_FORMAT;
-
- v4l2_fill_mbus_format(&format.format, pix, vin->mbus_code);
-
- /* Allow the video device to override field and to scale */
- field = pix->field;
- width = pix->width;
- height = pix->height;
-
- ret = v4l2_subdev_call(sd, pad, set_fmt, sd_state, &format);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- goto done;
- ret = 0;
-
- v4l2_fill_pix_format(pix, &format.format);
-
- if (src_rect) {
- src_rect->top = 0;
- src_rect->left = 0;
- src_rect->width = pix->width;
- src_rect->height = pix->height;
- }
-
- if (field != V4L2_FIELD_ANY)
- pix->field = field;
-
- pix->width = width;
- pix->height = height;
-
- rvin_format_align(vin, pix);
-done:
- __v4l2_subdev_state_free(sd_state);
-
- return ret;
-}
-
static int rvin_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
@@ -333,42 +237,6 @@ static int rvin_querycap(struct file *file, void *priv,
return 0;
}
-static int rvin_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct rvin_dev *vin = video_drvdata(file);
-
- return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL);
-}
-
-static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_rect fmt_rect, src_rect;
- int ret;
-
- if (vb2_is_busy(&vin->queue))
- return -EBUSY;
-
- ret = rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix,
- &src_rect);
- if (ret)
- return ret;
-
- vin->format = f->fmt.pix;
-
- fmt_rect.top = 0;
- fmt_rect.left = 0;
- fmt_rect.width = vin->format.width;
- fmt_rect.height = vin->format.height;
-
- v4l2_rect_map_inside(&vin->crop, &src_rect);
- v4l2_rect_map_inside(&vin->compose, &fmt_rect);
-
- return 0;
-}
-
static int rvin_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
@@ -465,6 +333,7 @@ static int rvin_enum_fmt_vid_cap(struct file *file, void *priv,
static int rvin_remote_rectangle(struct rvin_dev *vin, struct v4l2_rect *rect)
{
+ struct media_pad *pad = media_pad_remote_pad_first(&vin->pad);
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
@@ -472,18 +341,11 @@ static int rvin_remote_rectangle(struct rvin_dev *vin, struct v4l2_rect *rect)
unsigned int index;
int ret;
- if (vin->info->use_mc) {
- struct media_pad *pad = media_pad_remote_pad_first(&vin->pad);
-
- if (!pad)
- return -EINVAL;
+ if (!pad)
+ return -EINVAL;
- sd = media_entity_to_v4l2_subdev(pad->entity);
- index = pad->index;
- } else {
- sd = vin_to_source(vin);
- index = vin->parallel.source_pad;
- }
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ index = pad->index;
fmt.pad = index;
ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
@@ -623,284 +485,18 @@ static int rvin_s_selection(struct file *file, void *fh,
return 0;
}
-static int rvin_g_parm(struct file *file, void *priv,
- struct v4l2_streamparm *parm)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
-
- return v4l2_g_parm_cap(&vin->vdev, sd, parm);
-}
-
-static int rvin_s_parm(struct file *file, void *priv,
- struct v4l2_streamparm *parm)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
-
- return v4l2_s_parm_cap(&vin->vdev, sd, parm);
-}
-
-static int rvin_g_pixelaspect(struct file *file, void *priv,
- int type, struct v4l2_fract *f)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
-
- if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- return v4l2_subdev_call(sd, video, g_pixelaspect, f);
-}
-
-static int rvin_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
- int ret;
-
- if (i->index != 0)
- return -EINVAL;
-
- ret = v4l2_subdev_call(sd, video, g_input_status, &i->status);
- if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
- return ret;
-
- i->type = V4L2_INPUT_TYPE_CAMERA;
-
- if (v4l2_subdev_has_op(sd, pad, dv_timings_cap)) {
- i->capabilities = V4L2_IN_CAP_DV_TIMINGS;
- i->std = 0;
- } else {
- i->capabilities = V4L2_IN_CAP_STD;
- i->std = vin->vdev.tvnorms;
- }
-
- strscpy(i->name, "Camera", sizeof(i->name));
-
- return 0;
-}
-
-static int rvin_g_input(struct file *file, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int rvin_s_input(struct file *file, void *priv, unsigned int i)
-{
- if (i > 0)
- return -EINVAL;
- return 0;
-}
-
-static int rvin_querystd(struct file *file, void *priv, v4l2_std_id *a)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
-
- return v4l2_subdev_call(sd, video, querystd, a);
-}
-
-static int rvin_s_std(struct file *file, void *priv, v4l2_std_id a)
-{
- struct rvin_dev *vin = video_drvdata(file);
- int ret;
-
- ret = v4l2_subdev_call(vin_to_source(vin), video, s_std, a);
- if (ret < 0)
- return ret;
-
- vin->std = a;
-
- /* Changing the standard will change the width/height */
- return rvin_reset_format(vin);
-}
-
-static int rvin_g_std(struct file *file, void *priv, v4l2_std_id *a)
-{
- struct rvin_dev *vin = video_drvdata(file);
-
- if (v4l2_subdev_has_op(vin_to_source(vin), pad, dv_timings_cap))
- return -ENOIOCTLCMD;
-
- *a = vin->std;
-
- return 0;
-}
-
static int rvin_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
+ case V4L2_EVENT_FRAME_SYNC:
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
case V4L2_EVENT_SOURCE_CHANGE:
return v4l2_event_subscribe(fh, sub, 4, NULL);
}
return v4l2_ctrl_subscribe_event(fh, sub);
}
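
With this hook in place, userspace can watch frame starts through the standard V4L2 event interface; a hypothetical sketch (error handling omitted, fd is an open video node):

    /* Hypothetical userspace sketch: receive frame-start events. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    void watch_frame_starts(int fd)
    {
        struct v4l2_event_subscription sub;
        struct v4l2_event ev;

        memset(&sub, 0, sizeof(sub));
        sub.type = V4L2_EVENT_FRAME_SYNC;
        ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);

        /* On a blocking fd this waits for the next queued event. */
        ioctl(fd, VIDIOC_DQEVENT, &ev);
        /* ev.u.frame_sync.frame_sequence is the capture sequence number */
    }
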
-static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
- struct v4l2_enum_dv_timings *timings)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
- int ret;
-
- if (timings->pad)
- return -EINVAL;
-
- timings->pad = vin->parallel.sink_pad;
-
- ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);
-
- timings->pad = 0;
-
- return ret;
-}
-
-static int rvin_s_dv_timings(struct file *file, void *priv_fh,
- struct v4l2_dv_timings *timings)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
- int ret;
-
- ret = v4l2_subdev_call(sd, pad, s_dv_timings,
- vin->parallel.sink_pad, timings);
- if (ret)
- return ret;
-
- /* Changing the timings will change the width/height */
- return rvin_reset_format(vin);
-}
-
-static int rvin_g_dv_timings(struct file *file, void *priv_fh,
- struct v4l2_dv_timings *timings)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
-
- return v4l2_subdev_call(sd, pad, g_dv_timings,
- vin->parallel.sink_pad, timings);
-}
-
-static int rvin_query_dv_timings(struct file *file, void *priv_fh,
- struct v4l2_dv_timings *timings)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
-
- return v4l2_subdev_call(sd, pad, query_dv_timings,
- vin->parallel.sink_pad, timings);
-}
-
-static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
- struct v4l2_dv_timings_cap *cap)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
- int ret;
-
- if (cap->pad)
- return -EINVAL;
-
- cap->pad = vin->parallel.sink_pad;
-
- ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
-
- cap->pad = 0;
-
- return ret;
-}
-
-static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
- int ret;
-
- if (edid->pad)
- return -EINVAL;
-
- edid->pad = vin->parallel.sink_pad;
-
- ret = v4l2_subdev_call(sd, pad, get_edid, edid);
-
- edid->pad = 0;
-
- return ret;
-}
-
-static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
-{
- struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
- int ret;
-
- if (edid->pad)
- return -EINVAL;
-
- edid->pad = vin->parallel.sink_pad;
-
- ret = v4l2_subdev_call(sd, pad, set_edid, edid);
-
- edid->pad = 0;
-
- return ret;
-}
-
-static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
- .vidioc_querycap = rvin_querycap,
- .vidioc_try_fmt_vid_cap = rvin_try_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = rvin_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = rvin_s_fmt_vid_cap,
- .vidioc_enum_fmt_vid_cap = rvin_enum_fmt_vid_cap,
-
- .vidioc_g_selection = rvin_g_selection,
- .vidioc_s_selection = rvin_s_selection,
-
- .vidioc_g_parm = rvin_g_parm,
- .vidioc_s_parm = rvin_s_parm,
-
- .vidioc_g_pixelaspect = rvin_g_pixelaspect,
-
- .vidioc_enum_input = rvin_enum_input,
- .vidioc_g_input = rvin_g_input,
- .vidioc_s_input = rvin_s_input,
-
- .vidioc_dv_timings_cap = rvin_dv_timings_cap,
- .vidioc_enum_dv_timings = rvin_enum_dv_timings,
- .vidioc_g_dv_timings = rvin_g_dv_timings,
- .vidioc_s_dv_timings = rvin_s_dv_timings,
- .vidioc_query_dv_timings = rvin_query_dv_timings,
-
- .vidioc_g_edid = rvin_g_edid,
- .vidioc_s_edid = rvin_s_edid,
-
- .vidioc_querystd = rvin_querystd,
- .vidioc_g_std = rvin_g_std,
- .vidioc_s_std = rvin_s_std,
-
- .vidioc_reqbufs = vb2_ioctl_reqbufs,
- .vidioc_create_bufs = vb2_ioctl_create_bufs,
- .vidioc_querybuf = vb2_ioctl_querybuf,
- .vidioc_qbuf = vb2_ioctl_qbuf,
- .vidioc_dqbuf = vb2_ioctl_dqbuf,
- .vidioc_expbuf = vb2_ioctl_expbuf,
- .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
- .vidioc_streamon = vb2_ioctl_streamon,
- .vidioc_streamoff = vb2_ioctl_streamoff,
-
- .vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = rvin_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-/* -----------------------------------------------------------------------------
- * V4L2 Media Controller
- */
-
static void rvin_mc_try_format(struct rvin_dev *vin,
struct v4l2_pix_format *pix)
{
@@ -979,19 +575,6 @@ static const struct v4l2_ioctl_ops rvin_mc_ioctl_ops = {
* File Operations
*/
-static int rvin_power_parallel(struct rvin_dev *vin, bool on)
-{
- struct v4l2_subdev *sd = vin_to_source(vin);
- int power = on ? 1 : 0;
- int ret;
-
- ret = v4l2_subdev_call(sd, core, s_power, power);
- if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
- return ret;
-
- return 0;
-}
-
static int rvin_open(struct file *file)
{
struct rvin_dev *vin = video_drvdata(file);
@@ -1011,11 +594,7 @@ static int rvin_open(struct file *file)
if (ret)
goto err_unlock;
- if (vin->info->use_mc)
- ret = v4l2_pipeline_pm_get(&vin->vdev.entity);
- else if (v4l2_fh_is_singular_file(file))
- ret = rvin_power_parallel(vin, true);
-
+ ret = v4l2_pipeline_pm_get(&vin->vdev.entity);
if (ret < 0)
goto err_open;
@@ -1027,10 +606,7 @@ static int rvin_open(struct file *file)
return 0;
err_power:
- if (vin->info->use_mc)
- v4l2_pipeline_pm_put(&vin->vdev.entity);
- else if (v4l2_fh_is_singular_file(file))
- rvin_power_parallel(vin, false);
+ v4l2_pipeline_pm_put(&vin->vdev.entity);
err_open:
v4l2_fh_release(file);
err_unlock:
@@ -1044,23 +620,14 @@ err_pm:
static int rvin_release(struct file *file)
{
struct rvin_dev *vin = video_drvdata(file);
- bool fh_singular;
int ret;
mutex_lock(&vin->lock);
- /* Save the singular status before we call the clean-up helper */
- fh_singular = v4l2_fh_is_singular_file(file);
-
/* the release helper will cleanup any on-going streaming */
ret = _vb2_fop_release(file, NULL);
- if (vin->info->use_mc) {
- v4l2_pipeline_pm_put(&vin->vdev.entity);
- } else {
- if (fh_singular)
- rvin_power_parallel(vin, false);
- }
+ v4l2_pipeline_pm_put(&vin->vdev.entity);
mutex_unlock(&vin->lock);
@@ -1091,18 +658,6 @@ void rvin_v4l2_unregister(struct rvin_dev *vin)
video_unregister_device(&vin->vdev);
}
-static void rvin_notify_video_device(struct rvin_dev *vin,
- unsigned int notification, void *arg)
-{
- switch (notification) {
- case V4L2_DEVICE_NOTIFY_EVENT:
- v4l2_event_queue(&vin->vdev, arg);
- break;
- default:
- break;
- }
-}
-
static void rvin_notify(struct v4l2_subdev *sd,
unsigned int notification, void *arg)
{
@@ -1113,12 +668,6 @@ static void rvin_notify(struct v4l2_subdev *sd,
container_of(sd->v4l2_dev, struct rvin_dev, v4l2_dev);
unsigned int i;
- /* If no media controller, no need to route the event. */
- if (!vin->info->use_mc) {
- rvin_notify_video_device(vin, notification, arg);
- return;
- }
-
group = vin->group;
for (i = 0; i < RCAR_VIN_NUM; i++) {
@@ -1134,7 +683,13 @@ static void rvin_notify(struct v4l2_subdev *sd,
if (remote != sd)
continue;
- rvin_notify_video_device(vin, notification, arg);
+ switch (notification) {
+ case V4L2_DEVICE_NOTIFY_EVENT:
+ v4l2_event_queue(&vin->vdev, arg);
+ break;
+ default:
+ break;
+ }
}
}
@@ -1153,7 +708,8 @@ int rvin_v4l2_register(struct rvin_dev *vin)
vdev->lock = &vin->lock;
vdev->fops = &rvin_fops;
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
+ V4L2_CAP_READWRITE | V4L2_CAP_IO_MC;
+ vdev->ioctl_ops = &rvin_mc_ioctl_ops;
/* Set a default format */
vin->format.pixelformat = RVIN_DEFAULT_FORMAT;
@@ -1162,14 +718,6 @@ int rvin_v4l2_register(struct rvin_dev *vin)
vin->format.field = RVIN_DEFAULT_FIELD;
vin->format.colorspace = RVIN_DEFAULT_COLORSPACE;
- if (vin->info->use_mc) {
- vdev->device_caps |= V4L2_CAP_IO_MC;
- vdev->ioctl_ops = &rvin_mc_ioctl_ops;
- } else {
- vdev->ioctl_ops = &rvin_ioctl_ops;
- rvin_reset_format(vin);
- }
-
rvin_format_align(vin, &vin->format);
ret = video_register_device(&vin->vdev, VFL_TYPE_VIDEO, -1);
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
index 83d1b2734c41..74bef5b8adad 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
@@ -2,12 +2,11 @@
/*
* Driver for Renesas R-Car VIN
*
+ * Copyright (C) 2025 Niklas Söderlund <niklas.soderlund@ragnatech.se>
* Copyright (C) 2016 Renesas Electronics Corp.
* Copyright (C) 2011-2013 Renesas Solutions Corp.
* Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
* Copyright (C) 2008 Magnus Damm
- *
- * Based on the soc-camera rcar_vin driver
*/
#ifndef __RCAR_VIN__
@@ -79,8 +78,6 @@ struct rvin_video_format {
* @mbus_type: media bus type
* @bus: media bus parallel configuration
* @source_pad: source pad of remote subdevice
- * @sink_pad: sink pad of remote subdevice
- *
*/
struct rvin_parallel_entity {
struct v4l2_async_connection *asc;
@@ -90,7 +87,6 @@ struct rvin_parallel_entity {
struct v4l2_mbus_config_parallel bus;
unsigned int source_pad;
- unsigned int sink_pad;
};
/**
@@ -117,7 +113,6 @@ struct rvin_group_route {
/**
* struct rvin_info - Information about the particular VIN implementation
* @model: VIN model
- * @use_mc: use media controller instead of controlling subdevice
* @use_isp: the VIN is connected to the ISP and not to the CSI-2
* @nv12: support outputting NV12 pixel format
* @raw10: support outputting RAW10 pixel format
@@ -129,7 +124,6 @@ struct rvin_group_route {
*/
struct rvin_info {
enum model_id model;
- bool use_mc;
bool use_isp;
bool nv12;
bool raw10;
@@ -149,7 +143,6 @@ struct rvin_info {
* @vdev: V4L2 video device associated with VIN
* @v4l2_dev: V4L2 device
* @ctrl_handler: V4L2 control handler
- * @notifier: V4L2 asynchronous subdevs notifier
*
* @parallel: parallel input subdevice descriptor
*
@@ -177,7 +170,6 @@ struct rvin_info {
* @crop: active cropping
* @compose: active composing
* @scaler: Optional scaler
- * @std: active video standard of the video source
*
* @alpha: Alpha component to fill in for supported pixel formats
*/
@@ -189,7 +181,6 @@ struct rvin_dev {
struct video_device vdev;
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_async_notifier notifier;
struct rvin_parallel_entity parallel;
@@ -220,7 +211,6 @@ struct rvin_dev {
struct v4l2_rect crop;
struct v4l2_rect compose;
void (*scaler)(struct rvin_dev *vin);
- v4l2_std_id std;
unsigned int alpha;
};
@@ -242,6 +232,7 @@ struct rvin_dev {
* @lock: protects the count, notifier, vin and csi members
* @count: number of enabled VIN instances found in DT
* @notifier: group notifier for CSI-2 async connections
+ * @info: Platform dependent information about the VIN instances
* @vin: VIN instances which are part of the group
* @link_setup: Callback to create all links for the media graph
* @remotes: array of pairs of async connection and subdev pointers
@@ -255,9 +246,10 @@ struct rvin_group {
struct mutex lock;
unsigned int count;
struct v4l2_async_notifier notifier;
+ const struct rvin_info *info;
struct rvin_dev *vin[RCAR_VIN_NUM];
- int (*link_setup)(struct rvin_dev *vin);
+ int (*link_setup)(struct rvin_group *group);
struct {
struct v4l2_async_connection *asc;
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
index 5fa73ab2db53..806acc8f9728 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
@@ -366,7 +366,7 @@ static const struct rzg2l_cru_info rzg3e_cru_info = {
.irq_handler = rzg3e_cru_irq,
.enable_interrupts = rzg3e_cru_enable_interrupts,
.disable_interrupts = rzg3e_cru_disable_interrupts,
- .fifo_empty = rz3e_fifo_empty,
+ .fifo_empty = rzg3e_fifo_empty,
.csi_setup = rzg3e_cru_csi2_setup,
};
@@ -403,7 +403,7 @@ static const u16 rzg2l_cru_regs[] = {
[ICnDMR] = 0x26c,
};
-static const struct rzg2l_cru_info rzgl2_cru_info = {
+static const struct rzg2l_cru_info rzg2l_cru_info = {
.max_width = 2800,
.max_height = 4095,
.image_conv = ICnMC,
@@ -422,7 +422,7 @@ static const struct of_device_id rzg2l_cru_of_id_table[] = {
},
{
.compatible = "renesas,rzg2l-cru",
- .data = &rzgl2_cru_info,
+ .data = &rzg2l_cru_info,
},
{ /* sentinel */ }
};
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
index c30f3b281284..be95b41c37df 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
@@ -64,19 +64,21 @@ struct rzg2l_cru_ip {
/**
* struct rzg2l_cru_ip_format - CRU IP format
- * @code: Media bus code
+ * @codes: Array of up to four media bus codes
* @datatype: MIPI CSI2 data type
* @format: 4CC format identifier (V4L2_PIX_FMT_*)
* @icndmr: ICnDMR register value
- * @bpp: bytes per pixel
* @yuv: Flag to indicate whether the format is YUV-based.
*/
struct rzg2l_cru_ip_format {
- u32 code;
+ /*
+ * RAW output formats may be produced from RAW media bus codes with any
+ * one of the four common Bayer patterns.
+ */
+ u32 codes[4];
u32 datatype;
u32 format;
u32 icndmr;
- u8 bpp;
bool yuv;
};
@@ -192,6 +194,8 @@ struct v4l2_mbus_framefmt *rzg2l_cru_ip_get_src_fmt(struct rzg2l_cru_dev *cru);
const struct rzg2l_cru_ip_format *rzg2l_cru_ip_code_to_fmt(unsigned int code);
const struct rzg2l_cru_ip_format *rzg2l_cru_ip_format_to_fmt(u32 format);
const struct rzg2l_cru_ip_format *rzg2l_cru_ip_index_to_fmt(u32 index);
+bool rzg2l_cru_ip_fmt_supports_mbus_code(const struct rzg2l_cru_ip_format *fmt,
+ unsigned int code);
void rzg2l_cru_enable_interrupts(struct rzg2l_cru_dev *cru);
void rzg2l_cru_disable_interrupts(struct rzg2l_cru_dev *cru);
@@ -199,7 +203,7 @@ void rzg3e_cru_enable_interrupts(struct rzg2l_cru_dev *cru);
void rzg3e_cru_disable_interrupts(struct rzg2l_cru_dev *cru);
bool rzg2l_fifo_empty(struct rzg2l_cru_dev *cru);
-bool rz3e_fifo_empty(struct rzg2l_cru_dev *cru);
+bool rzg3e_fifo_empty(struct rzg2l_cru_dev *cru);
void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
const struct rzg2l_cru_ip_format *ip_fmt,
u8 csi_vc);
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
index 9243306e2aa9..1520211e7418 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
@@ -232,6 +232,18 @@ static const struct rzg2l_csi2_format rzg2l_csi2_formats[] = {
{ .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, },
{ .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, },
{ .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, },
+ { .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, },
+ { .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, },
+ { .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, },
+ { .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, },
+ { .code = MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, },
+ { .code = MEDIA_BUS_FMT_SGBRG12_1X12, .bpp = 12, },
+ { .code = MEDIA_BUS_FMT_SGRBG12_1X12, .bpp = 12, },
+ { .code = MEDIA_BUS_FMT_SRGGB12_1X12, .bpp = 12, },
+ { .code = MEDIA_BUS_FMT_SBGGR14_1X14, .bpp = 14, },
+ { .code = MEDIA_BUS_FMT_SGBRG14_1X14, .bpp = 14, },
+ { .code = MEDIA_BUS_FMT_SGRBG14_1X14, .bpp = 14, },
+ { .code = MEDIA_BUS_FMT_SRGGB14_1X14, .bpp = 14, },
};
static inline struct rzg2l_csi2 *sd_to_csi2(struct v4l2_subdev *sd)
@@ -282,15 +294,18 @@ static int rzg2l_csi2_calc_mbps(struct rzg2l_csi2 *csi2)
const struct rzg2l_csi2_format *format;
const struct v4l2_mbus_framefmt *fmt;
struct v4l2_subdev_state *state;
- struct v4l2_ctrl *ctrl;
+ struct media_pad *remote_pad;
u64 mbps;
+ s64 ret;
- /* Read the pixel rate control from remote. */
- ctrl = v4l2_ctrl_find(source->ctrl_handler, V4L2_CID_PIXEL_RATE);
- if (!ctrl) {
- dev_err(csi2->dev, "no pixel rate control in subdev %s\n",
- source->name);
- return -EINVAL;
+ if (!csi2->remote_source)
+ return -ENODEV;
+
+ remote_pad = media_pad_remote_pad_unique(&csi2->pads[RZG2L_CSI2_SINK]);
+ if (IS_ERR(remote_pad)) {
+ dev_err(csi2->dev, "can't get source pad of %s (%ld)\n",
+ csi2->remote_source->name, PTR_ERR(remote_pad));
+ return PTR_ERR(remote_pad);
}
state = v4l2_subdev_lock_and_get_active_state(&csi2->subdev);
@@ -298,12 +313,16 @@ static int rzg2l_csi2_calc_mbps(struct rzg2l_csi2 *csi2)
format = rzg2l_csi2_code_to_fmt(fmt->code);
v4l2_subdev_unlock_state(state);
- /*
- * Calculate hsfreq in Mbps
- * hsfreq = (pixel_rate * bits_per_sample) / number_of_lanes
- */
- mbps = v4l2_ctrl_g_ctrl_int64(ctrl) * format->bpp;
- do_div(mbps, csi2->lanes * 1000000);
+ /* Read the link frequency from remote subdevice. */
+ ret = v4l2_get_link_freq(remote_pad, format->bpp, csi2->lanes * 2);
+ if (ret < 0) {
+ dev_err(csi2->dev, "can't retrieve link freq from subdev %s\n",
+ source->name);
+ return -EINVAL;
+ }
+
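+ /*
+ * v4l2_get_link_freq() returns the link frequency in Hz. The D-PHY
+ * transfers two bits per clock cycle (DDR), so the per-lane bit rate
+ * in Mbps is link_freq * 2 / 1000000.
+ */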
+ mbps = ret * 2;
+ do_div(mbps, 1000000);
return mbps;
}
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
index 7836c7cd53dc..5f2c87858bfe 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
@@ -13,42 +13,83 @@
static const struct rzg2l_cru_ip_format rzg2l_cru_ip_formats[] = {
{
- .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .codes = {
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ },
.datatype = MIPI_CSI2_DT_YUV422_8B,
.format = V4L2_PIX_FMT_UYVY,
- .bpp = 2,
.icndmr = ICnDMR_YCMODE_UYVY,
.yuv = true,
},
{
- .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .codes = {
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ },
.format = V4L2_PIX_FMT_SBGGR8,
.datatype = MIPI_CSI2_DT_RAW8,
- .bpp = 1,
.icndmr = 0,
.yuv = false,
},
{
- .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .codes = {
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ },
.format = V4L2_PIX_FMT_SGBRG8,
.datatype = MIPI_CSI2_DT_RAW8,
- .bpp = 1,
.icndmr = 0,
.yuv = false,
},
{
- .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .codes = {
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ },
.format = V4L2_PIX_FMT_SGRBG8,
.datatype = MIPI_CSI2_DT_RAW8,
- .bpp = 1,
.icndmr = 0,
.yuv = false,
},
{
- .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .codes = {
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ },
.format = V4L2_PIX_FMT_SRGGB8,
.datatype = MIPI_CSI2_DT_RAW8,
- .bpp = 1,
+ .icndmr = 0,
+ .yuv = false,
+ },
+ {
+ .codes = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10
+ },
+ .format = V4L2_PIX_FMT_RAW_CRU10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .icndmr = 0,
+ .yuv = false,
+ },
+ {
+ .codes = {
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12
+ },
+ .format = V4L2_PIX_FMT_RAW_CRU12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .icndmr = 0,
+ .yuv = false,
+ },
+ {
+ .codes = {
+ MEDIA_BUS_FMT_SBGGR14_1X14,
+ MEDIA_BUS_FMT_SGBRG14_1X14,
+ MEDIA_BUS_FMT_SGRBG14_1X14,
+ MEDIA_BUS_FMT_SRGGB14_1X14
+ },
+ .format = V4L2_PIX_FMT_RAW_CRU14,
+ .datatype = MIPI_CSI2_DT_RAW14,
.icndmr = 0,
.yuv = false,
},
@@ -56,11 +97,14 @@ static const struct rzg2l_cru_ip_format rzg2l_cru_ip_formats[] = {
const struct rzg2l_cru_ip_format *rzg2l_cru_ip_code_to_fmt(unsigned int code)
{
- unsigned int i;
+ unsigned int i, j;
- for (i = 0; i < ARRAY_SIZE(rzg2l_cru_ip_formats); i++)
- if (rzg2l_cru_ip_formats[i].code == code)
- return &rzg2l_cru_ip_formats[i];
+ for (i = 0; i < ARRAY_SIZE(rzg2l_cru_ip_formats); i++) {
+ for (j = 0; j < ARRAY_SIZE(rzg2l_cru_ip_formats[i].codes); j++) {
+ if (rzg2l_cru_ip_formats[i].codes[j] == code)
+ return &rzg2l_cru_ip_formats[i];
+ }
+ }
return NULL;
}
@@ -85,6 +129,17 @@ const struct rzg2l_cru_ip_format *rzg2l_cru_ip_index_to_fmt(u32 index)
return &rzg2l_cru_ip_formats[index];
}
+bool rzg2l_cru_ip_fmt_supports_mbus_code(const struct rzg2l_cru_ip_format *fmt,
+ unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fmt->codes); i++)
+ if (fmt->codes[i] == code)
+ return true;
+
+ return false;
+}
struct v4l2_mbus_framefmt *rzg2l_cru_ip_get_src_fmt(struct rzg2l_cru_dev *cru)
{
struct v4l2_subdev_state *state;
@@ -162,7 +217,7 @@ static int rzg2l_cru_ip_set_format(struct v4l2_subdev *sd,
sink_format = v4l2_subdev_state_get_format(state, fmt->pad);
if (!rzg2l_cru_ip_code_to_fmt(fmt->format.code))
- sink_format->code = rzg2l_cru_ip_formats[0].code;
+ sink_format->code = rzg2l_cru_ip_formats[0].codes[0];
else
sink_format->code = fmt->format.code;
@@ -188,11 +243,26 @@ static int rzg2l_cru_ip_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code)
{
- if (code->index >= ARRAY_SIZE(rzg2l_cru_ip_formats))
- return -EINVAL;
+ unsigned int index = code->index;
+ unsigned int i, j;
- code->code = rzg2l_cru_ip_formats[code->index].code;
- return 0;
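+ /*
+ * Flatten the two-dimensional codes[] table: the requested index
+ * counts only non-zero entries, so unused slots are skipped
+ * transparently.
+ */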
+ for (i = 0; i < ARRAY_SIZE(rzg2l_cru_ip_formats); i++) {
+ const struct rzg2l_cru_ip_format *fmt = &rzg2l_cru_ip_formats[i];
+
+ for (j = 0; j < ARRAY_SIZE(fmt->codes); j++) {
+ if (!fmt->codes[j])
+ continue;
+
+ if (!index) {
+ code->code = fmt->codes[j];
+ return 0;
+ }
+
+ index--;
+ }
+ }
+
+ return -EINVAL;
}
static int rzg2l_cru_ip_enum_frame_size(struct v4l2_subdev *sd,
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
index 067c6af14e95..a8817a7066b2 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
@@ -323,7 +323,7 @@ static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
return 0;
}
-bool rz3e_fifo_empty(struct rzg2l_cru_dev *cru)
+bool rzg3e_fifo_empty(struct rzg2l_cru_dev *cru)
{
u32 amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);
@@ -345,8 +345,6 @@ bool rzg2l_fifo_empty(struct rzg2l_cru_dev *cru)
amnfifopntr_w = amnfifopntr & AMnFIFOPNTR_FIFOWPNTR;
amnfifopntr_r_y =
(amnfifopntr & AMnFIFOPNTR_FIFORPNTR_Y) >> 16;
- if (amnfifopntr_w == amnfifopntr_r_y)
- return true;
return amnfifopntr_w == amnfifopntr_r_y;
}
@@ -941,15 +939,7 @@ static void rzg2l_cru_format_align(struct rzg2l_cru_dev *cru,
v4l_bound_align_image(&pix->width, 320, info->max_width, 1,
&pix->height, 240, info->max_height, 2, 0);
- if (info->has_stride) {
- u32 stride = clamp(pix->bytesperline, pix->width * fmt->bpp,
- RZG2L_CRU_STRIDE_MAX);
- pix->bytesperline = round_up(stride, RZG2L_CRU_STRIDE_ALIGN);
- } else {
- pix->bytesperline = pix->width * fmt->bpp;
- }
-
- pix->sizeimage = pix->bytesperline * pix->height;
+ v4l2_fill_pixfmt(pix, pix->pixelformat, pix->width, pix->height);
dev_dbg(cru->dev, "Format %ux%u bpl: %u size: %u\n",
pix->width, pix->height, pix->bytesperline, pix->sizeimage);
@@ -1031,6 +1021,31 @@ static int rzg2l_cru_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
+static int rzg2l_cru_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct rzg2l_cru_dev *cru = video_drvdata(file);
+ const struct rzg2l_cru_info *info = cru->info;
+ const struct rzg2l_cru_ip_format *fmt;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fmt = rzg2l_cru_ip_format_to_fmt(fsize->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = RZG2L_CRU_MIN_INPUT_WIDTH;
+ fsize->stepwise.max_width = info->max_width;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = RZG2L_CRU_MIN_INPUT_HEIGHT;
+ fsize->stepwise.max_height = info->max_height;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
static const struct v4l2_ioctl_ops rzg2l_cru_ioctl_ops = {
.vidioc_querycap = rzg2l_cru_querycap,
.vidioc_try_fmt_vid_cap = rzg2l_cru_try_fmt_vid_cap,
@@ -1047,6 +1062,7 @@ static const struct v4l2_ioctl_ops rzg2l_cru_ioctl_ops = {
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_enum_framesizes = rzg2l_cru_enum_framesizes,
};
/* -----------------------------------------------------------------------------
@@ -1129,7 +1145,7 @@ static int rzg2l_cru_video_link_validate(struct media_link *link)
if (fmt.format.width != cru->format.width ||
fmt.format.height != cru->format.height ||
fmt.format.field != cru->format.field ||
- video_fmt->code != fmt.format.code)
+ !rzg2l_cru_ip_fmt_supports_mbus_code(video_fmt, fmt.format.code))
return -EPIPE;
return 0;
diff --git a/drivers/media/platform/renesas/vsp1/Makefile b/drivers/media/platform/renesas/vsp1/Makefile
index de8c802e1d1a..2057c8f7be47 100644
--- a/drivers/media/platform/renesas/vsp1/Makefile
+++ b/drivers/media/platform/renesas/vsp1/Makefile
@@ -6,5 +6,6 @@ vsp1-y += vsp1_clu.o vsp1_hsit.o vsp1_lut.o
vsp1-y += vsp1_brx.o vsp1_sru.o vsp1_uds.o
vsp1-y += vsp1_hgo.o vsp1_hgt.o vsp1_histo.o
vsp1-y += vsp1_iif.o vsp1_lif.o vsp1_uif.o
+vsp1-y += vsp1_vspx.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1.o
diff --git a/drivers/media/platform/renesas/vsp1/vsp1.h b/drivers/media/platform/renesas/vsp1/vsp1.h
index f97a1a31bfab..94de2e85792e 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1.h
@@ -111,6 +111,7 @@ struct vsp1_device {
struct media_entity_operations media_ops;
struct vsp1_drm *drm;
+ struct vsp1_vspx *vspx;
};
int vsp1_device_get(struct vsp1_device *vsp1);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_dl.c b/drivers/media/platform/renesas/vsp1/vsp1_dl.c
index bb8228b19824..d732b4ed1180 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_dl.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
+#include <linux/lockdep.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
@@ -176,6 +177,7 @@ struct vsp1_dl_cmd_pool {
* @bodies: list of extra display list bodies
* @pre_cmd: pre command to be issued through extended dl header
* @post_cmd: post command to be issued through extended dl header
+ * @allocated: flag to detect double list release
* @has_chain: if true, indicates that there's a partition chain
* @chain: entry in the display list partition chain
* @flags: display list flags, a combination of VSP1_DL_FRAME_END_*
@@ -194,6 +196,8 @@ struct vsp1_dl_list {
struct vsp1_dl_ext_cmd *pre_cmd;
struct vsp1_dl_ext_cmd *post_cmd;
+ bool allocated;
+
bool has_chain;
struct list_head chain;
@@ -212,6 +216,7 @@ struct vsp1_dl_list {
* @pending: list waiting to be queued to the hardware
* @pool: body pool for the display list bodies
* @cmdpool: commands pool for extended display list
+ * @list_count: number of allocated display lists
*/
struct vsp1_dl_manager {
unsigned int index;
@@ -226,6 +231,8 @@ struct vsp1_dl_manager {
struct vsp1_dl_body_pool *pool;
struct vsp1_dl_cmd_pool *cmdpool;
+
+ size_t list_count;
};
/* -----------------------------------------------------------------------------
@@ -606,6 +613,8 @@ struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
struct vsp1_dl_list *dl = NULL;
unsigned long flags;
+ lockdep_assert_not_held(&dlm->lock);
+
spin_lock_irqsave(&dlm->lock, flags);
if (!list_empty(&dlm->free)) {
@@ -617,6 +626,7 @@ struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
* display list can assert list_empty() if it is not in a chain.
*/
INIT_LIST_HEAD(&dl->chain);
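+ /* Mark the list as in use so double releases can be detected. */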
+ dl->allocated = true;
}
spin_unlock_irqrestore(&dlm->lock, flags);
@@ -632,6 +642,8 @@ static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
if (!dl)
return;
+ lockdep_assert_held(&dl->dlm->lock);
+
/*
* Release any linked display-lists which were chained for a single
* hardware operation.
@@ -657,6 +669,13 @@ static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
*/
dl->body0->num_entries = 0;
+ /*
+ * Return the display list to the 'free' pool. If the list has already
+ * been returned, be loud about it.
+ */
+ WARN_ON_ONCE(!dl->allocated);
+ dl->allocated = false;
+
list_add_tail(&dl->list, &dl->dlm->free);
}
@@ -1067,6 +1086,7 @@ void vsp1_dlm_setup(struct vsp1_device *vsp1)
void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
unsigned long flags;
+ size_t list_count;
spin_lock_irqsave(&dlm->lock, flags);
@@ -1074,8 +1094,11 @@ void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
__vsp1_dl_list_put(dlm->queued);
__vsp1_dl_list_put(dlm->pending);
+ list_count = list_count_nodes(&dlm->free);
spin_unlock_irqrestore(&dlm->lock, flags);
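+ /* After the reset all display lists must be back in the free pool. */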
+ WARN_ON_ONCE(list_count != dlm->list_count);
+
dlm->active = NULL;
dlm->queued = NULL;
dlm->pending = NULL;
@@ -1145,6 +1168,8 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
list_add_tail(&dl->list, &dlm->free);
}
+ dlm->list_count = prealloc;
+
if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
VSP1_EXTCMD_AUTOFLD, prealloc);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drm.c b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
index fe55e8747b05..15d266439564 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <media/media-entity.h>
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drv.c b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
index 8270a9d207cb..b8d06e88c475 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
@@ -33,11 +33,13 @@
#include "vsp1_lif.h"
#include "vsp1_lut.h"
#include "vsp1_pipe.h"
+#include "vsp1_regs.h"
#include "vsp1_rwpf.h"
#include "vsp1_sru.h"
#include "vsp1_uds.h"
#include "vsp1_uif.h"
#include "vsp1_video.h"
+#include "vsp1_vspx.h"
/* -----------------------------------------------------------------------------
* Interrupt Handling
@@ -490,7 +492,10 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
ret = media_device_register(mdev);
} else {
- ret = vsp1_drm_init(vsp1);
+ if (vsp1->info->version == VI6_IP_VERSION_MODEL_VSPX_GEN4)
+ ret = vsp1_vspx_init(vsp1);
+ else
+ ret = vsp1_drm_init(vsp1);
}
done:
@@ -502,7 +507,9 @@ done:
int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index)
{
+ u32 version = vsp1->version & VI6_IP_VERSION_MODEL_MASK;
unsigned int timeout;
+ int ret = 0;
u32 status;
status = vsp1_read(vsp1, VI6_STATUS);
@@ -523,7 +530,11 @@ int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index)
return -ETIMEDOUT;
}
- return 0;
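+ /* VSPD Gen3 and Gen4 instances also need their FCP to be soft reset. */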
+ if (version == VI6_IP_VERSION_MODEL_VSPD_GEN3 ||
+ version == VI6_IP_VERSION_MODEL_VSPD_GEN4)
+ ret = rcar_fcp_soft_reset(vsp1->fcp);
+
+ return ret;
}
static int vsp1_device_init(struct vsp1_device *vsp1)
@@ -851,6 +862,13 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uif_count = 2,
.wpf_count = 1,
.num_bru_inputs = 5,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPX_GEN4,
+ .model = "VSP2-X",
+ .gen = 4,
+ .features = VSP1_HAS_IIF,
+ .rpf_count = 2,
+ .wpf_count = 1,
},
};
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
index 3cbb768cf6ad..5d769cc42fe1 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -473,6 +474,8 @@ void vsp1_pipeline_run(struct vsp1_pipeline *pipe)
{
struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ lockdep_assert_held(&pipe->irqlock);
+
if (pipe->state == VSP1_PIPELINE_STOPPED) {
vsp1_write(vsp1, VI6_CMD(pipe->output->entity.index),
VI6_CMD_STRCMD);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_regs.h b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
index 86e47c2d991f..10cfbcd1b6e0 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
@@ -799,6 +799,7 @@
#define VI6_IP_VERSION_MODEL_VSPDL_GEN3 (0x19 << 8)
#define VI6_IP_VERSION_MODEL_VSPBS_GEN3 (0x1a << 8)
#define VI6_IP_VERSION_MODEL_VSPD_GEN4 (0x1c << 8)
+#define VI6_IP_VERSION_MODEL_VSPX_GEN4 (0x1d << 8)
/* RZ/G2L SoCs have no version register, so use 0x80 as the model version */
#define VI6_IP_VERSION_MODEL_VSPD_RZG2L (0x80 << 8)
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_vspx.c b/drivers/media/platform/renesas/vsp1/vsp1_vspx.c
new file mode 100644
index 000000000000..a754b92232bd
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_vspx.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_vspx.c -- R-Car Gen 4 VSPX
+ *
+ * Copyright (C) 2025 Ideas On Board Oy
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#include "vsp1_vspx.h"
+
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+#include <media/vsp1.h>
+
+#include "vsp1_dl.h"
+#include "vsp1_iif.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+
+/*
+ * struct vsp1_vspx_pipeline - VSPX pipeline
+ * @pipe: the VSP1 pipeline
+ * @partition: the pre-calculated partition used by the pipeline
+ * @mutex: protects the streaming start/stop sequences
+ * @lock: protect access to the enabled flag
+ * @enabled: the enable flag
+ * @vspx_frame_end: frame end callback
+ * @frame_end_data: data for the frame end callback
+ */
+struct vsp1_vspx_pipeline {
+ struct vsp1_pipeline pipe;
+ struct vsp1_partition partition;
+
+ /*
+ * Protects the streaming start/stop sequences.
+ *
+ * The start/stop sequences cannot be locked with the 'lock' spinlock
+ * as they acquire mutexes when handling the pm runtime and the vsp1
+ * pipe start/stop operations. Provide a dedicated mutex for this
+ * reason.
+ */
+ struct mutex mutex;
+
+ /*
+ * Protects the enable flag.
+ *
+ * The enabled flag is contended between the start/stop streaming
+ * routines and the job_run one, which cannot take a mutex as it is
+ * called from the ISP irq context.
+ */
+ spinlock_t lock;
+ bool enabled;
+
+ void (*vspx_frame_end)(void *frame_end_data);
+ void *frame_end_data;
+};
+
+static inline struct vsp1_vspx_pipeline *
+to_vsp1_vspx_pipeline(struct vsp1_pipeline *pipe)
+{
+ return container_of(pipe, struct vsp1_vspx_pipeline, pipe);
+}
+
+/*
+ * struct vsp1_vspx - VSPX device
+ * @vsp1: the VSP1 device
+ * @pipe: the VSPX pipeline
+ */
+struct vsp1_vspx {
+ struct vsp1_device *vsp1;
+ struct vsp1_vspx_pipeline pipe;
+};
+
+/* Apply the given width, height and fourcc to the RWPF's subdevice */
+static int vsp1_vspx_rwpf_set_subdev_fmt(struct vsp1_device *vsp1,
+ struct vsp1_rwpf *rwpf,
+ u32 isp_fourcc,
+ unsigned int width,
+ unsigned int height)
+{
+ struct vsp1_entity *ent = &rwpf->entity;
+ struct v4l2_subdev_format format = {};
+ u32 vspx_fourcc;
+
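+ /*
+ * The VSPX transfers data without interpreting it; RAW Bayer images
+ * and ConfigDMA buffers are carried using opaque RGB pixel formats of
+ * a suitable size.
+ */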
+ switch (isp_fourcc) {
+ case V4L2_PIX_FMT_GREY:
+ /* 8-bit RAW Bayer image. */
+ vspx_fourcc = V4L2_PIX_FMT_RGB332;
+ break;
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
+ case V4L2_PIX_FMT_Y16:
+ /* 10-, 12- and 16-bit RAW Bayer images. */
+ vspx_fourcc = V4L2_PIX_FMT_RGB565;
+ break;
+ case V4L2_META_FMT_GENERIC_8:
+ /* ConfigDMA parameters buffer. */
+ vspx_fourcc = V4L2_PIX_FMT_XBGR32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rwpf->fmtinfo = vsp1_get_format_info(vsp1, vspx_fourcc);
+
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.pad = RWPF_PAD_SINK;
+ format.format.width = width;
+ format.format.height = height;
+ format.format.field = V4L2_FIELD_NONE;
+ format.format.code = rwpf->fmtinfo->mbus;
+
+ return v4l2_subdev_call(&ent->subdev, pad, set_fmt, NULL, &format);
+}
+
+/* Configure the RPF->IIF->WPF pipeline for ConfigDMA or RAW image transfer. */
+static int vsp1_vspx_pipeline_configure(struct vsp1_device *vsp1,
+ dma_addr_t addr, u32 isp_fourcc,
+ unsigned int width, unsigned int height,
+ unsigned int stride,
+ unsigned int iif_sink_pad,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
+ struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
+ struct vsp1_rwpf *rpf0 = pipe->inputs[0];
+ int ret;
+
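+ /* Apply the same frame geometry to the RPF input and the WPF output. */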
+ ret = vsp1_vspx_rwpf_set_subdev_fmt(vsp1, rpf0, isp_fourcc, width,
+ height);
+ if (ret)
+ return ret;
+
+ ret = vsp1_vspx_rwpf_set_subdev_fmt(vsp1, pipe->output, isp_fourcc,
+ width, height);
+ if (ret)
+ return ret;
+
+ vsp1_pipeline_calculate_partition(pipe, &pipe->part_table[0], width, 0);
+ rpf0->format.plane_fmt[0].bytesperline = stride;
+ rpf0->format.num_planes = 1;
+ rpf0->mem.addr[0] = addr;
+
+ /*
+ * Connect RPF0 to the IIF sink pad corresponding to the config or image
+ * path.
+ */
+ rpf0->entity.sink_pad = iif_sink_pad;
+
+ vsp1_entity_route_setup(&rpf0->entity, pipe, dlb);
+ vsp1_entity_configure_stream(&rpf0->entity, rpf0->entity.state, pipe,
+ dl, dlb);
+ vsp1_entity_configure_partition(&rpf0->entity, pipe,
+ &pipe->part_table[0], dl, dlb);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+static void vsp1_vspx_pipeline_frame_end(struct vsp1_pipeline *pipe,
+ unsigned int completion)
+{
+ struct vsp1_vspx_pipeline *vspx_pipe = to_vsp1_vspx_pipeline(pipe);
+
+ scoped_guard(spinlock_irqsave, &pipe->irqlock) {
+ /*
+ * Operating the vsp1_pipe in singleshot mode requires to
+ * manually set the pipeline state to stopped when a transfer
+ * is completed.
+ */
+ pipe->state = VSP1_PIPELINE_STOPPED;
+ }
+
+ if (vspx_pipe->vspx_frame_end)
+ vspx_pipe->vspx_frame_end(vspx_pipe->frame_end_data);
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP Driver API (include/media/vsp1.h)
+ */
+
+/**
+ * vsp1_isp_init() - Initialize the VSPX
+ * @dev: The VSP1 struct device
+ *
+ * Return: %0 on success or a negative error code on failure
+ */
+int vsp1_isp_init(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ if (!vsp1)
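+ /* drvdata is only set once the VSP1 driver has probed the device. */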
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_isp_init);
+
+/**
+ * vsp1_isp_get_bus_master - Get VSPX bus master
+ * @dev: The VSP1 struct device
+ *
+ * The VSPX accesses memory through an FCPX instance. When allocating memory
+ * buffers that will have to be accessed by the VSPX, the 'struct device' of
+ * the FCPX should be used. Use this function to get a reference to it.
+ *
+ * Return: a pointer to the bus master's device
+ */
+struct device *vsp1_isp_get_bus_master(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ if (!vsp1)
+ return ERR_PTR(-ENODEV);
+
+ return vsp1->bus_master;
+}
+EXPORT_SYMBOL_GPL(vsp1_isp_get_bus_master);
+
+/**
+ * vsp1_isp_alloc_buffer - Allocate a buffer in the VSPX address space
+ * @dev: The VSP1 struct device
+ * @size: The size of the buffer to be allocated by the VSPX
+ * @buffer_desc: The buffer descriptor. Will be filled with the buffer
+ * CPU-mapped address, the bus address and the size of the
+ * allocated buffer
+ *
+ * Allocate a buffer that will be later accessed by the VSPX. Buffers allocated
+ * using vsp1_isp_alloc_buffer() shall be released with a call to
+ * vsp1_isp_free_buffer(). This function is used by the ISP driver to allocate
+ * memory for the ConfigDMA parameters buffer.
+ *
+ * Return: %0 on success or a negative error code on failure
+ */
+int vsp1_isp_alloc_buffer(struct device *dev, size_t size,
+ struct vsp1_isp_buffer_desc *buffer_desc)
+{
+ struct device *bus_master = vsp1_isp_get_bus_master(dev);
+
+ if (IS_ERR_OR_NULL(bus_master))
+ return -ENODEV;
+
+ buffer_desc->cpu_addr = dma_alloc_coherent(bus_master, size,
+ &buffer_desc->dma_addr,
+ GFP_KERNEL);
+ if (!buffer_desc->cpu_addr)
+ return -ENOMEM;
+
+ buffer_desc->size = size;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_isp_alloc_buffer);
+
+/**
+ * vsp1_isp_free_buffer - Release a buffer allocated by vsp1_isp_alloc_buffer()
+ * @dev: The VSP1 struct device
+ * @buffer_desc: The descriptor of the buffer to release as returned by
+ * vsp1_isp_alloc_buffer()
+ *
+ * Release memory in the VSPX address space allocated by
+ * vsp1_isp_alloc_buffer().
+ */
+void vsp1_isp_free_buffer(struct device *dev,
+ struct vsp1_isp_buffer_desc *buffer_desc)
+{
+ struct device *bus_master = vsp1_isp_get_bus_master(dev);
+
+ if (IS_ERR_OR_NULL(bus_master))
+ return;
+
+ dma_free_coherent(bus_master, buffer_desc->size, buffer_desc->cpu_addr,
+ buffer_desc->dma_addr);
+}
+EXPORT_SYMBOL_GPL(vsp1_isp_free_buffer);
+
+/**
+ * vsp1_isp_start_streaming - Start processing VSPX jobs
+ * @dev: The VSP1 struct device
+ * @frame_end: The frame end callback description
+ *
+ * Start the VSPX and prepare for accepting buffer transfer job requests.
+ * The caller is responsible for tracking the started state of the VSPX.
+ * Attempting to start an already started VSPX instance is an error.
+ *
+ * Return: %0 on success or a negative error code on failure
+ */
+int vsp1_isp_start_streaming(struct device *dev,
+ struct vsp1_vspx_frame_end *frame_end)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
+ struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
+ u32 value;
+ int ret;
+
+ if (!frame_end)
+ return -EINVAL;
+
+ guard(mutex)(&vspx_pipe->mutex);
+
+ scoped_guard(spinlock_irq, &vspx_pipe->lock) {
+ if (vspx_pipe->enabled)
+ return -EBUSY;
+ }
+
+ vspx_pipe->vspx_frame_end = frame_end->vspx_frame_end;
+ vspx_pipe->frame_end_data = frame_end->frame_end_data;
+
+ /* Enable the VSP1 and prepare for streaming. */
+ vsp1_pipeline_dump(pipe, "VSPX job");
+
+ ret = vsp1_device_get(vsp1);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Make sure VSPX is not active. This should never happen in normal
+ * usage.
+ */
+ value = vsp1_read(vsp1, VI6_CMD(0));
+ if (value & VI6_CMD_STRCMD) {
+ dev_err(vsp1->dev,
+ "%s: Starting of WPF0 already reserved\n", __func__);
+ ret = -EBUSY;
+ goto error_put;
+ }
+
+ value = vsp1_read(vsp1, VI6_STATUS);
+ if (value & VI6_STATUS_SYS_ACT(0)) {
+ dev_err(vsp1->dev,
+ "%s: WPF0 has not entered idle state\n", __func__);
+ ret = -EBUSY;
+ goto error_put;
+ }
+
+ scoped_guard(spinlock_irq, &vspx_pipe->lock) {
+ vspx_pipe->enabled = true;
+ }
+
+ return 0;
+
+error_put:
+ vsp1_device_put(vsp1);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vsp1_isp_start_streaming);
+
+/**
+ * vsp1_isp_stop_streaming - Stop the VSPX
+ * @dev: The VSP1 struct device
+ *
+ * Stop the VSPX operation by stopping the vsp1 pipeline and waiting for the
+ * last frame in transfer, if any, to complete.
+ *
+ * The caller is responsible for tracking the stopped state of the VSPX.
+ * Attempting to stop an already stopped VSPX instance is a nop.
+ */
+void vsp1_isp_stop_streaming(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
+ struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
+
+ guard(mutex)(&vspx_pipe->mutex);
+
+ scoped_guard(spinlock_irq, &vspx_pipe->lock) {
+ if (!vspx_pipe->enabled)
+ return;
+
+ vspx_pipe->enabled = false;
+ }
+
+ WARN_ON_ONCE(vsp1_pipeline_stop(pipe));
+
+ vspx_pipe->vspx_frame_end = NULL;
+ vsp1_dlm_reset(pipe->output->dlm);
+ vsp1_device_put(vsp1);
+}
+EXPORT_SYMBOL_GPL(vsp1_isp_stop_streaming);
+
+/**
+ * vsp1_isp_job_prepare - Prepare a new buffer transfer job
+ * @dev: The VSP1 struct device
+ * @job: The job description
+ *
+ * Prepare a new buffer transfer job by populating a display list that will be
+ * later executed by a call to vsp1_isp_job_run(). All pending jobs must be
+ * released after stopping the streaming operations with a call to
+ * vsp1_isp_job_release().
+ *
+ * The VSPX must have been started before new jobs can be prepared.
+ *
+ * Return: %0 on success or a negative error code on failure
+ */
+int vsp1_isp_job_prepare(struct device *dev, struct vsp1_isp_job_desc *job)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
+ struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
+ const struct v4l2_pix_format_mplane *pix_mp;
+ struct vsp1_dl_list *second_dl = NULL;
+ struct vsp1_dl_body *dlb;
+ struct vsp1_dl_list *dl;
+ int ret;
+
+ /*
+ * Transfer the buffers described in the job: an optional ConfigDMA
+ * parameters buffer and a RAW image.
+ */
+
+ job->dl = vsp1_dl_list_get(pipe->output->dlm);
+ if (!job->dl)
+ return -ENOMEM;
+
+ dl = job->dl;
+ dlb = vsp1_dl_list_get_body0(dl);
+
+ /* Configure IIF routing and enable IIF function. */
+ vsp1_entity_route_setup(pipe->iif, pipe, dlb);
+ vsp1_entity_configure_stream(pipe->iif, pipe->iif->state, pipe,
+ dl, dlb);
+
+ /* Configure WPF0 to enable RPF0 as source. */
+ vsp1_entity_route_setup(&pipe->output->entity, pipe, dlb);
+ vsp1_entity_configure_stream(&pipe->output->entity,
+ pipe->output->entity.state, pipe,
+ dl, dlb);
+
+ if (job->config.pairs) {
+ /*
+ * Writing fewer than 17 pairs corrupts the output image (< 16
+ * pairs) or freezes the VSPX operations (exactly 16 pairs). Only
+ * allow more than 16 pairs to be written.
+ */
+ if (job->config.pairs <= 16) {
+ ret = -EINVAL;
+ goto error_put_dl;
+ }
+
+ /*
+ * Configure RPF0 for ConfigDMA data. Transfer the number of
+ * configuration pairs plus 2 words for the header.
+ */
+ ret = vsp1_vspx_pipeline_configure(vsp1, job->config.mem,
+ V4L2_META_FMT_GENERIC_8,
+ job->config.pairs * 2 + 2, 1,
+ job->config.pairs * 2 + 2,
+ VSPX_IIF_SINK_PAD_CONFIG,
+ dl, dlb);
+ if (ret)
+ goto error_put_dl;
+
+ second_dl = vsp1_dl_list_get(pipe->output->dlm);
+ if (!second_dl) {
+ ret = -ENOMEM;
+ goto error_put_dl;
+ }
+
+ dl = second_dl;
+ dlb = vsp1_dl_list_get_body0(dl);
+ }
+
+ /* Configure RPF0 for RAW image transfer. */
+ pix_mp = &job->img.fmt;
+ ret = vsp1_vspx_pipeline_configure(vsp1, job->img.mem,
+ pix_mp->pixelformat,
+ pix_mp->width, pix_mp->height,
+ pix_mp->plane_fmt[0].bytesperline,
+ VSPX_IIF_SINK_PAD_IMG, dl, dlb);
+ if (ret)
+ goto error_put_dl;
+
+ if (second_dl)
+ vsp1_dl_list_add_chain(job->dl, second_dl);
+
+ return 0;
+
+error_put_dl:
+ if (second_dl)
+ vsp1_dl_list_put(second_dl);
+ vsp1_dl_list_put(job->dl);
+ job->dl = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vsp1_isp_job_prepare);
+
+/**
+ * vsp1_isp_job_run - Run a buffer transfer job
+ * @dev: The VSP1 struct device
+ * @job: The job to be run
+ *
+ * Run the display list contained in the job description provided by the caller.
+ * The job must have been prepared with a call to vsp1_isp_job_prepare() and
+ * the job's display list shall be valid.
+ *
+ * Jobs can be run only on VSPX instances which have been started. Requests
+ * to run a job after the VSPX has been stopped return -EINVAL and the job
+ * resources shall be released by the caller with vsp1_isp_job_release().
+ * When a job is run successfully, all the resources acquired by
+ * vsp1_isp_job_prepare() are released by this function and no further action
+ * is required from the caller.
+ *
+ * Return: %0 on success or a negative error code on failure
+ */
+int vsp1_isp_job_run(struct device *dev, struct vsp1_isp_job_desc *job)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
+ struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
+ u32 value;
+
+ /* Make sure VSPX is not busy processing a frame. */
+ value = vsp1_read(vsp1, VI6_CMD(0));
+ if (value) {
+ dev_err(vsp1->dev,
+ "%s: Starting of WPF0 already reserved\n", __func__);
+ return -EBUSY;
+ }
+
+ scoped_guard(spinlock_irqsave, &vspx_pipe->lock) {
+ /*
+ * If a new job is scheduled when the VSPX is stopped, do not
+ * run it.
+ */
+ if (!vspx_pipe->enabled)
+ return -EINVAL;
+
+ vsp1_dl_list_commit(job->dl, 0);
+
+ /*
+ * The display list is now under control of the display list
+ * manager and will be released automatically when the job
+ * completes.
+ */
+ job->dl = NULL;
+ }
+
+ scoped_guard(spinlock_irqsave, &pipe->irqlock) {
+ vsp1_pipeline_run(pipe);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_isp_job_run);
+
+/**
+ * vsp1_isp_job_release - Release a non processed transfer job
+ * @dev: The VSP1 struct device
+ * @job: The job to release
+ *
+ * Release a job prepared by a call to vsp1_isp_job_prepare() and not yet
+ * run. All pending jobs shall be released after streaming has been stopped.
+ */
+void vsp1_isp_job_release(struct device *dev,
+ struct vsp1_isp_job_desc *job)
+{
+ vsp1_dl_list_put(job->dl);
+}
+EXPORT_SYMBOL_GPL(vsp1_isp_job_release);
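+
+/*
+ * A minimal sketch of the calling sequence expected from an ISP driver;
+ * 'vspx_dev' and 'job' below are hypothetical caller-side variables:
+ *
+ * vsp1_isp_start_streaming(vspx_dev, &frame_end);
+ * vsp1_isp_job_prepare(vspx_dev, &job);
+ * vsp1_isp_job_run(vspx_dev, &job); (typically from the ISP IRQ context)
+ * ...
+ * vsp1_isp_stop_streaming(vspx_dev);
+ * vsp1_isp_job_release(vspx_dev, &job); (only for jobs prepared but not run)
+ */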
+
+/* -----------------------------------------------------------------------------
+ * Initialization and cleanup
+ */
+
+int vsp1_vspx_init(struct vsp1_device *vsp1)
+{
+ struct vsp1_vspx_pipeline *vspx_pipe;
+ struct vsp1_pipeline *pipe;
+
+ vsp1->vspx = devm_kzalloc(vsp1->dev, sizeof(*vsp1->vspx), GFP_KERNEL);
+ if (!vsp1->vspx)
+ return -ENOMEM;
+
+ vsp1->vspx->vsp1 = vsp1;
+
+ vspx_pipe = &vsp1->vspx->pipe;
+ vspx_pipe->enabled = false;
+
+ pipe = &vspx_pipe->pipe;
+
+ vsp1_pipeline_init(pipe);
+
+ pipe->partitions = 1;
+ pipe->part_table = &vspx_pipe->partition;
+ pipe->interlaced = false;
+ pipe->frame_end = vsp1_vspx_pipeline_frame_end;
+
+ mutex_init(&vspx_pipe->mutex);
+ spin_lock_init(&vspx_pipe->lock);
+
+ /*
+ * Initialize RPF0 as input for VSPX and use it unconditionally for
+ * now.
+ */
+ pipe->inputs[0] = vsp1->rpf[0];
+ pipe->inputs[0]->entity.pipe = pipe;
+ pipe->inputs[0]->entity.sink = &vsp1->iif->entity;
+ list_add_tail(&pipe->inputs[0]->entity.list_pipe, &pipe->entities);
+
+ pipe->iif = &vsp1->iif->entity;
+ pipe->iif->pipe = pipe;
+ pipe->iif->sink = &vsp1->wpf[0]->entity;
+ pipe->iif->sink_pad = RWPF_PAD_SINK;
+ list_add_tail(&pipe->iif->list_pipe, &pipe->entities);
+
+ pipe->output = vsp1->wpf[0];
+ pipe->output->entity.pipe = pipe;
+ list_add_tail(&pipe->output->entity.list_pipe, &pipe->entities);
+
+ return 0;
+}
+
+void vsp1_vspx_cleanup(struct vsp1_device *vsp1)
+{
+ struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
+
+ mutex_destroy(&vspx_pipe->mutex);
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_vspx.h b/drivers/media/platform/renesas/vsp1/vsp1_vspx.h
new file mode 100644
index 000000000000..f871bf9e7dec
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_vspx.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_vspx.h -- R-Car Gen 4 VSPX
+ *
+ * Copyright (C) 2025 Ideas On Board Oy
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+#ifndef __VSP1_VSPX_H__
+#define __VSP1_VSPX_H__
+
+#include "vsp1.h"
+
+int vsp1_vspx_init(struct vsp1_device *vsp1);
+void vsp1_vspx_cleanup(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_VSPX_H__ */
diff --git a/drivers/media/platform/rockchip/Kconfig b/drivers/media/platform/rockchip/Kconfig
index b41d3960c1b4..9bbeec4996aa 100644
--- a/drivers/media/platform/rockchip/Kconfig
+++ b/drivers/media/platform/rockchip/Kconfig
@@ -4,3 +4,4 @@ comment "Rockchip media platform drivers"
source "drivers/media/platform/rockchip/rga/Kconfig"
source "drivers/media/platform/rockchip/rkisp1/Kconfig"
+source "drivers/media/platform/rockchip/rkvdec/Kconfig"
diff --git a/drivers/media/platform/rockchip/Makefile b/drivers/media/platform/rockchip/Makefile
index 4f782b876ac9..286dc5c53f7e 100644
--- a/drivers/media/platform/rockchip/Makefile
+++ b/drivers/media/platform/rockchip/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += rga/
obj-y += rkisp1/
+obj-y += rkvdec/
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index ca952fd0829b..5f187f9efc7b 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -415,6 +415,8 @@ struct rkisp1_params {
spinlock_t config_lock; /* locks the buffers list 'params' */
struct list_head params;
+ struct v4l2_ctrl_handler ctrls;
+
const struct v4l2_meta_format *metafmt;
enum v4l2_quantization quantization;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
index b28f4140c8a3..f1585f8fa0f4 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
@@ -5,6 +5,7 @@
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
+#include <linux/bitfield.h>
#include <linux/math.h>
#include <linux/string.h>
@@ -60,6 +61,7 @@ union rkisp1_ext_params_config {
struct rkisp1_ext_params_afc_config afc;
struct rkisp1_ext_params_compand_bls_config compand_bls;
struct rkisp1_ext_params_compand_curve_config compand_curve;
+ struct rkisp1_ext_params_wdr_config wdr;
};
enum rkisp1_params_formats {
@@ -1348,6 +1350,73 @@ rkisp1_compand_compress_config(struct rkisp1_params *params,
arg->x);
}
+static void rkisp1_wdr_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_wdr_config *arg)
+{
+ unsigned int i;
+ u32 value;
+
+ value = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_WDR_CTRL)
+ & ~(RKISP1_CIF_ISP_WDR_USE_IREF |
+ RKISP1_CIF_ISP_WDR_COLOR_SPACE_SELECT |
+ RKISP1_CIF_ISP_WDR_CR_MAPPING_DISABLE |
+ RKISP1_CIF_ISP_WDR_USE_Y9_8 |
+ RKISP1_CIF_ISP_WDR_USE_RGB7_8 |
+ RKISP1_CIF_ISP_WDR_DISABLE_TRANSIENT |
+ RKISP1_CIF_ISP_WDR_RGB_FACTOR_MASK);
+
+ /* Colorspace and chrominance mapping */
+ if (arg->use_rgb_colorspace)
+ value |= RKISP1_CIF_ISP_WDR_COLOR_SPACE_SELECT;
+
+ if (!arg->use_rgb_colorspace && arg->bypass_chroma_mapping)
+ value |= RKISP1_CIF_ISP_WDR_CR_MAPPING_DISABLE;
+
+ /* Illumination reference */
+ if (arg->use_iref) {
+ value |= RKISP1_CIF_ISP_WDR_USE_IREF;
+
+ if (arg->iref_config.use_y9_8)
+ value |= RKISP1_CIF_ISP_WDR_USE_Y9_8;
+
+ if (arg->iref_config.use_rgb7_8)
+ value |= RKISP1_CIF_ISP_WDR_USE_RGB7_8;
+
+ if (arg->iref_config.disable_transient)
+ value |= RKISP1_CIF_ISP_WDR_DISABLE_TRANSIENT;
+
+ value |= FIELD_PREP(RKISP1_CIF_ISP_WDR_RGB_FACTOR_MASK,
+ min(arg->iref_config.rgb_factor,
+ RKISP1_CIF_ISP_WDR_RGB_FACTOR_MAX));
+ }
+
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_WDR_CTRL, value);
+
+ /* RGB and Luminance offsets */
+ value = FIELD_PREP(RKISP1_CIF_ISP_WDR_RGB_OFFSET_MASK,
+ arg->rgb_offset)
+ | FIELD_PREP(RKISP1_CIF_ISP_WDR_LUM_OFFSET_MASK,
+ arg->luma_offset);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_WDR_OFFSET, value);
+
+ /* DeltaMin */
+ value = FIELD_PREP(RKISP1_CIF_ISP_WDR_DMIN_THRESH_MASK,
+ arg->dmin_thresh)
+ | FIELD_PREP(RKISP1_CIF_ISP_WDR_DMIN_STRENGTH_MASK,
+ min(arg->dmin_strength,
+ RKISP1_CIF_ISP_WDR_DMIN_STRENGTH_MAX));
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_WDR_DELTAMIN, value);
+
+ /* Tone curve */
+ for (i = 0; i < RKISP1_CIF_ISP_WDR_CURVE_NUM_DY_REGS; i++)
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_WDR_TONECURVE(i),
+ arg->tone_curve.dY[i]);
+ for (i = 0; i < RKISP1_CIF_ISP_WDR_CURVE_NUM_COEFF; i++)
+ rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_WDR_TONECURVE_YM(i),
+ arg->tone_curve.ym[i] &
+ RKISP1_CIF_ISP_WDR_TONE_CURVE_YM_MASK);
+}
+
static void
rkisp1_isp_isr_other_config(struct rkisp1_params *params,
const struct rkisp1_params_cfg *new_params)
@@ -2005,6 +2074,25 @@ static void rkisp1_ext_params_compand_compress(struct rkisp1_params *params,
RKISP1_CIF_ISP_COMPAND_CTRL_COMPRESS_ENABLE);
}
+static void rkisp1_ext_params_wdr(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_wdr_config *wdr = &block->wdr;
+
+ if (wdr->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_WDR_CTRL,
+ RKISP1_CIF_ISP_WDR_CTRL_ENABLE);
+ return;
+ }
+
+ rkisp1_wdr_config(params, &wdr->config);
+
+ if ((wdr->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(wdr->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_WDR_CTRL,
+ RKISP1_CIF_ISP_WDR_CTRL_ENABLE);
+}
+
typedef void (*rkisp1_block_handler)(struct rkisp1_params *params,
const union rkisp1_ext_params_config *config);
@@ -2118,6 +2206,11 @@ static const struct rkisp1_ext_params_handler {
.group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
.features = RKISP1_FEATURE_COMPAND,
},
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_WDR] = {
+ .size = sizeof(struct rkisp1_ext_params_wdr_config),
+ .handler = rkisp1_ext_params_wdr,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
};
static void rkisp1_ext_params_config(struct rkisp1_params *params,
@@ -2736,6 +2829,44 @@ static int rkisp1_params_init_vb2_queue(struct vb2_queue *q,
return vb2_queue_init(q);
}
+static int rkisp1_params_ctrl_init(struct rkisp1_params *params)
+{
+ struct v4l2_ctrl_config ctrl_config = {
+ .id = RKISP1_CID_SUPPORTED_PARAMS_BLOCKS,
+ .name = "Supported Params Blocks",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+ };
+ int ret;
+
+ v4l2_ctrl_handler_init(&params->ctrls, 1);
+
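+ /*
+ * Expose every known block type in the bitmask maximum, but set the
+ * default bits only for blocks the hardware actually supports.
+ */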
+ for (unsigned int i = 0; i < ARRAY_SIZE(rkisp1_ext_params_handlers); i++) {
+ const struct rkisp1_ext_params_handler *block_handler;
+
+ block_handler = &rkisp1_ext_params_handlers[i];
+ ctrl_config.max |= BIT(i);
+
+ if ((params->rkisp1->info->features & block_handler->features) !=
+ block_handler->features)
+ continue;
+
+ ctrl_config.def |= BIT(i);
+ }
+
+ v4l2_ctrl_new_custom(&params->ctrls, &ctrl_config, NULL);
+
+ params->vnode.vdev.ctrl_handler = &params->ctrls;
+
+ if (params->ctrls.error) {
+ ret = params->ctrls.error;
+ v4l2_ctrl_handler_free(&params->ctrls);
+ return ret;
+ }
+
+ return 0;
+}
+
int rkisp1_params_register(struct rkisp1_device *rkisp1)
{
struct rkisp1_params *params = &rkisp1->params;
@@ -2763,7 +2894,9 @@ int rkisp1_params_register(struct rkisp1_device *rkisp1)
vdev->queue = &node->buf_queue;
vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_META_OUTPUT;
vdev->vfl_dir = VFL_DIR_TX;
- rkisp1_params_init_vb2_queue(vdev->queue, params);
+ ret = rkisp1_params_init_vb2_queue(vdev->queue, params);
+ if (ret)
+ goto err_media;
params->metafmt = &rkisp1_params_formats[RKISP1_PARAMS_FIXED];
@@ -2777,18 +2910,26 @@ int rkisp1_params_register(struct rkisp1_device *rkisp1)
node->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
if (ret)
- goto error;
+ goto err_media;
+
+ ret = rkisp1_params_ctrl_init(params);
+ if (ret) {
+ dev_err(rkisp1->dev, "Control initialization error %d\n", ret);
+ goto err_media;
+ }
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(rkisp1->dev,
"failed to register %s, ret=%d\n", vdev->name, ret);
- goto error;
+ goto err_ctrl;
}
return 0;
-error:
+err_ctrl:
+ v4l2_ctrl_handler_free(&params->ctrls);
+err_media:
media_entity_cleanup(&vdev->entity);
mutex_destroy(&node->vlock);
return ret;
@@ -2804,6 +2945,7 @@ void rkisp1_params_unregister(struct rkisp1_device *rkisp1)
return;
vb2_video_unregister_device(vdev);
+ v4l2_ctrl_handler_free(&params->ctrls);
media_entity_cleanup(&vdev->entity);
mutex_destroy(&node->vlock);
}
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index 139177db9c6d..fbeb186cde0d 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -703,6 +703,27 @@
#define RKISP1_CIF_ISP_COMPAND_CTRL_SOFT_RESET_FLAG BIT(2)
#define RKISP1_CIF_ISP_COMPAND_CTRL_BLS_ENABLE BIT(3)
+/* WDR */
+/* ISP_WDR_CTRL */
+#define RKISP1_CIF_ISP_WDR_CTRL_ENABLE BIT(0)
+#define RKISP1_CIF_ISP_WDR_COLOR_SPACE_SELECT BIT(1)
+#define RKISP1_CIF_ISP_WDR_CR_MAPPING_DISABLE BIT(2)
+#define RKISP1_CIF_ISP_WDR_USE_IREF BIT(3)
+#define RKISP1_CIF_ISP_WDR_USE_Y9_8 BIT(4)
+#define RKISP1_CIF_ISP_WDR_USE_RGB7_8 BIT(5)
+#define RKISP1_CIF_ISP_WDR_DISABLE_TRANSIENT BIT(6)
+#define RKISP1_CIF_ISP_WDR_RGB_FACTOR_MASK GENMASK(11, 8)
+#define RKISP1_CIF_ISP_WDR_RGB_FACTOR_MAX 8U
+/* ISP_WDR_TONE_CURVE_YM */
+#define RKISP1_CIF_ISP_WDR_TONE_CURVE_YM_MASK GENMASK(12, 0)
+/* ISP_WDR_OFFSET */
+#define RKISP1_CIF_ISP_WDR_RGB_OFFSET_MASK GENMASK(11, 0)
+#define RKISP1_CIF_ISP_WDR_LUM_OFFSET_MASK GENMASK(27, 16)
+/* ISP_WDR_DELTAMIN */
+#define RKISP1_CIF_ISP_WDR_DMIN_THRESH_MASK GENMASK(11, 0)
+#define RKISP1_CIF_ISP_WDR_DMIN_STRENGTH_MASK GENMASK(20, 16)
+#define RKISP1_CIF_ISP_WDR_DMIN_STRENGTH_MAX 16U
+
/* =================================================================== */
/* CIF Registers */
/* =================================================================== */
@@ -1295,82 +1316,12 @@
#define RKISP1_CIF_ISP_WDR_BASE 0x00002a00
#define RKISP1_CIF_ISP_WDR_CTRL (RKISP1_CIF_ISP_WDR_BASE + 0x00000000)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_1 (RKISP1_CIF_ISP_WDR_BASE + 0x00000004)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_2 (RKISP1_CIF_ISP_WDR_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_3 (RKISP1_CIF_ISP_WDR_BASE + 0x0000000c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_4 (RKISP1_CIF_ISP_WDR_BASE + 0x00000010)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_0 (RKISP1_CIF_ISP_WDR_BASE + 0x00000014)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_1 (RKISP1_CIF_ISP_WDR_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2 (RKISP1_CIF_ISP_WDR_BASE + 0x0000001c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_3 (RKISP1_CIF_ISP_WDR_BASE + 0x00000020)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_4 (RKISP1_CIF_ISP_WDR_BASE + 0x00000024)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_5 (RKISP1_CIF_ISP_WDR_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6 (RKISP1_CIF_ISP_WDR_BASE + 0x0000002c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_7 (RKISP1_CIF_ISP_WDR_BASE + 0x00000030)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_8 (RKISP1_CIF_ISP_WDR_BASE + 0x00000034)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_9 (RKISP1_CIF_ISP_WDR_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10 (RKISP1_CIF_ISP_WDR_BASE + 0x0000003c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_11 (RKISP1_CIF_ISP_WDR_BASE + 0x00000040)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_12 (RKISP1_CIF_ISP_WDR_BASE + 0x00000044)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_13 (RKISP1_CIF_ISP_WDR_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14 (RKISP1_CIF_ISP_WDR_BASE + 0x0000004c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_15 (RKISP1_CIF_ISP_WDR_BASE + 0x00000050)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_16 (RKISP1_CIF_ISP_WDR_BASE + 0x00000054)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_17 (RKISP1_CIF_ISP_WDR_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18 (RKISP1_CIF_ISP_WDR_BASE + 0x0000005c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_19 (RKISP1_CIF_ISP_WDR_BASE + 0x00000060)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_20 (RKISP1_CIF_ISP_WDR_BASE + 0x00000064)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_21 (RKISP1_CIF_ISP_WDR_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_22 (RKISP1_CIF_ISP_WDR_BASE + 0x0000006c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_23 (RKISP1_CIF_ISP_WDR_BASE + 0x00000070)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_24 (RKISP1_CIF_ISP_WDR_BASE + 0x00000074)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_25 (RKISP1_CIF_ISP_WDR_BASE + 0x00000078)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_26 (RKISP1_CIF_ISP_WDR_BASE + 0x0000007c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_27 (RKISP1_CIF_ISP_WDR_BASE + 0x00000080)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_28 (RKISP1_CIF_ISP_WDR_BASE + 0x00000084)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_29 (RKISP1_CIF_ISP_WDR_BASE + 0x00000088)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_30 (RKISP1_CIF_ISP_WDR_BASE + 0x0000008c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31 (RKISP1_CIF_ISP_WDR_BASE + 0x00000090)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32 (RKISP1_CIF_ISP_WDR_BASE + 0x00000094)
+#define RKISP1_CIF_ISP_WDR_TONECURVE(n) (RKISP1_CIF_ISP_WDR_BASE + 0x00000004 + (n) * 4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM(n) (RKISP1_CIF_ISP_WDR_BASE + 0x00000014 + (n) * 4)
#define RKISP1_CIF_ISP_WDR_OFFSET (RKISP1_CIF_ISP_WDR_BASE + 0x00000098)
#define RKISP1_CIF_ISP_WDR_DELTAMIN (RKISP1_CIF_ISP_WDR_BASE + 0x0000009c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_1_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000a0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_2_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000a4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_3_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000a8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_4_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000ac)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_0_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000b0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_1_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000b4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000b8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_3_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000bc)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_4_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000c0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_5_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000c4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000c8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_7_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000cc)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_8_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000d0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_9_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000d4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000d8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_11_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000dc)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_12_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000e0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_13_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000e4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000e8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_15_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000ec)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_16_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000f0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_17_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000f4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000f8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_19_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000fc)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_20_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000100)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_21_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000104)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_22_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000108)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_23_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000010c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_24_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000110)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_25_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000114)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_26_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000118)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_27_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000011c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_28_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000120)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_29_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000124)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_30_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000128)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000012c)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000130)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_SHD(n) (RKISP1_CIF_ISP_WDR_BASE + 0x000000a0 + (n) * 4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_SHD(n) (RKISP1_CIF_ISP_WDR_BASE + 0x000000b0 + (n) * 4)
#define RKISP1_CIF_ISP_HIST_BASE_V12 0x00002c00
#define RKISP1_CIF_ISP_HIST_CTRL_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000000)
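
With the indexed TONECURVE macros above, the 33-entry tone curve can be programmed in a loop instead of through seventy-odd one-off defines. A sketch, assuming the (dev, reg, val) register write helper used elsewhere in this driver:

static void wdr_write_tone_curve(struct rkisp1_device *rkisp1,
				 const u16 ym[33])
{
	unsigned int i;

	for (i = 0; i < 33; i++)
		rkisp1_write(rkisp1, RKISP1_CIF_ISP_WDR_TONECURVE_YM(i),
			     ym[i] & RKISP1_CIF_ISP_WDR_TONE_CURVE_YM_MASK);
}
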
diff --git a/drivers/staging/media/rkvdec/Kconfig b/drivers/media/platform/rockchip/rkvdec/Kconfig
index 5f3bdd848a2c..5f3bdd848a2c 100644
--- a/drivers/staging/media/rkvdec/Kconfig
+++ b/drivers/media/platform/rockchip/rkvdec/Kconfig
diff --git a/drivers/staging/media/rkvdec/Makefile b/drivers/media/platform/rockchip/rkvdec/Makefile
index cb86b429cfaa..cb86b429cfaa 100644
--- a/drivers/staging/media/rkvdec/Makefile
+++ b/drivers/media/platform/rockchip/rkvdec/Makefile
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/media/platform/rockchip/rkvdec/rkvdec-h264.c
index d14b4d173448..d14b4d173448 100644
--- a/drivers/staging/media/rkvdec/rkvdec-h264.c
+++ b/drivers/media/platform/rockchip/rkvdec/rkvdec-h264.c
diff --git a/drivers/staging/media/rkvdec/rkvdec-regs.h b/drivers/media/platform/rockchip/rkvdec/rkvdec-regs.h
index 15b9bee92016..15b9bee92016 100644
--- a/drivers/staging/media/rkvdec/rkvdec-regs.h
+++ b/drivers/media/platform/rockchip/rkvdec/rkvdec-regs.h
diff --git a/drivers/staging/media/rkvdec/rkvdec-vp9.c b/drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
index 0e7e16f20eeb..0e7e16f20eeb 100644
--- a/drivers/staging/media/rkvdec/rkvdec-vp9.c
+++ b/drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/media/platform/rockchip/rkvdec/rkvdec.c
index 3367902f22de..d707088ec0dc 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/media/platform/rockchip/rkvdec/rkvdec.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
+#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -1055,24 +1056,42 @@ static void rkvdec_v4l2_cleanup(struct rkvdec_dev *rkvdec)
v4l2_device_unregister(&rkvdec->v4l2_dev);
}
+static void rkvdec_iommu_restore(struct rkvdec_dev *rkvdec)
+{
+ if (rkvdec->empty_domain) {
+ /*
+ * To force the IOMMU core to rewrite the mappings, attach a new empty
+ * domain that programs an empty table, then detach it to restore the
+ * default domain and all of its cached mappings.
+ * This is done safely from the interrupt handler to make sure no memory
+ * gets mapped through the IOMMU while the empty domain is attached.
+ */
+ iommu_attach_device(rkvdec->empty_domain, rkvdec->dev);
+ iommu_detach_device(rkvdec->empty_domain, rkvdec->dev);
+ }
+}
+
static irqreturn_t rkvdec_irq_handler(int irq, void *priv)
{
struct rkvdec_dev *rkvdec = priv;
+ struct rkvdec_ctx *ctx = v4l2_m2m_get_curr_priv(rkvdec->m2m_dev);
enum vb2_buffer_state state;
u32 status;
status = readl(rkvdec->regs + RKVDEC_REG_INTERRUPT);
- state = (status & RKVDEC_RDY_STA) ?
- VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
-
writel(0, rkvdec->regs + RKVDEC_REG_INTERRUPT);
- if (cancel_delayed_work(&rkvdec->watchdog_work)) {
- struct rkvdec_ctx *ctx;
- ctx = v4l2_m2m_get_curr_priv(rkvdec->m2m_dev);
- rkvdec_job_finish(ctx, state);
+ if (status & RKVDEC_RDY_STA) {
+ state = VB2_BUF_STATE_DONE;
+ } else {
+ state = VB2_BUF_STATE_ERROR;
+ if (status & RKVDEC_SOFTRESET_RDY)
+ rkvdec_iommu_restore(rkvdec);
}
+ if (cancel_delayed_work(&rkvdec->watchdog_work))
+ rkvdec_job_finish(ctx, state);
+
return IRQ_HANDLED;
}
@@ -1140,6 +1159,13 @@ static int rkvdec_probe(struct platform_device *pdev)
return ret;
}
+ if (iommu_get_domain_for_dev(&pdev->dev)) {
+ rkvdec->empty_domain = iommu_paging_domain_alloc(rkvdec->dev);
+
+ if (IS_ERR(rkvdec->empty_domain)) {
+ dev_warn(rkvdec->dev, "cannot alloc new empty domain\n");
+ rkvdec->empty_domain = NULL;
+ }
+ }
+
vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
irq = platform_get_irq(pdev, 0);
@@ -1179,6 +1205,9 @@ static void rkvdec_remove(struct platform_device *pdev)
rkvdec_v4l2_cleanup(rkvdec);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
+
+ if (rkvdec->empty_domain)
+ iommu_domain_free(rkvdec->empty_domain);
}
#ifdef CONFIG_PM
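
The attach/detach sequence in rkvdec_iommu_restore() is the heart of the recovery: attaching a freshly allocated paging domain programs an empty page table, and detaching it makes the IOMMU core re-attach the default DMA domain along with all of its cached mappings. As a generic sketch, assuming a device that normally sits in its default domain:

static void flush_stale_iommu_state(struct device *dev,
				    struct iommu_domain *empty_domain)
{
	if (!empty_domain)
		return;

	/* Program an empty table... */
	iommu_attach_device(empty_domain, dev);
	/* ...then let the core restore the default domain and its mappings. */
	iommu_detach_device(empty_domain, dev);
}
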
diff --git a/drivers/staging/media/rkvdec/rkvdec.h b/drivers/media/platform/rockchip/rkvdec/rkvdec.h
index 9a9f4fced7a1..f6e8bf38add3 100644
--- a/drivers/staging/media/rkvdec/rkvdec.h
+++ b/drivers/media/platform/rockchip/rkvdec/rkvdec.h
@@ -110,6 +110,7 @@ struct rkvdec_dev {
void __iomem *regs;
struct mutex vdev_lock; /* serializes ioctls */
struct delayed_work watchdog_work;
+ struct iommu_domain *empty_domain;
};
struct rkvdec_ctx {
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c
index b243cbb1d010..b5b37b6f8fa8 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c
@@ -131,7 +131,7 @@ static const struct dev_pm_ops fimc_is_i2c_pm_ops = {
};
static const struct of_device_id fimc_is_i2c_of_match[] = {
- { .compatible = FIMC_IS_I2C_COMPATIBLE },
+ { .compatible = "samsung,exynos4212-i2c-isp" },
{ },
};
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.h b/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.h
index a23bd20be6c8..69d597e5c297 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.h
@@ -6,7 +6,5 @@
* Sylwester Nawrocki <s.nawrocki@samsung.com>
*/
-#define FIMC_IS_I2C_COMPATIBLE "samsung,exynos4212-i2c-isp"
-
int fimc_is_register_i2c_driver(void);
void fimc_is_unregister_i2c_driver(void);
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is.c b/drivers/media/platform/samsung/exynos4-is/fimc-is.c
index 2e8fe9e49735..ed7b7ca16f71 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is.c
@@ -207,7 +207,7 @@ static int fimc_is_register_subdevs(struct fimc_is *is)
if (ret < 0)
return ret;
- for_each_compatible_node(i2c_bus, NULL, FIMC_IS_I2C_COMPATIBLE) {
+ for_each_compatible_node(i2c_bus, NULL, "samsung,exynos4212-i2c-isp") {
for_each_available_child_of_node(i2c_bus, child) {
ret = fimc_is_parse_sensor_config(is, index, child);
diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.c b/drivers/media/platform/samsung/exynos4-is/media-dev.c
index b5ee3c547789..c781853586fd 100644
--- a/drivers/media/platform/samsung/exynos4-is/media-dev.c
+++ b/drivers/media/platform/samsung/exynos4-is/media-dev.c
@@ -482,15 +482,12 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd,
static int fimc_md_parse_port_node(struct fimc_md *fmd,
struct device_node *port)
{
- struct device_node *ep;
int ret;
- for_each_child_of_node(port, ep) {
+ for_each_child_of_node_scoped(port, ep) {
ret = fimc_md_parse_one_endpoint(fmd, ep);
- if (ret < 0) {
- of_node_put(ep);
+ if (ret < 0)
return ret;
- }
}
return 0;
@@ -501,7 +498,6 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
{
struct device_node *parent = fmd->pdev->dev.of_node;
struct device_node *ports = NULL;
- struct device_node *node;
int ret;
/*
@@ -518,7 +514,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
fmd->num_sensors = 0;
/* Attach sensors linked to MIPI CSI-2 receivers */
- for_each_available_child_of_node(parent, node) {
+ for_each_available_child_of_node_scoped(parent, node) {
struct device_node *port;
if (!of_node_name_eq(node, "csis"))
@@ -530,10 +526,8 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
ret = fimc_md_parse_port_node(fmd, port);
of_node_put(port);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
goto cleanup;
- }
}
/* Attach sensors listed in the parallel-ports node */
@@ -541,12 +535,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
if (!ports)
goto rpm_put;
- for_each_child_of_node(ports, node) {
+ for_each_child_of_node_scoped(ports, node) {
ret = fimc_md_parse_port_node(fmd, node);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
goto cleanup;
- }
}
of_node_put(ports);
@@ -736,10 +728,9 @@ dev_unlock:
static int fimc_md_register_platform_entities(struct fimc_md *fmd,
struct device_node *parent)
{
- struct device_node *node;
int ret = 0;
- for_each_available_child_of_node(parent, node) {
+ for_each_available_child_of_node_scoped(parent, node) {
struct platform_device *pdev;
int plat_entity = -1;
@@ -762,10 +753,8 @@ static int fimc_md_register_platform_entities(struct fimc_md *fmd,
ret = fimc_md_register_platform_entity(fmd, pdev,
plat_entity);
put_device(&pdev->dev);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
break;
- }
}
return ret;
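
The _scoped iterators used above declare the cursor with an automatic-cleanup attribute, so the reference taken on each child is dropped when the cursor goes out of scope; early returns no longer need a manual of_node_put(). A minimal sketch, assuming <linux/of.h>:

static int count_ports(struct device_node *parent)
{
	int n = 0;

	for_each_child_of_node_scoped(parent, child) {
		if (!of_node_name_eq(child, "port"))
			return -EINVAL;	/* early return: no of_node_put() needed */
		n++;
	}

	return n;
}
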
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
index 602c37cbe177..89bd15a4d26a 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
@@ -658,7 +658,7 @@ static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
static int c8sectpfe_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *child, *np = dev->of_node;
+ struct device_node *np = dev->of_node;
struct c8sectpfei *fei;
struct resource *res;
int ret, index = 0;
@@ -742,17 +742,15 @@ static int c8sectpfe_probe(struct platform_device *pdev)
return PTR_ERR(fei->pinctrl);
}
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
struct device_node *i2c_bus;
fei->channel_data[index] = devm_kzalloc(dev,
sizeof(struct channel_info),
GFP_KERNEL);
- if (!fei->channel_data[index]) {
- ret = -ENOMEM;
- goto err_node_put;
- }
+ if (!fei->channel_data[index])
+ return -ENOMEM;
tsin = fei->channel_data[index];
@@ -761,7 +759,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
if (ret) {
dev_err(&pdev->dev, "No tsin_num found\n");
- goto err_node_put;
+ return ret;
}
/* sanity check value */
@@ -769,8 +767,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
tsin->tsin_id, fei->hw_stats.num_ib);
- ret = -EINVAL;
- goto err_node_put;
+ return -EINVAL;
}
tsin->invert_ts_clk = of_property_read_bool(child,
@@ -786,22 +783,20 @@ static int c8sectpfe_probe(struct platform_device *pdev)
&tsin->dvb_card);
if (ret) {
dev_err(&pdev->dev, "No dvb-card found\n");
- goto err_node_put;
+ return ret;
}
i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
if (!i2c_bus) {
dev_err(&pdev->dev, "No i2c-bus found\n");
- ret = -ENODEV;
- goto err_node_put;
+ return -ENODEV;
}
tsin->i2c_adapter =
of_find_i2c_adapter_by_node(i2c_bus);
of_node_put(i2c_bus);
if (!tsin->i2c_adapter) {
dev_err(&pdev->dev, "No i2c adapter found\n");
- ret = -ENODEV;
- goto err_node_put;
+ return -ENODEV;
}
/* Acquire reset GPIO and activate it */
@@ -813,7 +808,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
if (ret && ret != -EBUSY) {
dev_err(dev, "Can't request tsin%d reset gpio\n",
fei->channel_data[index]->tsin_id);
- goto err_node_put;
+ return ret;
}
if (!ret) {
@@ -855,10 +850,6 @@ static int c8sectpfe_probe(struct platform_device *pdev)
c8sectpfe_debugfs_init(fei);
return 0;
-
-err_node_put:
- of_node_put(child);
- return ret;
}
static void c8sectpfe_remove(struct platform_device *pdev)
@@ -897,16 +888,15 @@ static void c8sectpfe_remove(struct platform_device *pdev)
static int configure_channels(struct c8sectpfei *fei)
{
int index = 0, ret;
- struct device_node *child, *np = fei->dev->of_node;
+ struct device_node *np = fei->dev->of_node;
/* iterate round each tsin and configure memdma descriptor and IB hw */
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = configure_memdma_and_inputblock(fei,
fei->channel_data[index]);
if (ret) {
dev_err(fei->dev,
"configure_memdma_and_inputblock failed\n");
- of_node_put(child);
goto err_unmap;
}
index++;
diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
index 6412a00be8ea..b628d6e081db 100644
--- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
@@ -619,6 +619,7 @@ static void ti_csi2rx_dma_callback(void *param)
if (ti_csi2rx_start_dma(csi, buf)) {
dev_err(csi->dev, "Failed to queue the next buffer for DMA\n");
+ list_del(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
} else {
list_move_tail(&buf->list, &dma->submitted);
@@ -895,6 +896,7 @@ static int ti_csi2rx_init_vb2q(struct ti_csi2rx_dev *csi)
q->dev = dmaengine_get_dma_device(csi->dma.chan);
q->lock = &csi->mutex;
q->min_queued_buffers = 1;
+ q->allow_cache_hints = 1;
ret = vb2_queue_init(q);
if (ret)
diff --git a/drivers/media/platform/ti/vpe/vpdma.c b/drivers/media/platform/ti/vpe/vpdma.c
index da90d7f03f82..bb8a8bd7980c 100644
--- a/drivers/media/platform/ti/vpe/vpdma.c
+++ b/drivers/media/platform/ti/vpe/vpdma.c
@@ -552,38 +552,6 @@ EXPORT_SYMBOL(vpdma_submit_descs);
static void dump_dtd(struct vpdma_dtd *dtd);
-void vpdma_update_dma_addr(struct vpdma_data *vpdma,
- struct vpdma_desc_list *list, dma_addr_t dma_addr,
- void *write_dtd, int drop, int idx)
-{
- struct vpdma_dtd *dtd = list->buf.addr;
- dma_addr_t write_desc_addr;
- int offset;
-
- dtd += idx;
- vpdma_unmap_desc_buf(vpdma, &list->buf);
-
- dtd->start_addr = dma_addr;
-
- /* Calculate write address from the offset of write_dtd from start
- * of the list->buf
- */
- offset = (void *)write_dtd - list->buf.addr;
- write_desc_addr = list->buf.dma_addr + offset;
-
- if (drop)
- dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
- 1, 1, 0);
- else
- dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
- 1, 0, 0);
-
- vpdma_map_desc_buf(vpdma, &list->buf);
-
- dump_dtd(dtd);
-}
-EXPORT_SYMBOL(vpdma_update_dma_addr);
-
void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
u32 width, u32 height)
{
diff --git a/drivers/media/platform/ti/vpe/vpdma.h b/drivers/media/platform/ti/vpe/vpdma.h
index 393fcbb3cb40..e4d7941c6207 100644
--- a/drivers/media/platform/ti/vpe/vpdma.h
+++ b/drivers/media/platform/ti/vpe/vpdma.h
@@ -222,9 +222,6 @@ void vpdma_free_desc_list(struct vpdma_desc_list *list);
int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list,
int list_num);
bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num);
-void vpdma_update_dma_addr(struct vpdma_data *vpdma,
- struct vpdma_desc_list *list, dma_addr_t dma_addr,
- void *write_dtd, int drop, int idx);
/* VPDMA hardware list funcs */
int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv);
diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
index edc217eed293..81328c63b796 100644
--- a/drivers/media/platform/verisilicon/hantro.h
+++ b/drivers/media/platform/verisilicon/hantro.h
@@ -323,6 +323,8 @@ struct hantro_postproc_regs {
struct hantro_reg output_fmt;
struct hantro_reg orig_width;
struct hantro_reg display_width;
+ struct hantro_reg input_width_ext;
+ struct hantro_reg input_height_ext;
};
struct hantro_vp9_decoded_buffer_info {
diff --git a/drivers/media/platform/verisilicon/hantro_g1_regs.h b/drivers/media/platform/verisilicon/hantro_g1_regs.h
index c623b3b0be18..7874d76c6898 100644
--- a/drivers/media/platform/verisilicon/hantro_g1_regs.h
+++ b/drivers/media/platform/verisilicon/hantro_g1_regs.h
@@ -350,7 +350,7 @@
#define G1_REG_PP_CONTROL_OUT_WIDTH(v) (((v) << 4) & GENMASK(14, 4))
#define G1_REG_PP_MASK1_ORIG_WIDTH G1_SWREG(88)
#define G1_REG_PP_ORIG_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
-#define G1_REG_PP_DISPLAY_WIDTH G1_SWREG(92)
+#define G1_REG_PP_DISPLAY_WIDTH_IN_EXT G1_SWREG(92)
#define G1_REG_PP_FUSE G1_SWREG(99)
#endif /* HANTRO_G1_REGS_H_ */
diff --git a/drivers/media/platform/verisilicon/hantro_h264.c b/drivers/media/platform/verisilicon/hantro_h264.c
index 4e9a0ecf5c13..2414782f1eb6 100644
--- a/drivers/media/platform/verisilicon/hantro_h264.c
+++ b/drivers/media/platform/verisilicon/hantro_h264.c
@@ -325,12 +325,12 @@ static void update_dpb(struct hantro_ctx *ctx)
continue;
*cdpb = *ndpb;
- set_bit(j, used);
+ __set_bit(j, used);
break;
}
if (j == ARRAY_SIZE(ctx->h264_dec.dpb))
- set_bit(i, new);
+ __set_bit(i, new);
}
/* For entries that could not be matched, use remaining free slots. */
@@ -349,7 +349,7 @@ static void update_dpb(struct hantro_ctx *ctx)
cdpb = &ctx->h264_dec.dpb[j];
*cdpb = *ndpb;
- set_bit(j, used);
+ __set_bit(j, used);
}
}
diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
index 9f559a13d409..e94d1ba5ef10 100644
--- a/drivers/media/platform/verisilicon/hantro_postproc.c
+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
@@ -49,7 +49,9 @@ static const struct hantro_postproc_regs hantro_g1_postproc_regs = {
.input_fmt = {G1_REG_PP_CONTROL, 29, 0x7},
.output_fmt = {G1_REG_PP_CONTROL, 26, 0x7},
.orig_width = {G1_REG_PP_MASK1_ORIG_WIDTH, 23, 0x1ff},
- .display_width = {G1_REG_PP_DISPLAY_WIDTH, 0, 0xfff},
+ .display_width = {G1_REG_PP_DISPLAY_WIDTH_IN_EXT, 0, 0xfff},
+ .input_width_ext = {G1_REG_PP_DISPLAY_WIDTH_IN_EXT, 26, 0x7},
+ .input_height_ext = {G1_REG_PP_DISPLAY_WIDTH_IN_EXT, 29, 0x7},
};
bool hantro_needs_postproc(const struct hantro_ctx *ctx,
@@ -103,6 +105,8 @@ static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
HANTRO_PP_REG_WRITE(vpu, output_height, ctx->dst_fmt.height);
HANTRO_PP_REG_WRITE(vpu, orig_width, MB_WIDTH(ctx->dst_fmt.width));
HANTRO_PP_REG_WRITE(vpu, display_width, ctx->dst_fmt.width);
+ HANTRO_PP_REG_WRITE(vpu, input_width_ext, MB_WIDTH(ctx->dst_fmt.width) >> 9);
+ HANTRO_PP_REG_WRITE(vpu, input_height_ext, MB_HEIGHT(ctx->dst_fmt.height) >> 8);
}
static int down_scale_factor(struct hantro_ctx *ctx)
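
The new _ext register fields carry the high bits of the macroblock count that no longer fit in the original fields, so the full value is split across a base field and an extension field. For illustration, with hypothetical helper names:

/* orig_width holds bits [8:0] of the MB count, input_width_ext bits [11:9] */
static void split_mb_width(unsigned int mb_width, u32 *base, u32 *ext)
{
	*base = mb_width & 0x1ff;
	*ext = mb_width >> 9;
}
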
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
index acd29fa41d2d..02673be9878e 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
@@ -17,7 +17,6 @@
#define RK3066_ACLK_MAX_FREQ (300 * 1000 * 1000)
#define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
-#define RK3588_ACLK_MAX_FREQ (300 * 1000 * 1000)
#define ROCKCHIP_VPU981_MIN_SIZE 64
@@ -454,13 +453,6 @@ static int rk3066_vpu_hw_init(struct hantro_dev *vpu)
return 0;
}
-static int rk3588_vpu981_hw_init(struct hantro_dev *vpu)
-{
- /* Bump ACLKs to max. possible freq. to improve performance. */
- clk_set_rate(vpu->clocks[0].clk, RK3588_ACLK_MAX_FREQ);
- return 0;
-}
-
static int rockchip_vpu_hw_init(struct hantro_dev *vpu)
{
/* Bump ACLK to max. possible freq. to improve performance. */
@@ -821,7 +813,6 @@ const struct hantro_variant rk3588_vpu981_variant = {
.codec_ops = rk3588_vpu981_codec_ops,
.irqs = rk3588_vpu981_irqs,
.num_irqs = ARRAY_SIZE(rk3588_vpu981_irqs),
- .init = rk3588_vpu981_hw_init,
.clk_names = rk3588_vpu981_vpu_clk_names,
.num_clocks = ARRAY_SIZE(rk3588_vpu981_vpu_clk_names)
};
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index 024b439feec9..30675f681410 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -450,7 +450,6 @@ static int xvip_graph_dma_init_one(struct xvip_composite_device *xdev,
static int xvip_graph_dma_init(struct xvip_composite_device *xdev)
{
struct device_node *ports;
- struct device_node *port;
int ret = 0;
ports = of_get_child_by_name(xdev->dev->of_node, "ports");
@@ -459,12 +458,10 @@ static int xvip_graph_dma_init(struct xvip_composite_device *xdev)
return -EINVAL;
}
- for_each_child_of_node(ports, port) {
+ for_each_child_of_node_scoped(ports, port) {
ret = xvip_graph_dma_init_one(xdev, port);
- if (ret) {
- of_node_put(port);
+ if (ret)
break;
- }
}
of_node_put(ports);
diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c
index 8fc8e496e6aa..392441e0c116 100644
--- a/drivers/media/rc/ir-spi.c
+++ b/drivers/media/rc/ir-spi.c
@@ -21,13 +21,12 @@
#define IR_SPI_DRIVER_NAME "ir-spi"
#define IR_SPI_DEFAULT_FREQUENCY 38000
-#define IR_SPI_MAX_BUFSIZE 4096
+#define IR_SPI_BITS_PER_PULSE 16
struct ir_spi_data {
u32 freq;
bool negated;
- u16 tx_buf[IR_SPI_MAX_BUFSIZE];
u16 pulse;
u16 space;
@@ -43,17 +42,23 @@ static int ir_spi_tx(struct rc_dev *dev, unsigned int *buffer, unsigned int coun
unsigned int len = 0;
struct ir_spi_data *idata = dev->priv;
struct spi_transfer xfer;
+ u16 *tx_buf;
/* convert the pulse/space signal to raw binary signal */
for (i = 0; i < count; i++) {
- unsigned int periods;
- int j;
- u16 val;
+ buffer[i] = DIV_ROUND_CLOSEST_ULL((u64)buffer[i] * idata->freq,
+ 1000000);
+ len += buffer[i];
+ }
- periods = DIV_ROUND_CLOSEST(buffer[i] * idata->freq, 1000000);
+ tx_buf = kmalloc_array(len, sizeof(*tx_buf), GFP_KERNEL);
+ if (!tx_buf)
+ return -ENOMEM;
- if (len + periods >= IR_SPI_MAX_BUFSIZE)
- return -EINVAL;
+ len = 0;
+ for (i = 0; i < count; i++) {
+ int j;
+ u16 val;
/*
* The first value in buffer is a pulse, so that 0, 2, 4, ...
* contain a pulse duration, while 1, 3, 5, ...
@@ -61,19 +66,19 @@ static int ir_spi_tx(struct rc_dev *dev, unsigned int *buffer, unsigned int coun
* contain a space duration.
*/
val = (i % 2) ? idata->space : idata->pulse;
- for (j = 0; j < periods; j++)
- idata->tx_buf[len++] = val;
+ for (j = 0; j < buffer[i]; j++)
+ tx_buf[len++] = val;
}
memset(&xfer, 0, sizeof(xfer));
- xfer.speed_hz = idata->freq * 16;
- xfer.len = len * sizeof(*idata->tx_buf);
- xfer.tx_buf = idata->tx_buf;
+ xfer.speed_hz = idata->freq * IR_SPI_BITS_PER_PULSE;
+ xfer.len = len * sizeof(*tx_buf);
+ xfer.tx_buf = tx_buf;
ret = regulator_enable(idata->regulator);
if (ret)
- return ret;
+ goto err_free_tx_buf;
ret = spi_sync_transfer(idata->spi, &xfer, 1);
if (ret)
@@ -81,6 +86,10 @@ static int ir_spi_tx(struct rc_dev *dev, unsigned int *buffer, unsigned int coun
regulator_disable(idata->regulator);
+err_free_tx_buf:
+ kfree(tx_buf);
+
return ret ? ret : count;
}
@@ -91,6 +100,9 @@ static int ir_spi_set_tx_carrier(struct rc_dev *dev, u32 carrier)
if (!carrier)
return -EINVAL;
+ if (carrier > idata->spi->max_speed_hz / IR_SPI_BITS_PER_PULSE)
+ return -EINVAL;
+
idata->freq = carrier;
return 0;
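
Because the SPI bus is clocked at the carrier frequency times IR_SPI_BITS_PER_PULSE, one 16-bit word spans exactly one carrier period, and a duration in microseconds converts to a word count as sketched below (e.g. a 1000 us pulse at 38 kHz becomes 38 words):

static unsigned int us_to_carrier_periods(unsigned int duration_us,
					  unsigned int carrier_hz)
{
	/* round(duration_us * carrier_hz / 1e6), as in ir_spi_tx() above */
	return DIV_ROUND_CLOSEST_ULL((u64)duration_us * carrier_hz, 1000000);
}
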
diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c
index e340df0b6261..f94c15ff84f7 100644
--- a/drivers/media/test-drivers/vivid/vivid-ctrls.c
+++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c
@@ -244,7 +244,8 @@ static const struct v4l2_ctrl_config vivid_ctrl_u8_pixel_array = {
.min = 0x00,
.max = 0xff,
.step = 1,
- .dims = { 640 / PIXEL_ARRAY_DIV, 360 / PIXEL_ARRAY_DIV },
+ .dims = { DIV_ROUND_UP(360, PIXEL_ARRAY_DIV),
+ DIV_ROUND_UP(640, PIXEL_ARRAY_DIV) },
};
static const struct v4l2_ctrl_config vivid_ctrl_s32_array = {
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-gen.c b/drivers/media/test-drivers/vivid/vivid-vbi-gen.c
index 70a4024d461e..e0f4151bda18 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-gen.c
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-gen.c
@@ -5,6 +5,7 @@
* Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
+#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
@@ -165,12 +166,7 @@ static const u8 vivid_cc_sequence2[30] = {
static u8 calc_parity(u8 val)
{
- unsigned i;
- unsigned tot = 0;
-
- for (i = 0; i < 7; i++)
- tot += (val & (1 << i)) ? 1 : 0;
- return val | ((tot & 1) ? 0 : 0x80);
+ return val | (parity8(val) ? 0 : 0x80);
}
static void vivid_vbi_gen_set_time_of_day(u8 *packet)
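
parity8() returns 1 when its argument has an odd number of set bits, so the helper above sets bit 7 exactly when bits 0-6 carry even parity, producing the odd overall parity that closed-caption bytes require. An open-coded equivalent for illustration:

static u8 calc_parity_open_coded(u8 val)
{
	/* count the ones in bits 0-6; set bit 7 iff that count is even */
	return (hweight8(val & 0x7f) & 1) ? val : (val | 0x80);
}
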
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index 84e9155b5815..2e4c1ed37cd2 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -454,8 +454,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
if (keep_controls)
return;
- dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV);
- dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV);
+ dims[0] = DIV_ROUND_UP(dev->src_rect.height, PIXEL_ARRAY_DIV);
+ dims[1] = DIV_ROUND_UP(dev->src_rect.width, PIXEL_ARRAY_DIV);
v4l2_ctrl_modify_dimensions(dev->pixel_array, dims);
}
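
The fix matters because the control's dimensions count PIXEL_ARRAY_DIV-sized cells, whereas roundup() yields a pixel count; the two assignments also swap so that dims[0] holds rows. A worked example, assuming an illustrative PIXEL_ARRAY_DIV of 30 and a 720x480 source:

dims[0] = DIV_ROUND_UP(480, 30);	/* height -> 16 cells */
dims[1] = DIV_ROUND_UP(720, 30);	/* width  -> 24 cells */
/* old code: roundup(720, 30) == 720, a pixel count, not a cell count */
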
diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c
index d98343fd33fe..91e177aa8136 100644
--- a/drivers/media/usb/gspca/vicam.c
+++ b/drivers/media/usb/gspca/vicam.c
@@ -227,6 +227,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
const struct ihex_binrec *rec;
const struct firmware *fw;
u8 *firmware_buf;
+ int len;
ret = request_ihex_firmware(&fw, VICAM_FIRMWARE,
&gspca_dev->dev->dev);
@@ -241,9 +242,14 @@ static int sd_init(struct gspca_dev *gspca_dev)
goto exit;
}
for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) {
- memcpy(firmware_buf, rec->data, be16_to_cpu(rec->len));
+ len = be16_to_cpu(rec->len);
+ if (len > PAGE_SIZE) {
+ ret = -EINVAL;
+ break;
+ }
+ memcpy(firmware_buf, rec->data, len);
ret = vicam_control_msg(gspca_dev, 0xff, 0, 0, firmware_buf,
- be16_to_cpu(rec->len));
+ len);
if (ret < 0)
break;
}
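
The added length check guards the PAGE_SIZE staging buffer against oversized records in a malformed firmware image. The same pattern as a stand-alone helper, with hypothetical names:

static int copy_ihex_record(u8 *buf, size_t bufsize,
			    const struct ihex_binrec *rec)
{
	int len = be16_to_cpu(rec->len);

	if (len > bufsize)
		return -EINVAL;	/* never trust lengths from the blob */

	memcpy(buf, rec->data, len);
	return len;
}
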
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index 070559b01b01..9eacc85e3f11 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -124,32 +124,12 @@ static int hdpvr_transfer(struct i2c_adapter *i2c_adapter, struct i2c_msg *msgs,
else
retval = hdpvr_i2c_write(dev, 1, addr, msgs[0].buf,
msgs[0].len);
- } else if (num == 2) {
- if (msgs[0].addr != msgs[1].addr) {
- v4l2_warn(&dev->v4l2_dev, "refusing 2-phase i2c xfer with conflicting target addresses\n");
- retval = -EINVAL;
- goto out;
- }
-
- if ((msgs[0].flags & I2C_M_RD) || !(msgs[1].flags & I2C_M_RD)) {
- v4l2_warn(&dev->v4l2_dev, "refusing complex xfer with r0=%d, r1=%d\n",
- msgs[0].flags & I2C_M_RD,
- msgs[1].flags & I2C_M_RD);
- retval = -EINVAL;
- goto out;
- }
-
- /*
- * Write followed by atomic read is the only complex xfer that
- * we actually support here.
- */
+ } else {
+ /* do write-then-read */
retval = hdpvr_i2c_read(dev, 1, addr, msgs[0].buf, msgs[0].len,
msgs[1].buf, msgs[1].len);
- } else {
- v4l2_warn(&dev->v4l2_dev, "refusing %d-phase i2c xfer\n", num);
}
-out:
mutex_unlock(&dev->i2c_mutex);
return retval ? retval : num;
@@ -165,10 +145,16 @@ static const struct i2c_algorithm hdpvr_algo = {
.functionality = hdpvr_functionality,
};
+/* prevent invalid 0-length usb_control_msg and support only write-then-read */
+static const struct i2c_adapter_quirks hdpvr_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_COMB_WRITE_THEN_READ,
+};
+
static const struct i2c_adapter hdpvr_i2c_adapter_template = {
.name = "Hauppauge HD PVR I2C",
.owner = THIS_MODULE,
.algo = &hdpvr_algo,
+ .quirks = &hdpvr_quirks,
};
static int hdpvr_activate_ir(struct hdpvr_device *dev)
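
With the quirks table in place, the I2C core rejects unsupported transfer shapes before master_xfer is ever called, which is why the hand-rolled validation above could be deleted. Roughly, simplified from the core's quirk checking:

static bool xfer_allowed_by_quirks(const struct i2c_msg *msgs, int num)
{
	if (num == 1)
		return true;

	/* I2C_AQ_COMB_WRITE_THEN_READ: exactly write-then-read, one target */
	return num == 2 &&
	       !(msgs[0].flags & I2C_M_RD) &&
	       (msgs[1].flags & I2C_M_RD) &&
	       msgs[0].addr == msgs[1].addr;
}
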
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index be22a9697197..de0328100a60 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -73,6 +73,10 @@ static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm)
}
if (params) {
+ if (vb2_is_busy(&usbtv->vb2q) &&
+ (usbtv->width != params->cap_width ||
+ usbtv->height != params->cap_height))
+ return -EBUSY;
usbtv->width = params->cap_width;
usbtv->height = params->cap_height;
usbtv->n_chunks = usbtv->width * usbtv->height
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 44b6513c5264..efe609d70877 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1483,14 +1483,28 @@ static u32 uvc_get_ctrl_bitmap(struct uvc_control *ctrl,
return ~0;
}
+/*
+ * Maximum retry count to avoid spurious errors with controls. Increasing this
+ * value does not seem to produce better results on the tested hardware.
+ */
+#define MAX_QUERY_RETRIES 2
+
static int __uvc_queryctrl_boundaries(struct uvc_video_chain *chain,
struct uvc_control *ctrl,
struct uvc_control_mapping *mapping,
struct v4l2_query_ext_ctrl *v4l2_ctrl)
{
if (!ctrl->cached) {
- int ret = uvc_ctrl_populate_cache(chain, ctrl);
- if (ret < 0)
+ unsigned int retries;
+ int ret;
+
+ for (retries = 0; retries < MAX_QUERY_RETRIES; retries++) {
+ ret = uvc_ctrl_populate_cache(chain, ctrl);
+ if (ret != -EIO)
+ break;
+ }
+
+ if (ret)
return ret;
}
@@ -1567,6 +1581,7 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
{
struct uvc_control_mapping *master_map = NULL;
struct uvc_control *master_ctrl = NULL;
+ int ret;
memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
v4l2_ctrl->id = mapping->id;
@@ -1587,18 +1602,31 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
__uvc_find_control(ctrl->entity, mapping->master_id,
&master_map, &master_ctrl, 0, 0);
if (master_ctrl && (master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) {
+ unsigned int retries;
s32 val;
int ret;
if (WARN_ON(uvc_ctrl_mapping_is_compound(master_map)))
return -EIO;
- ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
- if (ret < 0)
- return ret;
+ for (retries = 0; retries < MAX_QUERY_RETRIES; retries++) {
+ ret = __uvc_ctrl_get(chain, master_ctrl, master_map,
+ &val);
+ if (!ret)
+ break;
+ if (ret < 0 && ret != -EIO)
+ return ret;
+ }
- if (val != mapping->master_manual)
- v4l2_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ if (ret == -EIO) {
+ dev_warn_ratelimited(&chain->dev->udev->dev,
+ "UVC non compliance: Error %d querying master control %x (%s)\n",
+ ret, master_map->id,
+ uvc_map_get_name(master_map));
+ } else {
+ if (val != mapping->master_manual)
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ }
}
v4l2_ctrl->elem_size = uvc_mapping_v4l2_size(mapping);
@@ -1613,7 +1641,18 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
return 0;
}
- return __uvc_queryctrl_boundaries(chain, ctrl, mapping, v4l2_ctrl);
+ ret = __uvc_queryctrl_boundaries(chain, ctrl, mapping, v4l2_ctrl);
+ if (ret && !mapping->disabled) {
+ dev_warn(&chain->dev->udev->dev,
+ "UVC non compliance: permanently disabling control %x (%s), due to error %d\n",
+ mapping->id, uvc_map_get_name(mapping), ret);
+ mapping->disabled = true;
+ }
+
+ if (mapping->disabled)
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
+
+ return 0;
}
int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
@@ -1812,48 +1851,48 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
}
-static int uvc_ctrl_set_handle(struct uvc_fh *handle, struct uvc_control *ctrl,
- struct uvc_fh *new_handle)
+static int uvc_ctrl_set_handle(struct uvc_control *ctrl, struct uvc_fh *handle)
{
- lockdep_assert_held(&handle->chain->ctrl_mutex);
-
- if (new_handle) {
- int ret;
+ int ret;
- if (ctrl->handle)
- dev_warn_ratelimited(&handle->stream->dev->udev->dev,
- "UVC non compliance: Setting an async control with a pending operation.");
+ lockdep_assert_held(&handle->chain->ctrl_mutex);
- if (new_handle == ctrl->handle)
- return 0;
+ if (ctrl->handle) {
+ dev_warn_ratelimited(&handle->stream->dev->udev->dev,
+ "UVC non compliance: Setting an async control with a pending operation.");
- if (ctrl->handle) {
- WARN_ON(!ctrl->handle->pending_async_ctrls);
- if (ctrl->handle->pending_async_ctrls)
- ctrl->handle->pending_async_ctrls--;
- ctrl->handle = new_handle;
- handle->pending_async_ctrls++;
+ if (ctrl->handle == handle)
return 0;
- }
-
- ret = uvc_pm_get(handle->chain->dev);
- if (ret)
- return ret;
- ctrl->handle = new_handle;
- handle->pending_async_ctrls++;
+ WARN_ON(!ctrl->handle->pending_async_ctrls);
+ if (ctrl->handle->pending_async_ctrls)
+ ctrl->handle->pending_async_ctrls--;
+ ctrl->handle = handle;
+ ctrl->handle->pending_async_ctrls++;
return 0;
}
- /* Cannot clear the handle for a control not owned by us.*/
- if (WARN_ON(ctrl->handle != handle))
+ ret = uvc_pm_get(handle->chain->dev);
+ if (ret)
+ return ret;
+
+ ctrl->handle = handle;
+ ctrl->handle->pending_async_ctrls++;
+ return 0;
+}
+
+static int uvc_ctrl_clear_handle(struct uvc_control *ctrl)
+{
+ lockdep_assert_held(&ctrl->handle->chain->ctrl_mutex);
+
+ if (WARN_ON(!ctrl->handle->pending_async_ctrls)) {
+ ctrl->handle = NULL;
return -EINVAL;
+ }
+ ctrl->handle->pending_async_ctrls--;
+ uvc_pm_put(ctrl->handle->chain->dev);
ctrl->handle = NULL;
- if (WARN_ON(!handle->pending_async_ctrls))
- return -EINVAL;
- handle->pending_async_ctrls--;
- uvc_pm_put(handle->chain->dev);
return 0;
}
@@ -1871,7 +1910,7 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
handle = ctrl->handle;
if (handle)
- uvc_ctrl_set_handle(handle, ctrl, NULL);
+ uvc_ctrl_clear_handle(ctrl);
list_for_each_entry(mapping, &ctrl->info.mappings, list) {
s32 value;
@@ -2033,18 +2072,24 @@ static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
goto done;
}
- list_add_tail(&sev->node, &mapping->ev_subs);
if (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL) {
struct v4l2_event ev;
u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
s32 val = 0;
+ ret = uvc_pm_get(handle->chain->dev);
+ if (ret)
+ goto done;
+
if (uvc_ctrl_mapping_is_compound(mapping) ||
__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
changes |= V4L2_EVENT_CTRL_CH_VALUE;
uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, val,
changes);
+
+ uvc_pm_put(handle->chain->dev);
+
/*
* Mark the queue as active, allowing this initial event to be
* accepted.
@@ -2053,6 +2098,8 @@ static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
v4l2_event_queue_fh(sev->fh, &ev);
}
+ list_add_tail(&sev->node, &mapping->ev_subs);
+
done:
mutex_unlock(&handle->chain->ctrl_mutex);
return ret;
@@ -2161,7 +2208,7 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
if (!rollback && handle && !ret &&
ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
- ret = uvc_ctrl_set_handle(handle, ctrl, handle);
+ ret = uvc_ctrl_set_handle(ctrl, handle);
if (ret < 0 && !rollback) {
if (err_ctrl)
@@ -3271,7 +3318,7 @@ void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
for (unsigned int i = 0; i < entity->ncontrols; ++i) {
if (entity->controls[i].handle != handle)
continue;
- uvc_ctrl_set_handle(handle, &entity->controls[i], NULL);
+ uvc_ctrl_clear_handle(&entity->controls[i]);
}
}
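
The retry loops bound transient -EIO failures to MAX_QUERY_RETRIES attempts while still failing fast on permanent errors. The idiom, extracted into a hypothetical helper:

static int populate_cache_with_retries(struct uvc_video_chain *chain,
				       struct uvc_control *ctrl)
{
	unsigned int i;
	int ret;

	for (i = 0; i < MAX_QUERY_RETRIES; i++) {
		ret = uvc_ctrl_populate_cache(chain, ctrl);
		if (ret != -EIO)	/* success, or an error not worth retrying */
			break;
	}

	return ret;
}
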
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index da24a655ab68..775bede0d93d 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -344,6 +344,9 @@ static int uvc_parse_format(struct uvc_device *dev,
u8 ftype;
int ret;
+ if (buflen < 4)
+ return -EINVAL;
+
format->type = buffer[2];
format->index = buffer[3];
format->frames = frames;
@@ -1866,7 +1869,7 @@ static int uvc_scan_device(struct uvc_device *dev)
if (list_empty(&dev->chains)) {
dev_info(&dev->udev->dev, "No valid video chain found.\n");
- return -1;
+ return -ENODEV;
}
/* Add GPIO entity to the first chain. */
@@ -1958,31 +1961,7 @@ static void uvc_unregister_video(struct uvc_device *dev)
if (!video_is_registered(&stream->vdev))
continue;
- /*
- * For stream->vdev we follow the same logic as:
- * vb2_video_unregister_device().
- */
-
- /* 1. Take a reference to vdev */
- get_device(&stream->vdev.dev);
-
- /* 2. Ensure that no new ioctls can be called. */
- video_unregister_device(&stream->vdev);
-
- /* 3. Wait for old ioctls to finish. */
- mutex_lock(&stream->mutex);
-
- /* 4. Stop streaming. */
- uvc_queue_release(&stream->queue);
-
- mutex_unlock(&stream->mutex);
-
- put_device(&stream->vdev.dev);
-
- /*
- * For stream->meta.vdev we can directly call:
- * vb2_video_unregister_device().
- */
+ vb2_video_unregister_device(&stream->vdev);
vb2_video_unregister_device(&stream->meta.vdev);
/*
@@ -2030,6 +2009,8 @@ int uvc_register_video_device(struct uvc_device *dev,
vdev->ioctl_ops = ioctl_ops;
vdev->release = uvc_release;
vdev->prio = &stream->chain->prio;
+ vdev->queue = &queue->queue;
+ vdev->lock = &queue->mutex;
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
vdev->vfl_dir = VFL_DIR_TX;
else
@@ -2239,7 +2220,6 @@ static int uvc_probe(struct usb_interface *intf,
/* Parse the Video Class control descriptor. */
ret = uvc_parse_control(dev);
if (ret < 0) {
- ret = -ENODEV;
uvc_dbg(dev, PROBE, "Unable to parse UVC descriptors\n");
goto error;
}
@@ -2275,22 +2255,19 @@ static int uvc_probe(struct usb_interface *intf,
goto error;
/* Scan the device for video chains. */
- if (uvc_scan_device(dev) < 0) {
- ret = -ENODEV;
+ ret = uvc_scan_device(dev);
+ if (ret < 0)
goto error;
- }
/* Initialize controls. */
- if (uvc_ctrl_init_device(dev) < 0) {
- ret = -ENODEV;
+ ret = uvc_ctrl_init_device(dev);
+ if (ret < 0)
goto error;
- }
/* Register video device nodes. */
- if (uvc_register_chains(dev) < 0) {
- ret = -ENODEV;
+ ret = uvc_register_chains(dev);
+ if (ret < 0)
goto error;
- }
#ifdef CONFIG_MEDIA_CONTROLLER
/* Register the media device node */
@@ -2316,6 +2293,13 @@ static int uvc_probe(struct usb_interface *intf,
goto error;
}
+ ret = uvc_meta_init(dev);
+ if (ret < 0) {
+ dev_err(&dev->udev->dev,
+ "Error initializing the metadata formats (%d)\n", ret);
+ goto error;
+ }
+
if (dev->quirks & UVC_QUIRK_NO_RESET_RESUME)
udev->quirks &= ~USB_QUIRK_RESET_RESUME;
@@ -2398,9 +2382,12 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
list_for_each_entry(stream, &dev->streams, list) {
if (stream->intf == intf) {
ret = uvc_video_resume(stream, reset);
- if (ret < 0)
- uvc_queue_streamoff(&stream->queue,
- stream->queue.queue.type);
+ if (ret < 0) {
+ mutex_lock(&stream->queue.mutex);
+ vb2_streamoff(&stream->queue.queue,
+ stream->queue.queue.type);
+ mutex_unlock(&stream->queue.mutex);
+ }
return ret;
}
}
@@ -2514,6 +2501,15 @@ static const struct uvc_device_info uvc_quirk_force_y8 = {
* Sort these by vendor/product ID.
*/
static const struct usb_device_id uvc_ids[] = {
+ /* HP Webcam HD 2300 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x03f0,
+ .idProduct = 0xe207,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid },
/* Quanta ACER HD User Facing */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvc_metadata.c b/drivers/media/usb/uvc/uvc_metadata.c
index 82de7781f5b6..229e08ff323e 100644
--- a/drivers/media/usb/uvc/uvc_metadata.c
+++ b/drivers/media/usb/uvc/uvc_metadata.c
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/usb/uvc.h>
#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>
@@ -63,15 +64,20 @@ static int uvc_meta_v4l2_try_format(struct file *file, void *fh,
struct uvc_streaming *stream = video_get_drvdata(vfh->vdev);
struct uvc_device *dev = stream->dev;
struct v4l2_meta_format *fmt = &format->fmt.meta;
- u32 fmeta = fmt->dataformat;
+ u32 fmeta = V4L2_META_FMT_UVC;
if (format->type != vfh->vdev->queue->type)
return -EINVAL;
+ for (unsigned int i = 0; i < dev->nmeta_formats; i++)
+ if (dev->meta_formats[i] == fmt->dataformat) {
+ fmeta = fmt->dataformat;
+ break;
+ }
+
memset(fmt, 0, sizeof(*fmt));
- fmt->dataformat = fmeta == dev->info->meta_format
- ? fmeta : V4L2_META_FMT_UVC;
+ fmt->dataformat = fmeta;
fmt->buffersize = UVC_METADATA_BUF_SIZE;
return 0;
@@ -96,7 +102,7 @@ static int uvc_meta_v4l2_set_format(struct file *file, void *fh,
*/
mutex_lock(&stream->mutex);
- if (uvc_queue_allocated(&stream->queue))
+ if (vb2_is_busy(&stream->meta.queue.queue))
ret = -EBUSY;
else
stream->meta.format = fmt->dataformat;
@@ -112,17 +118,19 @@ static int uvc_meta_v4l2_enum_formats(struct file *file, void *fh,
struct v4l2_fh *vfh = file->private_data;
struct uvc_streaming *stream = video_get_drvdata(vfh->vdev);
struct uvc_device *dev = stream->dev;
- u32 index = fdesc->index;
+ u32 i = fdesc->index;
+
+ if (fdesc->type != vfh->vdev->queue->type)
+ return -EINVAL;
- if (fdesc->type != vfh->vdev->queue->type ||
- index > 1U || (index && !dev->info->meta_format))
+ if (i >= dev->nmeta_formats)
return -EINVAL;
memset(fdesc, 0, sizeof(*fdesc));
fdesc->type = vfh->vdev->queue->type;
- fdesc->index = index;
- fdesc->pixelformat = index ? dev->info->meta_format : V4L2_META_FMT_UVC;
+ fdesc->index = i;
+ fdesc->pixelformat = dev->meta_formats[i];
return 0;
}
@@ -156,6 +164,71 @@ static const struct v4l2_file_operations uvc_meta_fops = {
.mmap = vb2_fop_mmap,
};
+static struct uvc_entity *uvc_meta_find_msxu(struct uvc_device *dev)
+{
+ static const u8 uvc_msxu_guid[16] = UVC_GUID_MSXU_1_5;
+ struct uvc_entity *entity;
+
+ list_for_each_entry(entity, &dev->entities, list) {
+ if (!memcmp(entity->guid, uvc_msxu_guid, sizeof(entity->guid)))
+ return entity;
+ }
+
+ return NULL;
+}
+
+#define MSXU_CONTROL_METADATA 0x9
+static int uvc_meta_detect_msxu(struct uvc_device *dev)
+{
+ u32 *data __free(kfree) = NULL;
+ struct uvc_entity *entity;
+ int ret;
+
+ entity = uvc_meta_find_msxu(dev);
+ if (!entity)
+ return 0;
+
+ /*
+ * USB requires specially aligned transfer buffers; the simplest way to
+ * make sure that query_ctrl will work is to kmalloc() them.
+ */
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Check if the metadata is already enabled. */
+ ret = uvc_query_ctrl(dev, UVC_GET_CUR, entity->id, dev->intfnum,
+ MSXU_CONTROL_METADATA, data, sizeof(*data));
+ if (ret)
+ return 0;
+
+ if (*data) {
+ dev->quirks |= UVC_QUIRK_MSXU_META;
+ return 0;
+ }
+
+ /*
+ * We have seen devices that require 1 to enable the metadata, others
+ * that require a value != 1, and others a value > 1. Luckily for
+ * us, the value from GET_MAX seems to work all the time.
+ */
+ ret = uvc_query_ctrl(dev, UVC_GET_MAX, entity->id, dev->intfnum,
+ MSXU_CONTROL_METADATA, data, sizeof(*data));
+ if (ret || !*data)
+ return 0;
+
+ /*
+ * If we can set MSXU_CONTROL_METADATA, the device will report
+ * metadata.
+ */
+ ret = uvc_query_ctrl(dev, UVC_SET_CUR, entity->id, dev->intfnum,
+ MSXU_CONTROL_METADATA, data, sizeof(*data));
+ if (!ret)
+ dev->quirks |= UVC_QUIRK_MSXU_META;
+
+ return 0;
+}
+
int uvc_meta_register(struct uvc_streaming *stream)
{
struct uvc_device *dev = stream->dev;
@@ -164,13 +237,32 @@ int uvc_meta_register(struct uvc_streaming *stream)
stream->meta.format = V4L2_META_FMT_UVC;
- /*
- * The video interface queue uses manual locking and thus does not set
- * the queue pointer. Set it manually here.
- */
- vdev->queue = &queue->queue;
-
return uvc_register_video_device(dev, stream, vdev, queue,
V4L2_BUF_TYPE_META_CAPTURE,
&uvc_meta_fops, &uvc_meta_ioctl_ops);
}
+
+int uvc_meta_init(struct uvc_device *dev)
+{
+ unsigned int i = 0;
+ int ret;
+
+ ret = uvc_meta_detect_msxu(dev);
+ if (ret)
+ return ret;
+
+ dev->meta_formats[i++] = V4L2_META_FMT_UVC;
+
+ if (dev->info->meta_format &&
+ !WARN_ON(dev->info->meta_format == V4L2_META_FMT_UVC))
+ dev->meta_formats[i++] = dev->info->meta_format;
+
+ if (dev->quirks & UVC_QUIRK_MSXU_META &&
+ !WARN_ON(dev->info->meta_format == V4L2_META_FMT_UVC_MSXU_1_5))
+ dev->meta_formats[i++] = V4L2_META_FMT_UVC_MSXU_1_5;
+
+ /* IMPORTANT: for new meta-formats update UVC_MAX_META_DATA_FORMATS. */
+ dev->nmeta_formats = i;
+
+ return 0;
+}
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 2ee142621042..790184c9843d 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -42,13 +42,15 @@ static inline struct uvc_buffer *uvc_vbuf_to_buffer(struct vb2_v4l2_buffer *buf)
*
* This function must be called with the queue spinlock held.
*/
-static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
- enum uvc_buffer_state state)
+static void __uvc_queue_return_buffers(struct uvc_video_queue *queue,
+ enum uvc_buffer_state state)
{
enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
? VB2_BUF_STATE_ERROR
: VB2_BUF_STATE_QUEUED;
+ lockdep_assert_held(&queue->irqlock);
+
while (!list_empty(&queue->irqqueue)) {
struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
struct uvc_buffer,
@@ -59,6 +61,14 @@ static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
}
}
+static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
+ enum uvc_buffer_state state)
+{
+ spin_lock_irq(&queue->irqlock);
+ __uvc_queue_return_buffers(queue, state);
+ spin_unlock_irq(&queue->irqlock);
+}
+
/* -----------------------------------------------------------------------------
* videobuf2 queue operations
*/
@@ -157,7 +167,7 @@ static void uvc_buffer_finish(struct vb2_buffer *vb)
uvc_video_clock_update(stream, vbuf, buf);
}
-static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
+static int uvc_start_streaming_video(struct vb2_queue *vq, unsigned int count)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
struct uvc_streaming *stream = uvc_queue_to_stream(queue);
@@ -165,31 +175,44 @@ static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
lockdep_assert_irqs_enabled();
+ ret = uvc_pm_get(stream->dev);
+ if (ret)
+ return ret;
+
queue->buf_used = 0;
ret = uvc_video_start_streaming(stream);
if (ret == 0)
return 0;
- spin_lock_irq(&queue->irqlock);
+ uvc_pm_put(stream->dev);
+
uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
- spin_unlock_irq(&queue->irqlock);
return ret;
}
-static void uvc_stop_streaming(struct vb2_queue *vq)
+static void uvc_stop_streaming_video(struct vb2_queue *vq)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+ struct uvc_streaming *stream = uvc_queue_to_stream(queue);
lockdep_assert_irqs_enabled();
- if (vq->type != V4L2_BUF_TYPE_META_CAPTURE)
- uvc_video_stop_streaming(uvc_queue_to_stream(queue));
+ uvc_video_stop_streaming(uvc_queue_to_stream(queue));
+
+ uvc_pm_put(stream->dev);
+
+ uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
+}
+
+static void uvc_stop_streaming_meta(struct vb2_queue *vq)
+{
+ struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+
+ lockdep_assert_irqs_enabled();
- spin_lock_irq(&queue->irqlock);
uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
- spin_unlock_irq(&queue->irqlock);
}
static const struct vb2_ops uvc_queue_qops = {
@@ -197,15 +220,20 @@ static const struct vb2_ops uvc_queue_qops = {
.buf_prepare = uvc_buffer_prepare,
.buf_queue = uvc_buffer_queue,
.buf_finish = uvc_buffer_finish,
- .start_streaming = uvc_start_streaming,
- .stop_streaming = uvc_stop_streaming,
+ .start_streaming = uvc_start_streaming_video,
+ .stop_streaming = uvc_stop_streaming_video,
};
static const struct vb2_ops uvc_meta_queue_qops = {
.queue_setup = uvc_queue_setup,
.buf_prepare = uvc_buffer_prepare,
.buf_queue = uvc_buffer_queue,
- .stop_streaming = uvc_stop_streaming,
+ /*
+ * .start_streaming is not provided here. Metadata relies on video
+ * streaming being active. If video isn't streaming, then no metadata
+ * will arrive either.
+ */
+ .stop_streaming = uvc_stop_streaming_meta,
};
int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
@@ -242,154 +270,11 @@ int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
return 0;
}
-void uvc_queue_release(struct uvc_video_queue *queue)
-{
- mutex_lock(&queue->mutex);
- vb2_queue_release(&queue->queue);
- mutex_unlock(&queue->mutex);
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 queue operations
- */
-
-int uvc_request_buffers(struct uvc_video_queue *queue,
- struct v4l2_requestbuffers *rb)
-{
- int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_reqbufs(&queue->queue, rb);
- mutex_unlock(&queue->mutex);
-
- return ret ? ret : rb->count;
-}
-
-int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
-{
- int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_querybuf(&queue->queue, buf);
- mutex_unlock(&queue->mutex);
-
- return ret;
-}
-
-int uvc_create_buffers(struct uvc_video_queue *queue,
- struct v4l2_create_buffers *cb)
-{
- int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_create_bufs(&queue->queue, cb);
- mutex_unlock(&queue->mutex);
-
- return ret;
-}
-
-int uvc_queue_buffer(struct uvc_video_queue *queue,
- struct media_device *mdev, struct v4l2_buffer *buf)
-{
- int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_qbuf(&queue->queue, mdev, buf);
- mutex_unlock(&queue->mutex);
-
- return ret;
-}
-
-int uvc_export_buffer(struct uvc_video_queue *queue,
- struct v4l2_exportbuffer *exp)
-{
- int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_expbuf(&queue->queue, exp);
- mutex_unlock(&queue->mutex);
-
- return ret;
-}
-
-int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
- int nonblocking)
-{
- int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
- mutex_unlock(&queue->mutex);
-
- return ret;
-}
-
-int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
-{
- int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_streamon(&queue->queue, type);
- mutex_unlock(&queue->mutex);
-
- return ret;
-}
-
-int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
-{
- int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_streamoff(&queue->queue, type);
- mutex_unlock(&queue->mutex);
-
- return ret;
-}
-
-int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
-{
- return vb2_mmap(&queue->queue, vma);
-}
-
-#ifndef CONFIG_MMU
-unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
- unsigned long pgoff)
-{
- return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
-}
-#endif
-
-__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
- poll_table *wait)
-{
- __poll_t ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_poll(&queue->queue, file, wait);
- mutex_unlock(&queue->mutex);
-
- return ret;
-}
-
/* -----------------------------------------------------------------------------
*
*/
/*
- * Check if buffers have been allocated.
- */
-int uvc_queue_allocated(struct uvc_video_queue *queue)
-{
- int allocated;
-
- mutex_lock(&queue->mutex);
- allocated = vb2_is_busy(&queue->queue);
- mutex_unlock(&queue->mutex);
-
- return allocated;
-}
-
-/*
* Cancel the video buffers queue.
*
* Cancelling the queue marks all buffers on the irq queue as erroneous,
@@ -406,7 +291,7 @@ void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
unsigned long flags;
spin_lock_irqsave(&queue->irqlock, flags);
- uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
+ __uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
/*
* This must be protected by the irqlock spinlock to avoid race
* conditions between uvc_buffer_queue and the disconnection event that
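For context on the deletions above: every removed wrapper did nothing beyond taking queue->mutex around the matching vb2_*() call, and videobuf2 ships stock helpers that provide exactly that serialization through vb2_queue->lock. A minimal sketch of the wiring those helpers expect follows; the demo_* names and demo_lock are illustrative, not code from this series.

#include <linux/module.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>

static struct mutex demo_lock;		/* serializes access to the queue */

static const struct v4l2_ioctl_ops demo_ioctl_ops = {
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
};

static const struct v4l2_file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= vb2_fop_mmap,
	.poll		= vb2_fop_poll,
};

/* The helpers find the queue through vdev->queue and lock q->lock. */
static void demo_wire_up(struct video_device *vdev, struct vb2_queue *q)
{
	mutex_init(&demo_lock);
	q->lock = &demo_lock;
	vdev->queue = q;
	vdev->fops = &demo_fops;
	vdev->ioctl_ops = &demo_ioctl_ops;
}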
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 668a4e9d772c..160f9cf6e6db 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -47,8 +47,6 @@ void uvc_pm_put(struct uvc_device *dev)
usb_autopm_put_interface(dev->intf);
}
-static int uvc_acquire_privileges(struct uvc_fh *handle);
-
static int uvc_control_add_xu_mapping(struct uvc_video_chain *chain,
struct uvc_control_mapping *map,
const struct uvc_xu_control_mapping *xmap)
@@ -436,10 +434,6 @@ static int uvc_ioctl_s_fmt(struct file *file, void *fh,
const struct uvc_frame *frame;
int ret;
- ret = uvc_acquire_privileges(handle);
- if (ret < 0)
- return ret;
-
if (fmt->type != stream->type)
return -EINVAL;
@@ -448,8 +442,7 @@ static int uvc_ioctl_s_fmt(struct file *file, void *fh,
return ret;
mutex_lock(&stream->mutex);
-
- if (uvc_queue_allocated(&stream->queue)) {
+ if (vb2_is_busy(&stream->queue.queue)) {
ret = -EBUSY;
goto done;
}
@@ -513,10 +506,6 @@ static int uvc_ioctl_s_parm(struct file *file, void *fh,
unsigned int i;
int ret;
- ret = uvc_acquire_privileges(handle);
- if (ret < 0)
- return ret;
-
if (parm->type != stream->type)
return -EINVAL;
@@ -594,63 +583,6 @@ static int uvc_ioctl_s_parm(struct file *file, void *fh,
}
/* ------------------------------------------------------------------------
- * Privilege management
- */
-
-/*
- * Privilege management is the multiple-open implementation basis. The current
- * implementation is completely transparent for the end-user and doesn't
- * require explicit use of the VIDIOC_G_PRIORITY and VIDIOC_S_PRIORITY ioctls.
- * Those ioctls enable finer control on the device (by making possible for a
- * user to request exclusive access to a device), but are not mature yet.
- * Switching to the V4L2 priority mechanism might be considered in the future
- * if this situation changes.
- *
- * Each open instance of a UVC device can either be in a privileged or
- * unprivileged state. Only a single instance can be in a privileged state at
- * a given time. Trying to perform an operation that requires privileges will
- * automatically acquire the required privileges if possible, or return -EBUSY
- * otherwise. Privileges are dismissed when closing the instance or when
- * freeing the video buffers using VIDIOC_REQBUFS.
- *
- * Operations that require privileges are:
- *
- * - VIDIOC_S_INPUT
- * - VIDIOC_S_PARM
- * - VIDIOC_S_FMT
- * - VIDIOC_CREATE_BUFS
- * - VIDIOC_REQBUFS
- */
-static int uvc_acquire_privileges(struct uvc_fh *handle)
-{
- /* Always succeed if the handle is already privileged. */
- if (handle->state == UVC_HANDLE_ACTIVE)
- return 0;
-
- /* Check if the device already has a privileged handle. */
- if (atomic_inc_return(&handle->stream->active) != 1) {
- atomic_dec(&handle->stream->active);
- return -EBUSY;
- }
-
- handle->state = UVC_HANDLE_ACTIVE;
- return 0;
-}
-
-static void uvc_dismiss_privileges(struct uvc_fh *handle)
-{
- if (handle->state == UVC_HANDLE_ACTIVE)
- atomic_dec(&handle->stream->active);
-
- handle->state = UVC_HANDLE_PASSIVE;
-}
-
-static int uvc_has_privileges(struct uvc_fh *handle)
-{
- return handle->state == UVC_HANDLE_ACTIVE;
-}
-
-/* ------------------------------------------------------------------------
* V4L2 file operations
*/
@@ -671,7 +603,6 @@ static int uvc_v4l2_open(struct file *file)
v4l2_fh_add(&handle->vfh);
handle->chain = stream->chain;
handle->stream = stream;
- handle->state = UVC_HANDLE_PASSIVE;
file->private_data = handle;
return 0;
@@ -686,19 +617,8 @@ static int uvc_v4l2_release(struct file *file)
uvc_ctrl_cleanup_fh(handle);
- /* Only free resources if this is a privileged handle. */
- if (uvc_has_privileges(handle))
- uvc_queue_release(&stream->queue);
-
- if (handle->is_streaming)
- uvc_pm_put(stream->dev);
-
/* Release the file handle. */
- uvc_dismiss_privileges(handle);
- v4l2_fh_del(&handle->vfh);
- v4l2_fh_exit(&handle->vfh);
- kfree(handle);
- file->private_data = NULL;
+ vb2_fop_release(file);
return 0;
}
@@ -753,140 +673,6 @@ static int uvc_ioctl_try_fmt(struct file *file, void *fh,
return uvc_v4l2_try_format(stream, fmt, &probe, NULL, NULL);
}
-static int uvc_ioctl_reqbufs(struct file *file, void *fh,
- struct v4l2_requestbuffers *rb)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
- int ret;
-
- ret = uvc_acquire_privileges(handle);
- if (ret < 0)
- return ret;
-
- mutex_lock(&stream->mutex);
- ret = uvc_request_buffers(&stream->queue, rb);
- mutex_unlock(&stream->mutex);
- if (ret < 0)
- return ret;
-
- if (ret == 0)
- uvc_dismiss_privileges(handle);
-
- return 0;
-}
-
-static int uvc_ioctl_querybuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- if (!uvc_has_privileges(handle))
- return -EBUSY;
-
- return uvc_query_buffer(&stream->queue, buf);
-}
-
-static int uvc_ioctl_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- if (!uvc_has_privileges(handle))
- return -EBUSY;
-
- return uvc_queue_buffer(&stream->queue,
- stream->vdev.v4l2_dev->mdev, buf);
-}
-
-static int uvc_ioctl_expbuf(struct file *file, void *fh,
- struct v4l2_exportbuffer *exp)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- if (!uvc_has_privileges(handle))
- return -EBUSY;
-
- return uvc_export_buffer(&stream->queue, exp);
-}
-
-static int uvc_ioctl_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- if (!uvc_has_privileges(handle))
- return -EBUSY;
-
- return uvc_dequeue_buffer(&stream->queue, buf,
- file->f_flags & O_NONBLOCK);
-}
-
-static int uvc_ioctl_create_bufs(struct file *file, void *fh,
- struct v4l2_create_buffers *cb)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
- int ret;
-
- ret = uvc_acquire_privileges(handle);
- if (ret < 0)
- return ret;
-
- return uvc_create_buffers(&stream->queue, cb);
-}
-
-static int uvc_ioctl_streamon(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
- int ret;
-
- if (!uvc_has_privileges(handle))
- return -EBUSY;
-
- guard(mutex)(&stream->mutex);
-
- if (handle->is_streaming)
- return 0;
-
- ret = uvc_queue_streamon(&stream->queue, type);
- if (ret)
- return ret;
-
- ret = uvc_pm_get(stream->dev);
- if (ret) {
- uvc_queue_streamoff(&stream->queue, type);
- return ret;
- }
- handle->is_streaming = true;
-
- return 0;
-}
-
-static int uvc_ioctl_streamoff(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- if (!uvc_has_privileges(handle))
- return -EBUSY;
-
- guard(mutex)(&stream->mutex);
-
- uvc_queue_streamoff(&stream->queue, type);
- if (handle->is_streaming) {
- handle->is_streaming = false;
- uvc_pm_put(stream->dev);
- }
-
- return 0;
-}
-
static int uvc_ioctl_enum_input(struct file *file, void *fh,
struct v4l2_input *input)
{
@@ -961,13 +747,13 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
{
struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
struct uvc_video_chain *chain = handle->chain;
u8 *buf;
int ret;
- ret = uvc_acquire_privileges(handle);
- if (ret < 0)
- return ret;
+ if (vb2_is_busy(&stream->queue.queue))
+ return -EBUSY;
if (chain->selector == NULL ||
(chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
@@ -1381,11 +1167,9 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
#define UVCIOC_CTRL_MAP32 _IOWR('u', 0x20, struct uvc_xu_control_mapping32)
#define UVCIOC_CTRL_QUERY32 _IOWR('u', 0x21, struct uvc_xu_control_query32)
-DEFINE_FREE(uvc_pm_put, struct uvc_device *, if (_T) uvc_pm_put(_T))
static long uvc_v4l2_compat_ioctl32(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct uvc_device *uvc_device __free(uvc_pm_put) = NULL;
struct uvc_fh *handle = file->private_data;
union {
struct uvc_xu_control_mapping xmap;
@@ -1398,38 +1182,38 @@ static long uvc_v4l2_compat_ioctl32(struct file *file,
if (ret)
return ret;
- uvc_device = handle->stream->dev;
-
switch (cmd) {
case UVCIOC_CTRL_MAP32:
ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
if (ret)
- return ret;
+ break;
ret = uvc_ioctl_xu_ctrl_map(handle->chain, &karg.xmap);
if (ret)
- return ret;
+ break;
ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
if (ret)
- return ret;
-
+ break;
break;
case UVCIOC_CTRL_QUERY32:
ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
if (ret)
- return ret;
+ break;
ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
if (ret)
- return ret;
+ break;
ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
if (ret)
- return ret;
+ break;
break;
default:
- return -ENOIOCTLCMD;
+ ret = -ENOIOCTLCMD;
+ break;
}
+ uvc_pm_put(handle->stream->dev);
+
return ret;
}
#endif
@@ -1438,81 +1222,37 @@ static long uvc_v4l2_unlocked_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct uvc_fh *handle = file->private_data;
+ unsigned int converted_cmd = v4l2_translate_cmd(cmd);
int ret;
- /* The following IOCTLs do not need to turn on the camera. */
- switch (cmd) {
- case VIDIOC_CREATE_BUFS:
- case VIDIOC_DQBUF:
- case VIDIOC_ENUM_FMT:
- case VIDIOC_ENUM_FRAMEINTERVALS:
- case VIDIOC_ENUM_FRAMESIZES:
- case VIDIOC_ENUMINPUT:
- case VIDIOC_EXPBUF:
- case VIDIOC_G_FMT:
- case VIDIOC_G_PARM:
- case VIDIOC_G_SELECTION:
- case VIDIOC_QBUF:
- case VIDIOC_QUERYCAP:
- case VIDIOC_REQBUFS:
- case VIDIOC_SUBSCRIBE_EVENT:
- case VIDIOC_UNSUBSCRIBE_EVENT:
- return video_ioctl2(file, cmd, arg);
- }
-
- ret = uvc_pm_get(handle->stream->dev);
- if (ret)
+ /* The following IOCTLs need to turn on the camera. */
+ switch (converted_cmd) {
+ case UVCIOC_CTRL_MAP:
+ case UVCIOC_CTRL_QUERY:
+ case VIDIOC_G_CTRL:
+ case VIDIOC_G_EXT_CTRLS:
+ case VIDIOC_G_INPUT:
+ case VIDIOC_QUERYCTRL:
+ case VIDIOC_QUERYMENU:
+ case VIDIOC_QUERY_EXT_CTRL:
+ case VIDIOC_S_CTRL:
+ case VIDIOC_S_EXT_CTRLS:
+ case VIDIOC_S_FMT:
+ case VIDIOC_S_INPUT:
+ case VIDIOC_S_PARM:
+ case VIDIOC_TRY_EXT_CTRLS:
+ case VIDIOC_TRY_FMT:
+ ret = uvc_pm_get(handle->stream->dev);
+ if (ret)
+ return ret;
+ ret = video_ioctl2(file, cmd, arg);
+ uvc_pm_put(handle->stream->dev);
return ret;
+ }
- ret = video_ioctl2(file, cmd, arg);
-
- uvc_pm_put(handle->stream->dev);
- return ret;
-}
-
-static ssize_t uvc_v4l2_read(struct file *file, char __user *data,
- size_t count, loff_t *ppos)
-{
- struct uvc_fh *handle = file->private_data;
- struct uvc_streaming *stream = handle->stream;
-
- uvc_dbg(stream->dev, CALLS, "%s: not implemented\n", __func__);
- return -EINVAL;
-}
-
-static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct uvc_fh *handle = file->private_data;
- struct uvc_streaming *stream = handle->stream;
-
- uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
-
- return uvc_queue_mmap(&stream->queue, vma);
-}
-
-static __poll_t uvc_v4l2_poll(struct file *file, poll_table *wait)
-{
- struct uvc_fh *handle = file->private_data;
- struct uvc_streaming *stream = handle->stream;
-
- uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
-
- return uvc_queue_poll(&stream->queue, file, wait);
-}
-
-#ifndef CONFIG_MMU
-static unsigned long uvc_v4l2_get_unmapped_area(struct file *file,
- unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- struct uvc_fh *handle = file->private_data;
- struct uvc_streaming *stream = handle->stream;
-
- uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
-
- return uvc_queue_get_unmapped_area(&stream->queue, pgoff);
+ /* The other IOCTLs can run with the camera off. */
+ return video_ioctl2(file, cmd, arg);
}
-#endif
const struct v4l2_ioctl_ops uvc_ioctl_ops = {
.vidioc_g_fmt_vid_cap = uvc_ioctl_g_fmt,
@@ -1526,14 +1266,15 @@ const struct v4l2_ioctl_ops uvc_ioctl_ops = {
.vidioc_enum_fmt_vid_out = uvc_ioctl_enum_fmt,
.vidioc_try_fmt_vid_cap = uvc_ioctl_try_fmt,
.vidioc_try_fmt_vid_out = uvc_ioctl_try_fmt,
- .vidioc_reqbufs = uvc_ioctl_reqbufs,
- .vidioc_querybuf = uvc_ioctl_querybuf,
- .vidioc_qbuf = uvc_ioctl_qbuf,
- .vidioc_expbuf = uvc_ioctl_expbuf,
- .vidioc_dqbuf = uvc_ioctl_dqbuf,
- .vidioc_create_bufs = uvc_ioctl_create_bufs,
- .vidioc_streamon = uvc_ioctl_streamon,
- .vidioc_streamoff = uvc_ioctl_streamoff,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_enum_input = uvc_ioctl_enum_input,
.vidioc_g_input = uvc_ioctl_g_input,
.vidioc_s_input = uvc_ioctl_s_input,
@@ -1558,11 +1299,10 @@ const struct v4l2_file_operations uvc_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl32 = uvc_v4l2_compat_ioctl32,
#endif
- .read = uvc_v4l2_read,
- .mmap = uvc_v4l2_mmap,
- .poll = uvc_v4l2_poll,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
#ifndef CONFIG_MMU
- .get_unmapped_area = uvc_v4l2_get_unmapped_area,
+ .get_unmapped_area = vb2_fop_get_unmapped_area,
#endif
};
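The rewritten uvc_v4l2_unlocked_ioctl() above inverts the old logic: rather than listing the ioctls that may run with the camera suspended, it lists the ones that actually touch the hardware and brackets only those with a runtime-PM reference. A sketch of the pattern, with hypothetical demo_pm_get()/demo_pm_put() standing in for uvc_pm_get()/uvc_pm_put(), and v4l2_translate_cmd() being the helper exported later in this series:

#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>

static int demo_pm_get(void) { return 0; }	/* e.g. usb_autopm_get_interface() */
static void demo_pm_put(void) { }		/* e.g. usb_autopm_put_interface() */

static long demo_unlocked_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	/* Normalize time32 variants so the switch sees one cmd value. */
	unsigned int converted_cmd = v4l2_translate_cmd(cmd);
	int ret;

	switch (converted_cmd) {
	case VIDIOC_S_FMT:
	case VIDIOC_S_CTRL:
	case VIDIOC_G_CTRL:
		/* These reach the hardware: resume the device first. */
		ret = demo_pm_get();
		if (ret)
			return ret;
		ret = video_ioctl2(file, cmd, arg);
		demo_pm_put();
		return ret;
	}

	/* Everything else (buffers, enumeration) runs with it powered off. */
	return video_ioctl2(file, cmd, arg);
}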
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index a75af314e46b..a5013a7fbca4 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -262,6 +262,15 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
ctrl->dwMaxPayloadTransferSize = bandwidth;
}
+
+ if (stream->intf->num_altsetting > 1 &&
+ ctrl->dwMaxPayloadTransferSize > stream->maxpsize) {
+ dev_warn_ratelimited(&stream->intf->dev,
+ "UVC non compliance: the max payload transmission size (%u) exceeds the size of the ep max packet (%u). Using the max size.\n",
+ ctrl->dwMaxPayloadTransferSize,
+ stream->maxpsize);
+ ctrl->dwMaxPayloadTransferSize = stream->maxpsize;
+ }
}
static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
@@ -1419,12 +1428,6 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream,
if (!meta_buf || length == 2)
return;
- if (meta_buf->length - meta_buf->bytesused <
- length + sizeof(meta->ns) + sizeof(meta->sof)) {
- meta_buf->error = 1;
- return;
- }
-
has_pts = mem[1] & UVC_STREAM_PTS;
has_scr = mem[1] & UVC_STREAM_SCR;
@@ -1445,6 +1448,12 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream,
!memcmp(scr, stream->clock.last_scr, 6)))
return;
+ if (meta_buf->length - meta_buf->bytesused <
+ length + sizeof(meta->ns) + sizeof(meta->sof)) {
+ meta_buf->error = 1;
+ return;
+ }
+
meta = (struct uvc_meta_buf *)((u8 *)meta_buf->mem + meta_buf->bytesused);
local_irq_save(flags);
time = uvc_video_get_time();
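Moving the free-space check below the PTS/SCR deduplication matters because a full metadata buffer should not be flagged erroneous by a block that would have been skipped as a duplicate anyway. For orientation, each block stored in a V4L2_META_FMT_UVC buffer is the host timestamp pair followed by the raw payload header, which matches the bytesused accounting visible in the hunk (length + sizeof(ns) + sizeof(sof)). A userspace sketch of walking a dequeued buffer; demo_walk_meta() is illustrative:

#include <stddef.h>
#include <stdint.h>
#include <linux/uvcvideo.h>	/* struct uvc_meta_buf */

static void demo_walk_meta(const uint8_t *mem, size_t bytesused)
{
	size_t off = 0;

	/* Minimal block: ns (8) + sof (2) + a 2-byte payload header. */
	while (off + sizeof(struct uvc_meta_buf) <= bytesused) {
		const struct uvc_meta_buf *m = (const void *)(mem + off);

		/*
		 * m->length overlays the first byte of the copied payload
		 * header, i.e. the header's own length, so the stride is
		 * ns + sof + that many bytes.
		 */
		off += sizeof(m->ns) + sizeof(m->sof) + m->length;
	}
}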
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index b9f8eb62ba1d..757254fc4fe9 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -77,6 +77,7 @@
#define UVC_QUIRK_DISABLE_AUTOSUSPEND 0x00008000
#define UVC_QUIRK_INVALID_DEVICE_SOF 0x00010000
#define UVC_QUIRK_MJPEG_NO_EOF 0x00020000
+#define UVC_QUIRK_MSXU_META 0x00040000
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
@@ -134,6 +135,8 @@ struct uvc_control_mapping {
s32 master_manual;
u32 slave_ids[2];
+ bool disabled;
+
const struct uvc_control_mapping *(*filter_mapping)
(struct uvc_video_chain *chain,
struct uvc_control *ctrl);
@@ -326,7 +329,10 @@ struct uvc_buffer {
struct uvc_video_queue {
struct vb2_queue queue;
- struct mutex mutex; /* Protects queue */
+ struct mutex mutex; /* Serializes vb2_queue and fops */
unsigned int flags;
unsigned int buf_used;
@@ -570,6 +576,8 @@ struct uvc_status {
};
} __packed;
+#define UVC_MAX_META_DATA_FORMATS 3
+
struct uvc_device {
struct usb_device *udev;
struct usb_interface *intf;
@@ -580,6 +588,9 @@ struct uvc_device {
const struct uvc_device_info *info;
+ u32 meta_formats[UVC_MAX_META_DATA_FORMATS];
+ unsigned int nmeta_formats;
+
atomic_t nmappings;
/* Video control interface */
@@ -619,18 +630,11 @@ struct uvc_device {
struct uvc_entity *gpio_unit;
};
-enum uvc_handle_state {
- UVC_HANDLE_PASSIVE = 0,
- UVC_HANDLE_ACTIVE = 1,
-};
-
struct uvc_fh {
struct v4l2_fh vfh;
struct uvc_video_chain *chain;
struct uvc_streaming *stream;
- enum uvc_handle_state state;
unsigned int pending_async_ctrls;
- bool is_streaming;
};
/* ------------------------------------------------------------------------
@@ -687,36 +691,11 @@ struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
/* Video buffers queue management. */
int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type);
-void uvc_queue_release(struct uvc_video_queue *queue);
-int uvc_request_buffers(struct uvc_video_queue *queue,
- struct v4l2_requestbuffers *rb);
-int uvc_query_buffer(struct uvc_video_queue *queue,
- struct v4l2_buffer *v4l2_buf);
-int uvc_create_buffers(struct uvc_video_queue *queue,
- struct v4l2_create_buffers *v4l2_cb);
-int uvc_queue_buffer(struct uvc_video_queue *queue,
- struct media_device *mdev,
- struct v4l2_buffer *v4l2_buf);
-int uvc_export_buffer(struct uvc_video_queue *queue,
- struct v4l2_exportbuffer *exp);
-int uvc_dequeue_buffer(struct uvc_video_queue *queue,
- struct v4l2_buffer *v4l2_buf, int nonblocking);
-int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type);
-int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type);
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect);
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf);
struct uvc_buffer *uvc_queue_get_current_buffer(struct uvc_video_queue *queue);
void uvc_queue_buffer_release(struct uvc_buffer *buf);
-int uvc_queue_mmap(struct uvc_video_queue *queue,
- struct vm_area_struct *vma);
-__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
- poll_table *wait);
-#ifndef CONFIG_MMU
-unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
- unsigned long pgoff);
-#endif
-int uvc_queue_allocated(struct uvc_video_queue *queue);
static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
{
return vb2_is_streaming(&queue->queue);
@@ -749,6 +728,7 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
void uvc_video_clock_update(struct uvc_streaming *stream,
struct vb2_v4l2_buffer *vbuf,
struct uvc_buffer *buf);
+int uvc_meta_init(struct uvc_device *dev);
int uvc_meta_register(struct uvc_streaming *stream);
int uvc_register_video_device(struct uvc_device *dev,
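The new meta_formats[]/nmeta_formats pair gives the device a small fixed list of metadata formats (UVC, optionally MSXU and D4XX), sized by UVC_MAX_META_DATA_FORMATS. Enumerating such a bounded list is a direct index lookup; the helper below is hypothetical, assuming the struct uvc_device declared above:

#include <linux/errno.h>
#include <linux/videodev2.h>

static int demo_enum_meta_fmt(struct uvc_device *dev, struct v4l2_fmtdesc *f)
{
	if (f->index >= dev->nmeta_formats)
		return -EINVAL;

	f->pixelformat = dev->meta_formats[f->index];
	return 0;
}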
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index bd160a8c9efe..6e585bc76367 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -323,6 +323,12 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ /* Tiled YUV formats, non contiguous variant */
+ { .format = V4L2_PIX_FMT_NV12MT, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 64, 32, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
+ { .format = V4L2_PIX_FMT_NV12MT_16X16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 16, 8, 0, 0 }, .block_h = { 16, 8, 0, 0 }},
+
/* Bayer RGB formats */
{ .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
@@ -332,6 +338,10 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
@@ -344,6 +354,28 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+
+ /* Renesas Camera Data Receiver Unit formats, bayer order agnostic */
+ { .format = V4L2_PIX_FMT_RAW_CRU10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 6, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RAW_CRU12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 5, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RAW_CRU14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RAW_CRU20, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 3, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
};
unsigned int i;
@@ -505,10 +537,10 @@ s64 __v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler,
freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);
- pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
- __func__);
- pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
- __func__);
+ pr_warn_once("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
+ __func__);
+ pr_warn_once("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
+ __func__);
}
return freq > 0 ? freq : -EINVAL;
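The bpp/bpp_div pairs added above encode fractional bytes per pixel: 10-bit packed Bayer carries 4 pixels in 5 bytes (bpp = 5, bpp_div = 4), 12-bit packed carries 2 pixels in 3 bytes, and the Renesas CRU formats pack 6, 5, 4, or 3 pixels into 8 bytes. A 640-pixel-wide SBGGR10P line therefore takes 640 * 5 / 4 = 800 bytes. A sketch of the arithmetic the table drives (the real core additionally aligns the width to the format's block size first):

#include <media/v4l2-common.h>

static unsigned int demo_bytesperline(u32 pixelformat, unsigned int width)
{
	const struct v4l2_format_info *info = v4l2_format_info(pixelformat);

	if (!info)
		return 0;

	/* Plane 0 only; e.g. SBGGR10P: width * 5 / 4. */
	return width * info->bpp[0] / info->bpp_div[0];
}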
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index 90d25329661e..98b960775e87 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -968,12 +968,12 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
p_h264_sps->flags &=
~V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS;
-
- if (p_h264_sps->chroma_format_idc < 3)
- p_h264_sps->flags &=
- ~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE;
}
+ if (p_h264_sps->chroma_format_idc < 3)
+ p_h264_sps->flags &=
+ ~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE;
+
if (p_h264_sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)
p_h264_sps->flags &=
~V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD;
@@ -1631,14 +1631,17 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
EXPORT_SYMBOL(v4l2_ctrl_handler_init_class);
/* Free all controls and control refs */
-void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
+int v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
{
struct v4l2_ctrl_ref *ref, *next_ref;
struct v4l2_ctrl *ctrl, *next_ctrl;
struct v4l2_subscribed_event *sev, *next_sev;
- if (hdl == NULL || hdl->buckets == NULL)
- return;
+ if (!hdl)
+ return 0;
+
+ if (!hdl->buckets)
+ return hdl->error;
v4l2_ctrl_handler_free_request(hdl);
@@ -1661,9 +1664,10 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
kvfree(hdl->buckets);
hdl->buckets = NULL;
hdl->cached = NULL;
- hdl->error = 0;
mutex_unlock(hdl->lock);
mutex_destroy(&hdl->_lock);
+
+ return hdl->error;
}
EXPORT_SYMBOL(v4l2_ctrl_handler_free);
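Changing v4l2_ctrl_handler_free() from void to int lets callers collect the handler's sticky error at teardown: control-creation failures accumulate in hdl->error, and free() now hands that value back (note it no longer resets hdl->error). A sketch of the probe-path shape this enables, assuming a demo driver with NULL control ops for brevity:

#include <media/v4l2-ctrls.h>

static int demo_init_controls(struct v4l2_ctrl_handler *hdl)
{
	v4l2_ctrl_handler_init(hdl, 2);

	/* Each *_new call becomes a no-op once hdl->error is set. */
	v4l2_ctrl_new_std(hdl, NULL, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	v4l2_ctrl_new_std(hdl, NULL, V4L2_CID_CONTRAST, 0, 255, 1, 128);

	/* On failure, free the partial handler and propagate its error. */
	if (hdl->error)
		return v4l2_ctrl_handler_free(hdl);

	return 0;
}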
diff --git a/drivers/media/v4l2-core/v4l2-i2c.c b/drivers/media/v4l2-core/v4l2-i2c.c
index 586c46544255..ffc64e10fcae 100644
--- a/drivers/media/v4l2-core/v4l2-i2c.c
+++ b/drivers/media/v4l2-core/v4l2-i2c.c
@@ -5,6 +5,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
@@ -24,7 +25,7 @@ void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd)
* registered by us, and would not be
* re-created by just probing the V4L2 driver.
*/
- if (client && !client->dev.of_node && !client->dev.fwnode)
+ if (client && !dev_fwnode(&client->dev))
i2c_unregister_device(client);
}
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 650dc1956f73..46da373066f4 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1413,6 +1413,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break;
case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break;
case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break;
+ case V4L2_PIX_FMT_RAW_CRU10: descr = "10-bit Raw CRU Packed"; break;
case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break;
case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break;
case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break;
@@ -1421,6 +1422,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break;
case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break;
case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break;
+ case V4L2_PIX_FMT_RAW_CRU12: descr = "12-bit Raw CRU Packed"; break;
case V4L2_PIX_FMT_SBGGR14: descr = "14-bit Bayer BGBG/GRGR"; break;
case V4L2_PIX_FMT_SGBRG14: descr = "14-bit Bayer GBGB/RGRG"; break;
case V4L2_PIX_FMT_SGRBG14: descr = "14-bit Bayer GRGR/BGBG"; break;
@@ -1429,10 +1431,12 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SGBRG14P: descr = "14-bit Bayer GBGB/RGRG Packed"; break;
case V4L2_PIX_FMT_SGRBG14P: descr = "14-bit Bayer GRGR/BGBG Packed"; break;
case V4L2_PIX_FMT_SRGGB14P: descr = "14-bit Bayer RGRG/GBGB Packed"; break;
+ case V4L2_PIX_FMT_RAW_CRU14: descr = "14-bit Raw CRU Packed"; break;
case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break;
case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break;
case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break;
case V4L2_PIX_FMT_SRGGB16: descr = "16-bit Bayer RGRG/GBGB"; break;
+ case V4L2_PIX_FMT_RAW_CRU20: descr = "20-bit Raw CRU Packed"; break;
case V4L2_PIX_FMT_SN9C20X_I420: descr = "GSPCA SN9C20X I420"; break;
case V4L2_PIX_FMT_SPCA501: descr = "GSPCA SPCA501"; break;
case V4L2_PIX_FMT_SPCA505: descr = "GSPCA SPCA505"; break;
@@ -1459,6 +1463,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_META_FMT_VSP1_HGO: descr = "R-Car VSP1 1-D Histogram"; break;
case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break;
case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break;
+ case V4L2_META_FMT_UVC_MSXU_1_5: descr = "UVC MSXU Metadata"; break;
case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break;
case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break;
@@ -3245,7 +3250,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
return ret;
}
-static unsigned int video_translate_cmd(unsigned int cmd)
+unsigned int v4l2_translate_cmd(unsigned int cmd)
{
#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
switch (cmd) {
@@ -3266,6 +3271,7 @@ static unsigned int video_translate_cmd(unsigned int cmd)
return cmd;
}
+EXPORT_SYMBOL_GPL(v4l2_translate_cmd);
static int video_get_user(void __user *arg, void *parg,
unsigned int real_cmd, unsigned int cmd,
@@ -3426,7 +3432,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
size_t array_size = 0;
void __user *user_ptr = NULL;
void **kernel_ptr = NULL;
- unsigned int cmd = video_translate_cmd(orig_cmd);
+ unsigned int cmd = v4l2_translate_cmd(orig_cmd);
const size_t ioc_size = _IOC_SIZE(cmd);
/* Copy arguments into temp kernel buffer */
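Exporting v4l2_translate_cmd() lets drivers such as uvcvideo (above) classify commands without enumerating the 32-bit-time variants: on kernels with CONFIG_COMPAT_32BIT_TIME, VIDIOC_QBUF_TIME32 and friends fold onto their canonical values. A sketch with a hypothetical demo helper:

#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>

static bool demo_is_buffer_cmd(unsigned int orig_cmd)
{
	/* The _TIME32 variants collapse onto the canonical commands. */
	switch (v4l2_translate_cmd(orig_cmd)) {
	case VIDIOC_QUERYBUF:
	case VIDIOC_QBUF:
	case VIDIOC_DQBUF:
	case VIDIOC_PREPARE_BUF:
		return true;
	default:
		return false;
	}
}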
diff --git a/drivers/media/v4l2-core/v4l2-jpeg.c b/drivers/media/v4l2-core/v4l2-jpeg.c
index 6e2647323522..36a0f1a1b0d9 100644
--- a/drivers/media/v4l2-core/v4l2-jpeg.c
+++ b/drivers/media/v4l2-core/v4l2-jpeg.c
@@ -711,83 +711,3 @@ int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out)
return marker;
}
EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_header);
-
-/**
- * v4l2_jpeg_parse_frame_header - parse frame header
- * @buf: address of the frame header, after the SOF0 marker
- * @len: length of the frame header
- * @frame_header: returns the parsed frame header
- *
- * Returns 0 or negative error if parsing failed.
- */
-int v4l2_jpeg_parse_frame_header(void *buf, size_t len,
- struct v4l2_jpeg_frame_header *frame_header)
-{
- struct jpeg_stream stream;
-
- stream.curr = buf;
- stream.end = stream.curr + len;
- return jpeg_parse_frame_header(&stream, SOF0, frame_header);
-}
-EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_frame_header);
-
-/**
- * v4l2_jpeg_parse_scan_header - parse scan header
- * @buf: address of the scan header, after the SOS marker
- * @len: length of the scan header
- * @scan_header: returns the parsed scan header
- *
- * Returns 0 or negative error if parsing failed.
- */
-int v4l2_jpeg_parse_scan_header(void *buf, size_t len,
- struct v4l2_jpeg_scan_header *scan_header)
-{
- struct jpeg_stream stream;
-
- stream.curr = buf;
- stream.end = stream.curr + len;
- return jpeg_parse_scan_header(&stream, scan_header);
-}
-EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_scan_header);
-
-/**
- * v4l2_jpeg_parse_quantization_tables - parse quantization tables segment
- * @buf: address of the quantization table segment, after the DQT marker
- * @len: length of the quantization table segment
- * @precision: sample precision (P) in bits per component
- * @q_tables: returns four references into the buffer for the
- * four possible quantization table destinations
- *
- * Returns 0 or negative error if parsing failed.
- */
-int v4l2_jpeg_parse_quantization_tables(void *buf, size_t len, u8 precision,
- struct v4l2_jpeg_reference *q_tables)
-{
- struct jpeg_stream stream;
-
- stream.curr = buf;
- stream.end = stream.curr + len;
- return jpeg_parse_quantization_tables(&stream, precision, q_tables);
-}
-EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_quantization_tables);
-
-/**
- * v4l2_jpeg_parse_huffman_tables - parse huffman tables segment
- * @buf: address of the Huffman table segment, after the DHT marker
- * @len: length of the Huffman table segment
- * @huffman_tables: returns four references into the buffer for the
- * four possible Huffman table destinations, in
- * the order DC0, DC1, AC0, AC1
- *
- * Returns 0 or negative error if parsing failed.
- */
-int v4l2_jpeg_parse_huffman_tables(void *buf, size_t len,
- struct v4l2_jpeg_reference *huffman_tables)
-{
- struct jpeg_stream stream;
-
- stream.curr = buf;
- stream.end = stream.curr + len;
- return jpeg_parse_huffman_tables(&stream, huffman_tables);
-}
-EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_huffman_tables);
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index a3074f469b15..4fd25fea3b58 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -1004,6 +1004,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
struct v4l2_subdev_route *routes =
(struct v4l2_subdev_route *)(uintptr_t)routing->routes;
struct v4l2_subdev_krouting krouting = {};
+ unsigned int num_active_routes = 0;
unsigned int i;
if (!v4l2_subdev_enable_streams_api)
@@ -1041,9 +1042,22 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
if (!(pads[route->source_pad].flags &
MEDIA_PAD_FL_SOURCE))
return -EINVAL;
+
+ if (route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)
+ num_active_routes++;
}
/*
+ * Drivers that implement routing need to report a frame
+ * descriptor accordingly, with up to one entry per route. Until
+ * the frame descriptor entries are allocated dynamically,
+ * limit the number of active routes to
+ * V4L2_FRAME_DESC_ENTRY_MAX.
+ */
+ if (num_active_routes > V4L2_FRAME_DESC_ENTRY_MAX)
+ return -E2BIG;
+
+ /*
* If the driver doesn't support setting routing, just return
* the routing table.
*/
@@ -2219,6 +2233,9 @@ static void v4l2_subdev_collect_streams(struct v4l2_subdev *sd,
*found_streams = BIT_ULL(0);
*enabled_streams =
(sd->enabled_pads & BIT_ULL(pad)) ? BIT_ULL(0) : 0;
+ dev_dbg(sd->dev,
+ "collect_streams: sub-device \"%s\" does not support streams\n",
+ sd->entity.name);
return;
}
@@ -2236,6 +2253,10 @@ static void v4l2_subdev_collect_streams(struct v4l2_subdev *sd,
if (cfg->enabled)
*enabled_streams |= BIT_ULL(cfg->stream);
}
+
+ dev_dbg(sd->dev,
+ "collect_streams: \"%s\":%u: found %#llx enabled %#llx\n",
+ sd->entity.name, pad, *found_streams, *enabled_streams);
}
static void v4l2_subdev_set_streams_enabled(struct v4l2_subdev *sd,
@@ -2271,6 +2292,9 @@ int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
bool use_s_stream;
int ret;
+ dev_dbg(dev, "enable streams \"%s\":%u/%#llx\n", sd->entity.name, pad,
+ streams_mask);
+
/* A few basic sanity checks first. */
if (pad >= sd->entity.num_pads)
return -EINVAL;
@@ -2318,8 +2342,6 @@ int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
goto done;
}
- dev_dbg(dev, "enable streams %u:%#llx\n", pad, streams_mask);
-
already_streaming = v4l2_subdev_is_streaming(sd);
if (!use_s_stream) {
@@ -2371,6 +2393,9 @@ int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
bool use_s_stream;
int ret;
+ dev_dbg(dev, "disable streams \"%s\":%u/%#llx\n", sd->entity.name, pad,
+ streams_mask);
+
/* A few basic sanity checks first. */
if (pad >= sd->entity.num_pads)
return -EINVAL;
@@ -2418,8 +2443,6 @@ int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
goto done;
}
- dev_dbg(dev, "disable streams %u:%#llx\n", pad, streams_mask);
-
if (!use_s_stream) {
/* Call the .disable_streams() operation. */
ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
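The check added above caps the number of active routes because each active route may need its own frame-descriptor entry, and struct v4l2_mbus_frame_desc holds a fixed array of V4L2_FRAME_DESC_ENTRY_MAX entries. As a stand-alone helper the validation would look roughly like this (the series open-codes it in subdev_do_ioctl()):

#include <linux/errno.h>
#include <media/v4l2-subdev.h>

static int demo_check_active_routes(const struct v4l2_subdev_krouting *routing)
{
	unsigned int active = 0;
	unsigned int i;

	for (i = 0; i < routing->num_routes; i++)
		if (routing->routes[i].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)
			active++;

	/* One frame-descriptor entry per active route, fixed-size array. */
	return active > V4L2_FRAME_DESC_ENTRY_MAX ? -E2BIG : 0;
}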
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index c6cc42360887..425c5fba6cb1 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -261,6 +261,36 @@ config MFD_CROS_EC_DEV
To compile this driver as a module, choose M here: the module will be
called cros-ec-dev.
+config MFD_CS40L50_CORE
+ tristate
+ select MFD_CORE
+ select FW_CS_DSP
+ select REGMAP_IRQ
+
+config MFD_CS40L50_I2C
+ tristate "Cirrus Logic CS40L50 (I2C)"
+ select REGMAP_I2C
+ select MFD_CS40L50_CORE
+ depends on I2C
+ help
+ Select this to support the Cirrus Logic CS40L50 Haptic
+ Driver over I2C.
+
+ This driver can be built as a module. If built as a module it will be
+ called "cs40l50-i2c".
+
+config MFD_CS40L50_SPI
+ tristate "Cirrus Logic CS40L50 (SPI)"
+ select REGMAP_SPI
+ select MFD_CS40L50_CORE
+ depends on SPI
+ help
+ Select this to support the Cirrus Logic CS40L50 Haptic
+ Driver over SPI.
+
+ This driver can be built as a module. If built as a module it will be
+ called "cs40l50-spi".
+
config MFD_CS42L43
tristate
select MFD_CORE
@@ -285,6 +315,14 @@ config MFD_CS42L43_SDW
Select this to support the Cirrus Logic CS42L43 PC CODEC with
headphone and class D speaker drivers over SoundWire.
+config MFD_LOCHNAGAR
+ bool "Cirrus Logic Lochnagar Audio Development Board"
+ select MFD_CORE
+ select REGMAP_I2C
+ depends on I2C=y && OF
+ help
+ Support for Cirrus Logic Lochnagar audio development board.
+
config MFD_MACSMC
tristate "Apple Silicon System Management Controller (SMC)"
depends on ARCH_APPLE || COMPILE_TEST
@@ -332,16 +370,6 @@ config MFD_MADERA_SPI
Support for the Cirrus Logic Madera platform audio SoC
core functionality controlled via SPI.
-config MFD_MAX5970
- tristate "Maxim 5970/5978 power switch and monitor"
- depends on I2C && OF
- select MFD_SIMPLE_MFD_I2C
- help
- This driver controls a Maxim 5970/5978 switch via I2C bus.
- The MAX5970/5978 is a smart switch with no output regulation, but
- fault protection and voltage and current monitoring capabilities.
- Also it supports upto 4 indication leds.
-
config MFD_CS47L15
bool "Cirrus Logic CS47L15"
select PINCTRL_CS47L15
@@ -848,6 +876,16 @@ config MFD_88PM886_PMIC
This includes the I2C driver and the core APIs _only_, you have to
select individual components like onkey under the corresponding menus.
+config MFD_MAX5970
+ tristate "Maxim 5970/5978 power switch and monitor"
+ depends on I2C && OF
+ select MFD_SIMPLE_MFD_I2C
+ help
+ This driver controls a Maxim 5970/5978 switch via I2C bus.
+ The MAX5970/5978 is a smart switch with no output regulation, but
+ fault protection and voltage and current monitoring capabilities.
+ It also supports up to 4 indication LEDs.
+
config MFD_MAX14577
tristate "Maxim Semiconductor MAX14577/77836 MUIC + Charger Support"
depends on I2C
@@ -1970,14 +2008,6 @@ config MFD_VX855
VIA VX855/VX875 south bridge. You will need to enable the vx855_spi
and/or vx855_gpio drivers for this to do anything useful.
-config MFD_LOCHNAGAR
- bool "Cirrus Logic Lochnagar Audio Development Board"
- select MFD_CORE
- select REGMAP_I2C
- depends on I2C=y && OF
- help
- Support for Cirrus Logic Lochnagar audio development board.
-
config MFD_ARIZONA
select REGMAP
select REGMAP_IRQ
@@ -2335,36 +2365,6 @@ config MCP_UCB1200_TS
endmenu
-config MFD_CS40L50_CORE
- tristate
- select MFD_CORE
- select FW_CS_DSP
- select REGMAP_IRQ
-
-config MFD_CS40L50_I2C
- tristate "Cirrus Logic CS40L50 (I2C)"
- select REGMAP_I2C
- select MFD_CS40L50_CORE
- depends on I2C
- help
- Select this to support the Cirrus Logic CS40L50 Haptic
- Driver over I2C.
-
- This driver can be built as a module. If built as a module it will be
- called "cs40l50-i2c".
-
-config MFD_CS40L50_SPI
- tristate "Cirrus Logic CS40L50 (SPI)"
- select REGMAP_SPI
- select MFD_CS40L50_CORE
- depends on SPI
- help
- Select this to support the Cirrus Logic CS40L50 Haptic
- Driver over SPI.
-
- This driver can be built as a module. If built as a module it will be
- called "cs40l50-spi".
-
config MFD_VEXPRESS_SYSREG
tristate "Versatile Express System Registers"
depends on VEXPRESS_CONFIG && GPIOLIB
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 049abcbd71ce..f0bc0b5a6f4a 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -580,8 +580,7 @@ static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np)
num_irqs = AB8500_NR_IRQS;
/* If ->irq_base is zero this will give a linear mapping */
- ab8500->domain = irq_domain_create_simple(of_fwnode_handle(ab8500->dev->of_node),
- num_irqs, 0,
+ ab8500->domain = irq_domain_create_simple(dev_fwnode(ab8500->dev), num_irqs, 0,
&ab8500_irq_ops, ab8500);
if (!ab8500->domain) {
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index ac2139597fab..3f8622ee0e59 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -152,7 +152,6 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
}
} while (poll);
- pm_runtime_mark_last_busy(arizona->dev);
pm_runtime_put_autosuspend(arizona->dev);
return IRQ_HANDLED;
diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c
index 4628ca14e766..0a5b42c83f17 100644
--- a/drivers/mfd/atmel-smc.c
+++ b/drivers/mfd/atmel-smc.c
@@ -8,9 +8,16 @@
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
*/
-#include <linux/mfd/syscon/atmel-smc.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
#include <linux/string.h>
+#include <linux/mfd/syscon/atmel-smc.h>
+
/**
* atmel_smc_cs_conf_init - initialize a SMC CS conf
* @conf: the SMC CS conf to initialize
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index e9914e8a29a3..c5f0ebae327f 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -1053,7 +1053,8 @@ static const struct mfd_cell axp152_cells[] = {
};
static struct mfd_cell axp313a_cells[] = {
- MFD_CELL_NAME("axp20x-regulator"),
+ /* AXP323 is sometimes paired with AXP717 as sub-PMIC */
+ MFD_CELL_BASIC("axp20x-regulator", NULL, NULL, 0, 1),
MFD_CELL_RES("axp313a-pek", axp313a_pek_resources),
};
@@ -1230,9 +1231,8 @@ static const struct mfd_cell axp15060_cells[] = {
/* For boards that don't have IRQ line connected to SOC. */
static const struct mfd_cell axp_regulator_only_cells[] = {
- {
- .name = "axp20x-regulator",
- },
+ /* PMIC without IRQ line may be secondary PMIC */
+ MFD_CELL_BASIC("axp20x-regulator", NULL, NULL, 0, 1),
};
static int axp20x_power_off(struct sys_off_data *data)
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 9f84a52b48d6..dc80a272726b 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -87,7 +87,6 @@ static const struct mfd_cell cros_ec_sensorhub_cells[] = {
};
static const struct mfd_cell cros_usbpd_charger_cells[] = {
- { .name = "cros-charge-control", },
{ .name = "cros-usbpd-charger", },
{ .name = "cros-usbpd-logger", },
};
@@ -112,6 +111,10 @@ static const struct mfd_cell cros_ec_ucsi_cells[] = {
{ .name = "cros_ec_ucsi", },
};
+static const struct mfd_cell cros_ec_charge_control_cells[] = {
+ { .name = "cros-charge-control", },
+};
+
static const struct cros_feature_to_cells cros_subdevices[] = {
{
.id = EC_FEATURE_CEC,
@@ -148,6 +151,11 @@ static const struct cros_feature_to_cells cros_subdevices[] = {
.mfd_cells = cros_ec_keyboard_leds_cells,
.num_cells = ARRAY_SIZE(cros_ec_keyboard_leds_cells),
},
+ {
+ .id = EC_FEATURE_CHARGER,
+ .mfd_cells = cros_ec_charge_control_cells,
+ .num_cells = ARRAY_SIZE(cros_ec_charge_control_cells),
+ },
};
static const struct mfd_cell cros_ec_platform_cells[] = {
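Splitting cros-charge-control into its own cell and keying it on EC_FEATURE_CHARGER means the sub-device is instantiated only on ECs that actually advertise charging support, like the other cros_subdevices[] entries. Roughly how the table is consumed, simplified from the driver's probe path (the real code also handles errors and legacy fallbacks):

#include <linux/device.h>
#include <linux/mfd/core.h>
#include <linux/platform_data/cros_ec_proto.h>

static void demo_register_by_feature(struct device *dev,
				     struct cros_ec_dev *ec)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cros_subdevices); i++) {
		/* Skip cells whose EC feature flag is not advertised. */
		if (!cros_ec_check_features(ec, cros_subdevices[i].id))
			continue;

		mfd_add_hotplug_devices(dev, cros_subdevices[i].mfd_cells,
					cros_subdevices[i].num_cells);
	}
}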
diff --git a/drivers/mfd/cs40l50-core.c b/drivers/mfd/cs40l50-core.c
index 4859a33777a0..662d987b650b 100644
--- a/drivers/mfd/cs40l50-core.c
+++ b/drivers/mfd/cs40l50-core.c
@@ -52,7 +52,7 @@ static const struct regmap_irq cs40l50_reg_irqs[] = {
CS40L50_GLOBAL_ERROR_MASK),
};
-static struct regmap_irq_chip cs40l50_irq_chip = {
+static const struct regmap_irq_chip cs40l50_irq_chip = {
.name = "cs40l50",
.status_base = CS40L50_IRQ1_INT_1,
.mask_base = CS40L50_IRQ1_MASK_1,
@@ -531,7 +531,6 @@ int cs40l50_probe(struct cs40l50 *cs40l50)
if (ret)
return dev_err_probe(dev, ret, "Failed to request %s\n", CS40L50_FW);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c
index 103787f37443..07c8f1b8183e 100644
--- a/drivers/mfd/cs42l43.c
+++ b/drivers/mfd/cs42l43.c
@@ -962,7 +962,6 @@ static void cs42l43_boot_work(struct work_struct *work)
goto err;
}
- pm_runtime_mark_last_busy(cs42l43->dev);
pm_runtime_put_autosuspend(cs42l43->dev);
return;
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index d47152467951..0aab6428e042 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -71,8 +71,8 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev,
if (irq < 0)
return irq;
- tsadc->domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node), 2, 0,
- &mx25_tsadc_domain_ops, tsadc);
+ tsadc->domain = irq_domain_create_simple(dev_fwnode(dev), 2, 0, &mx25_tsadc_domain_ops,
+ tsadc);
if (!tsadc->domain) {
dev_err(dev, "Failed to add irq domain\n");
return -ENOMEM;
diff --git a/drivers/mfd/ioc3.c b/drivers/mfd/ioc3.c
index 58656837b7c6..5f8ac364b610 100644
--- a/drivers/mfd/ioc3.c
+++ b/drivers/mfd/ioc3.c
@@ -6,7 +6,7 @@
*
* Based on work by:
* Stanislaw Skowronek <skylark@unaligned.org>
- * Joshua Kinard <kumba@gentoo.org>
+ * Joshua Kinard <linux@kumba.dev>
* Brent Casavant <bcasavan@sgi.com> - IOC4 master driver
* Pat Gefre <pfg@sgi.com> - IOC3 serial port IRQ demuxer
*/
diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c
index ea0fdf7a4b6e..f62fa2d7f010 100644
--- a/drivers/mfd/lp8788-irq.c
+++ b/drivers/mfd/lp8788-irq.c
@@ -161,7 +161,7 @@ int lp8788_irq_init(struct lp8788 *lp, int irq)
return -ENOMEM;
irqd->lp = lp;
- irqd->domain = irq_domain_create_linear(of_fwnode_handle(lp->dev->of_node), LP8788_INT_MAX,
+ irqd->domain = irq_domain_create_linear(dev_fwnode(lp->dev), LP8788_INT_MAX,
&lp8788_domain_ops, irqd);
if (!irqd->domain) {
dev_err(lp->dev, "failed to add irq domain err\n");
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
index 9f0bcc3ad7a1..f467b00d2366 100644
--- a/drivers/mfd/mt6358-irq.c
+++ b/drivers/mfd/mt6358-irq.c
@@ -272,8 +272,7 @@ int mt6358_irq_init(struct mt6397_chip *chip)
irqd->pmic_ints[i].en_reg_shift * j, 0);
}
- chip->irq_domain = irq_domain_create_linear(of_fwnode_handle(chip->dev->of_node),
- irqd->num_pmic_irqs,
+ chip->irq_domain = irq_domain_create_linear(dev_fwnode(chip->dev), irqd->num_pmic_irqs,
&mt6358_irq_domain_ops, chip);
if (!chip->irq_domain) {
dev_err(chip->dev, "Could not create IRQ domain\n");
diff --git a/drivers/mfd/mt6370.c b/drivers/mfd/mt6370.c
index c126ccb25d66..c7c2efe3598c 100644
--- a/drivers/mfd/mt6370.c
+++ b/drivers/mfd/mt6370.c
@@ -95,7 +95,7 @@ static const struct regmap_irq mt6370_irqs[] = {
REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED1_SHORT, 8),
REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED2_STRB, 8),
REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED1_STRB, 8),
- REGMAP_IRQ_REG_LINE(mT6370_IRQ_FLED2_STRB_TO, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED2_STRB_TO, 8),
REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED1_STRB_TO, 8),
REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED2_TOR, 8),
REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED1_TOR, 8),
diff --git a/drivers/mfd/mt6370.h b/drivers/mfd/mt6370.h
index 094e59e4af4e..dd9ccc0a53f1 100644
--- a/drivers/mfd/mt6370.h
+++ b/drivers/mfd/mt6370.h
@@ -69,7 +69,7 @@
#define MT6370_IRQ_FLED1_SHORT 79
#define MT6370_IRQ_FLED2_STRB 80
#define MT6370_IRQ_FLED1_STRB 81
-#define mT6370_IRQ_FLED2_STRB_TO 82
+#define MT6370_IRQ_FLED2_STRB_TO 82
#define MT6370_IRQ_FLED1_STRB_TO 83
#define MT6370_IRQ_FLED2_TOR 84
#define MT6370_IRQ_FLED1_TOR 85
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 5f8ed8988907..3e58d0764c7e 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -136,7 +136,7 @@ static const struct mfd_cell mt6323_devs[] = {
.name = "mt6323-led",
.of_compatible = "mediatek,mt6323-led"
}, {
- .name = "mtk-pmic-keys",
+ .name = "mt6323-keys",
.num_resources = ARRAY_SIZE(mt6323_keys_resources),
.resources = mt6323_keys_resources,
.of_compatible = "mediatek,mt6323-keys"
@@ -153,7 +153,7 @@ static const struct mfd_cell mt6328_devs[] = {
.name = "mt6328-regulator",
.of_compatible = "mediatek,mt6328-regulator"
}, {
- .name = "mtk-pmic-keys",
+ .name = "mt6328-keys",
.num_resources = ARRAY_SIZE(mt6328_keys_resources),
.resources = mt6328_keys_resources,
.of_compatible = "mediatek,mt6328-keys"
@@ -175,7 +175,7 @@ static const struct mfd_cell mt6357_devs[] = {
.name = "mt6357-sound",
.of_compatible = "mediatek,mt6357-sound"
}, {
- .name = "mtk-pmic-keys",
+ .name = "mt6357-keys",
.num_resources = ARRAY_SIZE(mt6357_keys_resources),
.resources = mt6357_keys_resources,
.of_compatible = "mediatek,mt6357-keys"
@@ -196,7 +196,7 @@ static const struct mfd_cell mt6331_mt6332_devs[] = {
.name = "mt6332-regulator",
.of_compatible = "mediatek,mt6332-regulator"
}, {
- .name = "mtk-pmic-keys",
+ .name = "mt6331-keys",
.num_resources = ARRAY_SIZE(mt6331_keys_resources),
.resources = mt6331_keys_resources,
.of_compatible = "mediatek,mt6331-keys"
@@ -240,7 +240,7 @@ static const struct mfd_cell mt6359_devs[] = {
},
{ .name = "mt6359-sound", },
{
- .name = "mtk-pmic-keys",
+ .name = "mt6359-keys",
.num_resources = ARRAY_SIZE(mt6359_keys_resources),
.resources = mt6359_keys_resources,
.of_compatible = "mediatek,mt6359-keys"
@@ -272,7 +272,7 @@ static const struct mfd_cell mt6397_devs[] = {
.name = "mt6397-pinctrl",
.of_compatible = "mediatek,mt6397-pinctrl",
}, {
- .name = "mtk-pmic-keys",
+ .name = "mt6397-keys",
.num_resources = ARRAY_SIZE(mt6397_keys_resources),
.resources = mt6397_keys_resources,
.of_compatible = "mediatek,mt6397-keys"
diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c
index badc614b4345..0e463026c5a9 100644
--- a/drivers/mfd/mt6397-irq.c
+++ b/drivers/mfd/mt6397-irq.c
@@ -216,8 +216,8 @@ int mt6397_irq_init(struct mt6397_chip *chip)
regmap_write(chip->regmap, chip->int_con[2], 0x0);
chip->pm_nb.notifier_call = mt6397_irq_pm_notifier;
- chip->irq_domain = irq_domain_create_linear(of_fwnode_handle(chip->dev->of_node),
- MT6397_IRQ_NR, &mt6397_irq_domain_ops, chip);
+ chip->irq_domain = irq_domain_create_linear(dev_fwnode(chip->dev), MT6397_IRQ_NR,
+ &mt6397_irq_domain_ops, chip);
if (!chip->irq_domain) {
dev_err(chip->dev, "could not create irq domain\n");
return -ENOMEM;
diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
index c96ea6fbede8..1149f7102a36 100644
--- a/drivers/mfd/qcom-pm8xxx.c
+++ b/drivers/mfd/qcom-pm8xxx.c
@@ -559,8 +559,8 @@ static int pm8xxx_probe(struct platform_device *pdev)
chip->pm_irq_data = data;
spin_lock_init(&chip->pm_irq_lock);
- chip->irqdomain = irq_domain_create_linear(of_fwnode_handle(pdev->dev.of_node),
- data->num_irqs, &pm8xxx_irq_domain_ops, chip);
+ chip->irqdomain = irq_domain_create_linear(dev_fwnode(&pdev->dev), data->num_irqs,
+ &pm8xxx_irq_domain_ops, chip);
if (!chip->irqdomain)
return -ENODEV;
diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c
index 71c2b80a4678..def4587fdfb8 100644
--- a/drivers/mfd/rk8xx-core.c
+++ b/drivers/mfd/rk8xx-core.c
@@ -10,6 +10,7 @@
* Author: Wadim Egorov <w.egorov@phytec.de>
*/
+#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/mfd/rk808.h>
#include <linux/mfd/core.h>
@@ -699,6 +700,7 @@ int rk8xx_probe(struct device *dev, int variant, unsigned int irq, struct regmap
const struct mfd_cell *cells;
int dual_support = 0;
int nr_pre_init_regs;
+ u32 rst_fun = 0;
int nr_cells;
int ret;
int i;
@@ -726,6 +728,16 @@ int rk8xx_probe(struct device *dev, int variant, unsigned int irq, struct regmap
cells = rk806s;
nr_cells = ARRAY_SIZE(rk806s);
dual_support = IRQF_SHARED;
+
+ ret = device_property_read_u32(dev, "rockchip,reset-mode", &rst_fun);
+ if (ret)
+ break;
+
+ ret = regmap_update_bits(rk808->regmap, RK806_SYS_CFG3, RK806_RST_FUN_MSK,
+ FIELD_PREP(RK806_RST_FUN_MSK, rst_fun));
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to configure requested restart/reset behavior\n");
break;
case RK808_ID:
rk808->regmap_irq_chip = &rk808_irq_chip;
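The RK806 hunk above is the usual property-to-bitfield pattern: read an optional u32 from firmware and pack it into a masked register field with FIELD_PREP(). A sketch of that pattern in isolation, assuming the RK806_SYS_CFG3/RK806_RST_FUN_MSK definitions this series relies on in <linux/mfd/rk808.h>:

#include <linux/bitfield.h>
#include <linux/mfd/rk808.h>
#include <linux/property.h>
#include <linux/regmap.h>

static int demo_apply_reset_mode(struct device *dev, struct regmap *map)
{
	u32 rst_fun;
	int ret;

	/* Property absent: keep the hardware default; not an error. */
	ret = device_property_read_u32(dev, "rockchip,reset-mode", &rst_fun);
	if (ret)
		return 0;

	/* Update only the function bits, leaving the rest of CFG3 alone. */
	return regmap_update_bits(map, RK806_SYS_CFG3, RK806_RST_FUN_MSK,
				  FIELD_PREP(RK806_RST_FUN_MSK, rst_fun));
}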
diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c
index 738d8b3b9ffe..a14b7aa69c3c 100644
--- a/drivers/mfd/rohm-bd71828.c
+++ b/drivers/mfd/rohm-bd71828.c
@@ -25,7 +25,7 @@ static struct gpio_keys_button button = {
.type = EV_KEY,
};
-static struct gpio_keys_platform_data bd71828_powerkey_data = {
+static const struct gpio_keys_platform_data bd71828_powerkey_data = {
.buttons = &button,
.nbuttons = 1,
.name = "bd71828-pwrkey",
@@ -43,7 +43,7 @@ static const struct resource bd71828_rtc_irqs[] = {
DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC2, "bd70528-rtc-alm-2"),
};
-static struct resource bd71815_power_irqs[] = {
+static const struct resource bd71815_power_irqs[] = {
DEFINE_RES_IRQ_NAMED(BD71815_INT_DCIN_RMV, "bd71815-dcin-rmv"),
DEFINE_RES_IRQ_NAMED(BD71815_INT_CLPS_OUT, "bd71815-clps-out"),
DEFINE_RES_IRQ_NAMED(BD71815_INT_CLPS_IN, "bd71815-clps-in"),
@@ -93,7 +93,7 @@ static struct resource bd71815_power_irqs[] = {
DEFINE_RES_IRQ_NAMED(BD71815_INT_TEMP_BAT_HI_DET, "bd71815-bat-hi-det"),
};
-static struct mfd_cell bd71815_mfd_cells[] = {
+static const struct mfd_cell bd71815_mfd_cells[] = {
{ .name = "bd71815-pmic", },
{ .name = "bd71815-clk", },
{ .name = "bd71815-gpo", },
@@ -109,7 +109,7 @@ static struct mfd_cell bd71815_mfd_cells[] = {
},
};
-static struct mfd_cell bd71828_mfd_cells[] = {
+static const struct mfd_cell bd71828_mfd_cells[] = {
{ .name = "bd71828-pmic", },
{ .name = "bd71828-gpio", },
{ .name = "bd71828-led", .of_compatible = "rohm,bd71828-leds" },
@@ -223,7 +223,7 @@ static unsigned int bit5_offsets[] = {3}; /* VSYS IRQ */
static unsigned int bit6_offsets[] = {1, 2}; /* DCIN IRQ */
static unsigned int bit7_offsets[] = {0}; /* BUCK IRQ */
-static struct regmap_irq_sub_irq_map bd718xx_sub_irq_offsets[] = {
+static const struct regmap_irq_sub_irq_map bd718xx_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
@@ -493,7 +493,7 @@ static int bd71828_i2c_probe(struct i2c_client *i2c)
const struct regmap_config *regmap_config;
const struct regmap_irq_chip *irqchip;
unsigned int chip_type;
- struct mfd_cell *mfd;
+ const struct mfd_cell *mfd;
int cells;
int button_irq;
int clkmode_reg;
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index a5f9241fa3f2..50bf3260f65d 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -965,7 +965,7 @@ static const struct gpio_chip gpio_chip_template = {
.ngpio = 32,
.direction_input = sm501_gpio_input,
.direction_output = sm501_gpio_output,
- .set_rv = sm501_gpio_set,
+ .set = sm501_gpio_set,
.get = sm501_gpio_get,
};
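This sm501 hunk, and the matching ones later for tps65010, ucb1x00, pci1xxxx and fpc202, rename the GPIO setter callback from .set_rv back to .set. This tracks gpiolib's move to int-returning setters: the new callback was staged under the temporary name set_rv and, once the old void variant was gone, took over the canonical slot. Expected shape of such a setter (names here are hypothetical):

	/* The setter now reports failures instead of returning void. */
	static int my_gpio_set(struct gpio_chip *chip, unsigned int offset,
			       int value)
	{
		/* drive the line at 'offset' to 'value'; return 0 or -errno */
		return 0;
	}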
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index e3c116ee4034..b3dbc02aaf79 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -5,6 +5,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/export.h>
#include <linux/mfd/stm32-timers.h>
#include <linux/module.h>
#include <linux/of_platform.h>
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index 823b1d29389e..f683fdb6ece6 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -269,9 +269,8 @@ static int stmfx_irq_init(struct i2c_client *client)
u32 irqoutpin = 0, irqtrigger;
int ret;
- stmfx->irq_domain = irq_domain_create_simple(of_fwnode_handle(stmfx->dev->of_node),
- STMFX_REG_IRQ_SRC_MAX, 0,
- &stmfx_irq_ops, stmfx);
+ stmfx->irq_domain = irq_domain_create_simple(dev_fwnode(stmfx->dev), STMFX_REG_IRQ_SRC_MAX,
+ 0, &stmfx_irq_ops, stmfx);
if (!stmfx->irq_domain) {
dev_err(stmfx->dev, "Failed to create IRQ domain\n");
return -EINVAL;
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 03bd5cd66798..8a144ec52201 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -620,7 +620,7 @@ static int tps65010_probe(struct i2c_client *client)
tps->chip.parent = &client->dev;
tps->chip.owner = THIS_MODULE;
- tps->chip.set_rv = tps65010_gpio_set;
+ tps->chip.set = tps65010_gpio_set;
tps->chip.direction_output = tps65010_output;
/* NOTE: only partial support for inputs; nyet IRQs */
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 4e9669d327b4..c240fac0ede7 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -158,8 +158,8 @@ static int tps65217_irq_init(struct tps65217 *tps, int irq)
tps65217_set_bits(tps, TPS65217_REG_INT, TPS65217_INT_MASK,
TPS65217_INT_MASK, TPS65217_PROTECT_NONE);
- tps->irq_domain = irq_domain_create_linear(of_fwnode_handle(tps->dev->of_node),
- TPS65217_NUM_IRQ, &tps65217_irq_domain_ops, tps);
+ tps->irq_domain = irq_domain_create_linear(dev_fwnode(tps->dev), TPS65217_NUM_IRQ,
+ &tps65217_irq_domain_ops, tps);
if (!tps->irq_domain) {
dev_err(tps->dev, "Could not create IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c
index fd390600fbf0..65a952555218 100644
--- a/drivers/mfd/tps65219.c
+++ b/drivers/mfd/tps65219.c
@@ -190,7 +190,7 @@ static const struct resource tps65219_regulator_resources[] = {
static const struct mfd_cell tps65214_cells[] = {
MFD_CELL_RES("tps65214-regulator", tps65214_regulator_resources),
- MFD_CELL_NAME("tps65215-gpio"),
+ MFD_CELL_NAME("tps65214-gpio"),
};
static const struct mfd_cell tps65215_cells[] = {
@@ -238,7 +238,7 @@ static unsigned int tps65214_bit4_offsets[] = { TPS65214_REG_INT_BUCK_3_POS };
static unsigned int tps65214_bit5_offsets[] = { TPS65214_REG_INT_LDO_1_2_POS };
static unsigned int tps65214_bit7_offsets[] = { TPS65214_REG_INT_PB_POS };
-static struct regmap_irq_sub_irq_map tps65219_sub_irq_offsets[] = {
+static const struct regmap_irq_sub_irq_map tps65219_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
@@ -249,7 +249,7 @@ static struct regmap_irq_sub_irq_map tps65219_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit7_offsets),
};
-static struct regmap_irq_sub_irq_map tps65215_sub_irq_offsets[] = {
+static const struct regmap_irq_sub_irq_map tps65215_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
@@ -260,7 +260,7 @@ static struct regmap_irq_sub_irq_map tps65215_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit7_offsets),
};
-static struct regmap_irq_sub_irq_map tps65214_sub_irq_offsets[] = {
+static const struct regmap_irq_sub_irq_map tps65214_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(tps65214_bit0_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(tps65214_bit1_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(tps65214_bit2_offsets),
@@ -455,7 +455,7 @@ struct tps65219_chip_data {
int n_cells;
};
-static struct tps65219_chip_data chip_info_table[] = {
+static const struct tps65219_chip_data chip_info_table[] = {
[TPS65214] = {
.irq_chip = &tps65214_irq_chip,
.cells = tps65214_cells,
@@ -476,7 +476,8 @@ static struct tps65219_chip_data chip_info_table[] = {
static int tps65219_probe(struct i2c_client *client)
{
struct tps65219 *tps;
- struct tps65219_chip_data *pmic;
+ const struct tps65219_chip_data *pmic;
+ unsigned int chip_id;
bool pwr_button;
int ret;
@@ -487,8 +488,8 @@ static int tps65219_probe(struct i2c_client *client)
i2c_set_clientdata(client, tps);
tps->dev = &client->dev;
- tps->chip_id = (uintptr_t)i2c_get_match_data(client);
- pmic = &chip_info_table[tps->chip_id];
+ chip_id = (uintptr_t)i2c_get_match_data(client);
+ pmic = &chip_info_table[chip_id];
tps->regmap = devm_regmap_init_i2c(client, &tps65219_regmap_config);
if (IS_ERR(tps->regmap)) {
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 853c48286071..8d5fe2b60bfa 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -363,9 +363,9 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
new_irq_base = 0;
}
- tps6586x->irq_domain = irq_domain_create_simple(of_fwnode_handle(tps6586x->dev->of_node),
- irq_num, new_irq_base, &tps6586x_domain_ops,
- tps6586x);
+ tps6586x->irq_domain = irq_domain_create_simple(dev_fwnode(tps6586x->dev), irq_num,
+ new_irq_base, &tps6586x_domain_ops,
+ tps6586x);
if (!tps6586x->irq_domain) {
dev_err(tps6586x->dev, "Failed to create IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 00b14cef1dfb..0ca00f618d4d 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -256,80 +256,6 @@ int twl6030_interrupt_mask(u8 bit_mask, u8 offset)
}
EXPORT_SYMBOL(twl6030_interrupt_mask);
-int twl6030_mmc_card_detect_config(void)
-{
- int ret;
- u8 reg_val = 0;
-
- /* Unmasking the Card detect Interrupt line for MMC1 from Phoenix */
- twl6030_interrupt_unmask(TWL6030_MMCDETECT_INT_MASK,
- REG_INT_MSK_LINE_B);
- twl6030_interrupt_unmask(TWL6030_MMCDETECT_INT_MASK,
- REG_INT_MSK_STS_B);
- /*
- * Initially Configuring MMC_CTRL for receiving interrupts &
- * Card status on TWL6030 for MMC1
- */
- ret = twl_i2c_read_u8(TWL6030_MODULE_ID0, &reg_val, TWL6030_MMCCTRL);
- if (ret < 0) {
- pr_err("twl6030: Failed to read MMCCTRL, error %d\n", ret);
- return ret;
- }
- reg_val &= ~VMMC_AUTO_OFF;
- reg_val |= SW_FC;
- ret = twl_i2c_write_u8(TWL6030_MODULE_ID0, reg_val, TWL6030_MMCCTRL);
- if (ret < 0) {
- pr_err("twl6030: Failed to write MMCCTRL, error %d\n", ret);
- return ret;
- }
-
- /* Configuring PullUp-PullDown register */
- ret = twl_i2c_read_u8(TWL6030_MODULE_ID0, &reg_val,
- TWL6030_CFG_INPUT_PUPD3);
- if (ret < 0) {
- pr_err("twl6030: Failed to read CFG_INPUT_PUPD3, error %d\n",
- ret);
- return ret;
- }
- reg_val &= ~(MMC_PU | MMC_PD);
- ret = twl_i2c_write_u8(TWL6030_MODULE_ID0, reg_val,
- TWL6030_CFG_INPUT_PUPD3);
- if (ret < 0) {
- pr_err("twl6030: Failed to write CFG_INPUT_PUPD3, error %d\n",
- ret);
- return ret;
- }
-
- return irq_find_mapping(twl6030_irq->irq_domain,
- MMCDETECT_INTR_OFFSET);
-}
-EXPORT_SYMBOL(twl6030_mmc_card_detect_config);
-
-int twl6030_mmc_card_detect(struct device *dev, int slot)
-{
- int ret = -EIO;
- u8 read_reg = 0;
- struct platform_device *pdev = to_platform_device(dev);
-
- if (pdev->id) {
- /* TWL6030 provide's Card detect support for
- * only MMC1 controller.
- */
- pr_err("Unknown MMC controller %d in %s\n", pdev->id, __func__);
- return ret;
- }
- /*
- * BIT0 of MMC_CTRL on TWL6030 provides card status for MMC1
- * 0 - Card not present ,1 - Card present
- */
- ret = twl_i2c_read_u8(TWL6030_MODULE_ID0, &read_reg,
- TWL6030_MMCCTRL);
- if (ret >= 0)
- ret = read_reg & STS_MMC;
- return ret;
-}
-EXPORT_SYMBOL(twl6030_mmc_card_detect);
-
static int twl6030_irq_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hwirq)
{
@@ -410,9 +336,8 @@ int twl6030_init_irq(struct device *dev, int irq_num)
atomic_set(&twl6030_irq->wakeirqs, 0);
twl6030_irq->irq_mapping_tbl = of_id->data;
- twl6030_irq->irq_domain =
- irq_domain_create_linear(of_fwnode_handle(dev->of_node), nr_irqs,
- &twl6030_irq_domain_ops, twl6030_irq);
+ twl6030_irq->irq_domain = irq_domain_create_linear(dev_fwnode(dev), nr_irqs,
+ &twl6030_irq_domain_ops, twl6030_irq);
if (!twl6030_irq->irq_domain) {
dev_err(dev, "Can't add irq_domain\n");
return -ENOMEM;
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index 218d6195fad2..562a0f939f6e 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -69,7 +69,7 @@ static const struct reg_default twl6040_defaults[] = {
{ 0x2E, 0x00 }, /* REG_STATUS (ro) */
};
-static struct reg_sequence twl6040_patch[] = {
+static const struct reg_sequence twl6040_patch[] = {
/*
* Select I2C bus access to dual access registers
* Interrupt register is cleared on read
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index fd71ba29f6b5..4b450d78a65f 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -570,7 +570,7 @@ static int ucb1x00_probe(struct mcp *mcp)
ucb->gpio.owner = THIS_MODULE;
ucb->gpio.base = pdata->gpio_base;
ucb->gpio.ngpio = 10;
- ucb->gpio.set_rv = ucb1x00_gpio_set;
+ ucb->gpio.set = ucb1x00_gpio_set;
ucb->gpio.get = ucb1x00_gpio_get;
ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index b3883fa5dd9f..defd5f173eb6 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -587,13 +587,11 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
}
if (irq_base)
- domain = irq_domain_create_legacy(of_fwnode_handle(wm831x->dev->of_node),
- ARRAY_SIZE(wm831x_irqs), irq_base, 0,
- &wm831x_irq_domain_ops, wm831x);
+ domain = irq_domain_create_legacy(dev_fwnode(wm831x->dev), ARRAY_SIZE(wm831x_irqs),
+ irq_base, 0, &wm831x_irq_domain_ops, wm831x);
else
- domain = irq_domain_create_linear(of_fwnode_handle(wm831x->dev->of_node),
- ARRAY_SIZE(wm831x_irqs), &wm831x_irq_domain_ops,
- wm831x);
+ domain = irq_domain_create_linear(dev_fwnode(wm831x->dev), ARRAY_SIZE(wm831x_irqs),
+ &wm831x_irq_domain_ops, wm831x);
if (!domain) {
dev_warn(wm831x->dev, "Failed to allocate IRQ domain\n");
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
index ff8f4404d10f..8eddbaa1fccd 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -438,7 +438,7 @@ static int pci1xxxx_gpio_setup(struct pci1xxxx_gpio *priv, int irq)
gchip->direction_output = pci1xxxx_gpio_direction_output;
gchip->get_direction = pci1xxxx_gpio_get_direction;
gchip->get = pci1xxxx_gpio_get;
- gchip->set_rv = pci1xxxx_gpio_set;
+ gchip->set = pci1xxxx_gpio_set;
gchip->set_config = pci1xxxx_gpio_set_config;
gchip->dbg_show = NULL;
gchip->base = -1;
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index c4e5e2c977be..1c156a3f845e 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -37,6 +37,8 @@
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)
+#define COMMAND_ENABLE_DOORBELL BIT(6)
+#define COMMAND_DISABLE_DOORBELL BIT(7)
#define PCI_ENDPOINT_TEST_STATUS 0x8
#define STATUS_READ_SUCCESS BIT(0)
@@ -48,6 +50,11 @@
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
+#define STATUS_DOORBELL_SUCCESS BIT(9)
+#define STATUS_DOORBELL_ENABLE_SUCCESS BIT(10)
+#define STATUS_DOORBELL_ENABLE_FAIL BIT(11)
+#define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12)
+#define STATUS_DOORBELL_DISABLE_FAIL BIT(13)
#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
@@ -62,6 +69,7 @@
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
#define PCI_ENDPOINT_TEST_FLAGS 0x2c
+
#define FLAG_USE_DMA BIT(0)
#define PCI_ENDPOINT_TEST_CAPS 0x30
@@ -70,6 +78,10 @@
#define CAP_MSIX BIT(2)
#define CAP_INTX BIT(3)
+#define PCI_ENDPOINT_TEST_DB_BAR 0x34
+#define PCI_ENDPOINT_TEST_DB_OFFSET 0x38
+#define PCI_ENDPOINT_TEST_DB_DATA 0x3c
+
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
@@ -100,6 +112,7 @@ enum pci_barno {
BAR_3,
BAR_4,
BAR_5,
+ NO_BAR = -1,
};
struct pci_endpoint_test {
@@ -841,6 +854,73 @@ static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
return 0;
}
+static int pci_endpoint_test_doorbell(struct pci_endpoint_test *test)
+{
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ int irq_type = test->irq_type;
+ enum pci_barno bar;
+ u32 data, status;
+ u32 addr;
+ int left;
+
+ if (irq_type < PCITEST_IRQ_TYPE_INTX ||
+ irq_type > PCITEST_IRQ_TYPE_MSIX) {
+ dev_err(dev, "Invalid IRQ type\n");
+ return -EINVAL;
+ }
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ COMMAND_ENABLE_DOORBELL);
+
+ left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));
+
+ status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
+ if (!left || (status & STATUS_DOORBELL_ENABLE_FAIL)) {
+ dev_err(dev, "Failed to enable doorbell\n");
+ return -EINVAL;
+ }
+
+ data = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_DATA);
+ addr = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_OFFSET);
+ bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR);
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, 0);
+
+ writel(data, test->bar[bar] + addr);
+
+ left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));
+
+ status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
+
+ if (!left || !(status & STATUS_DOORBELL_SUCCESS))
+ dev_err(dev, "Failed to trigger doorbell in endpoint\n");
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ COMMAND_DISABLE_DOORBELL);
+
+ wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));
+
+ status |= pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
+
+ if (status & STATUS_DOORBELL_DISABLE_FAIL) {
+ dev_err(dev, "Failed to disable doorbell\n");
+ return -EINVAL;
+ }
+
+ if (!(status & STATUS_DOORBELL_SUCCESS))
+ return -EINVAL;
+
+ return 0;
+}
+
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -891,6 +971,9 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
case PCITEST_CLEAR_IRQ:
ret = pci_endpoint_test_clear_irq(test);
break;
+ case PCITEST_DOORBELL:
+ ret = pci_endpoint_test_doorbell(test);
+ break;
}
ret:
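From user space the new doorbell test is reached through the driver's existing ioctl interface. A minimal usage sketch, assuming the PCITEST_DOORBELL request from include/uapi/linux/pcitest.h and the usual /dev/pci-endpoint-test.0 node exposed by this driver:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/pcitest.h>	/* PCITEST_DOORBELL */

	int main(void)
	{
		int fd = open("/dev/pci-endpoint-test.0", O_RDWR);

		if (fd < 0)
			return 1;
		/* Succeeds only if the endpoint reported STATUS_DOORBELL_SUCCESS. */
		if (ioctl(fd, PCITEST_DOORBELL) < 0)
			perror("PCITEST_DOORBELL");
		close(fd);
		return 0;
	}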
diff --git a/drivers/misc/ti_fpc202.c b/drivers/misc/ti_fpc202.c
index 0b1a6350c02b..7964e46c7448 100644
--- a/drivers/misc/ti_fpc202.c
+++ b/drivers/misc/ti_fpc202.c
@@ -333,7 +333,7 @@ static int fpc202_probe(struct i2c_client *client)
priv->gpio.base = -1;
priv->gpio.direction_input = fpc202_gpio_direction_input;
priv->gpio.direction_output = fpc202_gpio_direction_output;
- priv->gpio.set_rv = fpc202_gpio_set;
+ priv->gpio.set = fpc202_gpio_set;
priv->gpio.get = fpc202_gpio_get;
priv->gpio.ngpio = FPC202_GPIO_COUNT;
priv->gpio.parent = dev;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index c817d8c21641..6653fc53c951 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1778,8 +1778,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* @pages_lock . We keep holding @comm_lock since we will need it in a
* second.
*/
- balloon_page_delete(page);
-
+ balloon_page_finalize(page);
put_page(page);
/* Inflate */
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index aed653ce8fa2..46cebde79f34 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -183,6 +183,17 @@ config MTD_POWERNV_FLASH
platforms from Linux. This device abstracts away the
firmware interface for flash access.
+config MTD_INTEL_DG
+ tristate "Intel Discrete Graphics non-volatile memory driver"
+ depends on AUXILIARY_BUS
+ depends on MTD
+ help
+ This provides an MTD device to access Intel Discrete Graphics
+ non-volatile memory.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mtd-intel-dg.
+
comment "Disk-On-Chip Device Drivers"
config MTD_DOCG3
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index d11eb2b8b6f8..9fe4ce9cffde 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_MTD_SST25L) += sst25l.o
obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
obj-$(CONFIG_MTD_ST_SPI_FSM) += st_spi_fsm.o
obj-$(CONFIG_MTD_POWERNV_FLASH) += powernv_flash.o
+obj-$(CONFIG_MTD_INTEL_DG) += mtd_intel_dg.o
CFLAGS_docg3.o += -I$(src)
diff --git a/drivers/mtd/devices/mtd_intel_dg.c b/drivers/mtd/devices/mtd_intel_dg.c
new file mode 100644
index 000000000000..b438ee5aacc3
--- /dev/null
+++ b/drivers/mtd/devices/mtd_intel_dg.c
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/intel_dg_nvm_aux.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+struct intel_dg_nvm {
+ struct kref refcnt;
+ struct mtd_info mtd;
+ struct mutex lock; /* region access lock */
+ void __iomem *base;
+ void __iomem *base2;
+ bool non_posted_erase;
+
+ size_t size;
+ unsigned int nregions;
+ struct {
+ const char *name;
+ u8 id;
+ u64 offset;
+ u64 size;
+ unsigned int is_readable:1;
+ unsigned int is_writable:1;
+ } regions[] __counted_by(nregions);
+};
+
+#define NVM_TRIGGER_REG 0x00000000
+#define NVM_VALSIG_REG 0x00000010
+#define NVM_ADDRESS_REG 0x00000040
+#define NVM_REGION_ID_REG 0x00000044
+#define NVM_DEBUG_REG 0x00000000
+/*
+ * [15:0]-Erase size = 0x0010 4K 0x0080 32K 0x0100 64K
+ * [23:16]-Reserved
+ * [31:24]-Erase MEM RegionID
+ */
+#define NVM_ERASE_REG 0x00000048
+#define NVM_ACCESS_ERROR_REG 0x00000070
+#define NVM_ADDRESS_ERROR_REG 0x00000074
+
+/* Flash Valid Signature */
+#define NVM_FLVALSIG 0x0FF0A55A
+
+#define NVM_MAP_ADDR_MASK GENMASK(7, 0)
+#define NVM_MAP_ADDR_SHIFT 0x00000004
+
+#define NVM_REGION_ID_DESCRIPTOR 0
+/* Flash Region Base Address */
+#define NVM_FRBA 0x40
+/* Flash Region __n - Flash Descriptor Record */
+#define NVM_FLREG(__n) (NVM_FRBA + ((__n) * 4))
+/* Flash Map 1 Register */
+#define NVM_FLMAP1_REG 0x18
+#define NVM_FLMSTR4_OFFSET 0x00C
+
+#define NVM_ACCESS_ERROR_PCIE_MASK 0x7
+
+#define NVM_FREG_BASE_MASK GENMASK(15, 0)
+#define NVM_FREG_ADDR_MASK GENMASK(31, 16)
+#define NVM_FREG_ADDR_SHIFT 12
+#define NVM_FREG_MIN_REGION_SIZE 0xFFF
+
+#define NVM_NON_POSTED_ERASE_DONE BIT(23)
+#define NVM_NON_POSTED_ERASE_DONE_ITER 3000
+
+static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region)
+{
+ iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG);
+}
+
+static inline u32 idg_nvm_error(struct intel_dg_nvm *nvm)
+{
+ void __iomem *base = nvm->base;
+
+ u32 reg = ioread32(base + NVM_ACCESS_ERROR_REG) & NVM_ACCESS_ERROR_PCIE_MASK;
+
+ /* reset error bits */
+ if (reg)
+ iowrite32(reg, base + NVM_ACCESS_ERROR_REG);
+
+ return reg;
+}
+
+static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address)
+{
+ void __iomem *base = nvm->base;
+
+ iowrite32(address, base + NVM_ADDRESS_REG);
+
+ return ioread32(base + NVM_TRIGGER_REG);
+}
+
+static inline u64 idg_nvm_read64(struct intel_dg_nvm *nvm, u32 address)
+{
+ void __iomem *base = nvm->base;
+
+ iowrite32(address, base + NVM_ADDRESS_REG);
+
+ return readq(base + NVM_TRIGGER_REG);
+}
+
+static void idg_nvm_write32(struct intel_dg_nvm *nvm, u32 address, u32 data)
+{
+ void __iomem *base = nvm->base;
+
+ iowrite32(address, base + NVM_ADDRESS_REG);
+
+ iowrite32(data, base + NVM_TRIGGER_REG);
+}
+
+static void idg_nvm_write64(struct intel_dg_nvm *nvm, u32 address, u64 data)
+{
+ void __iomem *base = nvm->base;
+
+ iowrite32(address, base + NVM_ADDRESS_REG);
+
+ writeq(data, base + NVM_TRIGGER_REG);
+}
+
+static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map)
+{
+ u32 fmstr4_addr;
+ u32 fmstr4;
+ u32 flmap1;
+ u32 fmba;
+
+ idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);
+
+ flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ /* Get Flash Master Base Address (FMBA) */
+ fmba = (FIELD_GET(NVM_MAP_ADDR_MASK, flmap1) << NVM_MAP_ADDR_SHIFT);
+ fmstr4_addr = fmba + NVM_FLMSTR4_OFFSET;
+
+ fmstr4 = idg_nvm_read32(nvm, fmstr4_addr);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+
+ *access_map = fmstr4;
+ return 0;
+}
+
+/*
+ * Region read/write access encoded in the access map
+ * in the following order from the lower bit:
+ * [3:0] regions 12-15 read state
+ * [7:4] regions 12-15 write state
+ * [19:8] regions 0-11 read state
+ * [31:20] regions 0-11 write state
+ */
+static bool idg_nvm_region_readable(u32 access_map, u8 region)
+{
+ if (region < 12)
+ return access_map & BIT(region + 8); /* [19:8] */
+ else
+ return access_map & BIT(region - 12); /* [3:0] */
+}
+
+static bool idg_nvm_region_writable(u32 access_map, u8 region)
+{
+ if (region < 12)
+ return access_map & BIT(region + 20); /* [31:20] */
+ else
+ return access_map & BIT(region - 8); /* [7:4] */
+}
+
+static int idg_nvm_is_valid(struct intel_dg_nvm *nvm)
+{
+ u32 is_valid;
+
+ idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);
+
+ is_valid = idg_nvm_read32(nvm, NVM_VALSIG_REG);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+
+ if (is_valid != NVM_FLVALSIG)
+ return -ENODEV;
+
+ return 0;
+}
+
+static unsigned int idg_nvm_get_region(const struct intel_dg_nvm *nvm, loff_t from)
+{
+ unsigned int i;
+
+ for (i = 0; i < nvm->nregions; i++) {
+ if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from &&
+ nvm->regions[i].offset <= from &&
+ nvm->regions[i].size != 0)
+ break;
+ }
+
+ return i;
+}
+
+static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to,
+ loff_t offset, size_t len, const u32 *newdata)
+{
+ u32 data = idg_nvm_read32(nvm, to);
+
+ if (idg_nvm_error(nvm))
+ return -EIO;
+
+ memcpy((u8 *)&data + offset, newdata, len);
+
+ idg_nvm_write32(nvm, to, data);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t idg_write(struct intel_dg_nvm *nvm, u8 region,
+ loff_t to, size_t len, const unsigned char *buf)
+{
+ size_t len_s = len;
+ size_t to_shift;
+ size_t len8;
+ size_t len4;
+ ssize_t ret;
+ size_t to4;
+ size_t i;
+
+ idg_nvm_set_region_id(nvm, region);
+
+ to4 = ALIGN_DOWN(to, sizeof(u32));
+ to_shift = min(sizeof(u32) - ((size_t)to - to4), len);
+ if (to - to4) {
+ ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]);
+ if (ret < 0)
+ return ret;
+
+ buf += to_shift;
+ to += to_shift;
+ len_s -= to_shift;
+ }
+
+ if (!IS_ALIGNED(to, sizeof(u64)) &&
+ ((to ^ (to + len_s)) & GENMASK(31, 10))) {
+ /*
+ * Workaround reads/writes across 1k-aligned addresses
+ * (start u32 before 1k, end u32 after)
+ * as this fails on hardware.
+ */
+ u32 data;
+
+ memcpy(&data, &buf[0], sizeof(u32));
+ idg_nvm_write32(nvm, to, data);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ buf += sizeof(u32);
+ to += sizeof(u32);
+ len_s -= sizeof(u32);
+ }
+
+ len8 = ALIGN_DOWN(len_s, sizeof(u64));
+ for (i = 0; i < len8; i += sizeof(u64)) {
+ u64 data;
+
+ memcpy(&data, &buf[i], sizeof(u64));
+ idg_nvm_write64(nvm, to + i, data);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ }
+
+ len4 = len_s - len8;
+ if (len4 >= sizeof(u32)) {
+ u32 data;
+
+ memcpy(&data, &buf[i], sizeof(u32));
+ idg_nvm_write32(nvm, to + i, data);
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ i += sizeof(u32);
+ len4 -= sizeof(u32);
+ }
+
+ if (len4 > 0) {
+ ret = idg_nvm_rewrite_partial(nvm, to + i, 0, len4, (u32 *)&buf[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return len;
+}
+
+static ssize_t idg_read(struct intel_dg_nvm *nvm, u8 region,
+ loff_t from, size_t len, unsigned char *buf)
+{
+ size_t len_s = len;
+ size_t from_shift;
+ size_t from4;
+ size_t len8;
+ size_t len4;
+ size_t i;
+
+ idg_nvm_set_region_id(nvm, region);
+
+ from4 = ALIGN_DOWN(from, sizeof(u32));
+ from_shift = min(sizeof(u32) - ((size_t)from - from4), len);
+
+ if (from - from4) {
+ u32 data = idg_nvm_read32(nvm, from4);
+
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ memcpy(&buf[0], (u8 *)&data + (from - from4), from_shift);
+ len_s -= from_shift;
+ buf += from_shift;
+ from += from_shift;
+ }
+
+ if (!IS_ALIGNED(from, sizeof(u64)) &&
+ ((from ^ (from + len_s)) & GENMASK(31, 10))) {
+ /*
+ * Workaround reads/writes across 1k-aligned addresses
+ * (start u32 before 1k, end u32 after)
+ * as this fails on hardware.
+ */
+ u32 data = idg_nvm_read32(nvm, from);
+
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ memcpy(&buf[0], &data, sizeof(data));
+ len_s -= sizeof(u32);
+ buf += sizeof(u32);
+ from += sizeof(u32);
+ }
+
+ len8 = ALIGN_DOWN(len_s, sizeof(u64));
+ for (i = 0; i < len8; i += sizeof(u64)) {
+ u64 data = idg_nvm_read64(nvm, from + i);
+
+ if (idg_nvm_error(nvm))
+ return -EIO;
+
+ memcpy(&buf[i], &data, sizeof(data));
+ }
+
+ len4 = len_s - len8;
+ if (len4 >= sizeof(u32)) {
+ u32 data = idg_nvm_read32(nvm, from + i);
+
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ memcpy(&buf[i], &data, sizeof(data));
+ i += sizeof(u32);
+ len4 -= sizeof(u32);
+ }
+
+ if (len4 > 0) {
+ u32 data = idg_nvm_read32(nvm, from + i);
+
+ if (idg_nvm_error(nvm))
+ return -EIO;
+ memcpy(&buf[i], &data, len4);
+ }
+
+ return len;
+}
+
+static ssize_t
+idg_erase(struct intel_dg_nvm *nvm, u8 region, loff_t from, u64 len, u64 *fail_addr)
+{
+ void __iomem *base2 = nvm->base2;
+ void __iomem *base = nvm->base;
+ const u32 block = 0x10;
+ u32 iter = 0;
+ u32 reg;
+ u64 i;
+
+ for (i = 0; i < len; i += SZ_4K) {
+ iowrite32(from + i, base + NVM_ADDRESS_REG);
+ iowrite32(region << 24 | block, base + NVM_ERASE_REG);
+ if (nvm->non_posted_erase) {
+ /* Wait for Erase Done */
+ reg = ioread32(base2 + NVM_DEBUG_REG);
+ while (!(reg & NVM_NON_POSTED_ERASE_DONE) &&
+ ++iter < NVM_NON_POSTED_ERASE_DONE_ITER) {
+ msleep(10);
+ reg = ioread32(base2 + NVM_DEBUG_REG);
+ }
+ if (reg & NVM_NON_POSTED_ERASE_DONE) {
+ /* Clear Erase Done */
+ iowrite32(reg, base2 + NVM_DEBUG_REG);
+ } else {
+ *fail_addr = from + i;
+ return -ETIME;
+ }
+ }
+ /*
+ * Since the writes are via sgunit
+ * we cannot do back-to-back erases.
+ */
+ msleep(50);
+ }
+ return len;
+}
+
+static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device,
+ bool non_posted_erase)
+{
+ u32 access_map = 0;
+ unsigned int i, n;
+ int ret;
+
+ /* clean error register, previous errors are ignored */
+ idg_nvm_error(nvm);
+
+ ret = idg_nvm_is_valid(nvm);
+ if (ret) {
+ dev_err(device, "The MEM is not valid %d\n", ret);
+ return ret;
+ }
+
+ if (idg_nvm_get_access_map(nvm, &access_map))
+ return -EIO;
+
+ for (i = 0, n = 0; i < nvm->nregions; i++) {
+ u32 address, base, limit, region;
+ u8 id = nvm->regions[i].id;
+
+ address = NVM_FLREG(id);
+ region = idg_nvm_read32(nvm, address);
+
+ base = FIELD_GET(NVM_FREG_BASE_MASK, region) << NVM_FREG_ADDR_SHIFT;
+ limit = (FIELD_GET(NVM_FREG_ADDR_MASK, region) << NVM_FREG_ADDR_SHIFT) |
+ NVM_FREG_MIN_REGION_SIZE;
+
+ dev_dbg(device, "[%d] %s: region: 0x%08X base: 0x%08x limit: 0x%08x\n",
+ id, nvm->regions[i].name, region, base, limit);
+
+ if (base >= limit || (i > 0 && limit == 0)) {
+ dev_dbg(device, "[%d] %s: disabled\n",
+ id, nvm->regions[i].name);
+ nvm->regions[i].is_readable = 0;
+ continue;
+ }
+
+ if (nvm->size < limit)
+ nvm->size = limit;
+
+ nvm->regions[i].offset = base;
+ nvm->regions[i].size = limit - base + 1;
+ /* No write access to descriptor; mask it out */
+ nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id);
+
+ nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id);
+ dev_dbg(device, "Registered, %s id=%d offset=%lld size=%lld rd=%d wr=%d\n",
+ nvm->regions[i].name,
+ nvm->regions[i].id,
+ nvm->regions[i].offset,
+ nvm->regions[i].size,
+ nvm->regions[i].is_readable,
+ nvm->regions[i].is_writable);
+
+ if (nvm->regions[i].is_readable)
+ n++;
+ }
+
+ nvm->non_posted_erase = non_posted_erase;
+
+ dev_dbg(device, "Registered %d regions\n", n);
+ dev_dbg(device, "Non posted erase %d\n", nvm->non_posted_erase);
+
+ /*
+ * Need to add 1 to the amount of memory
+ * so it is reported as an even block
+ */
+ nvm->size += 1;
+
+ return n;
+}
+
+static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
+{
+ struct intel_dg_nvm *nvm = mtd->priv;
+ size_t total_len;
+ unsigned int idx;
+ ssize_t bytes;
+ loff_t from;
+ size_t len;
+ u8 region;
+ u64 addr;
+
+ if (WARN_ON(!nvm))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(info->addr, SZ_4K) || !IS_ALIGNED(info->len, SZ_4K)) {
+ dev_err(&mtd->dev, "unaligned erase %llx %llx\n",
+ info->addr, info->len);
+ info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ return -EINVAL;
+ }
+
+ total_len = info->len;
+ addr = info->addr;
+
+ guard(mutex)(&nvm->lock);
+
+ while (total_len > 0) {
+ if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
+ dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
+ info->fail_addr = addr;
+ return -ERANGE;
+ }
+
+ idx = idg_nvm_get_region(nvm, addr);
+ if (idx >= nvm->nregions) {
+ dev_err(&mtd->dev, "out of range");
+ info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ return -ERANGE;
+ }
+
+ from = addr - nvm->regions[idx].offset;
+ region = nvm->regions[idx].id;
+ len = total_len;
+ if (len > nvm->regions[idx].size - from)
+ len = nvm->regions[idx].size - from;
+
+ dev_dbg(&mtd->dev, "erasing region[%d] %s from %llx len %zx\n",
+ region, nvm->regions[idx].name, from, len);
+
+ bytes = idg_erase(nvm, region, from, len, &info->fail_addr);
+ if (bytes < 0) {
+ dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
+ info->fail_addr += nvm->regions[idx].offset;
+ return bytes;
+ }
+
+ addr += len;
+ total_len -= len;
+ }
+
+ return 0;
+}
+
+static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct intel_dg_nvm *nvm = mtd->priv;
+ unsigned int idx;
+ ssize_t ret;
+ u8 region;
+
+ if (WARN_ON(!nvm))
+ return -EINVAL;
+
+ idx = idg_nvm_get_region(nvm, from);
+ /* validate idx before regions[idx] is dereferenced below */
+ if (idx >= nvm->nregions) {
+ dev_err(&mtd->dev, "out of range");
+ return -ERANGE;
+ }
+
+ dev_dbg(&mtd->dev, "reading region[%d] %s from %lld len %zd\n",
+ nvm->regions[idx].id, nvm->regions[idx].name, from, len);
+
+ from -= nvm->regions[idx].offset;
+ region = nvm->regions[idx].id;
+ if (len > nvm->regions[idx].size - from)
+ len = nvm->regions[idx].size - from;
+
+ guard(mutex)(&nvm->lock);
+
+ ret = idg_read(nvm, region, from, len, buf);
+ if (ret < 0) {
+ dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
+ return ret;
+ }
+
+ *retlen = ret;
+
+ return 0;
+}
+
+static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct intel_dg_nvm *nvm = mtd->priv;
+ unsigned int idx;
+ ssize_t ret;
+ u8 region;
+
+ if (WARN_ON(!nvm))
+ return -EINVAL;
+
+ idx = idg_nvm_get_region(nvm, to);
+ /* validate idx before regions[idx] is dereferenced below */
+ if (idx >= nvm->nregions) {
+ dev_err(&mtd->dev, "out of range");
+ return -ERANGE;
+ }
+
+ dev_dbg(&mtd->dev, "writing region[%d] %s to %lld len %zd\n",
+ nvm->regions[idx].id, nvm->regions[idx].name, to, len);
+
+ to -= nvm->regions[idx].offset;
+ region = nvm->regions[idx].id;
+ if (len > nvm->regions[idx].size - to)
+ len = nvm->regions[idx].size - to;
+
+ guard(mutex)(&nvm->lock);
+
+ ret = idg_write(nvm, region, to, len, buf);
+ if (ret < 0) {
+ dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
+ return ret;
+ }
+
+ *retlen = ret;
+
+ return 0;
+}
+
+static void intel_dg_nvm_release(struct kref *kref)
+{
+ struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt);
+ int i;
+
+ pr_debug("freeing intel_dg nvm\n");
+ for (i = 0; i < nvm->nregions; i++)
+ kfree(nvm->regions[i].name);
+ mutex_destroy(&nvm->lock);
+ kfree(nvm);
+}
+
+static int intel_dg_mtd_get_device(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct intel_dg_nvm *nvm = master->priv;
+
+ if (WARN_ON(!nvm))
+ return -EINVAL;
+ pr_debug("get mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
+ kref_get(&nvm->refcnt);
+
+ return 0;
+}
+
+static void intel_dg_mtd_put_device(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct intel_dg_nvm *nvm = master->priv;
+
+ if (WARN_ON(!nvm))
+ return;
+ pr_debug("put mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
+ kref_put(&nvm->refcnt, intel_dg_nvm_release);
+}
+
+static int intel_dg_nvm_init_mtd(struct intel_dg_nvm *nvm, struct device *device,
+ unsigned int nparts, bool writable_override)
+{
+ struct mtd_partition *parts = NULL;
+ unsigned int i, n;
+ int ret;
+
+ dev_dbg(device, "registering with mtd\n");
+
+ nvm->mtd.owner = THIS_MODULE;
+ nvm->mtd.dev.parent = device;
+ nvm->mtd.flags = MTD_CAP_NORFLASH;
+ nvm->mtd.type = MTD_DATAFLASH;
+ nvm->mtd.priv = nvm;
+ nvm->mtd._write = intel_dg_mtd_write;
+ nvm->mtd._read = intel_dg_mtd_read;
+ nvm->mtd._erase = intel_dg_mtd_erase;
+ nvm->mtd._get_device = intel_dg_mtd_get_device;
+ nvm->mtd._put_device = intel_dg_mtd_put_device;
+ nvm->mtd.writesize = SZ_1; /* 1 byte granularity */
+ nvm->mtd.erasesize = SZ_4K; /* 4K bytes granularity */
+ nvm->mtd.size = nvm->size;
+
+ parts = kcalloc(nvm->nregions, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ for (i = 0, n = 0; i < nvm->nregions && n < nparts; i++) {
+ if (!nvm->regions[i].is_readable)
+ continue;
+ parts[n].name = nvm->regions[i].name;
+ parts[n].offset = nvm->regions[i].offset;
+ parts[n].size = nvm->regions[i].size;
+ if (!nvm->regions[i].is_writable && !writable_override)
+ parts[n].mask_flags = MTD_WRITEABLE;
+ n++;
+ }
+
+ ret = mtd_device_register(&nvm->mtd, parts, n);
+
+ kfree(parts);
+ return ret;
+}
+
+static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *aux_dev_id)
+{
+ struct intel_dg_nvm_dev *invm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev);
+ struct intel_dg_nvm *nvm;
+ struct device *device;
+ unsigned int nregions;
+ unsigned int i, n;
+ int ret;
+
+ device = &aux_dev->dev;
+
+ /* count available regions */
+ for (nregions = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
+ if (invm->regions[i].name)
+ nregions++;
+ }
+
+ if (!nregions) {
+ dev_err(device, "no regions defined\n");
+ return -ENODEV;
+ }
+
+ nvm = kzalloc(struct_size(nvm, regions, nregions), GFP_KERNEL);
+ if (!nvm)
+ return -ENOMEM;
+
+ kref_init(&nvm->refcnt);
+ mutex_init(&nvm->lock);
+
+ for (n = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
+ if (!invm->regions[i].name)
+ continue;
+
+ char *name = kasprintf(GFP_KERNEL, "%s.%s",
+ dev_name(&aux_dev->dev), invm->regions[i].name);
+ if (!name)
+ continue;
+ nvm->regions[n].name = name;
+ nvm->regions[n].id = i;
+ n++;
+ }
+ nvm->nregions = n; /* in case a kasprintf() call failed */
+
+ nvm->base = devm_ioremap_resource(device, &invm->bar);
+ if (IS_ERR(nvm->base)) {
+ ret = PTR_ERR(nvm->base);
+ goto err;
+ }
+
+ if (invm->non_posted_erase) {
+ nvm->base2 = devm_ioremap_resource(device, &invm->bar2);
+ if (IS_ERR(nvm->base2)) {
+ ret = PTR_ERR(nvm->base2);
+ goto err;
+ }
+ }
+
+ ret = intel_dg_nvm_init(nvm, device, invm->non_posted_erase);
+ if (ret < 0) {
+ dev_err(device, "cannot initialize nvm %d\n", ret);
+ goto err;
+ }
+
+ ret = intel_dg_nvm_init_mtd(nvm, device, ret, invm->writable_override);
+ if (ret) {
+ dev_err(device, "failed init mtd %d\n", ret);
+ goto err;
+ }
+
+ dev_set_drvdata(&aux_dev->dev, nvm);
+
+ return 0;
+
+err:
+ kref_put(&nvm->refcnt, intel_dg_nvm_release);
+ return ret;
+}
+
+static void intel_dg_mtd_remove(struct auxiliary_device *aux_dev)
+{
+ struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev);
+
+ if (!nvm)
+ return;
+
+ mtd_device_unregister(&nvm->mtd);
+
+ dev_set_drvdata(&aux_dev->dev, NULL);
+
+ kref_put(&nvm->refcnt, intel_dg_nvm_release);
+}
+
+static const struct auxiliary_device_id intel_dg_mtd_id_table[] = {
+ {
+ .name = "i915.nvm",
+ },
+ {
+ .name = "xe.nvm",
+ },
+ {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(auxiliary, intel_dg_mtd_id_table);
+
+static struct auxiliary_driver intel_dg_mtd_driver = {
+ .probe = intel_dg_mtd_probe,
+ .remove = intel_dg_mtd_remove,
+ .driver = {
+ /* auxiliary_driver_register() sets .name to be the modname */
+ },
+ .id_table = intel_dg_mtd_id_table
+};
+module_auxiliary_driver(intel_dg_mtd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel DGFX MTD driver");
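One allocation pattern in the new driver deserves a callout: the region table is a trailing flexible array sized with struct_size() and annotated __counted_by(), so the overflow-safe size math and the compiler's bounds checking key off the same count. The same idiom in isolation (hypothetical struct, kernel context assumed):

	struct example {
		unsigned int nitems;
		struct {
			u64 offset;
			u64 size;
		} items[] __counted_by(nitems);
	};

	static struct example *example_alloc(unsigned int n)
	{
		/* struct_size() computes header + n * sizeof(items[0]) with
		 * overflow checking; set nitems before touching items[] so
		 * the __counted_by bounds checks hold.
		 */
		struct example *e = kzalloc(struct_size(e, items, n), GFP_KERNEL);

		if (e)
			e->nitems = n;
		return e;
	}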
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 8c22064ead38..f2bd1984609c 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -344,7 +344,7 @@ static int erase_xfer(partition_t *part,
return -ENOMEM;
erase->addr = xfer->Offset;
- erase->len = 1 << part->header.EraseUnitSize;
+ erase->len = 1ULL << part->header.EraseUnitSize;
ret = mtd_erase(part->mbd.mtd, erase);
if (!ret) {
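The ftl.c one-liner fixes a classic shift-width bug: 1 << EraseUnitSize is computed in plain int, so a shift of 31 overflows into the sign bit (in practice sign-extending to a huge value in the 64-bit erase->len) and 32 or more is undefined behaviour. Forcing the constant wide keeps the whole shift 64-bit. A self-contained illustration (values chosen for the example):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int shift = 31;

		int64_t bad = 1 << shift;	/* int math: typically -2147483648 */
		uint64_t good = 1ULL << shift;	/* 2147483648, as intended */

		printf("%lld vs %llu\n", (long long)bad,
		       (unsigned long long)good);
		return 0;
	}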
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index dedcca87defc..84ab4a83cbd6 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -373,7 +373,7 @@ static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
dma_cookie_t cookie;
buf_dma = dma_map_single(nc->dev, buf, len, dir);
- if (dma_mapping_error(nc->dev, dev_dma)) {
+ if (dma_mapping_error(nc->dev, buf_dma)) {
dev_err(nc->dev,
"Failed to prepare a buffer for DMA access\n");
goto err;
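This hunk and the later fsmc, renesas and rockchip ones all enforce the same DMA-API rule: a handle returned by dma_map_single() must be validated with dma_mapping_error() before use (the atmel check above also previously tested the wrong variable, dev_dma instead of buf_dma). Canonical shape, with placeholder names:

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;	/* handle is not a usable bus address */

	/* ... run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);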
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
index 3c7dee1be21d..0b402823b619 100644
--- a/drivers/mtd/nand/raw/atmel/pmecc.c
+++ b/drivers/mtd/nand/raw/atmel/pmecc.c
@@ -143,6 +143,7 @@ struct atmel_pmecc_caps {
int nstrengths;
int el_offset;
bool correct_erased_chunks;
+ bool clk_ctrl;
};
struct atmel_pmecc {
@@ -843,6 +844,10 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
if (IS_ERR(pmecc->regs.errloc))
return ERR_CAST(pmecc->regs.errloc);
+ /* pmecc data setup time */
+ if (caps->clk_ctrl)
+ writel(PMECC_CLK_133MHZ, pmecc->regs.base + ATMEL_PMECC_CLK);
+
/* Disable all interrupts before registering the PMECC handler. */
writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
atmel_pmecc_reset(pmecc);
@@ -896,6 +901,7 @@ static struct atmel_pmecc_caps at91sam9g45_caps = {
.strengths = atmel_pmecc_strengths,
.nstrengths = 5,
.el_offset = 0x8c,
+ .clk_ctrl = true,
};
static struct atmel_pmecc_caps sama5d4_caps = {
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 62bdda3be92f..835653bdd5ab 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -29,6 +29,7 @@
#include <linux/static_key.h>
#include <linux/list.h>
#include <linux/log2.h>
+#include <linux/string_choices.h>
#include "brcmnand.h"
@@ -359,6 +360,7 @@ enum brcmnand_reg {
BRCMNAND_CORR_THRESHOLD_EXT,
BRCMNAND_UNCORR_COUNT,
BRCMNAND_CORR_COUNT,
+ BRCMNAND_READ_ERROR_COUNT,
BRCMNAND_CORR_EXT_ADDR,
BRCMNAND_CORR_ADDR,
BRCMNAND_UNCORR_EXT_ADDR,
@@ -389,6 +391,7 @@ static const u16 brcmnand_regs_v21[] = {
[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
[BRCMNAND_UNCORR_COUNT] = 0,
[BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_READ_ERROR_COUNT] = 0,
[BRCMNAND_CORR_EXT_ADDR] = 0x60,
[BRCMNAND_CORR_ADDR] = 0x64,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x68,
@@ -419,6 +422,7 @@ static const u16 brcmnand_regs_v33[] = {
[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
[BRCMNAND_UNCORR_COUNT] = 0,
[BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_READ_ERROR_COUNT] = 0x80,
[BRCMNAND_CORR_EXT_ADDR] = 0x70,
[BRCMNAND_CORR_ADDR] = 0x74,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
@@ -449,6 +453,7 @@ static const u16 brcmnand_regs_v50[] = {
[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
[BRCMNAND_UNCORR_COUNT] = 0,
[BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_READ_ERROR_COUNT] = 0x80,
[BRCMNAND_CORR_EXT_ADDR] = 0x70,
[BRCMNAND_CORR_ADDR] = 0x74,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
@@ -479,6 +484,7 @@ static const u16 brcmnand_regs_v60[] = {
[BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4,
[BRCMNAND_UNCORR_COUNT] = 0xfc,
[BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_READ_ERROR_COUNT] = 0x104,
[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
[BRCMNAND_CORR_ADDR] = 0x110,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
@@ -509,6 +515,7 @@ static const u16 brcmnand_regs_v71[] = {
[BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
[BRCMNAND_UNCORR_COUNT] = 0xfc,
[BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_READ_ERROR_COUNT] = 0x104,
[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
[BRCMNAND_CORR_ADDR] = 0x110,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
@@ -539,6 +546,7 @@ static const u16 brcmnand_regs_v72[] = {
[BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
[BRCMNAND_UNCORR_COUNT] = 0xfc,
[BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_READ_ERROR_COUNT] = 0x104,
[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
[BRCMNAND_CORR_ADDR] = 0x110,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
@@ -959,11 +967,11 @@ static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
}
-static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
+static inline u32 brcmnand_corr_total(struct brcmnand_controller *ctrl)
{
- if (ctrl->nand_version < 0x0600)
- return 1;
- return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
+ if (ctrl->nand_version < 0x400)
+ return 0;
+ return brcmnand_read_reg(ctrl, BRCMNAND_READ_ERROR_COUNT);
}
static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
@@ -1462,7 +1470,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
int ret;
if (old_wp != wp) {
- dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
+ dev_dbg(ctrl->dev, "WP %s\n", str_on_off(wp));
old_wp = wp;
}
@@ -1492,7 +1500,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
if (ret)
dev_err_ratelimited(&host->pdev->dev,
"nand #WP expected %s\n",
- wp ? "on" : "off");
+ str_on_off(wp));
}
}
@@ -1869,8 +1877,8 @@ static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
unsigned int trans = len >> FC_SHIFT;
dma_addr_t pa;
- dev_dbg(ctrl->dev, "EDU %s %p:%p\n", ((edu_cmd == EDU_CMD_READ) ?
- "read" : "write"), buf, oob);
+ dev_dbg(ctrl->dev, "EDU %s %p:%p\n",
+ str_read_write(edu_cmd == EDU_CMD_READ), buf, oob);
pa = dma_map_single(ctrl->dev, buf, len, dir);
if (dma_mapping_error(ctrl->dev, pa)) {
@@ -2066,15 +2074,20 @@ static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
*/
static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
u64 addr, unsigned int trans, u32 *buf,
- u8 *oob, u64 *err_addr)
+ u8 *oob, u64 *err_addr, unsigned int *corr)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
int i, ret = 0;
+ unsigned int prev_corr;
+
+ if (corr)
+ *corr = 0;
brcmnand_clear_ecc_addr(ctrl);
for (i = 0; i < trans; i++, addr += FC_BYTES) {
+ prev_corr = brcmnand_corr_total(ctrl);
brcmnand_set_cmd_addr(mtd, addr);
/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
brcmnand_send_cmd(host, CMD_PAGE_READ);
@@ -2099,13 +2112,16 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
if (*err_addr)
ret = -EBADMSG;
- }
+ else {
+ *err_addr = brcmnand_get_correcc_addr(ctrl);
- if (!ret) {
- *err_addr = brcmnand_get_correcc_addr(ctrl);
+ if (*err_addr) {
+ ret = -EUCLEAN;
- if (*err_addr)
- ret = -EUCLEAN;
+ if (corr && (brcmnand_corr_total(ctrl) - prev_corr) > *corr)
+ *corr = brcmnand_corr_total(ctrl) - prev_corr;
+ }
+ }
}
}
@@ -2173,6 +2189,8 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
int err;
bool retry = true;
bool edu_err = false;
+ unsigned int corrected = 0; /* max corrected bits per subpage */
+ unsigned int prev_tot = brcmnand_corr_total(ctrl);
dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
@@ -2200,9 +2218,11 @@ try_dmaread:
memset(oob, 0x99, mtd->oobsize);
err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
- oob, &err_addr);
+ oob, &err_addr, &corrected);
}
+ mtd->ecc_stats.corrected += brcmnand_corr_total(ctrl) - prev_tot;
+
if (mtd_is_eccerr(err)) {
/*
* On controller versions 7.0 and 7.1, a DMA read after a
@@ -2240,16 +2260,20 @@ try_dmaread:
}
if (mtd_is_bitflip(err)) {
- unsigned int corrected = brcmnand_count_corrected(ctrl);
-
/* in case of EDU correctable error we read again using PIO */
if (edu_err)
err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
- oob, &err_addr);
+ oob, &err_addr, &corrected);
dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
(unsigned long long)err_addr);
- mtd->ecc_stats.corrected += corrected;
+ /*
+ * if flipped bits accumulator is not supported but we detected
+ * a correction, increase stat by 1 to match previous behavior.
+ */
+ if (brcmnand_corr_total(ctrl) == prev_tot)
+ mtd->ecc_stats.corrected++;
+
/* Always exceed the software-imposed threshold */
return max(mtd->bitflip_threshold, corrected);
}
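The brcmnand rework above stops guessing how many bits a correctable-error event fixed: controllers from v4.0 on expose a cumulative READ_ERROR_COUNT, so the driver snapshots it before a page read, credits the delta to mtd->ecc_stats.corrected, and only bumps the stat by one when the accumulator is absent. The delta pattern in isolation (helper names hypothetical):

	unsigned int prev = read_total_corrected(ctrl);	/* snapshot */

	do_page_read(ctrl);

	/* Credit exactly the bits the controller fixed during this read. */
	stats->corrected += read_total_corrected(ctrl) - prev;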
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index d579d5dd60d6..df61db8ce466 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -503,6 +503,8 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_dev = chan->device;
dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+ if (dma_mapping_error(dma_dev->dev, dma_addr))
+ return -EINVAL;
if (direction == DMA_TO_DEVICE) {
dma_src = dma_addr;
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index c02e50608816..b663659b2f49 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -377,9 +377,9 @@ static int hynix_nand_rr_init(struct nand_chip *chip)
/*
* We only support read-retry for 1xnm NANDs, and those NANDs all
- * expose a valid JEDEC ID.
+ * expose a valid JEDEC ID. SLC NANDs don't require read-retry.
*/
- if (valid_jedecid) {
+ if (valid_jedecid && nanddev_bits_per_cell(&chip->base) > 1) {
u8 nand_tech = chip->id.data[5] >> 4;
/* 1xnm technology */
diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c
index 44f6603736d1..ac8c1b80d7be 100644
--- a/drivers/mtd/nand/raw/renesas-nand-controller.c
+++ b/drivers/mtd/nand/raw/renesas-nand-controller.c
@@ -426,6 +426,9 @@ static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
/* Configure DMA */
dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(rnandc->dev, dma_addr))
+ return -ENOMEM;
+
writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
@@ -606,6 +609,9 @@ static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
/* Configure DMA */
dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize,
DMA_TO_DEVICE);
+ if (dma_mapping_error(rnandc->dev, dma_addr))
+ return -ENOMEM;
+
writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
index 63e7b9e39a5a..c5d7cd8a6cab 100644
--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -656,9 +656,16 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf,
mtd->writesize, DMA_TO_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_data))
+ return -ENOMEM;
+
dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
ecc->steps * oob_step,
DMA_TO_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_oob)) {
+ dma_unmap_single(nfc->dev, dma_data, mtd->writesize, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
reinit_completion(&nfc->done);
writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off);
@@ -772,9 +779,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on,
dma_data = dma_map_single(nfc->dev, nfc->page_buf,
mtd->writesize,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_data))
+ return -ENOMEM;
+
dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
ecc->steps * oob_step,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_oob)) {
+ dma_unmap_single(nfc->dev, dma_data, mtd->writesize,
+ DMA_FROM_DEVICE);
+ return -ENOMEM;
+ }
/*
* The first blocks (4, 8 or 16 depending on the device)
diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c
index 2ee498230ec1..9e97c40955c9 100644
--- a/drivers/mtd/nand/spi/alliancememory.c
+++ b/drivers/mtd/nand/spi/alliancememory.c
@@ -17,12 +17,12 @@
#define AM_STATUS_ECC_MAX_CORRECTED (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
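The long mechanical run through alliancememory, ato, esmt, foresee and gigadevice appends one argument to every SPINAND_PAGE_READ_FROM_CACHE_*_OP() variant. Going by the SPI-NAND frequency-limit work, this trailing slot carries a per-operation maximum frequency, with 0 meaning no constraint; that reading is inferred from the series, not stated in these hunks. The shape of the change:

	/* old: address cycles, dummy cycles, buffer, length */
	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0);
	/* new: trailing maximum-frequency slot, 0 = unconstrained */
	SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0);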
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
index 2b4df1d917ac..45d38ce0736c 100644
--- a/drivers/mtd/nand/spi/ato.c
+++ b/drivers/mtd/nand/spi/ato.c
@@ -14,9 +14,9 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index c411fe9be3ef..b0898990b2a5 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -20,7 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
-static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
+int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
@@ -360,7 +360,7 @@ static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
engine_conf->status = status;
}
-static int spinand_write_enable_op(struct spinand_device *spinand)
+int spinand_write_enable_op(struct spinand_device *spinand)
{
struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
@@ -688,7 +688,10 @@ int spinand_write_page(struct spinand_device *spinand,
SPINAND_WRITE_INITIAL_DELAY_US,
SPINAND_WRITE_POLL_DELAY_US,
&status);
- if (!ret && (status & STATUS_PROG_FAILED))
+ if (ret)
+ return ret;
+
+ if (status & STATUS_PROG_FAILED)
return -EIO;
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
@@ -1250,8 +1253,19 @@ static int spinand_id_detect(struct spinand_device *spinand)
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
- if (spinand->manufacturer->ops->init)
- return spinand->manufacturer->ops->init(spinand);
+ int ret;
+
+ if (spinand->manufacturer->ops->init) {
+ ret = spinand->manufacturer->ops->init(spinand);
+ if (ret)
+ return ret;
+ }
+
+ if (spinand->configure_chip) {
+ ret = spinand->configure_chip(spinand);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -1294,7 +1308,7 @@ spinand_select_op_variant(struct spinand_device *spinand,
nbytes -= op.data.nbytes;
- op_duration_ns += spi_mem_calc_op_duration(&op);
+ op_duration_ns += spi_mem_calc_op_duration(spinand->spimem, &op);
}
if (!nbytes && op_duration_ns < best_op_duration_ns) {
@@ -1346,6 +1360,7 @@ int spinand_match_and_init(struct spinand_device *spinand,
spinand->flags = table[i].flags;
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
+ spinand->configure_chip = table[i].configure_chip;
spinand->set_cont_read = table[i].set_cont_read;
spinand->fact_otp = &table[i].fact_otp;
spinand->user_otp = &table[i].user_otp;
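core.c also grows a per-chip configure_chip() hook, called right after the manufacturer's init so a table entry can attach one-shot chip setup without a bespoke manufacturer ops structure. A hedged sketch of such a callback (EXAMPLE_CFG_BIT is hypothetical; spinand_upd_cfg() is the existing configuration-register helper):

	static int example_configure_chip(struct spinand_device *spinand)
	{
		/* e.g. latch one vendor-specific configuration bit once */
		return spinand_upd_cfg(spinand, EXAMPLE_CFG_BIT, EXAMPLE_CFG_BIT);
	}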
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index 9e286612a296..9a9325c0bc49 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -18,10 +18,10 @@
(CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c
index 7c61644bfb10..c521dd6abc4b 100644
--- a/drivers/mtd/nand/spi/foresee.c
+++ b/drivers/mtd/nand/spi/foresee.c
@@ -12,10 +12,10 @@
#define SPINAND_MFR_FORESEE 0xCD
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index cb1d316fc4d8..93e40431dbe2 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -24,36 +24,36 @@
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_f,
- SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
- SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
- SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
@@ -533,6 +533,26 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GM9UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x91, 0x01),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GM9RExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x81, 0x01),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
};
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index eeaf5bf9f082..edf63b9996cf 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -28,10 +28,10 @@ struct macronix_priv {
};
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 8281c9d3f4f7..a49d7cb6a96d 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -35,12 +35,12 @@
(CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE)
static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(x4_write_cache_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
@@ -52,10 +52,10 @@ static SPINAND_OP_VARIANTS(x4_update_cache_variants,
/* Micron MT29F2G01AAAED Device */
static SPINAND_OP_VARIANTS(x4_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(x1_write_cache_variants,
SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 4670bac41245..73bd124273a5 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -22,12 +22,12 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/skyhigh.c b/drivers/mtd/nand/spi/skyhigh.c
index 51d61785df61..bf9ce163e6a7 100644
--- a/drivers/mtd/nand/spi/skyhigh.c
+++ b/drivers/mtd/nand/spi/skyhigh.c
@@ -17,12 +17,12 @@
#define SKYHIGH_CONFIG_PROTECT_EN BIT(1)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 4c6923047aeb..6530257ac0be 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -15,10 +15,10 @@
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_x4_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index b7a28f001a38..87053389a1fc 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#include <linux/units.h>
+#include <linux/delay.h>
#define SPINAND_MFR_WINBOND 0xEF
@@ -18,17 +19,33 @@
#define W25N04KV_STATUS_ECC_5_8_BITFLIPS (3 << 4)
+#define W25N0XJW_SR4 0xD0
+#define W25N0XJW_SR4_HS BIT(2)
+
+#define W35N01JW_VCR_IO_MODE 0x00
+#define W35N01JW_VCR_IO_MODE_SINGLE_SDR 0xFF
+#define W35N01JW_VCR_IO_MODE_OCTAL_SDR 0xDF
+#define W35N01JW_VCR_IO_MODE_OCTAL_DDR_DS 0xE7
+#define W35N01JW_VCR_IO_MODE_OCTAL_DDR 0xC7
+#define W35N01JW_VCR_DUMMY_CLOCK_REG 0x01
+
/*
* "X2" in the core is equivalent to "dual output" in the datasheets,
* "X4" in the core is equivalent to "quad output" in the datasheets.
+ * Quad and octal capable chips feature an absolute maximum frequency of 166MHz.
*/
static SPINAND_OP_VARIANTS(read_cache_octal_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 3, NULL, 0, 120 * HZ_PER_MHZ),
SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 2, NULL, 0, 105 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 20, NULL, 0, 0),
SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 162 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 12, NULL, 0, 124 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 8, NULL, 0, 86 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 2, NULL, 0, 0),
SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 1, NULL, 0, 133 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_octal_variants,
SPINAND_PROG_LOAD_1S_8S_8S_OP(true, 0, NULL, 0),
@@ -42,23 +59,25 @@ static SPINAND_OP_VARIANTS(update_cache_octal_variants,
static SPINAND_OP_VARIANTS(read_cache_dual_quad_dtr_variants,
SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0, 0),
SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 104 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0, 0),
SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 104 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
@@ -230,6 +249,113 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
+static int w25n0xjw_hs_cfg(struct spinand_device *spinand)
+{
+ const struct spi_mem_op *op;
+ bool hs;
+ u8 sr4;
+ int ret;
+
+ op = spinand->op_templates.read_cache;
+ if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
+ hs = false;
+ else if (op->cmd.buswidth == 1 && op->addr.buswidth == 1 &&
+ op->dummy.buswidth == 1 && op->data.buswidth == 1)
+ hs = false;
+ else if (!op->max_freq)
+ hs = true;
+ else
+ hs = false;
+
+ ret = spinand_read_reg_op(spinand, W25N0XJW_SR4, &sr4);
+ if (ret)
+ return ret;
+
+ if (hs)
+ sr4 |= W25N0XJW_SR4_HS;
+ else
+ sr4 &= ~W25N0XJW_SR4_HS;
+
+ ret = spinand_write_reg_op(spinand, W25N0XJW_SR4, sr4);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
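w25n0xjw_hs_cfg() derives the SR4 high-speed setting purely from the read-from-cache template selected at probe time; restated as a decision table for reference:

/* Hedged restatement of the decision above:
 *
 *   any phase is DTR                    -> hs = false
 *   else all phases are 1 bit wide      -> hs = false
 *   else template has no max_freq cap   -> hs = true
 *   else (frequency-capped template)    -> hs = false
 */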
+static int w35n0xjw_write_vcr(struct spinand_device *spinand, u8 reg, u8 val)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x81, 1),
+ SPI_MEM_OP_ADDR(3, reg, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, spinand->scratchbuf, 1));
+ int ret;
+
+ *spinand->scratchbuf = val;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ /*
+ * The write VCR operation doesn't set the busy bit in SR, which means
+ * we cannot perform a status poll. A minimum of 50ns is needed to
+ * complete the write.
+ */
+ ndelay(50);
+
+ return 0;
+}
+
+static int w35n0xjw_vcr_cfg(struct spinand_device *spinand)
+{
+ const struct spi_mem_op *op;
+ unsigned int dummy_cycles;
+ bool dtr, single;
+ u8 io_mode;
+ int ret;
+
+ op = spinand->op_templates.read_cache;
+
+ single = (op->cmd.buswidth == 1 && op->addr.buswidth == 1 && op->data.buswidth == 1);
+ dtr = (op->cmd.dtr || op->addr.dtr || op->data.dtr);
+ if (single && !dtr)
+ io_mode = W35N01JW_VCR_IO_MODE_SINGLE_SDR;
+ else if (!single && !dtr)
+ io_mode = W35N01JW_VCR_IO_MODE_OCTAL_SDR;
+ else if (!single && dtr)
+ io_mode = W35N01JW_VCR_IO_MODE_OCTAL_DDR;
+ else
+ return -EINVAL;
+
+ ret = w35n0xjw_write_vcr(spinand, W35N01JW_VCR_IO_MODE, io_mode);
+ if (ret)
+ return ret;
+
+ dummy_cycles = ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
+ switch (dummy_cycles) {
+ case 8:
+ case 12:
+ case 16:
+ case 20:
+ case 24:
+ case 28:
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = w35n0xjw_write_vcr(spinand, W35N01JW_VCR_DUMMY_CLOCK_REG, dummy_cycles);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
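w35n0xjw_vcr_cfg() converts the template's dummy phase into clock cycles before programming the dummy-clock VCR. A hedged worked example of that conversion:

/* Same arithmetic as the dummy_cycles computation above. */
static unsigned int dummy_clk_cycles(unsigned int nbytes,
				     unsigned int buswidth, bool dtr)
{
	return ((nbytes * 8) / buswidth) / (dtr ? 2 : 1);
}

/* Assuming the dummy phase runs at the listed bus width:
 *   20 dummy bytes, 8 lines, SDR -> (20 * 8 / 8) / 1 = 20 cycles
 *    3 dummy bytes, 1 line,  DDR -> (3 * 8 / 1) / 2  = 12 cycles
 * Both land in the accepted set {8, 12, 16, 20, 24, 28}.
 */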
static const struct spinand_info winbond_spinand_table[] = {
/* 512M-bit densities */
SPINAND_INFO("W25N512GW", /* 1.8V */
@@ -268,7 +394,8 @@ static const struct spinand_info winbond_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w25n0xjw_hs_cfg)),
SPINAND_INFO("W25N01KV", /* 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21),
NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1),
@@ -286,7 +413,8 @@ static const struct spinand_info winbond_spinand_table[] = {
&write_cache_octal_variants,
&update_cache_octal_variants),
0,
- SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)),
SPINAND_INFO("W35N02JW", /* 1.8V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x22),
NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 2, 1),
@@ -295,7 +423,8 @@ static const struct spinand_info winbond_spinand_table[] = {
&write_cache_octal_variants,
&update_cache_octal_variants),
0,
- SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)),
SPINAND_INFO("W35N04JW", /* 1.8V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x23),
NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 4, 1),
@@ -304,7 +433,8 @@ static const struct spinand_info winbond_spinand_table[] = {
&write_cache_octal_variants,
&update_cache_octal_variants),
0,
- SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w35n0xjw_vcr_cfg)),
/* 2G-bit densities */
SPINAND_INFO("W25M02GV", /* 2x1G-bit 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21),
@@ -324,7 +454,8 @@ static const struct spinand_info winbond_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+ SPINAND_CONFIGURE_CHIP(w25n0xjw_hs_cfg)),
SPINAND_INFO("W25N02KV", /* 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
index 37336d5958a9..5915b37b47f5 100644
--- a/drivers/mtd/nand/spi/xtx.c
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -23,12 +23,12 @@
#define XT26XXXD_STATUS_ECC_UNCOR_ERROR (2)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 64d319e959b2..868aa3d35d09 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -228,6 +228,25 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
return BLOCK_NIL;
}
+static noinline_for_stack void NFTL_move_block(struct mtd_info *mtd, loff_t src, loff_t dst)
+{
+ unsigned char movebuf[512];
+ struct nftl_oob oob;
+ size_t retlen;
+ int ret;
+
+ ret = mtd_read(mtd, src, 512, &retlen, movebuf);
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
+ ret = mtd_read(mtd, src, 512, &retlen, movebuf);
+ if (ret != -EIO)
+ printk("Error went away on retry.\n");
+ }
+ memset(&oob, 0xff, sizeof(struct nftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+
+ nftl_write(mtd, dst, 512, &retlen, movebuf, (char *)&oob);
+}
+
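Factoring the copy into NFTL_move_block() marked noinline_for_stack keeps the 512-byte bounce buffer out of NFTL_foldchain()'s frame, which otherwise carries it for the whole fold even when no sector is moved. A hedged sketch of the pattern (error handling and OOB elided):

static noinline_for_stack void copy_512(struct mtd_info *mtd,
					loff_t src, loff_t dst)
{
	unsigned char buf[512];	/* lives only in this helper's frame */
	size_t retlen;

	mtd_read(mtd, src, sizeof(buf), &retlen, buf);
	mtd_write(mtd, dst, sizeof(buf), &retlen, buf);
}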
static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
{
struct mtd_info *mtd = nftl->mbd.mtd;
@@ -389,9 +408,6 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
*/
pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN);
for (block = 0; block < nftl->EraseSize / 512 ; block++) {
- unsigned char movebuf[512];
- int ret;
-
/* If it's in the target EUN already, or if it's pending write, do nothing */
if (BlockMap[block] == targetEUN ||
(pendingblock == (thisVUC * (nftl->EraseSize / 512) + block))) {
@@ -403,25 +419,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = mtd_read(mtd,
- (nftl->EraseSize * BlockMap[block]) + (block * 512),
- 512,
- &retlen,
- movebuf);
- if (ret < 0 && !mtd_is_bitflip(ret)) {
- ret = mtd_read(mtd,
- (nftl->EraseSize * BlockMap[block]) + (block * 512),
- 512,
- &retlen,
- movebuf);
- if (ret != -EIO)
- printk("Error went away on retry.\n");
- }
- memset(&oob, 0xff, sizeof(struct nftl_oob));
- oob.b.Status = oob.b.Status1 = SECTOR_USED;
-
- nftl_write(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) +
- (block * 512), 512, &retlen, movebuf, (char *)&oob);
+ NFTL_move_block(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ (nftl->EraseSize * targetEUN) + (block * 512));
}
/* add the header so that it is now a valid chain */
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index e6bab2d00c92..187239ccd549 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -189,7 +189,7 @@ static int mt25qu512a_post_bfpt_fixup(struct spi_nor *nor,
return 0;
}
-static struct spi_nor_fixups mt25qu512a_fixups = {
+static const struct spi_nor_fixups mt25qu512a_fixups = {
.post_bfpt = mt25qu512a_post_bfpt_fixup,
};
@@ -225,15 +225,15 @@ static int st_nor_two_die_late_init(struct spi_nor *nor)
return spi_nor_set_4byte_addr_mode(nor, true);
}
-static struct spi_nor_fixups n25q00_fixups = {
+static const struct spi_nor_fixups n25q00_fixups = {
.late_init = st_nor_four_die_late_init,
};
-static struct spi_nor_fixups mt25q01_fixups = {
+static const struct spi_nor_fixups mt25q01_fixups = {
.late_init = st_nor_two_die_late_init,
};
-static struct spi_nor_fixups mt25q02_fixups = {
+static const struct spi_nor_fixups mt25q02_fixups = {
.late_init = st_nor_four_die_late_init,
};
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index bf08dbf5e742..a0296c871634 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -17,6 +17,7 @@
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
#define SPINOR_OP_CLPEF 0x82 /* Clear program/erase failure flags */
+#define SPINOR_OP_CYPRESS_EX4B 0xB8 /* Exit 4-byte address mode */
#define SPINOR_OP_CYPRESS_DIE_ERASE 0x61 /* Chip (die) erase */
#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
@@ -58,6 +59,13 @@
SPI_MEM_OP_DUMMY(ndummy, 0), \
SPI_MEM_OP_DATA_IN(1, buf, 0))
+#define CYPRESS_NOR_EN4B_EX4B_OP(enable) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(enable ? SPINOR_OP_EN4B : \
+ SPINOR_OP_CYPRESS_EX4B, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
#define SPANSION_OP(opcode) \
SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
SPI_MEM_OP_NO_ADDR, \
@@ -356,6 +364,20 @@ static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
return 0;
}
+static int cypress_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+ struct spi_mem_op op = CYPRESS_NOR_EN4B_EX4B_OP(enable);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
+}
+
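The new hook is not called directly here; it is assigned to nor->params->set_4byte_addr_mode in the BFPT fixups below, and the SPI NOR core then invokes it when switching address modes. A hedged, simplified sketch of that call path:

/* Simplified; the real call sites live in the SPI NOR core. */
static int example_switch_addr_mode(struct spi_nor *nor, bool enable)
{
	return nor->params->set_4byte_addr_mode(nor, enable);
}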
/**
* cypress_nor_determine_addr_mode_by_sr1() - Determine current address mode
* (3 or 4-byte) by querying status
@@ -526,6 +548,9 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
struct spi_mem_op op;
int ret;
+ /* Assign 4-byte address mode method that is not determined in BFPT */
+ nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
+
ret = cypress_nor_set_addr_mode_nbytes(nor);
if (ret)
return ret;
@@ -578,7 +603,7 @@ static int s25fs256t_late_init(struct spi_nor *nor)
return 0;
}
-static struct spi_nor_fixups s25fs256t_fixups = {
+static const struct spi_nor_fixups s25fs256t_fixups = {
.post_bfpt = s25fs256t_post_bfpt_fixup,
.post_sfdp = s25fs256t_post_sfdp_fixup,
.late_init = s25fs256t_late_init,
@@ -591,6 +616,9 @@ s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
{
int ret;
+ /* Assign 4-byte address mode method that is not determined in BFPT */
+ nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
+
ret = cypress_nor_set_addr_mode_nbytes(nor);
if (ret)
return ret;
@@ -650,7 +678,7 @@ static int s25hx_t_late_init(struct spi_nor *nor)
return 0;
}
-static struct spi_nor_fixups s25hx_t_fixups = {
+static const struct spi_nor_fixups s25hx_t_fixups = {
.post_bfpt = s25hx_t_post_bfpt_fixup,
.post_sfdp = s25hx_t_post_sfdp_fixup,
.late_init = s25hx_t_late_init,
@@ -718,6 +746,9 @@ static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
+ /* Assign 4-byte address mode method that is not determined in BFPT */
+ nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
+
return cypress_nor_set_addr_mode_nbytes(nor);
}
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index 9c9328478d8a..9b07f83aeac7 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -56,7 +56,6 @@ static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
u64 *len)
{
- struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
u8 mask = spi_nor_get_sr_bp_mask(nor);
u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
@@ -77,13 +76,13 @@ static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
min_prot_len = spi_nor_get_min_prot_length_sr(nor);
*len = min_prot_len << (bp - 1);
- if (*len > mtd->size)
- *len = mtd->size;
+ if (*len > nor->params->size)
+ *len = nor->params->size;
if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
*ofs = 0;
else
- *ofs = mtd->size - *len;
+ *ofs = nor->params->size - *len;
}
/*
@@ -158,7 +157,6 @@ static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, u64 len,
*/
static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
{
- struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
int ret, status_old, status_new;
u8 mask = spi_nor_get_sr_bp_mask(nor);
@@ -183,7 +181,7 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
can_be_bottom = false;
/* If anything above us is unlocked, we can't use 'top' protection */
- if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ if (!spi_nor_is_locked_sr(nor, ofs + len, nor->params->size - (ofs + len),
status_old))
can_be_top = false;
@@ -195,11 +193,11 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
/* lock_len: length of region that should end up locked */
if (use_top)
- lock_len = mtd->size - ofs;
+ lock_len = nor->params->size - ofs;
else
lock_len = ofs + len;
- if (lock_len == mtd->size) {
+ if (lock_len == nor->params->size) {
val = mask;
} else {
min_prot_len = spi_nor_get_min_prot_length_sr(nor);
@@ -248,7 +246,6 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
*/
static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
{
- struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
int ret, status_old, status_new;
u8 mask = spi_nor_get_sr_bp_mask(nor);
@@ -273,7 +270,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
can_be_top = false;
/* If anything above us is locked, we can't use 'bottom' protection */
- if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ if (!spi_nor_is_unlocked_sr(nor, ofs + len, nor->params->size - (ofs + len),
status_old))
can_be_bottom = false;
@@ -285,7 +282,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
/* lock_len: length of region that should remain locked */
if (use_top)
- lock_len = mtd->size - (ofs + len);
+ lock_len = nor->params->size - (ofs + len);
else
lock_len = ofs;
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index f1ea8677467f..df0a5a57b072 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -791,33 +791,6 @@ int ubi_sync(int ubi_num)
}
EXPORT_SYMBOL_GPL(ubi_sync);
-/**
- * ubi_flush - flush UBI work queue.
- * @ubi_num: UBI device to flush work queue
- * @vol_id: volume id to flush for
- * @lnum: logical eraseblock number to flush for
- *
- * This function executes all pending works for a particular volume id / logical
- * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
- * a wildcard for all of the corresponding volume numbers or logical
- * eraseblock numbers. It returns zero in case of success and a negative error
- * code in case of failure.
- */
-int ubi_flush(int ubi_num, int vol_id, int lnum)
-{
- struct ubi_device *ubi;
- int err = 0;
-
- ubi = ubi_get_device(ubi_num);
- if (!ubi)
- return -ENODEV;
-
- err = ubi_wl_flush(ubi, vol_id, lnum);
- ubi_put_device(ubi);
- return err;
-}
-EXPORT_SYMBOL_GPL(ubi_flush);
-
BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
/**
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 734a0b3242a9..ed86537b2f61 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -979,7 +979,7 @@ static void amt_event_send_request(struct amt_dev *amt)
amt->req_cnt++;
out:
exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
- mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
+ mod_delayed_work(amt_wq, &amt->req_wq, secs_to_jiffies(exp));
}
static void amt_req_work(struct work_struct *work)
@@ -1046,7 +1046,8 @@ static bool amt_send_membership_update(struct amt_dev *amt,
amt->gw_port,
amt->relay_port,
false,
- false);
+ false,
+ 0);
amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
return false;
}
@@ -1103,7 +1104,8 @@ static void amt_send_multicast_data(struct amt_dev *amt,
amt->relay_port,
tunnel->source_port,
false,
- false);
+ false,
+ 0);
}
static bool amt_send_membership_query(struct amt_dev *amt,
@@ -1161,7 +1163,8 @@ static bool amt_send_membership_query(struct amt_dev *amt,
amt->relay_port,
tunnel->source_port,
false,
- false);
+ false,
+ 0);
amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
return false;
}
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index a9dffdcac805..0df3208783ad 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -362,8 +362,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, bareudp->port,
!net_eq(bareudp->net, dev_net(bareudp->dev)),
- !test_bit(IP_TUNNEL_CSUM_BIT,
- info->key.tun_flags));
+ !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags),
+ 0);
return 0;
free_dst:
@@ -431,7 +431,8 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
&saddr, &daddr, prio, ttl,
info->key.label, sport, bareudp->port,
!test_bit(IP_TUNNEL_CSUM_BIT,
- info->key.tun_flags));
+ info->key.tun_flags),
+ 0);
return 0;
free_dst:
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c6807e473ab7..2fca8e84ab10 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -982,6 +982,17 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
return 0;
}
+static void ad_cond_set_peer_notif(struct port *port)
+{
+ struct bonding *bond = port->slave->bond;
+
+ if (bond->params.broadcast_neighbor && rtnl_trylock()) {
+ bond->send_peer_notif = bond->params.num_peer_notif *
+ max(1, bond->params.peer_notif_delay);
+ rtnl_unlock();
+ }
+}
+
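ad_cond_set_peer_notif() runs from the 802.3ad state machine, so it takes RTNL only opportunistically: if the lock is contended, the peer notification is skipped rather than blocking the state machine. A hedged sketch of the idiom:

static void maybe_do_rtnl_work(void)
{
	if (!rtnl_trylock())
		return;		/* contended: skip, don't block */

	/* ... touch RTNL-protected state here ... */

	rtnl_unlock();
}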
/**
* ad_mux_machine - handle a port's mux state machine
* @port: the port we're looking at
@@ -1378,7 +1389,7 @@ static void ad_tx_machine(struct port *port)
/* check if tx timer expired, to verify that we do not send more than
* 3 packets per second
*/
- if (port->sm_tx_timer_counter && !(--port->sm_tx_timer_counter)) {
+ if (!port->sm_tx_timer_counter || !(--port->sm_tx_timer_counter)) {
/* check if there is something to send */
if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) {
__update_lacpdu_from_port(port);
@@ -1393,12 +1404,13 @@ static void ad_tx_machine(struct port *port)
* again until demanded
*/
port->ntt = false;
+
+ /* restart tx timer (to verify that we will not
+ * exceed AD_MAX_TX_IN_SECOND)
+ */
+ port->sm_tx_timer_counter = ad_ticks_per_sec / AD_MAX_TX_IN_SECOND;
}
}
- /* restart tx timer(to verify that we will not exceed
- * AD_MAX_TX_IN_SECOND
- */
- port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
}
}
@@ -2061,6 +2073,8 @@ static void ad_enable_collecting_distributing(struct port *port,
__enable_port(port);
/* Slave array needs update */
*update_slave_arr = true;
+ /* Should notify peers if possible */
+ ad_cond_set_peer_notif(port);
}
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c4d53e8e7c15..257333c88710 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -212,6 +212,8 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0);
unsigned int bond_net_id __read_mostly;
+DEFINE_STATIC_KEY_FALSE(bond_bcast_neigh_enabled);
+
static const struct flow_dissector_key flow_keys_bonding_keys[] = {
{
.key_id = FLOW_DISSECTOR_KEY_CONTROL,
@@ -1038,7 +1040,7 @@ static int bond_set_dev_addr(struct net_device *bond_dev,
slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
bond_dev, slave_dev, slave_dev->addr_len);
- err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
+ err = netif_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
if (err)
return err;
@@ -1235,17 +1237,32 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
/* must be called in RCU critical section or with RTNL held */
static bool bond_should_notify_peers(struct bonding *bond)
{
- struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);
+ struct bond_up_slave *usable;
+ struct slave *slave = NULL;
- if (!slave || !bond->send_peer_notif ||
+ if (!bond->send_peer_notif ||
bond->send_peer_notif %
max(1, bond->params.peer_notif_delay) != 0 ||
- !netif_carrier_ok(bond->dev) ||
- test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
+ !netif_carrier_ok(bond->dev))
return false;
+ /* send_peer_notif is set by the active-backup and 8023ad
+ * modes, and cleared in bond_close() when changing mode,
+ * so it is safe to check only the bond mode here.
+ */
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ usable = rcu_dereference_rtnl(bond->usable_slaves);
+ if (!usable || !READ_ONCE(usable->count))
+ return false;
+ } else {
+ slave = rcu_dereference_rtnl(bond->curr_active_slave);
+ if (!slave || test_bit(__LINK_STATE_LINKWATCH_PENDING,
+ &slave->dev->state))
+ return false;
+ }
+
netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
- slave ? slave->dev->name : "NULL");
+ slave ? slave->dev->name : "all");
return true;
}
@@ -2652,7 +2669,7 @@ static int __bond_release_one(struct net_device *bond_dev,
if (unregister) {
netdev_lock_ops(slave_dev);
- __dev_set_mtu(slave_dev, slave->original_mtu);
+ __netif_set_mtu(slave_dev, slave->original_mtu);
netdev_unlock_ops(slave_dev);
} else {
dev_set_mtu(slave_dev, slave->original_mtu);
@@ -4456,6 +4473,9 @@ static int bond_open(struct net_device *bond_dev)
bond_for_each_slave(bond, slave, iter)
dev_mc_add(slave->dev, lacpdu_mcast_addr);
+
+ if (bond->params.broadcast_neighbor)
+ static_branch_inc(&bond_bcast_neigh_enabled);
}
if (bond_mode_can_use_xmit_hash(bond))
@@ -4475,6 +4495,10 @@ static int bond_close(struct net_device *bond_dev)
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
+ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
+ bond->params.broadcast_neighbor)
+ static_branch_dec(&bond_bcast_neigh_enabled);
+
if (bond_uses_primary(bond)) {
rcu_read_lock();
slave = rcu_dereference(bond->curr_active_slave);
@@ -5310,6 +5334,37 @@ static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
return slaves->arr[hash % count];
}
+static bool bond_should_broadcast_neighbor(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct {
+ struct ipv6hdr ip6;
+ struct icmp6hdr icmp6;
+ } *combined, _combined;
+
+ if (!static_branch_unlikely(&bond_bcast_neigh_enabled))
+ return false;
+
+ if (!bond->params.broadcast_neighbor)
+ return false;
+
+ if (skb->protocol == htons(ETH_P_ARP))
+ return true;
+
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ combined = skb_header_pointer(skb, skb_mac_header_len(skb),
+ sizeof(_combined),
+ &_combined);
+ if (combined && combined->ip6.nexthdr == NEXTHDR_ICMP &&
+ (combined->icmp6.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+ combined->icmp6.icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT))
+ return true;
+ }
+
+ return false;
+}
+
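bond_should_broadcast_neighbor() parses the IPv6 and ICMPv6 headers with skb_header_pointer(), which returns a pointer into the skb when the headers are linear and otherwise copies them into the on-stack _combined buffer. A hedged minimal sketch of the same idiom:

#include <net/ipv6.h>

static bool example_is_icmp6(struct sk_buff *skb, unsigned int offset)
{
	struct ipv6hdr _h;
	const struct ipv6hdr *h;

	/* Never NULL-derefs and never modifies the skb. */
	h = skb_header_pointer(skb, offset, sizeof(_h), &_h);
	return h && h->nexthdr == NEXTHDR_ICMP;
}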
/* Use this Xmit function for 3AD as well as XOR modes. The current
* usable slave array is formed in the control path. The xmit function
* just calculates hash and sends the packet out.
@@ -5329,17 +5384,27 @@ static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
return bond_tx_drop(dev, skb);
}
-/* in broadcast mode, we send everything to all usable interfaces. */
+/* In broadcast mode, we send everything to all or only the usable slave
+ * interfaces. Must be called under rcu_read_lock().
+ */
static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
- struct net_device *bond_dev)
+ struct net_device *bond_dev,
+ bool all_slaves)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave = NULL;
- struct list_head *iter;
+ struct bond_up_slave *slaves;
bool xmit_suc = false;
bool skb_used = false;
+ int slaves_count, i;
- bond_for_each_slave_rcu(bond, slave, iter) {
+ if (all_slaves)
+ slaves = rcu_dereference(bond->all_slaves);
+ else
+ slaves = rcu_dereference(bond->usable_slaves);
+
+ slaves_count = slaves ? READ_ONCE(slaves->count) : 0;
+ for (i = 0; i < slaves_count; i++) {
+ struct slave *slave = slaves->arr[i];
struct sk_buff *skb2;
if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
@@ -5577,10 +5642,13 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
case BOND_MODE_ACTIVEBACKUP:
return bond_xmit_activebackup(skb, dev);
case BOND_MODE_8023AD:
+ if (bond_should_broadcast_neighbor(skb, dev))
+ return bond_xmit_broadcast(skb, dev, false);
+ fallthrough;
case BOND_MODE_XOR:
return bond_3ad_xor_xmit(skb, dev);
case BOND_MODE_BROADCAST:
- return bond_xmit_broadcast(skb, dev);
+ return bond_xmit_broadcast(skb, dev, true);
case BOND_MODE_ALB:
return bond_alb_xmit(skb, dev);
case BOND_MODE_TLB:
@@ -6456,6 +6524,7 @@ static int __init bond_check_params(struct bond_params *params)
eth_zero_addr(params->ad_actor_system);
params->ad_user_port_key = ad_user_port_key;
params->coupled_control = 1;
+ params->broadcast_neighbor = 0;
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
@@ -6674,3 +6743,4 @@ module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index ac5e402c34bc..57fff2421f1b 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -124,6 +124,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
[IFLA_BOND_COUPLED_CONTROL] = { .type = NLA_U8 },
+ [IFLA_BOND_BROADCAST_NEIGH] = { .type = NLA_U8 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -561,6 +562,16 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
+ if (data[IFLA_BOND_BROADCAST_NEIGH]) {
+ int broadcast_neigh = nla_get_u8(data[IFLA_BOND_BROADCAST_NEIGH]);
+
+ bond_opt_initval(&newval, broadcast_neigh);
+ err = __bond_opt_set(bond, BOND_OPT_BROADCAST_NEIGH, &newval,
+ data[IFLA_BOND_BROADCAST_NEIGH], extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -630,6 +641,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(sizeof(struct nlattr)) +
nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
nla_total_size(sizeof(u8)) + /* IFLA_BOND_COUPLED_CONTROL */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_BROADCAST_NEIGH */
0;
}
@@ -793,6 +805,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.coupled_control))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_BROADCAST_NEIGH,
+ bond->params.broadcast_neighbor))
+ goto nla_put_failure;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 91893c29b899..1d639a3be6ba 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -87,6 +87,8 @@ static int bond_option_missed_max_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_coupled_control_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_broadcast_neigh_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
{ "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
@@ -240,6 +242,12 @@ static const struct bond_opt_value bond_coupled_control_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_broadcast_neigh_tbl[] = {
+ { "off", 0, BOND_VALFLAG_DEFAULT},
+ { "on", 1, 0},
+ { NULL, -1, 0}
+};
+
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -513,6 +521,14 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.flags = BOND_OPTFLAG_IFDOWN,
.values = bond_coupled_control_tbl,
.set = bond_option_coupled_control_set,
+ },
+ [BOND_OPT_BROADCAST_NEIGH] = {
+ .id = BOND_OPT_BROADCAST_NEIGH,
+ .name = "broadcast_neighbor",
+ .desc = "Broadcast neighbor packets to all active slaves",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .values = bond_broadcast_neigh_tbl,
+ .set = bond_option_broadcast_neigh_set,
}
};
@@ -894,6 +910,13 @@ static int bond_option_mode_set(struct bonding *bond,
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
bond->params.mode = newval->value;
+ /* When changing mode, the bond device is down. bond_close() already
+ * decrements bond_bcast_neigh_enabled if broadcast_neighbor was
+ * enabled in 8023ad mode, so only clear broadcast_neighbor to 0
+ * here.
+ */
+ bond->params.broadcast_neighbor = 0;
+
if (bond->dev->reg_state == NETREG_REGISTERED) {
bool update = false;
@@ -1840,3 +1863,22 @@ static int bond_option_coupled_control_set(struct bonding *bond,
bond->params.coupled_control = newval->value;
return 0;
}
+
+static int bond_option_broadcast_neigh_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ if (bond->params.broadcast_neighbor == newval->value)
+ return 0;
+
+ bond->params.broadcast_neighbor = newval->value;
+ if (bond->dev->flags & IFF_UP) {
+ if (bond->params.broadcast_neighbor)
+ static_branch_inc(&bond_bcast_neigh_enabled);
+ else
+ static_branch_dec(&bond_bcast_neigh_enabled);
+ }
+
+ netdev_dbg(bond->dev, "Setting broadcast_neighbor to %s (%llu)\n",
+ newval->string, newval->value);
+ return 0;
+}
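The setter pairs static_branch_inc()/static_branch_dec() with the IFF_UP transitions handled in bond_open()/bond_close() above, so the test at the top of bond_should_broadcast_neighbor() compiles down to a patched no-op while no bond has the feature enabled. A hedged sketch of the counting static-key idiom (linux/jump_label.h):

DEFINE_STATIC_KEY_FALSE(example_key);

static bool example_fast_path(void)
{
	/* Patched to a jump only while the key count is non-zero. */
	return static_branch_unlikely(&example_key);
}

/* enable:  static_branch_inc(&example_key);
 * disable: static_branch_dec(&example_key);
 * inc/dec keeps the key balanced across multiple enabled bonds.
 */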
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index cf989bea9aa3..d43d56694667 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -154,6 +154,7 @@ config CAN_JANZ_ICAN3
config CAN_KVASER_PCIEFD
depends on PCI
tristate "Kvaser PCIe FD cards"
+ select NET_DEVLINK
help
This is a driver for the Kvaser PCI Express CAN FD family.
@@ -201,7 +202,7 @@ config CAN_SUN4I
be called sun4i_can.
config CAN_TI_HECC
- depends on ARM
+ depends on ARM || COMPILE_TEST
tristate "TI High End CAN Controller"
select CAN_RX_OFFLOAD
help
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index a71db2cfe990..56138d8ddfd2 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
-obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o
+obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd/
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_M_CAN) += m_can/
obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index bf6398772960..8bd3f0fc385c 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -506,11 +506,12 @@ static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf)
* @buf: TXT Buffer index to which frame is inserted (0-based)
* @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame
*
- * Return: True - Frame inserted successfully
- * False - Frame was not inserted due to one of:
- * 1. TXT Buffer is not writable (it is in wrong state)
- * 2. Invalid TXT buffer index
- * 3. Invalid frame length
+ * Return:
+ * * True - Frame inserted successfully
+ * * False - Frame was not inserted due to one of:
+ * 1. TXT Buffer is not writable (it is in wrong state)
+ * 2. Invalid TXT buffer index
+ * 3. Invalid frame length
*/
static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf,
bool isfdf)
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
index 3809c148fb88..a94bd67c670c 100644
--- a/drivers/net/can/dev/calc_bittiming.c
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -179,7 +179,7 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
return;
- *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+ *ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
/* As specified in ISO 11898-1 section 11.3.3 "Transmitter
* delay compensation" (TDC) is only applicable if data BRP is
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index f0e3f0d538fb..d9f6ab3efb97 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -18,7 +18,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
[IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
[IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
[IFLA_CAN_TDC] = { .type = NLA_NESTED },
[IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED },
@@ -67,12 +67,12 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
+ u32 tdc_flags = cm->flags & CAN_CTRLMODE_FD_TDC_MASK;
is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
/* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */
- if (tdc_flags == CAN_CTRLMODE_TDC_MASK)
+ if (tdc_flags == CAN_CTRLMODE_FD_TDC_MASK)
return -EOPNOTSUPP;
/* If one of the CAN_CTRLMODE_TDC_* flag is set then
* TDC must be set and vice-versa
@@ -144,7 +144,7 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
const struct can_tdc_const *tdc_const = priv->fd.tdc_const;
int err;
- if (!tdc_const || !can_tdc_is_enabled(priv))
+ if (!tdc_const || !can_fd_tdc_is_enabled(priv))
return -EOPNOTSUPP;
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
@@ -189,7 +189,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
- u32 tdc_mask = 0;
+ bool fd_tdc_flag_provided = false;
int err;
/* We need synchronization with dev->stop() */
@@ -230,16 +230,16 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
dev->mtu = CAN_MTU;
memset(&priv->fd.data_bittiming, 0,
sizeof(priv->fd.data_bittiming));
- priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
}
- tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK;
+ fd_tdc_flag_provided = cm->mask & CAN_CTRLMODE_FD_TDC_MASK;
/* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually
* exclusive: make sure to turn the other one off
*/
- if (tdc_mask)
- priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
+ if (fd_tdc_flag_provided)
+ priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_FD_TDC_MASK;
}
if (data[IFLA_CAN_BITTIMING]) {
@@ -351,10 +351,10 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
extack);
if (err) {
- priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
return err;
}
- } else if (!tdc_mask) {
+ } else if (!fd_tdc_flag_provided) {
/* Neither of TDC parameters nor TDC flags are
* provided: do calculation
*/
@@ -421,7 +421,7 @@ static size_t can_tdc_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
}
- if (can_tdc_is_enabled(priv)) {
+ if (can_fd_tdc_is_enabled(priv)) {
if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
priv->fd.do_get_auto_tdcv)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
@@ -502,7 +502,7 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
goto err_cancel;
- if (can_tdc_is_enabled(priv)) {
+ if (can_fd_tdc_is_enabled(priv)) {
u32 tdcv;
int err = -EINVAL;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 60c7b83b4539..bfa5cbe88017 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1867,7 +1867,7 @@ static ssize_t fwinfo_show(struct device *dev,
{
struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
- return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo);
+ return sysfs_emit(buf, "%s\n", mod->fwinfo);
}
static DEVICE_ATTR_RW(termination);
diff --git a/drivers/net/can/kvaser_pciefd/Makefile b/drivers/net/can/kvaser_pciefd/Makefile
new file mode 100644
index 000000000000..8c5b8cdc6b5f
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o
+kvaser_pciefd-y = kvaser_pciefd_core.o kvaser_pciefd_devlink.o
diff --git a/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h b/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h
new file mode 100644
index 000000000000..08c9ddc1ee85
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/* kvaser_pciefd common definitions and declarations
+ *
+ * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved.
+ */
+
+#ifndef _KVASER_PCIEFD_H
+#define _KVASER_PCIEFD_H
+
+#include <linux/can/dev.h>
+#include <linux/completion.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
+#define KVASER_PCIEFD_DMA_COUNT 2U
+#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
+#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
+
+struct kvaser_pciefd;
+
+struct kvaser_pciefd_address_offset {
+ u32 serdes;
+ u32 pci_ien;
+ u32 pci_irq;
+ u32 sysid;
+ u32 loopback;
+ u32 kcan_srb_fifo;
+ u32 kcan_srb;
+ u32 kcan_ch0;
+ u32 kcan_ch1;
+};
+
+struct kvaser_pciefd_irq_mask {
+ u32 kcan_rx0;
+ u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS];
+ u32 all;
+};
+
+struct kvaser_pciefd_dev_ops {
+ void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
+};
+
+struct kvaser_pciefd_driver_data {
+ const struct kvaser_pciefd_address_offset *address_offset;
+ const struct kvaser_pciefd_irq_mask *irq_mask;
+ const struct kvaser_pciefd_dev_ops *ops;
+};
+
+struct kvaser_pciefd_fw_version {
+ u8 major;
+ u8 minor;
+ u16 build;
+};
+
+struct kvaser_pciefd_can {
+ struct can_priv can;
+ struct devlink_port devlink_port;
+ struct kvaser_pciefd *kv_pcie;
+ void __iomem *reg_base;
+ struct can_berr_counter bec;
+ u32 ioc;
+ u8 cmd_seq;
+ u8 tx_max_count;
+ u8 tx_idx;
+ u8 ack_idx;
+ int err_rep_cnt;
+ unsigned int completed_tx_pkts;
+ unsigned int completed_tx_bytes;
+ spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
+ struct timer_list bec_poll_timer;
+ struct completion start_comp, flush_comp;
+};
+
+struct kvaser_pciefd {
+ struct pci_dev *pci;
+ void __iomem *reg_base;
+ struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
+ const struct kvaser_pciefd_driver_data *driver_data;
+ void *dma_data[KVASER_PCIEFD_DMA_COUNT];
+ u8 nr_channels;
+ u32 bus_freq;
+ u32 freq;
+ u32 freq_to_ticks_div;
+ struct kvaser_pciefd_fw_version fw_version;
+};
+
+extern const struct devlink_ops kvaser_pciefd_devlink_ops;
+
+int kvaser_pciefd_devlink_port_register(struct kvaser_pciefd_can *can);
+void kvaser_pciefd_devlink_port_unregister(struct kvaser_pciefd_can *can);
+#endif /* _KVASER_PCIEFD_H */
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c
index 09510663988c..0880023611be 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c
@@ -5,6 +5,8 @@
* - PEAK linux canfd driver
*/
+#include "kvaser_pciefd.h"
+
#include <linux/bitfield.h>
#include <linux/can/dev.h>
#include <linux/device.h>
@@ -27,10 +29,6 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256U
-#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
-#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
-#define KVASER_PCIEFD_DMA_COUNT 2U
-#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
#define KVASER_PCIEFD_VENDOR 0x1a07
@@ -66,6 +64,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
+#define KVASER_PCIEFD_KCAN_IOC_REG 0x404
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414
@@ -136,6 +135,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
+/* Control CAN LED, active low */
+#define KVASER_PCIEFD_KCAN_IOC_LED BIT(0)
+
/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
/* Tx FIFO empty */
@@ -292,35 +294,6 @@ static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
dma_addr_t addr, int index);
-struct kvaser_pciefd_address_offset {
- u32 serdes;
- u32 pci_ien;
- u32 pci_irq;
- u32 sysid;
- u32 loopback;
- u32 kcan_srb_fifo;
- u32 kcan_srb;
- u32 kcan_ch0;
- u32 kcan_ch1;
-};
-
-struct kvaser_pciefd_dev_ops {
- void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
- dma_addr_t addr, int index);
-};
-
-struct kvaser_pciefd_irq_mask {
- u32 kcan_rx0;
- u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS];
- u32 all;
-};
-
-struct kvaser_pciefd_driver_data {
- const struct kvaser_pciefd_address_offset *address_offset;
- const struct kvaser_pciefd_irq_mask *irq_mask;
- const struct kvaser_pciefd_dev_ops *ops;
-};
-
static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = {
.serdes = 0x1000,
.pci_ien = 0x50,
@@ -405,35 +378,6 @@ static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data =
.ops = &kvaser_pciefd_xilinx_dev_ops,
};
-struct kvaser_pciefd_can {
- struct can_priv can;
- struct kvaser_pciefd *kv_pcie;
- void __iomem *reg_base;
- struct can_berr_counter bec;
- u8 cmd_seq;
- u8 tx_max_count;
- u8 tx_idx;
- u8 ack_idx;
- int err_rep_cnt;
- unsigned int completed_tx_pkts;
- unsigned int completed_tx_bytes;
- spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
- struct timer_list bec_poll_timer;
- struct completion start_comp, flush_comp;
-};
-
-struct kvaser_pciefd {
- struct pci_dev *pci;
- void __iomem *reg_base;
- struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
- const struct kvaser_pciefd_driver_data *driver_data;
- void *dma_data[KVASER_PCIEFD_DMA_COUNT];
- u8 nr_channels;
- u32 bus_freq;
- u32 freq;
- u32 freq_to_ticks_div;
-};
-
struct kvaser_pciefd_rx_packet {
u32 header[2];
u64 timestamp;
@@ -528,6 +472,16 @@ static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can
kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT);
}
+static inline void kvaser_pciefd_set_led(struct kvaser_pciefd_can *can, bool on)
+{
+ if (on)
+ can->ioc &= ~KVASER_PCIEFD_KCAN_IOC_LED;
+ else
+ can->ioc |= KVASER_PCIEFD_KCAN_IOC_LED;
+
+ iowrite32(can->ioc, can->reg_base + KVASER_PCIEFD_KCAN_IOC_REG);
+}
+
static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
u32 mode;
@@ -953,8 +907,32 @@ static const struct net_device_ops kvaser_pciefd_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static int kvaser_pciefd_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct kvaser_pciefd_can *can = netdev_priv(netdev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 3; /* 3 On/Off cycles per second */
+
+ case ETHTOOL_ID_ON:
+ kvaser_pciefd_set_led(can, true);
+ return 0;
+
+ case ETHTOOL_ID_OFF:
+ case ETHTOOL_ID_INACTIVE:
+ kvaser_pciefd_set_led(can, false);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
.get_ts_info = can_ethtool_op_get_ts_info_hwts,
+ .set_phys_id = kvaser_pciefd_set_phys_id,
};
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
@@ -965,6 +943,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
struct net_device *netdev;
struct kvaser_pciefd_can *can;
u32 status, tx_nr_packets_max;
+ int ret;
netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
roundup_pow_of_two(KVASER_PCIEFD_CAN_TX_MAX_COUNT));
@@ -982,6 +961,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->completed_tx_bytes = 0;
can->bec.txerr = 0;
can->bec.rxerr = 0;
+ can->can.dev->dev_port = i;
init_completion(&can->start_comp);
init_completion(&can->flush_comp);
@@ -990,6 +970,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
/* Disable Bus load reporting */
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
+ can->ioc = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IOC_REG);
+ kvaser_pciefd_set_led(can, false);
+
tx_nr_packets_max =
FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
@@ -1031,6 +1014,11 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
pcie->can[i] = can;
kvaser_pciefd_pwm_start(can);
+ ret = kvaser_pciefd_devlink_port_register(can);
+ if (ret) {
+ dev_err(&pcie->pci->dev, "Failed to register devlink port\n");
+ return ret;
+ }
}
return 0;
@@ -1163,14 +1151,12 @@ static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
u32 version, srb_status, build;
version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG);
+ build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS,
FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version));
-
- build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
- dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n",
- FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version),
- FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version),
- FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build));
+ pcie->fw_version.major = FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version);
+ pcie->fw_version.minor = FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version);
+ pcie->fw_version.build = FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build);
srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
@@ -1752,6 +1738,7 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
if (can) {
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
kvaser_pciefd_pwm_stop(can);
+ kvaser_pciefd_devlink_port_unregister(can);
free_candev(can->can.dev);
}
}
@@ -1771,13 +1758,16 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
+ struct devlink *devlink;
+ struct device *dev = &pdev->dev;
struct kvaser_pciefd *pcie;
const struct kvaser_pciefd_irq_mask *irq_mask;
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ devlink = devlink_alloc(&kvaser_pciefd_devlink_ops, sizeof(*pcie), dev);
+ if (!devlink)
return -ENOMEM;
+ pcie = devlink_priv(devlink);
pci_set_drvdata(pdev, pcie);
pcie->pci = pdev;
pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
@@ -1785,7 +1775,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
ret = pci_enable_device(pdev);
if (ret)
- return ret;
+ goto err_free_devlink;
ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
if (ret)
@@ -1813,7 +1803,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI);
if (ret < 0) {
- dev_err(&pcie->pci->dev, "Failed to allocate IRQ vectors.\n");
+ dev_err(dev, "Failed to allocate IRQ vectors.\n");
goto err_teardown_can_ctrls;
}
@@ -1826,7 +1816,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
if (ret) {
- dev_err(&pcie->pci->dev, "Failed to request IRQ %d\n", pcie->pci->irq);
+ dev_err(dev, "Failed to request IRQ %d\n", pcie->pci->irq);
goto err_pci_free_irq_vectors;
}
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
@@ -1849,6 +1839,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
if (ret)
goto err_free_irq;
+ devlink_register(devlink);
+
return 0;
err_free_irq:
@@ -1872,6 +1864,9 @@ err_release_regions:
err_disable_pci:
pci_disable_device(pdev);
+err_free_devlink:
+ devlink_free(devlink);
+
return ret;
}
@@ -1886,6 +1881,7 @@ static void kvaser_pciefd_remove(struct pci_dev *pdev)
unregister_candev(can->can.dev);
timer_delete(&can->bec_poll_timer);
kvaser_pciefd_pwm_stop(can);
+ kvaser_pciefd_devlink_port_unregister(can);
}
kvaser_pciefd_disable_irq_srcs(pcie);
@@ -1895,9 +1891,11 @@ static void kvaser_pciefd_remove(struct pci_dev *pdev)
for (i = 0; i < pcie->nr_channels; ++i)
free_candev(pcie->can[i]->can.dev);
+ devlink_unregister(priv_to_devlink(pcie));
pci_iounmap(pdev, pcie->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
+ devlink_free(priv_to_devlink(pcie));
}
static struct pci_driver kvaser_pciefd = {
diff --git a/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c
new file mode 100644
index 000000000000..1d61a8b0eeba
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/* kvaser_pciefd devlink functions
+ *
+ * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved.
+ */
+#include "kvaser_pciefd.h"
+
+#include <linux/netdevice.h>
+#include <net/devlink.h>
+
+static int kvaser_pciefd_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct kvaser_pciefd *pcie = devlink_priv(devlink);
+ char buf[] = "xxx.xxx.xxxxx";
+ int ret;
+
+ if (pcie->fw_version.major) {
+ snprintf(buf, sizeof(buf), "%u.%u.%u",
+ pcie->fw_version.major,
+ pcie->fw_version.minor,
+ pcie->fw_version.build);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct devlink_ops kvaser_pciefd_devlink_ops = {
+ .info_get = kvaser_pciefd_devlink_info_get,
+};
+
+int kvaser_pciefd_devlink_port_register(struct kvaser_pciefd_can *can)
+{
+ int ret;
+ struct devlink_port_attrs attrs = {
+ .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ .phys.port_number = can->can.dev->dev_port,
+ };
+ devlink_port_attrs_set(&can->devlink_port, &attrs);
+
+ ret = devlink_port_register(priv_to_devlink(can->kv_pcie),
+ &can->devlink_port, can->can.dev->dev_port);
+ if (ret)
+ return ret;
+
+ SET_NETDEV_DEVLINK_PORT(can->can.dev, &can->devlink_port);
+
+ return 0;
+}
+
+void kvaser_pciefd_devlink_port_unregister(struct kvaser_pciefd_can *can)
+{
+ devlink_port_unregister(&can->devlink_port);
+}
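
The probe path above replaces devm_kzalloc() with devlink_alloc(), so the driver private struct lives inside the devlink instance and devlink_priv()/priv_to_devlink() convert between the two. A standalone sketch of that container pattern, using a simplified fake_devlink layout rather than the real <net/devlink.h> internals:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_devlink {
	int refcount;		/* stand-in for devlink bookkeeping */
	char priv[];		/* driver private area follows */
};

static struct fake_devlink *fake_devlink_alloc(size_t priv_size)
{
	return calloc(1, sizeof(struct fake_devlink) + priv_size);
}

static void *fake_devlink_priv(struct fake_devlink *dl)
{
	return dl->priv;
}

static struct fake_devlink *fake_priv_to_devlink(void *priv)
{
	return (struct fake_devlink *)((char *)priv -
				       offsetof(struct fake_devlink, priv));
}

struct pcie_priv { int nr_channels; };

int main(void)
{
	struct fake_devlink *dl = fake_devlink_alloc(sizeof(struct pcie_priv));
	struct pcie_priv *pcie = fake_devlink_priv(dl);

	pcie->nr_channels = 4;
	/* The round-trip must give back the same devlink instance. */
	printf("round-trip ok: %d\n", fake_priv_to_devlink(pcie) == dl);
	free(dl);
	return 0;
}
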
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 2b7dd359f27b..64e664f5adcc 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -834,7 +834,7 @@ static void rcar_can_remove(struct platform_device *pdev)
free_candev(ndev);
}
-static int __maybe_unused rcar_can_suspend(struct device *dev)
+static int rcar_can_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct rcar_can_priv *priv = netdev_priv(ndev);
@@ -857,7 +857,7 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused rcar_can_resume(struct device *dev)
+static int rcar_can_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct rcar_can_priv *priv = netdev_priv(ndev);
@@ -886,7 +886,8 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend,
+ rcar_can_resume);
static const struct of_device_id rcar_can_of_table[] __maybe_unused = {
{ .compatible = "renesas,can-r8a7778" },
@@ -904,7 +905,7 @@ static struct platform_driver rcar_can_driver = {
.driver = {
.name = RCAR_CAN_DRV_NAME,
.of_match_table = of_match_ptr(rcar_can_of_table),
- .pm = &rcar_can_pm_ops,
+ .pm = pm_sleep_ptr(&rcar_can_pm_ops),
},
.probe = rcar_can_probe,
.remove = rcar_can_remove,
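
The rcar_can change above swaps SIMPLE_DEV_PM_OPS plus __maybe_unused for DEFINE_SIMPLE_DEV_PM_OPS plus pm_sleep_ptr(). A simplified sketch of why the annotations can go: the macro keeps a syntactic reference to the ops (so no "defined but not used" warnings), while constant folding lets the compiler discard both the pointer and the callbacks when sleep support is off. These macro bodies are illustrative, not the real <linux/pm.h> definitions:

#include <stddef.h>
#include <stdio.h>

struct dev_pm_ops {
	int (*suspend)(void);
	int (*resume)(void);
};

/* stand-in for IS_ENABLED(CONFIG_PM_SLEEP); flip to 1 to keep the ops */
#define PM_SLEEP_ENABLED 0

#define pm_sleep_ptr(ops) (PM_SLEEP_ENABLED ? (ops) : NULL)

static int demo_suspend(void) { return 0; }
static int demo_resume(void)  { return 0; }

static const struct dev_pm_ops demo_pm_ops = {
	.suspend = demo_suspend,
	.resume  = demo_resume,
};

int main(void)
{
	const struct dev_pm_ops *pm = pm_sleep_ptr(&demo_pm_ops);

	printf("pm ops %s\n", pm ? "present" : "compiled out");
	return 0;
}
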
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 7f10213738e5..b3c8c592fb0e 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -191,9 +191,19 @@
/* RSCFDnCFDCmFDCFG */
#define RCANFD_GEN4_FDCFG_CLOE BIT(30)
#define RCANFD_GEN4_FDCFG_FDOE BIT(28)
+#define RCANFD_FDCFG_TDCO GENMASK(23, 16)
#define RCANFD_FDCFG_TDCE BIT(9)
#define RCANFD_FDCFG_TDCOC BIT(8)
-#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
+
+/* RSCFDnCFDCmFDSTS */
+#define RCANFD_FDSTS_SOC GENMASK(31, 24)
+#define RCANFD_FDSTS_EOC GENMASK(23, 16)
+#define RCANFD_GEN4_FDSTS_TDCVF BIT(15)
+#define RCANFD_GEN4_FDSTS_PNSTS GENMASK(13, 12)
+#define RCANFD_FDSTS_SOCO BIT(9)
+#define RCANFD_FDSTS_EOCO BIT(8)
+#define RCANFD_FDSTS_TDCVF BIT(7)
+#define RCANFD_FDSTS_TDCR GENMASK(7, 0)
/* RSCFDnCFDRFCCx */
#define RCANFD_RFCC_RFIM BIT(12)
@@ -214,8 +224,6 @@
/* RSCFDnCFDRFPTRx */
#define RCANFD_RFPTR_RFDLC(x) (((x) >> 28) & 0xf)
-#define RCANFD_RFPTR_RFPTR(x) (((x) >> 16) & 0xfff)
-#define RCANFD_RFPTR_RFTS(x) (((x) >> 0) & 0xffff)
/* RSCFDnCFDRFFDSTSx */
#define RCANFD_RFFDSTS_RFFDF BIT(2)
@@ -247,12 +255,9 @@
/* RSCFDnCFDCFIDk */
#define RCANFD_CFID_CFIDE BIT(31)
#define RCANFD_CFID_CFRTR BIT(30)
-#define RCANFD_CFID_CFID_MASK(x) ((x) & 0x1fffffff)
/* RSCFDnCFDCFPTRk */
#define RCANFD_CFPTR_CFDLC(x) (((x) & 0xf) << 28)
-#define RCANFD_CFPTR_CFPTR(x) (((x) & 0xfff) << 16)
-#define RCANFD_CFPTR_CFTS(x) (((x) & 0xff) << 0)
/* RSCFDnCFDCFFDCSTSk */
#define RCANFD_CFFDCSTS_CFFDF BIT(2)
@@ -318,59 +323,6 @@
#define RCANFD_CFPCTR(gpriv, ch, idx) \
((gpriv)->info->regs->cfpctr + (0x0c * (ch)) + (0x04 * (idx)))
-/* RSCFDnCFDFESTS / RSCFDnFESTS */
-#define RCANFD_FESTS (0x0238)
-/* RSCFDnCFDFFSTS / RSCFDnFFSTS */
-#define RCANFD_FFSTS (0x023c)
-/* RSCFDnCFDFMSTS / RSCFDnFMSTS */
-#define RCANFD_FMSTS (0x0240)
-/* RSCFDnCFDRFISTS / RSCFDnRFISTS */
-#define RCANFD_RFISTS (0x0244)
-/* RSCFDnCFDCFRISTS / RSCFDnCFRISTS */
-#define RCANFD_CFRISTS (0x0248)
-/* RSCFDnCFDCFTISTS / RSCFDnCFTISTS */
-#define RCANFD_CFTISTS (0x024c)
-
-/* RSCFDnCFDTMCp / RSCFDnTMCp */
-#define RCANFD_TMC(p) (0x0250 + (0x01 * (p)))
-/* RSCFDnCFDTMSTSp / RSCFDnTMSTSp */
-#define RCANFD_TMSTS(p) (0x02d0 + (0x01 * (p)))
-
-/* RSCFDnCFDTMTRSTSp / RSCFDnTMTRSTSp */
-#define RCANFD_TMTRSTS(y) (0x0350 + (0x04 * (y)))
-/* RSCFDnCFDTMTARSTSp / RSCFDnTMTARSTSp */
-#define RCANFD_TMTARSTS(y) (0x0360 + (0x04 * (y)))
-/* RSCFDnCFDTMTCSTSp / RSCFDnTMTCSTSp */
-#define RCANFD_TMTCSTS(y) (0x0370 + (0x04 * (y)))
-/* RSCFDnCFDTMTASTSp / RSCFDnTMTASTSp */
-#define RCANFD_TMTASTS(y) (0x0380 + (0x04 * (y)))
-/* RSCFDnCFDTMIECy / RSCFDnTMIECy */
-#define RCANFD_TMIEC(y) (0x0390 + (0x04 * (y)))
-
-/* RSCFDnCFDTXQCCm / RSCFDnTXQCCm */
-#define RCANFD_TXQCC(m) (0x03a0 + (0x04 * (m)))
-/* RSCFDnCFDTXQSTSm / RSCFDnTXQSTSm */
-#define RCANFD_TXQSTS(m) (0x03c0 + (0x04 * (m)))
-/* RSCFDnCFDTXQPCTRm / RSCFDnTXQPCTRm */
-#define RCANFD_TXQPCTR(m) (0x03e0 + (0x04 * (m)))
-
-/* RSCFDnCFDTHLCCm / RSCFDnTHLCCm */
-#define RCANFD_THLCC(m) (0x0400 + (0x04 * (m)))
-/* RSCFDnCFDTHLSTSm / RSCFDnTHLSTSm */
-#define RCANFD_THLSTS(m) (0x0420 + (0x04 * (m)))
-/* RSCFDnCFDTHLPCTRm / RSCFDnTHLPCTRm */
-#define RCANFD_THLPCTR(m) (0x0440 + (0x04 * (m)))
-
-/* RSCFDnCFDGTINTSTS0 / RSCFDnGTINTSTS0 */
-#define RCANFD_GTINTSTS0 (0x0460)
-/* RSCFDnCFDGTINTSTS1 / RSCFDnGTINTSTS1 */
-#define RCANFD_GTINTSTS1 (0x0464)
-/* RSCFDnCFDGTSTCFG / RSCFDnGTSTCFG */
-#define RCANFD_GTSTCFG (0x0468)
-/* RSCFDnCFDGTSTCTR / RSCFDnGTSTCTR */
-#define RCANFD_GTSTCTR (0x046c)
-/* RSCFDnCFDGLOCKK / RSCFDnGLOCKK */
-#define RCANFD_GLOCKK (0x047c)
/* RSCFDnCFDGRMCFG */
#define RCANFD_GRMCFG (0x04fc)
@@ -388,12 +340,6 @@
/* RSCFDnGAFLXXXj offset */
#define RCANFD_C_GAFL_OFFSET (0x0500)
-/* RSCFDnRMXXXq -> RCANFD_C_RMXXX(q) */
-#define RCANFD_C_RMID(q) (0x0600 + (0x10 * (q)))
-#define RCANFD_C_RMPTR(q) (0x0604 + (0x10 * (q)))
-#define RCANFD_C_RMDF0(q) (0x0608 + (0x10 * (q)))
-#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q)))
-
/* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */
#define RCANFD_C_RFOFFSET (0x0e00)
#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
@@ -413,40 +359,24 @@
#define RCANFD_C_CFDF(ch, idx, df) \
(RCANFD_C_CFOFFSET + 0x08 + (0x30 * (ch)) + (0x10 * (idx)) + (0x04 * (df)))
-/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */
-#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p)))
-#define RCANFD_C_TMPTR(p) (0x1004 + (0x10 * (p)))
-#define RCANFD_C_TMDF0(p) (0x1008 + (0x10 * (p)))
-#define RCANFD_C_TMDF1(p) (0x100c + (0x10 * (p)))
-
-/* RSCFDnTHLACCm */
-#define RCANFD_C_THLACC(m) (0x1800 + (0x04 * (m)))
-/* RSCFDnRPGACCr */
-#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
-
/* R-Car Gen4 Classical and CAN FD mode specific register map */
-#define RCANFD_GEN4_FDCFG(m) (0x1404 + (0x20 * (m)))
-
#define RCANFD_GEN4_GAFL_OFFSET (0x1800)
/* CAN FD mode specific register map */
-/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
-#define RCANFD_F_DCFG(gpriv, m) ((gpriv)->info->regs->f_dcfg + (0x20 * (m)))
-#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
-#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
-#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
-#define RCANFD_F_CFDCRC(m) (0x0510 + (0x20 * (m)))
+/* RSCFDnCFDCmXXX -> gpriv->fcbase[m].xxx */
+struct rcar_canfd_f_c {
+ u32 dcfg;
+ u32 cfdcfg;
+ u32 cfdctr;
+ u32 cfdsts;
+ u32 cfdcrc;
+ u32 pad[3];
+};
/* RSCFDnCFDGAFLXXXj offset */
#define RCANFD_F_GAFL_OFFSET (0x1000)
-/* RSCFDnCFDRMXXXq -> RCANFD_F_RMXXX(q) */
-#define RCANFD_F_RMID(q) (0x2000 + (0x20 * (q)))
-#define RCANFD_F_RMPTR(q) (0x2004 + (0x20 * (q)))
-#define RCANFD_F_RMFDSTS(q) (0x2008 + (0x20 * (q)))
-#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
-
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
#define RCANFD_F_RFOFFSET(gpriv) ((gpriv)->info->regs->rfoffset)
#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
@@ -471,23 +401,11 @@
(RCANFD_F_CFOFFSET(gpriv) + 0x0c + (0x180 * (ch)) + (0x80 * (idx)) + \
(0x04 * (df)))
-/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */
-#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p)))
-#define RCANFD_F_TMPTR(p) (0x4004 + (0x20 * (p)))
-#define RCANFD_F_TMFDCTR(p) (0x4008 + (0x20 * (p)))
-#define RCANFD_F_TMDF(p, b) (0x400c + (0x20 * (p)) + (0x04 * (b)))
-
-/* RSCFDnCFDTHLACCm */
-#define RCANFD_F_THLACC(m) (0x6000 + (0x04 * (m)))
-/* RSCFDnCFDRPGACCr */
-#define RCANFD_F_RPGACC(r) (0x6400 + (0x04 * (r)))
-
/* Constants */
#define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */
#define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */
#define RCANFD_NUM_CHANNELS 8 /* Eight channels max */
-#define RCANFD_CHANNELS_MASK BIT((RCANFD_NUM_CHANNELS) - 1)
#define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16)
#define RCANFD_CHANNEL_NUMRULES 1 /* only one rule per channel */
@@ -510,7 +428,7 @@ struct rcar_canfd_regs {
u16 cfcc; /* Common FIFO Configuration/Control Register */
u16 cfsts; /* Common FIFO Status Register */
u16 cfpctr; /* Common FIFO Pointer Control Register */
- u16 f_dcfg; /* Global FD Configuration Register */
+ u16 coffset; /* Channel Data Bitrate Configuration Register */
u16 rfoffset; /* Receive FIFO buffer access ID register */
u16 cfoffset; /* Transmit/receive FIFO buffer access ID register */
};
@@ -529,6 +447,7 @@ struct rcar_canfd_shift_data {
struct rcar_canfd_hw_info {
const struct can_bittiming_const *nom_bittiming;
const struct can_bittiming_const *data_bittiming;
+ const struct can_tdc_const *tdc_const;
const struct rcar_canfd_regs *regs;
const struct rcar_canfd_shift_data *sh;
u8 rnc_field_width;
@@ -562,6 +481,7 @@ struct rcar_canfd_channel {
struct rcar_canfd_global {
struct rcar_canfd_channel *ch[RCANFD_NUM_CHANNELS];
void __iomem *base; /* Register base address */
+ struct rcar_canfd_f_c __iomem *fcbase;
struct platform_device *pdev; /* Respective platform device */
struct clk *clkp; /* Peripheral clock */
struct clk *can_clk; /* fCAN clock */
@@ -636,12 +556,31 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.brp_inc = 1,
};
+/* CAN FD Transmission Delay Compensation constants */
+static const struct can_tdc_const rcar_canfd_gen3_tdc_const = {
+ .tdcv_min = 1,
+ .tdcv_max = 128,
+ .tdco_min = 1,
+ .tdco_max = 128,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+static const struct can_tdc_const rcar_canfd_gen4_tdc_const = {
+ .tdcv_min = 1,
+ .tdcv_max = 256,
+ .tdco_min = 1,
+ .tdco_max = 256,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
static const struct rcar_canfd_regs rcar_gen3_regs = {
.rfcc = 0x00b8,
.cfcc = 0x0118,
.cfsts = 0x0178,
.cfpctr = 0x01d8,
- .f_dcfg = 0x0500,
+ .coffset = 0x0500,
.rfoffset = 0x3000,
.cfoffset = 0x3400,
};
@@ -651,7 +590,7 @@ static const struct rcar_canfd_regs rcar_gen4_regs = {
.cfcc = 0x0120,
.cfsts = 0x01e0,
.cfpctr = 0x0240,
- .f_dcfg = 0x1400,
+ .coffset = 0x1400,
.rfoffset = 0x6000,
.cfoffset = 0x6400,
};
@@ -681,6 +620,7 @@ static const struct rcar_canfd_shift_data rcar_gen4_shift_data = {
static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
.nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
.data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen3_tdc_const,
.regs = &rcar_gen3_regs,
.sh = &rcar_gen3_shift_data,
.rnc_field_width = 8,
@@ -697,6 +637,7 @@ static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
.nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
.data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen4_tdc_const,
.regs = &rcar_gen4_regs,
.sh = &rcar_gen4_shift_data,
.rnc_field_width = 16,
@@ -713,6 +654,7 @@ static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
static const struct rcar_canfd_hw_info rzg2l_hw_info = {
.nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
.data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen3_tdc_const,
.regs = &rcar_gen3_regs,
.sh = &rcar_gen3_shift_data,
.rnc_field_width = 8,
@@ -729,6 +671,7 @@ static const struct rcar_canfd_hw_info rzg2l_hw_info = {
static const struct rcar_canfd_hw_info r9a09g047_hw_info = {
.nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
.data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen4_tdc_const,
.regs = &rcar_gen4_regs,
.sh = &rcar_gen4_shift_data,
.rnc_field_width = 16,
@@ -778,26 +721,36 @@ static void rcar_canfd_update_bit(void __iomem *base, u32 reg,
rcar_canfd_update(mask, val, base + reg);
}
+static void rcar_canfd_set_bit_reg(void __iomem *addr, u32 val)
+{
+ rcar_canfd_update(val, val, addr);
+}
+
+static void rcar_canfd_update_bit_reg(void __iomem *addr, u32 mask, u32 val)
+{
+ rcar_canfd_update(mask, val, addr);
+}
+
static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
struct canfd_frame *cf, u32 off)
{
+ u32 *data = (u32 *)cf->data;
u32 i, lwords;
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
- *((u32 *)cf->data + i) =
- rcar_canfd_read(priv->base, off + i * sizeof(u32));
+ data[i] = rcar_canfd_read(priv->base, off + i * sizeof(u32));
}
static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
struct canfd_frame *cf, u32 off)
{
+ const u32 *data = (u32 *)cf->data;
u32 i, lwords;
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
- rcar_canfd_write(priv->base, off + i * sizeof(u32),
- *((u32 *)cf->data + i));
+ rcar_canfd_write(priv->base, off + i * sizeof(u32), data[i]);
}
static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
@@ -808,8 +761,8 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
can_free_echo_skb(ndev, i, NULL);
}
-static void rcar_canfd_setrnc(struct rcar_canfd_global *gpriv, unsigned int ch,
- unsigned int num_rules)
+static void rcar_canfd_set_rnc(struct rcar_canfd_global *gpriv, unsigned int ch,
+ unsigned int num_rules)
{
unsigned int rnc_stride = 32 / gpriv->info->rnc_field_width;
unsigned int shift = 32 - (ch % rnc_stride + 1) * gpriv->info->rnc_field_width;
@@ -827,8 +780,7 @@ static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
for_each_set_bit(ch, &gpriv->channels_mask,
gpriv->info->max_channels)
- rcar_canfd_set_bit(gpriv->base, RCANFD_GEN4_FDCFG(ch),
- val);
+ rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg, val);
} else {
if (gpriv->fdmode)
rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
@@ -841,6 +793,7 @@ static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
{
+ struct device *dev = &gpriv->pdev->dev;
u32 sts, ch;
int err;
@@ -850,7 +803,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
!(sts & RCANFD_GSTS_GRAMINIT), 2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev, "global raminit failed\n");
+ dev_dbg(dev, "global raminit failed\n");
return err;
}
@@ -863,7 +816,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
(sts & RCANFD_GSTS_GRSTSTS), 2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev, "global reset failed\n");
+ dev_dbg(dev, "global reset failed\n");
return err;
}
@@ -887,8 +840,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
(sts & RCANFD_CSTS_CRSTSTS),
2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev,
- "channel %u reset failed\n", ch);
+ dev_dbg(dev, "channel %u reset failed\n", ch);
return err;
}
}
@@ -938,7 +890,7 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
RCANFD_GAFLECTR_AFLDAE));
/* Write number of rules for channel */
- rcar_canfd_setrnc(gpriv, ch, num_rules);
+ rcar_canfd_set_rnc(gpriv, ch, num_rules);
if (gpriv->info->shared_can_regs)
offset = RCANFD_GEN4_GAFL_OFFSET;
else if (gpriv->fdmode)
@@ -1436,14 +1388,17 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rcar_canfd_set_bittiming(struct net_device *dev)
+static void rcar_canfd_set_bittiming(struct net_device *ndev)
{
- struct rcar_canfd_channel *priv = netdev_priv(dev);
+ u32 mask = RCANFD_FDCFG_TDCO | RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
struct rcar_canfd_global *gpriv = priv->gpriv;
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ const struct can_tdc_const *tdc_const = priv->can.fd.tdc_const;
+ const struct can_tdc *tdc = &priv->can.fd.tdc;
+ u32 cfg, tdcmode = 0, tdco = 0;
u16 brp, sjw, tseg1, tseg2;
- u32 cfg;
u32 ch = priv->channel;
/* Nominal bit timing settings */
@@ -1452,46 +1407,43 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 1;
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- /* CAN FD only mode */
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) |
RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2));
+ } else {
+ cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
+ RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
+ }
- rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
-
- /* Data bit timing settings */
- brp = dbt->brp - 1;
- sjw = dbt->sjw - 1;
- tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
- tseg2 = dbt->phase_seg2 - 1;
-
- cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
+ rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- rcar_canfd_write(priv->base, RCANFD_F_DCFG(gpriv, ch), cfg);
- netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
- } else {
- /* Classical CAN only mode */
- if (gpriv->info->shared_can_regs) {
- cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
- RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(gpriv, sjw) |
- RCANFD_NCFG_NTSEG2(gpriv, tseg2));
- } else {
- cfg = (RCANFD_CFG_TSEG1(tseg1) |
- RCANFD_CFG_BRP(brp) |
- RCANFD_CFG_SJW(sjw) |
- RCANFD_CFG_TSEG2(tseg2));
- }
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+ return;
- rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- netdev_dbg(priv->ndev,
- "rate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
+ /* Data bit timing settings */
+ brp = dbt->brp - 1;
+ sjw = dbt->sjw - 1;
+ tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+ tseg2 = dbt->phase_seg2 - 1;
+
+ cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
+ RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
+
+ writel(cfg, &gpriv->fcbase[ch].dcfg);
+
+ /* Transceiver Delay Compensation */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) {
+ /* TDC enabled, measured + offset */
+ tdcmode = RCANFD_FDCFG_TDCE;
+ tdco = tdc->tdco - 1;
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+ /* TDC enabled, offset only */
+ tdcmode = RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
+ tdco = min(tdc->tdcv + tdc->tdco, tdc_const->tdco_max) - 1;
}
+
+ rcar_canfd_update_bit_reg(&gpriv->fcbase[ch].cfdcfg, mask,
+ tdcmode | FIELD_PREP(RCANFD_FDCFG_TDCO, tdco));
}
static int rcar_canfd_start(struct net_device *ndev)
@@ -1691,7 +1643,8 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
{
- struct net_device_stats *stats = &priv->ndev->stats;
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf;
struct sk_buff *skb;
@@ -1707,14 +1660,13 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
sts & RCANFD_RFFDSTS_RFFDF)
- skb = alloc_canfd_skb(priv->ndev, &cf);
+ skb = alloc_canfd_skb(ndev, &cf);
else
- skb = alloc_can_skb(priv->ndev,
- (struct can_frame **)&cf);
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
} else {
id = rcar_canfd_read(priv->base, RCANFD_C_RFID(ridx));
dlc = rcar_canfd_read(priv->base, RCANFD_C_RFPTR(ridx));
- skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
}
if (!skb) {
@@ -1735,7 +1687,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if (sts & RCANFD_RFFDSTS_RFESI) {
cf->flags |= CANFD_ESI;
- netdev_dbg(priv->ndev, "ESI Error\n");
+ netdev_dbg(ndev, "ESI Error\n");
}
if (!(sts & RCANFD_RFFDSTS_RFFDF) && (id & RCANFD_RFID_RFRTR)) {
@@ -1802,6 +1754,29 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
return num_pkts;
}
+static unsigned int rcar_canfd_get_tdcr(struct rcar_canfd_global *gpriv,
+ unsigned int ch)
+{
+ u32 sts = readl(&gpriv->fcbase[ch].cfdsts);
+ u32 tdcr = FIELD_GET(RCANFD_FDSTS_TDCR, sts);
+
+ return tdcr & (gpriv->info->tdc_const->tdcv_max - 1);
+}
+
+static int rcar_canfd_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ u32 tdco = priv->can.fd.tdc.tdco;
+ u32 tdcr;
+
+ /* Transceiver Delay Compensation Result */
+ tdcr = rcar_canfd_get_tdcr(priv->gpriv, priv->channel) + 1;
+
+ *tdcv = tdcr < tdco ? 0 : tdcr - tdco;
+
+ return 0;
+}
+
static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
int err;
@@ -1818,10 +1793,10 @@ static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
}
-static int rcar_canfd_get_berr_counter(const struct net_device *dev,
+static int rcar_canfd_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
- struct rcar_canfd_channel *priv = netdev_priv(dev);
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
u32 val, ch = priv->channel;
/* Peripheral clock is already enabled in probe */
@@ -1924,12 +1899,17 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
if (gpriv->fdmode) {
priv->can.bittiming_const = gpriv->info->nom_bittiming;
priv->can.fd.data_bittiming_const = gpriv->info->data_bittiming;
+ priv->can.fd.tdc_const = gpriv->info->tdc_const;
/* Controller starts in CAN FD only mode */
err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
if (err)
goto fail;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_TDC_MANUAL;
+ priv->can.fd.do_get_auto_tdcv = rcar_canfd_get_auto_tdcv;
} else {
/* Controller starts in Classical CAN only mode */
priv->can.bittiming_const = &rcar_canfd_bittiming_const;
@@ -2086,6 +2066,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
goto fail_dev;
}
gpriv->base = addr;
+ gpriv->fcbase = addr + gpriv->info->regs->coffset;
/* Request IRQ that's common for both channels */
if (info->shared_global_irqs) {
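
A worked example of the TDC register math above, mirroring the driver's expressions for manual mode and for the auto-mode readback, and assuming the Gen4 limit tdco_max = 256 from the table above:

#include <stdio.h>

#define TDCO_MAX 256U

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int tdcv = 10, tdco = 20;

	/* CAN_CTRLMODE_TDC_MANUAL: the hardware takes one combined offset,
	 * clamped to the register range and stored off-by-one.
	 */
	unsigned int reg_tdco = min_u(tdcv + tdco, TDCO_MAX) - 1;

	/* Readback path: TDCR holds measured delay plus offset (plus one);
	 * subtracting the configured offset recovers the measured part.
	 */
	unsigned int tdcr = reg_tdco + 1;
	unsigned int measured = tdcr < tdco ? 0 : tdcr - tdco;

	printf("reg_tdco=%u measured tdcv=%u\n", reg_tdco, measured);
	return 0;
}
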
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 2f516cc6d22c..e061e35769bf 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -105,7 +105,7 @@ config CAN_SJA1000_PLATFORM
config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
- depends on ISA
+ depends on (ISA && PC104) || (COMPILE_TEST && HAS_IOPORT)
help
This driver is for Technologic Systems' TSCAN-1 PC104 boards.
https://www.embeddedts.com/products/TS-CAN1
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index ec5c64006a16..313e1d241f01 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -388,8 +388,8 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
mcp251x_spi_write(spi, 4);
}
-static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
- u8 mask, u8 val)
+static int mcp251x_write_bits(struct spi_device *spi, u8 reg,
+ u8 mask, u8 val)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -398,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
priv->spi_tx_buf[2] = mask;
priv->spi_tx_buf[3] = val;
- mcp251x_spi_write(spi, 4);
+ return mcp251x_spi_write(spi, 4);
}
static u8 mcp251x_read_stat(struct spi_device *spi)
@@ -441,6 +441,7 @@ static int mcp251x_gpio_request(struct gpio_chip *chip,
unsigned int offset)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ int ret;
u8 val;
/* nothing to be done for inputs */
@@ -450,8 +451,10 @@ static int mcp251x_gpio_request(struct gpio_chip *chip,
val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF);
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl |= val;
@@ -530,29 +533,35 @@ static int mcp251x_gpio_get_multiple(struct gpio_chip *chip,
return 0;
}
-static void mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
u8 mask, val;
+ int ret;
mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF);
val = value ? mask : 0;
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl &= ~mask;
priv->reg_bfpctrl |= val;
+
+ return 0;
}
-static void
+static int
mcp251x_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *maskp, unsigned long *bitsp)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
u8 mask, val;
+ int ret;
mask = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, maskp[0]);
mask = FIELD_PREP(BFPCTRL_BFS_MASK, mask);
@@ -561,14 +570,18 @@ mcp251x_gpio_set_multiple(struct gpio_chip *chip,
val = FIELD_PREP(BFPCTRL_BFS_MASK, val);
if (!mask)
- return;
+ return 0;
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl &= ~mask;
priv->reg_bfpctrl |= val;
+
+ return 0;
}
static void mcp251x_gpio_restore(struct spi_device *spi)
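
The mcp251x conversion above turns the GPIO setters into int-returning callbacks so SPI failures propagate to the caller instead of silently desynchronizing the cached BFPCTRL copy. A standalone sketch of that lock/write/bail-before-caching pattern, with hypothetical helper names and a pthread mutex standing in for mcp_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char cached_reg;

/* stand-in for mcp251x_write_bits(); returns 0 or a negative errno */
static int bus_write_bits(unsigned char mask, unsigned char val)
{
	(void)mask; (void)val;
	return 0;
}

static int gpio_set(unsigned char mask, unsigned char val)
{
	int ret;

	pthread_mutex_lock(&lock);
	ret = bus_write_bits(mask, val);
	pthread_mutex_unlock(&lock);
	if (ret)
		return ret;	/* the cache stays consistent on failure */

	cached_reg = (cached_reg & ~mask) | val;
	return 0;
}

int main(void)
{
	printf("gpio_set -> %d, cache=0x%02x\n",
	       gpio_set(0x10, 0x10), cached_reg);
	return 0;
}
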
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 644e8b8eb91e..e6d6661a908a 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -383,7 +383,7 @@ static void ti_hecc_start(struct net_device *ndev)
* overflows instead of the hardware silently dropping the
* messages.
*/
- mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
+ mbx_mask = ~BIT_U32(HECC_RX_LAST_MBOX);
hecc_write(priv, HECC_CANOPC, mbx_mask);
/* Enable interrupts */
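
The ti_hecc change swaps BIT() for BIT_U32(). Both yield the same mask once narrowed to u32, but BIT() expands to an unsigned long, so on 64-bit the complement is a 64-bit value and only the implicit truncation fixes it up; BIT_U32() keeps the arithmetic 32-bit throughout. A small demonstration with simplified macros, not the real <linux/bits.h> definitions:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define BIT_U32(n)	((uint32_t)1 << (n))

int main(void)
{
	uint32_t a = ~BIT(31);		/* relies on implicit narrowing */
	uint32_t b = ~BIT_U32(31);	/* 32-bit all the way through */

	/* both print 0x7fffffff */
	printf("0x%08x 0x%08x\n", (unsigned)a, (unsigned)b);
	return 0;
}
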
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 9dae0c71a2e1..a7547a83120e 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -66,6 +66,7 @@ config CAN_GS_USB
config CAN_KVASER_USB
tristate "Kvaser CAN/USB interface"
+ select NET_DEVLINK
help
This driver adds support for Kvaser CAN/USB devices like Kvaser
Leaf Light, Kvaser USBcan II and Kvaser Memorator Pro 5xHS.
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index d924b053677b..6476add1c105 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -429,7 +429,7 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv)
es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
&priv->can.fd.data_bittiming);
- if (can_tdc_is_enabled(&priv->can)) {
+ if (can_fd_tdc_is_enabled(&priv->can)) {
tx_conf_msg.tdc_enabled = 1;
tx_conf_msg.tdco = cpu_to_le16(priv->can.fd.tdc.tdco);
tx_conf_msg.tdcf = cpu_to_le16(priv->can.fd.tdc.tdcf);
diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile
index cf260044f0b9..41b4a11555aa 100644
--- a/drivers/net/can/usb/kvaser_usb/Makefile
+++ b/drivers/net/can/usb/kvaser_usb/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
-kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o
+kvaser_usb-y = kvaser_usb_core.o kvaser_usb_devlink.o kvaser_usb_leaf.o kvaser_usb_hydra.o
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index f6c77eca9f43..46a1b6907a50 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -27,6 +27,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/usb.h>
+#include <net/devlink.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -47,6 +48,10 @@
#define KVASER_USB_CAP_EXT_CAP 0x02
#define KVASER_USB_HYDRA_CAP_EXT_CMD 0x04
+#define KVASER_USB_SW_VERSION_MAJOR_MASK GENMASK(31, 24)
+#define KVASER_USB_SW_VERSION_MINOR_MASK GENMASK(23, 16)
+#define KVASER_USB_SW_VERSION_BUILD_MASK GENMASK(15, 0)
+
struct kvaser_usb_dev_cfg;
enum kvaser_usb_leaf_family {
@@ -54,6 +59,11 @@ enum kvaser_usb_leaf_family {
KVASER_USBCAN,
};
+enum kvaser_usb_led_state {
+ KVASER_USB_LED_ON = 0,
+ KVASER_USB_LED_OFF = 1,
+};
+
#define KVASER_USB_HYDRA_MAX_CMD_LEN 128
struct kvaser_usb_dev_card_data_hydra {
u8 channel_to_he[KVASER_USB_MAX_NET_DEVICES];
@@ -78,6 +88,12 @@ struct kvaser_usb_tx_urb_context {
u32 echo_index;
};
+struct kvaser_usb_fw_version {
+ u8 major;
+ u8 minor;
+ u16 build;
+};
+
struct kvaser_usb_busparams {
__le32 bitrate;
u8 tseg1;
@@ -96,12 +112,15 @@ struct kvaser_usb {
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
struct usb_anchor rx_submitted;
+ u32 ean[2];
+ u32 serial_number;
+ struct kvaser_usb_fw_version fw_version;
+ u8 hw_revision;
+ unsigned int nchannels;
/* @max_tx_urbs: Firmware-reported maximum number of outstanding,
* not yet ACKed, transmissions on this device. This value is
* also used as a sentinel for marking free tx contexts.
*/
- u32 fw_version;
- unsigned int nchannels;
unsigned int max_tx_urbs;
struct kvaser_usb_dev_card_data card_data;
@@ -112,6 +131,7 @@ struct kvaser_usb {
struct kvaser_usb_net_priv {
struct can_priv can;
+ struct devlink_port devlink_port;
struct can_berr_counter bec;
/* subdriver-specific data */
@@ -149,6 +169,7 @@ struct kvaser_usb_net_priv {
* @dev_get_software_details: get software details
* @dev_get_card_info: get card info
* @dev_get_capabilities: discover device capabilities
+ * @dev_set_led: turn on/off device LED
*
 * @dev_set_opt_mode: set ctrlmode
* @dev_start_chip: start the CAN controller
@@ -176,6 +197,9 @@ struct kvaser_usb_dev_ops {
int (*dev_get_software_details)(struct kvaser_usb *dev);
int (*dev_get_card_info)(struct kvaser_usb *dev);
int (*dev_get_capabilities)(struct kvaser_usb *dev);
+ int (*dev_set_led)(struct kvaser_usb_net_priv *priv,
+ enum kvaser_usb_led_state state,
+ u16 duration_ms);
int (*dev_set_opt_mode)(const struct kvaser_usb_net_priv *priv);
int (*dev_start_chip)(struct kvaser_usb_net_priv *priv);
int (*dev_stop_chip)(struct kvaser_usb_net_priv *priv);
@@ -204,6 +228,11 @@ struct kvaser_usb_dev_cfg {
extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+extern const struct devlink_ops kvaser_usb_devlink_ops;
+
+int kvaser_usb_devlink_port_register(struct kvaser_usb_net_priv *priv);
+void kvaser_usb_devlink_port_unregister(struct kvaser_usb_net_priv *priv);
+
void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv);
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index daf42080f942..90e77fa0ff4a 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -364,10 +364,13 @@ resubmit_urb:
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err == -ENODEV) {
for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
+ struct kvaser_usb_net_priv *priv;
+
+ priv = dev->nets[i];
+ if (!priv)
continue;
- netif_device_detach(dev->nets[i]->netdev);
+ netif_device_detach(priv->netdev);
}
} else if (err) {
dev_err(&dev->intf->dev,
@@ -753,6 +756,31 @@ freeurb:
return ret;
}
+static int kvaser_usb_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ const struct kvaser_usb_dev_ops *ops = priv->dev->driver_info->ops;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 3; /* 3 On/Off cycles per second */
+
+ case ETHTOOL_ID_ON:
+ return ops->dev_set_led(priv, KVASER_USB_LED_ON, 1000);
+
+ case ETHTOOL_ID_OFF:
+ return ops->dev_set_led(priv, KVASER_USB_LED_OFF, 1000);
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Turn LED off and restore standard function after 1ms */
+ return ops->dev_set_led(priv, KVASER_USB_LED_OFF, 1);
+
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_open = kvaser_usb_open,
.ndo_stop = kvaser_usb_close,
@@ -763,30 +791,35 @@ static const struct net_device_ops kvaser_usb_netdev_ops = {
static const struct ethtool_ops kvaser_usb_ethtool_ops = {
.get_ts_info = can_ethtool_op_get_ts_info_hwts,
+ .set_phys_id = kvaser_usb_set_phys_id,
};
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int i;
+ struct kvaser_usb_net_priv *priv;
for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
+ priv = dev->nets[i];
+ if (!priv)
continue;
- unregister_candev(dev->nets[i]->netdev);
+ unregister_candev(priv->netdev);
}
kvaser_usb_unlink_all_urbs(dev);
for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
+ priv = dev->nets[i];
+ if (!priv)
continue;
if (ops->dev_remove_channel)
- ops->dev_remove_channel(dev->nets[i]);
+ ops->dev_remove_channel(priv);
- free_candev(dev->nets[i]->netdev);
+ kvaser_usb_devlink_port_unregister(priv);
+ free_candev(priv->netdev);
}
}
@@ -852,26 +885,35 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->intf->dev);
netdev->dev_id = channel;
+ netdev->dev_port = channel;
dev->nets[channel] = priv;
if (ops->dev_init_channel) {
err = ops->dev_init_channel(priv);
if (err)
- goto err;
+ goto candev_free;
+ }
+
+ err = kvaser_usb_devlink_port_register(priv);
+ if (err) {
+ dev_err(&dev->intf->dev, "Failed to register devlink port\n");
+ goto candev_free;
}
err = register_candev(netdev);
if (err) {
dev_err(&dev->intf->dev, "Failed to register CAN device\n");
- goto err;
+ goto unregister_devlink_port;
}
netdev_dbg(netdev, "device registered\n");
return 0;
-err:
+unregister_devlink_port:
+ kvaser_usb_devlink_port_unregister(priv);
+candev_free:
free_candev(netdev);
dev->nets[channel] = NULL;
return err;
@@ -881,6 +923,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct kvaser_usb *dev;
+ struct devlink *devlink;
int err;
int i;
const struct kvaser_usb_driver_info *driver_info;
@@ -890,17 +933,20 @@ static int kvaser_usb_probe(struct usb_interface *intf,
if (!driver_info)
return -ENODEV;
- dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
+ devlink = devlink_alloc(&kvaser_usb_devlink_ops, sizeof(*dev), &intf->dev);
+ if (!devlink)
return -ENOMEM;
+ dev = devlink_priv(devlink);
dev->intf = intf;
dev->driver_info = driver_info;
ops = driver_info->ops;
err = ops->dev_setup_endpoints(dev);
- if (err)
- return dev_err_probe(&intf->dev, err, "Cannot get usb endpoint(s)");
+ if (err) {
+ dev_err_probe(&intf->dev, err, "Cannot get usb endpoint(s)");
+ goto free_devlink;
+ }
dev->udev = interface_to_usbdev(intf);
@@ -911,55 +957,66 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev->card_data.ctrlmode_supported = 0;
dev->card_data.capabilities = 0;
err = ops->dev_init_card(dev);
- if (err)
- return dev_err_probe(&intf->dev, err,
- "Failed to initialize card\n");
+ if (err) {
+ dev_err_probe(&intf->dev, err,
+ "Failed to initialize card\n");
+ goto free_devlink;
+ }
err = ops->dev_get_software_info(dev);
- if (err)
- return dev_err_probe(&intf->dev, err,
- "Cannot get software info\n");
+ if (err) {
+ dev_err_probe(&intf->dev, err,
+ "Cannot get software info\n");
+ goto free_devlink;
+ }
if (ops->dev_get_software_details) {
err = ops->dev_get_software_details(dev);
- if (err)
- return dev_err_probe(&intf->dev, err,
- "Cannot get software details\n");
+ if (err) {
+ dev_err_probe(&intf->dev, err,
+ "Cannot get software details\n");
+ goto free_devlink;
+ }
}
- if (WARN_ON(!dev->cfg))
- return -ENODEV;
-
- dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
- ((dev->fw_version >> 24) & 0xff),
- ((dev->fw_version >> 16) & 0xff),
- (dev->fw_version & 0xffff));
+ if (WARN_ON(!dev->cfg)) {
+ err = -ENODEV;
+ goto free_devlink;
+ }
dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
err = ops->dev_get_card_info(dev);
- if (err)
- return dev_err_probe(&intf->dev, err,
- "Cannot get card info\n");
+ if (err) {
+ dev_err_probe(&intf->dev, err,
+ "Cannot get card info\n");
+ goto free_devlink;
+ }
if (ops->dev_get_capabilities) {
err = ops->dev_get_capabilities(dev);
if (err) {
- kvaser_usb_remove_interfaces(dev);
- return dev_err_probe(&intf->dev, err,
- "Cannot get capabilities\n");
+ dev_err_probe(&intf->dev, err,
+ "Cannot get capabilities\n");
+ goto remove_interfaces;
}
}
for (i = 0; i < dev->nchannels; i++) {
err = kvaser_usb_init_one(dev, i);
- if (err) {
- kvaser_usb_remove_interfaces(dev);
- return err;
- }
+ if (err)
+ goto remove_interfaces;
}
+ devlink_register(devlink);
return 0;
+
+remove_interfaces:
+ kvaser_usb_remove_interfaces(dev);
+free_devlink:
+ devlink_free(devlink);
+
+ return err;
}
static void kvaser_usb_disconnect(struct usb_interface *intf)
@@ -972,6 +1029,8 @@ static void kvaser_usb_disconnect(struct usb_interface *intf)
return;
kvaser_usb_remove_interfaces(dev);
+ devlink_unregister(priv_to_devlink(dev));
+ devlink_free(priv_to_devlink(dev));
}
static struct usb_driver kvaser_usb_driver = {
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c
new file mode 100644
index 000000000000..e838b82298ae
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/* kvaser_usb devlink functions
+ *
+ * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved.
+ */
+#include "kvaser_usb.h"
+
+#include <linux/netdevice.h>
+#include <net/devlink.h>
+
+#define KVASER_USB_EAN_MSB 0x00073301
+
+static int kvaser_usb_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct kvaser_usb *dev = devlink_priv(devlink);
+ char buf[] = "73301XXXXXXXXXX";
+ int ret;
+
+ if (dev->serial_number) {
+ snprintf(buf, sizeof(buf), "%u", dev->serial_number);
+ ret = devlink_info_serial_number_put(req, buf);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->fw_version.major) {
+ snprintf(buf, sizeof(buf), "%u.%u.%u",
+ dev->fw_version.major,
+ dev->fw_version.minor,
+ dev->fw_version.build);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->hw_revision) {
+ snprintf(buf, sizeof(buf), "%u", dev->hw_revision);
+ ret = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->ean[1] == KVASER_USB_EAN_MSB) {
+ snprintf(buf, sizeof(buf), "%x%08x", dev->ean[1], dev->ean[0]);
+ ret = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct devlink_ops kvaser_usb_devlink_ops = {
+ .info_get = kvaser_usb_devlink_info_get,
+};
+
+int kvaser_usb_devlink_port_register(struct kvaser_usb_net_priv *priv)
+{
+ int ret;
+ struct devlink_port_attrs attrs = {
+ .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ .phys.port_number = priv->channel,
+ };
+ devlink_port_attrs_set(&priv->devlink_port, &attrs);
+
+ ret = devlink_port_register(priv_to_devlink(priv->dev),
+ &priv->devlink_port, priv->channel);
+ if (ret)
+ return ret;
+
+ SET_NETDEV_DEVLINK_PORT(priv->netdev, &priv->devlink_port);
+
+ return 0;
+}
+
+void kvaser_usb_devlink_port_unregister(struct kvaser_usb_net_priv *priv)
+{
+ devlink_port_unregister(&priv->devlink_port);
+}
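
A worked example of the EAN formatting above: the device reports the EAN as two 32-bit BCD-coded words, so printing them with "%x%08x" reproduces the decimal-looking article number. The low word here is made up; only the 73301 prefix comes from the driver:

#include <stdio.h>

int main(void)
{
	unsigned int ean[2] = { 0x30002100, 0x00073301 };
	char buf[] = "73301XXXXXXXXXX";	/* sized like the driver's buffer */

	snprintf(buf, sizeof(buf), "%x%08x", ean[1], ean[0]);
	printf("EAN: %s\n", buf);	/* -> 7330130002100 */
	return 0;
}
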
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 8e88b5917796..a59f20dad692 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -12,6 +12,7 @@
* distinguish between ERROR_WARNING and ERROR_ACTIVE.
*/
+#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/gfp.h>
@@ -67,6 +68,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt;
#define CMD_SET_BUSPARAMS_RESP 85
#define CMD_GET_CAPABILITIES_REQ 95
#define CMD_GET_CAPABILITIES_RESP 96
+#define CMD_LED_ACTION_REQ 101
+#define CMD_LED_ACTION_RESP 102
#define CMD_RX_MESSAGE 106
#define CMD_MAP_CHANNEL_REQ 200
#define CMD_MAP_CHANNEL_RESP 201
@@ -111,7 +114,7 @@ struct kvaser_cmd_card_info {
__le32 clock_res;
__le32 mfg_date;
__le32 ean[2];
- u8 hw_version;
+ u8 hw_revision;
u8 usb_mode;
u8 hw_type;
u8 reserved0;
@@ -217,6 +220,22 @@ struct kvaser_cmd_get_busparams_res {
u8 reserved[20];
} __packed;
+/* The device has two LEDs per CAN channel.
+ * The LSB of the action field controls the state:
+ * 0 = ON
+ * 1 = OFF
+ * The remaining bits of the action field hold the LED index.
+ */
+#define KVASER_USB_HYDRA_LED_IDX_MASK GENMASK(31, 1)
+#define KVASER_USB_HYDRA_LED_YELLOW_CH0_IDX 3
+#define KVASER_USB_HYDRA_LEDS_PER_CHANNEL 2
+struct kvaser_cmd_led_action_req {
+ u8 action;
+ u8 padding;
+ __le16 duration_ms;
+ u8 reserved[24];
+} __packed;
+
/* Ctrl modes */
#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
@@ -299,6 +318,8 @@ struct kvaser_cmd {
struct kvaser_cmd_get_busparams_req get_busparams_req;
struct kvaser_cmd_get_busparams_res get_busparams_res;
+ struct kvaser_cmd_led_action_req led_action_req;
+
struct kvaser_cmd_chip_state_event chip_state_event;
struct kvaser_cmd_set_ctrlmode set_ctrlmode;
@@ -1390,6 +1411,7 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
/* Ignored commands */
case CMD_SET_BUSPARAMS_RESP:
case CMD_SET_BUSPARAMS_FD_RESP:
+ case CMD_LED_ACTION_RESP:
break;
default:
@@ -1817,6 +1839,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
size_t cmd_len;
int err;
u32 flags;
+ u32 fw_version;
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1841,7 +1864,10 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
if (err)
goto end;
- dev->fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version);
+ fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version);
+ dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK, fw_version);
+ dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK, fw_version);
+ dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK, fw_version);
flags = le32_to_cpu(cmd->sw_detail_res.sw_flags);
if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BAD) {
@@ -1892,6 +1918,10 @@ static int kvaser_usb_hydra_get_card_info(struct kvaser_usb *dev)
err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CARD_INFO_RESP, &cmd);
if (err)
return err;
+ dev->ean[1] = le32_to_cpu(cmd.card_info.ean[1]);
+ dev->ean[0] = le32_to_cpu(cmd.card_info.ean[0]);
+ dev->serial_number = le32_to_cpu(cmd.card_info.serial_number);
+ dev->hw_revision = cmd.card_info.hw_revision;
dev->nchannels = cmd.card_info.nchannels;
if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES)
@@ -1946,6 +1976,36 @@ static int kvaser_usb_hydra_get_capabilities(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_hydra_set_led(struct kvaser_usb_net_priv *priv,
+ enum kvaser_usb_led_state state,
+ u16 duration_ms)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ size_t cmd_len;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_LED_ACTION_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ kvaser_usb_hydra_set_cmd_dest_he(cmd, dev->card_data.hydra.sysdbg_he);
+ kvaser_usb_hydra_set_cmd_transid(cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ cmd->led_action_req.duration_ms = cpu_to_le16(duration_ms);
+ cmd->led_action_req.action = state |
+ FIELD_PREP(KVASER_USB_HYDRA_LED_IDX_MASK,
+ KVASER_USB_HYDRA_LED_YELLOW_CH0_IDX +
+ KVASER_USB_HYDRA_LEDS_PER_CHANNEL * priv->channel);
+
+ ret = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ kfree(cmd);
+
+ return ret;
+}
+
static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
{
struct kvaser_usb *dev = priv->dev;
@@ -2149,6 +2209,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
.dev_get_software_details = kvaser_usb_hydra_get_software_details,
.dev_get_card_info = kvaser_usb_hydra_get_card_info,
.dev_get_capabilities = kvaser_usb_hydra_get_capabilities,
+ .dev_set_led = kvaser_usb_hydra_set_led,
.dev_set_opt_mode = kvaser_usb_hydra_set_opt_mode,
.dev_start_chip = kvaser_usb_hydra_start_chip,
.dev_stop_chip = kvaser_usb_hydra_stop_chip,
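
A worked example of the hydra LED action encoding above: bit 0 carries the state (0 = on, 1 = off) and bits 31:1 the LED index, with two LEDs per channel starting at index 3 for channel 0's yellow LED:

#include <stdio.h>

#define LED_YELLOW_CH0_IDX	3U
#define LEDS_PER_CHANNEL	2U

static unsigned int led_action(unsigned int channel, unsigned int off)
{
	unsigned int idx = LED_YELLOW_CH0_IDX + LEDS_PER_CHANNEL * channel;

	/* equivalent to state | FIELD_PREP(GENMASK(31, 1), idx) */
	return (idx << 1) | (off & 1);
}

int main(void)
{
	/* channel 2, LED on: index 7, state bit clear -> 0x0e */
	printf("action=0x%02x\n", led_action(2, 0));
	return 0;
}
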
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 6a45adcc45bd..c29828a94ad0 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -10,6 +10,7 @@
* Copyright (C) 2015 Valeo S.A.
*/
+#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/gfp.h>
@@ -81,6 +82,8 @@
#define CMD_FLUSH_QUEUE_REPLY 68
#define CMD_GET_CAPABILITIES_REQ 95
#define CMD_GET_CAPABILITIES_RESP 96
+#define CMD_LED_ACTION_REQ 101
+#define CMD_LED_ACTION_RESP 102
#define CMD_LEAF_LOG_MESSAGE 106
@@ -135,7 +138,7 @@ struct kvaser_cmd_cardinfo {
__le32 padding0;
__le32 clock_resolution;
__le32 mfgdate;
- u8 ean[8];
+ __le32 ean[2];
u8 hw_revision;
union {
struct {
@@ -173,6 +176,21 @@ struct kvaser_cmd_busparams {
struct kvaser_usb_busparams busparams;
} __packed;
+/* The device has one LED per CAN channel.
+ * The LSB of the action field controls the state:
+ * 0 = ON
+ * 1 = OFF
+ * The remaining bits of the action field hold the LED index.
+ */
+#define KVASER_USB_LEAF_LED_IDX_MASK GENMASK(31, 1)
+#define KVASER_USB_LEAF_LED_YELLOW_CH0_IDX 2
+struct kvaser_cmd_led_action_req {
+ u8 tid;
+ u8 action;
+ __le16 duration_ms;
+ u8 padding[24];
+} __packed;
+
struct kvaser_cmd_tx_can {
u8 channel;
u8 tid;
@@ -359,6 +377,8 @@ struct kvaser_cmd {
struct kvaser_cmd_cardinfo cardinfo;
struct kvaser_cmd_busparams busparams;
+ struct kvaser_cmd_led_action_req led_action_req;
+
struct kvaser_cmd_rx_can_header rx_can_header;
struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header;
@@ -409,6 +429,7 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
[CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
/* ignored events: */
[CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+ [CMD_LED_ACTION_RESP] = CMD_SIZE_ANY,
};
static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
@@ -423,6 +444,8 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
[CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
[CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
[CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = kvaser_fsize(u.usbcan.clk_overflow_event),
+ /* ignored events: */
+ [CMD_LED_ACTION_RESP] = CMD_SIZE_ANY,
};
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
@@ -718,9 +741,13 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
const struct leaf_cmd_softinfo *softinfo)
{
+ u32 fw_version;
u32 sw_options = le32_to_cpu(softinfo->sw_options);
- dev->fw_version = le32_to_cpu(softinfo->fw_version);
+ fw_version = le32_to_cpu(softinfo->fw_version);
+ dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK, fw_version);
+ dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK, fw_version);
+ dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK, fw_version);
dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP)
@@ -761,6 +788,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
int err;
+ u32 fw_version;
err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0);
if (err)
@@ -775,7 +803,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
case KVASER_USBCAN:
- dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK,
+ fw_version);
+ dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK,
+ fw_version);
+ dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK,
+ fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg;
@@ -820,6 +854,10 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
(dev->driver_info->family == KVASER_USBCAN &&
dev->nchannels > MAX_USBCAN_NET_DEVICES))
return -EINVAL;
+ dev->ean[1] = le32_to_cpu(cmd.u.cardinfo.ean[1]);
+ dev->ean[0] = le32_to_cpu(cmd.u.cardinfo.ean[0]);
+ dev->serial_number = le32_to_cpu(cmd.u.cardinfo.serial_number);
+ dev->hw_revision = cmd.u.cardinfo.hw_revision;
return 0;
}
@@ -924,6 +962,34 @@ static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_leaf_set_led(struct kvaser_usb_net_priv *priv,
+ enum kvaser_usb_led_state state,
+ u16 duration_ms)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_LED_ACTION_REQ;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_led_action_req);
+ cmd->u.led_action_req.tid = 0xff;
+
+ cmd->u.led_action_req.duration_ms = cpu_to_le16(duration_ms);
+ cmd->u.led_action_req.action = state |
+ FIELD_PREP(KVASER_USB_LEAF_LED_IDX_MASK,
+ KVASER_USB_LEAF_LED_YELLOW_CH0_IDX +
+ priv->channel);
+
+ ret = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+ kfree(cmd);
+
+ return ret;
+}
+
static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev)
{
int err = 0;
@@ -1638,6 +1704,8 @@ static void kvaser_usb_leaf_handle_command(struct kvaser_usb *dev,
if (dev->driver_info->family != KVASER_LEAF)
goto warn;
break;
+ case CMD_LED_ACTION_RESP:
+ break;
default:
warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id);
@@ -1927,6 +1995,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_get_software_details = NULL,
.dev_get_card_info = kvaser_usb_leaf_get_card_info,
.dev_get_capabilities = kvaser_usb_leaf_get_capabilities,
+ .dev_set_led = kvaser_usb_leaf_set_led,
.dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
.dev_start_chip = kvaser_usb_leaf_start_chip,
.dev_stop_chip = kvaser_usb_leaf_stop_chip,
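Both leaf paths above stop treating fw_version as a single opaque u32 and split it into major/minor/build with FIELD_GET(). The KVASER_USB_SW_VERSION_*_MASK definitions live in kvaser_usb.h and are not part of this hunk; the sketch below assumes the historical encoding of major in bits 31:24, minor in 23:16 and build in 15:0, with an open-coded FIELD_GET():

#include <stdio.h>
#include <stdint.h>

#define SW_VERSION_MAJOR_MASK   0xff000000u     /* assumed layout */
#define SW_VERSION_MINOR_MASK   0x00ff0000u
#define SW_VERSION_BUILD_MASK   0x0000ffffu

/* open-coded FIELD_GET(): shift the masked value down to bit 0 */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
        unsigned int shift = 0;

        while (!(mask & 1)) {
                mask >>= 1;
                shift++;
        }
        return (val >> shift) & mask;
}

int main(void)
{
        uint32_t fw_version = 0x0305002au;      /* 3.5, build 42 */

        printf("%u.%u build %u\n",
               (unsigned)field_get(SW_VERSION_MAJOR_MASK, fw_version),
               (unsigned)field_get(SW_VERSION_MINOR_MASK, fw_version),
               (unsigned)field_get(SW_VERSION_BUILD_MASK, fw_version));
        return 0;
}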
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 4d85b29a17b7..ebefc274b50a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -49,7 +49,7 @@ struct __packed pcan_ufd_fw_info {
__le32 ser_no; /* S/N */
__le32 flags; /* special functions */
- /* extended data when type == PCAN_USBFD_TYPE_EXT */
+ /* extended data when type >= PCAN_USBFD_TYPE_EXT */
u8 cmd_out_ep; /* ep for cmd */
u8 cmd_in_ep; /* ep for replies */
u8 data_out_ep[2]; /* ep for CANx TX */
@@ -982,10 +982,11 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
}
- /* if vendor rsp is of type 2, then it contains EP numbers to
- * use for cmds pipes. If not, then default EP should be used.
+ /* if vendor rsp type is greater than or equal to 2, then it
+ * contains EP numbers to use for cmds pipes. If not, then
+ * default EP should be used.
*/
- if (fw_info->type != cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
+ if (le16_to_cpu(fw_info->type) < PCAN_USBFD_TYPE_EXT) {
fw_info->cmd_out_ep = PCAN_USBPRO_EP_CMDOUT;
fw_info->cmd_in_ep = PCAN_USBPRO_EP_CMDIN;
}
@@ -1018,11 +1019,11 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
dev->can_channel_id =
le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
- /* if vendor rsp is of type 2, then it contains EP numbers to
- * use for data pipes. If not, then statically defined EP are used
- * (see peak_usb_create_dev()).
+ /* if vendor rsp type is greater than or equal to 2, then it contains EP
+ * numbers to use for data pipes. If not, then statically defined EP are
+ * used (see peak_usb_create_dev()).
*/
- if (fw_info->type == cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
+ if (le16_to_cpu(fw_info->type) >= PCAN_USBFD_TYPE_EXT) {
dev->ep_msg_in = fw_info->data_in_ep;
dev->ep_msg_out = fw_info->data_out_ep[dev->ctrl_idx];
}
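The pcan_usb_fd change swaps an equality test against cpu_to_le16() for an ordered comparison after le16_to_cpu(). Equality is byte-order agnostic as long as both sides use the same representation, but < and >= are only meaningful in CPU byte order. A standalone illustration of what goes wrong on a big-endian host if the raw little-endian field is compared directly:

#include <stdio.h>
#include <stdint.h>

#define PCAN_USBFD_TYPE_EXT     2

/* portable le16 load, equivalent to le16_to_cpu() on a __le16 field */
static uint16_t le16_load(const uint8_t raw[2])
{
        return (uint16_t)raw[0] | ((uint16_t)raw[1] << 8);
}

int main(void)
{
        uint8_t wire[2] = { 0x01, 0x00 };       /* le16 "type = 1" */
        /* what a big-endian CPU sees reading the field unconverted */
        uint16_t misread = ((uint16_t)wire[0] << 8) | wire[1]; /* 0x0100 */

        printf("converted: %d\n", le16_load(wire) >= PCAN_USBFD_TYPE_EXT);
        printf("misread:   %d\n", misread >= PCAN_USBFD_TYPE_EXT);
        /* prints 0 then 1: the unconverted compare claims type >= 2 */
        return 0;
}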
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 3f2e378199ab..81baec8eb1e5 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -515,7 +515,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
priv->devtype.cantype == XAXI_CANFD_2_0) {
/* Setting Baud Rate prescaler value in F_BRPR Register */
btr0 = dbt->brp - 1;
- if (can_tdc_is_enabled(&priv->can)) {
+ if (can_fd_tdc_is_enabled(&priv->can)) {
if (priv->devtype.cantype == XAXI_CANFD)
btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
XCAN_BRPR_TDC_ENABLE;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index bb9812b3b0e8..ec759f8cb0e2 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -92,7 +92,7 @@ source "drivers/net/dsa/realtek/Kconfig"
config NET_DSA_RZN1_A5PSW
tristate "Renesas RZ/N1 A5PSW Ethernet switch support"
- depends on OF && ARCH_RZN1
+ depends on OF && (ARCH_RZN1 || COMPILE_TEST)
select NET_DSA_TAG_RZN1_A5PSW
select PCS_RZN1_MIIC
help
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
index ebaa4a80d544..915008e8eff5 100644
--- a/drivers/net/dsa/b53/Kconfig
+++ b/drivers/net/dsa/b53/Kconfig
@@ -5,6 +5,7 @@ menuconfig B53
select NET_DSA_TAG_NONE
select NET_DSA_TAG_BRCM
select NET_DSA_TAG_BRCM_LEGACY
+ select NET_DSA_TAG_BRCM_LEGACY_FCS
select NET_DSA_TAG_BRCM_PREPEND
help
This driver adds support for Broadcom managed switch chips. It supports
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index dc2f4adac9bc..9942fb6f7f4b 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -361,18 +361,23 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
- /* Include IMP port in dumb forwarding mode
- */
- b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
- mgmt |= B53_MII_DUMB_FWDG_EN;
- b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
-
- /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
- * frames should be flooded or not.
- */
- b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
- mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
- b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ if (!is5325(dev)) {
+ /* Include IMP port in dumb forwarding mode */
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+ mgmt |= B53_MII_DUMB_FWDG_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+
+ /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
+ * frames should be flooded or not.
+ */
+ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+ mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ } else {
+ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+ mgmt |= B53_IP_MCAST_25;
+ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ }
}
static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
@@ -487,6 +492,9 @@ static int b53_flush_arl(struct b53_device *dev, u8 mask)
{
unsigned int i;
+ if (is5325(dev))
+ return 0;
+
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
@@ -511,6 +519,9 @@ out:
static int b53_fast_age_port(struct b53_device *dev, int port)
{
+ if (is5325(dev))
+ return 0;
+
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
return b53_flush_arl(dev, FAST_AGE_PORT);
@@ -518,6 +529,9 @@ static int b53_fast_age_port(struct b53_device *dev, int port)
static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
{
+ if (is5325(dev))
+ return 0;
+
b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
return b53_flush_arl(dev, FAST_AGE_VLAN);
@@ -529,6 +543,10 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
unsigned int i;
u16 pvlan;
+ /* BCM5325 CPU port is at 8 */
+ if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
+ cpu_port = B53_CPU_PORT;
+
/* Enable the IMP port to be in the same VLAN as the other ports
* on a per-port basis such that we only have Port i and IMP in
* the same VLAN.
@@ -546,12 +564,24 @@ static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
{
u16 uc;
- b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
- if (unicast)
- uc |= BIT(port);
- else
- uc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+ if (is5325(dev)) {
+ if (port == B53_CPU_PORT_25)
+ port = B53_CPU_PORT;
+
+ b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, &uc);
+ if (unicast)
+ uc |= BIT(port) | B53_IEEE_UCAST_DROP_EN;
+ else
+ uc &= ~BIT(port);
+ b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, uc);
+ } else {
+ b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
+ if (unicast)
+ uc |= BIT(port);
+ else
+ uc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+ }
}
static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
@@ -559,19 +589,31 @@ static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
{
u16 mc;
- b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
- if (multicast)
- mc |= BIT(port);
- else
- mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+ if (is5325(dev)) {
+ if (port == B53_CPU_PORT_25)
+ port = B53_CPU_PORT;
- b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
- if (multicast)
- mc |= BIT(port);
- else
- mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+ b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, &mc);
+ if (multicast)
+ mc |= BIT(port) | B53_IEEE_MCAST_DROP_EN;
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, mc);
+ } else {
+ b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+ }
}
static void b53_port_set_learning(struct b53_device *dev, int port,
@@ -579,6 +621,9 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
{
u16 reg;
+ if (is5325(dev))
+ return;
+
b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
if (learning)
reg &= ~BIT(port);
@@ -615,6 +660,19 @@ int b53_setup_port(struct dsa_switch *ds, int port)
if (dsa_is_user_port(ds, port))
b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+ if (is5325(dev) &&
+ in_range(port, 1, 4)) {
+ u8 reg;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, &reg);
+ reg &= ~PD_MODE_POWER_DOWN_PORT(0);
+ if (dsa_is_unused_port(ds, port))
+ reg |= PD_MODE_POWER_DOWN_PORT(port);
+ else
+ reg &= ~PD_MODE_POWER_DOWN_PORT(port);
+ b53_write8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, reg);
+ }
+
return 0;
}
EXPORT_SYMBOL(b53_setup_port);
@@ -631,6 +689,9 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ if (dev->ops->phy_enable)
+ dev->ops->phy_enable(dev, port);
+
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
if (ret)
@@ -669,6 +730,9 @@ void b53_disable_port(struct dsa_switch *ds, int port)
reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+ if (dev->ops->phy_disable)
+ dev->ops->phy_disable(dev, port);
+
if (dev->ops->irq_disable)
dev->ops->irq_disable(dev, port);
}
@@ -713,6 +777,11 @@ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
hdr_ctl |= GC_FRM_MGMT_PORT_M;
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
+ /* B53_BRCM_HDR not present on devices with legacy tags */
+ if (dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY ||
+ dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY_FCS)
+ return;
+
/* Enable Broadcom tags for IMP port */
b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
if (tag_en)
@@ -1257,6 +1326,8 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
+ } else if (is5325(dev)) {
+ return;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
val = GMII_PO_EN;
@@ -1281,6 +1352,8 @@ static void b53_force_port_config(struct b53_device *dev, int port,
if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
+ } else if (is5325(dev)) {
+ return;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
val = GMII_PO_EN;
@@ -1311,10 +1384,19 @@ static void b53_force_port_config(struct b53_device *dev, int port,
return;
}
- if (rx_pause)
- reg |= PORT_OVERRIDE_RX_FLOW;
- if (tx_pause)
- reg |= PORT_OVERRIDE_TX_FLOW;
+ if (rx_pause) {
+ if (is5325(dev))
+ reg |= PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg |= PORT_OVERRIDE_RX_FLOW;
+ }
+
+ if (tx_pause) {
+ if (is5325(dev))
+ reg |= PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg |= PORT_OVERRIDE_TX_FLOW;
+ }
b53_write8(dev, B53_CTRL_PAGE, off, reg);
}
@@ -1328,7 +1410,7 @@ static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl);
rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- if (is63268(dev))
+ if (is6318_268(dev))
rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
@@ -1764,6 +1846,45 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
}
+static int b53_arl_read_25(struct b53_device *dev, u64 mac,
+ u16 vid, struct b53_arl_entry *ent, u8 *idx)
+{
+ DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
+ unsigned int i;
+ int ret;
+
+ ret = b53_arl_op_wait(dev);
+ if (ret)
+ return ret;
+
+ bitmap_zero(free_bins, dev->num_arl_bins);
+
+ /* Read the bins */
+ for (i = 0; i < dev->num_arl_bins; i++) {
+ u64 mac_vid;
+
+ b53_read64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
+
+ b53_arl_to_entry_25(ent, mac_vid);
+
+ if (!(mac_vid & ARLTBL_VALID_25)) {
+ set_bit(i, free_bins);
+ continue;
+ }
+ if ((mac_vid & ARLTBL_MAC_MASK) != mac)
+ continue;
+ if (dev->vlan_enabled &&
+ ((mac_vid >> ARLTBL_VID_S_65) & ARLTBL_VID_MASK_25) != vid)
+ continue;
+ *idx = i;
+ return 0;
+ }
+
+ *idx = find_first_bit(free_bins, dev->num_arl_bins);
+ return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
+}
+
static int b53_arl_op(struct b53_device *dev, int op, int port,
const unsigned char *addr, u16 vid, bool is_valid)
{
@@ -1778,14 +1899,18 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
/* Perform a read for the given MAC and VID */
b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
- b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+ if (!is5325m(dev))
+ b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
/* Issue a read operation for this MAC */
ret = b53_arl_rw_op(dev, 1);
if (ret)
return ret;
- ret = b53_arl_read(dev, mac, vid, &ent, &idx);
+ if (is5325(dev) || is5365(dev))
+ ret = b53_arl_read_25(dev, mac, vid, &ent, &idx);
+ else
+ ret = b53_arl_read(dev, mac, vid, &ent, &idx);
/* If this is a read, just finish now */
if (op)
@@ -1829,12 +1954,17 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
ent.is_static = true;
ent.is_age = false;
memcpy(ent.mac, addr, ETH_ALEN);
- b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
+ if (is5325(dev) || is5365(dev))
+ b53_arl_from_entry_25(&mac_vid, &ent);
+ else
+ b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
b53_write64(dev, B53_ARLIO_PAGE,
B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
- b53_write32(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+
+ if (!is5325(dev) && !is5365(dev))
+ b53_write32(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
return b53_arl_rw_op(dev, 0);
}
@@ -1846,12 +1976,6 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
struct b53_device *priv = ds->priv;
int ret;
- /* 5325 and 5365 require some more massaging, but could
- * be supported eventually
- */
- if (is5325(priv) || is5365(priv))
- return -EOPNOTSUPP;
-
mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, addr, vid, true);
mutex_unlock(&priv->arl_mutex);
@@ -1878,10 +2002,15 @@ EXPORT_SYMBOL(b53_fdb_del);
static int b53_arl_search_wait(struct b53_device *dev)
{
unsigned int timeout = 1000;
- u8 reg;
+ u8 reg, offset;
+
+ if (is5325(dev) || is5365(dev))
+ offset = B53_ARL_SRCH_CTL_25;
+ else
+ offset = B53_ARL_SRCH_CTL;
do {
- b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
+ b53_read8(dev, B53_ARLIO_PAGE, offset, &reg);
if (!(reg & ARL_SRCH_STDN))
return 0;
@@ -1898,13 +2027,24 @@ static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
struct b53_arl_entry *ent)
{
u64 mac_vid;
- u32 fwd_entry;
- b53_read64(dev, B53_ARLIO_PAGE,
- B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
- b53_read32(dev, B53_ARLIO_PAGE,
- B53_ARL_SRCH_RSTL(idx), &fwd_entry);
- b53_arl_to_entry(ent, mac_vid, fwd_entry);
+ if (is5325(dev)) {
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25,
+ &mac_vid);
+ b53_arl_to_entry_25(ent, mac_vid);
+ } else if (is5365(dev)) {
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_65,
+ &mac_vid);
+ b53_arl_to_entry_25(ent, mac_vid);
+ } else {
+ u32 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx),
+ &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx),
+ &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+ }
}
static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
@@ -1925,14 +2065,20 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
struct b53_device *priv = ds->priv;
struct b53_arl_entry results[2];
unsigned int count = 0;
+ u8 offset;
int ret;
u8 reg;
mutex_lock(&priv->arl_mutex);
+ if (is5325(priv) || is5365(priv))
+ offset = B53_ARL_SRCH_CTL_25;
+ else
+ offset = B53_ARL_SRCH_CTL;
+
/* Start search operation */
reg = ARL_SRCH_STDN;
- b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
+ b53_write8(priv, B53_ARLIO_PAGE, offset, reg);
do {
ret = b53_arl_search_wait(priv);
@@ -2165,7 +2311,13 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
+ struct b53_device *dev = ds->priv;
+ unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD);
+
+ if (!is5325(dev))
+ mask |= BR_LEARNING;
+
+ if (flags.mask & ~mask)
return -EINVAL;
return 0;
@@ -2241,8 +2393,11 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
goto out;
}
- /* Older models require a different 6 byte tag */
- if (is5325(dev) || is5365(dev) || is63xx(dev)) {
+ /* Older models require different 6 byte tags */
+ if (is5325(dev) || is5365(dev)) {
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY_FCS;
+ goto out;
+ } else if (is63xx(dev)) {
dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
goto out;
}
@@ -2620,19 +2775,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
},
{
- .chip_id = BCM63268_DEVICE_ID,
- .dev_name = "BCM63268",
- .vlans = 4096,
- .enabled_ports = 0, /* pdata must provide them */
- .arl_bins = 4,
- .arl_buckets = 1024,
- .imp_port = 8,
- .vta_regs = B53_VTA_REGS_63XX,
- .duplex_reg = B53_DUPLEX_STAT_63XX,
- .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
- .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
- },
- {
.chip_id = BCM53010_DEVICE_ID,
.dev_name = "BCM53010",
.vlans = 4096,
@@ -2781,13 +2923,17 @@ static const struct b53_chip_data b53_switch_chips[] = {
static int b53_switch_init(struct b53_device *dev)
{
+ u32 chip_id = dev->chip_id;
unsigned int i;
int ret;
+ if (is63xx(dev))
+ chip_id = BCM63XX_DEVICE_ID;
+
for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
const struct b53_chip_data *chip = &b53_switch_chips[i];
- if (chip->chip_id == dev->chip_id) {
+ if (chip->chip_id == chip_id) {
if (!dev->enabled_ports)
dev->enabled_ports = chip->enabled_ports;
dev->name = chip->dev_name;
@@ -2830,6 +2976,9 @@ static int b53_switch_init(struct b53_device *dev)
}
}
+ if (is5325e(dev))
+ dev->num_arl_buckets = 512;
+
dev->num_ports = fls(dev->enabled_ports);
dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
@@ -2931,10 +3080,24 @@ int b53_switch_detect(struct b53_device *dev)
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
- if (tmp == 0xf)
+ if (tmp == 0xf) {
+ u32 phy_id;
+ int val;
+
dev->chip_id = BCM5325_DEVICE_ID;
- else
+
+ val = b53_phy_read16(dev->ds, 0, MII_PHYSID1);
+ phy_id = (val & 0xffff) << 16;
+ val = b53_phy_read16(dev->ds, 0, MII_PHYSID2);
+ phy_id |= (val & 0xfff0);
+
+ if (phy_id == 0x00406330)
+ dev->variant_id = B53_VARIANT_5325M;
+ else if (phy_id == 0x0143bc30)
+ dev->variant_id = B53_VARIANT_5325E;
+ } else {
dev->chip_id = BCM5365_DEVICE_ID;
+ }
break;
case BCM5389_DEVICE_ID:
case BCM5395_DEVICE_ID:
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index c687360a5b7f..f06c3e0cc42a 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -21,13 +21,54 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/platform_data/b53.h>
+#include <linux/regmap.h>
#include "b53_priv.h"
+#define BCM63XX_EPHY_REG 0x3C
+
+struct b53_phy_info {
+ u32 ephy_enable_mask;
+ u32 ephy_port_mask;
+ u32 ephy_bias_bit;
+ const u32 *ephy_offset;
+};
+
struct b53_mmap_priv {
void __iomem *regs;
+ struct regmap *gpio_ctrl;
+ const struct b53_phy_info *phy_info;
+ u32 phys_enabled;
+};
+
+static const u32 bcm6318_ephy_offsets[] = {4, 5, 6, 7};
+
+static const struct b53_phy_info bcm6318_ephy_info = {
+ .ephy_enable_mask = BIT(0) | BIT(4) | BIT(8) | BIT(12) | BIT(16),
+ .ephy_port_mask = GENMASK((ARRAY_SIZE(bcm6318_ephy_offsets) - 1), 0),
+ .ephy_bias_bit = 24,
+ .ephy_offset = bcm6318_ephy_offsets,
+};
+
+static const u32 bcm6368_ephy_offsets[] = {2, 3, 4, 5};
+
+static const struct b53_phy_info bcm6368_ephy_info = {
+ .ephy_enable_mask = BIT(0),
+ .ephy_port_mask = GENMASK((ARRAY_SIZE(bcm6368_ephy_offsets) - 1), 0),
+ .ephy_bias_bit = 0,
+ .ephy_offset = bcm6368_ephy_offsets,
+};
+
+static const u32 bcm63268_ephy_offsets[] = {4, 9, 14};
+
+static const struct b53_phy_info bcm63268_ephy_info = {
+ .ephy_enable_mask = GENMASK(4, 0),
+ .ephy_port_mask = GENMASK((ARRAY_SIZE(bcm63268_ephy_offsets) - 1), 0),
+ .ephy_bias_bit = 24,
+ .ephy_offset = bcm63268_ephy_offsets,
};
static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
@@ -229,6 +270,50 @@ static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
return -EIO;
}
+static int bcm63xx_ephy_set(struct b53_device *dev, int port, bool enable)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ const struct b53_phy_info *info = priv->phy_info;
+ struct regmap *gpio_ctrl = priv->gpio_ctrl;
+ u32 mask, val;
+
+ if (enable) {
+ mask = (info->ephy_enable_mask << info->ephy_offset[port])
+ | BIT(info->ephy_bias_bit);
+ val = 0;
+ } else {
+ mask = (info->ephy_enable_mask << info->ephy_offset[port]);
+ if (!((priv->phys_enabled & ~BIT(port)) & info->ephy_port_mask))
+ mask |= BIT(info->ephy_bias_bit);
+ val = mask;
+ }
+ return regmap_update_bits(gpio_ctrl, BCM63XX_EPHY_REG, mask, val);
+}
+
+static void b53_mmap_phy_enable(struct b53_device *dev, int port)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ int ret = 0;
+
+ if (priv->phy_info && (BIT(port) & priv->phy_info->ephy_port_mask))
+ ret = bcm63xx_ephy_set(dev, port, true);
+
+ if (!ret)
+ priv->phys_enabled |= BIT(port);
+}
+
+static void b53_mmap_phy_disable(struct b53_device *dev, int port)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ int ret = 0;
+
+ if (priv->phy_info && (BIT(port) & priv->phy_info->ephy_port_mask))
+ ret = bcm63xx_ephy_set(dev, port, false);
+
+ if (!ret)
+ priv->phys_enabled &= ~BIT(port);
+}
+
static const struct b53_io_ops b53_mmap_ops = {
.read8 = b53_mmap_read8,
.read16 = b53_mmap_read16,
@@ -242,6 +327,8 @@ static const struct b53_io_ops b53_mmap_ops = {
.write64 = b53_mmap_write64,
.phy_read16 = b53_mmap_phy_read16,
.phy_write16 = b53_mmap_phy_write16,
+ .phy_enable = b53_mmap_phy_enable,
+ .phy_disable = b53_mmap_phy_disable,
};
static int b53_mmap_probe_of(struct platform_device *pdev,
@@ -313,6 +400,18 @@ static int b53_mmap_probe(struct platform_device *pdev)
priv->regs = pdata->regs;
+ priv->gpio_ctrl = syscon_regmap_lookup_by_phandle(np, "brcm,gpio-ctrl");
+ if (!IS_ERR(priv->gpio_ctrl)) {
+ if (pdata->chip_id == BCM6318_DEVICE_ID ||
+ pdata->chip_id == BCM6328_DEVICE_ID ||
+ pdata->chip_id == BCM6362_DEVICE_ID)
+ priv->phy_info = &bcm6318_ephy_info;
+ else if (pdata->chip_id == BCM6368_DEVICE_ID)
+ priv->phy_info = &bcm6368_ephy_info;
+ else if (pdata->chip_id == BCM63268_DEVICE_ID)
+ priv->phy_info = &bcm63268_ephy_info;
+ }
+
dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, priv);
if (!dev)
return -ENOMEM;
@@ -348,16 +447,16 @@ static const struct of_device_id b53_mmap_of_table[] = {
.data = (void *)BCM63XX_DEVICE_ID,
}, {
.compatible = "brcm,bcm6318-switch",
- .data = (void *)BCM63268_DEVICE_ID,
+ .data = (void *)BCM6318_DEVICE_ID,
}, {
.compatible = "brcm,bcm6328-switch",
- .data = (void *)BCM63XX_DEVICE_ID,
+ .data = (void *)BCM6328_DEVICE_ID,
}, {
.compatible = "brcm,bcm6362-switch",
- .data = (void *)BCM63XX_DEVICE_ID,
+ .data = (void *)BCM6362_DEVICE_ID,
}, {
.compatible = "brcm,bcm6368-switch",
- .data = (void *)BCM63XX_DEVICE_ID,
+ .data = (void *)BCM6368_DEVICE_ID,
}, {
.compatible = "brcm,bcm63268-switch",
.data = (void *)BCM63268_DEVICE_ID,
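In bcm63xx_ephy_set() above, every EPHY-backed port has its own group of power-down bits, while a single bias bit is shared by all of them, so the bias may only be switched off together with the last enabled PHY. Below is a standalone model of that mask construction, using the BCM6318 table values from the patch; the assumption that a set bit means "powered down" is inferred from the enable path clearing the bits. regmap_update_bits() rewrites only the bits selected by mask, taking their new values from val:

#include <stdint.h>
#include <stdbool.h>

#define EPHY_ENABLE_MASK        0x11111u        /* BIT(0)|BIT(4)|...|BIT(16) */
#define EPHY_PORT_MASK          0x0fu           /* four EPHY-backed ports */
#define EPHY_BIAS_BIT           (1u << 24)      /* shared by all EPHYs */

static const unsigned int ephy_offset[] = { 4, 5, 6, 7 };
static uint32_t phys_enabled;                   /* mirrors priv->phys_enabled */

static void ephy_mask_val(unsigned int port, bool enable,
                          uint32_t *mask, uint32_t *val)
{
        uint32_t port_bits = EPHY_ENABLE_MASK << ephy_offset[port];

        if (enable) {
                /* clear the power-down bits and the shared bias bit */
                *mask = port_bits | EPHY_BIAS_BIT;
                *val = 0;
        } else {
                *mask = port_bits;
                /* drop the bias only if no other EPHY port stays up */
                if (!((phys_enabled & ~(1u << port)) & EPHY_PORT_MASK))
                        *mask |= EPHY_BIAS_BIT;
                *val = *mask;                   /* set = powered down */
        }
}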
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index a5ef7071ba07..458775f95164 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -45,6 +45,8 @@ struct b53_io_ops {
int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
int (*irq_enable)(struct b53_device *dev, int port);
void (*irq_disable)(struct b53_device *dev, int port);
+ void (*phy_enable)(struct b53_device *dev, int port);
+ void (*phy_disable)(struct b53_device *dev, int port);
void (*phylink_get_caps)(struct b53_device *dev, int port,
struct phylink_config *config);
struct phylink_pcs *(*phylink_mac_select_pcs)(struct b53_device *dev,
@@ -71,6 +73,10 @@ enum {
BCM53125_DEVICE_ID = 0x53125,
BCM53128_DEVICE_ID = 0x53128,
BCM63XX_DEVICE_ID = 0x6300,
+ BCM6318_DEVICE_ID = 0x6318,
+ BCM6328_DEVICE_ID = 0x6328,
+ BCM6362_DEVICE_ID = 0x6362,
+ BCM6368_DEVICE_ID = 0x6368,
BCM63268_DEVICE_ID = 0x63268,
BCM53010_DEVICE_ID = 0x53010,
BCM53011_DEVICE_ID = 0x53011,
@@ -84,6 +90,12 @@ enum {
BCM53134_DEVICE_ID = 0x5075,
};
+enum b53_variant_id {
+ B53_VARIANT_NONE = 0,
+ B53_VARIANT_5325E,
+ B53_VARIANT_5325M,
+};
+
struct b53_pcs {
struct phylink_pcs pcs;
struct b53_device *dev;
@@ -118,6 +130,7 @@ struct b53_device {
/* chip specific data */
u32 chip_id;
+ enum b53_variant_id variant_id;
u8 core_rev;
u8 vta_regs[3];
u8 duplex_reg;
@@ -165,6 +178,18 @@ static inline int is5325(struct b53_device *dev)
return dev->chip_id == BCM5325_DEVICE_ID;
}
+static inline int is5325e(struct b53_device *dev)
+{
+ return is5325(dev) &&
+ dev->variant_id == B53_VARIANT_5325E;
+}
+
+static inline int is5325m(struct b53_device *dev)
+{
+ return is5325(dev) &&
+ dev->variant_id == B53_VARIANT_5325M;
+}
+
static inline int is5365(struct b53_device *dev)
{
#ifdef CONFIG_BCM47XX
@@ -199,12 +224,17 @@ static inline int is531x5(struct b53_device *dev)
static inline int is63xx(struct b53_device *dev)
{
return dev->chip_id == BCM63XX_DEVICE_ID ||
+ dev->chip_id == BCM6318_DEVICE_ID ||
+ dev->chip_id == BCM6328_DEVICE_ID ||
+ dev->chip_id == BCM6362_DEVICE_ID ||
+ dev->chip_id == BCM6368_DEVICE_ID ||
dev->chip_id == BCM63268_DEVICE_ID;
}
-static inline int is63268(struct b53_device *dev)
+static inline int is6318_268(struct b53_device *dev)
{
- return dev->chip_id == BCM63268_DEVICE_ID;
+ return dev->chip_id == BCM6318_DEVICE_ID ||
+ dev->chip_id == BCM63268_DEVICE_ID;
}
static inline int is5301x(struct b53_device *dev)
@@ -298,6 +328,19 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
ent->vid = mac_vid >> ARLTBL_VID_S;
}
+static inline void b53_arl_to_entry_25(struct b53_arl_entry *ent,
+ u64 mac_vid)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->port = (mac_vid >> ARLTBL_DATA_PORT_ID_S_25) &
+ ARLTBL_DATA_PORT_ID_MASK_25;
+ ent->is_valid = !!(mac_vid & ARLTBL_VALID_25);
+ ent->is_age = !!(mac_vid & ARLTBL_AGE_25);
+ ent->is_static = !!(mac_vid & ARLTBL_STATIC_25);
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->vid = mac_vid >> ARLTBL_VID_S_65;
+}
+
static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
const struct b53_arl_entry *ent)
{
@@ -312,6 +355,22 @@ static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
*fwd_entry |= ARLTBL_AGE;
}
+static inline void b53_arl_from_entry_25(u64 *mac_vid,
+ const struct b53_arl_entry *ent)
+{
+ *mac_vid = ether_addr_to_u64(ent->mac);
+ *mac_vid |= (u64)(ent->port & ARLTBL_DATA_PORT_ID_MASK_25) <<
+ ARLTBL_DATA_PORT_ID_S_25;
+ *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK_25) <<
+ ARLTBL_VID_S_65;
+ if (ent->is_valid)
+ *mac_vid |= ARLTBL_VALID_25;
+ if (ent->is_static)
+ *mac_vid |= ARLTBL_STATIC_25;
+ if (ent->is_age)
+ *mac_vid |= ARLTBL_AGE_25;
+}
+
#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
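The two helpers added above pack and unpack a BCM5325-style ARL entry in a single 64-bit word: the 48-bit MAC in bits 47:0, the port ID starting at bit 48, the VID starting at bit 53, and the age/static/valid flags in bits 61 to 63. ARLTBL_VID_MASK_25 is a pre-existing define not visible in this hunk; the round-trip sketch below assumes an 8-bit VID field:

#include <stdint.h>

#define ARL_PORT_S      48
#define ARL_PORT_MASK   0xfULL
#define ARL_VID_S       53
#define ARL_VID_MASK    0xffULL         /* assumed width */
#define ARL_AGE         (1ULL << 61)
#define ARL_STATIC      (1ULL << 62)
#define ARL_VALID       (1ULL << 63)

static uint64_t arl_pack(uint64_t mac, unsigned int port, unsigned int vid)
{
        return (mac & 0xffffffffffffULL) |
               ((uint64_t)(port & ARL_PORT_MASK) << ARL_PORT_S) |
               ((uint64_t)(vid & ARL_VID_MASK) << ARL_VID_S) |
               ARL_VALID | ARL_STATIC;
}

static void arl_unpack(uint64_t mac_vid, uint64_t *mac,
                       unsigned int *port, unsigned int *vid)
{
        *mac = mac_vid & 0xffffffffffffULL;
        *port = (mac_vid >> ARL_PORT_S) & ARL_PORT_MASK;
        *vid = (mac_vid >> ARL_VID_S) & ARL_VID_MASK;
}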
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index 1fbc5a204bc7..309fe0e46dad 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -29,6 +29,7 @@
#define B53_ARLIO_PAGE 0x05 /* ARL Access */
#define B53_FRAMEBUF_PAGE 0x06 /* Management frame access */
#define B53_MEM_ACCESS_PAGE 0x08 /* Memory access */
+#define B53_IEEE_PAGE 0x0a /* IEEE 802.1X */
/* PHY Registers */
#define B53_PORT_MII_PAGE(i) (0x10 + (i)) /* Port i MII Registers */
@@ -95,17 +96,22 @@
#define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S)
#define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S)
#define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_LP_FLOW_25 BIT(3) /* BCM5325 only */
#define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */
#define PORT_OVERRIDE_RX_FLOW BIT(4)
#define PORT_OVERRIDE_TX_FLOW BIT(5)
#define PORT_OVERRIDE_SPEED_2000M BIT(6) /* BCM5301X only, requires setting 1000M */
#define PORT_OVERRIDE_EN BIT(7) /* Use the register contents */
-/* Power-down mode control */
+/* Power-down mode control (8 bit) */
#define B53_PD_MODE_CTRL_25 0x0f
+#define PD_MODE_PORT_MASK 0x1f
+/* Bit 0 also powers down the switch. */
+#define PD_MODE_POWER_DOWN_PORT(i) BIT(i)
/* IP Multicast control (8 bit) */
#define B53_IP_MULTICAST_CTRL 0x21
+#define B53_IP_MCAST_25 BIT(0)
#define B53_IPMC_FWD_EN BIT(1)
#define B53_UC_FWD_EN BIT(6)
#define B53_MC_FWD_EN BIT(7)
@@ -324,9 +330,10 @@
#define ARLTBL_VID_MASK 0xfff
#define ARLTBL_DATA_PORT_ID_S_25 48
#define ARLTBL_DATA_PORT_ID_MASK_25 0xf
-#define ARLTBL_AGE_25 BIT(61)
-#define ARLTBL_STATIC_25 BIT(62)
-#define ARLTBL_VALID_25 BIT(63)
+#define ARLTBL_VID_S_65 53
+#define ARLTBL_AGE_25 BIT_ULL(61)
+#define ARLTBL_STATIC_25 BIT_ULL(62)
+#define ARLTBL_VALID_25 BIT_ULL(63)
/* ARL Table Data Entry N Registers (32 bit) */
#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18)
@@ -366,6 +373,18 @@
#define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10))
/*************************************************************************
+ * IEEE 802.1X Registers
+ *************************************************************************/
+
+/* Multicast DLF Drop Control register (16 bit) */
+#define B53_IEEE_MCAST_DLF 0x94
+#define B53_IEEE_MCAST_DROP_EN BIT(11)
+
+/* Unicast DLF Drop Control register (16 bit) */
+#define B53_IEEE_UCAST_DLF 0x96
+#define B53_IEEE_UCAST_DROP_EN BIT(11)
+
+/*************************************************************************
* Port VLAN Registers
*************************************************************************/
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 283ec5a6e23c..e0b4758ca583 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -1061,7 +1061,7 @@ static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek)
static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
{
- static struct hellcreek_fdb_entry l2_ptp = {
+ static const struct hellcreek_fdb_entry l2_ptp = {
/* MAC: 01-1B-19-00-00-00 */
.mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 },
.portmask = 0x03, /* Management ports */
@@ -1072,7 +1072,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry udp4_ptp = {
+ static const struct hellcreek_fdb_entry udp4_ptp = {
/* MAC: 01-00-5E-00-01-81 */
.mac = { 0x01, 0x00, 0x5e, 0x00, 0x01, 0x81 },
.portmask = 0x03, /* Management ports */
@@ -1083,7 +1083,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6,
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry udp6_ptp = {
+ static const struct hellcreek_fdb_entry udp6_ptp = {
/* MAC: 33-33-00-00-01-81 */
.mac = { 0x33, 0x33, 0x00, 0x00, 0x01, 0x81 },
.portmask = 0x03, /* Management ports */
@@ -1094,7 +1094,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6,
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry l2_p2p = {
+ static const struct hellcreek_fdb_entry l2_p2p = {
/* MAC: 01-80-C2-00-00-0E */
.mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e },
.portmask = 0x03, /* Management ports */
@@ -1105,7 +1105,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry udp4_p2p = {
+ static const struct hellcreek_fdb_entry udp4_p2p = {
/* MAC: 01-00-5E-00-00-6B */
.mac = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x6b },
.portmask = 0x03, /* Management ports */
@@ -1116,7 +1116,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6,
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry udp6_p2p = {
+ static const struct hellcreek_fdb_entry udp6_p2p = {
/* MAC: 33-33-00-00-00-6B */
.mac = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x6b },
.portmask = 0x03, /* Management ports */
@@ -1127,7 +1127,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6,
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry stp = {
+ static const struct hellcreek_fdb_entry stp = {
/* MAC: 01-80-C2-00-00-00 */
.mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 },
.portmask = 0x03, /* Management ports */
@@ -1320,13 +1320,13 @@ static int hellcreek_devlink_region_fdb_snapshot(struct devlink *dl,
return 0;
}
-static struct devlink_region_ops hellcreek_region_vlan_ops = {
+static const struct devlink_region_ops hellcreek_region_vlan_ops = {
.name = "vlan",
.snapshot = hellcreek_devlink_region_vlan_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops hellcreek_region_fdb_ops = {
+static const struct devlink_region_ops hellcreek_region_fdb_ops = {
.name = "fdb",
.snapshot = hellcreek_devlink_region_fdb_snapshot,
.destructor = kfree,
@@ -1335,7 +1335,7 @@ static struct devlink_region_ops hellcreek_region_fdb_ops = {
static int hellcreek_setup_devlink_regions(struct dsa_switch *ds)
{
struct hellcreek *hellcreek = ds->priv;
- struct devlink_region_ops *ops;
+ const struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
int ret;
diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c
index be433b4e2b1c..c354abdafc1b 100644
--- a/drivers/net/dsa/microchip/ksz8.c
+++ b/drivers/net/dsa/microchip/ksz8.c
@@ -3,6 +3,7 @@
* Microchip KSZ8XXX series switch driver
*
* It supports the following switches:
+ * - KSZ8463
* - KSZ8863, KSZ8873 aka KSZ88X3
* - KSZ8895, KSZ8864 aka KSZ8895 family
* - KSZ8794, KSZ8795, KSZ8765 aka KSZ87XX
@@ -35,14 +36,14 @@
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
- regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
+ ksz_rmw8(dev, addr, bits, set ? bits : 0);
}
static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bool set)
{
- regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
- bits, set ? bits : 0);
+ ksz_rmw8(dev, dev->dev_ops->get_port_addr(port, offset), bits,
+ set ? bits : 0);
}
/**
@@ -140,6 +141,11 @@ int ksz8_reset_switch(struct ksz_device *dev)
KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, true);
ksz_cfg(dev, KSZ8863_REG_SW_RESET,
KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, false);
+ } else if (ksz_is_ksz8463(dev)) {
+ ksz_cfg(dev, KSZ8463_REG_SW_RESET,
+ KSZ8463_GLOBAL_SOFTWARE_RESET, true);
+ ksz_cfg(dev, KSZ8463_REG_SW_RESET,
+ KSZ8463_GLOBAL_SOFTWARE_RESET, false);
} else {
/* reset switch */
ksz_write8(dev, REG_POWER_MANAGEMENT_1,
@@ -194,6 +200,7 @@ int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
return ksz8795_change_mtu(dev, frame_size);
+ case KSZ8463_CHIP_ID:
case KSZ88X3_CHIP_ID:
case KSZ8864_CHIP_ID:
case KSZ8895_CHIP_ID:
@@ -227,6 +234,11 @@ static int ksz8_port_queue_split(struct ksz_device *dev, int port, int queues)
WEIGHTED_FAIR_QUEUE_ENABLE);
if (ret)
return ret;
+ } else if (ksz_is_ksz8463(dev)) {
+ mask_4q = KSZ8873_PORT_4QUEUE_SPLIT_EN;
+ mask_2q = KSZ8873_PORT_2QUEUE_SPLIT_EN;
+ reg_4q = P1CR1;
+ reg_2q = P1CR1 + 1;
} else {
mask_4q = KSZ8795_PORT_4QUEUE_SPLIT_EN;
mask_2q = KSZ8795_PORT_2QUEUE_SPLIT_EN;
@@ -371,6 +383,9 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
addr -= dev->info->reg_mib_cnt;
ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
KSZ8863_MIB_PACKET_DROPPED_RX_0;
+ if (ksz_is_8895_family(dev) &&
+ ctrl_addr == KSZ8863_MIB_PACKET_DROPPED_RX_0)
+ ctrl_addr = KSZ8895_MIB_PACKET_DROPPED_RX_0;
ctrl_addr += port;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
@@ -1265,12 +1280,15 @@ int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
+ int offset = P_MIRROR_CTRL;
u8 data;
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
- data &= ~PORT_VLAN_MEMBERSHIP;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
+ ksz_pread8(dev, port, offset, &data);
+ data &= ~dev->port_mask;
data |= (member & dev->port_mask);
- ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
+ ksz_pwrite8(dev, port, offset, data);
}
void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1278,6 +1296,8 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
u8 learn[DSA_MAX_PORTS];
int first, index, cnt;
const u16 *regs;
+ int reg = S_FLUSH_TABLE_CTRL;
+ int mask = SW_FLUSH_DYN_MAC_TABLE;
regs = dev->info->regs;
@@ -1295,7 +1315,11 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
ksz_pwrite8(dev, index, regs[P_STP_CTRL],
learn[index] | PORT_LEARN_DISABLE);
}
- ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
+ if (ksz_is_ksz8463(dev)) {
+ reg = KSZ8463_FLUSH_TABLE_CTRL;
+ mask = KSZ8463_FLUSH_DYN_MAC_TABLE;
+ }
+ ksz_cfg(dev, reg, mask, true);
for (index = first; index < cnt; index++) {
if (!(learn[index] & PORT_LEARN_DISABLE))
ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]);
@@ -1434,7 +1458,7 @@ int ksz8_fdb_del(struct ksz_device *dev, int port, const unsigned char *addr,
int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
struct netlink_ext_ack *extack)
{
- if (ksz_is_ksz88x3(dev))
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
return -ENOTSUPP;
/* Discard packets with VID not enabled on the switch */
@@ -1450,9 +1474,12 @@ int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
{
- if (ksz_is_ksz88x3(dev)) {
- ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
- 0x03 << (4 - 2 * port), state);
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev)) {
+ int reg = REG_SW_INSERT_SRC_PVID;
+
+ if (ksz_is_ksz8463(dev))
+ reg = KSZ8463_REG_SW_CTRL_9;
+ ksz_cfg(dev, reg, 0x03 << (4 - 2 * port), state);
} else {
ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
}
@@ -1467,7 +1494,7 @@ int ksz8_port_vlan_add(struct ksz_device *dev, int port,
u16 data, new_pvid = 0;
u8 fid, member, valid;
- if (ksz_is_ksz88x3(dev))
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
return -ENOTSUPP;
/* If a VLAN is added with untagged flag different from the
@@ -1536,7 +1563,7 @@ int ksz8_port_vlan_del(struct ksz_device *dev, int port,
u16 data, pvid;
u8 fid, member, valid;
- if (ksz_is_ksz88x3(dev))
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
return -ENOTSUPP;
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
@@ -1566,19 +1593,23 @@ int ksz8_port_mirror_add(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
{
+ int offset = P_MIRROR_CTRL;
+
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
if (ingress) {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_RX, true);
dev->mirror_rx |= BIT(port);
} else {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_TX, true);
dev->mirror_tx |= BIT(port);
}
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_SNIFFER, false);
/* configure mirror port */
if (dev->mirror_rx || dev->mirror_tx)
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ ksz_port_cfg(dev, mirror->to_local_port, offset,
PORT_MIRROR_SNIFFER, true);
return 0;
@@ -1587,20 +1618,23 @@ int ksz8_port_mirror_add(struct ksz_device *dev, int port,
void ksz8_port_mirror_del(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
+ int offset = P_MIRROR_CTRL;
u8 data;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
if (mirror->ingress) {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_RX, false);
dev->mirror_rx &= ~BIT(port);
} else {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_TX, false);
dev->mirror_tx &= ~BIT(port);
}
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+ ksz_pread8(dev, port, offset, &data);
if (!dev->mirror_rx && !dev->mirror_tx)
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ ksz_port_cfg(dev, mirror->to_local_port, offset,
PORT_MIRROR_SNIFFER, false);
}
@@ -1625,17 +1659,24 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
const u32 *masks;
+ int offset;
u8 member;
masks = dev->info->masks;
/* enable broadcast storm limit */
- ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
+ offset = P_BCAST_STORM_CTRL;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR1;
+ ksz_port_cfg(dev, port, offset, PORT_BROADCAST_STORM, true);
ksz8_port_queue_split(dev, port, dev->info->num_tx_queues);
/* replace priority */
- ksz_port_cfg(dev, port, P_802_1P_CTRL,
+ offset = P_802_1P_CTRL;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
+ ksz_port_cfg(dev, port, offset,
masks[PORT_802_1P_REMAPPING], false);
if (cpu_port)
@@ -1675,6 +1716,7 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
const u32 *masks;
const u16 *regs;
u8 remote;
+ u8 fiber_ports = 0;
int i;
masks = dev->info->masks;
@@ -1705,6 +1747,32 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
else
ksz_port_cfg(dev, i, regs[P_STP_CTRL],
PORT_FORCE_FLOW_CTRL, false);
+ if (p->fiber)
+ fiber_ports |= (1 << i);
+ }
+ if (ksz_is_ksz8463(dev)) {
+ /* Setup fiber ports. */
+ if (fiber_ports) {
+ fiber_ports &= 3;
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_REG_CFG_CTRL,
+ fiber_ports << PORT_COPPER_MODE_S,
+ 0);
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_REG_DSP_CTRL_6,
+ COPPER_RECEIVE_ADJUSTMENT, 0);
+ }
+
+ /* Turn off the PTP function, as the switch's proprietary way of
+ * handling timestamps is not supported by the current Linux PTP
+ * stack implementation.
+ */
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_PTP_MSG_CONF1,
+ PTP_ENABLE, 0);
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_PTP_CLK_CTRL,
+ PTP_CLK_ENABLE, 0);
}
}
@@ -1886,22 +1954,25 @@ int ksz8_setup(struct dsa_switch *ds)
ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
/* Enable aggressive back off algorithm in half duplex mode. */
- regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1,
- SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+ ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+ if (ret)
+ return ret;
/*
* Make sure unicast VLAN boundary is set as default and
* enable no excessive collision drop.
*/
- regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2,
- UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
- UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+ ret = ksz_rmw8(dev, REG_SW_CTRL_2,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+ if (ret)
+ return ret;
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
- if (!ksz_is_ksz88x3(dev))
+ if (!ksz_is_ksz88x3(dev) && !ksz_is_ksz8463(dev))
ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
for (i = 0; i < (dev->info->num_vlans / 4); i++)
@@ -1947,6 +2018,84 @@ u32 ksz8_get_port_addr(int port, int offset)
return PORT_CTRL_ADDR(port, offset);
}
+u32 ksz8463_get_port_addr(int port, int offset)
+{
+ return offset + 0x18 * port;
+}
+
+static u16 ksz8463_get_phy_addr(u16 phy, u16 reg, u16 offset)
+{
+ return offset + reg * 2 + phy * (P2MBCR - P1MBCR);
+}
+
+int ksz8463_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+{
+ u16 sw_reg = 0;
+ u16 data = 0;
+ int ret;
+
+ if (phy > 1)
+ return -ENOSPC;
+ switch (reg) {
+ case MII_PHYSID1:
+ sw_reg = ksz8463_get_phy_addr(phy, 0, PHY1IHR);
+ break;
+ case MII_PHYSID2:
+ sw_reg = ksz8463_get_phy_addr(phy, 0, PHY1ILR);
+ break;
+ case MII_BMCR:
+ case MII_BMSR:
+ case MII_ADVERTISE:
+ case MII_LPA:
+ sw_reg = ksz8463_get_phy_addr(phy, reg, P1MBCR);
+ break;
+ case MII_TPISTATUS:
+ /* This register holds the PHY interrupt status of the simulated
+ * Micrel KSZ PHY.
+ */
+ data = 0x0505;
+ break;
+ default:
+ break;
+ }
+ if (sw_reg) {
+ ret = ksz_read16(dev, sw_reg, &data);
+ if (ret)
+ return ret;
+ }
+ *val = data;
+
+ return 0;
+}
+
+int ksz8463_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+{
+ u16 sw_reg = 0;
+ int ret;
+
+ if (phy > 1)
+ return -ENOSPC;
+
+ /* No write to fiber port. */
+ if (dev->ports[phy].fiber)
+ return 0;
+ switch (reg) {
+ case MII_BMCR:
+ case MII_ADVERTISE:
+ sw_reg = ksz8463_get_phy_addr(phy, reg, P1MBCR);
+ break;
+ default:
+ break;
+ }
+ if (sw_reg) {
+ ret = ksz_write16(dev, sw_reg, val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int ksz8_switch_init(struct ksz_device *dev)
{
dev->cpu_port = fls(dev->info->cpu_ports) - 1;
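ksz8463_r_phy()/ksz8463_w_phy() above rely on ksz8463_get_phy_addr() translating a standard MII register number into one of the flat 16-bit switch registers: offset + reg * 2 + phy * (P2MBCR - P1MBCR), with a per-PHY stride of 0x0C. A quick self-check against the register table added to ksz8_reg.h below (the MII register numbers are the standard <uapi/linux/mii.h> values):

#include <assert.h>

#define P1MBCR          0x4C
#define P2MBCR          0x58
#define P1ANAR          0x54
#define P1ANLPR         0x56
#define P2ANAR          0x60
#define MII_ADVERTISE   0x04
#define MII_LPA         0x05

static unsigned int phy_addr(unsigned int phy, unsigned int reg)
{
        return P1MBCR + reg * 2 + phy * (P2MBCR - P1MBCR);
}

int main(void)
{
        assert(phy_addr(0, MII_ADVERTISE) == P1ANAR);   /* 0x4C + 8 */
        assert(phy_addr(0, MII_LPA) == P1ANLPR);        /* 0x4C + 10 */
        assert(phy_addr(1, MII_ADVERTISE) == P2ANAR);   /* + 0x0C stride */
        return 0;
}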
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index e1c79ff97123..0f2cd1474b44 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -63,4 +63,8 @@ void ksz8_phylink_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause);
int ksz8_all_queues_split(struct ksz_device *dev, int queues);
+u32 ksz8463_get_port_addr(int port, int offset);
+int ksz8463_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+int ksz8463_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+
#endif
diff --git a/drivers/net/dsa/microchip/ksz8_reg.h b/drivers/net/dsa/microchip/ksz8_reg.h
index 329688603a58..332408567b47 100644
--- a/drivers/net/dsa/microchip/ksz8_reg.h
+++ b/drivers/net/dsa/microchip/ksz8_reg.h
@@ -729,6 +729,55 @@
#define PHY_POWER_SAVING_ENABLE BIT(2)
#define PHY_REMOTE_LOOPBACK BIT(1)
+/* KSZ8463 specific registers. */
+#define P1MBCR 0x4C
+#define P1MBSR 0x4E
+#define PHY1ILR 0x50
+#define PHY1IHR 0x52
+#define P1ANAR 0x54
+#define P1ANLPR 0x56
+#define P2MBCR 0x58
+#define P2MBSR 0x5A
+#define PHY2ILR 0x5C
+#define PHY2IHR 0x5E
+#define P2ANAR 0x60
+#define P2ANLPR 0x62
+
+#define P1CR1 0x6C
+#define P1CR2 0x6E
+#define P1CR3 0x72
+#define P1CR4 0x7E
+#define P1SR 0x80
+
+#define KSZ8463_FLUSH_TABLE_CTRL 0xAD
+
+#define KSZ8463_FLUSH_DYN_MAC_TABLE BIT(2)
+#define KSZ8463_FLUSH_STA_MAC_TABLE BIT(1)
+
+#define KSZ8463_REG_SW_CTRL_9 0xAE
+
+#define KSZ8463_REG_CFG_CTRL 0xD8
+
+#define PORT_2_COPPER_MODE BIT(7)
+#define PORT_1_COPPER_MODE BIT(6)
+#define PORT_COPPER_MODE_S 6
+
+#define KSZ8463_REG_SW_RESET 0x126
+
+#define KSZ8463_GLOBAL_SOFTWARE_RESET BIT(0)
+
+#define KSZ8463_PTP_CLK_CTRL 0x600
+
+#define PTP_CLK_ENABLE BIT(1)
+
+#define KSZ8463_PTP_MSG_CONF1 0x620
+
+#define PTP_ENABLE BIT(6)
+
+#define KSZ8463_REG_DSP_CTRL_6 0x734
+
+#define COPPER_RECEIVE_ADJUSTMENT BIT(13)
+
/* Chip resource */
#define PRIO_QUEUES 4
@@ -784,7 +833,9 @@
#define KSZ8795_MIB_TOTAL_TX_1 0x105
#define KSZ8863_MIB_PACKET_DROPPED_TX_0 0x100
-#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x105
+#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x103
+
+#define KSZ8895_MIB_PACKET_DROPPED_RX_0 0x105
#define MIB_PACKET_DROPPED 0x0000FFFF
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 7c142c17b3f6..4cb14288ff0f 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -331,6 +331,38 @@ static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
.mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
+static const struct ksz_dev_ops ksz8463_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8463_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8463_r_phy,
+ .w_phy = ksz8463_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .r_mib_stat64 = ksz88xx_r_mib_stats64,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .fdb_add = ksz8_fdb_add,
+ .fdb_del = ksz8_fdb_del,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+ .change_mtu = ksz8_change_mtu,
+};
+
static const struct ksz_dev_ops ksz88xx_dev_ops = {
.setup = ksz8_setup,
.get_port_addr = ksz8_get_port_addr,
@@ -517,6 +549,60 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.exit = lan937x_switch_exit,
};
+static const u16 ksz8463_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x10,
+ [REG_IND_CTRL_0] = 0x30,
+ [REG_IND_DATA_8] = 0x26,
+ [REG_IND_DATA_CHECK] = 0x26,
+ [REG_IND_DATA_HI] = 0x28,
+ [REG_IND_DATA_LO] = 0x2C,
+ [REG_IND_MIB_CHECK] = 0x2F,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x0F,
+ [S_TAIL_TAG_CTRL] = 0xAD,
+ [P_STP_CTRL] = 0x6F,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8463_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(3),
+ [SW_TAIL_TAG_ENABLE] = BIT(0),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(15, 12),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
+ [VLAN_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(21),
+ [STATIC_MAC_TABLE_FID] = GENMASK(25, 22),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(1, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(2),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 24),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
+};
+
+static const u8 ksz8463_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 22,
+ [DYNAMIC_MAC_ENTRIES_H] = 8,
+ [DYNAMIC_MAC_ENTRIES] = 24,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 22,
+ [DYNAMIC_MAC_SRC_PORT] = 20,
+};
+
static const u16 ksz8795_regs[] = {
[REG_SW_MAC_ADDR] = 0x68,
[REG_IND_CTRL_0] = 0x6E,
@@ -1361,6 +1447,7 @@ static const struct regmap_range ksz8873_valid_regs[] = {
regmap_reg_range(0x3f, 0x3f),
/* advanced control registers */
+ regmap_reg_range(0x43, 0x43),
regmap_reg_range(0x60, 0x6f),
regmap_reg_range(0x70, 0x75),
regmap_reg_range(0x76, 0x78),
@@ -1387,6 +1474,29 @@ static const struct regmap_access_table ksz8873_register_set = {
};
const struct ksz_chip_data ksz_switch_chips[] = {
+ [KSZ8463] = {
+ .chip_id = KSZ8463_CHIP_ID,
+ .dev_name = "KSZ8463",
+ .num_vlans = 16,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x4, /* can be configured as cpu port */
+ .port_cnt = 3,
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz8463_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8463_regs,
+ .masks = ksz8463_masks,
+ .shifts = ksz8463_shifts,
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ },
+
[KSZ8563] = {
.chip_id = KSZ8563_CHIP_ID,
.dev_name = "KSZ8563",
@@ -2786,8 +2896,7 @@ static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
kirq->dev = dev;
kirq->masked = ~0;
- kirq->domain = irq_domain_create_simple(of_fwnode_handle(dev->dev->of_node),
- kirq->nirqs, 0,
+ kirq->domain = irq_domain_create_simple(dev_fwnode(dev->dev), kirq->nirqs, 0,
&ksz_irq_domain_ops, kirq);
if (!kirq->domain)
return -ENOMEM;
@@ -2843,6 +2952,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev);
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ u16 storm_mask, storm_rate;
struct dsa_port *dp;
struct ksz_port *p;
const u16 *regs;
@@ -2872,10 +2982,14 @@ static int ksz_setup(struct dsa_switch *ds)
}
/* set broadcast storm protection 10% rate */
+ storm_mask = BROADCAST_STORM_RATE;
+ storm_rate = (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;
+ if (ksz_is_ksz8463(dev)) {
+ storm_mask = swab16(storm_mask);
+ storm_rate = swab16(storm_rate);
+ }
regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL],
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
+ storm_mask, storm_rate);
dev->dev_ops->config_cpu_port(ds);
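The storm-protection hunk above byte-swaps both the mask and the value before the 16-bit regmap update on the KSZ8463, which presents its 16-bit registers with the opposite byte order from the other KSZ8 parts (an inference from this patch rather than a datasheet). Swapping both operands keeps regmap_update_bits() touching the intended bits. A tiny sketch, assuming the usual 0x07FF rate mask for BROADCAST_STORM_RATE:

#include <stdint.h>

static uint16_t swab16_(uint16_t v)
{
        return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
        uint16_t mask = 0x07FF;         /* assumed BROADCAST_STORM_RATE */

        /* 0x07FF -> 0xFF07: the same update call now masks the bytes
         * in the order the KSZ8463 actually stores them.
         */
        return swab16_(mask) == 0xFF07 ? 0 : 1;
}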
@@ -3401,6 +3515,7 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
proto = DSA_TAG_PROTO_KSZ8795;
if (dev->chip_id == KSZ88X3_CHIP_ID ||
+ dev->chip_id == KSZ8463_CHIP_ID ||
dev->chip_id == KSZ8563_CHIP_ID ||
dev->chip_id == KSZ9893_CHIP_ID ||
dev->chip_id == KSZ9563_CHIP_ID)
@@ -3513,6 +3628,7 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ case KSZ8463_CHIP_ID:
case KSZ88X3_CHIP_ID:
case KSZ8864_CHIP_ID:
case KSZ8895_CHIP_ID:
@@ -3867,6 +3983,9 @@ static int ksz_switch_detect(struct ksz_device *dev)
id2 = FIELD_GET(SW_CHIP_ID_M, id16);
switch (id1) {
+ case KSZ84_FAMILY_ID:
+ dev->chip_id = KSZ8463_CHIP_ID;
+ break;
case KSZ87_FAMILY_ID:
if (id2 == KSZ87_CHIP_ID_95) {
u8 val;
@@ -4108,6 +4227,17 @@ static int ksz_ets_band_to_queue(struct tc_ets_qopt_offload_replace_params *p,
return p->bands - 1 - band;
}
+static u8 ksz8463_tc_ctrl(int port, int queue)
+{
+ u8 reg;
+
+ reg = 0xC8 + port * 4;
+ reg += ((3 - queue) / 2) * 2;
+ reg++;
+ reg -= (queue & 1);
+ return reg;
+}
+
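The register arithmetic in ksz8463_tc_ctrl() is easier to audit with the values written out; working the expression through for port 0 (pure arithmetic from the function above):

    /*
     * ksz8463_tc_ctrl(0, queue):
     *   queue 3 -> 0xC8    queue 2 -> 0xC9
     *   queue 1 -> 0xCA    queue 0 -> 0xCB
     * Each port owns four consecutive TXQ control bytes starting at
     * 0xC8 + port * 4, ordered from the highest-priority queue down.
     */
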
/**
* ksz88x3_tc_ets_add - Configure ETS (Enhanced Transmission Selection)
* for a port on KSZ88x3 switch
@@ -4143,6 +4273,8 @@ static int ksz88x3_tc_ets_add(struct ksz_device *dev, int port,
* port/queue
*/
reg = KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue);
+ if (ksz_is_ksz8463(dev))
+ reg = ksz8463_tc_ctrl(port, queue);
/* Clear WFQ enable bit to select strict priority scheduling */
ret = ksz_rmw8(dev, reg, KSZ8873_TXQ_WFQ_ENABLE, 0);
@@ -4178,6 +4310,8 @@ static int ksz88x3_tc_ets_del(struct ksz_device *dev, int port)
* port/queue
*/
reg = KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue);
+ if (ksz_is_ksz8463(dev))
+ reg = ksz8463_tc_ctrl(port, queue);
/* Set WFQ enable bit to revert back to default scheduling
* mode
@@ -4325,7 +4459,7 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
int ret;
- if (is_ksz8(dev) && !ksz_is_ksz88x3(dev))
+ if (is_ksz8(dev) && !(ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev)))
return -EOPNOTSUPP;
if (qopt->parent != TC_H_ROOT) {
@@ -4339,13 +4473,13 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
if (ret)
return ret;
- if (ksz_is_ksz88x3(dev))
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
return ksz88x3_tc_ets_add(dev, port,
&qopt->replace_params);
else
return ksz_tc_ets_add(dev, port, &qopt->replace_params);
case TC_ETS_DESTROY:
- if (ksz_is_ksz88x3(dev))
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
return ksz88x3_tc_ets_del(dev, port);
else
return ksz_tc_ets_del(dev, port);
@@ -4688,7 +4822,16 @@ int ksz_switch_macaddr_get(struct dsa_switch *ds, int port,
/* Program the switch MAC address to hardware */
for (i = 0; i < ETH_ALEN; i++) {
- ret = ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i, addr[i]);
+ if (ksz_is_ksz8463(dev)) {
+ u16 addr16 = ((u16)addr[i] << 8) | addr[i + 1];
+
+ ret = ksz_write16(dev, regs[REG_SW_MAC_ADDR] + i,
+ addr16);
+ i++;
+ } else {
+ ret = ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i,
+ addr[i]);
+ }
if (ret)
goto macaddr_drop;
}
@@ -5297,6 +5440,9 @@ int ksz_switch_register(struct ksz_device *dev)
&dev->ports[port_num].interface);
ksz_parse_rgmii_delay(dev, port_num, port);
+ dev->ports[port_num].fiber =
+ of_property_read_bool(port,
+ "micrel,fiber-mode");
}
of_node_put(ports);
}
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index a08417df2ca4..a1eb39771bb9 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -222,6 +222,7 @@ struct ksz_device {
/* List of supported models */
enum ksz_model {
+ KSZ8463,
KSZ8563,
KSZ8567,
KSZ8795,
@@ -484,6 +485,11 @@ static inline struct regmap *ksz_regmap_32(struct ksz_device *dev)
return dev->regmap[KSZ_REGMAP_32];
}
+static inline bool ksz_is_ksz8463(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8463_CHIP_ID;
+}
+
static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
{
unsigned int value;
@@ -709,12 +715,13 @@ static inline bool ksz_is_8895_family(struct ksz_device *dev)
static inline bool is_ksz8(struct ksz_device *dev)
{
return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev) ||
- ksz_is_8895_family(dev);
+ ksz_is_8895_family(dev) || ksz_is_ksz8463(dev);
}
static inline bool is_ksz88xx(struct ksz_device *dev)
{
- return ksz_is_ksz88x3(dev) || ksz_is_8895_family(dev);
+ return ksz_is_ksz88x3(dev) || ksz_is_8895_family(dev) ||
+ ksz_is_ksz8463(dev);
}
static inline bool is_ksz9477(struct ksz_device *dev)
@@ -761,6 +768,7 @@ static inline bool ksz_is_sgmii_port(struct ksz_device *dev, int port)
#define REG_CHIP_ID0 0x00
#define SW_FAMILY_ID_M GENMASK(15, 8)
+#define KSZ84_FAMILY_ID 0x84
#define KSZ87_FAMILY_ID 0x87
#define KSZ88_FAMILY_ID 0x88
#define KSZ8895_FAMILY_ID 0x95
@@ -939,4 +947,29 @@ static inline bool ksz_is_sgmii_port(struct ksz_device *dev, int port)
[KSZ_REGMAP_32] = KSZ_REGMAP_ENTRY(32, swp, (regbits), (regpad), (regalign)), \
}
+#define KSZ8463_REGMAP_ENTRY(width, regbits, regpad, regalign) \
+ { \
+ .name = #width, \
+ .val_bits = (width), \
+ .reg_stride = (width / 8), \
+ .reg_bits = (regbits) + (regalign), \
+ .pad_bits = (regpad), \
+ .read = ksz8463_spi_read, \
+ .write = ksz8463_spi_write, \
+ .max_register = BIT(regbits) - 1, \
+ .cache_type = REGCACHE_NONE, \
+ .zero_flag_mask = 1, \
+ .use_single_read = 1, \
+ .use_single_write = 1, \
+ .lock = ksz_regmap_lock, \
+ .unlock = ksz_regmap_unlock, \
+ }
+
+#define KSZ8463_REGMAP_TABLE(ksz, regbits, regpad, regalign) \
+ static const struct regmap_config ksz##_regmap_config[] = { \
+ [KSZ_REGMAP_8] = KSZ8463_REGMAP_ENTRY(8, (regbits), (regpad), (regalign)), \
+ [KSZ_REGMAP_16] = KSZ8463_REGMAP_ENTRY(16, (regbits), (regpad), (regalign)), \
+ [KSZ_REGMAP_32] = KSZ8463_REGMAP_ENTRY(32, (regbits), (regpad), (regalign)), \
+ }
+
#endif
diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c
index c3b501997ac9..7131c5caac54 100644
--- a/drivers/net/dsa/microchip/ksz_dcb.c
+++ b/drivers/net/dsa/microchip/ksz_dcb.c
@@ -16,10 +16,12 @@
* Therefore, we define the base offset as 0x00 here to align with that logic.
*/
#define KSZ8_REG_PORT_1_CTRL_0 0x00
+#define KSZ8463_REG_PORT_1_CTRL_0 0x6C
#define KSZ8_PORT_DIFFSERV_ENABLE BIT(6)
#define KSZ8_PORT_802_1P_ENABLE BIT(5)
#define KSZ8_PORT_BASED_PRIO_M GENMASK(4, 3)
+#define KSZ8463_REG_TOS_DSCP_CTRL 0x16
#define KSZ88X3_REG_TOS_DSCP_CTRL 0x60
#define KSZ8765_REG_TOS_DSCP_CTRL 0x90
@@ -98,6 +100,8 @@ static void ksz_get_default_port_prio_reg(struct ksz_device *dev, int *reg,
*reg = KSZ8_REG_PORT_1_CTRL_0;
*mask = KSZ8_PORT_BASED_PRIO_M;
*shift = __bf_shf(KSZ8_PORT_BASED_PRIO_M);
+ if (ksz_is_ksz8463(dev))
+ *reg = KSZ8463_REG_PORT_1_CTRL_0;
} else {
*reg = KSZ9477_REG_PORT_MRI_MAC_CTRL;
*mask = KSZ9477_PORT_BASED_PRIO_M;
@@ -122,10 +126,12 @@ static void ksz_get_dscp_prio_reg(struct ksz_device *dev, int *reg,
*reg = KSZ8765_REG_TOS_DSCP_CTRL;
*per_reg = 4;
*mask = GENMASK(1, 0);
- } else if (ksz_is_ksz88x3(dev)) {
+ } else if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev)) {
*reg = KSZ88X3_REG_TOS_DSCP_CTRL;
*per_reg = 4;
*mask = GENMASK(1, 0);
+ if (ksz_is_ksz8463(dev))
+ *reg = KSZ8463_REG_TOS_DSCP_CTRL;
} else {
*reg = KSZ9477_REG_DIFFSERV_PRIO_MAP;
*per_reg = 2;
@@ -151,6 +157,8 @@ static void ksz_get_apptrust_map_and_reg(struct ksz_device *dev,
*map = ksz8_apptrust_map_to_bit;
*reg = KSZ8_REG_PORT_1_CTRL_0;
*mask = KSZ8_PORT_DIFFSERV_ENABLE | KSZ8_PORT_802_1P_ENABLE;
+ if (ksz_is_ksz8463(dev))
+ *reg = KSZ8463_REG_PORT_1_CTRL_0;
} else {
*map = ksz9477_apptrust_map_to_bit;
*reg = KSZ9477_REG_PORT_MRI_PRIO_CTRL;
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index 8ab664e85f13..35fc21b1ee48 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -1130,8 +1130,8 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
init_completion(&port->tstamp_msg_comp);
- ptpirq->domain = irq_domain_create_linear(of_fwnode_handle(dev->dev->of_node),
- ptpirq->nirqs, &ksz_ptp_irq_domain_ops, ptpirq);
+ ptpirq->domain = irq_domain_create_linear(dev_fwnode(dev->dev), ptpirq->nirqs,
+ &ksz_ptp_irq_domain_ops, ptpirq);
if (!ptpirq->domain)
return -ENOMEM;
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index b633d263098c..d8001734b057 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -16,6 +16,10 @@
#include "ksz_common.h"
+#define KSZ8463_SPI_ADDR_SHIFT 13
+#define KSZ8463_SPI_ADDR_ALIGN 3
+#define KSZ8463_SPI_TURNAROUND_SHIFT 2
+
#define KSZ8795_SPI_ADDR_SHIFT 12
#define KSZ8795_SPI_ADDR_ALIGN 3
#define KSZ8795_SPI_TURNAROUND_SHIFT 1
@@ -37,6 +41,99 @@ KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
KSZ_REGMAP_TABLE(ksz9477, 32, KSZ9477_SPI_ADDR_SHIFT,
KSZ9477_SPI_TURNAROUND_SHIFT, KSZ9477_SPI_ADDR_ALIGN);
+static u16 ksz8463_reg(u16 reg, size_t size)
+{
+ switch (size) {
+ case 1:
+ reg = ((reg >> 2) << 4) | (1 << (reg & 3));
+ break;
+ case 2:
+ reg = ((reg >> 2) << 4) | (reg & 2 ? 0x0c : 0x03);
+ break;
+ default:
+ reg = ((reg >> 2) << 4) | 0xf;
+ break;
+ }
+ reg <<= KSZ8463_SPI_TURNAROUND_SHIFT;
+ return reg;
+}
+
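ksz8463_reg() turns a byte address into a 32-bit-aligned window address plus a byte-enable nibble. Two worked encodings, derived directly from the code above and shown before ksz8463_spi_write() ORs in the write bit:

    /*
     * 8-bit access at 0x01:  ((0x01 >> 2) << 4) | (1 << (0x01 & 3)) = 0x02,
     *                        then << 2 for turnaround             -> 0x0008
     * 16-bit access at 0x02: ((0x02 >> 2) << 4) | 0x0c             = 0x000c,
     *                        then << 2 for turnaround             -> 0x0030
     * Writes additionally set bit (13 + 2) = bit 15, so the 16-bit
     * example goes on the wire as 0x8030.
     */
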
+static int ksz8463_spi_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u8 bytes[2];
+ u16 cmd;
+ int rc;
+
+ if (reg_size > 2 || val_size > 4)
+ return -EINVAL;
+ memcpy(&cmd, reg, sizeof(u16));
+ cmd = ksz8463_reg(cmd, val_size);
+ /* SPI command uses big-endian format. */
+ put_unaligned_be16(cmd, bytes);
+ rc = spi_write_then_read(spi, bytes, reg_size, val, val_size);
+#if defined(__BIG_ENDIAN)
+ /* Register values use little-endian format, so convert when
+ * running on a big-endian system.
+ */
+ if (!rc && val_size > 1) {
+ if (val_size == 2) {
+ u16 v = get_unaligned_le16(val);
+
+ memcpy(val, &v, sizeof(v));
+ } else if (val_size == 4) {
+ u32 v = get_unaligned_le32(val);
+
+ memcpy(val, &v, sizeof(v));
+ }
+ }
+#endif
+ return rc;
+}
+
+static int ksz8463_spi_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ size_t val_size = count - 2;
+ u8 bytes[6];
+ u16 cmd;
+
+ if (count <= 2 || count > 6)
+ return -EINVAL;
+ memcpy(bytes, data, count);
+ memcpy(&cmd, data, sizeof(u16));
+ cmd = ksz8463_reg(cmd, val_size);
+ cmd |= (1 << (KSZ8463_SPI_ADDR_SHIFT + KSZ8463_SPI_TURNAROUND_SHIFT));
+ /* SPI command uses big-endian format. */
+ put_unaligned_be16(cmd, bytes);
+#if defined(__BIG_ENDIAN)
+ /* Register values use little-endian format, so convert when
+ * running on a big-endian system.
+ */
+ if (val_size == 2) {
+ u8 *val = &bytes[2];
+ u16 v;
+
+ memcpy(&v, val, sizeof(v));
+ put_unaligned_le16(v, val);
+ } else if (val_size == 4) {
+ u8 *val = &bytes[2];
+ u32 v;
+
+ memcpy(&v, val, sizeof(v));
+ put_unaligned_le32(v, val);
+ }
+#endif
+ return spi_write(spi, bytes, count);
+}
+
+KSZ8463_REGMAP_TABLE(ksz8463, KSZ8463_SPI_ADDR_SHIFT, 0,
+ KSZ8463_SPI_ADDR_ALIGN);
+
static int ksz_spi_probe(struct spi_device *spi)
{
const struct regmap_config *regmap_config;
@@ -58,6 +155,8 @@ static int ksz_spi_probe(struct spi_device *spi)
dev->chip_id = chip->chip_id;
if (chip->chip_id == KSZ88X3_CHIP_ID)
regmap_config = ksz8863_regmap_config;
+ else if (chip->chip_id == KSZ8463_CHIP_ID)
+ regmap_config = ksz8463_regmap_config;
else if (chip->chip_id == KSZ8795_CHIP_ID ||
chip->chip_id == KSZ8794_CHIP_ID ||
chip->chip_id == KSZ8765_CHIP_ID)
@@ -126,6 +225,10 @@ static void ksz_spi_shutdown(struct spi_device *spi)
static const struct of_device_id ksz_dt_ids[] = {
{
+ .compatible = "microchip,ksz8463",
+ .data = &ksz_switch_chips[KSZ8463]
+ },
+ {
.compatible = "microchip,ksz8765",
.data = &ksz_switch_chips[KSZ8765]
},
@@ -214,6 +317,7 @@ static const struct of_device_id ksz_dt_ids[] = {
MODULE_DEVICE_TABLE(of, ksz_dt_ids);
static const struct spi_device_id ksz_spi_ids[] = {
+ { "ksz8463" },
{ "ksz8765" },
{ "ksz8794" },
{ "ksz8795" },
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
index 51df42ccdbe6..0286a6cecb6f 100644
--- a/drivers/net/dsa/mt7530-mdio.c
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -136,10 +136,17 @@ static const struct of_device_id mt7530_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mt7530_of_match);
+static const struct regmap_config regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MT7530_CREV,
+ .disable_locking = true,
+};
+
static int
mt7530_probe(struct mdio_device *mdiodev)
{
- static struct regmap_config *regmap_config;
struct mt7530_priv *priv;
struct device_node *dn;
int ret;
@@ -193,18 +200,8 @@ mt7530_probe(struct mdio_device *mdiodev)
return PTR_ERR(priv->io_pwr);
}
- regmap_config = devm_kzalloc(&mdiodev->dev, sizeof(*regmap_config),
- GFP_KERNEL);
- if (!regmap_config)
- return -ENOMEM;
-
- regmap_config->reg_bits = 16;
- regmap_config->val_bits = 32;
- regmap_config->reg_stride = 4;
- regmap_config->max_register = MT7530_CREV;
- regmap_config->disable_locking = true;
priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, priv,
- regmap_config);
+ &regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c
index 842d74268e77..1dc8b93fb51a 100644
--- a/drivers/net/dsa/mt7530-mmio.c
+++ b/drivers/net/dsa/mt7530-mmio.c
@@ -18,10 +18,17 @@ static const struct of_device_id mt7988_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mt7988_of_match);
+static const struct regmap_config sw_regmap_config = {
+ .name = "switch",
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MT7530_CREV,
+};
+
static int
mt7988_probe(struct platform_device *pdev)
{
- static struct regmap_config *sw_regmap_config;
struct mt7530_priv *priv;
void __iomem *base_addr;
int ret;
@@ -49,16 +56,8 @@ mt7988_probe(struct platform_device *pdev)
return -ENXIO;
}
- sw_regmap_config = devm_kzalloc(&pdev->dev, sizeof(*sw_regmap_config), GFP_KERNEL);
- if (!sw_regmap_config)
- return -ENOMEM;
-
- sw_regmap_config->name = "switch";
- sw_regmap_config->reg_bits = 16;
- sw_regmap_config->val_bits = 32;
- sw_regmap_config->reg_stride = 4;
- sw_regmap_config->max_register = MT7530_CREV;
- priv->regmap = devm_regmap_init_mmio(&pdev->dev, base_addr, sw_regmap_config);
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base_addr,
+ &sw_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index df213c37b4fe..548b85befbf4 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2112,7 +2112,7 @@ mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
}
-static void
+static int
mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct mt7530_priv *priv = gpiochip_get_data(gc);
@@ -2122,6 +2122,8 @@ mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
else
mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
+
+ return 0;
}
static int
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 195460a0a0d4..da69e0b85879 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -376,19 +376,14 @@ static int mv88e6xxx_region_atu_snapshot(struct devlink *dl,
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
struct mv88e6xxx_devlink_atu_entry *table;
struct mv88e6xxx_chip *chip = ds->priv;
- int fid = -1, err = 0, count;
+ int fid = -1, err = 0, count = 0;
- table = kmalloc_array(mv88e6xxx_num_databases(chip),
- sizeof(struct mv88e6xxx_devlink_atu_entry),
- GFP_KERNEL);
+ table = kcalloc(mv88e6xxx_num_databases(chip),
+ sizeof(struct mv88e6xxx_devlink_atu_entry),
+ GFP_KERNEL);
if (!table)
return -ENOMEM;
- memset(table, 0, mv88e6xxx_num_databases(chip) *
- sizeof(struct mv88e6xxx_devlink_atu_entry));
-
- count = 0;
-
mv88e6xxx_reg_lock(chip);
while (1) {
@@ -647,7 +642,7 @@ static struct mv88e6xxx_region_priv mv88e6xxx_region_global1_priv = {
.id = MV88E6XXX_REGION_GLOBAL1,
};
-static struct devlink_region_ops mv88e6xxx_region_global1_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_global1_ops = {
.name = "global1",
.snapshot = mv88e6xxx_region_global_snapshot,
.destructor = kfree,
@@ -658,32 +653,32 @@ static struct mv88e6xxx_region_priv mv88e6xxx_region_global2_priv = {
.id = MV88E6XXX_REGION_GLOBAL2,
};
-static struct devlink_region_ops mv88e6xxx_region_global2_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_global2_ops = {
.name = "global2",
.snapshot = mv88e6xxx_region_global_snapshot,
.destructor = kfree,
.priv = &mv88e6xxx_region_global2_priv,
};
-static struct devlink_region_ops mv88e6xxx_region_atu_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_atu_ops = {
.name = "atu",
.snapshot = mv88e6xxx_region_atu_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
.name = "vtu",
.snapshot = mv88e6xxx_region_vtu_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops mv88e6xxx_region_stu_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_stu_ops = {
.name = "stu",
.snapshot = mv88e6xxx_region_stu_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
.name = "pvt",
.snapshot = mv88e6xxx_region_pvt_snapshot,
.destructor = kfree,
@@ -696,13 +691,13 @@ static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = {
};
struct mv88e6xxx_region {
- struct devlink_region_ops *ops;
+ const struct devlink_region_ops *ops;
u64 size;
bool (*cond)(struct mv88e6xxx_chip *chip);
};
-static struct mv88e6xxx_region mv88e6xxx_regions[] = {
+static const struct mv88e6xxx_region mv88e6xxx_regions[] = {
[MV88E6XXX_REGION_GLOBAL1] = {
.ops = &mv88e6xxx_region_global1_ops,
.size = 32 * sizeof(u16)
@@ -768,7 +763,7 @@ int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
{
bool (*cond)(struct mv88e6xxx_chip *chip);
struct mv88e6xxx_chip *chip = ds->priv;
- struct devlink_region_ops *ops;
+ const struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
int i, j;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index aaf97c1e3167..30a6ffa7817b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -1154,10 +1154,8 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
if (err)
return err;
- chip->g2_irq.domain = irq_domain_create_simple(of_fwnode_handle(chip->dev->of_node),
- 16, 0,
- &mv88e6xxx_g2_irq_domain_ops,
- chip);
+ chip->g2_irq.domain = irq_domain_create_simple(dev_fwnode(chip->dev), 16, 0,
+ &mv88e6xxx_g2_irq_domain_ops, chip);
if (!chip->g2_irq.domain)
return -ENOMEM;
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 79a29676ca6f..0526aa96146e 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -821,8 +821,8 @@ static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
return ret;
}
- priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(np), 1,
- &ar9331_sw_irqdomain_ops, priv);
+ priv->irqdomain = irq_domain_create_linear(dev_fwnode(dev), 1, &ar9331_sw_irqdomain_ops,
+ priv);
if (!priv->irqdomain) {
dev_err(dev, "failed to create IRQ domain\n");
return -EINVAL;
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index df7466d4fe8f..1635255f58e4 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -1227,35 +1227,27 @@ static int a5psw_probe(struct platform_device *pdev)
if (ret)
return ret;
- a5psw->hclk = devm_clk_get(dev, "hclk");
+ a5psw->hclk = devm_clk_get_enabled(dev, "hclk");
if (IS_ERR(a5psw->hclk)) {
dev_err(dev, "failed get hclk clock\n");
ret = PTR_ERR(a5psw->hclk);
goto free_pcs;
}
- a5psw->clk = devm_clk_get(dev, "clk");
+ a5psw->clk = devm_clk_get_enabled(dev, "clk");
if (IS_ERR(a5psw->clk)) {
dev_err(dev, "failed get clk_switch clock\n");
ret = PTR_ERR(a5psw->clk);
goto free_pcs;
}
- ret = clk_prepare_enable(a5psw->clk);
- if (ret)
- goto free_pcs;
-
- ret = clk_prepare_enable(a5psw->hclk);
- if (ret)
- goto clk_disable;
-
mdio = of_get_available_child_by_name(dev->of_node, "mdio");
if (mdio) {
ret = a5psw_probe_mdio(a5psw, mdio);
of_node_put(mdio);
if (ret) {
dev_err(dev, "Failed to register MDIO: %d\n", ret);
- goto hclk_disable;
+ goto free_pcs;
}
}
@@ -1269,15 +1261,11 @@ static int a5psw_probe(struct platform_device *pdev)
ret = dsa_register_switch(ds);
if (ret) {
dev_err(dev, "Failed to register DSA switch: %d\n", ret);
- goto hclk_disable;
+ goto free_pcs;
}
return 0;
-hclk_disable:
- clk_disable_unprepare(a5psw->hclk);
-clk_disable:
- clk_disable_unprepare(a5psw->clk);
free_pcs:
a5psw_pcs_free(a5psw);
@@ -1293,8 +1281,6 @@ static void a5psw_remove(struct platform_device *pdev)
dsa_unregister_switch(&a5psw->ds);
a5psw_pcs_free(a5psw);
- clk_disable_unprepare(a5psw->hclk);
- clk_disable_unprepare(a5psw->clk);
}
static void a5psw_shutdown(struct platform_device *pdev)
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index f18aa321053d..9d31b8258268 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -2258,14 +2258,14 @@ static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset));
}
-static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct vsc73xx *vsc = gpiochip_get_data(chip);
u32 tmp = val ? BIT(offset) : 0;
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
- VSC73XX_GPIO, BIT(offset), tmp);
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset), tmp);
}
static int vsc73xx_gpio_direction_output(struct gpio_chip *chip,
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 678eddb36172..5c8217638dda 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2459,6 +2459,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb->data,
skb_headlen(skb),
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ return -ENOMEM;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2468,6 +2472,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb->data,
skb_headlen(skb) / 2,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ return -ENOMEM;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2478,6 +2486,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb_headlen(skb) / 2,
skb_headlen(skb) / 2,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ goto unmap_first_out;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2489,6 +2501,9 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
0,
desc[frag].len_vlan,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, dma_addr))
+ goto unmap_out;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2578,6 +2593,27 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
&adapter->regs->global.watchdog_timer);
}
return 0;
+
+unmap_out:
+ // Unmap the body of the packet with map_page
+ while (--i) {
+ frag--;
+ dma_addr = desc[frag].addr_lo;
+ dma_addr |= (u64)desc[frag].addr_hi << 32;
+ dma_unmap_page(&adapter->pdev->dev, dma_addr,
+ desc[frag].len_vlan, DMA_TO_DEVICE);
+ }
+
+unmap_first_out:
+ /* Unmap the header, which was mapped with dma_map_single() */
+ while (frag--) {
+ dma_addr = desc[frag].addr_lo;
+ dma_addr |= (u64)desc[frag].addr_hi << 32;
+ dma_unmap_single(&adapter->pdev->dev, dma_addr,
+ desc[frag].len_vlan, DMA_TO_DEVICE);
+ }
+
+ return -ENOMEM;
}
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 9057180051df..e6b802e3d844 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -551,9 +551,7 @@ static int airoha_fe_init(struct airoha_eth *eth)
static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
{
- enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
struct airoha_qdma *qdma = q->qdma;
- struct airoha_eth *eth = qdma->eth;
int qid = q - &qdma->q_rx[0];
int nframes = 0;
@@ -577,9 +575,6 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
e->dma_addr = page_pool_get_dma_addr(page) + offset;
e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
- dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
- dir);
-
val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index 1e58a4aeb9a0..a802f95df99d 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -161,7 +161,7 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
}
static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
- struct reserved_mem *rmem)
+ struct resource *res)
{
const struct firmware *fw;
void __iomem *addr;
@@ -178,9 +178,9 @@ static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
goto out;
}
- addr = devm_ioremap(dev, rmem->base, rmem->size);
- if (!addr) {
- ret = -ENOMEM;
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
goto out;
}
@@ -475,9 +475,8 @@ static const struct regmap_config regmap_config = {
static int airoha_npu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct reserved_mem *rmem;
struct airoha_npu *npu;
- struct device_node *np;
+ struct resource res;
void __iomem *base;
int i, irq, err;
@@ -499,15 +498,9 @@ static int airoha_npu_probe(struct platform_device *pdev)
if (IS_ERR(npu->regmap))
return PTR_ERR(npu->regmap);
- np = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!np)
- return -ENODEV;
-
- rmem = of_reserved_mem_lookup(np);
- of_node_put(np);
-
- if (!rmem)
- return -ENODEV;
+ err = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -540,12 +533,12 @@ static int airoha_npu_probe(struct platform_device *pdev)
if (err)
return err;
- err = airoha_npu_run_firmware(dev, base, rmem);
+ err = airoha_npu_run_firmware(dev, base, &res);
if (err)
return dev_err_probe(dev, err, "failed to run npu firmware\n");
regmap_write(npu->regmap, REG_CR_NPU_MIB(10),
- rmem->base + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
+ res.start + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */
regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0);
regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1);
@@ -553,7 +546,7 @@ static int airoha_npu_probe(struct platform_device *pdev)
/* setting booting address */
for (i = 0; i < NPU_NUM_CORES; i++)
- regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), rmem->base);
+ regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), res.start);
usleep_range(1000, 2000);
/* enable NPU cores */
@@ -586,6 +579,8 @@ static struct platform_driver airoha_npu_driver = {
};
module_platform_driver(airoha_npu_driver);
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 0e217acfc5ef..47411d2cbd28 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -232,6 +232,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
AIROHA_FOE_IB1_BIND_TTL;
hwe->ib1 = val;
@@ -281,33 +282,42 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
hwe->ipv6.data = qdata;
hwe->ipv6.ib2 = val;
l2 = &hwe->ipv6.l2;
+ l2->etype = ETH_P_IPV6;
} else {
hwe->ipv4.data = qdata;
hwe->ipv4.ib2 = val;
l2 = &hwe->ipv4.l2.common;
+ l2->etype = ETH_P_IP;
}
l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
+ struct airoha_foe_mac_info *mac_info;
+
l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
hwe->ipv4.l2.src_mac_lo =
get_unaligned_be16(data->eth.h_source + 4);
+
+ mac_info = (struct airoha_foe_mac_info *)l2;
+ mac_info->pppoe_id = data->pppoe.sid;
} else {
- l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
+ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
+ FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
+ data->pppoe.sid);
}
if (data->vlan.num) {
- l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
l2->vlan1 = data->vlan.hdr[0].id;
if (data->vlan.num == 2)
l2->vlan2 = data->vlan.hdr[1].id;
- } else if (dsa_port >= 0) {
- l2->etype = BIT(15) | BIT(dsa_port);
- } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
- l2->etype = ETH_P_IPV6;
- } else {
- l2->etype = ETH_P_IP;
+ }
+
+ if (dsa_port >= 0) {
+ l2->etype = BIT(dsa_port);
+ l2->etype |= !data->vlan.num ? BIT(15) : 0;
+ } else if (data->pppoe.num) {
+ l2->etype = ETH_P_PPP_SES;
}
return 0;
@@ -498,9 +508,11 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
}
-struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
- u32 hash)
+static struct airoha_foe_entry *
+airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
{
+ lockdep_assert_held(&ppe_lock);
+
if (hash < PPE_SRAM_NUM_ENTRIES) {
u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
struct airoha_eth *eth = ppe->eth;
@@ -527,6 +539,18 @@ struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
return ppe->foe + hash * sizeof(struct airoha_foe_entry);
}
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash)
+{
+ struct airoha_foe_entry *hwe;
+
+ spin_lock_bh(&ppe_lock);
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ spin_unlock_bh(&ppe_lock);
+
+ return hwe;
+}
+
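This refactor is the usual locked/unlocked helper split: external callers get the wrapper that takes ppe_lock, internal callers that already hold it use the _locked variant, and lockdep_assert_held() flags misuse. A generic sketch of the pattern (struct obj and obj_find() are illustrative, not driver code):

    static DEFINE_SPINLOCK(obj_lock);

    static struct obj *obj_get_locked(u32 key)
    {
            lockdep_assert_held(&obj_lock); /* complain if caller skipped the lock */
            return obj_find(key);           /* hypothetical lookup */
    }

    struct obj *obj_get(u32 key)
    {
            struct obj *o;

            spin_lock_bh(&obj_lock);
            o = obj_get_locked(key);
            spin_unlock_bh(&obj_lock);
            return o;
    }
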
static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
struct airoha_foe_entry *hwe)
{
@@ -641,7 +665,7 @@ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
struct airoha_flow_table_entry *f;
int type;
- hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
+ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
if (!hwe_p)
return -EINVAL;
@@ -693,7 +717,7 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
spin_lock_bh(&ppe_lock);
- hwe = airoha_ppe_foe_get_entry(ppe, hash);
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
if (!hwe)
goto unlock;
@@ -808,7 +832,7 @@ airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
u32 ib1, state;
int idle;
- hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
if (!hwe)
continue;
@@ -845,7 +869,7 @@ static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
if (e->hash == 0xffff)
goto unlock;
- hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
+ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
if (!hwe_p)
goto unlock;
@@ -959,6 +983,11 @@ static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
case FLOW_ACTION_VLAN_POP:
break;
case FLOW_ACTION_PPPOE_PUSH:
+ if (data.pppoe.num == 1 || data.vlan.num == 2)
+ return -EOPNOTSUPP;
+
+ data.pppoe.sid = act->pppoe.sid;
+ data.pppoe.num++;
break;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
index c37fa393b99e..95dcc3969f0c 100644
--- a/drivers/net/ethernet/amazon/Kconfig
+++ b/drivers/net/ethernet/amazon/Kconfig
@@ -19,7 +19,9 @@ if NET_VENDOR_AMAZON
config ENA_ETHERNET
tristate "Elastic Network Adapter (ENA) support"
depends on PCI_MSI && !CPU_BIG_ENDIAN
+ depends on PTP_1588_CLOCK_OPTIONAL
select DIMLIB
+ select NET_DEVLINK
help
This driver supports Elastic Network Adapter (ENA)
diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile
index 6ab615365172..6d8036bc1823 100644
--- a/drivers/net/ethernet/amazon/ena/Makefile
+++ b/drivers/net/ethernet/amazon/ena/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_ENA_ETHERNET) += ena.o
-ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o
+ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o ena_phc.o ena_devlink.o ena_debugfs.o
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 9d9fa6559354..898ecd96b96a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -60,6 +60,7 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_AENQ_CONFIG = 26,
ENA_ADMIN_LINK_CONFIG = 27,
ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+ ENA_ADMIN_PHC_CONFIG = 29,
ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
@@ -127,6 +128,14 @@ enum ena_admin_get_stats_scope {
ENA_ADMIN_ETH_TRAFFIC = 1,
};
+enum ena_admin_phc_type {
+ ENA_ADMIN_PHC_TYPE_READLESS = 0,
+};
+
+enum ena_admin_phc_error_flags {
+ ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP = BIT(0),
+};
+
/* ENA SRD configuration for ENI */
enum ena_admin_ena_srd_flags {
/* Feature enabled */
@@ -943,7 +952,9 @@ struct ena_admin_host_info {
* 4 : rss_configurable_function_key
* 5 : reserved
* 6 : rx_page_reuse
- * 31:7 : reserved
+ * 7 : reserved
+ * 8 : phc
+ * 31:9 : reserved
*/
u32 driver_supported_features;
};
@@ -975,7 +986,7 @@ struct ena_admin_feature_rss_ind_table {
struct ena_admin_rss_ind_table_entry inline_entry;
};
-/* When hint value is 0, driver should use it's own predefined value */
+/* When hint value is 0, driver should use its own predefined value */
struct ena_admin_ena_hw_hints {
/* value in ms */
u16 mmio_read_timeout;
@@ -1023,6 +1034,43 @@ struct ena_admin_queue_ext_feature_desc {
};
};
+struct ena_admin_feature_phc_desc {
+ /* PHC type as defined in enum ena_admin_get_phc_type,
+ * used only for GET command.
+ */
+ u8 type;
+
+ /* Reserved - MBZ */
+ u8 reserved1[3];
+
+ /* PHC doorbell address as an offset to PCIe MMIO REG BAR,
+ * used only for GET command.
+ */
+ u32 doorbell_offset;
+
+ /* Max time for valid PHC retrieval, passing this threshold will
+ * fail the get-time request and block PHC requests for
+ * block_timeout_usec, used only for GET command.
+ */
+ u32 expire_timeout_usec;
+
+ /* PHC requests block period, blocking starts if PHC request expired
+ * in order to prevent floods on busy device,
+ * used only for GET command.
+ */
+ u32 block_timeout_usec;
+
+ /* Shared PHC physical address (ena_admin_phc_resp),
+ * used only for SET command.
+ */
+ struct ena_common_mem_addr output_address;
+
+ /* Shared PHC Size (ena_admin_phc_resp),
+ * used only for SET command.
+ */
+ u32 output_length;
+};
+
struct ena_admin_get_feat_resp {
struct ena_admin_acq_common_desc acq_common_desc;
@@ -1052,6 +1100,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_feature_intr_moder_desc intr_moderation;
struct ena_admin_ena_hw_hints hw_hints;
+
+ struct ena_admin_feature_phc_desc phc;
} u;
};
@@ -1085,6 +1135,9 @@ struct ena_admin_set_feat_cmd {
/* LLQ configuration */
struct ena_admin_feature_llq_desc llq;
+
+ /* PHC configuration */
+ struct ena_admin_feature_phc_desc phc;
} u;
};
@@ -1162,6 +1215,23 @@ struct ena_admin_ena_mmio_req_read_less_resp {
u32 reg_val;
};
+struct ena_admin_phc_resp {
+ /* Request Id, received from DB register */
+ u16 req_id;
+
+ u8 reserved1[6];
+
+ /* PHC timestamp (nsec) */
+ u64 timestamp;
+
+ u8 reserved2[12];
+
+ /* Bit field of enum ena_admin_phc_error_flags */
+ u32 error_flags;
+
+ u8 reserved3[32];
+};
+
/* aq_common_desc */
#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
@@ -1260,6 +1330,8 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT 6
#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK BIT(6)
+#define ENA_ADMIN_HOST_INFO_PHC_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_PHC_MASK BIT(8)
/* aenq_common_desc */
#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 66445617fbfb..e67b592e5697 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -41,6 +41,12 @@
#define ENA_MAX_ADMIN_POLL_US 5000
+/* PHC definitions */
+#define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 10
+#define ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC 1000
+#define ENA_PHC_REQ_ID_OFFSET 0xDEAD
+#define ENA_PHC_ERROR_FLAGS (ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP)
+
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -1641,6 +1647,267 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
ena_dev->admin_queue.polling = polling;
}
+bool ena_com_phc_supported(struct ena_com_dev *ena_dev)
+{
+ return ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_PHC_CONFIG);
+}
+
+int ena_com_phc_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+
+ memset(phc, 0x0, sizeof(*phc));
+
+ /* Allocate the shared memory holding the PHC timestamp retrieved from the device */
+ phc->virt_addr = dma_alloc_coherent(ena_dev->dmadev,
+ sizeof(*phc->virt_addr),
+ &phc->phys_addr,
+ GFP_KERNEL);
+ if (unlikely(!phc->virt_addr))
+ return -ENOMEM;
+
+ spin_lock_init(&phc->lock);
+
+ phc->virt_addr->req_id = 0;
+ phc->virt_addr->timestamp = 0;
+
+ return 0;
+}
+
+int ena_com_phc_config(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ struct ena_admin_get_feat_resp get_feat_resp;
+ struct ena_admin_set_feat_resp set_feat_resp;
+ struct ena_admin_set_feat_cmd set_feat_cmd;
+ int ret = 0;
+
+ /* Get device PHC default configuration */
+ ret = ena_com_get_feature(ena_dev,
+ &get_feat_resp,
+ ENA_ADMIN_PHC_CONFIG,
+ 0);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed to get PHC feature configuration, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Supporting only readless PHC retrieval */
+ if (get_feat_resp.u.phc.type != ENA_ADMIN_PHC_TYPE_READLESS) {
+ netdev_err(ena_dev->net_device,
+ "Unsupported PHC type, error: %d\n",
+ -EOPNOTSUPP);
+ return -EOPNOTSUPP;
+ }
+
+ /* Update PHC doorbell offset according to device value,
+ * used to write req_id to PHC bar
+ */
+ phc->doorbell_offset = get_feat_resp.u.phc.doorbell_offset;
+
+ /* Update PHC expire timeout according to device
+ * or default driver value
+ */
+ phc->expire_timeout_usec = (get_feat_resp.u.phc.expire_timeout_usec) ?
+ get_feat_resp.u.phc.expire_timeout_usec :
+ ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC;
+
+ /* Update PHC block timeout according to device
+ * or default driver value
+ */
+ phc->block_timeout_usec = (get_feat_resp.u.phc.block_timeout_usec) ?
+ get_feat_resp.u.phc.block_timeout_usec :
+ ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC;
+
+ /* Sanity check - expire timeout must not exceed block timeout */
+ if (phc->expire_timeout_usec > phc->block_timeout_usec)
+ phc->expire_timeout_usec = phc->block_timeout_usec;
+
+ /* Prepare PHC feature command */
+ memset(&set_feat_cmd, 0x0, sizeof(set_feat_cmd));
+ set_feat_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ set_feat_cmd.feat_common.feature_id = ENA_ADMIN_PHC_CONFIG;
+ set_feat_cmd.u.phc.output_length = sizeof(*phc->virt_addr);
+ ret = ena_com_mem_addr_set(ena_dev,
+ &set_feat_cmd.u.phc.output_address,
+ phc->phys_addr);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed setting PHC output address, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Send PHC feature command to the device */
+ ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
+ (struct ena_admin_aq_entry *)&set_feat_cmd,
+ sizeof(set_feat_cmd),
+ (struct ena_admin_acq_entry *)&set_feat_resp,
+ sizeof(set_feat_resp));
+
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed to enable PHC, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ phc->active = true;
+ netdev_dbg(ena_dev->net_device, "PHC is active in the device\n");
+
+ return ret;
+}
+
+void ena_com_phc_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ unsigned long flags = 0;
+
+ /* If PHC is not supported by the device, exit silently */
+ if (!phc->virt_addr)
+ return;
+
+ spin_lock_irqsave(&phc->lock, flags);
+ phc->active = false;
+ spin_unlock_irqrestore(&phc->lock, flags);
+
+ dma_free_coherent(ena_dev->dmadev,
+ sizeof(*phc->virt_addr),
+ phc->virt_addr,
+ phc->phys_addr);
+ phc->virt_addr = NULL;
+}
+
+int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp)
+{
+ volatile struct ena_admin_phc_resp *resp = ena_dev->phc.virt_addr;
+ const ktime_t zero_system_time = ktime_set(0, 0);
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ ktime_t expire_time;
+ ktime_t block_time;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ if (!phc->active) {
+ netdev_err(ena_dev->net_device, "PHC feature is not active in the device\n");
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&phc->lock, flags);
+
+ /* Check if PHC is in blocked state */
+ if (unlikely(ktime_compare(phc->system_time, zero_system_time))) {
+ /* Check if blocking time expired */
+ block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec);
+ if (!ktime_after(ktime_get(), block_time)) {
+ /* PHC is still in blocked state, skip PHC request */
+ phc->stats.phc_skp++;
+ ret = -EBUSY;
+ goto skip;
+ }
+
+ /* PHC is in active state, update statistics according
+ * to req_id and error_flags
+ */
+ if (READ_ONCE(resp->req_id) != phc->req_id) {
+ /* Device didn't update req_id during the blocking time,
+ * which indicates a device error
+ */
+ netdev_err(ena_dev->net_device,
+ "PHC get time request 0x%x failed (device error)\n",
+ phc->req_id);
+ phc->stats.phc_err_dv++;
+ } else if (resp->error_flags & ENA_PHC_ERROR_FLAGS) {
+ /* Device updated req_id during blocking time but got
+ * a PHC error, this occurs if device:
+ * - exceeded the get time request limit
+ * - received an invalid timestamp
+ */
+ netdev_err(ena_dev->net_device,
+ "PHC get time request 0x%x failed (error 0x%x)\n",
+ phc->req_id,
+ resp->error_flags);
+ phc->stats.phc_err_ts += !!(resp->error_flags &
+ ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP);
+ } else {
+ /* Device updated req_id during blocking time
+ * with valid timestamp
+ */
+ phc->stats.phc_exp++;
+ }
+ }
+
+ /* Setting relative timeouts */
+ phc->system_time = ktime_get();
+ block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec);
+ expire_time = ktime_add_us(phc->system_time, phc->expire_timeout_usec);
+
+ /* We expect the device to return this req_id once
+ * the new PHC timestamp is updated
+ */
+ phc->req_id++;
+
+ /* Initialize the PHC shared memory with a different req_id value
+ * so we can detect when the device updates it to req_id
+ */
+ resp->req_id = phc->req_id + ENA_PHC_REQ_ID_OFFSET;
+
+ /* Writing req_id to PHC bar */
+ writel(phc->req_id, ena_dev->reg_bar + phc->doorbell_offset);
+
+ /* Stalling until the device updates req_id */
+ while (1) {
+ if (unlikely(ktime_after(ktime_get(), expire_time))) {
+ /* Gave up waiting for an updated req_id; the PHC enters
+ * the blocked state until the blocking time passes,
+ * during which any PHC get-time request fails with a
+ * device-busy error
+ */
+ ret = -EBUSY;
+ break;
+ }
+
+ /* Check if req_id was updated by the device */
+ if (READ_ONCE(resp->req_id) != phc->req_id) {
+ /* req_id was not updated by the device yet,
+ * check again on next loop
+ */
+ continue;
+ }
+
+ /* req_id was updated by the device which indicates that
+ * PHC timestamp and error_flags are updated too,
+ * checking errors before retrieving timestamp
+ */
+ if (unlikely(resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
+ /* Retrieved an invalid PHC timestamp; the PHC enters
+ * the blocked state until the blocking time passes,
+ * during which any PHC get-time request fails with a
+ * device-busy error
+ */
+ ret = -EBUSY;
+ break;
+ }
+
+ /* PHC timestamp value is returned to the caller */
+ *timestamp = resp->timestamp;
+
+ /* Update statistic on valid PHC timestamp retrieval */
+ phc->stats.phc_cnt++;
+
+ /* This indicates PHC state is active */
+ phc->system_time = zero_system_time;
+ break;
+ }
+
+skip:
+ spin_unlock_irqrestore(&phc->lock, flags);
+
+ return ret;
+}
+
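A PTP clock driver would sit directly on ena_com_phc_get_timestamp(); the ena_phc.c glue is not part of this hunk, so the caller below is only a hedged sketch of the expected shape:

    /* Illustrative gettime path; the function name is hypothetical. */
    static int ena_phc_gettime_sketch(struct ena_com_dev *ena_dev,
                                      struct timespec64 *ts)
    {
            u64 timestamp;
            int rc;

            rc = ena_com_phc_get_timestamp(ena_dev, &timestamp);
            if (rc)
                    return rc;      /* -EBUSY while the PHC is blocked */

            *ts = ns_to_timespec64(timestamp);      /* device reports nsec */
            return 0;
    }
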
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 9414e93d107b..64df2c48c9a6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -210,6 +210,14 @@ struct ena_com_stats_admin {
u64 no_completion;
};
+struct ena_com_stats_phc {
+ u64 phc_cnt;
+ u64 phc_exp;
+ u64 phc_skp;
+ u64 phc_err_dv;
+ u64 phc_err_ts;
+};
+
struct ena_com_admin_queue {
void *q_dmadev;
struct ena_com_dev *ena_dev;
@@ -258,6 +266,47 @@ struct ena_com_mmio_read {
spinlock_t lock;
};
+/* PTP hardware clock (PHC) MMIO read data info */
+struct ena_com_phc_info {
+ /* Internal PHC statistics */
+ struct ena_com_stats_phc stats;
+
+ /* PHC shared memory - virtual address */
+ struct ena_admin_phc_resp *virt_addr;
+
+ /* System time of last PHC request */
+ ktime_t system_time;
+
+ /* Spin lock to ensure a single outstanding PHC read */
+ spinlock_t lock;
+
+ /* PHC doorbell address as an offset to PCIe MMIO REG BAR */
+ u32 doorbell_offset;
+
+ /* Shared memory read expire timeout (usec)
+ * Max time for valid PHC retrieval, passing this threshold will fail
+ * the get time request and block new PHC requests for block_timeout_usec
+ * in order to prevent floods on busy device
+ */
+ u32 expire_timeout_usec;
+
+ /* Shared memory read abort timeout (usec)
+ * PHC requests block period, blocking starts once PHC request expired
+ * in order to prevent floods on busy device,
+ * any PHC requests during block period will be skipped
+ */
+ u32 block_timeout_usec;
+
+ /* PHC shared memory - physical address */
+ dma_addr_t phys_addr;
+
+ /* Request id sent to the device */
+ u16 req_id;
+
+ /* True if PHC is active in the device */
+ bool active;
+};
+
struct ena_rss {
/* Indirect table */
u16 *host_rss_ind_tbl;
@@ -317,6 +366,7 @@ struct ena_com_dev {
u32 ena_min_poll_delay_us;
struct ena_com_mmio_read mmio_read;
+ struct ena_com_phc_info phc;
struct ena_rss rss;
u32 supported_features;
@@ -382,6 +432,40 @@ struct ena_aenq_handlers {
*/
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
+/* ena_com_phc_init - Allocate and initialize PHC feature
+ * @ena_dev: ENA communication layer struct
+ * @note: This method assumes PHC is supported by the device
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_supported - Return if PHC feature is supported by the device
+ * @ena_dev: ENA communication layer struct
+ * @note: This method must be called after getting supported features
+ * @return - supported or not
+ */
+bool ena_com_phc_supported(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_config - Configure PHC feature
+ * @ena_dev: ENA communication layer struct
+ * Configure PHC feature in driver and device
+ * @note: This method assumes PHC is supported by the device
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_config(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_destroy - Destroy PHC feature
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_phc_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_get_timestamp - Retrieve PHC timestamp
+ * @ena_dev: ENA communication layer struct
+ * @timestamp: Retrieved PHC timestamp
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp);
+
/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
* @ena_dev: ENA communication layer struct
* @readless_supported: readless mode (enable/disable)
diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.c b/drivers/net/ethernet/amazon/ena/ena_debugfs.c
new file mode 100644
index 000000000000..46ed80986724
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include "ena_debugfs.h"
+#include "ena_phc.h"
+
+static int phc_stats_show(struct seq_file *file, void *priv)
+{
+ struct ena_adapter *adapter = file->private;
+
+ if (!ena_phc_is_active(adapter))
+ return 0;
+
+ seq_printf(file,
+ "phc_cnt: %llu\n",
+ adapter->ena_dev->phc.stats.phc_cnt);
+ seq_printf(file,
+ "phc_exp: %llu\n",
+ adapter->ena_dev->phc.stats.phc_exp);
+ seq_printf(file,
+ "phc_skp: %llu\n",
+ adapter->ena_dev->phc.stats.phc_skp);
+ seq_printf(file,
+ "phc_err_dv: %llu\n",
+ adapter->ena_dev->phc.stats.phc_err_dv);
+ seq_printf(file,
+ "phc_err_ts: %llu\n",
+ adapter->ena_dev->phc.stats.phc_err_ts);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(phc_stats);
+
+void ena_debugfs_init(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ adapter->debugfs_base =
+ debugfs_create_dir(dev_name(&adapter->pdev->dev), NULL);
+
+ debugfs_create_file("phc_stats",
+ 0400,
+ adapter->debugfs_base,
+ adapter,
+ &phc_stats_fops);
+}
+
+void ena_debugfs_terminate(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ debugfs_remove_recursive(adapter->debugfs_base);
+}
+
+#endif /* CONFIG_DEBUG_FS */
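With debugfs mounted, the counters registered above become readable at /sys/kernel/debug/<pci-device-name>/phc_stats; the directory name comes from dev_name() on the PCI device, so something like 0000:00:06.0 (address illustrative). When the PHC is inactive, phc_stats_show() returns early and the file reads as empty.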
diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.h b/drivers/net/ethernet/amazon/ena/ena_debugfs.h
new file mode 100644
index 000000000000..dc61dd998867
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#ifndef __ENA_DEBUGFS_H__
+#define __ENA_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+#include "ena_netdev.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+void ena_debugfs_init(struct net_device *dev);
+
+void ena_debugfs_terminate(struct net_device *dev);
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline void ena_debugfs_init(struct net_device *dev) {}
+
+static inline void ena_debugfs_terminate(struct net_device *dev) {}
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __ENA_DEBUGFS_H__ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.c b/drivers/net/ethernet/amazon/ena/ena_devlink.c
new file mode 100644
index 000000000000..ac81c24016dd
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#include "linux/pci.h"
+#include "ena_devlink.h"
+#include "ena_phc.h"
+
+static int ena_devlink_enable_phc_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ if (!val.vbool)
+ return 0;
+
+ if (!ena_com_phc_supported(adapter->ena_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support PHC");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param ena_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_PHC,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL,
+ NULL,
+ ena_devlink_enable_phc_validate),
+};
+
+void ena_devlink_params_get(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ &val);
+ if (err) {
+ netdev_err(adapter->netdev, "Failed to query PHC param\n");
+ return;
+ }
+
+ ena_phc_enable(adapter, val.vbool);
+}
+
+void ena_devlink_disable_phc_param(struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vbool = false;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ value);
+}
+
+static void ena_devlink_port_register(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ struct devlink_port_attrs attrs = {};
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ devlink_port_attrs_set(&adapter->devlink_port, &attrs);
+ devl_port_register(devlink, &adapter->devlink_port, 0);
+}
+
+static void ena_devlink_port_unregister(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ devl_port_unregister(&adapter->devlink_port);
+}
+
+static int ena_devlink_reload_down(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ if (netns_change) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Namespace change is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ ena_devlink_port_unregister(devlink);
+
+ rtnl_lock();
+ ena_destroy_device(adapter, false);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int ena_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ int err = 0;
+
+ rtnl_lock();
+ /* Check that no other routine initialized the device (e.g.
+ * ena_fw_reset_device()). Also we're under devlink_mutex here,
+ * so devlink isn't freed under our feet.
+ */
+ if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+ err = ena_restore_device(adapter);
+
+ rtnl_unlock();
+
+ ena_devlink_port_register(devlink);
+
+ if (!err)
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+
+ return err;
+}
+
+static const struct devlink_ops ena_devlink_ops = {
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = ena_devlink_reload_down,
+ .reload_up = ena_devlink_reload_up,
+};
+
+static int ena_devlink_configure_params(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ union devlink_param_value value;
+ int rc;
+
+ rc = devlink_params_register(devlink, ena_devlink_params,
+ ARRAY_SIZE(ena_devlink_params));
+ if (rc) {
+ netdev_err(adapter->netdev, "Failed to register devlink params\n");
+ return rc;
+ }
+
+ value.vbool = ena_phc_is_enabled(adapter);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ value);
+
+ return 0;
+}
+
+struct devlink *ena_devlink_alloc(struct ena_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ena_devlink_ops,
+ sizeof(struct ena_adapter *),
+ dev);
+ if (!devlink) {
+ netdev_err(adapter->netdev,
+ "Failed to allocate devlink struct\n");
+ return NULL;
+ }
+
+ ENA_DEVLINK_PRIV(devlink) = adapter;
+ adapter->devlink = devlink;
+
+ if (ena_devlink_configure_params(devlink))
+ goto free_devlink;
+
+ return devlink;
+
+free_devlink:
+ devlink_free(devlink);
+ return NULL;
+}
+
+static void ena_devlink_configure_params_clean(struct devlink *devlink)
+{
+ devlink_params_unregister(devlink, ena_devlink_params,
+ ARRAY_SIZE(ena_devlink_params));
+}
+
+void ena_devlink_free(struct devlink *devlink)
+{
+ ena_devlink_configure_params_clean(devlink);
+
+ devlink_free(devlink);
+}
+
+void ena_devlink_register(struct devlink *devlink, struct device *dev)
+{
+ devl_lock(devlink);
+ ena_devlink_port_register(devlink);
+ devl_register(devlink);
+ devl_unlock(devlink);
+}
+
+void ena_devlink_unregister(struct devlink *devlink)
+{
+ devl_lock(devlink);
+ ena_devlink_port_unregister(devlink);
+ devl_unregister(devlink);
+ devl_unlock(devlink);
+}
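Because ENABLE_PHC is a driverinit parameter, it would typically be flipped from userspace with the devlink CLI, e.g. "devlink dev param set pci/0000:00:06.0 name enable_phc value true cmode driverinit" followed by "devlink dev reload pci/0000:00:06.0", so that reload_down/reload_up reinitialize the device with the new value (the device address is illustrative).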
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.h b/drivers/net/ethernet/amazon/ena/ena_devlink.h
new file mode 100644
index 000000000000..7a19ce4830d9
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+#ifndef ENA_DEVLINK_H
+#define ENA_DEVLINK_H
+
+#include "ena_netdev.h"
+#include <net/devlink.h>
+
+#define ENA_DEVLINK_PRIV(devlink) \
+ (*(struct ena_adapter **)devlink_priv(devlink))
+
+struct devlink *ena_devlink_alloc(struct ena_adapter *adapter);
+void ena_devlink_free(struct devlink *devlink);
+void ena_devlink_register(struct devlink *devlink, struct device *dev);
+void ena_devlink_unregister(struct devlink *devlink);
+void ena_devlink_params_get(struct devlink *devlink);
+void ena_devlink_disable_phc_param(struct devlink *devlink);
+
+#endif /* ENA_DEVLINK_H */
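
ENA_DEVLINK_PRIV involves a double dereference because ena_devlink_alloc() sizes the devlink private area to hold just a pointer (sizeof(struct ena_adapter *)) rather than embedding the adapter. The same idiom in standalone form, with stand-in types so it compiles outside the kernel:

#include <stdio.h>
#include <stdlib.h>

struct adapter { int id; };                     /* stands in for ena_adapter */
struct devlink_like { char priv[sizeof(struct adapter *)]; };

static void *priv_area(struct devlink_like *dl) { return dl->priv; }

/* Mirrors ENA_DEVLINK_PRIV(): the priv area holds only a back-pointer. */
#define PRIV_PTR(dl) (*(struct adapter **)priv_area(dl))

int main(void)
{
        struct adapter a = { .id = 42 };
        struct devlink_like *dl = calloc(1, sizeof(*dl));

        if (!dl)
                return 1;
        PRIV_PTR(dl) = &a;                      /* as ena_devlink_alloc() does */
        printf("adapter id via priv: %d\n", PRIV_PTR(dl)->id);
        free(dl);
        return 0;
}
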
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index a3c934c3de71..a81d3a7a3bb9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -5,9 +5,11 @@
#include <linux/ethtool.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "ena_netdev.h"
#include "ena_xdp.h"
+#include "ena_phc.h"
struct ena_stats {
char name[ETH_GSTRING_LEN];
@@ -298,6 +300,18 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
ena_get_stats(adapter, data, true);
}
+static int ena_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+
+ info->phc_index = ena_phc_get_index(adapter);
+
+ return 0;
+}
+
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
{
return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
@@ -721,9 +735,11 @@ static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
return data;
}
-static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
- struct ethtool_rxnfc *cmd)
+static int ena_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_flow_hash_proto proto;
u16 hash_fields;
int rc;
@@ -772,9 +788,12 @@ static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
- struct ethtool_rxnfc *cmd)
+static int ena_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_flow_hash_proto proto;
u16 hash_fields;
@@ -816,26 +835,6 @@ static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}
-static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- int rc = 0;
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- rc = ena_set_rss_hash(adapter->ena_dev, info);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- case ETHTOOL_SRXCLSRLINS:
- default:
- netif_err(adapter, drv, netdev,
- "Command parameter %d is not supported\n", info->cmd);
- rc = -EOPNOTSUPP;
- }
-
- return rc;
-}
-
static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -847,9 +846,6 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
info->data = adapter->num_io_queues;
rc = 0;
break;
- case ETHTOOL_GRXFH:
- rc = ena_get_rss_hash(adapter->ena_dev, info);
- break;
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
@@ -1098,16 +1094,17 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_strings = ena_get_ethtool_strings,
.get_ethtool_stats = ena_get_ethtool_stats,
.get_rxnfc = ena_get_rxnfc,
- .set_rxnfc = ena_set_rxnfc,
.get_rxfh_indir_size = ena_get_rxfh_indir_size,
.get_rxfh_key_size = ena_get_rxfh_key_size,
.get_rxfh = ena_get_rxfh,
.set_rxfh = ena_set_rxfh,
+ .get_rxfh_fields = ena_get_rxfh_fields,
+ .set_rxfh_fields = ena_set_rxfh_fields,
.get_channels = ena_get_channels,
.set_channels = ena_set_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = ena_get_ts_info,
};
void ena_set_ethtool_ops(struct net_device *netdev)
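
The removed ETHTOOL_GRXFH/SRXFH switch cases are not lost functionality: the ethtool core now dispatches those requests to the dedicated .get_rxfh_fields/.set_rxfh_fields callbacks wired up above. A minimal sketch of the shape such a pair takes, for a hypothetical driver that only hashes TCP/IPv4 (the foo_* names are illustrative, not ENA's):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical per-device state: the RXH_* bits for TCP/IPv4 flows. */
struct foo_priv {
        u64 tcp4_hash_fields;
};

static int foo_get_rxfh_fields(struct net_device *netdev,
                               struct ethtool_rxfh_fields *cmd)
{
        struct foo_priv *priv = netdev_priv(netdev);

        if (cmd->flow_type != TCP_V4_FLOW)
                return -EOPNOTSUPP;

        cmd->data = priv->tcp4_hash_fields;     /* e.g. RXH_IP_SRC | RXH_IP_DST */
        return 0;
}

static int foo_set_rxfh_fields(struct net_device *netdev,
                               const struct ethtool_rxfh_fields *cmd,
                               struct netlink_ext_ack *extack)
{
        struct foo_priv *priv = netdev_priv(netdev);

        if (cmd->flow_type != TCP_V4_FLOW)
                return -EOPNOTSUPP;

        priv->tcp4_hash_fields = cmd->data;     /* program the hardware here */
        return 0;
}
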
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 86fd08f375df..92d149d4f091 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -19,6 +19,12 @@
#include "ena_pci_id_tbl.h"
#include "ena_xdp.h"
+#include "ena_phc.h"
+
+#include "ena_devlink.h"
+
+#include "ena_debugfs.h"
+
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
@@ -39,8 +45,6 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
-static int ena_destroy_device(struct ena_adapter *adapter, bool graceful);
-static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
@@ -2743,7 +2747,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
- ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
+ ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK |
+ ENA_ADMIN_HOST_INFO_PHC_MASK;
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
@@ -3135,6 +3140,8 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
goto err_mmio_read_less;
}
+ ena_devlink_params_get(adapter->devlink);
+
/* ENA admin level init */
rc = ena_com_admin_init(ena_dev, &aenq_handlers);
if (rc) {
@@ -3188,6 +3195,10 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
if (unlikely(rc))
goto err_admin_init;
+ rc = ena_phc_init(adapter);
+ if (unlikely(rc && (rc != -EOPNOTSUPP)))
+ netdev_err(netdev, "Failed initializing PHC, error: %d\n", rc);
+
return 0;
err_admin_init:
@@ -3233,7 +3244,7 @@ err_disable_msix:
return rc;
}
-static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
+int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
struct net_device *netdev = adapter->netdev;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -3271,6 +3282,8 @@ static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
ena_com_admin_destroy(ena_dev);
+ ena_phc_destroy(adapter);
+
ena_com_mmio_reg_read_request_destroy(ena_dev);
/* return reset reason to default value */
@@ -3282,7 +3295,7 @@ static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
return rc;
}
-static int ena_restore_device(struct ena_adapter *adapter)
+int ena_restore_device(struct ena_adapter *adapter)
{
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -3344,6 +3357,7 @@ err_device_destroy:
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ ena_phc_destroy(adapter);
ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
@@ -3867,6 +3881,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ena_adapter *adapter;
struct net_device *netdev;
static int adapters_found;
+ struct devlink *devlink;
u32 max_num_io_queues;
bool wd_state;
int bars, rc;
@@ -3932,10 +3947,16 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
+ rc = ena_phc_alloc(adapter);
+ if (rc) {
+ netdev_err(netdev, "ena_phc_alloc failed\n");
+ goto err_netdev_destroy;
+ }
+
rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
if (rc) {
netdev_err(netdev, "ena_com_allocate_customer_metrics_buffer failed\n");
- goto err_netdev_destroy;
+ goto err_free_phc;
}
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
@@ -3944,12 +3965,20 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_metrics_destroy;
}
+ /* Need to do this before ena_device_init */
+ devlink = ena_devlink_alloc(adapter);
+ if (!devlink) {
+ netdev_err(netdev, "ena_devlink_alloc failed\n");
+ rc = -ENOMEM;
+ goto err_metrics_destroy;
+ }
+
rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
- goto err_metrics_destroy;
+ goto ena_devlink_destroy;
}
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
@@ -4033,6 +4062,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_rss;
}
+ ena_debugfs_init(netdev);
+
INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
adapter->last_keep_alive_jiffies = jiffies;
@@ -4054,6 +4085,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapters_found++;
+ /* From this point, the devlink device is visible to users.
+ * Perform the registration last to ensure that all the resources
+ * are available and that the netdevice is registered.
+ */
+ ena_devlink_register(devlink, &pdev->dev);
+
return 0;
err_rss:
@@ -4070,8 +4107,12 @@ err_worker_destroy:
err_device_destroy:
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
+ena_devlink_destroy:
+ ena_devlink_free(devlink);
err_metrics_destroy:
ena_com_delete_customer_metrics_buffer(ena_dev);
+err_free_phc:
+ ena_phc_free(adapter);
err_netdev_destroy:
free_netdev(netdev);
err_free_region:
@@ -4102,6 +4143,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_dev = adapter->ena_dev;
netdev = adapter->netdev;
+ ena_debugfs_terminate(netdev);
+
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
@@ -4112,6 +4155,11 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
ena_destroy_device(adapter, true);
+ ena_phc_free(adapter);
+
+ ena_devlink_unregister(adapter->devlink);
+ ena_devlink_free(adapter->devlink);
+
if (shutdown) {
netif_device_detach(netdev);
dev_close(netdev);
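
The new err_free_phc and ena_devlink_destroy labels extend the usual probe unwind ladder: each successful setup step gains a label that undoes it, ordered so a failure at step N falls through the teardown of steps N-1..1 in reverse. The skeleton of the idiom, with stand-in functions:

/* All functions are illustrative stand-ins for probe steps. */
static int step_a_init(void) { return 0; }
static int step_b_init(void) { return 0; }
static int step_c_init(void) { return 0; }
static void step_a_exit(void) { }
static void step_b_exit(void) { }

static int foo_probe(void)
{
        int rc;

        rc = step_a_init();
        if (rc)
                return rc;

        rc = step_b_init();
        if (rc)
                goto err_a;

        rc = step_c_init();
        if (rc)
                goto err_b;

        return 0;                       /* everything stays allocated */

err_b:
        step_b_exit();                  /* undo step B ... */
err_a:
        step_a_exit();                  /* ... then step A */
        return rc;
}
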
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 6e12ae3b12e5..006f9a3acea6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -16,6 +16,7 @@
#include <linux/skbuff.h>
#include <net/xdp.h>
#include <uapi/linux/bpf.h>
+#include <net/devlink.h>
#include "ena_com.h"
#include "ena_eth_com.h"
@@ -110,6 +111,8 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+struct ena_phc_info;
+
struct ena_irq {
irq_handler_t handler;
void *data;
@@ -348,6 +351,8 @@ struct ena_adapter {
char name[ENA_NAME_MAX_LEN];
+ struct ena_phc_info *phc_info;
+
unsigned long flags;
/* TX */
struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
@@ -383,6 +388,12 @@ struct ena_adapter {
struct bpf_prog *xdp_bpf_prog;
u32 xdp_first_ring;
u32 xdp_num_queues;
+
+ struct devlink *devlink;
+ struct devlink_port devlink_port;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_base;
+#endif /* CONFIG_DEBUG_FS */
};
void ena_set_ethtool_ops(struct net_device *netdev);
@@ -412,6 +424,8 @@ static inline void ena_reset_device(struct ena_adapter *adapter,
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
+int ena_destroy_device(struct ena_adapter *adapter, bool graceful);
+int ena_restore_device(struct ena_adapter *adapter);
int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
struct ena_tx_buffer *tx_info, bool is_xdp);
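
Note that ena_netdev.h only forward-declares struct ena_phc_info and stores a pointer, keeping <linux/ptp_clock_kernel.h> confined to the PHC translation unit. The opaque-handle idiom in isolation (names hypothetical):

/* foo.h: the core header sees only an opaque handle. */
struct foo_phc;                         /* full definition in foo_phc.h only */

struct foo_adapter {
        struct foo_phc *phc_info;       /* opaque to non-PHC code */
};
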
diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.c b/drivers/net/ethernet/amazon/ena/ena_phc.c
new file mode 100644
index 000000000000..7867e893fd15
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_phc.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include <linux/pci.h>
+#include "ena_netdev.h"
+#include "ena_phc.h"
+#include "ena_devlink.h"
+
+static int ena_phc_adjtime(struct ptp_clock_info *clock_info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_adjfine(struct ptp_clock_info *clock_info, long scaled_ppm)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_feature_enable(struct ptp_clock_info *clock_info,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_gettimex64(struct ptp_clock_info *clock_info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct ena_phc_info *phc_info =
+ container_of(clock_info, struct ena_phc_info, clock_info);
+ unsigned long flags;
+ u64 timestamp_nsec;
+ int rc;
+
+ spin_lock_irqsave(&phc_info->lock, flags);
+
+ ptp_read_system_prets(sts);
+
+ rc = ena_com_phc_get_timestamp(phc_info->adapter->ena_dev,
+ &timestamp_nsec);
+
+ ptp_read_system_postts(sts);
+
+ spin_unlock_irqrestore(&phc_info->lock, flags);
+
+ *ts = ns_to_timespec64(timestamp_nsec);
+
+ return rc;
+}
+
+static int ena_phc_settime64(struct ptp_clock_info *clock_info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ena_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjtime = ena_phc_adjtime,
+ .adjfine = ena_phc_adjfine,
+ .gettimex64 = ena_phc_gettimex64,
+ .settime64 = ena_phc_settime64,
+ .enable = ena_phc_feature_enable,
+};
+
+/* Enable/Disable PHC by the kernel; takes effect on the next init flow */
+void ena_phc_enable(struct ena_adapter *adapter, bool enable)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ if (!phc_info) {
+ netdev_err(adapter->netdev, "phc_info is not allocated\n");
+ return;
+ }
+
+ phc_info->enabled = enable;
+}
+
+/* Check if PHC is enabled by the kernel */
+bool ena_phc_is_enabled(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ return (phc_info && phc_info->enabled);
+}
+
+/* PHC is activated if ptp clock is registered in the kernel */
+bool ena_phc_is_active(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ return (phc_info && phc_info->clock);
+}
+
+static int ena_phc_register(struct ena_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ptp_clock_info *clock_info;
+ struct ena_phc_info *phc_info;
+ int rc = 0;
+
+ phc_info = adapter->phc_info;
+ clock_info = &phc_info->clock_info;
+
+ /* PHC may already be registered in case of a reset */
+ if (ena_phc_is_active(adapter))
+ return 0;
+
+ phc_info->adapter = adapter;
+
+ spin_lock_init(&phc_info->lock);
+
+ /* Fill the ptp_clock_info struct and register PTP clock */
+ *clock_info = ena_ptp_clock_info;
+ snprintf(clock_info->name,
+ sizeof(clock_info->name),
+ "ena-ptp-%02x",
+ PCI_SLOT(pdev->devfn));
+
+ phc_info->clock = ptp_clock_register(clock_info, &pdev->dev);
+ if (IS_ERR(phc_info->clock)) {
+ rc = PTR_ERR(phc_info->clock);
+ netdev_err(adapter->netdev, "Failed registering ptp clock, error: %d\n",
+ rc);
+ phc_info->clock = NULL;
+ }
+
+ return rc;
+}
+
+static void ena_phc_unregister(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ /* During the reset flow, PHC must stay registered
+ * to keep the kernel's PHC index
+ */
+ if (ena_phc_is_active(adapter) &&
+ !test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
+ ptp_clock_unregister(phc_info->clock);
+ phc_info->clock = NULL;
+ }
+}
+
+int ena_phc_alloc(struct ena_adapter *adapter)
+{
+ /* Allocate driver specific PHC info */
+ adapter->phc_info = vzalloc(sizeof(*adapter->phc_info));
+ if (unlikely(!adapter->phc_info)) {
+ netdev_err(adapter->netdev, "Failed to alloc phc_info\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ena_phc_free(struct ena_adapter *adapter)
+{
+ if (adapter->phc_info) {
+ vfree(adapter->phc_info);
+ adapter->phc_info = NULL;
+ }
+}
+
+int ena_phc_init(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct net_device *netdev = adapter->netdev;
+ int rc = -EOPNOTSUPP;
+
+ /* Validate PHC feature is supported in the device */
+ if (!ena_com_phc_supported(ena_dev)) {
+ netdev_dbg(netdev, "PHC feature is not supported by the device\n");
+ goto err_ena_com_phc_init;
+ }
+
+ /* Validate PHC feature is enabled by the kernel */
+ if (!ena_phc_is_enabled(adapter)) {
+ netdev_dbg(netdev, "PHC feature is not enabled by the kernel\n");
+ goto err_ena_com_phc_init;
+ }
+
+ /* Initialize device specific PHC info */
+ rc = ena_com_phc_init(ena_dev);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to init phc, error: %d\n", rc);
+ goto err_ena_com_phc_init;
+ }
+
+ /* Configure PHC feature in driver and device */
+ rc = ena_com_phc_config(ena_dev);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to config phc, error: %d\n", rc);
+ goto err_ena_com_phc_config;
+ }
+
+ /* Register to PTP class driver */
+ rc = ena_phc_register(adapter);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to register phc, error: %d\n", rc);
+ goto err_ena_com_phc_config;
+ }
+
+ return 0;
+
+err_ena_com_phc_config:
+ ena_com_phc_destroy(ena_dev);
+err_ena_com_phc_init:
+ ena_phc_enable(adapter, false);
+ ena_devlink_disable_phc_param(adapter->devlink);
+ return rc;
+}
+
+void ena_phc_destroy(struct ena_adapter *adapter)
+{
+ ena_phc_unregister(adapter);
+ ena_com_phc_destroy(adapter->ena_dev);
+}
+
+int ena_phc_get_index(struct ena_adapter *adapter)
+{
+ if (ena_phc_is_active(adapter))
+ return ptp_clock_index(adapter->phc_info->clock);
+
+ return -1;
+}
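
ena_phc_gettimex64() above follows the standard gettimex64 contract: system-clock snapshots are taken immediately before and after the device read so user space (phc2sys, for example) can bound the read latency. The bracketing pattern reduced to a skeleton, with a hypothetical device read:

#include <linux/ptp_clock_kernel.h>

static int foo_read_device_ns(u64 *ns) { *ns = 0; return 0; } /* stand-in */

static int foo_gettimex64(struct ptp_clock_info *info,
                          struct timespec64 *ts,
                          struct ptp_system_timestamp *sts)
{
        u64 ns;
        int rc;

        ptp_read_system_prets(sts);     /* system time just before */
        rc = foo_read_device_ns(&ns);   /* hypothetical device read */
        ptp_read_system_postts(sts);    /* system time just after */

        *ts = ns_to_timespec64(ns);
        return rc;
}
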
diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.h b/drivers/net/ethernet/amazon/ena/ena_phc.h
new file mode 100644
index 000000000000..7364fe714e44
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_phc.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef ENA_PHC_H
+#define ENA_PHC_H
+
+#include <linux/ptp_clock_kernel.h>
+
+struct ena_phc_info {
+ /* PTP hardware capabilities */
+ struct ptp_clock_info clock_info;
+
+ /* Registered PTP clock device */
+ struct ptp_clock *clock;
+
+ /* Adapter specific private data structure */
+ struct ena_adapter *adapter;
+
+ /* PHC lock */
+ spinlock_t lock;
+
+ /* Enabled by kernel */
+ bool enabled;
+};
+
+void ena_phc_enable(struct ena_adapter *adapter, bool enable);
+bool ena_phc_is_enabled(struct ena_adapter *adapter);
+bool ena_phc_is_active(struct ena_adapter *adapter);
+int ena_phc_get_index(struct ena_adapter *adapter);
+int ena_phc_init(struct ena_adapter *adapter);
+void ena_phc_destroy(struct ena_adapter *adapter);
+int ena_phc_alloc(struct ena_adapter *adapter);
+void ena_phc_free(struct ena_adapter *adapter);
+
+#endif /* ENA_PHC_H */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index a2efebafd686..51068dc1cc2a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -53,6 +53,11 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+/* phc_registers offsets */
+
+/* 100 base */
+#define ENA_REGS_PHC_DB_OFF 0x100
+
/* version register */
#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
@@ -129,4 +134,7 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+/* phc_db_req_id register */
+#define ENA_REGS_PHC_DB_REQ_ID_MASK 0xffff
+
#endif /* _ENA_REGS_H_ */
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 620785ffbd51..5b0ab6240cf2 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
- xgbe-ptp.o \
+ xgbe-hwtstamp.o xgbe-ptp.o \
xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
xgbe-platform.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 9316de4126cf..009fbc9b11ce 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -223,6 +223,10 @@
#define MAC_TSSR 0x0d20
#define MAC_TXSNR 0x0d30
#define MAC_TXSSR 0x0d34
+#define MAC_TICNR 0x0d58
+#define MAC_TICSNR 0x0d5C
+#define MAC_TECNR 0x0d60
+#define MAC_TECSNR 0x0d64
#define MAC_QTFCR_INC 4
#define MAC_MACA_INC 4
@@ -364,6 +368,10 @@
#define MAC_RCR_CST_WIDTH 1
#define MAC_RCR_DCRCC_INDEX 3
#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_GPSLCE_INDEX 6
+#define MAC_RCR_GPSLCE_WIDTH 1
+#define MAC_RCR_WD_INDEX 7
+#define MAC_RCR_WD_WIDTH 1
#define MAC_RCR_HDSMS_INDEX 12
#define MAC_RCR_HDSMS_WIDTH 3
#define MAC_RCR_IPC_INDEX 9
@@ -374,6 +382,8 @@
#define MAC_RCR_LM_WIDTH 1
#define MAC_RCR_RE_INDEX 0
#define MAC_RCR_RE_WIDTH 1
+#define MAC_RCR_GPSL_INDEX 16
+#define MAC_RCR_GPSL_WIDTH 14
#define MAC_RFCR_PFCE_INDEX 8
#define MAC_RFCR_PFCE_WIDTH 1
#define MAC_RFCR_RFE_INDEX 0
@@ -412,6 +422,8 @@
#define MAC_TCR_VNE_WIDTH 1
#define MAC_TCR_VNM_INDEX 25
#define MAC_TCR_VNM_WIDTH 1
+#define MAC_TCR_JD_INDEX 16
+#define MAC_TCR_JD_WIDTH 1
#define MAC_TIR_TNID_INDEX 0
#define MAC_TIR_TNID_WIDTH 16
#define MAC_TSCR_AV8021ASMEN_INDEX 28
@@ -420,6 +432,8 @@
#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
#define MAC_TSCR_TSADDREG_INDEX 5
#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSUPDT_INDEX 3
+#define MAC_TSCR_TSUPDT_WIDTH 1
#define MAC_TSCR_TSCFUPDT_INDEX 1
#define MAC_TSCR_TSCFUPDT_WIDTH 1
#define MAC_TSCR_TSCTRLSSR_INDEX 9
@@ -448,6 +462,10 @@
#define MAC_TSSR_TXTSC_WIDTH 1
#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_TICSNR_TSICSNS_INDEX 8
+#define MAC_TICSNR_TSICSNS_WIDTH 8
+#define MAC_TECSNR_TSECSNS_INDEX 8
+#define MAC_TECSNR_TSECSNS_WIDTH 8
#define MAC_VLANHTR_VLHT_INDEX 0
#define MAC_VLANHTR_VLHT_WIDTH 16
#define MAC_VLANIR_VLTI_INDEX 20
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 466b5f6e5578..e5391a2eca51 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1558,125 +1558,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
DBGPR("<--rx_desc_init\n");
}
-static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
- unsigned int addend)
-{
- unsigned int count = 10000;
-
- /* Set the addend register value and tell the device */
- XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
-
- /* Wait for addend update to complete */
- while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
- udelay(5);
-
- if (!count)
- netdev_err(pdata->netdev,
- "timed out updating timestamp addend register\n");
-}
-
-static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
- unsigned int nsec)
-{
- unsigned int count = 10000;
-
- /* Set the time values and tell the device */
- XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
- XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
-
- /* Wait for time update to complete */
- while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
- udelay(5);
-
- if (!count)
- netdev_err(pdata->netdev, "timed out initializing timestamp\n");
-}
-
-static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
-{
- u64 nsec;
-
- nsec = XGMAC_IOREAD(pdata, MAC_STSR);
- nsec *= NSEC_PER_SEC;
- nsec += XGMAC_IOREAD(pdata, MAC_STNR);
-
- return nsec;
-}
-
-static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
-{
- unsigned int tx_snr, tx_ssr;
- u64 nsec;
-
- if (pdata->vdata->tx_tstamp_workaround) {
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
- } else {
- tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- }
-
- if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
- return 0;
-
- nsec = tx_ssr;
- nsec *= NSEC_PER_SEC;
- nsec += tx_snr;
-
- return nsec;
-}
-
-static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
- struct xgbe_ring_desc *rdesc)
-{
- u64 nsec;
-
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
- !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
- nsec = le32_to_cpu(rdesc->desc1);
- nsec <<= 32;
- nsec |= le32_to_cpu(rdesc->desc0);
- if (nsec != 0xffffffffffffffffULL) {
- packet->rx_tstamp = nsec;
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- RX_TSTAMP, 1);
- }
- }
-}
-
-static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
- unsigned int mac_tscr)
-{
- /* Set one nano-second accuracy */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
-
- /* Set fine timestamp update */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
-
- /* Overwrite earlier timestamps */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
-
- XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
-
- /* Exit if timestamping is not enabled */
- if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
- return 0;
-
- /* Initialize time registers */
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
- xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
- xgbe_set_tstamp_time(pdata, 0, 0);
-
- /* Initialize the timecounter */
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
- return 0;
-}
-
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_ring *ring)
{
@@ -2850,9 +2731,19 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
unsigned int val;
- val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
-
- XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+ if (pdata->netdev->mtu > XGMAC_JUMBO_PACKET_MTU) {
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSL,
+ XGMAC_GIANT_PACKET_MTU);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 1);
+ } else {
+ val = pdata->netdev->mtu > XGMAC_STD_PACKET_MTU ? 1 : 0;
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+ }
}
static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
@@ -3661,13 +3552,6 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->rx_mmc_int = xgbe_rx_mmc_int;
hw_if->read_mmc_stats = xgbe_read_mmc_stats;
- /* For PTP config */
- hw_if->config_tstamp = xgbe_config_tstamp;
- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
-
/* For Data Center Bridging config */
hw_if->config_tc = xgbe_config_tc;
hw_if->config_dcb_tc = xgbe_config_dcb_tc;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 65447f9a0a59..2e9b95a94f89 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -448,7 +448,7 @@ static void xgbe_isr_bh_work(struct work_struct *work)
if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
/* Read Tx Timestamp to clear interrupt */
pdata->tx_tstamp =
- hw_if->get_tx_tstamp(pdata);
+ xgbe_get_tx_tstamp(pdata);
queue_work(pdata->dev_workqueue,
&pdata->tx_tstamp_work);
}
@@ -1371,199 +1371,6 @@ static void xgbe_restart(struct work_struct *work)
rtnl_unlock();
}
-static void xgbe_tx_tstamp(struct work_struct *work)
-{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- tx_tstamp_work);
- struct skb_shared_hwtstamps hwtstamps;
- u64 nsec;
- unsigned long flags;
-
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (!pdata->tx_tstamp_skb)
- goto unlock;
-
- if (pdata->tx_tstamp) {
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- pdata->tx_tstamp);
-
- memset(&hwtstamps, 0, sizeof(hwtstamps));
- hwtstamps.hwtstamp = ns_to_ktime(nsec);
- skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
- }
-
- dev_kfree_skb_any(pdata->tx_tstamp_skb);
-
- pdata->tx_tstamp_skb = NULL;
-
-unlock:
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
-}
-
-static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
- sizeof(pdata->tstamp_config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- struct hwtstamp_config config;
- unsigned int mac_tscr;
-
- if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- mac_tscr = 0;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
-
- case HWTSTAMP_TX_ON:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
-
- case HWTSTAMP_FILTER_NTP_ALL:
- case HWTSTAMP_FILTER_ALL:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- pdata->hw_if.config_tstamp(pdata, mac_tscr);
-
- memcpy(&pdata->tstamp_config, &config, sizeof(config));
-
- return 0;
-}
-
-static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
- struct sk_buff *skb,
- struct xgbe_packet_data *packet)
-{
- unsigned long flags;
-
- if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (pdata->tx_tstamp_skb) {
- /* Another timestamp in progress, ignore this one */
- XGMAC_SET_BITS(packet->attributes,
- TX_PACKET_ATTRIBUTES, PTP, 0);
- } else {
- pdata->tx_tstamp_skb = skb_get(skb);
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- }
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
- }
-
- skb_tx_timestamp(skb);
-}
-
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
if (skb_vlan_tag_present(skb))
@@ -1776,6 +1583,9 @@ static int xgbe_open(struct net_device *netdev)
INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+ /* Initialize PTP timestamping and clock. */
+ xgbe_init_ptp(pdata);
+
ret = xgbe_alloc_memory(pdata);
if (ret)
goto err_ptpclk;
@@ -2546,12 +2356,8 @@ skip_data:
if (XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
- u64 nsec;
-
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- packet->rx_tstamp);
hwtstamps = skb_hwtstamps(skb);
- hwtstamps->hwtstamp = ns_to_ktime(nsec);
+ hwtstamps->hwtstamp = ns_to_ktime(packet->rx_tstamp);
}
if (XGMAC_GET_BITS(packet->attributes,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
new file mode 100644
index 000000000000..bc52e5ec6420
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata,
+ unsigned int sec, unsigned int nsec)
+{
+ int count;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+
+ /* Issue the command to update the system time value */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1);
+
+ /* Wait for the time adjust/update to complete */
+ count = 10000;
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating system timestamp\n");
+}
+
+void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend)
+{
+ unsigned int count = 10000;
+
+ /* Set the addend register value and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+ /* Wait for addend update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating timestamp addend register\n");
+}
+
+void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec)
+{
+ unsigned int count = 10000;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+ /* Wait for time update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev, "timed out initializing timestamp\n");
+}
+
+u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+{
+ u64 nsec;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_STSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += XGMAC_IOREAD(pdata, MAC_STNR);
+
+ return nsec;
+}
+
+u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+{
+ unsigned int tx_snr, tx_ssr;
+ u64 nsec;
+
+ if (pdata->vdata->tx_tstamp_workaround) {
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ } else {
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ }
+
+ if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+ return 0;
+
+ nsec = tx_ssr;
+ nsec *= NSEC_PER_SEC;
+ nsec += tx_snr;
+
+ return nsec;
+}
+
+void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc)
+{
+ u64 nsec;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
+ !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
+ nsec = le32_to_cpu(rdesc->desc1);
+ nsec *= NSEC_PER_SEC;
+ nsec += le32_to_cpu(rdesc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ packet->rx_tstamp = nsec;
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RX_TSTAMP, 1);
+ }
+ }
+}
+
+void xgbe_config_tstamp(struct xgbe_prv_data *pdata, unsigned int mac_tscr)
+{
+ unsigned int value = 0;
+
+ value = XGMAC_IOREAD(pdata, MAC_TSCR);
+ value |= mac_tscr;
+ XGMAC_IOWRITE(pdata, MAC_TSCR, value);
+}
+
+void xgbe_tx_tstamp(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ tx_tstamp_work);
+ struct skb_shared_hwtstamps hwtstamps;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (!pdata->tx_tstamp_skb)
+ goto unlock;
+
+ if (pdata->tx_tstamp) {
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(pdata->tx_tstamp);
+ skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
+ }
+
+ dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+ pdata->tx_tstamp_skb = NULL;
+
+unlock:
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+}
+
+int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq)
+{
+ if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
+ sizeof(pdata->tstamp_config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq)
+{
+ struct hwtstamp_config config;
+ unsigned int mac_tscr;
+
+ if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ mac_tscr = 0;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+
+ case HWTSTAMP_TX_ON:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+ /* PTP v2, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.AS1, Ethernet, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.AS1, Ethernet, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.AS1, Ethernet, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.AS1, any layer, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.AS1, any layer, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.AS1, any layer, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ xgbe_config_tstamp(pdata, mac_tscr);
+
+ memcpy(&pdata->tstamp_config, &config, sizeof(config));
+
+ return 0;
+}
+
+void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ unsigned long flags;
+
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (pdata->tx_tstamp_skb) {
+ /* Another timestamp in progress, ignore this one */
+ XGMAC_SET_BITS(packet->attributes,
+ TX_PACKET_ATTRIBUTES, PTP, 0);
+ } else {
+ pdata->tx_tstamp_skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+ }
+
+ skb_tx_timestamp(skb);
+}
+
+int xgbe_init_ptp(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_tscr = 0;
+ struct timespec64 now;
+ u64 dividend;
+
+ /* Initial timing-register settings depend on the link speed. */
+ switch (pdata->phy.speed) {
+ case SPEED_1000:
+ XGMAC_IOWRITE(pdata, MAC_TICNR, MAC_TICNR_1G_INITVAL);
+ XGMAC_IOWRITE(pdata, MAC_TECNR, MAC_TECNR_1G_INITVAL);
+ break;
+ case SPEED_2500:
+ case SPEED_10000:
+ XGMAC_IOWRITE_BITS(pdata, MAC_TICSNR, TSICSNS,
+ MAC_TICSNR_10G_INITVAL);
+ XGMAC_IOWRITE(pdata, MAC_TECNR, MAC_TECNR_10G_INITVAL);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TECSNR, TSECSNS,
+ MAC_TECSNR_10G_INITVAL);
+ break;
+ case SPEED_UNKNOWN:
+ default:
+ break;
+ }
+
+ /* Enable IEEE1588 PTP clock. */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+
+ /* Overwrite earlier timestamps */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
+
+ /* Set one nano-second accuracy */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
+
+ /* Set fine timestamp update */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
+
+ xgbe_config_tstamp(pdata, mac_tscr);
+
+ /* Exit if timestamping is not enabled */
+ if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
+ return -EOPNOTSUPP;
+
+ if (pdata->vdata->tstamp_ptp_clock_freq) {
+ /* Initialize time registers based on
+ * 125MHz PTP Clock Frequency
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC,
+ XGBE_V2_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC,
+ XGBE_V2_TSTAMP_SNSINC);
+ } else {
+ /* Initialize time registers based on
+ * 50MHz PTP Clock Frequency
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
+ }
+
+ /* Calculate the addend:
+ * addend = 2^32 / (PTP ref clock / (PTP clock based on SSINC))
+ * = (2^32 * (PTP clock based on SSINC)) / PTP ref clock
+ */
+ if (pdata->vdata->tstamp_ptp_clock_freq)
+ dividend = XGBE_V2_PTP_ACT_CLK_FREQ;
+ else
+ dividend = XGBE_PTP_ACT_CLK_FREQ;
+
+ dividend <<= 32;
+ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
+
+ xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
+
+ dma_wmb();
+ /* initialize system time */
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ xgbe_set_tstamp_time(pdata, (u32)now.tv_sec, now.tv_nsec);
+
+ return 0;
+}
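
The addend arithmetic at the end of xgbe_init_ptp() is easiest to sanity-check with concrete numbers. The values below are illustrative only (a 50 MHz timestamping clock, i.e. SSINC = 20 ns, fed from a hypothetical 250 MHz PTP reference), not a claim about any particular xgbe platform:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t ssinc_clk = 50000000;  /* 1e9 / SSINC(20 ns) = 50 MHz */
        uint64_t ref_clk   = 250000000; /* illustrative reference clock */
        uint32_t addend    = (uint32_t)((ssinc_clk << 32) / ref_clk);

        /* 2^32 * 50e6 / 250e6 = 2^32 / 5 = 858993459 = 0x33333333 */
        printf("addend = %u (0x%08x)\n", addend, addend);
        return 0;
}

Frequency trimming then works by scaling this addend: adjfine requests nudge the accumulator rate without touching SSINC.
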
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 4ebdd123c435..d1f0419edb23 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -275,7 +275,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->min_mtu = 0;
- netdev->max_mtu = XGMAC_JUMBO_PACKET_MTU;
+ netdev->max_mtu = XGMAC_GIANT_PACKET_MTU - XGBE_ETH_FRAME_HDR;
/* Use default watchdog timeout */
netdev->watchdog_timeo = 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 097ec5e4f261..e3e1dca9856a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -414,6 +414,7 @@ static struct xgbe_version_data xgbe_v2a = {
.tx_max_fifo_size = 229376,
.rx_max_fifo_size = 229376,
.tx_tstamp_workaround = 1,
+ .tstamp_ptp_clock_freq = 1,
.ecc_support = 1,
.i2c_support = 1,
.irq_reissue_support = 1,
@@ -430,6 +431,7 @@ static struct xgbe_version_data xgbe_v2b = {
.tx_max_fifo_size = 65536,
.rx_max_fifo_size = 65536,
.tx_tstamp_workaround = 1,
+ .tstamp_ptp_clock_freq = 1,
.ecc_support = 1,
.i2c_support = 1,
.irq_reissue_support = 1,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index e8d5c05de77a..3658afc7801d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -13,18 +13,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static u64 xgbe_cc_read(struct cyclecounter *cc)
-{
- struct xgbe_prv_data *pdata = container_of(cc,
- struct xgbe_prv_data,
- tstamp_cc);
- u64 nsec;
-
- nsec = pdata->hw_if.get_tstamp_time(pdata);
-
- return nsec;
-}
-
static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct xgbe_prv_data *pdata = container_of(info,
@@ -37,7 +25,7 @@ static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
spin_lock_irqsave(&pdata->tstamp_lock, flags);
- pdata->hw_if.update_tstamp_addend(pdata, addend);
+ xgbe_update_tstamp_addend(pdata, addend);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
@@ -49,16 +37,39 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
ptp_clock_info);
+ unsigned int neg_adjust = 0;
+ unsigned int sec, nsec;
+ u32 quotient, remainder;
unsigned long flags;
+ if (delta < 0) {
+ neg_adjust = 1;
+ delta = -delta;
+ }
+
+ quotient = div_u64_rem(delta, 1000000000ULL, &remainder);
+ sec = quotient;
+ nsec = remainder;
+
+ /* Negative adjustment for Hw timer register. */
+ if (neg_adjust) {
+ sec = -sec;
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSCTRLSSR))
+ nsec = (1000000000UL - nsec);
+ else
+ nsec = (0x80000000UL - nsec);
+ }
+ nsec = (neg_adjust << 31) | nsec;
+
spin_lock_irqsave(&pdata->tstamp_lock, flags);
- timecounter_adjtime(&pdata->tstamp_tc, delta);
+ xgbe_update_tstamp_time(pdata, sec, nsec);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
}
-static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+static int xgbe_gettimex(struct ptp_clock_info *info, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
@@ -67,9 +78,9 @@ static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
u64 nsec;
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- nsec = timecounter_read(&pdata->tstamp_tc);
-
+ ptp_read_system_prets(sts);
+ nsec = xgbe_get_tstamp_time(pdata);
+ ptp_read_system_postts(sts);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
*ts = ns_to_timespec64(nsec);
@@ -84,14 +95,9 @@ static int xgbe_settime(struct ptp_clock_info *info,
struct xgbe_prv_data,
ptp_clock_info);
unsigned long flags;
- u64 nsec;
-
- nsec = timespec64_to_ns(ts);
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
-
+ xgbe_set_tstamp_time(pdata, ts->tv_sec, ts->tv_nsec);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
@@ -107,8 +113,6 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
{
struct ptp_clock_info *info = &pdata->ptp_clock_info;
struct ptp_clock *clock;
- struct cyclecounter *cc = &pdata->tstamp_cc;
- u64 dividend;
snprintf(info->name, sizeof(info->name), "%s",
netdev_name(pdata->netdev));
@@ -116,7 +120,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
info->max_adj = pdata->ptpclk_rate;
info->adjfine = xgbe_adjfine;
info->adjtime = xgbe_adjtime;
- info->gettime64 = xgbe_gettime;
+ info->gettimex64 = xgbe_gettimex;
info->settime64 = xgbe_settime;
info->enable = xgbe_enable;
@@ -128,23 +132,6 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
pdata->ptp_clock = clock;
- /* Calculate the addend:
- * addend = 2^32 / (PTP ref clock / 50Mhz)
- * = (2^32 * 50Mhz) / PTP ref clock
- */
- dividend = 50000000;
- dividend <<= 32;
- pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
-
- /* Setup the timecounter */
- cc->read = xgbe_cc_read;
- cc->mask = CLOCKSOURCE_MASK(64);
- cc->mult = 1;
- cc->shift = 0;
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
/* Disable all timestamping to start */
XGMAC_IOWRITE(pdata, MAC_TSCR, 0);
pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
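
The reworked xgbe_adjtime() encodes a negative offset the way the MAC's add/subtract update expects: seconds in two's complement, nanoseconds as 10^9 - nsec when digital rollover (TSCTRLSSR) is in effect, and bit 31 of the nanoseconds register flagging the subtraction. A worked example with delta = -1.5 s (standalone, mirroring the arithmetic rather than the register writes):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t delta = -1500000000;    /* -1.5 s */
        uint64_t mag = (uint64_t)(-delta);
        uint32_t sec, nsec;

        sec  = (uint32_t)-(uint32_t)(mag / 1000000000);    /* -1 -> 0xffffffff */
        nsec = 1000000000u - (uint32_t)(mag % 1000000000); /* 500000000 */
        nsec |= 1u << 31;               /* subtract flag in bit 31 */

        printf("sec=0x%08x nsec=0x%08x\n", sec, nsec); /* 0xffffffff 0x9dcd6500 */
        return 0;
}
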
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 057379cd43ba..d7e03e292ec4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -80,11 +80,13 @@
#define XGBE_IRQ_MODE_EDGE 0
#define XGBE_IRQ_MODE_LEVEL 1
+#define XGBE_ETH_FRAME_HDR (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGMAC_MIN_PACKET 60
#define XGMAC_STD_PACKET_MTU 1500
#define XGMAC_MAX_STD_PACKET 1518
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
+#define XGMAC_GIANT_PACKET_MTU 16368
#define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + preamble */
#define XGMAC_PFC_DATA_LEN 46
@@ -117,6 +119,14 @@
#define XGBE_MSI_BASE_COUNT 4
#define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1)
+/* Initial PTP register values based on Link Speed. */
+#define MAC_TICNR_1G_INITVAL 0x10
+#define MAC_TECNR_1G_INITVAL 0x28
+
+#define MAC_TICSNR_10G_INITVAL 0x33
+#define MAC_TECNR_10G_INITVAL 0x14
+#define MAC_TECSNR_10G_INITVAL 0xCC
+
/* PCI clock frequencies */
#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */
#define XGBE_V2_PTP_CLOCK_FREQ 125000000 /* 125 MHz */
@@ -126,6 +136,11 @@
*/
#define XGBE_TSTAMP_SSINC 20
#define XGBE_TSTAMP_SNSINC 0
+#define XGBE_PTP_ACT_CLK_FREQ 500000000
+
+#define XGBE_V2_TSTAMP_SSINC 0xA
+#define XGBE_V2_TSTAMP_SNSINC 0
+#define XGBE_V2_PTP_ACT_CLK_FREQ 1000000000
/* Driver PMT macros */
#define XGMAC_DRIVER_CONTEXT 1
@@ -739,14 +754,6 @@ struct xgbe_hw_if {
void (*tx_mmc_int)(struct xgbe_prv_data *);
void (*read_mmc_stats)(struct xgbe_prv_data *);
- /* For Timestamp config */
- int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
- void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
- void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
- unsigned int nsec);
- u64 (*get_tstamp_time)(struct xgbe_prv_data *);
- u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
-
/* For Data Center Bridging config */
void (*config_tc)(struct xgbe_prv_data *);
void (*config_dcb_tc)(struct xgbe_prv_data *);
@@ -944,6 +951,7 @@ struct xgbe_version_data {
unsigned int tx_max_fifo_size;
unsigned int rx_max_fifo_size;
unsigned int tx_tstamp_workaround;
+ unsigned int tstamp_ptp_clock_freq;
unsigned int ecc_support;
unsigned int i2c_support;
unsigned int irq_reissue_support;
@@ -1129,8 +1137,6 @@ struct xgbe_prv_data {
struct ptp_clock_info ptp_clock_info;
struct ptp_clock *ptp_clock;
struct hwtstamp_config tstamp_config;
- struct cyclecounter tstamp_cc;
- struct timecounter tstamp_tc;
unsigned int tstamp_addend;
struct work_struct tx_tstamp_work;
struct sk_buff *tx_tstamp_skb;
@@ -1275,6 +1281,27 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
void xgbe_restart_dev(struct xgbe_prv_data *pdata);
void xgbe_full_restart_dev(struct xgbe_prv_data *pdata);
+/* For Timestamp config */
+void xgbe_config_tstamp(struct xgbe_prv_data *pdata, unsigned int mac_tscr);
+u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata);
+u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata);
+void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc);
+void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend);
+void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec);
+void xgbe_tx_tstamp(struct work_struct *work);
+int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq);
+int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq);
+void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet);
+int xgbe_init_ptp(struct xgbe_prv_data *pdata);
+void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec);
#ifdef CONFIG_DEBUG_FS
void xgbe_debugfs_init(struct xgbe_prv_data *);
void xgbe_debugfs_exit(struct xgbe_prv_data *);
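
The max_mtu change in xgbe-main.c follows directly from the two new defines: the giant-packet limit is a frame-size budget, so the header overhead XGBE_ETH_FRAME_HDR (14-byte Ethernet header + 4-byte FCS + 4-byte VLAN tag = 22 bytes) has to come off before it is exposed as an MTU. The arithmetic, checked standalone:

#include <stdio.h>

#define ETH_HLEN                14
#define ETH_FCS_LEN             4
#define VLAN_HLEN               4
#define XGBE_ETH_FRAME_HDR      (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGMAC_GIANT_PACKET_MTU  16368

int main(void)
{
        /* 16368 - 22 = 16346 bytes of payload MTU */
        printf("max_mtu = %d\n", XGMAC_GIANT_PACKET_MTU - XGBE_ETH_FRAME_HDR);
        return 0;
}
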
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 42c0efc1b455..4e66fd9b2ab1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -113,6 +113,8 @@ struct aq_stats_s {
#define AQ_HW_POWER_STATE_D0 0U
#define AQ_HW_POWER_STATE_D3 3U
+#define AQ_FW_WAKE_ON_LINK_RTPM BIT(10)
+
#define AQ_HW_FLAG_STARTED 0x00000004U
#define AQ_HW_FLAG_STOPPING 0x00000008U
#define AQ_HW_FLAG_RESETTING 0x00000010U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 08630ee94251..ed5231dece3f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -463,7 +463,7 @@ static const struct dev_pm_ops aq_pm_ops = {
};
#endif
-static struct pci_driver aq_pci_ops = {
+static struct pci_driver aq_pci_driver = {
.name = AQ_CFG_DRV_NAME,
.id_table = aq_pci_tbl,
.probe = aq_pci_probe,
@@ -476,11 +476,11 @@ static struct pci_driver aq_pci_ops = {
int aq_pci_func_register_driver(void)
{
- return pci_register_driver(&aq_pci_ops);
+ return pci_register_driver(&aq_pci_driver);
}
void aq_pci_func_unregister_driver(void)
{
- pci_unregister_driver(&aq_pci_ops);
+ pci_unregister_driver(&aq_pci_driver);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index 52e2070a4a2f..7370e3f76b62 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -462,6 +462,44 @@ static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp)
return aq_a2_fw_get_phy_temp(self, temp);
}
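+/* Program the firmware sleep proxy with the station MAC address and the
+ * requested wake triggers, then switch the link control mode so the
+ * firmware can wake the host while it sleeps.
+ */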
+static int aq_a2_fw_set_wol_params(struct aq_hw_s *self, const u8 *mac, u32 wol)
+{
+ struct mac_address_aligned_s mac_address;
+ struct link_control_s link_control;
+ struct wake_on_lan_s wake_on_lan;
+
+ memcpy(mac_address.aligned.mac_address, mac, ETH_ALEN);
+ hw_atl2_shared_buffer_write(self, mac_address, mac_address);
+
+ memset(&wake_on_lan, 0, sizeof(wake_on_lan));
+
+ if (wol & WAKE_MAGIC)
+ wake_on_lan.wake_on_magic_packet = 1U;
+
+ if (wol & (WAKE_PHY | AQ_FW_WAKE_ON_LINK_RTPM))
+ wake_on_lan.wake_on_link_up = 1U;
+
+ hw_atl2_shared_buffer_write(self, sleep_proxy, wake_on_lan);
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_SLEEP_PROXY;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_set_power(struct aq_hw_s *self, unsigned int power_state,
+ const u8 *mac)
+{
+ u32 wol = self->aq_nic_cfg->wol;
+ int err = 0;
+
+ if (wol)
+ err = aq_a2_fw_set_wol_params(self, mac, wol);
+
+ return err;
+}
+
static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed)
{
struct link_options_s link_options;
@@ -605,6 +643,7 @@ const struct aq_fw_ops aq_a2_fw_ops = {
.set_state = aq_a2_fw_set_state,
.update_link_status = aq_a2_fw_update_link_status,
.update_stats = aq_a2_fw_update_stats,
+ .set_power = aq_a2_fw_set_power,
.get_mac_temp = aq_a2_fw_get_mac_temp,
.get_phy_temp = aq_a2_fw_get_phy_temp,
.set_eee_rate = aq_a2_fw_set_eee_rate,
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index d8e6f23e1432..cbc730c7cff2 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1213,6 +1213,11 @@ static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
buf->rx.rx_buf = data;
buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ag->pdev->dev, buf->rx.dma_addr)) {
+ skb_free_frag(data);
+ buf->rx.rx_buf = NULL;
+ return false;
+ }
desc->data = (u32)buf->rx.dma_addr + offset;
return true;
}
@@ -1511,6 +1516,10 @@ static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&ag->pdev->dev, dma_addr)) {
+ netif_dbg(ag, tx_err, ndev, "DMA mapping error\n");
+ goto err_drop;
+ }
i = ring->curr & ring_mask;
desc = ag71xx_ring_desc(ring, i);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index ef1a51347351..7efa3fc257b3 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2688,7 +2688,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.mdio_write = atl1c_mdio_write;
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
- dev_set_threaded(netdev, true);
+ netif_threaded_enable(netdev);
for (i = 0; i < adapter->rx_queue_count; ++i)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
atl1c_clean_rx);
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 81a74e07464f..0fc10e6c6902 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -253,6 +253,15 @@ config BNXT_HWMON
Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
devices, via the hwmon sysfs interface.
+config BNGE
+ tristate "Broadcom Ethernet device support"
+ depends on PCI
+ select NET_DEVLINK
+ help
+ This driver supports Broadcom 50/100/200/400/800 gigabit Ethernet cards.
+ To compile this driver as a module, choose M here: the module will
+ be called bng_en.
+
config BCMASP
tristate "Broadcom ASP 2.0 Ethernet support"
depends on ARCH_BRCMSTB || COMPILE_TEST
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index bac5cb6ad0cd..10cc1c92ecfc 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
obj-$(CONFIG_BCMASP) += asp2/
+obj-$(CONFIG_BNGE) += bnge/
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index 4381a4cfd8c6..63f1a8c3a7fb 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -430,4 +430,5 @@ const struct ethtool_ops bcmasp_ethtool_ops = {
.get_ethtool_stats = bcmasp_get_ethtool_stats,
.get_sset_count = bcmasp_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
+ .nway_reset = phy_ethtool_nway_reset,
};
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index f832562bd7a3..b9973956c480 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -605,10 +605,8 @@ next:
bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
- if (processed < budget) {
- napi_complete_done(&intf->rx_napi, processed);
+ if (processed < budget && napi_complete_done(&intf->rx_napi, processed))
bcmasp_enable_rx_irq(intf, 1);
- }
return processed;
}
@@ -1284,6 +1282,8 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
ndev->hw_features |= ndev->features;
ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);
+ netdev_sw_irq_coalesce_default_on(ndev);
+
return intf;
err_free_netdev:
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 8267417b3750..0353359c3fe9 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2570,7 +2570,7 @@ static int __init b44_init(void)
unsigned int dma_desc_align_size = dma_get_cache_alignment();
int err;
- /* Setup paramaters for syncing RX/TX DMA descriptors */
+ /* Setup parameters for syncing RX/TX DMA descriptors */
dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
err = b44_pci_init();
diff --git a/drivers/net/ethernet/broadcom/bnge/Makefile b/drivers/net/ethernet/broadcom/bnge/Makefile
new file mode 100644
index 000000000000..6142d9c57f49
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_BNGE) += bng_en.o
+
+bng_en-y := bnge_core.o \
+ bnge_devlink.o \
+ bnge_hwrm.o \
+ bnge_hwrm_lib.o \
+ bnge_rmem.o \
+ bnge_resc.o \
+ bnge_netdev.o \
+ bnge_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
new file mode 100644
index 000000000000..6fb3683b6b04
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_H_
+#define _BNGE_H_
+
+#define DRV_NAME "bng_en"
+#define DRV_SUMMARY "Broadcom 800G Ethernet Linux Driver"
+
+#include <linux/etherdevice.h>
+#include <linux/bnxt/hsi.h>
+#include "bnge_rmem.h"
+#include "bnge_resc.h"
+
+#define DRV_VER_MAJ 1
+#define DRV_VER_MIN 15
+#define DRV_VER_UPD 1
+
+extern char bnge_driver_name[];
+
+enum board_idx {
+ BCM57708,
+};
+
+struct bnge_pf_info {
+ u16 fw_fid;
+ u16 port_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+#define INVALID_HW_RING_ID ((u16)-1)
+
+enum {
+ BNGE_FW_CAP_SHORT_CMD = BIT_ULL(0),
+ BNGE_FW_CAP_LLDP_AGENT = BIT_ULL(1),
+ BNGE_FW_CAP_DCBX_AGENT = BIT_ULL(2),
+ BNGE_FW_CAP_IF_CHANGE = BIT_ULL(3),
+ BNGE_FW_CAP_KONG_MB_CHNL = BIT_ULL(4),
+ BNGE_FW_CAP_ERROR_RECOVERY = BIT_ULL(5),
+ BNGE_FW_CAP_PKG_VER = BIT_ULL(6),
+ BNGE_FW_CAP_CFA_ADV_FLOW = BIT_ULL(7),
+ BNGE_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 = BIT_ULL(8),
+ BNGE_FW_CAP_PCIE_STATS_SUPPORTED = BIT_ULL(9),
+ BNGE_FW_CAP_EXT_STATS_SUPPORTED = BIT_ULL(10),
+ BNGE_FW_CAP_ERR_RECOVER_RELOAD = BIT_ULL(11),
+ BNGE_FW_CAP_HOT_RESET = BIT_ULL(12),
+ BNGE_FW_CAP_RX_ALL_PKT_TS = BIT_ULL(13),
+ BNGE_FW_CAP_VLAN_RX_STRIP = BIT_ULL(14),
+ BNGE_FW_CAP_VLAN_TX_INSERT = BIT_ULL(15),
+ BNGE_FW_CAP_EXT_HW_STATS_SUPPORTED = BIT_ULL(16),
+ BNGE_FW_CAP_LIVEPATCH = BIT_ULL(17),
+ BNGE_FW_CAP_HOT_RESET_IF = BIT_ULL(18),
+ BNGE_FW_CAP_RING_MONITOR = BIT_ULL(19),
+ BNGE_FW_CAP_DBG_QCAPS = BIT_ULL(20),
+ BNGE_FW_CAP_THRESHOLD_TEMP_SUPPORTED = BIT_ULL(21),
+ BNGE_FW_CAP_DFLT_VLAN_TPID_PCP = BIT_ULL(22),
+ BNGE_FW_CAP_VNIC_TUNNEL_TPA = BIT_ULL(23),
+ BNGE_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO = BIT_ULL(24),
+ BNGE_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 = BIT_ULL(25),
+ BNGE_FW_CAP_VNIC_RE_FLUSH = BIT_ULL(26),
+};
+
+enum {
+ BNGE_EN_ROCE_V1 = BIT_ULL(0),
+ BNGE_EN_ROCE_V2 = BIT_ULL(1),
+ BNGE_EN_STRIP_VLAN = BIT_ULL(2),
+ BNGE_EN_SHARED_CHNL = BIT_ULL(3),
+ BNGE_EN_UDP_GSO_SUPP = BIT_ULL(4),
+};
+
+#define BNGE_EN_ROCE (BNGE_EN_ROCE_V1 | BNGE_EN_ROCE_V2)
+
+enum {
+ BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA = BIT(0),
+ BNGE_RSS_CAP_UDP_RSS_CAP = BIT(1),
+ BNGE_RSS_CAP_NEW_RSS_CAP = BIT(2),
+ BNGE_RSS_CAP_RSS_TCAM = BIT(3),
+ BNGE_RSS_CAP_AH_V4_RSS_CAP = BIT(4),
+ BNGE_RSS_CAP_AH_V6_RSS_CAP = BIT(5),
+ BNGE_RSS_CAP_ESP_V4_RSS_CAP = BIT(6),
+ BNGE_RSS_CAP_ESP_V6_RSS_CAP = BIT(7),
+};
+
+#define BNGE_MAX_QUEUE 8
+struct bnge_queue_info {
+ u8 queue_id;
+ u8 queue_profile;
+};
+
+struct bnge_dev {
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ u64 dsn;
+#define BNGE_VPD_FLD_LEN 32
+ char board_partno[BNGE_VPD_FLD_LEN];
+ char board_serialno[BNGE_VPD_FLD_LEN];
+
+ void __iomem *bar0;
+ void __iomem *bar1;
+
+ u16 chip_num;
+ u8 chip_rev;
+
+ int db_offset; /* db_offset within db_size */
+ int db_size;
+
+ /* HWRM members */
+ u16 hwrm_cmd_seq;
+ u16 hwrm_cmd_kong_seq;
+ struct dma_pool *hwrm_dma_pool;
+ struct hlist_head hwrm_pending_list;
+ u16 hwrm_max_req_len;
+ u16 hwrm_max_ext_req_len;
+ unsigned int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_max_timeout;
+ struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
+
+ struct hwrm_ver_get_output ver_resp;
+#define FW_VER_STR_LEN 32
+ char fw_ver_str[FW_VER_STR_LEN];
+ char hwrm_ver_supp[FW_VER_STR_LEN];
+ char nvm_cfg_ver[FW_VER_STR_LEN];
+ u64 fw_ver_code;
+#define BNGE_FW_VER_CODE(maj, min, bld, rsv) \
+ ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
+
+ struct bnge_pf_info pf;
+
+ unsigned long state;
+#define BNGE_STATE_DRV_REGISTERED 0
+
+ u64 fw_cap;
+
+ /* Backing stores */
+ struct bnge_ctx_mem_info *ctx;
+
+ u64 flags;
+
+ struct bnge_hw_resc hw_resc;
+
+ u16 tso_max_segs;
+
+ int max_fltr;
+#define BNGE_L2_FLTR_MAX_FLTR 1024
+
+ u32 *rss_indir_tbl;
+#define BNGE_RSS_TABLE_ENTRIES 64
+#define BNGE_RSS_TABLE_SIZE (BNGE_RSS_TABLE_ENTRIES * 4)
+#define BNGE_RSS_TABLE_MAX_TBL 8
+#define BNGE_MAX_RSS_TABLE_SIZE \
+ (BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL)
+#define BNGE_MAX_RSS_TABLE_ENTRIES \
+ (BNGE_RSS_TABLE_ENTRIES * BNGE_RSS_TABLE_MAX_TBL)
+ u16 rss_indir_tbl_entries;
+
+ u32 rss_cap;
+
+ u16 rx_nr_rings;
+ u16 tx_nr_rings;
+ u16 tx_nr_rings_per_tc;
+ /* Number of NQs */
+ u16 nq_nr_rings;
+
+ /* Aux device resources */
+ u16 aux_num_msix;
+ u16 aux_num_stat_ctxs;
+
+ u16 max_mtu;
+#define BNGE_MAX_MTU 9500
+
+ u16 hw_ring_stats_size;
+#define BNGE_NUM_RX_RING_STATS 8
+#define BNGE_NUM_TX_RING_STATS 8
+#define BNGE_NUM_TPA_RING_STATS 6
+#define BNGE_RING_STATS_SIZE \
+ ((BNGE_NUM_RX_RING_STATS + BNGE_NUM_TX_RING_STATS + \
+ BNGE_NUM_TPA_RING_STATS) * 8)
+
+ u16 max_tpa_v2;
+#define BNGE_SUPPORTS_TPA(bd) ((bd)->max_tpa_v2)
+
+ u8 num_tc;
+ u8 max_tc;
+ u8 max_lltc; /* lossless TCs */
+ struct bnge_queue_info q_info[BNGE_MAX_QUEUE];
+ u8 tc_to_qidx[BNGE_MAX_QUEUE];
+ u8 q_ids[BNGE_MAX_QUEUE];
+ u8 max_q;
+ u8 port_count;
+
+ struct bnge_irq *irq_tbl;
+ u16 irqs_acquired;
+};
+
+static inline bool bnge_is_roce_en(struct bnge_dev *bd)
+{
+ return bd->flags & BNGE_EN_ROCE;
+}
+
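+/* Aggregation rings are required when TPA or jumbo frames are enabled;
+ * before a netdev exists, assume they will be needed.
+ */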
+static inline bool bnge_is_agg_reqd(struct bnge_dev *bd)
+{
+ if (bd->netdev) {
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA ||
+ bn->priv_flags & BNGE_NET_EN_JUMBO)
+ return true;
+ else
+ return false;
+ }
+
+ return true;
+}
+
+bool bnge_aux_registered(struct bnge_dev *bd);
+
+#endif /* _BNGE_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_core.c b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
new file mode 100644
index 000000000000..68da656f2894
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/init.h>
+#include <linux/crash_dump.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "bnge.h"
+#include "bnge_devlink.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_SUMMARY);
+
+char bnge_driver_name[] = DRV_NAME;
+
+static const struct {
+ char *name;
+} board_info[] = {
+ [BCM57708] = { "Broadcom BCM57708 50Gb/100Gb/200Gb/400Gb/800Gb Ethernet" },
+};
+
+static const struct pci_device_id bnge_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x1780), .driver_data = BCM57708 },
+ /* Required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, bnge_pci_tbl);
+
+static void bnge_print_device_info(struct pci_dev *pdev, enum board_idx idx)
+{
+ struct device *dev = &pdev->dev;
+
+ dev_info(dev, "%s found at mem %lx\n", board_info[idx].name,
+ (long)pci_resource_start(pdev, 0));
+
+ pcie_print_link_status(pdev);
+}
+
+bool bnge_aux_registered(struct bnge_dev *bd)
+{
+ return false;
+}
+
+static void bnge_nvm_cfg_ver_get(struct bnge_dev *bd)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_info;
+
+ if (!bnge_hwrm_nvm_dev_info(bd, &nvm_info))
+ snprintf(bd->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
+ nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
+ nvm_info.nvm_cfg_ver_upd);
+}
+
+static int bnge_func_qcaps(struct bnge_dev *bd)
+{
+ int rc;
+
+ rc = bnge_hwrm_func_qcaps(bd);
+ if (rc)
+ return rc;
+
+ rc = bnge_hwrm_queue_qportcfg(bd);
+ if (rc) {
+ dev_err(bd->dev, "query qportcfg failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_func_resc_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "query resc caps failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_func_qcfg(bd);
+ if (rc) {
+ dev_err(bd->dev, "query config failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_vnic_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "vnic caps failure rc: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void bnge_fw_unregister_dev(struct bnge_dev *bd)
+{
+ /* Free ctx mem only after the driver is unregistered with firmware */
+ bnge_hwrm_func_drv_unrgtr(bd);
+ bnge_free_ctx_mem(bd);
+}
+
+static int bnge_fw_register_dev(struct bnge_dev *bd)
+{
+ int rc;
+
+ bd->fw_cap = 0;
+ rc = bnge_hwrm_ver_get(bd);
+ if (rc) {
+ dev_err(bd->dev, "Get Version command failed rc: %d\n", rc);
+ return rc;
+ }
+
+ bnge_nvm_cfg_ver_get(bd);
+
+ rc = bnge_hwrm_func_reset(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to reset function rc: %d\n", rc);
+ return rc;
+ }
+
+ bnge_hwrm_fw_set_time(bd);
+
+ rc = bnge_hwrm_func_drv_rgtr(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to rgtr with firmware rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_alloc_ctx_mem(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to allocate ctx mem rc: %d\n", rc);
+ goto err_func_unrgtr;
+ }
+
+ /* Get the resources and configuration from firmware */
+ rc = bnge_func_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed initial configuration rc: %d\n", rc);
+ rc = -ENODEV;
+ goto err_func_unrgtr;
+ }
+
+ return 0;
+
+err_func_unrgtr:
+ bnge_fw_unregister_dev(bd);
+ return rc;
+}
+
+static void bnge_pci_disable(struct pci_dev *pdev)
+{
+ pci_release_regions(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+}
+
+static int bnge_pci_enable(struct pci_dev *pdev)
+{
+ int rc;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return rc;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto err_pci_disable;
+ }
+
+ rc = pci_request_regions(pdev, bnge_driver_name);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto err_pci_disable;
+ }
+
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
+ pci_set_master(pdev);
+
+ return 0;
+
+err_pci_disable:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void bnge_unmap_bars(struct pci_dev *pdev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(pdev);
+
+ if (bd->bar1) {
+ pci_iounmap(pdev, bd->bar1);
+ bd->bar1 = NULL;
+ }
+
+ if (bd->bar0) {
+ pci_iounmap(pdev, bd->bar0);
+ bd->bar0 = NULL;
+ }
+}
+
+static void bnge_set_max_func_irqs(struct bnge_dev *bd,
+ unsigned int max_irqs)
+{
+ bd->hw_resc.max_irqs = max_irqs;
+}
+
+static int bnge_get_max_irq(struct pci_dev *pdev)
+{
+ u16 ctrl;
+
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
+static int bnge_map_db_bar(struct bnge_dev *bd)
+{
+ if (!bd->db_size)
+ return -ENODEV;
+
+ bd->bar1 = pci_iomap(bd->pdev, 2, bd->db_size);
+ if (!bd->bar1)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnge_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned int max_irqs;
+ struct bnge_dev *bd;
+ int rc;
+
+ if (pci_is_bridge(pdev))
+ return -ENODEV;
+
+ if (!pdev->msix_cap) {
+ dev_err(&pdev->dev, "MSIX capability missing, aborting\n");
+ return -ENODEV;
+ }
+
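+ /* A kdump kernel inherits the device as the crashed kernel left it;
+ * stop bus mastering and reset the function before reuse.
+ */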
+ if (is_kdump_kernel()) {
+ pci_clear_master(pdev);
+ pcie_flr(pdev);
+ }
+
+ rc = bnge_pci_enable(pdev);
+ if (rc)
+ return rc;
+
+ bnge_print_device_info(pdev, ent->driver_data);
+
+ bd = bnge_devlink_alloc(pdev);
+ if (!bd) {
+ dev_err(&pdev->dev, "Devlink allocation failed\n");
+ rc = -ENOMEM;
+ goto err_pci_disable;
+ }
+
+ bd->bar0 = pci_ioremap_bar(pdev, 0);
+ if (!bd->bar0) {
+ dev_err(&pdev->dev, "Failed mapping BAR-0, aborting\n");
+ rc = -ENOMEM;
+ goto err_devl_free;
+ }
+
+ rc = bnge_init_hwrm_resources(bd);
+ if (rc)
+ goto err_bar_unmap;
+
+ rc = bnge_fw_register_dev(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to register with firmware rc = %d\n", rc);
+ goto err_hwrm_cleanup;
+ }
+
+ bnge_devlink_register(bd);
+
+ max_irqs = bnge_get_max_irq(pdev);
+ bnge_set_max_func_irqs(bd, max_irqs);
+
+ bnge_aux_init_dflt_config(bd);
+
+ rc = bnge_net_init_dflt_config(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Error setting up default cfg to netdev rc = %d\n",
+ rc);
+ goto err_fw_reg;
+ }
+
+ rc = bnge_map_db_bar(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed mapping doorbell BAR rc = %d, aborting\n",
+ rc);
+ goto err_config_uninit;
+ }
+
+ rc = bnge_alloc_irqs(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Error IRQ allocation rc = %d\n", rc);
+ goto err_config_uninit;
+ }
+
+ rc = bnge_netdev_alloc(bd, max_irqs);
+ if (rc)
+ goto err_free_irq;
+
+ pci_save_state(pdev);
+
+ return 0;
+
+err_free_irq:
+ bnge_free_irqs(bd);
+
+err_config_uninit:
+ bnge_net_uninit_dflt_config(bd);
+
+err_fw_reg:
+ bnge_devlink_unregister(bd);
+ bnge_fw_unregister_dev(bd);
+
+err_hwrm_cleanup:
+ bnge_cleanup_hwrm_resources(bd);
+
+err_bar_unmap:
+ bnge_unmap_bars(pdev);
+
+err_devl_free:
+ bnge_devlink_free(bd);
+
+err_pci_disable:
+ bnge_pci_disable(pdev);
+ return rc;
+}
+
+static void bnge_remove_one(struct pci_dev *pdev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(pdev);
+
+ bnge_netdev_free(bd);
+
+ bnge_free_irqs(bd);
+
+ bnge_net_uninit_dflt_config(bd);
+
+ bnge_devlink_unregister(bd);
+
+ bnge_fw_unregister_dev(bd);
+
+ bnge_cleanup_hwrm_resources(bd);
+
+ bnge_unmap_bars(pdev);
+
+ bnge_devlink_free(bd);
+
+ bnge_pci_disable(pdev);
+}
+
+static void bnge_shutdown(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, 0);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+static struct pci_driver bnge_driver = {
+ .name = bnge_driver_name,
+ .id_table = bnge_pci_tbl,
+ .probe = bnge_probe_one,
+ .remove = bnge_remove_one,
+ .shutdown = bnge_shutdown,
+};
+
+static int __init bnge_init_module(void)
+{
+ return pci_register_driver(&bnge_driver);
+}
+module_init(bnge_init_module);
+
+static void __exit bnge_exit_module(void)
+{
+ pci_unregister_driver(&bnge_driver);
+}
+module_exit(bnge_exit_module);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
new file mode 100644
index 000000000000..a987afebd64d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "bnge.h"
+#include "bnge_devlink.h"
+#include "bnge_hwrm_lib.h"
+
+static int bnge_dl_info_put(struct bnge_dev *bd, struct devlink_info_req *req,
+ enum bnge_dl_version_type type, const char *key,
+ char *buf)
+{
+ if (!strlen(buf))
+ return 0;
+
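+ /* NCSI and RoCE firmware versions are not reported for this device */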
+ if (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
+ !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE))
+ return 0;
+
+ switch (type) {
+ case BNGE_VERSION_FIXED:
+ return devlink_info_version_fixed_put(req, key, buf);
+ case BNGE_VERSION_RUNNING:
+ return devlink_info_version_running_put(req, key, buf);
+ case BNGE_VERSION_STORED:
+ return devlink_info_version_stored_put(req, key, buf);
+ }
+
+ return 0;
+}
+
+static void bnge_vpd_read_info(struct bnge_dev *bd)
+{
+ struct pci_dev *pdev = bd->pdev;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
+ u8 *vpd_data;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD\n");
+ return;
+ }
+
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (pos < 0)
+ goto read_sn;
+
+ size = min_t(int, kw_len, BNGE_VPD_FLD_LEN - 1);
+ memcpy(bd->board_partno, &vpd_data[pos], size);
+
+read_sn:
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO,
+ &kw_len);
+ if (pos < 0)
+ goto exit;
+
+ size = min_t(int, kw_len, BNGE_VPD_FLD_LEN - 1);
+ memcpy(bd->board_serialno, &vpd_data[pos], size);
+
+exit:
+ kfree(vpd_data);
+}
+
+#define HWRM_FW_VER_STR_LEN 16
+
+static int bnge_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_dev_info;
+ struct bnge_dev *bd = devlink_priv(devlink);
+ struct hwrm_ver_get_output *ver_resp;
+ char mgmt_ver[FW_VER_STR_LEN];
+ char roce_ver[FW_VER_STR_LEN];
+ char ncsi_ver[FW_VER_STR_LEN];
+ char buf[32];
+ int rc;
+
+ if (bd->dsn) {
+ u8 dsn[8];
+
+ put_unaligned_le64(bd->dsn, dsn);
+ sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
+ dsn[7], dsn[6], dsn[5], dsn[4],
+ dsn[3], dsn[2], dsn[1], dsn[0]);
+ rc = devlink_info_serial_number_put(req, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set dsn");
+ return rc;
+ }
+ }
+
+ if (strlen(bd->board_serialno)) {
+ rc = devlink_info_board_serial_number_put(req,
+ bd->board_serialno);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set board serial number");
+ return rc;
+ }
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ bd->board_partno);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set board part number");
+ return rc;
+ }
+
+ /* More information from HWRM ver get command */
+ sprintf(buf, "%X", bd->chip_num);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set asic id");
+ return rc;
+ }
+
+ ver_resp = &bd->ver_resp;
+ sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set asic info");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ bd->nvm_cfg_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set firmware version");
+ return rc;
+ }
+
+ buf[0] = 0;
+ strncat(buf, ver_resp->active_pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware generic version");
+ return rc;
+ }
+
+ if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
+ ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
+ ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
+ ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
+ } else {
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
+ ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
+ ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
+ ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
+ }
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware mgmt version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ bd->hwrm_ver_supp);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware mgmt api version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set ncsi firmware version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set roce firmware version");
+ return rc;
+ }
+
+ rc = bnge_hwrm_nvm_dev_info(bd, &nvm_dev_info);
+ if (!(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID))
+ return 0;
+
+ buf[0] = 0;
+ strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored firmware package version");
+ return rc;
+ }
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.hwrm_fw_major, nvm_dev_info.hwrm_fw_minor,
+ nvm_dev_info.hwrm_fw_build, nvm_dev_info.hwrm_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored firmware version");
+ return rc;
+ }
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.mgmt_fw_major, nvm_dev_info.mgmt_fw_minor,
+ nvm_dev_info.mgmt_fw_build, nvm_dev_info.mgmt_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored ncsi firmware version");
+ return rc;
+ }
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
+ nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored roce firmware version");
+
+ return rc;
+}
+
+static const struct devlink_ops bnge_devlink_ops = {
+ .info_get = bnge_devlink_info_get,
+};
+
+void bnge_devlink_free(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+
+ devlink_free(devlink);
+}
+
+struct bnge_dev *bnge_devlink_alloc(struct pci_dev *pdev)
+{
+ struct devlink *devlink;
+ struct bnge_dev *bd;
+
+ devlink = devlink_alloc(&bnge_devlink_ops, sizeof(*bd), &pdev->dev);
+ if (!devlink)
+ return NULL;
+
+ bd = devlink_priv(devlink);
+ pci_set_drvdata(pdev, bd);
+ bd->dev = &pdev->dev;
+ bd->pdev = pdev;
+
+ bd->dsn = pci_get_dsn(pdev);
+ if (!bd->dsn)
+ pci_warn(pdev, "Failed to get DSN\n");
+
+ bnge_vpd_read_info(bd);
+
+ return bd;
+}
+
+void bnge_devlink_register(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+
+ devlink_register(devlink);
+}
+
+void bnge_devlink_unregister(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+
+ devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h
new file mode 100644
index 000000000000..c6575255e650
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_DEVLINK_H_
+#define _BNGE_DEVLINK_H_
+
+enum bnge_dl_version_type {
+ BNGE_VERSION_FIXED,
+ BNGE_VERSION_RUNNING,
+ BNGE_VERSION_STORED,
+};
+
+void bnge_devlink_free(struct bnge_dev *bd);
+struct bnge_dev *bnge_devlink_alloc(struct pci_dev *pdev);
+void bnge_devlink_register(struct bnge_dev *bd);
+void bnge_devlink_unregister(struct bnge_dev *bd);
+
+#endif /* _BNGE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c
new file mode 100644
index 000000000000..569371c1b4f2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+
+#include "bnge.h"
+#include "bnge_ethtool.h"
+
+static void bnge_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct bnge_net *bn = netdev_priv(dev);
+ struct bnge_dev *bd = bn->bd;
+
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, bd->fw_ver_str, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(bd->pdev), sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops bnge_ethtool_ops = {
+ .get_drvinfo = bnge_get_drvinfo,
+};
+
+void bnge_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &bnge_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h
new file mode 100644
index 000000000000..21e96a0976d5
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_ETHTOOL_H_
+#define _BNGE_ETHTOOL_H_
+
+void bnge_set_ethtool_ops(struct net_device *dev);
+
+#endif /* _BNGE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
new file mode 100644
index 000000000000..0f971af24142
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+
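+/* The sentinel mixes the context address with the request type so that
+ * __hwrm_ctx_get() can reject pointers that were not produced by
+ * bnge_hwrm_req_create() or that have already been invalidated.
+ */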
+static u64 bnge_cal_sentinel(struct bnge_hwrm_ctx *ctx, u16 req_type)
+{
+ return (((uintptr_t)ctx) + req_type) ^ BNGE_HWRM_SENTINEL;
+}
+
+int bnge_hwrm_req_create(struct bnge_dev *bd, void **req, u16 req_type,
+ u32 req_len)
+{
+ struct bnge_hwrm_ctx *ctx;
+ dma_addr_t dma_handle;
+ u8 *req_addr;
+
+ if (req_len > BNGE_HWRM_CTX_OFFSET)
+ return -E2BIG;
+
+ req_addr = dma_pool_alloc(bd->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
+ &dma_handle);
+ if (!req_addr)
+ return -ENOMEM;
+
+ ctx = (struct bnge_hwrm_ctx *)(req_addr + BNGE_HWRM_CTX_OFFSET);
+ /* safety first, sentinel used to check for invalid requests */
+ ctx->sentinel = bnge_cal_sentinel(ctx, req_type);
+ ctx->req_len = req_len;
+ ctx->req = (struct input *)req_addr;
+ ctx->resp = (struct output *)(req_addr + BNGE_HWRM_RESP_OFFSET);
+ ctx->dma_handle = dma_handle;
+ ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */
+ ctx->timeout = bd->hwrm_cmd_timeout ?: BNGE_DFLT_HWRM_CMD_TIMEOUT;
+ ctx->allocated = BNGE_HWRM_DMA_SIZE - BNGE_HWRM_CTX_OFFSET;
+ ctx->gfp = GFP_KERNEL;
+ ctx->slice_addr = NULL;
+
+ /* initialize common request fields */
+ ctx->req->req_type = cpu_to_le16(req_type);
+ ctx->req->resp_addr = cpu_to_le64(dma_handle + BNGE_HWRM_RESP_OFFSET);
+ ctx->req->cmpl_ring = cpu_to_le16(BNGE_HWRM_NO_CMPL_RING);
+ ctx->req->target_id = cpu_to_le16(BNGE_HWRM_TARGET);
+ *req = ctx->req;
+
+ return 0;
+}
+
+static struct bnge_hwrm_ctx *__hwrm_ctx_get(struct bnge_dev *bd, u8 *req_addr)
+{
+ void *ctx_addr = req_addr + BNGE_HWRM_CTX_OFFSET;
+ struct input *req = (struct input *)req_addr;
+ struct bnge_hwrm_ctx *ctx = ctx_addr;
+ u64 sentinel;
+
+ if (!req) {
+ dev_err(bd->dev, "null HWRM request");
+ dump_stack();
+ return NULL;
+ }
+
+ /* HWRM API has no type safety, verify sentinel to validate address */
+ sentinel = bnge_cal_sentinel(ctx, le16_to_cpu(req->req_type));
+ if (ctx->sentinel != sentinel) {
+ dev_err(bd->dev, "HWRM sentinel mismatch, req_type = %u\n",
+ (u32)le16_to_cpu(req->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ return ctx;
+}
+
+void bnge_hwrm_req_timeout(struct bnge_dev *bd,
+ void *req, unsigned int timeout)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->timeout = timeout;
+}
+
+void bnge_hwrm_req_alloc_flags(struct bnge_dev *bd, void *req, gfp_t gfp)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->gfp = gfp;
+}
+
+void bnge_hwrm_req_flags(struct bnge_dev *bd, void *req,
+ enum bnge_hwrm_ctx_flags flags)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->flags |= (flags & BNGE_HWRM_API_FLAGS);
+}
+
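+/* Take ownership of the request so the response buffer survives the
+ * send; the caller must release it with bnge_hwrm_req_drop(). Returns
+ * a pointer to the response area of the same DMA buffer.
+ */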
+void *bnge_hwrm_req_hold(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ struct input *input = (struct input *)req;
+
+ if (!ctx)
+ return NULL;
+
+ if (ctx->flags & BNGE_HWRM_INTERNAL_CTX_OWNED) {
+ dev_err(bd->dev, "HWRM context already owned, req_type = %u\n",
+ (u32)le16_to_cpu(input->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ ctx->flags |= BNGE_HWRM_INTERNAL_CTX_OWNED;
+ return ((u8 *)req) + BNGE_HWRM_RESP_OFFSET;
+}
+
+static void __hwrm_ctx_invalidate(struct bnge_dev *bd,
+ struct bnge_hwrm_ctx *ctx)
+{
+ void *addr = ((u8 *)ctx) - BNGE_HWRM_CTX_OFFSET;
+ dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */
+
+ /* unmap any auxiliary DMA slice */
+ if (ctx->slice_addr)
+ dma_free_coherent(bd->dev, ctx->slice_size,
+ ctx->slice_addr, ctx->slice_handle);
+
+ /* invalidate, ensure ownership, sentinel and dma_handle are cleared */
+ memset(ctx, 0, sizeof(struct bnge_hwrm_ctx));
+
+ /* return the buffer to the DMA pool */
+ if (dma_handle)
+ dma_pool_free(bd->hwrm_dma_pool, addr, dma_handle);
+}
+
+void bnge_hwrm_req_drop(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ __hwrm_ctx_invalidate(bd, ctx);
+}
+
+static int bnge_map_hwrm_error(u32 hwrm_err)
+{
+ switch (hwrm_err) {
+ case HWRM_ERR_CODE_SUCCESS:
+ return 0;
+ case HWRM_ERR_CODE_RESOURCE_LOCKED:
+ return -EROFS;
+ case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
+ return -EACCES;
+ case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
+ return -ENOSPC;
+ case HWRM_ERR_CODE_INVALID_PARAMS:
+ case HWRM_ERR_CODE_INVALID_FLAGS:
+ case HWRM_ERR_CODE_INVALID_ENABLES:
+ case HWRM_ERR_CODE_UNSUPPORTED_TLV:
+ case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
+ return -EINVAL;
+ case HWRM_ERR_CODE_NO_BUFFER:
+ return -ENOMEM;
+ case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
+ case HWRM_ERR_CODE_BUSY:
+ return -EAGAIN;
+ case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HWRM_ERR_CODE_PF_UNAVAILABLE:
+ return -ENODEV;
+ default:
+ return -EIO;
+ }
+}
+
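+/* Allocate a wait token and take hwrm_cmd_lock; the lock stays held
+ * until bnge_hwrm_destroy_token() runs, serializing HWRM commands.
+ */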
+static struct bnge_hwrm_wait_token *
+bnge_hwrm_create_token(struct bnge_dev *bd, enum bnge_hwrm_chnl dst)
+{
+ struct bnge_hwrm_wait_token *token;
+
+ token = kzalloc(sizeof(*token), GFP_KERNEL);
+ if (!token)
+ return NULL;
+
+ mutex_lock(&bd->hwrm_cmd_lock);
+
+ token->dst = dst;
+ token->state = BNGE_HWRM_PENDING;
+ if (dst == BNGE_HWRM_CHNL_CHIMP) {
+ token->seq_id = bd->hwrm_cmd_seq++;
+ hlist_add_head_rcu(&token->node, &bd->hwrm_pending_list);
+ } else {
+ token->seq_id = bd->hwrm_cmd_kong_seq++;
+ }
+
+ return token;
+}
+
+static void
+bnge_hwrm_destroy_token(struct bnge_dev *bd, struct bnge_hwrm_wait_token *token)
+{
+ if (token->dst == BNGE_HWRM_CHNL_CHIMP) {
+ hlist_del_rcu(&token->node);
+ kfree_rcu(token, rcu);
+ } else {
+ kfree(token);
+ }
+ mutex_unlock(&bd->hwrm_cmd_lock);
+}
+
+static void bnge_hwrm_req_dbg(struct bnge_dev *bd, struct input *req)
+{
+ u32 ring = le16_to_cpu(req->cmpl_ring);
+ u32 type = le16_to_cpu(req->req_type);
+ u32 tgt = le16_to_cpu(req->target_id);
+ u32 seq = le16_to_cpu(req->seq_id);
+ char opt[32] = "\n";
+
+ if (unlikely(ring != (u16)BNGE_HWRM_NO_CMPL_RING))
+ snprintf(opt, 16, " ring %d\n", ring);
+
+ if (unlikely(tgt != BNGE_HWRM_TARGET))
+ snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);
+
+ dev_dbg(bd->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
+ type, seq, opt);
+}
+
+#define bnge_hwrm_err(bd, ctx, fmt, ...) \
+ do { \
+ if ((ctx)->flags & BNGE_HWRM_CTX_SILENT) \
+ dev_dbg((bd)->dev, fmt, __VA_ARGS__); \
+ else \
+ dev_err((bd)->dev, fmt, __VA_ARGS__); \
+ } while (0)
+
+static int __hwrm_send_ctx(struct bnge_dev *bd, struct bnge_hwrm_ctx *ctx)
+{
+ u32 doorbell_offset = BNGE_GRCPF_REG_CHIMP_COMM_TRIGGER;
+ enum bnge_hwrm_chnl dst = BNGE_HWRM_CHNL_CHIMP;
+ u32 bar_offset = BNGE_GRCPF_REG_CHIMP_COMM;
+ struct bnge_hwrm_wait_token *token = NULL;
+ u16 max_req_len = BNGE_HWRM_MAX_REQ_LEN;
+ unsigned int i, timeout, tmo_count;
+ u32 *data = (u32 *)ctx->req;
+ u32 msg_len = ctx->req_len;
+ int rc = -EBUSY;
+ u32 req_type;
+ u16 len = 0;
+ u8 *valid;
+
+ if (ctx->flags & BNGE_HWRM_INTERNAL_RESP_DIRTY)
+ memset(ctx->resp, 0, PAGE_SIZE);
+
+ req_type = le16_to_cpu(ctx->req->req_type);
+
+ if (msg_len > BNGE_HWRM_MAX_REQ_LEN &&
+ msg_len > bd->hwrm_max_ext_req_len) {
+ dev_warn(bd->dev, "oversized hwrm request, req_type 0x%x",
+ req_type);
+ rc = -E2BIG;
+ goto exit;
+ }
+
+ token = bnge_hwrm_create_token(bd, dst);
+ if (!token) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ ctx->req->seq_id = cpu_to_le16(token->seq_id);
+
+ /* Ensure any associated DMA buffers are written before doorbell */
+ wmb();
+
+ /* Write request msg to hwrm channel */
+ __iowrite32_copy(bd->bar0 + bar_offset, data, msg_len / 4);
+
+ for (i = msg_len; i < max_req_len; i += 4)
+ writel(0, bd->bar0 + bar_offset + i);
+
+ /* Ring channel doorbell */
+ writel(1, bd->bar0 + doorbell_offset);
+
+ bnge_hwrm_req_dbg(bd, ctx->req);
+
+ /* Limit timeout to an upper limit */
+ timeout = min(ctx->timeout,
+ bd->hwrm_cmd_max_timeout ?: BNGE_HWRM_CMD_MAX_TIMEOUT);
+ /* convert timeout to usec */
+ timeout *= 1000;
+
+ i = 0;
+ /* Short timeout for the first few iterations:
+ * number of loops = number of loops for short timeout +
+ * number of loops for standard timeout.
+ */
+ tmo_count = BNGE_HWRM_SHORT_TIMEOUT_COUNTER;
+ timeout = timeout - BNGE_HWRM_SHORT_MIN_TIMEOUT *
+ BNGE_HWRM_SHORT_TIMEOUT_COUNTER;
+ tmo_count += DIV_ROUND_UP(timeout, BNGE_HWRM_MIN_TIMEOUT);
+
+ if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
+ /* Wait until hwrm response cmpl interrupt is processed */
+ while (READ_ONCE(token->state) < BNGE_HWRM_COMPLETE &&
+ i++ < tmo_count) {
+ /* on first few passes, just barely sleep */
+ if (i < BNGE_HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(BNGE_HWRM_SHORT_MIN_TIMEOUT,
+ BNGE_HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ usleep_range(BNGE_HWRM_MIN_TIMEOUT,
+ BNGE_HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (READ_ONCE(token->state) != BNGE_HWRM_COMPLETE) {
+ bnge_hwrm_err(bd, ctx, "No hwrm cmpl received: 0x%x\n",
+ req_type);
+ goto exit;
+ }
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ valid = ((u8 *)ctx->resp) + len - 1;
+ } else {
+ __le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
+ int j;
+
+ /* Check if response len is updated */
+ for (i = 0; i < tmo_count; i++) {
+ if (token &&
+ READ_ONCE(token->state) == BNGE_HWRM_DEFERRED) {
+ bnge_hwrm_destroy_token(bd, token);
+ token = NULL;
+ }
+
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ if (len) {
+ __le16 resp_seq = READ_ONCE(ctx->resp->seq_id);
+
+ if (resp_seq == ctx->req->seq_id)
+ break;
+ if (resp_seq != seen_out_of_seq) {
+ dev_warn(bd->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
+ le16_to_cpu(resp_seq), req_type, le16_to_cpu(ctx->req->seq_id));
+ seen_out_of_seq = resp_seq;
+ }
+ }
+
+ /* on first few passes, just barely sleep */
+ if (i < BNGE_HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(BNGE_HWRM_SHORT_MIN_TIMEOUT,
+ BNGE_HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ usleep_range(BNGE_HWRM_MIN_TIMEOUT,
+ BNGE_HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (i >= tmo_count) {
+ bnge_hwrm_err(bd, ctx,
+ "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
+ bnge_hwrm_timeout(i), req_type,
+ le16_to_cpu(ctx->req->seq_id), len);
+ goto exit;
+ }
+
+ /* Last byte of resp contains valid bit */
+ valid = ((u8 *)ctx->resp) + len - 1;
+ for (j = 0; j < BNGE_HWRM_FIN_WAIT_USEC; ) {
+ /* make sure we read from updated DMA memory */
+ dma_rmb();
+ if (*valid)
+ break;
+ if (j < 10) {
+ udelay(1);
+ j++;
+ } else {
+ usleep_range(20, 30);
+ j += 20;
+ }
+ }
+
+ if (j >= BNGE_HWRM_FIN_WAIT_USEC) {
+ bnge_hwrm_err(bd, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
+ bnge_hwrm_timeout(i) + j, req_type,
+ le16_to_cpu(ctx->req->seq_id), len, *valid);
+ goto exit;
+ }
+ }
+
+ /* Zero valid bit for compatibility. Valid bit in an older spec
+ * may become a new field in a newer spec. We must make sure that
+ * a new field not implemented by old spec will read zero.
+ */
+ *valid = 0;
+ rc = le16_to_cpu(ctx->resp->error_code);
+ if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNGE_HWRM_CTX_SILENT))
+ dev_warn(bd->dev, "FW returned busy, hwrm req_type 0x%x\n",
+ req_type);
+ else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ bnge_hwrm_err(bd, ctx, "hwrm req_type 0x%x seq id 0x%x error %d\n",
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
+ rc = bnge_map_hwrm_error(rc);
+
+exit:
+ if (token)
+ bnge_hwrm_destroy_token(bd, token);
+ if (ctx->flags & BNGE_HWRM_INTERNAL_CTX_OWNED)
+ ctx->flags |= BNGE_HWRM_INTERNAL_RESP_DIRTY;
+ else
+ __hwrm_ctx_invalidate(bd, ctx);
+ return rc;
+}
+
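+/* Read the MSI-X table size from config space; the QSIZE field encodes
+ * the table size minus one, hence the +1.
+ */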
+int bnge_hwrm_req_send(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (!ctx)
+ return -EINVAL;
+
+ return __hwrm_send_ctx(bd, ctx);
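+/* Map the doorbell region from BAR 2; bd->db_size must already have
+ * been discovered before this is called.
+ */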
+}
+
+int bnge_hwrm_req_send_silent(struct bnge_dev *bd, void *req)
+{
+ bnge_hwrm_req_flags(bd, req, BNGE_HWRM_CTX_SILENT);
+ return bnge_hwrm_req_send(bd, req);
+}
+
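+/* Carve a DMA slice for request payloads out of the unused tail of the
+ * mapped request buffer when it fits; otherwise fall back to a one-off
+ * coherent allocation that is released when the request is dropped.
+ */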
+void *
+bnge_hwrm_req_dma_slice(struct bnge_dev *bd, void *req, u32 size,
+ dma_addr_t *dma_handle)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ u8 *end = ((u8 *)req) + BNGE_HWRM_DMA_SIZE;
+ struct input *input = req;
+ u8 *addr, *req_addr = req;
+ u32 max_offset, offset;
+
+ if (!ctx)
+ return NULL;
+
+ max_offset = BNGE_HWRM_DMA_SIZE - ctx->allocated;
+ offset = max_offset - size;
+ offset = ALIGN_DOWN(offset, BNGE_HWRM_DMA_ALIGN);
+ addr = req_addr + offset;
+
+ if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
+ ctx->allocated = end - addr;
+ *dma_handle = ctx->dma_handle + offset;
+ return addr;
+ }
+
+ if (ctx->slice_addr) {
+ dev_err(bd->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
+ (u32)le16_to_cpu(input->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ addr = dma_alloc_coherent(bd->dev, size, dma_handle, ctx->gfp);
+ if (!addr)
+ return NULL;
+
+ ctx->slice_addr = addr;
+ ctx->slice_size = size;
+ ctx->slice_handle = *dma_handle;
+
+ return addr;
+}
+
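+/* Destroy the request DMA pool and mark any requests still waiting for
+ * a completion as cancelled so their pollers exit promptly.
+ */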
+void bnge_cleanup_hwrm_resources(struct bnge_dev *bd)
+{
+ struct bnge_hwrm_wait_token *token;
+
+ dma_pool_destroy(bd->hwrm_dma_pool);
+ bd->hwrm_dma_pool = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(token, &bd->hwrm_pending_list, node)
+ WRITE_ONCE(token->state, BNGE_HWRM_CANCELLED);
+ rcu_read_unlock();
+}
+
+int bnge_init_hwrm_resources(struct bnge_dev *bd)
+{
+ bd->hwrm_dma_pool = dma_pool_create("bnge_hwrm", bd->dev,
+ BNGE_HWRM_DMA_SIZE,
+ BNGE_HWRM_DMA_ALIGN, 0);
+ if (!bd->hwrm_dma_pool)
+ return -ENOMEM;
+
+ INIT_HLIST_HEAD(&bd->hwrm_pending_list);
+ mutex_init(&bd->hwrm_cmd_lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
new file mode 100644
index 000000000000..83794a12cc81
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_HWRM_H_
+#define _BNGE_HWRM_H_
+
+#include <linux/bnxt/hsi.h>
+
+enum bnge_hwrm_ctx_flags {
+ BNGE_HWRM_INTERNAL_CTX_OWNED = BIT(0),
+ BNGE_HWRM_INTERNAL_RESP_DIRTY = BIT(1),
+ BNGE_HWRM_CTX_SILENT = BIT(2),
+ BNGE_HWRM_FULL_WAIT = BIT(3),
+};
+
+#define BNGE_HWRM_API_FLAGS (BNGE_HWRM_CTX_SILENT | BNGE_HWRM_FULL_WAIT)
+
+struct bnge_hwrm_ctx {
+ u64 sentinel;
+ dma_addr_t dma_handle;
+ struct output *resp;
+ struct input *req;
+ dma_addr_t slice_handle;
+ void *slice_addr;
+ u32 slice_size;
+ u32 req_len;
+ enum bnge_hwrm_ctx_flags flags;
+ unsigned int timeout;
+ u32 allocated;
+ gfp_t gfp;
+};
+
+enum bnge_hwrm_wait_state {
+ BNGE_HWRM_PENDING,
+ BNGE_HWRM_DEFERRED,
+ BNGE_HWRM_COMPLETE,
+ BNGE_HWRM_CANCELLED,
+};
+
+enum bnge_hwrm_chnl { BNGE_HWRM_CHNL_CHIMP, BNGE_HWRM_CHNL_KONG };
+
+struct bnge_hwrm_wait_token {
+ struct rcu_head rcu;
+ struct hlist_node node;
+ enum bnge_hwrm_wait_state state;
+ enum bnge_hwrm_chnl dst;
+ u16 seq_id;
+};
+
+#define BNGE_DFLT_HWRM_CMD_TIMEOUT 500
+
+#define BNGE_GRCPF_REG_CHIMP_COMM 0x0
+#define BNGE_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
+
+#define BNGE_HWRM_MAX_REQ_LEN (bd->hwrm_max_req_len)
+#define BNGE_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
+#define BNGE_HWRM_CMD_MAX_TIMEOUT 40000U
+#define BNGE_SHORT_HWRM_CMD_TIMEOUT 20
+#define BNGE_HWRM_CMD_TIMEOUT (bd->hwrm_cmd_timeout)
+#define BNGE_HWRM_RESET_TIMEOUT ((BNGE_HWRM_CMD_TIMEOUT) * 4)
+#define BNGE_HWRM_TARGET 0xffff
+#define BNGE_HWRM_NO_CMPL_RING -1
+#define BNGE_HWRM_REQ_MAX_SIZE 128
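+/* Each pool buffer is laid out as | request | bnge_hwrm_ctx | response |,
+ * with the response occupying the last page and the context sitting
+ * immediately below it, at a fixed offset from the request.
+ */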
+#define BNGE_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */
+#define BNGE_HWRM_RESP_RESERVED PAGE_SIZE
+#define BNGE_HWRM_RESP_OFFSET (BNGE_HWRM_DMA_SIZE - \
+ BNGE_HWRM_RESP_RESERVED)
+#define BNGE_HWRM_CTX_OFFSET (BNGE_HWRM_RESP_OFFSET - \
+ sizeof(struct bnge_hwrm_ctx))
+#define BNGE_HWRM_DMA_ALIGN 16
+#define BNGE_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */
+#define BNGE_HWRM_SHORT_MIN_TIMEOUT 3
+#define BNGE_HWRM_SHORT_MAX_TIMEOUT 10
+#define BNGE_HWRM_SHORT_TIMEOUT_COUNTER 5
+
+#define BNGE_HWRM_MIN_TIMEOUT 25
+#define BNGE_HWRM_MAX_TIMEOUT 40
+
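+/* Convert a poll-loop iteration count back into the approximate number
+ * of microseconds slept, mirroring the short/standard interval split
+ * used by the send path.
+ */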
+static inline unsigned int bnge_hwrm_timeout(unsigned int n)
+{
+ return n <= BNGE_HWRM_SHORT_TIMEOUT_COUNTER ?
+ n * BNGE_HWRM_SHORT_MIN_TIMEOUT :
+ BNGE_HWRM_SHORT_TIMEOUT_COUNTER *
+ BNGE_HWRM_SHORT_MIN_TIMEOUT +
+ (n - BNGE_HWRM_SHORT_TIMEOUT_COUNTER) *
+ BNGE_HWRM_MIN_TIMEOUT;
+}
+
+#define BNGE_HWRM_FIN_WAIT_USEC 50000
+
+void bnge_cleanup_hwrm_resources(struct bnge_dev *bd);
+int bnge_init_hwrm_resources(struct bnge_dev *bd);
+
+int bnge_hwrm_req_create(struct bnge_dev *bd, void **req, u16 req_type,
+ u32 req_len);
+#define bnge_hwrm_req_init(bd, req, req_type) \
+ bnge_hwrm_req_create((bd), (void **)&(req), (req_type), \
+ sizeof(*(req)))
+void *bnge_hwrm_req_hold(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_drop(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_flags(struct bnge_dev *bd, void *req,
+ enum bnge_hwrm_ctx_flags flags);
+void bnge_hwrm_req_timeout(struct bnge_dev *bd, void *req,
+ unsigned int timeout);
+int bnge_hwrm_req_send(struct bnge_dev *bd, void *req);
+int bnge_hwrm_req_send_silent(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_alloc_flags(struct bnge_dev *bd, void *req, gfp_t flags);
+void *bnge_hwrm_req_dma_slice(struct bnge_dev *bd, void *req, u32 size,
+ dma_addr_t *dma);
+#endif /* _BNGE_HWRM_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
new file mode 100644
index 000000000000..5c178fade065
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/bnxt/hsi.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_rmem.h"
+#include "bnge_resc.h"
+
+int bnge_hwrm_ver_get(struct bnge_dev *bd)
+{
+ u32 dev_caps_cfg, hwrm_ver, hwrm_spec_code;
+ u16 fw_maj, fw_min, fw_bld, fw_rsv;
+ struct hwrm_ver_get_output *resp;
+ struct hwrm_ver_get_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VER_GET);
+ if (rc)
+ return rc;
+
+ bnge_hwrm_req_flags(bd, req, BNGE_HWRM_FULL_WAIT);
+ bd->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
+ req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req->hwrm_intf_min = HWRM_VERSION_MINOR;
+ req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto hwrm_ver_get_exit;
+
+ memcpy(&bd->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+ hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
+ resp->hwrm_intf_min_8b << 8 |
+ resp->hwrm_intf_upd_8b;
+ hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
+ HWRM_VERSION_UPDATE;
+
+ if (hwrm_spec_code > hwrm_ver)
+ snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
+ HWRM_VERSION_UPDATE);
+ else
+ snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b);
+
+ fw_maj = le16_to_cpu(resp->hwrm_fw_major);
+ fw_min = le16_to_cpu(resp->hwrm_fw_minor);
+ fw_bld = le16_to_cpu(resp->hwrm_fw_build);
+ fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
+
+ bd->fw_ver_code = BNGE_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
+ snprintf(bd->fw_ver_str, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ fw_maj, fw_min, fw_bld, fw_rsv);
+
+ if (strlen(resp->active_pkg_name)) {
+ int fw_ver_len = strlen(bd->fw_ver_str);
+
+ snprintf(bd->fw_ver_str + fw_ver_len,
+ FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
+ resp->active_pkg_name);
+ bd->fw_cap |= BNGE_FW_CAP_PKG_VER;
+ }
+
+ bd->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
+ if (!bd->hwrm_cmd_timeout)
+ bd->hwrm_cmd_timeout = BNGE_DFLT_HWRM_CMD_TIMEOUT;
+ bd->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
+ if (!bd->hwrm_cmd_max_timeout)
+ bd->hwrm_cmd_max_timeout = BNGE_HWRM_CMD_MAX_TIMEOUT;
+ else if (bd->hwrm_cmd_max_timeout > BNGE_HWRM_CMD_MAX_TIMEOUT)
+ dev_warn(bd->dev, "Default HWRM commands max timeout increased to %d seconds\n",
+ bd->hwrm_cmd_max_timeout / 1000);
+
+ bd->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+ bd->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
+
+ if (bd->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
+ bd->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
+
+ bd->chip_num = le16_to_cpu(resp->chip_num);
+ bd->chip_rev = resp->chip_rev;
+
+ dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
+ if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
+ bd->fw_cap |= BNGE_FW_CAP_SHORT_CMD;
+
+ if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
+ bd->fw_cap |= BNGE_FW_CAP_KONG_MB_CHNL;
+
+ if (dev_caps_cfg &
+ VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
+ bd->fw_cap |= BNGE_FW_CAP_CFA_ADV_FLOW;
+
+hwrm_ver_get_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int
+bnge_hwrm_nvm_dev_info(struct bnge_dev *bd,
+ struct hwrm_nvm_get_dev_info_output *nvm_info)
+{
+ struct hwrm_nvm_get_dev_info_output *resp;
+ struct hwrm_nvm_get_dev_info_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_NVM_GET_DEV_INFO);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ memcpy(nvm_info, resp, sizeof(*resp));
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_reset(struct bnge_dev *bd)
+{
+ struct hwrm_func_reset_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESET);
+ if (rc)
+ return rc;
+
+ req->enables = 0;
+ bnge_hwrm_req_timeout(bd, req, BNGE_HWRM_RESET_TIMEOUT);
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_fw_set_time(struct bnge_dev *bd)
+{
+ struct hwrm_fw_set_time_input *req;
+ struct tm tm;
+ int rc;
+
+ time64_to_tm(ktime_get_real_seconds(), 0, &tm);
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FW_SET_TIME);
+ if (rc)
+ return rc;
+
+ req->year = cpu_to_le16(1900 + tm.tm_year);
+ req->month = 1 + tm.tm_mon;
+ req->day = tm.tm_mday;
+ req->hour = tm.tm_hour;
+ req->minute = tm.tm_min;
+ req->second = tm.tm_sec;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd)
+{
+ struct hwrm_func_drv_rgtr_output *resp;
+ struct hwrm_func_drv_rgtr_input *req;
+ u32 flags;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_RGTR);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER |
+ FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+ req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
+ flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
+
+ req->flags = cpu_to_le32(flags);
+ req->ver_maj_8b = DRV_VER_MAJ;
+ req->ver_min_8b = DRV_VER_MIN;
+ req->ver_upd_8b = DRV_VER_UPD;
+ req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
+ req->ver_min = cpu_to_le16(DRV_VER_MIN);
+ req->ver_upd = cpu_to_le16(DRV_VER_UPD);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc) {
+ set_bit(BNGE_STATE_DRV_REGISTERED, &bd->state);
+ if (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ bd->fw_cap |= BNGE_FW_CAP_IF_CHANGE;
+ }
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd)
+{
+ struct hwrm_func_drv_unrgtr_input *req;
+ int rc;
+
+ if (!test_and_clear_bit(BNGE_STATE_DRV_REGISTERED, &bd->state))
+ return 0;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_UNRGTR);
+ if (rc)
+ return rc;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+static void bnge_init_ctx_initializer(struct bnge_ctx_mem_type *ctxm,
+ u8 init_val, u8 init_offset,
+ bool init_mask_set)
+{
+ ctxm->init_value = init_val;
+ ctxm->init_offset = BNGE_CTX_INIT_INVALID_OFFSET;
+ if (init_mask_set)
+ ctxm->init_offset = init_offset * 4;
+ else
+ ctxm->init_value = 0;
+}
+
+static int bnge_alloc_all_ctx_pg_info(struct bnge_dev *bd, int ctx_max)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ u16 type;
+
+ for (type = 0; type < ctx_max; type++) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ int n = 1;
+
+ if (!ctxm->max_entries)
+ continue;
+
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
+ if (!ctxm->pg_info)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+#define BNGE_CTX_INIT_VALID(flags) \
+ (!!((flags) & \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
+
+int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_backing_store_qcaps_v2_output *resp;
+ struct hwrm_func_backing_store_qcaps_v2_input *req;
+ struct bnge_ctx_mem_info *ctx;
+ u16 type;
+ int rc;
+
+ if (bd->ctx)
+ return 0;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
+ if (rc)
+ return rc;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bd->ctx = ctx;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+
+ for (type = 0; type < BNGE_CTX_V2_MAX; ) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ u8 init_val, init_off, i;
+ __le32 *p;
+ u32 flags;
+
+ req->type = cpu_to_le16(type);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto ctx_done;
+ flags = le32_to_cpu(resp->flags);
+ type = le16_to_cpu(resp->next_valid_type);
+ if (!(flags &
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+ continue;
+
+ ctxm->type = le16_to_cpu(resp->type);
+ ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->flags = flags;
+ ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
+ ctxm->entry_multiple = resp->entry_multiple;
+ ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
+ init_val = resp->ctx_init_value;
+ init_off = resp->ctx_init_offset;
+ bnge_init_ctx_initializer(ctxm, init_val, init_off,
+ BNGE_CTX_INIT_VALID(flags));
+ ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
+ BNGE_MAX_SPLIT_ENTRY);
+ for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
+ i++, p++)
+ ctxm->split[i] = le32_to_cpu(*p);
+ }
+ rc = bnge_alloc_all_ctx_pg_info(bd, BNGE_CTX_V2_MAX);
+
+ctx_done:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
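+/* Encode the page attribute and directory pointer for one ring:
+ * BNGE_SET_CTX_PAGE_ATTR() seeds the page-size bits, the PBL level
+ * (1 or 2) is then OR'd into the low bits, and a single-page ring
+ * (depth 0) points pg_dir directly at the data page.
+ */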
+static void bnge_hwrm_set_pg_attr(struct bnge_ring_mem_info *rmem, u8 *pg_attr,
+ __le64 *pg_dir)
+{
+ if (!rmem->nr_pages)
+ return;
+
+ BNGE_SET_CTX_PAGE_ATTR(*pg_attr);
+ if (rmem->depth >= 1) {
+ if (rmem->depth == 2)
+ *pg_attr |= 2;
+ else
+ *pg_attr |= 1;
+ *pg_dir = cpu_to_le64(rmem->dma_pg_tbl);
+ } else {
+ *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
+ }
+}
+
+int bnge_hwrm_func_backing_store(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm,
+ bool last)
+{
+ struct hwrm_func_backing_store_cfg_v2_input *req;
+ u32 instance_bmap = ctxm->instance_bmap;
+ int i, j, rc = 0, n = 1;
+ __le32 *p;
+
+ if (!(ctxm->flags & BNGE_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+ return 0;
+
+ if (instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ else
+ instance_bmap = 1;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
+ if (rc)
+ return rc;
+ bnge_hwrm_req_hold(bd, req);
+ req->type = cpu_to_le16(ctxm->type);
+ req->entry_size = cpu_to_le16(ctxm->entry_size);
+ req->subtype_valid_cnt = ctxm->split_entry_cnt;
+ for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
+ p[i] = cpu_to_le32(ctxm->split[i]);
+ for (i = 0, j = 0; j < n && !rc; i++) {
+ struct bnge_ctx_pg_info *ctx_pg;
+
+ if (!(instance_bmap & (1 << i)))
+ continue;
+ req->instance = cpu_to_le16(i);
+ ctx_pg = &ctxm->pg_info[j++];
+ if (!ctx_pg->entries)
+ continue;
+ req->num_entries = cpu_to_le32(ctx_pg->entries);
+ bnge_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req->page_size_pbl_level,
+ &req->page_dir);
+ if (last && j == n)
+ req->flags =
+ cpu_to_le32(BNGE_BS_CFG_ALL_DONE);
+ rc = bnge_hwrm_req_send(bd, req);
+ }
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
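+/* Refresh the reserved resource counts from FUNC_QCFG. When RX buffer
+ * aggregation is required, each logical RX ring consumes two hardware
+ * rings, hence the rx >>= 1 / rx <<= 1 around the fix-up below.
+ */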
+static int bnge_hwrm_get_rings(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ u16 cp, stats;
+ u16 rx, tx;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc) {
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+ }
+
+ hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+ hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
+ hw_resc->resv_hw_ring_grps =
+ le32_to_cpu(resp->alloc_hw_ring_grps);
+ hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
+ cp = le16_to_cpu(resp->alloc_cmpl_rings);
+ stats = le16_to_cpu(resp->alloc_stat_ctx);
+ hw_resc->resv_irqs = cp;
+ rx = hw_resc->resv_rx_rings;
+ tx = hw_resc->resv_tx_rings;
+ if (bnge_is_agg_reqd(bd))
+ rx >>= 1;
+ if (cp < (rx + tx)) {
+ rc = bnge_fix_rings_count(&rx, &tx, cp, false);
+ if (rc)
+ goto get_rings_exit;
+ if (bnge_is_agg_reqd(bd))
+ rx <<= 1;
+ hw_resc->resv_rx_rings = rx;
+ hw_resc->resv_tx_rings = tx;
+ }
+ hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
+ hw_resc->resv_hw_ring_grps = rx;
+ hw_resc->resv_cp_rings = cp;
+ hw_resc->resv_stat_ctxs = stats;
+
+get_rings_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+static struct hwrm_func_cfg_input *
+__bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ struct hwrm_func_cfg_input *req;
+ u32 enables = 0;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG))
+ return NULL;
+
+ req->fid = cpu_to_le16(0xffff);
+ enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
+
+ enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->nq ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= hwr->cmpl ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cmpl);
+ req->num_msix = cpu_to_le16(hwr->nq);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
+ req->enables = cpu_to_le32(enables);
+
+ return req;
+}
+
+static int
+bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ req = __bnge_hwrm_reserve_pf_rings(bd, hwr);
+ if (!req)
+ return -ENOMEM;
+
+ if (!req->enables) {
+ bnge_hwrm_req_drop(bd, req);
+ return 0;
+ }
+
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ return rc;
+
+ return bnge_hwrm_get_rings(bd);
+}
+
+int bnge_hwrm_reserve_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ return bnge_hwrm_reserve_pf_rings(bd, hwr);
+}
+
+int bnge_hwrm_func_qcfg(struct bnge_dev *bd)
+{
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto func_qcfg_exit;
+
+ bd->max_mtu = le16_to_cpu(resp->max_mtu_configured);
+ if (!bd->max_mtu)
+ bd->max_mtu = BNGE_MAX_MTU;
+
+ if (bd->db_size)
+ goto func_qcfg_exit;
+
+ bd->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
+ bd->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
+ 1024);
+ if (!bd->db_size || bd->db_size > pci_resource_len(bd->pdev, 2) ||
+ bd->db_size <= bd->db_offset)
+ bd->db_size = pci_resource_len(bd->pdev, 2);
+
+func_qcfg_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_resource_qcaps_output *resp;
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ struct hwrm_func_resource_qcaps_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESOURCE_QCAPS);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send_silent(bd, req);
+ if (rc)
+ goto hwrm_func_resc_qcaps_exit;
+
+ hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
+ hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
+ hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
+ hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ hw_resc->max_nqs = le16_to_cpu(resp->max_msix);
+ hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
+
+hwrm_func_resc_qcaps_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_qcaps_output *resp;
+ struct hwrm_func_qcaps_input *req;
+ struct bnge_pf_info *pf = &bd->pf;
+ u32 flags;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCAPS);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto hwrm_func_qcaps_exit;
+
+ flags = le32_to_cpu(resp->flags);
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
+ bd->flags |= BNGE_EN_ROCE_V1;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
+ bd->flags |= BNGE_EN_ROCE_V2;
+
+ pf->fw_fid = le16_to_cpu(resp->fid);
+ pf->port_id = le16_to_cpu(resp->port_id);
+ memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
+
+ bd->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
+
+hwrm_func_qcaps_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_vnic_qcaps_output *resp;
+ struct hwrm_vnic_qcaps_input *req;
+ int rc;
+
+ bd->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
+ bd->rss_cap &= ~BNGE_RSS_CAP_NEW_RSS_CAP;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_QCAPS);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc) {
+ u32 flags = le32_to_cpu(resp->flags);
+
+ if (flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP)
+ bd->fw_cap |= BNGE_FW_CAP_VLAN_RX_STRIP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
+ bd->rss_cap |= BNGE_RSS_CAP_RSS_TCAM;
+ bd->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
+ if (bd->max_tpa_v2)
+ bd->hw_ring_stats_size = BNGE_RING_STATS_SIZE;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
+ bd->fw_cap |= BNGE_FW_CAP_VNIC_TUNNEL_TPA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_AH_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_AH_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_ESP_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_ESP_V6_RSS_CAP;
+ }
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
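+/* A queue whose service profile is lossy RoCE CNP is reserved for
+ * congestion notification, so it is not counted as an L2 traffic
+ * class below unless RoCE is disabled.
+ */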
+#define BNGE_CNPQ(q_profile) \
+ ((q_profile) == \
+ QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
+
+int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd)
+{
+ struct hwrm_queue_qportcfg_output *resp;
+ struct hwrm_queue_qportcfg_input *req;
+ u8 i, j, *qptr;
+ bool no_rdma;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_QUEUE_QPORTCFG);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto qportcfg_exit;
+
+ if (!resp->max_configurable_queues) {
+ rc = -EINVAL;
+ goto qportcfg_exit;
+ }
+ bd->max_tc = resp->max_configurable_queues;
+ bd->max_lltc = resp->max_configurable_lossless_queues;
+ if (bd->max_tc > BNGE_MAX_QUEUE)
+ bd->max_tc = BNGE_MAX_QUEUE;
+
+ no_rdma = !bnge_is_roce_en(bd);
+ qptr = &resp->queue_id0;
+ for (i = 0, j = 0; i < bd->max_tc; i++) {
+ bd->q_info[j].queue_id = *qptr;
+ bd->q_ids[i] = *qptr++;
+ bd->q_info[j].queue_profile = *qptr++;
+ bd->tc_to_qidx[j] = j;
+ if (!BNGE_CNPQ(bd->q_info[j].queue_profile) || no_rdma)
+ j++;
+ }
+ bd->max_q = bd->max_tc;
+ bd->max_tc = max_t(u8, j, 1);
+
+ if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
+ bd->max_tc = 1;
+
+ if (bd->max_lltc > bd->max_tc)
+ bd->max_lltc = bd->max_tc;
+
+qportcfg_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
new file mode 100644
index 000000000000..6c03923eb559
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_HWRM_LIB_H_
+#define _BNGE_HWRM_LIB_H_
+
+int bnge_hwrm_ver_get(struct bnge_dev *bd);
+int bnge_hwrm_func_reset(struct bnge_dev *bd);
+int bnge_hwrm_fw_set_time(struct bnge_dev *bd);
+int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd);
+int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd);
+int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_nvm_dev_info(struct bnge_dev *bd,
+ struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
+int bnge_hwrm_func_backing_store(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm,
+ bool last);
+int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_reserve_rings(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr);
+int bnge_hwrm_func_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_func_qcfg(struct bnge_dev *bd);
+int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd);
+
+#endif /* _BNGE_HWRM_LIB_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
new file mode 100644
index 000000000000..02254934f3d0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <net/ip.h>
+#include <linux/skbuff.h>
+
+#include "bnge.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_ethtool.h"
+
+static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int bnge_open(struct net_device *dev)
+{
+ return 0;
+}
+
+static int bnge_close(struct net_device *dev)
+{
+ return 0;
+}
+
+static const struct net_device_ops bnge_netdev_ops = {
+ .ndo_open = bnge_open,
+ .ndo_stop = bnge_close,
+ .ndo_start_xmit = bnge_start_xmit,
+};
+
+static void bnge_init_mac_addr(struct bnge_dev *bd)
+{
+ eth_hw_addr_set(bd->netdev, bd->pf.mac_addr);
+}
+
+static void bnge_set_tpa_flags(struct bnge_dev *bd)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+
+ bn->priv_flags &= ~BNGE_NET_EN_TPA;
+
+ if (bd->netdev->features & NETIF_F_LRO)
+ bn->priv_flags |= BNGE_NET_EN_LRO;
+ else if (bd->netdev->features & NETIF_F_GRO_HW)
+ bn->priv_flags |= BNGE_NET_EN_GRO;
+}
+
+static void bnge_init_l2_fltr_tbl(struct bnge_net *bn)
+{
+ int i;
+
+ for (i = 0; i < BNGE_L2_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bn->l2_fltr_hash_tbl[i]);
+ get_random_bytes(&bn->hash_seed, sizeof(bn->hash_seed));
+}
+
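+/* Size the RX buffers and all rings. As a worked example, assuming a
+ * 1500-byte MTU and NET_IP_ALIGN == 2: rx_size is
+ * SKB_DATA_ALIGN(1500 + 14 + 2 + 8) and rx_space adds NET_SKB_PAD plus
+ * the skb_shared_info footer; only when rx_space exceeds one page is
+ * the jumbo (multi-buffer) path enabled.
+ */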
+void bnge_set_ring_params(struct bnge_dev *bd)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+ u32 ring_size, rx_size, rx_space, max_rx_cmpl;
+ u32 agg_factor = 0, agg_ring_size = 0;
+
+ /* 8 for CRC and VLAN */
+ rx_size = SKB_DATA_ALIGN(bn->netdev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+ rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ bn->rx_copy_thresh = BNGE_RX_COPY_THRESH;
+ ring_size = bn->rx_ring_size;
+ bn->rx_agg_ring_size = 0;
+ bn->rx_agg_nr_pages = 0;
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA)
+ agg_factor = min_t(u32, 4, 65536 / BNGE_RX_PAGE_SIZE);
+
+ bn->priv_flags &= ~BNGE_NET_EN_JUMBO;
+ if (rx_space > PAGE_SIZE) {
+ u32 jumbo_factor;
+
+ bn->priv_flags |= BNGE_NET_EN_JUMBO;
+ jumbo_factor = PAGE_ALIGN(bn->netdev->mtu - 40) >> PAGE_SHIFT;
+ if (jumbo_factor > agg_factor)
+ agg_factor = jumbo_factor;
+ }
+ if (agg_factor) {
+ if (ring_size > BNGE_MAX_RX_DESC_CNT_JUM_ENA) {
+ ring_size = BNGE_MAX_RX_DESC_CNT_JUM_ENA;
+ netdev_warn(bn->netdev, "RX ring size reduced from %d to %d due to jumbo ring\n",
+ bn->rx_ring_size, ring_size);
+ bn->rx_ring_size = ring_size;
+ }
+ agg_ring_size = ring_size * agg_factor;
+
+ bn->rx_agg_nr_pages = bnge_adjust_pow_two(agg_ring_size,
+ RX_DESC_CNT);
+ if (bn->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+ u32 tmp = agg_ring_size;
+
+ bn->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+ agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+ netdev_warn(bn->netdev, "RX agg ring size %d reduced to %d.\n",
+ tmp, agg_ring_size);
+ }
+ bn->rx_agg_ring_size = agg_ring_size;
+ bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+
+ rx_size = SKB_DATA_ALIGN(BNGE_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+
+ bn->rx_buf_use_size = rx_size;
+ bn->rx_buf_size = rx_space;
+
+ bn->rx_nr_pages = bnge_adjust_pow_two(ring_size, RX_DESC_CNT);
+ bn->rx_ring_mask = (bn->rx_nr_pages * RX_DESC_CNT) - 1;
+
+ ring_size = bn->tx_ring_size;
+ bn->tx_nr_pages = bnge_adjust_pow_two(ring_size, TX_DESC_CNT);
+ bn->tx_ring_mask = (bn->tx_nr_pages * TX_DESC_CNT) - 1;
+
+ max_rx_cmpl = bn->rx_ring_size;
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA)
+ max_rx_cmpl += bd->max_tpa_v2;
+ ring_size = max_rx_cmpl * 2 + agg_ring_size + bn->tx_ring_size;
+ bn->cp_ring_size = ring_size;
+
+ bn->cp_nr_pages = bnge_adjust_pow_two(ring_size, CP_DESC_CNT);
+ if (bn->cp_nr_pages > MAX_CP_PAGES) {
+ bn->cp_nr_pages = MAX_CP_PAGES;
+ bn->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+ netdev_warn(bn->netdev, "completion ring size %d reduced to %d.\n",
+ ring_size, bn->cp_ring_size);
+ }
+ bn->cp_bit = bn->cp_nr_pages * CP_DESC_CNT;
+ bn->cp_ring_mask = bn->cp_bit - 1;
+}
+
+int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
+{
+ struct net_device *netdev;
+ struct bnge_net *bn;
+ int rc;
+
+ netdev = alloc_etherdev_mqs(sizeof(*bn), max_irqs * BNGE_MAX_QUEUE,
+ max_irqs);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, bd->dev);
+ bd->netdev = netdev;
+
+ netdev->netdev_ops = &bnge_netdev_ops;
+
+ bnge_set_ethtool_ops(netdev);
+
+ bn = netdev_priv(netdev);
+ bn->netdev = netdev;
+ bn->bd = bd;
+
+ netdev->min_mtu = ETH_ZLEN;
+ netdev->max_mtu = bd->max_mtu;
+
+ netdev->hw_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ NETIF_F_GRO;
+
+ if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+
+ if (BNGE_SUPPORTS_TPA(bd))
+ netdev->hw_features |= NETIF_F_LRO;
+
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_PARTIAL;
+
+ if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
+
+ netdev->vlan_features = netdev->hw_features | NETIF_F_HIGHDMA;
+ if (bd->fw_cap & BNGE_FW_CAP_VLAN_RX_STRIP)
+ netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_RX;
+ if (bd->fw_cap & BNGE_FW_CAP_VLAN_TX_INSERT)
+ netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_TX;
+
+ if (BNGE_SUPPORTS_TPA(bd))
+ netdev->hw_features |= NETIF_F_GRO_HW;
+
+ netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
+
+ if (netdev->features & NETIF_F_GRO_HW)
+ netdev->features &= ~NETIF_F_LRO;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
+ if (bd->tso_max_segs)
+ netif_set_tso_max_segs(netdev, bd->tso_max_segs);
+
+ bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
+ bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
+
+ bnge_set_tpa_flags(bd);
+ bnge_set_ring_params(bd);
+
+ bnge_init_l2_fltr_tbl(bn);
+ bnge_init_mac_addr(bd);
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
+ goto err_netdev;
+ }
+
+ return 0;
+
+err_netdev:
+ free_netdev(netdev);
+ return rc;
+}
+
+void bnge_netdev_free(struct bnge_dev *bd)
+{
+ struct net_device *netdev = bd->netdev;
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ bd->netdev = NULL;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
new file mode 100644
index 000000000000..a650d71a58db
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_NETDEV_H_
+#define _BNGE_NETDEV_H_
+
+#include <linux/bnxt/hsi.h>
+
+struct tx_bd {
+ __le32 tx_bd_len_flags_type;
+ #define TX_BD_TYPE (0x3f << 0)
+ #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0)
+ #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0)
+ #define TX_BD_FLAGS_PACKET_END (1 << 6)
+ #define TX_BD_FLAGS_NO_CMPL (1 << 7)
+ #define TX_BD_FLAGS_BD_CNT (0x1f << 8)
+ #define TX_BD_FLAGS_BD_CNT_SHIFT 8
+ #define TX_BD_FLAGS_LHINT (3 << 13)
+ #define TX_BD_FLAGS_LHINT_SHIFT 13
+ #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13)
+ #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13)
+ #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13)
+ #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13)
+ #define TX_BD_FLAGS_COAL_NOW (1 << 15)
+ #define TX_BD_LEN (0xffff << 16)
+ #define TX_BD_LEN_SHIFT 16
+ u32 tx_bd_opaque;
+ __le64 tx_bd_haddr;
+} __packed;
+
+struct rx_bd {
+ __le32 rx_bd_len_flags_type;
+ #define RX_BD_TYPE (0x3f << 0)
+ #define RX_BD_TYPE_RX_PACKET_BD 0x4
+ #define RX_BD_TYPE_RX_BUFFER_BD 0x5
+ #define RX_BD_TYPE_RX_AGG_BD 0x6
+ #define RX_BD_TYPE_16B_BD_SIZE (0 << 4)
+ #define RX_BD_TYPE_32B_BD_SIZE (1 << 4)
+ #define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
+ #define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
+ #define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_EOP (1 << 7)
+ #define RX_BD_FLAGS_BUFFERS (3 << 8)
+ #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
+ #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8)
+ #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8)
+ #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8)
+ #define RX_BD_LEN (0xffff << 16)
+ #define RX_BD_LEN_SHIFT 16
+ u32 rx_bd_opaque;
+ __le64 rx_bd_haddr;
+};
+
+struct tx_cmp {
+ __le32 tx_cmp_flags_type;
+ #define CMP_TYPE (0x3f << 0)
+ #define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_TX_L2_COAL_CMP 2
+ #define CMP_TYPE_TX_L2_PKT_TS_CMP 4
+ #define CMP_TYPE_RX_L2_CMP 17
+ #define CMP_TYPE_RX_AGG_CMP 18
+ #define CMP_TYPE_RX_L2_TPA_START_CMP 19
+ #define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_RX_TPA_AGG_CMP 22
+ #define CMP_TYPE_RX_L2_V3_CMP 23
+ #define CMP_TYPE_RX_L2_TPA_START_V3_CMP 25
+ #define CMP_TYPE_STATUS_CMP 32
+ #define CMP_TYPE_REMOTE_DRIVER_REQ 34
+ #define CMP_TYPE_REMOTE_DRIVER_RESP 36
+ #define CMP_TYPE_ERROR_STATUS 48
+ #define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL
+ #define CMPL_BASE_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define TX_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_CMP_FLAGS_PUSH (1 << 7)
+ u32 tx_cmp_opaque;
+ __le32 tx_cmp_errors_v;
+ #define TX_CMP_V (1 << 0)
+ #define TX_CMP_ERRORS_BUFFER_ERROR (7 << 1)
+ #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0
+ #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2
+ #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4
+ #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5
+ #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4)
+ #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5)
+ #define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
+ #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
+ __le32 sq_cons_idx;
+ #define TX_CMP_SQ_CONS_IDX_MASK 0x00ffffff
+};
+
+struct bnge_sw_tx_bd {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+ struct page *page;
+ u8 is_ts_pkt;
+ u8 is_push;
+ u8 action;
+ unsigned short nr_frags;
+ union {
+ u16 rx_prod;
+ u16 txts_prod;
+ };
+};
+
+struct bnge_sw_rx_bd {
+ void *data;
+ u8 *data_ptr;
+ dma_addr_t mapping;
+};
+
+struct bnge_sw_rx_agg_bd {
+ struct page *page;
+ unsigned int offset;
+ dma_addr_t mapping;
+};
+
+#define BNGE_RX_COPY_THRESH 256
+
+#define BNGE_HW_FEATURE_VLAN_ALL_RX \
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
+#define BNGE_HW_FEATURE_VLAN_ALL_TX \
+ (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)
+
+enum {
+ BNGE_NET_EN_GRO = BIT(0),
+ BNGE_NET_EN_LRO = BIT(1),
+ BNGE_NET_EN_JUMBO = BIT(2),
+};
+
+#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
+
+struct bnge_net {
+ struct bnge_dev *bd;
+ struct net_device *netdev;
+
+ u32 priv_flags;
+
+ u32 rx_ring_size;
+ u32 rx_buf_size;
+ u32 rx_buf_use_size; /* usable size */
+ u32 rx_agg_ring_size;
+ u32 rx_copy_thresh;
+ u32 rx_ring_mask;
+ u32 rx_agg_ring_mask;
+ u16 rx_nr_pages;
+ u16 rx_agg_nr_pages;
+
+ u32 tx_ring_size;
+ u32 tx_ring_mask;
+ u16 tx_nr_pages;
+
+ /* NQs and Completion rings */
+ u32 cp_ring_size;
+ u32 cp_ring_mask;
+ u32 cp_bit;
+ u16 cp_nr_pages;
+
+#define BNGE_L2_FLTR_HASH_SIZE 32
+#define BNGE_L2_FLTR_HASH_MASK (BNGE_L2_FLTR_HASH_SIZE - 1)
+ struct hlist_head l2_fltr_hash_tbl[BNGE_L2_FLTR_HASH_SIZE];
+ u32 hash_seed;
+ u64 toeplitz_prefix;
+};
+
+#define BNGE_DEFAULT_RX_RING_SIZE 511
+#define BNGE_DEFAULT_TX_RING_SIZE 511
+
+int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs);
+void bnge_netdev_free(struct bnge_dev *bd);
+void bnge_set_ring_params(struct bnge_dev *bd);
+
+#if (BNGE_PAGE_SHIFT == 16)
+#define MAX_RX_PAGES_AGG_ENA 1
+#define MAX_RX_PAGES 4
+#define MAX_RX_AGG_PAGES 4
+#define MAX_TX_PAGES 1
+#define MAX_CP_PAGES 16
+#else
+#define MAX_RX_PAGES_AGG_ENA 8
+#define MAX_RX_PAGES 32
+#define MAX_RX_AGG_PAGES 32
+#define MAX_TX_PAGES 8
+#define MAX_CP_PAGES 128
+#endif
+
+#define BNGE_RX_PAGE_SIZE (1 << BNGE_RX_PAGE_SHIFT)
+
+#define RX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_cmp))
+#define SW_RXBD_RING_SIZE (sizeof(struct bnge_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnge_sw_rx_agg_bd) * RX_DESC_CNT)
+#define SW_TXBD_RING_SIZE (sizeof(struct bnge_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+#define BNGE_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNGE_MAX_RX_DESC_CNT_JUM_ENA (RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1)
+#define BNGE_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNGE_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
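+
+/* Example with 4K pages: struct rx_bd is 16 bytes, so RX_DESC_CNT is
+ * 256 descriptors per page and a two-page ring yields rx_ring_mask 511.
+ */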
+
+#endif /* _BNGE_NETDEV_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
new file mode 100644
index 000000000000..c79a3607a1b7
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
@@ -0,0 +1,605 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_resc.h"
+
+static u16 bnge_num_tx_to_cp(struct bnge_dev *bd, u16 tx)
+{
+ u16 tcs = bd->num_tc;
+
+ if (!tcs)
+ tcs = 1;
+
+ return tx / tcs;
+}
+
+static u16 bnge_get_max_func_irqs(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+
+ return min_t(u16, hw_resc->max_irqs, hw_resc->max_nqs);
+}
+
+static unsigned int bnge_get_max_func_stat_ctxs(struct bnge_dev *bd)
+{
+ return bd->hw_resc.max_stat_ctxs;
+}
+
+static unsigned int bnge_get_max_func_cp_rings(struct bnge_dev *bd)
+{
+ return bd->hw_resc.max_cp_rings;
+}
+
+static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
+{
+ int roce_msix = BNGE_MAX_ROCE_MSIX;
+
+ return min_t(int, roce_msix, num_online_cpus() + 1);
+}
+
+static u16 bnge_aux_get_msix(struct bnge_dev *bd)
+{
+ if (bnge_is_roce_en(bd))
+ return bd->aux_num_msix;
+
+ return 0;
+}
+
+static void bnge_aux_set_msix_num(struct bnge_dev *bd, u16 num)
+{
+ if (bnge_is_roce_en(bd))
+ bd->aux_num_msix = num;
+}
+
+static u16 bnge_aux_get_stat_ctxs(struct bnge_dev *bd)
+{
+ if (bnge_is_roce_en(bd))
+ return bd->aux_num_stat_ctxs;
+
+ return 0;
+}
+
+static void bnge_aux_set_stat_ctxs(struct bnge_dev *bd, u16 num_aux_ctx)
+{
+ if (bnge_is_roce_en(bd))
+ bd->aux_num_stat_ctxs = num_aux_ctx;
+}
+
+static u16 bnge_func_stat_ctxs_demand(struct bnge_dev *bd)
+{
+ return bd->nq_nr_rings + bnge_aux_get_stat_ctxs(bd);
+}
+
+static int bnge_get_dflt_aux_stat_ctxs(struct bnge_dev *bd)
+{
+ int stat_ctx = 0;
+
+ if (bnge_is_roce_en(bd)) {
+ stat_ctx = BNGE_MIN_ROCE_STAT_CTXS;
+
+ if (!bd->pf.port_id && bd->port_count > 1)
+ stat_ctx++;
+ }
+
+ return stat_ctx;
+}
+
+static u16 bnge_nqs_demand(struct bnge_dev *bd)
+{
+ return bd->nq_nr_rings + bnge_aux_get_msix(bd);
+}
+
+static u16 bnge_cprs_demand(struct bnge_dev *bd)
+{
+ return bd->tx_nr_rings + bd->rx_nr_rings;
+}
+
+static u16 bnge_get_avail_msix(struct bnge_dev *bd, int num)
+{
+ u16 max_irq = bnge_get_max_func_irqs(bd);
+ u16 total_demand = bd->nq_nr_rings + num;
+
+ if (max_irq < total_demand) {
+ num = max_irq - bd->nq_nr_rings;
+ if (num <= 0)
+ return 0;
+ }
+
+ return num;
+}
+
+static u16 bnge_num_cp_to_tx(struct bnge_dev *bd, u16 tx_chunks)
+{
+ return tx_chunks * bd->num_tc;
+}
+
+int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared)
+{
+ u16 _rx = *rx, _tx = *tx;
+
+ if (shared) {
+ *rx = min_t(u16, _rx, max);
+ *tx = min_t(u16, _tx, max);
+ } else {
+ if (max < 2)
+ return -ENOMEM;
+ while (_rx + _tx > max) {
+ if (_rx > _tx && _rx > 1)
+ _rx--;
+ else if (_tx > 1)
+ _tx--;
+ }
+ *rx = _rx;
+ *tx = _tx;
+ }
+
+ return 0;
+}
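+
+/* Example: with max = 10 and a non-shared request of rx = tx = 8, the
+ * loop above trims the larger side first and converges on rx = 5,
+ * tx = 5 so that rx + tx <= max.
+ */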
+
+static int bnge_adjust_rings(struct bnge_dev *bd, u16 *rx,
+ u16 *tx, u16 max_nq, bool sh)
+{
+ u16 tx_chunks = bnge_num_tx_to_cp(bd, *tx);
+
+ if (tx_chunks != *tx) {
+ u16 tx_saved = tx_chunks;
+ int rc;
+
+ rc = bnge_fix_rings_count(rx, &tx_chunks, max_nq, sh);
+ if (rc)
+ return rc;
+ if (tx_chunks != tx_saved)
+ *tx = bnge_num_cp_to_tx(bd, tx_chunks);
+ return 0;
+ }
+
+ return bnge_fix_rings_count(rx, tx, max_nq, sh);
+}
+
+static int bnge_cal_nr_rss_ctxs(u16 rx_rings)
+{
+ if (!rx_rings)
+ return 0;
+
+ return bnge_adjust_pow_two(rx_rings - 1,
+ BNGE_RSS_TABLE_ENTRIES);
+}
+
+static u16 bnge_rss_ctxs_in_use(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr)
+{
+ return bnge_cal_nr_rss_ctxs(hwr->grp);
+}
+
+static u16 bnge_get_total_vnics(struct bnge_dev *bd, u16 rx_rings)
+{
+ return 1;
+}
+
+static u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
+{
+ return bnge_cal_nr_rss_ctxs(bd->rx_nr_rings) *
+ BNGE_RSS_TABLE_ENTRIES;
+}
+
+static void bnge_set_dflt_rss_indir_tbl(struct bnge_dev *bd)
+{
+ u16 max_entries, pad;
+ u32 *rss_indir_tbl;
+ int i;
+
+ max_entries = bnge_get_rxfh_indir_size(bd);
+ rss_indir_tbl = &bd->rss_indir_tbl[0];
+
+ for (i = 0; i < max_entries; i++)
+ rss_indir_tbl[i] = ethtool_rxfh_indir_default(i,
+ bd->rx_nr_rings);
+
+ pad = bd->rss_indir_tbl_entries - max_entries;
+ if (pad)
+ memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
+}
+
+static void bnge_copy_reserved_rings(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+
+ hwr->tx = hw_resc->resv_tx_rings;
+ hwr->rx = hw_resc->resv_rx_rings;
+ hwr->nq = hw_resc->resv_irqs;
+ hwr->cmpl = hw_resc->resv_cp_rings;
+ hwr->grp = hw_resc->resv_hw_ring_grps;
+ hwr->vnic = hw_resc->resv_vnics;
+ hwr->stat = hw_resc->resv_stat_ctxs;
+ hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
+}
+
+static bool bnge_rings_ok(struct bnge_hw_rings *hwr)
+{
+ return hwr->tx && hwr->rx && hwr->nq && hwr->grp && hwr->vnic &&
+ hwr->stat && hwr->cmpl;
+}
+
+static bool bnge_need_reserve_rings(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ u16 cprs = bnge_cprs_demand(bd);
+ u16 rx = bd->rx_nr_rings, stat;
+ u16 nqs = bnge_nqs_demand(bd);
+ u16 vnic;
+
+ if (hw_resc->resv_tx_rings != bd->tx_nr_rings)
+ return true;
+
+ vnic = bnge_get_total_vnics(bd, rx);
+
+ if (bnge_is_agg_reqd(bd))
+ rx <<= 1;
+ stat = bnge_func_stat_ctxs_demand(bd);
+ if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cprs ||
+ hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat)
+ return true;
+ if (hw_resc->resv_irqs != nqs)
+ return true;
+
+ return false;
+}
+
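+/* Reserve rings with the firmware in two phases: request the full
+ * demand, then trim the driver's ring counts down to whatever the
+ * firmware actually granted, since any resource may come back smaller.
+ */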
+int bnge_reserve_rings(struct bnge_dev *bd)
+{
+ u16 aux_dflt_msix = bnge_aux_get_dflt_msix(bd);
+ struct bnge_hw_rings hwr = {0};
+ u16 rx_rings, old_rx_rings;
+ u16 nq = bd->nq_nr_rings;
+ u16 aux_msix = 0;
+ bool sh = false;
+ u16 tx_cp;
+ int rc;
+
+ if (!bnge_need_reserve_rings(bd))
+ return 0;
+
+ if (!bnge_aux_registered(bd)) {
+ aux_msix = bnge_get_avail_msix(bd, aux_dflt_msix);
+ if (!aux_msix)
+ bnge_aux_set_stat_ctxs(bd, 0);
+
+ if (aux_msix > aux_dflt_msix)
+ aux_msix = aux_dflt_msix;
+ hwr.nq = nq + aux_msix;
+ } else {
+ hwr.nq = bnge_nqs_demand(bd);
+ }
+
+ hwr.tx = bd->tx_nr_rings;
+ hwr.rx = bd->rx_nr_rings;
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ sh = true;
+ hwr.cmpl = hwr.rx + hwr.tx;
+
+ hwr.vnic = bnge_get_total_vnics(bd, hwr.rx);
+
+ if (bnge_is_agg_reqd(bd))
+ hwr.rx <<= 1;
+ hwr.grp = bd->rx_nr_rings;
+ hwr.rss_ctx = bnge_rss_ctxs_in_use(bd, &hwr);
+ hwr.stat = bnge_func_stat_ctxs_demand(bd);
+ old_rx_rings = bd->hw_resc.resv_rx_rings;
+
+ rc = bnge_hwrm_reserve_rings(bd, &hwr);
+ if (rc)
+ return rc;
+
+ bnge_copy_reserved_rings(bd, &hwr);
+
+ rx_rings = hwr.rx;
+ if (bnge_is_agg_reqd(bd)) {
+ if (hwr.rx >= 2)
+ rx_rings = hwr.rx >> 1;
+ else
+ return -ENOMEM;
+ }
+
+ rx_rings = min_t(u16, rx_rings, hwr.grp);
+ hwr.nq = min_t(u16, hwr.nq, bd->nq_nr_rings);
+ if (hwr.stat > bnge_aux_get_stat_ctxs(bd))
+ hwr.stat -= bnge_aux_get_stat_ctxs(bd);
+ hwr.nq = min_t(u16, hwr.nq, hwr.stat);
+
+ /* Adjust the rings */
+ rc = bnge_adjust_rings(bd, &rx_rings, &hwr.tx, hwr.nq, sh);
+ if (bnge_is_agg_reqd(bd))
+ hwr.rx = rx_rings << 1;
+ tx_cp = hwr.tx;
+ hwr.nq = sh ? max_t(u16, tx_cp, rx_rings) : tx_cp + rx_rings;
+ bd->tx_nr_rings = hwr.tx;
+
+ if (rx_rings != bd->rx_nr_rings)
+ dev_warn(bd->dev, "RX rings resv reduced to %d than earlier %d requested\n",
+ rx_rings, bd->rx_nr_rings);
+
+ bd->rx_nr_rings = rx_rings;
+ bd->nq_nr_rings = hwr.nq;
+
+ if (!bnge_rings_ok(&hwr))
+ return -ENOMEM;
+
+ if (old_rx_rings != bd->hw_resc.resv_rx_rings)
+ bnge_set_dflt_rss_indir_tbl(bd);
+
+ if (!bnge_aux_registered(bd)) {
+ u16 resv_msix, resv_ctx, aux_ctxs;
+ struct bnge_hw_resc *hw_resc;
+
+ hw_resc = &bd->hw_resc;
+ resv_msix = hw_resc->resv_irqs - bd->nq_nr_rings;
+ aux_msix = min_t(u16, resv_msix, aux_msix);
+ bnge_aux_set_msix_num(bd, aux_msix);
+ resv_ctx = hw_resc->resv_stat_ctxs - bd->nq_nr_rings;
+ aux_ctxs = min(resv_ctx, bnge_aux_get_stat_ctxs(bd));
+ bnge_aux_set_stat_ctxs(bd, aux_ctxs);
+ }
+
+ return rc;
+}
+
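+/* Allocate MSI-X vectors. When the PCI core supports dynamic MSI-X
+ * allocation, the IRQ table is sized for the function maximum so that
+ * more vectors can be added later without reallocating it.
+ */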
+int bnge_alloc_irqs(struct bnge_dev *bd)
+{
+ u16 aux_msix, tx_cp, num_entries;
+ int i, irqs_demand, rc;
+ u16 max, min = 1;
+
+ irqs_demand = bnge_nqs_demand(bd);
+ max = bnge_get_max_func_irqs(bd);
+ if (irqs_demand > max)
+ irqs_demand = max;
+
+ if (!(bd->flags & BNGE_EN_SHARED_CHNL))
+ min = 2;
+
+ irqs_demand = pci_alloc_irq_vectors(bd->pdev, min, irqs_demand,
+ PCI_IRQ_MSIX);
+ aux_msix = bnge_aux_get_msix(bd);
+ if (irqs_demand < 0 || irqs_demand < aux_msix) {
+ rc = -ENODEV;
+ goto err_free_irqs;
+ }
+
+ num_entries = irqs_demand;
+ if (pci_msix_can_alloc_dyn(bd->pdev))
+ num_entries = max;
+ bd->irq_tbl = kcalloc(num_entries, sizeof(*bd->irq_tbl), GFP_KERNEL);
+ if (!bd->irq_tbl) {
+ rc = -ENOMEM;
+ goto err_free_irqs;
+ }
+
+ for (i = 0; i < irqs_demand; i++)
+ bd->irq_tbl[i].vector = pci_irq_vector(bd->pdev, i);
+
+ bd->irqs_acquired = irqs_demand;
+ /* Reduce ring counts based on the number of vectors allocated.
+ * We don't need to consider NQs as they have already been
+ * calculated and must be more than irqs_demand.
+ */
+ rc = bnge_adjust_rings(bd, &bd->rx_nr_rings,
+ &bd->tx_nr_rings,
+ irqs_demand - aux_msix, min == 1);
+ if (rc)
+ goto err_free_irqs;
+
+ tx_cp = bnge_num_tx_to_cp(bd, bd->tx_nr_rings);
+ bd->nq_nr_rings = (min == 1) ?
+ max_t(u16, tx_cp, bd->rx_nr_rings) :
+ tx_cp + bd->rx_nr_rings;
+
+ /* Readjust tx_nr_rings_per_tc */
+ if (!bd->num_tc)
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+
+ return 0;
+
+err_free_irqs:
+ dev_err(bd->dev, "Failed to allocate IRQs err = %d\n", rc);
+ bnge_free_irqs(bd);
+ return rc;
+}
+
+void bnge_free_irqs(struct bnge_dev *bd)
+{
+ pci_free_irq_vectors(bd->pdev);
+ kfree(bd->irq_tbl);
+ bd->irq_tbl = NULL;
+}
+
+static void _bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
+ u16 *max_tx, u16 *max_nq)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ u16 max_ring_grps = 0, max_cp;
+ int rc;
+
+ *max_tx = hw_resc->max_tx_rings;
+ *max_rx = hw_resc->max_rx_rings;
+ *max_nq = min_t(int, bnge_get_max_func_irqs(bd),
+ hw_resc->max_stat_ctxs);
+ max_ring_grps = hw_resc->max_hw_ring_grps;
+ if (bnge_is_agg_reqd(bd))
+ *max_rx >>= 1;
+
+ max_cp = bnge_get_max_func_cp_rings(bd);
+
+ /* Fix RX and TX rings according to number of CPs available */
+ rc = bnge_fix_rings_count(max_rx, max_tx, max_cp, false);
+ if (rc) {
+ *max_rx = 0;
+ *max_tx = 0;
+ }
+
+ *max_rx = min_t(int, *max_rx, max_ring_grps);
+}
+
+static int bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
+ u16 *max_tx, bool shared)
+{
+ u16 rx, tx, nq;
+
+ _bnge_get_max_rings(bd, &rx, &tx, &nq);
+ *max_rx = rx;
+ *max_tx = tx;
+ if (!rx || !tx || !nq)
+ return -ENOMEM;
+
+ return bnge_fix_rings_count(max_rx, max_tx, nq, shared);
+}
+
+static int bnge_get_dflt_rings(struct bnge_dev *bd, u16 *max_rx, u16 *max_tx,
+ bool shared)
+{
+ int rc;
+
+ rc = bnge_get_max_rings(bd, max_rx, max_tx, shared);
+ if (rc) {
+ dev_info(bd->dev, "Not enough rings available\n");
+ return rc;
+ }
+
+ if (bnge_is_roce_en(bd)) {
+ int max_cp, max_stat, max_irq;
+
+ /* Reserve minimum resources for RoCE */
+ max_cp = bnge_get_max_func_cp_rings(bd);
+ max_stat = bnge_get_max_func_stat_ctxs(bd);
+ max_irq = bnge_get_max_func_irqs(bd);
+ if (max_cp <= BNGE_MIN_ROCE_CP_RINGS ||
+ max_irq <= BNGE_MIN_ROCE_CP_RINGS ||
+ max_stat <= BNGE_MIN_ROCE_STAT_CTXS)
+ return 0;
+
+ max_cp -= BNGE_MIN_ROCE_CP_RINGS;
+ max_irq -= BNGE_MIN_ROCE_CP_RINGS;
+ max_stat -= BNGE_MIN_ROCE_STAT_CTXS;
+ max_cp = min_t(u16, max_cp, max_irq);
+ max_cp = min_t(u16, max_cp, max_stat);
+ rc = bnge_adjust_rings(bd, max_rx, max_tx, max_cp, shared);
+ if (rc)
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/* In the initial default shared ring setting, each shared ring must
+ * have an RX/TX ring pair.
+ */
+static void bnge_trim_dflt_sh_rings(struct bnge_dev *bd)
+{
+ bd->nq_nr_rings = min_t(u16, bd->tx_nr_rings_per_tc, bd->rx_nr_rings);
+ bd->rx_nr_rings = bd->nq_nr_rings;
+ bd->tx_nr_rings_per_tc = bd->nq_nr_rings;
+ bd->tx_nr_rings = bd->tx_nr_rings_per_tc;
+}
+
+static int bnge_net_init_dflt_rings(struct bnge_dev *bd, bool sh)
+{
+ u16 dflt_rings, max_rx_rings, max_tx_rings;
+ int rc;
+
+ if (sh)
+ bd->flags |= BNGE_EN_SHARED_CHNL;
+
+ dflt_rings = netif_get_num_default_rss_queues();
+
+ rc = bnge_get_dflt_rings(bd, &max_rx_rings, &max_tx_rings, sh);
+ if (rc)
+ return rc;
+ bd->rx_nr_rings = min_t(u16, dflt_rings, max_rx_rings);
+ bd->tx_nr_rings_per_tc = min_t(u16, dflt_rings, max_tx_rings);
+ if (sh)
+ bnge_trim_dflt_sh_rings(bd);
+ else
+ bd->nq_nr_rings = bd->tx_nr_rings_per_tc + bd->rx_nr_rings;
+ bd->tx_nr_rings = bd->tx_nr_rings_per_tc;
+
+ rc = bnge_reserve_rings(bd);
+ if (rc && rc != -ENODEV)
+ dev_warn(bd->dev, "Unable to reserve tx rings\n");
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+ if (sh)
+ bnge_trim_dflt_sh_rings(bd);
+
+ /* Rings may have been reduced, re-reserve them again */
+ if (bnge_need_reserve_rings(bd)) {
+ rc = bnge_reserve_rings(bd);
+ if (rc && rc != -ENODEV)
+ dev_warn(bd->dev, "Fewer rings reservation failed\n");
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+ }
+ if (rc) {
+ bd->tx_nr_rings = 0;
+ bd->rx_nr_rings = 0;
+ }
+
+ return rc;
+}
+
+static int bnge_alloc_rss_indir_tbl(struct bnge_dev *bd)
+{
+ u16 entries;
+
+ entries = BNGE_MAX_RSS_TABLE_ENTRIES;
+
+ bd->rss_indir_tbl_entries = entries;
+ bd->rss_indir_tbl =
+ kmalloc_array(entries, sizeof(*bd->rss_indir_tbl), GFP_KERNEL);
+ if (!bd->rss_indir_tbl)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int bnge_net_init_dflt_config(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc;
+ int rc;
+
+ rc = bnge_alloc_rss_indir_tbl(bd);
+ if (rc)
+ return rc;
+
+ rc = bnge_net_init_dflt_rings(bd, true);
+ if (rc)
+ goto err_free_tbl;
+
+ hw_resc = &bd->hw_resc;
+ bd->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
+ BNGE_L2_FLTR_MAX_FLTR;
+
+ return 0;
+
+err_free_tbl:
+ kfree(bd->rss_indir_tbl);
+ bd->rss_indir_tbl = NULL;
+ return rc;
+}
+
+void bnge_net_uninit_dflt_config(struct bnge_dev *bd)
+{
+ kfree(bd->rss_indir_tbl);
+ bd->rss_indir_tbl = NULL;
+}
+
+void bnge_aux_init_dflt_config(struct bnge_dev *bd)
+{
+ bd->aux_num_msix = bnge_aux_get_dflt_msix(bd);
+ bd->aux_num_stat_ctxs = bnge_get_dflt_aux_stat_ctxs(bd);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.h b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
new file mode 100644
index 000000000000..54ef1c7d8822
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_RESC_H_
+#define _BNGE_RESC_H_
+
+#include "bnge_netdev.h"
+#include "bnge_rmem.h"
+
+struct bnge_hw_resc {
+ u16 min_rsscos_ctxs;
+ u16 max_rsscos_ctxs;
+ u16 resv_rsscos_ctxs;
+ u16 min_cp_rings;
+ u16 max_cp_rings;
+ u16 resv_cp_rings;
+ u16 min_tx_rings;
+ u16 max_tx_rings;
+ u16 resv_tx_rings;
+ u16 max_tx_sch_inputs;
+ u16 min_rx_rings;
+ u16 max_rx_rings;
+ u16 resv_rx_rings;
+ u16 min_hw_ring_grps;
+ u16 max_hw_ring_grps;
+ u16 resv_hw_ring_grps;
+ u16 min_l2_ctxs;
+ u16 max_l2_ctxs;
+ u16 min_vnics;
+ u16 max_vnics;
+ u16 resv_vnics;
+ u16 min_stat_ctxs;
+ u16 max_stat_ctxs;
+ u16 resv_stat_ctxs;
+ u16 max_nqs;
+ u16 max_irqs;
+ u16 resv_irqs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
+};
+
+struct bnge_hw_rings {
+ u16 tx;
+ u16 rx;
+ u16 grp;
+ u16 nq;
+ u16 cmpl;
+ u16 stat;
+ u16 vnic;
+ u16 rss_ctx;
+};
+
+/* "TXRX", 2 hypens, plus maximum integer */
+#define BNGE_IRQ_NAME_EXTRA 17
+struct bnge_irq {
+ irq_handler_t handler;
+ unsigned int vector;
+ u8 requested:1;
+ u8 have_cpumask:1;
+ char name[IFNAMSIZ + BNGE_IRQ_NAME_EXTRA];
+ cpumask_var_t cpu_mask;
+};
+
+int bnge_reserve_rings(struct bnge_dev *bd);
+int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared);
+int bnge_alloc_irqs(struct bnge_dev *bd);
+void bnge_free_irqs(struct bnge_dev *bd);
+int bnge_net_init_dflt_config(struct bnge_dev *bd);
+void bnge_net_uninit_dflt_config(struct bnge_dev *bd);
+void bnge_aux_init_dflt_config(struct bnge_dev *bd);
+
+static inline u32
+bnge_adjust_pow_two(u32 total_ent, u16 ent_per_blk)
+{
+ u32 blks = total_ent / ent_per_blk;
+
+ if (blks == 0 || blks == 1)
+ return ++blks;
+
+ if (!is_power_of_2(blks))
+ blks = roundup_pow_of_two(blks);
+
+ return blks;
+}
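+
+/* Example: a 511-entry ring with 256 entries per block returns 2
+ * (511 / 256 == 1, bumped to cover the remainder), while 1536 entries
+ * return 8 (6 rounded up to a power of two). The bump on small counts
+ * assumes callers pass ring sizes of the form 2^n - 1.
+ */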
+
+#define BNGE_MAX_ROCE_MSIX 64
+#define BNGE_MIN_ROCE_CP_RINGS 2
+#define BNGE_MIN_ROCE_STAT_CTXS 1
+
+#endif /* _BNGE_RESC_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
new file mode 100644
index 000000000000..52ada65943a0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
+#include <linux/bnxt/hsi.h>
+
+#include "bnge.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_rmem.h"
+
+static void bnge_init_ctx_mem(struct bnge_ctx_mem_type *ctxm,
+ void *p, int len)
+{
+ u8 init_val = ctxm->init_value;
+ u16 offset = ctxm->init_offset;
+ u8 *p2 = p;
+ int i;
+
+ if (!init_val)
+ return;
+ if (offset == BNGE_CTX_INIT_INVALID_OFFSET) {
+ memset(p, init_val, len);
+ return;
+ }
+ for (i = 0; i < len; i += ctxm->entry_size)
+ *(p2 + i + offset) = init_val;
+}
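+
+/* When the firmware supplies a per-entry init offset, only that byte
+ * of each entry is seeded with init_value; otherwise the whole block
+ * is memset. Example: with entry_size 64 and offset 8, bytes 8, 72,
+ * 136, ... are written.
+ */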
+
+void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
+{
+ struct pci_dev *pdev = bd->pdev;
+ int i;
+
+ if (!rmem->pg_arr)
+ goto skip_pages;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ if (!rmem->pg_arr[i])
+ continue;
+
+ dma_free_coherent(&pdev->dev, rmem->page_size,
+ rmem->pg_arr[i], rmem->dma_arr[i]);
+
+ rmem->pg_arr[i] = NULL;
+ }
+skip_pages:
+ if (rmem->pg_tbl) {
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ dma_free_coherent(&pdev->dev, pg_tbl_size,
+ rmem->pg_tbl, rmem->dma_pg_tbl);
+ rmem->pg_tbl = NULL;
+ }
+ if (rmem->vmem_size && *rmem->vmem) {
+ vfree(*rmem->vmem);
+ *rmem->vmem = NULL;
+ }
+}
+
+int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
+{
+ struct pci_dev *pdev = bd->pdev;
+ u64 valid_bit = 0;
+ int i;
+
+ if (rmem->flags & (BNGE_RMEM_VALID_PTE_FLAG | BNGE_RMEM_RING_PTE_FLAG))
+ valid_bit = PTU_PTE_VALID;
+
+ if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
+ &rmem->dma_pg_tbl,
+ GFP_KERNEL);
+ if (!rmem->pg_tbl)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ u64 extra_bits = valid_bit;
+
+ rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ rmem->page_size,
+ &rmem->dma_arr[i],
+ GFP_KERNEL);
+ if (!rmem->pg_arr[i])
+ return -ENOMEM;
+
+ if (rmem->ctx_mem)
+ bnge_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
+ rmem->page_size);
+
+ if (rmem->nr_pages > 1 || rmem->depth > 0) {
+ if (i == rmem->nr_pages - 2 &&
+ (rmem->flags & BNGE_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_NEXT_TO_LAST;
+ else if (i == rmem->nr_pages - 1 &&
+ (rmem->flags & BNGE_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_LAST;
+ rmem->pg_tbl[i] =
+ cpu_to_le64(rmem->dma_arr[i] | extra_bits);
+ }
+ }
+
+ if (rmem->vmem_size) {
+ *rmem->vmem = vzalloc(rmem->vmem_size);
+ if (!(*rmem->vmem))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int bnge_alloc_ctx_one_lvl(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ rmem->page_size = BNGE_PAGE_SIZE;
+ rmem->pg_arr = ctx_pg->ctx_pg_arr;
+ rmem->dma_arr = ctx_pg->ctx_dma_arr;
+ rmem->flags = BNGE_RMEM_VALID_PTE_FLAG;
+ if (rmem->depth >= 1)
+ rmem->flags |= BNGE_RMEM_USE_FULL_PAGE_FLAG;
+ return bnge_alloc_ring(bd, rmem);
+}
+
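+/* One page of 8-byte PTEs maps MAX_CTX_PAGES data pages; with 4K
+ * pages that is 512 PTEs covering 2MB at one level, and the two-level
+ * layout built below covers MAX_CTX_TOTAL_PAGES (512 * 512 pages,
+ * i.e. 1GB).
+ */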
+static int bnge_alloc_ctx_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg, u32 mem_size,
+ u8 depth, struct bnge_ctx_mem_type *ctxm)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ int rc;
+
+ if (!mem_size)
+ return -EINVAL;
+
+ ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE);
+ if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
+ ctx_pg->nr_pages = 0;
+ return -EINVAL;
+ }
+ if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
+ int nr_tbls, i;
+
+ rmem->depth = 2;
+ ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
+ GFP_KERNEL);
+ if (!ctx_pg->ctx_pg_tbl)
+ return -ENOMEM;
+ nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
+ rmem->nr_pages = nr_tbls;
+ rc = bnge_alloc_ctx_one_lvl(bd, ctx_pg);
+ if (rc)
+ return rc;
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnge_ctx_pg_info *pg_tbl;
+
+ pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
+ if (!pg_tbl)
+ return -ENOMEM;
+ ctx_pg->ctx_pg_tbl[i] = pg_tbl;
+ rmem = &pg_tbl->ring_mem;
+ rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
+ rmem->dma_pg_tbl = ctx_pg->ctx_dma_arr[i];
+ rmem->depth = 1;
+ rmem->nr_pages = MAX_CTX_PAGES;
+ rmem->ctx_mem = ctxm;
+ if (i == (nr_tbls - 1)) {
+ int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
+
+ if (rem)
+ rmem->nr_pages = rem;
+ }
+ rc = bnge_alloc_ctx_one_lvl(bd, pg_tbl);
+ if (rc)
+ break;
+ }
+ } else {
+ rmem->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE);
+ if (rmem->nr_pages > 1 || depth)
+ rmem->depth = 1;
+ rmem->ctx_mem = ctxm;
+ rc = bnge_alloc_ctx_one_lvl(bd, ctx_pg);
+ }
+
+ return rc;
+}
+
+static void bnge_free_ctx_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
+ ctx_pg->ctx_pg_tbl) {
+ int i, nr_tbls = rmem->nr_pages;
+
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnge_ctx_pg_info *pg_tbl;
+ struct bnge_ring_mem_info *rmem2;
+
+ pg_tbl = ctx_pg->ctx_pg_tbl[i];
+ if (!pg_tbl)
+ continue;
+ rmem2 = &pg_tbl->ring_mem;
+ bnge_free_ring(bd, rmem2);
+ ctx_pg->ctx_pg_arr[i] = NULL;
+ kfree(pg_tbl);
+ ctx_pg->ctx_pg_tbl[i] = NULL;
+ }
+ kfree(ctx_pg->ctx_pg_tbl);
+ ctx_pg->ctx_pg_tbl = NULL;
+ }
+ bnge_free_ring(bd, rmem);
+ ctx_pg->nr_pages = 0;
+}
+
+static int bnge_setup_ctxm_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm, u32 entries,
+ u8 pg_lvl)
+{
+ struct bnge_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, rc = 0, n = 1;
+ u32 mem_size;
+
+ if (!ctxm->entry_size || !ctx_pg)
+ return -EINVAL;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ if (ctxm->entry_multiple)
+ entries = roundup(entries, ctxm->entry_multiple);
+ entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
+ mem_size = entries * ctxm->entry_size;
+ for (i = 0; i < n && !rc; i++) {
+ ctx_pg[i].entries = entries;
+ rc = bnge_alloc_ctx_pg_tbls(bd, &ctx_pg[i], mem_size, pg_lvl,
+ ctxm->init_value ? ctxm : NULL);
+ }
+
+ return rc;
+}
+
+static int bnge_backing_store_cfg(struct bnge_dev *bd, u32 ena)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ struct bnge_ctx_mem_type *ctxm;
+ u16 last_type;
+ int rc = 0;
+ u16 type;
+
+ if (!ena)
+ return 0;
+ else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
+ last_type = BNGE_CTX_MAX - 1;
+ else
+ last_type = BNGE_CTX_L2_MAX - 1;
+ ctx->ctx_arr[last_type].last = 1;
+
+ for (type = 0; type < BNGE_CTX_V2_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+
+ rc = bnge_hwrm_func_backing_store(bd, ctxm, ctxm->last);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void bnge_free_ctx_mem(struct bnge_dev *bd)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ u16 type;
+
+ if (!ctx)
+ return;
+
+ for (type = 0; type < BNGE_CTX_V2_MAX; type++) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ struct bnge_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, n = 1;
+
+ if (!ctx_pg)
+ continue;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++)
+ bnge_free_ctx_pg_tbls(bd, &ctx_pg[i]);
+
+ kfree(ctx_pg);
+ ctxm->pg_info = NULL;
+ }
+
+ ctx->flags &= ~BNGE_CTX_FLAG_INITED;
+ kfree(ctx);
+ bd->ctx = NULL;
+}
+
+#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
+ (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
+
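+/* Size and allocate every backing-store region. When RoCE is enabled,
+ * extra QPs and SRQs beyond the L2 needs are provisioned (capped below
+ * at 64K and 8K respectively) before the per-type tables are built and
+ * the layout is committed to the firmware.
+ */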
+int bnge_alloc_ctx_mem(struct bnge_dev *bd)
+{
+ struct bnge_ctx_mem_type *ctxm;
+ struct bnge_ctx_mem_info *ctx;
+ u32 l2_qps, qp1_qps, max_qps;
+ u32 ena, entries_sp, entries;
+ u32 srqs, max_srqs, min;
+ u32 num_mr, num_ah;
+ u32 extra_srqs = 0;
+ u32 extra_qps = 0;
+ u32 fast_qpmd_qps;
+ u8 pg_lvl = 1;
+ int i, rc;
+
+ rc = bnge_hwrm_func_backing_store_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed querying ctx mem caps, rc: %d\n", rc);
+ return rc;
+ }
+
+ ctx = bd->ctx;
+ if (!ctx || (ctx->flags & BNGE_CTX_FLAG_INITED))
+ return 0;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_QP];
+ l2_qps = ctxm->qp_l2_entries;
+ qp1_qps = ctxm->qp_qp1_entries;
+ fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
+ max_qps = ctxm->max_entries;
+ ctxm = &ctx->ctx_arr[BNGE_CTX_SRQ];
+ srqs = ctxm->srq_l2_entries;
+ max_srqs = ctxm->max_entries;
+ ena = 0;
+ if (bnge_is_roce_en(bd) && !is_kdump_kernel()) {
+ pg_lvl = 2;
+ extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+ /* Allocate extra QPs if the fast QP destroy feature is enabled */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (fast_qpmd_qps)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
+ }
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_QP];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps,
+ pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_SRQ];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, srqs + extra_srqs, pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_CQ];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->cq_l2_entries +
+ extra_qps * 2, pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_VNIC];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_STAT];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
+
+ if (!bnge_is_roce_en(bd))
+ goto skip_rdma;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_MRAV];
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNGE_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, num_mr + num_ah, 2);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_TIM];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps, 1);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
+
+skip_rdma:
+ ctxm = &ctx->ctx_arr[BNGE_CTX_STQM];
+ min = ctxm->min_entries;
+ entries_sp = ctx->ctx_arr[BNGE_CTX_VNIC].vnic_entries + l2_qps +
+ 2 * (extra_qps + qp1_qps) + min;
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, entries_sp, 2);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_FTQM];
+ entries = l2_qps + 2 * (extra_qps + qp1_qps);
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, entries, 2);
+ if (rc)
+ return rc;
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+
+ rc = bnge_backing_store_cfg(bd, ena);
+ if (rc) {
+ dev_err(bd->dev, "Failed configuring ctx mem, rc: %d\n", rc);
+ return rc;
+ }
+ ctx->flags |= BNGE_CTX_FLAG_INITED;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
new file mode 100644
index 000000000000..300f1d8268ef
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_RMEM_H_
+#define _BNGE_RMEM_H_
+
+struct bnge_ctx_mem_type;
+struct bnge_dev;
+
+#define PTU_PTE_VALID 0x1UL
+#define PTU_PTE_LAST 0x2UL
+#define PTU_PTE_NEXT_TO_LAST 0x4UL
+
+struct bnge_ring_mem_info {
+ /* Number of pages to next level */
+ int nr_pages;
+ int page_size;
+ u16 flags;
+#define BNGE_RMEM_VALID_PTE_FLAG 1
+#define BNGE_RMEM_RING_PTE_FLAG 2
+#define BNGE_RMEM_USE_FULL_PAGE_FLAG 4
+
+ u16 depth;
+
+ void **pg_arr;
+ dma_addr_t *dma_arr;
+
+ __le64 *pg_tbl;
+ dma_addr_t dma_pg_tbl;
+
+ int vmem_size;
+ void **vmem;
+
+ struct bnge_ctx_mem_type *ctx_mem;
+};
+
+/* The hardware supports certain page sizes.
+ * Use the supported page sizes to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNGE_PAGE_SHIFT 12
+#elif (PAGE_SHIFT <= 13)
+#define BNGE_PAGE_SHIFT PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNGE_PAGE_SHIFT 13
+#else
+#define BNGE_PAGE_SHIFT 16
+#endif
+#define BNGE_PAGE_SIZE (1 << BNGE_PAGE_SHIFT)
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNGE_RX_PAGE_SHIFT 15
+#else
+#define BNGE_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+#define MAX_CTX_PAGES (BNGE_PAGE_SIZE / 8)
+#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
+
+struct bnge_ctx_pg_info {
+ u32 entries;
+ u32 nr_pages;
+ void *ctx_pg_arr[MAX_CTX_PAGES];
+ dma_addr_t ctx_dma_arr[MAX_CTX_PAGES];
+ struct bnge_ring_mem_info ring_mem;
+ struct bnge_ctx_pg_info **ctx_pg_tbl;
+};
+
+#define BNGE_MAX_TQM_SP_RINGS 1
+#define BNGE_MAX_TQM_FP_RINGS 8
+#define BNGE_MAX_TQM_RINGS \
+ (BNGE_MAX_TQM_SP_RINGS + BNGE_MAX_TQM_FP_RINGS)
+#define BNGE_BACKING_STORE_CFG_LEGACY_LEN 256
+#define BNGE_SET_CTX_PAGE_ATTR(attr) \
+do { \
+ if (BNGE_PAGE_SIZE == 0x2000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \
+ else if (BNGE_PAGE_SIZE == 0x10000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \
+ else \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
+} while (0)
+
+#define BNGE_CTX_MRAV_AV_SPLIT_ENTRY 0
+
+#define BNGE_CTX_QP \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP
+#define BNGE_CTX_SRQ \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ
+#define BNGE_CTX_CQ \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ
+#define BNGE_CTX_VNIC \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC
+#define BNGE_CTX_STAT \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT
+#define BNGE_CTX_STQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING
+#define BNGE_CTX_FTQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
+#define BNGE_CTX_MRAV \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
+#define BNGE_CTX_TIM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
+#define BNGE_CTX_TCK \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK
+#define BNGE_CTX_RCK \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK
+#define BNGE_CTX_MTQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
+#define BNGE_CTX_SQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
+#define BNGE_CTX_RQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
+#define BNGE_CTX_SRQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
+#define BNGE_CTX_CQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
+#define BNGE_CTX_SRT_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE
+#define BNGE_CTX_SRT2_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE
+#define BNGE_CTX_CRT_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE
+#define BNGE_CTX_CRT2_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE
+#define BNGE_CTX_RIGP0_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE
+#define BNGE_CTX_L2_HWRM_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE
+#define BNGE_CTX_ROCE_HWRM_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE
+
+#define BNGE_CTX_MAX (BNGE_CTX_TIM + 1)
+#define BNGE_CTX_L2_MAX (BNGE_CTX_FTQM + 1)
+#define BNGE_CTX_INV ((u16)-1)
+
+#define BNGE_CTX_V2_MAX \
+ (FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE + 1)
+
+#define BNGE_BS_CFG_ALL_DONE \
+ FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE
+
+struct bnge_ctx_mem_type {
+ u16 type;
+ u16 entry_size;
+ u32 flags;
+#define BNGE_CTX_MEM_TYPE_VALID \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+ u32 instance_bmap;
+ u8 init_value;
+ u8 entry_multiple;
+ u16 init_offset;
+#define BNGE_CTX_INIT_INVALID_OFFSET 0xffff
+ u32 max_entries;
+ u32 min_entries;
+ u8 last:1;
+ u8 split_entry_cnt;
+#define BNGE_MAX_SPLIT_ENTRY 4
+ union {
+ struct {
+ u32 qp_l2_entries;
+ u32 qp_qp1_entries;
+ u32 qp_fast_qpmd_entries;
+ };
+ u32 srq_l2_entries;
+ u32 cq_l2_entries;
+ u32 vnic_entries;
+ struct {
+ u32 mrav_av_entries;
+ u32 mrav_num_entries_units;
+ };
+ u32 split[BNGE_MAX_SPLIT_ENTRY];
+ };
+ struct bnge_ctx_pg_info *pg_info;
+};
+
+struct bnge_ctx_mem_info {
+ u8 tqm_fp_rings_count;
+ u32 flags;
+#define BNGE_CTX_FLAG_INITED 0x01
+ struct bnge_ctx_mem_type ctx_arr[BNGE_CTX_V2_MAX];
+};
+
+int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
+void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
+int bnge_alloc_ctx_mem(struct bnge_dev *bd);
+void bnge_free_ctx_mem(struct bnge_dev *bd);
+
+#endif /* _BNGE_RMEM_H_ */
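
The BNGE_PAGE_SHIFT ladder above clamps the host page shift to ring page sizes the device supports (4K, 8K and 64K, matching the page attributes in BNGE_SET_CTX_PAGE_ATTR). A runtime restatement of the compile-time selection, as a sketch rather than driver code:

/* BNGE_PAGE_SHIFT selection, restated at runtime:
 *   host PAGE_SHIFT  < 12  -> 12 (4K)
 *   host PAGE_SHIFT 12..13 -> PAGE_SHIFT (4K or 8K)
 *   host PAGE_SHIFT 14..15 -> 13 (8K)
 *   host PAGE_SHIFT >= 16  -> 16 (64K)
 */
static unsigned int bnge_page_shift(unsigned int page_shift)
{
	if (page_shift < 12)
		return 12;
	if (page_shift <= 13)
		return page_shift;
	if (page_shift < 16)
		return 13;
	return 16;
}

The MAX_CTX_PAGES arithmetic follows from the same choice: each page-table page holds BNGE_PAGE_SIZE / 8 64-bit PTEs, so with 4K pages a two-level table addresses at most 512 * 512 = 262144 context pages (MAX_CTX_TOTAL_PAGES).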
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 17ae6df90723..9af81630c8a4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -344,7 +344,7 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
}
}
-/* maps unmapped priorities to to the same COS as L2 */
+/* maps unmapped priorities to the same COS as L2 */
static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
{
int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 44199855ebfb..fc8dec37a9e4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1243,9 +1243,9 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
* pf B succeeds in taking the same lock since they are from the same port.
* pf A takes the per pf misc lock. Performs eeprom access.
* pf A finishes. Unlocks the per pf misc lock.
- * Pf B takes the lock and proceeds to perform it's own access.
+ * Pf B takes the lock and proceeds to perform its own access.
* pf A unlocks the per port lock, while pf B is still working (!).
- * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
+ * mcp takes the per port lock and corrupts pf B's access (and/or has its own
* access corrupted by pf B)
*/
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
@@ -3318,8 +3318,11 @@ static int bnx2x_set_phys_id(struct net_device *dev,
return 0;
}
-static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+static int bnx2x_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
+ struct bnx2x *bp = netdev_priv(dev);
+
switch (info->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
@@ -3361,20 +3364,21 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = BNX2X_NUM_ETH_QUEUES(bp);
return 0;
- case ETHTOOL_GRXFH:
- return bnx2x_get_rss_flags(bp, info);
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EOPNOTSUPP;
}
}
-static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+static int bnx2x_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
+ struct bnx2x *bp = netdev_priv(dev);
int udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
- "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ "Set rss flags command parameters: flow type = %d, data = %u\n",
info->flow_type, info->data);
switch (info->flow_type) {
@@ -3460,19 +3464,6 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
}
}
-static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct bnx2x *bp = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- return bnx2x_set_rss_flags(bp, info);
- default:
- DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
-}
-
static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
{
return T_ETH_INDIRECTION_TABLE_SIZE;
@@ -3684,10 +3675,11 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
- .set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
+ .get_rxfh_fields = bnx2x_get_rxfh_fields,
+ .set_rxfh_fields = bnx2x_set_rxfh_fields,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_module_info = bnx2x_get_module_info,
@@ -3711,10 +3703,11 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.get_strings = bnx2x_get_strings,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
- .set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
+ .get_rxfh_fields = bnx2x_get_rxfh_fields,
+ .set_rxfh_fields = bnx2x_set_rxfh_fields,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_link_ksettings = bnx2x_get_vf_link_ksettings,
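
The bnx2x changes above are an instance of the generic ethtool migration that recurs in the bnxt, nicvf, cxgb4 and enic hunks below: RX flow-hash configuration moves out of the ETHTOOL_GRXFH/ETHTOOL_SRXFH cases of .get_rxnfc/.set_rxnfc and into the dedicated .get_rxfh_fields/.set_rxfh_fields ops. A minimal sketch of the resulting shape for a hypothetical "foo" driver:

struct foo_priv {
	bool tcp4_hash_l4; /* hypothetical device state */
};

static int foo_get_rxfh_fields(struct net_device *dev,
			       struct ethtool_rxfh_fields *info)
{
	struct foo_priv *priv = netdev_priv(dev);

	info->data = 0;
	if (info->flow_type == TCP_V4_FLOW && priv->tcp4_hash_l4)
		info->data = RXH_IP_SRC | RXH_IP_DST |
			     RXH_L4_B_0_1 | RXH_L4_B_2_3;
	return 0;
}

static int foo_set_rxfh_fields(struct net_device *dev,
			       const struct ethtool_rxfh_fields *info,
			       struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* program the hardware hash fields for info->flow_type here */
	if (info->flow_type == TCP_V4_FLOW)
		priv->tcp4_hash_l4 = !!(info->data & RXH_L4_B_0_1);
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	/* .get_rxnfc/.set_rxnfc keep only ring counts and
	 * classification rules; the RXFH cases are gone.
	 */
	.get_rxfh_fields = foo_get_rxfh_fields,
	.set_rxfh_fields = foo_set_rxfh_fields,
};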
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index a84d015da5df..9221942290a8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -332,7 +332,7 @@
#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
-/* microcode fixed page page size 4K (chains and ring segments) */
+/* microcode fixed page size 4K (chains and ring segments) */
#define MC_PAGE_SIZE 4096
/* Number of indices per slow-path SB */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 48ad2d6e125b..f0f05d7315ac 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1768,7 +1768,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
* @bp: driver handle
*
* Returns the recovery leader resource id according to the engine this function
- * belongs to. Currently only only 2 engines is supported.
+ * belongs to. Currently only 2 engines are supported.
*/
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
@@ -10219,8 +10219,7 @@ static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
.sync_table = bnx2x_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index bacc8552bce1..00ca861c80dd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -379,7 +379,7 @@ struct bnx2x_vlan_mac_obj {
/**
* Delete all configured elements having the given
* vlan_mac_flags specification. Assumes no pending for
- * execution commands. Will schedule all all currently
+ * execution commands. Will schedule all currently
* configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
* specification for deletion and will use the given
* ramrod_flags for the last DEL operation.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 243cb13cb01c..2800a90fba1f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -58,8 +58,8 @@
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <linux/pci-tph.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -477,6 +477,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
__le32 lflags = 0;
+ skb_frag_t *frag;
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
@@ -563,7 +564,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
- !lflags) {
+ skb_frags_readable(skb) && !lflags) {
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
@@ -598,9 +599,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_copy_from_linear_data(skb, pdata, len);
pdata += len;
for (j = 0; j < last_frag; j++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
void *fptr;
+ frag = &skb_shinfo(skb)->frags[j];
fptr = skb_frag_address_safe(frag);
if (!fptr)
goto normal_tx;
@@ -708,8 +709,7 @@ normal_tx:
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
txbd0 = txbd;
for (i = 0; i < last_frag; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
+ frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
@@ -721,7 +721,8 @@ normal_tx:
goto tx_dma_error;
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
- dma_unmap_addr_set(tx_buf, mapping, mapping);
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
+ mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -778,9 +779,11 @@ tx_dma_error:
for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
- dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[i]),
- DMA_TO_DEVICE);
+ frag = &skb_shinfo(skb)->frags[i];
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
tx_free:
@@ -809,6 +812,7 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 hw_cons = txr->tx_hw_cons;
unsigned int tx_bytes = 0;
u16 cons = txr->tx_cons;
+ skb_frag_t *frag;
int tx_pkts = 0;
bool rc = false;
@@ -848,13 +852,14 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
last = tx_buf->nr_frags;
for (j = 0; j < last; j++) {
+ frag = &skb_shinfo(skb)->frags[j];
cons = NEXT_TX(cons);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[j]),
- DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
if (unlikely(is_ts_pkt)) {
if (BNXT_CHIP_P5(bp)) {
@@ -921,15 +926,21 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
+ unsigned int *offset,
gfp_t gfp)
{
netmem_ref netmem;
- netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
+ BNXT_RX_PAGE_SIZE, gfp);
+ } else {
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ *offset = 0;
+ }
if (!netmem)
return 0;
- *mapping = page_pool_get_dma_addr_netmem(netmem);
+ *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
return netmem;
}
@@ -1024,7 +1035,7 @@ static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
dma_addr_t mapping;
netmem_ref netmem;
- netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp);
+ netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
if (!netmem)
return -ENOMEM;
@@ -1810,7 +1821,7 @@ static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
- /* if vf-rep dev is NULL, the must belongs to the PF */
+ /* if vf-rep dev is NULL, it must belong to the PF */
return dev ? dev : bp->dev;
}
@@ -3425,9 +3436,11 @@ static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
tx_buf = &txr->tx_buf_ring[ring_idx];
- dma_unmap_page(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(frag), DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
dev_kfree_skb(skb);
}
@@ -3803,14 +3816,15 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
int numa_node)
{
+ const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
+ const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
struct page_pool_params pp = { 0 };
struct page_pool *pool;
- pp.pool_size = bp->rx_agg_ring_size;
+ pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
if (BNXT_RX_PAGE_MODE(bp))
- pp.pool_size += bp->rx_ring_size;
+ pp.pool_size += bp->rx_ring_size / rx_size_fac;
pp.nid = numa_node;
- pp.napi = &rxr->bnapi->napi;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
@@ -3826,7 +3840,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
rxr->need_head_pool = page_pool_is_unreadable(pool);
if (bnxt_separate_head_pool(rxr)) {
- pp.pool_size = max(bp->rx_ring_size, 1024);
+ pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pool = page_pool_create(&pp);
if (IS_ERR(pool))
@@ -3842,6 +3856,12 @@ err_destroy_pp:
return PTR_ERR(pool);
}
+static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
+{
+ page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+ page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
u16 mem_size;
@@ -3880,6 +3900,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
if (rc)
return rc;
+ bnxt_enable_rx_page_pool(rxr);
rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
if (rc < 0)
@@ -7119,7 +7140,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
default:
netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
ring_type);
- return -1;
+ return -EINVAL;
}
resp = hwrm_req_hold(bp, req);
@@ -14122,28 +14143,13 @@ static void bnxt_unlock_sp(struct bnxt *bp)
netdev_unlock(bp->dev);
}
-/* Same as bnxt_lock_sp() with additional rtnl_lock */
-static void bnxt_rtnl_lock_sp(struct bnxt *bp)
-{
- clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
- netdev_lock(bp->dev);
-}
-
-static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
-{
- set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- netdev_unlock(bp->dev);
- rtnl_unlock();
-}
-
/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_reset_task(bp, silent);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Only called from bnxt_sp_task() */
@@ -14151,9 +14157,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
{
int i;
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
return;
}
/* Disable and flush TPA before resetting the RX ring */
@@ -14192,7 +14198,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
}
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, true);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_fw_fatal_close(struct bnxt *bp)
@@ -15084,17 +15090,15 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(bp->dev)) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
- netdev_lock(bp->dev);
rc = bnxt_open(bp->dev);
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
netdev_unlock(bp->dev);
- rtnl_unlock();
goto ulp_start;
}
@@ -15114,7 +15118,6 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_status_update(bp, true);
}
netdev_unlock(bp->dev);
- rtnl_unlock();
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
netdev_lock(bp->dev);
@@ -15640,8 +15643,7 @@ static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int ta
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
.set_port = bnxt_udp_tunnel_set_port,
.unset_port = bnxt_udp_tunnel_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -15649,8 +15651,7 @@ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
}, bnxt_udp_tunnels_p7 = {
.set_port = bnxt_udp_tunnel_set_port,
.unset_port = bnxt_udp_tunnel_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -16042,6 +16043,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
goto err_reset;
}
+ bnxt_enable_rx_page_pool(rxr);
napi_enable_locked(&bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
@@ -16060,7 +16062,7 @@ err_reset:
rc);
napi_enable_locked(&bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
- netif_close(dev);
+ bnxt_reset_task(bp, true);
return rc;
}
@@ -16775,6 +16777,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_SUPPORTS_QUEUE_API(bp))
dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
dev->request_ops_lock = true;
+ dev->netmem_tx = true;
rc = register_netdev(dev);
if (rc)
@@ -16876,7 +16879,6 @@ static int bnxt_resume(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
netdev_lock(dev);
rc = pci_enable_device(bp->pdev);
if (rc) {
@@ -16921,7 +16923,6 @@ static int bnxt_resume(struct device *device)
resume_exit:
netdev_unlock(bp->dev);
- rtnl_unlock();
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
@@ -16986,7 +16987,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_free_ctx_mem(bp, false);
netdev_unlock(netdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -17087,7 +17088,6 @@ static void bnxt_io_resume(struct pci_dev *pdev)
int err;
netdev_info(bp->dev, "PCI Slot Resume\n");
- rtnl_lock();
netdev_lock(netdev);
err = bnxt_hwrm_func_qcaps(bp);
@@ -17105,7 +17105,6 @@ static void bnxt_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
netdev_unlock(netdev);
- rtnl_unlock();
bnxt_ulp_start(bp, err);
if (!err)
bnxt_reenable_sriov(bp);
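
Taken together, the bnxt.c hunks above wire up unreadable (device-memory) netmem on both data paths: RX buffers are netmem references from the page pool (with page_pool_alloc_frag_netmem() sub-page fragments when PAGE_SIZE exceeds BNXT_RX_PAGE_SIZE), and TX opts in via dev->netmem_tx, recording and releasing frag mappings through the netmem DMA helpers so net_iov-backed frags are never host-unmapped. A condensed sketch of the TX-completion side, with "foo" names hypothetical:

/* netmem TX unmap pattern: use netmem_dma_unmap_page_attrs() in place
 * of dma_unmap_page(); it skips the unmap for unreadable net_iov frags.
 */
struct foo_tx_buf {
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

static void foo_unmap_tx_frag(struct device *dev,
			      struct foo_tx_buf *tx_buf,
			      skb_frag_t *frag)
{
	netmem_dma_unmap_page_attrs(dev,
				    dma_unmap_addr(tx_buf, mapping),
				    skb_frag_size(frag),
				    DMA_TO_DEVICE, 0);
}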
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 67e70d3d0980..18d6c94d5cb8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -10,7 +10,7 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_coredump.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 71e14be2507e..a00b67334f9b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -16,7 +16,7 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_dcb.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 127b7015f676..3324afbb3bec 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -10,7 +10,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include <linux/dim.h>
#include "bnxt.h"
#include "bnxt_debugfs.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
index d0bb4887acd0..a0a8d687dd99 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
@@ -7,7 +7,7 @@
* the Free Software Foundation.
*/
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 777880594a04..4c4581b0342e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -12,7 +12,7 @@
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/netdev_lock.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
index 6f6576dc417a..53a3bcb0efe0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -8,7 +8,7 @@
*/
#include <linux/dim.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
void bnxt_dim_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index f5d490bf997e..1b37612b1c01 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -26,7 +26,7 @@
#include <linux/timecounter.h>
#include <net/netdev_queues.h>
#include <net/netlink.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -1587,8 +1587,11 @@ static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
return 0;
}
-static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+static int bnxt_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct bnxt *bp = netdev_priv(dev);
+
cmd->data = 0;
switch (cmd->flow_type) {
case TCP_V4_FLOW:
@@ -1647,10 +1650,15 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
-static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+static int bnxt_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- u32 rss_hash_cfg = bp->rss_hash_cfg;
+ struct bnxt *bp = netdev_priv(dev);
int tuple, rc = 0;
+ u32 rss_hash_cfg;
+
+ rss_hash_cfg = bp->rss_hash_cfg;
if (cmd->data == RXH_4TUPLE)
tuple = 4;
@@ -1768,10 +1776,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
rc = bnxt_grxclsrule(bp, cmd);
break;
- case ETHTOOL_GRXFH:
- rc = bnxt_grxfh(bp, cmd);
- break;
-
default:
rc = -EOPNOTSUPP;
break;
@@ -1786,10 +1790,6 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int rc;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- rc = bnxt_srxfh(bp, cmd);
- break;
-
case ETHTOOL_SRXCLSRLINS:
rc = bnxt_srxclsrlins(bp, cmd);
break;
@@ -5521,6 +5521,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
.set_rxfh = bnxt_set_rxfh,
+ .get_rxfh_fields = bnxt_get_rxfh_fields,
+ .set_rxfh_fields = bnxt_set_rxfh_fields,
.create_rxfh_context = bnxt_create_rxfh_context,
.modify_rxfh_context = bnxt_modify_rxfh_context,
.remove_rxfh_context = bnxt_remove_rxfh_context,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
index 669d24ba0e87..de3427c6c6aa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
@@ -12,8 +12,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/pci.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_hwmon.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index d2fd2d04ed47..5ce190f50120 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -20,8 +20,8 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index fb5f5b063c3d..791b3a0cdb83 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -10,7 +10,7 @@
#ifndef BNXT_HWRM_H
#define BNXT_HWRM_H
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
enum bnxt_hwrm_ctx_flags {
/* Update the HWRM_API_FLAGS right below for any new non-internal bit added here */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 7542b6d2568b..ca660e6d28a4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -15,7 +15,7 @@
#include <linux/timekeeping.h>
#include <linux/ptp_classify.h>
#include <linux/clocksource.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ptp.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 5ddddd89052f..480e18a32caa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -16,7 +16,7 @@
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <net/dcbnl.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -823,7 +823,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
int avail_cp, avail_stat;
- /* Check if we can enable requested num of vf's. At a mininum
+ /* Check if we can enable requested num of vf's. At a minimum
* we require 1 RX 1 TX rings for each VF. In this minimum conf
* features like TPA will not be available.
*/
@@ -1125,7 +1125,7 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
/* There are two cases:
* 1.If firmware spec < 0x10202,VF MAC address is not forwarded
* to the PF and so it doesn't have to match
- * 2.Allow VF to modify it's own MAC when PF has not assigned a
+ * 2.Allow VF to modify its own MAC when PF has not assigned a
* valid MAC address and firmware spec >= 0x10202
*/
mac_ok = true;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d2ca90407cce..d72fd248f3aa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -19,8 +19,8 @@
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_sriov.h"
@@ -1316,7 +1316,7 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel decap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds. Ignore src_port in the tunnel_key,
+ * tunnel header fields. Ignore src_port in the tunnel_key,
* since it is not required for decap filters.
*/
decap_key->tp_src = 0;
@@ -1410,7 +1410,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel encap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds
+ * tunnel header fields
*/
encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
&tc_info->encap_ht_params,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 2450a369b792..61cf201bb0dc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -21,8 +21,8 @@
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>
#include <net/netdev_lock.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 619f0844e778..bd116fd578d8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -12,8 +12,8 @@
#include <linux/rtnetlink.h>
#include <linux/jhash.h>
#include <net/pkt_cls.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 09e7e8efa6fa..58d579dca3f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -17,7 +17,7 @@
#include <linux/filter.h>
#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_xdp.h"
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 97585c160de3..98971ae4f87d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2472,10 +2472,8 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
work_done = bcmgenet_desc_rx(ring, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ if (work_done < budget && napi_complete_done(napi, work_done))
bcmgenet_rx_ring_int_enable(ring);
- }
if (ring->dim.use_dim) {
dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
@@ -3988,6 +3986,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ netdev_sw_irq_coalesce_default_on(dev);
+
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
if (priv->wol_irq > 0) {
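
The bcmgenet poll change above adopts the canonical NAPI completion idiom: napi_complete_done() returns false when the context must stay scheduled (for example under busy polling), so device interrupts may only be re-armed on a true return. The general shape, with "foo" helpers hypothetical:

static int foo_rx_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_process_rx(napi, budget); /* hypothetical */

	/* Re-arm the RX interrupt only once NAPI has truly completed. */
	if (work_done < budget && napi_complete_done(napi, work_done))
		foo_rx_int_enable(napi); /* hypothetical */

	return work_done;
}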
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index b6437ba7a2eb..573e8b279e52 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -169,10 +169,15 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
reg &= ~EXT_GPHY_RESET;
} else {
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
- EXT_GPHY_RESET | EXT_CFG_IDDQ_GLOBAL_PWR;
+ EXT_CFG_IDDQ_GLOBAL_PWR;
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
+
reg |= EXT_CK25_DIS;
}
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 91104cc2c238..b4dc93a48718 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6686,7 +6686,7 @@ static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
* We only need to fill in the address because the other members
* of the RX descriptor are invariant, see tg3_init_rings.
*
- * Note the purposeful assymetry of cpu vs. chip accesses. For
+ * Note the purposeful asymmetry of cpu vs. chip accesses. For
* posting buffers we only dirty the first cache line of the RX
* descriptor (containing the address). Whereas for the RX status
* buffers the cpu only reads the last cacheline of the RX descriptor
@@ -10145,7 +10145,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
/* Pseudo-header checksum is done by hardware logic and not
- * the offload processers, so make the chip do the pseudo-
+ * the offload processors, so make the chip do the pseudo-
* header checksums on receive. For transmit it is more
* convenient to do the pseudo-header checksum in software
* as Linux does that on transmit for us in all cases.
@@ -16610,7 +16610,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, PCIX_TARGET_HWBUG);
- /* The chip can have it's power management PCI config
+ /* The chip can have its power management PCI config
* space registers clobbered due to this bug.
* So explicitly force the chip into D0 here.
*/
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index b473f8014d9c..a9e7f88fa26d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2390,7 +2390,7 @@
#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
-/* Fast Ethernet Tranceiver definitions */
+/* Fast Ethernet Transceiver definitions */
#define MII_TG3_FET_PTEST 0x17
#define MII_TG3_FET_PTEST_TRIM_SEL 0x0010
#define MII_TG3_FET_PTEST_TRIM_2 0x0002
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d1f1ae5ea161..ce95fad8cedd 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4109,8 +4109,12 @@ static const struct net_device_ops macb_netdev_ops = {
static void macb_configure_caps(struct macb *bp,
const struct macb_config *dt_conf)
{
+ struct device_node *np = bp->pdev->dev.of_node;
+ bool refclk_ext;
u32 dcfg;
+ refclk_ext = of_property_read_bool(np, "cdns,refclk-ext");
+
if (dt_conf)
bp->caps = dt_conf->caps;
@@ -4141,6 +4145,9 @@ static void macb_configure_caps(struct macb *bp,
}
}
+ if (refclk_ext)
+ bp->caps |= MACB_CAPS_USRIO_HAS_CLKEN;
+
dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
@@ -5096,6 +5103,7 @@ static const struct macb_config mpfs_config = {
static const struct macb_config sama7g5_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
@@ -5105,8 +5113,7 @@ static const struct macb_config sama7g5_gem_config = {
static const struct macb_config sama7g5_emac_config = {
.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
- MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII |
- MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -5654,6 +5661,20 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
return 0;
}
+static void macb_shutdown(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ dev_close(netdev);
+
+ netif_device_detach(netdev);
+
+ rtnl_unlock();
+}
+
static const struct dev_pm_ops macb_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
@@ -5667,6 +5688,7 @@ static struct platform_driver macb_driver = {
.of_match_table = of_match_ptr(macb_dt_ids),
.pm = &macb_pm_ops,
},
+ .shutdown = macb_shutdown,
};
module_platform_driver(macb_driver);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index ff8f2f9f9cae..75f22f74774c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1208,45 +1208,6 @@ int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
}
EXPORT_SYMBOL_GPL(setup_cn23xx_octeon_pf_device);
-int validate_cn23xx_pf_config_info(struct octeon_device *oct,
- struct octeon_config *conf23xx)
-{
- if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
- dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
- __func__, CFG_GET_IQ_MAX_Q(conf23xx),
- CN23XX_MAX_INPUT_QUEUES);
- return 1;
- }
-
- if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
- dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
- __func__, CFG_GET_OQ_MAX_Q(conf23xx),
- CN23XX_MAX_OUTPUT_QUEUES);
- return 1;
- }
-
- if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
- CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
- __func__);
- return 1;
- }
-
- if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
- __func__);
- return 1;
- }
-
- if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
- __func__);
- return 1;
- }
-
- return 0;
-}
-
int cn23xx_fw_loaded(struct octeon_device *oct)
{
u64 val;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index 234b96b4f488..bbe9f3133b07 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -54,9 +54,6 @@ struct oct_vf_stats {
int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
-int validate_cn23xx_pf_config_info(struct octeon_device *oct,
- struct octeon_config *conf23xx);
-
u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
int cn23xx_sriov_config(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 5b4cb725f60f..953edf0c7096 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -157,7 +157,7 @@ err_release_region:
response of the request.
* 0: the request will wait until its response gets back
* from the firmware within LIO_SC_MAX_TMO_MS milli sec.
- * It the response does not return within
+ * If the response does not return within
* LIO_SC_MAX_TMO_MS milli sec, lio_process_ordered_list()
* will move the request to zombie response list.
*
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 87dd6f89ce51..c139fc423764 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -268,7 +268,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
* @param oct - octeon device pointer
* @param ndata - control structure with queueing, and buffer information
*
- * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int octnet_send_nic_data_pkt(struct octeon_device *oct,
@@ -278,7 +278,7 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
* @param nctrl - control structure with command, timeout, and callback info
- * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index d0ff0c170b1a..fc6053414b7d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -516,8 +516,8 @@ static int nicvf_set_ringparam(struct net_device *netdev,
return 0;
}
-static int nicvf_get_rss_hash_opts(struct nicvf *nic,
- struct ethtool_rxnfc *info)
+static int nicvf_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
info->data = 0;
@@ -552,25 +552,28 @@ static int nicvf_get_rxnfc(struct net_device *dev,
info->data = nic->rx_queues;
ret = 0;
break;
- case ETHTOOL_GRXFH:
- return nicvf_get_rss_hash_opts(nic, info);
default:
break;
}
return ret;
}
-static int nicvf_set_rss_hash_opts(struct nicvf *nic,
- struct ethtool_rxnfc *info)
+static int nicvf_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
- struct nicvf_rss_info *rss = &nic->rss_info;
- u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss;
+ u64 rss_cfg;
+
+ rss = &nic->rss_info;
+ rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
if (!rss->enable)
netdev_err(nic->netdev,
"RSS is disabled, hash cannot be set\n");
- netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+ netdev_info(nic->netdev, "Set RSS flow type = %d, data = %u\n",
info->flow_type, info->data);
if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
@@ -628,19 +631,6 @@ static int nicvf_set_rss_hash_opts(struct nicvf *nic,
return 0;
}
-static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct nicvf *nic = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- return nicvf_set_rss_hash_opts(nic, info);
- default:
- break;
- }
- return -EOPNOTSUPP;
-}
-
static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
return RSS_HASH_KEY_SIZE * sizeof(u64);
@@ -872,11 +862,12 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.get_ringparam = nicvf_get_ringparam,
.set_ringparam = nicvf_set_ringparam,
.get_rxnfc = nicvf_get_rxnfc,
- .set_rxnfc = nicvf_set_rxnfc,
.get_rxfh_key_size = nicvf_get_rxfh_key_size,
.get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
.get_rxfh = nicvf_get_rxfh,
.set_rxfh = nicvf_set_rxfh,
+ .get_rxfh_fields = nicvf_get_rxfh_fields,
+ .set_rxfh_fields = nicvf_set_rxfh_fields,
.get_channels = nicvf_get_channels,
.set_channels = nicvf_set_channels,
.get_pauseparam = nicvf_get_pauseparam,
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 3b7ad744b2dd..21495b5dce25 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1429,9 +1429,9 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
{
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
struct bgx *bgx = context;
- char bgx_sel[5];
+ char bgx_sel[7];
- snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+ snprintf(bgx_sel, sizeof(bgx_sel), "BGX%d", bgx->bgx_id);
if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
pr_warn("Invalid link device\n");
return AE_OK;
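
The bgx_sel resize above exists because "BGX" plus a two-digit id plus the NUL terminator needs 6 bytes, and snprintf() truncates silently when the buffer is too small. A standalone illustration (assuming a two-digit bgx_id such as 12):

#include <stdio.h>

int main(void)
{
	char small[5], fixed[7];

	snprintf(small, sizeof(small), "BGX%d", 12); /* -> "BGX1", truncated */
	snprintf(fixed, sizeof(fixed), "BGX%d", 12); /* -> "BGX12" */
	printf("%s vs %s\n", small, fixed);
	return 0;
}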
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index cbfa03d5663a..f3ada6e7cdc5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -141,7 +141,7 @@ static int pm3393_interrupt_enable(struct cmac *cmac)
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
- /* TERMINATOR - PL_INTERUPTS_EXT */
+ /* TERMINATOR - PL_INTERRUPTS_EXT */
pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
@@ -179,7 +179,7 @@ static int pm3393_interrupt_disable(struct cmac *cmac)
elmer &= ~ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
- /* TERMINATOR - PL_INTERUPTS_EXT */
+ /* TERMINATOR - PL_INTERRUPTS_EXT */
/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
* COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
*/
@@ -222,7 +222,7 @@ static int pm3393_interrupt_clear(struct cmac *cmac)
elmer |= ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
- /* TERMINATOR - PL_INTERUPTS_EXT
+ /* TERMINATOR - PL_INTERRUPTS_EXT
*/
pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
pl_intr |= F_PL_INTR_EXT;
@@ -756,7 +756,7 @@ static int pm3393_mac_reset(adapter_t * adapter)
/* ??? If this fails, might be able to software reset the XAUI part
* and try to recover... thus saving us from doing another HW reset */
- /* Has the XAUI MABC PLL circuitry stablized? */
+ /* Has the XAUI MABC PLL circuitry stabilized? */
is_xaui_mabc_pll_locked =
(val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 9749d1239f58..5d5f3380ecca 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -176,43 +176,6 @@ again:
EXPORT_SYMBOL(t3_l2t_send_slow);
-void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
-{
-again:
- switch (e->state) {
- case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
- neigh_event_send(e->neigh, NULL);
- spin_lock_bh(&e->lock);
- if (e->state == L2T_STATE_STALE) {
- e->state = L2T_STATE_VALID;
- }
- spin_unlock_bh(&e->lock);
- return;
- case L2T_STATE_VALID: /* fast-path, send the packet on */
- return;
- case L2T_STATE_RESOLVING:
- spin_lock_bh(&e->lock);
- if (e->state != L2T_STATE_RESOLVING) {
- /* ARP already completed */
- spin_unlock_bh(&e->lock);
- goto again;
- }
- spin_unlock_bh(&e->lock);
-
- /*
- * Only the first packet added to the arpq should kick off
- * resolution. However, because the alloc_skb below can fail,
- * we allow each packet added to the arpq to retry resolution
- * as a way of recovering from transient memory exhaustion.
- * A better way would be to use a work request to retry L2T
- * entries when there's no memory.
- */
- neigh_event_send(e->neigh, NULL);
- }
-}
-
-EXPORT_SYMBOL(t3_l2t_send_event);
-
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index 646ca0bc25bd..33558f177497 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -113,7 +113,6 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
-void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 95e6f015a6af..0d85198fb03d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1316,7 +1316,7 @@ struct ch_sched_flowc {
* (value, mask) tuples. The associated ingress packet field matches the
* tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
* rule can be constructed by specifying a tuple of (0, 0).) A filter rule
- * matches an ingress packet when all of the individual individual field
+ * matches an ingress packet when all of the individual field
* matching rules are true.
*
* Partial field masks are always valid, however, while it may be easy to
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 1546c3db08f0..23326235d4ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1730,6 +1730,60 @@ static int cxgb4_ntuple_get_filter(struct net_device *dev,
return 0;
}
+static int cxgb4_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ unsigned int v = pi->rss_mode;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V4_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ }
+ return 0;
+}
+
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -1739,56 +1793,6 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
int ret = 0;
switch (info->cmd) {
- case ETHTOOL_GRXFH: {
- unsigned int v = pi->rss_mode;
-
- info->data = 0;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V4_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V6_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- }
- return 0;
- }
case ETHTOOL_GRXRINGS:
info->data = pi->nqsets;
return 0;
@@ -2199,6 +2203,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh = get_rss_table,
.set_rxfh = set_rss_table,
+ .get_rxfh_fields = cxgb4_get_rxfh_fields,
.self_test = cxgb4_self_test,
.flash_device = set_flash,
.get_ts_info = get_ts_info,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 51395c96b2e9..392723ef14e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3297,7 +3297,7 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
}
if (max_tx_rate == 0) {
- /* unbind VF to to any Traffic Class */
+ /* unbind VF to any Traffic Class */
fw_pfvf =
(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
@@ -4816,7 +4816,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
goto bye;
}
- /* Get FW from from /lib/firmware/ */
+ /* Get FW from /lib/firmware/ */
ret = request_firmware(&fw, fw_info->fw_mod_name,
adap->pdev_dev);
if (ret < 0) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index a5d2f84dcdd5..8524246fd67e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -186,7 +186,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
- * or a a valid linked bucket.
+ * or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
@@ -422,7 +422,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
uhtid = TC_U32_USERHTID(cls->knode.handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
- * or a a valid linked bucket.
+ * or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 64402e3646b3..9fccb8ea9bcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -163,7 +163,7 @@ static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
* for DMA, but this is of course never sent to the hardware and is only used
* to prevent double unmappings. All of the above requires that the Free List
* Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
- * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal
+ * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
* Free List Buffer alignment is 32 bytes, this works out for us ...
*/
enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 175bf9b13058..171750fad44f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -9348,7 +9348,7 @@ int t4_init_devlog_params(struct adapter *adap)
return 0;
}
- /* Otherwise, ask the firmware for it's Device Log Parameters.
+ /* Otherwise, ask the firmware for its Device Log Parameters.
*/
memset(&devlog_cmd, 0, sizeof(devlog_cmd));
devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 4e6ecb9c8dcc..31fab2415743 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2191,7 +2191,7 @@ static void __iomem *bar2_address(struct adapter *adapter,
/**
* t4vf_sge_alloc_rxq - allocate an SGE RX Queue
* @adapter: the adapter
- * @rspq: pointer to to the new rxq's Response Queue to be filled in
+ * @rspq: pointer to the new rxq's Response Queue to be filled in
* @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
* @dev: the network device associated with the new rspq
* @intr_dest: MSI-X vector index (overridden in MSI mode)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 1c52592d3b65..56fcc531af2e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -706,7 +706,7 @@ int t4vf_fl_pkt_align(struct adapter *adapter)
* separately. The actual Ingress Packet Data alignment boundary
* within Packed Buffer Mode is the maximum of these two
* specifications. (Note that it makes no real practical sense to
- * have the Pading Boudary be larger than the Packing Boundary but you
+ * have the Padding Boundary be larger than the Packing Boundary but you
* could set the chip up that way and, in fact, legacy T4 code would
* end up doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index d567e42e1760..465fa8077964 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1096,8 +1096,7 @@ new_buf:
copy = size;
if (msg->msg_flags & MSG_SPLICE_PAGES) {
- err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
- sk->sk_allocation);
+ err = skb_splice_from_iter(skb, &msg->msg_iter, copy);
if (err < 0) {
if (err == -EMSGSIZE)
goto new_buf;
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 529160926a96..a50f5dad34d5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -528,8 +528,10 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
return 0;
}
-static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
+static int enic_get_rx_flow_hash(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct enic *enic = netdev_priv(dev);
u8 rss_hash_type = 0;
cmd->data = 0;
@@ -597,9 +599,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
ret = enic_grxclsrule(enic, cmd);
spin_unlock_bh(&enic->rfs_h.lock);
break;
- case ETHTOOL_GRXFH:
- ret = enic_get_rx_flow_hash(enic, cmd);
- break;
default:
ret = -EOPNOTSUPP;
break;
@@ -693,6 +692,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
+ .get_rxfh_fields = enic_get_rx_flow_hash,
.get_link_ksettings = enic_get_ksettings,
.get_ts_info = enic_get_ts_info,
.get_channels = enic_get_channels,
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 5b7e6eb080f3..b608585f1954 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1550,7 +1550,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(PCI_SLOT(pdev->devfn) == 12))) {
/* Cobalt MAC address in first EEPROM locations. */
sa_offset = 0;
- /* Ensure our media table fixup get's applied */
+ /* Ensure our media table fixup gets applied */
memcpy(ee_data + 16, ee_data, 8);
}
#endif
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 8759f9f76b62..e5d2ede13845 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -143,7 +143,7 @@ static const struct pci_device_id xircom_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, xircom_pci_table);
-static struct pci_driver xircom_ops = {
+static struct pci_driver xircom_driver = {
.name = "xircom_cb",
.id_table = xircom_pci_table,
.probe = xircom_probe,
@@ -1169,4 +1169,4 @@ investigate_write_descriptor(struct net_device *dev,
}
}
-module_pci_driver(xircom_ops);
+module_pci_driver(xircom_driver);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index da9b7715df05..cc60ee454bf9 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -99,6 +99,13 @@ static const struct net_device_ops netdev_ops = {
.ndo_tx_timeout = rio_tx_timeout,
};
+static bool is_support_rmon_mmio(struct pci_dev *pdev)
+{
+ return pdev->vendor == PCI_VENDOR_ID_DLINK &&
+ pdev->device == 0x4000 &&
+ pdev->revision == 0x0c;
+}
+
static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -131,18 +138,22 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np = netdev_priv(dev);
+ if (is_support_rmon_mmio(pdev))
+ np->rmon_enable = true;
+
/* IO registers range. */
ioaddr = pci_iomap(pdev, 0, 0);
if (!ioaddr)
goto err_out_dev;
np->eeprom_addr = ioaddr;
-#ifdef MEM_MAPPING
- /* MM registers range. */
- ioaddr = pci_iomap(pdev, 1, 0);
- if (!ioaddr)
- goto err_out_iounmap;
-#endif
+ if (np->rmon_enable) {
+ /* MM registers range. */
+ ioaddr = pci_iomap(pdev, 1, 0);
+ if (!ioaddr)
+ goto err_out_iounmap;
+ }
+
np->ioaddr = ioaddr;
np->chip_id = chip_idx;
np->pdev = pdev;
@@ -289,9 +300,8 @@ err_out_unmap_tx:
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
err_out_iounmap:
-#ifdef MEM_MAPPING
- pci_iounmap(pdev, np->ioaddr);
-#endif
+ if (np->rmon_enable)
+ pci_iounmap(pdev, np->ioaddr);
pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
free_netdev (dev);
@@ -578,7 +588,8 @@ static void rio_hw_init(struct net_device *dev)
dw8(TxDMAPollPeriod, 0xff);
dw8(RxDMABurstThresh, 0x30);
dw8(RxDMAUrgentThresh, 0x30);
- dw32(RmonStatMask, 0x0007ffff);
+ if (!np->rmon_enable)
+ dw32(RmonStatMask, 0x0007ffff);
/* clear statistics */
clear_stats (dev);
@@ -1076,9 +1087,6 @@ get_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
-#ifdef MEM_MAPPING
- int i;
-#endif
unsigned int stat_reg;
unsigned long flags;
@@ -1123,10 +1131,10 @@ get_stats (struct net_device *dev)
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
-#ifdef MEM_MAPPING
- for (i = 0x100; i <= 0x150; i += 4)
- dr32(i);
-#endif
+ if (np->rmon_enable)
+ for (int i = 0x100; i <= 0x150; i += 4)
+ dr32(i);
+
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
@@ -1143,9 +1151,6 @@ clear_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
-#ifdef MEM_MAPPING
- int i;
-#endif
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
@@ -1181,10 +1186,9 @@ clear_stats (struct net_device *dev)
dr16(BcstFramesXmtdOk);
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
-#ifdef MEM_MAPPING
- for (i = 0x100; i <= 0x150; i += 4)
- dr32(i);
-#endif
+ if (np->rmon_enable)
+ for (int i = 0x100; i <= 0x150; i += 4)
+ dr32(i);
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
@@ -1810,9 +1814,8 @@ rio_remove1 (struct pci_dev *pdev)
np->rx_ring_dma);
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
-#ifdef MEM_MAPPING
- pci_iounmap(pdev, np->ioaddr);
-#endif
+ if (np->rmon_enable)
+ pci_iounmap(pdev, np->ioaddr);
pci_iounmap(pdev, np->eeprom_addr);
free_netdev (dev);
pci_release_regions (pdev);
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index ba679025e866..4788cc94639d 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -403,6 +403,8 @@ struct netdev_private {
u16 negotiate; /* Negotiated media */
int phy_addr; /* PHY addresses. */
u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */
+
+ bool rmon_enable;
};
/* The station address location in the EEPROM. */
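
The dl2k changes replace the build-time MEM_MAPPING switch with run-time detection: probe() latches np->rmon_enable once from the PCI vendor/device/revision triple, and each former #ifdef site becomes an ordinary conditional, so one binary serves both board variants. Schematically (same logic as the hunks above):

	/* before: compiled in or out for the whole driver */
	#ifdef MEM_MAPPING
		pci_iounmap(pdev, np->ioaddr);
	#endif

	/* after: decided per device at probe time */
	if (np->rmon_enable)
		pci_iounmap(pdev, np->ioaddr);
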
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index d730af4a50c7..bb5d2fa15736 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -3856,8 +3856,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
status = be_mcc_notify_wait(adapter);
err:
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
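
The be_cmds.c reordering is a correctness fix rather than a cleanup: dma_free_coherent() may sleep on some architectures, so it must not run inside the BH-disabled mcc_lock critical section. The safe shape, with a hypothetical helper standing in for the mailbox wait:

	spin_lock_bh(&adapter->mcc_lock);
	status = issue_mcc_cmd_and_wait(adapter, &cmd);	/* hypothetical */
	spin_unlock_bh(&adapter->mcc_lock);
	/* only after the lock is dropped: */
	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
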
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f001a649f58f..f9216326bdfe 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1073,10 +1073,19 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
adapter->msg_enable = level;
}
-static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
+static int be_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u64 flow_type = cmd->flow_type;
u64 data = 0;
+ if (!be_multi_rxq(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "ethtool::get_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
+
switch (flow_type) {
case TCP_V4_FLOW:
if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
@@ -1104,7 +1113,8 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
break;
}
- return data;
+ cmd->data = data;
+ return 0;
}
static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
@@ -1119,9 +1129,6 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
}
switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
- break;
case ETHTOOL_GRXRINGS:
cmd->data = adapter->num_rx_qs;
break;
@@ -1132,11 +1139,19 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
return 0;
}
-static int be_set_rss_hash_opts(struct be_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int be_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- int status;
+ struct be_adapter *adapter = netdev_priv(netdev);
u32 rss_flags = adapter->rss_info.rss_flags;
+ int status;
+
+ if (!be_multi_rxq(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "ethtool::set_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
if (cmd->data != L3_RSS_FLAGS &&
cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1195,28 +1210,6 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
return be_cmd_status(status);
}
-static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- int status = 0;
-
- if (!be_multi_rxq(adapter)) {
- dev_err(&adapter->pdev->dev,
- "ethtool::set_rxnfc: RX flow hashing is disabled\n");
- return -EINVAL;
- }
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- status = be_set_rss_hash_opts(adapter, cmd);
- break;
- default:
- return -EINVAL;
- }
-
- return status;
-}
-
static void be_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
@@ -1449,7 +1442,8 @@ const struct ethtool_ops be_ethtool_ops = {
.flash_device = be_do_flash,
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
- .set_rxnfc = be_set_rxnfc,
+ .get_rxfh_fields = be_get_rxfh_fields,
+ .set_rxfh_fields = be_set_rxfh_fields,
.get_rxfh_indir_size = be_get_rxfh_indir_size,
.get_rxfh_key_size = be_get_rxfh_key_size,
.get_rxfh = be_get_rxfh,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3d2e21592119..cb004fd16252 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1465,10 +1465,10 @@ static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
ntohs(tcphdr->source));
dev_info(dev, "TCP dest port %d\n",
ntohs(tcphdr->dest));
- dev_info(dev, "TCP sequence num %d\n",
- ntohs(tcphdr->seq));
- dev_info(dev, "TCP ack_seq %d\n",
- ntohs(tcphdr->ack_seq));
+ dev_info(dev, "TCP sequence num %u\n",
+ ntohl(tcphdr->seq));
+ dev_info(dev, "TCP ack_seq %u\n",
+ ntohl(tcphdr->ack_seq));
} else if (ip_hdr(skb)->protocol ==
IPPROTO_UDP) {
udphdr = udp_hdr(skb);
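
The hunk above matters because tcphdr::seq and tcphdr::ack_seq are __be32 while the port fields are __be16: converting a 32-bit field with ntohs() keeps only half the value, and %d then misprints large sequence numbers as negative. For example:

	const struct tcphdr *th = tcp_hdr(skb);
	u16 sport = ntohs(th->source);	/* 16-bit field: ntohs is right */
	u32 seq   = ntohl(th->seq);	/* 32-bit field: needs ntohl + %u */
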
@@ -4031,8 +4031,7 @@ static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
static const struct udp_tunnel_nic_info be_udp_tunnels = {
.set_port = be_vxlan_set_port,
.unset_port = be_vxlan_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index a98d5af3f9e3..a863f7841210 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -101,6 +102,8 @@ struct ftgmac100 {
/* AST2500/AST2600 RMII ref clock gate */
struct clk *rclk;
+ /* Aspeed reset control */
+ struct reset_control *rst;
/* Link management */
int cur_speed;
@@ -148,6 +151,23 @@ static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
{
u32 maccr = 0;
+ /* Aspeed RMII needs SCU reset to clear status */
+ if (priv->is_aspeed && priv->netdev->phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ int err;
+
+ err = reset_control_assert(priv->rst);
+ if (err) {
+ dev_err(priv->dev, "Failed to reset mac (%d)\n", err);
+ return err;
+ }
+ usleep_range(10000, 20000);
+ err = reset_control_deassert(priv->rst);
+ if (err) {
+ dev_err(priv->dev, "Failed to deassert mac reset (%d)\n", err);
+ return err;
+ }
+ }
+
switch (priv->cur_speed) {
case SPEED_10:
case 0: /* no link */
@@ -1428,7 +1448,7 @@ static void ftgmac100_adjust_link(struct net_device *netdev)
/* Disable all interrupts */
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
- /* Release phy lock to allow ftgmac100_reset to aquire it, keeping lock
+ /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock
* order consistent to prevent deadlock.
*/
if (netdev->phydev)
@@ -1730,16 +1750,17 @@ err_register_mdiobus:
static void ftgmac100_phy_disconnect(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
- if (!netdev->phydev)
+ if (!phydev)
return;
- phy_disconnect(netdev->phydev);
+ phy_disconnect(phydev);
if (of_phy_is_fixed_link(priv->dev->of_node))
of_phy_deregister_fixed_link(priv->dev->of_node);
if (priv->use_ncsi)
- fixed_phy_unregister(netdev->phydev);
+ fixed_phy_unregister(phydev);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -1968,6 +1989,12 @@ static int ftgmac100_probe(struct platform_device *pdev)
}
+ priv->rst = devm_reset_control_get_optional_exclusive(priv->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ err = PTR_ERR(priv->rst);
+ goto err_phy_connect;
+ }
+
if (priv->is_aspeed) {
err = ftgmac100_setup_clk(priv);
if (err)
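
The ftgmac100 additions use the standard reset-controller API: the optional devm getter returns NULL, not an error, when the device tree has no resets property, and the assert/deassert helpers treat a NULL handle as a no-op, so the RMII reset sequence needs no separate guard. Condensed sketch (the pulse width is the one chosen above, not a documented requirement):

	rst = devm_reset_control_get_optional_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);		/* e.g. -EPROBE_DEFER */

	err = reset_control_assert(rst);	/* no-op when rst == NULL */
	if (err)
		return err;
	usleep_range(10000, 20000);		/* hold reset */
	err = reset_control_deassert(rst);
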
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 23c23cca2620..3edc8d142dd5 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -28,7 +28,6 @@
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
-#include <linux/phy_fixed.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <soc/fsl/bman.h>
@@ -3150,7 +3149,6 @@ static const struct net_device_ops dpaa_ops = {
.ndo_stop = dpaa_eth_stop,
.ndo_tx_timeout = dpaa_tx_timeout,
.ndo_get_stats64 = dpaa_get_stats64,
- .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 9986f6e1f587..d09e456f14c0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -263,8 +263,8 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
ethtool_puts(&data, dpaa_stats_global[i]);
}
-static int dpaa_get_hash_opts(struct net_device *dev,
- struct ethtool_rxnfc *cmd)
+static int dpaa_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
struct dpaa_priv *priv = netdev_priv(dev);
@@ -299,22 +299,6 @@ static int dpaa_get_hash_opts(struct net_device *dev,
return 0;
}
-static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *unused)
-{
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = dpaa_get_hash_opts(dev, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
struct mac_device *mac_dev;
@@ -329,8 +313,9 @@ static void dpaa_set_hash(struct net_device *net_dev, bool enable)
priv->keygen_in_use = enable;
}
-static int dpaa_set_hash_opts(struct net_device *dev,
- struct ethtool_rxnfc *nfc)
+static int dpaa_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
int ret = -EINVAL;
@@ -364,21 +349,6 @@ static int dpaa_set_hash_opts(struct net_device *dev,
return ret;
}
-static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = dpaa_set_hash_opts(dev, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int dpaa_get_ts_info(struct net_device *net_dev,
struct kernel_ethtool_ts_info *info)
{
@@ -401,8 +371,10 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
of_node_put(ptp_node);
}
- if (ptp_dev)
+ if (ptp_dev) {
ptp = platform_get_drvdata(ptp_dev);
+ put_device(&ptp_dev->dev);
+ }
if (ptp)
info->phc_index = ptp->phc_index;
@@ -510,8 +482,8 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_strings = dpaa_get_strings,
.get_link_ksettings = dpaa_get_link_ksettings,
.set_link_ksettings = dpaa_set_link_ksettings,
- .get_rxnfc = dpaa_get_rxnfc,
- .set_rxnfc = dpaa_set_rxnfc,
+ .get_rxfh_fields = dpaa_get_rxfh_fields,
+ .set_rxfh_fields = dpaa_set_rxfh_fields,
.get_ts_info = dpaa_get_ts_info,
.get_coalesce = dpaa_get_coalesce,
.set_coalesce = dpaa_set_coalesce,
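
The dpaa_get_ts_info() change (and the identical gianfar one later in the series) fixes a device reference leak: of_find_device_by_node() returns its platform_device with a reference held, which the caller must drop with put_device(). Sketch of the balanced lookup:

	ptp_dev = of_find_device_by_node(ptp_node);	/* takes a dev ref */
	of_node_put(ptp_node);				/* node ref done */
	if (ptp_dev) {
		ptp = platform_get_drvdata(ptp_dev);
		put_device(&ptp_dev->dev);		/* balance lookup */
	}
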
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 74ef77cb7078..00474ed11d53 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -719,13 +719,6 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
int i, j = 0;
switch (rxnfc->cmd) {
- case ETHTOOL_GRXFH:
- /* we purposely ignore cmd->flow_type for now, because the
- * classifier only supports a single set of fields for all
- * protocols
- */
- rxnfc->data = priv->rx_hash_fields;
- break;
case ETHTOOL_GRXRINGS:
rxnfc->data = dpaa2_eth_queue_count(priv);
break;
@@ -767,11 +760,6 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
int err = 0;
switch (rxnfc->cmd) {
- case ETHTOOL_SRXFH:
- if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
- return -EOPNOTSUPP;
- err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
- break;
case ETHTOOL_SRXCLSRLINS:
err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
break;
@@ -785,6 +773,28 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
return err;
}
+static int dpaa2_eth_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *rxnfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ /* we purposely ignore cmd->flow_type for now, because the
+ * classifier only supports a single set of fields for all
+ * protocols
+ */
+ rxnfc->data = priv->rx_hash_fields;
+ return 0;
+}
+
+static int dpaa2_eth_set_rxfh_fields(struct net_device *net_dev,
+ const struct ethtool_rxfh_fields *rxnfc,
+ struct netlink_ext_ack *extack)
+{
+ if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
+ return -EOPNOTSUPP;
+ return dpaa2_eth_set_hash(net_dev, rxnfc->data);
+}
+
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);
@@ -939,6 +949,8 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_strings = dpaa2_eth_get_strings,
.get_rxnfc = dpaa2_eth_get_rxnfc,
.set_rxnfc = dpaa2_eth_set_rxnfc,
+ .get_rxfh_fields = dpaa2_eth_get_rxfh_fields,
+ .set_rxfh_fields = dpaa2_eth_set_rxfh_fields,
.get_ts_info = dpaa2_eth_get_ts_info,
.get_tunable = dpaa2_eth_get_tunable,
.set_tunable = dpaa2_eth_set_tunable,
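
The companion setter carries a third parameter for extended acks. None of the drivers converted in this section populate it yet, but a sketch of the intended shape (foo_ prefix and the supported-field mask are invented):

	static int foo_set_rxfh_fields(struct net_device *dev,
				       const struct ethtool_rxfh_fields *cmd,
				       struct netlink_ext_ack *extack)
	{
		if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported RSS hash fields");
			return -EOPNOTSUPP;
		}
		/* reprogram the hardware field selection here */
		return 0;
	}
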
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
index a466c2379146..4b0ae7d9af92 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
@@ -448,7 +448,5 @@ bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
- xsk_tx_release(ch->xsk_pool);
-
return total_enqueued == budget;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index dcc3fbac3481..e4287725832e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1375,6 +1375,7 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
}
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
+ struct enetc_hw *hw = &priv->si->hw;
__be16 tpid = 0;
switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
@@ -1385,15 +1386,12 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
tpid = htons(ETH_P_8021AD);
break;
case 2:
- tpid = htons(enetc_port_rd(&priv->si->hw,
- ENETC_PCVLANR1));
+ tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR1) &
+ SICVLANR_ETYPE);
break;
case 3:
- tpid = htons(enetc_port_rd(&priv->si->hw,
- ENETC_PCVLANR2));
- break;
- default:
- break;
+ tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR2) &
+ SICVLANR_ETYPE);
}
__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 872d2cbd088b..62e8ee4d2f04 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -96,17 +96,17 @@ struct enetc_rx_swbd {
#define ENETC_TXBDS_MAX_NEEDED(x) ENETC_TXBDS_NEEDED((x) + 1)
struct enetc_ring_stats {
- unsigned int packets;
- unsigned int bytes;
- unsigned int rx_alloc_errs;
- unsigned int xdp_drops;
- unsigned int xdp_tx;
- unsigned int xdp_tx_drops;
- unsigned int xdp_redirect;
- unsigned int xdp_redirect_failures;
- unsigned int recycles;
- unsigned int recycle_failures;
- unsigned int win_drop;
+ unsigned long packets;
+ unsigned long bytes;
+ unsigned long rx_alloc_errs;
+ unsigned long xdp_drops;
+ unsigned long xdp_tx;
+ unsigned long xdp_tx_drops;
+ unsigned long xdp_redirect;
+ unsigned long xdp_redirect_failures;
+ unsigned long recycles;
+ unsigned long recycle_failures;
+ unsigned long win_drop;
};
struct enetc_xdp_data {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index d38cd36be4a6..961e76cd8489 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -142,7 +142,7 @@ static const struct {
static const struct {
int reg;
char name[ETH_GSTRING_LEN] __nonstring;
-} enetc_port_counters[] = {
+} enetc_pm_counters[] = {
{ ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
{ ENETC_PM_RALN(0), "MAC rx alignment errors" },
{ ENETC_PM_RXPF(0), "MAC rx valid pause frames" },
@@ -194,6 +194,12 @@ static const struct {
{ ENETC_PM_TSCOL(0), "MAC tx single collisions" },
{ ENETC_PM_TLCOL(0), "MAC tx late collisions" },
{ ENETC_PM_TECOL(0), "MAC tx excessive collisions" },
+};
+
+static const struct {
+ int reg;
+ char name[ETH_GSTRING_LEN] __nonstring;
+} enetc_port_counters[] = {
{ ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
{ ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
{ ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
@@ -240,6 +246,7 @@ static int enetc_get_sset_count(struct net_device *ndev, int sset)
return len;
len += ARRAY_SIZE(enetc_port_counters);
+ len += ARRAY_SIZE(enetc_pm_counters);
return len;
}
@@ -266,6 +273,9 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
ethtool_cpy(&data, enetc_port_counters[i].name);
+ for (i = 0; i < ARRAY_SIZE(enetc_pm_counters); i++)
+ ethtool_cpy(&data, enetc_pm_counters[i].name);
+
break;
}
}
@@ -302,13 +312,16 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
+
+ for (i = 0; i < ARRAY_SIZE(enetc_pm_counters); i++)
+ data[o++] = enetc_port_rd64(hw, enetc_pm_counters[i].reg);
}
static void enetc_pause_stats(struct enetc_hw *hw, int mac,
struct ethtool_pause_stats *pause_stats)
{
- pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(mac));
- pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(mac));
+ pause_stats->tx_pause_frames = enetc_port_rd64(hw, ENETC_PM_TXPF(mac));
+ pause_stats->rx_pause_frames = enetc_port_rd64(hw, ENETC_PM_RXPF(mac));
}
static void enetc_get_pause_stats(struct net_device *ndev,
@@ -335,31 +348,31 @@ static void enetc_get_pause_stats(struct net_device *ndev,
static void enetc_mac_stats(struct enetc_hw *hw, int mac,
struct ethtool_eth_mac_stats *s)
{
- s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac));
- s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac));
- s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac));
- s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac));
- s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac));
- s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac));
- s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac));
- s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac));
- s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac));
- s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac));
- s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac));
- s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac));
- s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac));
- s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac));
- s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac));
- s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac));
- s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac));
- s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac));
+ s->FramesTransmittedOK = enetc_port_rd64(hw, ENETC_PM_TFRM(mac));
+ s->SingleCollisionFrames = enetc_port_rd64(hw, ENETC_PM_TSCOL(mac));
+ s->MultipleCollisionFrames = enetc_port_rd64(hw, ENETC_PM_TMCOL(mac));
+ s->FramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RFRM(mac));
+ s->FrameCheckSequenceErrors = enetc_port_rd64(hw, ENETC_PM_RFCS(mac));
+ s->AlignmentErrors = enetc_port_rd64(hw, ENETC_PM_RALN(mac));
+ s->OctetsTransmittedOK = enetc_port_rd64(hw, ENETC_PM_TEOCT(mac));
+ s->FramesWithDeferredXmissions = enetc_port_rd64(hw, ENETC_PM_TDFR(mac));
+ s->LateCollisions = enetc_port_rd64(hw, ENETC_PM_TLCOL(mac));
+ s->FramesAbortedDueToXSColls = enetc_port_rd64(hw, ENETC_PM_TECOL(mac));
+ s->FramesLostDueToIntMACXmitError = enetc_port_rd64(hw, ENETC_PM_TERR(mac));
+ s->CarrierSenseErrors = enetc_port_rd64(hw, ENETC_PM_TCRSE(mac));
+ s->OctetsReceivedOK = enetc_port_rd64(hw, ENETC_PM_REOCT(mac));
+ s->FramesLostDueToIntMACRcvError = enetc_port_rd64(hw, ENETC_PM_RDRNTP(mac));
+ s->MulticastFramesXmittedOK = enetc_port_rd64(hw, ENETC_PM_TMCA(mac));
+ s->BroadcastFramesXmittedOK = enetc_port_rd64(hw, ENETC_PM_TBCA(mac));
+ s->MulticastFramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RMCA(mac));
+ s->BroadcastFramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RBCA(mac));
}
static void enetc_ctrl_stats(struct enetc_hw *hw, int mac,
struct ethtool_eth_ctrl_stats *s)
{
- s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac));
- s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac));
+ s->MACControlFramesTransmitted = enetc_port_rd64(hw, ENETC_PM_TCNP(mac));
+ s->MACControlFramesReceived = enetc_port_rd64(hw, ENETC_PM_RCNP(mac));
}
static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
@@ -376,26 +389,26 @@ static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
struct ethtool_rmon_stats *s)
{
- s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
- s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
- s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac));
- s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac));
-
- s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac));
- s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac));
- s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac));
- s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac));
- s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac));
- s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac));
- s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac));
-
- s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac));
- s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac));
- s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac));
- s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac));
- s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
- s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
- s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
+ s->undersize_pkts = enetc_port_rd64(hw, ENETC_PM_RUND(mac));
+ s->oversize_pkts = enetc_port_rd64(hw, ENETC_PM_ROVR(mac));
+ s->fragments = enetc_port_rd64(hw, ENETC_PM_RFRG(mac));
+ s->jabbers = enetc_port_rd64(hw, ENETC_PM_RJBR(mac));
+
+ s->hist[0] = enetc_port_rd64(hw, ENETC_PM_R64(mac));
+ s->hist[1] = enetc_port_rd64(hw, ENETC_PM_R127(mac));
+ s->hist[2] = enetc_port_rd64(hw, ENETC_PM_R255(mac));
+ s->hist[3] = enetc_port_rd64(hw, ENETC_PM_R511(mac));
+ s->hist[4] = enetc_port_rd64(hw, ENETC_PM_R1023(mac));
+ s->hist[5] = enetc_port_rd64(hw, ENETC_PM_R1522(mac));
+ s->hist[6] = enetc_port_rd64(hw, ENETC_PM_R1523X(mac));
+
+ s->hist_tx[0] = enetc_port_rd64(hw, ENETC_PM_T64(mac));
+ s->hist_tx[1] = enetc_port_rd64(hw, ENETC_PM_T127(mac));
+ s->hist_tx[2] = enetc_port_rd64(hw, ENETC_PM_T255(mac));
+ s->hist_tx[3] = enetc_port_rd64(hw, ENETC_PM_T511(mac));
+ s->hist_tx[4] = enetc_port_rd64(hw, ENETC_PM_T1023(mac));
+ s->hist_tx[5] = enetc_port_rd64(hw, ENETC_PM_T1522(mac));
+ s->hist_tx[6] = enetc_port_rd64(hw, ENETC_PM_T1523X(mac));
}
static void enetc_get_eth_mac_stats(struct net_device *ndev,
@@ -467,7 +480,8 @@ static void enetc_get_rmon_stats(struct net_device *ndev,
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
-static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc)
+static int enetc_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *rxnfc)
{
static const u32 rsshash[] = {
[TCP_V4_FLOW] = ENETC_RSSHASH_L4,
@@ -584,9 +598,6 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
case ETHTOOL_GRXRINGS:
rxnfc->data = priv->num_rx_rings;
break;
- case ETHTOOL_GRXFH:
- /* get RSS hash config */
- return enetc_get_rsshash(rxnfc);
case ETHTOOL_GRXCLSRLCNT:
/* total number of entries */
rxnfc->data = priv->si->num_fs_entries;
@@ -639,8 +650,6 @@ static int enetc4_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc
case ETHTOOL_GRXRINGS:
rxnfc->data = priv->num_rx_rings;
break;
- case ETHTOOL_GRXFH:
- return enetc_get_rsshash(rxnfc);
default:
return -EOPNOTSUPP;
}
@@ -1228,6 +1237,7 @@ const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
@@ -1258,6 +1268,7 @@ const struct ethtool_ops enetc_vf_ethtool_ops = {
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
@@ -1284,6 +1295,7 @@ const struct ethtool_ops enetc4_pf_ethtool_ops = {
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
};
void enetc_set_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 53e8d18c7a34..73763e8f4879 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -43,6 +43,9 @@
#define ENETC_SIPMAR0 0x80
#define ENETC_SIPMAR1 0x84
+#define ENETC_SICVLANR1 0x90
+#define ENETC_SICVLANR2 0x94
+#define SICVLANR_ETYPE GENMASK(15, 0)
/* VF-PF Message passing */
#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */
@@ -533,6 +536,7 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
/* port register accessors - PF only */
#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
+#define enetc_port_rd64(hw, off) _enetc_rd_reg64_wa((hw)->port + (off))
#define enetc_port_rd_mdio(hw, off) _enetc_rd_mdio_reg_wa((hw)->port + (off))
#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\
(hw)->port + (off), val)
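
enetc_port_rd64() reuses the driver's existing _enetc_rd_reg64_wa() workaround for reading a 64-bit counter through two 32-bit accesses. The usual shape of such a helper re-reads the high word until it is stable, so a low-word rollover between the two reads cannot produce a torn value (register layout assumed here, not quoted from the ENETC manual):

	static u64 rd_reg64_wa(void __iomem *reg)
	{
		u32 lo, hi, tmp;

		do {
			hi = ioread32(reg + 4);
			lo = ioread32(reg);
			tmp = ioread32(reg + 4);
		} while (hi != tmp);	/* high word changed: retry */

		return (u64)hi << 32 | lo;
	}
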
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index f63a29e2e031..de0fb272c847 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -829,19 +829,29 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
{
struct platform_device *ierb_pdev;
struct device_node *ierb_node;
+ int ret;
ierb_node = of_find_compatible_node(NULL, NULL,
"fsl,ls1028a-enetc-ierb");
- if (!ierb_node || !of_device_is_available(ierb_node))
+ if (!ierb_node)
return -ENODEV;
+ if (!of_device_is_available(ierb_node)) {
+ of_node_put(ierb_node);
+ return -ENODEV;
+ }
+
ierb_pdev = of_find_device_by_node(ierb_node);
of_node_put(ierb_node);
if (!ierb_pdev)
return -EPROBE_DEFER;
- return enetc_ierb_register_pf(ierb_pdev, pdev);
+ ret = enetc_ierb_register_pf(ierb_pdev, pdev);
+
+ put_device(&ierb_pdev->dev);
+
+ return ret;
}
static const struct enetc_si_ops enetc_psi_ops = {
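
The reworked IERB lookup makes the reference rules explicit: of_find_compatible_node() hands back the node with a reference held, so every return after a successful lookup must drop it, including the !of_device_is_available() path that previously leaked. The same discipline applies to the device reference from of_find_device_by_node(), released via put_device() once registration is done:

	np = of_find_compatible_node(NULL, NULL, "vendor,block"); /* assumed id */
	if (!np)
		return -ENODEV;			/* no ref taken */
	if (!of_device_is_available(np)) {
		of_node_put(np);		/* would otherwise leak */
		return -ENODEV;
	}
	pdev = of_find_device_by_node(np);	/* takes a device ref */
	of_node_put(np);			/* node ref no longer needed */
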
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c81f2ea588f2..5c8fdcef759b 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -14,14 +14,14 @@
#define FEC_H
/****************************************************************************/
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/bpf.h>
#include <linux/clocksource.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/net_tstamp.h>
#include <linux/pm_qos.h>
-#include <linux/bpf.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
-#include <dt-bindings/firmware/imx/rsrc.h>
-#include <linux/firmware/imx/sci.h>
#include <net/xdp.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
@@ -115,7 +115,7 @@
#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
-#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excessive collisions */
#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
@@ -342,7 +342,7 @@ struct bufdesc_ex {
#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20)
/* The number of Tx and Rx buffers. These are allocated from the page
- * pool. The code may assume these are power of two, so it it best
+ * pool. The code may assume these are power of two, so it is best
* to keep them that size.
* We don't need to allocate pages for the transmitter. We just use
* the skbuffer directly.
@@ -460,7 +460,7 @@ struct bufdesc_ex {
#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
/* Controller supports RACC register */
#define FEC_QUIRK_HAS_RACC (1 << 12)
-/* Controller supports interrupt coalesc */
+/* Controller supports interrupt coalesce */
#define FEC_QUIRK_HAS_COALESCE (1 << 13)
/* Interrupt doesn't wake CPU from deep idle */
#define FEC_QUIRK_ERR006687 (1 << 14)
@@ -495,7 +495,7 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_EEE (1 << 20)
+/* i.MX8QM ENET IP version adds a new feature to generate delayed TXC/RXC
+/* i.MX8QM ENET IP version add new feature to generate delayed TXC/RXC
* as an alternative option to make sure it works well with various PHYs.
* For the implementation of delayed clock, ENET takes synchronized 250MHz
* clocks to generate 2ns delay.
@@ -614,7 +614,6 @@ struct fec_enet_private {
unsigned int num_tx_queues;
unsigned int num_rx_queues;
- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 17e9bddb9ddd..1383918f8a3f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -22,56 +22,55 @@
* Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/pm_runtime.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/cacheflush.h>
+#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <net/page_pool/helpers.h>
-#include <net/selftests.h>
-#include <net/tso.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
+#include <linux/fec.h>
+#include <linux/filter.h>
+#include <linux/gpio/consumer.h>
#include <linux/icmp.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/crc32.h>
-#include <linux/platform_device.h>
-#include <linux/property.h>
+#include <linux/kernel.h>
#include <linux/mdio.h>
-#include <linux/phy.h>
-#include <linux/fec.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/regulator/consumer.h>
-#include <linux/if_vlan.h>
+#include <linux/phy.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
-#include <linux/mfd/syscon.h>
+#include <linux/property.h>
+#include <linux/ptrace.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/workqueue.h>
+#include <net/ip.h>
+#include <net/page_pool/helpers.h>
+#include <net/selftests.h>
+#include <net/tso.h>
#include <soc/imx/cpuidle.h>
-#include <linux/filter.h>
-#include <linux/bpf.h>
-#include <linux/bpf_trace.h>
-
-#include <asm/cacheflush.h>
#include "fec.h"
@@ -131,7 +130,7 @@ static const struct fec_devinfo fec_mvf600_info = {
FEC_QUIRK_HAS_MDIO_C45,
};
-static const struct fec_devinfo fec_imx6x_info = {
+static const struct fec_devinfo fec_imx6sx_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
@@ -196,7 +195,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
- { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
+ { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, },
{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
@@ -276,16 +275,19 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_ECR_MAGICEN BIT(2)
#define FEC_ECR_SLEEP BIT(3)
#define FEC_ECR_EN1588 BIT(4)
+#define FEC_ECR_SPEED BIT(5)
#define FEC_ECR_BYTESWP BIT(8)
/* FEC RCR bits definition */
#define FEC_RCR_LOOP BIT(0)
-#define FEC_RCR_HALFDPX BIT(1)
+#define FEC_RCR_DRT BIT(1)
#define FEC_RCR_MII BIT(2)
#define FEC_RCR_PROMISC BIT(3)
#define FEC_RCR_BC_REJ BIT(4)
#define FEC_RCR_FLOWCTL BIT(5)
+#define FEC_RCR_RGMII BIT(6)
#define FEC_RCR_RMII BIT(8)
#define FEC_RCR_10BASET BIT(9)
+#define FEC_RCR_NLC BIT(30)
/* TX WMARK bits */
#define FEC_TXWMRK_STRFWD BIT(8)
@@ -1043,7 +1045,9 @@ static void fec_enet_bd_init(struct net_device *dev)
struct page *page = txq->tx_buf[i].buf_p;
if (page)
- page_pool_put_page(page->pp, page, 0, false);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0,
+ false);
}
txq->tx_buf[i].buf_p = NULL;
@@ -1121,6 +1125,17 @@ static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
}
}
+static void fec_set_hw_mac_addr(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
+ fep->hwp + FEC_ADDR_HIGH);
+}
+
/*
* This function is called to start or restart the FEC during a link
* change, transmit timeout, or to reconfigure the FEC. The network
@@ -1130,8 +1145,7 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 temp_mac[2];
- u32 rcntl = OPT_FRAME_SIZE | 0x04;
+ u32 rcntl = OPT_FRAME_SIZE | FEC_RCR_MII;
u32 ecntl = FEC_ECR_ETHEREN;
if (fep->bufdesc_ex)
@@ -1143,11 +1157,7 @@ fec_restart(struct net_device *ndev)
* enet-mac reset will reset mac address registers too,
* so need to reconfigure it.
*/
- memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
- writel((__force u32)cpu_to_be32(temp_mac[0]),
- fep->hwp + FEC_ADDR_LOW);
- writel((__force u32)cpu_to_be32(temp_mac[1]),
- fep->hwp + FEC_ADDR_HIGH);
+ fec_set_hw_mac_addr(ndev);
/* Clear any outstanding interrupt, except MDIO. */
writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
@@ -1162,7 +1172,7 @@ fec_restart(struct net_device *ndev)
writel(0x04, fep->hwp + FEC_X_CNTRL);
} else {
/* No Rcv on Xmit */
- rcntl |= 0x02;
+ rcntl |= FEC_RCR_DRT;
writel(0x0, fep->hwp + FEC_X_CNTRL);
}
@@ -1191,14 +1201,11 @@ fec_restart(struct net_device *ndev)
*/
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* Enable flow control and length check */
- rcntl |= 0x40000000 | 0x00000020;
+ rcntl |= FEC_RCR_NLC | FEC_RCR_FLOWCTL;
/* RGMII, RMII or MII */
- if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
- rcntl |= (1 << 6);
+ if (phy_interface_mode_is_rgmii(fep->phy_interface))
+ rcntl |= FEC_RCR_RGMII;
else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
rcntl |= FEC_RCR_RMII;
else
@@ -1207,7 +1214,7 @@ fec_restart(struct net_device *ndev)
/* 1G, 100M or 10M */
if (ndev->phydev) {
if (ndev->phydev->speed == SPEED_1000)
- ecntl |= (1 << 5);
+ ecntl |= FEC_ECR_SPEED;
else if (ndev->phydev->speed == SPEED_100)
rcntl &= ~FEC_RCR_10BASET;
else
@@ -1581,7 +1588,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
xdp_return_frame_rx_napi(xdpf);
} else { /* recycle pages of XDP_TX frames */
/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
- page_pool_put_page(page->pp, page, 0, true);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
+ 0, true);
}
txq->tx_buf[index].buf_p = NULL;
@@ -1706,13 +1714,29 @@ xdp_err:
return ret;
}
+static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb)
+{
+ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ const struct vlan_ethhdr *vlan_header = skb_vlan_eth_hdr(skb);
+ const u16 vlan_tag = ntohs(vlan_header->h_vlan_TCI);
+
+ /* Push and remove the vlan tag */
+
+ memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
* effectively tossing the packet.
*/
static int
-fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_enet_priv_rx_q *rxq;
@@ -1720,11 +1744,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
unsigned short status;
struct sk_buff *skb;
ushort pkt_len;
- __u8 *data;
int pkt_received = 0;
struct bufdesc_ex *ebdp = NULL;
- bool vlan_packet_rcvd = false;
- u16 vlan_tag;
int index = 0;
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
@@ -1843,10 +1864,11 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_mark_for_recycle(skb);
if (unlikely(need_swap)) {
+ u8 *data;
+
data = page_address(page) + FEC_ENET_XDP_HEADROOM;
swap_buffer(data, pkt_len);
}
- data = skb->data;
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
@@ -1854,20 +1876,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ebdp = (struct bufdesc_ex *)bdp;
/* If this is a VLAN packet remove the VLAN Tag */
- vlan_packet_rcvd = false;
- if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- fep->bufdesc_ex &&
- (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
- /* Push and remove the vlan tag */
- struct vlan_hdr *vlan_header =
- (struct vlan_hdr *) (data + ETH_HLEN);
- vlan_tag = ntohs(vlan_header->h_vlan_TCI);
-
- vlan_packet_rcvd = true;
-
- memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
- skb_pull(skb, VLAN_HLEN);
- }
+ if (fep->bufdesc_ex &&
+ (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN)))
+ fec_enet_rx_vlan(ndev, skb);
skb->protocol = eth_type_trans(skb, ndev);
@@ -1886,12 +1897,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
}
}
- /* Handle received VLAN packets */
- if (vlan_packet_rcvd)
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
-
skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
@@ -1939,7 +1944,7 @@ static int fec_enet_rx(struct net_device *ndev, int budget)
/* Make sure that AVB queues are processed first. */
for (i = fep->num_rx_queues - 1; i >= 0; i--)
- done += fec_enet_rx_queue(ndev, budget - done, i);
+ done += fec_enet_rx_queue(ndev, i, budget - done);
return done;
}
@@ -3124,27 +3129,25 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int rx_itr, tx_itr;
+ u32 rx_itr = 0, tx_itr = 0;
+ int rx_ictt, tx_ictt;
- /* Must be greater than zero to avoid unpredictable behavior */
- if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
- !fep->tx_time_itr || !fep->tx_pkts_itr)
- return;
-
- /* Select enet system clock as Interrupt Coalescing
- * timer Clock Source
- */
- rx_itr = FEC_ITR_CLK_SEL;
- tx_itr = FEC_ITR_CLK_SEL;
+ rx_ictt = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+ tx_ictt = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
- /* set ICFT and ICTT */
- rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
- rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
- tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
- tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
+ if (rx_ictt > 0 && fep->rx_pkts_itr > 1) {
+ /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */
+ rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
+ rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+ rx_itr |= FEC_ITR_ICTT(rx_ictt);
+ }
- rx_itr |= FEC_ITR_EN;
- tx_itr |= FEC_ITR_EN;
+ if (tx_ictt > 0 && fep->tx_pkts_itr > 1) {
+ /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */
+ tx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
+ tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+ tx_itr |= FEC_ITR_ICTT(tx_ictt);
+ }
writel(tx_itr, fep->hwp + FEC_TXIC0);
writel(rx_itr, fep->hwp + FEC_RXIC0);
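
Two behavioral points in the rewrite above: the old early return left the previous hardware setting in place whenever any one of the four parameters was zero, and coalescing is now configured per direction, with an all-zero write meaning off for that direction. Reduced to one direction:

	u32 itr = 0;

	if (ictt > 0 && pkts > 1)	/* per-direction inputs (assumed names) */
		itr = FEC_ITR_EN | FEC_ITR_CLK_SEL |
		      FEC_ITR_ICFT(pkts) | FEC_ITR_ICTT(ictt);
	writel(itr, fep->hwp + FEC_RXIC0);	/* 0 disables coalescing */
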
@@ -3348,7 +3351,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
} else {
struct page *page = txq->tx_buf[i].buf_p;
- page_pool_put_page(page->pp, page, 0, false);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0, false);
}
txq->tx_buf[i].buf_p = NULL;
@@ -3699,7 +3703,6 @@ static void set_multicast_list(struct net_device *ndev)
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
struct sockaddr *addr = p;
if (addr) {
@@ -3716,11 +3719,8 @@ fec_set_mac_address(struct net_device *ndev, void *p)
if (!netif_running(ndev))
return 0;
- writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
- (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
- fep->hwp + FEC_ADDR_LOW);
- writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
- fep->hwp + FEC_ADDR_HIGH);
+ fec_set_hw_mac_addr(ndev);
+
return 0;
}
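
fec_set_hw_mac_addr() deduplicates the register packing that fec_restart() and fec_set_mac_address() used to open-code, and drops the temp_mac/cpu_to_be32 cast games. The packing is most-significant byte first; for a hypothetical address 00:11:22:33:44:55:

	u8 a[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u32 lo = a[3] | a[2] << 8 | a[1] << 16 | a[0] << 24;	/* 0x00112233 */
	u32 hi = a[5] << 16 | a[4] << 24;			/* 0x44550000 */
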
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 2bfaf14f65c8..3fc29afc9854 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -619,7 +619,7 @@ static void mpc52xx_fec_hw_init(struct net_device *dev)
out_be32(&fec->rfifo_alarm, 0x0000030c);
out_be32(&fec->tfifo_alarm, 0x00000100);
- /* begin transmittion when 256 bytes are in FIFO (or EOF or FIFO full) */
+ /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */
out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);
/* enable crc generation */
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index c28ca17a81fd..fa88b47d526c 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -7,30 +7,30 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
+#include <linux/fec.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-#include <linux/fec.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptrace.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
#include "fec.h"
@@ -117,7 +117,7 @@ static u64 fec_ptp_read(struct cyclecounter *cc)
* @fep: the fec_enet_private structure handle
* @enable: enable the channel pps output
*
- * This function enble the PPS ouput on the timer channel.
+ * This function enables the PPS output on the timer channel.
*/
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
@@ -172,7 +172,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* very close to the second point, which means NSEC_PER_SEC
* - ts.tv_nsec is close to zero (for example 20ns); since the timer
* is still running when we calculate the first compare event, it is
- * possible that the remaining nanoseonds run out before the compare
+ * possible that the remaining nanoseconds run out before the compare
* counter is calculated and written into TCCR register. To avoid
* this possibility, we will set the compare event to be the next
* of next second. The current setting is 31-bit timer and wrap
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 3925441143fa..0291093f2e4e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1225,7 +1225,7 @@ int memac_initialization(struct mac_device *mac_dev,
* be careful and not enable this if we are using MII or RGMII, since
* those configurations modes don't use in-band autonegotiation.
*/
- if (!of_property_read_bool(mac_node, "managed") &&
+ if (!of_property_present(mac_node, "managed") &&
mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
!phy_interface_mode_is_rgmii(mac_dev->phy_if))
mac_dev->phylink_config.default_an_inband = true;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 56d2f79fb7e3..577f9b1780ad 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -491,8 +491,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
err = of_mdiobus_register(new_bus, np);
if (err) {
- dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
- new_bus->name);
+ dev_err_probe(&pdev->dev, err, "cannot register %s as MDIO bus\n",
+ new_bus->name);
goto error;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index bcbcad613512..7c0f049f0938 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -97,6 +97,7 @@
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/property.h>
#include "gianfar.h"
@@ -571,18 +572,6 @@ static int gfar_parse_group(struct device_node *np,
return 0;
}
-static int gfar_of_group_count(struct device_node *np)
-{
- struct device_node *child;
- int num = 0;
-
- for_each_available_child_of_node(np, child)
- if (of_node_name_eq(child, "queue-group"))
- num++;
-
- return num;
-}
-
/* Reads the controller's registers to determine what interface
* connects it to the PHY.
*/
@@ -654,8 +643,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
num_rx_qs = 1;
} else { /* MQ_MG_MODE */
/* get the actual number of supported groups */
- unsigned int num_grps = gfar_of_group_count(np);
+ unsigned int num_grps;
+ num_grps = device_get_named_child_node_count(&ofdev->dev,
+ "queue-group");
if (num_grps == 0 || num_grps > MAXGROUPS) {
dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
num_grps);
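
The removed gfar_of_group_count() walk is replaced by a generic fwnode helper. A minimal sketch of the replacement call (function name is illustrative), which counts available children named "queue-group" and works for OF- and ACPI-backed devices alike:

    #include <linux/property.h>

    static unsigned int count_queue_groups(struct device *dev)
    {
            return device_get_named_child_node_count(dev, "queue-group");
    }
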
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 781d92e703cb..5fd1f7327680 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -781,14 +781,26 @@ err:
return ret;
}
-static int gfar_set_hash_opts(struct gfar_private *priv,
- struct ethtool_rxnfc *cmd)
+static int gfar_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret;
+
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return -EBUSY;
+
+ mutex_lock(&priv->rx_queue_access);
+
+ ret = 0;
/* write the filer rules here */
if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
- return -EINVAL;
+ ret = -EINVAL;
- return 0;
+ mutex_unlock(&priv->rx_queue_access);
+
+ return ret;
}
static int gfar_check_filer_hardware(struct gfar_private *priv)
@@ -1398,9 +1410,6 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
mutex_lock(&priv->rx_queue_access);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = gfar_set_hash_opts(priv, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
cmd->fs.ring_cookie >= priv->num_rx_queues) ||
@@ -1466,8 +1475,10 @@ static int gfar_get_ts_info(struct net_device *dev,
if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
of_node_put(ptp_node);
- if (ptp_dev)
+ if (ptp_dev) {
ptp = platform_get_drvdata(ptp_dev);
+ put_device(&ptp_dev->dev);
+ }
}
if (ptp)
@@ -1508,6 +1519,7 @@ const struct ethtool_ops gfar_ethtool_ops = {
#endif
.set_rxnfc = gfar_set_nfc,
.get_rxnfc = gfar_get_nfc,
+ .set_rxfh_fields = gfar_set_rxfh_fields,
.get_ts_info = gfar_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
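
For context, and as an assumption about the surrounding ethtool rework rather than something shown in this hunk: the core now routes flow-hash configuration to a dedicated .set_rxfh_fields op, which is why the ETHTOOL_SRXFH arm disappears from gfar_set_nfc(). In sketch form:

    /* Hash-field configuration moves to its own callback; .set_rxnfc keeps
     * only the classification-rule commands (SRXCLSRLINS/SRXCLSRLDEL). */
    static const struct ethtool_ops sketch_ops = {
            .set_rxnfc       = gfar_set_nfc,
            .set_rxfh_fields = gfar_set_rxfh_fields, /* was ETHTOOL_SRXFH */
    };
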
diff --git a/drivers/net/ethernet/google/Kconfig b/drivers/net/ethernet/google/Kconfig
index 564862a57124..14c9431e15e5 100644
--- a/drivers/net/ethernet/google/Kconfig
+++ b/drivers/net/ethernet/google/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_GOOGLE
config GVE
tristate "Google Virtual NIC (gVNIC) support"
depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN))
+ depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
help
This driver supports Google Virtual NIC (gVNIC).
diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile
index 4520f1c07a63..e0ec227a50f7 100644
--- a/drivers/net/ethernet/google/gve/Makefile
+++ b/drivers/net/ethernet/google/gve/Makefile
@@ -1,5 +1,7 @@
# Makefile for the Google virtual Ethernet (gve) driver
obj-$(CONFIG_GVE) += gve.o
-gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \
+gve-y := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \
gve_buffer_mgmt_dqo.o
+
+gve-$(CONFIG_PTP_1588_CLOCK) += gve_ptp.o
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 2fab38c8ee78..bceaf9b05cb4 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -11,7 +11,9 @@
#include <linux/dmapool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/pci.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/u64_stats_sync.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
@@ -188,6 +190,9 @@ struct gve_rx_buf_state_dqo {
/* The page posted to HW. */
struct gve_rx_slot_page_info page_info;
+ /* XSK buffer */
+ struct xdp_buff *xsk_buff;
+
/* The DMA address corresponding to `page_info`. */
dma_addr_t addr;
@@ -329,7 +334,6 @@ struct gve_rx_ring {
/* XDP stuff */
struct xdp_rxq_info xdp_rxq;
- struct xdp_rxq_info xsk_rxq;
struct xsk_buff_pool *xsk_pool;
struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};
@@ -398,10 +402,24 @@ enum gve_packet_state {
GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
/* No valid completion received within the specified timeout. */
GVE_PACKET_STATE_TIMED_OUT_COMPL,
+ /* XSK pending packet has received a packet/reinjection completion, or
+ * has timed out. At this point, the pending packet can be counted by
+ * xsk_tx_complete and freed.
+ */
+ GVE_PACKET_STATE_XSK_COMPLETE,
+};
+
+enum gve_tx_pending_packet_dqo_type {
+ GVE_TX_PENDING_PACKET_DQO_SKB,
+ GVE_TX_PENDING_PACKET_DQO_XDP_FRAME,
+ GVE_TX_PENDING_PACKET_DQO_XSK,
};
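
A hedged sketch of how the new 2-bit tag is meant to disambiguate the union on completion (the function and the free calls below are illustrative, not the driver's actual completion path):

    static void sketch_complete(struct gve_tx_pending_packet_dqo *pkt)
    {
            switch (pkt->type) {
            case GVE_TX_PENDING_PACKET_DQO_SKB:
                    napi_consume_skb(pkt->skb, 1);  /* skb member is live */
                    break;
            case GVE_TX_PENDING_PACKET_DQO_XDP_FRAME:
                    xdp_return_frame(pkt->xdpf);    /* xdpf member is live */
                    break;
            case GVE_TX_PENDING_PACKET_DQO_XSK:
                    /* counted and freed later via the XSK completion path */
                    break;
            }
    }
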
struct gve_tx_pending_packet_dqo {
- struct sk_buff *skb; /* skb for this packet */
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
/* 0th element corresponds to the linear portion of `skb`, should be
* unmapped with `dma_unmap_single`.
@@ -431,7 +449,10 @@ struct gve_tx_pending_packet_dqo {
/* Identifies the current state of the packet as defined in
* `enum gve_packet_state`.
*/
- u8 state;
+ u8 state : 3;
+
+ /* gve_tx_pending_packet_dqo_type */
+ u8 type : 2;
/* If packet is an outstanding miss completion, then the packet is
* freed if the corresponding re-injection completion is not received
@@ -453,6 +474,9 @@ struct gve_tx_ring {
/* DQO fields. */
struct {
+ /* Spinlock for XDP tx traffic */
+ spinlock_t xdp_lock;
+
/* Linked list of gve_tx_pending_packet_dqo. Index into
* pending_packets, or -1 if empty.
*
@@ -497,6 +521,8 @@ struct gve_tx_ring {
/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
u32 free_tx_qpl_buf_cnt;
};
+
+ atomic_t xsk_reorder_queue_tail;
} dqo_tx;
};
@@ -530,6 +556,9 @@ struct gve_tx_ring {
/* Last TX ring index fetched by HW */
atomic_t hw_tx_head;
+ u16 xsk_reorder_queue_head;
+ u16 xsk_reorder_queue_tail;
+
/* List to track pending packets which received a miss
* completion but not a corresponding reinjection.
*/
@@ -583,6 +612,8 @@ struct gve_tx_ring {
struct gve_tx_pending_packet_dqo *pending_packets;
s16 num_pending_packets;
+ u16 *xsk_reorder_queue;
+
u32 complq_mask; /* complq size is complq_mask + 1 */
/* QPL fields */
@@ -750,6 +781,12 @@ struct gve_rss_config {
u32 *hash_lut;
};
+struct gve_ptp {
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct gve_priv *priv;
+};
+
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -781,7 +818,9 @@ struct gve_priv {
struct gve_tx_queue_config tx_cfg;
struct gve_rx_queue_config rx_cfg;
- u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
+ unsigned long *xsk_pools; /* bitmap of RX queues with XSK pools */
+ u32 num_ntfy_blks; /* split between TX and RX so must be even */
+ int numa_node;
struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
__be32 __iomem *db_bar2; /* "array" of doorbells */
@@ -813,6 +852,7 @@ struct gve_priv {
u32 adminq_set_driver_parameter_cnt;
u32 adminq_report_stats_cnt;
u32 adminq_report_link_speed_cnt;
+ u32 adminq_report_nic_timestamp_cnt;
u32 adminq_get_ptype_map_cnt;
u32 adminq_verify_driver_compatibility_cnt;
u32 adminq_query_flow_rules_cnt;
@@ -870,6 +910,14 @@ struct gve_priv {
u16 rss_lut_size;
bool cache_rss_config;
struct gve_rss_config rss_config;
+
+ /* True if the device supports reading the nic clock */
+ bool nic_timestamp_supported;
+ struct gve_ptp *ptp;
+ struct kernel_hwtstamp_config ts_config;
+ struct gve_nic_ts_report *nic_ts_report;
+ dma_addr_t nic_ts_report_bus;
+ u64 last_sync_nic_counter; /* Clock counter from last NIC TS report */
};
enum gve_service_task_flags_bit {
@@ -1138,6 +1186,7 @@ static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
{
switch (priv->queue_format) {
case GVE_GQI_QPL_FORMAT:
+ case GVE_DQO_RDA_FORMAT:
return true;
default:
return false;
@@ -1161,11 +1210,15 @@ void gve_free_queue_page_list(struct gve_priv *priv,
u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
-int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
- u32 flags);
+int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
+int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct xdp_frame *xdpf);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
@@ -1249,6 +1302,24 @@ int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
/* RSS config */
int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
+/* PTP and timestamping */
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int gve_clock_nic_ts_read(struct gve_priv *priv);
+int gve_init_clock(struct gve_priv *priv);
+void gve_teardown_clock(struct gve_priv *priv);
+#else /* CONFIG_PTP_1588_CLOCK */
+static inline int gve_clock_nic_ts_read(struct gve_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int gve_init_clock(struct gve_priv *priv)
+{
+ return 0;
+}
+
+static inline void gve_teardown_clock(struct gve_priv *priv) { }
+#endif /* CONFIG_PTP_1588_CLOCK */
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 3e8fc33cc11f..4f33d094a2ef 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -46,6 +46,7 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
struct gve_device_option_rss_config **dev_op_rss_config,
+ struct gve_device_option_nic_timestamp **dev_op_nic_timestamp,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
@@ -225,6 +226,23 @@ void gve_parse_device_option(struct gve_priv *priv,
"RSS config");
*dev_op_rss_config = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_NIC_TIMESTAMP:
+ if (option_length < sizeof(**dev_op_nic_timestamp) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Nic Timestamp",
+ (int)sizeof(**dev_op_nic_timestamp),
+ GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_nic_timestamp))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Nic Timestamp");
+ *dev_op_nic_timestamp = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -246,6 +264,7 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
struct gve_device_option_rss_config **dev_op_rss_config,
+ struct gve_device_option_nic_timestamp **dev_op_nic_timestamp,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
@@ -269,6 +288,7 @@ gve_process_device_options(struct gve_priv *priv,
dev_op_dqo_rda, dev_op_jumbo_frames,
dev_op_dqo_qpl, dev_op_buffer_sizes,
dev_op_flow_steering, dev_op_rss_config,
+ dev_op_nic_timestamp,
dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -306,6 +326,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_set_driver_parameter_cnt = 0;
priv->adminq_report_stats_cnt = 0;
priv->adminq_report_link_speed_cnt = 0;
+ priv->adminq_report_nic_timestamp_cnt = 0;
priv->adminq_get_ptype_map_cnt = 0;
priv->adminq_query_flow_rules_cnt = 0;
priv->adminq_cfg_flow_rule_cnt = 0;
@@ -442,6 +463,8 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
int tail, head;
int i;
+ lockdep_assert_held(&priv->adminq_lock);
+
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
head = priv->adminq_prod_cnt;
@@ -467,9 +490,6 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
return 0;
}
-/* This function is not threadsafe - the caller is responsible for any
- * necessary locks.
- */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
union gve_adminq_command *cmd_orig)
{
@@ -477,6 +497,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
u32 opcode;
u32 tail;
+ lockdep_assert_held(&priv->adminq_lock);
+
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
// Check if next command will overflow the buffer.
@@ -544,6 +566,9 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_REPORT_LINK_SPEED:
priv->adminq_report_link_speed_cnt++;
break;
+ case GVE_ADMINQ_REPORT_NIC_TIMESTAMP:
+ priv->adminq_report_nic_timestamp_cnt++;
+ break;
case GVE_ADMINQ_GET_PTYPE_MAP:
priv->adminq_get_ptype_map_cnt++;
break;
@@ -564,6 +589,7 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
+ return -EINVAL;
}
return 0;
@@ -625,7 +651,7 @@ static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode,
/* The device specifies that the management vector can either be the first irq
* or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
- * the ntfy blks. It if is 0 then the management vector is last, if it is 1 then
+ * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
* the management vector is first.
*
* gve arranges the msix vectors so that the management vector is last.
@@ -709,13 +735,19 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_create_tx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
@@ -788,13 +820,19 @@ int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = 0; i < num_queues; i++) {
err = gve_adminq_create_rx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
@@ -820,13 +858,19 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_destroy_tx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
@@ -861,13 +905,19 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = 0; i < num_queues; i++) {
err = gve_adminq_destroy_rx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_set_default_desc_cnt(struct gve_priv *priv,
@@ -904,6 +954,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
*dev_op_flow_steering,
const struct gve_device_option_rss_config
*dev_op_rss_config,
+ const struct gve_device_option_nic_timestamp
+ *dev_op_nic_timestamp,
const struct gve_device_option_modify_ring
*dev_op_modify_ring)
{
@@ -980,10 +1032,15 @@ static void gve_enable_supported_features(struct gve_priv *priv,
"RSS device option enabled with key size of %u, lut size of %u.\n",
priv->rss_key_size, priv->rss_lut_size);
}
+
+ if (dev_op_nic_timestamp &&
+ (supported_features_mask & GVE_SUP_NIC_TIMESTAMP_MASK))
+ priv->nic_timestamp_supported = true;
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_nic_timestamp *dev_op_nic_timestamp = NULL;
struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
@@ -1024,6 +1081,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
&dev_op_buffer_sizes,
&dev_op_flow_steering,
&dev_op_rss_config,
+ &dev_op_nic_timestamp,
&dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -1088,7 +1146,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
dev_op_buffer_sizes, dev_op_flow_steering,
- dev_op_rss_config, dev_op_modify_ring);
+ dev_op_rss_config, dev_op_nic_timestamp,
+ dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
@@ -1200,6 +1259,22 @@ int gve_adminq_report_link_speed(struct gve_priv *priv)
return err;
}
+int gve_adminq_report_nic_ts(struct gve_priv *priv,
+ dma_addr_t nic_ts_report_addr)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_NIC_TIMESTAMP);
+ cmd.report_nic_ts = (struct gve_adminq_report_nic_ts) {
+ .nic_ts_report_len =
+ cpu_to_be64(sizeof(struct gve_nic_ts_report)),
+ .nic_ts_report_addr = cpu_to_be64(nic_ts_report_addr),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
struct gve_ptype_lut *ptype_lut)
{
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 228217458275..22a74b6aa17e 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -27,6 +27,7 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
GVE_ADMINQ_QUERY_FLOW_RULES = 0x10,
+ GVE_ADMINQ_REPORT_NIC_TIMESTAMP = 0x11,
GVE_ADMINQ_QUERY_RSS = 0x12,
/* For commands that are larger than 56 bytes */
@@ -174,6 +175,12 @@ struct gve_device_option_rss_config {
static_assert(sizeof(struct gve_device_option_rss_config) == 8);
+struct gve_device_option_nic_timestamp {
+ __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_nic_timestamp) == 4);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -192,6 +199,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
+ GVE_DEV_OPT_ID_NIC_TIMESTAMP = 0xd,
GVE_DEV_OPT_ID_RSS_CONFIG = 0xe,
};
@@ -206,6 +214,7 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP = 0x0,
};
enum gve_sup_feature_mask {
@@ -214,6 +223,7 @@ enum gve_sup_feature_mask {
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
GVE_SUP_RSS_CONFIG_MASK = 1 << 7,
+ GVE_SUP_NIC_TIMESTAMP_MASK = 1 << 8,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -392,6 +402,21 @@ struct gve_adminq_report_link_speed {
static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
+struct gve_adminq_report_nic_ts {
+ __be64 nic_ts_report_len;
+ __be64 nic_ts_report_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_report_nic_ts) == 16);
+
+struct gve_nic_ts_report {
+ __be64 nic_timestamp; /* NIC clock in nanoseconds */
+ __be64 reserved1;
+ __be64 reserved2;
+ __be64 reserved3;
+ __be64 reserved4;
+};
+
struct stats {
__be32 stat_name;
__be32 queue_id;
@@ -451,7 +476,7 @@ struct gve_ptype_entry {
};
struct gve_ptype_map {
- struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
+ struct gve_ptype_entry ptypes[GVE_NUM_PTYPES]; /* PTYPES are always 10 bits. */
};
struct gve_adminq_get_ptype_map {
@@ -585,6 +610,7 @@ union gve_adminq_command {
struct gve_adminq_query_flow_rules query_flow_rules;
struct gve_adminq_configure_rss configure_rss;
struct gve_adminq_query_rss query_rss;
+ struct gve_adminq_report_nic_ts report_nic_ts;
struct gve_adminq_extended_command extended_command;
};
};
@@ -624,6 +650,8 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv);
int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);
int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
+int gve_adminq_report_nic_ts(struct gve_priv *priv,
+ dma_addr_t nic_ts_report_addr);
struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index a71883e1d920..8f5021e59e0a 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -4,6 +4,7 @@
* Copyright (C) 2015-2024 Google, Inc.
*/
+#include <net/xdp_sock_drv.h>
#include "gve.h"
#include "gve_utils.h"
@@ -29,6 +30,10 @@ struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
/* Point buf_state to itself to mark it as allocated */
buf_state->next = buffer_id;
+ /* Clear the buffer pointers */
+ buf_state->page_info.page = NULL;
+ buf_state->xsk_buff = NULL;
+
return buf_state;
}
@@ -246,6 +251,7 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.order = 0,
.pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt,
+ .nid = priv->numa_node,
.dev = &priv->pdev->dev,
.netdev = priv->dev,
.napi = &priv->ntfy_blocks[ntfy_id].napi,
@@ -285,7 +291,24 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
{
struct gve_rx_buf_state_dqo *buf_state;
- if (rx->dqo.page_pool) {
+ if (rx->xsk_pool) {
+ buf_state = gve_alloc_buf_state(rx);
+ if (unlikely(!buf_state))
+ return -ENOMEM;
+
+ buf_state->xsk_buff = xsk_buff_alloc(rx->xsk_pool);
+ if (unlikely(!buf_state->xsk_buff)) {
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ gve_free_buf_state(rx, buf_state);
+ return -ENOMEM;
+ }
+ /* Allocated xsk buffer. Clear wakeup in case it was set. */
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+ desc->buf_addr =
+ cpu_to_le64(xsk_buff_xdp_get_dma(buf_state->xsk_buff));
+ return 0;
+ } else if (rx->dqo.page_pool) {
buf_state = gve_alloc_buf_state(rx);
if (WARN_ON_ONCE(!buf_state))
return -ENOMEM;
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
index f79cd0591110..d17da841b5a0 100644
--- a/drivers/net/ethernet/google/gve/gve_desc_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
@@ -247,7 +247,8 @@ struct gve_rx_compl_desc_dqo {
};
__le32 hash;
__le32 reserved6;
- __le64 reserved7;
+ __le32 reserved7;
+ __le32 ts; /* timestamp in nanosecs */
} __packed;
static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32);
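
A standalone sketch of why the split is layout-neutral: two 32-bit fields occupy exactly the bytes of the old 64-bit reserved word, so the existing 32-byte static_assert continues to hold:

    #include <assert.h>
    #include <stdint.h>

    struct old_tail { uint64_t reserved7; };
    struct new_tail { uint32_t reserved7; uint32_t ts; };

    static_assert(sizeof(struct new_tail) == sizeof(struct old_tail),
                  "adding ts must not change the descriptor layout");
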
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index e83773fb891f..6eb442096e02 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -37,6 +37,8 @@ netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
+bool gve_xdp_poll_dqo(struct gve_notify_block *block);
+bool gve_xsk_tx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
@@ -60,6 +62,7 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct napi_struct *napi);
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx);
+void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid);
static inline void
gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 3c1da0cf3f61..d0a223250845 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -76,7 +76,7 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] __nonstring_array
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
"adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
- "adminq_query_rss_cnt",
+ "adminq_query_rss_cnt", "adminq_report_nic_timestamp_cnt",
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -456,6 +456,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_cfg_flow_rule_cnt;
data[i++] = priv->adminq_cfg_rss_cnt;
data[i++] = priv->adminq_query_rss_cnt;
+ data[i++] = priv->adminq_report_nic_timestamp_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -667,7 +668,7 @@ static u32 gve_get_priv_flags(struct net_device *netdev)
struct gve_priv *priv = netdev_priv(netdev);
u32 ret_flags = 0;
- /* Only 1 flag exists currently: report-stats (BIT(O)), so set that flag. */
+ /* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
if (priv->ethtool_flags & BIT(0))
ret_flags |= BIT(0);
return ret_flags;
@@ -798,9 +799,6 @@ static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
err = gve_del_flow_rule(priv, cmd);
break;
- case ETHTOOL_SRXFH:
- err = -EOPNOTSUPP;
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -835,9 +833,6 @@ static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u
case ETHTOOL_GRXCLSRLALL:
err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- err = -EOPNOTSUPP;
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -928,6 +923,27 @@ static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rx
return 0;
}
+static int gve_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ ethtool_op_get_ts_info(netdev, info);
+
+ if (priv->nic_timestamp_supported) {
+ info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ if (priv->ptp)
+ info->phc_index = ptp_clock_index(priv->ptp->clock);
+ }
+
+ return 0;
+}
+
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
@@ -956,5 +972,5 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_priv_flags = gve_get_priv_flags,
.set_priv_flags = gve_set_priv_flags,
.get_link_ksettings = gve_get_link_ksettings,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = gve_get_ts_info,
};
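
A userspace sketch of the effect of this change (interface handling is an assumption): ethtool -T, or the raw ETHTOOL_GET_TS_INFO ioctl below, now reports RX hardware timestamping support and the PHC index that gve_get_ts_info() fills in:

    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* fd is any open AF_INET datagram socket; returns phc_index or -1. */
    int query_phc_index(int fd, const char *ifname)
    {
            struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&info;

            if (ioctl(fd, SIOCETHTOOL, &ifr))
                    return -1;
            return info.phc_index; /* -1 when no PHC is registered */
    }
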
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index d1aeb722d48f..1f411d7c4373 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -4,6 +4,7 @@
* Copyright (C) 2015-2024 Google LLC
*/
+#include <linux/bitmap.h>
#include <linux/bpf.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
@@ -414,14 +415,24 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
bool reschedule = false;
int work_done = 0;
- if (block->tx)
- reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ if (block->tx) {
+ if (block->tx->q_num < priv->tx_cfg.num_queues)
+ reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ else
+ reschedule |= gve_xdp_poll_dqo(block);
+ }
if (!budget)
return 0;
if (block->rx) {
work_done = gve_rx_poll_dqo(block, budget);
+
+ /* Poll XSK TX as part of RX NAPI. Set up re-polling based on whether
+ * either datapath has more work to do.
+ */
+ if (priv->xdp_prog)
+ reschedule |= gve_xsk_tx_poll_dqo(block, budget);
reschedule |= work_done == budget;
}
@@ -457,10 +468,19 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
return work_done;
}
+static const struct cpumask *gve_get_node_mask(struct gve_priv *priv)
+{
+ if (priv->numa_node == NUMA_NO_NODE)
+ return cpu_all_mask;
+ else
+ return cpumask_of_node(priv->numa_node);
+}
+
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
int num_vecs_requested = priv->num_ntfy_blks + 1;
- unsigned int active_cpus;
+ const struct cpumask *node_mask;
+ unsigned int cur_cpu;
int vecs_enabled;
int i, j;
int err;
@@ -499,8 +519,6 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
}
- /* Half the notification blocks go to TX and half to RX */
- active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
/* Setup Management Vector - the last vector */
snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
@@ -529,6 +547,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
}
/* Setup the other blocks - the first n-1 vectors */
+ node_mask = gve_get_node_mask(priv);
+ cur_cpu = cpumask_first(node_mask);
for (i = 0; i < priv->num_ntfy_blks; i++) {
struct gve_notify_block *block = &priv->ntfy_blocks[i];
int msix_idx = i;
@@ -545,9 +565,17 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
goto abort_with_some_ntfy_blocks;
}
block->irq = priv->msix_vectors[msix_idx].vector;
- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
- get_cpu_mask(i % active_cpus));
+ irq_set_affinity_and_hint(block->irq,
+ cpumask_of(cur_cpu));
block->irq_db_index = &priv->irq_db_indices[i].index;
+
+ cur_cpu = cpumask_next(cur_cpu, node_mask);
+ /* Wrap once CPUs in the node have been exhausted, or when
+ * starting RX queue affinities. TX and RX queues of the same
+ * index share affinity.
+ */
+ if (cur_cpu >= nr_cpu_ids || (i + 1) == priv->tx_cfg.max_queues)
+ cur_cpu = cpumask_first(node_mask);
}
return 0;
abort_with_some_ntfy_blocks:
@@ -619,9 +647,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_alloc_counter_array(priv);
if (err)
goto abort_with_rss_config_cache;
- err = gve_alloc_notify_blocks(priv);
+ err = gve_init_clock(priv);
if (err)
goto abort_with_counter;
+ err = gve_alloc_notify_blocks(priv);
+ if (err)
+ goto abort_with_clock;
err = gve_alloc_stats_report(priv);
if (err)
goto abort_with_ntfy_blocks;
@@ -674,6 +705,8 @@ abort_with_stats_report:
gve_free_stats_report(priv);
abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
+abort_with_clock:
+ gve_teardown_clock(priv);
abort_with_counter:
gve_free_counter_array(priv);
abort_with_rss_config_cache:
@@ -722,6 +755,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
+ gve_teardown_clock(priv);
gve_clear_device_resources_ok(priv);
}
@@ -1030,7 +1064,7 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
enum dma_data_direction dir, gfp_t gfp_flags)
{
- *page = alloc_page(gfp_flags);
+ *page = alloc_pages_node(priv->numa_node, gfp_flags, 0);
if (!*page) {
priv->page_alloc_fail++;
return -ENOMEM;
@@ -1131,18 +1165,84 @@ static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);
+static void gve_unreg_xsk_pool(struct gve_priv *priv, u16 qid)
+{
+ struct gve_rx_ring *rx;
+
+ if (!priv->rx)
+ return;
+
+ rx = &priv->rx[qid];
+ rx->xsk_pool = NULL;
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg_mem_model(&rx->xdp_rxq);
+
+ if (!priv->tx)
+ return;
+ priv->tx[gve_xdp_tx_queue_id(priv, qid)].xsk_pool = NULL;
+}
+
+static int gve_reg_xsk_pool(struct gve_priv *priv, struct net_device *dev,
+ struct xsk_buff_pool *pool, u16 qid)
+{
+ struct gve_rx_ring *rx;
+ u16 tx_qid;
+ int err;
+
+ rx = &priv->rx[qid];
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL, pool);
+ if (err) {
+ gve_unreg_xsk_pool(priv, qid);
+ return err;
+ }
+
+ rx->xsk_pool = pool;
+
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
+ priv->tx[tx_qid].xsk_pool = pool;
+
+ return 0;
+}
+
+static void gve_unreg_xdp_info(struct gve_priv *priv)
+{
+ int i;
+
+ if (!priv->tx_cfg.num_xdp_queues || !priv->rx)
+ return;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct gve_rx_ring *rx = &priv->rx[i];
+
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+
+ gve_unreg_xsk_pool(priv, i);
+ }
+}
+
+static struct xsk_buff_pool *gve_get_xsk_pool(struct gve_priv *priv, int qid)
+{
+ if (!test_bit(qid, priv->xsk_pools))
+ return NULL;
+
+ return xsk_get_pool_from_qid(priv->dev, qid);
+}
+
static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
{
struct napi_struct *napi;
struct gve_rx_ring *rx;
int err = 0;
- int i, j;
- u32 tx_qid;
+ int i;
if (!priv->tx_cfg.num_xdp_queues)
return 0;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct xsk_buff_pool *xsk_pool;
+
rx = &priv->rx[i];
napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
@@ -1150,7 +1250,11 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
napi->napi_id);
if (err)
goto err;
- if (gve_is_qpl(priv))
+
+ xsk_pool = gve_get_xsk_pool(priv, i);
+ if (xsk_pool)
+ err = gve_reg_xsk_pool(priv, dev, xsk_pool, i);
+ else if (gve_is_qpl(priv))
err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
NULL);
@@ -1160,60 +1264,14 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
rx->dqo.page_pool);
if (err)
goto err;
- rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
- if (rx->xsk_pool) {
- err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
- napi->napi_id);
- if (err)
- goto err;
- err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
- MEM_TYPE_XSK_BUFF_POOL, NULL);
- if (err)
- goto err;
- xsk_pool_set_rxq_info(rx->xsk_pool,
- &rx->xsk_rxq);
- }
- }
-
- for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
- tx_qid = gve_xdp_tx_queue_id(priv, i);
- priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
}
return 0;
err:
- for (j = i; j >= 0; j--) {
- rx = &priv->rx[j];
- if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
- xdp_rxq_info_unreg(&rx->xdp_rxq);
- if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
- xdp_rxq_info_unreg(&rx->xsk_rxq);
- }
+ gve_unreg_xdp_info(priv);
return err;
}
-static void gve_unreg_xdp_info(struct gve_priv *priv)
-{
- int i, tx_qid;
-
- if (!priv->tx_cfg.num_xdp_queues || !priv->rx || !priv->tx)
- return;
-
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- struct gve_rx_ring *rx = &priv->rx[i];
-
- xdp_rxq_info_unreg(&rx->xdp_rxq);
- if (rx->xsk_pool) {
- xdp_rxq_info_unreg(&rx->xsk_rxq);
- rx->xsk_pool = NULL;
- }
- }
-
- for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
- tx_qid = gve_xdp_tx_queue_id(priv, i);
- priv->tx[tx_qid].xsk_pool = NULL;
- }
-}
static void gve_drain_page_cache(struct gve_priv *priv)
{
@@ -1510,14 +1568,24 @@ out:
return err;
}
+static int gve_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+ return gve_xdp_xmit_gqi(dev, n, frames, flags);
+ else if (priv->queue_format == GVE_DQO_RDA_FORMAT)
+ return gve_xdp_xmit_dqo(dev, n, frames, flags);
+
+ return -EOPNOTSUPP;
+}
+
static int gve_xsk_pool_enable(struct net_device *dev,
struct xsk_buff_pool *pool,
u16 qid)
{
struct gve_priv *priv = netdev_priv(dev);
- struct napi_struct *napi;
- struct gve_rx_ring *rx;
- int tx_qid;
int err;
if (qid >= priv->rx_cfg.num_queues) {
@@ -1535,34 +1603,31 @@ static int gve_xsk_pool_enable(struct net_device *dev,
if (err)
return err;
+ set_bit(qid, priv->xsk_pools);
+
/* If XDP prog is not installed or interface is down, return. */
if (!priv->xdp_prog || !netif_running(dev))
return 0;
- rx = &priv->rx[qid];
- napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
- err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
+ err = gve_reg_xsk_pool(priv, dev, pool, qid);
if (err)
- goto err;
-
- err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
- MEM_TYPE_XSK_BUFF_POOL, NULL);
- if (err)
- goto err;
-
- xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
- rx->xsk_pool = pool;
-
- tx_qid = gve_xdp_tx_queue_id(priv, qid);
- priv->tx[tx_qid].xsk_pool = pool;
+ goto err_xsk_pool_dma_mapped;
+ /* Stop and start RDA queues to repost buffers. */
+ if (!gve_is_qpl(priv)) {
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ if (err)
+ goto err_xsk_pool_registered;
+ }
return 0;
-err:
- if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
- xdp_rxq_info_unreg(&rx->xsk_rxq);
+err_xsk_pool_registered:
+ gve_unreg_xsk_pool(priv, qid);
+err_xsk_pool_dma_mapped:
+ clear_bit(qid, priv->xsk_pools);
xsk_pool_dma_unmap(pool,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_WEAK_ORDERING);
return err;
}
@@ -1574,18 +1639,28 @@ static int gve_xsk_pool_disable(struct net_device *dev,
struct napi_struct *napi_tx;
struct xsk_buff_pool *pool;
int tx_qid;
+ int err;
- pool = xsk_get_pool_from_qid(dev, qid);
- if (!pool)
- return -EINVAL;
if (qid >= priv->rx_cfg.num_queues)
return -EINVAL;
- /* If XDP prog is not installed or interface is down, unmap DMA and
- * return.
- */
- if (!priv->xdp_prog || !netif_running(dev))
- goto done;
+ clear_bit(qid, priv->xsk_pools);
+
+ pool = xsk_get_pool_from_qid(dev, qid);
+ if (pool)
+ xsk_pool_dma_unmap(pool,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_WEAK_ORDERING);
+
+ if (!netif_running(dev) || !priv->tx_cfg.num_xdp_queues)
+ return 0;
+
+ /* Stop and start RDA queues to repost buffers. */
+ if (!gve_is_qpl(priv) && priv->xdp_prog) {
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ if (err)
+ return err;
+ }
napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
napi_disable(napi_rx); /* make sure current rx poll is done */
@@ -1594,22 +1669,19 @@ static int gve_xsk_pool_disable(struct net_device *dev,
napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
napi_disable(napi_tx); /* make sure current tx poll is done */
- priv->rx[qid].xsk_pool = NULL;
- xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
- priv->tx[tx_qid].xsk_pool = NULL;
+ gve_unreg_xsk_pool(priv, qid);
smp_mb(); /* Make sure it is visible to the workers on datapath */
napi_enable(napi_rx);
- if (gve_rx_work_pending(&priv->rx[qid]))
- napi_schedule(napi_rx);
-
napi_enable(napi_tx);
- if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
- napi_schedule(napi_tx);
+ if (gve_is_gqi(priv)) {
+ if (gve_rx_work_pending(&priv->rx[qid]))
+ napi_schedule(napi_rx);
+
+ if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
+ napi_schedule(napi_tx);
+ }
-done:
- xsk_pool_dma_unmap(pool,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
return 0;
}
@@ -1645,9 +1717,8 @@ static int verify_xdp_configuration(struct net_device *dev)
return -EOPNOTSUPP;
}
- if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
- netdev_warn(dev, "XDP is not supported in mode %d.\n",
- priv->queue_format);
+ if (priv->header_split_enabled) {
+ netdev_warn(dev, "XDP is not supported when header-data split is enabled.\n");
return -EOPNOTSUPP;
}
@@ -1727,7 +1798,7 @@ int gve_adjust_config(struct gve_priv *priv,
{
int err;
- /* Allocate resources for the new confiugration */
+ /* Allocate resources for the new configuration */
err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -1978,10 +2049,13 @@ u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
return GVE_DEFAULT_RX_BUFFER_SIZE;
}
-/* header-split is not supported on non-DQO_RDA yet even if device advertises it */
+/* Header split is only supported on DQ RDA queue format. If XDP is enabled,
+ * header split is not allowed.
+ */
bool gve_header_split_supported(const struct gve_priv *priv)
{
- return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
+ return priv->header_buf_size &&
+ priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
}
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
@@ -2030,6 +2104,12 @@ static int gve_set_features(struct net_device *netdev,
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
netdev->features ^= NETIF_F_LRO;
+ if (priv->xdp_prog && (netdev->features & NETIF_F_LRO)) {
+ netdev_warn(netdev,
+ "XDP is not supported when LRO is on.\n");
+ err = -EOPNOTSUPP;
+ goto revert_features;
+ }
if (netif_running(netdev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
@@ -2049,6 +2129,46 @@ revert_features:
return err;
}
+static int gve_get_ts_config(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ *kernel_config = priv->ts_config;
+ return 0;
+}
+
+static int gve_set_ts_config(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (kernel_config->tx_type != HWTSTAMP_TX_OFF) {
+ NL_SET_ERR_MSG_MOD(extack, "TX timestamping is not supported");
+ return -ERANGE;
+ }
+
+ if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (!priv->nic_ts_report) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "RX timestamping is not supported");
+ kernel_config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -EOPNOTSUPP;
+ }
+
+ kernel_config->rx_filter = HWTSTAMP_FILTER_ALL;
+ gve_clock_nic_ts_read(priv);
+ ptp_schedule_worker(priv->ptp->clock, 0);
+ } else {
+ ptp_cancel_worker_sync(priv->ptp->clock);
+ }
+
+ priv->ts_config.rx_filter = kernel_config->rx_filter;
+
+ return 0;
+}
+
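
A userspace sketch of how these handlers are reached (socket and interface setup are assumptions): the classic SIOCSHWTSTAMP ioctl, which the kernel translates into a gve_set_ts_config() call with tx_type and rx_filter taken from the struct below:

    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* fd is any open AF_INET datagram socket. */
    int enable_rx_timestamps(int fd, const char *ifname)
    {
            struct hwtstamp_config cfg = {
                    .tx_type   = HWTSTAMP_TX_OFF, /* TX is rejected above */
                    .rx_filter = HWTSTAMP_FILTER_ALL,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&cfg;

            return ioctl(fd, SIOCSHWTSTAMP, &ifr);
    }
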
static const struct net_device_ops gve_netdev_ops = {
.ndo_start_xmit = gve_start_xmit,
.ndo_features_check = gve_features_check,
@@ -2060,6 +2180,8 @@ static const struct net_device_ops gve_netdev_ops = {
.ndo_bpf = gve_xdp,
.ndo_xdp_xmit = gve_xdp_xmit,
.ndo_xsk_wakeup = gve_xsk_wakeup,
+ .ndo_hwtstamp_get = gve_get_ts_config,
+ .ndo_hwtstamp_set = gve_set_ts_config,
};
static void gve_handle_status(struct gve_priv *priv, u32 status)
@@ -2189,6 +2311,10 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
xdp_features = NETDEV_XDP_ACT_BASIC;
xdp_features |= NETDEV_XDP_ACT_REDIRECT;
xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ } else if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ xdp_features = NETDEV_XDP_ACT_BASIC;
+ xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
xdp_features = 0;
}
@@ -2243,7 +2369,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
goto err;
}
- /* Big TCP is only supported on DQ*/
+ /* Big TCP is only supported on DQO */
if (!gve_is_gqi(priv))
netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
@@ -2253,6 +2379,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
*/
priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
priv->mgmt_msix_idx = priv->num_ntfy_blks;
+ priv->numa_node = dev_to_node(&priv->pdev->dev);
priv->tx_cfg.max_queues =
min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
@@ -2279,11 +2406,26 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
}
+ priv->ts_config.tx_type = HWTSTAMP_TX_OFF;
+ priv->ts_config.rx_filter = HWTSTAMP_FILTER_NONE;
+
setup_device:
+ priv->xsk_pools = bitmap_zalloc(priv->rx_cfg.max_queues, GFP_KERNEL);
+ if (!priv->xsk_pools) {
+ err = -ENOMEM;
+ goto err;
+ }
+
gve_set_netdev_xdp_features(priv);
err = gve_setup_device_resources(priv);
- if (!err)
- return 0;
+ if (err)
+ goto err_free_xsk_bitmap;
+
+ return 0;
+
+err_free_xsk_bitmap:
+ bitmap_free(priv->xsk_pools);
+ priv->xsk_pools = NULL;
err:
gve_adminq_free(&priv->pdev->dev, priv);
return err;
@@ -2293,6 +2435,8 @@ static void gve_teardown_priv_resources(struct gve_priv *priv)
{
gve_teardown_device_resources(priv);
gve_adminq_free(&priv->pdev->dev, priv);
+ bitmap_free(priv->xsk_pools);
+ priv->xsk_pools = NULL;
}
static void gve_trigger_reset(struct gve_priv *priv)
diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c
new file mode 100644
index 000000000000..e96247c9d68d
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_ptp.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2025 Google LLC
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+
+/* Interval to schedule a nic timestamp calibration, 250ms. */
+#define GVE_NIC_TS_SYNC_INTERVAL_MS 250
+
+/* Read the nic timestamp from hardware via the admin queue. */
+int gve_clock_nic_ts_read(struct gve_priv *priv)
+{
+ u64 nic_raw;
+ int err;
+
+ err = gve_adminq_report_nic_ts(priv, priv->nic_ts_report_bus);
+ if (err)
+ return err;
+
+ nic_raw = be64_to_cpu(priv->nic_ts_report->nic_timestamp);
+ WRITE_ONCE(priv->last_sync_nic_counter, nic_raw);
+
+ return 0;
+}
+
+static long gve_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ const struct gve_ptp *ptp = container_of(info, struct gve_ptp, info);
+ struct gve_priv *priv = ptp->priv;
+ int err;
+
+ if (gve_get_reset_in_progress(priv) || !gve_get_admin_queue_ok(priv))
+ goto out;
+
+ err = gve_clock_nic_ts_read(priv);
+ if (err && net_ratelimit())
+ dev_err(&priv->pdev->dev,
+ "%s read err %d\n", __func__, err);
+
+out:
+ return msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS);
+}
+
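
A note on the contract relied on here, stated as an assumption about the PTP core: a non-negative return from do_aux_work re-queues the PTP kworker after that many jiffies, so returning msecs_to_jiffies(250) yields a 4 Hz calibration cadence without a private timer, while a negative return would stop the rescheduling:

    #include <linux/ptp_clock_kernel.h>

    static long sketch_aux_work(struct ptp_clock_info *info)
    {
            /* ...periodic calibration work goes here... */
            return msecs_to_jiffies(250); /* run again in 250 ms */
    }
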
+static const struct ptp_clock_info gve_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "gve clock",
+ .do_aux_work = gve_ptp_do_aux_work,
+};
+
+static int gve_ptp_init(struct gve_priv *priv)
+{
+ struct gve_ptp *ptp;
+ int err;
+
+ if (!priv->nic_timestamp_supported) {
+ dev_dbg(&priv->pdev->dev, "Device does not support PTP\n");
+ return -EOPNOTSUPP;
+ }
+
+ priv->ptp = kzalloc(sizeof(*priv->ptp), GFP_KERNEL);
+ if (!priv->ptp)
+ return -ENOMEM;
+
+ ptp = priv->ptp;
+ ptp->info = gve_ptp_caps;
+ ptp->clock = ptp_clock_register(&ptp->info, &priv->pdev->dev);
+
+ if (IS_ERR(ptp->clock)) {
+ dev_err(&priv->pdev->dev, "PTP clock registration failed\n");
+ err = PTR_ERR(ptp->clock);
+ goto free_ptp;
+ }
+
+ ptp->priv = priv;
+ return 0;
+
+free_ptp:
+ kfree(ptp);
+ priv->ptp = NULL;
+ return err;
+}
+
+static void gve_ptp_release(struct gve_priv *priv)
+{
+ struct gve_ptp *ptp = priv->ptp;
+
+ if (!ptp)
+ return;
+
+ if (ptp->clock)
+ ptp_clock_unregister(ptp->clock);
+
+ kfree(ptp);
+ priv->ptp = NULL;
+}
+
+int gve_init_clock(struct gve_priv *priv)
+{
+ int err;
+
+ if (!priv->nic_timestamp_supported)
+ return 0;
+
+ err = gve_ptp_init(priv);
+ if (err)
+ return err;
+
+ priv->nic_ts_report =
+ dma_alloc_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ &priv->nic_ts_report_bus,
+ GFP_KERNEL);
+ if (!priv->nic_ts_report) {
+ dev_err(&priv->pdev->dev, "%s dma alloc error\n", __func__);
+ err = -ENOMEM;
+ goto release_ptp;
+ }
+
+ return 0;
+
+release_ptp:
+ gve_ptp_release(priv);
+ return err;
+}
+
+void gve_teardown_clock(struct gve_priv *priv)
+{
+ gve_ptp_release(priv);
+
+ if (priv->nic_ts_report) {
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ priv->nic_ts_report, priv->nic_ts_report_bus);
+ priv->nic_ts_report = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 90e875c1832f..ec424d2f4f57 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -192,8 +192,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
*/
slots = rx->mask + 1;
- rx->data.page_info = kvzalloc(slots *
- sizeof(*rx->data.page_info), GFP_KERNEL);
+ rx->data.page_info = kvcalloc_node(slots, sizeof(*rx->data.page_info),
+ GFP_KERNEL, priv->numa_node);
if (!rx->data.page_info)
return -ENOMEM;
@@ -216,7 +216,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
if (!rx->data.raw_addressing) {
for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
- struct page *page = alloc_page(GFP_KERNEL);
+ struct page *page = alloc_pages_node(priv->numa_node,
+ GFP_KERNEL, 0);
if (!page) {
err = -ENOMEM;
@@ -303,10 +304,9 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1;
rx->qpl_copy_pool_head = 0;
- rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1,
- sizeof(rx->qpl_copy_pool[0]),
- GFP_KERNEL);
-
+ rx->qpl_copy_pool = kvcalloc_node(rx->qpl_copy_pool_mask + 1,
+ sizeof(rx->qpl_copy_pool[0]),
+ GFP_KERNEL, priv->numa_node);
if (!rx->qpl_copy_pool) {
err = -ENOMEM;
goto abort_with_slots;
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index dcb0545baa50..7380c2b7a2d8 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -8,6 +8,7 @@
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_utils.h"
+#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
@@ -15,6 +16,7 @@
#include <net/ip6_checksum.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#include <net/xdp_sock_drv.h>
static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
{
@@ -148,6 +150,10 @@ void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
gve_free_to_page_pool(rx, bs, false);
else
gve_free_qpl_page_dqo(bs);
+ if (gve_buf_state_is_allocated(rx, bs) && bs->xsk_buff) {
+ xsk_buff_free(bs->xsk_buff);
+ bs->xsk_buff = NULL;
+ }
}
if (rx->dqo.qpl) {
@@ -236,9 +242,9 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
- rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
- sizeof(rx->dqo.buf_states[0]),
- GFP_KERNEL);
+ rx->dqo.buf_states = kvcalloc_node(rx->dqo.num_buf_states,
+ sizeof(rx->dqo.buf_states[0]),
+ GFP_KERNEL, priv->numa_node);
if (!rx->dqo.buf_states)
return -ENOMEM;
@@ -437,6 +443,29 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}
+/* Expand the hardware timestamp to the full 64 bits of width, and add it to the
+ * skb.
+ *
+ * This algorithm works by using the passed hardware timestamp to generate a
+ * diff relative to the last read of the nic clock. This diff can be positive or
+ * negative, as it is possible that we have read the clock more recently than
+ * the hardware has received this packet. To detect this, we use the high bit of
+ * the diff, and assume that the read is more recent if the high bit is set. In
+ * this case we invert the process.
+ *
+ * Note that this means if the time delta between packet reception and the last
+ * clock read is greater than ~2 seconds, this will provide invalid results.
+ */
+static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx, u32 hwts)
+{
+ u64 last_read = READ_ONCE(rx->gve->last_sync_nic_counter);
+ struct sk_buff *skb = rx->ctx.skb_head;
+ u32 low = (u32)last_read;
+ s32 diff = hwts - low;
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff);
+}
+
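
A standalone demonstration of the expansion (values are made up): the low 32 bits of the last clock read anchor a signed diff, so timestamps taken slightly before or slightly after that read both resolve correctly:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t expand_hwts(uint64_t last_read, uint32_t hwts)
    {
            uint32_t low  = (uint32_t)last_read;
            int32_t  diff = (int32_t)(hwts - low); /* signed: may be negative */

            return last_read + diff;
    }

    int main(void)
    {
            /* last_read = 10e9 ns; its low 32 bits are 0x540BE400. */
            printf("%llu\n", (unsigned long long)
                   expand_hwts(10000000000ULL, 0x540BE3FFu)); /* 9999999999  */
            printf("%llu\n", (unsigned long long)
                   expand_hwts(10000000000ULL, 0x540BE4FFu)); /* 10000000255 */
            return 0;
    }
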
static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
{
if (!rx->ctx.skb_head)
@@ -464,7 +493,7 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state,
u16 buf_len)
{
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = alloc_pages_node(rx->gve->numa_node, GFP_ATOMIC, 0);
int num_frags;
if (!page)
@@ -547,27 +576,146 @@ static int gve_rx_append_frags(struct napi_struct *napi,
return 0;
}
+static int gve_xdp_tx_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp)
+{
+ struct gve_tx_ring *tx;
+ struct xdp_frame *xdpf;
+ u32 tx_qid;
+ int err;
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ if (rx->xsk_pool)
+ xsk_buff_free(xdp);
+ return -ENOSPC;
+ }
+
+ tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
+ tx = &priv->tx[tx_qid];
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ err = gve_xdp_xmit_one_dqo(priv, tx, xdpf);
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ return err;
+}
+
+static void gve_xsk_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act)
+{
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ xsk_buff_free(xdp);
+ break;
+ case XDP_TX:
+ if (unlikely(gve_xdp_tx_dqo(priv, rx, xdp)))
+ goto err;
+ break;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(priv->dev, xdp, xprog)))
+ goto err;
+ break;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ if ((u32)xdp_act < GVE_XDP_ACTIONS)
+ rx->xdp_actions[xdp_act]++;
+ u64_stats_update_end(&rx->statss);
+ return;
+
+err:
+ u64_stats_update_begin(&rx->statss);
+ if (xdp_act == XDP_TX)
+ rx->xdp_tx_errors++;
+ if (xdp_act == XDP_REDIRECT)
+ rx->xdp_redirect_errors++;
+ u64_stats_update_end(&rx->statss);
+}
+
static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
struct xdp_buff *xdp, struct bpf_prog *xprog,
int xdp_act,
struct gve_rx_buf_state_dqo *buf_state)
{
- u64_stats_update_begin(&rx->statss);
+ int err;
switch (xdp_act) {
case XDP_ABORTED:
case XDP_DROP:
default:
- rx->xdp_actions[xdp_act]++;
+ gve_free_buffer(rx, buf_state);
break;
case XDP_TX:
- rx->xdp_tx_errors++;
+ err = gve_xdp_tx_dqo(priv, rx, xdp);
+ if (unlikely(err))
+ goto err;
+ gve_reuse_buffer(rx, buf_state);
break;
case XDP_REDIRECT:
- rx->xdp_redirect_errors++;
+ err = xdp_do_redirect(priv->dev, xdp, xprog);
+ if (unlikely(err))
+ goto err;
+ gve_reuse_buffer(rx, buf_state);
break;
}
+ u64_stats_update_begin(&rx->statss);
+ if ((u32)xdp_act < GVE_XDP_ACTIONS)
+ rx->xdp_actions[xdp_act]++;
+ u64_stats_update_end(&rx->statss);
+ return;
+err:
+ u64_stats_update_begin(&rx->statss);
+ if (xdp_act == XDP_TX)
+ rx->xdp_tx_errors++;
+ else if (xdp_act == XDP_REDIRECT)
+ rx->xdp_redirect_errors++;
u64_stats_update_end(&rx->statss);
gve_free_buffer(rx, buf_state);
+ return;
+}
+
+static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state, int buf_len,
+ struct bpf_prog *xprog)
+{
+ struct xdp_buff *xdp = buf_state->xsk_buff;
+ struct gve_priv *priv = rx->gve;
+ int xdp_act;
+
+ xdp->data_end = xdp->data + buf_len;
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ if (xprog) {
+ xdp_act = bpf_prog_run_xdp(xprog, xdp);
+ buf_len = xdp->data_end - xdp->data;
+ if (xdp_act != XDP_PASS) {
+ gve_xsk_done_dqo(priv, rx, xdp, xprog, xdp_act);
+ gve_free_buf_state(rx, buf_state);
+ return 0;
+ }
+ }
+
+ /* Copy the data to skb */
+ rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
+ xdp->data, buf_len);
+ if (unlikely(!rx->ctx.skb_head)) {
+ xsk_buff_free(xdp);
+ gve_free_buf_state(rx, buf_state);
+ return -ENOMEM;
+ }
+ rx->ctx.skb_tail = rx->ctx.skb_head;
+
+ /* Free XSK buffer and Buffer state */
+ xsk_buff_free(xdp);
+ gve_free_buf_state(rx, buf_state);
+
+ /* Update Stats */
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
+ return 0;
}
/* Returns 0 if descriptor is completed successfully.
@@ -608,6 +756,10 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
buf_len = compl_desc->packet_len;
hdr_len = compl_desc->header_len;
+ xprog = READ_ONCE(priv->xdp_prog);
+ if (buf_state->xsk_buff)
+ return gve_rx_xsk_dqo(napi, rx, buf_state, buf_len, xprog);
+
/* Page might not have been used for a while and was likely last written
* by a different thread.
*/
@@ -658,7 +810,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
- xprog = READ_ONCE(priv->xdp_prog);
if (xprog) {
struct xdp_buff xdp;
void *old_data;
@@ -767,6 +918,9 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
if (feat & NETIF_F_RXCSUM)
gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);
+ if (rx->gve->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)
+ gve_rx_skb_hwtstamp(rx, le32_to_cpu(desc->ts));
+
/* RSC packets must set gso_size otherwise the TCP stack will complain
* that packets are larger than MTU.
*/
@@ -786,16 +940,27 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
{
- struct napi_struct *napi = &block->napi;
- netdev_features_t feat = napi->dev->features;
-
- struct gve_rx_ring *rx = block->rx;
- struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
-
+ struct gve_rx_compl_queue_dqo *complq;
+ struct napi_struct *napi;
+ netdev_features_t feat;
+ struct gve_rx_ring *rx;
+ struct gve_priv *priv;
+ u64 xdp_redirects;
u32 work_done = 0;
u64 bytes = 0;
+ u64 xdp_txs;
int err;
+ napi = &block->napi;
+ feat = napi->dev->features;
+
+ rx = block->rx;
+ priv = rx->gve;
+ complq = &rx->dqo.complq;
+
+ xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
+ xdp_txs = rx->xdp_actions[XDP_TX];
+
while (work_done < budget) {
struct gve_rx_compl_desc_dqo *compl_desc =
&complq->desc_ring[complq->head];
@@ -869,6 +1034,12 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
rx->ctx.skb_tail = NULL;
}
+ if (xdp_txs != rx->xdp_actions[XDP_TX])
+ gve_xdp_tx_flush_dqo(priv, rx->q_num);
+
+ if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
+ xdp_do_flush();
+
gve_rx_post_buffers_dqo(rx);
u64_stats_update_begin(&rx->statss);
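gve_rx_poll_dqo above snapshots the XDP_TX and XDP_REDIRECT action counters before the completion loop and rings the doorbell (or calls xdp_do_flush()) once afterwards, only if a counter moved. A small sketch of that batch-then-flush idea, with invented helpers:

/* Standalone sketch -- not part of the patch. */
#include <stdio.h>
#include <stdint.h>

static uint64_t tx_count;

static void queue_tx(void) { tx_count++; }	/* enqueue, no doorbell */
static void flush_tx(void) { puts("doorbell rung once"); }

int main(void)
{
	uint64_t before = tx_count;	/* snapshot before the batch */
	int i;

	for (i = 0; i < 8; i++)
		queue_tx();

	if (tx_count != before)		/* at most one doorbell per poll */
		flush_tx();
	return 0;
}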
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 1b40bf0c811a..c6ff0968929d 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -823,8 +823,8 @@ static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
return ndescs;
}
-int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
- u32 flags)
+int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
{
struct gve_priv *priv = netdev_priv(dev);
struct gve_tx_ring *tx;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 9d705d94b065..6f1d515673d2 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -9,9 +9,11 @@
#include "gve_utils.h"
#include "gve_dqo.h"
#include <net/ip.h>
+#include <linux/bpf.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
+#include <net/xdp_sock_drv.h>
/* Returns true if tx_bufs are available. */
static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count)
@@ -110,6 +112,14 @@ static bool gve_has_pending_packet(struct gve_tx_ring *tx)
return false;
}
+void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid)
+{
+ u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
+ struct gve_tx_ring *tx = &priv->tx[tx_qid];
+
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+}
+
static struct gve_tx_pending_packet_dqo *
gve_alloc_pending_packet(struct gve_tx_ring *tx)
{
@@ -198,7 +208,8 @@ void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
- netdev_tx_reset_queue(tx->netdev_txq);
+ if (tx->netdev_txq)
+ netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_clean_pending_packets(tx);
gve_tx_remove_from_block(priv, idx);
}
@@ -231,6 +242,9 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dqo.tx_ring = NULL;
}
+ kvfree(tx->dqo.xsk_reorder_queue);
+ tx->dqo.xsk_reorder_queue = NULL;
+
kvfree(tx->dqo.pending_packets);
tx->dqo.pending_packets = NULL;
@@ -276,7 +290,8 @@ void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx)
gve_tx_add_to_block(priv, idx);
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ if (idx < priv->tx_cfg.num_queues)
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}
@@ -295,6 +310,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
memset(tx, 0, sizeof(*tx));
tx->q_num = idx;
tx->dev = hdev;
+ spin_lock_init(&tx->dqo_tx.xdp_lock);
atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
/* Queue sizes must be a power of 2 */
@@ -333,6 +349,17 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1;
atomic_set_release(&tx->dqo_compl.free_pending_packets, -1);
+
+ /* Only allocate the XSK reorder queue for XDP queues */
+ if (idx >= cfg->qcfg->num_queues && cfg->num_xdp_rings) {
+ tx->dqo.xsk_reorder_queue =
+ kvcalloc(tx->dqo.complq_mask + 1,
+ sizeof(tx->dqo.xsk_reorder_queue[0]),
+ GFP_KERNEL);
+ if (!tx->dqo.xsk_reorder_queue)
+ goto err;
+ }
+
tx->dqo_compl.miss_completions.head = -1;
tx->dqo_compl.miss_completions.tail = -1;
tx->dqo_compl.timed_out_completions.head = -1;
@@ -439,12 +466,28 @@ static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
return tx->mask - num_used;
}
+/* Checks whether the requested number of slots is available in the ring */
+static bool gve_has_tx_slots_available(struct gve_tx_ring *tx, u32 slots_req)
+{
+ u32 num_avail = num_avail_tx_slots(tx);
+
+ slots_req += GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP;
+
+ if (num_avail >= slots_req)
+ return true;
+
+ /* Update cached TX head pointer */
+ tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
+
+ return num_avail_tx_slots(tx) >= slots_req;
+}
+
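gve_has_tx_slots_available first tests a cached copy of the hardware head and only performs the acquire read of the shared head when the cheap check fails; the extra GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP slots keep the producer away from stale completions. A userspace sketch of the same check/refresh/re-check pattern using C11 atomics; all names and sizes are illustrative:

/* Standalone sketch -- not part of the patch. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 256
#define RING_MASK (RING_SIZE - 1)

static _Atomic uint32_t hw_head;	/* advanced by the completion path */
static uint32_t cached_head;		/* producer's cheap local copy */
static uint32_t tail;			/* producer-only */

static uint32_t avail(void)
{
	return RING_SIZE - ((tail - cached_head) & RING_MASK);
}

static bool slots_available(uint32_t want)
{
	if (avail() >= want)
		return true;		/* fast path: cache was enough */

	/* Slow path: refresh the cache from the shared head. */
	cached_head = atomic_load_explicit(&hw_head, memory_order_acquire);
	return avail() >= want;
}

int main(void)
{
	tail = 10;			/* 10 descriptors outstanding */
	return slots_available(200) ? 0 : 1;
}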
static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx,
int desc_count, int buf_count)
{
return gve_has_pending_packet(tx) &&
- num_avail_tx_slots(tx) >= desc_count &&
- gve_has_free_tx_qpl_bufs(tx, buf_count);
+ gve_has_tx_slots_available(tx, desc_count) &&
+ gve_has_free_tx_qpl_bufs(tx, buf_count);
}
/* Stops the queue if available descriptors is less than 'count'.
@@ -456,12 +499,6 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return 0;
- /* Update cached TX head pointer */
- tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
-
- if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
- return 0;
-
/* No space, so stop the queue */
tx->stop_queue++;
netif_tx_stop_queue(tx->netdev_txq);
@@ -472,8 +509,6 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
/* After stopping the queue, check if we can transmit again in order to
* avoid a TOCTOU bug.
*/
- tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
-
if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return -EBUSY;
@@ -500,11 +535,9 @@ static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
}
static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
- struct sk_buff *skb, u32 len, u64 addr,
+ bool enable_csum, u32 len, u64 addr,
s16 compl_tag, bool eop, bool is_gso)
{
- const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL;
-
while (len > 0) {
struct gve_tx_pkt_desc_dqo *desc =
&tx->dqo.tx_ring[*desc_idx].pkt;
@@ -515,7 +548,7 @@ static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
.buf_addr = cpu_to_le64(addr),
.dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
.end_of_packet = cur_eop,
- .checksum_offload_enable = checksum_offload_en,
+ .checksum_offload_enable = enable_csum,
.compl_tag = cpu_to_le16(compl_tag),
.buf_size = cur_len,
};
@@ -612,6 +645,25 @@ gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
};
}
+static void gve_tx_update_tail(struct gve_tx_ring *tx, u32 desc_idx)
+{
+ u32 last_desc_idx = (desc_idx - 1) & tx->mask;
+ u32 last_report_event_interval =
+ (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
+
+ /* Commit the changes to our state */
+ tx->dqo_tx.tail = desc_idx;
+
+ /* Request a descriptor completion on the last descriptor of the
+ * packet if we are allowed to by the HW enforced interval.
+ */
+ if (unlikely(last_report_event_interval >= GVE_TX_MIN_RE_INTERVAL)) {
+ tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
+ tx->dqo_tx.last_re_idx = last_desc_idx;
+ }
+}
+
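gve_tx_update_tail requests a completion descriptor only when the masked distance from the last request reaches GVE_TX_MIN_RE_INTERVAL, bounding the completion rate. The wrap-safe distance arithmetic, in a tiny sketch with made-up ring values:

/* Standalone sketch -- not part of the patch. */
#include <stdio.h>
#include <stdint.h>

#define MASK 1023		/* ring of 1024 descriptors */
#define MIN_RE_INTERVAL 32	/* stand-in for GVE_TX_MIN_RE_INTERVAL */

int main(void)
{
	uint32_t last_re_idx = 1000;
	uint32_t last_desc_idx = 20;	/* tail wrapped past the end */

	/* Masked subtraction handles wrap: (20 - 1000) & 1023 == 44. */
	uint32_t interval = (last_desc_idx - last_re_idx) & MASK;

	if (interval >= MIN_RE_INTERVAL)
		printf("request completion at index %u\n", last_desc_idx);
	return 0;
}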
static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
struct sk_buff *skb,
struct gve_tx_pending_packet_dqo *pkt,
@@ -619,6 +671,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
u32 *desc_idx,
bool is_gso)
{
+ bool enable_csum = skb->ip_summed == CHECKSUM_PARTIAL;
const struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
@@ -644,7 +697,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
++pkt->num_bufs;
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr,
completion_tag,
/*eop=*/shinfo->nr_frags == 0, is_gso);
}
@@ -664,7 +717,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
dma[pkt->num_bufs], addr);
++pkt->num_bufs;
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr,
completion_tag, is_eop, is_gso);
}
@@ -709,6 +762,7 @@ static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
u32 *desc_idx,
bool is_gso)
{
+ bool enable_csum = skb->ip_summed == CHECKSUM_PARTIAL;
u32 copy_offset = 0;
dma_addr_t dma_addr;
u32 copy_len;
@@ -730,7 +784,7 @@ static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
copy_offset += copy_len;
dma_sync_single_for_device(tx->dev, dma_addr,
copy_len, DMA_TO_DEVICE);
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum,
copy_len,
dma_addr,
completion_tag,
@@ -768,6 +822,7 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
return -ENOMEM;
pkt->skb = skb;
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_SKB;
completion_tag = pkt - tx->dqo.pending_packets;
gve_extract_tx_metadata_dqo(skb, &metadata);
@@ -800,24 +855,7 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
- /* Commit the changes to our state */
- tx->dqo_tx.tail = desc_idx;
-
- /* Request a descriptor completion on the last descriptor of the
- * packet if we are allowed to by the HW enforced interval.
- */
- {
- u32 last_desc_idx = (desc_idx - 1) & tx->mask;
- u32 last_report_event_interval =
- (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
-
- if (unlikely(last_report_event_interval >=
- GVE_TX_MIN_RE_INTERVAL)) {
- tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
- tx->dqo_tx.last_re_idx = last_desc_idx;
- }
- }
-
+ gve_tx_update_tail(tx, desc_idx);
return 0;
err:
@@ -951,9 +989,8 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
/* Metadata + (optional TSO) + data descriptors. */
total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
- if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
- GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP,
- num_buffer_descs))) {
+ if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs,
+ num_buffer_descs))) {
return -1;
}
@@ -970,6 +1007,38 @@ drop:
return 0;
}
+static void gve_xsk_reorder_queue_push_dqo(struct gve_tx_ring *tx,
+ u16 completion_tag)
+{
+ u32 tail = atomic_read(&tx->dqo_tx.xsk_reorder_queue_tail);
+
+ tx->dqo.xsk_reorder_queue[tail] = completion_tag;
+ tail = (tail + 1) & tx->dqo.complq_mask;
+ atomic_set_release(&tx->dqo_tx.xsk_reorder_queue_tail, tail);
+}
+
+static struct gve_tx_pending_packet_dqo *
+gve_xsk_reorder_queue_head(struct gve_tx_ring *tx)
+{
+ u32 head = tx->dqo_compl.xsk_reorder_queue_head;
+
+ if (head == tx->dqo_compl.xsk_reorder_queue_tail) {
+ tx->dqo_compl.xsk_reorder_queue_tail =
+ atomic_read_acquire(&tx->dqo_tx.xsk_reorder_queue_tail);
+
+ if (head == tx->dqo_compl.xsk_reorder_queue_tail)
+ return NULL;
+ }
+
+ return &tx->dqo.pending_packets[tx->dqo.xsk_reorder_queue[head]];
+}
+
+static void gve_xsk_reorder_queue_pop_dqo(struct gve_tx_ring *tx)
+{
+ tx->dqo_compl.xsk_reorder_queue_head++;
+ tx->dqo_compl.xsk_reorder_queue_head &= tx->dqo.complq_mask;
+}
+
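The reorder queue above is a single-producer/single-consumer ring of completion tags: the TX path publishes with a release store of the tail, and the completion path re-reads the shared tail with acquire semantics only when its private copy looks empty. A compact userspace model with C11 atomics; sizes and names are invented:

/* Standalone sketch -- not part of the patch. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define QMASK 255
static uint16_t queue[QMASK + 1];
static _Atomic uint32_t prod_tail;	/* written by the producer only */
static uint32_t cons_head, cons_tail;	/* consumer-private */

static void push(uint16_t tag)		/* producer side */
{
	uint32_t tail = atomic_load_explicit(&prod_tail,
					     memory_order_relaxed);
	queue[tail] = tag;
	/* Release: the slot write is visible before the new tail. */
	atomic_store_explicit(&prod_tail, (tail + 1) & QMASK,
			      memory_order_release);
}

static int peek(uint16_t *tag)		/* consumer side */
{
	if (cons_head == cons_tail) {
		/* Looks empty: refresh the cached tail with acquire. */
		cons_tail = atomic_load_explicit(&prod_tail,
						 memory_order_acquire);
		if (cons_head == cons_tail)
			return 0;
	}
	*tag = queue[cons_head];
	return 1;
}

static void pop(void)
{
	cons_head = (cons_head + 1) & QMASK;
}

int main(void)
{
	uint16_t tag;

	push(7);
	while (peek(&tag)) {
		printf("tag %u completed in order\n", tag);
		pop();
	}
	return 0;
}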
/* Transmit a given skb and ring the doorbell. */
netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
{
@@ -993,6 +1062,62 @@ netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static bool gve_xsk_tx_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ int budget)
+{
+ struct xsk_buff_pool *pool = tx->xsk_pool;
+ struct xdp_desc desc;
+ bool repoll = false;
+ int sent = 0;
+
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ for (; sent < budget; sent++) {
+ struct gve_tx_pending_packet_dqo *pkt;
+ s16 completion_tag;
+ dma_addr_t addr;
+ u32 desc_idx;
+
+ if (unlikely(!gve_has_avail_slots_tx_dqo(tx, 1, 1))) {
+ repoll = true;
+ break;
+ }
+
+ if (!xsk_tx_peek_desc(pool, &desc))
+ break;
+
+ pkt = gve_alloc_pending_packet(tx);
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_XSK;
+ pkt->num_bufs = 0;
+ completion_tag = pkt - tx->dqo.pending_packets;
+
+ addr = xsk_buff_raw_get_dma(pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, addr, desc.len);
+
+ desc_idx = tx->dqo_tx.tail;
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
+ true, desc.len,
+ addr, completion_tag, true,
+ false);
+ ++pkt->num_bufs;
+ gve_tx_update_tail(tx, desc_idx);
+ tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
+ gve_xsk_reorder_queue_push_dqo(tx, completion_tag);
+ }
+
+ if (sent) {
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+ xsk_tx_release(pool);
+ }
+
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xsk_sent += sent;
+ u64_stats_update_end(&tx->statss);
+
+ return (sent == budget) || repoll;
+}
+
static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
struct gve_tx_pending_packet_dqo *pending_packet)
{
@@ -1107,16 +1232,35 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
}
}
tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs;
- if (tx->dqo.qpl)
- gve_free_tx_qpl_bufs(tx, pending_packet);
- else
+
+ switch (pending_packet->type) {
+ case GVE_TX_PENDING_PACKET_DQO_SKB:
+ if (tx->dqo.qpl)
+ gve_free_tx_qpl_bufs(tx, pending_packet);
+ else
+ gve_unmap_packet(tx->dev, pending_packet);
+ (*pkts)++;
+ *bytes += pending_packet->skb->len;
+
+ napi_consume_skb(pending_packet->skb, is_napi);
+ pending_packet->skb = NULL;
+ gve_free_pending_packet(tx, pending_packet);
+ break;
+ case GVE_TX_PENDING_PACKET_DQO_XDP_FRAME:
gve_unmap_packet(tx->dev, pending_packet);
+ (*pkts)++;
+ *bytes += pending_packet->xdpf->len;
- *bytes += pending_packet->skb->len;
- (*pkts)++;
- napi_consume_skb(pending_packet->skb, is_napi);
- pending_packet->skb = NULL;
- gve_free_pending_packet(tx, pending_packet);
+ xdp_return_frame(pending_packet->xdpf);
+ pending_packet->xdpf = NULL;
+ gve_free_pending_packet(tx, pending_packet);
+ break;
+ case GVE_TX_PENDING_PACKET_DQO_XSK:
+ pending_packet->state = GVE_PACKET_STATE_XSK_COMPLETE;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
}
static void gve_handle_miss_completion(struct gve_priv *priv,
@@ -1213,8 +1357,34 @@ static void remove_timed_out_completions(struct gve_priv *priv,
remove_from_list(tx, &tx->dqo_compl.timed_out_completions,
pending_packet);
+
+ /* Need to count XSK packets in xsk_tx_completed. */
+ if (pending_packet->type == GVE_TX_PENDING_PACKET_DQO_XSK)
+ pending_packet->state = GVE_PACKET_STATE_XSK_COMPLETE;
+ else
+ gve_free_pending_packet(tx, pending_packet);
+ }
+}
+
+static void gve_tx_process_xsk_completions(struct gve_tx_ring *tx)
+{
+ u32 num_xsks = 0;
+
+ while (true) {
+ struct gve_tx_pending_packet_dqo *pending_packet =
+ gve_xsk_reorder_queue_head(tx);
+
+ if (!pending_packet ||
+ pending_packet->state != GVE_PACKET_STATE_XSK_COMPLETE)
+ break;
+
+ num_xsks++;
+ gve_xsk_reorder_queue_pop_dqo(tx);
gve_free_pending_packet(tx, pending_packet);
}
+
+ if (num_xsks)
+ xsk_tx_completed(tx->xsk_pool, num_xsks);
}
int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
@@ -1287,13 +1457,17 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
num_descs_cleaned++;
}
- netdev_tx_completed_queue(tx->netdev_txq,
- pkt_compl_pkts + miss_compl_pkts,
- pkt_compl_bytes + miss_compl_bytes);
+ if (tx->netdev_txq)
+ netdev_tx_completed_queue(tx->netdev_txq,
+ pkt_compl_pkts + miss_compl_pkts,
+ pkt_compl_bytes + miss_compl_bytes);
remove_miss_completions(priv, tx);
remove_timed_out_completions(priv, tx);
+ if (tx->xsk_pool)
+ gve_tx_process_xsk_completions(tx);
+
u64_stats_update_begin(&tx->statss);
tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes;
tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts;
@@ -1325,3 +1499,111 @@ bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
}
+
+bool gve_xsk_tx_poll_dqo(struct gve_notify_block *rx_block, int budget)
+{
+ struct gve_rx_ring *rx = rx_block->rx;
+ struct gve_priv *priv = rx->gve;
+ struct gve_tx_ring *tx;
+
+ tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
+ if (tx->xsk_pool)
+ return gve_xsk_tx_dqo(priv, tx, budget);
+
+ return false;
+}
+
+bool gve_xdp_poll_dqo(struct gve_notify_block *block)
+{
+ struct gve_tx_compl_desc *compl_desc;
+ struct gve_tx_ring *tx = block->tx;
+ struct gve_priv *priv = block->priv;
+
+ gve_clean_tx_done_dqo(priv, tx, &block->napi);
+
+ /* Return true if we still have work. */
+ compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
+ return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
+}
+
+int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct xdp_frame *xdpf)
+{
+ struct gve_tx_pending_packet_dqo *pkt;
+ u32 desc_idx = tx->dqo_tx.tail;
+ s16 completion_tag;
+ int num_descs = 1;
+ dma_addr_t addr;
+ int err;
+
+ if (unlikely(!gve_has_tx_slots_available(tx, num_descs)))
+ return -EBUSY;
+
+ pkt = gve_alloc_pending_packet(tx);
+ if (unlikely(!pkt))
+ return -EBUSY;
+
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_XDP_FRAME;
+ pkt->num_bufs = 0;
+ pkt->xdpf = xdpf;
+ completion_tag = pkt - tx->dqo.pending_packets;
+
+ /* Generate Packet Descriptor */
+ addr = dma_map_single(tx->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ err = dma_mapping_error(tx->dev, addr);
+ if (unlikely(err))
+ goto err;
+
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], xdpf->len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ pkt->num_bufs++;
+
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
+ false, xdpf->len,
+ addr, completion_tag, true,
+ false);
+
+ gve_tx_update_tail(tx, desc_idx);
+ return 0;
+
+err:
+ pkt->xdpf = NULL;
+ pkt->num_bufs = 0;
+ gve_free_pending_packet(tx, pkt);
+ return err;
+}
+
+int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_tx_ring *tx;
+ int i, err = 0, qid;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ qid = gve_xdp_tx_queue_id(priv,
+ smp_processor_id() % priv->tx_cfg.num_xdp_queues);
+
+ tx = &priv->tx[qid];
+
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ for (i = 0; i < n; i++) {
+ err = gve_xdp_xmit_one_dqo(priv, tx, frames[i]);
+ if (err)
+ break;
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xmit += n;
+ tx->xdp_xmit_errors += n - i;
+ u64_stats_update_end(&tx->statss);
+
+ return i ? i : err;
+}
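gve_xdp_xmit_dqo shards ndo_xdp_xmit callers across the XDP TX queues by CPU id; since several CPUs can map to one queue, the per-queue xdp_lock still serializes them. The selection is one modulo, sketched with stand-in queue counts:

/* Standalone sketch -- not part of the patch. */
#include <stdio.h>

static int pick_queue(int cpu, int num_xdp_queues, int first_xdp_qid)
{
	/* Shard by CPU; collisions are possible, hence the xdp_lock. */
	return first_xdp_qid + cpu % num_xdp_queues;
}

int main(void)
{
	int cpu;

	/* e.g. 8 CPUs, 4 XDP queues placed after 4 regular queues */
	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu %d -> txq %d\n", cpu, pick_queue(cpu, 4, 4));
	return 0;
}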
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index 7725cb0c5c8a..ea09a09c451b 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -258,6 +258,7 @@ struct hbg_stats {
u64 tx_dma_err_cnt;
u64 np_link_fail_cnt;
+ u64 reset_fail_cnt;
};
struct hbg_priv {
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
index f23fb5920c3c..c0ce74cf7382 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
@@ -156,6 +156,7 @@ static const struct hbg_push_stats_info hbg_push_stats_list[] = {
HBG_PUSH_STATS_I(tx_drop_cnt, 84),
HBG_PUSH_STATS_I(tx_excessive_length_drop_cnt, 85),
HBG_PUSH_STATS_I(tx_dma_err_cnt, 86),
+ HBG_PUSH_STATS_I(reset_fail_cnt, 87),
};
static int hbg_push_msg_send(struct hbg_priv *priv,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index ff3295b60a69..83cf75bf7a17 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -53,9 +53,11 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
{
int ret;
- ASSERT_RTNL();
+ if (test_and_set_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return -EBUSY;
if (netif_running(priv->netdev)) {
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
dev_warn(&priv->pdev->dev,
"failed to reset because port is up\n");
return -EBUSY;
@@ -64,10 +66,10 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
netif_device_detach(priv->netdev);
priv->reset_type = type;
- set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
if (ret) {
+ priv->stats.reset_fail_cnt++;
set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
}
@@ -83,28 +85,26 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
type != priv->reset_type)
return 0;
- ASSERT_RTNL();
-
- clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
ret = hbg_rebuild(priv);
if (ret) {
+ priv->stats.reset_fail_cnt++;
set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
return ret;
}
netif_device_attach(priv->netdev);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
dev_info(&priv->pdev->dev, "reset done\n");
return ret;
}
-/* must be protected by rtnl lock */
int hbg_reset(struct hbg_priv *priv)
{
int ret;
- ASSERT_RTNL();
ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION);
if (ret)
return ret;
@@ -169,7 +169,6 @@ static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct hbg_priv *priv = netdev_priv(netdev);
- rtnl_lock();
hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR);
}
@@ -179,7 +178,6 @@ static void hbg_pci_err_reset_done(struct pci_dev *pdev)
struct hbg_priv *priv = netdev_priv(netdev);
hbg_reset_done(priv, HBG_RESET_TYPE_FLR);
- rtnl_unlock();
}
static const struct pci_error_handlers hbg_pci_err_handler = {
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index 55520053270a..1d62ff913737 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -84,6 +84,7 @@ static const struct hbg_ethtool_stats hbg_ethtool_stats_info[] = {
HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR),
HBG_STATS_I(tx_dma_err_cnt),
HBG_STATS_I(tx_timeout_cnt),
+ HBG_STATS_I(reset_fail_cnt),
};
static const struct hbg_ethtool_stats hbg_ethtool_rmon_stats_info[] = {
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 9b65eef62b3f..d0aa0661ecd4 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -12,12 +12,21 @@
#define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000)
#define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000)
+#define HBG_MAC_LINK_WAIT_TIMEOUT_US (500 * 1000)
+#define HBG_MAC_LINK_WAIT_INTERVAL_US (5 * 1000)
/* little endian or big endian.
* ctrl means packet description, data means skb packet data
*/
#define HBG_ENDIAN_CTRL_LE_DATA_BE 0x0
#define HBG_PCU_FRAME_LEN_PLUS 4
+#define HBG_FIFO_TX_FULL_THRSLD 0x3F0
+#define HBG_FIFO_TX_EMPTY_THRSLD 0x1F0
+#define HBG_FIFO_RX_FULL_THRSLD 0x240
+#define HBG_FIFO_RX_EMPTY_THRSLD 0x190
+#define HBG_CFG_FIFO_FULL_THRSLD 0x10
+#define HBG_CFG_FIFO_EMPTY_THRSLD 0x01
+
static bool hbg_hw_spec_is_valid(struct hbg_priv *priv)
{
return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) &&
@@ -168,6 +177,11 @@ static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
+ /* Setting burst_len BIT(29) can improve TX performance,
+ * but packet drops occur when mtu > 2000,
+ * so clear BIT(29) when mtu > 2000.
+ */
+ u32 burst_len_bit = (mtu > 2000) ? 0 : 1;
u32 frame_len;
frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
@@ -175,6 +189,9 @@ void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
hbg_hw_set_pcu_max_frame_len(priv, frame_len);
hbg_hw_set_mac_max_frame_len(priv, frame_len);
+
+ hbg_reg_write_field(priv, HBG_REG_BRUST_LENGTH_ADDR,
+ HBG_REG_BRUST_LENGTH_B, burst_len_bit);
}
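hbg_hw_set_mtu reduces the trade-off to a single bit computed from the MTU threshold before the field write. A userspace sketch of the read-modify-write; the register variable and helper are hypothetical:

/* Standalone sketch -- not part of the patch. */
#include <stdio.h>
#include <stdint.h>

#define BURST_LEN_BIT (1u << 29)	/* mirrors HBG_REG_BRUST_LENGTH_B */

static uint32_t reg;			/* stand-in for the device register */

static void set_burst_len(uint16_t mtu)
{
	/* BIT(29) speeds up TX but drops packets once mtu > 2000. */
	if (mtu > 2000)
		reg &= ~BURST_LEN_BIT;
	else
		reg |= BURST_LEN_BIT;
}

int main(void)
{
	set_burst_len(1500);
	printf("mtu 1500: burst bit %s\n",
	       reg & BURST_LEN_BIT ? "on" : "off");
	set_burst_len(9000);
	printf("mtu 9000: burst bit %s\n",
	       reg & BURST_LEN_BIT ? "on" : "off");
	return 0;
}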
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
@@ -213,6 +230,9 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
+ u32 link_status;
+ int ret;
+
hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
@@ -224,8 +244,14 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
- if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
- HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
+ /* wait for the MAC link to come up */
+ ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
+ link_status,
+ FIELD_GET(HBG_REG_AN_NEG_STATE_NP_LINK_OK_B,
+ link_status),
+ HBG_MAC_LINK_WAIT_INTERVAL_US,
+ HBG_MAC_LINK_WAIT_TIMEOUT_US);
+ if (ret)
hbg_np_link_fail_task_schedule(priv);
}
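readl_poll_timeout repeatedly reads the register, sleeping between reads, until the condition holds or the timeout elapses, returning -ETIMEDOUT on failure. A userspace approximation of that loop (the kernel macro additionally performs a final read after the deadline):

/* Standalone sketch -- not part of the patch. */
#include <errno.h>
#include <stdbool.h>
#include <unistd.h>

static bool link_ok(void) { return false; }	/* pretend register read */

/* Poll cond every interval_us, give up after timeout_us. */
static int poll_timeout(bool (*cond)(void), long interval_us,
			long timeout_us)
{
	long waited = 0;

	for (;;) {
		if (cond())
			return 0;
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		usleep(interval_us);
		waited += interval_us;
	}
}

int main(void)
{
	/* 5 ms interval, 500 ms timeout, matching the patch's constants */
	return poll_timeout(link_ok, 5 * 1000, 500 * 1000) ? 1 : 0;
}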
@@ -264,6 +290,41 @@ void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
}
+static void hbg_hw_set_fifo_thrsld(struct hbg_priv *priv,
+ u32 full, u32 empty, enum hbg_dir dir)
+{
+ u32 value = 0;
+
+ value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_EMPTY_M, empty);
+
+ if (dir & HBG_DIR_TX)
+ hbg_reg_write(priv, HBG_REG_TX_FIFO_THRSLD_ADDR, value);
+
+ if (dir & HBG_DIR_RX)
+ hbg_reg_write(priv, HBG_REG_RX_FIFO_THRSLD_ADDR, value);
+}
+
+static void hbg_hw_set_cfg_fifo_thrsld(struct hbg_priv *priv,
+ u32 full, u32 empty, enum hbg_dir dir)
+{
+ u32 value;
+
+ value = hbg_reg_read(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR);
+
+ if (dir & HBG_DIR_TX) {
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_EMPTY_M, empty);
+ }
+
+ if (dir & HBG_DIR_RX) {
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_EMPTY_M, empty);
+ }
+
+ hbg_reg_write(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR, value);
+}
+
static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
{
u32 ctrl = 0;
@@ -324,5 +385,12 @@ int hbg_hw_init(struct hbg_priv *priv)
hbg_hw_init_rx_control(priv);
hbg_hw_init_transmit_ctrl(priv);
+
+ hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_TX_FULL_THRSLD,
+ HBG_FIFO_TX_EMPTY_THRSLD, HBG_DIR_TX);
+ hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_RX_FULL_THRSLD,
+ HBG_FIFO_RX_EMPTY_THRSLD, HBG_DIR_RX);
+ hbg_hw_set_cfg_fifo_thrsld(priv, HBG_CFG_FIFO_FULL_THRSLD,
+ HBG_CFG_FIFO_EMPTY_THRSLD, HBG_DIR_TX_RX);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index 42b0083c9193..8b7b476ed7fb 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -2,6 +2,7 @@
// Copyright (c) 2024 Hisilicon Limited.
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/rtnetlink.h>
#include "hbg_common.h"
#include "hbg_hw.h"
@@ -19,6 +20,7 @@
#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
+#define HBG_NO_PHY 0xFF
static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
{
@@ -229,6 +231,39 @@ void hbg_phy_stop(struct hbg_priv *priv)
phy_stop(priv->mac.phydev);
}
+static void hbg_fixed_phy_uninit(void *data)
+{
+ fixed_phy_unregister((struct phy_device *)data);
+}
+
+static int hbg_fixed_phy_init(struct hbg_priv *priv)
+{
+ struct fixed_phy_status hbg_fixed_phy_status = {
+ .link = 1,
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ .pause = 1,
+ .asym_pause = 1,
+ };
+ struct device *dev = &priv->pdev->dev;
+ struct phy_device *phydev;
+ int ret;
+
+ phydev = fixed_phy_register(&hbg_fixed_phy_status, NULL);
+ if (IS_ERR(phydev)) {
+ dev_err_probe(dev, PTR_ERR(phydev),
+ "failed to register fixed PHY device\n");
+ return PTR_ERR(phydev);
+ }
+
+ ret = devm_add_action_or_reset(dev, hbg_fixed_phy_uninit, phydev);
+ if (ret)
+ return ret;
+
+ priv->mac.phydev = phydev;
+ return hbg_phy_connect(priv);
+}
+
int hbg_mdio_init(struct hbg_priv *priv)
{
struct device *dev = &priv->pdev->dev;
@@ -238,6 +273,9 @@ int hbg_mdio_init(struct hbg_priv *priv)
int ret;
mac->phy_addr = priv->dev_specs.phy_addr;
+ if (mac->phy_addr == HBG_NO_PHY)
+ return hbg_fixed_phy_init(priv);
+
mdio_bus = devm_mdiobus_alloc(dev);
if (!mdio_bus)
return dev_err_probe(dev, -ENOMEM,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index a6e7f5e62b48..a39d1e796e4a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -141,7 +141,13 @@
/* PCU */
#define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420)
#define HBG_REG_RX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0424)
+#define HBG_REG_FIFO_THRSLD_FULL_M GENMASK(25, 16)
+#define HBG_REG_FIFO_THRSLD_EMPTY_M GENMASK(9, 0)
#define HBG_REG_CFG_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0428)
+#define HBG_REG_CFG_FIFO_THRSLD_TX_FULL_M GENMASK(31, 24)
+#define HBG_REG_CFG_FIFO_THRSLD_TX_EMPTY_M GENMASK(23, 16)
+#define HBG_REG_CFG_FIFO_THRSLD_RX_FULL_M GENMASK(15, 8)
+#define HBG_REG_CFG_FIFO_THRSLD_RX_EMPTY_M GENMASK(7, 0)
#define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C)
#define HBG_INT_MSK_WE_ERR_B BIT(31)
#define HBG_INT_MSK_RBREQ_ERR_B BIT(30)
@@ -185,6 +191,8 @@
#define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490)
#define HBG_REG_TX_CFF_ADDR_3_ADDR (HBG_REG_SGMII_BASE + 0x0494)
#define HBG_REG_RX_CFF_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x04A0)
+#define HBG_REG_BRUST_LENGTH_ADDR (HBG_REG_SGMII_BASE + 0x04C4)
+#define HBG_REG_BRUST_LENGTH_B BIT(29)
#define HBG_REG_RX_BUF_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x04E4)
#define HBG_REG_RX_BUF_SIZE_M GENMASK(15, 0)
#define HBG_REG_BUS_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x04E8)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
index 2883a5899ae2..8b6110599e10 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
@@ -29,7 +29,12 @@ static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir)
static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring)
{
- return (ring->ntu + ring->len - ring->ntc) % ring->len;
+ u32 len = READ_ONCE(ring->len);
+
+ if (!len)
+ return 0;
+
+ return (READ_ONCE(ring->ntu) + len - READ_ONCE(ring->ntc)) % len;
}
netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev);
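The fixed hbg_get_queue_used_num snapshots each index once with READ_ONCE and bails out on a zero length observed during teardown, since a modulo by zero is undefined behavior. The modular occupancy formula itself, with two worked cases:

/* Standalone sketch -- not part of the patch. */
#include <stdio.h>
#include <stdint.h>

static uint32_t used(uint32_t ntu, uint32_t ntc, uint32_t len)
{
	if (!len)		/* ring being torn down: report empty */
		return 0;
	return (ntu + len - ntc) % len;
}

int main(void)
{
	/* No wrap: ntu=10, ntc=4, len=16 -> 6 entries in flight. */
	printf("%u\n", used(10, 4, 16));
	/* Wrapped: ntu=2, ntc=12, len=16 -> (2+16-12)%16 == 6. */
	printf("%u\n", used(2, 12, 16));
	return 0;
}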
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 4e44f28288f9..3b548f71fa8a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -339,6 +339,10 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_UNKNOWN,
};
+#define hnae3_seq_file_to_ae_dev(s) (dev_get_drvdata((s)->private))
+#define hnae3_seq_file_to_handle(s) \
+ (((struct hnae3_ae_dev *)hnae3_seq_file_to_ae_dev(s))->handle)
+
enum hnae3_tc_map_mode {
HNAE3_TC_MAP_MODE_PRIO,
HNAE3_TC_MAP_MODE_DSCP,
@@ -434,8 +438,11 @@ struct hnae3_ae_dev {
u32 dev_version;
DECLARE_BITMAP(caps, HNAE3_DEV_CAPS_MAX_NUM);
void *priv;
+ struct hnae3_handle *handle;
};
+typedef int (*read_func)(struct seq_file *s, void *data);
+
/* This struct defines the operation on the handle.
*
* init_ae_dev(): (mandatory)
@@ -580,8 +587,6 @@ struct hnae3_ae_dev {
* Delete clsflower rule
* cls_flower_active
* Check if any cls flower rule exist
- * dbg_read_cmd
- * Execute debugfs read command.
* set_tx_hwts_info
* Save information for 1588 tx packet
* get_rx_hwts
@@ -594,6 +599,8 @@ struct hnae3_ae_dev {
* Get wake on lan info
* set_wol
* Config wake on lan
+ * dbg_get_read_func
+ * Return the read function for the debugfs seq_file
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -690,9 +697,9 @@ struct hnae3_ae_ops {
int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc);
int (*set_rss_tuple)(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd);
+ const struct ethtool_rxfh_fields *cmd);
int (*get_rss_tuple)(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd);
+ struct ethtool_rxfh_fields *cmd);
int (*get_tc_size)(struct hnae3_handle *handle);
@@ -748,8 +755,6 @@ struct hnae3_ae_ops {
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
- int (*dbg_read_cmd)(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -796,6 +801,9 @@ struct hnae3_ae_ops {
struct ethtool_wolinfo *wol);
int (*set_wol)(struct hnae3_handle *handle,
struct ethtool_wolinfo *wol);
+ int (*dbg_get_read_func)(struct hnae3_handle *handle,
+ enum hnae3_dbg_cmd cmd,
+ read_func *func);
};
struct hnae3_dcb_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index 4ad4e8ab2f1f..37396ca4ecfc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -348,7 +348,7 @@ static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
- return head == hw->cmq.csq.next_to_use;
+ return head == (u32)hw->cmq.csq.next_to_use;
}
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
index 4e2bb6556b1c..1eca53aaf598 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -151,7 +151,7 @@ EXPORT_SYMBOL_GPL(hclge_comm_set_rss_hash_key);
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_comm_rss_input_tuple_cmd *req;
struct hclge_desc desc;
@@ -422,7 +422,7 @@ int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
}
EXPORT_SYMBOL_GPL(hclge_comm_set_rss_algo_key);
-static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+static u8 hclge_comm_get_rss_hash_bits(const struct ethtool_rxfh_fields *nfc)
{
u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;
@@ -448,7 +448,7 @@ static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
}
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
index cdafa63fe38b..cbc02b50c6e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -108,7 +108,7 @@ void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key);
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req);
u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
@@ -129,5 +129,5 @@ int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc);
+ const struct ethtool_rxfh_fields *nfc);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 4e5d8bc39a1b..0255c8acb744 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/seq_file.h>
#include <linux/string_choices.h>
#include "hnae3.h"
@@ -40,323 +41,279 @@ static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = {
};
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd);
-static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_init_t1(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_init_t2(struct hnae3_handle *handle, u32 cmd);
static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
{
.name = "tm_nodes",
.cmd = HNAE3_DBG_CMD_TM_NODES,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_priority",
.cmd = HNAE3_DBG_CMD_TM_PRI,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_qset",
.cmd = HNAE3_DBG_CMD_TM_QSET,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_map",
.cmd = HNAE3_DBG_CMD_TM_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_pg",
.cmd = HNAE3_DBG_CMD_TM_PG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_port",
.cmd = HNAE3_DBG_CMD_TM_PORT,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tc_sch_info",
.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_pause_cfg",
.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_pri_map",
.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_dscp_map",
.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_buf_cfg",
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "dev_info",
.cmd = HNAE3_DBG_CMD_DEV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "tx_bd_queue",
.cmd = HNAE3_DBG_CMD_TX_BD,
.dentry = HNS3_DBG_DENTRY_TX_BD,
- .buf_len = HNS3_DBG_READ_LEN_5MB,
.init = hns3_dbg_bd_file_init,
},
{
.name = "rx_bd_queue",
.cmd = HNAE3_DBG_CMD_RX_BD,
.dentry = HNS3_DBG_DENTRY_RX_BD,
- .buf_len = HNS3_DBG_READ_LEN_4MB,
.init = hns3_dbg_bd_file_init,
},
{
.name = "uc",
.cmd = HNAE3_DBG_CMD_MAC_UC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mc",
.cmd = HNAE3_DBG_CMD_MAC_MC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mng_tbl",
.cmd = HNAE3_DBG_CMD_MNG_TBL,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "loopback",
.cmd = HNAE3_DBG_CMD_LOOPBACK,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "interrupt_info",
.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "reset_info",
.cmd = HNAE3_DBG_CMD_RESET_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "imp_info",
.cmd = HNAE3_DBG_CMD_IMP_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ncl_config",
.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mac_tnl_status",
.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "bios_common",
.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ssu",
.cmd = HNAE3_DBG_CMD_REG_SSU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "igu_egu",
.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rpu",
.cmd = HNAE3_DBG_CMD_REG_RPU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ncsi",
.cmd = HNAE3_DBG_CMD_REG_NCSI,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rtc",
.cmd = HNAE3_DBG_CMD_REG_RTC,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ppp",
.cmd = HNAE3_DBG_CMD_REG_PPP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rcb",
.cmd = HNAE3_DBG_CMD_REG_RCB,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tqp",
.cmd = HNAE3_DBG_CMD_REG_TQP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mac",
.cmd = HNAE3_DBG_CMD_REG_MAC,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "dcb",
.cmd = HNAE3_DBG_CMD_REG_DCB,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "queue_map",
.cmd = HNAE3_DBG_CMD_QUEUE_MAP,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "rx_queue_info",
.cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "tx_queue_info",
.cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "fd_tcam",
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "service_task_info",
.cmd = HNAE3_DBG_CMD_SERV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "vlan_config",
.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ptp_info",
.cmd = HNAE3_DBG_CMD_PTP_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "fd_counter",
.cmd = HNAE3_DBG_CMD_FD_COUNTER,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "umv_info",
.cmd = HNAE3_DBG_CMD_UMV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "page_pool_info",
.cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "coalesce_info",
.cmd = HNAE3_DBG_CMD_COAL_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
};
@@ -421,71 +378,17 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}
};
-static const struct hns3_dbg_item coal_info_items[] = {
- { "VEC_ID", 2 },
- { "ALGO_STATE", 2 },
- { "PROFILE_ID", 2 },
- { "CQE_MODE", 2 },
- { "TUNE_STATE", 2 },
- { "STEPS_LEFT", 2 },
- { "STEPS_RIGHT", 2 },
- { "TIRED", 2 },
- { "SW_GL", 2 },
- { "SW_QL", 2 },
- { "HW_GL", 2 },
- { "HW_QL", 2 },
-};
-
static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" };
static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" };
static const char * const
dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" };
-static void hns3_dbg_fill_content(char *content, u16 len,
- const struct hns3_dbg_item *items,
- const char **result, u16 size)
-{
-#define HNS3_DBG_LINE_END_LEN 2
- char *pos = content;
- u16 item_len;
- u16 i;
-
- if (!len) {
- return;
- } else if (len <= HNS3_DBG_LINE_END_LEN) {
- *pos++ = '\0';
- return;
- }
-
- memset(content, ' ', len);
- len -= HNS3_DBG_LINE_END_LEN;
-
- for (i = 0; i < size; i++) {
- item_len = strlen(items[i].name) + items[i].interval;
- if (len < item_len)
- break;
-
- if (result) {
- if (item_len < strlen(result[i]))
- break;
- memcpy(pos, result[i], strlen(result[i]));
- } else {
- memcpy(pos, items[i].name, strlen(items[i].name));
- }
- pos += item_len;
- len -= item_len;
- }
- *pos++ = '\n';
- *pos++ = '\0';
-}
-
static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
- char **result, int i, bool is_tx)
+ struct seq_file *s, int i, bool is_tx)
{
unsigned int gl_offset, ql_offset;
struct hns3_enet_coalesce *coal;
unsigned int reg_val;
- unsigned int j = 0;
struct dim *dim;
bool ql_enable;
@@ -503,193 +406,96 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
ql_enable = tqp_vector->rx_group.coal.ql_enable;
}
- sprintf(result[j++], "%d", i);
- sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
- dim_state_str[dim->state] : "unknown");
- sprintf(result[j++], "%u", dim->profile_ix);
- sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
- dim_cqe_mode_str[dim->mode] : "unknown");
- sprintf(result[j++], "%s",
- dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
- dim_tune_stat_str[dim->tune_state] : "unknown");
- sprintf(result[j++], "%u", dim->steps_left);
- sprintf(result[j++], "%u", dim->steps_right);
- sprintf(result[j++], "%u", dim->tired);
- sprintf(result[j++], "%u", coal->int_gl);
- sprintf(result[j++], "%u", coal->int_ql);
+ seq_printf(s, "%-8d", i);
+ seq_printf(s, "%-12s", dim->state < ARRAY_SIZE(dim_state_str) ?
+ dim_state_str[dim->state] : "unknown");
+ seq_printf(s, "%-12u", dim->profile_ix);
+ seq_printf(s, "%-10s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
+ dim_cqe_mode_str[dim->mode] : "unknown");
+ seq_printf(s, "%-12s", dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
+ dim_tune_stat_str[dim->tune_state] : "unknown");
+ seq_printf(s, "%-12u%-13u%-7u%-7u%-7u", dim->steps_left,
+ dim->steps_right, dim->tired, coal->int_gl, coal->int_ql);
reg_val = readl(tqp_vector->mask_addr + gl_offset) &
HNS3_VECTOR_GL_MASK;
- sprintf(result[j++], "%u", reg_val);
+ seq_printf(s, "%-7u", reg_val);
if (ql_enable) {
reg_val = readl(tqp_vector->mask_addr + ql_offset) &
HNS3_VECTOR_QL_MASK;
- sprintf(result[j++], "%u", reg_val);
+ seq_printf(s, "%u\n", reg_val);
} else {
- sprintf(result[j++], "NA");
+ seq_puts(s, "NA\n");
}
}
-static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len,
- int *pos, bool is_tx)
+static void hns3_dump_coal_info(struct seq_file *s, bool is_tx)
{
- char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(coal_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_enet_tqp_vector *tqp_vector;
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(coal_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_printf(s, "%s interrupt coalesce info:\n", is_tx ? "tx" : "rx");
- *pos += scnprintf(buf + *pos, len - *pos,
- "%s interrupt coalesce info:\n",
- is_tx ? "tx" : "rx");
- hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
- NULL, ARRAY_SIZE(coal_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "VEC_ID ALGO_STATE PROFILE_ID CQE_MODE TUNE_STATE ");
+ seq_puts(s, "STEPS_LEFT STEPS_RIGHT TIRED SW_GL SW_QL ");
+ seq_puts(s, "HW_GL HW_QL\n");
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
- hns3_get_coal_info(tqp_vector, result, i, is_tx);
- hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
- (const char **)result,
- ARRAY_SIZE(coal_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ hns3_get_coal_info(tqp_vector, s, i, is_tx);
}
}
-static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_coal_info(struct seq_file *s, void *data)
{
- int pos = 0;
-
- hns3_dump_coal_info(h, buf, len, &pos, true);
- pos += scnprintf(buf + pos, len - pos, "\n");
- hns3_dump_coal_info(h, buf, len, &pos, false);
+ hns3_dump_coal_info(s, true);
+ seq_puts(s, "\n");
+ hns3_dump_coal_info(s, false);
return 0;
}
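The conversion above replaces hand-rolled buffer filling with the kernel's seq_file interface: the read callback only calls seq_printf()/seq_puts() and the core handles allocation and sizing, which is why the per-command buf_len fields could be dropped. A minimal sketch of wiring such a show callback to debugfs; the file name and output layout are invented:

/* Standalone sketch -- not part of the patch. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *s, void *data)
{
	/* seq_file grows its buffer as needed; no buf_len bookkeeping. */
	seq_puts(s, "QUEUE_ID  STATE\n");
	seq_printf(s, "%-10u%s\n", 0U, "up");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.open = demo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* usage: debugfs_create_file("demo", 0400, parent, NULL, &demo_fops); */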
-static const struct hns3_dbg_item tx_spare_info_items[] = {
- { "QUEUE_ID", 2 },
- { "COPYBREAK", 2 },
- { "LEN", 7 },
- { "NTU", 4 },
- { "NTC", 4 },
- { "LTC", 4 },
- { "DMA", 17 },
-};
-
-static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
- int len, u32 ring_num, int *pos)
-{
- char data_str[ARRAY_SIZE(tx_spare_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_tx_spare *tx_spare = ring->tx_spare;
- char *result[ARRAY_SIZE(tx_spare_info_items)];
- char content[HNS3_DBG_INFO_LEN];
- u32 i, j;
-
- if (!tx_spare) {
- *pos += scnprintf(buf + *pos, len - *pos,
- "tx spare buffer is not enabled\n");
- return;
- }
-
- for (i = 0; i < ARRAY_SIZE(tx_spare_info_items); i++)
- result[i] = &data_str[i][0];
-
- *pos += scnprintf(buf + *pos, len - *pos, "tx spare buffer info\n");
- hns3_dbg_fill_content(content, sizeof(content), tx_spare_info_items,
- NULL, ARRAY_SIZE(tx_spare_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
-
- for (i = 0; i < ring_num; i++) {
- j = 0;
- sprintf(result[j++], "%u", i);
- sprintf(result[j++], "%u", ring->tx_copybreak);
- sprintf(result[j++], "%u", tx_spare->len);
- sprintf(result[j++], "%u", tx_spare->next_to_use);
- sprintf(result[j++], "%u", tx_spare->next_to_clean);
- sprintf(result[j++], "%u", tx_spare->last_to_clean);
- sprintf(result[j++], "%pad", &tx_spare->dma);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_spare_info_items,
- (const char **)result,
- ARRAY_SIZE(tx_spare_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
- }
-}
-
-static const struct hns3_dbg_item rx_queue_info_items[] = {
- { "QUEUE_ID", 2 },
- { "BD_NUM", 2 },
- { "BD_LEN", 2 },
- { "TAIL", 2 },
- { "HEAD", 2 },
- { "FBDNUM", 2 },
- { "PKTNUM", 5 },
- { "COPYBREAK", 2 },
- { "RING_EN", 2 },
- { "RX_RING_EN", 2 },
- { "BASE_ADDR", 10 },
-};
-
static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
- struct hnae3_ae_dev *ae_dev, char **result,
- u32 index)
+ struct seq_file *s, u32 index)
{
+ struct hnae3_ae_dev *ae_dev = hnae3_seq_file_to_ae_dev(s);
+ void __iomem *base = ring->tqp->io_base;
u32 base_add_l, base_add_h;
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BD_NUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BD_LEN_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_TAIL_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_HEAD_REG));
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_FBDNUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%u", ring->rx_copybreak);
-
- sprintf(result[j++], "%s",
- str_on_off(readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG)));
+ seq_printf(s, "%-10u", index);
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_BD_NUM_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_BD_LEN_REG));
+ seq_printf(s, "%-6u",
+ readl_relaxed(base + HNS3_RING_RX_RING_TAIL_REG));
+ seq_printf(s, "%-6u",
+ readl_relaxed(base + HNS3_RING_RX_RING_HEAD_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_FBDNUM_REG));
+ seq_printf(s, "%-11u", readl_relaxed(base +
+ HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
+ seq_printf(s, "%-11u", ring->rx_copybreak);
+ seq_printf(s, "%-9s",
+ str_on_off(readl_relaxed(base + HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s",
- str_on_off(readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_EN_REG)));
+ seq_printf(s, "%-12s", str_on_off(readl_relaxed(base +
+ HNS3_RING_RX_EN_REG)));
else
- sprintf(result[j++], "%s", "NA");
+ seq_printf(s, "%-12s", "NA");
- base_add_h = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BASEADDR_H_REG);
- base_add_l = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BASEADDR_L_REG);
- sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+ base_add_h = readl_relaxed(base + HNS3_RING_RX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(base + HNS3_RING_RX_RING_BASEADDR_L_REG);
+ seq_printf(s, "0x%08x%08x\n", base_add_h, base_add_l);
}
-static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
- char *buf, int len)
+static int hns3_dbg_rx_queue_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(rx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- char *result[ARRAY_SIZE(rx_queue_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -697,12 +503,9 @@ static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(rx_queue_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "QUEUE_ID BD_NUM BD_LEN TAIL HEAD FBDNUM ");
+ seq_puts(s, "PKTNUM COPYBREAK RING_EN RX_RING_EN BASE_ADDR\n");
- hns3_dbg_fill_content(content, sizeof(content), rx_queue_info_items,
- NULL, ARRAY_SIZE(rx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
/* Each cycle needs to determine whether the instance is reset,
* to prevent reference to invalid memory. And need to ensure
@@ -713,88 +516,51 @@ static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
return -EPERM;
ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
- hns3_dump_rx_queue_info(ring, ae_dev, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- rx_queue_info_items,
- (const char **)result,
- ARRAY_SIZE(rx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_rx_queue_info(ring, s, i);
}
return 0;
}
-static const struct hns3_dbg_item tx_queue_info_items[] = {
- { "QUEUE_ID", 2 },
- { "BD_NUM", 2 },
- { "TC", 2 },
- { "TAIL", 2 },
- { "HEAD", 2 },
- { "FBDNUM", 2 },
- { "OFFSET", 2 },
- { "PKTNUM", 5 },
- { "RING_EN", 2 },
- { "TX_RING_EN", 2 },
- { "BASE_ADDR", 10 },
-};
-
static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
- struct hnae3_ae_dev *ae_dev, char **result,
- u32 index)
+ struct seq_file *s, u32 index)
{
+ struct hnae3_ae_dev *ae_dev = hnae3_seq_file_to_ae_dev(s);
+ void __iomem *base = ring->tqp->io_base;
u32 base_add_l, base_add_h;
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BD_NUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_TC_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_TAIL_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_HEAD_REG));
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_FBDNUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_OFFSET_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
-
- sprintf(result[j++], "%s",
- str_on_off(readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG)));
+ seq_printf(s, "%-10u", index);
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_BD_NUM_REG));
+ seq_printf(s, "%-4u", readl_relaxed(base + HNS3_RING_TX_RING_TC_REG));
+ seq_printf(s, "%-6u", readl_relaxed(base + HNS3_RING_TX_RING_TAIL_REG));
+ seq_printf(s, "%-6u", readl_relaxed(base + HNS3_RING_TX_RING_HEAD_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_FBDNUM_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_OFFSET_REG));
+ seq_printf(s, "%-11u",
+ readl_relaxed(base + HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
+ seq_printf(s, "%-9s",
+ str_on_off(readl_relaxed(base + HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s",
- str_on_off(readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_EN_REG)));
+ seq_printf(s, "%-12s",
+ str_on_off(readl_relaxed(base +
+ HNS3_RING_TX_EN_REG)));
else
- sprintf(result[j++], "%s", "NA");
+ seq_printf(s, "%-12s", "NA");
- base_add_h = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BASEADDR_H_REG);
- base_add_l = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BASEADDR_L_REG);
- sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+ base_add_h = readl_relaxed(base + HNS3_RING_TX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(base + HNS3_RING_TX_RING_BASEADDR_L_REG);
+ seq_printf(s, "0x%08x%08x\n", base_add_h, base_add_l);
}
-static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
- char *buf, int len)
+static int hns3_dbg_tx_queue_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(tx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- char *result[ARRAY_SIZE(tx_queue_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -802,12 +568,8 @@ static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(tx_queue_info_items); i++)
- result[i] = &data_str[i][0];
-
- hns3_dbg_fill_content(content, sizeof(content), tx_queue_info_items,
- NULL, ARRAY_SIZE(tx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_puts(s, "QUEUE_ID BD_NUM TC TAIL HEAD FBDNUM OFFSET ");
+ seq_puts(s, "PKTNUM RING_EN TX_RING_EN BASE_ADDR\n");
for (i = 0; i < h->kinfo.num_tqps; i++) {
/* Each cycle needs to determine whether the instance is reset,
@@ -819,338 +581,213 @@ static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
return -EPERM;
ring = &priv->ring[i];
- hns3_dump_tx_queue_info(ring, ae_dev, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_queue_info_items,
- (const char **)result,
- ARRAY_SIZE(tx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_tx_queue_info(ring, s, i);
}
- hns3_dbg_tx_spare_info(ring, buf, len, h->kinfo.num_tqps, &pos);
-
return 0;
}
-static const struct hns3_dbg_item queue_map_items[] = {
- { "local_queue_id", 2 },
- { "global_queue_id", 2 },
- { "vector_id", 2 },
-};
-
-static int hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_queue_map(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(queue_map_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(queue_map_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
- int pos = 0;
- int j;
u32 i;
if (!h->ae_algo->ops->get_global_queue_id)
return -EOPNOTSUPP;
- for (i = 0; i < ARRAY_SIZE(queue_map_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "local_queue_id global_queue_id vector_id\n");
- hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
- NULL, ARRAY_SIZE(queue_map_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
if (!priv->ring || !priv->ring[i].tqp_vector)
continue;
- j = 0;
- sprintf(result[j++], "%u", i);
- sprintf(result[j++], "%u",
- h->ae_algo->ops->get_global_queue_id(h, i));
- sprintf(result[j++], "%d",
- priv->ring[i].tqp_vector->vector_irq);
- hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
- (const char **)result,
- ARRAY_SIZE(queue_map_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%-16u%-17u%d\n", i,
+ h->ae_algo->ops->get_global_queue_id(h, i),
+ priv->ring[i].tqp_vector->vector_irq);
}
return 0;
}
-static const struct hns3_dbg_item rx_bd_info_items[] = {
- { "BD_IDX", 3 },
- { "L234_INFO", 2 },
- { "PKT_LEN", 3 },
- { "SIZE", 4 },
- { "RSS_HASH", 4 },
- { "FD_ID", 2 },
- { "VLAN_TAG", 2 },
- { "O_DM_VLAN_ID_FB", 2 },
- { "OT_VLAN_TAG", 2 },
- { "BD_BASE_INFO", 2 },
- { "PTYPE", 2 },
- { "HW_CSUM", 2 },
-};
-
static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
- struct hns3_desc *desc, char **result, int idx)
+ struct hns3_desc *desc, struct seq_file *s,
+ int idx)
{
- unsigned int j = 0;
-
- sprintf(result[j++], "%d", idx);
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
+ seq_printf(s, "%-9d%#-11x%-10u%-8u%#-12x%-7u%-10u%-17u%-13u%#-14x",
+ idx, le32_to_cpu(desc->rx.l234_info),
+ le16_to_cpu(desc->rx.pkt_len), le16_to_cpu(desc->rx.size),
+ le32_to_cpu(desc->rx.rss_hash), le16_to_cpu(desc->rx.fd_id),
+ le16_to_cpu(desc->rx.vlan_tag),
+ le16_to_cpu(desc->rx.o_dm_vlan_id_fb),
+ le16_to_cpu(desc->rx.ot_vlan_tag),
+ le32_to_cpu(desc->rx.bd_base_info));
+
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
u32 ol_info = le32_to_cpu(desc->rx.ol_info);
- sprintf(result[j++], "%5lu", hnae3_get_field(ol_info,
- HNS3_RXD_PTYPE_M,
- HNS3_RXD_PTYPE_S));
- sprintf(result[j++], "%7u", le16_to_cpu(desc->csum));
+ seq_printf(s, "%-7lu%-9u\n",
+ hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S),
+ le16_to_cpu(desc->csum));
} else {
- sprintf(result[j++], "NA");
- sprintf(result[j++], "NA");
+ seq_puts(s, "NA NA\n");
}
}
-static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+static int hns3_dbg_rx_bd_info(struct seq_file *s, void *private)
{
- char data_str[ARRAY_SIZE(rx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_nic_priv *priv = d->handle->priv;
- char *result[ARRAY_SIZE(rx_bd_info_items)];
- char content[HNS3_DBG_INFO_LEN];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
struct hns3_enet_ring *ring;
struct hns3_desc *desc;
unsigned int i;
- int pos = 0;
- if (d->qid >= d->handle->kinfo.num_tqps) {
- dev_err(&d->handle->pdev->dev,
- "queue%u is not in use\n", d->qid);
+ if (data->qid >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
return -EINVAL;
}
- for (i = 0; i < ARRAY_SIZE(rx_bd_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_printf(s, "Queue %u rx bd info:\n", data->qid);
+ seq_puts(s, "BD_IDX L234_INFO PKT_LEN SIZE ");
+ seq_puts(s, "RSS_HASH FD_ID VLAN_TAG O_DM_VLAN_ID_FB ");
+ seq_puts(s, "OT_VLAN_TAG BD_BASE_INFO PTYPE HW_CSUM\n");
- pos += scnprintf(buf + pos, len - pos,
- "Queue %u rx bd info:\n", d->qid);
- hns3_dbg_fill_content(content, sizeof(content), rx_bd_info_items,
- NULL, ARRAY_SIZE(rx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
-
- ring = &priv->ring[d->qid + d->handle->kinfo.num_tqps];
+ ring = &priv->ring[data->qid + data->handle->kinfo.num_tqps];
for (i = 0; i < ring->desc_num; i++) {
desc = &ring->desc[i];
- hns3_dump_rx_bd_info(priv, desc, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- rx_bd_info_items, (const char **)result,
- ARRAY_SIZE(rx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_rx_bd_info(priv, desc, s, i);
}
return 0;
}
-static const struct hns3_dbg_item tx_bd_info_items[] = {
- { "BD_IDX", 2 },
- { "ADDRESS", 13 },
- { "VLAN_TAG", 2 },
- { "SIZE", 2 },
- { "T_CS_VLAN_TSO", 2 },
- { "OT_VLAN_TAG", 3 },
- { "TV", 5 },
- { "OLT_VLAN_LEN", 2 },
- { "PAYLEN_OL4CS", 2 },
- { "BD_FE_SC_VLD", 2 },
- { "MSS_HW_CSUM", 0 },
-};
-
-static void hns3_dump_tx_bd_info(struct hns3_desc *desc, char **result, int idx)
+static void hns3_dump_tx_bd_info(struct hns3_desc *desc, struct seq_file *s,
+ int idx)
{
- unsigned int j = 0;
-
- sprintf(result[j++], "%d", idx);
- sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
- sprintf(result[j++], "%#x",
- le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
- sprintf(result[j++], "%u",
- le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
- sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
+ seq_printf(s, "%-8d%#-20llx%-10u%-6u%#-15x%-14u%-7u%-16u%#-14x%#-14x%-11u\n",
+ idx, le64_to_cpu(desc->addr),
+ le16_to_cpu(desc->tx.vlan_tag),
+ le16_to_cpu(desc->tx.send_size),
+ le32_to_cpu(desc->tx.type_cs_vlan_tso_len),
+ le16_to_cpu(desc->tx.outer_vlan_tag),
+ le16_to_cpu(desc->tx.tv),
+ le32_to_cpu(desc->tx.ol_type_vlan_len_msec),
+ le32_to_cpu(desc->tx.paylen_ol4cs),
+ le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri),
+ le16_to_cpu(desc->tx.mss_hw_csum));
}
-static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+static int hns3_dbg_tx_bd_info(struct seq_file *s, void *private)
{
- char data_str[ARRAY_SIZE(tx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_nic_priv *priv = d->handle->priv;
- char *result[ARRAY_SIZE(tx_bd_info_items)];
- char content[HNS3_DBG_INFO_LEN];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
struct hns3_enet_ring *ring;
struct hns3_desc *desc;
unsigned int i;
- int pos = 0;
- if (d->qid >= d->handle->kinfo.num_tqps) {
- dev_err(&d->handle->pdev->dev,
- "queue%u is not in use\n", d->qid);
+ if (data->qid >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
return -EINVAL;
}
- for (i = 0; i < ARRAY_SIZE(tx_bd_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_printf(s, "Queue %u tx bd info:\n", data->qid);
+ seq_puts(s, "BD_IDX ADDRESS VLAN_TAG SIZE ");
+ seq_puts(s, "T_CS_VLAN_TSO OT_VLAN_TAG TV OLT_VLAN_LEN ");
+ seq_puts(s, "PAYLEN_OL4CS BD_FE_SC_VLD MSS_HW_CSUM\n");
- pos += scnprintf(buf + pos, len - pos,
- "Queue %u tx bd info:\n", d->qid);
- hns3_dbg_fill_content(content, sizeof(content), tx_bd_info_items,
- NULL, ARRAY_SIZE(tx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
-
- ring = &priv->ring[d->qid];
+ ring = &priv->ring[data->qid];
for (i = 0; i < ring->desc_num; i++) {
desc = &ring->desc[i];
- hns3_dump_tx_bd_info(desc, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_bd_info_items, (const char **)result,
- ARRAY_SIZE(tx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_tx_bd_info(desc, s, i);
}
return 0;
}
-static void
-hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
+static void hns3_dbg_dev_caps(struct hnae3_handle *h, struct seq_file *s)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
unsigned long *caps = ae_dev->caps;
u32 i, state;
- *pos += scnprintf(buf + *pos, len - *pos, "dev capability:\n");
+ seq_puts(s, "dev capability:\n");
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) {
state = test_bit(hns3_dbg_cap[i].cap_bit, caps);
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n",
- hns3_dbg_cap[i].name, str_yes_no(state));
+ seq_printf(s, "%s: %s\n", hns3_dbg_cap[i].name,
+ str_yes_no(state));
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
}
-static void
-hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
+static void hns3_dbg_dev_specs(struct hnae3_handle *h, struct seq_file *s)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
struct hnae3_knic_private_info *kinfo = &h->kinfo;
struct net_device *dev = kinfo->netdev;
- *pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n");
- *pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n",
- dev_specs->mac_entry_num);
- *pos += scnprintf(buf + *pos, len - *pos, "MNG entry num: %u\n",
- dev_specs->mng_entry_num);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX non tso bd num: %u\n",
- dev_specs->max_non_tso_bd_num);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS ind tbl size: %u\n",
- dev_specs->rss_ind_tbl_size);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS key size: %u\n",
- dev_specs->rss_key_size);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS size: %u\n",
- kinfo->rss_size);
- *pos += scnprintf(buf + *pos, len - *pos, "Allocated RSS size: %u\n",
- kinfo->req_rss_size);
- *pos += scnprintf(buf + *pos, len - *pos,
- "Task queue pairs numbers: %u\n",
- kinfo->num_tqps);
- *pos += scnprintf(buf + *pos, len - *pos, "RX buffer length: %u\n",
- kinfo->rx_buf_len);
- *pos += scnprintf(buf + *pos, len - *pos, "Desc num per TX queue: %u\n",
- kinfo->num_tx_desc);
- *pos += scnprintf(buf + *pos, len - *pos, "Desc num per RX queue: %u\n",
- kinfo->num_rx_desc);
- *pos += scnprintf(buf + *pos, len - *pos,
- "Total number of enabled TCs: %u\n",
- kinfo->tc_info.num_tc);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX INT QL: %u\n",
- dev_specs->int_ql_max);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX INT GL: %u\n",
- dev_specs->max_int_gl);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX TM RATE: %u\n",
- dev_specs->max_tm_rate);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
- dev_specs->max_qset_num);
- *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n",
- dev_specs->umv_size);
- *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n",
- dev_specs->mc_mac_size);
- *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
- dev_specs->mac_stats_num);
- *pos += scnprintf(buf + *pos, len - *pos,
- "TX timeout threshold: %d seconds\n",
- dev->watchdog_timeo / HZ);
- *pos += scnprintf(buf + *pos, len - *pos, "Hilink Version: %u\n",
- dev_specs->hilink_version);
+ seq_puts(s, "dev_spec:\n");
+ seq_printf(s, "MAC entry num: %u\n", dev_specs->mac_entry_num);
+ seq_printf(s, "MNG entry num: %u\n", dev_specs->mng_entry_num);
+ seq_printf(s, "MAX non tso bd num: %u\n",
+ dev_specs->max_non_tso_bd_num);
+ seq_printf(s, "RSS ind tbl size: %u\n", dev_specs->rss_ind_tbl_size);
+ seq_printf(s, "RSS key size: %u\n", dev_specs->rss_key_size);
+ seq_printf(s, "RSS size: %u\n", kinfo->rss_size);
+ seq_printf(s, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ seq_printf(s, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+ seq_printf(s, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ seq_printf(s, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ seq_printf(s, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ seq_printf(s, "Total number of enabled TCs: %u\n",
+ kinfo->tc_info.num_tc);
+ seq_printf(s, "MAX INT QL: %u\n", dev_specs->int_ql_max);
+ seq_printf(s, "MAX INT GL: %u\n", dev_specs->max_int_gl);
+ seq_printf(s, "MAX TM RATE: %u\n", dev_specs->max_tm_rate);
+ seq_printf(s, "MAX QSET number: %u\n", dev_specs->max_qset_num);
+ seq_printf(s, "umv size: %u\n", dev_specs->umv_size);
+ seq_printf(s, "mc mac size: %u\n", dev_specs->mc_mac_size);
+ seq_printf(s, "MAC statistics number: %u\n", dev_specs->mac_stats_num);
+ seq_printf(s, "TX timeout threshold: %d seconds\n",
+ dev->watchdog_timeo / HZ);
+ seq_printf(s, "mac tunnel number: %u\n", dev_specs->tnl_num);
+ seq_printf(s, "Hilink Version: %u\n", dev_specs->hilink_version);
}
-static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_dev_info(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
- hns3_dbg_dev_caps(h, buf, len, &pos);
-
- hns3_dbg_dev_specs(h, buf, len, &pos);
+ hns3_dbg_dev_caps(h, s);
+ hns3_dbg_dev_specs(h, s);
return 0;
}
-static const struct hns3_dbg_item page_pool_info_items[] = {
- { "QUEUE_ID", 2 },
- { "ALLOCATE_CNT", 2 },
- { "FREE_CNT", 6 },
- { "POOL_SIZE(PAGE_NUM)", 2 },
- { "ORDER", 2 },
- { "NUMA_ID", 2 },
- { "MAX_LEN", 2 },
-};
-
static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
- char **result, u32 index)
+ struct seq_file *s, u32 index)
{
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%u",
- READ_ONCE(ring->page_pool->pages_state_hold_cnt));
- sprintf(result[j++], "%d",
- atomic_read(&ring->page_pool->pages_state_release_cnt));
- sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
- sprintf(result[j++], "%u", ring->page_pool->p.order);
- sprintf(result[j++], "%d", ring->page_pool->p.nid);
- sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);
+ seq_printf(s, "%-10u%-14u%-14d%-21u%-7u%-9d%uK\n",
+ index,
+ READ_ONCE(ring->page_pool->pages_state_hold_cnt),
+ atomic_read(&ring->page_pool->pages_state_release_cnt),
+ ring->page_pool->p.pool_size,
+ ring->page_pool->p.order,
+ ring->page_pool->p.nid,
+ ring->page_pool->p.max_len / 1024);
}
-static int
-hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_page_pool_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(page_pool_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -1163,162 +800,44 @@ hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "QUEUE_ID ALLOCATE_CNT FREE_CNT ");
+ seq_puts(s, "POOL_SIZE(PAGE_NUM) ORDER NUMA_ID MAX_LEN\n");
- hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items,
- NULL, ARRAY_SIZE(page_pool_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EPERM;
+
ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
- hns3_dump_page_pool_info(ring, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- page_pool_info_items,
- (const char **)result,
- ARRAY_SIZE(page_pool_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_page_pool_info(ring, s, i);
}
return 0;
}
-static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
-{
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
- if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) {
- *index = i;
- return 0;
- }
- }
-
- dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n",
- dbg_data->cmd);
- return -EINVAL;
-}
-
-static const struct hns3_dbg_func hns3_dbg_cmd_func[] = {
- {
- .cmd = HNAE3_DBG_CMD_QUEUE_MAP,
- .dbg_dump = hns3_dbg_queue_map,
- },
- {
- .cmd = HNAE3_DBG_CMD_DEV_INFO,
- .dbg_dump = hns3_dbg_dev_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_TX_BD,
- .dbg_dump_bd = hns3_dbg_tx_bd_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_RX_BD,
- .dbg_dump_bd = hns3_dbg_rx_bd_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
- .dbg_dump = hns3_dbg_rx_queue_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
- .dbg_dump = hns3_dbg_tx_queue_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
- .dbg_dump = hns3_dbg_page_pool_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_COAL_INFO,
- .dbg_dump = hns3_dbg_coal_info,
- },
-};
-
-static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
- enum hnae3_dbg_cmd cmd, char *buf, int len)
-{
- const struct hnae3_ae_ops *ops = dbg_data->handle->ae_algo->ops;
- const struct hns3_dbg_func *cmd_func;
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd_func); i++) {
- if (cmd == hns3_dbg_cmd_func[i].cmd) {
- cmd_func = &hns3_dbg_cmd_func[i];
- if (cmd_func->dbg_dump)
- return cmd_func->dbg_dump(dbg_data->handle, buf,
- len);
- else
- return cmd_func->dbg_dump_bd(dbg_data, buf,
- len);
- }
- }
-
- if (!ops->dbg_read_cmd)
- return -EOPNOTSUPP;
-
- return ops->dbg_read_cmd(dbg_data->handle, cmd, buf, len);
-}
-
-static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- char *buf = filp->private_data;
-
- return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
-}
-
-static int hns3_dbg_open(struct inode *inode, struct file *filp)
+static int hns3_dbg_bd_info_show(struct seq_file *s, void *private)
{
- struct hns3_dbg_data *dbg_data = inode->i_private;
- struct hnae3_handle *handle = dbg_data->handle;
- struct hns3_nic_priv *priv = handle->priv;
- u32 index;
- char *buf;
- int ret;
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EBUSY;
- ret = hns3_dbg_get_cmd_index(dbg_data, &index);
- if (ret)
- return ret;
-
- buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
- buf, hns3_dbg_cmd[index].buf_len);
- if (ret) {
- kvfree(buf);
- return ret;
- }
-
- filp->private_data = buf;
- return 0;
-}
+ if (data->cmd == HNAE3_DBG_CMD_TX_BD)
+ return hns3_dbg_tx_bd_info(s, private);
+ else if (data->cmd == HNAE3_DBG_CMD_RX_BD)
+ return hns3_dbg_rx_bd_info(s, private);
-static int hns3_dbg_release(struct inode *inode, struct file *filp)
-{
- kvfree(filp->private_data);
- filp->private_data = NULL;
- return 0;
+ return -EOPNOTSUPP;
}
-
-static const struct file_operations hns3_dbg_fops = {
- .owner = THIS_MODULE,
- .open = hns3_dbg_open,
- .read = hns3_dbg_read,
- .release = hns3_dbg_release,
-};
+DEFINE_SHOW_ATTRIBUTE(hns3_dbg_bd_info);
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
{
- struct dentry *entry_dir;
struct hns3_dbg_data *data;
+ struct dentry *entry_dir;
u16 max_queue_num;
unsigned int i;
@@ -1337,34 +856,73 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
data[i].qid = i;
sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i);
debugfs_create_file(name, 0400, entry_dir, &data[i],
- &hns3_dbg_fops);
+ &hns3_dbg_bd_info_fops);
}
return 0;
}
-static int
-hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd)
+static int hns3_dbg_common_init_t1(struct hnae3_handle *handle, u32 cmd)
{
- struct hns3_dbg_data *data;
+ struct device *dev = &handle->pdev->dev;
struct dentry *entry_dir;
+ read_func func = NULL;
+
+ switch (hns3_dbg_cmd[cmd].cmd) {
+ case HNAE3_DBG_CMD_TX_QUEUE_INFO:
+ func = hns3_dbg_tx_queue_info;
+ break;
+ case HNAE3_DBG_CMD_RX_QUEUE_INFO:
+ func = hns3_dbg_rx_queue_info;
+ break;
+ case HNAE3_DBG_CMD_QUEUE_MAP:
+ func = hns3_dbg_queue_map;
+ break;
+ case HNAE3_DBG_CMD_PAGE_POOL_INFO:
+ func = hns3_dbg_page_pool_info;
+ break;
+ case HNAE3_DBG_CMD_COAL_INFO:
+ func = hns3_dbg_coal_info;
+ break;
+ case HNAE3_DBG_CMD_DEV_INFO:
+ func = hns3_dbg_dev_info;
+ break;
+ default:
+ return -EINVAL;
+ }
- data = devm_kzalloc(&handle->pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
+ debugfs_create_devm_seqfile(dev, hns3_dbg_cmd[cmd].name, entry_dir,
+ func);
+
+ return 0;
+}
+
+static int hns3_dbg_common_init_t2(struct hnae3_handle *handle, u32 cmd)
+{
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
+ struct device *dev = &handle->pdev->dev;
+ struct dentry *entry_dir;
+ read_func func;
+ int ret;
+
+ if (!ops->dbg_get_read_func)
+ return 0;
+
+ ret = ops->dbg_get_read_func(handle, hns3_dbg_cmd[cmd].cmd, &func);
+ if (ret)
+ return ret;
- data->handle = handle;
- data->cmd = hns3_dbg_cmd[cmd].cmd;
entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
- debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir,
- data, &hns3_dbg_fops);
+ debugfs_create_devm_seqfile(dev, hns3_dbg_cmd[cmd].name, entry_dir,
+ func);
return 0;
}
int hns3_dbg_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
const char *name = pci_name(handle->pdev);
int ret;
u32 i;
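
For reference, DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates the open/read/release plumbing that the deleted hns3_dbg_open()/hns3_dbg_read()/hns3_dbg_release() trio used to hand-roll. A simplified sketch of what DEFINE_SHOW_ATTRIBUTE(hns3_dbg_bd_info) expands to:

static int hns3_dbg_bd_info_open(struct inode *inode, struct file *file)
{
	/* single_open() binds the show callback and inode->i_private
	 * (here: the per-queue struct hns3_dbg_data) to the seq_file,
	 * so s->private is valid inside hns3_dbg_bd_info_show().
	 */
	return single_open(file, hns3_dbg_bd_info_show, inode->i_private);
}

static const struct file_operations hns3_dbg_bd_info_fops = {
	.owner		= THIS_MODULE,
	.open		= hns3_dbg_bd_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

The seq_file core grows its output buffer on demand, which is what lets the patch drop the preallocated buf_len sizes in the header below.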
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
index 4a5ef8a90a10..57c9d3fc1b27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -6,15 +6,6 @@
#include "hnae3.h"
-#define HNS3_DBG_READ_LEN 65536
-#define HNS3_DBG_READ_LEN_128KB 0x20000
-#define HNS3_DBG_READ_LEN_1MB 0x100000
-#define HNS3_DBG_READ_LEN_4MB 0x400000
-#define HNS3_DBG_READ_LEN_5MB 0x500000
-#define HNS3_DBG_WRITE_LEN 1024
-
-#define HNS3_DBG_DATA_STR_LEN 32
-#define HNS3_DBG_INFO_LEN 256
#define HNS3_DBG_ITEM_NAME_LEN 32
#define HNS3_DBG_FILE_NAME_LEN 16
@@ -49,16 +40,9 @@ struct hns3_dbg_cmd_info {
const char *name;
enum hnae3_dbg_cmd cmd;
enum hns3_dbg_dentry_type dentry;
- u32 buf_len;
int (*init)(struct hnae3_handle *handle, unsigned int cmd);
};
-struct hns3_dbg_func {
- enum hnae3_dbg_cmd cmd;
- int (*dbg_dump)(struct hnae3_handle *handle, char *buf, int len);
- int (*dbg_dump_bd)(struct hns3_dbg_data *data, char *buf, int len);
-};
-
struct hns3_dbg_cap_info {
const char *name;
enum HNAE3_DEV_CAP_BITS cap_bit;
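
A note on the plumbing: debugfs_create_devm_seqfile(dev, name, parent, read_fn) registers a read-only seq_file whose show callback receives the owning struct device, and that device ends up as s->private. The hnae3_seq_file_to_ae_dev()/hnae3_seq_file_to_handle() helpers used throughout this patch are not shown in the quoted hunks; a plausible sketch of their definitions in hnae3.h, assuming exactly that mechanism plus the new ae_dev->handle back-pointer set up in hns3_client_init():

/* Hypothetical reconstruction - the real definitions live in hnae3.h,
 * outside the hunks quoted here.
 */
#define hnae3_seq_file_to_ae_dev(s) \
	((struct hnae3_ae_dev *)dev_get_drvdata((struct device *)(s)->private))

#define hnae3_seq_file_to_handle(s) \
	(hnae3_seq_file_to_ae_dev(s)->handle)

The same layering explains hclge_seq_file_to_hdev() further down: device -> driver data (ae_dev) -> ae_dev->priv (hclge_dev).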
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index aaa803563bd2..bfa5568baa92 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -548,9 +548,9 @@ void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
@@ -961,7 +961,7 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ops->request_update_promisc_mode)
ops->request_update_promisc_mode(handle);
@@ -1308,7 +1308,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
struct hns3_nic_priv *priv = netdev_priv(skb->dev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
union l4_hdr_info l4;
/* device version above V3(include V3), the hardware can
@@ -1508,7 +1508,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
* VLAN enabled, only one VLAN header is allowed in skb, otherwise it
* will cause RAS error.
*/
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (unlikely(skb_vlan_tagged_multi(skb) &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
handle->port_base_vlan_state ==
@@ -1694,8 +1694,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
#define HNS3_LIKELY_BD_NUM 1
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
- unsigned int frag_buf_num;
- int k, sizeoflast;
+ unsigned int frag_buf_num, k;
+ int sizeoflast;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
desc->addr = cpu_to_le64(dma);
@@ -1867,7 +1867,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
unsigned int bd_num, u8 max_non_tso_bd_num)
{
unsigned int tot_len = 0;
- int i;
+ unsigned int i;
for (i = 0; i < max_non_tso_bd_num - 1U; i++)
tot_len += bd_size[i];
@@ -1895,7 +1895,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
- int i;
+ u32 i;
for (i = 0; i < MAX_SKB_FRAGS; i++)
size[i] = skb_frag_size(&shinfo->frags[i]);
@@ -2110,7 +2110,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
*/
if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
!ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit
* is updated.
*/
@@ -2126,7 +2126,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
*/
smp_store_release(&ring->last_to_use, ring->next_to_use);
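
The two comment corrections above (smp_load_aquire -> smp_load_acquire) document a real ordering contract rather than mere spelling; a minimal sketch of the release/acquire pairing they refer to, with names as used in this driver:

/* Producer (hns3_tx_doorbell): every BD write issued above this point
 * is guaranteed visible before the new last_to_use is published.
 */
smp_store_release(&ring->last_to_use, ring->next_to_use);

/* Consumer (hns3_nic_reclaim_desc, illustrative fragment): once the
 * acquire load observes the updated last_to_use, it also observes the
 * BD contents written before the paired release.
 */
int ltu = smp_load_acquire(&ring->last_to_use);

Without this pairing, the consumer could see the advanced index while still reading stale descriptor contents.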
@@ -2211,9 +2211,9 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
struct sg_table *sgt;
- int i, bd_num = 0;
+ int bd_num = 0;
dma_addr_t dma;
- u32 cb_len;
+ u32 cb_len, i;
int nents;
if (skb_has_frag_list(skb))
@@ -2451,7 +2451,7 @@ static int hns3_nic_set_features(struct net_device *netdev,
if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
h->ae_algo->ops->cls_flower_active(h)) {
netdev_err(netdev,
- "there are offloaded TC filters active, cannot disable HW TC offload");
+ "there are offloaded TC filters active, cannot disable HW TC offload\n");
return -EINVAL;
}
@@ -2548,7 +2548,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
struct hnae3_handle *handle = priv->ae_handle;
struct rtnl_link_stats64 ring_total_stats;
struct hns3_enet_ring *ring;
- unsigned int idx;
+ int idx;
if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
@@ -2774,7 +2774,7 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
static int hns3_get_timeout_queue(struct net_device *ndev)
{
- int i;
+ unsigned int i;
/* Find the stopped queue the same way the stack does */
for (i = 0; i < ndev->num_tx_queues; i++) {
@@ -2855,7 +2855,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring;
- int timeout_queue;
+ u32 timeout_queue;
timeout_queue = hns3_get_timeout_queue(ndev);
if (timeout_queue >= ndev->num_tx_queues) {
@@ -3825,7 +3825,7 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
__be16 type = skb->protocol;
struct tcphdr *th;
- int depth = 0;
+ u32 depth = 0;
while (eth_type_vlan(type)) {
struct vlan_hdr *vh;
@@ -4751,7 +4751,7 @@ map_ring_fail:
static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
@@ -5255,7 +5255,7 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
enum dim_cq_period_mode mode, bool is_tx)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hnae3_handle *handle = priv->ae_handle;
int i;
@@ -5293,7 +5293,7 @@ void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
static void hns3_state_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -5328,6 +5328,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
struct net_device *netdev;
int ret;
+ ae_dev->handle = handle;
+
handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
&max_rss_size);
netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
@@ -5965,7 +5967,7 @@ static const struct hns3_hw_error_info hns3_hw_err[] = {
static void hns3_process_hw_error(struct hnae3_handle *handle,
enum hnae3_hw_error_type type)
{
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
if (hns3_hw_err[i].type == type) {
@@ -5992,8 +5994,8 @@ static int __init hns3_init_module(void)
{
int ret;
- pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
- pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
+ pr_debug("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
+ pr_debug("%s: %s\n", hns3_driver_name, hns3_copyright);
client.type = HNAE3_CLIENT_KNIC;
snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index caf7a4df8585..933e3527ed82 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -623,7 +623,7 @@ struct hns3_reset_type_map {
enum hnae3_reset_type rst_type;
};
-static inline int ring_space(struct hns3_enet_ring *ring)
+static inline u32 ring_space(struct hns3_enet_ring *ring)
{
/* This smp_load_acquire() pairs with smp_store_release() in
* hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
@@ -694,7 +694,7 @@ static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
- for (pos = (head).ring; (pos); pos = (pos)->next)
+ for ((pos) = (head).ring; (pos); (pos) = (pos)->next)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 6715222aeb66..d5454e126c85 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -86,7 +86,7 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset);
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
int ret;
if (!h->ae_algo->ops->set_loopback ||
@@ -171,7 +171,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* the purpose of mac or serdes selftest.
*/
handle = hns3_get_handle(ndev);
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source);
@@ -436,7 +436,7 @@ static void hns3_self_test(struct net_device *ndev,
data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;
if (hns3_nic_resetting(ndev)) {
- netdev_err(ndev, "dev resetting!");
+ netdev_err(ndev, "dev resetting!\n");
goto failure;
}
@@ -489,7 +489,7 @@ static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = {
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
if (!ops->get_sset_count)
return -EOPNOTSUPP;
@@ -540,8 +540,8 @@ static void hns3_get_strings_tqps(struct hnae3_handle *handle, u8 **data)
static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
- int i;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
+ u32 i;
if (!ops->get_strings)
return;
@@ -569,7 +569,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
struct hns3_nic_priv *nic_priv = handle->priv;
struct hns3_enet_ring *ring;
u8 *stat;
- int i, j;
+ u32 i, j;
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -692,7 +692,7 @@ static void hns3_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
return;
@@ -706,7 +706,7 @@ static int hns3_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
return -EOPNOTSUPP;
@@ -725,7 +725,7 @@ static int hns3_set_pauseparam(struct net_device *netdev,
static void hns3_get_ksettings(struct hnae3_handle *h,
struct ethtool_link_ksettings *cmd)
{
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
/* 1.auto_neg & speed & duplex from cmd */
if (ops->get_ksettings_an_result)
@@ -751,7 +751,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
const struct hnae3_ae_ops *ops;
u8 module_type;
u8 media_type;
@@ -794,7 +794,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
break;
default:
- netdev_warn(netdev, "Unknown media type");
+ netdev_warn(netdev, "Unknown media type\n");
return 0;
}
@@ -814,7 +814,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
u32 lane_num;
@@ -842,7 +842,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
if (cmd->base.duplex == DUPLEX_HALF &&
media_type != HNAE3_MEDIA_TYPE_COPPER) {
netdev_err(netdev,
- "only copper port supports half duplex!");
+ "only copper port supports half duplex!\n");
return -EINVAL;
}
@@ -861,8 +861,8 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
int ret;
/* Chip don't support this mode. */
@@ -932,7 +932,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
return ae_dev->dev_specs.rss_ind_tbl_size;
}
@@ -954,7 +954,7 @@ static int hns3_set_rss(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
@@ -978,6 +978,16 @@ static int hns3_set_rss(struct net_device *netdev,
rxfh->hfunc);
}
+static int hns3_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_rss_tuple)
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+}
+
static int hns3_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -988,10 +998,6 @@ static int hns3_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = h->kinfo.num_tqps;
return 0;
- case ETHTOOL_GRXFH:
- if (h->ae_algo->ops->get_rss_tuple)
- return h->ae_algo->ops->get_rss_tuple(h, cmd);
- return -EOPNOTSUPP;
case ETHTOOL_GRXCLSRLCNT:
if (h->ae_algo->ops->get_fd_rule_cnt)
return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
@@ -1024,8 +1030,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
{
enum hnae3_reset_type rst_type = HNAE3_NONE_RESET;
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
const struct hns3_reset_type_map *rst_type_map;
enum ethtool_reset_flags rst_flags;
u32 i, size;
@@ -1189,7 +1195,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
@@ -1275,15 +1281,22 @@ static int hns3_set_ringparam(struct net_device *ndev,
return ret;
}
+static int hns3_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->set_rss_tuple)
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+}
+
static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- if (h->ae_algo->ops->set_rss_tuple)
- return h->ae_algo->ops->set_rss_tuple(h, cmd);
- return -EOPNOTSUPP;
case ETHTOOL_SRXCLSRLINS:
if (h->ae_algo->ops->add_fd_entry)
return h->ae_algo->ops->add_fd_entry(h, cmd);
@@ -1300,7 +1313,7 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
static int hns3_nway_reset(struct net_device *netdev)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
struct phy_device *phy = netdev->phydev;
int autoneg;
@@ -1308,7 +1321,7 @@ static int hns3_nway_reset(struct net_device *netdev)
return 0;
if (hns3_nic_resetting(netdev)) {
- netdev_err(netdev, "dev resetting!");
+ netdev_err(netdev, "dev resetting!\n");
return -EBUSY;
}
@@ -1377,7 +1390,7 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
u32 rx_gl, tx_gl;
if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
@@ -1449,7 +1462,7 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) &&
!ae_dev->dev_specs.int_ql_max) {
@@ -1473,7 +1486,7 @@ hns3_check_cqe_coalesce_param(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
!hnae3_ae_dev_cq_supported(ae_dev)) {
@@ -1649,8 +1662,8 @@ static void hns3_get_fec_stats(struct net_device *netdev,
struct ethtool_fec_stats *fec_stats)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
return;
@@ -1700,8 +1713,8 @@ static int hns3_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u8 fec_ability;
u8 fec_mode;
@@ -1725,8 +1738,8 @@ static int hns3_set_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u32 fec_mode;
if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
@@ -1747,8 +1760,8 @@ static int hns3_get_module_info(struct net_device *netdev,
#define HNS3_SFF_8636_V1_3 0x03
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
struct hns3_sfp_type sfp_type;
int ret;
@@ -1797,8 +1810,8 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
!ops->get_module_eeprom)
@@ -1924,7 +1937,7 @@ static int hns3_set_tunable(struct net_device *netdev,
int i, ret = 0;
if (hns3_nic_resetting(netdev) || !priv->ring) {
- netdev_err(netdev, "failed to set tunable value, dev resetting!");
+ netdev_err(netdev, "failed to set tunable value, dev resetting!\n");
return -EBUSY;
}
@@ -2105,6 +2118,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
+ .get_rxfh_fields = hns3_get_rxfh_fields,
+ .set_rxfh_fields = hns3_set_rxfh_fields,
.get_link_ksettings = hns3_get_link_ksettings,
.get_channels = hns3_get_channels,
.set_channels = hns3_set_channels,
@@ -2142,6 +2157,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
+ .get_rxfh_fields = hns3_get_rxfh_fields,
+ .set_rxfh_fields = hns3_set_rxfh_fields,
.get_link_ksettings = hns3_get_link_ksettings,
.set_link_ksettings = hns3_set_link_ksettings,
.nway_reset = hns3_nway_reset,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index c46490693594..b76d25074e99 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -12,6 +12,9 @@
#include "hclge_tm.h"
#include "hnae3.h"
+#define hclge_seq_file_to_hdev(s) \
+ (((struct hnae3_ae_dev *)hnae3_seq_file_to_ae_dev(s))->priv)
+
static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
@@ -721,48 +724,6 @@ static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
.cmd = HCLGE_OPC_DFX_TQP_REG } },
};
-/* make sure: len(name) + interval >= maxlen(item data) + 2,
- * for example, name = "pkt_num"(len: 7), the prototype of item data is u32,
- * and print as "%u"(maxlen: 10), so the interval should be at least 5.
- */
-static void hclge_dbg_fill_content(char *content, u16 len,
- const struct hclge_dbg_item *items,
- const char **result, u16 size)
-{
-#define HCLGE_DBG_LINE_END_LEN 2
- char *pos = content;
- u16 item_len;
- u16 i;
-
- if (!len) {
- return;
- } else if (len <= HCLGE_DBG_LINE_END_LEN) {
- *pos++ = '\0';
- return;
- }
-
- memset(content, ' ', len);
- len -= HCLGE_DBG_LINE_END_LEN;
-
- for (i = 0; i < size; i++) {
- item_len = strlen(items[i].name) + items[i].interval;
- if (len < item_len)
- break;
-
- if (result) {
- if (item_len < strlen(result[i]))
- break;
- memcpy(pos, result[i], strlen(result[i]));
- } else {
- memcpy(pos, items[i].name, strlen(items[i].name));
- }
- pos += item_len;
- len -= item_len;
- }
- *pos++ = '\n';
- *pos++ = '\0';
-}
-
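
Dropping hclge_dbg_fill_content() (and its hns3 twin earlier in this patch) works because printf-style field widths express the same "len(name) + interval" padding rule directly in the format string. An illustrative fragment (variable names hypothetical):

/* "pkt_num" is 7 chars as a heading; a %-12u value column reproduces
 * the old name-plus-interval spacing in a single format directive.
 */
seq_puts(s, "pkt_num     bd_num\n");
seq_printf(s, "%-12u%-10u\n", pkt_num, bd_num);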
static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
{
if (id)
@@ -826,14 +787,14 @@ int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
static int
hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
const struct hclge_dbg_reg_type_info *reg_info,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ u32 index, entry, i, cnt, min_num;
struct hclge_desc *desc_src;
- u32 index, entry, i, cnt;
- int bd_num, min_num, ret;
struct hclge_desc *desc;
+ int bd_num, ret;
ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
if (ret)
@@ -846,13 +807,12 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
- *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
- cnt++, dfx_message->message);
+ seq_printf(s, "item%u = %s\n", cnt++, dfx_message->message);
for (i = 0; i < cnt; i++)
- *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
+ seq_printf(s, "item%u\t", i);
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
dfx_message = reg_info->dfx_msg;
@@ -867,10 +827,9 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
if (i > 0 && !entry)
desc++;
- *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
- le32_to_cpu(desc->data[entry]));
+ seq_printf(s, "%#x\t", le32_to_cpu(desc->data[entry]));
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
}
kfree(desc_src);
@@ -880,14 +839,14 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
static int
hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
const struct hclge_dbg_reg_type_info *reg_info,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
struct hclge_desc *desc_src;
- int bd_num, min_num, ret;
+ int bd_num, min_num, ret, i;
struct hclge_desc *desc;
- u32 entry, i;
+ u32 entry;
ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
if (ret)
@@ -914,9 +873,8 @@ hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
if (!dfx_message->flag)
continue;
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
- dfx_message->message,
- le32_to_cpu(desc->data[entry]));
+ seq_printf(s, "%s: %#x\n", dfx_message->message,
+ le32_to_cpu(desc->data[entry]));
}
kfree(desc_src);
@@ -940,8 +898,8 @@ static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
};
-static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
@@ -962,16 +920,15 @@ static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
offset = hclge_dbg_mac_en_status[i].offset;
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
- hclge_dbg_mac_en_status[i].message,
- hnae3_get_bit(loop_en, offset));
+ seq_printf(s, "%s: %#x\n", hclge_dbg_mac_en_status[i].message,
+ hnae3_get_bit(loop_en, offset));
}
return 0;
}
-static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc;
@@ -988,16 +945,14 @@ static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
- le16_to_cpu(req->max_frm_size));
- *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
- req->min_frm_size);
+ seq_printf(s, "max_frame_size: %u\n", le16_to_cpu(req->max_frm_size));
+ seq_printf(s, "min_frame_size: %u\n", req->min_frm_size);
return 0;
}
-static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev,
+ struct seq_file *s)
{
#define HCLGE_MAC_SPEED_SHIFT 0
#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
@@ -1018,33 +973,31 @@ static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
- *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
- hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
- HCLGE_MAC_SPEED_SHIFT));
- *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
- hnae3_get_bit(req->speed_dup,
- HCLGE_MAC_DUPLEX_SHIFT));
+ seq_printf(s, "speed: %#lx\n",
+ hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
+ HCLGE_MAC_SPEED_SHIFT));
+ seq_printf(s, "duplex: %#x\n",
+ hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
return 0;
}
-static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_mac_enable_status(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_mac_frame_size(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
+ return hclge_dbg_dump_mac_speed_duplex(hdev, s);
}
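
The hunks above show the shape of the recurring conversion in this series: every dumper that took "char *buf, int len, int *pos" and accumulated output with *pos += scnprintf(...) now takes the struct seq_file directly, and the length/cursor bookkeeping disappears because seq_file grows its buffer and tracks the write position itself. A minimal sketch of the two styles side by side, outside this driver (foo_stats and its fields are invented for illustration):

/* Old style: the caller owns a fixed buffer and every printer
 * must thread the cursor through by hand.
 */
#include <linux/kernel.h>
#include <linux/seq_file.h>

struct foo_stats {
	u32 rx;
	u32 tx;
};

static void foo_dump_buf(struct foo_stats *st, char *buf, int len, int *pos)
{
	*pos += scnprintf(buf + *pos, len - *pos, "rx: %u\n", st->rx);
	*pos += scnprintf(buf + *pos, len - *pos, "tx: %u\n", st->tx);
}

/* New style: seq_file handles sizing and position, so only the
 * formatting remains.
 */
static void foo_dump_seq(struct seq_file *s, struct foo_stats *st)
{
	seq_printf(s, "rx: %u\n", st->rx);
	seq_printf(s, "tx: %u\n", st->tx);
}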
-static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -1055,8 +1008,8 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
+ seq_puts(s, "qset_id roce_qset_mask nic_qset_mask ");
+ seq_puts(s, "qset_shaping_pass qset_bp_status\n");
for (qset_id = 0; qset_id < qset_num; qset_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
HCLGE_OPC_QSET_DFX_STS);
@@ -1065,17 +1018,14 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%04u %#x %#x %#x %#x\n",
- qset_id, req.bit0, req.bit1, req.bit2,
- req.bit3);
+ seq_printf(s, "%04u %#-16x%#-15x%#-19x%#-x\n",
+ qset_id, req.bit0, req.bit1, req.bit2, req.bit3);
}
return 0;
}
-static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -1086,8 +1036,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
+ seq_puts(s, "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
for (pri_id = 0; pri_id < pri_num; pri_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
HCLGE_OPC_PRI_DFX_STS);
@@ -1096,24 +1045,21 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%03u %#x %#x %#x\n",
- pri_id, req.bit0, req.bit1, req.bit2);
+ seq_printf(s, "%03u %#-10x%#-19x%#-x\n",
+ pri_id, req.bit0, req.bit1, req.bit2);
}
return 0;
}
-static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pg_id;
int ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
+ seq_puts(s, "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
HCLGE_OPC_PG_DFX_STS);
@@ -1122,47 +1068,41 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%03u %#x %#x %#x\n",
- pg_id, req.bit0, req.bit1, req.bit2);
+ seq_printf(s, "%03u %#-9x%#-18x%#-x\n",
+ pg_id, req.bit0, req.bit1, req.bit2);
}
return 0;
}
-static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_desc desc;
u16 nq_id;
int ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
+ seq_puts(s, "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
HCLGE_OPC_SCH_NQ_CNT);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x",
- nq_id, le32_to_cpu(desc.data[1]));
+ seq_printf(s, "%04u %#-19x",
+ nq_id, le32_to_cpu(desc.data[1]));
ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
HCLGE_OPC_SCH_RQ_CNT);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- " %#x\n",
- le32_to_cpu(desc.data[1]));
+ seq_printf(s, "%#-x\n", le32_to_cpu(desc.data[1]));
}
return 0;
}
-static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -1176,16 +1116,13 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
- req.bit0);
- *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
- req.bit1);
+ seq_printf(s, "port_mask: %#x\n", req.bit0);
+ seq_printf(s, "port_shaping_pass: %#x\n", req.bit1);
return 0;
}
-static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_desc desc[2];
u8 port_id = 0;
@@ -1196,32 +1133,23 @@ static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
- le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "SCH_NIC_NUM: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "SCH_ROCE_NUM: %#x\n", le32_to_cpu(desc[0].data[2]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
HCLGE_OPC_TM_INTERNAL_STS);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
- le32_to_cpu(desc[0].data[2]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "sch_roce_fifo_afull_gap: %#x\n",
- le32_to_cpu(desc[0].data[3]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "tx_private_waterline: %#x\n",
- le32_to_cpu(desc[0].data[4]));
- *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
- le32_to_cpu(desc[0].data[5]));
- *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
- le32_to_cpu(desc[1].data[0]));
- *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
- le32_to_cpu(desc[1].data[1]));
+ seq_printf(s, "pri_bp: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "fifo_dfx_info: %#x\n", le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "sch_roce_fifo_afull_gap: %#x\n",
+ le32_to_cpu(desc[0].data[3]));
+ seq_printf(s, "tx_private_waterline: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ seq_printf(s, "tm_bypass_en: %#x\n", le32_to_cpu(desc[0].data[5]));
+ seq_printf(s, "SSU_TM_BYPASS_EN: %#x\n", le32_to_cpu(desc[1].data[0]));
+ seq_printf(s, "SSU_RESERVE_CFG: %#x\n", le32_to_cpu(desc[1].data[1]));
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
return 0;
@@ -1231,65 +1159,60 @@ static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
- le32_to_cpu(desc[0].data[2]));
- *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
- le32_to_cpu(desc[0].data[3]));
- *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
- le32_to_cpu(desc[0].data[4]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
- le32_to_cpu(desc[0].data[5]));
+ seq_printf(s, "TC_MAP_SEL: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "IGU_PFC_PRI_EN: %#x\n", le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "MAC_PFC_PRI_EN: %#x\n", le32_to_cpu(desc[0].data[3]));
+ seq_printf(s, "IGU_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ seq_printf(s, "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[5]));
return 0;
}
-static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_dcb(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_qset(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_pri(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_pg(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_queue(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_port(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
+ return hclge_dbg_dump_dcb_tm(hdev, s);
}
-static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
- enum hnae3_dbg_cmd cmd, char *buf, int len)
+static int hclge_dbg_dump_reg_cmd(enum hnae3_dbg_cmd cmd, struct seq_file *s)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
const struct hclge_dbg_reg_type_info *reg_info;
- int pos = 0, ret = 0;
- int i;
+ int ret = 0;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
reg_info = &hclge_dbg_reg_info[i];
if (cmd == reg_info->cmd) {
if (cmd == HNAE3_DBG_CMD_REG_TQP)
- return hclge_dbg_dump_reg_tqp(hdev, reg_info,
- buf, len, &pos);
+ return hclge_dbg_dump_reg_tqp(hdev,
+ reg_info, s);
- ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
- len, &pos);
+ ret = hclge_dbg_dump_reg_common(hdev, reg_info, s);
if (ret)
break;
}
@@ -1298,12 +1221,57 @@ static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
return ret;
}
-static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_bios_reg_cmd(struct seq_file *s, void *data)
{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_BIOS_COMMON, s);
+}
+
+static int hclge_dbg_dump_ssu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_SSU, s);
+}
+
+static int hclge_dbg_dump_igu_egu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_IGU_EGU, s);
+}
+
+static int hclge_dbg_dump_rpu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RPU, s);
+}
+
+static int hclge_dbg_dump_ncsi_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_NCSI, s);
+}
+
+static int hclge_dbg_dump_rtc_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RTC, s);
+}
+
+static int hclge_dbg_dump_ppp_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_PPP, s);
+}
+
+static int hclge_dbg_dump_rcb_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RCB, s);
+}
+
+static int hclge_dbg_dump_tqp_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_TQP, s);
+}
+
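These nine one-line wrappers exist because a seq_file show callback has the fixed signature int (*show)(struct seq_file *, void *), so the register-set selector has to be baked into a distinct function per debugfs file instead of being passed as an argument. How the wrappers get wired to debugfs is outside this hunk; the following is a sketch of the usual pattern with the generic helpers (the _show naming, the use of DEFINE_SHOW_ATTRIBUTE, and the registration call are illustrative assumptions, not the driver's actual table):

/* Sketch only: exposing one such show function via debugfs.
 * DEFINE_SHOW_ATTRIBUTE() generates <name>_open and <name>_fops around
 * <name>_show and passes inode->i_private through to seq_file->private.
 */
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int hclge_dbg_bios_common_show(struct seq_file *s, void *data)
{
	return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_BIOS_COMMON, s);
}
DEFINE_SHOW_ATTRIBUTE(hclge_dbg_bios_common);

static void hclge_dbg_register_one(struct dentry *parent, void *priv)
{
	debugfs_create_file("bios_common", 0400, parent, priv,
			    &hclge_dbg_bios_common_fops);
}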
+static int hclge_dbg_dump_tc(struct seq_file *s, void *data)
+{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_ets_tc_weight_cmd *ets_weight;
+ const char *sch_mode_str;
struct hclge_desc desc;
- char *sch_mode_str;
- int pos = 0;
int ret;
u8 i;
@@ -1323,72 +1291,37 @@ static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
- hdev->tm_info.num_tc);
- pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
- ets_weight->weight_offset);
+ seq_printf(s, "enabled tc number: %u\n", hdev->tm_info.num_tc);
+ seq_printf(s, "weight_offset: %u\n", ets_weight->weight_offset);
- pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n");
+ seq_puts(s, "TC MODE WEIGHT\n");
for (i = 0; i < HNAE3_MAX_TC; i++) {
sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
- pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
- i, sch_mode_str, ets_weight->tc_weight[i]);
+ seq_printf(s, "%u %4s %3u\n", i, sch_mode_str,
+ ets_weight->tc_weight[i]);
}
return 0;
}
-static const struct hclge_dbg_item tm_pg_items[] = {
- { "ID", 2 },
- { "PRI_MAP", 2 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "C_IR_B", 2 },
- { "C_IR_U", 2 },
- { "C_IR_S", 2 },
- { "C_BS_B", 2 },
- { "C_BS_S", 2 },
- { "C_FLAG", 2 },
- { "C_RATE(Mbps)", 2 },
- { "P_IR_B", 2 },
- { "P_IR_U", 2 },
- { "P_IR_S", 2 },
- { "P_BS_B", 2 },
- { "P_BS_S", 2 },
- { "P_FLAG", 2 },
- { "P_RATE(Mbps)", 0 }
-};
-
-static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
- char **result, u8 *index)
+static void hclge_dbg_fill_shaper_content(struct seq_file *s,
+ struct hclge_tm_shaper_para *para)
{
- sprintf(result[(*index)++], "%3u", para->ir_b);
- sprintf(result[(*index)++], "%3u", para->ir_u);
- sprintf(result[(*index)++], "%3u", para->ir_s);
- sprintf(result[(*index)++], "%3u", para->bs_b);
- sprintf(result[(*index)++], "%3u", para->bs_s);
- sprintf(result[(*index)++], "%3u", para->flag);
- sprintf(result[(*index)++], "%6u", para->rate);
+ seq_printf(s, "%-8u%-8u%-8u%-8u%-8u%-8u%-14u", para->ir_b, para->ir_u,
+ para->ir_s, para->bs_b, para->bs_s, para->flag, para->rate);
}
-static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
- char *buf, int len)
+static int hclge_dbg_dump_tm_pg(struct seq_file *s, void *data)
{
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
- char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
- u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
- char content[HCLGE_DBG_TM_INFO_LEN];
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u8 pg_id, sch_mode, weight, pri_bit_map;
+ const char *sch_mode_str;
int ret;
- for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
- result[i] = data_str;
- data_str += HCLGE_DBG_DATA_STR_LEN;
- }
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
- NULL, ARRAY_SIZE(tm_pg_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_puts(s, "ID PRI_MAP MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ seq_puts(s, "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U P_IR_S ");
+ seq_puts(s, "P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
@@ -1418,68 +1351,41 @@ static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%02u", pg_id);
- sprintf(result[j++], "0x%02x", pri_bit_map);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
- hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
- (const char **)result,
- ARRAY_SIZE(tm_pg_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%02u 0x%-7x%-6s%-6u", pg_id, pri_bit_map,
+ sch_mode_str, weight);
+ hclge_dbg_fill_shaper_content(s, &c_shaper_para);
+ hclge_dbg_fill_shaper_content(s, &p_shaper_para);
+ seq_puts(s, "\n");
}
return 0;
}
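
The deleted tm_pg_items[] table and the hclge_dbg_fill_content()/result[] machinery built each row by sprintf-ing every cell into scratch strings and then padding columns from the item widths; the replacement gets the same alignment straight from printf width specifiers such as %-6s and %-6u. A standalone sketch of that alignment (plain C, invented values):

#include <stdio.h>

/* "%-6s"/"%-6u" left-justify each cell to a fixed width, which is
 * what the removed item table and fill_content() emulated by hand.
 */
int main(void)
{
	printf("ID   MODE  DWRR  RATE(Mbps)\n");
	printf("%04u %-6s%-6u%-14u\n", 0u, "dwrr", 1u, 200u);
	printf("%04u %-6s%-6u%-14u\n", 1u, "sp", 0u, 1000u);
	return 0;
}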
-static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
-{
- char *data_str;
- int ret;
-
- data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
- HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
- if (!data_str)
- return -ENOMEM;
-
- ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
-
- kfree(data_str);
-
- return ret;
-}
-
-static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_port(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_tm_shaper_para shaper_para;
- int pos = 0;
int ret;
ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos,
- "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
- pos += scnprintf(buf + pos, len - pos,
- "%3u %3u %3u %3u %3u %1u %6u\n",
- shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
- shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
- shaper_para.rate);
+ seq_puts(s, "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
+ seq_printf(s, "%3u %3u %3u %3u %3u %1u %6u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
return 0;
}
static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
- char *buf, int len)
+ struct seq_file *s)
{
u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
struct hclge_bp_to_qs_map_cmd *map;
struct hclge_desc desc;
- int pos = 0;
u8 group_id;
u8 grp_num;
u16 i = 0;
@@ -1505,27 +1411,27 @@ static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
}
- pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
+ seq_puts(s, "INDEX | TM BP QSET MAPPING:\n");
for (group_id = 0; group_id < grp_num / 8; group_id++) {
- pos += scnprintf(buf + pos, len - pos,
- "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
- group_id * 256, qset_mapping[i + 7],
- qset_mapping[i + 6], qset_mapping[i + 5],
- qset_mapping[i + 4], qset_mapping[i + 3],
- qset_mapping[i + 2], qset_mapping[i + 1],
- qset_mapping[i]);
+ seq_printf(s,
+ "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
+ group_id * 256, qset_mapping[i + 7],
+ qset_mapping[i + 6], qset_mapping[i + 5],
+ qset_mapping[i + 4], qset_mapping[i + 3],
+ qset_mapping[i + 2], qset_mapping[i + 1],
+ qset_mapping[i]);
i += 8;
}
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_map(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u16 queue_id;
u16 qset_id;
u8 link_vld;
- int pos = 0;
u8 pri_id;
u8 tc_id;
int ret;
@@ -1544,32 +1450,28 @@ static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos,
- "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
- pos += scnprintf(buf + pos, len - pos,
- "%04u %4u %3u %2u\n",
- queue_id, qset_id, pri_id, tc_id);
+ seq_puts(s, "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
+ seq_printf(s, "%04u %4u %3u %2u\n",
+ queue_id, qset_id, pri_id, tc_id);
if (!hnae3_dev_dcb_supported(hdev))
continue;
- ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
- len - pos);
+ ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, s);
if (ret < 0)
return ret;
- pos += ret;
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
}
return 0;
}
-static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_nodes(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_tm_nodes_cmd *nodes;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
@@ -1582,65 +1484,36 @@ static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
nodes = (struct hclge_tm_nodes_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
- pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
- nodes->pg_base_id, nodes->pg_num);
- pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
- nodes->pri_base_id, nodes->pri_num);
- pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
- le16_to_cpu(nodes->qset_base_id),
- le16_to_cpu(nodes->qset_num));
- pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
- le16_to_cpu(nodes->queue_base_id),
- le16_to_cpu(nodes->queue_num));
+ seq_puts(s, " BASE_ID MAX_NUM\n");
+ seq_printf(s, "PG %4u %4u\n", nodes->pg_base_id,
+ nodes->pg_num);
+ seq_printf(s, "PRI %4u %4u\n", nodes->pri_base_id,
+ nodes->pri_num);
+ seq_printf(s, "QSET %4u %4u\n",
+ le16_to_cpu(nodes->qset_base_id),
+ le16_to_cpu(nodes->qset_num));
+ seq_printf(s, "QUEUE %4u %4u\n",
+ le16_to_cpu(nodes->queue_base_id),
+ le16_to_cpu(nodes->queue_num));
return 0;
}
-static const struct hclge_dbg_item tm_pri_items[] = {
- { "ID", 4 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "C_IR_B", 2 },
- { "C_IR_U", 2 },
- { "C_IR_S", 2 },
- { "C_BS_B", 2 },
- { "C_BS_S", 2 },
- { "C_FLAG", 2 },
- { "C_RATE(Mbps)", 2 },
- { "P_IR_B", 2 },
- { "P_IR_U", 2 },
- { "P_IR_S", 2 },
- { "P_BS_B", 2 },
- { "P_BS_S", 2 },
- { "P_FLAG", 2 },
- { "P_RATE(Mbps)", 0 }
-};
-
-static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_pri(struct seq_file *s, void *data)
{
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
- char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
- char content[HCLGE_DBG_TM_INFO_LEN];
- u8 pri_num, sch_mode, weight, i, j;
- char *data_str;
- int pos, ret;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u8 pri_num, sch_mode, weight, i;
+ const char *sch_mode_str;
+ int ret;
ret = hclge_tm_get_pri_num(hdev, &pri_num);
if (ret)
return ret;
- data_str = kcalloc(ARRAY_SIZE(tm_pri_items), HCLGE_DBG_DATA_STR_LEN,
- GFP_KERNEL);
- if (!data_str)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
- result[i] = &data_str[i * HCLGE_DBG_DATA_STR_LEN];
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
- NULL, ARRAY_SIZE(tm_pri_items));
- pos = scnprintf(buf, len, "%s", content);
+ seq_puts(s, "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ seq_puts(s, "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U P_IR_S ");
+ seq_puts(s, "P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
for (i = 0; i < pri_num; i++) {
ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
@@ -1666,59 +1539,31 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%04u", i);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
- hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
- hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
- (const char **)result,
- ARRAY_SIZE(tm_pri_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%04u %-6s%-6u", i, sch_mode_str, weight);
+ hclge_dbg_fill_shaper_content(s, &c_shaper_para);
+ hclge_dbg_fill_shaper_content(s, &p_shaper_para);
+ seq_puts(s, "\n");
}
out:
- kfree(data_str);
return ret;
}
-static const struct hclge_dbg_item tm_qset_items[] = {
- { "ID", 4 },
- { "MAP_PRI", 2 },
- { "LINK_VLD", 2 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "IR_B", 2 },
- { "IR_U", 2 },
- { "IR_S", 2 },
- { "BS_B", 2 },
- { "BS_S", 2 },
- { "FLAG", 2 },
- { "RATE(Mbps)", 0 }
-};
-
-static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_qset(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 priority, link_vld, sch_mode, weight;
struct hclge_tm_shaper_para shaper_para;
- char content[HCLGE_DBG_TM_INFO_LEN];
+ const char *sch_mode_str;
u16 qset_num, i;
- int ret, pos;
- u8 j;
+ int ret;
ret = hclge_tm_get_qset_num(hdev, &qset_num);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
- result[i] = &data_str[i][0];
-
- hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
- NULL, ARRAY_SIZE(tm_qset_items));
- pos = scnprintf(buf, len, "%s", content);
+ seq_puts(s, "ID MAP_PRI LINK_VLD MODE DWRR IR_B IR_U IR_S ");
+ seq_puts(s, "BS_B BS_S FLAG RATE(Mbps)\n");
for (i = 0; i < qset_num; i++) {
ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
@@ -1740,29 +1585,22 @@ static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%04u", i);
- sprintf(result[j++], "%4u", priority);
- sprintf(result[j++], "%4u", link_vld);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
-
- hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
- (const char **)result,
- ARRAY_SIZE(tm_qset_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%04u %-9u%-10u%-6s%-6u", i, priority, link_vld,
+ sch_mode_str, weight);
+ seq_printf(s, "%-6u%-6u%-6u%-6u%-6u%-6u%-14u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
}
return 0;
}
-static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_pause_cfg(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_cfg_pause_param_cmd *pause_param;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
@@ -1775,23 +1613,21 @@ static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
- pause_param->pause_trans_gap);
- pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
- le16_to_cpu(pause_param->pause_trans_time));
+ seq_printf(s, "pause_trans_gap: 0x%x\n", pause_param->pause_trans_gap);
+ seq_printf(s, "pause_trans_time: 0x%x\n",
+ le16_to_cpu(pause_param->pause_trans_time));
return 0;
}
#define HCLGE_DBG_TC_MASK 0x0F
-static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_pri_map(struct seq_file *s, void *data)
{
#define HCLGE_DBG_TC_BIT_WIDTH 4
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_qos_pri_map_cmd *pri_map;
struct hclge_desc desc;
- int pos = 0;
u8 *pri_tc;
u8 tc, i;
int ret;
@@ -1806,33 +1642,33 @@ static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
- pri_map->vlan_pri);
- pos += scnprintf(buf + pos, len - pos, "PRI TC\n");
+ seq_printf(s, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
+ seq_puts(s, "PRI TC\n");
pri_tc = (u8 *)pri_map;
for (i = 0; i < HNAE3_MAX_TC; i++) {
tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
tc &= HCLGE_DBG_TC_MASK;
- pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc);
+ seq_printf(s, "%u %u\n", i, tc);
}
return 0;
}
-static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_dscp_map(struct seq_file *s, void *data)
{
- struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ struct hnae3_knic_private_info *kinfo;
u8 *req0 = (u8 *)desc[0].data;
u8 *req1 = (u8 *)desc[1].data;
u8 dscp_tc[HNAE3_MAX_DSCP];
- int pos, ret;
+ int ret;
u8 i, j;
- pos = scnprintf(buf, len, "tc map mode: %s\n",
- tc_map_mode_str[kinfo->tc_map_mode]);
+ kinfo = &hdev->vport[0].nic.kinfo;
+
+ seq_printf(s, "tc map mode: %s\n", tc_map_mode_str[kinfo->tc_map_mode]);
if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
return 0;
@@ -1847,7 +1683,7 @@ static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+ seq_puts(s, "\nDSCP PRIO TC\n");
/* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */
for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
@@ -1865,18 +1701,17 @@ static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
continue;
- pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
- i, kinfo->dscp_prio[i], dscp_tc[i]);
+ seq_printf(s, " %2u %u %u\n", i, kinfo->dscp_prio[i],
+ dscp_tc[i]);
}
return 0;
}
-static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
struct hclge_desc desc;
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
@@ -1889,19 +1724,17 @@ static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- pos += scnprintf(buf + pos, len - pos,
- "tx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
+ seq_printf(s, "tx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
struct hclge_desc desc;
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
@@ -1912,26 +1745,24 @@ static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(rx_buf_cmd->buf_num[i]));
+ seq_printf(s, "rx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(rx_buf_cmd->buf_num[i]));
- pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
- le16_to_cpu(rx_buf_cmd->shared_buf));
+ seq_printf(s, "rx_share_buf: 0x%x\n",
+ le16_to_cpu(rx_buf_cmd->shared_buf));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_com_wl *rx_com_wl;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
@@ -1943,21 +1774,19 @@ static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
}
rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "\n");
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_wl: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_com_wl->com_wl.high),
- le16_to_cpu(rx_com_wl->com_wl.low));
+ seq_puts(s, "\n");
+ seq_printf(s, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_com_wl->com_wl.high),
+ le16_to_cpu(rx_com_wl->com_wl.low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_com_wl *rx_packet_cnt;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
@@ -1969,20 +1798,18 @@ static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
}
rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
- pos += scnprintf(buf + pos, len - pos,
- "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_packet_cnt->com_wl.high),
- le16_to_cpu(rx_packet_cnt->com_wl.low));
+ seq_printf(s, "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_packet_cnt->com_wl.high),
+ le16_to_cpu(rx_packet_cnt->com_wl.low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_priv_wl_buf *rx_priv_wl;
struct hclge_desc desc[2];
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
@@ -1997,28 +1824,25 @@ static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
- le16_to_cpu(rx_priv_wl->tc_wl[i].high),
- le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+ seq_printf(s, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
- i + HCLGE_TC_NUM_ONE_DESC,
- le16_to_cpu(rx_priv_wl->tc_wl[i].high),
- le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+ seq_printf(s, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
- return pos;
+ return 0;
}
static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
- char *buf, int len)
+ struct seq_file *s)
{
struct hclge_rx_com_thrd *rx_com_thrd;
struct hclge_desc desc[2];
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
@@ -2031,86 +1855,75 @@ static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
- le16_to_cpu(rx_com_thrd->com_thrd[i].high),
- le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+ seq_printf(s, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
- i + HCLGE_TC_NUM_ONE_DESC,
- le16_to_cpu(rx_com_thrd->com_thrd[i].high),
- le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+ seq_printf(s, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_buf_cfg(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_tx_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
if (!hnae3_dev_dcb_supported(hdev))
return 0;
- ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
- len - pos);
+ ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, s);
if (ret < 0)
return ret;
return 0;
}
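
Note the calling-convention change hidden in this hunk: hclge_dbg_dump_tx_buf_cfg() and the other sub-dumpers used to return the number of bytes written (a non-negative count the caller added to pos), whereas the converted versions return 0 on success or a negative errno. The retained "if (ret < 0)" checks stay correct either way and could now be the more idiomatic "if (ret)". A sketch of the reduced aggregator pattern, with generic names:

#include <linux/seq_file.h>

static int dump_part_a(struct seq_file *s)
{
	seq_puts(s, "part a\n");
	return 0;		/* 0 or -errno, no byte count */
}

static int dump_part_b(struct seq_file *s)
{
	seq_puts(s, "part b\n");
	return 0;
}

static int dump_all(struct seq_file *s, void *data)
{
	int ret;

	ret = dump_part_a(s);
	if (ret)		/* plain error propagation, no pos math */
		return ret;
	return dump_part_b(s);
}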
-static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mng_table(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_ethertype_idx_rd_cmd *req0;
struct hclge_desc desc;
u32 msg_egress_port;
- int pos = 0;
int ret, i;
- pos += scnprintf(buf + pos, len - pos,
- "entry mac_addr mask ether ");
- pos += scnprintf(buf + pos, len - pos,
- "mask vlan mask i_map i_dir e_type ");
- pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
+ seq_puts(s, "entry mac_addr mask ether ");
+ seq_puts(s, "mask vlan mask i_map i_dir e_type ");
+ seq_puts(s, "pf_id vf_id q_id drop\n");
for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
true);
- req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
+ req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)desc.data;
req0->index = cpu_to_le16(i);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2123,46 +1936,40 @@ static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
if (!req0->resp_code)
continue;
- pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
- le16_to_cpu(req0->index), req0->mac_addr);
+ seq_printf(s, "%02u %pM ",
+ le16_to_cpu(req0->index), req0->mac_addr);
- pos += scnprintf(buf + pos, len - pos,
- "%x %04x %x %04x ",
- !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
- le16_to_cpu(req0->ethter_type),
- !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
- le16_to_cpu(req0->vlan_tag) &
- HCLGE_DBG_MNG_VLAN_TAG);
+ seq_printf(s, "%x %04x %x %04x ",
+ !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
+ le16_to_cpu(req0->ethter_type),
+ !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
+ le16_to_cpu(req0->vlan_tag) &
+ HCLGE_DBG_MNG_VLAN_TAG);
- pos += scnprintf(buf + pos, len - pos,
- "%x %02x %02x ",
- !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
- req0->i_port_bitmap, req0->i_port_direction);
+ seq_printf(s, "%x %02x %02x ",
+ !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
+ req0->i_port_bitmap, req0->i_port_direction);
msg_egress_port = le16_to_cpu(req0->egress_port);
- pos += scnprintf(buf + pos, len - pos,
- "%x %x %02x %04x %x\n",
- !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
- msg_egress_port & HCLGE_DBG_MNG_PF_ID,
- (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
- le16_to_cpu(req0->egress_queue),
- !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
+ seq_printf(s, "%x %x %02x %04x %x\n",
+ !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ msg_egress_port & HCLGE_DBG_MNG_PF_ID,
+ (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ le16_to_cpu(req0->egress_queue),
+ !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
}
return 0;
}
-#define HCLGE_DBG_TCAM_BUF_SIZE 256
-
static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
- char *tcam_buf,
+ struct seq_file *s,
struct hclge_dbg_tcam_msg tcam_msg)
{
struct hclge_fd_tcam_config_1_cmd *req1;
struct hclge_fd_tcam_config_2_cmd *req2;
struct hclge_fd_tcam_config_3_cmd *req3;
struct hclge_desc desc[3];
- int pos = 0;
int ret, i;
__le32 *req;
@@ -2184,27 +1991,23 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
if (ret)
return ret;
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
- tcam_msg.loc);
+ seq_printf(s, "read result tcam key %s(%u):\n",
+ sel_x ? "x" : "y", tcam_msg.loc);
/* tcam_data0 ~ tcam_data1 */
req = (__le32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
/* tcam_data2 ~ tcam_data7 */
req = (__le32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
/* tcam_data8 ~ tcam_data12 */
req = (__le32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
return ret;
}
@@ -2228,14 +2031,13 @@ static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
return cnt;
}
-static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_fd_tcam(struct seq_file *s, void *data)
{
- u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_dbg_tcam_msg tcam_msg;
int i, ret, rule_cnt;
u16 *rule_locs;
- char *tcam_buf;
- int pos = 0;
+ u32 rule_num;
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
@@ -2243,6 +2045,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
return -EOPNOTSUPP;
}
+ rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
if (!hdev->hclge_fd_rule_num || !rule_num)
return 0;
@@ -2250,12 +2053,6 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
if (!rule_locs)
return -ENOMEM;
- tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
- if (!tcam_buf) {
- kfree(rule_locs);
- return -ENOMEM;
- }
-
rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
if (rule_cnt < 0) {
ret = rule_cnt;
@@ -2269,38 +2066,34 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
tcam_msg.stage = HCLGE_FD_STAGE_1;
tcam_msg.loc = rule_locs[i];
- ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
+ ret = hclge_dbg_fd_tcam_read(hdev, true, s, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key x, ret = %d\n", ret);
goto out;
}
- pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
-
- ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
+ ret = hclge_dbg_fd_tcam_read(hdev, false, s, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key y, ret = %d\n", ret);
goto out;
}
- pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
}
out:
- kfree(tcam_buf);
kfree(rule_locs);
return ret;
}
-static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_fd_counter(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
struct hclge_fd_ad_cnt_read_cmd *req;
char str_id[HCLGE_DBG_ID_LEN];
struct hclge_desc desc;
- int pos = 0;
int ret;
u64 cnt;
u8 i;
@@ -2308,8 +2101,7 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
- pos += scnprintf(buf + pos, len - pos,
- "func_id\thit_times\n");
+ seq_puts(s, "func_id\thit_times\n");
for (i = 0; i < func_num; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
@@ -2323,8 +2115,7 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
}
cnt = le64_to_cpu(req->cnt);
hclge_dbg_get_func_id_str(str_id, i);
- pos += scnprintf(buf + pos, len - pos,
- "%s\t%llu\n", str_id, cnt);
+ seq_printf(s, "%s\t%llu\n", str_id, cnt);
}
return 0;
@@ -2375,74 +2166,95 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
-static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_seq_dump_rst_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u32 i, offset;
+
+ seq_printf(s, "PF reset count: %u\n", hdev->rst_stats.pf_rst_cnt);
+ seq_printf(s, "FLR reset count: %u\n", hdev->rst_stats.flr_rst_cnt);
+ seq_printf(s, "GLOBAL reset count: %u\n",
+ hdev->rst_stats.global_rst_cnt);
+ seq_printf(s, "IMP reset count: %u\n", hdev->rst_stats.imp_rst_cnt);
+ seq_printf(s, "reset done count: %u\n", hdev->rst_stats.reset_done_cnt);
+ seq_printf(s, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_reset_done_cnt);
+ seq_printf(s, "reset count: %u\n", hdev->rst_stats.reset_cnt);
+ seq_printf(s, "reset fail count: %u\n", hdev->rst_stats.reset_fail_cnt);
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
+ offset = hclge_dbg_rst_info[i].offset;
+ seq_printf(s, "%s: 0x%x\n",
+ hclge_dbg_rst_info[i].message,
+ hclge_read_dev(&hdev->hw, offset));
+ }
+
+ seq_printf(s, "hdev state: 0x%lx\n", hdev->state);
+
+ return 0;
+}
+
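hclge_dbg_seq_dump_rst_info() walks hclge_dbg_rst_info[], pairing a register offset with a label; the .offset and .message field names are taken straight from the loop above, so the entry type is presumably something like the sketch below (the struct tag and field types are assumptions — the real definition lives elsewhere in the driver):

struct hclge_dbg_reg_msg {	/* assumed shape, for illustration */
	u32 offset;		/* passed to hclge_read_dev() */
	const char *message;	/* label printed next to the value */
};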
+static int hclge_dbg_dump_serv_info(struct seq_file *s, void *data)
+{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
unsigned long rem_nsec;
- int pos = 0;
u64 lc;
lc = local_clock();
rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
- pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
- (unsigned long)lc, rem_nsec / 1000);
- pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
- jiffies_to_msecs(jiffies - hdev->last_serv_processed));
- pos += scnprintf(buf + pos, len - pos,
- "last_service_task_processed: %lu(jiffies)\n",
- hdev->last_serv_processed);
- pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
- hdev->serv_processed_cnt);
+ seq_printf(s, "local_clock: [%5lu.%06lu]\n",
+ (unsigned long)lc, rem_nsec / 1000);
+ seq_printf(s, "delta: %u(ms)\n",
+ jiffies_to_msecs(jiffies - hdev->last_serv_processed));
+ seq_printf(s, "last_service_task_processed: %lu(jiffies)\n",
+ hdev->last_serv_processed);
+ seq_printf(s, "last_service_task_cnt: %lu\n", hdev->serv_processed_cnt);
return 0;
}
-static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_interrupt(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
- pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
- hdev->num_nic_msi);
- pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
- hdev->num_roce_msi);
- pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
- hdev->num_msi_used);
- pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
- hdev->num_msi_left);
+ seq_printf(s, "num_nic_msi: %u\n", hdev->num_nic_msi);
+ seq_printf(s, "num_roce_msi: %u\n", hdev->num_roce_msi);
+ seq_printf(s, "num_msi_used: %u\n", hdev->num_msi_used);
+ seq_printf(s, "num_msi_left: %u\n", hdev->num_msi_left);
return 0;
}
-static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
- char *buf, int len, u32 bd_num)
+static void hclge_dbg_imp_info_data_print(struct seq_file *s,
+ struct hclge_desc *desc_src,
+ u32 bd_num)
{
#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
struct hclge_desc *desc_index = desc_src;
u32 offset = 0;
- int pos = 0;
u32 i, j;
- pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+ seq_puts(s, "offset | data\n");
for (i = 0; i < bd_num; i++) {
j = 0;
while (j < HCLGE_DESC_DATA_LEN - 1) {
- pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
- offset);
- pos += scnprintf(buf + pos, len - pos, "0x%08x ",
- le32_to_cpu(desc_index->data[j++]));
- pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
- le32_to_cpu(desc_index->data[j++]));
+ seq_printf(s, "0x%04x | ", offset);
+ seq_printf(s, "0x%08x ",
+ le32_to_cpu(desc_index->data[j++]));
+ seq_printf(s, "0x%08x\n",
+ le32_to_cpu(desc_index->data[j++]));
offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
}
desc_index++;
}
}
-static int
-hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_get_imp_stats_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_get_imp_bd_cmd *req;
struct hclge_desc *desc_src;
struct hclge_desc desc;
@@ -2479,7 +2291,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
return ret;
}
- hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
+ hclge_dbg_imp_info_data_print(s, desc_src, bd_num);
kfree(desc_src);
@@ -2490,7 +2302,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
#define HCLGE_CMD_DATA_NUM 6
@@ -2502,9 +2314,8 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
if (i == 0 && j == 0)
continue;
- *pos += scnprintf(buf + *pos, len - *pos,
- "0x%04x | 0x%08x\n", offset,
- le32_to_cpu(desc[i].data[j]));
+ seq_printf(s, "0x%04x | 0x%08x\n", offset,
+ le32_to_cpu(desc[i].data[j]));
offset += sizeof(u32);
*index -= sizeof(u32);
@@ -2515,19 +2326,18 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
}
}
-static int
-hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_ncl_config(struct seq_file *s, void *data)
{
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
- int pos = 0;
u32 data0;
int ret;
- pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+ seq_puts(s, "offset | data\n");
while (index > 0) {
data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
@@ -2540,27 +2350,26 @@ hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
+ hclge_ncl_config_data_print(desc, &index, s);
}
return 0;
}
-static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_loopback(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct phy_device *phydev = hdev->hw.mac.phydev;
struct hclge_config_mac_mode_cmd *req_app;
struct hclge_common_lb_cmd *req_common;
struct hclge_desc desc;
u8 loopback_en;
- int pos = 0;
int ret;
req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
req_common = (struct hclge_common_lb_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
- hdev->hw.mac.mac_id);
+ seq_printf(s, "mac id: %u\n", hdev->hw.mac.mac_id);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2572,8 +2381,7 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
HCLGE_MAC_APP_LP_B);
- pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
- str_on_off(loopback_en));
+ seq_printf(s, "app loopback: %s\n", str_on_off(loopback_en));
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2584,24 +2392,22 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
return ret;
}
- loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
- pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
- str_on_off(loopback_en));
+ loopback_en = req_common->enable &
+ HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ seq_printf(s, "serdes serial loopback: %s\n", str_on_off(loopback_en));
loopback_en = req_common->enable &
- HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
- pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
- str_on_off(loopback_en));
+ HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
+ seq_printf(s, "serdes parallel loopback: %s\n",
+ str_on_off(loopback_en));
if (phydev) {
loopback_en = phydev->loopback_enabled;
- pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- str_on_off(loopback_en));
+ seq_printf(s, "phy loopback: %s\n", str_on_off(loopback_en));
} else if (hnae3_dev_phy_imp_supported(hdev)) {
loopback_en = req_common->enable &
HCLGE_CMD_GE_PHY_INNER_LOOP_B;
- pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- str_on_off(loopback_en));
+ seq_printf(s, "phy loopback: %s\n", str_on_off(loopback_en));
}
return 0;
@@ -2610,107 +2416,75 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
* @hdev: pointer to struct hclge_dev
*/
-static int
-hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_tnl_status(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_tnl_stats stats;
unsigned long rem_nsec;
- int pos = 0;
- pos += scnprintf(buf + pos, len - pos,
- "Recently generated mac tnl interruption:\n");
+ seq_puts(s, "Recently generated mac tnl interruption:\n");
while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
- pos += scnprintf(buf + pos, len - pos,
- "[%07lu.%03lu] status = 0x%x\n",
- (unsigned long)stats.time, rem_nsec / 1000,
- stats.status);
+ seq_printf(s, "[%07lu.%03lu] status = 0x%x\n",
+ (unsigned long)stats.time, rem_nsec / 1000,
+ stats.status);
}
return 0;
}
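
The mac_tnl dumper drains hdev->mac_tnl_log with kfifo_get(), so reading the debugfs file consumes the queued records: two consecutive reads will not show the same events. A minimal self-contained sketch of that produce/drain pattern (element type, fifo depth, and names invented):

#include <linux/kfifo.h>
#include <linux/seq_file.h>

struct tnl_rec {
	u64 time;
	u32 status;
};

static DEFINE_KFIFO(tnl_log, struct tnl_rec, 16);	/* depth must be a power of two */

static void tnl_log_event(u64 time, u32 status)
{
	struct tnl_rec rec = { .time = time, .status = status };

	kfifo_put(&tnl_log, rec);	/* silently drops the record when full */
}

static int tnl_log_show(struct seq_file *s, void *data)
{
	struct tnl_rec rec;

	while (kfifo_get(&tnl_log, &rec))	/* destructive read */
		seq_printf(s, "[%llu] status = 0x%x\n", rec.time, rec.status);
	return 0;
}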
-
-static const struct hclge_dbg_item mac_list_items[] = {
- { "FUNC_ID", 2 },
- { "MAC_ADDR", 12 },
- { "STATE", 2 },
-};
-
-static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
- bool is_unicast)
+static void hclge_dbg_dump_mac_list(struct seq_file *s, bool is_unicast)
{
- char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
- char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
- char *result[ARRAY_SIZE(mac_list_items)];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_node *mac_node, *tmp;
struct hclge_vport *vport;
struct list_head *list;
u32 func_id;
- int pos = 0;
- int i;
- for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
- result[i] = &data_str[i][0];
-
- pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
- is_unicast ? "UC" : "MC");
- hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
- NULL, ARRAY_SIZE(mac_list_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%s MAC_LIST:\n", is_unicast ? "UC" : "MC");
+ seq_puts(s, "FUNC_ID MAC_ADDR STATE\n");
for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
vport = &hdev->vport[func_id];
list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
spin_lock_bh(&vport->mac_list_lock);
list_for_each_entry_safe(mac_node, tmp, list, node) {
- i = 0;
- result[i++] = hclge_dbg_get_func_id_str(str_id,
- func_id);
- sprintf(result[i++], "%pM", mac_node->mac_addr);
- sprintf(result[i++], "%5s",
- hclge_mac_state_str[mac_node->state]);
- hclge_dbg_fill_content(content, sizeof(content),
- mac_list_items,
- (const char **)result,
- ARRAY_SIZE(mac_list_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ if (func_id)
+ seq_printf(s, "vf%-7u", func_id - 1U);
+ else
+ seq_puts(s, "pf ");
+ seq_printf(s, "%pM ", mac_node->mac_addr);
+ seq_printf(s, "%5s\n",
+ hclge_mac_state_str[mac_node->state]);
}
spin_unlock_bh(&vport->mac_list_lock);
}
}
-static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_umv_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 func_num = pci_num_vf(hdev->pdev) + 1;
struct hclge_vport *vport;
- int pos = 0;
u8 i;
- pos += scnprintf(buf, len, "num_alloc_vport : %u\n",
- hdev->num_alloc_vport);
- pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
- hdev->max_umv_size);
- pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
- hdev->wanted_umv_size);
- pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
- hdev->priv_umv_size);
+ seq_printf(s, "num_alloc_vport : %u\n", hdev->num_alloc_vport);
+ seq_printf(s, "max_umv_size : %u\n", hdev->max_umv_size);
+ seq_printf(s, "wanted_umv_size : %u\n", hdev->wanted_umv_size);
+ seq_printf(s, "priv_umv_size : %u\n", hdev->priv_umv_size);
mutex_lock(&hdev->vport_lock);
- pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
- hdev->share_umv_size);
+ seq_printf(s, "share_umv_size : %u\n", hdev->share_umv_size);
for (i = 0; i < func_num; i++) {
vport = &hdev->vport[i];
- pos += scnprintf(buf + pos, len - pos,
- "vport(%u) used_umv_num : %u\n",
- i, vport->used_umv_num);
+ seq_printf(s, "vport(%u) used_umv_num : %u\n",
+ i, vport->used_umv_num);
}
mutex_unlock(&hdev->vport_lock);
- pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
- hdev->used_mc_mac_num);
+ seq_printf(s, "used_mc_mac_num : %u\n", hdev->used_mc_mac_num);
return 0;
}
@@ -2852,38 +2626,12 @@ static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
return 0;
}
-static const struct hclge_dbg_item vlan_filter_items[] = {
- { "FUNC_ID", 2 },
- { "I_VF_VLAN_FILTER", 2 },
- { "E_VF_VLAN_FILTER", 2 },
- { "PORT_VLAN_FILTER_BYPASS", 0 }
-};
-
-static const struct hclge_dbg_item vlan_offload_items[] = {
- { "FUNC_ID", 2 },
- { "PVID", 4 },
- { "ACCEPT_TAG1", 2 },
- { "ACCEPT_TAG2", 2 },
- { "ACCEPT_UNTAG1", 2 },
- { "ACCEPT_UNTAG2", 2 },
- { "INSERT_TAG1", 2 },
- { "INSERT_TAG2", 2 },
- { "SHIFT_TAG", 2 },
- { "STRIP_TAG1", 2 },
- { "STRIP_TAG2", 2 },
- { "DROP_TAG1", 2 },
- { "DROP_TAG2", 2 },
- { "PRI_ONLY_TAG1", 2 },
- { "PRI_ONLY_TAG2", 0 }
-};
-
-static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev,
+ struct seq_file *s)
{
- char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
- const char *result[ARRAY_SIZE(vlan_filter_items)];
- u8 i, j, vlan_fe, bypass, ingress, egress;
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ u8 i, vlan_fe, bypass, ingress, egress;
+ char str_id[HCLGE_DBG_ID_LEN];
int ret;
ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
@@ -2893,14 +2641,11 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
- *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
- str_on_off(ingress));
- *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
- str_on_off(egress));
+ seq_printf(s, "I_PORT_VLAN_FILTER: %s\n", str_on_off(ingress));
+ seq_printf(s, "E_PORT_VLAN_FILTER: %s\n", str_on_off(egress));
- hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
- NULL, ARRAY_SIZE(vlan_filter_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "FUNC_ID I_VF_VLAN_FILTER E_VF_VLAN_FILTER ");
+ seq_puts(s, "PORT_VLAN_FILTER_BYPASS\n");
for (i = 0; i < func_num; i++) {
ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
@@ -2913,37 +2658,32 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
if (ret)
return ret;
- j = 0;
- result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = str_on_off(ingress);
- result[j++] = str_on_off(egress);
- result[j++] = test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
- hdev->ae_dev->caps) ?
- str_on_off(bypass) : "NA";
- hclge_dbg_fill_content(content, sizeof(content),
- vlan_filter_items, result,
- ARRAY_SIZE(vlan_filter_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ seq_printf(s, "%-9s%-18s%-18s%s\n",
+ hclge_dbg_get_func_id_str(str_id, i),
+ str_on_off(ingress), str_on_off(egress),
+ test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ?
+ str_on_off(bypass) : "NA");
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
return 0;
}
-static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev,
+ struct seq_file *s)
{
- char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
- const char *result[ARRAY_SIZE(vlan_offload_items)];
- char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
struct hclge_dbg_vlan_cfg vlan_cfg;
+ char str_id[HCLGE_DBG_ID_LEN];
int ret;
- u8 i, j;
+ u8 i;
- hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
- NULL, ARRAY_SIZE(vlan_offload_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "FUNC_ID PVID ACCEPT_TAG1 ACCEPT_TAG2 ACCEPT_UNTAG1 ");
+ seq_puts(s, "ACCEPT_UNTAG2 INSERT_TAG1 INSERT_TAG2 SHIFT_TAG ");
+ seq_puts(s, "STRIP_TAG1 STRIP_TAG2 DROP_TAG1 DROP_TAG2 ");
+ seq_puts(s, "PRI_ONLY_TAG1 PRI_ONLY_TAG2\n");
for (i = 0; i < func_num; i++) {
ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
@@ -2954,106 +2694,92 @@ static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
if (ret)
return ret;
- sprintf(str_pvid, "%u", vlan_cfg.pvid);
- j = 0;
- result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = str_pvid;
- result[j++] = str_on_off(vlan_cfg.accept_tag1);
- result[j++] = str_on_off(vlan_cfg.accept_tag2);
- result[j++] = str_on_off(vlan_cfg.accept_untag1);
- result[j++] = str_on_off(vlan_cfg.accept_untag2);
- result[j++] = str_on_off(vlan_cfg.insert_tag1);
- result[j++] = str_on_off(vlan_cfg.insert_tag2);
- result[j++] = str_on_off(vlan_cfg.shift_tag);
- result[j++] = str_on_off(vlan_cfg.strip_tag1);
- result[j++] = str_on_off(vlan_cfg.strip_tag2);
- result[j++] = str_on_off(vlan_cfg.drop_tag1);
- result[j++] = str_on_off(vlan_cfg.drop_tag2);
- result[j++] = str_on_off(vlan_cfg.pri_only1);
- result[j++] = str_on_off(vlan_cfg.pri_only2);
-
- hclge_dbg_fill_content(content, sizeof(content),
- vlan_offload_items, result,
- ARRAY_SIZE(vlan_offload_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
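+ /* field widths track the header columns printed above */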
+ seq_printf(s, "%-9s", hclge_dbg_get_func_id_str(str_id, i));
+ seq_printf(s, "%-6u", vlan_cfg.pvid);
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.accept_tag1));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.accept_tag2));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.accept_untag1));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.accept_untag2));
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.insert_tag1));
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.insert_tag2));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.shift_tag));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.strip_tag1));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.strip_tag2));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.drop_tag1));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.drop_tag2));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.pri_only1));
+ seq_printf(s, "%s\n", str_on_off(vlan_cfg.pri_only2));
}
return 0;
}
-static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_vlan_config(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_vlan_filter_config(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
+ return hclge_dbg_dump_vlan_offload_config(hdev, s);
}
-static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_ptp_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_ptp *ptp = hdev->ptp;
u32 sw_cfg = ptp->ptp_cfg;
unsigned int tx_start;
unsigned int last_rx;
- int pos = 0;
u32 hw_cfg;
int ret;
- pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
- ptp->info.name);
- pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
- str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags)));
- pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
- str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN,
- &ptp->flags)));
- pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
- str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN,
- &ptp->flags)));
+ seq_printf(s, "phc %s's debug info:\n", ptp->info.name);
+ seq_printf(s, "ptp enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags)));
+ seq_printf(s, "ptp tx enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags)));
+ seq_printf(s, "ptp rx enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags)));
last_rx = jiffies_to_msecs(ptp->last_rx);
- pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
- last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
- pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
+ seq_printf(s, "last rx time: %lu.%lu\n",
+ last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
+ seq_printf(s, "rx count: %lu\n", ptp->rx_cnt);
tx_start = jiffies_to_msecs(ptp->tx_start);
- pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
- tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
- pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
- pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
- ptp->tx_skipped);
- pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
- ptp->tx_timeout);
- pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
- ptp->last_tx_seqid);
+ seq_printf(s, "last tx start time: %lu.%lu\n",
+ tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
+ seq_printf(s, "tx count: %lu\n", ptp->tx_cnt);
+ seq_printf(s, "tx skipped count: %lu\n", ptp->tx_skipped);
+ seq_printf(s, "tx timeout count: %lu\n", ptp->tx_timeout);
+ seq_printf(s, "last tx seqid: %u\n", ptp->last_tx_seqid);
+
ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
- sw_cfg, hw_cfg);
+ seq_printf(s, "sw_cfg: %#x, hw_cfg: %#x\n", sw_cfg, hw_cfg);
- pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
- ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
+ seq_printf(s, "tx type: %d, rx filter: %d\n",
+ ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
return 0;
}
-static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_uc(struct seq_file *s, void *data)
{
- hclge_dbg_dump_mac_list(hdev, buf, len, true);
+ hclge_dbg_dump_mac_list(s, true);
return 0;
}
-static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_mc(struct seq_file *s, void *data)
{
- hclge_dbg_dump_mac_list(hdev, buf, len, false);
+ hclge_dbg_dump_mac_list(s, false);
return 0;
}
@@ -3061,156 +2787,156 @@ static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
{
.cmd = HNAE3_DBG_CMD_TM_NODES,
- .dbg_dump = hclge_dbg_dump_tm_nodes,
+ .dbg_read_func = hclge_dbg_dump_tm_nodes,
},
{
.cmd = HNAE3_DBG_CMD_TM_PRI,
- .dbg_dump = hclge_dbg_dump_tm_pri,
+ .dbg_read_func = hclge_dbg_dump_tm_pri,
},
{
.cmd = HNAE3_DBG_CMD_TM_QSET,
- .dbg_dump = hclge_dbg_dump_tm_qset,
+ .dbg_read_func = hclge_dbg_dump_tm_qset,
},
{
.cmd = HNAE3_DBG_CMD_TM_MAP,
- .dbg_dump = hclge_dbg_dump_tm_map,
+ .dbg_read_func = hclge_dbg_dump_tm_map,
},
{
.cmd = HNAE3_DBG_CMD_TM_PG,
- .dbg_dump = hclge_dbg_dump_tm_pg,
+ .dbg_read_func = hclge_dbg_dump_tm_pg,
},
{
.cmd = HNAE3_DBG_CMD_TM_PORT,
- .dbg_dump = hclge_dbg_dump_tm_port,
+ .dbg_read_func = hclge_dbg_dump_tm_port,
},
{
.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
- .dbg_dump = hclge_dbg_dump_tc,
+ .dbg_read_func = hclge_dbg_dump_tc,
},
{
.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
- .dbg_dump = hclge_dbg_dump_qos_pause_cfg,
+ .dbg_read_func = hclge_dbg_dump_qos_pause_cfg,
},
{
.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
- .dbg_dump = hclge_dbg_dump_qos_pri_map,
+ .dbg_read_func = hclge_dbg_dump_qos_pri_map,
},
{
.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
- .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ .dbg_read_func = hclge_dbg_dump_qos_dscp_map,
},
{
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
- .dbg_dump = hclge_dbg_dump_qos_buf_cfg,
+ .dbg_read_func = hclge_dbg_dump_qos_buf_cfg,
},
{
.cmd = HNAE3_DBG_CMD_MAC_UC,
- .dbg_dump = hclge_dbg_dump_mac_uc,
+ .dbg_read_func = hclge_dbg_dump_mac_uc,
},
{
.cmd = HNAE3_DBG_CMD_MAC_MC,
- .dbg_dump = hclge_dbg_dump_mac_mc,
+ .dbg_read_func = hclge_dbg_dump_mac_mc,
},
{
.cmd = HNAE3_DBG_CMD_MNG_TBL,
- .dbg_dump = hclge_dbg_dump_mng_table,
+ .dbg_read_func = hclge_dbg_dump_mng_table,
},
{
.cmd = HNAE3_DBG_CMD_LOOPBACK,
- .dbg_dump = hclge_dbg_dump_loopback,
+ .dbg_read_func = hclge_dbg_dump_loopback,
},
{
.cmd = HNAE3_DBG_CMD_PTP_INFO,
- .dbg_dump = hclge_dbg_dump_ptp_info,
+ .dbg_read_func = hclge_dbg_dump_ptp_info,
},
{
.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
- .dbg_dump = hclge_dbg_dump_interrupt,
+ .dbg_read_func = hclge_dbg_dump_interrupt,
},
{
.cmd = HNAE3_DBG_CMD_RESET_INFO,
- .dbg_dump = hclge_dbg_dump_rst_info,
+ .dbg_read_func = hclge_dbg_seq_dump_rst_info,
},
{
.cmd = HNAE3_DBG_CMD_IMP_INFO,
- .dbg_dump = hclge_dbg_get_imp_stats_info,
+ .dbg_read_func = hclge_dbg_get_imp_stats_info,
},
{
.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
- .dbg_dump = hclge_dbg_dump_ncl_config,
+ .dbg_read_func = hclge_dbg_dump_ncl_config,
},
{
.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_bios_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_SSU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ssu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_igu_egu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RPU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rpu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_NCSI,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ncsi_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RTC,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rtc_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_PPP,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ppp_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RCB,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rcb_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_TQP,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_tqp_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_MAC,
- .dbg_dump = hclge_dbg_dump_mac,
+ .dbg_read_func = hclge_dbg_dump_mac,
},
{
.cmd = HNAE3_DBG_CMD_REG_DCB,
- .dbg_dump = hclge_dbg_dump_dcb,
+ .dbg_read_func = hclge_dbg_dump_dcb,
},
{
.cmd = HNAE3_DBG_CMD_FD_TCAM,
- .dbg_dump = hclge_dbg_dump_fd_tcam,
+ .dbg_read_func = hclge_dbg_dump_fd_tcam,
},
{
.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
- .dbg_dump = hclge_dbg_dump_mac_tnl_status,
+ .dbg_read_func = hclge_dbg_dump_mac_tnl_status,
},
{
.cmd = HNAE3_DBG_CMD_SERV_INFO,
- .dbg_dump = hclge_dbg_dump_serv_info,
+ .dbg_read_func = hclge_dbg_dump_serv_info,
},
{
.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
- .dbg_dump = hclge_dbg_dump_vlan_config,
+ .dbg_read_func = hclge_dbg_dump_vlan_config,
},
{
.cmd = HNAE3_DBG_CMD_FD_COUNTER,
- .dbg_dump = hclge_dbg_dump_fd_counter,
+ .dbg_read_func = hclge_dbg_dump_fd_counter,
},
{
.cmd = HNAE3_DBG_CMD_UMV_INFO,
- .dbg_dump = hclge_dbg_dump_umv_info,
+ .dbg_read_func = hclge_dbg_dump_umv_info,
},
};
-int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len)
+int hclge_dbg_get_read_func(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ read_func *func)
{
struct hclge_vport *vport = hclge_get_vport(handle);
const struct hclge_dbg_func *cmd_func;
@@ -3220,11 +2946,8 @@ int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
if (cmd == hclge_dbg_cmd_func[i].cmd) {
cmd_func = &hclge_dbg_cmd_func[i];
- if (cmd_func->dbg_dump)
- return cmd_func->dbg_dump(hdev, buf, len);
- else
- return cmd_func->dbg_dump_reg(hdev, cmd, buf,
- len);
+ *func = cmd_func->dbg_read_func;
+ return 0;
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index 2b998cbed826..317f79efd54c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -92,6 +92,7 @@ struct hclge_dbg_func {
int (*dbg_dump)(struct hclge_dev *hdev, char *buf, int len);
int (*dbg_dump_reg)(struct hclge_dev *hdev, enum hnae3_dbg_cmd cmd,
char *buf, int len);
+ read_func dbg_read_func;
};
struct hclge_dbg_status_dfx_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 30bdec1acb57..f209a05e2033 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -490,7 +490,7 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections,
- * so GFP_ATOMIC is more suitalbe here
+ * so GFP_ATOMIC is more suitable here
*/
desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
@@ -582,7 +582,7 @@ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
int size, u64 *data)
{
u64 *buf = data;
- u32 i;
+ int i;
for (i = 0; i < size; i++) {
if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
@@ -599,7 +599,7 @@ static void hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
const struct hclge_comm_stats_str strs[],
int size, u8 **data)
{
- u32 i;
+ int i;
if (stringset != ETH_SS_STATS)
return;
@@ -2358,7 +2358,7 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
for (i = 0; i < 2; i++) {
hclge_cmd_setup_basic_desc(&desc[i],
HCLGE_OPC_RX_COM_THRD_ALLOC, false);
- req = (struct hclge_rx_com_thrd *)&desc[i].data;
+ req = (struct hclge_rx_com_thrd *)desc[i].data;
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
@@ -2624,7 +2624,7 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lan
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
- if (!mac->support_autoneg && mac->speed == speed &&
+ if (!mac->support_autoneg && mac->speed == (u32)speed &&
mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
@@ -2652,7 +2652,7 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
if (ret)
return ret;
- hdev->hw.mac.req_speed = speed;
+ hdev->hw.mac.req_speed = (u32)speed;
hdev->hw.mac.req_duplex = duplex;
return 0;
@@ -3446,7 +3446,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- int speed;
+ u32 speed;
int ret;
/* get the port info from SFP cmd if not copper port */
@@ -4872,7 +4872,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -4890,7 +4890,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
u8 tuple_sets;
@@ -6989,7 +6989,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node2;
- int cnt = 0;
+ u32 cnt = 0;
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
@@ -8223,14 +8223,14 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
word_num = vfid / 32;
bit_num = vfid % 32;
if (clr)
- desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[1].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else {
word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
bit_num = vfid % 32;
if (clr)
- desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[2].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
}
@@ -9292,7 +9292,7 @@ static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
static int init_mgr_tbl(struct hclge_dev *hdev)
{
int ret;
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
@@ -10719,7 +10719,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
mutex_lock(&hdev->vport_lock);
/* VF's mps must fit within hdev->mps */
- if (vport->vport_id && max_frm_size > hdev->mps) {
+ if (vport->vport_id && (u32)max_frm_size > hdev->mps) {
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
} else if (vport->vport_id) {
@@ -10730,7 +10730,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater then VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
- if (max_frm_size < hdev->vport[i].mps) {
+ if ((u32)max_frm_size < hdev->vport[i].mps) {
dev_err(&hdev->pdev->dev,
"failed to set pf mtu for less than vport %d, mps = %u.\n",
i, hdev->vport[i].mps);
@@ -11220,7 +11220,7 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.reset_cnt;
+ u32 rst_cnt = hdev->rst_stats.reset_cnt;
int ret;
ret = client->ops->init_instance(&vport->nic);
@@ -11264,7 +11264,7 @@ static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hclge_dev *hdev = ae_dev->priv;
struct hnae3_client *client;
- int rst_cnt;
+ u32 rst_cnt;
int ret;
if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
@@ -11429,7 +11429,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev,
- "can't set consistent PCI DMA");
+ "can't set consistent PCI DMA\n");
goto err_disable_device;
}
dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
@@ -12094,7 +12094,7 @@ static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
int min_tx_rate, int max_tx_rate)
{
if (min_tx_rate != 0 ||
- max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ max_tx_rate < 0 || (u32)max_tx_rate > hdev->hw.mac.max_speed) {
dev_err(&hdev->pdev->dev,
"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
@@ -12119,7 +12119,7 @@ static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
if (!vport)
return -EINVAL;
- if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ if (!force && (u32)max_tx_rate == vport->vf_info.max_tx_rate)
return 0;
ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
@@ -12870,7 +12870,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
- .dbg_read_cmd = hclge_dbg_read_cmd,
+ .dbg_get_read_func = hclge_dbg_get_read_func,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
@@ -12910,7 +12910,7 @@ static struct hnae3_ae_algo ae_algo = {
static int __init hclge_init(void)
{
- pr_info("%s is initializing\n", HCLGE_NAME);
+ pr_debug("%s is initializing\n", HCLGE_NAME);
hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
if (!hclge_wq) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index b9fc719880bb..032b472d2368 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1142,8 +1142,8 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
-int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len);
+int hclge_dbg_get_read_func(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ read_func *func);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 59c863306657..c7ff12a6c076 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -749,16 +749,17 @@ static int hclge_get_rss_key(struct hclge_vport *vport,
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
struct hclge_comm_rss_cfg *rss_cfg;
+ int rss_hash_key_size;
u8 index;
index = mbx_req->msg.data[0];
rss_cfg = &hdev->rss_cfg;
+ rss_hash_key_size = sizeof(rss_cfg->rss_hash_key);
/* Check the query index of rss_hash_key from VF, make sure no
* more than the size of rss_hash_key.
*/
- if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
- sizeof(rss_cfg->rss_hash_key)) {
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) > rss_hash_key_size) {
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
@@ -800,7 +801,7 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
- u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
+ int tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
return tail == hw->hw.cmq.crq.next_to_use;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 9a456ebf9b7c..96553109f44c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -151,7 +151,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
mdio_bus->parent = &hdev->pdev->dev;
mdio_bus->priv = hdev;
- mdio_bus->phy_mask = ~(1 << mac->phy_addr);
+ mdio_bus->phy_mask = ~(1U << mac->phy_addr);
ret = mdiobus_register(mdio_bus);
if (ret) {
dev_err(mdio_bus->parent,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index 63483636c074..61faddcc3dd0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -25,7 +25,7 @@ struct ifreq;
#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0)
#define HCLGE_PTP_TIME_SEC_L_REG 0x54
#define HCLGE_PTP_TIME_NSEC_REG 0x58
-#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TIME_NSEC_MASK 0x3fffffffLL
#define HCLGE_PTP_TIME_NSEC_NEG BIT(31)
#define HCLGE_PTP_TIME_SYNC_REG 0x5C
#define HCLGE_PTP_TIME_SYNC_EN BIT(0)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 4cf6ac04662a..8fcf220a120d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -606,7 +606,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret;
@@ -624,7 +624,7 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
}
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 tuple_sets;
@@ -2465,7 +2465,7 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
struct hnae3_client *client)
{
struct hclgevf_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.rst_cnt;
+ u32 rst_cnt = hdev->rst_stats.rst_cnt;
int ret;
ret = client->ops->init_instance(&hdev->nic);
@@ -2625,7 +2625,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
- dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+ dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
goto err_disable_device;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 85c2a634c8f9..f5c99ca54369 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -159,7 +159,7 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
{
u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->hw.cmq.crq.next_to_use;
+ return tail == (u32)hw->hw.cmq.crq.next_to_use;
}
static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 7d9d9dbc7560..9de01e344e27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -127,37 +127,38 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_queue *tqp;
- int i, j, reg_um;
+ int i, j, reg_num;
u32 *reg = data;
*version = hdev->fw_version;
reg += hclgevf_reg_get_header(reg);
/* fetching per-VF registers values from VF PCIe register space */
- reg_um = ARRAY_SIZE(cmdq_reg_addr_list);
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- reg_um = ARRAY_SIZE(common_reg_addr_list);
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
- reg_um = ARRAY_SIZE(ring_reg_addr_list);
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_num, reg);
tqp = &hdev->htqp[j].q;
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = readl_relaxed(tqp->io_base -
HCLGEVF_TQP_REG_OFFSET +
ring_reg_addr_list[i]);
}
- reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR,
+ reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
tqp_intr_reg_addr_list[i] +
HCLGEVF_RING_INT_REG_OFFSET * j);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index c559dd4291d3..e9f338e9dbe7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -919,9 +919,10 @@ static int hinic_set_channels(struct net_device *netdev,
return 0;
}
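+/* Reports which packet header fields feed the RSS hash; replaces
+ * the ETHTOOL_GRXFH handling formerly in hinic_get_rxnfc().
+ */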
-static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
- struct ethtool_rxnfc *cmd)
+static int hinic_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_rss_type rss_type = { 0 };
int err;
@@ -964,7 +965,7 @@ static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
return 0;
}
-static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
+static int set_l4_rss_hash_ops(const struct ethtool_rxfh_fields *cmd,
struct hinic_rss_type *rss_type)
{
u8 rss_l4_en = 0;
@@ -1000,16 +1001,18 @@ static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
return 0;
}
-static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
- struct ethtool_rxnfc *cmd)
+static int hinic_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- struct hinic_rss_type *rss_type = &nic_dev->rss_type;
+ struct hinic_dev *nic_dev = netdev_priv(dev);
+ struct hinic_rss_type *rss_type;
int err;
- if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
- cmd->data = 0;
+ rss_type = &nic_dev->rss_type;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE))
return -EOPNOTSUPP;
- }
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
@@ -1108,26 +1111,6 @@ static int hinic_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = nic_dev->num_qps;
break;
- case ETHTOOL_GRXFH:
- err = hinic_get_rss_hash_opts(nic_dev, cmd);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err = 0;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- err = hinic_set_rss_hash_opts(nic_dev, cmd);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -1797,11 +1780,12 @@ static const struct ethtool_ops hinic_ethtool_ops = {
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
.get_rxnfc = hinic_get_rxnfc,
- .set_rxnfc = hinic_set_rxnfc,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
+ .get_rxfh_fields = hinic_get_rxfh_fields,
+ .set_rxfh_fields = hinic_set_rxfh_fields,
.get_sset_count = hinic_get_sset_count,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
@@ -1829,11 +1813,12 @@ static const struct ethtool_ops hinicvf_ethtool_ops = {
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
.get_rxnfc = hinic_get_rxnfc,
- .set_rxnfc = hinic_set_rxnfc,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
+ .get_rxfh_fields = hinic_get_rxfh_fields,
+ .set_rxfh_fields = hinic_set_rxfh_fields,
.get_sset_count = hinic_get_sset_count,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 045c47786a04..28114a59347e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -605,7 +605,7 @@ static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
/**
* ceq_elements_init - Initialize all the elements in the ceq
* @eq: the event queue
- * @init_val: value to init with it the elements
+ * @init_val: value to init the elements with
**/
static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
index 3f9c31d29215..97c1584dc05b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -861,7 +861,7 @@ static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
HINIC_MBOX_HEADER_SET(cmd, CMD) |
- /* The vf's offset to it's associated pf */
+ /* The vf's offset to its associated pf */
HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
index ae08257dd1d2..3f7f73430be4 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -482,7 +482,6 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
{
struct hinic3_sq_wqe_combo wqe_combo = {};
struct hinic3_tx_info *tx_info;
- struct hinic3_txq *tx_q = txq;
u32 offload, queue_info = 0;
struct hinic3_sq_task task;
u16 wqebb_cnt, num_sge;
@@ -506,9 +505,9 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
if (likely(wqebb_cnt > txq->tx_stop_thrs))
txq->tx_stop_thrs = min(wqebb_cnt, txq->tx_start_thrs);
- netif_subqueue_try_stop(netdev, tx_q->sq->q_id,
- hinic3_wq_free_wqebbs(&tx_q->sq->wq),
- tx_q->tx_start_thrs);
+ netif_subqueue_try_stop(netdev, txq->sq->q_id,
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_start_thrs);
return NETDEV_TX_BUSY;
}
@@ -542,12 +541,11 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
goto err_drop_pkt;
}
- netdev_tx_sent_queue(netdev_get_tx_queue(netdev, txq->sq->q_id),
- skb->len);
- netif_subqueue_maybe_stop(netdev, tx_q->sq->q_id,
- hinic3_wq_free_wqebbs(&tx_q->sq->wq),
- tx_q->tx_stop_thrs,
- tx_q->tx_start_thrs);
+ netif_subqueue_sent(netdev, txq->sq->q_id, skb->len);
+ netif_subqueue_maybe_stop(netdev, txq->sq->q_id,
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_stop_thrs,
+ txq->tx_start_thrs);
hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner);
hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ,
@@ -631,7 +629,6 @@ bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
struct net_device *netdev = txq->netdev;
u16 hw_ci, sw_ci, q_id = txq->sq->q_id;
struct hinic3_tx_info *tx_info;
- struct hinic3_txq *tx_q = txq;
unsigned int bytes_compl = 0;
unsigned int pkts = 0;
u16 wqebb_cnt = 0;
@@ -663,8 +660,8 @@ bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
hinic3_wq_put_wqebbs(&txq->sq->wq, wqebb_cnt);
netif_subqueue_completed_wake(netdev, q_id, pkts, bytes_compl,
- hinic3_wq_free_wqebbs(&tx_q->sq->wq),
- tx_q->tx_start_thrs);
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_start_thrs);
return pkts == HINIC3_TX_POLL_WEIGHT;
}
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 24046fe16634..6f0821f1e798 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -211,98 +211,169 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
struct ibmveth_buff_pool *pool)
{
- u32 i;
- u32 count = pool->size - atomic_read(&pool->available);
- u32 buffers_added = 0;
- struct sk_buff *skb;
- unsigned int free_index, index;
- u64 correlator;
+ union ibmveth_buf_desc descs[IBMVETH_MAX_RX_PER_HCALL] = {0};
+ u32 remaining = pool->size - atomic_read(&pool->available);
+ u64 correlators[IBMVETH_MAX_RX_PER_HCALL] = {0};
unsigned long lpar_rc;
+ u32 buffers_added = 0;
+ u32 i, filled, batch;
+ struct vio_dev *vdev;
dma_addr_t dma_addr;
+ struct device *dev;
+ u32 index;
+
+ vdev = adapter->vdev;
+ dev = &vdev->dev;
mb();
- for (i = 0; i < count; ++i) {
- union ibmveth_buf_desc desc;
+ batch = adapter->rx_buffers_per_hcall;
- free_index = pool->consumer_index;
- index = pool->free_map[free_index];
- skb = NULL;
+ while (remaining > 0) {
+ unsigned int free_index = pool->consumer_index;
- if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
- schedule_work(&adapter->work);
- goto bad_index_failure;
- }
+ /* Fill a batch of descriptors */
+ for (filled = 0; filled < min(remaining, batch); filled++) {
+ index = pool->free_map[free_index];
+ if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
+ adapter->replenish_add_buff_failure++;
+ netdev_info(adapter->netdev,
+ "Invalid map index %u, reset\n",
+ index);
+ schedule_work(&adapter->work);
+ break;
+ }
+
+ if (!pool->skbuff[index]) {
+ struct sk_buff *skb = NULL;
- /* are we allocating a new buffer or recycling an old one */
- if (pool->skbuff[index])
- goto reuse;
+ skb = netdev_alloc_skb(adapter->netdev,
+ pool->buff_size);
+ if (!skb) {
+ adapter->replenish_no_mem++;
+ adapter->replenish_add_buff_failure++;
+ break;
+ }
+
+ dma_addr = dma_map_single(dev, skb->data,
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ adapter->replenish_add_buff_failure++;
+ break;
+ }
- skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+ pool->dma_addr[index] = dma_addr;
+ pool->skbuff[index] = skb;
+ } else {
+ /* re-use case */
+ dma_addr = pool->dma_addr[index];
+ }
- if (!skb) {
- netdev_dbg(adapter->netdev,
- "replenish: unable to allocate skb\n");
- adapter->replenish_no_mem++;
- break;
- }
+ if (rx_flush) {
+ unsigned int len;
- dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- pool->buff_size, DMA_FROM_DEVICE);
+ len = adapter->netdev->mtu + IBMVETH_BUFF_OH;
+ len = min(pool->buff_size, len);
+ ibmveth_flush_buffer(pool->skbuff[index]->data,
+ len);
+ }
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto failure;
+ descs[filled].fields.flags_len = IBMVETH_BUF_VALID |
+ pool->buff_size;
+ descs[filled].fields.address = dma_addr;
- pool->dma_addr[index] = dma_addr;
- pool->skbuff[index] = skb;
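+ /* The correlator encodes the pool index (upper 32 bits) and
+ * buffer index (lower 32 bits) so the failure path can recover
+ * the buffer from it.
+ */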
+ correlators[filled] = ((u64)pool->index << 32) | index;
+ *(u64 *)pool->skbuff[index]->data = correlators[filled];
- if (rx_flush) {
- unsigned int len = min(pool->buff_size,
- adapter->netdev->mtu +
- IBMVETH_BUFF_OH);
- ibmveth_flush_buffer(skb->data, len);
+ free_index++;
+ if (free_index >= pool->size)
+ free_index = 0;
}
-reuse:
- dma_addr = pool->dma_addr[index];
- desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
- desc.fields.address = dma_addr;
-
- correlator = ((u64)pool->index << 32) | index;
- *(u64 *)pool->skbuff[index]->data = correlator;
- lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
- desc.desc);
+ if (!filled)
+ break;
+ /* single-buffer case */
+ if (filled == 1)
+ lpar_rc = h_add_logical_lan_buffer(vdev->unit_address,
+ descs[0].desc);
+ else
+ /* Multi-buffer hcall */
+ lpar_rc = h_add_logical_lan_buffers(vdev->unit_address,
+ descs[0].desc,
+ descs[1].desc,
+ descs[2].desc,
+ descs[3].desc,
+ descs[4].desc,
+ descs[5].desc,
+ descs[6].desc,
+ descs[7].desc);
if (lpar_rc != H_SUCCESS) {
- netdev_warn(adapter->netdev,
- "%sadd_logical_lan failed %lu\n",
- skb ? "" : "When recycling: ", lpar_rc);
- goto failure;
+ dev_warn_ratelimited(dev,
+ "RX h_add_logical_lan failed: filled=%u, rc=%lu, batch=%u\n",
+ filled, lpar_rc, batch);
+ goto hcall_failure;
}
- pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
- pool->consumer_index++;
- if (pool->consumer_index >= pool->size)
- pool->consumer_index = 0;
+ /* Only update pool state after hcall succeeds */
+ for (i = 0; i < filled; i++) {
+ free_index = pool->consumer_index;
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
- buffers_added++;
- adapter->replenish_add_buff_success++;
- }
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
+ }
- mb();
- atomic_add(buffers_added, &(pool->available));
- return;
+ buffers_added += filled;
+ adapter->replenish_add_buff_success += filled;
+ remaining -= filled;
-failure:
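+ /* Clear the scratch descriptors/correlators for the next batch */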
+ memset(&descs, 0, sizeof(descs));
+ memset(&correlators, 0, sizeof(correlators));
+ continue;
- if (dma_addr && !dma_mapping_error(&adapter->vdev->dev, dma_addr))
- dma_unmap_single(&adapter->vdev->dev,
- pool->dma_addr[index], pool->buff_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(pool->skbuff[index]);
- pool->skbuff[index] = NULL;
-bad_index_failure:
- adapter->replenish_add_buff_failure++;
+hcall_failure:
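+ /* Unmap and free every buffer staged for the failed batch */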
+ for (i = 0; i < filled; i++) {
+ index = correlators[i] & 0xffffffffUL;
+ dma_addr = pool->dma_addr[index];
+
+ if (pool->skbuff[index]) {
+ if (dma_addr &&
+ !dma_mapping_error(dev, dma_addr))
+ dma_unmap_single(dev, dma_addr,
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(pool->skbuff[index]);
+ pool->skbuff[index] = NULL;
+ }
+ }
+ adapter->replenish_add_buff_failure += filled;
+
+ /*
+ * If the multi rx buffer hcall is no longer supported by the
+ * FW, e.g. after a Live Partition Migration
+ */
+ if (batch > 1 && lpar_rc == H_FUNCTION) {
+ /*
+ * Instead of retrying each buffer individually here, just
+ * drop the max rx buffers per hcall to 1; the buffers will
+ * be replenished through the single-buffer path the next
+ * time ibmveth_replenish_buffer_pool() is called.
+ */
+ netdev_info(adapter->netdev,
+ "RX Multi buffers not supported by FW, rc=%lu\n",
+ lpar_rc);
+ adapter->rx_buffers_per_hcall = 1;
+ netdev_info(adapter->netdev,
+ "Next rx replesh will fall back to single-buffer hcall\n");
+ }
+ break;
+ }
mb();
atomic_add(buffers_added, &(pool->available));
@@ -1783,6 +1854,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->features |= NETIF_F_FRAGLIST;
}
+ if (ret == H_SUCCESS &&
+ (ret_attr & IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT)) {
+ adapter->rx_buffers_per_hcall = IBMVETH_MAX_RX_PER_HCALL;
+ netdev_dbg(netdev,
+ "RX Multi-buffer hcall supported by FW, batch set to %u\n",
+ adapter->rx_buffers_per_hcall);
+ } else {
+ adapter->rx_buffers_per_hcall = 1;
+ netdev_dbg(netdev,
+ "RX Single-buffer hcall mode, batch set to %u\n",
+ adapter->rx_buffers_per_hcall);
+ }
+
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index b0a2460ec9f9..068f99df133e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -28,6 +28,7 @@
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
+#define IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT 0x0000000000040000UL
#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL
#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
@@ -46,6 +47,24 @@
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
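+/* Post up to 8 rx buffer descriptors in a single hcall; unused
+ * descriptor slots are left zeroed (IBMVETH_BUF_VALID not set).
+ */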
+static inline long h_add_logical_lan_buffers(unsigned long unit_address,
+ unsigned long desc1,
+ unsigned long desc2,
+ unsigned long desc3,
+ unsigned long desc4,
+ unsigned long desc5,
+ unsigned long desc6,
+ unsigned long desc7,
+ unsigned long desc8)
+{
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ return plpar_hcall9(H_ADD_LOGICAL_LAN_BUFFERS,
+ retbuf, unit_address,
+ desc1, desc2, desc3, desc4,
+ desc5, desc6, desc7, desc8);
+}
+
/* FW allows us to send 6 descriptors but we only use one so mark
* the other 5 as unused (0)
*/
@@ -101,6 +120,7 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
#define IBMVETH_MAX_QUEUES 16U
#define IBMVETH_DEFAULT_QUEUES 8U
+#define IBMVETH_MAX_RX_PER_HCALL 8U
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
@@ -151,6 +171,7 @@ struct ibmveth_adapter {
int rx_csum;
int large_send;
bool is_active_trunk;
+ unsigned int rx_buffers_per_hcall;
u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 92647e137cf8..eec971567aac 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2312,8 +2312,6 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
tx_pool->num_buffers - 1 :
tx_pool->consumer_index - 1;
tx_buff = &tx_pool->tx_buff[index];
- adapter->netdev->stats.tx_packets--;
- adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
adapter->tx_stats_buffers[queue_num].batched_packets--;
adapter->tx_stats_buffers[queue_num].bytes -=
tx_buff->skb->len;
@@ -2647,9 +2645,6 @@ tx_err:
}
out:
rcu_read_unlock();
- netdev->stats.tx_dropped += tx_dropped;
- netdev->stats.tx_bytes += tx_bytes;
- netdev->stats.tx_packets += tx_bpackets + tx_dpackets;
adapter->tx_send_failed += tx_send_failed;
adapter->tx_map_failed += tx_map_failed;
adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets;
@@ -3452,6 +3447,25 @@ err:
return -ret;
}
+static void ibmvnic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int i;
+
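+ /* Sum the per-queue counters; the data paths no longer update
+ * netdev->stats directly.
+ */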
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ stats->rx_packets += adapter->rx_stats_buffers[i].packets;
+ stats->rx_bytes += adapter->rx_stats_buffers[i].bytes;
+ }
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ stats->tx_packets += adapter->tx_stats_buffers[i].batched_packets;
+ stats->tx_packets += adapter->tx_stats_buffers[i].direct_packets;
+ stats->tx_bytes += adapter->tx_stats_buffers[i].bytes;
+ stats->tx_dropped += adapter->tx_stats_buffers[i].dropped_packets;
+ }
+}
+
static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ibmvnic_adapter *adapter = netdev_priv(dev);
@@ -3567,8 +3581,6 @@ restart_poll:
length = skb->len;
napi_gro_receive(napi, skb); /* send it up */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += length;
adapter->rx_stats_buffers[scrq_num].packets++;
adapter->rx_stats_buffers[scrq_num].bytes += length;
frames_processed++;
@@ -3678,6 +3690,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
.ndo_set_rx_mode = ibmvnic_set_multi,
.ndo_set_mac_address = ibmvnic_set_mac,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = ibmvnic_get_stats64,
.ndo_tx_timeout = ibmvnic_tx_timeout,
.ndo_change_mtu = ibmvnic_change_mtu,
.ndo_features_check = ibmvnic_features_check,
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 5a331c1c76cb..b05cc0d7a15d 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -231,6 +231,7 @@ config I40E
depends on PCI
select AUXILIARY_BUS
select LIBIE
+ select LIBIE_ADMINQ
select NET_DEVLINK
help
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -260,6 +261,7 @@ config I40E_DCB
config IAVF
tristate
select LIBIE
+ select LIBIE_ADMINQ
select NET_SHAPER
config I40EVF
@@ -294,6 +296,7 @@ config ICE
select AUXILIARY_BUS
select DIMLIB
select LIBIE
+ select LIBIE_ADMINQ
select NET_DEVLINK
select PACKING
select PLDMFW
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 9364bc2b4eb1..c0bbb12eed2e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -2096,54 +2096,47 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
}
}
-static int e1000_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info,
- u32 __always_unused *rule_locs)
+static int e1000_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *info)
{
- info->data = 0;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc;
- switch (info->cmd) {
- case ETHTOOL_GRXFH: {
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 mrqc;
+ info->data = 0;
- mrqc = er32(MRQC);
+ mrqc = er32(MRQC);
- if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
- return 0;
-
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V4_FLOW:
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V6_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
+ if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
return 0;
- }
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
default:
- return -EOPNOTSUPP;
+ break;
}
+ return 0;
}
static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
@@ -2352,7 +2345,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_sset_count = e1000e_get_sset_count,
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
- .get_rxnfc = e1000_get_rxnfc,
+ .get_rxfh_fields = e1000_get_rxfh_fields,
.get_ts_info = e1000e_get_ts_info,
.get_eee = e1000e_get_eee,
.set_eee = e1000e_set_eee,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 1bc5b6c0b897..1954a04460d1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -691,9 +691,11 @@ static int fm10k_set_coalesce(struct net_device *dev,
return 0;
}
-static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
- struct ethtool_rxnfc *cmd)
+static int fm10k_get_rssh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct fm10k_intfc *interface = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on fm10k */
@@ -743,9 +745,6 @@ static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
cmd->data = interface->num_rx_queues;
ret = 0;
break;
- case ETHTOOL_GRXFH:
- ret = fm10k_get_rss_hash_opts(interface, cmd);
- break;
default:
break;
}
@@ -753,9 +752,11 @@ static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
- struct ethtool_rxnfc *nfc)
+static int fm10k_set_rssh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct fm10k_intfc *interface = netdev_priv(dev);
int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
interface->flags);
int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
@@ -871,22 +872,6 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
return 0;
}
-static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- struct fm10k_intfc *interface = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = fm10k_set_rss_hash_opt(interface, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
{
struct fm10k_hw *hw = &interface->hw;
@@ -1176,7 +1161,6 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.get_coalesce = fm10k_get_coalesce,
.set_coalesce = fm10k_set_coalesce,
.get_rxnfc = fm10k_get_rxnfc,
- .set_rxnfc = fm10k_set_rxnfc,
.get_regs = fm10k_get_regs,
.get_regs_len = fm10k_get_regs_len,
.self_test = fm10k_self_test,
@@ -1186,6 +1170,8 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.get_rxfh_key_size = fm10k_get_rssrk_size,
.get_rxfh = fm10k_get_rssh,
.set_rxfh = fm10k_set_rssh,
+ .get_rxfh_fields = fm10k_get_rssh_fields,
+ .set_rxfh_fields = fm10k_set_rssh_fields,
.get_channels = fm10k_get_channels,
.set_channels = fm10k_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 7c600d6e66ba..49aa4497efce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -548,6 +548,7 @@ struct i40e_pf {
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
u16 sw_int_count; /* SW interrupt count */
+ u32 link_down_events;
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
@@ -660,7 +661,7 @@ struct i40e_pf {
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
unsigned long ptp_tx_start;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
struct timespec64 ptp_prev_hw_time;
struct work_struct ptp_extts0_work;
ktime_t ptp_reset_start;
@@ -1302,8 +1303,11 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf);
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
void i40e_ptp_set_increment(struct i40e_pf *pf);
-int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
-int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int i40e_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void i40e_ptp_save_hw_time(struct i40e_pf *pf);
void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
void i40e_ptp_init(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 175c1320c143..096ec46bb619 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -18,7 +18,7 @@ static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
(hw->aq.num_asq_entries *
- sizeof(struct i40e_aq_desc)),
+ sizeof(struct libie_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
@@ -44,7 +44,7 @@ static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
(hw->aq.num_arq_entries *
- sizeof(struct i40e_aq_desc)),
+ sizeof(struct libie_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
return ret_code;
@@ -80,7 +80,7 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw)
**/
static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct i40e_dma_mem *bi;
int ret_code;
int i;
@@ -108,9 +108,9 @@ static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
/* now configure the descriptors for use */
desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
* register for buffer size configuration
@@ -119,12 +119,12 @@ static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
desc->retval = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
- desc->params.external.addr_high =
+ desc->params.generic.addr_high =
cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low =
+ desc->params.generic.addr_low =
cpu_to_le32(lower_32_bits(bi->pa));
- desc->params.external.param0 = 0;
- desc->params.external.param1 = 0;
+ desc->params.generic.param0 = 0;
+ desc->params.generic.param1 = 0;
}
alloc_arq_bufs:
@@ -691,8 +691,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
struct i40e_adminq_ring *asq = &(hw->aq.asq);
struct i40e_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
- struct i40e_aq_desc desc_cb;
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc desc_cb;
+ struct libie_aq_desc *desc;
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
@@ -750,7 +750,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
**/
static int
i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -758,7 +758,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
{
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
- struct i40e_aq_desc *desc_on_ring;
+ struct libie_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
int status = 0;
@@ -771,7 +771,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
goto asq_send_command_error;
}
- hw->aq.asq_last_status = I40E_AQ_RC_OK;
+ hw->aq.asq_last_status = LIBIE_AQ_RC_OK;
val = rd32(hw, I40E_PF_ATQH);
if (val >= hw->aq.num_asq_entries) {
@@ -851,9 +851,9 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
/* Update the address values in the desc with the pa value
* for respective buffer
*/
- desc_on_ring->params.external.addr_high =
+ desc_on_ring->params.generic.addr_high =
cpu_to_le32(upper_32_bits(dma_buff->pa));
- desc_on_ring->params.external.addr_low =
+ desc_on_ring->params.generic.addr_low =
cpu_to_le32(lower_32_bits(dma_buff->pa));
}
@@ -905,13 +905,13 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
retval &= 0xff;
}
cmd_completed = true;
- if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ if ((enum libie_aq_err)retval == LIBIE_AQ_RC_OK)
status = 0;
- else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ else if ((enum libie_aq_err)retval == LIBIE_AQ_RC_EBUSY)
status = -EBUSY;
else
status = -EIO;
- hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ hw->aq.asq_last_status = (enum libie_aq_err)retval;
}
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
@@ -954,7 +954,7 @@ asq_send_command_error:
**/
int
i40e_asq_send_command_atomic(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -972,7 +972,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
}
int
-i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+i40e_asq_send_command(struct i40e_hw *hw, struct libie_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -996,12 +996,12 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
**/
int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context,
- enum i40e_admin_queue_err *aq_status)
+ enum libie_aq_err *aq_status)
{
int status;
@@ -1023,13 +1023,13 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
*
* Fill the desc with default values
**/
-void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+void i40e_fill_default_direct_cmd_desc(struct libie_aq_desc *desc,
u16 opcode)
{
/* zero out the desc */
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
/**
@@ -1047,7 +1047,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
u16 *pending)
{
u16 ntc = hw->aq.arq.next_to_clean;
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct i40e_dma_mem *bi;
int ret_code = 0;
u16 desc_idx;
@@ -1081,9 +1081,9 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
desc_idx = ntc;
hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ (enum libie_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & I40E_AQ_FLAG_ERR) {
+ if (flags & LIBIE_AQ_FLAG_ERR) {
ret_code = -EIO;
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
@@ -1107,14 +1107,14 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
- desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
wr32(hw, I40E_PF_ARQT, ntc);
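[Across the i40e_adminq.c hunks above, two renames travel together: the descriptor type becomes the shared struct libie_aq_desc, and the descriptor's params.external view becomes params.generic — same fields at the same offsets. Collected in one place, the recurring "attach a DMA buffer to a descriptor" idiom from the ARQ hunks now reads:

	desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	/* bi->pa is the buffer's DMA address */
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

Nothing functional changes in these hunks; the same bits land in the same descriptor words.]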
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 55b5bb884d73..1be97a3a86ce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -9,7 +9,7 @@
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
- (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct libie_aq_desc *)((R).desc_buf.va))[i]))
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
@@ -39,7 +39,7 @@ struct i40e_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
- struct i40e_aq_desc *wb_desc;
+ struct libie_aq_desc *wb_desc;
};
#define I40E_ADMINQ_DETAILS(R, i) \
@@ -47,7 +47,7 @@ struct i40e_asq_cmd_details {
/* ARQ event information */
struct i40e_arq_event_info {
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -72,8 +72,8 @@ struct i40e_adminq_info {
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
- enum i40e_admin_queue_err asq_last_status;
- enum i40e_admin_queue_err arq_last_status;
+ enum libie_aq_err asq_last_status;
+ enum libie_aq_err arq_last_status;
};
/**
@@ -119,7 +119,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
-void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+void i40e_fill_default_direct_cmd_desc(struct libie_aq_desc *desc,
u16 opcode);
#endif /* _I40E_ADMINQ_H_ */
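[The header changes above retype every descriptor and status field but nothing about the on-the-wire format: struct libie_aq_desc is assumed here to carry the same 32-byte layout as the struct i40e_aq_desc deleted in the next file, which is what makes the conversion a pure rename. For reference, the layout implied by the converted code — this is a reconstruction from the usage in this patch, not a quote from the libie header:

	struct libie_aq_desc {
		__le16 flags;
		__le16 opcode;
		__le16 datalen;
		__le16 retval;
		__le32 cookie_high;
		__le32 cookie_low;
		union {
			struct {
				__le32 param0;
				__le32 param1;
				__le32 addr_high;
				__le32 addr_low;
			} generic;
			u8 raw[16];
		} params;
	};

Likewise, since every use site is a one-for-one rename, the LIBIE_AQ_FLAG_* bits and LIBIE_AQ_RC_* codes are assumed to keep the bit positions and numeric values of the I40E_AQ_FLAG_*/I40E_AQ_RC_* definitions deleted in the next hunk.]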
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index c8f35d4de271..76d872b91a38 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
+#include <linux/net/intel/libie/adminq.h>
+
#include <linux/bits.h>
#include <linux/types.h>
@@ -30,75 +32,6 @@
/* API version 1.10 for X722 devices adds ability to request FEC encoding */
#define I40E_MINOR_VER_FW_REQUEST_FEC_X722 0x000A
-struct i40e_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- struct {
- __le32 param0;
- __le32 param1;
- __le32 param2;
- __le32 param3;
- } internal;
- struct {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
- } external;
- u8 raw[16];
- } params;
-};
-
-/* Flags sub-structure
- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-/* command flags and offsets*/
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_LB_SHIFT 9
-#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT 13
-
-#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-
-/* error codes */
-enum i40e_admin_queue_err {
- I40E_AQ_RC_OK = 0, /* success */
- I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
- I40E_AQ_RC_ENOENT = 2, /* No such element */
- I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
- I40E_AQ_RC_EINTR = 4, /* operation interrupted */
- I40E_AQ_RC_EIO = 5, /* I/O error */
- I40E_AQ_RC_ENXIO = 6, /* No such resource */
- I40E_AQ_RC_E2BIG = 7, /* Arg too long */
- I40E_AQ_RC_EAGAIN = 8, /* Try again */
- I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
- I40E_AQ_RC_EACCES = 10, /* Permission denied */
- I40E_AQ_RC_EFAULT = 11, /* Bad address */
- I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
- I40E_AQ_RC_EEXIST = 13, /* object already exists */
- I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
- I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
- I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
- I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
- I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- I40E_AQ_RC_EFBIG = 22, /* File too large */
-};
-
/* Admin Queue command opcodes */
enum i40e_admin_queue_opc {
/* aq commands */
@@ -320,21 +253,6 @@ struct i40e_aqc_get_version {
__le16 api_minor;
};
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
-
-/* Send driver version (indirect 0x0002) */
-struct i40e_aqc_driver_version {
- u8 driver_major_ver;
- u8 driver_minor_ver;
- u8 driver_build_ver;
- u8 driver_subbuild_ver;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
-
/* Queue Shutdown (direct 0x0003) */
struct i40e_aqc_queue_shutdown {
__le32 driver_unloading;
@@ -352,75 +270,6 @@ struct i40e_aqc_set_pf_context {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
-/* Request resource ownership (direct 0x0008)
- * Release resource ownership (direct 0x0009)
- */
-struct i40e_aqc_request_resource {
- __le16 resource_id;
- __le16 access_type;
- __le32 timeout;
- __le32 resource_number;
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
-
-/* Get function capabilities (indirect 0x000A)
- * Get device capabilities (indirect 0x000B)
- */
-struct i40e_aqc_list_capabilites {
- u8 command_flags;
- u8 pf_index;
- u8 reserved[2];
- __le32 count;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
-
-struct i40e_aqc_list_capabilities_element_resp {
- __le16 id;
- u8 major_rev;
- u8 minor_rev;
- __le32 number;
- __le32 logical_id;
- __le32 phys_id;
- u8 reserved[16];
-};
-
-/* list of caps */
-
-#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
-#define I40E_AQ_CAP_ID_SRIOV 0x0012
-#define I40E_AQ_CAP_ID_VF 0x0013
-#define I40E_AQ_CAP_ID_VMDQ 0x0014
-#define I40E_AQ_CAP_ID_8021QBG 0x0015
-#define I40E_AQ_CAP_ID_8021QBR 0x0016
-#define I40E_AQ_CAP_ID_VSI 0x0017
-#define I40E_AQ_CAP_ID_DCB 0x0018
-#define I40E_AQ_CAP_ID_FCOE 0x0021
-#define I40E_AQ_CAP_ID_ISCSI 0x0022
-#define I40E_AQ_CAP_ID_RSS 0x0040
-#define I40E_AQ_CAP_ID_RXQ 0x0041
-#define I40E_AQ_CAP_ID_TXQ 0x0042
-#define I40E_AQ_CAP_ID_MSIX 0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
-#define I40E_AQ_CAP_ID_1588 0x0046
-#define I40E_AQ_CAP_ID_IWARP 0x0051
-#define I40E_AQ_CAP_ID_LED 0x0061
-#define I40E_AQ_CAP_ID_SDP 0x0062
-#define I40E_AQ_CAP_ID_MDIO 0x0063
-#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
-#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
-#define I40E_AQ_CAP_ID_FLEX10 0x00F1
-#define I40E_AQ_CAP_ID_CEM 0x00F2
-
/* Set CPPM Configuration (direct 0x0103) */
struct i40e_aqc_cppm_configuration {
__le16 command_flags;
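[With the generic descriptor, flags, error enum, driver-version, request-resource, and capability-list structures all lifted into <linux/net/intel/libie/adminq.h>, command structs are no longer reached by casting &desc.params.raw; the conversion below uses libie_aq_raw() instead. The converted i40e_aq_queue_shutdown() from the i40e_common.c hunks further down condenses to this pattern (modulo the local status variable):

	static int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading)
	{
		struct i40e_aqc_queue_shutdown *cmd;
		struct libie_aq_desc desc;

		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_queue_shutdown);
		cmd = libie_aq_raw(&desc);	/* overlay on desc.params.raw */
		if (unloading)
			cmd->driver_unloading =
				cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
		return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
	}

Hunks that need both the command and completion views (e.g. add_vsi) simply call libie_aq_raw() twice; both pointers alias the same 16-byte params area.]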
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 59263551c383..5f1a405cbbf8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -682,9 +682,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
if (err) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -711,8 +709,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
dev_info(&pf->pdev->dev,
"update VSI ctxt for PE failed, err %pe aq_err %s\n",
ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
}
return err;
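[The i40e_client.c hunks show the logging side of the conversion. The deleted i40e_aq_str() needed the hw pointer only to snprintf unknown codes into the hw->err_str scratch buffer; the shared libie_aq_str() takes just the enum libie_aq_err value (how it renders unknown codes is internal to the helper), shortening every call site:

	/* before */
	dev_info(&pf->pdev->dev, "aq_err %s\n",
		 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* after */
	dev_info(&pf->pdev->dev, "aq_err %s\n",
		 libie_aq_str(pf->hw.aq.asq_last_status));

The same substitution accounts for most of the one-line churn in the i40e_common.c hunks that follow.]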
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index b11c35e307ca..270e7e8cf9cf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -69,66 +69,6 @@ int i40e_set_mac_type(struct i40e_hw *hw)
}
/**
- * i40e_aq_str - convert AQ err code to a string
- * @hw: pointer to the HW structure
- * @aq_err: the AQ error code to convert
- **/
-const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
-{
- switch (aq_err) {
- case I40E_AQ_RC_OK:
- return "OK";
- case I40E_AQ_RC_EPERM:
- return "I40E_AQ_RC_EPERM";
- case I40E_AQ_RC_ENOENT:
- return "I40E_AQ_RC_ENOENT";
- case I40E_AQ_RC_ESRCH:
- return "I40E_AQ_RC_ESRCH";
- case I40E_AQ_RC_EINTR:
- return "I40E_AQ_RC_EINTR";
- case I40E_AQ_RC_EIO:
- return "I40E_AQ_RC_EIO";
- case I40E_AQ_RC_ENXIO:
- return "I40E_AQ_RC_ENXIO";
- case I40E_AQ_RC_E2BIG:
- return "I40E_AQ_RC_E2BIG";
- case I40E_AQ_RC_EAGAIN:
- return "I40E_AQ_RC_EAGAIN";
- case I40E_AQ_RC_ENOMEM:
- return "I40E_AQ_RC_ENOMEM";
- case I40E_AQ_RC_EACCES:
- return "I40E_AQ_RC_EACCES";
- case I40E_AQ_RC_EFAULT:
- return "I40E_AQ_RC_EFAULT";
- case I40E_AQ_RC_EBUSY:
- return "I40E_AQ_RC_EBUSY";
- case I40E_AQ_RC_EEXIST:
- return "I40E_AQ_RC_EEXIST";
- case I40E_AQ_RC_EINVAL:
- return "I40E_AQ_RC_EINVAL";
- case I40E_AQ_RC_ENOTTY:
- return "I40E_AQ_RC_ENOTTY";
- case I40E_AQ_RC_ENOSPC:
- return "I40E_AQ_RC_ENOSPC";
- case I40E_AQ_RC_ENOSYS:
- return "I40E_AQ_RC_ENOSYS";
- case I40E_AQ_RC_ERANGE:
- return "I40E_AQ_RC_ERANGE";
- case I40E_AQ_RC_EFLUSHED:
- return "I40E_AQ_RC_EFLUSHED";
- case I40E_AQ_RC_BAD_ADDR:
- return "I40E_AQ_RC_BAD_ADDR";
- case I40E_AQ_RC_EMODE:
- return "I40E_AQ_RC_EMODE";
- case I40E_AQ_RC_EFBIG:
- return "I40E_AQ_RC_EFBIG";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
- return hw->err_str;
-}
-
-/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
@@ -141,7 +81,7 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
- struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ struct libie_aq_desc *aq_desc = (struct libie_aq_desc *)desc;
u32 effective_mask = hw->debug_mask & mask;
char prefix[27];
u16 len;
@@ -164,12 +104,12 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
le32_to_cpu(aq_desc->cookie_low));
i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
"\tparam (0,1) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.internal.param0),
- le32_to_cpu(aq_desc->params.internal.param1));
+ le32_to_cpu(aq_desc->params.generic.param0),
+ le32_to_cpu(aq_desc->params.generic.param1));
i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
"\taddr (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.external.addr_high),
- le32_to_cpu(aq_desc->params.external.addr_low));
+ le32_to_cpu(aq_desc->params.generic.addr_high),
+ le32_to_cpu(aq_desc->params.generic.addr_low));
if (buffer && buf_len != 0 && len != 0 &&
(effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
@@ -214,14 +154,14 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)
int i40e_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_queue_shutdown *cmd =
- (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ struct i40e_aqc_queue_shutdown *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
+ cmd = libie_aq_raw(&desc);
if (unloading)
cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
@@ -245,9 +185,8 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
u8 *lut, u16 lut_size,
bool set)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_lut *cmd_resp =
- (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp;
+ struct libie_aq_desc desc;
int status;
u16 flags;
@@ -258,9 +197,10 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_rss_lut);
+ cmd_resp = libie_aq_raw(&desc);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_VALID, 1);
@@ -326,10 +266,9 @@ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
struct i40e_aqc_get_set_rss_key_data *key,
bool set)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_key *cmd_resp =
- (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+ struct i40e_aqc_get_set_rss_key *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (set)
@@ -339,9 +278,10 @@ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_rss_key);
+ cmd_resp = libie_aq_raw(&desc);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_VALID, 1);
@@ -439,13 +379,13 @@ i40e_aq_mac_address_read(struct i40e_hw *hw,
struct i40e_aqc_mac_address_read_data *addrs,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_mac_address_read *cmd_data =
- (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ struct i40e_aqc_mac_address_read *cmd_data;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+ cmd_data = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, addrs,
sizeof(*addrs), cmd_details);
@@ -465,13 +405,13 @@ int i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_mac_address_write *cmd_data =
- (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ struct i40e_aqc_mac_address_write *cmd_data;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
+ cmd_data = libie_aq_raw(&desc);
cmd_data->command_flags = cpu_to_le16(flags);
cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
@@ -1061,7 +1001,7 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
{
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (!abilities)
@@ -1071,36 +1011,36 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_abilities);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (abilities_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
if (qualified_modules)
- desc.params.external.param0 |=
+ desc.params.generic.param0 |=
cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
if (report_init)
- desc.params.external.param0 |=
+ desc.params.generic.param0 |=
cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
status = i40e_asq_send_command(hw, &desc, abilities,
abilities_size, cmd_details);
switch (hw->aq.asq_last_status) {
- case I40E_AQ_RC_EIO:
+ case LIBIE_AQ_RC_EIO:
status = -EIO;
break;
- case I40E_AQ_RC_EAGAIN:
+ case LIBIE_AQ_RC_EAGAIN:
usleep_range(1000, 2000);
total_delay++;
status = -EIO;
break;
- /* also covers I40E_AQ_RC_OK */
+ /* also covers LIBIE_AQ_RC_OK */
default:
break;
}
- } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
+ } while ((hw->aq.asq_last_status == LIBIE_AQ_RC_EAGAIN) &&
(total_delay < max_delay));
if (status)
@@ -1137,9 +1077,8 @@ int i40e_aq_set_phy_config(struct i40e_hw *hw,
struct i40e_aq_set_phy_config *config,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aq_set_phy_config *cmd =
- (struct i40e_aq_set_phy_config *)&desc.params.raw;
+ struct i40e_aq_set_phy_config *cmd;
+ struct libie_aq_desc desc;
int status;
if (!config)
@@ -1148,6 +1087,7 @@ int i40e_aq_set_phy_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_config);
+ cmd = libie_aq_raw(&desc);
*cmd = *config;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1259,14 +1199,14 @@ int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_clear_pxe *cmd =
- (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+ struct i40e_aqc_clear_pxe *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_clear_pxe_mode);
+ cmd = libie_aq_raw(&desc);
cmd->rx_cnt = 0x2;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1288,14 +1228,14 @@ int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
bool enable_link,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_link_restart_an *cmd =
- (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ struct i40e_aqc_set_link_restart_an *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_link_restart_an);
+ cmd = libie_aq_raw(&desc);
cmd->command = I40E_AQ_PHY_RESTART_AN;
if (enable_link)
cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
@@ -1320,16 +1260,16 @@ int i40e_aq_get_link_info(struct i40e_hw *hw,
bool enable_lse, struct i40e_link_status *link,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_link_status *resp =
- (struct i40e_aqc_get_link_status *)&desc.params.raw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ struct i40e_aqc_get_link_status *resp;
+ struct libie_aq_desc desc;
bool tx_pause, rx_pause;
u16 command_flags;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+ resp = libie_aq_raw(&desc);
if (enable_lse)
command_flags = I40E_AQ_LSE_ENABLE;
else
@@ -1415,14 +1355,14 @@ int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
u16 mask,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_phy_int_mask *cmd =
- (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+ struct i40e_aqc_set_phy_int_mask *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_int_mask);
+ cmd = libie_aq_raw(&desc);
cmd->event_mask = cpu_to_le16(mask);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1441,11 +1381,11 @@ int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_lb_mode *cmd =
- (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+ struct i40e_aqc_set_lb_mode *cmd;
+ struct libie_aq_desc desc;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
+ cmd = libie_aq_raw(&desc);
if (ena_lpbk) {
if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
@@ -1467,14 +1407,14 @@ int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_phy_debug *cmd =
- (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+ struct i40e_aqc_set_phy_debug *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
+ cmd = libie_aq_raw(&desc);
cmd->command_flags = cmd_flags;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1494,23 +1434,22 @@ int i40e_aq_add_vsi(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_get_update_vsi *cmd =
- (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
- struct i40e_aqc_add_get_update_vsi_completion *resp =
- (struct i40e_aqc_add_get_update_vsi_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp;
+ struct i40e_aqc_add_get_update_vsi *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_vsi);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
cmd->connection_type = vsi_ctx->connection_type;
cmd->vf_id = vsi_ctx->vf_num;
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info),
@@ -1538,15 +1477,14 @@ int i40e_aq_set_default_vsi(struct i40e_hw *hw,
u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)
- &desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
cmd->seid = cpu_to_le16(seid);
@@ -1566,15 +1504,14 @@ int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)
- &desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
cmd->promiscuous_flags = cpu_to_le16(0);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
cmd->seid = cpu_to_le16(seid);
@@ -1597,15 +1534,15 @@ int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details,
bool rx_only_promisc)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5))
@@ -1636,15 +1573,15 @@ int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (set)
flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
@@ -1671,15 +1608,15 @@ int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
@@ -1707,15 +1644,15 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (enable) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
if (i40e_is_aq_api_ver_ge(hw, 1, 5))
@@ -1748,9 +1685,8 @@ int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
@@ -1760,6 +1696,7 @@ int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+ cmd = libie_aq_raw(&desc);
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
cmd->seid = cpu_to_le16(seid);
@@ -1783,14 +1720,14 @@ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 seid, bool set_filter,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (set_filter)
cmd->promiscuous_flags
|= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
@@ -1815,20 +1752,19 @@ int i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_get_update_vsi *cmd =
- (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
- struct i40e_aqc_add_get_update_vsi_completion *resp =
- (struct i40e_aqc_add_get_update_vsi_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp;
+ struct i40e_aqc_add_get_update_vsi *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), NULL);
@@ -1857,19 +1793,18 @@ int i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_get_update_vsi *cmd =
- (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
- struct i40e_aqc_add_get_update_vsi_completion *resp =
- (struct i40e_aqc_add_get_update_vsi_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp;
+ struct i40e_aqc_add_get_update_vsi *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info),
@@ -1896,16 +1831,16 @@ int i40e_aq_get_switch_config(struct i40e_hw *hw,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *scfg =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_switch_seid *scfg;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_switch_config);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ scfg = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
scfg->seid = cpu_to_le16(*start_seid);
status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
@@ -1930,13 +1865,13 @@ int i40e_aq_set_switch_config(struct i40e_hw *hw,
u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_switch_config *scfg =
- (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+ struct i40e_aqc_set_switch_config *scfg;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_switch_config);
+ scfg = libie_aq_raw(&desc);
scfg->flags = cpu_to_le16(flags);
scfg->valid_flags = cpu_to_le16(valid_flags);
scfg->mode = mode;
@@ -1968,11 +1903,11 @@ int i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_version *resp =
- (struct i40e_aqc_get_version *)&desc.params.raw;
+ struct i40e_aqc_get_version *resp;
+ struct libie_aq_desc desc;
int status;
+ resp = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2005,22 +1940,22 @@ int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_driver_version *cmd =
- (struct i40e_aqc_driver_version *)&desc.params.raw;
+ struct libie_aqc_driver_ver *cmd;
+ struct libie_aq_desc desc;
int status;
u16 len;
if (dv == NULL)
return -EINVAL;
+ cmd = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
- cmd->driver_major_ver = dv->major_version;
- cmd->driver_minor_ver = dv->minor_version;
- cmd->driver_build_ver = dv->build_version;
- cmd->driver_subbuild_ver = dv->subbuild_version;
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD);
+ cmd->major_ver = dv->major_version;
+ cmd->minor_ver = dv->minor_version;
+ cmd->build_ver = dv->build_version;
+ cmd->subbuild_ver = dv->subbuild_version;
len = 0;
while (len < sizeof(dv->driver_string) &&
@@ -2120,11 +2055,9 @@ int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
bool enable_stats,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_veb *cmd =
- (struct i40e_aqc_add_veb *)&desc.params.raw;
- struct i40e_aqc_add_veb_completion *resp =
- (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp;
+ struct i40e_aqc_add_veb *cmd;
+ struct libie_aq_desc desc;
u16 veb_flags = 0;
int status;
@@ -2132,6 +2065,8 @@ int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
if (!!uplink_seid != !!downlink_seid)
return -EINVAL;
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
cmd->uplink_seid = cpu_to_le16(uplink_seid);
@@ -2178,15 +2113,14 @@ int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
u16 *vebs_used, u16 *vebs_free,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
- (struct i40e_aqc_get_veb_parameters_completion *)
- &desc.params.raw;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (veb_seid == 0)
return -EINVAL;
+ cmd_resp = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_veb_parameters);
cmd_resp->seid = cpu_to_le16(veb_seid);
@@ -2228,10 +2162,9 @@ get_veb_exit:
**/
static u16
i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
- struct i40e_aq_desc *desc, u16 count, u16 seid)
+ struct libie_aq_desc *desc, u16 count, u16 seid)
{
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc->params.raw;
+ struct i40e_aqc_macvlan *cmd = libie_aq_raw(desc);
u16 buf_size;
int i;
@@ -2249,9 +2182,9 @@ i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
mv_list[i].flags |=
cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
- desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc->flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
return buf_size;
}
@@ -2271,7 +2204,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
if (count == 0 || !mv_list || !hw)
@@ -2302,9 +2235,9 @@ int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
+ enum libie_aq_err *aq_status)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
if (count == 0 || !mv_list || !hw)
@@ -2331,9 +2264,8 @@ i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
+ struct i40e_aqc_macvlan *cmd;
+ struct libie_aq_desc desc;
u16 buf_size;
int status;
@@ -2344,14 +2276,15 @@ i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd = libie_aq_raw(&desc);
cmd->num_addresses = cpu_to_le16(count);
cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
cmd->seid[1] = 0;
cmd->seid[2] = 0;
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
cmd_details, true);
@@ -2378,10 +2311,10 @@ int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
+ enum libie_aq_err *aq_status)
{
struct i40e_aqc_macvlan *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
if (count == 0 || !mv_list || !hw)
@@ -2391,15 +2324,15 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
- cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
cmd->num_addresses = cpu_to_le16(count);
cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
cmd->seid[1] = 0;
cmd->seid[2] = 0;
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
cmd_details, true, aq_status);
@@ -2421,21 +2354,21 @@ int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_pf_vf_message *cmd =
- (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ struct i40e_aqc_pf_vf_message *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd = libie_aq_raw(&desc);
cmd->id = cpu_to_le32(vfid);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_SI);
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF |
+ LIBIE_AQ_FLAG_RD));
if (msglen > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
}
status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
@@ -2456,9 +2389,8 @@ int i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_debug_reg_read_write *cmd_resp =
- (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_debug_reg_read_write *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (reg_val == NULL)
@@ -2466,6 +2398,7 @@ int i40e_aq_debug_read_register(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
+ cmd_resp = libie_aq_raw(&desc);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2491,13 +2424,13 @@ int i40e_aq_debug_write_register(struct i40e_hw *hw,
u32 reg_addr, u64 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_debug_reg_read_write *cmd =
- (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_debug_reg_read_write *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+ cmd = libie_aq_raw(&desc);
cmd->address = cpu_to_le32(reg_addr);
cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
@@ -2524,16 +2457,16 @@ int i40e_aq_request_resource(struct i40e_hw *hw,
u8 sdp_number, u64 *timeout,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_request_resource *cmd_resp =
- (struct i40e_aqc_request_resource *)&desc.params.raw;
+ struct libie_aqc_req_res *cmd_resp;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
- cmd_resp->resource_id = cpu_to_le16(resource);
+ cmd_resp = libie_aq_raw(&desc);
+ cmd_resp->res_id = cpu_to_le16(resource);
cmd_resp->access_type = cpu_to_le16(access);
- cmd_resp->resource_number = cpu_to_le32(sdp_number);
+ cmd_resp->res_number = cpu_to_le32(sdp_number);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
/* The completion specifies the maximum time in ms that the driver
@@ -2542,7 +2475,7 @@ int i40e_aq_request_resource(struct i40e_hw *hw,
* busy return value and the timeout field indicates the maximum time
* the current owner of the resource has to free it.
*/
- if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ if (!status || hw->aq.asq_last_status == LIBIE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
return status;
@@ -2562,15 +2495,15 @@ int i40e_aq_release_resource(struct i40e_hw *hw,
u8 sdp_number,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_request_resource *cmd =
- (struct i40e_aqc_request_resource *)&desc.params.raw;
+ struct libie_aqc_req_res *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
- cmd->resource_id = cpu_to_le16(resource);
- cmd->resource_number = cpu_to_le32(sdp_number);
+ cmd = libie_aq_raw(&desc);
+ cmd->res_id = cpu_to_le16(resource);
+ cmd->res_number = cpu_to_le32(sdp_number);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2594,9 +2527,8 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
bool last_command,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_nvm_update *cmd =
- (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ struct i40e_aqc_nvm_update *cmd;
+ struct libie_aq_desc desc;
int status;
/* In offset the highest byte must be zeroed. */
@@ -2607,6 +2539,7 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+ cmd = libie_aq_raw(&desc);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
@@ -2614,9 +2547,9 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (length > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
@@ -2639,9 +2572,8 @@ int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, bool last_command,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_nvm_update *cmd =
- (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ struct i40e_aqc_nvm_update *cmd;
+ struct libie_aq_desc desc;
int status;
/* In offset the highest byte must be zeroed. */
@@ -2652,6 +2584,7 @@ int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+ cmd = libie_aq_raw(&desc);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
@@ -2678,7 +2611,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 cap_count,
enum i40e_admin_queue_opc list_type_opc)
{
- struct i40e_aqc_list_capabilities_element_resp *cap;
+ struct libie_aqc_list_caps_elem *cap;
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
@@ -2687,7 +2620,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
int status;
u32 i = 0;
- cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+ cap = (struct libie_aqc_list_caps_elem *)buff;
if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
p = &hw->dev_caps;
@@ -2697,17 +2630,17 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
return;
for (i = 0; i < cap_count; i++, cap++) {
- id = le16_to_cpu(cap->id);
+ id = le16_to_cpu(cap->cap);
number = le32_to_cpu(cap->number);
logical_id = le32_to_cpu(cap->logical_id);
phys_id = le32_to_cpu(cap->phys_id);
- major_rev = cap->major_rev;
+ major_rev = cap->major_ver;
switch (id) {
- case I40E_AQ_CAP_ID_SWITCH_MODE:
+ case LIBIE_AQC_CAPS_SWITCH_MODE:
p->switch_mode = number;
break;
- case I40E_AQ_CAP_ID_MNG_MODE:
+ case LIBIE_AQC_CAPS_MNG_MODE:
p->management_mode = number;
if (major_rev > 1) {
p->mng_protocols_over_mctp = logical_id;
@@ -2718,76 +2651,76 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->mng_protocols_over_mctp = 0;
}
break;
- case I40E_AQ_CAP_ID_NPAR_ACTIVE:
+ case LIBIE_AQC_CAPS_NPAR_ACTIVE:
p->npar_enable = number;
break;
- case I40E_AQ_CAP_ID_OS2BMC_CAP:
+ case LIBIE_AQC_CAPS_OS2BMC_CAP:
p->os2bmc = number;
break;
- case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
p->valid_functions = number;
break;
- case I40E_AQ_CAP_ID_SRIOV:
+ case LIBIE_AQC_CAPS_SRIOV:
if (number == 1)
p->sr_iov_1_1 = true;
break;
- case I40E_AQ_CAP_ID_VF:
+ case LIBIE_AQC_CAPS_VF:
p->num_vfs = number;
p->vf_base_id = logical_id;
break;
- case I40E_AQ_CAP_ID_VMDQ:
+ case LIBIE_AQC_CAPS_VMDQ:
if (number == 1)
p->vmdq = true;
break;
- case I40E_AQ_CAP_ID_8021QBG:
+ case LIBIE_AQC_CAPS_8021QBG:
if (number == 1)
p->evb_802_1_qbg = true;
break;
- case I40E_AQ_CAP_ID_8021QBR:
+ case LIBIE_AQC_CAPS_8021QBR:
if (number == 1)
p->evb_802_1_qbh = true;
break;
- case I40E_AQ_CAP_ID_VSI:
+ case LIBIE_AQC_CAPS_VSI:
p->num_vsis = number;
break;
- case I40E_AQ_CAP_ID_DCB:
+ case LIBIE_AQC_CAPS_DCB:
if (number == 1) {
p->dcb = true;
p->enabled_tcmap = logical_id;
p->maxtc = phys_id;
}
break;
- case I40E_AQ_CAP_ID_FCOE:
+ case LIBIE_AQC_CAPS_FCOE:
if (number == 1)
p->fcoe = true;
break;
- case I40E_AQ_CAP_ID_ISCSI:
+ case LIBIE_AQC_CAPS_ISCSI:
if (number == 1)
p->iscsi = true;
break;
- case I40E_AQ_CAP_ID_RSS:
+ case LIBIE_AQC_CAPS_RSS:
p->rss = true;
p->rss_table_size = number;
p->rss_table_entry_width = logical_id;
break;
- case I40E_AQ_CAP_ID_RXQ:
+ case LIBIE_AQC_CAPS_RXQS:
p->num_rx_qp = number;
p->base_queue = phys_id;
break;
- case I40E_AQ_CAP_ID_TXQ:
+ case LIBIE_AQC_CAPS_TXQS:
p->num_tx_qp = number;
p->base_queue = phys_id;
break;
- case I40E_AQ_CAP_ID_MSIX:
+ case LIBIE_AQC_CAPS_MSIX:
p->num_msix_vectors = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: MSIX vector count = %d\n",
p->num_msix_vectors);
break;
- case I40E_AQ_CAP_ID_VF_MSIX:
+ case LIBIE_AQC_CAPS_VF_MSIX:
p->num_msix_vectors_vf = number;
break;
- case I40E_AQ_CAP_ID_FLEX10:
+ case LIBIE_AQC_CAPS_FLEX10:
if (major_rev == 1) {
if (number == 1) {
p->flex10_enable = true;
@@ -2803,42 +2736,42 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->flex10_mode = logical_id;
p->flex10_status = phys_id;
break;
- case I40E_AQ_CAP_ID_CEM:
+ case LIBIE_AQC_CAPS_CEM:
if (number == 1)
p->mgmt_cem = true;
break;
- case I40E_AQ_CAP_ID_IWARP:
+ case LIBIE_AQC_CAPS_RDMA:
if (number == 1)
p->iwarp = true;
break;
- case I40E_AQ_CAP_ID_LED:
+ case LIBIE_AQC_CAPS_LED:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->led[phys_id] = true;
break;
- case I40E_AQ_CAP_ID_SDP:
+ case LIBIE_AQC_CAPS_SDP:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->sdp[phys_id] = true;
break;
- case I40E_AQ_CAP_ID_MDIO:
+ case LIBIE_AQC_CAPS_MDIO:
if (number == 1) {
p->mdio_port_num = phys_id;
p->mdio_port_mode = logical_id;
}
break;
- case I40E_AQ_CAP_ID_1588:
+ case LIBIE_AQC_CAPS_1588:
if (number == 1)
p->ieee_1588 = true;
break;
- case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
+ case LIBIE_AQC_CAPS_FD:
p->fd = true;
p->fd_filters_guaranteed = number;
p->fd_filters_best_effort = logical_id;
break;
- case I40E_AQ_CAP_ID_WSR_PROT:
+ case LIBIE_AQC_CAPS_WSR_PROT:
p->wr_csr_prot = (u64)number;
p->wr_csr_prot |= (u64)logical_id << 32;
break;
- case I40E_AQ_CAP_ID_NVM_MGMT:
+ case LIBIE_AQC_CAPS_NVM_MGMT:
if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
p->sec_rev_disabled = true;
if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
@@ -2930,11 +2863,11 @@ int i40e_aq_discover_capabilities(struct i40e_hw *hw,
enum i40e_admin_queue_opc list_type_opc,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aqc_list_capabilites *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aqc_list_caps *cmd;
+ struct libie_aq_desc desc;
int status = 0;
- cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
@@ -2944,9 +2877,9 @@ int i40e_aq_discover_capabilities(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
*data_size = le16_to_cpu(desc.datalen);
@@ -2979,9 +2912,8 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_nvm_update *cmd =
- (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ struct i40e_aqc_nvm_update *cmd;
+ struct libie_aq_desc desc;
int status;
/* In offset the highest byte must be zeroed. */
@@ -2992,6 +2924,7 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+ cmd = libie_aq_raw(&desc);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
@@ -3009,9 +2942,9 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (length > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
@@ -3037,11 +2970,9 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u16 *local_len, u16 *remote_len,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_get_mib *cmd =
- (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
- struct i40e_aqc_lldp_get_mib *resp =
- (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp;
+ struct i40e_aqc_lldp_get_mib *cmd;
+ struct libie_aq_desc desc;
int status;
if (buff_size == 0 || !buff)
@@ -3049,16 +2980,18 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
/* Indirect Command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
cmd->type |= FIELD_PREP(I40E_AQ_LLDP_BRIDGE_TYPE_MASK, bridge_type);
desc.datalen = cpu_to_le16(buff_size);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
@@ -3087,19 +3020,19 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_lldp_set_local_mib *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
if (buff_size == 0 || !buff)
return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_lldp_set_local_mib);
/* Indirect Command */
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
cmd->type = mib_type;
@@ -3124,13 +3057,13 @@ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_update_mib *cmd =
- (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_update_mib *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+ cmd = libie_aq_raw(&desc);
if (!enable_update)
cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
@@ -3152,13 +3085,13 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_stop *cmd =
- (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ struct i40e_aqc_lldp_stop *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+ cmd = libie_aq_raw(&desc);
if (shutdown_agent)
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
@@ -3186,13 +3119,13 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_start *cmd =
- (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ struct i40e_aqc_lldp_start *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+ cmd = libie_aq_raw(&desc);
cmd->command = I40E_AQ_LLDP_AGENT_START;
if (persist) {
@@ -3219,9 +3152,8 @@ int
i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_dcb_parameters *cmd =
- (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+ struct i40e_aqc_set_dcb_parameters *cmd;
+ struct libie_aq_desc desc;
int status;
if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps))
@@ -3230,6 +3162,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_dcb_parameters);
+ cmd = libie_aq_raw(&desc);
if (dcb_enable) {
cmd->valid_flags = I40E_DCB_VALID;
cmd->command = I40E_AQ_DCB_SET_AGENT;
@@ -3252,7 +3185,7 @@ int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (buff_size == 0 || !buff)
@@ -3260,7 +3193,7 @@ int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
cmd_details);
@@ -3284,15 +3217,15 @@ int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_udp_tunnel *cmd =
- (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
- struct i40e_aqc_del_udp_tunnel_completion *resp =
- (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp;
+ struct i40e_aqc_add_udp_tunnel *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->udp_port = cpu_to_le16(udp_port);
cmd->protocol_type = protocol_index;
@@ -3313,13 +3246,13 @@ int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_remove_udp_tunnel *cmd =
- (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_remove_udp_tunnel *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+ cmd = libie_aq_raw(&desc);
cmd->index = index;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -3338,9 +3271,8 @@ int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_switch_seid *cmd;
+ struct libie_aq_desc desc;
int status;
if (seid == 0)
@@ -3348,6 +3280,7 @@ int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+ cmd = libie_aq_raw(&desc);
cmd->seid = cpu_to_le16(seid);
status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
@@ -3368,7 +3301,7 @@ int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
int i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
@@ -3394,9 +3327,8 @@ static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
enum i40e_admin_queue_opc opcode,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_tx_sched_ind *cmd =
- (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ struct i40e_aqc_tx_sched_ind *cmd;
+ struct libie_aq_desc desc;
int status;
bool cmd_param_flag = false;
@@ -3423,12 +3355,13 @@ static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc, opcode);
+ cmd = libie_aq_raw(&desc);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (cmd_param_flag)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
@@ -3451,14 +3384,14 @@ int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_configure_vsi_bw_limit *cmd =
- (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_vsi_bw_limit);
+ cmd = libie_aq_raw(&desc);
cmd->vsi_seid = cpu_to_le16(seid);
cmd->credit = cpu_to_le16(credit);
cmd->max_credit = max_credit;
@@ -3786,18 +3719,16 @@ int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_control_packet_filter *cmd =
- (struct i40e_aqc_add_remove_control_packet_filter *)
- &desc.params.raw;
- struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
- (struct i40e_aqc_add_remove_control_packet_filter_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd;
+ struct libie_aq_desc desc;
int status;
if (vsi_seid == 0)
return -EINVAL;
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
if (is_add) {
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_control_packet_filter);
@@ -3865,15 +3796,15 @@ static int i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_alternate_write *cmd_resp =
- (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ struct i40e_aqc_alternate_write *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (!reg_val0)
return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+ cmd_resp = libie_aq_raw(&desc);
cmd_resp->address0 = cpu_to_le32(reg_addr0);
cmd_resp->address1 = cpu_to_le32(reg_addr1);
@@ -3901,10 +3832,10 @@ int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_tx_sched_ind *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
cmd->vsi_seid = cpu_to_le16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -3922,7 +3853,7 @@ int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
int i40e_aq_resume_port_tx(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
@@ -3999,11 +3930,9 @@ int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
u8 *ret_next_table, u32 *ret_next_index,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_debug_dump_internals *cmd =
- (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
- struct i40e_aqc_debug_dump_internals *resp =
- (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ struct i40e_aqc_debug_dump_internals *resp;
+ struct i40e_aqc_debug_dump_internals *cmd;
+ struct libie_aq_desc desc;
int status;
if (buff_size == 0 || !buff)
@@ -4011,10 +3940,12 @@ int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_debug_dump_internals);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
/* Indirect Command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
cmd->cluster_id = cluster_id;
cmd->table_id = table_id;
@@ -4091,18 +4022,18 @@ i40e_aq_configure_partition_bw(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
u16 bwd_size = sizeof(*bw_data);
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_partition_bw);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
if (bwd_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(bwd_size);
@@ -4534,9 +4465,8 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
- (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (!reg_val)
@@ -4544,6 +4474,7 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
+ cmd_resp = libie_aq_raw(&desc);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -4572,7 +4503,7 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
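The EAGAIN handling above retries the AQ register read after a short sleep,
using a label and a decrementing budget. The same policy written as a loop
(a sketch, not the driver's code; the retry budget is assumed):

    /* Sketch: bounded retry while firmware reports LIBIE_AQ_RC_EAGAIN. */
    int status, retry = 5;

    do {
            status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
            if (hw->aq.asq_last_status != LIBIE_AQ_RC_EAGAIN)
                    break;
            usleep_range(1000, 2000);
    } while (--retry);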
@@ -4600,13 +4531,13 @@ int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_rx_ctl_reg_read_write *cmd =
- (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
+ cmd = libie_aq_raw(&desc);
cmd->address = cpu_to_le32(reg_addr);
cmd->value = cpu_to_le32(reg_val);
@@ -4634,7 +4565,7 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
reg_val, NULL);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
@@ -4693,14 +4624,14 @@ int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ struct i40e_aqc_phy_register_access *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_register);
+ cmd = libie_aq_raw(&desc);
cmd->phy_interface = phy_select;
cmd->dev_address = dev_addr;
cmd->reg_address = cpu_to_le32(reg_addr);
@@ -4738,14 +4669,14 @@ int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ struct i40e_aqc_phy_register_access *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_register);
+ cmd = libie_aq_raw(&desc);
cmd->phy_interface = phy_select;
cmd->dev_address = dev_addr;
cmd->reg_address = cpu_to_le32(reg_addr);
@@ -4777,19 +4708,18 @@ int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_write_personalization_profile *cmd =
- (struct i40e_aqc_write_personalization_profile *)
- &desc.params.raw;
+ struct i40e_aqc_write_personalization_profile *cmd;
struct i40e_aqc_write_ddp_resp *resp;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_write_personalization_profile);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ cmd = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
@@ -4797,7 +4727,7 @@ int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
+ resp = libie_aq_raw(&desc);
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
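Note the ordering in this hunk: resp is taken from the descriptor only
after i40e_asq_send_command() returns, because the AQ completion rewrites
the descriptor in place - the same parameter area that carried the command
then carries the response. The pattern in outline:

    /* Command and response share the descriptor parameter area. */
    cmd = libie_aq_raw(&desc);      /* fill request fields */
    status = i40e_asq_send_command(hw, &desc, buff, len, NULL);
    resp = libie_aq_raw(&desc);     /* read completion fields */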
@@ -4819,17 +4749,17 @@ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_applied_profiles *cmd =
- (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ struct i40e_aqc_get_applied_profiles *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_personalization_profile_list);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ cmd = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
cmd->flags = flags;
@@ -4891,7 +4821,7 @@ i40e_find_segment_in_package(u32 segment_type,
static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
struct i40e_profile_aq_section *aq)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u8 *msg = NULL;
u16 msglen;
int status;
@@ -4902,10 +4832,10 @@ static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
msglen = aq->datalen;
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF |
+ LIBIE_AQ_FLAG_RD));
if (msglen > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
msg = &aq->data[0];
}
@@ -5122,18 +5052,18 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
@@ -5159,9 +5089,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
int i;
@@ -5169,9 +5098,10 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
@@ -5215,18 +5145,18 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
@@ -5252,9 +5182,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
int i;
@@ -5262,9 +5191,10 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 352e957443fd..9e0c9597aeb9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -750,7 +750,7 @@ static int i40e_get_ieee_dcb_config(struct i40e_hw *hw)
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
&hw->remote_dcbx_config);
/* Don't treat ENOENT as an error for Remote MIBs */
- if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT)
ret = 0;
out:
@@ -799,7 +799,7 @@ int i40e_get_dcb_config(struct i40e_hw *hw)
}
/* CEE mode not enabled try querying IEEE data */
- if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT)
return i40e_get_ieee_dcb_config(hw);
if (ret)
@@ -816,7 +816,7 @@ int i40e_get_dcb_config(struct i40e_hw *hw)
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
&hw->remote_dcbx_config);
/* Don't treat ENOENT as an error for Remote MIBs */
- if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT)
ret = 0;
out:
@@ -925,11 +925,11 @@ i40e_get_fw_lldp_status(struct i40e_hw *hw,
if (!ret) {
*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
+ } else if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT) {
/* MIB is not available yet but the agent is running */
*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
ret = 0;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ } else if (hw->aq.asq_last_status == LIBIE_AQ_RC_EPERM) {
*lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
ret = 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 8aa43aefe84c..a2ccf4c5e30b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -136,7 +136,7 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB ETS configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
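From here on the error-string helper drops its hw argument: where
i40e_aq_str(&pf->hw, status) needed the hw handle, libie_aq_str(status)
maps the shared enum libie_aq_err straight to a name. The usage change in
isolation:

    /* Before: driver-private helper, needs the hw handle. */
    i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status);

    /* After: shared libie helper, keyed only on the status code. */
    libie_aq_str(pf->hw.aq.asq_last_status);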
@@ -175,7 +175,7 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB PFC configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -226,7 +226,7 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -291,7 +291,7 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 6cd9da662ae1..6cd6f23d42a6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -489,7 +489,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
ring = &(hw->aq.asq);
for (i = 0; i < ring->count; i++) {
- struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ struct libie_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
dev_info(&pf->pdev->dev,
" at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
@@ -502,7 +502,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
ring = &(hw->aq.arq);
for (i = 0; i < ring->count; i++) {
- struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ struct libie_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
dev_info(&pf->pdev->dev,
" ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
@@ -1268,10 +1268,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
}
} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
int ret;
- desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
goto command_write_done;
cnt = sscanf(&cmd_buf[11],
@@ -1279,10 +1279,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
&desc->flags,
&desc->opcode, &desc->datalen, &desc->retval,
&desc->cookie_high, &desc->cookie_low,
- &desc->params.internal.param0,
- &desc->params.internal.param1,
- &desc->params.internal.param2,
- &desc->params.internal.param3);
+ &desc->params.generic.param0,
+ &desc->params.generic.param1,
+ &desc->params.generic.addr_high,
+ &desc->params.generic.addr_low);
if (cnt != 10) {
dev_info(&pf->pdev->dev,
"send aq_cmd: bad command string, cnt=%d\n",
@@ -1307,19 +1307,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
desc->flags, desc->opcode, desc->datalen, desc->retval,
desc->cookie_high, desc->cookie_low,
- desc->params.internal.param0,
- desc->params.internal.param1,
- desc->params.internal.param2,
- desc->params.internal.param3);
+ desc->params.generic.param0,
+ desc->params.generic.param1,
+ desc->params.generic.addr_high,
+ desc->params.generic.addr_low);
kfree(desc);
desc = NULL;
} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
u16 buffer_len;
u8 *buff;
int ret;
- desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
goto command_write_done;
cnt = sscanf(&cmd_buf[20],
@@ -1327,10 +1327,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
&desc->flags,
&desc->opcode, &desc->datalen, &desc->retval,
&desc->cookie_high, &desc->cookie_low,
- &desc->params.internal.param0,
- &desc->params.internal.param1,
- &desc->params.internal.param2,
- &desc->params.internal.param3,
+ &desc->params.generic.param0,
+ &desc->params.generic.param1,
+ &desc->params.generic.addr_high,
+ &desc->params.generic.addr_low,
&buffer_len);
if (cnt != 11) {
dev_info(&pf->pdev->dev,
@@ -1350,7 +1350,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
desc = NULL;
goto command_write_done;
}
- desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc->flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
ret = i40e_asq_send_command(&pf->hw, desc, buff,
buffer_len, NULL);
if (!ret) {
@@ -1368,10 +1368,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
desc->flags, desc->opcode, desc->datalen, desc->retval,
desc->cookie_high, desc->cookie_low,
- desc->params.internal.param0,
- desc->params.internal.param1,
- desc->params.internal.param2,
- desc->params.internal.param3);
+ desc->params.generic.param0,
+ desc->params.generic.param1,
+ desc->params.generic.addr_high,
+ desc->params.generic.addr_low);
print_hex_dump(KERN_INFO, "AQ buffer WB: ",
DUMP_PREFIX_OFFSET, 16, 1,
buff, buffer_len, true);
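The debugfs dump now reads the descriptor through the libie generic view
instead of the i40e-private params.internal (param0..param3). Inferred from
the field accesses above, the generic view is laid out roughly as follows
(the exact libie definition may differ):

    /* Assumed shape of the generic parameter view used by the dump. */
    struct libie_aqc_generic {
            __le32 param0;
            __le32 param1;
            __le32 addr_high;
            __le32 addr_low;
    };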
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 8a7a83f83ee5..86c72596617a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -3,6 +3,7 @@
/* ethtool support for i40e */
+#include <linux/net/intel/libie/pctype.h>
#include "i40e_devids.h"
#include "i40e_diag.h"
#include "i40e_txrx_common.h"
@@ -1461,7 +1462,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
netdev_info(netdev,
"Set phy config failed, err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
}
@@ -1471,7 +1472,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
netdev_dbg(netdev,
"Updating link info failed with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
} else {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -1519,7 +1520,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
netdev_info(netdev,
"Set phy config failed, err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
}
@@ -1533,7 +1534,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
netdev_dbg(netdev,
"Updating link info failed with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
done:
@@ -1640,7 +1641,7 @@ static int i40e_nway_reset(struct net_device *netdev)
if (ret) {
netdev_info(netdev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return -EIO;
}
@@ -1757,19 +1758,19 @@ static int i40e_set_pauseparam(struct net_device *netdev,
if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
}
@@ -1917,13 +1918,13 @@ static int i40e_get_eeprom(struct net_device *netdev,
ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
last, NULL);
- if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ if (ret_val && hw->aq.asq_last_status == LIBIE_AQ_RC_EPERM) {
dev_info(&pf->pdev->dev,
"read NVM failed, invalid offset 0x%x\n",
offset);
break;
} else if (ret_val &&
- hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
+ hw->aq.asq_last_status == LIBIE_AQ_RC_EACCES) {
dev_info(&pf->pdev->dev,
"read NVM failed, access, offset 0x%x\n",
offset);
@@ -2749,6 +2750,15 @@ skip_ol_tests:
netif_info(pf, drv, netdev, "testing failed\n");
}
+static void i40e_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ stats->link_down_events = pf->link_down_events;
+}
+
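The new callback backs ethtool's standard link_down_events counter. The
counter itself is maintained elsewhere in the driver; presumably it is
bumped on each observed link-down transition, along the lines of:

    /* Sketch (assumed, not part of this hunk): count link drops. */
    if (old_link && !new_link)
            pf->link_down_events++;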
static void i40e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
@@ -3129,15 +3139,12 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
return __i40e_set_coalesce(netdev, ec, queue);
}
-/**
- * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
- * @pf: pointer to the physical function struct
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow is supported, else Invalid Input.
- **/
-static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
+static int i40e_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u8 flow_pctype = 0;
u64 i_set = 0;
@@ -3146,16 +3153,16 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
switch (cmd->flow_type) {
case TCP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
break;
case UDP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
break;
case TCP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
break;
case UDP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
break;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
@@ -3412,28 +3419,28 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
switch (rule->flow_type) {
case SCTP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP;
break;
case TCP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
break;
case UDP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
break;
case SCTP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP;
break;
case TCP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
break;
case UDP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
break;
case IP_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
break;
case IPV6_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
break;
default:
/* If we have stored a filter with a flow type not listed here
@@ -3535,9 +3542,6 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = vsi->rss_size;
ret = 0;
break;
- case ETHTOOL_GRXFH:
- ret = i40e_get_rss_hash_opts(pf, cmd);
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = pf->fdir_pf_active_filters;
/* report total rule count */
@@ -3566,7 +3570,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
* Returns value of bits to be set per user request
**/
static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
u64 i_setc)
{
u64 i_set = i_setc;
@@ -3611,15 +3615,13 @@ static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
}
#define FLOW_PCTYPES_SIZE 64
-/**
- * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @pf: pointer to the physical function struct
- * @nfc: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- **/
-static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+static int i40e_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
@@ -3643,40 +3645,40 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps))
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case TCP_V6_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps))
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case UDP_V4_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps)) {
- set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
flow_pctypes);
- set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
flow_pctypes);
}
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps)) {
- set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
flow_pctypes);
- set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
flow_pctypes);
}
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -3685,7 +3687,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER);
break;
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -3694,15 +3696,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER);
break;
case IPV4_FLOW:
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4);
break;
case IPV6_FLOW:
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6);
break;
default:
return -EINVAL;
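Each PCTYPE is one bit in the 64-bit hash-enable (HENA) mask, read at the
top of this function from the I40E_PFQF_HENA register pair; after the
switch ORs in the requested BIT_ULL()s, the mask is committed the same way
it was read, split into two 32-bit halves (a sketch mirroring the read):

    /* Sketch: commit the 64-bit hash-enable mask as two registers. */
    i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
    i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));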
@@ -4312,36 +4314,36 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
switch (fsp->flow_type & ~FLOW_EXT) {
case SCTP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP;
fdir_filter_count = &pf->fd_sctp4_filter_cnt;
break;
case TCP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
fdir_filter_count = &pf->fd_tcp4_filter_cnt;
break;
case UDP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
fdir_filter_count = &pf->fd_udp4_filter_cnt;
break;
case SCTP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP;
fdir_filter_count = &pf->fd_sctp6_filter_cnt;
break;
case TCP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
fdir_filter_count = &pf->fd_tcp6_filter_cnt;
break;
case UDP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
fdir_filter_count = &pf->fd_udp6_filter_cnt;
break;
case IP_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
fdir_filter_count = &pf->fd_ip4_filter_cnt;
flex_l3 = true;
break;
case IPV6_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
fdir_filter_count = &pf->fd_ip6_filter_cnt;
flex_l3 = true;
break;
@@ -4677,8 +4679,8 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
* separate support, we'll always assume and enforce that the two flow
* types must have matching input sets.
*/
- if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ if (index == LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER)
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4,
new_mask);
/* Add the new offset and update table, if necessary */
@@ -4954,13 +4956,9 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = i40e_set_rss_hash_opt(pf, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = i40e_add_fdir_ethtool(vsi, cmd);
break;
@@ -5251,9 +5249,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
DECLARE_BITMAP(orig_flags, I40E_PF_FLAGS_NBITS);
DECLARE_BITMAP(new_flags, I40E_PF_FLAGS_NBITS);
struct i40e_netdev_priv *np = netdev_priv(dev);
- enum i40e_admin_queue_err adq_err;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ enum libie_aq_err adq_err;
u32 reset_needed = 0;
int status;
u32 i, j;
@@ -5373,12 +5371,11 @@ flags_complete:
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
0, NULL);
- if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ if (ret && pf->hw.aq.asq_last_status != LIBIE_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
"couldn't set switch config bits, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
}
}
@@ -5440,16 +5437,16 @@ flags_complete:
if (status) {
adq_err = pf->hw.aq.asq_last_status;
switch (adq_err) {
- case I40E_AQ_RC_EEXIST:
+ case LIBIE_AQ_RC_EEXIST:
dev_warn(&pf->pdev->dev,
"FW LLDP agent is already running\n");
reset_needed = 0;
break;
- case I40E_AQ_RC_EPERM:
+ case LIBIE_AQ_RC_EPERM:
dev_warn(&pf->pdev->dev,
"Device configuration forbids SW from starting the LLDP agent.\n");
return -EINVAL;
- case I40E_AQ_RC_EAGAIN:
+ case LIBIE_AQ_RC_EAGAIN:
dev_warn(&pf->pdev->dev,
"Stop FW LLDP agent command is still being processed, please try again in a second.\n");
return -EBUSY;
@@ -5457,8 +5454,7 @@ flags_complete:
dev_warn(&pf->pdev->dev,
"Starting FW LLDP agent failed: error: %pe, %s\n",
ERR_PTR(status),
- i40e_aq_str(&pf->hw,
- adq_err));
+ libie_aq_str(adq_err));
return -EINVAL;
}
}
@@ -5809,6 +5805,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_regs = i40e_get_regs,
.nway_reset = i40e_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = i40e_get_link_ext_stats,
.get_wol = i40e_get_wol,
.set_wol = i40e_set_wol,
.set_eeprom = i40e_set_eeprom,
@@ -5835,6 +5832,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_rxfh_indir_size = i40e_get_rxfh_indir_size,
.get_rxfh = i40e_get_rxfh,
.set_rxfh = i40e_set_rxfh,
+ .get_rxfh_fields = i40e_get_rxfh_fields,
+ .set_rxfh_fields = i40e_set_rxfh_fields,
.get_channels = i40e_get_channels,
.set_channels = i40e_set_channels,
.get_module_info = i40e_get_module_info,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f1c9e575703e..b83f823e4917 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3,6 +3,7 @@
#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
+#include <linux/net/intel/libie/pctype.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <linux/module.h>
@@ -100,6 +101,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_IMPORT_NS("LIBIE");
+MODULE_IMPORT_NS("LIBIE_ADMINQ");
MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
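The extra MODULE_IMPORT_NS("LIBIE_ADMINQ") is needed because the adminq
helpers now come from the libie module and are exported into a named symbol
namespace; without the import, modpost refuses to link the consumer. On the
provider side each export names the namespace, roughly:

    /* Provider side (libie), sketch: export into the named namespace. */
    EXPORT_SYMBOL_NS_GPL(libie_aq_str, "LIBIE_ADMINQ");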
@@ -1813,7 +1815,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (ret)
netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
/* schedule our worker thread which will take care of
@@ -1845,7 +1847,7 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot set RSS key, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return ret;
}
}
@@ -1857,7 +1859,7 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot set RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return ret;
}
}
@@ -2339,19 +2341,18 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_del, int *retval)
{
struct i40e_hw *hw = &vsi->back->hw;
- enum i40e_admin_queue_err aq_status;
+ enum libie_aq_err aq_status;
int aq_ret;
aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
&aq_status);
/* Explicitly ignore and do not report when firmware returns ENOENT */
- if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
+ if (aq_ret && !(aq_status == LIBIE_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
"ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
- vsi_name, ERR_PTR(aq_ret),
- i40e_aq_str(hw, aq_status));
+ vsi_name, ERR_PTR(aq_ret), libie_aq_str(aq_status));
}
}
@@ -2374,7 +2375,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_add)
{
struct i40e_hw *hw = &vsi->back->hw;
- enum i40e_admin_queue_err aq_status;
+ enum libie_aq_err aq_status;
int fcnt;
i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
@@ -2385,19 +2386,17 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_status), vsi_name);
+ libie_aq_str(aq_status), vsi_name);
} else if (vsi->type == I40E_VSI_SRIOV ||
vsi->type == I40E_VSI_VMDQ1 ||
vsi->type == I40E_VSI_VMDQ2) {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
- i40e_aq_str(hw, aq_status), vsi_name,
- vsi_name);
+ libie_aq_str(aq_status), vsi_name, vsi_name);
} else {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
- i40e_aq_str(hw, aq_status), vsi_name,
- vsi->type);
+ libie_aq_str(aq_status), vsi_name, vsi->type);
}
}
}
@@ -2440,8 +2439,7 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s, forcing overflow promiscuous on %s\n",
- i40e_aq_str(hw, hw->aq.asq_last_status),
- vsi_name);
+ libie_aq_str(hw->aq.asq_last_status), vsi_name);
}
return aq_ret;
@@ -2482,7 +2480,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
dev_info(&pf->pdev->dev,
"Set default VSI failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
} else {
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
@@ -2494,7 +2492,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
dev_info(&pf->pdev->dev,
"set unicast promisc failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
hw,
@@ -2504,7 +2502,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
dev_info(&pf->pdev->dev,
"set multicast promisc failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
}
@@ -2812,7 +2810,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
"set multi promisc failed on %s, err %pe aq_err %s\n",
vsi_name,
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
} else {
dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
cur_multipromisc ? "entering" : "leaving");
@@ -2833,7 +2831,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cur_promisc ? "on" : "off",
vsi_name,
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
}
out:
@@ -2954,27 +2952,6 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * i40e_ioctl - Access the hwtstamp interface
- * @netdev: network interface device structure
- * @ifr: interface request data
- * @cmd: ioctl command
- **/
-int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_pf *pf = np->vsi->back;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return i40e_ptp_get_ts_config(pf, ifr);
- case SIOCSHWTSTAMP:
- return i40e_ptp_set_ts_config(pf, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
* i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
* @vsi: the vsi being adjusted
**/
@@ -3003,8 +2980,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
dev_info(&vsi->back->pdev->dev,
"update vlan stripping failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ libie_aq_str(vsi->back->hw.aq.asq_last_status));
}
}
@@ -3038,8 +3014,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
dev_info(&vsi->back->pdev->dev,
"update vlan stripping failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ libie_aq_str(vsi->back->hw.aq.asq_last_status));
}
}
@@ -3283,8 +3258,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
dev_info(&vsi->back->pdev->dev,
"add pvid failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ libie_aq_str(vsi->back->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -5554,7 +5528,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"couldn't get PF vsi bw config, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5565,7 +5539,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5755,7 +5729,7 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
if (ret) {
dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return ret;
}
/* update the local VSI info with updated queue map */
@@ -5811,7 +5785,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
dev_info(&pf->pdev->dev,
"Failed querying vsi bw info, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
goto out;
}
if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
@@ -5878,7 +5852,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
goto out;
}
/* update the local VSI info with updated queue map */
@@ -5891,7 +5865,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
dev_info(&pf->pdev->dev,
"Failed updating vsi bw info, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
goto out;
}
@@ -6005,7 +5979,7 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
dev_err(&pf->pdev->dev,
"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
max_tx_rate, seid, ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
@@ -6017,8 +5991,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
**/
static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{
- enum i40e_admin_queue_err last_aq_status;
struct i40e_cloud_filter *cfilter;
+ enum libie_aq_err last_aq_status;
struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
@@ -6081,7 +6055,7 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"Failed to delete cloud filter, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
+ libie_aq_str(last_aq_status));
kfree(cfilter);
}
@@ -6216,7 +6190,7 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
dev_info(&pf->pdev->dev,
"Cannot set RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
kfree(lut);
return ret;
}
@@ -6315,8 +6289,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
dev_info(&pf->pdev->dev,
"add new vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -6559,12 +6532,10 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
pf->last_sw_conf_valid_flags,
mode, NULL);
- if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
+ if (ret && hw->aq.asq_last_status != LIBIE_AQ_RC_ESRCH)
dev_err(&pf->pdev->dev,
"couldn't set switch config bits, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return ret;
}
@@ -6763,8 +6734,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
if (ret) {
dev_info(&pf->pdev->dev,
"VEB bw config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -6773,8 +6743,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
if (ret) {
dev_info(&pf->pdev->dev,
"Failed getting veb bw config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
}
out:
@@ -6855,7 +6824,7 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
dev_info(&pf->pdev->dev,
"Resume Port Tx failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
@@ -6879,8 +6848,7 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"Suspend Port Tx failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
@@ -6919,8 +6887,7 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
if (ret) {
dev_info(&pf->pdev->dev,
"Set DCB Config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -7036,8 +7003,7 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
if (ret) {
dev_info(&pf->pdev->dev,
"Modify Port ETS failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -7076,8 +7042,7 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
if (ret) {
dev_info(&pf->pdev->dev,
"DCB Updated failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -7160,8 +7125,7 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
if (err) {
dev_info(&pf->pdev->dev,
"Enable Port ETS failed, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
err = -ENOENT;
goto out;
}
@@ -7234,14 +7198,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
- } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ } else if (pf->hw.aq.asq_last_status == LIBIE_AQ_RC_EPERM) {
dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
} else {
dev_info(&pf->pdev->dev,
"Query for DCB configuration failed, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
}
out:
@@ -7497,8 +7460,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"failed to get phy cap., ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(hw->aq.asq_last_status));
return err;
}
speed = abilities.link_speed;
@@ -7509,8 +7471,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"failed to get phy cap., ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(hw->aq.asq_last_status));
return err;
}
@@ -7554,8 +7515,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"set phy config ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
return err;
}
@@ -7895,8 +7855,7 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
}
dev_info(&pf->pdev->dev,
"Error adding mac filter on macvlan err %pe, aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, aq_err));
+ ERR_PTR(ret), libie_aq_str(aq_err));
netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
}
@@ -7968,8 +7927,7 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
if (ret) {
dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return ret;
}
/* update the local VSI info with updated queue map */
@@ -8184,8 +8142,7 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
} else {
dev_info(&pf->pdev->dev,
"Error deleting mac filter on macvlan err %pe, aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, aq_err));
+ ERR_PTR(ret), libie_aq_str(aq_err));
}
break;
}
@@ -9188,47 +9145,47 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
i40e_reset_fdir_filter_cnt(pf);
/* Reprogram the default input set for TCP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for TCP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for UDP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for UDP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for SCTP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for SCTP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for Other/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
/* Reprogram the default input set for Other/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV6,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
@@ -9439,8 +9396,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
static int i40e_handle_lldp_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_aqc_lldp_get_mib *mib =
- (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *mib = libie_aq_raw(&e->desc);
struct i40e_hw *hw = &pf->hw;
struct i40e_dcbx_config tmp_dcbx_cfg;
bool need_reconfig = false;
@@ -9497,8 +9453,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
dev_info(&pf->pdev->dev,
"Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
goto exit;
}
@@ -9579,8 +9534,7 @@ void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_aqc_lan_overflow *data =
- (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
+ struct i40e_aqc_lan_overflow *data = libie_aq_raw(&e->desc);
u32 queue = le32_to_cpu(data->prtdcb_rupto);
u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
struct i40e_hw *hw = &pf->hw;
@@ -9656,7 +9610,7 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
* settings. It is safe to restore the default input set
* because there are no active TCPv4 filter rules.
*/
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
@@ -9959,6 +9913,9 @@ static void i40e_link_event(struct i40e_pf *pf)
new_link == netif_carrier_ok(vsi->netdev)))
return;
+ if (!new_link && old_link)
+ pf->link_down_events++;
+
i40e_print_link_message(vsi, new_link);
/* Notify the base of the switch tree connected to
@@ -10097,8 +10054,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
static void i40e_handle_link_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_aqc_get_link_status *status =
- (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+ struct i40e_aqc_get_link_status *status = libie_aq_raw(&e->desc);
/* Do a new status request to re-enable LSE reporting
* and load new status information into the hw struct
@@ -10306,8 +10262,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
@@ -10318,8 +10273,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"update vsi switch failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
}
}
@@ -10342,8 +10296,7 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
@@ -10354,8 +10307,7 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"update vsi switch failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
}
}
@@ -10470,12 +10422,12 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type)
{
- struct i40e_aqc_list_capabilities_element_resp *cap_buf;
+ struct libie_aqc_list_caps_elem *cap_buf;
u16 data_size;
int buf_len;
int err;
- buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+ buf_len = 40 * sizeof(struct libie_aqc_list_caps_elem);
do {
cap_buf = kzalloc(buf_len, GFP_KERNEL);
if (!cap_buf)
@@ -10488,15 +10440,14 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
/* data loaded, buffer no longer needed */
kfree(cap_buf);
- if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+ if (pf->hw.aq.asq_last_status == LIBIE_AQ_RC_ENOMEM) {
/* retry with a larger buffer */
buf_len = data_size;
- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
+ } else if (pf->hw.aq.asq_last_status != LIBIE_AQ_RC_OK || err) {
dev_info(&pf->pdev->dev,
"capability discovery failed, err %pe aq_err %s\n",
ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -ENODEV;
}
} while (err);
@@ -10633,8 +10584,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
dev_dbg(&pf->pdev->dev,
"Failed to rebuild cloud filter, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
}
@@ -10875,8 +10825,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
ret = i40e_init_adminq(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto clear_recovery;
}
i40e_get_oem_version(&pf->hw);
@@ -10987,8 +10936,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
@@ -11086,8 +11034,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (ret)
dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
@@ -11118,8 +11065,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
dev_warn(&pf->pdev->dev,
"Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
pf->cur_promisc ? "on" : "off",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
i40e_reset_all_vfs(pf, true);
@@ -12325,8 +12271,7 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot get RSS key, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
}
@@ -12339,8 +12284,7 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot get RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
}
@@ -12507,7 +12451,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= i40e_pf_get_default_rss_hena(pf);
+ hena |= i40e_pf_get_default_rss_hashcfg(pf);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
@@ -13001,8 +12945,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
NULL);
if (ret) {
netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return -EIO;
}
@@ -13021,8 +12964,7 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
if (ret) {
netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return -EIO;
}
@@ -13622,7 +13564,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40e_set_mac,
.ndo_change_mtu = i40e_change_mtu,
- .ndo_eth_ioctl = i40e_ioctl,
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
@@ -13650,6 +13591,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_xsk_wakeup = i40e_xsk_wakeup,
.ndo_dfwd_add_station = i40e_fwd_add,
.ndo_dfwd_del_station = i40e_fwd_del,
+ .ndo_hwtstamp_get = i40e_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = i40e_ptp_hwtstamp_set,
};
/**
@@ -13911,8 +13854,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -ENOENT;
}
vsi->info = ctxt.info;
@@ -13941,8 +13883,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"update vsi failed, err %d aq_err %s\n",
ret,
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -13961,8 +13902,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"update vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -13985,8 +13925,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
"failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
enabled_tc,
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
}
break;
@@ -14080,8 +14019,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&vsi->back->pdev->dev,
"add vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -14111,8 +14049,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get vsi bw info, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
}
@@ -14560,8 +14497,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
if (ret) {
dev_info(&pf->pdev->dev,
"query veb bw config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
goto out;
}
@@ -14570,8 +14506,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
if (ret) {
dev_info(&pf->pdev->dev,
"query veb bw ets config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
goto out;
}
@@ -14759,8 +14694,7 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't add VEB, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return -EPERM;
}
@@ -14770,16 +14704,14 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get VEB statistics idx, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return -EPERM;
}
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get VEB bw info, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
}
@@ -14974,9 +14906,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
if (ret) {
dev_info(&pf->pdev->dev,
"get switch config failed err %d aq_err %s\n",
- ret,
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ ret, libie_aq_str(pf->hw.aq.asq_last_status));
kfree(aq_buf);
return -ENOENT;
}
@@ -15021,8 +14951,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't fetch switch config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
i40e_pf_reset_stats(pf);
@@ -15045,12 +14974,11 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
NULL);
- if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ if (ret && pf->hw.aq.asq_last_status != LIBIE_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
"couldn't set switch config bits, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
}
pf->last_sw_conf_valid_flags = valid_flags;
@@ -15891,7 +15819,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
- pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
@@ -15953,8 +15880,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
/* VF MDD event logs are rate limited to one second intervals */
ratelimit_state_init(&pf->mdd_message_rate_limit, 1 * HZ, 1);
@@ -15976,8 +15902,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -16108,8 +16033,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
/* set the FEC config due to the board capabilities */
@@ -16119,8 +16043,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
/* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
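The mechanical change repeated throughout i40e_main.c swaps the driver-private i40e_aq_str(hw, err), which took an i40e_hw handle, for the shared libie_aq_str(err), a pure code-to-string lookup. A minimal sketch of such a helper, assuming the usual switch-based mapping (the real table lives in the libie module; the entries below are illustrative):

const char *libie_aq_str(enum libie_aq_err err)
{
	/* map a firmware admin-queue return code to a printable name */
	switch (err) {
	case LIBIE_AQ_RC_OK:
		return "OK";
	case LIBIE_AQ_RC_EPERM:
		return "LIBIE_AQ_RC_EPERM";
	case LIBIE_AQ_RC_ENOMEM:
		return "LIBIE_AQ_RC_ENOMEM";
	case LIBIE_AQ_RC_EBUSY:
		return "LIBIE_AQ_RC_EBUSY";
	case LIBIE_AQ_RC_ESRCH:
		return "LIBIE_AQ_RC_ESRCH";
	default:
		return "LIBIE_AQ_RC_UNKNOWN";
	}
}

Dropping the hw argument is what lets most of the wrapped dev_info() calls above collapse back onto a single line.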
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 7f0936f4e05e..ed3c54e36be3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -997,7 +997,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
u8 *bytes, int *perrno)
{
struct i40e_asq_cmd_details cmd_details;
- struct i40e_aq_desc *aq_desc;
+ struct libie_aq_desc *aq_desc;
u32 buff_size = 0;
u8 *buff = NULL;
u32 aq_desc_len;
@@ -1011,7 +1011,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
- aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_desc_len = sizeof(struct libie_aq_desc);
memset(&hw->nvm_wb_desc, 0, aq_desc_len);
/* get the aq descriptor */
@@ -1022,7 +1022,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*perrno = -EINVAL;
return -EINVAL;
}
- aq_desc = (struct i40e_aq_desc *)bytes;
+ aq_desc = (struct libie_aq_desc *)bytes;
/* if data buffer needed, make sure it's ready */
aq_data_len = cmd->data_size - aq_desc_len;
@@ -1053,7 +1053,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"%s err %pe aq_err %s\n",
__func__, ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
return status;
}
@@ -1087,7 +1087,7 @@ static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
- aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_desc_len = sizeof(struct libie_aq_desc);
aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
/* check offset range */
@@ -1154,7 +1154,7 @@ static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
- aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_desc_len = sizeof(struct libie_aq_desc);
aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
/* check copylength range */
@@ -1442,7 +1442,7 @@ retry:
* so here we try to reacquire the semaphore then retry the write.
* We only do one retry, then give up.
*/
- if (status && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY &&
+ if (status && hw->aq.asq_last_status == LIBIE_AQ_RC_EBUSY &&
!retry_attempt) {
u32 old_asq_status = hw->aq.asq_last_status;
int old_status = status;
@@ -1628,9 +1628,9 @@ void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
* @desc: AdminQ descriptor
**/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
- struct i40e_aq_desc *desc)
+ struct libie_aq_desc *desc)
{
- u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+ u32 aq_desc_len = sizeof(struct libie_aq_desc);
if (opcode == hw->nvm_wait_opcode) {
memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
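The NVM-update paths cast raw byte buffers to struct libie_aq_desc and size every copy with sizeof(struct libie_aq_desc), so the generic descriptor has to preserve the 32-byte legacy AQ layout. A sketch of that layout, inferred from the field accesses in this series (flags, opcode, datalen, retval, the cookie pair, and the generic/raw params union); the authoritative definition lives in <linux/net/intel/libie/adminq.h> and carries more union members than shown:

struct libie_aq_desc {
	__le16 flags;
	__le16 opcode;
	__le16 datalen;
	__le16 retval;		/* firmware return code (enum libie_aq_err) */
	__le32 cookie_high;
	__le32 cookie_low;
	union {
		struct {
			__le32 param0;
			__le32 param1;
			__le32 addr_high;	/* upper 32 bits of buffer PA */
			__le32 addr_low;	/* lower 32 bits of buffer PA */
		} generic;
		u8 raw[16];		/* what libie_aq_raw() hands back */
	} params;
};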
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 099bb8ab7d70..aef5de53ce3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -23,22 +23,22 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
int
-i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+i40e_asq_send_command(struct i40e_hw *hw, struct libie_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
int
-i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+i40e_asq_send_command_atomic(struct i40e_hw *hw, struct libie_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context);
int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context,
- enum i40e_admin_queue_err *aq_status);
+ enum libie_aq_err *aq_status);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
@@ -46,7 +46,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
bool i40e_check_asq_alive(struct i40e_hw *hw);
int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
@@ -155,7 +154,7 @@ int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
+ enum libie_aq_err *aq_status);
int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
@@ -163,7 +162,7 @@ int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
+ enum libie_aq_err *aq_status);
int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
@@ -339,7 +338,7 @@ int i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
- struct i40e_aq_desc *desc);
+ struct libie_aq_desc *desc);
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
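With the _v2 senders reporting firmware status through an enum libie_aq_err out-parameter, a caller can log the precise admin-queue return code even when the wrapper maps the failure onto a generic errno. A hypothetical caller (the surrounding variables are made up) combining that with libie_aq_str():

	enum libie_aq_err aq_status;
	int ret;

	ret = i40e_aq_add_macvlan_v2(hw, vsi_seid, mv_list, num_entries,
				     NULL, &aq_status);
	if (ret)
		dev_warn(&pf->pdev->dev,
			 "add macvlan failed, err %pe aq_err %s\n",
			 ERR_PTR(ret), libie_aq_str(aq_status));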
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index b72a4b5d76b9..33535418178b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -550,7 +550,7 @@ static int i40e_ptp_enable_pin(struct i40e_pf *pf, unsigned int chan,
pins.gpio_4 = pf->ptp_pins->gpio_4;
/* To turn on the pin - find the corresponding one based on
- * the given index. To to turn the function off - find
+ * the given index. To turn the function off - find
* which pin had it assigned. Don't use ptp_find_pin here
* because it tries to lock the pincfg_mux which is locked by
* ptp_pin_store() that calls here.
@@ -912,23 +912,26 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
}
/**
- * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
- * @pf: Board private structure
- * @ifr: ioctl data
+ * i40e_ptp_hwtstamp_get - interface to read the HW timestamping
+ * @netdev: Network device structure
+ * @config: Timestamping configuration structure
*
* Obtain the current hardware timestamping settings as requested. To do this,
* keep a shadow copy of the timestamp settings rather than attempting to
* deconstruct it from the registers.
**/
-int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+int i40e_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config *config = &pf->tstamp_config;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = pf->tstamp_config;
+
+ return 0;
}
/**
@@ -1167,7 +1170,7 @@ int i40e_ptp_alloc_pins(struct i40e_pf *pf)
* more broad if the specific filter is not directly supported.
**/
static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct i40e_hw *hw = &pf->hw;
u32 tsyntype, regval;
@@ -1290,9 +1293,10 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
}
/**
- * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
- * @pf: Board private structure
- * @ifr: ioctl data
+ * i40e_ptp_hwtstamp_set - interface to control the HW timestamping
+ * @netdev: Network device structure
+ * @config: Timestamping configuration structure
+ * @extack: Netlink extended ack structure for error reporting
*
* Respond to the user filter requests and make the appropriate hardware
* changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
@@ -1303,26 +1307,25 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
* as the user receives the timestamps they care about and the user is notified
* the filter has been broadened.
**/
-int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+int i40e_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
int err;
if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = i40e_ptp_set_timestamp_mode(pf, &config);
+ err = i40e_ptp_set_timestamp_mode(pf, config);
if (err)
return err;
/* save these settings for future reference */
- pf->tstamp_config = config;
+ pf->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
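The PTP rework follows the generic ndo_hwtstamp_get()/ndo_hwtstamp_set() contract: the core now copies the uapi struct hwtstamp_config to and from user space itself and hands the driver a struct kernel_hwtstamp_config in kernel memory, which is why both copy_from_user() and copy_to_user() disappear above. A minimal driver pair under that contract (foo_priv, its tstamp_config shadow copy, and foo_program_tstamp_mode() are hypothetical stand-ins):

static int foo_hwtstamp_get(struct net_device *netdev,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_priv *priv = netdev_priv(netdev);

	*config = priv->tstamp_config;	/* shadow copy, no copy_to_user() */
	return 0;
}

static int foo_hwtstamp_set(struct net_device *netdev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(netdev);
	int err;

	/* program the hardware; the mode may be broadened, as i40e does */
	err = foo_program_tstamp_mode(priv, config);
	if (err)
		return err;

	priv->tstamp_config = *config;	/* cache what was actually applied */
	return 0;
}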
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index c006f716a3bd..048c33039130 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/net/intel/libie/pctype.h>
#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>
@@ -397,12 +398,12 @@ static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_UDPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_UDPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP);
if (ret) {
kfree(raw_packet);
@@ -444,12 +445,12 @@ static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_TCPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_TCPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP);
if (ret) {
kfree(raw_packet);
@@ -499,12 +500,12 @@ static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_SCTPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_SCTPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP);
if (ret) {
kfree(raw_packet);
@@ -543,11 +544,11 @@ static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
int i;
if (ipv4) {
- iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
- iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
+ iter_start = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ iter_end = LIBIE_FILTER_PCTYPE_FRAG_IPV4;
} else {
- iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
- iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
+ iter_start = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ iter_end = LIBIE_FILTER_PCTYPE_FRAG_IPV6;
}
for (i = iter_start; i <= iter_end; i++) {
@@ -2948,9 +2949,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK,
tx_ring->queue_index);
flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
- (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+ (LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
- (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+ (LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 7c26c9a2bf65..1e5fd63d47f4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -4,6 +4,7 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
+#include <linux/net/intel/libie/pctype.h>
#include <net/xdp.h>
#include "i40e_type.h"
@@ -71,30 +72,30 @@ enum i40e_dyn_idx {
#define I40E_SW_ITR I40E_IDX_ITR2
/* Supported RSS offloads */
-#define I40E_DEFAULT_RSS_HENA ( \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
- BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
-
-#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-
-#define i40e_pf_get_default_rss_hena(pf) \
+#define I40E_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HASHCFG_EXPANDED (I40E_DEFAULT_RSS_HASHCFG | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hashcfg(pf) \
(test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? \
- I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+ I40E_DEFAULT_RSS_HASHCFG_EXPANDED : I40E_DEFAULT_RSS_HASHCFG)
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
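Only the spelling changes in these macros: the "HENA" (hash enable) mask becomes the "hash config", still a 64-bit bitmap indexed by packet classifier type, now spelled with the libie-owned LIBIE_FILTER_PCTYPE_* values. Programming it still means splitting the mask across a pair of 32-bit registers, as the other hunks in this series do:

	u64 hashcfg = i40e_pf_get_default_rss_hashcfg(pf);

	/* low and high halves land in consecutive 32-bit registers */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hashcfg);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hashcfg >> 32));

	/* testing one protocol, e.g. TCP over IPv4 */
	bool tcp4_hashed = hashcfg & BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP);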
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 28568e126850..ed8bbdb586da 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -24,7 +24,7 @@
/* forward declaration */
struct i40e_hw;
-typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct libie_aq_desc *);
/* Data type manipulation macros. */
@@ -555,8 +555,8 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
- struct i40e_aq_desc nvm_wb_desc;
- struct i40e_aq_desc nvm_aq_event_desc;
+ struct libie_aq_desc nvm_wb_desc;
+ struct libie_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -929,38 +929,6 @@ struct i40e_filter_program_desc {
#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
-/* Packet Classifier Types for filters */
-enum i40e_filter_pctype {
- /* Note: Values 0-28 are reserved for future use.
- * Value 29, 30, 32 are not supported on XL710 and X710.
- */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
- I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
- I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use.
- * Value 39, 40, 42 are not supported on XL710 and X710.
- */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
- I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
- I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
- /* Note: Value 47 is reserved for future use */
- I40E_FILTER_PCTYPE_FCOE_OX = 48,
- I40E_FILTER_PCTYPE_FCOE_RX = 49,
- I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
- /* Note: Values 51-62 are reserved for future use */
- I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
-};
-
enum i40e_filter_program_desc_dest {
I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 7ccfc1191ae5..9b8efdeafbcf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -812,7 +812,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
}
if (!idx) {
- u64 hena = i40e_pf_get_default_rss_hena(pf);
+ u64 hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
u8 broadcast[ETH_ALEN];
vf->lan_vsi_idx = vsi->idx;
@@ -841,8 +841,9 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
- wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
+ wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hashcfg);
+ wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(hashcfg >> 32));
/* program mac filter only for VF VSI */
ret = i40e_sync_vsi_filters(vsi);
if (ret)
@@ -1289,9 +1290,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
return aq_ret;
}
@@ -1305,9 +1305,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
}
return aq_ret;
@@ -1322,9 +1321,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
if (!aq_tmp)
aq_tmp = aq_ret;
@@ -1338,9 +1336,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
if (!aq_tmp)
aq_tmp = aq_ret;
@@ -3447,15 +3444,15 @@ err:
}
/**
- * i40e_vc_get_rss_hena
+ * i40e_vc_get_rss_hashcfg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Return the RSS HENA bits allowed by the hardware
+ * Return the RSS Hash configuration bits allowed by the hardware
**/
-static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
+static int i40e_vc_get_rss_hashcfg(struct i40e_vf *vf, u8 *msg)
{
- struct virtchnl_rss_hena *vrh = NULL;
+ struct virtchnl_rss_hashcfg *vrh = NULL;
struct i40e_pf *pf = vf->pf;
int aq_ret = 0;
int len = 0;
@@ -3464,7 +3461,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
aq_ret = -EINVAL;
goto err;
}
- len = sizeof(struct virtchnl_rss_hena);
+ len = sizeof(struct virtchnl_rss_hashcfg);
vrh = kzalloc(len, GFP_KERNEL);
if (!vrh) {
@@ -3472,26 +3469,26 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
len = 0;
goto err;
}
- vrh->hena = i40e_pf_get_default_rss_hena(pf);
+ vrh->hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
err:
/* send the response back to the VF */
- aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS,
aq_ret, (u8 *)vrh, len);
kfree(vrh);
return aq_ret;
}
/**
- * i40e_vc_set_rss_hena
+ * i40e_vc_set_rss_hashcfg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Set the RSS HENA bits for the VF
+ * Set the RSS Hash configuration bits for the VF
**/
-static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
+static int i40e_vc_set_rss_hashcfg(struct i40e_vf *vf, u8 *msg)
{
- struct virtchnl_rss_hena *vrh =
- (struct virtchnl_rss_hena *)msg;
+ struct virtchnl_rss_hashcfg *vrh =
+ (struct virtchnl_rss_hashcfg *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
int aq_ret = 0;
@@ -3500,13 +3497,14 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
aq_ret = -EINVAL;
goto err;
}
- i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id),
+ (u32)vrh->hashcfg);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
- (u32)(vrh->hena >> 32));
+ (u32)(vrh->hashcfg >> 32));
/* send the response to the VF */
err:
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, aq_ret);
}
/**
@@ -3746,8 +3744,7 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
dev_err(&pf->pdev->dev,
"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
hlist_del(&cfilter->cloud_node);
kfree(cfilter);
@@ -3849,7 +3846,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
goto err;
}
@@ -3985,7 +3982,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
goto err_free;
}
@@ -4253,11 +4250,11 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
case VIRTCHNL_OP_CONFIG_RSS_LUT:
ret = i40e_vc_config_rss_lut(vf, msg);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- ret = i40e_vc_get_rss_hena(vf, msg);
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
+ ret = i40e_vc_get_rss_hashcfg(vf, msg);
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- ret = i40e_vc_set_rss_hena(vf, msg);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ ret = i40e_vc_set_rss_hashcfg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
ret = i40e_vc_enable_vlan_stripping(vf, msg);
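The virtchnl side mirrors the rename: VIRTCHNL_OP_GET_RSS_HENA_CAPS and VIRTCHNL_OP_SET_RSS_HENA become the HASHCFG opcodes, and judging by the kzalloc() size and the single vrh->hashcfg access above, the payload stays a one-field message with the member renamed (a sketch, not the authoritative uapi definition):

struct virtchnl_rss_hashcfg {
	u64 hashcfg;	/* bitmap of LIBIE_FILTER_PCTYPE_* bits */
};

On a GET the PF answers with i40e_pf_get_default_rss_hashcfg(); on a SET it writes the VF's mask into the per-VF I40E_VFQF_HENA1 register pair, exactly as i40e_vc_set_rss_hashcfg() does above.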
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index f7a98ff43a57..a87e0c6d4017 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -114,8 +114,6 @@ struct iavf_q_vector {
u16 reg_idx; /* register index of the interrupt */
char name[IFNAMSIZ + 15];
bool arm_wb_state;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
};
/* Helper macros to switch between ints/sec and what the register uses.
@@ -315,8 +313,8 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */
#define IAVF_FLAG_AQ_GET_CONFIG BIT_ULL(10)
/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
-#define IAVF_FLAG_AQ_GET_HENA BIT_ULL(11)
-#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
+#define IAVF_FLAG_AQ_GET_RSS_HASHCFG BIT_ULL(11)
+#define IAVF_FLAG_AQ_SET_RSS_HASHCFG BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
#define IAVF_FLAG_AQ_SET_RSS_HFUNC BIT_ULL(15)
@@ -456,7 +454,7 @@ struct iavf_adapter {
u32 aq_wait_count;
/* RSS stuff */
enum virtchnl_rss_algorithm hfunc;
- u64 hena;
+ u64 rss_hashcfg;
u16 rss_key_size;
u16 rss_lut_size;
u8 *rss_key;
@@ -600,8 +598,8 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter);
bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
void iavf_request_stats(struct iavf_adapter *adapter);
int iavf_request_reset(struct iavf_adapter *adapter);
-void iavf_get_hena(struct iavf_adapter *adapter);
-void iavf_set_hena(struct iavf_adapter *adapter);
+void iavf_get_rss_hashcfg(struct iavf_adapter *adapter);
+void iavf_set_rss_hashcfg(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
void iavf_set_rss_lut(struct iavf_adapter *adapter);
void iavf_set_rss_hfunc(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 82fcd18ad660..6937b7dd44cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -18,7 +18,7 @@ static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
iavf_mem_atq_ring,
(hw->aq.num_asq_entries *
- sizeof(struct iavf_aq_desc)),
+ sizeof(struct libie_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
@@ -45,7 +45,7 @@ static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
iavf_mem_arq_ring,
(hw->aq.num_arq_entries *
- sizeof(struct iavf_aq_desc)),
+ sizeof(struct libie_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
return ret_code;
@@ -81,7 +81,7 @@ static void iavf_free_adminq_arq(struct iavf_hw *hw)
**/
static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
- struct iavf_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct iavf_dma_mem *bi;
enum iavf_status ret_code;
int i;
@@ -111,9 +111,9 @@ static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
/* now configure the descriptors for use */
desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
- desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design; there is no
* register for buffer size configuration
@@ -122,12 +122,12 @@ static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
desc->retval = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
- desc->params.external.addr_high =
+ desc->params.generic.addr_high =
cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low =
+ desc->params.generic.addr_low =
cpu_to_le32(lower_32_bits(bi->pa));
- desc->params.external.param0 = 0;
- desc->params.external.param1 = 0;
+ desc->params.generic.param0 = 0;
+ desc->params.generic.param1 = 0;
}
alloc_arq_bufs:
@@ -558,8 +558,8 @@ static u16 iavf_clean_asq(struct iavf_hw *hw)
struct iavf_adminq_ring *asq = &hw->aq.asq;
struct iavf_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
- struct iavf_aq_desc desc_cb;
- struct iavf_aq_desc *desc;
+ struct libie_aq_desc desc_cb;
+ struct libie_aq_desc *desc;
desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = IAVF_ADMINQ_DETAILS(*asq, ntc);
@@ -573,7 +573,7 @@ static u16 iavf_clean_asq(struct iavf_hw *hw)
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
- memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
memset((void *)details, 0,
sizeof(struct iavf_asq_cmd_details));
ntc++;
@@ -615,14 +615,14 @@ bool iavf_asq_done(struct iavf_hw *hw)
* queue. It runs the queue, cleans the queue, etc
**/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
- struct iavf_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct iavf_asq_cmd_details *cmd_details)
{
struct iavf_dma_mem *dma_buff = NULL;
struct iavf_asq_cmd_details *details;
- struct iavf_aq_desc *desc_on_ring;
+ struct libie_aq_desc *desc_on_ring;
bool cmd_completed = false;
enum iavf_status status = 0;
u16 retval = 0;
@@ -637,7 +637,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
goto asq_send_command_error;
}
- hw->aq.asq_last_status = IAVF_AQ_RC_OK;
+ hw->aq.asq_last_status = LIBIE_AQ_RC_OK;
val = rd32(hw, IAVF_VF_ATQH1);
if (val >= hw->aq.num_asq_entries) {
@@ -717,9 +717,9 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
/* Update the address values in the desc with the pa value
* for respective buffer
*/
- desc_on_ring->params.external.addr_high =
+ desc_on_ring->params.generic.addr_high =
cpu_to_le32(upper_32_bits(dma_buff->pa));
- desc_on_ring->params.external.addr_low =
+ desc_on_ring->params.generic.addr_low =
cpu_to_le32(lower_32_bits(dma_buff->pa));
}
@@ -766,13 +766,13 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
retval &= 0xff;
}
cmd_completed = true;
- if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
+ if ((enum libie_aq_err)retval == LIBIE_AQ_RC_OK)
status = 0;
- else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
+ else if ((enum libie_aq_err)retval == LIBIE_AQ_RC_EBUSY)
status = IAVF_ERR_NOT_READY;
else
status = IAVF_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
+ hw->aq.asq_last_status = (enum libie_aq_err)retval;
}
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
@@ -809,12 +809,12 @@ asq_send_command_error:
*
* Fill the desc with default values
**/
-void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
+void iavf_fill_default_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
- memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
/**
@@ -832,7 +832,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
u16 *pending)
{
u16 ntc = hw->aq.arq.next_to_clean;
- struct iavf_aq_desc *desc;
+ struct libie_aq_desc *desc;
enum iavf_status ret_code = 0;
struct iavf_dma_mem *bi;
u16 desc_idx;
@@ -866,9 +866,9 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
desc_idx = ntc;
hw->aq.arq_last_status =
- (enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
+ (enum libie_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & IAVF_AQ_FLAG_ERR) {
+ if (flags & LIBIE_AQ_FLAG_ERR) {
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
@@ -892,14 +892,14 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
- memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
- desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
- desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
wr32(hw, IAVF_VF_ARQT1, ntc);
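iavf now builds both rings out of struct libie_aq_desc and the LIBIE_AQ_FLAG_* bits. For reference, the flags exercised in this file, assuming libie keeps the legacy admin-queue bit positions (the values are taken from the old iavf definitions and are illustrative only):

/* descriptor flags word, as used by the ASQ/ARQ code above */
#define LIBIE_AQ_FLAG_ERR	BIT(2)	/* writeback: firmware reported an error */
#define LIBIE_AQ_FLAG_LB	BIT(9)	/* indirect buffer larger than 512 bytes */
#define LIBIE_AQ_FLAG_BUF	BIT(12)	/* descriptor references an external buffer */
#define LIBIE_AQ_FLAG_SI	BIT(13)	/* set by default on every sent command */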
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index 406506f64bdd..bbf5c4b3a2ae 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -9,7 +9,7 @@
#include "iavf_adminq_cmd.h"
#define IAVF_ADMINQ_DESC(R, i) \
- (&(((struct iavf_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct libie_aq_desc *)((R).desc_buf.va))[i]))
#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
@@ -39,7 +39,7 @@ struct iavf_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
- struct iavf_aq_desc *wb_desc;
+ struct libie_aq_desc *wb_desc;
};
#define IAVF_ADMINQ_DETAILS(R, i) \
@@ -47,7 +47,7 @@ struct iavf_asq_cmd_details {
/* ARQ event information */
struct iavf_arq_event_info {
- struct iavf_aq_desc desc;
+ struct libie_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -72,8 +72,8 @@ struct iavf_adminq_info {
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
- enum iavf_admin_queue_err asq_last_status;
- enum iavf_admin_queue_err arq_last_status;
+ enum libie_aq_err asq_last_status;
+ enum libie_aq_err arq_last_status;
};
/**
@@ -123,6 +123,6 @@ static inline int iavf_aq_rc_to_posix(int aq_ret, int aq_rc)
#define IAVF_AQ_LARGE_BUF 512
#define IAVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
-void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode);
+void iavf_fill_default_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode);
#endif /* _IAVF_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
index bc512308557b..0482c9ce9b9c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _IAVF_ADMINQ_CMD_H_
#define _IAVF_ADMINQ_CMD_H_
+#include <linux/net/intel/libie/adminq.h>
+
/* This header file defines the iavf Admin Queue commands and is shared between